diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 92d95bfe34c..b541376f4de 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -6,7 +6,7 @@ go.sum @ajm188 @deepthi @harshit-gangal @mattlord @rohit-nayak-ps @systay @froui /.github/ISSUE_TEMPLATE/ @deepthi @frouioui @mattlord /.github/workflows/ @deepthi @frouioui @mattlord @rohit-nayak-ps /config/mycnf/ @deepthi @shlomi-noach @mattlord -/doc/ @deepthi @frouioui @GuptaManan100 @rsajwani +/doc/ @deepthi @frouioui @GuptaManan100 /docker/ @deepthi @derekperkins @dkhenry @mattlord @GuptaManan100 @frouioui /examples/compose @shlomi-noach @GuptaManan100 @frouioui /examples/demo @mattlord @rohit-nayak-ps @@ -26,12 +26,12 @@ go.sum @ajm188 @deepthi @harshit-gangal @mattlord @rohit-nayak-ps @systay @froui /go/test/endtoend/onlineddl @rohit-nayak-ps @shlomi-noach /go/test/endtoend/messaging @mattlord @rohit-nayak-ps @derekperkins /go/test/endtoend/vtgate @harshit-gangal @systay @frouioui -/go/test/endtoend/vtorc @deepthi @shlomi-noach @GuptaManan100 @rsajwani +/go/test/endtoend/vtorc @deepthi @shlomi-noach @GuptaManan100 /go/tools/ @frouioui @systay /go/vt/dbconnpool @harshit-gangal @mattlord /go/vt/discovery @deepthi @frouioui /go/vt/discovery/*tablet_picker* @rohit-nayak-ps @mattlord -/go/vt/mysqlctl @deepthi @mattlord @rsajwani +/go/vt/mysqlctl @deepthi @mattlord /go/vt/proto @deepthi @harshit-gangal @mattlord /go/vt/proto/vtadmin @ajm188 @notfelineit /go/vt/schema @mattlord @shlomi-noach @@ -39,8 +39,8 @@ go.sum @ajm188 @deepthi @harshit-gangal @mattlord @rohit-nayak-ps @systay @froui /go/vt/sqlparser @harshit-gangal @systay @GuptaManan100 /go/vt/srvtopo @deepthi @mattlord /go/vt/sysvars @harshit-gangal @systay -/go/vt/topo @deepthi @mattlord @rsajwani -/go/vt/topotools @deepthi @mattlord @rsajwani +/go/vt/topo @deepthi @mattlord +/go/vt/topotools @deepthi @mattlord /go/vt/vitessdriver @harshit-gangal /go/vt/vtadmin @ajm188 @notfelineit @rohit-nayak-ps /go/vt/vtctl @ajm188 @deepthi @rohit-nayak-ps @@ -56,18 +56,18 @@ go.sum @ajm188 @deepthi @harshit-gangal @mattlord @rohit-nayak-ps @systay @froui /go/vt/vtgate/endtoend/*vstream* @rohit-nayak-ps @mattlord /go/vt/vtgate/planbuilder @harshit-gangal @systay @frouioui @GuptaManan100 @arthurschreiber /go/vt/vtgate/*vstream* @rohit-nayak-ps @mattlord -/go/vt/vtorc @deepthi @shlomi-noach @GuptaManan100 @rsajwani +/go/vt/vtorc @deepthi @shlomi-noach @GuptaManan100 /go/vt/vttablet/*conn* @harshit-gangal @systay /go/vt/vttablet/endtoend @harshit-gangal @mattlord @rohit-nayak-ps @systay -/go/vt/vttablet/grpc* @ajm188 @rohit-nayak-ps @rsajwani @shlomi-noach @harshit-gangal +/go/vt/vttablet/grpc* @ajm188 @rohit-nayak-ps @shlomi-noach @harshit-gangal /go/vt/vttablet/onlineddl @mattlord @rohit-nayak-ps @shlomi-noach /go/vt/vttablet/queryservice @harshit-gangal @systay -/go/vt/vttablet/tabletmanager @deepthi @GuptaManan100 @rohit-nayak-ps @rsajwani @shlomi-noach +/go/vt/vttablet/tabletmanager @deepthi @GuptaManan100 @rohit-nayak-ps @shlomi-noach /go/vt/vttablet/tabletmanager/vreplication @rohit-nayak-ps @mattlord /go/vt/vttablet/tabletmanager/vstreamer @rohit-nayak-ps @mattlord /go/vt/vttablet/tabletserver* @harshit-gangal @systay @shlomi-noach @rohit-nayak-ps /go/vt/vttablet/tabletserver/messager @mattlord @rohit-nayak-ps @derekperkins -/go/vt/vttablet/*tmclient* @ajm188 @GuptaManan100 @rohit-nayak-ps @rsajwani @shlomi-noach +/go/vt/vttablet/*tmclient* @ajm188 @GuptaManan100 @rohit-nayak-ps @shlomi-noach /go/vt/vttablet/vexec @mattlord @rohit-nayak-ps @shlomi-noach /go/vt/wrangler @deepthi @mattlord @rohit-nayak-ps /go/vt/workflow @mattlord @rohit-nayak-ps diff --git a/.github/workflows/assign_milestone.yml b/.github/workflows/assign_milestone.yml new file mode 100644 index 00000000000..3a81bc4067c --- /dev/null +++ b/.github/workflows/assign_milestone.yml @@ -0,0 +1,30 @@ +name: Assign Milestone + +on: + pull_request_target: + types: [opened] + +permissions: read-all + +env: + GH_TOKEN: ${{ github.token }} + +jobs: + build: + name: Assign Milestone + runs-on: ubuntu-22.04 + permissions: + pull-requests: write + + steps: + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: 1.21.1 + + - name: Checkout code + uses: actions/checkout@v3 + + - name: Assign Milestone + run: | + gh pr edit ${{ github.event.number }} --milestone "v$(sed -n 's/.*versionName.*\"\([[:digit:]\.]*\).*\"/\1/p' ./go/vt/servenv/version.go)" diff --git a/.github/workflows/check_label.yml b/.github/workflows/check_label.yml index df9e5173dd8..c3c89273df8 100644 --- a/.github/workflows/check_label.yml +++ b/.github/workflows/check_label.yml @@ -7,6 +7,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Check Pull Request labels') cancel-in-progress: true +permissions: read-all + jobs: check_pull_request_labels: name: Check Pull Request labels @@ -17,7 +19,7 @@ jobs: - name: Release Notes label run: | if [[ "${{contains( github.event.pull_request.labels.*.name, 'release notes (needs details)')}}" == "true" ]]; then - echo The "release notes (needs details)" label is set. The changes made in this Pull Request need to be documented in the release notes summary "('./doc/releasenotes/16_0_0_summary.md')". Once documented, the "release notes (needs details)" label can be removed. + echo The "release notes (needs details)" label is set. The changes made in this Pull Request need to be documented in the release notes summary "('./changelog/17.0/17.0.0/summary.md')". Once documented, the "release notes (needs details)" label can be removed. exit 1 fi @@ -42,7 +44,7 @@ jobs: exit 1 fi - - name: Check NeedsWebsiteDocsUpdate and NeedsDescriptionUpdate are off + - name: Check all Needs labels are off env: PR_NUMBER: ${{ github.event.pull_request.number }} run: | @@ -62,6 +64,10 @@ jobs: echo "Expecting PR to not have the NeedsWebsiteDocsUpdate label, please update the documentation and remove the label." exit 1 fi + if cat ${LABELS_JSON} | jq -r '.[].name ' | grep -q 'NeedsIssue' ; then + echo "Expecting PR to not have the NeedsIssue label; please create a linked issue and remove the label." + exit 1 + fi - name: Do Not Merge label diff --git a/.github/workflows/check_make_vtadmin_authz_testgen.yml b/.github/workflows/check_make_vtadmin_authz_testgen.yml index dac6d60996e..a61169c94e1 100644 --- a/.github/workflows/check_make_vtadmin_authz_testgen.yml +++ b/.github/workflows/check_make_vtadmin_authz_testgen.yml @@ -1,7 +1,9 @@ name: check_make_vtadmin_authz_testgen on: [push, pull_request] -jobs: +permissions: read-all + +jobs: build: name: Check Make vtadmin_authz_testgen runs-on: ubuntu-22.04 @@ -45,10 +47,10 @@ jobs: - '.github/workflows/check_make_vtadmin_authz_testgen.yml' - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.vtadmin_changes == 'true' with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.vtadmin_changes == 'true' diff --git a/.github/workflows/check_make_vtadmin_web_proto.yml b/.github/workflows/check_make_vtadmin_web_proto.yml index 2ff3144ed85..12d8da13f7c 100644 --- a/.github/workflows/check_make_vtadmin_web_proto.yml +++ b/.github/workflows/check_make_vtadmin_web_proto.yml @@ -1,7 +1,9 @@ name: check_make_vtadmin_web_proto on: [push, pull_request] -jobs: +permissions: read-all + +jobs: build: name: Check Make VTAdmin Web Proto runs-on: ubuntu-22.04 @@ -47,17 +49,17 @@ jobs: - '.github/workflows/check_make_vtadmin_web_proto.yml' - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true' with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Setup Node if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true' uses: actions/setup-node@v3 with: # node-version should match package.json - node-version: '16.19.0' + node-version: '18.16.0' - name: Install npm dependencies if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true' diff --git a/.github/workflows/close_stale_pull_requests.yml b/.github/workflows/close_stale_pull_requests.yml index 971fcc39f06..e0201c0104b 100644 --- a/.github/workflows/close_stale_pull_requests.yml +++ b/.github/workflows/close_stale_pull_requests.yml @@ -5,12 +5,14 @@ on: workflow_dispatch: {} -permissions: - pull-requests: write +permissions: read-all jobs: close_stale_pull_requests: runs-on: ubuntu-22.04 + permissions: + pull-requests: write + steps: - uses: actions/stale@v5 with: diff --git a/.github/workflows/cluster_endtoend_12.yml b/.github/workflows/cluster_endtoend_12.yml index 47d8315fde4..0fb288573be 100644 --- a/.github/workflows/cluster_endtoend_12.yml +++ b/.github/workflows/cluster_endtoend_12.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (12)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (12) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard 12 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_13.yml b/.github/workflows/cluster_endtoend_13.yml index 63b00f89a3e..4878167a91f 100644 --- a/.github/workflows/cluster_endtoend_13.yml +++ b/.github/workflows/cluster_endtoend_13.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (13)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (13) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard 13 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_15.yml b/.github/workflows/cluster_endtoend_15.yml index 42e5d1c91d1..ba78ff2ab8d 100644 --- a/.github/workflows/cluster_endtoend_15.yml +++ b/.github/workflows/cluster_endtoend_15.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (15)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (15) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard 15 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_18.yml b/.github/workflows/cluster_endtoend_18.yml index bf70e99fa0b..af5df2313cd 100644 --- a/.github/workflows/cluster_endtoend_18.yml +++ b/.github/workflows/cluster_endtoend_18.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (18)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (18) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -107,7 +116,7 @@ jobs: make tools - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -116,7 +125,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -127,16 +136,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard 18 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_21.yml b/.github/workflows/cluster_endtoend_21.yml index 1705e38762a..30b1372399d 100644 --- a/.github/workflows/cluster_endtoend_21.yml +++ b/.github/workflows/cluster_endtoend_21.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (21)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (21) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard 21 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_22.yml b/.github/workflows/cluster_endtoend_22.yml index e1decec0259..266352f7436 100644 --- a/.github/workflows/cluster_endtoend_22.yml +++ b/.github/workflows/cluster_endtoend_22.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (22)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (22) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard 22 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_backup_pitr.yml b/.github/workflows/cluster_endtoend_backup_pitr.yml index 1f913eb801e..5983e95761d 100644 --- a/.github/workflows/cluster_endtoend_backup_pitr.yml +++ b/.github/workflows/cluster_endtoend_backup_pitr.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (backup_pitr)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (backup_pitr) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard backup_pitr | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml b/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml index 35f6c6c925d..ea8020f519a 100644 --- a/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml +++ b/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (backup_pitr) mysql57') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (backup_pitr) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -113,7 +122,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -122,7 +131,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -133,16 +142,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard backup_pitr | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml similarity index 68% rename from .github/workflows/cluster_endtoend_tabletmanager_throttler.yml rename to .github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml index 02c9ea87657..e11ae259062 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml +++ b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml @@ -1,11 +1,13 @@ # DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" -name: Cluster (tabletmanager_throttler) +name: Cluster (backup_pitr_xtrabackup) on: [push, pull_request] concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (tabletmanager_throttler)') + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (backup_pitr_xtrabackup)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -13,8 +15,8 @@ env: jobs: build: - name: Run endtoend tests on Cluster (tabletmanager_throttler) - runs-on: ubuntu-22.04 + name: Run endtoend tests on Cluster (backup_pitr_xtrabackup) + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -56,13 +65,13 @@ jobs: - 'tools/**' - 'config/**' - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_tabletmanager_throttler.yml' + - '.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml' - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -82,15 +91,16 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' run: | - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb - echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + # Setup Percona Server for MySQL 8.0 sudo apt-get update + sudo apt-get install -y lsb-release gnupg2 curl + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo percona-release setup ps80 + sudo apt-get update + # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -101,8 +111,10 @@ jobs: # install JUnit report formatter go install github.com/vitessio/go-junit-report@HEAD + sudo apt-get install -y percona-xtrabackup-80 lz4 + - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +123,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +134,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_throttler | tee -a output.txt | go-junit-report -set-exit-code > report.xml + eatmydata -- go run test.go -docker=false -follow -shard backup_pitr_xtrabackup | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup_mysql57.yml b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup_mysql57.yml new file mode 100644 index 00000000000..866af80713c --- /dev/null +++ b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup_mysql57.yml @@ -0,0 +1,175 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (backup_pitr_xtrabackup) mysql57 +on: [push, pull_request] +concurrency: + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (backup_pitr_xtrabackup) mysql57') + cancel-in-progress: true + +permissions: read-all + +env: + LAUNCHABLE_ORGANIZATION: "vitess" + LAUNCHABLE_WORKSPACE: "vitess-app" + GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" + + # This is used if we need to pin the xtrabackup version used in tests. + # If this is NOT set then the latest version available will be used. + #XTRABACKUP_VERSION: "2.4.24-1" + +jobs: + build: + name: Run endtoend tests on Cluster (backup_pitr_xtrabackup) mysql57 + runs-on: gh-hosted-runners-4cores-1 + + steps: + - name: Skip CI + run: | + if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then + echo "skipping CI due to the 'Skip CI' label" + exit 1 + fi + + - name: Check if workflow needs to be skipped + id: skip-workflow + run: | + skip='false' + if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + skip='true' + fi + echo Skip ${skip} + echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + + - name: Check out code + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: actions/checkout@v3 + + - name: Check for changes in relevant files + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: frouioui/paths-filter@main + id: changes + with: + token: '' + filters: | + end_to_end: + - 'go/**/*.go' + - 'test.go' + - 'Makefile' + - 'build.env' + - 'go.sum' + - 'go.mod' + - 'proto/*.proto' + - 'tools/**' + - 'config/**' + - 'bootstrap.sh' + - '.github/workflows/cluster_endtoend_backup_pitr_xtrabackup_mysql57.yml' + + - name: Set up Go + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@v4 + with: + go-version: 1.21.1 + + - name: Set up python + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-python@v4 + + - name: Tune the OS + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" + # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio + echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf + sudo sysctl -p /etc/sysctl.conf + + - name: Get dependencies + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + sudo apt-get update + + # Uninstall any previously installed MySQL first + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + + # Get key to latest MySQL repo + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 + # packages for Jammy. + echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections + echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + sudo apt-get update + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 + + sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + + # install JUnit report formatter + go install github.com/vitessio/go-junit-report@HEAD + + wget "https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb" + sudo apt-get install -y gnupg2 + sudo dpkg -i "percona-release_latest.$(lsb_release -sc)_all.deb" + sudo apt-get update + if [[ -n $XTRABACKUP_VERSION ]]; then + debfile="percona-xtrabackup-24_$XTRABACKUP_VERSION.$(lsb_release -sc)_amd64.deb" + wget "https://repo.percona.com/pxb-24/apt/pool/main/p/percona-xtrabackup-24/$debfile" + sudo apt install -y "./$debfile" + else + sudo apt-get install -y percona-xtrabackup-24 + fi + + - name: Setup launchable dependencies + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + run: | + # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up + pip3 install --user launchable~=1.0 > /dev/null + + # verify that launchable setup is all correct. + launchable verify || true + + # Tell Launchable about the build you are producing and testing + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . + + - name: Run cluster endtoend test + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + timeout-minutes: 45 + run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which musn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + source build.env + + set -exo pipefail + + # run the tests however you normally do, then produce a JUnit XML file + eatmydata -- go run test.go -docker=false -follow -shard backup_pitr_xtrabackup | tee -a output.txt | go-junit-report -set-exit-code > report.xml + + - name: Print test output and Record test result in launchable if PR is not a draft + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + run: | + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi + + # print test output + cat output.txt diff --git a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml index a6739b71f2c..3a411e90238 100644 --- a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml +++ b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (ers_prs_newfeatures_heavy)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (ers_prs_newfeatures_heavy) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -145,11 +154,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard ers_prs_newfeatures_heavy | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_mysql80.yml b/.github/workflows/cluster_endtoend_mysql80.yml index 7f27203c5ce..5ab2697ef52 100644 --- a/.github/workflows/cluster_endtoend_mysql80.yml +++ b/.github/workflows/cluster_endtoend_mysql80.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (mysql80)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (mysql80) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard mysql80 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_mysql_server_vault.yml b/.github/workflows/cluster_endtoend_mysql_server_vault.yml index 9a2c3d198ae..462838b2ef9 100644 --- a/.github/workflows/cluster_endtoend_mysql_server_vault.yml +++ b/.github/workflows/cluster_endtoend_mysql_server_vault.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (mysql_server_vault)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (mysql_server_vault) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -107,7 +116,7 @@ jobs: make tools - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -116,7 +125,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -127,16 +136,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard mysql_server_vault | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml index 50ca5315c86..511865c710c 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_ghost)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_ghost) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -61,9 +70,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -103,7 +112,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -112,7 +121,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -123,16 +132,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_ghost | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml index a073589625f..252e73a7cd6 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_ghost) mysql57') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_ghost) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -61,9 +70,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -114,7 +123,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -123,7 +132,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -134,16 +143,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_ghost | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert.yml b/.github/workflows/cluster_endtoend_onlineddl_revert.yml index f3479168002..307d0e8e81d 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_revert.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_revert.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_revert)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_revert) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -61,9 +70,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -103,7 +112,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -112,7 +121,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -123,16 +132,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_revert | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml index 33c9436d62a..5ba94452b85 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_revert) mysql57') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_revert) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -61,9 +70,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -114,7 +123,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -123,7 +132,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -134,16 +143,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_revert | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml index 45cfa768821..c30dd0f71f5 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_scheduler)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_scheduler) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -61,9 +70,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -103,7 +112,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -112,7 +121,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -123,16 +132,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_scheduler | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml index 4f0c1a5b698..485ac7667ad 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_scheduler) mysql57') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_scheduler) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -61,9 +70,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -114,7 +123,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -123,7 +132,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -134,16 +143,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_scheduler | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml index ae982540c38..ae7d17bc965 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_vrepl) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -61,9 +70,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -103,7 +112,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -112,7 +121,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -123,16 +132,22 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail + cat <<-EOF>>./config/mycnf/mysql80.cnf + binlog-transaction-compression=ON + EOF + # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml index 45d6ae0f029..1639f26e106 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl) mysql57') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_vrepl) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -61,9 +70,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -114,7 +123,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -123,7 +132,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -134,16 +143,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml index 02236a6f4a4..3221e35099f 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl_stress)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_vrepl_stress) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -61,9 +70,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -103,7 +112,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -112,7 +121,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -123,16 +132,22 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail + cat <<-EOF>>./config/mycnf/mysql80.cnf + binlog-transaction-compression=ON + EOF + # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml index 42d83f85756..7f1fd124c3d 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl_stress) mysql57') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_vrepl_stress) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -61,9 +70,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -114,7 +123,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -123,7 +132,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -134,16 +143,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml index ad52f5c396c..cd7e3ccfcee 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl_stress_suite)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_vrepl_stress_suite) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -61,9 +70,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -103,7 +112,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -112,7 +121,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -123,16 +132,22 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail + cat <<-EOF>>./config/mycnf/mysql80.cnf + binlog-transaction-compression=ON + EOF + # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml index 6321e28069a..e0a83f18ef7 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl_stress_suite) mysql57') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_vrepl_stress_suite) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -61,9 +70,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -114,7 +123,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -123,7 +132,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -134,16 +143,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml index 0c682296e9c..19ab02ac2dd 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl_suite)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_vrepl_suite) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -61,9 +70,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -103,7 +112,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -112,7 +121,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -123,16 +132,22 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail + cat <<-EOF>>./config/mycnf/mysql80.cnf + binlog-transaction-compression=ON + EOF + # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml index fb4ab1fd4d2..d28e472cd4b 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl_suite) mysql57') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (onlineddl_vrepl_suite) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -61,9 +70,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -114,7 +123,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -123,7 +132,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -134,16 +143,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml index 49c0a3e0d48..c415d1e131c 100644 --- a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml +++ b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (schemadiff_vrepl)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (schemadiff_vrepl) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -61,9 +70,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -103,7 +112,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -112,7 +121,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -123,16 +132,22 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail + cat <<-EOF>>./config/mycnf/mysql80.cnf + binlog-transaction-compression=ON + EOF + # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard schemadiff_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml index 7510e14aa4c..28319debd01 100644 --- a/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml +++ b/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (schemadiff_vrepl) mysql57') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (schemadiff_vrepl) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -61,9 +70,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -114,7 +123,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -123,7 +132,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -134,16 +143,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard schemadiff_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml index 21a0369083d..838f476f463 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (tabletmanager_consul)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (tabletmanager_consul) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -107,7 +116,7 @@ jobs: make tools - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -116,7 +125,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -127,16 +136,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_consul | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml index 53e26a5e1a7..e050c82ec0c 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (tabletmanager_tablegc)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (tabletmanager_tablegc) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_tablegc | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml index 7ee6aaa46a3..cb29cc5d7ad 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (tabletmanager_tablegc) mysql57') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (tabletmanager_tablegc) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -113,7 +122,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -122,7 +131,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -133,16 +142,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_tablegc | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml index 20b45988bdf..2d9ccd8cbbd 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (tabletmanager_throttler_topo)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (tabletmanager_throttler_topo) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_throttler_topo | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_topo_connection_cache.yml b/.github/workflows/cluster_endtoend_topo_connection_cache.yml index ce64d2ff3b6..2e8201a6044 100644 --- a/.github/workflows/cluster_endtoend_topo_connection_cache.yml +++ b/.github/workflows/cluster_endtoend_topo_connection_cache.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (topo_connection_cache)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (topo_connection_cache) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard topo_connection_cache | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml index ba4d149a711..c0001a6aaea 100644 --- a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml +++ b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_across_db_versions)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vreplication_across_db_versions) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -142,14 +151,20 @@ jobs: slow-query-log=OFF EOF + cat <<-EOF>>./config/mycnf/mysql80.cnf + binlog-transaction-compression=ON + EOF + # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vreplication_across_db_versions | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vreplication_basic.yml b/.github/workflows/cluster_endtoend_vreplication_basic.yml index 3e6a33ae7df..fe0f08136ef 100644 --- a/.github/workflows/cluster_endtoend_vreplication_basic.yml +++ b/.github/workflows/cluster_endtoend_vreplication_basic.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_basic)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vreplication_basic) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -142,14 +151,20 @@ jobs: slow-query-log=OFF EOF + cat <<-EOF>>./config/mycnf/mysql80.cnf + binlog-transaction-compression=ON + EOF + # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vreplication_basic | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml index 9ab7c51f43e..3c010b6d558 100644 --- a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml +++ b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_cellalias)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vreplication_cellalias) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -142,14 +151,20 @@ jobs: slow-query-log=OFF EOF + cat <<-EOF>>./config/mycnf/mysql80.cnf + binlog-transaction-compression=ON + EOF + # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vreplication_cellalias | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml index 93d71284166..5291a7aaf28 100644 --- a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml +++ b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_migrate_vdiff2_convert_tz)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vreplication_migrate_vdiff2_convert_tz) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -142,14 +151,20 @@ jobs: slow-query-log=OFF EOF + cat <<-EOF>>./config/mycnf/mysql80.cnf + binlog-transaction-compression=ON + EOF + # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vreplication_migrate_vdiff2_convert_tz | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vreplication_multicell.yml b/.github/workflows/cluster_endtoend_vreplication_multicell.yml index ed1050fbde2..8bb26aea060 100644 --- a/.github/workflows/cluster_endtoend_vreplication_multicell.yml +++ b/.github/workflows/cluster_endtoend_vreplication_multicell.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_multicell)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vreplication_multicell) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -142,14 +151,20 @@ jobs: slow-query-log=OFF EOF + cat <<-EOF>>./config/mycnf/mysql80.cnf + binlog-transaction-compression=ON + EOF + # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vreplication_multicell | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml similarity index 69% rename from .github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml rename to .github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml index 2deb7b3c8f5..681c038a3f2 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml +++ b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml @@ -1,11 +1,13 @@ # DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" -name: Cluster (tabletmanager_throttler_custom_config) +name: Cluster (vreplication_partial_movetables_basic) on: [push, pull_request] concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (tabletmanager_throttler_custom_config)') + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_partial_movetables_basic)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -13,8 +15,8 @@ env: jobs: build: - name: Run endtoend tests on Cluster (tabletmanager_throttler_custom_config) - runs-on: ubuntu-22.04 + name: Run endtoend tests on Cluster (vreplication_partial_movetables_basic) + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -56,13 +65,13 @@ jobs: - 'tools/**' - 'config/**' - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml' + - '.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml' - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,40 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x - + set -exo pipefail + + # Increase our open file descriptor limit as we could hit this + ulimit -n 65536 + cat <<-EOF>>./config/mycnf/mysql80.cnf + innodb_buffer_pool_dump_at_shutdown=OFF + innodb_buffer_pool_in_core_file=OFF + innodb_buffer_pool_load_at_startup=OFF + innodb_buffer_pool_size=64M + innodb_doublewrite=OFF + innodb_flush_log_at_trx_commit=0 + innodb_flush_method=O_DIRECT + innodb_numa_interleave=ON + innodb_adaptive_hash_index=OFF + sync_binlog=0 + sync_relay_log=0 + performance_schema=OFF + slow-query-log=OFF + EOF + + cat <<-EOF>>./config/mycnf/mysql80.cnf + binlog-transaction-compression=ON + EOF + # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_throttler_custom_config | tee -a output.txt | go-junit-report -set-exit-code > report.xml + eatmydata -- go run test.go -docker=false -follow -shard vreplication_partial_movetables_basic | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml new file mode 100644 index 00000000000..27ed797032d --- /dev/null +++ b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml @@ -0,0 +1,170 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vreplication_partial_movetables_sequences) +on: [push, pull_request] +concurrency: + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_partial_movetables_sequences)') + cancel-in-progress: true + +permissions: read-all + +env: + LAUNCHABLE_ORGANIZATION: "vitess" + LAUNCHABLE_WORKSPACE: "vitess-app" + GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" + +jobs: + build: + name: Run endtoend tests on Cluster (vreplication_partial_movetables_sequences) + runs-on: gh-hosted-runners-4cores-1 + + steps: + - name: Skip CI + run: | + if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then + echo "skipping CI due to the 'Skip CI' label" + exit 1 + fi + + - name: Check if workflow needs to be skipped + id: skip-workflow + run: | + skip='false' + if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + skip='true' + fi + echo Skip ${skip} + echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + + - name: Check out code + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: actions/checkout@v3 + + - name: Check for changes in relevant files + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: frouioui/paths-filter@main + id: changes + with: + token: '' + filters: | + end_to_end: + - 'go/**/*.go' + - 'test.go' + - 'Makefile' + - 'build.env' + - 'go.sum' + - 'go.mod' + - 'proto/*.proto' + - 'tools/**' + - 'config/**' + - 'bootstrap.sh' + - '.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml' + + - name: Set up Go + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@v4 + with: + go-version: 1.21.1 + + - name: Set up python + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-python@v4 + + - name: Tune the OS + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + # Limit local port range to not use ports that overlap with server side + # ports that we listen on. + sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" + # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio + echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf + sudo sysctl -p /etc/sysctl.conf + + - name: Get dependencies + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + + # Get key to latest MySQL repo + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + # Setup MySQL 8.0 + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + sudo apt-get update + # Install everything else we need, and configure + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + # install JUnit report formatter + go install github.com/vitessio/go-junit-report@HEAD + + - name: Setup launchable dependencies + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + run: | + # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up + pip3 install --user launchable~=1.0 > /dev/null + + # verify that launchable setup is all correct. + launchable verify || true + + # Tell Launchable about the build you are producing and testing + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . + + - name: Run cluster endtoend test + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + timeout-minutes: 45 + run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which musn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + source build.env + + set -exo pipefail + + # Increase our open file descriptor limit as we could hit this + ulimit -n 65536 + cat <<-EOF>>./config/mycnf/mysql80.cnf + innodb_buffer_pool_dump_at_shutdown=OFF + innodb_buffer_pool_in_core_file=OFF + innodb_buffer_pool_load_at_startup=OFF + innodb_buffer_pool_size=64M + innodb_doublewrite=OFF + innodb_flush_log_at_trx_commit=0 + innodb_flush_method=O_DIRECT + innodb_numa_interleave=ON + innodb_adaptive_hash_index=OFF + sync_binlog=0 + sync_relay_log=0 + performance_schema=OFF + slow-query-log=OFF + EOF + + cat <<-EOF>>./config/mycnf/mysql80.cnf + binlog-transaction-compression=ON + EOF + + # run the tests however you normally do, then produce a JUnit XML file + eatmydata -- go run test.go -docker=false -follow -shard vreplication_partial_movetables_sequences | tee -a output.txt | go-junit-report -set-exit-code > report.xml + + - name: Print test output and Record test result in launchable if PR is not a draft + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + run: | + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi + + # print test output + cat output.txt diff --git a/.github/workflows/cluster_endtoend_vreplication_v2.yml b/.github/workflows/cluster_endtoend_vreplication_v2.yml index a1a478aa982..7e389bc7b07 100644 --- a/.github/workflows/cluster_endtoend_vreplication_v2.yml +++ b/.github/workflows/cluster_endtoend_vreplication_v2.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_v2)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vreplication_v2) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -142,14 +151,20 @@ jobs: slow-query-log=OFF EOF + cat <<-EOF>>./config/mycnf/mysql80.cnf + binlog-transaction-compression=ON + EOF + # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vreplication_v2 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vstream_failover.yml b/.github/workflows/cluster_endtoend_vstream_failover.yml index cdf5c899ddc..f2c459605b7 100644 --- a/.github/workflows/cluster_endtoend_vstream_failover.yml +++ b/.github/workflows/cluster_endtoend_vstream_failover.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_failover)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vstream_failover) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vstream_failover | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml b/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml index 978c92b4a30..524930341b0 100644 --- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml +++ b/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_stoponreshard_false)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vstream_stoponreshard_false) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vstream_stoponreshard_false | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml b/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml index 983ad94e794..04054cb5ced 100644 --- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml +++ b/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_stoponreshard_true)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vstream_stoponreshard_true) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vstream_stoponreshard_true | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml b/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml index f62019138cb..29ebd2fec74 100644 --- a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml +++ b/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_with_keyspaces_to_watch)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vstream_with_keyspaces_to_watch) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vstream_with_keyspaces_to_watch | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtbackup.yml b/.github/workflows/cluster_endtoend_vtbackup.yml index da3c849bc04..6a615f23bb2 100644 --- a/.github/workflows/cluster_endtoend_vtbackup.yml +++ b/.github/workflows/cluster_endtoend_vtbackup.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtbackup)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtbackup) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtbackup | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml index 697e3d31780..d8dcfc50768 100644 --- a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml +++ b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtctlbackup_sharded_clustertest_heavy)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtctlbackup_sharded_clustertest_heavy) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -145,11 +154,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtctlbackup_sharded_clustertest_heavy | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml index 83e91d390c4..0cdc30bc676 100644 --- a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml +++ b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_concurrentdml)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_concurrentdml) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_concurrentdml | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_gen4.yml b/.github/workflows/cluster_endtoend_vtgate_gen4.yml index 7c39d688bcc..8e298848a1a 100644 --- a/.github/workflows/cluster_endtoend_vtgate_gen4.yml +++ b/.github/workflows/cluster_endtoend_vtgate_gen4.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_gen4)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_gen4) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_gen4 | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml index a69c7634137..b525f3f6e4f 100644 --- a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml +++ b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_general_heavy)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_general_heavy) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -145,11 +154,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_general_heavy | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_godriver.yml b/.github/workflows/cluster_endtoend_vtgate_godriver.yml index 2bf75e96788..090d740daa2 100644 --- a/.github/workflows/cluster_endtoend_vtgate_godriver.yml +++ b/.github/workflows/cluster_endtoend_vtgate_godriver.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_godriver)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_godriver) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_godriver | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml index 209f26943e8..261116f5938 100644 --- a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml +++ b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_partial_keyspace)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_partial_keyspace) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_partial_keyspace -partial-keyspace=true | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_queries.yml b/.github/workflows/cluster_endtoend_vtgate_queries.yml index 2ba81844503..6d7e842df82 100644 --- a/.github/workflows/cluster_endtoend_vtgate_queries.yml +++ b/.github/workflows/cluster_endtoend_vtgate_queries.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_queries)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_queries) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_queries | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml index 55dd4c3c34b..0a9b8b36c8c 100644 --- a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml +++ b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_readafterwrite)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_readafterwrite) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_readafterwrite | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml index d5e90674bc5..e440f38895d 100644 --- a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml +++ b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_reservedconn)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_reservedconn) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_reservedconn | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_schema.yml b/.github/workflows/cluster_endtoend_vtgate_schema.yml index 07f06f06452..48c3dda15b1 100644 --- a/.github/workflows/cluster_endtoend_vtgate_schema.yml +++ b/.github/workflows/cluster_endtoend_vtgate_schema.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_schema)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_schema) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_schema | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml index 57ff927fb7d..ca97a400efe 100644 --- a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml +++ b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_schema_tracker)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_schema_tracker) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_schema_tracker | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml index e81c473ecc3..7689c02e796 100644 --- a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml +++ b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_tablet_healthcheck_cache)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_tablet_healthcheck_cache) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_tablet_healthcheck_cache | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_topo.yml b/.github/workflows/cluster_endtoend_vtgate_topo.yml index bdbf6b09c6a..dc1c634d634 100644 --- a/.github/workflows/cluster_endtoend_vtgate_topo.yml +++ b/.github/workflows/cluster_endtoend_vtgate_topo.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_topo)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_topo) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_topo | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml index 36cc1319b04..28c35af0137 100644 --- a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml +++ b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_topo_consul)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_topo_consul) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -107,7 +116,7 @@ jobs: make tools - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -116,7 +125,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -127,16 +136,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_topo_consul | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml index ee9329d4fa7..1affa75b2f4 100644 --- a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml +++ b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_topo_etcd)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_topo_etcd) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_topo_etcd | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_transaction.yml b/.github/workflows/cluster_endtoend_vtgate_transaction.yml index d3914dd0e2d..2d8f1708bd4 100644 --- a/.github/workflows/cluster_endtoend_vtgate_transaction.yml +++ b/.github/workflows/cluster_endtoend_vtgate_transaction.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_transaction)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_transaction) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_transaction | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml index 679ec5ee324..f4e713fd2c9 100644 --- a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml +++ b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_unsharded)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_unsharded) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_unsharded | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml index 2372f13cfb3..bfe68c86770 100644 --- a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml +++ b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_vindex_heavy)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_vindex_heavy) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,7 +131,7 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # Increase our open file descriptor limit as we could hit this ulimit -n 65536 @@ -145,11 +154,13 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_vindex_heavy | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtgate_vschema.yml b/.github/workflows/cluster_endtoend_vtgate_vschema.yml index 139421f9ff7..935ee642fa4 100644 --- a/.github/workflows/cluster_endtoend_vtgate_vschema.yml +++ b/.github/workflows/cluster_endtoend_vtgate_vschema.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_vschema)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtgate_vschema) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtgate_vschema | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtorc.yml b/.github/workflows/cluster_endtoend_vtorc.yml index 300dc1864ba..b14c99f6648 100644 --- a/.github/workflows/cluster_endtoend_vtorc.yml +++ b/.github/workflows/cluster_endtoend_vtorc.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtorc)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtorc) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtorc | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtorc_mysql57.yml b/.github/workflows/cluster_endtoend_vtorc_mysql57.yml index e3cb4e639f1..1dd7b021556 100644 --- a/.github/workflows/cluster_endtoend_vtorc_mysql57.yml +++ b/.github/workflows/cluster_endtoend_vtorc_mysql57.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtorc) mysql57') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vtorc) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -113,7 +122,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -122,7 +131,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -133,16 +142,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vtorc | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml index 9bc4c4d5b07..6fb044df961 100644 --- a/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml +++ b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vttablet_prscomplex)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (vttablet_prscomplex) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,7 +111,7 @@ jobs: go install github.com/vitessio/go-junit-report@HEAD - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -111,7 +120,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -122,16 +131,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard vttablet_prscomplex | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_xb_backup.yml b/.github/workflows/cluster_endtoend_xb_backup.yml index 3e10039b7e1..1b88ab9d64b 100644 --- a/.github/workflows/cluster_endtoend_xb_backup.yml +++ b/.github/workflows/cluster_endtoend_xb_backup.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (xb_backup)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (xb_backup) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,10 +111,10 @@ jobs: # install JUnit report formatter go install github.com/vitessio/go-junit-report@HEAD - sudo apt-get install percona-xtrabackup-80 lz4 + sudo apt-get install -y percona-xtrabackup-80 lz4 - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -114,7 +123,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -125,16 +134,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard xb_backup | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml b/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml index 3e4dcab8d04..294dcc56eeb 100644 --- a/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml +++ b/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (xb_backup) mysql57') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -18,7 +20,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (xb_backup) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -38,6 +40,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,9 +73,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -129,7 +138,7 @@ jobs: fi - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -138,7 +147,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -149,16 +158,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard xb_backup | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_xb_recovery.yml b/.github/workflows/cluster_endtoend_xb_recovery.yml index 1d402e7f883..33015d4ca60 100644 --- a/.github/workflows/cluster_endtoend_xb_recovery.yml +++ b/.github/workflows/cluster_endtoend_xb_recovery.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (xb_recovery)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (xb_recovery) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -102,10 +111,10 @@ jobs: # install JUnit report formatter go install github.com/vitessio/go-junit-report@HEAD - sudo apt-get install percona-xtrabackup-80 lz4 + sudo apt-get install -y percona-xtrabackup-80 lz4 - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -114,7 +123,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -125,16 +134,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard xb_recovery | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml b/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml index 55562726fa4..728448c19ac 100644 --- a/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml +++ b/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (xb_recovery) mysql57') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -18,7 +20,7 @@ env: jobs: build: name: Run endtoend tests on Cluster (xb_recovery) mysql57 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -38,6 +40,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -64,9 +73,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -129,7 +138,7 @@ jobs: fi - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -138,7 +147,7 @@ jobs: launchable verify || true # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --source . + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run cluster endtoend test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -149,16 +158,18 @@ jobs: export VTDATAROOT="/tmp/" source build.env - set -x + set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard xb_recovery | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() run: | - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/code_freeze.yml b/.github/workflows/code_freeze.yml index efe1d6e8856..5640bc01b69 100644 --- a/.github/workflows/code_freeze.yml +++ b/.github/workflows/code_freeze.yml @@ -2,6 +2,8 @@ name: Code Freeze on: pull_request: +permissions: read-all + jobs: build: name: Code Freeze diff --git a/.github/workflows/codeql_analysis.yml b/.github/workflows/codeql_analysis.yml index 16c7264a506..7f87d2745fc 100644 --- a/.github/workflows/codeql_analysis.yml +++ b/.github/workflows/codeql_analysis.yml @@ -9,6 +9,8 @@ on: - cron: '0 0 * * 1' workflow_dispatch: +permissions: read-all + jobs: analyze: name: Analyze @@ -40,9 +42,9 @@ jobs: # queries: security-extended,security-and-quality - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Get base dependencies run: | @@ -77,7 +79,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 - name: Building binaries timeout-minutes: 30 diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 39cd7592fa9..1da6d9f7190 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -7,27 +7,30 @@ on: release: types: [created] +permissions: read-all + jobs: build: name: Create Release runs-on: ubuntu-22.04 + permissions: + contents: write steps: - name: Set up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 + with: + go-version: 1.21.1 + + - name: Setup node + uses: actions/setup-node@v3 with: - go-version: 1.20.1 + node-version: '18.16.0' - name: Tune the OS run: | sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 - - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file - run: | - echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts - # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! - - name: Check out code uses: actions/checkout@v3 diff --git a/.github/workflows/docker_test_cluster_10.yml b/.github/workflows/docker_test_cluster_10.yml index 3438b84ef89..72dd314590a 100644 --- a/.github/workflows/docker_test_cluster_10.yml +++ b/.github/workflows/docker_test_cluster_10.yml @@ -1,10 +1,11 @@ name: docker_test_cluster_10 on: [push, pull_request] +permissions: read-all jobs: build: name: Docker Test Cluster 10 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -51,9 +52,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -61,13 +62,6 @@ jobs: echo "value: " ${{steps.skip-workflow.outputs.skip-workflow}} sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 - - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts - # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! - - name: Run tests which require docker - 1 if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' run: | diff --git a/.github/workflows/docker_test_cluster_25.yml b/.github/workflows/docker_test_cluster_25.yml index ae77594a6bb..e1ad48bf396 100644 --- a/.github/workflows/docker_test_cluster_25.yml +++ b/.github/workflows/docker_test_cluster_25.yml @@ -1,10 +1,11 @@ name: docker_test_cluster_25 on: [push, pull_request] +permissions: read-all jobs: build: name: Docker Test Cluster 25 - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -51,22 +52,15 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' run: | sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 - - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts - # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! - - name: Run tests which require docker - 2 if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' run: | diff --git a/.github/workflows/e2e_race.yml b/.github/workflows/e2e_race.yml index 682e658bb72..1b52520ba6d 100644 --- a/.github/workflows/e2e_race.yml +++ b/.github/workflows/e2e_race.yml @@ -1,10 +1,11 @@ name: e2e_race on: [push, pull_request] +permissions: read-all jobs: build: name: End-to-End Test (Race) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI run: | @@ -49,9 +50,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -86,4 +87,4 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' timeout-minutes: 30 run: | - make e2e_test_race + NOVTADMINBUILD=1 make e2e_test_race diff --git a/.github/workflows/endtoend.yml b/.github/workflows/endtoend.yml index aea86fd21e6..1b8b496ba51 100644 --- a/.github/workflows/endtoend.yml +++ b/.github/workflows/endtoend.yml @@ -1,10 +1,11 @@ name: endtoend on: [push, pull_request] +permissions: read-all jobs: build: name: End-to-End Test - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI run: | @@ -49,22 +50,15 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' run: | sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 - - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts - # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! - - name: Get dependencies if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' run: | @@ -84,10 +78,14 @@ jobs: - name: Build if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' run: | - make build + NOVTADMINBUILD=1 make build - name: endtoend if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' timeout-minutes: 30 run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which musn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + eatmydata -- tools/e2e_test_runner.sh diff --git a/.github/workflows/local_example.yml b/.github/workflows/local_example.yml index c1b1cd3a7c2..cd470dfcc63 100644 --- a/.github/workflows/local_example.yml +++ b/.github/workflows/local_example.yml @@ -1,13 +1,13 @@ name: local_example on: [push, pull_request] +permissions: read-all jobs: build: - name: Local example using ${{ matrix.topo }} on ${{ matrix.os }} - runs-on: ${{ matrix.os }} + name: Local example using ${{ matrix.topo }} on ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 strategy: matrix: - os: [ubuntu-22.04] topo: [consul,etcd,k8s] steps: @@ -55,9 +55,15 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 + + - uses: actions/setup-node@v3 + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true' + with: + # node-version should match package.json + node-version: '18.16.0' - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true' diff --git a/.github/workflows/region_example.yml b/.github/workflows/region_example.yml index f86bd2e6a31..e7f7b36b408 100644 --- a/.github/workflows/region_example.yml +++ b/.github/workflows/region_example.yml @@ -1,13 +1,13 @@ name: region_example on: [push, pull_request] +permissions: read-all jobs: build: - name: Region Sharding example using ${{ matrix.topo }} on ${{ matrix.os }} - runs-on: ${{ matrix.os }} + name: Region Sharding example using ${{ matrix.topo }} on ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 strategy: matrix: - os: [ubuntu-22.04] topo: [etcd] steps: @@ -55,9 +55,15 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 + + - uses: actions/setup-node@v3 + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true' + with: + # node-version should match package.json + node-version: '18.16.0' - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true' diff --git a/.github/workflows/sonar_analysis.yml b/.github/workflows/sonar_analysis.yml deleted file mode 100644 index c28966de09d..00000000000 --- a/.github/workflows/sonar_analysis.yml +++ /dev/null @@ -1,63 +0,0 @@ -name: sonar_analysis -on: - push: - branches: - - 'sonartest' -jobs: - - build: - runs-on: ubuntu-22.04 - - steps: - - name: Set up Go - uses: actions/setup-go@v3 - with: - go-version: 1.19 - - - name: Tune the OS - run: | - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - - # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 - - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file - run: | - echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts - # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! - - - name: Check out code - uses: actions/checkout@v3 - - - name: Get dependencies - run: | - sudo apt-get update - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata - sudo service mysql stop - sudo service etcd stop - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - go mod download - - - name: Execute unit test and cluster endtoend test - run: | - eatmydata -- ./tools/all_test_for_coverage.sh - mkdir report - cp /tmp/*.out ./report/. - - - name: Analyse sonar - run: | - export SONAR_SCANNER_VERSION=4.2.0.1873 - export SONAR_SCANNER_HOME=$HOME/.sonar/sonar-scanner-$SONAR_SCANNER_VERSION-linux - curl --create-dirs -sSLo $HOME/.sonar/sonar-scanner.zip https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-$SONAR_SCANNER_VERSION-linux.zip - unzip -o $HOME/.sonar/sonar-scanner.zip -d $HOME/.sonar/ - export PATH=$SONAR_SCANNER_HOME/bin:$PATH - export SONAR_SCANNER_OPTS="-server" - - sonar-scanner \ - -Dsonar.projectKey=vitessio \ - -Dsonar.organization=vitess \ - -Dsonar.host.url=https://sonarcloud.io \ - -Dsonar.login=${SONAR_TOKEN} \ - -Dsonar.go.coverage.reportPaths=report/*.out - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} diff --git a/.github/workflows/static_checks_etc.yml b/.github/workflows/static_checks_etc.yml index 41a27adfe31..6b708f0aea1 100644 --- a/.github/workflows/static_checks_etc.yml +++ b/.github/workflows/static_checks_etc.yml @@ -4,6 +4,8 @@ on: - pull_request - push +permissions: read-all + jobs: build: name: Static Code Checks Etc @@ -33,7 +35,7 @@ jobs: - name: Run FOSSA scan and upload build data if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: fossa-contrib/fossa-action@v1 + uses: fossa-contrib/fossa-action@v2 with: fossa-api-key: 76d7483ea206d530d9452e44bffe7ba8 @@ -97,25 +99,22 @@ jobs: ci_config: - 'test/config.json' - '.github/workflows/static_checks_etc.yml' + release_notes: + - 'changelog/**' + - './go/tools/releases/**' + - '.github/workflows/static_checks_etc.yml' - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.go_files == 'true' || steps.changes.outputs.parser_changes == 'true' || steps.changes.outputs.proto_changes == 'true') - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true' run: | sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 - - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true' - run: | - echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts - # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! - - name: Run go fmt if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true' run: | @@ -169,7 +168,7 @@ jobs: - name: Install golangci-lint if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true' - run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2 + run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.52.2 - name: Clean Env if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true' @@ -181,7 +180,7 @@ jobs: - name: Run golangci-lint if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true' - run: $(go env GOPATH)/bin/golangci-lint run go/... || exit 1 + run: $(go env GOPATH)/bin/golangci-lint run go/... --timeout 10m || exit 1 - name: Run go mod tidy if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true' @@ -206,3 +205,18 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.go_files == 'true' || steps.changes.outputs.ci_config == 'true') run: | go run ./go/tools/ci-config/main.go || exit 1 + + - name: Check changelog + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.release_notes == 'true' + run: | + set -e + go run ./go/tools/releases/releases.go + output=$(git status -s) + if [ -z "${output}" ]; then + exit 0 + fi + echo 'We wish to maintain a consistent changelog directory, please run `go run ./go/tools/releases/releases.go`, commit and push again.' + echo 'Running `go run ./go/tools/releases/releases.go` on CI yields the following changes:' + echo "$output" + echo "" + exit 1 diff --git a/.github/workflows/unit_race.yml b/.github/workflows/unit_race.yml index 098741d08f2..d1cb499bbe1 100644 --- a/.github/workflows/unit_race.yml +++ b/.github/workflows/unit_race.yml @@ -4,11 +4,13 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'unit_race') cancel-in-progress: true +permissions: read-all + jobs: build: name: Unit Test (Race) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI run: | @@ -53,9 +55,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' @@ -91,6 +93,11 @@ jobs: - name: unit_race if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - timeout-minutes: 30 + timeout-minutes: 45 run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which musn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + export NOVTADMINBUILD=1 + eatmydata -- make unit_test_race diff --git a/.github/workflows/unit_test_mysql57.yml b/.github/workflows/unit_test_mysql57.yml index 6e315b38986..c08d4ed89d7 100644 --- a/.github/workflows/unit_test_mysql57.yml +++ b/.github/workflows/unit_test_mysql57.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Unit Test (mysql57)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: test: name: Unit Test (mysql57) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' @@ -125,30 +134,37 @@ jobs: run: | make tools - # Temporarily stop sending unit test data to launchable - # - name: Setup launchable dependencies - # if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - # run: | - # # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - # pip3 install --user launchable~=1.0 > /dev/null + - name: Setup launchable dependencies + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && github.base_ref == 'main' + run: | + # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up + pip3 install --user launchable~=1.0 > /dev/null - # # verify that launchable setup is all correct. - # # launchable verify || true + # verify that launchable setup is all correct. + launchable verify || true - # # Tell Launchable about the build you are producing and testing - # launchable record build --name "$GITHUB_RUN_ID" --source . + # Tell Launchable about the build you are producing and testing + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' timeout-minutes: 30 run: | + set -exo pipefail + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which musn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + + export NOVTADMINBUILD=1 eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && always() run: | - # send recorded tests to launchable - # launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/unit_test_mysql80.yml b/.github/workflows/unit_test_mysql80.yml index a4716164ca6..7eac25a23ce 100644 --- a/.github/workflows/unit_test_mysql80.yml +++ b/.github/workflows/unit_test_mysql80.yml @@ -6,6 +6,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Unit Test (mysql80)') cancel-in-progress: true +permissions: read-all + env: LAUNCHABLE_ORGANIZATION: "vitess" LAUNCHABLE_WORKSPACE: "vitess-app" @@ -14,7 +16,7 @@ env: jobs: test: name: Unit Test (mysql80) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI @@ -34,6 +36,13 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 @@ -60,9 +69,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' @@ -122,30 +131,37 @@ jobs: run: | make tools - # Temporarily stop sending unit test data to launchable - # - name: Setup launchable dependencies - # if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - # run: | - # # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - # pip3 install --user launchable~=1.0 > /dev/null + - name: Setup launchable dependencies + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && github.base_ref == 'main' + run: | + # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up + pip3 install --user launchable~=1.0 > /dev/null - # # verify that launchable setup is all correct. - # # launchable verify || true + # verify that launchable setup is all correct. + launchable verify || true - # # Tell Launchable about the build you are producing and testing - # launchable record build --name "$GITHUB_RUN_ID" --source . + # Tell Launchable about the build you are producing and testing + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - name: Run test if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' timeout-minutes: 30 run: | + set -exo pipefail + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which musn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + + export NOVTADMINBUILD=1 eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - name: Print test output and Record test result in launchable + - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && always() run: | - # send recorded tests to launchable - # launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi # print test output cat output.txt diff --git a/.github/workflows/update_golang_version.yml b/.github/workflows/update_golang_version.yml new file mode 100644 index 00000000000..31b17e4a6b3 --- /dev/null +++ b/.github/workflows/update_golang_version.yml @@ -0,0 +1,90 @@ +name: Update Golang Version + +on: + schedule: + - cron: "0 0 * * *" # Runs every day at midnight UTC + workflow_dispatch: + +permissions: read-all + +jobs: + update_golang_version: + if: github.repository == 'vitessio/vitess' + permissions: + contents: write + pull-requests: write + strategy: + matrix: + branch: [ main, release-17.0, release-16.0, release-15.0 ] + name: Update Golang Version + runs-on: ubuntu-latest + steps: + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: 1.21.1 + + - name: Check out code + uses: actions/checkout@v3 + with: + ref: ${{ matrix.branch }} + + - name: Detect new version and update codebase + env: + GH_TOKEN: ${{ github.token }} + id: detect-and-update + run: | + old_go_version=$(go run ./go/tools/go-upgrade/go-upgrade.go get go-version) + echo "old-go-version=${old_go_version}" >> $GITHUB_OUTPUT + + if [ ${{ matrix.branch }} == "main" ]; then + go run ./go/tools/go-upgrade/go-upgrade.go upgrade --workflow-update=false --main --allow-major-upgrade + else + go run ./go/tools/go-upgrade/go-upgrade.go upgrade --workflow-update=false + fi + + output=$(git status -s) + if [ -z "${output}" ]; then + exit 0 + fi + + go_version=$(go run ./go/tools/go-upgrade/go-upgrade.go get go-version) + bootstrap_version=$(go run ./go/tools/go-upgrade/go-upgrade.go get bootstrap-version) + echo "go-version=${go_version}" >> $GITHUB_OUTPUT + echo "bootstrap-version=${bootstrap_version}" >> $GITHUB_OUTPUT + + # Check if the PR already exists, if it does then do not create new PR. + gh pr list -S "is:open [${{ matrix.branch }}] Upgrade the Golang version to go${go_version}" > out.txt 2>&1 | true + if [ -s out.txt ]; then + rm -f out.txt + exit 0 + fi + rm -f out.txt + echo "create-pr=true" >> $GITHUB_OUTPUT + + - name: Create Pull Request + if: steps.detect-and-update.outputs.create-pr == 'true' + uses: peter-evans/create-pull-request@v4 + with: + branch: "upgrade-go-to-${{steps.detect-and-update.outputs.go-version}}-on-${{ matrix.branch }}" + commit-message: "bump go version to go${{steps.detect-and-update.outputs.go-version}}" + signoff: true + delete-branch: true + title: "[${{ matrix.branch }}] Upgrade the Golang version to `go${{steps.detect-and-update.outputs.go-version}}`" + body: | + This Pull Request bumps the Golang version to `go${{steps.detect-and-update.outputs.go-version}}` and the bootstrap version to `${{steps.detect-and-update.outputs.bootstrap-version}}`. + + > Do not trust the bot blindly. A thorough code review must be done to ensure all the files have been correctly modified. + + There are a few manual steps remaining: + - [ ] Make sure you update the Golang version used in the previous and next release branches for the Upgrade/Downgrade tests. + - [ ] Build and Push the bootstrap images to Docker Hub, the bot cannot handle that. + - [ ] Update the `./.github/workflows/*.yml` files with the newer Golang version, the bot cannot handle that due to permissions. + - To accomplish this, run the following: `go run ./go/tools/go-upgrade/go-upgrade.go upgrade workflows --go-to=${{steps.detect-and-update.outputs.go-version}}` + base: ${{ matrix.branch }} + labels: | + Skip CI + go + Benchmark me + Component: General + Type: CI/Build diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml index bde1a3283f7..ed7ee91d20d 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml @@ -7,11 +7,13 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Upgrade Downgrade Testing - Backups - E2E') cancel-in-progress: true +permissions: read-all + jobs: get_previous_release: if: always() name: Get Previous Release - Backups - E2E - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} @@ -32,7 +34,7 @@ jobs: timeout-minutes: 60 if: always() && needs.get_previous_release.result == 'success' name: Run Upgrade Downgrade Test - Backups - E2E - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_previous_release @@ -81,9 +83,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -112,7 +114,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) @@ -131,7 +133,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -151,7 +153,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ @@ -161,8 +163,10 @@ jobs: run: | source build.env - rm -f $PWD/bin/vttablet + rm -f $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet + cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl + cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld vttablet --version # Run test with VTTablet at version N-1 and VTBackup at version N @@ -181,9 +185,11 @@ jobs: run: | source build.env - rm -f $PWD/bin/vtbackup $PWD/bin/vttablet + rm -f $PWD/bin/vtbackup $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld cp /tmp/vitess-build-current/bin/vtbackup $PWD/bin/vtbackup cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet + cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl + cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld vtbackup --version vttablet --version diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml index d92da4088e0..47877bb78b8 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml @@ -7,11 +7,13 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Upgrade Downgrade Testing - Backups - E2E - Next Release') cancel-in-progress: true +permissions: read-all + jobs: get_next_release: if: always() name: Get Latest Release - Backups - E2E - Next Release - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} @@ -32,7 +34,7 @@ jobs: timeout-minutes: 60 if: always() && needs.get_next_release.result == 'success' name: Run Upgrade Downgrade Test - Backups - E2E - Next Release - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_next_release @@ -84,9 +86,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -115,7 +117,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) @@ -134,7 +136,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -154,7 +156,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ @@ -164,8 +166,10 @@ jobs: run: | source build.env - rm -f $PWD/bin/vttablet + rm -f $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet + cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl + cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld vttablet --version # Run test with VTTablet at version N+1 and VTBackup at version N @@ -184,9 +188,11 @@ jobs: run: | source build.env - rm -f $PWD/bin/vtbackup $PWD/bin/vttablet - cp /tmp/vitess-build-current/bin/vtbackup $PWD/bin/vtbackup - cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet + rm -f $PWD/bin/vtbackup $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld + cp /tmp/vitess-build-other/bin/vtbackup $PWD/bin/vtbackup + cp /tmp/vitess-build-current/bin/vttablet $PWD/bin/vttablet + cp /tmp/vitess-build-current/bin/mysqlctl $PWD/bin/mysqlctl + cp /tmp/vitess-build-current/bin/mysqlctld $PWD/bin/mysqlctld vtbackup --version vttablet --version diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual.yml b/.github/workflows/upgrade_downgrade_test_backups_manual.yml index 6201d9b49ba..d310e01c0ac 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_manual.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_manual.yml @@ -7,11 +7,13 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Upgrade Downgrade Testing - Backups - Manual') cancel-in-progress: true +permissions: read-all + jobs: get_previous_release: if: always() name: Get Previous Release - Backups - Manual - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} @@ -33,7 +35,7 @@ jobs: timeout-minutes: 40 if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Backups - Manual - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_previous_release @@ -83,9 +85,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -96,13 +98,6 @@ jobs: run: | sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 - - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts - # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! - - name: Get base dependencies if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' run: | @@ -139,7 +134,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - name: Checkout to the other version's code (${{ needs.get_previous_release.outputs.previous_release }}) @@ -158,7 +153,7 @@ jobs: timeout-minutes: 5 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -183,7 +178,7 @@ jobs: timeout-minutes: 5 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ @@ -234,8 +229,10 @@ jobs: run: | source build.env - rm -f $PWD/bin/vttablet + rm -f $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet + cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl + cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld vttablet --version # Starting the tablets again, they will automatically start restoring the last backup. @@ -276,22 +273,16 @@ jobs: source build.env ; cd examples/backups ./take_backups.sh - # Stopping the tablets so we can perform the upgrade. - - name: Stop tablets - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 10 - run: | - source build.env ; cd examples/backups - ./stop_tablets.sh - # We upgrade: we swap binaries and use the version N of the tablet. - name: Upgrade - Swap binaries, use VTTablet N if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' run: | source build.env - rm -f $PWD/bin/vttablet + rm -f $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld cp /tmp/vitess-build-current/bin/vttablet $PWD/bin/vttablet + cp /tmp/vitess-build-current/bin/mysqlctl $PWD/bin/mysqlctl + cp /tmp/vitess-build-current/bin/mysqlctld $PWD/bin/mysqlctld vttablet --version # Starting the tablets again and restoring the previous backup. @@ -300,9 +291,7 @@ jobs: timeout-minutes: 10 run: | source build.env ; cd examples/backups - ./restart_tablets.sh - # give enough time to the tablets to restore the backup - sleep 90 + ./upgrade_cluster.sh # We count the number of rows in every table to check that the restore step was successful. - name: Assert the number of rows in every table diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml index dc0ad902478..1622622c556 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml @@ -7,11 +7,13 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Upgrade Downgrade Testing - Backups - Manual - Next Release') cancel-in-progress: true +permissions: read-all + jobs: get_next_release: if: always() name: Get Previous Release - Backups - Manual - Next Release - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} @@ -33,7 +35,7 @@ jobs: timeout-minutes: 40 if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Backups - Manual - Next Release - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_next_release @@ -86,9 +88,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -99,13 +101,6 @@ jobs: run: | sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 - - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts - # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! - - name: Get base dependencies if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' run: | @@ -142,7 +137,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - name: Checkout to the other version's code (${{ needs.get_next_release.outputs.next_release }}) @@ -161,7 +156,7 @@ jobs: timeout-minutes: 5 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -186,7 +181,7 @@ jobs: timeout-minutes: 5 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ @@ -237,8 +232,10 @@ jobs: run: | source build.env - rm -f $PWD/bin/vttablet + rm -f $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet + cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl + cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld vttablet --version # Starting the tablets again, they will automatically start restoring the last backup. @@ -279,22 +276,16 @@ jobs: source build.env ; cd examples/backups ./take_backups.sh - # Stopping the tablets so we can perform the upgrade. - - name: Stop tablets - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 10 - run: | - source build.env ; cd examples/backups - ./stop_tablets.sh - # We upgrade: we swap binaries and use the version N of the tablet. - name: Upgrade - Swap binaries, use VTTablet N if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' run: | source build.env - rm -f $PWD/bin/vttablet + rm -f $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld cp /tmp/vitess-build-current/bin/vttablet $PWD/bin/vttablet + cp /tmp/vitess-build-current/bin/mysqlctl $PWD/bin/mysqlctl + cp /tmp/vitess-build-current/bin/mysqlctld $PWD/bin/mysqlctld vttablet --version # Starting the tablets again and restoring the next backup. @@ -303,9 +294,7 @@ jobs: timeout-minutes: 10 run: | source build.env ; cd examples/backups - ./restart_tablets.sh - # give enough time to the tablets to restore the backup - sleep 90 + ./upgrade_cluster.sh # We count the number of rows in every table to check that the restore step was successful. - name: Assert the number of rows in every table diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml index 2d8aceb6ceb..9c299c0cec6 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml @@ -7,6 +7,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Upgrade Downgrade Testing Query Serving (Queries)') cancel-in-progress: true +permissions: read-all + # This test ensures that our end-to-end tests work using Vitess components # (vtgate, vttablet, etc) built on different versions. @@ -14,7 +16,7 @@ jobs: get_previous_release: if: always() name: Get Previous Release - Query Serving (Queries) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} @@ -34,7 +36,7 @@ jobs: upgrade_downgrade_test: if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Query Serving (Queries) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_previous_release @@ -83,9 +85,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -130,7 +132,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) @@ -149,7 +151,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -169,7 +171,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ @@ -209,9 +211,11 @@ jobs: run: | source build.env - rm -f $PWD/bin/vtgate $PWD/bin/vttablet + rm -f $PWD/bin/vtgate $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld cp /tmp/vitess-build-current/bin/vtgate $PWD/bin/vtgate cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet + cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl + cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld vtgate --version vttablet --version diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml index 7de07a3892d..326384bb3cc 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml @@ -7,6 +7,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Upgrade Downgrade Testing Query Serving (Queries) Next Release') cancel-in-progress: true +permissions: read-all + # This test ensures that our end-to-end tests work using Vitess components # (vtgate, vttablet, etc) built on different versions. @@ -14,7 +16,7 @@ jobs: get_next_release: if: always() name: Get Latest Release - Query Serving (Queries) Next Release - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} @@ -34,7 +36,7 @@ jobs: upgrade_downgrade_test: if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Query Serving (Queries) Next Release - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_next_release @@ -86,9 +88,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -133,7 +135,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) @@ -152,7 +154,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -172,7 +174,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ @@ -212,9 +214,11 @@ jobs: run: | source build.env - rm -f $PWD/bin/vtgate $PWD/bin/vttablet + rm -f $PWD/bin/vtgate $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld cp /tmp/vitess-build-current/bin/vtgate $PWD/bin/vtgate cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet + cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl + cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld vtgate --version vttablet --version diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml index 5b8e3107757..45d61d955fc 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml @@ -7,6 +7,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Upgrade Downgrade Testing Query Serving (Schema)') cancel-in-progress: true +permissions: read-all + # This test ensures that our end-to-end tests work using Vitess components # (vtgate, vttablet, etc) built on different versions. @@ -14,7 +16,7 @@ jobs: get_previous_release: if: always() name: Get Previous Release - Query Serving (Schema) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} @@ -34,7 +36,7 @@ jobs: upgrade_downgrade_test: if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Query Serving (Schema) - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_previous_release @@ -83,9 +85,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -130,7 +132,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) @@ -149,7 +151,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -169,7 +171,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ @@ -209,9 +211,11 @@ jobs: run: | source build.env - rm -f $PWD/bin/vtgate $PWD/bin/vttablet + rm -f $PWD/bin/vtgate $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld cp /tmp/vitess-build-current/bin/vtgate $PWD/bin/vtgate cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet + cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl + cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld vtgate --version vttablet --version diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml index 18075b62e0f..19ee1819763 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml @@ -7,6 +7,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Upgrade Downgrade Testing Query Serving (Schema) Next Release') cancel-in-progress: true +permissions: read-all + # This test ensures that our end-to-end tests work using Vitess components # (vtgate, vttablet, etc) built on different versions. @@ -14,7 +16,7 @@ jobs: get_next_release: if: always() name: Get Latest Release - Query Serving (Schema) Next Release - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} @@ -34,7 +36,7 @@ jobs: upgrade_downgrade_test: if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Query Serving (Schema) Next Release - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_next_release @@ -86,9 +88,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -133,7 +135,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) @@ -152,7 +154,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -172,7 +174,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ @@ -212,9 +214,11 @@ jobs: run: | source build.env - rm -f $PWD/bin/vtgate $PWD/bin/vttablet + rm -f $PWD/bin/vtgate $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld cp /tmp/vitess-build-current/bin/vtgate $PWD/bin/vtgate cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet + cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl + cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld vtgate --version vttablet --version diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml index cca38725a37..088c82b21da 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml @@ -7,6 +7,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Upgrade Downgrade Testing Reparent New Vtctl') cancel-in-progress: true +permissions: read-all + # This test ensures that our end-to-end tests work using Vitess components # (vtctl, vttablet, etc) built on different versions. @@ -14,7 +16,7 @@ jobs: get_next_release: if: always() name: Get Latest Release - Reparent New Vtctl - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} @@ -34,7 +36,7 @@ jobs: upgrade_downgrade_test: if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Reparent New Vtctl - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_next_release @@ -86,9 +88,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -133,7 +135,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) @@ -152,7 +154,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -172,7 +174,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml index 6cc0f1d9882..d331ec978dc 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml @@ -7,6 +7,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Upgrade Downgrade Testing Reparent New VTTablet') cancel-in-progress: true +permissions: read-all + # This test ensures that our end-to-end tests work using Vitess components # (vtctl, vttablet, etc) built on different versions. @@ -14,7 +16,7 @@ jobs: get_next_release: if: always() name: Get Latest Release - Reparent New VTTablet - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} @@ -34,7 +36,7 @@ jobs: upgrade_downgrade_test: if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Reparent New VTTablet - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_next_release @@ -86,9 +88,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -133,7 +135,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) @@ -152,7 +154,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -172,7 +174,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ @@ -182,8 +184,10 @@ jobs: run: | source build.env - rm -f $PWD/bin/vttablet + rm -f $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet + cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl + cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld vtctl --version vttablet --version diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml index 908be6519f6..8d9374ecac0 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml @@ -7,6 +7,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Upgrade Downgrade Testing Reparent Old Vtctl') cancel-in-progress: true +permissions: read-all + # This test ensures that our end-to-end tests work using Vitess components # (vtctl, vttablet, etc) built on different versions. @@ -14,7 +16,7 @@ jobs: get_previous_release: if: always() name: Get Previous Release - Reparent Old Vtctl - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} @@ -34,7 +36,7 @@ jobs: upgrade_downgrade_test: if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Reparent Old Vtctl - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_previous_release @@ -83,9 +85,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -130,7 +132,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) @@ -149,7 +151,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -169,7 +171,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml index bbb17c43e99..a4356a16217 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml @@ -7,6 +7,8 @@ concurrency: group: format('{0}-{1}', ${{ github.ref }}, 'Upgrade Downgrade Testing Reparent Old VTTablet') cancel-in-progress: true +permissions: read-all + # This test ensures that our end-to-end tests work using Vitess components # (vtctl, vttablet, etc) built on different versions. @@ -14,7 +16,7 @@ jobs: get_previous_release: if: always() name: Get Previous Release - Reparent Old VTTablet - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 outputs: previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} @@ -34,7 +36,7 @@ jobs: upgrade_downgrade_test: if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Reparent Old VTTablet - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-16cores-1 needs: - get_previous_release @@ -83,9 +85,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: - go-version: 1.20.1 + go-version: 1.21.1 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -130,7 +132,7 @@ jobs: sudo apt-get install -y gnupg2 sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo apt-get update - sudo apt-get install percona-xtrabackup-24 + sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) @@ -149,7 +151,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-other/ cp -R bin /tmp/vitess-build-other/ rm -Rf bin/* @@ -169,7 +171,7 @@ jobs: timeout-minutes: 10 run: | source build.env - make build + NOVTADMINBUILD=1 make build mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ @@ -179,8 +181,10 @@ jobs: run: | source build.env - rm -f $PWD/bin/vttablet + rm -f $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet + cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl + cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld vtctl --version vttablet --version diff --git a/.github/workflows/vtadmin_web_build.yml b/.github/workflows/vtadmin_web_build.yml index c0b668641ea..24ade4d9227 100644 --- a/.github/workflows/vtadmin_web_build.yml +++ b/.github/workflows/vtadmin_web_build.yml @@ -12,9 +12,11 @@ on: - '.github/workflows/vtadmin_web_build.yml' - 'web/vtadmin/**' +permissions: read-all + jobs: build: - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI run: | @@ -40,7 +42,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' with: # node-version should match package.json - node-version: '16.19.0' + node-version: '18.16.0' - name: Install dependencies if: steps.skip-workflow.outputs.skip-workflow == 'false' diff --git a/.github/workflows/vtadmin_web_lint.yml b/.github/workflows/vtadmin_web_lint.yml index 9b7e9a68847..055e1934fb0 100644 --- a/.github/workflows/vtadmin_web_lint.yml +++ b/.github/workflows/vtadmin_web_lint.yml @@ -12,6 +12,8 @@ on: - '.github/workflows/vtadmin_web_lint.yml' - 'web/vtadmin/**' +permissions: read-all + jobs: lint: runs-on: ubuntu-22.04 @@ -40,7 +42,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' with: # node-version should match package.json - node-version: '16.19.0' + node-version: '18.16.0' - name: Install dependencies if: steps.skip-workflow.outputs.skip-workflow == 'false' diff --git a/.github/workflows/vtadmin_web_unit_tests.yml b/.github/workflows/vtadmin_web_unit_tests.yml index bfc4a5d15ff..1efa474fde3 100644 --- a/.github/workflows/vtadmin_web_unit_tests.yml +++ b/.github/workflows/vtadmin_web_unit_tests.yml @@ -12,9 +12,11 @@ on: - '.github/workflows/vtadmin_web_unit_tests.yml' - 'web/vtadmin/**' +permissions: read-all + jobs: unit-tests: - runs-on: ubuntu-22.04 + runs-on: gh-hosted-runners-4cores-1 steps: - name: Skip CI run: | @@ -40,7 +42,7 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' with: # node-version should match package.json - node-version: '16.19.0' + node-version: '18.16.0' - name: Install dependencies if: steps.skip-workflow.outputs.skip-workflow == 'false' diff --git a/.gitignore b/.gitignore index 25dfe2c63f3..881e89890cc 100644 --- a/.gitignore +++ b/.gitignore @@ -70,7 +70,7 @@ _test/ /vendor/*/ # release folder -releases +/releases/ # Local examples /examples/local/vtdataroot @@ -83,3 +83,6 @@ venv .scannerwork report + +# plan test output +/go/vt/vtgate/planbuilder/testdata/plan_test* diff --git a/.golangci.yml b/.golangci.yml index e2bdb5336e4..9c674953a76 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,8 +1,6 @@ run: - go: 1.19 + go: 1.20 timeout: 10m - skip-dirs: - - go/vt/topo/k8stopo/client linters-settings: errcheck: @@ -154,4 +152,4 @@ issues: # https://github.com/golangci/golangci/wiki/Configuration service: - golangci-lint-version: 1.51.2 # use the fixed version to not introduce new linters unexpectedly + golangci-lint-version: 1.52.2 # use the fixed version to not introduce new linters unexpectedly diff --git a/ADOPTERS.md b/ADOPTERS.md index a254b54e14d..a471983a06e 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -19,5 +19,6 @@ This is an alphabetical list of known adopters of Vitess. Some have already gone * [Square](https://square.com) * [Stitch Labs](https://stitchlabs.com) * [Twitter](https://twitter.com) +* [Vinted](https://www.vinted.com/) * [Weave](https://www.getweave.com) * [YouTube](https://youtube.com) diff --git a/GOVERNANCE.md b/GOVERNANCE.md index 8adec25f8d9..8b408d8ba55 100644 --- a/GOVERNANCE.md +++ b/GOVERNANCE.md @@ -55,13 +55,13 @@ A maintainer is not allowed to merge their change without approval from other ma Anyone can become a maintainer; there are no special requirements, other than to have shown a willingness and ability to participate in the project as a team player. Typically, a potential maintainer will need to show that they have an understanding of the project, its objectives and its strategy. They will also have provided valuable contributions to the project over a period of time. -New maintainers can be nominated by any existing maintainer. Once they have been nominated, there will be a vote by the steering committee. Maintainer voting is one of the few activities that takes place on the project’s private management list. This is to allow committee members to freely express their opinions about a nominee without causing embarrassment. Once the vote has been held, the aggregated voting results are published on the public mailing list. The nominee is entitled to request an explanation of any ‘no’ votes against them, regardless of the outcome of the vote. This explanation will be provided by the Steering Committee Chair (see below) and will be anonymous and constructive in nature. +New maintainers can be nominated by any existing maintainer. Once they have been nominated, there will be a vote by the maintainer team to decide whether to accept or reject the nomination. -Nominees may decline their appointment as a maintainer. However, this is unusual, as the project does not expect any specific time or resource commitment from its community members. The intention behind the role of maintainer is to allow people to contribute to the project more easily, not to tie them in to the project in any formal way. +Nominees may decline their appointment as a maintainer. The project does not expect any specific time or resource commitment from its community members, however it is expected that maintainers are evangelists for the project. -It is important to recognise that maintainer-ship is a privilege, not a right. That privilege must be earned and once earned it can be removed by the Steering Committee for conduct inconsistent with the [Guiding Principles](https://github.com/vitessio/vitess/blob/main/GUIDING_PRINCIPLES.md) or if they drop below a level of commitment and engagement required to be a maintainer, as determined by the Steering Committee. The Steering Committee also reserves the right to remove a person for any other reason inconsistent with the goals of the project. +It is important to recognise that maintainer-ship is a privilege, not a right. That privilege must be earned and once earned it can be removed by the [Steering Committee](https://github.com/vitessio/vitess/blob/main/STEERING.md) for conduct inconsistent with the [Guiding Principles](https://github.com/vitessio/vitess/blob/main/GUIDING_PRINCIPLES.md) or if they drop below a level of commitment and engagement required to be a maintainer, as determined by the Steering Committee. The Steering Committee also reserves the right to remove a person for any other reason inconsistent with the goals of the project. -A maintainer who shows an above-average level of contribution to the project, particularly with respect to its strategic direction and long-term health, may be nominated to become a member of the Steering Committee. This role is described in the [Steering Committee document](https://github.com/vitessio/vitess/blob/main/STEERING.md). +A maintainer who shows an above-average level of contribution to the project, particularly with respect to its strategic direction and long-term health, may be nominated to become a member of the Steering Committee. # Support @@ -72,7 +72,7 @@ Anyone can contribute to the project, regardless of their skills, as there are m The Slack workspace is the most appropriate place for a contributor to ask for help when making their first contribution. -# Decision Making Process +# Decision-Making Process Decisions about the future of the project are made by the Steering Committee. New proposals and ideas can be brought to the Committee’s attention through the Slack workspace or by filing an issue. If necessary, the Committee will seek input from others to come to the final decision. diff --git a/Makefile b/Makefile index 24b0bb6ed49..e1831f76b3a 100644 --- a/Makefile +++ b/Makefile @@ -46,7 +46,7 @@ export REWRITER=go/vt/sqlparser/rewriter.go # Since we are not using this Makefile for compilation, limiting parallelism will not increase build time. .NOTPARALLEL: -.PHONY: all build install test clean unit_test unit_test_cover unit_test_race integration_test proto proto_banner site_test site_integration_test docker_bootstrap docker_test docker_unit_test java_test reshard_tests e2e_test e2e_test_race minimaltools tools generate_ci_workflows +.PHONY: all build install test clean unit_test unit_test_cover unit_test_race integration_test proto proto_banner site_test site_integration_test docker_bootstrap docker_test docker_unit_test java_test reshard_tests e2e_test e2e_test_race minimaltools tools generate_ci_workflows generate-flag-testdata all: build @@ -61,6 +61,10 @@ ifdef VT_EXTRA_BUILD_FLAGS export EXTRA_BUILD_FLAGS := $(VT_EXTRA_BUILD_FLAGS) endif +ifdef VT_EXTRA_BUILD_LDFLAGS +export EXTRA_BUILD_LDFLAGS := $(VT_EXTRA_BUILD_LDFLAGS) +endif + # This should be the root of the vitess Git directory. ifndef VTROOT export VTROOT=${PWD} @@ -76,7 +80,7 @@ ifndef NOBANNER endif bash ./build.env go build -trimpath $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) \ - -ldflags "$(shell tools/build_version_flags.sh)" \ + -ldflags "$(EXTRA_BUILD_LDFLAGS) $(shell tools/build_version_flags.sh)" \ -o ${VTROOTBIN} ./go/... # build the vitess binaries statically @@ -89,8 +93,12 @@ endif # Binaries will be placed in ${VTROOTBIN}. CGO_ENABLED=0 go build \ -trimpath $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) \ - -ldflags "$(shell tools/build_version_flags.sh)" \ + -ldflags "$(EXTRA_BUILD_LDFLAGS) $(shell tools/build_version_flags.sh)" \ -o ${VTROOTBIN} ./go/... +ifndef NOVTADMINBUILD + echo "Building VTAdmin Web, disable VTAdmin build by setting 'NOVTADMINBUILD'" + PREFIX="" ./web/vtadmin/build.sh +endif # cross-build can be used to cross-compile Vitess client binaries # Outside of select client binaries (namely vtctlclient & vtexplain), cross-compiled Vitess Binaries are not recommended for production deployments @@ -107,7 +115,7 @@ endif mkdir -p ${VTROOTBIN}/${GOOS}_${GOARCH} CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build \ -trimpath $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) \ - -ldflags "$(shell tools/build_version_flags.sh)" \ + -ldflags "$(EXTRA_BUILD_LDFLAGS) $(shell tools/build_version_flags.sh)" \ -o ${VTROOTBIN}/${GOOS}_${GOARCH} ./go/... @if [ ! -x "${VTROOTBIN}/${GOOS}_${GOARCH}/vttablet" ]; then \ @@ -121,7 +129,7 @@ endif bash ./build.env go build -trimpath \ $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) \ - -ldflags "$(shell tools/build_version_flags.sh)" \ + -ldflags "$(EXTRA_BUILD_LDFLAGS) $(shell tools/build_version_flags.sh)" \ -gcflags -'N -l' \ -o ${VTROOTBIN} ./go/... @@ -245,7 +253,7 @@ PROTO_SRCS = $(wildcard proto/*.proto) PROTO_SRC_NAMES = $(basename $(notdir $(PROTO_SRCS))) PROTO_GO_OUTS = $(foreach name, $(PROTO_SRC_NAMES), go/vt/proto/$(name)/$(name).pb.go) # This rule rebuilds all the go files from the proto definitions for gRPC. -proto: $(PROTO_GO_OUTS) +proto: $(PROTO_GO_OUTS) vtadmin_web_proto_types ifndef NOBANNER echo $$(date): Compiling proto definitions @@ -256,9 +264,10 @@ $(PROTO_GO_OUTS): minimaltools install_protoc-gen-go proto/*.proto --go_out=. --plugin protoc-gen-go="${VTROOTBIN}/protoc-gen-go" \ --go-grpc_out=. --plugin protoc-gen-go-grpc="${VTROOTBIN}/protoc-gen-go-grpc" \ --go-vtproto_out=. --plugin protoc-gen-go-vtproto="${VTROOTBIN}/protoc-gen-go-vtproto" \ - --go-vtproto_opt=features=marshal+unmarshal+size+pool \ + --go-vtproto_opt=features=marshal+unmarshal+size+pool+clone \ --go-vtproto_opt=pool=vitess.io/vitess/go/vt/proto/query.Row \ --go-vtproto_opt=pool=vitess.io/vitess/go/vt/proto/binlogdata.VStreamRowsResponse \ + --go-vtproto_opt=pool=vitess.io/vitess/go/vt/proto/binlogdata.VStreamTablesResponse \ -I${PWD}/dist/vt-protoc-21.3/include:proto $(PROTO_SRCS) cp -Rf vitess.io/vitess/go/vt/proto/* go/vt/proto rm -rf vitess.io/vitess/go/vt/proto/ @@ -269,7 +278,7 @@ $(PROTO_GO_OUTS): minimaltools install_protoc-gen-go proto/*.proto # This rule builds the bootstrap images for all flavors. DOCKER_IMAGES_FOR_TEST = mysql57 mysql80 percona57 percona80 DOCKER_IMAGES = common $(DOCKER_IMAGES_FOR_TEST) -BOOTSTRAP_VERSION=15 +BOOTSTRAP_VERSION=22 ensure_bootstrap_version: find docker/ -type f -exec sed -i "s/^\(ARG bootstrap_version\)=.*/\1=${BOOTSTRAP_VERSION}/" {} \; sed -i 's/\(^.*flag.String(\"bootstrap-version\",\) *\"[^\"]\+\"/\1 \"${BOOTSTRAP_VERSION}\"/' test.go @@ -323,6 +332,9 @@ DOCKER_LITE_TARGETS = $(addprefix docker_lite_,$(DOCKER_LITE_SUFFIX)) $(DOCKER_LITE_TARGETS): docker_lite_%: ${call build_docker_image,docker/lite/Dockerfile.$*,vitess/lite:$*} +docker_lite_push: + for i in $(DOCKER_LITE_SUFFIX); do echo "pushing lite image: $$i"; docker push vitess/lite:$$i || exit 1; done + docker_lite_all: docker_lite $(DOCKER_LITE_TARGETS) docker_local: @@ -375,69 +387,11 @@ tools: minimaltools: echo $$(date): Installing minimal dependencies - BUILD_CHROME=0 BUILD_JAVA=0 BUILD_CONSUL=0 ./bootstrap.sh + BUILD_JAVA=0 BUILD_CONSUL=0 ./bootstrap.sh dependency_check: ./tools/dependency_check.sh -install_k8s-code-generator: tools/tools.go go.mod - go install k8s.io/code-generator/cmd/deepcopy-gen - go install k8s.io/code-generator/cmd/client-gen - go install k8s.io/code-generator/cmd/lister-gen - go install k8s.io/code-generator/cmd/informer-gen - -DEEPCOPY_GEN=$(VTROOTBIN)/deepcopy-gen -CLIENT_GEN=$(VTROOTBIN)/client-gen -LISTER_GEN=$(VTROOTBIN)/lister-gen -INFORMER_GEN=$(VTROOTBIN)/informer-gen - -GEN_BASE_DIR ?= vitess.io/vitess/go/vt/topo/k8stopo - -client_go_gen: install_k8s-code-generator - echo $$(date): Regenerating client-go code - # Delete and re-generate the deepcopy types - find $(VTROOT)/go/vt/topo/k8stopo/apis/topo/v1beta1 -name "zz_generated.deepcopy.go" -delete - - # We output to ./ and then copy over the generated files to the appropriate path - # This is done so we don't have rely on the repository being cloned to `$GOPATH/src/vitess.io/vitess` - - $(DEEPCOPY_GEN) -o ./ \ - --input-dirs $(GEN_BASE_DIR)/apis/topo/v1beta1 \ - -O zz_generated.deepcopy \ - --bounding-dirs $(GEN_BASE_DIR)/apis \ - --go-header-file ./go/vt/topo/k8stopo/boilerplate.go.txt - - # Delete existing code - rm -rf go/vt/topo/k8stopo/client - - # Generate clientset - $(CLIENT_GEN) -o ./ \ - --clientset-name versioned \ - --input-base $(GEN_BASE_DIR)/apis \ - --input 'topo/v1beta1' \ - --output-package $(GEN_BASE_DIR)/client/clientset \ - --fake-clientset=true \ - --go-header-file ./go/vt/topo/k8stopo/boilerplate.go.txt - - # Generate listers - $(LISTER_GEN) -o ./ \ - --input-dirs $(GEN_BASE_DIR)/apis/topo/v1beta1 \ - --output-package $(GEN_BASE_DIR)/client/listers \ - --go-header-file ./go/vt/topo/k8stopo/boilerplate.go.txt - - # Generate informers - $(INFORMER_GEN) -o ./ \ - --input-dirs $(GEN_BASE_DIR)/apis/topo/v1beta1 \ - --output-package $(GEN_BASE_DIR)/client/informers \ - --versioned-clientset-package $(GEN_BASE_DIR)/client/clientset/versioned \ - --listers-package $(GEN_BASE_DIR)/client/listers \ - --go-header-file ./go/vt/topo/k8stopo/boilerplate.go.txt - - # Move and cleanup - mv vitess.io/vitess/go/vt/topo/k8stopo/client go/vt/topo/k8stopo/ - mv vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1/zz_generated.deepcopy.go go/vt/topo/k8stopo/apis/topo/v1beta1/zz_generated.deepcopy.go - rm -rf vitess.io/vitess/go/vt/topo/k8stopo/ - vtadmin_web_install: cd web/vtadmin && npm install @@ -457,8 +411,8 @@ vtadmin_authz_testgen: generate_ci_workflows: cd test && go run ci_workflow_gen.go && cd .. -release-notes: - go run ./go/tools/release-notes --from "$(FROM)" --to "$(TO)" --version "$(VERSION)" --summary "$(SUMMARY)" +generate-flag-testdata: + ./tools/generate_flag_testdata.sh install_kubectl_kind: ./tools/get_kubectl_kind.sh diff --git a/README.md b/README.md index ed2d8d80404..6f021141aca 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,8 @@ [![Maven Central](https://maven-badges.herokuapp.com/maven-central/io.vitess/vitess-jdbc/badge.svg)](https://maven-badges.herokuapp.com/maven-central/io.vitess/vitess-jdbc) -[![Build Status](https://travis-ci.org/vitessio/vitess.svg?branch=master)](https://travis-ci.org/vitessio/vitess/builds) [![codebeat badge](https://codebeat.co/badges/51c9a056-1103-4522-9a9c-dc623821ea87)](https://codebeat.co/projects/github-com-youtube-vitess) [![Go Report Card](https://goreportcard.com/badge/vitess.io/vitess)](https://goreportcard.com/report/vitess.io/vitess) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fvitessio%2Fvitess.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fvitessio%2Fvitess?ref=badge_shield) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1724/badge)](https://bestpractices.coreinfrastructure.org/projects/1724) -[![Coverage](https://sonarcloud.io/api/project_badges/measure?project=vitessio&metric=coverage)](https://sonarcloud.io/dashboard?id=vitessio) # Vitess diff --git a/bootstrap.sh b/bootstrap.sh index 4dffa15cb7c..f95302ea771 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -25,7 +25,6 @@ source ./dev.env BUILD_JAVA=${BUILD_JAVA:-1} BUILD_CONSUL=${BUILD_CONSUL:-1} -BUILD_CHROME=${BUILD_CHROME:-1} VITESS_RESOURCES_DOWNLOAD_BASE_URL="https://github.com/vitessio/vitess-resources/releases/download" VITESS_RESOURCES_RELEASE="v4.0" @@ -114,8 +113,8 @@ install_protoc() { esac # This is how we'd download directly from source: - $VTROOT/tools/wget-retry https://github.com/protocolbuffers/protobuf/releases/download/v$version/protoc-$version-$platform-${target}.zip - #$VTROOT/tools/wget-retry "${VITESS_RESOURCES_DOWNLOAD_URL}/protoc-$version-$platform-${target}.zip" + "${VTROOT}/tools/wget-retry" https://github.com/protocolbuffers/protobuf/releases/download/v$version/protoc-$version-$platform-${target}.zip + #"${VTROOT}/tools/wget-retry" "${VITESS_RESOURCES_DOWNLOAD_URL}/protoc-$version-$platform-${target}.zip" unzip "protoc-$version-$platform-${target}.zip" ln -snf "$dist/bin/protoc" "$VTROOT/bin/protoc" @@ -129,7 +128,7 @@ install_zookeeper() { zk="zookeeper-$version" # This is how we'd download directly from source: # wget "https://dlcdn.apache.org/zookeeper/$zk/apache-$zk.tar.gz" - $VTROOT/tools/wget-retry "${VITESS_RESOURCES_DOWNLOAD_URL}/apache-${zk}.tar.gz" + "${VTROOT}/tools/wget-retry" "${VITESS_RESOURCES_DOWNLOAD_URL}/apache-${zk}.tar.gz" tar -xzf "$dist/apache-$zk.tar.gz" mvn -f $dist/apache-$zk/zookeeper-contrib/zookeeper-contrib-fatjar/pom.xml clean install -P fatjar -DskipTests mkdir -p $dist/lib @@ -159,8 +158,8 @@ install_etcd() { file="etcd-${version}-${platform}-${target}.${ext}" # This is how we'd download directly from source: - $VTROOT/tools/wget-retry "https://github.com/etcd-io/etcd/releases/download/$version/$file" - #$VTROOT/tools/wget-retry "${VITESS_RESOURCES_DOWNLOAD_URL}/${file}" + "${VTROOT}/tools/wget-retry" "https://github.com/etcd-io/etcd/releases/download/$version/$file" + #"${VTROOT}/tools/wget-retry" "${VITESS_RESOURCES_DOWNLOAD_URL}/${file}" if [ "$ext" = "tar.gz" ]; then tar xzf "$file" else @@ -171,35 +170,6 @@ install_etcd() { ln -snf "$dist/etcd-${version}-${platform}-${target}/etcdctl" "$VTROOT/bin/etcdctl" } - -# Download and install k3s, link k3s binary into our root -install_k3s() { - local version="$1" - local dist="$2" - case $(uname) in - Linux) local platform=linux;; - *) echo "WARNING: unsupported platform. K3s only supports running on Linux, the k8s topology will not be available for local examples."; return;; - esac - - case $(get_arch) in - aarch64) local target="-arm64";; - x86_64) local target="";; - arm64) local target="-arm64";; - *) echo "WARNING: unsupported architecture, the k8s topology will not be available for local examples."; return;; - esac - - file="k3s${target}" - - local dest="$dist/k3s${target}-${version}-${platform}" - # This is how we'd download directly from source: - # download_url=https://github.com/rancher/k3s/releases/download - # wget -O $dest "$download_url/$version/$file" - $VTROOT/tools/wget-retry -O $dest "${VITESS_RESOURCES_DOWNLOAD_URL}/$file-$version" - chmod +x $dest - ln -snf $dest "$VTROOT/bin/k3s" -} - - # Download and install consul, link consul binary into our root. install_consul() { local version="$1" @@ -221,41 +191,35 @@ install_consul() { # This is how we'd download directly from source: # download_url=https://releases.hashicorp.com/consul # wget "${download_url}/${version}/consul_${version}_${platform}_${target}.zip" - $VTROOT/tools/wget-retry "${VITESS_RESOURCES_DOWNLOAD_URL}/consul_${version}_${platform}_${target}.zip" + "${VTROOT}/tools/wget-retry" "${VITESS_RESOURCES_DOWNLOAD_URL}/consul_${version}_${platform}_${target}.zip" unzip "consul_${version}_${platform}_${target}.zip" ln -snf "$dist/consul" "$VTROOT/bin/consul" } -# Download chromedriver -install_chromedriver() { +# Download and install toxiproxy, link toxiproxy binary into our root. +install_toxiproxy() { local version="$1" local dist="$2" case $(uname) in Linux) local platform=linux;; - *) echo "Platform not supported for vtctl-web tests. Skipping chromedriver install."; return;; + Darwin) local platform=darwin;; + *) echo "WARNING: unsupported platform. Some tests that rely on toxiproxy will not function."; return;; esac - if [ "$(arch)" == "aarch64" ] ; then - os=$(cat /etc/*release | grep "^ID=" | cut -d '=' -f 2) - case $os in - ubuntu|debian) - sudo apt-get update -y && sudo apt install -y --no-install-recommends unzip libglib2.0-0 libnss3 libx11-6 - ;; - centos|fedora) - sudo yum update -y && yum install -y libX11 unzip wget - ;; - esac - echo "For Arm64, using prebuilt binary from electron (https://github.com/electron/electron/) of version 76.0.3809.126" - $VTROOT/tools/wget-retry https://github.com/electron/electron/releases/download/v6.0.3/chromedriver-v6.0.3-linux-arm64.zip - unzip -o -q chromedriver-v6.0.3-linux-arm64.zip -d "$dist" - rm chromedriver-v6.0.3-linux-arm64.zip - else - $VTROOT/tools/wget-retry "https://chromedriver.storage.googleapis.com/$version/chromedriver_linux64.zip" - unzip -o -q chromedriver_linux64.zip -d "$dist" - rm chromedriver_linux64.zip - fi + case $(get_arch) in + aarch64) local target=arm64;; + x86_64) local target=amd64;; + arm64) local target=arm64;; + *) echo "WARNING: unsupported architecture. Some tests that rely on toxiproxy will not function."; return;; + esac + + # This is how we'd download directly from source: + file="toxiproxy-server-${platform}-${target}" + "${VTROOT}/tools/wget-retry" "https://github.com/Shopify/toxiproxy/releases/download/$version/$file" + chmod +x "$dist/$file" + ln -snf "$dist/$file" "$VTROOT/bin/toxiproxy-server" } install_all() { @@ -274,18 +238,13 @@ install_all() { # etcd install_dep "etcd" "v3.5.6" "$VTROOT/dist/etcd" install_etcd - # k3s - command -v k3s || install_dep "k3s" "v1.0.0" "$VTROOT/dist/k3s" install_k3s - # consul if [ "$BUILD_CONSUL" == 1 ] ; then install_dep "Consul" "1.11.4" "$VTROOT/dist/consul" install_consul fi - # chromedriver - if [ "$BUILD_CHROME" == 1 ] ; then - install_dep "chromedriver" "90.0.4430.24" "$VTROOT/dist/chromedriver" install_chromedriver - fi + # toxiproxy + install_dep "toxiproxy" "v2.5.0" "$VTROOT/dist/toxiproxy" install_toxiproxy echo echo "bootstrap finished - run 'make build' to compile" diff --git a/build.env b/build.env index 64a55711da8..6077197fe88 100755 --- a/build.env +++ b/build.env @@ -17,7 +17,7 @@ source ./tools/shell_functions.inc go version >/dev/null 2>&1 || fail "Go is not installed or is not in \$PATH. See https://vitess.io/contributing/build-from-source for install instructions." -goversion_min 1.20.1 || fail "Go version reported: `go version`. Version 1.20.1+ required. See https://vitess.io/contributing/build-from-source for install instructions." +goversion_min 1.21.1 || echo "Go version reported: `go version`. Version 1.21.1+ recommended. See https://vitess.io/contributing/build-from-source for install instructions." mkdir -p dist mkdir -p bin diff --git a/doc/releasenotes/10_0_0_release_notes.md b/changelog/10.0/10.0.0/release_notes.md similarity index 100% rename from doc/releasenotes/10_0_0_release_notes.md rename to changelog/10.0/10.0.0/release_notes.md diff --git a/doc/releasenotes/10_0_1_release_notes.md b/changelog/10.0/10.0.1/release_notes.md similarity index 100% rename from doc/releasenotes/10_0_1_release_notes.md rename to changelog/10.0/10.0.1/release_notes.md diff --git a/doc/releasenotes/10_0_2_release_notes.md b/changelog/10.0/10.0.2/release_notes.md similarity index 100% rename from doc/releasenotes/10_0_2_release_notes.md rename to changelog/10.0/10.0.2/release_notes.md diff --git a/doc/releasenotes/10_0_3_release_notes.md b/changelog/10.0/10.0.3/release_notes.md similarity index 100% rename from doc/releasenotes/10_0_3_release_notes.md rename to changelog/10.0/10.0.3/release_notes.md diff --git a/doc/releasenotes/10_0_3_summary.md b/changelog/10.0/10.0.3/summary.md similarity index 100% rename from doc/releasenotes/10_0_3_summary.md rename to changelog/10.0/10.0.3/summary.md diff --git a/doc/releasenotes/10_0_4_release_notes.md b/changelog/10.0/10.0.4/release_notes.md similarity index 100% rename from doc/releasenotes/10_0_4_release_notes.md rename to changelog/10.0/10.0.4/release_notes.md diff --git a/doc/releasenotes/10_0_4_summary.md b/changelog/10.0/10.0.4/summary.md similarity index 100% rename from doc/releasenotes/10_0_4_summary.md rename to changelog/10.0/10.0.4/summary.md diff --git a/doc/releasenotes/10_0_5_release_notes.md b/changelog/10.0/10.0.5/release_notes.md similarity index 100% rename from doc/releasenotes/10_0_5_release_notes.md rename to changelog/10.0/10.0.5/release_notes.md diff --git a/doc/releasenotes/10_0_5_summary.md b/changelog/10.0/10.0.5/summary.md similarity index 100% rename from doc/releasenotes/10_0_5_summary.md rename to changelog/10.0/10.0.5/summary.md diff --git a/changelog/10.0/README.md b/changelog/10.0/README.md new file mode 100644 index 00000000000..304cc933a16 --- /dev/null +++ b/changelog/10.0/README.md @@ -0,0 +1,18 @@ +## v10.0 +* **[10.0.5](10.0.5)** + * [Release Notes](10.0.5/release_notes.md) + +* **[10.0.4](10.0.4)** + * [Release Notes](10.0.4/release_notes.md) + +* **[10.0.3](10.0.3)** + * [Release Notes](10.0.3/release_notes.md) + +* **[10.0.2](10.0.2)** + * [Release Notes](10.0.2/release_notes.md) + +* **[10.0.1](10.0.1)** + * [Release Notes](10.0.1/release_notes.md) + +* **[10.0.0](10.0.0)** + * [Release Notes](10.0.0/release_notes.md) diff --git a/doc/releasenotes/11_0_0_release_notes.md b/changelog/11.0/11.0.0/release_notes.md similarity index 100% rename from doc/releasenotes/11_0_0_release_notes.md rename to changelog/11.0/11.0.0/release_notes.md diff --git a/doc/releasenotes/11_0_1_release_notes.md b/changelog/11.0/11.0.1/release_notes.md similarity index 100% rename from doc/releasenotes/11_0_1_release_notes.md rename to changelog/11.0/11.0.1/release_notes.md diff --git a/doc/releasenotes/11_0_2_release_notes.md b/changelog/11.0/11.0.2/release_notes.md similarity index 100% rename from doc/releasenotes/11_0_2_release_notes.md rename to changelog/11.0/11.0.2/release_notes.md diff --git a/doc/releasenotes/11_0_2_summary.md b/changelog/11.0/11.0.2/summary.md similarity index 100% rename from doc/releasenotes/11_0_2_summary.md rename to changelog/11.0/11.0.2/summary.md diff --git a/doc/releasenotes/11_0_3_release_notes.md b/changelog/11.0/11.0.3/release_notes.md similarity index 100% rename from doc/releasenotes/11_0_3_release_notes.md rename to changelog/11.0/11.0.3/release_notes.md diff --git a/doc/releasenotes/11_0_3_summary.md b/changelog/11.0/11.0.3/summary.md similarity index 100% rename from doc/releasenotes/11_0_3_summary.md rename to changelog/11.0/11.0.3/summary.md diff --git a/doc/releasenotes/11_0_4_release_notes.md b/changelog/11.0/11.0.4/release_notes.md similarity index 100% rename from doc/releasenotes/11_0_4_release_notes.md rename to changelog/11.0/11.0.4/release_notes.md diff --git a/doc/releasenotes/11_0_4_summary.md b/changelog/11.0/11.0.4/summary.md similarity index 100% rename from doc/releasenotes/11_0_4_summary.md rename to changelog/11.0/11.0.4/summary.md diff --git a/changelog/11.0/README.md b/changelog/11.0/README.md new file mode 100644 index 00000000000..51dfb2e5648 --- /dev/null +++ b/changelog/11.0/README.md @@ -0,0 +1,15 @@ +## v11.0 +* **[11.0.4](11.0.4)** + * [Release Notes](11.0.4/release_notes.md) + +* **[11.0.3](11.0.3)** + * [Release Notes](11.0.3/release_notes.md) + +* **[11.0.2](11.0.2)** + * [Release Notes](11.0.2/release_notes.md) + +* **[11.0.1](11.0.1)** + * [Release Notes](11.0.1/release_notes.md) + +* **[11.0.0](11.0.0)** + * [Release Notes](11.0.0/release_notes.md) diff --git a/doc/releasenotes/12_0_0_release_notes.md b/changelog/12.0/12.0.0/release_notes.md similarity index 100% rename from doc/releasenotes/12_0_0_release_notes.md rename to changelog/12.0/12.0.0/release_notes.md diff --git a/doc/releasenotes/12_0_0_summary.md b/changelog/12.0/12.0.0/summary.md similarity index 100% rename from doc/releasenotes/12_0_0_summary.md rename to changelog/12.0/12.0.0/summary.md diff --git a/doc/releasenotes/12_0_1_release_notes.md b/changelog/12.0/12.0.1/release_notes.md similarity index 100% rename from doc/releasenotes/12_0_1_release_notes.md rename to changelog/12.0/12.0.1/release_notes.md diff --git a/doc/releasenotes/12_0_1_summary.md b/changelog/12.0/12.0.1/summary.md similarity index 100% rename from doc/releasenotes/12_0_1_summary.md rename to changelog/12.0/12.0.1/summary.md diff --git a/doc/releasenotes/12_0_2_release_notes.md b/changelog/12.0/12.0.2/release_notes.md similarity index 100% rename from doc/releasenotes/12_0_2_release_notes.md rename to changelog/12.0/12.0.2/release_notes.md diff --git a/doc/releasenotes/12_0_2_summary.md b/changelog/12.0/12.0.2/summary.md similarity index 100% rename from doc/releasenotes/12_0_2_summary.md rename to changelog/12.0/12.0.2/summary.md diff --git a/doc/releasenotes/12_0_3_release_notes.md b/changelog/12.0/12.0.3/release_notes.md similarity index 100% rename from doc/releasenotes/12_0_3_release_notes.md rename to changelog/12.0/12.0.3/release_notes.md diff --git a/doc/releasenotes/12_0_3_summary.md b/changelog/12.0/12.0.3/summary.md similarity index 100% rename from doc/releasenotes/12_0_3_summary.md rename to changelog/12.0/12.0.3/summary.md diff --git a/doc/releasenotes/12_0_4_release_notes.md b/changelog/12.0/12.0.4/release_notes.md similarity index 100% rename from doc/releasenotes/12_0_4_release_notes.md rename to changelog/12.0/12.0.4/release_notes.md diff --git a/doc/releasenotes/12_0_5_changelog.md b/changelog/12.0/12.0.5/changelog.md similarity index 100% rename from doc/releasenotes/12_0_5_changelog.md rename to changelog/12.0/12.0.5/changelog.md diff --git a/doc/releasenotes/12_0_5_release_notes.md b/changelog/12.0/12.0.5/release_notes.md similarity index 92% rename from doc/releasenotes/12_0_5_release_notes.md rename to changelog/12.0/12.0.5/release_notes.md index fc2c613e4da..dbff8a5aade 100644 --- a/doc/releasenotes/12_0_5_release_notes.md +++ b/changelog/12.0/12.0.5/release_notes.md @@ -9,7 +9,7 @@ Below is a summary of this patch release. You can learn more [here](https://go.d > go1.17.12 (released 2022-07-12) includes security fixes to the compress/gzip, encoding/gob, encoding/xml, go/parser, io/fs, net/http, and path/filepath packages, as well as bug fixes to the compiler, the go command, the runtime, and the runtime/metrics package. [See the Go 1.17.12 milestone](https://github.com/golang/go/issues?q=milestone%3AGo1.17.12+label%3ACherryPickApproved) on our issue tracker for details. ------------ -The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/12_0_5_changelog.md). +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/12.0/12.0.5/changelog.md). The release includes 7 commits (excluding merges) diff --git a/doc/releasenotes/12_0_6_changelog.md b/changelog/12.0/12.0.6/changelog.md similarity index 100% rename from doc/releasenotes/12_0_6_changelog.md rename to changelog/12.0/12.0.6/changelog.md diff --git a/doc/releasenotes/12_0_6_release_notes.md b/changelog/12.0/12.0.6/release_notes.md similarity index 93% rename from doc/releasenotes/12_0_6_release_notes.md rename to changelog/12.0/12.0.6/release_notes.md index 8afbe0a4239..c9c743d95ea 100644 --- a/doc/releasenotes/12_0_6_release_notes.md +++ b/changelog/12.0/12.0.6/release_notes.md @@ -15,7 +15,7 @@ This change is documented on our website [here](https://vitess.io/docs/12.0/over ------------ -The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/12_0_6_changelog.md). +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/12.0/12.0.6/changelog.md). The release includes 11 commits (excluding merges) diff --git a/doc/releasenotes/12_0_6_summary.md b/changelog/12.0/12.0.6/summary.md similarity index 100% rename from doc/releasenotes/12_0_6_summary.md rename to changelog/12.0/12.0.6/summary.md diff --git a/changelog/12.0/README.md b/changelog/12.0/README.md new file mode 100644 index 00000000000..131b2df443d --- /dev/null +++ b/changelog/12.0/README.md @@ -0,0 +1,23 @@ +## v12.0 +* **[12.0.6](12.0.6)** + * [Changelog](12.0.6/changelog.md) + * [Release Notes](12.0.6/release_notes.md) + +* **[12.0.5](12.0.5)** + * [Changelog](12.0.5/changelog.md) + * [Release Notes](12.0.5/release_notes.md) + +* **[12.0.4](12.0.4)** + * [Release Notes](12.0.4/release_notes.md) + +* **[12.0.3](12.0.3)** + * [Release Notes](12.0.3/release_notes.md) + +* **[12.0.2](12.0.2)** + * [Release Notes](12.0.2/release_notes.md) + +* **[12.0.1](12.0.1)** + * [Release Notes](12.0.1/release_notes.md) + +* **[12.0.0](12.0.0)** + * [Release Notes](12.0.0/release_notes.md) diff --git a/doc/releasenotes/13_0_0_release_notes.md b/changelog/13.0/13.0.0/release_notes.md similarity index 100% rename from doc/releasenotes/13_0_0_release_notes.md rename to changelog/13.0/13.0.0/release_notes.md diff --git a/doc/releasenotes/13_0_0_summary.md b/changelog/13.0/13.0.0/summary.md similarity index 100% rename from doc/releasenotes/13_0_0_summary.md rename to changelog/13.0/13.0.0/summary.md diff --git a/doc/releasenotes/13_0_1_release_notes.md b/changelog/13.0/13.0.1/release_notes.md similarity index 100% rename from doc/releasenotes/13_0_1_release_notes.md rename to changelog/13.0/13.0.1/release_notes.md diff --git a/doc/releasenotes/13_0_2_changelog.md b/changelog/13.0/13.0.2/changelog.md similarity index 100% rename from doc/releasenotes/13_0_2_changelog.md rename to changelog/13.0/13.0.2/changelog.md diff --git a/doc/releasenotes/13_0_2_release_notes.md b/changelog/13.0/13.0.2/release_notes.md similarity index 93% rename from doc/releasenotes/13_0_2_release_notes.md rename to changelog/13.0/13.0.2/release_notes.md index 310eb5e633a..12692031e2a 100644 --- a/doc/releasenotes/13_0_2_release_notes.md +++ b/changelog/13.0/13.0.2/release_notes.md @@ -9,7 +9,7 @@ Below is a summary of this patch release. You can learn more [here](https://go.d > go1.17.12 (released 2022-07-12) includes security fixes to the compress/gzip, encoding/gob, encoding/xml, go/parser, io/fs, net/http, and path/filepath packages, as well as bug fixes to the compiler, the go command, the runtime, and the runtime/metrics package. [See the Go 1.17.12 milestone](https://github.com/golang/go/issues?q=milestone%3AGo1.17.12+label%3ACherryPickApproved) on our issue tracker for details. ------------ -The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/13_0_2_changelog.md). +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/13.0/13.0.2/changelog.md). The release includes 28 commits (excluding merges) Thanks to all our contributors: @GuptaManan100, @aquarapid, @frouioui, @harshit-gangal, @mattlord, @rohit-nayak-ps, @systay, @vitess-bot[bot], @vmg \ No newline at end of file diff --git a/doc/releasenotes/13_0_2_summary.md b/changelog/13.0/13.0.2/summary.md similarity index 100% rename from doc/releasenotes/13_0_2_summary.md rename to changelog/13.0/13.0.2/summary.md diff --git a/doc/releasenotes/13_0_3_changelog.md b/changelog/13.0/13.0.3/changelog.md similarity index 100% rename from doc/releasenotes/13_0_3_changelog.md rename to changelog/13.0/13.0.3/changelog.md diff --git a/doc/releasenotes/13_0_3_release_notes.md b/changelog/13.0/13.0.3/release_notes.md similarity index 93% rename from doc/releasenotes/13_0_3_release_notes.md rename to changelog/13.0/13.0.3/release_notes.md index 3fee980f099..b04c0d69d20 100644 --- a/doc/releasenotes/13_0_3_release_notes.md +++ b/changelog/13.0/13.0.3/release_notes.md @@ -15,7 +15,7 @@ This change is documented on our website [here](https://vitess.io/docs/13.0/over ------------ -The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/13_0_3_changelog.md). +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/13.0/13.0.3/changelog.md). The release includes 17 commits(excluding merges) diff --git a/doc/releasenotes/13_0_3_summary.md b/changelog/13.0/13.0.3/summary.md similarity index 100% rename from doc/releasenotes/13_0_3_summary.md rename to changelog/13.0/13.0.3/summary.md diff --git a/changelog/13.0/README.md b/changelog/13.0/README.md new file mode 100644 index 00000000000..780625ef69a --- /dev/null +++ b/changelog/13.0/README.md @@ -0,0 +1,14 @@ +## v13.0 +* **[13.0.3](13.0.3)** + * [Changelog](13.0.3/changelog.md) + * [Release Notes](13.0.3/release_notes.md) + +* **[13.0.2](13.0.2)** + * [Changelog](13.0.2/changelog.md) + * [Release Notes](13.0.2/release_notes.md) + +* **[13.0.1](13.0.1)** + * [Release Notes](13.0.1/release_notes.md) + +* **[13.0.0](13.0.0)** + * [Release Notes](13.0.0/release_notes.md) diff --git a/doc/releasenotes/14_0_0_changelog.md b/changelog/14.0/14.0.0/changelog.md similarity index 100% rename from doc/releasenotes/14_0_0_changelog.md rename to changelog/14.0/14.0.0/changelog.md diff --git a/doc/releasenotes/14_0_0_release_notes.md b/changelog/14.0/14.0.0/release_notes.md similarity index 99% rename from doc/releasenotes/14_0_0_release_notes.md rename to changelog/14.0/14.0.0/release_notes.md index a7b4bcf4ef0..5f88f6975db 100644 --- a/doc/releasenotes/14_0_0_release_notes.md +++ b/changelog/14.0/14.0.0/release_notes.md @@ -319,7 +319,7 @@ Work has gone into making the advisory locks (`get_lock()`, `release_lock()`, et A long time ago, the sharding column and type were specified at the keyspace level. This syntax is now deprecated and will be removed in v15. ------------ -The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/14_0_0_changelog.md). +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/14.0/14.0.0/changelog.md). The release includes 1101 commits (excluding merges) diff --git a/doc/releasenotes/14_0_0_summary.md b/changelog/14.0/14.0.0/summary.md similarity index 100% rename from doc/releasenotes/14_0_0_summary.md rename to changelog/14.0/14.0.0/summary.md diff --git a/doc/releasenotes/14_0_1_changelog.md b/changelog/14.0/14.0.1/changelog.md similarity index 100% rename from doc/releasenotes/14_0_1_changelog.md rename to changelog/14.0/14.0.1/changelog.md diff --git a/doc/releasenotes/14_0_1_release_notes.md b/changelog/14.0/14.0.1/release_notes.md similarity index 94% rename from doc/releasenotes/14_0_1_release_notes.md rename to changelog/14.0/14.0.1/release_notes.md index 57d68be31c2..639af4fce96 100644 --- a/doc/releasenotes/14_0_1_release_notes.md +++ b/changelog/14.0/14.0.1/release_notes.md @@ -13,7 +13,7 @@ Below is a summary of this patch release. You can learn more [here](https://go.d > go1.18.4 (released 2022-07-12) includes security fixes to the compress/gzip, encoding/gob, encoding/xml, go/parser, io/fs, net/http, and path/filepath packages, as well as bug fixes to the compiler, the go command, the linker, the runtime, and the runtime/metrics package. [See the Go 1.18.4 milestone](https://github.com/golang/go/issues?q=milestone%3AGo1.18.4+label%3ACherryPickApproved) on our issue tracker for details. ------------ -The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/14_0_1_changelog.md). +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/14.0/14.0.1/changelog.md). The release includes 25 commits (excluding merges) diff --git a/doc/releasenotes/14_0_1_summary.md b/changelog/14.0/14.0.1/summary.md similarity index 100% rename from doc/releasenotes/14_0_1_summary.md rename to changelog/14.0/14.0.1/summary.md diff --git a/doc/releasenotes/14_0_2_changelog.md b/changelog/14.0/14.0.2/changelog.md similarity index 100% rename from doc/releasenotes/14_0_2_changelog.md rename to changelog/14.0/14.0.2/changelog.md diff --git a/doc/releasenotes/14_0_2_release_notes.md b/changelog/14.0/14.0.2/release_notes.md similarity index 95% rename from doc/releasenotes/14_0_2_release_notes.md rename to changelog/14.0/14.0.2/release_notes.md index d83b940f45c..724673af576 100644 --- a/doc/releasenotes/14_0_2_release_notes.md +++ b/changelog/14.0/14.0.2/release_notes.md @@ -18,7 +18,7 @@ Since the end-of-life of MariaDB 10.2, its Docker image is unavailable, and we d You can find more information on the list of supported databases on our documentation website, [here](https://vitess.io/docs/14.0/overview/supported-databases/). ------------ -The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/14_0_2_changelog.md). +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/14.0/14.0.2/changelog.md). The release includes 23 commits (excluding merges) diff --git a/doc/releasenotes/14_0_2_summary.md b/changelog/14.0/14.0.2/summary.md similarity index 100% rename from doc/releasenotes/14_0_2_summary.md rename to changelog/14.0/14.0.2/summary.md diff --git a/doc/releasenotes/14_0_3_changelog.md b/changelog/14.0/14.0.3/changelog.md similarity index 100% rename from doc/releasenotes/14_0_3_changelog.md rename to changelog/14.0/14.0.3/changelog.md diff --git a/doc/releasenotes/14_0_3_release_notes.md b/changelog/14.0/14.0.3/release_notes.md similarity index 93% rename from doc/releasenotes/14_0_3_release_notes.md rename to changelog/14.0/14.0.3/release_notes.md index 721c47640e2..5d5cc9b871c 100644 --- a/doc/releasenotes/14_0_3_release_notes.md +++ b/changelog/14.0/14.0.3/release_notes.md @@ -12,7 +12,7 @@ This problem could be resolved by restarting the VTOrc so that it discovers all frequently, this posed a greater challenge, since some pods when evicted and rescheduled on a different node, would sometimes fail to be discovered by VTOrc. This has problem has been addressed in this patch by the fix https://github.com/vitessio/vitess/pull/10662. ------------ -The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/14_0_3_changelog.md). +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/14.0/14.0.3/changelog.md). The release includes 12 commits (excluding merges) diff --git a/doc/releasenotes/14_0_3_summary.md b/changelog/14.0/14.0.3/summary.md similarity index 100% rename from doc/releasenotes/14_0_3_summary.md rename to changelog/14.0/14.0.3/summary.md diff --git a/doc/releasenotes/14_0_4_changelog.md b/changelog/14.0/14.0.4/changelog.md similarity index 100% rename from doc/releasenotes/14_0_4_changelog.md rename to changelog/14.0/14.0.4/changelog.md diff --git a/doc/releasenotes/14_0_4_release_notes.md b/changelog/14.0/14.0.4/release_notes.md similarity index 93% rename from doc/releasenotes/14_0_4_release_notes.md rename to changelog/14.0/14.0.4/release_notes.md index 334efefd05f..31cbbc26627 100644 --- a/doc/releasenotes/14_0_4_release_notes.md +++ b/changelog/14.0/14.0.4/release_notes.md @@ -14,7 +14,7 @@ Below is a summary of this patch release. You can learn more [here](https://go.d An issue in versions `<= v14.0.3` and `<= v15.0.0` that generated corrupted results for non-full-group-by queries with a JOIN is now fixed. The full issue can be found [here](https://github.com/vitessio/vitess/issues/11625), and its fix [here](https://github.com/vitessio/vitess/pull/11633). ------------ -The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/14_0_4_changelog.md). +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/14.0/14.0.4/changelog.md). The release includes 24 commits (excluding merges) diff --git a/doc/releasenotes/14_0_4_summary.md b/changelog/14.0/14.0.4/summary.md similarity index 100% rename from doc/releasenotes/14_0_4_summary.md rename to changelog/14.0/14.0.4/summary.md diff --git a/changelog/14.0/14.0.5/changelog.md b/changelog/14.0/14.0.5/changelog.md new file mode 100644 index 00000000000..695adc1274d --- /dev/null +++ b/changelog/14.0/14.0.5/changelog.md @@ -0,0 +1,55 @@ +# Changelog of Vitess v14.0.5 + +### Bug fixes +#### Observability + * Reset the current lag when closing the replication lag reader. [#12683](https://github.com/vitessio/vitess/pull/12683) +#### Online DDL + * Mysqld.GetSchema: tolerate tables being dropped while inspecting schema [#12641](https://github.com/vitessio/vitess/pull/12641) +#### Query Serving + * Fix CheckMySQL by setting the correct wanted state [#11895](https://github.com/vitessio/vitess/pull/11895) + * [release-14.0] Fix sending a ServerLost error when reading a packet fails (#11920) [#11928](https://github.com/vitessio/vitess/pull/11928) + * Fix: Date math with Interval keyword [#12082](https://github.com/vitessio/vitess/pull/12082) + * BugFix: Cast expression translation by evaluation engine [#12111](https://github.com/vitessio/vitess/pull/12111) + * Fix aggregation on outer joins [#12298](https://github.com/vitessio/vitess/pull/12298) + * [release-14.0] fix: added null safe operator precendence rule (#12297) [#12305](https://github.com/vitessio/vitess/pull/12305) + * Fix scalar aggregation engine primitive for column truncation [#12468](https://github.com/vitessio/vitess/pull/12468) + * [release-16.0] BugFix: Unsharded query using a derived table and a dual table [#12484](https://github.com/vitessio/vitess/pull/12484) + * collations: fix sorting in UCA900 collations [#12555](https://github.com/vitessio/vitess/pull/12555) + * [release-14.0] `ApplyVSchemaDDL`: escape Sequence names when writing the VSchema (#12519) [#12597](https://github.com/vitessio/vitess/pull/12597) + * [gen4 planner] Make sure to not push down expressions when not possible [#12607](https://github.com/vitessio/vitess/pull/12607) + * [release-14.0] Fix `panic` when executing a prepare statement with over `65,528` parameters [#12628](https://github.com/vitessio/vitess/pull/12628) + * [planner bugfix] add expressions to HAVING [#12668](https://github.com/vitessio/vitess/pull/12668) + * [release-14.0] Always add columns in the `Derived` operator [#12681](https://github.com/vitessio/vitess/pull/12681) + * [planner fix] make unknown column an error only for sharded queries [#12704](https://github.com/vitessio/vitess/pull/12704) +### CI/Build +#### Build/CI + * Move towards MySQL 8.0 as the default template generation [#11153](https://github.com/vitessio/vitess/pull/11153) + * Fix deprecated usage of set-output [#11844](https://github.com/vitessio/vitess/pull/11844) + * Use `go1.18.9` in the next release upgrade downgrade E2E tests [#11925](https://github.com/vitessio/vitess/pull/11925) + * [release-14.0] Make upgrade downgrade job names unique [#12497](https://github.com/vitessio/vitess/pull/12497) + * v14 backport: CI: increase overall test timeouts for all OnlineDDL tests [#12590](https://github.com/vitessio/vitess/pull/12590) +#### Online DDL + * CI: extend timeouts in onlineddl_vrepl due to slow CI runners [#12583](https://github.com/vitessio/vitess/pull/12583) +#### TabletManager + * Fix closing the body for HTTP requests [#11842](https://github.com/vitessio/vitess/pull/11842) +### Enhancement +#### Build/CI + * Auto upgrade the Golang version [#12585](https://github.com/vitessio/vitess/pull/12585) +#### Governance + * [release-14.0] Add manan and florent to Docker files CODEOWNERS (#11981) [#11982](https://github.com/vitessio/vitess/pull/11982) +### Internal Cleanup +#### General + * [release-14.0] Fix release script for the version in the docker script (#12285) [#12290](https://github.com/vitessio/vitess/pull/12290) +### Performance +#### Cluster management + * Bug fix: Cache filtered out tablets in topology watcher to avoid unnecessary GetTablet calls to topo [#12194](https://github.com/vitessio/vitess/pull/12194) +### Release +#### Build/CI + * [release-14.0] Tooling improvements backports [#12526](https://github.com/vitessio/vitess/pull/12526) +#### Documentation + * Re-organize the `releasenotes` directory into `changelog` [#12566](https://github.com/vitessio/vitess/pull/12566) +#### General + * Back to dev mode after v14.0.4 [#11845](https://github.com/vitessio/vitess/pull/11845) + * Release of v14.0.4 [#11846](https://github.com/vitessio/vitess/pull/11846) + * Code freeze of `release-14.0` for `v14.0.5` [#12763](https://github.com/vitessio/vitess/pull/12763) + diff --git a/changelog/14.0/14.0.5/release_notes.md b/changelog/14.0/14.0.5/release_notes.md new file mode 100644 index 00000000000..ec2a762d4a5 --- /dev/null +++ b/changelog/14.0/14.0.5/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v14.0.5 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/14.0/14.0.5/changelog.md). + +The release includes 33 commits (excluding merges) + +Thanks to all our contributors: @GuptaManan100, @dbussink, @frouioui, @harshit-gangal, @rsajwani, @shlomi-noach, @systay, @vitess-bot[bot], @vmg + diff --git a/changelog/14.0/README.md b/changelog/14.0/README.md new file mode 100644 index 00000000000..1b251dd0b01 --- /dev/null +++ b/changelog/14.0/README.md @@ -0,0 +1,24 @@ +## v14.0 +* **[14.0.5](14.0.5)** + * [Changelog](14.0.5/changelog.md) + * [Release Notes](14.0.5/release_notes.md) + +* **[14.0.4](14.0.4)** + * [Changelog](14.0.4/changelog.md) + * [Release Notes](14.0.4/release_notes.md) + +* **[14.0.3](14.0.3)** + * [Changelog](14.0.3/changelog.md) + * [Release Notes](14.0.3/release_notes.md) + +* **[14.0.2](14.0.2)** + * [Changelog](14.0.2/changelog.md) + * [Release Notes](14.0.2/release_notes.md) + +* **[14.0.1](14.0.1)** + * [Changelog](14.0.1/changelog.md) + * [Release Notes](14.0.1/release_notes.md) + +* **[14.0.0](14.0.0)** + * [Changelog](14.0.0/changelog.md) + * [Release Notes](14.0.0/release_notes.md) diff --git a/doc/releasenotes/15_0_0_changelog.md b/changelog/15.0/15.0.0/changelog.md similarity index 100% rename from doc/releasenotes/15_0_0_changelog.md rename to changelog/15.0/15.0.0/changelog.md diff --git a/doc/releasenotes/15_0_0_release_notes.md b/changelog/15.0/15.0.0/release_notes.md similarity index 99% rename from doc/releasenotes/15_0_0_release_notes.md rename to changelog/15.0/15.0.0/release_notes.md index 430100fa8f5..f60fd0547f1 100644 --- a/doc/releasenotes/15_0_0_release_notes.md +++ b/changelog/15.0/15.0.0/release_notes.md @@ -455,7 +455,7 @@ Below are the changes for each binary. - [zkctld](https://github.com/vitessio/vitess/blob/release-15.0/doc/flags/14.0-to-15.0-transition/zkctld.diff) ------------ -The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/15_0_0_changelog.md). +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/15.0/15.0.0/changelog.md). The release includes 595 commits (excluding merges) diff --git a/doc/releasenotes/15_0_0_summary.md b/changelog/15.0/15.0.0/summary.md similarity index 100% rename from doc/releasenotes/15_0_0_summary.md rename to changelog/15.0/15.0.0/summary.md diff --git a/doc/releasenotes/15_0_1_changelog.md b/changelog/15.0/15.0.1/changelog.md similarity index 100% rename from doc/releasenotes/15_0_1_changelog.md rename to changelog/15.0/15.0.1/changelog.md diff --git a/doc/releasenotes/15_0_1_release_notes.md b/changelog/15.0/15.0.1/release_notes.md similarity index 93% rename from doc/releasenotes/15_0_1_release_notes.md rename to changelog/15.0/15.0.1/release_notes.md index caeba725787..1737f0fd2f8 100644 --- a/doc/releasenotes/15_0_1_release_notes.md +++ b/changelog/15.0/15.0.1/release_notes.md @@ -16,7 +16,7 @@ This issue is fixed in 15.0.1. The full issue can be found [here](https://github ------------ -The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/15_0_1_changelog.md). +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/15.0/15.0.1/changelog.md). The release includes 25 commits (excluding merges) diff --git a/doc/releasenotes/15_0_1_summary.md b/changelog/15.0/15.0.1/summary.md similarity index 100% rename from doc/releasenotes/15_0_1_summary.md rename to changelog/15.0/15.0.1/summary.md diff --git a/doc/releasenotes/15_0_2_changelog.md b/changelog/15.0/15.0.2/changelog.md similarity index 100% rename from doc/releasenotes/15_0_2_changelog.md rename to changelog/15.0/15.0.2/changelog.md diff --git a/doc/releasenotes/15_0_2_release_notes.md b/changelog/15.0/15.0.2/release_notes.md similarity index 91% rename from doc/releasenotes/15_0_2_release_notes.md rename to changelog/15.0/15.0.2/release_notes.md index 92317b60dc4..33ece0e1c73 100644 --- a/doc/releasenotes/15_0_2_release_notes.md +++ b/changelog/15.0/15.0.2/release_notes.md @@ -12,7 +12,7 @@ Below is a summary of this patch release. You can learn more [here](https://grou ------------ -The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/15_0_2_changelog.md). +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/15.0/15.0.2/changelog.md). The release includes 14 commits (excluding merges) diff --git a/doc/releasenotes/15_0_2_summary.md b/changelog/15.0/15.0.2/summary.md similarity index 79% rename from doc/releasenotes/15_0_2_summary.md rename to changelog/15.0/15.0.2/summary.md index 6f3346efa47..b12a97879a5 100644 --- a/doc/releasenotes/15_0_2_summary.md +++ b/changelog/15.0/15.0.2/summary.md @@ -3,7 +3,7 @@ ### Upgrade to `go1.18.9` Vitess `v15.0.2` now runs on `go1.18.9`. -The patch release of Go, `go1.18.9`, was one of the main reasons for this release as it includes an important security fixe to `net/http` package, which is use extensively by Vitess. +The patch release of Go, `go1.18.9`, was one of the main reasons for this release as it includes an important security fix to `net/http` package, which is used extensively by Vitess. Below is a summary of this patch release. You can learn more [here](https://groups.google.com/g/golang-announce/c/L_3rmdT0BMU). > go1.18.9 (released 2022-12-06) includes security fixes to the net/http and os packages, as well as bug fixes to cgo, the compiler, the runtime, and the crypto/x509 and os/exec packages. diff --git a/changelog/15.0/15.0.3/changelog.md b/changelog/15.0/15.0.3/changelog.md new file mode 100644 index 00000000000..5634b7aa24a --- /dev/null +++ b/changelog/15.0/15.0.3/changelog.md @@ -0,0 +1,89 @@ +# Changelog of Vitess v15.0.3 + +### Bug fixes +#### Backup and Restore + * mysqlctl: flags should be added to vtbackup [#12048](https://github.com/vitessio/vitess/pull/12048) +#### Build/CI + * Fix `codeql` workflow timeout issue [#11760](https://github.com/vitessio/vitess/pull/11760) + * [release-15.0] Use `go1.20.1` in upgrade/downgrade tests [#12512](https://github.com/vitessio/vitess/pull/12512) +#### CLI + * Purge logs without panicking [#12187](https://github.com/vitessio/vitess/pull/12187) + * Fix `vtctldclient`'s Root command to return an error on unknown command [#12481](https://github.com/vitessio/vitess/pull/12481) +#### Cluster management + * Skip `TestReparentDoesntHangIfPrimaryFails` in vttablet v16 and above [#12387](https://github.com/vitessio/vitess/pull/12387) + * Fix initialization code to also stop replication to prevent crash [#12534](https://github.com/vitessio/vitess/pull/12534) +#### Observability + * Reset the current lag when closing the replication lag reader. [#12683](https://github.com/vitessio/vitess/pull/12683) +#### Online DDL + * Bugfix/Backport to v15: Fix schema migrations requested_timestamp zero values [#12263](https://github.com/vitessio/vitess/pull/12263) + * Mysqld.GetSchema: tolerate tables being dropped while inspecting schema [#12641](https://github.com/vitessio/vitess/pull/12641) +#### Operator + * Fix rbac config in the vtop example [#12034](https://github.com/vitessio/vitess/pull/12034) +#### Query Serving + * [release-15.0] only expand when we have full information (#11998) [#12002](https://github.com/vitessio/vitess/pull/12002) + * Fix: Date math with Interval keyword [#12082](https://github.com/vitessio/vitess/pull/12082) + * BugFix: Cast expression translation by evaluation engine [#12111](https://github.com/vitessio/vitess/pull/12111) + * [Gen4] Fix lookup vindexes with `autocommit` enabled [#12172](https://github.com/vitessio/vitess/pull/12172) + * VTGate: Ensure HealthCheck Cache Secondary Maps Stay in Sync With Authoritative Map on Tablet Delete [#12178](https://github.com/vitessio/vitess/pull/12178) + * Fix aggregation on outer joins [#12298](https://github.com/vitessio/vitess/pull/12298) + * [release-15.0] fix: added null safe operator precendence rule (#12297) [#12306](https://github.com/vitessio/vitess/pull/12306) + * [release-15.0] Fix bug in vtexplain around JOINs (#12376) [#12383](https://github.com/vitessio/vitess/pull/12383) + * Fix scalar aggregation engine primitive for column truncation [#12468](https://github.com/vitessio/vitess/pull/12468) + * [release-16.0] BugFix: Unsharded query using a derived table and a dual table [#12484](https://github.com/vitessio/vitess/pull/12484) + * [bug fix] USING planning on information_schema [#12542](https://github.com/vitessio/vitess/pull/12542) + * handle filter on top of UNION [#12543](https://github.com/vitessio/vitess/pull/12543) + * collations: fix sorting in UCA900 collations [#12555](https://github.com/vitessio/vitess/pull/12555) + * VSchema DDL: Add grammar to accept qualified table names in Vindex option values [#12577](https://github.com/vitessio/vitess/pull/12577) + * [release-15.0] `ApplyVSchemaDDL`: escape Sequence names when writing the VSchema (#12519) [#12598](https://github.com/vitessio/vitess/pull/12598) + * [gen4 planner] Make sure to not push down expressions when not possible [#12607](https://github.com/vitessio/vitess/pull/12607) + * Fix `panic` when executing a prepare statement with over `65,528` parameters [#12614](https://github.com/vitessio/vitess/pull/12614) + * [planner bugfix] add expressions to HAVING [#12668](https://github.com/vitessio/vitess/pull/12668) + * Use a left join to make sure that tables with tablespace=innodb_system are included in the schema [#12672](https://github.com/vitessio/vitess/pull/12672) + * [release-15.0] Always add columns in the `Derived` operator [#12680](https://github.com/vitessio/vitess/pull/12680) + * [planner fix] make unknown column an error only for sharded queries [#12704](https://github.com/vitessio/vitess/pull/12704) +#### VReplication + * VReplication Last Error: retry error if it happens after timeout [#12114](https://github.com/vitessio/vitess/pull/12114) +#### VTorc + * Fix unhandled error in VTOrc `recoverDeadPrimary` [#12511](https://github.com/vitessio/vitess/pull/12511) +### CI/Build +#### Build/CI + * [release-15.0] Make upgrade downgrade job names unique [#12498](https://github.com/vitessio/vitess/pull/12498) + * v15 backport: CI: increase overall test timeouts for all OnlineDDL tests [#12591](https://github.com/vitessio/vitess/pull/12591) +#### Online DDL + * CI: extend timeouts in onlineddl_vrepl due to slow CI runners [#12583](https://github.com/vitessio/vitess/pull/12583) +#### Query Serving + * [release-15.0] Flakes: Properly Test HealthCheck Cache Response Handling (#12226) [#12227](https://github.com/vitessio/vitess/pull/12227) +### Dependabot +#### Build/CI + * Bump golang.org/x/net from 0.5.0 to 0.7.0 (#12390) [#12405](https://github.com/vitessio/vitess/pull/12405) +### Enhancement +#### Build/CI + * Auto upgrade the Golang version [#12585](https://github.com/vitessio/vitess/pull/12585) +#### Governance + * [release-15.0] Add manan and florent to Docker files CODEOWNERS (#11981) [#11983](https://github.com/vitessio/vitess/pull/11983) +#### VTorc + * Release-15: Cherry pick vtorc no cgo [#12223](https://github.com/vitessio/vitess/pull/12223) +### Internal Cleanup +#### Build/CI + * [15.0] CI: remove pitrtls test [#12064](https://github.com/vitessio/vitess/pull/12064) +#### General + * Remove removed flags from being used for v16+ binaries [#12128](https://github.com/vitessio/vitess/pull/12128) + * [release-15.0] Fix release script for the version in the docker script [#12285](https://github.com/vitessio/vitess/pull/12285) +### Other +#### Other + * Code freeze of release-15.0 [#12764](https://github.com/vitessio/vitess/pull/12764) +### Performance +#### Cluster management + * Bug fix: Cache filtered out tablets in topology watcher to avoid unnecessary GetTablet calls to topo [#12194](https://github.com/vitessio/vitess/pull/12194) +### Release +#### Build/CI + * [release-15.0] Tooling improvements backports [#12527](https://github.com/vitessio/vitess/pull/12527) +#### Documentation + * Re-organize the `releasenotes` directory into `changelog` [#12566](https://github.com/vitessio/vitess/pull/12566) +#### General + * Release of v15.0.2 [#11961](https://github.com/vitessio/vitess/pull/11961) + * Back to dev mode after v15.0.2 [#11962](https://github.com/vitessio/vitess/pull/11962) +### Testing +#### General + * Fix vtbackup upgrade/downgrade test [#12437](https://github.com/vitessio/vitess/pull/12437) + diff --git a/changelog/15.0/15.0.3/release_notes.md b/changelog/15.0/15.0.3/release_notes.md new file mode 100644 index 00000000000..aabb3770528 --- /dev/null +++ b/changelog/15.0/15.0.3/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v15.0.3 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/15.0/15.0.3/changelog.md). + +The release includes 52 commits (excluding merges) + +Thanks to all our contributors: @GuptaManan100, @ajm188, @dbussink, @deepthi, @frouioui, @harshit-gangal, @mattlord, @rsajwani, @shlomi-noach, @systay, @vitess-bot[bot], @vmg + diff --git a/changelog/15.0/15.0.4/changelog.md b/changelog/15.0/15.0.4/changelog.md new file mode 100644 index 00000000000..f70fd1090a7 --- /dev/null +++ b/changelog/15.0/15.0.4/changelog.md @@ -0,0 +1,61 @@ +# Changelog of Vitess v15.0.4 + +### Bug fixes +#### Build/CI + * [release-15.0] Small fixes to the auto-upgrade golang tool (#12838) [#12847](https://github.com/vitessio/vitess/pull/12847) + * [release-15.0] Add timeout to golangci-lint and bump its version (#12852) [#12853](https://github.com/vitessio/vitess/pull/12853) + * [release-15.0] Remove recent golangci-lint version bump [#12910](https://github.com/vitessio/vitess/pull/12910) +#### Cluster management + * [release-15.0] Prevent resetting replication every time we set replication source (#13377) [#13393](https://github.com/vitessio/vitess/pull/13393) + * [release-15.0] Don't run any reparent commands if the host is empty (#13396) [#13403](https://github.com/vitessio/vitess/pull/13403) + * [release-15.0] ignore all error for views in engine reload (#13590) [#13592](https://github.com/vitessio/vitess/pull/13592) +#### Examples + * [release-15.0] `examples/compose`: fix `consul:latest` error w/`docker-compose up -d` (#13468) [#13471](https://github.com/vitessio/vitess/pull/13471) +#### Online DDL + * v15 backport: vitess Online DDL atomic cut-over [#13376](https://github.com/vitessio/vitess/pull/13376) +#### Query Serving + * [release-15.0] planbuilder bugfix - do not push aggregations into derived tables [#12824](https://github.com/vitessio/vitess/pull/12824) + * [release-15.0] Fix `vtgate_schema_tracker` flaky tests (#12780) [#12850](https://github.com/vitessio/vitess/pull/12850) + * [release-15.0] fix: union distinct between unsharded route and sharded join (#12968) [#12982](https://github.com/vitessio/vitess/pull/12982) + * gen4 planner: allow last_insert_id with arguments (15.0) [#13035](https://github.com/vitessio/vitess/pull/13035) + * [release-15.0] Fix the resilientQuery to give correct results during initialization (#13080) [#13086](https://github.com/vitessio/vitess/pull/13086) + * [release-15.0] Remove indentation limit in the sqlparser (#13158) [#13167](https://github.com/vitessio/vitess/pull/13167) + * [release-15.0] Fix: TabletServer ReserveBeginExecute to return transaction ID on error (#13193) [#13196](https://github.com/vitessio/vitess/pull/13196) + * [15.0] Fix: errant GTID in health streamer (#13184) [#13226](https://github.com/vitessio/vitess/pull/13226) +#### Schema Tracker + * [release-15.0] Ignore error while reading table data in Schema.Engine reload (#13421) [#13425](https://github.com/vitessio/vitess/pull/13425) + * Backport v15: schema.Reload(): ignore column reading errors for views only, error for tables #13442 [#13457](https://github.com/vitessio/vitess/pull/13457) +### Enhancement +#### Build/CI + * Use go1.20.3 in the upgrade downgrade tests [#12839](https://github.com/vitessio/vitess/pull/12839) + * [release-15.0] Set the number of threads for release notes generation with a flag [#13315](https://github.com/vitessio/vitess/pull/13315) +#### General + * Use `go1.20.4` on `release-15.0` upgrade test [#13071](https://github.com/vitessio/vitess/pull/13071) +#### Query Serving + * [release-15.0] planner fix: scoping rules for JOIN ON expression inside a subquery [#12890](https://github.com/vitessio/vitess/pull/12890) +### Internal Cleanup +#### Operator + * Use vitess-operator `v2.8.4` in the examples [#12993](https://github.com/vitessio/vitess/pull/12993) +#### VTorc + * [release-15.0] Remove excessive logging in VTOrc APIs (#13459) [#13463](https://github.com/vitessio/vitess/pull/13463) +### Performance +#### TabletManager + * [release-15.0] BaseShowTablesWithSizes: optimize MySQL 8.0 query (#13375) [#13388](https://github.com/vitessio/vitess/pull/13388) +### Release +#### Build/CI + * [release-15.0] Optimize release notes generation to use GitHub Milestones (#13398) [#13620](https://github.com/vitessio/vitess/pull/13620) +#### Documentation + * Prepare release note `v15.0.4` [#13619](https://github.com/vitessio/vitess/pull/13619) +### Testing +#### Build/CI + * [release-15.0] fakedbclient: Add locking to avoid races (#12814) [#12821](https://github.com/vitessio/vitess/pull/12821) +#### Cluster management + * [release-15.0] Flaky tests: Fix wrangler tests (#13568) [#13570](https://github.com/vitessio/vitess/pull/13570) +#### General + * [release-15.0] Update Upgrade/Downgrade tests to use `go1.20.5` [#13271](https://github.com/vitessio/vitess/pull/13271) +#### Query Serving + * [release-15.0] Fix benchmarks in `plan_test.go` (#13096) [#13125](https://github.com/vitessio/vitess/pull/13125) + * [release-15.0] Fix `TestGatewayBufferingWhileReparenting` flakiness (#13469) [#13502](https://github.com/vitessio/vitess/pull/13502) +#### VTorc + * [release-15.0]: Fix flakiness in VTOrc tests (#13489) [#13529](https://github.com/vitessio/vitess/pull/13529) + diff --git a/changelog/15.0/15.0.4/release_notes.md b/changelog/15.0/15.0.4/release_notes.md new file mode 100644 index 00000000000..38fa25f9c78 --- /dev/null +++ b/changelog/15.0/15.0.4/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v15.0.4 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/15.0/15.0.4/changelog.md). + +The release includes 33 merged Pull Requests. + +Thanks to all our contributors: @GuptaManan100, @app/vitess-bot, @frouioui, @harshit-gangal, @shlomi-noach, @systay + diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/extra_args b/changelog/15.0/15.0.4/summary.md similarity index 100% rename from go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/extra_args rename to changelog/15.0/15.0.4/summary.md diff --git a/changelog/15.0/README.md b/changelog/15.0/README.md new file mode 100644 index 00000000000..17807db2ebc --- /dev/null +++ b/changelog/15.0/README.md @@ -0,0 +1,21 @@ +## v15.0 +The dedicated team for this release can be found [here](team.md). +* **[15.0.4](15.0.4)** + * [Changelog](15.0.4/changelog.md) + * [Release Notes](15.0.4/release_notes.md) + +* **[15.0.3](15.0.3)** + * [Changelog](15.0.3/changelog.md) + * [Release Notes](15.0.3/release_notes.md) + +* **[15.0.2](15.0.2)** + * [Changelog](15.0.2/changelog.md) + * [Release Notes](15.0.2/release_notes.md) + +* **[15.0.1](15.0.1)** + * [Changelog](15.0.1/changelog.md) + * [Release Notes](15.0.1/release_notes.md) + +* **[15.0.0](15.0.0)** + * [Changelog](15.0.0/changelog.md) + * [Release Notes](15.0.0/release_notes.md) diff --git a/changelog/15.0/team.md b/changelog/15.0/team.md new file mode 100644 index 00000000000..b8cbdf809bf --- /dev/null +++ b/changelog/15.0/team.md @@ -0,0 +1,5 @@ +## Release Team for v15 + +- **Lead:** Rameez Sajwani ([rsajwani](https://github.com/rsajwani)) rameez@planetscale.com +- **Shadow:** Manan Gupta ([GuptaManan100](https://github.com/GuptaManan100)) manan@planetscale.com +- **Mentor:** Florent Poinsard ([frouioui](https://github.com/frouioui)) florent@planetscale.com \ No newline at end of file diff --git a/doc/releasenotes/16_0_0_changelog.md b/changelog/16.0/16.0.0/changelog.md similarity index 100% rename from doc/releasenotes/16_0_0_changelog.md rename to changelog/16.0/16.0.0/changelog.md diff --git a/doc/releasenotes/16_0_0_summary.md b/changelog/16.0/16.0.0/release_notes.md similarity index 91% rename from doc/releasenotes/16_0_0_summary.md rename to changelog/16.0/16.0.0/release_notes.md index 7a14f2533b7..e17a74a8d0f 100644 --- a/doc/releasenotes/16_0_0_summary.md +++ b/changelog/16.0/16.0.0/release_notes.md @@ -1,9 +1,13 @@ +# Release of Vitess v16.0.0 ## Summary ### Table of Contents - **[Known Issues](#known-issues)** - - [MySQL & Xtrabackup known issue](#mysql-xtrabackup-ddl) + - [MySQL & Xtrabackup known issue](#mysql-xtrabackup-ddl) + - [VTTablet Restore Metrics](#vttablet-restore-metrics) + - [Schema-initialization stuck on semi-sync ACKs while upgrading to v16.0.0](#schema-init-upgrade) + - [Broken downgrade from v17.x.x when super_read_only turned on by default](#init-db-sql-turned-on) - **[Major Changes](#major-changes)** - **[Breaking Changes](#breaking-changes)** - [VTGate Advertised MySQL Version](#advertised-mysql-version) @@ -99,6 +103,29 @@ or > ALTER TABLE your_table ENGINE=InnoDB; ``` +### VTTablet Restore Metrics + +As part of the VTTablet Sidecar Schema Maintenance Refactor in v16.0.0, we dropped the `local_metadata` table from the sidecar database schema. This table was storing a couple of metrics related to restores from backup, which have now been lost. +They have been re-introduced in v17.0.0 as metrics that can be accessed from `/debug/vars`. + +The original issue can be found [here](https://github.com/vitessio/vitess/issues/13336). + +### Schema-initialization stuck on semi-sync ACKs while upgrading to `v16.0.0` + +During upgrades from `<= v15.x.x` to `v16.0.0`, as part of `PromoteReplica` call, the schema-init realizes that there are schema diffs to apply and ends up writing to the database. +The issue is that if [semi-sync](https://vitess.io/docs/16.0/reference/features/mysql-replication/#semi-sync) is enabled, all of these writes get blocked indefinitely. +Eventually, `PromoteReplica` fails, and this fails the entire PRS call. + +A fix for this issue was merged on `release-16.0` in [PR#13441](https://github.com/vitessio/vitess/pull/13441), read the [corresponding bug report to learn more](https://github.com/vitessio/vitess/issues/13426). + +This issue is fixed in `v16.0.3` and later patch releases. + +### Broken downgrade from v17.x.x when super_read_only turned on by default + +In `v17.x.x` `super_read_only` is turned on by default meaning that downgrading from `v17` to `v16.0.0` breaks due to `init_db.sql` needing write access. + +This issue is fixed in `>= v16.0.3` thanks to [PR #13525](https://github.com/vitessio/vitess/pull/13525) + ## Major Changes ### Breaking Changes @@ -108,6 +135,8 @@ or Since [Pull Request #11989](https://github.com/vitessio/vitess/pull/11989), VTGate advertises MySQL version 8.0.30. This is a breaking change for clients that rely on the VTGate advertised MySQL version and still use MySQL 5.7. The users can set the `mysql_server_version` flag to advertise the correct version. +It is worth noting that [the feature to avoid using reserved connections](https://vitess.io/docs/16.0/reference/query-serving/reserved-conn/#avoiding-the-use-of-reserved-connections) depends on the `mysql_server_version` CLI flag, which default value has been changed from `5.7.9-vitess` to `8.0.30-vitess`. We recommend that users running MySQL 5.7 set vtgate's `mysql_server_version` CLI flag to `5.7.9-vitess` to prevent the queries from being unexpectedly rewritten. + #### Default MySQL version on Docker The default major MySQL version used by our `vitess/lite:latest` image is going from `5.7` to `8.0`. Additionally, the patch version of MySQL80 has been upgraded from `8.0.23` to `8.0.30`. @@ -522,3 +551,11 @@ performs the required `create` or `alter` to reach it. This is done whenever a p The sidecar tables `local_metadata` and `shard_metadata` are no longer in use and all references to them are removed as part of this refactor. They were used previously for Orchestrator support, which has been superseded by `vtorc`. + +------------ +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/16.0/16.0.0/changelog.md). + +The release includes 378 commits (excluding merges) + +Thanks to all our contributors: @EmadMokhtar, @GuptaManan100, @Weijun-H, @WilliamLu99, @ajm188, @arthurschreiber, @arvind-murty, @brendar, @brirams, @dbussink, @deepthi, @dependabot[bot], @draftcode, @ejortegau, @frouioui, @harshit-gangal, @jjh-kim, @johanoskarsson, @kbslvsk, @mattlord, @maxenglander, @mdlayher, @notfelineit, @pbibra, @pudiva, @rohit-nayak-ps, @rsajwani, @shlomi-noach, @systay, @timvaillancourt, @vitess-bot[bot], @vmg, @yoheimuta + diff --git a/doc/releasenotes/16_0_0_release_notes.md b/changelog/16.0/16.0.0/summary.md similarity index 93% rename from doc/releasenotes/16_0_0_release_notes.md rename to changelog/16.0/16.0.0/summary.md index 1b91f15c28d..c895a33130b 100644 --- a/doc/releasenotes/16_0_0_release_notes.md +++ b/changelog/16.0/16.0.0/summary.md @@ -1,10 +1,12 @@ -# Release of Vitess v16.0.0 ## Summary ### Table of Contents - **[Known Issues](#known-issues)** - [MySQL & Xtrabackup known issue](#mysql-xtrabackup-ddl) + - [VTTablet Restore Metrics](#vttablet-restore-metrics) + - [Schema-initialization stuck on semi-sync ACKs while upgrading to v16.0.0](#schema-init-upgrade) + - [Broken downgrade from v17.x.x when super_read_only turned on by default](#init-db-sql-turned-on) - **[Major Changes](#major-changes)** - **[Breaking Changes](#breaking-changes)** - [VTGate Advertised MySQL Version](#advertised-mysql-version) @@ -100,6 +102,28 @@ or > ALTER TABLE your_table ENGINE=InnoDB; ``` +### VTTablet Restore Metrics + +As part of the VTTablet Sidecar Schema Maintenance Refactor in v16.0.0, we dropped the `local_metadata` table from the sidecar database schema. This table was storing a couple of metrics related to restores from backup, which have now been lost. +They have been re-introduced in v17.0.0 as metrics that can be accessed from `/debug/vars`. + +The original issue can be found [here](https://github.com/vitessio/vitess/issues/13336). + +### Schema-initialization stuck on semi-sync ACKs while upgrading to `v16.0.0` + +During upgrades from `<= v15.x.x` to `v16.0.0`, as part of `PromoteReplica` call, the schema-init realizes that there are schema diffs to apply and ends up writing to the database if [semi-sync](https://vitess.io/docs/16.0/reference/features/mysql-replication/#semi-sync) is enabled, all of these writes get blocked indefinitely. +Eventually, `PromoteReplica` fails, and this fails the entire PRS call. + +A fix for this issue was merged on `release-16.0` in [PR#13441](https://github.com/vitessio/vitess/pull/13441), read the [corresponding bug report to learn more](https://github.com/vitessio/vitess/issues/13426). + +This issue is fixed in `v16.0.3` and later patch releases. + +### Broken downgrade from v17.x.x when super_read_only turned on by default + +In `v17.x.x` `super_read_only` is turned on by default meaning that downgrading from `v17` to `v16.0.0` breaks due to `init_db.sql` needing write access. + +This issue is fixed in `>= v16.0.3` thanks to [PR #13525](https://github.com/vitessio/vitess/pull/13525). + ## Major Changes ### Breaking Changes @@ -109,6 +133,8 @@ or Since [Pull Request #11989](https://github.com/vitessio/vitess/pull/11989), VTGate advertises MySQL version 8.0.30. This is a breaking change for clients that rely on the VTGate advertised MySQL version and still use MySQL 5.7. The users can set the `mysql_server_version` flag to advertise the correct version. +It is worth noting that [the feature to avoid using reserved connections](https://vitess.io/docs/16.0/reference/query-serving/reserved-conn/#avoiding-the-use-of-reserved-connections) depends on the `mysql_server_version` CLI flag, which default value has been changed from `5.7.9-vitess` to `8.0.30-vitess`. We recommend that users running MySQL 5.7 set vtgate's `mysql_server_version` CLI flag to `5.7.9-vitess` to prevent the queries from being unexpectedly rewritten. + #### Default MySQL version on Docker The default major MySQL version used by our `vitess/lite:latest` image is going from `5.7` to `8.0`. Additionally, the patch version of MySQL80 has been upgraded from `8.0.23` to `8.0.30`. @@ -523,11 +549,3 @@ performs the required `create` or `alter` to reach it. This is done whenever a p The sidecar tables `local_metadata` and `shard_metadata` are no longer in use and all references to them are removed as part of this refactor. They were used previously for Orchestrator support, which has been superseded by `vtorc`. - ------------- -The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/16_0_0_changelog.md). - -The release includes 378 commits (excluding merges) - -Thanks to all our contributors: @EmadMokhtar, @GuptaManan100, @Weijun-H, @WilliamLu99, @ajm188, @arthurschreiber, @arvind-murty, @brendar, @brirams, @dbussink, @deepthi, @dependabot[bot], @draftcode, @ejortegau, @frouioui, @harshit-gangal, @jjh-kim, @johanoskarsson, @kbslvsk, @mattlord, @maxenglander, @mdlayher, @notfelineit, @pbibra, @pudiva, @rohit-nayak-ps, @rsajwani, @shlomi-noach, @systay, @timvaillancourt, @vitess-bot[bot], @vmg, @yoheimuta - diff --git a/changelog/16.0/16.0.1/changelog.md b/changelog/16.0/16.0.1/changelog.md new file mode 100644 index 00000000000..47ea58c2469 --- /dev/null +++ b/changelog/16.0/16.0.1/changelog.md @@ -0,0 +1,71 @@ +# Changelog of Vitess v16.0.1 + +### Bug fixes +#### Build/CI + * Fix `TestFuzz` that hangs on `go1.20.1` [#12514](https://github.com/vitessio/vitess/pull/12514) + * Fix dubious ownership of git directory in `vitess/base` Docker build [#12530](https://github.com/vitessio/vitess/pull/12530) +#### CLI + * Purge logs without panicking [#12187](https://github.com/vitessio/vitess/pull/12187) + * Fix `vtctldclient`'s Root command to return an error on unknown command [#12481](https://github.com/vitessio/vitess/pull/12481) +#### Cluster management + * Fix initialization code to also stop replication to prevent crash [#12534](https://github.com/vitessio/vitess/pull/12534) + * [Backport] Update topo {Get,Create}Keyspace to prevent invalid keyspace names [#12732](https://github.com/vitessio/vitess/pull/12732) +#### General + * Fixing backup tests flakiness [#12655](https://github.com/vitessio/vitess/pull/12655) + * [release-16.0] Port two flaky test fixes #12603 and #12546 [#12745](https://github.com/vitessio/vitess/pull/12745) +#### Observability + * Reset the current lag when closing the replication lag reader. [#12683](https://github.com/vitessio/vitess/pull/12683) +#### Online DDL + * Throttler: Store Config in Global Keyspace Topo Record [#12520](https://github.com/vitessio/vitess/pull/12520) + * v16: Online DDL: enforce ALGORITHM=COPY on shadow table [#12522](https://github.com/vitessio/vitess/pull/12522) + * Mysqld.GetSchema: tolerate tables being dropped while inspecting schema [#12641](https://github.com/vitessio/vitess/pull/12641) +#### Query Serving + * collations: fix sorting in UCA900 collations [#12555](https://github.com/vitessio/vitess/pull/12555) + * VSchema DDL: Add grammar to accept qualified table names in Vindex option values [#12577](https://github.com/vitessio/vitess/pull/12577) + * [release-16.0] `ApplyVSchemaDDL`: escape Sequence names when writing the VSchema (#12519) [#12599](https://github.com/vitessio/vitess/pull/12599) + * [gen4 planner] Make sure to not push down expressions when not possible [#12607](https://github.com/vitessio/vitess/pull/12607) + * Fix `panic` when executing a prepare statement with over `65,528` parameters [#12614](https://github.com/vitessio/vitess/pull/12614) + * Always add columns in the `Derived` operator [#12634](https://github.com/vitessio/vitess/pull/12634) + * planner: fix predicate simplifier [#12650](https://github.com/vitessio/vitess/pull/12650) + * [planner bugfix] add expressions to HAVING [#12668](https://github.com/vitessio/vitess/pull/12668) + * Use a left join to make sure that tables with tablespace=innodb_system are included in the schema [#12672](https://github.com/vitessio/vitess/pull/12672) + * [planner fix] make unknown column an error only for sharded queries [#12704](https://github.com/vitessio/vitess/pull/12704) +#### VReplication + * VStreamer: improve representation of integers in json data types [#12630](https://github.com/vitessio/vitess/pull/12630) +#### VTorc + * Fix unhandled error in VTOrc `recoverDeadPrimary` [#12510](https://github.com/vitessio/vitess/pull/12510) +### CI/Build +#### Build/CI + * [release-16.0] Make upgrade downgrade job names unique [#12499](https://github.com/vitessio/vitess/pull/12499) +#### Examples + * Examples, Flakes: Wait for Shard's VReplication Engine to Open [#12560](https://github.com/vitessio/vitess/pull/12560) +#### General + * [release-16.0] Upgrade the Golang version to `go1.20.2` [#12723](https://github.com/vitessio/vitess/pull/12723) +#### Online DDL + * CI: extend timeouts in onlineddl_vrepl due to slow CI runners [#12583](https://github.com/vitessio/vitess/pull/12583) + * [release-16.0] CI: increase overall test timeouts for all OnlineDDL tests (#12584) [#12589](https://github.com/vitessio/vitess/pull/12589) +### Enhancement +#### Build/CI + * Auto upgrade the Golang version [#12585](https://github.com/vitessio/vitess/pull/12585) +### Internal Cleanup +#### Build/CI + * Run launchable only on PRs against `main` [#12694](https://github.com/vitessio/vitess/pull/12694) +#### General + * Add a known issue into the release notes for xtrabackup and DDLs [#12536](https://github.com/vitessio/vitess/pull/12536) +### Release +#### Build/CI + * [release-16.0] Tooling improvements backports [#12528](https://github.com/vitessio/vitess/pull/12528) +#### Documentation + * Re-organize the `releasenotes` directory into `changelog` [#12566](https://github.com/vitessio/vitess/pull/12566) + * Addition of the `v16.0.1` release summary [#12751](https://github.com/vitessio/vitess/pull/12751) +#### General + * Back to dev mode after v16.0.0 [#12515](https://github.com/vitessio/vitess/pull/12515) + * Release 16.0 code freeze for 16.0.1 patch release [#12762](https://github.com/vitessio/vitess/pull/12762) +#### VTAdmin + * Add the vtadmin `web` directory to the release packages [#12639](https://github.com/vitessio/vitess/pull/12639) +### Testing +#### General + * Fix fullstatus test for backward compat [#12685](https://github.com/vitessio/vitess/pull/12685) +#### VReplication + * Flakes: Use new healthy shard check in vreplication e2e tests [#12502](https://github.com/vitessio/vitess/pull/12502) + diff --git a/changelog/16.0/16.0.1/release_notes.md b/changelog/16.0/16.0.1/release_notes.md new file mode 100644 index 00000000000..c1354eac4ee --- /dev/null +++ b/changelog/16.0/16.0.1/release_notes.md @@ -0,0 +1,41 @@ +# Release of Vitess v16.0.1 + +## Known Issues + +### Schema-initialization stuck on semi-sync ACKs while upgrading to `v16.0.1` + +During upgrades from `<= v15.x.x` to `v16.0.1`, as part of `PromoteReplica` call, the schema-init realizes that there are schema diffs to apply and ends up writing to the database if [semi-sync](https://vitess.io/docs/16.0/reference/features/mysql-replication/#semi-sync) is enabled, all of these writes get blocked indefinitely. +Eventually, `PromoteReplica` fails, and this fails the entire PRS call. + +A fix for this issue was merged on `release-16.0` in [PR#13441](https://github.com/vitessio/vitess/pull/13441), read the [corresponding bug report to learn more](https://github.com/vitessio/vitess/issues/13426). + +This issue is fixed in `v16.0.3` and later patch releases. + +### Broken downgrade from v17.x.x when super_read_only turned on by default + +In `v17.x.x` `super_read_only` is turned on by default meaning that downgrading from `v17` to `v16.0.1` breaks due to `init_db.sql` needing write access. + +This issue is fixed in `>= v16.0.3` thanks to [PR #13525](https://github.com/vitessio/vitess/pull/13525) + +## Major Changes + +### Upgrade to `go1.20.2` + +Vitess `v16.0.1` now runs on `go1.20.2`. +Below is a summary of this Go patch release. You can learn more [here](https://go.dev/doc/devel/release#go1.20). + +> go1.20.2 (released 2023-03-07) includes a security fix to the crypto/elliptic package, as well as bug fixes to the compiler, the covdata command, the linker, the runtime, and the crypto/ecdh, crypto/rsa, crypto/x509, os, and syscall packages. + +### Keyspace name validation in TopoServer + +Prior to v16.0.1, it was possible to create a keyspace with invalid characters, which would then be inaccessible to various cluster management operations. + +Keyspace names may no longer contain the forward slash ("/") character, and TopoServer's `GetKeyspace` and `CreateKeyspace` methods return an error if given such a name. + +------------ +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/16.0/16.0.1/changelog.md). + +The release includes 39 commits (excluding merges) + +Thanks to all our contributors: @GuptaManan100, @ajm188, @frouioui, @github-actions[bot], @mattlord, @rohit-nayak-ps, @rsajwani, @shlomi-noach, @systay, @vitess-bot[bot] + diff --git a/changelog/16.0/16.0.1/summary.md b/changelog/16.0/16.0.1/summary.md new file mode 100644 index 00000000000..f9af9672ea1 --- /dev/null +++ b/changelog/16.0/16.0.1/summary.md @@ -0,0 +1,32 @@ +## Known Issues + +### Schema-initialization stuck on semi-sync ACKs while upgrading to `v16.0.1` + +During upgrades from `<= v15.x.x` to `v16.0.1`, as part of `PromoteReplica` call, the schema-init realizes that there are schema diffs to apply and ends up writing to the database if [semi-sync](https://vitess.io/docs/16.0/reference/features/mysql-replication/#semi-sync) is enabled, all of these writes get blocked indefinitely. +Eventually, `PromoteReplica` fails, and this fails the entire PRS call. + +A fix for this issue was merged on `release-16.0` in [PR#13441](https://github.com/vitessio/vitess/pull/13441), read the [corresponding bug report to learn more](https://github.com/vitessio/vitess/issues/13426). + +This issue is fixed in `v16.0.3` and later patch releases. + +### Broken downgrade from v17.x.x when super_read_only turned on by default + +In `v17.x.x` `super_read_only` is turned on by default meaning that downgrading from `v17` to `v16.0.1` breaks due to `init_db.sql` needing write access. + +This issue is fixed in `>= v16.0.3` thanks to [PR #13525](https://github.com/vitessio/vitess/pull/13525) + +## Major Changes + +### Upgrade to `go1.20.2` + +Vitess `v16.0.1` now runs on `go1.20.2`. +Below is a summary of this Go patch release. You can learn more [here](https://go.dev/doc/devel/release#go1.20). + +> go1.20.2 (released 2023-03-07) includes a security fix to the crypto/elliptic package, as well as bug fixes to the compiler, the covdata command, the linker, the runtime, and the crypto/ecdh, crypto/rsa, crypto/x509, os, and syscall packages. + +### Keyspace name validation in TopoServer + +Prior to v16.0.1, it was possible to create a keyspace with invalid characters, which would then be inaccessible to various cluster management operations. + +Keyspace names may no longer contain the forward slash ("/") character, and TopoServer's `GetKeyspace` and `CreateKeyspace` methods return an error if given such a name. + diff --git a/changelog/16.0/16.0.2/changelog.md b/changelog/16.0/16.0.2/changelog.md new file mode 100644 index 00000000000..978cf9441e6 --- /dev/null +++ b/changelog/16.0/16.0.2/changelog.md @@ -0,0 +1,50 @@ +# Changelog of Vitess v16.0.2 + +### Bug fixes +#### Build/CI + * Small fixes to the auto-upgrade golang tool [#12838](https://github.com/vitessio/vitess/pull/12838) + * Add timeout to `golangci-lint` and bump its version [#12852](https://github.com/vitessio/vitess/pull/12852) + * [release-16.0] Remove recent golangci-lint version bump [#12909](https://github.com/vitessio/vitess/pull/12909) +#### Cluster management + * Backport: [topo] Disallow the slash character in shard names #12843 [#12858](https://github.com/vitessio/vitess/pull/12858) +#### Query Serving + * Fix `vtgate_schema_tracker` flaky tests [#12780](https://github.com/vitessio/vitess/pull/12780) + * [planbuilder bugfix] do not push aggregations into derived tables [#12810](https://github.com/vitessio/vitess/pull/12810) + * [16.0] Fix: reset transaction session when no reserved connection [#12877](https://github.com/vitessio/vitess/pull/12877) + * [release-16.0] fix: union distinct between unsharded route and sharded join (#12968) [#12974](https://github.com/vitessio/vitess/pull/12974) +### CI/Build +#### General + * Do not fail build on incorrect Go version [#12809](https://github.com/vitessio/vitess/pull/12809) + * [release-16.0] Upgrade the Golang version to `go1.20.3` [#12832](https://github.com/vitessio/vitess/pull/12832) +### Documentation +#### Query Serving + * update v16 release notes about VTGate Advertised MySQL Version [#12957](https://github.com/vitessio/vitess/pull/12957) +### Enhancement +#### Build/CI + * Remove unnecessary code bits in workflows [#12756](https://github.com/vitessio/vitess/pull/12756) +#### General + * Automatically add milestone to new Pull Request [#12759](https://github.com/vitessio/vitess/pull/12759) +#### Query Serving + * [release-16.0] planner fix: scoping rules for JOIN ON expression inside a subquery [#12891](https://github.com/vitessio/vitess/pull/12891) +### Internal Cleanup +#### CLI + * Cleanup TODOs in vtorc flag parsing code from v15 [#12787](https://github.com/vitessio/vitess/pull/12787) +#### TabletManager + * Table GC: remove spammy log entry [#12625](https://github.com/vitessio/vitess/pull/12625) +### Regression +#### ACL + * vtgate : Disable Automatically setting immediateCallerID to user from static authentication context [#12961](https://github.com/vitessio/vitess/pull/12961) +#### Query Serving + * gen4 planner: allow last_insert_id with arguments [#13026](https://github.com/vitessio/vitess/pull/13026) +### Release +#### Documentation + * Fix incorrect path during release notes generation [#12769](https://github.com/vitessio/vitess/pull/12769) +#### General + * Back to dev mode after v16.0.1 [#12783](https://github.com/vitessio/vitess/pull/12783) + * Summary changes and code freeze for release of v16.0.2 [#13049](https://github.com/vitessio/vitess/pull/13049) +### Testing +#### Build/CI + * [release-16.0] Throttler: Expose Tablet's Config & Leverage to Deflake Tests [#12791](https://github.com/vitessio/vitess/pull/12791) + * fakedbclient: Add locking to avoid races [#12814](https://github.com/vitessio/vitess/pull/12814) + * [release-16.0] test: fix cfc flaky test (#12941) [#12960](https://github.com/vitessio/vitess/pull/12960) + diff --git a/changelog/16.0/16.0.2/release_notes.md b/changelog/16.0/16.0.2/release_notes.md new file mode 100644 index 00000000000..806cf87d208 --- /dev/null +++ b/changelog/16.0/16.0.2/release_notes.md @@ -0,0 +1,48 @@ +# Release of Vitess v16.0.2 + +## Known Issues + +### Schema-initialization stuck on semi-sync ACKs while upgrading to `v16.0.2` + +During upgrades from `<= v15.x.x` to `v16.0.2`, as part of `PromoteReplica` call, the schema-init realizes that there are schema diffs to apply and ends up writing to the database if [semi-sync](https://vitess.io/docs/16.0/reference/features/mysql-replication/#semi-sync) is enabled, all of these writes get blocked indefinitely. +Eventually, `PromoteReplica` fails, and this fails the entire PRS call. + +A fix for this issue was merged on `release-16.0` in [PR#13441](https://github.com/vitessio/vitess/pull/13441), read the [corresponding bug report to learn more](https://github.com/vitessio/vitess/issues/13426). + +This issue is fixed in `v16.0.3` and later patch releases. + +### Broken downgrade from v17.x.x when super_read_only turned on by default + +In `v17.x.x` `super_read_only` is turned on by default meaning that downgrading from `v17` to `v16.0.2` breaks due to `init_db.sql` needing write access. + +This issue is fixed in `>= v16.0.3` thanks to [PR #13525](https://github.com/vitessio/vitess/pull/13525) + +## Major Changes + +### Upgrade to `go1.20.3` + +Vitess `v16.0.2` now runs on `go1.20.3`. +Below is a summary of this Go patch release. You can learn more [here](https://go.dev/doc/devel/release#go1.20). + +> go1.20.3 (released 2023-04-04) includes security fixes to the go/parser, html/template, mime/multipart, net/http, and net/textproto packages, as well as bug fixes to the compiler, the linker, the runtime, and the time package. See the Go 1.20.3 milestone on our issue tracker for details. + +### EffectiveCallerId in Vtgate gRPC calls + +A new flag `grpc-use-static-authentication-callerid` is added to gate the behavior introduced in https://github.com/vitessio/vitess/pull/12050. +Earlier, we used to automatically set immediateCallerID to user from static authentication context that overrode the EffectiveCallerId. + + +### Shard name validation in TopoServer + +Prior to v16.0.2, it was possible to create a shard name with invalid characters, which would then be inaccessible to various cluster management operations. + +Shard names may no longer contain the forward slash ("/") character, and TopoServer's `CreateShard` method returns an error if given such a name. + + +------------ +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/16.0/16.0.2/changelog.md). + +The release includes 24 commits (excluding merges) + +Thanks to all our contributors: @GuptaManan100, @ajm188, @frouioui, @github-actions[bot], @harshit-gangal, @mattlord, @systay, @vitess-bot[bot] + diff --git a/changelog/16.0/16.0.2/summary.md b/changelog/16.0/16.0.2/summary.md new file mode 100644 index 00000000000..faef0207921 --- /dev/null +++ b/changelog/16.0/16.0.2/summary.md @@ -0,0 +1,39 @@ +## Known Issues + +### Schema-initialization stuck on semi-sync ACKs while upgrading to `v16.0.2` + +During upgrades from `<= v15.x.x` to `v16.0.2`, as part of `PromoteReplica` call, the schema-init realizes that there are schema diffs to apply and ends up writing to the database if [semi-sync](https://vitess.io/docs/16.0/reference/features/mysql-replication/#semi-sync) is enabled, all of these writes get blocked indefinitely. +Eventually, `PromoteReplica` fails, and this fails the entire PRS call. + +A fix for this issue was merged on `release-16.0` in [PR#13441](https://github.com/vitessio/vitess/pull/13441), read the [corresponding bug report to learn more](https://github.com/vitessio/vitess/issues/13426). + +This issue is fixed in `v16.0.3` and later patch releases. + +### Broken downgrade from v17.x.x when super_read_only turned on by default + +In `v17.x.x` `super_read_only` is turned on by default meaning that downgrading from `v17` to `v16.0.2` breaks due to `init_db.sql` needing write access. + +This issue is fixed in `>= v16.0.3` thanks to [PR #13525](https://github.com/vitessio/vitess/pull/13525) + + +## Major Changes + +### Upgrade to `go1.20.3` + +Vitess `v16.0.2` now runs on `go1.20.3`. +Below is a summary of this Go patch release. You can learn more [here](https://go.dev/doc/devel/release#go1.20). + +> go1.20.3 (released 2023-04-04) includes security fixes to the go/parser, html/template, mime/multipart, net/http, and net/textproto packages, as well as bug fixes to the compiler, the linker, the runtime, and the time package. See the Go 1.20.3 milestone on our issue tracker for details. + +### EffectiveCallerId in Vtgate gRPC calls + +A new flag `grpc-use-static-authentication-callerid` is added to gate the behavior introduced in https://github.com/vitessio/vitess/pull/12050. +Earlier, we used to automatically set immediateCallerID to user from static authentication context that overrode the EffectiveCallerId. + + +### Shard name validation in TopoServer + +Prior to v16.0.2, it was possible to create a shard name with invalid characters, which would then be inaccessible to various cluster management operations. + +Shard names may no longer contain the forward slash ("/") character, and TopoServer's `CreateShard` method returns an error if given such a name. + diff --git a/changelog/16.0/16.0.3/changelog.md b/changelog/16.0/16.0.3/changelog.md new file mode 100644 index 00000000000..3f43d9b6049 --- /dev/null +++ b/changelog/16.0/16.0.3/changelog.md @@ -0,0 +1,67 @@ +# Changelog of Vitess v16.0.3 + +### Bug fixes +#### Cluster management + * [release-16.0] Prevent resetting replication every time we set replication source (#13377) [#13392](https://github.com/vitessio/vitess/pull/13392) + * [release-16.0] Don't run any reparent commands if the host is empty (#13396) [#13402](https://github.com/vitessio/vitess/pull/13402) + * [release-16.0] Upgrade-Downgrade Fix: Schema-initialization stuck on semi-sync ACKs while upgrading (#13411) [#13441](https://github.com/vitessio/vitess/pull/13441) + * [release-16.0] Flaky tests: Fix race in memory topo (#13559) [#13576](https://github.com/vitessio/vitess/pull/13576) + * [release-16.0] ignore all error for views in engine reload (#13590) [#13593](https://github.com/vitessio/vitess/pull/13593) + * [release-16.0] check keyspace snapshot time if none specified for backup restores (#13557) [#13634](https://github.com/vitessio/vitess/pull/13634) +#### Examples + * [release-16.0] `examples/compose`: fix `consul:latest` error w/`docker-compose up -d` (#13468) [#13472](https://github.com/vitessio/vitess/pull/13472) +#### Operator + * [release-16.0] Upgrade mysqld memory limits to 1024Mi (#13122) [#13204](https://github.com/vitessio/vitess/pull/13204) +#### Query Serving + * [release-16.0] Fix the resilientQuery to give correct results during initialization (#13080) [#13087](https://github.com/vitessio/vitess/pull/13087) + * [16.0] evalengine: TypeOf for Columns should only use value type when we have a value [#13154](https://github.com/vitessio/vitess/pull/13154) + * [release-16.0] Remove indentation limit in the sqlparser (#13158) [#13166](https://github.com/vitessio/vitess/pull/13166) + * Fix: errant GTID in health streamer [#13184](https://github.com/vitessio/vitess/pull/13184) + * [16.0] Fix: TabletServer ReserveBeginExecute to return transaction ID on error [#13193](https://github.com/vitessio/vitess/pull/13193) + * [release-16.0] Bug fix: SQL queries erroring with message `unknown aggregation random` (#13330) [#13334](https://github.com/vitessio/vitess/pull/13334) + * [release-16.0] ignore ongoing backfill vindex from routing selection (#13523) [#13607](https://github.com/vitessio/vitess/pull/13607) +#### Schema Tracker + * [release-16.0] Ignore error while reading table data in Schema.Engine reload (#13421) [#13424](https://github.com/vitessio/vitess/pull/13424) + * Backport v16: schema.Reload(): ignore column reading errors for views only, error for tables #13442 [#13456](https://github.com/vitessio/vitess/pull/13456) +#### TabletManager + * [release-16.0] mysqlctl: Correctly encode database and table names (#13312) [#13323](https://github.com/vitessio/vitess/pull/13323) +#### VReplication + * [release-16.0] VReplication: Do not delete sharded target vschema table entries on Cancel (#13146) [#13155](https://github.com/vitessio/vitess/pull/13155) + * [release-16.0] VReplication: Pass on --keep_routing_rules flag value for Cancel action (#13171) [#13194](https://github.com/vitessio/vitess/pull/13194) + * [release-16.0] VReplication: Fix VDiff2 DeleteByUUID Query (#13255) [#13282](https://github.com/vitessio/vitess/pull/13282) + * [release-16.0] VReplication: Ensure ROW events are sent within a transaction (#13547) [#13580](https://github.com/vitessio/vitess/pull/13580) +### CI/Build +#### General + * [release-16.0] Upgrade the Golang version to `go1.20.4` [#13053](https://github.com/vitessio/vitess/pull/13053) +### Documentation +#### Documentation + * [release-16.0] update link for reparenting guide (#13350) [#13356](https://github.com/vitessio/vitess/pull/13356) +### Enhancement +#### Build/CI + * [release-16.0] Set the number of threads for release notes generation with a flag [#13316](https://github.com/vitessio/vitess/pull/13316) +### Performance +#### TabletManager + * [release-16.0] BaseShowTablesWithSizes: optimize MySQL 8.0 query (#13375) [#13389](https://github.com/vitessio/vitess/pull/13389) +### Release +#### Build/CI + * [release-16.0] Optimize release notes generation to use GitHub Milestones (#13398) [#13621](https://github.com/vitessio/vitess/pull/13621) +#### Documentation + * [release-16.0] Fix format error in the `v16.0.2` release notes (#13057) [#13058](https://github.com/vitessio/vitess/pull/13058) +### Testing +#### Backup and Restore + * [release-16.0]: Fix `upgrade-downgrade` test setup and fix the `init_db.sql` [#13525](https://github.com/vitessio/vitess/pull/13525) +#### Cluster management + * [release-16.0] Deflake `TestPlannedReparentShardPromoteReplicaFail` (#13548) [#13549](https://github.com/vitessio/vitess/pull/13549) + * [release-16.0] Flaky tests: Fix wrangler tests (#13568) [#13571](https://github.com/vitessio/vitess/pull/13571) +#### General + * TestFix: `Upgrade Downgrade Testing - Backups - Manual` [#13408](https://github.com/vitessio/vitess/pull/13408) +#### Query Serving + * [release-16.0] Fix benchmarks in `plan_test.go` (#13096) [#13126](https://github.com/vitessio/vitess/pull/13126) + * [release-16.0] Deflake `TestQueryTimeoutWithDual` test (#13405) [#13409](https://github.com/vitessio/vitess/pull/13409) + * [release-16.0] Fix `TestGatewayBufferingWhileReparenting` flakiness (#13469) [#13500](https://github.com/vitessio/vitess/pull/13500) + * [release-16.0] fix TestQueryTimeoutWithTables flaky test (#13579) [#13585](https://github.com/vitessio/vitess/pull/13585) +#### VTorc + * [release-16.0]: Fix flakiness in VTOrc tests (#13489) [#13528](https://github.com/vitessio/vitess/pull/13528) +#### vtctl + * Fix new vtctl upgrade downgrade test on `release-16.0` [#13252](https://github.com/vitessio/vitess/pull/13252) + diff --git a/changelog/16.0/16.0.3/release_notes.md b/changelog/16.0/16.0.3/release_notes.md new file mode 100644 index 00000000000..d377bdc24f9 --- /dev/null +++ b/changelog/16.0/16.0.3/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v16.0.3 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/16.0/16.0.3/changelog.md). + +The release includes 38 merged Pull Requests. + +Thanks to all our contributors: @GuptaManan100, @app/github-actions, @app/vitess-bot, @frouioui, @harshit-gangal, @shlomi-noach, @systay + diff --git a/changelog/16.0/16.0.3/summary.md b/changelog/16.0/16.0.3/summary.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/changelog/16.0/16.0.4/changelog.md b/changelog/16.0/16.0.4/changelog.md new file mode 100644 index 00000000000..45c4944aa25 --- /dev/null +++ b/changelog/16.0/16.0.4/changelog.md @@ -0,0 +1,24 @@ +# Changelog of Vitess v16.0.4 + +### Bug fixes +#### Backup and Restore + * Manual cherry-pick of 13339 [#13733](https://github.com/vitessio/vitess/pull/13733) + * [release-16.0] Address vttablet memory usage with backups to Azure Blob Service (#13770) [#13774](https://github.com/vitessio/vitess/pull/13774) +#### Online DDL + * v16 backport: Fix closed channel panic in Online DDL cutover [#13732](https://github.com/vitessio/vitess/pull/13732) + * v16 backport: Solve RevertMigration.Comment read/write concurrency issue [#13736](https://github.com/vitessio/vitess/pull/13736) +#### Query Serving + * planbuilder: Fix infinite recursion for subqueries [#13783](https://github.com/vitessio/vitess/pull/13783) + * [release-16.0] vtgate: fix race condition iterating tables and views from schema tracker (#13673) [#13795](https://github.com/vitessio/vitess/pull/13795) + * [16.0] bugfixes: collection of fixes to bugs found while fuzzing [#13805](https://github.com/vitessio/vitess/pull/13805) +### CI/Build +#### Online DDL + * [release-16.0] CI: fix onlineddl_scheduler flakiness (#13754) [#13759](https://github.com/vitessio/vitess/pull/13759) +### Release +#### General + * Back to dev mode after v16.0.3 [#13660](https://github.com/vitessio/vitess/pull/13660) + * Release 16.0 code freeze for `v16.0.3` release [#13810](https://github.com/vitessio/vitess/pull/13810) +### Testing +#### Build/CI + * [release-16.0] Flakes: Delete VTDATAROOT files in reparent test teardown within CI (#13793) [#13797](https://github.com/vitessio/vitess/pull/13797) + diff --git a/changelog/16.0/16.0.4/release_notes.md b/changelog/16.0/16.0.4/release_notes.md new file mode 100644 index 00000000000..d46559f5fec --- /dev/null +++ b/changelog/16.0/16.0.4/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v16.0.4 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/16.0/16.0.4/changelog.md). + +The release includes 11 merged Pull Requests. + +Thanks to all our contributors: @GuptaManan100, @app/vitess-bot, @dbussink, @rohit-nayak-ps, @shlomi-noach, @systay + diff --git a/changelog/16.0/16.0.4/summary.md b/changelog/16.0/16.0.4/summary.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/changelog/16.0/README.md b/changelog/16.0/README.md new file mode 100644 index 00000000000..75b3f3a0a1f --- /dev/null +++ b/changelog/16.0/README.md @@ -0,0 +1,21 @@ +## v16.0 +The dedicated team for this release can be found [here](team.md). +* **[16.0.4](16.0.4)** + * [Changelog](16.0.4/changelog.md) + * [Release Notes](16.0.4/release_notes.md) + +* **[16.0.3](16.0.3)** + * [Changelog](16.0.3/changelog.md) + * [Release Notes](16.0.3/release_notes.md) + +* **[16.0.2](16.0.2)** + * [Changelog](16.0.2/changelog.md) + * [Release Notes](16.0.2/release_notes.md) + +* **[16.0.1](16.0.1)** + * [Changelog](16.0.1/changelog.md) + * [Release Notes](16.0.1/release_notes.md) + +* **[16.0.0](16.0.0)** + * [Changelog](16.0.0/changelog.md) + * [Release Notes](16.0.0/release_notes.md) diff --git a/changelog/16.0/team.md b/changelog/16.0/team.md new file mode 100644 index 00000000000..76b11c38145 --- /dev/null +++ b/changelog/16.0/team.md @@ -0,0 +1,5 @@ +## Release Team for v16 + +- **Lead:** Manan Gupta ([GuptaManan100](https://github.com/GuptaManan100)) manan@planetscale.com +- **Shadow:** Matt Lord ([mattlord](https://github.com/mattlord)) mlord@planetscale.com +- **Mentor:** Florent Poinsard ([frouioui](https://github.com/frouioui)) florent@planetscale.com \ No newline at end of file diff --git a/changelog/17.0/17.0.0/changelog.md b/changelog/17.0/17.0.0/changelog.md new file mode 100644 index 00000000000..3c3e107b54e --- /dev/null +++ b/changelog/17.0/17.0.0/changelog.md @@ -0,0 +1,557 @@ +# Changelog of Vitess v17.0.0 + +### Announcement +#### Governance + * maintainers: add dbussink and rework some sections [#12329](https://github.com/vitessio/vitess/pull/12329) +### Bug fixes +#### Backup and Restore + * go/vt/mysqlctl/backupstats: increment *_bytes metric by actual number of bytes [#12501](https://github.com/vitessio/vitess/pull/12501) + * Avoid crash during BuiltinBackup FileUpload method [#12703](https://github.com/vitessio/vitess/pull/12703) + * do not demote a new primary after backup completion [#12856](https://github.com/vitessio/vitess/pull/12856) + * go/vt/mysqlctl: fix dup open/close backup stats [#12947](https://github.com/vitessio/vitess/pull/12947) + * Incremental backup: fix calculation of binlog files to use [#13066](https://github.com/vitessio/vitess/pull/13066) + * restore: Run PITR through mysqlctl [#13123](https://github.com/vitessio/vitess/pull/13123) +#### Build/CI + * Prevent bootstrap of ZK from clearing previous dist binaries [#12236](https://github.com/vitessio/vitess/pull/12236) + * Fix dubious ownership of git directory in `vitess/base` Docker build [#12530](https://github.com/vitessio/vitess/pull/12530) + * Small fixes to the auto-upgrade golang tool [#12838](https://github.com/vitessio/vitess/pull/12838) + * Add timeout to `golangci-lint` and bump its version [#12852](https://github.com/vitessio/vitess/pull/12852) + * codegen: ensure goimports also simplifies [#12883](https://github.com/vitessio/vitess/pull/12883) +#### CLI + * Purge logs without panicking [#12187](https://github.com/vitessio/vitess/pull/12187) + * Fix `vtctldclient`'s Root command to return an error on unknown command [#12481](https://github.com/vitessio/vitess/pull/12481) + * vtctldclient: Format GetKeyspace output using cli.MarshalJSON [#12495](https://github.com/vitessio/vitess/pull/12495) + * VReplication: Pass on --keep_routing_rules flag value for Cancel action [#13171](https://github.com/vitessio/vitess/pull/13171) +#### Cluster management + * Fix initialization code to also stop replication to prevent crash [#12534](https://github.com/vitessio/vitess/pull/12534) + * Update topo {Get,Create}Keyspace to prevent invalid keyspace names [#12732](https://github.com/vitessio/vitess/pull/12732) + * Flaky TestRestart: give mysqlctld process time to shutdown [#12799](https://github.com/vitessio/vitess/pull/12799) + * Fix `TestGatewayBufferingWhileReparenting` flakiness [#12817](https://github.com/vitessio/vitess/pull/12817) + * [topo] Disallow the slash character in shard names [#12843](https://github.com/vitessio/vitess/pull/12843) + * TabletThrottler: `vtctlclient UpdateThrottlerConfig` use VtctldServer RPC [#12936](https://github.com/vitessio/vitess/pull/12936) + * Keep retrying mysql port if it is 0 in Vttablets [#12986](https://github.com/vitessio/vitess/pull/12986) + * Fix: convertBoolToSemiSyncAction method to account for all semi sync actions [#13075](https://github.com/vitessio/vitess/pull/13075) +#### Evalengine + * evalengine: fix bugs with decimal rounding [#12360](https://github.com/vitessio/vitess/pull/12360) + * json: Fix JSON SQL serialization and ensure to run tests [#12861](https://github.com/vitessio/vitess/pull/12861) + * evalengine: unset Type for NewColumnWithCollation [#12886](https://github.com/vitessio/vitess/pull/12886) + * datetime: Fix weekday calculation for DST changes [#12931](https://github.com/vitessio/vitess/pull/12931) + * evalengine: Fix wrong coercion into float [#12998](https://github.com/vitessio/vitess/pull/12998) + * evalengine: TypeOf for Columns should only use value type when we have a value [#13148](https://github.com/vitessio/vitess/pull/13148) +#### Examples + * Examples: Add lib functions to wait for shard states [#12239](https://github.com/vitessio/vitess/pull/12239) + * Fixes for local docker environment [#12382](https://github.com/vitessio/vitess/pull/12382) + * Examples: Correct VTAdmin Discovery File Path And Add Check [#12415](https://github.com/vitessio/vitess/pull/12415) + * Fix Vtop example and release script [#12457](https://github.com/vitessio/vitess/pull/12457) + * Open vtadmin-api port in docker-local example to enable vtadmin access [#12467](https://github.com/vitessio/vitess/pull/12467) + * Use new init_db.sql file in all places and update v17.0.0 summary [#12716](https://github.com/vitessio/vitess/pull/12716) + * [release-17.0] Use $hostname in vtadmin script as all other scripts do (#13231) [#13236](https://github.com/vitessio/vitess/pull/13236) +#### General + * Fix shorthand flag for recursive delete in zk tool [#12460](https://github.com/vitessio/vitess/pull/12460) + * Forward port of #12436: Sidecar schema init: use COPY algorithm while altering sidecardb tables [#12464](https://github.com/vitessio/vitess/pull/12464) + * Fixing backup tests flakiness [#12655](https://github.com/vitessio/vitess/pull/12655) + * Fix backup flaky tests [#12834](https://github.com/vitessio/vitess/pull/12834) + * Fix auto-upgrade-golang tooling [#13048](https://github.com/vitessio/vitess/pull/13048) + * Flakes: Skip flaky viperutil test TestPersistConfig for now to unblock CI [#13186](https://github.com/vitessio/vitess/pull/13186) + * Temporarily disable panic in mysqld when socket files are directly used [#13198](https://github.com/vitessio/vitess/pull/13198) +#### Observability + * Reset the current lag when closing the replication lag reader. [#12683](https://github.com/vitessio/vitess/pull/12683) +#### Online DDL + * Allow zero (in) date when setting up internal _vt schema [#12262](https://github.com/vitessio/vitess/pull/12262) + * Online DDL: improve retry of vreplication errors with `vitess` `ALTER TABLE` migrations [#12323](https://github.com/vitessio/vitess/pull/12323) + * OnlineDDL: mitigate scenario where a migration sees recurring cut-over timeouts [#12451](https://github.com/vitessio/vitess/pull/12451) + * Throttler: Store Config in Global Keyspace Topo Record [#12520](https://github.com/vitessio/vitess/pull/12520) + * Online DDL: enforce ALGORITHM=COPY on shadow table [#12521](https://github.com/vitessio/vitess/pull/12521) + * Online DDL: `ready_to_complete` race fix [#12612](https://github.com/vitessio/vitess/pull/12612) + * Mysqld.GetSchema: tolerate tables being dropped while inspecting schema [#12641](https://github.com/vitessio/vitess/pull/12641) + * Flaky test fix: OnlineDDL: convert time->unix->TIMESTAMP in golang space [#12826](https://github.com/vitessio/vitess/pull/12826) + * OnlineDDL: reject partial key coverage in PRIMARY KEY for vitess migrations [#12921](https://github.com/vitessio/vitess/pull/12921) +#### Operator + * Use latest k8s for operator example to work [#13085](https://github.com/vitessio/vitess/pull/13085) + * Upgrade mysqld memory limits to 1024Mi [#13122](https://github.com/vitessio/vitess/pull/13122) +#### Query Serving + * fix dual table handling [#12204](https://github.com/vitessio/vitess/pull/12204) + * grpcvtgateconn: add Dial and deprecate DialWithOpts [#12210](https://github.com/vitessio/vitess/pull/12210) + * Subquery bugfix [#12254](https://github.com/vitessio/vitess/pull/12254) + * fix: transaction_isolation to be applied at session level [#12281](https://github.com/vitessio/vitess/pull/12281) + * fix dual table handling [#12292](https://github.com/vitessio/vitess/pull/12292) + * fix: added null safe operator precendence rule [#12297](https://github.com/vitessio/vitess/pull/12297) + * Fix aggregation on outer joins [#12298](https://github.com/vitessio/vitess/pull/12298) + * (re)Formalize SQLError in VReplication, add underlying wrap/unwrap functionality [#12327](https://github.com/vitessio/vitess/pull/12327) + * sqlparser/schemadiff: normalize CAST type case [#12372](https://github.com/vitessio/vitess/pull/12372) + * Fix bug in vtexplain around JOINs [#12376](https://github.com/vitessio/vitess/pull/12376) + * Fix for USING when column names not lower cased [#12378](https://github.com/vitessio/vitess/pull/12378) + * Fix additional casting bugs in the sqlparser [#12393](https://github.com/vitessio/vitess/pull/12393) + * Fix scalar aggregation engine primitive for column truncation [#12468](https://github.com/vitessio/vitess/pull/12468) + * [release-16.0] BugFix: Unsharded query using a derived table and a dual table [#12484](https://github.com/vitessio/vitess/pull/12484) + * `ApplyVSchemaDDL`: escape Sequence names when writing the VSchema [#12519](https://github.com/vitessio/vitess/pull/12519) + * collations: fix sorting in UCA900 collations [#12555](https://github.com/vitessio/vitess/pull/12555) + * VSchema DDL: Add grammar to accept qualified table names in Vindex option values [#12577](https://github.com/vitessio/vitess/pull/12577) + * [gen4 planner] Make sure to not push down expressions when not possible [#12607](https://github.com/vitessio/vitess/pull/12607) + * Fix `panic` when executing a prepare statement with over `65,528` parameters [#12614](https://github.com/vitessio/vitess/pull/12614) + * Always add columns in the `Derived` operator [#12634](https://github.com/vitessio/vitess/pull/12634) + * planner: fix predicate simplifier [#12650](https://github.com/vitessio/vitess/pull/12650) + * [planner bugfix] add expressions to HAVING [#12668](https://github.com/vitessio/vitess/pull/12668) + * Use a left join to make sure that tables with tablespace=innodb_system are included in the schema [#12672](https://github.com/vitessio/vitess/pull/12672) + * [planner fix] make unknown column an error only for sharded queries [#12704](https://github.com/vitessio/vitess/pull/12704) + * Fix handling of table information for missing column [#12705](https://github.com/vitessio/vitess/pull/12705) + * Fix the returned schema object for broken views [#12713](https://github.com/vitessio/vitess/pull/12713) + * Fix `vtgate_schema_tracker` flaky tests [#12780](https://github.com/vitessio/vitess/pull/12780) + * [planbuilder bugfix] do not push aggregations into derived tables [#12810](https://github.com/vitessio/vitess/pull/12810) + * Fix: reset transaction session when no reserved connection [#12878](https://github.com/vitessio/vitess/pull/12878) + * fix: union distinct between unsharded route and sharded join [#12968](https://github.com/vitessio/vitess/pull/12968) + * bugfix: fix nil pointer in vtgate on topo connection error [#13010](https://github.com/vitessio/vitess/pull/13010) + * Cleanup rate limiters [#13016](https://github.com/vitessio/vitess/pull/13016) + * restrict flush statement on non primary tablets [#13042](https://github.com/vitessio/vitess/pull/13042) + * Fix the resilientQuery to give correct results during initialization [#13080](https://github.com/vitessio/vitess/pull/13080) + * Fix flakiness in `TestGatewayBufferingWhileReparenting` [#13106](https://github.com/vitessio/vitess/pull/13106) + * sqlparser: Treat view names as case sensitive [#13107](https://github.com/vitessio/vitess/pull/13107) + * Add a timeout to prevent unbounded waits that cause the primary tablet's health check stuck [#13111](https://github.com/vitessio/vitess/pull/13111) + * VTGate StreamExecute rpc to return session as response [#13131](https://github.com/vitessio/vitess/pull/13131) + * Remove the `200` indentation limit in the SQL parser [#13158](https://github.com/vitessio/vitess/pull/13158) + * vindexes: make lookup_unicodeloosemd5_hash non-planable [#13200](https://github.com/vitessio/vitess/pull/13200) + * fix: GetField to use existing session for query [#13219](https://github.com/vitessio/vitess/pull/13219) + * [17.0] Fix and Make aggregation planner handle aggregation functions better and handle Distinct in operator [#13277](https://github.com/vitessio/vitess/pull/13277) + * Bug fix: SQL queries erroring with message `unknown aggregation random` [#13330](https://github.com/vitessio/vitess/pull/13330) + * bugfixes: collection of fixes to bugs found while fuzzing [#13332](https://github.com/vitessio/vitess/pull/13332) + * bug: don't always wrap aggregation in coalesce [#13348](https://github.com/vitessio/vitess/pull/13348) +#### TabletManager + * TableFilter: fix excluded RE listing [#12318](https://github.com/vitessio/vitess/pull/12318) + * TableGC: fix PURGE race condition [#12505](https://github.com/vitessio/vitess/pull/12505) + * Fix transaction throttler ignoring the initial rate [#12618](https://github.com/vitessio/vitess/pull/12618) + * Skip recalculating the rate in MaxReplicationLagModule when it can't be done [#12620](https://github.com/vitessio/vitess/pull/12620) + * mysqlctl: Correctly encode database and table names [#13312](https://github.com/vitessio/vitess/pull/13312) +#### VReplication + * VReplication Last Error: retry error if it happens after timeout [#12114](https://github.com/vitessio/vitess/pull/12114) + * VReplication: workflows with multiple streams fail to start on multi-primary setups [#12228](https://github.com/vitessio/vitess/pull/12228) + * VReplication: ignore GC tables in schema analysis [#12320](https://github.com/vitessio/vitess/pull/12320) + * VSCopy: Ensure that vgtid event includes a complete TableLastPK proto message [#12623](https://github.com/vitessio/vitess/pull/12623) + * VDiff: use PK column collation for proper ordering [#12845](https://github.com/vitessio/vitess/pull/12845) + * Fix bug in `SwitchTraffic` that wasn't respecting `--dry_run` for readonly and replica tablets during a resharding event [#12992](https://github.com/vitessio/vitess/pull/12992) + * Fix vreplication_log usage in VReplicationExec [#13038](https://github.com/vitessio/vitess/pull/13038) + * ApplySchema vttablet RPC: allow special characters in table and column names [#13054](https://github.com/vitessio/vitess/pull/13054) + * Purge old schema versions from memory in historian [#13056](https://github.com/vitessio/vitess/pull/13056) + * Flaky vplayer tests: temporarily disable noblob variant [#13100](https://github.com/vitessio/vitess/pull/13100) + * VReplication: Do not delete sharded target vschema table entries on Cancel [#13146](https://github.com/vitessio/vitess/pull/13146) + * VReplication: Handle reference tables in sharded->sharded MoveTables [#13161](https://github.com/vitessio/vitess/pull/13161) + * VReplication: Fix VDiff2 DeleteByUUID Query [#13255](https://github.com/vitessio/vitess/pull/13255) +#### VTAdmin + * [vtadmin] racy vtexplain [#12635](https://github.com/vitessio/vitess/pull/12635) + * fix: vtadmin vttablet url protocol [#12836](https://github.com/vitessio/vitess/pull/12836) + * update vite base to respect absolute path [#13110](https://github.com/vitessio/vitess/pull/13110) +#### VTorc + * Fix Vtorc recovery time during `DeadPrimary` [#12870](https://github.com/vitessio/vitess/pull/12870) + * Add new field to health status [#12942](https://github.com/vitessio/vitess/pull/12942) +### CI/Build +#### Build/CI + * Makefile: use buildx when GOOS/GOARCH are specified [#12081](https://github.com/vitessio/vitess/pull/12081) + * onlineddl_vrepl suite: fix auto_increment flakyness [#12246](https://github.com/vitessio/vitess/pull/12246) + * Use a working MariaDB mirror for CI [#12253](https://github.com/vitessio/vitess/pull/12253) + * Download mariadb from vitess-resources [#12271](https://github.com/vitessio/vitess/pull/12271) + * Bump the vitess-resources release increment from v2 to v4 [#12272](https://github.com/vitessio/vitess/pull/12272) + * sqlparser: Tools cleanup [#12407](https://github.com/vitessio/vitess/pull/12407) + * Make upgrade downgrade job names unique [#12485](https://github.com/vitessio/vitess/pull/12485) + * Update fossa check for Node deprecation [#12871](https://github.com/vitessio/vitess/pull/12871) + * Flakes: Wait for throttler to open in throttler custom config test [#12980](https://github.com/vitessio/vitess/pull/12980) + * Add minimal token permissions (continued) [#12996](https://github.com/vitessio/vitess/pull/12996) + * workflows: Update to latest actions/setup-go [#13023](https://github.com/vitessio/vitess/pull/13023) + * CI: add a check on NeedsIssue label [#13044](https://github.com/vitessio/vitess/pull/13044) +#### Examples + * Examples, Flakes: Wait for Shard's VReplication Engine to Open [#12560](https://github.com/vitessio/vitess/pull/12560) +#### General + * Bump reported version of main to 17.0.0-SNAPSHOT [#12234](https://github.com/vitessio/vitess/pull/12234) + * [main] Upgrade the Golang version to `go1.20.2` [#12706](https://github.com/vitessio/vitess/pull/12706) + * Do not fail build on incorrect Go version [#12809](https://github.com/vitessio/vitess/pull/12809) + * [main] Upgrade the Golang version to `go1.20.3` [#12833](https://github.com/vitessio/vitess/pull/12833) + * Remove misleading message in `build.env` [#12841](https://github.com/vitessio/vitess/pull/12841) + * [main] Upgrade the Golang version to `go1.20.4` [#13052](https://github.com/vitessio/vitess/pull/13052) + * Code owners update [#13070](https://github.com/vitessio/vitess/pull/13070) + * [release-17.0] Upgrade the Golang version to `go1.20.5` [#13266](https://github.com/vitessio/vitess/pull/13266) +#### Online DDL + * Fixing `onlineddl_vrepl` flakiness, and adding more tests [#12325](https://github.com/vitessio/vitess/pull/12325) + * CI: extend timeouts in onlineddl_vrepl due to slow CI runners [#12583](https://github.com/vitessio/vitess/pull/12583) + * CI: increase overall test timeouts for all OnlineDDL tests [#12584](https://github.com/vitessio/vitess/pull/12584) +#### Query Serving + * Fix `SHOW VSCHEMA TABLES` tests using v17 vtgate that expected `dual` [#12381](https://github.com/vitessio/vitess/pull/12381) + * go/vt/vttablet/tabletserver: temporarily skip flaky consolidation test [#12604](https://github.com/vitessio/vitess/pull/12604) +#### VReplication + * [release-17.0] Flakes: Remove CI endtoend test for VReplication Copy Phase Throttling (#13343) [#13345](https://github.com/vitessio/vitess/pull/13345) +#### VTAdmin + * Remove `nvm use` from vtadmin-up.sh script [#12788](https://github.com/vitessio/vitess/pull/12788) +### Dependabot +#### Build/CI + * Bump golang.org/x/net from 0.5.0 to 0.7.0 [#12390](https://github.com/vitessio/vitess/pull/12390) + * Update additional Go dependencies [#12401](https://github.com/vitessio/vitess/pull/12401) + * Fix another mixin dep [#12403](https://github.com/vitessio/vitess/pull/12403) +#### Observability + * Bump vitess.io/vitess from 0.15.2 to 0.16.1 in /vitess-mixin [#12875](https://github.com/vitessio/vitess/pull/12875) + * build(deps): bump vitess.io/vitess from 0.16.1 to 0.16.2 in /vitess-mixin [#13076](https://github.com/vitessio/vitess/pull/13076) +#### VTAdmin + * Bump dns-packet from 5.3.1 to 5.4.0 in /web/vtadmin [#12545](https://github.com/vitessio/vitess/pull/12545) + * Bump webpack from 5.65.0 to 5.76.1 in /web/vtadmin [#12632](https://github.com/vitessio/vitess/pull/12632) + * [release-17.0] build(deps-dev): bump vite from 4.2.1 to 4.2.3 in /web/vtadmin (#13240) [#13254](https://github.com/vitessio/vitess/pull/13254) +### Documentation +#### Cluster management + * fix help text for backup_storage_number_blocks [#12258](https://github.com/vitessio/vitess/pull/12258) +#### Documentation + * release notes and misc cleanup around tabletenv flags [#12953](https://github.com/vitessio/vitess/pull/12953) + * Add Vinted to the list of adopters [#13134](https://github.com/vitessio/vitess/pull/13134) + * update link for reparenting guide [#13350](https://github.com/vitessio/vitess/pull/13350) +#### Query Serving + * Copy remaining query serving design docs [#12170](https://github.com/vitessio/vitess/pull/12170) + * Add release notes summary for views [#12422](https://github.com/vitessio/vitess/pull/12422) + * SQLError: adding some code comments [#12613](https://github.com/vitessio/vitess/pull/12613) + * update v16 release notes about VTGate Advertised MySQL Version [#12957](https://github.com/vitessio/vitess/pull/12957) +### Enhancement +#### ACL + * remove tablet server's ACL check on the /healthz HTTP api route [#12897](https://github.com/vitessio/vitess/pull/12897) +#### Backup and Restore + * vtbackup, mysqlctl: detailed backup and restore metrics [#11979](https://github.com/vitessio/vitess/pull/11979) + * go/vt/mysqlctl: add configurable read buffer to builtin backups [#12073](https://github.com/vitessio/vitess/pull/12073) + * go/{vt,flags}: register builtin backup flags with vtbackup [#12558](https://github.com/vitessio/vitess/pull/12558) + * go/vt/mysqlctl: optionally store decompressor cmd in manifest [#12633](https://github.com/vitessio/vitess/pull/12633) + * Fix restore from backup execution path to use context from caller [#12828](https://github.com/vitessio/vitess/pull/12828) + * Incremental backup and point in time recovery for XtraBackup [#13156](https://github.com/vitessio/vitess/pull/13156) +#### Build/CI + * Revert default MySQL 80 version to `8.0.30` [#12252](https://github.com/vitessio/vitess/pull/12252) + * Block merge if the `Do Not Merge` label is set [#12489](https://github.com/vitessio/vitess/pull/12489) + * Auto upgrade the Golang version [#12585](https://github.com/vitessio/vitess/pull/12585) + * Add read-only token permissions to GitHub Action workflows [#12718](https://github.com/vitessio/vitess/pull/12718) + * Remove unnecessary code bits in workflows [#12756](https://github.com/vitessio/vitess/pull/12756) + * tools/build_version_flags: allow configurable `build time` [#12876](https://github.com/vitessio/vitess/pull/12876) + * Set the number of threads for release notes generation with a flag [#13273](https://github.com/vitessio/vitess/pull/13273) +#### CLI + * [flags] extract tabletenv seconds type [#12920](https://github.com/vitessio/vitess/pull/12920) +#### Cluster management + * Introducing new error counter in VTTablet [#12266](https://github.com/vitessio/vitess/pull/12266) + * Auto refresh tables on vtgate Status Page [#12335](https://github.com/vitessio/vitess/pull/12335) + * Add configuration option for minimum TLS version [#12424](https://github.com/vitessio/vitess/pull/12424) + * Add tablet type filtering to vtctldclient GetTablets [#12915](https://github.com/vitessio/vitess/pull/12915) + * topo: Move to allow list for invalid names [#12917](https://github.com/vitessio/vitess/pull/12917) + * Add vstream metrics to vtgate [#13098](https://github.com/vitessio/vitess/pull/13098) + * [release-17.0] increase length of reparent_journal columns (#13287) [#13291](https://github.com/vitessio/vitess/pull/13291) +#### Evalengine + * evalengine: virtual machine [#12369](https://github.com/vitessio/vitess/pull/12369) + * Add 128 bit hashing for the evalengine [#12428](https://github.com/vitessio/vitess/pull/12428) + * evalengine: add more coercion paths to Hash128 [#12463](https://github.com/vitessio/vitess/pull/12463) + * evalengine: Implement integer division and modulo [#12656](https://github.com/vitessio/vitess/pull/12656) + * [ast & semantics] Make more type information available [#12710](https://github.com/vitessio/vitess/pull/12710) + * evalengine: New integration API [#12724](https://github.com/vitessio/vitess/pull/12724) + * vtgate: use collation when hashing query plans [#12728](https://github.com/vitessio/vitess/pull/12728) + * Implement logic to handle more JSON conversion [#12733](https://github.com/vitessio/vitess/pull/12733) + * evalengine: Add handling for current timestamp functions [#12742](https://github.com/vitessio/vitess/pull/12742) + * evalengine: Add support for more arithmetic functions [#12778](https://github.com/vitessio/vitess/pull/12778) + * evalengine: Fix handling of datetime and numeric comparisons [#12789](https://github.com/vitessio/vitess/pull/12789) + * evalengine: Implement ROUND() and TRUNCATE() [#12797](https://github.com/vitessio/vitess/pull/12797) + * evalengine: Implement CRC32 & CONV [#12804](https://github.com/vitessio/vitess/pull/12804) + * evalengine: Improve the timezone parsing logic [#12827](https://github.com/vitessio/vitess/pull/12827) + * evalengine: Implement initial crypto functions [#12835](https://github.com/vitessio/vitess/pull/12835) + * datetime: Add week number formatting [#12884](https://github.com/vitessio/vitess/pull/12884) + * evalengine: Add CONVERT_TZ function [#12892](https://github.com/vitessio/vitess/pull/12892) + * evalengine: Improve handling of datetime / time with precision and add more functions [#12907](https://github.com/vitessio/vitess/pull/12907) + * evalengine: Implement additional week modes [#12916](https://github.com/vitessio/vitess/pull/12916) + * evalengine: Add support for `UNIX_TIMESTAMP & `FROM_UNIXTIME` [#12928](https://github.com/vitessio/vitess/pull/12928) + * evalengine: Implement `MAKEDATE` and `MAKETIME`. [#12938](https://github.com/vitessio/vitess/pull/12938) + * evalengine: Implement LEFT, RIGHT, LPAD & RPAD [#13013](https://github.com/vitessio/vitess/pull/13013) + * evalengine: Implement TRIM, RTRIM and LTRIM [#13024](https://github.com/vitessio/vitess/pull/13024) + * evalengine: Implement UNHEX [#13028](https://github.com/vitessio/vitess/pull/13028) + * evalengine: Implement ORD [#13029](https://github.com/vitessio/vitess/pull/13029) + * evalengine: Add support for `STRCMP` [#13063](https://github.com/vitessio/vitess/pull/13063) + * evalengine: Add support for handling `IF` [#13067](https://github.com/vitessio/vitess/pull/13067) + * evalengine: `INTERVAL` support [#13073](https://github.com/vitessio/vitess/pull/13073) + * evalengine: Implement IP related functions [#13082](https://github.com/vitessio/vitess/pull/13082) + * evalengine: Implement `CONCAT` and `CONCAT_WS` [#13091](https://github.com/vitessio/vitess/pull/13091) + * evalengine: Add UUID functions [#13097](https://github.com/vitessio/vitess/pull/13097) +#### Examples + * examples schema: demi-idempotency via CREATE TABLE IF NOT EXISTS [#12453](https://github.com/vitessio/vitess/pull/12453) +#### General + * Upgrade to `go1.20.1` [#12399](https://github.com/vitessio/vitess/pull/12399) + * adding log statment to vtbackup [#12569](https://github.com/vitessio/vitess/pull/12569) + * Automatically add milestone to new Pull Request [#12759](https://github.com/vitessio/vitess/pull/12759) + * Miscellaneous code modifications based on observations made while doing a code walkthrough [#12873](https://github.com/vitessio/vitess/pull/12873) + * Remove viper warnings from local examples [#13234](https://github.com/vitessio/vitess/pull/13234) +#### Observability + * Add `Uptime` metric [#12712](https://github.com/vitessio/vitess/pull/12712) + * go/{stats,vt}: publish VReplicationStreamState to prometheus backend [#12772](https://github.com/vitessio/vitess/pull/12772) + * [vtadmin] log response errors [#12844](https://github.com/vitessio/vitess/pull/12844) +#### Online DDL + * schemadiff/OnlineDDL: ExtractConstraintOriginalName understands auto-generated names [#12275](https://github.com/vitessio/vitess/pull/12275) + * schemadiff: diffing two schemas generates a rich SchemaDiff object [#12551](https://github.com/vitessio/vitess/pull/12551) + * Online DDL: remove artifact entry upon GC [#12592](https://github.com/vitessio/vitess/pull/12592) + * Online DDL: configurable cut-over threshold [#12594](https://github.com/vitessio/vitess/pull/12594) + * gh-ost migrations: improved error log message [#12882](https://github.com/vitessio/vitess/pull/12882) + * OnlineDDL/vitess: only KILL 'RENAME' statement if not known to be successful [#12989](https://github.com/vitessio/vitess/pull/12989) + * Online DDL: better reporting of error message when RENAME fails [#13143](https://github.com/vitessio/vitess/pull/13143) +#### Query Serving + * VSCopy: Enable to copy from all shards in either a specified keyspace or all keyspaces [#11909](https://github.com/vitessio/vitess/pull/11909) + * vtgate: metrics for prepared stmts [#12141](https://github.com/vitessio/vitess/pull/12141) + * Add flag to select tx throttler tablet type [#12174](https://github.com/vitessio/vitess/pull/12174) + * sqlparser: Add parsing support to spatial POINT expression [#12198](https://github.com/vitessio/vitess/pull/12198) + * evalengine: new evaluation framework [#12247](https://github.com/vitessio/vitess/pull/12247) + * evalengine: it's time for JSON! [#12274](https://github.com/vitessio/vitess/pull/12274) + * Add parsing support for linestring constructor [#12299](https://github.com/vitessio/vitess/pull/12299) + * [main] add database name to _vt.views table (#12368) [#12374](https://github.com/vitessio/vitess/pull/12374) + * Schema RPC to fetch table/view definition [#12375](https://github.com/vitessio/vitess/pull/12375) + * sqlparser: add parsing support for polygon spatial datatype [#12377](https://github.com/vitessio/vitess/pull/12377) + * Emit per workload labels for existing per table vttablet metrics [#12394](https://github.com/vitessio/vitess/pull/12394) + * go/vt/vtgate: add json and use_fallback opts to numeric_static_map vdx [#12414](https://github.com/vitessio/vitess/pull/12414) + * Add basic metrics to `vttablet` transaction throttler [#12418](https://github.com/vitessio/vitess/pull/12418) + * Change `GetSchema` RPC to return `CreateView` instead of `SelectStmt` [#12421](https://github.com/vitessio/vitess/pull/12421) + * schemadiff: AlterTableAlgorithmStrategy [#12442](https://github.com/vitessio/vitess/pull/12442) + * GetSchema rpc to streaming api [#12447](https://github.com/vitessio/vitess/pull/12447) + * streamlog: make generic [#12494](https://github.com/vitessio/vitess/pull/12494) + * Horizon planning on operators [#12506](https://github.com/vitessio/vitess/pull/12506) + * Refactor and cleanup treatment of keyspace IDs and KeyRange [#12524](https://github.com/vitessio/vitess/pull/12524) + * collations: upgrade to MySQL 8.0.32 [#12557](https://github.com/vitessio/vitess/pull/12557) + * schemadiff: validate views' referenced columns via semantics [#12565](https://github.com/vitessio/vitess/pull/12565) + * sqlparser: Add parsing support to geom Collection types [#12608](https://github.com/vitessio/vitess/pull/12608) + * Parser: support PURGE BINARY LOGS statement [#12615](https://github.com/vitessio/vitess/pull/12615) + * sqlparser: Add parsing support for wkt funcs [#12651](https://github.com/vitessio/vitess/pull/12651) + * Add priority support to transaction throttler [#12662](https://github.com/vitessio/vitess/pull/12662) + * `schemadiff`: multi-error in schema normalization [#12675](https://github.com/vitessio/vitess/pull/12675) + * Add parsing support to wkb spatial funcs [#12739](https://github.com/vitessio/vitess/pull/12739) + * Log the length of the actual rows when the query exceeds the warning threshold [#12755](https://github.com/vitessio/vitess/pull/12755) + * [gen4 planner] Better handling of projections in operators [#12790](https://github.com/vitessio/vitess/pull/12790) + * Add parsing support to format functions [#12829](https://github.com/vitessio/vitess/pull/12829) + * Add parsing suppport to geom property functions [#12855](https://github.com/vitessio/vitess/pull/12855) + * planner fix: scoping rules for JOIN ON expression inside a subquery [#12881](https://github.com/vitessio/vitess/pull/12881) + * schemadiff: formalize SchemaDiff as the only schema diffing mechanism [#12885](https://github.com/vitessio/vitess/pull/12885) + * Trigger the loadTablets function when no primary is assigned to the shard [#12893](https://github.com/vitessio/vitess/pull/12893) + * Add vtgate and vttablet flag to truncate errors sent to client [#12899](https://github.com/vitessio/vitess/pull/12899) + * Add parsing support to point property functions [#12904](https://github.com/vitessio/vitess/pull/12904) + * schemadiff: rich `ImpossibleApplyDiffOrderError` [#12918](https://github.com/vitessio/vitess/pull/12918) + * Gen4: move insert planner to gen4 [#12934](https://github.com/vitessio/vitess/pull/12934) + * Optionally truncate queries that are written to logs [#12944](https://github.com/vitessio/vitess/pull/12944) + * Throttled transactions return MySQL error code 1041 ER_OUT_OF_RESOURCES [#12949](https://github.com/vitessio/vitess/pull/12949) + * Gen4 Operator Refactoring: ORDER BY & Debugging Enhancements [#12954](https://github.com/vitessio/vitess/pull/12954) + * `schemadiff`: better naming; formalizing `SchemaDiff` use. [#12955](https://github.com/vitessio/vitess/pull/12955) + * gen4 planner: Derived table handling on operators [#12978](https://github.com/vitessio/vitess/pull/12978) + * [Gen4] Aggregation and Grouping on Operators [#12994](https://github.com/vitessio/vitess/pull/12994) + * Add parsing support to linestring property functions [#12995](https://github.com/vitessio/vitess/pull/12995) + * planner: allow reference tables to specify global sources [#13030](https://github.com/vitessio/vitess/pull/13030) + * Add parsing support to := operator [#13032](https://github.com/vitessio/vitess/pull/13032) + * TxThrottler support for transactions outside BEGIN/COMMIT [#13040](https://github.com/vitessio/vitess/pull/13040) + * Add parsing support to geomCollection property functions and geohash/geojson functions [#13072](https://github.com/vitessio/vitess/pull/13072) + * Schema Tracking Refactor: Merge schema-tracking in health-streamer into schema.Engine [#13121](https://github.com/vitessio/vitess/pull/13121) + * enable settings pool by default [#13127](https://github.com/vitessio/vitess/pull/13127) + * Augmenting the `GetSchema` RPC to also work for `Table` and `All` type of input [#13197](https://github.com/vitessio/vitess/pull/13197) +#### TabletManager + * ApplySchema: deprecate '--skip_preflight' flag [#10716](https://github.com/vitessio/vitess/pull/10716) + * Update tabletpicker to support cell pref and tablet order options [#12282](https://github.com/vitessio/vitess/pull/12282) + * SidecarDB Init: don't fail on schema init errors [#12328](https://github.com/vitessio/vitess/pull/12328) + * Tablet throttler: adding log entries [#12966](https://github.com/vitessio/vitess/pull/12966) + * Tablet throttler: throttler-config-via-topo defaults 'true', deprecation message for old flags [#13130](https://github.com/vitessio/vitess/pull/13130) + * Tablet throttler: be explicit about client app name, exempt some apps from checks and heartbeat renewals [#13195](https://github.com/vitessio/vitess/pull/13195) +#### VReplication + * [main] VReplication: Use MariaDB Compat JSON Functions (#12420) [#12434](https://github.com/vitessio/vitess/pull/12434) + * Add Workflow Update Client Command [#12622](https://github.com/vitessio/vitess/pull/12622) + * VDiff: Add --update-table-stats flag to VDiff2 [#12868](https://github.com/vitessio/vitess/pull/12868) + * VReplication: Support MySQL Binary Log Transaction Compression [#12950](https://github.com/vitessio/vitess/pull/12950) + * VReplication: More intelligently manage vschema table entries on unsharded targets [#13220](https://github.com/vitessio/vitess/pull/13220) +#### VTorc + * vtorc: have cooldown only apply to the same kind of analysis [#13103](https://github.com/vitessio/vitess/pull/13103) +#### web UI + * update query-string dep due to reported vulnerability [#12673](https://github.com/vitessio/vitess/pull/12673) + * [VTAdmin] Migrate to Vite [#12831](https://github.com/vitessio/vitess/pull/12831) +### Feature Request +#### Cluster management + * Initialize Tablet with super_read_only mode [#12206](https://github.com/vitessio/vitess/pull/12206) +#### Online DDL + * SchemaDiff: normalize boolean columns to tinyint(1), and, add TableQualifier hint to force adding a db qualifier to the alter diff. [#12133](https://github.com/vitessio/vitess/pull/12133) +#### Query Serving + * Support Custom SidecarDB Names on VTTablets [#12240](https://github.com/vitessio/vitess/pull/12240) + * Create Views allowed for same keyspace [#12409](https://github.com/vitessio/vitess/pull/12409) + * Add Prepare, Execute and Deallocate Statement Support related to prepared statement [#12752](https://github.com/vitessio/vitess/pull/12752) + * feat: keep track of the full table name of expanded columns [#12976](https://github.com/vitessio/vitess/pull/12976) + * gen4 planner: push aggregation under filtering [#13169](https://github.com/vitessio/vitess/pull/13169) +#### VReplication + * Workflow Show: display rows copied [#12231](https://github.com/vitessio/vitess/pull/12231) + * VReplication Workflows: support noblob binlog_row_image format for MoveTables and Reshard [#12905](https://github.com/vitessio/vitess/pull/12905) +#### VTAdmin + * [VTAdmin] Add GetSrvKeyspaces and GetSrvKeyspace [#12702](https://github.com/vitessio/vitess/pull/12702) + * Add panic recovery handler to vtadmin http middleware [#12864](https://github.com/vitessio/vitess/pull/12864) +### Internal Cleanup +#### Build/CI + * Stop launchable integration for unit tests [#12386](https://github.com/vitessio/vitess/pull/12386) + * Run launchable only on PRs against `main` [#12694](https://github.com/vitessio/vitess/pull/12694) +#### CLI + * Cleanup TODOs in vtorc flag parsing code from v15 [#12787](https://github.com/vitessio/vitess/pull/12787) + * [vtctld] Delete legacy dual purpose duration/int flag parsing [#12860](https://github.com/vitessio/vitess/pull/12860) +#### Cluster management + * delete deprecated vtctld healthcheck flags and associated realtime stats code [#12373](https://github.com/vitessio/vitess/pull/12373) + * [misc] Delete `automation*` protos [#12449](https://github.com/vitessio/vitess/pull/12449) + * Deprecate `durability_policy` flag in vtctld [#12930](https://github.com/vitessio/vitess/pull/12930) +#### Evalengine + * evalengine: use 128 bit hashing internally [#12452](https://github.com/vitessio/vitess/pull/12452) + * evalengine: Refactorings & fixes [#12554](https://github.com/vitessio/vitess/pull/12554) + * evalengine: More cleanup [#12573](https://github.com/vitessio/vitess/pull/12573) + * Move the JSON parser from evalengine [#12757](https://github.com/vitessio/vitess/pull/12757) + * datetime: Unify parse and print APIs [#12815](https://github.com/vitessio/vitess/pull/12815) + * evalengine/compiler: Unify compilation [#12913](https://github.com/vitessio/vitess/pull/12913) + * mysql/evalengine: Unify float parsing [#12979](https://github.com/vitessio/vitess/pull/12979) +#### Examples + * Remove `are-you-alive` example [#12432](https://github.com/vitessio/vitess/pull/12432) +#### General + * Fix CodeQL identified issues [#12199](https://github.com/vitessio/vitess/pull/12199) + * Fix typecasting issue for workflow types [#12217](https://github.com/vitessio/vitess/pull/12217) + * Fix additional integer type handling [#12237](https://github.com/vitessio/vitess/pull/12237) + * Move to independent sets package [#12251](https://github.com/vitessio/vitess/pull/12251) + * Copy release notes from release-16 to `main` [#12287](https://github.com/vitessio/vitess/pull/12287) + * Fix additional typecasting alerts [#12309](https://github.com/vitessio/vitess/pull/12309) + * Switch to using new Go 1.19 CRL parser [#12315](https://github.com/vitessio/vitess/pull/12315) + * Internal refactor: LastError as a public struct [#12321](https://github.com/vitessio/vitess/pull/12321) + * Move to Go 1.19 atomics [#12391](https://github.com/vitessio/vitess/pull/12391) + * Remove a bunch of debug logging [#12404](https://github.com/vitessio/vitess/pull/12404) + * Update the pull request template to include a checkpoint for verifying modified/created tests aren't flaky [#12443](https://github.com/vitessio/vitess/pull/12443) + * Add steps on how to merge during code-freeze [#12444](https://github.com/vitessio/vitess/pull/12444) + * [main] Add a known issue into the release notes for xtrabackup and DDLs (#12536) [#12537](https://github.com/vitessio/vitess/pull/12537) + * Remove unused JQuery includes [#12552](https://github.com/vitessio/vitess/pull/12552) + * Minor Cleanup of SidecarDB Package [#12652](https://github.com/vitessio/vitess/pull/12652) + * [misc] Cleanup/fix/delete TODO comments [#12758](https://github.com/vitessio/vitess/pull/12758) + * sqlutils: Cleanup dead code [#12929](https://github.com/vitessio/vitess/pull/12929) + * topo: Remove unused templating code [#12939](https://github.com/vitessio/vitess/pull/12939) + * template: Move all HTML templates to safehtml [#12940](https://github.com/vitessio/vitess/pull/12940) + * style(grpc_codec): use switch-case [#12956](https://github.com/vitessio/vitess/pull/12956) + * fix some code comments throughout the codebase [#12964](https://github.com/vitessio/vitess/pull/12964) + * servenv: Move away from using default HTTP muxer [#12987](https://github.com/vitessio/vitess/pull/12987) + * tools: Cleanup more unused code [#12991](https://github.com/vitessio/vitess/pull/12991) + * coverage: Remove sonar coverage setup [#13000](https://github.com/vitessio/vitess/pull/13000) + * Update GOVERNANCE.md [#13047](https://github.com/vitessio/vitess/pull/13047) + * Deprecate VTGR [#13301](https://github.com/vitessio/vitess/pull/13301) + * Cherry-pick all pending PRs into `release-17.0` [#13364](https://github.com/vitessio/vitess/pull/13364) +#### Governance + * Added [Twitter] & [Etsy] to ADOPTERS.md [#12508](https://github.com/vitessio/vitess/pull/12508) +#### Observability + * flags: deprecate vtctld_addr and delete related code [#12580](https://github.com/vitessio/vitess/pull/12580) +#### Online DDL + * Deprecating VExec, part 2 - *post v16* [#12074](https://github.com/vitessio/vitess/pull/12074) + * Online DDL: remove legacy "stowaway table" logic [#12288](https://github.com/vitessio/vitess/pull/12288) + * [onlineddl] Remove some old backwards-compatibilities in online ddl code [#12427](https://github.com/vitessio/vitess/pull/12427) + * Query executor: preparing for `SHOW VITESS_MIGRATIONS` via `ShowBasic` [#12688](https://github.com/vitessio/vitess/pull/12688) +#### Query Serving + * [vtgate planner] Routing & Merging refactor [#12197](https://github.com/vitessio/vitess/pull/12197) + * Use the correct uint32 type for the GTID file position [#12229](https://github.com/vitessio/vitess/pull/12229) + * Fix typecasting alerts inside vtgate engine [#12238](https://github.com/vitessio/vitess/pull/12238) + * Change SQL error codes to use explicit type [#12249](https://github.com/vitessio/vitess/pull/12249) + * fix linter warnings [#12286](https://github.com/vitessio/vitess/pull/12286) + * [healthcheck] Remove deprecated fields from TabletHealth json marshalling [#12429](https://github.com/vitessio/vitess/pull/12429) + * Semantics refactor: formalize errors [#12572](https://github.com/vitessio/vitess/pull/12572) + * Refactor: go/vt/vtgate/engine/opcode to reduce `semantics` package dependencies [#12663](https://github.com/vitessio/vitess/pull/12663) + * evalengine: misc. cleanups [#12684](https://github.com/vitessio/vitess/pull/12684) + * [gen4 planner] Move more horizon planning to operators [#12750](https://github.com/vitessio/vitess/pull/12750) + * Move vreplication to vitess json parser [#12761](https://github.com/vitessio/vitess/pull/12761) + * simplify views ddl execution path [#12874](https://github.com/vitessio/vitess/pull/12874) + * refactor: clean up and simplify the semantics package [#12894](https://github.com/vitessio/vitess/pull/12894) + * Cleanup panics in `txthrottler`, reorder for readability [#12901](https://github.com/vitessio/vitess/pull/12901) + * txthrottler: further code cleanup [#12902](https://github.com/vitessio/vitess/pull/12902) + * feat: re-introduce expanded columns info in semantic state [#12908](https://github.com/vitessio/vitess/pull/12908) + * Add limit planning to operators [#12927](https://github.com/vitessio/vitess/pull/12927) + * [vttablet] cleanup unused code in tabletserver [#12943](https://github.com/vitessio/vitess/pull/12943) + * goyacc: don't import "unsafe" if not used [#13008](https://github.com/vitessio/vitess/pull/13008) + * fix misleading primary not serving message [#13022](https://github.com/vitessio/vitess/pull/13022) + * Remove enable-query-plan-field-caching flag that was deprecated in v15 [#13141](https://github.com/vitessio/vitess/pull/13141) + * sqlparser: Improve interval parsing [#13165](https://github.com/vitessio/vitess/pull/13165) + * refactor QueryProjection [#13174](https://github.com/vitessio/vitess/pull/13174) +#### TabletManager + * Table GC: remove spammy log entry [#12625](https://github.com/vitessio/vitess/pull/12625) + * mysqlctl: Remove unused schema version [#12999](https://github.com/vitessio/vitess/pull/12999) + * vttablet: Cleanup unused db version string [#13102](https://github.com/vitessio/vitess/pull/13102) + * mysqlctl: Remove custom callbacks [#13119](https://github.com/vitessio/vitess/pull/13119) + * mysqlctl: Remove usage of MYSQL_FLAVOR [#13135](https://github.com/vitessio/vitess/pull/13135) + * [release-17.0] k8stopo: Include deprecation warning (#13299) [#13302](https://github.com/vitessio/vitess/pull/13302) +#### VReplication + * Use consistent type for vreplication id [#12218](https://github.com/vitessio/vitess/pull/12218) + * Use consistent type port and replication state [#12248](https://github.com/vitessio/vitess/pull/12248) +#### VTAdmin + * [vtadmin-api] standardize cluster ids [#12803](https://github.com/vitessio/vitess/pull/12803) +#### VTorc + * Cleanup vtorc ssl package [#12423](https://github.com/vitessio/vitess/pull/12423) + * vtgr: Remove unused code [#12975](https://github.com/vitessio/vitess/pull/12975) +#### web UI + * Add nvm and node to vtadmin-up.sh [#12439](https://github.com/vitessio/vitess/pull/12439) +### Performance +#### General + * Use `MarshalVT`/`UnmarshalVT` instead of `proto.Marshal`/`proto.Unmarshal`. [#12525](https://github.com/vitessio/vitess/pull/12525) +#### Online DDL + * OnlineDDL: optimizing --singleton-context conflict check [#12539](https://github.com/vitessio/vitess/pull/12539) +### RFC +#### General + * [RFC] Standardized viper framework for vitess configuration parameters [#11456](https://github.com/vitessio/vitess/pull/11456) +### Regression +#### ACL + * vtgate : Disable Automatically setting immediateCallerID to user from static authentication context [#12961](https://github.com/vitessio/vitess/pull/12961) +#### Backup and Restore + * Add RestorePosition and RestoredBackupTime as metrics to vttablet [#13339](https://github.com/vitessio/vitess/pull/13339) +#### Query Serving + * gen4 planner bugfix: issue when merging subqueries [#13025](https://github.com/vitessio/vitess/pull/13025) + * gen4 planner: allow last_insert_id with arguments [#13026](https://github.com/vitessio/vitess/pull/13026) + * fix: ShardedRouting clone to clone slice of reference correctly [#13265](https://github.com/vitessio/vitess/pull/13265) + * [release-17.0] Handle inconsistent state error in query buffering (#13333) [#13353](https://github.com/vitessio/vitess/pull/13353) +### Release +#### Documentation + * Update the release instructions after v16 code freeze [#12245](https://github.com/vitessio/vitess/pull/12245) + * Improve release instructions post `v16.0.0 GA` release [#12538](https://github.com/vitessio/vitess/pull/12538) + * Re-organize the `releasenotes` directory into `changelog` [#12566](https://github.com/vitessio/vitess/pull/12566) + * Update the release instructions for the local install guide [#12746](https://github.com/vitessio/vitess/pull/12746) + * Fix incorrect path during release notes generation [#12769](https://github.com/vitessio/vitess/pull/12769) + * Fix format error in the `v16.0.2` release notes [#13057](https://github.com/vitessio/vitess/pull/13057) + * consolidate all deprecated flags under one header in release notes [#13163](https://github.com/vitessio/vitess/pull/13163) + * Prepare and clean summary docs for `v17.0.0` [#13363](https://github.com/vitessio/vitess/pull/13363) +#### General + * Fix release script for the version in the docker script [#12284](https://github.com/vitessio/vitess/pull/12284) + * Improve release process post `v16.0.0 GA` code freeze [#12487](https://github.com/vitessio/vitess/pull/12487) + * Copy release notes from release-16.0.0 to main [#12516](https://github.com/vitessio/vitess/pull/12516) + * Copy release notes for v16.0.1, v15.0.3, and v14.0.5 [#12768](https://github.com/vitessio/vitess/pull/12768) + * Copy release notes of v16.0.2 [#13051](https://github.com/vitessio/vitess/pull/13051) + * Review and cleanup of v17 RC release notes summary [#13209](https://github.com/vitessio/vitess/pull/13209) + * Code freeze of release-17.0 [#13210](https://github.com/vitessio/vitess/pull/13210) + * Release of v17.0.0-rc1 [#13211](https://github.com/vitessio/vitess/pull/13211) + * Back to dev mode [#13249](https://github.com/vitessio/vitess/pull/13249) + * Code freeze of release-17.0 for rc2 [#13296](https://github.com/vitessio/vitess/pull/13296) + * Release of v17.0.0-rc2 [#13305](https://github.com/vitessio/vitess/pull/13305) + * Back to dev mode after v17.0.0-rc2 [#13313](https://github.com/vitessio/vitess/pull/13313) +#### VTAdmin + * Add the vtadmin `web` directory to the release packages [#12639](https://github.com/vitessio/vitess/pull/12639) +### Testing +#### Build/CI + * testutils: use a rows parser for test comparisons [#12138](https://github.com/vitessio/vitess/pull/12138) + * [main] Don't keep data in upgrade-downgrade tests (#12462) [#12465](https://github.com/vitessio/vitess/pull/12465) + * Flakes: Address Common Unit Test Races [#12546](https://github.com/vitessio/vitess/pull/12546) + * Throttler: Expose Tablet's Config & Leverage to Deflake Tests [#12737](https://github.com/vitessio/vitess/pull/12737) + * Remove TODO from e2e upgrade test [#12792](https://github.com/vitessio/vitess/pull/12792) + * fakesqldb: Guard query log usage with lock [#12813](https://github.com/vitessio/vitess/pull/12813) + * fakedbclient: Add locking to avoid races [#12814](https://github.com/vitessio/vitess/pull/12814) + * test: fix cfc flaky test [#12941](https://github.com/vitessio/vitess/pull/12941) + * [ci] add generator for templated flag testdata [#13150](https://github.com/vitessio/vitess/pull/13150) +#### CLI + * Skip `--config-file-not-found-handling` in upgrade downgrade tests when `vttablet < 17.x.x` [#13275](https://github.com/vitessio/vitess/pull/13275) +#### Evalengine + * evalengine: Try to reduce test flakyness around time [#12819](https://github.com/vitessio/vitess/pull/12819) + * evalengine: Skip integration tests under race detector [#12948](https://github.com/vitessio/vitess/pull/12948) +#### Examples + * Examples: Add ENV Var to Local Examples to Skip VTAdmin [#12547](https://github.com/vitessio/vitess/pull/12547) +#### General + * Fix fullstatus test for backward compat [#12685](https://github.com/vitessio/vitess/pull/12685) + * VDiff: cleanup test log output [#12840](https://github.com/vitessio/vitess/pull/12840) +#### Online DDL + * OnlineDDL endtoend/CI: timestamp deviation toleration [#13046](https://github.com/vitessio/vitess/pull/13046) +#### Query Serving + * Use atomic.Bool for fakesqldb behavior flags [#12603](https://github.com/vitessio/vitess/pull/12603) + * go/{mysql,sync2,vt}: rework tabletserver consolidator test, modify consolidator result type [#12609](https://github.com/vitessio/vitess/pull/12609) + * debug: add ToString to operators to make it easier to debug [#12952](https://github.com/vitessio/vitess/pull/12952) + * fix: deflake TestGetSchemaRPC test [#12977](https://github.com/vitessio/vitess/pull/12977) + * Fix `TestStartFindMysqlPort` test flakiness [#13019](https://github.com/vitessio/vitess/pull/13019) + * Fix benchmarks in `plan_test.go` [#13096](https://github.com/vitessio/vitess/pull/13096) + * tests: stop tests not supported by older versions from running [#13183](https://github.com/vitessio/vitess/pull/13183) +#### VReplication + * Flakes: Use new healthy shard check in vreplication e2e tests [#12502](https://github.com/vitessio/vitess/pull/12502) + * Flakes: Handle Non-Determinstic Shard Ordering in VGTID [#12637](https://github.com/vitessio/vitess/pull/12637) + * Flakes: Handle Non-Deterministic VStream Event Ordering [#12642](https://github.com/vitessio/vitess/pull/12642) + * vreplication: Run all appropriate tests [#12862](https://github.com/vitessio/vitess/pull/12862) + * Generated Invisible Primary Keys: add vreplication test cases [#13004](https://github.com/vitessio/vitess/pull/13004) + * Flakes: enforce no concurrency/parallelism in vreplication unit test framework [#13104](https://github.com/vitessio/vitess/pull/13104) +#### VTAdmin + * [vtctldserver] Remove parallelization from tests that mutate shared state [#12257](https://github.com/vitessio/vitess/pull/12257) +#### VTorc + * Fix flakiness in `TestDeadPrimaryRecoversImmediately` [#13232](https://github.com/vitessio/vitess/pull/13232) + diff --git a/changelog/17.0/17.0.0/release_notes.md b/changelog/17.0/17.0.0/release_notes.md new file mode 100644 index 00000000000..9900ded0e3c --- /dev/null +++ b/changelog/17.0/17.0.0/release_notes.md @@ -0,0 +1,494 @@ +# Release of Vitess v17.0.0 +## Summary + +### Table of Contents + +- **[Known Issues](#known-issues)** + - [Schema-initialization stuck on semi-sync ACKs while upgrading to v17.0.0](#schema-init-upgrade) +- **[Major Changes](#major-changes)** + - **[Breaking Changes](#breaking-changes)** + - [Default Local Cell Preference for TabletPicker](#tablet-picker-cell-preference) + - [Dedicated stats for VTGate Prepare operations](#dedicated-vtgate-prepare-stats) + - [VTAdmin web migrated from create-react-app to vite](#migrated-vtadmin) + - [Keyspace name validation in TopoServer](#keyspace-name-validation) + - [Shard name validation in TopoServer](#shard-name-validation) + - [Compression CLI flags removed from vtctld and vtctldclient binaries](#remove-compression-flags-from-vtctld-binaries) + - [VtctldClient command RestoreFromBackup will now use the correct context](#vtctldclient-command-restorefrombackup-will-now-use-the-correct-context) + - [VTTablet Restore Metrics](#vttablet-restore-metrics) + - **[New command line flags and behavior](#new-flag)** + - [Builtin backup: read buffering flags](#builtin-backup-read-buffering-flags) + - [Manifest backup external decompressor command](#manifest-backup-external-decompressor-command) + - [Throttler config via topo enabled by default](#throttler-config-via-topo) + - **[New stats](#new-stats)** + - [Detailed backup and restore stats](#detailed-backup-and-restore-stats) + - [VTtablet Error count with code](#vttablet-error-count-with-code) + - [VReplication stream status for Prometheus](#vreplication-stream-status-for-prometheus) + - **[Online DDL](#online-ddl)** + - [--cut-over-threshold DDL strategy flag](#online-ddl-cut-over-threshold-flag) + - **[VReplication](#vreplication)** + - [Support for MySQL 8.0 `binlog_transaction_compression`](#binlog-compression) + - [Support for the `noblob` binlog row image mode](#noblob) + - **[VTTablet](#vttablet)** + - [VTTablet: Initializing all replicas with super_read_only](#vttablet-initialization) + - [Vttablet Schema Reload Timeout](#vttablet-schema-reload-timeout) + - [Settings pool enabled](#settings-pool) + - **[VTGate](#vtgate)** + - [StreamExecute GRPC API](#stream-execute) + - [Insert Planner Gen4](#insert-planner) + - **[Deprecations and Deletions](#deprecations-and-deletions)** + - [Deprecated Flags](#deprecated-flags) + - [Deprecated Stats](#deprecated-stats) + - [Deprecated `vtgr`](#deprecated-vtgr) + - [Deprecated `k8stopo`](#deprecated-k8stopo) + +## Known Issues +### Schema-initialization stuck on semi-sync ACKs while upgrading to `v17.0.0` + +During upgrades from `<= v16.x.x` to `v17.0.0`, as part of `PromoteReplica` call, the schema-init realizes that there are schema diffs to apply and ends up writing to the database if [semi-sync](https://vitess.io/docs/17.0/reference/features/mysql-replication/#semi-sync) is enabled, all of these writes get blocked indefinitely. +Eventually, `PromoteReplica` fails, and this fails the entire PRS call. + +A fix for this issue was merged on `release-17.0` in [PR#13441](https://github.com/vitessio/vitess/pull/13411), read the [corresponding bug report to learn more](https://github.com/vitessio/vitess/issues/13426). + +This issue is addressed in the `>= v17.0.1` patch releases. + + +## Major Changes + +### Breaking Changes + +#### Default Local Cell Preference for TabletPicker + +We added options to the `TabletPicker` that allow for specifying a cell preference in addition to making the default behavior to give priority to the local cell *and any alias it belongs to*. We are also introducing a new way to select tablet type preference which should eventually replace the `in_order:` hint currently used as a prefix for tablet types. The signature for creating a new `TabletPicker` now looks like: + +```go +func NewTabletPicker( + ctx context.Context, + ts *topo.Server, + cells []string, + localCell, keyspace, shard, tabletTypesStr string, + options TabletPickerOptions, +) (*TabletPicker, error) {...} +``` + +Where ctx, localCell, option are all new parameters. + +`option` is of type `TabletPickerOptions` and includes two fields, `CellPreference` and `TabletOrder`. +- `CellPreference`: "PreferLocalWithAlias" (default) gives preference to vtgate's local cell, or "OnlySpecified" which only picks from the cells explicitly passed in by the client +- `TabletOrder`: "Any" (default) for no ordering or random, or "InOrder" to use the order specified by the client + +See [PR 12282 Description](https://github.com/vitessio/vitess/pull/12282) for examples on how this changes cell picking behavior. + +#### Default TLS version changed for `vtgr` + +When using TLS with `vtgr`, we now default to TLS 1.2 if no other explicit version is configured. Configuration flags are provided to explicitly configure the minimum TLS version to be used. + +`vtgr` is now deprecated as part of `v17.0.0`, please see [the deprecation notice](#deprecated-vtgr). + +#### Dedicated stats for VTGate Prepare operations + +Prior to v17 Vitess incorrectly combined stats for VTGate `Execute` and `Prepare` operations under a single stats key (`Execute`). In v17 `Execute` and `Prepare` operations generate stats under independent stats keys. + +Here is a (condensed) example of stats output: + +```json +{ + "VtgateApi": { + "Histograms": { + "Execute.src.primary": { + "500000": 5 + }, + "Prepare.src.primary": { + "100000000": 0 + } + } + }, + "VtgateApiErrorCounts": { + "Execute.src.primary.INVALID_ARGUMENT": 3, + "Execute.src.primary.ALREADY_EXISTS": 1 + } +} +``` + +#### VTAdmin web migrated to vite + +Previously, VTAdmin web used the Create React App framework to test, build, and serve the application. In v17, Create React App has been removed, and [Vite](https://vitejs.dev/) is used in its place. Some of the main changes include: +- Vite uses `VITE_*` environment variables instead of `REACT_APP_*` environment variables +- Vite uses `import.meta.env` in place of `process.env` +- [Vitest](https://vitest.dev/) is used in place of Jest for testing +- Our protobufjs generator now produces an es6 module instead of commonjs to better work with Vite's defaults +- `public/index.html` has been moved to root directory in web/vtadmin + +#### Keyspace name validation in TopoServer + +Prior to v17, it was possible to create a keyspace with invalid characters, which would then be inaccessible to various cluster management operations. + +Keyspace names are restricted to using only ASCII characters, digits and `_` and `-`. TopoServer's `GetKeyspace` and `CreateKeyspace` methods return an error if given an invalid name. + +#### Shard name validation in TopoServer + +Prior to v17, it was possible to create a shard name with invalid characters, which would then be inaccessible to various cluster management operations. + +Shard names are restricted to using only ASCII characters, digits and `_` and `-`. TopoServer's `GetShard` and `CreateShard` methods return an error if given an invalid name. + +#### Compression CLI flags remove from vtctld and vtctldclient binaries + +The CLI flags below were mistakenly added to `vtctld` and `vtctldclient` in v15. In v17, they are no longer present in those binaries. + + * `--compression-engine-name` + * `--compression-level` + * `--external-compressor` + * `--external-compressor-extension` + * `--external-decompressor` + +#### VtctldClient command RestoreFromBackup will now use the correct context + +The VtctldClient command `RestoreFromBackup` initiates an asynchronous process on the specified tablet to restore data from either the latest backup or the closest one before the specified backup-timestamp. +Prior to v17, this asynchronous process could run indefinitely in the background since it was called using the background context. In v17 [PR#12830](https://github.com/vitessio/vitess/issues/12830), +this behavior was changed to use a context with a timeout of `action_timeout`. If you are using VtctldClient to initiate a restore, make sure you provide an appropriate value for action_timeout to give enough +time for the restore process to complete. Otherwise, the restore will throw an error if the context expires before it completes. + +#### VTTablet Restore Metrics + +As part of the VTTablet Sidecar Schema Maintenance Refactor in v16.0.0, we dropped the `local_metadata` table from the sidecar database schema. This table was storing a couple of metrics related to restores from backup. +They have now been re-introduced as metrics that can be accessed from `/debug/vars`. + +### Vttablet's transaction throttler now also throttles DML outside of `BEGIN; ...; COMMIT;` blocks + +Prior to v17, `vttablet`'s transaction throttler (enabled with `--enable-tx-throttler`) would only throttle requests done inside an explicit transaction, i.e., a `BEGIN; ...; COMMIT;` block. +In v17 [PR#13040](https://github.com/vitessio/vitess/issues/13037), this behavior was being changed so that it also throttles work outside of explicit transactions for `INSERT/UPDATE/DELETE/LOAD` queries. + +### New command line flags and behavior + +#### Backup --builtinbackup-file-read-buffer-size and --builtinbackup-file-write-buffer-size + +Prior to v17 the builtin Backup Engine does not use read buffering for restores, and for backups uses a hardcoded write buffer size of `2097152 bytes`. + +In v17 these defaults may be tuned with, respectively `--builtinbackup-file-read-buffer-size` and `--builtinbackup-file-write-buffer-size`. + +- `--builtinbackup-file-read-buffer-size`: read files using an IO buffer of this many bytes. Golang defaults are used when set to `0`. +- `--builtinbackup-file-write-buffer-size`: write files using an IO buffer of this many bytes. Golang defaults are used when set to `0`. (default `2097152`) + +These flags are applicable to the following programs: + +- `vtbackup` +- `vtctld` +- `vttablet` +- `vttestserver` + +#### Manifest backup external decompressor command + +Add a new builtin/xtrabackup flag `--manifest-external-decompressor`. When set the value of that flag is stored in the manifest field `ExternalDecompressor`. This manifest field may be consulted when decompressing a backup that was compressed with an external command. + +This feature enables the following flow: + + 1. Take a backup using an external compressor + ``` + Backup --compression-engine=external \ + --external-compressor=zstd \ + --manifest-external-decompressor="zstd -d" + ``` + 2. Restore that backup with a mere `Restore` command, without having to specify `--external-decompressor`. + +#### vttablet --throttler-config-via-topo + +This flag was introduced in v16 and defaulted to `false`. In v17 it defaults to `true`, and there is no need to supply it. + +Note that this flag overrides `--enable-lag-throttler` and `--throttle-threshold`, which now give warnings, and will be removed in v18. + +### New stats + +#### Detailed backup and restore stats + +##### Backup metrics + +Metrics related to backup operations are available in both Vtbackup and VTTablet. + +- `BackupBytes` +- `BackupCount` +- `BackupDurationNanosecond` + +Depending on the Backup Engine and Backup Storage in-use, a backup may be a complex pipeline of operations, including but not limited to: + +* Reading files from disk. +* Compressing files. +* Uploading compress files to cloud object storage. + +These operations are counted and timed, and the number of bytes consumed or produced by each stage of the pipeline are counted as well. + +##### Restore metrics + +Metrics related to restore operations are available in both Vtbackup and VTTablet: + +- `RestoreBytes` +- `RestoreCount` +- `RestoreDurationNanoseconds` + +Depending on the Backup Engine and Backup Storage in-use, a restore may be a complex pipeline of operations, including but not limited to: + +* Downloading compressed files from cloud object storage. +* Decompressing files. +* Writing decompressed files to disk. + +These operations are counted and timed, and the number of bytes consumed or produced by each stage of the pipeline are counted as well. + +##### Vtbackup metrics + +Vtbackup exports some metrics which are not available elsewhere: + +- `DurationByPhaseSeconds` + +Vtbackup fetches the last backup, restores it to an empty mysql installation, replicates recent changes into that installation, and then takes a backup of that installation. + +`DurationByPhaseSeconds` exports timings for these individual phases. + +##### Example + +**A snippet of vtbackup metrics after running it against the local example after creating the initial cluster** + +(Processed with `jq` for readability.) + +```json +{ + "BackupBytes": { + "BackupEngine.Builtin.Source:Read": 4777, + "BackupEngine.Builtin.Compressor:Write": 4616, + "BackupEngine.Builtin.Destination:Write": 162, + "BackupStorage.File.File:Write": 163 + }, + "BackupCount": { + "-.-.Backup": 1, + "BackupEngine.Builtin.Source:Open": 161, + "BackupEngine.Builtin.Source:Close": 322, + "BackupEngine.Builtin.Compressor:Close": 161, + "BackupEngine.Builtin.Destination:Open": 161, + "BackupEngine.Builtin.Destination:Close": 322 + }, + "BackupDurationNanoseconds": { + "-.-.Backup": 4188508542, + "BackupEngine.Builtin.Source:Open": 10649832, + "BackupEngine.Builtin.Source:Read": 55901067, + "BackupEngine.Builtin.Source:Close": 960826, + "BackupEngine.Builtin.Compressor:Write": 278358826, + "BackupEngine.Builtin.Compressor:Close": 79358372, + "BackupEngine.Builtin.Destination:Open": 16456627, + "BackupEngine.Builtin.Destination:Write": 11021043, + "BackupEngine.Builtin.Destination:Close": 17144630, + "BackupStorage.File.File:Write": 10743169 + }, + "DurationByPhaseSeconds": { + "InitMySQLd": 2, + "RestoreLastBackup": 6, + "CatchUpReplication": 1, + "TakeNewBackup": 4 + }, + "RestoreBytes": { + "BackupEngine.Builtin.Source:Read": 1095, + "BackupEngine.Builtin.Decompressor:Read": 950, + "BackupEngine.Builtin.Destination:Write": 209, + "BackupStorage.File.File:Read": 1113 + }, + "RestoreCount": { + "-.-.Restore": 1, + "BackupEngine.Builtin.Source:Open": 161, + "BackupEngine.Builtin.Source:Close": 322, + "BackupEngine.Builtin.Decompressor:Close": 161, + "BackupEngine.Builtin.Destination:Open": 161, + "BackupEngine.Builtin.Destination:Close": 322 + }, + "RestoreDurationNanoseconds": { + "-.-.Restore": 6204765541, + "BackupEngine.Builtin.Source:Open": 10542539, + "BackupEngine.Builtin.Source:Read": 104658370, + "BackupEngine.Builtin.Source:Close": 773038, + "BackupEngine.Builtin.Decompressor:Read": 165692120, + "BackupEngine.Builtin.Decompressor:Close": 51040, + "BackupEngine.Builtin.Destination:Open": 22715122, + "BackupEngine.Builtin.Destination:Write": 41679581, + "BackupEngine.Builtin.Destination:Close": 26954624, + "BackupStorage.File.File:Read": 102416075 + }, + "backup_duration_seconds": 4, + "restore_duration_seconds": 6 +} +``` + +Some notes to help understand these metrics: + +* `BackupBytes["BackupStorage.File.File:Write"]` measures how many bytes were read from disk by the `file` Backup Storage implementation during the backup phase. +* `DurationByPhaseSeconds["CatchUpReplication"]` measures how long it took to catch-up replication after the restore phase. +* `DurationByPhaseSeconds["RestoreLastBackup"]` measures to the duration of the restore phase. +* `RestoreDurationNanoseconds["-.-.Restore"]` also measures to the duration of the restore phase. + +#### VTTablet error count with error code + +##### VTTablet Error Count + +We are introducing new error counter `QueryErrorCountsWithCode` for VTTablet. It is similar to existing [QueryErrorCounts](https://github.com/vitessio/vitess/blob/main/go/vt/vttablet/tabletserver/query_engine.go#L174) except it contains errorCode as additional dimension. +We will deprecate `QueryErrorCounts` in v18. + +#### VReplication stream status for Prometheus + +VReplication publishes the `VReplicationStreamState` status which reports the state of VReplication streams. For example, here's what it looks like in the local cluster example after the MoveTables step: + +``` +"VReplicationStreamState": { + "commerce2customer.1": "Running" +} +``` + +Prior to v17, this data was not available via the Prometheus backend. In v17, workflow states are also published as a Prometheus gauge with a `state` label and a value of `1.0`. For example: + +``` +# HELP vttablet_v_replication_stream_state State of vreplication workflow +# TYPE vttablet_v_replication_stream_state gauge +vttablet_v_replication_stream_state{counts="1",state="Running",workflow="commerce2customer"} 1 +``` + +### VTTablet + +#### Initializing all replicas with super_read_only + +In order to prevent SUPER privileged users like `root` or `vt_dba` from producing errant GTIDs on replicas, all the replica MySQL servers are initialized with the MySQL +global variable `super_read_only` value set to `ON`. During failovers, we set `super_read_only` to `OFF` for the promoted primary tablet. This will allow the +primary to accept writes. All of the shard's tablets, except the current primary, will still have their global variable `super_read_only` set to `ON`. This will make sure that apart from +MySQL replication no other component, offline system or operator can write directly to a replica. + +Reference PR for this change is [PR #12206](https://github.com/vitessio/vitess/pull/12206) + +An important note regarding this change is how the default `init_db.sql` file has changed. +This is even more important if you are running Vitess on the vitess-operator. +You must ensure your `init_db.sql` is up-to-date with the new default for `v17.0.0`. +The default file can be found in `./config/init_db.sql`. + +#### Vttablet Schema Reload Timeout + +A new flag, `--schema-change-reload-timeout` has been added to timeout the reload of the schema that Vttablet does periodically. This is required because sometimes this operation can get stuck after MySQL restarts, etc. More details available in the issue https://github.com/vitessio/vitess/issues/13001. + +#### Settings Pool + +This was introduced in v15 and it enables pooling the connection with modified connection settings. +To know more what it does read the [v15 release notes](https://github.com/vitessio/vitess/releases/tag/v15.0.0) or the [blog](https://vitess.io/blog/2023-03-27-connection-pooling-in-vitess/) or [docs](https://vitess.io/docs/17.0/reference/query-serving/reserved-conn/) + +### Online DDL + +#### --cut-over-threshold DDL strategy flag + +Online DDL's strategy now accepts `--cut-over-threshold` (type: `duration`) flag. + +This flag stand for the timeout in a `vitess` migration's cut-over phase, which includes the final locking of tables before finalizing the migration. + +The value of the cut-over threshold should be high enough to support the async nature of vreplication catchup phase, as well as accommodate some replication lag. But it mustn't be too high. While cutting over, the migrated table is being locked, causing app connection and query pileup, consuming query buffers, and holding internal mutexes. + +Recommended range for this variable is `5s` - `30s`. Default: `10s`. + +### VReplication + +#### Support for the `noblob` binlog row image mode + +The `noblob` binlog row image is now supported by the MoveTables and Reshard VReplication workflows. If the source +or target database has this mode, other workflows like OnlineDDL, Materialize and CreateLookupVindex will error out. +The row events streamed by the VStream API, where blobs and text columns have not changed, will contain null values +for those columns, indicated by a `length:-1`. + +Reference PR for this change is [PR #12905](https://github.com/vitessio/vitess/pull/12905) + +#### Support for MySQL 8.0 binary log transaction compression + +MySQL 8.0 added support for [binary log compression via transaction (GTID) compression in 8.0.20](https://dev.mysql.com/blog-archive/mysql-8-0-20-replication-enhancements/). +You can read more about this feature here: https://dev.mysql.com/doc/refman/8.0/en/binary-log-transaction-compression.html + +This can — at the cost of increased CPU usage — dramatically reduce the amount of data sent over the wire for MySQL replication while also dramatically reducing the overall +storage space needed to retain binary logs (for replication, backup and recovery, CDC, etc). For larger installations this was a very desirable feature and while you could +technically use it with Vitess (the MySQL replica-sets making up each shard could use it fine) there was one very big limitation — [VReplication workflows](https://vitess.io/docs/reference/vreplication/vreplication/) +would not work. Given the criticality of VReplication workflows within Vitess, this meant that in practice this MySQL feature was not usable within Vitess clusters. + +We have addressed this issue in [PR #12950](https://github.com/vitessio/vitess/pull/12950) by adding support for processing the compressed transaction events in VReplication, +without any (known) limitations. + +### VTGate + +#### Modified StreamExecute GRPC API + +Earlier VTGate grpc api for `StreamExecute` did not return the session in the response. +Even though the underlying implementation supported transactions and other features that requires session persistence. +With [PR #13131](https://github.com/vitessio/vitess/pull/13131) VTGate will return the session to the client +so that it can be persisted with the client and sent back to VTGate on the next api call. + +This does not impact anyone using the mysql client library to connect to VTGate. +This could be a breaking change for grpc api users based on how they have implemented their grpc clients. + +#### Insert Planning with Gen4 + +Gen4 planner was made default in v14 for `SELECT` queries. In v15 `UPDATE` and `DELETE` queries were moved to Gen4 framework. +With this release `INSERT` queries are moved to Gen4. + +Clients can move to old v3 planner for inserts by using `V3Insert` planner version with `--planner-version` vtgate flag or with comment directive /*vt+ planner=` for individual query. + +### Deprecations and Deletions + +- The deprecated `automation` and `automationservice` protobuf definitions and associated client and server packages have been removed. +- Auto-population of DDL revert actions and tables at execution-time has been removed. This is now handled entirely at enqueue-time. +- Backwards-compatibility for failed migrations without a `completed_timestamp` has been removed (see https://github.com/vitessio/vitess/issues/8499). +- The deprecated `Key`, `Name`, `Up`, and `TabletExternallyReparentedTimestamp` fields were removed from the JSON representation of `TabletHealth` structures. +- The `MYSQL_FLAVOR` environment variable is no longer used. +- The `--enable-query-plan-field-caching`/`--enable_query_plan_field_caching` vttablet flag was deprecated in v15 and has now been removed. + +#### Deprecated Command Line Flags + +- Flag `vtctld_addr` has been deprecated and will be deleted in a future release. This affects `vtgate`, `vttablet` and `vtcombo`. +- The flag `schema_change_check_interval` used to accept either a Go duration value (e.g. `1m` or `30s`) or a bare integer, which was treated as seconds. + This behavior was deprecated in v15.0.0 and has been removed. + `schema_change_check_interval` now **only** accepts Go duration values. This affects `vtctld`. +- The flag `durability_policy` is no longer used by vtctld. Instead it reads the durability policies for all keyspaces from the topology server. +- The flag `use_super_read_only` is deprecated and will be removed in a later release. This affects `vttablet`. +- The flag `queryserver-config-schema-change-signal-interval` is deprecated and will be removed in a later release. This affects `vttablet`. + Schema-tracking has been refactored in this release to not use polling anymore, therefore the signal interval isn't required anymore. + +In `vttablet` various flags that took float values as seconds have updated to take the standard duration syntax as well. +Float-style parsing is now deprecated and will be removed in a later release. +For example, instead of `--queryserver-config-query-pool-timeout 12.2`, use `--queryserver-config-query-pool-timeout 12s200ms`. +Affected flags and YAML config keys: + +- `degraded_threshold` +- `heartbeat_interval` +- `heartbeat_on_demand_duration` +- `health_check_interval` +- `queryserver-config-idle-timeout` +- `queryserver-config-pool-conn-max-lifetime` +- `queryserver-config-olap-transaction-timeout` +- `queryserver-config-query-timeout` +- `queryserver-config-query-pool-timeout` +- `queryserver-config-schema-reload-time` +- `queryserver-config-schema-change-signal-interval` +- `queryserver-config-stream-pool-timeout` +- `queryserver-config-stream-pool-idle-timeout` +- `queryserver-config-transaction-timeout` +- `queryserver-config-txpool-timeout` +- `queryserver-config-txpool-idle-timeout` +- `shutdown_grace_period` +- `unhealthy_threshold` + +#### Deprecated Stats + +These stats are deprecated in v17. + +| Deprecated stat | Supported alternatives | +|-|-| +| `backup_duration_seconds` | `BackupDurationNanoseconds` | +| `restore_duration_seconds` | `RestoreDurationNanoseconds` | + +### Deprecated `vtgr` + +The `vtgr` component has been deprecated, also see https://github.com/vitessio/vitess/issues/13300. In Vitess 18 `vtgr` will be removed. + +#### Deprecated `k8stopo` + +The `k8stopo` has been deprecated, also see https://github.com/vitessio/vitess/issues/13298. With Vitess 18 the `k8stopo` will be removed. + +------------ +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/17.0/17.0.0/changelog.md). + +The release includes 457 commits (excluding merges) + +Thanks to all our contributors: @Ayman161803, @GuptaManan100, @L3o-pold, @Phanatic, @WilliamLu99, @adsr, @ajm188, @andylim-duo, @arthurschreiber, @arvind-murty, @austenLacy, @cuishuang, @dasl-, @dbussink, @deepthi, @dependabot[bot], @ejortegau, @fatih, @frouioui, @github-actions[bot], @harshit-gangal, @hkdsun, @jeremycole, @jhump, @johanstenberg92, @jwangace, @kevinpurwito, @kovyrin, @lixin963, @mattlord, @maxbrunet, @maxenglander, @mdlayher, @moberghammer, @notfelineit, @olyazavr, @pbibra, @pnacht, @rohit-nayak-ps, @rsajwani, @shlomi-noach, @systay, @timvaillancourt, @twthorn, @vbalys, @vinimdocarmo, @vitess-bot[bot], @vmg, @yoheimuta + diff --git a/changelog/17.0/17.0.0/summary.md b/changelog/17.0/17.0.0/summary.md new file mode 100644 index 00000000000..92ac2897463 --- /dev/null +++ b/changelog/17.0/17.0.0/summary.md @@ -0,0 +1,480 @@ +## Summary + +### Table of Contents + +- **[Known Issues](#known-issues)** + - [Schema-initialization stuck on semi-sync ACKs while upgrading to v17.0.0](#schema-init-upgrade) +- **[Major Changes](#major-changes)** + - **[Breaking Changes](#breaking-changes)** + - [Default Local Cell Preference for TabletPicker](#tablet-picker-cell-preference) + - [Dedicated stats for VTGate Prepare operations](#dedicated-vtgate-prepare-stats) + - [VTAdmin web migrated from create-react-app to vite](#migrated-vtadmin) + - [Keyspace name validation in TopoServer](#keyspace-name-validation) + - [Shard name validation in TopoServer](#shard-name-validation) + - [Compression CLI flags removed from vtctld and vtctldclient binaries](#remove-compression-flags-from-vtctld-binaries) + - [VtctldClient command RestoreFromBackup will now use the correct context](#vtctldclient-command-restorefrombackup-will-now-use-the-correct-context) + - **[New command line flags and behavior](#new-flag)** + - [Builtin backup: read buffering flags](#builtin-backup-read-buffering-flags) + - [Manifest backup external decompressor command](#manifest-backup-external-decompressor-command) + - [Throttler config via topo enabled by default](#throttler-config-via-topo) + - **[New stats](#new-stats)** + - [Detailed backup and restore stats](#detailed-backup-and-restore-stats) + - [VTtablet Error count with code](#vttablet-error-count-with-code) + - [VReplication stream status for Prometheus](#vreplication-stream-status-for-prometheus) + - **[Online DDL](#online-ddl)** + - [--cut-over-threshold DDL strategy flag](#online-ddl-cut-over-threshold-flag) + - **[VReplication](#vreplication)** + - [Support for MySQL 8.0 `binlog_transaction_compression`](#binlog-compression) + - [Support for the `noblob` binlog row image mode](#noblob) + - **[VTTablet](#vttablet)** + - [VTTablet: Initializing all replicas with super_read_only](#vttablet-initialization) + - [Vttablet Schema Reload Timeout](#vttablet-schema-reload-timeout) + - [Settings pool enabled](#settings-pool) + - **[VTGate](#vtgate)** + - [StreamExecute GRPC API](#stream-execute) + - [Insert Planner Gen4](#insert-planner) + - **[Deprecations and Deletions](#deprecations-and-deletions)** + - [Deprecated Flags](#deprecated-flags) + - [Deprecated Stats](#deprecated-stats) + - [Deprecated `vtgr`](#deprecated-vtgr) + - [Deprecated `k8stopo`](#deprecated-k8stopo) + + +## Known Issues +### Schema-initialization stuck on semi-sync ACKs while upgrading to `v17.0.0` + +During upgrades from `<= v16.x.x` to `v17.0.0`, as part of `PromoteReplica` call, the schema-init realizes that there are schema diffs to apply and ends up writing to the database if [semi-sync](https://vitess.io/docs/17.0/reference/features/mysql-replication/#semi-sync) is enabled, all of these writes get blocked indefinitely. +Eventually, `PromoteReplica` fails, and this fails the entire PRS call. + +A fix for this issue was merged on `release-17.0` in [PR#13441](https://github.com/vitessio/vitess/pull/13411), read the [corresponding bug report to learn more](https://github.com/vitessio/vitess/issues/13426). + +This issue is addressed in the `>= v17.0.1` patch releases. + + +## Major Changes + +### Breaking Changes + +#### Default Local Cell Preference for TabletPicker + +We added options to the `TabletPicker` that allow for specifying a cell preference in addition to making the default behavior to give priority to the local cell *and any alias it belongs to*. We are also introducing a new way to select tablet type preference which should eventually replace the `in_order:` hint currently used as a prefix for tablet types. The signature for creating a new `TabletPicker` now looks like: + +```go +func NewTabletPicker( + ctx context.Context, + ts *topo.Server, + cells []string, + localCell, keyspace, shard, tabletTypesStr string, + options TabletPickerOptions, +) (*TabletPicker, error) {...} +``` + +Where ctx, localCell, option are all new parameters. + +`option` is of type `TabletPickerOptions` and includes two fields, `CellPreference` and `TabletOrder`. +- `CellPreference`: "PreferLocalWithAlias" (default) gives preference to vtgate's local cell, or "OnlySpecified" which only picks from the cells explicitly passed in by the client +- `TabletOrder`: "Any" (default) for no ordering or random, or "InOrder" to use the order specified by the client + +See [PR 12282 Description](https://github.com/vitessio/vitess/pull/12282) for examples on how this changes cell picking behavior. + +#### Default TLS version changed for `vtgr` + +When using TLS with `vtgr`, we now default to TLS 1.2 if no other explicit version is configured. Configuration flags are provided to explicitly configure the minimum TLS version to be used. + +`vtgr` is now deprecated as part of `v17.0.0`, please see [the deprecation notice](#deprecated-vtgr). + +#### Dedicated stats for VTGate Prepare operations + +Prior to v17 Vitess incorrectly combined stats for VTGate `Execute` and `Prepare` operations under a single stats key (`Execute`). In v17 `Execute` and `Prepare` operations generate stats under independent stats keys. + +Here is a (condensed) example of stats output: + +```json +{ + "VtgateApi": { + "Histograms": { + "Execute.src.primary": { + "500000": 5 + }, + "Prepare.src.primary": { + "100000000": 0 + } + } + }, + "VtgateApiErrorCounts": { + "Execute.src.primary.INVALID_ARGUMENT": 3, + "Execute.src.primary.ALREADY_EXISTS": 1 + } +} +``` + +#### VTAdmin web migrated to vite + +Previously, VTAdmin web used the Create React App framework to test, build, and serve the application. In v17, Create React App has been removed, and [Vite](https://vitejs.dev/) is used in its place. Some of the main changes include: +- Vite uses `VITE_*` environment variables instead of `REACT_APP_*` environment variables +- Vite uses `import.meta.env` in place of `process.env` +- [Vitest](https://vitest.dev/) is used in place of Jest for testing +- Our protobufjs generator now produces an es6 module instead of commonjs to better work with Vite's defaults +- `public/index.html` has been moved to root directory in web/vtadmin + +#### Keyspace name validation in TopoServer + +Prior to v17, it was possible to create a keyspace with invalid characters, which would then be inaccessible to various cluster management operations. + +Keyspace names are restricted to using only ASCII characters, digits and `_` and `-`. TopoServer's `GetKeyspace` and `CreateKeyspace` methods return an error if given an invalid name. + +#### Shard name validation in TopoServer + +Prior to v17, it was possible to create a shard name with invalid characters, which would then be inaccessible to various cluster management operations. + +Shard names are restricted to using only ASCII characters, digits and `_` and `-`. TopoServer's `GetShard` and `CreateShard` methods return an error if given an invalid name. + +#### Compression CLI flags remove from vtctld and vtctldclient binaries + +The CLI flags below were mistakenly added to `vtctld` and `vtctldclient` in v15. In v17, they are no longer present in those binaries. + + * `--compression-engine-name` + * `--compression-level` + * `--external-compressor` + * `--external-compressor-extension` + * `--external-decompressor` + +#### VtctldClient command RestoreFromBackup will now use the correct context + +The VtctldClient command `RestoreFromBackup` initiates an asynchronous process on the specified tablet to restore data from either the latest backup or the closest one before the specified backup-timestamp. +Prior to v17, this asynchronous process could run indefinitely in the background since it was called using the background context. In v17 [PR#12830](https://github.com/vitessio/vitess/issues/12830), +this behavior was changed to use a context with a timeout of `action_timeout`. If you are using VtctldClient to initiate a restore, make sure you provide an appropriate value for action_timeout to give enough +time for the restore process to complete. Otherwise, the restore will throw an error if the context expires before it completes. + +### Vttablet's transaction throttler now also throttles DML outside of `BEGIN; ...; COMMIT;` blocks + +Prior to v17, `vttablet`'s transaction throttler (enabled with `--enable-tx-throttler`) would only throttle requests done inside an explicit transaction, i.e., a `BEGIN; ...; COMMIT;` block. +In v17 [PR#13040](https://github.com/vitessio/vitess/issues/13037), this behavior was being changed so that it also throttles work outside of explicit transactions for `INSERT/UPDATE/DELETE/LOAD` queries. + +### New command line flags and behavior + +#### Backup --builtinbackup-file-read-buffer-size and --builtinbackup-file-write-buffer-size + +Prior to v17 the builtin Backup Engine does not use read buffering for restores, and for backups uses a hardcoded write buffer size of `2097152 bytes`. + +In v17 these defaults may be tuned with, respectively `--builtinbackup-file-read-buffer-size` and `--builtinbackup-file-write-buffer-size`. + +- `--builtinbackup-file-read-buffer-size`: read files using an IO buffer of this many bytes. Golang defaults are used when set to `0`. +- `--builtinbackup-file-write-buffer-size`: write files using an IO buffer of this many bytes. Golang defaults are used when set to `0`. (default `2097152`) + +These flags are applicable to the following programs: + +- `vtbackup` +- `vtctld` +- `vttablet` +- `vttestserver` + +#### Manifest backup external decompressor command + +Add a new builtin/xtrabackup flag `--manifest-external-decompressor`. When set the value of that flag is stored in the manifest field `ExternalDecompressor`. This manifest field may be consulted when decompressing a backup that was compressed with an external command. + +This feature enables the following flow: + + 1. Take a backup using an external compressor + ``` + Backup --compression-engine=external \ + --external-compressor=zstd \ + --manifest-external-decompressor="zstd -d" + ``` + 2. Restore that backup with a mere `Restore` command, without having to specify `--external-decompressor`. + +#### vttablet --throttler-config-via-topo + +This flag was introduced in v16 and defaulted to `false`. In v17 it defaults to `true`, and there is no need to supply it. + +Note that this flag overrides `--enable-lag-throttler` and `--throttle-threshold`, which now give warnings, and will be removed in v18. + +### New stats + +#### Detailed backup and restore stats + +##### Backup metrics + +Metrics related to backup operations are available in both Vtbackup and VTTablet. + +- `BackupBytes` +- `BackupCount` +- `BackupDurationNanosecond` + +Depending on the Backup Engine and Backup Storage in-use, a backup may be a complex pipeline of operations, including but not limited to: + +* Reading files from disk. +* Compressing files. +* Uploading compress files to cloud object storage. + +These operations are counted and timed, and the number of bytes consumed or produced by each stage of the pipeline are counted as well. + +##### Restore metrics + +Metrics related to restore operations are available in both Vtbackup and VTTablet: + +- `RestoreBytes` +- `RestoreCount` +- `RestoreDurationNanoseconds` + +Depending on the Backup Engine and Backup Storage in-use, a restore may be a complex pipeline of operations, including but not limited to: + +* Downloading compressed files from cloud object storage. +* Decompressing files. +* Writing decompressed files to disk. + +These operations are counted and timed, and the number of bytes consumed or produced by each stage of the pipeline are counted as well. + +##### Vtbackup metrics + +Vtbackup exports some metrics which are not available elsewhere: + +- `DurationByPhaseSeconds` + +Vtbackup fetches the last backup, restores it to an empty mysql installation, replicates recent changes into that installation, and then takes a backup of that installation. + +`DurationByPhaseSeconds` exports timings for these individual phases. + +##### Example + +**A snippet of vtbackup metrics after running it against the local example after creating the initial cluster** + +(Processed with `jq` for readability.) + +```json +{ + "BackupBytes": { + "BackupEngine.Builtin.Source:Read": 4777, + "BackupEngine.Builtin.Compressor:Write": 4616, + "BackupEngine.Builtin.Destination:Write": 162, + "BackupStorage.File.File:Write": 163 + }, + "BackupCount": { + "-.-.Backup": 1, + "BackupEngine.Builtin.Source:Open": 161, + "BackupEngine.Builtin.Source:Close": 322, + "BackupEngine.Builtin.Compressor:Close": 161, + "BackupEngine.Builtin.Destination:Open": 161, + "BackupEngine.Builtin.Destination:Close": 322 + }, + "BackupDurationNanoseconds": { + "-.-.Backup": 4188508542, + "BackupEngine.Builtin.Source:Open": 10649832, + "BackupEngine.Builtin.Source:Read": 55901067, + "BackupEngine.Builtin.Source:Close": 960826, + "BackupEngine.Builtin.Compressor:Write": 278358826, + "BackupEngine.Builtin.Compressor:Close": 79358372, + "BackupEngine.Builtin.Destination:Open": 16456627, + "BackupEngine.Builtin.Destination:Write": 11021043, + "BackupEngine.Builtin.Destination:Close": 17144630, + "BackupStorage.File.File:Write": 10743169 + }, + "DurationByPhaseSeconds": { + "InitMySQLd": 2, + "RestoreLastBackup": 6, + "CatchUpReplication": 1, + "TakeNewBackup": 4 + }, + "RestoreBytes": { + "BackupEngine.Builtin.Source:Read": 1095, + "BackupEngine.Builtin.Decompressor:Read": 950, + "BackupEngine.Builtin.Destination:Write": 209, + "BackupStorage.File.File:Read": 1113 + }, + "RestoreCount": { + "-.-.Restore": 1, + "BackupEngine.Builtin.Source:Open": 161, + "BackupEngine.Builtin.Source:Close": 322, + "BackupEngine.Builtin.Decompressor:Close": 161, + "BackupEngine.Builtin.Destination:Open": 161, + "BackupEngine.Builtin.Destination:Close": 322 + }, + "RestoreDurationNanoseconds": { + "-.-.Restore": 6204765541, + "BackupEngine.Builtin.Source:Open": 10542539, + "BackupEngine.Builtin.Source:Read": 104658370, + "BackupEngine.Builtin.Source:Close": 773038, + "BackupEngine.Builtin.Decompressor:Read": 165692120, + "BackupEngine.Builtin.Decompressor:Close": 51040, + "BackupEngine.Builtin.Destination:Open": 22715122, + "BackupEngine.Builtin.Destination:Write": 41679581, + "BackupEngine.Builtin.Destination:Close": 26954624, + "BackupStorage.File.File:Read": 102416075 + }, + "backup_duration_seconds": 4, + "restore_duration_seconds": 6 +} +``` + +Some notes to help understand these metrics: + +* `BackupBytes["BackupStorage.File.File:Write"]` measures how many bytes were read from disk by the `file` Backup Storage implementation during the backup phase. +* `DurationByPhaseSeconds["CatchUpReplication"]` measures how long it took to catch-up replication after the restore phase. +* `DurationByPhaseSeconds["RestoreLastBackup"]` measures to the duration of the restore phase. +* `RestoreDurationNanoseconds["-.-.Restore"]` also measures to the duration of the restore phase. + +#### VTTablet error count with error code + +##### VTTablet Error Count + +We are introducing new error counter `QueryErrorCountsWithCode` for VTTablet. It is similar to existing [QueryErrorCounts](https://github.com/vitessio/vitess/blob/main/go/vt/vttablet/tabletserver/query_engine.go#L174) except it contains errorCode as additional dimension. +We will deprecate `QueryErrorCounts` in v18. + +#### VReplication stream status for Prometheus + +VReplication publishes the `VReplicationStreamState` status which reports the state of VReplication streams. For example, here's what it looks like in the local cluster example after the MoveTables step: + +``` +"VReplicationStreamState": { + "commerce2customer.1": "Running" +} +``` + +Prior to v17, this data was not available via the Prometheus backend. In v17, workflow states are also published as a Prometheus gauge with a `state` label and a value of `1.0`. For example: + +``` +# HELP vttablet_v_replication_stream_state State of vreplication workflow +# TYPE vttablet_v_replication_stream_state gauge +vttablet_v_replication_stream_state{counts="1",state="Running",workflow="commerce2customer"} 1 +``` + +### VTTablet + +#### Initializing all replicas with super_read_only + +In order to prevent SUPER privileged users like `root` or `vt_dba` from producing errant GTIDs on replicas, all the replica MySQL servers are initialized with the MySQL +global variable `super_read_only` value set to `ON`. During failovers, we set `super_read_only` to `OFF` for the promoted primary tablet. This will allow the +primary to accept writes. All of the shard's tablets, except the current primary, will still have their global variable `super_read_only` set to `ON`. This will make sure that apart from +MySQL replication no other component, offline system or operator can write directly to a replica. + +Reference PR for this change is [PR #12206](https://github.com/vitessio/vitess/pull/12206) + +An important note regarding this change is how the default `init_db.sql` file has changed. +This is even more important if you are running Vitess on the vitess-operator. +You must ensure your `init_db.sql` is up-to-date with the new default for `v17.0.0`. +The default file can be found in `./config/init_db.sql`. + +#### Vttablet Schema Reload Timeout + +A new flag, `--schema-change-reload-timeout` has been added to timeout the reload of the schema that Vttablet does periodically. This is required because sometimes this operation can get stuck after MySQL restarts, etc. More details available in the issue https://github.com/vitessio/vitess/issues/13001. + +#### Settings Pool + +This was introduced in v15 and it enables pooling the connection with modified connection settings. +To know more what it does read the [v15 release notes](https://github.com/vitessio/vitess/releases/tag/v15.0.0) or the [blog](https://vitess.io/blog/2023-03-27-connection-pooling-in-vitess/) or [docs](https://vitess.io/docs/17.0/reference/query-serving/reserved-conn/) + +### Online DDL + +#### --cut-over-threshold DDL strategy flag + +Online DDL's strategy now accepts `--cut-over-threshold` (type: `duration`) flag. + +This flag stand for the timeout in a `vitess` migration's cut-over phase, which includes the final locking of tables before finalizing the migration. + +The value of the cut-over threshold should be high enough to support the async nature of vreplication catchup phase, as well as accommodate some replication lag. But it mustn't be too high. While cutting over, the migrated table is being locked, causing app connection and query pileup, consuming query buffers, and holding internal mutexes. + +Recommended range for this variable is `5s` - `30s`. Default: `10s`. + +### VReplication + +#### Support for the `noblob` binlog row image mode + +The `noblob` binlog row image is now supported by the MoveTables and Reshard VReplication workflows. If the source +or target database has this mode, other workflows like OnlineDDL, Materialize and CreateLookupVindex will error out. +The row events streamed by the VStream API, where blobs and text columns have not changed, will contain null values +for those columns, indicated by a `length:-1`. + +Reference PR for this change is [PR #12905](https://github.com/vitessio/vitess/pull/12905) + +#### Support for MySQL 8.0 binary log transaction compression + +MySQL 8.0 added support for [binary log compression via transaction (GTID) compression in 8.0.20](https://dev.mysql.com/blog-archive/mysql-8-0-20-replication-enhancements/). +You can read more about this feature here: https://dev.mysql.com/doc/refman/8.0/en/binary-log-transaction-compression.html + +This can — at the cost of increased CPU usage — dramatically reduce the amount of data sent over the wire for MySQL replication while also dramatically reducing the overall +storage space needed to retain binary logs (for replication, backup and recovery, CDC, etc). For larger installations this was a very desirable feature and while you could +technically use it with Vitess (the MySQL replica-sets making up each shard could use it fine) there was one very big limitation — [VReplication workflows](https://vitess.io/docs/reference/vreplication/vreplication/) +would not work. Given the criticality of VReplication workflows within Vitess, this meant that in practice this MySQL feature was not usable within Vitess clusters. + +We have addressed this issue in [PR #12950](https://github.com/vitessio/vitess/pull/12950) by adding support for processing the compressed transaction events in VReplication, +without any (known) limitations. + +### VTGate + +#### Modified StreamExecute GRPC API + +Earlier VTGate grpc api for `StreamExecute` did not return the session in the response. +Even though the underlying implementation supported transactions and other features that requires session persistence. +With [PR #13131](https://github.com/vitessio/vitess/pull/13131) VTGate will return the session to the client +so that it can be persisted with the client and sent back to VTGate on the next api call. + +This does not impact anyone using the mysql client library to connect to VTGate. +This could be a breaking change for grpc api users based on how they have implemented their grpc clients. + +#### Insert Planning with Gen4 + +Gen4 planner was made default in v14 for `SELECT` queries. In v15 `UPDATE` and `DELETE` queries were moved to Gen4 framework. +With this release `INSERT` queries are moved to Gen4. + +Clients can move to old v3 planner for inserts by using `V3Insert` planner version with `--planner-version` vtgate flag or with comment directive /*vt+ planner=` for individual query. + +### Deprecations and Deletions + +- The deprecated `automation` and `automationservice` protobuf definitions and associated client and server packages have been removed. +- Auto-population of DDL revert actions and tables at execution-time has been removed. This is now handled entirely at enqueue-time. +- Backwards-compatibility for failed migrations without a `completed_timestamp` has been removed (see https://github.com/vitessio/vitess/issues/8499). +- The deprecated `Key`, `Name`, `Up`, and `TabletExternallyReparentedTimestamp` fields were removed from the JSON representation of `TabletHealth` structures. +- The `MYSQL_FLAVOR` environment variable is no longer used. +- The `--enable-query-plan-field-caching`/`--enable_query_plan_field_caching` vttablet flag was deprecated in v15 and has now been removed. + +#### Deprecated Command Line Flags + +- Flag `vtctld_addr` has been deprecated and will be deleted in a future release. This affects `vtgate`, `vttablet` and `vtcombo`. +- The flag `schema_change_check_interval` used to accept either a Go duration value (e.g. `1m` or `30s`) or a bare integer, which was treated as seconds. + This behavior was deprecated in v15.0.0 and has been removed. + `schema_change_check_interval` now **only** accepts Go duration values. This affects `vtctld`. +- The flag `durability_policy` is no longer used by vtctld. Instead it reads the durability policies for all keyspaces from the topology server. +- The flag `use_super_read_only` is deprecated and will be removed in a later release. This affects `vttablet`. +- The flag `queryserver-config-schema-change-signal-interval` is deprecated and will be removed in a later release. This affects `vttablet`. + Schema-tracking has been refactored in this release to not use polling anymore, therefore the signal interval isn't required anymore. + +In `vttablet` various flags that took float values as seconds have updated to take the standard duration syntax as well. +Float-style parsing is now deprecated and will be removed in a later release. +For example, instead of `--queryserver-config-query-pool-timeout 12.2`, use `--queryserver-config-query-pool-timeout 12s200ms`. +Affected flags and YAML config keys: + +- `degraded_threshold` +- `heartbeat_interval` +- `heartbeat_on_demand_duration` +- `health_check_interval` +- `queryserver-config-idle-timeout` +- `queryserver-config-pool-conn-max-lifetime` +- `queryserver-config-olap-transaction-timeout` +- `queryserver-config-query-timeout` +- `queryserver-config-query-pool-timeout` +- `queryserver-config-schema-reload-time` +- `queryserver-config-schema-change-signal-interval` +- `queryserver-config-stream-pool-timeout` +- `queryserver-config-stream-pool-idle-timeout` +- `queryserver-config-transaction-timeout` +- `queryserver-config-txpool-timeout` +- `queryserver-config-txpool-idle-timeout` +- `shutdown_grace_period` +- `unhealthy_threshold` + +#### Deprecated Stats + +These stats are deprecated in v17. + +| Deprecated stat | Supported alternatives | +|-|-| +| `backup_duration_seconds` | `BackupDurationNanoseconds` | +| `restore_duration_seconds` | `RestoreDurationNanoseconds` | + +### Deprecated `vtgr` + +The `vtgr` component has been deprecated, also see https://github.com/vitessio/vitess/issues/13300. In Vitess 18 `vtgr` will be removed. + +#### Deprecated `k8stopo` + +The `k8stopo` has been deprecated, also see https://github.com/vitessio/vitess/issues/13298. With Vitess 18 the `k8stopo` will be removed. diff --git a/changelog/17.0/17.0.1/changelog.md b/changelog/17.0/17.0.1/changelog.md new file mode 100644 index 00000000000..05cbe14d522 --- /dev/null +++ b/changelog/17.0/17.0.1/changelog.md @@ -0,0 +1,47 @@ +# Changelog of Vitess v17.0.1 + +### Bug fixes +#### Cluster management + * [release-17.0] Upgrade-Downgrade Fix: Schema-initialization stuck on semi-sync ACKs while upgrading [#13411](https://github.com/vitessio/vitess/pull/13411) + * [release-17.0] ignore all error for views in engine reload (#13590) [#13594](https://github.com/vitessio/vitess/pull/13594) + * [release-17.0] check keyspace snapshot time if none specified for backup restores (#13557) [#13635](https://github.com/vitessio/vitess/pull/13635) +#### Examples + * [release-17.0] Local example 101: idempotent on existing clusters (#13373) [#13383](https://github.com/vitessio/vitess/pull/13383) + * [release-17.0] Examples: only terminate vtadmin if it was started (#13433) [#13443](https://github.com/vitessio/vitess/pull/13443) + * [release-17.0] `examples/compose`: fix `consul:latest` error w/`docker-compose up -d` (#13468) [#13473](https://github.com/vitessio/vitess/pull/13473) +#### Schema Tracker + * [release-17.0] Ignore error while reading table data in Schema.Engine reload (#13421) [#13423](https://github.com/vitessio/vitess/pull/13423) + * Backport v17: schema.Reload(): ignore column reading errors for views only, error for tables #13442 [#13455](https://github.com/vitessio/vitess/pull/13455) +#### Throttler + * [release-17.0] Tablet throttler: only start watching SrvKeyspace once it's confirmed to exist (#13384) [#13399](https://github.com/vitessio/vitess/pull/13399) +#### VReplication + * [release-17.0] VReplication: Ensure ROW events are sent within a transaction (#13547) [#13581](https://github.com/vitessio/vitess/pull/13581) +#### VTorc + * [release-17.0] Ensure to call `servenv.Init` when needed (#13638) [#13643](https://github.com/vitessio/vitess/pull/13643) +### CI/Build +#### Build/CI + * Backport v17: Replace deprecated github.com/golang/mock with go.uber.org/mock #13512 [#13601](https://github.com/vitessio/vitess/pull/13601) +### Internal Cleanup +#### VTorc + * [release-17.0] Remove excessive logging in VTOrc APIs (#13459) [#13461](https://github.com/vitessio/vitess/pull/13461) +### Performance +#### TabletManager + * [release-17.0] BaseShowTablesWithSizes: optimize MySQL 8.0 query (#13375) [#13390](https://github.com/vitessio/vitess/pull/13390) +### Release +#### Build/CI + * [release-17.0] Optimize release notes generation to use GitHub Milestones (#13398) [#13622](https://github.com/vitessio/vitess/pull/13622) +#### General + * Back to dev mode after v17.0.0 [#13386](https://github.com/vitessio/vitess/pull/13386) +### Testing +#### Cluster management + * [release-17.0] Deflake `TestPlannedReparentShardPromoteReplicaFail` (#13548) [#13550](https://github.com/vitessio/vitess/pull/13550) + * [release-17.0] Flaky tests: Fix wrangler tests (#13568) [#13572](https://github.com/vitessio/vitess/pull/13572) +#### General + * [release-17.0] Upgrade-downgrade test fix: Remove throttler flags in `vttablet-up.sh` [#13516](https://github.com/vitessio/vitess/pull/13516) +#### Query Serving + * [release-17.0] Deflake `TestQueryTimeoutWithDual` test (#13405) [#13410](https://github.com/vitessio/vitess/pull/13410) + * [release-17.0] Fix `TestGatewayBufferingWhileReparenting` flakiness (#13469) [#13499](https://github.com/vitessio/vitess/pull/13499) + * [release-17.0] fix TestQueryTimeoutWithTables flaky test (#13579) [#13586](https://github.com/vitessio/vitess/pull/13586) +#### VTorc + * [release-17.0]: Fix flakiness in VTOrc tests (#13489) [#13527](https://github.com/vitessio/vitess/pull/13527) + diff --git a/changelog/17.0/17.0.1/release_notes.md b/changelog/17.0/17.0.1/release_notes.md new file mode 100644 index 00000000000..21b5a562ab2 --- /dev/null +++ b/changelog/17.0/17.0.1/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v17.0.1 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/17.0/17.0.1/changelog.md). + +The release includes 23 merged Pull Requests. + +Thanks to all our contributors: @GuptaManan100, @app/vitess-bot, @frouioui, @mattlord, @shlomi-noach + diff --git a/changelog/17.0/17.0.2/changelog.md b/changelog/17.0/17.0.2/changelog.md new file mode 100644 index 00000000000..b51ebd69637 --- /dev/null +++ b/changelog/17.0/17.0.2/changelog.md @@ -0,0 +1,28 @@ +# Changelog of Vitess v17.0.2 + +### Bug fixes +#### Backup and Restore + * [release-17.0] Address vttablet memory usage with backups to Azure Blob Service (#13770) [#13775](https://github.com/vitessio/vitess/pull/13775) + * [release-17.0] Do not drain tablet in incremental backup (#13773) [#13789](https://github.com/vitessio/vitess/pull/13789) +#### Cluster management + * [release-17.0] Flaky tests: Fix race in memory topo (#13559) [#13577](https://github.com/vitessio/vitess/pull/13577) +#### Evalengine + * [release-17.0] Fix a number of encoding issues when evaluating expressions with the evalengine (#13509) [#13551](https://github.com/vitessio/vitess/pull/13551) + * [release-17.0] fastparse: Fix bug in overflow detection (#13702) [#13705](https://github.com/vitessio/vitess/pull/13705) +#### Online DDL + * v17 backport: Fix closed channel panic in Online DDL cutover [#13731](https://github.com/vitessio/vitess/pull/13731) + * v17 backport: Solve RevertMigration.Comment read/write concurrency issue [#13734](https://github.com/vitessio/vitess/pull/13734) +#### Query Serving + * [release-17.0] Fix flaky vtgate test TestInconsistentStateDetectedBuffering (#13560) [#13575](https://github.com/vitessio/vitess/pull/13575) + * [release-17.0] vtgate: fix race condition iterating tables and views from schema tracker (#13673) [#13796](https://github.com/vitessio/vitess/pull/13796) +### CI/Build +#### Backup and Restore + * [release-17.0] Fixing `backup_pitr` flaky tests via wait-for loop on topo reads (#13781) [#13790](https://github.com/vitessio/vitess/pull/13790) +#### Online DDL + * [release-17.0] CI: fix onlineddl_scheduler flakiness (#13754) [#13760](https://github.com/vitessio/vitess/pull/13760) +### Release +#### General + * Back to dev mode after v17.0.1 [#13663](https://github.com/vitessio/vitess/pull/13663) +### Testing +#### Build/CI + * [release-17.0] Flakes: Delete VTDATAROOT files in reparent test teardown within CI (#13793) [#13798](https://github.com/vitessio/vitess/pull/13798) diff --git a/changelog/17.0/17.0.2/release_notes.md b/changelog/17.0/17.0.2/release_notes.md new file mode 100644 index 00000000000..1d3fd71f7ef --- /dev/null +++ b/changelog/17.0/17.0.2/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v17.0.2 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/17.0/17.0.2/changelog.md). + +The release includes 13 merged Pull Requests. + +Thanks to all our contributors: @app/vitess-bot, @frouioui, @shlomi-noach + diff --git a/changelog/17.0/17.0.2/summary.md b/changelog/17.0/17.0.2/summary.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/changelog/17.0/README.md b/changelog/17.0/README.md new file mode 100644 index 00000000000..641a52e1079 --- /dev/null +++ b/changelog/17.0/README.md @@ -0,0 +1,12 @@ +## v17.0 +* **[17.0.2](17.0.2)** + * [Changelog](17.0.2/changelog.md) + * [Release Notes](17.0.2/release_notes.md) + +* **[17.0.1](17.0.1)** + * [Changelog](17.0.1/changelog.md) + * [Release Notes](17.0.1/release_notes.md) + +* **[17.0.0](17.0.0)** + * [Changelog](17.0.0/changelog.md) + * [Release Notes](17.0.0/release_notes.md) diff --git a/changelog/18.0/18.0.0/summary.md b/changelog/18.0/18.0.0/summary.md new file mode 100644 index 00000000000..61cf6be1cf8 --- /dev/null +++ b/changelog/18.0/18.0.0/summary.md @@ -0,0 +1,168 @@ +## Summary + +### Table of Contents + +- **[Major Changes](#major-changes)** + - **[Breaking Changes](#breaking-changes)** + - [Local examples now use etcd v3 storage and API](#local-examples-etcd-v3) + - **[New command line flags and behavior](#new-flag)** + - [VTOrc flag `--allow-emergency-reparent`](#new-flag-toggle-ers) + - [VTOrc flag `--change-tablets-with-errant-gtid-to-drained`](#new-flag-errant-gtid-convert) + - [ERS sub flag `--wait-for-all-tablets`](#new-ers-subflag) + - **[VTAdmin](#vtadmin)** + - [Updated to node v18.16.0](#update-node) + - **[Deprecations and Deletions](#deprecations-and-deletions)** + - [Deprecated Flags](#deprecated-flags) + - [Deleted `V3` planner](#deleted-v3) + - [Deleted `k8stopo`](#deleted-k8stopo) + - [Deleted `vtgr`](#deleted-vtgr) + - **[New stats](#new-stats)** + - [VTGate Vindex unknown parameters](#vtgate-vindex-unknown-parameters) + - [VTBackup stat `PhaseStatus`](#vtbackup-stat-phase-status) + - **[VTTablet](#vttablet)** + - [VTTablet: New ResetSequences RPC](#vttablet-new-rpc-reset-sequences) + - **[Docker](#docker)** + - [Debian: Bookworm added and made default](#debian-bookworm) + - [Debian: Buster removed](#debian-buster) + - **[Durability Policies](#durability-policies)** + - [New Durability Policies](#new-durability-policies) + +## Major Changes + +### Breaking Changes + +#### Local examples now use etcd v3 storage and API +In previous releases the [local examples](https://github.com/vitessio/vitess/tree/main/examples/local) were +explicitly using etcd v2 storage (`etcd --enable-v2=true`) and API (`ETCDCTL_API=2`) mode. We have now +removed this legacy etcd usage and instead use the new (default) etcd v3 storage and API. Please see +[PR #13791](https://github.com/vitessio/vitess/pull/13791) for additional info. If you are using the local +examples in any sort of long-term non-testing capacity, then you will need to explicitly use the v2 storage +and API mode or [migrate your existing data from v2 to v3](https://etcd.io/docs/v3.5/tutorials/how-to-migrate/). + +### New command line flags and behavior + +#### VTOrc flag `--allow-emergency-reparent` + +VTOrc has a new flag `--allow-emergency-reparent` that allows the users to toggle the ability of VTOrc to run emergency +reparent operations. The users that want VTOrc to fix the replication issues, but don't want it to run any reparents +should start using this flag. By default, VTOrc will be able to run `EmergencyReparentShard`. The users must specify the +flag to `false` to change the behaviour. + +#### VTOrc flag `--change-tablets-with-errant-gtid-to-drained` + +VTOrc has a new flag `--change-tablets-with-errant-gtid-to-drained` that allows users to choose whether VTOrc should change the +tablet type of tablets with errant GTIDs to `DRAINED`. By default, the flag is false. + +This feature allows users to configure VTOrc such that any tablet that encounters errant GTIDs is automatically taken out of the +serving graph. These tablets can then be inspected for what the errant GTIDs are, and once fixed, they can rejoin the cluster. + +#### ERS sub flag `--wait-for-all-tablets` + +Running `EmergencyReparentShard` from the vtctldclient has a new sub-flag `--wait-for-all-tablets` that makes `EmergencyReparentShard` wait +for a response from all the tablets. Originally `EmergencyReparentShard` was meant only to be run when a primary tablet is unreachable. +We have realized now that there are cases when the replication is broken but all the tablets are reachable. In these cases, it is advisable to +call `EmergencyReparentShard` with `--wait-for-all-tablets` so that it doesn't ignore one of the tablets. + +### VTAdmin + +#### vtadmin-web updated to node v18.16.0 (LTS) + +Building vtadmin-web now requires node >= v18.16.0 (LTS). Breaking changes from v16 to v18 are listed +in https://nodejs.org/en/blog/release/v18.0.0, but none apply to VTAdmin. Full details on v18.16.0 are listed +here https://nodejs.org/en/blog/release/v18.16.0. + +### Deprecations and Deletions + +#### Deprecated Command Line Flags + +Throttler related `vttablet` flags: + +- `--enable-lag-throttler` is now removed after being deprecated in `v17.0` +- `--throttle_threshold` is deprecated and will be removed in `v19.0` +- `--throttle_metrics_query` is deprecated and will be removed in `v19.0` +- `--throttle_metrics_threshold` is deprecated and will be removed in `v19.0` +- `--throttle_check_as_check_self` is deprecated and will be removed in `v19.0` +- `--throttler-config-via-topo` is deprecated after assumed `true` in `v17.0`. It will be removed in a future version. + +Cache related `vttablet` flags: + +- `--queryserver-config-query-cache-lfu` is deprecated and will be removed in `v19.0`. The query cache always uses a LFU implementation now. +- `--queryserver-config-query-cache-size` is deprecated and will be removed in `v19.0`. This option only applied to LRU caches, which are now unsupported. + +Buffering related `vtgate` flags: + +- `--buffer_implementation` is deprecated and will be removed in `v19.0` + +Cache related `vtgate` flags: + +- `--gate_query_cache_lfu` is deprecated and will be removed in `v19.0`. The query cache always uses a LFU implementation now. +- `--gate_query_cache_size` is deprecated and will be removed in `v19.0`. This option only applied to LRU caches, which are now unsupported. + +VTGate flag: + +- `--schema_change_signal_user` is deprecated and will be removed in `v19.0` + +#### Deleted `v3` planner + +The `Gen4` planner has been the default planner since Vitess 14. The `v3` planner was deprecated in Vitess 15 and has now been removed in this release. + +#### Deleted `k8stopo` + +The `k8stopo` has been deprecated in Vitess 17, also see https://github.com/vitessio/vitess/issues/13298. With Vitess 18 +the `k8stopo` has been removed. + +#### Deleted `vtgr` + +The `vtgr` has been deprecated in Vitess 17, also see https://github.com/vitessio/vitess/issues/13300. With Vitess 18 `vtgr` has been removed. + +### New stats + +#### VTGate Vindex unknown parameters + +The VTGate stat `VindexUnknownParameters` gauges unknown Vindex parameters found in the latest VSchema pulled from the topology. + +#### VTBackup `PhaseStatus` stat + +`PhaseStatus` reports a 1 (active) or a 0 (inactive) for each of the following phases and statuses: + + * `CatchUpReplication` phase has statuses `Stalled` and `Stopped`. + * `Stalled` is set to `1` when replication stops advancing. + * `Stopped` is set to `1` when replication stops before `vtbackup` catches up with the primary. + +### VTTablet + +#### New ResetSequences rpc + +A new vttablet RPC `ResetSequences` has been added, which is being used by `MoveTables` and `Migrate` for workflows +where a `sequence` table is being moved (https://github.com/vitessio/vitess/pull/13238). This has an impact on the +Vitess upgrade process from an earlier version if you need to use such a workflow before the entire cluster is upgraded. + +Any MoveTables or Migrate workflow that moves a sequence table should only be run after all vitess components have been +upgraded, and no upgrade should be done while such a workflow is in progress. + +#### New Dry-run/monitoring-only mode for the transaction throttler + +A new CLI flag `--tx-throttler-dry-run` to set the Transaction Throttler to monitoring-only/dry-run mode has been added. +If the transaction throttler is enabled with `--enable-tx-throttler` and the new dry-run flag is also specified, the +tablet will not actually throttle any transactions; however, it will increase the counters for transactions throttled +(`vttablet_transaction_throttler_throttled`). This allows users to deploy the transaction throttler in production and +gain observability on how much throttling would take place, without actually throttling any requests. + +### Docker + +#### Bookworm added and made default + +Bookworm was released on 2023-06-10, and will be the new default base container for Docker builds. +Bullseye images will still be built and available as long as the OS build is current, tagged with the `-bullseye` suffix. + +#### Buster removed + +Buster LTS supports will stop in June 2024, and Vitess v18.0 will be supported through October 2024. +To prevent supporting a deprecated buster build for several months after June 2024, we are preemptively +removing Vitess support. + +### Durability Policies + +#### New Durability Policies + +2 new inbuilt durability policies have been added to Vitess in this release namely `semi_sync_with_rdonly_ack` and `cross_cell_with_rdonly_ack`. These policies are exactly like `semi_sync` and `cross_cell` respectively, and differ just in the part where the rdonly tablets can also send semi-sync ACKs. \ No newline at end of file diff --git a/changelog/18.0/README.md b/changelog/18.0/README.md new file mode 100644 index 00000000000..5b691530084 --- /dev/null +++ b/changelog/18.0/README.md @@ -0,0 +1,2 @@ +## v18.0 +* **[18.0.0](18.0.0)** diff --git a/doc/releasenotes/7_0_0_release_notes.md b/changelog/7.0/7.0.0/release_notes.md similarity index 100% rename from doc/releasenotes/7_0_0_release_notes.md rename to changelog/7.0/7.0.0/release_notes.md diff --git a/doc/releasenotes/7_0_1_release_notes.md b/changelog/7.0/7.0.1/release_notes.md similarity index 100% rename from doc/releasenotes/7_0_1_release_notes.md rename to changelog/7.0/7.0.1/release_notes.md diff --git a/doc/releasenotes/7_0_2_release_notes.md b/changelog/7.0/7.0.2/release_notes.md similarity index 100% rename from doc/releasenotes/7_0_2_release_notes.md rename to changelog/7.0/7.0.2/release_notes.md diff --git a/doc/releasenotes/7_0_3_release_notes.md b/changelog/7.0/7.0.3/release_notes.md similarity index 100% rename from doc/releasenotes/7_0_3_release_notes.md rename to changelog/7.0/7.0.3/release_notes.md diff --git a/changelog/7.0/README.md b/changelog/7.0/README.md new file mode 100644 index 00000000000..7177c6be673 --- /dev/null +++ b/changelog/7.0/README.md @@ -0,0 +1,12 @@ +## v7.0 +* **[7.0.3](7.0.3)** + * [Release Notes](7.0.3/release_notes.md) + +* **[7.0.2](7.0.2)** + * [Release Notes](7.0.2/release_notes.md) + +* **[7.0.1](7.0.1)** + * [Release Notes](7.0.1/release_notes.md) + +* **[7.0.0](7.0.0)** + * [Release Notes](7.0.0/release_notes.md) diff --git a/doc/releasenotes/8_0_0_release_notes.md b/changelog/8.0/8.0.0/release_notes.md similarity index 100% rename from doc/releasenotes/8_0_0_release_notes.md rename to changelog/8.0/8.0.0/release_notes.md diff --git a/changelog/8.0/README.md b/changelog/8.0/README.md new file mode 100644 index 00000000000..fa359e7302f --- /dev/null +++ b/changelog/8.0/README.md @@ -0,0 +1,3 @@ +## v8.0 +* **[8.0.0](8.0.0)** + * [Release Notes](8.0.0/release_notes.md) diff --git a/doc/releasenotes/9_0_0_release_notes.md b/changelog/9.0/9.0.0/release_notes.md similarity index 100% rename from doc/releasenotes/9_0_0_release_notes.md rename to changelog/9.0/9.0.0/release_notes.md diff --git a/doc/releasenotes/9_0_1_release_notes.md b/changelog/9.0/9.0.1/release_notes.md similarity index 100% rename from doc/releasenotes/9_0_1_release_notes.md rename to changelog/9.0/9.0.1/release_notes.md diff --git a/doc/releasenotes/9_0_2_release_notes.md b/changelog/9.0/9.0.2/release_notes.md similarity index 100% rename from doc/releasenotes/9_0_2_release_notes.md rename to changelog/9.0/9.0.2/release_notes.md diff --git a/changelog/9.0/README.md b/changelog/9.0/README.md new file mode 100644 index 00000000000..17f49aa3b47 --- /dev/null +++ b/changelog/9.0/README.md @@ -0,0 +1,9 @@ +## v9.0 +* **[9.0.2](9.0.2)** + * [Release Notes](9.0.2/release_notes.md) + +* **[9.0.1](9.0.1)** + * [Release Notes](9.0.1/release_notes.md) + +* **[9.0.0](9.0.0)** + * [Release Notes](9.0.0/release_notes.md) diff --git a/changelog/README.md b/changelog/README.md new file mode 100644 index 00000000000..ffb8d698b28 --- /dev/null +++ b/changelog/README.md @@ -0,0 +1,13 @@ +## Releases +* [18.0](18.0) +* [17.0](17.0) +* [16.0](16.0) +* [15.0](15.0) +* [14.0](14.0) +* [13.0](13.0) +* [12.0](12.0) +* [11.0](11.0) +* [10.0](10.0) +* [9.0](9.0) +* [8.0](8.0) +* [7.0](7.0) \ No newline at end of file diff --git a/config/init_db.sql b/config/init_db.sql index 7be4de6f7ea..d04960633de 100644 --- a/config/init_db.sql +++ b/config/init_db.sql @@ -1,5 +1,4 @@ -# This file is executed immediately after mysql_install_db, -# to initialize a fresh data directory. +# This file is executed immediately after initializing a fresh data directory. ############################################################################### # WARNING: This sql is *NOT* safe for production use, @@ -11,6 +10,12 @@ ############################################################################### # Equivalent of mysql_secure_installation ############################################################################### +# We need to ensure that super_read_only is disabled so that we can execute +# these commands. Note that disabling it does NOT disable read_only. +# We save the current value so that we only re-enable it at the end if it was +# enabled before. +SET @original_super_read_only=IF(@@global.super_read_only=1, 'ON', 'OFF'); +SET GLOBAL super_read_only='OFF'; # Changes during the init db should not make it to the binlog. # They could potentially create errant transactions on replicas. @@ -77,3 +82,9 @@ FLUSH PRIVILEGES; RESET SLAVE ALL; RESET MASTER; + +# custom sql is used to add custom scripts like creating users/passwords. We use it in our tests +# {{custom_sql}} + +# We need to set super_read_only back to what it was before +SET GLOBAL super_read_only=IFNULL(@original_super_read_only, 'ON'); diff --git a/config/mycnf/default.cnf b/config/mycnf/default.cnf index 0a375cb69c7..c17165f9959 100644 --- a/config/mycnf/default.cnf +++ b/config/mycnf/default.cnf @@ -14,9 +14,6 @@ port = {{.MysqlPort}} secure-file-priv = {{.SecureFilePriv}} {{end}} -# all db instances should start in read-only mode - once the db is started and -# fully functional, we'll push it into read-write mode -read-only server-id = {{.ServerID}} # all db instances should skip starting replication threads - that way we can do any diff --git a/config/mycnf/mariadb10.cnf b/config/mycnf/mariadb10.cnf index 1912cd8e154..120bd7b7d00 100644 --- a/config/mycnf/mariadb10.cnf +++ b/config/mycnf/mariadb10.cnf @@ -39,3 +39,5 @@ slave_net_timeout = 60 character_set_server = utf8 collation_server = utf8_general_ci +# All MariaDB instances should start in read-only mode +read-only diff --git a/config/mycnf/mysql57.cnf b/config/mycnf/mysql57.cnf index 7a8c45a187c..44c462749a7 100644 --- a/config/mycnf/mysql57.cnf +++ b/config/mycnf/mysql57.cnf @@ -32,3 +32,7 @@ plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisy rpl_semi_sync_master_timeout = 1000000000000000000 rpl_semi_sync_master_wait_no_slave = 1 +# In order to protect against any errand GTIDs we will start the mysql instance +# in super-read-only mode. +super-read-only + diff --git a/config/mycnf/mysql80.cnf b/config/mycnf/mysql80.cnf index 39fab576533..13447a7de0a 100644 --- a/config/mycnf/mysql80.cnf +++ b/config/mycnf/mysql80.cnf @@ -28,3 +28,7 @@ plugin-load = rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisy loose_rpl_semi_sync_master_timeout = 1000000000000000000 loose_rpl_semi_sync_master_wait_no_slave = 1 +# In order to protect against any errand GTIDs we will start the mysql instance +# in super-read-only mode. +super-read-only + diff --git a/config/mycnf/test-suite.cnf b/config/mycnf/test-suite.cnf index e57368a41db..e6d0992f6e6 100644 --- a/config/mycnf/test-suite.cnf +++ b/config/mycnf/test-suite.cnf @@ -23,3 +23,6 @@ sql_mode = STRICT_TRANS_TABLES # set a short heartbeat interval in order to detect failures quickly slave_net_timeout = 4 +# Disabling `super-read-only`. `test-suite` is mainly used for `vttestserver`. Since `vttestserver` uses a single MySQL for primary and replicas, +# so it is not possible to run it with `super-read-only`. Therefore, we are disabling it. +super-read-only = false diff --git a/config/tablet/default.yaml b/config/tablet/default.yaml index 427465d4598..f996bb04737 100644 --- a/config/tablet/default.yaml +++ b/config/tablet/default.yaml @@ -106,8 +106,8 @@ queryCacheSize: 5000 # queryserver-config-query-cache-size schemaReloadIntervalSeconds: 1800 # queryserver-config-schema-reload-time watchReplication: false # watch_replication_stream terseErrors: false # queryserver-config-terse-errors +truncateErrorLen: 0 # queryserver-config-truncate-error-len messagePostponeParallelism: 4 # queryserver-config-message-postpone-cap -cacheResultFields: true # enable-query-plan-field-caching # The following flags are currently not supported. @@ -118,6 +118,7 @@ cacheResultFields: true # enable-query-plan-field-caching # enable-tx-throttler # tx-throttler-config # tx-throttler-healthcheck-cells +# tx-throttler-tablet-types # enable_transaction_limit # enable_transaction_limit_dry_run # transaction_limit_per_user diff --git a/dev.env b/dev.env index 7426dde45f2..b90ef7eed40 100644 --- a/dev.env +++ b/dev.env @@ -25,9 +25,6 @@ source ./build.env export VTPORTSTART=6700 -# Add chromedriver to path for Selenium tests. -PATH=$(prepend_path "$PATH" "$VTROOT/dist/chromedriver") - # Node path. PATH=$(prepend_path "$PATH" "$VTROOT/dist/node/bin") export PATH diff --git a/doc/VIT-03-report-security-audit.pdf b/doc/VIT-03-report-security-audit.pdf new file mode 100644 index 00000000000..500ee693377 Binary files /dev/null and b/doc/VIT-03-report-security-audit.pdf differ diff --git a/doc/design-docs/ReplicationLagBasedThrottlingOfTransactions.md b/doc/design-docs/ReplicationLagBasedThrottlingOfTransactions.md index ad1d98b151f..68686d4f72f 100644 --- a/doc/design-docs/ReplicationLagBasedThrottlingOfTransactions.md +++ b/doc/design-docs/ReplicationLagBasedThrottlingOfTransactions.md @@ -30,7 +30,13 @@ If this is not specified a [default](https://github.com/vitessio/vitess/tree/mai * *tx-throttler-healthcheck-cells* A comma separated list of datacenter cells. The throttler will only monitor -the non-RDONLY replicas found in these cells for replication lag. +the replicas found in these cells for replication lag. + +* *tx-throttler-tablet-types* + +A comma separated list of tablet types. The throttler will only monitor tablets +with these types. Only `replica` and/or `rdonly` types are supported. The default +is `replica`. # Caveats and Known Issues * The throttler keeps trying to explore the maximum rate possible while keeping @@ -39,4 +45,3 @@ lag limit may occasionally be slightly violated. * Transactions are considered homogeneous. There is currently no support for specifying how `expensive` a transaction is. - diff --git a/doc/design-docs/TabletServerParamsAsYAML.md b/doc/design-docs/TabletServerParamsAsYAML.md index 25543ad9018..49d073d1313 100644 --- a/doc/design-docs/TabletServerParamsAsYAML.md +++ b/doc/design-docs/TabletServerParamsAsYAML.md @@ -134,7 +134,6 @@ schemaReloadIntervalSeconds: 1800 # queryserver-config-schema-reload-time watchReplication: false # watch_replication_stream terseErrors: false # queryserver-config-terse-errors messagePostponeParallelism: 4 # queryserver-config-message-postpone-cap -cacheResultFields: true # enable-query-plan-field-caching sanitizeLogMessages: false # sanitize_log_messages @@ -146,6 +145,7 @@ sanitizeLogMessages: false # sanitize_log_messages # enable-tx-throttler # tx-throttler-config # tx-throttler-healthcheck-cells +# tx-throttler-tablet-types # enable_transaction_limit # enable_transaction_limit_dry_run # transaction_limit_per_user diff --git a/doc/design-docs/VTGateBuffering.md b/doc/design-docs/VTGateBuffering.md new file mode 100644 index 00000000000..9155929ea49 --- /dev/null +++ b/doc/design-docs/VTGateBuffering.md @@ -0,0 +1,63 @@ +# Adding buffering to VTGate while switching traffic during a movetables operation + +## Current buffering support in VTGate + +VTGate currently supports buffering of queries during reparenting and resharding operations. This is done by buffering +the failing queries in the tablet gateway layer in vtgate. When a query fails, the reason for the failure is checked, to +see if is due to one of these. + +To assist in diagnosing the root cause a _KeyspaceEventWatcher_ (aka *KEW*) was introduced. This watches the +SrvKeyspace (in a goroutine): if there is a change to the keyspace partitions in the topo it is considered that there is +a resharding operation in progress. The buffering logic subscribes to the keyspace event watcher. + +Otherwise, if there are no tables to serve from, based on the health check results, it is assumed that there is a +cluster event where either the primary is being reparented or if the cluster is being restarted and all tablets are in +the process of starting up. + +If either of these occurs, the _consistent_ flag is set to false for that keyspace. When that happens the keyspace +watcher checks, on every SrvKeyspace update, if the event has got resolved. This can happen when tablets are now +available (in case of a cluster event) or if the partition information indicates that resharding is complete. + +When that happens. the keyspace event watcher publishes an event that the keyspace is now consistent. The buffers are +then drained and the queries retried by the tablet gateway. + +## Adding buffering support for MoveTables + +### Background + +MoveTables does not affect the entire keyspace, just the tables being moved. Even if all tables are being moved there is +no change in existing keyspace or shard configurations. So the KEW doesn't detect a cluster event since the tablets are +still available and shard partitions are unchanged. + +MoveTables moves tables from one keyspace to another. There are two flavors of MoveTables: one where the tables are +moved into all shards in the target keyspace. In Shard-By-Shard Migration user can specify a subset of shards to move +the tables into. + +These are the topo attributes that are affected during a MoveTables (regular or shard-by-shard): + +* *DeniedTables* in a shard's TabletControls. These are used to stop writes to the source keyspace for these tables. + While switching writes we first create these entries, wait for the target to catchup to the source (using gtid + positions), and then update the routing rules to point these tables to the target. When a primary sees a DeniedTables + entry during a DML it will error with an "enforce denied tables". +* *RoutingRules* (for regular movetables) and *ShardRoutingRules* (for shard by shard migration). Routing rules are + pointers for each table being moved to a keyspace. When a MoveTables is initiated, that keyspace is the source + keyspace. After traffic is switched the pointer is changed to point to the target keyspace. If routing rules are + specified, VTGate uses them to decide which keyspace to route each table. + +### Changes + +There are two main changes: + +* The keyspace event watcher is enhanced to look at the topo attributes mentioned above. An SrvVSchema watcher looks for + changes in the Routing Rules. DeniedTables are only in the Shard records in the topo. So any changes to the + DeniedTables would not result in a notification. To get around that we change the traffic switcher to also rebuild + SrvVSchema when DeniedTables are modified. +* The logic to start buffering needs to look for the "enforce denied tables" error that is thrown by the vttablets when + it tries to execute a query on a table being switched. +* We cannot use the current query retry logic which is at the tablet gateway level: meaning the keyspace is already + fixed by the planner and cannot be changed in that layer. We need to add a new retry logic at a higher level (the + _newExecute_ method) and always replan before retrying a query. This also means that we need to bypass the plan cache + while retrying. + + + diff --git a/doc/internal/Overview.md b/doc/internal/README.md similarity index 61% rename from doc/internal/Overview.md rename to doc/internal/README.md index e1cb74e4ddd..7ed4950e877 100644 --- a/doc/internal/Overview.md +++ b/doc/internal/README.md @@ -1,5 +1,7 @@ # Internal Documentation -The documents in this category document internal processes which are taken care of by the Vitess Team e.g. re-publishing the website [vitess.io](https://vitess.io) or creating a new release. +The documents in this category document internal processes which are taken care of by the Vitess Team e.g. creating a new release. We have put them here to increase transparency and make it easy for others to follow and improve processes. + +- [**Release**](./release/README.md) \ No newline at end of file diff --git a/doc/internal/.images/post-release-01.png b/doc/internal/release/.images/post-release-01.png similarity index 100% rename from doc/internal/.images/post-release-01.png rename to doc/internal/release/.images/post-release-01.png diff --git a/doc/internal/.images/release-01.png b/doc/internal/release/.images/release-01.png similarity index 100% rename from doc/internal/.images/release-01.png rename to doc/internal/release/.images/release-01.png diff --git a/doc/internal/.images/release-02.png b/doc/internal/release/.images/release-02.png similarity index 100% rename from doc/internal/.images/release-02.png rename to doc/internal/release/.images/release-02.png diff --git a/doc/internal/.images/release-03.png b/doc/internal/release/.images/release-03.png similarity index 100% rename from doc/internal/.images/release-03.png rename to doc/internal/release/.images/release-03.png diff --git a/doc/internal/.images/release-04.png b/doc/internal/release/.images/release-04.png similarity index 100% rename from doc/internal/.images/release-04.png rename to doc/internal/release/.images/release-04.png diff --git a/doc/internal/release/README.md b/doc/internal/release/README.md new file mode 100644 index 00000000000..8f593ee9e66 --- /dev/null +++ b/doc/internal/release/README.md @@ -0,0 +1,13 @@ +# Release Instructions + +This page describes the steps for cutting a new [open source release](https://github.com/vitessio/vitess/releases). + +### Summary + +- [How to Release](./how-to-release.md) +- [Versioning](./versioning.md) +- [Release Branches](./release-branches.md) +- [Release Tags](./release-tags.md) +- [Docker Images](./docker-images.md) +- [Java Packages](./java-packages.md) +- [End Of Life Process](./eol-process.md) diff --git a/doc/internal/release/docker-images.md b/doc/internal/release/docker-images.md new file mode 100644 index 00000000000..75941ca6309 --- /dev/null +++ b/doc/internal/release/docker-images.md @@ -0,0 +1,3 @@ +# Docker Images + +Docker images built automatically on DockerHub and can be found [here](https://hub.docker.com/repository/docker/vitess/lite/). diff --git a/doc/internal/release/eol-process.md b/doc/internal/release/eol-process.md new file mode 100644 index 00000000000..f1d2a343d0f --- /dev/null +++ b/doc/internal/release/eol-process.md @@ -0,0 +1,12 @@ +# End-of-Life Process + +The lifespan of a major version is one year long, after that time, the version has reached its end-of-life. +To properly deprecate a major of Vitess follow the following steps: + +- **Update the website documentation** + > - In the ['Releases' documentation](https://vitess.io/docs/releases/), the EOL version must be moved under the ['Archived Releases' section](https://vitess.io/docs/releases/#archived-releases). + > - The sidebar of the website must be changed. We need to remove the EOL version from it. To do so, we move the version folder onto the `archive` folder. +- **Delete the `Backport To: ...` label** + > - Delete the corresponding label for the EOL version, we do not want to motivate anymore backport to the EOL release branch. +- **Make proper announcement on Slack** + > - Notify the community of this deprecation. \ No newline at end of file diff --git a/doc/internal/ReleaseInstructions.md b/doc/internal/release/how-to-release.md similarity index 65% rename from doc/internal/ReleaseInstructions.md rename to doc/internal/release/how-to-release.md index 6c659041607..450127bd869 100644 --- a/doc/internal/ReleaseInstructions.md +++ b/doc/internal/release/how-to-release.md @@ -1,155 +1,53 @@ -# Release Instructions +# Release Cutover -This page describes the steps for cutting a new [open source release](https://github.com/vitessio/vitess/releases). +In this section we describe our current release process. Below is a summary of this document. -### Summary +- [**Pre-requisite for the release team**](#pre-requisites) +- [**Overview**](#overview) +- [**Pre-Release**](#pre-release) +- [**Release**](#release) +- [**Post-Release**](#post-release) +- [**How To prepare the release of Vitess**](#how-to-prepare-the-release-of-vitess) +- [**How To Release Vitess**](#how-to-release-vitess) +- [**How To Code Freeze**](#how-to-code-freeze) +- [**How To Merge During Code Freeze**](#how-to-merge-during-code-freeze) +- [**Java Packages: Deploy & Release**](#java-packages-deploy--release) -- [Versioning](#versioning) -- [Release Branches](#release-branches) -- [Release Tags](#release-tags) -- [Docker Images](#docker-images) -- [Java Packages](#java-packages) -- [Release Cutover](#release-cutover) -------- +----- -## Versioning - -Our versioning strategy is based on [VEP5](https://github.com/vitessio/enhancements/blob/main/veps/vep-5.md). - -### Major Release (vX) - -A new major release is needed when the public API changes in a -backward-incompatible way -- for example, when removing deprecated interfaces. - -Our public API includes (but is not limited to): - -* The VTGate [RPC interfaces](https://github.com/vitessio/vitess/tree/main/proto). -* The interfaces exposed by the VTGate client library in each language. - -Care must also be taken when changing the format of any data stored by a live -system, such as topology data or Vitess-internal tables (used for sequences, -distributed transactions, etc.). Although this data is considered as internal to -Vitess, if any change breaks the upgrade path for a live system (for example, -requiring that it be shut down and reinitialized from scratch), then it must be -considered as a breaking change. - -### Minor Release (vX.Y) - -A new minor release indicates that functionality has been added or changed in a -backward-compatible way. This should be the majority of normal releases. - -### Patch Release (vX.Y.Z) - -A patch release indicates that only a select set of bugfixes have been -cherry-picked onto the associated minor release. The expectation is that -upgrading by a patch release should be painless (not requiring any config -changes) and safe (isolated from active development on `main`). - -### Pre-Release Labels (vX.Y.Z-labelN) - -Pre-release versions should be labeled with a suffix like `-beta2` or `-rc1`. - -------- - -## Release Branches - -Each major and minor releases (X.Y) should have a [release branch](https://github.com/vitessio/vitess/branches/all?query=release) named -`release-X.Y`. This branch should diverge from `main` when the release -is declared, after which point only bugfix PRs should be cherry-picked onto the branch. -All other activity on `main` will go out with a subsequent major or minor release. - -```shell -git checkout main -git pull --ff-only upstream main - -git checkout -b release-X.Y -git push upstream release-X.Y -``` - -The branches are named `release-X.Y` to distinguish them from point-in-time -tags, which are named `vX.Y.Z`. - -------- - -## Release Tags - -While the release branch is a moving target, release tags mark point-in-time -snapshots of the repository. Essentially, a tag assigns a human-readable name to -a specific Git commit hash. Although it's technically possible to reassign a tag -name to a different hash, we must never do this. - -------- - -## Docker Images - -Docker images built automatically on DockerHub and can be found [here](https://hub.docker.com/repository/docker/vitess/lite/). - -------- - -## Java Packages - -We publish binary packages for our [JDBC driver and Java client on Maven Central](https://search.maven.org/#search|ga|1|g:"io.vitess"). - -To do so, we use the http://oss.sonatype.org/ repository. -New packages must be uploaded there ("deployed") and will be automatically published ("released"). -Once they are released there, they will be automatically synchronized with Maven Central. -The synchronization takes only several minutes, but the update on http://search.maven.org may take up to two hours. - -### Access to oss.sonatype.org - -[Sign up here.](https://issues.sonatype.org/secure/Signup!default.jspa) -Then you must be added as member to our `io.vitess` namespace. -Therefore, file a JIRA ticket with Sonatype to get added ([example for a different namespace](https://issues.sonatype.org/browse/OSSRH-30604)). - -### One-time setup - -#### Set up GPG - -Follow [Sonatype's GPG instructions](https://central.sonatype.org/pages/working-with-pgp-signatures.html). - -Install `gpg-agent` (needed below) e.g. on Ubuntu via: `sudo apt-get install gnupg-agent`. -for Mac you need to install 'gnupg' via 'brew install gnupg' - -#### Login configuration - -Create the `settings.xml` in the `$HOME/.m2/` directory as described in their [instructions](https://central.sonatype.org/pages/apache-maven.html). - -------- - -## Release Cutover - -In this section we describe our current release process. We begin with a list of [**pre-requisite for the release team**](#pre-requisites) and with a short [**overview**](#overview). -The release process is divided into three parts: [**Pre-Release**](#pre-release), [**Release**](#release), [**Post-Release**](#post-release), which are detailed after the overview. - -### Pre-Requisites +## Pre-Requisites This section highlights the different pre-requisites the release team has to meet before releasing. - The tool `gh` must be installed locally and ready to be used. -- You must have access to the Java release, more information in the [**Java Packages**](#java-packages) section. +- You must have access to the Java release, more information in the [**Java Packages**](./java-packages.md) section. - You must be able to create branches and have admin right on the `vitessio/vitess` and `planetscale/vitess-operator` repositories. -### Overview +----- + +## Overview -#### Schedule +### Schedule A new major version of Vitess is released every four months. For each major version there is at least one release candidate, which we release three weeks before the GA version. We usually create the RC1 during the first week of the month, and the GA version three weeks later. -#### Code Freeze +### Code Freeze Before creating RC1, there is a code freeze. Assuming the release of RC1 happens on a Tuesday, the release branch will be frozen Friday of the previous week. This allows us to test that the release branch can be released and avoid discovering unwanted events during the release day. Once the RC1 is released, there are three more weeks to backport bug fixes into the release branches. However, we also proceed to a code freeze the Friday before the GA release. (Assuming GA is on a Tuesday) Regarding patch releases, no code freeze is planned. -#### Tracking Issue for each Release +### Tracking Issue for each Release For each release, it is recommended to create an issue like [this one](https://github.com/vitessio/vitess/issues/10476) to track the current and past progress of a release. It also allows us to document what happened during a release. -### Pre-Release +----- + +## Pre-Release This step happens a few weeks before the actual release (whether it is an RC, GA or a patch release). The main goal of this step is to make sure everything is ready to be released for the release day. @@ -158,11 +56,13 @@ That includes: > - All the Pull Requests that need to be in the release must be reviewed and merged before the code freeze. > - The code freeze usually happens a few days before the release. - **Making sure the people doing the release have access to all the tools and infrastructure needed to do the release.** - > - This includes write access to the Vitess repository and to the Maven repository. + > - This includes write access to the Vitess repository and to the Maven repository. - **Preparing and cleaning the release notes summary.** + > - If the release does not contain significant changes (i.e. a small patch release) then this step can be skipped > - One or more Pull Requests have to be submitted in advance to create and update the release summary. - > - The summary files are located in: `./doc/releasenotes/*_*_*_summary.md`. + > - The summary files are located in: `./changelog/*.0/*.*.*/summary.md`. > - The summary file for a release candidate is the same as the one for the GA release. + > - Make sure to run `go run ./go/tools/releases/releases.go` to update the `changelog` directory with the latest release notes. - **Finishing the blog post, and coordinating with the different organizations for cross-posting. Usually CNCF and PlanetScale. This step applies only for GA releases.** > - The blog post must be finished and reviewed. > - A Pull Request on the website repository of Vitess has to be created so we can easily publish the blog during the release day. @@ -178,13 +78,28 @@ That includes: > - While the Vitess Operator is located in a different repository, we also need to do a release for it. > - The Operator follows the same cycle: RC1 -> GA -> Patches. > - Documentation for the pre-release of the Vitess Operator is available [here](https://github.com/planetscale/vitess-operator/blob/main/docs/release-process.md#prepare-for-release). +- **Update the website documentation.** + > - We want to open a preparatory **draft** Pull Request to update the documentation. + > - There are several pages we want to update: + > - [The releases page](https://vitess.io/docs/releases/): we must add the new release to the list with all its information and link. The links can be broken (404 error) while we are preparing for the release, this is fine. + > - [The local install page](https://vitess.io/docs/get-started/local/): we must use the proper version increment for this guide and the proper SHA. The SHA will have to be modified once the Release Pull Request and the release is tagged is merged. + > - If we are doing a GA or RC release follow the instructions below: + > - There are two scripts in the website repository in `./tools/{ga|rc}_release.sh`, use them to update the website documentation. The scripts automate: + > - For an RC, we need to create a new entry in the sidebar which represents the next version on `main` and mark the version we are releasing as RC. + > - For a GA, we need to mark the version we are releasing as "Stable" and the next one as "Development". + +----- -### Release +## Release On the release day, there are several things to do: +- **Merge the Release Pull Request.** + > - During the code freeze, we created a Release Pull Request. It must be merged. - **Tag the Vitess release.** > - A guide on how to tag a version is available in the [How To Release Vitess](#how-to-release-vitess) section. +- **Update the release notes on `main`.** + > - One Pull Request against `main` must be created, it will contain the new release notes that we are adding in the Release Pull Request. - **Create the corresponding Vitess operator release.** > - Applies only to versions greater or equal to `v14.0.0`. > - If we are doing an RC release, then we will need to create the Vitess Operator RC too. If we are doing a GA release, we're also doing a GA release in the Operator. @@ -193,10 +108,9 @@ On the release day, there are several things to do: > - Applies only to GA releases. > - This step is explained in the [Java Packages: Deploy & Release](#java-packages-deploy--release) section. - **Update the website documentation repository.** - > - Applies only to GA and RC releases. - > - There are two scripts in the website repository in `./tools/{ga|rc}_release.sh`, use them to update the website documentation. The scripts automate: - > - For an RC, we need to create a new version in the sidebar and mark the current version as RC. - > - For a GA, we need to mark the version we are releasing as "Stable" and the next one as "Development". + > - Review the Website Pull Request that was opened during the Pre-Release. + > - The git SHA used in [the local install page](https://vitess.io/docs/get-started/local/) should be updated with the new proper SHA for this release. + > - Merge the Pull Request. - **Publish the blog post on the Vitess website.** > - Applies only to GA releases. > - The corresponding Pull Request was created beforehand during the pre-release. Merge it. @@ -206,21 +120,23 @@ On the release day, there are several things to do: > - After a while, those elements will finish their execution and their status will be green. > - This step is even more important for GA releases as we often include a link to _arewefastyet_ in the blog post. > - The benchmarks need to complete before announcing the blog posts or before they get cross-posted. -- **Update the release notes on `main`.** - > - One Pull Request against `main` must be created, it will contain the new release notes. - **Go back to dev mode on the release branch.** - > - The version constants across the codebase must be updated to `SNAPSHOT`. -- **Build k8s Docker images and publish them** + > - The version constants across the codebase must be updated to `SNAPSHOT`. +- **Build k8s Docker images and publish them.** > - The docker image for `base`, `lite`, etc are built automatically by DockerHub. The k8s images however are dependent on these images and are required to be built manually. > - These images should be built after the `base` image has been built and available on DockerHub. - > - To build and publish these images, run `./release.sh` from the directory `vitess/docker`. + > - To build and publish these images, checkout the new release tag that was just created and run `./release.sh` from the directory `./docker`. -### Post-Release +----- + +## Post-Release Once the release is over, we need to announce it on both Slack and Twitter. We also want to make sure the blog post was cross-posted, if applicable. We need to verify that _arewefastyet_ has finished the benchmark too. -### How to prepare the release of Vitess +----- + +## How to prepare the release of Vitess > In this example our current version is `v14.0.3` and we release the version `v15.0.0`. > Alongside Vitess' release, we also release a new version of the operator. @@ -239,8 +155,8 @@ We need to verify that _arewefastyet_ has finished the benchmark too. ``` 2. Creation of the Release Pull Request. - > This step will create the Release Pull Request that will then be reviewed ahead of the release day. - > The merge commit of that Pull Request will be used during the release day to tag the release. + > This step will create the Release Pull Request that will then be reviewed ahead of the release day. + > The merge commit of that Pull Request will be used during the release day to tag the release. 1. Run the `create_release` script using the Makefile: 1. Release Candidate: ```shell @@ -253,17 +169,18 @@ We need to verify that _arewefastyet_ has finished the benchmark too. The script will prompt you `Pausing so release notes can be added. Press enter to continue`. We are now going to generate the release notes, continue to the next sub-step. - 2. Run the following command to generate the release notes: - 1. Release Candidate: - ```shell - make VERSION="v15.0.0-rc1" FROM="v14.0.3" TO="HEAD" SUMMARY="./doc/releasenotes/15_0_0_summary.md" release-notes - ``` - 2. General Availability: - ```shell - make VERSION="v15.0.0-rc1" FROM="v14.0.3" TO="HEAD" SUMMARY="./doc/releasenotes/15_0_0_summary.md" release-notes - ``` + 2. Run the following command to generate the release notes. Note that you can omit the `--summary` flag if there are no summary. + ```shell + go run ./go/tools/release-notes --version "v15.0.0" --summary "./changelog/15.0/15.0.0/summary.md" + ``` + + > Make sure to also run `go run ./go/tools/releases/releases.go` to update the `./changelog` directory. + + > Important note: The release note generation fetches a lot of data from the GitHub API. You might reach the API request limit. + In which case you should use the `--threads=` flag and set an integer value lower than 10 (the default). + This command will generate the release notes by looking at all the commits between the tag `v14.0.3` and the reference `HEAD`. - It will also use the file located in `./doc/releasenotes/15_0_0_summary.md` to prefix the release notes with a text that the maintainers wrote before the release. + It will also use the file located in `./changelog/15.0/15.0.0/summary.md` to prefix the release notes with a text that the maintainers wrote before the release. Please verify the generated release notes to make sure it is well-formatted and all the bookmarks are generated properly. @@ -271,25 +188,27 @@ We need to verify that _arewefastyet_ has finished the benchmark too. 4. If we are doing an RC release it means we created a new branch from `main`. We need to update `main` with the next SNAPSHOT version. If `main` was on `15.0.0-SNAPSHOT`, we need to update it to `16.0.0-SNAPSHOT`. A simple find and replace in the IDE is sufficient, there only a handful of files that must be changed: `version.go` and several java files. -### How To Release Vitess +----- + +## How To Release Vitess This section is divided into two parts: - [Creation of the tags and release notes](#creation-of-the-tags-and-release-notes). - [Creating Release or Release Candidate on the GitHub UI](#creating-release-or-release-candidate-on-the-github-ui) -#### Creation of the tags and release notes +### Creation of the tags and release notes > This step implies that you have created a [Release Pull Request](#how-to-prepare-the-release-of-vitess) beforehand and that it has been reviewed. > The merge commit of this Release Pull Request will be used to tag the release. -> +> > In this example our current version is `v14.0.3` and we release the version `v15.0.0`. > Alongside Vitess' release, we also release a new version of the operator. > Since we are releasing a release candidate here, the new version of the operator will also be a release candidate. > In this example, the new operator version is `2.8.0`. -> +> > It is important to note that before the RC, there is a code freeze during which we create the release branch. > > The release branch in this example is `release-15.0`. -> +> > The example also assumes that `origin` is the `vitessio/vitess` remote. 1. Fetch `github.com/vitessio/vitess`'s remote. @@ -304,33 +223,33 @@ This section is divided into two parts: git tag v15.0.0 && git tag v0.15.0 && git push origin v15.0.0 && git push origin v0.15.0 ``` -4. Create a Pull Request against the `main` branch with the release notes found in `doc/releasenotes/15_0_0_*.md`. +4. Create a Pull Request against the `main` branch with the release notes found in `./changelog/15.0/15.0.0/15_0_0_*.md`. 5. Run the back to dev mode tool. ```shell make BASE_BRANCH="release-15.0" BASE_REMOTE="origin" RELEASE_VERSION="15.0.0-rc1" DEV_VERSION="15.0.0-SNAPSHOT" back_to_dev_mode ``` > You will then need to follow the instructions given by the output of the back_to_dev_mode Makefile command. You will need to push the newly created branch and open a Pull Request. - + 6. Release the tag on GitHub UI as explained in the following section. -#### Creating Release or Release Candidate on the GitHub UI +### Creating Release or Release Candidate on the GitHub UI > In the below steps, we use `v8.0.0` and `v9.0.0` as an example. -##### 1. Open the releases page +#### 1. Open the releases page On Vitess' GitHub repository main page, click on Code -> [Releases](https://github.com/vitessio/vitess/releases). ![alt text](.images/release-01.png) -##### 2. Draft a new release +#### 2. Draft a new release On the Releases page, click on `Draft a new release`. ![alt text](.images/release-02.png) -##### 3. Tag a new release +#### 3. Tag a new release When drafting a new release, we are asked to choose the release's tag and branch. We format the tag this way: `v9.0.0`. We append `-rcN` to the tag name for release candidates, @@ -338,7 +257,7 @@ with `N` being the increment of the release candidate. ![alt text](.images/release-03.png) -##### 4. Add release notes and release +#### 4. Add release notes and release Copy/paste the previously built Release Notes into the description of the release. @@ -348,7 +267,9 @@ And finally, click on `Publish release`. ![alt text](.images/release-04.png) -### How To Code Freeze +----- + +## How To Code Freeze In this example we are going to do a code freeze on the `release-15.0` branch. If we are doing a release candidate, there won't be a branch yet, hence we need to create it. @@ -376,10 +297,14 @@ Finally, let's run the code freeze script: The script will prompt the command that will allow you to push the code freeze change. Once pushed, open a PR that will be merged on `release-15.0`. -### How To Merge During Code Freeze +Remember, you should also disable the Launchable integration from the newly created release branch. + +----- + +## How To Merge During Code Freeze > **Warning:** It is not advised to merge a PR during code-freeze. If it is deemed absolutely necessary, then the following steps can be followed. - + The PR that needs to be merged will be failing on the `Code Freeze` CI. To merge this PR, we'll have to mark this CI action as not required. You will need administrator privileges on the vitess repository to be able to make this change. @@ -390,15 +315,17 @@ You will need administrator privileges on the vitess repository to be able to ma 5. Within this list find `Code Freeze` and click on the cross next to it to remove it from this list. 6. Save your changes on the bottom of the page. 7. Refresh the page of the PR, and you should be able to merge it. -8. After merging the PR, you need to do 2 more things - - 1. Add `Code Freeze` back as a required check. - 2. Check if the release PR has any merge conflicts. If it does, fix them and push. +8. After merging the PR, you need to do 2 more things - + 1. Add `Code Freeze` back as a required check. + 2. Check if the release PR has any merge conflicts. If it does, fix them and push. + +----- -### Java Packages: Deploy & Release +## Java Packages: Deploy & Release > **Warning:** This section's steps need to be executed only when releasing a new major version of Vitess, > or if the Java packages changed from one minor/patch version to another. -> +> > For this example, we assume we juste released `v12.0.0`. 1. Checkout to the release commit. @@ -406,7 +333,7 @@ You will need administrator privileges on the vitess repository to be able to ma git checkout v12.0.0 ``` -2. Run `gpg-agent` to avoid that Maven will constantly prompt you for the password of your private key. +2. Run `gpg-agent` to avoid that Maven will constantly prompt you for the password of your private key. Note that this can print error messages that can be ignored on Mac. ```bash eval $(gpg-agent --daemon --no-grab --write-env-file $HOME/.gpg-agent-info) @@ -422,10 +349,12 @@ You will need administrator privileges on the vitess repository to be able to ma 4. Deploy (upload) the Java code to the oss.sonatype.org repository: - > **Warning:** After the deployment, the Java packages will be automatically released. Once released, you cannot delete them. The only option is to upload a newer version (e.g. increment the patch level).

+ > **Warning:** After the deployment, the Java packages will be automatically released. Once released, you cannot delete them. The only option is to upload a newer version (e.g. increment the patch level).

```bash + cd ./java/ mvn clean deploy -P release -DskipTests cd .. ``` + 5. It will take some time for artifacts to appear on [maven directory](https://mvnrepository.com/artifact/io.vitess/vitess-client) diff --git a/doc/internal/release/java-packages.md b/doc/internal/release/java-packages.md new file mode 100644 index 00000000000..3b3d2a38472 --- /dev/null +++ b/doc/internal/release/java-packages.md @@ -0,0 +1,27 @@ +# Java Packages + +We publish binary packages for our [JDBC driver and Java client on Maven Central](https://search.maven.org/#search|ga|1|g:"io.vitess"). + +To do so, we use the http://oss.sonatype.org/ repository. +New packages must be uploaded there ("deployed") and will be automatically published ("released"). +Once they are released there, they will be automatically synchronized with Maven Central. +The synchronization takes only several minutes, but the update on http://search.maven.org may take up to two hours. + +## Access to oss.sonatype.org + +[Sign up here.](https://issues.sonatype.org/secure/Signup!default.jspa) +Then you must be added as member to our `io.vitess` namespace. +Therefore, file a JIRA ticket with Sonatype to get added ([example for a different namespace](https://issues.sonatype.org/browse/OSSRH-30604)). + +## One-time setup + +### Set up GPG + +Follow [Sonatype's GPG instructions](https://central.sonatype.org/pages/working-with-pgp-signatures.html). + +Install `gpg-agent` (needed below) e.g. on Ubuntu via: `sudo apt-get install gnupg-agent`. +for Mac you need to install 'gnupg' via 'brew install gnupg' + +### Login configuration + +Create the `settings.xml` in the `$HOME/.m2/` directory as described in their [instructions](https://central.sonatype.org/pages/apache-maven.html). diff --git a/doc/internal/release/release-branches.md b/doc/internal/release/release-branches.md new file mode 100644 index 00000000000..876ec9070d3 --- /dev/null +++ b/doc/internal/release/release-branches.md @@ -0,0 +1,17 @@ +# Release Branches + +Each major and minor releases (X.Y) should have a [release branch](https://github.com/vitessio/vitess/branches/all?query=release) named +`release-X.Y`. This branch should diverge from `main` when the release +is declared, after which point only bugfix PRs should be cherry-picked onto the branch. +All other activity on `main` will go out with a subsequent major or minor release. + +```shell +git checkout main +git pull --ff-only upstream main + +git checkout -b release-X.Y +git push upstream release-X.Y +``` + +The branches are named `release-X.Y` to distinguish them from point-in-time +tags, which are named `vX.Y.Z`. \ No newline at end of file diff --git a/doc/internal/release/release-tags.md b/doc/internal/release/release-tags.md new file mode 100644 index 00000000000..4136df1bbb9 --- /dev/null +++ b/doc/internal/release/release-tags.md @@ -0,0 +1,6 @@ +# Release Tags + +While the release branch is a moving target, release tags mark point-in-time +snapshots of the repository. Essentially, a tag assigns a human-readable name to +a specific Git commit hash. Although it's technically possible to reassign a tag +name to a different hash, we must never do this. \ No newline at end of file diff --git a/doc/internal/release/versioning.md b/doc/internal/release/versioning.md new file mode 100644 index 00000000000..b760e32d1b5 --- /dev/null +++ b/doc/internal/release/versioning.md @@ -0,0 +1,36 @@ +# Versioning + +Our versioning strategy is based on [VEP5](https://github.com/vitessio/enhancements/blob/main/veps/vep-5.md). + +## Major Release (vX) + +A new major release is needed when the public API changes in a +backward-incompatible way -- for example, when removing deprecated interfaces. + +Our public API includes (but is not limited to): + +* The VTGate [RPC interfaces](https://github.com/vitessio/vitess/tree/main/proto). +* The interfaces exposed by the VTGate client library in each language. + +Care must also be taken when changing the format of any data stored by a live +system, such as topology data or Vitess-internal tables (used for sequences, +distributed transactions, etc.). Although this data is considered as internal to +Vitess, if any change breaks the upgrade path for a live system (for example, +requiring that it be shut down and reinitialized from scratch), then it must be +considered as a breaking change. + +## Minor Release (vX.Y) + +A new minor release indicates that functionality has been added or changed in a +backward-compatible way. This should be the majority of normal releases. + +## Patch Release (vX.Y.Z) + +A patch release indicates that only a select set of bugfixes have been +cherry-picked onto the associated minor release. The expectation is that +upgrading by a patch release should be painless (not requiring any config +changes) and safe (isolated from active development on `main`). + +## Pre-Release Labels (vX.Y.Z-labelN) + +Pre-release versions should be labeled with a suffix like `-beta2` or `-rc1`. \ No newline at end of file diff --git a/doc/releasenotes/17_0_0_summary.md b/doc/releasenotes/17_0_0_summary.md deleted file mode 100644 index a45bccb19ca..00000000000 --- a/doc/releasenotes/17_0_0_summary.md +++ /dev/null @@ -1,208 +0,0 @@ -## Summary - -### Table of Contents - -- **[Major Changes](#major-changes)** - - **[Breaking Changes](#breaking-changes)** - - [Dedicated stats for VTGate Prepare operations](#dedicated-vtgate-prepare-stats) - - **[New command line flags and behavior](#new-flag)** - - [Builtin backup: read buffering flags](#builtin-backup-read-buffering-flags) - - **[New stats](#new-stats)** - - [Detailed backup and restore stats](#detailed-backup-and-restore-stats) - - [VTtablet Error count with code ](#vttablet-error-count-with-code) - - **[Deprecations and Deletions](#deprecations-and-deletions)** - - [Deprecated Stats](#deprecated-stats) - -##
Major Changes - -### Breaking Changes - -#### Default TLS version changed for `vtgr` - -When using TLS with `vtgr`, we now default to TLS 1.2 if no other explicit version is configured. Configuration flags are provided to explicitly configure the minimum TLS version to be used. - -#### Dedicated stats for VTGate Prepare operations - -Prior to v17 Vitess incorrectly combined stats for VTGate Execute and Prepare operations under a single stats key (`Execute`). In v17 Execute and Prepare operations generate stats under independent stats keys. - -Here is a (condensed) example of stats output: - -``` -{ - "VtgateApi": { - "Histograms": { - "Execute.src.primary": { - "500000": 5 - }, - "Prepare.src.primary": { - "100000000": 0 - } - } - }, - "VtgateApiErrorCounts": { - "Execute.src.primary.INVALID_ARGUMENT": 3, - "Execute.src.primary.ALREADY_EXISTS": 1 - } -} -``` - -### New command line flags and behavior - -#### Backup --builtinbackup-file-read-buffer-size and --builtinbackup-file-write-buffer-size - -Prior to v17 the builtin Backup Engine does not use read buffering for restores, and for backups uses a hardcoded write buffer size of 2097152 bytes. - -In v17 these defaults may be tuned with, respectively `--builtinbackup-file-read-buffer-size` and `--builtinbackup-file-write-buffer-size`. - - - `--builtinbackup-file-read-buffer-size`: read files using an IO buffer of this many bytes. Golang defaults are used when set to 0. - - `--builtinbackup-file-write-buffer-size`: write files using an IO buffer of this many bytes. Golang defaults are used when set to 0. (default 2097152) - -These flags are applicable to the following programs: - - - `vtbackup` - - `vtctld` - - `vttablet` - - `vttestserver` - -### New stats - -#### Detailed backup and restore stats - -##### Backup metrics - -Metrics related to backup operations are available in both Vtbackup and VTTablet. - -**BackupBytes, BackupCount, BackupDurationNanoseconds** - -Depending on the Backup Engine and Backup Storage in-use, a backup may be a complex pipeline of operations, including but not limited to: - - * Reading files from disk. - * Compressing files. - * Uploading compress files to cloud object storage. - -These operations are counted and timed, and the number of bytes consumed or produced by each stage of the pipeline are counted as well. - -##### Restore metrics - -Metrics related to restore operations are available in both Vtbackup and VTTablet. - -**RestoreBytes, RestoreCount, RestoreDurationNanoseconds** - -Depending on the Backup Engine and Backup Storage in-use, a restore may be a complex pipeline of operations, including but not limited to: - - * Downloading compressed files from cloud object storage. - * Decompressing files. - * Writing decompressed files to disk. - -These operations are counted and timed, and the number of bytes consumed or produced by each stage of the pipeline are counted as well. - -##### Vtbackup metrics - -Vtbackup exports some metrics which are not available elsewhere. - -**DurationByPhaseSeconds** - -Vtbackup fetches the last backup, restores it to an empty mysql installation, replicates recent changes into that installation, and then takes a backup of that installation. - -_DurationByPhaseSeconds_ exports timings for these individual phases. - -##### Example - -**A snippet of vtbackup metrics after running it against the local example after creating the initial cluster** - -(Processed with `jq` for readability.) - -``` -{ - "BackupBytes": { - "BackupEngine.Builtin.Source:Read": 4777, - "BackupEngine.Builtin.Compressor:Write": 4616, - "BackupEngine.Builtin.Destination:Write": 162, - "BackupStorage.File.File:Write": 163 - }, - "BackupCount": { - "-.-.Backup": 1, - "BackupEngine.Builtin.Source:Open": 161, - "BackupEngine.Builtin.Source:Close": 322, - "BackupEngine.Builtin.Compressor:Close": 161, - "BackupEngine.Builtin.Destination:Open": 161, - "BackupEngine.Builtin.Destination:Close": 322 - }, - "BackupDurationNanoseconds": { - "-.-.Backup": 4188508542, - "BackupEngine.Builtin.Source:Open": 10649832, - "BackupEngine.Builtin.Source:Read": 55901067, - "BackupEngine.Builtin.Source:Close": 960826, - "BackupEngine.Builtin.Compressor:Write": 278358826, - "BackupEngine.Builtin.Compressor:Close": 79358372, - "BackupEngine.Builtin.Destination:Open": 16456627, - "BackupEngine.Builtin.Destination:Write": 11021043, - "BackupEngine.Builtin.Destination:Close": 17144630, - "BackupStorage.File.File:Write": 10743169 - }, - "DurationByPhaseSeconds": { - "InitMySQLd": 2, - "RestoreLastBackup": 6, - "CatchUpReplication": 1, - "TakeNewBackup": 4 - }, - "RestoreBytes": { - "BackupEngine.Builtin.Source:Read": 1095, - "BackupEngine.Builtin.Decompressor:Read": 950, - "BackupEngine.Builtin.Destination:Write": 209, - "BackupStorage.File.File:Read": 1113 - }, - "RestoreCount": { - "-.-.Restore": 1, - "BackupEngine.Builtin.Source:Open": 161, - "BackupEngine.Builtin.Source:Close": 322, - "BackupEngine.Builtin.Decompressor:Close": 161, - "BackupEngine.Builtin.Destination:Open": 161, - "BackupEngine.Builtin.Destination:Close": 322 - }, - "RestoreDurationNanoseconds": { - "-.-.Restore": 6204765541, - "BackupEngine.Builtin.Source:Open": 10542539, - "BackupEngine.Builtin.Source:Read": 104658370, - "BackupEngine.Builtin.Source:Close": 773038, - "BackupEngine.Builtin.Decompressor:Read": 165692120, - "BackupEngine.Builtin.Decompressor:Close": 51040, - "BackupEngine.Builtin.Destination:Open": 22715122, - "BackupEngine.Builtin.Destination:Write": 41679581, - "BackupEngine.Builtin.Destination:Close": 26954624, - "BackupStorage.File.File:Read": 102416075 - }, - "backup_duration_seconds": 4, - "restore_duration_seconds": 6 -} -``` - -Some notes to help understand these metrics: - - * `BackupBytes["BackupStorage.File.File:Write"]` measures how many bytes were read from disk by the `file` Backup Storage implementation during the backup phase. - * `DurationByPhaseSeconds["CatchUpReplication"]` measures how long it took to catch-up replication after the restore phase. - * `DurationByPhaseSeconds["RestoreLastBackup"]` measures to the duration of the restore phase. - * `RestoreDurationNanoseconds["-.-.Restore"]` also measures to the duration of the restore phase. - -#### VTTablet error count with error code - -##### VTTablet Error Count - -We are introducing new error counter `QueryErrorCountsWithCode` for VTTablet. It is similar to existing [QueryErrorCounts](https://github.com/vitessio/vitess/blob/main/go/vt/vttablet/tabletserver/query_engine.go#L174) except it contains errorCode as additional dimension. -We will deprecate `QueryErrorCounts` in v18. - -## Deprecations and Deletions - -* The deprecated `automation` and `automationservice` protobuf definitions and associated client and server packages have been removed. -* Auto-population of DDL revert actions and tables at execution-time has been removed. This is now handled entirely at enqueue-time. -* Backwards-compatibility for failed migrations without a `completed_timestamp` has been removed (see https://github.com/vitessio/vitess/issues/8499). -* The deprecated `Key`, `Name`, `Up`, and `TabletExternallyReparentedTimestamp` fields were removed from the JSON representation of `TabletHealth` structures. - -### Deprecated Stats - -These stats are deprecated in v17. - -| Deprecated stat | Supported alternatives | -|-|-| -| `backup_duration_seconds` | `BackupDurationNanoseconds` | -| `restore_duration_seconds` | `RestoreDurationNanoseconds` | \ No newline at end of file diff --git a/doc/viper/viper.md b/doc/viper/viper.md new file mode 100644 index 00000000000..f9050ef842c --- /dev/null +++ b/doc/viper/viper.md @@ -0,0 +1,324 @@ +# Vitess Viper Guidelines + +## What is Viper? + +[`viper`][viper] is a configuration-management library for Go programs. +It acts as a registry for configuration values coming from a variety of sources, including: + +- Default values. +- Configuration files (JSON, YAML, TOML, and other formats supported), including optionally watching and live-reloading. +- Environment variables. +- Command-line flags, primarily from `pflag.Flag` types. + +It is used by a wide variety of Go projects, including [hugo][hugo] and [kops][kops]. + +## "Normal" Usage + +Normally, and if you were to follow the examples on the viper documentation, you "just" load in a config file, maybe bind some flags, and then load values all across your codebase, like so: + +```go +// cmd/main.go +package main + +import ( + "log" + + "github.com/spf13/pflag" + "github.com/spf13/viper" + + "example.com/pkg/stuff" +) + +func main() { + pflag.String("name", "", "name to print") + pflag.Parse() + + viper.AddConfigPath(".") + viper.AddConfigPath("/var/mypkg") + + if err := viper.ReadInConfig(); err != nil { + if _, ok := err.(viper.ConfigFileNotFoundError); !ok { + log.Fatal(err) + } + } + + viper.BindPFlags(pflag.CommandLine) + viper.BindEnv("name", "MY_COOL_ENVVAR") + + stuff.Do() +} + +// pkg/stuff/do_stuff.go +package stuff + +import ( + "fmt" + + "github.com/spf13/viper" +) + +func Do() { + fmt.Println(viper.GetString("name")) +} +``` + +While this example is great for getting started with `viper` quickly — it is very easy, and very fast, to _write_ go from nothing to working code — it's not likely to scale well for a codebase the size and complexity of Vitess, for several reasons: + +1. Everything is globally-accessible. + + Currently, most of the config values in Vitess modules are un-exported (and we un-exported more of these during the `pflag` migration). + This is a good thing, as it gives each module control over how its configuration values are used, rather than allowing the raw values to leak across package boundaries. + +1. [Magical][a_theory_of_modern_go] access and lack of compile-time safety. + + In the above example, `package stuff` just "happens to know" that (1) `package main` binds a value to `viper` with the key `"name"` and (2) `"name"` is going to be bound to a `string` value specifically. + + If `package main` ever changes either of these two facts, `package stuff` is going to break (precisely how it breaks depends on what the changes are), and there's no way to catch this _before_ runtime without writing additional linters. (Note this is strongly-related to point 1 above). + +1. Hard to document. + + `viper` does not provide any sort of automatic documentation-generation code (we'll discuss this more later), so we will need to write our own tooling if we want to update our flag documentation with information like "this flag is also settable via this config key and these environment variables". + + If anyone anywhere can just magically try to read a value from the global registry without first declaring (1) that a config value with that key _should_ exist; (2) what flags, aliases, environment variables it reads from; and (3) what type it is, then writing that tooling is going to be vastly more complicated, and possibly impossible to do correctly. + +So, we use an approach that requires a bit more verbosity up-front to mitigate these drawbacks to the simpler approach. + +## Our Approach + +Instead of relying on the global `viper.Viper` singleton, we use a shim layer introduced in `package viperutil` to configure values in a standardized way across the entire Vitess codebase. + +This function, `Configure`, then returns a value object, with a `Get` method that returns the actual value from the viper registry. +Packages may then choose to export their config values, or not, as they see fit for their API consumers. + +### `Configure` Options + +In order to properly configure a value for use, `Configure` needs to know, broadly speaking, three things: + +1. The key name being bound. +1. What "things" it should be bound to (i.e. other keys via aliases, environment variables, and flag names), as well as if it has a default value. +1. How to `Get` it out of a viper. + +`Configure`, therefore, has the following signature: + +```go +func Configure[T any](key string, options Options[T]) Value[T] +``` + +The first parameter provides the key name (point 1 of our above list); all other information is provided via various `Options` fields, which looks like: + +```go +type Options[T any] struct { + // what "things" to bind to + Aliases []string + FlagName string + EnvVars []string + + // default, if any + Default T + + // whether it can reload or not (more on this later) + Dynamic bool + + // how to "get" it from a viper (more on this slightly less later) + GetFunc func(v *viper.Viper) func(key string) T +} +``` + +### `Get` funcs + +In most cases, module authors will not need to specify a `GetFunc` option, since, if not provided, `viperutil` will do its best to provide a sensible default for the given type `T`. + +This requires a fair amount of `reflect`ion code, which we won't go into here, and unfortunately cannot support even all primitive types (notably, array (not slice!!) types). +In these cases, the `GetFuncForType` will panic, allowing the module author to catch this during testing of their package. +They may then provide their own `GetFunc`. + +Authors may also want to provide their own `GetFunc` to provide additional logic to load a value even for types supported by `GetFuncForType` (for example, post-processing a string to ensure it is always lowercase). + +The full suite of types, both supported and panic-inducing, are documented by way of unit tests in [`go/viperutil/get_func_test.go`](../../go/viperutil/get_func_test.go). + +### Dynamic values + +Values can be configured to be either static or dynamic. +Static values are loaded once at startup (more precisely, when `viperutil.LoadConfig` is called), and whatever value is loaded at the point will be the result of calling `Get` on that value for the remainder of the process's lifetime. +Dynamic values, conversely, may respond to config changes. + +In order for dynamic configs to be truly dynamic, `LoadConfig` must have found a config file (as opposed to pulling values entirely from defaults, flags, and environment variables). +If this is the case, a second viper shim, which backs the dynamic registry, will start a watch on that file, and any changes to that file will be reflected in the `Get` methods of any values configured with `Dynamic: true`. + +**An important caveat** is that viper on its own is not threadsafe, meaning that if a config reload is being processed while a value is being accessed, a race condition can occur. +To protect against this, the dynamic registry uses a second shim, [`sync.Viper`](../../go/viperutil/internal/sync/sync.go). +This works by assigning each dynamic value its own `sync.RWMutex`, and locking it for writes whenever a config change is detected. Value `GetFunc`s are then adapted to wrap the underlying get in a `m.RLock(); defer m.RUnlock()` layer. +This means that there's a potential throughput impact of using dynamic values, which module authors should be aware of when deciding to make a given value dynamic. + +### A brief aside on flags + +In the name of "we will catch as many mistakes as possible in tests" ("mistakes" here referring to typos in flag names, deleting a flag in one place but forgetting to clean up another reference, and so on), `Values` will panic at bind-time if they are configured to bind to a flag name that does not exist. +Then, **as long as every binary is at least invoked** (including just `mycmd --help`) in an end-to-end test, our CI will fail if we ever misconfigure a value in this way. + +However, since `Configure` handles the binding of defaults, aliases, and envirnomnent variables, and is usually called in `var` blocks, this binding can actually happen before the module registers its flags via the `servenv.{OnParse,OnParseFor}` hooks. +If we were to also bind any named flags at the point of `Configure`, this would cause panics even if the module later registered a flag with that name. +Therefore, we introduce a separate function, namely `viperutil.BindFlags`, which binds the flags on one or more values, which modules can call _after_ registering their flags, usually in the same `OnParse` hook function. +For example: + +```go +package trace + +import ( + "github.com/spf13/pflag" + + "vitess.io/vitess/go/viperutil" + "vitess.io/vitess/go/vt/servenv" +) + +var ( + configKey = viperutil.KeyPrefixFunc("trace") + + tracingServer = viperutil.Configure( + configKey("service"), + viperutil.Options[string]{ + Default: "noop", + FlagName: "tracer", + }, + ) + enableLogging = viperutil.Configure( + configKey("enable-logging"), + viperutil.Options[bool]{ + FlagName: "tracing-enable-logging", + }, + ) +) + +func RegisterFlags(fs *pflag.FlagSet) { + fs.String("tracer", tracingServer.Default(), "tracing service to use") + fs.Bool("tracing-enable-logging", false, "whether to enable logging in the tracing service") + + viperutil.BindFlags(fs, tracingServer, enableLogging) +} + +func init() { + servenv.OnParse(RegisterFlags) +} +``` + +## Config Files + +`viperutil` provides a few flags that allow binaries to read values from config files in addition to defaults, environment variables and flags. +They are: + +- `--config-path` + - Default: `$(pwd)` + - EnvVar: `VT_CONFIG_PATH` (parsed exactly like a `$PATH` style shell variable). + - FlagType: `StringSlice` + - Behavior: Paths for `ReadInConfig` to search. +- `--config-type` + - Default: `""` + - EnvVar: `VT_CONFIG_TYPE` + - FlagType: `flagutil.StringEnum` + - Values: everything contained in `viper.SupportedExts`, case-insensitive. + - Behavior: Force viper to use a particular unmarshalling strategy; required if the config file does not have an extension (by default, viper infers the config type from the file extension). +- `--config-name` + - Default: `"vtconfig"` + - EnvVar: `VT_CONFIG_NAME` + - FlagType: `string` + - Behavior: Instructs `ReadInConfig` to only look in `ConfigPaths` for files named with this name (with any supported extension, unless `ConfigType` is also set, in which case only with that extension). +- `--config-file` + - Default: `""` + - EnvVar: `VT_CONFIG_FILE` + - FlagType: `string` + - Behavior: Instructs `ReadInConfig` to search in `ConfigPaths` for explicitly a file with this name. Takes precedence over `ConfigName`. +- `--config-file-not-found-handling` + - Default: `WarnOnConfigFileNotFound` + - EnvVar: (none) + - FlagType: `string` (options: `IgnoreConfigFileNotFound`, `WarnOnConfigFileNotFound`, `ErrorOnConfigFileNotFound`, `ExitOnConfigFileNotFound`) + - Behavior: If viper is unable to locate a config file (based on the other flags here), then `LoadConfig` will: + - `Ignore` => do nothing, return no error. Program values will come entirely from defaults, environment variables and flags. + - `Warn` => log at the WARNING level, but return no error. + - `Error` => log at the ERROR level and return the error back to the caller (usually `servenv`.) + - `Exit` => log at the FATAL level, exiting immediately. +- `--config-persistence-min-interval` + - Default: `1s` + - EnvVar: `VT_CONFIG_PERSISTENCE_MIN_INTERVAL` + - FlagType: `time.Duration` + - Behavior: If viper is watching a config file, in order to synchronize between changes to the file, and changes made in-memory to dynamic values (for example, via vtgate's `/debug/env` endpoint), it will periodically write in-memory changes back to disk, waiting _at least_ this long between writes. + If the value is 0, each in-memory `Set` is immediately followed by a write to disk. + +For more information on how viper searches for config files, see the [documentation][viper_read_in_config_docs]. + +If viper was able to locate and load a config file, `LoadConfig` will then configure the dynamic registry to set up a watch on that file, enabling all dynamic values to pick up changes to that file for the remainder of the program's execution. +If no config file was used, then dynamic values behave exactly like static values (i.e. the dynamic registry copies in the settings loaded into the static registry, but does not set up a file watch). + +### Re-persistence for Dynamic Values + +Prior to the introduction of viper in Vitess, certain components (such as `vttablet` or `vtgate`) exposed `/debug/env` HTTP endpoints that permitted the user to modify certain configuration parameters at runtime. + +This behavior is still supported, and to maintain consistency between update mechanisms, if: +- A config file was loaded at startup +- A value is configured with the `Dynamic: true` option + +then in-memory updates to that value (via `.Set()`) will be written back to disk. +If we skipped this step, then the next time viper reloaded the disk config, the in-memory change would be undone, since viper does a full load rather than something more differential. +Unfortunately, this seems unavoidable. + +To migitate against potentially writing to disk "too often" for a given user, the `--config-persistence-min-interval` flag defines the _minimum_ time to wait between writes. +Internally, the system is notified to write "soon" only when a dynamic value is updated. +If the wait period has elapsed between changes, a write happens immediately; otherwise, the system waits out the remainder of the period and persists any changes that happened while it was waiting. +Setting this interval to zero means that writes happen immediately. + +## Auto-Documentation + +One of the benefits of all values being created through a single function is that we can pretty easily build tooling to generate documentation for the config values available to a given binary. +The exact formatting can be tweaked, obviously, but as an example, something like: + +``` +{{ .BinaryName }} + +{{ range .Values }} +{{ .Key }}: + - Aliases: {{ join .Aliases ", " }} + - Environment Variables: {{ join .EnvVars ", " }} + {{- if hasFlag $.BinaryName .FlagName }} + - Flag: {{ .FlagName }} + {{ end -}} + {{- if hasDefault . }} + - Default: {{ .Default }} + {{ end -}} +{{ end }} +``` + +If/when we migrate other binaries to cobra, we can figure out how to combine this documntation with cobra's doc-generation tooling (which we use for `vtctldclient` and `vtadmin`). + +## Debug Endpoint + +Any component that parses its flags via one of `servenv`'s parsing methods will get an HTTP endpoint registered at `/debug/config` which displays the full viper configuration for debugging purposes. +It accepts a query parameter to control the format; anything in `viper.SupportedExts` is permitted. + +Components that do not use `servenv` to parse their flags may manually register the `(go/viperutil/debug).HandlerFunc` if they wish. + +## Caveats and Gotchas + +- Config keys are case-insensitive. +`Foo`, `foo`, `fOo`, and `FOO` will all have the same value. + - **Except** for environment variables, which, when read, are case-sensitive (but the config key they are _bound to_ remains case-insensitive). + For example, if you have `viper.BindEnv("foo", "VT_FOO")`, then `VT_FOO=1 ./myprogram` will set the value to `1`, but `Vt_FoO=1 ./myprogram will not`. + The value, though, can still be read _from_ viper as `Foo`, `foo`, `FOO`, and so on. + +- `Sub` is a split-brain. + The viper docs discuss using the `Sub` method on a viper to extract a subtree of a config to pass to a submodule. + This seems like a good idea, but has some fun surprises. + Each viper maintains its own settings map, and extracting a sub-tree creates a second settings map that is now completely divorced from the parent. + If you were to `parent.Set(key, value)`, the sub-viper will still have the old value. + Furthermore, if the parent was watching a config file for changes, the sub-viper is _not_ watching that file. + + For these reasons, we **strongly** discourage use of `v.Sub`. + +- The `Unmarshal*` functions rely on `mapstructure` tags, not `json|yaml|...` tags. + +- Any config files/paths added _after_ calling `WatchConfig` will not get picked up by that viper, and a viper can only watch a single config file. + +[viper]: https://github.com/spf13/viper +[viper_read_in_config_docs]: https://github.com/spf13/viper#reading-config-files + +[hugo]: https://github.com/gohugoio/hugo +[kops]: https://github.com/kubernetes/kops + +[a_theory_of_modern_go]: https://peter.bourgon.org/blog/2017/06/09/theory-of-modern-go.html diff --git a/doc/vtadmin/clusters.yaml b/doc/vtadmin/clusters.yaml index 55779df60b2..e4ed5335cc6 100644 --- a/doc/vtadmin/clusters.yaml +++ b/doc/vtadmin/clusters.yaml @@ -40,7 +40,16 @@ defaults: vtsql-discovery-tags: "tag1,tag2" # Username to send queries on behalf of. See package callerid. vtsql-effective-user: "my-effective-user" - + # Username used to make requests against vtgates in the cluster. Can be used with + # vtsql-credentials-password in place of vtsql-credentials-path-tmpl. + # If both vtsql-credentials-username and vtsql-credentials-path-tmpl are + # provided, vtsql-credentials-username takes precedent over username from vtsql-credentials-path-tmpl. + vtsql-credentials-username: "my-username" + # Password used to make requests against vtgates in the cluster. Used with + # vtsql-credentials-username in place of vtsql-credentials-path-tmpl. + # If both vtsql-credentials-password and vtsql-credentials-path-tmpl are + # provided, vtsql-credentials-password takes precedent over password from vtsql-credentials-path-tmpl. + vtsql-credentials-password: "my-password" # VTAdmin also provides different RPC pools to gate the number of concurrent # requests it will make against vtctlds/vtgates in a given cluster, to prevent # overwhelming those components. diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile index 41cec5ff4bb..dce65921590 100644 --- a/docker/base/Dockerfile +++ b/docker/base/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" FROM "${image}" @@ -29,6 +29,9 @@ ARG BUILD_GIT_BRANCH # Allows docker builds to set the BUILD_GIT_REV ARG BUILD_GIT_REV +# Allows docker builds to set the BUILD_TIME +ARG BUILD_TIME + # Re-copy sources from working tree COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess diff --git a/docker/base/Dockerfile.mysql57 b/docker/base/Dockerfile.mysql57 index 079fb143fb1..18b0397324b 100644 --- a/docker/base/Dockerfile.mysql57 +++ b/docker/base/Dockerfile.mysql57 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" FROM "${image}" @@ -29,6 +29,9 @@ ARG BUILD_GIT_BRANCH # Allows docker builds to set the BUILD_GIT_REV ARG BUILD_GIT_REV +# Allows docker builds to set the BUILD_TIME +ARG BUILD_TIME + # Re-copy sources from working tree COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess diff --git a/docker/base/Dockerfile.percona57 b/docker/base/Dockerfile.percona57 index fc3a5369ec5..cfcf5467f1d 100644 --- a/docker/base/Dockerfile.percona57 +++ b/docker/base/Dockerfile.percona57 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-percona57" FROM "${image}" @@ -29,6 +29,9 @@ ARG BUILD_GIT_BRANCH # Allows docker builds to set the BUILD_GIT_REV ARG BUILD_GIT_REV +# Allows docker builds to set the BUILD_TIME +ARG BUILD_TIME + # Re-copy sources from working tree COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess diff --git a/docker/base/Dockerfile.percona80 b/docker/base/Dockerfile.percona80 index f8923a94629..208d0d28ee1 100644 --- a/docker/base/Dockerfile.percona80 +++ b/docker/base/Dockerfile.percona80 @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-percona80" FROM "${image}" @@ -29,6 +29,9 @@ ARG BUILD_GIT_BRANCH # Allows docker builds to set the BUILD_GIT_REV ARG BUILD_GIT_REV +# Allows docker builds to set the BUILD_TIME +ARG BUILD_TIME + # Re-copy sources from working tree COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess diff --git a/docker/bootstrap/CHANGELOG.md b/docker/bootstrap/CHANGELOG.md index d8a3eda34ac..f72aadb9581 100644 --- a/docker/bootstrap/CHANGELOG.md +++ b/docker/bootstrap/CHANGELOG.md @@ -56,4 +56,32 @@ List of changes between bootstrap image versions. ## [14] - 2023-02-21 ### Changes -- Update build to golang 1.20.1 \ No newline at end of file +- Update build to golang 1.20.1 + +## [16] - 2023-03-24 +### Changes +- Update build to golang 1.20.2 + +## [17] - 2023-04-05 +### Changes +- Update build to golang 1.20.3 + +## [18] - 2023-05-09 +### Changes +- Update build to golang 1.20.4 + +## [19] - 2023-06-07 +### Changes +- Update build to golang 1.20.5 + +## [20] - 2023-08-03 +### Changes +- Bump all images to bullseye base image + +## [21] - 2023-08-25 +### Changes +- Update build to golang 1.21.0 + +## [22] - 2023-09-07 +### Changes +- Update build to golang 1.21.1 \ No newline at end of file diff --git a/docker/bootstrap/Dockerfile.common b/docker/bootstrap/Dockerfile.common index 72b1b961ea8..73e2b22ebe0 100644 --- a/docker/bootstrap/Dockerfile.common +++ b/docker/bootstrap/Dockerfile.common @@ -1,4 +1,4 @@ -FROM --platform=linux/amd64 golang:1.20.1-buster +FROM --platform=linux/amd64 golang:1.21.1-bullseye # Install Vitess build dependencies RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ @@ -22,7 +22,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-ins ENV VTROOT /vt/src/vitess.io/vitess ENV VTDATAROOT /vt/vtdataroot ENV VTPORTSTART 15000 -ENV PATH $VTROOT/bin:$VTROOT/dist/maven/bin:$VTROOT/dist/chromedriver:$PATH +ENV PATH $VTROOT/bin:$VTROOT/dist/maven/bin:$PATH ENV USER vitess # Copy files needed for bootstrap diff --git a/docker/bootstrap/Dockerfile.mysql57-arm64v8 b/docker/bootstrap/Dockerfile.mysql57-arm64v8 deleted file mode 100644 index 96b08413aa1..00000000000 --- a/docker/bootstrap/Dockerfile.mysql57-arm64v8 +++ /dev/null @@ -1,65 +0,0 @@ -FROM debian:9 AS builder - -WORKDIR /opt -#Build xtrabackup -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - autoconf \ - automake \ - bison \ - build-essential \ - bzr \ - ca-certificates \ - cmake \ - flex \ - libaio-dev \ - libcurl4-gnutls-dev \ - libev-dev \ - libgcrypt11-dev \ - libncurses-dev \ - libtool \ - mysql-client \ - vim-common \ - wget \ - zlib1g-dev && \ - wget https://github.com/percona/percona-xtrabackup/archive/percona-xtrabackup-2.4.13.tar.gz \ - -P /opt && \ - tar zxf /opt/percona-xtrabackup-2.4.13.tar.gz -C /opt && \ - rm /opt/percona-xtrabackup-2.4.13.tar.gz && \ - cd /opt/percona-xtrabackup-percona-xtrabackup-2.4.13 && \ - mkdir bld && cd bld && \ - cmake .. -DBUILD_CONFIG=xtrabackup_release -DWITH_MAN_PAGES=OFF \ - -DDOWNLOAD_BOOST=1 -DWITH_BOOST=/usr/local && \ - make -j4 && \ - make install - -ARG bootstrap_version -ARG image="vitess/bootstrap:${bootstrap_version}-common" - -FROM --platform=linux/arm64/v8 "${image}" - -# Install MySQL 5.7 -RUN add-apt-repository 'deb http://ftp.debian.org/debian sid main' && \ - apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - libmysqlclient-dev \ - mysql-client-5.7 \ - mysql-server-5.7 \ - libdbd-mysql-perl \ - python3-distutils-extra \ - rsync \ - libev4 \ - libcurl4-openssl-dev \ - libaio1 && \ - rm -rf /var/lib/apt/lists/* && \ - mkdir -p /usr/local/xtrabackup/bin && \ - mkdir -p /usr/local/xtrabackup/lib - -# Bootstrap Vitess -WORKDIR /vt/src/vitess.io/vitess -COPY --from=builder /usr/local/xtrabackup/bin /usr/local/xtrabackup/bin -COPY --from=builder /usr/local/xtrabackup/lib /usr/local/xtrabackup/lib -ENV PATH="/usr/local/xtrabackup/bin:${PATH}" -ENV MYSQL_FLAVOR MySQL56 -USER vitess -RUN ./bootstrap.sh diff --git a/docker/bootstrap/Dockerfile.mysql80 b/docker/bootstrap/Dockerfile.mysql80 index e064c638d99..059f01b8101 100644 --- a/docker/bootstrap/Dockerfile.mysql80 +++ b/docker/bootstrap/Dockerfile.mysql80 @@ -6,9 +6,9 @@ FROM --platform=linux/amd64 "${image}" # Install MySQL 8.0 RUN for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 8C718D3B5072E1F5 && break; done && \ for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 467B942D3A79BD29 && break; done && \ - add-apt-repository 'deb http://repo.mysql.com/apt/debian/ buster mysql-8.0' && \ + add-apt-repository 'deb http://repo.mysql.com/apt/debian/ bullseye mysql-8.0' && \ for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \ - echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list && \ + echo 'deb http://repo.percona.com/apt bullseye main' > /etc/apt/sources.list.d/percona.list && \ { \ echo debconf debconf/frontend select Noninteractive; \ echo percona-server-server-8.0 percona-server-server/root_password password 'unused'; \ diff --git a/docker/bootstrap/Dockerfile.percona57 b/docker/bootstrap/Dockerfile.percona57 index 2d8beb5e95d..febe09fd8bf 100644 --- a/docker/bootstrap/Dockerfile.percona57 +++ b/docker/bootstrap/Dockerfile.percona57 @@ -5,16 +5,15 @@ FROM --platform=linux/amd64 "${image}" # Install Percona 5.7 RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \ - add-apt-repository 'deb http://repo.percona.com/apt buster main' && \ + add-apt-repository 'deb http://repo.percona.com/apt bullseye main' && \ { \ echo debconf debconf/frontend select Noninteractive; \ echo percona-server-server-5.7 percona-server-server/root_password password 'unused'; \ echo percona-server-server-5.7 percona-server-server/root_password_again password 'unused'; \ } | debconf-set-selections && \ apt-get update && \ - apt-get install -y --no-install-recommends \ - percona-server-server-5.7 \ - libperconaserverclient20-dev percona-xtrabackup-24 && \ + apt-get install -y --no-install-recommends percona-server-server-5.7 && \ + apt-get install -y --no-install-recommends libperconaserverclient20-dev percona-xtrabackup-24 && \ rm -rf /var/lib/apt/lists/* # Bootstrap Vitess diff --git a/docker/bootstrap/Dockerfile.percona80 b/docker/bootstrap/Dockerfile.percona80 index 5dadc32cd0a..446ec554612 100644 --- a/docker/bootstrap/Dockerfile.percona80 +++ b/docker/bootstrap/Dockerfile.percona80 @@ -5,7 +5,7 @@ FROM --platform=linux/amd64 "${image}" # Install Percona 8.0 RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done \ - && echo 'deb http://repo.percona.com/ps-80/apt buster main' > /etc/apt/sources.list.d/percona.list && \ + && echo 'deb http://repo.percona.com/ps-80/apt bullseye main' > /etc/apt/sources.list.d/percona.list && \ { \ echo debconf debconf/frontend select Noninteractive; \ echo percona-server-server-8.0 percona-server-server/root_password password 'unused'; \ @@ -21,7 +21,7 @@ RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.c rsync \ libev4 \ # && rm -f /etc/apt/sources.list.d/percona.list \ - && echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list \ + && echo 'deb http://repo.percona.com/apt bullseye main' > /etc/apt/sources.list.d/percona.list \ # { \ # echo debconf debconf/frontend select Noninteractive; \ # echo percona-server-server-8.0 percona-server-server/root_password password 'unused'; \ diff --git a/docker/bootstrap/build.sh b/docker/bootstrap/build.sh index a3ac24d916d..d84e37fced9 100755 --- a/docker/bootstrap/build.sh +++ b/docker/bootstrap/build.sh @@ -47,11 +47,9 @@ fi chmod -R o=rx *; arch=$(uname -m) -[ "$arch" == "aarch64" ] && [ $flavor != "common" ] && arch_ext='-arm64v8' - base_image="${base_image:-vitess/bootstrap:$version-common}" -image="${image:-vitess/bootstrap:$version-$flavor$arch_ext}" +image="${image:-vitess/bootstrap:$version-$flavor}" while [ $# -gt 0 ]; do if [[ $1 == *"--"* ]]; then @@ -61,6 +59,11 @@ while [ $# -gt 0 ]; do shift done -if [ -f "docker/bootstrap/Dockerfile.$flavor$arch_ext" ]; then - docker build --no-cache -f docker/bootstrap/Dockerfile.$flavor$arch_ext -t $image --build-arg bootstrap_version=$version --build-arg image=$base_image . +if [ -f "docker/bootstrap/Dockerfile.$flavor" ]; then + docker build \ + -f docker/bootstrap/Dockerfile.$flavor \ + -t $image \ + --build-arg bootstrap_version=$version \ + --build-arg image=$base_image \ + . fi diff --git a/docker/k8s/Dockerfile b/docker/k8s/Dockerfile index 30ff33952bc..db296a8803d 100644 --- a/docker/k8s/Dockerfile +++ b/docker/k8s/Dockerfile @@ -19,14 +19,6 @@ FROM vitess/base:${VT_BASE_VER} AS base FROM debian:${DEBIAN_VER} -# TODO: remove when https://github.com/vitessio/vitess/issues/3553 is fixed -RUN apt-get update && \ - apt-get upgrade -qq && \ - apt-get install default-mysql-client -qq --no-install-recommends && \ - apt-get autoremove && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - # Set up Vitess environment (just enough to run pre-built Go binaries) ENV VTROOT /vt/src/vitess.io/vitess ENV VTDATAROOT /vtdataroot diff --git a/docker/k8s/orchestrator/Dockerfile b/docker/k8s/orchestrator/Dockerfile deleted file mode 100644 index e3e8f3ac346..00000000000 --- a/docker/k8s/orchestrator/Dockerfile +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -ARG VT_BASE_VER=latest -ARG DEBIAN_VER=stable-slim - -FROM vitess/k8s:${VT_BASE_VER} AS k8s - -FROM debian:${DEBIAN_VER} -ARG ORC_VER='3.2.3' - -RUN apt-get update && \ - apt-get upgrade -qq && \ - apt-get install wget ca-certificates jq -qq --no-install-recommends && \ - wget https://github.com/openark/orchestrator/releases/download/v${ORC_VER}/orchestrator_${ORC_VER}_amd64.deb && \ - dpkg -i orchestrator_${ORC_VER}_amd64.deb && \ - rm orchestrator_${ORC_VER}_amd64.deb && \ - apt-get purge wget -qq && \ - apt-get autoremove -qq && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - -# Copy vtctlclient to be used to notify -COPY --from=k8s /vt/bin/vtctlclient /usr/bin/ - -WORKDIR /usr/local/orchestrator -CMD ["./orchestrator", "--config=/conf/orchestrator.conf.json", "http"] diff --git a/docker/k8s/pmm-client/Dockerfile b/docker/k8s/pmm-client/Dockerfile deleted file mode 100644 index 732e2e0a2ee..00000000000 --- a/docker/k8s/pmm-client/Dockerfile +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -ARG VT_BASE_VER=latest -ARG DEBIAN_VER=stable-slim - -FROM vitess/k8s:${VT_BASE_VER} AS k8s - -FROM debian:${DEBIAN_VER} -ARG PMM_CLIENT_VER='1.17.4' - -RUN apt-get update && \ - apt-get upgrade -qq && \ - apt-get install procps wget ca-certificates -qq --no-install-recommends && \ - wget https://www.percona.com/redir/downloads/pmm-client/${PMM_CLIENT_VER}/binary/debian/buster/x86_64/pmm-client_${PMM_CLIENT_VER}-1.buster_amd64.deb && \ - dpkg -i pmm-client_${PMM_CLIENT_VER}-1.buster_amd64.deb && \ - rm pmm-client_${PMM_CLIENT_VER}-1.buster_amd64.deb && \ - apt-get purge wget ca-certificates -qq && \ - apt-get autoremove -qq && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - -# Copy CA certs for https calls -COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt diff --git a/docker/k8s/vtadmin/Dockerfile b/docker/k8s/vtadmin/Dockerfile index 837ac8a525a..f952681d3c9 100644 --- a/docker/k8s/vtadmin/Dockerfile +++ b/docker/k8s/vtadmin/Dockerfile @@ -17,7 +17,7 @@ ARG DEBIAN_VER=bullseye-slim FROM vitess/k8s:${VT_BASE_VER} AS k8s -FROM node:16-${DEBIAN_VER} as node +FROM node:18-${DEBIAN_VER} as node # Prepare directory structure. RUN mkdir -p /vt/web diff --git a/docker/k8s/vttablet/Dockerfile b/docker/k8s/vttablet/Dockerfile index 95453a69771..dd504d7860d 100644 --- a/docker/k8s/vttablet/Dockerfile +++ b/docker/k8s/vttablet/Dockerfile @@ -19,14 +19,6 @@ FROM vitess/k8s:${VT_BASE_VER} AS k8s FROM debian:${DEBIAN_VER} -# TODO: remove when https://github.com/vitessio/vitess/issues/3553 is fixed -RUN apt-get update && \ - apt-get upgrade -qq && \ - apt-get install wget default-mysql-client jq curl -qq --no-install-recommends && \ - apt-get autoremove && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - # Set up Vitess environment (just enough to run pre-built Go binaries) ENV VTROOT /vt ENV VTDATAROOT /vtdataroot diff --git a/docker/lite/Dockerfile.mysql57 b/docker/lite/Dockerfile.mysql57 index 1f72d5ec244..6560725f730 100644 --- a/docker/lite/Dockerfile.mysql57 +++ b/docker/lite/Dockerfile.mysql57 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" FROM "${image}" AS builder @@ -33,7 +33,7 @@ USER vitess RUN make install PREFIX=/vt/install # Start over and build the final image. -FROM debian:buster-slim +FROM debian:bullseye-slim # Install dependencies COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh diff --git a/docker/lite/Dockerfile.mysql80 b/docker/lite/Dockerfile.mysql80 index 3377fba3896..f91031f5996 100644 --- a/docker/lite/Dockerfile.mysql80 +++ b/docker/lite/Dockerfile.mysql80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" FROM "${image}" AS builder @@ -33,7 +33,7 @@ USER vitess RUN make install PREFIX=/vt/install # Start over and build the final image. -FROM debian:buster-slim +FROM debian:bullseye-slim # Install dependencies COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh diff --git a/docker/lite/Dockerfile.percona57 b/docker/lite/Dockerfile.percona57 index 9e270f95cd0..2ee3250181b 100644 --- a/docker/lite/Dockerfile.percona57 +++ b/docker/lite/Dockerfile.percona57 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-percona57" FROM "${image}" AS builder @@ -33,7 +33,7 @@ USER vitess RUN make install PREFIX=/vt/install # Start over and build the final image. -FROM debian:buster-slim +FROM debian:bullseye-slim # Install dependencies COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh diff --git a/docker/lite/Dockerfile.percona80 b/docker/lite/Dockerfile.percona80 index 58e8d706c2b..5e45d1d2113 100644 --- a/docker/lite/Dockerfile.percona80 +++ b/docker/lite/Dockerfile.percona80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-percona80" FROM "${image}" AS builder @@ -33,7 +33,7 @@ USER vitess RUN make install PREFIX=/vt/install # Start over and build the final image. -FROM debian:buster-slim +FROM debian:bullseye-slim # Install dependencies COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh diff --git a/docker/lite/Dockerfile.testing b/docker/lite/Dockerfile.testing index 05f9c951688..d6a6649144c 100644 --- a/docker/lite/Dockerfile.testing +++ b/docker/lite/Dockerfile.testing @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" FROM "${image}" AS builder @@ -33,7 +33,7 @@ USER vitess RUN make install-testing PREFIX=/vt/install # Start over and build the final image. -FROM debian:buster-slim +FROM debian:bullseye-slim # Install dependencies COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh diff --git a/docker/lite/Dockerfile.ubi7.mysql57 b/docker/lite/Dockerfile.ubi7.mysql57 index ffd3b43c757..15612dd5353 100644 --- a/docker/lite/Dockerfile.ubi7.mysql57 +++ b/docker/lite/Dockerfile.ubi7.mysql57 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" FROM "${image}" AS builder @@ -36,6 +36,7 @@ RUN make install PREFIX=/vt/install FROM registry.access.redhat.com/ubi7/ubi:latest # Install keys and dependencies +RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022 RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ && yum install -y --setopt=alwaysprompt=no gnupg \ && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) \ diff --git a/docker/lite/Dockerfile.ubi7.mysql80 b/docker/lite/Dockerfile.ubi7.mysql80 index 59dcd223760..33e71b1f0e7 100644 --- a/docker/lite/Dockerfile.ubi7.mysql80 +++ b/docker/lite/Dockerfile.ubi7.mysql80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" FROM "${image}" AS builder @@ -36,6 +36,7 @@ RUN make install PREFIX=/vt/install FROM registry.access.redhat.com/ubi7/ubi:latest # Install keys and dependencies +RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022 RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ && yum install -y --setopt=alwaysprompt=no gnupg \ && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) \ diff --git a/docker/lite/Dockerfile.ubi7.percona57 b/docker/lite/Dockerfile.ubi7.percona57 index 09578713564..118419053f2 100644 --- a/docker/lite/Dockerfile.ubi7.percona57 +++ b/docker/lite/Dockerfile.ubi7.percona57 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-percona57" FROM "${image}" AS builder diff --git a/docker/lite/Dockerfile.ubi7.percona80 b/docker/lite/Dockerfile.ubi7.percona80 index b64a30ed331..8303fe54585 100644 --- a/docker/lite/Dockerfile.ubi7.percona80 +++ b/docker/lite/Dockerfile.ubi7.percona80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-percona80" FROM "${image}" AS builder diff --git a/docker/lite/Dockerfile.ubi8.arm64.mysql80 b/docker/lite/Dockerfile.ubi8.arm64.mysql80 index abfd4c3302b..fb8220c64fc 100644 --- a/docker/lite/Dockerfile.ubi8.arm64.mysql80 +++ b/docker/lite/Dockerfile.ubi8.arm64.mysql80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" FROM "${image}" AS builder @@ -36,6 +36,7 @@ RUN make cross-install PREFIX=/vt/install GOOS=linux GOARCH=arm64 FROM registry.access.redhat.com/ubi8/ubi:latest # Install keys and dependencies +RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022 RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ && yum install -y --setopt=alwaysprompt=no gnupg \ && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 99DB70FAE1D7CE227FB6488205B555B38483C65D 3A79BD29 A4A9406876FCBD3C456770C88C718D3B5072E1F5 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 ) \ @@ -54,7 +55,7 @@ RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ && rm -f /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm RUN echo H4sICIDAHmICA2ZvbwDVkDFLxEAQhfv9FVfY7o4RhCBsoXJcIXKHwUIOi7m5MVk2yS6zG0//vYlRULTU4rrHvOHN+2ZL5Q4TP6oeO7bX3Od1pcuFXlyNUzVZg7S2yTmmCwDsgzjuDSUyB5SDI2+QzOChcyJBEnwkPOPQZijNuTkrigKmsHUFJ1MeCjUQEqg61tQweVtM0vOrfXItj1eAM0H0DiR2erTgbnOrV5uVvlk+6M+Kinvctby3p0ptqRziHjOnnxz3s/FnKJcxVlkYu/+k4Zcs+AvM8n3+jWW8MBc2NO6FZILUMEsoYQ76UvWI/vAGB/SOZZsCAAA= | base64 -d | gzip -dc > /etc/yum.repos.d/CentOS-Base.repo \ && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --enablerepo c8base --enablerepo c8updates --enablerepo c8extras libev numactl-libs sysstat strace \ - && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs https://download-ib01.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/g/gperftools-libs-2.7-9.el8.aarch64.rpm https://download-ib01.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/j/jemalloc-5.2.1-2.el8.aarch64.rpm https://download-ib01.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/l/libunwind-1.3.1-3.el8.aarch64.rpm + && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs https://dl.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/g/gperftools-libs-2.7-9.el8.aarch64.rpm https://dl.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/j/jemalloc-5.2.1-2.el8.aarch64.rpm https://dl.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/l/libunwind-1.3.1-3.el8.aarch64.rpm RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \ && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \ procps-ng rsync wget openssl hostname curl tzdata make \ diff --git a/docker/lite/Dockerfile.ubi8.mysql80 b/docker/lite/Dockerfile.ubi8.mysql80 index 7fd9f63e746..cf1799bfc5e 100644 --- a/docker/lite/Dockerfile.ubi8.mysql80 +++ b/docker/lite/Dockerfile.ubi8.mysql80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" FROM "${image}" AS builder @@ -36,6 +36,7 @@ RUN make install PREFIX=/vt/install FROM registry.access.redhat.com/ubi8/ubi:latest # Install keys and dependencies +RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022 RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ && yum install -y --setopt=alwaysprompt=no gnupg \ && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 99DB70FAE1D7CE227FB6488205B555B38483C65D 3A79BD29 A4A9406876FCBD3C456770C88C718D3B5072E1F5 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 ) \ diff --git a/docker/lite/install_dependencies.sh b/docker/lite/install_dependencies.sh index 92f7ab67397..2175df5def3 100755 --- a/docker/lite/install_dependencies.sh +++ b/docker/lite/install_dependencies.sh @@ -84,23 +84,25 @@ mysql57) ;; mysql80) mysql8_version=8.0.30 - do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb /tmp/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb - do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb - do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-plugins_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client-plugins_${mysql8_version}-1debian10_amd64.deb - do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client_${mysql8_version}-1debian10_amd64.deb - do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-client_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-client_${mysql8_version}-1debian10_amd64.deb - do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-server-core_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-server-core_${mysql8_version}-1debian10_amd64.deb - do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-server_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-server_${mysql8_version}-1debian10_amd64.deb - do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-server_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-server_${mysql8_version}-1debian10_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-common_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-common_${mysql8_version}-1debian11_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/libmysqlclient21_${mysql8_version}-1debian11_amd64.deb /tmp/libmysqlclient21_${mysql8_version}-1debian11_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-core_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-community-client-core_${mysql8_version}-1debian11_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-plugins_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-community-client-plugins_${mysql8_version}-1debian11_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-community-client_${mysql8_version}-1debian11_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-client_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-client_${mysql8_version}-1debian11_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-server-core_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-community-server-core_${mysql8_version}-1debian11_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-server_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-community-server_${mysql8_version}-1debian11_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-server_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-server_${mysql8_version}-1debian11_amd64.deb PACKAGES=( - /tmp/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb - /tmp/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb - /tmp/mysql-community-client-plugins_${mysql8_version}-1debian10_amd64.deb - /tmp/mysql-community-client_${mysql8_version}-1debian10_amd64.deb - /tmp/mysql-client_${mysql8_version}-1debian10_amd64.deb - /tmp/mysql-community-server-core_${mysql8_version}-1debian10_amd64.deb - /tmp/mysql-community-server_${mysql8_version}-1debian10_amd64.deb - /tmp/mysql-server_${mysql8_version}-1debian10_amd64.deb + /tmp/mysql-common_${mysql8_version}-1debian11_amd64.deb + /tmp/libmysqlclient21_${mysql8_version}-1debian11_amd64.deb + /tmp/mysql-community-client-core_${mysql8_version}-1debian11_amd64.deb + /tmp/mysql-community-client-plugins_${mysql8_version}-1debian11_amd64.deb + /tmp/mysql-community-client_${mysql8_version}-1debian11_amd64.deb + /tmp/mysql-client_${mysql8_version}-1debian11_amd64.deb + /tmp/mysql-community-server-core_${mysql8_version}-1debian11_amd64.deb + /tmp/mysql-community-server_${mysql8_version}-1debian11_amd64.deb + /tmp/mysql-server_${mysql8_version}-1debian11_amd64.deb percona-xtrabackup-80 ) ;; @@ -146,18 +148,21 @@ mysql57) echo 'deb http://repo.mysql.com/apt/debian/ buster mysql-5.7' > /etc/apt/sources.list.d/mysql.list ;; mysql80) - echo 'deb http://repo.mysql.com/apt/debian/ buster mysql-8.0' > /etc/apt/sources.list.d/mysql.list + echo 'deb http://repo.mysql.com/apt/debian/ bullseye mysql-8.0' > /etc/apt/sources.list.d/mysql.list ;; esac # Add extra apt repositories for Percona Server and/or Percona XtraBackup. case "${FLAVOR}" in -mysql57|mysql80|percona57) +mysql57) echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list ;; +mysql80|percona57) + echo 'deb http://repo.percona.com/apt bullseye main' > /etc/apt/sources.list.d/percona.list + ;; percona80) - echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list - echo 'deb http://repo.percona.com/ps-80/apt buster main' > /etc/apt/sources.list.d/percona80.list + echo 'deb http://repo.percona.com/apt bullseye main' > /etc/apt/sources.list.d/percona.list + echo 'deb http://repo.percona.com/ps-80/apt bullseye main' > /etc/apt/sources.list.d/percona80.list ;; esac diff --git a/docker/local/Dockerfile b/docker/local/Dockerfile index ef787858ebb..101c725935f 100644 --- a/docker/local/Dockerfile +++ b/docker/local/Dockerfile @@ -1,4 +1,4 @@ -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-common" FROM "${image}" diff --git a/docker/mini/Dockerfile b/docker/mini/Dockerfile index f9c14932eb0..469fbef8d9e 100644 --- a/docker/mini/Dockerfile +++ b/docker/mini/Dockerfile @@ -31,16 +31,12 @@ RUN ln -s /usr/bin/python3 /usr/bin/python COPY docker/mini/install_mini_dependencies.sh /vt/dist/install_mini_dependencies.sh RUN /vt/dist/install_mini_dependencies.sh -COPY docker/mini/orchestrator-vitess-mini.conf.json /etc/orchestrator.conf.json -RUN chown vitess:vitess /etc/orchestrator.conf.json - COPY docker/mini/docker-entry /vt/dist/docker/mini/docker-entry COPY examples/common/scripts /vt/dist/scripts COPY examples/common/env.sh /vt/dist/scripts/env.sh COPY examples/common/lib/utils.sh /vt/dist/scripts/lib/utils.sh COPY docker/mini/vtctld-mini-up.sh /vt/dist/scripts/vtctld-mini-up.sh COPY docker/mini/vttablet-mini-up.sh /vt/dist/scripts/vttablet-mini-up.sh -COPY docker/mini/orchestrator-up.sh /vt/dist/scripts/orchestrator-up.sh RUN echo "hostname=127.0.0.1" >> /vt/dist/scripts/env.sh RUN cat /vt/dist/scripts/env.sh | egrep "^alias" >> /etc/bash.bashrc diff --git a/docker/mini/orchestrator-up.sh b/docker/mini/orchestrator-up.sh deleted file mode 100755 index 6e4ff486fad..00000000000 --- a/docker/mini/orchestrator-up.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -source ./env.sh - -echo "- Configuring orchestrator with given topology server and credentials..." -cp /etc/orchestrator.conf.json /tmp/ -sed -i /tmp/orchestrator.conf.json -e "s/DISCOVERY_SEED_PLACEHOLDER/$TOPOLOGY_SERVER/g" -sed -i /tmp/orchestrator.conf.json -e "s/MYSQL_TOPOLOGY_USER_PLACEHOLDER/$TOPOLOGY_USER/g" -sed -i /tmp/orchestrator.conf.json -e "s/MYSQL_TOPOLOGY_PASSWORD_PLACEHOLDER/$TOPOLOGY_PASSWORD/g" - -cat /tmp/orchestrator.conf.json > /etc/orchestrator.conf.json -rm /tmp/orchestrator.conf.json - -ORCHESTRATOR_LOG="${VTDATAROOT}/tmp/orchestrator.out" - -echo "- Starting orchestrator... Logfile is $ORCHESTRATOR_LOG" - -cd /usr/local/orchestrator -./orchestrator http > $ORCHESTRATOR_LOG 2>&1 & diff --git a/docker/mini/orchestrator-vitess-mini.conf.json b/docker/mini/orchestrator-vitess-mini.conf.json deleted file mode 100644 index 604801603c2..00000000000 --- a/docker/mini/orchestrator-vitess-mini.conf.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "Debug": true, - "EnableSyslog": false, - "ListenAddress": ":3000", - "MySQLTopologyUser": "MYSQL_TOPOLOGY_USER_PLACEHOLDER", - "MySQLTopologyPassword": "MYSQL_TOPOLOGY_PASSWORD_PLACEHOLDER", - "BackendDB": "sqlite", - "SQLite3DataFile": "/tmp/orchestrator.sqlite3", - "MySQLConnectTimeoutSeconds": 1, - "DefaultInstancePort": 3306, - "DiscoverByShowSlaveHosts": true, - "InstancePollSeconds": 1, - "HostnameResolveMethod": "none", - "MySQLHostnameResolveMethod": "@@report_host", - "SkipBinlogServerUnresolveCheck": true, - "ExpiryHostnameResolvesMinutes": 60, - "VerifyReplicationFilters": false, - "ReasonableMaintenanceReplicationLagSeconds": 20, - "CandidateInstanceExpireMinutes": 60, - "ReadOnly": false, - "AuthenticationMethod": "", - "ReplicationLagQuery": "", - "DetectClusterAliasQuery": "", - "DetectClusterDomainQuery": "", - "DetectInstanceAliasQuery": "", - "DetectPromotionRuleQuery": "", - "DetectDataCenterQuery": "", - "DetectRegionQuery": "", - "DetectPhysicalEnvironmentQuery": "", - "DetectSemiSyncEnforcedQuery": "", - "DiscoverySeeds": [ - "DISCOVERY_SEED_PLACEHOLDER" - ], - "ServeAgentsHttp": false, - "UseSSL": false, - "UseMutualTLS": false, - "MySQLTopologyUseMixedTLS": false, - "StatusEndpoint": "/api/status", - "StatusSimpleHealth": true, - "StatusOUVerify": false, - "BinlogEventsChunkSize": 10000, - "SkipBinlogEventsContaining": [], - "ReduceReplicationAnalysisCount": false, - "FailureDetectionPeriodBlockMinutes": 5, - "FailMasterPromotionOnLagMinutes": 0, - "RecoveryPeriodBlockSeconds": 0, - "RecoveryIgnoreHostnameFilters": [], - "RecoverMasterClusterFilters": [], - "RecoverIntermediateMasterClusterFilters": [], - "OnFailureDetectionProcesses": [], - "PreFailoverProcesses": [], - "PostFailoverProcesses": [], - "PostUnsuccessfulFailoverProcesses": [], - "PostMasterFailoverProcesses": [], - "PostIntermediateMasterFailoverProcesses": [], - "CoMasterRecoveryMustPromoteOtherCoMaster": true, - "DetachLostReplicasAfterMasterFailover": true, - "ApplyMySQLPromotionAfterMasterFailover": true, - "PreventCrossDataCenterMasterFailover": false, - "PreventCrossRegionMasterFailover": true, - "MasterFailoverDetachReplicaMasterHost": false, - "MasterFailoverLostInstancesDowntimeMinutes": 0, - "PostponeReplicaRecoveryOnLagMinutes": 0, - "RaftEnabled": false -} diff --git a/docker/orchestrator/Dockerfile b/docker/orchestrator/Dockerfile deleted file mode 100644 index 13622322443..00000000000 --- a/docker/orchestrator/Dockerfile +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -FROM debian:jessie - -# Install Percona XtraDB Cluster (Galera) -RUN apt-key adv --keyserver keys.gnupg.net --recv-keys 9334A25F8507EFA5 && \ - echo 'deb http://repo.percona.com/apt jessie main' > /etc/apt/sources.list.d/mysql.list && \ - apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - percona-xtradb-cluster-server-5.6 && \ - rm -rf /var/lib/apt/lists/* - -# Set up Orchestrator database -RUN service mysql start && \ - mysql -e "CREATE DATABASE orchestrator; GRANT ALL PRIVILEGES ON orchestrator.* TO 'orc_server_user'@'127.0.0.1' IDENTIFIED BY 'orc_server_user_password'" && \ - service mysql stop - -# Copy Orchestrator files (placed in workdir by build.sh) -COPY vtctlclient /usr/bin/vtctlclient -COPY orchestrator /usr/bin/orchestrator -COPY orchestrator.conf.json /orc/conf/orchestrator.conf.json -COPY resources /orc/resources - -WORKDIR /orc -CMD ["/usr/bin/orchestrator", "http"] - diff --git a/docker/orchestrator/build.sh b/docker/orchestrator/build.sh deleted file mode 100755 index 45236582a12..00000000000 --- a/docker/orchestrator/build.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e - -tmpdir=`mktemp -d` - -script="go install vitess.io/vitess/go/cmd/vtctlclient@latest && \ - git clone https://github.com/openark/orchestrator.git src/github.com/openark/orchestrator && \ - go install github.com/openark/orchestrator/go/cmd/orchestrator" - -echo "Building orchestrator..." -docker run -ti --name=vt_orc_build golang:1.14.4-buster bash -c "$script" -docker cp vt_orc_build:/go/bin/orchestrator $tmpdir -docker cp vt_orc_build:/go/bin/vtctlclient $tmpdir -docker cp vt_orc_build:/go/src/github.com/openark/orchestrator/resources $tmpdir -docker rm vt_orc_build - -echo "Building Docker image..." -cp Dockerfile orchestrator.conf.json $tmpdir -(cd $tmpdir && docker build -t vitess/orchestrator .) - -# Clean up -rm -r $tmpdir diff --git a/docker/orchestrator/orchestrator.conf.json b/docker/orchestrator/orchestrator.conf.json deleted file mode 100644 index 729594044ed..00000000000 --- a/docker/orchestrator/orchestrator.conf.json +++ /dev/null @@ -1,114 +0,0 @@ -{ - "ActiveNodeExpireSeconds": 5, - "ApplyMySQLPromotionAfterMasterFailover": true, - "AuditLogFile": "/tmp/orchestrator-audit.log", - "AuditToSyslog": false, - "AuthenticationMethod": "", - "AuthUserHeader": "", - "BackendDB": "sqlite", - "BinlogEventsChunkSize": 10000, - "CandidateInstanceExpireMinutes": 60, - "CoMasterRecoveryMustPromoteOtherCoMaster": false, - "DataCenterPattern": "[.]([^.]+)[.][^.]+[.]vitess[.]io", - "Debug": true, - "DefaultInstancePort": 3306, - "DetachLostSlavesAfterMasterFailover": true, - "DetectClusterAliasQuery": "SELECT value FROM _vt.local_metadata WHERE name='ClusterAlias'", - "DetectClusterDomainQuery": "", - "DetectInstanceAliasQuery": "SELECT value FROM _vt.local_metadata WHERE name='Alias'", - "DetectPromotionRuleQuery": "SELECT value FROM _vt.local_metadata WHERE name='PromotionRule'", - "DetectDataCenterQuery": "SELECT value FROM _vt.local_metadata WHERE name='DataCenter'", - "DetectSemiSyncEnforcedQuery": "SELECT @@global.rpl_semi_sync_master_wait_no_slave AND @@global.rpl_semi_sync_master_timeout > 1000000", - "DiscoverByShowSlaveHosts": false, - "EnableSyslog": false, - "ExpiryHostnameResolvesMinutes": 60, - "DelayMasterPromotionIfSQLThreadNotUpToDate": true, - "FailureDetectionPeriodBlockMinutes": 10, - "GraphiteAddr": "", - "GraphiteConvertHostnameDotsToUnderscores": true, - "GraphitePath": "", - "HostnameResolveMethod": "none", - "HTTPAuthPassword": "", - "HTTPAuthUser": "", - "InstanceBulkOperationsWaitTimeoutSeconds": 10, - "InstancePollSeconds": 5, - "ListenAddress": ":3000", - "MasterFailoverLostInstancesDowntimeMinutes": 0, - "MySQLConnectTimeoutSeconds": 1, - "MySQLHostnameResolveMethod": "none", - "MySQLTopologyCredentialsConfigFile": "", - "MySQLTopologyMaxPoolConnections": 3, - "MySQLTopologyPassword": "orc_client_user_password", - "MySQLTopologyReadTimeoutSeconds": 3, - "MySQLTopologySSLCAFile": "", - "MySQLTopologySSLCertFile": "", - "MySQLTopologySSLPrivateKeyFile": "", - "MySQLTopologySSLSkipVerify": true, - "MySQLTopologyUseMutualTLS": false, - "MySQLTopologyUser": "orc_client_user", - "OnFailureDetectionProcesses": [ - "echo 'Detected {failureType} on {failureCluster}. Affected replicas: {countSlaves}' >> /tmp/recovery.log" - ], - "OSCIgnoreHostnameFilters": [ - ], - "PhysicalEnvironmentPattern": "[.]([^.]+[.][^.]+)[.]vitess[.]io", - "PostFailoverProcesses": [ - "echo '(for all types) Recovered from {failureType} on {failureCluster}. Failed: {failedHost}:{failedPort}; Successor: {successorHost}:{successorPort}' >> /tmp/recovery.log" - ], - "PostIntermediateMasterFailoverProcesses": [ - "echo 'Recovered from {failureType} on {failureCluster}. Failed: {failedHost}:{failedPort}; Successor: {successorHost}:{successorPort}' >> /tmp/recovery.log" - ], - "PostMasterFailoverProcesses": [ - "echo 'Recovered from {failureType} on {failureCluster}. Failed: {failedHost}:{failedPort}; Promoted: {successorHost}:{successorPort}' >> /tmp/recovery.log", - "n=0; until [ $n -ge 10 ]; do vtctlclient -server vtctld:15999 TabletExternallyReparented {successorAlias} && break; n=$[$n+1]; sleep 5; done" - ], - "PostponeSlaveRecoveryOnLagMinutes": 0, - "PostUnsuccessfulFailoverProcesses": [ - ], - "PowerAuthUsers": [ - "*" - ], - "PreFailoverProcesses": [ - "echo 'Will recover from {failureType} on {failureCluster}' >> /tmp/recovery.log" - ], - "ProblemIgnoreHostnameFilters": [ - ], - "PromotionIgnoreHostnameFilters": [ - ], - "ReadLongRunningQueries": false, - "ReadOnly": false, - "ReasonableMaintenanceReplicationLagSeconds": 20, - "ReasonableReplicationLagSeconds": 10, - "RecoverMasterClusterFilters": [ - ".*" - ], - "RecoveryIgnoreHostnameFilters": [ - ], - "RecoveryPeriodBlockSeconds": 60, - "ReduceReplicationAnalysisCount": true, - "RejectHostnameResolvePattern": "", - "RemoveTextFromHostnameDisplay": ".vitess.io:3306", - "ReplicationLagQuery": "", - "ServeAgentsHttp": false, - "SkipBinlogEventsContaining": [ - ], - "SkipBinlogServerUnresolveCheck": true, - "SkipOrchestratorDatabaseUpdate": false, - "SlaveStartPostWaitMilliseconds": 1000, - "SnapshotTopologiesIntervalHours": 0, - "SQLite3DataFile": ":memory:", - "SSLCAFile": "", - "SSLCertFile": "", - "SSLPrivateKeyFile": "", - "SSLSkipVerify": false, - "SSLValidOUs": [ - ], - "StaleSeedFailMinutes": 60, - "StatusEndpoint": "/api/status", - "StatusOUVerify": false, - "UnseenAgentForgetHours": 6, - "UnseenInstanceForgetHours": 240, - "UseMutualTLS": false, - "UseSSL": false, - "VerifyReplicationFilters": false -} diff --git a/docker/release.sh b/docker/release.sh index d73b4ec2054..2a4ea68b983 100755 --- a/docker/release.sh +++ b/docker/release.sh @@ -1,9 +1,9 @@ #!/bin/bash set -ex -vt_base_version='v16.0.0' -debian_versions='buster bullseye' -default_debian_version='bullseye' +vt_base_version='v18.0.0-SNAPSHOT' +debian_versions='bullseye bookworm' +default_debian_version='bookworm' docker pull --platform linux/amd64 vitess/base:$vt_base_version diff --git a/docker/vttestserver/Dockerfile.mysql57 b/docker/vttestserver/Dockerfile.mysql57 index 8c9a3c409b3..2659852df94 100644 --- a/docker/vttestserver/Dockerfile.mysql57 +++ b/docker/vttestserver/Dockerfile.mysql57 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" FROM "${image}" AS builder @@ -33,7 +33,7 @@ USER vitess RUN make install-testing PREFIX=/vt/install # Start over and build the final image. -FROM debian:buster-slim +FROM debian:bullseye-slim # Install dependencies COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh diff --git a/docker/vttestserver/Dockerfile.mysql80 b/docker/vttestserver/Dockerfile.mysql80 index a7e62800096..3def07978d8 100644 --- a/docker/vttestserver/Dockerfile.mysql80 +++ b/docker/vttestserver/Dockerfile.mysql80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=15 +ARG bootstrap_version=22 ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" FROM "${image}" AS builder @@ -33,7 +33,7 @@ USER vitess RUN make install-testing PREFIX=/vt/install # Start over and build the final image. -FROM debian:buster-slim +FROM debian:bullseye-slim # Install dependencies COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh diff --git a/examples/backups/restart_tablets.sh b/examples/backups/restart_tablets.sh index 13c4ed0cfb6..de812a0ea8e 100755 --- a/examples/backups/restart_tablets.sh +++ b/examples/backups/restart_tablets.sh @@ -35,9 +35,9 @@ for i in 300 301 302; do done sleep 5 -# Wait for all the replica tablets to be in the serving state before initiating -# InitShardPrimary. This is essential, since we want the RESTORE phase to be -# complete before we start InitShardPrimary, otherwise we end up reading the +# Wait for all the tablets to be in the serving state before initiating +# PlannedReparentShard. This is essential, since we want the RESTORE phase to be +# complete before we start PlannedReparentShard, otherwise we end up reading the # tablet type to RESTORE and do not set semi-sync, which leads to the primary # hanging on writes. totalTime=600 @@ -50,6 +50,15 @@ for i in 101 201 301; do done done +for i in 102 202 302; do + while [ $totalTime -gt 0 ]; do + status=$(curl "http://$hostname:15$i/debug/status_details") + echo "$status" | grep "RDONLY: Serving" && break + totalTime=$((totalTime-1)) + sleep 0.1 + done +done + # Check that all the replica tablets have reached REPLICA: Serving state for i in 101 201 301; do status=$(curl "http://$hostname:15$i/debug/status_details") @@ -57,7 +66,14 @@ for i in 101 201 301; do echo "tablet-$i did not reach REPLICA: Serving state. Exiting due to failure." exit 1 done +# Check that all the rdonly tablets have reached RDONLY: Serving state +for i in 102 202 302; do + status=$(curl "http://$hostname:15$i/debug/status_details") + echo "$status" | grep "RDONLY: Serving" && continue + echo "tablet-$i did not reach RDONLY: Serving state. Exiting due to failure." + exit 1 +done -vtctldclient InitShardPrimary --force commerce/0 zone1-100 -vtctldclient InitShardPrimary --force customer/-80 zone1-200 -vtctldclient InitShardPrimary --force customer/80- zone1-300 +vtctldclient PlannedReparentShard commerce/0 --new-primary "zone1-100" +vtctldclient PlannedReparentShard customer/-80 --new-primary "zone1-200" +vtctldclient PlannedReparentShard customer/80- --new-primary "zone1-300" diff --git a/examples/backups/start_cluster.sh b/examples/backups/start_cluster.sh index a9e2de606cb..047f19a7a18 100755 --- a/examples/backups/start_cluster.sh +++ b/examples/backups/start_cluster.sh @@ -22,8 +22,6 @@ source ../common/env.sh # start topo server if [ "${TOPO}" = "zk2" ]; then CELL=zone1 ../common/scripts/zk-up.sh -elif [ "${TOPO}" = "k8s" ]; then - CELL=zone1 ../common/scripts/k3s-up.sh else CELL=zone1 ../common/scripts/etcd-up.sh fi @@ -31,6 +29,8 @@ fi # start vtctld CELL=zone1 ../common/scripts/vtctld-up.sh +# Create keyspace and set the semi_sync durability policy. +vtctldclient CreateKeyspace --durability-policy=semi_sync commerce || fail "Failed to create and configure the commerce keyspace" # start vttablets for keyspace commerce for i in 100 101 102; do @@ -39,12 +39,14 @@ for i in 100 101 102; do done # set one of the replicas to primary -vtctldclient InitShardPrimary --force commerce/0 zone1-100 +vtctldclient PlannedReparentShard commerce/0 --new-primary "zone1-100" # create the schema for commerce vtctlclient ApplySchema -- --sql-file ./create_commerce_schema.sql commerce || fail "Could not apply schema for the commerce keyspace" vtctlclient ApplyVSchema -- --vschema_file ../local/vschema_commerce_seq.json commerce || fail "Could not apply vschema for the commerce keyspace" +# Create keyspace and set the semi_sync durability policy. +vtctldclient CreateKeyspace --durability-policy=semi_sync customer || fail "Failed to create and configure the customer keyspace" # start vttablets for keyspace customer for i in 200 201 202; do @@ -57,8 +59,8 @@ for i in 300 301 302; do done # set one of the replicas to primary -vtctldclient InitShardPrimary --force customer/-80 zone1-200 -vtctldclient InitShardPrimary --force customer/80- zone1-300 +vtctldclient PlannedReparentShard customer/-80 --new-primary "zone1-200" +vtctldclient PlannedReparentShard customer/80- --new-primary "zone1-300" for shard in "-80" "80-"; do wait_for_healthy_shard customer "${shard}" || exit 1 diff --git a/examples/backups/upgrade_cluster.sh b/examples/backups/upgrade_cluster.sh new file mode 100755 index 00000000000..0144dc94579 --- /dev/null +++ b/examples/backups/upgrade_cluster.sh @@ -0,0 +1,97 @@ +#!/bin/bash + +# Copyright 2023 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# this script brings up new tablets for the two new shards that we will +# be creating in the customer keyspace and copies the schema + +source ../common/env.sh + +# Restart the replica tablets so that they come up with new vttablet versions +for i in 101 102; do + echo "Shutting down tablet zone1-$i" + CELL=zone1 TABLET_UID=$i ../common/scripts/vttablet-down.sh + echo "Shutting down mysql zone1-$i" + CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-down.sh + echo "Removing tablet directory zone1-$i" + vtctlclient DeleteTablet -- --allow_primary=true zone1-$i + rm -Rf $VTDATAROOT/vt_0000000$i + echo "Starting tablet zone1-$i again" + CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh + CELL=zone1 KEYSPACE=commerce TABLET_UID=$i ../common/scripts/vttablet-up.sh +done + +for i in 201 202; do + echo "Shutting down tablet zone1-$i" + CELL=zone1 TABLET_UID=$i ../common/scripts/vttablet-down.sh + echo "Shutting down mysql zone1-$i" + CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-down.sh + echo "Removing tablet directory zone1-$i" + vtctlclient DeleteTablet -- --allow_primary=true zone1-$i + rm -Rf $VTDATAROOT/vt_0000000$i + echo "Starting tablet zone1-$i again" + CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh + SHARD=-80 CELL=zone1 KEYSPACE=customer TABLET_UID=$i ../common/scripts/vttablet-up.sh +done + +for i in 301 302; do + echo "Shutting down tablet zone1-$i" + CELL=zone1 TABLET_UID=$i ../common/scripts/vttablet-down.sh + echo "Shutting down mysql zone1-$i" + CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-down.sh + echo "Removing tablet directory zone1-$i" + vtctlclient DeleteTablet -- --allow_primary=true zone1-$i + rm -Rf $VTDATAROOT/vt_0000000$i + echo "Starting tablet zone1-$i again" + CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh + SHARD=80- CELL=zone1 KEYSPACE=customer TABLET_UID=$i ../common/scripts/vttablet-up.sh +done + +# Wait for all the replica tablets to be in the serving state before reparenting to them. +totalTime=600 +for i in 101 201 301; do + while [ $totalTime -gt 0 ]; do + status=$(curl "http://$hostname:15$i/debug/status_details") + echo "$status" | grep "REPLICA: Serving" && break + totalTime=$((totalTime-1)) + sleep 0.1 + done +done + +# Check that all the replica tablets have reached REPLICA: Serving state +for i in 101 201 301; do + status=$(curl "http://$hostname:15$i/debug/status_details") + echo "$status" | grep "REPLICA: Serving" && continue + echo "tablet-$i did not reach REPLICA: Serving state. Exiting due to failure." + exit 1 +done + +# Promote the replica tablets to primary +vtctldclient PlannedReparentShard commerce/0 --new-primary "zone1-101" +vtctldclient PlannedReparentShard customer/-80 --new-primary "zone1-201" +vtctldclient PlannedReparentShard customer/80- --new-primary "zone1-301" + +# Restart the old primary tablets so that they are on the latest version of vttablet too. +echo "Restarting tablet zone1-100" +CELL=zone1 TABLET_UID=100 ../common/scripts/vttablet-down.sh +CELL=zone1 KEYSPACE=commerce TABLET_UID=100 ../common/scripts/vttablet-up.sh + +echo "Restarting tablet zone1-200" +CELL=zone1 TABLET_UID=200 ../common/scripts/vttablet-down.sh +SHARD=-80 CELL=zone1 KEYSPACE=customer TABLET_UID=200 ../common/scripts/vttablet-up.sh + +echo "Restarting tablet zone1-300" +CELL=zone1 TABLET_UID=300 ../common/scripts/vttablet-down.sh +SHARD=80- CELL=zone1 KEYSPACE=customer TABLET_UID=300 ../common/scripts/vttablet-up.sh \ No newline at end of file diff --git a/examples/common/env.sh b/examples/common/env.sh index adee0f34d3f..e0c61a3ff92 100644 --- a/examples/common/env.sh +++ b/examples/common/env.sh @@ -26,10 +26,15 @@ fi # mysqld might be in /usr/sbin which will not be in the default PATH PATH="/usr/sbin:$PATH" -for binary in mysqld etcd etcdctl curl vtctlclient vttablet vtgate vtctld mysqlctl; do +for binary in mysqld etcd etcdctl curl vtctlclient vtctldclient vttablet vtgate vtctld mysqlctl; do command -v "$binary" > /dev/null || fail "${binary} is not installed in PATH. See https://vitess.io/docs/get-started/local/ for install instructions." done; +# vtctlclient has a separate alias setup below +for binary in vttablet vtgate vtctld mysqlctl vtorc vtctl; do + alias $binary="$binary --config-file-not-found-handling=ignore" +done; + if [ "${TOPO}" = "zk2" ]; then # Each ZooKeeper server needs a list of all servers in the quorum. # Since we're running them all locally, we need to give them unique ports. @@ -51,13 +56,6 @@ if [ "${TOPO}" = "zk2" ]; then TOPOLOGY_FLAGS="--topo_implementation zk2 --topo_global_server_address ${ZK_SERVER} --topo_global_root /vitess/global" mkdir -p "${VTDATAROOT}/tmp" -elif [ "${TOPO}" = "k8s" ]; then - # Set topology environment parameters. - K8S_ADDR="localhost" - K8S_PORT="8443" - K8S_KUBECONFIG=$VTDATAROOT/tmp/k8s.kubeconfig - # shellcheck disable=SC2034 - TOPOLOGY_FLAGS="--topo_implementation k8s --topo_k8s_kubeconfig ${K8S_KUBECONFIG} --topo_global_server_address ${K8S_ADDR}:${K8S_PORT} --topo_global_root /vitess/global" elif [ "${TOPO}" = "consul" ]; then # Set up topology environment parameters. CONSUL_SERVER=127.0.0.1 @@ -79,7 +77,7 @@ mkdir -p "${VTDATAROOT}/tmp" # such as ~/.my.cnf alias mysql="command mysql --no-defaults -h 127.0.0.1 -P 15306" -alias vtctlclient="command vtctlclient --server localhost:15999 --log_dir ${VTDATAROOT}/tmp --alsologtostderr" +alias vtctlclient="command vtctlclient --server localhost:15999 --log_dir ${VTDATAROOT}/tmp --alsologtostderr --config-file-not-found-handling=ignore" alias vtctldclient="command vtctldclient --server localhost:15999" # Make sure aliases are expanded in non-interactive shell diff --git a/examples/common/lib/utils.sh b/examples/common/lib/utils.sh index 842a1a2cec4..ed93f139a52 100644 --- a/examples/common/lib/utils.sh +++ b/examples/common/lib/utils.sh @@ -38,9 +38,9 @@ function wait_for_shard_tablets() { done; cur_tablets=$(vtctldclient GetTablets --keyspace "${keyspace}" --shard "${shard}" | wc -l) - if [[ ${cur_tablets} -lt ${num_tablets} ]]; then + if [[ ${cur_tablets} -lt ${num_tablets} ]]; then fail "Timed out after ${wait_secs} seconds waiting for tablets to come up in ${keyspace}/${shard}" - fi + fi } # Wait for a primary tablet to be elected and become healthy and serving @@ -56,17 +56,43 @@ function wait_for_healthy_shard_primary() { local wait_secs=180 for _ in $(seq 1 ${wait_secs}); do - if ! vtctldclient --server=localhost:15999 GetShard "${keyspace}/${shard}" | grep -qi "${unhealthy_indicator}"; then + if ! vtctldclient --server=localhost:15999 GetShard "${keyspace}/${shard}" | grep -qi "${unhealthy_indicator}"; then break fi sleep 1 done; - if vtctldclient --server=localhost:15999 GetShard "${keyspace}/${shard}" | grep -qi "${unhealthy_indicator}"; then + if vtctldclient --server=localhost:15999 GetShard "${keyspace}/${shard}" | grep -qi "${unhealthy_indicator}"; then fail "Timed out after ${wait_secs} seconds waiting for a primary tablet to be elected and become healthy in ${keyspace}/${shard}" fi } + +# Wait for a primary tablet to be writeable, ie read_only=0 and super_read_only=0 +function wait_for_writeable_shard_primary() { + if [[ -z ${1} || -z ${2} ]]; then + fail "A keyspace and shard must be specified when waiting for the shard's primary to be healthy" + fi + local keyspace=${1} + local shard=${2} + local wait_secs=30 + + PRIMARY_TABLET="$(vtctldclient --server=localhost:15999 GetTablets --keyspace "$keyspace" --shard "$shard" | grep -w "primary" | awk '{print $1}')" + if [ -z "$PRIMARY_TABLET" ] ; then + fail "Cannot determine primary tablet for keyspace/shard $keyspace/$shard" + fi + + for _ in $(seq 1 ${wait_secs}); do + if vtctldclient --server=localhost:15999 GetFullStatus "$PRIMARY_TABLET" | grep "super_read_only" | grep --quiet "false" ; then + break + fi + sleep 1 + done + if vtctldclient --server=localhost:15999 GetFullStatus "$PRIMARY_TABLET" | grep "super_read_only" | grep --quiet "true" ; then + fail "Timed out after ${wait_secs} seconds waiting for a primary tablet $PRIMARY_TABLET to be writeable in ${keyspace}/${shard}" + fi +} + # Wait for the shard primary tablet's VReplication engine to open. # There is currently no API call or client command that can be specifically used # to check the VReplication engine's status (no vars in /debug/vars etc. either). @@ -109,9 +135,72 @@ function wait_for_healthy_shard() { wait_for_shard_tablets "${keyspace}" "${shard}" "${num_tablets}" wait_for_healthy_shard_primary "${keyspace}" "${shard}" + wait_for_writeable_shard_primary "${keyspace}" "${shard}" wait_for_shard_vreplication_engine "${keyspace}" "${shard}" } +# Wait for a workflow to reach the running state. Example: +# wait_for_workflow_running customer customer2customer +function wait_for_workflow_running() { + if [[ -z ${1} || -z ${2} ]]; then + fail "A keyspace and workflow must be specified when waiting for a workflow to reach the running state" + fi + + local keyspace=${1} + local workflow=${2} + local wait_secs=90 + local result="" + + echo "Waiting for the ${workflow} workflow in the ${keyspace} keyspace to finish the copy phase..." + + for _ in $(seq 1 ${wait_secs}); do + result=$(vtctldclient Workflow --keyspace="${keyspace}" show --workflow="${workflow}" 2>/dev/null | grep "Copy phase completed") + if [[ ${result} != "" ]]; then + break + fi + sleep 1 + done; + + if [[ ${result} == "" ]]; then + fail "Timed out after ${wait_secs} seconds waiting for the ${workflow} workflow in the ${keyspace} keyspace to reach the running state" + fi + + echo "The ${workflow} workflow in the ${keyspace} keyspace is now running. $(sed -rn 's/.*"(Copy phase.*)".*/\1/p' <<< "${result}")." +} + +# Stop the specified binary name using the provided PID file. +# Example: +# stop_process "vtadmin-web" "$VTDATAROOT/tmp/vtadmin-web.pid" +function stop_process() { + if [[ -z ${1} || -z ${2} ]]; then + fail "A binary name and PID file must be specified when attempting to shutdown a process" + fi + + local binary_name="${1}" + local pidfile="${2}" + local pid="" + local wait_secs=90 + + if [[ -e "${pidfile}" ]]; then + pid=$(cat "${pidfile}") + echo "Stopping ${binary_name}..." + kill "${pid}" + + # Wait for the process to terminate + for _ in $(seq 1 ${wait_secs}); do + if ! ps -p "${pid}" > /dev/null; then + break + fi + sleep 1 + done + if ps -p "${pid}" > /dev/null; then + fail "Timed out after ${wait_secs} seconds waiting for the ${binary_name} using PID file ${pidfile} to terminate" + fi + else + echo "Skipping stopping ${binary_name} because the specified PID file (${pidfile}) does not exist." + fi +} + # Print error message and exit with error code. function fail() { echo "ERROR: ${1}" diff --git a/examples/common/scripts/consul-down.sh b/examples/common/scripts/consul-down.sh index 4da5694525a..2eccb8b5d96 100755 --- a/examples/common/scripts/consul-down.sh +++ b/examples/common/scripts/consul-down.sh @@ -18,5 +18,5 @@ source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh" -echo "Stopping consul..." -kill -9 `cat $VTDATAROOT/tmp/consul.pid` +stop_process "consul" "$VTDATAROOT/tmp/consul.pid" + diff --git a/examples/common/scripts/etcd-down.sh b/examples/common/scripts/etcd-down.sh index f9894f8659c..dbc5d8b1fd6 100755 --- a/examples/common/scripts/etcd-down.sh +++ b/examples/common/scripts/etcd-down.sh @@ -18,5 +18,5 @@ source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh" -echo "Stopping etcd..." -kill -9 `cat $VTDATAROOT/tmp/etcd.pid` +stop_process "etcd" "$VTDATAROOT/tmp/etcd.pid" + diff --git a/examples/common/scripts/etcd-up.sh b/examples/common/scripts/etcd-up.sh index 20a16a42260..ac81c1fbd28 100755 --- a/examples/common/scripts/etcd-up.sh +++ b/examples/common/scripts/etcd-up.sh @@ -19,22 +19,17 @@ source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh" cell=${CELL:-'test'} -export ETCDCTL_API=2 + +echo "Starting etcd..." # Check that etcd is not already running curl "http://${ETCD_SERVER}" > /dev/null 2>&1 && fail "etcd is already running. Exiting." -etcd --enable-v2=true --data-dir "${VTDATAROOT}/etcd/" --listen-client-urls "http://${ETCD_SERVER}" --advertise-client-urls "http://${ETCD_SERVER}" > "${VTDATAROOT}"/tmp/etcd.out 2>&1 & +etcd --data-dir "${VTDATAROOT}/etcd/" --listen-client-urls "http://${ETCD_SERVER}" --advertise-client-urls "http://${ETCD_SERVER}" > "${VTDATAROOT}"/tmp/etcd.out 2>&1 & PID=$! echo $PID > "${VTDATAROOT}/tmp/etcd.pid" sleep 5 -echo "add /vitess/global" -etcdctl --endpoints "http://${ETCD_SERVER}" mkdir /vitess/global & - -echo "add /vitess/$cell" -etcdctl --endpoints "http://${ETCD_SERVER}" mkdir /vitess/$cell & - # And also add the CellInfo description for the cell. # If the node already exists, it's fine, means we used existing data. echo "add $cell CellInfo" @@ -46,6 +41,6 @@ vtctl $TOPOLOGY_FLAGS VtctldCommand AddCellInfo \ $cell set -e -echo "etcd start done..." +echo "etcd is running!" diff --git a/examples/common/scripts/k3s-down.sh b/examples/common/scripts/k3s-down.sh deleted file mode 100755 index 195b024bf91..00000000000 --- a/examples/common/scripts/k3s-down.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This is an example script that stops the k3s server started by k3s-up.sh. - -set -e - -source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh" - -# Stop K3s server. -echo "Stopping k3s server..." - -pid=`cat $VTDATAROOT/tmp/k3s.pid` -echo "Stopping k3s..." -kill -9 $pid diff --git a/examples/common/scripts/k3s-up.sh b/examples/common/scripts/k3s-up.sh deleted file mode 100755 index 7c85cb0ac07..00000000000 --- a/examples/common/scripts/k3s-up.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This is an example script that creates a Kubernetes api for topo use by running k3s - -set -e -cell=${CELL:-'test'} - -script_dir="$(dirname "${BASH_SOURCE[0]:-$0}")" -source "${script_dir}/../env.sh" - -case $(uname) in - Linux) ;; - *) echo "WARNING: unsupported platform. K3s only supports running on Linux, the k8s topology is available for local examples."; exit 1;; -esac - -case $(uname -m) in - aarch64) ;; - x86_64) ;; - *) echo "ERROR: unsupported architecture, the k8s topology is not available for local examples."; exit 1;; -esac - -k3s server --disable-agent --data-dir "${VTDATAROOT}/k3s/" --https-listen-port "${K8S_PORT}" --write-kubeconfig "${K8S_KUBECONFIG}" > "${VTDATAROOT}"/tmp/k3s.out 2>&1 & -PID=$! -echo $PID > "${VTDATAROOT}/tmp/k3s.pid" -disown -a -echo "Waiting for k3s server to start" -sleep 15 - -# Use k3s built-in kubectl with custom config -KUBECTL="k3s kubectl --kubeconfig=${K8S_KUBECONFIG}" - -# Create the CRD for vitesstopologynodes -$KUBECTL create -f "${script_dir}/../../../go/vt/topo/k8stopo/VitessTopoNodes-crd.yaml" - -# Add the CellInfo description for the cell -set +e -echo "add $cell CellInfo" -vtctl $TOPOLOGY_FLAGS VtctldCommand AddCellInfo \ - --root /vitess/$cell \ - $cell -set -e - -echo "k3s start done..." diff --git a/examples/common/scripts/mysqlctl-up.sh b/examples/common/scripts/mysqlctl-up.sh index d9df27ccdc0..ff20cae5793 100755 --- a/examples/common/scripts/mysqlctl-up.sh +++ b/examples/common/scripts/mysqlctl-up.sh @@ -40,3 +40,5 @@ mysqlctl \ --tablet_uid $uid \ --mysql_port $mysql_port \ $action + +echo -e "MySQL for tablet $alias is running!" diff --git a/examples/common/scripts/vtadmin-down.sh b/examples/common/scripts/vtadmin-down.sh index 011e6da7f49..c592f0991ee 100755 --- a/examples/common/scripts/vtadmin-down.sh +++ b/examples/common/scripts/vtadmin-down.sh @@ -1,9 +1,20 @@ #!/bin/bash -source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh" +# Copyright 2023 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -echo "Stopping vtadmin-web..." -kill -9 "$(cat "$VTDATAROOT/tmp/vtadmin-web.pid")" +source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh" -echo "Stopping vtadmin-api..." -kill -9 "$(cat "$VTDATAROOT/tmp/vtadmin-api.pid")" +stop_process "vtadmin-web" "$VTDATAROOT/tmp/vtadmin-web.pid" +stop_process "vtadmin-api" "$VTDATAROOT/tmp/vtadmin-api.pid" diff --git a/examples/common/scripts/vtadmin-up.sh b/examples/common/scripts/vtadmin-up.sh index 6b9cc1fbeb2..faa2e6a177f 100755 --- a/examples/common/scripts/vtadmin-up.sh +++ b/examples/common/scripts/vtadmin-up.sh @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2023 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + script_dir="$(dirname "${BASH_SOURCE[0]:-$0}")" source "${script_dir}/../env.sh" @@ -11,8 +25,8 @@ vtadmin_api_port=14200 vtadmin_web_port=14201 vtadmin \ - --addr ":${vtadmin_api_port}" \ - --http-origin "http://localhost:${vtadmin_web_port}" \ + --addr "${hostname}:${vtadmin_api_port}" \ + --http-origin "http://${hostname}:${vtadmin_web_port}" \ --http-tablet-url-tmpl "http://{{ .Tablet.Hostname }}:15{{ .Tablet.Alias.Uid }}" \ --tracer "opentracing-jaeger" \ --grpc-tracing \ @@ -21,7 +35,7 @@ vtadmin \ --alsologtostderr \ --rbac \ --rbac-config="${script_dir}/../vtadmin/rbac.yaml" \ - --cluster "id=${cluster_name},name=${cluster_name},discovery=staticfile,discovery-staticfile-path=${script_dir}/../vtadmin/discovery.json,tablet-fqdn-tmpl={{ .Tablet.Hostname }}:15{{ .Tablet.Alias.Uid }}" \ + --cluster "id=${cluster_name},name=${cluster_name},discovery=staticfile,discovery-staticfile-path=${script_dir}/../vtadmin/discovery.json,tablet-fqdn-tmpl=http://{{ .Tablet.Hostname }}:15{{ .Tablet.Alias.Uid }}" \ > "${log_dir}/vtadmin-api.out" 2>&1 & vtadmin_api_pid=$! @@ -29,7 +43,7 @@ echo ${vtadmin_api_pid} > "${log_dir}/vtadmin-api.pid" echo "\ vtadmin-api is running! - - API: http://localhost:${vtadmin_api_port} + - API: http://${hostname}:${vtadmin_api_port} - Logs: ${log_dir}/vtadmin-api.out - PID: ${vtadmin_api_pid} " @@ -37,7 +51,7 @@ vtadmin-api is running! # Wait for vtadmin to successfully discover the cluster expected_cluster_result="{\"result\":{\"clusters\":[{\"id\":\"${cluster_name}\",\"name\":\"${cluster_name}\"}]},\"ok\":true}" for _ in {0..300}; do - result=$(curl -s "http://localhost:${vtadmin_api_port}/api/clusters") + result=$(curl -s "http://${hostname}:${vtadmin_api_port}/api/clusters") if [[ ${result} == "${expected_cluster_result}" ]]; then break fi @@ -45,40 +59,9 @@ for _ in {0..300}; do done # Check one last time -[[ $(curl -s "http://localhost:${vtadmin_api_port}/api/clusters") == "${expected_cluster_result}" ]] || fail "vtadmin failed to discover the running example Vitess cluster." - -# Download nvm and node -if [[ -z ${NVM_DIR} ]]; then - export NVM_DIR="$HOME/.nvm" -fi - -if [[ -z ${NODE_VERSION} ]]; then - export NODE_VERSION="16" -fi - -output "\nInstalling nvm...\n" - -if [ -d "$NVM_DIR" ]; then - output "\033[1;32mnvm is already installed!\033[0m" -else - curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.0/install.sh | bash && output "\033[1;32mnvm is installed!\033[0m" || fail "\033[1;32mnvm failed to install!\033[0m" -fi - -source "$NVM_DIR/nvm.sh" - -output "\nConfiguring Node.js $NODE_VERSION\n" -nvm install "$NODE_VERSION" || fail "Could not install nvm $NODE_VERSION." -nvm use "$NODE_VERSION" || fail "Could not use nvm $NODE_VERSION." -nvm use "$NODE_VERSION" || fail "Could not use nvm $NODE_VERSION." - -# As a TODO, it'd be nice to make the assumption that vtadmin-web is already -# installed and built (since we assume that `make` has already been run for -# other Vitess components.) -npm --prefix "$web_dir" --silent install +[[ $(curl -s "http://${hostname}:${vtadmin_api_port}/api/clusters") == "${expected_cluster_result}" ]] || fail "vtadmin failed to discover the running example Vitess cluster." -REACT_APP_VTADMIN_API_ADDRESS="http://localhost:${vtadmin_api_port}" \ - REACT_APP_ENABLE_EXPERIMENTAL_TABLET_DEBUG_VARS="true" \ - npm run --prefix "$web_dir" build +[[ ! -d "$web_dir/build" ]] && fail "Please make sure the VTAdmin files are built in $web_dir/build, using 'make build'" "${web_dir}/node_modules/.bin/serve" --no-clipboard -l $vtadmin_web_port -s "${web_dir}/build" \ > "${log_dir}/vtadmin-web.out" 2>&1 & @@ -88,7 +71,7 @@ echo ${vtadmin_web_pid} > "${log_dir}/vtadmin-web.pid" echo "\ vtadmin-web is running! - - Browser: http://localhost:${vtadmin_web_port} + - Browser: http://${hostname}:${vtadmin_web_port} - Logs: ${log_dir}/vtadmin-web.out - PID: ${vtadmin_web_pid} " diff --git a/examples/common/scripts/vtctld-down.sh b/examples/common/scripts/vtctld-down.sh index a56d59b97e5..723b1a77bb6 100755 --- a/examples/common/scripts/vtctld-down.sh +++ b/examples/common/scripts/vtctld-down.sh @@ -18,5 +18,5 @@ source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh" -echo "Stopping vtctld..." -kill -9 `cat $VTDATAROOT/tmp/vtctld.pid` +stop_process "vtctld" "$VTDATAROOT/tmp/vtctld.pid" + diff --git a/examples/common/scripts/vtctld-up.sh b/examples/common/scripts/vtctld-up.sh index e49b346ca1e..6902a851997 100755 --- a/examples/common/scripts/vtctld-up.sh +++ b/examples/common/scripts/vtctld-up.sh @@ -34,3 +34,13 @@ vtctld \ --grpc_port $grpc_port \ --pid_file $VTDATAROOT/tmp/vtctld.pid \ > $VTDATAROOT/tmp/vtctld.out 2>&1 & + +for _ in {0..300}; do + curl -I "http://${hostname}:${vtctld_web_port}/debug/status" &>/dev/null && break + sleep 0.1 +done + +# check one last time +curl -I "http://${hostname}:${vtctld_web_port}/debug/status" &>/dev/null || fail "vtctld could not be started!" + +echo -e "vtctld is running!" diff --git a/examples/common/scripts/vtgate-down.sh b/examples/common/scripts/vtgate-down.sh index 3eea5fdf94d..c25036c7955 100755 --- a/examples/common/scripts/vtgate-down.sh +++ b/examples/common/scripts/vtgate-down.sh @@ -18,6 +18,5 @@ source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh" -# Stop vtgate. -echo "Stopping vtgate..." -kill `cat $VTDATAROOT/tmp/vtgate.pid` +stop_process "vtgate" "$VTDATAROOT/tmp/vtgate.pid" + diff --git a/examples/common/scripts/vtgate-up.sh b/examples/common/scripts/vtgate-up.sh index 03b85869e5d..dbaaad02367 100755 --- a/examples/common/scripts/vtgate-up.sh +++ b/examples/common/scripts/vtgate-up.sh @@ -24,7 +24,7 @@ grpc_port=15991 mysql_server_port=15306 mysql_server_socket_path="/tmp/mysql.sock" -# Start vtgate. +echo "Starting vtgate..." # shellcheck disable=SC2086 vtgate \ $TOPOLOGY_FLAGS \ @@ -39,13 +39,13 @@ vtgate \ --tablet_types_to_wait PRIMARY,REPLICA \ --service_map 'grpc-vtgateservice' \ --pid_file $VTDATAROOT/tmp/vtgate.pid \ + --enable_buffer \ --mysql_auth_server_impl none \ > $VTDATAROOT/tmp/vtgate.out 2>&1 & # Block waiting for vtgate to be listening # Not the same as healthy -echo "Waiting for vtgate to be up..." while true; do curl -I "http://$hostname:$web_port/debug/status" >/dev/null 2>&1 && break sleep 0.1 diff --git a/examples/common/scripts/vtorc-down.sh b/examples/common/scripts/vtorc-down.sh index f4d2e4cb8a0..084c3e8e541 100755 --- a/examples/common/scripts/vtorc-down.sh +++ b/examples/common/scripts/vtorc-down.sh @@ -1,7 +1,20 @@ #!/bin/bash +# Copyright 2023 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh" -echo "Stopping vtorc." -kill -9 "$(cat "$VTDATAROOT/tmp/vtorc.pid")" +stop_process "vtorc" "$VTDATAROOT/tmp/vtorc.pid" diff --git a/examples/common/scripts/vtorc-up.sh b/examples/common/scripts/vtorc-up.sh index 66a826da288..23ca4e62b48 100755 --- a/examples/common/scripts/vtorc-up.sh +++ b/examples/common/scripts/vtorc-up.sh @@ -6,6 +6,7 @@ source "${script_dir}/../env.sh" log_dir="${VTDATAROOT}/tmp" port=16000 +echo "Starting vtorc..." vtorc \ $TOPOLOGY_FLAGS \ --logtostderr \ diff --git a/examples/common/scripts/vttablet-down.sh b/examples/common/scripts/vttablet-down.sh index 3de266def76..b81b94674ea 100755 --- a/examples/common/scripts/vttablet-down.sh +++ b/examples/common/scripts/vttablet-down.sh @@ -19,12 +19,7 @@ source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh" -printf -v tablet_dir 'vt_%010d' $TABLET_UID -pid=`cat $VTDATAROOT/$tablet_dir/vttablet.pid` - -kill $pid - -# Wait for vttablet to die. -while ps -p $pid > /dev/null; do sleep 1; done +printf -v tablet_dir 'vt_%010d' "$TABLET_UID" +stop_process "vttablet" "$VTDATAROOT/$tablet_dir/vttablet.pid" diff --git a/examples/common/scripts/vttablet-up.sh b/examples/common/scripts/vttablet-up.sh index 0e70837d235..b0d1511d927 100755 --- a/examples/common/scripts/vttablet-up.sh +++ b/examples/common/scripts/vttablet-up.sh @@ -46,7 +46,6 @@ vttablet \ --init_shard $shard \ --init_tablet_type $tablet_type \ --health_check_interval 5s \ - --enable_replication_reporter \ --backup_storage_implementation file \ --file_backup_storage_root $VTDATAROOT/backups \ --restore_from_backup \ @@ -55,7 +54,9 @@ vttablet \ --service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream' \ --pid_file $VTDATAROOT/$tablet_dir/vttablet.pid \ --vtctld_addr http://$hostname:$vtctld_web_port/ \ - --disable_active_reparents \ + --heartbeat_enable \ + --heartbeat_interval=250ms \ + --heartbeat_on_demand_duration=5s \ > $VTDATAROOT/$tablet_dir/vttablet.out 2>&1 & # Block waiting for the tablet to be listening @@ -68,3 +69,5 @@ done # check one last time curl -I "http://$hostname:$port/debug/status" || fail "tablet could not be started!" + +echo -e "vttablet for $alias is running!" diff --git a/examples/compose/client.go b/examples/compose/client.go index a4933f21833..8beaef683cd 100644 --- a/examples/compose/client.go +++ b/examples/compose/client.go @@ -42,7 +42,6 @@ var ( func main() { pflag.Parse() - rand.Seed(time.Now().UnixNano()) // Connect to vtgate. db, err := vitessdriver.Open(*server, "@primary") diff --git a/examples/compose/config/init_db.sql b/examples/compose/config/init_db.sql index d29f16073cd..8239d5ed5ec 100644 --- a/examples/compose/config/init_db.sql +++ b/examples/compose/config/init_db.sql @@ -3,6 +3,12 @@ ############################################################################### # Equivalent of mysql_secure_installation ############################################################################### +# We need to ensure that super_read_only is disabled so that we can execute +# these commands. Note that disabling it does NOT disable read_only. +# We save the current value so that we only re-enable it at the end if it was +# enabled before. +SET @original_super_read_only=IF(@@global.super_read_only=1, 'ON', 'OFF'); +SET GLOBAL super_read_only='OFF'; # Changes during the init db should not make it to the binlog. # They could potentially create errant transactions on replicas. SET sql_log_bin = 0; @@ -67,3 +73,9 @@ GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE, FLUSH PRIVILEGES; RESET SLAVE ALL; RESET MASTER; + +# custom sql is used to add custom scripts like creating users/passwords. We use it in our tests +# {{custom_sql}} + +# We need to set super_read_only back to what it was before +SET GLOBAL super_read_only=IFNULL(@original_super_read_only, 'ON'); diff --git a/examples/compose/docker-compose.beginners.yml b/examples/compose/docker-compose.beginners.yml index bf916df0e42..d647d9fdc4d 100644 --- a/examples/compose/docker-compose.beginners.yml +++ b/examples/compose/docker-compose.beginners.yml @@ -1,7 +1,7 @@ version: "2.1" services: consul1: - image: consul:latest + image: hashicorp/consul:latest hostname: "consul1" ports: - "8400:8400" @@ -9,7 +9,7 @@ services: - "8600:8600" command: "agent -server -bootstrap-expect 3 -ui -disable-host-node-id -client 0.0.0.0" consul2: - image: consul:latest + image: hashicorp/consul:latest hostname: "consul2" expose: - "8400" @@ -19,7 +19,7 @@ services: depends_on: - consul1 consul3: - image: consul:latest + image: hashicorp/consul:latest hostname: "consul3" expose: - "8400" diff --git a/examples/compose/docker-compose.yml b/examples/compose/docker-compose.yml index 17f9e5fe65e..f0b869781de 100644 --- a/examples/compose/docker-compose.yml +++ b/examples/compose/docker-compose.yml @@ -2,7 +2,7 @@ services: consul1: command: agent -server -bootstrap-expect 3 -ui -disable-host-node-id -client 0.0.0.0 hostname: consul1 - image: consul:latest + image: hashicorp/consul:latest ports: - 8400:8400 - 8500:8500 @@ -16,7 +16,7 @@ services: - "8500" - "8600" hostname: consul2 - image: consul:latest + image: hashicorp/consul:latest consul3: command: agent -server -retry-join consul1 -disable-host-node-id depends_on: @@ -26,7 +26,7 @@ services: - "8500" - "8600" hostname: consul3 - image: consul:latest + image: hashicorp/consul:latest external_db_host: build: context: ./external_db/mysql diff --git a/examples/local/101_initial_cluster.sh b/examples/local/101_initial_cluster.sh index 25e8042afe9..95b51f168cd 100755 --- a/examples/local/101_initial_cluster.sh +++ b/examples/local/101_initial_cluster.sh @@ -19,11 +19,18 @@ source ../common/env.sh +# This is done here as a means to support testing the experimental +# custom sidecar database name work in a wide variety of scenarios +# as the local examples are used to test many features locally. +# This is NOT here to indicate that you should normally use a +# non-default (_vt) value or that it is somehow a best practice +# to do so. In production, you should ONLY use a non-default +# sidecar database name when it's truly needed. +SIDECAR_DB_NAME=${SIDECAR_DB_NAME:-"_vt"} + # start topo server if [ "${TOPO}" = "zk2" ]; then CELL=zone1 ../common/scripts/zk-up.sh -elif [ "${TOPO}" = "k8s" ]; then - CELL=zone1 ../common/scripts/k3s-up.sh elif [ "${TOPO}" = "consul" ]; then CELL=zone1 ../common/scripts/consul-up.sh else @@ -33,15 +40,34 @@ fi # start vtctld CELL=zone1 ../common/scripts/vtctld-up.sh +if vtctldclient GetKeyspace commerce > /dev/null 2>&1 ; then + # Keyspace already exists: we could be running this 101 example on an non-empty VTDATAROOT + vtctldclient SetKeyspaceDurabilityPolicy --durability-policy=semi_sync commerce || fail "Failed to set keyspace durability policy on the commerce keyspace" +else + # Create the keyspace with the sidecar database name and set the + # correct durability policy. Please see the comment above for + # more context on using a custom sidecar database name in your + # Vitess clusters. + vtctldclient CreateKeyspace --sidecar-db-name="${SIDECAR_DB_NAME}" --durability-policy=semi_sync commerce || fail "Failed to create and configure the commerce keyspace" +fi + +# start mysqlctls for keyspace commerce +# because MySQL takes time to start, we do this in parallel +for i in 100 101 102; do + CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh & +done + +# without a sleep, we can have below echo happen before the echo of mysqlctl-up.sh +sleep 2 +echo "Waiting for mysqlctls to start..." +wait +echo "mysqlctls are running!" + # start vttablets for keyspace commerce for i in 100 101 102; do - CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh CELL=zone1 KEYSPACE=commerce TABLET_UID=$i ../common/scripts/vttablet-up.sh done -# set the correct durability policy for the keyspace -vtctldclient --server localhost:15999 SetKeyspaceDurabilityPolicy --durability-policy=semi_sync commerce || fail "Failed to set keyspace durability policy on the commerce keyspace" - # start vtorc ../common/scripts/vtorc-up.sh diff --git a/examples/local/202_move_tables.sh b/examples/local/202_move_tables.sh index f385acb12a3..a4a24150973 100755 --- a/examples/local/202_move_tables.sh +++ b/examples/local/202_move_tables.sh @@ -19,4 +19,7 @@ source ../common/env.sh -vtctlclient MoveTables -- --source commerce --tables 'customer,corder' Create customer.commerce2customer +vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer create --source-keyspace commerce --tables "customer,corder" + +# Wait for the workflow to reach the running state. +wait_for_workflow_running customer commerce2customer diff --git a/examples/local/203_switch_reads.sh b/examples/local/203_switch_reads.sh index 4bca7e4e257..a307c583171 100755 --- a/examples/local/203_switch_reads.sh +++ b/examples/local/203_switch_reads.sh @@ -19,4 +19,4 @@ source ../common/env.sh -vtctlclient MoveTables -- --tablet_types=rdonly,replica SwitchTraffic customer.commerce2customer +vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer switchtraffic --tablet-types "rdonly,replica" diff --git a/examples/local/204_switch_writes.sh b/examples/local/204_switch_writes.sh index 743ca1e2512..8305356a1cf 100755 --- a/examples/local/204_switch_writes.sh +++ b/examples/local/204_switch_writes.sh @@ -19,4 +19,4 @@ source ../common/env.sh -vtctlclient MoveTables -- --tablet_types=primary SwitchTraffic customer.commerce2customer +vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer switchtraffic --tablet-types primary diff --git a/examples/local/205_clean_commerce.sh b/examples/local/205_clean_commerce.sh index 5d307a231d3..127437d1d1b 100755 --- a/examples/local/205_clean_commerce.sh +++ b/examples/local/205_clean_commerce.sh @@ -19,5 +19,4 @@ source ../common/env.sh -vtctlclient MoveTables Complete customer.commerce2customer - +vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer complete diff --git a/examples/local/303_reshard.sh b/examples/local/303_reshard.sh index ea12987e9ed..5bf36ff7a19 100755 --- a/examples/local/303_reshard.sh +++ b/examples/local/303_reshard.sh @@ -19,4 +19,8 @@ source ../common/env.sh -vtctlclient Reshard -- --source_shards '0' --target_shards '-80,80-' Create customer.cust2cust +vtctldclient Reshard --workflow cust2cust --target-keyspace customer create --source-shards '0' --target-shards '-80,80-' + +# Wait for the workflow to reach the running state. +wait_for_workflow_running customer cust2cust + diff --git a/examples/local/304_switch_reads.sh b/examples/local/304_switch_reads.sh index 52d6093f4ff..5e4edff7f0d 100755 --- a/examples/local/304_switch_reads.sh +++ b/examples/local/304_switch_reads.sh @@ -18,4 +18,4 @@ source ../common/env.sh -vtctlclient Reshard -- --tablet_types=rdonly,replica SwitchTraffic customer.cust2cust +vtctldclient Reshard --workflow cust2cust --target-keyspace customer switchtraffic --tablet-types "rdonly,replica" diff --git a/examples/local/305_switch_writes.sh b/examples/local/305_switch_writes.sh index 9bbc7ed9ea5..c9bd66b92a5 100755 --- a/examples/local/305_switch_writes.sh +++ b/examples/local/305_switch_writes.sh @@ -18,4 +18,5 @@ source ../common/env.sh -vtctlclient Reshard -- --tablet_types=primary SwitchTraffic customer.cust2cust +vtctldclient Reshard --workflow cust2cust --target-keyspace customer switchtraffic --tablet-types "primary" + diff --git a/examples/local/306_down_shard_0.sh b/examples/local/306_down_shard_0.sh index db860b3e23c..5c8332f95bc 100755 --- a/examples/local/306_down_shard_0.sh +++ b/examples/local/306_down_shard_0.sh @@ -17,7 +17,7 @@ source ../common/env.sh -vtctlclient Reshard Complete customer.cust2cust +vtctldclient Reshard --workflow cust2cust --target-keyspace customer complete for i in 200 201 202; do CELL=zone1 TABLET_UID=$i ../common/scripts/vttablet-down.sh diff --git a/examples/local/401_teardown.sh b/examples/local/401_teardown.sh index 08dcbf3cd29..8f3e7844c5a 100755 --- a/examples/local/401_teardown.sh +++ b/examples/local/401_teardown.sh @@ -26,15 +26,22 @@ source ../common/env.sh ../common/scripts/vtgate-down.sh for tablet in 100 200 300 400; do - if vtctlclient --action_timeout 1s --server localhost:15999 GetTablet zone1-$tablet >/dev/null 2>&1; then + if vtctldclient --action_timeout 1s --server localhost:15999 GetTablet zone1-$tablet >/dev/null 2>&1; then # The zero tablet is up. Try to shutdown 0-2 tablet + mysqlctl for i in 0 1 2; do uid=$((tablet + i)) printf -v alias '%s-%010d' 'zone1' $uid echo "Shutting down tablet $alias" CELL=zone1 TABLET_UID=$uid ../common/scripts/vttablet-down.sh - CELL=zone1 TABLET_UID=$uid ../common/scripts/mysqlctl-down.sh + # because MySQL takes time to stop, we do this in parallel + CELL=zone1 TABLET_UID=$uid ../common/scripts/mysqlctl-down.sh & done + + # without a sleep below, we can have the echo happen before the echo of mysqlctl-down.sh + sleep 2 + echo "Waiting mysqlctl to stop..." + wait + echo "mysqlctls are stopped!" fi done @@ -42,8 +49,6 @@ done if [ "${TOPO}" = "zk2" ]; then CELL=zone1 ../common/scripts/zk-down.sh -elif [ "${TOPO}" = "k8s" ]; then - CELL=zone1 ../common/scripts/k3s-down.sh elif [ "${TOPO}" = "consul" ]; then CELL=zone1 ../common/scripts/consul-down.sh else diff --git a/examples/operator/101_initial_cluster.yaml b/examples/operator/101_initial_cluster.yaml index 72c1f979f15..22cdc6f686d 100644 --- a/examples/operator/101_initial_cluster.yaml +++ b/examples/operator/101_initial_cluster.yaml @@ -103,7 +103,7 @@ spec: mysqld: resources: limits: - memory: 512Mi + memory: 1024Mi requests: cpu: 100m memory: 512Mi @@ -135,6 +135,12 @@ stringData: ############################################################################### # Equivalent of mysql_secure_installation ############################################################################### + # We need to ensure that super_read_only is disabled so that we can execute + # these commands. Note that disabling it does NOT disable read_only. + # We save the current value so that we only re-enable it at the end if it was + # enabled before. + SET @original_super_read_only=IF(@@global.super_read_only=1, 'ON', 'OFF'); + SET GLOBAL super_read_only='OFF'; # Changes during the init db should not make it to the binlog. # They could potentially create errant transactions on replicas. @@ -213,6 +219,12 @@ stringData: RESET SLAVE ALL; RESET MASTER; + + # custom sql is used to add custom scripts like creating users/passwords. We use it in our tests + # {{custom_sql}} + + # We need to set super_read_only back to what it was before + SET GLOBAL super_read_only=IFNULL(@original_super_read_only, 'ON'); rbac.yaml: | rules: - resource: "*" diff --git a/examples/operator/201_customer_tablets.yaml b/examples/operator/201_customer_tablets.yaml index d925c348d7b..5800a5e05df 100644 --- a/examples/operator/201_customer_tablets.yaml +++ b/examples/operator/201_customer_tablets.yaml @@ -99,7 +99,7 @@ spec: mysqld: resources: limits: - memory: 512Mi + memory: 1024Mi requests: cpu: 100m memory: 512Mi @@ -134,7 +134,7 @@ spec: mysqld: resources: limits: - memory: 512Mi + memory: 1024Mi requests: cpu: 100m memory: 512Mi diff --git a/examples/operator/302_new_shards.yaml b/examples/operator/302_new_shards.yaml index a86c28b3864..2e15bc40d28 100644 --- a/examples/operator/302_new_shards.yaml +++ b/examples/operator/302_new_shards.yaml @@ -99,7 +99,7 @@ spec: mysqld: resources: limits: - memory: 512Mi + memory: 1024Mi requests: cpu: 100m memory: 512Mi @@ -134,7 +134,7 @@ spec: mysqld: resources: limits: - memory: 512Mi + memory: 1024Mi requests: cpu: 100m memory: 512Mi @@ -165,7 +165,7 @@ spec: mysqld: resources: limits: - memory: 512Mi + memory: 1024Mi requests: cpu: 100m memory: 512Mi diff --git a/examples/operator/306_down_shard_0.yaml b/examples/operator/306_down_shard_0.yaml index 171f00166d0..4bdb694d678 100644 --- a/examples/operator/306_down_shard_0.yaml +++ b/examples/operator/306_down_shard_0.yaml @@ -99,7 +99,7 @@ spec: mysqld: resources: limits: - memory: 512Mi + memory: 1024Mi requests: cpu: 100m memory: 512Mi @@ -134,7 +134,7 @@ spec: mysqld: resources: limits: - memory: 512Mi + memory: 1024Mi requests: cpu: 100m memory: 512Mi diff --git a/examples/operator/README.md b/examples/operator/README.md index 6418b2fa225..de2e598b516 100644 --- a/examples/operator/README.md +++ b/examples/operator/README.md @@ -2,7 +2,7 @@ ``` # Start minikube -minikube start --cpus=8 --memory=11000 --disk-size=50g --kubernetes-version=v1.19.5 +minikube start --cpus=8 --memory=11000 --disk-size=50g --kubernetes-version=v1.25.8 # Install Operator kubectl apply -f operator.yaml diff --git a/examples/region_sharding/101_initial_cluster.sh b/examples/region_sharding/101_initial_cluster.sh index c2692440189..6dd8989a32f 100755 --- a/examples/region_sharding/101_initial_cluster.sh +++ b/examples/region_sharding/101_initial_cluster.sh @@ -22,8 +22,6 @@ source ../common/env.sh # start topo server if [ "${TOPO}" = "zk2" ]; then CELL=zone1 ../common/scripts/zk-up.sh -elif [ "${TOPO}" = "k8s" ]; then - CELL=zone1 ../common/scripts/k3s-up.sh else CELL=zone1 ../common/scripts/etcd-up.sh fi diff --git a/examples/region_sharding/301_teardown.sh b/examples/region_sharding/301_teardown.sh index 25f3bb259f2..6d5a2e9fa1c 100755 --- a/examples/region_sharding/301_teardown.sh +++ b/examples/region_sharding/301_teardown.sh @@ -38,8 +38,6 @@ done if [ "${TOPO}" = "zk2" ]; then CELL=zone1 ../common/scripts/zk-down.sh -elif [ "${TOPO}" = "k8s" ]; then - CELL=zone1 ../common/scripts/k3s-down.sh else CELL=zone1 ../common/scripts/etcd-down.sh fi diff --git a/go.mod b/go.mod index 1c82c9f0942..639a22edc6b 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module vitess.io/vitess -go 1.20 +go 1.21 require ( cloud.google.com/go/storage v1.29.0 @@ -9,22 +9,18 @@ require ( github.com/Azure/azure-storage-blob-go v0.15.0 github.com/DataDog/datadog-go v4.8.3+incompatible github.com/HdrHistogram/hdrhistogram-go v0.9.0 // indirect - github.com/PuerkitoBio/goquery v1.5.1 github.com/aquarapid/vaultlib v0.5.1 github.com/armon/go-metrics v0.4.1 // indirect - github.com/aws/aws-sdk-go v1.44.192 + github.com/aws/aws-sdk-go v1.44.258 github.com/buger/jsonparser v1.1.1 github.com/cespare/xxhash/v2 v2.2.0 - github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 // indirect github.com/corpix/uarand v0.1.1 // indirect github.com/dave/jennifer v1.6.0 github.com/evanphx/json-patch v5.6.0+incompatible github.com/fsnotify/fsnotify v1.6.0 - github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab github.com/go-sql-driver/mysql v1.7.0 github.com/golang/glog v1.0.0 - github.com/golang/mock v1.6.0 - github.com/golang/protobuf v1.5.2 + github.com/golang/protobuf v1.5.3 github.com/golang/snappy v0.0.4 github.com/google/go-cmp v0.5.9 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 @@ -33,14 +29,12 @@ require ( github.com/gorilla/mux v1.8.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/hashicorp/consul/api v1.18.0 + github.com/hashicorp/consul/api v1.20.0 github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/serf v0.10.1 // indirect - github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428 - github.com/imdario/mergo v0.3.13 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/klauspost/compress v1.15.15 + github.com/klauspost/compress v1.16.5 github.com/klauspost/pgzip v1.2.5 github.com/krishicks/yaml-patch v0.0.10 github.com/magiconair/properties v1.8.7 @@ -56,17 +50,16 @@ require ( github.com/pires/go-proxyproto v0.6.2 github.com/pkg/errors v0.9.1 github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a - github.com/planetscale/vtprotobuf v0.4.0 - github.com/prometheus/client_golang v1.14.0 - github.com/prometheus/common v0.39.0 // indirect + github.com/planetscale/vtprotobuf v0.5.0 + github.com/prometheus/client_golang v1.15.1 + github.com/prometheus/common v0.43.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 github.com/sjmudd/stopwatch v0.1.1 github.com/soheilhy/cmux v0.1.5 github.com/spf13/cobra v1.6.1 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.15.0 - github.com/spyzhov/ajson v0.7.2 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.2 github.com/tchap/go-patricia v2.3.0+incompatible github.com/tidwall/gjson v1.12.1 github.com/tinylib/msgp v1.1.8 // indirect @@ -74,146 +67,125 @@ require ( github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 github.com/z-division/go-zookeeper v1.0.0 - go.etcd.io/etcd/api/v3 v3.5.7 - go.etcd.io/etcd/client/pkg/v3 v3.5.7 - go.etcd.io/etcd/client/v3 v3.5.7 - golang.org/x/crypto v0.5.0 // indirect - golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 - golang.org/x/mod v0.7.0 // indirect - golang.org/x/net v0.7.0 - golang.org/x/oauth2 v0.4.0 - golang.org/x/sys v0.5.0 // indirect - golang.org/x/term v0.5.0 - golang.org/x/text v0.7.0 + go.etcd.io/etcd/api/v3 v3.5.8 + go.etcd.io/etcd/client/pkg/v3 v3.5.8 + go.etcd.io/etcd/client/v3 v3.5.8 + go.uber.org/mock v0.2.0 + golang.org/x/crypto v0.12.0 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/net v0.14.0 + golang.org/x/oauth2 v0.7.0 + golang.org/x/sys v0.11.0 + golang.org/x/term v0.11.0 + golang.org/x/text v0.12.0 golang.org/x/time v0.3.0 - golang.org/x/tools v0.5.0 - google.golang.org/api v0.109.0 - google.golang.org/genproto v0.0.0-20230131230820-1c016267d619 // indirect - google.golang.org/grpc v1.52.3 - google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 + golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 + google.golang.org/api v0.121.0 + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/grpc v1.55.0-dev + google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b - google.golang.org/protobuf v1.28.1 - gopkg.in/DataDog/dd-trace-go.v1 v1.47.0 + google.golang.org/protobuf v1.30.0 + gopkg.in/DataDog/dd-trace-go.v1 v1.50.1 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect - gopkg.in/gcfg.v1 v1.2.3 gopkg.in/ldap.v2 v2.5.1 - gopkg.in/warnings.v0 v0.1.2 // indirect gotest.tools v2.2.0+incompatible - honnef.co/go/tools v0.3.3 - k8s.io/apiextensions-apiserver v0.18.19 - k8s.io/apimachinery v0.26.1 - k8s.io/client-go v0.26.1 - k8s.io/code-generator v0.26.1 sigs.k8s.io/yaml v1.3.0 ) require ( + github.com/Shopify/toxiproxy/v2 v2.5.0 github.com/bndr/gotabulate v1.1.2 + github.com/gammazero/deque v0.2.1 + github.com/google/safehtml v0.1.0 + github.com/hashicorp/go-version v1.6.0 github.com/kr/pretty v0.3.1 github.com/kr/text v0.2.0 + github.com/mitchellh/mapstructure v1.5.0 github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249 - golang.org/x/exp v0.0.0-20230131160201-f062dba9d201 - golang.org/x/sync v0.1.0 + github.com/spf13/afero v1.9.3 + github.com/spf13/jwalterweatherman v1.1.0 + github.com/xlab/treeprint v1.2.0 + go.uber.org/goleak v1.2.1 + golang.org/x/sync v0.3.0 modernc.org/sqlite v1.20.3 ) require ( - cloud.google.com/go v0.109.0 // indirect - cloud.google.com/go/compute v1.18.0 // indirect + cloud.google.com/go v0.110.0 // indirect + cloud.google.com/go/compute v1.19.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.10.0 // indirect - github.com/BurntSushi/toml v1.2.1 // indirect - github.com/DataDog/datadog-agent/pkg/obfuscate v0.42.0 // indirect - github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0 // indirect + cloud.google.com/go/iam v0.13.0 // indirect + github.com/DataDog/appsec-internal-go v1.0.0 // indirect + github.com/DataDog/datadog-agent/pkg/obfuscate v0.43.1 // indirect + github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.45.0-rc.1 // indirect github.com/DataDog/datadog-go/v5 v5.2.0 // indirect + github.com/DataDog/go-libddwaf v1.1.0 // indirect github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork // indirect github.com/DataDog/sketches-go v1.4.1 // indirect github.com/Microsoft/go-winio v0.6.0 // indirect - github.com/andybalholm/cascadia v1.1.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/emicklei/go-restful/v3 v3.10.1 // indirect - github.com/fatih/color v1.14.1 // indirect + github.com/fatih/color v1.15.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/gnostic v0.6.9 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect - github.com/googleapis/gax-go/v2 v2.7.0 // indirect + github.com/google/btree v1.0.1 // indirect + github.com/google/s2a-go v0.1.3 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.8.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.4.0 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/josharian/intern v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-ieproxy v0.0.9 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-ieproxy v0.0.10 // indirect + github.com/mattn/go-isatty v0.0.18 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pelletier/go-toml/v2 v2.0.6 // indirect + github.com/onsi/gomega v1.23.0 // indirect + github.com/outcaste-io/ristretto v0.2.1 // indirect + github.com/pelletier/go-toml/v2 v2.0.7 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect - github.com/rivo/uniseg v0.4.3 // indirect - github.com/rogpeppe/go-internal v1.9.0 // indirect + github.com/rivo/uniseg v0.4.4 // indirect + github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect - github.com/spf13/afero v1.9.3 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.5.0 // indirect github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/subosito/gotenv v1.4.2 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect go.opencensus.io v0.24.0 // indirect - go.uber.org/atomic v1.10.0 // indirect - go.uber.org/multierr v1.9.0 // indirect - go.uber.org/zap v1.23.0 // indirect - go4.org/intern v0.0.0-20220617035311-6925f38cc365 // indirect - go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect - golang.org/x/exp/typeparams v0.0.0-20230131160201-f062dba9d201 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.24.0 // indirect + go4.org/intern v0.0.0-20230205224052-192e9f60865c // indirect + go4.org/unsafe/assume-no-moving-gc v0.0.0-20230426161633-7e06285ff160 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect inet.af/netaddr v0.0.0-20220811202034-502d2d690317 // indirect - k8s.io/api v0.26.1 // indirect - k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 // indirect - k8s.io/klog/v2 v2.90.0 // indirect - k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3 // indirect - k8s.io/utils v0.0.0-20230115233650-391b47cb4029 // indirect lukechampine.com/uint128 v1.2.0 // indirect modernc.org/cc/v3 v3.40.0 // indirect modernc.org/ccgo/v3 v3.16.13 // indirect - modernc.org/libc v1.22.2 // indirect + modernc.org/libc v1.22.5 // indirect modernc.org/mathutil v1.5.0 // indirect modernc.org/memory v1.5.0 // indirect modernc.org/opt v0.1.3 // indirect modernc.org/strutil v1.1.3 // indirect modernc.org/token v1.1.0 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) diff --git a/go.sum b/go.sum index e47c7dda131..ffadd6498b9 100644 --- a/go.sum +++ b/go.sum @@ -17,23 +17,24 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.109.0 h1:38CZoKGlCnPZjGdyj0ZfpoGae0/wgNfy5F0byyxg0Gk= -cloud.google.com/go v0.109.0/go.mod h1:2sYycXt75t/CSB5R9M2wPU1tJmire7AQZTPtITcGBVE= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.10.0 h1:fpP/gByFs6US1ma53v7VxhvbJpO2Aapng6wabJ99MuI= -cloud.google.com/go/iam v0.10.0/go.mod h1:nXAECrMt2qHpF6RZUZseteD6QyanL68reN4OXPw0UWM= -cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= +cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -53,42 +54,37 @@ github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVt github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk= github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-agent/pkg/obfuscate v0.42.0 h1:p9uCmbyi4gEbJAOLoT/GjIAQMGe3velLmiC3mMgSIy4= -github.com/DataDog/datadog-agent/pkg/obfuscate v0.42.0/go.mod h1:7Bsrm5U8/B+B8dffT3t733tDvdCr7upqIPSVuDqJ0Mw= -github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0 h1:b/RFr5T6HcEOKoXfKFOqZf33hsUbvskY1F5LDld7HCI= -github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0/go.mod h1:VVMDDibJxYEkwcLdZBT2g8EHKpbMT4JdOhRbQ9GdjbM= +github.com/DataDog/appsec-internal-go v1.0.0 h1:2u5IkF4DBj3KVeQn5Vg2vjPUtt513zxEYglcqnd500U= +github.com/DataDog/appsec-internal-go v1.0.0/go.mod h1:+Y+4klVWKPOnZx6XESG7QHydOaUGEXyH2j/vSg9JiNM= +github.com/DataDog/datadog-agent/pkg/obfuscate v0.43.1 h1:HG4dOM6Ou+zZsaKC++4kpM9VGJ/TYo9X61LPz2mmjDE= +github.com/DataDog/datadog-agent/pkg/obfuscate v0.43.1/go.mod h1:o+rJy3B2o+Zb+wCgLSkMlkD7EiUEA5Q63cid53fZkQY= +github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.45.0-rc.1 h1:0OK84DbAucLUwoDYoBFve1cuhDWtoquruVVDjgucYlI= +github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.45.0-rc.1/go.mod h1:VVMDDibJxYEkwcLdZBT2g8EHKpbMT4JdOhRbQ9GdjbM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go/v5 v5.1.1/go.mod h1:KhiYb2Badlv9/rofz+OznKoEF5XKTonWyhx5K83AP8E= github.com/DataDog/datadog-go/v5 v5.2.0 h1:kSptqUGSNK67DgA+By3rwtFnAh6pTBxJ7Hn8JCLZcKY= github.com/DataDog/datadog-go/v5 v5.2.0/go.mod h1:XRDJk1pTc00gm+ZDiBKsjh7oOOtJfYfglVCmFb8C2+Q= +github.com/DataDog/go-libddwaf v1.1.0 h1:PhlI/31yxu88JEgTYqxffhd8oM4KQMfNWUVyICqIDMY= +github.com/DataDog/go-libddwaf v1.1.0/go.mod h1:DI5y8obPajk+Tvy2o+nZc2g/5Ria/Rfq5/624k7pHpE= github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork h1:yBq5PrAtrM4yVeSzQ+bn050+Ysp++RKF1QmtkL4VqvU= github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork/go.mod h1:yA5JwkZsHTLuqq3zaRgUQf35DfDkpOZqgtBqHKpwrBs= +github.com/DataDog/gostackparse v0.5.0 h1:jb72P6GFHPHz2W0onsN51cS3FkaMDcjb0QzgxxA4gDk= +github.com/DataDog/gostackparse v0.5.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM= github.com/DataDog/sketches-go v1.4.1 h1:j5G6as+9FASM2qC36lvpvQAj9qsv/jUs3FtO8CwZNAY= github.com/DataDog/sketches-go v1.4.1/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk= github.com/HdrHistogram/hdrhistogram-go v0.9.0 h1:dpujRju0R4M/QZzcnR1LH1qm+TVG3UzkWdp5tH1WMcg= @@ -100,50 +96,35 @@ github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpz github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/goquery v1.5.1 h1:PSPBGne8NIUWw+/7vFBV+kG2J/5MOjbzc7154OaKCSE= -github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= +github.com/Shopify/toxiproxy/v2 v2.5.0/go.mod h1:yhM2epWtAmel9CB8r2+L+PCmhH6yH2pITaPAo7jxJl0= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/andybalholm/cascadia v1.1.0 h1:BuuO6sSfQNFRu1LppgbD25Hr2vLYW25JvxHs5zzsLTo= -github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aquarapid/vaultlib v0.5.1 h1:vuLWR6bZzLHybjJBSUYPgZlIp6KZ+SXeHLRRYTuk6d4= github.com/aquarapid/vaultlib v0.5.1/go.mod h1:yT7AlEXtuabkxylOc/+Ulyp18tff1+QjgNLTnFWTlOs= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.44.192 h1:KL54vCxRd5v5XBGjnF3FelzXXwl+aWHDmDTihFmRNgM= -github.com/aws/aws-sdk-go v1.44.192/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.258 h1:JVk1lgpsTnb1kvUw3eGhPLcTpEBp6HeSf1fxcYDs2Ho= +github.com/aws/aws-sdk-go v1.44.258/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bndr/gotabulate v1.1.2 h1:yC9izuZEphojb9r+KYL4W9IJKO/ceIO8HDwxMA24U4c= github.com/bndr/gotabulate v1.1.2/go.mod h1:0+8yUgaPTtLRTjf49E8oju7ojpU11YmXyvq1LbPAb3U= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -156,31 +137,20 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= -github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= -github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/corpix/uarand v0.1.1 h1:RMr1TWc9F4n5jiPDzFHtmaUXLKLNUFK0SgCLo4BhX/U= github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= @@ -189,58 +159,41 @@ github.com/dave/jennifer v1.6.0/go.mod h1:AxTG893FiZKqxy3FP1kL80VMshSMuz2G+Egvsz github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= -github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= -github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= -github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI= github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0= +github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -248,75 +201,17 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= -github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -329,9 +224,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -347,15 +240,15 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= -github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -367,18 +260,17 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -391,54 +283,48 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= +github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.3 h1:FAgZmpLl/SXurPEZyCMPBIiiYeTbqfjlbdnCNTAkbGE= +github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/safehtml v0.1.0 h1:EwLKo8qawTKfsi0orxcQAZzu07cICaBeFMegAU9eaT8= +github.com/google/safehtml v0.1.0/go.mod h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg= -github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gax-go/v2 v2.8.0 h1:UBtEZqx1bjXtOQ5BVTkuYghXrr3N4V123VKJK67vJZc= +github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= -github.com/hashicorp/consul/api v1.18.0 h1:R7PPNzTCeN6VuQNDwwhZWJvzCtGSrNpJqfb22h3yH9g= -github.com/hashicorp/consul/api v1.18.0/go.mod h1:owRRGJ9M5xReDC5nfT8FTJrNAPbT4NM6p/k+d03q2v4= -github.com/hashicorp/consul/sdk v0.13.0 h1:lce3nFlpv8humJL8rNrrGHYSKc3q+Kxfeg3Ii1m6ZWU= -github.com/hashicorp/consul/sdk v0.13.0/go.mod h1:0hs/l5fOVhJy/VdcoaNqUSi2AUs95eF5WKtv+EYIQqE= +github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc= +github.com/hashicorp/consul/api v1.20.0/go.mod h1:nR64eD44KQ59Of/ECwt2vUmIK2DKsDzAwTmwmLl8Wpo= +github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY= +github.com/hashicorp/consul/sdk v0.13.1/go.mod h1:SW/mM4LbKfqmMvcFu8v+eiQQ7oitXEFeiBe9StxERb0= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= -github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -447,6 +333,7 @@ github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iP github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= @@ -458,8 +345,8 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= @@ -472,17 +359,11 @@ github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= -github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428 h1:Mo9W14pwbO9VfRe+ygqZ8dFbPpoIK1HFrG/zjTuQ+nc= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -490,54 +371,35 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= -github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= +github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/krishicks/yaml-patch v0.0.10 h1:H4FcHpnNwVmw8u0MjPRjWyIXtco6zM2F78t+57oNM3E= github.com/krishicks/yaml-patch v0.0.10/go.mod h1:Sm5TchwZS6sm7RJoyg87tzxm2ZcKzdRE4Q7TjNhPrME= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -546,19 +408,16 @@ github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= -github.com/mattn/go-ieproxy v0.0.9 h1:RvVbLiMv/Hbjf1gRaC2AQyzwbdVhdId7D2vPnXIml4k= -github.com/mattn/go-ieproxy v0.0.9/go.mod h1:eF30/rfdQUO9EnzNIZQr0r9HiLMlZNCpJkHbmMuOAE0= +github.com/mattn/go-ieproxy v0.0.10 h1:P+2QihaKCLgbs/32dhFLbxXlqsy8tIG1LUXHIoPaQPo= +github.com/mattn/go-ieproxy v0.0.10/go.mod h1:/NsJd+kxZBmjMc5hrJCKMbP57B84rvq9BiDRbtO9AS0= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= @@ -572,70 +431,54 @@ github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1 h1:jw16EimP5oAEM/2wt+SiEUov/YDyTCTDuPtIKgQIvk0= github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1/go.mod h1:vuvdOZLJuf5HmJAJrKV64MmozrSsk+or0PB5dzdfspg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU= github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM= github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249 h1:NHrXEjTNQY7P0Zfx1aMrNhpgxHmow66XQtm0aQLY0AE= github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= +github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/outcaste-io/ristretto v0.2.0/go.mod h1:iBZA7RCt6jaOr0z6hiBQ6t662/oZ6Gx/yauuPvIWHAI= +github.com/outcaste-io/ristretto v0.2.1 h1:KCItuNIGJZcursqHr3ghO7fc5ddZLEHspL9UR0cQM64= +github.com/outcaste-io/ristretto v0.2.1/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= -github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= +github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= @@ -650,28 +493,27 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a h1:y0OpQ4+5tKxeh9+H+2cVgASl9yMZYV9CILinKOiKafA= github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a/go.mod h1:GJFUzQuXIoB2Kjn1ZfDhJr/42D5nWOqRcIQVgCxTuIE= -github.com/planetscale/vtprotobuf v0.4.0 h1:NEI+g4woRaAZgeZ3sAvbtyvMBRjIv5kE7EWYQ8m4JwY= -github.com/planetscale/vtprotobuf v0.4.0/go.mod h1:wm1N3qk9G/4+VM1WhpkLbvY/d8+0PbwYYpP5P5VhTks= +github.com/planetscale/vtprotobuf v0.5.0 h1:l8PXm6Colok5z6qQLNhAj2Jq5BfoMTIHxLER5a6nDqM= +github.com/planetscale/vtprotobuf v0.5.0/go.mod h1:wm1N3qk9G/4+VM1WhpkLbvY/d8+0PbwYYpP5P5VhTks= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= +github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= -github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/common v0.43.0 h1:iq+BVjvYLei5f27wiuNiB1DN6DYQkp1c8Bx0Vykh5us= +github.com/prometheus/common v0.43.0/go.mod h1:NCvr5cQIh3Y/gy73/RdVtC9r8xxrxwJnB+2lB3BxrFc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= @@ -682,25 +524,24 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqn github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 h1:Qp27Idfgi6ACvFQat5+VJvlYToylpM/hcyLBI3WaKPA= +github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052/go.mod h1:uvX/8buq8uVeiZiFht+0lqSLBHF+uGV8BrTv8W/SIwk= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= -github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/secure-systems-lab/go-securesystemslib v0.3.1/go.mod h1:o8hhjkbNl2gOamKUA/eNW3xUrntHT9L4W89W1nfj43U= -github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= -github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/secure-systems-lab/go-securesystemslib v0.5.0 h1:oTiNu0QnulMQgN/hLK124wJD/r2f9ZhIUuKIeBsCBT8= +github.com/secure-systems-lab/go-securesystemslib v0.5.0/go.mod h1:uoCqUC0Ap7jrBSEanxT+SdACYJTVplRXWLkGMuDjXqk= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= @@ -708,38 +549,24 @@ github.com/sjmudd/stopwatch v0.1.1 h1:x45OvxFB5OtCkjvYtzRF5fWB857Jzjjk84Oyd5C5eb github.com/sjmudd/stopwatch v0.1.1/go.mod h1:BLw0oIQJ1YLXBO/q9ufK/SgnKBVIkC2qrm6uy78Zw6U= github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= -github.com/spyzhov/ajson v0.7.2 h1:kyl+ovUoId/RSBbSbCm31xyQvPixA6Sxgvb0eWyt1Ko= -github.com/spyzhov/ajson v0.7.2/go.mod h1:63V+CGM6f1Bu/p4nLIN8885ojBdt88TbLoSFzyqMuVA= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -752,8 +579,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= @@ -763,25 +591,17 @@ github.com/tidwall/gjson v1.12.1 h1:ikuZsLdhr8Ws0IdROXUS1Gi4v9Z4pGqpX/CvJkxvfpo= github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M= github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -792,17 +612,12 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/z-division/go-zookeeper v1.0.0 h1:ULsCj0nP6+U1liDFWe+2oEF6o4amixoDcDlwEUghVUY= github.com/z-division/go-zookeeper v1.0.0/go.mod h1:6X4UioQXpvyezJJl4J9NHAJKsoffCwy5wCaaTktXjOA= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= -go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= -go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= -go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= -go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= -go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.etcd.io/etcd/api/v3 v3.5.8 h1:Zf44zJszoU7zRV0X/nStPenegNXoFDWcB/MwrJbA+L4= +go.etcd.io/etcd/api/v3 v3.5.8/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= +go.etcd.io/etcd/client/pkg/v3 v3.5.8 h1:tPp9YRn/UBFAHdhOQUII9eUs7aOK35eulpMhX4YBd+M= +go.etcd.io/etcd/client/pkg/v3 v3.5.8/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= +go.etcd.io/etcd/client/v3 v3.5.8 h1:B6ngTKZSWWowHEoaucOKHQR/AtZKaoHLiUpWxOLG4l4= +go.etcd.io/etcd/client/v3 v3.5.8/go.mod h1:idZYIPVkttBJBiRigkB5EM0MmEyx8jcl18zCV3F5noc= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -812,33 +627,34 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/mock v0.2.0 h1:TaP3xedm7JaAgScZO7tlvlKrqT0p7I6OsdGB5YNSMDU= +go.uber.org/mock v0.2.0/go.mod h1:J0y0rp9L3xiff1+ZBfKxlC1fz2+aO16tw0tsDOixfuM= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= -go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA= -go4.org/intern v0.0.0-20220617035311-6925f38cc365 h1:t9hFvR102YlOqU0fQn1wgwhNvSbHGBbbJxX9JKfU3l0= -go4.org/intern v0.0.0-20220617035311-6925f38cc365/go.mod h1:WXRv3p7T6gzt0CcJm43AAKdKVZmcQbwwC7EwquU5BZU= +go4.org/intern v0.0.0-20230205224052-192e9f60865c h1:b8WZ7Ja8nKegYxfwDLLwT00ZKv4lXAQrw8LYPK+cHSI= +go4.org/intern v0.0.0-20230205224052-192e9f60865c/go.mod h1:RJ0SVrOMpxLhgb5noIV+09zI1RsRlMsbUcSxpWHqbrE= go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 h1:FyBZqvoA/jbNzuAWLQE2kG820zMAkcilx6BMjGbL/E4= go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20230204201903-c31fa085b70e/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20230426161633-7e06285ff160 h1:LrTREdITdNDW/JRlUuG3fhXvCK3ZcKXTCf1BbxE8sT4= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20230426161633-7e06285ff160/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190128193316-c7b33c32a30b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -848,8 +664,9 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -860,10 +677,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230131160201-f062dba9d201 h1:BEABXpNXLEz0WxtA+6CQIz2xkg80e+1zrhWyMcq8VzE= -golang.org/x/exp v0.0.0-20230131160201-f062dba9d201/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/exp/typeparams v0.0.0-20230131160201-f062dba9d201 h1:O1QcdQUR9htWjzzsXVFPX+RJ3n1P/u/5bsQR8dbs5BY= -golang.org/x/exp/typeparams v0.0.0-20230131160201-f062dba9d201/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -877,8 +690,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -891,21 +702,17 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -914,8 +721,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -946,16 +751,13 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= +golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -965,8 +767,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -979,37 +781,30 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1033,7 +828,6 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1050,50 +844,44 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1102,8 +890,6 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1129,7 +915,6 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1149,8 +934,8 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= -golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4= -golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1176,8 +961,8 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.109.0 h1:sW9hgHyX497PP5//NUM7nqfV8D0iDfBApqq7sOh1XR8= -google.golang.org/api v0.109.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.121.0 h1:8Oopoo8Vavxx6gt+sgs8s8/X60WBAtKQq6JqnkF+xow= +google.golang.org/api v0.121.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1225,9 +1010,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230131230820-1c016267d619 h1:p0kMzw6AG0JEzd7Z+kXqOiLhC6gjUQTbtS2zR0Q3DbI= -google.golang.org/genproto v0.0.0-20230131230820-1c016267d619/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1247,11 +1031,11 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.52.3 h1:pf7sOysg4LdgBqduXveGKrcEwbStiK2rtfghdzlUYDQ= -google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.55.0-dev h1:b3WG8LoyS+X/C5ZbIWsJGjt8Hhqq0wUVX8+rPF/BHZo= +google.golang.org/grpc v1.55.0-dev/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b h1:D/GTYPo6I1oEo08Bfpuj3xl5XE+UGHj7//5fVyKxhsQ= google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1266,12 +1050,11 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/DataDog/dd-trace-go.v1 v1.47.0 h1:w3mHEgOR1o52mkyCbkTM+El8DG732+Fnug4FAGhIpsk= -gopkg.in/DataDog/dd-trace-go.v1 v1.47.0/go.mod h1:aHb6c4hPRANXnB64LDAKyfWotKgfRjlHv23MnahM8AI= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/DataDog/dd-trace-go.v1 v1.50.1 h1:DUpHhh+MHtpYnUyGr5rpfvKUXkRg93TSEHii/LZVF6g= +gopkg.in/DataDog/dd-trace-go.v1 v1.50.1/go.mod h1:sw4gV8LIXseC5ISMbDJmm79OJDdl8I2Hhtelb6lpHuQ= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= @@ -1280,26 +1063,15 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ldap.v2 v2.5.1 h1:wiu0okdNfjlBzg6UWvd1Hn8Y+Ux17/u/4nlk4CQr6tU= gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1310,8 +1082,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= @@ -1323,42 +1093,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA= -honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= inet.af/netaddr v0.0.0-20220811202034-502d2d690317 h1:U2fwK6P2EqmopP/hFLTOAjWTki0qgd4GMJn5X8wOleU= inet.af/netaddr v0.0.0-20220811202034-502d2d690317/go.mod h1:OIezDfdzOgFhuw4HuWapWq2e9l0H9tK4F1j+ETRtF3k= -k8s.io/api v0.18.19/go.mod h1:lmViaHqL3es8JiaK3pCJMjBKm2CnzIcAXpHKifwbmAg= -k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ= -k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg= -k8s.io/apiextensions-apiserver v0.18.19 h1:z7tzzrsODC0cqvp3Pcy2HHc6wOnaSQQEWn0l/jbrJ6c= -k8s.io/apiextensions-apiserver v0.18.19/go.mod h1:kiomVdryKCrn+R0E+iPx+bZ/00rgj5tPXEBduSEJwgI= -k8s.io/apimachinery v0.18.19/go.mod h1:70HIRzSveORLKbatTlXzI2B2UUhbWzbq8Vqyf+HbdUQ= -k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ= -k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/apiserver v0.18.19/go.mod h1:VY80gRUh89Cmnx2s9S5nZTF8vwzEKweAFy7nTFuFLRU= -k8s.io/client-go v0.18.19/go.mod h1:lB+d4UqdzSjaU41VODLYm/oon3o05LAzsVpm6Me5XkY= -k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU= -k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE= -k8s.io/code-generator v0.18.19/go.mod h1:l5yJd8cLSvkIb0ZJMsQdWuDOx5rWfLNpgmHQyl3LmBE= -k8s.io/code-generator v0.26.1 h1:dusFDsnNSKlMFYhzIM0jAO1OlnTN5WYwQQ+Ai12IIlo= -k8s.io/code-generator v0.26.1/go.mod h1:OMoJ5Dqx1wgaQzKgc+ZWaZPfGjdRq/Y3WubFrZmeI3I= -k8s.io/component-base v0.18.19/go.mod h1:nQMCdH6RaS/GD0J1YZqc5NInfCdknth4BwlAT5Mf7tA= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 h1:iu3o/SxaHVI7tKPtkGzD3M9IzrE21j+CUKH98NQJ8Ms= -k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.90.0 h1:VkTxIV/FjRXn1fgNNcKGM8cfmL1Z33ZjXRTVxKCoF5M= -k8s.io/klog/v2 v2.90.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3 h1:vV3ZKAUX0nMjTflyfVea98dTfROpIxDaEsQws0FT2Ts= -k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3/go.mod h1:/BYxry62FuDzmI+i9B+X2pqfySRmSOW2ARmj5Zbqhj0= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20230115233650-391b47cb4029 h1:L8zDtT4jrxj+TaQYD0k8KNlr556WaVQylDXswKmX+dE= -k8s.io/utils v0.0.0-20230115233650-391b47cb4029/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI= lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw= @@ -1366,9 +1102,11 @@ modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw= modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= -modernc.org/libc v1.22.2 h1:4U7v51GyhlWqQmwCHj28Rdq2Yzwk55ovjFrdPjs8Hb0= -modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE= +modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY= modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds= @@ -1380,20 +1118,13 @@ modernc.org/sqlite v1.20.3/go.mod h1:zKcGyrICaxNTMEHSr1HQ2GUraP0j+845GYw37+EyT6A modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= modernc.org/tcl v1.15.0 h1:oY+JeD11qVVSgVvodMJsu7Edf8tr5E/7tuhF5cNYz34= +modernc.org/tcl v1.15.0/go.mod h1:xRoGotBZ6dU+Zo2tca+2EqVEeMmOUBzHnhIwq4YrVnE= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.7.0 h1:xkDw/KepgEjeizO2sNco+hqYkU12taxQFqPEmgm1GWE= +modernc.org/z v1.7.0/go.mod h1:hVdgNMh8ggTuRG1rGU8x+xGRFfiQUIAw0ZqlPy8+HyQ= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.1/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/go/cache/cache.go b/go/cache/cache.go deleted file mode 100644 index b6466132452..00000000000 --- a/go/cache/cache.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -// Cache is a generic interface type for a data structure that keeps recently used -// objects in memory and evicts them when it becomes full. -type Cache interface { - Get(key string) (any, bool) - Set(key string, val any) bool - ForEach(callback func(any) bool) - - Delete(key string) - Clear() - - // Wait waits for all pending operations on the cache to settle. Since cache writes - // are asynchronous, a write may not be immediately accessible unless the user - // manually calls Wait. - Wait() - - Len() int - Evictions() int64 - Hits() int64 - Misses() int64 - UsedCapacity() int64 - MaxCapacity() int64 - SetCapacity(int64) -} - -type cachedObject interface { - CachedSize(alloc bool) int64 -} - -// NewDefaultCacheImpl returns the default cache implementation for Vitess. The options in the -// Config struct control the memory and entry limits for the cache, and the underlying cache -// implementation. -func NewDefaultCacheImpl(cfg *Config) Cache { - switch { - case cfg == nil: - return &nullCache{} - - case cfg.LFU: - if cfg.MaxEntries == 0 || cfg.MaxMemoryUsage == 0 { - return &nullCache{} - } - return NewRistrettoCache(cfg.MaxEntries, cfg.MaxMemoryUsage, func(val any) int64 { - return val.(cachedObject).CachedSize(true) - }) - - default: - if cfg.MaxEntries == 0 { - return &nullCache{} - } - return NewLRUCache(cfg.MaxEntries, func(_ any) int64 { - return 1 - }) - } -} - -// Config is the configuration options for a cache instance -type Config struct { - // MaxEntries is the estimated amount of entries that the cache will hold at capacity - MaxEntries int64 - // MaxMemoryUsage is the maximum amount of memory the cache can handle - MaxMemoryUsage int64 - // LFU toggles whether to use a new cache implementation with a TinyLFU admission policy - LFU bool -} - -// DefaultConfig is the default configuration for a cache instance in Vitess -var DefaultConfig = &Config{ - MaxEntries: 5000, - MaxMemoryUsage: 32 * 1024 * 1024, - LFU: true, -} diff --git a/go/cache/cache_test.go b/go/cache/cache_test.go deleted file mode 100644 index 911a3bb207b..00000000000 --- a/go/cache/cache_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package cache - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/cache/ristretto" -) - -func TestNewDefaultCacheImpl(t *testing.T) { - assertNullCache := func(t *testing.T, cache Cache) { - _, ok := cache.(*nullCache) - require.True(t, ok) - } - - assertLFUCache := func(t *testing.T, cache Cache) { - _, ok := cache.(*ristretto.Cache) - require.True(t, ok) - } - - assertLRUCache := func(t *testing.T, cache Cache) { - _, ok := cache.(*LRUCache) - require.True(t, ok) - } - - tests := []struct { - cfg *Config - verify func(t *testing.T, cache Cache) - }{ - {&Config{MaxEntries: 0, MaxMemoryUsage: 0, LFU: false}, assertNullCache}, - {&Config{MaxEntries: 0, MaxMemoryUsage: 0, LFU: true}, assertNullCache}, - {&Config{MaxEntries: 100, MaxMemoryUsage: 0, LFU: false}, assertLRUCache}, - {&Config{MaxEntries: 0, MaxMemoryUsage: 1000, LFU: false}, assertNullCache}, - {&Config{MaxEntries: 100, MaxMemoryUsage: 1000, LFU: false}, assertLRUCache}, - {&Config{MaxEntries: 100, MaxMemoryUsage: 0, LFU: true}, assertNullCache}, - {&Config{MaxEntries: 100, MaxMemoryUsage: 1000, LFU: true}, assertLFUCache}, - {&Config{MaxEntries: 0, MaxMemoryUsage: 1000, LFU: true}, assertNullCache}, - } - for _, tt := range tests { - t.Run(fmt.Sprintf("%d.%d.%v", tt.cfg.MaxEntries, tt.cfg.MaxMemoryUsage, tt.cfg.LFU), func(t *testing.T) { - cache := NewDefaultCacheImpl(tt.cfg) - tt.verify(t, cache) - }) - } -} diff --git a/go/cache/lru_cache.go b/go/cache/lru_cache.go index 31ceadaf201..d845265b77b 100644 --- a/go/cache/lru_cache.go +++ b/go/cache/lru_cache.go @@ -29,8 +29,6 @@ import ( "time" ) -var _ Cache = &LRUCache{} - // LRUCache is a typical LRU cache implementation. If the cache // reaches the capacity, the least recently used item is deleted from // the cache. Note the capacity is not the number of items, but the @@ -250,3 +248,7 @@ func (lru *LRUCache) checkCapacity() { lru.evictions++ } } + +func (lru *LRUCache) Close() { + lru.Clear() +} diff --git a/go/cache/null.go b/go/cache/null.go deleted file mode 100644 index c99d52eb2ec..00000000000 --- a/go/cache/null.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -// nullCache is a no-op cache that does not store items -type nullCache struct{} - -// Get never returns anything on the nullCache -func (n *nullCache) Get(_ string) (any, bool) { - return nil, false -} - -// Set is a no-op in the nullCache -func (n *nullCache) Set(_ string, _ any) bool { - return false -} - -// ForEach iterates the nullCache, which is always empty -func (n *nullCache) ForEach(_ func(any) bool) {} - -// Delete is a no-op in the nullCache -func (n *nullCache) Delete(_ string) {} - -// Clear is a no-op in the nullCache -func (n *nullCache) Clear() {} - -// Wait is a no-op in the nullcache -func (n *nullCache) Wait() {} - -func (n *nullCache) Len() int { - return 0 -} - -// Hits returns number of cache hits since creation -func (n *nullCache) Hits() int64 { - return 0 -} - -// Hits returns number of cache misses since creation -func (n *nullCache) Misses() int64 { - return 0 -} - -// Capacity returns the capacity of the nullCache, which is always 0 -func (n *nullCache) UsedCapacity() int64 { - return 0 -} - -// Capacity returns the capacity of the nullCache, which is always 0 -func (n *nullCache) MaxCapacity() int64 { - return 0 -} - -// SetCapacity sets the capacity of the null cache, which is a no-op -func (n *nullCache) SetCapacity(_ int64) {} - -func (n *nullCache) Evictions() int64 { - return 0 -} diff --git a/go/cache/ristretto.go b/go/cache/ristretto.go deleted file mode 100644 index 6d6f596a5b9..00000000000 --- a/go/cache/ristretto.go +++ /dev/null @@ -1,28 +0,0 @@ -package cache - -import ( - "vitess.io/vitess/go/cache/ristretto" -) - -var _ Cache = &ristretto.Cache{} - -// NewRistrettoCache returns a Cache implementation based on Ristretto -func NewRistrettoCache(maxEntries, maxCost int64, cost func(any) int64) *ristretto.Cache { - // The TinyLFU paper recommends to allocate 10x times the max entries amount as counters - // for the admission policy; since our caches are small and we're very interested on admission - // accuracy, we're a bit more greedy than 10x - const CounterRatio = 12 - - config := ristretto.Config{ - NumCounters: maxEntries * CounterRatio, - MaxCost: maxCost, - BufferItems: 64, - Metrics: true, - Cost: cost, - } - cache, err := ristretto.NewCache(&config) - if err != nil { - panic(err) - } - return cache -} diff --git a/go/cache/ristretto/bloom/bbloom.go b/go/cache/ristretto/bloom/bbloom.go deleted file mode 100644 index ce5daa6864d..00000000000 --- a/go/cache/ristretto/bloom/bbloom.go +++ /dev/null @@ -1,151 +0,0 @@ -// The MIT License (MIT) -// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt - -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -// the Software, and to permit persons to whom the Software is furnished to do so, -// subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package bloom - -import ( - "math" - "unsafe" -) - -// helper -var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128} - -func getSize(ui64 uint64) (size uint64, exponent uint64) { - if ui64 < uint64(512) { - ui64 = uint64(512) - } - size = uint64(1) - for size < ui64 { - size <<= 1 - exponent++ - } - return size, exponent -} - -// NewBloomFilterWithErrorRate returns a new bloomfilter with optimal size for the given -// error rate -func NewBloomFilterWithErrorRate(numEntries uint64, wrongs float64) *Bloom { - size := -1 * float64(numEntries) * math.Log(wrongs) / math.Pow(0.69314718056, 2) - locs := math.Ceil(0.69314718056 * size / float64(numEntries)) - return NewBloomFilter(uint64(size), uint64(locs)) -} - -// NewBloomFilter returns a new bloomfilter. -func NewBloomFilter(entries, locs uint64) (bloomfilter *Bloom) { - size, exponent := getSize(entries) - bloomfilter = &Bloom{ - sizeExp: exponent, - size: size - 1, - setLocs: locs, - shift: 64 - exponent, - } - bloomfilter.Size(size) - return bloomfilter -} - -// Bloom filter -type Bloom struct { - bitset []uint64 - ElemNum uint64 - sizeExp uint64 - size uint64 - setLocs uint64 - shift uint64 -} - -// <--- http://www.cse.yorku.ca/~oz/hash.html -// modified Berkeley DB Hash (32bit) -// hash is casted to l, h = 16bit fragments -// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) { -// hash := uint64(len(*b)) -// for _, c := range *b { -// hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash -// } -// h = hash >> bl.shift -// l = hash << bl.shift >> bl.shift -// return l, h -// } - -// Add adds hash of a key to the bloomfilter. -func (bl *Bloom) Add(hash uint64) { - h := hash >> bl.shift - l := hash << bl.shift >> bl.shift - for i := uint64(0); i < bl.setLocs; i++ { - bl.Set((h + i*l) & bl.size) - bl.ElemNum++ - } -} - -// Has checks if bit(s) for entry hash is/are set, -// returns true if the hash was added to the Bloom Filter. -func (bl Bloom) Has(hash uint64) bool { - h := hash >> bl.shift - l := hash << bl.shift >> bl.shift - for i := uint64(0); i < bl.setLocs; i++ { - if !bl.IsSet((h + i*l) & bl.size) { - return false - } - } - return true -} - -// AddIfNotHas only Adds hash, if it's not present in the bloomfilter. -// Returns true if hash was added. -// Returns false if hash was already registered in the bloomfilter. -func (bl *Bloom) AddIfNotHas(hash uint64) bool { - if bl.Has(hash) { - return false - } - bl.Add(hash) - return true -} - -// TotalSize returns the total size of the bloom filter. -func (bl *Bloom) TotalSize() int { - // The bl struct has 5 members and each one is 8 byte. The bitset is a - // uint64 byte slice. - return len(bl.bitset)*8 + 5*8 -} - -// Size makes Bloom filter with as bitset of size sz. -func (bl *Bloom) Size(sz uint64) { - bl.bitset = make([]uint64, sz>>6) -} - -// Clear resets the Bloom filter. -func (bl *Bloom) Clear() { - for i := range bl.bitset { - bl.bitset[i] = 0 - } -} - -// Set sets the bit[idx] of bitset. -func (bl *Bloom) Set(idx uint64) { - ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)) - *(*uint8)(ptr) |= mask[idx%8] -} - -// IsSet checks if bit[idx] of bitset is set, returns true/false. -func (bl *Bloom) IsSet(idx uint64) bool { - ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)) - r := ((*(*uint8)(ptr)) >> (idx % 8)) & 1 - return r == 1 -} diff --git a/go/cache/ristretto/bloom/bbloom_test.go b/go/cache/ristretto/bloom/bbloom_test.go deleted file mode 100644 index c0f9a916d10..00000000000 --- a/go/cache/ristretto/bloom/bbloom_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package bloom - -import ( - "crypto/rand" - "fmt" - "os" - "testing" - - "vitess.io/vitess/go/hack" -) - -var ( - wordlist1 [][]byte - n = uint64(1 << 16) - bf *Bloom -) - -func TestMain(m *testing.M) { - wordlist1 = make([][]byte, n) - for i := range wordlist1 { - b := make([]byte, 32) - _, _ = rand.Read(b) - wordlist1[i] = b - } - fmt.Println("\n###############\nbbloom_test.go") - fmt.Print("Benchmarks relate to 2**16 OP. --> output/65536 op/ns\n###############\n\n") - - os.Exit(m.Run()) -} - -func TestM_NumberOfWrongs(t *testing.T) { - bf = NewBloomFilter(n*10, 7) - - cnt := 0 - for i := range wordlist1 { - hash := hack.RuntimeMemhash(wordlist1[i], 0) - if !bf.AddIfNotHas(hash) { - cnt++ - } - } - fmt.Printf("Bloomfilter New(7* 2**16, 7) (-> size=%v bit): \n Check for 'false positives': %v wrong positive 'Has' results on 2**16 entries => %v %%\n", len(bf.bitset)<<6, cnt, float64(cnt)/float64(n)) - -} - -func BenchmarkM_New(b *testing.B) { - for r := 0; r < b.N; r++ { - _ = NewBloomFilter(n*10, 7) - } -} - -func BenchmarkM_Clear(b *testing.B) { - bf = NewBloomFilter(n*10, 7) - for i := range wordlist1 { - hash := hack.RuntimeMemhash(wordlist1[i], 0) - bf.Add(hash) - } - b.ResetTimer() - for r := 0; r < b.N; r++ { - bf.Clear() - } -} - -func BenchmarkM_Add(b *testing.B) { - bf = NewBloomFilter(n*10, 7) - b.ResetTimer() - for r := 0; r < b.N; r++ { - for i := range wordlist1 { - hash := hack.RuntimeMemhash(wordlist1[i], 0) - bf.Add(hash) - } - } - -} - -func BenchmarkM_Has(b *testing.B) { - b.ResetTimer() - for r := 0; r < b.N; r++ { - for i := range wordlist1 { - hash := hack.RuntimeMemhash(wordlist1[i], 0) - bf.Has(hash) - } - } -} diff --git a/go/cache/ristretto/cache.go b/go/cache/ristretto/cache.go deleted file mode 100644 index b745d6dc991..00000000000 --- a/go/cache/ristretto/cache.go +++ /dev/null @@ -1,697 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package ristretto is a fast, fixed size, in-memory cache with a dual focus on -// throughput and hit ratio performance. You can easily add Ristretto to an -// existing system and keep the most valuable data where you need it. -package ristretto - -import ( - "bytes" - "errors" - "fmt" - "sync" - "sync/atomic" - "time" - "unsafe" - - "vitess.io/vitess/go/hack" -) - -var ( - // TODO: find the optimal value for this or make it configurable - setBufSize = 32 * 1024 -) - -func defaultStringHash(key string) (uint64, uint64) { - const Seed1 = uint64(0x1122334455667788) - const Seed2 = uint64(0x8877665544332211) - return hack.RuntimeStrhash(key, Seed1), hack.RuntimeStrhash(key, Seed2) -} - -type itemCallback func(*Item) - -// CacheItemSize is the overhead in bytes for every stored cache item -var CacheItemSize = hack.RuntimeAllocSize(int64(unsafe.Sizeof(storeItem{}))) - -// Cache is a thread-safe implementation of a hashmap with a TinyLFU admission -// policy and a Sampled LFU eviction policy. You can use the same Cache instance -// from as many goroutines as you want. -type Cache struct { - // store is the central concurrent hashmap where key-value items are stored. - store store - // policy determines what gets let in to the cache and what gets kicked out. - policy policy - // getBuf is a custom ring buffer implementation that gets pushed to when - // keys are read. - getBuf *ringBuffer - // setBuf is a buffer allowing us to batch/drop Sets during times of high - // contention. - setBuf chan *Item - // onEvict is called for item evictions. - onEvict itemCallback - // onReject is called when an item is rejected via admission policy. - onReject itemCallback - // onExit is called whenever a value goes out of scope from the cache. - onExit func(any) - // KeyToHash function is used to customize the key hashing algorithm. - // Each key will be hashed using the provided function. If keyToHash value - // is not set, the default keyToHash function is used. - keyToHash func(string) (uint64, uint64) - // stop is used to stop the processItems goroutine. - stop chan struct{} - // indicates whether cache is closed. - isClosed bool - // cost calculates cost from a value. - cost func(value any) int64 - // ignoreInternalCost dictates whether to ignore the cost of internally storing - // the item in the cost calculation. - ignoreInternalCost bool - // Metrics contains a running log of important statistics like hits, misses, - // and dropped items. - Metrics *Metrics -} - -// Config is passed to NewCache for creating new Cache instances. -type Config struct { - // NumCounters determines the number of counters (keys) to keep that hold - // access frequency information. It's generally a good idea to have more - // counters than the max cache capacity, as this will improve eviction - // accuracy and subsequent hit ratios. - // - // For example, if you expect your cache to hold 1,000,000 items when full, - // NumCounters should be 10,000,000 (10x). Each counter takes up 4 bits, so - // keeping 10,000,000 counters would require 5MB of memory. - NumCounters int64 - // MaxCost can be considered as the cache capacity, in whatever units you - // choose to use. - // - // For example, if you want the cache to have a max capacity of 100MB, you - // would set MaxCost to 100,000,000 and pass an item's number of bytes as - // the `cost` parameter for calls to Set. If new items are accepted, the - // eviction process will take care of making room for the new item and not - // overflowing the MaxCost value. - MaxCost int64 - // BufferItems determines the size of Get buffers. - // - // Unless you have a rare use case, using `64` as the BufferItems value - // results in good performance. - BufferItems int64 - // Metrics determines whether cache statistics are kept during the cache's - // lifetime. There *is* some overhead to keeping statistics, so you should - // only set this flag to true when testing or throughput performance isn't a - // major factor. - Metrics bool - // OnEvict is called for every eviction and passes the hashed key, value, - // and cost to the function. - OnEvict func(item *Item) - // OnReject is called for every rejection done via the policy. - OnReject func(item *Item) - // OnExit is called whenever a value is removed from cache. This can be - // used to do manual memory deallocation. Would also be called on eviction - // and rejection of the value. - OnExit func(val any) - // KeyToHash function is used to customize the key hashing algorithm. - // Each key will be hashed using the provided function. If keyToHash value - // is not set, the default keyToHash function is used. - KeyToHash func(string) (uint64, uint64) - // Cost evaluates a value and outputs a corresponding cost. This function - // is ran after Set is called for a new item or an item update with a cost - // param of 0. - Cost func(value any) int64 - // IgnoreInternalCost set to true indicates to the cache that the cost of - // internally storing the value should be ignored. This is useful when the - // cost passed to set is not using bytes as units. Keep in mind that setting - // this to true will increase the memory usage. - IgnoreInternalCost bool -} - -type itemFlag byte - -const ( - itemNew itemFlag = iota - itemDelete - itemUpdate -) - -// Item is passed to setBuf so items can eventually be added to the cache. -type Item struct { - flag itemFlag - Key uint64 - Conflict uint64 - Value any - Cost int64 - wg *sync.WaitGroup -} - -// NewCache returns a new Cache instance and any configuration errors, if any. -func NewCache(config *Config) (*Cache, error) { - switch { - case config.NumCounters == 0: - return nil, errors.New("NumCounters can't be zero") - case config.MaxCost == 0: - return nil, errors.New("Capacity can't be zero") - case config.BufferItems == 0: - return nil, errors.New("BufferItems can't be zero") - } - policy := newPolicy(config.NumCounters, config.MaxCost) - cache := &Cache{ - store: newStore(), - policy: policy, - getBuf: newRingBuffer(policy, config.BufferItems), - setBuf: make(chan *Item, setBufSize), - keyToHash: config.KeyToHash, - stop: make(chan struct{}), - cost: config.Cost, - ignoreInternalCost: config.IgnoreInternalCost, - } - cache.onExit = func(val any) { - if config.OnExit != nil && val != nil { - config.OnExit(val) - } - } - cache.onEvict = func(item *Item) { - if config.OnEvict != nil { - config.OnEvict(item) - } - cache.onExit(item.Value) - } - cache.onReject = func(item *Item) { - if config.OnReject != nil { - config.OnReject(item) - } - cache.onExit(item.Value) - } - if cache.keyToHash == nil { - cache.keyToHash = defaultStringHash - } - if config.Metrics { - cache.collectMetrics() - } - // NOTE: benchmarks seem to show that performance decreases the more - // goroutines we have running cache.processItems(), so 1 should - // usually be sufficient - go cache.processItems() - return cache, nil -} - -// Wait blocks until all the current cache operations have been processed in the background -func (c *Cache) Wait() { - if c == nil || c.isClosed { - return - } - wg := &sync.WaitGroup{} - wg.Add(1) - c.setBuf <- &Item{wg: wg} - wg.Wait() -} - -// Get returns the value (if any) and a boolean representing whether the -// value was found or not. The value can be nil and the boolean can be true at -// the same time. -func (c *Cache) Get(key string) (any, bool) { - if c == nil || c.isClosed { - return nil, false - } - keyHash, conflictHash := c.keyToHash(key) - c.getBuf.Push(keyHash) - value, ok := c.store.Get(keyHash, conflictHash) - if ok { - c.Metrics.add(hit, keyHash, 1) - } else { - c.Metrics.add(miss, keyHash, 1) - } - return value, ok -} - -// Set attempts to add the key-value item to the cache. If it returns false, -// then the Set was dropped and the key-value item isn't added to the cache. If -// it returns true, there's still a chance it could be dropped by the policy if -// its determined that the key-value item isn't worth keeping, but otherwise the -// item will be added and other items will be evicted in order to make room. -// -// The cost of the entry will be evaluated lazily by the cache's Cost function. -func (c *Cache) Set(key string, value any) bool { - return c.SetWithCost(key, value, 0) -} - -// SetWithCost works like Set but adds a key-value pair to the cache with a specific -// cost. The built-in Cost function will not be called to evaluate the object's cost -// and instead the given value will be used. -func (c *Cache) SetWithCost(key string, value any, cost int64) bool { - if c == nil || c.isClosed { - return false - } - - keyHash, conflictHash := c.keyToHash(key) - i := &Item{ - flag: itemNew, - Key: keyHash, - Conflict: conflictHash, - Value: value, - Cost: cost, - } - // cost is eventually updated. The expiration must also be immediately updated - // to prevent items from being prematurely removed from the map. - if prev, ok := c.store.Update(i); ok { - c.onExit(prev) - i.flag = itemUpdate - } - // Attempt to send item to policy. - select { - case c.setBuf <- i: - return true - default: - if i.flag == itemUpdate { - // Return true if this was an update operation since we've already - // updated the store. For all the other operations (set/delete), we - // return false which means the item was not inserted. - return true - } - c.Metrics.add(dropSets, keyHash, 1) - return false - } -} - -// Delete deletes the key-value item from the cache if it exists. -func (c *Cache) Delete(key string) { - if c == nil || c.isClosed { - return - } - keyHash, conflictHash := c.keyToHash(key) - // Delete immediately. - _, prev := c.store.Del(keyHash, conflictHash) - c.onExit(prev) - // If we've set an item, it would be applied slightly later. - // So we must push the same item to `setBuf` with the deletion flag. - // This ensures that if a set is followed by a delete, it will be - // applied in the correct order. - c.setBuf <- &Item{ - flag: itemDelete, - Key: keyHash, - Conflict: conflictHash, - } -} - -// Close stops all goroutines and closes all channels. -func (c *Cache) Close() { - if c == nil || c.isClosed { - return - } - c.Clear() - - // Block until processItems goroutine is returned. - c.stop <- struct{}{} - close(c.stop) - close(c.setBuf) - c.policy.Close() - c.isClosed = true -} - -// Clear empties the hashmap and zeroes all policy counters. Note that this is -// not an atomic operation (but that shouldn't be a problem as it's assumed that -// Set/Get calls won't be occurring until after this). -func (c *Cache) Clear() { - if c == nil || c.isClosed { - return - } - // Block until processItems goroutine is returned. - c.stop <- struct{}{} - - // Clear out the setBuf channel. -loop: - for { - select { - case i := <-c.setBuf: - if i.wg != nil { - i.wg.Done() - continue - } - if i.flag != itemUpdate { - // In itemUpdate, the value is already set in the store. So, no need to call - // onEvict here. - c.onEvict(i) - } - default: - break loop - } - } - - // Clear value hashmap and policy data. - c.policy.Clear() - c.store.Clear(c.onEvict) - // Only reset metrics if they're enabled. - if c.Metrics != nil { - c.Metrics.Clear() - } - // Restart processItems goroutine. - go c.processItems() -} - -// Len returns the size of the cache (in entries) -func (c *Cache) Len() int { - if c == nil { - return 0 - } - return c.store.Len() -} - -// UsedCapacity returns the size of the cache (in bytes) -func (c *Cache) UsedCapacity() int64 { - if c == nil { - return 0 - } - return c.policy.Used() -} - -// MaxCapacity returns the max cost of the cache (in bytes) -func (c *Cache) MaxCapacity() int64 { - if c == nil { - return 0 - } - return c.policy.MaxCost() -} - -// SetCapacity updates the maxCost of an existing cache. -func (c *Cache) SetCapacity(maxCost int64) { - if c == nil { - return - } - c.policy.UpdateMaxCost(maxCost) -} - -// Evictions returns the number of evictions -func (c *Cache) Evictions() int64 { - // TODO - if c == nil || c.Metrics == nil { - return 0 - } - return int64(c.Metrics.KeysEvicted()) -} - -// Hits returns the number of cache hits -func (c *Cache) Hits() int64 { - if c == nil || c.Metrics == nil { - return 0 - } - return int64(c.Metrics.Hits()) -} - -// Misses returns the number of cache misses -func (c *Cache) Misses() int64 { - if c == nil || c.Metrics == nil { - return 0 - } - return int64(c.Metrics.Misses()) -} - -// ForEach yields all the values currently stored in the cache to the given callback. -// The callback may return `false` to stop the iteration early. -func (c *Cache) ForEach(forEach func(any) bool) { - if c == nil { - return - } - c.store.ForEach(forEach) -} - -// processItems is ran by goroutines processing the Set buffer. -func (c *Cache) processItems() { - startTs := make(map[uint64]time.Time) - numToKeep := 100000 // TODO: Make this configurable via options. - - trackAdmission := func(key uint64) { - if c.Metrics == nil { - return - } - startTs[key] = time.Now() - if len(startTs) > numToKeep { - for k := range startTs { - if len(startTs) <= numToKeep { - break - } - delete(startTs, k) - } - } - } - onEvict := func(i *Item) { - delete(startTs, i.Key) - if c.onEvict != nil { - c.onEvict(i) - } - } - - for { - select { - case i := <-c.setBuf: - if i.wg != nil { - i.wg.Done() - continue - } - // Calculate item cost value if new or update. - if i.Cost == 0 && c.cost != nil && i.flag != itemDelete { - i.Cost = c.cost(i.Value) - } - if !c.ignoreInternalCost { - // Add the cost of internally storing the object. - i.Cost += CacheItemSize - } - - switch i.flag { - case itemNew: - victims, added := c.policy.Add(i.Key, i.Cost) - if added { - c.store.Set(i) - c.Metrics.add(keyAdd, i.Key, 1) - trackAdmission(i.Key) - } else { - c.onReject(i) - } - for _, victim := range victims { - victim.Conflict, victim.Value = c.store.Del(victim.Key, 0) - onEvict(victim) - } - - case itemUpdate: - c.policy.Update(i.Key, i.Cost) - - case itemDelete: - c.policy.Del(i.Key) // Deals with metrics updates. - _, val := c.store.Del(i.Key, i.Conflict) - c.onExit(val) - } - case <-c.stop: - return - } - } -} - -// collectMetrics just creates a new *Metrics instance and adds the pointers -// to the cache and policy instances. -func (c *Cache) collectMetrics() { - c.Metrics = newMetrics() - c.policy.CollectMetrics(c.Metrics) -} - -type metricType int - -const ( - // The following 2 keep track of hits and misses. - hit = iota - miss - // The following 3 keep track of number of keys added, updated and evicted. - keyAdd - keyUpdate - keyEvict - // The following 2 keep track of cost of keys added and evicted. - costAdd - costEvict - // The following keep track of how many sets were dropped or rejected later. - dropSets - rejectSets - // The following 2 keep track of how many gets were kept and dropped on the - // floor. - dropGets - keepGets - // This should be the final enum. Other enums should be set before this. - doNotUse -) - -func stringFor(t metricType) string { - switch t { - case hit: - return "hit" - case miss: - return "miss" - case keyAdd: - return "keys-added" - case keyUpdate: - return "keys-updated" - case keyEvict: - return "keys-evicted" - case costAdd: - return "cost-added" - case costEvict: - return "cost-evicted" - case dropSets: - return "sets-dropped" - case rejectSets: - return "sets-rejected" // by policy. - case dropGets: - return "gets-dropped" - case keepGets: - return "gets-kept" - default: - return "unidentified" - } -} - -// Metrics is a snapshot of performance statistics for the lifetime of a cache instance. -type Metrics struct { - all [doNotUse][]*uint64 -} - -func newMetrics() *Metrics { - s := &Metrics{} - for i := 0; i < doNotUse; i++ { - s.all[i] = make([]*uint64, 256) - slice := s.all[i] - for j := range slice { - slice[j] = new(uint64) - } - } - return s -} - -func (p *Metrics) add(t metricType, hash, delta uint64) { - if p == nil { - return - } - valp := p.all[t] - // Avoid false sharing by padding at least 64 bytes of space between two - // atomic counters which would be incremented. - idx := (hash % 25) * 10 - atomic.AddUint64(valp[idx], delta) -} - -func (p *Metrics) get(t metricType) uint64 { - if p == nil { - return 0 - } - valp := p.all[t] - var total uint64 - for i := range valp { - total += atomic.LoadUint64(valp[i]) - } - return total -} - -// Hits is the number of Get calls where a value was found for the corresponding key. -func (p *Metrics) Hits() uint64 { - return p.get(hit) -} - -// Misses is the number of Get calls where a value was not found for the corresponding key. -func (p *Metrics) Misses() uint64 { - return p.get(miss) -} - -// KeysAdded is the total number of Set calls where a new key-value item was added. -func (p *Metrics) KeysAdded() uint64 { - return p.get(keyAdd) -} - -// KeysUpdated is the total number of Set calls where the value was updated. -func (p *Metrics) KeysUpdated() uint64 { - return p.get(keyUpdate) -} - -// KeysEvicted is the total number of keys evicted. -func (p *Metrics) KeysEvicted() uint64 { - return p.get(keyEvict) -} - -// CostAdded is the sum of costs that have been added (successful Set calls). -func (p *Metrics) CostAdded() uint64 { - return p.get(costAdd) -} - -// CostEvicted is the sum of all costs that have been evicted. -func (p *Metrics) CostEvicted() uint64 { - return p.get(costEvict) -} - -// SetsDropped is the number of Set calls that don't make it into internal -// buffers (due to contention or some other reason). -func (p *Metrics) SetsDropped() uint64 { - return p.get(dropSets) -} - -// SetsRejected is the number of Set calls rejected by the policy (TinyLFU). -func (p *Metrics) SetsRejected() uint64 { - return p.get(rejectSets) -} - -// GetsDropped is the number of Get counter increments that are dropped -// internally. -func (p *Metrics) GetsDropped() uint64 { - return p.get(dropGets) -} - -// GetsKept is the number of Get counter increments that are kept. -func (p *Metrics) GetsKept() uint64 { - return p.get(keepGets) -} - -// Ratio is the number of Hits over all accesses (Hits + Misses). This is the -// percentage of successful Get calls. -func (p *Metrics) Ratio() float64 { - if p == nil { - return 0.0 - } - hits, misses := p.get(hit), p.get(miss) - if hits == 0 && misses == 0 { - return 0.0 - } - return float64(hits) / float64(hits+misses) -} - -// Clear resets all the metrics. -func (p *Metrics) Clear() { - if p == nil { - return - } - for i := 0; i < doNotUse; i++ { - for j := range p.all[i] { - atomic.StoreUint64(p.all[i][j], 0) - } - } -} - -// String returns a string representation of the metrics. -func (p *Metrics) String() string { - if p == nil { - return "" - } - var buf bytes.Buffer - for i := 0; i < doNotUse; i++ { - t := metricType(i) - fmt.Fprintf(&buf, "%s: %d ", stringFor(t), p.get(t)) - } - fmt.Fprintf(&buf, "gets-total: %d ", p.get(hit)+p.get(miss)) - fmt.Fprintf(&buf, "hit-ratio: %.2f", p.Ratio()) - return buf.String() -} diff --git a/go/cache/ristretto/cache_test.go b/go/cache/ristretto/cache_test.go deleted file mode 100644 index c4980a18cc9..00000000000 --- a/go/cache/ristretto/cache_test.go +++ /dev/null @@ -1,688 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "fmt" - "math/rand" - "strconv" - "strings" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -var wait = time.Millisecond * 10 - -func TestCacheKeyToHash(t *testing.T) { - keyToHashCount := 0 - c, err := NewCache(&Config{ - NumCounters: 10, - MaxCost: 1000, - BufferItems: 64, - IgnoreInternalCost: true, - KeyToHash: func(key string) (uint64, uint64) { - keyToHashCount++ - return defaultStringHash(key) - }, - }) - require.NoError(t, err) - if c.SetWithCost("1", 1, 1) { - time.Sleep(wait) - val, ok := c.Get("1") - require.True(t, ok) - require.NotNil(t, val) - c.Delete("1") - } - require.Equal(t, 3, keyToHashCount) -} - -func TestCacheMaxCost(t *testing.T) { - charset := "abcdefghijklmnopqrstuvwxyz0123456789" - key := func() string { - k := make([]byte, 2) - for i := range k { - k[i] = charset[rand.Intn(len(charset))] - } - return string(k) - } - c, err := NewCache(&Config{ - NumCounters: 12960, // 36^2 * 10 - MaxCost: 1e6, // 1mb - BufferItems: 64, - Metrics: true, - }) - require.NoError(t, err) - stop := make(chan struct{}, 8) - for i := 0; i < 8; i++ { - go func() { - for { - select { - case <-stop: - return - default: - time.Sleep(time.Millisecond) - - k := key() - if _, ok := c.Get(k); !ok { - val := "" - if rand.Intn(100) < 10 { - val = "test" - } else { - val = strings.Repeat("a", 1000) - } - c.SetWithCost(key(), val, int64(2+len(val))) - } - } - } - }() - } - for i := 0; i < 20; i++ { - time.Sleep(time.Second) - cacheCost := c.Metrics.CostAdded() - c.Metrics.CostEvicted() - t.Logf("total cache cost: %d\n", cacheCost) - require.True(t, float64(cacheCost) <= float64(1e6*1.05)) - } - for i := 0; i < 8; i++ { - stop <- struct{}{} - } -} - -func TestUpdateMaxCost(t *testing.T) { - c, err := NewCache(&Config{ - NumCounters: 10, - MaxCost: 10, - BufferItems: 64, - }) - require.NoError(t, err) - require.Equal(t, int64(10), c.MaxCapacity()) - require.True(t, c.SetWithCost("1", 1, 1)) - time.Sleep(wait) - _, ok := c.Get("1") - // Set is rejected because the cost of the entry is too high - // when accounting for the internal cost of storing the entry. - require.False(t, ok) - - // Update the max cost of the cache and retry. - c.SetCapacity(1000) - require.Equal(t, int64(1000), c.MaxCapacity()) - require.True(t, c.SetWithCost("1", 1, 1)) - time.Sleep(wait) - val, ok := c.Get("1") - require.True(t, ok) - require.NotNil(t, val) - c.Delete("1") -} - -func TestNewCache(t *testing.T) { - _, err := NewCache(&Config{ - NumCounters: 0, - }) - require.Error(t, err) - - _, err = NewCache(&Config{ - NumCounters: 100, - MaxCost: 0, - }) - require.Error(t, err) - - _, err = NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 0, - }) - require.Error(t, err) - - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - Metrics: true, - }) - require.NoError(t, err) - require.NotNil(t, c) -} - -func TestNilCache(t *testing.T) { - var c *Cache - val, ok := c.Get("1") - require.False(t, ok) - require.Nil(t, val) - - require.False(t, c.SetWithCost("1", 1, 1)) - c.Delete("1") - c.Clear() - c.Close() -} - -func TestMultipleClose(t *testing.T) { - var c *Cache - c.Close() - - var err error - c, err = NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - Metrics: true, - }) - require.NoError(t, err) - c.Close() - c.Close() -} - -func TestSetAfterClose(t *testing.T) { - c, err := newTestCache() - require.NoError(t, err) - require.NotNil(t, c) - - c.Close() - require.False(t, c.SetWithCost("1", 1, 1)) -} - -func TestClearAfterClose(t *testing.T) { - c, err := newTestCache() - require.NoError(t, err) - require.NotNil(t, c) - - c.Close() - c.Clear() -} - -func TestGetAfterClose(t *testing.T) { - c, err := newTestCache() - require.NoError(t, err) - require.NotNil(t, c) - - require.True(t, c.SetWithCost("1", 1, 1)) - c.Close() - - _, ok := c.Get("2") - require.False(t, ok) -} - -func TestDelAfterClose(t *testing.T) { - c, err := newTestCache() - require.NoError(t, err) - require.NotNil(t, c) - - require.True(t, c.SetWithCost("1", 1, 1)) - c.Close() - - c.Delete("1") -} - -func TestCacheProcessItems(t *testing.T) { - m := &sync.Mutex{} - evicted := make(map[uint64]struct{}) - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - IgnoreInternalCost: true, - Cost: func(value any) int64 { - return int64(value.(int)) - }, - OnEvict: func(item *Item) { - m.Lock() - defer m.Unlock() - evicted[item.Key] = struct{}{} - }, - }) - require.NoError(t, err) - - var key uint64 - var conflict uint64 - - key, conflict = defaultStringHash("1") - c.setBuf <- &Item{ - flag: itemNew, - Key: key, - Conflict: conflict, - Value: 1, - Cost: 0, - } - time.Sleep(wait) - require.True(t, c.policy.Has(key)) - require.Equal(t, int64(1), c.policy.Cost(key)) - - key, conflict = defaultStringHash("1") - c.setBuf <- &Item{ - flag: itemUpdate, - Key: key, - Conflict: conflict, - Value: 2, - Cost: 0, - } - time.Sleep(wait) - require.Equal(t, int64(2), c.policy.Cost(key)) - - key, conflict = defaultStringHash("1") - c.setBuf <- &Item{ - flag: itemDelete, - Key: key, - Conflict: conflict, - } - time.Sleep(wait) - key, conflict = defaultStringHash("1") - val, ok := c.store.Get(key, conflict) - require.False(t, ok) - require.Nil(t, val) - require.False(t, c.policy.Has(1)) - - key, conflict = defaultStringHash("2") - c.setBuf <- &Item{ - flag: itemNew, - Key: key, - Conflict: conflict, - Value: 2, - Cost: 3, - } - key, conflict = defaultStringHash("3") - c.setBuf <- &Item{ - flag: itemNew, - Key: key, - Conflict: conflict, - Value: 3, - Cost: 3, - } - key, conflict = defaultStringHash("4") - c.setBuf <- &Item{ - flag: itemNew, - Key: key, - Conflict: conflict, - Value: 3, - Cost: 3, - } - key, conflict = defaultStringHash("5") - c.setBuf <- &Item{ - flag: itemNew, - Key: key, - Conflict: conflict, - Value: 3, - Cost: 5, - } - time.Sleep(wait) - m.Lock() - require.NotEqual(t, 0, len(evicted)) - m.Unlock() - - defer func() { - require.NotNil(t, recover()) - }() - c.Close() - c.setBuf <- &Item{flag: itemNew} -} - -func TestCacheGet(t *testing.T) { - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - IgnoreInternalCost: true, - Metrics: true, - }) - require.NoError(t, err) - - key, conflict := defaultStringHash("1") - i := Item{ - Key: key, - Conflict: conflict, - Value: 1, - } - c.store.Set(&i) - val, ok := c.Get("1") - require.True(t, ok) - require.NotNil(t, val) - - val, ok = c.Get("2") - require.False(t, ok) - require.Nil(t, val) - - // 0.5 and not 1.0 because we tried Getting each item twice - require.Equal(t, 0.5, c.Metrics.Ratio()) - - c = nil - val, ok = c.Get("0") - require.False(t, ok) - require.Nil(t, val) -} - -// retrySet calls SetWithCost until the item is accepted by the cache. -func retrySet(t *testing.T, c *Cache, key string, value int, cost int64) { - for { - if set := c.SetWithCost(key, value, cost); !set { - time.Sleep(wait) - continue - } - - time.Sleep(wait) - val, ok := c.Get(key) - require.True(t, ok) - require.NotNil(t, val) - require.Equal(t, value, val.(int)) - return - } -} - -func TestCacheSet(t *testing.T) { - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - IgnoreInternalCost: true, - BufferItems: 64, - Metrics: true, - }) - require.NoError(t, err) - - retrySet(t, c, "1", 1, 1) - - c.SetWithCost("1", 2, 2) - val, ok := c.store.Get(defaultStringHash("1")) - require.True(t, ok) - require.Equal(t, 2, val.(int)) - - c.stop <- struct{}{} - for i := 0; i < setBufSize; i++ { - key, conflict := defaultStringHash("1") - c.setBuf <- &Item{ - flag: itemUpdate, - Key: key, - Conflict: conflict, - Value: 1, - Cost: 1, - } - } - require.False(t, c.SetWithCost("2", 2, 1)) - require.Equal(t, uint64(1), c.Metrics.SetsDropped()) - close(c.setBuf) - close(c.stop) - - c = nil - require.False(t, c.SetWithCost("1", 1, 1)) -} - -func TestCacheInternalCost(t *testing.T) { - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - Metrics: true, - }) - require.NoError(t, err) - - // Get should return false because the cache's cost is too small to store the item - // when accounting for the internal cost. - c.SetWithCost("1", 1, 1) - time.Sleep(wait) - _, ok := c.Get("1") - require.False(t, ok) -} - -func TestCacheDel(t *testing.T) { - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - }) - require.NoError(t, err) - - c.SetWithCost("1", 1, 1) - c.Delete("1") - // The deletes and sets are pushed through the setbuf. It might be possible - // that the delete is not processed before the following get is called. So - // wait for a millisecond for things to be processed. - time.Sleep(time.Millisecond) - val, ok := c.Get("1") - require.False(t, ok) - require.Nil(t, val) - - c = nil - defer func() { - require.Nil(t, recover()) - }() - c.Delete("1") -} - -func TestCacheClear(t *testing.T) { - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - IgnoreInternalCost: true, - BufferItems: 64, - Metrics: true, - }) - require.NoError(t, err) - - for i := 0; i < 10; i++ { - c.SetWithCost(strconv.Itoa(i), i, 1) - } - time.Sleep(wait) - require.Equal(t, uint64(10), c.Metrics.KeysAdded()) - - c.Clear() - require.Equal(t, uint64(0), c.Metrics.KeysAdded()) - - for i := 0; i < 10; i++ { - val, ok := c.Get(strconv.Itoa(i)) - require.False(t, ok) - require.Nil(t, val) - } -} - -func TestCacheMetrics(t *testing.T) { - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - IgnoreInternalCost: true, - BufferItems: 64, - Metrics: true, - }) - require.NoError(t, err) - - for i := 0; i < 10; i++ { - c.SetWithCost(strconv.Itoa(i), i, 1) - } - time.Sleep(wait) - m := c.Metrics - require.Equal(t, uint64(10), m.KeysAdded()) -} - -func TestMetrics(t *testing.T) { - newMetrics() -} - -func TestNilMetrics(t *testing.T) { - var m *Metrics - for _, f := range []func() uint64{ - m.Hits, - m.Misses, - m.KeysAdded, - m.KeysEvicted, - m.CostEvicted, - m.SetsDropped, - m.SetsRejected, - m.GetsDropped, - m.GetsKept, - } { - require.Equal(t, uint64(0), f()) - } -} - -func TestMetricsAddGet(t *testing.T) { - m := newMetrics() - m.add(hit, 1, 1) - m.add(hit, 2, 2) - m.add(hit, 3, 3) - require.Equal(t, uint64(6), m.Hits()) - - m = nil - m.add(hit, 1, 1) - require.Equal(t, uint64(0), m.Hits()) -} - -func TestMetricsRatio(t *testing.T) { - m := newMetrics() - require.Equal(t, float64(0), m.Ratio()) - - m.add(hit, 1, 1) - m.add(hit, 2, 2) - m.add(miss, 1, 1) - m.add(miss, 2, 2) - require.Equal(t, 0.5, m.Ratio()) - - m = nil - require.Equal(t, float64(0), m.Ratio()) -} - -func TestMetricsString(t *testing.T) { - m := newMetrics() - m.add(hit, 1, 1) - m.add(miss, 1, 1) - m.add(keyAdd, 1, 1) - m.add(keyUpdate, 1, 1) - m.add(keyEvict, 1, 1) - m.add(costAdd, 1, 1) - m.add(costEvict, 1, 1) - m.add(dropSets, 1, 1) - m.add(rejectSets, 1, 1) - m.add(dropGets, 1, 1) - m.add(keepGets, 1, 1) - require.Equal(t, uint64(1), m.Hits()) - require.Equal(t, uint64(1), m.Misses()) - require.Equal(t, 0.5, m.Ratio()) - require.Equal(t, uint64(1), m.KeysAdded()) - require.Equal(t, uint64(1), m.KeysUpdated()) - require.Equal(t, uint64(1), m.KeysEvicted()) - require.Equal(t, uint64(1), m.CostAdded()) - require.Equal(t, uint64(1), m.CostEvicted()) - require.Equal(t, uint64(1), m.SetsDropped()) - require.Equal(t, uint64(1), m.SetsRejected()) - require.Equal(t, uint64(1), m.GetsDropped()) - require.Equal(t, uint64(1), m.GetsKept()) - - require.NotEqual(t, 0, len(m.String())) - - m = nil - require.Equal(t, 0, len(m.String())) - - require.Equal(t, "unidentified", stringFor(doNotUse)) -} - -func TestCacheMetricsClear(t *testing.T) { - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - Metrics: true, - }) - require.NoError(t, err) - - c.SetWithCost("1", 1, 1) - stop := make(chan struct{}) - go func() { - for { - select { - case <-stop: - return - default: - c.Get("1") - } - } - }() - time.Sleep(wait) - c.Clear() - stop <- struct{}{} - c.Metrics = nil - c.Metrics.Clear() -} - -// Regression test for bug https://github.com/dgraph-io/ristretto/issues/167 -func TestDropUpdates(t *testing.T) { - originalSetBugSize := setBufSize - defer func() { setBufSize = originalSetBugSize }() - - test := func() { - // dropppedMap stores the items dropped from the cache. - droppedMap := make(map[int]struct{}) - lastEvictedSet := int64(-1) - - var err error - handler := func(_ any, value any) { - v := value.(string) - lastEvictedSet, err = strconv.ParseInt(string(v), 10, 32) - require.NoError(t, err) - - _, ok := droppedMap[int(lastEvictedSet)] - if ok { - panic(fmt.Sprintf("val = %+v was dropped but it got evicted. Dropped items: %+v\n", - lastEvictedSet, droppedMap)) - } - } - - // This is important. The race condition shows up only when the setBuf - // is full and that's why we reduce the buf size here. The test will - // try to fill up the setbuf to it's capacity and then perform an - // update on a key. - setBufSize = 10 - - c, err := NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - Metrics: true, - OnEvict: func(item *Item) { - if item.Value != nil { - handler(nil, item.Value) - } - }, - }) - require.NoError(t, err) - - for i := 0; i < 5*setBufSize; i++ { - v := fmt.Sprintf("%0100d", i) - // We're updating the same key. - if !c.SetWithCost("0", v, 1) { - // The race condition doesn't show up without this sleep. - time.Sleep(time.Microsecond) - droppedMap[i] = struct{}{} - } - } - // Wait for all the items to be processed. - c.Wait() - // This will cause eviction from the cache. - require.True(t, c.SetWithCost("1", nil, 10)) - c.Close() - } - - // Run the test 100 times since it's not reliable. - for i := 0; i < 100; i++ { - test() - } -} - -func newTestCache() (*Cache, error) { - return NewCache(&Config{ - NumCounters: 100, - MaxCost: 10, - BufferItems: 64, - Metrics: true, - }) -} diff --git a/go/cache/ristretto/policy.go b/go/cache/ristretto/policy.go deleted file mode 100644 index 84cc008cb99..00000000000 --- a/go/cache/ristretto/policy.go +++ /dev/null @@ -1,423 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "math" - "sync" - "sync/atomic" - - "vitess.io/vitess/go/cache/ristretto/bloom" -) - -const ( - // lfuSample is the number of items to sample when looking at eviction - // candidates. 5 seems to be the most optimal number [citation needed]. - lfuSample = 5 -) - -// policy is the interface encapsulating eviction/admission behavior. -// -// TODO: remove this interface and just rename defaultPolicy to policy, as we -// -// are probably only going to use/implement/maintain one policy. -type policy interface { - ringConsumer - // Add attempts to Add the key-cost pair to the Policy. It returns a slice - // of evicted keys and a bool denoting whether or not the key-cost pair - // was added. If it returns true, the key should be stored in cache. - Add(uint64, int64) ([]*Item, bool) - // Has returns true if the key exists in the Policy. - Has(uint64) bool - // Del deletes the key from the Policy. - Del(uint64) - // Cap returns the amount of used capacity. - Used() int64 - // Close stops all goroutines and closes all channels. - Close() - // Update updates the cost value for the key. - Update(uint64, int64) - // Cost returns the cost value of a key or -1 if missing. - Cost(uint64) int64 - // Optionally, set stats object to track how policy is performing. - CollectMetrics(*Metrics) - // Clear zeroes out all counters and clears hashmaps. - Clear() - // MaxCost returns the current max cost of the cache policy. - MaxCost() int64 - // UpdateMaxCost updates the max cost of the cache policy. - UpdateMaxCost(int64) -} - -func newPolicy(numCounters, maxCost int64) policy { - return newDefaultPolicy(numCounters, maxCost) -} - -type defaultPolicy struct { - sync.Mutex - admit *tinyLFU - evict *sampledLFU - itemsCh chan []uint64 - stop chan struct{} - isClosed bool - metrics *Metrics - numCounters int64 - maxCost int64 -} - -func newDefaultPolicy(numCounters, maxCost int64) *defaultPolicy { - p := &defaultPolicy{ - admit: newTinyLFU(numCounters), - evict: newSampledLFU(maxCost), - itemsCh: make(chan []uint64, 3), - stop: make(chan struct{}), - numCounters: numCounters, - maxCost: maxCost, - } - go p.processItems() - return p -} - -func (p *defaultPolicy) CollectMetrics(metrics *Metrics) { - p.metrics = metrics - p.evict.metrics = metrics -} - -type policyPair struct { - key uint64 - cost int64 -} - -func (p *defaultPolicy) processItems() { - for { - select { - case items := <-p.itemsCh: - p.Lock() - p.admit.Push(items) - p.Unlock() - case <-p.stop: - return - } - } -} - -func (p *defaultPolicy) Push(keys []uint64) bool { - if p.isClosed { - return false - } - - if len(keys) == 0 { - return true - } - - select { - case p.itemsCh <- keys: - p.metrics.add(keepGets, keys[0], uint64(len(keys))) - return true - default: - p.metrics.add(dropGets, keys[0], uint64(len(keys))) - return false - } -} - -// Add decides whether the item with the given key and cost should be accepted by -// the policy. It returns the list of victims that have been evicted and a boolean -// indicating whether the incoming item should be accepted. -func (p *defaultPolicy) Add(key uint64, cost int64) ([]*Item, bool) { - p.Lock() - defer p.Unlock() - - // Cannot add an item bigger than entire cache. - if cost > p.evict.getMaxCost() { - return nil, false - } - - // No need to go any further if the item is already in the cache. - if has := p.evict.updateIfHas(key, cost); has { - // An update does not count as an addition, so return false. - return nil, false - } - - // If the execution reaches this point, the key doesn't exist in the cache. - // Calculate the remaining room in the cache (usually bytes). - room := p.evict.roomLeft(cost) - if room >= 0 { - // There's enough room in the cache to store the new item without - // overflowing. Do that now and stop here. - p.evict.add(key, cost) - p.metrics.add(costAdd, key, uint64(cost)) - return nil, true - } - - // incHits is the hit count for the incoming item. - incHits := p.admit.Estimate(key) - // sample is the eviction candidate pool to be filled via random sampling. - // TODO: perhaps we should use a min heap here. Right now our time - // complexity is N for finding the min. Min heap should bring it down to - // O(lg N). - sample := make([]*policyPair, 0, lfuSample) - // As items are evicted they will be appended to victims. - victims := make([]*Item, 0) - - // Delete victims until there's enough space or a minKey is found that has - // more hits than incoming item. - for ; room < 0; room = p.evict.roomLeft(cost) { - // Fill up empty slots in sample. - sample = p.evict.fillSample(sample) - - // Find minimally used item in sample. - minKey, minHits, minID, minCost := uint64(0), int64(math.MaxInt64), 0, int64(0) - for i, pair := range sample { - // Look up hit count for sample key. - if hits := p.admit.Estimate(pair.key); hits < minHits { - minKey, minHits, minID, minCost = pair.key, hits, i, pair.cost - } - } - - // If the incoming item isn't worth keeping in the policy, reject. - if incHits < minHits { - p.metrics.add(rejectSets, key, 1) - return victims, false - } - - // Delete the victim from metadata. - p.evict.del(minKey) - - // Delete the victim from sample. - sample[minID] = sample[len(sample)-1] - sample = sample[:len(sample)-1] - // Store victim in evicted victims slice. - victims = append(victims, &Item{ - Key: minKey, - Conflict: 0, - Cost: minCost, - }) - } - - p.evict.add(key, cost) - p.metrics.add(costAdd, key, uint64(cost)) - return victims, true -} - -func (p *defaultPolicy) Has(key uint64) bool { - p.Lock() - _, exists := p.evict.keyCosts[key] - p.Unlock() - return exists -} - -func (p *defaultPolicy) Del(key uint64) { - p.Lock() - p.evict.del(key) - p.Unlock() -} - -func (p *defaultPolicy) Used() int64 { - p.Lock() - used := p.evict.used - p.Unlock() - return used -} - -func (p *defaultPolicy) Update(key uint64, cost int64) { - p.Lock() - p.evict.updateIfHas(key, cost) - p.Unlock() -} - -func (p *defaultPolicy) Cost(key uint64) int64 { - p.Lock() - if cost, found := p.evict.keyCosts[key]; found { - p.Unlock() - return cost - } - p.Unlock() - return -1 -} - -func (p *defaultPolicy) Clear() { - p.Lock() - p.admit = newTinyLFU(p.numCounters) - p.evict = newSampledLFU(p.maxCost) - p.Unlock() -} - -func (p *defaultPolicy) Close() { - if p.isClosed { - return - } - - // Block until the p.processItems goroutine returns. - p.stop <- struct{}{} - close(p.stop) - close(p.itemsCh) - p.isClosed = true -} - -func (p *defaultPolicy) MaxCost() int64 { - if p == nil || p.evict == nil { - return 0 - } - return p.evict.getMaxCost() -} - -func (p *defaultPolicy) UpdateMaxCost(maxCost int64) { - if p == nil || p.evict == nil { - return - } - p.evict.updateMaxCost(maxCost) -} - -// sampledLFU is an eviction helper storing key-cost pairs. -type sampledLFU struct { - keyCosts map[uint64]int64 - maxCost int64 - used int64 - metrics *Metrics -} - -func newSampledLFU(maxCost int64) *sampledLFU { - return &sampledLFU{ - keyCosts: make(map[uint64]int64), - maxCost: maxCost, - } -} - -func (p *sampledLFU) getMaxCost() int64 { - return atomic.LoadInt64(&p.maxCost) -} - -func (p *sampledLFU) updateMaxCost(maxCost int64) { - atomic.StoreInt64(&p.maxCost, maxCost) -} - -func (p *sampledLFU) roomLeft(cost int64) int64 { - return p.getMaxCost() - (p.used + cost) -} - -func (p *sampledLFU) fillSample(in []*policyPair) []*policyPair { - if len(in) >= lfuSample { - return in - } - for key, cost := range p.keyCosts { - in = append(in, &policyPair{key, cost}) - if len(in) >= lfuSample { - return in - } - } - return in -} - -func (p *sampledLFU) del(key uint64) { - cost, ok := p.keyCosts[key] - if !ok { - return - } - p.used -= cost - delete(p.keyCosts, key) - p.metrics.add(costEvict, key, uint64(cost)) - p.metrics.add(keyEvict, key, 1) -} - -func (p *sampledLFU) add(key uint64, cost int64) { - p.keyCosts[key] = cost - p.used += cost -} - -func (p *sampledLFU) updateIfHas(key uint64, cost int64) bool { - if prev, found := p.keyCosts[key]; found { - // Update the cost of an existing key, but don't worry about evicting. - // Evictions will be handled the next time a new item is added. - p.metrics.add(keyUpdate, key, 1) - if prev > cost { - diff := prev - cost - p.metrics.add(costAdd, key, ^uint64(uint64(diff)-1)) - } else if cost > prev { - diff := cost - prev - p.metrics.add(costAdd, key, uint64(diff)) - } - p.used += cost - prev - p.keyCosts[key] = cost - return true - } - return false -} - -func (p *sampledLFU) clear() { - p.used = 0 - p.keyCosts = make(map[uint64]int64) -} - -// tinyLFU is an admission helper that keeps track of access frequency using -// tiny (4-bit) counters in the form of a count-min sketch. -// tinyLFU is NOT thread safe. -type tinyLFU struct { - freq *cmSketch - door *bloom.Bloom - incrs int64 - resetAt int64 -} - -func newTinyLFU(numCounters int64) *tinyLFU { - return &tinyLFU{ - freq: newCmSketch(numCounters), - door: bloom.NewBloomFilterWithErrorRate(uint64(numCounters), 0.01), - resetAt: numCounters, - } -} - -func (p *tinyLFU) Push(keys []uint64) { - for _, key := range keys { - p.Increment(key) - } -} - -func (p *tinyLFU) Estimate(key uint64) int64 { - hits := p.freq.Estimate(key) - if p.door.Has(key) { - hits++ - } - return hits -} - -func (p *tinyLFU) Increment(key uint64) { - // Flip doorkeeper bit if not already done. - if added := p.door.AddIfNotHas(key); !added { - // Increment count-min counter if doorkeeper bit is already set. - p.freq.Increment(key) - } - p.incrs++ - if p.incrs >= p.resetAt { - p.reset() - } -} - -func (p *tinyLFU) reset() { - // Zero out incrs. - p.incrs = 0 - // clears doorkeeper bits - p.door.Clear() - // halves count-min counters - p.freq.Reset() -} - -func (p *tinyLFU) clear() { - p.incrs = 0 - p.freq.Clear() - p.door.Clear() -} diff --git a/go/cache/ristretto/policy_test.go b/go/cache/ristretto/policy_test.go deleted file mode 100644 index c864b6c74d0..00000000000 --- a/go/cache/ristretto/policy_test.go +++ /dev/null @@ -1,276 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func TestPolicy(t *testing.T) { - defer func() { - require.Nil(t, recover()) - }() - newPolicy(100, 10) -} - -func TestPolicyMetrics(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.CollectMetrics(newMetrics()) - require.NotNil(t, p.metrics) - require.NotNil(t, p.evict.metrics) -} - -func TestPolicyProcessItems(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.itemsCh <- []uint64{1, 2, 2} - time.Sleep(wait) - p.Lock() - require.Equal(t, int64(2), p.admit.Estimate(2)) - require.Equal(t, int64(1), p.admit.Estimate(1)) - p.Unlock() - - p.stop <- struct{}{} - p.itemsCh <- []uint64{3, 3, 3} - time.Sleep(wait) - p.Lock() - require.Equal(t, int64(0), p.admit.Estimate(3)) - p.Unlock() -} - -func TestPolicyPush(t *testing.T) { - p := newDefaultPolicy(100, 10) - require.True(t, p.Push([]uint64{})) - - keepCount := 0 - for i := 0; i < 10; i++ { - if p.Push([]uint64{1, 2, 3, 4, 5}) { - keepCount++ - } - } - require.NotEqual(t, 0, keepCount) -} - -func TestPolicyAdd(t *testing.T) { - p := newDefaultPolicy(1000, 100) - if victims, added := p.Add(1, 101); victims != nil || added { - t.Fatal("can't add an item bigger than entire cache") - } - p.Lock() - p.evict.add(1, 1) - p.admit.Increment(1) - p.admit.Increment(2) - p.admit.Increment(3) - p.Unlock() - - victims, added := p.Add(1, 1) - require.Nil(t, victims) - require.False(t, added) - - victims, added = p.Add(2, 20) - require.Nil(t, victims) - require.True(t, added) - - victims, added = p.Add(3, 90) - require.NotNil(t, victims) - require.True(t, added) - - victims, added = p.Add(4, 20) - require.NotNil(t, victims) - require.False(t, added) -} - -func TestPolicyHas(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.Add(1, 1) - require.True(t, p.Has(1)) - require.False(t, p.Has(2)) -} - -func TestPolicyDel(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.Add(1, 1) - p.Del(1) - p.Del(2) - require.False(t, p.Has(1)) - require.False(t, p.Has(2)) -} - -func TestPolicyCap(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.Add(1, 1) - require.Equal(t, int64(9), p.MaxCost()-p.Used()) -} - -func TestPolicyUpdate(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.Add(1, 1) - p.Update(1, 2) - p.Lock() - require.Equal(t, int64(2), p.evict.keyCosts[1]) - p.Unlock() -} - -func TestPolicyCost(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.Add(1, 2) - require.Equal(t, int64(2), p.Cost(1)) - require.Equal(t, int64(-1), p.Cost(2)) -} - -func TestPolicyClear(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.Add(1, 1) - p.Add(2, 2) - p.Add(3, 3) - p.Clear() - require.Equal(t, int64(10), p.MaxCost()-p.Used()) - require.False(t, p.Has(1)) - require.False(t, p.Has(2)) - require.False(t, p.Has(3)) -} - -func TestPolicyClose(t *testing.T) { - defer func() { - require.NotNil(t, recover()) - }() - - p := newDefaultPolicy(100, 10) - p.Add(1, 1) - p.Close() - p.itemsCh <- []uint64{1} -} - -func TestPushAfterClose(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.Close() - require.False(t, p.Push([]uint64{1, 2})) -} - -func TestAddAfterClose(t *testing.T) { - p := newDefaultPolicy(100, 10) - p.Close() - p.Add(1, 1) -} - -func TestSampledLFUAdd(t *testing.T) { - e := newSampledLFU(4) - e.add(1, 1) - e.add(2, 2) - e.add(3, 1) - require.Equal(t, int64(4), e.used) - require.Equal(t, int64(2), e.keyCosts[2]) -} - -func TestSampledLFUDel(t *testing.T) { - e := newSampledLFU(4) - e.add(1, 1) - e.add(2, 2) - e.del(2) - require.Equal(t, int64(1), e.used) - _, ok := e.keyCosts[2] - require.False(t, ok) - e.del(4) -} - -func TestSampledLFUUpdate(t *testing.T) { - e := newSampledLFU(4) - e.add(1, 1) - require.True(t, e.updateIfHas(1, 2)) - require.Equal(t, int64(2), e.used) - require.False(t, e.updateIfHas(2, 2)) -} - -func TestSampledLFUClear(t *testing.T) { - e := newSampledLFU(4) - e.add(1, 1) - e.add(2, 2) - e.add(3, 1) - e.clear() - require.Equal(t, 0, len(e.keyCosts)) - require.Equal(t, int64(0), e.used) -} - -func TestSampledLFURoom(t *testing.T) { - e := newSampledLFU(16) - e.add(1, 1) - e.add(2, 2) - e.add(3, 3) - require.Equal(t, int64(6), e.roomLeft(4)) -} - -func TestSampledLFUSample(t *testing.T) { - e := newSampledLFU(16) - e.add(4, 4) - e.add(5, 5) - sample := e.fillSample([]*policyPair{ - {1, 1}, - {2, 2}, - {3, 3}, - }) - k := sample[len(sample)-1].key - require.Equal(t, 5, len(sample)) - require.NotEqual(t, 1, k) - require.NotEqual(t, 2, k) - require.NotEqual(t, 3, k) - require.Equal(t, len(sample), len(e.fillSample(sample))) - e.del(5) - sample = e.fillSample(sample[:len(sample)-2]) - require.Equal(t, 4, len(sample)) -} - -func TestTinyLFUIncrement(t *testing.T) { - a := newTinyLFU(4) - a.Increment(1) - a.Increment(1) - a.Increment(1) - require.True(t, a.door.Has(1)) - require.Equal(t, int64(2), a.freq.Estimate(1)) - - a.Increment(1) - require.False(t, a.door.Has(1)) - require.Equal(t, int64(1), a.freq.Estimate(1)) -} - -func TestTinyLFUEstimate(t *testing.T) { - a := newTinyLFU(8) - a.Increment(1) - a.Increment(1) - a.Increment(1) - require.Equal(t, int64(3), a.Estimate(1)) - require.Equal(t, int64(0), a.Estimate(2)) -} - -func TestTinyLFUPush(t *testing.T) { - a := newTinyLFU(16) - a.Push([]uint64{1, 2, 2, 3, 3, 3}) - require.Equal(t, int64(1), a.Estimate(1)) - require.Equal(t, int64(2), a.Estimate(2)) - require.Equal(t, int64(3), a.Estimate(3)) - require.Equal(t, int64(6), a.incrs) -} - -func TestTinyLFUClear(t *testing.T) { - a := newTinyLFU(16) - a.Push([]uint64{1, 3, 3, 3}) - a.clear() - require.Equal(t, int64(0), a.incrs) - require.Equal(t, int64(0), a.Estimate(3)) -} diff --git a/go/cache/ristretto/ring.go b/go/cache/ristretto/ring.go deleted file mode 100644 index 84d8689ee37..00000000000 --- a/go/cache/ristretto/ring.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "sync" -) - -// ringConsumer is the user-defined object responsible for receiving and -// processing items in batches when buffers are drained. -type ringConsumer interface { - Push([]uint64) bool -} - -// ringStripe is a singular ring buffer that is not concurrent safe. -type ringStripe struct { - cons ringConsumer - data []uint64 - capa int -} - -func newRingStripe(cons ringConsumer, capa int64) *ringStripe { - return &ringStripe{ - cons: cons, - data: make([]uint64, 0, capa), - capa: int(capa), - } -} - -// Push appends an item in the ring buffer and drains (copies items and -// sends to Consumer) if full. -func (s *ringStripe) Push(item uint64) { - s.data = append(s.data, item) - // Decide if the ring buffer should be drained. - if len(s.data) >= s.capa { - // Send elements to consumer and create a new ring stripe. - if s.cons.Push(s.data) { - s.data = make([]uint64, 0, s.capa) - } else { - s.data = s.data[:0] - } - } -} - -// ringBuffer stores multiple buffers (stripes) and distributes Pushed items -// between them to lower contention. -// -// This implements the "batching" process described in the BP-Wrapper paper -// (section III part A). -type ringBuffer struct { - pool *sync.Pool -} - -// newRingBuffer returns a striped ring buffer. The Consumer in ringConfig will -// be called when individual stripes are full and need to drain their elements. -func newRingBuffer(cons ringConsumer, capa int64) *ringBuffer { - // LOSSY buffers use a very simple sync.Pool for concurrently reusing - // stripes. We do lose some stripes due to GC (unheld items in sync.Pool - // are cleared), but the performance gains generally outweigh the small - // percentage of elements lost. The performance primarily comes from - // low-level runtime functions used in the standard library that aren't - // available to us (such as runtime_procPin()). - return &ringBuffer{ - pool: &sync.Pool{ - New: func() any { return newRingStripe(cons, capa) }, - }, - } -} - -// Push adds an element to one of the internal stripes and possibly drains if -// the stripe becomes full. -func (b *ringBuffer) Push(item uint64) { - // Reuse or create a new stripe. - stripe := b.pool.Get().(*ringStripe) - stripe.Push(item) - b.pool.Put(stripe) -} diff --git a/go/cache/ristretto/ring_test.go b/go/cache/ristretto/ring_test.go deleted file mode 100644 index 0dbe962ccc6..00000000000 --- a/go/cache/ristretto/ring_test.go +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "sync" - "testing" - - "github.com/stretchr/testify/require" -) - -type testConsumer struct { - push func([]uint64) - save bool -} - -func (c *testConsumer) Push(items []uint64) bool { - if c.save { - c.push(items) - return true - } - return false -} - -func TestRingDrain(t *testing.T) { - drains := 0 - r := newRingBuffer(&testConsumer{ - push: func(items []uint64) { - drains++ - }, - save: true, - }, 1) - for i := 0; i < 100; i++ { - r.Push(uint64(i)) - } - require.Equal(t, 100, drains, "buffers shouldn't be dropped with BufferItems == 1") -} - -func TestRingReset(t *testing.T) { - drains := 0 - r := newRingBuffer(&testConsumer{ - push: func(items []uint64) { - drains++ - }, - save: false, - }, 4) - for i := 0; i < 100; i++ { - r.Push(uint64(i)) - } - require.Equal(t, 0, drains, "testConsumer shouldn't be draining") -} - -func TestRingConsumer(t *testing.T) { - mu := &sync.Mutex{} - drainItems := make(map[uint64]struct{}) - r := newRingBuffer(&testConsumer{ - push: func(items []uint64) { - mu.Lock() - defer mu.Unlock() - for i := range items { - drainItems[items[i]] = struct{}{} - } - }, - save: true, - }, 4) - for i := 0; i < 100; i++ { - r.Push(uint64(i)) - } - l := len(drainItems) - require.NotEqual(t, 0, l) - require.True(t, l <= 100) -} diff --git a/go/cache/ristretto/sketch.go b/go/cache/ristretto/sketch.go deleted file mode 100644 index ce0504a2a83..00000000000 --- a/go/cache/ristretto/sketch.go +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package ristretto includes multiple probabalistic data structures needed for -// admission/eviction metadata. Most are Counting Bloom Filter variations, but -// a caching-specific feature that is also required is a "freshness" mechanism, -// which basically serves as a "lifetime" process. This freshness mechanism -// was described in the original TinyLFU paper [1], but other mechanisms may -// be better suited for certain data distributions. -// -// [1]: https://arxiv.org/abs/1512.00727 -package ristretto - -import ( - "fmt" - "math/rand" - "time" -) - -// cmSketch is a Count-Min sketch implementation with 4-bit counters, heavily -// based on Damian Gryski's CM4 [1]. -// -// [1]: https://github.com/dgryski/go-tinylfu/blob/master/cm4.go -type cmSketch struct { - rows [cmDepth]cmRow - seed [cmDepth]uint64 - mask uint64 -} - -const ( - // cmDepth is the number of counter copies to store (think of it as rows). - cmDepth = 4 -) - -func newCmSketch(numCounters int64) *cmSketch { - if numCounters == 0 { - panic("cmSketch: bad numCounters") - } - // Get the next power of 2 for better cache performance. - numCounters = next2Power(numCounters) - sketch := &cmSketch{mask: uint64(numCounters - 1)} - // Initialize rows of counters and seeds. - source := rand.New(rand.NewSource(time.Now().UnixNano())) - for i := 0; i < cmDepth; i++ { - sketch.seed[i] = source.Uint64() - sketch.rows[i] = newCmRow(numCounters) - } - return sketch -} - -// Increment increments the count(ers) for the specified key. -func (s *cmSketch) Increment(hashed uint64) { - for i := range s.rows { - s.rows[i].increment((hashed ^ s.seed[i]) & s.mask) - } -} - -// Estimate returns the value of the specified key. -func (s *cmSketch) Estimate(hashed uint64) int64 { - min := byte(255) - for i := range s.rows { - val := s.rows[i].get((hashed ^ s.seed[i]) & s.mask) - if val < min { - min = val - } - } - return int64(min) -} - -// Reset halves all counter values. -func (s *cmSketch) Reset() { - for _, r := range s.rows { - r.reset() - } -} - -// Clear zeroes all counters. -func (s *cmSketch) Clear() { - for _, r := range s.rows { - r.clear() - } -} - -// cmRow is a row of bytes, with each byte holding two counters. -type cmRow []byte - -func newCmRow(numCounters int64) cmRow { - return make(cmRow, numCounters/2) -} - -func (r cmRow) get(n uint64) byte { - return byte(r[n/2]>>((n&1)*4)) & 0x0f -} - -func (r cmRow) increment(n uint64) { - // Index of the counter. - i := n / 2 - // Shift distance (even 0, odd 4). - s := (n & 1) * 4 - // Counter value. - v := (r[i] >> s) & 0x0f - // Only increment if not max value (overflow wrap is bad for LFU). - if v < 15 { - r[i] += 1 << s - } -} - -func (r cmRow) reset() { - // Halve each counter. - for i := range r { - r[i] = (r[i] >> 1) & 0x77 - } -} - -func (r cmRow) clear() { - // Zero each counter. - for i := range r { - r[i] = 0 - } -} - -func (r cmRow) string() string { - s := "" - for i := uint64(0); i < uint64(len(r)*2); i++ { - s += fmt.Sprintf("%02d ", (r[(i/2)]>>((i&1)*4))&0x0f) - } - s = s[:len(s)-1] - return s -} - -// next2Power rounds x up to the next power of 2, if it's not already one. -func next2Power(x int64) int64 { - x-- - x |= x >> 1 - x |= x >> 2 - x |= x >> 4 - x |= x >> 8 - x |= x >> 16 - x |= x >> 32 - x++ - return x -} diff --git a/go/cache/ristretto/sketch_test.go b/go/cache/ristretto/sketch_test.go deleted file mode 100644 index f0d523df559..00000000000 --- a/go/cache/ristretto/sketch_test.go +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestSketch(t *testing.T) { - defer func() { - require.NotNil(t, recover()) - }() - - s := newCmSketch(5) - require.Equal(t, uint64(7), s.mask) - newCmSketch(0) -} - -func TestSketchIncrement(t *testing.T) { - s := newCmSketch(16) - s.Increment(1) - s.Increment(5) - s.Increment(9) - for i := 0; i < cmDepth; i++ { - if s.rows[i].string() != s.rows[0].string() { - break - } - require.False(t, i == cmDepth-1, "identical rows, bad seeding") - } -} - -func TestSketchEstimate(t *testing.T) { - s := newCmSketch(16) - s.Increment(1) - s.Increment(1) - require.Equal(t, int64(2), s.Estimate(1)) - require.Equal(t, int64(0), s.Estimate(0)) -} - -func TestSketchReset(t *testing.T) { - s := newCmSketch(16) - s.Increment(1) - s.Increment(1) - s.Increment(1) - s.Increment(1) - s.Reset() - require.Equal(t, int64(2), s.Estimate(1)) -} - -func TestSketchClear(t *testing.T) { - s := newCmSketch(16) - for i := 0; i < 16; i++ { - s.Increment(uint64(i)) - } - s.Clear() - for i := 0; i < 16; i++ { - require.Equal(t, int64(0), s.Estimate(uint64(i))) - } -} - -func TestNext2Power(t *testing.T) { - sz := 12 << 30 - szf := float64(sz) * 0.01 - val := int64(szf) - t.Logf("szf = %.2f val = %d\n", szf, val) - pow := next2Power(val) - t.Logf("pow = %d. mult 4 = %d\n", pow, pow*4) -} - -func BenchmarkSketchIncrement(b *testing.B) { - s := newCmSketch(16) - b.SetBytes(1) - for n := 0; n < b.N; n++ { - s.Increment(1) - } -} - -func BenchmarkSketchEstimate(b *testing.B) { - s := newCmSketch(16) - s.Increment(1) - b.SetBytes(1) - for n := 0; n < b.N; n++ { - s.Estimate(1) - } -} diff --git a/go/cache/ristretto/store.go b/go/cache/ristretto/store.go deleted file mode 100644 index 0e455e7052f..00000000000 --- a/go/cache/ristretto/store.go +++ /dev/null @@ -1,240 +0,0 @@ -/* - * Copyright 2019 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "sync" -) - -// TODO: Do we need this to be a separate struct from Item? -type storeItem struct { - key uint64 - conflict uint64 - value any -} - -// store is the interface fulfilled by all hash map implementations in this -// file. Some hash map implementations are better suited for certain data -// distributions than others, so this allows us to abstract that out for use -// in Ristretto. -// -// Every store is safe for concurrent usage. -type store interface { - // Get returns the value associated with the key parameter. - Get(uint64, uint64) (any, bool) - // Set adds the key-value pair to the Map or updates the value if it's - // already present. The key-value pair is passed as a pointer to an - // item object. - Set(*Item) - // Del deletes the key-value pair from the Map. - Del(uint64, uint64) (uint64, any) - // Update attempts to update the key with a new value and returns true if - // successful. - Update(*Item) (any, bool) - // Clear clears all contents of the store. - Clear(onEvict itemCallback) - // ForEach yields all the values in the store - ForEach(forEach func(any) bool) - // Len returns the number of entries in the store - Len() int -} - -// newStore returns the default store implementation. -func newStore() store { - return newShardedMap() -} - -const numShards uint64 = 256 - -type shardedMap struct { - shards []*lockedMap -} - -func newShardedMap() *shardedMap { - sm := &shardedMap{ - shards: make([]*lockedMap, int(numShards)), - } - for i := range sm.shards { - sm.shards[i] = newLockedMap() - } - return sm -} - -func (sm *shardedMap) Get(key, conflict uint64) (any, bool) { - return sm.shards[key%numShards].get(key, conflict) -} - -func (sm *shardedMap) Set(i *Item) { - if i == nil { - // If item is nil make this Set a no-op. - return - } - - sm.shards[i.Key%numShards].Set(i) -} - -func (sm *shardedMap) Del(key, conflict uint64) (uint64, any) { - return sm.shards[key%numShards].Del(key, conflict) -} - -func (sm *shardedMap) Update(newItem *Item) (any, bool) { - return sm.shards[newItem.Key%numShards].Update(newItem) -} - -func (sm *shardedMap) ForEach(forEach func(any) bool) { - for _, shard := range sm.shards { - if !shard.foreach(forEach) { - break - } - } -} - -func (sm *shardedMap) Len() int { - l := 0 - for _, shard := range sm.shards { - l += shard.Len() - } - return l -} - -func (sm *shardedMap) Clear(onEvict itemCallback) { - for i := uint64(0); i < numShards; i++ { - sm.shards[i].Clear(onEvict) - } -} - -type lockedMap struct { - sync.RWMutex - data map[uint64]storeItem -} - -func newLockedMap() *lockedMap { - return &lockedMap{ - data: make(map[uint64]storeItem), - } -} - -func (m *lockedMap) get(key, conflict uint64) (any, bool) { - m.RLock() - item, ok := m.data[key] - m.RUnlock() - if !ok { - return nil, false - } - if conflict != 0 && (conflict != item.conflict) { - return nil, false - } - return item.value, true -} - -func (m *lockedMap) Set(i *Item) { - if i == nil { - // If the item is nil make this Set a no-op. - return - } - - m.Lock() - defer m.Unlock() - item, ok := m.data[i.Key] - - if ok { - // The item existed already. We need to check the conflict key and reject the - // update if they do not match. Only after that the expiration map is updated. - if i.Conflict != 0 && (i.Conflict != item.conflict) { - return - } - } - - m.data[i.Key] = storeItem{ - key: i.Key, - conflict: i.Conflict, - value: i.Value, - } -} - -func (m *lockedMap) Del(key, conflict uint64) (uint64, any) { - m.Lock() - item, ok := m.data[key] - if !ok { - m.Unlock() - return 0, nil - } - if conflict != 0 && (conflict != item.conflict) { - m.Unlock() - return 0, nil - } - - delete(m.data, key) - m.Unlock() - return item.conflict, item.value -} - -func (m *lockedMap) Update(newItem *Item) (any, bool) { - m.Lock() - item, ok := m.data[newItem.Key] - if !ok { - m.Unlock() - return nil, false - } - if newItem.Conflict != 0 && (newItem.Conflict != item.conflict) { - m.Unlock() - return nil, false - } - - m.data[newItem.Key] = storeItem{ - key: newItem.Key, - conflict: newItem.Conflict, - value: newItem.Value, - } - - m.Unlock() - return item.value, true -} - -func (m *lockedMap) Len() int { - m.RLock() - l := len(m.data) - m.RUnlock() - return l -} - -func (m *lockedMap) Clear(onEvict itemCallback) { - m.Lock() - i := &Item{} - if onEvict != nil { - for _, si := range m.data { - i.Key = si.key - i.Conflict = si.conflict - i.Value = si.value - onEvict(i) - } - } - m.data = make(map[uint64]storeItem) - m.Unlock() -} - -func (m *lockedMap) foreach(forEach func(any) bool) bool { - m.RLock() - defer m.RUnlock() - for _, si := range m.data { - if !forEach(si.value) { - return false - } - } - return true -} diff --git a/go/cache/ristretto/store_test.go b/go/cache/ristretto/store_test.go deleted file mode 100644 index 54634736a72..00000000000 --- a/go/cache/ristretto/store_test.go +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Copyright 2020 Dgraph Labs, Inc. and Contributors - * Copyright 2021 The Vitess Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ristretto - -import ( - "strconv" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestStoreSetGet(t *testing.T) { - s := newStore() - key, conflict := defaultStringHash("1") - i := Item{ - Key: key, - Conflict: conflict, - Value: 2, - } - s.Set(&i) - val, ok := s.Get(key, conflict) - require.True(t, ok) - require.Equal(t, 2, val.(int)) - - i.Value = 3 - s.Set(&i) - val, ok = s.Get(key, conflict) - require.True(t, ok) - require.Equal(t, 3, val.(int)) - - key, conflict = defaultStringHash("2") - i = Item{ - Key: key, - Conflict: conflict, - Value: 2, - } - s.Set(&i) - val, ok = s.Get(key, conflict) - require.True(t, ok) - require.Equal(t, 2, val.(int)) -} - -func TestStoreDel(t *testing.T) { - s := newStore() - key, conflict := defaultStringHash("1") - i := Item{ - Key: key, - Conflict: conflict, - Value: 1, - } - s.Set(&i) - s.Del(key, conflict) - val, ok := s.Get(key, conflict) - require.False(t, ok) - require.Nil(t, val) - - s.Del(2, 0) -} - -func TestStoreClear(t *testing.T) { - s := newStore() - for i := 0; i < 1000; i++ { - key, conflict := defaultStringHash(strconv.Itoa(i)) - it := Item{ - Key: key, - Conflict: conflict, - Value: i, - } - s.Set(&it) - } - s.Clear(nil) - for i := 0; i < 1000; i++ { - key, conflict := defaultStringHash(strconv.Itoa(i)) - val, ok := s.Get(key, conflict) - require.False(t, ok) - require.Nil(t, val) - } -} - -func TestStoreUpdate(t *testing.T) { - s := newStore() - key, conflict := defaultStringHash("1") - i := Item{ - Key: key, - Conflict: conflict, - Value: 1, - } - s.Set(&i) - i.Value = 2 - _, ok := s.Update(&i) - require.True(t, ok) - - val, ok := s.Get(key, conflict) - require.True(t, ok) - require.NotNil(t, val) - - val, ok = s.Get(key, conflict) - require.True(t, ok) - require.Equal(t, 2, val.(int)) - - i.Value = 3 - _, ok = s.Update(&i) - require.True(t, ok) - - val, ok = s.Get(key, conflict) - require.True(t, ok) - require.Equal(t, 3, val.(int)) - - key, conflict = defaultStringHash("2") - i = Item{ - Key: key, - Conflict: conflict, - Value: 2, - } - _, ok = s.Update(&i) - require.False(t, ok) - val, ok = s.Get(key, conflict) - require.False(t, ok) - require.Nil(t, val) -} - -func TestStoreCollision(t *testing.T) { - s := newShardedMap() - s.shards[1].Lock() - s.shards[1].data[1] = storeItem{ - key: 1, - conflict: 0, - value: 1, - } - s.shards[1].Unlock() - val, ok := s.Get(1, 1) - require.False(t, ok) - require.Nil(t, val) - - i := Item{ - Key: 1, - Conflict: 1, - Value: 2, - } - s.Set(&i) - val, ok = s.Get(1, 0) - require.True(t, ok) - require.NotEqual(t, 2, val.(int)) - - _, ok = s.Update(&i) - require.False(t, ok) - val, ok = s.Get(1, 0) - require.True(t, ok) - require.NotEqual(t, 2, val.(int)) - - s.Del(1, 1) - val, ok = s.Get(1, 0) - require.True(t, ok) - require.NotNil(t, val) -} - -func BenchmarkStoreGet(b *testing.B) { - s := newStore() - key, conflict := defaultStringHash("1") - i := Item{ - Key: key, - Conflict: conflict, - Value: 1, - } - s.Set(&i) - b.SetBytes(1) - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - s.Get(key, conflict) - } - }) -} - -func BenchmarkStoreSet(b *testing.B) { - s := newStore() - key, conflict := defaultStringHash("1") - b.SetBytes(1) - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - i := Item{ - Key: key, - Conflict: conflict, - Value: 1, - } - s.Set(&i) - } - }) -} - -func BenchmarkStoreUpdate(b *testing.B) { - s := newStore() - key, conflict := defaultStringHash("1") - i := Item{ - Key: key, - Conflict: conflict, - Value: 1, - } - s.Set(&i) - b.SetBytes(1) - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - s.Update(&Item{ - Key: key, - Conflict: conflict, - Value: 2, - }) - } - }) -} diff --git a/go/cache/theine/LICENSE b/go/cache/theine/LICENSE new file mode 100644 index 00000000000..0161260b7b6 --- /dev/null +++ b/go/cache/theine/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Yiling-J + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/go/cache/theine/bf/bf.go b/go/cache/theine/bf/bf.go new file mode 100644 index 00000000000..f68e34d81e3 --- /dev/null +++ b/go/cache/theine/bf/bf.go @@ -0,0 +1,116 @@ +package bf + +import ( + "math" +) + +// doorkeeper is a small bloom-filter-based cache admission policy +type Bloomfilter struct { + Filter bitvector // our filter bit vector + M uint32 // size of bit vector in bits + K uint32 // distinct hash functions needed + FalsePositiveRate float64 + Capacity int +} + +func New(falsePositiveRate float64) *Bloomfilter { + d := &Bloomfilter{FalsePositiveRate: falsePositiveRate} + d.EnsureCapacity(320) + return d +} + +// create new bloomfilter with given size in bytes +func NewWithSize(size uint32) *Bloomfilter { + d := &Bloomfilter{} + bits := size * 8 + m := nextPowerOfTwo(uint32(bits)) + d.M = m + d.Filter = newbv(m) + return d +} + +func (d *Bloomfilter) EnsureCapacity(capacity int) { + if capacity <= d.Capacity { + return + } + capacity = int(nextPowerOfTwo(uint32(capacity))) + bits := float64(capacity) * -math.Log(d.FalsePositiveRate) / (math.Log(2.0) * math.Log(2.0)) // in bits + m := nextPowerOfTwo(uint32(bits)) + + if m < 1024 { + m = 1024 + } + + k := uint32(0.7 * float64(m) / float64(capacity)) + if k < 2 { + k = 2 + } + d.Capacity = capacity + d.M = m + d.Filter = newbv(m) + d.K = k +} + +func (d *Bloomfilter) Exist(h uint64) bool { + h1, h2 := uint32(h), uint32(h>>32) + var o uint = 1 + for i := uint32(0); i < d.K; i++ { + o &= d.Filter.get((h1 + (i * h2)) & (d.M - 1)) + } + return o == 1 +} + +// insert inserts the byte array b into the bloom filter. Returns true if the value +// was already considered to be in the bloom filter. +func (d *Bloomfilter) Insert(h uint64) bool { + h1, h2 := uint32(h), uint32(h>>32) + var o uint = 1 + for i := uint32(0); i < d.K; i++ { + o &= d.Filter.getset((h1 + (i * h2)) & (d.M - 1)) + } + return o == 1 +} + +// Reset clears the bloom filter +func (d *Bloomfilter) Reset() { + for i := range d.Filter { + d.Filter[i] = 0 + } +} + +// Internal routines for the bit vector +type bitvector []uint64 + +func newbv(size uint32) bitvector { + return make([]uint64, uint(size+63)/64) +} + +func (b bitvector) get(bit uint32) uint { + shift := bit % 64 + idx := bit / 64 + bb := b[idx] + m := uint64(1) << shift + return uint((bb & m) >> shift) +} + +// set bit 'bit' in the bitvector d and return previous value +func (b bitvector) getset(bit uint32) uint { + shift := bit % 64 + idx := bit / 64 + bb := b[idx] + m := uint64(1) << shift + b[idx] |= m + return uint((bb & m) >> shift) +} + +// return the integer >= i which is a power of two +func nextPowerOfTwo(i uint32) uint32 { + n := i - 1 + n |= n >> 1 + n |= n >> 2 + n |= n >> 4 + n |= n >> 8 + n |= n >> 16 + n++ + return n +} diff --git a/go/cache/theine/bf/bf_test.go b/go/cache/theine/bf/bf_test.go new file mode 100644 index 00000000000..f0e505766e7 --- /dev/null +++ b/go/cache/theine/bf/bf_test.go @@ -0,0 +1,24 @@ +package bf + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBloom(t *testing.T) { + bf := NewWithSize(5) + bf.FalsePositiveRate = 0.1 + bf.EnsureCapacity(5) + bf.EnsureCapacity(500) + bf.EnsureCapacity(200) + + exist := bf.Insert(123) + require.False(t, exist) + + exist = bf.Exist(123) + require.True(t, exist) + + exist = bf.Exist(456) + require.False(t, exist) +} diff --git a/go/cache/theine/entry.go b/go/cache/theine/entry.go new file mode 100644 index 00000000000..48e3bd5a09a --- /dev/null +++ b/go/cache/theine/entry.go @@ -0,0 +1,93 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package theine + +import "sync/atomic" + +const ( + NEW int8 = iota + REMOVE + UPDATE +) + +type ReadBufItem[K cachekey, V any] struct { + entry *Entry[K, V] + hash uint64 +} +type WriteBufItem[K cachekey, V any] struct { + entry *Entry[K, V] + costChange int64 + code int8 +} + +type MetaData[K cachekey, V any] struct { + prev *Entry[K, V] + next *Entry[K, V] +} + +type Entry[K cachekey, V any] struct { + key K + value V + meta MetaData[K, V] + cost atomic.Int64 + frequency atomic.Int32 + epoch atomic.Uint32 + removed bool + deque bool + root bool + list uint8 // used in slru, probation or protected +} + +func NewEntry[K cachekey, V any](key K, value V, cost int64) *Entry[K, V] { + entry := &Entry[K, V]{ + key: key, + value: value, + } + entry.cost.Store(cost) + return entry +} + +func (e *Entry[K, V]) Next() *Entry[K, V] { + if p := e.meta.next; !p.root { + return e.meta.next + } + return nil +} + +func (e *Entry[K, V]) Prev() *Entry[K, V] { + if p := e.meta.prev; !p.root { + return e.meta.prev + } + return nil +} + +func (e *Entry[K, V]) prev() *Entry[K, V] { + return e.meta.prev +} + +func (e *Entry[K, V]) next() *Entry[K, V] { + return e.meta.next +} + +func (e *Entry[K, V]) setPrev(entry *Entry[K, V]) { + e.meta.prev = entry +} + +func (e *Entry[K, V]) setNext(entry *Entry[K, V]) { + e.meta.next = entry +} diff --git a/go/cache/theine/list.go b/go/cache/theine/list.go new file mode 100644 index 00000000000..19854190cba --- /dev/null +++ b/go/cache/theine/list.go @@ -0,0 +1,205 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package theine + +import ( + "fmt" + "strings" +) + +const ( + LIST_PROBATION uint8 = 1 + LIST_PROTECTED uint8 = 2 +) + +// List represents a doubly linked list. +// The zero value for List is an empty list ready to use. +type List[K cachekey, V any] struct { + root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used + len int // current list length(sum of costs) excluding (this) sentinel element + count int // count of entries in list + capacity uint + bounded bool + listType uint8 // 1 tinylfu list, 2 timerwheel list +} + +// New returns an initialized list. +func NewList[K cachekey, V any](size uint, listType uint8) *List[K, V] { + l := &List[K, V]{listType: listType, capacity: size, root: Entry[K, V]{}} + l.root.root = true + l.root.setNext(&l.root) + l.root.setPrev(&l.root) + l.len = 0 + l.capacity = size + if size > 0 { + l.bounded = true + } + return l +} + +func (l *List[K, V]) Reset() { + l.root.setNext(&l.root) + l.root.setPrev(&l.root) + l.len = 0 +} + +// Len returns the number of elements of list l. +// The complexity is O(1). +func (l *List[K, V]) Len() int { return l.len } + +func (l *List[K, V]) display() string { + var s []string + for e := l.Front(); e != nil; e = e.Next() { + s = append(s, fmt.Sprintf("%v", e.key)) + } + return strings.Join(s, "/") +} + +func (l *List[K, V]) displayReverse() string { + var s []string + for e := l.Back(); e != nil; e = e.Prev() { + s = append(s, fmt.Sprintf("%v", e.key)) + } + return strings.Join(s, "/") +} + +// Front returns the first element of list l or nil if the list is empty. +func (l *List[K, V]) Front() *Entry[K, V] { + e := l.root.next() + if e != &l.root { + return e + } + return nil +} + +// Back returns the last element of list l or nil if the list is empty. +func (l *List[K, V]) Back() *Entry[K, V] { + e := l.root.prev() + if e != &l.root { + return e + } + return nil +} + +// insert inserts e after at, increments l.len, and evicted entry if capacity exceed +func (l *List[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] { + var evicted *Entry[K, V] + if l.bounded && l.len >= int(l.capacity) { + evicted = l.PopTail() + } + e.list = l.listType + e.setPrev(at) + e.setNext(at.next()) + e.prev().setNext(e) + e.next().setPrev(e) + if l.bounded { + l.len += int(e.cost.Load()) + l.count += 1 + } + return evicted +} + +// PushFront push entry to list head +func (l *List[K, V]) PushFront(e *Entry[K, V]) *Entry[K, V] { + return l.insert(e, &l.root) +} + +// Push push entry to the back of list +func (l *List[K, V]) PushBack(e *Entry[K, V]) *Entry[K, V] { + return l.insert(e, l.root.prev()) +} + +// remove removes e from its list, decrements l.len +func (l *List[K, V]) remove(e *Entry[K, V]) { + e.prev().setNext(e.next()) + e.next().setPrev(e.prev()) + e.setNext(nil) + e.setPrev(nil) + e.list = 0 + if l.bounded { + l.len -= int(e.cost.Load()) + l.count -= 1 + } +} + +// move moves e to next to at. +func (l *List[K, V]) move(e, at *Entry[K, V]) { + if e == at { + return + } + e.prev().setNext(e.next()) + e.next().setPrev(e.prev()) + + e.setPrev(at) + e.setNext(at.next()) + e.prev().setNext(e) + e.next().setPrev(e) +} + +// Remove removes e from l if e is an element of list l. +// It returns the element value e.Value. +// The element must not be nil. +func (l *List[K, V]) Remove(e *Entry[K, V]) { + l.remove(e) +} + +// MoveToFront moves element e to the front of list l. +// If e is not an element of l, the list is not modified. +// The element must not be nil. +func (l *List[K, V]) MoveToFront(e *Entry[K, V]) { + l.move(e, &l.root) +} + +// MoveToBack moves element e to the back of list l. +// If e is not an element of l, the list is not modified. +// The element must not be nil. +func (l *List[K, V]) MoveToBack(e *Entry[K, V]) { + l.move(e, l.root.prev()) +} + +// MoveBefore moves element e to its new position before mark. +// If e or mark is not an element of l, or e == mark, the list is not modified. +// The element and mark must not be nil. +func (l *List[K, V]) MoveBefore(e, mark *Entry[K, V]) { + l.move(e, mark.prev()) +} + +// MoveAfter moves element e to its new position after mark. +// If e or mark is not an element of l, or e == mark, the list is not modified. +// The element and mark must not be nil. +func (l *List[K, V]) MoveAfter(e, mark *Entry[K, V]) { + l.move(e, mark) +} + +func (l *List[K, V]) PopTail() *Entry[K, V] { + entry := l.root.prev() + if entry != nil && entry != &l.root { + l.remove(entry) + return entry + } + return nil +} + +func (l *List[K, V]) Contains(entry *Entry[K, V]) bool { + for e := l.Front(); e != nil; e = e.Next() { + if e == entry { + return true + } + } + return false +} diff --git a/go/cache/theine/list_test.go b/go/cache/theine/list_test.go new file mode 100644 index 00000000000..aad68f5c142 --- /dev/null +++ b/go/cache/theine/list_test.go @@ -0,0 +1,91 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package theine + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestList(t *testing.T) { + l := NewList[StringKey, string](5, LIST_PROBATION) + require.Equal(t, uint(5), l.capacity) + require.Equal(t, LIST_PROBATION, l.listType) + for i := 0; i < 5; i++ { + evicted := l.PushFront(NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 1)) + require.Nil(t, evicted) + } + require.Equal(t, 5, l.len) + require.Equal(t, "4/3/2/1/0", l.display()) + require.Equal(t, "0/1/2/3/4", l.displayReverse()) + + evicted := l.PushFront(NewEntry(StringKey("5"), "", 1)) + require.Equal(t, StringKey("0"), evicted.key) + require.Equal(t, 5, l.len) + require.Equal(t, "5/4/3/2/1", l.display()) + require.Equal(t, "1/2/3/4/5", l.displayReverse()) + + for i := 0; i < 5; i++ { + entry := l.PopTail() + require.Equal(t, StringKey(fmt.Sprintf("%d", i+1)), entry.key) + } + entry := l.PopTail() + require.Nil(t, entry) + + var entries []*Entry[StringKey, string] + for i := 0; i < 5; i++ { + new := NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 1) + evicted := l.PushFront(new) + entries = append(entries, new) + require.Nil(t, evicted) + } + require.Equal(t, "4/3/2/1/0", l.display()) + l.MoveToBack(entries[2]) + require.Equal(t, "4/3/1/0/2", l.display()) + require.Equal(t, "2/0/1/3/4", l.displayReverse()) + l.MoveBefore(entries[1], entries[3]) + require.Equal(t, "4/1/3/0/2", l.display()) + require.Equal(t, "2/0/3/1/4", l.displayReverse()) + l.MoveAfter(entries[2], entries[4]) + require.Equal(t, "4/2/1/3/0", l.display()) + require.Equal(t, "0/3/1/2/4", l.displayReverse()) + l.Remove(entries[1]) + require.Equal(t, "4/2/3/0", l.display()) + require.Equal(t, "0/3/2/4", l.displayReverse()) + +} + +func TestListCountCost(t *testing.T) { + l := NewList[StringKey, string](100, LIST_PROBATION) + require.Equal(t, uint(100), l.capacity) + require.Equal(t, LIST_PROBATION, l.listType) + for i := 0; i < 5; i++ { + evicted := l.PushFront(NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 20)) + require.Nil(t, evicted) + } + require.Equal(t, 100, l.len) + require.Equal(t, 5, l.count) + for i := 0; i < 3; i++ { + entry := l.PopTail() + require.NotNil(t, entry) + } + require.Equal(t, 40, l.len) + require.Equal(t, 2, l.count) +} diff --git a/go/cache/theine/mpsc.go b/go/cache/theine/mpsc.go new file mode 100644 index 00000000000..c00e2ce5a26 --- /dev/null +++ b/go/cache/theine/mpsc.go @@ -0,0 +1,86 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package theine + +// This implementation is based on http://www.1024cores.net/home/lock-free-algorithms/queues/non-intrusive-mpsc-node-based-queue + +import ( + "sync" + "sync/atomic" +) + +type node[V any] struct { + next atomic.Pointer[node[V]] + val V +} + +type Queue[V any] struct { + head, tail atomic.Pointer[node[V]] + nodePool sync.Pool +} + +func NewQueue[V any]() *Queue[V] { + q := &Queue[V]{nodePool: sync.Pool{New: func() any { + return new(node[V]) + }}} + stub := &node[V]{} + q.head.Store(stub) + q.tail.Store(stub) + return q +} + +// Push adds x to the back of the queue. +// +// Push can be safely called from multiple goroutines +func (q *Queue[V]) Push(x V) { + n := q.nodePool.Get().(*node[V]) + n.val = x + + // current producer acquires head node + prev := q.head.Swap(n) + + // release node to consumer + prev.next.Store(n) +} + +// Pop removes the item from the front of the queue or nil if the queue is empty +// +// Pop must be called from a single, consumer goroutine +func (q *Queue[V]) Pop() (V, bool) { + tail := q.tail.Load() + next := tail.next.Load() + if next != nil { + var null V + q.tail.Store(next) + v := next.val + next.val = null + tail.next.Store(nil) + q.nodePool.Put(tail) + return v, true + } + var null V + return null, false +} + +// Empty returns true if the queue is empty +// +// Empty must be called from a single, consumer goroutine +func (q *Queue[V]) Empty() bool { + tail := q.tail.Load() + return tail.next.Load() == nil +} diff --git a/go/cache/perf_test.go b/go/cache/theine/mpsc_test.go similarity index 53% rename from go/cache/perf_test.go rename to go/cache/theine/mpsc_test.go index 693e55238a0..eca50efed3e 100644 --- a/go/cache/perf_test.go +++ b/go/cache/theine/mpsc_test.go @@ -1,5 +1,6 @@ /* -Copyright 2019 The Vitess Authors. +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,23 +15,32 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cache +package theine import ( "testing" + + "github.com/stretchr/testify/assert" ) -func BenchmarkGet(b *testing.B) { - cache := NewLRUCache(64*1024*1024, func(val any) int64 { - return int64(cap(val.([]byte))) - }) - value := make([]byte, 1000) - cache.Set("stuff", value) - for i := 0; i < b.N; i++ { - val, ok := cache.Get("stuff") - if !ok { - panic("error") - } - _ = val - } +func TestQueue_PushPop(t *testing.T) { + q := NewQueue[int]() + + q.Push(1) + q.Push(2) + v, ok := q.Pop() + assert.True(t, ok) + assert.Equal(t, 1, v) + v, ok = q.Pop() + assert.True(t, ok) + assert.Equal(t, 2, v) + _, ok = q.Pop() + assert.False(t, ok) +} + +func TestQueue_Empty(t *testing.T) { + q := NewQueue[int]() + assert.True(t, q.Empty()) + q.Push(1) + assert.False(t, q.Empty()) } diff --git a/go/cache/theine/singleflight.go b/go/cache/theine/singleflight.go new file mode 100644 index 00000000000..fde56670514 --- /dev/null +++ b/go/cache/theine/singleflight.go @@ -0,0 +1,196 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J +Copyright 2013 The Go Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package theine + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "runtime/debug" + "sync" + "sync/atomic" +) + +// errGoexit indicates the runtime.Goexit was called in +// the user given function. +var errGoexit = errors.New("runtime.Goexit was called") + +// A panicError is an arbitrary value recovered from a panic +// with the stack trace during the execution of given function. +type panicError struct { + value interface{} + stack []byte +} + +// Error implements error interface. +func (p *panicError) Error() string { + return fmt.Sprintf("%v\n\n%s", p.value, p.stack) +} + +func newPanicError(v interface{}) error { + stack := debug.Stack() + + // The first line of the stack trace is of the form "goroutine N [status]:" + // but by the time the panic reaches Do the goroutine may no longer exist + // and its status will have changed. Trim out the misleading line. + if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { + stack = stack[line+1:] + } + return &panicError{value: v, stack: stack} +} + +// call is an in-flight or completed singleflight.Do call +type call[V any] struct { + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val V + err error + + wg sync.WaitGroup + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups atomic.Int32 +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group[K comparable, V any] struct { + m map[K]*call[V] // lazily initialized + mu sync.Mutex // protects m + callPool sync.Pool +} + +func NewGroup[K comparable, V any]() *Group[K, V] { + return &Group[K, V]{ + callPool: sync.Pool{New: func() any { + return new(call[V]) + }}, + } +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group[K, V]) Do(key K, fn func() (V, error)) (v V, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[K]*call[V]) + } + if c, ok := g.m[key]; ok { + _ = c.dups.Add(1) + g.mu.Unlock() + c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } + // assign value/err before put back to pool to avoid race + v = c.val + err = c.err + n := c.dups.Add(-1) + if n == 0 { + g.callPool.Put(c) + } + return v, err, true + } + c := g.callPool.Get().(*call[V]) + defer func() { + n := c.dups.Add(-1) + if n == 0 { + g.callPool.Put(c) + } + }() + _ = c.dups.Add(1) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, true +} + +// doCall handles the single call for a key. +func (g *Group[K, V]) doCall(c *call[V], key K, fn func() (V, error)) { + normalReturn := false + recovered := false + + // use double-defer to distinguish panic from runtime.Goexit, + // more details see https://golang.org/cl/134395 + defer func() { + // the given function invoked runtime.Goexit + if !normalReturn && !recovered { + c.err = errGoexit + } + + g.mu.Lock() + defer g.mu.Unlock() + c.wg.Done() + if g.m[key] == c { + delete(g.m, key) + } + + if e, ok := c.err.(*panicError); ok { + panic(e) + } + }() + + func() { + defer func() { + if !normalReturn { + // Ideally, we would wait to take a stack trace until we've determined + // whether this is a panic or a runtime.Goexit. + // + // Unfortunately, the only way we can distinguish the two is to see + // whether the recover stopped the goroutine from terminating, and by + // the time we know that, the part of the stack trace relevant to the + // panic has been discarded. + if r := recover(); r != nil { + c.err = newPanicError(r) + } + } + }() + + c.val, c.err = fn() + normalReturn = true + }() + + if !normalReturn { + recovered = true + } +} diff --git a/go/cache/theine/singleflight_test.go b/go/cache/theine/singleflight_test.go new file mode 100644 index 00000000000..60b28e69b4e --- /dev/null +++ b/go/cache/theine/singleflight_test.go @@ -0,0 +1,211 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J +Copyright 2013 The Go Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package theine + +import ( + "crypto/rand" + "errors" + "fmt" + "io" + "runtime" + "sync" + "sync/atomic" + "testing" + "time" +) + +func TestDo(t *testing.T) { + g := NewGroup[string, string]() + v, err, _ := g.Do("key", func() (string, error) { + return "bar", nil + }) + if got, want := fmt.Sprintf("%v (%T)", v, v), "bar (string)"; got != want { + t.Errorf("Do = %v; want %v", got, want) + } + if err != nil { + t.Errorf("Do error = %v", err) + } +} + +func TestDoErr(t *testing.T) { + g := NewGroup[string, string]() + someErr := errors.New("Some error") + v, err, _ := g.Do("key", func() (string, error) { + return "", someErr + }) + if err != someErr { + t.Errorf("Do error = %v; want someErr %v", err, someErr) + } + if v != "" { + t.Errorf("unexpected non-nil value %#v", v) + } +} + +func TestDoDupSuppress(t *testing.T) { + g := NewGroup[string, string]() + var wg1, wg2 sync.WaitGroup + c := make(chan string, 1) + var calls int32 + fn := func() (string, error) { + if atomic.AddInt32(&calls, 1) == 1 { + // First invocation. + wg1.Done() + } + v := <-c + c <- v // pump; make available for any future calls + + time.Sleep(10 * time.Millisecond) // let more goroutines enter Do + + return v, nil + } + + const n = 10 + wg1.Add(1) + for i := 0; i < n; i++ { + wg1.Add(1) + wg2.Add(1) + go func() { + defer wg2.Done() + wg1.Done() + v, err, _ := g.Do("key", fn) + if err != nil { + t.Errorf("Do error: %v", err) + return + } + if s := v; s != "bar" { + t.Errorf("Do = %T %v; want %q", v, v, "bar") + } + }() + } + wg1.Wait() + // At least one goroutine is in fn now and all of them have at + // least reached the line before the Do. + c <- "bar" + wg2.Wait() + if got := atomic.LoadInt32(&calls); got <= 0 || got >= n { + t.Errorf("number of calls = %d; want over 0 and less than %d", got, n) + } +} + +// Test singleflight behaves correctly after Do panic. +// See https://github.com/golang/go/issues/41133 +func TestPanicDo(t *testing.T) { + g := NewGroup[string, string]() + fn := func() (string, error) { + panic("invalid memory address or nil pointer dereference") + } + + const n = 5 + waited := int32(n) + panicCount := int32(0) + done := make(chan struct{}) + for i := 0; i < n; i++ { + go func() { + defer func() { + if err := recover(); err != nil { + atomic.AddInt32(&panicCount, 1) + } + + if atomic.AddInt32(&waited, -1) == 0 { + close(done) + } + }() + + _, _, _ = g.Do("key", fn) + }() + } + + select { + case <-done: + if panicCount != n { + t.Errorf("Expect %d panic, but got %d", n, panicCount) + } + case <-time.After(time.Second): + t.Fatalf("Do hangs") + } +} + +func TestGoexitDo(t *testing.T) { + g := NewGroup[string, int]() + fn := func() (int, error) { + runtime.Goexit() + return 0, nil + } + + const n = 5 + waited := int32(n) + done := make(chan struct{}) + for i := 0; i < n; i++ { + go func() { + var err error + defer func() { + if err != nil { + t.Errorf("Error should be nil, but got: %v", err) + } + if atomic.AddInt32(&waited, -1) == 0 { + close(done) + } + }() + _, err, _ = g.Do("key", fn) + }() + } + + select { + case <-done: + case <-time.After(time.Second): + t.Fatalf("Do hangs") + } +} + +func BenchmarkDo(b *testing.B) { + keys := randKeys(b, 10240, 10) + benchDo(b, NewGroup[string, int](), keys) + +} + +func benchDo(b *testing.B, g *Group[string, int], keys []string) { + keyc := len(keys) + b.ReportAllocs() + b.ResetTimer() + + b.RunParallel(func(pb *testing.PB) { + for i := 0; pb.Next(); i++ { + _, _, _ = g.Do(keys[i%keyc], func() (int, error) { + return 0, nil + }) + } + }) +} + +func randKeys(b *testing.B, count, length uint) []string { + keys := make([]string, 0, count) + key := make([]byte, length) + + for i := uint(0); i < count; i++ { + if _, err := io.ReadFull(rand.Reader, key); err != nil { + b.Fatalf("Failed to generate random key %d of %d of length %d: %s", i+1, count, length, err) + } + keys = append(keys, string(key)) + } + return keys +} diff --git a/go/cache/theine/sketch.go b/go/cache/theine/sketch.go new file mode 100644 index 00000000000..7d241d94fc8 --- /dev/null +++ b/go/cache/theine/sketch.go @@ -0,0 +1,137 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package theine + +type CountMinSketch struct { + Table []uint64 + Additions uint + SampleSize uint + BlockMask uint +} + +func NewCountMinSketch() *CountMinSketch { + new := &CountMinSketch{} + new.EnsureCapacity(16) + return new +} + +// indexOf return table index and counter index together +func (s *CountMinSketch) indexOf(h uint64, block uint64, offset uint8) (uint, uint) { + counterHash := h + uint64(1+offset)*(h>>32) + // max block + 7(8 * 8 bytes), fit 64 bytes cache line + index := block + counterHash&1 + uint64(offset<<1) + return uint(index), uint((counterHash & 0xF) << 2) +} + +func (s *CountMinSketch) inc(index uint, offset uint) bool { + mask := uint64(0xF << offset) + if s.Table[index]&mask != mask { + s.Table[index] += 1 << offset + return true + } + return false +} + +func (s *CountMinSketch) Add(h uint64) bool { + hn := spread(h) + block := (hn & uint64(s.BlockMask)) << 3 + hc := rehash(h) + index0, offset0 := s.indexOf(hc, block, 0) + index1, offset1 := s.indexOf(hc, block, 1) + index2, offset2 := s.indexOf(hc, block, 2) + index3, offset3 := s.indexOf(hc, block, 3) + + added := s.inc(index0, offset0) + added = s.inc(index1, offset1) || added + added = s.inc(index2, offset2) || added + added = s.inc(index3, offset3) || added + + if added { + s.Additions += 1 + if s.Additions == s.SampleSize { + s.reset() + return true + } + } + return false +} + +func (s *CountMinSketch) reset() { + for i := range s.Table { + s.Table[i] = s.Table[i] >> 1 + } + s.Additions = s.Additions >> 1 +} + +func (s *CountMinSketch) count(h uint64, block uint64, offset uint8) uint { + index, off := s.indexOf(h, block, offset) + count := (s.Table[index] >> off) & 0xF + return uint(count) +} + +func (s *CountMinSketch) Estimate(h uint64) uint { + hn := spread(h) + block := (hn & uint64(s.BlockMask)) << 3 + hc := rehash(h) + m := min(s.count(hc, block, 0), 100) + m = min(s.count(hc, block, 1), m) + m = min(s.count(hc, block, 2), m) + m = min(s.count(hc, block, 3), m) + return m +} + +func next2Power(x uint) uint { + x-- + x |= x >> 1 + x |= x >> 2 + x |= x >> 4 + x |= x >> 8 + x |= x >> 16 + x |= x >> 32 + x++ + return x +} + +func (s *CountMinSketch) EnsureCapacity(size uint) { + if len(s.Table) >= int(size) { + return + } + if size < 16 { + size = 16 + } + newSize := next2Power(size) + s.Table = make([]uint64, newSize) + s.SampleSize = 10 * size + s.BlockMask = uint((len(s.Table) >> 3) - 1) + s.Additions = 0 +} + +func spread(h uint64) uint64 { + h ^= h >> 17 + h *= 0xed5ad4bb + h ^= h >> 11 + h *= 0xac4c1b51 + h ^= h >> 15 + return h +} + +func rehash(h uint64) uint64 { + h *= 0x31848bab + h ^= h >> 14 + return h +} diff --git a/go/cache/theine/sketch_test.go b/go/cache/theine/sketch_test.go new file mode 100644 index 00000000000..3437f0cac3c --- /dev/null +++ b/go/cache/theine/sketch_test.go @@ -0,0 +1,54 @@ +package theine + +import ( + "fmt" + "testing" + + "github.com/cespare/xxhash/v2" + "github.com/stretchr/testify/require" +) + +func TestEnsureCapacity(t *testing.T) { + sketch := NewCountMinSketch() + sketch.EnsureCapacity(1) + require.Equal(t, 16, len(sketch.Table)) +} + +func TestSketch(t *testing.T) { + sketch := NewCountMinSketch() + sketch.EnsureCapacity(100) + require.Equal(t, 128, len(sketch.Table)) + require.Equal(t, uint(1000), sketch.SampleSize) + // override sampleSize so test won't reset + sketch.SampleSize = 5120 + + failed := 0 + for i := 0; i < 500; i++ { + key := fmt.Sprintf("key:%d", i) + keyh := xxhash.Sum64String(key) + sketch.Add(keyh) + sketch.Add(keyh) + sketch.Add(keyh) + sketch.Add(keyh) + sketch.Add(keyh) + key = fmt.Sprintf("key:%d:b", i) + keyh2 := xxhash.Sum64String(key) + sketch.Add(keyh2) + sketch.Add(keyh2) + sketch.Add(keyh2) + + es1 := sketch.Estimate(keyh) + es2 := sketch.Estimate(keyh2) + if es2 > es1 { + failed++ + } + require.True(t, es1 >= 5) + require.True(t, es2 >= 3) + + } + require.True(t, float32(failed)/4000 < 0.1) + require.True(t, sketch.Additions > 3500) + a := sketch.Additions + sketch.reset() + require.Equal(t, a>>1, sketch.Additions) +} diff --git a/go/cache/theine/slru.go b/go/cache/theine/slru.go new file mode 100644 index 00000000000..e3bcb2532b1 --- /dev/null +++ b/go/cache/theine/slru.go @@ -0,0 +1,79 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package theine + +type Slru[K cachekey, V any] struct { + probation *List[K, V] + protected *List[K, V] + maxsize uint +} + +func NewSlru[K cachekey, V any](size uint) *Slru[K, V] { + return &Slru[K, V]{ + maxsize: size, + probation: NewList[K, V](size, LIST_PROBATION), + protected: NewList[K, V](uint(float32(size)*0.8), LIST_PROTECTED), + } +} + +func (s *Slru[K, V]) insert(entry *Entry[K, V]) *Entry[K, V] { + var evicted *Entry[K, V] + if s.probation.Len()+s.protected.Len() >= int(s.maxsize) { + evicted = s.probation.PopTail() + } + s.probation.PushFront(entry) + return evicted +} + +func (s *Slru[K, V]) victim() *Entry[K, V] { + if s.probation.Len()+s.protected.Len() < int(s.maxsize) { + return nil + } + return s.probation.Back() +} + +func (s *Slru[K, V]) access(entry *Entry[K, V]) { + switch entry.list { + case LIST_PROBATION: + s.probation.remove(entry) + evicted := s.protected.PushFront(entry) + if evicted != nil { + s.probation.PushFront(evicted) + } + case LIST_PROTECTED: + s.protected.MoveToFront(entry) + } +} + +func (s *Slru[K, V]) remove(entry *Entry[K, V]) { + switch entry.list { + case LIST_PROBATION: + s.probation.remove(entry) + case LIST_PROTECTED: + s.protected.remove(entry) + } +} + +func (s *Slru[K, V]) updateCost(entry *Entry[K, V], delta int64) { + switch entry.list { + case LIST_PROBATION: + s.probation.len += int(delta) + case LIST_PROTECTED: + s.protected.len += int(delta) + } +} diff --git a/go/cache/theine/store.go b/go/cache/theine/store.go new file mode 100644 index 00000000000..3d86e549867 --- /dev/null +++ b/go/cache/theine/store.go @@ -0,0 +1,615 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package theine + +import ( + "runtime" + "sync" + "sync/atomic" + "time" + + "github.com/gammazero/deque" + + "vitess.io/vitess/go/cache/theine/bf" + "vitess.io/vitess/go/hack" +) + +const ( + MaxReadBuffSize = 64 + MinWriteBuffSize = 4 + MaxWriteBuffSize = 1024 +) + +type RemoveReason uint8 + +const ( + REMOVED RemoveReason = iota + EVICTED + EXPIRED +) + +type Shard[K cachekey, V any] struct { + hashmap map[K]*Entry[K, V] + dookeeper *bf.Bloomfilter + deque *deque.Deque[*Entry[K, V]] + group *Group[K, V] + qsize uint + qlen int + counter uint + mu sync.RWMutex +} + +func NewShard[K cachekey, V any](size uint, qsize uint, doorkeeper bool) *Shard[K, V] { + s := &Shard[K, V]{ + hashmap: make(map[K]*Entry[K, V]), + qsize: qsize, + deque: deque.New[*Entry[K, V]](), + group: NewGroup[K, V](), + } + if doorkeeper { + s.dookeeper = bf.New(0.01) + } + return s +} + +func (s *Shard[K, V]) set(key K, entry *Entry[K, V]) { + s.hashmap[key] = entry + if s.dookeeper != nil { + ds := 20 * len(s.hashmap) + if ds > s.dookeeper.Capacity { + s.dookeeper.EnsureCapacity(ds) + } + } +} + +func (s *Shard[K, V]) get(key K) (entry *Entry[K, V], ok bool) { + entry, ok = s.hashmap[key] + return +} + +func (s *Shard[K, V]) delete(entry *Entry[K, V]) bool { + var deleted bool + exist, ok := s.hashmap[entry.key] + if ok && exist == entry { + delete(s.hashmap, exist.key) + deleted = true + } + return deleted +} + +func (s *Shard[K, V]) len() int { + return len(s.hashmap) +} + +type Metrics struct { + evicted atomic.Int64 + hits atomic.Int64 + misses atomic.Int64 +} + +func (m *Metrics) Evicted() int64 { + return m.evicted.Load() +} + +func (m *Metrics) Hits() int64 { + return m.hits.Load() +} + +func (m *Metrics) Misses() int64 { + return m.misses.Load() +} + +func (m *Metrics) Accesses() int64 { + return m.Hits() + m.Misses() +} + +type cachekey interface { + comparable + Hash() uint64 + Hash2() (uint64, uint64) +} + +type HashKey256 [32]byte + +func (h HashKey256) Hash() uint64 { + return uint64(h[0]) | uint64(h[1])<<8 | uint64(h[2])<<16 | uint64(h[3])<<24 | + uint64(h[4])<<32 | uint64(h[5])<<40 | uint64(h[6])<<48 | uint64(h[7])<<56 +} + +func (h HashKey256) Hash2() (uint64, uint64) { + h0 := h.Hash() + h1 := uint64(h[8]) | uint64(h[9])<<8 | uint64(h[10])<<16 | uint64(h[11])<<24 | + uint64(h[12])<<32 | uint64(h[13])<<40 | uint64(h[14])<<48 | uint64(h[15])<<56 + return h0, h1 +} + +type StringKey string + +func (h StringKey) Hash() uint64 { + return hack.RuntimeStrhash(string(h), 13850135847636357301) +} + +func (h StringKey) Hash2() (uint64, uint64) { + h0 := h.Hash() + h1 := ((h0 >> 16) ^ h0) * 0x45d9f3b + h1 = ((h1 >> 16) ^ h1) * 0x45d9f3b + h1 = (h1 >> 16) ^ h1 + return h0, h1 +} + +type cacheval interface { + CachedSize(alloc bool) int64 +} + +type Store[K cachekey, V cacheval] struct { + Metrics Metrics + OnRemoval func(K, V, RemoveReason) + + entryPool sync.Pool + writebuf chan WriteBufItem[K, V] + policy *TinyLfu[K, V] + readbuf *Queue[ReadBufItem[K, V]] + shards []*Shard[K, V] + cap uint + shardCount uint + writebufsize int64 + tailUpdate bool + doorkeeper bool + + mlock sync.Mutex + readCounter atomic.Uint32 + open atomic.Bool +} + +func NewStore[K cachekey, V cacheval](maxsize int64, doorkeeper bool) *Store[K, V] { + writeBufSize := maxsize / 100 + if writeBufSize < MinWriteBuffSize { + writeBufSize = MinWriteBuffSize + } + if writeBufSize > MaxWriteBuffSize { + writeBufSize = MaxWriteBuffSize + } + shardCount := 1 + for shardCount < runtime.GOMAXPROCS(0)*2 { + shardCount *= 2 + } + if shardCount < 16 { + shardCount = 16 + } + if shardCount > 128 { + shardCount = 128 + } + dequeSize := int(maxsize) / 100 / shardCount + shardSize := int(maxsize) / shardCount + if shardSize < 50 { + shardSize = 50 + } + policySize := int(maxsize) - (dequeSize * shardCount) + + s := &Store[K, V]{ + cap: uint(maxsize), + policy: NewTinyLfu[K, V](uint(policySize)), + readbuf: NewQueue[ReadBufItem[K, V]](), + writebuf: make(chan WriteBufItem[K, V], writeBufSize), + entryPool: sync.Pool{New: func() any { return &Entry[K, V]{} }}, + shardCount: uint(shardCount), + doorkeeper: doorkeeper, + writebufsize: writeBufSize, + } + s.shards = make([]*Shard[K, V], 0, s.shardCount) + for i := 0; i < int(s.shardCount); i++ { + s.shards = append(s.shards, NewShard[K, V](uint(shardSize), uint(dequeSize), doorkeeper)) + } + + go s.maintenance() + s.open.Store(true) + return s +} + +func (s *Store[K, V]) EnsureOpen() { + if s.open.Swap(true) { + return + } + s.writebuf = make(chan WriteBufItem[K, V], s.writebufsize) + go s.maintenance() +} + +func (s *Store[K, V]) getFromShard(key K, hash uint64, shard *Shard[K, V], epoch uint32) (V, bool) { + new := s.readCounter.Add(1) + shard.mu.RLock() + entry, ok := shard.get(key) + var value V + if ok { + if entry.epoch.Load() < epoch { + s.Metrics.misses.Add(1) + ok = false + } else { + s.Metrics.hits.Add(1) + s.policy.hit.Add(1) + value = entry.value + } + } else { + s.Metrics.misses.Add(1) + } + shard.mu.RUnlock() + switch { + case new < MaxReadBuffSize: + var send ReadBufItem[K, V] + send.hash = hash + if ok { + send.entry = entry + } + s.readbuf.Push(send) + case new == MaxReadBuffSize: + var send ReadBufItem[K, V] + send.hash = hash + if ok { + send.entry = entry + } + s.readbuf.Push(send) + s.drainRead() + } + return value, ok +} + +func (s *Store[K, V]) Get(key K, epoch uint32) (V, bool) { + h, index := s.index(key) + shard := s.shards[index] + return s.getFromShard(key, h, shard, epoch) +} + +func (s *Store[K, V]) GetOrLoad(key K, epoch uint32, load func() (V, error)) (V, bool, error) { + h, index := s.index(key) + shard := s.shards[index] + v, ok := s.getFromShard(key, h, shard, epoch) + if !ok { + loaded, err, _ := shard.group.Do(key, func() (V, error) { + loaded, err := load() + if err == nil { + s.Set(key, loaded, 0, epoch) + } + return loaded, err + }) + return loaded, false, err + } + return v, true, nil +} + +func (s *Store[K, V]) setEntry(shard *Shard[K, V], cost int64, epoch uint32, entry *Entry[K, V]) { + shard.set(entry.key, entry) + // cost larger than deque size, send to policy directly + if cost > int64(shard.qsize) { + shard.mu.Unlock() + s.writebuf <- WriteBufItem[K, V]{entry: entry, code: NEW} + return + } + entry.deque = true + shard.deque.PushFront(entry) + shard.qlen += int(cost) + s.processDeque(shard, epoch) +} + +func (s *Store[K, V]) setInternal(key K, value V, cost int64, epoch uint32) (*Shard[K, V], *Entry[K, V], bool) { + h, index := s.index(key) + shard := s.shards[index] + shard.mu.Lock() + exist, ok := shard.get(key) + if ok { + var costChange int64 + exist.value = value + oldCost := exist.cost.Swap(cost) + if oldCost != cost { + costChange = cost - oldCost + if exist.deque { + shard.qlen += int(costChange) + } + } + shard.mu.Unlock() + exist.epoch.Store(epoch) + if costChange != 0 { + s.writebuf <- WriteBufItem[K, V]{ + entry: exist, code: UPDATE, costChange: costChange, + } + } + return shard, exist, true + } + if s.doorkeeper { + if shard.counter > uint(shard.dookeeper.Capacity) { + shard.dookeeper.Reset() + shard.counter = 0 + } + hit := shard.dookeeper.Insert(h) + if !hit { + shard.counter += 1 + shard.mu.Unlock() + return shard, nil, false + } + } + entry := s.entryPool.Get().(*Entry[K, V]) + entry.frequency.Store(-1) + entry.key = key + entry.value = value + entry.cost.Store(cost) + entry.epoch.Store(epoch) + s.setEntry(shard, cost, epoch, entry) + return shard, entry, true + +} + +func (s *Store[K, V]) Set(key K, value V, cost int64, epoch uint32) bool { + if cost == 0 { + cost = value.CachedSize(true) + } + if cost > int64(s.cap) { + return false + } + _, _, ok := s.setInternal(key, value, cost, epoch) + return ok +} + +type dequeKV[K cachekey, V cacheval] struct { + k K + v V +} + +func (s *Store[K, V]) processDeque(shard *Shard[K, V], epoch uint32) { + if shard.qlen <= int(shard.qsize) { + shard.mu.Unlock() + return + } + var evictedkv []dequeKV[K, V] + var expiredkv []dequeKV[K, V] + + // send to slru + send := make([]*Entry[K, V], 0, 2) + for shard.qlen > int(shard.qsize) { + evicted := shard.deque.PopBack() + evicted.deque = false + shard.qlen -= int(evicted.cost.Load()) + + if evicted.epoch.Load() < epoch { + deleted := shard.delete(evicted) + if deleted { + if s.OnRemoval != nil { + evictedkv = append(evictedkv, dequeKV[K, V]{evicted.key, evicted.value}) + } + s.postDelete(evicted) + s.Metrics.evicted.Add(1) + } + } else { + count := evicted.frequency.Load() + threshold := s.policy.threshold.Load() + if count == -1 { + send = append(send, evicted) + } else { + if int32(count) >= threshold { + send = append(send, evicted) + } else { + deleted := shard.delete(evicted) + // double check because entry maybe removed already by Delete API + if deleted { + if s.OnRemoval != nil { + evictedkv = append(evictedkv, dequeKV[K, V]{evicted.key, evicted.value}) + } + s.postDelete(evicted) + s.Metrics.evicted.Add(1) + } + } + } + } + } + + shard.mu.Unlock() + for _, entry := range send { + s.writebuf <- WriteBufItem[K, V]{entry: entry, code: NEW} + } + if s.OnRemoval != nil { + for _, kv := range evictedkv { + s.OnRemoval(kv.k, kv.v, EVICTED) + } + for _, kv := range expiredkv { + s.OnRemoval(kv.k, kv.v, EXPIRED) + } + } +} + +func (s *Store[K, V]) Delete(key K) { + _, index := s.index(key) + shard := s.shards[index] + shard.mu.Lock() + entry, ok := shard.get(key) + if ok { + shard.delete(entry) + } + shard.mu.Unlock() + if ok { + s.writebuf <- WriteBufItem[K, V]{entry: entry, code: REMOVE} + } +} + +func (s *Store[K, V]) Len() int { + total := 0 + for _, s := range s.shards { + s.mu.RLock() + total += s.len() + s.mu.RUnlock() + } + return total +} + +func (s *Store[K, V]) UsedCapacity() int { + total := 0 + for _, s := range s.shards { + s.mu.RLock() + total += s.qlen + s.mu.RUnlock() + } + return total +} + +func (s *Store[K, V]) MaxCapacity() int { + return int(s.cap) +} + +// spread hash before get index +func (s *Store[K, V]) index(key K) (uint64, int) { + h0, h1 := key.Hash2() + return h0, int(h1 & uint64(s.shardCount-1)) +} + +func (s *Store[K, V]) postDelete(entry *Entry[K, V]) { + var zero V + entry.value = zero + s.entryPool.Put(entry) +} + +// remove entry from cache/policy/timingwheel and add back to pool +func (s *Store[K, V]) removeEntry(entry *Entry[K, V], reason RemoveReason) { + if prev := entry.meta.prev; prev != nil { + s.policy.Remove(entry) + } + switch reason { + case EVICTED, EXPIRED: + _, index := s.index(entry.key) + shard := s.shards[index] + shard.mu.Lock() + deleted := shard.delete(entry) + shard.mu.Unlock() + if deleted { + if s.OnRemoval != nil { + s.OnRemoval(entry.key, entry.value, reason) + } + s.postDelete(entry) + s.Metrics.evicted.Add(1) + } + case REMOVED: + // already removed from shard map + if s.OnRemoval != nil { + s.OnRemoval(entry.key, entry.value, reason) + } + } +} + +func (s *Store[K, V]) drainRead() { + s.policy.total.Add(MaxReadBuffSize) + s.mlock.Lock() + for { + v, ok := s.readbuf.Pop() + if !ok { + break + } + s.policy.Access(v) + } + s.mlock.Unlock() + s.readCounter.Store(0) +} + +func (s *Store[K, V]) maintenanceItem(item WriteBufItem[K, V]) { + s.mlock.Lock() + defer s.mlock.Unlock() + + entry := item.entry + if entry == nil { + return + } + + // lock free because store API never read/modify entry metadata + switch item.code { + case NEW: + if entry.removed { + return + } + evicted := s.policy.Set(entry) + if evicted != nil { + s.removeEntry(evicted, EVICTED) + s.tailUpdate = true + } + removed := s.policy.EvictEntries() + for _, e := range removed { + s.tailUpdate = true + s.removeEntry(e, EVICTED) + } + case REMOVE: + entry.removed = true + s.removeEntry(entry, REMOVED) + s.policy.threshold.Store(-1) + case UPDATE: + if item.costChange != 0 { + s.policy.UpdateCost(entry, item.costChange) + removed := s.policy.EvictEntries() + for _, e := range removed { + s.tailUpdate = true + s.removeEntry(e, EVICTED) + } + } + } + item.entry = nil + if s.tailUpdate { + s.policy.UpdateThreshold() + s.tailUpdate = false + } +} + +func (s *Store[K, V]) maintenance() { + tick := time.NewTicker(500 * time.Millisecond) + defer tick.Stop() + + for { + select { + case <-tick.C: + s.mlock.Lock() + s.policy.UpdateThreshold() + s.mlock.Unlock() + + case item, ok := <-s.writebuf: + if !ok { + return + } + s.maintenanceItem(item) + } + } +} + +func (s *Store[K, V]) Range(epoch uint32, f func(key K, value V) bool) { + for _, shard := range s.shards { + shard.mu.RLock() + for _, entry := range shard.hashmap { + if entry.epoch.Load() < epoch { + continue + } + if !f(entry.key, entry.value) { + shard.mu.RUnlock() + return + } + } + shard.mu.RUnlock() + } +} + +func (s *Store[K, V]) Close() { + if !s.open.Swap(false) { + panic("theine.Store: double close") + } + + for _, s := range s.shards { + s.mu.Lock() + clear(s.hashmap) + s.mu.Unlock() + } + close(s.writebuf) +} diff --git a/go/cache/theine/store_test.go b/go/cache/theine/store_test.go new file mode 100644 index 00000000000..880acf30193 --- /dev/null +++ b/go/cache/theine/store_test.go @@ -0,0 +1,82 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package theine + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +type cachedint int + +func (ci cachedint) CachedSize(bool) int64 { + return 1 +} + +type keyint int + +func (k keyint) Hash() uint64 { + return uint64(k) +} + +func (k keyint) Hash2() (uint64, uint64) { + return uint64(k), uint64(k) * 333 +} + +func TestProcessDeque(t *testing.T) { + store := NewStore[keyint, cachedint](20000, false) + + evicted := map[keyint]cachedint{} + store.OnRemoval = func(key keyint, value cachedint, reason RemoveReason) { + if reason == EVICTED { + evicted[key] = value + } + } + _, index := store.index(123) + shard := store.shards[index] + shard.qsize = 10 + + for i := keyint(0); i < 5; i++ { + entry := &Entry[keyint, cachedint]{key: i} + entry.cost.Store(1) + store.shards[index].deque.PushFront(entry) + store.shards[index].qlen += 1 + store.shards[index].hashmap[i] = entry + } + + // move 0,1,2 entries to slru + store.Set(123, 123, 8, 0) + require.Equal(t, store.shards[index].deque.Len(), 3) + var keys []keyint + for store.shards[index].deque.Len() != 0 { + e := store.shards[index].deque.PopBack() + keys = append(keys, e.key) + } + require.Equal(t, []keyint{3, 4, 123}, keys) +} + +func TestDoorKeeperDynamicSize(t *testing.T) { + store := NewStore[keyint, cachedint](200000, true) + shard := store.shards[0] + require.True(t, shard.dookeeper.Capacity == 512) + for i := keyint(0); i < 5000; i++ { + shard.set(i, &Entry[keyint, cachedint]{}) + } + require.True(t, shard.dookeeper.Capacity > 100000) +} diff --git a/go/cache/theine/tlfu.go b/go/cache/theine/tlfu.go new file mode 100644 index 00000000000..f7a4f8dec51 --- /dev/null +++ b/go/cache/theine/tlfu.go @@ -0,0 +1,197 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package theine + +import ( + "sync/atomic" +) + +type TinyLfu[K cachekey, V any] struct { + slru *Slru[K, V] + sketch *CountMinSketch + size uint + counter uint + total atomic.Uint32 + hit atomic.Uint32 + hr float32 + threshold atomic.Int32 + lruFactor uint8 + step int8 +} + +func NewTinyLfu[K cachekey, V any](size uint) *TinyLfu[K, V] { + tlfu := &TinyLfu[K, V]{ + size: size, + slru: NewSlru[K, V](size), + sketch: NewCountMinSketch(), + step: 1, + } + // default threshold to -1 so all entries are admitted until cache is full + tlfu.threshold.Store(-1) + return tlfu +} + +func (t *TinyLfu[K, V]) climb() { + total := t.total.Load() + hit := t.hit.Load() + current := float32(hit) / float32(total) + delta := current - t.hr + var diff int8 + if delta > 0.0 { + if t.step < 0 { + t.step -= 1 + } else { + t.step += 1 + } + if t.step < -13 { + t.step = -13 + } else if t.step > 13 { + t.step = 13 + } + newFactor := int8(t.lruFactor) + t.step + if newFactor < 0 { + newFactor = 0 + } else if newFactor > 16 { + newFactor = 16 + } + diff = newFactor - int8(t.lruFactor) + t.lruFactor = uint8(newFactor) + } else if delta < 0.0 { + // reset + if t.step > 0 { + t.step = -1 + } else { + t.step = 1 + } + newFactor := int8(t.lruFactor) + t.step + if newFactor < 0 { + newFactor = 0 + } else if newFactor > 16 { + newFactor = 16 + } + diff = newFactor - int8(t.lruFactor) + t.lruFactor = uint8(newFactor) + } + t.threshold.Add(-int32(diff)) + t.hr = current + t.hit.Store(0) + t.total.Store(0) +} + +func (t *TinyLfu[K, V]) Set(entry *Entry[K, V]) *Entry[K, V] { + t.counter++ + if t.counter > 10*t.size { + t.climb() + t.counter = 0 + } + if entry.meta.prev == nil { + if victim := t.slru.victim(); victim != nil { + freq := int(entry.frequency.Load()) + if freq == -1 { + freq = int(t.sketch.Estimate(entry.key.Hash())) + } + evictedCount := uint(freq) + uint(t.lruFactor) + victimCount := t.sketch.Estimate(victim.key.Hash()) + if evictedCount <= uint(victimCount) { + return entry + } + } else { + count := t.slru.probation.count + t.slru.protected.count + t.sketch.EnsureCapacity(uint(count + count/100)) + } + evicted := t.slru.insert(entry) + return evicted + } + + return nil +} + +func (t *TinyLfu[K, V]) Access(item ReadBufItem[K, V]) { + t.counter++ + if t.counter > 10*t.size { + t.climb() + t.counter = 0 + } + if entry := item.entry; entry != nil { + reset := t.sketch.Add(item.hash) + if reset { + t.threshold.Store(t.threshold.Load() / 2) + } + if entry.meta.prev != nil { + var tail bool + if entry == t.slru.victim() { + tail = true + } + t.slru.access(entry) + if tail { + t.UpdateThreshold() + } + } else { + entry.frequency.Store(int32(t.sketch.Estimate(item.hash))) + } + } else { + reset := t.sketch.Add(item.hash) + if reset { + t.threshold.Store(t.threshold.Load() / 2) + } + } +} + +func (t *TinyLfu[K, V]) Remove(entry *Entry[K, V]) { + t.slru.remove(entry) +} + +func (t *TinyLfu[K, V]) UpdateCost(entry *Entry[K, V], delta int64) { + t.slru.updateCost(entry, delta) +} + +func (t *TinyLfu[K, V]) EvictEntries() []*Entry[K, V] { + removed := []*Entry[K, V]{} + + for t.slru.probation.Len()+t.slru.protected.Len() > int(t.slru.maxsize) { + entry := t.slru.probation.PopTail() + if entry == nil { + break + } + removed = append(removed, entry) + } + for t.slru.probation.Len()+t.slru.protected.Len() > int(t.slru.maxsize) { + entry := t.slru.protected.PopTail() + if entry == nil { + break + } + removed = append(removed, entry) + } + return removed +} + +func (t *TinyLfu[K, V]) UpdateThreshold() { + if t.slru.probation.Len()+t.slru.protected.Len() < int(t.slru.maxsize) { + t.threshold.Store(-1) + } else { + tail := t.slru.victim() + if tail != nil { + t.threshold.Store( + int32(t.sketch.Estimate(tail.key.Hash()) - uint(t.lruFactor)), + ) + } else { + // cache is not full + t.threshold.Store(-1) + } + } +} diff --git a/go/cache/theine/tlfu_test.go b/go/cache/theine/tlfu_test.go new file mode 100644 index 00000000000..ac6ddaabdb6 --- /dev/null +++ b/go/cache/theine/tlfu_test.go @@ -0,0 +1,156 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright 2023 Yiling-J + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package theine + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestTlfu(t *testing.T) { + tlfu := NewTinyLfu[StringKey, string](1000) + require.Equal(t, uint(1000), tlfu.slru.probation.capacity) + require.Equal(t, uint(800), tlfu.slru.protected.capacity) + require.Equal(t, 0, tlfu.slru.probation.len) + require.Equal(t, 0, tlfu.slru.protected.len) + + var entries []*Entry[StringKey, string] + for i := 0; i < 200; i++ { + e := NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 1) + evicted := tlfu.Set(e) + entries = append(entries, e) + require.Nil(t, evicted) + } + + require.Equal(t, 200, tlfu.slru.probation.len) + require.Equal(t, 0, tlfu.slru.protected.len) + + // probation -> protected + tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[11]}) + require.Equal(t, 199, tlfu.slru.probation.len) + require.Equal(t, 1, tlfu.slru.protected.len) + tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[11]}) + require.Equal(t, 199, tlfu.slru.probation.len) + require.Equal(t, 1, tlfu.slru.protected.len) + + for i := 200; i < 1000; i++ { + e := NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 1) + entries = append(entries, e) + evicted := tlfu.Set(e) + require.Nil(t, evicted) + } + // access protected + tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[11]}) + require.Equal(t, 999, tlfu.slru.probation.len) + require.Equal(t, 1, tlfu.slru.protected.len) + + evicted := tlfu.Set(NewEntry(StringKey("0a"), "", 1)) + require.Equal(t, StringKey("0a"), evicted.key) + require.Equal(t, 999, tlfu.slru.probation.len) + require.Equal(t, 1, tlfu.slru.protected.len) + + victim := tlfu.slru.victim() + require.Equal(t, StringKey("0"), victim.key) + tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[991]}) + tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[991]}) + tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[991]}) + tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[991]}) + evicted = tlfu.Set(NewEntry(StringKey("1a"), "", 1)) + require.Equal(t, StringKey("1a"), evicted.key) + require.Equal(t, 998, tlfu.slru.probation.len) + + var entries2 []*Entry[StringKey, string] + for i := 0; i < 1000; i++ { + e := NewEntry(StringKey(fmt.Sprintf("%d*", i)), "", 1) + tlfu.Set(e) + entries2 = append(entries2, e) + } + require.Equal(t, 998, tlfu.slru.probation.len) + require.Equal(t, 2, tlfu.slru.protected.len) + + for _, i := range []int{997, 998, 999} { + tlfu.Remove(entries2[i]) + tlfu.slru.probation.display() + tlfu.slru.probation.displayReverse() + tlfu.slru.protected.display() + tlfu.slru.protected.displayReverse() + } + +} + +func TestEvictEntries(t *testing.T) { + tlfu := NewTinyLfu[StringKey, string](500) + require.Equal(t, uint(500), tlfu.slru.probation.capacity) + require.Equal(t, uint(400), tlfu.slru.protected.capacity) + require.Equal(t, 0, tlfu.slru.probation.len) + require.Equal(t, 0, tlfu.slru.protected.len) + + for i := 0; i < 500; i++ { + tlfu.Set(NewEntry(StringKey(fmt.Sprintf("%d:1", i)), "", 1)) + } + require.Equal(t, 500, tlfu.slru.probation.len) + require.Equal(t, 0, tlfu.slru.protected.len) + new := NewEntry(StringKey("l:10"), "", 10) + new.frequency.Store(10) + tlfu.Set(new) + require.Equal(t, 509, tlfu.slru.probation.len) + require.Equal(t, 0, tlfu.slru.protected.len) + // 2. probation length is 509, so remove 9 entries from probation + removed := tlfu.EvictEntries() + for _, rm := range removed { + require.True(t, strings.HasSuffix(string(rm.key), ":1")) + } + require.Equal(t, 9, len(removed)) + require.Equal(t, 500, tlfu.slru.probation.len) + require.Equal(t, 0, tlfu.slru.protected.len) + + // put l:450 to probation, this will remove 1 entry, probation len is 949 now + // remove 449 entries from probation + new = NewEntry(StringKey("l:450"), "", 450) + new.frequency.Store(10) + tlfu.Set(new) + removed = tlfu.EvictEntries() + require.Equal(t, 449, len(removed)) + require.Equal(t, 500, tlfu.slru.probation.len) + require.Equal(t, 0, tlfu.slru.protected.len) + + // put l:460 to probation, this will remove 1 entry, probation len is 959 now + // remove all entries except the new l:460 one + new = NewEntry(StringKey("l:460"), "", 460) + new.frequency.Store(10) + tlfu.Set(new) + removed = tlfu.EvictEntries() + require.Equal(t, 41, len(removed)) + require.Equal(t, 460, tlfu.slru.probation.len) + require.Equal(t, 0, tlfu.slru.protected.len) + + // access + tlfu.Access(ReadBufItem[StringKey, string]{entry: new}) + require.Equal(t, 0, tlfu.slru.probation.len) + require.Equal(t, 460, tlfu.slru.protected.len) + new.cost.Store(600) + tlfu.UpdateCost(new, 140) + removed = tlfu.EvictEntries() + require.Equal(t, 1, len(removed)) + require.Equal(t, 0, tlfu.slru.probation.len) + require.Equal(t, 0, tlfu.slru.protected.len) + +} diff --git a/go/cmd/internal/docgen/docgen.go b/go/cmd/internal/docgen/docgen.go index 6fe461e5af7..3fdaf7a59bf 100644 --- a/go/cmd/internal/docgen/docgen.go +++ b/go/cmd/internal/docgen/docgen.go @@ -46,6 +46,7 @@ import ( "fmt" "io/fs" "os" + "os/exec" "path/filepath" "strings" @@ -79,9 +80,85 @@ func GenerateMarkdownTree(cmd *cobra.Command, dir string) error { return fmt.Errorf("failed to index doc (generated at %s) into proper position (%s): %w", rootDocPath, indexDocPath, err) } + if err := restructure(dir, dir, cmd.Name(), cmd.Commands()); err != nil { + return err + } + return nil } +/* +_index.md (aka vtctldclient.md) +vtctldclient_AddCellInfo.md +vtctldclient_movetables.md +vtctldclient_movetables_show.md + +becomes + +_index.md +vtctldclient_AddCellInfo.md +vtctldclient_movetables/ + _index.md + vtctldclient_movetables_show.md +*/ + +func restructure(rootDir string, dir string, name string, commands []*cobra.Command) error { + for _, cmd := range commands { + fullCmdFilename := strings.Join([]string{name, cmd.Name()}, "_") + + children := cmd.Commands() + + switch { + case len(children) > 0: + // Command (top-level or not) with children. + // 1. Set up a directory for its children. + // 2. Move its doc into that dir as "_index.md" + // 3. Restructure its children. + cmdDir := filepath.Join(dir, fullCmdFilename) + if err := os.MkdirAll(cmdDir, 0755); err != nil { + return fmt.Errorf("failed to create subdir for %s: %w", fullCmdFilename, err) + } + + if err := os.Rename(filepath.Join(rootDir, fullCmdFilename+".md"), filepath.Join(cmdDir, "_index.md")); err != nil { + return fmt.Errorf("failed to move index doc for command %s with children: %w", fullCmdFilename, err) + } + + if err := restructure(rootDir, cmdDir, fullCmdFilename, children); err != nil { + return fmt.Errorf("failed to restructure child commands for %s: %w", fullCmdFilename, err) + } + case rootDir != dir: + // Sub-command without children. + // 1. Move its doc into the directory for its parent, name unchanged. + if cmd.Name() == "help" { + // all commands with children have their own "help" subcommand, + // which we do not generate docs for + continue + } + + oldName := filepath.Join(rootDir, fullCmdFilename+".md") + newName := filepath.Join(dir, fullCmdFilename+".md") + + if err := os.Rename(oldName, newName); err != nil { + return fmt.Errorf("failed to move child command %s to its parent's dir: %w", fullCmdFilename, err) + } + + sed := newParentLinkSedCommand(name, newName) + if out, err := sed.CombinedOutput(); err != nil { + return fmt.Errorf("failed to rewrite links to parent command in child %s: %w (extra: %s)", newName, err, out) + } + default: + // Top-level command without children. Nothing to restructure. + continue + } + } + + return nil +} + +func newParentLinkSedCommand(parent string, file string) *exec.Cmd { + return exec.Command("sed", "-i", "", "-e", fmt.Sprintf("s:(./%s/):(../):i", parent), file) +} + func recursivelyDisableAutoGenTags(root *cobra.Command) { commands := []*cobra.Command{root} for cmd := commands[0]; len(commands) > 0; cmd, commands = commands[0], commands[1:] { @@ -106,16 +183,19 @@ func frontmatterFilePrepender(filename string) string { cmdName = root } + cmdName = strings.ReplaceAll(cmdName, "_", " ") + return fmt.Sprintf(frontmatter, cmdName, root) } func linkHandler(filename string) string { - name := filepath.Base(filename) - base := strings.TrimSuffix(name, filepath.Ext(name)) + base := filepath.Base(filename) + name := strings.TrimSuffix(base, filepath.Ext(base)) - if _, _, ok := strings.Cut(base, "_"); !ok { + _, _, ok := strings.Cut(name, "_") + if !ok { return "../" } - return fmt.Sprintf("./%s/", strings.ToLower(base)) + return fmt.Sprintf("./%s/", strings.ToLower(name)) } diff --git a/go/cmd/mysqlctl/mysqlctl.go b/go/cmd/mysqlctl/mysqlctl.go index 6873cc2bf56..ba59309e981 100644 --- a/go/cmd/mysqlctl/mysqlctl.go +++ b/go/cmd/mysqlctl/mysqlctl.go @@ -25,11 +25,12 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/acl" "vitess.io/vitess/go/cmd" "vitess.io/vitess/go/exit" "vitess.io/vitess/go/flagutil" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" @@ -175,30 +176,30 @@ func positionCmd(subFlags *pflag.FlagSet, args []string) error { return fmt.Errorf("not enough arguments for position operation") } - pos1, err := mysql.DecodePosition(args[1]) + pos1, err := replication.DecodePosition(args[1]) if err != nil { return err } switch args[0] { case "equal": - pos2, err := mysql.DecodePosition(args[2]) + pos2, err := replication.DecodePosition(args[2]) if err != nil { return err } fmt.Println(pos1.Equal(pos2)) case "at_least": - pos2, err := mysql.DecodePosition(args[2]) + pos2, err := replication.DecodePosition(args[2]) if err != nil { return err } fmt.Println(pos1.AtLeast(pos2)) case "append": - gtid, err := mysql.DecodeGTID(args[2]) + gtid, err := replication.DecodeGTID(args[2]) if err != nil { return err } - fmt.Println(mysql.AppendGTID(pos1, gtid)) + fmt.Println(replication.AppendGTID(pos1, gtid)) } return nil diff --git a/go/cmd/topo2topo/topo2topo.go b/go/cmd/topo2topo/topo2topo.go index df2cef54862..157960548b8 100644 --- a/go/cmd/topo2topo/topo2topo.go +++ b/go/cmd/topo2topo/topo2topo.go @@ -77,6 +77,7 @@ func main() { logutil.RegisterFlags(fs) servenv.ParseFlags("topo2topo") + servenv.Init() fromTS, err := topo.OpenServer(fromImplementation, fromServerAddress, fromRoot) if err != nil { diff --git a/go/cmd/vtaclcheck/vtaclcheck.go b/go/cmd/vtaclcheck/vtaclcheck.go index 74e9261e67b..8b916a8cc0c 100644 --- a/go/cmd/vtaclcheck/vtaclcheck.go +++ b/go/cmd/vtaclcheck/vtaclcheck.go @@ -47,6 +47,7 @@ func main() { defer logutil.Flush() servenv.ParseFlags("vtaclcheck") + servenv.Init() err := run() if err != nil { diff --git a/go/cmd/vtbackup/vtbackup.go b/go/cmd/vtbackup/vtbackup.go index c75ecc63eae..f27a991d35e 100644 --- a/go/cmd/vtbackup/vtbackup.go +++ b/go/cmd/vtbackup/vtbackup.go @@ -70,10 +70,11 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/acl" "vitess.io/vitess/go/cmd" "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" @@ -98,6 +99,10 @@ const ( // place a hard cap on the overall time for a backup, while also not waiting // forever for things that should be quick. operationTimeout = 1 * time.Minute + + phaseNameCatchUpReplication = "CatchUpReplication" + phaseStatusCatchUpReplicationStalled = "Stalled" + phaseStatusCatchUpReplicationStopped = "Stopped" ) var ( @@ -107,6 +112,7 @@ var ( initialBackup bool allowFirstBackup bool restartBeforeBackup bool + upgradeSafe bool // vttablet-like flags initDbNameOverride string initKeyspace string @@ -126,6 +132,17 @@ var ( "How long it took vtbackup to perform each phase (in seconds).", "phase", ) + phaseStatus = stats.NewGaugesWithMultiLabels( + "PhaseStatus", + "Internal state of vtbackup phase.", + []string{"phase", "status"}, + ) + phaseStatuses = map[string][]string{ + phaseNameCatchUpReplication: { + phaseStatusCatchUpReplicationStalled, + phaseStatusCatchUpReplicationStopped, + }, + } ) func registerFlags(fs *pflag.FlagSet) { @@ -135,6 +152,7 @@ func registerFlags(fs *pflag.FlagSet) { fs.BoolVar(&initialBackup, "initial_backup", initialBackup, "Instead of restoring from backup, initialize an empty database with the provided init_db_sql_file and upload a backup of that for the shard, if the shard has no backups yet. This can be used to seed a brand new shard with an initial, empty backup. If any backups already exist for the shard, this will be considered a successful no-op. This can only be done before the shard exists in topology (i.e. before any tablets are deployed).") fs.BoolVar(&allowFirstBackup, "allow_first_backup", allowFirstBackup, "Allow this job to take the first backup of an existing shard.") fs.BoolVar(&restartBeforeBackup, "restart_before_backup", restartBeforeBackup, "Perform a mysqld clean/full restart after applying binlogs, but before taking the backup. Only makes sense to work around xtrabackup bugs.") + fs.BoolVar(&upgradeSafe, "upgrade-safe", upgradeSafe, "Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.") // vttablet-like flags fs.StringVar(&initDbNameOverride, "init_db_name_override", initDbNameOverride, "(init parameter) override the name of the db used by vttablet") fs.StringVar(&initKeyspace, "init_keyspace", initKeyspace, "(init parameter) keyspace to use for this tablet") @@ -200,6 +218,13 @@ func main() { topoServer := topo.Open() defer topoServer.Close() + // Initialize stats. + for phaseName, statuses := range phaseStatuses { + for _, status := range statuses { + phaseStatus.Set([]string{phaseName, status}, 0) + } + } + // Try to take a backup, if it's been long enough since the last one. // Skip pruning if backup wasn't fully successful. We don't want to be // deleting things if the backup process is not healthy. @@ -301,6 +326,7 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back Shard: initShard, TabletAlias: topoproto.TabletAliasString(tabletAlias), Stats: backupstats.BackupStats(), + UpgradeSafe: upgradeSafe, } // In initial_backup mode, just take a backup of this empty database. if initialBackup { @@ -312,6 +338,19 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back if err := mysqld.ResetReplication(ctx); err != nil { return fmt.Errorf("can't reset replication: %v", err) } + // We need to switch off super_read_only before we create the database. + resetFunc, err := mysqld.SetSuperReadOnly(false) + if err != nil { + return fmt.Errorf("failed to disable super_read_only during backup: %v", err) + } + if resetFunc != nil { + defer func() { + err := resetFunc() + if err != nil { + log.Error("Failed to set super_read_only back to its original value during backup") + } + }() + } cmd := mysqlctl.GenerateInitialBinlogEntry() if err := mysqld.ExecuteSuperQueryList(ctx, []string{cmd}); err != nil { return err @@ -343,7 +382,7 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back Stats: backupstats.RestoreStats(), } backupManifest, err := mysqlctl.Restore(ctx, params) - var restorePos mysql.Position + var restorePos replication.Position switch err { case nil: // if err is nil, we expect backupManifest to be non-nil @@ -354,13 +393,16 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back if !allowFirstBackup { return fmt.Errorf("no backup found; not starting up empty since --initial_backup flag was not enabled") } - restorePos = mysql.Position{} + restorePos = replication.Position{} default: return fmt.Errorf("can't restore from backup: %v", err) } durationByPhase.Set("RestoreLastBackup", int64(time.Since(restoreAt).Seconds())) - // Disable redo logging (if we can) before we start replication. + // As of MySQL 8.0.21, you can disable redo logging using the ALTER INSTANCE + // DISABLE INNODB REDO_LOG statement. This functionality is intended for + // loading data into a new MySQL instance. Disabling redo logging speeds up + // data loading by avoiding redo log writes and doublewrite buffering. disabledRedoLog := false if disableRedoLog { if err := mysqld.DisableRedoLog(ctx); err != nil { @@ -389,7 +431,7 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back tmc := tmclient.NewTabletManagerClient() // Keep retrying if we can't contact the primary. The primary might be // changing, moving, or down temporarily. - var primaryPos mysql.Position + var primaryPos replication.Position err = retryOnError(ctx, func() error { // Add a per-operation timeout so we re-read topo if the primary is unreachable. opCtx, optCancel := context.WithTimeout(ctx, operationTimeout) @@ -405,13 +447,21 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back return fmt.Errorf("can't get the primary replication position after all retries: %v", err) } + log.Infof("takeBackup: primary position is: %s", primaryPos.String()) + // Remember the time when we fetched the primary position, not when we caught // up to it, so the timestamp on our backup is honest (assuming we make it // to the goal position). backupParams.BackupTime = time.Now() // Wait for replication to catch up. - waitStartTime := time.Now() + var ( + lastStatus replication.ReplicationStatus + status replication.ReplicationStatus + statusErr error + + waitStartTime = time.Now() + ) for { select { case <-ctx.Done(): @@ -419,7 +469,8 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back case <-time.After(time.Second): } - status, statusErr := mysqld.ReplicationStatus() + lastStatus = status + status, statusErr = mysqld.ReplicationStatus() if statusErr != nil { log.Warningf("Error getting replication status: %v", statusErr) continue @@ -431,11 +482,21 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back durationByPhase.Set("CatchUpReplication", int64(time.Since(waitStartTime).Seconds())) break } + if !lastStatus.Position.IsZero() { + if status.Position.Equal(lastStatus.Position) { + phaseStatus.Set([]string{phaseNameCatchUpReplication, phaseStatusCatchUpReplicationStalled}, 1) + } else { + phaseStatus.Set([]string{phaseNameCatchUpReplication, phaseStatusCatchUpReplicationStalled}, 0) + } + } if !status.Healthy() { log.Warning("Replication has stopped before backup could be taken. Trying to restart replication.") + phaseStatus.Set([]string{phaseNameCatchUpReplication, phaseStatusCatchUpReplicationStopped}, 1) if err := startReplication(ctx, mysqld, topoServer); err != nil { log.Warningf("Failed to restart replication: %v", err) } + } else { + phaseStatus.Set([]string{phaseNameCatchUpReplication, phaseStatusCatchUpReplicationStopped}, 0) } } @@ -445,14 +506,16 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back } // Did we make any progress? - status, err := mysqld.ReplicationStatus() - if err != nil { + status, statusErr = mysqld.ReplicationStatus() + if statusErr != nil { return fmt.Errorf("can't get replication status: %v", err) } log.Infof("Replication caught up to %v", status.Position) if !status.Position.AtLeast(primaryPos) && status.Position.Equal(restorePos) { return fmt.Errorf("not taking backup: replication did not make any progress from restore point: %v", restorePos) } + phaseStatus.Set([]string{phaseNameCatchUpReplication, phaseStatusCatchUpReplicationStalled}, 0) + phaseStatus.Set([]string{phaseNameCatchUpReplication, phaseStatusCatchUpReplicationStopped}, 0) // Re-enable redo logging. if disabledRedoLog { @@ -495,7 +558,7 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back return nil } -func resetReplication(ctx context.Context, pos mysql.Position, mysqld mysqlctl.MysqlDaemon) error { +func resetReplication(ctx context.Context, pos replication.Position, mysqld mysqlctl.MysqlDaemon) error { cmds := []string{ "STOP SLAVE", "RESET SLAVE ALL", // "ALL" makes it forget replication source host:port. @@ -542,27 +605,27 @@ func startReplication(ctx context.Context, mysqld mysqlctl.MysqlDaemon, topoServ return nil } -func getPrimaryPosition(ctx context.Context, tmc tmclient.TabletManagerClient, ts *topo.Server) (mysql.Position, error) { +func getPrimaryPosition(ctx context.Context, tmc tmclient.TabletManagerClient, ts *topo.Server) (replication.Position, error) { si, err := ts.GetShard(ctx, initKeyspace, initShard) if err != nil { - return mysql.Position{}, vterrors.Wrap(err, "can't read shard") + return replication.Position{}, vterrors.Wrap(err, "can't read shard") } if topoproto.TabletAliasIsZero(si.PrimaryAlias) { // Normal tablets will sit around waiting to be reparented in this case. // Since vtbackup is a batch job, we just have to fail. - return mysql.Position{}, fmt.Errorf("shard %v/%v has no primary", initKeyspace, initShard) + return replication.Position{}, fmt.Errorf("shard %v/%v has no primary", initKeyspace, initShard) } ti, err := ts.GetTablet(ctx, si.PrimaryAlias) if err != nil { - return mysql.Position{}, fmt.Errorf("can't get primary tablet record %v: %v", topoproto.TabletAliasString(si.PrimaryAlias), err) + return replication.Position{}, fmt.Errorf("can't get primary tablet record %v: %v", topoproto.TabletAliasString(si.PrimaryAlias), err) } posStr, err := tmc.PrimaryPosition(ctx, ti.Tablet) if err != nil { - return mysql.Position{}, fmt.Errorf("can't get primary replication position: %v", err) + return replication.Position{}, fmt.Errorf("can't get primary replication position: %v", err) } - pos, err := mysql.DecodePosition(posStr) + pos, err := replication.DecodePosition(posStr) if err != nil { - return mysql.Position{}, fmt.Errorf("can't decode primary replication position %q: %v", posStr, err) + return replication.Position{}, fmt.Errorf("can't decode primary replication position %q: %v", posStr, err) } return pos, nil } diff --git a/go/cmd/vtbench/vtbench.go b/go/cmd/vtbench/vtbench.go index 13c024fdd8e..19044aae4ed 100644 --- a/go/cmd/vtbench/vtbench.go +++ b/go/cmd/vtbench/vtbench.go @@ -121,6 +121,7 @@ func main() { }) servenv.ParseFlags("vtbench") + servenv.Init() defer exit.Recover() diff --git a/go/cmd/vtclient/vtclient.go b/go/cmd/vtclient/vtclient.go index adc060d7737..26c8cfd4806 100644 --- a/go/cmd/vtclient/vtclient.go +++ b/go/cmd/vtclient/vtclient.go @@ -197,7 +197,6 @@ func run() (*results, error) { if maxSeqID > minSeqID { go func() { if useRandom { - rand.Seed(time.Now().UnixNano()) for { seqChan <- rand.Intn(maxSeqID-minSeqID) + minSeqID } diff --git a/go/cmd/vtcombo/main.go b/go/cmd/vtcombo/main.go index a6870a017fb..24994f06c6e 100644 --- a/go/cmd/vtcombo/main.go +++ b/go/cmd/vtcombo/main.go @@ -28,11 +28,10 @@ import ( "time" "github.com/spf13/pflag" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/acl" "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" @@ -61,7 +60,13 @@ var ( mysqlPort = flags.Int("mysql_port", 3306, "mysql port") externalTopoServer = flags.Bool("external_topo_server", false, "Should vtcombo use an external topology server instead of starting its own in-memory topology server. "+ "If true, vtcombo will use the flags defined in topo/server.go to open topo server") - plannerName = flags.String("planner-version", "", "Sets the default planner to use when the session has not changed it. Valid values are: V3, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the gen4 planner and falls back to the V3 planner if the gen4 fails.") + plannerName = flags.String("planner-version", "", "Sets the default planner to use when the session has not changed it. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right") + vschemaPersistenceDir = flags.String("vschema-persistence-dir", "", "If set, per-keyspace vschema will be persisted in this directory "+ + "and reloaded into the in-memory topology server across restarts. Bookkeeping is performed using a simple watcher goroutine. "+ + "This is useful when running vtcombo as an application development container (e.g. vttestserver) where you want to keep the same "+ + "vschema even if developer's machine reboots. This works in tandem with vttestserver's --persistent_mode flag. Needless to say, "+ + "this is neither a perfect nor a production solution for vschema persistence. Consider using the --external_topo_server flag if "+ + "you require a more complete solution. This flag is ignored if --external_topo_server is set.") tpb vttestpb.VTTestTopology ts *topo.Server @@ -158,7 +163,7 @@ func main() { // // We will use this to determine the shard structure when keyspaces // get recreated. - originalTopology := proto.Clone(&tpb).(*vttestpb.VTTestTopology) + originalTopology := (&tpb).CloneVT() // default cell to "test" if unspecified if len(tpb.Cells) == 0 { @@ -179,7 +184,7 @@ func main() { ts = topo.Open() } else { // Create topo server. We use a 'memorytopo' implementation. - ts = memorytopo.NewServer(tpb.Cells...) + ts = memorytopo.NewServer(context.Background(), tpb.Cells...) } // attempt to load any routing rules specified by tpb @@ -233,7 +238,7 @@ func main() { // will end up with the same number of shards. for _, originalKs := range originalTopology.Keyspaces { if originalKs.Name == ks.Name { - ks = proto.Clone(originalKs).(*vttestpb.Keyspace) + ks = originalKs.CloneVT() } } @@ -272,7 +277,7 @@ func main() { } // vtgate configuration and init - resilientServer = srvtopo.NewResilientServer(ts, "ResilientSrvTopoServer") + resilientServer = srvtopo.NewResilientServer(context.Background(), ts, "ResilientSrvTopoServer") tabletTypesToWait := []topodatapb.TabletType{ topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, @@ -292,6 +297,10 @@ func main() { exit.Return(1) } + if *vschemaPersistenceDir != "" && !*externalTopoServer { + startVschemaWatcher(*vschemaPersistenceDir, tpb.Keyspaces, ts) + } + servenv.OnRun(func() { addStatusParts(vtg) }) @@ -332,7 +341,7 @@ func (mysqld *vtcomboMysqld) RestartReplication(hookExtraEnv map[string]string) } // StartReplicationUntilAfter implements the MysqlDaemon interface -func (mysqld *vtcomboMysqld) StartReplicationUntilAfter(ctx context.Context, pos mysql.Position) error { +func (mysqld *vtcomboMysqld) StartReplicationUntilAfter(ctx context.Context, pos replication.Position) error { return nil } @@ -345,3 +354,8 @@ func (mysqld *vtcomboMysqld) StopReplication(hookExtraEnv map[string]string) err func (mysqld *vtcomboMysqld) SetSemiSyncEnabled(source, replica bool) error { return nil } + +// SemiSyncExtensionLoaded implements the MysqlDaemon interface +func (mysqld *vtcomboMysqld) SemiSyncExtensionLoaded() (bool, error) { + return true, nil +} diff --git a/go/cmd/vtcombo/plugin_dbddl.go b/go/cmd/vtcombo/plugin_dbddl.go index 49a7a601fb1..1a95e073308 100644 --- a/go/cmd/vtcombo/plugin_dbddl.go +++ b/go/cmd/vtcombo/plugin_dbddl.go @@ -18,6 +18,7 @@ package main import ( "context" + "sync" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vtgate/engine" @@ -29,7 +30,9 @@ var globalCreateDb func(ctx context.Context, ks *vttestpb.Keyspace) error var globalDropDb func(ctx context.Context, ksName string) error // DBDDL doesn't need to store any state - we use the global variables above instead -type DBDDL struct{} +type DBDDL struct { + mu sync.Mutex +} // CreateDatabase implements the engine.DBDDLPlugin interface func (plugin *DBDDL) CreateDatabase(ctx context.Context, name string) error { @@ -39,6 +42,8 @@ func (plugin *DBDDL) CreateDatabase(ctx context.Context, name string) error { Name: "0", }}, } + plugin.mu.Lock() + defer plugin.mu.Unlock() return globalCreateDb(ctx, ks) } diff --git a/go/cmd/vtcombo/vschema_watcher.go b/go/cmd/vtcombo/vschema_watcher.go new file mode 100644 index 00000000000..948ed67bea7 --- /dev/null +++ b/go/cmd/vtcombo/vschema_watcher.go @@ -0,0 +1,116 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "encoding/json" + "os" + "path" + + "vitess.io/vitess/go/vt/log" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vttestpb "vitess.io/vitess/go/vt/proto/vttest" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +func startVschemaWatcher(vschemaPersistenceDir string, keyspaces []*vttestpb.Keyspace, ts *topo.Server) { + // Create the directory if it doesn't exist. + if err := createDirectoryIfNotExists(vschemaPersistenceDir); err != nil { + log.Fatalf("Unable to create vschema persistence directory %v: %v", vschemaPersistenceDir, err) + } + + // If there are keyspace files, load them. + loadKeyspacesFromDir(vschemaPersistenceDir, keyspaces, ts) + + // Rebuild the SrvVSchema object in case we loaded vschema from file + if err := ts.RebuildSrvVSchema(context.Background(), tpb.Cells); err != nil { + log.Fatalf("RebuildSrvVSchema failed: %v", err) + } + + // Now watch for changes in the SrvVSchema object and persist them to disk. + go watchSrvVSchema(context.Background(), ts, tpb.Cells[0]) +} + +func loadKeyspacesFromDir(dir string, keyspaces []*vttestpb.Keyspace, ts *topo.Server) { + for _, ks := range tpb.Keyspaces { + ksFile := path.Join(dir, ks.Name+".json") + if _, err := os.Stat(ksFile); err == nil { + jsonData, err := os.ReadFile(ksFile) + if err != nil { + log.Fatalf("Unable to read keyspace file %v: %v", ksFile, err) + } + + keyspace := &vschemapb.Keyspace{} + err = json.Unmarshal(jsonData, keyspace) + if err != nil { + log.Fatalf("Unable to parse keyspace file %v: %v", ksFile, err) + } + + _, err = vindexes.BuildKeyspace(keyspace) + if err != nil { + log.Fatalf("Invalid keyspace definition: %v", err) + } + ts.SaveVSchema(context.Background(), ks.Name, keyspace) + log.Infof("Loaded keyspace %v from %v\n", ks.Name, ksFile) + } + } +} + +func watchSrvVSchema(ctx context.Context, ts *topo.Server, cell string) { + data, ch, err := ts.WatchSrvVSchema(context.Background(), tpb.Cells[0]) + if err != nil { + log.Fatalf("WatchSrvVSchema failed: %v", err) + } + + if data.Err != nil { + log.Fatalf("WatchSrvVSchema could not retrieve initial vschema: %v", data.Err) + } + persistNewSrvVSchema(data.Value) + + for update := range ch { + if update.Err != nil { + log.Errorf("WatchSrvVSchema returned an error: %v", update.Err) + } else { + persistNewSrvVSchema(update.Value) + } + } +} + +func persistNewSrvVSchema(srvVSchema *vschemapb.SrvVSchema) { + for ksName, ks := range srvVSchema.Keyspaces { + jsonBytes, err := json.MarshalIndent(ks, "", " ") + if err != nil { + log.Errorf("Error marshaling keyspace: %v", err) + continue + } + + err = os.WriteFile(path.Join(*vschemaPersistenceDir, ksName+".json"), jsonBytes, 0644) + if err != nil { + log.Errorf("Error writing keyspace file: %v", err) + } + log.Infof("Persisted keyspace %v to %v", ksName, *vschemaPersistenceDir) + } +} + +func createDirectoryIfNotExists(dir string) error { + if _, err := os.Stat(dir); os.IsNotExist(err) { + return os.Mkdir(dir, 0755) + } + return nil +} diff --git a/go/cmd/vtctld/cli/cli.go b/go/cmd/vtctld/cli/cli.go new file mode 100644 index 00000000000..42ab1cfde1e --- /dev/null +++ b/go/cmd/vtctld/cli/cli.go @@ -0,0 +1,90 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtctld" +) + +var ( + ts *topo.Server + Main = &cobra.Command{ + Use: "vtctld", + Short: "The Vitess cluster management daemon.", + Long: `vtctld provides web and gRPC interfaces to manage a single Vitess cluster. +It is usually the first Vitess component to be started after a valid global topology service has been created. + +For the last several releases, vtctld has been transitioning to a newer gRPC service for well-typed cluster management requests. +This is **required** to use programs such as vtadmin and vtctldclient, and The old API and service are deprecated and will be removed in a future release. +To enable this newer service, include "grpc-vtctld" in the --service_map argument. +This is demonstrated in the example usage below.`, + Example: `vtctld \ + --topo_implementation etcd2 \ + --topo_global_server_address localhost:2379 \ + --topo_global_root /vitess/ \ + --service_map 'grpc-vtctl,grpc-vtctld' \ + --backup_storage_implementation file \ + --file_backup_storage_root $VTDATAROOT/backups \ + --port 15000 \ + --grpc_port 15999`, + Args: cobra.NoArgs, + Version: servenv.AppVersion.String(), + PreRunE: servenv.CobraPreRunE, + RunE: run, + } +) + +func run(cmd *cobra.Command, args []string) error { + servenv.Init() + defer servenv.Close() + + ts = topo.Open() + defer ts.Close() + + // Init the vtctld core + if err := vtctld.InitVtctld(ts); err != nil { + return err + } + + // Register http debug/health + vtctld.RegisterDebugHealthHandler(ts) + + // Start schema manager service. + initSchema() + + // And run the server. + servenv.RunDefault() + + return nil +} + +func init() { + servenv.RegisterDefaultFlags() + servenv.RegisterFlags() + servenv.RegisterGRPCServerFlags() + servenv.RegisterGRPCServerAuthFlags() + servenv.RegisterServiceMapFlag() + + servenv.MoveFlagsToCobraCommand(Main) + + acl.RegisterFlags(Main.Flags()) +} diff --git a/go/cmd/vttablet/plugin_azblobbackupstorage.go b/go/cmd/vtctld/cli/plugin_azblobbackupstorage.go similarity index 97% rename from go/cmd/vttablet/plugin_azblobbackupstorage.go rename to go/cmd/vtctld/cli/plugin_azblobbackupstorage.go index a4ca64096a9..bdadc894aae 100644 --- a/go/cmd/vttablet/plugin_azblobbackupstorage.go +++ b/go/cmd/vtctld/cli/plugin_azblobbackupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/azblobbackupstorage" diff --git a/go/cmd/vttablet/plugin_cephbackupstorage.go b/go/cmd/vtctld/cli/plugin_cephbackupstorage.go similarity index 97% rename from go/cmd/vttablet/plugin_cephbackupstorage.go rename to go/cmd/vtctld/cli/plugin_cephbackupstorage.go index 6cd2d5619d0..171198f5e29 100644 --- a/go/cmd/vttablet/plugin_cephbackupstorage.go +++ b/go/cmd/vtctld/cli/plugin_cephbackupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/cephbackupstorage" diff --git a/go/cmd/vtctld/plugin_consultopo.go b/go/cmd/vtctld/cli/plugin_consultopo.go similarity index 98% rename from go/cmd/vtctld/plugin_consultopo.go rename to go/cmd/vtctld/cli/plugin_consultopo.go index a0c53abe5ea..4617d753953 100644 --- a/go/cmd/vtctld/plugin_consultopo.go +++ b/go/cmd/vtctld/cli/plugin_consultopo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the 'consul' topo.Server. diff --git a/go/cmd/vtctld/plugin_etcd2topo.go b/go/cmd/vtctld/cli/plugin_etcd2topo.go similarity index 98% rename from go/cmd/vtctld/plugin_etcd2topo.go rename to go/cmd/vtctld/cli/plugin_etcd2topo.go index 6ec507f910d..06e014fc19f 100644 --- a/go/cmd/vtctld/plugin_etcd2topo.go +++ b/go/cmd/vtctld/cli/plugin_etcd2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the 'etcd2' topo.Server. diff --git a/go/cmd/vtctld/plugin_filebackupstorage.go b/go/cmd/vtctld/cli/plugin_filebackupstorage.go similarity index 97% rename from go/cmd/vtctld/plugin_filebackupstorage.go rename to go/cmd/vtctld/cli/plugin_filebackupstorage.go index cf2ceb5150f..9edc82d6a1b 100644 --- a/go/cmd/vtctld/plugin_filebackupstorage.go +++ b/go/cmd/vtctld/cli/plugin_filebackupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage" diff --git a/go/cmd/vttablet/plugin_gcsbackupstorage.go b/go/cmd/vtctld/cli/plugin_gcsbackupstorage.go similarity index 97% rename from go/cmd/vttablet/plugin_gcsbackupstorage.go rename to go/cmd/vtctld/cli/plugin_gcsbackupstorage.go index 82a22cef1da..655583c8ca2 100644 --- a/go/cmd/vttablet/plugin_gcsbackupstorage.go +++ b/go/cmd/vtctld/cli/plugin_gcsbackupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/gcsbackupstorage" diff --git a/go/cmd/vtctld/plugin_grpctabletconn.go b/go/cmd/vtctld/cli/plugin_grpctabletconn.go similarity index 98% rename from go/cmd/vtctld/plugin_grpctabletconn.go rename to go/cmd/vtctld/cli/plugin_grpctabletconn.go index 08291a7c916..4a97e36eec4 100644 --- a/go/cmd/vtctld/plugin_grpctabletconn.go +++ b/go/cmd/vtctld/cli/plugin_grpctabletconn.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC tabletconn client diff --git a/go/cmd/vtorc/plugin_grpctmclient.go b/go/cmd/vtctld/cli/plugin_grpctmclient.go similarity index 98% rename from go/cmd/vtorc/plugin_grpctmclient.go rename to go/cmd/vtctld/cli/plugin_grpctmclient.go index ce554da96df..8cd349c7f87 100644 --- a/go/cmd/vtorc/plugin_grpctmclient.go +++ b/go/cmd/vtctld/cli/plugin_grpctmclient.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC tabletmanager client diff --git a/go/cmd/vtctld/plugin_grpcvtctldserver.go b/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go similarity index 98% rename from go/cmd/vtctld/plugin_grpcvtctldserver.go rename to go/cmd/vtctld/cli/plugin_grpcvtctldserver.go index ee5d0aba22a..ff283d91336 100644 --- a/go/cmd/vtctld/plugin_grpcvtctldserver.go +++ b/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "vitess.io/vitess/go/vt/servenv" diff --git a/go/cmd/vtctld/plugin_grpcvtctlserver.go b/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go similarity index 98% rename from go/cmd/vtctld/plugin_grpcvtctlserver.go rename to go/cmd/vtctld/cli/plugin_grpcvtctlserver.go index 4ec5323b075..8b7f918bc58 100644 --- a/go/cmd/vtctld/plugin_grpcvtctlserver.go +++ b/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "vitess.io/vitess/go/vt/servenv" diff --git a/go/cmd/vtctld/plugin_grpcvtgateconn.go b/go/cmd/vtctld/cli/plugin_grpcvtgateconn.go similarity index 98% rename from go/cmd/vtctld/plugin_grpcvtgateconn.go rename to go/cmd/vtctld/cli/plugin_grpcvtgateconn.go index 87019ea4260..2f05e6d9a4e 100644 --- a/go/cmd/vtctld/plugin_grpcvtgateconn.go +++ b/go/cmd/vtctld/cli/plugin_grpcvtgateconn.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC vtgateconn client diff --git a/go/cmd/vtctld/plugin_opentracing.go b/go/cmd/vtctld/cli/plugin_opentracing.go similarity index 98% rename from go/cmd/vtctld/plugin_opentracing.go rename to go/cmd/vtctld/cli/plugin_opentracing.go index c35034d42a2..76423623493 100644 --- a/go/cmd/vtctld/plugin_opentracing.go +++ b/go/cmd/vtctld/cli/plugin_opentracing.go @@ -14,11 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "vitess.io/vitess/go/trace" - "vitess.io/vitess/go/vt/servenv" ) diff --git a/go/cmd/vtctld/plugin_opentsdb.go b/go/cmd/vtctld/cli/plugin_opentsdb.go similarity index 98% rename from go/cmd/vtctld/plugin_opentsdb.go rename to go/cmd/vtctld/cli/plugin_opentsdb.go index 38f464dd887..e4f76d29009 100644 --- a/go/cmd/vtctld/plugin_opentsdb.go +++ b/go/cmd/vtctld/cli/plugin_opentsdb.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports opentsdb to register the opentsdb stats backend. diff --git a/go/cmd/vtctld/plugin_prometheusbackend.go b/go/cmd/vtctld/cli/plugin_prometheusbackend.go similarity index 98% rename from go/cmd/vtctld/plugin_prometheusbackend.go rename to go/cmd/vtctld/cli/plugin_prometheusbackend.go index f3c33e5637b..3c66018fe75 100644 --- a/go/cmd/vtctld/plugin_prometheusbackend.go +++ b/go/cmd/vtctld/cli/plugin_prometheusbackend.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports Prometheus to allow for instrumentation // with the Prometheus client library diff --git a/go/cmd/vttablet/plugin_s3backupstorage.go b/go/cmd/vtctld/cli/plugin_s3backupstorage.go similarity index 97% rename from go/cmd/vttablet/plugin_s3backupstorage.go rename to go/cmd/vtctld/cli/plugin_s3backupstorage.go index a5b5c671ebb..4b3ecb33edb 100644 --- a/go/cmd/vttablet/plugin_s3backupstorage.go +++ b/go/cmd/vtctld/cli/plugin_s3backupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/s3backupstorage" diff --git a/go/cmd/vtctld/plugin_zk2topo.go b/go/cmd/vtctld/cli/plugin_zk2topo.go similarity index 98% rename from go/cmd/vtctld/plugin_zk2topo.go rename to go/cmd/vtctld/cli/plugin_zk2topo.go index 531d92c4cdd..77f86d98d52 100644 --- a/go/cmd/vtctld/plugin_zk2topo.go +++ b/go/cmd/vtctld/cli/plugin_zk2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the 'zk2' topo.Server. diff --git a/go/cmd/vtctld/schema.go b/go/cmd/vtctld/cli/schema.go similarity index 57% rename from go/cmd/vtctld/schema.go rename to go/cmd/vtctld/cli/schema.go index 3f20ae0be55..480679a09e6 100644 --- a/go/cmd/vtctld/schema.go +++ b/go/cmd/vtctld/cli/schema.go @@ -14,20 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "context" "time" - "github.com/spf13/pflag" - - "vitess.io/vitess/go/flagutil" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/schemamanager" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/wrangler" ) @@ -36,27 +34,25 @@ var ( schemaChangeDir string schemaChangeController string schemaChangeUser string - schemaChangeCheckInterval = flagutil.NewDurationOrIntVar("schema_change_check_interval", time.Minute, time.Second) - schemaChangeReplicasTimeout = wrangler.DefaultWaitReplicasTimeout + schemaChangeCheckInterval = time.Minute + schemaChangeReplicasTimeout = grpcvtctldserver.DefaultWaitReplicasTimeout ) func init() { - servenv.OnParse(func(fs *pflag.FlagSet) { - fs.StringVar(&schemaChangeDir, "schema_change_dir", schemaChangeDir, "Directory containing schema changes for all keyspaces. Each keyspace has its own directory, and schema changes are expected to live in '$KEYSPACE/input' dir. (e.g. 'test_keyspace/input/*sql'). Each sql file represents a schema change.") - fs.StringVar(&schemaChangeController, "schema_change_controller", schemaChangeController, "Schema change controller is responsible for finding schema changes and responding to schema change events.") - fs.StringVar(&schemaChangeUser, "schema_change_user", schemaChangeUser, "The user who schema changes are submitted on behalf of.") + Main.Flags().StringVar(&schemaChangeDir, "schema_change_dir", schemaChangeDir, "Directory containing schema changes for all keyspaces. Each keyspace has its own directory, and schema changes are expected to live in '$KEYSPACE/input' dir. (e.g. 'test_keyspace/input/*sql'). Each sql file represents a schema change.") + Main.Flags().StringVar(&schemaChangeController, "schema_change_controller", schemaChangeController, "Schema change controller is responsible for finding schema changes and responding to schema change events.") + Main.Flags().StringVar(&schemaChangeUser, "schema_change_user", schemaChangeUser, "The user who schema changes are submitted on behalf of.") - fs.Var(schemaChangeCheckInterval, "schema_change_check_interval", "How often the schema change dir is checked for schema changes (deprecated: if passed as a bare integer, the duration will be in seconds).") - fs.DurationVar(&schemaChangeReplicasTimeout, "schema_change_replicas_timeout", schemaChangeReplicasTimeout, "How long to wait for replicas to receive a schema change.") - }) + Main.Flags().DurationVar(&schemaChangeCheckInterval, "schema_change_check_interval", schemaChangeCheckInterval, "How often the schema change dir is checked for schema changes. This value must be positive; if zero or lower, the default of 1m is used.") + Main.Flags().DurationVar(&schemaChangeReplicasTimeout, "schema_change_replicas_timeout", schemaChangeReplicasTimeout, "How long to wait for replicas to receive a schema change.") } func initSchema() { // Start schema manager service if needed. if schemaChangeDir != "" { - interval := time.Minute - if schemaChangeCheckInterval.Value() > time.Duration(0) { - interval = schemaChangeCheckInterval.Value() + interval := schemaChangeCheckInterval + if interval <= 0 { + interval = time.Minute } timer := timer.NewTimer(interval) controllerFactory, err := @@ -79,7 +75,7 @@ func initSchema() { _, err = schemamanager.Run( ctx, controller, - schemamanager.NewTabletExecutor("vtctld/schema", wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), schemaChangeReplicasTimeout), + schemamanager.NewTabletExecutor("vtctld/schema", wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), schemaChangeReplicasTimeout, 0), ) if err != nil { log.Errorf("Schema change failed, error: %v", err) diff --git a/go/cmd/vtctld/docgen/main.go b/go/cmd/vtctld/docgen/main.go new file mode 100644 index 00000000000..4243153859e --- /dev/null +++ b/go/cmd/vtctld/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/vtctld/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/vtctld/main.go b/go/cmd/vtctld/main.go index 26f9e100c19..6f9ab7384fc 100644 --- a/go/cmd/vtctld/main.go +++ b/go/cmd/vtctld/main.go @@ -17,52 +17,12 @@ limitations under the License. package main import ( - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vtctld" -) - -func init() { - servenv.RegisterDefaultFlags() - servenv.RegisterFlags() - servenv.RegisterGRPCServerFlags() - servenv.RegisterGRPCServerAuthFlags() - servenv.RegisterServiceMapFlag() - - servenv.OnParse(func(fs *pflag.FlagSet) { - acl.RegisterFlags(fs) - }) -} - -// used at runtime by plug-ins -var ( - ts *topo.Server + "vitess.io/vitess/go/cmd/vtctld/cli" + "vitess.io/vitess/go/vt/log" ) func main() { - servenv.ParseFlags("vtctld") - servenv.Init() - defer servenv.Close() - - ts = topo.Open() - defer ts.Close() - - // Init the vtctld core - err := vtctld.InitVtctld(ts) - if err != nil { - exit.Return(1) + if err := cli.Main.Execute(); err != nil { + log.Fatal(err) } - - // Register http debug/health - vtctld.RegisterDebugHealthHandler(ts) - - // Start schema manager service. - initSchema() - - // And run the server. - servenv.RunDefault() } diff --git a/go/cmd/vtctldclient/cli/awk.go b/go/cmd/vtctldclient/cli/awk.go index c68b0fc0627..2916034a3ca 100644 --- a/go/cmd/vtctldclient/cli/awk.go +++ b/go/cmd/vtctldclient/cli/awk.go @@ -22,7 +22,7 @@ import ( "strings" "time" - "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" @@ -66,7 +66,7 @@ func MarshalTabletAWK(t *topodatapb.Tablet) string { // special case for old primary that hasn't been updated in the topo // yet. if t.PrimaryTermStartTime != nil && t.PrimaryTermStartTime.Seconds > 0 { - mtst = logutil.ProtoToTime(t.PrimaryTermStartTime).Format(time.RFC3339) + mtst = protoutil.TimeFromProto(t.PrimaryTermStartTime).UTC().Format(time.RFC3339) } return fmt.Sprintf("%v %v %v %v %v %v %v %v", topoproto.TabletAliasString(t.Alias), keyspace, shard, topoproto.TabletTypeLString(t.Type), ti.Addr(), ti.MysqlAddr(), MarshalMapAWK(t.Tags), mtst) diff --git a/go/cmd/vtctldclient/cli/json.go b/go/cmd/vtctldclient/cli/json.go index 80af6d80d72..c76a505e670 100644 --- a/go/cmd/vtctldclient/cli/json.go +++ b/go/cmd/vtctldclient/cli/json.go @@ -54,3 +54,25 @@ func MarshalJSON(obj any) ([]byte, error) { return data, nil } } + +// MarshalJSONCompact works the same as MarshalJSON but elides zero value elements. +func MarshalJSONCompact(obj any) ([]byte, error) { + switch obj := obj.(type) { + case proto.Message: + m := protojson.MarshalOptions{ + Multiline: true, + Indent: " ", + UseEnumNumbers: true, + UseProtoNames: true, + EmitUnpopulated: false, // elide zero value elements + } + return m.Marshal(obj) + default: + data, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return nil, fmt.Errorf("json.Marshal = %v", err) + } + + return data, nil + } +} diff --git a/go/cmd/vtctldclient/cli/shards.go b/go/cmd/vtctldclient/cli/shards.go index 93d7529d9a8..8ee38eff0d4 100644 --- a/go/cmd/vtctldclient/cli/shards.go +++ b/go/cmd/vtctldclient/cli/shards.go @@ -19,7 +19,7 @@ package cli import ( "sort" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/topo/topoproto" replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" @@ -83,12 +83,12 @@ func (rts rTablets) Less(i, j int) bool { } // then compare replication positions - lpos, err := mysql.DecodePosition(l.Status.Position) + lpos, err := replication.DecodePosition(l.Status.Position) if err != nil { return true } - rpos, err := mysql.DecodePosition(r.Status.Position) + rpos, err := replication.DecodePosition(r.Status.Position) if err != nil { return false } diff --git a/go/cmd/vtctldclient/command/backups.go b/go/cmd/vtctldclient/command/backups.go index 53aac5b51bb..e6314ed7d6e 100644 --- a/go/cmd/vtctldclient/command/backups.go +++ b/go/cmd/vtctldclient/command/backups.go @@ -35,7 +35,7 @@ import ( var ( // Backup makes a Backup gRPC call to a vtctld. Backup = &cobra.Command{ - Use: "Backup [--concurrency ] [--allow-primary] ", + Use: "Backup [--concurrency ] [--allow-primary] [--incremental-from-pos=|auto] [--upgrade-safe] ", Short: "Uses the BackupStorage service on the given tablet to create and store a new backup.", DisableFlagsInUseLine: true, Args: cobra.ExactArgs(1), @@ -43,7 +43,7 @@ var ( } // BackupShard makes a BackupShard gRPC call to a vtctld. BackupShard = &cobra.Command{ - Use: "BackupShard [--concurrency ] [--allow-primary] ", + Use: "BackupShard [--concurrency ] [--allow-primary] [--incremental-from-pos=|auto] [--upgrade-safe] ", Short: "Finds the most up-to-date REPLICA, RDONLY, or SPARE tablet in the given shard and uses the BackupStorage service on that tablet to create and store a new backup.", Long: `Finds the most up-to-date REPLICA, RDONLY, or SPARE tablet in the given shard and uses the BackupStorage service on that tablet to create and store a new backup. @@ -70,7 +70,7 @@ If no replica-type tablet can be found, the backup can be taken on the primary i } // RestoreFromBackup makes a RestoreFromBackup gRPC call to a vtctld. RestoreFromBackup = &cobra.Command{ - Use: "RestoreFromBackup [--backup-timestamp|-t ] ", + Use: "RestoreFromBackup [--backup-timestamp|-t ] [--restore-to-pos ] [--dry-run] ", Short: "Stops mysqld on the specified tablet and restores the data from either the latest backup or closest before `backup-timestamp`.", DisableFlagsInUseLine: true, Args: cobra.ExactArgs(1), @@ -79,8 +79,10 @@ If no replica-type tablet can be found, the backup can be taken on the primary i ) var backupOptions = struct { - AllowPrimary bool - Concurrency uint64 + AllowPrimary bool + Concurrency uint64 + IncrementalFromPos string + UpgradeSafe bool }{} func commandBackup(cmd *cobra.Command, args []string) error { @@ -92,9 +94,11 @@ func commandBackup(cmd *cobra.Command, args []string) error { cli.FinishedParsing(cmd) stream, err := client.Backup(commandCtx, &vtctldatapb.BackupRequest{ - TabletAlias: tabletAlias, - AllowPrimary: backupOptions.AllowPrimary, - Concurrency: backupOptions.Concurrency, + TabletAlias: tabletAlias, + AllowPrimary: backupOptions.AllowPrimary, + Concurrency: backupOptions.Concurrency, + IncrementalFromPos: backupOptions.IncrementalFromPos, + UpgradeSafe: backupOptions.UpgradeSafe, }) if err != nil { return err @@ -114,8 +118,10 @@ func commandBackup(cmd *cobra.Command, args []string) error { } var backupShardOptions = struct { - AllowPrimary bool - Concurrency uint64 + AllowPrimary bool + Concurrency uint64 + IncrementalFromPos string + UpgradeSafe bool }{} func commandBackupShard(cmd *cobra.Command, args []string) error { @@ -127,10 +133,12 @@ func commandBackupShard(cmd *cobra.Command, args []string) error { cli.FinishedParsing(cmd) stream, err := client.BackupShard(commandCtx, &vtctldatapb.BackupShardRequest{ - Keyspace: keyspace, - Shard: shard, - AllowPrimary: backupOptions.AllowPrimary, - Concurrency: backupOptions.Concurrency, + Keyspace: keyspace, + Shard: shard, + AllowPrimary: backupShardOptions.AllowPrimary, + Concurrency: backupShardOptions.Concurrency, + IncrementalFromPos: backupShardOptions.IncrementalFromPos, + UpgradeSafe: backupShardOptions.UpgradeSafe, }) if err != nil { return err @@ -210,7 +218,10 @@ func commandRemoveBackup(cmd *cobra.Command, args []string) error { } var restoreFromBackupOptions = struct { - BackupTimestamp string + BackupTimestamp string + RestoreToPos string + RestoreToTimestamp string + DryRun bool }{} func commandRestoreFromBackup(cmd *cobra.Command, args []string) error { @@ -219,8 +230,23 @@ func commandRestoreFromBackup(cmd *cobra.Command, args []string) error { return err } + if restoreFromBackupOptions.RestoreToPos != "" && restoreFromBackupOptions.RestoreToTimestamp != "" { + return fmt.Errorf("--restore-to-pos and --restore-to-timestamp are mutually exclusive") + } + + var restoreToTimestamp time.Time + if restoreFromBackupOptions.RestoreToTimestamp != "" { + restoreToTimestamp, err = mysqlctl.ParseRFC3339(restoreFromBackupOptions.RestoreToTimestamp) + if err != nil { + return err + } + } + req := &vtctldatapb.RestoreFromBackupRequest{ - TabletAlias: alias, + TabletAlias: alias, + RestoreToPos: restoreFromBackupOptions.RestoreToPos, + RestoreToTimestamp: protoutil.TimeToProto(restoreToTimestamp), + DryRun: restoreFromBackupOptions.DryRun, } if restoreFromBackupOptions.BackupTimestamp != "" { @@ -255,10 +281,15 @@ func commandRestoreFromBackup(cmd *cobra.Command, args []string) error { func init() { Backup.Flags().BoolVar(&backupOptions.AllowPrimary, "allow-primary", false, "Allow the primary of a shard to be used for the backup. WARNING: If using the builtin backup engine, this will shutdown mysqld on the primary and stop writes for the duration of the backup.") Backup.Flags().Uint64Var(&backupOptions.Concurrency, "concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously.") + Backup.Flags().StringVar(&backupOptions.IncrementalFromPos, "incremental-from-pos", "", "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position") + + Backup.Flags().BoolVar(&backupOptions.UpgradeSafe, "upgrade-safe", false, "Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.") Root.AddCommand(Backup) BackupShard.Flags().BoolVar(&backupShardOptions.AllowPrimary, "allow-primary", false, "Allow the primary of a shard to be used for the backup. WARNING: If using the builtin backup engine, this will shutdown mysqld on the primary and stop writes for the duration of the backup.") BackupShard.Flags().Uint64Var(&backupShardOptions.Concurrency, "concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously.") + BackupShard.Flags().StringVar(&backupShardOptions.IncrementalFromPos, "incremental-from-pos", "", "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position") + BackupShard.Flags().BoolVar(&backupOptions.UpgradeSafe, "upgrade-safe", false, "Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.") Root.AddCommand(BackupShard) GetBackups.Flags().Uint32VarP(&getBackupsOptions.Limit, "limit", "l", 0, "Retrieve only the most recent N backups.") @@ -268,5 +299,8 @@ func init() { Root.AddCommand(RemoveBackup) RestoreFromBackup.Flags().StringVarP(&restoreFromBackupOptions.BackupTimestamp, "backup-timestamp", "t", "", "Use the backup taken at, or closest before, this timestamp. Omit to use the latest backup. Timestamp format is \"YYYY-mm-DD.HHMMSS\".") + RestoreFromBackup.Flags().StringVar(&restoreFromBackupOptions.RestoreToPos, "restore-to-pos", "", "Run a point in time recovery that ends with the given position. This will attempt to use one full backup followed by zero or more incremental backups") + RestoreFromBackup.Flags().StringVar(&restoreFromBackupOptions.RestoreToTimestamp, "restore-to-timestamp", "", "Run a point in time recovery that restores up to, and excluding, given timestamp in RFC3339 format (`2006-01-02T15:04:05Z07:00`). This will attempt to use one full backup followed by zero or more incremental backups") + RestoreFromBackup.Flags().BoolVar(&restoreFromBackupOptions.DryRun, "dry-run", false, "Only validate restore steps, do not actually restore data") Root.AddCommand(RestoreFromBackup) } diff --git a/go/cmd/vtctldclient/command/keyspaces.go b/go/cmd/vtctldclient/command/keyspaces.go index d952168f909..420c274ddd5 100644 --- a/go/cmd/vtctldclient/command/keyspaces.go +++ b/go/cmd/vtctldclient/command/keyspaces.go @@ -19,12 +19,17 @@ package command import ( "errors" "fmt" + "strings" "time" "github.com/spf13/cobra" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/cmd/vtctldclient/cli" - "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/topo" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -35,7 +40,7 @@ import ( var ( // CreateKeyspace makes a CreateKeyspace gRPC call to a vtctld. CreateKeyspace = &cobra.Command{ - Use: "CreateKeyspace [--force|-f] [--type KEYSPACE_TYPE] [--base-keyspace KEYSPACE --snapshot-timestamp TIME] [--served-from DB_TYPE:KEYSPACE ...] [--durability-policy ]", + Use: "CreateKeyspace [--force|-f] [--type KEYSPACE_TYPE] [--base-keyspace KEYSPACE --snapshot-timestamp TIME] [--served-from DB_TYPE:KEYSPACE ...] [--durability-policy ] [--sidecar-db-name ]", Short: "Creates the specified keyspace in the topology.", Long: `Creates the specified keyspace in the topology. @@ -136,6 +141,7 @@ var createKeyspaceOptions = struct { BaseKeyspace string SnapshotTimestamp string DurabilityPolicy string + SidecarDBName string }{ KeyspaceType: cli.KeyspaceTypeFlag(topodatapb.KeyspaceType_NORMAL), } @@ -172,7 +178,16 @@ func commandCreateKeyspace(cmd *cobra.Command, args []string) error { return fmt.Errorf("--snapshot-time cannot be in the future; snapshot = %v, now = %v", t, now) } - snapshotTime = logutil.TimeToProto(t) + snapshotTime = protoutil.TimeToProto(t) + } + + createKeyspaceOptions.SidecarDBName = strings.TrimSpace(createKeyspaceOptions.SidecarDBName) + if createKeyspaceOptions.SidecarDBName == "" { + return errors.New("--sidecar-db-name cannot be empty when creating a keyspace") + } + if len(createKeyspaceOptions.SidecarDBName) > mysql.MaxIdentifierLength { + return sqlerror.NewSQLError(sqlerror.ERTooLongIdent, sqlerror.SSDataTooLong, "--sidecar-db-name identifier value of %q is too long (%d chars), max length for database identifiers is %d characters", + createKeyspaceOptions.SidecarDBName, len(createKeyspaceOptions.SidecarDBName), mysql.MaxIdentifierLength) } cli.FinishedParsing(cmd) @@ -185,6 +200,7 @@ func commandCreateKeyspace(cmd *cobra.Command, args []string) error { BaseKeyspace: createKeyspaceOptions.BaseKeyspace, SnapshotTime: snapshotTime, DurabilityPolicy: createKeyspaceOptions.DurabilityPolicy, + SidecarDbName: createKeyspaceOptions.SidecarDBName, } for n, v := range createKeyspaceOptions.ServedFromsMap.StringMapValue { @@ -411,6 +427,7 @@ func init() { CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.BaseKeyspace, "base-keyspace", "", "The base keyspace for a snapshot keyspace.") CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SnapshotTimestamp, "snapshot-timestamp", "", "The snapshot time for a snapshot keyspace, as a timestamp in RFC3339 format.") CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.DurabilityPolicy, "durability-policy", "none", "Type of durability to enforce for this keyspace. Default is none. Possible values include 'semi_sync' and others as dictated by registered plugins.") + CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SidecarDBName, "sidecar-db-name", sidecar.DefaultName, "(Experimental) Name of the Vitess sidecar database that tablets in this keyspace will use for internal metadata.") Root.AddCommand(CreateKeyspace) DeleteKeyspace.Flags().BoolVarP(&deleteKeyspaceOptions.Recursive, "recursive", "r", false, "Recursively delete all shards in the keyspace, and all tablets in those shards.") diff --git a/go/cmd/vtctldclient/command/onlineddl.go b/go/cmd/vtctldclient/command/onlineddl.go new file mode 100644 index 00000000000..660f41f60b3 --- /dev/null +++ b/go/cmd/vtctldclient/command/onlineddl.go @@ -0,0 +1,404 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package command + +import ( + "fmt" + "os" + "strings" + "time" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/vtctl/schematools" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +const ( + AllMigrationsIndicator = "all" +) + +var ( + OnlineDDL = &cobra.Command{ + Use: "OnlineDDL [args]", + Short: "Operates on online DDL (schema migrations).", + DisableFlagsInUseLine: true, + Args: cobra.MinimumNArgs(2), + } + OnlineDDLCancel = &cobra.Command{ + Use: "cancel ", + Short: "cancel one or all migrations, terminating any running ones as needed.", + Example: "OnlineDDL cancel test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(2), + RunE: commandOnlineDDLCancel, + } + OnlineDDLCleanup = &cobra.Command{ + Use: "cleanup ", + Short: "Mark a given schema migration ready for artifact cleanup.", + Example: "OnlineDDL cleanup test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(2), + RunE: commandOnlineDDLCleanup, + } + OnlineDDLComplete = &cobra.Command{ + Use: "complete ", + Short: "complete one or all migrations executed with --postpone-completion", + Example: "OnlineDDL complete test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(2), + RunE: commandOnlineDDLComplete, + } + OnlineDDLLaunch = &cobra.Command{ + Use: "launch ", + Short: "launch one or all migrations executed with --postpone-launch", + Example: "OnlineDDL launch test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(2), + RunE: commandOnlineDDLLaunch, + } + OnlineDDLRetry = &cobra.Command{ + Use: "retry ", + Short: "Mark a given schema migration for retry.", + Example: "vtctl OnlineDDL retry test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(2), + RunE: commandOnlineDDLRetry, + } + OnlineDDLThrottle = &cobra.Command{ + Use: "throttle ", + Short: "Throttles one or all migrations", + Example: "OnlineDDL throttle all", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(2), + RunE: commandOnlineDDLThrottle, + } + OnlineDDLUnthrottle = &cobra.Command{ + Use: "unthrottle ", + Short: "Unthrottles one or all migrations", + Example: "OnlineDDL unthrottle all", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(2), + RunE: commandOnlineDDLUnthrottle, + } + OnlineDDLShow = &cobra.Command{ + Use: "show", + Short: "Display information about online DDL operations.", + Example: `OnlineDDL show test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90 +OnlineDDL show test_keyspace all +OnlineDDL show --order descending test_keyspace all +OnlineDDL show --limit 10 test_keyspace all +OnlineDDL show --skip 5 --limit 10 test_keyspace all +OnlineDDL show test_keyspace running +OnlineDDL show test_keyspace complete +OnlineDDL show test_keyspace failed`, + DisableFlagsInUseLine: true, + Args: cobra.RangeArgs(1, 2), + RunE: commandOnlineDDLShow, + } +) + +// analyzeOnlineDDLCommandWithUuidOrAllArgument is a general helper function for OnlineDDL commands that +// accept either a valid UUID or the "all" argument. +func analyzeOnlineDDLCommandWithUuidOrAllArgument(cmd *cobra.Command) (keyspace, uuid string, err error) { + keyspace = cmd.Flags().Arg(0) + uuid = cmd.Flags().Arg(1) + + switch { + case strings.ToLower(uuid) == AllMigrationsIndicator: + case schema.IsOnlineDDLUUID(uuid): + default: + return "", "", fmt.Errorf("argument must be 'all' or a valid UUID. Got '%s'", uuid) + } + return keyspace, uuid, nil +} + +func commandOnlineDDLCancel(cmd *cobra.Command, args []string) error { + keyspace, uuid, err := analyzeOnlineDDLCommandWithUuidOrAllArgument(cmd) + if err != nil { + return err + } + cli.FinishedParsing(cmd) + + resp, err := client.CancelSchemaMigration(commandCtx, &vtctldatapb.CancelSchemaMigrationRequest{ + Keyspace: keyspace, + Uuid: uuid, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + return nil +} + +func commandOnlineDDLCleanup(cmd *cobra.Command, args []string) error { + keyspace := cmd.Flags().Arg(0) + uuid := cmd.Flags().Arg(1) + if !schema.IsOnlineDDLUUID(uuid) { + return fmt.Errorf("%s is not a valid UUID", uuid) + } + + cli.FinishedParsing(cmd) + + resp, err := client.CleanupSchemaMigration(commandCtx, &vtctldatapb.CleanupSchemaMigrationRequest{ + Keyspace: keyspace, + Uuid: uuid, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + return nil +} + +func commandOnlineDDLComplete(cmd *cobra.Command, args []string) error { + keyspace, uuid, err := analyzeOnlineDDLCommandWithUuidOrAllArgument(cmd) + if err != nil { + return err + } + cli.FinishedParsing(cmd) + + resp, err := client.CompleteSchemaMigration(commandCtx, &vtctldatapb.CompleteSchemaMigrationRequest{ + Keyspace: keyspace, + Uuid: uuid, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + return nil +} + +func commandOnlineDDLLaunch(cmd *cobra.Command, args []string) error { + keyspace, uuid, err := analyzeOnlineDDLCommandWithUuidOrAllArgument(cmd) + if err != nil { + return err + } + cli.FinishedParsing(cmd) + + resp, err := client.LaunchSchemaMigration(commandCtx, &vtctldatapb.LaunchSchemaMigrationRequest{ + Keyspace: keyspace, + Uuid: uuid, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + return nil +} + +func commandOnlineDDLRetry(cmd *cobra.Command, args []string) error { + keyspace := cmd.Flags().Arg(0) + uuid := cmd.Flags().Arg(1) + if !schema.IsOnlineDDLUUID(uuid) { + return fmt.Errorf("%s is not a valid UUID", uuid) + } + + cli.FinishedParsing(cmd) + + resp, err := client.RetrySchemaMigration(commandCtx, &vtctldatapb.RetrySchemaMigrationRequest{ + Keyspace: keyspace, + Uuid: uuid, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + return nil +} + +// throttleCommandHelper is a helper function that implements the logic for both +// commandOnlineDDLThrottle and commandOnlineDDLUnthrottle ; the only difference between the two +// is the ThrottledApp *rule* sent in UpdateThrottlerConfigRequest. +// input: `throttleType`: true stands for "throttle", `false` stands for "unthrottle" +func throttleCommandHelper(cmd *cobra.Command, throttleType bool) error { + keyspace, uuid, err := analyzeOnlineDDLCommandWithUuidOrAllArgument(cmd) + if err != nil { + return err + } + cli.FinishedParsing(cmd) + + var rule topodatapb.ThrottledAppRule + if throttleType { + rule.Ratio = throttle.DefaultThrottleRatio + rule.ExpiresAt = protoutil.TimeToProto(time.Now().Add(throttle.DefaultAppThrottleDuration)) + } else { + rule.Ratio = 0 + rule.ExpiresAt = protoutil.TimeToProto(time.Now()) + } + + if strings.ToLower(uuid) == AllMigrationsIndicator { + rule.Name = throttlerapp.OnlineDDLName.String() + } else { + rule.Name = uuid + } + + updateThrottlerConfigOptions := vtctldatapb.UpdateThrottlerConfigRequest{ + Keyspace: keyspace, + ThrottledApp: &rule, + } + resp, err := client.UpdateThrottlerConfig(commandCtx, &updateThrottlerConfigOptions) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + return nil +} + +// commandOnlineDDLThrottle throttles one or multiple migrations. +// As opposed to *most* OnlineDDL functions, this functionality does not end up calling a gRPC on tablets. +// Instead, it updates Keyspace and SrvKeyspace entries, on which the tablets listen. +func commandOnlineDDLThrottle(cmd *cobra.Command, args []string) error { + return throttleCommandHelper(cmd, true) +} + +// commandOnlineDDLUnthrottle unthrottles one or multiple migrations. +// As opposed to *most* OnlineDDL functions, this functionality does not end up calling a gRPC on tablets. +// Instead, it updates Keyspace and SrvKeyspace entries, on which the tablets listen. +func commandOnlineDDLUnthrottle(cmd *cobra.Command, args []string) error { + return throttleCommandHelper(cmd, false) +} + +var onlineDDLShowArgs = struct { + JSON bool + OrderStr string + Limit uint64 + Skip uint64 +}{ + OrderStr: "ascending", +} + +func commandOnlineDDLShow(cmd *cobra.Command, args []string) error { + var order vtctldatapb.QueryOrdering + switch strings.ToLower(onlineDDLShowArgs.OrderStr) { + case "": + order = vtctldatapb.QueryOrdering_NONE + case "asc", "ascending": + order = vtctldatapb.QueryOrdering_ASCENDING + case "desc", "descending": + order = vtctldatapb.QueryOrdering_DESCENDING + default: + return fmt.Errorf("invalid ordering %s (choices are 'asc', 'ascending', 'desc', 'descending')", onlineDDLShowArgs.OrderStr) + } + + cli.FinishedParsing(cmd) + + req := &vtctldatapb.GetSchemaMigrationsRequest{ + Keyspace: cmd.Flags().Arg(0), + Order: order, + Limit: onlineDDLShowArgs.Limit, + Skip: onlineDDLShowArgs.Skip, + } + + switch arg := cmd.Flags().Arg(1); arg { + case "", "all": + case "recent": + req.Recent = protoutil.DurationToProto(7 * 24 * time.Hour) + default: + if status, err := schematools.ParseSchemaMigrationStatus(arg); err == nil { + // Argument is a status name. + req.Status = status + } else if schema.IsOnlineDDLUUID(arg) { + req.Uuid = arg + } else { + req.MigrationContext = arg + } + } + + resp, err := client.GetSchemaMigrations(commandCtx, req) + if err != nil { + return err + } + + switch { + case onlineDDLShowArgs.JSON: + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + fmt.Printf("%s\n", data) + default: + res, err := sqltypes.MarshalResult(schematools.MarshallableSchemaMigrations(resp.Migrations)) + if err != nil { + return err + } + + cli.WriteQueryResultTable(os.Stdout, res) + } + return nil +} + +func init() { + OnlineDDL.AddCommand(OnlineDDLCancel) + OnlineDDL.AddCommand(OnlineDDLCleanup) + OnlineDDL.AddCommand(OnlineDDLComplete) + OnlineDDL.AddCommand(OnlineDDLLaunch) + OnlineDDL.AddCommand(OnlineDDLRetry) + OnlineDDL.AddCommand(OnlineDDLThrottle) + OnlineDDL.AddCommand(OnlineDDLUnthrottle) + + OnlineDDLShow.Flags().BoolVar(&onlineDDLShowArgs.JSON, "json", false, "Output JSON instead of human-readable table.") + OnlineDDLShow.Flags().StringVar(&onlineDDLShowArgs.OrderStr, "order", "asc", "Sort the results by `id` property of the Schema migration.") + OnlineDDLShow.Flags().Uint64Var(&onlineDDLShowArgs.Limit, "limit", 0, "Limit number of rows returned in output.") + OnlineDDLShow.Flags().Uint64Var(&onlineDDLShowArgs.Skip, "skip", 0, "Skip specified number of rows returned in output.") + + OnlineDDL.AddCommand(OnlineDDLShow) + Root.AddCommand(OnlineDDL) +} diff --git a/go/cmd/vtctldclient/command/reparents.go b/go/cmd/vtctldclient/command/reparents.go index f755c051bee..5c83016701a 100644 --- a/go/cmd/vtctldclient/command/reparents.go +++ b/go/cmd/vtctldclient/command/reparents.go @@ -80,7 +80,7 @@ EmergencyReparentShard should be used instead. Short: "Updates the topology record for the tablet's shard to acknowledge that an external tool made this tablet the primary.", Long: `Updates the topology record for the tablet's shard to acknowledge that an external tool made this tablet the primary. -See the Reparenting guide for more information: https://vitess.io/docs/user-guides/reparenting/#external-reparenting. +See the Reparenting guide for more information: https://vitess.io/docs/user-guides/configuration-advanced/reparenting/#external-reparenting. `, DisableFlagsInUseLine: true, Args: cobra.ExactArgs(1), @@ -94,6 +94,7 @@ var emergencyReparentShardOptions = struct { NewPrimaryAliasStr string IgnoreReplicaAliasStrList []string PreventCrossCellPromotion bool + WaitForAllTablets bool }{} func commandEmergencyReparentShard(cmd *cobra.Command, args []string) error { @@ -132,6 +133,7 @@ func commandEmergencyReparentShard(cmd *cobra.Command, args []string) error { IgnoreReplicas: ignoreReplicaAliases, WaitReplicasTimeout: protoutil.DurationToProto(emergencyReparentShardOptions.WaitReplicasTimeout), PreventCrossCellPromotion: emergencyReparentShardOptions.PreventCrossCellPromotion, + WaitForAllTablets: emergencyReparentShardOptions.WaitForAllTablets, }) if err != nil { return err @@ -281,6 +283,7 @@ func init() { EmergencyReparentShard.Flags().DurationVar(&emergencyReparentShardOptions.WaitReplicasTimeout, "wait-replicas-timeout", topo.RemoteOperationTimeout, "Time to wait for replicas to catch up in reparenting.") EmergencyReparentShard.Flags().StringVar(&emergencyReparentShardOptions.NewPrimaryAliasStr, "new-primary", "", "Alias of a tablet that should be the new primary. If not specified, the vtctld will select the best candidate to promote.") EmergencyReparentShard.Flags().BoolVar(&emergencyReparentShardOptions.PreventCrossCellPromotion, "prevent-cross-cell-promotion", false, "Only promotes a new primary from the same cell as the previous primary.") + EmergencyReparentShard.Flags().BoolVar(&emergencyReparentShardOptions.WaitForAllTablets, "wait-for-all-tablets", false, "Should ERS wait for all the tablets to respond. Useful when all the tablets are reachable.") EmergencyReparentShard.Flags().StringSliceVarP(&emergencyReparentShardOptions.IgnoreReplicaAliasStrList, "ignore-replicas", "i", nil, "Comma-separated, repeated list of replica tablet aliases to ignore during the emergency reparent.") Root.AddCommand(EmergencyReparentShard) diff --git a/go/cmd/vtctldclient/command/root.go b/go/cmd/vtctldclient/command/root.go index 9e59276993c..7a9f59ad3a4 100644 --- a/go/cmd/vtctldclient/command/root.go +++ b/go/cmd/vtctldclient/command/root.go @@ -26,6 +26,11 @@ import ( "github.com/spf13/cobra" + // These imports ensure init()s within them get called and they register their commands/subcommands. + vreplcommon "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/movetables" + _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/reshard" + _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/workflow" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" @@ -36,8 +41,9 @@ var ( // VtctldClientProtocol is the protocol to use when creating the vtctldclient.VtctldClient. VtctldClientProtocol = "grpc" - client vtctldclient.VtctldClient - traceCloser io.Closer + client vtctldclient.VtctldClient + traceCloser io.Closer + commandCtx context.Context commandCancel func() @@ -59,6 +65,8 @@ var ( ctx = context.Background() } commandCtx, commandCancel = context.WithTimeout(ctx, actionTimeout) + vreplcommon.SetClient(client) + vreplcommon.SetCommandCtx(commandCtx) return err }, // Similarly, PersistentPostRun cleans up the resources spawned by @@ -132,4 +140,5 @@ func getClientForCommand(cmd *cobra.Command) (vtctldclient.VtctldClient, error) func init() { Root.PersistentFlags().StringVar(&server, "server", "", "server to use for connection (required)") Root.PersistentFlags().DurationVar(&actionTimeout, "action_timeout", time.Hour, "timeout for the total command") + vreplcommon.RegisterCommands(Root) } diff --git a/go/cmd/vtctldclient/command/schema.go b/go/cmd/vtctldclient/command/schema.go index a2d7843756d..063bd10b8ba 100644 --- a/go/cmd/vtctldclient/command/schema.go +++ b/go/cmd/vtctldclient/command/schema.go @@ -31,7 +31,7 @@ import ( "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/wrangler" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" "vitess.io/vitess/go/vt/proto/vtrpc" @@ -40,7 +40,7 @@ import ( var ( // ApplySchema makes an ApplySchema gRPC call to a vtctld. ApplySchema = &cobra.Command{ - Use: "ApplySchema [--allow-long-unavailability] [--ddl-strategy ] [--uuid ...] [--migration-context ] [--wait-replicas-timeout ] [--caller-id ] {--sql-file | --sql } ", + Use: "ApplySchema [--ddl-strategy ] [--uuid ...] [--migration-context ] [--wait-replicas-timeout ] [--caller-id ] {--sql-file | --sql } ", Short: "Applies the schema change to the specified keyspace on every primary, running in parallel on all shards. The changes are then propagated to replicas via replication.", Long: `Applies the schema change to the specified keyspace on every primary, running in parallel on all shards. The changes are then propagated to replicas via replication. @@ -103,6 +103,7 @@ var applySchemaOptions = struct { WaitReplicasTimeout time.Duration SkipPreflight bool CallerID string + BatchSize int64 }{} func commandApplySchema(cmd *cobra.Command, args []string) error { @@ -137,15 +138,15 @@ func commandApplySchema(cmd *cobra.Command, args []string) error { ks := cmd.Flags().Arg(0) resp, err := client.ApplySchema(commandCtx, &vtctldatapb.ApplySchemaRequest{ - Keyspace: ks, - AllowLongUnavailability: applySchemaOptions.AllowLongUnavailability, - DdlStrategy: applySchemaOptions.DDLStrategy, - Sql: parts, - SkipPreflight: true, - UuidList: applySchemaOptions.UUIDList, - MigrationContext: applySchemaOptions.MigrationContext, - WaitReplicasTimeout: protoutil.DurationToProto(applySchemaOptions.WaitReplicasTimeout), - CallerId: cid, + Keyspace: ks, + DdlStrategy: applySchemaOptions.DDLStrategy, + Sql: parts, + SkipPreflight: true, + UuidList: applySchemaOptions.UUIDList, + MigrationContext: applySchemaOptions.MigrationContext, + WaitReplicasTimeout: protoutil.DurationToProto(applySchemaOptions.WaitReplicasTimeout), + CallerId: cid, + BatchSize: applySchemaOptions.BatchSize, }) if err != nil { return err @@ -286,15 +287,18 @@ func commandReloadSchemaShard(cmd *cobra.Command, args []string) error { } func init() { + ApplySchema.Flags().Bool("allow-long-unavailability", false, "Deprecated and has no effect.") + ApplySchema.Flags().MarkDeprecated("--allow-long-unavailability", "") + ApplySchema.Flags().Bool("skip-preflight", false, "Deprecated and has no effect.") ApplySchema.Flags().MarkDeprecated("--skip-preflight", "Deprecated. Assumed to be always 'true'") - ApplySchema.Flags().BoolVar(&applySchemaOptions.AllowLongUnavailability, "allow-long-unavailability", false, "Allow large schema changes which incur a longer unavailability of the database.") ApplySchema.Flags().StringVar(&applySchemaOptions.DDLStrategy, "ddl-strategy", string(schema.DDLStrategyDirect), "Online DDL strategy, compatible with @@ddl_strategy session variable (examples: 'gh-ost', 'pt-osc', 'gh-ost --max-load=Threads_running=100'.") ApplySchema.Flags().StringSliceVar(&applySchemaOptions.UUIDList, "uuid", nil, "Optional, comma-delimited, repeatable, explicit UUIDs for migration. If given, must match number of DDL changes.") ApplySchema.Flags().StringVar(&applySchemaOptions.MigrationContext, "migration-context", "", "For Online DDL, optionally supply a custom unique string used as context for the migration(s) in this command. By default a unique context is auto-generated by Vitess.") - ApplySchema.Flags().DurationVar(&applySchemaOptions.WaitReplicasTimeout, "wait-replicas-timeout", wrangler.DefaultWaitReplicasTimeout, "Amount of time to wait for replicas to receive the schema change via replication.") + ApplySchema.Flags().DurationVar(&applySchemaOptions.WaitReplicasTimeout, "wait-replicas-timeout", grpcvtctldserver.DefaultWaitReplicasTimeout, "Amount of time to wait for replicas to receive the schema change via replication.") ApplySchema.Flags().StringVar(&applySchemaOptions.CallerID, "caller-id", "", "Effective caller ID used for the operation and should map to an ACL name which grants this identity the necessary permissions to perform the operation (this is only necessary when strict table ACLs are used).") ApplySchema.Flags().StringArrayVar(&applySchemaOptions.SQL, "sql", nil, "Semicolon-delimited, repeatable SQL commands to apply. Exactly one of --sql|--sql-file is required.") ApplySchema.Flags().StringVar(&applySchemaOptions.SQLFile, "sql-file", "", "Path to a file containing semicolon-delimited SQL commands to apply. Exactly one of --sql|--sql-file is required.") + ApplySchema.Flags().Int64Var(&applySchemaOptions.BatchSize, "batch-size", 0, "How many queries to batch together. Only applicable when all queries are CREATE TABLE|VIEW") Root.AddCommand(ApplySchema) diff --git a/go/cmd/vtctldclient/command/tablets.go b/go/cmd/vtctldclient/command/tablets.go index e17d911ba0d..3e1c1114133 100644 --- a/go/cmd/vtctldclient/command/tablets.go +++ b/go/cmd/vtctldclient/command/tablets.go @@ -99,22 +99,26 @@ Note: hook names may not contain slash (/) characters. } // GetTablets makes a GetTablets gRPC call to a vtctld. GetTablets = &cobra.Command{ - Use: "GetTablets [--strict] [{--cell $c1 [--cell $c2 ...], --keyspace $ks [--shard $shard], --tablet-alias $alias}]", + Use: "GetTablets [--strict] [{--cell $c1 [--cell $c2 ...] [--tablet-type $t1] [--keyspace $ks [--shard $shard]], --tablet-alias $alias}]", Short: "Looks up tablets according to filter criteria.", - Long: `Looks up tablets according to the filter criteria. + Long: fmt.Sprintf(`Looks up tablets according to the filter criteria. -If --tablet-alias is passed, none of the other filters (keyspace, shard, cell) may -be passed, and tablets are looked up by tablet alias only. +If --tablet-alias is passed, none of the other filters (tablet-type, keyspace, +shard, cell) may be passed, and tablets are looked up by tablet alias only. If --keyspace is passed, then all tablets in the keyspace are retrieved. The --shard flag may also be passed to further narrow the set of tablets to that . Passing --shard without also passing --keyspace will fail. +If --tablet-type is passed, only tablets of the specified type will be +returned. Valid tablet types are: +"%s". + Passing --cell limits the set of tablets to those in the specified cells. The --cell flag accepts a CSV argument (e.g. --cell "c1,c2") and may be repeated (e.g. --cell "c1" --cell "c2"). -Valid output formats are "awk" and "json".`, +Valid output formats are "awk" and "json".`, strings.Join(topoproto.MakeUniqueStringTypeList(topoproto.AllTabletTypes), "\", \"")), DisableFlagsInUseLine: true, Args: cobra.NoArgs, RunE: commandGetTablets, @@ -379,9 +383,10 @@ func commandGetTablet(cmd *cobra.Command, args []string) error { } var getTabletsOptions = struct { - Cells []string - Keyspace string - Shard string + Cells []string + TabletType topodatapb.TabletType + Keyspace string + Shard string TabletAliasStrings []string @@ -408,6 +413,8 @@ func commandGetTablets(cmd *cobra.Command, args []string) error { return fmt.Errorf("--shard (= %s) cannot be passed when using --tablet-alias (= %v)", getTabletsOptions.Shard, getTabletsOptions.TabletAliasStrings) case len(getTabletsOptions.Cells) > 0: return fmt.Errorf("--cell (= %v) cannot be passed when using --tablet-alias (= %v)", getTabletsOptions.Cells, getTabletsOptions.TabletAliasStrings) + case cmd.Flags().Lookup("tablet-type").Changed: + return fmt.Errorf("--tablet-type (= %s) cannot be passed when using --tablet-alias (= %v)", getTabletsOptions.TabletType, getTabletsOptions.TabletAliasStrings) } var err error @@ -426,6 +433,7 @@ func commandGetTablets(cmd *cobra.Command, args []string) error { resp, err := client.GetTablets(commandCtx, &vtctldatapb.GetTabletsRequest{ TabletAliases: aliases, Cells: getTabletsOptions.Cells, + TabletType: getTabletsOptions.TabletType, Keyspace: getTabletsOptions.Keyspace, Shard: getTabletsOptions.Shard, Strict: getTabletsOptions.Strict, @@ -634,6 +642,7 @@ func init() { GetTablets.Flags().StringSliceVarP(&getTabletsOptions.TabletAliasStrings, "tablet-alias", "t", nil, "List of tablet aliases to filter by.") GetTablets.Flags().StringSliceVarP(&getTabletsOptions.Cells, "cell", "c", nil, "List of cells to filter tablets by.") + GetTablets.Flags().Var((*topoproto.TabletTypeFlag)(&getTabletsOptions.TabletType), "tablet-type", "Tablet type to filter by (e.g. primary or replica).") GetTablets.Flags().StringVarP(&getTabletsOptions.Keyspace, "keyspace", "k", "", "Keyspace to filter tablets by.") GetTablets.Flags().StringVarP(&getTabletsOptions.Shard, "shard", "s", "", "Shard to filter tablets by.") GetTablets.Flags().StringVar(&getTabletsOptions.Format, "format", "awk", "Output format to use; valid choices are (json, awk).") diff --git a/go/cmd/vtctldclient/command/throttler.go b/go/cmd/vtctldclient/command/throttler.go index b0dbd663013..9783f76720d 100644 --- a/go/cmd/vtctldclient/command/throttler.go +++ b/go/cmd/vtctldclient/command/throttler.go @@ -17,17 +17,23 @@ limitations under the License. package command import ( + "fmt" + "time" + "github.com/spf13/cobra" "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/protoutil" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" ) var ( // UpdateThrottlerConfig makes a UpdateThrottlerConfig gRPC call to a vtctld. UpdateThrottlerConfig = &cobra.Command{ - Use: "UpdateThrottlerConfig [--enable|--disable] [--threshold=] [--custom-query=] [--check-as-check-self|--check-as-check-shard] ", + Use: "UpdateThrottlerConfig [--enable|--disable] [--threshold=] [--custom-query=] [--check-as-check-self|--check-as-check-shard] [--throttle-app|unthrottle-app=] [--throttle-app-ratio=] [--throttle-app-duration=] ", Short: "Update the tablet throttler configuration for all tablets in the given keyspace (across all cells)", DisableFlagsInUseLine: true, Args: cobra.ExactArgs(1), @@ -35,14 +41,32 @@ var ( } ) -var updateThrottlerConfigOptions vtctldatapb.UpdateThrottlerConfigRequest +var ( + updateThrottlerConfigOptions vtctldatapb.UpdateThrottlerConfigRequest + throttledAppRule topodatapb.ThrottledAppRule + unthrottledAppRule topodatapb.ThrottledAppRule + throttledAppDuration time.Duration +) func commandUpdateThrottlerConfig(cmd *cobra.Command, args []string) error { keyspace := cmd.Flags().Arg(0) cli.FinishedParsing(cmd) + if throttledAppRule.Name != "" && unthrottledAppRule.Name != "" { + return fmt.Errorf("throttle-app and unthrottle-app are mutually exclusive") + } + updateThrottlerConfigOptions.CustomQuerySet = cmd.Flags().Changed("custom-query") updateThrottlerConfigOptions.Keyspace = keyspace + + if throttledAppRule.Name != "" { + throttledAppRule.ExpiresAt = protoutil.TimeToProto(time.Now().Add(throttledAppDuration)) + updateThrottlerConfigOptions.ThrottledApp = &throttledAppRule + } else if unthrottledAppRule.Name != "" { + unthrottledAppRule.ExpiresAt = protoutil.TimeToProto(time.Now()) + updateThrottlerConfigOptions.ThrottledApp = &unthrottledAppRule + } + _, err := client.UpdateThrottlerConfig(commandCtx, &updateThrottlerConfigOptions) if err != nil { return err @@ -57,5 +81,12 @@ func init() { UpdateThrottlerConfig.Flags().StringVar(&updateThrottlerConfigOptions.CustomQuery, "custom-query", "", "custom throttler check query") UpdateThrottlerConfig.Flags().BoolVar(&updateThrottlerConfigOptions.CheckAsCheckSelf, "check-as-check-self", false, "/throttler/check requests behave as is /throttler/check-self was called") UpdateThrottlerConfig.Flags().BoolVar(&updateThrottlerConfigOptions.CheckAsCheckShard, "check-as-check-shard", false, "use standard behavior for /throttler/check requests") + + UpdateThrottlerConfig.Flags().StringVar(&unthrottledAppRule.Name, "unthrottle-app", "", "an app name to unthrottle") + UpdateThrottlerConfig.Flags().StringVar(&throttledAppRule.Name, "throttle-app", "", "an app name to throttle") + UpdateThrottlerConfig.Flags().Float64Var(&throttledAppRule.Ratio, "throttle-app-ratio", throttle.DefaultThrottleRatio, "ratio to throttle app (app specififed in --throttled-app)") + UpdateThrottlerConfig.Flags().DurationVar(&throttledAppDuration, "throttle-app-duration", throttle.DefaultAppThrottleDuration, "duration after which throttled app rule expires (app specififed in --throttled-app)") + UpdateThrottlerConfig.Flags().BoolVar(&throttledAppRule.Exempt, "throttle-app-exempt", throttledAppRule.Exempt, "exempt this app from being at all throttled. WARNING: use with extreme care, as this is likely to push metrics beyond the throttler's threshold, and starve other apps") + Root.AddCommand(UpdateThrottlerConfig) } diff --git a/go/cmd/vtctldclient/command/vreplication/common/cancel.go b/go/cmd/vtctldclient/command/vreplication/common/cancel.go new file mode 100644 index 00000000000..9187d1ca4fa --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/common/cancel.go @@ -0,0 +1,83 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "fmt" + "sort" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var cancelOptions = struct { + KeepData bool + KeepRoutingRules bool +}{} + +func GetCancelCommand(opts *SubCommandsOpts) *cobra.Command { + cmd := &cobra.Command{ + Use: "cancel", + Short: fmt.Sprintf("Cancel a %s VReplication workflow.", opts.SubCommand), + Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer cancel`, opts.SubCommand, opts.Workflow), + DisableFlagsInUseLine: true, + Aliases: []string{"Cancel"}, + Args: cobra.NoArgs, + RunE: commandCancel, + } + return cmd +} + +func commandCancel(cmd *cobra.Command, args []string) error { + format, err := GetOutputFormat(cmd) + if err != nil { + return err + } + + cli.FinishedParsing(cmd) + + req := &vtctldatapb.WorkflowDeleteRequest{ + Keyspace: BaseOptions.TargetKeyspace, + Workflow: BaseOptions.Workflow, + KeepData: cancelOptions.KeepData, + KeepRoutingRules: cancelOptions.KeepRoutingRules, + } + resp, err := GetClient().WorkflowDelete(GetCommandCtx(), req) + if err != nil { + return err + } + + var output []byte + if format == "json" { + // Sort the inner TabletInfo slice for deterministic output. + sort.Slice(resp.Details, func(i, j int) bool { + return resp.Details[i].Tablet.String() < resp.Details[j].Tablet.String() + }) + output, err = cli.MarshalJSONCompact(resp) + if err != nil { + return err + } + } else { + output = []byte(resp.Summary + "\n") + } + fmt.Printf("%s\n", output) + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/common/complete.go b/go/cmd/vtctldclient/command/vreplication/common/complete.go new file mode 100644 index 00000000000..027503d8781 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/common/complete.go @@ -0,0 +1,74 @@ +package common + +import ( + "bytes" + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var CompleteOptions = struct { + KeepData bool + KeepRoutingRules bool + RenameTables bool + DryRun bool +}{} + +func GetCompleteCommand(opts *SubCommandsOpts) *cobra.Command { + cmd := &cobra.Command{ + Use: "complete", + Short: "Complete a MoveTables VReplication workflow.", + Example: `vtctldclient --server localhost:15999 movetables --workflow commerce2customer --target-keyspace customer complete`, + DisableFlagsInUseLine: true, + Aliases: []string{"Complete"}, + Args: cobra.NoArgs, + RunE: commandComplete, + } + return cmd +} + +func commandComplete(cmd *cobra.Command, args []string) error { + format, err := GetOutputFormat(cmd) + if err != nil { + return err + } + cli.FinishedParsing(cmd) + + req := &vtctldatapb.MoveTablesCompleteRequest{ + Workflow: BaseOptions.Workflow, + TargetKeyspace: BaseOptions.TargetKeyspace, + KeepData: CompleteOptions.KeepData, + KeepRoutingRules: CompleteOptions.KeepRoutingRules, + RenameTables: CompleteOptions.RenameTables, + DryRun: CompleteOptions.DryRun, + } + resp, err := GetClient().MoveTablesComplete(GetCommandCtx(), req) + if err != nil { + return err + } + + var output []byte + if format == "json" { + output, err = cli.MarshalJSONCompact(resp) + if err != nil { + return err + } + } else { + tout := bytes.Buffer{} + tout.WriteString(resp.Summary + "\n") + if len(resp.DryRunResults) > 0 { + tout.WriteString("\n") + for _, r := range resp.DryRunResults { + tout.WriteString(r + "\n") + } + } + output = tout.Bytes() + } + fmt.Printf("%s\n", output) + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/common/show.go b/go/cmd/vtctldclient/command/vreplication/common/show.go new file mode 100644 index 00000000000..e650aa24509 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/common/show.go @@ -0,0 +1,62 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +func GetShowCommand(opts *SubCommandsOpts) *cobra.Command { + cmd := &cobra.Command{ + Use: "show", + Short: fmt.Sprintf("Show the details for a %s VReplication workflow.", opts.SubCommand), + Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer show`, opts.SubCommand, opts.Workflow), + DisableFlagsInUseLine: true, + Aliases: []string{"Show"}, + Args: cobra.NoArgs, + RunE: commandShow, + } + return cmd +} + +func commandShow(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + req := &vtctldatapb.GetWorkflowsRequest{ + Keyspace: BaseOptions.TargetKeyspace, + Workflow: BaseOptions.Workflow, + } + resp, err := GetClient().GetWorkflows(GetCommandCtx(), req) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/common/status.go b/go/cmd/vtctldclient/command/vreplication/common/status.go new file mode 100644 index 00000000000..d4eb9b0f344 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/common/status.go @@ -0,0 +1,59 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +func GetStatusCommand(opts *SubCommandsOpts) *cobra.Command { + cmd := &cobra.Command{ + Use: "status", + Short: fmt.Sprintf("Show the current status for a %s VReplication workflow.", opts.SubCommand), + Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer status`, opts.SubCommand, opts.Workflow), + DisableFlagsInUseLine: true, + Aliases: []string{"Status", "progress", "Progress"}, + Args: cobra.NoArgs, + RunE: commandStatus, + } + return cmd +} + +func commandStatus(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + req := &vtctldatapb.WorkflowStatusRequest{ + Keyspace: BaseOptions.TargetKeyspace, + Workflow: BaseOptions.Workflow, + } + resp, err := GetClient().WorkflowStatus(GetCommandCtx(), req) + if err != nil { + return err + } + + if err = OutputStatusResponse(resp, "json"); err != nil { + return err + } + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/common/switchtraffic.go b/go/cmd/vtctldclient/command/vreplication/common/switchtraffic.go new file mode 100644 index 00000000000..0ba3ab595bb --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/common/switchtraffic.go @@ -0,0 +1,129 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "bytes" + "fmt" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/vt/vtctl/workflow" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +func GetSwitchTrafficCommand(opts *SubCommandsOpts) *cobra.Command { + cmd := &cobra.Command{ + Use: "switchtraffic", + Short: fmt.Sprintf("Switch traffic for a %s VReplication workflow.", opts.SubCommand), + Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer switchtraffic --tablet-types "replica,rdonly"`, opts.SubCommand, opts.Workflow), + DisableFlagsInUseLine: true, + Aliases: []string{"SwitchTraffic"}, + Args: cobra.NoArgs, + PreRunE: func(cmd *cobra.Command, args []string) error { + SwitchTrafficOptions.Direction = workflow.DirectionForward + if !cmd.Flags().Lookup("tablet-types").Changed { + // We switch traffic for all tablet types if none are provided. + SwitchTrafficOptions.TabletTypes = []topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_REPLICA, + topodatapb.TabletType_RDONLY, + } + } + return nil + }, + RunE: commandSwitchTraffic, + } + return cmd +} + +func GetReverseTrafficCommand(opts *SubCommandsOpts) *cobra.Command { + cmd := &cobra.Command{ + Use: "reversetraffic", + Short: fmt.Sprintf("Reverse traffic for a %s VReplication workflow.", opts.SubCommand), + Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer reversetraffic`, opts.SubCommand, opts.Workflow), + DisableFlagsInUseLine: true, + Aliases: []string{"ReverseTraffic"}, + Args: cobra.NoArgs, + PreRunE: func(cmd *cobra.Command, args []string) error { + SwitchTrafficOptions.Direction = workflow.DirectionBackward + if !cmd.Flags().Lookup("tablet-types").Changed { + // We switch traffic for all tablet types if none are provided. + SwitchTrafficOptions.TabletTypes = []topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_REPLICA, + topodatapb.TabletType_RDONLY, + } + } + return nil + }, + RunE: commandSwitchTraffic, + } + return cmd +} + +func commandSwitchTraffic(cmd *cobra.Command, args []string) error { + format, err := GetOutputFormat(cmd) + if err != nil { + return err + } + + cli.FinishedParsing(cmd) + + req := &vtctldatapb.WorkflowSwitchTrafficRequest{ + Keyspace: BaseOptions.TargetKeyspace, + Workflow: BaseOptions.Workflow, + TabletTypes: SwitchTrafficOptions.TabletTypes, + MaxReplicationLagAllowed: protoutil.DurationToProto(SwitchTrafficOptions.MaxReplicationLagAllowed), + Timeout: protoutil.DurationToProto(SwitchTrafficOptions.Timeout), + DryRun: SwitchTrafficOptions.DryRun, + EnableReverseReplication: SwitchTrafficOptions.EnableReverseReplication, + InitializeTargetSequences: SwitchTrafficOptions.InitializeTargetSequences, + Direction: int32(SwitchTrafficOptions.Direction), + } + resp, err := GetClient().WorkflowSwitchTraffic(GetCommandCtx(), req) + if err != nil { + return err + } + + var output []byte + if format == "json" { + output, err = cli.MarshalJSONCompact(resp) + if err != nil { + return err + } + } else { + tout := bytes.Buffer{} + tout.WriteString(resp.Summary + "\n\n") + if req.DryRun { + for _, line := range resp.DryRunResults { + tout.WriteString(line + "\n") + } + } else { + tout.WriteString(fmt.Sprintf("Start State: %s\n", resp.StartState)) + tout.WriteString(fmt.Sprintf("Current State: %s\n", resp.CurrentState)) + } + output = tout.Bytes() + } + fmt.Printf("%s\n", output) + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/common/update.go b/go/cmd/vtctldclient/command/vreplication/common/update.go new file mode 100644 index 00000000000..21fb6281f62 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/common/update.go @@ -0,0 +1,170 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "fmt" + "sort" + "strings" + + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/textutil" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +func bridgeToWorkflow(cmd *cobra.Command, args []string) { + workflowUpdateOptions.Workflow = BaseOptions.Workflow + workflowOptions.Keyspace = BaseOptions.TargetKeyspace +} + +var ( + workflowOptions = struct { + Keyspace string + }{} + + workflowUpdateOptions = struct { + Workflow string + Cells []string + TabletTypes []topodatapb.TabletType + TabletTypesInPreferenceOrder bool + OnDDL string + }{} +) + +func GetStartCommand(opts *SubCommandsOpts) *cobra.Command { + cmd := &cobra.Command{ + Use: "start", + Short: fmt.Sprintf("Start a %s workflow.", opts.SubCommand), + Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer start`, opts.SubCommand, opts.Workflow), + DisableFlagsInUseLine: true, + Aliases: []string{"Start"}, + Args: cobra.NoArgs, + PreRun: bridgeToWorkflow, + RunE: commandUpdateState, + } + return cmd +} + +func GetStopCommand(opts *SubCommandsOpts) *cobra.Command { + cmd := &cobra.Command{ + Use: "stop", + Short: fmt.Sprintf("Stop a %s workflow.", opts.SubCommand), + Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer stop`, opts.SubCommand, opts.Workflow), + DisableFlagsInUseLine: true, + Aliases: []string{"Stop"}, + Args: cobra.NoArgs, + PreRun: bridgeToWorkflow, + RunE: commandUpdateState, + } + return cmd +} + +func getWorkflow(keyspace, workflow string) (*vtctldatapb.GetWorkflowsResponse, error) { + resp, err := GetClient().GetWorkflows(GetCommandCtx(), &vtctldatapb.GetWorkflowsRequest{ + Keyspace: keyspace, + Workflow: workflow, + }) + if err != nil { + return &vtctldatapb.GetWorkflowsResponse{}, err + } + return resp, nil +} + +// CanRestartWorkflow validates that, for an atomic copy workflow, none of the streams are still in the copy phase. +// Since we copy all tables in a single snapshot, we cannot restart a workflow which broke before all tables were copied. +func CanRestartWorkflow(keyspace, workflow string) error { + resp, err := getWorkflow(keyspace, workflow) + if err != nil { + return err + } + if len(resp.Workflows) == 0 { + return fmt.Errorf("workflow %s not found", workflow) + } + if len(resp.Workflows) > 1 { + return vterrors.Errorf(vtrpc.Code_INTERNAL, "multiple results found for workflow %s", workflow) + } + wf := resp.Workflows[0] + if wf.WorkflowSubType != binlogdatapb.VReplicationWorkflowSubType_AtomicCopy.String() { + return nil + } + // If we're here, we have an atomic copy workflow. + for _, shardStream := range wf.ShardStreams { + for _, stream := range shardStream.Streams { + if len(stream.CopyStates) > 0 { + return fmt.Errorf("stream %d is still in the copy phase: can only start workflow %s if all streams have completed the copy phase", stream.Id, workflow) + } + } + } + return nil +} + +func commandUpdateState(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + var state binlogdatapb.VReplicationWorkflowState + switch strings.ToLower(cmd.Name()) { + case "start": + if err := CanRestartWorkflow(workflowUpdateOptions.Workflow, workflowOptions.Keyspace); err != nil { + return err + } + state = binlogdatapb.VReplicationWorkflowState_Running + case "stop": + state = binlogdatapb.VReplicationWorkflowState_Stopped + default: + return fmt.Errorf("invalid workstate: %s", args[0]) + } + + // The only thing we're updating is the state. + req := &vtctldatapb.WorkflowUpdateRequest{ + Keyspace: workflowOptions.Keyspace, + TabletRequest: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ + Workflow: workflowUpdateOptions.Workflow, + Cells: textutil.SimulatedNullStringSlice, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)}, + OnDdl: binlogdatapb.OnDDLAction(textutil.SimulatedNullInt), + State: state, + }, + } + + resp, err := GetClient().WorkflowUpdate(GetCommandCtx(), req) + if err != nil { + return err + } + + // Sort the inner TabletInfo slice for deterministic output. + sort.Slice(resp.Details, func(i, j int) bool { + return resp.Details[i].Tablet.String() < resp.Details[j].Tablet.String() + }) + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/common/utils.go b/go/cmd/vtctldclient/command/vreplication/common/utils.go new file mode 100644 index 00000000000..2bf107ce23c --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/common/utils.go @@ -0,0 +1,213 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "bytes" + "context" + "fmt" + "strings" + "time" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/vtctldclient" + "vitess.io/vitess/go/vt/vtctl/workflow" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + client vtctldclient.VtctldClient + commandCtx context.Context + // The generic default for most commands. + tabletTypesDefault = []topodatapb.TabletType{ + topodatapb.TabletType_REPLICA, + topodatapb.TabletType_PRIMARY, + } + onDDLDefault = binlogdatapb.OnDDLAction_IGNORE.String() + MaxReplicationLagDefault = 30 * time.Second + TimeoutDefault = 30 * time.Second + + BaseOptions = struct { + Workflow string + TargetKeyspace string + Format string + }{} + + CreateOptions = struct { + Cells []string + TabletTypes []topodatapb.TabletType + TabletTypesInPreferenceOrder bool + OnDDL string + DeferSecondaryKeys bool + AutoStart bool + StopAfterCopy bool + }{} +) + +var commandHandlers = make(map[string]func(cmd *cobra.Command)) + +func RegisterCommandHandler(command string, handler func(cmd *cobra.Command)) { + commandHandlers[command] = handler +} + +func RegisterCommands(root *cobra.Command) { + for _, handler := range commandHandlers { + handler(root) + } +} + +type SubCommandsOpts struct { + SubCommand string + Workflow string // used to specify an example workflow name for the Examples section of the help output. +} + +func SetClient(c vtctldclient.VtctldClient) { + client = c +} + +func GetClient() vtctldclient.VtctldClient { + return client +} + +func SetCommandCtx(ctx context.Context) { + commandCtx = ctx +} + +func GetCommandCtx() context.Context { + return commandCtx +} + +func ParseCells(cmd *cobra.Command) { + if cmd.Flags().Lookup("cells").Changed { // Validate the provided value(s) + for i, cell := range CreateOptions.Cells { // Which only means trimming whitespace + CreateOptions.Cells[i] = strings.TrimSpace(cell) + } + } +} + +func ParseTabletTypes(cmd *cobra.Command) { + if !cmd.Flags().Lookup("tablet-types").Changed { + CreateOptions.TabletTypes = tabletTypesDefault + } +} + +func validateOnDDL(cmd *cobra.Command) error { + if _, ok := binlogdatapb.OnDDLAction_value[strings.ToUpper(CreateOptions.OnDDL)]; !ok { + return fmt.Errorf("invalid on-ddl value: %s", CreateOptions.OnDDL) + } + return nil +} + +func ParseAndValidateCreateOptions(cmd *cobra.Command) error { + if err := validateOnDDL(cmd); err != nil { + return err + } + ParseCells(cmd) + ParseTabletTypes(cmd) + return nil +} + +func GetOutputFormat(cmd *cobra.Command) (string, error) { + format := strings.ToLower(strings.TrimSpace(BaseOptions.Format)) + switch format { + case "text", "json": + return format, nil + default: + return "", fmt.Errorf("invalid output format, got %s", BaseOptions.Format) + } +} + +func GetTabletSelectionPreference(cmd *cobra.Command) tabletmanagerdatapb.TabletSelectionPreference { + tsp := tabletmanagerdatapb.TabletSelectionPreference_ANY + if CreateOptions.TabletTypesInPreferenceOrder { + tsp = tabletmanagerdatapb.TabletSelectionPreference_INORDER + } + return tsp +} + +func OutputStatusResponse(resp *vtctldatapb.WorkflowStatusResponse, format string) error { + var output []byte + var err error + if format == "json" { + output, err = cli.MarshalJSON(resp) + if err != nil { + return err + } + } else { + tout := bytes.Buffer{} + tout.WriteString(fmt.Sprintf("The following vreplication streams exist for workflow %s.%s:\n\n", + BaseOptions.TargetKeyspace, BaseOptions.Workflow)) + for _, shardstreams := range resp.ShardStreams { + for _, shardstream := range shardstreams.Streams { + tablet := fmt.Sprintf("%s-%d", shardstream.Tablet.Cell, shardstream.Tablet.Uid) + tout.WriteString(fmt.Sprintf("id=%d on %s/%s: Status: %s. %s.\n", + shardstream.Id, BaseOptions.TargetKeyspace, tablet, shardstream.Status, shardstream.Info)) + } + } + output = tout.Bytes() + } + fmt.Printf("%s\n", output) + return nil +} + +func AddCommonFlags(cmd *cobra.Command) { + cmd.Flags().StringVar(&BaseOptions.TargetKeyspace, "target-keyspace", "", "Target keyspace for this workflow exists (required).") + cmd.MarkFlagRequired("target-keyspace") + cmd.Flags().StringVarP(&BaseOptions.Workflow, "workflow", "w", "", "The workflow you want to perform the command on (required).") + cmd.MarkFlagRequired("workflow") + cmd.Flags().StringVar(&BaseOptions.Format, "format", "text", "The format of the output; supported formats are: text,json.") +} + +func AddCommonCreateFlags(cmd *cobra.Command) { + cmd.Flags().StringSliceVarP(&CreateOptions.Cells, "cells", "c", nil, "Cells and/or CellAliases to copy table data from.") + cmd.Flags().Var((*topoproto.TabletTypeListFlag)(&CreateOptions.TabletTypes), "tablet-types", "Source tablet types to replicate table data from (e.g. PRIMARY,REPLICA,RDONLY).") + cmd.Flags().BoolVar(&CreateOptions.TabletTypesInPreferenceOrder, "tablet-types-in-preference-order", true, "When performing source tablet selection, look for candidates in the type order as they are listed in the tablet-types flag.") + cmd.Flags().StringVar(&CreateOptions.OnDDL, "on-ddl", onDDLDefault, "What to do when DDL is encountered in the VReplication stream. Possible values are IGNORE, STOP, EXEC, and EXEC_IGNORE.") + cmd.Flags().BoolVar(&CreateOptions.DeferSecondaryKeys, "defer-secondary-keys", false, "Defer secondary index creation for a table until after it has been copied.") + cmd.Flags().BoolVar(&CreateOptions.AutoStart, "auto-start", true, "Start the MoveTables workflow after creating it.") + cmd.Flags().BoolVar(&CreateOptions.StopAfterCopy, "stop-after-copy", false, "Stop the MoveTables workflow after it's finished copying the existing rows and before it starts replicating changes.") +} + +var SwitchTrafficOptions = struct { + Cells []string + TabletTypes []topodatapb.TabletType + Timeout time.Duration + MaxReplicationLagAllowed time.Duration + EnableReverseReplication bool + DryRun bool + Direction workflow.TrafficSwitchDirection + InitializeTargetSequences bool +}{} + +func AddCommonSwitchTrafficFlags(cmd *cobra.Command, initializeTargetSequences bool) { + cmd.Flags().StringSliceVarP(&SwitchTrafficOptions.Cells, "cells", "c", nil, "Cells and/or CellAliases to switch traffic in.") + cmd.Flags().Var((*topoproto.TabletTypeListFlag)(&SwitchTrafficOptions.TabletTypes), "tablet-types", "Tablet types to switch traffic for.") + cmd.Flags().DurationVar(&SwitchTrafficOptions.Timeout, "timeout", TimeoutDefault, "Specifies the maximum time to wait, in seconds, for VReplication to catch up on primary tablets. The traffic switch will be cancelled on timeout.") + cmd.Flags().DurationVar(&SwitchTrafficOptions.MaxReplicationLagAllowed, "max-replication-lag-allowed", MaxReplicationLagDefault, "Allow traffic to be switched only if VReplication lag is below this.") + cmd.Flags().BoolVar(&SwitchTrafficOptions.EnableReverseReplication, "enable-reverse-replication", true, "Setup replication going back to the original source keyspace to support rolling back the traffic cutover.") + cmd.Flags().BoolVar(&SwitchTrafficOptions.DryRun, "dry-run", false, "Print the actions that would be taken and report any known errors that would have occurred.") + if initializeTargetSequences { + cmd.Flags().BoolVar(&SwitchTrafficOptions.InitializeTargetSequences, "initialize-target-sequences", false, "When moving tables from an unsharded keyspace to a sharded keyspace, initialize any sequences that are being used on the target when switching writes.") + } +} diff --git a/go/cmd/vtctldclient/command/vreplication/movetables/create.go b/go/cmd/vtctldclient/command/vreplication/movetables/create.go new file mode 100644 index 00000000000..17db9df55af --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/movetables/create.go @@ -0,0 +1,136 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package movetables + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + moveTablesCreateOptions = struct { + SourceKeyspace string + SourceShards []string + ExternalClusterName string + AllTables bool + IncludeTables []string + ExcludeTables []string + SourceTimeZone string + NoRoutingRules bool + AtomicCopy bool + }{} + + // moveTablesCreate makes a moveTablesCreate gRPC call to a vtctld. + moveTablesCreate = &cobra.Command{ + Use: "create", + Short: "Create and optionally run a moveTables VReplication workflow.", + Example: `vtctldclient --server localhost:15999 movetables --workflow commerce2customer --target-keyspace customer create --source-keyspace commerce --cells zone1 --cells zone2 --tablet-types replica`, + SilenceUsage: true, + DisableFlagsInUseLine: true, + Aliases: []string{"Create"}, + Args: cobra.NoArgs, + PreRunE: func(cmd *cobra.Command, args []string) error { + // Either specific tables or the all tables flags are required. + if !cmd.Flags().Lookup("tables").Changed && !cmd.Flags().Lookup("all-tables").Changed { + return fmt.Errorf("tables or all-tables are required to specify which tables to move") + } + if err := common.ParseAndValidateCreateOptions(cmd); err != nil { + return err + } + checkAtomicCopyOptions := func() error { + var errors []string + if !moveTablesCreateOptions.AtomicCopy { + return nil + } + if !moveTablesCreateOptions.AllTables { + errors = append(errors, "atomic copy requires --all-tables") + } + if len(moveTablesCreateOptions.IncludeTables) > 0 || len(moveTablesCreateOptions.ExcludeTables) > 0 { + errors = append(errors, "atomic copy does not support specifying tables") + } + if len(errors) > 0 { + return fmt.Errorf("found options incompatible with atomic copy: %s", strings.Join(errors, ", ")) + } + return nil + } + if err := checkAtomicCopyOptions(); err != nil { + return err + } + return nil + }, + RunE: commandMoveTablesCreate, + } +) + +func commandMoveTablesCreate(cmd *cobra.Command, args []string) error { + format, err := common.GetOutputFormat(cmd) + if err != nil { + return err + } + tsp := common.GetTabletSelectionPreference(cmd) + cli.FinishedParsing(cmd) + + req := &vtctldatapb.MoveTablesCreateRequest{ + Workflow: common.BaseOptions.Workflow, + TargetKeyspace: common.BaseOptions.TargetKeyspace, + SourceKeyspace: moveTablesCreateOptions.SourceKeyspace, + SourceShards: moveTablesCreateOptions.SourceShards, + SourceTimeZone: moveTablesCreateOptions.SourceTimeZone, + Cells: common.CreateOptions.Cells, + TabletTypes: common.CreateOptions.TabletTypes, + TabletSelectionPreference: tsp, + AllTables: moveTablesCreateOptions.AllTables, + IncludeTables: moveTablesCreateOptions.IncludeTables, + ExcludeTables: moveTablesCreateOptions.ExcludeTables, + OnDdl: common.CreateOptions.OnDDL, + DeferSecondaryKeys: common.CreateOptions.DeferSecondaryKeys, + AutoStart: common.CreateOptions.AutoStart, + StopAfterCopy: common.CreateOptions.StopAfterCopy, + NoRoutingRules: moveTablesCreateOptions.NoRoutingRules, + AtomicCopy: moveTablesCreateOptions.AtomicCopy, + } + + resp, err := common.GetClient().MoveTablesCreate(common.GetCommandCtx(), req) + if err != nil { + return err + } + if err = common.OutputStatusResponse(resp, format); err != nil { + return err + } + return nil +} + +func registerCreateCommand(root *cobra.Command) { + common.AddCommonCreateFlags(moveTablesCreate) + moveTablesCreate.PersistentFlags().StringVar(&moveTablesCreateOptions.SourceKeyspace, "source-keyspace", "", "Keyspace where the tables are being moved from (required).") + moveTablesCreate.MarkPersistentFlagRequired("source-keyspace") + moveTablesCreate.Flags().StringSliceVar(&moveTablesCreateOptions.SourceShards, "source-shards", nil, "Source shards to copy data from when performing a partial moveTables (experimental).") + moveTablesCreate.Flags().StringVar(&moveTablesCreateOptions.SourceTimeZone, "source-time-zone", "", "Specifying this causes any DATETIME fields to be converted from the given time zone into UTC.") + moveTablesCreate.Flags().BoolVar(&moveTablesCreateOptions.AllTables, "all-tables", false, "Copy all tables from the source.") + moveTablesCreate.Flags().StringSliceVar(&moveTablesCreateOptions.IncludeTables, "tables", nil, "Source tables to copy.") + moveTablesCreate.Flags().StringSliceVar(&moveTablesCreateOptions.ExcludeTables, "exclude-tables", nil, "Source tables to exclude from copying.") + moveTablesCreate.Flags().BoolVar(&moveTablesCreateOptions.NoRoutingRules, "no-routing-rules", false, "(Advanced) Do not create routing rules while creating the workflow. See the reference documentation for limitations if you use this flag.") + moveTablesCreate.Flags().BoolVar(&moveTablesCreateOptions.AtomicCopy, "atomic-copy", false, "(EXPERIMENTAL) A single copy phase is run for all tables from the source. Use this, for example, if your source keyspace has tables which use foreign key constraints.") + moveTables.AddCommand(moveTablesCreate) +} diff --git a/go/cmd/vtctldclient/command/vreplication/movetables/movetables.go b/go/cmd/vtctldclient/command/vreplication/movetables/movetables.go new file mode 100644 index 00000000000..7ff7924d968 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/movetables/movetables.go @@ -0,0 +1,67 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package movetables + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" +) + +var ( + // moveTables is the base command for all actions related to moveTables. + moveTables = &cobra.Command{ + Use: "MoveTables --workflow --keyspace [command] [command-flags]", + Short: "Perform commands related to moving tables from a source keyspace to a target keyspace.", + Long: `moveTables commands: Create, Show, Status, SwitchTraffic, ReverseTraffic, Stop, Start, Cancel, and Delete. +See the --help output for each command for more details.`, + DisableFlagsInUseLine: true, + Aliases: []string{"movetables"}, + Args: cobra.ExactArgs(1), + } +) + +func registerMoveTablesCommands(root *cobra.Command) { + common.AddCommonFlags(moveTables) + root.AddCommand(moveTables) + + registerCreateCommand(moveTables) + opts := &common.SubCommandsOpts{ + SubCommand: "MoveTables", + Workflow: "commerce2customer", + } + moveTables.AddCommand(common.GetShowCommand(opts)) + moveTables.AddCommand(common.GetStatusCommand(opts)) + + moveTables.AddCommand(common.GetStartCommand(opts)) + moveTables.AddCommand(common.GetStopCommand(opts)) + + switchTrafficCommand := common.GetSwitchTrafficCommand(opts) + common.AddCommonSwitchTrafficFlags(switchTrafficCommand, true) + moveTables.AddCommand(switchTrafficCommand) + + reverseTrafficCommand := common.GetReverseTrafficCommand(opts) + common.AddCommonSwitchTrafficFlags(reverseTrafficCommand, false) + moveTables.AddCommand(reverseTrafficCommand) + + moveTables.AddCommand(common.GetCompleteCommand(opts)) + moveTables.AddCommand(common.GetCancelCommand(opts)) +} + +func init() { + common.RegisterCommandHandler("MoveTables", registerMoveTablesCommands) +} diff --git a/go/cmd/vtctldclient/command/vreplication/reshard/create.go b/go/cmd/vtctldclient/command/vreplication/reshard/create.go new file mode 100644 index 00000000000..9b713370077 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/reshard/create.go @@ -0,0 +1,94 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reshard + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + reshardCreateOptions = struct { + sourceShards []string + targetShards []string + skipSchemaCopy bool + }{} + + // reshardCreate makes a ReshardCreate gRPC call to a vtctld. + reshardCreate = &cobra.Command{ + Use: "create", + Short: "Create and optionally run a reshard VReplication workflow.", + Example: `vtctldclient --server localhost:15999 reshard --workflow customer2customer --target-keyspace customer create --source_shards="0" --target_shards="-80,80-" --cells zone1 --cells zone2 --tablet-types replica`, + SilenceUsage: true, + DisableFlagsInUseLine: true, + Aliases: []string{"Create"}, + Args: cobra.NoArgs, + PreRunE: func(cmd *cobra.Command, args []string) error { + if err := common.ParseAndValidateCreateOptions(cmd); err != nil { + return err + } + return nil + }, + RunE: commandReshardCreate, + } +) + +func commandReshardCreate(cmd *cobra.Command, args []string) error { + format, err := common.GetOutputFormat(cmd) + if err != nil { + return err + } + tsp := common.GetTabletSelectionPreference(cmd) + cli.FinishedParsing(cmd) + + req := &vtctldatapb.ReshardCreateRequest{ + Workflow: common.BaseOptions.Workflow, + Keyspace: common.BaseOptions.TargetKeyspace, + + TabletTypes: common.CreateOptions.TabletTypes, + TabletSelectionPreference: tsp, + Cells: common.CreateOptions.Cells, + OnDdl: common.CreateOptions.OnDDL, + DeferSecondaryKeys: common.CreateOptions.DeferSecondaryKeys, + AutoStart: common.CreateOptions.AutoStart, + StopAfterCopy: common.CreateOptions.StopAfterCopy, + + SourceShards: reshardCreateOptions.sourceShards, + TargetShards: reshardCreateOptions.targetShards, + SkipSchemaCopy: reshardCreateOptions.skipSchemaCopy, + } + resp, err := common.GetClient().ReshardCreate(common.GetCommandCtx(), req) + if err != nil { + return err + } + if err = common.OutputStatusResponse(resp, format); err != nil { + return err + } + return nil +} + +func registerCreateCommand(root *cobra.Command) { + common.AddCommonCreateFlags(reshardCreate) + reshardCreate.Flags().StringSliceVar(&reshardCreateOptions.sourceShards, "source-shards", nil, "Source shards.") + reshardCreate.Flags().StringSliceVar(&reshardCreateOptions.targetShards, "target-shards", nil, "Target shards.") + reshardCreate.Flags().BoolVar(&reshardCreateOptions.skipSchemaCopy, "skip-schema-copy", false, "Skip copying the schema from the source shards to the target shards.") + root.AddCommand(reshardCreate) +} diff --git a/go/cmd/vtctldclient/command/vreplication/reshard/reshard.go b/go/cmd/vtctldclient/command/vreplication/reshard/reshard.go new file mode 100644 index 00000000000..88a3b71a634 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/reshard/reshard.go @@ -0,0 +1,67 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reshard + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" +) + +var ( + // reshard is the base command for all actions related to reshard. + reshard = &cobra.Command{ + Use: "Reshard --workflow --keyspace [command] [command-flags]", + Short: "Perform commands related to resharding a keyspace.", + Long: `Reshard commands: Create, Show, Status, SwitchTraffic, ReverseTraffic, Stop, Start, Cancel, and Delete. +See the --help output for each command for more details.`, + DisableFlagsInUseLine: true, + Aliases: []string{"reshard"}, + Args: cobra.ExactArgs(1), + } +) + +func registerReshardCommands(root *cobra.Command) { + common.AddCommonFlags(reshard) + root.AddCommand(reshard) + + registerCreateCommand(reshard) + opts := &common.SubCommandsOpts{ + SubCommand: "Reshard", + Workflow: "cust2cust", + } + reshard.AddCommand(common.GetShowCommand(opts)) + reshard.AddCommand(common.GetStatusCommand(opts)) + + reshard.AddCommand(common.GetStartCommand(opts)) + reshard.AddCommand(common.GetStopCommand(opts)) + + switchTrafficCommand := common.GetSwitchTrafficCommand(opts) + common.AddCommonSwitchTrafficFlags(switchTrafficCommand, false) + reshard.AddCommand(switchTrafficCommand) + + reverseTrafficCommand := common.GetReverseTrafficCommand(opts) + common.AddCommonSwitchTrafficFlags(reverseTrafficCommand, false) + reshard.AddCommand(reverseTrafficCommand) + + reshard.AddCommand(common.GetCompleteCommand(opts)) + reshard.AddCommand(common.GetCancelCommand(opts)) +} + +func init() { + common.RegisterCommandHandler("Reshard", registerReshardCommands) +} diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/delete.go b/go/cmd/vtctldclient/command/vreplication/workflow/delete.go new file mode 100644 index 00000000000..2707c8b7ba5 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/workflow/delete.go @@ -0,0 +1,85 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "fmt" + "sort" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + workflowDeleteOptions = struct { + Workflow string + KeepData bool + KeepRoutingRules bool + }{} + + // WorkflowDelete makes a WorkflowDelete gRPC call to a vtctld. + workflowDelete = &cobra.Command{ + Use: "delete", + Short: "Delete a VReplication workflow.", + Example: `vtctldclient --server localhost:15999 workflow --keyspace customer delete --workflow commerce2customer`, + DisableFlagsInUseLine: true, + Aliases: []string{"Delete"}, + Args: cobra.NoArgs, + RunE: commandWorkflowDelete, + } +) + +func commandWorkflowDelete(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + req := &vtctldatapb.WorkflowDeleteRequest{ + Keyspace: workflowOptions.Keyspace, + Workflow: workflowDeleteOptions.Workflow, + KeepData: workflowDeleteOptions.KeepData, + KeepRoutingRules: workflowDeleteOptions.KeepRoutingRules, + } + resp, err := common.GetClient().WorkflowDelete(common.GetCommandCtx(), req) + if err != nil { + return err + } + + // Sort the inner TabletInfo slice for deterministic output. + sort.Slice(resp.Details, func(i, j int) bool { + return resp.Details[i].Tablet.String() < resp.Details[j].Tablet.String() + }) + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +func addWorkflowDeleteFlags(cmd *cobra.Command) { + workflowDelete.Flags().StringVarP(&workflowDeleteOptions.Workflow, "workflow", "w", "", "The workflow you want to delete (required).") + workflowDelete.MarkFlagRequired("workflow") + workflowDelete.Flags().BoolVar(&workflowDeleteOptions.KeepData, "keep-data", false, "Keep the partially copied table data from the workflow in the target keyspace.") + workflowDelete.Flags().BoolVar(&workflowDeleteOptions.KeepRoutingRules, "keep-routing-rules", false, "Keep the routing rules created for the workflow.") + +} diff --git a/go/cmd/vtctldclient/command/workflows.go b/go/cmd/vtctldclient/command/vreplication/workflow/get.go similarity index 73% rename from go/cmd/vtctldclient/command/workflows.go rename to go/cmd/vtctldclient/command/vreplication/workflow/get.go index 136c5c42e4c..8dd8ba4eee1 100644 --- a/go/cmd/vtctldclient/command/workflows.go +++ b/go/cmd/vtctldclient/command/vreplication/workflow/get.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package command +package workflow import ( "fmt" @@ -22,13 +22,17 @@ import ( "github.com/spf13/cobra" "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) var ( + getWorkflowsOptions = struct { + ShowAll bool + }{} // GetWorkflows makes a GetWorkflows gRPC call to a vtctld. - GetWorkflows = &cobra.Command{ + getWorkflows = &cobra.Command{ Use: "GetWorkflows ", Short: "Gets all vreplication workflows (Reshard, MoveTables, etc) in the given keyspace.", DisableFlagsInUseLine: true, @@ -37,16 +41,12 @@ var ( } ) -var getWorkflowsOptions = struct { - ShowAll bool -}{} - func commandGetWorkflows(cmd *cobra.Command, args []string) error { cli.FinishedParsing(cmd) ks := cmd.Flags().Arg(0) - resp, err := client.GetWorkflows(commandCtx, &vtctldatapb.GetWorkflowsRequest{ + resp, err := common.GetClient().GetWorkflows(common.GetCommandCtx(), &vtctldatapb.GetWorkflowsRequest{ Keyspace: ks, ActiveOnly: !getWorkflowsOptions.ShowAll, }) @@ -65,7 +65,6 @@ func commandGetWorkflows(cmd *cobra.Command, args []string) error { return nil } -func init() { - GetWorkflows.Flags().BoolVarP(&getWorkflowsOptions.ShowAll, "show-all", "a", false, "Show all workflows instead of just active workflows.") - Root.AddCommand(GetWorkflows) +func addGetWorkflowsFlags(cmd *cobra.Command) { + cmd.Flags().BoolVarP(&getWorkflowsOptions.ShowAll, "show-all", "a", false, "Show all workflows instead of just active workflows.") } diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/show.go b/go/cmd/vtctldclient/command/vreplication/workflow/show.go new file mode 100644 index 00000000000..e85a582a20d --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/workflow/show.go @@ -0,0 +1,89 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + // WorkflowList makes a GetWorkflows gRPC call to a vtctld. + workflowList = &cobra.Command{ + Use: "list", + Short: "List the VReplication workflows in the given keyspace.", + Example: `vtctldclient --server localhost:15999 workflow --keyspace customer list`, + DisableFlagsInUseLine: true, + Aliases: []string{"List"}, + Args: cobra.NoArgs, + RunE: commandWorkflowShow, + } + + // WorkflowShow makes a GetWorkflows gRPC call to a vtctld. + workflowShow = &cobra.Command{ + Use: "show", + Short: "Show the details for a VReplication workflow.", + Example: `vtctldclient --server localhost:15999 workflow --keyspace customer show --workflow commerce2customer`, + DisableFlagsInUseLine: true, + Aliases: []string{"Show"}, + Args: cobra.NoArgs, + RunE: commandWorkflowShow, + } +) + +func commandWorkflowShow(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + req := &vtctldatapb.GetWorkflowsRequest{ + Keyspace: workflowOptions.Keyspace, + Workflow: workflowDeleteOptions.Workflow, + } + resp, err := common.GetClient().GetWorkflows(common.GetCommandCtx(), req) + if err != nil { + return err + } + + var data []byte + if strings.ToLower(cmd.Name()) == "list" { + // We only want the names + Names := make([]string, len(resp.Workflows)) + for i, wf := range resp.Workflows { + Names[i] = wf.Name + } + data, err = cli.MarshalJSON(Names) + } else { + data, err = cli.MarshalJSON(resp) + } + if err != nil { + return err + } + fmt.Printf("%s\n", data) + + return nil +} + +func addWorkflowShowFlags(cmd *cobra.Command) { + workflowShow.Flags().StringVarP(&workflowDeleteOptions.Workflow, "workflow", "w", "", "The workflow you want the details for (required).") + workflowShow.MarkFlagRequired("workflow") +} diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/state.go b/go/cmd/vtctldclient/command/vreplication/workflow/state.go new file mode 100644 index 00000000000..bc304790a96 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/workflow/state.go @@ -0,0 +1,106 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "fmt" + "sort" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + "vitess.io/vitess/go/textutil" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + // WorkflowStart makes a WorfklowUpdate gRPC call to a vtctld. + workflowStart = &cobra.Command{ + Use: "start", + Short: "Start a VReplication workflow.", + Example: `vtctldclient --server localhost:15999 workflow --keyspace customer start --workflow commerce2customer`, + DisableFlagsInUseLine: true, + Aliases: []string{"Start"}, + Args: cobra.NoArgs, + RunE: commandWorkflowUpdateState, + } + + // WorkflowStop makes a WorfklowUpdate gRPC call to a vtctld. + workflowStop = &cobra.Command{ + Use: "stop", + Short: "Stop a VReplication workflow.", + Example: `vtctldclient --server localhost:15999 workflow --keyspace customer stop --workflow commerce2customer`, + DisableFlagsInUseLine: true, + Aliases: []string{"Stop"}, + Args: cobra.NoArgs, + RunE: commandWorkflowUpdateState, + } +) + +func commandWorkflowUpdateState(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + var state binlogdatapb.VReplicationWorkflowState + switch strings.ToLower(cmd.Name()) { + case "start": + if err := common.CanRestartWorkflow(workflowUpdateOptions.Workflow, workflowOptions.Keyspace); err != nil { + return err + } + state = binlogdatapb.VReplicationWorkflowState_Running + case "stop": + state = binlogdatapb.VReplicationWorkflowState_Stopped + default: + return fmt.Errorf("invalid workstate: %s", args[0]) + } + + // The only thing we're updating is the state. + req := &vtctldatapb.WorkflowUpdateRequest{ + Keyspace: workflowOptions.Keyspace, + TabletRequest: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ + Workflow: workflowUpdateOptions.Workflow, + Cells: textutil.SimulatedNullStringSlice, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)}, + OnDdl: binlogdatapb.OnDDLAction(textutil.SimulatedNullInt), + State: state, + }, + } + + resp, err := common.GetClient().WorkflowUpdate(common.GetCommandCtx(), req) + if err != nil { + return err + } + + // Sort the inner TabletInfo slice for deterministic output. + sort.Slice(resp.Details, func(i, j int) bool { + return resp.Details[i].Tablet.String() < resp.Details[j].Tablet.String() + }) + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/update.go b/go/cmd/vtctldclient/command/vreplication/workflow/update.go new file mode 100644 index 00000000000..d76f6516d12 --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/workflow/update.go @@ -0,0 +1,136 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "fmt" + "sort" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + "vitess.io/vitess/go/textutil" + "vitess.io/vitess/go/vt/topo/topoproto" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + // WorkflowUpdate makes a WorkflowUpdate gRPC call to a vtctld. + workflowUpdate = &cobra.Command{ + Use: "update", + Short: "Update the configuration parameters for a VReplication workflow.", + Example: `vtctldclient --server localhost:15999 workflow --keyspace customer update --workflow commerce2customer --cells zone1 --cells zone2 -c "zone3,zone4" -c zone5`, + DisableFlagsInUseLine: true, + Aliases: []string{"Update"}, + Args: cobra.NoArgs, + PreRunE: func(cmd *cobra.Command, args []string) error { + changes := false + if cmd.Flags().Lookup("cells").Changed { // Validate the provided value(s) + changes = true + for i, cell := range workflowUpdateOptions.Cells { // Which only means trimming whitespace + workflowUpdateOptions.Cells[i] = strings.TrimSpace(cell) + } + } else { + workflowUpdateOptions.Cells = textutil.SimulatedNullStringSlice + } + if cmd.Flags().Lookup("tablet-types").Changed { + changes = true + } else { + workflowUpdateOptions.TabletTypes = []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)} + } + if cmd.Flags().Lookup("on-ddl").Changed { // Validate the provided value + changes = true + if _, ok := binlogdatapb.OnDDLAction_value[strings.ToUpper(workflowUpdateOptions.OnDDL)]; !ok { + return fmt.Errorf("invalid on-ddl value: %s", workflowUpdateOptions.OnDDL) + } + } // Simulated NULL will need to be handled in command + if !changes { + return fmt.Errorf("no configuration options specified to update") + } + return nil + }, + RunE: commandWorkflowUpdate, + } +) + +func commandWorkflowUpdate(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + // We've already validated any provided value, if one WAS provided. + // Now we need to do the mapping from the string representation to + // the enum value. + onddl := int32(textutil.SimulatedNullInt) // Simulated NULL when no value provided + if val, ok := binlogdatapb.OnDDLAction_value[strings.ToUpper(workflowUpdateOptions.OnDDL)]; ok { + onddl = val + } + + // Simulated NULL when no value is provided. + tsp := tabletmanagerdatapb.TabletSelectionPreference_UNKNOWN + if cmd.Flags().Lookup("tablet-types-in-order").Changed { + if workflowUpdateOptions.TabletTypesInPreferenceOrder { + tsp = tabletmanagerdatapb.TabletSelectionPreference_INORDER + } else { + tsp = tabletmanagerdatapb.TabletSelectionPreference_ANY + } + } + + req := &vtctldatapb.WorkflowUpdateRequest{ + Keyspace: workflowOptions.Keyspace, + TabletRequest: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ + Workflow: workflowUpdateOptions.Workflow, + Cells: workflowUpdateOptions.Cells, + TabletTypes: workflowUpdateOptions.TabletTypes, + TabletSelectionPreference: tsp, + OnDdl: binlogdatapb.OnDDLAction(onddl), + }, + } + + resp, err := common.GetClient().WorkflowUpdate(common.GetCommandCtx(), req) + if err != nil { + return err + } + + // Sort the inner TabletInfo slice for deterministic output. + sort.Slice(resp.Details, func(i, j int) bool { + return resp.Details[i].Tablet.String() < resp.Details[j].Tablet.String() + }) + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +func addWorkflowUpdateFlags(cmd *cobra.Command) { + workflowUpdate.Flags().StringVarP(&workflowUpdateOptions.Workflow, "workflow", "w", "", "The workflow you want to update (required).") + workflowUpdate.MarkFlagRequired("workflow") + workflowUpdate.Flags().StringSliceVarP(&workflowUpdateOptions.Cells, "cells", "c", nil, "New Cell(s) or CellAlias(es) (comma-separated) to replicate from.") + workflowUpdate.Flags().VarP((*topoproto.TabletTypeListFlag)(&workflowUpdateOptions.TabletTypes), "tablet-types", "t", "New source tablet types to replicate from (e.g. PRIMARY,REPLICA,RDONLY).") + workflowUpdate.Flags().BoolVar(&workflowUpdateOptions.TabletTypesInPreferenceOrder, "tablet-types-in-order", true, "When performing source tablet selection, look for candidates in the type order as they are listed in the tablet-types flag.") + workflowUpdate.Flags().StringVar(&workflowUpdateOptions.OnDDL, "on-ddl", "", "New instruction on what to do when DDL is encountered in the VReplication stream. Possible values are IGNORE, STOP, EXEC, and EXEC_IGNORE.") + +} diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/workflow.go b/go/cmd/vtctldclient/command/vreplication/workflow/workflow.go new file mode 100644 index 00000000000..f7c0188a41f --- /dev/null +++ b/go/cmd/vtctldclient/command/vreplication/workflow/workflow.go @@ -0,0 +1,81 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +var ( + + // workflow is a parent command for Workflow* sub commands. + workflow = &cobra.Command{ + Use: "Workflow --keyspace [command] [command-flags]", + Short: "Administer VReplication workflows (Reshard, MoveTables, etc) in the given keyspace.", + Long: `Workflow commands: List, Show, Start, Stop, Update, and Delete. +See the --help output for each command for more details.`, + DisableFlagsInUseLine: true, + Aliases: []string{"workflow"}, + Args: cobra.ExactArgs(1), + RunE: commandGetWorkflows, + } +) + +var ( + workflowOptions = struct { + Keyspace string + }{} + + workflowUpdateOptions = struct { + Workflow string + Cells []string + TabletTypes []topodatapb.TabletType + TabletTypesInPreferenceOrder bool + OnDDL string + }{} +) + +func RegisterWorkflowCommands(root *cobra.Command) { + workflow.PersistentFlags().StringVarP(&workflowOptions.Keyspace, "keyspace", "k", "", "Keyspace context for the workflow (required).") + workflow.MarkPersistentFlagRequired("keyspace") + root.AddCommand(workflow) + + addGetWorkflowsFlags(getWorkflows) + root.AddCommand(getWorkflows) + + addWorkflowDeleteFlags(workflowDelete) + workflow.AddCommand(workflowDelete) + + workflow.AddCommand(workflowList) + + addWorkflowShowFlags(workflowShow) + workflow.AddCommand(workflowShow) + + workflow.AddCommand(workflowStart) + workflow.AddCommand(workflowStop) + + addWorkflowUpdateFlags(workflowUpdate) + workflow.AddCommand(workflowUpdate) +} + +func init() { + common.RegisterCommandHandler("Workflow", RegisterWorkflowCommands) +} diff --git a/go/cmd/vtexplain/vtexplain.go b/go/cmd/vtexplain/vtexplain.go index 2fa59dc0476..68ceed51316 100644 --- a/go/cmd/vtexplain/vtexplain.go +++ b/go/cmd/vtexplain/vtexplain.go @@ -17,6 +17,7 @@ limitations under the License. package main import ( + "context" "fmt" "os" @@ -63,7 +64,7 @@ func registerFlags(fs *pflag.FlagSet) { fs.StringVar(&replicationMode, "replication-mode", replicationMode, "The replication mode to simulate -- must be set to either ROW or STATEMENT") fs.BoolVar(&normalize, "normalize", normalize, "Whether to enable vtgate normalization") fs.StringVar(&dbName, "dbname", dbName, "Optional database target to override normal routing") - fs.StringVar(&plannerVersionStr, "planner-version", plannerVersionStr, "Sets the query planner version to use when generating the explain output. Valid values are V3 and Gen4. An empty value will use VTGate's default planner") + fs.StringVar(&plannerVersionStr, "planner-version", plannerVersionStr, "Sets the default planner to use. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right") fs.IntVar(&numShards, "shards", numShards, "Number of shards per keyspace. Passing --ks-shard-map/--ks-shard-map-file causes this flag to be ignored.") fs.StringVar(&executionMode, "execution-mode", executionMode, "The execution mode to simulate -- must be set to multi, legacy-autocommit, or twopc") fs.StringVar(&outputMode, "output-mode", outputMode, "Output in human-friendly text or json") @@ -104,6 +105,7 @@ func main() { defer logutil.Flush() servenv.ParseFlags("vtexplain") + servenv.Init() err := parseAndRun() if err != nil { fmt.Printf("ERROR: %s\n", err) @@ -113,8 +115,8 @@ func main() { func parseAndRun() error { plannerVersion, _ := plancontext.PlannerNameToVersion(plannerVersionStr) - if plannerVersionStr != "" && plannerVersion != querypb.ExecuteOptions_V3 && plannerVersion != querypb.ExecuteOptions_Gen4 { - return fmt.Errorf("invalid value specified for planner-version of '%s' -- valid values are V3 and Gen4 or an empty value to use the default planner", plannerVersionStr) + if plannerVersionStr != "" && plannerVersion != querypb.ExecuteOptions_Gen4 { + return fmt.Errorf("invalid value specified for planner-version of '%s' -- valid value is Gen4 or an empty value to use the default planner", plannerVersionStr) } sql, err := getFileParam(sqlFlag, sqlFileFlag, "sql", true) @@ -146,7 +148,7 @@ func parseAndRun() error { Target: dbName, } - vte, err := vtexplain.Init(vschema, schema, ksShardMap, opts) + vte, err := vtexplain.Init(context.Background(), vschema, schema, ksShardMap, opts) if err != nil { return err } diff --git a/go/cmd/vtgate/cli/cli.go b/go/cmd/vtgate/cli/cli.go new file mode 100644 index 00000000000..0ba24162f41 --- /dev/null +++ b/go/cmd/vtgate/cli/cli.go @@ -0,0 +1,193 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "context" + "fmt" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/exit" + "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vtrpc" +) + +var ( + cell string + tabletTypesToWait []topodatapb.TabletType + plannerName string + resilientServer *srvtopo.ResilientServer + + Main = &cobra.Command{ + Use: "vtgate", + Short: "VTGate is a stateless proxy responsible for accepting requests from applications and routing them to the appropriate tablet server(s) for query execution. It speaks both the MySQL Protocol and a gRPC protocol.", + Long: `VTGate is a stateless proxy responsible for accepting requests from applications and routing them to the appropriate tablet server(s) for query execution. It speaks both the MySQL Protocol and a gRPC protocol. + +### Key Options +` + + "\n* `--srv_topo_cache_ttl`: There may be instances where you will need to increase the cached TTL from the default of 1 second to a higher number:\n" + + ` * You may want to increase this option if you see that your topo leader goes down and keeps your queries waiting for a few seconds.`, + Example: `vtgate \ + --topo_implementation etcd2 \ + --topo_global_server_address localhost:2379 \ + --topo_global_root /vitess/global \ + --log_dir $VTDATAROOT/tmp \ + --port 15001 \ + --grpc_port 15991 \ + --mysql_server_port 15306 \ + --cell test \ + --cells_to_watch test \ + --tablet_types_to_wait PRIMARY,REPLICA \ + --service_map 'grpc-vtgateservice' \ + --pid_file $VTDATAROOT/tmp/vtgate.pid \ + --mysql_auth_server_impl none`, + Args: cobra.NoArgs, + Version: servenv.AppVersion.String(), + PreRunE: servenv.CobraPreRunE, + RunE: run, + } +) + +// CheckCellFlags will check validation of cell and cells_to_watch flag +// it will help to avoid strange behaviors when vtgate runs but actually does not work +func CheckCellFlags(ctx context.Context, serv srvtopo.Server, cell string, cellsToWatch string) error { + // topo check + var topoServer *topo.Server + if serv != nil { + var err error + topoServer, err = serv.GetTopoServer() + if err != nil { + return fmt.Errorf("Unable to create gateway: %w", err) + } + } else { + return fmt.Errorf("topo server cannot be nil") + } + cellsInTopo, err := topoServer.GetKnownCells(ctx) + if err != nil { + return err + } + if len(cellsInTopo) == 0 { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "topo server should have at least one cell") + } + + // cell valid check + if cell == "" { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell flag must be set") + } + hasCell := false + for _, v := range cellsInTopo { + if v == cell { + hasCell = true + break + } + } + if !hasCell { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell:[%v] does not exist in topo", cell) + } + + // cells_to_watch valid check + cells := make([]string, 0, 1) + for _, c := range strings.Split(cellsToWatch, ",") { + if c == "" { + continue + } + // cell should contained in cellsInTopo + if exists := topo.InCellList(c, cellsInTopo); !exists { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell: [%v] is not valid. Available cells: [%v]", c, strings.Join(cellsInTopo, ",")) + } + cells = append(cells, c) + } + if len(cells) == 0 { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cells_to_watch flag cannot be empty") + } + + return nil +} + +func run(cmd *cobra.Command, args []string) error { + defer exit.Recover() + + servenv.Init() + defer servenv.Close() + + ts := topo.Open() + defer ts.Close() + + resilientServer = srvtopo.NewResilientServer(context.Background(), ts, "ResilientSrvTopoServer") + + tabletTypes := make([]topodatapb.TabletType, 0, 1) + for _, tt := range tabletTypesToWait { + if topoproto.IsServingType(tt) { + tabletTypes = append(tabletTypes, tt) + } + } + + if len(tabletTypes) == 0 { + return fmt.Errorf("tablet_types_to_wait must contain at least one serving tablet type") + } + + err := CheckCellFlags(context.Background(), resilientServer, cell, vtgate.CellsToWatch) + if err != nil { + return fmt.Errorf("cells_to_watch validation failed: %v", err) + } + + plannerVersion, _ := plancontext.PlannerNameToVersion(plannerName) + + // pass nil for HealthCheck and it will be created + vtg := vtgate.Init(context.Background(), nil, resilientServer, cell, tabletTypes, plannerVersion) + + servenv.OnRun(func() { + // Flags are parsed now. Parse the template using the actual flag value and overwrite the current template. + discovery.ParseTabletURLTemplateFromFlag() + addStatusParts(vtg) + }) + servenv.OnClose(func() { + _ = vtg.Gateway().Close(context.Background()) + }) + servenv.RunDefault() + + return nil +} + +func init() { + servenv.RegisterDefaultFlags() + servenv.RegisterFlags() + servenv.RegisterGRPCServerFlags() + servenv.RegisterGRPCServerAuthFlags() + servenv.RegisterServiceMapFlag() + + servenv.MoveFlagsToCobraCommand(Main) + + acl.RegisterFlags(Main.Flags()) + Main.Flags().StringVar(&cell, "cell", cell, "cell to use") + Main.Flags().Var((*topoproto.TabletTypeListFlag)(&tabletTypesToWait), "tablet_types_to_wait", "Wait till connected for specified tablet types during Gateway initialization. Should be provided as a comma-separated set of tablet types.") + Main.Flags().StringVar(&plannerName, "planner-version", plannerName, "Sets the default planner to use when the session has not changed it. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right") + + Main.MarkFlagRequired("tablet_types_to_wait") +} diff --git a/go/cmd/vtgate/plugin_auth_clientcert.go b/go/cmd/vtgate/cli/plugin_auth_clientcert.go similarity index 98% rename from go/cmd/vtgate/plugin_auth_clientcert.go rename to go/cmd/vtgate/cli/plugin_auth_clientcert.go index 4f3d65ef626..1a1334e71ba 100644 --- a/go/cmd/vtgate/plugin_auth_clientcert.go +++ b/go/cmd/vtgate/cli/plugin_auth_clientcert.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports clientcert to register the client certificate implementation of AuthServer. diff --git a/go/cmd/vtgate/plugin_auth_ldap.go b/go/cmd/vtgate/cli/plugin_auth_ldap.go similarity index 98% rename from go/cmd/vtgate/plugin_auth_ldap.go rename to go/cmd/vtgate/cli/plugin_auth_ldap.go index 257f0742733..7dc5b246f72 100644 --- a/go/cmd/vtgate/plugin_auth_ldap.go +++ b/go/cmd/vtgate/cli/plugin_auth_ldap.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports ldapauthserver to register the LDAP implementation of AuthServer. diff --git a/go/cmd/vtgate/plugin_auth_static.go b/go/cmd/vtgate/cli/plugin_auth_static.go similarity index 98% rename from go/cmd/vtgate/plugin_auth_static.go rename to go/cmd/vtgate/cli/plugin_auth_static.go index 8e4a552cecf..9ffd60a79f2 100644 --- a/go/cmd/vtgate/plugin_auth_static.go +++ b/go/cmd/vtgate/cli/plugin_auth_static.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports staticauthserver to register the flat-file implementation of AuthServer. diff --git a/go/cmd/vtgate/plugin_auth_vault.go b/go/cmd/vtgate/cli/plugin_auth_vault.go similarity index 98% rename from go/cmd/vtgate/plugin_auth_vault.go rename to go/cmd/vtgate/cli/plugin_auth_vault.go index ca271b496ca..2aee32e3940 100644 --- a/go/cmd/vtgate/plugin_auth_vault.go +++ b/go/cmd/vtgate/cli/plugin_auth_vault.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports InitAuthServerVault to register the HashiCorp Vault implementation of AuthServer. diff --git a/go/cmd/vtgate/plugin_consultopo.go b/go/cmd/vtgate/cli/plugin_consultopo.go similarity index 98% rename from go/cmd/vtgate/plugin_consultopo.go rename to go/cmd/vtgate/cli/plugin_consultopo.go index 59d6774fdbc..a128f294a42 100644 --- a/go/cmd/vtgate/plugin_consultopo.go +++ b/go/cmd/vtgate/cli/plugin_consultopo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports consultopo to register the consul implementation of TopoServer. diff --git a/go/cmd/vtorc/plugin_etcd2topo.go b/go/cmd/vtgate/cli/plugin_etcd2topo.go similarity index 98% rename from go/cmd/vtorc/plugin_etcd2topo.go rename to go/cmd/vtgate/cli/plugin_etcd2topo.go index d99ef51d4af..5a51923cf00 100644 --- a/go/cmd/vtorc/plugin_etcd2topo.go +++ b/go/cmd/vtgate/cli/plugin_etcd2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports etcd2topo to register the etcd2 implementation of TopoServer. diff --git a/go/cmd/vtgate/plugin_grpctabletconn.go b/go/cmd/vtgate/cli/plugin_grpctabletconn.go similarity index 98% rename from go/cmd/vtgate/plugin_grpctabletconn.go rename to go/cmd/vtgate/cli/plugin_grpctabletconn.go index 08291a7c916..4a97e36eec4 100644 --- a/go/cmd/vtgate/plugin_grpctabletconn.go +++ b/go/cmd/vtgate/cli/plugin_grpctabletconn.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC tabletconn client diff --git a/go/cmd/vtgate/plugin_grpcvtgateservice.go b/go/cmd/vtgate/cli/plugin_grpcvtgateservice.go similarity index 98% rename from go/cmd/vtgate/plugin_grpcvtgateservice.go rename to go/cmd/vtgate/cli/plugin_grpcvtgateservice.go index 4ee159710ca..bbbc6e3039e 100644 --- a/go/cmd/vtgate/plugin_grpcvtgateservice.go +++ b/go/cmd/vtgate/cli/plugin_grpcvtgateservice.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC vtgateservice server diff --git a/go/cmd/vtgate/plugin_opentracing.go b/go/cmd/vtgate/cli/plugin_opentracing.go similarity index 98% rename from go/cmd/vtgate/plugin_opentracing.go rename to go/cmd/vtgate/cli/plugin_opentracing.go index 9a6786d3d64..7ec15423f5a 100644 --- a/go/cmd/vtgate/plugin_opentracing.go +++ b/go/cmd/vtgate/cli/plugin_opentracing.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "vitess.io/vitess/go/trace" diff --git a/go/cmd/vtgate/plugin_opentsdb.go b/go/cmd/vtgate/cli/plugin_opentsdb.go similarity index 98% rename from go/cmd/vtgate/plugin_opentsdb.go rename to go/cmd/vtgate/cli/plugin_opentsdb.go index 0988f3b9a64..37c81f271c9 100644 --- a/go/cmd/vtgate/plugin_opentsdb.go +++ b/go/cmd/vtgate/cli/plugin_opentsdb.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports opentsdb to register the opentsdb stats backend. diff --git a/go/cmd/vtgate/plugin_prometheusbackend.go b/go/cmd/vtgate/cli/plugin_prometheusbackend.go similarity index 98% rename from go/cmd/vtgate/plugin_prometheusbackend.go rename to go/cmd/vtgate/cli/plugin_prometheusbackend.go index 6bffd133332..a1797abdcd1 100644 --- a/go/cmd/vtgate/plugin_prometheusbackend.go +++ b/go/cmd/vtgate/cli/plugin_prometheusbackend.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports Prometheus to allow for instrumentation // with the Prometheus client library diff --git a/tools/coverage-go/vtgate_test.go b/go/cmd/vtgate/cli/plugin_statsd.go similarity index 80% rename from tools/coverage-go/vtgate_test.go rename to go/cmd/vtgate/cli/plugin_statsd.go index c3601fc0372..fc42fa4f447 100644 --- a/tools/coverage-go/vtgate_test.go +++ b/go/cmd/vtgate/cli/plugin_statsd.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,10 +13,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package main -import "testing" +package cli -func TestVtgate(t *testing.T) { - main() +import "vitess.io/vitess/go/stats/statsd" + +func init() { + statsd.Init("vtgate") } diff --git a/go/cmd/vtgate/plugin_zk2topo.go b/go/cmd/vtgate/cli/plugin_zk2topo.go similarity index 98% rename from go/cmd/vtgate/plugin_zk2topo.go rename to go/cmd/vtgate/cli/plugin_zk2topo.go index d75a1c6bcb4..1870a3b2bb3 100644 --- a/go/cmd/vtgate/plugin_zk2topo.go +++ b/go/cmd/vtgate/cli/plugin_zk2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( // Imports and register the zk2 TopologyServer diff --git a/go/cmd/vtgate/status.go b/go/cmd/vtgate/cli/status.go similarity index 99% rename from go/cmd/vtgate/status.go rename to go/cmd/vtgate/cli/status.go index 436a1301438..6efbca51dc7 100644 --- a/go/cmd/vtgate/status.go +++ b/go/cmd/vtgate/cli/status.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "vitess.io/vitess/go/vt/discovery" diff --git a/go/cmd/vtgate/docgen/main.go b/go/cmd/vtgate/docgen/main.go new file mode 100644 index 00000000000..763d38b7e7b --- /dev/null +++ b/go/cmd/vtgate/docgen/main.go @@ -0,0 +1,42 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/vtgate/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } + + // Here because we inadvertently transfer the required "tablet-types-to-wait" + // flag during vtgate/cli's init func. + pflag.CommandLine = cmd.Flags() + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/vtgate/index.go b/go/cmd/vtgate/index.go index be06ed6f10b..aec221b5339 100644 --- a/go/cmd/vtgate/index.go +++ b/go/cmd/vtgate/index.go @@ -18,6 +18,8 @@ package main import ( "net/http" + + "vitess.io/vitess/go/vt/servenv" ) // This is a separate file so it can be selectively included/excluded from @@ -25,7 +27,7 @@ import ( func init() { // Anything unrecognized gets redirected to the status page. - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + servenv.HTTPHandleFunc("/", func(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, "/debug/status", http.StatusFound) }) } diff --git a/go/cmd/vtgate/plugin_kubernetestopo.go b/go/cmd/vtgate/plugin_kubernetestopo.go deleted file mode 100644 index 671d0c8321f..00000000000 --- a/go/cmd/vtgate/plugin_kubernetestopo.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports k8stopo to register the kubernetes implementation of TopoServer. - -import ( - _ "vitess.io/vitess/go/vt/topo/k8stopo" -) diff --git a/go/cmd/vtgate/plugin_statsd.go b/go/cmd/vtgate/plugin_statsd.go deleted file mode 100644 index ae2ecb5b2e0..00000000000 --- a/go/cmd/vtgate/plugin_statsd.go +++ /dev/null @@ -1,7 +0,0 @@ -package main - -import "vitess.io/vitess/go/stats/statsd" - -func init() { - statsd.Init("vtgate") -} diff --git a/go/cmd/vtgate/vtgate.go b/go/cmd/vtgate/vtgate.go index d043ecf4f95..fd81fe85a68 100644 --- a/go/cmd/vtgate/vtgate.go +++ b/go/cmd/vtgate/vtgate.go @@ -17,153 +17,12 @@ limitations under the License. package main import ( - "context" - "math/rand" - "strings" - "time" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/exit" - "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/cmd/vtgate/cli" "vitess.io/vitess/go/vt/log" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/srvtopo" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) -var ( - cell = "" - tabletTypesToWait []topodatapb.TabletType - plannerName string -) - -func registerFlags(fs *pflag.FlagSet) { - fs.StringVar(&cell, "cell", cell, "cell to use") - fs.Var((*topoproto.TabletTypeListFlag)(&tabletTypesToWait), "tablet_types_to_wait", "Wait till connected for specified tablet types during Gateway initialization. Should be provided as a comma-separated set of tablet types.") - fs.StringVar(&plannerName, "planner-version", plannerName, "Sets the default planner to use when the session has not changed it. Valid values are: V3, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the gen4 planner and falls back to the V3 planner if the gen4 fails.") - - acl.RegisterFlags(fs) -} - -var resilientServer *srvtopo.ResilientServer - -func init() { - rand.Seed(time.Now().UnixNano()) - servenv.RegisterDefaultFlags() - servenv.RegisterFlags() - servenv.RegisterGRPCServerFlags() - servenv.RegisterGRPCServerAuthFlags() - servenv.RegisterServiceMapFlag() - servenv.OnParse(registerFlags) -} - -// CheckCellFlags will check validation of cell and cells_to_watch flag -// it will help to avoid strange behaviors when vtgate runs but actually does not work -func CheckCellFlags(ctx context.Context, serv srvtopo.Server, cell string, cellsToWatch string) error { - // topo check - var topoServer *topo.Server - if serv != nil { - var err error - topoServer, err = serv.GetTopoServer() - if err != nil { - log.Exitf("Unable to create gateway: %v", err) - } - } else { - log.Exitf("topo server cannot be nil") - } - cellsInTopo, err := topoServer.GetKnownCells(ctx) - if err != nil { - return err - } - if len(cellsInTopo) == 0 { - return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "topo server should have at least one cell") - } - - // cell valid check - if cell == "" { - return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell flag must be set") - } - hasCell := false - for _, v := range cellsInTopo { - if v == cell { - hasCell = true - break - } - } - if !hasCell { - return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell:[%v] does not exist in topo", cell) - } - - // cells_to_watch valid check - cells := make([]string, 0, 1) - for _, c := range strings.Split(cellsToWatch, ",") { - if c == "" { - continue - } - // cell should contained in cellsInTopo - if exists := topo.InCellList(c, cellsInTopo); !exists { - return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell: [%v] is not valid. Available cells: [%v]", c, strings.Join(cellsInTopo, ",")) - } - cells = append(cells, c) - } - if len(cells) == 0 { - return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cells_to_watch flag cannot be empty") - } - - return nil -} - func main() { - defer exit.Recover() - - servenv.ParseFlags("vtgate") - servenv.Init() - - ts := topo.Open() - defer ts.Close() - - resilientServer = srvtopo.NewResilientServer(ts, "ResilientSrvTopoServer") - - tabletTypes := make([]topodatapb.TabletType, 0, 1) - if len(tabletTypesToWait) != 0 { - for _, tt := range tabletTypesToWait { - if topoproto.IsServingType(tt) { - tabletTypes = append(tabletTypes, tt) - } - } - } else { - log.Exitf("tablet_types_to_wait flag must be set") - } - - if len(tabletTypes) == 0 { - log.Exitf("tablet_types_to_wait should contain at least one serving tablet type") - } - - err := CheckCellFlags(context.Background(), resilientServer, cell, vtgate.CellsToWatch) - if err != nil { - log.Exitf("cells_to_watch validation failed: %v", err) + if err := cli.Main.Execute(); err != nil { + log.Exit(err) } - - plannerVersion, _ := plancontext.PlannerNameToVersion(plannerName) - - // pass nil for HealthCheck and it will be created - vtg := vtgate.Init(context.Background(), nil, resilientServer, cell, tabletTypes, plannerVersion) - - servenv.OnRun(func() { - // Flags are parsed now. Parse the template using the actual flag value and overwrite the current template. - discovery.ParseTabletURLTemplateFromFlag() - addStatusParts(vtg) - }) - servenv.OnClose(func() { - _ = vtg.Gateway().Close(context.Background()) - }) - servenv.RunDefault() } diff --git a/go/cmd/vtgateclienttest/services/callerid.go b/go/cmd/vtgateclienttest/services/callerid.go index 54893f3bb07..0e4a7da8495 100644 --- a/go/cmd/vtgateclienttest/services/callerid.go +++ b/go/cmd/vtgateclienttest/services/callerid.go @@ -17,21 +17,19 @@ limitations under the License. package services import ( + "context" "encoding/json" "fmt" "strings" - "context" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" - "vitess.io/vitess/go/vt/vtgate/vtgateservice" - querypb "vitess.io/vitess/go/vt/proto/query" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vtgate/vtgateservice" ) // CallerIDPrefix is the prefix to send with queries so they go @@ -77,11 +75,11 @@ func (c *callerIDClient) checkCallerID(ctx context.Context, received string) (bo return true, fmt.Errorf("SUCCESS: callerid matches") } -func (c *callerIDClient) Execute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { +func (c *callerIDClient) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { if ok, err := c.checkCallerID(ctx, sql); ok { return session, nil, err } - return c.fallbackClient.Execute(ctx, session, sql, bindVariables) + return c.fallbackClient.Execute(ctx, mysqlCtx, session, sql, bindVariables) } func (c *callerIDClient) ExecuteBatch(ctx context.Context, session *vtgatepb.Session, sqlList []string, bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) { @@ -93,9 +91,9 @@ func (c *callerIDClient) ExecuteBatch(ctx context.Context, session *vtgatepb.Ses return c.fallbackClient.ExecuteBatch(ctx, session, sqlList, bindVariablesList) } -func (c *callerIDClient) StreamExecute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) error { +func (c *callerIDClient) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { if ok, err := c.checkCallerID(ctx, sql); ok { - return err + return session, err } - return c.fallbackClient.StreamExecute(ctx, session, sql, bindVariables, callback) + return c.fallbackClient.StreamExecute(ctx, mysqlCtx, session, sql, bindVariables, callback) } diff --git a/go/cmd/vtgateclienttest/services/echo.go b/go/cmd/vtgateclienttest/services/echo.go index 2181009be47..5a4d78aeb3e 100644 --- a/go/cmd/vtgateclienttest/services/echo.go +++ b/go/cmd/vtgateclienttest/services/echo.go @@ -18,13 +18,13 @@ package services import ( "bytes" + "context" "fmt" "reflect" "sort" "strings" - "context" - + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/vtgate/vtgateservice" @@ -78,13 +78,13 @@ func echoQueryResult(vals map[string]any) *sqltypes.Result { // The first two returned fields are always a field with a MySQL NULL value, // and another field with a zero-length string. // Client tests can use this to check that they correctly distinguish the two. - qr.Fields = append(qr.Fields, &querypb.Field{Name: "null", Type: sqltypes.VarBinary}) + qr.Fields = append(qr.Fields, &querypb.Field{Name: "null", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}) row = append(row, sqltypes.NULL) - qr.Fields = append(qr.Fields, &querypb.Field{Name: "emptyString", Type: sqltypes.VarBinary}) + qr.Fields = append(qr.Fields, &querypb.Field{Name: "emptyString", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}) row = append(row, sqltypes.NewVarBinary("")) for k, v := range vals { - qr.Fields = append(qr.Fields, &querypb.Field{Name: k, Type: sqltypes.VarBinary}) + qr.Fields = append(qr.Fields, &querypb.Field{Name: k, Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}) val := reflect.ValueOf(v) if val.Kind() == reflect.Map { @@ -98,7 +98,7 @@ func echoQueryResult(vals map[string]any) *sqltypes.Result { return qr } -func (c *echoClient) Execute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { +func (c *echoClient) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { if strings.HasPrefix(sql, EchoPrefix) { return session, echoQueryResult(map[string]any{ "callerId": callerid.EffectiveCallerIDFromContext(ctx), @@ -107,10 +107,10 @@ func (c *echoClient) Execute(ctx context.Context, session *vtgatepb.Session, sql "session": session, }), nil } - return c.fallbackClient.Execute(ctx, session, sql, bindVariables) + return c.fallbackClient.Execute(ctx, mysqlCtx, session, sql, bindVariables) } -func (c *echoClient) StreamExecute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) error { +func (c *echoClient) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { if strings.HasPrefix(sql, EchoPrefix) { callback(echoQueryResult(map[string]any{ "callerId": callerid.EffectiveCallerIDFromContext(ctx), @@ -118,9 +118,9 @@ func (c *echoClient) StreamExecute(ctx context.Context, session *vtgatepb.Sessio "bindVars": bindVariables, "session": session, })) - return nil + return session, nil } - return c.fallbackClient.StreamExecute(ctx, session, sql, bindVariables, callback) + return c.fallbackClient.StreamExecute(ctx, mysqlCtx, session, sql, bindVariables, callback) } func (c *echoClient) ExecuteBatch(ctx context.Context, session *vtgatepb.Session, sqlList []string, bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) { diff --git a/go/cmd/vtgateclienttest/services/errors.go b/go/cmd/vtgateclienttest/services/errors.go index 9a4a5e39366..ad877054850 100644 --- a/go/cmd/vtgateclienttest/services/errors.go +++ b/go/cmd/vtgateclienttest/services/errors.go @@ -17,9 +17,8 @@ limitations under the License. package services import ( - "strings" - "context" + "strings" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/vterrors" @@ -111,14 +110,14 @@ func trimmedRequestToError(received string) error { } } -func (c *errorClient) Execute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { +func (c *errorClient) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { if err := requestToPartialError(sql, session); err != nil { return session, nil, err } if err := requestToError(sql); err != nil { return session, nil, err } - return c.fallbackClient.Execute(ctx, session, sql, bindVariables) + return c.fallbackClient.Execute(ctx, mysqlCtx, session, sql, bindVariables) } func (c *errorClient) ExecuteBatch(ctx context.Context, session *vtgatepb.Session, sqlList []string, bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) { @@ -133,11 +132,11 @@ func (c *errorClient) ExecuteBatch(ctx context.Context, session *vtgatepb.Sessio return c.fallbackClient.ExecuteBatch(ctx, session, sqlList, bindVariablesList) } -func (c *errorClient) StreamExecute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) error { +func (c *errorClient) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { if err := requestToError(sql); err != nil { - return err + return session, err } - return c.fallbackClient.StreamExecute(ctx, session, sql, bindVariables, callback) + return c.fallbackClient.StreamExecute(ctx, mysqlCtx, session, sql, bindVariables, callback) } func (c *errorClient) Prepare(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, []*querypb.Field, error) { diff --git a/go/cmd/vtgateclienttest/services/fallback.go b/go/cmd/vtgateclienttest/services/fallback.go index 02f9239260b..72175fe01ce 100644 --- a/go/cmd/vtgateclienttest/services/fallback.go +++ b/go/cmd/vtgateclienttest/services/fallback.go @@ -40,16 +40,16 @@ func newFallbackClient(fallback vtgateservice.VTGateService) fallbackClient { return fallbackClient{fallback: fallback} } -func (c fallbackClient) Execute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { - return c.fallback.Execute(ctx, session, sql, bindVariables) +func (c fallbackClient) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { + return c.fallback.Execute(ctx, mysqlCtx, session, sql, bindVariables) } func (c fallbackClient) ExecuteBatch(ctx context.Context, session *vtgatepb.Session, sqlList []string, bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) { return c.fallback.ExecuteBatch(ctx, session, sqlList, bindVariablesList) } -func (c fallbackClient) StreamExecute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) error { - return c.fallback.StreamExecute(ctx, session, sql, bindVariables, callback) +func (c fallbackClient) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { + return c.fallback.StreamExecute(ctx, mysqlCtx, session, sql, bindVariables, callback) } func (c fallbackClient) Prepare(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, []*querypb.Field, error) { diff --git a/go/cmd/vtgateclienttest/services/terminal.go b/go/cmd/vtgateclienttest/services/terminal.go index 85fa664c2c2..7245be547ac 100644 --- a/go/cmd/vtgateclienttest/services/terminal.go +++ b/go/cmd/vtgateclienttest/services/terminal.go @@ -17,10 +17,11 @@ limitations under the License. package services import ( + "context" "errors" "fmt" - "context" + "vitess.io/vitess/go/vt/vtgate/vtgateservice" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/tb" @@ -42,7 +43,7 @@ func newTerminalClient() *terminalClient { return &terminalClient{} } -func (c *terminalClient) Execute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { +func (c *terminalClient) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { if sql == "quit://" { log.Fatal("Received quit:// query. Going down.") } @@ -58,8 +59,8 @@ func (c *terminalClient) ExecuteBatch(ctx context.Context, session *vtgatepb.Ses return session, nil, errTerminal } -func (c *terminalClient) StreamExecute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) error { - return errTerminal +func (c *terminalClient) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { + return session, errTerminal } func (c *terminalClient) Prepare(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, []*querypb.Field, error) { diff --git a/go/cmd/vtgr/main.go b/go/cmd/vtgr/main.go deleted file mode 100644 index 23e932e8f5d..00000000000 --- a/go/cmd/vtgr/main.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "context" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vtgr" -) - -func main() { - var clustersToWatch []string - servenv.OnParseFor("vtgr", func(fs *pflag.FlagSet) { - fs.StringSliceVar(&clustersToWatch, "clusters_to_watch", nil, `Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80"`) - - acl.RegisterFlags(fs) - }) - servenv.ParseFlags("vtgr") - - // openTabletDiscovery will open up a connection to topo server - // and populate the tablets in memory - vtgr := vtgr.OpenTabletDiscovery(context.Background(), nil, clustersToWatch) - vtgr.RefreshCluster() - vtgr.ScanAndRepair() - - // block here so that we don't exit directly - select {} -} diff --git a/go/cmd/vtorc/cli/cli.go b/go/cmd/vtorc/cli/cli.go new file mode 100644 index 00000000000..63e4a69068f --- /dev/null +++ b/go/cmd/vtorc/cli/cli.go @@ -0,0 +1,104 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vtorc/config" + "vitess.io/vitess/go/vt/vtorc/inst" + "vitess.io/vitess/go/vt/vtorc/logic" + "vitess.io/vitess/go/vt/vtorc/server" +) + +var ( + configFile string + Main = &cobra.Command{ + Use: "vtorc", + Short: "VTOrc is the automated fault detection and repair tool in Vitess.", + Example: `vtorc \ + --topo_implementation etcd2 \ + --topo_global_server_address localhost:2379 \ + --topo_global_root /vitess/global \ + --log_dir $VTDATAROOT/tmp \ + --port 15000 \ + --recovery-period-block-duration "10m" \ + --instance-poll-time "1s" \ + --topo-information-refresh-duration "30s" \ + --alsologtostderr`, + Args: cobra.NoArgs, + Version: servenv.AppVersion.String(), + PreRunE: servenv.CobraPreRunE, + Run: run, + } +) + +func run(cmd *cobra.Command, args []string) { + servenv.Init() + config.UpdateConfigValuesFromFlags() + inst.RegisterStats() + + log.Info("starting vtorc") + if len(configFile) > 0 { + config.ForceRead(configFile) + } else { + config.Read("/etc/vtorc.conf.json", "conf/vtorc.conf.json", "vtorc.conf.json") + } + if config.Config.AuditToSyslog { + inst.EnableAuditSyslog() + } + config.MarkConfigurationLoaded() + + // Log final config values to debug if something goes wrong. + config.LogConfigValues() + server.StartVTOrcDiscovery() + + server.RegisterVTOrcAPIEndpoints() + servenv.OnRun(func() { + addStatusParts() + }) + + // For backward compatability, we require that VTOrc functions even when the --port flag is not provided. + // In this case, it should function like before but without the servenv pages. + // Therefore, currently we don't check for the --port flag to be necessary, but release 16+ that check + // can be added to always have the serenv page running in VTOrc. + servenv.RunDefault() +} + +// addStatusParts adds UI parts to the /debug/status page of VTOrc +func addStatusParts() { + servenv.AddStatusPart("Recent Recoveries", logic.TopologyRecoveriesTemplate, func() any { + recoveries, _ := logic.ReadRecentRecoveries(false, 0) + return recoveries + }) +} + +func init() { + servenv.RegisterDefaultFlags() + servenv.RegisterFlags() + + servenv.MoveFlagsToCobraCommand(Main) + + logic.RegisterFlags(Main.Flags()) + server.RegisterFlags(Main.Flags()) + config.RegisterFlags(Main.Flags()) + acl.RegisterFlags(Main.Flags()) + Main.Flags().StringVar(&configFile, "config", "", "config file name") +} diff --git a/go/cmd/vttablet/plugin_consultopo.go b/go/cmd/vtorc/cli/plugin_consultopo.go similarity index 98% rename from go/cmd/vttablet/plugin_consultopo.go rename to go/cmd/vtorc/cli/plugin_consultopo.go index 59d6774fdbc..a128f294a42 100644 --- a/go/cmd/vttablet/plugin_consultopo.go +++ b/go/cmd/vtorc/cli/plugin_consultopo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports consultopo to register the consul implementation of TopoServer. diff --git a/go/cmd/vtgate/plugin_etcd2topo.go b/go/cmd/vtorc/cli/plugin_etcd2topo.go similarity index 98% rename from go/cmd/vtgate/plugin_etcd2topo.go rename to go/cmd/vtorc/cli/plugin_etcd2topo.go index d99ef51d4af..5a51923cf00 100644 --- a/go/cmd/vtgate/plugin_etcd2topo.go +++ b/go/cmd/vtorc/cli/plugin_etcd2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports etcd2topo to register the etcd2 implementation of TopoServer. diff --git a/go/cmd/vttablet/plugin_grpctmclient.go b/go/cmd/vtorc/cli/plugin_grpctmclient.go similarity index 98% rename from go/cmd/vttablet/plugin_grpctmclient.go rename to go/cmd/vtorc/cli/plugin_grpctmclient.go index ce554da96df..8cd349c7f87 100644 --- a/go/cmd/vttablet/plugin_grpctmclient.go +++ b/go/cmd/vtorc/cli/plugin_grpctmclient.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC tabletmanager client diff --git a/go/cmd/vtorc/plugin_prometheusbackend.go b/go/cmd/vtorc/cli/plugin_prometheusbackend.go similarity index 98% rename from go/cmd/vtorc/plugin_prometheusbackend.go rename to go/cmd/vtorc/cli/plugin_prometheusbackend.go index 868e097ade2..8cb6e034d8a 100644 --- a/go/cmd/vtorc/plugin_prometheusbackend.go +++ b/go/cmd/vtorc/cli/plugin_prometheusbackend.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports Prometheus to allow for instrumentation // with the Prometheus client library diff --git a/go/cmd/vtorc/plugin_zk2topo.go b/go/cmd/vtorc/cli/plugin_zk2topo.go similarity index 98% rename from go/cmd/vtorc/plugin_zk2topo.go rename to go/cmd/vtorc/cli/plugin_zk2topo.go index ebf385ec1af..d71a7e2e196 100644 --- a/go/cmd/vtorc/plugin_zk2topo.go +++ b/go/cmd/vtorc/cli/plugin_zk2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the zk2 TopologyServer diff --git a/tools/coverage-go/vtctld_test.go b/go/cmd/vtorc/docgen/main.go similarity index 54% rename from tools/coverage-go/vtctld_test.go rename to go/cmd/vtorc/docgen/main.go index eaa7ebda1e5..22daccab302 100644 --- a/tools/coverage-go/vtctld_test.go +++ b/go/cmd/vtorc/docgen/main.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,10 +13,25 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package main -import "testing" +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/vtorc/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } -func TestVtctld(t *testing.T) { - main() + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() } diff --git a/go/cmd/vtorc/index.go b/go/cmd/vtorc/index.go index dcbe1113e53..43ad41850a4 100644 --- a/go/cmd/vtorc/index.go +++ b/go/cmd/vtorc/index.go @@ -18,11 +18,13 @@ package main import ( "net/http" + + "vitess.io/vitess/go/vt/servenv" ) func init() { // Anything unrecognized gets redirected to the status page. - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + servenv.HTTPHandleFunc("/", func(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, "/debug/status", http.StatusFound) }) } diff --git a/go/cmd/vtorc/main.go b/go/cmd/vtorc/main.go index 0d28c8da9bf..101265b16c5 100644 --- a/go/cmd/vtorc/main.go +++ b/go/cmd/vtorc/main.go @@ -17,140 +17,18 @@ package main import ( - "os" - "reflect" - "strings" - _ "github.com/go-sql-driver/mysql" - "github.com/spf13/pflag" _ "modernc.org/sqlite" - "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/vt/grpccommon" + "vitess.io/vitess/go/cmd/vtorc/cli" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vtorc/config" - "vitess.io/vitess/go/vt/vtorc/inst" - "vitess.io/vitess/go/vt/vtorc/logic" - "vitess.io/vitess/go/vt/vtorc/server" ) -// transformArgsForPflag turns a slice of raw args passed on the command line, -// possibly incompatible with pflag (because the user is expecting stdlib flag -// parsing behavior) and transforms them into the arguments that should have -// been passed to conform to pflag parsing behavior. -// -// the primary function is to catch any cases where the user specified a longopt -// with only a single hyphen (e.g. `-myflag`) and correct it to be -// double-hyphenated. -// -// note that this transformation does _not_ actually validate the arguments; for -// example if the user specifies `--myflag`, but the FlagSet has no such flag -// defined, that will still appear in the returned result and will (correctly) -// cause a parse error later on in `main`, at which point the CLI usage will -// be printed. -// -// note also that this transformation is incomplete. pflag allows interspersing -// of flag and positional arguments, whereas stdlib flag does not. however, for -// vtorc specifically, with the exception of `vtorc help `, the CLI only -// consumes flag arguments (in other words, there are no supported subcommands), -// so this is a non-issue, and is not implemented here in order to make this -// function a bit simpler. -func transformArgsForPflag(fs *pflag.FlagSet, args []string) (result []string) { - for i, arg := range args { - switch { - case arg == "--": - // pflag stops parsing at `--`, so we're done transforming the CLI - // arguments. Just append everything remaining and be done. - result = append(result, args[i:]...) - return result - case strings.HasPrefix(arg, "--"): - // Long-hand flag. Append it and continue. - result = append(result, arg) - case strings.HasPrefix(arg, "-"): - // Most complex case. This is either: - // 1. A legacy long-hand flag that needs a double-dash (e.g. `-myflag` => `--myflag`). - // 2. One _or more_ pflag shortopts all shoved together (think `rm -rf` as `rm -r -f`). - // - // In the latter case, we don't need to do any transformations, but - // in the former, we do. - name := strings.SplitN(arg[1:], "=", 2)[0] // discard any potential value (`-myflag` and `-myflag=10` both have the name of `myflag`) - if fs.Lookup(name) != nil || name == "help" { - // Case 1: We have a long opt with this name, so we need to - // prepend an additional hyphen. - result = append(result, "-"+arg) - } else { - // Case 2: No transformation needed. - result = append(result, arg) - } - default: - // Just a flag argument. Nothing to transform. - result = append(result, arg) - } - } - - return result -} - // main is the application's entry point. It will spawn an HTTP interface. func main() { - // TODO(ajm188): after v15, remove this pflag hack and use servenv.ParseFlags - // directly. - fs := pflag.NewFlagSet("vtorc", pflag.ExitOnError) - grpccommon.RegisterFlags(fs) - log.RegisterFlags(fs) - logutil.RegisterFlags(fs) - logic.RegisterFlags(fs) - server.RegisterFlags(fs) - config.RegisterFlags(fs) - servenv.RegisterDefaultFlags() - servenv.RegisterFlags() - acl.RegisterFlags(fs) - servenv.OnParseFor("vtorc", func(flags *pflag.FlagSet) { flags.AddFlagSet(fs) }) - - args := append([]string{}, os.Args...) - os.Args = os.Args[0:1] - - configFile := fs.String("config", "", "config file name") + // TODO: viperutil.BindFlags() - os.Args = append(os.Args, transformArgsForPflag(fs, args[1:])...) - if !reflect.DeepEqual(args, os.Args) { - // warn the user so they can adjust their CLI scripts - warning := `CLI args passed do not conform to pflag parsing behavior -The arguments have been transformed for compatibility as follows: - %v => %v -Please update your scripts before the next version, when this will begin to break. -` - log.Warningf(warning, args, os.Args) + if err := cli.Main.Execute(); err != nil { + log.Exit(err) } - - servenv.ParseFlags("vtorc") - config.UpdateConfigValuesFromFlags() - - log.Info("starting vtorc") - if len(*configFile) > 0 { - config.ForceRead(*configFile) - } else { - config.Read("/etc/vtorc.conf.json", "conf/vtorc.conf.json", "vtorc.conf.json") - } - if config.Config.AuditToSyslog { - inst.EnableAuditSyslog() - } - config.MarkConfigurationLoaded() - - // Log final config values to debug if something goes wrong. - config.LogConfigValues() - server.StartVTOrcDiscovery() - - server.RegisterVTOrcAPIEndpoints() - servenv.OnRun(func() { - addStatusParts() - }) - - // For backward compatability, we require that VTOrc functions even when the --port flag is not provided. - // In this case, it should function like before but without the servenv pages. - // Therefore, currently we don't check for the --port flag to be necessary, but release 16+ that check - // can be added to always have the serenv page running in VTOrc. - servenv.RunDefault() } diff --git a/go/cmd/vtorc/main_test.go b/go/cmd/vtorc/main_test.go deleted file mode 100644 index 5bbdcdaf981..00000000000 --- a/go/cmd/vtorc/main_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package main - -import ( - "strings" - "testing" - - "github.com/spf13/pflag" - "github.com/stretchr/testify/assert" -) - -func Test_transformArgsForPflag(t *testing.T) { - fs := pflag.NewFlagSet("test", pflag.ContinueOnError) - fs.String("foobar", "baz", "") - fs.StringP("name", "n", "", "") - fs.BoolP("debug", "d", true, "") - - tests := []struct { - args []string - transformed []string - }{ - { - args: []string{"--foobar=hello", "--name", "myname", "-d"}, - transformed: []string{"--foobar=hello", "--name", "myname", "-d"}, - }, - { - args: []string{"-foobar=hello", "-name", "myname", "-d"}, - transformed: []string{"--foobar=hello", "--name", "myname", "-d"}, - }, - { - args: []string{"--", "-foobar=hello"}, - transformed: []string{"--", "-foobar=hello"}, - }, - { - args: []string{"-dn"}, // combined shortopts - transformed: []string{"-dn"}, - }, - } - - for _, tt := range tests { - tt := tt - name := strings.Join(tt.args, " ") - - t.Run(name, func(t *testing.T) { - got := transformArgsForPflag(fs, tt.args) - assert.Equal(t, tt.transformed, got) - }) - } -} diff --git a/go/cmd/vtorc/plugin_kubernetestopo.go b/go/cmd/vtorc/plugin_kubernetestopo.go deleted file mode 100644 index 671d0c8321f..00000000000 --- a/go/cmd/vtorc/plugin_kubernetestopo.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports k8stopo to register the kubernetes implementation of TopoServer. - -import ( - _ "vitess.io/vitess/go/vt/topo/k8stopo" -) diff --git a/go/cmd/vtorc/status.go b/go/cmd/vtorc/status.go deleted file mode 100644 index a4d8a59d3fc..00000000000 --- a/go/cmd/vtorc/status.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vtorc/logic" -) - -// addStatusParts adds UI parts to the /debug/status page of VTOrc -func addStatusParts() { - servenv.AddStatusPart("Recent Recoveries", logic.TopologyRecoveriesTemplate, func() any { - recoveries, _ := logic.ReadRecentRecoveries(false, 0) - return recoveries - }) -} diff --git a/go/cmd/vttablet/cli/cli.go b/go/cmd/vttablet/cli/cli.go new file mode 100644 index 00000000000..e967aafc164 --- /dev/null +++ b/go/cmd/vttablet/cli/cli.go @@ -0,0 +1,277 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cli + +import ( + "bytes" + "context" + "fmt" + "os" + "time" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/vt/binlog" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/tableacl" + "vitess.io/vitess/go/vt/tableacl/simpleacl" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vttablet/onlineddl" + "vitess.io/vitess/go/vt/vttablet/tabletmanager" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + "vitess.io/vitess/go/vt/vttablet/tabletserver" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/yaml2" + "vitess.io/vitess/resources" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +var ( + enforceTableACLConfig bool + tableACLConfig string + tableACLConfigReloadInterval time.Duration + tabletPath string + tabletConfig string + + tm *tabletmanager.TabletManager + + Main = &cobra.Command{ + Use: "vttablet", + Short: "The VTTablet server controls a running MySQL server.", + Long: `The VTTablet server _controls_ a running MySQL server. VTTablet supports two primary types of deployments: + +* Managed MySQL (most common) +* External MySQL + +In addition to these deployment types, a partially managed VTTablet is also possible by setting ` + "`--disable_active_reparents`." + ` + +### Managed MySQL + +In this mode, Vitess actively manages MySQL. + +### External MySQL. + +In this mode, an external MySQL can be used such as AWS RDS, AWS Aurora, Google CloudSQL; or just an existing (vanilla) MySQL installation. + +See "Unmanaged Tablet" for the full guide. + +Even if a MySQL is external, you can still make vttablet perform some management functions. They are as follows: + +` + + "* `--disable_active_reparents`: If this flag is set, then any reparent or replica commands will not be allowed. These are InitShardPrimary, PlannedReparentShard, EmergencyReparentShard, and ReparentTablet. In this mode, you should use the TabletExternallyReparented command to inform vitess of the current primary.\n" + + "* `--replication_connect_retry`: This value is give to mysql when it connects a replica to the primary as the retry duration parameter.\n" + + "* `--enable_replication_reporter`: If this flag is set, then vttablet will transmit replica lag related information to the vtgates, which will allow it to balance load better. Additionally, enabling this will also cause vttablet to restart replication if it was stopped. However, it will do this only if `--disable_active_reparents` was not turned on.\n" + + "* `--heartbeat_enable` and `--heartbeat_interval duration`: cause vttablet to write heartbeats to the sidecar database. This information is also used by the replication reporter to assess replica lag.\n", + Example: ` +vttablet \ + --topo_implementation etcd2 \ + --topo_global_server_address localhost:2379 \ + --topo_global_root /vitess/ \ + --tablet-path $alias \ + --init_keyspace $keyspace \ + --init_shard $shard \ + --init_tablet_type $tablet_type \ + --port $port \ + --grpc_port $grpc_port \ + --service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream'` + "\n\n`$alias` needs to be of the form: `-id`, and the cell should match one of the local cells that was created in the topology. The id can be left padded with zeroes: `cell-100` and `cell-000000100` are synonymous.", + Args: cobra.NoArgs, + Version: servenv.AppVersion.String(), + PreRunE: servenv.CobraPreRunE, + RunE: run, + } +) + +func run(cmd *cobra.Command, args []string) error { + servenv.Init() + defer servenv.Close() + + tabletAlias, err := topoproto.ParseTabletAlias(tabletPath) + if err != nil { + return fmt.Errorf("failed to parse --tablet-path: %w", err) + } + + // config and mycnf initializations are intertwined. + config, mycnf, err := initConfig(tabletAlias) + if err != nil { + return err + } + + ts := topo.Open() + qsc, err := createTabletServer(context.Background(), config, ts, tabletAlias) + if err != nil { + ts.Close() + return err + } + + mysqld := mysqlctl.NewMysqld(config.DB) + servenv.OnClose(mysqld.Close) + + if err := extractOnlineDDL(); err != nil { + ts.Close() + return fmt.Errorf("failed to extract online DDL binaries: %w", err) + } + + // Initialize and start tm. + gRPCPort := int32(0) + if servenv.GRPCPort() != 0 { + gRPCPort = int32(servenv.GRPCPort()) + } + tablet, err := tabletmanager.BuildTabletFromInput(tabletAlias, int32(servenv.Port()), gRPCPort, config.DB) + if err != nil { + return fmt.Errorf("failed to parse --tablet-path: %w", err) + } + tm = &tabletmanager.TabletManager{ + BatchCtx: context.Background(), + TopoServer: ts, + Cnf: mycnf, + MysqlDaemon: mysqld, + DBConfigs: config.DB.Clone(), + QueryServiceControl: qsc, + UpdateStream: binlog.NewUpdateStream(ts, tablet.Keyspace, tabletAlias.Cell, qsc.SchemaEngine()), + VREngine: vreplication.NewEngine(config, ts, tabletAlias.Cell, mysqld, qsc.LagThrottler()), + VDiffEngine: vdiff.NewEngine(config, ts, tablet), + } + if err := tm.Start(tablet, config.Healthcheck.IntervalSeconds.Get()); err != nil { + ts.Close() + return fmt.Errorf("failed to parse --tablet-path or initialize DB credentials: %w", err) + } + servenv.OnClose(func() { + // Close the tm so that our topo entry gets pruned properly and any + // background goroutines that use the topo connection are stopped. + tm.Close() + + // tm uses ts. So, it should be closed after tm. + ts.Close() + }) + + servenv.RunDefault() + + return nil +} + +func initConfig(tabletAlias *topodatapb.TabletAlias) (*tabletenv.TabletConfig, *mysqlctl.Mycnf, error) { + tabletenv.Init() + // Load current config after tabletenv.Init, because it changes it. + config := tabletenv.NewCurrentConfig() + if err := config.Verify(); err != nil { + return nil, nil, fmt.Errorf("invalid config: %w", err) + } + + if tabletConfig != "" { + bytes, err := os.ReadFile(tabletConfig) + if err != nil { + return nil, nil, fmt.Errorf("error reading config file %s: %w", tabletConfig, err) + } + if err := yaml2.Unmarshal(bytes, config); err != nil { + return nil, nil, fmt.Errorf("error parsing config file %s: %w", bytes, err) + } + } + gotBytes, _ := yaml2.Marshal(config) + log.Infof("Loaded config file %s successfully:\n%s", tabletConfig, gotBytes) + + var ( + mycnf *mysqlctl.Mycnf + socketFile string + ) + // If no connection parameters were specified, load the mycnf file + // and use the socket from it. If connection parameters were specified, + // we assume that the mysql is not local, and we skip loading mycnf. + // This also means that backup and restore will not be allowed. + if !config.DB.HasGlobalSettings() { + var err error + if mycnf, err = mysqlctl.NewMycnfFromFlags(tabletAlias.Uid); err != nil { + return nil, nil, fmt.Errorf("mycnf read failed: %w", err) + } + + socketFile = mycnf.SocketFile + } else { + log.Info("connection parameters were specified. Not loading my.cnf.") + } + + // If connection parameters were specified, socketFile will be empty. + // Otherwise, the socketFile (read from mycnf) will be used to initialize + // dbconfigs. + config.DB.InitWithSocket(socketFile) + for _, cfg := range config.ExternalConnections { + cfg.InitWithSocket("") + } + return config, mycnf, nil +} + +// extractOnlineDDL extracts the gh-ost binary from this executable. gh-ost is appended +// to vttablet executable by `make build` with a go:embed +func extractOnlineDDL() error { + if binaryFileName, isOverride := onlineddl.GhostBinaryFileName(); !isOverride { + if err := os.WriteFile(binaryFileName, resources.GhostBinary, 0755); err != nil { + // One possibility of failure is that gh-ost is up and running. In that case, + // let's pause and check if the running gh-ost is exact same binary as the one we wish to extract. + foundBytes, _ := os.ReadFile(binaryFileName) + if bytes.Equal(resources.GhostBinary, foundBytes) { + // OK, it's the same binary, there is no need to extract the file anyway + return nil + } + return err + } + } + + return nil +} + +func createTabletServer(ctx context.Context, config *tabletenv.TabletConfig, ts *topo.Server, tabletAlias *topodatapb.TabletAlias) (*tabletserver.TabletServer, error) { + if tableACLConfig != "" { + // To override default simpleacl, other ACL plugins must set themselves to be default ACL factory + tableacl.Register("simpleacl", &simpleacl.Factory{}) + } else if enforceTableACLConfig { + return nil, fmt.Errorf("table acl config has to be specified with table-acl-config flag because enforce-tableacl-config is set.") + } + // creates and registers the query service + qsc := tabletserver.NewTabletServer(ctx, "", config, ts, tabletAlias) + servenv.OnRun(func() { + qsc.Register() + addStatusParts(qsc) + }) + servenv.OnClose(qsc.StopService) + qsc.InitACL(tableACLConfig, enforceTableACLConfig, tableACLConfigReloadInterval) + return qsc, nil +} + +func init() { + servenv.RegisterDefaultFlags() + servenv.RegisterFlags() + servenv.RegisterGRPCServerFlags() + servenv.RegisterGRPCServerAuthFlags() + servenv.RegisterServiceMapFlag() + + dbconfigs.RegisterFlags(dbconfigs.All...) + mysqlctl.RegisterFlags() + + servenv.MoveFlagsToCobraCommand(Main) + + acl.RegisterFlags(Main.Flags()) + Main.Flags().BoolVar(&enforceTableACLConfig, "enforce-tableacl-config", enforceTableACLConfig, "if this flag is true, vttablet will fail to start if a valid tableacl config does not exist") + Main.Flags().StringVar(&tableACLConfig, "table-acl-config", tableACLConfig, "path to table access checker config file; send SIGHUP to reload this file") + Main.Flags().DurationVar(&tableACLConfigReloadInterval, "table-acl-config-reload-interval", tableACLConfigReloadInterval, "Ticker to reload ACLs. Duration flag, format e.g.: 30s. Default: do not reload") + Main.Flags().StringVar(&tabletPath, "tablet-path", tabletPath, "tablet alias") + Main.Flags().StringVar(&tabletConfig, "tablet_config", tabletConfig, "YAML file config for tablet") +} diff --git a/go/cmd/vtctld/plugin_azblobbackupstorage.go b/go/cmd/vttablet/cli/plugin_azblobbackupstorage.go similarity index 97% rename from go/cmd/vtctld/plugin_azblobbackupstorage.go rename to go/cmd/vttablet/cli/plugin_azblobbackupstorage.go index a4ca64096a9..bdadc894aae 100644 --- a/go/cmd/vtctld/plugin_azblobbackupstorage.go +++ b/go/cmd/vttablet/cli/plugin_azblobbackupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/azblobbackupstorage" diff --git a/go/cmd/vtctld/plugin_cephbackupstorage.go b/go/cmd/vttablet/cli/plugin_cephbackupstorage.go similarity index 97% rename from go/cmd/vtctld/plugin_cephbackupstorage.go rename to go/cmd/vttablet/cli/plugin_cephbackupstorage.go index 6cd2d5619d0..171198f5e29 100644 --- a/go/cmd/vtctld/plugin_cephbackupstorage.go +++ b/go/cmd/vttablet/cli/plugin_cephbackupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/cephbackupstorage" diff --git a/go/cmd/vtorc/plugin_consultopo.go b/go/cmd/vttablet/cli/plugin_consultopo.go similarity index 98% rename from go/cmd/vtorc/plugin_consultopo.go rename to go/cmd/vttablet/cli/plugin_consultopo.go index 59d6774fdbc..a128f294a42 100644 --- a/go/cmd/vtorc/plugin_consultopo.go +++ b/go/cmd/vttablet/cli/plugin_consultopo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports consultopo to register the consul implementation of TopoServer. diff --git a/go/cmd/vttablet/plugin_etcd2topo.go b/go/cmd/vttablet/cli/plugin_etcd2topo.go similarity index 98% rename from go/cmd/vttablet/plugin_etcd2topo.go rename to go/cmd/vttablet/cli/plugin_etcd2topo.go index d99ef51d4af..5a51923cf00 100644 --- a/go/cmd/vttablet/plugin_etcd2topo.go +++ b/go/cmd/vttablet/cli/plugin_etcd2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports etcd2topo to register the etcd2 implementation of TopoServer. diff --git a/go/cmd/vttablet/plugin_filebackupstorage.go b/go/cmd/vttablet/cli/plugin_filebackupstorage.go similarity index 97% rename from go/cmd/vttablet/plugin_filebackupstorage.go rename to go/cmd/vttablet/cli/plugin_filebackupstorage.go index cf2ceb5150f..9edc82d6a1b 100644 --- a/go/cmd/vttablet/plugin_filebackupstorage.go +++ b/go/cmd/vttablet/cli/plugin_filebackupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage" diff --git a/go/cmd/vttablet/plugin_filecustomrule.go b/go/cmd/vttablet/cli/plugin_filecustomrule.go similarity index 98% rename from go/cmd/vttablet/plugin_filecustomrule.go rename to go/cmd/vttablet/cli/plugin_filecustomrule.go index 854c484d3c1..1bf3c4297d5 100644 --- a/go/cmd/vttablet/plugin_filecustomrule.go +++ b/go/cmd/vttablet/cli/plugin_filecustomrule.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the file custom rule source diff --git a/go/cmd/vttablet/plugin_filelogger.go b/go/cmd/vttablet/cli/plugin_filelogger.go similarity index 98% rename from go/cmd/vttablet/plugin_filelogger.go rename to go/cmd/vttablet/cli/plugin_filelogger.go index bc5d968d2f7..fd5104f69a8 100644 --- a/go/cmd/vttablet/plugin_filelogger.go +++ b/go/cmd/vttablet/cli/plugin_filelogger.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the file-based query logger diff --git a/go/cmd/vtctld/plugin_gcsbackupstorage.go b/go/cmd/vttablet/cli/plugin_gcsbackupstorage.go similarity index 97% rename from go/cmd/vtctld/plugin_gcsbackupstorage.go rename to go/cmd/vttablet/cli/plugin_gcsbackupstorage.go index 82a22cef1da..655583c8ca2 100644 --- a/go/cmd/vtctld/plugin_gcsbackupstorage.go +++ b/go/cmd/vttablet/cli/plugin_gcsbackupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/gcsbackupstorage" diff --git a/go/cmd/vttablet/plugin_grpcbinlogplayer.go b/go/cmd/vttablet/cli/plugin_grpcbinlogplayer.go similarity index 98% rename from go/cmd/vttablet/plugin_grpcbinlogplayer.go rename to go/cmd/vttablet/cli/plugin_grpcbinlogplayer.go index f8b2380c7c7..31920b97fae 100644 --- a/go/cmd/vttablet/plugin_grpcbinlogplayer.go +++ b/go/cmd/vttablet/cli/plugin_grpcbinlogplayer.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC binlog player diff --git a/go/cmd/vttablet/plugin_grpcbinlogstreamer.go b/go/cmd/vttablet/cli/plugin_grpcbinlogstreamer.go similarity index 98% rename from go/cmd/vttablet/plugin_grpcbinlogstreamer.go rename to go/cmd/vttablet/cli/plugin_grpcbinlogstreamer.go index 26683ea7ccf..716dd499785 100644 --- a/go/cmd/vttablet/plugin_grpcbinlogstreamer.go +++ b/go/cmd/vttablet/cli/plugin_grpcbinlogstreamer.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC binlog streamer diff --git a/go/cmd/vttablet/plugin_grpcqueryservice.go b/go/cmd/vttablet/cli/plugin_grpcqueryservice.go similarity index 98% rename from go/cmd/vttablet/plugin_grpcqueryservice.go rename to go/cmd/vttablet/cli/plugin_grpcqueryservice.go index 073c2009151..a46701d16aa 100644 --- a/go/cmd/vttablet/plugin_grpcqueryservice.go +++ b/go/cmd/vttablet/cli/plugin_grpcqueryservice.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC queryservice server diff --git a/go/cmd/vttablet/plugin_grpctabletconn.go b/go/cmd/vttablet/cli/plugin_grpctabletconn.go similarity index 98% rename from go/cmd/vttablet/plugin_grpctabletconn.go rename to go/cmd/vttablet/cli/plugin_grpctabletconn.go index 08291a7c916..4a97e36eec4 100644 --- a/go/cmd/vttablet/plugin_grpctabletconn.go +++ b/go/cmd/vttablet/cli/plugin_grpctabletconn.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC tabletconn client diff --git a/go/cmd/vttablet/plugin_grpcthrottlerserver.go b/go/cmd/vttablet/cli/plugin_grpcthrottlerserver.go similarity index 98% rename from go/cmd/vttablet/plugin_grpcthrottlerserver.go rename to go/cmd/vttablet/cli/plugin_grpcthrottlerserver.go index 40cce4bd51c..f25fdb73df3 100644 --- a/go/cmd/vttablet/plugin_grpcthrottlerserver.go +++ b/go/cmd/vttablet/cli/plugin_grpcthrottlerserver.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC throttler server. diff --git a/go/cmd/vtctld/plugin_grpctmclient.go b/go/cmd/vttablet/cli/plugin_grpctmclient.go similarity index 98% rename from go/cmd/vtctld/plugin_grpctmclient.go rename to go/cmd/vttablet/cli/plugin_grpctmclient.go index ce554da96df..8cd349c7f87 100644 --- a/go/cmd/vtctld/plugin_grpctmclient.go +++ b/go/cmd/vttablet/cli/plugin_grpctmclient.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC tabletmanager client diff --git a/go/cmd/vttablet/plugin_grpctmserver.go b/go/cmd/vttablet/cli/plugin_grpctmserver.go similarity index 98% rename from go/cmd/vttablet/plugin_grpctmserver.go rename to go/cmd/vttablet/cli/plugin_grpctmserver.go index 094d273fe39..6dee0146c21 100644 --- a/go/cmd/vttablet/plugin_grpctmserver.go +++ b/go/cmd/vttablet/cli/plugin_grpctmserver.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the gRPC tabletmanager server diff --git a/go/cmd/vttablet/plugin_opentracing.go b/go/cmd/vttablet/cli/plugin_opentracing.go similarity index 98% rename from go/cmd/vttablet/plugin_opentracing.go rename to go/cmd/vttablet/cli/plugin_opentracing.go index 942bb25c895..f836daf4036 100644 --- a/go/cmd/vttablet/plugin_opentracing.go +++ b/go/cmd/vttablet/cli/plugin_opentracing.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "vitess.io/vitess/go/trace" diff --git a/go/cmd/vttablet/plugin_opentsdb.go b/go/cmd/vttablet/cli/plugin_opentsdb.go similarity index 98% rename from go/cmd/vttablet/plugin_opentsdb.go rename to go/cmd/vttablet/cli/plugin_opentsdb.go index 494dbbee20d..328628c2a3d 100644 --- a/go/cmd/vttablet/plugin_opentsdb.go +++ b/go/cmd/vttablet/cli/plugin_opentsdb.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports opentsdb to register the opentsdb stats backend. diff --git a/go/cmd/vttablet/plugin_prometheusbackend.go b/go/cmd/vttablet/cli/plugin_prometheusbackend.go similarity index 98% rename from go/cmd/vttablet/plugin_prometheusbackend.go rename to go/cmd/vttablet/cli/plugin_prometheusbackend.go index 4066b5ba6ec..a169c6d9777 100644 --- a/go/cmd/vttablet/plugin_prometheusbackend.go +++ b/go/cmd/vttablet/cli/plugin_prometheusbackend.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // This plugin imports Prometheus to allow for instrumentation // with the Prometheus client library diff --git a/go/cmd/vtctld/plugin_s3backupstorage.go b/go/cmd/vttablet/cli/plugin_s3backupstorage.go similarity index 97% rename from go/cmd/vtctld/plugin_s3backupstorage.go rename to go/cmd/vttablet/cli/plugin_s3backupstorage.go index a5b5c671ebb..4b3ecb33edb 100644 --- a/go/cmd/vtctld/plugin_s3backupstorage.go +++ b/go/cmd/vttablet/cli/plugin_s3backupstorage.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( _ "vitess.io/vitess/go/vt/mysqlctl/s3backupstorage" diff --git a/tools/coverage-go/mysqlctl_test.go b/go/cmd/vttablet/cli/plugin_statsd.go similarity index 80% rename from tools/coverage-go/mysqlctl_test.go rename to go/cmd/vttablet/cli/plugin_statsd.go index f3d6876a2d4..189e0367eb0 100644 --- a/tools/coverage-go/mysqlctl_test.go +++ b/go/cmd/vttablet/cli/plugin_statsd.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,10 +13,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli -import "testing" +import "vitess.io/vitess/go/stats/statsd" -func TestMysqlCtl(t *testing.T) { - main() +func init() { + statsd.Init("vttablet") } diff --git a/go/cmd/vttablet/plugin_sysloglogger.go b/go/cmd/vttablet/cli/plugin_sysloglogger.go similarity index 98% rename from go/cmd/vttablet/plugin_sysloglogger.go rename to go/cmd/vttablet/cli/plugin_sysloglogger.go index 4c57ad006c3..a7260d6f8cc 100644 --- a/go/cmd/vttablet/plugin_sysloglogger.go +++ b/go/cmd/vttablet/cli/plugin_sysloglogger.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the syslog-based query logger diff --git a/go/cmd/vttablet/plugin_topocustomrule.go b/go/cmd/vttablet/cli/plugin_topocustomrule.go similarity index 98% rename from go/cmd/vttablet/plugin_topocustomrule.go rename to go/cmd/vttablet/cli/plugin_topocustomrule.go index cef81458155..9fce319558e 100644 --- a/go/cmd/vttablet/plugin_topocustomrule.go +++ b/go/cmd/vttablet/cli/plugin_topocustomrule.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the topo custom rule source diff --git a/go/cmd/vttablet/plugin_zk2topo.go b/go/cmd/vttablet/cli/plugin_zk2topo.go similarity index 98% rename from go/cmd/vttablet/plugin_zk2topo.go rename to go/cmd/vttablet/cli/plugin_zk2topo.go index ebf385ec1af..d71a7e2e196 100644 --- a/go/cmd/vttablet/plugin_zk2topo.go +++ b/go/cmd/vttablet/cli/plugin_zk2topo.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli // Imports and register the zk2 TopologyServer diff --git a/go/cmd/vttablet/status.go b/go/cmd/vttablet/cli/status.go similarity index 82% rename from go/cmd/vttablet/status.go rename to go/cmd/vttablet/cli/status.go index fa14192bc1e..ec2460b0d19 100644 --- a/go/cmd/vttablet/status.go +++ b/go/cmd/vttablet/cli/status.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package cli import ( "vitess.io/vitess/go/vt/servenv" @@ -52,10 +52,10 @@ var ( " +/<\u002f?(\w+)(\s+\w+=(\w+|"[^"]*"|'[^']*'))*>/ "No Tag Here ..." +"(\{\\f\d*)\\([^;]+;)" G "<0>{\\f0\\Some Font names here;" +"(\{\\f\d*)\\([^;]+;)" G "<0>{\\f1\\fswiss\\fcharset0\\fprq2{\\*\\panose 020b0604020202020204}Arial;" +"(\{\\f\d*)\\([^;]+;)" G "{\\f" +"(\{\\f\d*)\\([^;]+;)" "{f0fs20 some text}" +#"" G '<0>space' # TODO: Can't quote this pattern with the test syntax! +#"" "this is not a tag" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0-1]\d|[2][0-3])(\:[0-5]\d){1,2})?$" G "<0>12/30/2002" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0-1]\d|[2][0-3])(\:[0-5]\d){1,2})?$" G "<0>01/12/1998 13:30" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0-1]\d|[2][0-3])(\:[0-5]\d){1,2})?$" G "<0>01/28/2002 22:35:00" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0-1]\d|[2][0-3])(\:[0-5]\d){1,2})?$" "13/30/2002" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0-1]\d|[2][0-3])(\:[0-5]\d){1,2})?$" "01/12/1998 24:30" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0-1]\d|[2][0-3])(\:[0-5]\d){1,2})?$" "01/28/2002 22:35:64" +#"((?(^[A-Z0-9-;=]*:))(?(.*)))" G "<0>BEGIN:" #named capture +#"((?(^[A-Z0-9-;=]*:))(?(.*)))" G "<0>TEL;WORK;VOICE:" #named capture +#"((?(^[A-Z0-9-;=]*:))(?(.*)))" G "<0>TEL:" #named capture +#"((?(^[A-Z0-9-;=]*:))(?(.*)))" "begin:" #named capture +#"((?(^[A-Z0-9-;=]*:))(?(.*)))" "TEL;PREF;" #named capture +'^]*)>(.*?(?=<\/a>))<\/a>$' G '<0>my external link' +'^]*)>(.*?(?=<\/a>))<\/a>$' G ']*)>(.*?(?=<\/a>))<\/a>$' 'my internal link' +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0]\d|[1][0-2])(\:[0-5]\d){1,2})*\s*([aApP][mM]{0,2})?$" G "<0>12/31/2002" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0]\d|[1][0-2])(\:[0-5]\d){1,2})*\s*([aApP][mM]{0,2})?$" G "<0>12/31/2002 08:00" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0]\d|[1][0-2])(\:[0-5]\d){1,2})*\s*([aApP][mM]{0,2})?$" G "<0>12/31/2002 08:00 AM" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0]\d|[1][0-2])(\:[0-5]\d){1,2})*\s*([aApP][mM]{0,2})?$" "12/31/02" +"^([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\/([2][01]|[1][6-9])\d{2}(\s([0]\d|[1][0-2])(\:[0-5]\d){1,2})*\s*([aApP][mM]{0,2})?$" "12/31/2002 14:00" +"
(?:\s*([^<]+)
\s*)+
" G "<0>
string1
string2
string3
" +"
(?:\s*([^<]+)
\s*)+
" ".." +"^((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1})))$" G "<0>1/2/03" +"^((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1})))$" G "<0>2/30/1999" +"^((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1})))$" G "<0>03/04/19" +"^((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1})))$" "3/4/2020" +"^((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((19)([2-9])(\d{1})|(20)([01])(\d{1})|([8901])(\d{1})))$" "3/4/1919" +']*))*|/?>' G '<0>' +']*))*|/?>' G "<0>" +']*))*|/?>' G "<0>
" +']*))*|/?>' "this is a test..." +"^ *(1[0-2]|[1-9]):[0-5][0-9] *(a|p|A|P)(m|M) *$" G "<0>12:00am" +"^ *(1[0-2]|[1-9]):[0-5][0-9] *(a|p|A|P)(m|M) *$" G "<0>1:00 PM" +"^ *(1[0-2]|[1-9]):[0-5][0-9] *(a|p|A|P)(m|M) *$" G "<0> 12:59 pm" +"^ *(1[0-2]|[1-9]):[0-5][0-9] *(a|p|A|P)(m|M) *$" "0:00" +"^ *(1[0-2]|[1-9]):[0-5][0-9] *(a|p|A|P)(m|M) *$" "0:01 am" +"^ *(1[0-2]|[1-9]):[0-5][0-9] *(a|p|A|P)(m|M) *$" "13:00 pm" +"\({1}[0-9]{3}\){1}\-{1}[0-9]{3}\-{1}[0-9]{4}" G "<0>(111)-111-1111" +"\({1}[0-9]{3}\){1}\-{1}[0-9]{3}\-{1}[0-9]{4}" "11111111111" +"[^abc]" G "<0>def" +"[^abc]" "abc" +"^(([0]?[1-9]|[1][0-2])[\/|\-|\.]([0-2]\d|[3][0-1]|[1-9])[\/|\-|\.]([2][0])?\d{2}\s+((([0][0-9]|[1][0-2]|[0-9])[\:|\-|\.]([0-5]\d)\s*([aApP][mM])?)|(([0-1][0-9]|[2][0-3]|[0-9])[\:|\-|\.]([0-5]\d))))$" G "<0>01/01/2002 04:42" +"^(([0]?[1-9]|[1][0-2])[\/|\-|\.]([0-2]\d|[3][0-1]|[1-9])[\/|\-|\.]([2][0])?\d{2}\s+((([0][0-9]|[1][0-2]|[0-9])[\:|\-|\.]([0-5]\d)\s*([aApP][mM])?)|(([0-1][0-9]|[2][0-3]|[0-9])[\:|\-|\.]([0-5]\d))))$" G "<0>5-12-02 04:42 AM" +"^(([0]?[1-9]|[1][0-2])[\/|\-|\.]([0-2]\d|[3][0-1]|[1-9])[\/|\-|\.]([2][0])?\d{2}\s+((([0][0-9]|[1][0-2]|[0-9])[\:|\-|\.]([0-5]\d)\s*([aApP][mM])?)|(([0-1][0-9]|[2][0-3]|[0-9])[\:|\-|\.]([0-5]\d))))$" G "<0>01.01/02 04-42aM" +"^(([0]?[1-9]|[1][0-2])[\/|\-|\.]([0-2]\d|[3][0-1]|[1-9])[\/|\-|\.]([2][0])?\d{2}\s+((([0][0-9]|[1][0-2]|[0-9])[\:|\-|\.]([0-5]\d)\s*([aApP][mM])?)|(([0-1][0-9]|[2][0-3]|[0-9])[\:|\-|\.]([0-5]\d))))$" "01-12-1999 4:50PM" +"^(([0]?[1-9]|[1][0-2])[\/|\-|\.]([0-2]\d|[3][0-1]|[1-9])[\/|\-|\.]([2][0])?\d{2}\s+((([0][0-9]|[1][0-2]|[0-9])[\:|\-|\.]([0-5]\d)\s*([aApP][mM])?)|(([0-1][0-9]|[2][0-3]|[0-9])[\:|\-|\.]([0-5]\d))))$" "01-12-2002 15:10PM" +"^(([0]?[1-9]|[1][0-2])[\/|\-|\.]([0-2]\d|[3][0-1]|[1-9])[\/|\-|\.]([2][0])?\d{2}\s+((([0][0-9]|[1][0-2]|[0-9])[\:|\-|\.]([0-5]\d)\s*([aApP][mM])?)|(([0-1][0-9]|[2][0-3]|[0-9])[\:|\-|\.]([0-5]\d))))$" "01-12-002 8:20PM" +"^([1][12]|[0]?[1-9])[\/-]([3][01]|[12]\d|[0]?[1-9])[\/-](\d{4}|\d{2})$" G "<0>11-02-02" +"^([1][12]|[0]?[1-9])[\/-]([3][01]|[12]\d|[0]?[1-9])[\/-](\d{4}|\d{2})$" G "<0>1-25-2002" +"^([1][12]|[0]?[1-9])[\/-]([3][01]|[12]\d|[0]?[1-9])[\/-](\d{4}|\d{2})$" G "<0>01/25/2002" +"^([1][12]|[0]?[1-9])[\/-]([3][01]|[12]\d|[0]?[1-9])[\/-](\d{4}|\d{2})$" "13-02-02" +"^([1][12]|[0]?[1-9])[\/-]([3][01]|[12]\d|[0]?[1-9])[\/-](\d{4}|\d{2})$" "11.02.02" +"^([1][12]|[0]?[1-9])[\/-]([3][01]|[12]\d|[0]?[1-9])[\/-](\d{4}|\d{2})$" "11/32/2002" +"(([0-1][0-9])|([2][0-3])):([0-5][0-9]):([0-5][0-9])" G "<0>09:30:00" +"(([0-1][0-9])|([2][0-3])):([0-5][0-9]):([0-5][0-9])" G "<0>17:45:20" +"(([0-1][0-9])|([2][0-3])):([0-5][0-9]):([0-5][0-9])" G "<0>23:59:59" +"(([0-1][0-9])|([2][0-3])):([0-5][0-9]):([0-5][0-9])" "24:00:00" +"(((0[1-9]|[12][0-9]|3[01])([-./])(0[13578]|10|12)([-./])(\d{4}))|(([0][1-9]|[12][0-9]|30)([-./])(0[469]|11)([-./])(\d{4}))|((0[1-9]|1[0-9]|2[0-8])([-./])(02)([-./])(\d{4}))|((29)(\.|-|\/)(02)([-./])([02468][048]00))|((29)([-./])(02)([-./])([13579][26]00))|((29)([-./])(02)([-./])([0-9][0-9][0][48]))|((29)([-./])(02)([-./])([0-9][0-9][2468][048]))|((29)([-./])(02)([-./])([0-9][0-9][13579][26])))" G "<0>29/02/2000" +"(((0[1-9]|[12][0-9]|3[01])([-./])(0[13578]|10|12)([-./])(\d{4}))|(([0][1-9]|[12][0-9]|30)([-./])(0[469]|11)([-./])(\d{4}))|((0[1-9]|1[0-9]|2[0-8])([-./])(02)([-./])(\d{4}))|((29)(\.|-|\/)(02)([-./])([02468][048]00))|((29)([-./])(02)([-./])([13579][26]00))|((29)([-./])(02)([-./])([0-9][0-9][0][48]))|((29)([-./])(02)([-./])([0-9][0-9][2468][048]))|((29)([-./])(02)([-./])([0-9][0-9][13579][26])))" G "<0>31/01/2000" +"(((0[1-9]|[12][0-9]|3[01])([-./])(0[13578]|10|12)([-./])(\d{4}))|(([0][1-9]|[12][0-9]|30)([-./])(0[469]|11)([-./])(\d{4}))|((0[1-9]|1[0-9]|2[0-8])([-./])(02)([-./])(\d{4}))|((29)(\.|-|\/)(02)([-./])([02468][048]00))|((29)([-./])(02)([-./])([13579][26]00))|((29)([-./])(02)([-./])([0-9][0-9][0][48]))|((29)([-./])(02)([-./])([0-9][0-9][2468][048]))|((29)([-./])(02)([-./])([0-9][0-9][13579][26])))" G "<0>30-01-2000" +"(((0[1-9]|[12][0-9]|3[01])([-./])(0[13578]|10|12)([-./])(\d{4}))|(([0][1-9]|[12][0-9]|30)([-./])(0[469]|11)([-./])(\d{4}))|((0[1-9]|1[0-9]|2[0-8])([-./])(02)([-./])(\d{4}))|((29)(\.|-|\/)(02)([-./])([02468][048]00))|((29)([-./])(02)([-./])([13579][26]00))|((29)([-./])(02)([-./])([0-9][0-9][0][48]))|((29)([-./])(02)([-./])([0-9][0-9][2468][048]))|((29)([-./])(02)([-./])([0-9][0-9][13579][26])))" "29/02/2002" +"(((0[1-9]|[12][0-9]|3[01])([-./])(0[13578]|10|12)([-./])(\d{4}))|(([0][1-9]|[12][0-9]|30)([-./])(0[469]|11)([-./])(\d{4}))|((0[1-9]|1[0-9]|2[0-8])([-./])(02)([-./])(\d{4}))|((29)(\.|-|\/)(02)([-./])([02468][048]00))|((29)([-./])(02)([-./])([13579][26]00))|((29)([-./])(02)([-./])([0-9][0-9][0][48]))|((29)([-./])(02)([-./])([0-9][0-9][2468][048]))|((29)([-./])(02)([-./])([0-9][0-9][13579][26])))" "32/01/2002" +"(((0[1-9]|[12][0-9]|3[01])([-./])(0[13578]|10|12)([-./])(\d{4}))|(([0][1-9]|[12][0-9]|30)([-./])(0[469]|11)([-./])(\d{4}))|((0[1-9]|1[0-9]|2[0-8])([-./])(02)([-./])(\d{4}))|((29)(\.|-|\/)(02)([-./])([02468][048]00))|((29)([-./])(02)([-./])([13579][26]00))|((29)([-./])(02)([-./])([0-9][0-9][0][48]))|((29)([-./])(02)([-./])([0-9][0-9][2468][048]))|((29)([-./])(02)([-./])([0-9][0-9][13579][26])))" "10/2/2002" +"^0[1-6]{1}(([0-9]{2}){4})|((\s[0-9]{2}){4})|((-[0-9]{2}){4})$" G "<0>01 46 70 89 12" +"^0[1-6]{1}(([0-9]{2}){4})|((\s[0-9]{2}){4})|((-[0-9]{2}){4})$" G "<0>01-46-70-89-12" +"^0[1-6]{1}(([0-9]{2}){4})|((\s[0-9]{2}){4})|((-[0-9]{2}){4})$" G "<0>0146708912" +"^0[1-6]{1}(([0-9]{2}){4})|((\s[0-9]{2}){4})|((-[0-9]{2}){4})$" "01-46708912" +"^0[1-6]{1}(([0-9]{2}){4})|((\s[0-9]{2}){4})|((-[0-9]{2}){4})$" "01 46708912" +"^0[1-6]{1}(([0-9]{2}){4})|((\s[0-9]{2}){4})|((-[0-9]{2}){4})$" "+33235256677" +"^[0-9A-Za-z_ ]+(.[jJ][pP][gG]|.[gG][iI][fF])$" G "<0>good.gif" +"^[0-9A-Za-z_ ]+(.[jJ][pP][gG]|.[gG][iI][fF])$" G "<0>go d.GIf" +"^[0-9A-Za-z_ ]+(.[jJ][pP][gG]|.[gG][iI][fF])$" G "<0>goo_d.jPg" +"^[0-9A-Za-z_ ]+(.[jJ][pP][gG]|.[gG][iI][fF])$" "junk" +"^[0-9A-Za-z_ ]+(.[jJ][pP][gG]|.[gG][iI][fF])$" "bad.bad.gif" +"^[0-9A-Za-z_ ]+(.[jJ][pP][gG]|.[gG][iI][fF])$" "slash\gif." +"<[^>\s]*\bauthor\b[^>]*>" G '<0>' +"<[^>\s]*\bauthor\b[^>]*>" G "<0>" +# "<[^>\s]*\bauthor\b[^>]*>" G '<0>' #Debug should work +"<[^> ]*\bauthor\b[^>]*>" G "<0>" +"<[^> ]*\bauthor\b[^>]*>" G '<0>' +"<[^>\s]*\bauthor\b[^>]*>" "" +"<[^>\s]*\bauthor\b[^>]*>" "" +"<[^>\s]*\bauthor\b[^>]*>" "author" +"^(?:(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00)))(\/|-|\.)(?:0?2\1(?:29))$)|(?:(?:1[6-9]|[2-9]\d)?\d{2})(\/|-|\.)(?:(?:(?:0?[13578]|1[02])\2(?:31))|(?:(?:0?[1,3-9]|1[0-2])\2(29|30))|(?:(?:0?[1-9])|(?:1[0-2]))\2(?:0?[1-9]|1\d|2[0-8]))$" G "<0>04/2/29" +"^(?:(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00)))(\/|-|\.)(?:0?2\1(?:29))$)|(?:(?:1[6-9]|[2-9]\d)?\d{2})(\/|-|\.)(?:(?:(?:0?[13578]|1[02])\2(?:31))|(?:(?:0?[1,3-9]|1[0-2])\2(29|30))|(?:(?:0?[1-9])|(?:1[0-2]))\2(?:0?[1-9]|1\d|2[0-8]))$" G "<0>2002-4-30" +"^(?:(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00)))(\/|-|\.)(?:0?2\1(?:29))$)|(?:(?:1[6-9]|[2-9]\d)?\d{2})(\/|-|\.)(?:(?:(?:0?[13578]|1[02])\2(?:31))|(?:(?:0?[1,3-9]|1[0-2])\2(29|30))|(?:(?:0?[1-9])|(?:1[0-2]))\2(?:0?[1-9]|1\d|2[0-8]))$" G "<0>02.10.31" +"^(?:(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00)))(\/|-|\.)(?:0?2\1(?:29))$)|(?:(?:1[6-9]|[2-9]\d)?\d{2})(\/|-|\.)(?:(?:(?:0?[13578]|1[02])\2(?:31))|(?:(?:0?[1,3-9]|1[0-2])\2(29|30))|(?:(?:0?[1-9])|(?:1[0-2]))\2(?:0?[1-9]|1\d|2[0-8]))$" "2003/2/29" +"^(?:(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00)))(\/|-|\.)(?:0?2\1(?:29))$)|(?:(?:1[6-9]|[2-9]\d)?\d{2})(\/|-|\.)(?:(?:(?:0?[13578]|1[02])\2(?:31))|(?:(?:0?[1,3-9]|1[0-2])\2(29|30))|(?:(?:0?[1-9])|(?:1[0-2]))\2(?:0?[1-9]|1\d|2[0-8]))$" "02.4.31" +"^(?:(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00)))(\/|-|\.)(?:0?2\1(?:29))$)|(?:(?:1[6-9]|[2-9]\d)?\d{2})(\/|-|\.)(?:(?:(?:0?[13578]|1[02])\2(?:31))|(?:(?:0?[1,3-9]|1[0-2])\2(29|30))|(?:(?:0?[1-9])|(?:1[0-2]))\2(?:0?[1-9]|1\d|2[0-8]))$" "00/00/00" +'(\d*)\u0027*-*(\d*)/*(\d*)"' G '<0>5\u0027-3/16"' +'(\d*)\u0027*-*(\d*)/*(\d*)"' G '<0>1\u0027-2"' +'(\d*)\u0027*-*(\d*)/*(\d*)"' G '<0>5/16"' +'(\d*)\u0027*-*(\d*)/*(\d*)"' '1 3/16' +"^[1-9]{1}$|^[1-4]{1}[0-9]{1}$|^50$" G "<0>1" +"^[1-9]{1}$|^[1-4]{1}[0-9]{1}$|^50$" G "<0>23" +"^[1-9]{1}$|^[1-4]{1}[0-9]{1}$|^50$" G "<0>50" +"^[1-9]{1}$|^[1-4]{1}[0-9]{1}$|^50$" "0" +"^[1-9]{1}$|^[1-4]{1}[0-9]{1}$|^50$" "111" +"^[1-9]{1}$|^[1-4]{1}[0-9]{1}$|^50$" "xyz" +"^([ \u00c0-\u01ffa-zA-Z'])+$" G "<0>Jon Doe" +"^([ \u00c0-\u01ffa-zA-Z'])+$" G "<0>J\u00f8rn" +"^([ \u00c0-\u01ffa-zA-Z'])+$" G "<0>Mc'Neelan" +"^([ \u00c0-\u01ffa-zA-Z'])+$" "Henry); hacking attempt" +"^((([0]?[1-9]|1[0-2])(:|\.)(00|15|30|45)?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)(00|15|30|45)?))$" G "<0>1:00 PM" +"^((([0]?[1-9]|1[0-2])(:|\.)(00|15|30|45)?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)(00|15|30|45)?))$" G "<0>6:45 am" +"^((([0]?[1-9]|1[0-2])(:|\.)(00|15|30|45)?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)(00|15|30|45)?))$" G "<0>17:30" +"^((([0]?[1-9]|1[0-2])(:|\.)(00|15|30|45)?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)(00|15|30|45)?))$" "4:32 am" +"^((([0]?[1-9]|1[0-2])(:|\.)(00|15|30|45)?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)(00|15|30|45)?))$" "5:30:00 am" +"^((([0]?[1-9]|1[0-2])(:|\.)(00|15|30|45)?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)(00|15|30|45)?))$" "17:01" +"(^\d*\.?\d*[1-9]+\d*$)|(^[1-9]+\d*\.\d*$)" G "<0>0.050" +"(^\d*\.?\d*[1-9]+\d*$)|(^[1-9]+\d*\.\d*$)" G "<0>5.0000" +"(^\d*\.?\d*[1-9]+\d*$)|(^[1-9]+\d*\.\d*$)" G "<0>5000" +"(^\d*\.?\d*[1-9]+\d*$)|(^[1-9]+\d*\.\d*$)" "0" +"(^\d*\.?\d*[1-9]+\d*$)|(^[1-9]+\d*\.\d*$)" "0.0" +"(^\d*\.?\d*[1-9]+\d*$)|(^[1-9]+\d*\.\d*$)" ".0" +"^([A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^$" G "<0>Sacramento" +"^([A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^$" "<0><2>San Francisco" +"^([A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^$" "<0><3>San Luis Obispo" +"^([A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^$" "SanFrancisco" +"^([A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^$" "SanLuisObispo" +"^([A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^([A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,}\040[A-Z]{1}[a-z]{1,})$|^$" "San francisco" +"^\{?[a-fA-F\d]{8}-([a-fA-F\d]{4}-){3}[a-fA-F\d]{12}\}?$" G "<0>{e02ff0e4-00ad-090A-c030-0d00a0008ba0}" +"^\{?[a-fA-F\d]{8}-([a-fA-F\d]{4}-){3}[a-fA-F\d]{12}\}?$" G "<0>e02ff0e4-00ad-090A-c030-0d00a0008ba0" +"^\{?[a-fA-F\d]{8}-([a-fA-F\d]{4}-){3}[a-fA-F\d]{12}\}?$" "0xe02ff0e400ad090Ac0300d00a0008ba0" +"^\{?[a-fA-F0-9]{8}-([a-fA-F0-9]{4}-){3}[a-fA-F0-9]{12}\}?$" G "<0>{e02ff0e4-00ad-090A-c030-0d00a0008ba0}" +"^\{?[a-fA-F0-9]{8}-([a-fA-F0-9]{4}-){3}[a-fA-F0-9]{12}\}?$" G "<0>e02ff0e4-00ad-090A-c030-0d00a0008ba0" +"^\{?[a-fA-F0-9]{8}-([a-fA-F0-9]{4}-){3}[a-fA-F0-9]{12}\}?$" "0xe02ff0e400ad090Ac0300d00a0008ba0" +"^([a-zA-Z0-9@*#]{8,15})$" G "<0>@12X*567" +"^([a-zA-Z0-9@*#]{8,15})$" G "<0>1#Zv96g@*Yfasd4" +"^([a-zA-Z0-9@*#]{8,15})$" G "<0>#67jhgt@erd" +"^([a-zA-Z0-9@*#]{8,15})$" "$12X*567" +"^([a-zA-Z0-9@*#]{8,15})$" "1#Zv_96" +"^([a-zA-Z0-9@*#]{8,15})$" "+678jhgt@erd" +'(("|\u0027)[a-z0-9\/\.\?\=\&]*(\.htm|\.asp|\.php|\.jsp)[a-z0-9\/\.\?\=\&]*("|\u0027))|(href=*?[a-z0-9\/\.\?\=\&"\u0027]*)' G '<0>href="produktsida.asp?kategori2=218"' +'(("|\u0027)[a-z0-9\/\.\?\=\&]*(\.htm|\.asp|\.php|\.jsp)[a-z0-9\/\.\?\=\&]*("|\u0027))|(href=*?[a-z0-9\/\.\?\=\&"\u0027]*)' G '<0>href="NuclearTesting.htm"' +'(("|\u0027)[a-z0-9\/\.\?\=\&]*(\.htm|\.asp|\.php|\.jsp)[a-z0-9\/\.\?\=\&]*("|\u0027))|(href=*?[a-z0-9\/\.\?\=\&"\u0027]*)' 'U Suck' +"^(((((0[1-9])|(1\d)|(2[0-8]))-((0[1-9])|(1[0-2])))|((31-((0[13578])|(1[02])))|((29|30)-((0[1,3-9])|(1[0-2])))))-((20[0-9][0-9]))|(29-02-20(([02468][048])|([13579][26]))))$" G "<0>05-01-2002" +"^(((((0[1-9])|(1\d)|(2[0-8]))-((0[1-9])|(1[0-2])))|((31-((0[13578])|(1[02])))|((29|30)-((0[1,3-9])|(1[0-2])))))-((20[0-9][0-9]))|(29-02-20(([02468][048])|([13579][26]))))$" G "<0>29-02-2004" +"^(((((0[1-9])|(1\d)|(2[0-8]))-((0[1-9])|(1[0-2])))|((31-((0[13578])|(1[02])))|((29|30)-((0[1,3-9])|(1[0-2])))))-((20[0-9][0-9]))|(29-02-20(([02468][048])|([13579][26]))))$" G "<0>31-12-2002" +"^(((((0[1-9])|(1\d)|(2[0-8]))-((0[1-9])|(1[0-2])))|((31-((0[13578])|(1[02])))|((29|30)-((0[1,3-9])|(1[0-2])))))-((20[0-9][0-9]))|(29-02-20(([02468][048])|([13579][26]))))$" "1-1-02" +"^(((((0[1-9])|(1\d)|(2[0-8]))-((0[1-9])|(1[0-2])))|((31-((0[13578])|(1[02])))|((29|30)-((0[1,3-9])|(1[0-2])))))-((20[0-9][0-9]))|(29-02-20(([02468][048])|([13579][26]))))$" "29-02-2002" +"^(((((0[1-9])|(1\d)|(2[0-8]))-((0[1-9])|(1[0-2])))|((31-((0[13578])|(1[02])))|((29|30)-((0[1,3-9])|(1[0-2])))))-((20[0-9][0-9]))|(29-02-20(([02468][048])|([13579][26]))))$" "31-11-2002" +"^\d*[0-9](|.\d*[0-9]|,\d*[0-9])?$" G "<0>123456.123456" +"^\d*[0-9](|.\d*[0-9]|,\d*[0-9])?$" G "<0>123456,123456" +"^\d*[0-9](|.\d*[0-9]|,\d*[0-9])?$" G "<0>123456" +"^\d*[0-9](|.\d*[0-9]|,\d*[0-9])?$" "123a.123" +"^\d*[0-9](|.\d*[0-9]|,\d*[0-9])?$" "123a,123" +"^\d*[0-9](|.\d*[0-9]|,\d*[0-9])?$" "a" +"^(ac|AC|al|AL|am|AM|ap|AP|ba|BA|ce|CE|df|DF|es|ES|go|GO|ma|MA|mg|MG|ms|MS|mt|MT|pa|PA|pb|PB|pe|PE|pi|PI|pr|PR|rj|RJ|rn|RN|ro|RO|rr|RR|rs|RS|sc|SC|se|SE|sp|SP|to|TO)$" G "<0>AC" +"^(ac|AC|al|AL|am|AM|ap|AP|ba|BA|ce|CE|df|DF|es|ES|go|GO|ma|MA|mg|MG|ms|MS|mt|MT|pa|PA|pb|PB|pe|PE|pi|PI|pr|PR|rj|RJ|rn|RN|ro|RO|rr|RR|rs|RS|sc|SC|se|SE|sp|SP|to|TO)$" G "<0>RJ" +"^(ac|AC|al|AL|am|AM|ap|AP|ba|BA|ce|CE|df|DF|es|ES|go|GO|ma|MA|mg|MG|ms|MS|mt|MT|pa|PA|pb|PB|pe|PE|pi|PI|pr|PR|rj|RJ|rn|RN|ro|RO|rr|RR|rs|RS|sc|SC|se|SE|sp|SP|to|TO)$" G "<0>SP" +"^(ac|AC|al|AL|am|AM|ap|AP|ba|BA|ce|CE|df|DF|es|ES|go|GO|ma|MA|mg|MG|ms|MS|mt|MT|pa|PA|pb|PB|pe|PE|pi|PI|pr|PR|rj|RJ|rn|RN|ro|RO|rr|RR|rs|RS|sc|SC|se|SE|sp|SP|to|TO)$" "XX" +"^(ac|AC|al|AL|am|AM|ap|AP|ba|BA|ce|CE|df|DF|es|ES|go|GO|ma|MA|mg|MG|ms|MS|mt|MT|pa|PA|pb|PB|pe|PE|pi|PI|pr|PR|rj|RJ|rn|RN|ro|RO|rr|RR|rs|RS|sc|SC|se|SE|sp|SP|to|TO)$" "AB" +"^(ac|AC|al|AL|am|AM|ap|AP|ba|BA|ce|CE|df|DF|es|ES|go|GO|ma|MA|mg|MG|ms|MS|mt|MT|pa|PA|pb|PB|pe|PE|pi|PI|pr|PR|rj|RJ|rn|RN|ro|RO|rr|RR|rs|RS|sc|SC|se|SE|sp|SP|to|TO)$" "HJ" +"^[+]?\d*$" G "<0>0123456789" +"^[+]?\d*$" G "<0>1234" +"^[+]?\d*$" G "<0>1" +"^[+]?\d*$" "1.0?&" +"^[+]?\d*$" "a1" +"^[+]?\d*$" "2a-" +#/<[aA][ ]{0,}([a-zA-Z0-9"'_,.:;!?@$\&()%=\u002f ]|[\-]|[ \f]){0,}>((<(([a-zA-Z0-9"'_,.:;!?@$\&()%=\u002f ]|[\-]|[ \f]){0,})>([a-zA-Z0-9"'_,.:;!?@$\&()%=\u002f ]|[\-]|[ \f]){0,})|(([a-zA-Z0-9"'_,.:;!?@$\&()%=\u002f ]|[\-]|[ \f]){0,})){1,}/ G "<0>this text is italicized" #TODO: Need infinite loop breaking +#/<[aA][ ]{0,}([a-zA-Z0-9"'_,.:;!?@$\&()%=\u002f ]|[\-]|[ \f]){0,}>((<(([a-zA-Z0-9"'_,.:;!?@$\&()%=\u002f ]|[\-]|[ \f]){0,})>([a-zA-Z0-9"'_,.:;!?@$\&()%=\u002f ]|[\-]|[ \f]){0,})|(([a-zA-Z0-9"'_,.:;!?@$\&()%=\u002f ]|[\-]|[ \f]){0,})){1,}/ "

" #TODO: need infinite loop breaking. +"^([0-1]?[0-9]|[2][0-3]):([0-5][0-9])$" G "<0>0:00" +"^([0-1]?[0-9]|[2][0-3]):([0-5][0-9])$" G "<0>23:00" +"^([0-1]?[0-9]|[2][0-3]):([0-5][0-9])$" G "<0>00:59" +"^([0-1]?[0-9]|[2][0-3]):([0-5][0-9])$" "0:0" +"^([0-1]?[0-9]|[2][0-3]):([0-5][0-9])$" "24:00" +"^([0-1]?[0-9]|[2][0-3]):([0-5][0-9])$" "00:60" +"^((0[1-9])|(1[0-2]))\/(\d{2})$" G "<0>11/03" +"^((0[1-9])|(1[0-2]))\/(\d{2})$" G "<0>01/04" +"^((0[1-9])|(1[0-2]))\/(\d{2})$" "13/03" +"^((0[1-9])|(1[0-2]))\/(\d{2})$" "10/2003" +"]*>[\w|\t|\r|\W]*" G '<0>' +"]*>[\w|\t|\r|\W]*" "--" +"]*>[\w|\t|\r|\W]*" "A-Z][a-z]+" +#"]*>[\w|\t|\r|\W]*" G "<0>strFirstName" # Test Case damaged? +#"]*>[\w|\t|\r|\W]*" G "<0>intAgeInYears" # Test Case damaged? +#"]*>[\w|\t|\r|\W]*" G "<0>Where the Wild Things Are" # Test Case damaged? +"]*>[\w|\t|\r|\W]*" "123" +"]*>[\w|\t|\r|\W]*" "abc" +"]*>[\w|\t|\r|\W]*" "this has no caps in it" +"(^-\d*\.?\d*[1-9]+\d*$)|(^-[1-9]+\d*\.\d*$)" G "<0>-0.050" +"(^-\d*\.?\d*[1-9]+\d*$)|(^-[1-9]+\d*\.\d*$)" G "<0>-5.000" +"(^-\d*\.?\d*[1-9]+\d*$)|(^-[1-9]+\d*\.\d*$)" G "<0>-5" +"(^-\d*\.?\d*[1-9]+\d*$)|(^-[1-9]+\d*\.\d*$)" "0" +"(^-\d*\.?\d*[1-9]+\d*$)|(^-[1-9]+\d*\.\d*$)" "0.0" +"(^-\d*\.?\d*[1-9]+\d*$)|(^-[1-9]+\d*\.\d*$)" ".0" +"^([2][0]\d{2}\/([0]\d|[1][0-2])\/([0-2]\d|[3][0-1]))$|^([2][0]\d{2}\/([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" G "<0>2002/02/03" +"^([2][0]\d{2}\/([0]\d|[1][0-2])\/([0-2]\d|[3][0-1]))$|^([2][0]\d{2}\/([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" G "<0>2002/02/03 12:12:18" +"^([2][0]\d{2}\/([0]\d|[1][0-2])\/([0-2]\d|[3][0-1]))$|^([2][0]\d{2}\/([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" "2002/02/36" +"^([2][0]\d{2}\/([0]\d|[1][0-2])\/([0-2]\d|[3][0-1]))$|^([2][0]\d{2}\/([0]\d|[1][0-2])\/([0-2]\d|[3][0-1])\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" "02/03/2002" +"^(\d|,)*\.?\d*$" G "<0>1,000" +"^(\d|,)*\.?\d*$" G "<0>3,000.05" +"^(\d|,)*\.?\d*$" G "<0>5,000,000" +"^(\d|,)*\.?\d*$" "abc" +"^(\d|,)*\.?\d*$" "$100,000" +"^(\d|,)*\.?\d*$" "Forty" +"^\d$" G "<0>1" +"^\d$" G "<0>2" +"^\d$" G "<0>3" +"^\d$" "a" +"^\d$" "324" +"^\d$" "num" +"^[0-9]+$" G "<0>1234567890" +"^[0-9]+$" G "<0>1234567890" +"^[0-9]+$" G "<0>1234567890" +"^[0-9]+$" "http://none" +"^[0-9]+$" "http://none" +"^[0-9]+$" "http://none" +"^.{4,8}$" G "<0>asdf" +"^.{4,8}$" G "<0>1234" +"^.{4,8}$" G "<0>asdf1234" +"^.{4,8}$" "asd" +"^.{4,8}$" "123" +"^.{4,8}$" "asdfe12345" +"^[\w\.=-]+@[\w\.-]+\.[\w]{2,3}$" G "<0>a@a.com" +"^[\w\.=-]+@[\w\.-]+\.[\w]{2,3}$" G "<0>a@a.com.au" +"^[\w\.=-]+@[\w\.-]+\.[\w]{2,3}$" G "<0>a@a.au" +"^[\w\.=-]+@[\w\.-]+\.[\w]{2,3}$" "word" +"^[\w\.=-]+@[\w\.-]+\.[\w]{2,3}$" "word@" +"^[\w\.=-]+@[\w\.-]+\.[\w]{2,3}$" "@word" +"^\d{5}-\d{4}$" G "<0>22222-3333" +"^\d{5}-\d{4}$" G "<0>34545-2367" +"^\d{5}-\d{4}$" G "<0>56334-2343" +"^\d{5}-\d{4}$" "123456789" +"^\d{5}-\d{4}$" "A3B 4C5" +"^\d{5}-\d{4}$" "55335" +"(a|b|c).(a.b)*.b+.c" G "<0>autbfc" +"(a|b|c).(a.b)*.b+.c" "attc" +'"((\\")|[^"(\\")])+"' G '<0>"test"' +'"((\\")|[^"(\\")])+"' G '<0>"escape\"quote"' +'"((\\")|[^"(\\")])+"' G '<0>"\\""' +'"((\\")|[^"(\\")])+"' "test" +'"((\\")|[^"(\\")])+"' '"test' +'"((\\")|[^"(\\")])+"' '""test\\"' +"((0[1-9])|(1[02]))/\d{2}" G "<0>01/00" +"((0[1-9])|(1[02]))/\d{2}" G "<0>12/99" +"((0[1-9])|(1[02]))/\d{2}" "13/00" +"((0[1-9])|(1[02]))/\d{2}" "12/AS" +"^[a-zA-Z]$" G "<0>a" +"^[a-zA-Z]$" G "<0>B" +"^[a-zA-Z]$" G "<0>c" +"^[a-zA-Z]$" "0" +"^[a-zA-Z]$" "&" +"^[a-zA-Z]$" "AbC" +"^[a-zA-Z]+$" G "<0>abc" +"^[a-zA-Z]+$" G "<0>ABC" +"^[a-zA-Z]+$" G "<0>aBcDeF" +"^[a-zA-Z]+$" "abc123" +"^[a-zA-Z]+$" "mr." +"^[a-zA-Z]+$" "a word" +"^\s*[a-zA-Z,\p{Zs}]+\s*$" G "<0>Smith, Ed" +"^\s*[a-zA-Z,\p{Zs}]+\s*$" G "<0>Ed Smith" +"^\s*[a-zA-Z,\p{Zs}]+\s*$" G "<0>aBcDeFgH" +"^\s*[a-zA-Z,\p{Zs}]+\s*$" "a123" +"^\s*[a-zA-Z,\p{Zs}]+\s*$" "AB5" +"^\s*[a-zA-Z,\p{Zs}]+\s*$" "Mr. Ed" +"(\w+?@\w+?\u002E.+)" G "<0>bob@vsnl.com" +"(\w+?@\w+?\u002E.+)" "[AABB]" +"^\d+$" G "<0>123" +"^\d+$" G "<0>10" +"^\d+$" G "<0>54" +"^\d+$" "-54" +"^\d+$" "54.234" +"^\d+$" "abc" +"^(\+|-)?\d+$" G "<0>-34" +"^(\+|-)?\d+$" G "<0>34" +"^(\+|-)?\d+$" G "<0>+5" +"^(\+|-)?\d+$" "abc" +"^(\+|-)?\d+$" "3.1415" +"^(\+|-)?\d+$" "-5.3" +"foo" G "<0>foo" +"foo" "bar" +"^[1-5]$" G "<0>1" +"^[1-5]$" G "<0>3" +"^[1-5]$" G "<0>4" +"^[1-5]$" "6" +"^[1-5]$" "23" +"^[1-5]$" "a" +"^[12345]$" G "<0>1" +"^[12345]$" G "<0>2" +"^[12345]$" G "<0>4" +"^[12345]$" "6" +"^[12345]$" "-1" +"^[12345]$" "abc" +"^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$" G "<0>joe@aol.com" +"^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$" G "<0>joe@wrox.co.uk" +"^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$" G "<0>joe@domain.info" +"^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$" "a@b" +"^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$" "notanemail" +"^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$" "joe@@." +"^\w+@[a-zA-Z_]+?\.[a-zA-Z]{2,3}$" G "<0>joe@aol.com" +"^\w+@[a-zA-Z_]+?\.[a-zA-Z]{2,3}$" G "<0>ssmith@aspalliance.com" +"^\w+@[a-zA-Z_]+?\.[a-zA-Z]{2,3}$" G "<0>a@b.cc" +"^\w+@[a-zA-Z_]+?\.[a-zA-Z]{2,3}$" "joe@123aspx.com" +"^\w+@[a-zA-Z_]+?\.[a-zA-Z]{2,3}$" "joe@web.info" +"^\w+@[a-zA-Z_]+?\.[a-zA-Z]{2,3}$" "joe@company.co.uk" +"[\w-]+@([\w-]+\.)+[\w-]+" G "<0>joe@aol.com" +"[\w-]+@([\w-]+\.)+[\w-]+" G "<0>a@b.c" +"[\w-]+@([\w-]+\.)+[\w-]+" "asdf" +"[\w-]+@([\w-]+\.)+[\w-]+" "1234" +"\d{4}-?\d{4}-?\d{4}-?\d{4}" G "<0>1234-1234-1234-1234" +"\d{4}-?\d{4}-?\d{4}-?\d{4}" G "<0>1234123412341234" +"\d{4}-?\d{4}-?\d{4}-?\d{4}" "1234123412345" +"^\d{5}$" G "<0>33333" +"^\d{5}$" G "<0>55555" +"^\d{5}$" G "<0>23445" +"^\d{5}$" "abcd" +"^\d{5}$" "1324" +"^\d{5}$" "as;lkjdf" +"(\w+)\s+\1" G "<0>hubba hubba" +"(\w+)\s+\1" G "<0>mandate dated" +"(\w+)\s+\1" G "<0>an annual" +"(\w+)\s+\1" "may day" +"(\w+)\s+\1" "gogo" +"(\w+)\s+\1" "1212" +"^[a-zA-Z0-9\-\.]+\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" G "<0>3SquareBand.com" +"^[a-zA-Z0-9\-\.]+\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" G "<0>asp.net" +"^[a-zA-Z0-9\-\.]+\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" G "<0>army.mil" +"^[a-zA-Z0-9\-\.]+\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" "$SquareBand.com" +"^[a-zA-Z0-9\-\.]+\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" "asp/dot.net" +"^[a-zA-Z0-9\-\.]+\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" "army.military" + diff --git a/go/mysql/icuregex/testdata/regextst_extended.txt b/go/mysql/icuregex/testdata/regextst_extended.txt new file mode 100644 index 00000000000..c6b567931e3 --- /dev/null +++ b/go/mysql/icuregex/testdata/regextst_extended.txt @@ -0,0 +1,128 @@ +# Copyright (C) 2016 and later: Unicode, Inc. and others. +# License & terms of use: http://www.unicode.org/copyright.html +# Copyright (c) 2001-2015 International Business Machines +# Corporation and others. All Rights Reserved. +# +# file: +# +# ICU regular expression test cases. +# +# format: one test case per line, +# = [# comment] +# = "" +# = "" +# the quotes on the pattern and match string can be " or ' or / +# = text, with the start and end of each +# capture group tagged with .... The overall match, +# if any, is group 0, as in <0>matched text +# A region can be specified with ... tags. +# Standard ICU unescape will be applied, allowing \u, \U, etc. to appear. +# +# = any combination of +# i case insensitive match +# x free spacing and comments +# s dot-matches-all mode +# m multi-line mode. +# ($ and ^ match at embedded new-lines) +# D Unix Lines mode (only recognize 0x0a as new-line) +# Q UREGEX_LITERAL flag. Entire pattern is literal string. +# v If icu configured without break iteration, this +# regex test pattern should not compile. +# e set the UREGEX_ERROR_ON_UNKNOWN_ESCAPES flag +# d dump the compiled pattern +# t trace operation of match engine. +# 2-9 a digit between 2 and 9, specifies the number of +# times to execute find(). The expected results are +# for the last find() in the sequence. +# G Only check match / no match. Do not check capture groups. +# E Pattern compilation error expected +# L Use LookingAt() rather than find() +# M Use matches() rather than find(). +# +# a Use non-Anchoring Bounds. +# b Use Transparent Bounds. +# The a and b options only make a difference if +# a region has been specified in the string. +# z|Z hitEnd was expected(z) or not expected (Z). +# With neither, hitEnd is not checked. +# y|Y Require End expected(y) or not expected (Y). +# +# White space must be present between the flags and the match string. +# + +"[:xdigit:]" " <0>4f" +"\P{XDIGIT}+" "4f<0> " + +"[:blank:]" "<0> 4f" +"\P{BLANK}+" "<0>4f " + +"[:print:]" "<0> 4f\x07" +"\P{PRINT}+" " 4f<0>\x07" + +"\p{Age=1.1}" "<0>4f🥱" +"\p{Age=11}" "4f🥱" +"\p{Age=12}" "4f<0>🥱" + +"\p{Name=LATIN SMALL LETTER B}" "Good<0>bye" + +"\p{Numeric_Value=3}" "Good<0>3ye" +"\p{Numeric_Value=14}" "Good<0>⑭ye" + +"\p{Script_Extensions=Greek}" "Good<0>βye" + +"\p{Bidi_Control}" "Good<0>\u200Eye" +"\p{Bidi_Class=LeftToRight}" "<0>Goodbye" +"\p{Bidi_Class=RightToLeft}" "Goodbye" +"\p{Bidi_Class=LeftToRight}" "؈" +"\p{Bidi_Paired_Bracket_Type=Open}" "Good<0>(ye" + +"\p{Soft_Dotted}" "Good<0>iye" + +"\p{Changes_When_Lowercased}" "<0>Goodbye" +"\p{Changes_When_Titlecased}" "<0>goodbye" +"\p{Changes_When_Uppercased}" "G<0>oodbye" +"\p{Changes_When_CaseMapped}" " <0>Goodbye3" +"\p{Cased}" " <0>Goodbye3" +"\p{CaseIgnorable}" "foo<0>.bar" + +"\p{Indic_Syllabic_Category=Avagraha}" "foo<0>\u09BDbar" +"\p{IndicPositionalCategory=Top_And_Left_And_Right}" "foo<0>\u0B4Cbar" +"\p{VerticalOrientation=U}" "foo<0>\uA015bar" + +"\p{Canonical_Combining_Class=Nukta}" "foo<0>\u093Cbar" +"\p{Lead_Canonical_Combining_Class=Above}" "foo<0>\u0300bar" +"\p{Trail_Canonical_Combining_Class=Above}" "foo<0>\u0300bar" + +"\p{Changes_When_Casefolded}" "<0>\uFB03Goodbye" +"\p{Changes_When_Casefolded}" 2 "\uFB03<0>Goodbye" + +"\p{NFC_Inert}" "foo<0>\uFB03bar" +"\p{NFKC_Inert}" "foo<0>\uFB03bar" +"\P{NFD_Inert}" "foo<0>Àbar" +"\P{NFKD_Inert}" "foo<0>Àbar" + +"\p{NFC_Quick_Check=No}" "foo<0>\u0340bar" +"\p{NFKC_Quick_Check=No}" "foo<0>\u0340bar" +"\p{NFD_Quick_Check=No}" "foo<0>\u00C0bar" +"\p{NFKD_Quick_Check=No}" "foo<0>\u00C0bar" + +"\p{Full_Composition_Exclusion}" "foo<0>\u0374bar" + +"\p{Numeric_Type=Decimal}" "foo<0>3bar" +"\p{Joining_Type=Dual_Joining}" "foo<0>\u0626bar" +"\p{Joining_Group=African_Feh}" "foo<0>\u08BBbar" +"\p{General_Category=Close_Punctuation}" "foo[bar" +"\p{General_Category=Close_Punctuation}" "foo<0>]]bar" +"\p{General_Category=Close_Punctuation}" 2 "foo]<0>]bar" + +"\p{Hangul_Syllable_Type=Not_Applicable}" "<0>f" +"\p{Hangul_Syllable_Type=Leading_Jamo}" "foo<0>\u1100bar" + +"\p{Regional_Indicator=Yes}" "foo<0>\U0001F1E6bar" + +# Currently unsupported property classes below. They require +# significant additional code to support. +"\p{Changes_When_NFKC_Casefolded}" E "foo<0>\uFB03bar" +"\p{Segment_Starter}" E "<0>\uFB03Goodbye" + +"\p{Emoji}" "foo<0>😀bar" \ No newline at end of file diff --git a/go/vt/vtgate/evalengine/internal/json/LICENSE b/go/mysql/json/LICENSE similarity index 100% rename from go/vt/vtgate/evalengine/internal/json/LICENSE rename to go/mysql/json/LICENSE diff --git a/go/vt/vtgate/evalengine/internal/json/cached_size.go b/go/mysql/json/cached_size.go similarity index 84% rename from go/vt/vtgate/evalengine/internal/json/cached_size.go rename to go/mysql/json/cached_size.go index 09bc71769ec..27fd511dafc 100644 --- a/go/vt/vtgate/evalengine/internal/json/cached_size.go +++ b/go/mysql/json/cached_size.go @@ -27,7 +27,7 @@ func (cached *Object) CachedSize(alloc bool) int64 { if alloc { size += int64(24) } - // field kvs []vitess.io/vitess/go/vt/vtgate/evalengine/internal/json.kv + // field kvs []vitess.io/vitess/go/mysql/json.kv { size += hack.RuntimeAllocSize(int64(cap(cached.kvs)) * int64(24)) for _, elem := range cached.kvs { @@ -44,9 +44,9 @@ func (cached *Value) CachedSize(alloc bool) int64 { if alloc { size += int64(80) } - // field o vitess.io/vitess/go/vt/vtgate/evalengine/internal/json.Object + // field o vitess.io/vitess/go/mysql/json.Object size += cached.o.CachedSize(false) - // field a []*vitess.io/vitess/go/vt/vtgate/evalengine/internal/json.Value + // field a []*vitess.io/vitess/go/mysql/json.Value { size += hack.RuntimeAllocSize(int64(cap(cached.a)) * int64(8)) for _, elem := range cached.a { @@ -67,7 +67,7 @@ func (cached *kv) CachedSize(alloc bool) int64 { } // field k string size += hack.RuntimeAllocSize(int64(len(cached.k))) - // field v *vitess.io/vitess/go/vt/vtgate/evalengine/internal/json.Value + // field v *vitess.io/vitess/go/mysql/json.Value size += cached.v.CachedSize(true) return size } diff --git a/go/mysql/json/helpers.go b/go/mysql/json/helpers.go new file mode 100644 index 00000000000..1df38b2d769 --- /dev/null +++ b/go/mysql/json/helpers.go @@ -0,0 +1,138 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json + +import ( + "vitess.io/vitess/go/sqltypes" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vthash" +) + +const hashPrefixJSON = 0xCCBB + +func (v *Value) Hash(h *vthash.Hasher) { + h.Write16(hashPrefixJSON) + _, _ = h.Write(v.WeightString(nil)) +} + +func (v *Value) ToRawBytes() []byte { + return v.MarshalTo(nil) +} + +func (v *Value) ToUnencodedBytes() []byte { + return []byte(v.s) +} + +func (v *Value) SQLType() sqltypes.Type { + return sqltypes.TypeJSON +} + +func NewArray(vals []*Value) *Value { + return &Value{a: vals, t: TypeArray} +} + +func NewObject(obj Object) *Value { + obj.sort() + return &Value{o: obj, t: TypeObject} +} + +func NewNumber(num string, n NumberType) *Value { + return &Value{s: num, t: TypeNumber, n: n} +} + +func NewString(raw string) *Value { + return &Value{s: raw, t: TypeString} +} + +func NewBlob(raw string) *Value { + return &Value{s: raw, t: TypeBlob} +} + +func NewBit(raw string) *Value { + return &Value{s: raw, t: TypeBit} +} + +func NewDate(raw string) *Value { + return &Value{s: raw, t: TypeDate} +} + +func NewDateTime(raw string) *Value { + return &Value{s: raw, t: TypeDateTime} +} + +func NewTime(raw string) *Value { + return &Value{s: raw, t: TypeTime} +} + +func NewOpaqueValue(raw string) *Value { + return &Value{s: raw, t: TypeOpaque} +} + +func NewFromSQL(v sqltypes.Value) (*Value, error) { + switch { + case v.Type() == sqltypes.TypeJSON: + var p Parser + return p.ParseBytes(v.Raw()) + case v.IsSigned(): + return NewNumber(v.RawStr(), NumberTypeSigned), nil + case v.IsUnsigned(): + return NewNumber(v.RawStr(), NumberTypeUnsigned), nil + case v.IsDecimal(): + return NewNumber(v.RawStr(), NumberTypeDecimal), nil + case v.IsFloat(): + return NewNumber(v.RawStr(), NumberTypeFloat), nil + case v.IsText(): + return NewString(v.RawStr()), nil + case v.IsBinary(): + return NewBlob(v.RawStr()), nil + case v.IsDateTime(), v.IsTimestamp(): + return NewDateTime(v.RawStr()), nil + case v.IsDate(): + return NewDate(v.RawStr()), nil + case v.IsTime(): + return NewTime(v.RawStr()), nil + default: + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "cannot coerce %v as a JSON type", v) + } +} + +func (v *Value) Depth() int { + var depth int + switch v.t { + case TypeObject: + for _, kv := range v.o.kvs { + depth = max(kv.v.Depth(), depth) + } + case TypeArray: + for _, a := range v.a { + depth = max(a.Depth(), depth) + } + } + return depth + 1 +} + +func (v *Value) Len() int { + switch v.t { + case TypeArray: + return len(v.a) + case TypeObject: + return v.o.Len() + default: + return 1 + } +} diff --git a/go/vt/vtgate/evalengine/internal/json/json_path.go b/go/mysql/json/json_path.go similarity index 99% rename from go/vt/vtgate/evalengine/internal/json/json_path.go rename to go/mysql/json/json_path.go index 8401fafffa5..8ff0ac4a8c9 100644 --- a/go/vt/vtgate/evalengine/internal/json/json_path.go +++ b/go/mysql/json/json_path.go @@ -152,7 +152,7 @@ func (m *matcher) any(p *Path, v *Value) { m.value(p, v) if obj, ok := v.Object(); ok { - obj.Visit(func(_ []byte, v *Value) { + obj.Visit(func(_ string, v *Value) { m.any(p, v) }) } @@ -182,7 +182,7 @@ func (m *matcher) value(p *Path, v *Value) { } case jpMemberAny: if obj, ok := v.Object(); ok { - obj.Visit(func(_ []byte, v *Value) { + obj.Visit(func(_ string, v *Value) { m.value(p.next, v) }) } diff --git a/go/vt/vtgate/evalengine/internal/json/json_path_test.go b/go/mysql/json/json_path_test.go similarity index 99% rename from go/vt/vtgate/evalengine/internal/json/json_path_test.go rename to go/mysql/json/json_path_test.go index 7dc7e7f58ba..63313b55ac3 100644 --- a/go/vt/vtgate/evalengine/internal/json/json_path_test.go +++ b/go/mysql/json/json_path_test.go @@ -17,9 +17,8 @@ limitations under the License. package json import ( + "slices" "testing" - - "golang.org/x/exp/slices" ) func TestParseJSONPath(t *testing.T) { diff --git a/go/mysql/json/marshal.go b/go/mysql/json/marshal.go new file mode 100644 index 00000000000..e1ea916151d --- /dev/null +++ b/go/mysql/json/marshal.go @@ -0,0 +1,181 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json + +import ( + "fmt" + "math/big" + + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/mysql/hex" + querypb "vitess.io/vitess/go/vt/proto/query" + + "vitess.io/vitess/go/sqltypes" +) + +// MarshalSQLTo appends marshaled v to dst and returns the result in +// the form like `JSON_OBJECT` or `JSON_ARRAY` to ensure we don't +// lose any type information. +func (v *Value) MarshalSQLTo(dst []byte) []byte { + return v.marshalSQLInternal(true, dst) +} + +func (v *Value) marshalSQLInternal(top bool, dst []byte) []byte { + switch v.Type() { + case TypeObject: + dst = append(dst, "JSON_OBJECT("...) + for i, vv := range v.o.kvs { + if i != 0 { + dst = append(dst, ", "...) + } + dst = append(dst, "_utf8mb4'"...) + dst = append(dst, vv.k...) + dst = append(dst, "', "...) + dst = vv.v.marshalSQLInternal(false, dst) + } + dst = append(dst, ')') + return dst + case TypeArray: + dst = append(dst, "JSON_ARRAY("...) + for i, vv := range v.a { + if i != 0 { + dst = append(dst, ", "...) + } + dst = vv.marshalSQLInternal(false, dst) + } + dst = append(dst, ')') + return dst + case TypeString: + if top { + dst = append(dst, "CAST(JSON_QUOTE("...) + } + dst = append(dst, "_utf8mb4"...) + dst = append(dst, sqltypes.EncodeStringSQL(v.s)...) + if top { + dst = append(dst, ") as JSON)"...) + } + return dst + case TypeDate: + if top { + dst = append(dst, "CAST("...) + } + dst = append(dst, "date '"...) + dst = append(dst, v.MarshalDate()...) + dst = append(dst, "'"...) + if top { + dst = append(dst, " as JSON)"...) + } + return dst + case TypeDateTime: + if top { + dst = append(dst, "CAST("...) + } + dst = append(dst, "timestamp '"...) + dst = append(dst, v.MarshalDateTime()...) + dst = append(dst, "'"...) + if top { + dst = append(dst, " as JSON)"...) + } + return dst + case TypeTime: + if top { + dst = append(dst, "CAST("...) + } + dst = append(dst, "time '"...) + dst = append(dst, v.MarshalTime()...) + dst = append(dst, "'"...) + if top { + dst = append(dst, " as JSON)"...) + } + return dst + case TypeBlob: + if top { + dst = append(dst, "CAST("...) + } + dst = append(dst, "x'"...) + dst = append(dst, hex.EncodeBytes(hack.StringBytes(v.s))...) + dst = append(dst, "'"...) + if top { + dst = append(dst, " as JSON)"...) + } + return dst + case TypeBit: + if top { + dst = append(dst, "CAST("...) + } + var i big.Int + i.SetBytes([]byte(v.s)) + dst = append(dst, "b'"...) + dst = append(dst, i.Text(2)...) + dst = append(dst, "'"...) + if top { + dst = append(dst, " as JSON)"...) + } + return dst + case TypeNumber: + if top { + dst = append(dst, "CAST("...) + } + dst = append(dst, v.s...) + if top { + dst = append(dst, " as JSON)"...) + } + return dst + case TypeBoolean: + if top { + dst = append(dst, "CAST(_utf8mb4'"...) + } + if v == ValueTrue { + dst = append(dst, "true"...) + } else { + dst = append(dst, "false"...) + } + if top { + dst = append(dst, "' as JSON)"...) + } + return dst + case TypeNull: + if top { + dst = append(dst, "CAST(_utf8mb4'"...) + } + dst = append(dst, "null"...) + if top { + dst = append(dst, "' as JSON)"...) + } + return dst + default: + panic(fmt.Errorf("BUG: unexpected Value type: %d", v.t)) + } +} + +// MarshalSQLValue converts the byte representation of a json value +// and returns it formatted by MarshalSQLTo +func MarshalSQLValue(buf []byte) (*sqltypes.Value, error) { + var parser Parser + if len(buf) == 0 { + buf = sqltypes.NullBytes + } + jsonVal, err := parser.ParseBytes(buf) + if err != nil { + return nil, err + } + newVal := sqltypes.MakeTrusted(querypb.Type_JSON, jsonVal.MarshalSQLTo(nil)) + if err != nil { + return nil, err + } + return &newVal, nil +} diff --git a/go/vt/vtgate/evalengine/internal/json/parser.go b/go/mysql/json/parser.go similarity index 69% rename from go/vt/vtgate/evalengine/internal/json/parser.go rename to go/mysql/json/parser.go index 909bfa241fd..322c623058e 100644 --- a/go/vt/vtgate/evalengine/internal/json/parser.go +++ b/go/mysql/json/parser.go @@ -18,14 +18,21 @@ limitations under the License. package json import ( + "bytes" + "encoding/base64" "fmt" + "slices" "strconv" "strings" + "time" "unicode/utf16" - "golang.org/x/exp/slices" + "vitess.io/vitess/go/mysql/fastparse" "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/mysql/datetime" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/format" ) // Parser parses JSON. @@ -188,14 +195,16 @@ func parseValue(s string, c *cache, depth int) (*Value, string, error) { return ValueNull, s[len("null"):], nil } - ns, tail, err := parseRawNumber(s) - if err != nil { - return nil, tail, fmt.Errorf("cannot parse number: %s", err) + flen, ok := readFloat(s) + if !ok { + return nil, s[flen:], fmt.Errorf("invalid number in JSON string: %q", s) } + v := c.getValue() v.t = TypeNumber - v.s = ns - return v, tail, nil + v.s = s[:flen] + v.n = numberTypeRaw + return v, s[flen:], nil } func parseArray(s string, c *cache, depth int) (*Value, string, error) { @@ -456,29 +465,63 @@ func parseRawString(s string) (string, string, error) { } } -func parseRawNumber(s string) (string, string, error) { - // The caller must ensure len(s) > 0 +func readFloat(s string) (i int, ok bool) { + // optional sign + if i >= len(s) { + return + } + if s[i] == '+' || s[i] == '-' { + i++ + } - // Find the end of the number. - for i := 0; i < len(s); i++ { - ch := s[i] - if (ch >= '0' && ch <= '9') || ch == '.' || ch == '-' || ch == 'e' || ch == 'E' || ch == '+' { + // digits + sawdot := false + sawdigits := false + nd := 0 +loop: + for ; i < len(s); i++ { + switch c := s[i]; true { + case c == '.': + if sawdot { + break loop + } + sawdot = true continue - } - if i == 0 || i == 1 && (s[0] == '-' || s[0] == '+') { - if len(s[i:]) >= 3 { - xs := s[i : i+3] - if strings.EqualFold(xs, "inf") || strings.EqualFold(xs, "nan") { - return s[:i+3], s[i+3:], nil - } + + case '0' <= c && c <= '9': + sawdigits = true + if c == '0' && nd == 0 { // ignore leading zeros + continue } - return "", s, fmt.Errorf("unexpected char: %q", s[:1]) + nd++ + continue } - ns := s[:i] - s = s[i:] - return ns, s, nil + break } - return s, "", nil + if !sawdigits { + return + } + + // optional exponent moves decimal point. + // if we read a very large, very long number, + // just be sure to move the decimal point by + // a lot (say, 100000). it doesn't matter if it's + // not the exact number. + if i < len(s) && (s[i] == 'e' || s[i] == 'E') { + i++ + if i >= len(s) { + return + } + if s[i] == '+' || s[i] == '-' { + i++ + } + if i >= len(s) || s[i] < '0' || s[i] > '9' { + return + } + for ; i < len(s) && ('0' <= s[i] && s[i] <= '9'); i++ { + } + } + return i, true } // Object represents JSON object. @@ -493,6 +536,14 @@ func (o *Object) reset() { o.kvs = o.kvs[:0] } +func (o *Object) Keys() []string { + keys := make([]string, 0, len(o.kvs)) + for _, kv := range o.kvs { + keys = append(keys, kv.k) + } + return keys +} + // MarshalTo appends marshaled o to dst and returns the result. func (o *Object) MarshalTo(dst []byte) []byte { dst = append(dst, '{') @@ -533,8 +584,18 @@ func (o *Object) sort() { return } - slices.SortStableFunc(o.kvs, func(a, b kv) bool { - return a.k < b.k + slices.SortStableFunc(o.kvs, func(a, b kv) int { + // TODO: switch to cmp.Compare for Go 1.21+. + // + // https://pkg.go.dev/cmp@master#Compare. + switch { + case a.k < b.k: + return -1 + case a.k > b.k: + return 1 + default: + return 0 + } }) uniq := o.kvs[:1] for _, kv := range o.kvs[1:] { @@ -585,12 +646,12 @@ func (o *Object) Get(key string) *Value { // of the parsed JSON. // // f cannot hold key and/or v after returning. -func (o *Object) Visit(f func(key []byte, v *Value)) { +func (o *Object) Visit(f func(key string, v *Value)) { if o == nil { return } for _, kv := range o.kvs { - f(hack.StringBytes(kv.k), kv.v) + f(kv.k, kv.v) } } @@ -605,6 +666,60 @@ type Value struct { a []*Value s string t Type + n NumberType +} + +func (v *Value) MarshalDate() string { + if d, ok := v.Date(); ok { + return d.ToStdTime(time.Local).Format("2006-01-02") + } + return "" +} + +func (v *Value) MarshalDateTime() string { + if dt, ok := v.DateTime(); ok { + return dt.ToStdTime(time.Local).Format("2006-01-02 15:04:05.000000") + } + return "" +} + +func (v *Value) MarshalTime() string { + if t, ok := v.Time(); ok { + t = t.RoundForJSON() + diff := t.ToDuration() + var neg bool + if diff < 0 { + diff = -diff + neg = true + } + + b := strings.Builder{} + if neg { + b.WriteByte('-') + } + + hours := diff / time.Hour + diff -= hours * time.Hour + fmt.Fprintf(&b, "%02d", hours) + minutes := diff / time.Minute + fmt.Fprintf(&b, ":%02d", minutes) + diff -= minutes * time.Minute + seconds := diff / time.Second + fmt.Fprintf(&b, ":%02d", seconds) + diff -= seconds * time.Second + fmt.Fprintf(&b, ".%06d", diff/1000) + return b.String() + } + return "" +} + +func (v *Value) marshalFloat(dst []byte) []byte { + f, _ := v.Float64() + buf := format.FormatFloat(f) + if bytes.IndexByte(buf, '.') == -1 && bytes.IndexByte(buf, 'e') == -1 { + buf = append(buf, '.', '0') + } + return append(dst, buf...) } // MarshalTo appends marshaled v to dst and returns the result. @@ -629,11 +744,31 @@ func (v *Value) MarshalTo(dst []byte) []byte { return dst case TypeString: return escapeString(dst, v.s) + case TypeDate: + return escapeString(dst, v.MarshalDate()) + case TypeDateTime: + return escapeString(dst, v.MarshalDateTime()) + case TypeTime: + return escapeString(dst, v.MarshalTime()) + case TypeBlob, TypeBit: + const prefix = "base64:type15:" + + size := 2 + len(prefix) + base64.StdEncoding.EncodedLen(len(v.s)) + dst := make([]byte, size) + dst[0] = '"' + copy(dst[1:], prefix) + base64.StdEncoding.Encode(dst[len(prefix)+1:], []byte(v.s)) + dst[size-1] = '"' + return dst case TypeNumber: + if v.NumberType() == NumberTypeFloat { + return v.marshalFloat(dst) + } return append(dst, v.s...) - case TypeTrue: - return append(dst, "true"...) - case TypeFalse: + case TypeBoolean: + if v == ValueTrue { + return append(dst, "true"...) + } return append(dst, "false"...) case TypeNull: return append(dst, "null"...) @@ -656,32 +791,80 @@ func (v *Value) String() string { return hack.String(b) } +func (v *Value) ToBoolean() bool { + switch v.Type() { + case TypeNumber: + switch v.NumberType() { + case NumberTypeSigned: + i, _ := v.Int64() + return i != 0 + case NumberTypeUnsigned: + i, _ := v.Uint64() + return i != 0 + case NumberTypeFloat: + f, _ := v.Float64() + return f != 0.0 + case NumberTypeDecimal: + d, _ := v.Decimal() + return !d.IsZero() + } + } + return true +} + // Type represents JSON type. -type Type int +type Type int32 +// See https://dev.mysql.com/doc/refman/8.0/en/json.html#json-comparison for the ordering here const ( // TypeNull is JSON null. - TypeNull Type = 0 + TypeNull Type = iota + + // TypeNumber is JSON number type. + TypeNumber + + // TypeString is JSON string type. + TypeString // TypeObject is JSON object type. - TypeObject Type = 1 + TypeObject // TypeArray is JSON array type. - TypeArray Type = 2 + TypeArray - // TypeString is JSON string type. - TypeString Type = 3 + // TypeBoolean is JSON boolean. + TypeBoolean - // TypeNumber is JSON number type. - TypeNumber Type = 4 + // TypeDate is JSON date. + TypeDate - // TypeTrue is JSON true. - TypeTrue Type = 5 + // TypeTime is JSON time. + TypeTime - // TypeFalse is JSON false. - TypeFalse Type = 6 + // TypeDateTime is JSON time. + TypeDateTime - typeRawString Type = 7 + // TypeOpaque is JSON opaque type. + TypeOpaque + + // TypeBit is JSON bit string. + TypeBit + + // TypeBlob is JSON blob. + TypeBlob + + typeRawString +) + +type NumberType int32 + +const ( + NumberTypeUnknown NumberType = iota + NumberTypeSigned + NumberTypeUnsigned + NumberTypeDecimal + NumberTypeFloat + numberTypeRaw ) // String returns string representation of t. @@ -695,10 +878,20 @@ func (t Type) String() string { return "string" case TypeNumber: return "number" - case TypeTrue: - return "true" - case TypeFalse: - return "false" + case TypeBoolean: + return "boolean" + case TypeBlob: + return "blob" + case TypeBit: + return "bit" + case TypeDate: + return "date" + case TypeTime: + return "time" + case TypeDateTime: + return "datetime" + case TypeOpaque: + return "opaque" case TypeNull: return "null" @@ -721,6 +914,37 @@ func (v *Value) Type() Type { return v.t } +func (v *Value) Date() (datetime.Date, bool) { + switch v.t { + case TypeDate: + return datetime.ParseDate(v.s) + case TypeDateTime: + dt, _, ok := datetime.ParseDateTime(v.s, datetime.DefaultPrecision) + return dt.Date, ok + } + return datetime.Date{}, false +} + +func (v *Value) DateTime() (datetime.DateTime, bool) { + switch v.t { + case TypeDate: + d, ok := datetime.ParseDate(v.s) + return datetime.DateTime{Date: d}, ok + case TypeDateTime: + dt, _, ok := datetime.ParseDateTime(v.s, datetime.DefaultPrecision) + return dt, ok + } + return datetime.DateTime{}, false +} + +func (v *Value) Time() (datetime.Time, bool) { + if v.t != TypeTime { + return datetime.Time{}, false + } + t, _, ok := datetime.ParseTime(v.s, datetime.DefaultPrecision) + return t, ok +} + // Object returns the underlying JSON object for the v. func (v *Value) Object() (*Object, bool) { if v.t != TypeObject { @@ -749,21 +973,79 @@ func (v *Value) Raw() string { return v.s } +func (v *Value) NumberType() NumberType { + if v.t != TypeNumber { + return NumberTypeUnknown + } + if v.n == numberTypeRaw { + v.n = parseNumberType(v.s) + } + return v.n +} + +func parseNumberType(ns string) NumberType { + _, err := fastparse.ParseInt64(ns, 10) + if err == nil { + return NumberTypeSigned + } + _, err = fastparse.ParseUint64(ns, 10) + if err == nil { + return NumberTypeUnsigned + } + _, err = fastparse.ParseFloat64(ns) + if err == nil { + return NumberTypeFloat + } + return NumberTypeUnknown +} + +func (v *Value) Int64() (int64, bool) { + i, err := fastparse.ParseInt64(v.s, 10) + if err != nil { + return i, false + } + return i, true +} + +func (v *Value) Uint64() (uint64, bool) { + u, err := fastparse.ParseUint64(v.s, 10) + if err != nil { + return u, false + } + return u, true +} + +func (v *Value) Float64() (float64, bool) { + val, err := fastparse.ParseFloat64(v.s) + if err != nil { + return val, false + } + return val, true +} + +func (v *Value) Decimal() (decimal.Decimal, bool) { + dec, err := decimal.NewFromString(v.s) + if err != nil { + return decimal.Zero, false + } + return dec, true +} + // Bool returns the underlying JSON bool for the v. // // Use GetBool if you don't need error handling. func (v *Value) Bool() (bool, bool) { - if v.t == TypeTrue { + if v == ValueTrue { return true, true } - if v.t == TypeFalse { + if v == ValueFalse { return false, true } return false, false } var ( - ValueTrue = &Value{t: TypeTrue} - ValueFalse = &Value{t: TypeFalse} + ValueTrue = &Value{t: TypeBoolean} + ValueFalse = &Value{t: TypeBoolean} ValueNull = &Value{t: TypeNull} ) diff --git a/go/vt/vtgate/evalengine/internal/json/parser_test.go b/go/mysql/json/parser_test.go similarity index 92% rename from go/vt/vtgate/evalengine/internal/json/parser_test.go rename to go/mysql/json/parser_test.go index 94b4a3a62a6..b327cd776ba 100644 --- a/go/vt/vtgate/evalengine/internal/json/parser_test.go +++ b/go/mysql/json/parser_test.go @@ -28,10 +28,13 @@ func TestParseRawNumber(t *testing.T) { f := func(s, expectedRN, expectedTail string) { t.Helper() - rn, tail, err := parseRawNumber(s) - if err != nil { - t.Fatalf("unexpected error: %s", err) + flen, ok := readFloat(s) + if !ok { + t.Fatalf("unexpected error when parsing '%s'", s) } + + rn, tail := s[:flen], s[flen:] + if rn != expectedRN { t.Fatalf("unexpected raw number; got %q; want %q", rn, expectedRN) } @@ -53,24 +56,18 @@ func TestParseRawNumber(t *testing.T) { f("12.tail", "12.", "tail") f(".2tail", ".2", "tail") f("-.2tail", "-.2", "tail") - f("NaN", "NaN", "") - f("nantail", "nan", "tail") - f("inf", "inf", "") - f("Inftail", "Inf", "tail") - f("-INF", "-INF", "") - f("-Inftail", "-Inf", "tail") }) t.Run("error", func(t *testing.T) { f := func(s, expectedTail string) { t.Helper() - _, tail, err := parseRawNumber(s) - if err == nil { + flen, ok := readFloat(s) + if ok { t.Fatalf("expecting non-nil error") } - if tail != expectedTail { - t.Fatalf("unexpected tail; got %q; want %q", tail, expectedTail) + if s[flen:] != expectedTail { + t.Fatalf("unexpected tail; got %q; want %q", s[flen:], expectedTail) } } @@ -270,13 +267,6 @@ func TestParserParse(t *testing.T) { } }) - t.Run("invalid-number", func(t *testing.T) { - _, err := p.Parse("123+456") - if err != nil { - t.Fatalf("unexpected error when parsing int") - } - }) - t.Run("empty-json", func(t *testing.T) { _, err := p.Parse("") if err == nil { @@ -470,8 +460,7 @@ func TestParserParse(t *testing.T) { if err != nil { t.Fatalf("cannot parse true: %s", err) } - tp := v.Type() - if tp != TypeTrue || tp.String() != "true" { + if v != ValueTrue { t.Fatalf("unexpected value obtained for true: %#v", v) } b, ok := v.Bool() @@ -492,8 +481,7 @@ func TestParserParse(t *testing.T) { if err != nil { t.Fatalf("cannot parse false: %s", err) } - tp := v.Type() - if tp != TypeFalse || tp.String() != "false" { + if v != ValueFalse { t.Fatalf("unexpected value obtained for false: %#v", v) } b, ok := v.Bool() @@ -518,6 +506,9 @@ func TestParserParse(t *testing.T) { if tp != TypeNumber || tp.String() != "number" { t.Fatalf("unexpected type obtained for integer: %#v", v) } + if v.NumberType() != NumberTypeSigned { + t.Fatalf("unexpected non integer value: %#v", v) + } s := v.String() if s != "12345" { t.Fatalf("unexpected string representation of integer; got %q; want %q", s, "12345") @@ -572,18 +563,57 @@ func TestParserParse(t *testing.T) { t.Run("float", func(t *testing.T) { v, err := p.Parse("-12.345") if err != nil { - t.Fatalf("cannot parse integer: %s", err) + t.Fatalf("cannot parse float: %s", err) } tp := v.Type() if tp != TypeNumber || tp.String() != "number" { t.Fatalf("unexpected type obtained for integer: %#v", v) } + if v.NumberType() != NumberTypeFloat { + t.Fatalf("unexpected integer value: %#v", v) + } s := v.String() if s != "-12.345" { t.Fatalf("unexpected string representation of integer; got %q; want %q", s, "-12.345") } }) + t.Run("float with zero", func(t *testing.T) { + v, err := p.Parse("12.0") + if err != nil { + t.Fatalf("cannot parse float: %s", err) + } + tp := v.Type() + if tp != TypeNumber || tp.String() != "number" { + t.Fatalf("unexpected type obtained for number: %#v", v) + } + if v.NumberType() != NumberTypeFloat { + t.Fatalf("unexpected integer value: %#v", v) + } + s := v.String() + if s != "12.0" { + t.Fatalf("unexpected string representation of float; got %q; want %q", s, "12.0") + } + }) + + t.Run("float with large exponent", func(t *testing.T) { + v, err := p.Parse("1e100") + if err != nil { + t.Fatalf("cannot parse float: %s", err) + } + tp := v.Type() + if tp != TypeNumber || tp.String() != "number" { + t.Fatalf("unexpected type obtained for number: %#v", v) + } + if v.NumberType() != NumberTypeFloat { + t.Fatalf("unexpected integer value: %#v", v) + } + s := v.String() + if s != "1e100" { + t.Fatalf("unexpected string representation of float; got %q; want %q", s, "1e100") + } + }) + t.Run("string", func(t *testing.T) { v, err := p.Parse(`"foo bar"`) if err != nil { diff --git a/go/vt/vtgate/evalengine/internal/json/update.go b/go/mysql/json/update.go similarity index 94% rename from go/vt/vtgate/evalengine/internal/json/update.go rename to go/mysql/json/update.go index 0a808d7a383..eb74af46f49 100644 --- a/go/vt/vtgate/evalengine/internal/json/update.go +++ b/go/mysql/json/update.go @@ -17,7 +17,7 @@ limitations under the License. package json -import "golang.org/x/exp/slices" +import "slices" // Del deletes the entry with the given key from o. func (o *Object) Del(key string) { @@ -29,6 +29,10 @@ func (o *Object) Del(key string) { } } +func (o *Object) Add(key string, value *Value) { + o.kvs = append(o.kvs, kv{key, value}) +} + // Set sets (key, value) entry in the o. // // The value must be unchanged during o lifetime. diff --git a/go/vt/vtgate/evalengine/internal/json/update_test.go b/go/mysql/json/update_test.go similarity index 100% rename from go/vt/vtgate/evalengine/internal/json/update_test.go rename to go/mysql/json/update_test.go diff --git a/go/mysql/json/weights.go b/go/mysql/json/weights.go new file mode 100644 index 00000000000..262fe96e9cf --- /dev/null +++ b/go/mysql/json/weights.go @@ -0,0 +1,169 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json + +import ( + "encoding/binary" + "strings" + + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/mysql/fastparse" +) + +const ( + JSON_KEY_NULL = '\x00' + JSON_KEY_NUMBER_NEG = '\x01' + JSON_KEY_NUMBER_ZERO = '\x02' + JSON_KEY_NUMBER_POS = '\x03' + JSON_KEY_STRING = '\x04' + JSON_KEY_OBJECT = '\x05' + JSON_KEY_ARRAY = '\x06' + JSON_KEY_FALSE = '\x07' + JSON_KEY_TRUE = '\x08' + JSON_KEY_DATE = '\x09' + JSON_KEY_TIME = '\x0A' + JSON_KEY_DATETIME = '\x0B' + JSON_KEY_OPAQUE = '\x0C' +) + +// numericWeightString generates a fixed-width weight string for any JSON +// number. It requires the `num` representation to be normalized, otherwise +// the resulting string will not sort. +func (v *Value) numericWeightString(dst []byte, num string) []byte { + const MaxPadLength = 30 + + var ( + exponent string + exp int64 + significant string + negative bool + original = len(dst) + ) + + if num[0] == '-' { + negative = true + num = num[1:] + } + + if i := strings.IndexByte(num, 'e'); i >= 0 { + exponent = num[i+1:] + num = num[:i] + } + + significant = num + for len(significant) > 0 { + if significant[0] >= '1' && significant[0] <= '9' { + break + } + significant = significant[1:] + } + if len(significant) == 0 { + return append(dst, JSON_KEY_NUMBER_ZERO) + } + + if len(exponent) > 0 { + exp, _ = fastparse.ParseInt64(exponent, 10) + } else { + dec := strings.IndexByte(num, '.') + ofs := len(num) - len(significant) + if dec < 0 { + exp = int64(len(significant) - 1) + } else if ofs < dec { + exp = int64(dec - ofs - 1) + } else { + exp = int64(dec - ofs) + } + } + + if negative { + dst = append(dst, JSON_KEY_NUMBER_NEG) + dst = binary.BigEndian.AppendUint16(dst, uint16(-exp)^(1<<15)) + + for _, ch := range []byte(significant) { + if ch >= '0' && ch <= '9' { + dst = append(dst, '9'-ch+'0') + } + } + for len(dst)-original < MaxPadLength { + dst = append(dst, '9') + } + } else { + dst = append(dst, JSON_KEY_NUMBER_POS) + dst = binary.BigEndian.AppendUint16(dst, uint16(exp)^(1<<15)) + + for _, ch := range []byte(significant) { + if ch >= '0' && ch <= '9' { + dst = append(dst, ch) + } + } + for len(dst)-original < MaxPadLength { + dst = append(dst, '0') + } + } + + return dst +} + +func (v *Value) WeightString(dst []byte) []byte { + switch v.Type() { + case TypeNull: + dst = append(dst, JSON_KEY_NULL) + case TypeNumber: + if v.NumberType() == NumberTypeFloat { + f := v.marshalFloat(nil) + dst = v.numericWeightString(dst, hack.String(f)) + } else { + dst = v.numericWeightString(dst, v.s) + } + case TypeString: + dst = append(dst, JSON_KEY_STRING) + dst = append(dst, v.s...) + case TypeObject: + // MySQL compat: we follow the same behavior as MySQL does for weight strings in JSON, + // where Objects and Arrays are only sorted by their length and not by the values + // of their contents. + // Note that in MySQL, generating the weight string of a JSON Object or Array will actually + // print a warning in the logs! We're not printing anything. + dst = append(dst, JSON_KEY_OBJECT) + dst = binary.BigEndian.AppendUint32(dst, uint32(v.o.Len())) + case TypeArray: + dst = append(dst, JSON_KEY_ARRAY) + dst = binary.BigEndian.AppendUint32(dst, uint32(len(v.a))) + case TypeBoolean: + switch v { + case ValueTrue: + dst = append(dst, JSON_KEY_TRUE) + case ValueFalse: + dst = append(dst, JSON_KEY_FALSE) + default: + panic("invalid JSON Boolean") + } + case TypeDate: + dst = append(dst, JSON_KEY_DATE) + dst = append(dst, v.MarshalDate()...) + case TypeDateTime: + dst = append(dst, JSON_KEY_DATETIME) + dst = append(dst, v.MarshalDateTime()...) + case TypeTime: + dst = append(dst, JSON_KEY_TIME) + dst = append(dst, v.MarshalTime()...) + case TypeOpaque, TypeBit, TypeBlob: + dst = append(dst, JSON_KEY_OPAQUE) + dst = append(dst, v.s...) + } + return dst +} diff --git a/go/mysql/json/weights_test.go b/go/mysql/json/weights_test.go new file mode 100644 index 00000000000..9bbcd548e50 --- /dev/null +++ b/go/mysql/json/weights_test.go @@ -0,0 +1,44 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json + +import ( + "bytes" + "testing" + + "vitess.io/vitess/go/mysql/format" +) + +func TestWeightStrings(t *testing.T) { + var cases = []struct { + l, r *Value + }{ + {NewNumber("-2.3742940301417033", NumberTypeFloat), NewNumber("-0.024384053736998118", NumberTypeFloat)}, + {NewNumber("2.3742940301417033", NumberTypeFloat), NewNumber("20.3742940301417033", NumberTypeFloat)}, + {NewNumber(string(format.FormatFloat(1000000000000000.0)), NumberTypeFloat), NewNumber("100000000000000000", NumberTypeDecimal)}, + } + + for _, tc := range cases { + l := tc.l.WeightString(nil) + r := tc.r.WeightString(nil) + + if bytes.Compare(l, r) >= 0 { + t.Errorf("expected %s < %s\nl = %v\n = %v\nr = %v\n = %v", + tc.l.String(), tc.r.String(), l, string(l), r, string(r)) + } + } +} diff --git a/go/mysql/query.go b/go/mysql/query.go index 0107e7606bc..7cfeafd258f 100644 --- a/go/mysql/query.go +++ b/go/mysql/query.go @@ -22,6 +22,8 @@ import ( "strconv" "strings" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -47,7 +49,7 @@ func (c *Conn) WriteComQuery(query string) error { pos++ copy(data[pos:], query) if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, err.Error()) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, err.Error()) } return nil } @@ -61,7 +63,7 @@ func (c *Conn) writeComInitDB(db string) error { pos++ copy(data[pos:], db) if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, err.Error()) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, err.Error()) } return nil } @@ -74,7 +76,7 @@ func (c *Conn) writeComSetOption(operation uint16) error { pos++ writeUint16(data, pos, operation) if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, err.Error()) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, err.Error()) } return nil } @@ -84,36 +86,36 @@ func (c *Conn) writeComSetOption(operation uint16) error { func (c *Conn) readColumnDefinition(field *querypb.Field, index int) error { colDef, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } defer c.recycleReadPacket() // Catalog is ignored, always set to "def" pos, ok := skipLenEncString(colDef, 0) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "skipping col %v catalog failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "skipping col %v catalog failed", index) } // schema, table, orgTable, name and OrgName are strings. field.Database, pos, ok = readLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v schema failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v schema failed", index) } field.Table, pos, ok = readLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v table failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v table failed", index) } field.OrgTable, pos, ok = readLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v org_table failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v org_table failed", index) } field.Name, pos, ok = readLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v name failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v name failed", index) } field.OrgName, pos, ok = readLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v org_name failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v org_name failed", index) } // Skip length of fixed-length fields. @@ -122,37 +124,37 @@ func (c *Conn) readColumnDefinition(field *querypb.Field, index int) error { // characterSet is a uint16. characterSet, pos, ok := readUint16(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v characterSet failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v characterSet failed", index) } field.Charset = uint32(characterSet) // columnLength is a uint32. field.ColumnLength, pos, ok = readUint32(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v columnLength failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v columnLength failed", index) } // type is one byte. t, pos, ok := readByte(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v type failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v type failed", index) } // flags is 2 bytes. flags, pos, ok := readUint16(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v flags failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v flags failed", index) } // Convert MySQL type to Vitess type. field.Type, err = sqltypes.MySQLToType(int64(t), int64(flags)) if err != nil { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "MySQLToType(%v,%v) failed for column %v: %v", t, flags, index, err) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "MySQLToType(%v,%v) failed for column %v: %v", t, flags, index, err) } // Decimals is a byte. decimals, _, ok := readByte(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v decimals failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v decimals failed", index) } field.Decimals = uint32(decimals) @@ -182,7 +184,7 @@ func (c *Conn) readColumnDefinition(field *querypb.Field, index int) error { func (c *Conn) readColumnDefinitionType(field *querypb.Field, index int) error { colDef, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } defer c.recycleReadPacket() @@ -190,27 +192,27 @@ func (c *Conn) readColumnDefinitionType(field *querypb.Field, index int) error { // strings, all skipped. pos, ok := skipLenEncString(colDef, 0) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "skipping col %v catalog failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "skipping col %v catalog failed", index) } pos, ok = skipLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "skipping col %v schema failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "skipping col %v schema failed", index) } pos, ok = skipLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "skipping col %v table failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "skipping col %v table failed", index) } pos, ok = skipLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "skipping col %v org_table failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "skipping col %v org_table failed", index) } pos, ok = skipLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "skipping col %v name failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "skipping col %v name failed", index) } pos, ok = skipLenEncString(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "skipping col %v org_name failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "skipping col %v org_name failed", index) } // Skip length of fixed-length fields. @@ -219,31 +221,31 @@ func (c *Conn) readColumnDefinitionType(field *querypb.Field, index int) error { // characterSet is a uint16. _, pos, ok = readUint16(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v characterSet failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v characterSet failed", index) } // columnLength is a uint32. _, pos, ok = readUint32(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v columnLength failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v columnLength failed", index) } // type is one byte t, pos, ok := readByte(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v type failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v type failed", index) } // flags is 2 bytes flags, _, ok := readUint16(colDef, pos) if !ok { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extracting col %v flags failed", index) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extracting col %v flags failed", index) } // Convert MySQL type to Vitess type. field.Type, err = sqltypes.MySQLToType(int64(t), int64(flags)) if err != nil { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "MySQLToType(%v,%v) failed for column %v: %v", t, flags, index, err) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "MySQLToType(%v,%v) failed for column %v: %v", t, flags, index, err) } // skip decimals @@ -269,7 +271,7 @@ func (c *Conn) parseRow(data []byte, fields []*querypb.Field, reader func([]byte var ok bool s, pos, ok = reader(data, pos) if !ok { - return nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "decoding string failed") + return nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "decoding string failed") } result = append(result, sqltypes.MakeTrusted(fields[i].Type, s)) } @@ -310,7 +312,7 @@ func (c *Conn) ExecuteFetch(query string, maxrows int, wantfields bool) (result func (c *Conn) ExecuteFetchMulti(query string, maxrows int, wantfields bool) (result *sqltypes.Result, more bool, err error) { defer func() { if err != nil { - if sqlerr, ok := err.(*SQLError); ok { + if sqlerr, ok := err.(*sqlerror.SQLError); ok { sqlerr.Query = query } } @@ -334,7 +336,7 @@ func (c *Conn) ExecuteFetchMulti(query string, maxrows int, wantfields bool) (re func (c *Conn) ExecuteFetchWithWarningCount(query string, maxrows int, wantfields bool) (result *sqltypes.Result, warnings uint16, err error) { defer func() { if err != nil { - if sqlerr, ok := err.(*SQLError); ok { + if sqlerr, ok := err.(*sqlerror.SQLError); ok { sqlerr.Query = query } } @@ -394,7 +396,7 @@ func (c *Conn) ReadQueryResult(maxrows int, wantfields bool) (*sqltypes.Result, // EOF is only present here if it's not deprecated. data, err := c.readEphemeralPacket() if err != nil { - return nil, false, 0, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return nil, false, 0, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } if c.isEOFPacket(data) { @@ -416,7 +418,7 @@ func (c *Conn) ReadQueryResult(maxrows int, wantfields bool) (*sqltypes.Result, for { data, err := c.readEphemeralPacket() if err != nil { - return nil, false, 0, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return nil, false, 0, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } if c.isEOFPacket(data) { @@ -481,7 +483,7 @@ func (c *Conn) drainResults() error { for { data, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } if c.isEOFPacket(data) { c.recycleReadPacket() @@ -497,11 +499,11 @@ func (c *Conn) drainResults() error { func (c *Conn) readComQueryResponse() (int, *PacketOK, error) { data, err := c.readEphemeralPacket() if err != nil { - return 0, nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return 0, nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } defer c.recycleReadPacket() if len(data) == 0 { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "invalid empty COM_QUERY response packet") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_QUERY response packet") } switch data[0] { @@ -517,10 +519,10 @@ func (c *Conn) readComQueryResponse() (int, *PacketOK, error) { } n, pos, ok := readLenEncInt(data, 0) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "cannot get column number") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "cannot get column number") } if pos != len(data) { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extra data in COM_QUERY response") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extra data in COM_QUERY response") } return int(n), &PacketOK{}, nil } @@ -550,32 +552,32 @@ func (c *Conn) parseComStmtExecute(prepareData map[uint32]*PrepareData, data []b // statement ID stmtID, pos, ok := readUint32(payload, 0) if !ok { - return 0, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading statement ID failed") + return 0, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "reading statement ID failed") } prepare, ok := prepareData[stmtID] if !ok { - return 0, 0, NewSQLError(CRCommandsOutOfSync, SSUnknownSQLState, "statement ID is not found from record") + return 0, 0, sqlerror.NewSQLError(sqlerror.CRCommandsOutOfSync, sqlerror.SSUnknownSQLState, "statement ID is not found from record") } // cursor type flags cursorType, pos, ok := readByte(payload, pos) if !ok { - return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading cursor type flags failed") + return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "reading cursor type flags failed") } // iteration count iterCount, pos, ok := readUint32(payload, pos) if !ok { - return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading iteration count failed") + return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "reading iteration count failed") } if iterCount != uint32(1) { - return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "iteration count is not equal to 1") + return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "iteration count is not equal to 1") } if prepare.ParamsCount > 0 { - bitMap, pos, ok = readBytes(payload, pos, int((prepare.ParamsCount+7)/8)) + bitMap, pos, ok = readBytes(payload, pos, (int(prepare.ParamsCount)+7)/8) if !ok { - return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading NULL-bitmap failed") + return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "reading NULL-bitmap failed") } } @@ -585,18 +587,18 @@ func (c *Conn) parseComStmtExecute(prepareData map[uint32]*PrepareData, data []b for i := uint16(0); i < prepare.ParamsCount; i++ { mysqlType, pos, ok = readByte(payload, pos) if !ok { - return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading parameter type failed") + return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "reading parameter type failed") } flags, pos, ok = readByte(payload, pos) if !ok { - return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "reading parameter flags failed") + return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "reading parameter flags failed") } // convert MySQL type to internal type. valType, err := sqltypes.MySQLToType(int64(mysqlType), int64(flags)) if err != nil { - return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "MySQLToType(%v,%v) failed: %v", mysqlType, flags, err) + return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "MySQLToType(%v,%v) failed: %v", mysqlType, flags, err) } prepare.ParamsType[i] = int32(valType) @@ -618,7 +620,7 @@ func (c *Conn) parseComStmtExecute(prepareData map[uint32]*PrepareData, data []b val, pos, ok = c.parseStmtArgs(payload, querypb.Type(prepare.ParamsType[i]), pos) } if !ok { - return stmtID, 0, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "decoding parameter value failed: %v", prepare.ParamsType[i]) + return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "decoding parameter value failed: %v", prepare.ParamsType[i]) } prepare.BindVars[parameterID] = sqltypes.ValueBindVariable(val) @@ -1073,7 +1075,9 @@ func (c *Conn) writePrepare(fld []*querypb.Field, prepare *PrepareData) error { if err := c.writeColumnDefinition(&querypb.Field{ Name: "?", Type: sqltypes.VarBinary, - Charset: 63}); err != nil { + Charset: collations.CollationBinaryID, + Flags: uint32(querypb.MySqlFlag_BINARY_FLAG), + }); err != nil { return err } } @@ -1517,3 +1521,17 @@ func val2MySQLLen(v sqltypes.Value) (int, error) { } return length, nil } + +func FlagsForColumn(t sqltypes.Type, col collations.ID) uint32 { + var fl uint32 + if sqltypes.IsNumber(t) { + fl |= uint32(querypb.MySqlFlag_NUM_FLAG) + } + if sqltypes.IsUnsigned(t) { + fl |= uint32(querypb.MySqlFlag_UNSIGNED_FLAG) + } + if sqltypes.IsQuoted(t) && col == collations.CollationBinaryID { + fl |= uint32(querypb.MySqlFlag_BINARY_FLAG) + } + return fl +} diff --git a/go/mysql/query_test.go b/go/mysql/query_test.go index 8305103c891..07012f83b9f 100644 --- a/go/mysql/query_test.go +++ b/go/mysql/query_test.go @@ -24,6 +24,10 @@ import ( "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/mysql/sqlerror" + + "vitess.io/vitess/go/mysql/collations" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -48,8 +52,10 @@ func MockPrepareData(t *testing.T) (*PrepareData, *sqltypes.Result) { result := &sqltypes.Result{ Fields: []*querypb.Field{ { - Name: "id", - Type: querypb.Type_INT32, + Name: "id", + Type: querypb.Type_INT32, + Charset: collations.CollationBinaryID, + Flags: uint32(querypb.MySqlFlag_NUM_FLAG), }, }, Rows: [][]sqltypes.Value{ @@ -374,7 +380,7 @@ func TestSQLErrorOnServerClose(t *testing.T) { // We should be getting a Connection lost error. _, _, _, err = cConn.ReadQueryResult(100, true) require.Error(t, err) - require.True(t, IsConnLostDuringQuery(err), err.Error()) + require.True(t, sqlerror.IsConnLostDuringQuery(err), err.Error()) } func TestQueries(t *testing.T) { @@ -399,12 +405,15 @@ func TestQueries(t *testing.T) { checkQuery(t, "type and name", sConn, cConn, &sqltypes.Result{ Fields: []*querypb.Field{ { - Name: "id", - Type: querypb.Type_INT32, + Name: "id", + Type: querypb.Type_INT32, + Charset: collations.CollationBinaryID, + Flags: uint32(querypb.MySqlFlag_NUM_FLAG), }, { - Name: "name", - Type: querypb.Type_VARCHAR, + Name: "name", + Type: querypb.Type_VARCHAR, + Charset: uint32(collations.Default()), }, }, Rows: [][]sqltypes.Value{ @@ -424,36 +433,36 @@ func TestQueries(t *testing.T) { // One row has all NULL values. checkQuery(t, "all types", sConn, cConn, &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "Type_INT8 ", Type: querypb.Type_INT8}, - {Name: "Type_UINT8 ", Type: querypb.Type_UINT8}, - {Name: "Type_INT16 ", Type: querypb.Type_INT16}, - {Name: "Type_UINT16 ", Type: querypb.Type_UINT16}, - {Name: "Type_INT24 ", Type: querypb.Type_INT24}, - {Name: "Type_UINT24 ", Type: querypb.Type_UINT24}, - {Name: "Type_INT32 ", Type: querypb.Type_INT32}, - {Name: "Type_UINT32 ", Type: querypb.Type_UINT32}, - {Name: "Type_INT64 ", Type: querypb.Type_INT64}, - {Name: "Type_UINT64 ", Type: querypb.Type_UINT64}, - {Name: "Type_FLOAT32 ", Type: querypb.Type_FLOAT32}, - {Name: "Type_FLOAT64 ", Type: querypb.Type_FLOAT64}, - {Name: "Type_TIMESTAMP", Type: querypb.Type_TIMESTAMP}, - {Name: "Type_DATE ", Type: querypb.Type_DATE}, - {Name: "Type_TIME ", Type: querypb.Type_TIME}, - {Name: "Type_DATETIME ", Type: querypb.Type_DATETIME}, - {Name: "Type_YEAR ", Type: querypb.Type_YEAR}, - {Name: "Type_DECIMAL ", Type: querypb.Type_DECIMAL}, - {Name: "Type_TEXT ", Type: querypb.Type_TEXT}, - {Name: "Type_BLOB ", Type: querypb.Type_BLOB}, - {Name: "Type_VARCHAR ", Type: querypb.Type_VARCHAR}, - {Name: "Type_VARBINARY", Type: querypb.Type_VARBINARY}, - {Name: "Type_CHAR ", Type: querypb.Type_CHAR}, - {Name: "Type_BINARY ", Type: querypb.Type_BINARY}, - {Name: "Type_BIT ", Type: querypb.Type_BIT}, - {Name: "Type_ENUM ", Type: querypb.Type_ENUM}, - {Name: "Type_SET ", Type: querypb.Type_SET}, + {Name: "Type_INT8 ", Type: querypb.Type_INT8, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "Type_UINT8 ", Type: querypb.Type_UINT8, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG)}, + {Name: "Type_INT16 ", Type: querypb.Type_INT16, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "Type_UINT16 ", Type: querypb.Type_UINT16, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG)}, + {Name: "Type_INT24 ", Type: querypb.Type_INT24, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "Type_UINT24 ", Type: querypb.Type_UINT24, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG)}, + {Name: "Type_INT32 ", Type: querypb.Type_INT32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "Type_UINT32 ", Type: querypb.Type_UINT32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG)}, + {Name: "Type_INT64 ", Type: querypb.Type_INT64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "Type_UINT64 ", Type: querypb.Type_UINT64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG)}, + {Name: "Type_FLOAT32 ", Type: querypb.Type_FLOAT32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "Type_FLOAT64 ", Type: querypb.Type_FLOAT64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "Type_TIMESTAMP", Type: querypb.Type_TIMESTAMP, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG | querypb.MySqlFlag_TIMESTAMP_FLAG)}, + {Name: "Type_DATE ", Type: querypb.Type_DATE, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, + {Name: "Type_TIME ", Type: querypb.Type_TIME, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, + {Name: "Type_DATETIME ", Type: querypb.Type_DATETIME, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, + {Name: "Type_YEAR ", Type: querypb.Type_YEAR, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_UNSIGNED_FLAG | querypb.MySqlFlag_NUM_FLAG)}, + {Name: "Type_DECIMAL ", Type: querypb.Type_DECIMAL, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "Type_TEXT ", Type: querypb.Type_TEXT, Charset: uint32(collations.Default())}, + {Name: "Type_BLOB ", Type: querypb.Type_BLOB, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, + {Name: "Type_VARCHAR ", Type: querypb.Type_VARCHAR, Charset: uint32(collations.Default())}, + {Name: "Type_VARBINARY", Type: querypb.Type_VARBINARY, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, + {Name: "Type_CHAR ", Type: querypb.Type_CHAR, Charset: uint32(collations.Default())}, + {Name: "Type_BINARY ", Type: querypb.Type_BINARY, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, + {Name: "Type_BIT ", Type: querypb.Type_BIT, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, + {Name: "Type_ENUM ", Type: querypb.Type_ENUM, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_ENUM_FLAG)}, + {Name: "Type_SET ", Type: querypb.Type_SET, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_SET_FLAG)}, // Skip TUPLE, not possible in Result. - {Name: "Type_GEOMETRY ", Type: querypb.Type_GEOMETRY}, - {Name: "Type_JSON ", Type: querypb.Type_JSON}, + {Name: "Type_GEOMETRY ", Type: querypb.Type_GEOMETRY, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG | querypb.MySqlFlag_BLOB_FLAG)}, + {Name: "Type_JSON ", Type: querypb.Type_JSON, Charset: collations.CollationUtf8mb4ID}, }, Rows: [][]sqltypes.Value{ { @@ -526,8 +535,9 @@ func TestQueries(t *testing.T) { checkQuery(t, "first empty string", sConn, cConn, &sqltypes.Result{ Fields: []*querypb.Field{ { - Name: "name", - Type: querypb.Type_VARCHAR, + Name: "name", + Type: querypb.Type_VARCHAR, + Charset: uint32(collations.Default()), }, }, Rows: [][]sqltypes.Value{ @@ -544,7 +554,9 @@ func TestQueries(t *testing.T) { checkQuery(t, "type only", sConn, cConn, &sqltypes.Result{ Fields: []*querypb.Field{ { - Type: querypb.Type_INT64, + Type: querypb.Type_INT64, + Charset: collations.CollationBinaryID, + Flags: uint32(querypb.MySqlFlag_NUM_FLAG), }, }, Rows: [][]sqltypes.Value{ @@ -667,6 +679,7 @@ func checkQueryInternal(t *testing.T, query string, sConn, cConn *Conn, result * if !got.Equal(&expected) { for i, f := range got.Fields { if i < len(expected.Fields) && !proto.Equal(f, expected.Fields[i]) { + t.Logf("Query = %v", query) t.Logf("Got field(%v) = %v", i, f) t.Logf("Expected field(%v) = %v", i, expected.Fields[i]) } @@ -757,25 +770,6 @@ func checkQueryInternal(t *testing.T, query string, sConn, cConn *Conn, result * } -// nolint -func writeResult(conn *Conn, result *sqltypes.Result) error { - if len(result.Fields) == 0 { - return conn.writeOKPacket(&PacketOK{ - affectedRows: result.RowsAffected, - lastInsertID: result.InsertID, - statusFlags: conn.StatusFlags, - warnings: 0, - }) - } - if err := conn.writeFields(result); err != nil { - return err - } - if err := conn.writeRows(result); err != nil { - return err - } - return conn.writeEndResult(false, 0, 0, 0) -} - func RowString(row []sqltypes.Value) string { l := len(row) result := fmt.Sprintf("%v values:", l) diff --git a/go/mysql/replication.go b/go/mysql/replication.go index 33f24860266..399698d6a2a 100644 --- a/go/mysql/replication.go +++ b/go/mysql/replication.go @@ -17,6 +17,7 @@ limitations under the License. package mysql import ( + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -45,7 +46,7 @@ func (c *Conn) WriteComBinlogDump(serverID uint32, binlogFilename string, binlog pos = writeUint32(data, pos, serverID) _ = writeEOFString(data, pos, binlogFilename) if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err) } return nil } @@ -92,7 +93,7 @@ func (c *Conn) WriteComBinlogDumpGTID(serverID uint32, binlogFilename string, bi pos = writeUint32(data, pos, uint32(len(gtidSet))) //nolint pos += copy(data[pos:], gtidSet) //nolint if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err) } return nil } @@ -110,7 +111,7 @@ func (c *Conn) SendSemiSyncAck(binlogFilename string, binlogPos uint64) error { pos = writeUint64(data, pos, binlogPos) _ = writeEOFString(data, pos, binlogFilename) if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err) } return nil @@ -132,7 +133,7 @@ func (c *Conn) WriteBinlogEvent(ev BinlogEvent, semiSyncEnabled bool) error { } _ = writeEOFString(data, pos, string(ev.Bytes())) if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err) } return nil } diff --git a/go/mysql/filepos_gtid.go b/go/mysql/replication/filepos_gtid.go similarity index 68% rename from go/mysql/filepos_gtid.go rename to go/mysql/replication/filepos_gtid.go index e5bfd055bee..850fb421915 100644 --- a/go/mysql/filepos_gtid.go +++ b/go/mysql/replication/filepos_gtid.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "fmt" @@ -38,9 +38,9 @@ func parseFilePosGTID(s string) (GTID, error) { return nil, fmt.Errorf("invalid FilePos GTID (%v): expecting pos to be an integer", s) } - return filePosGTID{ - file: parts[0], - pos: uint32(pos), + return FilePosGTID{ + File: parts[0], + Pos: uint32(pos), }, nil } @@ -50,69 +50,69 @@ func ParseFilePosGTIDSet(s string) (GTIDSet, error) { if err != nil { return nil, err } - return gtid.(filePosGTID), err + return gtid.(FilePosGTID), err } -// filePosGTID implements GTID. -type filePosGTID struct { - file string - pos uint32 +// FilePosGTID implements GTID. +type FilePosGTID struct { + File string + Pos uint32 } // String implements GTID.String(). -func (gtid filePosGTID) String() string { - return fmt.Sprintf("%s:%d", gtid.file, gtid.pos) +func (gtid FilePosGTID) String() string { + return fmt.Sprintf("%s:%d", gtid.File, gtid.Pos) } // Flavor implements GTID.Flavor(). -func (gtid filePosGTID) Flavor() string { +func (gtid FilePosGTID) Flavor() string { return FilePosFlavorID } // SequenceDomain implements GTID.SequenceDomain(). -func (gtid filePosGTID) SequenceDomain() any { +func (gtid FilePosGTID) SequenceDomain() any { return nil } // SourceServer implements GTID.SourceServer(). -func (gtid filePosGTID) SourceServer() any { +func (gtid FilePosGTID) SourceServer() any { return nil } // SequenceNumber implements GTID.SequenceNumber(). -func (gtid filePosGTID) SequenceNumber() any { +func (gtid FilePosGTID) SequenceNumber() any { return nil } // GTIDSet implements GTID.GTIDSet(). -func (gtid filePosGTID) GTIDSet() GTIDSet { +func (gtid FilePosGTID) GTIDSet() GTIDSet { return gtid } // ContainsGTID implements GTIDSet.ContainsGTID(). -func (gtid filePosGTID) ContainsGTID(other GTID) bool { +func (gtid FilePosGTID) ContainsGTID(other GTID) bool { if other == nil { return true } - filePosOther, ok := other.(filePosGTID) + filePosOther, ok := other.(FilePosGTID) if !ok { return false } - if filePosOther.file < gtid.file { + if filePosOther.File < gtid.File { return true } - if filePosOther.file > gtid.file { + if filePosOther.File > gtid.File { return false } - return filePosOther.pos <= gtid.pos + return filePosOther.Pos <= gtid.Pos } // Contains implements GTIDSet.Contains(). -func (gtid filePosGTID) Contains(other GTIDSet) bool { +func (gtid FilePosGTID) Contains(other GTIDSet) bool { if other == nil { return false } - filePosOther, ok := other.(filePosGTID) + filePosOther, ok := other.(FilePosGTID) if !ok { return false } @@ -120,8 +120,8 @@ func (gtid filePosGTID) Contains(other GTIDSet) bool { } // Equal implements GTIDSet.Equal(). -func (gtid filePosGTID) Equal(other GTIDSet) bool { - filePosOther, ok := other.(filePosGTID) +func (gtid FilePosGTID) Equal(other GTIDSet) bool { + filePosOther, ok := other.(FilePosGTID) if !ok { return false } @@ -129,8 +129,8 @@ func (gtid filePosGTID) Equal(other GTIDSet) bool { } // AddGTID implements GTIDSet.AddGTID(). -func (gtid filePosGTID) AddGTID(other GTID) GTIDSet { - filePosOther, ok := other.(filePosGTID) +func (gtid FilePosGTID) AddGTID(other GTID) GTIDSet { + filePosOther, ok := other.(FilePosGTID) if !ok { return gtid } @@ -138,8 +138,8 @@ func (gtid filePosGTID) AddGTID(other GTID) GTIDSet { } // Union implements GTIDSet.Union(). -func (gtid filePosGTID) Union(other GTIDSet) GTIDSet { - filePosOther, ok := other.(filePosGTID) +func (gtid FilePosGTID) Union(other GTIDSet) GTIDSet { + filePosOther, ok := other.(FilePosGTID) if !ok || gtid.Contains(other) { return gtid } @@ -150,12 +150,11 @@ func (gtid filePosGTID) Union(other GTIDSet) GTIDSet { // Last returns last filePosition // For filePos based GTID we have only one position // here we will just return the current filePos -func (gtid filePosGTID) Last() string { +func (gtid FilePosGTID) Last() string { return gtid.String() } func init() { gtidParsers[FilePosFlavorID] = parseFilePosGTID gtidSetParsers[FilePosFlavorID] = ParseFilePosGTIDSet - flavors[FilePosFlavorID] = newFilePosFlavor } diff --git a/go/mysql/filepos_gtid_test.go b/go/mysql/replication/filepos_gtid_test.go similarity index 77% rename from go/mysql/filepos_gtid_test.go rename to go/mysql/replication/filepos_gtid_test.go index ec7f9d33142..174aed6ccf9 100644 --- a/go/mysql/filepos_gtid_test.go +++ b/go/mysql/replication/filepos_gtid_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "testing" @@ -38,12 +38,12 @@ func Test_filePosGTID_String(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - gtid := filePosGTID{ - file: tt.fields.file, - pos: tt.fields.pos, + gtid := FilePosGTID{ + File: tt.fields.file, + Pos: tt.fields.pos, } if got := gtid.String(); got != tt.want { - t.Errorf("filePosGTID.String() = %v, want %v", got, tt.want) + t.Errorf("FilePosGTID.String() = %v, want %v", got, tt.want) } }) } @@ -66,36 +66,36 @@ func Test_filePosGTID_ContainsGTID(t *testing.T) { { "returns true when the position is equal", fields{file: "testfile", pos: 1234}, - args{other: filePosGTID{file: "testfile", pos: 1234}}, + args{other: FilePosGTID{File: "testfile", Pos: 1234}}, true, }, { "returns true when the position is less than equal", fields{file: "testfile", pos: 1234}, - args{other: filePosGTID{file: "testfile", pos: 1233}}, + args{other: FilePosGTID{File: "testfile", Pos: 1233}}, true, }, { "returns false when the position is less than equal", fields{file: "testfile", pos: 1234}, - args{other: filePosGTID{file: "testfile", pos: 1235}}, + args{other: FilePosGTID{File: "testfile", Pos: 1235}}, false, }, { "it uses integer value for comparison (it is not lexicographical order)", fields{file: "testfile", pos: 99761227}, - args{other: filePosGTID{file: "testfile", pos: 103939867}}, + args{other: FilePosGTID{File: "testfile", Pos: 103939867}}, false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - gtid := filePosGTID{ - file: tt.fields.file, - pos: tt.fields.pos, + gtid := FilePosGTID{ + File: tt.fields.file, + Pos: tt.fields.pos, } if got := gtid.ContainsGTID(tt.args.other); got != tt.want { - t.Errorf("filePosGTID.ContainsGTID() = %v, want %v", got, tt.want) + t.Errorf("FilePosGTID.ContainsGTID() = %v, want %v", got, tt.want) } }) } diff --git a/go/mysql/gtid.go b/go/mysql/replication/gtid.go similarity index 99% rename from go/mysql/gtid.go rename to go/mysql/replication/gtid.go index d5f6a44df74..14e781a714f 100644 --- a/go/mysql/gtid.go +++ b/go/mysql/replication/gtid.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "fmt" diff --git a/go/mysql/gtid_set.go b/go/mysql/replication/gtid_set.go similarity index 99% rename from go/mysql/gtid_set.go rename to go/mysql/replication/gtid_set.go index 812b7f33caf..1e4ca29b42e 100644 --- a/go/mysql/gtid_set.go +++ b/go/mysql/replication/gtid_set.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication // GTIDSet represents the set of transactions received or applied by a server. // In some flavors, a single GTID is enough to specify the set of all diff --git a/go/mysql/gtid_test.go b/go/mysql/replication/gtid_test.go similarity index 99% rename from go/mysql/gtid_test.go rename to go/mysql/replication/gtid_test.go index 8dfea641727..8713f94b115 100644 --- a/go/mysql/gtid_test.go +++ b/go/mysql/replication/gtid_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "strings" diff --git a/go/mysql/mariadb_gtid.go b/go/mysql/replication/mariadb_gtid.go similarity index 97% rename from go/mysql/mariadb_gtid.go rename to go/mysql/replication/mariadb_gtid.go index 713ef2c72b4..ff63964bbf1 100644 --- a/go/mysql/mariadb_gtid.go +++ b/go/mysql/replication/mariadb_gtid.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "fmt" @@ -62,8 +62,8 @@ func parseMariadbGTID(s string) (GTID, error) { }, nil } -// parseMariadbGTIDSet is registered as a GTIDSet parser. -func parseMariadbGTIDSet(s string) (GTIDSet, error) { +// ParseMariadbGTIDSet is registered as a GTIDSet parser. +func ParseMariadbGTIDSet(s string) (GTIDSet, error) { gtidStrings := strings.Split(s, ",") gtidSet := make(MariadbGTIDSet, len(gtidStrings)) for _, gtidString := range gtidStrings { @@ -272,5 +272,5 @@ func (gtidSet MariadbGTIDSet) addGTID(otherGTID MariadbGTID) { func init() { gtidParsers[MariadbFlavorID] = parseMariadbGTID - gtidSetParsers[MariadbFlavorID] = parseMariadbGTIDSet + gtidSetParsers[MariadbFlavorID] = ParseMariadbGTIDSet } diff --git a/go/mysql/mariadb_gtid_test.go b/go/mysql/replication/mariadb_gtid_test.go similarity index 98% rename from go/mysql/mariadb_gtid_test.go rename to go/mysql/replication/mariadb_gtid_test.go index 49472ab8d33..3fe02b31822 100644 --- a/go/mysql/mariadb_gtid_test.go +++ b/go/mysql/replication/mariadb_gtid_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "strings" @@ -81,9 +81,9 @@ func TestParseMariaGTIDSet(t *testing.T) { 11: MariadbGTID{Domain: 11, Server: 22, Sequence: 3333}, } - got, err := parseMariadbGTIDSet(input) + got, err := ParseMariadbGTIDSet(input) assert.NoError(t, err, "%v", err) - assert.True(t, got.Equal(want), "parseMariadbGTIDSet(%#v) = %#v, want %#v", input, got, want) + assert.True(t, got.Equal(want), "ParseMariadbGTIDSet(%#v) = %#v, want %#v", input, got, want) } @@ -91,13 +91,13 @@ func TestParseInvalidMariaGTIDSet(t *testing.T) { input := "12-34-5678,11-22-33e33" want := "invalid MariaDB GTID Sequence number" - _, err := parseMariadbGTIDSet(input) + _, err := ParseMariadbGTIDSet(input) if err == nil { t.Errorf("expected error for invalid input (%#v)", input) return } if got := err.Error(); !strings.HasPrefix(got, want) { - t.Errorf("parseMariadbGTIDSet(%#v) error = %#v, want %#v", input, got, want) + t.Errorf("ParseMariadbGTIDSet(%#v) error = %#v, want %#v", input, got, want) } } @@ -621,7 +621,7 @@ func TestMariaGTIDSetLast(t *testing.T) { "12-34-5678": "12-34-5678", } for input, want := range testCases { - got, err := parseMariadbGTIDSet(input) + got, err := ParseMariadbGTIDSet(input) require.NoError(t, err) assert.Equal(t, want, got.Last()) } diff --git a/go/mysql/mysql56_gtid.go b/go/mysql/replication/mysql56_gtid.go similarity index 99% rename from go/mysql/mysql56_gtid.go rename to go/mysql/replication/mysql56_gtid.go index 0aae3d54336..4ec861b84e5 100644 --- a/go/mysql/mysql56_gtid.go +++ b/go/mysql/replication/mysql56_gtid.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "encoding/hex" diff --git a/go/mysql/mysql56_gtid_set.go b/go/mysql/replication/mysql56_gtid_set.go similarity index 97% rename from go/mysql/mysql56_gtid_set.go rename to go/mysql/replication/mysql56_gtid_set.go index 63e778f3527..1d46176b19a 100644 --- a/go/mysql/mysql56_gtid_set.go +++ b/go/mysql/replication/mysql56_gtid_set.go @@ -14,16 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "bytes" "encoding/binary" + "slices" "strconv" "strings" - "golang.org/x/exp/slices" - "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -126,8 +125,18 @@ func ParseMysql56GTIDSet(s string) (Mysql56GTIDSet, error) { intervals = append(sidIntervals, intervals...) } // Internally we expect intervals to be stored in order. - slices.SortFunc(intervals, func(a, b interval) bool { - return a.start < b.start + slices.SortFunc(intervals, func(a, b interval) int { + // TODO: switch to cmp.Compare for Go 1.21+. + // + // https://pkg.go.dev/cmp@master#Compare. + switch { + case a.start < b.start: + return -1 + case a.start > b.start: + return 1 + default: + return 0 + } }) set[sid] = intervals } @@ -149,8 +158,8 @@ func (set Mysql56GTIDSet) SIDs() []SID { } func sortSIDs(sids []SID) { - slices.SortFunc(sids, func(a, b SID) bool { - return bytes.Compare(a[:], b[:]) < 0 + slices.SortFunc(sids, func(a, b SID) int { + return bytes.Compare(a[:], b[:]) }) } diff --git a/go/mysql/mysql56_gtid_set_test.go b/go/mysql/replication/mysql56_gtid_set_test.go similarity index 99% rename from go/mysql/mysql56_gtid_set_test.go rename to go/mysql/replication/mysql56_gtid_set_test.go index 98162513fd7..323baae3885 100644 --- a/go/mysql/mysql56_gtid_set_test.go +++ b/go/mysql/replication/mysql56_gtid_set_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "fmt" @@ -123,7 +123,7 @@ func TestParseMysql56GTIDSetInvalid(t *testing.T) { for _, input := range table { _, err := ParseMysql56GTIDSet(input) - assert.Error(t, err, "parseMysql56GTIDSet(%#v) expected error, got none", err) + assert.Error(t, err, "ParseMysql56GTIDSet(%#v) expected error, got none", err) } } diff --git a/go/mysql/mysql56_gtid_test.go b/go/mysql/replication/mysql56_gtid_test.go similarity index 90% rename from go/mysql/mysql56_gtid_test.go rename to go/mysql/replication/mysql56_gtid_test.go index 335835d8199..7a4bc9862a8 100644 --- a/go/mysql/mysql56_gtid_test.go +++ b/go/mysql/replication/mysql56_gtid_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "strings" @@ -141,3 +141,15 @@ func TestMysql56GTIDGTIDSet(t *testing.T) { t.Errorf("%#v.GTIDSet() = %#v, want %#v", input, got, want) } } + +func TestMysql56ParseGTID(t *testing.T) { + input := "00010203-0405-0607-0809-0A0B0C0D0E0F:56789" + want := Mysql56GTID{ + Server: SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, + Sequence: 56789, + } + + got, err := parseMysql56GTID(input) + require.NoError(t, err, "unexpected error: %v", err) + assert.Equal(t, want, got, "(&mysql56{}).ParseGTID(%#v) = %#v, want %#v", input, got, want) +} diff --git a/go/mysql/primary_status.go b/go/mysql/replication/primary_status.go similarity index 53% rename from go/mysql/primary_status.go rename to go/mysql/replication/primary_status.go index e8524862917..679b152f9d4 100644 --- a/go/mysql/primary_status.go +++ b/go/mysql/replication/primary_status.go @@ -14,10 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( + "fmt" + + "vitess.io/vitess/go/vt/log" replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + "vitess.io/vitess/go/vt/vterrors" ) // PrimaryStatus holds replication information from SHOW MASTER STATUS. @@ -35,3 +39,32 @@ func PrimaryStatusToProto(s PrimaryStatus) *replicationdatapb.PrimaryStatus { FilePosition: EncodePosition(s.FilePosition), } } + +func ParseMysqlPrimaryStatus(resultMap map[string]string) (PrimaryStatus, error) { + status := ParsePrimaryStatus(resultMap) + + var err error + status.Position.GTIDSet, err = ParseMysql56GTIDSet(resultMap["Executed_Gtid_Set"]) + if err != nil { + return PrimaryStatus{}, vterrors.Wrapf(err, "PrimaryStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v)", resultMap["Executed_Gtid_Set"]) + } + + return status, nil +} + +// ParsePrimaryStatus parses the common fields of SHOW MASTER STATUS. +func ParsePrimaryStatus(fields map[string]string) PrimaryStatus { + status := PrimaryStatus{} + + fileExecPosStr := fields["Position"] + file := fields["File"] + if file != "" && fileExecPosStr != "" { + var err error + status.FilePosition.GTIDSet, err = ParseFilePosGTIDSet(fmt.Sprintf("%s:%s", file, fileExecPosStr)) + if err != nil { + log.Warningf("Error parsing GTID set %s:%s: %v", file, fileExecPosStr, err) + } + } + + return status +} diff --git a/go/mysql/replication_position.go b/go/mysql/replication/replication_position.go similarity index 84% rename from go/mysql/replication_position.go rename to go/mysql/replication/replication_position.go index 7d242a9b248..240321f2c6f 100644 --- a/go/mysql/replication_position.go +++ b/go/mysql/replication/replication_position.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "encoding/json" @@ -26,12 +26,11 @@ import ( ) const ( - // MaximumPositionSize is the maximum size of a - // replication position. It is used as the maximum column size in the _vt.reparent_journal and - // other related tables. A row has a maximum size of 65535 bytes. So - // we want to stay under that. We use VARBINARY so the - // character set doesn't matter, we only store ascii - // characters anyway. + // MaximumPositionSize is the maximum size of a replication position. + // It is used as the maximum column size in the reparent_journal table + // and other related tables. A row has a maximum size of 65535 bytes. + // So we want to stay under that. We use VARBINARY so the character + // set doesn't matter, we only store ascii characters anyway. MaximumPositionSize = 64000 ) @@ -146,6 +145,25 @@ func DecodePosition(s string) (rp Position, err error) { return ParsePosition(flav, gtid) } +// DecodePositionDefaultFlavor converts a string in the format returned by +// EncodePosition back into a Position value with the +// correct underlying flavor. If the string does not indicate a flavor, then the 'flavor' argument +// is used. For example: +// - DecodePositionDefaultFlavor("MySQL56/16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615", "foo"): "MySQL56" explicitly indicated, this is the flavor. +// - DecodePositionDefaultFlavor("16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615", "MySQL56"): No flavor indicated in `s`, therefore using "MySQL56" +func DecodePositionDefaultFlavor(s string, flavor string) (rp Position, err error) { + if s == "" { + return rp, nil + } + + flav, gtid, ok := strings.Cut(s, "/") + if !ok { + gtid = s + flav = flavor + } + return ParsePosition(flav, gtid) +} + // ParsePosition calls the parser for the specified flavor. func ParsePosition(flavor, value string) (rp Position, err error) { parser := gtidSetParsers[flavor] @@ -191,7 +209,7 @@ func (rp *Position) MatchesFlavor(flavor string) bool { _, matches := rp.GTIDSet.(MariadbGTIDSet) return matches case FilePosFlavorID: - _, matches := rp.GTIDSet.(filePosGTID) + _, matches := rp.GTIDSet.(FilePosGTID) return matches } return false diff --git a/go/mysql/replication_position_test.go b/go/mysql/replication/replication_position_test.go similarity index 94% rename from go/mysql/replication_position_test.go rename to go/mysql/replication/replication_position_test.go index 5bb2e5385d0..125f5929bbe 100644 --- a/go/mysql/replication_position_test.go +++ b/go/mysql/replication/replication_position_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "encoding/json" @@ -272,6 +272,24 @@ func TestDecodePosition(t *testing.T) { } +func TestDecodePositionDefaultFlavor(t *testing.T) { + gtidSetParsers[Mysql56FlavorID] = func(s string) (GTIDSet, error) { + return ParseMysql56GTIDSet(s) + } + { + pos := "MySQL56/16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615" + rp, err := DecodePositionDefaultFlavor(pos, "foo") + assert.NoError(t, err) + assert.Equal(t, "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615", rp.GTIDSet.String()) + } + { + pos := "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615" + rp, err := DecodePositionDefaultFlavor(pos, Mysql56FlavorID) + assert.NoError(t, err) + assert.Equal(t, "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615", rp.GTIDSet.String()) + } +} + func TestDecodePositionZero(t *testing.T) { input := "" want := Position{} diff --git a/go/mysql/replication_status.go b/go/mysql/replication/replication_status.go similarity index 63% rename from go/mysql/replication_status.go rename to go/mysql/replication/replication_status.go index ff06d559a56..6b3d1bf2214 100644 --- a/go/mysql/replication_status.go +++ b/go/mysql/replication/replication_status.go @@ -14,11 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package replication import ( "fmt" + "strconv" + "vitess.io/vitess/go/vt/log" replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" "vitess.io/vitess/go/vt/vterrors" ) @@ -219,3 +221,124 @@ func (s *ReplicationStatus) FindErrantGTIDs(otherReplicaStatuses []*ReplicationS return diffSet, nil } + +func ParseMysqlReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) { + status := ParseReplicationStatus(resultMap) + uuidString := resultMap["Master_UUID"] + if uuidString != "" { + sid, err := ParseSID(uuidString) + if err != nil { + return ReplicationStatus{}, vterrors.Wrapf(err, "cannot decode SourceUUID") + } + status.SourceUUID = sid + } + + var err error + status.Position.GTIDSet, err = ParseMysql56GTIDSet(resultMap["Executed_Gtid_Set"]) + if err != nil { + return ReplicationStatus{}, vterrors.Wrapf(err, "ReplicationStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v)", resultMap["Executed_Gtid_Set"]) + } + relayLogGTIDSet, err := ParseMysql56GTIDSet(resultMap["Retrieved_Gtid_Set"]) + if err != nil { + return ReplicationStatus{}, vterrors.Wrapf(err, "ReplicationStatus can't parse MySQL 5.6 GTID (Retrieved_Gtid_Set: %#v)", resultMap["Retrieved_Gtid_Set"]) + } + // We take the union of the executed and retrieved gtidset, because the retrieved gtidset only represents GTIDs since + // the relay log has been reset. To get the full Position, we need to take a union of executed GTIDSets, since these would + // have been in the relay log's GTIDSet in the past, prior to a reset. + status.RelayLogPosition.GTIDSet = status.Position.GTIDSet.Union(relayLogGTIDSet) + + return status, nil +} + +func ParseMariadbReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) { + status := ParseReplicationStatus(resultMap) + + var err error + status.Position.GTIDSet, err = ParseMariadbGTIDSet(resultMap["Gtid_Slave_Pos"]) + if err != nil { + return ReplicationStatus{}, vterrors.Wrapf(err, "ReplicationStatus can't parse MariaDB GTID (Gtid_Slave_Pos: %#v)", resultMap["Gtid_Slave_Pos"]) + } + + return status, nil +} + +func ParseFilePosReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) { + status := ParseReplicationStatus(resultMap) + + status.Position = status.FilePosition + status.RelayLogPosition = status.RelayLogSourceBinlogEquivalentPosition + + return status, nil +} + +func ParseFilePosPrimaryStatus(resultMap map[string]string) (PrimaryStatus, error) { + status := ParsePrimaryStatus(resultMap) + + status.Position = status.FilePosition + + return status, nil +} + +// ParseReplicationStatus parses the common (non-flavor-specific) fields of ReplicationStatus +func ParseReplicationStatus(fields map[string]string) ReplicationStatus { + // The field names in the map are identical to what we receive from the database + // Hence the names still contain Master + status := ReplicationStatus{ + SourceHost: fields["Master_Host"], + SourceUser: fields["Master_User"], + SSLAllowed: fields["Master_SSL_Allowed"] == "Yes", + AutoPosition: fields["Auto_Position"] == "1", + UsingGTID: fields["Using_Gtid"] != "No" && fields["Using_Gtid"] != "", + HasReplicationFilters: (fields["Replicate_Do_DB"] != "") || (fields["Replicate_Ignore_DB"] != "") || (fields["Replicate_Do_Table"] != "") || (fields["Replicate_Ignore_Table"] != "") || (fields["Replicate_Wild_Do_Table"] != "") || (fields["Replicate_Wild_Ignore_Table"] != ""), + // These fields are returned from the underlying DB and cannot be renamed + IOState: ReplicationStatusToState(fields["Slave_IO_Running"]), + LastIOError: fields["Last_IO_Error"], + SQLState: ReplicationStatusToState(fields["Slave_SQL_Running"]), + LastSQLError: fields["Last_SQL_Error"], + } + parseInt, _ := strconv.ParseInt(fields["Master_Port"], 10, 32) + status.SourcePort = int32(parseInt) + parseInt, _ = strconv.ParseInt(fields["Connect_Retry"], 10, 32) + status.ConnectRetry = int32(parseInt) + parseUint, err := strconv.ParseUint(fields["Seconds_Behind_Master"], 10, 32) + if err != nil { + // we could not parse the value into a valid uint32 -- most commonly because the value is NULL from the + // database -- so let's reflect that the underlying value was unknown on our last check + status.ReplicationLagUnknown = true + } else { + status.ReplicationLagUnknown = false + status.ReplicationLagSeconds = uint32(parseUint) + } + parseUint, _ = strconv.ParseUint(fields["Master_Server_Id"], 10, 32) + status.SourceServerID = uint32(parseUint) + parseUint, _ = strconv.ParseUint(fields["SQL_Delay"], 10, 32) + status.SQLDelay = uint32(parseUint) + + executedPosStr := fields["Exec_Master_Log_Pos"] + file := fields["Relay_Master_Log_File"] + if file != "" && executedPosStr != "" { + status.FilePosition.GTIDSet, err = ParseFilePosGTIDSet(fmt.Sprintf("%s:%s", file, executedPosStr)) + if err != nil { + log.Warningf("Error parsing GTID set %s:%s: %v", file, executedPosStr, err) + } + } + + readPosStr := fields["Read_Master_Log_Pos"] + file = fields["Master_Log_File"] + if file != "" && readPosStr != "" { + status.RelayLogSourceBinlogEquivalentPosition.GTIDSet, err = ParseFilePosGTIDSet(fmt.Sprintf("%s:%s", file, readPosStr)) + if err != nil { + log.Warningf("Error parsing GTID set %s:%s: %v", file, readPosStr, err) + } + } + + relayPosStr := fields["Relay_Log_Pos"] + file = fields["Relay_Log_File"] + if file != "" && relayPosStr != "" { + status.RelayLogFilePosition.GTIDSet, err = ParseFilePosGTIDSet(fmt.Sprintf("%s:%s", file, relayPosStr)) + if err != nil { + log.Warningf("Error parsing GTID set %s:%s: %v", file, relayPosStr, err) + } + } + return status +} diff --git a/go/mysql/replication/replication_status_test.go b/go/mysql/replication/replication_status_test.go new file mode 100644 index 00000000000..c1f5991f253 --- /dev/null +++ b/go/mysql/replication/replication_status_test.go @@ -0,0 +1,292 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package replication + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStatusReplicationRunning(t *testing.T) { + input := &ReplicationStatus{ + IOState: ReplicationStatusToState("yes"), + SQLState: ReplicationStatusToState("yes"), + } + want := true + if got := input.Running(); got != want { + t.Errorf("%#v.Running() = %v, want %v", input, got, want) + } +} + +func TestStatusIOThreadNotRunning(t *testing.T) { + input := &ReplicationStatus{ + IOState: ReplicationStatusToState("no"), + SQLState: ReplicationStatusToState("yes"), + } + want := false + if got := input.Running(); got != want { + t.Errorf("%#v.Running() = %v, want %v", input, got, want) + } +} + +func TestStatusSQLThreadNotRunning(t *testing.T) { + input := &ReplicationStatus{ + IOState: ReplicationStatusToState("yes"), + SQLState: ReplicationStatusToState("no"), + } + want := false + if got := input.Running(); got != want { + t.Errorf("%#v.Running() = %v, want %v", input, got, want) + } +} + +func TestFindErrantGTIDs(t *testing.T) { + sid1 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + sid2 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16} + sid3 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 17} + sid4 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 18} + sourceSID := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 19} + + set1 := Mysql56GTIDSet{ + sid1: []interval{{20, 30}, {35, 39}, {40, 53}, {55, 75}}, + sid2: []interval{{1, 7}, {20, 50}, {60, 70}}, + sid4: []interval{{1, 30}}, + sourceSID: []interval{{1, 7}, {20, 30}}, + } + + set2 := Mysql56GTIDSet{ + sid1: []interval{{20, 30}, {35, 37}, {50, 60}}, + sid2: []interval{{3, 5}, {22, 25}, {32, 37}, {67, 70}}, + sid3: []interval{{1, 45}}, + sourceSID: []interval{{2, 6}, {15, 40}}, + } + + set3 := Mysql56GTIDSet{ + sid1: []interval{{20, 30}, {35, 38}, {50, 70}}, + sid2: []interval{{3, 5}, {22, 25}, {32, 37}, {67, 70}}, + sid3: []interval{{1, 45}}, + sourceSID: []interval{{2, 6}, {15, 45}}, + } + + testcases := []struct { + mainRepStatus *ReplicationStatus + otherRepStatuses []*ReplicationStatus + want Mysql56GTIDSet + }{{ + mainRepStatus: &ReplicationStatus{SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set1}}, + otherRepStatuses: []*ReplicationStatus{ + {SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set2}}, + {SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set3}}, + }, + want: Mysql56GTIDSet{ + sid1: []interval{{39, 39}, {40, 49}, {71, 75}}, + sid2: []interval{{1, 2}, {6, 7}, {20, 21}, {26, 31}, {38, 50}, {60, 66}}, + sid4: []interval{{1, 30}}, + }, + }, { + mainRepStatus: &ReplicationStatus{SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set1}}, + otherRepStatuses: []*ReplicationStatus{{SourceUUID: sid1, RelayLogPosition: Position{GTIDSet: set1}}}, + // servers with the same GTID sets should not be diagnosed with errant GTIDs + want: nil, + }} + + for _, testcase := range testcases { + t.Run("", func(t *testing.T) { + got, err := testcase.mainRepStatus.FindErrantGTIDs(testcase.otherRepStatuses) + require.NoError(t, err) + require.Equal(t, testcase.want, got) + }) + } +} + +func TestMysqlShouldGetPosition(t *testing.T) { + resultMap := map[string]string{ + "Executed_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", + "Position": "1307", + "File": "source-bin.000003", + } + + sid, _ := ParseSID("3e11fa47-71ca-11e1-9e33-c80aa9429562") + want := PrimaryStatus{ + Position: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 5}}}}, + FilePosition: Position{GTIDSet: FilePosGTID{File: "source-bin.000003", Pos: 1307}}, + } + got, err := ParseMysqlPrimaryStatus(resultMap) + require.NoError(t, err) + assert.Equalf(t, got.Position.GTIDSet.String(), want.Position.GTIDSet.String(), "got Position: %v; want Position: %v", got.Position.GTIDSet, want.Position.GTIDSet) + assert.Equalf(t, got.FilePosition.GTIDSet.String(), want.FilePosition.GTIDSet.String(), "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) +} + +func TestMysqlRetrieveSourceServerId(t *testing.T) { + resultMap := map[string]string{ + "Master_Server_Id": "1", + } + + want := ReplicationStatus{SourceServerID: 1} + got, err := ParseMysqlReplicationStatus(resultMap) + require.NoError(t, err) + assert.Equalf(t, got.SourceServerID, want.SourceServerID, "got SourceServerID: %v; want SourceServerID: %v", got.SourceServerID, want.SourceServerID) +} + +func TestMysqlRetrieveFileBasedPositions(t *testing.T) { + resultMap := map[string]string{ + "Exec_Master_Log_Pos": "1307", + "Relay_Master_Log_File": "master-bin.000002", + "Read_Master_Log_Pos": "1308", + "Master_Log_File": "master-bin.000003", + "Relay_Log_Pos": "1309", + "Relay_Log_File": "relay-bin.000004", + } + + want := ReplicationStatus{ + FilePosition: Position{GTIDSet: FilePosGTID{File: "master-bin.000002", Pos: 1307}}, + RelayLogSourceBinlogEquivalentPosition: Position{GTIDSet: FilePosGTID{File: "master-bin.000003", Pos: 1308}}, + RelayLogFilePosition: Position{GTIDSet: FilePosGTID{File: "relay-bin.000004", Pos: 1309}}, + } + got, err := ParseMysqlReplicationStatus(resultMap) + require.NoError(t, err) + assert.Equalf(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) + assert.Equalf(t, got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet, "got RelayLogFilePosition: %v; want RelayLogFilePosition: %v", got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet) + assert.Equalf(t, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet, "got RelayLogSourceBinlogEquivalentPosition: %v; want RelayLogSourceBinlogEquivalentPosition: %v", got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet) +} + +func TestMysqlShouldGetRelayLogPosition(t *testing.T) { + resultMap := map[string]string{ + "Executed_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", + "Retrieved_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:6-9", + "Exec_Master_Log_Pos": "1307", + "Relay_Master_Log_File": "master-bin.000002", + "Read_Master_Log_Pos": "1308", + "Master_Log_File": "master-bin.000003", + } + + sid, _ := ParseSID("3e11fa47-71ca-11e1-9e33-c80aa9429562") + want := ReplicationStatus{ + Position: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 5}}}}, + RelayLogPosition: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 9}}}}, + } + got, err := ParseMysqlReplicationStatus(resultMap) + require.NoError(t, err) + assert.Equalf(t, got.RelayLogPosition.GTIDSet.String(), want.RelayLogPosition.GTIDSet.String(), "got RelayLogPosition: %v; want RelayLogPosition: %v", got.RelayLogPosition.GTIDSet, want.RelayLogPosition.GTIDSet) +} + +func TestMariadbRetrieveSourceServerId(t *testing.T) { + resultMap := map[string]string{ + "Master_Server_Id": "1", + "Gtid_Slave_Pos": "0-101-2320", + } + + want := ReplicationStatus{SourceServerID: 1} + got, err := ParseMariadbReplicationStatus(resultMap) + require.NoError(t, err) + assert.Equal(t, got.SourceServerID, want.SourceServerID, fmt.Sprintf("got SourceServerID: %v; want SourceServerID: %v", got.SourceServerID, want.SourceServerID)) +} + +func TestMariadbRetrieveFileBasedPositions(t *testing.T) { + resultMap := map[string]string{ + "Exec_Master_Log_Pos": "1307", + "Relay_Master_Log_File": "master-bin.000002", + "Read_Master_Log_Pos": "1308", + "Master_Log_File": "master-bin.000003", + "Gtid_Slave_Pos": "0-101-2320", + "Relay_Log_Pos": "1309", + "Relay_Log_File": "relay-bin.000004", + } + + want := ReplicationStatus{ + FilePosition: Position{GTIDSet: FilePosGTID{File: "master-bin.000002", Pos: 1307}}, + RelayLogSourceBinlogEquivalentPosition: Position{GTIDSet: FilePosGTID{File: "master-bin.000003", Pos: 1308}}, + RelayLogFilePosition: Position{GTIDSet: FilePosGTID{File: "relay-bin.000004", Pos: 1309}}, + } + got, err := ParseMariadbReplicationStatus(resultMap) + require.NoError(t, err) + assert.Equalf(t, got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet, "got RelayLogFilePosition: %v; want RelayLogFilePosition: %v", got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet) + assert.Equal(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, fmt.Sprintf("got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet)) + assert.Equal(t, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet, fmt.Sprintf("got RelayLogSourceBinlogEquivalentPosition: %v; want RelayLogSourceBinlogEquivalentPosition: %v", got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet)) +} + +func TestMariadbShouldGetNilRelayLogPosition(t *testing.T) { + resultMap := map[string]string{ + "Exec_Master_Log_Pos": "1307", + "Relay_Master_Log_File": "master-bin.000002", + "Read_Master_Log_Pos": "1308", + "Master_Log_File": "master-bin.000003", + "Gtid_Slave_Pos": "0-101-2320", + } + got, err := ParseMariadbReplicationStatus(resultMap) + require.NoError(t, err) + assert.Truef(t, got.RelayLogPosition.IsZero(), "Got a filled in RelayLogPosition. For MariaDB we should get back nil, because MariaDB does not return the retrieved GTIDSet. got: %#v", got.RelayLogPosition) +} + +func TestFilePosRetrieveSourceServerId(t *testing.T) { + resultMap := map[string]string{ + "Master_Server_Id": "1", + } + + want := ReplicationStatus{SourceServerID: 1} + got, err := ParseFilePosReplicationStatus(resultMap) + require.NoError(t, err) + assert.Equalf(t, got.SourceServerID, want.SourceServerID, "got SourceServerID: %v; want SourceServerID: %v", got.SourceServerID, want.SourceServerID) +} + +func TestFilePosRetrieveExecutedPosition(t *testing.T) { + resultMap := map[string]string{ + "Exec_Master_Log_Pos": "1307", + "Relay_Master_Log_File": "master-bin.000002", + "Read_Master_Log_Pos": "1308", + "Master_Log_File": "master-bin.000003", + "Relay_Log_Pos": "1309", + "Relay_Log_File": "relay-bin.000004", + } + + want := ReplicationStatus{ + Position: Position{GTIDSet: FilePosGTID{File: "master-bin.000002", Pos: 1307}}, + RelayLogPosition: Position{GTIDSet: FilePosGTID{File: "master-bin.000003", Pos: 1308}}, + FilePosition: Position{GTIDSet: FilePosGTID{File: "master-bin.000002", Pos: 1307}}, + RelayLogSourceBinlogEquivalentPosition: Position{GTIDSet: FilePosGTID{File: "master-bin.000003", Pos: 1308}}, + RelayLogFilePosition: Position{GTIDSet: FilePosGTID{File: "relay-bin.000004", Pos: 1309}}, + } + got, err := ParseFilePosReplicationStatus(resultMap) + require.NoError(t, err) + assert.Equalf(t, got.Position.GTIDSet, want.Position.GTIDSet, "got Position: %v; want Position: %v", got.Position.GTIDSet, want.Position.GTIDSet) + assert.Equalf(t, got.RelayLogPosition.GTIDSet, want.RelayLogPosition.GTIDSet, "got RelayLogPosition: %v; want RelayLogPosition: %v", got.RelayLogPosition.GTIDSet, want.RelayLogPosition.GTIDSet) + assert.Equalf(t, got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet, "got RelayLogFilePosition: %v; want RelayLogFilePosition: %v", got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet) + assert.Equalf(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) + assert.Equalf(t, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet, "got RelayLogSourceBinlogEquivalentPosition: %v; want RelayLogSourceBinlogEquivalentPosition: %v", got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet) + assert.Equalf(t, got.Position.GTIDSet, got.FilePosition.GTIDSet, "FilePosition and Position don't match when they should for the FilePos flavor") + assert.Equalf(t, got.RelayLogPosition.GTIDSet, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, "RelayLogPosition and RelayLogSourceBinlogEquivalentPosition don't match when they should for the FilePos flavor") +} + +func TestFilePosShouldGetPosition(t *testing.T) { + resultMap := map[string]string{ + "Position": "1307", + "File": "source-bin.000003", + } + + want := PrimaryStatus{ + Position: Position{GTIDSet: FilePosGTID{File: "source-bin.000003", Pos: 1307}}, + FilePosition: Position{GTIDSet: FilePosGTID{File: "source-bin.000003", Pos: 1307}}, + } + got, err := ParseFilePosPrimaryStatus(resultMap) + require.NoError(t, err) + assert.Equalf(t, got.Position.GTIDSet, want.Position.GTIDSet, "got Position: %v; want Position: %v", got.Position.GTIDSet, want.Position.GTIDSet) + assert.Equalf(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) + assert.Equalf(t, got.Position.GTIDSet, got.FilePosition.GTIDSet, "FilePosition and Position don't match when they should for the FilePos flavor") +} diff --git a/go/mysql/replication/state.go b/go/mysql/replication/state.go new file mode 100644 index 00000000000..d08965a6fb6 --- /dev/null +++ b/go/mysql/replication/state.go @@ -0,0 +1,49 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package replication + +import "strings" + +type ReplicationState int32 + +const ( + ReplicationStateUnknown ReplicationState = iota + ReplicationStateStopped + ReplicationStateConnecting + ReplicationStateRunning +) + +// ReplicationStatusToState converts a value you have for the IO thread(s) or SQL +// thread(s) or Group Replication applier thread(s) from MySQL or intermediate +// layers to a ReplicationState. +// on,yes,true == ReplicationStateRunning +// off,no,false == ReplicationStateStopped +// connecting == ReplicationStateConnecting +// anything else == ReplicationStateUnknown +func ReplicationStatusToState(s string) ReplicationState { + // Group Replication uses ON instead of Yes + switch strings.ToLower(s) { + case "yes", "on", "true": + return ReplicationStateRunning + case "no", "off", "false": + return ReplicationStateStopped + case "connecting": + return ReplicationStateConnecting + default: + return ReplicationStateUnknown + } +} diff --git a/go/mysql/replication_constants.go b/go/mysql/replication_constants.go index 5fcb17271b5..6b6e34b2333 100644 --- a/go/mysql/replication_constants.go +++ b/go/mysql/replication_constants.go @@ -18,103 +18,6 @@ package mysql // This file contains the constant definitions for this package. -// This is the data type for a field. -// Values taken from include/mysql/mysql_com.h -const ( - // TypeDecimal is MYSQL_TYPE_DECIMAL. It is deprecated. - TypeDecimal = 0 - - // TypeTiny is MYSQL_TYPE_TINY - TypeTiny = 1 - - // TypeShort is MYSQL_TYPE_SHORT - TypeShort = 2 - - // TypeLong is MYSQL_TYPE_LONG - TypeLong = 3 - - // TypeFloat is MYSQL_TYPE_FLOAT - TypeFloat = 4 - - // TypeDouble is MYSQL_TYPE_DOUBLE - TypeDouble = 5 - - // TypeNull is MYSQL_TYPE_NULL - TypeNull = 6 - - // TypeTimestamp is MYSQL_TYPE_TIMESTAMP - TypeTimestamp = 7 - - // TypeLongLong is MYSQL_TYPE_LONGLONG - TypeLongLong = 8 - - // TypeInt24 is MYSQL_TYPE_INT24 - TypeInt24 = 9 - - // TypeDate is MYSQL_TYPE_DATE - TypeDate = 10 - - // TypeTime is MYSQL_TYPE_TIME - TypeTime = 11 - - // TypeDateTime is MYSQL_TYPE_DATETIME - TypeDateTime = 12 - - // TypeYear is MYSQL_TYPE_YEAR - TypeYear = 13 - - // TypeNewDate is MYSQL_TYPE_NEWDATE - TypeNewDate = 14 - - // TypeVarchar is MYSQL_TYPE_VARCHAR - TypeVarchar = 15 - - // TypeBit is MYSQL_TYPE_BIT - TypeBit = 16 - - // TypeTimestamp2 is MYSQL_TYPE_TIMESTAMP2 - TypeTimestamp2 = 17 - - // TypeDateTime2 is MYSQL_TYPE_DATETIME2 - TypeDateTime2 = 18 - - // TypeTime2 is MYSQL_TYPE_TIME2 - TypeTime2 = 19 - - // TypeJSON is MYSQL_TYPE_JSON - TypeJSON = 245 - - // TypeNewDecimal is MYSQL_TYPE_NEWDECIMAL - TypeNewDecimal = 246 - - // TypeEnum is MYSQL_TYPE_ENUM - TypeEnum = 247 - - // TypeSet is MYSQL_TYPE_SET - TypeSet = 248 - - // TypeTinyBlob is MYSQL_TYPE_TINY_BLOB - TypeTinyBlob = 249 - - // TypeMediumBlob is MYSQL_TYPE_MEDIUM_BLOB - TypeMediumBlob = 250 - - // TypeLongBlob is MYSQL_TYPE_LONG_BLOB - TypeLongBlob = 251 - - // TypeBlob is MYSQL_TYPE_BLOB - TypeBlob = 252 - - // TypeVarString is MYSQL_TYPE_VAR_STRING - TypeVarString = 253 - - // TypeString is MYSQL_TYPE_STRING - TypeString = 254 - - // TypeGeometry is MYSQL_TYPE_GEOMETRY - TypeGeometry = 255 -) - // Constants for the type of an INTVAR_EVENT. const ( // IntVarInvalidInt is INVALID_INT_EVENT @@ -207,8 +110,8 @@ const ( //eViewChangeEvent = 37 //eXAPrepareLogEvent = 38 - // Transaction_payload_event when binlog compression is turned on - eCompressedEvent = 40 + // Transaction_payload_event when binlog_transaction_compression=ON. + eTransactionPayloadEvent = 40 // MariaDB specific values. They start at 160. //eMariaAnnotateRowsEvent = 160 diff --git a/go/mysql/replication_status_test.go b/go/mysql/replication_status_test.go deleted file mode 100644 index 556f2cfaaeb..00000000000 --- a/go/mysql/replication_status_test.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mysql - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestStatusReplicationRunning(t *testing.T) { - input := &ReplicationStatus{ - IOState: ReplicationStatusToState("yes"), - SQLState: ReplicationStatusToState("yes"), - } - want := true - if got := input.Running(); got != want { - t.Errorf("%#v.Running() = %v, want %v", input, got, want) - } -} - -func TestStatusIOThreadNotRunning(t *testing.T) { - input := &ReplicationStatus{ - IOState: ReplicationStatusToState("no"), - SQLState: ReplicationStatusToState("yes"), - } - want := false - if got := input.Running(); got != want { - t.Errorf("%#v.Running() = %v, want %v", input, got, want) - } -} - -func TestStatusSQLThreadNotRunning(t *testing.T) { - input := &ReplicationStatus{ - IOState: ReplicationStatusToState("yes"), - SQLState: ReplicationStatusToState("no"), - } - want := false - if got := input.Running(); got != want { - t.Errorf("%#v.Running() = %v, want %v", input, got, want) - } -} - -func TestFindErrantGTIDs(t *testing.T) { - sid1 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - sid2 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16} - sid3 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 17} - sid4 := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 18} - sourceSID := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 19} - - set1 := Mysql56GTIDSet{ - sid1: []interval{{20, 30}, {35, 39}, {40, 53}, {55, 75}}, - sid2: []interval{{1, 7}, {20, 50}, {60, 70}}, - sid4: []interval{{1, 30}}, - sourceSID: []interval{{1, 7}, {20, 30}}, - } - - set2 := Mysql56GTIDSet{ - sid1: []interval{{20, 30}, {35, 37}, {50, 60}}, - sid2: []interval{{3, 5}, {22, 25}, {32, 37}, {67, 70}}, - sid3: []interval{{1, 45}}, - sourceSID: []interval{{2, 6}, {15, 40}}, - } - - set3 := Mysql56GTIDSet{ - sid1: []interval{{20, 30}, {35, 38}, {50, 70}}, - sid2: []interval{{3, 5}, {22, 25}, {32, 37}, {67, 70}}, - sid3: []interval{{1, 45}}, - sourceSID: []interval{{2, 6}, {15, 45}}, - } - - testcases := []struct { - mainRepStatus *ReplicationStatus - otherRepStatuses []*ReplicationStatus - want Mysql56GTIDSet - }{{ - mainRepStatus: &ReplicationStatus{SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set1}}, - otherRepStatuses: []*ReplicationStatus{ - {SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set2}}, - {SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set3}}, - }, - want: Mysql56GTIDSet{ - sid1: []interval{{39, 39}, {40, 49}, {71, 75}}, - sid2: []interval{{1, 2}, {6, 7}, {20, 21}, {26, 31}, {38, 50}, {60, 66}}, - sid4: []interval{{1, 30}}, - }, - }, { - mainRepStatus: &ReplicationStatus{SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set1}}, - otherRepStatuses: []*ReplicationStatus{{SourceUUID: sid1, RelayLogPosition: Position{GTIDSet: set1}}}, - // servers with the same GTID sets should not be diagnosed with errant GTIDs - want: nil, - }} - - for _, testcase := range testcases { - t.Run("", func(t *testing.T) { - got, err := testcase.mainRepStatus.FindErrantGTIDs(testcase.otherRepStatuses) - require.NoError(t, err) - require.Equal(t, testcase.want, got) - }) - } -} diff --git a/go/mysql/schema.go b/go/mysql/schema.go index 1b3f50b31cd..933ce657c3a 100644 --- a/go/mysql/schema.go +++ b/go/mysql/schema.go @@ -46,25 +46,7 @@ FROM ( UNION ALL SELECT table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key - FROM _vt.schemacopy - WHERE table_schema = database() -) _inner -GROUP BY table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key -HAVING COUNT(*) = 1 -` - - // DetectSchemaChangeOnlyBaseTable query detects if there is any schema change from previous copy excluding view tables. - DetectSchemaChangeOnlyBaseTable = ` -SELECT DISTINCT table_name -FROM ( - SELECT table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key - FROM information_schema.columns - WHERE table_schema = database() and table_name in (select table_name from information_schema.tables where table_schema = database() and table_type = 'BASE TABLE') - - UNION ALL - - SELECT table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key - FROM _vt.schemacopy + FROM %s.schemacopy WHERE table_schema = database() ) _inner GROUP BY table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key @@ -72,59 +54,16 @@ HAVING COUNT(*) = 1 ` // ClearSchemaCopy query clears the schemacopy table. - ClearSchemaCopy = `delete from _vt.schemacopy where table_schema = database()` + ClearSchemaCopy = `delete from %s.schemacopy where table_schema = database()` // InsertIntoSchemaCopy query copies over the schema information from information_schema.columns table. - InsertIntoSchemaCopy = `insert _vt.schemacopy + InsertIntoSchemaCopy = `insert %s.schemacopy select table_schema, table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key from information_schema.columns where table_schema = database()` - // fetchColumns are the columns we fetch - fetchColumns = "table_name, column_name, data_type, collation_name" - - // FetchUpdatedTables queries fetches all information about updated tables - FetchUpdatedTables = `select ` + fetchColumns + ` -from _vt.schemacopy -where table_schema = database() and - table_name in ::tableNames -order by table_name, ordinal_position` - - // FetchTables queries fetches all information about tables - FetchTables = `select ` + fetchColumns + ` -from _vt.schemacopy -where table_schema = database() -order by table_name, ordinal_position` - // GetColumnNamesQueryPatternForTable is used for mocking queries in unit tests GetColumnNamesQueryPatternForTable = `SELECT COLUMN_NAME.*TABLE_NAME.*%s.*` - - // Views - InsertIntoViewsTable = `insert into _vt.views ( - table_schema, - table_name, - create_statement) values (database(), :table_name, :create_statement)` - - ReplaceIntoViewsTable = `replace into _vt.views ( - table_schema, - table_name, - create_statement) values (database(), :table_name, :create_statement)` - - UpdateViewsTable = `update _vt.views - set create_statement = :create_statement - where table_schema = database() and table_name = :table_name` - - DeleteFromViewsTable = `delete from _vt.views where table_schema = database() and table_name in ::table_name` - - SelectFromViewsTable = `select table_name from _vt.views where table_schema = database() and table_name in ::table_name` - - SelectAllViews = `select table_name, updated_at from _vt.views where table_schema = database()` - - // FetchUpdatedViews queries fetches information about updated views - FetchUpdatedViews = `select table_name, create_statement from _vt.views where table_schema = database() and table_name in ::viewnames` - - // FetchViews queries fetches all views - FetchViews = `select table_name, create_statement from _vt.views where table_schema = database()` ) // BaseShowTablesFields contains the fields returned by a BaseShowTables or a BaseShowTablesForTable command. @@ -138,7 +77,7 @@ var BaseShowTablesFields = []*querypb.Field{{ Database: "information_schema", OrgName: "TABLE_NAME", ColumnLength: 192, - Charset: collations.CollationUtf8ID, + Charset: uint32(collations.SystemCollation.Collation), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG), }, { Name: "t.table_type", @@ -148,7 +87,7 @@ var BaseShowTablesFields = []*querypb.Field{{ Database: "information_schema", OrgName: "TABLE_TYPE", ColumnLength: 192, - Charset: collations.CollationUtf8ID, + Charset: uint32(collations.SystemCollation.Collation), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG), }, { Name: "unix_timestamp(t.create_time)", @@ -164,7 +103,7 @@ var BaseShowTablesFields = []*querypb.Field{{ Database: "information_schema", OrgName: "TABLE_COMMENT", ColumnLength: 6144, - Charset: collations.CollationUtf8ID, + Charset: uint32(collations.SystemCollation.Collation), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG), }, { Name: "i.file_size", diff --git a/go/mysql/server.go b/go/mysql/server.go index e17bd82ef90..ec2d7538daa 100644 --- a/go/mysql/server.go +++ b/go/mysql/server.go @@ -25,20 +25,21 @@ import ( "sync/atomic" "time" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/servenv" - - "vitess.io/vitess/go/sqlescape" + "github.com/pires/go-proxyproto" - proxyproto "github.com/pires/go-proxyproto" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/netutil" + "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" ) @@ -122,7 +123,7 @@ type Handler interface { ComBinlogDump(c *Conn, logFile string, binlogPos uint32) error // ComBinlogDumpGTID is called when a connection receives a ComBinlogDumpGTID request - ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet GTIDSet) error + ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error // WarningCount is called at the end of each query to obtain // the value to be returned to the client in the EOF packet. @@ -196,6 +197,9 @@ type Listener struct { // connBufferPooling configures if vtgate server pools connection buffers connBufferPooling bool + // connKeepAlivePeriod is period between tcp keep-alives. + connKeepAlivePeriod time.Duration + // shutdown indicates that Shutdown method was called. shutdown atomic.Bool @@ -218,15 +222,17 @@ func NewFromListener( connReadTimeout time.Duration, connWriteTimeout time.Duration, connBufferPooling bool, + keepAlivePeriod time.Duration, ) (*Listener, error) { cfg := ListenerConfig{ - Listener: l, - AuthServer: authServer, - Handler: handler, - ConnReadTimeout: connReadTimeout, - ConnWriteTimeout: connWriteTimeout, - ConnReadBufferSize: connBufferSize, - ConnBufferPooling: connBufferPooling, + Listener: l, + AuthServer: authServer, + Handler: handler, + ConnReadTimeout: connReadTimeout, + ConnWriteTimeout: connWriteTimeout, + ConnReadBufferSize: connBufferSize, + ConnBufferPooling: connBufferPooling, + ConnKeepAlivePeriod: keepAlivePeriod, } return NewListenerWithConfig(cfg) } @@ -240,6 +246,7 @@ func NewListener( connWriteTimeout time.Duration, proxyProtocol bool, connBufferPooling bool, + keepAlivePeriod time.Duration, ) (*Listener, error) { listener, err := net.Listen(protocol, address) if err != nil { @@ -247,24 +254,25 @@ func NewListener( } if proxyProtocol { proxyListener := &proxyproto.Listener{Listener: listener} - return NewFromListener(proxyListener, authServer, handler, connReadTimeout, connWriteTimeout, connBufferPooling) + return NewFromListener(proxyListener, authServer, handler, connReadTimeout, connWriteTimeout, connBufferPooling, keepAlivePeriod) } - return NewFromListener(listener, authServer, handler, connReadTimeout, connWriteTimeout, connBufferPooling) + return NewFromListener(listener, authServer, handler, connReadTimeout, connWriteTimeout, connBufferPooling, keepAlivePeriod) } // ListenerConfig should be used with NewListenerWithConfig to specify listener parameters. type ListenerConfig struct { // Protocol-Address pair and Listener are mutually exclusive parameters - Protocol string - Address string - Listener net.Listener - AuthServer AuthServer - Handler Handler - ConnReadTimeout time.Duration - ConnWriteTimeout time.Duration - ConnReadBufferSize int - ConnBufferPooling bool + Protocol string + Address string + Listener net.Listener + AuthServer AuthServer + Handler Handler + ConnReadTimeout time.Duration + ConnWriteTimeout time.Duration + ConnReadBufferSize int + ConnBufferPooling bool + ConnKeepAlivePeriod time.Duration } // NewListenerWithConfig creates new listener using provided config. There are @@ -282,15 +290,16 @@ func NewListenerWithConfig(cfg ListenerConfig) (*Listener, error) { } return &Listener{ - authServer: cfg.AuthServer, - handler: cfg.Handler, - listener: l, - ServerVersion: servenv.AppVersion.MySQLVersion(), - connectionID: 1, - connReadTimeout: cfg.ConnReadTimeout, - connWriteTimeout: cfg.ConnWriteTimeout, - connReadBufferSize: cfg.ConnReadBufferSize, - connBufferPooling: cfg.ConnBufferPooling, + authServer: cfg.AuthServer, + handler: cfg.Handler, + listener: l, + ServerVersion: servenv.AppVersion.MySQLVersion(), + connectionID: 1, + connReadTimeout: cfg.ConnReadTimeout, + connWriteTimeout: cfg.ConnWriteTimeout, + connReadBufferSize: cfg.ConnReadBufferSize, + connBufferPooling: cfg.ConnBufferPooling, + connKeepAlivePeriod: cfg.ConnKeepAlivePeriod, }, nil } @@ -449,12 +458,12 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti } if negotiatedAuthMethod == nil { - c.writeErrorPacket(CRServerHandshakeErr, SSUnknownSQLState, "No authentication methods available for authentication.") + c.writeErrorPacket(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "No authentication methods available for authentication.") return } if !l.AllowClearTextWithoutTLS.Load() && !c.TLSEnabled() && !negotiatedAuthMethod.AllowClearTextWithoutTLS() { - c.writeErrorPacket(CRServerHandshakeErr, SSUnknownSQLState, "Cannot use clear text authentication over non-SSL connections.") + c.writeErrorPacket(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "Cannot use clear text authentication over non-SSL connections.") return } @@ -525,7 +534,8 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti for { kontinue := c.handleNextCommand(l.handler) - if !kontinue { + // before going for next command check if the connection should be closed or not. + if !kontinue || c.IsMarkedForClose() { return } } diff --git a/go/mysql/server_flaky_test.go b/go/mysql/server_flaky_test.go index 7225f29a816..509fccaa47a 100644 --- a/go/mysql/server_flaky_test.go +++ b/go/mysql/server_flaky_test.go @@ -32,6 +32,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" vtenv "vitess.io/vitess/go/vt/env" @@ -46,12 +51,15 @@ import ( var selectRowsResult = &sqltypes.Result{ Fields: []*querypb.Field{ { - Name: "id", - Type: querypb.Type_INT32, + Name: "id", + Type: querypb.Type_INT32, + Charset: collations.CollationBinaryID, + Flags: uint32(querypb.MySqlFlag_NUM_FLAG), }, { - Name: "name", - Type: querypb.Type_VARCHAR, + Name: "name", + Type: querypb.Type_VARCHAR, + Charset: uint32(collations.CollationUtf8mb4ID), }, }, Rows: [][]sqltypes.Value{ @@ -136,8 +144,9 @@ func (th *testHandler) ComQuery(c *Conn, query string, callback func(*sqltypes.R callback(&sqltypes.Result{ Fields: []*querypb.Field{ { - Name: "schema_name", - Type: querypb.Type_VARCHAR, + Name: "schema_name", + Type: querypb.Type_VARCHAR, + Charset: uint32(collations.Default()), }, }, Rows: [][]sqltypes.Value{ @@ -154,8 +163,9 @@ func (th *testHandler) ComQuery(c *Conn, query string, callback func(*sqltypes.R callback(&sqltypes.Result{ Fields: []*querypb.Field{ { - Name: "ssl_flag", - Type: querypb.Type_VARCHAR, + Name: "ssl_flag", + Type: querypb.Type_VARCHAR, + Charset: uint32(collations.Default()), }, }, Rows: [][]sqltypes.Value{ @@ -168,12 +178,14 @@ func (th *testHandler) ComQuery(c *Conn, query string, callback func(*sqltypes.R callback(&sqltypes.Result{ Fields: []*querypb.Field{ { - Name: "user", - Type: querypb.Type_VARCHAR, + Name: "user", + Type: querypb.Type_VARCHAR, + Charset: uint32(collations.Default()), }, { - Name: "user_data", - Type: querypb.Type_VARCHAR, + Name: "user_data", + Type: querypb.Type_VARCHAR, + Charset: uint32(collations.Default()), }, }, Rows: [][]sqltypes.Value{ @@ -186,8 +198,9 @@ func (th *testHandler) ComQuery(c *Conn, query string, callback func(*sqltypes.R case "50ms delay": callback(&sqltypes.Result{ Fields: []*querypb.Field{{ - Name: "result", - Type: querypb.Type_VARCHAR, + Name: "result", + Type: querypb.Type_VARCHAR, + Charset: uint32(collations.Default()), }}, }) time.Sleep(50 * time.Millisecond) @@ -201,8 +214,9 @@ func (th *testHandler) ComQuery(c *Conn, query string, callback func(*sqltypes.R callback(&sqltypes.Result{ Fields: []*querypb.Field{ { - Name: "result", - Type: querypb.Type_VARCHAR, + Name: "result", + Type: querypb.Type_VARCHAR, + Charset: uint32(collations.Default()), }, }, Rows: [][]sqltypes.Value{ @@ -232,7 +246,7 @@ func (th *testHandler) ComRegisterReplica(c *Conn, replicaHost string, replicaPo func (th *testHandler) ComBinlogDump(c *Conn, logFile string, binlogPos uint32) error { return nil } -func (th *testHandler) ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet GTIDSet) error { +func (th *testHandler) ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error { return nil } @@ -263,7 +277,7 @@ func TestConnectionFromListener(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:") require.NoError(t, err, "net.Listener failed") - l, err := NewFromListener(listener, authServer, th, 0, 0, false) + l, err := NewFromListener(listener, authServer, th, 0, 0, false, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -292,7 +306,7 @@ func TestConnectionWithoutSourceHost(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -325,7 +339,7 @@ func TestConnectionWithSourceHost(t *testing.T) { } defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -358,7 +372,7 @@ func TestConnectionUseMysqlNativePasswordWithSourceHost(t *testing.T) { } defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -396,7 +410,7 @@ func TestConnectionUnixSocket(t *testing.T) { os.Remove(unixSocket.Name()) - l, err := NewListener("unix", unixSocket.Name(), authServer, th, 0, 0, false, false) + l, err := NewListener("unix", unixSocket.Name(), authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -422,7 +436,7 @@ func TestClientFoundRows(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -471,7 +485,7 @@ func TestConnCounts(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -503,12 +517,12 @@ func TestConnCounts(t *testing.T) { // Test after closing connections. time.Sleep lets it work, but seems flakey. c.Close() - //time.Sleep(10 * time.Millisecond) - //checkCountsForUser(t, user, 1) + // time.Sleep(10 * time.Millisecond) + // checkCountsForUser(t, user, 1) c2.Close() - //time.Sleep(10 * time.Millisecond) - //checkCountsForUser(t, user, 0) + // time.Sleep(10 * time.Millisecond) + // checkCountsForUser(t, user, 0) } func checkCountsForUser(t *testing.T, user string, expected int64) { @@ -528,7 +542,7 @@ func TestServer(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) l.SlowConnectWarnThreshold.Store(time.Nanosecond.Nanoseconds()) defer l.Close() @@ -565,7 +579,7 @@ func TestServer(t *testing.T) { // If there's an error after streaming has started, // we should get a 2013 - th.SetErr(NewSQLError(ERUnknownComError, SSNetError, "forced error after send")) + th.SetErr(sqlerror.NewSQLError(sqlerror.ERUnknownComError, sqlerror.SSNetError, "forced error after send")) output, err = runMysqlWithErr(t, params, "error after send") require.Error(t, err) assert.Contains(t, output, "ERROR 2013 (HY000)", "Unexpected output for 'panic'") @@ -628,7 +642,7 @@ func TestServerStats(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) l.SlowConnectWarnThreshold.Store(time.Nanosecond.Nanoseconds()) defer l.Close() @@ -651,7 +665,7 @@ func TestServerStats(t *testing.T) { connRefuse.Reset() // Run an 'error' command. - th.SetErr(NewSQLError(ERUnknownComError, SSNetError, "forced query error")) + th.SetErr(sqlerror.NewSQLError(sqlerror.ERUnknownComError, sqlerror.SSNetError, "forced query error")) output, ok := runMysql(t, params, "error") require.False(t, ok, "mysql should have failed: %v", output) @@ -702,7 +716,7 @@ func TestClearTextServer(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() go l.Accept() @@ -775,7 +789,7 @@ func TestDialogServer(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) l.AllowClearTextWithoutTLS.Store(true) defer l.Close() @@ -818,7 +832,7 @@ func TestTLSServer(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() @@ -870,7 +884,7 @@ func TestTLSServer(t *testing.T) { // Run a 'select rows' command with results. conn, err := Connect(context.Background(), params) - //output, ok := runMysql(t, params, "select rows") + // output, ok := runMysql(t, params, "select rows") require.NoError(t, err) results, err := conn.ExecuteFetch("select rows", 1000, true) require.NoError(t, err) @@ -916,7 +930,7 @@ func TestTLSRequired(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() @@ -1005,7 +1019,7 @@ func TestCachingSha2PasswordAuthWithTLS(t *testing.T) { defer authServer.close() // Create the listener, so we can get its host. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -1099,7 +1113,7 @@ func TestCachingSha2PasswordAuthWithMoreData(t *testing.T) { defer authServer.close() // Create the listener, so we can get its host. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -1168,7 +1182,7 @@ func TestCachingSha2PasswordAuthWithoutTLS(t *testing.T) { defer authServer.close() // Create the listener. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -1210,7 +1224,7 @@ func TestErrorCodes(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() go l.Accept() @@ -1233,7 +1247,7 @@ func TestErrorCodes(t *testing.T) { // internal vitess errors tests := []struct { err error - code ErrorCode + code sqlerror.ErrorCode sqlState string text string }{ @@ -1241,48 +1255,48 @@ func TestErrorCodes(t *testing.T) { err: vterrors.Errorf( vtrpcpb.Code_INVALID_ARGUMENT, "invalid argument"), - code: ERUnknownError, - sqlState: SSUnknownSQLState, + code: sqlerror.ERUnknownError, + sqlState: sqlerror.SSUnknownSQLState, text: "invalid argument", }, { err: vterrors.Errorf( vtrpcpb.Code_INVALID_ARGUMENT, - "(errno %v) (sqlstate %v) invalid argument with errno", ERDupEntry, SSConstraintViolation), - code: ERDupEntry, - sqlState: SSConstraintViolation, + "(errno %v) (sqlstate %v) invalid argument with errno", sqlerror.ERDupEntry, sqlerror.SSConstraintViolation), + code: sqlerror.ERDupEntry, + sqlState: sqlerror.SSConstraintViolation, text: "invalid argument with errno", }, { err: vterrors.Errorf( vtrpcpb.Code_DEADLINE_EXCEEDED, "connection deadline exceeded"), - code: ERQueryInterrupted, - sqlState: SSQueryInterrupted, + code: sqlerror.ERQueryInterrupted, + sqlState: sqlerror.SSQueryInterrupted, text: "deadline exceeded", }, { err: vterrors.Errorf( vtrpcpb.Code_RESOURCE_EXHAUSTED, "query pool timeout"), - code: ERTooManyUserConnections, - sqlState: SSClientError, + code: sqlerror.ERTooManyUserConnections, + sqlState: sqlerror.SSClientError, text: "resource exhausted", }, { err: vterrors.Wrap(vterrors.Errorf(vtrpcpb.Code_ABORTED, "Row count exceeded 10000"), "wrapped"), - code: ERQueryInterrupted, - sqlState: SSQueryInterrupted, + code: sqlerror.ERQueryInterrupted, + sqlState: sqlerror.SSQueryInterrupted, text: "aborted", }, } for _, test := range tests { t.Run(test.err.Error(), func(t *testing.T) { - th.SetErr(NewSQLErrorFromError(test.err)) + th.SetErr(sqlerror.NewSQLErrorFromError(test.err)) rs, err := client.ExecuteFetch("error", 100, false) require.Error(t, err, "mysql should have failed but returned: %v", rs) - serr, ok := err.(*SQLError) + serr, ok := err.(*sqlerror.SQLError) require.True(t, ok, "mysql should have returned a SQLError") assert.Equal(t, test.code, serr.Number(), "error in %s: want code %v got %v", test.text, test.code, serr.Number()) @@ -1388,7 +1402,7 @@ func TestListenerShutdown(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() go l.Accept() @@ -1419,11 +1433,11 @@ func TestListenerShutdown(t *testing.T) { err = conn.Ping() require.EqualError(t, err, "Server shutdown in progress (errno 1053) (sqlstate 08S01)") - sqlErr, ok := err.(*SQLError) + sqlErr, ok := err.(*sqlerror.SQLError) require.True(t, ok, "Wrong error type: %T", err) - require.Equal(t, ERServerShutdown, sqlErr.Number()) - require.Equal(t, SSNetError, sqlErr.SQLState()) + require.Equal(t, sqlerror.ERServerShutdown, sqlErr.Number()) + require.Equal(t, sqlerror.SSNetError, sqlErr.SQLState()) require.Equal(t, "Server shutdown in progress", sqlErr.Message) } @@ -1461,7 +1475,7 @@ func TestServerFlush(t *testing.T) { th := &testHandler{} - l, err := NewListener("tcp", "127.0.0.1:", NewAuthServerNone(), th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", NewAuthServerNone(), th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() go l.Accept() @@ -1486,8 +1500,9 @@ func TestServerFlush(t *testing.T) { assert.Fail(t, "duration out of expected range", "duration: %v, want between %v and %v", duration.String(), (mysqlServerFlushDelay).String(), want.String()) } want1 := []*querypb.Field{{ - Name: "result", - Type: querypb.Type_VARCHAR, + Name: "result", + Type: querypb.Type_VARCHAR, + Charset: uint32(collations.Default()), }} assert.Equal(t, want1, flds) @@ -1503,3 +1518,30 @@ func TestServerFlush(t *testing.T) { require.NoError(t, err) assert.Nil(t, row) } + +func TestTcpKeepAlive(t *testing.T) { + th := &testHandler{} + l, err := NewListener("tcp", "127.0.0.1:", NewAuthServerNone(), th, 0, 0, false, false, 0) + require.NoError(t, err) + defer l.Close() + go l.Accept() + + host, port := getHostPort(t, l.Addr()) + params := &ConnParams{ + Host: host, + Port: port, + } + + // on connect, the tcp method should be called. + c, err := Connect(context.Background(), params) + require.NoError(t, err) + defer c.Close() + require.True(t, th.lastConn.keepAliveOn, "tcp property method not called") + + // close the connection + th.lastConn.Close() + + // now calling this method should fail. + err = setTcpConnProperties(th.lastConn.conn.(*net.TCPConn), 0) + require.ErrorContains(t, err, "unable to enable keepalive on tcp connection") +} diff --git a/go/mysql/sqlerror/constants.go b/go/mysql/sqlerror/constants.go new file mode 100644 index 00000000000..0074e904e4a --- /dev/null +++ b/go/mysql/sqlerror/constants.go @@ -0,0 +1,496 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlerror + +import ( + "strconv" + "strings" +) + +type ErrorCode uint16 + +func (e ErrorCode) ToString() string { + return strconv.FormatUint(uint64(e), 10) +} + +// Error codes for server-side errors. +// Originally found in include/mysql/mysqld_error.h and +// https://dev.mysql.com/doc/mysql-errors/en/server-error-reference.html +// The below are in sorted order by value, grouped by vterror code they should be bucketed into. +// See above reference for more information on each code. +const ( + // Vitess specific errors, (100-999) + ERNotReplica = ErrorCode(100) + + // unknown + ERUnknownError = ErrorCode(1105) + + // internal + ERInternalError = ErrorCode(1815) + + // unimplemented + ERNotSupportedYet = ErrorCode(1235) + ERUnsupportedPS = ErrorCode(1295) + + // resource exhausted + ERDiskFull = ErrorCode(1021) + EROutOfMemory = ErrorCode(1037) + EROutOfSortMemory = ErrorCode(1038) + ERConCount = ErrorCode(1040) + EROutOfResources = ErrorCode(1041) + ERRecordFileFull = ErrorCode(1114) + ERHostIsBlocked = ErrorCode(1129) + ERCantCreateThread = ErrorCode(1135) + ERTooManyDelayedThreads = ErrorCode(1151) + ERNetPacketTooLarge = ErrorCode(1153) + ERTooManyUserConnections = ErrorCode(1203) + ERLockTableFull = ErrorCode(1206) + ERUserLimitReached = ErrorCode(1226) + + // deadline exceeded + ERLockWaitTimeout = ErrorCode(1205) + + // unavailable + ERServerShutdown = ErrorCode(1053) + + // not found + ERDbDropExists = ErrorCode(1008) + ERCantFindFile = ErrorCode(1017) + ERFormNotFound = ErrorCode(1029) + ERKeyNotFound = ErrorCode(1032) + ERBadFieldError = ErrorCode(1054) + ERNoSuchThread = ErrorCode(1094) + ERUnknownTable = ErrorCode(1109) + ERCantFindUDF = ErrorCode(1122) + ERNonExistingGrant = ErrorCode(1141) + ERNoSuchTable = ErrorCode(1146) + ERNonExistingTableGrant = ErrorCode(1147) + ERKeyDoesNotExist = ErrorCode(1176) + + // permissions + ERDBAccessDenied = ErrorCode(1044) + ERAccessDeniedError = ErrorCode(1045) + ERKillDenied = ErrorCode(1095) + ERNoPermissionToCreateUsers = ErrorCode(1211) + ERSpecifiedAccessDenied = ErrorCode(1227) + + // failed precondition + ERNoDb = ErrorCode(1046) + ERNoSuchIndex = ErrorCode(1082) + ERCantDropFieldOrKey = ErrorCode(1091) + ERTableNotLockedForWrite = ErrorCode(1099) + ERTableNotLocked = ErrorCode(1100) + ERTooBigSelect = ErrorCode(1104) + ERNotAllowedCommand = ErrorCode(1148) + ERTooLongString = ErrorCode(1162) + ERDelayedInsertTableLocked = ErrorCode(1165) + ERDupUnique = ErrorCode(1169) + ERRequiresPrimaryKey = ErrorCode(1173) + ERCantDoThisDuringAnTransaction = ErrorCode(1179) + ERReadOnlyTransaction = ErrorCode(1207) + ERCannotAddForeign = ErrorCode(1215) + ERNoReferencedRow = ErrorCode(1216) + ERRowIsReferenced = ErrorCode(1217) + ERCantUpdateWithReadLock = ErrorCode(1223) + ERNoDefault = ErrorCode(1230) + ERMasterFatalReadingBinlog = ErrorCode(1236) + EROperandColumns = ErrorCode(1241) + ERSubqueryNo1Row = ErrorCode(1242) + ERUnknownStmtHandler = ErrorCode(1243) + ERWarnDataOutOfRange = ErrorCode(1264) + ERNonUpdateableTable = ErrorCode(1288) + ERFeatureDisabled = ErrorCode(1289) + EROptionPreventsStatement = ErrorCode(1290) + ERDuplicatedValueInType = ErrorCode(1291) + ERSPDoesNotExist = ErrorCode(1305) + ERNoDefaultForField = ErrorCode(1364) + ErSPNotVarArg = ErrorCode(1414) + ERRowIsReferenced2 = ErrorCode(1451) + ErNoReferencedRow2 = ErrorCode(1452) + ERDupIndex = ErrorCode(1831) + ERInnodbReadOnly = ErrorCode(1874) + + // already exists + ERDbCreateExists = ErrorCode(1007) + ERTableExists = ErrorCode(1050) + ERDupEntry = ErrorCode(1062) + ERFileExists = ErrorCode(1086) + ERUDFExists = ErrorCode(1125) + + // aborted + ERGotSignal = ErrorCode(1078) + ERForcingClose = ErrorCode(1080) + ERAbortingConnection = ErrorCode(1152) + ERLockDeadlock = ErrorCode(1213) + + // invalid arg + ERUnknownComError = ErrorCode(1047) + ERBadNullError = ErrorCode(1048) + ERBadDb = ErrorCode(1049) + ERBadTable = ErrorCode(1051) + ERNonUniq = ErrorCode(1052) + ERWrongFieldWithGroup = ErrorCode(1055) + ERWrongGroupField = ErrorCode(1056) + ERWrongSumSelect = ErrorCode(1057) + ERWrongValueCount = ErrorCode(1058) + ERTooLongIdent = ErrorCode(1059) + ERDupFieldName = ErrorCode(1060) + ERDupKeyName = ErrorCode(1061) + ERWrongFieldSpec = ErrorCode(1063) + ERParseError = ErrorCode(1064) + EREmptyQuery = ErrorCode(1065) + ERNonUniqTable = ErrorCode(1066) + ERInvalidDefault = ErrorCode(1067) + ERMultiplePriKey = ErrorCode(1068) + ERTooManyKeys = ErrorCode(1069) + ERTooManyKeyParts = ErrorCode(1070) + ERTooLongKey = ErrorCode(1071) + ERKeyColumnDoesNotExist = ErrorCode(1072) + ERBlobUsedAsKey = ErrorCode(1073) + ERTooBigFieldLength = ErrorCode(1074) + ERWrongAutoKey = ErrorCode(1075) + ERWrongFieldTerminators = ErrorCode(1083) + ERBlobsAndNoTerminated = ErrorCode(1084) + ERTextFileNotReadable = ErrorCode(1085) + ERWrongSubKey = ErrorCode(1089) + ERCantRemoveAllFields = ErrorCode(1090) + ERUpdateTableUsed = ErrorCode(1093) + ERNoTablesUsed = ErrorCode(1096) + ERTooBigSet = ErrorCode(1097) + ERBlobCantHaveDefault = ErrorCode(1101) + ERWrongDbName = ErrorCode(1102) + ERWrongTableName = ErrorCode(1103) + ERUnknownProcedure = ErrorCode(1106) + ERWrongParamCountToProcedure = ErrorCode(1107) + ERWrongParametersToProcedure = ErrorCode(1108) + ERFieldSpecifiedTwice = ErrorCode(1110) + ERInvalidGroupFuncUse = ErrorCode(1111) + ERTableMustHaveColumns = ErrorCode(1113) + ERUnknownCharacterSet = ErrorCode(1115) + ERTooManyTables = ErrorCode(1116) + ERTooManyFields = ErrorCode(1117) + ERTooBigRowSize = ErrorCode(1118) + ERWrongOuterJoin = ErrorCode(1120) + ERNullColumnInIndex = ErrorCode(1121) + ERFunctionNotDefined = ErrorCode(1128) + ERWrongValueCountOnRow = ErrorCode(1136) + ERInvalidUseOfNull = ErrorCode(1138) + ERRegexpError = ErrorCode(1139) + ERMixOfGroupFuncAndFields = ErrorCode(1140) + ERIllegalGrantForTable = ErrorCode(1144) + ERSyntaxError = ErrorCode(1149) + ERWrongColumnName = ErrorCode(1166) + ERWrongKeyColumn = ErrorCode(1167) + ERBlobKeyWithoutLength = ErrorCode(1170) + ERPrimaryCantHaveNull = ErrorCode(1171) + ERTooManyRows = ErrorCode(1172) + ERLockOrActiveTransaction = ErrorCode(1192) + ERUnknownSystemVariable = ErrorCode(1193) + ERSetConstantsOnly = ErrorCode(1204) + ERWrongArguments = ErrorCode(1210) + ERWrongUsage = ErrorCode(1221) + ERWrongNumberOfColumnsInSelect = ErrorCode(1222) + ERDupArgument = ErrorCode(1225) + ERLocalVariable = ErrorCode(1228) + ERGlobalVariable = ErrorCode(1229) + ERWrongValueForVar = ErrorCode(1231) + ERWrongTypeForVar = ErrorCode(1232) + ERVarCantBeRead = ErrorCode(1233) + ERCantUseOptionHere = ErrorCode(1234) + ERIncorrectGlobalLocalVar = ErrorCode(1238) + ERWrongFKDef = ErrorCode(1239) + ERKeyRefDoNotMatchTableRef = ErrorCode(1240) + ERCyclicReference = ErrorCode(1245) + ERIllegalReference = ErrorCode(1247) + ERDerivedMustHaveAlias = ErrorCode(1248) + ERTableNameNotAllowedHere = ErrorCode(1250) + ERCollationCharsetMismatch = ErrorCode(1253) + ERWarnDataTruncated = ErrorCode(1265) + ERCantAggregate2Collations = ErrorCode(1267) + ERCantAggregate3Collations = ErrorCode(1270) + ERCantAggregateNCollations = ErrorCode(1271) + ERVariableIsNotStruct = ErrorCode(1272) + ERUnknownCollation = ErrorCode(1273) + ERWrongNameForIndex = ErrorCode(1280) + ERWrongNameForCatalog = ErrorCode(1281) + ERBadFTColumn = ErrorCode(1283) + ERTruncatedWrongValue = ErrorCode(1292) + ERTooMuchAutoTimestampCols = ErrorCode(1293) + ERInvalidOnUpdate = ErrorCode(1294) + ERUnknownTimeZone = ErrorCode(1298) + ERInvalidCharacterString = ErrorCode(1300) + ERQueryInterrupted = ErrorCode(1317) + ERTruncatedWrongValueForField = ErrorCode(1366) + ERIllegalValueForType = ErrorCode(1367) + ERDataTooLong = ErrorCode(1406) + ErrWrongValueForType = ErrorCode(1411) + ERNoSuchUser = ErrorCode(1449) + ERForbidSchemaChange = ErrorCode(1450) + ERWrongValue = ErrorCode(1525) + ERDataOutOfRange = ErrorCode(1690) + ERInvalidJSONText = ErrorCode(3140) + ERInvalidJSONTextInParams = ErrorCode(3141) + ERInvalidJSONBinaryData = ErrorCode(3142) + ERInvalidJSONCharset = ErrorCode(3144) + ERInvalidCastToJSON = ErrorCode(3147) + ERJSONValueTooBig = ErrorCode(3150) + ERJSONDocumentTooDeep = ErrorCode(3157) + + ERRegexpStringNotTerminated = ErrorCode(3684) + ERRegexpBufferOverflow = ErrorCode(3684) + ERRegexpIllegalArgument = ErrorCode(3685) + ERRegexpIndexOutOfBounds = ErrorCode(3686) + ERRegexpInternal = ErrorCode(3687) + ERRegexpRuleSyntax = ErrorCode(3688) + ERRegexpBadEscapeSequence = ErrorCode(3689) + ERRegexpUnimplemented = ErrorCode(3690) + ERRegexpMismatchParen = ErrorCode(3691) + ERRegexpBadInterval = ErrorCode(3692) + ERRRegexpMaxLtMin = ErrorCode(3693) + ERRegexpInvalidBackRef = ErrorCode(3694) + ERRegexpLookBehindLimit = ErrorCode(3695) + ERRegexpMissingCloseBracket = ErrorCode(3696) + ERRegexpInvalidRange = ErrorCode(3697) + ERRegexpStackOverflow = ErrorCode(3698) + ERRegexpTimeOut = ErrorCode(3699) + ERRegexpPatternTooBig = ErrorCode(3700) + ERRegexpInvalidCaptureGroup = ErrorCode(3887) + ERRegexpInvalidFlag = ErrorCode(3900) + + ERCharacterSetMismatch = ErrorCode(3995) + + ERWrongParametersToNativeFct = ErrorCode(1583) + + // max execution time exceeded + ERQueryTimeout = ErrorCode(3024) + + ErrCantCreateGeometryObject = ErrorCode(1416) + ErrGISDataWrongEndianess = ErrorCode(3055) + ErrNotImplementedForCartesianSRS = ErrorCode(3704) + ErrNotImplementedForProjectedSRS = ErrorCode(3705) + ErrNonPositiveRadius = ErrorCode(3706) + + // server not available + ERServerIsntAvailable = ErrorCode(3168) +) + +// Sql states for errors. +// Originally found in include/mysql/sql_state.h +const ( + // SSUnknownSqlstate is ER_SIGNAL_EXCEPTION in + // include/mysql/sql_state.h, but: + // const char *unknown_sqlstate= "HY000" + // in client.c. So using that one. + SSUnknownSQLState = "HY000" + + // SSNetError is network related error + SSNetError = "08S01" + + // SSWrongNumberOfColumns is related to columns error + SSWrongNumberOfColumns = "21000" + + // SSWrongValueCountOnRow is related to columns count mismatch error + SSWrongValueCountOnRow = "21S01" + + // SSDataTooLong is ER_DATA_TOO_LONG + SSDataTooLong = "22001" + + // SSDataOutOfRange is ER_DATA_OUT_OF_RANGE + SSDataOutOfRange = "22003" + + // SSConstraintViolation is constraint violation + SSConstraintViolation = "23000" + + // SSCantDoThisDuringAnTransaction is + // ER_CANT_DO_THIS_DURING_AN_TRANSACTION + SSCantDoThisDuringAnTransaction = "25000" + + // SSAccessDeniedError is ER_ACCESS_DENIED_ERROR + SSAccessDeniedError = "28000" + + // SSNoDB is ER_NO_DB_ERROR + SSNoDB = "3D000" + + // SSLockDeadlock is ER_LOCK_DEADLOCK + SSLockDeadlock = "40001" + + // SSClientError is the state on client errors + SSClientError = "42000" + + // SSDupFieldName is ER_DUP_FIELD_NAME + SSDupFieldName = "42S21" + + // SSBadFieldError is ER_BAD_FIELD_ERROR + SSBadFieldError = "42S22" + + // SSUnknownTable is ER_UNKNOWN_TABLE + SSUnknownTable = "42S02" + + // SSQueryInterrupted is ER_QUERY_INTERRUPTED; + SSQueryInterrupted = "70100" +) + +// IsConnErr returns true if the error is a connection error. +func IsConnErr(err error) bool { + if IsTooManyConnectionsErr(err) { + return false + } + if sqlErr, ok := err.(*SQLError); ok { + num := sqlErr.Number() + return (num >= CRUnknownError && num <= CRNamedPipeStateError) || num == ERQueryInterrupted + } + return false +} + +// IsConnLostDuringQuery returns true if the error is a CRServerLost error. +// Happens most commonly when a query is killed MySQL server-side. +func IsConnLostDuringQuery(err error) bool { + if sqlErr, ok := err.(*SQLError); ok { + num := sqlErr.Number() + return (num == CRServerLost) + } + return false +} + +// IsEphemeralError returns true if the error is ephemeral and the caller should +// retry if possible. Note: non-SQL errors are always treated as ephemeral. +func IsEphemeralError(err error) bool { + if sqlErr, ok := err.(*SQLError); ok { + en := sqlErr.Number() + switch en { + case + CRConnectionError, + CRConnHostError, + CRMalformedPacket, + CRNamedPipeStateError, + CRServerHandshakeErr, + CRServerGone, + CRServerLost, + CRSSLConnectionError, + CRUnknownError, + CRUnknownHost, + ERCantCreateThread, + ERDiskFull, + ERForcingClose, + ERGotSignal, + ERHostIsBlocked, + ERLockTableFull, + ERInnodbReadOnly, + ERInternalError, + ERLockDeadlock, + ERLockWaitTimeout, + ERQueryTimeout, + EROutOfMemory, + EROutOfResources, + EROutOfSortMemory, + ERQueryInterrupted, + ERServerIsntAvailable, + ERServerShutdown, + ERTooManyUserConnections, + ERUnknownError, + ERUserLimitReached: + return true + default: + return false + } + } + // If it's not an sqlError then we assume it's ephemeral + return true +} + +// IsTooManyConnectionsErr returns true if the error is due to too many connections. +func IsTooManyConnectionsErr(err error) bool { + if sqlErr, ok := err.(*SQLError); ok { + if sqlErr.Number() == CRServerHandshakeErr && strings.Contains(sqlErr.Message, "Too many connections") { + return true + } + } + return false +} + +// IsSchemaApplyError returns true when given error is a MySQL error applying schema change +func IsSchemaApplyError(err error) bool { + merr, isSQLErr := err.(*SQLError) + if !isSQLErr { + return false + } + switch merr.Num { + case + ERDupKeyName, + ERCantDropFieldOrKey, + ERTableExists, + ERDupFieldName: + return true + } + return false +} + +// Error codes for client-side errors. +// Originally found in include/mysql/errmsg.h and +// https://dev.mysql.com/doc/mysql-errors/en/client-error-reference.html +const ( + // CRUnknownError is CR_UNKNOWN_ERROR + CRUnknownError = ErrorCode(2000) + + // CRConnectionError is CR_CONNECTION_ERROR + // This is returned if a connection via a Unix socket fails. + CRConnectionError = ErrorCode(2002) + + // CRConnHostError is CR_CONN_HOST_ERROR + // This is returned if a connection via a TCP socket fails. + CRConnHostError = ErrorCode(2003) + + // CRUnknownHost is CR_UNKNOWN_HOST + // This is returned if the host name cannot be resolved. + CRUnknownHost = ErrorCode(2005) + + // CRServerGone is CR_SERVER_GONE_ERROR. + // This is returned if the client tries to send a command but it fails. + CRServerGone = ErrorCode(2006) + + // CRVersionError is CR_VERSION_ERROR + // This is returned if the server versions don't match what we support. + CRVersionError = ErrorCode(2007) + + // CRServerHandshakeErr is CR_SERVER_HANDSHAKE_ERR + CRServerHandshakeErr = ErrorCode(2012) + + // CRServerLost is CR_SERVER_LOST. + // Used when: + // - the client cannot write an initial auth packet. + // - the client cannot read an initial auth packet. + // - the client cannot read a response from the server. + // This happens when a running query is killed. + CRServerLost = ErrorCode(2013) + + // CRCommandsOutOfSync is CR_COMMANDS_OUT_OF_SYNC + // Sent when the streaming calls are not done in the right order. + CRCommandsOutOfSync = ErrorCode(2014) + + // CRNamedPipeStateError is CR_NAMEDPIPESETSTATE_ERROR. + // This is the highest possible number for a connection error. + CRNamedPipeStateError = ErrorCode(2018) + + // CRCantReadCharset is CR_CANT_READ_CHARSET + CRCantReadCharset = ErrorCode(2019) + + // CRSSLConnectionError is CR_SSL_CONNECTION_ERROR + CRSSLConnectionError = ErrorCode(2026) + + // CRMalformedPacket is CR_MALFORMED_PACKET + CRMalformedPacket = ErrorCode(2027) +) diff --git a/go/mysql/sql_error.go b/go/mysql/sqlerror/sql_error.go similarity index 75% rename from go/mysql/sql_error.go rename to go/mysql/sqlerror/sql_error.go index 369b486c048..9b1f65c82e3 100644 --- a/go/mysql/sql_error.go +++ b/go/mysql/sqlerror/sql_error.go @@ -14,13 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package sqlerror import ( "bytes" "fmt" "regexp" "strconv" + "strings" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" @@ -146,7 +147,11 @@ func mapToSQLErrorFromErrorCode(err error, msg string) *SQLError { ss = SSAccessDeniedError case vtrpcpb.Code_RESOURCE_EXHAUSTED: num = demuxResourceExhaustedErrors(err.Error()) - ss = SSClientError + // 1041 ER_OUT_OF_RESOURCES has SQLSTATE HYOOO as per https://dev.mysql.com/doc/mysql-errors/8.0/en/server-error-reference.html#error_er_out_of_resources, + // so don't override it here in that case. + if num != EROutOfResources { + ss = SSClientError + } case vtrpcpb.Code_UNIMPLEMENTED: num = ERNotSupportedYet ss = SSClientError @@ -207,9 +212,37 @@ var stateToMysqlCode = map[vterrors.State]mysqlCode{ vterrors.ServerNotAvailable: {num: ERServerIsntAvailable, state: SSNetError}, vterrors.CantDoThisInTransaction: {num: ERCantDoThisDuringAnTransaction, state: SSCantDoThisDuringAnTransaction}, vterrors.RequiresPrimaryKey: {num: ERRequiresPrimaryKey, state: SSClientError}, + vterrors.RowIsReferenced2: {num: ERRowIsReferenced2, state: SSConstraintViolation}, + vterrors.NoReferencedRow2: {num: ErNoReferencedRow2, state: SSConstraintViolation}, vterrors.NoSuchSession: {num: ERUnknownComError, state: SSNetError}, vterrors.OperandColumns: {num: EROperandColumns, state: SSWrongNumberOfColumns}, vterrors.WrongValueCountOnRow: {num: ERWrongValueCountOnRow, state: SSWrongValueCountOnRow}, + vterrors.WrongArguments: {num: ERWrongArguments, state: SSUnknownSQLState}, + vterrors.UnknownStmtHandler: {num: ERUnknownStmtHandler, state: SSUnknownSQLState}, + vterrors.UnknownTimeZone: {num: ERUnknownTimeZone, state: SSUnknownSQLState}, + vterrors.RegexpStringNotTerminated: {num: ERRegexpStringNotTerminated, state: SSUnknownSQLState}, + vterrors.RegexpBufferOverflow: {num: ERRegexpBufferOverflow, state: SSUnknownSQLState}, + vterrors.RegexpIllegalArgument: {num: ERRegexpIllegalArgument, state: SSUnknownSQLState}, + vterrors.RegexpIndexOutOfBounds: {num: ERRegexpIndexOutOfBounds, state: SSUnknownSQLState}, + vterrors.RegexpInternal: {num: ERRegexpInternal, state: SSUnknownSQLState}, + vterrors.RegexpRuleSyntax: {num: ERRegexpRuleSyntax, state: SSUnknownSQLState}, + vterrors.RegexpBadEscapeSequence: {num: ERRegexpBadEscapeSequence, state: SSUnknownSQLState}, + vterrors.RegexpUnimplemented: {num: ERRegexpUnimplemented, state: SSUnknownSQLState}, + vterrors.RegexpMismatchParen: {num: ERRegexpMismatchParen, state: SSUnknownSQLState}, + vterrors.RegexpBadInterval: {num: ERRegexpBadInterval, state: SSUnknownSQLState}, + vterrors.RegexpMaxLtMin: {num: ERRRegexpMaxLtMin, state: SSUnknownSQLState}, + vterrors.RegexpInvalidBackRef: {num: ERRegexpInvalidBackRef, state: SSUnknownSQLState}, + vterrors.RegexpLookBehindLimit: {num: ERRegexpLookBehindLimit, state: SSUnknownSQLState}, + vterrors.RegexpMissingCloseBracket: {num: ERRegexpMissingCloseBracket, state: SSUnknownSQLState}, + vterrors.RegexpInvalidRange: {num: ERRegexpInvalidRange, state: SSUnknownSQLState}, + vterrors.RegexpStackOverflow: {num: ERRegexpStackOverflow, state: SSUnknownSQLState}, + vterrors.RegexpTimeOut: {num: ERRegexpTimeOut, state: SSUnknownSQLState}, + vterrors.RegexpPatternTooBig: {num: ERRegexpPatternTooBig, state: SSUnknownSQLState}, + vterrors.RegexpInvalidFlag: {num: ERRegexpInvalidFlag, state: SSUnknownSQLState}, + vterrors.RegexpInvalidCaptureGroup: {num: ERRegexpInvalidCaptureGroup, state: SSUnknownSQLState}, + vterrors.CharacterSetMismatch: {num: ERCharacterSetMismatch, state: SSUnknownSQLState}, + vterrors.WrongParametersToNativeFct: {num: ERWrongParametersToNativeFct, state: SSUnknownSQLState}, + vterrors.KillDeniedError: {num: ERKillDenied, state: SSUnknownSQLState}, } func getStateToMySQLState(state vterrors.State) mysqlCode { @@ -258,6 +291,8 @@ func demuxResourceExhaustedErrors(msg string) ErrorCode { switch { case isGRPCOverflowRE.Match([]byte(msg)): return ERNetPacketTooLarge + case strings.Contains(msg, "Transaction throttled"): + return EROutOfResources default: return ERTooManyUserConnections } diff --git a/go/mysql/sql_error_test.go b/go/mysql/sqlerror/sql_error_test.go similarity index 93% rename from go/mysql/sql_error_test.go rename to go/mysql/sqlerror/sql_error_test.go index 544c5dc94ae..3c7f3114b68 100644 --- a/go/mysql/sql_error_test.go +++ b/go/mysql/sqlerror/sql_error_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package sqlerror import ( "fmt" @@ -26,7 +26,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestDumuxResourceExhaustedErrors(t *testing.T) { +func TestDemuxResourceExhaustedErrors(t *testing.T) { type testCase struct { msg string want ErrorCode @@ -43,6 +43,7 @@ func TestDumuxResourceExhaustedErrors(t *testing.T) { // This should be explicitly handled by returning ERNetPacketTooLarge from the execturo directly // and therefore shouldn't need to be teased out of another error. {"in-memory row count exceeded allowed limit of 13", ERTooManyUserConnections}, + {"rpc error: code = ResourceExhausted desc = Transaction throttled", EROutOfResources}, } for _, c := range cases { @@ -167,6 +168,11 @@ func TestNewSQLErrorFromError(t *testing.T) { num: ERBadNullError, ss: SSConstraintViolation, }, + { + err: vterrors.Errorf(vtrpc.Code_RESOURCE_EXHAUSTED, "vttablet: rpc error: code = ResourceExhausted desc = Transaction throttled"), + num: EROutOfResources, + ss: SSUnknownSQLState, + }, } for _, tc := range tCases { diff --git a/go/mysql/streaming_query.go b/go/mysql/streaming_query.go index 9e023150455..257c56e076f 100644 --- a/go/mysql/streaming_query.go +++ b/go/mysql/streaming_query.go @@ -17,6 +17,7 @@ limitations under the License. package mysql import ( + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" @@ -30,7 +31,7 @@ import ( func (c *Conn) ExecuteStreamFetch(query string) (err error) { defer func() { if err != nil { - if sqlerr, ok := err.(*SQLError); ok { + if sqlerr, ok := err.(*sqlerror.SQLError); ok { sqlerr.Query = query } } @@ -38,7 +39,7 @@ func (c *Conn) ExecuteStreamFetch(query string) (err error) { // Sanity check. if c.fields != nil { - return NewSQLError(CRCommandsOutOfSync, SSUnknownSQLState, "streaming query already in progress") + return sqlerror.NewSQLError(sqlerror.CRCommandsOutOfSync, sqlerror.SSUnknownSQLState, "streaming query already in progress") } // Send the query as a COM_QUERY packet. @@ -75,7 +76,7 @@ func (c *Conn) ExecuteStreamFetch(query string) (err error) { // EOF is only present here if it's not deprecated. data, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } defer c.recycleReadPacket() if c.isEOFPacket(data) { @@ -85,7 +86,7 @@ func (c *Conn) ExecuteStreamFetch(query string) (err error) { } else if isErrorPacket(data) { return ParseErrorPacket(data) } else { - return NewSQLError(CRCommandsOutOfSync, SSUnknownSQLState, "unexpected packet after fields: %v", data) + return sqlerror.NewSQLError(sqlerror.CRCommandsOutOfSync, sqlerror.SSUnknownSQLState, "unexpected packet after fields: %v", data) } } @@ -96,7 +97,7 @@ func (c *Conn) ExecuteStreamFetch(query string) (err error) { // Fields returns the fields for an ongoing streaming query. func (c *Conn) Fields() ([]*querypb.Field, error) { if c.fields == nil { - return nil, NewSQLError(CRCommandsOutOfSync, SSUnknownSQLState, "no streaming query in progress") + return nil, sqlerror.NewSQLError(sqlerror.CRCommandsOutOfSync, sqlerror.SSUnknownSQLState, "no streaming query in progress") } if len(c.fields) == 0 { // The query returned an empty field list. @@ -110,7 +111,7 @@ func (c *Conn) Fields() ([]*querypb.Field, error) { func (c *Conn) FetchNext(in []sqltypes.Value) ([]sqltypes.Value, error) { if c.fields == nil { // We are already done, and the result was closed. - return nil, NewSQLError(CRCommandsOutOfSync, SSUnknownSQLState, "no streaming query in progress") + return nil, sqlerror.NewSQLError(sqlerror.CRCommandsOutOfSync, sqlerror.SSUnknownSQLState, "no streaming query in progress") } if len(c.fields) == 0 { diff --git a/go/mysql/vault/auth_server_vault.go b/go/mysql/vault/auth_server_vault.go index 8d6f566b6d4..ccdef9f1d53 100644 --- a/go/mysql/vault/auth_server_vault.go +++ b/go/mysql/vault/auth_server_vault.go @@ -30,6 +30,8 @@ import ( vaultapi "github.com/aquarapid/vaultlib" "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" @@ -186,14 +188,14 @@ func (a *AuthServerVault) UserEntryWithHash(conn *mysql.Conn, salt []byte, user a.mu.Unlock() if !ok { - return &mysql.StaticUserData{}, mysql.NewSQLError(mysql.ERAccessDeniedError, mysql.SSAccessDeniedError, "Access denied for user '%v'", user) + return &mysql.StaticUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } for _, entry := range userEntries { if entry.MysqlNativePassword != "" { hash, err := mysql.DecodeMysqlNativePasswordHex(entry.MysqlNativePassword) if err != nil { - return &mysql.StaticUserData{Username: entry.UserData, Groups: entry.Groups}, mysql.NewSQLError(mysql.ERAccessDeniedError, mysql.SSAccessDeniedError, "Access denied for user '%v'", user) + return &mysql.StaticUserData{Username: entry.UserData, Groups: entry.Groups}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } isPass := mysql.VerifyHashedMysqlNativePassword(authResponse, salt, hash) if mysql.MatchSourceHost(remoteAddr, entry.SourceHost) && isPass { @@ -207,7 +209,7 @@ func (a *AuthServerVault) UserEntryWithHash(conn *mysql.Conn, salt []byte, user } } } - return &mysql.StaticUserData{}, mysql.NewSQLError(mysql.ERAccessDeniedError, mysql.SSAccessDeniedError, "Access denied for user '%v'", user) + return &mysql.StaticUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } func (a *AuthServerVault) setTTLTicker(ttl time.Duration) { diff --git a/go/netutil/netutil.go b/go/netutil/netutil.go index 54e53e85226..fbac6e88424 100644 --- a/go/netutil/netutil.go +++ b/go/netutil/netutil.go @@ -29,10 +29,6 @@ import ( "time" ) -func init() { - rand.Seed(time.Now().UnixNano()) -} - // byPriorityWeight sorts records by ascending priority and weight. type byPriorityWeight []*net.SRV @@ -48,7 +44,7 @@ func (addrs byPriorityWeight) Less(i, j int) bool { // shuffleByWeight shuffles SRV records by weight using the algorithm // described in RFC 2782. // NOTE(msolo) This is disabled when the weights are zero. -func (addrs byPriorityWeight) shuffleByWeight() { +func (addrs byPriorityWeight) shuffleByWeight(rand *rand.Rand) { sum := 0 for _, addr := range addrs { sum += int(addr.Weight) @@ -72,21 +68,21 @@ func (addrs byPriorityWeight) shuffleByWeight() { } } -func (addrs byPriorityWeight) sortRfc2782() { +func (addrs byPriorityWeight) sortRfc2782(rand *rand.Rand) { sort.Sort(addrs) i := 0 for j := 1; j < len(addrs); j++ { if addrs[i].Priority != addrs[j].Priority { - addrs[i:j].shuffleByWeight() + addrs[i:j].shuffleByWeight(rand) i = j } } - addrs[i:].shuffleByWeight() + addrs[i:].shuffleByWeight(rand) } // SortRfc2782 reorders SRV records as specified in RFC 2782. func SortRfc2782(srvs []*net.SRV) { - byPriorityWeight(srvs).sortRfc2782() + byPriorityWeight(srvs).sortRfc2782(rand.New(rand.NewSource(time.Now().UTC().UnixNano()))) } // SplitHostPort is an alternative to net.SplitHostPort that also parses the diff --git a/go/netutil/netutil_test.go b/go/netutil/netutil_test.go index 574bda5f26b..b8cfc563acb 100644 --- a/go/netutil/netutil_test.go +++ b/go/netutil/netutil_test.go @@ -24,7 +24,7 @@ import ( "testing" ) -func checkDistribution(t *testing.T, data []*net.SRV, margin float64) { +func checkDistribution(t *testing.T, rand *rand.Rand, data []*net.SRV, margin float64) { sum := 0 for _, srv := range data { sum += int(srv.Weight) @@ -36,7 +36,7 @@ func checkDistribution(t *testing.T, data []*net.SRV, margin float64) { for j := 0; j < count; j++ { d := make([]*net.SRV, len(data)) copy(d, data) - byPriorityWeight(d).shuffleByWeight() + byPriorityWeight(d).shuffleByWeight(rand) key := d[0].Target results[key] = results[key] + 1 } @@ -54,12 +54,11 @@ func checkDistribution(t *testing.T, data []*net.SRV, margin float64) { } func testUniformity(t *testing.T, size int, margin float64) { - rand.Seed(1) data := make([]*net.SRV, size) for i := 0; i < size; i++ { data[i] = &net.SRV{Target: fmt.Sprintf("%c", 'a'+i), Weight: 1} } - checkDistribution(t, data, margin) + checkDistribution(t, rand.New(rand.NewSource(1)), data, margin) } func TestUniformity(t *testing.T) { @@ -70,13 +69,12 @@ func TestUniformity(t *testing.T) { } func testWeighting(t *testing.T, margin float64) { - rand.Seed(1) data := []*net.SRV{ {Target: "a", Weight: 60}, {Target: "b", Weight: 30}, {Target: "c", Weight: 10}, } - checkDistribution(t, data, margin) + checkDistribution(t, rand.New(rand.NewSource(1)), data, margin) } func TestWeighting(t *testing.T) { diff --git a/go/pools/resource_pool.go b/go/pools/resource_pool.go index 940a2b2026c..f049b667fe6 100644 --- a/go/pools/resource_pool.go +++ b/go/pools/resource_pool.go @@ -589,7 +589,7 @@ func (rp *ResourcePool) IdleClosed() int64 { return rp.idleClosed.Load() } -// extendedLifetimeTimeout returns random duration within range [maxLifetime, 2*maxLifetime) +// extendedMaxLifetime returns random duration within range [maxLifetime, 2*maxLifetime) func (rp *ResourcePool) extendedMaxLifetime() time.Duration { maxLifetime := rp.maxLifetime.Load() if maxLifetime == 0 { diff --git a/go/slice/slice.go b/go/slice/slice.go new file mode 100644 index 00000000000..36130354c3d --- /dev/null +++ b/go/slice/slice.go @@ -0,0 +1,64 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package slice contains generic Slice helpers; +// Some of this code is sourced from https://github.com/luraim/fun (Apache v2) +package slice + +// All returns true if all elements return true for given predicate +func All[T any](s []T, fn func(T) bool) bool { + for _, e := range s { + if !fn(e) { + return false + } + } + return true +} + +// Any returns true if at least one element returns true for given predicate +func Any[T any](s []T, fn func(T) bool) bool { + for _, e := range s { + if fn(e) { + return true + } + } + return false +} + +func Map[From, To any](in []From, f func(From) To) []To { + if in == nil { + return nil + } + result := make([]To, len(in)) + for i, col := range in { + result[i] = f(col) + } + return result +} + +func MapWithError[From, To any](in []From, f func(From) (To, error)) (result []To, err error) { + if in == nil { + return nil, nil + } + result = make([]To, len(in)) + for i, col := range in { + result[i], err = f(col) + if err != nil { + return nil, err + } + } + return +} diff --git a/go/sqltypes/bind_variables.go b/go/sqltypes/bind_variables.go index 232be6415f3..021558286a7 100644 --- a/go/sqltypes/bind_variables.go +++ b/go/sqltypes/bind_variables.go @@ -27,7 +27,7 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" ) -type DecimalFloat float64 +type DecimalString string var ( // BvSchemaName is bind variable to be sent down to vttablet for schema name. @@ -42,12 +42,16 @@ var ( // ValueToProto converts Value to a *querypb.Value. func ValueToProto(v Value) *querypb.Value { - return &querypb.Value{Type: v.typ, Value: v.val} + var protoValues []*querypb.Value + for _, value := range v.values { + protoValues = append(protoValues, ValueToProto(value)) + } + return &querypb.Value{Type: v.typ, Value: v.val, Values: protoValues} } // ProtoToValue converts a *querypb.Value to a Value. func ProtoToValue(v *querypb.Value) Value { - return MakeTrusted(v.Type, v.Value) + return MakeTrustedValues(v.Type, v.Value, v.Values) } // BuildBindVariables builds a map[string]*querypb.BindVariable from a map[string]any @@ -120,9 +124,8 @@ func Float64BindVariable(v float64) *querypb.BindVariable { return ValueBindVariable(NewFloat64(v)) } -func DecimalBindVariable(v DecimalFloat) *querypb.BindVariable { - f := strconv.FormatFloat(float64(v), 'f', -1, 64) - return ValueBindVariable(NewDecimal(f)) +func DecimalBindVariable(v DecimalString) *querypb.BindVariable { + return ValueBindVariable(NewDecimal(string(v))) } // StringBindVariable converts a string to a bind var. @@ -170,7 +173,7 @@ func BuildBindVariable(v any) (*querypb.BindVariable, error) { return Int64BindVariable(v), nil case uint64: return Uint64BindVariable(v), nil - case DecimalFloat: + case DecimalString: return DecimalBindVariable(v), nil case float64: return Float64BindVariable(v), nil diff --git a/go/sqltypes/bind_variables_test.go b/go/sqltypes/bind_variables_test.go index 40925d228a1..fe22cea63a6 100644 --- a/go/sqltypes/bind_variables_test.go +++ b/go/sqltypes/bind_variables_test.go @@ -18,7 +18,6 @@ package sqltypes import ( "fmt" - "reflect" "strings" "testing" @@ -30,16 +29,83 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" ) +// TestProtoConversions checks coverting to and fro between querypb.Value and sqltypes.Value. func TestProtoConversions(t *testing.T) { - v := TestValue(Int64, "1") - got := ValueToProto(v) - want := &querypb.Value{Type: Int64, Value: []byte("1")} - if !proto.Equal(got, want) { - t.Errorf("ValueToProto: %v, want %v", got, want) + tcases := []struct { + name string + val Value + protoVal *querypb.Value + }{ + { + name: "integer value", + val: TestValue(Int64, "1"), + protoVal: &querypb.Value{Type: Int64, Value: []byte("1")}, + }, { + name: "tuple value", + val: Value{ + typ: Tuple, + values: []Value{ + TestValue(VarChar, "1"), + TestValue(Int64, "3"), + }, + }, + protoVal: &querypb.Value{ + Type: Tuple, + Values: []*querypb.Value{ + { + Type: VarChar, + Value: []byte("1"), + }, { + Type: Int64, + Value: []byte("3"), + }, + }, + }, + }, { + name: "tuple of tuple as a value", + val: Value{ + typ: Tuple, + values: []Value{ + { + typ: Tuple, + values: []Value{ + TestValue(VarChar, "1"), + TestValue(Int64, "3"), + }, + }, + TestValue(Int64, "5"), + }, + }, + protoVal: &querypb.Value{ + Type: Tuple, + Values: []*querypb.Value{ + { + Type: Tuple, + Values: []*querypb.Value{ + { + Type: VarChar, + Value: []byte("1"), + }, { + Type: Int64, + Value: []byte("3"), + }, + }, + }, { + Type: Int64, + Value: []byte("5"), + }, + }, + }, + }, } - gotback := ProtoToValue(got) - if !reflect.DeepEqual(gotback, v) { - t.Errorf("ProtoToValue: %v, want %v", gotback, v) + + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + got := ValueToProto(tcase.val) + require.True(t, proto.Equal(got, tcase.protoVal), "ValueToProto: %v, want %v", got, tcase.protoVal) + gotback := ProtoToValue(got) + require.EqualValues(t, tcase.val, gotback) + }) } } @@ -329,7 +395,7 @@ func TestValidateBindVarables(t *testing.T) { Value: []byte("a"), }, }, - err: `v: strconv.ParseInt: parsing "a": invalid syntax`, + err: `v: cannot parse int64 from "a"`, }, { in: map[string]*querypb.BindVariable{ "v": { @@ -340,7 +406,7 @@ func TestValidateBindVarables(t *testing.T) { }}, }, }, - err: `v: strconv.ParseInt: parsing "a": invalid syntax`, + err: `v: cannot parse int64 from "a"`, }} for _, tcase := range tcases { err := ValidateBindVariables(tcase.in) @@ -500,31 +566,31 @@ func TestValidateBindVariable(t *testing.T) { Type: querypb.Type_INT64, Value: []byte(InvalidNeg), }, - err: "out of range", + err: `cannot parse int64 from "-9223372036854775809": overflow`, }, { in: &querypb.BindVariable{ Type: querypb.Type_INT64, Value: []byte(InvalidPos), }, - err: "out of range", + err: `cannot parse int64 from "18446744073709551616": overflow`, }, { in: &querypb.BindVariable{ Type: querypb.Type_UINT64, Value: []byte("-1"), }, - err: "invalid syntax", + err: `cannot parse uint64 from "-1"`, }, { in: &querypb.BindVariable{ Type: querypb.Type_UINT64, Value: []byte(InvalidPos), }, - err: "out of range", + err: `cannot parse uint64 from "18446744073709551616": overflow`, }, { in: &querypb.BindVariable{ Type: querypb.Type_FLOAT64, Value: []byte("a"), }, - err: "invalid syntax", + err: `unparsed tail left after parsing float64 from "a"`, }, { in: &querypb.BindVariable{ Type: querypb.Type_EXPRESSION, diff --git a/go/sqltypes/cached_size.go b/go/sqltypes/cached_size.go index 2a488f8450e..7a7c5c674e2 100644 --- a/go/sqltypes/cached_size.go +++ b/go/sqltypes/cached_size.go @@ -39,7 +39,7 @@ func (cached *Result) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(cap(cached.Rows)) * int64(24)) for _, elem := range cached.Rows { { - size += hack.RuntimeAllocSize(int64(cap(elem)) * int64(32)) + size += hack.RuntimeAllocSize(int64(cap(elem)) * int64(56)) for _, elem := range elem { size += elem.CachedSize(false) } @@ -58,11 +58,18 @@ func (cached *Value) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(32) + size += int64(64) } // field val []byte { size += hack.RuntimeAllocSize(int64(cap(cached.val))) } + // field values []vitess.io/vitess/go/sqltypes.Value + { + size += hack.RuntimeAllocSize(int64(cap(cached.values)) * int64(56)) + for _, elem := range cached.values { + size += elem.CachedSize(false) + } + } return size } diff --git a/go/sqltypes/cast.go b/go/sqltypes/cast.go new file mode 100644 index 00000000000..e97e47ea17c --- /dev/null +++ b/go/sqltypes/cast.go @@ -0,0 +1,54 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +// Cast converts a Value to the target type. +func Cast(v Value, typ Type) (Value, error) { + if v.Type() == typ || v.IsNull() { + return v, nil + } + vBytes, err := v.ToBytes() + if err != nil { + return v, err + } + if IsSigned(typ) && v.IsSigned() { + return MakeTrusted(typ, vBytes), nil + } + if IsUnsigned(typ) && v.IsUnsigned() { + return MakeTrusted(typ, vBytes), nil + } + if (IsFloat(typ) || typ == Decimal) && (v.IsIntegral() || v.IsFloat() || v.Type() == Decimal) { + return MakeTrusted(typ, vBytes), nil + } + if IsQuoted(typ) && (v.IsIntegral() || v.IsFloat() || v.Type() == Decimal || v.IsQuoted()) { + return MakeTrusted(typ, vBytes), nil + } + + // Explicitly disallow Expression. + if v.Type() == Expression { + return NULL, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v cannot be cast to %v", v, typ) + } + + // If the above fast-paths were not possible, + // go through full validation. + return NewValue(typ, vBytes) +} diff --git a/go/sqltypes/cast_test.go b/go/sqltypes/cast_test.go new file mode 100644 index 00000000000..f2a7d24e88a --- /dev/null +++ b/go/sqltypes/cast_test.go @@ -0,0 +1,123 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "reflect" + "testing" + + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +func TestCast(t *testing.T) { + tcases := []struct { + typ Type + v Value + out Value + err error + }{{ + typ: VarChar, + v: NULL, + out: NULL, + }, { + typ: VarChar, + v: TestValue(VarChar, "exact types"), + out: TestValue(VarChar, "exact types"), + }, { + typ: Int64, + v: TestValue(Int32, "32"), + out: TestValue(Int64, "32"), + }, { + typ: Int24, + v: TestValue(Uint64, "64"), + out: TestValue(Int24, "64"), + }, { + typ: Int24, + v: TestValue(VarChar, "bad int"), + err: vterrors.New(vtrpcpb.Code_UNKNOWN, `cannot parse int64 from "bad int"`), + }, { + typ: Uint64, + v: TestValue(Uint32, "32"), + out: TestValue(Uint64, "32"), + }, { + typ: Uint24, + v: TestValue(Int64, "64"), + out: TestValue(Uint24, "64"), + }, { + typ: Uint24, + v: TestValue(Int64, "-1"), + err: vterrors.New(vtrpcpb.Code_UNKNOWN, `cannot parse uint64 from "-1"`), + }, { + typ: Float64, + v: TestValue(Int64, "64"), + out: TestValue(Float64, "64"), + }, { + typ: Float32, + v: TestValue(Float64, "64"), + out: TestValue(Float32, "64"), + }, { + typ: Float32, + v: TestValue(Decimal, "1.24"), + out: TestValue(Float32, "1.24"), + }, { + typ: Float64, + v: TestValue(VarChar, "1.25"), + out: TestValue(Float64, "1.25"), + }, { + typ: Float64, + v: TestValue(VarChar, "bad float"), + err: vterrors.New(vtrpcpb.Code_UNKNOWN, `unparsed tail left after parsing float64 from "bad float": "bad float"`), + }, { + typ: VarChar, + v: TestValue(Int64, "64"), + out: TestValue(VarChar, "64"), + }, { + typ: VarBinary, + v: TestValue(Float64, "64"), + out: TestValue(VarBinary, "64"), + }, { + typ: VarBinary, + v: TestValue(Decimal, "1.24"), + out: TestValue(VarBinary, "1.24"), + }, { + typ: VarBinary, + v: TestValue(VarChar, "1.25"), + out: TestValue(VarBinary, "1.25"), + }, { + typ: VarChar, + v: TestValue(VarBinary, "valid string"), + out: TestValue(VarChar, "valid string"), + }, { + typ: VarChar, + v: TestValue(Expression, "bad string"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "expression cannot be converted to bytes"), + }} + for _, tcase := range tcases { + got, err := Cast(tcase.v, tcase.typ) + if !vterrors.Equals(err, tcase.err) { + t.Errorf("Cast(%v) error: %v, want %v", tcase.v, vterrors.Print(err), vterrors.Print(tcase.err)) + } + if tcase.err != nil { + continue + } + + if !reflect.DeepEqual(got, tcase.out) { + t.Errorf("Cast(%v): %v, want %v", tcase.v, got, tcase.out) + } + } +} diff --git a/go/sqltypes/marshal.go b/go/sqltypes/marshal.go new file mode 100644 index 00000000000..bbf43106110 --- /dev/null +++ b/go/sqltypes/marshal.go @@ -0,0 +1,484 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "fmt" + "reflect" + "strings" + "time" + + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/vt/vterrors" + + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/proto/vttime" +) + +// ResultMarshaller knows how to marshal itself into a Result. +type ResultMarshaller interface { + MarshalResult() (*Result, error) +} + +// ValueMarshaller knows how to marshal itself into the bytes for a column of +// a particular type. +type ValueMarshaller interface { + MarshalSQL(typ querypb.Type) ([]byte, error) +} + +// ReplaceFields remaps the fields and/or row columns of a given result. This +// is useful when you need to embed a struct to modify its marshalling behavior, +// then cleanup or otherwise transfer the redunant fields. +// For example: +/* +| uuid | tablet | retries | migration_uuid | $$tablet | +| abc | --- | 1 | abc | zone1-101 | + +=> becomes + +| migration_uuid | tablet | retries | +| abc | zone1-101 | 1 | +*/ +func ReplaceFields(result *Result, remap map[string]string) *Result { + var ( + // orig maps fieldname => original col (field and row) + orig = make(map[string]int, len(result.Fields)) + // fieldIdx maps final col (field) => fieldname + fieldIdx = make([]string, len(result.Fields)) + // rowIdx maps final col (row) => fieldname + rowIdx = make([]string, len(result.Fields)) + + // inverseRemap is the inverse of the remapping, so we know also if a + // field is the target of a rename + inverseRemap = make(map[string]string, len(remap)) + ) + + for i, field := range result.Fields { + orig[field.Name] = i + + if n, ok := remap[field.Name]; ok { + inverseRemap[n] = field.Name + } + } + + for i, field := range result.Fields { + if _, ok := inverseRemap[field.Name]; ok { + continue + } + + if newName, ok := remap[field.Name]; ok { + rowIdx[i] = newName + rowIdx[orig[newName]] = field.Name + + if strings.HasPrefix(field.Name, "$$") { + // Replace rows only; field stays unchanged. + fieldIdx[i] = field.Name + fieldIdx[orig[newName]] = newName + } else { + fieldIdx[i] = newName + fieldIdx[orig[newName]] = field.Name + } + } else { + fieldIdx[i] = field.Name + rowIdx[i] = field.Name + } + } + + var fields []*querypb.Field + for _, name := range fieldIdx { + fields = append(fields, result.Fields[orig[name]]) + } + + fields = fields[:len(result.Fields)-len(remap)] + + var rows []Row + for _, origRow := range result.Rows { + var row []Value + for _, name := range rowIdx { + row = append(row, origRow[orig[name]]) + } + + rows = append(rows, row[:len(fields)]) + } + + return &Result{ + Fields: fields, + Rows: rows, + } +} + +// MarshalResult marshals the object into a Result object. It is semi-complete. +func MarshalResult(v any) (*Result, error) { + if m, ok := v.(ResultMarshaller); ok { + return m.MarshalResult() + } + + val := reflect.ValueOf(v) + if val.Type().Kind() != reflect.Slice { + vals := reflect.Append( + reflect.MakeSlice(reflect.SliceOf(val.Type()), 0, 1), + val, + ) + return MarshalResult(vals.Interface()) + } + + // Value of the slice element. + // TODO: handle other cases; We're assuming it's a pointer to a struct + elem := val.Type().Elem() + elemType := elem.Elem() + + var ( + exportedStructFields []reflect.StructField + fields []*querypb.Field + rows []Row + ) + + for _, field := range reflect.VisibleFields(elemType) { + if !field.IsExported() { + continue + } + + // Anonymous fields are redundant. For example, consider the following: + // + // type T1 struct { Foo string } + // type T2 struct { *T1; Bar string } + // + // If we did not skip Anonymous fields, marshalling T2 would result in + // the following "fields": + // | t1 | foo | bar | + // + // Skipping Anonymous fields results in the correct set: + // | foo | bar | + // + // From the VisibleFields documentation: + // > The returned fields include fields inside anonymous struct members + // > and unexported fields. They follow the same order found in the + // > struct, with anonymous fields followed immediately by their + // > promoted fields. + if field.Anonymous { + continue + } + + exportedStructFields = append(exportedStructFields, field) + sqlField, err := structToQueryField(field) + if err != nil { + return nil, err + } + fields = append(fields, sqlField) + } + + for i := 0; i < val.Len(); i++ { + // TODO: handle case where val is a slice of non-pointer objects. + v := val.Index(i).Elem() + row, err := marshalRow(v, fields, exportedStructFields) + if err != nil { + return nil, err + } + + rows = append(rows, row) + } + + return &Result{ + Fields: fields, + Rows: rows, + }, nil +} + +func marshalRow(val reflect.Value, sqlFields []*querypb.Field, structFields []reflect.StructField) (Row, error) { + var row Row + for i, structField := range structFields { + var ( + sqlField = sqlFields[i] + + sqlVal Value + err error + ) + if f := val.FieldByName(structField.Name); f.IsValid() { + sqlVal, err = structToQueryValue(f.Interface(), structField, sqlField.Type) + if err != nil { + return nil, err + } + } else { + sqlVal = NULL + } + + row = append(row, sqlVal) + } + + return row, nil +} + +func structToQueryField(field reflect.StructField) (*querypb.Field, error) { + name := field.Name + parts := strings.SplitN(field.Tag.Get("sqltypes"), ",", 3) + for len(parts) < 3 { + parts = append(parts, "") + } + + if parts[0] != "" { + name = parts[0] + } + + typ, err := fieldType(field) + if err != nil { + return nil, err + } + + return &querypb.Field{ + Name: snakeCase(name), + Type: typ, + }, nil +} + +func fieldType(field reflect.StructField) (querypb.Type, error) { + var err error + typeName := field.Type.String() + switch field.Type.Kind() { + case reflect.Pointer: + ptr := field.Type.Elem() + switch ptr.Kind() { + case reflect.Struct: + switch ptr.PkgPath() { + case "vitess.io/vitess/go/vt/proto/vttime": + switch ptr.Name() { + case "Time": + typeName = "timestamp" + case "Duration": + typeName = "varchar" + default: + // Impossible unless we add a new type to vttime.proto and + // forget to update this function. + err = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unknown vttime proto message %s", ptr.Name()) + } + case "time": + switch ptr.Name() { + case "Time": + typeName = "timestamp" + case "Duration": + typeName = "varchar" + default: + err = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unknown time type %s", ptr.Name()) + } + } + default: + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported pointer type %v", ptr.Kind()) + } + case reflect.Struct: + switch field.Type.PkgPath() { + case "vitess.io/vitess/go/vt/proto/vttime": + switch field.Type.Name() { + case "Time": + typeName = "timestamp" + case "Duration": + typeName = "varchar" + default: + // Impossible unless we add a new type to vttime.proto and + // forget to update this function. + err = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unknown vttime proto message %s", field.Type.Name()) + } + case "time": + switch field.Type.Name() { + case "Time": + typeName = "timestamp" + case "Duration": + typeName = "varchar" + default: + err = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unknown time type %s", field.Type.Name()) + } + } + case reflect.Int: + typeName = "int64" + case reflect.Uint: + typeName = "uint64" + case reflect.String: + typeName = "varchar" + case reflect.Slice: + elem := field.Type.Elem() + switch elem.Kind() { + case reflect.Uint8: + typeName = "varbinary" + default: + err = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unsupported field type %v", field.Type.Kind()) + } + } + + if err != nil { + return 0, err + } + + return querypb.Type(querypb.Type_value[strings.ToUpper(typeName)]), nil +} + +func structToQueryValue(value any, field reflect.StructField, typ querypb.Type) (Value, error) { + if v, ok := value.(ValueMarshaller); ok { + col, err := v.MarshalSQL(typ) + if err != nil { + return Value{}, err + } + + return MakeTrusted(typ, col), nil + } + + switch typ { + case querypb.Type_UINT8: + if v, ok := value.(bool); ok { + return NewBoolean(v), nil + } else if v, ok := value.(uint8); ok { + return NewUint8(v), nil + } else { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not uint8 or bool", value, value) + } + case querypb.Type_UINT16: + if v, ok := value.(uint16); ok { + return NewUint16(v), nil + } else { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not uint16", value, value) + } + case querypb.Type_UINT32: + if v, ok := value.(uint32); ok { + return NewUint32(v), nil + } else { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not uint32", value, value) + } + case querypb.Type_UINT64: + switch v := value.(type) { + case uint64: + return NewUint64(v), nil + case uint: + return NewUint64(uint64(v)), nil + default: + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not uint64", value, value) + } + case querypb.Type_INT8: + if v, ok := value.(int8); ok { + return NewInt8(v), nil + } else { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not int8", value, value) + } + case querypb.Type_INT16: + if v, ok := value.(int16); ok { + return NewInt16(v), nil + } else { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not int16", value, value) + } + case querypb.Type_INT32: + if v, ok := value.(int32); ok { + return NewInt32(v), nil + } else { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not int32", value, value) + } + case querypb.Type_INT64: + switch v := value.(type) { + case int64: + return NewInt64(v), nil + case int: + return NewInt64(int64(v)), nil + default: + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not int64", value, value) + } + case querypb.Type_FLOAT32: + if v, ok := value.(float32); ok { + return NewFloat32(v), nil + } else { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not float32", value, value) + } + case querypb.Type_FLOAT64: + if v, ok := value.(float64); ok { + return NewFloat64(v), nil + } else { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not float64", value, value) + } + case querypb.Type_VARCHAR, querypb.Type_VARBINARY: + var s string + if v, ok := value.(fmt.Stringer); ok { + s = v.String() + } else if v, ok := value.(string); ok { + s = v + } else { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not string-like", value, value) + } + + if typ == querypb.Type_VARBINARY { + return NewVarBinary(s), nil + } + + return NewVarChar(s), nil + case querypb.Type_TIMESTAMP: + var s string + switch v := value.(type) { // TODO: support overrides for other timestamp formats + case *time.Time: + if v == nil { + return NULL, nil + } + + s = v.Format(TimestampFormat) + case time.Time: + s = v.Format(TimestampFormat) + case *vttime.Time: + if v == nil { + return NULL, nil + } + + s = protoutil.TimeFromProto(v).Format(TimestampFormat) + case vttime.Time: + s = protoutil.TimeFromProto(&v).Format(TimestampFormat) + case string: + s = v + default: + _s, ok := value.(string) + if !ok { + return Value{}, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v (%T) is not time or string-like", value, value) + } + + s = _s + } + + return NewTimestamp(s), nil + case querypb.Type_NULL_TYPE: + return NewValue(Null, nil) + } + + return Value{}, vterrors.Errorf(0, "unsupported query field type %s", strings.ToLower(querypb.Type_name[int32(typ)])) +} + +func snakeCase(s string) string { + var ( + buf strings.Builder + start = true + lower = strings.ToLower(s) + ) + + /* + Foo => foo + FooBar => foo_bar + */ + for i, c := range s { + // `c` is an uppercase letter + if byte(c) != lower[i] { + if !start { + buf.WriteByte('_') + } + + start = false + } + + buf.WriteByte(lower[i]) + } + + return buf.String() +} diff --git a/go/sqltypes/marshal_test.go b/go/sqltypes/marshal_test.go new file mode 100644 index 00000000000..e8e62018456 --- /dev/null +++ b/go/sqltypes/marshal_test.go @@ -0,0 +1,115 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/topo/topoproto" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +type T1 struct { + Name string + Age int + Tablet *topodatapb.TabletAlias + AddedAt time.Time + Period time.Duration +} + +type T2 T1 + +func (t2 *T2) MarshalResult() (*Result, error) { + tmp := struct { + *T1 + Tablet_ string `sqltypes:"$$tablet"` + AddedTimestamp time.Time + PeriodSeconds int + }{ + T1: (*T1)(t2), + Tablet_: topoproto.TabletAliasString(t2.Tablet), + AddedTimestamp: t2.AddedAt, + PeriodSeconds: int(t2.Period.Seconds()), + } + + res, err := MarshalResult(&tmp) + if err != nil { + return nil, err + } + + return ReplaceFields(res, map[string]string{ + // Replace `period`/'added_at` field and column values. + "period": "period_seconds", + "added_at": "added_timestamp", + // Replace `tablet` column values only. + "$$tablet": "tablet", + }), nil +} + +func TestMarshalResult(t *testing.T) { + t.Parallel() + + now := time.Now() + t1 := &T1{ + Name: "test", + Age: 10, + Tablet: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + AddedAt: now, + Period: time.Minute, + } + + r, err := MarshalResult((*T2)(t1)) + require.NoError(t, err) + + row := r.Named().Rows[0] + + assert.Equal(t, "test", row.AsString("name", "")) + assert.Equal(t, int64(10), row.AsInt64("age", 0)) + assert.Equal(t, "zone1-0000000100", row.AsString("tablet", "")) + assert.Equal(t, now.Format(TimestampFormat), row.AsString("added_timestamp", "")) + assert.Equal(t, int64(60), row.AsInt64("period_seconds", 0)) + + // fields we renamed/remapped are not present + assert.Empty(t, row.AsString("$$tablet", "")) + assert.Empty(t, row.AsString("added_at", "")) + assert.Empty(t, row.AsString("period", "")) +} + +func TestSnakeCase(t *testing.T) { + t.Parallel() + + tests := []struct { + in, out string + }{ + {"Foo", "foo"}, + {"FooBar", "foo_bar"}, + } + + for _, test := range tests { + t.Run(test.in, func(t *testing.T) { + assert.Equal(t, test.out, snakeCase(test.in)) + }) + } +} diff --git a/go/sqltypes/result.go b/go/sqltypes/result.go index 80952598ec9..7c04e1d89fa 100644 --- a/go/sqltypes/result.go +++ b/go/sqltypes/result.go @@ -99,7 +99,7 @@ func (result *Result) Copy() *Result { if result.Fields != nil { out.Fields = make([]*querypb.Field, len(result.Fields)) for i, f := range result.Fields { - out.Fields[i] = proto.Clone(f).(*querypb.Field) + out.Fields[i] = f.CloneVT() } } if result.Rows != nil { diff --git a/go/sqltypes/testing.go b/go/sqltypes/testing.go index 50daed076a0..b591cf710f0 100644 --- a/go/sqltypes/testing.go +++ b/go/sqltypes/testing.go @@ -18,8 +18,14 @@ package sqltypes import ( "bytes" + crand "crypto/rand" + "encoding/base64" + "encoding/hex" "fmt" + "math/rand" + "strconv" "strings" + "time" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -72,6 +78,7 @@ func MakeTestResult(fields []*querypb.Field, rows ...string) *Result { result.Rows[i] = make([]Value, len(fields)) for j, col := range split(row) { if col == "null" { + result.Rows[i][j] = NULL continue } result.Rows[i][j] = MakeTrusted(fields[j].Type, []byte(col)) @@ -153,3 +160,124 @@ func PrintResults(results []*Result) string { func split(str string) []string { return strings.Split(str, "|") } + +func TestRandomValues() (Value, Value) { + if rand.Int()%2 == 0 { + // create a single value, and turn it into two different types + v := rand.Int() + return randomNumericType(v), randomNumericType(v) + } + + // just produce two arbitrary random values and compare + return randomNumericType(rand.Int()), randomNumericType(rand.Int()) +} + +func randomNumericType(i int) Value { + r := rand.Intn(len(numericTypes)) + return numericTypes[r](i) +} + +var numericTypes = []func(int) Value{ + func(i int) Value { return NULL }, + func(i int) Value { return NewInt8(int8(i)) }, + func(i int) Value { return NewInt32(int32(i)) }, + func(i int) Value { return NewInt64(int64(i)) }, + func(i int) Value { return NewUint64(uint64(i)) }, + func(i int) Value { return NewUint32(uint32(i)) }, + func(i int) Value { return NewFloat64(float64(i)) }, + func(i int) Value { return NewDecimal(fmt.Sprintf("%d", i)) }, + func(i int) Value { return NewVarChar(fmt.Sprintf("%d", i)) }, + func(i int) Value { return NewVarChar(fmt.Sprintf(" %f aa", float64(i))) }, +} + +type RandomGenerator func() Value + +func randomBytes() []byte { + b := make([]byte, rand.Intn(128)) + _, _ = crand.Read(b) + return b +} + +var RandomGenerators = map[Type]RandomGenerator{ + Null: func() Value { + return NULL + }, + Int8: func() Value { + return NewInt8(int8(rand.Intn(255))) + }, + Int32: func() Value { + return NewInt32(rand.Int31()) + }, + Int64: func() Value { + return NewInt64(rand.Int63()) + }, + Uint32: func() Value { + return NewUint32(rand.Uint32()) + }, + Uint64: func() Value { + return NewUint64(rand.Uint64()) + }, + Float64: func() Value { + return NewFloat64(rand.ExpFloat64()) + }, + Decimal: func() Value { + dec := fmt.Sprintf("%d.%d", rand.Intn(9999999999), rand.Intn(9999999999)) + if rand.Int()&0x1 == 1 { + dec = "-" + dec + } + return NewDecimal(dec) + }, + VarChar: func() Value { + return NewVarChar(base64.StdEncoding.EncodeToString(randomBytes())) + }, + VarBinary: func() Value { + return NewVarBinary(string(randomBytes())) + }, + Date: func() Value { + return NewDate(randTime().Format(time.DateOnly)) + }, + Datetime: func() Value { + return NewDatetime(randTime().Format(time.DateTime)) + }, + Timestamp: func() Value { + return NewTimestamp(randTime().Format(time.DateTime)) + }, + Time: func() Value { + return NewTime(randTime().Format(time.TimeOnly)) + }, + TypeJSON: func() Value { + var j string + switch rand.Intn(6) { + case 0: + j = "null" + case 1: + i := rand.Int63() + if rand.Int()&0x1 == 1 { + i = -i + } + j = strconv.FormatInt(i, 10) + case 2: + j = strconv.FormatFloat(rand.NormFloat64(), 'g', -1, 64) + case 3: + j = strconv.Quote(hex.EncodeToString(randomBytes())) + case 4: + j = "true" + case 5: + j = "false" + } + v, err := NewJSON(j) + if err != nil { + panic(err) + } + return v + }, +} + +func randTime() time.Time { + min := time.Date(1970, 1, 0, 0, 0, 0, 0, time.UTC).Unix() + max := time.Date(2070, 1, 0, 0, 0, 0, 0, time.UTC).Unix() + delta := max - min + + sec := rand.Int63n(delta) + min + return time.Unix(sec, 0) +} diff --git a/go/sqltypes/type.go b/go/sqltypes/type.go index 74b7bb46c72..eeaa4e9ddf6 100644 --- a/go/sqltypes/type.go +++ b/go/sqltypes/type.go @@ -100,11 +100,16 @@ func IsNumber(t querypb.Type) bool { return IsIntegral(t) || IsFloat(t) || t == Decimal } -// IsDate returns true if the type represents a date and/or time. -func IsDate(t querypb.Type) bool { +// IsDateOrTime returns true if the type represents a date and/or time. +func IsDateOrTime(t querypb.Type) bool { return t == Datetime || t == Date || t == Timestamp || t == Time } +// IsDate returns true if the type has a date component +func IsDate(t querypb.Type) bool { + return t == Datetime || t == Date || t == Timestamp +} + // IsNull returns true if the type is NULL type func IsNull(t querypb.Type) bool { return t == Null @@ -131,6 +136,7 @@ func IsNull(t querypb.Type) bool { // switch statements for those who want to cover types // by their category. const ( + Unknown = -1 Null = querypb.Type_NULL_TYPE Int8 = querypb.Type_INT8 Uint8 = querypb.Type_UINT8 diff --git a/go/sqltypes/value.go b/go/sqltypes/value.go index 745058a5bed..8d95a94561f 100644 --- a/go/sqltypes/value.go +++ b/go/sqltypes/value.go @@ -29,9 +29,10 @@ import ( "vitess.io/vitess/go/bytes2" "vitess.io/vitess/go/hack" - + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/fastparse" + "vitess.io/vitess/go/mysql/format" querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/proto/vtrpc" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -42,8 +43,8 @@ var ( // DontEscape tells you if a character should not be escaped. DontEscape = byte(255) - - nullstr = []byte("null") + NullStr = "null" + NullBytes = []byte(NullStr) // ErrIncompatibleTypeCast indicates a casting problem ErrIncompatibleTypeCast = errors.New("Cannot convert value to desired type") @@ -62,8 +63,9 @@ type ( // an integral type, the bytes are always stored as a canonical // representation that matches how MySQL returns such values. Value struct { - typ querypb.Type - val []byte + typ querypb.Type + val []byte + values []Value } Row = []Value @@ -74,17 +76,22 @@ type ( func NewValue(typ querypb.Type, val []byte) (v Value, err error) { switch { case IsSigned(typ): - if _, err := strconv.ParseInt(string(val), 10, 64); err != nil { + if _, err := fastparse.ParseInt64(hack.String(val), 10); err != nil { return NULL, err } return MakeTrusted(typ, val), nil case IsUnsigned(typ): - if _, err := strconv.ParseUint(string(val), 10, 64); err != nil { + if _, err := fastparse.ParseUint64(hack.String(val), 10); err != nil { + return NULL, err + } + return MakeTrusted(typ, val), nil + case IsFloat(typ): + if _, err := fastparse.ParseFloat64(hack.String(val)); err != nil { return NULL, err } return MakeTrusted(typ, val), nil - case IsFloat(typ) || typ == Decimal: - if _, err := strconv.ParseFloat(string(val), 64); err != nil { + case IsDecimal(typ): + if _, err := decimal.NewFromMySQL(val); err != nil { return NULL, err } return MakeTrusted(typ, val), nil @@ -103,12 +110,19 @@ func NewValue(typ querypb.Type, val []byte) (v Value, err error) { // comments. Other packages can also use the function to create // VarBinary or VarChar values. func MakeTrusted(typ querypb.Type, val []byte) Value { + return MakeTrustedValues(typ, val, nil) +} +func MakeTrustedValues(typ querypb.Type, val []byte, values []*querypb.Value) Value { if typ == Null { return NULL } - - return Value{typ: typ, val: val} + var sqlValues []Value + for _, v := range values { + sqlValues = append(sqlValues, + MakeTrustedValues(v.Type, v.Value, v.Values)) + } + return Value{typ: typ, val: val, values: sqlValues} } // NewHexNum builds an Hex Value. @@ -141,6 +155,11 @@ func NewInt32(v int32) Value { return MakeTrusted(Int32, strconv.AppendInt(nil, int64(v), 10)) } +// NewInt16 builds a Int16 Value. +func NewInt16(v int16) Value { + return MakeTrusted(Int16, strconv.AppendInt(nil, int64(v), 10)) +} + // NewUint64 builds an Uint64 Value. func NewUint64(v uint64) Value { return MakeTrusted(Uint64, strconv.AppendUint(nil, v, 10)) @@ -151,9 +170,29 @@ func NewUint32(v uint32) Value { return MakeTrusted(Uint32, strconv.AppendUint(nil, uint64(v), 10)) } +// NewUint16 builds a Uint16 Value. +func NewUint16(v uint16) Value { + return MakeTrusted(Uint16, strconv.AppendUint(nil, uint64(v), 10)) +} + +// NewUint8 builds a Uint8 Value. +func NewUint8(v uint8) Value { + return MakeTrusted(Uint8, strconv.AppendUint(nil, uint64(v), 10)) +} + +// NewBoolean builds a Uint8 Value from a boolean. +func NewBoolean(v bool) Value { + return MakeTrusted(Uint8, strconv.AppendBool(nil, v)) +} + // NewFloat64 builds an Float64 Value. func NewFloat64(v float64) Value { - return MakeTrusted(Float64, strconv.AppendFloat(nil, v, 'g', -1, 64)) + return MakeTrusted(Float64, format.FormatFloat(v)) +} + +// NewFloat32 builds a Float32 Value. +func NewFloat32(v float32) Value { + return MakeTrusted(Float32, format.FormatFloat(float64(v))) } // NewVarChar builds a VarChar Value. @@ -286,7 +325,12 @@ func (v Value) ToInt64() (int64, error) { return 0, ErrIncompatibleTypeCast } - return strconv.ParseInt(v.RawStr(), 10, 64) + return fastparse.ParseInt64(v.RawStr(), 10) +} + +// ToCastInt64 returns the best effort value as MySQL would return it as a int64. +func (v Value) ToCastInt64() (int64, error) { + return fastparse.ParseInt64(v.RawStr(), 10) } func (v Value) ToInt32() (int32, error) { @@ -313,7 +357,7 @@ func (v Value) ToFloat64() (float64, error) { return 0, ErrIncompatibleTypeCast } - return strconv.ParseFloat(v.RawStr(), 64) + return fastparse.ParseFloat64(v.RawStr()) } // ToUint16 returns the value as MySQL would return it as a uint16. @@ -332,7 +376,12 @@ func (v Value) ToUint64() (uint64, error) { return 0, ErrIncompatibleTypeCast } - return strconv.ParseUint(v.RawStr(), 10, 64) + return fastparse.ParseUint64(v.RawStr(), 10) +} + +// ToCastUint64 returns the best effort value as MySQL would return it as a uint64. +func (v Value) ToCastUint64() (uint64, error) { + return fastparse.ParseUint64(v.RawStr(), 10) } func (v Value) ToUint32() (uint32, error) { @@ -383,7 +432,7 @@ func (v Value) String() string { func (v Value) EncodeSQL(b BinWriter) { switch { case v.typ == Null: - b.Write(nullstr) + b.Write(NullBytes) case v.IsQuoted(): encodeBytesSQL(v.val, b) case v.typ == Bit: @@ -398,11 +447,20 @@ func (v Value) EncodeSQL(b BinWriter) { func (v Value) EncodeSQLStringBuilder(b *strings.Builder) { switch { case v.typ == Null: - b.Write(nullstr) + b.Write(NullBytes) case v.IsQuoted(): encodeBytesSQLStringBuilder(v.val, b) case v.typ == Bit: encodeBytesSQLBits(v.val, b) + case v.typ == Tuple: + b.WriteByte('(') + for i, bv := range v.values { + if i != 0 { + b.WriteString(", ") + } + bv.EncodeSQLStringBuilder(b) + } + b.WriteByte(')') default: b.Write(v.val) } @@ -413,7 +471,7 @@ func (v Value) EncodeSQLStringBuilder(b *strings.Builder) { func (v Value) EncodeSQLBytes2(b *bytes2.Buffer) { switch { case v.typ == Null: - b.Write(nullstr) + b.Write(NullBytes) case v.IsQuoted(): encodeBytesSQLBytes2(v.val, b) case v.typ == Bit: @@ -427,7 +485,7 @@ func (v Value) EncodeSQLBytes2(b *bytes2.Buffer) { func (v Value) EncodeASCII(b BinWriter) { switch { case v.typ == Null: - b.Write(nullstr) + b.Write(NullBytes) case v.IsQuoted() || v.typ == Bit: encodeBytesASCII(v.val, b) default: @@ -477,8 +535,22 @@ func (v Value) IsBinary() bool { // IsDateTime returns true if Value is datetime. func (v Value) IsDateTime() bool { - dt := int(querypb.Type_DATETIME) - return int(v.typ)&dt == dt + return v.typ == querypb.Type_DATETIME +} + +// IsTimestamp returns true if Value is date. +func (v Value) IsTimestamp() bool { + return v.typ == querypb.Type_TIMESTAMP +} + +// IsDate returns true if Value is date. +func (v Value) IsDate() bool { + return v.typ == querypb.Type_DATE +} + +// IsTime returns true if Value is time. +func (v Value) IsTime() bool { + return v.typ == querypb.Type_TIME } // IsDecimal returns true if Value is a decimal. @@ -505,7 +577,7 @@ func (v Value) MarshalJSON() ([]byte, error) { case v.IsQuoted() || v.typ == Bit: return json.Marshal(v.ToString()) case v.typ == Null: - return nullstr, nil + return NullBytes, nil } return v.val, nil } @@ -546,7 +618,7 @@ func (v *Value) UnmarshalJSON(b []byte) error { // an INSERT was performed with x'A1' having been specified as a value func (v *Value) decodeHexVal() ([]byte, error) { if len(v.val) < 3 || (v.val[0] != 'x' && v.val[0] != 'X') || v.val[1] != '\'' || v.val[len(v.val)-1] != '\'' { - return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid hex value: %v", v.val) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid hex value: %v", v.val) } hexBytes := v.val[2 : len(v.val)-1] decodedHexBytes, err := hex.DecodeString(string(hexBytes)) @@ -561,7 +633,7 @@ func (v *Value) decodeHexVal() ([]byte, error) { // an INSERT was performed with 0xA1 having been specified as a value func (v *Value) decodeHexNum() ([]byte, error) { if len(v.val) < 3 || v.val[0] != '0' || v.val[1] != 'x' { - return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid hex number: %v", v.val) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid hex number: %v", v.val) } hexBytes := v.val[2:] decodedHexBytes, err := hex.DecodeString(string(hexBytes)) @@ -576,12 +648,12 @@ func (v *Value) decodeHexNum() ([]byte, error) { // an INSERT was performed with 0x5 having been specified as a value func (v *Value) decodeBitNum() ([]byte, error) { if len(v.val) < 3 || v.val[0] != '0' || v.val[1] != 'b' { - return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid bit number: %v", v.val) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid bit number: %v", v.val) } var i big.Int _, ok := i.SetString(string(v.val), 0) if !ok { - return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid bit number: %v", v.val) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid bit number: %v", v.val) } return i.Bytes(), nil } diff --git a/go/sqltypes/value_test.go b/go/sqltypes/value_test.go index 82aea752480..86c751f3d0d 100644 --- a/go/sqltypes/value_test.go +++ b/go/sqltypes/value_test.go @@ -165,23 +165,23 @@ func TestNewValue(t *testing.T) { }, { inType: Int64, inVal: InvalidNeg, - outErr: "out of range", + outErr: `cannot parse int64 from "-9223372036854775809": overflow`, }, { inType: Int64, inVal: InvalidPos, - outErr: "out of range", + outErr: `cannot parse int64 from "18446744073709551616": overflow`, }, { inType: Uint64, inVal: "-1", - outErr: "invalid syntax", + outErr: `cannot parse uint64 from "-1"`, }, { inType: Uint64, inVal: InvalidPos, - outErr: "out of range", + outErr: `cannot parse uint64 from "18446744073709551616": overflow`, }, { inType: Float64, inVal: "a", - outErr: "invalid syntax", + outErr: `unparsed tail left after parsing float64 from "a"`, }, { inType: Expression, inVal: "a", diff --git a/go/stats/counter.go b/go/stats/counter.go index 406ab5843bc..4428dfe1136 100644 --- a/go/stats/counter.go +++ b/go/stats/counter.go @@ -177,7 +177,7 @@ type GaugeFloat64 struct { help string } -// NewCounter returns a new GaugeFloat64. +// NewGaugeFloat64 returns a new GaugeFloat64. func NewGaugeFloat64(name string, help string) *GaugeFloat64 { v := &GaugeFloat64{help: help} if name != "" { diff --git a/go/stats/counter_test.go b/go/stats/counter_test.go index e4153f5bc33..f290dc733d7 100644 --- a/go/stats/counter_test.go +++ b/go/stats/counter_test.go @@ -26,7 +26,7 @@ import ( func TestCounter(t *testing.T) { var gotname string var gotv *Counter - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*Counter) @@ -54,7 +54,7 @@ func TestCounter(t *testing.T) { func TestGaugeFunc(t *testing.T) { var gotname string var gotv *GaugeFunc - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*GaugeFunc) @@ -77,7 +77,7 @@ func TestGaugeFunc(t *testing.T) { func TestGaugeFloat64(t *testing.T) { var gotname string var gotv *GaugeFloat64 - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*GaugeFloat64) diff --git a/go/stats/counters.go b/go/stats/counters.go index f144c0ce3dd..371cbd53818 100644 --- a/go/stats/counters.go +++ b/go/stats/counters.go @@ -70,9 +70,7 @@ func (c *counters) ZeroAll() { c.mu.Lock() defer c.mu.Unlock() - for k := range c.counts { - c.counts[k] = 0 - } + clear(c.counts) } // Counts returns a copy of the Counters' map. diff --git a/go/stats/counters_test.go b/go/stats/counters_test.go index d3be6ccf02f..22d6e769d3d 100644 --- a/go/stats/counters_test.go +++ b/go/stats/counters_test.go @@ -29,7 +29,7 @@ import ( ) func TestCounters(t *testing.T) { - clear() + clearStats() c := NewCountersWithSingleLabel("counter1", "help", "label") c.Add("c1", 1) c.Add("c2", 1) @@ -49,7 +49,7 @@ func TestCounters(t *testing.T) { } func TestCountersTags(t *testing.T) { - clear() + clearStats() c := NewCountersWithSingleLabel("counterTag1", "help", "label") want := map[string]int64{} got := c.Counts() @@ -66,7 +66,7 @@ func TestCountersTags(t *testing.T) { } func TestMultiCounters(t *testing.T) { - clear() + clearStats() c := NewCountersWithMultiLabels("mapCounter1", "help", []string{"aaa", "bbb"}) c.Add([]string{"c1a", "c1b"}, 1) c.Add([]string{"c2a", "c2b"}, 1) @@ -95,7 +95,7 @@ func TestMultiCounters(t *testing.T) { } func TestMultiCountersDot(t *testing.T) { - clear() + clearStats() c := NewCountersWithMultiLabels("mapCounter2", "help", []string{"aaa", "bbb"}) c.Add([]string{"c1.a", "c1b"}, 1) c.Add([]string{"c2a", "c2.b"}, 1) @@ -121,7 +121,7 @@ func TestMultiCountersDot(t *testing.T) { func TestCountersHook(t *testing.T) { var gotname string var gotv *CountersWithSingleLabel - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*CountersWithSingleLabel) @@ -139,7 +139,7 @@ func TestCountersHook(t *testing.T) { var benchCounter = NewCountersWithSingleLabel("bench", "help", "label") func BenchmarkCounters(b *testing.B) { - clear() + clearStats() benchCounter.Add("c1", 1) b.ResetTimer() @@ -153,7 +153,7 @@ func BenchmarkCounters(b *testing.B) { var benchMultiCounter = NewCountersWithMultiLabels("benchMulti", "help", []string{"call", "keyspace", "dbtype"}) func BenchmarkMultiCounters(b *testing.B) { - clear() + clearStats() key := []string{"execute-key-ranges", "keyspacename", "replica"} benchMultiCounter.Add(key, 1) b.ResetTimer() @@ -169,7 +169,7 @@ func BenchmarkCountersTailLatency(b *testing.B) { // For this one, ignore the time reported by 'go test'. // The 99th Percentile log line is all that matters. // (Cmd: go test -bench=BenchmarkCountersTailLatency -benchtime=30s -cpu=10) - clear() + clearStats() benchCounter.Add("c1", 1) c := make(chan time.Duration, 100) done := make(chan struct{}) @@ -208,7 +208,7 @@ func BenchmarkCountersTailLatency(b *testing.B) { } func TestCountersFuncWithMultiLabels(t *testing.T) { - clear() + clearStats() f := NewCountersFuncWithMultiLabels("TestCountersFuncWithMultiLabels", "help", []string{"label1"}, func() map[string]int64 { return map[string]int64{ "c1": 1, @@ -226,7 +226,7 @@ func TestCountersFuncWithMultiLabels(t *testing.T) { func TestCountersFuncWithMultiLabels_Hook(t *testing.T) { var gotname string var gotv *CountersFuncWithMultiLabels - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*CountersFuncWithMultiLabels) @@ -244,13 +244,13 @@ func TestCountersFuncWithMultiLabels_Hook(t *testing.T) { } func TestCountersCombineDimension(t *testing.T) { - clear() + clearStats() // Empty labels shouldn't be combined. c0 := NewCountersWithSingleLabel("counter_combine_dim0", "help", "") c0.Add("c1", 1) assert.Equal(t, `{"c1": 1}`, c0.String()) - clear() + clearStats() combineDimensions = "a,c" c1 := NewCountersWithSingleLabel("counter_combine_dim1", "help", "label") diff --git a/go/stats/duration_test.go b/go/stats/duration_test.go index cabc79ae77a..b1aeb0cd1f5 100644 --- a/go/stats/duration_test.go +++ b/go/stats/duration_test.go @@ -25,7 +25,7 @@ import ( func TestCounterDuration(t *testing.T) { var gotname string var gotv *CounterDuration - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*CounterDuration) @@ -52,7 +52,7 @@ func TestCounterDuration(t *testing.T) { func TestCounterDurationFunc(t *testing.T) { var gotname string var gotv *CounterDurationFunc - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*CounterDurationFunc) @@ -75,7 +75,7 @@ func TestCounterDurationFunc(t *testing.T) { func TestGaugeDuration(t *testing.T) { var gotname string var gotv *GaugeDuration - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*GaugeDuration) @@ -103,7 +103,7 @@ func TestGaugeDuration(t *testing.T) { func TestGaugeDurationFunc(t *testing.T) { var gotname string var gotv *GaugeDurationFunc - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*GaugeDurationFunc) diff --git a/go/stats/export.go b/go/stats/export.go index 0a335517a14..e98ef0a969c 100644 --- a/go/stats/export.go +++ b/go/stats/export.go @@ -121,6 +121,60 @@ func Publish(name string, v expvar.Var) { publish(name, v) } +// StringMapFuncWithMultiLabels is a multidimensional string map publisher. +// +// Map keys are compound names made with joining multiple strings with '.', +// and are named by corresponding key labels. +// +// Map values are any string, and are named by the value label. +// +// Since the map is returned by the function, we assume it's in the right +// format (meaning each key is of the form 'aaa.bbb.ccc' with as many elements +// as there are in Labels). +// +// Backends which need to provide a numeric value can set a constant value of 1 +// (or whatever is appropriate for the backend) for each key-value pair present +// in the map. +type StringMapFuncWithMultiLabels struct { + StringMapFunc + help string + keyLabels []string + valueLabel string +} + +// Help returns the descriptive help message. +func (s StringMapFuncWithMultiLabels) Help() string { + return s.help +} + +// KeyLabels returns the list of key labels. +func (s StringMapFuncWithMultiLabels) KeyLabels() []string { + return s.keyLabels +} + +// ValueLabel returns the value label. +func (s StringMapFuncWithMultiLabels) ValueLabel() string { + return s.valueLabel +} + +// NewStringMapFuncWithMultiLabels creates a new StringMapFuncWithMultiLabels, +// mapping to the provided function. The key labels correspond with components +// of map keys. The value label names the map values. +func NewStringMapFuncWithMultiLabels(name, help string, keyLabels []string, valueLabel string, f func() map[string]string) *StringMapFuncWithMultiLabels { + t := &StringMapFuncWithMultiLabels{ + StringMapFunc: StringMapFunc(f), + help: help, + keyLabels: keyLabels, + valueLabel: valueLabel, + } + + if name != "" { + publish(name, t) + } + + return t +} + func publish(name string, v expvar.Var) { defaultVarGroup.publish(name, v) } diff --git a/go/stats/export_test.go b/go/stats/export_test.go index 8f788090f59..e6160f77184 100644 --- a/go/stats/export_test.go +++ b/go/stats/export_test.go @@ -20,9 +20,11 @@ import ( "expvar" "reflect" "testing" + + "github.com/stretchr/testify/require" ) -func clear() { +func clearStats() { defaultVarGroup.vars = make(map[string]expvar.Var) defaultVarGroup.newVarHook = nil combineDimensions = "" @@ -32,7 +34,7 @@ func clear() { } func TestNoHook(t *testing.T) { - clear() + clearStats() v := NewCounter("plainint", "help") v.Add(1) if v.String() != "1" { @@ -43,7 +45,7 @@ func TestNoHook(t *testing.T) { func TestString(t *testing.T) { var gotname string var gotv *String - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*String) @@ -80,7 +82,7 @@ func (m *Mystr) String() string { func TestPublish(t *testing.T) { var gotname string var gotv expvar.Var - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*Mystr) @@ -108,7 +110,7 @@ func (f expvarFunc) String() string { func TestPublishFunc(t *testing.T) { var gotname string var gotv expvarFunc - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(expvarFunc) @@ -123,7 +125,7 @@ func TestPublishFunc(t *testing.T) { } func TestDropVariable(t *testing.T) { - clear() + clearStats() dropVariables = "dropTest" // This should not panic. @@ -157,3 +159,33 @@ func TestParseCommonTags(t *testing.T) { t.Errorf("expected %v, got %v", expected2, res) } } + +func TestStringMapWithMultiLabels(t *testing.T) { + clearStats() + c := NewStringMapFuncWithMultiLabels("stringMap1", "help", []string{"aaa", "bbb"}, "ccc", func() map[string]string { + m := make(map[string]string) + m["c1a.c1b"] = "1" + m["c2a.c2b"] = "1" + return m + }) + + want1 := `{"c1a.c1b": "1", "c2a.c2b": "1"}` + want2 := `{"c2a.c2b": "1", "c1a.c1b": "1"}` + if s := c.String(); s != want1 && s != want2 { + t.Errorf("want %s or %s, got %s", want1, want2, s) + } + + m := c.StringMapFunc() + require.Len(t, m, 2) + require.Contains(t, m, "c1a.c1b") + require.Equal(t, m["c1a.c1b"], "1") + require.Contains(t, m, "c2a.c2b") + require.Equal(t, m["c2a.c2b"], "1") + + keyLabels := c.KeyLabels() + require.Len(t, keyLabels, 2) + require.Equal(t, keyLabels[0], "aaa") + require.Equal(t, keyLabels[1], "bbb") + + require.Equal(t, c.ValueLabel(), "ccc") +} diff --git a/go/stats/histogram_test.go b/go/stats/histogram_test.go index f78934e7ba6..1c7b05d8e9a 100644 --- a/go/stats/histogram_test.go +++ b/go/stats/histogram_test.go @@ -22,7 +22,7 @@ import ( ) func TestHistogram(t *testing.T) { - clear() + clearStats() h := NewHistogram("hist1", "help", []int64{1, 5}) for i := 0; i < 10; i++ { h.Add(int64(i)) @@ -54,7 +54,7 @@ func TestHistogram(t *testing.T) { } func TestGenericHistogram(t *testing.T) { - clear() + clearStats() h := NewGenericHistogram( "histgen", "help", @@ -72,7 +72,7 @@ func TestGenericHistogram(t *testing.T) { func TestHistogramHook(t *testing.T) { var gotname string var gotv *Histogram - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*Histogram) diff --git a/go/stats/multidimensional_test.go b/go/stats/multidimensional_test.go index 84805e00a2e..61dd8bb3b10 100644 --- a/go/stats/multidimensional_test.go +++ b/go/stats/multidimensional_test.go @@ -23,7 +23,7 @@ import ( ) func TestMultiTimingsCounterFor(t *testing.T) { - clear() + clearStats() mtm := NewMultiTimings("multitimings3", "help", []string{"dim1", "dim2"}) mtm.Add([]string{"tag1a", "tag1b"}, 500*time.Microsecond) diff --git a/go/stats/opentsdb/opentsdb.go b/go/stats/opentsdb/opentsdb.go index f12fa02e2fe..3e85052b5f4 100644 --- a/go/stats/opentsdb/opentsdb.go +++ b/go/stats/opentsdb/opentsdb.go @@ -113,7 +113,7 @@ func InitWithoutServenv(prefix string) { stats.RegisterPushBackend("opentsdb", backend) - http.HandleFunc("/debug/opentsdb", func(w http.ResponseWriter, r *http.Request) { + servenv.HTTPHandleFunc("/debug/opentsdb", func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json; charset=utf-8") dataPoints := (*backend).getDataPoints() sort.Sort(byMetric(dataPoints)) diff --git a/go/stats/prometheusbackend/collectors.go b/go/stats/prometheusbackend/collectors.go index a1126c0d211..7469167cf74 100644 --- a/go/stats/prometheusbackend/collectors.go +++ b/go/stats/prometheusbackend/collectors.go @@ -395,3 +395,39 @@ func (c *histogramCollector) Collect(ch chan<- prometheus.Metric) { ch <- metric } } + +type stringMapFuncWithMultiLabelsCollector struct { + smf *stats.StringMapFuncWithMultiLabels + desc *prometheus.Desc +} + +func newStringMapFuncWithMultiLabelsCollector(smf *stats.StringMapFuncWithMultiLabels, name string) { + c := &stringMapFuncWithMultiLabelsCollector{ + smf: smf, + desc: prometheus.NewDesc( + name, + smf.Help(), + labelsToSnake(append(smf.KeyLabels(), smf.ValueLabel())), + nil), + } + + prometheus.MustRegister(c) +} + +// Describe implements Collector. +func (c *stringMapFuncWithMultiLabelsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.desc +} + +// Collect implements Collector. +func (c *stringMapFuncWithMultiLabelsCollector) Collect(ch chan<- prometheus.Metric) { + for lvs, val := range c.smf.StringMapFunc() { + labelValues := append(strings.Split(lvs, "."), val) + metric, err := prometheus.NewConstMetric(c.desc, prometheus.GaugeValue, 1.0, labelValues...) + if err != nil { + log.Errorf("Error adding metric: %s", c.desc) + } else { + ch <- metric + } + } +} diff --git a/go/stats/prometheusbackend/prometheusbackend.go b/go/stats/prometheusbackend/prometheusbackend.go index c3e76797b40..62165e117c1 100644 --- a/go/stats/prometheusbackend/prometheusbackend.go +++ b/go/stats/prometheusbackend/prometheusbackend.go @@ -18,7 +18,6 @@ package prometheusbackend import ( "expvar" - "net/http" "strings" "github.com/prometheus/client_golang/prometheus" @@ -26,6 +25,7 @@ import ( "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/servenv" ) // PromBackend implements PullBackend using Prometheus as the backing metrics storage. @@ -39,12 +39,12 @@ var ( // Init initializes the Prometheus be with the given namespace. func Init(namespace string) { - http.Handle("/metrics", promhttp.Handler()) + servenv.HTTPHandle("/metrics", promhttp.Handler()) be.namespace = namespace stats.Register(be.publishPrometheusMetric) } -// PublishPromMetric is used to publish the metric to Prometheus. +// publishPrometheusMetric is used to publish the metric to Prometheus. func (be PromBackend) publishPrometheusMetric(name string, v expvar.Var) { switch st := v.(type) { case *stats.Counter: @@ -85,6 +85,8 @@ func (be PromBackend) publishPrometheusMetric(name string, v expvar.Var) { newMultiTimingsCollector(st, be.buildPromName(name)) case *stats.Histogram: newHistogramCollector(st, be.buildPromName(name)) + case *stats.StringMapFuncWithMultiLabels: + newStringMapFuncWithMultiLabelsCollector(st, be.buildPromName(name)) case *stats.String, stats.StringFunc, stats.StringMapFunc, *stats.Rates, *stats.RatesFunc: // Silently ignore these types since they don't make sense to // export to Prometheus' data model. @@ -107,7 +109,7 @@ func labelsToSnake(labels []string) []string { return output } -// normalizeMetricForPrometheus produces a compliant name by applying +// normalizeMetric produces a compliant name by applying // special case conversions and then applying a camel case to snake case converter. func normalizeMetric(name string) string { // Special cases diff --git a/go/stats/prometheusbackend/prometheusbackend_test.go b/go/stats/prometheusbackend/prometheusbackend_test.go index 888dd630941..594265153f7 100644 --- a/go/stats/prometheusbackend/prometheusbackend_test.go +++ b/go/stats/prometheusbackend/prometheusbackend_test.go @@ -240,10 +240,33 @@ func TestPrometheusCountersFuncWithMultiLabels(t *testing.T) { checkHandlerForMetricWithMultiLabels(t, name, labels, []string{"bar", "baz"}, 1) } +func TestPrometheusStringMapFuncWithMultiLabels(t *testing.T) { + name := "blah_stringmapfuncwithmultilabels" + keyLabels := []string{"label1", "label2"} + valueLabel := "label3" + + stats.NewStringMapFuncWithMultiLabels(name, "help", keyLabels, valueLabel, func() map[string]string { + m := make(map[string]string) + m["foo.bar"] = "hello" + m["bar.baz"] = "world" + return m + }) + + allLabels := append(keyLabels, valueLabel) + + checkHandlerForMetricWithMultiLabels(t, name, allLabels, []string{"foo", "bar", "hello"}, 1) + checkHandlerForMetricWithMultiLabels(t, name, allLabels, []string{"bar", "baz", "world"}, 1) +} + func checkHandlerForMetricWithMultiLabels(t *testing.T, metric string, labels []string, labelValues []string, value int64) { response := testMetricsHandler(t) - expected := fmt.Sprintf("%s_%s{%s=\"%s\",%s=\"%s\"} %d", namespace, metric, labels[0], labelValues[0], labels[1], labelValues[1], value) + kvPairs := make([]string, 0) + for i := 0; i < len(labels); i++ { + kvPairs = append(kvPairs, fmt.Sprintf("%s=\"%s\"", labels[i], labelValues[i])) + } + + expected := fmt.Sprintf("%s_%s{%s} %d", namespace, metric, strings.Join(kvPairs, ","), value) if !strings.Contains(response.Body.String(), expected) { t.Fatalf("Expected %s got %s", expected, response.Body.String()) diff --git a/go/stats/rates.go b/go/stats/rates.go index 7aa4f7d3ce7..48864585225 100644 --- a/go/stats/rates.go +++ b/go/stats/rates.go @@ -17,6 +17,7 @@ limitations under the License. package stats import ( + "context" "encoding/json" "math" "sync" @@ -65,6 +66,8 @@ type Rates struct { // totalRate is the rate of total counts per second seen in the latest // sampling interval e.g. 100 queries / 5 seconds sampling interval = 20 QPS. totalRate float64 + ctx context.Context + cancel context.CancelFunc } // NewRates reports rolling rate information for countTracker. samples specifies @@ -76,6 +79,7 @@ func NewRates(name string, countTracker CountTracker, samples int, interval time if interval < 1*time.Second && interval != -1*time.Second { panic("interval too small") } + ctx, cancel := context.WithCancel(context.Background()) rt := &Rates{ timeStamps: NewRingInt64(samples + 1), counts: make(map[string]*RingInt64), @@ -83,6 +87,8 @@ func NewRates(name string, countTracker CountTracker, samples int, interval time samples: samples + 1, interval: interval, timestampLastSampling: timeNow(), + ctx: ctx, + cancel: cancel, } if name != "" { publish(name, rt) @@ -93,10 +99,20 @@ func NewRates(name string, countTracker CountTracker, samples int, interval time return rt } +func (rt *Rates) Stop() { + rt.cancel() +} + func (rt *Rates) track() { + t := time.NewTicker(rt.interval) + defer t.Stop() for { - rt.snapshot() - <-time.After(rt.interval) + select { + case <-rt.ctx.Done(): + return + case <-t.C: + rt.snapshot() + } } } diff --git a/go/stats/rates_test.go b/go/stats/rates_test.go index e37cbbd8af8..a25a055020a 100644 --- a/go/stats/rates_test.go +++ b/go/stats/rates_test.go @@ -41,9 +41,10 @@ func TestRates(t *testing.T) { return now } - clear() + clearStats() c := NewCountersWithSingleLabel("rcounter1", "rcounter help", "label") r := NewRates("rates1", c, 3, -1*time.Second) + defer r.Stop() r.snapshot() now = now.Add(epsilon) c.Add("tag1", 0) @@ -89,9 +90,10 @@ func TestRatesConsistency(t *testing.T) { // This tests the following invariant: in the time window // covered by rates, the sum of the rates reported must be // equal to the count reported by the counter. - clear() + clearStats() c := NewCountersWithSingleLabel("rcounter4", "rcounter4 help", "label") r := NewRates("rates4", c, 100, -1*time.Second) + defer r.Stop() r.snapshot() now = now.Add(epsilon) @@ -122,17 +124,18 @@ func TestRatesConsistency(t *testing.T) { } func TestRatesHook(t *testing.T) { - clear() + clearStats() c := NewCountersWithSingleLabel("rcounter2", "rcounter2 help", "label") var gotname string var gotv *Rates - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*Rates) }) v := NewRates("rates2", c, 2, 10*time.Second) + defer v.Stop() if gotname != "rates2" { t.Errorf("want rates2, got %s", gotname) } diff --git a/go/stats/timings.go b/go/stats/timings.go index fe12ccd0604..9b12adfa7c0 100644 --- a/go/stats/timings.go +++ b/go/stats/timings.go @@ -61,7 +61,7 @@ func NewTimings(name, help, label string, categories ...string) *Timings { return t } -// Reset will clear histograms: used during testing +// Reset will clearStats histograms: used during testing func (t *Timings) Reset() { t.mu.RLock() t.histograms = make(map[string]*Histogram) diff --git a/go/stats/timings_test.go b/go/stats/timings_test.go index 9657004a76f..a632f3fba6a 100644 --- a/go/stats/timings_test.go +++ b/go/stats/timings_test.go @@ -26,7 +26,7 @@ import ( ) func TestTimings(t *testing.T) { - clear() + clearStats() tm := NewTimings("timings1", "help", "category") tm.Add("tag1", 500*time.Microsecond) tm.Add("tag1", 1*time.Millisecond) @@ -38,7 +38,7 @@ func TestTimings(t *testing.T) { } func TestMultiTimings(t *testing.T) { - clear() + clearStats() mtm := NewMultiTimings("maptimings1", "help", []string{"dim1", "dim2"}) mtm.Add([]string{"tag1a", "tag1b"}, 500*time.Microsecond) mtm.Add([]string{"tag1a", "tag1b"}, 1*time.Millisecond) @@ -50,7 +50,7 @@ func TestMultiTimings(t *testing.T) { } func TestMultiTimingsDot(t *testing.T) { - clear() + clearStats() mtm := NewMultiTimings("maptimings2", "help", []string{"label"}) mtm.Add([]string{"value.dot"}, 500*time.Microsecond) safe := safeLabel("value.dot") @@ -64,7 +64,7 @@ func TestMultiTimingsDot(t *testing.T) { func TestTimingsHook(t *testing.T) { var gotname string var gotv *Timings - clear() + clearStats() Register(func(name string, v expvar.Var) { gotname = name gotv = v.(*Timings) @@ -81,7 +81,7 @@ func TestTimingsHook(t *testing.T) { } func TestTimingsCombineDimension(t *testing.T) { - clear() + clearStats() combineDimensions = "a,c" t1 := NewTimings("timing_combine_dim1", "help", "label") diff --git a/go/streamlog/streamlog.go b/go/streamlog/streamlog.go index c7916c49256..26248fcd1b1 100644 --- a/go/streamlog/streamlog.go +++ b/go/streamlog/streamlog.go @@ -183,7 +183,7 @@ func (logger *StreamLogger[T]) Name() string { // ServeLogs registers the URL on which messages will be broadcast. // It is safe to register multiple URLs for the same StreamLogger. func (logger *StreamLogger[T]) ServeLogs(url string, logf LogFormatter) { - http.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + servenv.HTTPHandleFunc(url, func(w http.ResponseWriter, r *http.Request) { if err := acl.CheckAccessHTTP(r, acl.DEBUGGING); err != nil { acl.SendError(w, err) return @@ -262,7 +262,7 @@ func GetFormatter[T any](logger *StreamLogger[T]) LogFormatter { // ShouldEmitLog returns whether the log with the given SQL query // should be emitted or filtered func ShouldEmitLog(sql string, rowsAffected, rowsReturned uint64) bool { - if queryLogRowThreshold > maxUint64(rowsAffected, rowsReturned) && queryLogFilterTag == "" { + if queryLogRowThreshold > max(rowsAffected, rowsReturned) && queryLogFilterTag == "" { return false } if queryLogFilterTag != "" { @@ -270,10 +270,3 @@ func ShouldEmitLog(sql string, rowsAffected, rowsReturned uint64) bool { } return true } - -func maxUint64(a, b uint64) uint64 { - if a < b { - return b - } - return a -} diff --git a/go/streamlog/streamlog_flaky_test.go b/go/streamlog/streamlog_flaky_test.go index b829f965174..9c0b0366a1d 100644 --- a/go/streamlog/streamlog_flaky_test.go +++ b/go/streamlog/streamlog_flaky_test.go @@ -28,6 +28,8 @@ import ( "syscall" "testing" "time" + + "vitess.io/vitess/go/vt/servenv" ) type logMessage struct { @@ -51,7 +53,12 @@ func TestHTTP(t *testing.T) { defer l.Close() addr := l.Addr().String() - go http.Serve(l, nil) + go func() { + err := servenv.HTTPServe(l) + if err != nil { + t.Errorf("http serve returned unexpected error: %v", err) + } + }() logger := New[*logMessage]("logger", 1) logger.ServeLogs("/log", testLogf) diff --git a/go/sync2/consolidator.go b/go/sync2/consolidator.go index cf5dd88a7d9..604d7fff35b 100644 --- a/go/sync2/consolidator.go +++ b/go/sync2/consolidator.go @@ -21,47 +21,65 @@ import ( "sync/atomic" "vitess.io/vitess/go/cache" + "vitess.io/vitess/go/sqltypes" ) // Consolidator consolidates duplicate queries from executing simulaneously // and shares results between them. -type Consolidator struct { +type Consolidator interface { + Create(string) (PendingResult, bool) + Items() []ConsolidatorCacheItem + Record(query string) +} + +// PendingResult is a wrapper for result of a query. +type PendingResult interface { + Broadcast() + Err() error + SetErr(error) + SetResult(*sqltypes.Result) + Result() *sqltypes.Result + Wait() +} + +type consolidator struct { *ConsolidatorCache mu sync.Mutex - queries map[string]*Result + queries map[string]*pendingResult } // NewConsolidator creates a new Consolidator -func NewConsolidator() *Consolidator { - return &Consolidator{ - queries: make(map[string]*Result), +func NewConsolidator() Consolidator { + return &consolidator{ ConsolidatorCache: NewConsolidatorCache(1000), + queries: make(map[string]*pendingResult), } } -// Result is a wrapper for result of a query. -type Result struct { +// pendingResult is a wrapper for result of a query. +type pendingResult struct { // executing is used to block additional requests. // The original request holds a write lock while additional ones are blocked // on acquiring a read lock (see Wait() below.) executing sync.RWMutex - consolidator *Consolidator + consolidator *consolidator query string - Result any - Err error + result *sqltypes.Result + err error } // Create adds a query to currently executing queries and acquires a // lock on its Result if it is not already present. If the query is // a duplicate, Create returns false. -func (co *Consolidator) Create(query string) (r *Result, created bool) { +func (co *consolidator) Create(query string) (PendingResult, bool) { co.mu.Lock() defer co.mu.Unlock() + var r *pendingResult if r, ok := co.queries[query]; ok { return r, false } - r = &Result{consolidator: co, query: query} + r = &pendingResult{consolidator: co, query: query} r.executing.Lock() co.queries[query] = r return r, true @@ -70,16 +88,36 @@ func (co *Consolidator) Create(query string) (r *Result, created bool) { // Broadcast removes the entry from current queries and releases the // lock on its Result. Broadcast should be invoked when original // query completes execution. -func (rs *Result) Broadcast() { +func (rs *pendingResult) Broadcast() { rs.consolidator.mu.Lock() defer rs.consolidator.mu.Unlock() delete(rs.consolidator.queries, rs.query) rs.executing.Unlock() } +// Err returns any error returned by the query. +func (rs *pendingResult) Err() error { + return rs.err +} + +// Result returns any result returned by the query. +func (rs *pendingResult) Result() *sqltypes.Result { + return rs.result +} + +// SetErr sets any error returned by the query. +func (rs *pendingResult) SetErr(err error) { + rs.err = err +} + +// SetResult sets any result returned by the query. +func (rs *pendingResult) SetResult(res *sqltypes.Result) { + rs.result = res +} + // Wait waits for the original query to complete execution. Wait should // be invoked for duplicate queries. -func (rs *Result) Wait() { +func (rs *pendingResult) Wait() { rs.consolidator.Record(rs.query) rs.executing.RLock() } diff --git a/go/sync2/consolidator_test.go b/go/sync2/consolidator_test.go index 35d32e94051..132a253ba29 100644 --- a/go/sync2/consolidator_test.go +++ b/go/sync2/consolidator_test.go @@ -19,6 +19,8 @@ package sync2 import ( "reflect" "testing" + + "vitess.io/vitess/go/sqltypes" ) func TestConsolidator(t *testing.T) { @@ -44,17 +46,17 @@ func TestConsolidator(t *testing.T) { t.Fatalf("did not expect consolidator to register a new entry") } - result := 1 + result := &sqltypes.Result{} go func() { - orig.Result = &result + orig.SetResult(result) orig.Broadcast() }() dup.Wait() - if *orig.Result.(*int) != result { + if orig.Result() != result { t.Errorf("failed to pass result") } - if *orig.Result.(*int) != *dup.Result.(*int) { + if orig.Result() != dup.Result() { t.Fatalf("failed to share the result") } @@ -71,7 +73,7 @@ func TestConsolidator(t *testing.T) { } go func() { - second.Result = &result + second.SetResult(result) second.Broadcast() }() dup.Wait() diff --git a/go/sync2/fake_consolidator.go b/go/sync2/fake_consolidator.go new file mode 100644 index 00000000000..64c59e78a5a --- /dev/null +++ b/go/sync2/fake_consolidator.go @@ -0,0 +1,114 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sync2 + +import ( + "vitess.io/vitess/go/sqltypes" +) + +// FakeConsolidator satisfies the Consolidator interface and can be used to mock +// how Vitess interacts with the Consolidator. +type FakeConsolidator struct { + // CreateCalls can be used to inspect Create calls. + CreateCalls []string + // CreateReturns can be used to inspect Create return values. + CreateReturns []*FakeConsolidatorCreateReturn + // CreateReturnCreated pre-configures the return value of Create calls. + CreateReturn *FakeConsolidatorCreateReturn + // RecordCalls can be usd to inspect Record calls. + RecordCalls []string +} + +// FakeConsolidatorCreateReturn wraps the two return values of a call to +// FakeConsolidator.Create. +type FakeConsolidatorCreateReturn struct { + // PendingResult contains the PendingResult return value of a call to + // FakeConsolidator.Create. + PendingResult + // PendingResult contains the Created return value of a call to + // FakeConsolidator.Create. + Created bool +} + +// FakePendingResult satisfies the PendingResult interface and can be used to +// mock how Vitess interacts with the Consolidator. +type FakePendingResult struct { + // BroadcastCalls can be used to inspect Broadcast calls. + BroadcastCalls int + // WaitCalls can be used to inspect Wait calls. + WaitCalls int + err error + result *sqltypes.Result +} + +var ( + _ Consolidator = &FakeConsolidator{} + _ PendingResult = &FakePendingResult{} +) + +// NewFakeConsolidator creates a new FakeConsolidator. +func NewFakeConsolidator() *FakeConsolidator { + return &FakeConsolidator{} +} + +// Create records the Create call for later verification, and returns a +// pre-configured PendingResult and "created" bool. +func (fc *FakeConsolidator) Create(sql string) (PendingResult, bool) { + fc.CreateCalls = append(fc.CreateCalls, sql) + fc.CreateReturns = append(fc.CreateReturns, fc.CreateReturn) + return fc.CreateReturn.PendingResult, fc.CreateReturn.Created +} + +// Record records the Record call for later verification. +func (fc *FakeConsolidator) Record(sql string) { + fc.RecordCalls = append(fc.RecordCalls, sql) +} + +// Items is currently a no-op. +func (fc *FakeConsolidator) Items() []ConsolidatorCacheItem { + return nil +} + +// Broadcast records the Broadcast call for later verification. +func (fr *FakePendingResult) Broadcast() { + fr.BroadcastCalls++ +} + +// Err returns the pre-configured error. +func (fr *FakePendingResult) Err() error { + return fr.err +} + +// Result returns the pre-configured Result. +func (fr *FakePendingResult) Result() *sqltypes.Result { + return fr.result +} + +// SetErr stores the err, which can be retrieved with Err. +func (fr *FakePendingResult) SetErr(err error) { + fr.err = err +} + +// SetResult stores the result, which can be retrieved with Result. +func (fr *FakePendingResult) SetResult(result *sqltypes.Result) { + fr.result = result +} + +// Wait records the Wait call for later verification. +func (fr *FakePendingResult) Wait() { + fr.WaitCalls++ +} diff --git a/go/test/endtoend/backup/pitr/backup_mysqlctld_pitr_test.go b/go/test/endtoend/backup/pitr/backup_mysqlctld_pitr_test.go deleted file mode 100644 index f93dfa475b6..00000000000 --- a/go/test/endtoend/backup/pitr/backup_mysqlctld_pitr_test.go +++ /dev/null @@ -1,212 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mysqlctld - -import ( - "context" - "fmt" - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/mysql" - backup "vitess.io/vitess/go/test/endtoend/backup/vtctlbackup" - "vitess.io/vitess/go/test/endtoend/cluster" -) - -func waitForReplica(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - pMsgs := backup.ReadRowsFromPrimary(t) - for { - rMsgs := backup.ReadRowsFromReplica(t) - if len(pMsgs) == len(rMsgs) { - // success - return - } - select { - case <-ctx.Done(): - assert.FailNow(t, "timeout waiting for replica to catch up") - return - case <-time.After(time.Second): - // - } - } -} - -// TestIncrementalBackupMysqlctld - tests incremental backups using myslctld -func TestIncrementalBackupMysqlctld(t *testing.T) { - defer cluster.PanicHandler(t) - // setup cluster for the testing - code, err := backup.LaunchCluster(backup.Mysqlctld, "xbstream", 0, nil) - require.NoError(t, err, "setup failed with status code %d", code) - defer backup.TearDownCluster() - - backup.InitTestTable(t) - - rowsPerPosition := map[string]int{} - backupPositions := []string{} - - recordRowsPerPosition := func(t *testing.T) { - pos := backup.GetReplicaPosition(t) - msgs := backup.ReadRowsFromReplica(t) - if _, ok := rowsPerPosition[pos]; !ok { - backupPositions = append(backupPositions, pos) - rowsPerPosition[pos] = len(msgs) - } - } - - var fullBackupPos mysql.Position - t.Run("full backup", func(t *testing.T) { - backup.InsertRowOnPrimary(t, "before-full-backup") - waitForReplica(t) - manifest, _ := backup.TestReplicaFullBackup(t) - fullBackupPos = manifest.Position - require.False(t, fullBackupPos.IsZero()) - // - msgs := backup.ReadRowsFromReplica(t) - pos := mysql.EncodePosition(fullBackupPos) - backupPositions = append(backupPositions, pos) - rowsPerPosition[pos] = len(msgs) - }) - - lastBackupPos := fullBackupPos - backup.InsertRowOnPrimary(t, "before-incremental-backups") - - tt := []struct { - name string - writeBeforeBackup bool - fromFullPosition bool - autoPosition bool - expectError string - }{ - { - name: "first incremental backup", - }, - { - name: "make writes, succeed", - writeBeforeBackup: true, - }, - { - name: "fail, no binary logs to backup", - expectError: "no binary logs to backup", - }, - { - name: "make writes again, succeed", - writeBeforeBackup: true, - }, - { - name: "auto position, succeed", - writeBeforeBackup: true, - autoPosition: true, - }, - { - name: "fail auto position, no binary logs to backup", - autoPosition: true, - expectError: "no binary logs to backup", - }, - { - name: "auto position, make writes again, succeed", - writeBeforeBackup: true, - autoPosition: true, - }, - { - name: "from full backup position", - fromFullPosition: true, - }, - } - var fromFullPositionBackups []string - for _, tc := range tt { - t.Run(tc.name, func(t *testing.T) { - if tc.writeBeforeBackup { - backup.InsertRowOnPrimary(t, "") - } - // we wait for 1 second because backups ar ewritten to a directory named after the current timestamp, - // in 1 second resolution. We want to aoid two backups that have the same pathname. Realistically this - // is only ever a problem in this endtoend test, not in production. - // Also, we gie the replica a chance to catch up. - time.Sleep(1100 * time.Millisecond) - waitForReplica(t) - recordRowsPerPosition(t) - // configure --incremental-from-pos to either: - // - auto - // - explicit last backup pos - // - back in history to the original full backup - var incrementalFromPos mysql.Position - if !tc.autoPosition { - incrementalFromPos = lastBackupPos - if tc.fromFullPosition { - incrementalFromPos = fullBackupPos - } - } - manifest, backupName := backup.TestReplicaIncrementalBackup(t, incrementalFromPos, tc.expectError) - if tc.expectError != "" { - return - } - defer func() { - lastBackupPos = manifest.Position - }() - if tc.fromFullPosition { - fromFullPositionBackups = append(fromFullPositionBackups, backupName) - } - require.False(t, manifest.FromPosition.IsZero()) - require.NotEqual(t, manifest.Position, manifest.FromPosition) - require.True(t, manifest.Position.GTIDSet.Contains(manifest.FromPosition.GTIDSet)) - - gtidPurgedPos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, backup.GetReplicaGtidPurged(t)) - require.NoError(t, err) - fromPositionIncludingPurged := manifest.FromPosition.GTIDSet.Union(gtidPurgedPos.GTIDSet) - - expectFromPosition := lastBackupPos.GTIDSet.Union(gtidPurgedPos.GTIDSet) - if !incrementalFromPos.IsZero() { - expectFromPosition = incrementalFromPos.GTIDSet.Union(gtidPurgedPos.GTIDSet) - } - require.Equalf(t, expectFromPosition, fromPositionIncludingPurged, "expected: %v, found: %v", expectFromPosition, fromPositionIncludingPurged) - }) - } - - testRestores := func(t *testing.T) { - for _, r := range rand.Perm(len(backupPositions)) { - pos := backupPositions[r] - testName := fmt.Sprintf("%s, %d records", pos, rowsPerPosition[pos]) - t.Run(testName, func(t *testing.T) { - restoreToPos, err := mysql.DecodePosition(pos) - require.NoError(t, err) - backup.TestReplicaRestoreToPos(t, restoreToPos, "") - msgs := backup.ReadRowsFromReplica(t) - count, ok := rowsPerPosition[pos] - require.True(t, ok) - assert.Equalf(t, count, len(msgs), "messages: %v", msgs) - }) - } - } - t.Run("PITR", func(t *testing.T) { - testRestores(t) - }) - t.Run("remove full position backups", func(t *testing.T) { - // Delete the fromFullPosition backup(s), which leaves us with less restore options. Try again. - for _, backupName := range fromFullPositionBackups { - backup.RemoveBackup(t, backupName) - } - }) - t.Run("PITR-2", func(t *testing.T) { - testRestores(t) - }) -} diff --git a/go/test/endtoend/backup/pitr/backup_pitr_test.go b/go/test/endtoend/backup/pitr/backup_pitr_test.go new file mode 100644 index 00000000000..a1b29ef47dd --- /dev/null +++ b/go/test/endtoend/backup/pitr/backup_pitr_test.go @@ -0,0 +1,69 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mysqlctld + +import ( + "testing" + + backup "vitess.io/vitess/go/test/endtoend/backup/vtctlbackup" +) + +// TestIncrementalBackupAndRestoreToPos +func TestIncrementalBackupAndRestoreToPos(t *testing.T) { + tcase := &backup.PITRTestCase{ + Name: "BuiltinBackup", + SetupType: backup.BuiltinBackup, + ComprssDetails: nil, + } + backup.ExecTestIncrementalBackupAndRestoreToPos(t, tcase) +} + +// TestIncrementalBackupAndRestoreToTimestamp - tests incremental backups and restores. +// The general outline of the test: +// - Generate some schema with data +// - Take a full backup +// - Proceed to take a series of inremental backups. In between, inject data (insert rows), and keep record +// of which data (number of rows) is present in each backup, and at which timestamp. +// - Expect backups success/failure per scenario +// - Next up, we start testing restores. Randomly pick recorded timestamps and restore to those points in time. +// - In each restore, excpect to find the data (number of rows) recorded for said timestamp +// - Some restores should fail because the timestamp exceeds the last binlog +// - Do so for all recorded tiemstamps. +// - Then, a 2nd round where some backups are purged -- this tests to see that we're still able to find a restore path +// (of course we only delete backups that still leave us with valid restore paths). +// +// All of the above is done for BuiltinBackup, XtraBackup, Mysqlctld (which is technically builtin) +func TestIncrementalBackupAndRestoreToTimestamp(t *testing.T) { + tcase := &backup.PITRTestCase{ + Name: "BuiltinBackup", + SetupType: backup.BuiltinBackup, + ComprssDetails: nil, + } + backup.ExecTestIncrementalBackupAndRestoreToTimestamp(t, tcase) +} + +// TestIncrementalBackupOnTwoTablets runs a series of interleaved backups on two different replicas: full and incremental. +// Specifically, it's designed to test how incremental backups are taken by interleaved replicas, so that they successfully build on +// one another. +func TestIncrementalBackupOnTwoTablets(t *testing.T) { + tcase := &backup.PITRTestCase{ + Name: "BuiltinBackup", + SetupType: backup.BuiltinBackup, + ComprssDetails: nil, + } + backup.ExecTestIncrementalBackupOnTwoTablets(t, tcase) +} diff --git a/go/test/endtoend/backup/pitr_xtrabackup/backup_pitr_xtrabackup_test.go b/go/test/endtoend/backup/pitr_xtrabackup/backup_pitr_xtrabackup_test.go new file mode 100644 index 00000000000..b69e950fe0b --- /dev/null +++ b/go/test/endtoend/backup/pitr_xtrabackup/backup_pitr_xtrabackup_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mysqlctld + +import ( + "testing" + + backup "vitess.io/vitess/go/test/endtoend/backup/vtctlbackup" +) + +// TestIncrementalBackupAndRestoreToPos +func TestIncrementalBackupAndRestoreToPos(t *testing.T) { + tcase := &backup.PITRTestCase{ + Name: "XtraBackup", + SetupType: backup.XtraBackup, + ComprssDetails: &backup.CompressionDetails{ + CompressorEngineName: "pgzip", + }, + } + backup.ExecTestIncrementalBackupAndRestoreToPos(t, tcase) +} + +// TestIncrementalBackupAndRestoreToTimestamp - tests incremental backups and restores. +// The general outline of the test: +// - Generate some schema with data +// - Take a full backup +// - Proceed to take a series of inremental backups. In between, inject data (insert rows), and keep record +// of which data (number of rows) is present in each backup, and at which timestamp. +// - Expect backups success/failure per scenario +// - Next up, we start testing restores. Randomly pick recorded timestamps and restore to those points in time. +// - In each restore, excpect to find the data (number of rows) recorded for said timestamp +// - Some restores should fail because the timestamp exceeds the last binlog +// - Do so for all recorded tiemstamps. +// - Then, a 2nd round where some backups are purged -- this tests to see that we're still able to find a restore path +// (of course we only delete backups that still leave us with valid restore paths). +// +// All of the above is done for BuiltinBackup, XtraBackup, Mysqlctld (which is technically builtin) +func TestIncrementalBackupAndRestoreToTimestamp(t *testing.T) { + tcase := &backup.PITRTestCase{ + Name: "XtraBackup", + SetupType: backup.XtraBackup, + ComprssDetails: &backup.CompressionDetails{ + CompressorEngineName: "pgzip", + }, + } + backup.ExecTestIncrementalBackupAndRestoreToTimestamp(t, tcase) +} diff --git a/go/test/endtoend/backup/vtbackup/backup_only_test.go b/go/test/endtoend/backup/vtbackup/backup_only_test.go index 3730a1fa586..408cc64a21b 100644 --- a/go/test/endtoend/backup/vtbackup/backup_only_test.go +++ b/go/test/endtoend/backup/vtbackup/backup_only_test.go @@ -18,6 +18,7 @@ package vtbackup import ( "context" + "encoding/json" "fmt" "os" "path" @@ -56,18 +57,29 @@ func TestTabletInitialBackup(t *testing.T) { // - list the backups, remove them defer cluster.PanicHandler(t) + waitForReplicationToCatchup([]cluster.Vttablet{*replica1, *replica2}) + vtBackup(t, true, false, false) verifyBackupCount(t, shardKsName, 1) // Initialize the tablets initTablets(t, false, false) - // Restore the Tablets + vtTabletVersion, err := cluster.GetMajorVersion("vttablet") + require.NoError(t, err) + // For all version at or above v17.0.0, each replica will start in super_read_only mode. Let's verify that is working correctly. + if vtTabletVersion >= 17 { + err := primary.VttabletProcess.CreateDB("testDB") + require.ErrorContains(t, err, "The MySQL server is running with the --super-read-only option so it cannot execute this statement") + err = replica1.VttabletProcess.CreateDB("testDB") + require.ErrorContains(t, err, "The MySQL server is running with the --super-read-only option so it cannot execute this statement") + } + // Restore the Tablet restore(t, primary, "replica", "NOT_SERVING") // Vitess expects that the user has set the database into ReadWrite mode before calling // TabletExternallyReparented - err := localCluster.VtctlclientProcess.ExecuteCommand( + err = localCluster.VtctlclientProcess.ExecuteCommand( "SetReadWrite", primary.Alias) require.Nil(t, err) err = localCluster.VtctlclientProcess.ExecuteCommand( @@ -254,19 +266,15 @@ func initTablets(t *testing.T, startTablet bool, initShardPrimary bool) { func restore(t *testing.T, tablet *cluster.Vttablet, tabletType string, waitForState string) { // Erase mysql/tablet dir, then start tablet with restore enabled. - log.Infof("restoring tablet %s", time.Now()) resetTabletDirectory(t, *tablet, true) - err := tablet.VttabletProcess.CreateDB(keyspaceName) - require.Nil(t, err) - // Start tablets tablet.VttabletProcess.ExtraArgs = []string{"--db-credentials-file", dbCredentialFile} tablet.VttabletProcess.TabletType = tabletType tablet.VttabletProcess.ServingStatus = waitForState tablet.VttabletProcess.SupportsBackup = true - err = tablet.VttabletProcess.Setup() + err := tablet.VttabletProcess.Setup() require.Nil(t, err) } @@ -294,6 +302,12 @@ func resetTabletDirectory(t *testing.T, tablet cluster.Vttablet, initMysql bool) func tearDown(t *testing.T, initMysql bool) { // reset replication + for _, db := range []string{"_vt", "vt_insert_test"} { + _, err := primary.VttabletProcess.QueryTablet(fmt.Sprintf("drop database if exists %s", db), keyspaceName, true) + require.Nil(t, err) + } + caughtUp := waitForReplicationToCatchup([]cluster.Vttablet{*replica1, *replica2}) + require.True(t, caughtUp, "Timed out waiting for all replicas to catch up") promoteCommands := "STOP SLAVE; RESET SLAVE ALL; RESET MASTER;" disableSemiSyncCommands := "SET GLOBAL rpl_semi_sync_master_enabled = false; SET GLOBAL rpl_semi_sync_slave_enabled = false" for _, tablet := range []cluster.Vttablet{*primary, *replica1, *replica2} { @@ -301,10 +315,6 @@ func tearDown(t *testing.T, initMysql bool) { require.Nil(t, err) _, err = tablet.VttabletProcess.QueryTablet(disableSemiSyncCommands, keyspaceName, true) require.Nil(t, err) - for _, db := range []string{"_vt", "vt_insert_test"} { - _, err = tablet.VttabletProcess.QueryTablet(fmt.Sprintf("drop database if exists %s", db), keyspaceName, true) - require.Nil(t, err) - } } // TODO: Ideally we should not be resetting the mysql. @@ -367,3 +377,39 @@ func verifyDisableEnableRedoLogs(ctx context.Context, t *testing.T, mysqlSocket } } } + +// This helper function wait for all replicas to catch-up the replication. +// It does this by querying the status detail url of each replica and find the lag. +func waitForReplicationToCatchup(tablets []cluster.Vttablet) bool { + endTime := time.Now().Add(time.Second * 30) + timeout := time.After(time.Until(endTime)) + // key-value structure returned by status url. + type kv struct { + Key string + Class string + Value string + } + // defining a struct instance + var statuslst []kv + for { + select { + case <-timeout: + return false + default: + var replicaCount = 0 + for _, tablet := range tablets { + status := tablet.VttabletProcess.GetStatusDetails() + json.Unmarshal([]byte(status), &statuslst) + for _, obj := range statuslst { + if obj.Key == "Replication Lag" && obj.Value == "0s" { + replicaCount++ + } + } + if replicaCount == len(tablets) { + return true + } + } + time.Sleep(time.Second * 1) + } + } +} diff --git a/go/test/endtoend/backup/vtbackup/main_test.go b/go/test/endtoend/backup/vtbackup/main_test.go index 069f83fbba5..36bfae123d8 100644 --- a/go/test/endtoend/backup/vtbackup/main_test.go +++ b/go/test/endtoend/backup/vtbackup/main_test.go @@ -25,6 +25,7 @@ import ( "testing" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" "vitess.io/vitess/go/vt/log" ) @@ -86,11 +87,16 @@ func TestMain(m *testing.M) { // Create a new init_db.sql file that sets up passwords for all users. // Then we use a db-credentials-file with the passwords. + // TODO: We could have operated with empty password here. Create a separate test for --db-credentials-file functionality (@rsajwani) dbCredentialFile = cluster.WriteDbCredentialToTmp(localCluster.TmpDirectory) initDb, _ := os.ReadFile(path.Join(os.Getenv("VTROOT"), "/config/init_db.sql")) sql := string(initDb) + // The original init_db.sql does not have any passwords. Here we update the init file with passwords + sql, err = utils.GetInitDBSQL(sql, cluster.GetPasswordUpdateSQL(localCluster), "") + if err != nil { + return 1, err + } newInitDBFile = path.Join(localCluster.TmpDirectory, "init_db_with_passwords.sql") - sql = sql + cluster.GetPasswordUpdateSQL(localCluster) err = os.WriteFile(newInitDBFile, []byte(sql), 0666) if err != nil { return 1, err @@ -112,7 +118,11 @@ func TestMain(m *testing.M) { tablet.VttabletProcess.ExtraArgs = commonTabletArg tablet.VttabletProcess.SupportsBackup = true - tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory) + mysqlctlProcess, err := cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory) + if err != nil { + return 1, err + } + tablet.MysqlctlProcess = *mysqlctlProcess tablet.MysqlctlProcess.InitDBFile = newInitDBFile tablet.MysqlctlProcess.ExtraArgs = extraArgs proc, err := tablet.MysqlctlProcess.StartProcess() @@ -127,13 +137,6 @@ func TestMain(m *testing.M) { } } - // Create database - for _, tablet := range []cluster.Vttablet{*primary, *replica1} { - if err := tablet.VttabletProcess.CreateDB(keyspaceName); err != nil { - return 1, err - } - } - if localCluster.VtTabletMajorVersion >= 16 { // If vttablets are any lower than version 16, then they are running the replication manager. // Running VTOrc and replication manager sometimes creates the situation where VTOrc has set up semi-sync on the primary, diff --git a/go/test/endtoend/backup/vtctlbackup/backup_test.go b/go/test/endtoend/backup/vtctlbackup/backup_test.go index e8d3ba2c8d7..92c7a2f3048 100644 --- a/go/test/endtoend/backup/vtctlbackup/backup_test.go +++ b/go/test/endtoend/backup/vtctlbackup/backup_test.go @@ -22,12 +22,12 @@ import ( "vitess.io/vitess/go/vt/mysqlctl" ) -// TestBackupMain - main tests backup using vtctl commands -func TestBackupMain(t *testing.T) { - TestBackup(t, Backup, "xbstream", 0, nil, nil) +// TestBuiltinBackup - main tests backup using vtctl commands +func TestBuiltinBackup(t *testing.T) { + TestBackup(t, BuiltinBackup, "xbstream", 0, nil, nil) } -func TestBackupMainWithZstdCompression(t *testing.T) { +func TestBuiltinBackupWithZstdCompression(t *testing.T) { defer setDefaultCompressionFlag() cDetails := &CompressionDetails{ CompressorEngineName: "zstd", @@ -36,7 +36,31 @@ func TestBackupMainWithZstdCompression(t *testing.T) { ExternalDecompressorCmd: "zstd -d", } - TestBackup(t, Backup, "xbstream", 0, cDetails, []string{"TestReplicaBackup", "TestPrimaryBackup"}) + TestBackup(t, BuiltinBackup, "xbstream", 0, cDetails, []string{"TestReplicaBackup", "TestPrimaryBackup"}) +} + +func TestBuiltinBackupWithExternalZstdCompression(t *testing.T) { + defer setDefaultCompressionFlag() + cDetails := &CompressionDetails{ + CompressorEngineName: "external", + ExternalCompressorCmd: "zstd", + ExternalCompressorExt: ".zst", + ExternalDecompressorCmd: "zstd -d", + } + + TestBackup(t, BuiltinBackup, "xbstream", 0, cDetails, []string{"TestReplicaBackup", "TestPrimaryBackup"}) +} + +func TestBuiltinBackupWithExternalZstdCompressionAndManifestedDecompressor(t *testing.T) { + defer setDefaultCompressionFlag() + cDetails := &CompressionDetails{ + CompressorEngineName: "external", + ExternalCompressorCmd: "zstd", + ExternalCompressorExt: ".zst", + ManifestExternalDecompressorCmd: "zstd -d", + } + + TestBackup(t, BuiltinBackup, "xbstream", 0, cDetails, []string{"TestReplicaBackup", "TestPrimaryBackup"}) } func setDefaultCompressionFlag() { @@ -44,4 +68,5 @@ func setDefaultCompressionFlag() { mysqlctl.ExternalCompressorCmd = "" mysqlctl.ExternalCompressorExt = "" mysqlctl.ExternalDecompressorCmd = "" + mysqlctl.ManifestExternalDecompressorCmd = "" } diff --git a/go/test/endtoend/backup/vtctlbackup/backup_utils.go b/go/test/endtoend/backup/vtctlbackup/backup_utils.go index 87e854b7d64..35edab98928 100644 --- a/go/test/endtoend/backup/vtctlbackup/backup_utils.go +++ b/go/test/endtoend/backup/vtctlbackup/backup_utils.go @@ -18,37 +18,43 @@ package vtctlbackup import ( "bufio" + "context" "encoding/json" "fmt" "os" "os/exec" "path" "strings" + "sync" "syscall" "testing" "time" - "vitess.io/vitess/go/mysql" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/replication" + + "vitess.io/vitess/go/json2" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/test/endtoend/cluster" ) // constants for test variants const ( XtraBackup = iota - Backup + BuiltinBackup Mysqlctld - timeout = time.Duration(60 * time.Second) + timeout = time.Duration(60 * time.Second) + topoConsistencyTimeout = 20 * time.Second ) var ( @@ -78,18 +84,20 @@ var ( } vtInsertTest = ` - create table vt_insert_test ( - id bigint auto_increment, - msg varchar(64), - primary key (id) - ) Engine=InnoDB` + create table vt_insert_test ( + id bigint auto_increment, + msg varchar(64), + primary key (id) + ) Engine=InnoDB + ` ) type CompressionDetails struct { - CompressorEngineName string - ExternalCompressorCmd string - ExternalCompressorExt string - ExternalDecompressorCmd string + CompressorEngineName string + ExternalCompressorCmd string + ExternalCompressorExt string + ExternalDecompressorCmd string + ManifestExternalDecompressorCmd string } // LaunchCluster : starts the cluster as per given params. @@ -115,11 +123,18 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp } shard := &localCluster.Keyspaces[0].Shards[0] + // Create a new init_db.sql file that sets up passwords for all users. + // Then we use a db-credentials-file with the passwords. + // TODO: We could have operated with empty password here. Create a separate test for --db-credentials-file functionality (@rsajwani) dbCredentialFile = cluster.WriteDbCredentialToTmp(localCluster.TmpDirectory) initDb, _ := os.ReadFile(path.Join(os.Getenv("VTROOT"), "/config/init_db.sql")) sql := string(initDb) + // The original init_db.sql does not have any passwords. Here we update the init file with passwords + sql, err = utils.GetInitDBSQL(sql, cluster.GetPasswordUpdateSQL(localCluster), "") + if err != nil { + return 1, err + } newInitDBFile = path.Join(localCluster.TmpDirectory, "init_db_with_passwords.sql") - sql = sql + cluster.GetPasswordUpdateSQL(localCluster) err = os.WriteFile(newInitDBFile, []byte(sql), 0666) if err != nil { return 1, err @@ -142,7 +157,7 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp // if streamMode is xbstream, add some additional args to test other xtrabackup flags if streamMode == "xbstream" { - xtrabackupArgs = append(xtrabackupArgs, "--xtrabackup_prepare_flags", fmt.Sprintf("--use-memory=100M")) //nolint + xtrabackupArgs = append(xtrabackupArgs, "--xtrabackup_prepare_flags", "--use-memory=100M") } commonTabletArg = append(commonTabletArg, xtrabackupArgs...) @@ -151,11 +166,13 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp commonTabletArg = append(commonTabletArg, getCompressorArgs(cDetails)...) var mysqlProcs []*exec.Cmd + tabletTypes := map[int]string{ + 0: "primary", + 1: "replica", + 2: "rdonly", + } for i := 0; i < 3; i++ { - tabletType := "replica" - if i == 0 { - tabletType = "primary" - } + tabletType := tabletTypes[i] tablet := localCluster.NewVttabletInstance(tabletType, 0, cell) tablet.VttabletProcess = localCluster.VtprocessInstanceFromVttablet(tablet, shard.Name, keyspaceName) tablet.VttabletProcess.DbPassword = dbPassword @@ -163,7 +180,11 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp tablet.VttabletProcess.SupportsBackup = true if setupType == Mysqlctld { - tablet.MysqlctldProcess = *cluster.MysqlCtldProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory) + mysqlctldProcess, err := cluster.MysqlCtldProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory) + if err != nil { + return 1, err + } + tablet.MysqlctldProcess = *mysqlctldProcess tablet.MysqlctldProcess.InitDBFile = newInitDBFile tablet.MysqlctldProcess.ExtraArgs = extraArgs tablet.MysqlctldProcess.Password = tablet.VttabletProcess.DbPassword @@ -174,7 +195,11 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp continue } - tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory) + mysqlctlProcess, err := cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory) + if err != nil { + return 1, err + } + tablet.MysqlctlProcess = *mysqlctlProcess tablet.MysqlctlProcess.InitDBFile = newInitDBFile tablet.MysqlctlProcess.ExtraArgs = extraArgs proc, err := tablet.MysqlctlProcess.StartProcess() @@ -200,16 +225,16 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp if err := localCluster.VtctlclientProcess.InitTablet(replica1, cell, keyspaceName, hostname, shard.Name); err != nil { return 1, err } + if err := localCluster.VtctlclientProcess.InitTablet(replica2, cell, keyspaceName, hostname, shard.Name); err != nil { + return 1, err + } vtctldClientProcess := cluster.VtctldClientProcessInstance("localhost", localCluster.VtctldProcess.GrpcPort, localCluster.TmpDirectory) _, err = vtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, "--durability-policy=semi_sync") if err != nil { return 1, err } - for _, tablet := range []cluster.Vttablet{*primary, *replica1} { - if err := tablet.VttabletProcess.CreateDB(keyspaceName); err != nil { - return 1, err - } + for _, tablet := range []*cluster.Vttablet{primary, replica1, replica2} { if err := tablet.VttabletProcess.Setup(); err != nil { return 1, err } @@ -245,9 +270,11 @@ func getCompressorArgs(cDetails *CompressionDetails) []string { if cDetails.ExternalDecompressorCmd != "" { args = append(args, fmt.Sprintf("--external-decompressor=%s", cDetails.ExternalDecompressorCmd)) } + if cDetails.ManifestExternalDecompressorCmd != "" { + args = append(args, fmt.Sprintf("--manifest-external-decompressor=%s", cDetails.ManifestExternalDecompressorCmd)) + } return args - } // update arguments with new values of compressionDetail. @@ -338,6 +365,10 @@ func TestBackup(t *testing.T, setupType int, streamMode string, stripes int, cDe name: "TestTerminatedRestore", method: terminatedRestore, }, // + { + name: "DoNotDemoteNewlyPromotedPrimaryIfReparentingDuringBackup", + method: doNotDemoteNewlyPromotedPrimaryIfReparentingDuringBackup, + }, // } defer cluster.PanicHandler(t) @@ -353,6 +384,10 @@ func TestBackup(t *testing.T, setupType int, streamMode string, stripes int, cDe if len(runSpecific) > 0 && !isRegistered(test.name, runSpecific) { continue } + // don't run this one unless specified + if len(runSpecific) == 0 && test.name == "DoNotDemoteNewlyPromotedPrimaryIfReparentingDuringBackup" { + continue + } if retVal := t.Run(test.name, test.method); !retVal { return vterrors.Errorf(vtrpc.Code_UNKNOWN, "test failure: %s", test.name) } @@ -387,6 +422,14 @@ type restoreMethod func(t *testing.T, tablet *cluster.Vttablet) // 13. verify that don't have the data added after the first backup // 14. remove the backups func primaryBackup(t *testing.T) { + // Having the VTOrc in this test causes a lot of flakiness. For example when we delete the tablet `replica2` which + // is the current primary and then try to restore from backup the old primary (`primary.Alias`), but before that sometimes the VTOrc + // promotes the `replica1` to primary right after we delete the replica2 (current primary). + // This can result in unexpected behavior. Therefore, disabling the VTOrc in this test to remove flakiness. + localCluster.DisableVTOrcRecoveries(t) + defer func() { + localCluster.EnableVTOrcRecoveries(t) + }() verifyInitialReplication(t) output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("Backup", primary.Alias) @@ -421,6 +464,8 @@ func primaryBackup(t *testing.T) { backups = localCluster.VerifyBackupCount(t, shardKsName, 2) assert.Contains(t, backups[1], primary.Alias) + verifyTabletBackupStats(t, primary.VttabletProcess.GetVars()) + // Perform PRS to demote the primary tablet (primary) so that we can do a restore there and verify we don't have the // data from after the older/first backup err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", @@ -439,6 +484,8 @@ func primaryBackup(t *testing.T) { err = localCluster.VtctlclientProcess.ExecuteCommand("RestoreFromBackup", "--", "--backup_timestamp", firstBackupTimestamp, primary.Alias) require.Nil(t, err) + verifyTabletRestoreStats(t, primary.VttabletProcess.GetVars()) + // Re-init the shard -- making the original primary tablet (primary) primary again -- for subsequent tests err = localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) require.Nil(t, err) @@ -465,6 +512,8 @@ func primaryReplicaSameBackup(t *testing.T) { err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) + verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) + // insert more data on the primary _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true) require.Nil(t, err) @@ -531,6 +580,8 @@ func primaryReplicaSameBackupModifiedCompressionEngine(t *testing.T) { err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) + verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) + // insert more data on the primary _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true) require.Nil(t, err) @@ -583,6 +634,8 @@ func primaryReplicaSameBackupModifiedCompressionEngine(t *testing.T) { err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica2.Alias) require.Nil(t, err) + verifyTabletBackupStats(t, replica2.VttabletProcess.GetVars()) + // Force replica2 to restore from backup. verifyRestoreTablet(t, replica2, "SERVING") cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 4) @@ -621,6 +674,8 @@ func testRestoreOldPrimary(t *testing.T, method restoreMethod) { err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) + verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) + // insert more data on the primary _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true) require.Nil(t, err) @@ -641,6 +696,8 @@ func testRestoreOldPrimary(t *testing.T, method restoreMethod) { // wait for it to catch up. cluster.VerifyRowsInTablet(t, primary, keyspaceName, 3) + verifyTabletRestoreStats(t, primary.VttabletProcess.GetVars()) + // teardown restartPrimaryAndReplica(t) } @@ -679,8 +736,6 @@ func restartPrimaryAndReplica(t *testing.T) { for _, tablet := range []*cluster.Vttablet{primary, replica1} { err := localCluster.VtctlclientProcess.InitTablet(tablet, cell, keyspaceName, hostname, shardName) require.Nil(t, err) - err = tablet.VttabletProcess.CreateDB(keyspaceName) - require.Nil(t, err) err = tablet.VttabletProcess.Setup() require.Nil(t, err) } @@ -718,9 +773,17 @@ func terminatedRestore(t *testing.T) { // previous test to complete (suspicion: MySQL does not fully start) time.Sleep(5 * time.Second) + checkTabletType(t, replica1.Alias, topodata.TabletType_REPLICA) + terminateBackup(t, replica1.Alias) + // If backup fails then the tablet type goes back to original type. + checkTabletType(t, replica1.Alias, topodata.TabletType_REPLICA) + // backup the replica err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) + checkTabletType(t, replica1.Alias, topodata.TabletType_REPLICA) + + verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) // insert more data on the primary _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true) @@ -736,26 +799,80 @@ func terminatedRestore(t *testing.T) { _, err = replica1.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test3')", keyspaceName, true) require.Nil(t, err) + checkTabletType(t, primary.Alias, topodata.TabletType_REPLICA) terminateRestore(t) + // If restore fails then the tablet type goes back to original type. + checkTabletType(t, primary.Alias, topodata.TabletType_REPLICA) err = localCluster.VtctlclientProcess.ExecuteCommand("RestoreFromBackup", primary.Alias) require.Nil(t, err) - - output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", primary.Alias) - require.Nil(t, err) - - var tabletPB topodata.Tablet - err = json.Unmarshal([]byte(output), &tabletPB) - require.Nil(t, err) - assert.Equal(t, tabletPB.Type, topodata.TabletType_REPLICA) + checkTabletType(t, primary.Alias, topodata.TabletType_REPLICA) _, err = os.Stat(path.Join(primary.VttabletProcess.Directory, "restore_in_progress")) assert.True(t, os.IsNotExist(err)) cluster.VerifyRowsInTablet(t, primary, keyspaceName, 3) + + verifyTabletRestoreStats(t, primary.VttabletProcess.GetVars()) + stopAllTablets() } +func checkTabletType(t *testing.T, alias string, tabletType topodata.TabletType) { + t.Helper() + // for loop for 15 seconds to check if tablet type is correct + for i := 0; i < 15; i++ { + output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("GetTablet", alias) + require.Nil(t, err) + var tabletPB topodata.Tablet + err = json2.Unmarshal([]byte(output), &tabletPB) + require.NoError(t, err) + if tabletType == tabletPB.Type { + return + } + time.Sleep(1 * time.Second) + } + require.Failf(t, "checkTabletType failed.", "Tablet type is not correct. Expected: %v", tabletType) +} + +func doNotDemoteNewlyPromotedPrimaryIfReparentingDuringBackup(t *testing.T) { + var wg sync.WaitGroup + wg.Add(2) + + // Start the backup on a replica + go func() { + defer wg.Done() + // ensure this is a primary first + checkTabletType(t, primary.Alias, topodata.TabletType_PRIMARY) + + // now backup + err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + require.Nil(t, err) + }() + + // Perform a graceful reparent operation + go func() { + defer wg.Done() + // ensure this is a primary first + checkTabletType(t, primary.Alias, topodata.TabletType_PRIMARY) + + // now reparent + _, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput( + "PlannedReparentShard", "--", + "--keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, shardName), + "--new_primary", replica1.Alias) + require.Nil(t, err) + + // check that we reparented + checkTabletType(t, replica1.Alias, topodata.TabletType_PRIMARY) + }() + + wg.Wait() + + // check that this is still a primary + checkTabletType(t, replica1.Alias, topodata.TabletType_PRIMARY) +} + // test_backup will: // - create a shard with primary and replica1 only // - run InitShardPrimary @@ -770,15 +887,9 @@ func terminatedRestore(t *testing.T) { // Args: // tablet_type: 'replica' or 'rdonly'. func vtctlBackup(t *testing.T, tabletType string) { - // Start vtorc before running backups - vtorcProcess := localCluster.NewVTOrcProcess(cluster.VTOrcConfiguration{}) - err := vtorcProcess.Setup() - require.NoError(t, err) - localCluster.VTOrcProcesses = append(localCluster.VTOrcProcesses, vtorcProcess) - // StopReplication on replica1. We verify that the replication works fine later in // verifyInitialReplication. So this will also check that VTOrc is running. - err = localCluster.VtctlclientProcess.ExecuteCommand("StopReplication", replica1.Alias) + err := localCluster.VtctlclientProcess.ExecuteCommand("StopReplication", replica1.Alias) require.Nil(t, err) verifyInitialReplication(t) @@ -789,6 +900,8 @@ func vtctlBackup(t *testing.T, tabletType string) { backups := localCluster.VerifyBackupCount(t, shardKsName, 1) + verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) + _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true) require.Nil(t, err) @@ -797,12 +910,6 @@ func vtctlBackup(t *testing.T, tabletType string) { cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2) verifyAfterRemovingBackupNoBackupShouldBePresent(t, backups) - - // Stop VTOrc - err = localCluster.VTOrcProcesses[0].TearDown() - localCluster.VTOrcProcesses = nil - require.NoError(t, err) - err = replica2.VttabletProcess.TearDown() require.Nil(t, err) @@ -810,7 +917,6 @@ func vtctlBackup(t *testing.T, tabletType string) { require.Nil(t, err) _, err = primary.VttabletProcess.QueryTablet("DROP TABLE vt_insert_test", keyspaceName, true) require.Nil(t, err) - } func InitTestTable(t *testing.T) { @@ -900,11 +1006,48 @@ func verifySemiSyncStatus(t *testing.T, vttablet *cluster.Vttablet, expectedStat assert.Equal(t, status, expectedStatus) } +func terminateBackup(t *testing.T, alias string) { + stopBackupMsg := "Done taking Backup" + if useXtrabackup { + stopBackupMsg = "Starting backup with" + useXtrabackup = false + defer func() { + useXtrabackup = true + }() + } + + args := append([]string{"--server", localCluster.VtctlclientProcess.Server, "--alsologtostderr"}, "Backup", "--", alias) + tmpProcess := exec.Command( + "vtctlclient", + args..., + ) + + reader, _ := tmpProcess.StderrPipe() + err := tmpProcess.Start() + require.Nil(t, err) + found := false + + scanner := bufio.NewScanner(reader) + + for scanner.Scan() { + text := scanner.Text() + if strings.Contains(text, stopBackupMsg) { + tmpProcess.Process.Signal(syscall.SIGTERM) + found = true //nolint + return + } + } + assert.True(t, found, "backup message not found") +} + func terminateRestore(t *testing.T) { stopRestoreMsg := "Copying file 10" if useXtrabackup { stopRestoreMsg = "Restore: Preparing" useXtrabackup = false + defer func() { + useXtrabackup = true + }() } args := append([]string{"--server", localCluster.VtctlclientProcess.Server, "--alsologtostderr"}, "RestoreFromBackup", "--", primary.Alias) @@ -927,45 +1070,38 @@ func terminateRestore(t *testing.T) { assert.Fail(t, "restore in progress file missing") } tmpProcess.Process.Signal(syscall.SIGTERM) - found = true //nolint - return + found = true + break } } assert.True(t, found, "Restore message not found") } -func vtctlBackupReplicaNoDestroyNoWrites(t *testing.T, tabletType string) (backups []string, destroy func(t *testing.T)) { - restoreWaitForBackup(t, tabletType, nil, true) - verifyInitialReplication(t) +func vtctlBackupReplicaNoDestroyNoWrites(t *testing.T, replicaIndex int) (backups []string) { + replica := getReplica(t, replicaIndex) + numBackups := len(waitForNumBackups(t, -1)) - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica.Alias) require.Nil(t, err) - backups = localCluster.VerifyBackupCount(t, shardKsName, 1) + backups = waitForNumBackups(t, numBackups+1) + require.NotEmpty(t, backups) - err = replica2.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, 25*time.Second) - require.Nil(t, err) + verifyTabletBackupStats(t, replica.VttabletProcess.GetVars()) - err = replica2.VttabletProcess.TearDown() - require.Nil(t, err) - - err = localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", replica2.Alias) - require.Nil(t, err) - - destroy = func(t *testing.T) { - verifyAfterRemovingBackupNoBackupShouldBePresent(t, backups) - } - return backups, destroy + return backups } -func GetReplicaPosition(t *testing.T) string { - pos, _ := cluster.GetPrimaryPosition(t, *replica1, hostname) +func GetReplicaPosition(t *testing.T, replicaIndex int) string { + replica := getReplica(t, replicaIndex) + pos, _ := cluster.GetPrimaryPosition(t, *replica, hostname) return pos } -func GetReplicaGtidPurged(t *testing.T) string { +func GetReplicaGtidPurged(t *testing.T, replicaIndex int) string { + replica := getReplica(t, replicaIndex) query := "select @@global.gtid_purged as gtid_purged" - rs, err := replica1.VttabletProcess.QueryTablet(query, keyspaceName, true) + rs, err := replica.VttabletProcess.QueryTablet(query, keyspaceName, true) require.NoError(t, err) row := rs.Named().Row() require.NotNil(t, row) @@ -998,13 +1134,62 @@ func ReadRowsFromPrimary(t *testing.T) (msgs []string) { return ReadRowsFromTablet(t, primary) } -func ReadRowsFromReplica(t *testing.T) (msgs []string) { - return ReadRowsFromTablet(t, replica1) +func getReplica(t *testing.T, replicaIndex int) *cluster.Vttablet { + switch replicaIndex { + case 0: + return replica1 + case 1: + return replica2 + default: + assert.Failf(t, "invalid replica index", "index=%d", replicaIndex) + return nil + } +} + +func ReadRowsFromReplica(t *testing.T, replicaIndex int) (msgs []string) { + return ReadRowsFromTablet(t, getReplica(t, replicaIndex)) +} + +// FlushBinaryLogsOnReplica issues `FLUSH BINARY LOGS` times +func FlushBinaryLogsOnReplica(t *testing.T, replicaIndex int, count int) { + replica := getReplica(t, replicaIndex) + query := "flush binary logs" + for i := 0; i < count; i++ { + _, err := replica.VttabletProcess.QueryTablet(query, keyspaceName, true) + require.NoError(t, err) + } +} + +// FlushAndPurgeBinaryLogsOnReplica intentionally loses all existing binary logs. It flushes into a new binary log +// and immediately purges all previous logs. +// This is used to lose information. +func FlushAndPurgeBinaryLogsOnReplica(t *testing.T, replicaIndex int) (lastBinlog string) { + FlushBinaryLogsOnReplica(t, replicaIndex, 1) + + replica := getReplica(t, replicaIndex) + { + query := "show binary logs" + rs, err := replica.VttabletProcess.QueryTablet(query, keyspaceName, true) + require.NoError(t, err) + require.NotEmpty(t, rs.Rows) + for _, row := range rs.Rows { + // binlog file name is first column + lastBinlog = row[0].ToString() + } + } + { + query, err := sqlparser.ParseAndBind("purge binary logs to %a", sqltypes.StringBindVariable(lastBinlog)) + require.NoError(t, err) + _, err = replica.VttabletProcess.QueryTablet(query, keyspaceName, true) + require.NoError(t, err) + } + return lastBinlog } func readManifestFile(t *testing.T, backupLocation string) (manifest *mysqlctl.BackupManifest) { // reading manifest - data, err := os.ReadFile(backupLocation + "/MANIFEST") + fullPath := backupLocation + "/MANIFEST" + data, err := os.ReadFile(fullPath) require.NoErrorf(t, err, "error while reading MANIFEST %v", err) // parsing manifest @@ -1014,19 +1199,50 @@ func readManifestFile(t *testing.T, backupLocation string) (manifest *mysqlctl.B return manifest } -func TestReplicaFullBackup(t *testing.T) (manifest *mysqlctl.BackupManifest, destroy func(t *testing.T)) { - backups, destroy := vtctlBackupReplicaNoDestroyNoWrites(t, "replica") +func TestReplicaFullBackup(t *testing.T, replicaIndex int) (manifest *mysqlctl.BackupManifest) { + backups := vtctlBackupReplicaNoDestroyNoWrites(t, replicaIndex) backupLocation := localCluster.CurrentVTDATAROOT + "/backups/" + shardKsName + "/" + backups[len(backups)-1] - return readManifestFile(t, backupLocation), destroy + return readManifestFile(t, backupLocation) +} + +// waitForNumBackups waits for GetBackups to list exactly the given expected number. +// If expectNumBackups < 0 then any response is considered valid +func waitForNumBackups(t *testing.T, expectNumBackups int) []string { + ctx, cancel := context.WithTimeout(context.Background(), topoConsistencyTimeout) + defer cancel() + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + backups, err := localCluster.ListBackups(shardKsName) + require.NoError(t, err) + if expectNumBackups < 0 { + // any result is valid + return backups + } + if len(backups) == expectNumBackups { + // what we waited for + return backups + } + assert.Less(t, len(backups), expectNumBackups) + select { + case <-ctx.Done(): + assert.Failf(t, ctx.Err().Error(), "expected %d backups, got %d", expectNumBackups, len(backups)) + return nil + case <-ticker.C: + } + } } -func TestReplicaIncrementalBackup(t *testing.T, incrementalFromPos mysql.Position, expectError string) (manifest *mysqlctl.BackupManifest, backupName string) { +func testReplicaIncrementalBackup(t *testing.T, replica *cluster.Vttablet, incrementalFromPos replication.Position, expectError string) (manifest *mysqlctl.BackupManifest, backupName string) { + numBackups := len(waitForNumBackups(t, -1)) incrementalFromPosArg := "auto" if !incrementalFromPos.IsZero() { - incrementalFromPosArg = mysql.EncodePosition(incrementalFromPos) + incrementalFromPosArg = replication.EncodePosition(incrementalFromPos) } - output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("Backup", "--", "--incremental_from_pos", incrementalFromPosArg, replica1.Alias) + output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("Backup", "--incremental-from-pos", incrementalFromPosArg, replica.Alias) if expectError != "" { require.Errorf(t, err, "expected: %v", expectError) require.Contains(t, output, expectError) @@ -1034,21 +1250,159 @@ func TestReplicaIncrementalBackup(t *testing.T, incrementalFromPos mysql.Positio } require.NoErrorf(t, err, "output: %v", output) - backups, err := localCluster.ListBackups(shardKsName) - require.NoError(t, err) + backups := waitForNumBackups(t, numBackups+1) + require.NotEmptyf(t, backups, "output: %v", output) + + verifyTabletBackupStats(t, replica.VttabletProcess.GetVars()) backupName = backups[len(backups)-1] backupLocation := localCluster.CurrentVTDATAROOT + "/backups/" + shardKsName + "/" + backupName return readManifestFile(t, backupLocation), backupName } -func TestReplicaRestoreToPos(t *testing.T, restoreToPos mysql.Position, expectError string) { +func TestReplicaIncrementalBackup(t *testing.T, replicaIndex int, incrementalFromPos replication.Position, expectError string) (manifest *mysqlctl.BackupManifest, backupName string) { + replica := getReplica(t, replicaIndex) + return testReplicaIncrementalBackup(t, replica, incrementalFromPos, expectError) +} + +func TestReplicaFullRestore(t *testing.T, replicaIndex int, expectError string) { + replica := getReplica(t, replicaIndex) + + output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("RestoreFromBackup", replica.Alias) + if expectError != "" { + require.Errorf(t, err, "expected: %v", expectError) + require.Contains(t, output, expectError) + return + } + require.NoErrorf(t, err, "output: %v", output) + verifyTabletRestoreStats(t, replica.VttabletProcess.GetVars()) +} + +func TestReplicaRestoreToPos(t *testing.T, replicaIndex int, restoreToPos replication.Position, expectError string) { + replica := getReplica(t, replicaIndex) + require.False(t, restoreToPos.IsZero()) - restoreToPosArg := mysql.EncodePosition(restoreToPos) - output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("RestoreFromBackup", "--", "--restore_to_pos", restoreToPosArg, replica1.Alias) + restoreToPosArg := replication.EncodePosition(restoreToPos) + output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("RestoreFromBackup", "--", "--restore_to_pos", restoreToPosArg, replica.Alias) + if expectError != "" { + require.Errorf(t, err, "expected: %v", expectError) + require.Contains(t, output, expectError) + return + } + require.NoErrorf(t, err, "output: %v", output) + verifyTabletRestoreStats(t, replica.VttabletProcess.GetVars()) +} + +func TestReplicaRestoreToTimestamp(t *testing.T, restoreToTimestamp time.Time, expectError string) { + require.False(t, restoreToTimestamp.IsZero()) + restoreToTimestampArg := mysqlctl.FormatRFC3339(restoreToTimestamp) + output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("RestoreFromBackup", "--restore-to-timestamp", restoreToTimestampArg, replica1.Alias) if expectError != "" { require.Errorf(t, err, "expected: %v", expectError) require.Contains(t, output, expectError) return } require.NoErrorf(t, err, "output: %v", output) + verifyTabletRestoreStats(t, replica1.VttabletProcess.GetVars()) +} + +func verifyTabletBackupStats(t *testing.T, vars map[string]any) { + // Currently only the builtin backup engine instruments bytes-processed + // counts. + if !useXtrabackup { + require.Contains(t, vars, "BackupBytes") + bb := vars["BackupBytes"].(map[string]any) + require.Contains(t, bb, "BackupEngine.Builtin.Compressor:Write") + require.Contains(t, bb, "BackupEngine.Builtin.Destination:Write") + require.Contains(t, bb, "BackupEngine.Builtin.Source:Read") + if backupstorage.BackupStorageImplementation == "file" { + require.Contains(t, bb, "BackupStorage.File.File:Write") + } + } + + require.Contains(t, vars, "BackupCount") + bc := vars["BackupCount"].(map[string]any) + require.Contains(t, bc, "-.-.Backup") + // Currently only the builtin backup engine implements operation counts. + if !useXtrabackup { + require.Contains(t, bc, "BackupEngine.Builtin.Compressor:Close") + require.Contains(t, bc, "BackupEngine.Builtin.Destination:Close") + require.Contains(t, bc, "BackupEngine.Builtin.Destination:Open") + require.Contains(t, bc, "BackupEngine.Builtin.Source:Close") + require.Contains(t, bc, "BackupEngine.Builtin.Source:Open") + } + + require.Contains(t, vars, "BackupDurationNanoseconds") + bd := vars["BackupDurationNanoseconds"] + require.Contains(t, bd, "-.-.Backup") + // Currently only the builtin backup engine emits timings. + if !useXtrabackup { + require.Contains(t, bd, "BackupEngine.Builtin.Compressor:Close") + require.Contains(t, bd, "BackupEngine.Builtin.Compressor:Write") + require.Contains(t, bd, "BackupEngine.Builtin.Destination:Close") + require.Contains(t, bd, "BackupEngine.Builtin.Destination:Open") + require.Contains(t, bd, "BackupEngine.Builtin.Destination:Write") + require.Contains(t, bd, "BackupEngine.Builtin.Source:Close") + require.Contains(t, bd, "BackupEngine.Builtin.Source:Open") + require.Contains(t, bd, "BackupEngine.Builtin.Source:Read") + } + if backupstorage.BackupStorageImplementation == "file" { + require.Contains(t, bd, "BackupStorage.File.File:Write") + } + +} + +func verifyRestorePositionAndTimeStats(t *testing.T, vars map[string]any) { + backupPosition := vars["RestorePosition"].(string) + backupTime := vars["RestoredBackupTime"].(string) + require.Contains(t, vars, "RestoredBackupTime") + require.Contains(t, vars, "RestorePosition") + require.NotEqual(t, "", backupPosition) + require.NotEqual(t, "", backupTime) + rp, err := replication.DecodePosition(backupPosition) + require.NoError(t, err) + require.False(t, rp.IsZero()) +} + +func verifyTabletRestoreStats(t *testing.T, vars map[string]any) { + // Currently only the builtin backup engine instruments bytes-processed + // counts. + + verifyRestorePositionAndTimeStats(t, vars) + + if !useXtrabackup { + require.Contains(t, vars, "RestoreBytes") + bb := vars["RestoreBytes"].(map[string]any) + require.Contains(t, bb, "BackupEngine.Builtin.Decompressor:Read") + require.Contains(t, bb, "BackupEngine.Builtin.Destination:Write") + require.Contains(t, bb, "BackupEngine.Builtin.Source:Read") + require.Contains(t, bb, "BackupStorage.File.File:Read") + } + + require.Contains(t, vars, "RestoreCount") + bc := vars["RestoreCount"].(map[string]any) + require.Contains(t, bc, "-.-.Restore") + // Currently only the builtin backup engine emits operation counts. + if !useXtrabackup { + require.Contains(t, bc, "BackupEngine.Builtin.Decompressor:Close") + require.Contains(t, bc, "BackupEngine.Builtin.Destination:Close") + require.Contains(t, bc, "BackupEngine.Builtin.Destination:Open") + require.Contains(t, bc, "BackupEngine.Builtin.Source:Close") + require.Contains(t, bc, "BackupEngine.Builtin.Source:Open") + } + + require.Contains(t, vars, "RestoreDurationNanoseconds") + bd := vars["RestoreDurationNanoseconds"] + require.Contains(t, bd, "-.-.Restore") + // Currently only the builtin backup engine emits timings. + if !useXtrabackup { + require.Contains(t, bd, "BackupEngine.Builtin.Decompressor:Close") + require.Contains(t, bd, "BackupEngine.Builtin.Decompressor:Read") + require.Contains(t, bd, "BackupEngine.Builtin.Destination:Close") + require.Contains(t, bd, "BackupEngine.Builtin.Destination:Open") + require.Contains(t, bd, "BackupEngine.Builtin.Destination:Write") + require.Contains(t, bd, "BackupEngine.Builtin.Source:Close") + require.Contains(t, bd, "BackupEngine.Builtin.Source:Open") + require.Contains(t, bd, "BackupEngine.Builtin.Source:Read") + } + require.Contains(t, bd, "BackupStorage.File.File:Read") } diff --git a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go new file mode 100644 index 00000000000..2468940b641 --- /dev/null +++ b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go @@ -0,0 +1,641 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vtctlbackup + +import ( + "context" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/replication" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/mysqlctl" +) + +var ( + gracefulPostBackupDuration = 10 * time.Millisecond +) + +const ( + postWriteSleepDuration = 2 * time.Second // Nice for debugging purposes: clearly distinguishes the timestamps of certain operations, and as results the names/timestamps of backups. +) + +const ( + operationFullBackup = iota + operationIncrementalBackup + operationRestore + operationFlushAndPurge +) + +type PITRTestCase struct { + Name string + SetupType int + ComprssDetails *CompressionDetails +} + +type testedBackupTimestampInfo struct { + rows int + postTimestamp time.Time +} + +func waitForReplica(t *testing.T, replicaIndex int) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + pMsgs := ReadRowsFromPrimary(t) + for { + rMsgs := ReadRowsFromReplica(t, replicaIndex) + if len(pMsgs) == len(rMsgs) { + // success + return + } + select { + case <-ctx.Done(): + assert.FailNow(t, "timeout waiting for replica to catch up") + return + case <-time.After(time.Second): + // + } + } +} + +// ExecTestIncrementalBackupAndRestoreToPos runs a series of backups: a full backup and multiple incremental backups. +// in between, it makes writes to the database, and takes notes: what data was available in what backup. +// It then restores each and every one of those backups, in random order, and expects to find the specific data associated with the backup. +func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) { + defer cluster.PanicHandler(t) + + t.Run(tcase.Name, func(t *testing.T) { + // setup cluster for the testing + code, err := LaunchCluster(tcase.SetupType, "xbstream", 0, tcase.ComprssDetails) + require.NoError(t, err, "setup failed with status code %d", code) + defer TearDownCluster() + + InitTestTable(t) + + rowsPerPosition := map[string]int{} + backupPositions := []string{} + + recordRowsPerPosition := func(t *testing.T) { + pos := GetReplicaPosition(t, 0) + msgs := ReadRowsFromReplica(t, 0) + if _, ok := rowsPerPosition[pos]; !ok { + backupPositions = append(backupPositions, pos) + rowsPerPosition[pos] = len(msgs) + } + } + + var fullBackupPos replication.Position + t.Run("full backup", func(t *testing.T) { + InsertRowOnPrimary(t, "before-full-backup") + waitForReplica(t, 0) + + manifest := TestReplicaFullBackup(t, 0) + fullBackupPos = manifest.Position + require.False(t, fullBackupPos.IsZero()) + // + msgs := ReadRowsFromReplica(t, 0) + pos := replication.EncodePosition(fullBackupPos) + backupPositions = append(backupPositions, pos) + rowsPerPosition[pos] = len(msgs) + }) + + lastBackupPos := fullBackupPos + InsertRowOnPrimary(t, "before-incremental-backups") + + tt := []struct { + name string + writeBeforeBackup bool + fromFullPosition bool + autoPosition bool + expectError string + }{ + { + name: "first incremental backup", + }, + { + name: "fail1", + expectError: "no binary logs to backup", + }, + { + name: "fail2", + expectError: "no binary logs to backup", + }, + { + name: "make writes, succeed", + writeBeforeBackup: true, + }, + { + name: "fail, no binary logs to backup", + expectError: "no binary logs to backup", + }, + { + name: "make writes again, succeed", + writeBeforeBackup: true, + }, + { + name: "auto position, succeed", + writeBeforeBackup: true, + autoPosition: true, + }, + { + name: "fail auto position, no binary logs to backup", + autoPosition: true, + expectError: "no binary logs to backup", + }, + { + name: "auto position, make writes again, succeed", + writeBeforeBackup: true, + autoPosition: true, + }, + { + name: "from full backup position", + fromFullPosition: true, + }, + } + var fromFullPositionBackups []string + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + if tc.writeBeforeBackup { + InsertRowOnPrimary(t, "") + } + // we wait for >1 second because backups are written to a directory named after the current timestamp, + // in 1 second resolution. We want to avoid two backups that have the same pathname. Realistically this + // is only ever a problem in this end-to-end test, not in production. + // Also, we give the replica a chance to catch up. + time.Sleep(postWriteSleepDuration) + // randomly flush binary logs 0, 1 or 2 times + FlushBinaryLogsOnReplica(t, 0, rand.Intn(3)) + waitForReplica(t, 0) + recordRowsPerPosition(t) + // configure --incremental-from-pos to either: + // - auto + // - explicit last backup pos + // - back in history to the original full backup + var incrementalFromPos replication.Position + if !tc.autoPosition { + incrementalFromPos = lastBackupPos + if tc.fromFullPosition { + incrementalFromPos = fullBackupPos + } + } + // always use same 1st replica + manifest, backupName := TestReplicaIncrementalBackup(t, 0, incrementalFromPos, tc.expectError) + if tc.expectError != "" { + return + } + defer func() { + lastBackupPos = manifest.Position + }() + if tc.fromFullPosition { + fromFullPositionBackups = append(fromFullPositionBackups, backupName) + } + require.False(t, manifest.FromPosition.IsZero()) + require.NotEqual(t, manifest.Position, manifest.FromPosition) + require.True(t, manifest.Position.GTIDSet.Union(manifest.PurgedPosition.GTIDSet).Contains(manifest.FromPosition.GTIDSet)) + + gtidPurgedPos, err := replication.ParsePosition(replication.Mysql56FlavorID, GetReplicaGtidPurged(t, 0)) + require.NoError(t, err) + fromPositionIncludingPurged := manifest.FromPosition.GTIDSet.Union(gtidPurgedPos.GTIDSet) + + expectFromPosition := lastBackupPos.GTIDSet + if !incrementalFromPos.IsZero() { + expectFromPosition = incrementalFromPos.GTIDSet.Union(gtidPurgedPos.GTIDSet) + } + require.Equalf(t, expectFromPosition, fromPositionIncludingPurged, "expected: %v, found: %v, gtid_purged: %v, manifest.Position: %v", expectFromPosition, fromPositionIncludingPurged, gtidPurgedPos, manifest.Position) + }) + } + + testRestores := func(t *testing.T) { + for _, r := range rand.Perm(len(backupPositions)) { + pos := backupPositions[r] + testName := fmt.Sprintf("%s, %d records", pos, rowsPerPosition[pos]) + t.Run(testName, func(t *testing.T) { + restoreToPos, err := replication.DecodePosition(pos) + require.NoError(t, err) + TestReplicaRestoreToPos(t, 0, restoreToPos, "") + msgs := ReadRowsFromReplica(t, 0) + count, ok := rowsPerPosition[pos] + require.True(t, ok) + assert.Equalf(t, count, len(msgs), "messages: %v", msgs) + }) + } + } + t.Run("PITR", func(t *testing.T) { + testRestores(t) + }) + t.Run("remove full position backups", func(t *testing.T) { + // Delete the fromFullPosition backup(s), which leaves us with less restore options. Try again. + for _, backupName := range fromFullPositionBackups { + RemoveBackup(t, backupName) + } + }) + t.Run("PITR-2", func(t *testing.T) { + testRestores(t) + }) + }) +} + +// ExecTestIncrementalBackupAndRestoreToPos +func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTestCase) { + defer cluster.PanicHandler(t) + + var lastInsertedRowTimestamp time.Time + insertRowOnPrimary := func(t *testing.T, hint string) { + InsertRowOnPrimary(t, hint) + lastInsertedRowTimestamp = time.Now() + } + + t.Run(tcase.Name, func(t *testing.T) { + // setup cluster for the testing + code, err := LaunchCluster(tcase.SetupType, "xbstream", 0, &CompressionDetails{ + CompressorEngineName: "pgzip", + }) + require.NoError(t, err, "setup failed with status code %d", code) + defer TearDownCluster() + + InitTestTable(t) + + testedBackups := []testedBackupTimestampInfo{} + + var fullBackupPos replication.Position + t.Run("full backup", func(t *testing.T) { + insertRowOnPrimary(t, "before-full-backup") + waitForReplica(t, 0) + + manifest := TestReplicaFullBackup(t, 0) + fullBackupPos = manifest.Position + require.False(t, fullBackupPos.IsZero()) + // + rows := ReadRowsFromReplica(t, 0) + testedBackups = append(testedBackups, testedBackupTimestampInfo{len(rows), time.Now()}) + }) + + lastBackupPos := fullBackupPos + insertRowOnPrimary(t, "before-incremental-backups") + + tt := []struct { + name string + writeBeforeBackup bool + fromFullPosition bool + autoPosition bool + expectError string + }{ + { + name: "first incremental backup", + }, + { + name: "fail1", + expectError: "no binary logs to backup", + }, + { + name: "fail2", + expectError: "no binary logs to backup", + }, + { + name: "make writes, succeed", + writeBeforeBackup: true, + }, + { + name: "fail, no binary logs to backup", + expectError: "no binary logs to backup", + }, + { + name: "make writes again, succeed", + writeBeforeBackup: true, + }, + { + name: "auto position, succeed", + writeBeforeBackup: true, + autoPosition: true, + }, + { + name: "fail auto position, no binary logs to backup", + autoPosition: true, + expectError: "no binary logs to backup", + }, + { + name: "auto position, make writes again, succeed", + writeBeforeBackup: true, + autoPosition: true, + }, + { + name: "from full backup position", + fromFullPosition: true, + }, + } + var fromFullPositionBackups []string + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + if tc.writeBeforeBackup { + insertRowOnPrimary(t, "") + } + // we wait for >1 second because backups are written to a directory named after the current timestamp, + // in 1 second resolution. We want to avoid two backups that have the same pathname. Realistically this + // is only ever a problem in this end-to-end test, not in production. + // Also, we give the replica a chance to catch up. + time.Sleep(postWriteSleepDuration) + waitForReplica(t, 0) + rowsBeforeBackup := ReadRowsFromReplica(t, 0) + // configure --incremental-from-pos to either: + // - auto + // - explicit last backup pos + // - back in history to the original full backup + var incrementalFromPos replication.Position + if !tc.autoPosition { + incrementalFromPos = lastBackupPos + if tc.fromFullPosition { + incrementalFromPos = fullBackupPos + } + } + manifest, backupName := TestReplicaIncrementalBackup(t, 0, incrementalFromPos, tc.expectError) + if tc.expectError != "" { + return + } + // We wish to mark the current post-backup timestamp. We will later on retore to this point in time. + // However, the restore is up to and _exclusive_ of the timestamp. So for test's sake, we sleep + // an extra few milliseconds just to ensure the timestamp we read is strictly after the backup time. + // This is basicaly to avoid weird flakiness in CI. + time.Sleep(gracefulPostBackupDuration) + testedBackups = append(testedBackups, testedBackupTimestampInfo{len(rowsBeforeBackup), time.Now()}) + defer func() { + lastBackupPos = manifest.Position + }() + if tc.fromFullPosition { + fromFullPositionBackups = append(fromFullPositionBackups, backupName) + } + require.False(t, manifest.FromPosition.IsZero()) + require.NotEqual(t, manifest.Position, manifest.FromPosition) + require.True(t, manifest.Position.GTIDSet.Union(manifest.PurgedPosition.GTIDSet).Contains(manifest.FromPosition.GTIDSet)) + { + incrDetails := manifest.IncrementalDetails + require.NotNil(t, incrDetails) + require.NotEmpty(t, incrDetails.FirstTimestamp) + require.NotEmpty(t, incrDetails.FirstTimestampBinlog) + require.NotEmpty(t, incrDetails.LastTimestamp) + require.NotEmpty(t, incrDetails.LastTimestampBinlog) + require.GreaterOrEqual(t, incrDetails.LastTimestamp, incrDetails.FirstTimestamp) + + if tc.fromFullPosition { + require.Greater(t, incrDetails.LastTimestampBinlog, incrDetails.FirstTimestampBinlog) + } else { + // No binlog rotation + require.Equal(t, incrDetails.LastTimestampBinlog, incrDetails.FirstTimestampBinlog) + } + } + + gtidPurgedPos, err := replication.ParsePosition(replication.Mysql56FlavorID, GetReplicaGtidPurged(t, 0)) + require.NoError(t, err) + fromPositionIncludingPurged := manifest.FromPosition.GTIDSet.Union(gtidPurgedPos.GTIDSet) + + expectFromPosition := lastBackupPos.GTIDSet.Union(gtidPurgedPos.GTIDSet) + if !incrementalFromPos.IsZero() { + expectFromPosition = incrementalFromPos.GTIDSet.Union(gtidPurgedPos.GTIDSet) + } + require.Equalf(t, expectFromPosition, fromPositionIncludingPurged, "expected: %v, found: %v, gtid_purged: %v, manifest.Position: %v", expectFromPosition, fromPositionIncludingPurged, gtidPurgedPos, manifest.Position) + }) + } + + testRestores := func(t *testing.T) { + numFailedRestores := 0 + numSuccessfulRestores := 0 + for _, backupIndex := range rand.Perm(len(testedBackups)) { + testedBackup := testedBackups[backupIndex] + testName := fmt.Sprintf("backup num%v at %v, %v rows", backupIndex, mysqlctl.FormatRFC3339(testedBackup.postTimestamp), testedBackup.rows) + t.Run(testName, func(t *testing.T) { + expectError := "" + if testedBackup.postTimestamp.After(lastInsertedRowTimestamp) { + // The restore_to_timestamp value is beyond the last incremental + // There is no path to restore to this timestamp. + expectError = "no path found" + } + TestReplicaRestoreToTimestamp(t, testedBackup.postTimestamp, expectError) + if expectError == "" { + msgs := ReadRowsFromReplica(t, 0) + assert.Equalf(t, testedBackup.rows, len(msgs), "messages: %v", msgs) + numSuccessfulRestores++ + } else { + numFailedRestores++ + } + }) + } + // Integrity check for the test itself: ensure we have both successful and failed restores. + require.NotZero(t, numFailedRestores) + require.NotZero(t, numSuccessfulRestores) + } + t.Run("PITR", func(t *testing.T) { + testRestores(t) + }) + t.Run("remove full position backups", func(t *testing.T) { + // Delete the fromFullPosition backup(s), which leaves us with less restore options. Try again. + for _, backupName := range fromFullPositionBackups { + RemoveBackup(t, backupName) + } + }) + t.Run("PITR-2", func(t *testing.T) { + testRestores(t) + }) + }) +} + +// ExecTestIncrementalBackupOnTwoTablets runs a series of interleaved backups on two different replicas: full and incremental. +// Specifically, it's designed to test how incremental backups are taken by interleaved replicas, so that they successfully build on +// one another. +func ExecTestIncrementalBackupOnTwoTablets(t *testing.T, tcase *PITRTestCase) { + defer cluster.PanicHandler(t) + + t.Run(tcase.Name, func(t *testing.T) { + // setup cluster for the testing + code, err := LaunchCluster(tcase.SetupType, "xbstream", 0, tcase.ComprssDetails) + require.NoError(t, err, "setup failed with status code %d", code) + defer TearDownCluster() + + InitTestTable(t) + + rowsPerPosition := map[string]int{} + + recordRowsPerPosition := func(t *testing.T, replicaIndex int) { + pos := GetReplicaPosition(t, replicaIndex) + msgs := ReadRowsFromReplica(t, replicaIndex) + if _, ok := rowsPerPosition[pos]; !ok { + rowsPerPosition[pos] = len(msgs) + } + } + + var lastBackupPos replication.Position + InsertRowOnPrimary(t, "before-incremental-backups") + waitForReplica(t, 0) + waitForReplica(t, 1) + + tt := []struct { + name string + operationType int + replicaIndex int + expectError string + }{ + // The following tests run sequentially and build on top of previous results + { + name: "full1", + operationType: operationFullBackup, + }, + { + name: "incremental1", + operationType: operationIncrementalBackup, + }, + { + name: "restore1", + operationType: operationRestore, + }, + { + // Shows you can take an incremental restore when full & incremental backups were only ever executed on a different replica + name: "incremental2", + operationType: operationIncrementalBackup, + replicaIndex: 1, + }, + { + name: "full2", + operationType: operationFullBackup, + replicaIndex: 1, + }, + { + // This incremental backup will use full2 as the base backup + name: "incremental2-after-full2", + operationType: operationIncrementalBackup, + replicaIndex: 1, + }, + { + name: "restore2", + operationType: operationRestore, + replicaIndex: 1, + }, + // Begin a series of interleaved incremental backups + { + name: "incremental-replica1", + operationType: operationIncrementalBackup, + }, + { + name: "incremental-replica2", + operationType: operationIncrementalBackup, + replicaIndex: 1, + }, + { + name: "incremental-replica1", + operationType: operationIncrementalBackup, + }, + { + name: "incremental-replica2", + operationType: operationIncrementalBackup, + replicaIndex: 1, + }, + // Done interleaved backups. + { + // Lose binary log data + name: "flush and purge 1", + operationType: operationFlushAndPurge, + replicaIndex: 0, + }, + { + // Fail to run incremental backup due to lost data + name: "incremental-replica1 failure", + operationType: operationIncrementalBackup, + expectError: "Required entries have been purged", + }, + { + // Lose binary log data + name: "flush and purge 2", + operationType: operationFlushAndPurge, + replicaIndex: 1, + }, + { + // Fail to run incremental backup due to lost data + name: "incremental-replica2 failure", + operationType: operationIncrementalBackup, + replicaIndex: 1, + expectError: "Required entries have been purged", + }, + { + // Since we've lost binlog data, incremental backups are no longer possible. The situation can be salvaged by running a full backup + name: "full1 after purge", + operationType: operationFullBackup, + }, + { + // Show that replica2 incremental backup is able to work based on the above full backup + name: "incremental-replica2 after purge and backup", + operationType: operationIncrementalBackup, + replicaIndex: 1, + }, + } + insertRowAndWait := func(t *testing.T, replicaIndex int, data string) { + t.Run("insert row and wait", func(t *testing.T) { + InsertRowOnPrimary(t, data) + time.Sleep(postWriteSleepDuration) + waitForReplica(t, replicaIndex) + recordRowsPerPosition(t, replicaIndex) + }) + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + insertRowAndWait(t, tc.replicaIndex, tc.name) + t.Run("running operation", func(t *testing.T) { + switch tc.operationType { + case operationFlushAndPurge: + FlushAndPurgeBinaryLogsOnReplica(t, tc.replicaIndex) + case operationFullBackup: + manifest := TestReplicaFullBackup(t, tc.replicaIndex) + fullBackupPos := manifest.Position + require.False(t, fullBackupPos.IsZero()) + // + msgs := ReadRowsFromReplica(t, tc.replicaIndex) + pos := replication.EncodePosition(fullBackupPos) + rowsPerPosition[pos] = len(msgs) + + lastBackupPos = fullBackupPos + case operationIncrementalBackup: + var incrementalFromPos replication.Position // keep zero, we will use "auto" + manifest, _ := TestReplicaIncrementalBackup(t, tc.replicaIndex, incrementalFromPos, tc.expectError) + if tc.expectError != "" { + return + } + defer func() { + lastBackupPos = manifest.Position + }() + require.False(t, manifest.FromPosition.IsZero()) + require.NotEqual(t, manifest.Position, manifest.FromPosition) + require.True(t, manifest.Position.GTIDSet.Union(manifest.PurgedPosition.GTIDSet).Contains(manifest.FromPosition.GTIDSet)) + + gtidPurgedPos, err := replication.ParsePosition(replication.Mysql56FlavorID, GetReplicaGtidPurged(t, tc.replicaIndex)) + require.NoError(t, err) + fromPositionIncludingPurged := manifest.FromPosition.GTIDSet.Union(gtidPurgedPos.GTIDSet) + + require.True(t, lastBackupPos.GTIDSet.Contains(fromPositionIncludingPurged), "expected: %v to contain %v", lastBackupPos.GTIDSet, fromPositionIncludingPurged) + case operationRestore: + TestReplicaFullRestore(t, tc.replicaIndex, "") + // should return into replication stream + insertRowAndWait(t, tc.replicaIndex, "post-restore-check") + default: + require.FailNowf(t, "unknown operation type", "operation: %d", tc.operationType) + } + }) + }) + } + }) +} diff --git a/go/test/endtoend/backup/xtrabackup/xtrabackup_test.go b/go/test/endtoend/backup/xtrabackup/xtrabackup_test.go index 972e829cad7..3402a170310 100644 --- a/go/test/endtoend/backup/xtrabackup/xtrabackup_test.go +++ b/go/test/endtoend/backup/xtrabackup/xtrabackup_test.go @@ -29,7 +29,7 @@ func TestXtrabackup(t *testing.T) { backup.TestBackup(t, backup.XtraBackup, "tar", 0, nil, nil) } -func TestXtrabackWithZstdCompression(t *testing.T) { +func TestXtrabackupWithZstdCompression(t *testing.T) { defer setDefaultCompressionFlag() cDetails := &backup.CompressionDetails{ CompressorEngineName: "zstd", @@ -41,9 +41,38 @@ func TestXtrabackWithZstdCompression(t *testing.T) { backup.TestBackup(t, backup.XtraBackup, "tar", 0, cDetails, []string{"TestReplicaBackup"}) } +func TestXtrabackupWithExternalZstdCompression(t *testing.T) { + defer setDefaultCompressionFlag() + cDetails := &backup.CompressionDetails{ + CompressorEngineName: "external", + ExternalCompressorCmd: "zstd", + ExternalCompressorExt: ".zst", + ExternalDecompressorCmd: "zstd -d", + } + + backup.TestBackup(t, backup.XtraBackup, "tar", 0, cDetails, []string{"TestReplicaBackup"}) +} + +func TestXtrabackupWithExternalZstdCompressionAndManifestedDecompressor(t *testing.T) { + defer setDefaultCompressionFlag() + cDetails := &backup.CompressionDetails{ + CompressorEngineName: "external", + ExternalCompressorCmd: "zstd", + ExternalCompressorExt: ".zst", + ManifestExternalDecompressorCmd: "zstd -d", + } + + backup.TestBackup(t, backup.XtraBackup, "tar", 0, cDetails, []string{"TestReplicaBackup"}) +} + +func TestDoNotDemoteNewlyPromotedPrimaryIfReparentingDuringBackup(t *testing.T) { + backup.TestBackup(t, backup.XtraBackup, "xbstream", 0, nil, []string{"DoNotDemoteNewlyPromotedPrimaryIfReparentingDuringBackup"}) +} + func setDefaultCompressionFlag() { mysqlctl.CompressionEngineName = "pgzip" mysqlctl.ExternalCompressorCmd = "" mysqlctl.ExternalCompressorExt = "" mysqlctl.ExternalDecompressorCmd = "" + mysqlctl.ManifestExternalDecompressorCmd = "" } diff --git a/go/test/endtoend/cellalias/cell_alias_test.go b/go/test/endtoend/cellalias/cell_alias_test.go index 47f3108e6c7..9c2a29d2eb1 100644 --- a/go/test/endtoend/cellalias/cell_alias_test.go +++ b/go/test/endtoend/cellalias/cell_alias_test.go @@ -28,6 +28,7 @@ import ( "os" "os/exec" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -132,7 +133,11 @@ func TestMain(m *testing.M) { var mysqlProcs []*exec.Cmd for _, tablet := range []*cluster.Vttablet{shard1Primary, shard1Replica, shard1Rdonly, shard2Primary, shard2Replica, shard2Rdonly} { - tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory) + mysqlctlProcess, err := cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory) + if err != nil { + return 1, err + } + tablet.MysqlctlProcess = *mysqlctlProcess tablet.VttabletProcess = cluster.VttabletProcessInstance(tablet.HTTPPort, tablet.GrpcPort, tablet.TabletUID, @@ -339,12 +344,9 @@ func TestAddAliasWhileVtgateUp(t *testing.T) { func waitTillAllTabletsAreHealthyInVtgate(t *testing.T, vtgateInstance cluster.VtgateProcess, shards ...string) { for _, shard := range shards { - err := vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspaceName, shard), 1) - require.Nil(t, err) - err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard), 1) - require.Nil(t, err) - err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspaceName, shard), 1) - require.Nil(t, err) + require.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspaceName, shard), 1, 30*time.Second)) + require.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shard), 1, 30*time.Second)) + require.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspaceName, shard), 1, 30*time.Second)) } } diff --git a/go/test/endtoend/cluster/cluster_process.go b/go/test/endtoend/cluster/cluster_process.go index 82c667f95ae..46efd8dc974 100644 --- a/go/test/endtoend/cluster/cluster_process.go +++ b/go/test/endtoend/cluster/cluster_process.go @@ -36,18 +36,21 @@ import ( "testing" "time" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/filelock" "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/vtgateconn" "vitess.io/vitess/go/vt/vttablet/tabletconn" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" // Ensure dialers are registered (needed by ExecOnTablet and ExecOnVTGate). _ "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn" @@ -58,6 +61,7 @@ import ( const ( DefaultCell = "zone1" DefaultStartPort = 6700 + DefaultVttestEnv = "VTTEST=endtoend" ) var ( @@ -142,15 +146,15 @@ type Vttablet struct { MysqlctlProcess MysqlctlProcess MysqlctldProcess MysqlctldProcess VttabletProcess *VttabletProcess - VtgrProcess *VtgrProcess } // Keyspace : Cluster accepts keyspace to launch it type Keyspace struct { - Name string - SchemaSQL string - VSchema string - Shards []Shard + Name string + SchemaSQL string + VSchema string + SidecarDBName string + Shards []Shard } // Shard with associated vttablets @@ -229,8 +233,8 @@ func (cluster *LocalProcessCluster) StartTopo() (err error) { } } + cluster.VtctlProcess = *VtctlProcessInstance(cluster.TopoProcess.Port, cluster.Hostname) if !cluster.ReusingVTDATAROOT { - cluster.VtctlProcess = *VtctlProcessInstance(cluster.TopoProcess.Port, cluster.Hostname) if err = cluster.VtctlProcess.AddCellInfo(cluster.Cell); err != nil { log.Error(err) return @@ -248,6 +252,7 @@ func (cluster *LocalProcessCluster) StartTopo() (err error) { } cluster.VtctlclientProcess = *VtctlClientProcessInstance("localhost", cluster.VtctldProcess.GrpcPort, cluster.TmpDirectory) + cluster.VtctldClientProcess = *VtctldClientProcessInstance("localhost", cluster.VtctldProcess.GrpcPort, cluster.TmpDirectory) return } @@ -324,16 +329,17 @@ func (cluster *LocalProcessCluster) startKeyspace(keyspace Keyspace, shardNames } log.Infof("Starting keyspace: %v", keyspace.Name) - if !cluster.ReusingVTDATAROOT { - _ = cluster.VtctlProcess.CreateKeyspace(keyspace.Name) + if keyspace.SidecarDBName == "" { + keyspace.SidecarDBName = sidecar.DefaultName } - var mysqlctlProcessList []*exec.Cmd + // Create the keyspace if it doesn't already exist. + _ = cluster.VtctlProcess.CreateKeyspace(keyspace.Name, keyspace.SidecarDBName) for _, shardName := range shardNames { shard := &Shard{ Name: shardName, } log.Infof("Starting shard: %v", shardName) - mysqlctlProcessList = []*exec.Cmd{} + var mysqlctlProcessList []*exec.Cmd for i := 0; i < totalTabletsRequired; i++ { // instantiate vttablet object with reserved ports tabletUID := cluster.GetAndReserveTabletUID() @@ -352,7 +358,11 @@ func (cluster *LocalProcessCluster) startKeyspace(keyspace Keyspace, shardNames } // Start Mysqlctl process log.Infof("Starting mysqlctl for table uid %d, mysql port %d", tablet.TabletUID, tablet.MySQLPort) - tablet.MysqlctlProcess = *MysqlCtlProcessInstanceOptionalInit(tablet.TabletUID, tablet.MySQLPort, cluster.TmpDirectory, !cluster.ReusingVTDATAROOT) + mysqlctlProcess, err := MysqlCtlProcessInstanceOptionalInit(tablet.TabletUID, tablet.MySQLPort, cluster.TmpDirectory, !cluster.ReusingVTDATAROOT) + if err != nil { + return err + } + tablet.MysqlctlProcess = *mysqlctlProcess proc, err := tablet.MysqlctlProcess.StartProcess() if err != nil { log.Errorf("error starting mysqlctl process: %v, %v", tablet.MysqlctldProcess, err) @@ -471,9 +481,11 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard } log.Infof("Starting keyspace: %v", keyspace.Name) - if !cluster.ReusingVTDATAROOT { - _ = cluster.VtctlProcess.CreateKeyspace(keyspace.Name) + if keyspace.SidecarDBName == "" { + keyspace.SidecarDBName = sidecar.DefaultName } + // Create the keyspace if it doesn't already exist. + _ = cluster.VtctlProcess.CreateKeyspace(keyspace.Name, keyspace.SidecarDBName) var mysqlctlProcessList []*exec.Cmd for _, shardName := range shardNames { shard := &Shard{ @@ -499,7 +511,11 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard } // Start Mysqlctl process log.Infof("Starting mysqlctl for table uid %d, mysql port %d", tablet.TabletUID, tablet.MySQLPort) - tablet.MysqlctlProcess = *MysqlCtlProcessInstanceOptionalInit(tablet.TabletUID, tablet.MySQLPort, cluster.TmpDirectory, !cluster.ReusingVTDATAROOT) + mysqlctlProcess, err := MysqlCtlProcessInstanceOptionalInit(tablet.TabletUID, tablet.MySQLPort, cluster.TmpDirectory, !cluster.ReusingVTDATAROOT) + if err != nil { + return err + } + tablet.MysqlctlProcess = *mysqlctlProcess proc, err := tablet.MysqlctlProcess.StartProcess() if err != nil { log.Errorf("error starting mysqlctl process: %v, %v", tablet.MysqlctldProcess, err) @@ -606,9 +622,13 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard func (cluster *LocalProcessCluster) SetupCluster(keyspace *Keyspace, shards []Shard) (err error) { log.Infof("Starting keyspace: %v", keyspace.Name) + if keyspace.SidecarDBName == "" { + keyspace.SidecarDBName = sidecar.DefaultName + } + if !cluster.ReusingVTDATAROOT { // Create Keyspace - err = cluster.VtctlProcess.CreateKeyspace(keyspace.Name) + err = cluster.VtctlProcess.CreateKeyspace(keyspace.Name, keyspace.SidecarDBName) if err != nil { log.Error(err) return @@ -619,7 +639,11 @@ func (cluster *LocalProcessCluster) SetupCluster(keyspace *Keyspace, shards []Sh for _, shard := range shards { for _, tablet := range shard.Vttablets { // Setup MysqlctlProcess - tablet.MysqlctlProcess = *MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, cluster.TmpDirectory) + mysqlctlProcess, err := MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, cluster.TmpDirectory) + if err != nil { + return err + } + tablet.MysqlctlProcess = *mysqlctlProcess // Setup VttabletProcess tablet.VttabletProcess = VttabletProcessInstance( tablet.HTTPPort, @@ -680,7 +704,7 @@ func (cluster *LocalProcessCluster) NewVtgateInstance() *VtgateProcess { cluster.Cell, cluster.Cell, cluster.Hostname, - "PRIMARY,REPLICA", + "PRIMARY", cluster.TopoProcess.Port, cluster.TmpDirectory, cluster.VtGateExtraArgs, @@ -703,7 +727,10 @@ func NewBareCluster(cell string, hostname string) *LocalProcessCluster { // path/to/whatever exists cluster.ReusingVTDATAROOT = true } else { - _ = createDirectory(cluster.CurrentVTDATAROOT, 0700) + err = createDirectory(cluster.CurrentVTDATAROOT, 0700) + if err != nil { + log.Fatal(err) + } } _ = os.Setenv("VTDATAROOT", cluster.CurrentVTDATAROOT) log.Infof("Created cluster on %s. ReusingVTDATAROOT=%v", cluster.CurrentVTDATAROOT, cluster.ReusingVTDATAROOT) @@ -783,14 +810,14 @@ func (cluster *LocalProcessCluster) WaitForTabletsToHealthyInVtgate() (err error rdonlyTabletCount++ } } - if err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspace.Name, shard.Name), 1); err != nil { + if err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspace.Name, shard.Name), 1, 2*time.Minute); err != nil { return err } if replicaTabletCount > 0 { - err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace.Name, shard.Name), replicaTabletCount) + err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace.Name, shard.Name), replicaTabletCount, 2*time.Minute) } if rdonlyTabletCount > 0 { - err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspace.Name, shard.Name), rdonlyTabletCount) + err = cluster.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspace.Name, shard.Name), rdonlyTabletCount, 2*time.Minute) } if err != nil { return err @@ -800,6 +827,24 @@ func (cluster *LocalProcessCluster) WaitForTabletsToHealthyInVtgate() (err error return nil } +// WaitForVTGateAndVTTablets waits for as long as you like for the vtgate and any +// vttablets to be healthy. +func (cluster *LocalProcessCluster) WaitForVTGateAndVTTablets(howlong time.Duration) error { + timeout := time.After(howlong) + for { + select { + case <-timeout: + return vterrors.New(vtrpcpb.Code_CANCELED, "timed out waiting for cluster to become healthy") + default: + err := cluster.WaitForTabletsToHealthyInVtgate() + if err != nil { + continue + } + return nil + } + } +} + // ExecOnTablet executes a query on the local cluster Vttablet and returns the // result. func (cluster *LocalProcessCluster) ExecOnTablet(ctx context.Context, vttablet *Vttablet, sql string, binds map[string]any, opts *querypb.ExecuteOptions) (*sqltypes.Result, error) { @@ -808,7 +853,7 @@ func (cluster *LocalProcessCluster) ExecOnTablet(ctx context.Context, vttablet * return nil, err } - tablet, err := cluster.vtctlclientGetTablet(vttablet) + tablet, err := cluster.VtctlclientGetTablet(vttablet) if err != nil { return nil, err } @@ -851,7 +896,7 @@ func (cluster *LocalProcessCluster) ExecOnVTGate(ctx context.Context, addr strin // returns the responses. It returns an error if the stream ends with fewer than // `count` responses. func (cluster *LocalProcessCluster) StreamTabletHealth(ctx context.Context, vttablet *Vttablet, count int) (responses []*querypb.StreamHealthResponse, err error) { - tablet, err := cluster.vtctlclientGetTablet(vttablet) + tablet, err := cluster.VtctlclientGetTablet(vttablet) if err != nil { return nil, err } @@ -883,7 +928,47 @@ func (cluster *LocalProcessCluster) StreamTabletHealth(ctx context.Context, vtta return responses, nil } -func (cluster *LocalProcessCluster) vtctlclientGetTablet(tablet *Vttablet) (*topodatapb.Tablet, error) { +// StreamTabletHealthUntil invokes a HealthStream on a local cluster Vttablet and +// returns the responses. It waits until a certain condition is met. The amount of time to wait is an input that it takes. +func (cluster *LocalProcessCluster) StreamTabletHealthUntil(ctx context.Context, vttablet *Vttablet, timeout time.Duration, condition func(shr *querypb.StreamHealthResponse) bool) error { + tablet, err := cluster.VtctlclientGetTablet(vttablet) + if err != nil { + return err + } + + conn, err := tabletconn.GetDialer()(tablet, grpcclient.FailFast(false)) + if err != nil { + return err + } + + conditionSuccess := false + timeoutExceeded := false + go func() { + time.Sleep(timeout) + timeoutExceeded = true + }() + + err = conn.StreamHealth(ctx, func(shr *querypb.StreamHealthResponse) error { + if condition(shr) { + conditionSuccess = true + } + if timeoutExceeded || conditionSuccess { + return io.EOF + } + return nil + }) + + if conditionSuccess { + return nil + } + + if timeoutExceeded { + return errors.New("timeout exceed while waiting for the condition in StreamHealth") + } + return err +} + +func (cluster *LocalProcessCluster) VtctlclientGetTablet(tablet *Vttablet) (*topodatapb.Tablet, error) { result, err := cluster.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", "--", tablet.Alias) if err != nil { return nil, err @@ -1158,19 +1243,6 @@ func (cluster *LocalProcessCluster) NewVTOrcProcess(config VTOrcConfiguration) * } } -// NewVtgrProcess creates a new VtgrProcess object -func (cluster *LocalProcessCluster) NewVtgrProcess(clusters []string, config string, grPort int) *VtgrProcess { - base := VtctlProcessInstance(cluster.TopoProcess.Port, cluster.Hostname) - base.Binary = "vtgr" - return &VtgrProcess{ - VtctlProcess: *base, - LogDir: cluster.TmpDirectory, - clusters: clusters, - config: config, - grPort: grPort, - } -} - // VtprocessInstanceFromVttablet creates a new vttablet object func (cluster *LocalProcessCluster) VtprocessInstanceFromVttablet(tablet *Vttablet, shardName string, ksName string) *VttabletProcess { return VttabletProcessInstance( @@ -1190,8 +1262,16 @@ func (cluster *LocalProcessCluster) VtprocessInstanceFromVttablet(tablet *Vttabl } // StartVttablet starts a new tablet -func (cluster *LocalProcessCluster) StartVttablet(tablet *Vttablet, servingStatus string, - supportBackup bool, cell string, keyspaceName string, hostname string, shardName string) error { +func (cluster *LocalProcessCluster) StartVttablet( + tablet *Vttablet, + explicitServingStatus bool, + servingStatus string, + supportBackup bool, + cell string, + keyspaceName string, + hostname string, + shardName string, +) error { tablet.VttabletProcess = VttabletProcessInstance( tablet.HTTPPort, tablet.GrpcPort, @@ -1209,6 +1289,7 @@ func (cluster *LocalProcessCluster) StartVttablet(tablet *Vttablet, servingStatu tablet.VttabletProcess.SupportsBackup = supportBackup tablet.VttabletProcess.ServingStatus = servingStatus + tablet.VttabletProcess.ExplicitServingStatus = explicitServingStatus return tablet.VttabletProcess.Setup() } @@ -1267,3 +1348,19 @@ func (cluster *LocalProcessCluster) EnableVTOrcRecoveries(t *testing.T) { vtorc.EnableGlobalRecoveries(t) } } + +// EnableGeneralLog enables generals logs on all the mysql server started by this cluster. +// This method should be used only for local debugging purpose. +func (cluster *LocalProcessCluster) EnableGeneralLog() error { + for _, ks := range cluster.Keyspaces { + for _, shard := range ks.Shards { + for _, vttablet := range shard.Vttablets { + _, err := vttablet.VttabletProcess.QueryTablet("set global general_log = 1", "", false) + if err != nil { + return err + } + } + } + } + return nil +} diff --git a/go/test/endtoend/cluster/cluster_util.go b/go/test/endtoend/cluster/cluster_util.go index ea2dd0d7e20..0e3cc2d0c95 100644 --- a/go/test/endtoend/cluster/cluster_util.go +++ b/go/test/endtoend/cluster/cluster_util.go @@ -26,6 +26,11 @@ import ( "testing" "time" + "google.golang.org/grpc" + + "vitess.io/vitess/go/vt/grpcclient" + "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn" + "github.com/buger/jsonparser" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -43,7 +48,7 @@ var ( dbCredentialFile string InsertTabletTemplateKsID = `insert into %s (id, msg) values (%d, '%s') /* id:%d */` defaultOperationTimeout = 60 * time.Second - defeaultRetryDelay = 1 * time.Second + defaultRetryDelay = 1 * time.Second ) // Restart restarts vttablet and mysql. @@ -54,15 +59,17 @@ func (tablet *Vttablet) Restart() error { if tablet.MysqlctlProcess.TabletUID > 0 { tablet.MysqlctlProcess.Stop() + tablet.MysqlctldProcess.WaitForMysqlCtldShutdown() tablet.VttabletProcess.TearDown() - os.RemoveAll(tablet.VttabletProcess.Directory) + tablet.MysqlctldProcess.CleanupFiles(tablet.TabletUID) return tablet.MysqlctlProcess.Start() } tablet.MysqlctldProcess.Stop() + tablet.MysqlctldProcess.WaitForMysqlCtldShutdown() tablet.VttabletProcess.TearDown() - os.RemoveAll(tablet.VttabletProcess.Directory) + tablet.MysqlctldProcess.CleanupFiles(tablet.TabletUID) return tablet.MysqlctldProcess.Start() } @@ -179,12 +186,22 @@ func getTablet(tabletGrpcPort int, hostname string) *topodatapb.Tablet { func filterResultForWarning(input string) string { lines := strings.Split(input, "\n") var result string - for _, line := range lines { + for i, line := range lines { if strings.Contains(line, "WARNING: vtctl should only be used for VDiff v1 workflows. Please use VDiff v2 and consider using vtctldclient for all other commands.") { continue } - result = result + line + "\n" + + if strings.Contains(line, "Failed to read in config") && strings.Contains(line, `Config File "vtconfig" Not Found in`) { + continue + } + + result += line + + if i < len(lines)-1 { + result += "\n" + } } + return result } @@ -306,6 +323,7 @@ func GetPasswordUpdateSQL(localCluster *LocalProcessCluster) string { SET PASSWORD FOR 'vt_allprivs'@'localhost' = 'VtAllprivsPass'; SET PASSWORD FOR 'vt_repl'@'%' = 'VtReplPass'; SET PASSWORD FOR 'vt_filtered'@'localhost' = 'VtFilteredPass'; + SET PASSWORD FOR 'vt_appdebug'@'localhost' = 'VtDebugPass'; FLUSH PRIVILEGES; ` return pwdChangeCmd @@ -385,6 +403,20 @@ func WaitForTabletSetup(vtctlClientProcess *VtctlClientProcess, expectedTablets return fmt.Errorf("all %d tablet are not in expected state %s", expectedTablets, expectedStatus) } +// GetSidecarDBName returns the sidecar database name configured for +// the keyspace in the topo server. +func (cluster LocalProcessCluster) GetSidecarDBName(keyspace string) (string, error) { + res, err := cluster.VtctldClientProcess.ExecuteCommandWithOutput("GetKeyspace", keyspace) + if err != nil { + return "", err + } + sdbn, err := jsonparser.GetString([]byte(res), "sidecar_db_name") + if err != nil { + return "", err + } + return sdbn, nil +} + // WaitForHealthyShard waits for the given shard info record in the topo // server to list a tablet (alias and uid) as the primary serving tablet // for the shard. This is done using "vtctldclient GetShard" and parsing @@ -426,6 +458,16 @@ func WaitForHealthyShard(vtctldclient *VtctldClientProcess, keyspace, shard stri default: } - time.Sleep(defeaultRetryDelay) + time.Sleep(defaultRetryDelay) } } + +// DialVTGate returns a VTGate grpc connection. +func DialVTGate(ctx context.Context, name, addr, username, password string) (*vtgateconn.VTGateConn, error) { + clientCreds := &grpcclient.StaticAuthClientCreds{Username: username, Password: password} + creds := grpc.WithPerRPCCredentials(clientCreds) + dialerFunc := grpcvtgateconn.Dial(creds) + dialerName := name + vtgateconn.RegisterDialer(dialerName, dialerFunc) + return vtgateconn.DialProtocol(ctx, dialerName, addr) +} diff --git a/go/test/endtoend/cluster/mysqlctl_process.go b/go/test/endtoend/cluster/mysqlctl_process.go index eafc8f6b98f..b5e7cfb5a32 100644 --- a/go/test/endtoend/cluster/mysqlctl_process.go +++ b/go/test/endtoend/cluster/mysqlctl_process.go @@ -19,7 +19,6 @@ package cluster import ( "context" "fmt" - "html/template" "os" "os/exec" "path" @@ -28,8 +27,11 @@ import ( "syscall" "time" + "github.com/google/safehtml/template" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/tlstest" ) @@ -146,6 +148,8 @@ ssl_key={{.ServerKey}} } } tmpProcess.Args = append(tmpProcess.Args, "start") + tmpProcess.Env = append(tmpProcess.Env, os.Environ()...) + tmpProcess.Env = append(tmpProcess.Env, DefaultVttestEnv) log.Infof("Starting mysqlctl with command: %v", tmpProcess.Args) return tmpProcess, tmpProcess.Start() } @@ -216,11 +220,7 @@ func (mysqlctl *MysqlctlProcess) BinaryLogsPath() string { // CleanupFiles clean the mysql files to make sure we can start the same process again func (mysqlctl *MysqlctlProcess) CleanupFiles(tabletUID int) { - os.RemoveAll(path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d/data", tabletUID))) - os.RemoveAll(path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d/relay-logs", tabletUID))) - os.RemoveAll(path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d/tmp", tabletUID))) - os.RemoveAll(path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d/bin-logs", tabletUID))) - os.RemoveAll(path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d/innodb", tabletUID))) + os.RemoveAll(path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d", tabletUID))) } // Connect returns a new connection to the underlying MySQL server @@ -235,36 +235,65 @@ func (mysqlctl *MysqlctlProcess) Connect(ctx context.Context, username string) ( // MysqlCtlProcessInstanceOptionalInit returns a Mysqlctl handle for mysqlctl process // configured with the given Config. -func MysqlCtlProcessInstanceOptionalInit(tabletUID int, mySQLPort int, tmpDirectory string, initMySQL bool) *MysqlctlProcess { +func MysqlCtlProcessInstanceOptionalInit(tabletUID int, mySQLPort int, tmpDirectory string, initMySQL bool) (*MysqlctlProcess, error) { + initFile, err := getInitDBFileUsed() + if err != nil { + return nil, err + } mysqlctl := &MysqlctlProcess{ Name: "mysqlctl", Binary: "mysqlctl", LogDirectory: tmpDirectory, - InitDBFile: path.Join(os.Getenv("VTROOT"), "/config/init_db.sql"), + InitDBFile: initFile, } mysqlctl.MySQLPort = mySQLPort mysqlctl.TabletUID = tabletUID mysqlctl.InitMysql = initMySQL mysqlctl.SecureTransport = false - return mysqlctl + return mysqlctl, nil +} + +func getInitDBFileUsed() (string, error) { + versionStr, err := mysqlctl.GetVersionString() + if err != nil { + return "", err + } + flavor, _, err := mysqlctl.ParseVersionString(versionStr) + if err != nil { + return "", err + } + if flavor == mysqlctl.FlavorMySQL || flavor == mysqlctl.FlavorPercona { + return path.Join(os.Getenv("VTROOT"), "/config/init_db.sql"), nil + } + // Non-MySQL instances for example MariaDB, will use init_testserver_db.sql which does not contain super_read_only global variable. + // Even though MariaDB support is deprecated (https://github.com/vitessio/vitess/issues/9518) but we still support migration scenario. + return path.Join(os.Getenv("VTROOT"), "go/test/endtoend/vreplication/testdata/config/init_testserver_db.sql"), nil } // MysqlCtlProcessInstance returns a Mysqlctl handle for mysqlctl process // configured with the given Config. -func MysqlCtlProcessInstance(tabletUID int, mySQLPort int, tmpDirectory string) *MysqlctlProcess { +func MysqlCtlProcessInstance(tabletUID int, mySQLPort int, tmpDirectory string) (*MysqlctlProcess, error) { return MysqlCtlProcessInstanceOptionalInit(tabletUID, mySQLPort, tmpDirectory, true) } // StartMySQL starts mysqlctl process func StartMySQL(ctx context.Context, tablet *Vttablet, username string, tmpDirectory string) error { - tablet.MysqlctlProcess = *MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, tmpDirectory) + mysqlctlProcess, err := MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, tmpDirectory) + if err != nil { + return err + } + tablet.MysqlctlProcess = *mysqlctlProcess return tablet.MysqlctlProcess.Start() } // StartMySQLAndGetConnection create a connection to tablet mysql func StartMySQLAndGetConnection(ctx context.Context, tablet *Vttablet, username string, tmpDirectory string) (*mysql.Conn, error) { - tablet.MysqlctlProcess = *MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, tmpDirectory) - err := tablet.MysqlctlProcess.Start() + mysqlctlProcess, err := MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, tmpDirectory) + if err != nil { + return nil, err + } + tablet.MysqlctlProcess = *mysqlctlProcess + err = tablet.MysqlctlProcess.Start() if err != nil { return nil, err } diff --git a/go/test/endtoend/cluster/mysqlctld_process.go b/go/test/endtoend/cluster/mysqlctld_process.go index d71f2e3b1c8..9a0f36e3918 100644 --- a/go/test/endtoend/cluster/mysqlctld_process.go +++ b/go/test/endtoend/cluster/mysqlctld_process.go @@ -43,17 +43,24 @@ type MysqlctldProcess struct { process *exec.Cmd exit chan error InitMysql bool + SocketFile string exitSignalReceived bool } // InitDb executes mysqlctld command to add cell info func (mysqlctld *MysqlctldProcess) InitDb() (err error) { - tmpProcess := exec.Command( - mysqlctld.Binary, + args := []string{ "--log_dir", mysqlctld.LogDirectory, "--tablet_uid", fmt.Sprintf("%d", mysqlctld.TabletUID), "--mysql_port", fmt.Sprintf("%d", mysqlctld.MySQLPort), "--init_db_sql_file", mysqlctld.InitDBFile, + } + if mysqlctld.SocketFile != "" { + args = append(args, "--socket_file", mysqlctld.SocketFile) + } + tmpProcess := exec.Command( + mysqlctld.Binary, + args..., ) return tmpProcess.Run() } @@ -64,11 +71,17 @@ func (mysqlctld *MysqlctldProcess) Start() error { return fmt.Errorf("process is already running") } _ = createDirectory(mysqlctld.LogDirectory, 0700) - tempProcess := exec.Command( - mysqlctld.Binary, + args := []string{ "--log_dir", mysqlctld.LogDirectory, "--tablet_uid", fmt.Sprintf("%d", mysqlctld.TabletUID), "--mysql_port", fmt.Sprintf("%d", mysqlctld.MySQLPort), + } + if mysqlctld.SocketFile != "" { + args = append(args, "--socket_file", mysqlctld.SocketFile) + } + tempProcess := exec.Command( + mysqlctld.Binary, + args..., ) tempProcess.Args = append(tempProcess.Args, mysqlctld.ExtraArgs...) @@ -82,6 +95,7 @@ func (mysqlctld *MysqlctldProcess) Start() error { tempProcess.Stderr = errFile tempProcess.Env = append(tempProcess.Env, os.Environ()...) + tempProcess.Env = append(tempProcess.Env, DefaultVttestEnv) tempProcess.Stdout = os.Stdout tempProcess.Stderr = os.Stderr @@ -144,17 +158,21 @@ func (mysqlctld *MysqlctldProcess) CleanupFiles(tabletUID int) { // MysqlCtldProcessInstance returns a Mysqlctld handle for mysqlctld process // configured with the given Config. -func MysqlCtldProcessInstance(tabletUID int, mySQLPort int, tmpDirectory string) *MysqlctldProcess { +func MysqlCtldProcessInstance(tabletUID int, mySQLPort int, tmpDirectory string) (*MysqlctldProcess, error) { + initFile, err := getInitDBFileUsed() + if err != nil { + return nil, err + } mysqlctld := &MysqlctldProcess{ Name: "mysqlctld", Binary: "mysqlctld", LogDirectory: tmpDirectory, - InitDBFile: path.Join(os.Getenv("VTROOT"), "/config/init_db.sql"), + InitDBFile: initFile, } mysqlctld.MySQLPort = mySQLPort mysqlctld.TabletUID = tabletUID mysqlctld.InitMysql = true - return mysqlctld + return mysqlctld, nil } // IsHealthy gives the health status of mysql. @@ -164,3 +182,24 @@ func (mysqlctld *MysqlctldProcess) IsHealthy() bool { _, err := mysql.Connect(context.Background(), ¶ms) return err == nil } + +// HasShutdown checks if the process has been set to nil +func (mysqlctld *MysqlctldProcess) hasShutdown() bool { + return mysqlctld.process == nil +} + +func (mysqlctld *MysqlctldProcess) WaitForMysqlCtldShutdown() bool { + tmr := time.NewTimer(defaultOperationTimeout) + defer tmr.Stop() + for { + if mysqlctld.hasShutdown() { + return true + } + select { + case <-tmr.C: + return false + default: + } + time.Sleep(defaultRetryDelay) + } +} diff --git a/go/test/endtoend/cluster/topo_process.go b/go/test/endtoend/cluster/topo_process.go index 7326aa57a52..6a9ba1ec438 100644 --- a/go/test/endtoend/cluster/topo_process.go +++ b/go/test/endtoend/cluster/topo_process.go @@ -17,8 +17,10 @@ limitations under the License. package cluster import ( + "context" "encoding/json" "fmt" + "net" "net/http" "os" "os/exec" @@ -27,7 +29,10 @@ import ( "syscall" "time" + clientv3 "go.etcd.io/etcd/client/v3" + "vitess.io/vitess/go/vt/log" + vtopo "vitess.io/vitess/go/vt/topo" ) // TopoProcess is a generic handle for a running Topo service . @@ -44,6 +49,7 @@ type TopoProcess struct { VerifyURL string PeerURL string ZKPorts string + Client interface{} proc *exec.Cmd exit chan error @@ -57,10 +63,9 @@ func (topo *TopoProcess) Setup(topoFlavor string, cluster *LocalProcessCluster) case "consul": return topo.SetupConsul(cluster) default: - // We still rely on the etcd v2 API for things like mkdir. - // If this ENV var is not set then some tests may fail with etcd 3.4+ - // where the v2 API is disabled by default in both the client and server. - os.Setenv("ETCDCTL_API", "2") + // Override any inherited ETCDCTL_API env value to + // ensure that we use the v3 API and storage. + os.Setenv("ETCDCTL_API", "3") return topo.SetupEtcd() } } @@ -77,7 +82,6 @@ func (topo *TopoProcess) SetupEtcd() (err error) { "--initial-advertise-peer-urls", topo.PeerURL, "--listen-peer-urls", topo.PeerURL, "--initial-cluster", fmt.Sprintf("%s=%s", topo.Name, topo.PeerURL), - "--enable-v2=true", ) err = createDirectory(topo.DataDirectory, 0700) @@ -92,6 +96,7 @@ func (topo *TopoProcess) SetupEtcd() (err error) { topo.proc.Stderr = errFile topo.proc.Env = append(topo.proc.Env, os.Environ()...) + topo.proc.Env = append(topo.proc.Env, DefaultVttestEnv) log.Infof("Starting etcd with command: %v", strings.Join(topo.proc.Args, " ")) @@ -109,6 +114,14 @@ func (topo *TopoProcess) SetupEtcd() (err error) { timeout := time.Now().Add(60 * time.Second) for time.Now().Before(timeout) { if topo.IsHealthy() { + cli, cerr := clientv3.New(clientv3.Config{ + Endpoints: []string{net.JoinHostPort(topo.Host, fmt.Sprintf("%d", topo.Port))}, + DialTimeout: 5 * time.Second, + }) + if cerr != nil { + return err + } + topo.Client = cli return } select { @@ -125,7 +138,6 @@ func (topo *TopoProcess) SetupEtcd() (err error) { // SetupZookeeper spawns a new zookeeper topo service and initializes it with the defaults. // The service is kept running in the background until TearDown() is called. func (topo *TopoProcess) SetupZookeeper(cluster *LocalProcessCluster) (err error) { - host, err := os.Hostname() if err != nil { return @@ -171,7 +183,6 @@ type PortsInfo struct { // SetupConsul spawns a new consul service and initializes it with the defaults. // The service is kept running in the background until TearDown() is called. func (topo *TopoProcess) SetupConsul(cluster *LocalProcessCluster) (err error) { - topo.VerifyURL = fmt.Sprintf("http://%s:%d/v1/kv/?keys", topo.Host, topo.Port) _ = os.MkdirAll(topo.LogDirectory, os.ModePerm) @@ -247,8 +258,16 @@ func (topo *TopoProcess) SetupConsul(cluster *LocalProcessCluster) (err error) { return fmt.Errorf("process '%s' timed out after 60s (err: %s)", topo.Binary, <-topo.exit) } -// TearDown shutdowns the running topo service +// TearDown shutdowns the running topo service. func (topo *TopoProcess) TearDown(Cell string, originalVtRoot string, currentRoot string, keepdata bool, topoFlavor string) error { + if topo.Client != nil { + switch cli := topo.Client.(type) { + case *clientv3.Client: + _ = cli.Close() + default: + log.Errorf("Unknown topo client type %T", cli) + } + } if topoFlavor == "zk2" { cmd := "shutdown" @@ -324,6 +343,9 @@ func (topo *TopoProcess) ManageTopoDir(command string, directory string) (err er url := topo.VerifyURL + directory payload := strings.NewReader(`{"dir":"true"}`) if command == "mkdir" { + if *topoFlavor == "etcd2" { // No need to create the empty prefix keys in v3 + return nil + } req, _ := http.NewRequest("PUT", url, payload) req.Header.Add("content-type", "application/json") resp, err := http.DefaultClient.Do(req) @@ -332,6 +354,22 @@ func (topo *TopoProcess) ManageTopoDir(command string, directory string) (err er } return err } else if command == "rmdir" { + if *topoFlavor == "etcd2" { + if topo.Client == nil { + return fmt.Errorf("etcd client is not initialized") + } + cli, ok := topo.Client.(*clientv3.Client) + if !ok { + return fmt.Errorf("etcd client is invalid") + } + ctx, cancel := context.WithTimeout(context.Background(), vtopo.RemoteOperationTimeout) + defer cancel() + _, err = cli.Delete(ctx, directory, clientv3.WithPrefix()) + if err != nil { + return err + } + return nil + } req, _ := http.NewRequest("DELETE", url+"?dir=true", payload) resp, err := http.DefaultClient.Do(req) if err == nil { @@ -366,7 +404,7 @@ func TopoProcessInstance(port int, peerPort int, hostname string, flavor string, topo.ListenClientURL = fmt.Sprintf("http://%s:%d", topo.Host, topo.Port) topo.DataDirectory = path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("%s_%d", "topo", port)) topo.LogDirectory = path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("%s_%d", "topo", port), "logs") - topo.VerifyURL = fmt.Sprintf("http://%s:%d/v2/keys", topo.Host, topo.Port) + topo.VerifyURL = fmt.Sprintf("http://%s:%d/health", topo.Host, topo.Port) topo.PeerURL = fmt.Sprintf("http://%s:%d", hostname, peerPort) return topo } diff --git a/go/test/endtoend/cluster/vtbackup_process.go b/go/test/endtoend/cluster/vtbackup_process.go index b7beed67936..ba508e8d593 100644 --- a/go/test/endtoend/cluster/vtbackup_process.go +++ b/go/test/endtoend/cluster/vtbackup_process.go @@ -54,7 +54,6 @@ type VtbackupProcess struct { // Setup starts vtbackup process with required arguements func (vtbackup *VtbackupProcess) Setup() (err error) { - vtbackup.proc = exec.Command( vtbackup.Binary, "--topo_implementation", vtbackup.CommonArg.TopoImplementation, @@ -85,6 +84,7 @@ func (vtbackup *VtbackupProcess) Setup() (err error) { vtbackup.proc.Stdout = os.Stdout vtbackup.proc.Env = append(vtbackup.proc.Env, os.Environ()...) + vtbackup.proc.Env = append(vtbackup.proc.Env, DefaultVttestEnv) log.Infof("Running vtbackup with args: %v", strings.Join(vtbackup.proc.Args, " ")) err = vtbackup.proc.Run() diff --git a/go/test/endtoend/cluster/vtctl_process.go b/go/test/endtoend/cluster/vtctl_process.go index 16bf19ecd0e..9b3d1a5f4e1 100644 --- a/go/test/endtoend/cluster/vtctl_process.go +++ b/go/test/endtoend/cluster/vtctl_process.go @@ -60,8 +60,15 @@ func (vtctl *VtctlProcess) AddCellInfo(Cell string) (err error) { } // CreateKeyspace executes vtctl command to create keyspace -func (vtctl *VtctlProcess) CreateKeyspace(keyspace string) (err error) { - output, err := vtctl.ExecuteCommandWithOutput("CreateKeyspace", keyspace) +func (vtctl *VtctlProcess) CreateKeyspace(keyspace, sidecarDBName string) (err error) { + var output string + // For upgrade/downgrade tests where an older version is also used. + if vtctl.VtctlMajorVersion < 17 { + log.Errorf("CreateKeyspace does not support the --sidecar-db-name flag in vtctl version %d; ignoring...", vtctl.VtctlMajorVersion) + output, err = vtctl.ExecuteCommandWithOutput("CreateKeyspace", keyspace) + } else { + output, err = vtctl.ExecuteCommandWithOutput("CreateKeyspace", keyspace, "--sidecar-db-name", sidecarDBName) + } if err != nil { log.Errorf("CreateKeyspace returned err: %s, output: %s", err, output) } diff --git a/go/test/endtoend/cluster/vtctlclient_process.go b/go/test/endtoend/cluster/vtctlclient_process.go index 3c4941da2c2..0c5fb1bc8c2 100644 --- a/go/test/endtoend/cluster/vtctlclient_process.go +++ b/go/test/endtoend/cluster/vtctlclient_process.go @@ -44,6 +44,7 @@ type VtctlClientParams struct { MigrationContext string UUIDList string CallerID string + BatchSize int } // InitShardPrimary executes vtctlclient command to make specified tablet the primary for the shard. @@ -87,7 +88,9 @@ func (vtctlclient *VtctlClientProcess) ApplySchemaWithOutput(Keyspace string, SQ if params.UUIDList != "" { args = append(args, "--uuid_list", params.UUIDList) } - + if params.BatchSize > 0 { + args = append(args, "--batch_size", fmt.Sprintf("%d", params.BatchSize)) + } if params.CallerID != "" { args = append(args, "--caller_id", params.CallerID) } @@ -215,7 +218,7 @@ func (vtctlclient *VtctlClientProcess) ExecuteCommandWithOutput(args ...string) } time.Sleep(retryDelay) } - return filterResultWhenRunsForCoverage(resultStr), err + return filterResultForWarning(filterResultWhenRunsForCoverage(resultStr)), err } // VtctlClientProcessInstance returns a VtctlProcess handle for vtctlclient process diff --git a/go/test/endtoend/cluster/vtctld_process.go b/go/test/endtoend/cluster/vtctld_process.go index 5e85f172ce1..b8f6cf240fc 100644 --- a/go/test/endtoend/cluster/vtctld_process.go +++ b/go/test/endtoend/cluster/vtctld_process.go @@ -74,6 +74,7 @@ func (vtctld *VtctldProcess) Setup(cell string, extraArgs ...string) (err error) vtctld.proc.Stderr = errFile vtctld.proc.Env = append(vtctld.proc.Env, os.Environ()...) + vtctld.proc.Env = append(vtctld.proc.Env, DefaultVttestEnv) log.Infof("Starting vtctld with command: %v", strings.Join(vtctld.proc.Args, " ")) diff --git a/go/test/endtoend/cluster/vtctldclient_process.go b/go/test/endtoend/cluster/vtctldclient_process.go index 55c4102835e..52e0f985680 100644 --- a/go/test/endtoend/cluster/vtctldclient_process.go +++ b/go/test/endtoend/cluster/vtctldclient_process.go @@ -92,3 +92,41 @@ func VtctldClientProcessInstance(hostname string, grpcPort int, tmpDirectory str } return vtctldclient } + +// PlannedReparentShard executes vtctlclient command to make specified tablet the primary for the shard. +func (vtctldclient *VtctldClientProcess) PlannedReparentShard(Keyspace string, Shard string, alias string) (err error) { + output, err := vtctldclient.ExecuteCommandWithOutput( + "PlannedReparentShard", + fmt.Sprintf("%s/%s", Keyspace, Shard), + "--new-primary", alias) + if err != nil { + log.Errorf("error in PlannedReparentShard output %s, err %s", output, err.Error()) + } + return err +} + +// CreateKeyspace executes the vtctl command to create a keyspace +func (vtctldclient *VtctldClientProcess) CreateKeyspace(keyspaceName string, sidecarDBName string) (err error) { + var output string + // For upgrade/downgrade tests where an older version is also used. + if vtctldclient.VtctldClientMajorVersion < 17 { + log.Errorf("CreateKeyspace does not support the --sidecar-db-name flag in vtctl version %d; ignoring...", vtctldclient.VtctldClientMajorVersion) + output, err = vtctldclient.ExecuteCommandWithOutput("CreateKeyspace", keyspaceName) + } else { + output, err = vtctldclient.ExecuteCommandWithOutput("CreateKeyspace", keyspaceName, "--sidecar-db-name", sidecarDBName) + } + if err != nil { + log.Errorf("CreateKeyspace returned err: %s, output: %s", err, output) + } + return err +} + +// OnlineDDLShowRecent responds with recent schema migration list +func (vtctldclient *VtctldClientProcess) OnlineDDLShowRecent(Keyspace string) (result string, err error) { + return vtctldclient.ExecuteCommandWithOutput( + "OnlineDDL", + "show", + Keyspace, + "recent", + ) +} diff --git a/go/test/endtoend/cluster/vtgate_process.go b/go/test/endtoend/cluster/vtgate_process.go index 80e2d4cc4d7..48aecab7c1e 100644 --- a/go/test/endtoend/cluster/vtgate_process.go +++ b/go/test/endtoend/cluster/vtgate_process.go @@ -65,7 +65,7 @@ type VtgateProcess struct { exit chan error } -const defaultVtGatePlannerVersion = planbuilder.Gen4CompareV3 +const defaultVtGatePlannerVersion = planbuilder.Gen4 // Setup starts Vtgate process with required arguements func (vtgate *VtgateProcess) Setup() (err error) { @@ -127,6 +127,7 @@ func (vtgate *VtgateProcess) Setup() (err error) { vtgate.proc.Stderr = errFile vtgate.proc.Env = append(vtgate.proc.Env, os.Environ()...) + vtgate.proc.Env = append(vtgate.proc.Env, DefaultVttestEnv) log.Infof("Running vtgate with command: %v", strings.Join(vtgate.proc.Args, " ")) @@ -201,11 +202,11 @@ func (vtgate *VtgateProcess) GetStatusForTabletOfShard(name string, endPointsCou // WaitForStatusOfTabletInShard function waits till status of a tablet in shard is 1 // endPointsCount: how many endpoints to wait for -func (vtgate *VtgateProcess) WaitForStatusOfTabletInShard(name string, endPointsCount int) error { +func (vtgate *VtgateProcess) WaitForStatusOfTabletInShard(name string, endPointsCount int, timeout time.Duration) error { log.Infof("Waiting for healthy status of %d %s tablets in cell %s", endPointsCount, name, vtgate.Cell) - timeout := time.Now().Add(30 * time.Second) - for time.Now().Before(timeout) { + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { if vtgate.GetStatusForTabletOfShard(name, endPointsCount) { return nil } diff --git a/go/test/endtoend/cluster/vtgr_process.go b/go/test/endtoend/cluster/vtgr_process.go deleted file mode 100644 index 1960e469489..00000000000 --- a/go/test/endtoend/cluster/vtgr_process.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - "fmt" - "os" - "os/exec" - "path" - "strings" - "syscall" - "time" - - "vitess.io/vitess/go/vt/log" -) - -// VtgrProcess represents the vtgr process -type VtgrProcess struct { - VtctlProcess - LogDir string - ExtraArgs []string - clusters []string - config string - grPort int - proc *exec.Cmd - exit chan error -} - -// Start starts vtgr process with required arguements -func (vtgr *VtgrProcess) Start(alias string) (err error) { - /* minimal command line arguments: - $ vtgr --topo_implementation etcd2 \ - --topo_global_server_address localhost:2379 \ - --topo_global_root /vitess/global \ - --clusters_to_watch ks/0 - */ - vtgr.proc = exec.Command( - vtgr.Binary, - "--topo_implementation", vtgr.TopoImplementation, - "--topo_global_server_address", vtgr.TopoGlobalAddress, - "--topo_global_root", vtgr.TopoGlobalRoot, - "--tablet_manager_protocol", "grpc", - "--scan_repair_timeout", "50s", - "--clusters_to_watch", strings.Join(vtgr.clusters, ","), - ) - if vtgr.config != "" { - vtgr.proc.Args = append(vtgr.proc.Args, fmt.Sprintf("--config=%s", vtgr.config)) - } - if vtgr.grPort != 0 { - vtgr.proc.Args = append(vtgr.proc.Args, fmt.Sprintf("--gr_port=%d", vtgr.grPort)) - } - vtgr.proc.Args = append(vtgr.proc.Args, vtgr.ExtraArgs...) - errFile, _ := os.Create(path.Join(vtgr.LogDir, fmt.Sprintf("vtgr-stderr-%v.txt", alias))) - vtgr.proc.Stderr = errFile - vtgr.proc.Env = append(vtgr.proc.Env, os.Environ()...) - log.Infof("Running vtgr with command: %v", strings.Join(vtgr.proc.Args, " ")) - err = vtgr.proc.Start() - if err != nil { - return - } - - vtgr.exit = make(chan error) - go func() { - if vtgr.proc != nil { - vtgr.exit <- vtgr.proc.Wait() - close(vtgr.exit) - } - }() - - return nil -} - -// TearDown shuts down the running vtgr service -func (vtgr *VtgrProcess) TearDown() error { - if vtgr.proc == nil || vtgr.exit == nil { - return nil - } - // Attempt graceful shutdown with SIGTERM first - _ = vtgr.proc.Process.Signal(syscall.SIGTERM) - - select { - case <-vtgr.exit: - vtgr.proc = nil - return nil - - case <-time.After(10 * time.Second): - vtgr.proc.Process.Kill() - err := <-vtgr.exit - vtgr.proc = nil - return err - } -} diff --git a/go/test/endtoend/cluster/vtorc_process.go b/go/test/endtoend/cluster/vtorc_process.go index 882b716208a..28c6355a175 100644 --- a/go/test/endtoend/cluster/vtorc_process.go +++ b/go/test/endtoend/cluster/vtorc_process.go @@ -37,14 +37,15 @@ import ( // vtorc as a separate process for testing type VTOrcProcess struct { VtctlProcess - Port int - LogDir string - ExtraArgs []string - ConfigPath string - Config VTOrcConfiguration - WebPort int - proc *exec.Cmd - exit chan error + Port int + LogDir string + LogFileName string + ExtraArgs []string + ConfigPath string + Config VTOrcConfiguration + WebPort int + proc *exec.Cmd + exit chan error } type VTOrcConfiguration struct { @@ -55,6 +56,7 @@ type VTOrcConfiguration struct { MySQLReplicaUser string MySQLReplicaPassword string RecoveryPeriodBlockSeconds int + TopologyRefreshSeconds int `json:",omitempty"` PreventCrossDataCenterPrimaryFailover bool `json:",omitempty"` LockShardTimeoutSeconds int `json:",omitempty"` ReplicationLagQuery string `json:",omitempty"` @@ -124,10 +126,14 @@ func (orc *VTOrcProcess) Setup() (err error) { orc.proc.Args = append(orc.proc.Args, orc.ExtraArgs...) orc.proc.Args = append(orc.proc.Args, "--alsologtostderr") - errFile, _ := os.Create(path.Join(orc.LogDir, fmt.Sprintf("orc-stderr-%d.txt", timeNow))) + if orc.LogFileName == "" { + orc.LogFileName = fmt.Sprintf("orc-stderr-%d.txt", timeNow) + } + errFile, _ := os.Create(path.Join(orc.LogDir, orc.LogFileName)) orc.proc.Stderr = errFile orc.proc.Env = append(orc.proc.Env, os.Environ()...) + orc.proc.Env = append(orc.proc.Env, DefaultVttestEnv) log.Infof("Running vtorc with command: %v", strings.Join(orc.proc.Args, " ")) diff --git a/go/test/endtoend/cluster/vttablet_process.go b/go/test/endtoend/cluster/vttablet_process.go index 943a8750fa3..3bacb71b154 100644 --- a/go/test/endtoend/cluster/vttablet_process.go +++ b/go/test/endtoend/cluster/vttablet_process.go @@ -35,11 +35,15 @@ import ( "testing" "time" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" ) +const vttabletStateTimeout = 60 * time.Second + // VttabletProcess is a generic handle for a running vttablet . // It can be spawned manually type VttabletProcess struct { @@ -67,6 +71,7 @@ type VttabletProcess struct { QueryzURL string StatusDetailsURL string SupportsBackup bool + ExplicitServingStatus bool ServingStatus string DbPassword string DbPort int @@ -75,7 +80,7 @@ type VttabletProcess struct { Charset string ConsolidationsURL string - //Extra Args to be set before starting the vttablet process + // Extra Args to be set before starting the vttablet process ExtraArgs []string proc *exec.Cmd @@ -84,7 +89,6 @@ type VttabletProcess struct { // Setup starts vttablet process with required arguements func (vttablet *VttabletProcess) Setup() (err error) { - vttablet.proc = exec.Command( vttablet.Binary, "--topo_implementation", vttablet.CommonArg.TopoImplementation, @@ -129,6 +133,7 @@ func (vttablet *VttabletProcess) Setup() (err error) { vttablet.proc.Stderr = errFile vttablet.proc.Env = append(vttablet.proc.Env, os.Environ()...) + vttablet.proc.Env = append(vttablet.proc.Env, DefaultVttestEnv) log.Infof("Running vttablet with command: %v", strings.Join(vttablet.proc.Args, " ")) @@ -146,7 +151,15 @@ func (vttablet *VttabletProcess) Setup() (err error) { }() if vttablet.ServingStatus != "" { - if err = vttablet.WaitForTabletStatus(vttablet.ServingStatus); err != nil { + // If the tablet has an explicit serving status we use the serving status + // otherwise we wait for any serving status to show up in the healthcheck. + var servingStatus []string + if vttablet.ExplicitServingStatus { + servingStatus = append(servingStatus, vttablet.ServingStatus) + } else { + servingStatus = append(servingStatus, "SERVING", "NOT_SERVING") + } + if err = vttablet.WaitForTabletStatuses(servingStatus); err != nil { errFileContent, _ := os.ReadFile(fname) if errFileContent != nil { log.Infof("vttablet error:\n%s\n", string(errFileContent)) @@ -268,19 +281,19 @@ func (vttablet *VttabletProcess) GetTabletType() string { return "" } -// WaitForTabletStatus waits for 10 second till expected status is reached +// WaitForTabletStatus waits for one of the expected statuses to be reached func (vttablet *VttabletProcess) WaitForTabletStatus(expectedStatus string) error { - return vttablet.WaitForTabletStatusesForTimeout([]string{expectedStatus}, 10*time.Second) + return vttablet.WaitForTabletStatusesForTimeout([]string{expectedStatus}, vttabletStateTimeout) } -// WaitForTabletStatuses waits for 10 second till one of expected statuses is reached +// WaitForTabletStatuses waits for one of expected statuses is reached func (vttablet *VttabletProcess) WaitForTabletStatuses(expectedStatuses []string) error { - return vttablet.WaitForTabletStatusesForTimeout(expectedStatuses, 10*time.Second) + return vttablet.WaitForTabletStatusesForTimeout(expectedStatuses, vttabletStateTimeout) } -// WaitForTabletTypes waits for 10 second till one of expected statuses is reached +// WaitForTabletTypes waits for one of expected statuses is reached func (vttablet *VttabletProcess) WaitForTabletTypes(expectedTypes []string) error { - return vttablet.WaitForTabletTypesForTimeout(expectedTypes, 10*time.Second) + return vttablet.WaitForTabletTypesForTimeout(expectedTypes, vttabletStateTimeout) } // WaitForTabletStatusesForTimeout waits till the tablet reaches to any of the provided statuses @@ -334,7 +347,7 @@ func contains(arr []string, str string) bool { // WaitForBinLogPlayerCount waits till binlog player count var matches func (vttablet *VttabletProcess) WaitForBinLogPlayerCount(expectedCount int) error { - timeout := time.Now().Add(10 * time.Second) + timeout := time.Now().Add(vttabletStateTimeout) for time.Now().Before(timeout) { if vttablet.getVReplStreamCount() == fmt.Sprintf("%d", expectedCount) { return nil @@ -351,19 +364,23 @@ func (vttablet *VttabletProcess) WaitForBinLogPlayerCount(expectedCount int) err // WaitForBinlogServerState wait for the tablet's binlog server to be in the provided state. func (vttablet *VttabletProcess) WaitForBinlogServerState(expectedStatus string) error { - timeout := time.Now().Add(10 * time.Second) - for time.Now().Before(timeout) { + ctx, cancel := context.WithTimeout(context.Background(), vttabletStateTimeout) + defer cancel() + t := time.NewTicker(300 * time.Millisecond) + defer t.Stop() + for { if vttablet.getVarValue("UpdateStreamState") == expectedStatus { return nil } select { case err := <-vttablet.exit: return fmt.Errorf("process '%s' exited prematurely (err: %s)", vttablet.Name, err) - default: - time.Sleep(300 * time.Millisecond) + case <-ctx.Done(): + return fmt.Errorf("vttablet %s, expected status of %s not reached before timeout of %v", + vttablet.TabletPath, expectedStatus, vttabletStateTimeout) + case <-t.C: } } - return fmt.Errorf("vttablet %s, expected status not reached", vttablet.TabletPath) } func (vttablet *VttabletProcess) getVReplStreamCount() string { @@ -376,9 +393,20 @@ func (vttablet *VttabletProcess) getVarValue(keyname string) string { return fmt.Sprintf("%v", object) } -// TearDown shuts down the running vttablet service and fails after 10 seconds +// TearDown shuts down the running vttablet service and fails after a timeout func (vttablet *VttabletProcess) TearDown() error { - return vttablet.TearDownWithTimeout(10 * time.Second) + return vttablet.TearDownWithTimeout(vttabletStateTimeout) +} + +// Kill shuts down the running vttablet service immediately. +func (vttablet *VttabletProcess) Kill() error { + if vttablet.proc == nil || vttablet.exit == nil { + return nil + } + vttablet.proc.Process.Kill() + err := <-vttablet.exit + vttablet.proc = nil + return err } // TearDownWithTimeout shuts down the running vttablet service and fails once the given @@ -396,10 +424,7 @@ func (vttablet *VttabletProcess) TearDownWithTimeout(timeout time.Duration) erro return nil case <-time.After(timeout): - vttablet.proc.Process.Kill() - err := <-vttablet.exit - vttablet.proc = nil - return err + return vttablet.Kill() } } @@ -502,11 +527,19 @@ func (vttablet *VttabletProcess) ToggleProfiling() error { } // WaitForVReplicationToCatchup waits for "workflow" to finish copying -func (vttablet *VttabletProcess) WaitForVReplicationToCatchup(t testing.TB, workflow, database string, duration time.Duration) { +func (vttablet *VttabletProcess) WaitForVReplicationToCatchup(t testing.TB, workflow, database string, sidecarDBName string, duration time.Duration) { + if sidecarDBName == "" { + sidecarDBName = sidecar.DefaultName + } + // Escape it if/as needed + ics := sqlparser.NewIdentifierCS(sidecarDBName) + sdbi := sqlparser.String(ics) queries := [3]string{ - fmt.Sprintf(`select count(*) from _vt.vreplication where workflow = "%s" and db_name = "%s" and pos = ''`, workflow, database), - "select count(*) from information_schema.tables where table_schema='_vt' and table_name='copy_state' limit 1;", - fmt.Sprintf(`select count(*) from _vt.copy_state where vrepl_id in (select id from _vt.vreplication where workflow = "%s" and db_name = "%s" )`, workflow, database), + sqlparser.BuildParsedQuery(`select count(*) from %s.vreplication where workflow = "%s" and db_name = "%s" and pos = ''`, + sdbi, workflow, database).Query, + sqlparser.BuildParsedQuery("select count(*) from information_schema.tables where table_schema='%s' and table_name='copy_state' limit 1", sidecarDBName).Query, + sqlparser.BuildParsedQuery(`select count(*) from %s.copy_state where vrepl_id in (select id from %s.vreplication where workflow = "%s" and db_name = "%s" )`, + sdbi, sdbi, workflow, database).Query, } results := [3]string{"[INT64(0)]", "[INT64(1)]", "[INT64(0)]"} diff --git a/go/test/endtoend/clustertest/etcd_test.go b/go/test/endtoend/clustertest/etcd_test.go index 1f5e548696f..5239d960c47 100644 --- a/go/test/endtoend/clustertest/etcd_test.go +++ b/go/test/endtoend/clustertest/etcd_test.go @@ -18,15 +18,46 @@ package clustertest import ( "fmt" + "net" "testing" + "time" + + "github.com/stretchr/testify/require" + clientv3 "go.etcd.io/etcd/client/v3" "vitess.io/vitess/go/test/endtoend/cluster" ) func TestEtcdServer(t *testing.T) { defer cluster.PanicHandler(t) - etcdURL := fmt.Sprintf("http://%s:%d/v2/keys", clusterInstance.Hostname, clusterInstance.TopoPort) - testURL(t, etcdURL, "generic etcd url") - testURL(t, etcdURL+"/vitess/global", "vitess global key") - testURL(t, etcdURL+"/vitess/zone1", "vitess zone1 key") + + // Confirm the basic etcd cluster health. + etcdHealthURL := fmt.Sprintf("http://%s:%d/health", clusterInstance.Hostname, clusterInstance.TopoPort) + testURL(t, etcdHealthURL, "generic etcd health url") + + // Confirm that we have a working topo server by looking for some + // expected keys. + etcdClientOptions := []clientv3.OpOption{ + clientv3.WithPrefix(), + clientv3.WithKeysOnly(), + clientv3.WithLimit(1), + } + cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{net.JoinHostPort(clusterInstance.TopoProcess.Host, fmt.Sprintf("%d", clusterInstance.TopoProcess.Port))}, + DialTimeout: 5 * time.Second, + }) + require.NoError(t, err) + defer cli.Close() + keyPrefixes := []string{ + // At a minimum, this prefix confirms that we have a functioning + // global topo server with a valid cell from the test env. + fmt.Sprintf("/vitess/global/cells/%s", cell), + } + for _, keyPrefix := range keyPrefixes { + res, err := cli.Get(cli.Ctx(), keyPrefix, etcdClientOptions...) + require.NoError(t, err) + require.NotNil(t, res) + // Confirm that we have at least one key matching the prefix. + require.Greaterf(t, len(res.Kvs), 0, "no keys found matching prefix: %s", keyPrefix) + } } diff --git a/go/test/endtoend/clustertest/vtctld_test.go b/go/test/endtoend/clustertest/vtctld_test.go index 0ba4af1ee41..45643d869b1 100644 --- a/go/test/endtoend/clustertest/vtctld_test.go +++ b/go/test/endtoend/clustertest/vtctld_test.go @@ -123,7 +123,7 @@ func testTabletStatus(t *testing.T) { require.NoError(t, err) result := string(respByte) log.Infof("Tablet status response: %v", result) - assert.True(t, strings.Contains(result, `Alias:
\n", status) onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - cancel() // will cause runMultipleConnections() to terminate - wg.Wait() } t.Run("postponed revert", func(t *testing.T) { testPostponedRevert(t, schema.OnlineDDLStatusRunning) diff --git a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go index 7bed08c71ca..e471931a20c 100644 --- a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go +++ b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go @@ -35,9 +35,11 @@ import ( "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/onlineddl" + "vitess.io/vitess/go/test/endtoend/throttler" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -166,6 +168,38 @@ func TestParseTableName(t *testing.T) { } } +func waitForReadyToComplete(t *testing.T, uuid string, expected bool) { + ctx, cancel := context.WithTimeout(context.Background(), normalWaitTime) + defer cancel() + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + for { + + rs := onlineddl.ReadMigrations(t, &vtParams, uuid) + require.NotNil(t, rs) + for _, row := range rs.Named().Rows { + readyToComplete := row.AsInt64("ready_to_complete", 0) + if expected == (readyToComplete > 0) { + // all good. This is what we waited for + if expected { + // if migration is ready to complete, the nthe timestamp should be non-null + assert.False(t, row["ready_to_complete_timestamp"].IsNull()) + } else { + assert.True(t, row["ready_to_complete_timestamp"].IsNull()) + } + + return + } + } + select { + case <-ticker.C: + case <-ctx.Done(): + } + require.NoError(t, ctx.Err()) + } +} + func TestMain(m *testing.M) { defer cluster.PanicHandler(nil) flag.Parse() @@ -183,12 +217,10 @@ func TestMain(m *testing.M) { clusterInstance.VtctldExtraArgs = []string{ "--schema_change_dir", schemaChangeDirectory, "--schema_change_controller", "local", - "--schema_change_check_interval", "1"} + "--schema_change_check_interval", "1s", + } clusterInstance.VtTabletExtraArgs = []string{ - "--enable-lag-throttler", - "--throttle_threshold", "1s", - "--heartbeat_enable", "--heartbeat_interval", "250ms", "--heartbeat_on_demand_duration", "5s", "--watch_replication_stream", @@ -233,6 +265,9 @@ func TestMain(m *testing.M) { } func TestSchemaChange(t *testing.T) { + + throttler.EnableLagThrottlerAndWaitForStatus(t, clusterInstance, time.Second) + t.Run("scheduler", testScheduler) t.Run("singleton", testSingleton) t.Run("declarative", testDeclarative) @@ -240,6 +275,9 @@ func TestSchemaChange(t *testing.T) { t.Run("summary: validate sequential migration IDs", func(t *testing.T) { onlineddl.ValidateSequentialMigrationIDs(t, &vtParams, shards) }) + t.Run("summary: validate completed_timestamp", func(t *testing.T) { + onlineddl.ValidateCompletedTimestamp(t, &vtParams) + }) } func testScheduler(t *testing.T) { @@ -499,7 +537,7 @@ func testScheduler(t *testing.T) { testTableSequentialTimes(t, t1uuid, t2uuid) }) - t.Run("ALTER both tables, elligible for concurrenct", func(t *testing.T) { + t.Run("ALTER both tables, elligible for concurrent", func(t *testing.T) { // ALTER TABLE is allowed to run concurrently when no other ALTER is busy with copy state. Our tables are tiny so we expect to find both migrations running t1uuid = testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" --allow-concurrent --postpone-completion", "vtgate", "", "", true)) // skip wait t2uuid = testOnlineDDLStatement(t, createParams(trivialAlterT2Statement, ddlStrategy+" --allow-concurrent --postpone-completion", "vtgate", "", "", true)) // skip wait @@ -534,9 +572,11 @@ func testScheduler(t *testing.T) { }) testTableCompletionTimes(t, t2uuid, t1uuid) }) - t.Run("ALTER both tables, elligible for concurrenct, with throttling", func(t *testing.T) { + t.Run("ALTER both tables, elligible for concurrent, with throttling", func(t *testing.T) { onlineddl.ThrottleAllMigrations(t, &vtParams) defer onlineddl.UnthrottleAllMigrations(t, &vtParams) + onlineddl.CheckThrottledApps(t, &vtParams, throttlerapp.OnlineDDLName, true) + // ALTER TABLE is allowed to run concurrently when no other ALTER is busy with copy state. Our tables are tiny so we expect to find both migrations running t1uuid = testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" -allow-concurrent -postpone-completion", "vtgate", "", "", true)) // skip wait t2uuid = testOnlineDDLStatement(t, createParams(trivialAlterT2Statement, ddlStrategy+" -allow-concurrent -postpone-completion", "vtgate", "", "", true)) // skip wait @@ -553,6 +593,12 @@ func testScheduler(t *testing.T) { onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusRunning) onlineddl.CheckMigrationStatus(t, &vtParams, shards, t2uuid, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady) }) + + t.Run("check ready to complete (before)", func(t *testing.T) { + for _, uuid := range []string{t1uuid, t2uuid} { + waitForReadyToComplete(t, uuid, false) + } + }) t.Run("unthrottle, expect t2 running", func(t *testing.T) { onlineddl.UnthrottleAllMigrations(t, &vtParams) // t1 should now be ready_to_complete, hence t2 should start running @@ -580,11 +626,19 @@ func testScheduler(t *testing.T) { fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete) }) + t.Run("check ready to complete (after)", func(t *testing.T) { + for _, uuid := range []string{t1uuid, t2uuid} { + waitForReadyToComplete(t, uuid, true) + } + }) + testTableCompletionTimes(t, t2uuid, t1uuid) }) + onlineddl.CheckThrottledApps(t, &vtParams, throttlerapp.OnlineDDLName, false) + t.Run("REVERT both tables concurrent, postponed", func(t *testing.T) { - t1uuid = testRevertMigration(t, createRevertParams(t1uuid, ddlStrategy+" -allow-concurrent -postpone-completion", "vtgate", "", true)) - t2uuid = testRevertMigration(t, createRevertParams(t2uuid, ddlStrategy+" -allow-concurrent -postpone-completion", "vtgate", "", true)) + t1uuid = testRevertMigration(t, createRevertParams(t1uuid, ddlStrategy+" --allow-concurrent --postpone-completion", "vtgate", "", true)) + t2uuid = testRevertMigration(t, createRevertParams(t2uuid, ddlStrategy+" --allow-concurrent --postpone-completion", "vtgate", "", true)) testAllowConcurrent(t, "t1", t1uuid, 1) t.Run("expect both migrations to run", func(t *testing.T) { @@ -592,12 +646,7 @@ func testScheduler(t *testing.T) { onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t2uuid, normalWaitTime, schema.OnlineDDLStatusRunning) }) t.Run("test ready-to-complete", func(t *testing.T) { - rs := onlineddl.ReadMigrations(t, &vtParams, t1uuid) - require.NotNil(t, rs) - for _, row := range rs.Named().Rows { - readyToComplete := row.AsInt64("ready_to_complete", 0) - assert.Equal(t, int64(1), readyToComplete) - } + waitForReadyToComplete(t, t1uuid, true) }) t.Run("complete t2", func(t *testing.T) { // now that both are running, let's unblock t2. We expect it to complete. @@ -735,12 +784,7 @@ func testScheduler(t *testing.T) { onlineddl.CheckMigrationStatus(t, &vtParams, shards, drop1uuid, schema.OnlineDDLStatusReady) }) t.Run("t3 ready to complete", func(t *testing.T) { - rs := onlineddl.ReadMigrations(t, &vtParams, drop1uuid) - require.NotNil(t, rs) - for _, row := range rs.Named().Rows { - readyToComplete := row.AsInt64("ready_to_complete", 0) - assert.Equal(t, int64(1), readyToComplete) - } + waitForReadyToComplete(t, drop1uuid, true) }) t.Run("t3drop complete", func(t *testing.T) { // drop3 migration should not block. It can run concurrently to t1, and does not conflict @@ -840,6 +884,60 @@ func testScheduler(t *testing.T) { }) }) + t.Run("Cleanup artifacts", func(t *testing.T) { + // Create a migration with a low --retain-artifacts value. + // We will cancel the migration and expect the artifact to be cleaned. + t.Run("start migration", func(t *testing.T) { + t1uuid = testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" --postpone-completion --retain-artifacts=1s", "vtctl", "", "", true)) // skip wait + onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusRunning) + }) + var artifacts []string + t.Run("validate artifact exists", func(t *testing.T) { + rs := onlineddl.ReadMigrations(t, &vtParams, t1uuid) + require.NotNil(t, rs) + row := rs.Named().Row() + require.NotNil(t, row) + + artifacts = textutil.SplitDelimitedList(row.AsString("artifacts", "")) + assert.NotEmpty(t, artifacts) + assert.Equal(t, 1, len(artifacts)) + checkTable(t, artifacts[0], true) + + retainArtifactsSeconds := row.AsInt64("retain_artifacts_seconds", 0) + assert.Equal(t, int64(1), retainArtifactsSeconds) // due to --retain-artifacts=1s + }) + t.Run("cancel migration", func(t *testing.T) { + onlineddl.CheckCancelMigration(t, &vtParams, shards, t1uuid, true) + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled) + fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusCancelled) + }) + t.Run("wait for cleanup", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), normalWaitTime) + defer cancel() + + for { + rs := onlineddl.ReadMigrations(t, &vtParams, t1uuid) + require.NotNil(t, rs) + row := rs.Named().Row() + require.NotNil(t, row) + if !row["cleanup_timestamp"].IsNull() { + // This is what we've been waiting for + break + } + select { + case <-ctx.Done(): + assert.Fail(t, "timeout waiting for cleanup") + return + case <-time.After(time.Second): + } + } + }) + t.Run("validate artifact does not exist", func(t *testing.T) { + checkTable(t, artifacts[0], false) + }) + }) + // INSTANT DDL instantDDLCapable, err := capableOf(mysql.InstantAddLastColumnFlavorCapability) require.NoError(t, err) diff --git a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go index bc886a7a83f..49e72eda290 100644 --- a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go +++ b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go @@ -19,7 +19,6 @@ package vrepl import ( "flag" "fmt" - "io" "os" "path" "strings" @@ -27,24 +26,24 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/onlineddl" + "vitess.io/vitess/go/test/endtoend/throttler" "vitess.io/vitess/go/vt/schema" - "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" - throttlebase "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) var ( - clusterInstance *cluster.LocalProcessCluster - shards []cluster.Shard - vtParams mysql.ConnParams - httpClient = throttlebase.SetupHTTPClient(time.Second) - onlineDDLThrottlerAppName = "online-ddl" - vstreamerThrottlerAppName = "vstreamer" + clusterInstance *cluster.LocalProcessCluster + shards []cluster.Shard + vtParams mysql.ConnParams normalMigrationWait = 45 * time.Second extendedMigrationWait = 60 * time.Second @@ -150,6 +149,12 @@ var ( ` ) +const ( + customThreshold = 5 + throttlerEnabledTimeout = 60 * time.Second + noCustomQuery = "" +) + func TestMain(m *testing.M) { defer cluster.PanicHandler(nil) flag.Parse() @@ -167,12 +172,10 @@ func TestMain(m *testing.M) { clusterInstance.VtctldExtraArgs = []string{ "--schema_change_dir", schemaChangeDirectory, "--schema_change_controller", "local", - "--schema_change_check_interval", "1", + "--schema_change_check_interval", "1s", } clusterInstance.VtTabletExtraArgs = []string{ - "--throttler-config-via-topo", - "--heartbeat_enable", "--heartbeat_interval", "250ms", "--heartbeat_on_demand_duration", "5s", "--migration_check_interval", "5s", @@ -194,7 +197,6 @@ func TestMain(m *testing.M) { if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false); err != nil { return 1, err } - vtgateInstance := clusterInstance.NewVtgateInstance() // Start vtgate if err := vtgateInstance.Setup(); err != nil { @@ -218,29 +220,6 @@ func TestMain(m *testing.M) { } -// direct per-tablet throttler API instruction -func throttleResponse(tablet *cluster.Vttablet, path string) (respBody string, err error) { - apiURL := fmt.Sprintf("http://%s:%d/%s", tablet.VttabletProcess.TabletHostname, tablet.HTTPPort, path) - resp, err := httpClient.Get(apiURL) - if err != nil { - return "", err - } - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - respBody = string(b) - return respBody, err -} - -// direct per-tablet throttler API instruction -func throttleApp(tablet *cluster.Vttablet, app string) (string, error) { - return throttleResponse(tablet, fmt.Sprintf("throttler/throttle-app?app=%s&duration=1h", app)) -} - -// direct per-tablet throttler API instruction -func unthrottleApp(tablet *cluster.Vttablet, app string) (string, error) { - return throttleResponse(tablet, fmt.Sprintf("throttler/unthrottle-app?app=%s", app)) -} - func TestSchemaChange(t *testing.T) { defer cluster.PanicHandler(t) @@ -259,16 +238,34 @@ func TestSchemaChange(t *testing.T) { err := clusterInstance.WaitForTabletsToHealthyInVtgate() require.NoError(t, err) - _, err = onlineddl.UpdateThrottlerTopoConfig(clusterInstance, true, false, 0, "", false) - require.NoError(t, err) + t.Run("WaitForSrvKeyspace", func(t *testing.T) { + for _, ks := range clusterInstance.Keyspaces { + t.Run(ks.Name, func(t *testing.T) { + err := throttler.WaitForSrvKeyspace(clusterInstance, cell, ks.Name) + require.NoError(t, err) + }) + } + }) + t.Run("updating throttler config", func(t *testing.T) { + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, customThreshold, noCustomQuery, nil) + require.NoError(t, err) + }) - for _, ks := range clusterInstance.Keyspaces { - for _, shard := range ks.Shards { - for _, tablet := range shard.Vttablets { - onlineddl.WaitForThrottlerStatusEnabled(t, tablet, extendedMigrationWait) - } + t.Run("checking throttler config", func(t *testing.T) { + for _, ks := range clusterInstance.Keyspaces { + t.Run(ks.Name, func(t *testing.T) { + for _, shard := range ks.Shards { + t.Run(shard.Name, func(t *testing.T) { + for _, tablet := range shard.Vttablets { + t.Run(tablet.Alias, func(t *testing.T) { + throttler.WaitForThrottlerStatusEnabled(t, tablet, true, &throttler.Config{Query: throttler.DefaultQuery, Threshold: customThreshold}, throttlerEnabledTimeout) + }) + } + }) + } + }) } - } + }) testWithInitialSchema(t) t.Run("alter non_online", func(t *testing.T) { @@ -383,7 +380,7 @@ func TestSchemaChange(t *testing.T) { // begin throttling: onlineddl.ThrottleAllMigrations(t, &vtParams) defer onlineddl.UnthrottleAllMigrations(t, &vtParams) - onlineddl.CheckThrottledApps(t, &vtParams, onlineDDLThrottlerAppName, true) + onlineddl.CheckThrottledApps(t, &vtParams, throttlerapp.OnlineDDLName, true) uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "vitess", providedUUID, providedMigrationContext, "vtgate", "test_val", "", true) _ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalMigrationWait, schema.OnlineDDLStatusRunning) @@ -398,11 +395,11 @@ func TestSchemaChange(t *testing.T) { // to be strictly higher than started_timestamp assert.GreaterOrEqual(t, lastThrottledTimestamp, startedTimestamp) component := row.AsString("component_throttled", "") - assert.Contains(t, []string{string(vreplication.VCopierComponentName), string(vreplication.VPlayerComponentName)}, component) + assert.Contains(t, []string{throttlerapp.VCopierName.String(), throttlerapp.VPlayerName.String()}, component) // unthrottle onlineddl.UnthrottleAllMigrations(t, &vtParams) - onlineddl.CheckThrottledApps(t, &vtParams, onlineDDLThrottlerAppName, false) + onlineddl.CheckThrottledApps(t, &vtParams, throttlerapp.OnlineDDLName, false) status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) @@ -414,18 +411,9 @@ func TestSchemaChange(t *testing.T) { var uuid string func() { - for _, shard := range shards { - // technically we only need to throttle on a REPLICA, because that's the - // vstreamer source; but it's OK to be on the safe side and throttle on all tablets. Doesn't - // change the essence of this test. - for _, tablet := range shard.Vttablets { - body, err := throttleApp(tablet, vstreamerThrottlerAppName) - defer unthrottleApp(tablet, vstreamerThrottlerAppName) - - assert.NoError(t, err) - assert.Contains(t, body, vstreamerThrottlerAppName) - } - } + _, err := throttler.ThrottleAppAndWaitUntilTabletsConfirm(t, clusterInstance, throttlerapp.VStreamerName) + defer throttler.UnthrottleAppAndWaitUntilTabletsConfirm(t, clusterInstance, throttlerapp.VStreamerName) + require.NoError(t, err) uuid = testOnlineDDLStatement(t, alterTableTrivialStatement, "vitess", providedUUID, providedMigrationContext, "vtgate", "test_val", "", true) _ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalMigrationWait, schema.OnlineDDLStatusRunning) @@ -436,10 +424,18 @@ func TestSchemaChange(t *testing.T) { // to _vt.schema_migrations row, startedTimestamp, lastThrottledTimestamp := onlineddl.WaitForThrottledTimestamp(t, &vtParams, uuid, normalMigrationWait) require.NotNil(t, row) + + startedTime, err := time.Parse(sqltypes.TimestampFormat, startedTimestamp) + require.NoError(t, err) + lastThrottledTime, err := time.Parse(sqltypes.TimestampFormat, lastThrottledTimestamp) + require.NoError(t, err) + // rowstreamer throttle timestamp only updates once in 10 seconds, so greater or equals" is good enough here. - assert.GreaterOrEqual(t, lastThrottledTimestamp, startedTimestamp) + // Technically, lastThrottledTime has to be >= startedTime, but we allow a deviation of 1 sec due to + // clock irregularities + assert.GreaterOrEqual(t, lastThrottledTime.Add(time.Second), startedTime) component := row.AsString("component_throttled", "") - assert.Contains(t, []string{string(vreplication.VStreamerComponentName), string(vreplication.RowStreamerComponentName)}, component) + assert.Contains(t, []string{throttlerapp.VStreamerName.String(), throttlerapp.RowStreamerName.String()}, component) }() // now unthrottled status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) @@ -468,7 +464,7 @@ func TestSchemaChange(t *testing.T) { // Use VTGate for throttling, issue a `ALTER VITESS_MIGRATION THROTTLE ALL ...` onlineddl.ThrottleAllMigrations(t, &vtParams) defer onlineddl.UnthrottleAllMigrations(t, &vtParams) - onlineddl.CheckThrottledApps(t, &vtParams, onlineDDLThrottlerAppName, true) + onlineddl.CheckThrottledApps(t, &vtParams, throttlerapp.OnlineDDLName, true) // spawn n migrations; cancel them via cancel-all var wg sync.WaitGroup @@ -487,7 +483,7 @@ func TestSchemaChange(t *testing.T) { // Use VTGate for throttling, issue a `ALTER VITESS_MIGRATION THROTTLE ALL ...` onlineddl.ThrottleAllMigrations(t, &vtParams) defer onlineddl.UnthrottleAllMigrations(t, &vtParams) - onlineddl.CheckThrottledApps(t, &vtParams, onlineDDLThrottlerAppName, true) + onlineddl.CheckThrottledApps(t, &vtParams, throttlerapp.OnlineDDLName, true) // spawn n migrations; cancel them via cancel-all var wg sync.WaitGroup @@ -514,24 +510,10 @@ func TestSchemaChange(t *testing.T) { t.Run(fmt.Sprintf("PlannedReparentShard via throttling %d/2", (currentPrimaryTabletIndex+1)), func(t *testing.T) { insertRows(t, 2) - for i := range shards { - var body string - var err error - switch i { - case 0: - // this is the shard where we run PRS - // Use per-tablet throttling API - body, err = throttleApp(currentPrimaryTablet, onlineDDLThrottlerAppName) - defer unthrottleApp(currentPrimaryTablet, onlineDDLThrottlerAppName) - case 1: - // no PRS on this shard - // Use per-tablet throttling API - body, err = throttleApp(shards[i].Vttablets[0], onlineDDLThrottlerAppName) - defer unthrottleApp(shards[i].Vttablets[0], onlineDDLThrottlerAppName) - } - assert.NoError(t, err) - assert.Contains(t, body, onlineDDLThrottlerAppName) - } + _, err = throttler.ThrottleAppAndWaitUntilTabletsConfirm(t, clusterInstance, throttlerapp.OnlineDDLName) + assert.NoError(t, err) + defer throttler.UnthrottleAppAndWaitUntilTabletsConfirm(t, clusterInstance, throttlerapp.OnlineDDLName) + uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "vitess", providedUUID, providedMigrationContext, "vtgate", "test_val", "", true) t.Run("wait for migration to run", func(t *testing.T) { @@ -539,12 +521,12 @@ func TestSchemaChange(t *testing.T) { onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning) }) t.Run("wait for vreplication to run on shard -80", func(t *testing.T) { - vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, currentPrimaryTablet, uuid, normalMigrationWait, "Copying", "Running") - require.Contains(t, []string{"Copying", "Running"}, vreplStatus) + vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, currentPrimaryTablet, uuid, normalMigrationWait, binlogdatapb.VReplicationWorkflowState_Copying.String(), binlogdatapb.VReplicationWorkflowState_Running.String()) + require.Contains(t, []string{binlogdatapb.VReplicationWorkflowState_Copying.String(), binlogdatapb.VReplicationWorkflowState_Running.String()}, vreplStatus) }) t.Run("wait for vreplication to run on shard 80-", func(t *testing.T) { - vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, shards[1].Vttablets[0], uuid, normalMigrationWait, "Copying", "Running") - require.Contains(t, []string{"Copying", "Running"}, vreplStatus) + vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, shards[1].Vttablets[0], uuid, normalMigrationWait, binlogdatapb.VReplicationWorkflowState_Copying.String(), binlogdatapb.VReplicationWorkflowState_Running.String()) + require.Contains(t, []string{binlogdatapb.VReplicationWorkflowState_Copying.String(), binlogdatapb.VReplicationWorkflowState_Running.String()}, vreplStatus) }) t.Run("check status again", func(t *testing.T) { // again see that we're still 'running' @@ -579,22 +561,8 @@ func TestSchemaChange(t *testing.T) { onlineddl.PrintQueryResult(os.Stdout, rs) }) t.Run("unthrottle", func(t *testing.T) { - for i := range shards { - var body string - var err error - switch i { - case 0: - // this is the shard where we run PRS - // Use per-tablet throttling API - body, err = unthrottleApp(currentPrimaryTablet, onlineDDLThrottlerAppName) - case 1: - // no PRS on this shard - // Use per-tablet throttling API - body, err = unthrottleApp(shards[i].Vttablets[0], onlineDDLThrottlerAppName) - } - assert.NoError(t, err) - assert.Contains(t, body, onlineDDLThrottlerAppName) - } + _, err = throttler.UnthrottleAppAndWaitUntilTabletsConfirm(t, clusterInstance, throttlerapp.OnlineDDLName) + assert.NoError(t, err) }) t.Run("expect completion", func(t *testing.T) { _ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, extendedMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) @@ -648,12 +616,12 @@ func TestSchemaChange(t *testing.T) { onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning) }) t.Run("wait for vreplication to run on shard -80", func(t *testing.T) { - vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, currentPrimaryTablet, uuid, normalMigrationWait, "Copying", "Running") - require.Contains(t, []string{"Copying", "Running"}, vreplStatus) + vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, currentPrimaryTablet, uuid, normalMigrationWait, binlogdatapb.VReplicationWorkflowState_Copying.String(), binlogdatapb.VReplicationWorkflowState_Running.String()) + require.Contains(t, []string{binlogdatapb.VReplicationWorkflowState_Copying.String(), binlogdatapb.VReplicationWorkflowState_Running.String()}, vreplStatus) }) t.Run("wait for vreplication to run on shard 80-", func(t *testing.T) { - vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, shards[1].Vttablets[0], uuid, normalMigrationWait, "Copying", "Running") - require.Contains(t, []string{"Copying", "Running"}, vreplStatus) + vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, shards[1].Vttablets[0], uuid, normalMigrationWait, binlogdatapb.VReplicationWorkflowState_Copying.String(), binlogdatapb.VReplicationWorkflowState_Running.String()) + require.Contains(t, []string{binlogdatapb.VReplicationWorkflowState_Copying.String(), binlogdatapb.VReplicationWorkflowState_Running.String()}, vreplStatus) }) t.Run("check status again", func(t *testing.T) { // again see that we're still 'running' @@ -812,36 +780,28 @@ func TestSchemaChange(t *testing.T) { // - two shards as opposed to one // - tablet throttling t.Run("Revert a migration completed on one shard and cancelled on another", func(t *testing.T) { - // shard 0 will run normally, shard 1 will be throttled - defer unthrottleApp(shards[1].Vttablets[0], onlineDDLThrottlerAppName) - t.Run("throttle shard 1", func(t *testing.T) { - body, err := throttleApp(shards[1].Vttablets[0], onlineDDLThrottlerAppName) - assert.NoError(t, err) - assert.Contains(t, body, onlineDDLThrottlerAppName) - }) + // shard 0 will run normally, shard 1 will be postponed var uuid string - t.Run("run migrations, expect 1st to complete, 2nd to be running", func(t *testing.T) { - uuid = testOnlineDDLStatement(t, alterTableTrivialStatement, "vitess", providedUUID, providedMigrationContext, "vtgate", "test_val", "", true) + t.Run("run migrations, expect running on both shards", func(t *testing.T) { + uuid = testOnlineDDLStatement(t, alterTableTrivialStatement, "vitess --postpone-launch", providedUUID, providedMigrationContext, "vtgate", "test_val", "", true) + onlineddl.CheckLaunchMigration(t, &vtParams, shards[0:1], uuid, "-80", true) { status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards[:1], uuid, normalMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) onlineddl.CheckMigrationStatus(t, &vtParams, shards[:1], uuid, schema.OnlineDDLStatusComplete) } { - // shard 1 is throttled - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards[1:], uuid, normalMigrationWait, schema.OnlineDDLStatusRunning) + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards[1:], uuid, normalMigrationWait, schema.OnlineDDLStatusQueued) fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards[1:], uuid, schema.OnlineDDLStatusRunning) + onlineddl.CheckMigrationStatus(t, &vtParams, shards[1:], uuid, schema.OnlineDDLStatusQueued) } }) t.Run("check cancel migration", func(t *testing.T) { onlineddl.CheckCancelAllMigrations(t, &vtParams, 1) }) - t.Run("unthrottle shard 1", func(t *testing.T) { - body, err := unthrottleApp(shards[1].Vttablets[0], onlineDDLThrottlerAppName) - assert.NoError(t, err) - assert.Contains(t, body, onlineDDLThrottlerAppName) + t.Run("launch-all", func(t *testing.T) { + onlineddl.CheckLaunchAllMigrations(t, &vtParams, 0) }) var revertUUID string t.Run("issue revert migration", func(t *testing.T) { @@ -853,12 +813,12 @@ func TestSchemaChange(t *testing.T) { revertUUID = row.AsString("uuid", "") assert.NotEmpty(t, revertUUID) }) - t.Run("expect one revert successful, another failed", func(t *testing.T) { + t.Run("migrations were cancelled, revert should impossible", func(t *testing.T) { { // shard 0 migration was complete. Revert should be successful status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards[:1], revertUUID, normalMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards[:1], revertUUID, schema.OnlineDDLStatusComplete) + onlineddl.CheckMigrationStatus(t, &vtParams, shards[:1], revertUUID, schema.OnlineDDLStatusFailed) } { // shard 0 migration was cancelled. Revert should not be possible @@ -890,6 +850,9 @@ func TestSchemaChange(t *testing.T) { t.Run("summary: validate sequential migration IDs", func(t *testing.T) { onlineddl.ValidateSequentialMigrationIDs(t, &vtParams, shards) }) + t.Run("summary: validate completed_timestamp", func(t *testing.T) { + onlineddl.ValidateCompletedTimestamp(t, &vtParams) + }) } func insertRow(t *testing.T) { diff --git a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go index 7201fa70652..107050c2708 100644 --- a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go +++ b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go @@ -35,6 +35,7 @@ import ( "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/onlineddl" + "vitess.io/vitess/go/test/endtoend/throttler" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -173,13 +174,10 @@ func TestMain(m *testing.M) { clusterInstance.VtctldExtraArgs = []string{ "--schema_change_dir", schemaChangeDirectory, "--schema_change_controller", "local", - "--schema_change_check_interval", "1", + "--schema_change_check_interval", "1s", } clusterInstance.VtTabletExtraArgs = []string{ - "--enable-lag-throttler", - "--throttle_threshold", "1s", - "--heartbeat_enable", "--heartbeat_interval", "250ms", "--heartbeat_on_demand_duration", "5s", "--migration_check_interval", "5s", @@ -232,6 +230,8 @@ func TestSchemaChange(t *testing.T) { shards = clusterInstance.Keyspaces[0].Shards require.Equal(t, 1, len(shards)) + throttler.EnableLagThrottlerAndWaitForStatus(t, clusterInstance, time.Second) + t.Run("create schema", func(t *testing.T) { assert.Equal(t, 1, len(clusterInstance.Keyspaces[0].Shards)) testWithInitialSchema(t) @@ -372,6 +372,7 @@ func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName stri ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() rowcount := 0 + for { queryResult, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true) require.Nil(t, err) @@ -382,10 +383,14 @@ func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName stri select { case <-time.After(time.Second): + continue // Keep looping case <-ctx.Done(): - break + // Break below to the assertion } + + break } + assert.Equal(t, expectCount, rowcount) } diff --git a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go index 40c1f998a77..bac59241cf2 100644 --- a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go +++ b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go @@ -41,12 +41,14 @@ import ( "time" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/onlineddl" + "vitess.io/vitess/go/test/endtoend/throttler" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -136,73 +138,73 @@ var ( { name: "negative UK, different PK", prepareStatement: "add unique key negative_uidx(id_negative)", - alterStatement: "drop primary key, add primary key(rand_text(40))", + alterStatement: "drop primary key, add primary key(rand_text)", expectAddedUniqueKeys: 1, expectRemovedUniqueKeys: 1, }, { name: "text UK, no PK", - prepareStatement: "add unique key text_uidx(rand_text(40))", + prepareStatement: "add unique key text_uidx(rand_text)", alterStatement: "drop primary key", expectRemovedUniqueKeys: 1, }, { name: "text UK, different PK", - prepareStatement: "add unique key text_uidx(rand_text(40))", + prepareStatement: "add unique key text_uidx(rand_text)", alterStatement: "drop primary key, add primary key (id, id_negative)", expectRemovedUniqueKeys: 1, }, { name: "compound UK 1 by text, no PK", - prepareStatement: "add unique key compound_uidx(rand_text(40), id_negative)", + prepareStatement: "add unique key compound_uidx(rand_text, id_negative)", alterStatement: "drop primary key", expectRemovedUniqueKeys: 1, }, { name: "compound UK 2 by negative, no PK", - prepareStatement: "add unique key compound_uidx(id_negative, rand_text(40))", + prepareStatement: "add unique key compound_uidx(id_negative, rand_text)", alterStatement: "drop primary key", expectRemovedUniqueKeys: 1, }, { name: "compound UK 3 by ascending int, no PK", - prepareStatement: "add unique key compound_uidx(id, rand_num, rand_text(40))", + prepareStatement: "add unique key compound_uidx(id, rand_num, rand_text)", alterStatement: "drop primary key", expectRemovedUniqueKeys: 1, }, { name: "compound UK 4 by rand int, no PK", - prepareStatement: "add unique key compound_uidx(rand_num, rand_text(40))", + prepareStatement: "add unique key compound_uidx(rand_num, rand_text)", alterStatement: "drop primary key", expectRemovedUniqueKeys: 1, }, { name: "compound UK 5 by rand int, different PK", - prepareStatement: "add unique key compound_uidx(rand_num, rand_text(40))", + prepareStatement: "add unique key compound_uidx(rand_num, rand_text)", alterStatement: "drop primary key, add primary key (id, id_negative)", expectRemovedUniqueKeys: 1, }, { name: "multiple UK choices 1", - prepareStatement: "add unique key compound_uidx(rand_num, rand_text(40)), add unique key negative_uidx(id_negative)", + prepareStatement: "add unique key compound_uidx(rand_num, rand_text), add unique key negative_uidx(id_negative)", alterStatement: "drop primary key, add primary key(updates, id)", expectRemovedUniqueKeys: 1, }, { name: "multiple UK choices 2", - prepareStatement: "add unique key compound_uidx(rand_num, rand_text(40)), add unique key negative_uidx(id_negative)", + prepareStatement: "add unique key compound_uidx(rand_num, rand_text), add unique key negative_uidx(id_negative)", alterStatement: "drop primary key, add primary key(id, id_negative)", expectRemovedUniqueKeys: 1, }, { name: "multiple UK choices including nullable with PK", - prepareStatement: "add unique key compound_uidx(rand_num, rand_text(40)), add unique key nullable_uidx(nullable_num, id_negative), add unique key negative_uidx(id_negative)", + prepareStatement: "add unique key compound_uidx(rand_num, rand_text), add unique key nullable_uidx(nullable_num, id_negative), add unique key negative_uidx(id_negative)", alterStatement: "drop primary key, drop key negative_uidx, add primary key(id_negative)", expectRemovedUniqueKeys: 1, }, { name: "multiple UK choices including nullable", - prepareStatement: "add unique key compound_uidx(rand_num, rand_text(40)), add unique key nullable_uidx(nullable_num, id_negative), add unique key negative_uidx(id_negative)", + prepareStatement: "add unique key compound_uidx(rand_num, rand_text), add unique key nullable_uidx(nullable_num, id_negative), add unique key negative_uidx(id_negative)", alterStatement: "drop primary key, add primary key(updates, id)", expectRemovedUniqueKeys: 1, }, @@ -240,14 +242,14 @@ var ( { name: "different PRIMARY KEY, text", prepareStatement: "", - alterStatement: "drop primary key, add primary key(rand_text(40))", + alterStatement: "drop primary key, add primary key(rand_text)", expectAddedUniqueKeys: 1, expectRemovedUniqueKeys: 1, }, { name: "different PRIMARY KEY, rand", prepareStatement: "", - alterStatement: "drop primary key, add primary key(rand_num, rand_text(40))", + alterStatement: "drop primary key, add primary key(rand_num, rand_text)", expectAddedUniqueKeys: 1, expectRemovedUniqueKeys: 1, }, @@ -260,42 +262,42 @@ var ( }, { name: "different PRIMARY KEY, from text to int", - prepareStatement: "drop primary key, add primary key(rand_text(40))", + prepareStatement: "drop primary key, add primary key(rand_text)", alterStatement: "drop primary key, add primary key(id)", expectAddedUniqueKeys: 1, expectRemovedUniqueKeys: 1, }, { name: "different PRIMARY KEY, from text to rand", - prepareStatement: "drop primary key, add primary key(rand_text(40))", - alterStatement: "drop primary key, add primary key(rand_num, rand_text(40))", + prepareStatement: "drop primary key, add primary key(rand_text)", + alterStatement: "drop primary key, add primary key(rand_num, rand_text)", expectRemovedUniqueKeys: 1, }, { name: "partially shared PRIMARY KEY 1", prepareStatement: "drop primary key, add primary key(id, id_negative)", - alterStatement: "drop primary key, add primary key(id, rand_text(40))", + alterStatement: "drop primary key, add primary key(id, rand_text)", expectAddedUniqueKeys: 1, expectRemovedUniqueKeys: 1, }, { name: "partially shared PRIMARY KEY 2", prepareStatement: "drop primary key, add primary key(id, id_negative)", - alterStatement: "drop primary key, add primary key(id_negative, rand_text(40))", + alterStatement: "drop primary key, add primary key(id_negative, rand_text)", expectAddedUniqueKeys: 1, expectRemovedUniqueKeys: 1, }, { name: "partially shared PRIMARY KEY 3", prepareStatement: "drop primary key, add primary key(id, id_negative)", - alterStatement: "drop primary key, add primary key(rand_text(40), id)", + alterStatement: "drop primary key, add primary key(rand_text, id)", expectAddedUniqueKeys: 1, expectRemovedUniqueKeys: 1, }, { name: "partially shared PRIMARY KEY 4", prepareStatement: "drop primary key, add primary key(id_negative, id)", - alterStatement: "drop primary key, add primary key(rand_text(40), id)", + alterStatement: "drop primary key, add primary key(rand_text, id)", expectAddedUniqueKeys: 1, expectRemovedUniqueKeys: 1, }, @@ -309,7 +311,7 @@ var ( { name: "no shared UK, multiple options", prepareStatement: "add unique key negative_uidx(id_negative)", - alterStatement: "drop primary key, drop key negative_uidx, add primary key(rand_text(40)), add unique key negtext_uidx(id_negative, rand_text(40))", + alterStatement: "drop primary key, drop key negative_uidx, add primary key(rand_text), add unique key negtext_uidx(id_negative, rand_text)", expectAddedUniqueKeys: 1, expectRemovedUniqueKeys: 2, }, @@ -422,16 +424,13 @@ func TestMain(m *testing.M) { clusterInstance.VtctldExtraArgs = []string{ "--schema_change_dir", schemaChangeDirectory, "--schema_change_controller", "local", - "--schema_change_check_interval", "1", + "--schema_change_check_interval", "1s", } // --vstream_packet_size is set to a small value that ensures we get multiple stream iterations, // thereby examining lastPK on vcopier side. We will be iterating tables using non-PK order throughout // this test suite, and so the low setting ensures we hit the more interesting code paths. clusterInstance.VtTabletExtraArgs = []string{ - "--enable-lag-throttler", - "--throttle_threshold", "1s", - "--heartbeat_enable", "--heartbeat_interval", "250ms", "--heartbeat_on_demand_duration", "5s", "--migration_check_interval", "5s", @@ -485,6 +484,8 @@ func TestSchemaChange(t *testing.T) { shards = clusterInstance.Keyspaces[0].Shards require.Equal(t, 1, len(shards)) + throttler.EnableLagThrottlerAndWaitForStatus(t, clusterInstance, time.Second) + for _, testcase := range testCases { require.NotEmpty(t, testcase.name) t.Run(testcase.name, func(t *testing.T) { @@ -706,9 +707,9 @@ func runSingleConnection(ctx context.Context, t *testing.T, autoIncInsert bool, // Table renamed to _before, due to -vreplication-test-suite flag err = nil } - if sqlErr, ok := err.(*mysql.SQLError); ok { + if sqlErr, ok := err.(*sqlerror.SQLError); ok { switch sqlErr.Number() { - case mysql.ERLockDeadlock: + case sqlerror.ERLockDeadlock: // That's fine. We create a lot of contention; some transactions will deadlock and // rollback. It happens, and we can ignore those and keep on going. err = nil diff --git a/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go index 30fa4d65736..c8b87215036 100644 --- a/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go +++ b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go @@ -34,6 +34,7 @@ import ( "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/onlineddl" + "vitess.io/vitess/go/test/endtoend/throttler" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -78,13 +79,10 @@ func TestMain(m *testing.M) { clusterInstance.VtctldExtraArgs = []string{ "--schema_change_dir", schemaChangeDirectory, "--schema_change_controller", "local", - "--schema_change_check_interval", "1", + "--schema_change_check_interval", "1s", } clusterInstance.VtTabletExtraArgs = []string{ - "--enable-lag-throttler", - "--throttle_threshold", "1s", - "--heartbeat_enable", "--heartbeat_interval", "250ms", "--heartbeat_on_demand_duration", "5s", "--migration_check_interval", "5s", @@ -134,6 +132,8 @@ func TestSchemaChange(t *testing.T) { shards := clusterInstance.Keyspaces[0].Shards require.Equal(t, 1, len(shards)) + throttler.EnableLagThrottlerAndWaitForStatus(t, clusterInstance, time.Second) + files, err := os.ReadDir(testDataPath) require.NoError(t, err) for _, f := range files { diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-partial-text-pk/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-partial-text-pk/alter new file mode 100644 index 00000000000..64d721c8b34 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-partial-text-pk/alter @@ -0,0 +1 @@ +modify val bigint not null default 0 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-partial-text-pk/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-partial-text-pk/create.sql new file mode 100644 index 00000000000..638b7b4aafd --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-partial-text-pk/create.sql @@ -0,0 +1,23 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id varchar(64) not null, + val int default null, + ts timestamp, + primary key(id(32)) +) auto_increment=1; + +insert into onlineddl_test values (sha1(rand()), 2, now()); +insert into onlineddl_test values (sha1(rand()), 3, now()); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (sha1(rand()), 11, now()); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-partial-text-pk/expect_failure b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-partial-text-pk/expect_failure new file mode 100644 index 00000000000..f4caebc4889 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-partial-text-pk/expect_failure @@ -0,0 +1 @@ +no possible unique key diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/create.sql deleted file mode 100644 index b9a14cdc156..00000000000 --- a/go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/create.sql +++ /dev/null @@ -1,25 +0,0 @@ -drop table if exists onlineddl_test; -create table onlineddl_test ( - id int(11) NOT NULL AUTO_INCREMENT, - name varchar(512) DEFAULT NULL, - v varchar(255) DEFAULT NULL COMMENT '添加普通列测试', - PRIMARY KEY (id) -) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=gbk; - -insert into onlineddl_test values (null, 'gbk-test-initial', '添加普通列测试-添加普通列测试'); -insert into onlineddl_test values (null, 'gbk-test-initial', '添加普通列测试-添加普通列测试'); - -drop event if exists onlineddl_test; -delimiter ;; -create event onlineddl_test - on schedule every 1 second - starts current_timestamp - ends current_timestamp + interval 60 second - on completion not preserve - enable - do -begin - insert into onlineddl_test (name) values ('gbk-test-default'); - insert into onlineddl_test values (null, 'gbk-test', '添加普通列测试-添加普通列测试'); - update onlineddl_test set v='添加普通列测试' where v='添加普通列测试-添加普通列测试' order by id desc limit 1; -end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/partial-text-pk-full/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/partial-text-pk-full/alter new file mode 100644 index 00000000000..64d721c8b34 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/partial-text-pk-full/alter @@ -0,0 +1 @@ +modify val bigint not null default 0 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/partial-text-pk-full/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/partial-text-pk-full/create.sql new file mode 100644 index 00000000000..8f30460203b --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/partial-text-pk-full/create.sql @@ -0,0 +1,23 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id varchar(64) not null, + val int default null, + ts timestamp, + primary key(id(64)) +) auto_increment=1; + +insert into onlineddl_test values (sha1(rand()), 2, now()); +insert into onlineddl_test values (sha1(rand()), 3, now()); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (sha1(rand()), 11, now()); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/text-pk/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/text-pk/alter new file mode 100644 index 00000000000..64d721c8b34 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/text-pk/alter @@ -0,0 +1 @@ +modify val bigint not null default 0 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/text-pk/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/text-pk/create.sql new file mode 100644 index 00000000000..29891bfac55 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/text-pk/create.sql @@ -0,0 +1,23 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id varchar(64) not null, + val int default null, + ts timestamp, + primary key(id) +) auto_increment=1; + +insert into onlineddl_test values (sha1(rand()), 2, now()); +insert into onlineddl_test values (sha1(rand()), 3, now()); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (sha1(rand()), 11, now()); +end ;; diff --git a/go/test/endtoend/onlineddl/vtctlutil.go b/go/test/endtoend/onlineddl/vtctlutil.go index c3046637846..19a6ff79604 100644 --- a/go/test/endtoend/onlineddl/vtctlutil.go +++ b/go/test/endtoend/onlineddl/vtctlutil.go @@ -17,18 +17,13 @@ limitations under the License. package onlineddl import ( - "context" - "fmt" "testing" - "time" "vitess.io/vitess/go/test/endtoend/cluster" "github.com/stretchr/testify/assert" ) -var throttlerConfigTimeout = 60 * time.Second - // CheckCancelAllMigrations cancels all pending migrations. There is no validation for affected migrations. func CheckCancelAllMigrationsViaVtctl(t *testing.T, vtctlclient *cluster.VtctlClientProcess, keyspace string) { cancelQuery := "alter vitess_migration cancel all" @@ -36,51 +31,3 @@ func CheckCancelAllMigrationsViaVtctl(t *testing.T, vtctlclient *cluster.VtctlCl _, err := vtctlclient.ApplySchemaWithOutput(keyspace, cancelQuery, cluster.VtctlClientParams{}) assert.NoError(t, err) } - -// UpdateThrottlerTopoConfig runs vtctlclient UpdateThrottlerConfig. -// This retries the command until it succeeds or times out as the -// SrvKeyspace record may not yet exist for a newly created -// Keyspace that is still initializing before it becomes serving. -func UpdateThrottlerTopoConfig(clusterInstance *cluster.LocalProcessCluster, enable bool, disable bool, threshold float64, metricsQuery string, viaVtctldClient bool) (result string, err error) { - args := []string{} - clientfunc := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput - if !viaVtctldClient { - args = append(args, "--") - clientfunc = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput - } - args = append(args, "UpdateThrottlerConfig") - if enable { - args = append(args, "--enable") - } - if disable { - args = append(args, "--disable") - } - if threshold > 0 { - args = append(args, "--threshold", fmt.Sprintf("%f", threshold)) - } - if metricsQuery != "" { - args = append(args, "--custom-query", metricsQuery) - args = append(args, "--check-as-check-self") - } else { - args = append(args, "--check-as-check-shard") - } - args = append(args, clusterInstance.Keyspaces[0].Name) - - ctx, cancel := context.WithTimeout(context.Background(), throttlerConfigTimeout) - defer cancel() - - ticker := time.NewTicker(time.Second) - defer ticker.Stop() - - for { - result, err = clientfunc(args...) - if err == nil { - return result, nil - } - select { - case <-ctx.Done(): - return "", fmt.Errorf("timed out waiting for UpdateThrottlerConfig to succeed after %v. Last seen value: %+v, error: %v", throttlerConfigTimeout, result, err) - case <-ticker.C: - } - } -} diff --git a/go/test/endtoend/onlineddl/vtgate_util.go b/go/test/endtoend/onlineddl/vtgate_util.go index 75c35061da9..ae214a644b6 100644 --- a/go/test/endtoend/onlineddl/vtgate_util.go +++ b/go/test/endtoend/onlineddl/vtgate_util.go @@ -19,26 +19,35 @@ package onlineddl import ( "context" "fmt" - "io" "math" - "net/http" "os" "testing" "time" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" "vitess.io/vitess/go/test/endtoend/cluster" - "github.com/buger/jsonparser" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +const ( + ThrottledAppsTimeout = 60 * time.Second +) + +var ( + testsStartupTime time.Time +) + +func init() { + testsStartupTime = time.Now() +} + // VtgateExecQuery runs a query on VTGate using given query params func VtgateExecQuery(t *testing.T, vtParams *mysql.ConnParams, query string, expectError string) *sqltypes.Result { t.Helper() @@ -311,17 +320,36 @@ func UnthrottleAllMigrations(t *testing.T, vtParams *mysql.ConnParams) { } // CheckThrottledApps checks for existence or non-existence of an app in the throttled apps list -func CheckThrottledApps(t *testing.T, vtParams *mysql.ConnParams, appName string, expectFind bool) { - query := "show vitess_throttled_apps" - r := VtgateExecQuery(t, vtParams, query, "") +func CheckThrottledApps(t *testing.T, vtParams *mysql.ConnParams, throttlerApp throttlerapp.Name, expectFind bool) { - found := false - for _, row := range r.Named().Rows { - if row.AsString("app", "") == appName { - found = true + ctx, cancel := context.WithTimeout(context.Background(), ThrottledAppsTimeout) + defer cancel() + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + query := "show vitess_throttled_apps" + r := VtgateExecQuery(t, vtParams, query, "") + + appFound := false + for _, row := range r.Named().Rows { + if throttlerApp.Equals(row.AsString("app", "")) { + appFound = true + } + } + if appFound == expectFind { + // we're all good + return + } + + select { + case <-ctx.Done(): + assert.Failf(t, "CheckThrottledApps timed out waiting for %v to be in throttled status '%v'", throttlerApp.String(), expectFind) + return + case <-ticker.C: } } - assert.Equal(t, expectFind, found, "check app %v in throttled apps: %v", appName, found) } // WaitForThrottledTimestamp waits for a migration to have a non-empty last_throttled_timestamp @@ -349,49 +377,6 @@ func WaitForThrottledTimestamp(t *testing.T, vtParams *mysql.ConnParams, uuid st return } -// WaitForThrottlerStatusEnabled waits for a tablet to report its throttler status as enabled. -func WaitForThrottlerStatusEnabled(t *testing.T, tablet *cluster.Vttablet, timeout time.Duration) { - jsonPath := "IsEnabled" - url := fmt.Sprintf("http://localhost:%d/throttler/status", tablet.HTTPPort) - - ctx, cancel := context.WithTimeout(context.Background(), throttlerConfigTimeout) - defer cancel() - - ticker := time.NewTicker(time.Second) - defer ticker.Stop() - - for { - body := getHTTPBody(url) - val, err := jsonparser.GetBoolean([]byte(body), jsonPath) - require.NoError(t, err) - if val { - return - } - select { - case <-ctx.Done(): - t.Error("timeout waiting for tablet's throttler status to be enabled") - return - case <-ticker.C: - } - } -} - -func getHTTPBody(url string) string { - resp, err := http.Get(url) - if err != nil { - log.Infof("http Get returns %+v", err) - return "" - } - if resp.StatusCode != 200 { - log.Infof("http Get returns status %d", resp.StatusCode) - return "" - } - respByte, _ := io.ReadAll(resp.Body) - defer resp.Body.Close() - body := string(respByte) - return body -} - // ValidateSequentialMigrationIDs validates that schem_migrations.id column, which is an AUTO_INCREMENT, does // not have gaps func ValidateSequentialMigrationIDs(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard) { @@ -428,3 +413,31 @@ func ValidateSequentialMigrationIDs(t *testing.T, vtParams *mysql.ConnParams, sh assert.Equalf(t, count, shardMax[shard]-shardMin[shard]+1, "mismatch: shared=%v, count=%v, min=%v, max=%v", shard, count, shardMin[shard], shardMax[shard]) } } + +// ValidateCompletedTimestamp ensures that any migration in `cancelled`, `completed`, `failed` statuses +// has a non-nil and valid `completed_timestamp` value. +func ValidateCompletedTimestamp(t *testing.T, vtParams *mysql.ConnParams) { + require.False(t, testsStartupTime.IsZero()) + r := VtgateExecQuery(t, vtParams, "show vitess_migrations", "") + + completedTimestampNumValidations := 0 + for _, row := range r.Named().Rows { + migrationStatus := row.AsString("migration_status", "") + require.NotEmpty(t, migrationStatus) + switch migrationStatus { + case string(schema.OnlineDDLStatusComplete), + string(schema.OnlineDDLStatusFailed), + string(schema.OnlineDDLStatusCancelled): + { + assert.False(t, row["completed_timestamp"].IsNull()) + // Also make sure the timestamp is "real", and that it is recent. + timestamp := row.AsString("completed_timestamp", "") + completedTime, err := time.Parse(sqltypes.TimestampFormat, timestamp) + assert.NoError(t, err) + assert.Greater(t, completedTime.Unix(), testsStartupTime.Unix()) + completedTimestampNumValidations++ + } + } + } + assert.NotZero(t, completedTimestampNumValidations) +} diff --git a/go/test/endtoend/preparestmt/stmt_methods_test.go b/go/test/endtoend/preparestmt/stmt_methods_test.go index 21369ea4d3a..24fb58bff81 100644 --- a/go/test/endtoend/preparestmt/stmt_methods_test.go +++ b/go/test/endtoend/preparestmt/stmt_methods_test.go @@ -436,3 +436,24 @@ func TestShowColumns(t *testing.T) { require.Len(t, cols, 6) require.False(t, rows.Next()) } + +func TestBinaryColumn(t *testing.T) { + defer cluster.PanicHandler(t) + dbo := Connect(t, "interpolateParams=false") + defer dbo.Close() + + _, err := dbo.Query(`SELECT DISTINCT + BINARY table_info.table_name AS table_name, + table_info.create_options AS create_options, + table_info.table_comment AS table_comment + FROM information_schema.tables AS table_info + JOIN information_schema.columns AS column_info + ON BINARY column_info.table_name = BINARY table_info.table_name + WHERE + table_info.table_schema = ? + AND column_info.table_schema = ? + -- Exclude views. + AND table_info.table_type = 'BASE TABLE' + ORDER BY BINARY table_info.table_name`, keyspaceName, keyspaceName) + require.NoError(t, err) +} diff --git a/go/test/endtoend/recovery/pitr/shardedpitr_test.go b/go/test/endtoend/recovery/pitr/shardedpitr_test.go index 5a7ae3e1399..d04b5600362 100644 --- a/go/test/endtoend/recovery/pitr/shardedpitr_test.go +++ b/go/test/endtoend/recovery/pitr/shardedpitr_test.go @@ -19,7 +19,9 @@ package pitr import ( "context" "fmt" + "os" "os/exec" + "path" "testing" "time" @@ -27,8 +29,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" "vitess.io/vitess/go/vt/log" ) @@ -51,20 +55,22 @@ var ( shard1Replica1 *cluster.Vttablet shard1Replica2 *cluster.Vttablet - cell = "zone1" - hostname = "localhost" - binlogHost = "127.0.0.1" - keyspaceName = "ks" - restoreKS1Name = "restoreks1" - restoreKS2Name = "restoreks2" - restoreKS3Name = "restoreks3" - shardName = "0" - shard0Name = "-80" - shard1Name = "80-" - dbName = "vt_ks" - mysqlUserName = "vt_dba" - mysqlPassword = "password" - vSchema = `{ + cell = "zone1" + hostname = "localhost" + binlogHost = "127.0.0.1" + keyspaceName = "ks" + restoreKS1Name = "restoreks1" + restoreKS2Name = "restoreks2" + restoreKS3Name = "restoreks3" + shardName = "0" + shard0Name = "-80" + shard1Name = "80-" + dbName = "vt_ks" + mysqlUserName = "vt_dba" + mysqlPassword = "VtDbaPass" + dbCredentialFile = "" + initDBFileWithPassword = "" + vSchema = `{ "sharded": true, "vindexes": { "hash_index": { @@ -298,8 +304,8 @@ func performResharding(t *testing.T) { require.NoError(t, err) waitTimeout := 30 * time.Second - shard0Primary.VttabletProcess.WaitForVReplicationToCatchup(t, "ks.reshardWorkflow", dbName, waitTimeout) - shard1Primary.VttabletProcess.WaitForVReplicationToCatchup(t, "ks.reshardWorkflow", dbName, waitTimeout) + shard0Primary.VttabletProcess.WaitForVReplicationToCatchup(t, "ks.reshardWorkflow", dbName, sidecar.DefaultName, waitTimeout) + shard1Primary.VttabletProcess.WaitForVReplicationToCatchup(t, "ks.reshardWorkflow", dbName, sidecar.DefaultName, waitTimeout) waitForNoWorkflowLag(t, clusterInstance, "ks.reshardWorkflow") @@ -408,6 +414,10 @@ func initializeCluster(t *testing.T) { shard0.Vttablets = []*cluster.Vttablet{shard0Primary, shard0Replica1, shard0Replica2} shard1.Vttablets = []*cluster.Vttablet{shard1Primary, shard1Replica1, shard1Replica2} + dbCredentialFile = cluster.WriteDbCredentialToTmp(clusterInstance.TmpDirectory) + extraArgs := []string{"--db-credentials-file", dbCredentialFile} + commonTabletArg = append(commonTabletArg, "--db-credentials-file", dbCredentialFile) + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, commonTabletArg...) clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--restore_from_backup") @@ -416,10 +426,23 @@ func initializeCluster(t *testing.T) { vtctldClientProcess := cluster.VtctldClientProcessInstance("localhost", clusterInstance.VtctldProcess.GrpcPort, clusterInstance.TmpDirectory) out, err := vtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, "--durability-policy=semi_sync") require.NoError(t, err, out) + + initDb, _ := os.ReadFile(path.Join(os.Getenv("VTROOT"), "/config/init_db.sql")) + sql := string(initDb) + // The original init_db.sql does not have any passwords. Here we update the init file with passwords + sql, err = utils.GetInitDBSQL(sql, cluster.GetPasswordUpdateSQL(clusterInstance), "") + require.NoError(t, err, "expected to load init_db file") + initDBFileWithPassword = path.Join(clusterInstance.TmpDirectory, "init_db_with_passwords.sql") + err = os.WriteFile(initDBFileWithPassword, []byte(sql), 0660) + require.NoError(t, err, "expected to load init_db file") + // Start MySql var mysqlCtlProcessList []*exec.Cmd for _, shard := range clusterInstance.Keyspaces[0].Shards { for _, tablet := range shard.Vttablets { + tablet.MysqlctlProcess.InitDBFile = initDBFileWithPassword + tablet.VttabletProcess.DbPassword = mysqlPassword + tablet.MysqlctlProcess.ExtraArgs = extraArgs proc, err := tablet.MysqlctlProcess.StartProcess() require.NoError(t, err) mysqlCtlProcessList = append(mysqlCtlProcessList, proc) @@ -432,21 +455,8 @@ func initializeCluster(t *testing.T) { require.NoError(t, err) } - queryCmds := []string{ - fmt.Sprintf("CREATE USER '%s'@'%%' IDENTIFIED BY '%s';", mysqlUserName, mysqlPassword), - fmt.Sprintf("GRANT ALL ON *.* TO '%s'@'%%';", mysqlUserName), - fmt.Sprintf("GRANT GRANT OPTION ON *.* TO '%s'@'%%';", mysqlUserName), - fmt.Sprintf("create database %s;", "vt_ks"), - "FLUSH PRIVILEGES;", - } - for _, shard := range clusterInstance.Keyspaces[0].Shards { for _, tablet := range shard.Vttablets { - for _, query := range queryCmds { - _, err = tablet.VttabletProcess.QueryTablet(query, keyspace.Name, false) - require.NoError(t, err) - } - err = tablet.VttabletProcess.Setup() require.NoError(t, err) } @@ -509,8 +519,14 @@ func testTabletRecovery(t *testing.T, binlogServer *binLogServer, lookupTimeout, } func launchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, binlogServer *binLogServer, lookupTimeout, restoreKeyspaceName, shardName string) { - tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory) - err := tablet.MysqlctlProcess.Start() + mysqlctlProcess, err := cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory) + require.NoError(t, err) + tablet.MysqlctlProcess = *mysqlctlProcess + extraArgs := []string{"--db-credentials-file", dbCredentialFile} + tablet.MysqlctlProcess.InitDBFile = initDBFileWithPassword + tablet.VttabletProcess.DbPassword = mysqlPassword + tablet.MysqlctlProcess.ExtraArgs = extraArgs + err = tablet.MysqlctlProcess.Start() require.NoError(t, err) tablet.VttabletProcess = cluster.VttabletProcessInstance( @@ -550,6 +566,7 @@ func launchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, binlogServer * "--lock_tables_timeout", "5s", "--watch_replication_stream", "--serving_state_grace_period", "1s", + "--db-credentials-file", dbCredentialFile, } tablet.VttabletProcess.ServingStatus = "" diff --git a/go/test/endtoend/recovery/recovery_util.go b/go/test/endtoend/recovery/recovery_util.go index 66084e2f4b5..cffae6a5005 100644 --- a/go/test/endtoend/recovery/recovery_util.go +++ b/go/test/endtoend/recovery/recovery_util.go @@ -51,17 +51,20 @@ func VerifyQueriesUsingVtgate(t *testing.T, session *vtgateconn.VTGateSession, q } // RestoreTablet performs a PITR restore. -func RestoreTablet(t *testing.T, localCluster *cluster.LocalProcessCluster, tablet *cluster.Vttablet, restoreKSName string, shardName string, keyspaceName string, commonTabletArg []string) { +func RestoreTablet(t *testing.T, localCluster *cluster.LocalProcessCluster, tablet *cluster.Vttablet, restoreKSName string, shardName string, keyspaceName string, commonTabletArg []string, restoreTime time.Time) { tablet.ValidateTabletRestart(t) replicaTabletArgs := commonTabletArg _, err := localCluster.VtctlProcess.ExecuteCommandWithOutput("GetKeyspace", restoreKSName) + if restoreTime.IsZero() { + restoreTime = time.Now().UTC() + } + if err != nil { - tm := time.Now().UTC() _, err := localCluster.VtctlProcess.ExecuteCommandWithOutput("CreateKeyspace", "--", "--keyspace_type=SNAPSHOT", "--base_keyspace="+keyspaceName, - "--snapshot_time", tm.Format(time.RFC3339), restoreKSName) + "--snapshot_time", restoreTime.Format(time.RFC3339), restoreKSName) require.Nil(t, err) } diff --git a/go/test/endtoend/recovery/unshardedrecovery/recovery.go b/go/test/endtoend/recovery/unshardedrecovery/recovery.go index 1ab9f1647ca..f4db74bbf4e 100644 --- a/go/test/endtoend/recovery/unshardedrecovery/recovery.go +++ b/go/test/endtoend/recovery/unshardedrecovery/recovery.go @@ -24,12 +24,14 @@ import ( "os/exec" "path" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/recovery" + "vitess.io/vitess/go/test/endtoend/utils" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vtgate/vtgateconn" ) @@ -91,16 +93,19 @@ func TestMainImpl(m *testing.M) { } localCluster.Keyspaces = append(localCluster.Keyspaces, *keyspace) + // Create a new init_db.sql file that sets up passwords for all users. + // Then we use a db-credentials-file with the passwords. + // TODO: We could have operated with empty password here. Create a separate test for --db-credentials-file functionality (@rsajwani) dbCredentialFile = cluster.WriteDbCredentialToTmp(localCluster.TmpDirectory) initDb, _ := os.ReadFile(path.Join(os.Getenv("VTROOT"), "/config/init_db.sql")) sql := string(initDb) + // The original init_db.sql does not have any passwords. Here we update the init file with passwords + oldAlterTableMode := `SET GLOBAL old_alter_table = ON;` + sql, err = utils.GetInitDBSQL(sql, cluster.GetPasswordUpdateSQL(localCluster), oldAlterTableMode) + if err != nil { + return 1, err + } newInitDBFile = path.Join(localCluster.TmpDirectory, "init_db_with_passwords.sql") - sql = sql + cluster.GetPasswordUpdateSQL(localCluster) - // https://github.com/vitessio/vitess/issues/8315 - oldAlterTableMode := ` -SET GLOBAL old_alter_table = ON; -` - sql = sql + oldAlterTableMode os.WriteFile(newInitDBFile, []byte(sql), 0666) extraArgs := []string{"--db-credentials-file", dbCredentialFile} @@ -125,7 +130,11 @@ SET GLOBAL old_alter_table = ON; } tablet.VttabletProcess.SupportsBackup = true - tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory) + mysqlctlProcess, err := cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory) + if err != nil { + return 1, err + } + tablet.MysqlctlProcess = *mysqlctlProcess tablet.MysqlctlProcess.InitDBFile = newInitDBFile tablet.MysqlctlProcess.ExtraArgs = extraArgs proc, err := tablet.MysqlctlProcess.StartProcess() @@ -176,24 +185,29 @@ SET GLOBAL old_alter_table = ON; } // TestRecoveryImpl does following -// - create a shard with primary and replica1 only -// - run InitShardPrimary -// - insert some data -// - take a backup -// - insert more data on the primary -// - take another backup -// - create a recovery keyspace after first backup -// - bring up tablet_replica2 in the new keyspace -// - check that new tablet does not have data created after backup1 -// - create second recovery keyspace after second backup -// - bring up tablet_replica3 in second keyspace -// - check that new tablet has data created after backup1 but not data created after backup2 -// - check that vtgate queries work correctly +// 1. create a shard with primary and replica1 only +// - run InitShardPrimary +// - insert some data +// +// 2. take a backup +// 3.create a recovery keyspace after first backup +// - bring up tablet_replica2 in the new keyspace +// - check that new tablet has data from backup1 +// +// 4. insert more data on the primary +// +// 5. take another backup +// 6. create a recovery keyspace after second backup +// - bring up tablet_replica3 in the new keyspace +// - check that new tablet has data from backup2 +// +// 7. check that vtgate queries work correctly func TestRecoveryImpl(t *testing.T) { defer cluster.PanicHandler(t) defer tabletsTeardown() verifyInitialReplication(t) + // take first backup of value = test1 err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) assert.NoError(t, err) @@ -201,10 +215,6 @@ func TestRecoveryImpl(t *testing.T) { require.Equal(t, len(backups), 1) assert.Contains(t, backups[0], replica1.Alias) - _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true) - assert.NoError(t, err) - cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 2) - err = localCluster.VtctlclientProcess.ApplyVSchema(keyspaceName, vSchema) assert.NoError(t, err) @@ -212,64 +222,80 @@ func TestRecoveryImpl(t *testing.T) { assert.NoError(t, err) assert.Contains(t, output, "vt_insert_test") - recovery.RestoreTablet(t, localCluster, replica2, recoveryKS1, "0", keyspaceName, commonTabletArg) + // restore with latest backup + restoreTime := time.Now().UTC() + recovery.RestoreTablet(t, localCluster, replica2, recoveryKS1, "0", keyspaceName, commonTabletArg, restoreTime) output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvVSchema", cell) assert.NoError(t, err) assert.Contains(t, output, keyspaceName) assert.Contains(t, output, recoveryKS1) - err = localCluster.VtctlclientProcess.ExecuteCommand("GetSrvKeyspace", cell, keyspaceName) - assert.NoError(t, err) - output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS1) assert.NoError(t, err) assert.Contains(t, output, "vt_insert_test") cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 1) + // verify that restored replica has value = test1 + qr, err := replica2.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true) + assert.NoError(t, err) + assert.Equal(t, "test1", qr.Rows[0][0].ToString()) + + // insert new row on primary + _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true) + assert.NoError(t, err) + cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 2) + // update the original row in primary _, err = primary.VttabletProcess.QueryTablet("update vt_insert_test set msg = 'msgx1' where id = 1", keyspaceName, true) assert.NoError(t, err) - //verify that primary has new value - qr, err := primary.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true) + // verify that primary has new value + qr, err = primary.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true) assert.NoError(t, err) assert.Equal(t, "msgx1", qr.Rows[0][0].ToString()) - //verify that restored replica has old value - qr, err = replica2.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true) - assert.NoError(t, err) - assert.Equal(t, "test1", qr.Rows[0][0].ToString()) + // check that replica1, used for the backup, has the new value + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() - err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) - assert.NoError(t, err) + ticker := time.NewTicker(time.Second) + defer ticker.Stop() - _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test3')", keyspaceName, true) - assert.NoError(t, err) - cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 3) + for { + qr, err = replica1.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true) + assert.NoError(t, err) + if qr.Rows[0][0].ToString() == "msgx1" { + break + } - recovery.RestoreTablet(t, localCluster, replica3, recoveryKS2, "0", keyspaceName, commonTabletArg) + select { + case <-ctx.Done(): + t.Error("timeout waiting for new value to be replicated on replica 1") + break + case <-ticker.C: + } + } - output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS2) + // take second backup of value = msgx1 + err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) assert.NoError(t, err) - assert.Contains(t, output, "vt_insert_test") - cluster.VerifyRowsInTablet(t, replica3, keyspaceName, 2) + // restore to first backup + recovery.RestoreTablet(t, localCluster, replica3, recoveryKS2, "0", keyspaceName, commonTabletArg, restoreTime) - // update the original row in primary - _, err = primary.VttabletProcess.QueryTablet("update vt_insert_test set msg = 'msgx2' where id = 1", keyspaceName, true) + output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS2) assert.NoError(t, err) + assert.Contains(t, output, "vt_insert_test") - //verify that primary has new value - qr, err = primary.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true) - assert.NoError(t, err) - assert.Equal(t, "msgx2", qr.Rows[0][0].ToString()) + // only one row from first backup + cluster.VerifyRowsInTablet(t, replica3, keyspaceName, 1) - //verify that restored replica has old value + //verify that restored replica has value = test1 qr, err = replica3.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true) assert.NoError(t, err) - assert.Equal(t, "msgx1", qr.Rows[0][0].ToString()) + assert.Equal(t, "test1", qr.Rows[0][0].ToString()) vtgateInstance := localCluster.NewVtgateInstance() vtgateInstance.TabletTypesToWait = "REPLICA" @@ -277,14 +303,10 @@ func TestRecoveryImpl(t *testing.T) { localCluster.VtgateGrpcPort = vtgateInstance.GrpcPort assert.NoError(t, err) defer vtgateInstance.TearDown() - err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspaceName, shardName), 1) - assert.NoError(t, err) - err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shardName), 1) - assert.NoError(t, err) - err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", recoveryKS1, shardName), 1) - assert.NoError(t, err) - err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", recoveryKS2, shardName), 1) - assert.NoError(t, err) + assert.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspaceName, shardName), 1, 30*time.Second)) + assert.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shardName), 1, 30*time.Second)) + assert.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", recoveryKS1, shardName), 1, 30*time.Second)) + assert.NoError(t, vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", recoveryKS2, shardName), 1, 30*time.Second)) // Build vtgate grpc connection grpcAddress := fmt.Sprintf("%s:%d", localCluster.Hostname, localCluster.VtgateGrpcPort) @@ -293,27 +315,27 @@ func TestRecoveryImpl(t *testing.T) { defer vtgateConn.Close() session := vtgateConn.Session("@replica", nil) - //check that vtgate doesn't route queries to new tablet - recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(3)") - recovery.VerifyQueriesUsingVtgate(t, session, "select msg from vt_insert_test where id = 1", `VARCHAR("msgx2")`) + // check that vtgate doesn't route queries to new tablet + recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(2)") + recovery.VerifyQueriesUsingVtgate(t, session, "select msg from vt_insert_test where id = 1", `VARCHAR("msgx1")`) recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select count(*) from %s.vt_insert_test", recoveryKS1), "INT64(1)") recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select msg from %s.vt_insert_test where id = 1", recoveryKS1), `VARCHAR("test1")`) - recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select count(*) from %s.vt_insert_test", recoveryKS2), "INT64(2)") - recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select msg from %s.vt_insert_test where id = 1", recoveryKS2), `VARCHAR("msgx1")`) + recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select count(*) from %s.vt_insert_test", recoveryKS2), "INT64(1)") + recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select msg from %s.vt_insert_test where id = 1", recoveryKS2), `VARCHAR("test1")`) // check that new keyspace is accessible with 'use ks' cluster.ExecuteQueriesUsingVtgate(t, session, "use "+recoveryKS1+"@replica") recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(1)") cluster.ExecuteQueriesUsingVtgate(t, session, "use "+recoveryKS2+"@replica") - recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(2)") + recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(1)") // check that new tablet is accessible with use `ks:shard` cluster.ExecuteQueriesUsingVtgate(t, session, "use `"+recoveryKS1+":0@replica`") recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(1)") cluster.ExecuteQueriesUsingVtgate(t, session, "use `"+recoveryKS2+":0@replica`") - recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(2)") + recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(1)") } // verifyInitialReplication will create schema in primary, insert some data to primary and verify the same data in replica. diff --git a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go index db7784f6459..d5f37dc8604 100644 --- a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go +++ b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go @@ -17,6 +17,8 @@ limitations under the License. package newfeaturetest import ( + "context" + "fmt" "testing" "github.com/stretchr/testify/require" @@ -109,3 +111,38 @@ func TestTabletRestart(t *testing.T) { err := tablets[1].VttabletProcess.Setup() require.NoError(t, err) } + +// Tests ensures that ChangeTabletType works even when semi-sync plugins are not loaded. +func TestChangeTypeWithoutSemiSync(t *testing.T) { + defer cluster.PanicHandler(t) + clusterInstance := utils.SetupReparentCluster(t, "none") + defer utils.TeardownCluster(clusterInstance) + tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets + + ctx := context.Background() + + primary, replica := tablets[0], tablets[1] + + // Unload semi sync plugins + for _, tablet := range tablets[0:4] { + qr := utils.RunSQL(ctx, t, "select @@global.super_read_only", tablet) + result := fmt.Sprintf("%v", qr.Rows[0][0].ToString()) + if result == "1" { + utils.RunSQL(ctx, t, "set global super_read_only = 0", tablet) + } + + utils.RunSQL(ctx, t, "UNINSTALL PLUGIN rpl_semi_sync_slave;", tablet) + utils.RunSQL(ctx, t, "UNINSTALL PLUGIN rpl_semi_sync_master;", tablet) + } + + utils.ValidateTopology(t, clusterInstance, true) + utils.CheckPrimaryTablet(t, clusterInstance, primary) + + // Change replica's type to rdonly + err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly") + require.NoError(t, err) + + // Change tablets type from rdonly back to replica + err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "replica") + require.NoError(t, err) +} diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go index de7e6a0368b..f7afea1431b 100644 --- a/go/test/endtoend/reparent/plannedreparent/reparent_test.go +++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go @@ -20,15 +20,17 @@ import ( "context" "fmt" "strconv" + "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + "google.golang.org/protobuf/encoding/protojson" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/reparent/utils" "vitess.io/vitess/go/vt/log" @@ -123,9 +125,16 @@ func TestReparentReplicaOffline(t *testing.T) { // Perform a graceful reparent operation. out, err := utils.PrsWithTimeout(t, clusterInstance, tablets[1], false, "", "31s") require.Error(t, err) - assert.True(t, utils.SetReplicationSourceFailed(tablets[3], out)) - utils.CheckPrimaryTablet(t, clusterInstance, tablets[1]) + // Assert that PRS failed + if clusterInstance.VtctlMajorVersion <= 17 { + assert.True(t, utils.SetReplicationSourceFailed(tablets[3], out)) + utils.CheckPrimaryTablet(t, clusterInstance, tablets[1]) + } else { + assert.Contains(t, out, "rpc error: code = DeadlineExceeded desc") + utils.CheckPrimaryTablet(t, clusterInstance, tablets[0]) + } + } func TestReparentAvoid(t *testing.T) { @@ -155,7 +164,11 @@ func TestReparentAvoid(t *testing.T) { utils.StopTablet(t, tablets[0], true) out, err := utils.PrsAvoid(t, clusterInstance, tablets[1]) require.Error(t, err) - assert.Contains(t, out, "cannot find a tablet to reparent to in the same cell as the current primary") + if clusterInstance.VtctlMajorVersion <= 17 { + assert.Contains(t, out, "cannot find a tablet to reparent to in the same cell as the current primary") + } else { + assert.Contains(t, out, "rpc error: code = DeadlineExceeded desc = latest balancer error") + } utils.ValidateTopology(t, clusterInstance, false) utils.CheckPrimaryTablet(t, clusterInstance, tablets[1]) } @@ -275,17 +288,24 @@ func TestReparentWithDownReplica(t *testing.T) { // Perform a graceful reparent operation. It will fail as one tablet is down. out, err := utils.Prs(t, clusterInstance, tablets[1]) require.Error(t, err) - assert.True(t, utils.SetReplicationSourceFailed(tablets[2], out)) - - // insert data into the new primary, check the connected replica work - insertVal := utils.ConfirmReplication(t, tablets[1], []*cluster.Vttablet{tablets[0], tablets[3]}) + var insertVal int + // Assert that PRS failed + if clusterInstance.VtctlMajorVersion <= 17 { + assert.True(t, utils.SetReplicationSourceFailed(tablets[2], out)) + // insert data into the new primary, check the connected replica work + insertVal = utils.ConfirmReplication(t, tablets[1], []*cluster.Vttablet{tablets[0], tablets[3]}) + } else { + assert.Contains(t, out, fmt.Sprintf("TabletManager.PrimaryStatus on %s error", tablets[2].Alias)) + // insert data into the old primary, check the connected replica works. The primary tablet shouldn't have changed. + insertVal = utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[3]}) + } // restart mysql on the old replica, should still be connecting to the old primary tablets[2].MysqlctlProcess.InitMysql = false err = tablets[2].MysqlctlProcess.Start() require.NoError(t, err) - // Use the same PlannedReparentShard command to fix up the tablet. + // Use the same PlannedReparentShard command to promote the new primary. _, err = utils.Prs(t, clusterInstance, tablets[1]) require.NoError(t, err) @@ -418,7 +438,8 @@ func TestFullStatus(t *testing.T) { primaryStatusString, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetFullStatus", primaryTablet.Alias) require.NoError(t, err) primaryStatus := &replicationdatapb.FullStatus{} - err = protojson.Unmarshal([]byte(primaryStatusString), primaryStatus) + opt := protojson.UnmarshalOptions{DiscardUnknown: true} + err = opt.Unmarshal([]byte(primaryStatusString), primaryStatus) require.NoError(t, err) assert.NotEmpty(t, primaryStatus.ServerUuid) assert.NotEmpty(t, primaryStatus.ServerId) @@ -427,6 +448,14 @@ func TestFullStatus(t *testing.T) { assert.Contains(t, primaryStatus.PrimaryStatus.String(), "vt-0000000101-bin") assert.Equal(t, primaryStatus.GtidPurged, "MySQL56/") assert.False(t, primaryStatus.ReadOnly) + vtTabletVersion, err := cluster.GetMajorVersion("vttablet") + require.NoError(t, err) + vtcltlVersion, err := cluster.GetMajorVersion("vtctl") + require.NoError(t, err) + // For all version at or above v17.0.0, each replica will start in super_read_only mode. + if vtTabletVersion >= 17 && vtcltlVersion >= 17 { + assert.False(t, primaryStatus.SuperReadOnly) + } assert.True(t, primaryStatus.SemiSyncPrimaryEnabled) assert.True(t, primaryStatus.SemiSyncReplicaEnabled) assert.True(t, primaryStatus.SemiSyncPrimaryStatus) @@ -450,13 +479,14 @@ func TestFullStatus(t *testing.T) { replicaStatusString, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetFullStatus", replicaTablet.Alias) require.NoError(t, err) replicaStatus := &replicationdatapb.FullStatus{} - err = protojson.Unmarshal([]byte(replicaStatusString), replicaStatus) + opt = protojson.UnmarshalOptions{DiscardUnknown: true} + err = opt.Unmarshal([]byte(replicaStatusString), replicaStatus) require.NoError(t, err) assert.NotEmpty(t, replicaStatus.ServerUuid) assert.NotEmpty(t, replicaStatus.ServerId) assert.Contains(t, replicaStatus.ReplicationStatus.Position, "MySQL56/"+replicaStatus.ReplicationStatus.SourceUuid) - assert.EqualValues(t, mysql.ReplicationStateRunning, replicaStatus.ReplicationStatus.IoState) - assert.EqualValues(t, mysql.ReplicationStateRunning, replicaStatus.ReplicationStatus.SqlState) + assert.EqualValues(t, replication.ReplicationStateRunning, replicaStatus.ReplicationStatus.IoState) + assert.EqualValues(t, replication.ReplicationStateRunning, replicaStatus.ReplicationStatus.SqlState) assert.Equal(t, fileNameFromPosition(replicaStatus.ReplicationStatus.FilePosition), fileNameFromPosition(primaryStatus.PrimaryStatus.FilePosition)) assert.LessOrEqual(t, rowNumberFromPosition(replicaStatus.ReplicationStatus.FilePosition), rowNumberFromPosition(primaryStatus.PrimaryStatus.FilePosition)) assert.Equal(t, replicaStatus.ReplicationStatus.RelayLogSourceBinlogEquivalentPosition, primaryStatus.PrimaryStatus.FilePosition) @@ -479,6 +509,10 @@ func TestFullStatus(t *testing.T) { assert.Contains(t, replicaStatus.PrimaryStatus.String(), "vt-0000000102-bin") assert.Equal(t, replicaStatus.GtidPurged, "MySQL56/") assert.True(t, replicaStatus.ReadOnly) + // For all version at or above v17.0.0, each replica will start in super_read_only mode. + if vtTabletVersion >= 17 && vtcltlVersion >= 17 { + assert.True(t, replicaStatus.SuperReadOnly) + } assert.False(t, replicaStatus.SemiSyncPrimaryEnabled) assert.True(t, replicaStatus.SemiSyncReplicaEnabled) assert.False(t, replicaStatus.SemiSyncPrimaryStatus) @@ -499,7 +533,8 @@ func getFullStatus(t *testing.T, clusterInstance *cluster.LocalProcessCluster, t statusString, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetFullStatus", tablet.Alias) require.NoError(t, err) status := &replicationdatapb.FullStatus{} - err = protojson.Unmarshal([]byte(statusString), status) + opt := protojson.UnmarshalOptions{DiscardUnknown: true} + err = opt.Unmarshal([]byte(statusString), status) require.NoError(t, err) return status } @@ -523,7 +558,16 @@ func waitForFilePosition(t *testing.T, clusterInstance *cluster.LocalProcessClus // fileNameFromPosition gets the file name from the position func fileNameFromPosition(pos string) string { - return pos[0 : len(pos)-4] + s := strings.SplitN(pos, ":", 2) + if len(s) != 2 { + return "" + } + return s[0] +} + +func TestFileNameFromPosition(t *testing.T) { + assert.Equal(t, "", fileNameFromPosition("shouldfail")) + assert.Equal(t, "FilePos/vt-0000000101-bin.000001", fileNameFromPosition("FilePos/vt-0000000101-bin.000001:123456789")) } // rowNumberFromPosition gets the row number from the position diff --git a/go/test/endtoend/reparent/prssettingspool/main_test.go b/go/test/endtoend/reparent/prssettingspool/main_test.go new file mode 100644 index 00000000000..a9f4312caea --- /dev/null +++ b/go/test/endtoend/reparent/prssettingspool/main_test.go @@ -0,0 +1,148 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package misc + +import ( + "context" + _ "embed" + "flag" + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + rutils "vitess.io/vitess/go/test/endtoend/reparent/utils" + "vitess.io/vitess/go/test/endtoend/utils" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + keyspaceName = "ks" + cell = "test" + + //go:embed schema.sql + schemaSQL string +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + return 1 + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: schemaSQL, + } + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, + "--queryserver-enable-settings-pool") + err = clusterInstance.StartUnshardedKeyspace(*keyspace, 2, false) + if err != nil { + return 1 + } + + // Start vtgate + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--planner-version", "gen4") + err = clusterInstance.StartVtgate() + if err != nil { + return 1 + } + + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + return m.Run() + }() + os.Exit(exitCode) +} + +func TestSettingsPoolWithTXAndPRS(t *testing.T) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + // set a system settings that will trigger reserved connection usage. + utils.Exec(t, conn, "set default_week_format = 5") + + // have transaction on the session + utils.Exec(t, conn, "begin") + utils.Exec(t, conn, "select id1, id2 from t1") + utils.Exec(t, conn, "commit") + + tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets + + // prs should happen without any error. + text, err := rutils.Prs(t, clusterInstance, tablets[1]) + require.NoError(t, err, text) + rutils.WaitForTabletToBeServing(t, clusterInstance, tablets[0], 1*time.Minute) + + defer func() { + // reset state + text, err = rutils.Prs(t, clusterInstance, tablets[0]) + require.NoError(t, err, text) + rutils.WaitForTabletToBeServing(t, clusterInstance, tablets[1], 1*time.Minute) + }() + + // no error should occur and it should go to the right tablet. + utils.Exec(t, conn, "select id1, id2 from t1") +} + +func TestSettingsPoolWithoutTXAndPRS(t *testing.T) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + // set a system settings that will trigger reserved connection usage. + utils.Exec(t, conn, "set default_week_format = 5") + + // execute non-tx query + utils.Exec(t, conn, "select id1, id2 from t1") + + tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets + + // prs should happen without any error. + text, err := rutils.Prs(t, clusterInstance, tablets[1]) + require.NoError(t, err, text) + rutils.WaitForTabletToBeServing(t, clusterInstance, tablets[0], 1*time.Minute) + defer func() { + // reset state + text, err = rutils.Prs(t, clusterInstance, tablets[0]) + require.NoError(t, err, text) + rutils.WaitForTabletToBeServing(t, clusterInstance, tablets[1], 1*time.Minute) + }() + + // no error should occur and it should go to the right tablet. + utils.Exec(t, conn, "select id1, id2 from t1") + +} diff --git a/go/test/endtoend/reparent/prssettingspool/schema.sql b/go/test/endtoend/reparent/prssettingspool/schema.sql new file mode 100644 index 00000000000..3e78cab09d6 --- /dev/null +++ b/go/test/endtoend/reparent/prssettingspool/schema.sql @@ -0,0 +1,5 @@ +create table t1( + id1 bigint, + id2 bigint, + primary key(id1) +) Engine=InnoDB; \ No newline at end of file diff --git a/go/test/endtoend/reparent/utils/utils.go b/go/test/endtoend/reparent/utils/utils.go index c2ab9d48306..170f0b1575b 100644 --- a/go/test/endtoend/reparent/utils/utils.go +++ b/go/test/endtoend/reparent/utils/utils.go @@ -31,6 +31,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/vttablet/tabletconn" + "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" @@ -72,9 +75,25 @@ func SetupRangeBasedCluster(ctx context.Context, t *testing.T) *cluster.LocalPro return setupCluster(ctx, t, ShardName, []string{cell1}, []int{2}, "semi_sync") } -// TeardownCluster is used to teardown the reparent cluster +// TeardownCluster is used to teardown the reparent cluster. When +// run in a CI environment -- which is considered true when the +// "CI" env variable is set to "true" -- the teardown also removes +// the VTDATAROOT directory that was used for the test/cluster. func TeardownCluster(clusterInstance *cluster.LocalProcessCluster) { + usedRoot := clusterInstance.CurrentVTDATAROOT clusterInstance.Teardown() + // This is always set to "true" on GitHub Actions runners: + // https://docs.github.com/en/actions/learn-github-actions/variables#default-environment-variables + ci, ok := os.LookupEnv("CI") + if !ok || strings.ToLower(ci) != "true" { + // Leave the directory in place to support local debugging. + return + } + // We're running in the CI, so free up disk space for any + // subsequent tests. + if err := os.RemoveAll(usedRoot); err != nil { + log.Errorf("Failed to remove previously used VTDATAROOT (%s): %v", usedRoot, err) + } } func setupCluster(ctx context.Context, t *testing.T, shardName string, cells []string, numTablets []int, durability string) *cluster.LocalProcessCluster { @@ -193,7 +212,9 @@ func StartNewVTTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster shard := keyspace.Shards[0] // Setup MysqlctlProcess - tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory) + mysqlctlProcess, err := cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory) + require.NoError(t, err) + tablet.MysqlctlProcess = *mysqlctlProcess // Setup VttabletProcess tablet.VttabletProcess = cluster.VttabletProcessInstance( tablet.HTTPPort, @@ -607,7 +628,7 @@ func CheckReparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProces streamHealthResponse := shrs[0] assert.Equal(t, streamHealthResponse.Target.TabletType, topodatapb.TabletType_PRIMARY) - assert.True(t, streamHealthResponse.TabletExternallyReparentedTimestamp >= baseTime) + assert.True(t, streamHealthResponse.PrimaryTermStartTimestamp >= baseTime) } // WaitForReplicationPosition waits for tablet B to catch up to the replication position of tablet A. @@ -693,3 +714,24 @@ func CheckReplicationStatus(ctx context.Context, t *testing.T, tablet *cluster.V require.Equal(t, "No", res.Rows[0][11].ToString()) } } + +func WaitForTabletToBeServing(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet, timeout time.Duration) { + vTablet, err := clusterInstance.VtctlclientGetTablet(tablet) + require.NoError(t, err) + + tConn, err := tabletconn.GetDialer()(vTablet, false) + require.NoError(t, err) + + newCtx, cancel := context.WithTimeout(context.Background(), timeout) + err = tConn.StreamHealth(newCtx, func(shr *querypb.StreamHealthResponse) error { + if shr.Serving { + cancel() + } + return nil + }) + + // the error should only be because we cancelled the context when the tablet became serving again. + if err != nil && !strings.Contains(err.Error(), "context canceled") { + t.Fatal(err.Error()) + } +} diff --git a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go index e76c8f701ab..2dc79840018 100644 --- a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go +++ b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go @@ -25,6 +25,7 @@ import ( "regexp" "strings" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -33,6 +34,7 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/onlineddl" + "vitess.io/vitess/go/test/endtoend/throttler" "vitess.io/vitess/go/vt/schemadiff" "vitess.io/vitess/go/vt/sqlparser" ) @@ -83,13 +85,10 @@ func TestMain(m *testing.M) { clusterInstance.VtctldExtraArgs = []string{ "--schema_change_dir", schemaChangeDirectory, "--schema_change_controller", "local", - "--schema_change_check_interval", "1", + "--schema_change_check_interval", "1s", } clusterInstance.VtTabletExtraArgs = []string{ - "--enable-lag-throttler", - "--throttle_threshold", "1s", - "--heartbeat_enable", "--heartbeat_interval", "250ms", "--heartbeat_on_demand_duration", "5s", "--migration_check_interval", "5s", @@ -139,6 +138,8 @@ func TestSchemaChange(t *testing.T) { shards := clusterInstance.Keyspaces[0].Shards require.Equal(t, 1, len(shards)) + throttler.EnableLagThrottlerAndWaitForStatus(t, clusterInstance, time.Second) + files, err := os.ReadDir(testDataPath) require.NoError(t, err) for _, f := range files { diff --git a/go/test/endtoend/sharded/sharded_keyspace_test.go b/go/test/endtoend/sharded/sharded_keyspace_test.go index d5f5e5b2255..857dc455206 100644 --- a/go/test/endtoend/sharded/sharded_keyspace_test.go +++ b/go/test/endtoend/sharded/sharded_keyspace_test.go @@ -26,6 +26,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" ) @@ -83,7 +84,7 @@ func TestMain(m *testing.M) { if err := clusterInstance.StartTopo(); err != nil { return 1, err } - if err := clusterInstance.VtctlProcess.CreateKeyspace(keyspaceName); err != nil { + if err := clusterInstance.VtctlProcess.CreateKeyspace(keyspaceName, sidecar.DefaultName); err != nil { return 1, err } @@ -215,7 +216,11 @@ func initCluster(shardNames []string, totalTabletsRequired int) { tablet.Type = "primary" } // Start Mysqlctl process - tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory) + mysqlctlProcess, err := cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory) + if err != nil { + return + } + tablet.MysqlctlProcess = *mysqlctlProcess proc, err := tablet.MysqlctlProcess.StartProcess() if err != nil { return diff --git a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go index 843c6800622..8a3dd4f9b73 100644 --- a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go +++ b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go @@ -251,7 +251,6 @@ func (bt *BufferingTest) createCluster() (*cluster.LocalProcessCluster, int) { if err := clusterInstance.StartVtgate(); err != nil { return nil, 1 } - rand.Seed(time.Now().UnixNano()) return clusterInstance, 0 } diff --git a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go index 0d68c7a2521..aa09a99e0fe 100644 --- a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go +++ b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go @@ -71,7 +71,7 @@ func TestTopoCustomRule(t *testing.T) { require.Nil(t, err, "error should be Nil") // Start Vttablet - err = clusterInstance.StartVttablet(rTablet, "SERVING", false, cell, keyspaceName, hostname, shardName) + err = clusterInstance.StartVttablet(rTablet, false, "SERVING", false, cell, keyspaceName, hostname, shardName) require.Nil(t, err, "error should be Nil") err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate") @@ -119,5 +119,5 @@ func TestTopoCustomRule(t *testing.T) { // Reset the VtTabletExtraArgs clusterInstance.VtTabletExtraArgs = []string{} // Tear down custom processes - killTablets(t, rTablet) + killTablets(rTablet) } diff --git a/go/test/endtoend/tabletmanager/main_test.go b/go/test/endtoend/tabletmanager/main_test.go index 39f4830b33d..1d5992bd839 100644 --- a/go/test/endtoend/tabletmanager/main_test.go +++ b/go/test/endtoend/tabletmanager/main_test.go @@ -95,6 +95,7 @@ func TestMain(m *testing.M) { // List of users authorized to execute vschema ddl operations clusterInstance.VtGateExtraArgs = []string{ "--vschema_ddl_authorized_users=%", + "--enable-views", "--discovery_low_replication_lag", tabletUnhealthyThreshold.String(), } // Set extra tablet args for lock timeout diff --git a/go/test/endtoend/tabletmanager/primary/tablet_test.go b/go/test/endtoend/tabletmanager/primary/tablet_test.go index 3db692694b5..f6255b1f71a 100644 --- a/go/test/endtoend/tabletmanager/primary/tablet_test.go +++ b/go/test/endtoend/tabletmanager/primary/tablet_test.go @@ -155,11 +155,11 @@ func TestRepeatedInitShardPrimary(t *testing.T) { checkTabletType(t, replicaTablet.Alias, "REPLICA") } -func TestPrimaryRestartSetsTERTimestamp(t *testing.T) { +func TestPrimaryRestartSetsPTSTimestamp(t *testing.T) { defer cluster.PanicHandler(t) - // Test that TER timestamp is set when we restart the PRIMARY vttablet. - // TER = TabletExternallyReparented. - // See StreamHealthResponse.tablet_externally_reparented_timestamp for details. + // Test that PTS timestamp is set when we restart the PRIMARY vttablet. + // PTS = PrimaryTermStart. + // See StreamHealthResponse.primary_term_start_timestamp for details. // Make replica as primary err := clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID) @@ -168,7 +168,7 @@ func TestPrimaryRestartSetsTERTimestamp(t *testing.T) { err = replicaTablet.VttabletProcess.WaitForTabletStatus("SERVING") require.NoError(t, err) - // Capture the current TER. + // Capture the current PTS. shrs, err := clusterInstance.StreamTabletHealth(context.Background(), &replicaTablet, 1) require.NoError(t, err) @@ -178,9 +178,9 @@ func TestPrimaryRestartSetsTERTimestamp(t *testing.T) { got := fmt.Sprintf("%d", actualType) want := fmt.Sprintf("%d", tabletType) assert.Equal(t, want, got) - assert.NotNil(t, streamHealthRes1.GetTabletExternallyReparentedTimestamp()) - assert.True(t, streamHealthRes1.GetTabletExternallyReparentedTimestamp() > 0, - "TER on PRIMARY must be set after InitShardPrimary") + assert.NotNil(t, streamHealthRes1.GetPrimaryTermStartTimestamp()) + assert.True(t, streamHealthRes1.GetPrimaryTermStartTimestamp() > 0, + "PTS on PRIMARY must be set after InitShardPrimary") // Restart the PRIMARY vttablet and test again @@ -189,10 +189,10 @@ func TestPrimaryRestartSetsTERTimestamp(t *testing.T) { require.NoError(t, err) // Start Vttablet - err = clusterInstance.StartVttablet(&replicaTablet, "SERVING", false, cell, keyspaceName, hostname, shardName) + err = clusterInstance.StartVttablet(&replicaTablet, false, "SERVING", false, cell, keyspaceName, hostname, shardName) require.NoError(t, err) - // Make sure that the TER did not change + // Make sure that the PTS did not change shrs, err = clusterInstance.StreamTabletHealth(context.Background(), &replicaTablet, 1) require.NoError(t, err) @@ -204,12 +204,12 @@ func TestPrimaryRestartSetsTERTimestamp(t *testing.T) { want = fmt.Sprintf("%d", tabletType) assert.Equal(t, want, got) - assert.NotNil(t, streamHealthRes2.GetTabletExternallyReparentedTimestamp()) - assert.True(t, streamHealthRes2.GetTabletExternallyReparentedTimestamp() == streamHealthRes1.GetTabletExternallyReparentedTimestamp(), + assert.NotNil(t, streamHealthRes2.GetPrimaryTermStartTimestamp()) + assert.True(t, streamHealthRes2.GetPrimaryTermStartTimestamp() == streamHealthRes1.GetPrimaryTermStartTimestamp(), fmt.Sprintf("When the PRIMARY vttablet was restarted, "+ - "the TER timestamp must be set by reading the old value from the tablet record. Old: %d, New: %d", - streamHealthRes1.GetTabletExternallyReparentedTimestamp(), - streamHealthRes2.GetTabletExternallyReparentedTimestamp())) + "the PTS timestamp must be set by reading the old value from the tablet record. Old: %d, New: %d", + streamHealthRes1.GetPrimaryTermStartTimestamp(), + streamHealthRes2.GetPrimaryTermStartTimestamp())) // Reset primary err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID) diff --git a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go index eca52cbb106..1b43ecf2d90 100644 --- a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go +++ b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go @@ -97,7 +97,6 @@ func TestMain(m *testing.M) { "--lock_tables_timeout", "5s", "--watch_replication_stream", "--enable_replication_reporter", - "--heartbeat_enable", "--heartbeat_interval", "250ms", "--gc_check_interval", gcCheckInterval.String(), "--gc_purge_check_interval", gcPurgeCheckInterval.String(), diff --git a/go/test/endtoend/tabletmanager/tablet_health_test.go b/go/test/endtoend/tabletmanager/tablet_health_test.go index 19359406607..83a3ce08cfb 100644 --- a/go/test/endtoend/tabletmanager/tablet_health_test.go +++ b/go/test/endtoend/tabletmanager/tablet_health_test.go @@ -21,6 +21,7 @@ import ( "encoding/json" "fmt" "net/http" + "slices" "sync" "testing" "time" @@ -32,7 +33,6 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" - querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -73,7 +73,7 @@ func TestTabletReshuffle(t *testing.T) { // SupportsBackup=False prevents vttablet from trying to restore // Start vttablet process - err = clusterInstance.StartVttablet(rTablet, "SERVING", false, cell, keyspaceName, hostname, shardName) + err = clusterInstance.StartVttablet(rTablet, false, "SERVING", false, cell, keyspaceName, hostname, shardName) require.NoError(t, err) sql := "select value from t1" @@ -87,13 +87,15 @@ func TestTabletReshuffle(t *testing.T) { err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", rTablet.Alias) assert.Error(t, err, "cannot perform backup without my.cnf") - killTablets(t, rTablet) + killTablets(rTablet) } func TestHealthCheck(t *testing.T) { // Add one replica that starts not initialized defer cluster.PanicHandler(t) ctx := context.Background() + clusterInstance.DisableVTOrcRecoveries(t) + defer clusterInstance.EnableVTOrcRecoveries(t) rTablet := clusterInstance.NewVttabletInstance("replica", 0, "") @@ -103,11 +105,8 @@ func TestHealthCheck(t *testing.T) { defer replicaConn.Close() - // Create database in mysql - utils.Exec(t, replicaConn, fmt.Sprintf("create database vt_%s", keyspaceName)) - // start vttablet process, should be in SERVING state as we already have a primary - err = clusterInstance.StartVttablet(rTablet, "SERVING", false, cell, keyspaceName, hostname, shardName) + err = clusterInstance.StartVttablet(rTablet, true, "SERVING", false, cell, keyspaceName, hostname, shardName) require.NoError(t, err) conn, err := mysql.Connect(ctx, &primaryTabletParams) @@ -195,7 +194,141 @@ func TestHealthCheck(t *testing.T) { } // Manual cleanup of processes - killTablets(t, rTablet) + killTablets(rTablet) +} + +// TestHealthCheckSchemaChangeSignal tests the tables and views, which report their schemas have changed in the output of a StreamHealth. +func TestHealthCheckSchemaChangeSignal(t *testing.T) { + // Add one replica that starts not initialized + defer cluster.PanicHandler(t) + ctx := context.Background() + + vtParams := clusterInstance.GetVTParams(keyspaceName) + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + // Make sure the primary is the primary when the test starts. + // This state should be ensured before we actually test anything. + checkTabletType(t, primaryTablet.Alias, "PRIMARY") + + // Run a bunch of DDL queries and verify that the tables/views changed show up in the health stream. + // These tests are for the part where `--queryserver-enable-views` flag is not set. + verifyHealthStreamSchemaChangeSignals(t, conn, &primaryTablet, false) + + // We start a new vttablet, this time with `--queryserver-enable-views` flag specified. + tempTablet := clusterInstance.NewVttabletInstance("replica", 0, "") + // Start Mysql Processes and return connection + _, err = cluster.StartMySQLAndGetConnection(ctx, tempTablet, username, clusterInstance.TmpDirectory) + require.NoError(t, err) + oldArgs := clusterInstance.VtTabletExtraArgs + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-enable-views") + defer func() { + clusterInstance.VtTabletExtraArgs = oldArgs + }() + // start vttablet process, should be in SERVING state as we already have a primary. + err = clusterInstance.StartVttablet(tempTablet, false, "SERVING", false, cell, keyspaceName, hostname, shardName) + require.NoError(t, err) + + defer func() { + // Restore the primary tablet back to the original. + err = clusterInstance.VtctldClientProcess.PlannedReparentShard(keyspaceName, shardName, primaryTablet.Alias) + require.NoError(t, err) + // Manual cleanup of processes + killTablets(tempTablet) + }() + + // Now we reparent the cluster to the new tablet we have. + err = clusterInstance.VtctldClientProcess.PlannedReparentShard(keyspaceName, shardName, tempTablet.Alias) + require.NoError(t, err) + + checkTabletType(t, tempTablet.Alias, "PRIMARY") + // Run a bunch of DDL queries and verify that the tables/views changed show up in the health stream. + // These tests are for the part where `--queryserver-enable-views` flag is set. + verifyHealthStreamSchemaChangeSignals(t, conn, tempTablet, true) +} + +func verifyHealthStreamSchemaChangeSignals(t *testing.T, vtgateConn *mysql.Conn, primaryTablet *cluster.Vttablet, viewsEnabled bool) { + var streamErr error + wg := sync.WaitGroup{} + wg.Add(1) + ranOnce := false + finished := false + ch := make(chan *querypb.StreamHealthResponse) + go func() { + defer wg.Done() + streamErr = clusterInstance.StreamTabletHealthUntil(context.Background(), primaryTablet, 30*time.Second, func(shr *querypb.StreamHealthResponse) bool { + ranOnce = true + // If we are finished, then close the channel and end the stream. + if finished { + close(ch) + return true + } + // Put the response in the channel. + ch <- shr + return false + }) + }() + // The test becomes flaky if we run the DDL immediately after starting the above go routine because the client for the Stream + // sometimes isn't registered by the time DDL runs, and it misses the update we get. To prevent this situation, we wait for one Stream packet + // to have returned. Once we know we received a Stream packet, then we know that we are registered for the health stream and can execute the DDL. + for i := 0; i < 30; i++ { + if ranOnce { + break + } + time.Sleep(1 * time.Second) + } + + verifyTableDDLSchemaChangeSignal(t, vtgateConn, ch, "CREATE TABLE `area` (`id` int NOT NULL, `country` varchar(30), PRIMARY KEY (`id`))", "area") + verifyTableDDLSchemaChangeSignal(t, vtgateConn, ch, "CREATE TABLE `area2` (`id` int NOT NULL, PRIMARY KEY (`id`))", "area2") + verifyViewDDLSchemaChangeSignal(t, vtgateConn, ch, "CREATE VIEW v2 as select * from t1", viewsEnabled) + verifyTableDDLSchemaChangeSignal(t, vtgateConn, ch, "ALTER TABLE `area` ADD COLUMN name varchar(30) NOT NULL", "area") + verifyTableDDLSchemaChangeSignal(t, vtgateConn, ch, "DROP TABLE `area2`", "area2") + verifyViewDDLSchemaChangeSignal(t, vtgateConn, ch, "ALTER VIEW v2 as select id from t1", viewsEnabled) + verifyViewDDLSchemaChangeSignal(t, vtgateConn, ch, "DROP VIEW v2", viewsEnabled) + verifyTableDDLSchemaChangeSignal(t, vtgateConn, ch, "DROP TABLE `area`", "area") + + finished = true + wg.Wait() + require.NoError(t, streamErr) +} + +func verifyTableDDLSchemaChangeSignal(t *testing.T, vtgateConn *mysql.Conn, ch chan *querypb.StreamHealthResponse, query string, table string) { + _, err := vtgateConn.ExecuteFetch(query, 10000, false) + require.NoError(t, err) + + timeout := time.After(15 * time.Second) + for { + select { + case shr := <-ch: + if shr != nil && shr.RealtimeStats != nil && slices.Contains(shr.RealtimeStats.TableSchemaChanged, table) { + return + } + case <-timeout: + t.Errorf("didn't get the correct tables changed in stream response until timeout") + } + } +} + +func verifyViewDDLSchemaChangeSignal(t *testing.T, vtgateConn *mysql.Conn, ch chan *querypb.StreamHealthResponse, query string, viewsEnabled bool) { + _, err := vtgateConn.ExecuteFetch(query, 10000, false) + require.NoError(t, err) + + timeout := time.After(15 * time.Second) + for { + select { + case shr := <-ch: + listToUse := shr.RealtimeStats.TableSchemaChanged + if viewsEnabled { + listToUse = shr.RealtimeStats.ViewSchemaChanged + } + if shr != nil && shr.RealtimeStats != nil && slices.Contains(listToUse, "v2") { + return + } + case <-timeout: + t.Errorf("didn't get the correct views changed in stream response until timeout") + } + } } func checkHealth(t *testing.T, port int, shouldError bool) { @@ -248,8 +381,10 @@ func TestHealthCheckDrainedStateDoesNotShutdownQueryService(t *testing.T) { // - the second tablet will be set to 'drained' and we expect that // - the query service won't be shutdown - //Wait if tablet is not in service state + // Wait if tablet is not in service state defer cluster.PanicHandler(t) + clusterInstance.DisableVTOrcRecoveries(t) + defer clusterInstance.EnableVTOrcRecoveries(t) err := rdonlyTablet.VttabletProcess.WaitForTabletStatus("SERVING") require.NoError(t, err) @@ -287,7 +422,7 @@ func TestHealthCheckDrainedStateDoesNotShutdownQueryService(t *testing.T) { checkHealth(t, rdonlyTablet.HTTPPort, false) } -func killTablets(t *testing.T, tablets ...*cluster.Vttablet) { +func killTablets(tablets ...*cluster.Vttablet) { var wg sync.WaitGroup for _, tablet := range tablets { wg.Add(1) @@ -295,6 +430,7 @@ func killTablets(t *testing.T, tablets ...*cluster.Vttablet) { defer wg.Done() _ = tablet.VttabletProcess.TearDown() _ = tablet.MysqlctlProcess.Stop() + _ = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias) }(tablet) } wg.Wait() diff --git a/go/test/endtoend/tabletmanager/tablet_security_policy_test.go b/go/test/endtoend/tabletmanager/tablet_security_policy_test.go index a2e1e8bd987..b3b11405abb 100644 --- a/go/test/endtoend/tabletmanager/tablet_security_policy_test.go +++ b/go/test/endtoend/tabletmanager/tablet_security_policy_test.go @@ -39,7 +39,7 @@ func TestFallbackSecurityPolicy(t *testing.T) { // Requesting an unregistered security_policy should fallback to deny-all. clusterInstance.VtTabletExtraArgs = []string{"--security_policy", "bogus"} - err = clusterInstance.StartVttablet(mTablet, "SERVING", false, cell, keyspaceName, hostname, shardName) + err = clusterInstance.StartVttablet(mTablet, false, "SERVING", false, cell, keyspaceName, hostname, shardName) require.NoError(t, err) // It should deny ADMIN role. @@ -57,7 +57,7 @@ func TestFallbackSecurityPolicy(t *testing.T) { // Reset the VtTabletExtraArgs clusterInstance.VtTabletExtraArgs = []string{} // Tear down custom processes - killTablets(t, mTablet) + killTablets(mTablet) } func assertNotAllowedURLTest(t *testing.T, url string) { @@ -94,7 +94,7 @@ func TestDenyAllSecurityPolicy(t *testing.T) { // Requesting a deny-all security_policy. clusterInstance.VtTabletExtraArgs = []string{"--security_policy", "deny-all"} - err = clusterInstance.StartVttablet(mTablet, "SERVING", false, cell, keyspaceName, hostname, shardName) + err = clusterInstance.StartVttablet(mTablet, false, "SERVING", false, cell, keyspaceName, hostname, shardName) require.NoError(t, err) // It should deny ADMIN role. @@ -112,7 +112,7 @@ func TestDenyAllSecurityPolicy(t *testing.T) { // Reset the VtTabletExtraArgs clusterInstance.VtTabletExtraArgs = []string{} // Tear down custom processes - killTablets(t, mTablet) + killTablets(mTablet) } func TestReadOnlySecurityPolicy(t *testing.T) { @@ -126,7 +126,7 @@ func TestReadOnlySecurityPolicy(t *testing.T) { // Requesting a read-only security_policy. clusterInstance.VtTabletExtraArgs = []string{"--security_policy", "read-only"} - err = clusterInstance.StartVttablet(mTablet, "SERVING", false, cell, keyspaceName, hostname, shardName) + err = clusterInstance.StartVttablet(mTablet, false, "SERVING", false, cell, keyspaceName, hostname, shardName) require.NoError(t, err) // It should deny ADMIN role. @@ -144,5 +144,5 @@ func TestReadOnlySecurityPolicy(t *testing.T) { // Reset the VtTabletExtraArgs clusterInstance.VtTabletExtraArgs = []string{} // Tear down custom processes - killTablets(t, mTablet) + killTablets(mTablet) } diff --git a/go/test/endtoend/tabletmanager/tablet_test.go b/go/test/endtoend/tabletmanager/tablet_test.go index 3c597e97981..4fe5a70d125 100644 --- a/go/test/endtoend/tabletmanager/tablet_test.go +++ b/go/test/endtoend/tabletmanager/tablet_test.go @@ -34,13 +34,16 @@ func TestEnsureDB(t *testing.T) { // Create new tablet tablet := clusterInstance.NewVttabletInstance("replica", 0, "") - tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory) - err := tablet.MysqlctlProcess.Start() + mysqlctlProcess, err := cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory) + require.NoError(t, err) + + tablet.MysqlctlProcess = *mysqlctlProcess + err = tablet.MysqlctlProcess.Start() require.NoError(t, err) log.Info(fmt.Sprintf("Started vttablet %v", tablet)) // Start vttablet process as replica. It won't be able to serve because there's no db. - err = clusterInstance.StartVttablet(tablet, "NOT_SERVING", false, cell, "dbtest", hostname, "0") + err = clusterInstance.StartVttablet(tablet, false, "NOT_SERVING", false, cell, "dbtest", hostname, "0") require.NoError(t, err) // Make it the primary. @@ -58,7 +61,7 @@ func TestEnsureDB(t *testing.T) { require.NoError(t, err) err = tablet.VttabletProcess.WaitForTabletStatus("SERVING") require.NoError(t, err) - killTablets(t, tablet) + killTablets(tablet) } // TestResetReplicationParameters tests that the RPC ResetReplicationParameters works as intended. @@ -67,13 +70,15 @@ func TestResetReplicationParameters(t *testing.T) { // Create new tablet tablet := clusterInstance.NewVttabletInstance("replica", 0, "") - tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory) - err := tablet.MysqlctlProcess.Start() + mysqlctlProcess, err := cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory) + require.NoError(t, err) + tablet.MysqlctlProcess = *mysqlctlProcess + err = tablet.MysqlctlProcess.Start() require.NoError(t, err) log.Info(fmt.Sprintf("Started vttablet %v", tablet)) // Start vttablet process as replica. It won't be able to serve because there's no db. - err = clusterInstance.StartVttablet(tablet, "NOT_SERVING", false, cell, "dbtest", hostname, "0") + err = clusterInstance.StartVttablet(tablet, false, "NOT_SERVING", false, cell, "dbtest", hostname, "0") require.NoError(t, err) // Set a replication source on the tablet and start replication diff --git a/go/test/endtoend/tabletmanager/throttler/throttler_test.go b/go/test/endtoend/tabletmanager/throttler/throttler_test.go deleted file mode 100644 index 28d0c287c24..00000000000 --- a/go/test/endtoend/tabletmanager/throttler/throttler_test.go +++ /dev/null @@ -1,319 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package throttler - -import ( - "context" - "flag" - "fmt" - "io" - "net/http" - "os" - "testing" - "time" - - "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" - - "vitess.io/vitess/go/test/endtoend/cluster" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var ( - clusterInstance *cluster.LocalProcessCluster - primaryTablet *cluster.Vttablet - replicaTablet *cluster.Vttablet - hostname = "localhost" - keyspaceName = "ks" - cell = "zone1" - sqlSchema = ` - create table t1( - id bigint, - value varchar(16), - primary key(id) - ) Engine=InnoDB; -` - - vSchema = ` - { - "sharded": true, - "vindexes": { - "hash": { - "type": "hash" - } - }, - "tables": { - "t1": { - "column_vindexes": [ - { - "column": "id", - "name": "hash" - } - ] - } - } - }` - - httpClient = base.SetupHTTPClient(time.Second) - throttledAppsAPIPath = "throttler/throttled-apps" - checkAPIPath = "throttler/check" - checkSelfAPIPath = "throttler/check-self" -) - -const ( - throttlerThreshold = 1 * time.Second // standard, tight threshold - onDemandHeartbeatDuration = 5 * time.Second - applyConfigWait = 15 * time.Second // time after which we're sure the throttler has refreshed config and tablets -) - -func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) - flag.Parse() - - exitCode := func() int { - clusterInstance = cluster.NewCluster(cell, hostname) - defer clusterInstance.Teardown() - - // Start topo server - err := clusterInstance.StartTopo() - if err != nil { - return 1 - } - - // Set extra tablet args for lock timeout - clusterInstance.VtTabletExtraArgs = []string{ - "--lock_tables_timeout", "5s", - "--watch_replication_stream", - "--enable_replication_reporter", - "--enable-lag-throttler", - "--throttle_threshold", throttlerThreshold.String(), - "--heartbeat_enable", - "--heartbeat_interval", "250ms", - "--heartbeat_on_demand_duration", onDemandHeartbeatDuration.String(), - "--disable_active_reparents", - } - - // Start keyspace - keyspace := &cluster.Keyspace{ - Name: keyspaceName, - SchemaSQL: sqlSchema, - VSchema: vSchema, - } - - if err = clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false); err != nil { - return 1 - } - - // Collect table paths and ports - tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets - for _, tablet := range tablets { - if tablet.Type == "primary" { - primaryTablet = tablet - } else if tablet.Type != "rdonly" { - replicaTablet = tablet - } - } - - return m.Run() - }() - os.Exit(exitCode) -} - -func throttledApps(tablet *cluster.Vttablet) (resp *http.Response, respBody string, err error) { - resp, err = httpClient.Get(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, throttledAppsAPIPath)) - if err != nil { - return resp, respBody, err - } - b, err := io.ReadAll(resp.Body) - if err != nil { - return resp, respBody, err - } - respBody = string(b) - return resp, respBody, err -} - -func throttleCheck(tablet *cluster.Vttablet, skipRequestHeartbeats bool) (*http.Response, error) { - return httpClient.Get(fmt.Sprintf("http://localhost:%d/%s?s=%t", tablet.HTTPPort, checkAPIPath, skipRequestHeartbeats)) -} - -func throttleCheckSelf(tablet *cluster.Vttablet) (*http.Response, error) { - return httpClient.Head(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, checkSelfAPIPath)) -} - -func warmUpHeartbeat(t *testing.T) (respStatus int) { - // because we run with -heartbeat_on_demand_duration=5s, the heartbeat is "cold" right now. - // Let's warm it up. - resp, err := throttleCheck(primaryTablet, false) - require.NoError(t, err) - defer resp.Body.Close() - time.Sleep(time.Second) - return resp.StatusCode -} - -// waitForThrottleCheckStatus waits for the tablet to return the provided HTTP code in a throttle check -func waitForThrottleCheckStatus(t *testing.T, tablet *cluster.Vttablet, wantCode int) { - _ = warmUpHeartbeat(t) - ctx, cancel := context.WithTimeout(context.Background(), onDemandHeartbeatDuration+applyConfigWait) - defer cancel() - - for { - resp, err := throttleCheck(tablet, true) - require.NoError(t, err) - - if wantCode == resp.StatusCode { - // Wait for any cached check values to be cleared and the new - // status value to be in effect everywhere before returning. - resp.Body.Close() - return - } - select { - case <-ctx.Done(): - b, err := io.ReadAll(resp.Body) - require.NoError(t, err) - resp.Body.Close() - - assert.Equal(t, wantCode, resp.StatusCode, "body: %v", string(b)) - return - default: - resp.Body.Close() - time.Sleep(time.Second) - } - } -} - -func TestThrottlerAfterMetricsCollected(t *testing.T) { - defer cluster.PanicHandler(t) - - // We run with on-demand heartbeats. Immediately as the tablet manager opens, it sends a one-time - // request for heartbeats, which means the throttler is able to collect initial "good" data. - // After a few seconds, the heartbeat lease terminates. We wait for that. - // {"StatusCode":429,"Value":4.864921,"Threshold":1,"Message":"Threshold exceeded"} - t.Run("expect push back once initial heartbeat lease terminates", func(t *testing.T) { - time.Sleep(onDemandHeartbeatDuration) - waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests) - }) - t.Run("requesting heartbeats", func(t *testing.T) { - respStatus := warmUpHeartbeat(t) - assert.NotEqual(t, http.StatusOK, respStatus) - }) - t.Run("expect OK once heartbeats lease renewed", func(t *testing.T) { - time.Sleep(1 * time.Second) - resp, err := throttleCheck(primaryTablet, false) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) - t.Run("expect OK once heartbeats lease renewed, still", func(t *testing.T) { - time.Sleep(1 * time.Second) - resp, err := throttleCheck(primaryTablet, false) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) - t.Run("validate throttled-apps", func(t *testing.T) { - resp, body, err := throttledApps(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - assert.Contains(t, body, "always-throttled-app") - }) - t.Run("validate check-self", func(t *testing.T) { - resp, err := throttleCheckSelf(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) - t.Run("validate check-self, again", func(t *testing.T) { - resp, err := throttleCheckSelf(replicaTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) -} - -func TestLag(t *testing.T) { - defer cluster.PanicHandler(t) - // Stop VTOrc because we want to stop replication to increase lag. - // We don't want VTOrc to fix this. - clusterInstance.DisableVTOrcRecoveries(t) - defer clusterInstance.EnableVTOrcRecoveries(t) - - t.Run("stopping replication", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", replicaTablet.Alias) - assert.NoError(t, err) - }) - t.Run("accumulating lag, expecting throttler push back", func(t *testing.T) { - time.Sleep(2 * throttlerThreshold) - - resp, err := throttleCheck(primaryTablet, false) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode) - }) - t.Run("primary self-check should still be fine", func(t *testing.T) { - resp, err := throttleCheckSelf(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - // self (on primary) is unaffected by replication lag - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) - t.Run("replica self-check should show error", func(t *testing.T) { - resp, err := throttleCheckSelf(replicaTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode) - }) - t.Run("starting replication", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias) - assert.NoError(t, err) - }) - t.Run("expecting replication to catch up and throttler check to return OK", func(t *testing.T) { - waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) - }) - t.Run("primary self-check should be fine", func(t *testing.T) { - resp, err := throttleCheckSelf(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - // self (on primary) is unaffected by replication lag - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) - t.Run("replica self-check should be fine", func(t *testing.T) { - resp, err := throttleCheckSelf(replicaTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) -} - -func TestNoReplicas(t *testing.T) { - defer cluster.PanicHandler(t) - t.Run("changing replica to RDONLY", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "RDONLY") - assert.NoError(t, err) - - // This makes no REPLICA servers available. We expect something like: - // {"StatusCode":200,"Value":0,"Threshold":1,"Message":""} - waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) - }) - t.Run("restoring to REPLICA", func(t *testing.T) { - - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "REPLICA") - assert.NoError(t, err) - - waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) - }) -} diff --git a/go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go b/go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go deleted file mode 100644 index 04f520defe9..00000000000 --- a/go/test/endtoend/tabletmanager/throttler_custom_config/throttler_test.go +++ /dev/null @@ -1,241 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -package throttler - -import ( - "context" - "flag" - "fmt" - "net/http" - "os" - "sync" - "testing" - "time" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" - - "vitess.io/vitess/go/test/endtoend/cluster" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var ( - clusterInstance *cluster.LocalProcessCluster - primaryTablet *cluster.Vttablet - replicaTablet *cluster.Vttablet - hostname = "localhost" - keyspaceName = "ks" - cell = "zone1" - sqlSchema = ` - create table t1( - id bigint, - value varchar(16), - primary key(id) - ) Engine=InnoDB; -` - - vSchema = ` - { - "sharded": true, - "vindexes": { - "hash": { - "type": "hash" - } - }, - "tables": { - "t1": { - "column_vindexes": [ - { - "column": "id", - "name": "hash" - } - ] - } - } - }` - - httpClient = base.SetupHTTPClient(time.Second) - checkAPIPath = "throttler/check" - checkSelfAPIPath = "throttler/check-self" - vtParams mysql.ConnParams -) - -const ( - testThreshold = 5 - applyConfigWait = 15 * time.Second // time after which we're sure the throttler has refreshed config and tablets -) - -func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) - flag.Parse() - - exitCode := func() int { - clusterInstance = cluster.NewCluster(cell, hostname) - defer clusterInstance.Teardown() - - // Start topo server - err := clusterInstance.StartTopo() - if err != nil { - return 1 - } - - // Set extra tablet args for lock timeout - clusterInstance.VtTabletExtraArgs = []string{ - "--lock_tables_timeout", "5s", - "--watch_replication_stream", - "--enable_replication_reporter", - "--enable-lag-throttler", - "--throttle_metrics_query", "show global status like 'threads_running'", - "--throttle_metrics_threshold", fmt.Sprintf("%d", testThreshold), - "--throttle_check_as_check_self", - "--heartbeat_enable", - "--heartbeat_interval", "250ms", - } - - // Start keyspace - keyspace := &cluster.Keyspace{ - Name: keyspaceName, - SchemaSQL: sqlSchema, - VSchema: vSchema, - } - - if err = clusterInstance.StartUnshardedKeyspace(*keyspace, 0, false); err != nil { - return 1 - } - - // Collect table paths and ports - tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets - for _, tablet := range tablets { - if tablet.Type == "primary" { - primaryTablet = tablet - } else if tablet.Type != "rdonly" { - replicaTablet = tablet - } - } - - vtgateInstance := clusterInstance.NewVtgateInstance() - // Start vtgate - if err := vtgateInstance.Setup(); err != nil { - return 1 - } - // ensure it is torn down during cluster TearDown - clusterInstance.VtgateProcess = *vtgateInstance - vtParams = mysql.ConnParams{ - Host: clusterInstance.Hostname, - Port: clusterInstance.VtgateMySQLPort, - } - - return m.Run() - }() - os.Exit(exitCode) -} - -func throttleCheck(tablet *cluster.Vttablet) (*http.Response, error) { - resp, err := httpClient.Get(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, checkAPIPath)) - return resp, err -} - -func throttleCheckSelf(tablet *cluster.Vttablet) (*http.Response, error) { - return httpClient.Head(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, checkSelfAPIPath)) -} - -func TestThrottlerThresholdOK(t *testing.T) { - defer cluster.PanicHandler(t) - - t.Run("immediately", func(t *testing.T) { - resp, err := throttleCheck(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) - t.Run("after long wait", func(t *testing.T) { - time.Sleep(applyConfigWait) - resp, err := throttleCheck(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) -} - -func TestThreadsRunning(t *testing.T) { - defer cluster.PanicHandler(t) - - sleepDuration := 10 * time.Second - var wg sync.WaitGroup - for i := 0; i < testThreshold; i++ { - // generate different Sleep() calls, all at minimum sleepDuration - wg.Add(1) - go func(i int) { - defer wg.Done() - vtgateExec(t, fmt.Sprintf("select sleep(%d)", int(sleepDuration.Seconds())+i), "") - }(i) - } - t.Run("exceeds threshold", func(t *testing.T) { - time.Sleep(sleepDuration / 2) - // by this time we will have testThreshold+1 threads_running, and we should hit the threshold - // {"StatusCode":429,"Value":2,"Threshold":2,"Message":"Threshold exceeded"} - { - resp, err := throttleCheck(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode) - } - { - resp, err := throttleCheckSelf(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode) - } - }) - t.Run("wait for queries to terminate", func(t *testing.T) { - wg.Wait() - }) - t.Run("restored below threshold", func(t *testing.T) { - { - resp, err := throttleCheck(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - } - { - resp, err := throttleCheckSelf(primaryTablet) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - } - }) -} - -func vtgateExec(t *testing.T, query string, expectError string) *sqltypes.Result { - t.Helper() - - ctx := context.Background() - conn, err := mysql.Connect(ctx, &vtParams) - require.NoError(t, err) - defer conn.Close() - - qr, err := conn.ExecuteFetch(query, 1000, true) - if expectError == "" { - require.NoError(t, err) - } else { - require.Error(t, err, "error should not be nil") - assert.Contains(t, err.Error(), expectError, "Unexpected error") - } - return qr -} diff --git a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go index 78daaed63a0..7c0f05bdcc2 100644 --- a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go +++ b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go @@ -27,16 +27,28 @@ import ( "time" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" - "vitess.io/vitess/go/test/endtoend/cluster" - "vitess.io/vitess/go/test/endtoend/onlineddl" + "vitess.io/vitess/go/test/endtoend/throttler" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +const ( + customQuery = "show global status like 'threads_running'" + customThreshold = 5 + unreasonablyLowThreshold = 1 * time.Millisecond + extremelyHighThreshold = 1 * time.Hour + onDemandHeartbeatDuration = 5 * time.Second + throttlerEnabledTimeout = 60 * time.Second + useDefaultQuery = "" + testAppName = "test" +) + var ( clusterInstance *cluster.LocalProcessCluster primaryTablet *cluster.Vttablet @@ -77,16 +89,10 @@ var ( throttledAppsAPIPath = "throttler/throttled-apps" checkAPIPath = "throttler/check" checkSelfAPIPath = "throttler/check-self" - customQuery = "show global status like 'threads_running'" - customThreshold = 5 -) - -const ( - throttlerThreshold = 1 * time.Second // standard, tight threshold - unreasonablyLowThreshold = 1 * time.Millisecond - extremelyHighThreshold = 1 * time.Hour - onDemandHeartbeatDuration = 5 * time.Second - applyConfigWait = 15 * time.Second // time after which we're sure the throttler has refreshed config and tablets + getResponseBody = func(resp *http.Response) string { + body, _ := io.ReadAll(resp.Body) + return string(body) + } ) func TestMain(m *testing.M) { @@ -108,9 +114,6 @@ func TestMain(m *testing.M) { "--lock_tables_timeout", "5s", "--watch_replication_stream", "--enable_replication_reporter", - "--throttler-config-via-topo", - "--throttle_threshold", throttlerThreshold.String(), - "--heartbeat_enable", "--heartbeat_interval", "250ms", "--heartbeat_on_demand_duration", onDemandHeartbeatDuration.String(), "--disable_active_reparents", @@ -169,12 +172,12 @@ func throttledApps(tablet *cluster.Vttablet) (resp *http.Response, respBody stri } func throttleCheck(tablet *cluster.Vttablet, skipRequestHeartbeats bool) (*http.Response, error) { - resp, err := httpClient.Get(fmt.Sprintf("http://localhost:%d/%s?s=%t", tablet.HTTPPort, checkAPIPath, skipRequestHeartbeats)) + resp, err := httpClient.Get(fmt.Sprintf("http://localhost:%d/%s?app=%s&s=%t", tablet.HTTPPort, checkAPIPath, testAppName, skipRequestHeartbeats)) return resp, err } func throttleCheckSelf(tablet *cluster.Vttablet) (*http.Response, error) { - return httpClient.Get(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, checkSelfAPIPath)) + return httpClient.Get(fmt.Sprintf("http://localhost:%d/%s?app=%s", tablet.HTTPPort, checkSelfAPIPath, testAppName)) } func warmUpHeartbeat(t *testing.T) (respStatus int) { @@ -191,7 +194,7 @@ func warmUpHeartbeat(t *testing.T) (respStatus int) { // waitForThrottleCheckStatus waits for the tablet to return the provided HTTP code in a throttle check func waitForThrottleCheckStatus(t *testing.T, tablet *cluster.Vttablet, wantCode int) { _ = warmUpHeartbeat(t) - ctx, cancel := context.WithTimeout(context.Background(), onDemandHeartbeatDuration+applyConfigWait) + ctx, cancel := context.WithTimeout(context.Background(), onDemandHeartbeatDuration*4) defer cancel() for { @@ -210,7 +213,7 @@ func waitForThrottleCheckStatus(t *testing.T, tablet *cluster.Vttablet, wantCode require.NoError(t, err) resp.Body.Close() - assert.Equal(t, wantCode, resp.StatusCode, "body: %v", string(b)) + assert.Equalf(t, wantCode, resp.StatusCode, "body: %s", string(b)) return default: resp.Body.Close() @@ -243,37 +246,64 @@ func TestInitialThrottler(t *testing.T) { t.Run("validating OK response from disabled throttler", func(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) }) - t.Run("enabling throttler with low threshold", func(t *testing.T) { - _, err := onlineddl.UpdateThrottlerTopoConfig(clusterInstance, true, false, unreasonablyLowThreshold.Seconds(), "", false) + t.Run("enabling throttler with very low threshold", func(t *testing.T) { + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, unreasonablyLowThreshold.Seconds(), useDefaultQuery, nil) assert.NoError(t, err) + + // Wait for the throttler to be enabled everywhere with the new config. + for _, tablet := range clusterInstance.Keyspaces[0].Shards[0].Vttablets { + throttler.WaitForThrottlerStatusEnabled(t, tablet, true, &throttler.Config{Query: throttler.DefaultQuery, Threshold: unreasonablyLowThreshold.Seconds()}, throttlerEnabledTimeout) + } }) t.Run("validating pushback response from throttler", func(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests) }) t.Run("disabling throttler", func(t *testing.T) { - _, err := onlineddl.UpdateThrottlerTopoConfig(clusterInstance, false, true, unreasonablyLowThreshold.Seconds(), "", false) + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, false, true, unreasonablyLowThreshold.Seconds(), useDefaultQuery, nil) assert.NoError(t, err) + + // Wait for the throttler to be disabled everywhere. + for _, tablet := range clusterInstance.Keyspaces[0].Shards[0].Vttablets { + throttler.WaitForThrottlerStatusEnabled(t, tablet, false, nil, throttlerEnabledTimeout) + } }) t.Run("validating OK response from disabled throttler, again", func(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) }) t.Run("enabling throttler, again", func(t *testing.T) { - _, err := onlineddl.UpdateThrottlerTopoConfig(clusterInstance, true, false, 0, "", true) + // Enable the throttler again with the default query which also moves us back + // to the default threshold. + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, 0, useDefaultQuery, nil) assert.NoError(t, err) + + // Wait for the throttler to be enabled everywhere again with the default config. + for _, tablet := range clusterInstance.Keyspaces[0].Shards[0].Vttablets { + throttler.WaitForThrottlerStatusEnabled(t, tablet, true, throttler.DefaultConfig, throttlerEnabledTimeout) + } }) t.Run("validating pushback response from throttler, again", func(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests) }) t.Run("setting high threshold", func(t *testing.T) { - _, err := onlineddl.UpdateThrottlerTopoConfig(clusterInstance, false, false, extremelyHighThreshold.Seconds(), "", true) + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, false, false, extremelyHighThreshold.Seconds(), useDefaultQuery, nil) assert.NoError(t, err) + + // Wait for the throttler to be enabled everywhere with new config. + for _, tablet := range []cluster.Vttablet{*primaryTablet, *replicaTablet} { + throttler.WaitForThrottlerStatusEnabled(t, &tablet, true, &throttler.Config{Query: throttler.DefaultQuery, Threshold: extremelyHighThreshold.Seconds()}, throttlerEnabledTimeout) + } }) t.Run("validating OK response from throttler with high threshold", func(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) }) t.Run("setting low threshold", func(t *testing.T) { - _, err := onlineddl.UpdateThrottlerTopoConfig(clusterInstance, false, false, throttlerThreshold.Seconds(), "", true) + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, false, false, throttler.DefaultThreshold.Seconds(), useDefaultQuery, nil) assert.NoError(t, err) + + // Wait for the throttler to be enabled everywhere with new config. + for _, tablet := range clusterInstance.Keyspaces[0].Shards[0].Vttablets { + throttler.WaitForThrottlerStatusEnabled(t, tablet, true, throttler.DefaultConfig, throttlerEnabledTimeout) + } }) t.Run("validating pushback response from throttler on low threshold", func(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests) @@ -287,16 +317,17 @@ func TestInitialThrottler(t *testing.T) { resp, err := throttleCheck(primaryTablet, false) require.NoError(t, err) defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) }) t.Run("validating OK response from throttler with low threshold, heartbeats running still", func(t *testing.T) { time.Sleep(1 * time.Second) resp, err := throttleCheck(primaryTablet, false) require.NoError(t, err) defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) }) t.Run("validating pushback response from throttler on low threshold once heartbeats go stale", func(t *testing.T) { + time.Sleep(2 * onDemandHeartbeatDuration) // just... really wait long enough, make sure on-demand stops waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests) }) } @@ -306,7 +337,6 @@ func TestThrottlerAfterMetricsCollected(t *testing.T) { // By this time metrics will have been collected. We expect no lag, and something like: // {"StatusCode":200,"Value":0.282278,"Threshold":1,"Message":""} - // t.Run("validating throttler OK", func(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) }) @@ -314,51 +344,76 @@ func TestThrottlerAfterMetricsCollected(t *testing.T) { resp, body, err := throttledApps(primaryTablet) require.NoError(t, err) defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) assert.Contains(t, body, "always-throttled-app") }) t.Run("validating primary check self", func(t *testing.T) { resp, err := throttleCheckSelf(primaryTablet) require.NoError(t, err) defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) }) t.Run("validating replica check self", func(t *testing.T) { resp, err := throttleCheckSelf(replicaTablet) require.NoError(t, err) defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) }) } func TestLag(t *testing.T) { defer cluster.PanicHandler(t) + // Temporarily disable VTOrc recoveries because we want to + // STOP replication specifically in order to increase the + // lag and we DO NOT want VTOrc to try and fix this. + clusterInstance.DisableVTOrcRecoveries(t) + defer clusterInstance.EnableVTOrcRecoveries(t) t.Run("stopping replication", func(t *testing.T) { err := clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", replicaTablet.Alias) assert.NoError(t, err) }) t.Run("accumulating lag, expecting throttler push back", func(t *testing.T) { - time.Sleep(2 * throttlerThreshold) + time.Sleep(2 * throttler.DefaultThreshold) resp, err := throttleCheck(primaryTablet, false) require.NoError(t, err) defer resp.Body.Close() - assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode) + assert.Equalf(t, http.StatusTooManyRequests, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) }) t.Run("primary self-check should still be fine", func(t *testing.T) { resp, err := throttleCheckSelf(primaryTablet) require.NoError(t, err) defer resp.Body.Close() // self (on primary) is unaffected by replication lag - assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) }) t.Run("replica self-check should show error", func(t *testing.T) { resp, err := throttleCheckSelf(replicaTablet) require.NoError(t, err) defer resp.Body.Close() - assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode) + assert.Equalf(t, http.StatusTooManyRequests, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) + }) + t.Run("exempting test app", func(t *testing.T) { + appRule := &topodatapb.ThrottledAppRule{ + Name: testAppName, + ExpiresAt: protoutil.TimeToProto(time.Now().Add(time.Hour)), + Exempt: true, + } + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, false, false, throttler.DefaultThreshold.Seconds(), useDefaultQuery, appRule) + assert.NoError(t, err) + waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) }) + t.Run("unexempting test app", func(t *testing.T) { + appRule := &topodatapb.ThrottledAppRule{ + Name: testAppName, + ExpiresAt: protoutil.TimeToProto(time.Now()), + } + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, false, false, throttler.DefaultThreshold.Seconds(), useDefaultQuery, appRule) + assert.NoError(t, err) + waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests) + }) + t.Run("starting replication", func(t *testing.T) { err := clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias) assert.NoError(t, err) @@ -371,13 +426,13 @@ func TestLag(t *testing.T) { require.NoError(t, err) defer resp.Body.Close() // self (on primary) is unaffected by replication lag - assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) }) t.Run("replica self-check should be fine", func(t *testing.T) { resp, err := throttleCheckSelf(replicaTablet) require.NoError(t, err) defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) }) } @@ -392,7 +447,6 @@ func TestNoReplicas(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) }) t.Run("restoring to REPLICA", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "REPLICA") assert.NoError(t, err) @@ -403,91 +457,91 @@ func TestNoReplicas(t *testing.T) { func TestCustomQuery(t *testing.T) { defer cluster.PanicHandler(t) - t.Run("enabling throttler with low threshold", func(t *testing.T) { - _, err := onlineddl.UpdateThrottlerTopoConfig(clusterInstance, true, false, float64(customThreshold), customQuery, false) + t.Run("enabling throttler with custom query and threshold", func(t *testing.T) { + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, customThreshold, customQuery, nil) assert.NoError(t, err) - time.Sleep(applyConfigWait) + + // Wait for the throttler to be enabled everywhere with new custom config. + expectConfig := &throttler.Config{Query: customQuery, Threshold: customThreshold} + for _, ks := range clusterInstance.Keyspaces { + for _, shard := range ks.Shards { + for _, tablet := range shard.Vttablets { + throttler.WaitForThrottlerStatusEnabled(t, tablet, true, expectConfig, throttlerEnabledTimeout) + } + } + } }) t.Run("validating OK response from throttler with custom query", func(t *testing.T) { + throttler.WaitForValidData(t, primaryTablet, throttlerEnabledTimeout) resp, err := throttleCheck(primaryTablet, false) require.NoError(t, err) defer resp.Body.Close() - - b, err := io.ReadAll(resp.Body) - assert.NoError(t, err) - assert.Equal(t, http.StatusOK, resp.StatusCode, "response: %v", string(b)) + assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) }) t.Run("test threads running", func(t *testing.T) { - sleepDuration := 10 * time.Second + sleepDuration := 20 * time.Second var wg sync.WaitGroup - for i := 0; i < customThreshold; i++ { - // generate different Sleep() calls, all at minimum sleepDuration - wg.Add(1) - go func(i int) { - defer wg.Done() - vtgateExec(t, fmt.Sprintf("select sleep(%d)", int(sleepDuration.Seconds())+i), "") - }(i) - } - t.Run("exceeds threshold", func(t *testing.T) { - time.Sleep(sleepDuration / 2) - // by this time we will have testThreshold+1 threads_running, and we should hit the threshold - // {"StatusCode":429,"Value":2,"Threshold":2,"Message":"Threshold exceeded"} - { - resp, err := throttleCheck(primaryTablet, false) - require.NoError(t, err) - defer resp.Body.Close() - - b, err := io.ReadAll(resp.Body) - assert.NoError(t, err) - assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode, "response: %v", string(b)) + t.Run("generate running queries", func(t *testing.T) { + for i := 0; i < customThreshold+1; i++ { + // Generate different Sleep() calls, all at minimum sleepDuration. + wg.Add(1) + go func(i int) { + defer wg.Done() + // Make sure to generate a different query in each goroutine, so that vtgate does not oversmart us + // and optimizes connections/caching. + query := fmt.Sprintf("select sleep(%d) + %d", int(sleepDuration.Seconds()), i) + vtgateExec(t, query, "") + }(i) } + }) + t.Run("exceeds threshold", func(t *testing.T) { + // Now we should be reporting ~ customThreshold+1 threads_running, and we should + // hit the threshold. For example: + // {"StatusCode":429,"Value":6,"Threshold":5,"Message":"Threshold exceeded"} + waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests) { resp, err := throttleCheckSelf(primaryTablet) require.NoError(t, err) defer resp.Body.Close() - - b, err := io.ReadAll(resp.Body) - assert.NoError(t, err) - assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode, "response: %v", string(b)) + assert.Equalf(t, http.StatusTooManyRequests, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) } }) t.Run("wait for queries to terminate", func(t *testing.T) { wg.Wait() - time.Sleep(1 * time.Second) // graceful time to let throttler read metrics }) t.Run("restored below threshold", func(t *testing.T) { - { - resp, err := throttleCheck(primaryTablet, false) - require.NoError(t, err) - defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) - } + waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) { resp, err := throttleCheckSelf(primaryTablet) require.NoError(t, err) defer resp.Body.Close() - assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) } }) }) } func TestRestoreDefaultQuery(t *testing.T) { - // validte going back from custom-query to default-query (replication lag) still works defer cluster.PanicHandler(t) - t.Run("enabling throttler with standard threshold", func(t *testing.T) { - _, err := onlineddl.UpdateThrottlerTopoConfig(clusterInstance, true, false, throttlerThreshold.Seconds(), "", false) + // Validate going back from custom-query to default-query (replication lag) still works. + t.Run("enabling throttler with default query and threshold", func(t *testing.T) { + _, err := throttler.UpdateThrottlerTopoConfig(clusterInstance, true, false, throttler.DefaultThreshold.Seconds(), useDefaultQuery, nil) assert.NoError(t, err) + + // Wait for the throttler to be up and running everywhere again with the default config. + for _, tablet := range clusterInstance.Keyspaces[0].Shards[0].Vttablets { + throttler.WaitForThrottlerStatusEnabled(t, tablet, true, throttler.DefaultConfig, throttlerEnabledTimeout) + } }) - t.Run("validating OK response from throttler with low threshold, heartbeats running", func(t *testing.T) { - waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) - }) - t.Run("validating pushback response from throttler on low threshold once heartbeats go stale", func(t *testing.T) { - time.Sleep(2 * onDemandHeartbeatDuration) // just... really wait long enough, make sure on-demand stops + t.Run("validating OK response from throttler with default threshold, heartbeats running", func(t *testing.T) { resp, err := throttleCheck(primaryTablet, false) require.NoError(t, err) defer resp.Body.Close() - assert.Equal(t, http.StatusTooManyRequests, resp.StatusCode) + assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) + }) + t.Run("validating pushback response from throttler on default threshold once heartbeats go stale", func(t *testing.T) { + time.Sleep(2 * onDemandHeartbeatDuration) // just... really wait long enough, make sure on-demand stops + waitForThrottleCheckStatus(t, primaryTablet, http.StatusTooManyRequests) }) } diff --git a/go/test/endtoend/throttler/util.go b/go/test/endtoend/throttler/util.go new file mode 100644 index 00000000000..40cfdb53118 --- /dev/null +++ b/go/test/endtoend/throttler/util.go @@ -0,0 +1,410 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package throttler + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" + + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/log" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" +) + +type Config struct { + Query string + Threshold float64 +} + +const ( + DefaultQuery = "select unix_timestamp(now(6))-max(ts/1000000000) as replication_lag from _vt.heartbeat" + DefaultThreshold = 5 * time.Second + ConfigTimeout = 60 * time.Second +) + +var DefaultConfig = &Config{ + Query: DefaultQuery, + Threshold: DefaultThreshold.Seconds(), +} + +// UpdateThrottlerTopoConfig runs vtctlclient UpdateThrottlerConfig. +// This retries the command until it succeeds or times out as the +// SrvKeyspace record may not yet exist for a newly created +// Keyspace that is still initializing before it becomes serving. +func UpdateThrottlerTopoConfigRaw(vtctldProcess *cluster.VtctldClientProcess, keyspaceName string, enable bool, disable bool, threshold float64, metricsQuery string, appRule *topodatapb.ThrottledAppRule) (result string, err error) { + args := []string{} + args = append(args, "UpdateThrottlerConfig") + if enable { + args = append(args, "--enable") + } + if disable { + args = append(args, "--disable") + } + if threshold > 0 { + args = append(args, "--threshold", fmt.Sprintf("%f", threshold)) + } + args = append(args, "--custom-query", metricsQuery) + if metricsQuery != "" { + args = append(args, "--check-as-check-self") + } else { + args = append(args, "--check-as-check-shard") + } + if appRule != nil { + args = append(args, "--throttle-app", appRule.Name) + args = append(args, "--throttle-app-duration", time.Until(protoutil.TimeFromProto(appRule.ExpiresAt).UTC()).String()) + args = append(args, "--throttle-app-ratio", fmt.Sprintf("%f", appRule.Ratio)) + if appRule.Exempt { + args = append(args, "--throttle-app-exempt") + } + } + args = append(args, keyspaceName) + + ctx, cancel := context.WithTimeout(context.Background(), ConfigTimeout) + defer cancel() + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + result, err = vtctldProcess.ExecuteCommandWithOutput(args...) + if err == nil { + return result, nil + } + select { + case <-ctx.Done(): + return "", fmt.Errorf("timed out waiting for UpdateThrottlerConfig to succeed after %v; last seen value: %+v, error: %v", ConfigTimeout, result, err) + case <-ticker.C: + } + } +} + +// UpdateThrottlerTopoConfig runs vtctlclient UpdateThrottlerConfig. +// This retries the command until it succeeds or times out as the +// SrvKeyspace record may not yet exist for a newly created +// Keyspace that is still initializing before it becomes serving. +func UpdateThrottlerTopoConfig(clusterInstance *cluster.LocalProcessCluster, enable bool, disable bool, threshold float64, metricsQuery string, appRule *topodatapb.ThrottledAppRule) (string, error) { + rec := concurrency.AllErrorRecorder{} + var ( + err error + res strings.Builder + ) + for _, ks := range clusterInstance.Keyspaces { + ires, err := UpdateThrottlerTopoConfigRaw(&clusterInstance.VtctldClientProcess, ks.Name, enable, disable, threshold, metricsQuery, appRule) + if err != nil { + rec.RecordError(err) + } + res.WriteString(ires) + } + if rec.HasErrors() { + err = rec.Error() + } + return res.String(), err +} + +// WaitForSrvKeyspace waits until the given srvkeyspace entry is found in the given cell +func WaitForSrvKeyspace(clusterInstance *cluster.LocalProcessCluster, cell, keyspace string) error { + args := []string{"GetSrvKeyspaceNames", cell} + + ctx, cancel := context.WithTimeout(context.Background(), ConfigTimeout) + defer cancel() + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + result, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput(args...) + if err != nil { + return err + } + if strings.Contains(result, `"`+keyspace+`"`) { + return nil + } + select { + case <-ctx.Done(): + return fmt.Errorf("timed out waiting for GetSrvKeyspaceNames to contain '%v'", keyspace) + case <-ticker.C: + } + } +} + +// throttleAppRaw runs vtctlclient UpdateThrottlerConfig with --throttle-app flags +// This retries the command until it succeeds or times out as the +// SrvKeyspace record may not yet exist for a newly created +// Keyspace that is still initializing before it becomes serving. +func throttleAppRaw(vtctldProcess *cluster.VtctldClientProcess, keyspaceName string, throttlerApp throttlerapp.Name, throttle bool) (result string, err error) { + args := []string{} + args = append(args, "UpdateThrottlerConfig") + if throttle { + args = append(args, "--throttle-app", throttlerApp.String()) + args = append(args, "--throttle-app-duration", "1h") + } else { + args = append(args, "--unthrottle-app", throttlerApp.String()) + } + args = append(args, keyspaceName) + + ctx, cancel := context.WithTimeout(context.Background(), ConfigTimeout) + defer cancel() + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + result, err = vtctldProcess.ExecuteCommandWithOutput(args...) + if err == nil { + return result, nil + } + select { + case <-ctx.Done(): + return "", fmt.Errorf("timed out waiting for UpdateThrottlerConfig to succeed after %v; last seen value: %+v, error: %v", ConfigTimeout, result, err) + case <-ticker.C: + } + } +} + +// throttleApp throttles or unthrottles an app +func throttleApp(clusterInstance *cluster.LocalProcessCluster, throttlerApp throttlerapp.Name, throttle bool) (string, error) { + rec := concurrency.AllErrorRecorder{} + var ( + err error + res strings.Builder + ) + for _, ks := range clusterInstance.Keyspaces { + ires, err := throttleAppRaw(&clusterInstance.VtctldClientProcess, ks.Name, throttlerApp, throttle) + if err != nil { + rec.RecordError(err) + } + res.WriteString(ires) + } + if rec.HasErrors() { + err = rec.Error() + } + return res.String(), err +} + +// ThrottleApp throttles given app name for the next hour +func ThrottleApp(clusterInstance *cluster.LocalProcessCluster, throttlerApp throttlerapp.Name) (string, error) { + return throttleApp(clusterInstance, throttlerApp, true) +} + +// ThrottleApp unthrottles given app name +func UnthrottleApp(clusterInstance *cluster.LocalProcessCluster, throttlerApp throttlerapp.Name) (string, error) { + return throttleApp(clusterInstance, throttlerApp, false) +} + +func WaitUntilTabletsConfirmThrottledApp(t *testing.T, clusterInstance *cluster.LocalProcessCluster, throttlerApp throttlerapp.Name, expectThrottled bool) { + for _, ks := range clusterInstance.Keyspaces { + for _, shard := range ks.Shards { + for _, tablet := range shard.Vttablets { + WaitForThrottledApp(t, tablet, throttlerApp, expectThrottled, ConfigTimeout) + } + } + } +} + +// ThrottleAppAndWaitUntilTabletsConfirm +func ThrottleAppAndWaitUntilTabletsConfirm(t *testing.T, clusterInstance *cluster.LocalProcessCluster, throttlerApp throttlerapp.Name) (string, error) { + res, err := throttleApp(clusterInstance, throttlerApp, true) + if err != nil { + return res, err + } + WaitUntilTabletsConfirmThrottledApp(t, clusterInstance, throttlerApp, true) + return res, nil +} + +// UnthrottleAppAndWaitUntilTabletsConfirm +func UnthrottleAppAndWaitUntilTabletsConfirm(t *testing.T, clusterInstance *cluster.LocalProcessCluster, throttlerApp throttlerapp.Name) (string, error) { + res, err := throttleApp(clusterInstance, throttlerApp, false) + if err != nil { + return res, err + } + WaitUntilTabletsConfirmThrottledApp(t, clusterInstance, throttlerApp, false) + return res, nil +} + +// WaitForThrottlerStatusEnabled waits for a tablet to report its throttler status as +// enabled/disabled and have the provided config (if any) until the specified timeout. +func WaitForThrottlerStatusEnabled(t *testing.T, tablet *cluster.Vttablet, enabled bool, config *Config, timeout time.Duration) { + enabledJSONPath := "IsEnabled" + queryJSONPath := "Query" + thresholdJSONPath := "Threshold" + throttlerURL := fmt.Sprintf("http://localhost:%d/throttler/status", tablet.HTTPPort) + tabletURL := fmt.Sprintf("http://localhost:%d/debug/status_details", tablet.HTTPPort) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + throttlerBody := getHTTPBody(throttlerURL) + isEnabled := gjson.Get(throttlerBody, enabledJSONPath).Bool() + if isEnabled == enabled { + if config == nil { + return + } + query := gjson.Get(throttlerBody, queryJSONPath).String() + threshold := gjson.Get(throttlerBody, thresholdJSONPath).Float() + if query == config.Query && threshold == config.Threshold { + return + } + } + // If the tablet is Not Serving due to e.g. being involved in a + // Reshard where its QueryService is explicitly disabled, then + // we should not fail the test as the throttler will not be Open. + tabletBody := getHTTPBody(tabletURL) + class := strings.ToLower(gjson.Get(tabletBody, "0.Class").String()) + value := strings.ToLower(gjson.Get(tabletBody, "0.Value").String()) + if class == "unhappy" && strings.Contains(value, "not serving") { + log.Infof("tablet %s is Not Serving, so ignoring throttler status as the throttler will not be Opened", tablet.Alias) + return + } + select { + case <-ctx.Done(): + t.Errorf("timed out waiting for the %s tablet's throttler status enabled to be %t with the correct config after %v; last seen value: %s", + tablet.Alias, enabled, timeout, throttlerBody) + return + case <-ticker.C: + } + } +} + +// WaitForThrottlerStatusEnabled waits for a tablet to report its throttler status as +// enabled/disabled and have the provided config (if any) until the specified timeout. +func WaitForThrottledApp(t *testing.T, tablet *cluster.Vttablet, throttlerApp throttlerapp.Name, expectThrottled bool, timeout time.Duration) { + throttledAppsURL := fmt.Sprintf("http://localhost:%d/throttler/throttled-apps", tablet.HTTPPort) + tabletURL := fmt.Sprintf("http://localhost:%d/debug/status_details", tablet.HTTPPort) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + throttledAppsBody := getHTTPBody(throttledAppsURL) + var throttledApps []base.AppThrottle + err := json.Unmarshal([]byte(throttledAppsBody), &throttledApps) + assert.NoError(t, err) + require.NotEmpty(t, throttledApps) // "always-throttled-app" is always there. + appFoundThrottled := false + for _, throttledApp := range throttledApps { + if throttledApp.AppName == throttlerApp.String() && throttledApp.ExpireAt.After(time.Now()) { + appFoundThrottled = true + break + } + } + if appFoundThrottled == expectThrottled { + return + } + // If the tablet is Not Serving due to e.g. being involved in a + // Reshard where its QueryService is explicitly disabled, then + // we should not fail the test as the throttler will not be Open. + tabletBody := getHTTPBody(tabletURL) + class := strings.ToLower(gjson.Get(tabletBody, "0.Class").String()) + value := strings.ToLower(gjson.Get(tabletBody, "0.Value").String()) + if class == "unhappy" && strings.Contains(value, "not serving") { + log.Infof("tablet %s is Not Serving, so ignoring throttler status as the throttler will not be Opened", tablet.Alias) + return + } + select { + case <-ctx.Done(): + t.Errorf("timed out waiting for the %s tablet's throttled apps with the correct config (expecting %s to be %v) after %v; last seen value: %s", + tablet.Alias, throttlerApp.String(), expectThrottled, timeout, throttledAppsBody) + return + case <-ticker.C: + } + } +} + +// EnableLagThrottlerAndWaitForStatus is a utility function to enable the throttler at the beginning of an endtoend test. +// The throttler is configued to use the standard replication lag metric. The function waits until the throttler is confirmed +// to be running on all tablets. +func EnableLagThrottlerAndWaitForStatus(t *testing.T, clusterInstance *cluster.LocalProcessCluster, lag time.Duration) { + _, err := UpdateThrottlerTopoConfig(clusterInstance, true, false, lag.Seconds(), "", nil) + require.NoError(t, err) + + for _, ks := range clusterInstance.Keyspaces { + for _, shard := range ks.Shards { + for _, tablet := range shard.Vttablets { + WaitForThrottlerStatusEnabled(t, tablet, true, nil, time.Minute) + } + } + } +} + +func getHTTPBody(url string) string { + resp, err := http.Get(url) + if err != nil { + log.Infof("http Get returns %+v", err) + return "" + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + log.Infof("http Get returns status %d", resp.StatusCode) + return "" + } + respByte, _ := io.ReadAll(resp.Body) + body := string(respByte) + return body +} + +// WaitForValidData waits for a tablet's checks to return a non 500 http response +// which indicates that it's not able to provide valid results. This is most +// commonly caused by the throttler still gathering the initial results for +// the given configuration. +func WaitForValidData(t *testing.T, tablet *cluster.Vttablet, timeout time.Duration) { + checkURL := fmt.Sprintf("http://localhost:%d/throttler/check", tablet.HTTPPort) + selfCheckURL := fmt.Sprintf("http://localhost:%d/throttler/check-self", tablet.HTTPPort) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + ticker := time.NewTicker(500 * time.Millisecond) + defer ticker.Stop() + + for { + checkResp, checkErr := http.Get(checkURL) + if checkErr != nil { + defer checkResp.Body.Close() + } + selfCheckResp, selfCheckErr := http.Get(selfCheckURL) + if selfCheckErr != nil { + defer selfCheckResp.Body.Close() + } + if checkErr == nil && selfCheckErr == nil && + checkResp.StatusCode != http.StatusInternalServerError && + selfCheckResp.StatusCode != http.StatusInternalServerError { + return + } + select { + case <-ctx.Done(): + t.Errorf("timed out waiting for %s tablet's throttler to return a valid result after %v; last seen value: %+v", + tablet.Alias, timeout, checkResp) + return + case <-ticker.C: + } + } +} diff --git a/go/test/endtoend/topoconncache/main_test.go b/go/test/endtoend/topoconncache/main_test.go index 2a074e8428a..7cfea8839b0 100644 --- a/go/test/endtoend/topoconncache/main_test.go +++ b/go/test/endtoend/topoconncache/main_test.go @@ -140,7 +140,11 @@ func TestMain(m *testing.M) { var mysqlProcs []*exec.Cmd for _, tablet := range []*cluster.Vttablet{shard1Primary, shard1Replica, shard1Rdonly, shard2Primary, shard2Replica, shard2Rdonly} { - tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory) + mysqlctlProcess, err := cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory) + if err != nil { + return 1, err + } + tablet.MysqlctlProcess = *mysqlctlProcess tablet.VttabletProcess = cluster.VttabletProcessInstance(tablet.HTTPPort, tablet.GrpcPort, tablet.TabletUID, diff --git a/go/test/endtoend/topoconncache/topo_conn_cache_test.go b/go/test/endtoend/topoconncache/topo_conn_cache_test.go index 02f14a7304d..504ca218047 100644 --- a/go/test/endtoend/topoconncache/topo_conn_cache_test.go +++ b/go/test/endtoend/topoconncache/topo_conn_cache_test.go @@ -136,7 +136,9 @@ func addCellback(t *testing.T) { // create sql process for vttablets var mysqlProcs []*exec.Cmd for _, tablet := range []*cluster.Vttablet{shard1Replica, shard1Rdonly, shard2Replica, shard2Rdonly} { - tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory) + mysqlctlProcess, err := cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory) + require.NoError(t, err) + tablet.MysqlctlProcess = *mysqlctlProcess tablet.VttabletProcess = cluster.VttabletProcessInstance(tablet.HTTPPort, tablet.GrpcPort, tablet.TabletUID, diff --git a/go/test/endtoend/utils/cmp.go b/go/test/endtoend/utils/cmp.go index 849f81240e9..38726d6c3aa 100644 --- a/go/test/endtoend/utils/cmp.go +++ b/go/test/endtoend/utils/cmp.go @@ -89,7 +89,7 @@ func (mcmp *MySQLCompare) AssertMatchesAny(query string, expected ...string) { func (mcmp *MySQLCompare) AssertMatchesAnyNoCompare(query string, expected ...string) { mcmp.t.Helper() - mQr, vQr := mcmp.execNoCompare(query) + mQr, vQr := mcmp.ExecNoCompare(query) got := fmt.Sprintf("%v", mQr.Rows) valid := false for _, e := range expected { @@ -171,7 +171,7 @@ func (mcmp *MySQLCompare) AssertFoundRowsValue(query, workload string, count int // AssertMatchesNoCompare compares the record of mysql and vitess separately and not with each other. func (mcmp *MySQLCompare) AssertMatchesNoCompare(query, mExp string, vExp string) { mcmp.t.Helper() - mQr, vQr := mcmp.execNoCompare(query) + mQr, vQr := mcmp.ExecNoCompare(query) got := fmt.Sprintf("%v", mQr.Rows) diff := cmp.Diff(mExp, got) if diff != "" { @@ -196,11 +196,12 @@ func (mcmp *MySQLCompare) Exec(query string) *sqltypes.Result { mysqlQr, err := mcmp.MySQLConn.ExecuteFetch(query, 1000, true) require.NoError(mcmp.t, err, "[MySQL Error] for query: "+query) - compareVitessAndMySQLResults(mcmp.t, query, vtQr, mysqlQr, false) + compareVitessAndMySQLResults(mcmp.t, query, mcmp.VtConn, vtQr, mysqlQr, false) return vtQr } -func (mcmp *MySQLCompare) execNoCompare(query string) (*sqltypes.Result, *sqltypes.Result) { +// ExecNoCompare executes the query on vitess and mysql but does not compare the result with each other. +func (mcmp *MySQLCompare) ExecNoCompare(query string) (*sqltypes.Result, *sqltypes.Result) { mcmp.t.Helper() vtQr, err := mcmp.VtConn.ExecuteFetch(query, 1000, true) require.NoError(mcmp.t, err, "[Vitess Error] for query: "+query) @@ -222,7 +223,7 @@ func (mcmp *MySQLCompare) ExecWithColumnCompare(query string) *sqltypes.Result { mysqlQr, err := mcmp.MySQLConn.ExecuteFetch(query, 1000, true) require.NoError(mcmp.t, err, "[MySQL Error] for query: "+query) - compareVitessAndMySQLResults(mcmp.t, query, vtQr, mysqlQr, true) + compareVitessAndMySQLResults(mcmp.t, query, mcmp.VtConn, vtQr, mysqlQr, true) return vtQr } @@ -232,6 +233,8 @@ func (mcmp *MySQLCompare) ExecWithColumnCompare(query string) *sqltypes.Result { // - MySQL and Vitess did not find an error, but their results are matching // // The result set and error produced by Vitess are returned to the caller. +// If the Vitess and MySQL error are both nil, but the results do not match, +// the mismatched results are instead returned as an error, as well as the Vitess result set func (mcmp *MySQLCompare) ExecAllowAndCompareError(query string) (*sqltypes.Result, error) { mcmp.t.Helper() vtQr, vtErr := mcmp.VtConn.ExecuteFetch(query, 1000, true) @@ -241,7 +244,7 @@ func (mcmp *MySQLCompare) ExecAllowAndCompareError(query string) (*sqltypes.Resu // Since we allow errors, we don't want to compare results if one of the client failed. // Vitess and MySQL should always be agreeing whether the query returns an error or not. if vtErr == nil && mysqlErr == nil { - compareVitessAndMySQLResults(mcmp.t, query, vtQr, mysqlQr, false) + vtErr = compareVitessAndMySQLResults(mcmp.t, query, mcmp.VtConn, vtQr, mysqlQr, false) } return vtQr, vtErr } @@ -253,3 +256,33 @@ func (mcmp *MySQLCompare) ExecAndIgnore(query string) (*sqltypes.Result, error) _, _ = mcmp.MySQLConn.ExecuteFetch(query, 1000, true) return mcmp.VtConn.ExecuteFetch(query, 1000, true) } + +func (mcmp *MySQLCompare) Run(query string, f func(mcmp *MySQLCompare)) { + mcmp.t.Run(query, func(t *testing.T) { + inner := &MySQLCompare{ + t: t, + MySQLConn: mcmp.MySQLConn, + VtConn: mcmp.VtConn, + } + f(inner) + }) +} + +// ExecAllowError executes the query against both Vitess and MySQL. +// If there is no error, it compares the result +// Return any Vitess execution error without comparing the results. +func (mcmp *MySQLCompare) ExecAllowError(query string) (*sqltypes.Result, error) { + mcmp.t.Helper() + vtQr, vtErr := mcmp.VtConn.ExecuteFetch(query, 1000, true) + if vtErr != nil { + return nil, vtErr + } + mysqlQr, mysqlErr := mcmp.MySQLConn.ExecuteFetch(query, 1000, true) + + // Since we allow errors, we don't want to compare results if one of the client failed. + // Vitess and MySQL should always be agreeing whether the query returns an error or not. + if mysqlErr == nil { + vtErr = compareVitessAndMySQLResults(mcmp.t, query, mcmp.VtConn, vtQr, mysqlQr, false) + } + return vtQr, vtErr +} diff --git a/go/test/endtoend/utils/mysql.go b/go/test/endtoend/utils/mysql.go index a73160503cd..6e85ec6bdf7 100644 --- a/go/test/endtoend/utils/mysql.go +++ b/go/test/endtoend/utils/mysql.go @@ -18,6 +18,7 @@ package utils import ( "context" + "errors" "fmt" "os" "path" @@ -40,7 +41,8 @@ import ( // The mysql.ConnParams to connect to the new database is returned, along with a function to // teardown the database. func NewMySQL(cluster *cluster.LocalProcessCluster, dbName string, schemaSQL ...string) (mysql.ConnParams, func(), error) { - return NewMySQLWithDetails(cluster.GetAndReservePort(), cluster.Hostname, dbName, schemaSQL...) + mysqlParam, _, closer, error := NewMySQLWithMysqld(cluster.GetAndReservePort(), cluster.Hostname, dbName, schemaSQL...) + return mysqlParam, closer, error } // CreateMysqldAndMycnf returns a Mysqld and a Mycnf object to use for working with a MySQL @@ -60,24 +62,24 @@ func CreateMysqldAndMycnf(tabletUID uint32, mysqlSocket string, mysqlPort int) ( return mysqlctl.NewMysqld(&cfg), mycnf, nil } -func NewMySQLWithDetails(port int, hostname, dbName string, schemaSQL ...string) (mysql.ConnParams, func(), error) { +func NewMySQLWithMysqld(port int, hostname, dbName string, schemaSQL ...string) (mysql.ConnParams, *mysqlctl.Mysqld, func(), error) { mysqlDir, err := createMySQLDir() if err != nil { - return mysql.ConnParams{}, nil, err + return mysql.ConnParams{}, nil, nil, err } initMySQLFile, err := createInitSQLFile(mysqlDir, dbName) if err != nil { - return mysql.ConnParams{}, nil, err + return mysql.ConnParams{}, nil, nil, err } mysqlPort := port mysqld, mycnf, err := CreateMysqldAndMycnf(0, "", mysqlPort) if err != nil { - return mysql.ConnParams{}, nil, err + return mysql.ConnParams{}, nil, nil, err } err = initMysqld(mysqld, mycnf, initMySQLFile) if err != nil { - return mysql.ConnParams{}, nil, err + return mysql.ConnParams{}, nil, nil, err } params := mysql.ConnParams{ @@ -89,10 +91,10 @@ func NewMySQLWithDetails(port int, hostname, dbName string, schemaSQL ...string) for _, sql := range schemaSQL { err = prepareMySQLWithSchema(params, sql) if err != nil { - return mysql.ConnParams{}, nil, err + return mysql.ConnParams{}, nil, nil, err } } - return params, func() { + return params, mysqld, func() { ctx := context.Background() _ = mysqld.Teardown(ctx, mycnf, true) }, nil @@ -114,7 +116,10 @@ func createInitSQLFile(mysqlDir, ksName string) (string, error) { return "", err } defer f.Close() - + _, err = f.WriteString("SET GLOBAL super_read_only='OFF';") + if err != nil { + return "", err + } _, err = f.WriteString(fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s;", ksName)) if err != nil { return "", err @@ -150,24 +155,27 @@ func prepareMySQLWithSchema(params mysql.ConnParams, sql string) error { return nil } -func compareVitessAndMySQLResults(t *testing.T, query string, vtQr, mysqlQr *sqltypes.Result, compareColumns bool) { +func compareVitessAndMySQLResults(t *testing.T, query string, vtConn *mysql.Conn, vtQr, mysqlQr *sqltypes.Result, compareColumns bool) error { if vtQr == nil && mysqlQr == nil { - return + return nil } if vtQr == nil { t.Error("Vitess result is 'nil' while MySQL's is not.") - return + return errors.New("Vitess result is 'nil' while MySQL's is not.\n") } if mysqlQr == nil { t.Error("MySQL result is 'nil' while Vitess' is not.") - return + return errors.New("MySQL result is 'nil' while Vitess' is not.\n") } + + var errStr string if compareColumns { vtColCount := len(vtQr.Fields) myColCount := len(mysqlQr.Fields) if vtColCount > 0 && myColCount > 0 { if vtColCount != myColCount { t.Errorf("column count does not match: %d vs %d", vtColCount, myColCount) + errStr += fmt.Sprintf("column count does not match: %d vs %d\n", vtColCount, myColCount) } var vtCols []string @@ -176,26 +184,27 @@ func compareVitessAndMySQLResults(t *testing.T, query string, vtQr, mysqlQr *sql vtCols = append(vtCols, vtField.Name) myCols = append(myCols, mysqlQr.Fields[i].Name) } - assert.Equal(t, myCols, vtCols, "column names do not match - the expected values are what mysql produced") + if !assert.Equal(t, myCols, vtCols, "column names do not match - the expected values are what mysql produced") { + errStr += "column names do not match - the expected values are what mysql produced\n" + errStr += fmt.Sprintf("Not equal: \nexpected: %v\nactual: %v\n", myCols, vtCols) + } } } stmt, err := sqlparser.Parse(query) if err != nil { t.Error(err) - return + return err } orderBy := false if selStmt, isSelStmt := stmt.(sqlparser.SelectStatement); isSelStmt { orderBy = selStmt.GetOrderBy() != nil } - if orderBy && sqltypes.ResultsEqual([]sqltypes.Result{*vtQr}, []sqltypes.Result{*mysqlQr}) { - return - } else if sqltypes.ResultsEqualUnordered([]sqltypes.Result{*vtQr}, []sqltypes.Result{*mysqlQr}) { - return + if (orderBy && sqltypes.ResultsEqual([]sqltypes.Result{*vtQr}, []sqltypes.Result{*mysqlQr})) || sqltypes.ResultsEqualUnordered([]sqltypes.Result{*vtQr}, []sqltypes.Result{*mysqlQr}) { + return nil } - errStr := "Query (" + query + ") results mismatched.\nVitess Results:\n" + errStr += "Query (" + query + ") results mismatched.\nVitess Results:\n" for _, row := range vtQr.Rows { errStr += fmt.Sprintf("%s\n", row) } @@ -203,7 +212,12 @@ func compareVitessAndMySQLResults(t *testing.T, query string, vtQr, mysqlQr *sql for _, row := range mysqlQr.Rows { errStr += fmt.Sprintf("%s\n", row) } + if vtConn != nil { + qr := Exec(t, vtConn, fmt.Sprintf("vexplain plan %s", query)) + errStr += fmt.Sprintf("query plan: \n%s\n", qr.Rows[0][0].ToString()) + } t.Error(errStr) + return errors.New(errStr) } func compareVitessAndMySQLErrors(t *testing.T, vtErr, mysqlErr error) { diff --git a/go/test/endtoend/utils/mysql_test.go b/go/test/endtoend/utils/mysql_test.go index d2816cb1227..de9db23dab1 100644 --- a/go/test/endtoend/utils/mysql_test.go +++ b/go/test/endtoend/utils/mysql_test.go @@ -22,15 +22,18 @@ import ( "os" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/mysqlctl" ) var ( clusterInstance *cluster.LocalProcessCluster mysqlParams mysql.ConnParams + mysqld *mysqlctl.Mysqld keyspaceName = "ks" cell = "test" schemaSQL = `create table t1( @@ -48,13 +51,14 @@ func TestMain(m *testing.M) { clusterInstance = cluster.NewCluster(cell, "localhost") defer clusterInstance.Teardown() - conn, closer, err := NewMySQL(clusterInstance, keyspaceName, schemaSQL) + var closer func() + var err error + mysqlParams, mysqld, closer, err = NewMySQLWithMysqld(clusterInstance.GetAndReservePort(), clusterInstance.Hostname, keyspaceName, schemaSQL) if err != nil { fmt.Println(err) return 1 } defer closer() - mysqlParams = conn return m.Run() }() os.Exit(exitCode) @@ -64,9 +68,58 @@ func TestCreateMySQL(t *testing.T) { ctx := context.Background() conn, err := mysql.Connect(ctx, &mysqlParams) require.NoError(t, err) - AssertMatches(t, conn, "show databases;", `[[VARCHAR("information_schema")] [VARCHAR("ks")] [VARCHAR("mysql")] [VARCHAR("performance_schema")] [VARCHAR("sys")]]`) AssertMatches(t, conn, "show tables;", `[[VARCHAR("t1")]]`) Exec(t, conn, "insert into t1(id1, id2, id3) values (1, 1, 1), (2, 2, 2), (3, 3, 3)") AssertMatches(t, conn, "select * from t1;", `[[INT64(1) INT64(1) INT64(1)] [INT64(2) INT64(2) INT64(2)] [INT64(3) INT64(3) INT64(3)]]`) } + +func TestSetSuperReadOnlyMySQL(t *testing.T) { + require.NotNil(t, mysqld) + isSuperReadOnly, _ := mysqld.IsSuperReadOnly() + assert.False(t, isSuperReadOnly, "super_read_only should be set to False") + retFunc1, err := mysqld.SetSuperReadOnly(true) + assert.NotNil(t, retFunc1, "SetSuperReadOnly is supposed to return a defer function") + assert.NoError(t, err, "SetSuperReadOnly should not have failed") + + isSuperReadOnly, _ = mysqld.IsSuperReadOnly() + assert.True(t, isSuperReadOnly, "super_read_only should be set to True") + // if value is already true then retFunc2 will be nil + retFunc2, err := mysqld.SetSuperReadOnly(true) + assert.Nil(t, retFunc2, "SetSuperReadOnly is supposed to return a nil function") + assert.NoError(t, err, "SetSuperReadOnly should not have failed") + + retFunc1() + isSuperReadOnly, _ = mysqld.IsSuperReadOnly() + assert.False(t, isSuperReadOnly, "super_read_only should be set to False") + isReadOnly, _ := mysqld.IsReadOnly() + assert.True(t, isReadOnly, "read_only should be set to True") + + isSuperReadOnly, _ = mysqld.IsSuperReadOnly() + assert.False(t, isSuperReadOnly, "super_read_only should be set to False") + retFunc1, err = mysqld.SetSuperReadOnly(false) + assert.Nil(t, retFunc1, "SetSuperReadOnly is supposed to return a nil function") + assert.NoError(t, err, "SetSuperReadOnly should not have failed") + + _, err = mysqld.SetSuperReadOnly(true) + assert.NoError(t, err) + + isSuperReadOnly, _ = mysqld.IsSuperReadOnly() + assert.True(t, isSuperReadOnly, "super_read_only should be set to True") + retFunc1, err = mysqld.SetSuperReadOnly(false) + assert.NotNil(t, retFunc1, "SetSuperReadOnly is supposed to return a defer function") + assert.NoError(t, err, "SetSuperReadOnly should not have failed") + + isSuperReadOnly, _ = mysqld.IsSuperReadOnly() + assert.False(t, isSuperReadOnly, "super_read_only should be set to False") + // if value is already false then retFunc2 will be nil + retFunc2, err = mysqld.SetSuperReadOnly(false) + assert.Nil(t, retFunc2, "SetSuperReadOnly is supposed to return a nil function") + assert.NoError(t, err, "SetSuperReadOnly should not have failed") + + retFunc1() + isSuperReadOnly, _ = mysqld.IsSuperReadOnly() + assert.True(t, isSuperReadOnly, "super_read_only should be set to True") + isReadOnly, _ = mysqld.IsReadOnly() + assert.True(t, isReadOnly, "read_only should be set to True") +} diff --git a/go/test/endtoend/utils/utils.go b/go/test/endtoend/utils/utils.go index 1aca889025b..c0137b27066 100644 --- a/go/test/endtoend/utils/utils.go +++ b/go/test/endtoend/utils/utils.go @@ -22,14 +22,13 @@ import ( "testing" "time" - "vitess.io/vitess/go/test/endtoend/cluster" - "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" ) // AssertContains ensures the given query result contains the expected results. @@ -164,7 +163,7 @@ func ExecCompareMySQL(t *testing.T, vtConn, mysqlConn *mysql.Conn, query string) mysqlQr, err := mysqlConn.ExecuteFetch(query, 1000, true) require.NoError(t, err, "[MySQL Error] for query: "+query) - compareVitessAndMySQLResults(t, query, vtQr, mysqlQr, false) + compareVitessAndMySQLResults(t, query, vtConn, vtQr, mysqlQr, false) return vtQr } @@ -175,6 +174,12 @@ func ExecAllowError(t *testing.T, conn *mysql.Conn, query string) (*sqltypes.Res return conn.ExecuteFetch(query, 1000, true) } +// ExecWithRowCount is similar to ExecAllowError with max row count provided. +func ExecWithRowCount(t testing.TB, conn *mysql.Conn, query string, rowCount int) (*sqltypes.Result, error) { + t.Helper() + return conn.ExecuteFetch(query, rowCount, true) +} + // SkipIfBinaryIsBelowVersion skips the given test if the binary's major version is below majorVersion. func SkipIfBinaryIsBelowVersion(t *testing.T, majorVersion int, binary string) { version, err := cluster.GetMajorVersion(binary) @@ -186,6 +191,16 @@ func SkipIfBinaryIsBelowVersion(t *testing.T, majorVersion int, binary string) { } } +// BinaryIsAtVersion returns true if this binary is at or above the required version +func BinaryIsAtVersion(majorVersion int, binary string) bool { + version, err := cluster.GetMajorVersion(binary) + if err != nil { + return false + } + return version >= majorVersion + +} + // AssertMatchesWithTimeout asserts that the given query produces the expected result. // The query will be executed every 'r' duration until it matches the expected result. // If after 'd' duration we still did not find the expected result, the test will be marked as failed. @@ -211,15 +226,15 @@ func AssertMatchesWithTimeout(t *testing.T, conn *mysql.Conn, query, expected st } // WaitForAuthoritative waits for a table to become authoritative -func WaitForAuthoritative(t *testing.T, vtgateProcess cluster.VtgateProcess, ks, tbl string) error { - timeout := time.After(10 * time.Second) +func WaitForAuthoritative(t *testing.T, ks, tbl string, readVSchema func() (*interface{}, error)) error { + timeout := time.After(60 * time.Second) for { select { case <-timeout: return fmt.Errorf("schema tracking didn't mark table t2 as authoritative until timeout") default: time.Sleep(1 * time.Second) - res, err := vtgateProcess.ReadVSchema() + res, err := readVSchema() require.NoError(t, err, res) t2Map := getTableT2Map(res, ks, tbl) authoritative, fieldPresent := t2Map["column_list_authoritative"] @@ -237,7 +252,7 @@ func WaitForAuthoritative(t *testing.T, vtgateProcess cluster.VtgateProcess, ks, // WaitForColumn waits for a table's column to be present func WaitForColumn(t *testing.T, vtgateProcess cluster.VtgateProcess, ks, tbl, col string) error { - timeout := time.After(10 * time.Second) + timeout := time.After(60 * time.Second) for { select { case <-timeout: @@ -288,3 +303,40 @@ func convertToMap(input interface{}) map[string]interface{} { output := input.(map[string]interface{}) return output } + +func GetInitDBSQL(initDBSQL string, updatedPasswords string, oldAlterTableMode string) (string, error) { + // Since password update is DML we need to insert it before we disable + // super_read_only therefore doing the split below. + splitString := strings.Split(initDBSQL, "# {{custom_sql}}") + if len(splitString) != 2 { + return "", fmt.Errorf("missing `# {{custom_sql}}` in init_db.sql file") + } + var builder strings.Builder + builder.WriteString(splitString[0]) + builder.WriteString(updatedPasswords) + + // https://github.com/vitessio/vitess/issues/8315 + if oldAlterTableMode != "" { + builder.WriteString(oldAlterTableMode) + } + builder.WriteString(splitString[1]) + + return builder.String(), nil +} + +// TimeoutAction performs the action within the given timeout limit. +// If the timeout is reached, the test is failed with errMsg. +// If action returns false, the timeout loop continues, if it returns true, the function succeeds. +func TimeoutAction(t *testing.T, timeout time.Duration, errMsg string, action func() bool) { + deadline := time.After(timeout) + ok := false + for !ok { + select { + case <-deadline: + t.Error(errMsg) + return + case <-time.After(1 * time.Second): + ok = action() + } + } +} diff --git a/go/test/endtoend/vault/dbcreds_secret.json b/go/test/endtoend/vault/dbcreds_secret.json index 96fff38bdcd..ee0a4af534b 100644 --- a/go/test/endtoend/vault/dbcreds_secret.json +++ b/go/test/endtoend/vault/dbcreds_secret.json @@ -1,17 +1,23 @@ { + "root": [ + "RootPass" + ], "vt_app": [ - "password" + "VtAppPass" ], "vt_dba": [ - "password" + "VtDbaPass" ], "vt_repl": [ - "password" + "VtReplPass" ], "vt_appdebug": [ - "password" + "VtDebugPass" ], "vt_filtered": [ - "password" + "VtFilteredPass" + ], + "vt_allprivs": [ + "VtAllprivsPass" ] } diff --git a/go/test/endtoend/vault/vault_test.go b/go/test/endtoend/vault/vault_test.go index 25ed88f4335..9bc5b9cb977 100644 --- a/go/test/endtoend/vault/vault_test.go +++ b/go/test/endtoend/vault/vault_test.go @@ -33,6 +33,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" "vitess.io/vitess/go/vt/log" ) @@ -51,9 +52,7 @@ var ( hostname = "localhost" keyspaceName = "ks" shardName = "0" - dbName = "vt_ks" - mysqlUsers = []string{"vt_dba", "vt_app", "vt_appdebug", "vt_repl", "vt_filtered"} - mysqlPassword = "password" + mysqlPassword = "VtDbaPass" vtgateUser = "vtgate_user" vtgatePassword = "password123" commonTabletArg = []string{ @@ -251,10 +250,21 @@ func initializeClusterLate(t *testing.T) { out, err := vtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, "--durability-policy=semi_sync") require.NoError(t, err, out) + initDb, _ := os.ReadFile(path.Join(os.Getenv("VTROOT"), "/config/init_db.sql")) + sql := string(initDb) + // The original init_db.sql does not have any passwords. Here we update the init file with passwords + sql, err = utils.GetInitDBSQL(sql, cluster.GetPasswordUpdateSQL(clusterInstance), "") + require.NoError(t, err, "expected to load init_db file") + newInitDBFile := path.Join(clusterInstance.TmpDirectory, "init_db_with_passwords.sql") + err = os.WriteFile(newInitDBFile, []byte(sql), 0660) + require.NoError(t, err, "expected to load init_db file") + // Start MySQL var mysqlCtlProcessList []*exec.Cmd for _, shard := range clusterInstance.Keyspaces[0].Shards { for _, tablet := range shard.Vttablets { + tablet.MysqlctlProcess.InitDBFile = newInitDBFile + tablet.VttabletProcess.DbPassword = mysqlPassword proc, err := tablet.MysqlctlProcess.StartProcess() require.NoError(t, err) mysqlCtlProcessList = append(mysqlCtlProcessList, proc) @@ -268,21 +278,6 @@ func initializeClusterLate(t *testing.T) { } for _, tablet := range []*cluster.Vttablet{primary, replica} { - for _, user := range mysqlUsers { - query := fmt.Sprintf("ALTER USER '%s'@'%s' IDENTIFIED BY '%s';", user, hostname, mysqlPassword) - _, err = tablet.VttabletProcess.QueryTablet(query, keyspace.Name, false) - // Reset after the first ALTER, or we lock ourselves out. - tablet.VttabletProcess.DbPassword = mysqlPassword - if err != nil { - query = fmt.Sprintf("ALTER USER '%s'@'%%' IDENTIFIED BY '%s';", user, mysqlPassword) - _, err = tablet.VttabletProcess.QueryTablet(query, keyspace.Name, false) - require.NoError(t, err) - } - } - query := fmt.Sprintf("create database %s;", dbName) - _, err = tablet.VttabletProcess.QueryTablet(query, keyspace.Name, false) - require.NoError(t, err) - err = tablet.VttabletProcess.Setup() require.NoError(t, err) diff --git a/go/test/endtoend/versionupgrade/upgrade_test.go b/go/test/endtoend/versionupgrade/upgrade_test.go index bf7108a8291..87f7f9e8675 100644 --- a/go/test/endtoend/versionupgrade/upgrade_test.go +++ b/go/test/endtoend/versionupgrade/upgrade_test.go @@ -88,7 +88,8 @@ func TestMain(m *testing.M) { clusterInstance.VtctldExtraArgs = []string{ "--schema_change_dir", schemaChangeDirectory, "--schema_change_controller", "local", - "--schema_change_check_interval", "1"} + "--schema_change_check_interval", "1s", + } if err := clusterInstance.StartTopo(); err != nil { return 1, err @@ -106,9 +107,6 @@ func TestMain(m *testing.M) { return 1, err } - // TODO: remove this once we upgrade to v12 - // setting the planner version to 0, so the vtgate binary's default is used - clusterInstance.VtGatePlannerVersion = 0 vtgateInstance := clusterInstance.NewVtgateInstance() // Start vtgate if err := vtgateInstance.Setup(); err != nil { diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go index 43991454b6e..1fd0aabc822 100644 --- a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go @@ -30,28 +30,39 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "github.com/stretchr/testify/require" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/throttler" "vitess.io/vitess/go/vt/log" ) var ( debugMode = false // set to true for local debugging: this uses the local env vtdataroot and does not teardown clusters - originalVtdataroot string - vtdataroot string + originalVtdataroot string + vtdataroot string + // If you query the sidecar database directly against mysqld then you will need to specify the + // sidecarDBIdentifier + sidecarDBName = "__vt_e2e-test" // test a non-default sidecar database name that also needs to be escaped + sidecarDBIdentifier = sqlparser.String(sqlparser.NewIdentifierCS(sidecarDBName)) mainClusterConfig *ClusterConfig externalClusterConfig *ClusterConfig - extraVTGateArgs = []string{"--tablet_refresh_interval", "10ms"} - extraVtctldArgs = []string{"--remote_operation_timeout", "600s", "--topo_etcd_lease_ttl", "120"} + extraVTGateArgs = []string{"--tablet_refresh_interval", "10ms", "--enable_buffer", "--buffer_window", loadTestBufferingWindowDurationStr, + "--buffer_size", "100000", "--buffer_min_time_between_failovers", "0s", "--buffer_max_failover_duration", loadTestBufferingWindowDurationStr} + extraVtctldArgs = []string{"--remote_operation_timeout", "600s", "--topo_etcd_lease_ttl", "120"} // This variable can be used within specific tests to alter vttablet behavior extraVTTabletArgs = []string{} parallelInsertWorkers = "--vreplication-parallel-insert-workers=4" + + throttlerConfig = throttler.Config{Threshold: 15} ) // ClusterConfig defines the parameters like ports, tmpDir, tablet types which uniquely define a vitess cluster @@ -78,6 +89,7 @@ type ClusterConfig struct { // VitessCluster represents all components within the test cluster type VitessCluster struct { + t *testing.T ClusterConfig *ClusterConfig Name string Cells map[string]*Cell @@ -98,10 +110,11 @@ type Cell struct { // Keyspace represents a Vitess keyspace contained by a cell within the test cluster type Keyspace struct { - Name string - Shards map[string]*Shard - VSchema string - Schema string + Name string + Shards map[string]*Shard + VSchema string + Schema string + SidecarDBName string } // Shard represents a Vitess shard in a keyspace @@ -172,37 +185,10 @@ func setVtMySQLRoot(mysqlRoot string) error { return nil } -// setDBFlavor sets the MYSQL_FLAVOR OS env var. -// You should call this after calling setVtMySQLRoot() to ensure that the -// correct flavor is used by mysqlctl based on the current mysqld version -// in the path. If you don't do this then mysqlctl will use the incorrect -// config/mycnf/.cnf file and mysqld may fail to start. -func setDBFlavor() error { - versionStr, err := mysqlctl.GetVersionString() - if err != nil { - return err - } - f, v, err := mysqlctl.ParseVersionString(versionStr) - if err != nil { - return err - } - flavor := fmt.Sprintf("%s%d%d", f, v.Major, v.Minor) - err = os.Setenv("MYSQL_FLAVOR", string(flavor)) - if err != nil { - return err - } - fmt.Printf("MYSQL_FLAVOR is %s\n", string(flavor)) - return nil -} - func unsetVtMySQLRoot() { _ = os.Unsetenv("VT_MYSQL_ROOT") } -func unsetDBFlavor() { - _ = os.Unsetenv("MYSQL_FLAVOR") -} - // getDBTypeVersionInUse checks the major DB version of the mysqld binary // that mysqlctl would currently use, e.g. 5.7 or 8.0 (in semantic versioning // this would be major.minor but in MySQL it's effectively the major version). @@ -322,7 +308,6 @@ func init() { if os.Getenv("VREPLICATION_E2E_DEBUG") != "" { debugMode = true } - rand.Seed(time.Now().UTC().UnixNano()) originalVtdataroot = os.Getenv("VTDATAROOT") var mainVtDataRoot string if debugMode { @@ -336,8 +321,11 @@ func init() { // NewVitessCluster starts a basic cluster with vtgate, vtctld and the topo func NewVitessCluster(t *testing.T, name string, cellNames []string, clusterConfig *ClusterConfig) *VitessCluster { - vc := &VitessCluster{Name: name, Cells: make(map[string]*Cell), ClusterConfig: clusterConfig} + vc := &VitessCluster{t: t, Name: name, Cells: make(map[string]*Cell), ClusterConfig: clusterConfig} require.NotNil(t, vc) + + vc.CleanupDataroot(t, true) + topo := cluster.TopoProcessInstance(vc.ClusterConfig.topoPort, vc.ClusterConfig.topoPort+1, vc.ClusterConfig.hostname, "etcd2", "global") require.NotNil(t, topo) @@ -373,17 +361,43 @@ func NewVitessCluster(t *testing.T, name string, cellNames []string, clusterConf return vc } +// CleanupDataroot deletes the vtdataroot directory. Since we run multiple tests sequentially in a single CI test shard, +// we can run out of disk space due to all the leftover artifacts from previous tests. +func (vc *VitessCluster) CleanupDataroot(t *testing.T, recreate bool) { + // This is always set to "true" on GitHub Actions runners: + // https://docs.github.com/en/actions/learn-github-actions/variables#default-environment-variables + ci, ok := os.LookupEnv("CI") + if !ok || strings.ToLower(ci) != "true" { + // Leave the directory in place to support local debugging. + return + } + dir := vc.ClusterConfig.vtdataroot + log.Infof("Deleting vtdataroot %s", dir) + err := os.RemoveAll(dir) + require.NoError(t, err) + if recreate { + err = os.Mkdir(dir, 0700) + require.NoError(t, err) + } +} + // AddKeyspace creates a keyspace with specified shard keys and number of replica/read-only tablets. // You can pass optional key value pairs (opts) if you want conditional behavior. func (vc *VitessCluster) AddKeyspace(t *testing.T, cells []*Cell, ksName string, shards string, vschema string, schema string, numReplicas int, numRdonly int, tabletIDBase int, opts map[string]string) (*Keyspace, error) { keyspace := &Keyspace{ - Name: ksName, - Shards: make(map[string]*Shard), + Name: ksName, + Shards: make(map[string]*Shard), + SidecarDBName: sidecarDBName, } - if err := vc.Vtctl.CreateKeyspace(keyspace.Name); err != nil { + if err := vc.VtctldClient.CreateKeyspace(keyspace.Name, keyspace.SidecarDBName); err != nil { t.Fatalf(err.Error()) } + + log.Infof("Applying throttler config for keyspace %s", keyspace.Name) + res, err := throttler.UpdateThrottlerTopoConfigRaw(vc.VtctldClient, keyspace.Name, true, false, throttlerConfig.Threshold, throttlerConfig.Query, nil) + require.NoError(t, err, res) + cellsToWatch := "" for i, cell := range cells { if i > 0 { @@ -412,7 +426,9 @@ func (vc *VitessCluster) AddKeyspace(t *testing.T, cells []*Cell, ksName string, vc.StartVtgate(t, cell, cellsToWatch) } } - _ = vc.VtctlClient.ExecuteCommand("RebuildKeyspaceGraph", ksName) + + err = vc.VtctlClient.ExecuteCommand("RebuildKeyspaceGraph", ksName) + require.NoError(t, err) return keyspace, nil } @@ -422,8 +438,7 @@ func (vc *VitessCluster) AddTablet(t testing.TB, cell *Cell, keyspace *Keyspace, options := []string{ "--queryserver-config-schema-reload-time", "5", - "--enable-lag-throttler", - "--heartbeat_enable", + "--heartbeat_on_demand_duration", "5s", "--heartbeat_interval", "250ms", } // FIXME: for multi-cell initial schema doesn't seem to load without "--queryserver-config-schema-reload-time" options = append(options, extraVTTabletArgs...) @@ -450,7 +465,9 @@ func (vc *VitessCluster) AddTablet(t testing.TB, cell *Cell, keyspace *Keyspace, require.NotNil(t, vttablet) vttablet.SupportsBackup = false - tablet.DbServer = cluster.MysqlCtlProcessInstance(tabletID, vc.ClusterConfig.tabletMysqlPortBase+tabletID, vc.ClusterConfig.tmpDir) + mysqlctlProcess, err := cluster.MysqlCtlProcessInstance(tabletID, vc.ClusterConfig.tabletMysqlPortBase+tabletID, vc.ClusterConfig.tmpDir) + require.NoError(t, err) + tablet.DbServer = mysqlctlProcess require.NotNil(t, tablet.DbServer) tablet.DbServer.InitMysql = true proc, err := tablet.DbServer.StartProcess() @@ -555,9 +572,26 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa require.NotEqual(t, 0, primaryTabletUID, "Should have created a primary tablet") log.Infof("InitializeShard and make %d primary", primaryTabletUID) require.NoError(t, vc.VtctlClient.InitializeShard(keyspace.Name, shardName, cells[0].Name, primaryTabletUID)) + log.Infof("Finished creating shard %s", shard.Name) } + err := vc.VtctlClient.ExecuteCommand("RebuildKeyspaceGraph", keyspace.Name) + require.NoError(t, err) + + log.Infof("Waiting for throttler config to be applied on all shards") + for _, shard := range keyspace.Shards { + for _, tablet := range shard.Tablets { + clusterTablet := &cluster.Vttablet{ + Alias: tablet.Name, + HTTPPort: tablet.Vttablet.Port, + } + log.Infof("+ Waiting for throttler config to be applied on %s, type=%v", tablet.Name, tablet.Vttablet.TabletType) + throttler.WaitForThrottlerStatusEnabled(t, clusterTablet, true, nil, time.Minute) + } + } + log.Infof("Throttler config applied on all shards") + return nil } @@ -605,7 +639,7 @@ func (vc *VitessCluster) AddCell(t testing.TB, name string) (*Cell, error) { return cell, nil } -func (vc *VitessCluster) teardown(t testing.TB) { +func (vc *VitessCluster) teardown() { for _, cell := range vc.Cells { for _, vtgate := range cell.Vtgates { if err := vtgate.TearDown(); err != nil { @@ -668,13 +702,13 @@ func (vc *VitessCluster) teardown(t testing.TB) { } // TearDown brings down a cluster, deleting processes, removing topo keys -func (vc *VitessCluster) TearDown(t testing.TB) { +func (vc *VitessCluster) TearDown(t *testing.T) { if debugMode { return } done := make(chan bool) go func() { - vc.teardown(t) + vc.teardown() done <- true }() select { @@ -685,6 +719,7 @@ func (vc *VitessCluster) TearDown(t testing.TB) { } // some processes seem to hang around for a bit time.Sleep(5 * time.Second) + vc.CleanupDataroot(t, false) } func (vc *VitessCluster) getVttabletsInKeyspace(t *testing.T, cell *Cell, ksName string, tabletType string) map[string]*cluster.VttabletProcess { @@ -722,6 +757,10 @@ func (vc *VitessCluster) getPrimaryTablet(t *testing.T, ksName, shardName string return nil } +func (vc *VitessCluster) GetVTGateConn(t *testing.T) *mysql.Conn { + return getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) +} + func (vc *VitessCluster) startQuery(t *testing.T, query string) (func(t *testing.T), func(t *testing.T)) { conn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) _, err := conn.ExecuteFetch("begin", 1000, false) @@ -772,12 +811,7 @@ func setupDBTypeVersion(t *testing.T, value string) func() { if err := downloadDBTypeVersion(dbType, majorVersion, path); err != nil { t.Fatalf("Could not download %s, error: %v", majorVersion, err) } - // Set the MYSQL_FLAVOR OS ENV var for mysqlctl to use the correct config file - if err := setDBFlavor(); err != nil { - t.Fatalf("Could not set MYSQL_FLAVOR: %v", err) - } return func() { - unsetDBFlavor() unsetVtMySQLRoot() } } diff --git a/go/test/endtoend/vreplication/config_test.go b/go/test/endtoend/vreplication/config_test.go index 9cb05fa1044..8ac0edf2885 100644 --- a/go/test/endtoend/vreplication/config_test.go +++ b/go/test/endtoend/vreplication/config_test.go @@ -31,13 +31,18 @@ package vreplication // // The internal table _vt_PURGE_4f9194b43b2011eb8a0104ed332e05c2_20221210194431 should be ignored by vreplication // The db_order_test table is used to ensure vreplication and vdiff work well with complex non-integer PKs, even across DB versions. +// The db_order_test table needs to use a collation that exists in all versions for cross version tests as we use the collation for the PK +// based merge sort in VDiff. The table is using a non-default collation for any version with utf8mb4 as 5.7 does NOT show the default +// collation in the SHOW CREATE TABLE output which means in the cross version tests the source and target will be using a different collation. +// The vdiff_order table is used to test MySQL sort->VDiff merge sort ordering and ensure it aligns across Reshards. It must not use the +// default collation as it has to work across versions and the 8.0 default does not exist in 5.7. var ( // All standard user tables should have a primary key and at least one secondary key. initialProductSchema = ` create table product(pid int, description varbinary(128), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key(pid), key(date1,date2)) CHARSET=utf8mb4; create table customer(cid int, name varchar(128) collate utf8mb4_bin, meta json default null, typ enum('individual','soho','enterprise'), sport set('football','cricket','baseball'), ts timestamp not null default current_timestamp, bits bit(2) default b'11', date1 datetime not null default '0000-00-00 00:00:00', - date2 datetime not null default '2021-00-01 00:00:00', dec80 decimal(8,0), primary key(cid,typ), key(name)) CHARSET=utf8mb4; + date2 datetime not null default '2021-00-01 00:00:00', dec80 decimal(8,0), blb blob, primary key(cid,typ), key(name)) CHARSET=utf8mb4; create table customer_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; create table merchant(mname varchar(128), category varchar(128), primary key(mname), key(category)) CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci; create table orders(oid int, cid int, pid int, mname varchar(128), price int, qty int, total int as (qty * price), total2 int as (qty * price) stored, primary key(oid), key(pid), key(cid)) CHARSET=utf8; @@ -47,10 +52,15 @@ create table customer_seq2(id int, next_id bigint, cache bigint, primary key(id) create table ` + "`Lead`(`Lead-id`" + ` binary(16), name varbinary(16), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key (` + "`Lead-id`" + `), key (date1)); create table ` + "`Lead-1`(`Lead`" + ` binary(16), name varbinary(16), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key (` + "`Lead`" + `), key (date2)); create table _vt_PURGE_4f9194b43b2011eb8a0104ed332e05c2_20221210194431(id int, val varbinary(128), primary key(id), key(val)); -create table db_order_test (c_uuid varchar(64) not null default '', created_at datetime not null, dstuff varchar(128), dtstuff text, dbstuff blob, cstuff char(32), primary key (c_uuid,created_at), key (dstuff)) CHARSET=utf8mb4; +create table db_order_test (c_uuid varchar(64) not null default '', created_at datetime not null, dstuff varchar(128), dtstuff text, dbstuff blob, cstuff char(32), primary key (c_uuid,created_at), key (dstuff)) CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; +create table vdiff_order (order_id varchar(50) collate utf8mb4_unicode_ci not null, primary key (order_id), key (order_id)) charset=utf8mb4 COLLATE=utf8mb4_unicode_ci; create table datze (id int, dt1 datetime not null default current_timestamp, dt2 datetime not null, ts1 timestamp default current_timestamp, primary key (id), key (dt1)); +create table json_tbl (id int, j1 json, j2 json, j3 json not null, primary key(id)); +create table geom_tbl (id int, g geometry, p point, ls linestring, pg polygon, mp multipoint, mls multilinestring, mpg multipolygon, gc geometrycollection, primary key(id)); +create table ` + "`blüb_tbl`" + ` (id int, val1 varchar(20), ` + "`blöb1`" + ` blob, val2 varbinary(20), ` + "`bl@b2`" + ` longblob, txt1 text, blb3 tinyblob, txt2 longtext, blb4 mediumblob, primary key(id)); +create table reftable (id int, val1 varchar(20), primary key(id), key(val1)); +create table loadtest (id int, name varchar(256), primary key(id), key(name)); ` - // These should always be ignored in vreplication internalSchema = ` create table _1e275eef_3b20_11eb_a38f_04ed332e05c2_20201210204529_gho(id int, val varbinary(128), primary key(id)); @@ -64,27 +74,59 @@ create table datze (id int, dt1 datetime not null default current_timestamp, dt2 initialProductVSchema = ` { "tables": { - "product": {}, - "merchant": {}, - "orders": {}, - "customer": {}, - "customer_seq": { - "type": "sequence" - }, - "customer2": {}, - "customer_seq2": { - "type": "sequence" - }, - "order_seq": { - "type": "sequence" - }, - "Lead": {}, - "Lead-1": {}, - "db_order_test": {}, - "datze": {} + "product": {}, + "merchant": {}, + "orders": {}, + "loadtest": {}, + "customer": {}, + "customer_seq": { + "type": "sequence" + }, + "customer2": {}, + "customer_seq2": { + "type": "sequence" + }, + "order_seq": { + "type": "sequence" + }, + "Lead": {}, + "Lead-1": {}, + "db_order_test": {}, + "vdiff_order": {}, + "datze": {}, + "reftable": { + "type": "reference" + } + } +} +` + + createLookupVindexVSchema = ` +{ + "sharded": true, + "vindexes": { + "customer_name_keyspace_id": { + "type": "consistent_lookup", + "params": { + "table": "product.customer_name_keyspace_id", + "from": "name,cid", + "to": "keyspace_id", + "ignore_nulls": "true" + }, + "owner": "customer" + } + }, + "tables": { + "customer": { + "column_vindexes": [{ + "columns": ["name", "cid"], + "name": "customer_name_keyspace_id" + }] + } } } ` + customerSchema = "" customerVSchema = ` { @@ -96,11 +138,22 @@ create table datze (id int, dt1 datetime not null default current_timestamp, dt2 "xxhash": { "type": "xxhash" }, + "unicode_loose_md5": { + "type": "unicode_loose_md5" + }, "bmd5": { "type": "binary_md5" } }, "tables": { + "loadtest": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, "customer": { "column_vindexes": [ { @@ -149,6 +202,38 @@ create table datze (id int, dt1 datetime not null default current_timestamp, dt2 } ] }, + "vdiff_order": { + "column_vindexes": [ + { + "column": "order_id", + "name": "unicode_loose_md5" + } + ] + }, + "geom_tbl": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, + "json_tbl": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, + "blüb_tbl": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, "datze": { "column_vindexes": [ { @@ -156,6 +241,9 @@ create table datze (id int, dt1 datetime not null default current_timestamp, dt2 "name": "reverse_bits" } ] + }, + "reftable": { + "type": "reference" } } } @@ -224,12 +312,23 @@ create table datze (id int, dt1 datetime not null default current_timestamp, dt2 "reverse_bits": { "type": "reverse_bits" }, + "unicode_loose_md5": { + "type": "unicode_loose_md5" + }, "xxhash": { "type": "xxhash" } }, "tables": { - "customer": { + "loadtest": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, + "customer": { "column_vindexes": [ { "column": "cid", @@ -261,12 +360,47 @@ create table datze (id int, dt1 datetime not null default current_timestamp, dt2 } ] }, - "cproduct": { + "vdiff_order": { + "column_vindexes": [ + { + "column": "order_id", + "name": "unicode_loose_md5" + } + ] + }, + "geom_tbl": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, + "json_tbl": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, + "blüb_tbl": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, + "cproduct": { "type": "reference" }, "vproduct": { "type": "reference" - } + }, + "reftable": { + "type": "reference" + } } } ` @@ -393,7 +527,6 @@ create table datze (id int, dt1 datetime not null default current_timestamp, dt2 create table review(rid int, pid int, review varbinary(128), primary key(rid)); create table rating(gid int, pid int, rating int, primary key(gid)); ` - initialExternalVSchema = ` { "tables": { @@ -402,4 +535,40 @@ create table rating(gid int, pid int, rating int, primary key(gid)); } } ` + + jsonValues = []string{ + `"abc"`, + `123`, + `{"foo": 456}`, + `{"bar": "foo"}`, + `[1, "abc", 932409834098324908234092834092834, 234234234234234234234234.2342342342349]`, + `{"a":2947293482093480923840923840923, "cba":334234234234234234234234234.234234239090}`, + `[1, "abc", -1, 0.2342342342349, {"a":"b","c":"d","ab":"abc","bc":["x","y"]}]`, + `{"a":2947293482093480923840923840923, "cba":{"a":2947293482093480923840923840923, "cba":334234234234234234234234234.234234239090}}`, + `{"asdf":{"foo":123}}`, + `{"a":"b","c":"d","ab":"abc","bc":["x","y"]}`, + `["here",["I","am"],"!!!"]`, + `{"scopes":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAEAAAAAAEAAAAAA8AAABgAAAAAABAAAACAAAAAAAAA"}`, + `"scalar string"`, + `"scalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar string"`, + `"first line\\r\\nsecond line\\rline with escapes\\\\ \\r\\n"`, + `true`, + `false`, + `""`, + `-1`, + `1`, + `32767`, + `32768`, + `-32768`, + `-32769`, + `2.147483647e+09`, + `1.8446744073709552e+19`, + `-9.223372036854776e+18`, + `{}`, + `[]`, + `"2015-01-15 23:24:25.000000"`, + `"23:24:25.000000"`, + `"23:24:25.120000"`, + `"2015-01-15"`, + } ) diff --git a/go/test/endtoend/vreplication/fk_config_test.go b/go/test/endtoend/vreplication/fk_config_test.go new file mode 100644 index 00000000000..5b02aeb62bb --- /dev/null +++ b/go/test/endtoend/vreplication/fk_config_test.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +var ( + initialFKSchema = ` +create table parent(id int, name varchar(128), primary key(id)) engine=innodb; +create table child(id int, parent_id int, name varchar(128), primary key(id), foreign key(parent_id) references parent(id) on delete cascade) engine=innodb; +` + initialFKData = ` +insert into parent values(1, 'parent1'), (2, 'parent2'); +insert into child values(1, 1, 'child11'), (2, 1, 'child21'), (3, 2, 'child32');` + + initialFKSourceVSchema = ` +{ + "tables": { + "parent": {}, + "child": {} + } +} +` + + initialFKTargetVSchema = ` +{ + "sharded": true, + "vindexes": { + "reverse_bits": { + "type": "reverse_bits" + } + }, + "tables": { + "parent": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, + "child": { + "column_vindexes": [ + { + "column": "parent_id", + "name": "reverse_bits" + } + ] + } + } +} +` +) diff --git a/go/test/endtoend/vreplication/fk_test.go b/go/test/endtoend/vreplication/fk_test.go new file mode 100644 index 00000000000..31886864f11 --- /dev/null +++ b/go/test/endtoend/vreplication/fk_test.go @@ -0,0 +1,274 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "context" + "fmt" + "math/rand" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/log" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +// TestFKWorkflow runs a MoveTables workflow with atomic copy for a db with foreign key constraints. +// It inserts initial data, then simulates load. We insert both child rows with foreign keys and those without, +// i.e. with foreign_key_checks=0. +func TestFKWorkflow(t *testing.T) { + // ensure that there are multiple copy phase cycles per table + extraVTTabletArgs = []string{"--vstream_packet_size=256"} + defer func() { extraVTTabletArgs = nil }() + + cellName := "zone" + cells := []string{cellName} + vc = NewVitessCluster(t, "TestFKWorkflow", cells, mainClusterConfig) + + require.NotNil(t, vc) + allCellNames = cellName + defaultCellName := cellName + defaultCell = vc.Cells[defaultCellName] + sourceKeyspace := "fksource" + shardName := "0" + + defer vc.TearDown(t) + + cell := vc.Cells[cellName] + vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialFKSourceVSchema, initialFKSchema, 0, 0, 100, sourceKsOpts) + + vtgate = cell.Vtgates[0] + require.NotNil(t, vtgate) + err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKeyspace, shardName) + require.NoError(t, err) + vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKeyspace, shardName), 1, 30*time.Second) + + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + verifyClusterHealth(t, vc) + + var ls *fkLoadSimulator + + insertInitialFKData(t) + withLoad := true // Set it to false to skip load simulation, while debugging + var cancel context.CancelFunc + var ctx context.Context + if withLoad { + ctx, cancel = context.WithCancel(context.Background()) + ls = newFKLoadSimulator(t, ctx) + defer func() { + select { + case <-ctx.Done(): + default: + cancel() + } + }() + go ls.simulateLoad() + } + targetKeyspace := "fktarget" + targetTabletId := 200 + vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialFKTargetVSchema, initialFKSchema, 0, 0, targetTabletId, sourceKsOpts) + vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKeyspace, shardName), 1, 30*time.Second) + + workflowName := "fk" + ksWorkflow := fmt.Sprintf("%s.%s", targetKeyspace, workflowName) + + mt := newMoveTables(vc, &moveTables{ + workflowName: workflowName, + targetKeyspace: targetKeyspace, + sourceKeyspace: sourceKeyspace, + atomicCopy: true, + }, moveTablesFlavorRandom) + mt.Create() + + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + targetKs := vc.Cells[cellName].Keyspaces[targetKeyspace] + targetTab := targetKs.Shards["0"].Tablets[fmt.Sprintf("%s-%d", cellName, targetTabletId)].Vttablet + require.NotNil(t, targetTab) + catchup(t, targetTab, workflowName, "MoveTables") + vdiff(t, targetKeyspace, workflowName, cellName, true, false, nil) + ls.waitForAdditionalRows(200) + vdiff(t, targetKeyspace, workflowName, cellName, true, false, nil) + if withLoad { + cancel() + <-ch + } + mt.SwitchReadsAndWrites() + + log.Infof("Switch traffic done") + + if withLoad { + ctx, cancel = context.WithCancel(context.Background()) + ls = newFKLoadSimulator(t, ctx) + defer cancel() + go ls.simulateLoad() + } + ls.waitForAdditionalRows(200) + if withLoad { + cancel() + <-ch + } +} + +func insertInitialFKData(t *testing.T) { + t.Run("insertInitialFKData", func(t *testing.T) { + sourceKeyspace := "fksource" + shard := "0" + db := fmt.Sprintf("%s:%s", sourceKeyspace, shard) + log.Infof("Inserting initial FK data") + execMultipleQueries(t, vtgateConn, db, initialFKData) + log.Infof("Done inserting initial FK data") + waitForRowCount(t, vtgateConn, db, "parent", 2) + waitForRowCount(t, vtgateConn, db, "child", 3) + }) +} + +var currentParentId int64 +var currentChildId int64 + +func init() { + currentParentId = 100 + currentChildId = 100 +} + +var ch = make(chan bool) + +type fkLoadSimulator struct { + t *testing.T + ctx context.Context +} + +func newFKLoadSimulator(t *testing.T, ctx context.Context) *fkLoadSimulator { + return &fkLoadSimulator{ + t: t, + ctx: ctx, + } +} + +func (ls *fkLoadSimulator) simulateLoad() { + t := ls.t + var err error + for i := 0; ; i++ { + if i%1000 == 0 { + log.Infof("Load simulation iteration %d", i) + } + select { + case <-ls.ctx.Done(): + ch <- true + return + default: + } + // Decide operation based on random number + op := rand.Intn(100) + switch { + case op < 50: // 50% chance to insert + ls.insert() + case op < 80: // 30% chance to update + ls.update() + default: // 20% chance to delete + ls.delete() + } + require.NoError(t, err) + time.Sleep(1 * time.Millisecond) + } +} + +func (ls *fkLoadSimulator) getNumRowsParent(vtgateConn *mysql.Conn) int { + t := ls.t + qr := execVtgateQuery(t, vtgateConn, "fksource", "SELECT COUNT(*) FROM parent") + require.NotNil(t, qr) + numRows, err := strconv.Atoi(qr.Rows[0][0].ToString()) + require.NoError(t, err) + return numRows +} + +func (ls *fkLoadSimulator) waitForAdditionalRows(count int) { + t := ls.t + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + numRowsStart := ls.getNumRowsParent(vtgateConn) + shortCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + for { + switch { + case shortCtx.Err() != nil: + t.Fatalf("Timed out waiting for additional rows") + default: + numRows := ls.getNumRowsParent(vtgateConn) + if numRows >= numRowsStart+count { + return + } + time.Sleep(10 * time.Millisecond) + } + } +} + +func (ls *fkLoadSimulator) insert() { + t := ls.t + currentParentId++ + insertQuery := fmt.Sprintf("INSERT INTO parent (id) VALUES (%d)", currentParentId) + qr := ls.exec(insertQuery) + require.NotNil(t, qr) + // insert one or more children, some with valid foreign keys, some without. + for i := 0; i < rand.Intn(4)+1; i++ { + currentChildId++ + if i == 3 { + insertQuery = fmt.Sprintf("INSERT /*+ SET_VAR(foreign_key_checks=0) */ INTO child (id, parent_id) VALUES (%d, %d)", currentChildId, currentParentId+1000000) + ls.exec(insertQuery) + } else { + insertQuery = fmt.Sprintf("INSERT INTO child (id, parent_id) VALUES (%d, %d)", currentChildId, currentParentId) + ls.exec(insertQuery) + } + } +} + +func (ls *fkLoadSimulator) getRandomId() int64 { + t := ls.t + selectQuery := "SELECT id FROM parent ORDER BY RAND() LIMIT 1" + qr := ls.exec(selectQuery) + require.NotNil(t, qr) + if len(qr.Rows) == 0 { + return 0 + } + id, err := qr.Rows[0][0].ToInt64() + require.NoError(t, err) + return id +} + +func (ls *fkLoadSimulator) update() { + updateQuery := fmt.Sprintf("UPDATE parent SET name = 'parent%d' WHERE id = %d", rand.Intn(1000)+1, ls.getRandomId()) + ls.exec(updateQuery) +} + +func (ls *fkLoadSimulator) delete() { + deleteQuery := fmt.Sprintf("DELETE FROM parent WHERE id = %d", ls.getRandomId()) + ls.exec(deleteQuery) +} + +func (ls *fkLoadSimulator) exec(query string) *sqltypes.Result { + t := ls.t + qr := execVtgateQuery(t, vtgateConn, "fksource", query) + require.NotNil(t, qr) + return qr +} diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go index bc3ace3f064..b9574c24b8f 100644 --- a/go/test/endtoend/vreplication/helper_test.go +++ b/go/test/endtoend/vreplication/helper_test.go @@ -18,40 +18,42 @@ package vreplication import ( "context" + "crypto/rand" + "encoding/hex" + "encoding/json" "fmt" "io" "net/http" "os/exec" "regexp" "sort" - "strconv" "strings" + "sync/atomic" "testing" "time" - "github.com/PuerkitoBio/goquery" "github.com/buger/jsonparser" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tidwall/gjson" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) const ( defaultTick = 1 * time.Second defaultTimeout = 30 * time.Second workflowStateTimeout = 90 * time.Second - workflowStateCopying = "Copying" // nolint - workflowStateRunning = "Running" // nolint - workflowStateStopped = "Stopped" // nolint - workflowStateError = "Error" // nolint ) func execMultipleQueries(t *testing.T, conn *mysql.Conn, database string, lines string) { @@ -122,12 +124,12 @@ func waitForQueryResult(t *testing.T, conn *mysql.Conn, database string, query s // waitForTabletThrottlingStatus waits for the tablet to return the provided HTTP code for // the provided app name in its self check. -func waitForTabletThrottlingStatus(t *testing.T, tablet *cluster.VttabletProcess, appName string, wantCode int64) { +func waitForTabletThrottlingStatus(t *testing.T, tablet *cluster.VttabletProcess, throttlerApp throttlerapp.Name, wantCode int64) { var gotCode int64 timer := time.NewTimer(defaultTimeout) defer timer.Stop() for { - output, err := throttlerCheckSelf(tablet, appName) + output, err := throttlerCheckSelf(tablet, throttlerApp) require.NoError(t, err) gotCode, err = jsonparser.GetInt([]byte(output), "StatusCode") @@ -141,7 +143,7 @@ func waitForTabletThrottlingStatus(t *testing.T, tablet *cluster.VttabletProcess select { case <-timer.C: require.FailNow(t, fmt.Sprintf("tablet %q did not return expected status of %d for application %q before the timeout of %s; last seen status: %d", - tablet.Name, wantCode, appName, defaultTimeout, gotCode)) + tablet.Name, wantCode, throttlerApp, defaultTimeout, gotCode)) default: time.Sleep(defaultTick) } @@ -228,12 +230,28 @@ func waitForRowCountInTablet(t *testing.T, vttablet *cluster.VttabletProcess, da } } -func validateThatQueryExecutesOnTablet(t *testing.T, conn *mysql.Conn, tablet *cluster.VttabletProcess, ksName string, query string, matchQuery string) bool { - count := getQueryCount(tablet.QueryzURL, matchQuery) +func executeOnTablet(t *testing.T, conn *mysql.Conn, tablet *cluster.VttabletProcess, ksName string, query string, matchQuery string) (int, []byte, int, []byte) { + queryStatsURL := fmt.Sprintf("http://%s:%d/debug/query_stats", tablet.TabletHostname, tablet.Port) + + count0, body0 := getQueryCount(t, queryStatsURL, matchQuery) + qr := execVtgateQuery(t, conn, ksName, query) require.NotNil(t, qr) - newCount := getQueryCount(tablet.QueryzURL, matchQuery) - return newCount == count+1 + + count1, body1 := getQueryCount(t, queryStatsURL, matchQuery) + return count0, body0, count1, body1 +} + +func assertQueryExecutesOnTablet(t *testing.T, conn *mysql.Conn, tablet *cluster.VttabletProcess, ksName string, query string, matchQuery string) { + t.Helper() + count0, body0, count1, body1 := executeOnTablet(t, conn, tablet, ksName, query, matchQuery) + assert.Equalf(t, count0+1, count1, "query %q did not execute in target;\ntried to match %q\nbefore:\n%s\n\nafter:\n%s\n\n", query, matchQuery, body0, body1) +} + +func assertQueryDoesNotExecutesOnTablet(t *testing.T, conn *mysql.Conn, tablet *cluster.VttabletProcess, ksName string, query string, matchQuery string) { + t.Helper() + count0, body0, count1, body1 := executeOnTablet(t, conn, tablet, ksName, query, matchQuery) + assert.Equalf(t, count0, count1, "query %q executed in target;\ntried to match %q\nbefore:\n%s\n\nafter:\n%s\n\n", query, matchQuery, body0, body1) } // waitForWorkflowState waits for all of the given workflow's @@ -348,77 +366,36 @@ func confirmTablesHaveSecondaryKeys(t *testing.T, tablets []*cluster.VttabletPro } } -func getHTTPBody(url string) string { +func getHTTPBody(t *testing.T, url string) []byte { resp, err := http.Get(url) - if err != nil { - log.Infof("http Get returns %+v", err) - return "" - } - if resp.StatusCode != 200 { - log.Infof("http Get returns status %d", resp.StatusCode) - return "" - } - respByte, _ := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, 200, resp.StatusCode) + defer resp.Body.Close() - body := string(respByte) + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) return body } -func getQueryCount(url string, query string) int { - var headings, row []string - var rows [][]string - body := getHTTPBody(url) - doc, err := goquery.NewDocumentFromReader(strings.NewReader(body)) - if err != nil { - log.Infof("goquery parsing returns %+v\n", err) - return 0 +func getQueryCount(t *testing.T, url string, query string) (int, []byte) { + body := getHTTPBody(t, url) + + var queryStats []struct { + Query string + QueryCount uint64 } - var queryIndex, countIndex, count int - queryIndex = -1 - countIndex = -1 + err := json.Unmarshal(body, &queryStats) + require.NoError(t, err) - doc.Find("table").Each(func(index int, tablehtml *goquery.Selection) { - tablehtml.Find("tr").Each(func(indextr int, rowhtml *goquery.Selection) { - rowhtml.Find("th").Each(func(indexth int, tableheading *goquery.Selection) { - heading := tableheading.Text() - if heading == "Query" { - queryIndex = indexth - } - if heading == "Count" { - countIndex = indexth - } - headings = append(headings, heading) - }) - rowhtml.Find("td").Each(func(indexth int, tablecell *goquery.Selection) { - row = append(row, tablecell.Text()) - }) - rows = append(rows, row) - row = nil - }) - }) - if queryIndex == -1 || countIndex == -1 { - log.Infof("Queryz response is incorrect") - return 0 - } - for _, row := range rows { - if len(row) != len(headings) { - continue - } - filterChars := []string{"_", "`"} - //Queries seem to include non-printable characters at times and hence equality fails unless these are removed - re := regexp.MustCompile("[[:^ascii:]]") - foundQuery := re.ReplaceAllLiteralString(row[queryIndex], "") - cleanQuery := re.ReplaceAllLiteralString(query, "") - for _, filterChar := range filterChars { - foundQuery = strings.ReplaceAll(foundQuery, filterChar, "") - cleanQuery = strings.ReplaceAll(cleanQuery, filterChar, "") - } - if foundQuery == cleanQuery || strings.Contains(foundQuery, cleanQuery) { - count, _ = strconv.Atoi(row[countIndex]) + for _, q := range queryStats { + if strings.Contains(q.Query, query) { + return int(q.QueryCount), body } } - return count + + return 0, body } func validateDryRunResults(t *testing.T, output string, want []string) { @@ -474,7 +451,17 @@ func checkIfTableExists(t *testing.T, vc *VitessCluster, tabletAlias string, tab return found, nil } -func checkIfDenyListExists(t *testing.T, vc *VitessCluster, ksShard string, table string) (bool, error) { +func validateTableInDenyList(t *testing.T, vc *VitessCluster, ksShard string, table string, mustExist bool) { + found, err := isTableInDenyList(t, vc, ksShard, table) + require.NoError(t, err) + if mustExist { + require.True(t, found, "Table %s not found in deny list", table) + } else { + require.False(t, found, "Table %s found in deny list", table) + } +} + +func isTableInDenyList(t *testing.T, vc *VitessCluster, ksShard string, table string) (bool, error) { var output string var err error found := false @@ -491,7 +478,7 @@ func checkIfDenyListExists(t *testing.T, vc *VitessCluster, ksShard string, tabl } func expectNumberOfStreams(t *testing.T, vtgateConn *mysql.Conn, name string, workflow string, database string, want int) { - query := fmt.Sprintf("select count(*) from _vt.vreplication where workflow='%s';", workflow) + query := sqlparser.BuildParsedQuery("select count(*) from %s.vreplication where workflow='%s'", sidecarDBIdentifier, workflow).Query waitForQueryResult(t, vtgateConn, database, query, fmt.Sprintf(`[[INT64(%d)]]`, want)) } @@ -527,8 +514,8 @@ func getDebugVar(t *testing.T, port int, varPath []string) (string, error) { var err error url := fmt.Sprintf("http://localhost:%d/debug/vars", port) log.Infof("url: %s, varPath: %s", url, strings.Join(varPath, ":")) - body := getHTTPBody(url) - val, _, _, err = jsonparser.Get([]byte(body), varPath...) + body := getHTTPBody(t, url) + val, _, _, err = jsonparser.Get(body, varPath...) require.NoError(t, err) return string(val), nil } @@ -548,7 +535,7 @@ func confirmWorkflowHasCopiedNoData(t *testing.T, targetKS, workflow string) { state := attributeValue.Get("State").String() pos := attributeValue.Get("Pos").String() // If we've actually copied anything then we'll have a position in the stream - if (state == workflowStateRunning || state == workflowStateCopying) && pos != "" { + if (state == binlogdatapb.VReplicationWorkflowState_Running.String() || state == binlogdatapb.VReplicationWorkflowState_Copying.String()) && pos != "" { require.FailNowf(t, "Unexpected data copied in workflow", "The MoveTables workflow %q copied data in less than %s when it should have been waiting. Show output: %s", ksWorkflow, defaultTimeout, output) @@ -608,14 +595,14 @@ func getShardRoutingRules(t *testing.T) string { func verifyCopyStateIsOptimized(t *testing.T, tablet *cluster.VttabletProcess) { // Update information_schem with the latest data - _, err := tablet.QueryTablet("analyze table _vt.copy_state", "", false) + _, err := tablet.QueryTablet(sqlparser.BuildParsedQuery("analyze table %s.copy_state", sidecarDBIdentifier).Query, "", false) require.NoError(t, err) // Verify that there's no delete marked rows and we reset the auto-inc value. // MySQL doesn't always immediately update information_schema so we wait. tmr := time.NewTimer(defaultTimeout) defer tmr.Stop() - query := "select data_free, auto_increment from information_schema.tables where table_schema='_vt' and table_name='copy_state'" + query := sqlparser.BuildParsedQuery("select data_free, auto_increment from information_schema.tables where table_schema='%s' and table_name='copy_state'", sidecarDBName).Query var dataFree, autoIncrement int64 for { res, err := tablet.QueryTablet(query, "", false) @@ -640,3 +627,164 @@ func verifyCopyStateIsOptimized(t *testing.T, tablet *cluster.VttabletProcess) { } } } + +// randHex can be used to generate random strings of +// hex characters to the given length. This can e.g. +// be used to generate and insert test data. +func randHex(n int) (string, error) { + bytes := make([]byte, n) + if _, err := rand.Read(bytes); err != nil { + return "", err + } + return hex.EncodeToString(bytes), nil +} + +func getIntVal(t *testing.T, vars map[string]interface{}, key string) int { + i, ok := vars[key].(float64) + require.True(t, ok) + return int(i) +} + +func getPartialMetrics(t *testing.T, key string, tab *cluster.VttabletProcess) (int, int, int, int) { + vars := tab.GetVars() + insertKey := fmt.Sprintf("%s.insert", key) + updateKey := fmt.Sprintf("%s.insert", key) + cacheSizes := vars["VReplicationPartialQueryCacheSize"].(map[string]interface{}) + queryCounts := vars["VReplicationPartialQueryCount"].(map[string]interface{}) + if cacheSizes[insertKey] == nil || cacheSizes[updateKey] == nil || + queryCounts[insertKey] == nil || queryCounts[updateKey] == nil { + return 0, 0, 0, 0 + } + inserts := getIntVal(t, cacheSizes, insertKey) + updates := getIntVal(t, cacheSizes, updateKey) + insertQueries := getIntVal(t, queryCounts, insertKey) + updateQueries := getIntVal(t, queryCounts, updateKey) + return inserts, updates, insertQueries, updateQueries +} + +// check that the connection's binlog row image is set to NOBLOB +func isBinlogRowImageNoBlob(t *testing.T, tablet *cluster.VttabletProcess) bool { + rs, err := tablet.QueryTablet("select @@global.binlog_row_image", "", false) + require.NoError(t, err) + require.Equal(t, 1, len(rs.Rows)) + mode := strings.ToLower(rs.Rows[0][0].ToString()) + return mode == "noblob" +} + +const ( + loadTestBufferingWindowDurationStr = "30s" + loadTestPostBufferingInsertWindow = 60 * time.Second // should be greater than loadTestBufferingWindowDurationStr + loadTestWaitForCancel = 30 * time.Second + loadTestWaitBetweenQueries = 2 * time.Millisecond +) + +type loadGenerator struct { + t *testing.T + vc *VitessCluster + ctx context.Context + cancel context.CancelFunc +} + +func newLoadGenerator(t *testing.T, vc *VitessCluster) *loadGenerator { + return &loadGenerator{ + t: t, + vc: vc, + } +} + +func (lg *loadGenerator) stop() { + time.Sleep(loadTestPostBufferingInsertWindow) // wait for buffering to stop and additional records to be inserted by startLoad after traffic is switched + log.Infof("Canceling load") + lg.cancel() + time.Sleep(loadTestWaitForCancel) // wait for cancel to take effect + log.Flush() + +} + +func (lg *loadGenerator) start() { + t := lg.t + lg.ctx, lg.cancel = context.WithCancel(context.Background()) + + var id int64 + log.Infof("startLoad: starting") + queryTemplate := "insert into loadtest(id, name) values (%d, 'name-%d')" + var totalQueries, successfulQueries int64 + var deniedErrors, ambiguousErrors, reshardedErrors, tableNotFoundErrors, otherErrors int64 + defer func() { + + log.Infof("startLoad: totalQueries: %d, successfulQueries: %d, deniedErrors: %d, ambiguousErrors: %d, reshardedErrors: %d, tableNotFoundErrors: %d, otherErrors: %d", + totalQueries, successfulQueries, deniedErrors, ambiguousErrors, reshardedErrors, tableNotFoundErrors, otherErrors) + }() + logOnce := true + for { + select { + case <-lg.ctx.Done(): + log.Infof("startLoad: context cancelled") + log.Infof("startLoad: deniedErrors: %d, ambiguousErrors: %d, reshardedErrors: %d, tableNotFoundErrors: %d, otherErrors: %d", + deniedErrors, ambiguousErrors, reshardedErrors, tableNotFoundErrors, otherErrors) + require.Equal(t, int64(0), deniedErrors) + require.Equal(t, int64(0), otherErrors) + require.Equal(t, totalQueries, successfulQueries) + return + default: + go func() { + conn := vc.GetVTGateConn(t) + defer conn.Close() + atomic.AddInt64(&id, 1) + query := fmt.Sprintf(queryTemplate, id, id) + _, err := conn.ExecuteFetch(query, 1, false) + atomic.AddInt64(&totalQueries, 1) + if err != nil { + sqlErr := err.(*sqlerror.SQLError) + if strings.Contains(strings.ToLower(err.Error()), "denied tables") { + log.Infof("startLoad: denied tables error executing query: %d:%v", sqlErr.Number(), err) + atomic.AddInt64(&deniedErrors, 1) + } else if strings.Contains(strings.ToLower(err.Error()), "ambiguous") { + // this can happen when a second keyspace is setup with the same tables, but there are no routing rules + // set yet by MoveTables. So we ignore these errors. + atomic.AddInt64(&ambiguousErrors, 1) + } else if strings.Contains(strings.ToLower(err.Error()), "current keyspace is being resharded") { + atomic.AddInt64(&reshardedErrors, 1) + } else if strings.Contains(strings.ToLower(err.Error()), "not found") { + atomic.AddInt64(&tableNotFoundErrors, 1) + } else { + if logOnce { + log.Infof("startLoad: error executing query: %d:%v", sqlErr.Number(), err) + logOnce = false + } + atomic.AddInt64(&otherErrors, 1) + } + time.Sleep(loadTestWaitBetweenQueries) + } else { + atomic.AddInt64(&successfulQueries, 1) + } + }() + time.Sleep(loadTestWaitBetweenQueries) + } + } +} + +func (lg *loadGenerator) waitForCount(want int64) { + t := lg.t + conn := vc.GetVTGateConn(t) + defer conn.Close() + timer := time.NewTimer(defaultTimeout) + defer timer.Stop() + for { + qr, err := conn.ExecuteFetch("select count(*) from loadtest", 1, false) + require.NoError(t, err) + require.NotNil(t, qr) + got, _ := qr.Rows[0][0].ToInt64() + + if int64(got) >= want { + return + } + select { + case <-timer.C: + require.FailNow(t, fmt.Sprintf("table %q did not reach the expected number of rows (%d) before the timeout of %s; last seen count: %v", + "loadtest", want, defaultTimeout, got)) + default: + time.Sleep(defaultTick) + } + } +} diff --git a/go/test/endtoend/vreplication/initial_data_test.go b/go/test/endtoend/vreplication/initial_data_test.go new file mode 100644 index 00000000000..9443f62abc2 --- /dev/null +++ b/go/test/endtoend/vreplication/initial_data_test.go @@ -0,0 +1,117 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "fmt" + "math/rand" + "os" + "testing" + + "vitess.io/vitess/go/vt/log" +) + +func insertInitialData(t *testing.T) { + t.Run("insertInitialData", func(t *testing.T) { + log.Infof("Inserting initial data") + lines, _ := os.ReadFile("unsharded_init_data.sql") + execMultipleQueries(t, vtgateConn, "product:0", string(lines)) + execVtgateQuery(t, vtgateConn, "product:0", "insert into customer_seq(id, next_id, cache) values(0, 100, 100);") + execVtgateQuery(t, vtgateConn, "product:0", "insert into order_seq(id, next_id, cache) values(0, 100, 100);") + execVtgateQuery(t, vtgateConn, "product:0", "insert into customer_seq2(id, next_id, cache) values(0, 100, 100);") + log.Infof("Done inserting initial data") + + waitForRowCount(t, vtgateConn, "product:0", "product", 2) + waitForRowCount(t, vtgateConn, "product:0", "customer", 3) + waitForQueryResult(t, vtgateConn, "product:0", "select * from merchant", + `[[VARCHAR("Monoprice") VARCHAR("eléctronics")] [VARCHAR("newegg") VARCHAR("elec†ronics")]]`) + + insertJSONValues(t) + }) +} + +const NumJSONRows = 100 + +func insertJSONValues(t *testing.T) { + // insert null value combinations + execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j3) values(1, \"{}\")") + execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j1, j3) values(2, \"{}\", \"{}\")") + execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j2, j3) values(3, \"{}\", \"{}\")") + execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j1, j2, j3) values(4, NULL, 'null', '\"null\"')") + execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j3) values(5, JSON_QUOTE('null'))") + execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j3) values(6, '{}')") + + id := 8 // 6 inserted above and one after copy phase is done + + q := "insert into json_tbl(id, j1, j2, j3) values(%d, '%s', '%s', '{}')" + numJsonValues := len(jsonValues) + for id <= NumJSONRows { + id++ + j1 := rand.Intn(numJsonValues) + j2 := rand.Intn(numJsonValues) + query := fmt.Sprintf(q, id, jsonValues[j1], jsonValues[j2]) + execVtgateQuery(t, vtgateConn, "product:0", query) + } +} + +// insertMoreCustomers creates additional customers. +// Note: this will only work when the customer sequence is in place. +func insertMoreCustomers(t *testing.T, numCustomers int) { + sql := "insert into customer (name) values " + i := 0 + for i < numCustomers { + i++ + sql += fmt.Sprintf("('customer%d')", i) + if i != numCustomers { + sql += "," + } + } + execVtgateQuery(t, vtgateConn, "customer", sql) +} + +func insertMoreProducts(t *testing.T) { + sql := "insert into product(pid, description) values(3, 'cpu'),(4, 'camera'),(5, 'mouse');" + execVtgateQuery(t, vtgateConn, "product", sql) +} + +func insertMoreProductsForSourceThrottler(t *testing.T) { + sql := "insert into product(pid, description) values(103, 'new-cpu'),(104, 'new-camera'),(105, 'new-mouse');" + execVtgateQuery(t, vtgateConn, "product", sql) +} + +func insertMoreProductsForTargetThrottler(t *testing.T) { + sql := "insert into product(pid, description) values(203, 'new-cpu'),(204, 'new-camera'),(205, 'new-mouse');" + execVtgateQuery(t, vtgateConn, "product", sql) +} + +var blobTableQueries = []string{ + "insert into `blüb_tbl`(id, val1, txt1) values (1, 'Jøhn \"❤️\" Paül','Jøhn \"❤️\" Paül keyböard ⌨️ jo˙n')", + "insert into `blüb_tbl`(id, val1, `blöb1`, `bl@b2`) values (2, 'val1_aaa', 'blb1_aaa', 'blb2_AAAA')", + "update `blüb_tbl` set val1 = 'val1_bbb', `bl@b2` = 'blb2_bbb' where id = 1", + "insert into `blüb_tbl`(id, val2, txt1, txt2, blb4) values (3, 'val2_ccc', 'txt1_ccc', 'txt2_ccc', 'blb4_CCC')", + "update `blüb_tbl` set txt1 = 'txt1_ddd'", + "update `blüb_tbl` set blb3 = 'blb3_eee'", + "delete from `blüb_tbl` where id = 2", + "insert into `blüb_tbl`(id, val2, txt1, txt2, blb4) values (4, 'val2_fff', 'txt1_fff', 'txt2_fff', 'blb4_FFF')", + "update `blüb_tbl` set txt1 = 'txt1_eee', blb3 = 'blb3_eee' where id = 4", +} + +func insertIntoBlobTable(t *testing.T) { + for _, query := range blobTableQueries { + execVtgateQuery(t, vtgateConn, "product:0", query) + } +} diff --git a/go/test/endtoend/vreplication/migrate_test.go b/go/test/endtoend/vreplication/migrate_test.go index 0c83658cee8..6155e6ec2e3 100644 --- a/go/test/endtoend/vreplication/migrate_test.go +++ b/go/test/endtoend/vreplication/migrate_test.go @@ -24,6 +24,8 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) func insertInitialDataIntoExternalCluster(t *testing.T, conn *mysql.Conn) { @@ -109,7 +111,7 @@ func TestMigrate(t *testing.T) { "--source=ext1.rating", "create", ksWorkflow); err != nil { t.Fatalf("Migrate command failed with %+v : %s\n", err, output) } - waitForWorkflowState(t, vc, ksWorkflow, workflowStateRunning) + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1) waitForRowCount(t, vtgateConn, "product:0", "rating", 2) waitForRowCount(t, vtgateConn, "product:0", "review", 3) diff --git a/go/test/endtoend/vreplication/movetables_buffering_test.go b/go/test/endtoend/vreplication/movetables_buffering_test.go new file mode 100644 index 00000000000..f2e60cb0db1 --- /dev/null +++ b/go/test/endtoend/vreplication/movetables_buffering_test.go @@ -0,0 +1,45 @@ +package vreplication + +import ( + "testing" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/wrangler" +) + +func TestMoveTablesBuffering(t *testing.T) { + defaultRdonly = 1 + vc = setupMinimalCluster(t) + defer vtgateConn.Close() + defer vc.TearDown(t) + + currentWorkflowType = wrangler.MoveTablesWorkflow + setupMinimalCustomerKeyspace(t) + tables := "loadtest" + err := tstWorkflowExec(t, defaultCellName, workflowName, sourceKs, targetKs, + tables, workflowActionCreate, "", "", "", false) + require.NoError(t, err) + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + + lg := newLoadGenerator(t, vc) + go func() { + lg.start() + }() + lg.waitForCount(1000) + + catchup(t, targetTab1, workflowName, "MoveTables") + catchup(t, targetTab2, workflowName, "MoveTables") + vdiff1(t, ksWorkflow, "") + waitForLowLag(t, "customer", workflowName) + tstWorkflowSwitchReads(t, "", "") + tstWorkflowSwitchWrites(t) + log.Infof("SwitchWrites done") + lg.stop() + + log.Infof("TestMoveTablesBuffering: done") + log.Flush() +} diff --git a/go/test/endtoend/vreplication/partial_movetables_seq_test.go b/go/test/endtoend/vreplication/partial_movetables_seq_test.go new file mode 100644 index 00000000000..36e73f80f31 --- /dev/null +++ b/go/test/endtoend/vreplication/partial_movetables_seq_test.go @@ -0,0 +1,583 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/wrangler" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +/* + This file introduces a new helper framework for vreplication tests. The current one uses a lot of globals + and make assumptions which make adding new types of tests difficult. + + As part of a separate cleanup we will build on this framework to replace the existing one. +*/ + +type keyspace struct { + name string + vschema string + schema string + baseID int64 + shards []string +} + +type workflowOptions struct { + tables []string + sourceShards []string + targetShards []string +} + +type workflow struct { + name string + fromKeyspace string + toKeyspace string + typ string + tc *vrepTestCase + options *workflowOptions +} + +type vrepTestCase struct { + testName string + t *testing.T + defaultCellName string + vtgateConn *mysql.Conn + keyspaces map[string]*keyspace + workflows map[string]*workflow + + vc *VitessCluster + vtgate *cluster.VtgateProcess +} + +func initPartialMoveTablesComplexTestCase(t *testing.T, name string) *vrepTestCase { + const ( + seqVSchema = `{ + "sharded": false, + "tables": { + "customer_seq": { + "type": "sequence" + } + } + }` + seqSchema = `create table customer_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence';` + commerceSchema = `create table customer(cid int, name varchar(128), ts timestamp(3) not null default current_timestamp(3), primary key(cid));` + commerceVSchema = ` + { + "tables": { + "customer": {} + } + } +` + customerSchema = "" + customerVSchema = ` + { + "sharded": true, + "vindexes": { + "reverse_bits": { + "type": "reverse_bits" + } + }, + "tables": { + "customer": { + "column_vindexes": [ + { + "column": "cid", + "name": "reverse_bits" + } + ], + "auto_increment": { + "column": "cid", + "sequence": "customer_seq" + } + } + } + } + ` + ) + tc := &vrepTestCase{ + t: t, + testName: name, + keyspaces: make(map[string]*keyspace), + defaultCellName: "zone1", + workflows: make(map[string]*workflow), + } + tc.keyspaces["commerce"] = &keyspace{ + name: "commerce", + vschema: commerceVSchema, + schema: commerceSchema, + baseID: 100, + shards: []string{"0"}, + } + tc.keyspaces["customer"] = &keyspace{ + name: "customer", + vschema: customerVSchema, + schema: customerSchema, + baseID: 200, + shards: []string{"-80", "80-"}, + } + tc.keyspaces["customer2"] = &keyspace{ + name: "customer2", + vschema: customerVSchema, + schema: "", + baseID: 1200, + shards: []string{"-80", "80-"}, + } + tc.keyspaces["seqSrc"] = &keyspace{ + name: "seqSrc", + vschema: seqVSchema, + schema: seqSchema, + baseID: 400, + shards: []string{"0"}, + } + tc.keyspaces["seqTgt"] = &keyspace{ + name: "seqTgt", + vschema: "", + schema: "", + baseID: 500, + shards: []string{"0"}, + } + tc.setupCluster() + tc.initData() + return tc +} + +func (tc *vrepTestCase) teardown() { + tc.vtgateConn.Close() + vc.TearDown(tc.t) +} + +func (tc *vrepTestCase) setupCluster() { + cells := []string{"zone1"} + + tc.vc = NewVitessCluster(tc.t, tc.testName, cells, mainClusterConfig) + vc = tc.vc // for backward compatibility since vc is used globally in this package + require.NotNil(tc.t, tc.vc) + tc.setupKeyspaces([]string{"commerce", "seqSrc"}) + tc.vtgateConn = getConnection(tc.t, tc.vc.ClusterConfig.hostname, tc.vc.ClusterConfig.vtgateMySQLPort) + vtgateConn = tc.vtgateConn // for backward compatibility since vtgateConn is used globally in this package +} + +func (tc *vrepTestCase) initData() { + _, err := tc.vtgateConn.ExecuteFetch("insert into customer_seq(id, next_id, cache) values(0, 1000, 1000)", 1000, false) + require.NoError(tc.t, err) + _, err = tc.vtgateConn.ExecuteFetch("insert into customer(cid, name) values(1, 'customer1'), (2, 'customer2'),(3, 'customer3')", 1000, false) + require.NoError(tc.t, err) +} + +func (tc *vrepTestCase) setupKeyspaces(keyspaces []string) { + for _, keyspace := range keyspaces { + ks, ok := tc.keyspaces[keyspace] + require.Equal(tc.t, true, ok, "keyspace %s not found", keyspace) + tc.setupKeyspace(ks) + } +} + +func (tc *vrepTestCase) setupKeyspace(ks *keyspace) { + t := tc.t + if _, err := tc.vc.AddKeyspace(t, []*Cell{tc.vc.Cells["zone1"]}, ks.name, strings.Join(ks.shards, ","), + ks.vschema, ks.schema, 0, 0, int(ks.baseID), nil); err != nil { + t.Fatal(err) + } + if tc.vtgate == nil { + defaultCellName := "zone1" + defaultCell := tc.vc.Cells[defaultCellName] + require.NotNil(tc.t, defaultCell) + tc.vtgate = defaultCell.Vtgates[0] + + } + for _, shard := range ks.shards { + require.NoError(t, cluster.WaitForHealthyShard(tc.vc.VtctldClient, ks.name, shard)) + require.NoError(t, tc.vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", ks.name, shard), 1, 30*time.Second)) + } +} + +func (tc *vrepTestCase) newWorkflow(typ, workflowName, fromKeyspace, toKeyspace string, options *workflowOptions) *workflow { + wf := &workflow{ + name: workflowName, + fromKeyspace: fromKeyspace, + toKeyspace: toKeyspace, + typ: typ, + tc: tc, + options: options, + } + return wf +} + +func (wf *workflow) create() { + var err error + t := wf.tc.t + typ := strings.ToLower(wf.typ) + cell := wf.tc.defaultCellName + switch typ { + case "movetables": + currentWorkflowType = wrangler.MoveTablesWorkflow + sourceShards := strings.Join(wf.options.sourceShards, ",") + err = tstWorkflowExec(t, cell, wf.name, wf.fromKeyspace, wf.toKeyspace, + strings.Join(wf.options.tables, ","), workflowActionCreate, "", sourceShards, "", false) + case "reshard": + currentWorkflowType = wrangler.ReshardWorkflow + sourceShards := strings.Join(wf.options.sourceShards, ",") + targetShards := strings.Join(wf.options.targetShards, ",") + if targetShards == "" { + targetShards = sourceShards + } + err = tstWorkflowExec(t, cell, wf.name, wf.fromKeyspace, wf.toKeyspace, + strings.Join(wf.options.tables, ","), workflowActionCreate, "", sourceShards, targetShards, false) + default: + panic(fmt.Sprintf("unknown workflow type: %s", wf.typ)) + } + require.NoError(t, err) + waitForWorkflowState(t, wf.tc.vc, fmt.Sprintf("%s.%s", wf.toKeyspace, wf.name), binlogdatapb.VReplicationWorkflowState_Running.String()) + ks2 := wf.tc.vc.Cells[cell].Keyspaces[wf.toKeyspace] + var i int64 + for _, shardName := range wf.tc.keyspaces[wf.toKeyspace].shards { + tab := ks2.Shards[shardName].Tablets[fmt.Sprintf("%s-%d", cell, wf.tc.keyspaces[wf.toKeyspace].baseID+i)].Vttablet + catchup(t, tab, wf.name, wf.typ) + i += 100 + } + doVdiff2(t, wf.toKeyspace, wf.name, cell, nil) + +} + +func (wf *workflow) switchTraffic() { + require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, wf.toKeyspace, "", workflowActionSwitchTraffic, "", "", "", false)) +} + +func (wf *workflow) reverseTraffic() { + require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, wf.toKeyspace, "", workflowActionReverseTraffic, "", "", "", false)) +} + +func (wf *workflow) complete() { + require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, wf.toKeyspace, "", workflowActionComplete, "", "", "", false)) +} + +// TestPartialMoveTablesWithSequences enhances TestPartialMoveTables by adding an unsharded keyspace which has a +// sequence. This tests that the sequence is migrated correctly and that we can reverse traffic back to the source +func TestPartialMoveTablesWithSequences(t *testing.T) { + + origExtraVTGateArgs := extraVTGateArgs + + extraVTGateArgs = append(extraVTGateArgs, []string{ + "--enable-partial-keyspace-migration", + "--schema_change_signal=false", + }...) + defer func() { + extraVTGateArgs = origExtraVTGateArgs + }() + + tc := initPartialMoveTablesComplexTestCase(t, "TestPartialMoveTablesComplex") + defer tc.teardown() + var err error + + t.Run("Move customer table from unsharded product keyspace to sharded customer keyspace.", func(t *testing.T) { + tc.setupKeyspaces([]string{"customer"}) + wf := tc.newWorkflow("MoveTables", "customer", "commerce", "customer", &workflowOptions{ + tables: []string{"customer"}, + }) + wf.create() + wf.switchTraffic() + wf.complete() + }) + + var wfSeq *workflow + t.Run("Start MoveTables for Sequence", func(t *testing.T) { + tc.setupKeyspace(tc.keyspaces["seqTgt"]) + wfSeq = tc.newWorkflow("MoveTables", "seq", "seqSrc", "seqTgt", &workflowOptions{ + tables: []string{"customer_seq"}, + }) + wfSeq.create() + }) + + var emptyGlobalRoutingRules, emptyShardRoutingRules, preCutoverShardRoutingRules, halfCutoverShardRoutingRules, postCutoverShardRoutingRules string + t.Run("Define and setup RoutingRules", func(t *testing.T) { + emptyGlobalRoutingRules = "{}\n" + + // These should be listed in shard order + emptyShardRoutingRules = `{"rules":[]}` + preCutoverShardRoutingRules = `{"rules":[{"from_keyspace":"customer2","to_keyspace":"customer","shard":"-80"},{"from_keyspace":"customer2","to_keyspace":"customer","shard":"80-"}]}` + halfCutoverShardRoutingRules = `{"rules":[{"from_keyspace":"customer2","to_keyspace":"customer","shard":"-80"},{"from_keyspace":"customer","to_keyspace":"customer2","shard":"80-"}]}` + postCutoverShardRoutingRules = `{"rules":[{"from_keyspace":"customer","to_keyspace":"customer2","shard":"-80"},{"from_keyspace":"customer","to_keyspace":"customer2","shard":"80-"}]}` + + // Remove any manually applied shard routing rules as these + // should be set by SwitchTraffic. + applyShardRoutingRules(t, emptyShardRoutingRules) + require.Equal(t, emptyShardRoutingRules, getShardRoutingRules(t)) + }) + + wfName := "partial80Dash" + sourceKs := "customer" + targetKs := "customer2" + shard := "80-" + var wf80Dash, wfDash80 *workflow + currentCustomerCount = getCustomerCount(t, "before customer2.80-") + t.Run("Start MoveTables on customer2.80-", func(t *testing.T) { + // Now setup the customer2 keyspace so we can do a partial move tables for one of the two shards: 80-. + defaultRdonly = 0 + tc.setupKeyspaces([]string{"customer2"}) + wf80Dash = tc.newWorkflow("MoveTables", wfName, "customer", "customer2", &workflowOptions{ + sourceShards: []string{"80-"}, + tables: []string{"customer"}, + }) + wf80Dash.create() + + currentCustomerCount = getCustomerCount(t, "after customer2.80-") + waitForRowCount(t, vtgateConn, "customer2:80-", "customer", 2) // customer2: 80- + waitForRowCount(t, vtgateConn, "customer", "customer", 3) // customer: all shards + waitForRowCount(t, vtgateConn, "customer2", "customer", 3) // customer2: all shards + }) + + currentCustomerCount = getCustomerCount(t, "after customer2.80-/2") + log.Flush() + + // This query uses an ID that should always get routed to shard 80- + shard80MinusRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'" + // This query uses an ID that should always get routed to shard -80 + shardMinus80RoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'" + + // Reset any existing vtgate connection state. + vtgateConn.Close() + vtgateConn = getConnection(t, tc.vc.ClusterConfig.hostname, tc.vc.ClusterConfig.vtgateMySQLPort) + t.Run("Confirm routing rules", func(t *testing.T) { + + // Global routing rules should be in place with everything going to the source keyspace (customer). + confirmGlobalRoutingToSource(t) + + // Shard routing rules should now also be in place with everything + // going to the source keyspace (customer). + require.Equal(t, preCutoverShardRoutingRules, getShardRoutingRules(t)) + + // Confirm shard targeting works before we switch any traffic. + // Everything should be routed to the source keyspace (customer). + + log.Infof("Testing reverse route (target->source) for shard being switched") + _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) + require.NoError(t, err) + _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + require.Error(t, err) + require.Contains(t, err.Error(), "target: customer.80-.primary", "Query was routed to the target before any SwitchTraffic") + + log.Infof("Testing reverse route (target->source) for shard NOT being switched") + _, err = vtgateConn.ExecuteFetch("use `customer2:-80`", 0, false) + require.NoError(t, err) + _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + require.Error(t, err) + require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before any SwitchTraffic") + + _, err = vtgateConn.ExecuteFetch("use `customer`", 0, false) // switch vtgate default db back to customer + require.NoError(t, err) + currentCustomerCount = getCustomerCount(t, "") + + // Switch all traffic for the shard + wf80Dash.switchTraffic() + expectedSwitchOutput := fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads Not Switched. Writes Not Switched\nCurrent State: Reads partially switched, for shards: %s. Writes partially switched, for shards: %s\n\n", + targetKs, wfName, shard, shard) + require.Equal(t, expectedSwitchOutput, lastOutput) + currentCustomerCount = getCustomerCount(t, "") + + // Confirm global routing rules -- everything should still be routed + // to the source side, customer, globally. + confirmGlobalRoutingToSource(t) + + // Confirm shard routing rules -- all traffic for the 80- shard should be + // routed into the customer2 keyspace, overriding the global routing rules. + require.Equal(t, halfCutoverShardRoutingRules, getShardRoutingRules(t)) + + // Confirm global routing rules: -80 should still be be routed to customer + // while 80- should be routed to customer2. + require.Equal(t, halfCutoverShardRoutingRules, getShardRoutingRules(t)) + }) + vtgateConn.Close() + vtgateConn = getConnection(t, tc.vc.ClusterConfig.hostname, tc.vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + + t.Run("Validate shard and tablet type routing", func(t *testing.T) { + + // No shard targeting + _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + require.Error(t, err) + require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") + _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + require.Error(t, err) + require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before partial SwitchTraffic") + + // Shard targeting + _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) + require.NoError(t, err) + _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + require.Error(t, err) + require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") + _, err = vtgateConn.ExecuteFetch("use `customer:80-`", 0, false) + require.NoError(t, err) + _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + require.Error(t, err) + require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") + + _, err = vtgateConn.ExecuteFetch("use `customer`", 0, false) // switch vtgate default db back to customer + require.NoError(t, err) + }) + currentCustomerCount = getCustomerCount(t, "") + + // Now move the other shard: -80 + t.Run("Move shard -80 and validate routing rules", func(t *testing.T) { + // Now move the other shard: -80 + wfName = "partialDash80" + shard = "-80" + wfDash80 = tc.newWorkflow("MoveTables", wfName, "customer", "customer2", &workflowOptions{ + sourceShards: []string{"-80"}, + tables: []string{"customer"}, + }) + wfDash80.create() + wfDash80.switchTraffic() + + expectedSwitchOutput := fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads partially switched, for shards: 80-. Writes partially switched, for shards: 80-\nCurrent State: All Reads Switched. All Writes Switched\n\n", + targetKs, wfName) + require.Equal(t, expectedSwitchOutput, lastOutput) + + // Confirm global routing rules: everything should still be routed + // to the source side, customer, globally. + confirmGlobalRoutingToSource(t) + + // Confirm shard routing rules: all shards should be routed to the + // target side (customer2). + require.Equal(t, postCutoverShardRoutingRules, getShardRoutingRules(t)) + }) + + var output string + + _, err = vtgateConn.ExecuteFetch("use `customer`", 0, false) // switch vtgate default db back to customer + require.NoError(t, err) + currentCustomerCount = getCustomerCount(t, "") + t.Run("Switch sequence traffic forward and reverse and validate workflows still exist and sequence routing works", func(t *testing.T) { + wfSeq.switchTraffic() + log.Infof("SwitchTraffic was successful for workflow seqTgt.seq, with output %s", lastOutput) + + insertCustomers(t) + + wfSeq.reverseTraffic() + log.Infof("ReverseTraffic was successful for workflow seqTgt.seq, with output %s", lastOutput) + + insertCustomers(t) + + wfSeq.switchTraffic() + log.Infof("SwitchTraffic was successful for workflow seqTgt.seq, with output %s", lastOutput) + + insertCustomers(t) + + output, err = tc.vc.VtctlClient.ExecuteCommandWithOutput("Workflow", "seqTgt.seq", "show") + require.NoError(t, err) + + output, err = tc.vc.VtctlClient.ExecuteCommandWithOutput("Workflow", "seqSrc.seq_reverse", "show") + require.NoError(t, err) + + wfSeq.complete() + }) + + t.Run("Cancel reverse workflows and validate", func(t *testing.T) { + // Cancel both reverse workflows (as we've done the cutover), which should + // clean up both the global routing rules and the shard routing rules. + for _, wf := range []string{"partialDash80", "partial80Dash"} { + // We switched traffic, so it's the reverse workflow we want to cancel. + reverseWf := wf + "_reverse" + reverseKs := sourceKs // customer + err = tstWorkflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "", false) + require.NoError(t, err) + + output, err := tc.vc.VtctlClient.ExecuteCommandWithOutput("Workflow", fmt.Sprintf("%s.%s", reverseKs, reverseWf), "show") + require.Error(t, err) + require.Contains(t, output, "no streams found") + + // Delete the original workflow + originalKsWf := fmt.Sprintf("%s.%s", targetKs, wf) + _, err = tc.vc.VtctlClient.ExecuteCommandWithOutput("Workflow", originalKsWf, "delete") + require.NoError(t, err) + output, err = tc.vc.VtctlClient.ExecuteCommandWithOutput("Workflow", originalKsWf, "show") + require.Error(t, err) + require.Contains(t, output, "no streams found") + } + + // Confirm that the global routing rules are now gone. + output, err = tc.vc.VtctlClient.ExecuteCommandWithOutput("GetRoutingRules") + require.NoError(t, err) + require.Equal(t, emptyGlobalRoutingRules, output) + + // Confirm that the shard routing rules are now gone. + require.Equal(t, emptyShardRoutingRules, getShardRoutingRules(t)) + }) +} + +var customerCount int64 +var currentCustomerCount int64 +var newCustomerCount = int64(201) +var lastCustomerId int64 + +func getCustomerCount(t *testing.T, msg string) int64 { + qr := execVtgateQuery(t, vtgateConn, "", "select count(*) from customer") + require.NotNil(t, qr) + count, err := qr.Rows[0][0].ToInt64() + require.NoError(t, err) + return count +} + +func confirmLastCustomerIdHasIncreased(t *testing.T) { + qr := execVtgateQuery(t, vtgateConn, "", "select cid from customer order by cid desc limit 1") + require.NotNil(t, qr) + currentCustomerId, err := qr.Rows[0][0].ToInt64() + require.NoError(t, err) + require.Greater(t, currentCustomerId, lastCustomerId) + lastCustomerId = currentCustomerId +} + +func insertCustomers(t *testing.T) { + for i := int64(1); i < newCustomerCount+1; i++ { + execVtgateQuery(t, vtgateConn, "customer@primary", fmt.Sprintf("insert into customer(name) values ('name-%d')", currentCustomerCount+i)) + } + customerCount = getCustomerCount(t, "") + require.Equal(t, currentCustomerCount+newCustomerCount, customerCount) + currentCustomerCount = customerCount + + confirmLastCustomerIdHasIncreased(t) +} + +func confirmGlobalRoutingToSource(t *testing.T) { + output, err := vc.VtctlClient.ExecuteCommandWithOutput("GetRoutingRules") + require.NoError(t, err) + result := gjson.Get(output, "rules") + result.ForEach(func(attributeKey, attributeValue gjson.Result) bool { + // 0 is the keyspace and 1 is optional tablename[@tablettype] + fromKsTbl := strings.Split(attributeValue.Get("fromTable").String(), ".") + // 0 is the keyspace and 1 is the tablename + toKsTbl := strings.Split(attributeValue.Get("toTables.0").String(), ".") + // All tables in the customer and customer2 keyspaces should be + // routed to the customer keyspace. + if fromKsTbl[0] == "customer" || fromKsTbl[0] == "customer2" { + require.Equal(t, "customer", toKsTbl[0]) + } + return true + }) +} diff --git a/go/test/endtoend/vreplication/partial_movetables_test.go b/go/test/endtoend/vreplication/partial_movetables_test.go index c130000e53a..0ea9f9d3a08 100644 --- a/go/test/endtoend/vreplication/partial_movetables_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_test.go @@ -21,6 +21,8 @@ import ( "strings" "testing" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + "github.com/stretchr/testify/require" "github.com/tidwall/gjson" @@ -28,9 +30,55 @@ import ( "vitess.io/vitess/go/vt/wrangler" ) -// TestPartialMoveTables tests partial move tables by moving each +// testCancel() starts and cancels a partial MoveTables for one of the shards which will be actually moved later on. +// Before canceling, we first switch traffic to the target keyspace and then reverse it back to the source keyspace. +// This tests that artifacts are being properly cleaned up when a MoveTables ia canceled. +func testCancel(t *testing.T) { + targetKeyspace := "customer2" + sourceKeyspace := "customer" + workflowName := "partial80DashForCancel" + ksWorkflow := fmt.Sprintf("%s.%s", targetKeyspace, workflowName) + // We use a different table in this MoveTables than the subsequent one, so that setting up of the artifacts + // while creating MoveTables do not paper over any issues with cleaning up artifacts when MoveTables is canceled. + // Ref: https://github.com/vitessio/vitess/issues/13998 + table := "customer2" + shard := "80-" + // start the partial movetables for 80- + mt := newMoveTables(vc, &moveTables{ + workflowName: workflowName, + targetKeyspace: targetKeyspace, + sourceKeyspace: sourceKeyspace, + tables: table, + sourceShards: shard, + }, moveTablesFlavorRandom) + mt.Create() + + checkDenyList := func(keyspace string, expected bool) { + validateTableInDenyList(t, vc, fmt.Sprintf("%s:%s", keyspace, shard), table, expected) + } + + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + + checkDenyList(targetKeyspace, false) + checkDenyList(sourceKeyspace, false) + + mt.SwitchReadsAndWrites() + checkDenyList(targetKeyspace, false) + checkDenyList(sourceKeyspace, true) + + mt.ReverseReadsAndWrites() + checkDenyList(targetKeyspace, true) + checkDenyList(sourceKeyspace, false) + + mt.Cancel() + checkDenyList(targetKeyspace, false) + checkDenyList(sourceKeyspace, false) + +} + +// TestPartialMoveTablesBasic tests partial move tables by moving each // customer shard -- -80,80- -- once a a time to customer2. -func TestPartialMoveTables(t *testing.T) { +func TestPartialMoveTablesBasic(t *testing.T) { origDefaultRdonly := defaultRdonly defer func() { defaultRdonly = origDefaultRdonly @@ -51,14 +99,14 @@ func TestPartialMoveTables(t *testing.T) { defer func() { extraVTGateArgs = origExtraVTGateArgs }() - vc = setupCluster(t) + vc = setupMinimalCluster(t) defer vtgateConn.Close() defer vc.TearDown(t) - setupCustomerKeyspace(t) + setupMinimalCustomerKeyspace(t) // Move customer table from unsharded product keyspace to // sharded customer keyspace. - createMoveTablesWorkflow(t, "customer") + createMoveTablesWorkflow(t, "customer,loadtest,customer2") tstWorkflowSwitchReadsAndWrites(t) tstWorkflowComplete(t) @@ -75,10 +123,15 @@ func TestPartialMoveTables(t *testing.T) { applyShardRoutingRules(t, emptyShardRoutingRules) require.Equal(t, emptyShardRoutingRules, getShardRoutingRules(t)) + runWithLoad := true + // Now setup the customer2 keyspace so we can do a partial // move tables for one of the two shards: 80-. defaultRdonly = 0 setupCustomer2Keyspace(t) + + testCancel(t) + currentWorkflowType = wrangler.MoveTablesWorkflow wfName := "partial80Dash" sourceKs := "customer" @@ -88,8 +141,17 @@ func TestPartialMoveTables(t *testing.T) { // start the partial movetables for 80- err := tstWorkflowExec(t, defaultCellName, wfName, sourceKs, targetKs, - "customer", workflowActionCreate, "", shard, "") + "customer,loadtest", workflowActionCreate, "", shard, "", false) require.NoError(t, err) + var lg *loadGenerator + if runWithLoad { // start load after routing rules are set, otherwise we end up with ambiguous tables + lg = newLoadGenerator(t, vc) + go func() { + lg.start() + }() + lg.waitForCount(1000) + } + targetTab1 = vc.getPrimaryTablet(t, targetKs, shard) catchup(t, targetTab1, wfName, "Partial MoveTables Customer to Customer2") vdiff1(t, ksWf, "") @@ -152,7 +214,7 @@ func TestPartialMoveTables(t *testing.T) { require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before any SwitchTraffic") // Switch all traffic for the shard - require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionSwitchTraffic, "", "", "")) + require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionSwitchTraffic, "", "", "", false)) expectedSwitchOutput := fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads Not Switched. Writes Not Switched\nCurrent State: Reads partially switched, for shards: %s. Writes partially switched, for shards: %s\n\n", targetKs, wfName, shard, shard) require.Equal(t, expectedSwitchOutput, lastOutput) @@ -210,7 +272,7 @@ func TestPartialMoveTables(t *testing.T) { // We cannot Complete a partial move tables at the moment because // it will find that all traffic has (obviously) not been switched. - err = tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionComplete, "", "", "") + err = tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionComplete, "", "", "", false) require.Error(t, err) // Confirm global routing rules: -80 should still be be routed to customer @@ -221,16 +283,16 @@ func TestPartialMoveTables(t *testing.T) { wfName = "partialDash80" shard = "-80" ksWf = fmt.Sprintf("%s.%s", targetKs, wfName) - // Start the partial movetables for -80, 80- has already been switched err = tstWorkflowExec(t, defaultCellName, wfName, sourceKs, targetKs, - "customer", workflowActionCreate, "", shard, "") + "customer,loadtest", workflowActionCreate, "", shard, "", false) require.NoError(t, err) targetTab2 := vc.getPrimaryTablet(t, targetKs, shard) catchup(t, targetTab2, wfName, "Partial MoveTables Customer to Customer2: -80") vdiff1(t, ksWf, "") + // Switch all traffic for the shard - require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionSwitchTraffic, "", "", "")) + require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionSwitchTraffic, "", "", "", false)) expectedSwitchOutput = fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads partially switched, for shards: 80-. Writes partially switched, for shards: 80-\nCurrent State: All Reads Switched. All Writes Switched\n\n", targetKs, wfName) require.Equal(t, expectedSwitchOutput, lastOutput) @@ -243,13 +305,15 @@ func TestPartialMoveTables(t *testing.T) { // target side (customer2). require.Equal(t, postCutoverShardRoutingRules, getShardRoutingRules(t)) + lg.stop() + // Cancel both reverse workflows (as we've done the cutover), which should // clean up both the global routing rules and the shard routing rules. for _, wf := range []string{"partialDash80", "partial80Dash"} { // We switched traffic, so it's the reverse workflow we want to cancel. reverseWf := wf + "_reverse" reverseKs := sourceKs // customer - err = tstWorkflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "") + err = tstWorkflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "", false) require.NoError(t, err) output, err := vc.VtctlClient.ExecuteCommandWithOutput("Workflow", fmt.Sprintf("%s.%s", reverseKs, reverseWf), "show") @@ -272,4 +336,5 @@ func TestPartialMoveTables(t *testing.T) { // Confirm that the shard routing rules are now gone. require.Equal(t, emptyShardRoutingRules, getShardRoutingRules(t)) + } diff --git a/go/test/endtoend/vreplication/performance_test.go b/go/test/endtoend/vreplication/performance_test.go index ce47e027f2d..9e0ae797e72 100644 --- a/go/test/endtoend/vreplication/performance_test.go +++ b/go/test/endtoend/vreplication/performance_test.go @@ -103,7 +103,7 @@ create table customer(cid int, name varbinary(128), meta json default null, typ for _, shard := range keyspaceTgt.Shards { for _, tablet := range shard.Tablets { t.Logf("catchup shard=%v, tablet=%v", shard.Name, tablet.Name) - tablet.Vttablet.WaitForVReplicationToCatchup(t, "stress_workflow", fmt.Sprintf("vt_%s", tablet.Vttablet.Keyspace), 5*time.Minute) + tablet.Vttablet.WaitForVReplicationToCatchup(t, "stress_workflow", fmt.Sprintf("vt_%s", tablet.Vttablet.Keyspace), sidecarDBName, 5*time.Minute) } } diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index 58b92e0b65c..993da344905 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -22,6 +22,7 @@ import ( "strconv" "strings" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -29,6 +30,8 @@ import ( "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/wrangler" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) const ( @@ -60,9 +63,9 @@ var ( func createReshardWorkflow(t *testing.T, sourceShards, targetShards string) error { err := tstWorkflowExec(t, defaultCellName, workflowName, targetKs, targetKs, - "", workflowActionCreate, "", sourceShards, targetShards) + "", workflowActionCreate, "", sourceShards, targetShards, false) require.NoError(t, err) - waitForWorkflowState(t, vc, ksWorkflow, workflowStateRunning) + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, targetKs, "") catchup(t, targetTab1, workflowName, "Reshard") catchup(t, targetTab2, workflowName, "Reshard") @@ -75,9 +78,9 @@ func createMoveTablesWorkflow(t *testing.T, tables string) { tables = tablesToMove } err := tstWorkflowExec(t, defaultCellName, workflowName, sourceKs, targetKs, - tables, workflowActionCreate, "", "", "") + tables, workflowActionCreate, "", "", "", false) require.NoError(t, err) - waitForWorkflowState(t, vc, ksWorkflow, workflowStateRunning) + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, targetKs, tables) catchup(t, targetTab1, workflowName, "MoveTables") catchup(t, targetTab2, workflowName, "MoveTables") @@ -85,10 +88,10 @@ func createMoveTablesWorkflow(t *testing.T, tables string) { } func tstWorkflowAction(t *testing.T, action, tabletTypes, cells string) error { - return tstWorkflowExec(t, cells, workflowName, sourceKs, targetKs, tablesToMove, action, tabletTypes, "", "") + return tstWorkflowExec(t, cells, workflowName, sourceKs, targetKs, tablesToMove, action, tabletTypes, "", "", false) } -func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, action, tabletTypes, sourceShards, targetShards string) error { +func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, action, tabletTypes, sourceShards, targetShards string, atomicCopy bool) error { var args []string if currentWorkflowType == wrangler.MoveTablesWorkflow { args = append(args, "MoveTables") @@ -101,11 +104,18 @@ func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, if BypassLagCheck { args = append(args, "--max_replication_lag_allowed=2542087h") } - + if atomicCopy { + args = append(args, "--atomic-copy") + } switch action { case workflowActionCreate: if currentWorkflowType == wrangler.MoveTablesWorkflow { - args = append(args, "--source", sourceKs, "--tables", tables) + args = append(args, "--source", sourceKs) + if tables != "" { + args = append(args, "--tables", tables) + } else { + args = append(args, "--all") + } if sourceShards != "" { args = append(args, "--source_shards", sourceShards) } @@ -115,7 +125,10 @@ func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, // Test new experimental --defer-secondary-keys flag switch currentWorkflowType { case wrangler.MoveTablesWorkflow, wrangler.MigrateWorkflow, wrangler.ReshardWorkflow: - args = append(args, "--defer-secondary-keys") + if !atomicCopy { + args = append(args, "--defer-secondary-keys") + } + args = append(args, "--initialize-target-sequences") // Only used for MoveTables } } if cells != "" { @@ -124,6 +137,7 @@ func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, if tabletTypes != "" { args = append(args, "--tablet_types", tabletTypes) } + args = append(args, "--timeout", time.Minute.String()) ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) args = append(args, action, ksWorkflow) output, err := vc.VtctlClient.ExecuteCommandWithOutput(args...) @@ -172,6 +186,27 @@ func tstWorkflowComplete(t *testing.T) error { return tstWorkflowAction(t, workflowActionComplete, "", "") } +// testWorkflowUpdate is a very simple test of the workflow update +// vtctlclient/vtctldclient command. +// It performs a non-behavior impacting update, setting tablet-types +// to primary,replica,rdonly (the only applicable types in these tests). +func testWorkflowUpdate(t *testing.T) { + tabletTypes := "primary,replica,rdonly" + // Test vtctlclient first + _, err := vc.VtctlClient.ExecuteCommandWithOutput("workflow", "--", "--tablet-types", tabletTypes, "noexist.noexist", "update") + require.Error(t, err, err) + resp, err := vc.VtctlClient.ExecuteCommandWithOutput("workflow", "--", "--tablet-types", tabletTypes, ksWorkflow, "update") + require.NoError(t, err) + require.NotEmpty(t, resp) + + // Test vtctldclient last + _, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", "noexist", "update", "--workflow", "noexist", "--tablet-types", tabletTypes) + require.Error(t, err) + resp, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", targetKs, "update", "--workflow", workflowName, "--tablet-types", tabletTypes) + require.NoError(t, err, err) + require.NotEmpty(t, resp) +} + func tstWorkflowCancel(t *testing.T) error { return tstWorkflowAction(t, workflowActionCancel, "", "") } @@ -183,7 +218,7 @@ func validateReadsRoute(t *testing.T, tabletTypes string, tablet *cluster.Vttabl for _, tt := range []string{"replica", "rdonly"} { destination := fmt.Sprintf("%s:%s@%s", tablet.Keyspace, tablet.Shard, tt) if strings.Contains(tabletTypes, tt) { - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, tablet, destination, readQuery, readQuery)) + assertQueryExecutesOnTablet(t, vtgateConn, tablet, destination, readQuery, readQuery) } } } @@ -198,17 +233,17 @@ func validateReadsRouteToTarget(t *testing.T, tabletTypes string) { func validateWritesRouteToSource(t *testing.T) { insertQuery := "insert into customer(name, cid) values('tempCustomer2', 200)" - matchInsertQuery := "insert into customer(name, cid) values" - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, sourceTab, "customer", insertQuery, matchInsertQuery)) + matchInsertQuery := "insert into customer(`name`, cid) values" + assertQueryExecutesOnTablet(t, vtgateConn, sourceTab, "customer", insertQuery, matchInsertQuery) execVtgateQuery(t, vtgateConn, "customer", "delete from customer where cid > 100") } func validateWritesRouteToTarget(t *testing.T) { insertQuery := "insert into customer(name, cid) values('tempCustomer3', 101)" - matchInsertQuery := "insert into customer(name, cid) values" - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, targetTab2, "customer", insertQuery, matchInsertQuery)) + matchInsertQuery := "insert into customer(`name`, cid) values" + assertQueryExecutesOnTablet(t, vtgateConn, targetTab2, "customer", insertQuery, matchInsertQuery) insertQuery = "insert into customer(name, cid) values('tempCustomer3', 102)" - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, targetTab1, "customer", insertQuery, matchInsertQuery)) + assertQueryExecutesOnTablet(t, vtgateConn, targetTab1, "customer", insertQuery, matchInsertQuery) execVtgateQuery(t, vtgateConn, "customer", "delete from customer where cid > 100") } @@ -219,7 +254,7 @@ func revert(t *testing.T, workflowType string) { validateReadsRouteToSource(t, "replica") // cancel the workflow to cleanup - _, err := vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "Cancel", ksWorkflow) + _, err := vc.VtctldClient.ExecuteCommandWithOutput(workflowType, "--target-keyspace", targetKs, "--workflow", workflowName, "cancel") require.NoError(t, err, fmt.Sprintf("%s Cancel error: %v", workflowType, err)) } @@ -282,17 +317,17 @@ func testVSchemaForSequenceAfterMoveTables(t *testing.T) { // use MoveTables to move customer2 from product to customer using currentWorkflowType = wrangler.MoveTablesWorkflow err := tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, - "customer2", workflowActionCreate, "", "", "") + "customer2", workflowActionCreate, "", "", "", false) require.NoError(t, err) - waitForWorkflowState(t, vc, "customer.wf2", workflowStateRunning) + waitForWorkflowState(t, vc, "customer.wf2", binlogdatapb.VReplicationWorkflowState_Running.String()) waitForLowLag(t, "customer", "wf2") err = tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, - "", workflowActionSwitchTraffic, "", "", "") + "", workflowActionSwitchTraffic, "", "", "", false) require.NoError(t, err) err = tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, - "", workflowActionComplete, "", "", "") + "", workflowActionComplete, "", "", "", false) require.NoError(t, err) // sanity check @@ -317,16 +352,16 @@ func testVSchemaForSequenceAfterMoveTables(t *testing.T) { // use MoveTables to move customer2 back to product. Note that now the table has an associated sequence err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, - "customer2", workflowActionCreate, "", "", "") + "customer2", workflowActionCreate, "", "", "", false) require.NoError(t, err) - waitForWorkflowState(t, vc, "product.wf3", workflowStateRunning) + waitForWorkflowState(t, vc, "product.wf3", binlogdatapb.VReplicationWorkflowState_Running.String()) waitForLowLag(t, "product", "wf3") err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, - "", workflowActionSwitchTraffic, "", "", "") + "", workflowActionSwitchTraffic, "", "", "", false) require.NoError(t, err) err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, - "", workflowActionComplete, "", "", "") + "", workflowActionComplete, "", "", "", false) require.NoError(t, err) // sanity check @@ -391,6 +426,9 @@ func testReshardV2Workflow(t *testing.T) { verifyNoInternalTables(t, vtgateConn, targetKs+"/-40") verifyNoInternalTables(t, vtgateConn, targetKs+"/c0-") + // Confirm that updating Reshard workflows works. + testWorkflowUpdate(t) + testRestOfWorkflow(t) } @@ -401,7 +439,7 @@ func testMoveTablesV2Workflow(t *testing.T) { setupCustomerKeyspace(t) // The purge table should get skipped/ignored // If it's not then we'll get an error as the table doesn't exist in the vschema - createMoveTablesWorkflow(t, "customer,_vt_PURGE_4f9194b43b2011eb8a0104ed332e05c2_20221210194431") + createMoveTablesWorkflow(t, "customer,loadtest,vdiff_order,reftable,_vt_PURGE_4f9194b43b2011eb8a0104ed332e05c2_20221210194431") if !strings.Contains(lastOutput, "Workflow started successfully") { t.Fail() } @@ -414,6 +452,9 @@ func testMoveTablesV2Workflow(t *testing.T) { testReplicatingWithPKEnumCols(t) + // Confirm that updating MoveTable workflows works. + testWorkflowUpdate(t) + testRestOfWorkflow(t) listAllArgs := []string{"workflow", "customer", "listall"} @@ -434,7 +475,7 @@ func testMoveTablesV2Workflow(t *testing.T) { } func testPartialSwitches(t *testing.T) { - //nothing switched + // nothing switched require.Equal(t, getCurrentState(t), wrangler.WorkflowStateNotSwitched) tstWorkflowSwitchReads(t, "replica,rdonly", "zone1") nextState := "Reads partially switched. Replica switched in cells: zone1. Rdonly switched in cells: zone1. Writes Not Switched" @@ -446,7 +487,7 @@ func testPartialSwitches(t *testing.T) { checkStates(t, currentState, nextState) tstWorkflowSwitchReads(t, "", "") - checkStates(t, nextState, nextState) //idempotency + checkStates(t, nextState, nextState) // idempotency tstWorkflowSwitchWrites(t) currentState = nextState @@ -454,7 +495,7 @@ func testPartialSwitches(t *testing.T) { checkStates(t, currentState, nextState) tstWorkflowSwitchWrites(t) - checkStates(t, nextState, nextState) //idempotency + checkStates(t, nextState, nextState) // idempotency keyspace := "product" if currentWorkflowType == wrangler.ReshardWorkflow { @@ -572,8 +613,8 @@ func setupCluster(t *testing.T) *VitessCluster { require.NotNil(t, vtgate) err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") require.NoError(t, err) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "product", "0"), 1) + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2, 30*time.Second)) + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "product", "0"), 1, 30*time.Second)) vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) verifyClusterHealth(t, vc) @@ -591,22 +632,12 @@ func setupCustomerKeyspace(t *testing.T) { customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, nil); err != nil { t.Fatal(err) } - err := cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "-80") - require.NoError(t, err) - err = cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "80-") - require.NoError(t, err) - if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "-80"), 2); err != nil { - t.Fatal(err) - } - if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "80-"), 2); err != nil { - t.Fatal(err) - } - if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "-80"), 1); err != nil { - t.Fatal(err) - } - if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "80-"), 1); err != nil { - t.Fatal(err) - } + require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "-80")) + require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "80-")) + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "-80"), 2, 30*time.Second)) + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "80-"), 2, 30*time.Second)) + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "-80"), 1, 30*time.Second)) + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "80-"), 1, 30*time.Second)) custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet targetTab2 = custKs.Shards["80-"].Tablets["zone1-300"].Vttablet @@ -618,25 +649,58 @@ func setupCustomer2Keyspace(t *testing.T) { c2shards := []string{"-80", "80-"} c2keyspace := "customer2" if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, c2keyspace, strings.Join(c2shards, ","), - customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 1200, nil); err != nil { + customerVSchema, customerSchema, 0, 0, 1200, nil); err != nil { t.Fatal(err) } for _, c2shard := range c2shards { err := cluster.WaitForHealthyShard(vc.VtctldClient, c2keyspace, c2shard) require.NoError(t, err) - if defaultReplicas > 0 { - if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", c2keyspace, c2shard), defaultReplicas); err != nil { - t.Fatal(err) - } - } - if defaultRdonly > 0 { - if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", c2keyspace, c2shard), defaultRdonly); err != nil { - t.Fatal(err) - } - } + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", c2keyspace, c2shard), 1, 30*time.Second)) } } +func setupMinimalCluster(t *testing.T) *VitessCluster { + cells := []string{"zone1"} + + vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", cells, mainClusterConfig) + require.NotNil(t, vc) + defaultCellName := "zone1" + allCellNames = defaultCellName + defaultCell = vc.Cells[defaultCellName] + + zone1 := vc.Cells["zone1"] + + vc.AddKeyspace(t, []*Cell{zone1}, "product", "0", initialProductVSchema, initialProductSchema, 0, 0, 100, nil) + + vtgate = zone1.Vtgates[0] + require.NotNil(t, vtgate) + err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") + require.NoError(t, err) + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1, 30*time.Second)) + + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + verifyClusterHealth(t, vc) + insertInitialData(t) + + sourceTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet + + return vc +} + +func setupMinimalCustomerKeyspace(t *testing.T) { + if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, "customer", "-80,80-", + customerVSchema, customerSchema, 0, 0, 200, nil); err != nil { + t.Fatal(err) + } + require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "-80")) + require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "80-")) + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "-80"), 1, 30*time.Second)) + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "80-"), 1, 30*time.Second)) + custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] + targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet + targetTab2 = custKs.Shards["80-"].Tablets["zone1-300"].Vttablet +} + func TestSwitchReadsWritesInAnyOrder(t *testing.T) { vc = setupCluster(t) defer vc.TearDown(t) @@ -758,12 +822,8 @@ func createAdditionalCustomerShards(t *testing.T, shards string) { for _, shardName := range arrTargetShardNames { err := cluster.WaitForHealthyShard(vc.VtctldClient, ksName, shardName) require.NoError(t, err) - if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", ksName, shardName), 2); err != nil { - require.NoError(t, err) - } - if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", ksName, shardName), 1); err != nil { - require.NoError(t, err) - } + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", ksName, shardName), 2, 30*time.Second)) + require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", ksName, shardName), 1, 30*time.Second)) } custKs := vc.Cells[defaultCell.Name].Keyspaces[ksName] targetTab2 = custKs.Shards["80-c0"].Tablets["zone1-600"].Vttablet diff --git a/go/test/endtoend/vreplication/sidecardb_test.go b/go/test/endtoend/vreplication/sidecardb_test.go index 56ca2d08acd..ef05e051be2 100644 --- a/go/test/endtoend/vreplication/sidecardb_test.go +++ b/go/test/endtoend/vreplication/sidecardb_test.go @@ -38,7 +38,7 @@ var ddls1, ddls2 []string func init() { sidecarDBTables = []string{"copy_state", "dt_participant", "dt_state", "heartbeat", "post_copy_action", "redo_state", - "redo_statement", "reparent_journal", "resharding_journal", "schema_migrations", "schema_version", "schemacopy", + "redo_statement", "reparent_journal", "resharding_journal", "schema_migrations", "schema_version", "schemacopy", "tables", "vdiff", "vdiff_log", "vdiff_table", "views", "vreplication", "vreplication_log"} numSidecarDBTables = len(sidecarDBTables) ddls1 = []string{ diff --git a/go/test/endtoend/vreplication/testdata/config/init_testserver_db.sql b/go/test/endtoend/vreplication/testdata/config/init_testserver_db.sql new file mode 100644 index 00000000000..03df754ea21 --- /dev/null +++ b/go/test/endtoend/vreplication/testdata/config/init_testserver_db.sql @@ -0,0 +1,91 @@ +# This file is for testing purpose only. +# This file is executed immediately after initializing a fresh data directory. +# It is equivalent of init_db.sql. Given init_db.sql is for mysql which has super_read_only +# related stuff therefore for testing purpose we avoid setting `super_read_only` during initialization. + +############################################################################### +# WARNING: Any change to init_db.sql should gets reflected in this file as well. +############################################################################### + +############################################################################### +# WARNING: This sql is *NOT* safe for production use, +# as it contains default well-known users and passwords. +# Care should be taken to change these users and passwords +# for production. +############################################################################### + +############################################################################### +# Equivalent of mysql_secure_installation +############################################################################### +# We need to ensure that read_only is disabled so that we can execute +# these commands. +SET GLOBAL read_only='OFF'; + +# Changes during the init db should not make it to the binlog. +# They could potentially create errant transactions on replicas. +SET sql_log_bin = 0; +# Remove anonymous users. +DELETE FROM mysql.user WHERE User = ''; + +# Disable remote root access (only allow UNIX socket). +DELETE FROM mysql.user WHERE User = 'root' AND Host != 'localhost'; + +# Remove test database. +DROP DATABASE IF EXISTS test; + +############################################################################### +# Vitess defaults +############################################################################### + +# Admin user with all privileges. +CREATE USER 'vt_dba'@'localhost'; +GRANT ALL ON *.* TO 'vt_dba'@'localhost'; +GRANT GRANT OPTION ON *.* TO 'vt_dba'@'localhost'; + +# User for app traffic, with global read-write access. +CREATE USER 'vt_app'@'localhost'; +GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE, + REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES, + LOCK TABLES, EXECUTE, REPLICATION CLIENT, CREATE VIEW, + SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER + ON *.* TO 'vt_app'@'localhost'; + +# User for app debug traffic, with global read access. +CREATE USER 'vt_appdebug'@'localhost'; +GRANT SELECT, SHOW DATABASES, PROCESS ON *.* TO 'vt_appdebug'@'localhost'; + +# User for administrative operations that need to be executed as non-SUPER. +# Same permissions as vt_app here. +CREATE USER 'vt_allprivs'@'localhost'; +GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE, + REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES, + LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, + SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER + ON *.* TO 'vt_allprivs'@'localhost'; + +# User for slave replication connections. +CREATE USER 'vt_repl'@'%'; +GRANT REPLICATION SLAVE ON *.* TO 'vt_repl'@'%'; + +# User for Vitess VReplication (base vstreamers and vplayer). +CREATE USER 'vt_filtered'@'localhost'; +GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE, + REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES, + LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, + SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER + ON *.* TO 'vt_filtered'@'localhost'; + +# User for general MySQL monitoring. +CREATE USER 'vt_monitoring'@'localhost'; +GRANT SELECT, PROCESS, SUPER, REPLICATION CLIENT, RELOAD + ON *.* TO 'vt_monitoring'@'localhost'; +GRANT SELECT, UPDATE, DELETE, DROP + ON performance_schema.* TO 'vt_monitoring'@'localhost'; + +FLUSH PRIVILEGES; + +RESET SLAVE ALL; +RESET MASTER; + +# custom sql is used to add custom scripts like creating users/passwords. We use it in our tests +# {{custom_sql}} diff --git a/go/test/endtoend/vreplication/time_zone_test.go b/go/test/endtoend/vreplication/time_zone_test.go index f5d57eac9df..2bf63bd6e61 100644 --- a/go/test/endtoend/vreplication/time_zone_test.go +++ b/go/test/endtoend/vreplication/time_zone_test.go @@ -27,6 +27,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" ) // TestMoveTablesTZ tests the conversion of datetime based on the source timezone passed to the MoveTables workflow @@ -191,7 +192,8 @@ func TestMoveTablesTZ(t *testing.T) { output, err = vc.VtctlClient.ExecuteCommandWithOutput("MoveTables", "--", "SwitchTraffic", ksWorkflow) require.NoError(t, err, output) - qr, err := productTab.QueryTablet(fmt.Sprintf("select * from _vt.vreplication where workflow='%s_reverse'", workflow), "", false) + qr, err := productTab.QueryTablet(sqlparser.BuildParsedQuery("select * from %s.vreplication where workflow='%s_reverse'", + sidecarDBIdentifier, workflow).Query, "", false) if err != nil { return } diff --git a/go/test/endtoend/vreplication/unsharded_init_data.sql b/go/test/endtoend/vreplication/unsharded_init_data.sql index b12aaa9bf79..8af0cab6608 100644 --- a/go/test/endtoend/vreplication/unsharded_init_data.sql +++ b/go/test/endtoend/vreplication/unsharded_init_data.sql @@ -1,6 +1,7 @@ insert into customer(cid, name, typ, sport, meta) values(1, 'Jøhn "❤️" Rizzolo',1,'football,baseball','{}'); insert into customer(cid, name, typ, sport, meta) values(2, 'Paül','soho','cricket',convert(x'7b7d' using utf8mb4)); -insert into customer(cid, name, typ, sport) values(3, 'ringo','enterprise',''); +-- We use a high cid value here to test the target sequence initialization. +insert into customer(cid, name, typ, sport, blb) values(999999, 'ringo','enterprise','','blob data'); insert into merchant(mname, category) values('Monoprice', 'eléctronics'); insert into merchant(mname, category) values('newegg', 'elec†ronics'); insert into product(pid, description) values(1, 'keyböard ⌨️'); @@ -26,6 +27,15 @@ insert into db_order_test (c_uuid, created_at, dstuff, dtstuff, dbstuff, cstuff) insert into db_order_test (c_uuid, created_at, dstuff, dtstuff, dbstuff, cstuff) values ('b169-7359cfb5-9ff5-064d3874effb-14e4', '2018-12-11 01:46:46', 'Fohk0aif4oov!e>ith)eeghoo`Goh)p0', 'shi3ahde9doo5Uph6CeiSheCh/uw0nae', 'audaek{eceenooPh8wichahcheiv9thu', 'chickens'); insert into db_order_test (c_uuid, created_at, dstuff, dtstuff, dbstuff, cstuff) values ('b169-a8411858-a983-123139285dbf-14e2', '2020-03-23 04:42:39', 'ooM4pej>eashaeko', 'too$Shei&s2eing3ashoh0Sh9fiey7th', 'Ohx9saf#eiz*echoo0eechSues_u2que', 'dogs'); +-- You can see all of the workflow and test failures that this data caused without the fixes +-- from https://github.com/vitessio/vitess/pull/12845 here: +-- https://github.com/vitessio/vitess/pull/12865 +insert into vdiff_order VALUES ('PTM1679987542cpodyy4sf09xhwcdt0'),('PTM1679987542cpovyi26kdjr99r9mh'),('PTM1679987542cpoyhemvuqhawgu7h2'),('PTM1679987542cppebyl74z5umuvbgy'),('PTM1679987542cppenm12s7132oix1u'),('PTM1679987542cpprhwddmnlqrmxgv8'),('PTM1679987542cpqccvoplfrtedup38'),('PTM1679987542cpqiy7z5nyiebmgw0b'),('PTM1679987542cpqsmx82uo181zk0hu'),('PTM1679987542cpqw99mrvp5w0zfwze'),('PTM1679987542cprb7l0j5sv31xkdsh'),('PTM1679987542cprnvhdovs3ht3rulz'),('PTM1679987542cprqvrlpsei07bdp0k'),('PTM1679987542cps0c2zhmudrkfc7vq'),('PTM1679987542cps2nj7xcpgrnhrowr'),('PTM1679987542cps2rlvmw652x7fvhi'),('PTM1679987542cps3t9lltjgx561nq2'),('PTM1679987542cps3zgqy64q29f0r2m'),('PTM1679987542cps7r9yf6h2k2bfh4p'),('PTM1679987542cps90m9y34wytn0t50'),('PTM1679987542cpsb601kau592eo52v'),('PTM1679987542cpskzepmf9hs4djvvm'),('PTM1679987542cpsuu774fe3ts74b6k'),('PTM1679987542cpszu54y0ei3iv7cvl'),('PTM1679987542cpt6cljprzj910q37t'),('PTM1679987542cpt7ebkolh2m0w057q'),('PTM1679987542cptav05kc5f183fsbs'),('PTM1679987542cptnln3zpcpfz3n5mr'); +insert into vdiff_order VALUES ('fgemkbjemiihhj'),('fgemladahfmfln'),('fgemladcjehjhl'),('fgemmkfkeglelk'),('fgemnbeijlchkd'),('fgenacflnlchlc'),('fgenagakbiknan'),('fgenbemkgckigh'),('fgenbjhkaicaab'),('fgenchmieliead'),('fgencmadlmihhc'),('fgendemknladdb'),('fgendkffcdfckb'),('fgengbdgcdnbke'),('fgenggbbeafdlm'),('fgenhhdgmbacff'),('fgenhkfifnliec'),('fgenidghedhlbi'),('fgeniljmlifbdf'),('fgenjhghnnblek'),('fgenjimadfeada'),('fgenjmibjgbjej'),('fgenkijeghjgjh'),('fgenkjlheiiagc'),('fgenldehanihlh'),('fgenleblfkfidk'),('fgenlfbadakknn'),('fgenlnfmnhahel'),('fgenmaehniienf'),('fgenmdnnjnhaam'),('fgennkilhlchgl'),('fgfaafkmkekinb'),('fgfablecfdbhcg'),('fgfaegmnkalhdb'),('fgfaekdiginclh'),('fgfaencghmcdeg'),('fgfafacfabhldf'),('fgfafcakbfebkh'),('fgfaffifggdmkm'),('fgfafgdghjjbdl'),('fgfagdhififefa'),('fgfagebgahnajh'),('fgfaggabajdjlf'),('fgfahemldkkmkj'),('fgfaiijcagdmhf'),('fgfakdnagaebje'),('fgfakldkjgdeei'),('fgfamdcfejngbl'),('fgfamebkblfkah'),('fgfbbajhmmbfjc'),('fgfbcgfhbkjmkf'),('fgfbcmmmkhkhkd'),('fgfbcmndbmdgdd'); +insert into vdiff_order VALUES ('PTM1679907937cpqxpxok13e1l8uhb8'),('PTM1679907937cpr2clvpd8cx8lyeh6'),('PTM1679907937cpr2v5ysgjwxhp0iwj'),('PTM1679907937cpr74yu5duf4ljcwh5'),('PTM1679907937cprdo5jdvlvdjoyz39'),('PTM1679907937cprud30qk89mhyq8sn'),('PTM1679907937cprwj0ib1d2waomzm3'),('PTM1679907937cprxsj9gjvnvonvebg'),('PTM1679907937cprz8t299i57tzowsx'),('PTM1679907937cpswqjwdy610hxn4m8'),('PTM1679907937cptb3o8207kzgy4o7d'),('PTM1679907937cptp2t62nkozfdjun1'),('PTM1679907937cptr3mwkre3uyak3wp'),('PTM1679907937cptsq80wyeckfsisox'),('PTM1679907937cptx6zcxw5c5wksaa5'),('PTM1679907937cpu6t847eclmho0iya'),('PTM1679907937cpuaxr313hudr48pc6'),('PTM1679907937cpuj9tey8be8itwblq'),('PTM1679907937cpuqhc2exc2xmbbdbm'),('PTM1679907937cpvamfjapnz9i7z4dw'),('PTM1679907937cpvkm584ncpu3eznbk'),('PTM1679907937cpvu1quoskgy23pcro'),('PTM1679907937cpwicmfxbbepuro58c'),('PTM1679907937cpwlsdyb0kgsfzl9oh'),('PTM1679907937cpwvzrrwn9l0yhkxeg'),('PTM1679907937cpwxamsgvp23wvm4tw'),('PTM1679907937cpx58xjmmjjzujbjaj'),('PTM1679907937cpx9b61jv3y66fytjw'); +insert into vdiff_order VALUES ('icljhkijabacma'),('icljidchfakcbm'),('icljkgccaabhin'),('icljlccinfhjhh'),('icljmkhnjbjeib'),('iclkaajhekmafc'),('iclkafbikjjhbn'),('iclkanegjkjlif'),('iclkbbkjendihg'),('iclkbelchkimhg'),('iclkbnhcediand'),('iclkcbgfkmdagf'),('iclkcggjbanhaj'),('iclkdcdkbdmdji'),('iclkeamemigglm'),('iclkfclhlnbnhn'),('iclkgdlidjafjj'),('iclkgnkhceakgf'),('iclkibamdjmhga'),('iclkihikbnnhel'),('iclkihindgdgdj'),('iclkkagcccniia'),('iclkkfmifacfnn'),('iclkkgaegcbjdn'),('iclkkihhbcjcin'),('iclkmblkmjbhmk'),('iclkmcfeeeilde'),('iclkmdchhmdfmb'),('iclkmdighhgfdj'),('iclkmelendhiih'),('iclkmmbabaadkb'),('icllabaaiieffl'),('icllaclainhmhc'),('icllbajjaknaae'),('icllbhnbbddhem'),('icllbignhmlmmm'),('icllcicgmehhbn'),('iclldbchhkinii'),('icllebcahebbfn'),('icllegcilbhjhl'),('icllemgnfgdmnn'),('icllhkncfglhie'),('icllicmcjgmhhh'),('iclligcijkalhn'),('iclljgndkaljjf'),('iclljijagmjgeb'),('iclljmecicfdki'),('icllkalfkkcfcd'),('icllkfnigibgdj'),('icllklnfcbhgki'),('iclllkhdgcehfg'),('icllnfalfjgnfa'),('iclmbneakkfcdc'); +insert into vdiff_order VALUES ('PTM1679898802cpy2sm2f7eeyx1c418'),('PTM1679898802cpy8ilwl38iynquriy'),('PTM1679898802cpydluko9k1ia8soeq'),('PTM1679898802cpyg40s5cle7mq31a0'),('PTM1679898802cpyw1d0vcaqhz13rfv'),('PTM1679898802cpz2a1bg85dsvoe6sd'),('PTM1679898802cpz4525uzoak8cwoob'),('PTM1679898802cpz5cnd2ntkqwhcens'),('PTM1679898802cpzcmtf2x3zg0tl3mq'),('PTM1679898802cpzqxlexdl5ccmswg2'),('PTM1679898802cpzrjs7mexu24zx9j0'),('PTM1679898802cpzvb8eofhntv5zv2w'),('PTM1679898803cp01m9zwvcqel0alnv'),('PTM1679898803cp0h30qplkzbwu47nl'),('PTM1679898803cp0qpuscot9gb98tzx'),('PTM1679898803cp1fyxdh866k0fbp4k'),('PTM1679898803cp1l0powfj4htk4czt'),('PTM1679898803cp2l77jwdrbg7u9fdy'),('PTM1679898803cp2vrcc8bwz9ef8ezu'),('PTM1679898803cp34krilv0gs8made9'),('PTM1679898803cp3agd45d3i69v16mp'),('PTM1679898803cp3ews08395tov755k'),('PTM1679898803cp3jv1qnrtk7v72u2o'),('PTM1679898803cp44d2ks7bfhj955hi'),('PTM1679898803cp4cnjdfc27ut3t2jj'),('PTM1679898803cp4twsyo8w21f14sb0'),('PTM1679898803cp5st72scversqq0z0'),('PTM1679898803cp6ak3ycgwvjj8rawt'); + insert into datze(id, dt2, ts1) values (1, '2022-01-01 00:00:00', current_timestamp); insert into datze(id, dt2, ts1) values (2, '2022-03-27 02:00:00', current_timestamp); insert into datze(id, dt2, ts1) values (3, '2022-03-27 02:15:00', current_timestamp); @@ -33,3 +43,10 @@ insert into datze(id, dt2, ts1) values (4, '2022-03-27 03:00:00', current_timest insert into datze(id, dt2, ts1) values (5, '2022-03-27 03:15:00', current_timestamp); insert into datze(id, dt2, ts1) values (6, current_timestamp, current_timestamp); +insert into geom_tbl(id, g, p, ls, pg, mp, mls, mpg, gc) values(1,ST_GeomFromText("LINESTRING(0 0,1 2,2 4)"), POINT(32767, 12345678901234567890),ST_GeomFromText("LINESTRING(-1 1,32627 32678,32679 65536,1234567890123456789 489749749734.234212908)"),ST_GeomFromText("POLYGON ((1 2, 2 3, 3 4, 1 2))"), ST_GeomFromText("MULTIPOINT(0 0, 15 25, 45 65)"),ST_GeomFromText("MULTILINESTRING((12 12, 22 22), (19 19, 32 18))"),ST_GeomFromText("MULTIPOLYGON(((0 0,11 0,12 11,0 9,0 0)),((3 5,7 4,4 7,7 7,3 5)))"),ST_GeomFromText("GEOMETRYCOLLECTION(POINT(3 2),LINESTRING(0 0,1 3,2 5,3 5,4 7))")); + +insert into reftable (id, val1) values (1, 'a') +insert into reftable (id, val1) values (2, 'b') +insert into reftable (id, val1) values (3, 'c') +insert into reftable (id, val1) values (4, 'd') +insert into reftable (id, val1) values (5, 'e') diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index 06eddf95c9b..2011f8613c8 100644 --- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -23,8 +23,10 @@ import ( "time" "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/sqlparser" ) type testCase struct { @@ -45,7 +47,7 @@ type testCase struct { } const ( - sqlSimulateError = `update _vt.vdiff as vd, _vt.vdiff_table as vdt set vd.state = 'error', vdt.state = 'error', vd.completed_at = NULL, + sqlSimulateError = `update %s.vdiff as vd, %s.vdiff_table as vdt set vd.state = 'error', vdt.state = 'error', vd.completed_at = NULL, vd.last_error = 'vttablet: rpc error: code = Unknown desc = (errno 1213) (sqlstate 40001): Deadlock found when trying to get lock; try restarting transaction' where vd.vdiff_uuid = %s and vd.id = vdt.vdiff_id` sqlAnalyzeTable = `analyze table %s` @@ -63,9 +65,9 @@ var testCases = []*testCase{ tabletBaseID: 200, tables: "customer,Lead,Lead-1", autoRetryError: true, - retryInsert: `insert into customer(cid, name, typ) values(91234, 'Testy McTester', 'soho')`, + retryInsert: `insert into customer(cid, name, typ) values(1991234, 'Testy McTester', 'soho')`, resume: true, - resumeInsert: `insert into customer(cid, name, typ) values(92234, 'Testy McTester (redux)', 'enterprise')`, + resumeInsert: `insert into customer(cid, name, typ) values(1992234, 'Testy McTester (redux)', 'enterprise')`, testCLIErrors: true, // test for errors in the simplest workflow testCLICreateWait: true, // test wait on create feature against simplest workflow }, @@ -79,9 +81,9 @@ var testCases = []*testCase{ targetShards: "-40,40-a0,a0-", tabletBaseID: 400, autoRetryError: true, - retryInsert: `insert into customer(cid, name, typ) values(93234, 'Testy McTester Jr', 'enterprise'), (94234, 'Testy McTester II', 'enterprise')`, + retryInsert: `insert into customer(cid, name, typ) values(1993234, 'Testy McTester Jr', 'enterprise'), (1993235, 'Testy McTester II', 'enterprise')`, resume: true, - resumeInsert: `insert into customer(cid, name, typ) values(95234, 'Testy McTester III', 'enterprise')`, + resumeInsert: `insert into customer(cid, name, typ) values(1994234, 'Testy McTester III', 'enterprise')`, stop: true, }, { @@ -94,9 +96,9 @@ var testCases = []*testCase{ targetShards: "0", tabletBaseID: 700, autoRetryError: true, - retryInsert: `insert into customer(cid, name, typ) values(96234, 'Testy McTester IV', 'enterprise')`, + retryInsert: `insert into customer(cid, name, typ) values(1995234, 'Testy McTester IV', 'enterprise')`, resume: true, - resumeInsert: `insert into customer(cid, name, typ) values(97234, 'Testy McTester V', 'enterprise'), (98234, 'Testy McTester VI', 'enterprise')`, + resumeInsert: `insert into customer(cid, name, typ) values(1996234, 'Testy McTester V', 'enterprise'), (1996235, 'Testy McTester VI', 'enterprise')`, stop: true, }, } @@ -176,7 +178,6 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, cells []*Cell) for _, shard := range arrTargetShards { tab := vc.getPrimaryTablet(t, tc.targetKs, shard) catchup(t, tab, tc.workflow, tc.typ) - updateTableStats(t, tab, tc.tables) // need to do this in order to test progress reports } vdiff(t, tc.targetKs, tc.workflow, cells[0].Name, true, true, nil) @@ -234,23 +235,54 @@ func testCLIErrors(t *testing.T, ksWorkflow, cells string) { func testDelete(t *testing.T, ksWorkflow, cells string) { t.Run("Delete", func(t *testing.T) { - // test show verbose too as a side effect + // Let's be sure that we have at least 3 unique VDiffs. + // We have one record in the SHOW output per VDiff, per + // shard. So we want to get a count of the unique VDiffs + // by UUID. + uuidCount := func(uuids []gjson.Result) int64 { + seen := make(map[string]struct{}) + for _, uuid := range uuids { + seen[uuid.String()] = struct{}{} + } + return int64(len(seen)) + } + _, output := performVDiff2Action(t, ksWorkflow, cells, "show", "all", false) + initialVDiffCount := uuidCount(gjson.Get(output, "#.UUID").Array()) + for ; initialVDiffCount < 3; initialVDiffCount++ { + _, _ = performVDiff2Action(t, ksWorkflow, cells, "create", "", false) + } + + // Now let's confirm that we have at least 3 unique VDiffs. + _, output = performVDiff2Action(t, ksWorkflow, cells, "show", "all", false) + require.GreaterOrEqual(t, uuidCount(gjson.Get(output, "#.UUID").Array()), int64(3)) + // And that our initial count is what we expect. + require.Equal(t, initialVDiffCount, uuidCount(gjson.Get(output, "#.UUID").Array())) + + // Test show last with verbose too as a side effect. uuid, output := performVDiff2Action(t, ksWorkflow, cells, "show", "last", false, "--verbose") - // only present with --verbose + // The TableSummary is only present with --verbose. require.Contains(t, output, `"TableSummary":`) + + // Now let's delete one of the VDiffs. _, output = performVDiff2Action(t, ksWorkflow, cells, "delete", uuid, false) - require.Contains(t, output, `"Status": "completed"`) + require.Equal(t, "completed", gjson.Get(output, "Status").String()) + // And confirm that our unique VDiff count has only decreased by one. + _, output = performVDiff2Action(t, ksWorkflow, cells, "show", "all", false) + require.Equal(t, initialVDiffCount-1, uuidCount(gjson.Get(output, "#.UUID").Array())) + + // Now let's delete all of them. _, output = performVDiff2Action(t, ksWorkflow, cells, "delete", "all", false) - require.Contains(t, output, `"Status": "completed"`) + require.Equal(t, "completed", gjson.Get(output, "Status").String()) + // And finally confirm that we have no more VDiffs. _, output = performVDiff2Action(t, ksWorkflow, cells, "show", "all", false) - require.Equal(t, "[]\n", output) + require.Equal(t, int64(0), gjson.Get(output, "#").Int()) }) } func testNoOrphanedData(t *testing.T, keyspace, workflow string, shards []string) { t.Run("No orphaned data", func(t *testing.T) { - query := fmt.Sprintf("select vd.id as vdiff_id, vdt.vdiff_id as vdiff_table_id, vdl.vdiff_id as vdiff_log_id from _vt.vdiff as vd inner join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) inner join _vt.vdiff_log as vdl on (vd.id = vdl.vdiff_id) where vd.keyspace = %s and vd.workflow = %s", - encodeString(keyspace), encodeString(workflow)) + query := sqlparser.BuildParsedQuery("select vd.id as vdiff_id, vdt.vdiff_id as vdiff_table_id, vdl.vdiff_id as vdiff_log_id from %s.vdiff as vd inner join %s.vdiff_table as vdt on (vd.id = vdt.vdiff_id) inner join %s.vdiff_log as vdl on (vd.id = vdl.vdiff_id) where vd.keyspace = %s and vd.workflow = %s", + sidecarDBIdentifier, sidecarDBIdentifier, sidecarDBIdentifier, encodeString(keyspace), encodeString(workflow)).Query for _, shard := range shards { res, err := vc.getPrimaryTablet(t, keyspace, shard).QueryTablet(query, keyspace, false) require.NoError(t, err) @@ -325,7 +357,7 @@ func testAutoRetryError(t *testing.T, tc *testCase, cells string) { // update the VDiff to simulate an ephemeral error having occurred for _, shard := range strings.Split(tc.targetShards, ",") { tab := vc.getPrimaryTablet(t, tc.targetKs, shard) - res, err := tab.QueryTabletWithDB(fmt.Sprintf(sqlSimulateError, encodeString(uuid)), "vt_"+tc.targetKs) + res, err := tab.QueryTabletWithDB(sqlparser.BuildParsedQuery(sqlSimulateError, sidecarDBIdentifier, sidecarDBIdentifier, encodeString(uuid)).Query, "vt_"+tc.targetKs) require.NoError(t, err) // should have updated the vdiff record and at least one vdiff_table record require.GreaterOrEqual(t, int(res.RowsAffected), 2) diff --git a/go/test/endtoend/vreplication/vdiff_helper_test.go b/go/test/endtoend/vreplication/vdiff_helper_test.go index 4a7e5cb8902..982ea04c957 100644 --- a/go/test/endtoend/vreplication/vdiff_helper_test.go +++ b/go/test/endtoend/vreplication/vdiff_helper_test.go @@ -23,12 +23,10 @@ import ( "testing" "time" - "github.com/buger/jsonparser" "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" - "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" vdiff2 "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" "vitess.io/vitess/go/vt/wrangler" @@ -67,11 +65,12 @@ func doVDiff1(t *testing.T, ksWorkflow, cells string) { t.Run(fmt.Sprintf("vdiff1 %s", ksWorkflow), func(t *testing.T) { output, err := vc.VtctlClient.ExecuteCommandWithOutput("VDiff", "--", "--v1", "--tablet_types=primary", "--source_cell="+cells, "--format", "json", ksWorkflow) log.Infof("vdiff1 err: %+v, output: %+v", err, output) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, output) diffReports := make(map[string]*wrangler.DiffReport) + t.Logf("vdiff1 output: %s", output) err = json.Unmarshal([]byte(output), &diffReports) - require.Nil(t, err) + require.NoErrorf(t, err, "full output: %s", output) if len(diffReports) < 1 { t.Fatal("VDiff did not return a valid json response " + output + "\n") } @@ -110,12 +109,20 @@ func waitForVDiff2ToComplete(t *testing.T, ksWorkflow, cells, uuid string, compl // The timestamp format allows us to compare them lexicographically. // We don't test that the ETA always increases as it can decrease based on how // quickly we're doing work. - if info.Progress.ETA != "" { - // If we're operating at the second boundary then the ETA can be up - // to 1 second in the past due to using second based precision. - loc, _ := time.LoadLocation("UTC") - require.GreaterOrEqual(t, info.Progress.ETA, time.Now().Add(-time.Second).In(loc).Format(vdiff2.TimestampFormat)) - } + + // Commenting out this check for now as it is quite flaky in Github CI: we sometimes get a difference of + // more than 1s between the ETA and the current time, empirically seen 2s when it has failed, + // but presumably it can be higher. Keeping the code here for now in case we want to re-enable it. + + /* + if info.Progress.ETA != "" { + // If we're operating at the second boundary then the ETA can be up + // to 1 second in the past due to using second based precision. + loc, _ := time.LoadLocation("UTC") + require.GreaterOrEqual(t, info.Progress.ETA, time.Now().Add(-time.Second).In(loc).Format(vdiff2.TimestampFormat)) + } + */ + if !first { require.GreaterOrEqual(t, info.Progress.Percentage, previousProgress.Percentage) } @@ -143,7 +150,8 @@ type expectedVDiff2Result struct { func doVdiff2(t *testing.T, keyspace, workflow, cells string, want *expectedVDiff2Result) { ksWorkflow := fmt.Sprintf("%s.%s", keyspace, workflow) t.Run(fmt.Sprintf("vdiff2 %s", ksWorkflow), func(t *testing.T) { - uuid, _ := performVDiff2Action(t, ksWorkflow, cells, "create", "", false, "--auto-retry") + // update-table-stats is needed in order to test progress reports. + uuid, _ := performVDiff2Action(t, ksWorkflow, cells, "create", "", false, "--auto-retry", "--update-table-stats") info := waitForVDiff2ToComplete(t, ksWorkflow, cells, uuid, time.Time{}) require.Equal(t, workflow, info.Workflow) @@ -153,8 +161,8 @@ func doVdiff2(t *testing.T, keyspace, workflow, cells string, want *expectedVDif require.Equal(t, strings.Join(want.shards, ","), info.Shards) require.Equal(t, want.hasMismatch, info.HasMismatch) } else { - require.Equal(t, "completed", info.State) - require.False(t, info.HasMismatch) + require.Equal(t, "completed", info.State, "vdiff results: %+v", info) + require.False(t, info.HasMismatch, "vdiff results: %+v", info) } if strings.Contains(t.Name(), "AcrossDBVersions") { log.Errorf("VDiff resume cannot be guaranteed between major MySQL versions due to implied collation differences, skipping resume test...") @@ -174,7 +182,7 @@ func performVDiff2Action(t *testing.T, ksWorkflow, cells, action, actionArg stri log.Infof("vdiff2 output: %+v (err: %+v)", output, err) if !expectError { require.Nil(t, err) - uuid, err = jsonparser.GetString([]byte(output), "UUID") + uuid = gjson.Get(output, "UUID").String() if action != "delete" && !(action == "show" && actionArg == "all") { // a UUID is not required require.NoError(t, err) require.NotEmpty(t, uuid) @@ -193,19 +201,18 @@ type vdiffInfo struct { Progress vdiff2.ProgressReport } -func getVDiffInfo(jsonStr string) *vdiffInfo { +func getVDiffInfo(json string) *vdiffInfo { var info vdiffInfo - json := []byte(jsonStr) - info.Workflow, _ = jsonparser.GetString(json, "Workflow") - info.Keyspace, _ = jsonparser.GetString(json, "Keyspace") - info.State, _ = jsonparser.GetString(json, "State") - info.Shards, _ = jsonparser.GetString(json, "Shards") - info.RowsCompared, _ = jsonparser.GetInt(json, "RowsCompared") - info.StartedAt, _ = jsonparser.GetString(json, "StartedAt") - info.CompletedAt, _ = jsonparser.GetString(json, "CompletedAt") - info.HasMismatch, _ = jsonparser.GetBoolean(json, "HasMismatch") - info.Progress.Percentage, _ = jsonparser.GetFloat(json, "Progress", "Percentage") - info.Progress.ETA, _ = jsonparser.GetString(json, "Progress", "ETA") + info.Workflow = gjson.Get(json, "Workflow").String() + info.Keyspace = gjson.Get(json, "Keyspace").String() + info.State = gjson.Get(json, "State").String() + info.Shards = gjson.Get(json, "Shards").String() + info.RowsCompared = gjson.Get(json, "RowsCompared").Int() + info.StartedAt = gjson.Get(json, "StartedAt").String() + info.CompletedAt = gjson.Get(json, "CompletedAt").String() + info.HasMismatch = gjson.Get(json, "HasMismatch").Bool() + info.Progress.Percentage = gjson.Get(json, "Progress.Percentage").Float() + info.Progress.ETA = gjson.Get(json, "Progress.ETA").String() return &info } @@ -215,30 +222,6 @@ func encodeString(in string) string { return buf.String() } -// updateTableStats runs ANALYZE TABLE on each table involved in the workflow. -// You should execute this if you leverage table information from e.g. -// information_schema.tables in your test. -func updateTableStats(t *testing.T, tablet *cluster.VttabletProcess, tables string) { - dbName := "vt_" + tablet.Keyspace - tableList := strings.Split(strings.TrimSpace(tables), ",") - if len(tableList) == 0 { - // we need to get all of the tables in the keyspace - res, err := tablet.QueryTabletWithDB("show tables", dbName) - require.NoError(t, err) - for _, row := range res.Rows { - tableList = append(tableList, row[0].String()) - } - } - for _, table := range tableList { - table = strings.TrimSpace(table) - if table != "" { - res, err := tablet.QueryTabletWithDB(fmt.Sprintf(sqlAnalyzeTable, sqlescape.EscapeID(table)), dbName) - require.NoError(t, err) - require.Equal(t, 1, len(res.Rows)) - } - } -} - // generateMoreCustomers creates additional test data for better tests // when needed. func generateMoreCustomers(t *testing.T, keyspace string, numCustomers int64) { diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index 3f8f7f91997..6e6c2a8b11b 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -21,7 +21,7 @@ import ( "fmt" "io" "net/http" - "os" + "runtime" "strings" "sync" "testing" @@ -30,10 +30,12 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vtgateconn" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" "github.com/buger/jsonparser" @@ -44,6 +46,7 @@ import ( "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" throttlebase "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer" ) @@ -58,8 +61,8 @@ var ( sourceKsOpts = make(map[string]string) targetKsOpts = make(map[string]string) httpClient = throttlebase.SetupHTTPClient(time.Second) - sourceThrottlerAppName = "vstreamer" - targetThrottlerAppName = "vreplication" + sourceThrottlerAppName = throttlerapp.VStreamerName + targetThrottlerAppName = throttlerapp.VReplicationName ) const ( @@ -94,16 +97,16 @@ func throttleResponse(tablet *cluster.VttabletProcess, path string) (respBody st return respBody, err } -func throttleApp(tablet *cluster.VttabletProcess, app string) (string, error) { - return throttleResponse(tablet, fmt.Sprintf("throttler/throttle-app?app=%s&duration=1h", app)) +func throttleApp(tablet *cluster.VttabletProcess, throttlerApp throttlerapp.Name) (string, error) { + return throttleResponse(tablet, fmt.Sprintf("throttler/throttle-app?app=%s&duration=1h", throttlerApp.String())) } -func unthrottleApp(tablet *cluster.VttabletProcess, app string) (string, error) { - return throttleResponse(tablet, fmt.Sprintf("throttler/unthrottle-app?app=%s", app)) +func unthrottleApp(tablet *cluster.VttabletProcess, throttlerApp throttlerapp.Name) (string, error) { + return throttleResponse(tablet, fmt.Sprintf("throttler/unthrottle-app?app=%s", throttlerApp.String())) } -func throttlerCheckSelf(tablet *cluster.VttabletProcess, app string) (respBody string, err error) { - apiURL := fmt.Sprintf("http://%s:%d/throttler/check-self?app=%s", tablet.TabletHostname, tablet.Port, app) +func throttlerCheckSelf(tablet *cluster.VttabletProcess, throttlerApp throttlerapp.Name) (respBody string, err error) { + apiURL := fmt.Sprintf("http://%s:%d/throttler/check-self?app=%s", tablet.TabletHostname, tablet.Port, throttlerApp.String()) resp, err := httpClient.Get(apiURL) if err != nil { return "", err @@ -168,12 +171,24 @@ func TestVReplicationDDLHandling(t *testing.T) { _, err = vtgateConn.ExecuteFetch(addColDDL, 1, false) require.NoError(t, err, "error executing %q: %v", addColDDL, err) // Confirm workflow is still running fine - waitForWorkflowState(t, vc, ksWorkflow, "Running") + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) // Confirm new col does not exist on target waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(0)]]") // Confirm new col does exist on source waitForQueryResult(t, vtgateConn, sourceKs, checkColQuerySource, "[[INT64(1)]]") - moveTablesAction(t, "Cancel", defaultCellName, workflow, sourceKs, targetKs, table) + // Also test Cancel --keep_routing_rules + moveTablesAction(t, "Cancel", defaultCellName, workflow, sourceKs, targetKs, table, "--keep_routing_rules") + // Confirm that the routing rules were NOT cleared + rr, err := vc.VtctldClient.ExecuteCommandWithOutput("GetRoutingRules") + require.NoError(t, err) + require.Greater(t, len(gjson.Get(rr, "rules").Array()), 0) + // Manually clear the routing rules + err = vc.VtctldClient.ExecuteCommand("ApplyRoutingRules", "--rules", "{}") + require.NoError(t, err) + // Confirm that the routing rules are gone + rr, err = vc.VtctldClient.ExecuteCommandWithOutput("GetRoutingRules") + require.NoError(t, err) + require.Equal(t, len(gjson.Get(rr, "rules").Array()), 0) // Drop the column on source to start fresh again _, err = vtgateConn.ExecuteFetch(dropColDDL, 1, false) require.NoError(t, err, "error executing %q: %v", dropColDDL, err) @@ -186,7 +201,7 @@ func TestVReplicationDDLHandling(t *testing.T) { _, err = vtgateConn.ExecuteFetch(addColDDL, 1, false) require.NoError(t, err, "error executing %q: %v", addColDDL, err) // Confirm that the worfklow stopped because of the DDL - waitForWorkflowState(t, vc, ksWorkflow, "Stopped", fmt.Sprintf("Message==Stopped at DDL %s", addColDDL)) + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String(), fmt.Sprintf("Message==Stopped at DDL %s", addColDDL)) // Confirm that the target does not have new col waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(0)]]") moveTablesAction(t, "Cancel", defaultCellName, workflow, sourceKs, targetKs, table) @@ -201,12 +216,19 @@ func TestVReplicationDDLHandling(t *testing.T) { _, err = vtgateConn.ExecuteFetch(dropColDDL, 1, false) require.NoError(t, err, "error executing %q: %v", dropColDDL, err) // Confirm workflow is still running fine - waitForWorkflowState(t, vc, ksWorkflow, "Running") + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) // Confirm new col was dropped on target waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(0)]]") moveTablesAction(t, "Cancel", defaultCellName, workflow, sourceKs, targetKs, table) } +// TestVreplicationCopyThrottling tests the logic that is used +// by vstreamer when starting a copy phase cycle. +// This logic today supports waiting for MySQL replication lag +// and/or InnoDB MVCC history to be below a certain threshold +// before starting the next copy phase. This test focuses on +// the innodb history list length check. +// NOTE: this is a manual test. It is not executed in the CI. func TestVreplicationCopyThrottling(t *testing.T) { workflow := "copy-throttling" cell := "zone1" @@ -248,9 +270,9 @@ func TestVreplicationCopyThrottling(t *testing.T) { // We need to force primary tablet types as the history list has been increased on the source primary // We use a small timeout and ignore errors as we don't expect the MoveTables to start here // because of the InnoDB History List length. - moveTablesActionWithTabletTypes(t, "Create", defaultCell.Name, workflow, sourceKs, targetKs, table, "primary", 5*time.Second, true) + moveTablesActionWithTabletTypes(t, "Create", defaultCell.Name, workflow, sourceKs, targetKs, table, "primary", true) // Wait for the copy phase to start - waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKs, workflow), workflowStateCopying) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKs, workflow), binlogdatapb.VReplicationWorkflowState_Copying.String()) // The initial copy phase should be blocking on the history list confirmWorkflowHasCopiedNoData(t, targetKs, workflow) releaseInnoDBRowHistory(t, trxConn) @@ -258,9 +280,9 @@ func TestVreplicationCopyThrottling(t *testing.T) { } func TestBasicVreplicationWorkflow(t *testing.T) { - sourceKsOpts["DBTypeVersion"] = "mysql-5.7" - targetKsOpts["DBTypeVersion"] = "mysql-5.7" - testBasicVreplicationWorkflow(t) + sourceKsOpts["DBTypeVersion"] = "mysql-8.0" + targetKsOpts["DBTypeVersion"] = "mysql-8.0" + testBasicVreplicationWorkflow(t, "noblob") } func TestVreplicationCopyParallel(t *testing.T) { @@ -269,14 +291,15 @@ func TestVreplicationCopyParallel(t *testing.T) { extraVTTabletArgs = []string{ parallelInsertWorkers, } - testBasicVreplicationWorkflow(t) + testBasicVreplicationWorkflow(t, "") } -func testBasicVreplicationWorkflow(t *testing.T) { - testVreplicationWorkflows(t, false) +func testBasicVreplicationWorkflow(t *testing.T, binlogRowImage string) { + testVreplicationWorkflows(t, false, binlogRowImage) } -func testVreplicationWorkflows(t *testing.T, minimal bool) { +// If limited == true, we only run a limited set of workflows. +func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string) { defaultCellName := "zone1" allCells := []string{"zone1"} allCellNames = "zone1" @@ -288,6 +311,10 @@ func testVreplicationWorkflows(t *testing.T, minimal bool) { defaultRdonly = 0 defer func() { defaultReplicas = 1 }() + if binlogRowImage != "" { + require.NoError(t, utils.SetBinlogRowImageMode("noblob", vc.ClusterConfig.tmpDir)) + defer utils.SetBinlogRowImageMode("", vc.ClusterConfig.tmpDir) + } defer vc.TearDown(t) defaultCell = vc.Cells[defaultCellName] @@ -302,6 +329,7 @@ func testVreplicationWorkflows(t *testing.T, minimal bool) { verifyClusterHealth(t, vc) insertInitialData(t) materializeRollup(t) + shardCustomer(t, true, []*Cell{defaultCell}, defaultCellName, false) // the Lead and Lead-1 tables tested a specific case with binary sharding keys. Drop it now so that we don't @@ -311,7 +339,7 @@ func testVreplicationWorkflows(t *testing.T, minimal bool) { shardOrders(t) shardMerchant(t) - if minimal { + if limited { return } @@ -340,12 +368,34 @@ func testVreplicationWorkflows(t *testing.T, minimal bool) { verifyCopyStateIsOptimized(t, tablet) } }) + + t.Run("Test CreateLookupVindex", func(t *testing.T) { + // CreateLookupVindex does not support noblob images. + if strings.ToLower(binlogRowImage) == "noblob" { + return + } + _, err = vtgateConn.ExecuteFetch("use customer", 1, false) + require.NoError(t, err, "error using customer keyspace: %v", err) + res, err := vtgateConn.ExecuteFetch("select count(*) from customer where name is not null", 1, false) + require.NoError(t, err, "error getting current row count in customer: %v", err) + require.Equal(t, 1, len(res.Rows), "expected 1 row in count(*) query, got %d", len(res.Rows)) + rows, _ := res.Rows[0][0].ToInt32() + // Insert a couple of rows with a NULL name to confirm that they + // are ignored. + insert := "insert into customer (cid, name, typ, sport, meta) values (100, NULL, 'soho', 'football','{}'), (101, NULL, 'enterprise','baseball','{}')" + _, err = vtgateConn.ExecuteFetch(insert, -1, false) + require.NoError(t, err, "error executing %q: %v", insert, err) + err = vc.VtctlClient.ExecuteCommand("CreateLookupVindex", "--", "--tablet_types=PRIMARY", "customer", createLookupVindexVSchema) + require.NoError(t, err, "error executing CreateLookupVindex: %v", err) + waitForWorkflowState(t, vc, "product.customer_name_keyspace_id_vdx", binlogdatapb.VReplicationWorkflowState_Stopped.String()) + waitForRowCount(t, vtgateConn, "product", "customer_name_keyspace_id", int(rows)) + }) } func TestV2WorkflowsAcrossDBVersions(t *testing.T) { sourceKsOpts["DBTypeVersion"] = "mysql-5.7" targetKsOpts["DBTypeVersion"] = "mysql-8.0" - testBasicVreplicationWorkflow(t) + testBasicVreplicationWorkflow(t, "") } // TestMoveTablesMariaDBToMySQL tests that MoveTables works between a MariaDB source @@ -354,7 +404,7 @@ func TestV2WorkflowsAcrossDBVersions(t *testing.T) { func TestMoveTablesMariaDBToMySQL(t *testing.T) { sourceKsOpts["DBTypeVersion"] = "mariadb-10.10" targetKsOpts["DBTypeVersion"] = "mysql-8.0" - testVreplicationWorkflows(t, true /* only do MoveTables */) + testVreplicationWorkflows(t, true /* only do MoveTables */, "") } func TestMultiCellVreplicationWorkflow(t *testing.T) { @@ -378,14 +428,14 @@ func TestMultiCellVreplicationWorkflow(t *testing.T) { require.NotNil(t, vtgate) err := cluster.WaitForHealthyShard(vc.VtctldClient, keyspace, shard) require.NoError(t, err) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace, shard), 2) + vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace, shard), 2, 30*time.Second) vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) insertInitialData(t) shardCustomer(t, true, []*Cell{cell1, cell2}, cell2.Name, true) - + isTableInDenyList(t, vc, "product:0", "customer") // we tag along this test so as not to create the overhead of creating another cluster testVStreamCellFlag(t) } @@ -435,11 +485,13 @@ func TestVStreamFlushBinlog(t *testing.T) { require.Equal(t, flushCount, int64(0), "VStreamerFlushedBinlogs should be 0") // Generate a lot of binlog event bytes - targetBinlogSize := vstreamer.GetBinlogRotationThreshold() + 16 + targetBinlogSize := vstreamer.GetBinlogRotationThreshold() + 1024 vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - queryF := "insert into db_order_test (c_uuid, dbstuff, created_at) values ('%d', repeat('A', 65000), now())" - for i := 100; i < 5000; i++ { - res, err := vtgateConn.ExecuteFetch(fmt.Sprintf(queryF, i), -1, false) + queryF := "insert into db_order_test (c_uuid, dbstuff, created_at) values ('%d', '%s', now())" + for i := 100; i < 10000; i++ { + randStr, err := randHex(6500) + require.NoError(t, err) + res, err := vtgateConn.ExecuteFetch(fmt.Sprintf(queryF, i, randStr), -1, false) require.NoError(t, err) require.Greater(t, res.RowsAffected, uint64(0)) @@ -506,6 +558,7 @@ func testVStreamCellFlag(t *testing.T) { flags := &vtgatepb.VStreamFlags{} if tc.cells != "" { flags.Cells = tc.cells + flags.CellPreference = "onlyspecified" } ctx2, cancel := context.WithTimeout(ctx, 30*time.Second) @@ -564,6 +617,8 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { keyspace := "product" shard := "0" + require.NoError(t, utils.SetBinlogRowImageMode("noblob", vc.ClusterConfig.tmpDir)) + defer utils.SetBinlogRowImageMode("", vc.ClusterConfig.tmpDir) defer vc.TearDown(t) cell1 := vc.Cells["zone1"] @@ -578,13 +633,14 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { require.NotNil(t, vtgate) err = cluster.WaitForHealthyShard(vc.VtctldClient, keyspace, shard) require.NoError(t, err) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace, shard), 2) + vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace, shard), 2, 30*time.Second) vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) insertInitialData(t) + t.Run("VStreamFrom", func(t *testing.T) { testVStreamFrom(t, keyspace, 2) }) @@ -655,53 +711,6 @@ func testVStreamFrom(t *testing.T, table string, expectedRowCount int) { } } -func insertInitialData(t *testing.T) { - t.Run("insertInitialData", func(t *testing.T) { - log.Infof("Inserting initial data") - lines, _ := os.ReadFile("unsharded_init_data.sql") - execMultipleQueries(t, vtgateConn, "product:0", string(lines)) - execVtgateQuery(t, vtgateConn, "product:0", "insert into customer_seq(id, next_id, cache) values(0, 100, 100);") - execVtgateQuery(t, vtgateConn, "product:0", "insert into order_seq(id, next_id, cache) values(0, 100, 100);") - execVtgateQuery(t, vtgateConn, "product:0", "insert into customer_seq2(id, next_id, cache) values(0, 100, 100);") - log.Infof("Done inserting initial data") - - waitForRowCount(t, vtgateConn, "product:0", "product", 2) - waitForRowCount(t, vtgateConn, "product:0", "customer", 3) - waitForQueryResult(t, vtgateConn, "product:0", "select * from merchant", - `[[VARCHAR("Monoprice") VARCHAR("eléctronics")] [VARCHAR("newegg") VARCHAR("elec†ronics")]]`) - }) -} - -// insertMoreCustomers creates additional customers. -// Note: this will only work when the customer sequence is in place. -func insertMoreCustomers(t *testing.T, numCustomers int) { - sql := "insert into customer (name) values " - i := 0 - for i < numCustomers { - i++ - sql += fmt.Sprintf("('customer%d')", i) - if i != numCustomers { - sql += "," - } - } - execVtgateQuery(t, vtgateConn, "customer", sql) -} - -func insertMoreProducts(t *testing.T) { - sql := "insert into product(pid, description) values(3, 'cpu'),(4, 'camera'),(5, 'mouse');" - execVtgateQuery(t, vtgateConn, "product", sql) -} - -func insertMoreProductsForSourceThrottler(t *testing.T) { - sql := "insert into product(pid, description) values(103, 'new-cpu'),(104, 'new-camera'),(105, 'new-mouse');" - execVtgateQuery(t, vtgateConn, "product", sql) -} - -func insertMoreProductsForTargetThrottler(t *testing.T) { - sql := "insert into product(pid, description) values(203, 'new-cpu'),(204, 'new-camera'),(205, 'new-mouse');" - execVtgateQuery(t, vtgateConn, "product", sql) -} - func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAlias string, withOpenTx bool) { t.Run("shardCustomer", func(t *testing.T) { workflow := "p2c" @@ -720,7 +729,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl defaultCell := cells[0] custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] - tables := "customer,Lead,Lead-1,db_order_test" + tables := "customer,loadtest,Lead,Lead-1,db_order_test,geom_tbl,json_tbl,blüb_tbl,vdiff_order,reftable" moveTablesAction(t, "Create", sourceCellOrAlias, workflow, sourceKs, targetKs, tables) customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet @@ -732,9 +741,16 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl catchup(t, customerTab1, workflow, workflowType) catchup(t, customerTab2, workflow, workflowType) + // The wait in the next code block which checks that customer.dec80 is updated, also confirms that the + // blob-related dmls we execute here are vreplicated. + insertIntoBlobTable(t) + // Confirm that the 0 scale decimal field, dec80, is replicated correctly dec80Replicated := false execVtgateQuery(t, vtgateConn, sourceKs, "update customer set dec80 = 0") + execVtgateQuery(t, vtgateConn, sourceKs, "update customer set blb = \"new blob data\" where cid=3") + execVtgateQuery(t, vtgateConn, sourceKs, "update json_tbl set j1 = null, j2 = 'null', j3 = '\"null\"'") + execVtgateQuery(t, vtgateConn, sourceKs, "insert into json_tbl(id, j1, j2, j3) values (7, null, 'null', '\"null\"')") waitForNoWorkflowLag(t, vc, targetKs, workflow) for _, shard := range []string{"-80", "80-"} { shardTarget := fmt.Sprintf("%s:%s", targetKs, shard) @@ -745,27 +761,55 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl } require.Equal(t, true, dec80Replicated) + // Confirm that all partial query metrics get updated when we are testing the noblob mode. + t.Run("validate partial query counts", func(t *testing.T) { + if !isBinlogRowImageNoBlob(t, productTab) { + return + } + + // the two primaries of the new reshard targets + tablet200 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet + tablet300 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet + + totalInserts, totalUpdates, totalInsertQueries, totalUpdateQueries := 0, 0, 0, 0 + for _, tab := range []*cluster.VttabletProcess{tablet200, tablet300} { + insertCount, updateCount, insertQueries, updateQueries := getPartialMetrics(t, "product.0.p2c.1", tab) + totalInserts += insertCount + totalUpdates += updateCount + totalInsertQueries += insertQueries + totalUpdateQueries += updateQueries + } + // Counts are total queries from `blobTableQueries` across shards + customer updates from above. + require.NotZero(t, totalInserts) + require.NotZero(t, totalUpdates) + require.NotZero(t, totalInsertQueries) + require.NotZero(t, totalUpdateQueries) + }) + query := "select cid from customer" - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", query, query)) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, "product", query, query) insertQuery1 := "insert into customer(cid, name) values(1001, 'tempCustomer1')" - matchInsertQuery1 := "insert into customer(cid, `name`) values (:vtg1, :vtg2)" - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1)) - - // confirm that the backticking of table names in the routing rules works - tbls := []string{"Lead", "Lead-1"} - for _, tbl := range tbls { - output, err := osExec(t, "mysql", []string{"-u", "vtdba", "-P", fmt.Sprintf("%d", vc.ClusterConfig.vtgateMySQLPort), - "--host=127.0.0.1", "-e", fmt.Sprintf("select * from `%s`", tbl)}) - if err != nil { - require.FailNow(t, output) + matchInsertQuery1 := "insert into customer(cid, `name`) values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */)" + assertQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1) + + // FIXME for some reason, these inserts fails on mac, need to investigate, some + // vreplication bug because of case insensitiveness of table names on mac? + if runtime.GOOS == "linux" { + // Confirm that the backticking of table names in the routing rules works. + tbls := []string{"Lead", "Lead-1"} + for _, tbl := range tbls { + output, err := osExec(t, "mysql", []string{"-u", "vtdba", "-P", fmt.Sprintf("%d", vc.ClusterConfig.vtgateMySQLPort), + "--host=127.0.0.1", "--default-character-set=utf8mb4", "-e", fmt.Sprintf("select * from `%s`", tbl)}) + if err != nil { + require.FailNow(t, output) + } + execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("update `%s` set name='xyz'", tbl)) } - execVtgateQuery(t, vtgateConn, "product", fmt.Sprintf("update `%s` set name='xyz'", tbl)) } - vdiff1(t, ksWorkflow, "") switchReadsDryRun(t, workflowType, allCellNames, ksWorkflow, dryRunResultsReadCustomerShard) switchReads(t, workflowType, allCellNames, ksWorkflow, false) - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", query, query)) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", query, query) var commit func(t *testing.T) if withOpenTx { @@ -773,8 +817,15 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl } switchWritesDryRun(t, workflowType, ksWorkflow, dryRunResultsSwitchWritesCustomerShard) switchWrites(t, workflowType, ksWorkflow, false) + checkThatVDiffFails(t, targetKs, workflow) + // The original unsharded customer data included an insert with the + // vindex column (cid) of 999999, so the backing sequence table should + // now have a next_id of 1000000 after SwitchTraffic. + res := execVtgateQuery(t, vtgateConn, sourceKs, "select next_id from customer_seq where id = 0") + require.Equal(t, "1000000", res.Rows[0][0].ToString()) + if withOpenTx && commit != nil { commit(t) } @@ -789,14 +840,14 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl ksShards := []string{"product/0", "customer/-80", "customer/80-"} printShardPositions(vc, ksShards) insertQuery2 := "insert into customer(name, cid) values('tempCustomer2', 100)" - matchInsertQuery2 := "insert into customer(`name`, cid) values (:vtg1, :_cid0)" - require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2)) + matchInsertQuery2 := "insert into customer(`name`, cid) values (:vtg1 /* VARCHAR */, :_cid_0)" + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer3', 101)" // ID 101, hence due to reverse_bits in shard 80- - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery2, matchInsertQuery2)) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer4', 102)" // ID 102, hence due to reverse_bits in shard -80 - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2)) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2) execVtgateQuery(t, vtgateConn, "customer", "update customer set meta = convert(x'7b7d' using utf8mb4) where cid = 1") if testReverse { @@ -811,12 +862,12 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl require.Contains(t, output, "'customer.bmd5'") insertQuery1 = "insert into customer(cid, name) values(1002, 'tempCustomer5')" - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1)) + assertQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1) // both inserts go into 80-, this tests the edge-case where a stream (-80) has no relevant new events after the previous switch insertQuery1 = "insert into customer(cid, name) values(1003, 'tempCustomer6')" - require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery1, matchInsertQuery1)) + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery1, matchInsertQuery1) insertQuery1 = "insert into customer(cid, name) values(1004, 'tempCustomer7')" - require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery1, matchInsertQuery1)) + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery1, matchInsertQuery1) waitForNoWorkflowLag(t, vc, targetKs, workflow) @@ -825,13 +876,13 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl switchWrites(t, workflowType, ksWorkflow, false) var exists bool - exists, err = checkIfDenyListExists(t, vc, "product:0", "customer") + exists, err = isTableInDenyList(t, vc, "product:0", "customer") require.NoError(t, err, "Error getting denylist for customer:0") require.True(t, exists) moveTablesAction(t, "Complete", allCellNames, workflow, sourceKs, targetKs, tables) - exists, err = checkIfDenyListExists(t, vc, "product:0", "customer") + exists, err = isTableInDenyList(t, vc, "product:0", "customer") require.NoError(t, err, "Error getting denylist for customer:0") require.False(t, exists) @@ -851,11 +902,11 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl require.True(t, found) insertQuery2 = "insert into customer(name, cid) values('tempCustomer8', 103)" // ID 103, hence due to reverse_bits in shard 80- - require.False(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2)) + assertQueryDoesNotExecutesOnTablet(t, vtgateConn, productTab, "customer", insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer10', 104)" // ID 105, hence due to reverse_bits in shard -80 - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2)) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab1, "customer", insertQuery2, matchInsertQuery2) insertQuery2 = "insert into customer(name, cid) values('tempCustomer9', 105)" // ID 104, hence due to reverse_bits in shard 80- - require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery2, matchInsertQuery2)) + assertQueryExecutesOnTablet(t, vtgateConn, customerTab2, "customer", insertQuery2, matchInsertQuery2) execVtgateQuery(t, vtgateConn, "customer", "delete from customer where name like 'tempCustomer%'") waitForRowCountInTablet(t, customerTab1, "customer", "customer", 1) @@ -885,7 +936,7 @@ func reshardCustomer2to4Split(t *testing.T, cells []*Cell, sourceCellOrAlias str ksName := "customer" counts := map[string]int{"zone1-600": 4, "zone1-700": 5, "zone1-800": 6, "zone1-900": 5} reshard(t, ksName, "customer", "c2c4", "-80,80-", "-40,40-80,80-c0,c0-", - 600, counts, nil, cells, sourceCellOrAlias, 1) + 600, counts, nil, nil, cells, sourceCellOrAlias, 1) waitForRowCount(t, vtgateConn, ksName, "customer", 20) query := "insert into customer (name) values('yoko')" execVtgateQuery(t, vtgateConn, ksName, query) @@ -898,7 +949,7 @@ func reshardMerchant2to3SplitMerge(t *testing.T) { ksName := merchantKeyspace counts := map[string]int{"zone1-1600": 0, "zone1-1700": 2, "zone1-1800": 0} reshard(t, ksName, "merchant", "m2m3", "-80,80-", "-40,40-c0,c0-", - 1600, counts, dryRunResultsSwitchWritesM2m3, nil, "", 1) + 1600, counts, dryRunResultsSwitchReadM2m3, dryRunResultsSwitchWritesM2m3, nil, "", 1) waitForRowCount(t, vtgateConn, ksName, "merchant", 2) query := "insert into merchant (mname, category) values('amazon', 'electronics')" execVtgateQuery(t, vtgateConn, ksName, query) @@ -945,7 +996,7 @@ func reshardMerchant3to1Merge(t *testing.T) { ksName := merchantKeyspace counts := map[string]int{"zone1-2000": 3} reshard(t, ksName, "merchant", "m3m1", "-40,40-c0,c0-", "0", - 2000, counts, nil, nil, "", 1) + 2000, counts, nil, nil, nil, "", 1) waitForRowCount(t, vtgateConn, ksName, "merchant", 3) query := "insert into merchant (mname, category) values('flipkart', 'electronics')" execVtgateQuery(t, vtgateConn, ksName, query) @@ -958,7 +1009,7 @@ func reshardCustomer3to2SplitMerge(t *testing.T) { // -40,40-80,80-c0 => merge/s ksName := "customer" counts := map[string]int{"zone1-1000": 8, "zone1-1100": 8, "zone1-1200": 5} reshard(t, ksName, "customer", "c4c3", "-40,40-80,80-c0", "-60,60-c0", - 1000, counts, nil, nil, "", 1) + 1000, counts, nil, nil, nil, "", 1) }) } @@ -967,12 +1018,12 @@ func reshardCustomer3to1Merge(t *testing.T) { // to unsharded ksName := "customer" counts := map[string]int{"zone1-1500": 21} reshard(t, ksName, "customer", "c3c1", "-60,60-c0,c0-", "0", - 1500, counts, nil, nil, "", 3) + 1500, counts, nil, nil, nil, "", 3) }) } func reshard(t *testing.T, ksName string, tableName string, workflow string, sourceShards string, targetShards string, - tabletIDBase int, counts map[string]int, dryRunResultSwitchWrites []string, cells []*Cell, sourceCellOrAlias string, + tabletIDBase int, counts map[string]int, dryRunResultSwitchReads, dryRunResultSwitchWrites []string, cells []*Cell, sourceCellOrAlias string, autoIncrementStep int) { t.Run("reshard", func(t *testing.T) { if cells == nil { @@ -998,11 +1049,8 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou autoIncrementStep, autoIncrementStep) tablet.QueryTablet(autoIncrementSetQuery, "", false) } - workflowType := "Reshard" - if err := vc.VtctlClient.ExecuteCommand(workflowType, "--", "--source_shards="+sourceShards, "--target_shards="+targetShards, - "--cells="+sourceCellOrAlias, "--tablet_types=replica,primary", "Create", ksWorkflow); err != nil { - t.Fatalf("Reshard Create command failed with %+v\n", err) - } + reshardAction(t, "Create", workflow, ksName, sourceShards, targetShards, sourceCellOrAlias, "replica,primary") + targetShards = "," + targetShards + "," for _, tab := range tablets { if strings.Contains(targetShards, ","+tab.Shard+",") { @@ -1014,15 +1062,15 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou } } vdiff1(t, ksWorkflow, "") - switchReads(t, workflowType, allCellNames, ksWorkflow, false) - if dryRunResultSwitchWrites != nil { - switchWritesDryRun(t, workflowType, ksWorkflow, dryRunResultSwitchWrites) + if dryRunResultSwitchReads != nil { + reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "rdonly,replica", "--dry-run") } - switchWrites(t, workflowType, ksWorkflow, false) - if err := vc.VtctlClient.ExecuteCommand(workflowType, "--", "--source_shards="+sourceShards, "--target_shards="+targetShards, - "--cells="+sourceCellOrAlias, "--tablet_types=replica,primary", "Complete", ksWorkflow); err != nil { - t.Fatalf("Reshard Complete command failed with %+v\n", err) + reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "rdonly,replica") + if dryRunResultSwitchWrites != nil { + reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "primary", "--dry-run") } + reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "primary") + reshardAction(t, "Complete", workflow, ksName, "", "", "", "") for tabletName, count := range counts { if tablets[tabletName] == nil { continue @@ -1104,7 +1152,7 @@ func shardMerchant(t *testing.T) { // confirm that the backticking of keyspaces in the routing rules works output, err := osExec(t, "mysql", []string{"-u", "vtdba", "-P", fmt.Sprintf("%d", vc.ClusterConfig.vtgateMySQLPort), - fmt.Sprintf("--host=%s", vc.ClusterConfig.hostname), "-e", "select * from merchant"}) + fmt.Sprintf("--host=%s", vc.ClusterConfig.hostname), "--default-character-set=utf8mb4", "-e", "select * from merchant"}) if err != nil { require.FailNow(t, output) } @@ -1331,32 +1379,67 @@ func waitForLowLag(t *testing.T, keyspace, workflow string) { } func catchup(t *testing.T, vttablet *cluster.VttabletProcess, workflow, info string) { - vttablet.WaitForVReplicationToCatchup(t, workflow, fmt.Sprintf("vt_%s", vttablet.Keyspace), maxWait) + vttablet.WaitForVReplicationToCatchup(t, workflow, fmt.Sprintf("vt_%s", vttablet.Keyspace), sidecarDBName, maxWait) } func moveTablesAction(t *testing.T, action, cell, workflow, sourceKs, targetKs, tables string, extraFlags ...string) { var err error - if len(extraFlags) > 0 { - err = vc.VtctlClient.ExecuteCommand("MoveTables", "--", "--source="+sourceKs, "--tables="+tables, - "--cells="+cell, "--tablet_types=primary,replica,rdonly", strings.Join(extraFlags, " "), - action, fmt.Sprintf("%s.%s", targetKs, workflow)) - } else { - err = vc.VtctlClient.ExecuteCommand("MoveTables", "--", "--source="+sourceKs, "--tables="+tables, "--cells="+cell, - "--tablet_types=primary,replica,rdonly", action, fmt.Sprintf("%s.%s", targetKs, workflow)) + args := []string{"MoveTables", "--workflow=" + workflow, "--target-keyspace=" + targetKs, action} + switch strings.ToLower(action) { + case strings.ToLower(workflowActionCreate): + extraFlags = append(extraFlags, "--source-keyspace="+sourceKs, "--tables="+tables, "--cells="+cell, "--tablet-types=primary,replica,rdonly") + case strings.ToLower(workflowActionSwitchTraffic): + extraFlags = append(extraFlags, "--initialize-target-sequences") + } + args = append(args, extraFlags...) + output, err := vc.VtctldClient.ExecuteCommandWithOutput(args...) + if output != "" { + fmt.Printf("Output of vtctldclient MoveTables %s for %s workflow:\n++++++\n%s\n--------\n", + action, workflow, output) } if err != nil { t.Fatalf("MoveTables %s command failed with %+v\n", action, err) } } -func moveTablesActionWithTabletTypes(t *testing.T, action, cell, workflow, sourceKs, targetKs, tables string, tabletTypes string, timeout time.Duration, ignoreErrors bool) { - if err := vc.VtctlClient.ExecuteCommand("MoveTables", "--", "--source="+sourceKs, "--tables="+tables, "--cells="+cell, - "--tablet_types="+tabletTypes, "--timeout="+timeout.String(), action, fmt.Sprintf("%s.%s", targetKs, workflow)); err != nil { +func moveTablesActionWithTabletTypes(t *testing.T, action, cell, workflow, sourceKs, targetKs, tables string, tabletTypes string, ignoreErrors bool) { + if err := vc.VtctldClient.ExecuteCommand("MoveTables", "--workflow="+workflow, "--target-keyspace="+targetKs, action, + "--source-keyspace="+sourceKs, "--tables="+tables, "--cells="+cell, "--tablet-types="+tabletTypes); err != nil { if !ignoreErrors { t.Fatalf("MoveTables %s command failed with %+v\n", action, err) } } } +// reshardAction is a helper function to run the reshard command and +// action using vtctldclient. +func reshardAction(t *testing.T, action, workflow, keyspaceName, sourceShards, targetShards, cell, tabletTypes string, extraFlags ...string) { + var err error + args := []string{"Reshard", "--workflow=" + workflow, "--target-keyspace=" + keyspaceName, action} + + switch strings.ToLower(action) { + case strings.ToLower(workflowActionCreate): + if tabletTypes == "" { + tabletTypes = "replica,rdonly,primary" + } + args = append(args, "--source-shards="+sourceShards, "--target-shards="+targetShards) + } + if cell != "" { + args = append(args, "--cells="+cell) + } + if tabletTypes != "" { + args = append(args, "--tablet-types="+tabletTypes) + } + args = append(args, extraFlags...) + output, err := vc.VtctldClient.ExecuteCommandWithOutput(args...) + if output != "" { + log.Infof("Output of vtctldclient Reshard %s for %s workflow:\n++++++\n%s\n--------\n", + action, workflow, output) + } + if err != nil { + t.Fatalf("Reshard %s command failed with %+v\n", action, err) + } +} + func applyVSchema(t *testing.T, vschema, keyspace string) { err := vc.VtctlClient.ExecuteCommand("ApplyVSchema", "--", "--vschema", vschema, keyspace) require.NoError(t, err) @@ -1368,15 +1451,35 @@ func switchReadsDryRun(t *testing.T, workflowType, cells, ksWorkflow string, dry require.FailNowf(t, "Invalid workflow type for SwitchTraffic, must be MoveTables or Reshard", "workflow type specified: %s", workflowType) } + ensureCanSwitch(t, workflowType, cells, ksWorkflow) output, err := vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "--cells="+cells, "--tablet_types=rdonly,replica", "--dry_run", "SwitchTraffic", ksWorkflow) require.NoError(t, err, fmt.Sprintf("Switching Reads DryRun Error: %s: %s", err, output)) - validateDryRunResults(t, output, dryRunResults) + if dryRunResults != nil { + validateDryRunResults(t, output, dryRunResults) + } +} + +func ensureCanSwitch(t *testing.T, workflowType, cells, ksWorkflow string) { + timer := time.NewTimer(defaultTimeout) + defer timer.Stop() + for { + _, err := vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "--cells="+cells, "--dry_run", "SwitchTraffic", ksWorkflow) + if err == nil { + return + } + select { + case <-timer.C: + t.Fatalf("Did not become ready to switch traffic for %s before the timeout of %s", ksWorkflow, defaultTimeout) + default: + time.Sleep(defaultTick) + } + } } func switchReads(t *testing.T, workflowType, cells, ksWorkflow string, reverse bool) { - if workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_MoveTables)] && - workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_Reshard)] { + if workflowType != binlogdatapb.VReplicationWorkflowType_MoveTables.String() && + workflowType != binlogdatapb.VReplicationWorkflowType_Reshard.String() { require.FailNowf(t, "Invalid workflow type for SwitchTraffic, must be MoveTables or Reshard", "workflow type specified: %s", workflowType) } @@ -1386,6 +1489,7 @@ func switchReads(t *testing.T, workflowType, cells, ksWorkflow string, reverse b if reverse { command = "ReverseTraffic" } + ensureCanSwitch(t, workflowType, cells, ksWorkflow) output, err = vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "--cells="+cells, "--tablet_types=rdonly", command, ksWorkflow) require.NoError(t, err, fmt.Sprintf("%s Error: %s: %s", command, err, output)) @@ -1394,6 +1498,35 @@ func switchReads(t *testing.T, workflowType, cells, ksWorkflow string, reverse b require.NoError(t, err, fmt.Sprintf("%s Error: %s: %s", command, err, output)) } +func switchWrites(t *testing.T, workflowType, ksWorkflow string, reverse bool) { + if workflowType != binlogdatapb.VReplicationWorkflowType_MoveTables.String() && + workflowType != binlogdatapb.VReplicationWorkflowType_Reshard.String() { + require.FailNowf(t, "Invalid workflow type for SwitchTraffic, must be MoveTables or Reshard", + "workflow type specified: %s", workflowType) + } + command := "SwitchTraffic" + if reverse { + command = "ReverseTraffic" + } + const SwitchWritesTimeout = "91s" // max: 3 tablet picker 30s waits + 1 + ensureCanSwitch(t, workflowType, "", ksWorkflow) + // Use vtctldclient for MoveTables SwitchTraffic ~ 50% of the time. + if workflowType == binlogdatapb.VReplicationWorkflowType_MoveTables.String() && time.Now().Second()%2 == 0 { + parts := strings.Split(ksWorkflow, ".") + require.Equal(t, 2, len(parts)) + moveTablesAction(t, command, defaultCellName, parts[1], sourceKs, parts[0], "", "--timeout="+SwitchWritesTimeout, "--tablet-types=primary") + return + } + output, err := vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "--tablet_types=primary", + "--timeout="+SwitchWritesTimeout, "--initialize-target-sequences", command, ksWorkflow) + if output != "" { + fmt.Printf("Output of switching writes with vtctlclient for %s:\n++++++\n%s\n--------\n", ksWorkflow, output) + } + // printSwitchWritesExtraDebug is useful when debugging failures in Switch writes due to corner cases/races + _ = printSwitchWritesExtraDebug + require.NoError(t, err, fmt.Sprintf("Switch writes Error: %s: %s", err, output)) +} + func switchWritesDryRun(t *testing.T, workflowType, ksWorkflow string, dryRunResults []string) { if workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_MoveTables)] && workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_Reshard)] { @@ -1420,9 +1553,9 @@ func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) { productTab := productKs.Shards["0"].Tablets["zone1-100"].Vttablet tabs := []*cluster.VttabletProcess{productTab, customerTab1, customerTab2} queries := []string{ - "select id, workflow, pos, stop_pos, cell, tablet_types, time_updated, transaction_timestamp, state, message from _vt.vreplication", - "select * from _vt.copy_state", - "select * from _vt.resharding_journal", + sqlparser.BuildParsedQuery("select id, workflow, pos, stop_pos, cell, tablet_types, time_updated, transaction_timestamp, state, message from %s.vreplication", sidecarDBIdentifier).Query, + sqlparser.BuildParsedQuery("select * from %s.copy_state", sidecarDBIdentifier).Query, + sqlparser.BuildParsedQuery("select * from %s.resharding_journal", sidecarDBIdentifier).Query, } for _, tab := range tabs { for _, query := range queries { @@ -1436,27 +1569,6 @@ func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) { } } -func switchWrites(t *testing.T, workflowType, ksWorkflow string, reverse bool) { - if workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_MoveTables)] && - workflowType != binlogdatapb.VReplicationWorkflowType_name[int32(binlogdatapb.VReplicationWorkflowType_Reshard)] { - require.FailNowf(t, "Invalid workflow type for SwitchTraffic, must be MoveTables or Reshard", - "workflow type specified: %s", workflowType) - } - command := "SwitchTraffic" - if reverse { - command = "ReverseTraffic" - } - const SwitchWritesTimeout = "91s" // max: 3 tablet picker 30s waits + 1 - output, err := vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "--tablet_types=primary", - "--timeout="+SwitchWritesTimeout, command, ksWorkflow) - if output != "" { - fmt.Printf("Output of switching writes for %s:\n++++++\n%s\n--------\n", ksWorkflow, output) - } - // printSwitchWritesExtraDebug is useful when debugging failures in Switch writes due to corner cases/races - _ = printSwitchWritesExtraDebug - require.NoError(t, err, fmt.Sprintf("Switch writes Error: %s: %s", err, output)) -} - // generateInnoDBRowHistory generates at least maxSourceTrxHistory rollback segment entries. // This allows us to confirm two behaviors: // 1. MoveTables blocks on starting its first copy phase until we rollback @@ -1493,6 +1605,7 @@ func generateInnoDBRowHistory(t *testing.T, sourceKS string, neededTrxHistory in // expected length. func waitForInnoDBHistoryLength(t *testing.T, tablet *cluster.VttabletProcess, expectedLength int64) { timer := time.NewTimer(defaultTimeout) + defer timer.Stop() historyLen := int64(0) for { res, err := tablet.QueryTablet(historyLenQuery, tablet.Keyspace, false) diff --git a/go/test/endtoend/vreplication/vreplication_test_env.go b/go/test/endtoend/vreplication/vreplication_test_env.go index 24f4f30e9e4..4500a98868c 100644 --- a/go/test/endtoend/vreplication/vreplication_test_env.go +++ b/go/test/endtoend/vreplication/vreplication_test_env.go @@ -19,14 +19,14 @@ package vreplication var dryRunResultsSwitchWritesCustomerShard = []string{ "Lock keyspace product", "Lock keyspace customer", - "Stop writes on keyspace product, tables [Lead,Lead-1,customer,db_order_test]:", + "Stop writes on keyspace product, tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order]:", "/ Keyspace product, Shard 0 at Position", "Wait for VReplication on stopped streams to catchup for up to 30s", "Create reverse replication workflow p2c_reverse", "Create journal entries on source databases", - "Enable writes on keyspace customer tables [Lead,Lead-1,customer,db_order_test]", + "Enable writes on keyspace customer tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order]", "Switch routing from keyspace product to keyspace customer", - "Routing rules for tables [Lead,Lead-1,customer,db_order_test] will be updated", + "Routing rules for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] will be updated", "Switch writes completed, freeze and delete vreplication streams on:", " tablet 200 ", " tablet 300 ", @@ -41,8 +41,8 @@ var dryRunResultsSwitchWritesCustomerShard = []string{ var dryRunResultsReadCustomerShard = []string{ "Lock keyspace product", - "Switch reads for tables [Lead,Lead-1,customer,db_order_test] to keyspace customer for tablet types [RDONLY,REPLICA]", - "Routing rules for tables [Lead,Lead-1,customer,db_order_test] will be updated", + "Switch reads for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] to keyspace customer for tablet types [RDONLY,REPLICA]", + "Routing rules for tables [Lead,Lead-1,blüb_tbl,customer,db_order_test,geom_tbl,json_tbl,loadtest,reftable,vdiff_order] will be updated", "Unlock keyspace product", } @@ -85,3 +85,9 @@ var dryRunResultsSwitchWritesM2m3 = []string{ " Keyspace merchant-type, Shard c0-, Tablet 1800, Workflow m2m3, DbName vt_merchant-type", "Unlock keyspace merchant-type", } + +var dryRunResultsSwitchReadM2m3 = []string{ + "Lock keyspace merchant-type", + "Switch reads from keyspace merchant-type to keyspace merchant-type for shards -80,80- to shards -40,40-c0,c0-", + "Unlock keyspace merchant-type", +} diff --git a/go/test/endtoend/vreplication/vschema_load_test.go b/go/test/endtoend/vreplication/vschema_load_test.go index 5d20d7f2d32..a5cac4c68f8 100644 --- a/go/test/endtoend/vreplication/vschema_load_test.go +++ b/go/test/endtoend/vreplication/vschema_load_test.go @@ -55,7 +55,7 @@ func TestVSchemaChangesUnderLoad(t *testing.T) { require.NotNil(t, vtgate) err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") require.NoError(t, err) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 1) + vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 1, 30*time.Second) vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go index fa95a28dbb7..5c5e6a80130 100644 --- a/go/test/endtoend/vreplication/vstream_test.go +++ b/go/test/endtoend/vreplication/vstream_test.go @@ -33,7 +33,6 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" _ "vitess.io/vitess/go/vt/vtctl/grpcvtctlclient" - "vitess.io/vitess/go/vt/vtgate/evalengine" _ "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn" "vitess.io/vitess/go/vt/vtgate/vtgateconn" @@ -58,7 +57,7 @@ func testVStreamWithFailover(t *testing.T, failover bool) { vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) vtgate = defaultCell.Vtgates[0] require.NotNil(t, vtgate) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 3) + vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 3, 30*time.Second) vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() @@ -171,13 +170,14 @@ func testVStreamWithFailover(t *testing.T, failover bool) { qr := execVtgateQuery(t, vtgateConn, "product", "select count(*) from customer") require.NotNil(t, qr) // total number of row events found by the VStream API should match the rows inserted - insertedRows, err := evalengine.ToInt64(qr.Rows[0][0]) + insertedRows, err := qr.Rows[0][0].ToCastInt64() require.NoError(t, err) require.Equal(t, insertedRows, numRowEvents) } const schemaUnsharded = ` create table customer_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; +insert into customer_seq(id, next_id, cache) values(0, 1, 3); ` const vschemaUnsharded = ` { @@ -189,7 +189,7 @@ const vschemaUnsharded = ` } ` const schemaSharded = ` -create table customer(cid int, name varbinary(128), primary key(cid)) CHARSET=utf8mb4; +create table customer(cid int, name varbinary(128), primary key(cid)) TABLESPACE innodb_system CHARSET=utf8mb4; ` const vschemaSharded = ` { @@ -219,14 +219,18 @@ const vschemaSharded = ` func insertRow(keyspace, table string, id int) { vtgateConn.ExecuteFetch(fmt.Sprintf("use %s;", keyspace), 1000, false) vtgateConn.ExecuteFetch("begin", 1000, false) - vtgateConn.ExecuteFetch(fmt.Sprintf("insert into %s (cid, name) values (%d, '%s%d')", table, id+100, table, id), 1000, false) + _, err := vtgateConn.ExecuteFetch(fmt.Sprintf("insert into %s (name) values ('%s%d')", table, table, id), 1000, false) + if err != nil { + log.Infof("error inserting row %d: %v", id, err) + } vtgateConn.ExecuteFetch("commit", 1000, false) } type numEvents struct { - numRowEvents, numJournalEvents int64 - numLessThan80Events, numGreaterThan80Events int64 - numLessThan40Events, numGreaterThan40Events int64 + numRowEvents, numJournalEvents int64 + numLessThan80Events, numGreaterThan80Events int64 + numLessThan40Events, numGreaterThan40Events int64 + numShard0BeforeReshardEvents, numShard0AfterReshardEvents int64 } // tests the StopOnReshard flag @@ -367,7 +371,7 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID switch tickCount { case 1: reshard(t, "sharded", "customer", "vstreamStopOnReshard", "-80,80-", - "-40,40-", baseTabletID+400, nil, nil, nil, defaultCellName, 1) + "-40,40-", baseTabletID+400, nil, nil, nil, nil, defaultCellName, 1) case 60: done = true } @@ -378,6 +382,150 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID return &ne } +// Validate that we can continue streaming from multiple keyspaces after first copying some tables and then resharding one of the keyspaces +// Ensure that there are no missing row events during the resharding process. +func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEvents { + defaultCellName := "zone1" + allCellNames = defaultCellName + allCells := []string{allCellNames} + vc = NewVitessCluster(t, "VStreamCopyMultiKeyspaceReshard", allCells, mainClusterConfig) + + require.NotNil(t, vc) + ogdr := defaultReplicas + defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets + defer func(dr int) { defaultReplicas = dr }(ogdr) + + defer vc.TearDown(t) + + defaultCell = vc.Cells[defaultCellName] + vc.AddKeyspace(t, []*Cell{defaultCell}, "unsharded", "0", vschemaUnsharded, schemaUnsharded, defaultReplicas, defaultRdonly, baseTabletID+100, nil) + vtgate = defaultCell.Vtgates[0] + require.NotNil(t, vtgate) + vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "unsharded", "0"), 1, 30*time.Second) + + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + verifyClusterHealth(t, vc) + + vc.AddKeyspace(t, []*Cell{defaultCell}, "sharded", "-80,80-", vschemaSharded, schemaSharded, defaultReplicas, defaultRdonly, baseTabletID+200, nil) + + ctx := context.Background() + vstreamConn, err := vtgateconn.Dial(ctx, fmt.Sprintf("%s:%d", vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateGrpcPort)) + if err != nil { + log.Fatal(err) + } + defer vstreamConn.Close() + vgtid := &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{{ + Keyspace: "/.*", + }}} + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + // We want to confirm that the following two tables are streamed. + // 1. the customer_seq in the unsharded keyspace + // 2. the customer table in the sharded keyspace + Match: "/customer.*/", + }}, + } + flags := &vtgatepb.VStreamFlags{} + done := false + + id := 1000 + // First goroutine that keeps inserting rows into the table being streamed until a minute after reshard + // We should keep getting events on the new shards + go func() { + for { + if done { + return + } + id++ + time.Sleep(1 * time.Second) + insertRow("sharded", "customer", id) + } + }() + // stream events from the VStream API + var ne numEvents + reshardDone := false + go func() { + var reader vtgateconn.VStreamReader + reader, err = vstreamConn.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, filter, flags) + require.NoError(t, err) + for { + evs, err := reader.Recv() + + switch err { + case nil: + for _, ev := range evs { + switch ev.Type { + case binlogdatapb.VEventType_ROW: + shard := ev.RowEvent.Shard + switch shard { + case "0": + if reshardDone { + ne.numShard0AfterReshardEvents++ + } else { + ne.numShard0BeforeReshardEvents++ + } + case "-80": + ne.numLessThan80Events++ + case "80-": + ne.numGreaterThan80Events++ + case "-40": + ne.numLessThan40Events++ + case "40-": + ne.numGreaterThan40Events++ + } + ne.numRowEvents++ + case binlogdatapb.VEventType_JOURNAL: + ne.numJournalEvents++ + } + } + case io.EOF: + log.Infof("Stream Ended") + done = true + default: + log.Errorf("Returned err %v", err) + done = true + } + if done { + return + } + } + }() + + ticker := time.NewTicker(1 * time.Second) + tickCount := 0 + for { + <-ticker.C + tickCount++ + switch tickCount { + case 1: + reshard(t, "sharded", "customer", "vstreamCopyMultiKeyspaceReshard", "-80,80-", "-40,40-", baseTabletID+400, nil, nil, nil, nil, defaultCellName, 1) + reshardDone = true + case 60: + done = true + } + if done { + break + } + } + log.Infof("ne=%v", ne) + + // The number of row events streamed by the VStream API should match the number of rows inserted. + // This is important for sharded tables, where we need to ensure that no row events are missed during the resharding process. + // + // On the other hand, we don't verify the exact number of row events for the unsharded keyspace + // because the keyspace remains unsharded and the number of rows in the customer_seq table is always 1. + // We believe that checking the number of row events for the unsharded keyspace, which should always be greater than 0 before and after resharding, + // is sufficient to confirm that the resharding of one keyspace does not affect another keyspace, while keeping the test straightforward. + customerResult := execVtgateQuery(t, vtgateConn, "sharded", "select count(*) from customer") + insertedCustomerRows, err := customerResult.Rows[0][0].ToCastInt64() + require.NoError(t, err) + require.Equal(t, insertedCustomerRows, ne.numLessThan80Events+ne.numGreaterThan80Events+ne.numLessThan40Events+ne.numGreaterThan40Events) + return ne +} + func TestVStreamFailover(t *testing.T) { testVStreamWithFailover(t, true) } @@ -409,3 +557,15 @@ func TestVStreamWithKeyspacesToWatch(t *testing.T) { testVStreamWithFailover(t, false) } + +func TestVStreamCopyMultiKeyspaceReshard(t *testing.T) { + ne := testVStreamCopyMultiKeyspaceReshard(t, 3000) + require.Equal(t, int64(0), ne.numJournalEvents) + require.NotZero(t, ne.numRowEvents) + require.NotZero(t, ne.numShard0BeforeReshardEvents) + require.NotZero(t, ne.numShard0AfterReshardEvents) + require.NotZero(t, ne.numLessThan80Events) + require.NotZero(t, ne.numGreaterThan80Events) + require.NotZero(t, ne.numLessThan40Events) + require.NotZero(t, ne.numGreaterThan40Events) +} diff --git a/go/test/endtoend/vreplication/wrappers_test.go b/go/test/endtoend/vreplication/wrappers_test.go new file mode 100644 index 00000000000..6bd0bbb19d8 --- /dev/null +++ b/go/test/endtoend/vreplication/wrappers_test.go @@ -0,0 +1,206 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "math/rand" + "strconv" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/log" +) + +type moveTablesFlavor int + +const ( + moveTablesFlavorRandom moveTablesFlavor = iota + moveTablesFlavorVtctl + moveTablesFlavorVtctld +) + +var moveTablesFlavors = []moveTablesFlavor{ + moveTablesFlavorVtctl, + moveTablesFlavorVtctld, +} + +type moveTables struct { + vc *VitessCluster + workflowName string + targetKeyspace string + sourceKeyspace string + tables string + atomicCopy bool + sourceShards string +} + +type iMoveTables interface { + Create() + Show() + SwitchReads() + SwitchWrites() + SwitchReadsAndWrites() + ReverseReadsAndWrites() + Cancel() + Complete() + Flavor() string +} + +func newMoveTables(vc *VitessCluster, mt *moveTables, flavor moveTablesFlavor) iMoveTables { + mt.vc = vc + var mt2 iMoveTables + if flavor == moveTablesFlavorRandom { + flavor = moveTablesFlavors[rand.Intn(len(moveTablesFlavors))] + } + switch flavor { + case moveTablesFlavorVtctl: + mt2 = newVtctlMoveTables(mt) + case moveTablesFlavorVtctld: + mt2 = newVtctldMoveTables(mt) + default: + panic("unreachable") + } + log.Infof("Using moveTables flavor: %s", mt2.Flavor()) + return mt2 +} + +type VtctlMoveTables struct { + *moveTables +} + +func (vmt *VtctlMoveTables) Flavor() string { + return "vtctl" +} + +func newVtctlMoveTables(mt *moveTables) *VtctlMoveTables { + return &VtctlMoveTables{mt} +} + +func (vmt *VtctlMoveTables) Create() { + log.Infof("vmt is %+v", vmt.vc, vmt.tables) + err := tstWorkflowExec(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, + vmt.tables, workflowActionCreate, "", vmt.sourceShards, "", vmt.atomicCopy) + require.NoError(vmt.vc.t, err) +} + +func (vmt *VtctlMoveTables) SwitchReadsAndWrites() { + err := tstWorkflowExec(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, + vmt.tables, workflowActionSwitchTraffic, "", "", "", vmt.atomicCopy) + require.NoError(vmt.vc.t, err) +} + +func (vmt *VtctlMoveTables) ReverseReadsAndWrites() { + err := tstWorkflowExec(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, + vmt.tables, workflowActionReverseTraffic, "", "", "", vmt.atomicCopy) + require.NoError(vmt.vc.t, err) +} + +func (vmt *VtctlMoveTables) Show() { + //TODO implement me + panic("implement me") +} + +func (vmt *VtctlMoveTables) SwitchReads() { + //TODO implement me + panic("implement me") +} + +func (vmt *VtctlMoveTables) SwitchWrites() { + //TODO implement me + panic("implement me") +} + +func (vmt *VtctlMoveTables) Cancel() { + err := tstWorkflowExec(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, + vmt.tables, workflowActionCancel, "", "", "", vmt.atomicCopy) + require.NoError(vmt.vc.t, err) +} + +func (vmt *VtctlMoveTables) Complete() { + //TODO implement me + panic("implement me") +} + +var _ iMoveTables = (*VtctldMoveTables)(nil) + +type VtctldMoveTables struct { + *moveTables +} + +func newVtctldMoveTables(mt *moveTables) *VtctldMoveTables { + return &VtctldMoveTables{mt} +} + +func (v VtctldMoveTables) Flavor() string { + return "vtctld" +} + +func (v VtctldMoveTables) exec(args ...string) { + args2 := []string{"MoveTables", "--workflow=" + v.workflowName, "--target-keyspace=" + v.targetKeyspace} + args2 = append(args2, args...) + if err := vc.VtctldClient.ExecuteCommand(args2...); err != nil { + v.vc.t.Fatalf("failed to create MoveTables workflow: %v", err) + } +} + +func (v VtctldMoveTables) Create() { + args := []string{"Create", "--source-keyspace=" + v.sourceKeyspace} + if v.tables != "" { + args = append(args, "--tables="+v.tables) + } else { + args = append(args, "--all-tables") + } + if v.atomicCopy { + args = append(args, "--atomic-copy="+strconv.FormatBool(v.atomicCopy)) + } + if v.sourceShards != "" { + args = append(args, "--source-shards="+v.sourceShards) + } + v.exec(args...) +} + +func (v VtctldMoveTables) SwitchReadsAndWrites() { + v.exec("SwitchTraffic") +} + +func (v VtctldMoveTables) ReverseReadsAndWrites() { + v.exec("ReverseTraffic") +} + +func (v VtctldMoveTables) Show() { + //TODO implement me + panic("implement me") +} + +func (v VtctldMoveTables) SwitchReads() { + //TODO implement me + panic("implement me") +} + +func (v VtctldMoveTables) SwitchWrites() { + //TODO implement me + panic("implement me") +} + +func (v VtctldMoveTables) Cancel() { + v.exec("Cancel") +} + +func (v VtctldMoveTables) Complete() { + //TODO implement me + panic("implement me") +} diff --git a/go/test/endtoend/vtgate/errors_as_warnings/main_test.go b/go/test/endtoend/vtgate/errors_as_warnings/main_test.go index 97c73f5f458..a2446c1df87 100644 --- a/go/test/endtoend/vtgate/errors_as_warnings/main_test.go +++ b/go/test/endtoend/vtgate/errors_as_warnings/main_test.go @@ -24,6 +24,7 @@ import ( "strings" "testing" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/require" @@ -147,8 +148,8 @@ func TestScatterErrsAsWarns(t *testing.T) { // invalid_field should throw error and not warning _, err = mode.conn.ExecuteFetch("SELECT /*vt+ PLANNER=Gen4 SCATTER_ERRORS_AS_WARNINGS */ invalid_field from t1;", 1, false) require.Error(t, err) - serr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) - require.Equal(t, mysql.ERBadFieldError, serr.Number(), serr.Error()) + serr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) + require.Equal(t, sqlerror.ERBadFieldError, serr.Number(), serr.Error()) }) } } diff --git a/go/test/endtoend/vtgate/foreignkey/fk_test.go b/go/test/endtoend/vtgate/foreignkey/fk_test.go new file mode 100644 index 00000000000..d50e66cbd13 --- /dev/null +++ b/go/test/endtoend/vtgate/foreignkey/fk_test.go @@ -0,0 +1,795 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package foreignkey + +import ( + "context" + "io" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vtgate/vtgateconn" +) + +// TestInsertWithFK tests that insertions work as expected when foreign key management is enabled in Vitess. +func TestInsertWithFK(t *testing.T) { + mcmp, closer := start(t) + conn := mcmp.VtConn + defer closer() + + // insert some data. + utils.Exec(t, conn, `insert into t1(id, col) values (100, 123),(10, 12),(1, 13),(1000, 1234)`) + + // Verify that inserting data into a table that has shard scoped foreign keys works. + utils.Exec(t, conn, `insert into t2(id, col) values (100, 125), (1, 132)`) + + // Verify that insertion fails if the data doesn't follow the fk constraint. + _, err := utils.ExecAllowError(t, conn, `insert into t2(id, col) values (1310, 125)`) + assert.ErrorContains(t, err, "Cannot add or update a child row: a foreign key constraint fails") + + // Verify that insertion fails if the table has cross-shard foreign keys (even if the data follows the constraints). + _, err = utils.ExecAllowError(t, conn, `insert into t3(id, col) values (100, 100)`) + assert.ErrorContains(t, err, "VT12002: unsupported: cross-shard foreign keys") + + // insert some data in a table with multicol vindex. + utils.Exec(t, conn, `insert into multicol_tbl1(cola, colb, colc, msg) values (100, 'a', 'b', 'msg'), (101, 'c', 'd', 'msg2')`) + + // Verify that inserting data into a table that has shard scoped multi-column foreign keys works. + utils.Exec(t, conn, `insert into multicol_tbl2(cola, colb, colc, msg) values (100, 'a', 'b', 'msg3')`) + + // Verify that insertion fails if the data doesn't follow the fk constraint. + _, err = utils.ExecAllowError(t, conn, `insert into multicol_tbl2(cola, colb, colc, msg) values (103, 'c', 'd', 'msg2')`) + assert.ErrorContains(t, err, "Cannot add or update a child row: a foreign key constraint fails") +} + +// TestDeleteWithFK tests that deletions work as expected when foreign key management is enabled in Vitess. +func TestDeleteWithFK(t *testing.T) { + mcmp, closer := start(t) + conn := mcmp.VtConn + defer closer() + + // insert some data. + utils.Exec(t, conn, `insert into t1(id, col) values (100, 123),(10, 12),(1, 13),(1000, 1234)`) + utils.Exec(t, conn, `insert into t2(id, col) values (100, 125), (1, 132)`) + utils.Exec(t, conn, `insert into t4(id, col) values (1, 321)`) + utils.Exec(t, conn, `insert into multicol_tbl1(cola, colb, colc, msg) values (100, 'a', 'b', 'msg'), (101, 'c', 'd', 'msg2')`) + utils.Exec(t, conn, `insert into multicol_tbl2(cola, colb, colc, msg) values (100, 'a', 'b', 'msg3')`) + + // child foreign key is shard scoped. Query will fail at mysql due to On Delete Restrict. + _, err := utils.ExecAllowError(t, conn, `delete from t2 where col = 132`) + assert.ErrorContains(t, err, "Cannot delete or update a parent row: a foreign key constraint fails") + + // child row does not exist so query will succeed. + qr := utils.Exec(t, conn, `delete from t2 where col = 125`) + assert.EqualValues(t, 1, qr.RowsAffected) + + // table's child foreign key has cross shard fk, so query will fail at vtgate. + _, err = utils.ExecAllowError(t, conn, `delete from t1 where id = 42`) + assert.ErrorContains(t, err, "VT12002: unsupported: cross-shard foreign keys (errno 1235) (sqlstate 42000)") + + // child foreign key is cascade, so this should work as expected. + qr = utils.Exec(t, conn, `delete from multicol_tbl1 where cola = 100`) + assert.EqualValues(t, 1, qr.RowsAffected) + // we also verify that the rows in the child table were deleted. + qr = utils.Exec(t, conn, `select * from multicol_tbl2 where cola = 100`) + assert.Zero(t, qr.Rows) + + // Unsharded keyspace tests + utils.Exec(t, conn, `use uks`) + // insert some data. + utils.Exec(t, conn, `insert into u_t1(id, col1) values (100, 123), (10, 12), (1, 13), (1000, 1234)`) + utils.Exec(t, conn, `insert into u_t2(id, col2) values (342, 123), (19, 1234)`) + + // Delete from u_t1 which has a foreign key constraint to t2 with SET NULL type. + qr = utils.Exec(t, conn, `delete from u_t1 where id = 100`) + assert.EqualValues(t, 1, qr.RowsAffected) + // Verify the result in u_t2 as well + utils.AssertMatches(t, conn, `select * from u_t2`, `[[INT64(342) NULL] [INT64(19) INT64(1234)]]`) +} + +// TestUpdateWithFK tests that update work as expected when foreign key management is enabled in Vitess. +func TestUpdateWithFK(t *testing.T) { + mcmp, closer := start(t) + conn := mcmp.VtConn + defer closer() + + // insert some data. + utils.Exec(t, conn, `insert into t1(id, col) values (100, 123),(10, 12),(1, 13),(1000, 1234)`) + utils.Exec(t, conn, `insert into t2(id, col, mycol) values (100, 125, 'foo'), (1, 132, 'bar')`) + utils.Exec(t, conn, `insert into t4(id, col, t2_col, t2_mycol) values (1, 321, 132, 'bar')`) + utils.Exec(t, conn, `insert into t5(pk, sk, col1) values (1, 1, 1),(2, 1, 1),(3, 1, 10),(4, 1, 20),(5, 1, 30),(6, 1, 40)`) + utils.Exec(t, conn, `insert into t6(pk, sk, col1) values (10, 1, 1), (20, 1, 20)`) + + // parent foreign key is shard scoped and value is not updated. Query will succeed. + _ = utils.Exec(t, conn, `update t4 set t2_mycol = 'bar' where id = 1`) + + // parent foreign key is shard scoped and value does not exists in parent table. Query will fail at mysql due to On Update Restrict. + _, err := utils.ExecAllowError(t, conn, `update t4 set t2_mycol = 'foo' where id = 1`) + assert.ErrorContains(t, err, "Cannot add or update a child row: a foreign key constraint fails") + + // updating column which does not have foreign key constraint, so query will succeed. + qr := utils.Exec(t, conn, `update t4 set col = 20 where id = 1`) + assert.EqualValues(t, 1, qr.RowsAffected) + + // updating column which does not have foreign key constraint, so query will succeed. + _ = utils.Exec(t, conn, `update t2 set mycol = 'baz' where id = 100`) + assert.EqualValues(t, 1, qr.RowsAffected) + + // child table have restrict in shard scoped and value exists in parent table. + _ = utils.Exec(t, conn, `update t6 set col1 = 40 where pk = 20`) + assert.EqualValues(t, 1, qr.RowsAffected) + + // Unsharded keyspace tests + utils.Exec(t, conn, `use uks`) + // insert some data. + utils.Exec(t, conn, `insert into u_t1(id, col1) values (100, 123), (10, 12), (1, 13), (1000, 1234)`) + utils.Exec(t, conn, `insert into u_t2(id, col2) values (342, 123), (19, 1234)`) + utils.Exec(t, conn, `insert into u_t3(id, col3) values (32, 123), (1, 12)`) + + // Cascade update with a new value + _ = utils.Exec(t, conn, `update u_t1 set col1 = 2 where id = 100`) + // Verify the result in u_t2 and u_t3 as well. + utils.AssertMatches(t, conn, `select * from u_t2 order by id`, `[[INT64(19) INT64(1234)] [INT64(342) NULL]]`) + utils.AssertMatches(t, conn, `select * from u_t3 order by id`, `[[INT64(1) INT64(12)] [INT64(32) INT64(2)]]`) + + // Update u_t1 which has a foreign key constraint to u_t2 with SET NULL type, and to u_t3 with CASCADE type. + qr = utils.Exec(t, conn, `update u_t1 set col1 = 13 where id = 100`) + assert.EqualValues(t, 1, qr.RowsAffected) + // Verify the result in u_t2 and u_t3 as well. + utils.AssertMatches(t, conn, `select * from u_t2 order by id`, `[[INT64(19) INT64(1234)] [INT64(342) NULL]]`) + utils.AssertMatches(t, conn, `select * from u_t3 order by id`, `[[INT64(1) INT64(12)] [INT64(32) INT64(13)]]`) + + // Update u_t1 which has a foreign key constraint to u_t2 with SET NULL type, and to u_t3 with CASCADE type. + // This update however doesn't change the table. + qr = utils.Exec(t, conn, `update u_t1 set col1 = 1234 where id = 1000`) + assert.EqualValues(t, 0, qr.RowsAffected) + // Verify the result in u_t2 and u_t3 as well. + utils.AssertMatches(t, conn, `select * from u_t2 order by id`, `[[INT64(19) INT64(1234)] [INT64(342) NULL]]`) + utils.AssertMatches(t, conn, `select * from u_t3 order by id`, `[[INT64(1) INT64(12)] [INT64(32) INT64(13)]]`) +} + +// TestVstreamForFKBinLog tests that dml queries with fks are written with child row first approach in the binary logs. +func TestVstreamForFKBinLog(t *testing.T) { + vtgateConn, err := cluster.DialVTGate(context.Background(), t.Name(), vtgateGrpcAddress, "fk_user", "") + require.NoError(t, err) + defer vtgateConn.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := make(chan *binlogdatapb.VEvent) + runVStream(t, ctx, ch, vtgateConn) + + mcmp, closer := start(t) + conn := mcmp.VtConn + defer closer() + defer cancel() + + utils.Exec(t, conn, `use uks`) + + // insert some data. + utils.Exec(t, conn, `insert into u_t1(id, col1) values (1,2), (11,4), (111,6)`) + utils.Exec(t, conn, `insert into u_t2(id, col2) values (2,2), (22,4)`) + utils.Exec(t, conn, `insert into u_t3(id, col3) values (33,4), (333,6)`) + // drain 3 row events. + _ = drainEvents(t, ch, 3) + + tcases := []struct { + query string + events int + rowEvents []string + }{{ + query: `update u_t1 set col1 = 3 where id = 11`, + events: 3, + rowEvents: []string{ + `table_name:"uks.u_t3" row_changes:{before:{lengths:2 lengths:1 values:"334"} after:{lengths:2 lengths:1 values:"333"}} keyspace:"uks" shard:"0" flags:3`, + `table_name:"uks.u_t2" row_changes:{before:{lengths:2 lengths:1 values:"224"} after:{lengths:2 lengths:-1 values:"22"}} keyspace:"uks" shard:"0" flags:1`, + `table_name:"uks.u_t1" row_changes:{before:{lengths:2 lengths:1 values:"114"} after:{lengths:2 lengths:1 values:"113"}} keyspace:"uks" shard:"0" flags:1`, + }, + }, { + query: `update u_t1 set col1 = 5 where id = 11`, + events: 2, + rowEvents: []string{ + `table_name:"uks.u_t3" row_changes:{before:{lengths:2 lengths:1 values:"333"} after:{lengths:2 lengths:1 values:"335"}} keyspace:"uks" shard:"0" flags:3`, + `table_name:"uks.u_t1" row_changes:{before:{lengths:2 lengths:1 values:"113"} after:{lengths:2 lengths:1 values:"115"}} keyspace:"uks" shard:"0" flags:1`, + }, + }, { + query: `delete from u_t1 where col1 = 6`, + events: 2, + rowEvents: []string{ + `table_name:"uks.u_t3" row_changes:{before:{lengths:3 lengths:1 values:"3336"}} keyspace:"uks" shard:"0" flags:1`, + `table_name:"uks.u_t1" row_changes:{before:{lengths:3 lengths:1 values:"1116"}} keyspace:"uks" shard:"0" flags:1`, + }, + }, { + query: `update u_t1 set col1 = null where id = 11`, + events: 2, + rowEvents: []string{ + `table_name:"uks.u_t3" row_changes:{before:{lengths:2 lengths:1 values:"335"} after:{lengths:2 lengths:-1 values:"33"}} keyspace:"uks" shard:"0" flags:3`, + `table_name:"uks.u_t1" row_changes:{before:{lengths:2 lengths:1 values:"115"} after:{lengths:2 lengths:-1 values:"11"}} keyspace:"uks" shard:"0" flags:1`, + }, + }, { + query: `delete from u_t1 where id = 11`, + events: 1, + rowEvents: []string{ + `table_name:"uks.u_t1" row_changes:{before:{lengths:2 lengths:-1 values:"11"}} keyspace:"uks" shard:"0" flags:1`, + }, + }} + for _, tcase := range tcases { + t.Run(tcase.query, func(t *testing.T) { + utils.Exec(t, conn, tcase.query) + // drain row events. + rowEvents := drainEvents(t, ch, tcase.events) + assert.ElementsMatch(t, tcase.rowEvents, rowEvents) + }) + } +} + +func runVStream(t *testing.T, ctx context.Context, ch chan *binlogdatapb.VEvent, vtgateConn *vtgateconn.VTGateConn) { + vgtid := &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{ + {Keyspace: unshardedKs, Shard: "0", Gtid: "current"}, + }} + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/u.*", + }}, + } + vReader, err := vtgateConn.VStream(ctx, topodatapb.TabletType_REPLICA, vgtid, filter, nil) + require.NoError(t, err) + + go func() { + for { + evs, err := vReader.Recv() + if err == io.EOF || ctx.Err() != nil { + return + } + require.NoError(t, err) + + for _, ev := range evs { + if ev.Type == binlogdatapb.VEventType_ROW { + ch <- ev + } + } + } + }() +} + +func drainEvents(t *testing.T, ch chan *binlogdatapb.VEvent, count int) []string { + var rowEvents []string + for i := 0; i < count; i++ { + select { + case re := <-ch: + rowEvents = append(rowEvents, re.RowEvent.String()) + case <-time.After(10 * time.Second): + t.Fatalf("timeout waiting for event number: %d", i+1) + } + } + return rowEvents +} + +// TestFkScenarios tests the various foreign key scenarios with different constraints +// and makes sure that Vitess works with them as expected. All the tables are present in both sharded and unsharded keyspace +// and all the foreign key constraints are cross-shard ones for the sharded keyspace. +// The test has 4 independent Schemas that are used for testing - +/* + * fk_t1 + * │ + * │ On Delete Restrict + * │ On Update Restrict + * ▼ + * ┌────────────────fk_t2────────────────┐ + * │ │ + * │On Delete Set Null │ On Delete Set Null + * │On Update Set Null │ On Update Set Null + * ▼ ▼ + * fk_t7 fk_t3───────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Set Null │ │ On Update Set Null + * On Update Set Null │ │ + * ▼ ▼ + * fk_t4 fk_t6 + * │ + * │ + * On Delete Restrict │ + * On Update Restrict │ + * │ + * ▼ + * fk_t5 + */ +/* + * fk_t10 + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_t11──────────────────┐ + * │ │ + * │ │ On Delete Restrict + * On Delete Cascade │ │ On Update Restrict + * On Update Cascade │ │ + * │ │ + * ▼ ▼ + * fk_t12 fk_t13 + */ +/* + * fk_t15 + * │ + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_t16 + * │ + * On Delete Set Null │ + * On Update Set Null │ + * │ + * ▼ + * fk_t17──────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Cascade │ │ On Update Set Null + * On Update Cascade │ │ + * │ │ + * ▼ ▼ + * fk_t18 fk_t19 + */ +/* + Self referenced foreign key from col2 to col in fk_t20 +*/ +func TestFkScenarios(t *testing.T) { + // Wait for schema-tracking to be complete. + err := utils.WaitForColumn(t, clusterInstance.VtgateProcess, shardedKs, "fk_t1", "col") + require.NoError(t, err) + err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, shardedKs, "fk_t18", "col") + require.NoError(t, err) + err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, shardedKs, "fk_t11", "col") + require.NoError(t, err) + err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, unshardedKs, "fk_t1", "col") + require.NoError(t, err) + err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, unshardedKs, "fk_t18", "col") + require.NoError(t, err) + err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, unshardedKs, "fk_t11", "col") + require.NoError(t, err) + + testcases := []struct { + name string + dataQueries []string + dmlQuery string + assertionQueries []string + }{ + { + name: "Insert failure due to parent key not existing", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 5)", + }, + dmlQuery: "insert into t2(id, col) values (1, 7)", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + }, + }, { + name: "Insert success", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7)", + }, + dmlQuery: "insert into fk_t2(id, col) values (1, 7)", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + }, + }, { + name: "Update failure with restrict foreign keys", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7)", + "insert into fk_t2(id, col) values (1, 7)", + }, + dmlQuery: "update fk_t1 set col = 5 where id = 1", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + }, + }, { + name: "Update success with restrict foreign keys", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7), (2, 9)", + "insert into fk_t2(id, col) values (1, 7)", + }, + dmlQuery: "update fk_t1 set col = 5 where id = 2", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + }, + }, { + name: "Delete failure with restrict foreign keys", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7)", + "insert into fk_t2(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t1 where id = 1", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + }, + }, { + name: "Delete success with restrict foreign keys", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7), (2, 9)", + "insert into fk_t2(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t1 where id = 2", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + }, + }, { + name: "Update success with set null foreign key", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7), (2, 9)", + "insert into fk_t2(id, col) values (1, 7), (2, 9)", + "insert into fk_t3(id, col) values (1, 7), (2, 9)", + "insert into fk_t6(id, col) values (1, 7)", + }, + dmlQuery: "update fk_t3 set col = 9 where id = 1", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + "select * from fk_t3 order by id", + "select * from fk_t6 order by id", + }, + }, { + name: "Update failure with set null foreign key with child having a restrict foreign key", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7), (2, 9)", + "insert into fk_t2(id, col) values (1, 7), (2, 9)", + "insert into fk_t3(id, col) values (1, 7), (2, 9)", + "insert into fk_t4(id, col) values (1, 7)", + "insert into fk_t5(id, col) values (1, 7)", + }, + dmlQuery: "update fk_t3 set col = 9 where id = 1", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + "select * from fk_t3 order by id", + "select * from fk_t4 order by id", + "select * from fk_t5 order by id", + }, + }, { + name: "Update success with cascaded set nulls", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7), (2, 9)", + "insert into fk_t2(id, col) values (1, 7), (2, 9)", + "insert into fk_t3(id, col) values (1, 7), (2, 9)", + "insert into fk_t4(id, col) values (1, 7), (2, 9)", + "insert into fk_t6(id, col) values (1, 7), (2, 9)", + }, + dmlQuery: "update fk_t2 set col = 9 where id = 1", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + "select * from fk_t3 order by id", + "select * from fk_t4 order by id", + "select * from fk_t6 order by id", + }, + }, { + name: "Delete success with set null foreign key", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7), (2, 9)", + "insert into fk_t2(id, col) values (1, 7), (2, 9)", + "insert into fk_t3(id, col) values (1, 7), (2, 9)", + "insert into fk_t6(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t3 where id = 1", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + "select * from fk_t3 order by id", + "select * from fk_t6 order by id", + }, + }, { + name: "Delete failure with set null foreign key with child having a restrict foreign key", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7), (2, 9)", + "insert into fk_t2(id, col) values (1, 7), (2, 9)", + "insert into fk_t3(id, col) values (1, 7), (2, 9)", + "insert into fk_t4(id, col) values (1, 7)", + "insert into fk_t5(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t3 where id = 1", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + "select * from fk_t3 order by id", + "select * from fk_t4 order by id", + "select * from fk_t5 order by id", + }, + }, { + name: "Delete success with cascaded set nulls", + dataQueries: []string{ + "insert into fk_t1(id, col) values (1, 7), (2, 9)", + "insert into fk_t2(id, col) values (1, 7), (2, 9)", + "insert into fk_t3(id, col) values (1, 7), (2, 9)", + "insert into fk_t4(id, col) values (1, 7), (2, 9)", + "insert into fk_t6(id, col) values (1, 7), (2, 9)", + }, + dmlQuery: "delete from fk_t2 where id = 1", + assertionQueries: []string{ + "select * from fk_t1 order by id", + "select * from fk_t2 order by id", + "select * from fk_t3 order by id", + "select * from fk_t4 order by id", + "select * from fk_t6 order by id", + }, + }, { + name: "Update success with cascade foreign key", + dataQueries: []string{ + "insert into fk_t10(id, col) values (1, 7), (2, 9)", + "insert into fk_t11(id, col) values (1, 7)", + }, + dmlQuery: "update fk_t10 set col = 5 where id = 1", + assertionQueries: []string{ + "select * from fk_t10 order by id", + "select * from fk_t11 order by id", + }, + }, { + name: "Update failure with cascade foreign key with child having a restrict foreign key", + dataQueries: []string{ + "insert into fk_t10(id, col) values (1, 7), (2, 9)", + "insert into fk_t11(id, col) values (1, 7)", + "insert into fk_t13(id, col) values (1, 7)", + }, + dmlQuery: "update fk_t10 set col = 5 where id = 1", + assertionQueries: []string{ + "select * from fk_t10 order by id", + "select * from fk_t11 order by id", + "select * from fk_t13 order by id", + }, + }, { + name: "Update success with cascaded cascade foreign keys", + dataQueries: []string{ + "insert into fk_t10(id, col) values (1, 7), (2, 9)", + "insert into fk_t11(id, col) values (1, 7)", + "insert into fk_t12(id, col) values (1, 7)", + }, + dmlQuery: "update fk_t10 set col = 5 where id = 1", + assertionQueries: []string{ + "select * from fk_t10 order by id", + "select * from fk_t11 order by id", + "select * from fk_t12 order by id", + }, + }, { + name: "Delete success with cascade foreign key", + dataQueries: []string{ + "insert into fk_t10(id, col) values (1, 7), (2, 9)", + "insert into fk_t11(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t10 where id = 1", + assertionQueries: []string{ + "select * from fk_t10 order by id", + "select * from fk_t11 order by id", + }, + }, { + name: "Delete failure with cascade foreign key with child having a restrict foreign key", + dataQueries: []string{ + "insert into fk_t10(id, col) values (1, 7), (2, 9)", + "insert into fk_t11(id, col) values (1, 7)", + "insert into fk_t13(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t10 where id = 1", + assertionQueries: []string{ + "select * from fk_t10 order by id", + "select * from fk_t11 order by id", + "select * from fk_t13 order by id", + }, + }, { + name: "Delete success with cascaded cascade foreign keys", + dataQueries: []string{ + "insert into fk_t10(id, col) values (1, 7), (2, 9)", + "insert into fk_t11(id, col) values (1, 7)", + "insert into fk_t12(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t10 where id = 1", + assertionQueries: []string{ + "select * from fk_t10 order by id", + "select * from fk_t11 order by id", + "select * from fk_t12 order by id", + }, + }, { + name: "Delete success with set null to an update cascade foreign key", + dataQueries: []string{ + "insert into fk_t15(id, col) values (1, 7), (2, 9)", + "insert into fk_t16(id, col) values (1, 7), (2, 9)", + "insert into fk_t17(id, col) values (1, 7)", + "insert into fk_t18(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t16 where id = 1", + assertionQueries: []string{ + "select * from fk_t15 order by id", + "select * from fk_t16 order by id", + "select * from fk_t17 order by id", + "select * from fk_t18 order by id", + }, + }, { + name: "Delete success with cascade to delete with set null to an update set null foreign key", + dataQueries: []string{ + "insert into fk_t15(id, col) values (1, 7), (2, 9)", + "insert into fk_t16(id, col) values (1, 7), (2, 9)", + "insert into fk_t17(id, col) values (1, 7)", + "insert into fk_t19(id, col) values (1, 7)", + }, + dmlQuery: "delete from fk_t15 where id = 1", + assertionQueries: []string{ + "select * from fk_t15 order by id", + "select * from fk_t16 order by id", + "select * from fk_t17 order by id", + "select * from fk_t19 order by id", + }, + }, { + name: "Update success with cascade to an update set null to an update cascade foreign key", + dataQueries: []string{ + "insert into fk_t15(id, col) values (1, 7), (2, 9)", + "insert into fk_t16(id, col) values (1, 7), (2, 9)", + "insert into fk_t17(id, col) values (1, 7)", + "insert into fk_t18(id, col) values (1, 7)", + }, + dmlQuery: "update fk_t15 set col = 3 where id = 1", + assertionQueries: []string{ + "select * from fk_t15 order by id", + "select * from fk_t16 order by id", + "select * from fk_t17 order by id", + "select * from fk_t18 order by id", + }, + }, { + name: "Insert success for self-referenced foreign key", + dataQueries: []string{ + "insert into fk_t20(id, col, col2) values (1, 7, NULL)", + }, + dmlQuery: "insert into fk_t20(id, col, col2) values (2, 9, 7), (3, 10, 9)", + assertionQueries: []string{ + "select * from fk_t20 order by id", + }, + }, { + name: "Insert failure for self-referenced foreign key", + dataQueries: []string{ + "insert into fk_t20(id, col, col2) values (5, 7, NULL)", + }, + dmlQuery: "insert into fk_t20(id, col, col2) values (6, 9, 6)", + assertionQueries: []string{ + "select * from fk_t20 order by id", + }, + }, + } + + for _, tt := range testcases { + for _, testSharded := range []bool{false, true} { + t.Run(getTestName(tt.name, testSharded), func(t *testing.T) { + mcmp, closer := start(t) + defer closer() + // Set the correct keyspace to use from VtGates. + if testSharded { + t.Skip("Skip test since we don't have sharded foreign key support yet") + _ = utils.Exec(t, mcmp.VtConn, "use `ks`") + } else { + _ = utils.Exec(t, mcmp.VtConn, "use `uks`") + } + + // Insert all the data required for running the test. + for _, query := range tt.dataQueries { + mcmp.Exec(query) + } + + // Run the DML query that needs to be tested and verify output with MySQL. + _, _ = mcmp.ExecAllowAndCompareError(tt.dmlQuery) + + // Run the assertion queries and verify we get the expected outputs. + for _, query := range tt.assertionQueries { + mcmp.Exec(query) + } + }) + } + } + + for _, testSharded := range []bool{false, true} { + t.Run(getTestName("Transactions with intermediate failure", testSharded), func(t *testing.T) { + mcmp, closer := start(t) + defer closer() + // Set the correct keyspace to use from VtGates. + if testSharded { + t.Skip("Skip test since we don't have sharded foreign key support yet") + _ = utils.Exec(t, mcmp.VtConn, "use `ks`") + } else { + _ = utils.Exec(t, mcmp.VtConn, "use `uks`") + } + + // Insert some rows + mcmp.Exec("INSERT INTO fk_t10(id, col) VALUES (1, 7), (2, 9), (3, 5)") + mcmp.Exec("INSERT INTO fk_t11(id, col) VALUES (1, 7), (2, 9), (3, 5)") + mcmp.Exec("INSERT INTO fk_t12(id, col) VALUES (1, 7), (2, 9), (3, 5)") + + // Start a transaction + mcmp.Exec("BEGIN") + + // Insert another row. + mcmp.Exec("INSERT INTO fk_t13(id, col) VALUES (1, 7)") + + // Delete success for cascaded (2, 9) + mcmp.Exec("DELETE FROM fk_t10 WHERE id = 2") + + // Verify the results + mcmp.Exec("SELECT * FROM fk_t10 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t11 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t12 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t13 ORDER BY id") + + // Update that fails + _, err = mcmp.ExecAllowAndCompareError("UPDATE fk_t10 SET col = 15 WHERE id = 1") + require.Error(t, err) + + // Verify the results + // Since we are in a transaction, we still expect the transaction to be ongoing, with no change to the tables + // since the update should fail. + mcmp.Exec("SELECT * FROM fk_t10 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t11 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t12 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t13 ORDER BY id") + + // Update that is a success + mcmp.Exec("UPDATE fk_t10 SET col = 15 where id = 3") + + // Verify the results + mcmp.Exec("SELECT * FROM fk_t10 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t11 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t12 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t13 ORDER BY id") + + // Insert a new row + mcmp.Exec("INSERT INTO fk_t13(id, col) VALUES (3, 15)") + + // Verify the results + mcmp.Exec("SELECT * FROM fk_t10 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t11 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t12 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t13 ORDER BY id") + + // Rollback the transaction. + mcmp.Exec("ROLLBACK") + + // Verify the results + mcmp.Exec("SELECT * FROM fk_t10 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t11 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t12 ORDER BY id") + mcmp.Exec("SELECT * FROM fk_t13 ORDER BY id") + }) + } +} + +// getTestName prepends whether the test is for a sharded keyspace or not to the test name. +func getTestName(testName string, testSharded bool) string { + if testSharded { + return "Sharded - " + testName + } + return "Unsharded - " + testName +} diff --git a/go/test/endtoend/vtgate/foreignkey/main_test.go b/go/test/endtoend/vtgate/foreignkey/main_test.go new file mode 100644 index 00000000000..fc42c56f311 --- /dev/null +++ b/go/test/endtoend/vtgate/foreignkey/main_test.go @@ -0,0 +1,153 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package foreignkey + +import ( + _ "embed" + "flag" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + mysqlParams mysql.ConnParams + vtgateGrpcAddress string + shardedKs = "ks" + unshardedKs = "uks" + Cell = "test" + //go:embed sharded_schema.sql + shardedSchemaSQL string + + //go:embed unsharded_schema.sql + unshardedSchemaSQL string + + //go:embed sharded_vschema.json + shardedVSchema string + + //go:embed unsharded_vschema.json + unshardedVSchema string +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(Cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + return 1 + } + + // Start keyspace + sKs := &cluster.Keyspace{ + Name: shardedKs, + SchemaSQL: shardedSchemaSQL, + VSchema: shardedVSchema, + } + + err = clusterInstance.StartKeyspace(*sKs, []string{"-80", "80-"}, 0, false) + if err != nil { + return 1 + } + + uKs := &cluster.Keyspace{ + Name: unshardedKs, + SchemaSQL: unshardedSchemaSQL, + VSchema: unshardedVSchema, + } + err = clusterInstance.StartUnshardedKeyspace(*uKs, 1, false) + if err != nil { + return 1 + } + + err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph") + if err != nil { + return 1 + } + + // Start vtgate + err = clusterInstance.StartVtgate() + if err != nil { + return 1 + } + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + vtgateGrpcAddress = fmt.Sprintf("%s:%d", clusterInstance.Hostname, clusterInstance.VtgateGrpcPort) + + connParams, closer, err := utils.NewMySQL(clusterInstance, shardedKs, shardedSchemaSQL) + if err != nil { + fmt.Println(err) + return 1 + } + defer closer() + mysqlParams = connParams + return m.Run() + }() + os.Exit(exitCode) +} + +func start(t *testing.T) (utils.MySQLCompare, func()) { + mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) + require.NoError(t, err) + + deleteAll := func() { + _ = utils.Exec(t, mcmp.VtConn, "use `ks/-80`") + tables := []string{"t4", "t3", "t2", "t1", "multicol_tbl2", "multicol_tbl1"} + for i := 20; i > 0; i-- { + tables = append(tables, fmt.Sprintf("fk_t%v", i)) + } + for _, table := range tables { + _, _ = mcmp.ExecAndIgnore("delete from " + table) + } + _ = utils.Exec(t, mcmp.VtConn, "use `ks/80-`") + for _, table := range tables { + _, _ = mcmp.ExecAndIgnore("delete from " + table) + } + _ = utils.Exec(t, mcmp.VtConn, "use `uks`") + tables = []string{"u_t1", "u_t2", "u_t3"} + for i := 20; i > 0; i-- { + tables = append(tables, fmt.Sprintf("fk_t%v", i)) + } + for _, table := range tables { + _, _ = mcmp.ExecAndIgnore("delete from " + table) + } + _ = utils.Exec(t, mcmp.VtConn, "use `ks`") + } + + deleteAll() + + return mcmp, func() { + deleteAll() + mcmp.Close() + cluster.PanicHandler(t) + } +} diff --git a/go/test/endtoend/vtgate/foreignkey/sharded_schema.sql b/go/test/endtoend/vtgate/foreignkey/sharded_schema.sql new file mode 100644 index 00000000000..b530c982904 --- /dev/null +++ b/go/test/endtoend/vtgate/foreignkey/sharded_schema.sql @@ -0,0 +1,297 @@ +create table t1 +( + id bigint, + col bigint, + primary key (id) +) Engine = InnoDB; + +create table t2 +( + id bigint, + col bigint, + mycol varchar(50), + primary key (id), + index(id, mycol), + index(id, col), + foreign key (id) references t1 (id) on delete restrict +) Engine = InnoDB; + +create table t3 +( + id bigint, + col bigint, + primary key (id), + foreign key (col) references t1 (id) on delete restrict +) Engine = InnoDB; + +create table multicol_tbl1 +( + cola bigint, + colb varbinary(50), + colc varchar(50), + msg varchar(50), + primary key (cola, colb, colc) +) Engine = InnoDB; + +create table multicol_tbl2 +( + cola bigint, + colb varbinary(50), + colc varchar(50), + msg varchar(50), + primary key (cola, colb, colc), + foreign key (cola, colb, colc) references multicol_tbl1 (cola, colb, colc) on delete cascade +) Engine = InnoDB; + +create table t4 +( + id bigint, + col bigint, + t2_mycol varchar(50), + t2_col bigint, + primary key (id), + foreign key (id) references t2 (id) on delete restrict, + foreign key (id, t2_mycol) references t2 (id, mycol) on update restrict, + foreign key (id, t2_col) references t2 (id, col) on update cascade +) Engine = InnoDB; + +create table t5 +( + pk bigint, + sk bigint, + col1 varchar(50), + primary key (pk), + index(sk, col1) +) Engine = InnoDB; + +create table t6 +( + pk bigint, + sk bigint, + col1 varchar(50), + primary key (pk), + foreign key (sk, col1) references t5 (sk, col1) on delete restrict on update restrict +) Engine = InnoDB; + +/* + * fk_t1 + * │ + * │ On Delete Restrict + * │ On Update Restrict + * ▼ + * ┌────────────────fk_t2────────────────┐ + * │ │ + * │On Delete Set Null │ On Delete Set Null + * │On Update Set Null │ On Update Set Null + * ▼ ▼ + * fk_t7 fk_t3───────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Set Null │ │ On Update Set Null + * On Update Set Null │ │ + * ▼ ▼ + * fk_t4 fk_t6 + * │ + * │ + * On Delete Restrict │ + * On Update Restrict │ + * │ + * ▼ + * fk_t5 + */ + +create table fk_t1 +( + id bigint, + col varchar(10), + primary key (id), + index(col) +) Engine = InnoDB; + +create table fk_t2 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t1(col) on delete restrict on update restrict +) Engine = InnoDB; + +create table fk_t3 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t2(col) on delete set null on update set null +) Engine = InnoDB; + +create table fk_t4 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t3(col) on delete set null on update set null +) Engine = InnoDB; + +create table fk_t5 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t4(col) on delete restrict on update restrict +) Engine = InnoDB; + +create table fk_t6 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t3(col) on delete set null on update set null +) Engine = InnoDB; + +create table fk_t7 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t2(col) on delete set null on update set null +) Engine = InnoDB; + +/* + * fk_t10 + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_t11──────────────────┐ + * │ │ + * │ │ On Delete Restrict + * On Delete Cascade │ │ On Update Restrict + * On Update Cascade │ │ + * │ │ + * ▼ ▼ + * fk_t12 fk_t13 + */ + +create table fk_t10 +( + id bigint, + col varchar(10), + primary key (id), + index(col) +) Engine = InnoDB; + +create table fk_t11 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t10(col) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_t12 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t11(col) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_t13 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t11(col) on delete restrict on update restrict +) Engine = InnoDB; + +/* + * fk_t15 + * │ + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_t16 + * │ + * On Delete Set Null │ + * On Update Set Null │ + * │ + * ▼ + * fk_t17──────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Cascade │ │ On Update Set Null + * On Update Cascade │ │ + * │ │ + * ▼ ▼ + * fk_t18 fk_t19 + */ + +create table fk_t15 +( + id bigint, + col varchar(10), + primary key (id), + index(col) +) Engine = InnoDB; + +create table fk_t16 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t15(col) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_t17 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t16(col) on delete set null on update set null +) Engine = InnoDB; + +create table fk_t18 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t17(col) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_t19 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t17(col) on delete set null on update set null +) Engine = InnoDB; + +/* + Self referenced foreign key from col2 to col in fk_t20 +*/ + +create table fk_t20 +( + id bigint, + col varchar(10), + col2 varchar(10), + primary key (id), + index(col), + foreign key (col2) references fk_t20(col) on delete restrict on update restrict +) Engine = InnoDB; \ No newline at end of file diff --git a/go/test/endtoend/vtgate/foreignkey/sharded_vschema.json b/go/test/endtoend/vtgate/foreignkey/sharded_vschema.json new file mode 100644 index 00000000000..074f08ce848 --- /dev/null +++ b/go/test/endtoend/vtgate/foreignkey/sharded_vschema.json @@ -0,0 +1,219 @@ +{ + "sharded": true, + "foreignKeyMode": "FK_MANAGED", + "vindexes": { + "xxhash": { + "type": "xxhash" + }, + "multicol_vdx": { + "type": "multicol", + "params": { + "column_count": "3", + "column_bytes": "1,3,4", + "column_vindex": "hash,binary,unicode_loose_xxhash" + } + } + }, + "tables": { + "t1": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "t2": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "t3": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "t4": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "t5": { + "column_vindexes": [ + { + "column": "sk", + "name": "xxhash" + } + ] + }, + "t6": { + "column_vindexes": [ + { + "column": "sk", + "name": "xxhash" + } + ] + }, + "multicol_tbl1": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb", + "colc" + ], + "name": "multicol_vdx" + } + ] + }, + "multicol_tbl2": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb", + "colc" + ], + "name": "multicol_vdx" + } + ] + }, + "fk_t1": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t2": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t3": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t4": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t5": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t6": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t7": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t10": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t11": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t12": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t13": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t15": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t16": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t17": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t18": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "fk_t19": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + } + } +} \ No newline at end of file diff --git a/go/test/endtoend/vtgate/foreignkey/unsharded_schema.sql b/go/test/endtoend/vtgate/foreignkey/unsharded_schema.sql new file mode 100644 index 00000000000..dc6cba7bb08 --- /dev/null +++ b/go/test/endtoend/vtgate/foreignkey/unsharded_schema.sql @@ -0,0 +1,247 @@ +create table u_t1 +( + id bigint, + col1 bigint, + index(col1), + primary key (id) +) Engine = InnoDB; + +create table u_t2 +( + id bigint, + col2 bigint, + primary key (id), + foreign key (col2) references u_t1 (col1) on delete set null on update set null +) Engine = InnoDB; + +create table u_t3 +( + id bigint, + col3 bigint, + primary key (id), + foreign key (col3) references u_t1 (col1) on delete cascade on update cascade +) Engine = InnoDB; + + +/* + * fk_t1 + * │ + * │ On Delete Restrict + * │ On Update Restrict + * ▼ + * ┌────────────────fk_t2────────────────┐ + * │ │ + * │On Delete Set Null │ On Delete Set Null + * │On Update Set Null │ On Update Set Null + * ▼ ▼ + * fk_t7 fk_t3───────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Set Null │ │ On Update Set Null + * On Update Set Null │ │ + * ▼ ▼ + * fk_t4 fk_t6 + * │ + * │ + * On Delete Restrict │ + * On Update Restrict │ + * │ + * ▼ + * fk_t5 + */ + +create table fk_t1 +( + id bigint, + col varchar(10), + primary key (id), + index(col) +) Engine = InnoDB; + +create table fk_t2 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t1(col) on delete restrict on update restrict +) Engine = InnoDB; + +create table fk_t3 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t2(col) on delete set null on update set null +) Engine = InnoDB; + +create table fk_t4 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t3(col) on delete set null on update set null +) Engine = InnoDB; + +create table fk_t5 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t4(col) on delete restrict on update restrict +) Engine = InnoDB; + +create table fk_t6 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t3(col) on delete set null on update set null +) Engine = InnoDB; + +create table fk_t7 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t2(col) on delete set null on update set null +) Engine = InnoDB; + +/* + * fk_t10 + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_t11──────────────────┐ + * │ │ + * │ │ On Delete Restrict + * On Delete Cascade │ │ On Update Restrict + * On Update Cascade │ │ + * │ │ + * ▼ ▼ + * fk_t12 fk_t13 + */ + +create table fk_t10 +( + id bigint, + col varchar(10), + primary key (id), + index(col) +) Engine = InnoDB; + +create table fk_t11 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t10(col) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_t12 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t11(col) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_t13 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t11(col) on delete restrict on update restrict +) Engine = InnoDB; + +/* + * fk_t15 + * │ + * │ + * On Delete Cascade │ + * On Update Cascade │ + * │ + * ▼ + * fk_t16 + * │ + * On Delete Set Null │ + * On Update Set Null │ + * │ + * ▼ + * fk_t17──────────────────┐ + * │ │ + * │ │ On Delete Set Null + * On Delete Cascade │ │ On Update Set Null + * On Update Cascade │ │ + * │ │ + * ▼ ▼ + * fk_t18 fk_t19 + */ + +create table fk_t15 +( + id bigint, + col varchar(10), + primary key (id), + index(col) +) Engine = InnoDB; + +create table fk_t16 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t15(col) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_t17 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t16(col) on delete set null on update set null +) Engine = InnoDB; + +create table fk_t18 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t17(col) on delete cascade on update cascade +) Engine = InnoDB; + +create table fk_t19 +( + id bigint, + col varchar(10), + primary key (id), + index(col), + foreign key (col) references fk_t17(col) on delete set null on update set null +) Engine = InnoDB; + +/* + Self referenced foreign key from col2 to col in fk_t20 +*/ + +create table fk_t20 +( + id bigint, + col varchar(10), + col2 varchar(10), + primary key (id), + index(col), + foreign key (col2) references fk_t20(col) on delete restrict on update restrict +) Engine = InnoDB; \ No newline at end of file diff --git a/go/test/endtoend/vtgate/foreignkey/unsharded_vschema.json b/go/test/endtoend/vtgate/foreignkey/unsharded_vschema.json new file mode 100644 index 00000000000..c0d2368849f --- /dev/null +++ b/go/test/endtoend/vtgate/foreignkey/unsharded_vschema.json @@ -0,0 +1,24 @@ +{ + "sharded": false, + "foreignKeyMode": "FK_MANAGED", + "tables": { + "u_t1": {}, + "u_t2": {}, + "fk_t1": {}, + "fk_t2": {}, + "fk_t3": {}, + "fk_t4": {}, + "fk_t5": {}, + "fk_t6": {}, + "fk_t7": {}, + "fk_t10": {}, + "fk_t11": {}, + "fk_t12": {}, + "fk_t13": {}, + "fk_t15": {}, + "fk_t16": {}, + "fk_t17": {}, + "fk_t18": {}, + "fk_t19": {} + } +} \ No newline at end of file diff --git a/go/test/endtoend/vtgate/gen4/gen4_test.go b/go/test/endtoend/vtgate/gen4/gen4_test.go index c1521012909..fe26ef32829 100644 --- a/go/test/endtoend/vtgate/gen4/gen4_test.go +++ b/go/test/endtoend/vtgate/gen4/gen4_test.go @@ -132,34 +132,28 @@ func TestDistinctAggregationFunc(t *testing.T) { defer closer() // insert some data. - utils.Exec(t, mcmp.VtConn, `insert into t2(id, tcol1, tcol2) values (1, 'A', 'A'),(2, 'B', 'C'),(3, 'A', 'C'),(4, 'C', 'A'),(5, 'A', 'A'),(6, 'B', 'C'),(7, 'B', 'A'),(8, 'C', 'A')`) + mcmp.Exec(`insert into t2(id, tcol1, tcol2) values (1, 'A', 'A'),(2, 'B', 'C'),(3, 'A', 'C'),(4, 'C', 'A'),(5, 'A', 'A'),(6, 'B', 'C'),(7, 'B', 'A'),(8, 'C', 'A')`) // count on primary vindex - utils.AssertMatches(t, mcmp.VtConn, `select tcol1, count(distinct id) from t2 group by tcol1`, - `[[VARCHAR("A") INT64(3)] [VARCHAR("B") INT64(3)] [VARCHAR("C") INT64(2)]]`) + mcmp.Exec(`select tcol1, count(distinct id) from t2 group by tcol1`) // count on any column - utils.AssertMatches(t, mcmp.VtConn, `select tcol1, count(distinct tcol2) from t2 group by tcol1`, - `[[VARCHAR("A") INT64(2)] [VARCHAR("B") INT64(2)] [VARCHAR("C") INT64(1)]]`) + mcmp.Exec(`select tcol1, count(distinct tcol2) from t2 group by tcol1`) // sum of columns - utils.AssertMatches(t, mcmp.VtConn, `select sum(id), sum(tcol1) from t2`, - `[[DECIMAL(36) FLOAT64(0)]]`) + mcmp.Exec(`select sum(id), sum(tcol1) from t2`) // sum on primary vindex - utils.AssertMatches(t, mcmp.VtConn, `select tcol1, sum(distinct id) from t2 group by tcol1`, - `[[VARCHAR("A") DECIMAL(9)] [VARCHAR("B") DECIMAL(15)] [VARCHAR("C") DECIMAL(12)]]`) + mcmp.Exec(`select tcol1, sum(distinct id) from t2 group by tcol1`) // sum on any column - utils.AssertMatches(t, mcmp.VtConn, `select tcol1, sum(distinct tcol2) from t2 group by tcol1`, - `[[VARCHAR("A") DECIMAL(0)] [VARCHAR("B") DECIMAL(0)] [VARCHAR("C") DECIMAL(0)]]`) + mcmp.Exec(`select tcol1, sum(distinct tcol2) from t2 group by tcol1`) // insert more data to get values on sum - utils.Exec(t, mcmp.VtConn, `insert into t2(id, tcol1, tcol2) values (9, 'AA', null),(10, 'AA', '4'),(11, 'AA', '4'),(12, null, '5'),(13, null, '6'),(14, 'BB', '10'),(15, 'BB', '20'),(16, 'BB', 'X')`) + mcmp.Exec(`insert into t2(id, tcol1, tcol2) values (9, 'AA', null),(10, 'AA', '4'),(11, 'AA', '4'),(12, null, '5'),(13, null, '6'),(14, 'BB', '10'),(15, 'BB', '20'),(16, 'BB', 'X')`) // multi distinct - utils.AssertMatches(t, mcmp.VtConn, `select tcol1, count(distinct tcol2), sum(distinct tcol2) from t2 group by tcol1`, - `[[NULL INT64(2) DECIMAL(11)] [VARCHAR("A") INT64(2) DECIMAL(0)] [VARCHAR("AA") INT64(1) DECIMAL(4)] [VARCHAR("B") INT64(2) DECIMAL(0)] [VARCHAR("BB") INT64(3) DECIMAL(30)] [VARCHAR("C") INT64(1) DECIMAL(0)]]`) + mcmp.Exec(`select tcol1, count(distinct tcol2), sum(distinct tcol2) from t2 group by tcol1`) } func TestDistinct(t *testing.T) { @@ -170,7 +164,7 @@ func TestDistinct(t *testing.T) { utils.Exec(t, mcmp.VtConn, `insert into t2(id, tcol1, tcol2) values (1, 'A', 'A'),(2, 'B', 'C'),(3, 'A', 'C'),(4, 'C', 'A'),(5, 'A', 'A'),(6, 'B', 'C'),(7, 'B', 'A'),(8, 'C', 'A')`) // multi distinct - utils.AssertMatches(t, mcmp.VtConn, `select distinct tcol1, tcol2 from t2`, + utils.AssertMatchesNoOrder(t, mcmp.VtConn, `select distinct tcol1, tcol2 from t2`, `[[VARCHAR("A") VARCHAR("A")] [VARCHAR("A") VARCHAR("C")] [VARCHAR("B") VARCHAR("A")] [VARCHAR("B") VARCHAR("C")] [VARCHAR("C") VARCHAR("A")]]`) } @@ -430,9 +424,9 @@ func TestOuterJoin(t *testing.T) { } func TestUsingJoin(t *testing.T) { - require.NoError(t, utils.WaitForAuthoritative(t, clusterInstance.VtgateProcess, shardedKs, "t1")) - require.NoError(t, utils.WaitForAuthoritative(t, clusterInstance.VtgateProcess, shardedKs, "t2")) - require.NoError(t, utils.WaitForAuthoritative(t, clusterInstance.VtgateProcess, shardedKs, "t3")) + require.NoError(t, utils.WaitForAuthoritative(t, shardedKs, "t1", clusterInstance.VtgateProcess.ReadVSchema)) + require.NoError(t, utils.WaitForAuthoritative(t, shardedKs, "t2", clusterInstance.VtgateProcess.ReadVSchema)) + require.NoError(t, utils.WaitForAuthoritative(t, shardedKs, "t3", clusterInstance.VtgateProcess.ReadVSchema)) mcmp, closer := start(t) defer closer() diff --git a/go/test/endtoend/vtgate/gen4/system_schema_test.go b/go/test/endtoend/vtgate/gen4/system_schema_test.go index c075479bb11..fba953d51ae 100644 --- a/go/test/endtoend/vtgate/gen4/system_schema_test.go +++ b/go/test/endtoend/vtgate/gen4/system_schema_test.go @@ -126,7 +126,8 @@ func TestFKConstraintUsingInformationSchema(t *testing.T) { query := "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk on fk.constraint_schema = rc.constraint_schema and fk.constraint_name = rc.constraint_name where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = 't7_fk' and rc.constraint_schema = database() and rc.table_name = 't7_fk'" utils.AssertMatchesAny(t, conn, query, `[[VARCHAR("t7_xxhash") VARCHAR("uid") VARCHAR("t7_uid") VARCHAR("t7_fk_ibfk_1") VARCHAR("CASCADE") VARCHAR("SET NULL")]]`, - `[[VARBINARY("t7_xxhash") VARCHAR("uid") VARCHAR("t7_uid") VARCHAR("t7_fk_ibfk_1") BINARY("CASCADE") BINARY("SET NULL")]]`) + `[[VARBINARY("t7_xxhash") VARCHAR("uid") VARCHAR("t7_uid") VARCHAR("t7_fk_ibfk_1") BINARY("CASCADE") BINARY("SET NULL")]]`, + `[[VARCHAR("t7_xxhash") VARCHAR("uid") VARCHAR("t7_uid") VARCHAR("t7_fk_ibfk_1") BINARY("CASCADE") BINARY("SET NULL")]]`) } func TestConnectWithSystemSchema(t *testing.T) { diff --git a/go/test/endtoend/vtgate/grpc_api/acl_test.go b/go/test/endtoend/vtgate/grpc_api/acl_test.go new file mode 100644 index 00000000000..2819a3e41d1 --- /dev/null +++ b/go/test/endtoend/vtgate/grpc_api/acl_test.go @@ -0,0 +1,110 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package grpc_api + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/callerid" +) + +// TestEffectiveCallerIDWithAccess verifies that an authenticated gRPC static user with an effectiveCallerID that has ACL access can execute queries +func TestEffectiveCallerIDWithAccess(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vtgateConn, err := cluster.DialVTGate(ctx, t.Name(), vtgateGrpcAddress, "some_other_user", "test_password") + require.NoError(t, err) + defer vtgateConn.Close() + + session := vtgateConn.Session(keyspaceName+"@primary", nil) + query := "SELECT id FROM test_table" + ctx = callerid.NewContext(ctx, callerid.NewEffectiveCallerID("user_with_access", "", ""), nil) + _, err = session.Execute(ctx, query, nil) + assert.NoError(t, err) +} + +// TestEffectiveCallerIDWithNoAccess verifies that an authenticated gRPC static user without an effectiveCallerID that has ACL access cannot execute queries +func TestEffectiveCallerIDWithNoAccess(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vtgateConn, err := cluster.DialVTGate(ctx, t.Name(), vtgateGrpcAddress, "another_unrelated_user", "test_password") + require.NoError(t, err) + defer vtgateConn.Close() + + session := vtgateConn.Session(keyspaceName+"@primary", nil) + query := "SELECT id FROM test_table" + ctx = callerid.NewContext(ctx, callerid.NewEffectiveCallerID("user_no_access", "", ""), nil) + _, err = session.Execute(ctx, query, nil) + require.Error(t, err) + assert.Contains(t, err.Error(), "Select command denied to user") + assert.Contains(t, err.Error(), "for table 'test_table' (ACL check error)") +} + +// TestAuthenticatedUserWithAccess verifies that an authenticated gRPC static user with ACL access can execute queries +func TestAuthenticatedUserWithAccess(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vtgateConn, err := cluster.DialVTGate(ctx, t.Name(), vtgateGrpcAddress, "user_with_access", "test_password") + require.NoError(t, err) + defer vtgateConn.Close() + + session := vtgateConn.Session(keyspaceName+"@primary", nil) + query := "SELECT id FROM test_table" + _, err = session.Execute(ctx, query, nil) + assert.NoError(t, err) +} + +// TestAuthenticatedUserNoAccess verifies that an authenticated gRPC static user with no ACL access cannot execute queries +func TestAuthenticatedUserNoAccess(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vtgateConn, err := cluster.DialVTGate(ctx, t.Name(), vtgateGrpcAddress, "user_no_access", "test_password") + require.NoError(t, err) + defer vtgateConn.Close() + + session := vtgateConn.Session(keyspaceName+"@primary", nil) + query := "SELECT id FROM test_table" + _, err = session.Execute(ctx, query, nil) + require.Error(t, err) + assert.Contains(t, err.Error(), "Select command denied to user") + assert.Contains(t, err.Error(), "for table 'test_table' (ACL check error)") +} + +// TestUnauthenticatedUser verifies that an unauthenticated gRPC user cannot execute queries +func TestUnauthenticatedUser(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vtgateConn, err := cluster.DialVTGate(ctx, t.Name(), vtgateGrpcAddress, "", "") + require.NoError(t, err) + defer vtgateConn.Close() + + session := vtgateConn.Session(keyspaceName+"@primary", nil) + query := "SELECT id FROM test_table" + _, err = session.Execute(ctx, query, nil) + require.Error(t, err) + assert.Contains(t, err.Error(), "invalid credentials") +} diff --git a/go/test/endtoend/vtgate/grpc_api/execute_test.go b/go/test/endtoend/vtgate/grpc_api/execute_test.go new file mode 100644 index 00000000000..b1a5f3b8d80 --- /dev/null +++ b/go/test/endtoend/vtgate/grpc_api/execute_test.go @@ -0,0 +1,132 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package grpc_api + +import ( + "context" + "fmt" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" + querypb "vitess.io/vitess/go/vt/proto/query" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/vtgate/vtgateconn" +) + +// TestTransactionsWithGRPCAPI test the transaction queries through vtgate grpc apis. +// It is done through both streaming api and non-streaming api. +func TestTransactionsWithGRPCAPI(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vtgateConn, err := cluster.DialVTGate(ctx, t.Name(), vtgateGrpcAddress, "user_with_access", "test_password") + require.NoError(t, err) + defer vtgateConn.Close() + + vtSession := vtgateConn.Session(keyspaceName, nil) + workload := []string{"OLTP", "OLAP"} + for i := 0; i < 4; i++ { // running all switch combinations. + index := i % len(workload) + _, session, err := exec(ctx, vtSession, fmt.Sprintf("set workload = %s", workload[index]), nil) + require.NoError(t, err) + + require.Equal(t, workload[index], session.Options.Workload.String()) + execTest(ctx, t, workload[index], vtSession) + } + +} + +func execTest(ctx context.Context, t *testing.T, workload string, vtSession *vtgateconn.VTGateSession) { + tcases := []struct { + query string + + expRowCount int + expRowAffected int + expInTransaction bool + }{{ + query: "select id, val from test_table", + }, { + query: "begin", + expInTransaction: true, + }, { + query: "insert into test_table(id, val) values (1, 'A')", + expRowAffected: 1, + expInTransaction: true, + }, { + query: "select id, val from test_table", + expRowCount: 1, + expInTransaction: true, + }, { + query: "commit", + }, { + query: "select id, val from test_table", + expRowCount: 1, + }, { + query: "delete from test_table", + expRowAffected: 1, + }} + + for _, tc := range tcases { + t.Run(workload+":"+tc.query, func(t *testing.T) { + qr, session, err := exec(ctx, vtSession, tc.query, nil) + require.NoError(t, err) + + assert.Len(t, qr.Rows, tc.expRowCount) + assert.EqualValues(t, tc.expRowAffected, qr.RowsAffected) + assert.EqualValues(t, tc.expInTransaction, session.InTransaction) + }) + } +} + +func exec(ctx context.Context, conn *vtgateconn.VTGateSession, sql string, bv map[string]*querypb.BindVariable) (*sqltypes.Result, *vtgatepb.Session, error) { + options := conn.SessionPb().GetOptions() + if options != nil && options.Workload == querypb.ExecuteOptions_OLAP { + return streamExec(ctx, conn, sql, bv) + } + res, err := conn.Execute(ctx, sql, bv) + return res, conn.SessionPb(), err +} + +func streamExec(ctx context.Context, conn *vtgateconn.VTGateSession, sql string, bv map[string]*querypb.BindVariable) (*sqltypes.Result, *vtgatepb.Session, error) { + stream, err := conn.StreamExecute(ctx, sql, bv) + if err != nil { + return nil, conn.SessionPb(), err + } + result := &sqltypes.Result{} + for { + res, err := stream.Recv() + if err != nil { + if err == io.EOF { + return result, conn.SessionPb(), nil + } + return nil, conn.SessionPb(), err + } + result.Rows = append(result.Rows, res.Rows...) + result.RowsAffected += res.RowsAffected + if res.InsertID != 0 { + result.InsertID = res.InsertID + } + if res.Fields != nil { + result.Fields = res.Fields + } + } +} diff --git a/go/test/endtoend/vtgate/grpc_server_auth_static/main_test.go b/go/test/endtoend/vtgate/grpc_api/main_test.go similarity index 57% rename from go/test/endtoend/vtgate/grpc_server_auth_static/main_test.go rename to go/test/endtoend/vtgate/grpc_api/main_test.go index d0cb6d6aa0f..a51c6d9e6f2 100644 --- a/go/test/endtoend/vtgate/grpc_server_auth_static/main_test.go +++ b/go/test/endtoend/vtgate/grpc_api/main_test.go @@ -14,24 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package grpcserverauthstatic +package grpc_api import ( - "context" "flag" "fmt" "os" "path" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "vitess.io/vitess/go/test/endtoend/cluster" - "vitess.io/vitess/go/vt/grpcclient" - "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn" - "vitess.io/vitess/go/vt/vtgate/vtgateconn" ) var ( @@ -49,6 +41,14 @@ var ( ` grpcServerAuthStaticJSON = ` [ + { + "Username": "some_other_user", + "Password": "test_password" + }, + { + "Username": "another_unrelated_user", + "Password": "test_password" + }, { "Username": "user_with_access", "Password": "test_password" @@ -109,6 +109,8 @@ func TestMain(m *testing.M) { clusterInstance.VtGateExtraArgs = []string{ "--grpc_auth_mode", "static", "--grpc_auth_static_password_file", grpcServerAuthStaticPath, + "--grpc_use_effective_callerid", + "--grpc-use-static-authentication-callerid", } // Configure vttablet to use table ACL @@ -139,69 +141,6 @@ func TestMain(m *testing.M) { os.Exit(exitcode) } -// TestAuthenticatedUserWithAccess verifies that an authenticated gRPC static user with ACL access can execute queries -func TestAuthenticatedUserWithAccess(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - vtgateConn, err := dialVTGate(ctx, t, "user_with_access", "test_password") - if err != nil { - t.Fatal(err) - } - defer vtgateConn.Close() - - session := vtgateConn.Session(keyspaceName+"@primary", nil) - query := "SELECT id FROM test_table" - _, err = session.Execute(ctx, query, nil) - assert.NoError(t, err) -} - -// TestAuthenticatedUserNoAccess verifies that an authenticated gRPC static user with no ACL access cannot execute queries -func TestAuthenticatedUserNoAccess(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - vtgateConn, err := dialVTGate(ctx, t, "user_no_access", "test_password") - if err != nil { - t.Fatal(err) - } - defer vtgateConn.Close() - - session := vtgateConn.Session(keyspaceName+"@primary", nil) - query := "SELECT id FROM test_table" - _, err = session.Execute(ctx, query, nil) - require.Error(t, err) - assert.Contains(t, err.Error(), "Select command denied to user") - assert.Contains(t, err.Error(), "for table 'test_table' (ACL check error)") -} - -// TestUnauthenticatedUser verifies that an unauthenticated gRPC user cannot execute queries -func TestUnauthenticatedUser(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - vtgateConn, err := dialVTGate(ctx, t, "", "") - if err != nil { - t.Fatal(err) - } - defer vtgateConn.Close() - - session := vtgateConn.Session(keyspaceName+"@primary", nil) - query := "SELECT id FROM test_table" - _, err = session.Execute(ctx, query, nil) - require.Error(t, err) - assert.Contains(t, err.Error(), "invalid credentials") -} - -func dialVTGate(ctx context.Context, t *testing.T, username string, password string) (*vtgateconn.VTGateConn, error) { - clientCreds := &grpcclient.StaticAuthClientCreds{Username: username, Password: password} - creds := grpc.WithPerRPCCredentials(clientCreds) - dialerFunc := grpcvtgateconn.Dial(creds) - dialerName := t.Name() - vtgateconn.RegisterDialer(dialerName, dialerFunc) - return vtgateconn.DialProtocol(ctx, dialerName, vtgateGrpcAddress) -} - func createFile(path string, contents string) error { f, err := os.Create(path) if err != nil { diff --git a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go index ab844a8ffd1..4971d03060b 100644 --- a/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go +++ b/go/test/endtoend/vtgate/keyspace_watches/keyspace_watch_test.go @@ -23,10 +23,8 @@ package keyspacewatches import ( "database/sql" "fmt" - "math/rand" "os" "testing" - "time" _ "github.com/go-sql-driver/mysql" "github.com/stretchr/testify/require" @@ -115,7 +113,6 @@ func createCluster(extraVTGateArgs []string) (*cluster.LocalProcessCluster, int) Host: clusterInstance.Hostname, Port: clusterInstance.VtgateMySQLPort, } - rand.Seed(time.Now().UnixNano()) return clusterInstance, 0 } diff --git a/go/test/endtoend/vtgate/lookup_test.go b/go/test/endtoend/vtgate/lookup_test.go index deb3710cb7d..b4b53295d8d 100644 --- a/go/test/endtoend/vtgate/lookup_test.go +++ b/go/test/endtoend/vtgate/lookup_test.go @@ -21,13 +21,13 @@ import ( "fmt" "testing" - "vitess.io/vitess/go/test/endtoend/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/utils" ) func TestUnownedLookupInsertNull(t *testing.T) { @@ -128,8 +128,8 @@ func TestConsistentLookup(t *testing.T) { _, err = conn.ExecuteFetch("insert into t1(id1, id2) values(1, 4)", 1000, false) utils.Exec(t, conn, "rollback") require.Error(t, err) - mysqlErr := err.(*mysql.SQLError) - assert.Equal(t, mysql.ERDupEntry, mysqlErr.Num) + mysqlErr := err.(*sqlerror.SQLError) + assert.Equal(t, sqlerror.ERDupEntry, mysqlErr.Num) assert.Equal(t, "23000", mysqlErr.State) assert.Contains(t, mysqlErr.Message, "reverted partial DML execution") @@ -581,3 +581,10 @@ func TestUnicodeLooseMD5CaseInsensitive(t *testing.T) { utils.AssertMatches(t, conn, "SELECT id1, id2 from t4 where id2 = 'Test'", `[[INT64(1) VARCHAR("test")]]`) } + +func TestJoinWithPredicateAndJoinOnDifferentVindex(t *testing.T) { + conn, closer := start(t) + defer closer() + + utils.Exec(t, conn, "select t4.id1 from t4, t3 where t4.id2 = 'foo' and t4.id1 = t3.id6") +} diff --git a/go/test/endtoend/vtgate/main_test.go b/go/test/endtoend/vtgate/main_test.go index 1d2bc59b50a..12abcf4dd01 100644 --- a/go/test/endtoend/vtgate/main_test.go +++ b/go/test/endtoend/vtgate/main_test.go @@ -73,7 +73,7 @@ func TestMain(m *testing.M) { VSchema: VSchema, } clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal"} - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "0.1", "--queryserver-config-max-result-size", "100", "--queryserver-config-terse-errors"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-max-result-size", "100", "--queryserver-config-terse-errors"} err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false) if err != nil { return 1 diff --git a/go/test/endtoend/vtgate/misc_test.go b/go/test/endtoend/vtgate/misc_test.go index e24db73547d..65a6a0525d6 100644 --- a/go/test/endtoend/vtgate/misc_test.go +++ b/go/test/endtoend/vtgate/misc_test.go @@ -20,7 +20,7 @@ import ( "fmt" "testing" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/assert" @@ -428,7 +428,7 @@ ts12 TIMESTAMP DEFAULT LOCALTIME() )`) utils.Exec(t, conn, "drop table function_default") - utils.Exec(t, conn, `create table function_default (ts TIMESTAMP DEFAULT UTC_TIMESTAMP)`) + utils.Exec(t, conn, `create table function_default (ts TIMESTAMP DEFAULT (UTC_TIMESTAMP))`) utils.Exec(t, conn, "drop table function_default") utils.Exec(t, conn, `create table function_default (x varchar(25) DEFAULT "check")`) @@ -694,8 +694,8 @@ func TestDescribeVindex(t *testing.T) { _, err := conn.ExecuteFetch("describe hash", 1000, false) require.Error(t, err) - mysqlErr := err.(*mysql.SQLError) - assert.Equal(t, mysql.ERNoSuchTable, mysqlErr.Num) + mysqlErr := err.(*sqlerror.SQLError) + assert.Equal(t, sqlerror.ERNoSuchTable, mysqlErr.Num) assert.Equal(t, "42S02", mysqlErr.State) assert.Contains(t, mysqlErr.Message, "NotFound desc") } diff --git a/go/test/endtoend/vtgate/mysql80/misc_test.go b/go/test/endtoend/vtgate/mysql80/misc_test.go index 6a642178432..b29eb13ecdc 100644 --- a/go/test/endtoend/vtgate/mysql80/misc_test.go +++ b/go/test/endtoend/vtgate/mysql80/misc_test.go @@ -65,8 +65,8 @@ ts12 TIMESTAMP DEFAULT LOCALTIME() )`) utils.Exec(t, conn, "drop table function_default") - // this query works because utc_timestamp will get parenthesised before reaching MySQL. However, this syntax is not supported in MySQL 8.0 - utils.Exec(t, conn, `create table function_default (ts TIMESTAMP DEFAULT UTC_TIMESTAMP)`) + // this query works only as an expression. + utils.Exec(t, conn, `create table function_default (ts TIMESTAMP DEFAULT (UTC_TIMESTAMP))`) utils.Exec(t, conn, "drop table function_default") utils.Exec(t, conn, `create table function_default (x varchar(25) DEFAULT "check")`) diff --git a/go/test/endtoend/vtgate/partialfailure/main_test.go b/go/test/endtoend/vtgate/partialfailure/main_test.go index cf47ad6a70f..9e39e7b5dd5 100644 --- a/go/test/endtoend/vtgate/partialfailure/main_test.go +++ b/go/test/endtoend/vtgate/partialfailure/main_test.go @@ -18,16 +18,16 @@ package reservedconn import ( "context" + _ "embed" "flag" "os" "testing" - "vitess.io/vitess/go/test/endtoend/utils" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" ) var ( @@ -36,67 +36,12 @@ var ( keyspaceName = "ks" cell = "zone1" hostname = "localhost" - sqlSchema = ` - create table test( - id bigint, - val1 varchar(16), - val2 int, - val3 float, - primary key(id) - )Engine=InnoDB; - -CREATE TABLE test_vdx ( - val1 varchar(16) NOT NULL, - keyspace_id binary(8), - UNIQUE KEY (val1) -) ENGINE=Innodb; -` - - vSchema = ` - { - "sharded":true, - "vindexes": { - "hash_index": { - "type": "hash" - }, - "lookup1": { - "type": "consistent_lookup", - "params": { - "table": "test_vdx", - "from": "val1", - "to": "keyspace_id", - "ignore_nulls": "true" - }, - "owner": "test" - }, - "unicode_vdx":{ - "type": "unicode_loose_md5" - } - }, - "tables": { - "test":{ - "column_vindexes": [ - { - "column": "id", - "name": "hash_index" - }, - { - "column": "val1", - "name": "lookup1" - } - ] - }, - "test_vdx":{ - "column_vindexes": [ - { - "column": "val1", - "name": "unicode_vdx" - } - ] - } - } - } - ` + + //go:embed schema.sql + SchemaSQL string + + //go:embed vschema.json + VSchema string ) func TestMain(m *testing.M) { @@ -115,8 +60,8 @@ func TestMain(m *testing.M) { // Start keyspace keyspace := &cluster.Keyspace{ Name: keyspaceName, - SchemaSQL: sqlSchema, - VSchema: vSchema, + SchemaSQL: SchemaSQL, + VSchema: VSchema, } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-40", "40-80", "80-c0", "c0-"}, 0, false); err != nil { @@ -145,6 +90,7 @@ func testAllModes(t *testing.T, stmts func(conn *mysql.Conn)) { {"oltp-reserved", []string{"set workload = oltp", "set sql_mode = ''"}}, {"olap", []string{"set workload = olap"}}, {"olap-reserved", []string{"set workload = olap", "set sql_mode = ''"}}, + {"oltp", []string{"set workload = oltp"}}, // to make a circle on the workload change. } for _, tc := range tcases { diff --git a/go/test/endtoend/vtgate/partialfailure/schema.sql b/go/test/endtoend/vtgate/partialfailure/schema.sql new file mode 100644 index 00000000000..0b5ae986139 --- /dev/null +++ b/go/test/endtoend/vtgate/partialfailure/schema.sql @@ -0,0 +1,13 @@ +create table test( + id bigint, + val1 varchar(16), + val2 int, + val3 float, + primary key(id) +)Engine=InnoDB; + +create table test_vdx ( + val1 varchar(16) not null, + keyspace_id binary(8), + unique key(val1) +)ENGINE=Innodb; diff --git a/go/test/endtoend/vtgate/partialfailure/vschema.json b/go/test/endtoend/vtgate/partialfailure/vschema.json new file mode 100644 index 00000000000..64ea1c42eda --- /dev/null +++ b/go/test/endtoend/vtgate/partialfailure/vschema.json @@ -0,0 +1,43 @@ +{ + "sharded":true, + "vindexes": { + "hash_index": { + "type": "hash" + }, + "lookup1": { + "type": "consistent_lookup", + "params": { + "table": "test_vdx", + "from": "val1", + "to": "keyspace_id", + "ignore_nulls": "true" + }, + "owner": "test" + }, + "unicode_vdx":{ + "type": "unicode_loose_md5" + } + }, + "tables": { + "test":{ + "column_vindexes": [ + { + "column": "id", + "name": "hash_index" + }, + { + "column": "val1", + "name": "lookup1" + } + ] + }, + "test_vdx":{ + "column_vindexes": [ + { + "column": "val1", + "name": "unicode_vdx" + } + ] + } + } +} diff --git a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go index b7ef4c4a78d..8d40988263d 100644 --- a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go +++ b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go @@ -18,10 +18,14 @@ package aggregation import ( "fmt" + "slices" + "sort" + "strings" "testing" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -33,7 +37,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { deleteAll := func() { _, _ = utils.ExecAllowError(t, mcmp.VtConn, "set workload = oltp") - tables := []string{"t9", "aggr_test", "t3", "t7_xxhash", "aggr_test_dates", "t7_xxhash_idx", "t1", "t2"} + tables := []string{"t9", "aggr_test", "t3", "t7_xxhash", "aggr_test_dates", "t7_xxhash_idx", "t1", "t2", "t10"} for _, table := range tables { _, _ = mcmp.ExecAndIgnore("delete from " + table) } @@ -65,6 +69,7 @@ func TestAggregateTypes(t *testing.T) { mcmp.AssertMatches("select val1 as a, count(*) from aggr_test group by a", `[[VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)] [VARCHAR("d") INT64(1)] [VARCHAR("e") INT64(2)]]`) mcmp.AssertMatches("select val1 as a, count(*) from aggr_test group by a order by a", `[[VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)] [VARCHAR("d") INT64(1)] [VARCHAR("e") INT64(2)]]`) mcmp.AssertMatches("select val1 as a, count(*) from aggr_test group by a order by 2, a", `[[VARCHAR("b") INT64(1)] [VARCHAR("d") INT64(1)] [VARCHAR("a") INT64(2)] [VARCHAR("c") INT64(2)] [VARCHAR("e") INT64(2)]]`) + mcmp.AssertMatches("select sum(val1) from aggr_test", `[[FLOAT64(0)]]`) } func TestGroupBy(t *testing.T) { @@ -73,24 +78,12 @@ func TestGroupBy(t *testing.T) { mcmp.Exec("insert into t3(id5, id6, id7) values(1,1,2), (2,2,4), (3,2,4), (4,1,2), (5,1,2), (6,3,6)") // test ordering and group by int column mcmp.AssertMatches("select id6, id7, count(*) k from t3 group by id6, id7 order by k", `[[INT64(3) INT64(6) INT64(1)] [INT64(2) INT64(4) INT64(2)] [INT64(1) INT64(2) INT64(3)]]`) + mcmp.AssertMatches("select id6+id7, count(*) k from t3 group by id6+id7 order by k", `[[INT64(9) INT64(1)] [INT64(6) INT64(2)] [INT64(3) INT64(3)]]`) // Test the same queries in streaming mode utils.Exec(t, mcmp.VtConn, "set workload = olap") mcmp.AssertMatches("select id6, id7, count(*) k from t3 group by id6, id7 order by k", `[[INT64(3) INT64(6) INT64(1)] [INT64(2) INT64(4) INT64(2)] [INT64(1) INT64(2) INT64(3)]]`) -} - -func TestDistinct(t *testing.T) { - mcmp, closer := start(t) - defer closer() - mcmp.Exec("insert into t3(id5,id6,id7) values(1,3,3), (2,3,4), (3,3,6), (4,5,7), (5,5,6)") - mcmp.Exec("insert into t7_xxhash(uid,phone) values('1',4), ('2',4), ('3',3), ('4',1), ('5',1)") - mcmp.Exec("insert into aggr_test(id, val1, val2) values(1,'a',1), (2,'A',1), (3,'b',1), (4,'c',3), (5,'c',4)") - mcmp.Exec("insert into aggr_test(id, val1, val2) values(6,'d',null), (7,'e',null), (8,'E',1)") - mcmp.AssertMatches("select distinct val2, count(*) from aggr_test group by val2", `[[NULL INT64(2)] [INT64(1) INT64(4)] [INT64(3) INT64(1)] [INT64(4) INT64(1)]]`) - mcmp.AssertMatches("select distinct id6 from t3 join t7_xxhash on t3.id5 = t7_xxhash.phone", `[[INT64(3)] [INT64(5)]]`) - mcmp.Exec("delete from t3") - mcmp.Exec("delete from t7_xxhash") - mcmp.Exec("delete from aggr_test") + mcmp.AssertMatches("select id6+id7, count(*) k from t3 group by id6+id7 order by k", `[[INT64(9) INT64(1)] [INT64(6) INT64(2)] [INT64(3) INT64(3)]]`) } func TestEqualFilterOnScatter(t *testing.T) { @@ -104,18 +97,18 @@ func TestEqualFilterOnScatter(t *testing.T) { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having 1 = 1", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a = 5", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having 5 = a", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a = a", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a = 3+2", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having 1+4 = 3+2", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a = 1", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a = \"1\"", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a = \"5\"", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a = 5.00", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a, val1 from aggr_test group by val1 having a = 1.00", `[[INT64(1) VARCHAR("a")] [INT64(1) VARCHAR("b")] [INT64(1) VARCHAR("c")] [INT64(1) VARCHAR("d")] [INT64(1) VARCHAR("e")]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ 1 from aggr_test having count(*) = 5", `[[INT64(1)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having 1 = 1", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a = 5", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having 5 = a", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a = a", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a = 3+2", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having 1+4 = 3+2", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a = 1", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a = \"1\"", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a = \"5\"", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a = 5.00", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a, val1 from aggr_test group by val1 having a = 1.00", `[[INT64(1) VARCHAR("a")] [INT64(1) VARCHAR("b")] [INT64(1) VARCHAR("c")] [INT64(1) VARCHAR("d")] [INT64(1) VARCHAR("e")]]`) + mcmp.AssertMatches("select 1 from aggr_test having count(*) = 5", `[[INT64(1)]]`) }) } } @@ -127,7 +120,7 @@ func TestAggrOnJoin(t *testing.T) { mcmp.Exec("insert into t3(id5, id6, id7) values(1,1,1), (2,2,4), (3,2,4), (4,1,2), (5,1,1), (6,3,6)") mcmp.Exec("insert into aggr_test(id, val1, val2) values(1,'a',1), (2,'a',1), (3,'b',1), (4,'c',3), (5,'c',4)") - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) from aggr_test a join t3 t on a.val2 = t.id7", + mcmp.AssertMatches("select count(*) from aggr_test a join t3 t on a.val2 = t.id7", "[[INT64(8)]]") /* mysql> select count(*) from aggr_test a join t3 t on a.val2 = t.id7; @@ -138,7 +131,7 @@ func TestAggrOnJoin(t *testing.T) { +----------+ 1 row in set (0.00 sec) */ - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ a.val1, count(*) from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1", + mcmp.AssertMatches("select a.val1, count(*) from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1", `[[VARCHAR("a") INT64(4)] [VARCHAR("b") INT64(2)] [VARCHAR("c") INT64(2)]]`) /* mysql> select a.val1, count(*) from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1; @@ -152,7 +145,7 @@ func TestAggrOnJoin(t *testing.T) { 3 rows in set (0.00 sec) */ - mcmp.AssertMatches(`select /*vt+ PLANNER=gen4 */ max(a1.val2), max(a2.val2), count(*) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7`, + mcmp.AssertMatches(`select max(a1.val2), max(a2.val2), count(*) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7`, "[[INT64(3) INT64(1) INT64(8)]]") /* mysql> select max(a1.val2), max(a2.val2), count(*) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7; @@ -164,17 +157,17 @@ func TestAggrOnJoin(t *testing.T) { 1 row in set (0.00 sec) */ - mcmp.AssertMatches(`select /*vt+ PLANNER=gen4 */ a1.val1, count(distinct a1.val2) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7 group by a1.val1`, + mcmp.AssertMatches(`select a1.val1, count(distinct a1.val2) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7 group by a1.val1`, `[[VARCHAR("a") INT64(1)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(1)]]`) // having on aggregation on top of join - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ a.val1, count(*) from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1 having count(*) = 4", + mcmp.AssertMatches("select a.val1, count(*) from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1 having count(*) = 4", `[[VARCHAR("a") INT64(4)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ a.val1, count(*) as leCount from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1 having leCount = 4", + mcmp.AssertMatches("select a.val1, count(*) as leCount from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1 having leCount = 4", `[[VARCHAR("a") INT64(4)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ a.val1 from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1 having count(*) = 4", + mcmp.AssertMatches("select a.val1 from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1 having count(*) = 4", `[[VARCHAR("a")]]`) } @@ -189,15 +182,15 @@ func TestNotEqualFilterOnScatter(t *testing.T) { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a != 5", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having 5 != a", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a != a", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a != 3+2", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a != 1", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a != \"1\"", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a != \"5\"", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a != 5.00", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ 1 from aggr_test having count(*) != 5", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a != 5", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having 5 != a", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a != a", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a != 3+2", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a != 1", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a != \"1\"", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a != \"5\"", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a != 5.00", `[]`) + mcmp.AssertMatches("select 1 from aggr_test having count(*) != 5", `[]`) }) } } @@ -212,15 +205,15 @@ func TestLessFilterOnScatter(t *testing.T) { for _, workload := range workloads { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a < 10", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having 1 < a", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a < a", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a < 3+2", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a < 1", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a < \"10\"", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a < \"5\"", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a < 6.00", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ 1 from aggr_test having count(*) < 5", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a < 10", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having 1 < a", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a < a", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a < 3+2", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a < 1", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a < \"10\"", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a < \"5\"", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a < 6.00", `[[INT64(5)]]`) + mcmp.AssertMatches("select 1 from aggr_test having count(*) < 5", `[]`) }) } } @@ -236,15 +229,15 @@ func TestLessEqualFilterOnScatter(t *testing.T) { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a <= 10", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having 1 <= a", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a <= a", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a <= 3+2", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a <= 1", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a <= \"10\"", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a <= \"5\"", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a <= 5.00", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ 1 from aggr_test having count(*) <= 5", `[[INT64(1)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a <= 10", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having 1 <= a", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a <= a", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a <= 3+2", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a <= 1", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a <= \"10\"", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a <= \"5\"", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a <= 5.00", `[[INT64(5)]]`) + mcmp.AssertMatches("select 1 from aggr_test having count(*) <= 5", `[[INT64(1)]]`) }) } } @@ -260,15 +253,15 @@ func TestGreaterFilterOnScatter(t *testing.T) { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a > 1", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having 1 > a", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a > a", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a > 3+1", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a > 10", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a > \"1\"", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a > \"5\"", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a > 4.00", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ 1 from aggr_test having count(*) > 5", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a > 1", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having 1 > a", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a > a", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a > 3+1", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a > 10", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a > \"1\"", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a > \"5\"", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a > 4.00", `[[INT64(5)]]`) + mcmp.AssertMatches("select 1 from aggr_test having count(*) > 5", `[]`) }) } } @@ -284,15 +277,15 @@ func TestGreaterEqualFilterOnScatter(t *testing.T) { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a >= 1", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having 1 >= a", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a >= a", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a >= 3+2", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a >= 10", `[]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a >= \"1\"", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a >= \"5\"", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(*) as a from aggr_test having a >= 5.00", `[[INT64(5)]]`) - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ 1 from aggr_test having count(*) >= 5", `[[INT64(1)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a >= 1", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having 1 >= a", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a >= a", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a >= 3+2", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a >= 10", `[]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a >= \"1\"", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a >= \"5\"", `[[INT64(5)]]`) + mcmp.AssertMatches("select count(*) as a from aggr_test having a >= 5.00", `[[INT64(5)]]`) + mcmp.AssertMatches("select 1 from aggr_test having count(*) >= 5", `[[INT64(1)]]`) }) } } @@ -306,7 +299,7 @@ func TestGroupByOnlyFullGroupByOff(t *testing.T) { mcmp.Exec("set @@sql_mode = ' '") // We do not use AssertMatches here because the results for the second column are random - _, err := mcmp.ExecAndIgnore("select /*vt+ PLANNER=gen4 */ id2, id3 from t9 group by id2") + _, err := mcmp.ExecAndIgnore("select id2, id3 from t9 group by id2") require.NoError(t, err) } @@ -318,22 +311,22 @@ func TestAggOnTopOfLimit(t *testing.T) { for _, workload := range []string{"oltp", "olap"} { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(*) from (select id, val1 from aggr_test where val2 < 4 limit 2) as x", "[[INT64(2)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(val1) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(*) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(2)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(val1) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(1)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[INT64(0)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ val1, count(*) from (select id, val1 from aggr_test where val2 < 4 order by val1 limit 2) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(1)]]`) - mcmp.AssertMatchesNoOrder(" select /*vt+ PLANNER=gen4 */ val1, count(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)]]`) + mcmp.AssertMatches(" select count(*) from (select id, val1 from aggr_test where val2 < 4 limit 2) as x", "[[INT64(2)]]") + mcmp.AssertMatches(" select count(val1) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2)]]") + mcmp.AssertMatches(" select count(*) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(2)]]") + mcmp.AssertMatches(" select count(val1) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(1)]]") + mcmp.AssertMatches(" select count(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[INT64(0)]]") + mcmp.AssertMatches(" select val1, count(*) from (select id, val1 from aggr_test where val2 < 4 order by val1 limit 2) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(1)]]`) + mcmp.AssertMatchesNoOrder(" select val1, count(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)]]`) // mysql returns FLOAT64(0), vitess returns DECIMAL(0) - mcmp.AssertMatchesNoCompare(" select /*vt+ PLANNER=gen4 */ count(*), sum(val1) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) FLOAT64(0)]]", "[[INT64(2) DECIMAL(0)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(val1), sum(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) DECIMAL(7)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(*), sum(id) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(2) DECIMAL(14)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(val1), sum(id) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(1) DECIMAL(14)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(val2), sum(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[INT64(0) NULL]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ val1, count(*), sum(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 limit 2) as x group by val1", `[[NULL INT64(1) DECIMAL(7)] [VARCHAR("a") INT64(1) DECIMAL(2)]]`) - mcmp.AssertMatchesNoOrder(" select /*vt+ PLANNER=gen4 */ val1, count(val2), sum(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL INT64(1) DECIMAL(2)] [VARCHAR("a") INT64(2) DECIMAL(7)] [VARCHAR("b") INT64(1) DECIMAL(1)] [VARCHAR("c") INT64(2) DECIMAL(7)]]`) + mcmp.AssertMatchesNoCompare(" select count(*), sum(val1) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) FLOAT64(0)]]", "[[INT64(2) FLOAT64(0)]]") + mcmp.AssertMatches(" select count(val1), sum(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) DECIMAL(7)]]") + mcmp.AssertMatches(" select count(*), sum(id) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(2) DECIMAL(14)]]") + mcmp.AssertMatches(" select count(val1), sum(id) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(1) DECIMAL(14)]]") + mcmp.AssertMatches(" select count(val2), sum(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[INT64(0) NULL]]") + mcmp.AssertMatches(" select val1, count(*), sum(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 limit 2) as x group by val1", `[[NULL INT64(1) DECIMAL(7)] [VARCHAR("a") INT64(1) DECIMAL(2)]]`) + mcmp.AssertMatchesNoOrder(" select val1, count(val2), sum(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL INT64(1) DECIMAL(2)] [VARCHAR("a") INT64(2) DECIMAL(7)] [VARCHAR("b") INT64(1) DECIMAL(1)] [VARCHAR("c") INT64(2) DECIMAL(7)]]`) }) } } @@ -345,10 +338,10 @@ func TestEmptyTableAggr(t *testing.T) { for _, workload := range []string{"oltp", "olap"} { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", workload)) - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ t1.`name`, count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ t1.`name`, count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") + mcmp.AssertMatches(" select count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") + mcmp.AssertMatches(" select count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") + mcmp.AssertMatches(" select t1.`name`, count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") + mcmp.AssertMatches(" select t1.`name`, count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") }) } @@ -357,10 +350,10 @@ func TestEmptyTableAggr(t *testing.T) { for _, workload := range []string{"oltp", "olap"} { t.Run(workload, func(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", workload)) - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ t1.`name`, count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") - mcmp.AssertMatches(" select /*vt+ PLANNER=gen4 */ t1.`name`, count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") + mcmp.AssertMatches(" select count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") + mcmp.AssertMatches(" select count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") + mcmp.AssertMatches(" select t1.`name`, count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") + mcmp.AssertMatches(" select t1.`name`, count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") }) } @@ -372,17 +365,20 @@ func TestOrderByCount(t *testing.T) { mcmp.Exec("insert into t9(id1, id2, id3) values(1, '1', '1'), (2, '2', '2'), (3, '2', '2'), (4, '3', '3'), (5, '3', '3'), (6, '3', '3')") - mcmp.AssertMatches("SELECT /*vt+ PLANNER=gen4 */ t9.id2 FROM t9 GROUP BY t9.id2 ORDER BY COUNT(t9.id2) DESC", `[[VARCHAR("3")] [VARCHAR("2")] [VARCHAR("1")]]`) + mcmp.AssertMatches("SELECT t9.id2 FROM t9 GROUP BY t9.id2 ORDER BY COUNT(t9.id2) DESC", `[[VARCHAR("3")] [VARCHAR("2")] [VARCHAR("1")]]`) } -func TestAggregateRandom(t *testing.T) { +func TestAggregateAnyValue(t *testing.T) { mcmp, closer := start(t) defer closer() mcmp.Exec("insert into t1(t1_id, name, value, shardKey) values (1, 'name 1', 'value 1', 1), (2, 'name 2', 'value 2', 2)") mcmp.Exec("insert into t2(id, shardKey) values (1, 10), (2, 20)") - mcmp.AssertMatches("SELECT /*vt+ PLANNER=gen4 */ t1.shardKey, t1.name, count(t2.id) FROM t1 JOIN t2 ON t1.value != t2.shardKey GROUP BY t1.t1_id", `[[INT64(1) VARCHAR("name 1") INT64(2)] [INT64(2) VARCHAR("name 2") INT64(2)]]`) + mcmp.AssertMatches("SELECT t1.shardKey, t1.name, count(t2.id) FROM t1 JOIN t2 ON t1.value != t2.shardKey GROUP BY t1.t1_id", `[[INT64(1) VARCHAR("name 1") INT64(2)] [INT64(2) VARCHAR("name 2") INT64(2)]]`) + + mcmp.Exec("set sql_mode=''") + mcmp.AssertMatches("select tbl0.comm, count(*) from emp as tbl0, emp as tbl1 where tbl0.empno = tbl1.deptno", `[[NULL INT64(0)]]`) } // TestAggregateLeftJoin tests that aggregates work with left joins and does not ignore the count when column value does not match the right side table. @@ -395,8 +391,11 @@ func TestAggregateLeftJoin(t *testing.T) { mcmp.AssertMatchesNoOrder("SELECT t1.shardkey FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[INT64(1)] [INT64(0)]]`) mcmp.AssertMatches("SELECT count(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[INT64(2)]]`) + mcmp.AssertMatches("SELECT count(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[INT64(1)]]`) mcmp.AssertMatches("SELECT count(*) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[INT64(2)]]`) mcmp.AssertMatches("SELECT sum(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(1)]]`) + mcmp.AssertMatches("SELECT sum(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(1)]]`) + mcmp.AssertMatches("SELECT count(*) FROM t2 LEFT JOIN t1 ON t1.t1_id = t2.id WHERE IFNULL(t1.name, 'NOTSET') = 'r'", `[[INT64(1)]]`) } // TestScalarAggregate tests validates that only count is returned and no additional field is returned.gst @@ -423,5 +422,145 @@ func TestScalarAggregate(t *testing.T) { defer closer() mcmp.Exec("insert into aggr_test(id, val1, val2) values(1,'a',1), (2,'A',1), (3,'b',1), (4,'c',3), (5,'c',4)") - mcmp.AssertMatches("select /*vt+ PLANNER=gen4 */ count(distinct val1) from aggr_test", `[[INT64(3)]]`) + mcmp.AssertMatches("select count(distinct val1) from aggr_test", `[[INT64(3)]]`) +} + +func TestAggregationRandomOnAnAggregatedValue(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into t10(k, a, b) values (0, 100, 10), (10, 200, 20);") + + mcmp.AssertMatchesNoOrder("select A.a, A.b, (A.a / A.b) as d from (select sum(a) as a, sum(b) as b from t10 where a = 100) A;", + `[[DECIMAL(100) DECIMAL(10) DECIMAL(10.0000)]]`) +} + +func TestBuggyQueries(t *testing.T) { + // These queries have been found to be producing the wrong results by the query fuzzer + // Adding them as end2end tests to make sure we never get them wrong again + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into t10(k, a, b) values (0, 100, 10), (10, 200, 20), (20, null, null)") + + mcmp.AssertMatches("select sum(t1.a) from t10 as t1, t10 as t2", + `[[DECIMAL(900)]]`) + + mcmp.AssertMatches("select t1.a, sum(t1.a), count(*), t1.a, sum(t1.a), count(*) from t10 as t1, t10 as t2 group by t1.a", + "[[NULL NULL INT64(3) NULL NULL INT64(3)] "+ + "[INT32(100) DECIMAL(300) INT64(3) INT32(100) DECIMAL(300) INT64(3)] "+ + "[INT32(200) DECIMAL(600) INT64(3) INT32(200) DECIMAL(600) INT64(3)]]") + + mcmp.Exec("select sum(tbl1.a), min(tbl0.b) from t10 as tbl0, t10 as tbl1 left join t10 as tbl2 on tbl1.a = tbl2.a and tbl1.b = tbl2.k") + mcmp.Exec("select count(*) from t10 left join t10 as t11 on t10.a = t11.b where t11.a") +} + +func TestMinMaxAcrossJoins(t *testing.T) { + mcmp, closer := start(t) + defer closer() + mcmp.Exec("insert into t1(t1_id, name, value, shardKey) values (1, 'name 1', 'value 1', 1), (2, 'name 2', 'value 2', 2)") + mcmp.Exec("insert into t2(id, shardKey) values (1, 10), (2, 20)") + + mcmp.AssertMatchesNoOrder( + `SELECT t1.name, max(t1.shardKey), t2.shardKey, min(t2.id) FROM t1 JOIN t2 ON t1.t1_id != t2.shardKey GROUP BY t1.name, t2.shardKey`, + `[[VARCHAR("name 2") INT64(2) INT64(10) INT64(1)] [VARCHAR("name 1") INT64(1) INT64(10) INT64(1)] [VARCHAR("name 2") INT64(2) INT64(20) INT64(2)] [VARCHAR("name 1") INT64(1) INT64(20) INT64(2)]]`) +} + +func TestComplexAggregation(t *testing.T) { + mcmp, closer := start(t) + defer closer() + mcmp.Exec("insert into t1(t1_id, `name`, `value`, shardkey) values(1,'a1','foo',100), (2,'b1','foo',200), (3,'c1','foo',300), (4,'a1','foo',100), (5,'d1','toto',200), (6,'c1','tata',893), (7,'a1','titi',2380), (8,'b1','tete',12833), (9,'e1','yoyo',783493)") + + mcmp.Exec("set @@sql_mode = ' '") + mcmp.Exec(`SELECT 1+COUNT(t1_id) FROM t1`) + mcmp.Exec(`SELECT COUNT(t1_id)+1 FROM t1`) + mcmp.Exec(`SELECT COUNT(t1_id)+MAX(shardkey) FROM t1`) + mcmp.Exec(`SELECT shardkey, MIN(t1_id)+MAX(t1_id) FROM t1 GROUP BY shardkey`) + mcmp.Exec(`SELECT shardkey + MIN(t1_id)+MAX(t1_id) FROM t1 GROUP BY shardkey`) + mcmp.Exec(`SELECT name+COUNT(t1_id)+1 FROM t1 GROUP BY name`) + mcmp.Exec(`SELECT COUNT(*)+shardkey+MIN(t1_id)+1+MAX(t1_id)*SUM(t1_id)+1+name FROM t1 GROUP BY shardkey, name`) +} + +// TestGroupConcatAggregation tests the group_concat function with vitess doing the aggregation. +func TestGroupConcatAggregation(t *testing.T) { + mcmp, closer := start(t) + defer closer() + mcmp.Exec("insert into t1(t1_id, `name`, `value`, shardkey) values(1,'a1',null,100), (2,'b1','foo',20), (3,'c1','foo',10), (4,'a1','foo',100), (5,'d1','toto',200), (6,'c1',null,893), (10,'a1','titi',2380), (20,'b1','tete',12833), (9,'e1','yoyo',783493)") + mcmp.Exec("insert into t2(id, shardKey) values (1, 10), (2, 20)") + + mQr, vtQr := mcmp.ExecNoCompare(`SELECT group_concat(name) FROM t1`) + compareRow(t, mQr, vtQr, nil, []int{0}) + mQr, vtQr = mcmp.ExecNoCompare(`SELECT group_concat(value) FROM t1 join t2 on t1.shardKey = t2.shardKey `) + compareRow(t, mQr, vtQr, nil, []int{0}) + mQr, vtQr = mcmp.ExecNoCompare(`SELECT group_concat(value) FROM t1 join t2 on t1.t1_id = t2.shardKey `) + compareRow(t, mQr, vtQr, nil, []int{0}) + mQr, vtQr = mcmp.ExecNoCompare(`SELECT group_concat(value) FROM t1 join t2 on t1.shardKey = t2.id `) + compareRow(t, mQr, vtQr, nil, []int{0}) + mQr, vtQr = mcmp.ExecNoCompare(`SELECT group_concat(value), t1.name FROM t1, t2 group by t1.name`) + compareRow(t, mQr, vtQr, []int{1}, []int{0}) +} + +func compareRow(t *testing.T, mRes *sqltypes.Result, vtRes *sqltypes.Result, grpCols []int, fCols []int) { + require.Equal(t, len(mRes.Rows), len(vtRes.Rows), "mysql and vitess result count does not match") + for _, row := range vtRes.Rows { + var grpKey string + for _, col := range grpCols { + grpKey += row[col].String() + } + var foundKey bool + for _, mRow := range mRes.Rows { + var mKey string + for _, col := range grpCols { + mKey += mRow[col].String() + } + if grpKey != mKey { + continue + } + foundKey = true + for _, col := range fCols { + vtFValSplit := strings.Split(row[col].ToString(), ",") + sort.Strings(vtFValSplit) + mFValSplit := strings.Split(mRow[col].ToString(), ",") + sort.Strings(mFValSplit) + require.True(t, slices.Equal(vtFValSplit, mFValSplit), "mysql and vitess result are not same: vitess:%v, mysql:%v", vtRes.Rows, mRes.Rows) + } + } + require.True(t, foundKey, "mysql and vitess result does not same row: vitess:%v, mysql:%v", vtRes.Rows, mRes.Rows) + } +} + +func TestDistinctAggregation(t *testing.T) { + mcmp, closer := start(t) + defer closer() + mcmp.Exec("insert into t1(t1_id, `name`, `value`, shardkey) values(1,'a1','foo',100), (2,'b1','foo',200), (3,'c1','foo',300), (4,'a1','foo',100), (5,'d1','toto',200), (6,'c1','tata',893), (7,'a1','titi',2380), (8,'b1','tete',12833), (9,'e1','yoyo',783493)") + + tcases := []struct { + query string + expectedErr string + }{{ + query: `SELECT COUNT(DISTINCT value), SUM(DISTINCT shardkey) FROM t1`, + expectedErr: "VT12001: unsupported: only one DISTINCT aggregation is allowed in a SELECT: sum(distinct shardkey) (errno 1235) (sqlstate 42000)", + }, { + query: `SELECT a.t1_id, SUM(DISTINCT b.shardkey) FROM t1 a, t1 b group by a.t1_id`, + }, { + query: `SELECT a.value, SUM(DISTINCT b.shardkey) FROM t1 a, t1 b group by a.value`, + }, { + query: `SELECT count(distinct a.value), SUM(DISTINCT b.t1_id) FROM t1 a, t1 b`, + expectedErr: "VT12001: unsupported: only one DISTINCT aggregation is allowed in a SELECT: sum(distinct b.t1_id) (errno 1235) (sqlstate 42000)", + }, { + query: `SELECT a.value, SUM(DISTINCT b.t1_id), min(DISTINCT a.t1_id) FROM t1 a, t1 b group by a.value`, + }, { + query: `SELECT distinct count(*) from t1, (select distinct count(*) from t1) as t2`, + }} + + for _, tc := range tcases { + mcmp.Run(tc.query, func(mcmp *utils.MySQLCompare) { + _, err := mcmp.ExecAllowError(tc.query) + if tc.expectedErr == "" { + require.NoError(t, err) + return + } + require.ErrorContains(t, err, tc.expectedErr) + }) + } } diff --git a/go/test/endtoend/vtgate/queries/aggregation/distinct_test.go b/go/test/endtoend/vtgate/queries/aggregation/distinct_test.go new file mode 100644 index 00000000000..a09808bbf47 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/aggregation/distinct_test.go @@ -0,0 +1,54 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package aggregation + +import ( + "testing" + + "vitess.io/vitess/go/test/endtoend/utils" +) + +func TestDistinct(t *testing.T) { + mcmp, closer := start(t) + defer closer() + mcmp.Exec("insert into t3(id5,id6,id7) values(1,3,3), (2,3,4), (3,3,6), (4,5,7), (5,5,6)") + mcmp.Exec("insert into t7_xxhash(uid,phone) values('1',4), ('2',4), ('3',3), ('4',1), ('5',1)") + mcmp.Exec("insert into aggr_test(id, val1, val2) values(1,'a',1), (2,'A',1), (3,'b',1), (4,'c',3), (5,'c',4)") + mcmp.Exec("insert into aggr_test(id, val1, val2) values(6,'d',null), (7,'e',null), (8,'E',1)") + mcmp.AssertMatches("select distinct val2, count(*) from aggr_test group by val2", `[[NULL INT64(2)] [INT64(1) INT64(4)] [INT64(3) INT64(1)] [INT64(4) INT64(1)]]`) + mcmp.AssertMatches("select distinct id6 from t3 join t7_xxhash on t3.id5 = t7_xxhash.phone", `[[INT64(3)] [INT64(5)]]`) +} + +func TestDistinctIt(t *testing.T) { + // tests more variations of DISTINCT + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into aggr_test(id, val1, val2) values(1,'a',1), (2,'A',1), (3,'b',1), (4,'c',3), (5,'c',4)") + mcmp.Exec("insert into aggr_test(id, val1, val2) values(6,'d',null), (7,'e',null), (8,'E',1)") + + mcmp.AssertMatchesNoOrder("select distinct val1 from aggr_test", `[[VARCHAR("c")] [VARCHAR("d")] [VARCHAR("e")] [VARCHAR("a")] [VARCHAR("b")]]`) + mcmp.AssertMatchesNoOrder("select distinct val2 from aggr_test", `[[INT64(1)] [INT64(4)] [INT64(3)] [NULL]]`) + mcmp.AssertMatchesNoOrder("select distinct id from aggr_test", `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(5)] [INT64(4)] [INT64(6)] [INT64(7)] [INT64(8)]]`) + + if utils.BinaryIsAtVersion(17, "vtgate") { + mcmp.AssertMatches("select /*vt+ PLANNER=Gen4 */ distinct val1 from aggr_test order by val1 desc", `[[VARCHAR("e")] [VARCHAR("d")] [VARCHAR("c")] [VARCHAR("b")] [VARCHAR("a")]]`) + mcmp.AssertMatchesNoOrder("select /*vt+ PLANNER=Gen4 */ distinct val1, count(*) from aggr_test group by val1", `[[VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)] [VARCHAR("d") INT64(1)] [VARCHAR("e") INT64(2)]]`) + mcmp.AssertMatchesNoOrder("select /*vt+ PLANNER=Gen4 */ distinct val1+val2 from aggr_test", `[[NULL] [FLOAT64(1)] [FLOAT64(3)] [FLOAT64(4)]]`) + mcmp.AssertMatchesNoOrder("select /*vt+ PLANNER=Gen4 */ distinct count(*) from aggr_test group by val1", `[[INT64(2)] [INT64(1)]]`) + } +} diff --git a/go/test/endtoend/vtgate/queries/aggregation/schema.sql b/go/test/endtoend/vtgate/queries/aggregation/schema.sql index a538a3dafed..e1489b4bd21 100644 --- a/go/test/endtoend/vtgate/queries/aggregation/schema.sql +++ b/go/test/endtoend/vtgate/queries/aggregation/schema.sql @@ -71,3 +71,29 @@ CREATE TABLE t2 ( PRIMARY KEY (id) ) ENGINE InnoDB; +CREATE TABLE t10 ( + k BIGINT PRIMARY KEY, + a INT, + b INT +); + +CREATE TABLE emp ( + empno bigint NOT NULL, + ename VARCHAR(10), + job VARCHAR(9), + mgr bigint, + hiredate DATE, + sal bigint, + comm bigint, + deptno bigint, + PRIMARY KEY (empno) +) Engine = InnoDB + COLLATE = utf8mb4_general_ci; + +CREATE TABLE dept ( + deptno bigint, + dname VARCHAR(14), + loc VARCHAR(13), + PRIMARY KEY (deptno) +) Engine = InnoDB + COLLATE = utf8mb4_general_ci; \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/aggregation/vschema.json b/go/test/endtoend/vtgate/queries/aggregation/vschema.json index c2d3f133a35..050202aed81 100644 --- a/go/test/endtoend/vtgate/queries/aggregation/vschema.json +++ b/go/test/endtoend/vtgate/queries/aggregation/vschema.json @@ -123,6 +123,30 @@ "name": "hash" } ] + }, + "t10": { + "column_vindexes": [ + { + "column": "k", + "name": "hash" + } + ] + }, + "emp": { + "column_vindexes": [ + { + "column": "deptno", + "name": "hash" + } + ] + }, + "dept": { + "column_vindexes": [ + { + "column": "deptno", + "name": "hash" + } + ] } } } \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/dml/insert_test.go b/go/test/endtoend/vtgate/queries/dml/insert_test.go index a6b5d1a1fc4..867b3b46fc8 100644 --- a/go/test/endtoend/vtgate/queries/dml/insert_test.go +++ b/go/test/endtoend/vtgate/queries/dml/insert_test.go @@ -394,3 +394,60 @@ func TestRedactDupError(t *testing.T) { // inserting same rows, throws error. mcmp.AssertContainsError("insert into order_tbl(region_id, oid, cust_no) select region_id, oid, cust_no from order_tbl", `BindVars: {REDACTED}`) } + +// TestMixedCases test all the cases for insert when lookup column is also the auto increment column. +func TestMixedCases(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + tcases := []struct { + insQuery string + selQuery string + exp string + }{{ + // values are provided for all columns + insQuery: "insert into mixed_tbl(shard_key, lkp_key) values (1, 1000)", + selQuery: "select * from mixed_tbl where lkp_key = 1000", + exp: "[[INT64(1000) INT64(1)]]", + }, { + // lookup column value not provided - auto increment value should be used. + insQuery: "insert into mixed_tbl(shard_key) values (2)", + selQuery: "select * from mixed_tbl where lkp_key = 1", + exp: "[[INT64(1) INT64(2)]]", + }, { + // lookup column value not provided in the select - auto increment value should be used. + insQuery: "insert into mixed_tbl(shard_key) select 3", + selQuery: "select * from mixed_tbl where lkp_key = 2", + exp: "[[INT64(2) INT64(3)]]", + }, { + // lookup column value provided as NULL in the select - auto increment value should be used. + insQuery: "insert into mixed_tbl(shard_key, lkp_key) select 4, null", + selQuery: "select * from mixed_tbl where lkp_key = 3", + exp: "[[INT64(3) INT64(4)]]", + }, { + // values are provided for all column in the select + insQuery: "insert into mixed_tbl(shard_key, lkp_key) select 5, 2000", + selQuery: "select * from mixed_tbl where lkp_key = 2000", + exp: "[[INT64(2000) INT64(5)]]", + }, { + // multiple values are inserted - lookup column value not provided - use auto increment value + insQuery: "insert into mixed_tbl(shard_key) select shard_key from mixed_tbl order by shard_key desc", + selQuery: "select * from mixed_tbl where lkp_key between 4 and 8 order by lkp_key", + exp: "[[INT64(4) INT64(5)] [INT64(5) INT64(4)] [INT64(6) INT64(3)] [INT64(7) INT64(2)] [INT64(8) INT64(1)]]", + }, { + // partial values are provided from lookup column - use auto increment value where missing. + insQuery: "insert into mixed_tbl(shard_key, lkp_key) (select 2, 3000 union select 5, null)", + selQuery: "select * from mixed_tbl where lkp_key in (9, 3000) order by lkp_key", + exp: "[[INT64(9) INT64(5)] [INT64(3000) INT64(2)]]", + }} + + for _, tc := range tcases { + t.Run(tc.insQuery, func(t *testing.T) { + utils.Exec(t, mcmp.VtConn, tc.insQuery) + utils.AssertMatches(t, mcmp.VtConn, tc.selQuery, tc.exp) + }) + } + + // final check count on the lookup vindex table. + utils.AssertMatches(t, mcmp.VtConn, "select count(*) from lkp_mixed_idx", "[[INT64(12)]]") +} diff --git a/go/test/endtoend/vtgate/queries/dml/main_test.go b/go/test/endtoend/vtgate/queries/dml/main_test.go index 7fb361837f8..c00e27fe3a0 100644 --- a/go/test/endtoend/vtgate/queries/dml/main_test.go +++ b/go/test/endtoend/vtgate/queries/dml/main_test.go @@ -57,6 +57,9 @@ var ( }, "auto_seq": { "type": "sequence" + }, + "mixed_seq": { + "type": "sequence" } } }` @@ -130,7 +133,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { tables := []string{ "s_tbl", "num_vdx_tbl", "user_tbl", "order_tbl", "oevent_tbl", "oextra_tbl", - "auto_tbl", "oid_vdx_tbl", "unq_idx", "nonunq_idx", "u_tbl", + "auto_tbl", "oid_vdx_tbl", "unq_idx", "nonunq_idx", "u_tbl", "mixed_tbl", "lkp_map_idx", } for _, table := range tables { // TODO (@frouioui): following assertions produce different results between MySQL and Vitess diff --git a/go/test/endtoend/vtgate/queries/dml/sharded_schema.sql b/go/test/endtoend/vtgate/queries/dml/sharded_schema.sql index a6298b6e63c..3310724d420 100644 --- a/go/test/endtoend/vtgate/queries/dml/sharded_schema.sql +++ b/go/test/endtoend/vtgate/queries/dml/sharded_schema.sql @@ -71,4 +71,18 @@ create table nonunq_idx id bigint, keyspace_id varbinary(20), primary key (nonunq_col, id) -) Engine = InnoDB; \ No newline at end of file +) Engine = InnoDB; + +create table mixed_tbl +( + lkp_key bigint, + shard_key bigint, + primary key (lkp_key) +) Engine = InnoDB; + +create table lkp_mixed_idx +( + lkp_key bigint, + keyspace_id varbinary(20), + primary key (lkp_key) +) Engine = InnoDB; diff --git a/go/test/endtoend/vtgate/queries/dml/unsharded_schema.sql b/go/test/endtoend/vtgate/queries/dml/unsharded_schema.sql index 3a3d1f53602..4d2ad06618a 100644 --- a/go/test/endtoend/vtgate/queries/dml/unsharded_schema.sql +++ b/go/test/endtoend/vtgate/queries/dml/unsharded_schema.sql @@ -14,6 +14,14 @@ create table auto_seq primary key (id) ) comment 'vitess_sequence' Engine = InnoDB; +create table mixed_seq +( + id int default 0, + next_id bigint default null, + cache bigint default null, + primary key (id) +) comment 'vitess_sequence' Engine = InnoDB; + create table u_tbl ( id bigint, @@ -25,3 +33,5 @@ insert into user_seq(id, next_id, cache) values (0, 1, 1000); insert into auto_seq(id, next_id, cache) values (0, 666, 1000); +insert into mixed_seq(id, next_id, cache) +values (0, 1, 1000); \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/dml/vschema.json b/go/test/endtoend/vtgate/queries/dml/vschema.json index 9f725955f89..a42a93d7403 100644 --- a/go/test/endtoend/vtgate/queries/dml/vschema.json +++ b/go/test/endtoend/vtgate/queries/dml/vschema.json @@ -41,6 +41,16 @@ "ignore_nulls": "true" }, "owner": "auto_tbl" + }, + "lkp_map_vdx": { + "type": "consistent_lookup_unique", + "params": { + "table": "lkp_mixed_idx", + "from": "lkp_key", + "to": "keyspace_id", + "ignore_nulls": "true" + }, + "owner": "mixed_tbl" } }, "tables": { @@ -154,6 +164,30 @@ "name": "hash" } ] + }, + "mixed_tbl": { + "auto_increment": { + "column": "lkp_key", + "sequence": "uks.mixed_seq" + }, + "column_vindexes": [ + { + "column": "shard_key", + "name": "hash" + }, + { + "column": "lkp_key", + "name": "lkp_map_vdx" + } + ] + }, + "lkp_mixed_idx": { + "column_vindexes": [ + { + "column": "lkp_key", + "name": "hash" + } + ] } } } \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go b/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go index 6c56bd19991..f52e2eff532 100644 --- a/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go +++ b/go/test/endtoend/vtgate/queries/foundrows/found_rows_test.go @@ -44,7 +44,7 @@ func TestFoundRows(t *testing.T) { // Wait for schema tracking to run and mark t2 as authoritative before we try out the queries. // Some of the queries depend on schema tracking to run successfully to be able to replace the StarExpr // in the select clause with the definitive column list. - err = utils.WaitForAuthoritative(t, clusterInstance.VtgateProcess, keyspaceName, "t2") + err = utils.WaitForAuthoritative(t, keyspaceName, "t2", clusterInstance.VtgateProcess.ReadVSchema) require.NoError(t, err) runTests := func(workload string) { mcmp.AssertFoundRowsValue("select * from t2", workload, 5) diff --git a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go index 0b7b72a4f25..337ec3d2ff9 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go +++ b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go @@ -99,8 +99,8 @@ func TestInformationSchemaQueryGetsRoutedToTheRightTableAndKeyspace(t *testing.T utils.Exec(t, mcmp.VtConn, "insert into t1(id1, id2) values (1, 1), (2, 2), (3,3), (4,4)") - _ = utils.Exec(t, mcmp.VtConn, "SELECT /*vt+ PLANNER=gen4 */ * FROM t1000") // test that the routed table is available to us - result := utils.Exec(t, mcmp.VtConn, "SELECT /*vt+ PLANNER=gen4 */ * FROM information_schema.tables WHERE table_schema = database() and table_name='t1000'") + _ = utils.Exec(t, mcmp.VtConn, "SELECT * FROM t1000") // test that the routed table is available to us + result := utils.Exec(t, mcmp.VtConn, "SELECT * FROM information_schema.tables WHERE table_schema = database() and table_name='t1000'") assert.NotEmpty(t, result.Rows) } @@ -111,7 +111,8 @@ func TestFKConstraintUsingInformationSchema(t *testing.T) { query := "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk on fk.constraint_schema = rc.constraint_schema and fk.constraint_name = rc.constraint_name where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = 't7_fk' and rc.constraint_schema = database() and rc.table_name = 't7_fk'" mcmp.AssertMatchesAny(query, `[[VARBINARY("t7_xxhash") VARCHAR("uid") VARCHAR("t7_uid") VARCHAR("t7_fk_ibfk_1") BINARY("CASCADE") BINARY("SET NULL")]]`, - `[[VARCHAR("t7_xxhash") VARCHAR("uid") VARCHAR("t7_uid") VARCHAR("t7_fk_ibfk_1") VARCHAR("CASCADE") VARCHAR("SET NULL")]]`) + `[[VARCHAR("t7_xxhash") VARCHAR("uid") VARCHAR("t7_uid") VARCHAR("t7_fk_ibfk_1") VARCHAR("CASCADE") VARCHAR("SET NULL")]]`, + `[[VARCHAR("t7_xxhash") VARCHAR("uid") VARCHAR("t7_uid") VARCHAR("t7_fk_ibfk_1") BINARY("CASCADE") BINARY("SET NULL")]]`) } func TestConnectWithSystemSchema(t *testing.T) { diff --git a/go/test/endtoend/vtgate/queries/informationschema/main_test.go b/go/test/endtoend/vtgate/queries/informationschema/main_test.go index c15c546dfc9..06c5b188d18 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/main_test.go +++ b/go/test/endtoend/vtgate/queries/informationschema/main_test.go @@ -72,7 +72,7 @@ func TestMain(m *testing.M) { VSchema: vschema, } clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal"} - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "0.1"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal"} err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false) if err != nil { return 1 @@ -88,7 +88,6 @@ func TestMain(m *testing.M) { return 1 } - clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--enable_system_settings=true") // Start vtgate err = clusterInstance.StartVtgate() if err != nil { diff --git a/go/test/endtoend/vtgate/queries/kill/kill_test.go b/go/test/endtoend/vtgate/queries/kill/kill_test.go new file mode 100644 index 00000000000..ad57722dd97 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/kill/kill_test.go @@ -0,0 +1,246 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kill + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/utils" +) + +// TestKillConnection kills its own connection and checks the error message received. +func TestKillOwnConnection(t *testing.T) { + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + _, err = utils.ExecAllowError(t, conn, fmt.Sprintf("kill %d", conn.ConnectionID)) + require.NoError(t, err) + + // the connection should be closed. + _, err = utils.ExecAllowError(t, conn, "select 1") + require.ErrorContains(t, err, "EOF (errno 2013) (sqlstate HY000)") +} + +// TestKillDifferentConnection kills different connection and check relevant error messages. +func TestKillDifferentConnection(t *testing.T) { + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + killConn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer killConn.Close() + + // connection does not exist + _, err = utils.ExecAllowError(t, killConn, "kill 12345") + require.ErrorContains(t, err, "Unknown thread id: 12345 (errno 1094) (sqlstate HY000)") + + // connection exist + _, err = utils.ExecAllowError(t, killConn, fmt.Sprintf("kill %d", conn.ConnectionID)) + require.NoError(t, err) + + // executing on closed connection + _, err = utils.ExecAllowError(t, conn, "select 1") + require.ErrorContains(t, err, "EOF (errno 2013) (sqlstate HY000)") +} + +// TestKillOwnQuery kills the kill statement itself +func TestKillOwnQuery(t *testing.T) { + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + _, err = utils.ExecAllowError(t, conn, fmt.Sprintf("kill query %d", conn.ConnectionID)) + // TODO: does not really change anything, but expect to receive Queery Interrupted error + // "(errno 1317) (sqlstate 70100)" + require.NoError(t, err) +} + +// TestKillDifferentConnectionQuery kills query on different connection and check relevant error messages. +func TestKillDifferentConnectionQuery(t *testing.T) { + setupData(t, false) + defer dropData(t) + + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + killConn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer killConn.Close() + + // killing query on non-existent connection + _, err = utils.ExecAllowError(t, killConn, "kill query 12345") + require.ErrorContains(t, err, "Unknown thread id: 12345 (errno 1094) (sqlstate HY000)") + + done := make(chan error) + go func() { + // 20 seconds sleep. Should be stopped by kill statement. + _, err := utils.ExecAllowError(t, conn, "select sleep(20) from test") + done <- err + }() + + for { + select { + case execErr := <-done: + require.ErrorContains(t, execErr, "context canceled (errno 1317) (sqlstate 70100)") + return + case <-time.After(100 * time.Millisecond): + _, err = utils.ExecAllowError(t, killConn, fmt.Sprintf("kill query %d", conn.ConnectionID)) + require.NoError(t, err) + case <-time.After(5 * time.Second): + t.Fatal("test did not complete in 5 seconds.") + } + } +} + +// TestKillOnHungQuery test that any hung query should return. +func TestKillOnHungQuery(t *testing.T) { + + execFunc := func(conn *mysql.Conn) error { + utils.Exec(t, conn, "begin") + _, err := utils.ExecAllowError(t, conn, "insert into test(id, msg, extra) values (1, 'a', 'e')") + require.Error(t, err) + return err + } + + t.Run("connection close", func(t *testing.T) { + testHungQuery(t, execFunc, func(hungConn *mysql.Conn, _ *mysql.Conn) { + // closing the hung query connection. + hungConn.Close() + }, "(errno 2013) (sqlstate HY000)") + }) + + t.Run("connection kill", func(t *testing.T) { + testHungQuery(t, execFunc, func(hungConn *mysql.Conn, killConn *mysql.Conn) { + // kill the hung connection + utils.ExecAllowError(t, killConn, fmt.Sprintf("kill %d", hungConn.ConnectionID)) + }, "context canceled") + }) + + t.Run("query kill", func(t *testing.T) { + testHungQuery(t, execFunc, func(hungConn *mysql.Conn, killConn *mysql.Conn) { + // kill the hung query + utils.ExecAllowError(t, killConn, fmt.Sprintf("kill query %d", hungConn.ConnectionID)) + }, "context canceled") + }) +} + +func testHungQuery(t *testing.T, execFunc func(*mysql.Conn) error, killFunc func(*mysql.Conn, *mysql.Conn), errMsgs ...string) { + killConn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer killConn.Close() + + utils.Exec(t, killConn, "begin") + utils.Exec(t, killConn, "insert into test(id, msg, extra) values (1, 'a', 'e')") + + hungConn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer hungConn.Close() + + runQueryInGoRoutineAndCheckError(t, hungConn, killConn, execFunc, killFunc, errMsgs) +} + +// TestKillStmtOnHugeData tests different kill scenario on huge data. +func TestKillStmtOnHugeData(t *testing.T) { + setupData(t, true) + defer dropData(t) + + execFunc := func(conn *mysql.Conn) error { + _, err := utils.ExecWithRowCount(t, conn, "select * from test", 640000) + require.Error(t, err) + return err + } + + t.Run("oltp - kill conn", func(t *testing.T) { + testHugeData(t, "oltp", execFunc, func(conn *mysql.Conn, killConn *mysql.Conn) { + utils.ExecAllowError(t, killConn, fmt.Sprintf("kill query %d", conn.ConnectionID)) + }, "context canceled (errno 1317) (sqlstate 70100)") + }) + + t.Run("oltp - kill query", func(t *testing.T) { + testHugeData(t, "oltp", execFunc, func(conn *mysql.Conn, killConn *mysql.Conn) { + utils.ExecAllowError(t, killConn, fmt.Sprintf("kill query %d", conn.ConnectionID)) + }, "(errno 1317) (sqlstate 70100)") + }) + + t.Run("olap - kill conn", func(t *testing.T) { + testHugeData(t, "olap", execFunc, func(conn *mysql.Conn, killConn *mysql.Conn) { + utils.ExecAllowError(t, killConn, fmt.Sprintf("kill query %d", conn.ConnectionID)) + }, "context canceled (errno 1317) (sqlstate 70100)", "EOF (errno 2013) (sqlstate HY000)") + }) + + t.Run("olap - kill query", func(t *testing.T) { + testHugeData(t, "olap", execFunc, func(conn *mysql.Conn, killConn *mysql.Conn) { + utils.ExecAllowError(t, killConn, fmt.Sprintf("kill query %d", conn.ConnectionID)) + }, "context canceled (errno 1317) (sqlstate 70100)", "EOF (errno 2013) (sqlstate HY000)") + }) +} + +func testHugeData(t *testing.T, workload string, execFunc func(*mysql.Conn) error, killFunc func(*mysql.Conn, *mysql.Conn), errMsgs ...string) { + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + utils.Exec(t, conn, fmt.Sprintf("set workload = %s", workload)) + + killConn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer killConn.Close() + utils.Exec(t, killConn, fmt.Sprintf("set workload = %s", workload)) + + runQueryInGoRoutineAndCheckError(t, conn, killConn, execFunc, killFunc, errMsgs) +} + +func runQueryInGoRoutineAndCheckError(t *testing.T, conn *mysql.Conn, killConn *mysql.Conn, execFunc func(*mysql.Conn) error, killFunc func(*mysql.Conn, *mysql.Conn), errMsgs []string) { + done := make(chan bool) + go func() { + err := execFunc(conn) + // if exec has failed, marking channel done to fail fast. + if t.Failed() { + done <- true + } + // going through all the expected error messages and if it matches any then test passes. + for _, errMsg := range errMsgs { + if strings.Contains(err.Error(), errMsg) { + done <- true + return + } + } + require.Failf(t, "error message does not match", "%v does not contain any of %v", err.Error(), errMsgs) + done <- true + }() + + totalTime := time.After(5 * time.Second) + for { + select { + case <-done: + return + case <-time.After(20 * time.Millisecond): + killFunc(conn, killConn) + case <-totalTime: + t.Fatal("test did not complete in 5 seconds.") + } + } +} diff --git a/go/test/endtoend/vtgate/queries/kill/main_test.go b/go/test/endtoend/vtgate/queries/kill/main_test.go new file mode 100644 index 00000000000..836603c91ee --- /dev/null +++ b/go/test/endtoend/vtgate/queries/kill/main_test.go @@ -0,0 +1,148 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kill + +import ( + "context" + _ "embed" + "flag" + "fmt" + "math/rand" + "os" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/vt/vtgate/planbuilder" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + cell = "zone1" + hostname = "localhost" + ks = "ks" + + //go:embed schema.sql + schema string + + //go:embed vschema.json + vschema string +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + if err := clusterInstance.StartTopo(); err != nil { + return 1 + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: ks, + SchemaSQL: schema, + VSchema: vschema, + } + var maxGrpcSize int64 = 256 * 1024 * 1024 + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, + "--queryserver-config-max-result-size", "10000000", + "--grpc_max_message_size", strconv.FormatInt(maxGrpcSize, 10)) + if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false); err != nil { + return 1 + } + + // Start vtgate + clusterInstance.VtGatePlannerVersion = planbuilder.Gen4 + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--grpc_max_message_size", strconv.FormatInt(maxGrpcSize, 10), + "--max_memory_rows", "999999", + "--allow-kill-statement") + if err := clusterInstance.StartVtgate(); err != nil { + return 1 + } + + vtParams = clusterInstance.GetVTParams(ks) + + return m.Run() + }() + os.Exit(exitCode) +} + +func setupData(t *testing.T, huge bool) { + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + initialRow := 9999 + multiplier := 32 + if !huge { + initialRow = 4 + multiplier = 0 + } + r1 := getRandomString(10) + r2 := getRandomString(20) + r3 := getRandomString(30) + r4 := getRandomString(40) + + for i := 0; i < initialRow; i += 4 { + utils.Exec(t, conn, fmt.Sprintf("insert into test(id, msg, extra) values (%d, '%s', '%s'),(%d, '%s', '%s'),(%d, '%s', '%s'),(%d, '%s', '%s')", + i, r1, r2, + i+1, r2, r3, + i+2, r3, r4, + i+3, r4, r1)) + } + if !huge { + utils.AssertMatches(t, conn, `select count(*), min(id), max(id) from test`, `[[INT64(4) INT64(0) INT64(3)]]`) + return + } + + utils.AssertMatches(t, conn, `select count(*), min(id), max(id) from test`, `[[INT64(10000) INT64(0) INT64(9999)]]`) + for i := 1; i < multiplier; i = i << 1 { + utils.Exec(t, conn, fmt.Sprintf("insert into test(id, msg, extra) select id+%d, msg, extra from test", (initialRow+1)*i)) + } + utils.AssertMatches(t, conn, `select count(*), min(id), max(id) from test`, `[[INT64(320000) INT64(0) INT64(319999)]]`) +} + +func dropData(t *testing.T) { + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + utils.Exec(t, conn, "drop table if exists test") + utils.Exec(t, conn, schema) +} + +func getRandomString(size int) string { + var str strings.Builder + + for i := 0; i < size; i++ { + str.WriteByte(byte((rand.Int() % 26) + 97)) + } + + return str.String() +} diff --git a/go/test/endtoend/vtgate/queries/kill/schema.sql b/go/test/endtoend/vtgate/queries/kill/schema.sql new file mode 100644 index 00000000000..21a059f69ac --- /dev/null +++ b/go/test/endtoend/vtgate/queries/kill/schema.sql @@ -0,0 +1,16 @@ +create table test +( + id bigint not null, + msg varchar(50) not null, + extra varchar(100), + primary key (id), + index(msg) +) ENGINE=InnoDB; + +create table test_idx +( + msg varchar(50) not null, + id bigint not null, + keyspace_id varbinary(50), + primary key (msg, id) +) ENGINE=InnoDB; \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/kill/vschema.json b/go/test/endtoend/vtgate/queries/kill/vschema.json new file mode 100644 index 00000000000..3173d8c7819 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/kill/vschema.json @@ -0,0 +1,42 @@ +{ + "sharded": true, + "vindexes": { + "unicode_loose_xxhash" : { + "type": "unicode_loose_xxhash" + }, + "xxhash" : { + "type": "xxhash" + }, + "test_vdx": { + "type": "consistent_lookup", + "params": { + "table": "test_idx", + "from": "msg,id", + "to": "keyspace_id" + }, + "owner": "test" + } + }, + "tables": { + "test": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + }, + { + "columns": ["msg", "id"], + "name": "test_vdx" + } + ] + }, + "test_idx": { + "column_vindexes": [ + { + "column": "msg", + "name": "unicode_loose_xxhash" + } + ] + } + } +} diff --git a/go/test/endtoend/vtgate/queries/misc/main_test.go b/go/test/endtoend/vtgate/queries/misc/main_test.go index de2d00219b6..a3858284884 100644 --- a/go/test/endtoend/vtgate/queries/misc/main_test.go +++ b/go/test/endtoend/vtgate/queries/misc/main_test.go @@ -61,9 +61,8 @@ func TestMain(m *testing.M) { return 1 } - clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-config-max-result-size", "1000000", - "--queryserver-config-query-timeout", "200", - "--queryserver-config-query-pool-timeout", "200") + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, + "--queryserver-config-max-result-size", "1000000") // Start Unsharded keyspace ukeyspace := &cluster.Keyspace{ Name: uks, @@ -85,7 +84,6 @@ func TestMain(m *testing.M) { return 1 } - clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--enable_system_settings=true", "--query-timeout=100") // Start vtgate err = clusterInstance.StartVtgate() if err != nil { diff --git a/go/test/endtoend/vtgate/queries/misc/misc_test.go b/go/test/endtoend/vtgate/queries/misc/misc_test.go index 8115d46a53e..0bd8f930946 100644 --- a/go/test/endtoend/vtgate/queries/misc/misc_test.go +++ b/go/test/endtoend/vtgate/queries/misc/misc_test.go @@ -17,10 +17,13 @@ limitations under the License. package misc import ( + "database/sql" "fmt" + "strconv" "strings" "testing" + _ "github.com/go-sql-driver/mysql" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -33,7 +36,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { require.NoError(t, err) deleteAll := func() { - tables := []string{"t1"} + tables := []string{"t1", "uks.unsharded"} for _, table := range tables { _, _ = mcmp.ExecAndIgnore("delete from " + table) } @@ -93,75 +96,6 @@ func TestInvalidDateTimeTimestampVals(t *testing.T) { require.Error(t, err) } -func TestQueryTimeoutWithDual(t *testing.T) { - mcmp, closer := start(t) - defer closer() - - _, err := utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ PLANNER=gen4 */ sleep(0.04) from dual") - assert.NoError(t, err) - _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ PLANNER=gen4 */ sleep(0.24) from dual") - assert.Error(t, err) - _, err = utils.ExecAllowError(t, mcmp.VtConn, "set @@session.query_timeout=20") - require.NoError(t, err) - _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ PLANNER=gen4 */ sleep(0.04) from dual") - assert.Error(t, err) - _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ PLANNER=gen4 */ sleep(0.01) from dual") - assert.NoError(t, err) - _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ PLANNER=gen4 QUERY_TIMEOUT_MS=500 */ sleep(0.24) from dual") - assert.NoError(t, err) - _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ PLANNER=gen4 QUERY_TIMEOUT_MS=10 */ sleep(0.04) from dual") - assert.Error(t, err) - _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ PLANNER=gen4 QUERY_TIMEOUT_MS=10 */ sleep(0.001) from dual") - assert.NoError(t, err) -} - -func TestQueryTimeoutWithTables(t *testing.T) { - mcmp, closer := start(t) - defer closer() - - // unsharded - utils.Exec(t, mcmp.VtConn, "insert /*vt+ QUERY_TIMEOUT_MS=1000 */ into uks.unsharded(id1) values (1),(2),(3),(4),(5)") - for i := 0; i < 12; i++ { - utils.Exec(t, mcmp.VtConn, "insert /*vt+ QUERY_TIMEOUT_MS=1000 */ into uks.unsharded(id1) select id1+5 from uks.unsharded") - } - - utils.Exec(t, mcmp.VtConn, "select count(*) from uks.unsharded where id1 > 31") - utils.Exec(t, mcmp.VtConn, "select /*vt+ PLANNER=gen4 QUERY_TIMEOUT_MS=100 */ count(*) from uks.unsharded where id1 > 31") - - // the query usually takes more than 5ms to return. So this should fail. - _, err := utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ PLANNER=gen4 QUERY_TIMEOUT_MS=1 */ count(*) from uks.unsharded where id1 > 31") - require.Error(t, err) - assert.Contains(t, err.Error(), "context deadline exceeded") - assert.Contains(t, err.Error(), "(errno 1317) (sqlstate 70100)") - - // sharded - for i := 0; i < 300000; i += 1000 { - var str strings.Builder - for j := 1; j <= 1000; j++ { - if j == 1 { - str.WriteString(fmt.Sprintf("(%d)", i*1000+j)) - continue - } - str.WriteString(fmt.Sprintf(",(%d)", i*1000+j)) - } - utils.Exec(t, mcmp.VtConn, fmt.Sprintf("insert /*vt+ QUERY_TIMEOUT_MS=1000 */ into t1(id1) values %s", str.String())) - } - // too much data added in the loop, do drop and recreate the table. - defer func() { - mcmp.Exec("drop table t1") - mcmp.Exec(schemaSQL) - }() - - utils.Exec(t, mcmp.VtConn, "select count(*) from t1 where id1 > 31") - utils.Exec(t, mcmp.VtConn, "select /*vt+ PLANNER=gen4 QUERY_TIMEOUT_MS=100 */ count(*) from t1 where id1 > 31") - - // the query usually takes more than 5ms to return. So this should fail. - _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ PLANNER=gen4 QUERY_TIMEOUT_MS=1 */ count(*) from t1 where id1 > 31") - require.Error(t, err) - assert.Contains(t, err.Error(), "context deadline exceeded") - assert.Contains(t, err.Error(), "(errno 1317) (sqlstate 70100)") -} - // TestIntervalWithMathFunctions tests that the Interval keyword can be used with math functions. func TestIntervalWithMathFunctions(t *testing.T) { mcmp, closer := start(t) @@ -170,8 +104,8 @@ func TestIntervalWithMathFunctions(t *testing.T) { // Set the time zone explicitly to UTC, otherwise the output of FROM_UNIXTIME is going to be dependent // on the time zone of the system. mcmp.Exec("SET time_zone = '+00:00'") - mcmp.AssertMatches("select '2020-01-01' + interval month(DATE_SUB(FROM_UNIXTIME(1234), interval 1 month))-1 month", `[[CHAR("2020-12-01")]]`) - mcmp.AssertMatches("select DATE_ADD(MIN(FROM_UNIXTIME(1673444922)),interval -DAYOFWEEK(MIN(FROM_UNIXTIME(1673444922)))+1 DAY)", `[[DATETIME("2023-01-08 13:48:42")]]`) + mcmp.AssertMatches("select '2020-01-01' + interval month(date_sub(FROM_UNIXTIME(1234), interval 1 month))-1 month", `[[CHAR("2020-12-01")]]`) + mcmp.AssertMatches("select date_add(MIN(FROM_UNIXTIME(1673444922)),interval -DAYOFWEEK(MIN(FROM_UNIXTIME(1673444922)))+1 DAY)", `[[DATETIME("2023-01-08 13:48:42")]]`) } // TestCast tests the queries that contain the cast function. @@ -202,3 +136,119 @@ func TestOuterJoinWithPredicate(t *testing.T) { mcmp.AssertMatchesNoOrder("select A.id1, B.id2 from t1 as A left join t1 as B on A.id1*10 = B.id2 WHERE B.id2 NOT BETWEEN 20 AND 30", `[[INT64(0) INT64(0)] [INT64(1) INT64(10)] [INT64(4) INT64(40)]]`) } + +// This test ensures that we support PREPARE statement with 65530 parameters. +// It opens a MySQL connection using the go-mysql driver and execute a select query +// it then checks the result contains the proper rows and that it's not failing. +func TestHighNumberOfParams(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into t1(id1) values (0), (1), (2), (3), (4)") + + paramCount := 65530 + + // create the value and argument slices used to build the prepare stmt + var vals []any + var params []string + for i := 0; i < paramCount; i++ { + vals = append(vals, strconv.Itoa(i)) + params = append(params, "?") + } + + // connect to the vitess cluster + db, err := sql.Open("mysql", fmt.Sprintf("@tcp(%s:%v)/%s", vtParams.Host, vtParams.Port, vtParams.DbName)) + require.NoError(t, err) + defer db.Close() + + // run the query + r, err := db.Query(fmt.Sprintf("SELECT id1 FROM t1 WHERE id1 in (%s) ORDER BY id1 ASC", strings.Join(params, ", ")), vals...) + require.NoError(t, err) + defer r.Close() + + // check the results we got, we should get 5 rows with each: 0, 1, 2, 3, 4 + // count is the row number we are currently visiting, also correspond to the + // column value we expect. + count := 0 + for r.Next() { + j := -1 + err := r.Scan(&j) + require.NoError(t, err) + require.Equal(t, j, count) + count++ + } + require.Equal(t, 5, count) +} + +func TestPrepareStatements(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into t1(id1, id2) values (0,0), (1,0), (2,0)") + + // prepare query with equal sharding key + mcmp.Exec(`prepare prep_pk from 'select count(*) from t1 where id1 = ?'`) + mcmp.AssertMatches(`execute prep_pk using @id1`, `[[INT64(0)]]`) + mcmp.Exec(`set @id1 = 1`) + mcmp.AssertMatches(`execute prep_pk using @id1`, `[[INT64(1)]]`) + + // prepare query with equal non sharding key + mcmp.Exec(`prepare prep_non_pk from 'select id1, id2 from t1 where id2 = ?'`) + mcmp.Exec(`set @id2 = 0`) + mcmp.AssertMatches(`execute prep_non_pk using @id1`, `[]`) + mcmp.AssertMatchesNoOrder(`execute prep_non_pk using @id2`, `[[INT64(0) INT64(0)] [INT64(1) INT64(0)] [INT64(2) INT64(0)]]`) + + // prepare query with in on sharding key + mcmp.Exec(`prepare prep_in_pk from 'select id1, id2 from t1 where id1 in (?, ?)'`) + mcmp.AssertMatches(`execute prep_in_pk using @id1, @id1`, `[[INT64(1) INT64(0)]]`) + mcmp.AssertMatchesNoOrder(`execute prep_in_pk using @id1, @id2`, `[[INT64(0) INT64(0)] [INT64(1) INT64(0)]]`) + + // Fail by providing wrong number of arguments + _, err := mcmp.ExecAllowAndCompareError(`execute prep_in_pk using @id1, @id1, @id`) + incorrectCount := "VT03025: Incorrect arguments to EXECUTE" + assert.ErrorContains(t, err, incorrectCount) + _, err = mcmp.ExecAllowAndCompareError(`execute prep_in_pk using @id1`) + assert.ErrorContains(t, err, incorrectCount) + _, err = mcmp.ExecAllowAndCompareError(`execute prep_in_pk`) + assert.ErrorContains(t, err, incorrectCount) + + mcmp.Exec(`prepare prep_art from 'select 1+?, 10/?'`) + mcmp.Exec(`set @x1 = 1, @x2 = 2.0, @x3 = "v", @x4 = 9999999999999999999999999999`) + + // We are not matching types and precision with mysql at the moment, so not comparing with `mcmp` + // This is because of the difference in how MySQL executes a raw query with literal values and + // the PREPARE/EXEC way that is missing type info at the PREPARE stage + utils.AssertMatches(t, mcmp.VtConn, `execute prep_art using @x1, @x1`, `[[INT64(2) DECIMAL(10.0000)]]`) + utils.AssertMatches(t, mcmp.VtConn, `execute prep_art using @x2, @x2`, `[[DECIMAL(3.0) DECIMAL(5.0000)]]`) + utils.AssertMatches(t, mcmp.VtConn, `execute prep_art using @x3, @x3`, `[[FLOAT64(1) NULL]]`) + utils.AssertMatches(t, mcmp.VtConn, `execute prep_art using @x4, @x4`, `[[DECIMAL(10000000000000000000000000000) DECIMAL(0.0000)]]`) + + mcmp.Exec(`select 1+1, 10/1 from t1 limit 1`) + mcmp.Exec(`select 1+2.0, 10/2.0 from t1 limit 1`) + mcmp.Exec(`select 1+'v', 10/'v' from t1 limit 1`) + mcmp.Exec(`select 1+9999999999999999999999999999, 10/9999999999999999999999999999 from t1 limit 1`) + + mcmp.Exec("deallocate prepare prep_art") + _, err = mcmp.ExecAllowAndCompareError(`execute prep_art using @id1, @id1`) + assert.ErrorContains(t, err, "VT09011: Unknown prepared statement handler (prep_art) given to EXECUTE") + + _, err = mcmp.ExecAllowAndCompareError("deallocate prepare prep_art") + assert.ErrorContains(t, err, "VT09011: Unknown prepared statement handler (prep_art) given to DEALLOCATE PREPARE") +} + +// TestBuggyOuterJoin validates inconsistencies around outer joins, adding these tests to stop regressions. +func TestBuggyOuterJoin(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into t1(id1, id2) values (1,2), (42,5), (5, 42)") + mcmp.Exec("select t1.id1, t2.id1 from t1 left join t1 as t2 on t2.id1 = t2.id2") +} + +func TestLeftJoinUsingUnsharded(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + utils.Exec(t, mcmp.VtConn, "insert into uks.unsharded(id1) values (1),(2),(3),(4),(5)") + utils.Exec(t, mcmp.VtConn, "select * from uks.unsharded as A left join uks.unsharded as B using(id1)") +} diff --git a/go/test/endtoend/vtgate/queries/normalize/normalize_test.go b/go/test/endtoend/vtgate/queries/normalize/normalize_test.go index dd603ad5d1e..52e30accf03 100644 --- a/go/test/endtoend/vtgate/queries/normalize/normalize_test.go +++ b/go/test/endtoend/vtgate/queries/normalize/normalize_test.go @@ -25,11 +25,11 @@ import ( "testing" "time" - "vitess.io/vitess/go/test/endtoend/utils" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/mysql" ) @@ -39,45 +39,31 @@ func TestNormalizeAllFields(t *testing.T) { defer conn.Close() insertQuery := `insert into t1 values (1, "chars", "variable chars", x'73757265', 0x676F, 0.33, 9.99, 1, "1976-06-08", "small", "b", "{\"key\":\"value\"}", point(1,5), b'011', 0b0101)` - normalizedInsertQuery := `insert into t1 values (:vtg1, :vtg2, :vtg3, :vtg4, :vtg5, :vtg6, :vtg7, :vtg8, :vtg9, :vtg10, :vtg11, :vtg12, point(:vtg13, :vtg14), :vtg15, :vtg16)` + normalizedInsertQuery := `insert into t1 values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */, :vtg3 /* VARCHAR */, :vtg4 /* HEXVAL */, :vtg5 /* HEXNUM */, :vtg6 /* DECIMAL */, :vtg7 /* DECIMAL */, :vtg8 /* INT64 */, :vtg9 /* VARCHAR */, :vtg10 /* VARCHAR */, :vtg11 /* VARCHAR */, :vtg12 /* VARCHAR */, point(:vtg13 /* INT64 */, :vtg14 /* INT64 */), :vtg15 /* HEXNUM */, :vtg16 /* HEXNUM */)` selectQuery := "select * from t1" utils.Exec(t, conn, insertQuery) qr := utils.Exec(t, conn, selectQuery) assert.Equal(t, 1, len(qr.Rows), "wrong number of table rows, expected 1 but had %d. Results: %v", len(qr.Rows), qr.Rows) // Now need to figure out the best way to check the normalized query in the planner cache... - results, err := getPlanCache(fmt.Sprintf("%s:%d", vtParams.Host, clusterInstance.VtgateProcess.Port)) - require.Nil(t, err) - found := false - for _, record := range results { - key := record["Key"].(string) - if key == normalizedInsertQuery { - found = true - break - } - } - assert.True(t, found, "correctly normalized record not found in planner cache") + results := getPlanCache(t, fmt.Sprintf("%s:%d", vtParams.Host, clusterInstance.VtgateProcess.Port)) + assert.Contains(t, results, normalizedInsertQuery) } -func getPlanCache(vtgateHostPort string) ([]map[string]any, error) { - var results []map[string]any +func getPlanCache(t *testing.T, vtgateHostPort string) map[string]any { + var results map[string]any client := http.Client{ Timeout: 10 * time.Second, } resp, err := client.Get(fmt.Sprintf("http://%s/debug/query_plans", vtgateHostPort)) - if err != nil { - return results, err - } + require.NoError(t, err) defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) - if err != nil { - return results, err - } + require.NoError(t, err) err = json.Unmarshal(body, &results) - if err != nil { - return results, err - } + require.NoErrorf(t, err, "failed to unmarshal results. contents:\n%s\n\n", body) - return results, nil + return results } diff --git a/go/test/endtoend/vtgate/queries/orderby/orderby_test.go b/go/test/endtoend/vtgate/queries/orderby/orderby_test.go index 5c6c7503a97..445a4d5a32f 100644 --- a/go/test/endtoend/vtgate/queries/orderby/orderby_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/orderby_test.go @@ -66,6 +66,10 @@ func TestOrderBy(t *testing.T) { mcmp.AssertMatches("select id1, id2 from t4 order by id2 desc", `[[INT64(5) VARCHAR("test")] [INT64(8) VARCHAR("F")] [INT64(7) VARCHAR("e")] [INT64(6) VARCHAR("d")] [INT64(4) VARCHAR("c")] [INT64(3) VARCHAR("b")] [INT64(2) VARCHAR("Abc")] [INT64(1) VARCHAR("a")]]`) // test ordering of int column mcmp.AssertMatches("select id1, id2 from t4 order by id1 desc", `[[INT64(8) VARCHAR("F")] [INT64(7) VARCHAR("e")] [INT64(6) VARCHAR("d")] [INT64(5) VARCHAR("test")] [INT64(4) VARCHAR("c")] [INT64(3) VARCHAR("b")] [INT64(2) VARCHAR("Abc")] [INT64(1) VARCHAR("a")]]`) + // test ordering of complex column + if utils.BinaryIsAtVersion(17, "vtgate") { + mcmp.AssertMatches("select /*vt+ PLANNER=Gen4 */ id1, id2 from t4 order by reverse(id2) desc", `[[INT64(5) VARCHAR("test")] [INT64(8) VARCHAR("F")] [INT64(7) VARCHAR("e")] [INT64(6) VARCHAR("d")] [INT64(2) VARCHAR("Abc")] [INT64(4) VARCHAR("c")] [INT64(3) VARCHAR("b")] [INT64(1) VARCHAR("a")]]`) + } defer func() { utils.Exec(t, mcmp.VtConn, "set workload = oltp") @@ -75,4 +79,7 @@ func TestOrderBy(t *testing.T) { utils.Exec(t, mcmp.VtConn, "set workload = olap") mcmp.AssertMatches("select id1, id2 from t4 order by id2 desc", `[[INT64(5) VARCHAR("test")] [INT64(8) VARCHAR("F")] [INT64(7) VARCHAR("e")] [INT64(6) VARCHAR("d")] [INT64(4) VARCHAR("c")] [INT64(3) VARCHAR("b")] [INT64(2) VARCHAR("Abc")] [INT64(1) VARCHAR("a")]]`) mcmp.AssertMatches("select id1, id2 from t4 order by id1 desc", `[[INT64(8) VARCHAR("F")] [INT64(7) VARCHAR("e")] [INT64(6) VARCHAR("d")] [INT64(5) VARCHAR("test")] [INT64(4) VARCHAR("c")] [INT64(3) VARCHAR("b")] [INT64(2) VARCHAR("Abc")] [INT64(1) VARCHAR("a")]]`) + if utils.BinaryIsAtVersion(17, "vtgate") { + mcmp.AssertMatches("select /*vt+ PLANNER=Gen4 */ id1, id2 from t4 order by reverse(id2) desc", `[[INT64(5) VARCHAR("test")] [INT64(8) VARCHAR("F")] [INT64(7) VARCHAR("e")] [INT64(6) VARCHAR("d")] [INT64(2) VARCHAR("Abc")] [INT64(4) VARCHAR("c")] [INT64(3) VARCHAR("b")] [INT64(1) VARCHAR("a")]]`) + } } diff --git a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go index 77a2ceeb519..a20c7ad54c6 100644 --- a/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/without_schematracker/orderby_test.go @@ -54,5 +54,5 @@ func TestSimpleOrderBy(t *testing.T) { mcmp.Exec("insert into user(id, name) values (0,'Apa'),(1,'Banan'),(3,'Ceasar'),(4,'David')") mcmp.AssertMatches(`SELECT name FROM user WHERE id in (0,4) ORDER BY name ASC`, - `[[VARCHAR("Apa")] [VARCHAR("David)]]`) + `[[VARCHAR("Apa")] [VARCHAR("David")]]`) } diff --git a/go/test/endtoend/vtgate/schematracker/unauthorized/unauthorized_test.go b/go/test/endtoend/vtgate/queries/random/main_test.go similarity index 52% rename from go/test/endtoend/vtgate/schematracker/unauthorized/unauthorized_test.go rename to go/test/endtoend/vtgate/queries/random/main_test.go index 276664c74fd..bfef910f036 100644 --- a/go/test/endtoend/vtgate/schematracker/unauthorized/unauthorized_test.go +++ b/go/test/endtoend/vtgate/queries/random/main_test.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,19 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package unauthorized +package random import ( - "context" _ "embed" "flag" + "fmt" "os" - "path" - "strings" "testing" - "time" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/endtoend/utils" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" @@ -35,14 +32,15 @@ import ( var ( clusterInstance *cluster.LocalProcessCluster vtParams mysql.ConnParams - KeyspaceName = "ks" - Cell = "test" + mysqlParams mysql.ConnParams + keyspaceName = "ks_random" + cell = "test_random" //go:embed schema.sql - SchemaSQL string + schemaSQL string //go:embed vschema.json - VSchema string + vschema string ) func TestMain(m *testing.M) { @@ -50,7 +48,7 @@ func TestMain(m *testing.M) { flag.Parse() exitCode := func() int { - clusterInstance = cluster.NewCluster(Cell, "localhost") + clusterInstance = cluster.NewCluster(cell, "localhost") defer clusterInstance.Teardown() // Start topo server @@ -61,56 +59,36 @@ func TestMain(m *testing.M) { // Start keyspace keyspace := &cluster.Keyspace{ - Name: KeyspaceName, - SchemaSQL: SchemaSQL, - VSchema: VSchema, + Name: keyspaceName, + SchemaSQL: schemaSQL, + VSchema: vschema, } clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal"} - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "0.1", "--queryserver-config-strict-table-acl", "--queryserver-config-acl-exempt-acl", "userData1", "--table-acl-config", "dummy.json"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "0.1"} err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false) if err != nil { return 1 } + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--enable_system_settings=true") // Start vtgate err = clusterInstance.StartVtgate() if err != nil { return 1 } - vtParams = mysql.ConnParams{ - Host: clusterInstance.Hostname, - Port: clusterInstance.VtgateMySQLPort, + vtParams = clusterInstance.GetVTParams(keyspaceName) + + // create mysql instance and connection parameters + conn, closer, err := utils.NewMySQL(clusterInstance, keyspaceName, schemaSQL) + if err != nil { + fmt.Println(err) + return 1 } + defer closer() + mysqlParams = conn + return m.Run() }() os.Exit(exitCode) } - -func TestSchemaTrackingError(t *testing.T) { - ctx := context.Background() - conn, err := mysql.Connect(ctx, &vtParams) - require.NoError(t, err) - defer conn.Close() - - logDir := clusterInstance.VtgateProcess.LogDir - - timeout := time.After(1 * time.Minute) - var present bool - for { - select { - case <-timeout: - t.Error("timeout waiting for schema tracking error") - case <-time.After(1 * time.Second): - // check info logs - all, err := os.ReadFile(path.Join(logDir, "vtgate.WARNING")) - require.NoError(t, err) - if strings.Contains(string(all), "Table ACL might be enabled, --schema_change_signal_user needs to be passed to VTGate for schema tracking to work. Check 'schema tracking' docs on vitess.io") { - present = true - } - } - if present { - break - } - } -} diff --git a/go/test/endtoend/vtgate/queries/random/query_gen.go b/go/test/endtoend/vtgate/queries/random/query_gen.go new file mode 100644 index 00000000000..3f8fccb05bb --- /dev/null +++ b/go/test/endtoend/vtgate/queries/random/query_gen.go @@ -0,0 +1,639 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package random + +import ( + "fmt" + "math/rand" + "slices" + + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" +) + +// this file contains the structs and functions to generate random queries + +// to test only a particular type of query, delete the corresponding testFailingQueries clause +// there should be a comment indicating the type of query being disabled +// if true then known failing query types are still generated by randomQuery() +const testFailingQueries = false + +type ( + // selectGenerator generates select statements + selectGenerator struct { + r *rand.Rand + genConfig sqlparser.ExprGeneratorConfig + maxTables int + maxAggrs int + maxGBs int + schemaTables []tableT + sel *sqlparser.Select + } + + // queryGenerator generates queries, which can either be unions or select statements + queryGenerator struct { + stmt sqlparser.SelectStatement + selGen *selectGenerator + } + + column struct { + name string + // TODO: perhaps remove tableName and always pass columns through a tableT + tableName string + typ string + } + + tableT struct { + // the tableT struct can be used to represent the schema of a table or a derived table + // in the former case tableExpr will be a sqlparser.TableName, in the latter a sqlparser.DerivedTable + // in order to create a query with a derived table, its AST form is retrieved from tableExpr + // once the derived table is aliased, alias is updated + tableExpr sqlparser.SimpleTableExpr + alias string + cols []column + } +) + +var _ sqlparser.ExprGenerator = (*tableT)(nil) +var _ sqlparser.ExprGenerator = (*column)(nil) +var _ sqlparser.QueryGenerator = (*selectGenerator)(nil) +var _ sqlparser.QueryGenerator = (*queryGenerator)(nil) + +func newQueryGenerator(r *rand.Rand, genConfig sqlparser.ExprGeneratorConfig, maxTables, maxAggrs, maxGBs int, schemaTables []tableT) *queryGenerator { + return &queryGenerator{ + selGen: newSelectGenerator(r, genConfig, maxTables, maxAggrs, maxGBs, schemaTables), + } +} + +func newSelectGenerator(r *rand.Rand, genConfig sqlparser.ExprGeneratorConfig, maxTables, maxAggrs, maxGBs int, schemaTables []tableT) *selectGenerator { + if maxTables <= 0 { + log.Fatalf("maxTables must be at least 1, currently %d\n", maxTables) + } + + return &selectGenerator{ + r: r, + genConfig: genConfig, + maxTables: maxTables, + maxAggrs: maxAggrs, + maxGBs: maxGBs, + schemaTables: schemaTables, + sel: &sqlparser.Select{}, + } +} + +// getASTExpr returns the AST representation of a column +func (c *column) getASTExpr() sqlparser.Expr { + return sqlparser.NewColNameWithQualifier(c.name, sqlparser.NewTableName(c.tableName)) +} + +// getName returns the alias if it is nonempty +// if the alias is nonempty and tableExpr is of type sqlparser.TableName, +// then getName returns Name from tableExpr +// otherwise getName returns an empty string +func (t *tableT) getName() string { + if t.alias != "" { + return t.alias + } else if tName, ok := t.tableExpr.(sqlparser.TableName); ok { + return sqlparser.String(tName.Name) + } + + return "" +} + +// setAlias sets the alias for t, as well as setting the tableName for all columns in cols +func (t *tableT) setAlias(newName string) { + t.alias = newName + for i := range t.cols { + t.cols[i].tableName = newName + } +} + +// addColumns adds columns to t, and automatically assigns each column.tableName +// this makes it unnatural to modify tableName +func (t *tableT) addColumns(col ...column) { + for i := range col { + col[i].tableName = t.getName() + t.cols = append(t.cols, col[i]) + } +} + +func (t *tableT) clone() *tableT { + return &tableT{ + tableExpr: sqlparser.CloneSimpleTableExpr(t.tableExpr), + alias: t.alias, + cols: slices.Clone(t.cols), + } +} + +func (c *column) Generate(_ *rand.Rand, genConfig sqlparser.ExprGeneratorConfig) sqlparser.Expr { + if c.typ == genConfig.Type || genConfig.Type == "" { + return c.getASTExpr() + } + + return nil +} + +func (t *tableT) Generate(r *rand.Rand, genConfig sqlparser.ExprGeneratorConfig) sqlparser.Expr { + colsCopy := slices.Clone(t.cols) + + for len(colsCopy) > 0 { + idx := r.Intn(len(colsCopy)) + randCol := colsCopy[idx] + if randCol.typ == genConfig.Type || genConfig.Type == "" { + return randCol.getASTExpr() + } + + // delete randCol from colsCopy + colsCopy[idx] = colsCopy[len(colsCopy)-1] + colsCopy = colsCopy[:len(colsCopy)-1] + } + + return nil +} + +// Generate generates a subquery based on sg +// TODO: currently unused; generate random expressions with union +func (sg *selectGenerator) Generate(r *rand.Rand, genConfig sqlparser.ExprGeneratorConfig) sqlparser.Expr { + var schemaTablesCopy []tableT + for _, tbl := range sg.schemaTables { + schemaTablesCopy = append(schemaTablesCopy, *tbl.clone()) + } + + newSG := newQueryGenerator(r, genConfig, sg.maxTables, sg.maxAggrs, sg.maxGBs, schemaTablesCopy) + newSG.randomQuery() + + return &sqlparser.Subquery{Select: newSG.selGen.sel} +} + +// Generate generates a subquery based on qg +func (qg *queryGenerator) Generate(r *rand.Rand, genConfig sqlparser.ExprGeneratorConfig) sqlparser.Expr { + var schemaTablesCopy []tableT + for _, tbl := range qg.selGen.schemaTables { + schemaTablesCopy = append(schemaTablesCopy, *tbl.clone()) + } + + newQG := newQueryGenerator(r, genConfig, qg.selGen.maxTables, qg.selGen.maxAggrs, qg.selGen.maxGBs, schemaTablesCopy) + newQG.randomQuery() + + return &sqlparser.Subquery{Select: newQG.stmt} +} + +func (sg *selectGenerator) IsQueryGenerator() {} +func (qg *queryGenerator) IsQueryGenerator() {} + +func (qg *queryGenerator) randomQuery() { + if qg.selGen.r.Intn(10) < 1 && testFailingQueries { + qg.createUnion() + } else { + qg.selGen.randomSelect() + qg.stmt = qg.selGen.sel + } +} + +// createUnion creates a simple UNION or UNION ALL; no LIMIT or ORDER BY +func (qg *queryGenerator) createUnion() { + union := &sqlparser.Union{} + + if qg.selGen.r.Intn(2) < 1 { + union.Distinct = true + } + + // specify between 1-4 columns + qg.selGen.genConfig.NumCols = qg.selGen.r.Intn(4) + 1 + + qg.randomQuery() + union.Left = qg.stmt + qg.randomQuery() + union.Right = qg.stmt + + qg.stmt = union +} + +func (sg *selectGenerator) randomSelect() { + // make sure the random expressions can generally not contain aggregates; change appropriately + sg.genConfig = sg.genConfig.CannotAggregateConfig() + + sg.sel = &sqlparser.Select{} + sg.sel.SetComments(sqlparser.Comments{"/*vt+ PLANNER=Gen4 */"}) + + // select distinct (fails with group by bigint) + isDistinct := sg.r.Intn(2) < 1 + if isDistinct { + sg.sel.MakeDistinct() + } + + // create both tables and join at the same time since both occupy the from clause + tables, isJoin := sg.createTablesAndJoin() + + // canAggregate determines if the query will have + // aggregate columns, group by, and having + canAggregate := sg.r.Intn(4) < 3 + + var ( + grouping, aggregates []column + newTable tableT + ) + // TODO: distinct makes vitess think there is grouping on aggregation columns + if canAggregate { + if testFailingQueries || !isDistinct { + // group by + if !sg.genConfig.SingleRow { + grouping = sg.createGroupBy(tables) + } + } + + // having + isHaving := sg.r.Intn(2) < 1 + // TODO: having creates a lot of results mismatched + if isHaving && testFailingQueries { + sg.createHavingPredicates(grouping) + } + + // alias the grouping columns + grouping = sg.aliasGroupingColumns(grouping) + + // aggregation columns + aggregates = sg.createAggregations(tables) + + // add the grouping and aggregation to newTable + newTable.addColumns(grouping...) + newTable.addColumns(aggregates...) + } + + // where + sg.createWherePredicates(tables) + + // add random expression to select + // TODO: random expressions cause a lot of failures + isRandomExpr := sg.r.Intn(2) < 1 && testFailingQueries + + // TODO: selecting a random expression potentially with columns creates + // TODO: only_full_group_by related errors in Vitess + var exprGenerators []sqlparser.ExprGenerator + if canAggregate && testFailingQueries { + exprGenerators = slice.Map(tables, func(t tableT) sqlparser.ExprGenerator { return &t }) + // add scalar subqueries + if sg.r.Intn(10) < 1 { + exprGenerators = append(exprGenerators, sg) + } + } + + // make sure we have at least one select expression + for isRandomExpr || len(sg.sel.SelectExprs) == 0 { + // TODO: if the random expression is an int literal, + // TODO: and if the query is (potentially) an aggregate query, + // TODO: then we must group by the random expression, + // TODO: but we cannot do this for int literals, + // TODO: so we loop until we get a non-int-literal random expression + // TODO: this is necessary because grouping by the alias (crandom0) currently fails on vitess + randomExpr := sg.getRandomExpr(exprGenerators...) + literal, ok := randomExpr.(*sqlparser.Literal) + isIntLiteral := ok && literal.Type == sqlparser.IntVal + if isIntLiteral && canAggregate { + continue + } + + // TODO: select distinct [literal] fails + sg.sel.Distinct = false + + // alias randomly + col := sg.randomlyAlias(randomExpr, "crandom0") + newTable.addColumns(col) + + // make sure to add the random expression to group by for only_full_group_by + if canAggregate { + sg.sel.AddGroupBy(randomExpr) + } + + break + } + + // can add both aggregate and grouping columns to order by + // TODO: order fails with distinct and outer joins + isOrdered := sg.r.Intn(2) < 1 && (!isDistinct || testFailingQueries) && (!isJoin || testFailingQueries) + if isOrdered || (!canAggregate && sg.genConfig.SingleRow) /* TODO: might be redundant */ { + sg.createOrderBy() + } + + // only add a limit if there is an ordering + // TODO: limit fails a lot + isLimit := sg.r.Intn(2) < 1 && len(sg.sel.OrderBy) > 0 && testFailingQueries + if isLimit || (!canAggregate && sg.genConfig.SingleRow) /* TODO: might be redundant */ { + sg.createLimit() + } + + // this makes sure the query generated has the correct number of columns (sg.selGen.genConfig.numCols) + newTable = sg.matchNumCols(tables, newTable, canAggregate) + + // add new table to schemaTables + newTable.tableExpr = sqlparser.NewDerivedTable(false, sg.sel) + sg.schemaTables = append(sg.schemaTables, newTable) + + // derived tables (partially unsupported) + if sg.r.Intn(10) < 1 { + sg.randomSelect() + } +} + +func (sg *selectGenerator) createTablesAndJoin() ([]tableT, bool) { + var tables []tableT + // add at least one of original emp/dept tables + tables = append(tables, sg.schemaTables[sg.r.Intn(2)]) + + tables[0].setAlias("tbl0") + sg.sel.From = append(sg.sel.From, newAliasedTable(tables[0], "tbl0")) + + numTables := sg.r.Intn(sg.maxTables) + for i := 0; i < numTables; i++ { + tables = append(tables, randomEl(sg.r, sg.schemaTables)) + alias := fmt.Sprintf("tbl%d", i+1) + sg.sel.From = append(sg.sel.From, newAliasedTable(tables[i+1], alias)) + tables[i+1].setAlias(alias) + } + + // TODO: outer joins produce results mismatched + isJoin := sg.r.Intn(2) < 1 && testFailingQueries + if isJoin { + // TODO: do nested joins + newTable := randomEl(sg.r, sg.schemaTables) + alias := fmt.Sprintf("tbl%d", numTables+1) + newTable.setAlias(alias) + tables = append(tables, newTable) + + sg.createJoin(tables) + } + + return tables, isJoin +} + +// creates a left join (without the condition) between the last table in sel and newTable +// tables should have one more table than sel +func (sg *selectGenerator) createJoin(tables []tableT) { + n := len(sg.sel.From) + if len(tables) != n+1 { + log.Fatalf("sel has %d tables and tables has %d tables", len(sg.sel.From), n) + } + + joinPredicate := sqlparser.AndExpressions(sg.createJoinPredicates(tables)...) + joinCondition := sqlparser.NewJoinCondition(joinPredicate, nil) + newTable := newAliasedTable(tables[n], fmt.Sprintf("tbl%d", n)) + sg.sel.From[n-1] = sqlparser.NewJoinTableExpr(sg.sel.From[n-1], getRandomJoinType(sg.r), newTable, joinCondition) +} + +// returns 1-3 random expressions based on the last two elements of tables +// tables should have at least two elements +func (sg *selectGenerator) createJoinPredicates(tables []tableT) sqlparser.Exprs { + if len(tables) < 2 { + log.Fatalf("tables has %d elements, needs at least 2", len(tables)) + } + + exprGenerators := []sqlparser.ExprGenerator{&tables[len(tables)-2], &tables[len(tables)-1]} + // add scalar subqueries + // TODO: subqueries fail + if sg.r.Intn(10) < 1 && testFailingQueries { + exprGenerators = append(exprGenerators, sg) + } + + return sg.createRandomExprs(1, 3, exprGenerators...) +} + +// returns the grouping columns as []column +func (sg *selectGenerator) createGroupBy(tables []tableT) (grouping []column) { + if sg.maxGBs <= 0 { + return + } + numGBs := sg.r.Intn(sg.maxGBs + 1) + for i := 0; i < numGBs; i++ { + tblIdx := sg.r.Intn(len(tables)) + col := randomEl(sg.r, tables[tblIdx].cols) + // TODO: grouping by a date column sometimes errors + if col.typ == "date" && !testFailingQueries { + continue + } + sg.sel.GroupBy = append(sg.sel.GroupBy, col.getASTExpr()) + + // add to select + if sg.r.Intn(2) < 1 { + sg.sel.SelectExprs = append(sg.sel.SelectExprs, newAliasedColumn(col, "")) + grouping = append(grouping, col) + } + } + + return +} + +// aliasGroupingColumns randomly aliases the grouping columns in the SelectExprs +func (sg *selectGenerator) aliasGroupingColumns(grouping []column) []column { + if len(grouping) != len(sg.sel.SelectExprs) { + log.Fatalf("grouping (length: %d) and sg.sel.SelectExprs (length: %d) should have the same length at this point", len(grouping), len(sg.sel.SelectExprs)) + } + + for i := range grouping { + if sg.r.Intn(2) < 1 { + if aliasedExpr, ok := sg.sel.SelectExprs[i].(*sqlparser.AliasedExpr); ok { + alias := fmt.Sprintf("cgroup%d", i) + aliasedExpr.SetAlias(alias) + grouping[i].name = alias + } + } + } + + return grouping +} + +// returns the aggregation columns as three types: sqlparser.SelectExprs, []column +func (sg *selectGenerator) createAggregations(tables []tableT) (aggregates []column) { + exprGenerators := slice.Map(tables, func(t tableT) sqlparser.ExprGenerator { return &t }) + // add scalar subqueries + // TODO: subqueries fail + if sg.r.Intn(10) < 1 && testFailingQueries { + exprGenerators = append(exprGenerators, sg) + } + + sg.genConfig = sg.genConfig.IsAggregateConfig() + aggrExprs := sg.createRandomExprs(0, sg.maxAggrs, exprGenerators...) + sg.genConfig = sg.genConfig.CannotAggregateConfig() + + for i, expr := range aggrExprs { + col := sg.randomlyAlias(expr, fmt.Sprintf("caggr%d", i)) + aggregates = append(aggregates, col) + } + + return +} + +// orders on all grouping expressions and on random SelectExprs +func (sg *selectGenerator) createOrderBy() { + // always order on grouping expressions + for _, expr := range sg.sel.GroupBy { + sg.sel.OrderBy = append(sg.sel.OrderBy, sqlparser.NewOrder(expr, getRandomOrderDirection(sg.r))) + } + + // randomly order on SelectExprs + for _, selExpr := range sg.sel.SelectExprs { + if aliasedExpr, ok := selExpr.(*sqlparser.AliasedExpr); ok && sg.r.Intn(2) < 1 { + literal, ok := aliasedExpr.Expr.(*sqlparser.Literal) + isIntLiteral := ok && literal.Type == sqlparser.IntVal + if isIntLiteral { + continue + } + sg.sel.OrderBy = append(sg.sel.OrderBy, sqlparser.NewOrder(aliasedExpr.Expr, getRandomOrderDirection(sg.r))) + } + } +} + +// returns 0-2 random expressions based on tables +func (sg *selectGenerator) createWherePredicates(tables []tableT) { + exprGenerators := slice.Map(tables, func(t tableT) sqlparser.ExprGenerator { return &t }) + // add scalar subqueries + // TODO: subqueries fail + if sg.r.Intn(10) < 1 && testFailingQueries { + exprGenerators = append(exprGenerators, sg) + } + + predicates := sg.createRandomExprs(0, 2, exprGenerators...) + sg.sel.AddWhere(sqlparser.AndExpressions(predicates...)) +} + +// creates predicates for the having clause comparing a column to a random expression +func (sg *selectGenerator) createHavingPredicates(grouping []column) { + exprGenerators := slice.Map(grouping, func(c column) sqlparser.ExprGenerator { return &c }) + // add scalar subqueries + // TODO: subqueries fail + if sg.r.Intn(10) < 1 && testFailingQueries { + exprGenerators = append(exprGenerators, sg) + } + + sg.genConfig = sg.genConfig.CanAggregateConfig() + predicates := sg.createRandomExprs(0, 2, exprGenerators...) + sg.genConfig = sg.genConfig.CannotAggregateConfig() + + sg.sel.AddHaving(sqlparser.AndExpressions(predicates...)) +} + +// returns between minExprs and maxExprs random expressions using generators +func (sg *selectGenerator) createRandomExprs(minExprs, maxExprs int, generators ...sqlparser.ExprGenerator) (predicates sqlparser.Exprs) { + if minExprs > maxExprs { + log.Fatalf("minExprs is greater than maxExprs; minExprs: %d, maxExprs: %d\n", minExprs, maxExprs) + } else if maxExprs <= 0 { + return + } + numPredicates := sg.r.Intn(maxExprs-minExprs+1) + minExprs + for i := 0; i < numPredicates; i++ { + predicates = append(predicates, sg.getRandomExpr(generators...)) + } + + return +} + +// getRandomExpr returns a random expression +func (sg *selectGenerator) getRandomExpr(generators ...sqlparser.ExprGenerator) sqlparser.Expr { + var g *sqlparser.Generator + if generators == nil { + g = sqlparser.NewGenerator(sg.r, 2) + } else { + g = sqlparser.NewGenerator(sg.r, 2, generators...) + } + + return g.Expression(sg.genConfig.SingleRowConfig().SetNumCols(1)) +} + +// creates sel.Limit +func (sg *selectGenerator) createLimit() { + if sg.genConfig.SingleRow { + sg.sel.Limit = sqlparser.NewLimitWithoutOffset(1) + return + } + + limitNum := sg.r.Intn(10) + if sg.r.Intn(2) < 1 { + offset := sg.r.Intn(10) + sg.sel.Limit = sqlparser.NewLimit(offset, limitNum) + } else { + sg.sel.Limit = sqlparser.NewLimitWithoutOffset(limitNum) + } +} + +// randomlyAlias randomly aliases expr with alias alias, adds it to sel.SelectExprs, and returns the column created +func (sg *selectGenerator) randomlyAlias(expr sqlparser.Expr, alias string) column { + var col column + if sg.r.Intn(2) < 1 { + alias = "" + col.name = sqlparser.String(expr) + } else { + col.name = alias + } + sg.sel.SelectExprs = append(sg.sel.SelectExprs, sqlparser.NewAliasedExpr(expr, alias)) + + return col +} + +// matchNumCols makes sure sg.sel.SelectExprs and newTable both have the same number of cols: sg.genConfig.NumCols +func (sg *selectGenerator) matchNumCols(tables []tableT, newTable tableT, canAggregate bool) tableT { + // remove SelectExprs and newTable.cols randomly until there are sg.genConfig.NumCols amount + for len(sg.sel.SelectExprs) > sg.genConfig.NumCols && sg.genConfig.NumCols > 0 { + // select a random index and remove it from SelectExprs and newTable + idx := sg.r.Intn(len(sg.sel.SelectExprs)) + + sg.sel.SelectExprs[idx] = sg.sel.SelectExprs[len(sg.sel.SelectExprs)-1] + sg.sel.SelectExprs = sg.sel.SelectExprs[:len(sg.sel.SelectExprs)-1] + + newTable.cols[idx] = newTable.cols[len(newTable.cols)-1] + newTable.cols = newTable.cols[:len(newTable.cols)-1] + } + + // alternatively, add random expressions until there are sg.genConfig.NumCols amount + if sg.genConfig.NumCols > len(sg.sel.SelectExprs) { + diff := sg.genConfig.NumCols - len(sg.sel.SelectExprs) + exprs := sg.createRandomExprs(diff, diff, + slice.Map(tables, func(t tableT) sqlparser.ExprGenerator { return &t })...) + + for i, expr := range exprs { + col := sg.randomlyAlias(expr, fmt.Sprintf("crandom%d", i+1)) + newTable.addColumns(col) + + if canAggregate { + sg.sel.AddGroupBy(expr) + } + } + } + + return newTable +} + +func getRandomOrderDirection(r *rand.Rand) sqlparser.OrderDirection { + // asc, desc + return randomEl(r, []sqlparser.OrderDirection{0, 1}) +} + +func getRandomJoinType(r *rand.Rand) sqlparser.JoinType { + // normal, straight, left, right, natural, natural left, natural right + return randomEl(r, []sqlparser.JoinType{0, 1, 2, 3, 4, 5, 6}) +} + +func randomEl[K any](r *rand.Rand, in []K) K { + return in[r.Intn(len(in))] +} + +func newAliasedTable(tbl tableT, alias string) *sqlparser.AliasedTableExpr { + return sqlparser.NewAliasedTableExpr(tbl.tableExpr, alias) +} + +func newAliasedColumn(col column, alias string) *sqlparser.AliasedExpr { + return sqlparser.NewAliasedExpr(col.getASTExpr(), alias) +} diff --git a/go/test/endtoend/vtgate/queries/random/query_gen_test.go b/go/test/endtoend/vtgate/queries/random/query_gen_test.go new file mode 100644 index 00000000000..fe8aa6f6492 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/random/query_gen_test.go @@ -0,0 +1,62 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package random + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" +) + +// TestSeed makes sure that the seed is deterministic +func TestSeed(t *testing.T) { + // specify the schema (that is defined in schema.sql) + schemaTables := []tableT{ + {tableExpr: sqlparser.NewTableName("emp")}, + {tableExpr: sqlparser.NewTableName("dept")}, + } + schemaTables[0].addColumns([]column{ + {name: "empno", typ: "bigint"}, + {name: "ename", typ: "varchar"}, + {name: "job", typ: "varchar"}, + {name: "mgr", typ: "bigint"}, + {name: "hiredate", typ: "date"}, + {name: "sal", typ: "bigint"}, + {name: "comm", typ: "bigint"}, + {name: "deptno", typ: "bigint"}, + }...) + schemaTables[1].addColumns([]column{ + {name: "deptno", typ: "bigint"}, + {name: "dname", typ: "varchar"}, + {name: "loc", typ: "varchar"}, + }...) + + seed := int64(1689757943775102000) + genConfig := sqlparser.NewExprGeneratorConfig(sqlparser.CannotAggregate, "", 0, false) + qg := newQueryGenerator(rand.New(rand.NewSource(seed)), genConfig, 2, 2, 2, schemaTables) + qg.randomQuery() + query1 := sqlparser.String(qg.stmt) + qg = newQueryGenerator(rand.New(rand.NewSource(seed)), genConfig, 2, 2, 2, schemaTables) + qg.randomQuery() + query2 := sqlparser.String(qg.stmt) + fmt.Println(query1) + require.Equal(t, query1, query2) +} diff --git a/go/test/endtoend/vtgate/queries/random/random_expr_test.go b/go/test/endtoend/vtgate/queries/random/random_expr_test.go new file mode 100644 index 00000000000..450169a8d9f --- /dev/null +++ b/go/test/endtoend/vtgate/queries/random/random_expr_test.go @@ -0,0 +1,59 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package random + +import ( + "math/rand" + "testing" + "time" + + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/sqlparser" +) + +// This test tests that generating random expressions with a schema does not panic +func TestRandomExprWithTables(t *testing.T) { + // specify the schema (that is defined in schema.sql) + schemaTables := []tableT{ + {tableExpr: sqlparser.NewTableName("emp")}, + {tableExpr: sqlparser.NewTableName("dept")}, + } + schemaTables[0].addColumns([]column{ + {name: "empno", typ: "bigint"}, + {name: "ename", typ: "varchar"}, + {name: "job", typ: "varchar"}, + {name: "mgr", typ: "bigint"}, + {name: "hiredate", typ: "date"}, + {name: "sal", typ: "bigint"}, + {name: "comm", typ: "bigint"}, + {name: "deptno", typ: "bigint"}, + }...) + schemaTables[1].addColumns([]column{ + {name: "deptno", typ: "bigint"}, + {name: "dname", typ: "varchar"}, + {name: "loc", typ: "varchar"}, + }...) + + for i := 0; i < 100; i++ { + + seed := time.Now().UnixNano() + r := rand.New(rand.NewSource(seed)) + genConfig := sqlparser.NewExprGeneratorConfig(sqlparser.CanAggregate, "", 0, false) + g := sqlparser.NewGenerator(r, 3, slice.Map(schemaTables, func(t tableT) sqlparser.ExprGenerator { return &t })...) + g.Expression(genConfig) + } +} diff --git a/go/test/endtoend/vtgate/queries/random/random_test.go b/go/test/endtoend/vtgate/queries/random/random_test.go new file mode 100644 index 00000000000..7b0ab93c165 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/random/random_test.go @@ -0,0 +1,371 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package random + +import ( + "fmt" + "math/rand" + "strings" + "testing" + "time" + + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/vt/sqlparser" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" +) + +// this test uses the AST defined in the sqlparser package to randomly generate queries + +// if true then execution will always stop on a "must fix" error: a results mismatched or EOF +const stopOnMustFixError = false + +func start(t *testing.T) (utils.MySQLCompare, func()) { + mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) + require.NoError(t, err) + + deleteAll := func() { + _, _ = utils.ExecAllowError(t, mcmp.VtConn, "set workload = oltp") + + tables := []string{"emp", "dept"} + for _, table := range tables { + _, _ = mcmp.ExecAndIgnore("delete from " + table) + } + } + + deleteAll() + + // disable only_full_group_by + // mcmp.Exec("set sql_mode=''") + + // insert data + mcmp.Exec("INSERT INTO emp(empno, ename, job, mgr, hiredate, sal, comm, deptno) VALUES (7369,'SMITH','CLERK',7902,'1980-12-17',800,NULL,20), (7499,'ALLEN','SALESMAN',7698,'1981-02-20',1600,300,30), (7521,'WARD','SALESMAN',7698,'1981-02-22',1250,500,30), (7566,'JONES','MANAGER',7839,'1981-04-02',2975,NULL,20), (7654,'MARTIN','SALESMAN',7698,'1981-09-28',1250,1400,30), (7698,'BLAKE','MANAGER',7839,'1981-05-01',2850,NULL,30), (7782,'CLARK','MANAGER',7839,'1981-06-09',2450,NULL,10), (7788,'SCOTT','ANALYST',7566,'1982-12-09',3000,NULL,20), (7839,'KING','PRESIDENT',NULL,'1981-11-17',5000,NULL,10), (7844,'TURNER','SALESMAN',7698,'1981-09-08',1500,0,30), (7876,'ADAMS','CLERK',7788,'1983-01-12',1100,NULL,20), (7900,'JAMES','CLERK',7698,'1981-12-03',950,NULL,30), (7902,'FORD','ANALYST',7566,'1981-12-03',3000,NULL,20), (7934,'MILLER','CLERK',7782,'1982-01-23',1300,NULL,10)") + mcmp.Exec("INSERT INTO dept(deptno, dname, loc) VALUES ('10','ACCOUNTING','NEW YORK'), ('20','RESEARCH','DALLAS'), ('30','SALES','CHICAGO'), ('40','OPERATIONS','BOSTON')") + + return mcmp, func() { + deleteAll() + mcmp.Close() + cluster.PanicHandler(t) + } +} + +func helperTest(t *testing.T, query string) { + t.Helper() + t.Run(query, func(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + result, err := mcmp.ExecAllowAndCompareError(query) + fmt.Println(result) + fmt.Println(err) + }) +} + +func TestMustFix(t *testing.T) { + t.Skip("Skip CI") + + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "emp", clusterInstance.VtgateProcess.ReadVSchema)) + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "dept", clusterInstance.VtgateProcess.ReadVSchema)) + + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct case count(*) when 0 then -0 end from emp as tbl0, emp as tbl1 where 0") + + // results mismatched (maybe derived tables) + helperTest(t, "select /*vt+ PLANNER=Gen4 */ 0 as crandom0 from dept as tbl0, (select /*vt+ PLANNER=Gen4 */ distinct count(*) from emp as tbl1 where 0) as tbl1") + + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct case count(distinct true) when 'b' then 't' end from emp as tbl1 where 's'") + + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct sum(distinct tbl1.deptno) from dept as tbl0, emp as tbl1") + + // mismatched number of columns + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(*) + 0 from emp as tbl0 order by count(*) desc") + + // results mismatched (mismatched types) + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(0 >> 0), sum(distinct tbl2.empno) from emp as tbl0 left join emp as tbl2 on -32") + + // results mismatched (decimals off by a little; evalengine problem) + helperTest(t, "select /*vt+ PLANNER=Gen4 */ sum(case false when true then tbl1.deptno else -154 / 132 end) as caggr1 from emp as tbl0, dept as tbl1") + + // EOF + helperTest(t, "select /*vt+ PLANNER=Gen4 */ tbl1.dname as cgroup0, tbl1.dname as cgroup1, tbl1.deptno as crandom0 from dept as tbl0, dept as tbl1 group by tbl1.dname, tbl1.deptno order by tbl1.deptno desc") + + // results mismatched + // limit >= 9 works + helperTest(t, "select /*vt+ PLANNER=Gen4 */ tbl0.ename as cgroup1 from emp as tbl0 group by tbl0.job, tbl0.ename having sum(tbl0.mgr) order by tbl0.job desc, tbl0.ename asc limit 8") + + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct count(*) as caggr1 from emp as tbl1 group by tbl1.sal having max(0) != true") + + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct 0 as caggr0 from dept as tbl0, dept as tbl1 group by tbl1.deptno having max(0) <= 0") + + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ min(0) as caggr0 from dept as tbl0, emp as tbl1 where case when false then tbl0.dname end group by tbl1.comm") + + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(*) as caggr0, 0 as crandom0 from dept as tbl0, emp as tbl1 where 0") + + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(*) as caggr0, 0 as crandom0 from dept as tbl0, emp as tbl1 where 'o'") + + // similar to previous two + // results mismatched + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct 'o' as crandom0 from dept as tbl0, emp as tbl1 where 0 having count(*) = count(*)") + + // results mismatched (group by + right join) + // left instead of right works + // swapping tables and predicates and changing to left fails + helperTest(t, "select /*vt+ PLANNER=Gen4 */ 0 from dept as tbl0 right join emp as tbl1 on tbl0.deptno = tbl1.empno and tbl0.deptno = tbl1.deptno group by tbl0.deptno") + + // results mismatched (count + right join) + // left instead of right works + // swapping tables and predicates and changing to left fails + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(tbl1.comm) from emp as tbl1 right join emp as tbl2 on tbl1.mgr = tbl2.sal") + + // Passes with different errors + // vitess error: EOF + // mysql error: Operand should contain 1 column(s) + helperTest(t, "select /*vt+ PLANNER=Gen4 */ 8 < -31 xor (-29, sum((tbl0.deptno, 'wren', 'ostrich')), max(distinct (tbl0.dname, -15, -8))) in ((sum(distinct (tbl0.dname, 'bengal', -10)), 'ant', true)) as caggr0 from dept as tbl0 where tbl0.deptno * (77 - 61)") + + // EOF + helperTest(t, "select /*vt+ PLANNER=Gen4 */ tbl1.deptno as cgroup0, tbl1.loc as cgroup1, count(distinct tbl1.loc) as caggr1, tbl1.loc as crandom0 from dept as tbl0, dept as tbl1 group by tbl1.deptno, tbl1.loc") + + // EOF + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(*) from dept as tbl0, (select count(*) from emp as tbl0, emp as tbl1 limit 18) as tbl1") +} + +func TestKnownFailures(t *testing.T) { + t.Skip("Skip CI") + + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "emp", clusterInstance.VtgateProcess.ReadVSchema)) + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "dept", clusterInstance.VtgateProcess.ReadVSchema)) + + // logs more stuff + //clusterInstance.EnableGeneralLog() + + // column 'tbl1.`not exists (select 1 from dual)`' not found + helperTest(t, "select /*vt+ PLANNER=Gen4 */ tbl1.`not exists (select 1 from dual)`, count(*) from dept as tbl0, (select /*vt+ PLANNER=Gen4 */ not exists (select 1 from dual) from dept as tbl0 where tbl0.dname) as tbl1 group by tbl0.deptno, tbl1.`not exists (select 1 from dual)`") + + // VT13001: [BUG] failed to find the corresponding column + helperTest(t, "select /*vt+ PLANNER=Gen4 */ tbl1.dname as cgroup0, tbl1.dname as cgroup1 from dept as tbl0, dept as tbl1 group by tbl1.dname, tbl1.deptno order by tbl1.deptno desc") + + // vitess error: + // mysql error: Operand should contain 1 column(s) + helperTest(t, "select (count('sheepdog') ^ (-71 % sum(emp.mgr) ^ count('koi')) and count(*), 'fly') from emp, dept") + + // rhs of an In operation should be a tuple + helperTest(t, "select /*vt+ PLANNER=Gen4 */ (case when true then min(distinct tbl1.job) else 'bee' end, 'molly') not in (('dane', 0)) as caggr1 from emp as tbl0, emp as tbl1") + + // VT13001: [BUG] in scatter query: complex ORDER BY expression: :vtg1 /* VARCHAR */ + helperTest(t, "select /*vt+ PLANNER=Gen4 */ tbl1.job as cgroup0, sum(distinct 'mudfish'), tbl1.job as crandom0 from emp as tbl0, emp as tbl1 group by tbl1.job order by tbl1.job asc limit 8, 1") + + // VT13001: [BUG] column should not be pushed to projection while doing a column lookup + helperTest(t, "select /*vt+ PLANNER=Gen4 */ -26 in (tbl2.mgr, -8, tbl0.deptno) as crandom0 from dept as tbl0, emp as tbl1 left join emp as tbl2 on tbl2.ename") + + // unsupported: min/max on types that are not comparable is not supported + helperTest(t, "select /*vt+ PLANNER=Gen4 */ max(case true when false then 'gnu' when true then 'meerkat' end) as caggr0 from dept as tbl0") + + // vttablet: rpc error: code = InvalidArgument desc = BIGINT UNSIGNED value is out of range in '(-(273) + (-(15) & 124))' + helperTest(t, "select /*vt+ PLANNER=Gen4 */ -273 + (-15 & 124) as crandom0 from emp as tbl0, emp as tbl1 where tbl1.sal >= tbl1.mgr") + + // vitess error: cannot compare strings, collation is unknown or unsupported (collation ID: 0) + helperTest(t, "select /*vt+ PLANNER=Gen4 */ max(tbl1.dname) as caggr1 from dept as tbl0, dept as tbl1 group by tbl1.dname order by tbl1.dname asc") + + // vitess error: + // mysql error: Incorrect DATE value: 'tuna' + helperTest(t, "select /*vt+ PLANNER=Gen4 */ min(tbl0.empno) as caggr0 from emp as tbl0 where case 'gator' when false then 314 else 'weevil' end > tbl0.job having min(tbl0.hiredate) <=> 'tuna'") + + // vitess error: + // mysql error: Unknown column 'tbl0.deptno' in 'having clause' + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(*) as caggr0 from dept as tbl0 having tbl0.deptno") + + // coercion should not try to coerce this value: DATE("1980-12-17") + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct tbl1.hiredate as cgroup0, count(tbl1.mgr) as caggr0 from emp as tbl1 group by tbl1.hiredate, tbl1.ename") + + // only_full_group_by enabled + // vitess error: In aggregated query without GROUP BY, expression #1 of SELECT list contains nonaggregated column 'ks_random.tbl0.EMPNO'; this is incompatible with sql_mode=only_full_group_by + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct tbl0.empno as cgroup0, count(distinct 56) as caggr0, min('flounder' = 'penguin') as caggr1 from emp as tbl0, (select /*vt+ PLANNER=Gen4 */ 'manatee' as crandom0 from dept as tbl0 where -26 limit 2) as tbl2 where 'anteater' like 'catfish' is null and -11 group by tbl0.empno order by tbl0.empno asc, count(distinct 56) asc, min('flounder' = 'penguin') desc") + + // only_full_group_by enabled + // vitess error: + // mysql error: In aggregated query without GROUP BY, expression #1 of SELECT list contains nonaggregated column 'ks_random.tbl0.ENAME'; this is incompatible with sql_mode=only_full_group_by + helperTest(t, "select /*vt+ PLANNER=Gen4 */ tbl0.ename, min(tbl0.comm) from emp as tbl0 left join emp as tbl1 on tbl0.empno = tbl1.comm and tbl0.empno = tbl1.empno") + + // only_full_group_by enabled + // vitess error: + // mysql error: Expression #1 of ORDER BY clause is not in SELECT list, references column 'ks_random.tbl2.DNAME' which is not in SELECT list; this is incompatible with DISTINCT + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct count(*) as caggr0 from dept as tbl2 group by tbl2.dname order by tbl2.dname asc") + + // vttablet: rpc error: code = NotFound desc = Unknown column 'cgroup0' in 'field list' (errno 1054) (sqlstate 42S22) (CallerID: userData1) + helperTest(t, "select /*vt+ PLANNER=Gen4 */ tbl1.ename as cgroup0, max(tbl0.comm) as caggr0 from emp as tbl0, emp as tbl1 group by cgroup0") + + // vttablet: rpc error: code = NotFound desc = Unknown column '347' in 'group statement' + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct 347 as crandom0 from emp as tbl0") + + // vttablet: rpc error: code = InvalidArgument desc = Can't group on 'count(*)' (errno 1056) (sqlstate 42000) (CallerID: userData1) + helperTest(t, "select /*vt+ PLANNER=Gen4 */ distinct count(*) from dept as tbl0 group by tbl0.deptno") + + // unsupported + // VT12001: unsupported: only one DISTINCT aggregation is allowed in a SELECT: sum(distinct 1) as caggr1 + helperTest(t, "select /*vt+ PLANNER=Gen4 */ sum(distinct tbl0.comm) as caggr0, sum(distinct 1) as caggr1 from emp as tbl0 having 'redfish' < 'blowfish'") + + // unsupported + // VT12001: unsupported: aggregation on top of aggregation not supported + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(*) from dept as tbl1 join (select count(*) from emp as tbl0, dept as tbl1 group by tbl1.loc) as tbl2") + + // unsupported + // VT12001: unsupported: in scatter query: complex aggregate expression + helperTest(t, "select /*vt+ PLANNER=Gen4 */ (select count(*) from emp as tbl0) from emp as tbl0") + + // unsupported + // VT12001: unsupported: using aggregation on top of a *planbuilder.filter plan + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(tbl1.dname) as caggr1 from dept as tbl0 left join dept as tbl1 on tbl1.dname > tbl1.loc where tbl1.loc <=> tbl1.dname group by tbl1.dname order by tbl1.dname asc") + + // unsupported + // VT12001: unsupported: aggregation on top of aggregation not supported + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(*) from (select count(*) from dept as tbl0) as tbl0") + + // unsupported + // VT12001: unsupported: aggregation on top of aggregation not supported + helperTest(t, "select /*vt+ PLANNER=Gen4 */ count(*), count(*) from (select count(*) from dept as tbl0) as tbl0, dept as tbl1") + + // unsupported + // VT12001: unsupported: in scatter query: aggregation function 'avg(tbl0.deptno)' + helperTest(t, "select /*vt+ PLANNER=Gen4 */ avg(tbl0.deptno) from dept as tbl0") + + // unsupported + // VT12001: unsupported: LEFT JOIN with derived tables + helperTest(t, "select /*vt+ PLANNER=Gen4 */ -1 as crandom0 from emp as tbl2 left join (select count(*) from dept as tbl1) as tbl3 on 6 != tbl2.deptno") + + // unsupported + // VT12001: unsupported: subqueries in GROUP BY + helperTest(t, "select /*vt+ PLANNER=Gen4 */ exists (select 1) as crandom0 from dept as tbl0 group by exists (select 1)") +} + +func TestRandom(t *testing.T) { + t.Skip("Skip CI; random expressions generate too many failures to properly limit") + + mcmp, closer := start(t) + defer closer() + + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "emp", clusterInstance.VtgateProcess.ReadVSchema)) + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "dept", clusterInstance.VtgateProcess.ReadVSchema)) + + // specify the schema (that is defined in schema.sql) + schemaTables := []tableT{ + {tableExpr: sqlparser.NewTableName("emp")}, + {tableExpr: sqlparser.NewTableName("dept")}, + } + schemaTables[0].addColumns([]column{ + {name: "empno", typ: "bigint"}, + {name: "ename", typ: "varchar"}, + {name: "job", typ: "varchar"}, + {name: "mgr", typ: "bigint"}, + {name: "hiredate", typ: "date"}, + {name: "sal", typ: "bigint"}, + {name: "comm", typ: "bigint"}, + {name: "deptno", typ: "bigint"}, + }...) + schemaTables[1].addColumns([]column{ + {name: "deptno", typ: "bigint"}, + {name: "dname", typ: "varchar"}, + {name: "loc", typ: "varchar"}, + }...) + + endBy := time.Now().Add(1 * time.Second) + + var queryCount, queryFailCount int + // continue testing after an error if and only if testFailingQueries is true + for time.Now().Before(endBy) && (!t.Failed() || !testFailingQueries) { + seed := time.Now().UnixNano() + genConfig := sqlparser.NewExprGeneratorConfig(sqlparser.CannotAggregate, "", 0, false) + qg := newQueryGenerator(rand.New(rand.NewSource(seed)), genConfig, 2, 2, 2, schemaTables) + qg.randomQuery() + query := sqlparser.String(qg.stmt) + _, vtErr := mcmp.ExecAllowAndCompareError(query) + + // this assumes all queries are valid mysql queries + if vtErr != nil { + fmt.Printf("seed: %d\n", seed) + fmt.Println(query) + fmt.Println(vtErr) + + if stopOnMustFixError { + // results mismatched + if strings.Contains(vtErr.Error(), "results mismatched") { + simplified := simplifyResultsMismatchedQuery(t, query) + fmt.Printf("final simplified query: %s\n", simplified) + break + } + // EOF + if sqlError, ok := vtErr.(*sqlerror.SQLError); ok && strings.Contains(sqlError.Message, "EOF") { + break + } + } + + // restart the mysql and vitess connections in case something bad happened + closer() + mcmp, closer = start(t) + + fmt.Printf("\n\n\n") + queryFailCount++ + } + queryCount++ + } + fmt.Printf("Queries successfully executed: %d\n", queryCount) + fmt.Printf("Queries failed: %d\n", queryFailCount) +} + +// these queries were previously failing and have now been fixed +func TestBuggyQueries(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "emp", clusterInstance.VtgateProcess.ReadVSchema)) + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "dept", clusterInstance.VtgateProcess.ReadVSchema)) + + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ sum(tbl1.sal) as caggr1 from emp as tbl0, emp as tbl1 group by tbl1.ename order by tbl1.ename asc") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ count(*), count(*), count(*) from dept as tbl0, emp as tbl1 where tbl0.deptno = tbl1.deptno group by tbl1.empno order by tbl1.empno") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ count(tbl0.deptno) from dept as tbl0, emp as tbl1 group by tbl1.job order by tbl1.job limit 3") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ count(*), count(*) from emp as tbl0 group by tbl0.empno order by tbl0.empno") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ distinct count(*), tbl0.loc from dept as tbl0 group by tbl0.loc") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ distinct count(*) from dept as tbl0 group by tbl0.loc") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ sum(tbl1.comm) from emp as tbl0, emp as tbl1") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ tbl1.mgr, tbl1.mgr, count(*) from emp as tbl1 group by tbl1.mgr") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ tbl1.mgr, tbl1.mgr, count(*) from emp as tbl0, emp as tbl1 group by tbl1.mgr") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ count(*), count(*), count(tbl0.comm) from emp as tbl0, emp as tbl1 join dept as tbl2") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ count(*), count(*) from (select count(*) from dept as tbl0 group by tbl0.deptno) as tbl0, dept as tbl1") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ count(*) from (select count(*) from dept as tbl0 group by tbl0.deptno) as tbl0") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ min(tbl0.loc) from dept as tbl0") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ tbl1.empno, max(tbl1.job) from dept as tbl0, emp as tbl1 group by tbl1.empno") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ tbl1.ename, max(tbl0.comm) from emp as tbl0, emp as tbl1 group by tbl1.ename") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ tbl0.dname, tbl0.dname, min(tbl0.deptno) from dept as tbl0, dept as tbl1 group by tbl0.dname, tbl0.dname") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ tbl0.dname, min(tbl1.deptno) from dept as tbl0, dept as tbl1 group by tbl0.dname, tbl1.dname") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ max(tbl0.hiredate) from emp as tbl0") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ min(tbl0.deptno) as caggr0, count(*) as caggr1 from dept as tbl0 left join dept as tbl1 on tbl1.loc = tbl1.dname") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ count(tbl1.loc) as caggr0 from dept as tbl1 left join dept as tbl2 on tbl1.loc = tbl2.loc where (tbl2.deptno)") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ sum(tbl1.ename), min(tbl0.empno) from emp as tbl0, emp as tbl1 left join dept as tbl2 on tbl1.job = tbl2.loc and tbl1.comm = tbl2.deptno where ('trout') and tbl0.deptno = tbl1.comm") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ distinct max(tbl0.deptno), count(tbl0.job) from emp as tbl0, dept as tbl1 left join dept as tbl2 on tbl1.dname = tbl2.loc and tbl1.dname = tbl2.loc where (tbl2.loc) and tbl0.deptno = tbl1.deptno") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ count(*), count(*) from (select count(*) from dept as tbl0 group by tbl0.deptno) as tbl0") + mcmp.Exec("select /*vt+ PLANNER=Gen4 */ distinct max(tbl0.dname) as caggr0, 'cattle' as crandom0 from dept as tbl0, emp as tbl1 where tbl0.deptno != tbl1.sal group by tbl1.comm") + +} diff --git a/go/test/endtoend/vtgate/queries/random/schema.sql b/go/test/endtoend/vtgate/queries/random/schema.sql new file mode 100644 index 00000000000..7ef4721a381 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/random/schema.sql @@ -0,0 +1,20 @@ +CREATE TABLE emp ( + EMPNO bigint NOT NULL, + ENAME VARCHAR(10), + JOB VARCHAR(9), + MGR bigint, + HIREDATE DATE, + SAL bigint, + COMM bigint, + DEPTNO bigint, + PRIMARY KEY (EMPNO) +) Engine = InnoDB + COLLATE = utf8mb4_general_ci; + +CREATE TABLE dept ( + DEPTNO bigint, + DNAME VARCHAR(14), + LOC VARCHAR(13), + PRIMARY KEY (DEPTNO) +) Engine = InnoDB + COLLATE = utf8mb4_general_ci; \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/random/simplifier_test.go b/go/test/endtoend/vtgate/queries/random/simplifier_test.go new file mode 100644 index 00000000000..478ee355d34 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/random/simplifier_test.go @@ -0,0 +1,116 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package random + +import ( + "fmt" + "strings" + "testing" + + "vitess.io/vitess/go/test/vschemawrapper" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/planbuilder" + "vitess.io/vitess/go/vt/vtgate/simplifier" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +func TestSimplifyResultsMismatchedQuery(t *testing.T) { + t.Skip("Skip CI") + + var queries []string + queries = append(queries, "select /*vt+ PLANNER=Gen4 */ (68 - -16) / case false when -45 then 3 when 28 then -43 else -62 end as crandom0 from dept as tbl0, (select /*vt+ PLANNER=Gen4 */ distinct not not false and count(*) from emp as tbl0, emp as tbl1 where tbl1.ename) as tbl1 limit 1", + "select /*vt+ PLANNER=Gen4 */ distinct case true when 'burro' then 'trout' else 'elf' end < case count(distinct true) when 'bobcat' then 'turkey' else 'penguin' end from dept as tbl0, emp as tbl1 where 'spider'", + "select /*vt+ PLANNER=Gen4 */ distinct sum(distinct tbl1.deptno) from dept as tbl0, emp as tbl1 where tbl0.deptno and tbl1.comm in (12, tbl0.deptno, case false when 67 then -17 when -78 then -35 end, -76 >> -68)", + "select /*vt+ PLANNER=Gen4 */ count(*) + 1 from emp as tbl0 order by count(*) desc", + "select /*vt+ PLANNER=Gen4 */ count(2 >> tbl2.mgr), sum(distinct tbl2.empno <=> 15) from emp as tbl0 left join emp as tbl2 on -32", + "select /*vt+ PLANNER=Gen4 */ sum(case false when true then tbl1.deptno else -154 / 132 end) as caggr1 from emp as tbl0, dept as tbl1", + "select /*vt+ PLANNER=Gen4 */ tbl1.dname as cgroup0, tbl1.dname as cgroup1 from dept as tbl0, dept as tbl1 group by tbl1.dname, tbl1.deptno order by tbl1.deptno desc", + "select /*vt+ PLANNER=Gen4 */ tbl0.ename as cgroup1 from emp as tbl0 group by tbl0.job, tbl0.ename having sum(tbl0.mgr) = sum(tbl0.mgr) order by tbl0.job desc, tbl0.ename asc limit 8", + "select /*vt+ PLANNER=Gen4 */ distinct count(*) as caggr1 from dept as tbl0, emp as tbl1 group by tbl1.sal having max(tbl1.comm) != true", + "select /*vt+ PLANNER=Gen4 */ distinct sum(tbl1.loc) as caggr0 from dept as tbl0, dept as tbl1 group by tbl1.deptno having max(tbl1.dname) <= 1", + "select /*vt+ PLANNER=Gen4 */ min(tbl0.deptno) as caggr0 from dept as tbl0, emp as tbl1 where case when false then tbl0.dname end group by tbl1.comm", + "select /*vt+ PLANNER=Gen4 */ count(*) as caggr0, 1 as crandom0 from dept as tbl0, emp as tbl1 where 1 = 0", + "select /*vt+ PLANNER=Gen4 */ count(*) as caggr0, 1 as crandom0 from dept as tbl0, emp as tbl1 where 'octopus'", + "select /*vt+ PLANNER=Gen4 */ distinct 'octopus' as crandom0 from dept as tbl0, emp as tbl1 where tbl0.deptno = tbl1.empno having count(*) = count(*)", + "select /*vt+ PLANNER=Gen4 */ max(tbl0.deptno) from dept as tbl0 right join emp as tbl1 on tbl0.deptno = tbl1.empno and tbl0.deptno = tbl1.deptno group by tbl0.deptno", + "select /*vt+ PLANNER=Gen4 */ count(tbl1.comm) from emp as tbl1 right join emp as tbl2 on tbl1.mgr = tbl2.sal") + + for _, query := range queries { + var simplified string + t.Run("simplification "+query, func(t *testing.T) { + simplified = simplifyResultsMismatchedQuery(t, query) + }) + + t.Run("simplified "+query, func(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + mcmp.ExecAllowAndCompareError(simplified) + }) + + fmt.Printf("final simplified query: %s\n", simplified) + } +} + +// given a query that errors with results mismatched, simplifyResultsMismatchedQuery returns a simpler version with the same error +func simplifyResultsMismatchedQuery(t *testing.T, query string) string { + t.Helper() + mcmp, closer := start(t) + defer closer() + + _, err := mcmp.ExecAllowAndCompareError(query) + if err == nil { + t.Fatalf("query (%s) does not error", query) + } else if !strings.Contains(err.Error(), "mismatched") { + t.Fatalf("query (%s) does not error with results mismatched\nError: %v", query, err) + } + + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "emp", clusterInstance.VtgateProcess.ReadVSchema)) + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "dept", clusterInstance.VtgateProcess.ReadVSchema)) + + formal, err := vindexes.LoadFormal("svschema.json") + require.NoError(t, err) + vSchema := vindexes.BuildVSchema(formal) + vSchemaWrapper := &vschemawrapper.VSchemaWrapper{ + V: vSchema, + Version: planbuilder.Gen4, + } + + stmt, err := sqlparser.Parse(query) + require.NoError(t, err) + + simplified := simplifier.SimplifyStatement( + stmt.(sqlparser.SelectStatement), + vSchemaWrapper.CurrentDb(), + vSchemaWrapper, + func(statement sqlparser.SelectStatement) bool { + q := sqlparser.String(statement) + _, newErr := mcmp.ExecAllowAndCompareError(q) + if newErr == nil { + return false + } else { + return strings.Contains(newErr.Error(), "mismatched") + } + }, + ) + + return sqlparser.String(simplified) +} diff --git a/go/test/endtoend/vtgate/queries/random/svschema.json b/go/test/endtoend/vtgate/queries/random/svschema.json new file mode 100644 index 00000000000..ccbbc6ed3a6 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/random/svschema.json @@ -0,0 +1,6 @@ +{ + "keyspaces": { + "ks_random": { + } + } +} \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/random/vschema.json b/go/test/endtoend/vtgate/queries/random/vschema.json new file mode 100644 index 00000000000..21e31d5618c --- /dev/null +++ b/go/test/endtoend/vtgate/queries/random/vschema.json @@ -0,0 +1,26 @@ +{ + "sharded": true, + "vindexes": { + "hash": { + "type": "hash" + } + }, + "tables": { + "emp": { + "column_vindexes": [ + { + "column": "deptno", + "name": "hash" + } + ] + }, + "dept": { + "column_vindexes": [ + { + "column": "deptno", + "name": "hash" + } + ] + } + } +} \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/timeout/main_test.go b/go/test/endtoend/vtgate/queries/timeout/main_test.go new file mode 100644 index 00000000000..d71dc55ef46 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/timeout/main_test.go @@ -0,0 +1,110 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package misc + +import ( + _ "embed" + "flag" + "fmt" + "os" + "testing" + + "vitess.io/vitess/go/test/endtoend/utils" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + mysqlParams mysql.ConnParams + keyspaceName = "ks_misc" + uks = "uks" + cell = "test_misc" + + //go:embed uschema.sql + uschemaSQL string + + //go:embed schema.sql + schemaSQL string + + //go:embed vschema.json + vschema string +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + return 1 + } + + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, + "--queryserver-config-max-result-size", "1000000", + "--queryserver-config-query-timeout", "200", + "--queryserver-config-query-pool-timeout", "200") + // Start Unsharded keyspace + ukeyspace := &cluster.Keyspace{ + Name: uks, + SchemaSQL: uschemaSQL, + } + err = clusterInstance.StartUnshardedKeyspace(*ukeyspace, 0, false) + if err != nil { + return 1 + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: schemaSQL, + VSchema: vschema, + } + err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false) + if err != nil { + return 1 + } + + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--query-timeout", "100") + // Start vtgate + err = clusterInstance.StartVtgate() + if err != nil { + return 1 + } + + vtParams = clusterInstance.GetVTParams(keyspaceName) + + // create mysql instance and connection parameters + conn, closer, err := utils.NewMySQL(clusterInstance, keyspaceName, schemaSQL) + if err != nil { + fmt.Println(err) + return 1 + } + defer closer() + mysqlParams = conn + return m.Run() + }() + os.Exit(exitCode) +} diff --git a/go/test/endtoend/vtgate/queries/timeout/schema.sql b/go/test/endtoend/vtgate/queries/timeout/schema.sql new file mode 100644 index 00000000000..ceac0c07e6d --- /dev/null +++ b/go/test/endtoend/vtgate/queries/timeout/schema.sql @@ -0,0 +1,5 @@ +create table if not exists t1( + id1 bigint, + id2 bigint, + primary key(id1) +) Engine=InnoDB; \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/timeout/timeout_test.go b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go new file mode 100644 index 00000000000..9c81a6c5822 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go @@ -0,0 +1,100 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package misc + +import ( + "testing" + + _ "github.com/go-sql-driver/mysql" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" +) + +func start(t *testing.T) (utils.MySQLCompare, func()) { + mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) + require.NoError(t, err) + + deleteAll := func() { + tables := []string{"t1", "uks.unsharded"} + for _, table := range tables { + _, _ = mcmp.ExecAndIgnore("delete from " + table) + } + } + + deleteAll() + + return mcmp, func() { + deleteAll() + mcmp.Close() + cluster.PanicHandler(t) + } +} + +func TestQueryTimeoutWithDual(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + _, err := utils.ExecAllowError(t, mcmp.VtConn, "select sleep(0.04) from dual") + assert.NoError(t, err) + _, err = utils.ExecAllowError(t, mcmp.VtConn, "select sleep(0.24) from dual") + assert.Error(t, err) + _, err = utils.ExecAllowError(t, mcmp.VtConn, "set @@session.query_timeout=20") + require.NoError(t, err) + _, err = utils.ExecAllowError(t, mcmp.VtConn, "select sleep(0.04) from dual") + assert.Error(t, err) + _, err = utils.ExecAllowError(t, mcmp.VtConn, "select sleep(0.01) from dual") + assert.NoError(t, err) + _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=500 */ sleep(0.24) from dual") + assert.NoError(t, err) + _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=10 */ sleep(0.04) from dual") + assert.Error(t, err) + _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=15 */ sleep(0.001) from dual") + assert.NoError(t, err) +} + +func TestQueryTimeoutWithTables(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + // unsharded + utils.Exec(t, mcmp.VtConn, "insert /*vt+ QUERY_TIMEOUT_MS=1000 */ into uks.unsharded(id1) values (1),(2),(3),(4),(5)") + for i := 0; i < 12; i++ { + utils.Exec(t, mcmp.VtConn, "insert /*vt+ QUERY_TIMEOUT_MS=2000 */ into uks.unsharded(id1) select id1+5 from uks.unsharded") + } + + utils.Exec(t, mcmp.VtConn, "select count(*) from uks.unsharded where id1 > 31") + utils.Exec(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=100 */ count(*) from uks.unsharded where id1 > 31") + + // the query usually takes more than 5ms to return. So this should fail. + _, err := utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=1 */ count(*) from uks.unsharded where id1 > 31") + require.Error(t, err) + assert.Contains(t, err.Error(), "context deadline exceeded") + assert.Contains(t, err.Error(), "(errno 1317) (sqlstate 70100)") + + // sharded + utils.Exec(t, mcmp.VtConn, "insert /*vt+ QUERY_TIMEOUT_MS=1000 */ into ks_misc.t1(id1, id2) values (1,2),(2,4),(3,6),(4,8),(5,10)") + + // sleep take in seconds, so 0.1 is 100ms + utils.Exec(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=500 */ sleep(0.1) from t1 where id1 = 1") + _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=20 */ sleep(0.1) from t1 where id1 = 1") + require.Error(t, err) + assert.Contains(t, err.Error(), "context deadline exceeded") + assert.Contains(t, err.Error(), "(errno 1317) (sqlstate 70100)") +} diff --git a/go/test/endtoend/vtgate/queries/timeout/uschema.sql b/go/test/endtoend/vtgate/queries/timeout/uschema.sql new file mode 100644 index 00000000000..6ba158b134e --- /dev/null +++ b/go/test/endtoend/vtgate/queries/timeout/uschema.sql @@ -0,0 +1,5 @@ +create table unsharded( + id1 bigint, + id2 bigint, + key(id1) +) Engine=InnoDB; \ No newline at end of file diff --git a/go/test/endtoend/vtgate/schematracker/unauthorized/vschema.json b/go/test/endtoend/vtgate/queries/timeout/vschema.json similarity index 54% rename from go/test/endtoend/vtgate/schematracker/unauthorized/vschema.json rename to go/test/endtoend/vtgate/queries/timeout/vschema.json index 002c6f00386..60aa2bc9c07 100644 --- a/go/test/endtoend/vtgate/schematracker/unauthorized/vschema.json +++ b/go/test/endtoend/vtgate/queries/timeout/vschema.json @@ -1,16 +1,16 @@ { "sharded": true, "vindexes": { - "xxhash": { - "type": "xxhash" + "hash": { + "type": "hash" } }, "tables": { - "t2": { + "t1": { "column_vindexes": [ { - "column": "id3", - "name": "xxhash" + "column": "id1", + "name": "hash" } ] } diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go index 577155f16cb..11325a0f2f8 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go @@ -179,9 +179,17 @@ func TestServingChangeStreaming(t *testing.T) { rdonlyTablet.Type = "replica" // this should fail as there is no rdonly present + // This can also close the streaming connection if it goes to 80- shard first and sends the fields from that. + // Current, stream logic is to close the server connection if partial stream result is sent and an error is received later. _, err = utils.ExecAllowError(t, conn, "select * from test") require.Error(t, err) + // check if connection is still available + _, err = utils.ExecAllowError(t, conn, "select 1") + if err != nil { + t.Skip("connection is closed, cannot continue with the test") + } + // changing replica tablet to rdonly to make rdonly available for serving. replicaTablet := clusterInstance.Keyspaces[0].Shards[0].Replica() err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "rdonly") diff --git a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go index 0dc2261c7ba..564cc671d5f 100644 --- a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go +++ b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/assert" @@ -344,10 +345,10 @@ func TestSysvarSocket(t *testing.T) { _, err = utils.ExecAllowError(t, conn, "set socket = '/any/path'") require.Error(t, err) - sqlErr, ok := err.(*mysql.SQLError) + sqlErr, ok := err.(*sqlerror.SQLError) require.True(t, ok, "not a mysql error: %T", err) - assert.Equal(t, mysql.ERIncorrectGlobalLocalVar, sqlErr.Number()) - assert.Equal(t, mysql.SSUnknownSQLState, sqlErr.SQLState()) + assert.Equal(t, sqlerror.ERIncorrectGlobalLocalVar, sqlErr.Number()) + assert.Equal(t, sqlerror.SSUnknownSQLState, sqlErr.SQLState()) assert.Equal(t, "VT03010: variable 'socket' is a read only variable (errno 1238) (sqlstate HY000) during query: set socket = '/any/path'", sqlErr.Error()) } diff --git a/go/test/endtoend/vtgate/schema/schema_test.go b/go/test/endtoend/vtgate/schema/schema_test.go index 456c5589f37..04d91d8d978 100644 --- a/go/test/endtoend/vtgate/schema/schema_test.go +++ b/go/test/endtoend/vtgate/schema/schema_test.go @@ -71,7 +71,8 @@ func TestMain(m *testing.M) { clusterInstance.VtctldExtraArgs = []string{ "--schema_change_dir", schemaChangeDirectory, "--schema_change_controller", "local", - "--schema_change_check_interval", "1"} + "--schema_change_check_interval", "1s", + } if err := clusterInstance.StartTopo(); err != nil { return 1, err @@ -106,6 +107,7 @@ func TestSchemaChange(t *testing.T) { testWithAlterDatabase(t) testWithDropCreateSchema(t) testDropNonExistentTables(t) + testApplySchemaBatch(t) testCreateInvalidView(t) testCopySchemaShards(t, clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.TabletPath, 2) testCopySchemaShards(t, fmt.Sprintf("%s/0", keyspaceName), 3) @@ -125,7 +127,6 @@ func testWithInitialSchema(t *testing.T) { // Check if 4 tables are created checkTables(t, totalTableCount) - checkTables(t, totalTableCount) // Also match the vschema for those tablets matchSchema(t, clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.TabletPath, clusterInstance.Keyspaces[0].Shards[1].Vttablets[0].VttabletProcess.TabletPath) @@ -143,7 +144,7 @@ func testWithAlterSchema(t *testing.T) { func testWithAlterDatabase(t *testing.T) { sql := "create database alter_database_test; alter database alter_database_test default character set = utf8mb4; drop database alter_database_test" err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sql) - assert.Nil(t, err) + assert.NoError(t, err) } // testWithDropCreateSchema , we should be able to drop and create same schema @@ -157,7 +158,7 @@ func testWithAlterDatabase(t *testing.T) { func testWithDropCreateSchema(t *testing.T) { dropCreateTable := fmt.Sprintf("DROP TABLE vt_select_test_%02d ;", 2) + fmt.Sprintf(createTable, fmt.Sprintf("vt_select_test_%02d", 2)) err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, dropCreateTable) - require.Nil(t, err) + require.NoError(t, err) checkTables(t, totalTableCount) } @@ -224,6 +225,33 @@ func testCreateInvalidView(t *testing.T) { } } +func testApplySchemaBatch(t *testing.T) { + { + sqls := "create table batch1(id int primary key);create table batch2(id int primary key);create table batch3(id int primary key);create table batch4(id int primary key);create table batch5(id int primary key);" + _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", sqls, "--batch_size", "2", keyspaceName) + require.NoError(t, err) + checkTables(t, totalTableCount+5) + } + { + sqls := "drop table batch1; drop table batch2; drop table batch3; drop table batch4; drop table batch5" + _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", sqls, keyspaceName) + require.NoError(t, err) + checkTables(t, totalTableCount) + } + { + sqls := "create table batch1(id int primary key);create table batch2(id int primary key);create table batch3(id int primary key);create table batch4(id int primary key);create table batch5(id int primary key);" + _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--ddl_strategy", "direct --allow-zero-in-date", "--sql", sqls, "--batch_size", "2", keyspaceName) + require.NoError(t, err) + checkTables(t, totalTableCount+5) + } + { + sqls := "drop table batch1; drop table batch2; drop table batch3; drop table batch4; drop table batch5" + _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", sqls, keyspaceName) + require.NoError(t, err) + checkTables(t, totalTableCount) + } +} + // checkTables checks the number of tables in the first two shards. func checkTables(t *testing.T, count int) { checkTablesCount(t, clusterInstance.Keyspaces[0].Shards[0].Vttablets[0], count) diff --git a/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go b/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go index 8580240a942..9586206221e 100644 --- a/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go +++ b/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go @@ -19,10 +19,14 @@ package loadkeyspace import ( "os" "path" + "strings" "testing" + "time" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/test/endtoend/cluster" ) @@ -68,7 +72,7 @@ func TestLoadKeyspaceWithNoTablet(t *testing.T) { Name: keyspaceName, SchemaSQL: sqlSchema, } - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal"} + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-config-schema-change-signal") err = clusterInstance.StartUnshardedKeyspace(*keyspace, 0, false) require.NoError(t, err) @@ -76,18 +80,23 @@ func TestLoadKeyspaceWithNoTablet(t *testing.T) { for _, vttablet := range clusterInstance.Keyspaces[0].Shards[0].Vttablets { err = vttablet.VttabletProcess.TearDown() require.NoError(t, err) + utils.TimeoutAction(t, 1*time.Minute, "timeout - teardown of VTTablet", func() bool { + return vttablet.VttabletProcess.GetStatus() == "" + }) } // Start vtgate with the schema_change_signal flag - clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal"} + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--schema_change_signal") err = clusterInstance.StartVtgate() require.NoError(t, err) - // check warning logs - logDir := clusterInstance.VtgateProcess.LogDir - all, err := os.ReadFile(path.Join(logDir, "vtgate-stderr.txt")) - require.NoError(t, err) - require.Contains(t, string(all), "Unable to get initial schema reload") + // After starting VTGate we need to leave enough time for resolveAndLoadKeyspace to reach + // the schema tracking timeout (5 seconds). + utils.TimeoutAction(t, 5*time.Minute, "timeout - could not find 'Unable to get initial schema reload' in 'vtgate-stderr.txt'", func() bool { + logDir := clusterInstance.VtgateProcess.LogDir + all, _ := os.ReadFile(path.Join(logDir, "vtgate-stderr.txt")) + return strings.Contains(string(all), "Unable to get initial schema reload") + }) } func TestNoInitialKeyspace(t *testing.T) { diff --git a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go index 6519e326328..b89b0916e37 100644 --- a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go +++ b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go @@ -42,7 +42,6 @@ var ( hostname = "localhost" keyspaceName = "ks" cell = "zone1" - signalInterval = 1 sqlSchema = ` create table vt_user ( id bigint, @@ -78,8 +77,7 @@ func TestMain(m *testing.M) { } // List of users authorized to execute vschema ddl operations - clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal"} - + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--schema_change_signal") // Start keyspace keyspace := &cluster.Keyspace{ Name: keyspaceName, @@ -91,10 +89,7 @@ func TestMain(m *testing.M) { // restart the tablet so that the schema.Engine gets a chance to start with existing schema tablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() - tablet.VttabletProcess.ExtraArgs = []string{ - "--queryserver-config-schema-change-signal", - fmt.Sprintf("--queryserver-config-schema-change-signal-interval=%d", signalInterval), - } + tablet.VttabletProcess.ExtraArgs = append(tablet.VttabletProcess.ExtraArgs, "--queryserver-config-schema-change-signal") if err := tablet.RestartOnlyTablet(); err != nil { return 1 } @@ -104,6 +99,13 @@ func TestMain(m *testing.M) { clusterInstance.VtgateProcess = cluster.VtgateProcess{} return 1 } + + err := clusterInstance.WaitForVTGateAndVTTablets(5 * time.Minute) + if err != nil { + fmt.Println(err) + return 1 + } + vtParams = mysql.ConnParams{ Host: clusterInstance.Hostname, Port: clusterInstance.VtgateMySQLPort, @@ -120,10 +122,13 @@ func TestVSchemaTrackerInit(t *testing.T) { require.NoError(t, err) defer conn.Close() - qr := utils.Exec(t, conn, "SHOW VSCHEMA TABLES") - got := fmt.Sprintf("%v", qr.Rows) want := `[[VARCHAR("main")] [VARCHAR("test_table")] [VARCHAR("vt_user")]]` - assert.Equal(t, want, got) + utils.AssertMatchesWithTimeout(t, conn, + "SHOW VSCHEMA TABLES", + want, + 100*time.Millisecond, + 60*time.Second, + "initial table list not complete") } // TestVSchemaTrackerKeyspaceReInit tests that the vschema tracker @@ -147,11 +152,12 @@ func TestVSchemaTrackerKeyspaceReInit(t *testing.T) { require.NoError(t, err) err = clusterInstance.WaitForTabletsToHealthyInVtgate() require.NoError(t, err) - time.Sleep(time.Duration(signalInterval*2) * time.Second) - var newResults any - readVSchema(t, &clusterInstance.VtgateProcess, &newResults) - assert.Equal(t, originalResults, newResults) - newResults = nil + + utils.TimeoutAction(t, 1*time.Minute, "timeout - could not find the updated vschema in VTGate", func() bool { + var newResults any + readVSchema(t, &clusterInstance.VtgateProcess, &newResults) + return assert.ObjectsAreEqual(originalResults, newResults) + }) } } diff --git a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go index 532e5edae90..1c9f4b0b6e2 100644 --- a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go @@ -26,20 +26,19 @@ import ( "time" "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/test/endtoend/utils" - "vitess.io/vitess/go/vt/vtgate/planbuilder" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" ) var ( clusterInstance *cluster.LocalProcessCluster vtParams mysql.ConnParams KeyspaceName = "ks" + sidecarDBName = "_vt_schema_tracker_metadata" // custom sidecar database name for testing Cell = "test" //go:embed schema.sql SchemaSQL string @@ -56,36 +55,39 @@ func TestMain(m *testing.M) { clusterInstance = cluster.NewCluster(Cell, "localhost") defer clusterInstance.Teardown() - // Start topo server - err := clusterInstance.StartTopo() + vtgateVer, err := cluster.GetMajorVersion("vtgate") + if err != nil { + return 1 + } + vttabletVer, err := cluster.GetMajorVersion("vttablet") if err != nil { return 1 } - // Start keyspace - keyspace := &cluster.Keyspace{ - Name: KeyspaceName, - SchemaSQL: SchemaSQL, - VSchema: VSchema, + // For upgrade/downgrade tests. + if vtgateVer < 17 || vttabletVer < 17 { + // Then only the default sidecarDBName is supported. + sidecarDBName = sidecar.DefaultName } - clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal", - "--vschema_ddl_authorized_users", "%", - "--schema_change_signal_user", "userData1"} - clusterInstance.VtGatePlannerVersion = planbuilder.Gen4 - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", - "--queryserver-config-schema-change-signal-interval", "0.1", - "--queryserver-config-strict-table-acl", - "--queryserver-config-acl-exempt-acl", "userData1", - "--table-acl-config", "dummy.json"} - vtgateVer, err := cluster.GetMajorVersion("vtgate") + // Start topo server + err = clusterInstance.StartTopo() if err != nil { return 1 } - vttabletVer, err := cluster.GetMajorVersion("vttablet") - if err != nil { - return 1 + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: KeyspaceName, + SchemaSQL: SchemaSQL, + VSchema: VSchema, + SidecarDBName: sidecarDBName, } + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, + "--schema_change_signal", + "--vschema_ddl_authorized_users", "%") + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-config-schema-change-signal") + if vtgateVer >= 16 && vttabletVer >= 16 { clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--enable-views") clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-enable-views") @@ -102,6 +104,12 @@ func TestMain(m *testing.M) { return 1 } + err = clusterInstance.WaitForVTGateAndVTTablets(5 * time.Minute) + if err != nil { + fmt.Println(err) + return 1 + } + vtParams = mysql.ConnParams{ Host: clusterInstance.Hostname, Port: clusterInstance.VtgateMySQLPort, @@ -131,11 +139,21 @@ func TestNewTable(t *testing.T) { _ = utils.Exec(t, conn, "create table test_table (id bigint, name varchar(100))") - time.Sleep(2 * time.Second) - - utils.AssertMatches(t, conn, "select * from test_table", `[]`) - utils.AssertMatches(t, connShard1, "select * from test_table", `[]`) - utils.AssertMatches(t, connShard2, "select * from test_table", `[]`) + utils.AssertMatchesWithTimeout(t, conn, + "select * from test_table", `[]`, + 100*time.Millisecond, + 60*time.Second, // longer timeout as this is the first query after setup + "could not query test_table through vtgate") + utils.AssertMatchesWithTimeout(t, connShard1, + "select * from test_table", `[]`, + 100*time.Millisecond, + 30*time.Second, + "could not query test_table on "+shard1Params.DbName) + utils.AssertMatchesWithTimeout(t, connShard2, + "select * from test_table", `[]`, + 100*time.Millisecond, + 30*time.Second, + "could not query test_table on "+shard2Params.DbName) utils.Exec(t, conn, "drop table test_table") @@ -171,7 +189,7 @@ func TestInitAndUpdate(t *testing.T) { "SHOW VSCHEMA TABLES", expected, 100*time.Millisecond, - 3*time.Second, + 30*time.Second, "initial table list not complete") // Init @@ -184,7 +202,7 @@ func TestInitAndUpdate(t *testing.T) { "SHOW VSCHEMA TABLES", expected, 100*time.Millisecond, - 3*time.Second, + 30*time.Second, "test_sc not in vschema tables") // Tables Update via health check. @@ -197,7 +215,7 @@ func TestInitAndUpdate(t *testing.T) { "SHOW VSCHEMA TABLES", expected, 100*time.Millisecond, - 3*time.Second, + 30*time.Second, "test_sc1 not in vschema tables") _ = utils.Exec(t, conn, "drop table test_sc, test_sc1") @@ -209,7 +227,7 @@ func TestInitAndUpdate(t *testing.T) { "SHOW VSCHEMA TABLES", expected, 100*time.Millisecond, - 3*time.Second, + 30*time.Second, "test_sc and test_sc_1 should not be in vschema tables") } @@ -234,11 +252,19 @@ func TestDMLOnNewTable(t *testing.T) { "SHOW VSCHEMA TABLES", expected, 100*time.Millisecond, - 3*time.Second, + 30*time.Second, "test_sc not in vschema tables") - utils.AssertMatches(t, conn, "select id from new_table_tracked", `[]`) // select - utils.AssertMatches(t, conn, "select id from new_table_tracked where id = 5", `[]`) // select + utils.AssertMatchesWithTimeout(t, conn, + "select id from new_table_tracked", `[]`, + 100*time.Millisecond, + 60*time.Second, // longer timeout as it's the first query after setup + "could not query new_table_tracked through vtgate") + utils.AssertMatchesWithTimeout(t, conn, + "select id from new_table_tracked where id = 5", `[]`, + 100*time.Millisecond, + 30*time.Second, + "could not query new_table_tracked through vtgate") // DML on new table // insert initial data ,update and delete will fail since we have not added a primary vindex errorMessage := "table 'new_table_tracked' does not have a primary vindex (errno 1173) (sqlstate 42000)" @@ -254,6 +280,11 @@ func TestDMLOnNewTable(t *testing.T) { utils.Exec(t, conn, `insert into new_table_tracked(id) values(0),(1)`) utils.Exec(t, conn, `insert into t8(id8) values(2)`) defer utils.Exec(t, conn, `delete from t8`) + utils.AssertMatchesWithTimeout(t, conn, + "select count(*) from new_table_tracked join t8", `[[INT64(2)]]`, + 100*time.Millisecond, + 30*time.Second, + "did not get expected number of rows when joining new_table_tracked with t8") utils.AssertMatchesNoOrder(t, conn, `select id from new_table_tracked join t8`, `[[INT64(0)] [INT64(1)]]`) } @@ -279,7 +310,7 @@ func TestNewView(t *testing.T) { // executing the query directly qr := utils.Exec(t, conn, selQuery) // selecting it through the view. - utils.AssertMatchesWithTimeout(t, conn, "select * from test_view", fmt.Sprintf("%v", qr.Rows), 100*time.Millisecond, 10*time.Second, "test_view not in vschema tables") + utils.AssertMatchesWithTimeout(t, conn, "select * from test_view", fmt.Sprintf("%v", qr.Rows), 100*time.Millisecond, 30*time.Second, "test_view not in vschema tables") } // TestViewAndTable validates that new column added in table is present in the view definition @@ -305,7 +336,7 @@ func TestViewAndTable(t *testing.T) { _ = utils.Exec(t, conn, "create view t8_view as select * from t8") // executing the view query, with the new column in the select field. - utils.AssertMatchesWithTimeout(t, conn, "select new_col from t8_view", `[[VARCHAR("V")]]`, 100*time.Millisecond, 5*time.Second, "t8_view not in vschema tables") + utils.AssertMatchesWithTimeout(t, conn, "select new_col from t8_view", `[[VARCHAR("V")]]`, 100*time.Millisecond, 30*time.Second, "t8_view not in vschema tables") // add another column to the table t8 _ = utils.Exec(t, conn, "alter table t8 add column additional_col bigint") diff --git a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go index a441a1a2826..3ff0b61b482 100644 --- a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go @@ -24,9 +24,7 @@ import ( "testing" "time" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/require" @@ -39,6 +37,7 @@ var ( clusterInstance *cluster.LocalProcessCluster vtParams mysql.ConnParams KeyspaceName = "ks" + sidecarDBName = "_vt_schema_tracker_metadata" // custom sidecar database name for testing Cell = "test" SchemaSQL = ` create table t2( @@ -130,20 +129,36 @@ func TestMain(m *testing.M) { clusterInstance = cluster.NewCluster(Cell, "localhost") defer clusterInstance.Teardown() - clusterInstance.VtGateExtraArgs = []string{"--schema_change_signal", "--schema_change_signal_user", "userData1"} - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "5", "--queryserver-config-strict-table-acl", "--queryserver-config-acl-exempt-acl", "userData1", "--table-acl-config", "dummy.json"} + vtgateVer, err := cluster.GetMajorVersion("vtgate") + if err != nil { + return 1 + } + vttabletVer, err := cluster.GetMajorVersion("vttablet") + if err != nil { + return 1 + } + + // For upgrade/downgrade tests. + if vtgateVer < 17 || vttabletVer < 17 { + // Then only the default sidecarDBName is supported. + sidecarDBName = sidecar.DefaultName + } + + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--schema_change_signal") + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-config-schema-change-signal") // Start topo server - err := clusterInstance.StartTopo() + err = clusterInstance.StartTopo() if err != nil { return 1 } // Start keyspace keyspace := &cluster.Keyspace{ - Name: KeyspaceName, - SchemaSQL: SchemaSQL, - VSchema: VSchema, + Name: KeyspaceName, + SchemaSQL: SchemaSQL, + VSchema: VSchema, + SidecarDBName: sidecarDBName, } err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 2, false) if err != nil { @@ -156,7 +171,7 @@ func TestMain(m *testing.M) { return 1 } - err = waitForVTGateAndVTTablet() + err = clusterInstance.WaitForVTGateAndVTTablets(5 * time.Minute) if err != nil { fmt.Println(err) return 1 @@ -177,7 +192,7 @@ func TestMain(m *testing.M) { return 1 } - err = waitForVTGateAndVTTablet() + err = clusterInstance.WaitForVTGateAndVTTablets(5 * time.Minute) if err != nil { fmt.Println(err) return 1 @@ -192,22 +207,6 @@ func TestMain(m *testing.M) { os.Exit(exitCode) } -func waitForVTGateAndVTTablet() error { - timeout := time.After(5 * time.Minute) - for { - select { - case <-timeout: - return vterrors.New(vtrpcpb.Code_INTERNAL, "timeout") - default: - err := clusterInstance.WaitForTabletsToHealthyInVtgate() - if err != nil { - continue - } - return nil - } - } -} - func TestAddColumn(t *testing.T) { defer cluster.PanicHandler(t) utils.SkipIfBinaryIsBelowVersion(t, 14, "vtgate") @@ -217,6 +216,9 @@ func TestAddColumn(t *testing.T) { defer conn.Close() _ = utils.Exec(t, conn, `alter table t2 add column aaa int`) - time.Sleep(10 * time.Second) - _ = utils.Exec(t, conn, "select aaa from t2") + utils.AssertMatchesWithTimeout(t, conn, + "select aaa from t2", `[]`, + 100*time.Millisecond, + 30*time.Second, + "t2 did not have the expected aaa column") } diff --git a/go/test/endtoend/vtgate/schematracker/unauthorized/schema.sql b/go/test/endtoend/vtgate/schematracker/unauthorized/schema.sql deleted file mode 100644 index 48771a04267..00000000000 --- a/go/test/endtoend/vtgate/schematracker/unauthorized/schema.sql +++ /dev/null @@ -1,5 +0,0 @@ -create table t2( - id3 bigint, - id4 bigint, - primary key(id3) -) Engine=InnoDB; diff --git a/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go index d858d1d4c66..1a37dfb5cf7 100644 --- a/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go @@ -19,14 +19,16 @@ package unsharded import ( "context" "flag" + "fmt" "os" "testing" "time" - "vitess.io/vitess/go/test/endtoend/utils" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" ) @@ -35,6 +37,7 @@ var ( clusterInstance *cluster.LocalProcessCluster vtParams mysql.ConnParams keyspaceName = "ks" + sidecarDBName = "_vt_schema_tracker_metadata" // custom sidecar database name for testing cell = "zone1" sqlSchema = ` create table main ( @@ -53,18 +56,34 @@ func TestMain(m *testing.M) { clusterInstance = cluster.NewCluster(cell, "localhost") defer clusterInstance.Teardown() + vtgateVer, err := cluster.GetMajorVersion("vtgate") + if err != nil { + return 1 + } + vttabletVer, err := cluster.GetMajorVersion("vttablet") + if err != nil { + return 1 + } + + // For upgrade/downgrade tests. + if vtgateVer < 17 || vttabletVer < 17 { + // Then only the default sidecarDBName is supported. + sidecarDBName = sidecar.DefaultName + } + // Start topo server - err := clusterInstance.StartTopo() + err = clusterInstance.StartTopo() if err != nil { return 1 } // Start keyspace keyspace := &cluster.Keyspace{ - Name: keyspaceName, - SchemaSQL: sqlSchema, + Name: keyspaceName, + SchemaSQL: sqlSchema, + SidecarDBName: sidecarDBName, } - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal", "--queryserver-config-schema-change-signal-interval", "0.1"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-schema-change-signal"} err = clusterInstance.StartUnshardedKeyspace(*keyspace, 0, false) if err != nil { return 1 @@ -77,6 +96,12 @@ func TestMain(m *testing.M) { return 1 } + err = clusterInstance.WaitForVTGateAndVTTablets(5 * time.Minute) + if err != nil { + fmt.Println(err) + return 1 + } + vtParams = mysql.ConnParams{ Host: clusterInstance.Hostname, Port: clusterInstance.VtgateMySQLPort, @@ -107,7 +132,7 @@ func TestNewUnshardedTable(t *testing.T) { "SHOW VSCHEMA TABLES", expected, 100*time.Millisecond, - 3*time.Second, + 30*time.Second, "initial table list not complete") // create a new table which is not part of the VSchema @@ -123,17 +148,30 @@ func TestNewUnshardedTable(t *testing.T) { "SHOW VSCHEMA TABLES", expected, 100*time.Millisecond, - 3*time.Second, + 30*time.Second, "new_table_tracked not in vschema tables") - utils.AssertMatches(t, conn, "select id from new_table_tracked", `[]`) // select - utils.AssertMatches(t, conn, "select id from new_table_tracked where id = 5", `[]`) // select + utils.AssertMatchesWithTimeout(t, conn, + "select id from new_table_tracked", `[]`, + 100*time.Millisecond, + 60*time.Second, // longer timeout as it's the first query after setup + "could not query new_table_tracked through vtgate") + utils.AssertMatchesWithTimeout(t, conn, + "select id from new_table_tracked where id = 5", `[]`, + 100*time.Millisecond, + 30*time.Second, + "could not query new_table_tracked through vtgate") + // DML on new table // insert initial data ,update and delete for the new table utils.Exec(t, conn, `insert into new_table_tracked(id) values(0),(1)`) utils.Exec(t, conn, `update new_table_tracked set name = "newName1"`) utils.Exec(t, conn, "delete from new_table_tracked where id = 0") - utils.AssertMatches(t, conn, `select * from new_table_tracked`, `[[INT64(1) VARCHAR("newName1")]]`) + utils.AssertMatchesWithTimeout(t, conn, + `select * from new_table_tracked`, `[[INT64(1) VARCHAR("newName1")]]`, + 100*time.Millisecond, + 30*time.Second, + "could not query expected row in new_table_tracked through vtgate") utils.Exec(t, conn, `drop table new_table_tracked`) @@ -146,6 +184,6 @@ func TestNewUnshardedTable(t *testing.T) { "SHOW VSCHEMA TABLES", expected, 100*time.Millisecond, - 3*time.Second, + 30*time.Second, "new_table_tracked not in vschema tables") } diff --git a/go/test/endtoend/vtgate/sequence/seq_test.go b/go/test/endtoend/vtgate/sequence/seq_test.go index f9583a4083b..dd7542becc5 100644 --- a/go/test/endtoend/vtgate/sequence/seq_test.go +++ b/go/test/endtoend/vtgate/sequence/seq_test.go @@ -24,6 +24,7 @@ import ( "strings" "testing" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/assert" @@ -191,7 +192,7 @@ func TestMain(m *testing.M) { SchemaSQL: unshardedSQLSchema, VSchema: unshardedVSchema, } - if err := clusterInstance.StartUnshardedKeyspace(*uKeyspace, 1, false); err != nil { + if err := clusterInstance.StartUnshardedKeyspace(*uKeyspace, 0, false); err != nil { return 1 } @@ -200,7 +201,7 @@ func TestMain(m *testing.M) { SchemaSQL: shardedSQLSchema, VSchema: shardedVSchema, } - if err := clusterInstance.StartKeyspace(*sKeyspace, []string{"-80", "80-"}, 1, false); err != nil { + if err := clusterInstance.StartKeyspace(*sKeyspace, []string{"-80", "80-"}, 0, false); err != nil { return 1 } @@ -289,8 +290,8 @@ func TestDotTableSeq(t *testing.T) { _, err = conn.ExecuteFetch("insert into `dotted.tablename` (c1,c2) values (10,10)", 1000, true) require.Error(t, err) - mysqlErr := err.(*mysql.SQLError) - assert.Equal(t, mysql.ERDupEntry, mysqlErr.Num) + mysqlErr := err.(*sqlerror.SQLError) + assert.Equal(t, sqlerror.ERDupEntry, mysqlErr.Num) assert.Equal(t, "23000", mysqlErr.State) assert.Contains(t, mysqlErr.Message, "Duplicate entry") } diff --git a/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go b/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go new file mode 100644 index 00000000000..dbc46bdda77 --- /dev/null +++ b/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go @@ -0,0 +1,243 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tablethealthcheck + +import ( + "context" + "flag" + "fmt" + "os" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + + // tabletRefreshInterval is the interval at which the tablet health check will refresh. + // This value is set to a high value to ensure that the vtgate does not attempt to refresh the tablet between the time a tablet is added and the time it is promoted. + tabletRefreshInterval = time.Hour + + keyspaceName = "healthcheck_test_ks" + cell = "healthcheck_test_cell" + shards = []string{"-80", "80-"} + schemaSQL = ` +create table customer( + customer_id bigint not null auto_increment, + email varbinary(128), + primary key(customer_id) +) ENGINE=InnoDB; +create table corder( + order_id bigint not null auto_increment, + customer_id bigint, + sku varbinary(128), + price bigint, + primary key(order_id) +) ENGINE=InnoDB; +` + + vSchema = ` +{ + "sharded": true, + "vindexes": { + "hash": { + "type": "hash" + } + }, + "tables": { + "customer": { + "column_vindexes": [ + { + "column": "customer_id", + "name": "hash" + } + ] + }, + "corder": { + "column_vindexes": [ + { + "column": "customer_id", + "name": "hash" + } + ] + } + } +} +` +) + +// TestMain sets up the vitess cluster for any subsequent tests +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + return 1 + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: schemaSQL, + VSchema: vSchema, + } + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, []string{"--health_check_interval", "1s"}...) + err = clusterInstance.StartKeyspace(*keyspace, shards, 0, false) + if err != nil { + return 1 + } + + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, []string{"--tablet_refresh_interval", tabletRefreshInterval.String()}...) + err = clusterInstance.StartVtgate() + if err != nil { + return 1 + } + + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + return m.Run() + }() + os.Exit(exitCode) +} + +// TestHealthCheckExternallyReparentNewTablet ensures that calling TabletExternallyReparented on a new tablet will switch the primary tablet +// without having to wait for the tabletRefreshInterval. +func TestHealthCheckExternallyReparentNewTablet(t *testing.T) { + ctx := context.Background() + + // verify output of `show vitess_tablets` and `INSERT` statement + vtgateConn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer vtgateConn.Close() + + // add a new tablet + reparentTabletUID := 9999 + reparentTabletType := "rdonly" + tablet := addTablet(t, reparentTabletUID, reparentTabletType) + + // promote the new tablet to the primary + err = clusterInstance.VtctlclientProcess.ExecuteCommand("TabletExternallyReparented", tablet.Alias) + require.NoError(t, err) + + // update the new primary tablet to be read-write + err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", tablet.Alias) + require.NoError(t, err) + + // wait for the vtgate to finish updating the new primary tablet + // While 1 second is enough time in most cases, we'll wait for 3 seconds just to be safe, especially if we're running on a slow machine. + time.Sleep(3 * time.Second) + + // verify that the vtgate will recognize the new primary tablet + qr, _ := vtgateConn.ExecuteFetch("show vitess_tablets", 100, true) + require.Equal(t, 3, len(qr.Rows), "wrong number of tablet records in healthcheck, expected %d but had %d. Got result=%v", 3, len(qr.Rows), qr) + require.Equal(t, "-80", qr.Rows[0][2].ToString()) + require.Equal(t, "PRIMARY", qr.Rows[0][3].ToString()) + require.Equal(t, "SERVING", qr.Rows[0][4].ToString()) + require.Equal(t, tabletAlias(reparentTabletUID), qr.Rows[0][5].ToString()) + + // delete the old primary tablet + // This will ensure that the vtgate will experience the primary connection error if the switch would have to wait for the `tabletRefreshInterval`. + deleteTablet(t, clusterInstance.Keyspaces[0].Shards[0].Vttablets[0]) + + // verify that the vtgate will route the `INSERT` statement to the new primary tablet instead of the deleted tablet + qr, err = vtgateConn.ExecuteFetch("insert into customer(customer_id, email) values(2, 'dummy1')", 100, true) // -80 + require.EqualValues(t, 1, qr.RowsAffected) + require.NoError(t, err) +} + +func tabletAlias(tabletUID int) string { + return fmt.Sprintf("%s-%010d", cell, tabletUID) +} + +func addTablet(t *testing.T, tabletUID int, tabletType string) *cluster.Vttablet { + tablet := &cluster.Vttablet{ + TabletUID: tabletUID, + Type: tabletType, + HTTPPort: clusterInstance.GetAndReservePort(), + GrpcPort: clusterInstance.GetAndReservePort(), + MySQLPort: clusterInstance.GetAndReservePort(), + Alias: tabletAlias(tabletUID), + } + // Start Mysqlctl process + mysqlctlProcess, err := cluster.MysqlCtlProcessInstanceOptionalInit(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory, !clusterInstance.ReusingVTDATAROOT) + require.NoError(t, err) + tablet.MysqlctlProcess = *mysqlctlProcess + proc, err := tablet.MysqlctlProcess.StartProcess() + require.NoError(t, err) + + // Start vttablet process + tablet.VttabletProcess = cluster.VttabletProcessInstance( + tablet.HTTPPort, + tablet.GrpcPort, + tabletUID, + cell, + shards[0], + keyspaceName, + clusterInstance.VtctldProcess.Port, + tablet.Type, + clusterInstance.TopoProcess.Port, + clusterInstance.Hostname, + clusterInstance.TmpDirectory, + clusterInstance.VtTabletExtraArgs, + clusterInstance.DefaultCharset) + + // wait for mysqld to be ready + err = proc.Wait() + require.NoError(t, err) + + tablet.VttabletProcess.ServingStatus = "" + err = tablet.VttabletProcess.Setup() + require.NoError(t, err) + + serving := tablet.VttabletProcess.WaitForStatus("SERVING", time.Duration(60*time.Second)) + require.Equal(t, serving, true, "Tablet did not become ready within a reasonable time") + + t.Logf("Added tablet: %s", tablet.Alias) + return tablet +} + +func deleteTablet(t *testing.T, tablet *cluster.Vttablet) { + var wg sync.WaitGroup + wg.Add(1) + go func(tablet *cluster.Vttablet) { + defer wg.Done() + _ = tablet.VttabletProcess.TearDown() + _ = tablet.MysqlctlProcess.Stop() + tablet.MysqlctlProcess.CleanupFiles(tablet.TabletUID) + }(tablet) + wg.Wait() + + err := clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias) + require.NoError(t, err) + + t.Logf("Deleted tablet: %s", tablet.Alias) +} diff --git a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go index 386ef325996..9386c307a12 100644 --- a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go +++ b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go @@ -183,7 +183,9 @@ func addTablet(t *testing.T, tabletUID int, tabletType string) *cluster.Vttablet Alias: fmt.Sprintf("%s-%010d", cell, tabletUID), } // Start Mysqlctl process - tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstanceOptionalInit(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory, !clusterInstance.ReusingVTDATAROOT) + mysqlctlProcess, err := cluster.MysqlCtlProcessInstanceOptionalInit(tablet.TabletUID, tablet.MySQLPort, clusterInstance.TmpDirectory, !clusterInstance.ReusingVTDATAROOT) + require.Nil(t, err) + tablet.MysqlctlProcess = *mysqlctlProcess proc, err := tablet.MysqlctlProcess.StartProcess() require.Nil(t, err) @@ -214,7 +216,7 @@ func addTablet(t *testing.T, tabletUID int, tabletType string) *cluster.Vttablet serving := tablet.VttabletProcess.WaitForStatus("SERVING", time.Duration(60*time.Second)) assert.Equal(t, serving, true, "Tablet did not become ready within a reasonable time") err = clusterInstance.VtgateProcess.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.%s", - tablet.VttabletProcess.Keyspace, tablet.VttabletProcess.Shard, tablet.Type), 1) + tablet.VttabletProcess.Keyspace, tablet.VttabletProcess.Shard, tablet.Type), 1, 30*time.Second) require.Nil(t, err) t.Logf("Added tablet: %s", tablet.Alias) diff --git a/go/test/endtoend/vtgate/transaction/single/main_test.go b/go/test/endtoend/vtgate/transaction/single/main_test.go index 3d907e91390..ec2dbd6378a 100644 --- a/go/test/endtoend/vtgate/transaction/single/main_test.go +++ b/go/test/endtoend/vtgate/transaction/single/main_test.go @@ -20,15 +20,16 @@ import ( "context" _ "embed" "flag" + "fmt" "os" "testing" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/utils" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/vt/vtgate/planbuilder" ) var ( @@ -70,6 +71,7 @@ func TestMain(m *testing.M) { } // Start vtgate + clusterInstance.VtGatePlannerVersion = planbuilder.Gen4 clusterInstance.VtGateExtraArgs = []string{"--transaction_mode", "SINGLE"} err = clusterInstance.StartVtgate() if err != nil { @@ -168,12 +170,8 @@ func TestLookupDangleRowLaterMultiDB(t *testing.T) { } func TestLookupDangleRowRecordInSameShard(t *testing.T) { - conn, err := mysql.Connect(context.Background(), &vtParams) - require.NoError(t, err) - defer conn.Close() - defer func() { - utils.Exec(t, conn, `delete from txn_unique_constraints where txn_id = 'txn1'`) - }() + conn, cleanup := setup(t) + defer cleanup() // insert a dangling row in lookup table utils.Exec(t, conn, `INSERT INTO uniqueConstraint_vdx(unique_constraint, keyspace_id) VALUES ('foo', 'J\xda\xf0p\x0e\xcc(\x8fਁ\xa7P\x86\xa5=')`) @@ -190,12 +188,8 @@ func TestLookupDangleRowRecordInSameShard(t *testing.T) { } func TestMultiDbSecondRecordLookupDangle(t *testing.T) { - conn, err := mysql.Connect(context.Background(), &vtParams) - require.NoError(t, err) - defer conn.Close() - defer func() { - utils.Exec(t, conn, `delete from uniqueConstraint_vdx where unique_constraint = 'bar'`) - }() + conn, cleanup := setup(t) + defer cleanup() // insert a dangling row in lookup table utils.Exec(t, conn, `INSERT INTO uniqueConstraint_vdx(unique_constraint, keyspace_id) VALUES ('bar', '\x86\xc8\xc5\x1ac\xfb\x8c+6\xe4\x1f\x03\xd8ϝB')`) @@ -220,3 +214,42 @@ func TestMultiDbSecondRecordLookupDangle(t *testing.T) { // no row should exist. utils.AssertMatches(t, conn, `select txn_id from txn_unique_constraints`, `[]`) } + +// TestNoRecordInTableNotFail test that vindex lookup query creates a transaction on one shard say x. +// To fetch the fields for the actual table, the Select Impossible query should also be reouted to x. +// If it routes to other shard then the test will fail with multi-shard transaction attempted error. +// The fix ensures it does not happen. +func TestNoRecordInTableNotFail(t *testing.T) { + conn, cleanup := setup(t) + defer cleanup() + + utils.AssertMatches(t, conn, `select @@transaction_mode`, `[[VARCHAR("SINGLE")]]`) + // Need to run this test multiple times as shards are picked randomly for Impossible query. + // After the fix it is not random if a shard session already exists then it reuses that same shard session. + for i := 0; i < 100; i++ { + utils.Exec(t, conn, `begin`) + utils.Exec(t, conn, `INSERT INTO t1(id, txn_id) VALUES (1, "t1")`) + utils.Exec(t, conn, `SELECT * FROM t2 WHERE id = 1`) + utils.Exec(t, conn, `rollback`) + } +} + +func setup(t *testing.T) (*mysql.Conn, func()) { + t.Helper() + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + + tables := []string{ + "txn_unique_constraints", "uniqueConstraint_vdx", + "t1", "t1_id_vdx", "t2", "t2_id_vdx", + } + cleanup := func() { + utils.Exec(t, conn, "set transaction_mode=multi") + for _, table := range tables { + utils.Exec(t, conn, fmt.Sprintf("delete from %s /* cleanup */", table)) + } + utils.Exec(t, conn, "set transaction_mode=single") + } + cleanup() + return conn, cleanup +} diff --git a/go/test/endtoend/vtgate/transaction/single/schema.sql b/go/test/endtoend/vtgate/transaction/single/schema.sql index e97ea11e233..3faa0fabcfc 100644 --- a/go/test/endtoend/vtgate/transaction/single/schema.sql +++ b/go/test/endtoend/vtgate/transaction/single/schema.sql @@ -11,4 +11,28 @@ CREATE TABLE uniqueConstraint_vdx( `unique_constraint` VARCHAR(50) NOT NULL, `keyspace_id` VARBINARY(50) NOT NULL, PRIMARY KEY(unique_constraint) -) ENGINE=InnoDB; \ No newline at end of file +) ENGINE=InnoDB; + +CREATE TABLE `t1` ( + `id` bigint(20) NOT NULL, + `txn_id` varchar(50) DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; + +CREATE TABLE `t1_id_vdx` ( + `id` bigint(20) NOT NULL, + `keyspace_id` varbinary(50) NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; + +CREATE TABLE `t2` ( + `id` bigint(20) NOT NULL, + `txn_id` varchar(50) DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; + +CREATE TABLE `t2_id_vdx` ( + `id` bigint(20) NOT NULL, + `keyspace_id` varbinary(50) NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; \ No newline at end of file diff --git a/go/test/endtoend/vtgate/transaction/single/vschema.json b/go/test/endtoend/vtgate/transaction/single/vschema.json index a6156e9ff2f..3eb1ee720f9 100644 --- a/go/test/endtoend/vtgate/transaction/single/vschema.json +++ b/go/test/endtoend/vtgate/transaction/single/vschema.json @@ -13,6 +13,29 @@ "autocommit": "true" }, "owner": "txn_unique_constraints" + }, + "hash_vdx": { + "type": "hash" + }, + "t1_id_vdx": { + "type": "consistent_lookup_unique", + "params": { + "autocommit": "true", + "from": "id", + "table": "t1_id_vdx", + "to": "keyspace_id" + }, + "owner": "t1" + }, + "t2_id_vdx": { + "type": "consistent_lookup_unique", + "params": { + "autocommit": "true", + "from": "id", + "table": "t2_id_vdx", + "to": "keyspace_id" + }, + "owner": "t2" } }, "tables": { @@ -35,6 +58,46 @@ "name": "unicode_loose_md5_vdx" } ] + }, + "t1": { + "columnVindexes": [ + { + "column": "txn_id", + "name": "unicode_loose_md5_vdx" + }, + { + "column": "id", + "name": "t1_id_vdx" + } + ] + }, + "t2": { + "columnVindexes": [ + { + "column": "txn_id", + "name": "unicode_loose_md5_vdx" + }, + { + "column": "id", + "name": "t2_id_vdx" + } + ] + }, + "t1_id_vdx": { + "columnVindexes": [ + { + "column": "id", + "name": "hash_vdx" + } + ] + }, + "t2_id_vdx": { + "columnVindexes": [ + { + "column": "id", + "name": "hash_vdx" + } + ] } } } \ No newline at end of file diff --git a/go/test/endtoend/vtgr/my.cnf b/go/test/endtoend/vtgr/my.cnf deleted file mode 100644 index 14185182e5a..00000000000 --- a/go/test/endtoend/vtgr/my.cnf +++ /dev/null @@ -1,41 +0,0 @@ -[mysqld] -innodb_log_file_size=4GB -innodb_flush_neighbors=0 -innodb_log_buffer_size=67108864 -innodb_buffer_pool_size=96GB -innodb_buffer_pool_instances=16 -innodb_io_capacity=100 - -log_error_verbosity=3 - -# binlog appliers -slave_parallel_type=LOGICAL_CLOCK -slave_preserve_commit_order=1 -binlog_transaction_dependency_tracking=WRITESET_SESSION -slave_parallel_workers=32 -sync_relay_log=0 -relay_log_recovery=1 - -plugin-load-add='mysql_clone.so' -plugin-load-add='group_replication.so' - -gtid_mode=ON -enforce_gtid_consistency=ON -log_slave_updates=ON -binlog_format=ROW - -# Group replication -loose_group_replication_start_on_boot=OFF -loose_group_replication_bootstrap_group=OFF -# use auto-rejoin instead of expel timeout so that we can remove the group member -# loose_group_replication_member_expel_timeout=0 -loose_group_replication_autorejoin_tries=3 -loose_group_replication_exit_state_action=OFFLINE_MODE -loose_group_replication_communication_debug_options='GCS_DEBUG_BASIC,XCOM_DEBUG_BASIC' -loose_group-replication-recovery-retry-count=3 -loose-group_replication_ssl_mode = REQUIRED -loose-group_replication_recovery_use_ssl = 1 -loose-group_replication_ip_whitelist = "0.0.0.0/0" - -# Set multi-primary mode -loose-group_replication_single_primary_mode = ON \ No newline at end of file diff --git a/go/test/endtoend/vtgr/test_config.json b/go/test/endtoend/vtgr/test_config.json deleted file mode 100644 index 03cf0e49701..00000000000 --- a/go/test/endtoend/vtgr/test_config.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "MySQLTopologyUser": "orc_client_user", - "MySQLTopologyPassword": "orc_client_user_password", - "MySQLReplicaUser": "vt_repl", - "MySQLReplicaPassword": "", - "InstancePollSeconds": 1, - "MySQLConnectTimeoutSeconds": 50, - "MySQLTopologyReadTimeoutSeconds": 50 -} diff --git a/go/test/endtoend/vtgr/vtgr_test.go b/go/test/endtoend/vtgr/vtgr_test.go deleted file mode 100644 index fa3f7abe6c6..00000000000 --- a/go/test/endtoend/vtgr/vtgr_test.go +++ /dev/null @@ -1,366 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vtgr - -import ( - "fmt" - "os" - "os/exec" - "path" - "strconv" - "strings" - "testing" - "time" - - "vitess.io/vitess/go/sqltypes" - - "github.com/stretchr/testify/require" - "gotest.tools/assert" - - "vitess.io/vitess/go/json2" - "vitess.io/vitess/go/test/endtoend/cluster" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -// To run this test locally on MacOS, set hostname to localhost first: -// $ sudo scutil --set HostName localhost - -func createCluster(t *testing.T, numReplicas int) *cluster.LocalProcessCluster { - keyspaceName := "ks" - shardName := "0" - keyspace := &cluster.Keyspace{Name: keyspaceName} - shard0 := &cluster.Shard{Name: shardName} - hostname := "localhost" - cell1 := "zone1" - tablets := []*cluster.Vttablet{} - clusterInstance := cluster.NewCluster(cell1, hostname) - - os.Setenv("EXTRA_MY_CNF", path.Join(os.Getenv("PWD"), "my.cnf")) - - // Start topo server - err := clusterInstance.StartTopo() - require.NoError(t, err) - - uidBase := 100 - for i := 0; i < numReplicas; i++ { - tablet := clusterInstance.NewVttabletInstance("replica", uidBase+i, cell1) - tablets = append(tablets, tablet) - } - - // Initialize Cluster - shard0.Vttablets = tablets - err = clusterInstance.SetupCluster(keyspace, []cluster.Shard{*shard0}) - require.NoError(t, err) - - // Start MySql - var mysqlCtlProcessList []*exec.Cmd - for _, tablet := range shard0.Vttablets { - proc, err := tablet.MysqlctlProcess.StartProcess() - require.NoError(t, err) - mysqlCtlProcessList = append(mysqlCtlProcessList, proc) - } - - // Wait for mysql processes to start - for _, proc := range mysqlCtlProcessList { - err := proc.Wait() - require.NoError(t, err) - } - for _, tablet := range shard0.Vttablets { - // Reset status, don't wait for the tablet status. We will check it later - tablet.VttabletProcess.ServingStatus = "" - tablet.VttabletProcess.DbFlavor = "MysqlGR" - // If we enable backup the GR setup is a bit wacky - tablet.VttabletProcess.SupportsBackup = false - // Start the tablet - err := tablet.VttabletProcess.Setup() - require.NoError(t, err) - } - - // Start vtgr - we deploy vtgr on the tablet node in the test - baseGrPort := 33061 - for i, tablet := range shard0.Vttablets { - tablet.VtgrProcess = clusterInstance.NewVtgrProcess( - []string{fmt.Sprintf("%s/%s", keyspaceName, shardName)}, - path.Join(os.Getenv("PWD"), "test_config.json"), - baseGrPort+i, - ) - } - - for _, tablet := range shard0.Vttablets { - err := tablet.VttabletProcess.WaitForTabletTypes([]string{"NOT_SERVING"}) - require.NoError(t, err) - } - return clusterInstance -} - -func killTablets(t *testing.T, shard *cluster.Shard) { - for _, tablet := range shard.Vttablets { - if tablet.VtgrProcess != nil { - err := tablet.VtgrProcess.TearDown() - require.NoError(t, err) - } - err := tablet.VttabletProcess.TearDown() - require.NoError(t, err) - } -} - -func TestBasicSetup(t *testing.T) { - defer cluster.PanicHandler(t) - clusterInstance := createCluster(t, 2) - keyspace := &clusterInstance.Keyspaces[0] - shard0 := &keyspace.Shards[0] - defer func() { - clusterInstance.Teardown() - killTablets(t, shard0) - }() - for _, tablet := range shard0.Vttablets { - // Until there is a primary, all tablets are replica and should all be NOT_SERVING status - tab := getTablet(t, clusterInstance, tablet.Alias) - assert.Equal(t, tab.Type.String(), "REPLICA") - assert.Equal(t, tablet.VttabletProcess.GetTabletStatus(), "NOT_SERVING") - } - _, err := getPrimaryTablet(t, clusterInstance, keyspace.Name, shard0.Name) - assert.ErrorContains(t, err, "timeout looking for primary tablet") - - tablet1 := shard0.Vttablets[0] - query := `select count(*) - from performance_schema.replication_group_members - where MEMBER_STATE='ONLINE'` - var count int - err = getSQLResult(t, tablet1, query, func(values []sqltypes.Value) bool { - cnt, err := values[0].ToInt64() - if err != nil { - return false - } - count = int(cnt) - return true - }) - require.NoError(t, err) - require.NoError(t, err) - // without vtgr, tablet process will not create a mysql group - // and all the nodes are replicas type in NOT_SERVING state - assert.Equal(t, 0, int(count)) -} - -func TestVTGRSetup(t *testing.T) { - defer cluster.PanicHandler(t) - clusterInstance := createCluster(t, 2) - keyspace := &clusterInstance.Keyspaces[0] - shard0 := &keyspace.Shards[0] - defer func() { - clusterInstance.Teardown() - killTablets(t, shard0) - }() - for _, tablet := range shard0.Vttablets { - // Until there is a primary, all tablets are replica and should all be NOT_SERVING status - tab := getTablet(t, clusterInstance, tablet.Alias) - assert.Equal(t, tab.Type.String(), "REPLICA") - assert.Equal(t, tablet.VttabletProcess.GetTabletStatus(), "NOT_SERVING") - } - - // start VTGR processes - for _, tablet := range shard0.Vttablets { - err := tablet.VtgrProcess.Start(tablet.Alias) - require.NoError(t, err) - } - - // VTGR will pick one tablet as the primary - primaryAlias, err := getPrimaryTablet(t, clusterInstance, keyspace.Name, shard0.Name) - require.NoError(t, err) - require.NotEqual(t, nil, primaryAlias) - - tablet1 := shard0.Vttablets[0] - query := `select count(*) - from performance_schema.replication_group_members - where MEMBER_STATE='ONLINE'` - err = getSQLResult(t, tablet1, query, func(values []sqltypes.Value) bool { - cnt, err := values[0].ToInt64() - if err != nil { - return false - } - // VTGR should bootstrap the group and put the replica into the group - return cnt == 2 - }) - require.NoError(t, err) -} - -func TestVTGRWrongPrimaryTablet(t *testing.T) { - defer cluster.PanicHandler(t) - clusterInstance := createCluster(t, 2) - keyspace := &clusterInstance.Keyspaces[0] - shard0 := &keyspace.Shards[0] - defer func() { - clusterInstance.Teardown() - killTablets(t, shard0) - }() - for _, tablet := range shard0.Vttablets { - // Until there is a primary, all tablets are replica and should all be NOT_SERVING status - tab := getTablet(t, clusterInstance, tablet.Alias) - assert.Equal(t, tab.Type.String(), "REPLICA") - assert.Equal(t, tablet.VttabletProcess.GetTabletStatus(), "NOT_SERVING") - } - // start VTGR processes - for _, tablet := range shard0.Vttablets { - err := tablet.VtgrProcess.Start(tablet.Alias) - require.NoError(t, err) - } - // VTGR will pick one tablet as the primary - primaryAlias, err := getPrimaryTablet(t, clusterInstance, keyspace.Name, shard0.Name) - require.NoError(t, err) - require.NotEqual(t, nil, primaryAlias) - tablet := shard0.Vttablets[0] - query := `select member_id - from performance_schema.replication_group_members - where member_role='SECONDARY' and member_state='ONLINE'` - var member string - err = getSQLResult(t, tablet, query, func(values []sqltypes.Value) bool { - member = values[0].ToString() - return true - }) - require.NoError(t, err) - query = fmt.Sprintf(`select group_replication_set_as_primary('%s')`, member) - _, err = tablet.VttabletProcess.QueryTabletWithDB(query, "") - require.NoError(t, err) - - // Verify the mysql primary changed, and also the primary tablet changed as well - query = fmt.Sprintf(`select member_role from performance_schema.replication_group_members where member_id='%s'`, member) - err = getSQLResult(t, tablet, query, func(values []sqltypes.Value) bool { - return values[0].ToString() == "PRIMARY" - }) - require.NoError(t, err) - err = verifyPrimaryChange(t, clusterInstance, keyspace.Name, shard0.Name, primaryAlias) - require.NoError(t, err) -} - -func TestVTGRFailover(t *testing.T) { - defer cluster.PanicHandler(t) - clusterInstance := createCluster(t, 3) - keyspace := &clusterInstance.Keyspaces[0] - shard0 := &keyspace.Shards[0] - defer func() { - clusterInstance.Teardown() - killTablets(t, shard0) - }() - for _, tablet := range shard0.Vttablets { - // Until there is a primary, all tablets are replica and should all be NOT_SERVING status - tab := getTablet(t, clusterInstance, tablet.Alias) - assert.Equal(t, tab.Type.String(), "REPLICA") - assert.Equal(t, tablet.VttabletProcess.GetTabletStatus(), "NOT_SERVING") - } - // start VTGR processes - for _, tablet := range shard0.Vttablets { - err := tablet.VtgrProcess.Start(tablet.Alias) - require.NoError(t, err) - } - primaryAlias, err := getPrimaryTablet(t, clusterInstance, keyspace.Name, shard0.Name) - require.NoError(t, err) - // VTGR has init the cluster - require.NotEqual(t, "", primaryAlias) - primaryTablet := findTabletByAlias(shard0.Vttablets, primaryAlias) - require.NotNil(t, primaryTablet) - // Wait until there are two nodes in the group - query := `select count(*) from - performance_schema.replication_group_members - where MEMBER_STATE='ONLINE'` - err = getSQLResult(t, primaryTablet, query, func(values []sqltypes.Value) bool { - return values[0].ToString() == "3" - }) - require.NoError(t, err) - - // Now kill the primary - // VTGR should move mysql primary to a different node and change failover primary tablet - err = primaryTablet.VttabletProcess.TearDown() - require.NoError(t, err) - err = verifyPrimaryChange(t, clusterInstance, keyspace.Name, shard0.Name, primaryAlias) - require.NoError(t, err) - // now the primary has changed - primaryAlias, err = getPrimaryTablet(t, clusterInstance, keyspace.Name, shard0.Name) - require.NoError(t, err) - // verify on the _new_ primary node, we are running the mysql primary as well - primaryTablet = findTabletByAlias(shard0.Vttablets, primaryAlias) - require.NotNil(t, primaryTablet) - query = `SELECT count(*) FROM - performance_schema.replication_group_members - WHERE MEMBER_STATE='ONLINE' AND MEMBER_ROLE='PRIMARY' AND MEMBER_PORT=@@port` - err = getSQLResult(t, primaryTablet, query, func(values []sqltypes.Value) bool { - return values[0].ToString() == "1" - }) - require.NoError(t, err) -} - -func getTablet(t *testing.T, cluster *cluster.LocalProcessCluster, alias string) *topodatapb.Tablet { - result, err := cluster.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", alias) - require.NoError(t, err) - var tabletInfo *topodatapb.Tablet - err = json2.Unmarshal([]byte(result), &tabletInfo) - require.NoError(t, err) - return tabletInfo -} - -func findTabletByAlias(tablets []*cluster.Vttablet, alias *topodatapb.TabletAlias) *cluster.Vttablet { - for _, tablet := range tablets { - if tablet.Cell == alias.Cell && strings.HasSuffix(tablet.Alias, strconv.Itoa(int(alias.Uid))) { - return tablet - } - } - return nil -} - -func verifyPrimaryChange(t *testing.T, cluster *cluster.LocalProcessCluster, ks, shard string, old *topodatapb.TabletAlias) error { - timeToWait := time.Now().Add(180 * time.Second) - for time.Now().Before(timeToWait) { - time.Sleep(1 * time.Second) - result, err := cluster.VtctlclientProcess.ExecuteCommandWithOutput("GetShard", fmt.Sprintf("%s/%s", ks, shard)) - require.NoError(t, err) - var shardInfo topodatapb.Shard - err = json2.Unmarshal([]byte(result), &shardInfo) - require.NoError(t, err) - if shardInfo.PrimaryAlias.String() != old.String() { - return nil - } - } - return fmt.Errorf("fail to verify primary change") -} - -func getPrimaryTablet(t *testing.T, cluster *cluster.LocalProcessCluster, ks, shard string) (*topodatapb.TabletAlias, error) { - timeToWait := time.Now().Add(180 * time.Second) - for time.Now().Before(timeToWait) { - time.Sleep(1 * time.Second) - result, err := cluster.VtctlclientProcess.ExecuteCommandWithOutput("GetShard", fmt.Sprintf("%s/%s", ks, shard)) - require.NoError(t, err) - var shardInfo topodatapb.Shard - err = json2.Unmarshal([]byte(result), &shardInfo) - require.NoError(t, err) - if shardInfo.PrimaryAlias != nil { - return shardInfo.PrimaryAlias, nil - } - } - return nil, fmt.Errorf("timeout looking for primary tablet") -} - -func getSQLResult(t *testing.T, tablet *cluster.Vttablet, query string, check func([]sqltypes.Value) bool) error { - timeToWait := time.Now().Add(180 * time.Second) - for time.Now().Before(timeToWait) { - time.Sleep(1 * time.Second) - qr, err := tablet.VttabletProcess.QueryTabletWithDB(query, "") - require.NoError(t, err) - if len(qr.Rows) == 1 && check(qr.Rows[0]) { - return nil - } - } - return fmt.Errorf("timeout waiting for sql result") -} diff --git a/go/test/endtoend/vtorc/api/api_test.go b/go/test/endtoend/vtorc/api/api_test.go index 4885a67aa9c..7dd5c50eefa 100644 --- a/go/test/endtoend/vtorc/api/api_test.go +++ b/go/test/endtoend/vtorc/api/api_test.go @@ -17,7 +17,10 @@ limitations under the License. package api import ( + "encoding/json" "fmt" + "math" + "reflect" "testing" "time" @@ -28,17 +31,27 @@ import ( "vitess.io/vitess/go/test/endtoend/vtorc/utils" ) -// make an api call to /api/problems endpoint -// and verify the output -func TestProblemsAPI(t *testing.T) { +// TestAPIEndpoints tests the various API endpoints that VTOrc offers. +func TestAPIEndpoints(t *testing.T) { defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, RecoveryPeriodBlockSeconds: 5, + // The default topo refresh time is 3 seconds. We are intentionally making it slower for the test, so that we have time to verify + // the /debug/health output before and after the first refresh runs. + TopologyRefreshSeconds: 10, }, 1, "") keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] vtorc := clusterInfo.ClusterInstance.VTOrcProcesses[0] + // Call API with retry to ensure VTOrc is up + status, resp := utils.MakeAPICallRetry(t, vtorc, "/debug/health", func(code int, response string) bool { + return code == 0 + }) + // When VTOrc is up and hasn't run the topo-refresh, is should be healthy but HasDiscovered should be false. + assert.Equal(t, 500, status) + assert.Contains(t, resp, `"Healthy": true,`) + assert.Contains(t, resp, `"DiscoveredOnce": false`) // find primary from topo primary := utils.ShardPrimaryTablet(t, clusterInfo, keyspace, shard0) @@ -63,26 +76,30 @@ func TestProblemsAPI(t *testing.T) { t.Run("Health API", func(t *testing.T) { // Check that VTOrc is healthy - status, resp := utils.MakeAPICall(t, vtorc, "/debug/health") + status, resp, err := utils.MakeAPICall(t, vtorc, "/debug/health") + require.NoError(t, err) assert.Equal(t, 200, status) assert.Contains(t, resp, `"Healthy": true,`) + assert.Contains(t, resp, `"DiscoveredOnce": true`) }) t.Run("Liveness API", func(t *testing.T) { // Check that VTOrc is live - status, resp := utils.MakeAPICall(t, vtorc, "/debug/liveness") + status, resp, err := utils.MakeAPICall(t, vtorc, "/debug/liveness") + require.NoError(t, err) assert.Equal(t, 200, status) assert.Empty(t, resp) }) // Before we disable recoveries, let us wait until VTOrc has fixed all the issues (if any). _, _ = utils.MakeAPICallRetry(t, vtorc, "/api/replication-analysis", func(_ int, response string) bool { - return response != "[]" + return response != "null" }) t.Run("Disable Recoveries API", func(t *testing.T) { // Disable recoveries of VTOrc - status, resp := utils.MakeAPICall(t, vtorc, "/api/disable-global-recoveries") + status, resp, err := utils.MakeAPICall(t, vtorc, "/api/disable-global-recoveries") + require.NoError(t, err) assert.Equal(t, 200, status) assert.Equal(t, "Global recoveries disabled\n", resp) }) @@ -96,36 +113,41 @@ func TestProblemsAPI(t *testing.T) { // Wait until VTOrc picks up on this issue and verify // that we see a not null result on the api/replication-analysis page status, resp := utils.MakeAPICallRetry(t, vtorc, "/api/replication-analysis", func(_ int, response string) bool { - return response == "[]" + return response == "null" }) assert.Equal(t, 200, status, resp) - assert.Contains(t, resp, fmt.Sprintf(`"Port": %d`, replica.MySQLPort)) + assert.Contains(t, resp, fmt.Sprintf(`"AnalyzedInstanceAlias": "%s"`, replica.Alias)) assert.Contains(t, resp, `"Analysis": "ReplicationStopped"`) // Verify that filtering also works in the API as intended - status, resp = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?keyspace=ks&shard=0") + status, resp, err = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?keyspace=ks&shard=0") + require.NoError(t, err) assert.Equal(t, 200, status, resp) - assert.Contains(t, resp, fmt.Sprintf(`"Port": %d`, replica.MySQLPort)) + assert.Contains(t, resp, fmt.Sprintf(`"AnalyzedInstanceAlias": "%s"`, replica.Alias)) // Verify that filtering by keyspace also works in the API as intended - status, resp = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?keyspace=ks") + status, resp, err = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?keyspace=ks") + require.NoError(t, err) assert.Equal(t, 200, status, resp) - assert.Contains(t, resp, fmt.Sprintf(`"Port": %d`, replica.MySQLPort)) + assert.Contains(t, resp, fmt.Sprintf(`"AnalyzedInstanceAlias": "%s"`, replica.Alias)) // Check that filtering using keyspace and shard works - status, resp = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?keyspace=ks&shard=80-") + status, resp, err = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?keyspace=ks&shard=80-") + require.NoError(t, err) assert.Equal(t, 200, status, resp) - assert.Equal(t, "[]", resp) + assert.Equal(t, "null", resp) // Check that filtering using just the shard fails - status, resp = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?shard=0") + status, resp, err = utils.MakeAPICall(t, vtorc, "/api/replication-analysis?shard=0") + require.NoError(t, err) assert.Equal(t, 400, status, resp) assert.Equal(t, "Filtering by shard without keyspace isn't supported\n", resp) }) t.Run("Enable Recoveries API", func(t *testing.T) { // Enable recoveries of VTOrc - status, resp := utils.MakeAPICall(t, vtorc, "/api/enable-global-recoveries") + status, resp, err := utils.MakeAPICall(t, vtorc, "/api/enable-global-recoveries") + require.NoError(t, err) assert.Equal(t, 200, status) assert.Equal(t, "Global recoveries enabled\n", resp) @@ -156,23 +178,82 @@ func TestProblemsAPI(t *testing.T) { assert.Contains(t, resp, fmt.Sprintf(`"InstanceAlias": "%v"`, replica.Alias)) // Check that filtering using keyspace and shard works - status, resp = utils.MakeAPICall(t, vtorc, "/api/problems?keyspace=ks&shard=0") + status, resp, err = utils.MakeAPICall(t, vtorc, "/api/problems?keyspace=ks&shard=0") + require.NoError(t, err) + assert.Equal(t, 200, status, resp) + assert.Contains(t, resp, fmt.Sprintf(`"InstanceAlias": "%v"`, replica.Alias)) + + // Check that filtering using keyspace works + status, resp, err = utils.MakeAPICall(t, vtorc, "/api/problems?keyspace=ks") + require.NoError(t, err) + assert.Equal(t, 200, status, resp) + assert.Contains(t, resp, fmt.Sprintf(`"InstanceAlias": "%v"`, replica.Alias)) + + // Check that filtering using keyspace and shard works + status, resp, err = utils.MakeAPICall(t, vtorc, "/api/problems?keyspace=ks&shard=80-") + require.NoError(t, err) + assert.Equal(t, 200, status, resp) + assert.Equal(t, "null", resp) + + // Check that filtering using just the shard fails + status, resp, err = utils.MakeAPICall(t, vtorc, "/api/problems?shard=0") + require.NoError(t, err) + assert.Equal(t, 400, status, resp) + assert.Equal(t, "Filtering by shard without keyspace isn't supported\n", resp) + + // Also verify that we see the tablet in the errant GTIDs API call + status, resp, err = utils.MakeAPICall(t, vtorc, "/api/errant-gtids") + require.NoError(t, err) + assert.Equal(t, 200, status, resp) + assert.Contains(t, resp, fmt.Sprintf(`"InstanceAlias": "%v"`, replica.Alias)) + + // Check that filtering using keyspace and shard works + status, resp, err = utils.MakeAPICall(t, vtorc, "/api/errant-gtids?keyspace=ks&shard=0") + require.NoError(t, err) assert.Equal(t, 200, status, resp) assert.Contains(t, resp, fmt.Sprintf(`"InstanceAlias": "%v"`, replica.Alias)) // Check that filtering using keyspace works - status, resp = utils.MakeAPICall(t, vtorc, "/api/problems?keyspace=ks") + status, resp, err = utils.MakeAPICall(t, vtorc, "/api/errant-gtids?keyspace=ks") + require.NoError(t, err) assert.Equal(t, 200, status, resp) assert.Contains(t, resp, fmt.Sprintf(`"InstanceAlias": "%v"`, replica.Alias)) // Check that filtering using keyspace and shard works - status, resp = utils.MakeAPICall(t, vtorc, "/api/problems?keyspace=ks&shard=80-") + status, resp, err = utils.MakeAPICall(t, vtorc, "/api/errant-gtids?keyspace=ks&shard=80-") + require.NoError(t, err) assert.Equal(t, 200, status, resp) assert.Equal(t, "null", resp) // Check that filtering using just the shard fails - status, resp = utils.MakeAPICall(t, vtorc, "/api/problems?shard=0") + status, resp, err = utils.MakeAPICall(t, vtorc, "/api/errant-gtids?shard=0") + require.NoError(t, err) assert.Equal(t, 400, status, resp) assert.Equal(t, "Filtering by shard without keyspace isn't supported\n", resp) + + // Also verify that the metric for errant GTIDs is reporting the correct count. + waitForErrantGTIDCount(t, vtorc, 1) }) } + +func waitForErrantGTIDCount(t *testing.T, vtorc *cluster.VTOrcProcess, errantGTIDCountWanted int) { + timeout := time.After(15 * time.Second) + for { + select { + case <-timeout: + t.Fatalf("Timed out waiting for errant gtid count in the metrics to be %v", errantGTIDCountWanted) + return + default: + _, resp, err := utils.MakeAPICall(t, vtorc, "/debug/vars") + require.NoError(t, err) + resultMap := make(map[string]any) + err = json.Unmarshal([]byte(resp), &resultMap) + require.NoError(t, err) + errantGTIDTabletsCount := reflect.ValueOf(resultMap["ErrantGtidTabletCount"]) + if int(math.Round(errantGTIDTabletsCount.Float())) == errantGTIDCountWanted { + return + } + time.Sleep(100 * time.Millisecond) + } + } +} diff --git a/go/test/endtoend/vtorc/general/vtorc_test.go b/go/test/endtoend/vtorc/general/vtorc_test.go index 4254606dd94..8013a8fb98b 100644 --- a/go/test/endtoend/vtorc/general/vtorc_test.go +++ b/go/test/endtoend/vtorc/general/vtorc_test.go @@ -37,6 +37,7 @@ import ( // verify replication is setup // verify that with multiple vtorc instances, we still only have 1 PlannedReparentShard call func TestPrimaryElection(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, @@ -64,6 +65,7 @@ func TestPrimaryElection(t *testing.T) { // verify rdonly is not elected, only replica // verify replication is setup func TestSingleKeyspace(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, []string{"--clusters_to_watch", "ks"}, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, @@ -81,6 +83,7 @@ func TestSingleKeyspace(t *testing.T) { // verify rdonly is not elected, only replica // verify replication is setup func TestKeyspaceShard(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, []string{"--clusters_to_watch", "ks/0"}, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, @@ -100,8 +103,9 @@ func TestKeyspaceShard(t *testing.T) { // 4. setup replication from non-primary, let vtorc repair // 5. make instance A replicates from B and B from A, wait for repair func TestVTOrcRepairs(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) defer cluster.PanicHandler(t) - utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 0, nil, cluster.VTOrcConfiguration{ + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 0, []string{"--change-tablets-with-errant-gtid-to-drained"}, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "") keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] @@ -211,11 +215,21 @@ func TestVTOrcRepairs(t *testing.T) { // check that the writes still succeed utils.VerifyWritesSucceed(t, clusterInfo, curPrimary, []*cluster.Vttablet{replica, otherReplica}, 10*time.Second) }) + + t.Run("Errant GTID Detected", func(t *testing.T) { + // insert an errant GTID in the replica + _, err := utils.RunSQL(t, "insert into vt_insert_test(id, msg) values (10173, 'test 178342')", replica, "vt_ks") + require.NoError(t, err) + // When VTOrc detects errant GTIDs, it should change the tablet to a drained type. + utils.WaitForTabletType(t, replica, "drained") + }) + } func TestRepairAfterTER(t *testing.T) { // test fails intermittently on CI, skip until it can be fixed. t.SkipNow() + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 0, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, @@ -252,6 +266,7 @@ func TestSemiSync(t *testing.T) { // stop any vtorc instance running due to a previous test. utils.StopVTOrcs(t, clusterInfo) newCluster := utils.SetupNewClusterSemiSync(t) + defer utils.PrintVTOrcLogsOnFailure(t, newCluster.ClusterInstance) utils.StartVTOrcs(t, newCluster, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1) @@ -316,6 +331,7 @@ func TestSemiSync(t *testing.T) { // TestVTOrcWithPrs tests that VTOrc works fine even when PRS is called from vtctld func TestVTOrcWithPrs(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 4, 0, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, @@ -364,6 +380,7 @@ func TestVTOrcWithPrs(t *testing.T) { // TestMultipleDurabilities tests that VTOrc works with 2 keyspaces having 2 different durability policies func TestMultipleDurabilities(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) defer cluster.PanicHandler(t) // Setup a normal cluster and start vtorc utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, nil, cluster.VTOrcConfiguration{}, 1, "") @@ -388,6 +405,7 @@ func TestDurabilityPolicySetLater(t *testing.T) { // stop any vtorc instance running due to a previous test. utils.StopVTOrcs(t, clusterInfo) newCluster := utils.SetupNewClusterSemiSync(t) + defer utils.PrintVTOrcLogsOnFailure(t, newCluster.ClusterInstance) keyspace := &newCluster.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] // Before starting VTOrc we explicity want to set the durability policy of the keyspace to an empty string @@ -421,7 +439,7 @@ func TestDurabilityPolicySetLater(t *testing.T) { time.Sleep(30 * time.Second) // Now set the correct durability policy - out, err := newCluster.VtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspace.Name, "--durability-policy=semi_sync") + out, err := newCluster.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspace.Name, "--durability-policy=semi_sync") require.NoError(t, err, out) // VTOrc should promote a new primary after seeing the durability policy change diff --git a/go/test/endtoend/vtorc/primaryfailure/main_test.go b/go/test/endtoend/vtorc/primaryfailure/main_test.go index 7d9c57b6b22..a3e50bd0cc9 100644 --- a/go/test/endtoend/vtorc/primaryfailure/main_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/main_test.go @@ -32,7 +32,7 @@ func TestMain(m *testing.M) { var cellInfos []*utils.CellInfo cellInfos = append(cellInfos, &utils.CellInfo{ CellName: utils.Cell1, - NumReplicas: 12, + NumReplicas: 13, NumRdonly: 3, UIDBase: 100, }) diff --git a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go index 8e91028926c..a93c2423f47 100644 --- a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go @@ -17,6 +17,12 @@ limitations under the License. package primaryfailure import ( + "bufio" + "fmt" + "os" + "path" + "regexp" + "strings" "testing" "time" @@ -32,8 +38,12 @@ import ( // covers the test case master-failover from orchestrator // Also tests that VTOrc can handle multiple failures, if the durability policies allow it func TestDownPrimary(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) defer cluster.PanicHandler(t) - utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s"}, cluster.VTOrcConfiguration{ + // We specify the --wait-replicas-timeout to a small value because we spawn a cross-cell replica later in the test. + // If that replica is more advanced than the same-cell-replica, then we try to promote the cross-cell replica as an intermediate source. + // If we don't specify a small value of --wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test. + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s", "--wait-replicas-timeout=5s"}, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, }, 1, "semi_sync") keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] @@ -63,12 +73,15 @@ func TestDownPrimary(t *testing.T) { // check that the replication is setup correctly before we failover utils.CheckReplication(t, clusterInfo, curPrimary, []*cluster.Vttablet{rdonly, replica, crossCellReplica}, 10*time.Second) - + // since all tablets are up and running, InstancePollSecondsExceeded should have `0` zero value + utils.WaitForInstancePollSecondsExceededCount(t, vtOrcProcess, "InstancePollSecondsExceeded", 0, true) // Make the rdonly vttablet unavailable err := rdonly.VttabletProcess.TearDown() require.NoError(t, err) err = rdonly.MysqlctlProcess.Stop() require.NoError(t, err) + // We have bunch of Vttablets down. Therefore we expect at least 1 occurrence of InstancePollSecondsExceeded + utils.WaitForInstancePollSecondsExceededCount(t, vtOrcProcess, "InstancePollSecondsExceeded", 1, false) // Make the current primary vttablet unavailable. err = curPrimary.VttabletProcess.TearDown() require.NoError(t, err) @@ -82,14 +95,217 @@ func TestDownPrimary(t *testing.T) { // check that the replica gets promoted utils.CheckPrimaryTablet(t, clusterInfo, replica, true) + // also check that the replication is working correctly after failover utils.VerifyWritesSucceed(t, clusterInfo, replica, []*cluster.Vttablet{crossCellReplica}, 10*time.Second) utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.RecoverDeadPrimaryRecoveryName, 1) } +// bring down primary before VTOrc has started, let vtorc repair. +func TestDownPrimaryBeforeVTOrc(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) + defer cluster.PanicHandler(t) + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{}, 0, "none") + keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] + shard0 := &keyspace.Shards[0] + curPrimary := shard0.Vttablets[0] + + // Promote the first tablet as the primary + err := clusterInfo.ClusterInstance.VtctlclientProcess.InitializeShard(keyspace.Name, shard0.Name, clusterInfo.ClusterInstance.Cell, curPrimary.TabletUID) + require.NoError(t, err) + + // find the replica and rdonly tablets + var replica, rdonly *cluster.Vttablet + for _, tablet := range shard0.Vttablets { + // we know we have only two replcia tablets, so the one not the primary must be the other replica + if tablet.Alias != curPrimary.Alias && tablet.Type == "replica" { + replica = tablet + } + if tablet.Type == "rdonly" { + rdonly = tablet + } + } + assert.NotNil(t, replica, "could not find replica tablet") + assert.NotNil(t, rdonly, "could not find rdonly tablet") + + // check that the replication is setup correctly before we failover + utils.CheckReplication(t, clusterInfo, curPrimary, []*cluster.Vttablet{rdonly, replica}, 10*time.Second) + + // Make the current primary vttablet unavailable. + _ = curPrimary.VttabletProcess.TearDown() + err = curPrimary.MysqlctlProcess.Stop() + require.NoError(t, err) + + // Start a VTOrc instance + utils.StartVTOrcs(t, clusterInfo, []string{"--remote_operation_timeout=10s"}, cluster.VTOrcConfiguration{ + PreventCrossDataCenterPrimaryFailover: true, + }, 1) + + vtOrcProcess := clusterInfo.ClusterInstance.VTOrcProcesses[0] + + defer func() { + // we remove the tablet from our global list + utils.PermanentlyRemoveVttablet(clusterInfo, curPrimary) + }() + + // check that the replica gets promoted + utils.CheckPrimaryTablet(t, clusterInfo, replica, true) + + // also check that the replication is working correctly after failover + utils.VerifyWritesSucceed(t, clusterInfo, replica, []*cluster.Vttablet{rdonly}, 10*time.Second) + utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.RecoverDeadPrimaryRecoveryName, 1) +} + +// delete the primary record and let vtorc repair. +func TestDeletedPrimaryTablet(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) + defer cluster.PanicHandler(t) + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s"}, cluster.VTOrcConfiguration{}, 1, "none") + keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] + shard0 := &keyspace.Shards[0] + // find primary from topo + curPrimary := utils.ShardPrimaryTablet(t, clusterInfo, keyspace, shard0) + assert.NotNil(t, curPrimary, "should have elected a primary") + vtOrcProcess := clusterInfo.ClusterInstance.VTOrcProcesses[0] + utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.ElectNewPrimaryRecoveryName, 1) + + // find the replica and rdonly tablets + var replica, rdonly *cluster.Vttablet + for _, tablet := range shard0.Vttablets { + // we know we have only two replcia tablets, so the one not the primary must be the other replica + if tablet.Alias != curPrimary.Alias && tablet.Type == "replica" { + replica = tablet + } + if tablet.Type == "rdonly" { + rdonly = tablet + } + } + assert.NotNil(t, replica, "could not find replica tablet") + assert.NotNil(t, rdonly, "could not find rdonly tablet") + + // check that the replication is setup correctly before we failover + utils.CheckReplication(t, clusterInfo, curPrimary, []*cluster.Vttablet{replica, rdonly}, 10*time.Second) + + // Disable VTOrc recoveries + vtOrcProcess.DisableGlobalRecoveries(t) + // use vtctlclient to stop replication on the replica + _, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("StopReplication", replica.Alias) + require.NoError(t, err) + // insert a write that is not available on the replica. + utils.VerifyWritesSucceed(t, clusterInfo, curPrimary, []*cluster.Vttablet{rdonly}, 10*time.Second) + + // Make the current primary vttablet unavailable and delete its tablet record. + _ = curPrimary.VttabletProcess.TearDown() + err = curPrimary.MysqlctlProcess.Stop() + require.NoError(t, err) + // use vtctlclient to start replication on the replica back + _, err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("StartReplication", replica.Alias) + require.NoError(t, err) + err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", curPrimary.Alias) + require.NoError(t, err) + // Enable VTOrc recoveries now + vtOrcProcess.EnableGlobalRecoveries(t) + + defer func() { + // we remove the tablet from our global list + utils.PermanentlyRemoveVttablet(clusterInfo, curPrimary) + }() + + // check that the replica gets promoted. Also verify that it has all the writes. + utils.CheckPrimaryTablet(t, clusterInfo, replica, true) + utils.CheckTabletUptoDate(t, clusterInfo, replica) + + // also check that the replication is working correctly after failover + utils.VerifyWritesSucceed(t, clusterInfo, replica, []*cluster.Vttablet{rdonly}, 10*time.Second) + utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.RecoverPrimaryTabletDeletedRecoveryName, 1) +} + +// TestDeadPrimaryRecoversImmediately test Vtorc ability to recover immediately if primary is dead. +// Reason is, unlike other recoveries, in DeadPrimary we don't call DiscoverInstance since we know +// that primary is unreachable. This help us save few seconds depending on value of `RemoteOperationTimeout` flag. +func TestDeadPrimaryRecoversImmediately(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) + defer cluster.PanicHandler(t) + // We specify the --wait-replicas-timeout to a small value because we spawn a cross-cell replica later in the test. + // If that replica is more advanced than the same-cell-replica, then we try to promote the cross-cell replica as an intermediate source. + // If we don't specify a small value of --wait-replicas-timeout, then we would end up waiting for 30 seconds for the dead-primary to respond, failing this test. + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, []string{"--remote_operation_timeout=10s", "--wait-replicas-timeout=5s"}, cluster.VTOrcConfiguration{ + PreventCrossDataCenterPrimaryFailover: true, + }, 1, "semi_sync") + keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] + shard0 := &keyspace.Shards[0] + // find primary from topo + curPrimary := utils.ShardPrimaryTablet(t, clusterInfo, keyspace, shard0) + assert.NotNil(t, curPrimary, "should have elected a primary") + vtOrcProcess := clusterInfo.ClusterInstance.VTOrcProcesses[0] + utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.ElectNewPrimaryRecoveryName, 1) + + // find the replica and rdonly tablets + var replica, rdonly *cluster.Vttablet + for _, tablet := range shard0.Vttablets { + // we know we have only two replcia tablets, so the one not the primary must be the other replica + if tablet.Alias != curPrimary.Alias && tablet.Type == "replica" { + replica = tablet + } + if tablet.Type == "rdonly" { + rdonly = tablet + } + } + assert.NotNil(t, replica, "could not find replica tablet") + assert.NotNil(t, rdonly, "could not find rdonly tablet") + + // Start a cross-cell replica + crossCellReplica := utils.StartVttablet(t, clusterInfo, utils.Cell2, false) + + // check that the replication is setup correctly before we failover + utils.CheckReplication(t, clusterInfo, curPrimary, []*cluster.Vttablet{rdonly, replica, crossCellReplica}, 10*time.Second) + + // Make the current primary vttablet unavailable. + curPrimary.VttabletProcess.Kill() + err := curPrimary.MysqlctlProcess.Stop() + require.NoError(t, err) + defer func() { + // we remove the tablet from our global list + utils.PermanentlyRemoveVttablet(clusterInfo, curPrimary) + }() + + // check that the replica gets promoted + utils.CheckPrimaryTablet(t, clusterInfo, replica, true) + utils.WaitForInstancePollSecondsExceededCount(t, vtOrcProcess, "InstancePollSecondsExceeded", 2, false) + // also check that the replication is working correctly after failover + utils.VerifyWritesSucceed(t, clusterInfo, replica, []*cluster.Vttablet{crossCellReplica}, 10*time.Second) + utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.RecoverDeadPrimaryRecoveryName, 1) + + // Parse log file and find out how much time it took for DeadPrimary to recover. + logFile := path.Join(vtOrcProcess.LogDir, vtOrcProcess.LogFileName) + // log prefix printed at the end of analysis where we conclude we have DeadPrimary + t1 := extractTimeFromLog(t, logFile, "Proceeding with DeadPrimary recovery") + // log prefix printed at the end of recovery + t2 := extractTimeFromLog(t, logFile, "auditType:RecoverDeadPrimary") + curr := time.Now().Format("2006-01-02") + timeLayout := "2006-01-02 15:04:05.000000" + timeStr1 := fmt.Sprintf("%s %s", curr, t1) + timeStr2 := fmt.Sprintf("%s %s", curr, t2) + time1, err := time.Parse(timeLayout, timeStr1) + if err != nil { + t.Errorf("unable to parse time %s", err.Error()) + } + time2, err := time.Parse(timeLayout, timeStr2) + if err != nil { + t.Errorf("unable to parse time %s", err.Error()) + } + diff := time2.Sub(time1) + fmt.Printf("The difference between %s and %s is %v seconds.\n", t1, t2, diff.Seconds()) + // assert that it takes less than `remote_operation_timeout` to recover from `DeadPrimary` + // use the value provided in `remote_operation_timeout` flag to compare with. + // We are testing against 9.5 seconds to be safe and prevent flakiness. + assert.Less(t, diff.Seconds(), 9.5) +} + // Failover should not be cross data centers, according to the configuration file // covers part of the test case master-failover-lost-replicas from orchestrator func TestCrossDataCenterFailure(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, @@ -135,6 +351,7 @@ func TestCrossDataCenterFailure(t *testing.T) { // Failover should not be cross data centers, according to the configuration file // In case of no viable candidates, we should error out func TestCrossDataCenterFailureError(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 1, 1, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, @@ -181,6 +398,7 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) { // Earlier any replicas that were not able to replicate from the previous primary // were detected by vtorc and could be configured to have their sources detached t.Skip() + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 2, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, @@ -262,6 +480,7 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) { // This test checks that the promotion of a tablet succeeds if it passes the promotion lag test // covers the test case master-failover-fail-promotion-lag-minutes-success from orchestrator func TestPromotionLagSuccess(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ ReplicationLagQuery: "select 59", @@ -311,6 +530,7 @@ func TestPromotionLagFailure(t *testing.T) { // Earlier vtorc used to check that the promotion lag between the new primary and the old one // was smaller than the configured value, otherwise it would fail the promotion t.Skip() + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 3, 1, nil, cluster.VTOrcConfiguration{ ReplicationLagQuery: "select 61", @@ -363,6 +583,7 @@ func TestPromotionLagFailure(t *testing.T) { // We explicitly set one of the replicas to Prefer promotion rule. // That is the replica which should be promoted in case of primary failure func TestDownPrimaryPromotionRule(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ LockShardTimeoutSeconds: 5, @@ -410,6 +631,7 @@ func TestDownPrimaryPromotionRule(t *testing.T) { // That is the replica which should be promoted in case of primary failure // It should also be caught up when it is promoted func TestDownPrimaryPromotionRuleWithLag(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ LockShardTimeoutSeconds: 5, @@ -489,6 +711,7 @@ func TestDownPrimaryPromotionRuleWithLag(t *testing.T) { // We let a replica in our own cell lag. That is the replica which should be promoted in case of primary failure // It should also be caught up when it is promoted func TestDownPrimaryPromotionRuleWithLagCrossCenter(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) defer cluster.PanicHandler(t) utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ LockShardTimeoutSeconds: 5, @@ -563,3 +786,29 @@ func TestDownPrimaryPromotionRuleWithLagCrossCenter(t *testing.T) { // check that rdonly and crossCellReplica are able to replicate from the replica utils.VerifyWritesSucceed(t, clusterInfo, replica, []*cluster.Vttablet{crossCellReplica, rdonly}, 15*time.Second) } + +func extractTimeFromLog(t *testing.T, logFile string, logStatement string) string { + file, err := os.Open(logFile) + if err != nil { + t.Errorf("fail to extract time from log statement %s", err.Error()) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + line := scanner.Text() + if strings.Contains(line, logStatement) { + // Regular expression pattern for date format + pattern := `\d{2}:\d{2}:\d{2}\.\d{6}` + re := regexp.MustCompile(pattern) + match := re.FindString(line) + return match + } + } + + if err := scanner.Err(); err != nil { + t.Errorf("fail to extract time from log statement %s", err.Error()) + } + return "" +} diff --git a/go/test/endtoend/vtorc/readtopologyinstance/main_test.go b/go/test/endtoend/vtorc/readtopologyinstance/main_test.go index fbc29600e98..e3b55d64c6b 100644 --- a/go/test/endtoend/vtorc/readtopologyinstance/main_test.go +++ b/go/test/endtoend/vtorc/readtopologyinstance/main_test.go @@ -27,6 +27,7 @@ import ( "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/inst" + "vitess.io/vitess/go/vt/vtorc/logic" "vitess.io/vitess/go/vt/vtorc/server" _ "github.com/go-sql-driver/mysql" @@ -40,6 +41,7 @@ func TestReadTopologyInstanceBufferable(t *testing.T) { defer func() { clusterInfo.ClusterInstance.Teardown() }() + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] oldArgs := os.Args @@ -70,12 +72,11 @@ func TestReadTopologyInstanceBufferable(t *testing.T) { } } - primaryInstance, err := inst.ReadTopologyInstanceBufferable(&inst.InstanceKey{ - Hostname: utils.Hostname, - Port: primary.MySQLPort, - }, nil) + primaryInstance, err := inst.ReadTopologyInstanceBufferable(primary.Alias, nil) require.NoError(t, err) require.NotNil(t, primaryInstance) + assert.Equal(t, utils.Hostname, primaryInstance.Hostname) + assert.Equal(t, primary.MySQLPort, primaryInstance.Port) assert.Contains(t, primaryInstance.InstanceAlias, "zone1") assert.NotEqual(t, 0, primaryInstance.ServerID) assert.Greater(t, len(primaryInstance.ServerUUID), 10) @@ -104,16 +105,26 @@ func TestReadTopologyInstanceBufferable(t *testing.T) { assert.Equal(t, primaryInstance.ReplicationIOThreadState, inst.ReplicationThreadStateNoThread) assert.Equal(t, primaryInstance.ReplicationSQLThreadState, inst.ReplicationThreadStateNoThread) - // insert an errant GTID in the replica - _, err = utils.RunSQL(t, "insert into vt_insert_test(id, msg) values (10173, 'test 178342')", replica, "vt_ks") + // Insert an errant GTID in the replica. + // The way to do this is to disable global recoveries, stop replication and inject an errant GTID. + // After this we restart the replication and enable the recoveries again. + err = logic.DisableRecovery() + require.NoError(t, err) + err = utils.RunSQLs(t, []string{`STOP SLAVE;`, + `SET GTID_NEXT="12345678-1234-1234-1234-123456789012:1";`, + `BEGIN;`, `COMMIT;`, + `SET GTID_NEXT="AUTOMATIC";`, + `START SLAVE;`, + }, replica, "") + require.NoError(t, err) + err = logic.EnableRecovery() require.NoError(t, err) - replicaInstance, err := inst.ReadTopologyInstanceBufferable(&inst.InstanceKey{ - Hostname: utils.Hostname, - Port: replica.MySQLPort, - }, nil) + replicaInstance, err := inst.ReadTopologyInstanceBufferable(replica.Alias, nil) require.NoError(t, err) require.NotNil(t, replicaInstance) + assert.Equal(t, utils.Hostname, replicaInstance.Hostname) + assert.Equal(t, replica.MySQLPort, replicaInstance.Port) assert.Contains(t, replicaInstance.InstanceAlias, "zone1") assert.NotEqual(t, 0, replicaInstance.ServerID) assert.Greater(t, len(replicaInstance.ServerUUID), 10) @@ -125,6 +136,8 @@ func TestReadTopologyInstanceBufferable(t *testing.T) { assert.Equal(t, "ROW", replicaInstance.BinlogFormat) assert.Equal(t, "ON", replicaInstance.GTIDMode) assert.Equal(t, "FULL", replicaInstance.BinlogRowImage) + assert.Equal(t, utils.Hostname, replicaInstance.SourceHost) + assert.Equal(t, primary.MySQLPort, replicaInstance.SourcePort) assert.Contains(t, replicaInstance.SelfBinlogCoordinates.LogFile, fmt.Sprintf("vt-0000000%d-bin", replica.TabletUID)) assert.Greater(t, replicaInstance.SelfBinlogCoordinates.LogPos, uint32(0)) assert.False(t, replicaInstance.SemiSyncPrimaryEnabled) @@ -146,7 +159,7 @@ func TestReadTopologyInstanceBufferable(t *testing.T) { assert.Equal(t, replicaInstance.ReadBinlogCoordinates.LogFile, primaryInstance.SelfBinlogCoordinates.LogFile) assert.Greater(t, replicaInstance.ReadBinlogCoordinates.LogPos, uint32(0)) assert.Equal(t, replicaInstance.ExecBinlogCoordinates.LogFile, primaryInstance.SelfBinlogCoordinates.LogFile) - assert.LessOrEqual(t, replicaInstance.ExecBinlogCoordinates.LogPos, replicaInstance.ReadBinlogCoordinates.LogPos) + assert.Greater(t, replicaInstance.ExecBinlogCoordinates.LogPos, uint32(0)) assert.Contains(t, replicaInstance.RelaylogCoordinates.LogFile, fmt.Sprintf("vt-0000000%d-relay", replica.TabletUID)) assert.Greater(t, replicaInstance.RelaylogCoordinates.LogPos, uint32(0)) assert.Empty(t, replicaInstance.LastIOError) diff --git a/go/test/endtoend/vtorc/utils/utils.go b/go/test/endtoend/vtorc/utils/utils.go index d4f23c0de70..de48b8f4781 100644 --- a/go/test/endtoend/vtorc/utils/utils.go +++ b/go/test/endtoend/vtorc/utils/utils.go @@ -18,10 +18,12 @@ package utils import ( "context" + "encoding/json" "fmt" "os" "os/exec" "path" + "strconv" "strings" "testing" "time" @@ -41,7 +43,6 @@ import ( // Register topo implementations. _ "vitess.io/vitess/go/vt/topo/consultopo" _ "vitess.io/vitess/go/vt/topo/etcd2topo" - _ "vitess.io/vitess/go/vt/topo/k8stopo" _ "vitess.io/vitess/go/vt/topo/zk2topo" ) @@ -66,11 +67,10 @@ type CellInfo struct { // VTOrcClusterInfo stores the information for a cluster. This is supposed to be used only for VTOrc tests. type VTOrcClusterInfo struct { - ClusterInstance *cluster.LocalProcessCluster - Ts *topo.Server - CellInfos []*CellInfo - VtctldClientProcess *cluster.VtctldClientProcess - lastUsedValue int + ClusterInstance *cluster.LocalProcessCluster + Ts *topo.Server + CellInfos []*CellInfo + lastUsedValue int } // CreateClusterAndStartTopo starts the cluster and topology service @@ -99,17 +99,13 @@ func CreateClusterAndStartTopo(cellInfos []*CellInfo) (*VTOrcClusterInfo, error) return nil, err } - // store the vtctldclient process - vtctldClientProcess := cluster.VtctldClientProcessInstance("localhost", clusterInstance.VtctldProcess.GrpcPort, clusterInstance.TmpDirectory) - // create topo server connection ts, err := topo.OpenServer(*clusterInstance.TopoFlavorString(), clusterInstance.VtctlProcess.TopoGlobalAddress, clusterInstance.VtctlProcess.TopoGlobalRoot) return &VTOrcClusterInfo{ - ClusterInstance: clusterInstance, - Ts: ts, - CellInfos: cellInfos, - lastUsedValue: 100, - VtctldClientProcess: vtctldClientProcess, + ClusterInstance: clusterInstance, + Ts: ts, + CellInfos: cellInfos, + lastUsedValue: 100, }, err } @@ -207,10 +203,8 @@ func shutdownVttablets(clusterInfo *VTOrcClusterInfo) error { } // Remove the tablet record for this tablet } - err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", vttablet.Alias) - if err != nil { - return err - } + // Ignoring error here because some tests delete tablets themselves. + _ = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", vttablet.Alias) } clusterInfo.ClusterInstance.Keyspaces[0].Shards[0].Vttablets = nil return nil @@ -306,8 +300,15 @@ func SetupVttabletsAndVTOrcs(t *testing.T, clusterInfo *VTOrcClusterInfo, numRep if durability == "" { durability = "none" } - out, err := clusterInfo.VtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, fmt.Sprintf("--durability-policy=%s", durability)) + out, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, fmt.Sprintf("--durability-policy=%s", durability)) require.NoError(t, err, out) + // VTOrc now uses shard record too, so we need to clear that as well for correct testing. + _, err = clusterInfo.Ts.UpdateShardFields(context.Background(), keyspaceName, shardName, func(info *topo.ShardInfo) error { + info.PrimaryTermStartTime = nil + info.PrimaryAlias = nil + return nil + }) + require.NoError(t, err) // start vtorc StartVTOrcs(t, clusterInfo, orcExtraArgs, config, vtorcCount) @@ -316,7 +317,7 @@ func SetupVttabletsAndVTOrcs(t *testing.T, clusterInfo *VTOrcClusterInfo, numRep // cleanAndStartVttablet cleans the MySQL instance underneath for running a new test. It also starts the vttablet. func cleanAndStartVttablet(t *testing.T, clusterInfo *VTOrcClusterInfo, vttablet *cluster.Vttablet) { t.Helper() - // set super-read-only to false + // set super_read_only to false _, err := RunSQL(t, "SET GLOBAL super_read_only = OFF", vttablet, "") require.NoError(t, err) // remove the databases if they exist @@ -434,8 +435,8 @@ func CheckReplication(t *testing.T, clusterInfo *VTOrcClusterInfo, primary *clus time.Sleep(100 * time.Millisecond) break } - confirmReplication(t, primary, replicas, time.Until(endTime), clusterInfo.lastUsedValue) clusterInfo.lastUsedValue++ + confirmReplication(t, primary, replicas, time.Until(endTime), clusterInfo.lastUsedValue) validateTopology(t, clusterInfo, true, time.Until(endTime)) return } @@ -446,8 +447,8 @@ func CheckReplication(t *testing.T, clusterInfo *VTOrcClusterInfo, primary *clus // Call this function only after CheckReplication has been executed once, since that function creates the table that this function uses. func VerifyWritesSucceed(t *testing.T, clusterInfo *VTOrcClusterInfo, primary *cluster.Vttablet, replicas []*cluster.Vttablet, timeToWait time.Duration) { t.Helper() - confirmReplication(t, primary, replicas, timeToWait, clusterInfo.lastUsedValue) clusterInfo.lastUsedValue++ + confirmReplication(t, primary, replicas, timeToWait, clusterInfo.lastUsedValue) } func confirmReplication(t *testing.T, primary *cluster.Vttablet, replicas []*cluster.Vttablet, timeToWait time.Duration, valueToInsert int) { @@ -482,6 +483,12 @@ func confirmReplication(t *testing.T, primary *cluster.Vttablet, replicas []*clu } } +// CheckTabletUptoDate verifies that the tablet has all the writes so far +func CheckTabletUptoDate(t *testing.T, clusterInfo *VTOrcClusterInfo, tablet *cluster.Vttablet) { + err := checkInsertedValues(t, tablet, clusterInfo.lastUsedValue) + require.NoError(t, err) +} + func checkInsertedValues(t *testing.T, tablet *cluster.Vttablet, index int) error { selectSQL := fmt.Sprintf("select msg from vt_ks.vt_insert_test where id=%d", index) qr, err := RunSQL(t, selectSQL, tablet, "") @@ -585,6 +592,26 @@ func RunSQL(t *testing.T, sql string, tablet *cluster.Vttablet, db string) (*sql return execute(t, conn, sql) } +// RunSQLs is used to run a list of SQL statements on the given tablet +func RunSQLs(t *testing.T, sqls []string, tablet *cluster.Vttablet, db string) error { + // Get Connection + tabletParams := getMysqlConnParam(tablet, db) + var timeoutDuration = time.Duration(5 * len(sqls)) + ctx, cancel := context.WithTimeout(context.Background(), timeoutDuration*time.Second) + defer cancel() + conn, err := mysql.Connect(ctx, &tabletParams) + require.Nil(t, err) + defer conn.Close() + + // Run SQLs + for _, sql := range sqls { + if _, err := execute(t, conn, sql); err != nil { + return err + } + } + return nil +} + func execute(t *testing.T, conn *mysql.Conn, query string) (*sqltypes.Result, error) { t.Helper() return conn.ExecuteFetch(query, 1000, true) @@ -717,12 +744,10 @@ func CheckSourcePort(t *testing.T, replica *cluster.Vttablet, source *cluster.Vt } // MakeAPICall is used make an API call given the url. It returns the status and the body of the response received -func MakeAPICall(t *testing.T, vtorc *cluster.VTOrcProcess, url string) (status int, response string) { +func MakeAPICall(t *testing.T, vtorc *cluster.VTOrcProcess, url string) (status int, response string, err error) { t.Helper() - var err error status, response, err = vtorc.MakeAPICall(url) - require.NoError(t, err) - return status, response + return status, response, err } // MakeAPICallRetry is used to make an API call and retry on the given condition. @@ -736,7 +761,7 @@ func MakeAPICallRetry(t *testing.T, vtorc *cluster.VTOrcProcess, url string, ret t.Fatal("timed out waiting for api to work") return default: - status, response = MakeAPICall(t, vtorc, url) + status, response, _ := MakeAPICall(t, vtorc, url) if retry(status, response) { time.Sleep(1 * time.Second) break @@ -810,20 +835,17 @@ func SetupNewClusterSemiSync(t *testing.T) *VTOrcClusterInfo { require.NoError(t, err) } - vtctldClientProcess := cluster.VtctldClientProcessInstance("localhost", clusterInstance.VtctldProcess.GrpcPort, clusterInstance.TmpDirectory) - - out, err := vtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, "--durability-policy=semi_sync") + out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("SetKeyspaceDurabilityPolicy", keyspaceName, "--durability-policy=semi_sync") require.NoError(t, err, out) // create topo server connection ts, err := topo.OpenServer(*clusterInstance.TopoFlavorString(), clusterInstance.VtctlProcess.TopoGlobalAddress, clusterInstance.VtctlProcess.TopoGlobalRoot) require.NoError(t, err) clusterInfo := &VTOrcClusterInfo{ - ClusterInstance: clusterInstance, - Ts: ts, - CellInfos: nil, - lastUsedValue: 100, - VtctldClientProcess: vtctldClientProcess, + ClusterInstance: clusterInstance, + Ts: ts, + CellInfos: nil, + lastUsedValue: 100, } return clusterInfo } @@ -946,3 +968,64 @@ func WaitForSuccessfulRecoveryCount(t *testing.T, vtorcInstance *cluster.VTOrcPr successCount := successfulRecoveriesMap[recoveryName] assert.EqualValues(t, countExpected, successCount) } + +// WaitForTabletType waits for the tablet to reach a certain type. +func WaitForTabletType(t *testing.T, tablet *cluster.Vttablet, expectedTabletType string) { + t.Helper() + err := tablet.VttabletProcess.WaitForTabletTypes([]string{expectedTabletType}) + require.NoError(t, err) +} + +// WaitForInstancePollSecondsExceededCount waits for 30 seconds and then queries api/aggregated-discovery-metrics. +// It expects to find minimum occurrence or exact count of `keyName` provided. +func WaitForInstancePollSecondsExceededCount(t *testing.T, vtorcInstance *cluster.VTOrcProcess, keyName string, minCountExpected float64, enforceEquality bool) { + t.Helper() + var sinceInSeconds = 30 + duration := time.Duration(sinceInSeconds) + time.Sleep(duration * time.Second) + + statusCode, res, err := vtorcInstance.MakeAPICall("api/aggregated-discovery-metrics?seconds=" + strconv.Itoa(sinceInSeconds)) + if err != nil { + assert.Fail(t, "Not able to call api/aggregated-discovery-metrics") + } + if statusCode == 200 { + resultMap := make(map[string]any) + err := json.Unmarshal([]byte(res), &resultMap) + if err != nil { + assert.Fail(t, "invalid response from api/aggregated-discovery-metrics") + } + successCount := resultMap[keyName] + if iSuccessCount, ok := successCount.(float64); ok { + if enforceEquality { + assert.Equal(t, iSuccessCount, minCountExpected) + } else { + assert.GreaterOrEqual(t, iSuccessCount, minCountExpected) + } + return + } + } + assert.Fail(t, "invalid response from api/aggregated-discovery-metrics") +} + +// PrintVTOrcLogsOnFailure prints the VTOrc logs on failure of the test. +// This function is supposed to be called as the first defer command from the vtorc tests. +func PrintVTOrcLogsOnFailure(t *testing.T, clusterInstance *cluster.LocalProcessCluster) { + // If the test has not failed, then we don't need to print anything. + if !t.Failed() { + return + } + + log.Errorf("Printing VTOrc logs") + for _, vtorc := range clusterInstance.VTOrcProcesses { + if vtorc == nil || vtorc.LogFileName == "" { + continue + } + filePath := path.Join(vtorc.LogDir, vtorc.LogFileName) + log.Errorf("Printing file - %s", filePath) + content, err := os.ReadFile(filePath) + if err != nil { + log.Errorf("Error while reading the file - %v", err) + } + log.Errorf("%s", string(content)) + } +} diff --git a/go/test/fuzzing/tablet_manager_fuzzer.go b/go/test/fuzzing/tablet_manager_fuzzer.go index 316cf75fb82..4c61afa64bc 100644 --- a/go/test/fuzzing/tablet_manager_fuzzer.go +++ b/go/test/fuzzing/tablet_manager_fuzzer.go @@ -41,6 +41,7 @@ func FuzzTabletManagerExecuteFetchAsDba(data []byte) int { ctx := context.Background() cp := mysql.ConnParams{} db := fakesqldb.New(t) + defer db.Close() db.AddQueryPattern(".*", &sqltypes.Result{}) daemon := mysqlctl.NewFakeMysqlDaemon(db) diff --git a/go/test/fuzzing/tabletserver_schema_fuzzer.go b/go/test/fuzzing/tabletserver_schema_fuzzer.go index f7e88d7313a..67bb36e52ed 100644 --- a/go/test/fuzzing/tabletserver_schema_fuzzer.go +++ b/go/test/fuzzing/tabletserver_schema_fuzzer.go @@ -59,10 +59,12 @@ func newTestLoadTable(tableName, comment string, db *fakesqldb.DB) (*schema.Tabl ctx := context.Background() appParams := db.ConnParams() dbaParams := db.ConnParams() - connPool := connpool.NewPool(tabletenv.NewEnv(nil, "SchemaTest"), "", tabletenv.ConnPoolConfig{ - Size: 2, - IdleTimeoutSeconds: 10, - }) + cfg := tabletenv.ConnPoolConfig{ + Size: 2, + } + _ = cfg.IdleTimeoutSeconds.Set("10s") + + connPool := connpool.NewPool(tabletenv.NewEnv(nil, "SchemaTest"), "", cfg) connPool.Open(appParams, dbaParams, appParams) conn, err := connPool.Get(ctx, nil) if err != nil { @@ -70,5 +72,5 @@ func newTestLoadTable(tableName, comment string, db *fakesqldb.DB) (*schema.Tabl } defer conn.Recycle() - return schema.LoadTable(conn, "fakesqldb", tableName, comment) + return schema.LoadTable(conn, "fakesqldb", tableName, "BASE_TABLE", comment) } diff --git a/go/test/fuzzing/vtctl_fuzzer.go b/go/test/fuzzing/vtctl_fuzzer.go index aed11774cc8..82fdaa572de 100644 --- a/go/test/fuzzing/vtctl_fuzzer.go +++ b/go/test/fuzzing/vtctl_fuzzer.go @@ -189,6 +189,6 @@ func Fuzz(data []byte) int { } func createTopo(ctx context.Context) (*topo.Server, error) { - ts := memorytopo.NewServer("zone1", "zone2", "zone3") + ts := memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") return ts, nil } diff --git a/go/test/utils/binlog.go b/go/test/utils/binlog.go new file mode 100644 index 00000000000..d3f686f1a8a --- /dev/null +++ b/go/test/utils/binlog.go @@ -0,0 +1,70 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "fmt" + "os" + "strings" +) + +const ( + ExtraCnf = "EXTRA_MY_CNF" + BinlogRowImageCnf = "binlog-row-image.cnf" +) + +// SetBinlogRowImageMode creates a temp cnf file to set binlog_row_image to noblob for vreplication unit tests. +// It adds it to the EXTRA_MY_CNF environment variable which appends text from them into my.cnf. +func SetBinlogRowImageMode(mode string, cnfDir string) error { + var newCnfs []string + + // remove any existing extra cnfs for binlog row image + cnfPath := fmt.Sprintf("%s/%s", cnfDir, BinlogRowImageCnf) + os.Remove(cnfPath) + extraCnf := strings.TrimSpace(os.Getenv(ExtraCnf)) + if extraCnf != "" { + cnfs := strings.Split(extraCnf, ":") + for _, cnf := range cnfs { + if !strings.Contains(cnf, BinlogRowImageCnf) { + newCnfs = append(newCnfs, cnf) + } + } + } + + // If specified add extra cnf for binlog row image, otherwise we will have reverted any previous specification. + if mode != "" { + f, err := os.Create(cnfPath) + if err != nil { + return err + } + _, err = f.WriteString(fmt.Sprintf("\nbinlog_row_image=%s\n", mode)) + if err != nil { + return err + } + err = f.Close() + if err != nil { + return err + } + + newCnfs = append(newCnfs, cnfPath) + } + err := os.Setenv(ExtraCnf, strings.Join(newCnfs, ":")) + if err != nil { + return err + } + return nil +} diff --git a/go/test/utils/binlog_test.go b/go/test/utils/binlog_test.go new file mode 100644 index 00000000000..593b964a171 --- /dev/null +++ b/go/test/utils/binlog_test.go @@ -0,0 +1,43 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +// TestSetBinlogRowImageMode tests the SetBinlogRowImageMode function. +func TestUtils(t *testing.T) { + tmpDir := "/tmp" + cnfFile := fmt.Sprintf("%s/%s", tmpDir, BinlogRowImageCnf) + // Test that setting the mode will create the cnf file and add it to the EXTRA_MY_CNF env var. + require.NoError(t, SetBinlogRowImageMode("noblob", tmpDir)) + data, err := os.ReadFile(cnfFile) + require.NoError(t, err) + require.Contains(t, string(data), "binlog_row_image=noblob") + require.Contains(t, os.Getenv(ExtraCnf), BinlogRowImageCnf) + + // Test that clearing the mode will remove the cnf file and the cnf from the EXTRA_MY_CNF env var. + require.NoError(t, SetBinlogRowImageMode("", tmpDir)) + require.NotContains(t, os.Getenv(ExtraCnf), BinlogRowImageCnf) + _, err = os.Stat(cnfFile) + require.True(t, os.IsNotExist(err)) +} diff --git a/go/test/utils/noleak.go b/go/test/utils/noleak.go new file mode 100644 index 00000000000..31d454ec789 --- /dev/null +++ b/go/test/utils/noleak.go @@ -0,0 +1,96 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "context" + "testing" + "time" + + "go.uber.org/goleak" +) + +// LeakCheckContext returns a Context that will be automatically cancelled at the end +// of this test. If the test has finished successfully, it will be checked for goroutine +// leaks after context cancellation. +func LeakCheckContext(t testing.TB) context.Context { + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(func() { + cancel() + EnsureNoLeaks(t) + }) + return ctx +} + +// LeakCheckContextTimeout behaves like LeakCheckContext but the returned Context will +// be cancelled after `timeout`, or after the test finishes, whichever happens first. +func LeakCheckContextTimeout(t testing.TB, timeout time.Duration) context.Context { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + t.Cleanup(func() { + cancel() + EnsureNoLeaks(t) + }) + return ctx +} + +// EnsureNoLeaks checks for goroutine and socket leaks and fails the test if any are found. +func EnsureNoLeaks(t testing.TB) { + if t.Failed() { + return + } + if err := ensureNoLeaks(); err != nil { + t.Fatal(err) + } +} + +// GetLeaks checks for goroutine and socket leaks and returns an error if any are found. +// One use case is in TestMain()s to ensure that all tests are cleaned up. +func GetLeaks() error { + return ensureNoLeaks() +} + +func ensureNoLeaks() error { + if err := ensureNoGoroutines(); err != nil { + return err + } + return nil +} + +func ensureNoGoroutines() error { + var ignored = []goleak.Option{ + goleak.IgnoreTopFunction("github.com/golang/glog.(*fileSink).flushDaemon"), + goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), + goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/dbconfigs.init.0.func1"), + goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/vtgate.resetAggregators"), + goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/vtgate.processQueryInfo"), + goleak.IgnoreTopFunction("github.com/patrickmn/go-cache.(*janitor).Run"), + goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/logutil.(*ThrottledLogger).log.func1"), + goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/vttablet/tabletserver/throttle.initThrottleTicker.func1.1"), + goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/vttablet/tabletserver/throttle.NewBackgroundClient.initThrottleTicker.func1.1"), + goleak.IgnoreTopFunction("testing.tRunner.func1"), + } + + var err error + for i := 0; i < 5; i++ { + err = goleak.Find(ignored...) + if err == nil { + return nil + } + time.Sleep(100 * time.Millisecond) + } + return err +} diff --git a/go/test/vschemawrapper/vschema_wrapper.go b/go/test/vschemawrapper/vschema_wrapper.go new file mode 100644 index 00000000000..e85b18ce36d --- /dev/null +++ b/go/test/vschemawrapper/vschema_wrapper.go @@ -0,0 +1,320 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vschemawrapper + +import ( + "context" + "fmt" + "strings" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/key" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +var _ plancontext.VSchema = (*VSchemaWrapper)(nil) + +type VSchemaWrapper struct { + V *vindexes.VSchema + Keyspace *vindexes.Keyspace + TabletType_ topodatapb.TabletType + Dest key.Destination + SysVarEnabled bool + Version plancontext.PlannerVersion + EnableViews bool + TestBuilder func(query string, vschema plancontext.VSchema, keyspace string) (*engine.Plan, error) +} + +func (vw *VSchemaWrapper) GetPrepareData(stmtName string) *vtgatepb.PrepareData { + switch stmtName { + case "prep_one_param": + return &vtgatepb.PrepareData{ + PrepareStatement: "select 1 from user where id = :v1", + ParamsCount: 1, + } + case "prep_in_param": + return &vtgatepb.PrepareData{ + PrepareStatement: "select 1 from user where id in (:v1, :v2)", + ParamsCount: 2, + } + case "prep_no_param": + return &vtgatepb.PrepareData{ + PrepareStatement: "select 1 from user", + ParamsCount: 0, + } + } + return nil +} + +func (vw *VSchemaWrapper) PlanPrepareStatement(ctx context.Context, query string) (*engine.Plan, sqlparser.Statement, error) { + plan, err := vw.TestBuilder(query, vw, vw.CurrentDb()) + if err != nil { + return nil, nil, err + } + stmt, _, err := sqlparser.Parse2(query) + if err != nil { + return nil, nil, err + } + return plan, stmt, nil +} + +func (vw *VSchemaWrapper) ClearPrepareData(string) {} + +func (vw *VSchemaWrapper) StorePrepareData(string, *vtgatepb.PrepareData) {} + +func (vw *VSchemaWrapper) GetUDV(name string) *querypb.BindVariable { + if strings.EqualFold(name, "prep_stmt") { + return sqltypes.StringBindVariable("select * from user where id in (?, ?, ?)") + } + return nil +} + +func (vw *VSchemaWrapper) IsShardRoutingEnabled() bool { + return false +} + +func (vw *VSchemaWrapper) GetVSchema() *vindexes.VSchema { + return vw.V +} + +func (vw *VSchemaWrapper) GetSrvVschema() *vschemapb.SrvVSchema { + return &vschemapb.SrvVSchema{ + Keyspaces: map[string]*vschemapb.Keyspace{ + "user": { + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{}, + Tables: map[string]*vschemapb.Table{ + "user": {}, + }, + }, + }, + } +} + +func (vw *VSchemaWrapper) ConnCollation() collations.ID { + return collations.CollationUtf8mb3ID +} + +func (vw *VSchemaWrapper) PlannerWarning(_ string) { +} + +func (vw *VSchemaWrapper) ForeignKeyMode(keyspace string) (vschemapb.Keyspace_ForeignKeyMode, error) { + defaultFkMode := vschemapb.Keyspace_FK_UNMANAGED + if vw.V.Keyspaces[keyspace] != nil && vw.V.Keyspaces[keyspace].ForeignKeyMode != vschemapb.Keyspace_FK_DEFAULT { + return vw.V.Keyspaces[keyspace].ForeignKeyMode, nil + } + return defaultFkMode, nil +} + +func (vw *VSchemaWrapper) AllKeyspace() ([]*vindexes.Keyspace, error) { + if vw.Keyspace == nil { + return nil, vterrors.VT13001("keyspace not available") + } + return []*vindexes.Keyspace{vw.Keyspace}, nil +} + +// FindKeyspace implements the VSchema interface +func (vw *VSchemaWrapper) FindKeyspace(keyspace string) (*vindexes.Keyspace, error) { + if vw.Keyspace == nil { + return nil, vterrors.VT13001("keyspace not available") + } + if vw.Keyspace.Name == keyspace { + return vw.Keyspace, nil + } + return nil, nil +} + +func (vw *VSchemaWrapper) Planner() plancontext.PlannerVersion { + return vw.Version +} + +// SetPlannerVersion implements the ContextVSchema interface +func (vw *VSchemaWrapper) SetPlannerVersion(v plancontext.PlannerVersion) { + vw.Version = v +} + +func (vw *VSchemaWrapper) GetSemTable() *semantics.SemTable { + return nil +} + +func (vw *VSchemaWrapper) KeyspaceExists(keyspace string) bool { + if vw.Keyspace != nil { + return vw.Keyspace.Name == keyspace + } + return false +} + +func (vw *VSchemaWrapper) SysVarSetEnabled() bool { + return vw.SysVarEnabled +} + +func (vw *VSchemaWrapper) TargetDestination(qualifier string) (key.Destination, *vindexes.Keyspace, topodatapb.TabletType, error) { + var keyspaceName string + if vw.Keyspace != nil { + keyspaceName = vw.Keyspace.Name + } + if vw.Dest == nil && qualifier != "" { + keyspaceName = qualifier + } + if keyspaceName == "" { + return nil, nil, 0, vterrors.VT03007() + } + keyspace := vw.V.Keyspaces[keyspaceName] + if keyspace == nil { + return nil, nil, 0, vterrors.VT05003(keyspaceName) + } + return vw.Dest, keyspace.Keyspace, vw.TabletType_, nil + +} + +func (vw *VSchemaWrapper) TabletType() topodatapb.TabletType { + return vw.TabletType_ +} + +func (vw *VSchemaWrapper) Destination() key.Destination { + return vw.Dest +} + +func (vw *VSchemaWrapper) FindTable(tab sqlparser.TableName) (*vindexes.Table, string, topodatapb.TabletType, key.Destination, error) { + destKeyspace, destTabletType, destTarget, err := topoproto.ParseDestination(tab.Qualifier.String(), topodatapb.TabletType_PRIMARY) + if err != nil { + return nil, destKeyspace, destTabletType, destTarget, err + } + table, err := vw.V.FindTable(destKeyspace, tab.Name.String()) + if err != nil { + return nil, destKeyspace, destTabletType, destTarget, err + } + return table, destKeyspace, destTabletType, destTarget, nil +} + +func (vw *VSchemaWrapper) FindView(tab sqlparser.TableName) sqlparser.SelectStatement { + destKeyspace, _, _, err := topoproto.ParseDestination(tab.Qualifier.String(), topodatapb.TabletType_PRIMARY) + if err != nil { + return nil + } + return vw.V.FindView(destKeyspace, tab.Name.String()) +} + +func (vw *VSchemaWrapper) FindTableOrVindex(tab sqlparser.TableName) (*vindexes.Table, vindexes.Vindex, string, topodatapb.TabletType, key.Destination, error) { + if tab.Qualifier.IsEmpty() && tab.Name.String() == "dual" { + ksName := vw.getActualKeyspace() + var ks *vindexes.Keyspace + if ksName == "" { + ks = vw.getfirstKeyspace() + ksName = ks.Name + } else { + ks = vw.V.Keyspaces[ksName].Keyspace + } + tbl := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("dual"), + Keyspace: ks, + Type: vindexes.TypeReference, + } + return tbl, nil, ksName, topodatapb.TabletType_PRIMARY, nil, nil + } + destKeyspace, destTabletType, destTarget, err := topoproto.ParseDestination(tab.Qualifier.String(), topodatapb.TabletType_PRIMARY) + if err != nil { + return nil, nil, destKeyspace, destTabletType, destTarget, err + } + if destKeyspace == "" { + destKeyspace = vw.getActualKeyspace() + } + table, vindex, err := vw.V.FindTableOrVindex(destKeyspace, tab.Name.String(), topodatapb.TabletType_PRIMARY) + if err != nil { + return nil, nil, destKeyspace, destTabletType, destTarget, err + } + return table, vindex, destKeyspace, destTabletType, destTarget, nil +} + +func (vw *VSchemaWrapper) getfirstKeyspace() (ks *vindexes.Keyspace) { + var f string + for name, schema := range vw.V.Keyspaces { + if f == "" || f > name { + f = name + ks = schema.Keyspace + } + } + return +} + +func (vw *VSchemaWrapper) getActualKeyspace() string { + if vw.Keyspace == nil { + return "" + } + if !sqlparser.SystemSchema(vw.Keyspace.Name) { + return vw.Keyspace.Name + } + ks, err := vw.AnyKeyspace() + if err != nil { + return "" + } + return ks.Name +} + +func (vw *VSchemaWrapper) DefaultKeyspace() (*vindexes.Keyspace, error) { + return vw.V.Keyspaces["main"].Keyspace, nil +} + +func (vw *VSchemaWrapper) AnyKeyspace() (*vindexes.Keyspace, error) { + return vw.DefaultKeyspace() +} + +func (vw *VSchemaWrapper) FirstSortedKeyspace() (*vindexes.Keyspace, error) { + return vw.V.Keyspaces["main"].Keyspace, nil +} + +func (vw *VSchemaWrapper) TargetString() string { + return "targetString" +} + +func (vw *VSchemaWrapper) WarnUnshardedOnly(_ string, _ ...any) { + +} + +func (vw *VSchemaWrapper) ErrorIfShardedF(keyspace *vindexes.Keyspace, _, errFmt string, params ...any) error { + if keyspace.Sharded { + return fmt.Errorf(errFmt, params...) + } + return nil +} + +func (vw *VSchemaWrapper) CurrentDb() string { + ksName := "" + if vw.Keyspace != nil { + ksName = vw.Keyspace.Name + } + return ksName +} + +func (vw *VSchemaWrapper) FindRoutedShard(keyspace, shard string) (string, error) { + return "", nil +} + +func (vw *VSchemaWrapper) IsViewsEnabled() bool { + return vw.EnableViews +} diff --git a/go/textutil/hash.go b/go/textutil/hash.go index 3266a698f15..5ea675caff2 100644 --- a/go/textutil/hash.go +++ b/go/textutil/hash.go @@ -53,7 +53,7 @@ func UUIDv5(inputs ...string) string { return uuidv5(inputs...).String() } -// UUIDv5Var creeates a UUID v5 string based on the given inputs. Return value is a big.Int +// UUIDv5Val creates a UUID v5 string based on the given inputs. Return value is a big.Int func UUIDv5Val(inputs ...string) big.Int { u := uuidv5(inputs...) var i big.Int @@ -61,7 +61,7 @@ func UUIDv5Val(inputs ...string) big.Int { return i } -// UUIDv5Base36 creeates a UUID v5 string based on the given inputs. Return value is a 25 character, base36 string +// UUIDv5Base36 creates a UUID v5 string based on the given inputs. Return value is a 25 character, base36 string func UUIDv5Base36(inputs ...string) string { i := UUIDv5Val(inputs...) return fmt.Sprintf("%025s", i.Text(36)) diff --git a/go/textutil/strings.go b/go/textutil/strings.go index b4ce5319eab..212ea742893 100644 --- a/go/textutil/strings.go +++ b/go/textutil/strings.go @@ -20,10 +20,18 @@ import ( "net/url" "regexp" "strings" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/proto/binlogdata" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) var ( - delimitedListRegexp = regexp.MustCompile(`[ ,;]+`) + delimitedListRegexp = regexp.MustCompile(`[ ,;]+`) + SimulatedNullString = sqltypes.NULL.String() + SimulatedNullStringSlice = []string{sqltypes.NULL.String()} + SimulatedNullInt = -1 ) // SplitDelimitedList splits a given string by comma, semi-colon or space, and returns non-empty strings @@ -73,3 +81,29 @@ func SingleWordCamel(w string) string { } return strings.ToUpper(w[0:1]) + strings.ToLower(w[1:]) } + +// ValueIsSimulatedNull returns true if the value represents +// a NULL or unknown/unspecified value. This is used to +// distinguish between a zero value / default and a user +// provided value that is equivalent (e.g. an empty string +// or slice). +func ValueIsSimulatedNull(val any) bool { + switch cval := val.(type) { + case string: + return cval == SimulatedNullString + case []string: + return len(cval) == 1 && cval[0] == sqltypes.NULL.String() + case binlogdata.OnDDLAction: + return int32(cval) == int32(SimulatedNullInt) + case int: + return cval == SimulatedNullInt + case int32: + return int32(cval) == int32(SimulatedNullInt) + case int64: + return int64(cval) == int64(SimulatedNullInt) + case []topodatapb.TabletType: + return len(cval) == 1 && cval[0] == topodatapb.TabletType(SimulatedNullInt) + default: + return false + } +} diff --git a/go/timer/rate_limiter_test.go b/go/timer/rate_limiter_test.go index ec70ed243d2..84122233996 100644 --- a/go/timer/rate_limiter_test.go +++ b/go/timer/rate_limiter_test.go @@ -27,6 +27,7 @@ import ( func TestRateLimiterLong(t *testing.T) { r := NewRateLimiter(time.Hour) require.NotNil(t, r) + defer r.Stop() val := 0 incr := func() error { val++; return nil } for i := 0; i < 10; i++ { @@ -39,6 +40,7 @@ func TestRateLimiterLong(t *testing.T) { func TestRateLimiterShort(t *testing.T) { r := NewRateLimiter(time.Millisecond * 250) require.NotNil(t, r) + defer r.Stop() val := 0 incr := func() error { val++; return nil } for i := 0; i < 10; i++ { @@ -54,6 +56,7 @@ func TestRateLimiterShort(t *testing.T) { func TestRateLimiterStop(t *testing.T) { r := NewRateLimiter(time.Millisecond * 10) require.NotNil(t, r) + defer r.Stop() val := 0 incr := func() error { val++; return nil } for i := 0; i < 5; i++ { diff --git a/go/timer/timer.go b/go/timer/timer.go index 5a28820274f..5407190ef55 100644 --- a/go/timer/timer.go +++ b/go/timer/timer.go @@ -59,7 +59,7 @@ type Timer struct { // state management mu sync.Mutex - running bool + running atomic.Bool // msg is used for out-of-band messages msg chan typeAction @@ -78,10 +78,10 @@ func NewTimer(interval time.Duration) *Timer { func (tm *Timer) Start(keephouse func()) { tm.mu.Lock() defer tm.mu.Unlock() - if tm.running { + isRunning := tm.running.Swap(true) + if isRunning { return } - tm.running = true go tm.run(keephouse) } @@ -118,7 +118,7 @@ func (tm *Timer) SetInterval(ns time.Duration) { tm.interval.Store(ns.Nanoseconds()) tm.mu.Lock() defer tm.mu.Unlock() - if tm.running { + if tm.running.Load() { tm.msg <- timerReset } } @@ -128,7 +128,7 @@ func (tm *Timer) SetInterval(ns time.Duration) { func (tm *Timer) Trigger() { tm.mu.Lock() defer tm.mu.Unlock() - if tm.running { + if tm.running.Load() { tm.msg <- timerTrigger } } @@ -146,9 +146,9 @@ func (tm *Timer) TriggerAfter(duration time.Duration) { func (tm *Timer) Stop() { tm.mu.Lock() defer tm.mu.Unlock() - if tm.running { + isRunning := tm.running.Swap(false) + if isRunning { tm.msg <- timerStop - tm.running = false } } @@ -158,7 +158,5 @@ func (tm *Timer) Interval() time.Duration { } func (tm *Timer) Running() bool { - tm.mu.Lock() - defer tm.mu.Unlock() - return tm.running + return tm.running.Load() } diff --git a/go/tools/astfmtgen/main.go b/go/tools/astfmtgen/main.go index ea968715ac1..38a14d77e7a 100644 --- a/go/tools/astfmtgen/main.go +++ b/go/tools/astfmtgen/main.go @@ -25,10 +25,10 @@ import ( "log" "os" "path" + "slices" "strconv" "strings" - "golang.org/x/exp/slices" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/packages" diff --git a/go/tools/asthelpergen/asthelpergen.go b/go/tools/asthelpergen/asthelpergen.go index c0e870c21c5..89aa40b6127 100644 --- a/go/tools/asthelpergen/asthelpergen.go +++ b/go/tools/asthelpergen/asthelpergen.go @@ -26,6 +26,8 @@ import ( "strings" "github.com/dave/jennifer/jen" + "golang.org/x/text/cases" + "golang.org/x/text/language" "golang.org/x/tools/go/packages" "vitess.io/vitess/go/tools/codegen" @@ -304,7 +306,7 @@ func printableTypeName(t types.Type) string { case *types.Named: return t.Obj().Name() case *types.Basic: - return strings.Title(t.Name()) // nolint + return cases.Title(language.AmericanEnglish).String(t.Name()) case *types.Interface: return t.String() default: diff --git a/go/tools/asthelpergen/clone_gen.go b/go/tools/asthelpergen/clone_gen.go index 79251140845..10387a5dc25 100644 --- a/go/tools/asthelpergen/clone_gen.go +++ b/go/tools/asthelpergen/clone_gen.go @@ -20,10 +20,10 @@ import ( "fmt" "go/types" "log" + "slices" "strings" "github.com/dave/jennifer/jen" - "golang.org/x/exp/slices" ) type CloneOptions struct { diff --git a/go/tools/asthelpergen/copy_on_rewrite_gen.go b/go/tools/asthelpergen/copy_on_rewrite_gen.go index 09d00c26308..1daa8d18981 100644 --- a/go/tools/asthelpergen/copy_on_rewrite_gen.go +++ b/go/tools/asthelpergen/copy_on_rewrite_gen.go @@ -132,22 +132,6 @@ func (c *cowGen) basicMethod(t types.Type, basic *types.Basic, spi generatorSPI) return nil } -func (c *cowGen) copySliceElement(t types.Type, elType types.Type, spi generatorSPI) jen.Code { - if !isNamed(t) && isBasic(elType) { - // copy(res, n) - return jen.Id("copy").Call(jen.Id("res"), jen.Id("n")) - } - - // for i := range n { - // res[i] = CloneAST(x) - // } - spi.addType(elType) - - return jen.For(jen.List(jen.Id("i"), jen.Id("x"))).Op(":=").Range().Id("n").Block( - jen.Id("res").Index(jen.Id("i")).Op("=").Add(c.readValueOfType(elType, jen.Id("x"), spi)), - ) -} - func ifNotNil(id string, stmts ...jen.Code) *jen.Statement { return jen.If(jen.Id(id).Op("!=").Nil()).Block(stmts...) } diff --git a/go/tools/codegen/common.go b/go/tools/codegen/common.go index 86b188eb67e..a4732474cb7 100644 --- a/go/tools/codegen/common.go +++ b/go/tools/codegen/common.go @@ -28,10 +28,13 @@ func CheckErrors(loaded []*packages.Package, skip func(fileName string) bool) er var errors []string for _, l := range loaded { for _, e := range l.Errors { - idx := strings.Index(e.Pos, ":") - filePath := e.Pos[:idx] - _, fileName := path.Split(filePath) - if !skip(fileName) { + if idx := strings.Index(e.Pos, ":"); idx >= 0 { + filePath := e.Pos[:idx] + _, fileName := path.Split(filePath) + if !skip(fileName) { + errors = append(errors, e.Error()) + } + } else { errors = append(errors, e.Error()) } } diff --git a/go/tools/codegen/goimports.go b/go/tools/codegen/goimports.go index 7e9a332c24b..f705172399d 100644 --- a/go/tools/codegen/goimports.go +++ b/go/tools/codegen/goimports.go @@ -45,9 +45,22 @@ func FormatJenFile(file *jen.File) ([]byte, error) { } func GoImports(fullPath string) error { - cmd := exec.Command("goimports", "-local", "vitess.io/vitess", "-w", fullPath) + // we need to run both gofmt and goimports because goimports does not support the + // simplification flag (-s) that our static linter checks require. + + cmd := exec.Command("gofmt", "-s", "-w", fullPath) + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return err + } + + cmd = exec.Command("goimports", "-local", "vitess.io/vitess", "-w", fullPath) cmd.Stderr = os.Stderr - return cmd.Run() + if err := cmd.Run(); err != nil { + return err + } + + return nil } func SaveJenFile(fullPath string, file *jen.File) error { diff --git a/go/tools/go-upgrade/go-upgrade.go b/go/tools/go-upgrade/go-upgrade.go new file mode 100644 index 00000000000..b3ba7ca628d --- /dev/null +++ b/go/tools/go-upgrade/go-upgrade.go @@ -0,0 +1,573 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "bufio" + "fmt" + "io" + "log" + "net/http" + "os" + "path" + "regexp" + "strconv" + "strings" + "time" + + "encoding/json" + + "github.com/hashicorp/go-version" + "github.com/spf13/cobra" +) + +const ( + goDevAPI = "https://go.dev/dl/?mode=json" + + // regexpFindBootstrapVersion greps the current bootstrap version from the Makefile. The bootstrap + // version is composed of either one or two numbers, for instance: 18.1 or 18. + // The expected format of the input is BOOTSTRAP_VERSION=18 or BOOTSTRAP_VERSION=18.1 + regexpFindBootstrapVersion = "(?i).*BOOTSTRAP_VERSION[[:space:]]*=[[:space:]]*([0-9.]+).*" + + // regexpFindGolangVersion greps all numbers separated by a . after the goversion_min function call + // This is used to understand what is the current version of Golang using either two or three numbers + // The major, minor and optional patch number of the Golang version + regexpFindGolangVersion = "(?i).*goversion_min[[:space:]]*([0-9.]+).*" + + // regexpReplaceGoModGoVersion replaces the top-level golang version instruction in the go.mod file + // Example going from go1.20 to go1.20: `go 1.20` -> `go 1.21` + regexpReplaceGoModGoVersion = `go[[:space:]]([0-9.]+)\.([0-9.]+)` + + // The regular expressions below match the entire bootstrap_version declaration in Dockerfiles and Makefile + // A bootstrap version declaration is usually: 'ARG bootstrap_version = 18' in Dockerfile, and + // 'BOOTSTRAP_VERSION=18' in the Makefile. Note that the value 18 can also be a float. + regexpReplaceDockerfileBootstrapVersion = "ARG[[:space:]]*bootstrap_version[[:space:]]*=[[:space:]]*[0-9.]+" + regexpReplaceMakefileBootstrapVersion = "BOOTSTRAP_VERSION[[:space:]]*=[[:space:]]*[0-9.]+" + + // The regular expression below matches the bootstrap_version we are using in the test.go file. + // In test.go, there is a flag named 'bootstrap-version' that has a default value. We are looking + // to match the entire flag name + the default value (being the current bootstrap version) + // Example input: "flag.String("bootstrap-version", "20", "the version identifier to use for the docker images")" + regexpReplaceTestGoBootstrapVersion = `\"bootstrap-version\",[[:space:]]*\"([0-9.]+)\"` + + // regexpReplaceGolangVersionInWorkflow matches the golang version increment in the string `go-version: 1.20.5` + // which is used to replace the golang version we use inside our workflows + regexpReplaceGolangVersionInWorkflow = `go-version:[[:space:]]*([0-9.]+).*` +) + +type ( + latestGolangRelease struct { + Version string `json:"version"` + Stable bool `json:"stable"` + } + + bootstrapVersion struct { + major, minor int // when minor == -1, it means there are no minor version + } +) + +var ( + workflowUpdate = true + allowMajorUpgrade = false + isMainBranch = false + goTo = "" + + rootCmd = &cobra.Command{ + Use: "go-upgrade", + Short: "Automates the Golang upgrade.", + Long: `go-upgrade allows us to automate some tasks required to bump the version of Golang used throughout our codebase. + +It mostly used by the update_golang_version.yml CI workflow that runs on a CRON. + +This tool is meant to be run at the root of the repository. +`, + Run: func(cmd *cobra.Command, args []string) { + _ = cmd.Help() + }, + Args: cobra.NoArgs, + } + + getCmd = &cobra.Command{ + Use: "get", + Short: "Command to get useful information about the codebase.", + Long: "Command to get useful information about the codebase.", + Run: func(cmd *cobra.Command, args []string) { + _ = cmd.Help() + }, + Args: cobra.NoArgs, + } + + getGoCmd = &cobra.Command{ + Use: "go-version", + Short: "go-version prints the Golang version used by the current codebase.", + Long: "go-version prints the Golang version used by the current codebase.", + Run: runGetGoCmd, + Args: cobra.NoArgs, + } + + getBootstrapCmd = &cobra.Command{ + Use: "bootstrap-version", + Short: "bootstrap-version prints the Docker Bootstrap version used by the current codebase.", + Long: "bootstrap-version prints the Docker Bootstrap version used by the current codebase.", + Run: runGetBootstrapCmd, + Args: cobra.NoArgs, + } + + upgradeCmd = &cobra.Command{ + Use: "upgrade", + Short: "upgrade will upgrade the Golang and Bootstrap versions of the codebase to the latest available version.", + Long: `This command bumps the Golang and Bootstrap versions of the codebase. + +The latest available version of Golang will be fetched and used instead of the old version. + +By default, we do not allow major Golang version upgrade such as 1.20 to 1.21 but this can be overridden using the +--allow-major-upgrade CLI flag. Usually, we only allow such upgrade on the main branch of the repository. + +In CI, particularly, we do not want to modify the workflow files before automatically creating a Pull Request to +avoid permission issues. The rewrite of workflow files can be disabled using the --workflow-update=false CLI flag. + +Moreover, this command automatically bumps the bootstrap version of our codebase. If we are on the main branch, we +want to use the CLI flag --main to remember to increment the bootstrap version by 1 instead of 0.1.`, + Run: runUpgradeCmd, + Args: cobra.NoArgs, + } + + upgradeWorkflowsCmd = &cobra.Command{ + Use: "workflows", + Short: "workflows will upgrade the Golang version used in our CI workflows files.", + Long: "This step is omitted by the bot since. We let the maintainers of Vitess manually upgrade the version used by the workflows using this command.", + Run: runUpgradeWorkflowsCmd, + Args: cobra.NoArgs, + } +) + +func init() { + rootCmd.AddCommand(getCmd) + rootCmd.AddCommand(upgradeCmd) + + getCmd.AddCommand(getGoCmd) + getCmd.AddCommand(getBootstrapCmd) + + upgradeCmd.AddCommand(upgradeWorkflowsCmd) + + upgradeCmd.Flags().BoolVar(&workflowUpdate, "workflow-update", workflowUpdate, "Whether or not the workflow files should be updated. Useful when using this script to auto-create PRs.") + upgradeCmd.Flags().BoolVar(&allowMajorUpgrade, "allow-major-upgrade", allowMajorUpgrade, "Defines if Golang major version upgrade are allowed.") + upgradeCmd.Flags().BoolVar(&isMainBranch, "main", isMainBranch, "Defines if the current branch is the main branch.") + + upgradeWorkflowsCmd.Flags().StringVar(&goTo, "go-to", goTo, "The Golang version we want to upgrade to.") +} + +func main() { + cobra.CheckErr(rootCmd.Execute()) +} + +func runGetGoCmd(_ *cobra.Command, _ []string) { + currentVersion, err := currentGolangVersion() + if err != nil { + log.Fatal(err) + } + fmt.Println(currentVersion.String()) +} + +func runGetBootstrapCmd(_ *cobra.Command, _ []string) { + currentVersion, err := currentBootstrapVersion() + if err != nil { + log.Fatal(err) + } + fmt.Println(currentVersion.toString()) +} + +func runUpgradeWorkflowsCmd(_ *cobra.Command, _ []string) { + err := updateWorkflowFilesOnly(goTo) + if err != nil { + log.Fatal(err) + } +} + +func runUpgradeCmd(_ *cobra.Command, _ []string) { + err := upgradePath(allowMajorUpgrade, workflowUpdate, isMainBranch) + if err != nil { + log.Fatal(err) + } +} + +func updateWorkflowFilesOnly(goTo string) error { + newV, err := version.NewVersion(goTo) + if err != nil { + return err + } + filesToChange, err := getListOfFilesInPaths([]string{"./.github/workflows"}) + if err != nil { + return err + } + + for _, fileToChange := range filesToChange { + err = replaceInFile( + []*regexp.Regexp{regexp.MustCompile(regexpReplaceGolangVersionInWorkflow)}, + []string{"go-version: " + newV.String()}, + fileToChange, + ) + if err != nil { + return err + } + } + return nil +} + +func upgradePath(allowMajorUpgrade, workflowUpdate, isMainBranch bool) error { + currentVersion, err := currentGolangVersion() + if err != nil { + return err + } + + availableVersions, err := getLatestStableGolangReleases() + if err != nil { + return err + } + + upgradeTo := chooseNewVersion(currentVersion, availableVersions, allowMajorUpgrade) + if upgradeTo == nil { + return nil + } + + err = replaceGoVersionInCodebase(currentVersion, upgradeTo, workflowUpdate) + if err != nil { + return err + } + + currentBootstrapVersionF, err := currentBootstrapVersion() + if err != nil { + return err + } + nextBootstrapVersionF := currentBootstrapVersionF + if isMainBranch { + nextBootstrapVersionF.major += 1 + } else { + nextBootstrapVersionF.minor += 1 + } + err = updateBootstrapVersionInCodebase(currentBootstrapVersionF.toString(), nextBootstrapVersionF.toString(), upgradeTo) + if err != nil { + return err + } + return nil +} + +// currentGolangVersion gets the running version of Golang in Vitess +// and returns it as a *version.Version. +// +// The file `./build.env` describes which version of Golang is expected by Vitess. +// We use this file to detect the current Golang version of our codebase. +// The file contains `goversion_min x.xx.xx`, we will grep `goversion_min` to finally find +// the precise golang version we're using. +func currentGolangVersion() (*version.Version, error) { + contentRaw, err := os.ReadFile("build.env") + if err != nil { + return nil, err + } + content := string(contentRaw) + + versre := regexp.MustCompile(regexpFindGolangVersion) + versionStr := versre.FindStringSubmatch(content) + if len(versionStr) != 2 { + return nil, fmt.Errorf("malformatted error, got: %v", versionStr) + } + return version.NewVersion(versionStr[1]) +} + +func currentBootstrapVersion() (bootstrapVersion, error) { + contentRaw, err := os.ReadFile("Makefile") + if err != nil { + return bootstrapVersion{}, err + } + content := string(contentRaw) + + versre := regexp.MustCompile(regexpFindBootstrapVersion) + versionStr := versre.FindStringSubmatch(content) + if len(versionStr) != 2 { + return bootstrapVersion{}, fmt.Errorf("malformatted error, got: %v", versionStr) + } + _, err = strconv.ParseFloat(versionStr[1], 64) + if err != nil { + return bootstrapVersion{}, err + } + + vs := strings.Split(versionStr[1], ".") + major, err := strconv.Atoi(vs[0]) + if err != nil { + return bootstrapVersion{}, err + } + + minor := -1 + if len(vs) > 1 { + minor, err = strconv.Atoi(vs[1]) + if err != nil { + return bootstrapVersion{}, err + } + } + + return bootstrapVersion{ + major: major, + minor: minor, + }, nil +} + +// getLatestStableGolangReleases fetches the latest stable releases of Golang from +// the official website using the goDevAPI URL. +// Once fetched, the releases are returned as version.Collection. +func getLatestStableGolangReleases() (version.Collection, error) { + resp, err := http.Get(goDevAPI) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + var latestGoReleases []latestGolangRelease + err = json.Unmarshal(body, &latestGoReleases) + if err != nil { + return nil, err + } + + var versions version.Collection + for _, release := range latestGoReleases { + if !release.Stable { + continue + } + if !strings.HasPrefix(release.Version, "go") { + return nil, fmt.Errorf("golang version malformatted: %s", release.Version) + } + newVersion, err := version.NewVersion(release.Version[2:]) + if err != nil { + return nil, err + } + versions = append(versions, newVersion) + } + return versions, nil +} + +// chooseNewVersion decides what will be the next version we're going to use in our codebase. +// Given the current Golang version, the available latest versions and whether we allow major upgrade or not, +// chooseNewVersion will return either the new version or nil if we cannot/don't need to upgrade. +func chooseNewVersion(curVersion *version.Version, latestVersions version.Collection, allowMajorUpgrade bool) *version.Version { + selectedVersion := curVersion + for _, latestVersion := range latestVersions { + if !allowMajorUpgrade && !isSameMajorMinorVersion(latestVersion, selectedVersion) { + continue + } + if latestVersion.GreaterThan(selectedVersion) { + selectedVersion = latestVersion + } + } + // No change detected, return nil meaning that we do not want to have a new Golang version. + if selectedVersion.Equal(curVersion) { + return nil + } + return selectedVersion +} + +// replaceGoVersionInCodebase goes through all the files in the codebase where the +// Golang version must be updated +func replaceGoVersionInCodebase(old, new *version.Version, workflowUpdate bool) error { + if old.Equal(new) { + return nil + } + explore := []string{ + "./test/templates", + "./build.env", + "./docker/bootstrap/Dockerfile.common", + } + if workflowUpdate { + explore = append(explore, "./.github/workflows") + } + filesToChange, err := getListOfFilesInPaths(explore) + if err != nil { + return err + } + + for _, fileToChange := range filesToChange { + // The regular expression below simply replace the old version string by the new golang version + err = replaceInFile( + []*regexp.Regexp{regexp.MustCompile(fmt.Sprintf(`(%s)`, old.String()))}, + []string{new.String()}, + fileToChange, + ) + if err != nil { + return err + } + } + + if !isSameMajorMinorVersion(old, new) { + err = replaceInFile( + []*regexp.Regexp{regexp.MustCompile(regexpReplaceGoModGoVersion)}, + []string{fmt.Sprintf("go %d.%d", new.Segments()[0], new.Segments()[1])}, + "./go.mod", + ) + if err != nil { + return err + } + } + return nil +} + +func updateBootstrapVersionInCodebase(old, new string, newGoVersion *version.Version) error { + if old == new { + return nil + } + files, err := getListOfFilesInPaths([]string{ + "./docker/base", + "./docker/lite", + "./docker/local", + "./docker/vttestserver", + "./Makefile", + "./test/templates", + }) + if err != nil { + return err + } + + for _, file := range files { + err = replaceInFile( + []*regexp.Regexp{ + regexp.MustCompile(regexpReplaceDockerfileBootstrapVersion), // Dockerfile + regexp.MustCompile(regexpReplaceMakefileBootstrapVersion), // Makefile + }, + []string{ + fmt.Sprintf("ARG bootstrap_version=%s", new), // Dockerfile + fmt.Sprintf("BOOTSTRAP_VERSION=%s", new), // Makefile + }, + file, + ) + if err != nil { + return err + } + } + + err = replaceInFile( + []*regexp.Regexp{regexp.MustCompile(regexpReplaceTestGoBootstrapVersion)}, + []string{fmt.Sprintf("\"bootstrap-version\", \"%s\"", new)}, + "./test.go", + ) + if err != nil { + return err + } + + err = updateBootstrapChangelog(new, newGoVersion) + if err != nil { + return err + } + + return nil +} + +func updateBootstrapChangelog(new string, goVersion *version.Version) error { + file, err := os.OpenFile("./docker/bootstrap/CHANGELOG.md", os.O_RDWR, 0600) + if err != nil { + return err + } + defer file.Close() + + s, err := file.Stat() + if err != nil { + return err + } + newContent := fmt.Sprintf(` + +## [%s] - %s +### Changes +- Update build to golang %s`, new, time.Now().Format(time.DateOnly), goVersion.String()) + + _, err = file.WriteAt([]byte(newContent), s.Size()) + if err != nil { + return err + } + return nil +} + +func isSameMajorMinorVersion(a, b *version.Version) bool { + return a.Segments()[0] == b.Segments()[0] && a.Segments()[1] == b.Segments()[1] +} + +func getListOfFilesInPaths(pathsToExplore []string) ([]string, error) { + var filesToChange []string + for _, pathToExplore := range pathsToExplore { + stat, err := os.Stat(pathToExplore) + if err != nil { + return nil, err + } + if stat.IsDir() { + dirEntries, err := os.ReadDir(pathToExplore) + if err != nil { + return nil, err + } + for _, entry := range dirEntries { + if entry.IsDir() { + continue + } + filesToChange = append(filesToChange, path.Join(pathToExplore, entry.Name())) + } + } else { + filesToChange = append(filesToChange, pathToExplore) + } + } + return filesToChange, nil +} + +// replaceInFile replaces old with new in the given file. +func replaceInFile(oldexps []*regexp.Regexp, new []string, fileToChange string) error { + if len(oldexps) != len(new) { + panic("old and new should be of the same length") + } + + f, err := os.OpenFile(fileToChange, os.O_RDWR, 0600) + if err != nil { + return err + } + defer f.Close() + + var res []string + reader := bufio.NewReader(f) + for { + line, err := reader.ReadString('\n') + if err != nil { + if err == io.EOF { + break + } + panic(err) + } + for i, oldexp := range oldexps { + line = oldexp.ReplaceAllString(line, new[i]) + } + res = append(res, line) + } + + _, err = f.WriteAt([]byte(strings.Join(res, "")), 0) + if err != nil { + return err + } + return nil +} + +func (b bootstrapVersion) toString() string { + if b.minor == -1 { + return fmt.Sprintf("%d", b.major) + } + return fmt.Sprintf("%d.%d", b.major, b.minor) +} diff --git a/go/tools/go-upgrade/go-upgrade_test.go b/go/tools/go-upgrade/go-upgrade_test.go new file mode 100644 index 00000000000..378672d544f --- /dev/null +++ b/go/tools/go-upgrade/go-upgrade_test.go @@ -0,0 +1,105 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "regexp" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRegularExpressions(t *testing.T) { + lists := []struct { + regexp string + input string + checkF func(t *testing.T, regexp *regexp.Regexp, input string) + }{ + { + regexp: regexpFindBootstrapVersion, + input: "BOOTSTRAP_VERSION=18.1", + checkF: func(t *testing.T, regexp *regexp.Regexp, input string) { + submatch := regexp.FindStringSubmatch(input) + require.Len(t, submatch, 2, "Should have two submatches in the regular expression") + require.Equal(t, "18.1", submatch[1]) + }, + }, + { + regexp: regexpFindGolangVersion, + input: `goversion_min 1.20.5 || echo "Go version reported`, + checkF: func(t *testing.T, regexp *regexp.Regexp, input string) { + submatch := regexp.FindStringSubmatch(input) + require.Len(t, submatch, 2, "Should have two submatches in the regular expression") + require.Equal(t, "1.20.5", submatch[1]) + }, + }, + { + regexp: regexpReplaceGoModGoVersion, + input: "go 1.20", + checkF: func(t *testing.T, regexp *regexp.Regexp, input string) { + res := regexp.ReplaceAllString(input, "go 1.21") + require.Equal(t, "go 1.21", res) + }, + }, + { + regexp: regexpReplaceGoModGoVersion, + input: "go 1 20", + checkF: func(t *testing.T, regexp *regexp.Regexp, input string) { + res := regexp.ReplaceAllString(input, "go 1.21") + require.Equal(t, "go 1 20", res) + }, + }, + { + regexp: regexpReplaceDockerfileBootstrapVersion, + input: "ARG bootstrap_version=18.1", + checkF: func(t *testing.T, regexp *regexp.Regexp, input string) { + res := regexp.ReplaceAllString(input, "ARG bootstrap_version=18.2") + require.Equal(t, "ARG bootstrap_version=18.2", res) + }, + }, + { + regexp: regexpReplaceMakefileBootstrapVersion, + input: "BOOTSTRAP_VERSION=18.1", + checkF: func(t *testing.T, regexp *regexp.Regexp, input string) { + res := regexp.ReplaceAllString(input, "BOOTSTRAP_VERSION=18.2") + require.Equal(t, "BOOTSTRAP_VERSION=18.2", res) + }, + }, + { + regexp: regexpReplaceTestGoBootstrapVersion, + input: `flag.String("bootstrap-version", "18.1", "the version identifier to use for the docker images")`, + checkF: func(t *testing.T, regexp *regexp.Regexp, input string) { + res := regexp.ReplaceAllString(input, "\"bootstrap-version\", \"18.2\"") + require.Equal(t, `flag.String("bootstrap-version", "18.2", "the version identifier to use for the docker images")`, res) + }, + }, + { + regexp: regexpReplaceGolangVersionInWorkflow, + input: "go-version: 1.20.5", + checkF: func(t *testing.T, regexp *regexp.Regexp, input string) { + res := regexp.ReplaceAllString(input, "go-version: 1.20.6") + require.Equal(t, `go-version: 1.20.6`, res) + }, + }, + } + + for _, list := range lists { + t.Run(list.regexp+" "+list.input, func(t *testing.T) { + list.checkF(t, regexp.MustCompile(list.regexp), list.input) + }) + } +} diff --git a/go/tools/release-notes/release_notes.go b/go/tools/release-notes/release_notes.go index 73b6fd200f2..5bb03339245 100644 --- a/go/tools/release-notes/release_notes.go +++ b/go/tools/release-notes/release_notes.go @@ -27,7 +27,6 @@ import ( "regexp" "sort" "strings" - "sync" "text/template" "github.com/spf13/pflag" @@ -40,24 +39,24 @@ type ( labels []label - author struct { - Login string `json:"login"` + pullRequestAuthor struct { + Login string } - prInfo struct { - Labels labels `json:"labels"` - Number int `json:"number"` - Title string `json:"title"` - Author author `json:"author"` + pullRequestInformation struct { + Number int + Title string + Labels labels + Author pullRequestAuthor } - prsByComponent = map[string][]prInfo + prsByComponent = map[string][]pullRequestInformation prsByType = map[string]prsByComponent sortedPRComponent struct { Name string - PrInfos []prInfo + PrInfos []pullRequestInformation } sortedPRType struct { @@ -76,14 +75,17 @@ type ( KnownIssues string AddDetails string PathToChangeLogFileOnGH, ChangeLog, ChangeMetrics string + SubDirPath string } ) -const ( - releaseNotesPath = `doc/releasenotes/` - releaseNotesPathGitHub = `https://github.com/vitessio/vitess/blob/main/` + releaseNotesPath +var ( + releaseNotesPath = `changelog/` +) - markdownTemplate = `# Release of Vitess {{.Version}} +const ( + releaseNotesPathGitHub = `https://github.com/vitessio/vitess/blob/main/` + markdownTemplate = `# Release of Vitess {{.Version}} {{- if or .Announcement .AddDetails }} {{ .Announcement }} @@ -131,16 +133,15 @@ The entire changelog for this release can be found [here]({{ .PathToChangeLogFil prefixType = "Type: " prefixComponent = "Component: " - numberOfThreads = 10 lengthOfSingleSHA = 40 ) func (rn *releaseNote) generate(rnFile, changelogFile *os.File) error { var err error // Generate the release notes - rn.PathToChangeLogFileOnGH = fmt.Sprintf(releaseNotesPathGitHub+"%s_changelog.md", rn.VersionUnderscore) + rn.PathToChangeLogFileOnGH = releaseNotesPathGitHub + path.Join(rn.SubDirPath, "changelog.md") if rnFile == nil { - rnFile, err = os.OpenFile(fmt.Sprintf(path.Join(releaseNotesPath, "%s_release_notes.md"), rn.VersionUnderscore), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + rnFile, err = os.OpenFile(path.Join(rn.SubDirPath, "release_notes.md"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { return err } @@ -154,7 +155,7 @@ func (rn *releaseNote) generate(rnFile, changelogFile *os.File) error { // Generate the changelog if changelogFile == nil { - changelogFile, err = os.OpenFile(fmt.Sprintf(path.Join(releaseNotesPath, "%s_changelog.md"), rn.VersionUnderscore), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + changelogFile, err = os.OpenFile(path.Join(rn.SubDirPath, "changelog.md"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { return err } @@ -185,61 +186,27 @@ func loadKnownIssues(release string) ([]knownIssue, error) { return knownIssues, nil } -func loadMergedPRs(from, to string) (prs []string, authors []string, commitCount int, err error) { - // load the git log with "author \t title \t parents" - out, err := execCmd("git", "log", `--pretty=format:%ae%x09%s%x09%P%x09%h`, fmt.Sprintf("%s..%s", from, to)) - +func loadMergedPRsAndAuthors(name string) (pris []pullRequestInformation, authors []string, err error) { + out, err := execCmd("gh", "pr", "list", "-s", "merged", "-S", fmt.Sprintf("milestone:%s", name), "--json", "number,title,labels,author", "--limit", "5000") if err != nil { return } - return parseGitLog(string(out)) -} - -func parseGitLog(s string) (prs []string, authorCommits []string, commitCount int, err error) { - rx := regexp.MustCompile(`(.+)\t(.+)\t(.+)\t(.+)`) - mergePR := regexp.MustCompile(`Merge pull request #(\d+)`) - squashPR := regexp.MustCompile(`\(#(\d+)\)`) - authMap := map[string]string{} // here we will store email <-> gh user mappings - lines := strings.Split(s, "\n") - for _, line := range lines { - lineInfo := rx.FindStringSubmatch(line) - if len(lineInfo) != 5 { - log.Fatalf("failed to parse the output from git log: %s", line) - } - authorEmail := lineInfo[1] - title := lineInfo[2] - parents := lineInfo[3] - sha := lineInfo[4] - merged := mergePR.FindStringSubmatch(title) - if len(merged) == 2 { - // this is a merged PR. remember the PR # - prs = append(prs, merged[1]) - continue - } - - if len(parents) <= lengthOfSingleSHA { - // we have a single parent, and the commit counts - commitCount++ - if _, exists := authMap[authorEmail]; !exists { - authMap[authorEmail] = sha - } - } - - squashed := squashPR.FindStringSubmatch(title) - if len(squashed) == 2 { - // this is a merged PR. remember the PR # - prs = append(prs, squashed[1]) - continue - } + err = json.Unmarshal(out, &pris) + if err != nil { + return nil, nil, err } - for _, author := range authMap { - authorCommits = append(authorCommits, author) + // Get the full list of distinct PRs authors and sort them + authorMap := map[string]bool{} + for _, pri := range pris { + login := pri.Author.Login + if ok := authorMap[login]; !ok { + authors = append(authors, login) + authorMap[login] = true + } } - - sort.Strings(prs) - sort.Strings(authorCommits) // not really needed, but makes testing easier + sort.Strings(authors) return } @@ -259,133 +226,10 @@ func execCmd(name string, arg ...string) ([]byte, error) { return out, nil } -func loadPRInfo(pr string) (prInfo, error) { - out, err := execCmd("gh", "pr", "view", pr, "--json", "title,number,labels,author") - if err != nil { - return prInfo{}, err - } - var prInfo prInfo - err = json.Unmarshal(out, &prInfo) - return prInfo, err -} - -func loadAuthorInfo(sha string) (string, error) { - out, err := execCmd("gh", "api", "/repos/vitessio/vitess/commits/"+sha) - if err != nil { - return "", err - } - var prInfo prInfo - err = json.Unmarshal(out, &prInfo) - if err != nil { - return "", err - } - return prInfo.Author.Login, nil -} - -type req struct { - isPR bool - key string -} - -func loadAllPRs(prs, authorCommits []string) ([]prInfo, []string, error) { - errChan := make(chan error) - wgDone := make(chan bool) - prChan := make(chan req, len(prs)+len(authorCommits)) - // fill the work queue - for _, s := range prs { - prChan <- req{isPR: true, key: s} - } - for _, s := range authorCommits { - prChan <- req{isPR: false, key: s} - } - close(prChan) - - var prInfos []prInfo - var authors []string - fmt.Printf("Found %d merged PRs. Loading PR info", len(prs)) - wg := sync.WaitGroup{} - mu := sync.Mutex{} - - shouldLoad := func(in string) bool { - if in == "" { - return false - } - mu.Lock() - defer mu.Unlock() - - for _, existing := range authors { - if existing == in { - return false - } - } - return true - } - addAuthor := func(in string) { - mu.Lock() - defer mu.Unlock() - authors = append(authors, in) - } - addPR := func(in prInfo) { - mu.Lock() - defer mu.Unlock() - prInfos = append(prInfos, in) - } - - for i := 0; i < numberOfThreads; i++ { - wg.Add(1) - go func() { - // load meta data about PRs - defer wg.Done() - - for b := range prChan { - fmt.Print(".") - if b.isPR { - prInfo, err := loadPRInfo(b.key) - if err != nil { - errChan <- err - break - } - addPR(prInfo) - continue - } - author, err := loadAuthorInfo(b.key) - if err != nil { - errChan <- err - break - } - if shouldLoad(author) { - addAuthor(author) - } - - } - }() - } - - go func() { - // wait for the loading to finish - wg.Wait() - close(wgDone) - }() - - var err error - select { - case <-wgDone: - break - case err = <-errChan: - break - } - - fmt.Println() - - sort.Strings(authors) - - return prInfos, authors, err -} - -func groupPRs(prInfos []prInfo) prsByType { +func groupPRs(pris []pullRequestInformation) prsByType { prPerType := prsByType{} - for _, info := range prInfos { + for _, info := range pris { var typ, component string for _, lbl := range info.Labels { switch { @@ -475,11 +319,11 @@ func getStringForKnownIssues(issues []knownIssue) (string, error) { return buff.String(), nil } -func groupAndStringifyPullRequest(pr []prInfo) (string, error) { - if len(pr) == 0 { +func groupAndStringifyPullRequest(pris []pullRequestInformation) (string, error) { + if len(pris) == 0 { return "", nil } - prPerType := groupPRs(pr) + prPerType := groupPRs(pris) prStr, err := getStringForPullRequestInfos(prPerType) if err != nil { return "", err @@ -489,11 +333,8 @@ func groupAndStringifyPullRequest(pr []prInfo) (string, error) { func main() { var ( - from, versionName, summaryFile string - to = "HEAD" + versionName, summaryFile string ) - pflag.StringVarP(&from, "from", "f", "", "from sha/tag/branch") - pflag.StringVarP(&to, "to", "t", to, "to sha/tag/branch") pflag.StringVarP(&versionName, "version", "v", "", "name of the version (has to be the following format: v11.0.0)") pflag.StringVarP(&summaryFile, "summary", "s", "", "readme file on which there is a summary of the release") pflag.Parse() @@ -506,9 +347,20 @@ func main() { log.Fatal("The --version flag must be set using a valid format. Format: 'vX.X.X'.") } + // Define the path to the release notes folder + majorVersion := versionMatch[1] + "." + versionMatch[2] + patchVersion := versionMatch[1] + "." + versionMatch[2] + "." + versionMatch[3] + releaseNotesPath = path.Join(releaseNotesPath, majorVersion, patchVersion) + + err := os.MkdirAll(releaseNotesPath, os.ModePerm) + if err != nil { + log.Fatal(err) + } + releaseNotes := releaseNote{ Version: versionName, VersionUnderscore: fmt.Sprintf("%s_%s_%s", versionMatch[1], versionMatch[2], versionMatch[3]), // v14.0.0 -> 14_0_0, this is used to format filenames. + SubDirPath: releaseNotesPath, } // summary of the release @@ -532,26 +384,23 @@ func main() { releaseNotes.KnownIssues = knownIssuesStr // changelog with pull requests - prs, authorCommits, commits, err := loadMergedPRs(from, to) + prs, authors, err := loadMergedPRsAndAuthors(versionName) if err != nil { log.Fatal(err) } - prInfos, authors, err := loadAllPRs(prs, authorCommits) - if err != nil { - log.Fatal(err) - } - releaseNotes.ChangeLog, err = groupAndStringifyPullRequest(prInfos) + + releaseNotes.ChangeLog, err = groupAndStringifyPullRequest(prs) if err != nil { log.Fatal(err) } // changelog metrics - if commits > 0 && len(authors) > 0 { + if len(prs) > 0 && len(authors) > 0 { releaseNotes.ChangeMetrics = fmt.Sprintf(` -The release includes %d commits (excluding merges) +The release includes %d merged Pull Requests. Thanks to all our contributors: @%s -`, commits, strings.Join(authors, ", @")) +`, len(prs), strings.Join(authors, ", @")) } if err := releaseNotes.generate(nil, nil); err != nil { diff --git a/go/tools/release-notes/release_notes_test.go b/go/tools/release-notes/release_notes_test.go index 0622d458d28..19f946525c3 100644 --- a/go/tools/release-notes/release_notes_test.go +++ b/go/tools/release-notes/release_notes_test.go @@ -20,7 +20,6 @@ import ( "os" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/test/utils" @@ -29,26 +28,26 @@ import ( func Test_groupPRs(t *testing.T) { tests := []struct { name string - prInfos []prInfo - want map[string]map[string][]prInfo + prInfos []pullRequestInformation + want map[string]map[string][]pullRequestInformation }{ { name: "Single PR info with no labels", - prInfos: []prInfo{{Title: "pr 1", Number: 1}}, - want: map[string]map[string][]prInfo{"Other": {"Other": []prInfo{{Title: "pr 1", Number: 1}}}}, + prInfos: []pullRequestInformation{{Title: "pr 1", Number: 1}}, + want: map[string]map[string][]pullRequestInformation{"Other": {"Other": []pullRequestInformation{{Title: "pr 1", Number: 1}}}}, }, { name: "Single PR info with type label", - prInfos: []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}}}}, - want: map[string]map[string][]prInfo{"Bug fixes": {"Other": []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}}}}}}}, + prInfos: []pullRequestInformation{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}}}}, + want: map[string]map[string][]pullRequestInformation{"Bug fixes": {"Other": []pullRequestInformation{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}}}}}}}, { name: "Single PR info with type and component labels", - prInfos: []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}}, - want: map[string]map[string][]prInfo{"Bug fixes": {"VTGate": []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}}}}}, + prInfos: []pullRequestInformation{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}}, + want: map[string]map[string][]pullRequestInformation{"Bug fixes": {"VTGate": []pullRequestInformation{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}}}}}, { - name: "Multiple PR infos with type and component labels", prInfos: []prInfo{ + name: "Multiple PR infos with type and component labels", prInfos: []pullRequestInformation{ {Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}, {Title: "pr 2", Number: 2, Labels: []label{{Name: prefixType + "Feature"}, {Name: prefixComponent + "VTTablet"}}}}, - want: map[string]map[string][]prInfo{"Bug fixes": {"VTGate": []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}}}, "Feature": {"VTTablet": []prInfo{{Title: "pr 2", Number: 2, Labels: []label{{Name: prefixType + "Feature"}, {Name: prefixComponent + "VTTablet"}}}}}}}, + want: map[string]map[string][]pullRequestInformation{"Bug fixes": {"VTGate": []pullRequestInformation{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}}}, "Feature": {"VTTablet": []pullRequestInformation{{Title: "pr 2", Number: 2, Labels: []label{{Name: prefixType + "Feature"}, {Name: prefixComponent + "VTTablet"}}}}}}}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -58,54 +57,6 @@ func Test_groupPRs(t *testing.T) { } } -func TestParseGitLogOutput(t *testing.T) { - in := `harshTEST@planetscale.com Merge pull request #7968 from planetscale/bump_java_snapshot_v11 7e8ebbb5b79b65d2d45fd6c838efb51bdafc7c0b 195a09df191d3e86a32ebcc7a1f1dde168fe819e 168fe819e -deeptTEST@planetscale.com Merge pull request #7970 from planetscale/vttestserver-default-charset 887be6914690b6d106aba001c72deea80a4d8dab ff8c750eda4b30787e772547a451ed1f50931150 f50931150 -deeptTEST@planetscale.com Merge pull request #7943 from planetscale/fix-mysql80-container-image 01fb7e55ab92df7c3f300b85976fdf3fd5bd35b3 3cc94a10752014c9ce311d88af9e1aa18e7fa2d8 18e7fa2d8 -57520317+rohit-nayak-TEST@users.noreply.github.com Merge pull request #7831 from planetscale/rn-vr-log2 37c09d3be83922a8ef936fbc028a5031f96b7dbf f57350c3ea1720496e5f1cec35d58f069e4df515 69e4df515 -TEST@planetscale.com docker/vttestserver/run.sh: Add $CHARSET environment variable 482a7008117ee3215663aeb33cad981e5242a88a e5242a88a -rohTEST@planetscale.com Add ability to select from vreplication_log in VReplicationExec 427cac89cd6b143d3a1928ee682b3a9538709da5 538709da5 -rohTEST@planetscale.com Use withDDL for vreplication log queries 4a1ab946e3628ba8ef610ea4a158186a5fdd17ba a5fdd17ba -rohTEST@planetscale.com Add license file. Minor refactor fa9de690ce0d27a781befbc1866aca5cd447798f cd447798f -rohTEST@planetscale.com Added comments and refactored tests b6d39acb08939ba56e9e9587f34f3b8bcdcdc504 bcdcdc504 -rohTEST@planetscale.com Add logs for start and end of the copy phase 1cf72866ddfbd554700d6c9e32b9835ebb3b444c ebb3b444c -rohTEST@planetscale.com Fix test 0992d39c6d473b548679d012cfa5a889ffa448ef 9ffa448ef -rohTEST@planetscale.com Add test for vreplication log and fix string conversion bug b616143b14b75e7c23042c2eef4f6b27a275b0f7 7a275b0f7 -rohTEST@planetscale.com Ignore queries related to _vt.vreplication_log in tests e6926932c14da9a2213be246bc2de5f011668551 011668551 -rohTEST@planetscale.com Create log table. Util functions to insert logs. Insert logs in VReplicationExec and setMessage/State 37c09d3be83922a8ef936fbc028a5031f96b7dbf 1f96b7dbf -harshTEST@planetscale.com Merge pull request #7951 from vmg/vmg/vr-client-perf 7794c62651066970e1176181cb7000d385d0b327 172fac7dec8b11937a4efb26ebf4bedf1771f189 f1771f189 -alkin.tezuysTEST@gmail.com java: Bump SNAPSHOT version to 11.0.0-SNAPSHOT after Vitess release v10 7794c62651066970e1176181cb7000d385d0b327 385d0b327 -alkin.tezuysTEST@gmail.com Merge pull request #7964 from planetscale/10_0_RC1_release_notes 31d84d6ce8e233a053794ad0ffe5168d34d04450 b020dc71f5c7dc663d814563f1b6c97340f4411f 340f4411f -vTEST@strn.cat vstreamer: fix docs e7bf329da0029414c3b18e18e5cb2226b9a731a2 6b9a731a2 -amasTEST@slack-corp.com [workflow] extract migration targets from wrangler (#7934) 8bd5a7cb093369b50a0926bfa3a112b3b744e782 3b744e782 -alkin.tezuysTEST@gmail.com More spacing issues fixed 7509d47ba785e7a39b8726dc80f93955953ab98d 5953ab98d -alkin.tezuysTEST@gmail.com Minor spacing fixes d31362e76ac69fb2bc4083e22e7c87683099fecd 83099fecd -alkin.tezuysTEST@gmail.com Update 10_0_0_release_notes.md a7034bdf5d454a47738335ed2afc75f72bdbcf37 72bdbcf37 -alkin.tezuysTEST@gmail.com v10 GA Release Notes ad37320b2637620ee36d44d163399ecc2c1eea6c c2c1eea6c -andrTEST@planetscale.com Merge pull request #7912 from planetscale/show-databases-like 7e13d4bccca0325ca07a488334e77c4f2f964f6b 95eceb17d10c62d56f2e94e5478afb5a1b63e1c2 a1b63e1c2 -andrTEST@planetscale.com Merge pull request #7629 from planetscale/gen4-table-aliases 2e1b1e9322a6bfcfe792cca341b0d52860d3c66e 7ad14e3f3d26cb1780cdbf9c22029740e5aebde4 0e5aebde4 -andrTEST@planetscale.com Merge remote-tracking branch 'upstream/master' into show-databases-like 6b3ee1c31a939fc6628515f00087baa3e1e8acf7 2e1b1e9322a6bfcfe792cca341b0d52860d3c66e 860d3c66e -2607934+shlomi-noaTEST@users.noreply.github.com Merge pull request #7959 from Hellcatlk/master 6c826115937d28ef83f05a1f0d54db0fcb814db4 cdab3040aaaa11c51e291d6b1a7af6fadd83dedf add83dedf -zouy.fnTEST@cn.fujitsu.com Fix a gofmt warning 08038850a258d6de250cf9d864d6118616f5562c 616f5562c -vTEST@strn.cat mysql: allow reusing row storage when reading from a stream a2850bbf41100618cb1192067b16585ba7c6b0c7 ba7c6b0c7 -vTEST@strn.cat throttle: do not check for time constantly e0b90daebe9e6b98d969934a24899b41d25e3a68 1d25e3a68 -andrTEST@planetscale.com fix compilation error 18036f5fb5f58523dbf50726beb741cedac2baf8 edac2baf8 -andrTEST@planetscale.com better code comment c173c945cf0e75e8649e6fa621509b5fb4ebd6c9 fb4ebd6c9 -vTEST@strn.cat conn: do not let header escape to the heap d31fb23d8cb9463810ed9fc132df4060a6812f6e 0a6812f6e -vTEST@strn.cat vstreamer: do not allocate when filtering rows dafc1cb729d7be7dff2c05bd05a926005eb9a044 05eb9a044 -vTEST@strn.cat vstreamer: do not allocate when converting rows c5cd3067aeb9d952a2f45084c37634267e4f9062 67e4f9062 -andrTEST@planetscale.com Merge remote-tracking branch 'upstream/master' into gen4-table-aliases 8c01827ed8b748240f213d9476ee162306ab01eb b1f9000ddd166d49adda6581e7ca9e0aca10c252 aca10c252 -aquarapTEST@gmail.com Fix mysql80 docker build with dep. a28591577b8d432b9c5d78abf59ad494a0a943b0 4a0a943b0 -TEST@planetscale.com Revert "docker/lite/install_dependencies.sh: Upgrade MySQL 8 to 8.0.24" 7858ff46545cff749b3663c92ae90ef27a5dfbc2 27a5dfbc2 -TEST@planetscale.com docker/lite/install_dependencies.sh: Upgrade MySQL 8 to 8.0.24 c91d46782933292941a846fef2590ff1a6fa193f a6fa193f` - - prs, authorCommits, nonMergeCommits, err := parseGitLog(in) - require.NoError(t, err) - assert.Equal(t, prs, []string{"7629", "7831", "7912", "7934", "7943", "7951", "7959", "7964", "7968", "7970"}) - assert.Equal(t, authorCommits, []string{"385d0b327", "3b744e782", "4a0a943b0", "538709da5", "616f5562c", "6b9a731a2", "e5242a88a", "edac2baf8"}) - assert.Equal(t, 28, nonMergeCommits) -} - func TestLoadSummaryReadme(t *testing.T) { readmeFile, err := os.CreateTemp("", "*.md") require.NoError(t, err) @@ -160,11 +111,12 @@ func TestGenerateReleaseNotes(t *testing.T) { VersionUnderscore: "12_0_0", ChangeLog: "* PR 1\n* PR 2\n", ChangeMetrics: "optimization is the root of all evil", + SubDirPath: "changelog/12.0/12.0.0", }, expectedOut: "# Release of Vitess v12.0.0\n" + "This is the new release.\n\nNew features got added.\n" + "------------\n" + - "The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/12_0_0_changelog.md).\n" + + "The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/12.0/12.0.0/changelog.md).\n" + "optimization is the root of all evil\n", expectedOutChangeLog: "# Changelog of Vitess v12.0.0\n" + "* PR 1\n" + @@ -176,9 +128,10 @@ func TestGenerateReleaseNotes(t *testing.T) { VersionUnderscore: "12_0_0", ChangeLog: "* PR 1\n* PR 2\n", ChangeMetrics: "optimization is the root of all evil", + SubDirPath: "changelog/12.0/12.0.0", }, expectedOut: "# Release of Vitess v12.0.0\n" + - "The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/doc/releasenotes/12_0_0_changelog.md).\n" + + "The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/12.0/12.0.0/changelog.md).\n" + "optimization is the root of all evil\n", expectedOutChangeLog: "# Changelog of Vitess v12.0.0\n" + "* PR 1\n" + diff --git a/go/tools/releases/releases.go b/go/tools/releases/releases.go new file mode 100644 index 00000000000..10c29233494 --- /dev/null +++ b/go/tools/releases/releases.go @@ -0,0 +1,143 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +// The changelog directory is composed of a README that lists +// and links to all major releases of Vitess. It has one +// sub-directory for each major version. Each sub-directory is +// composed of another README that also lists and links all the +// patch releases of this major release. Those sub-directories +// are composed of one directory per patch release. Finally, +// the patch release directory contains the old files markdown: +// summary, release_notes, changelog. +// +// This tool is solely responsible for generating the READMEs +// and making sure they are up-to-date with the list of major +// and patch releases we have. + +import ( + "log" + "os" + "path" + "sort" + "strings" + "text/template" +) + +const ( + rootDir = "./changelog/" + + rootFileTmpl = `## Releases + +{{- range $r := .SubDirs }} +* [{{ $r.Name }}]({{ $r.Name }}) +{{- end -}} +` + + majorVersionTmpl = `## v{{ .Name }} + +{{- if .Team }} +The dedicated team for this release can be found [here]({{.Team}}).{{ end }} + +{{- range $r := .SubDirs }} +* **[{{ $r.Name }}]({{ $r.Name }})** +{{ if $r.Changelog }} * [Changelog]({{ $r.Name }}/{{ $r.Changelog }}) +{{ end -}} +{{ if $r.ReleaseNotes }} * [Release Notes]({{ $r.Name }}/{{ $r.ReleaseNotes }}) +{{ end -}} +{{- end -}} +` +) + +type dir struct { + Name string + Path string + Changelog string + ReleaseNotes string + Team string + SubDirs []dir +} + +func main() { + rootDir, err := getDirs(dir{Path: rootDir}) + if err != nil { + log.Fatal(err) + } + + err = execReadMeTemplateWithDir(rootDir, rootFileTmpl) + if err != nil { + log.Fatal(err) + } + + for _, subDir := range rootDir.SubDirs { + err := execReadMeTemplateWithDir(subDir, majorVersionTmpl) + if err != nil { + log.Fatal(err) + } + } +} + +func execReadMeTemplateWithDir(d dir, tmpl string) error { + rootRM, err := os.OpenFile(path.Join(d.Path, "README.md"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0640) + if err != nil { + return err + } + + t := template.Must(template.New("root_readme").Parse(tmpl)) + err = t.ExecuteTemplate(rootRM, "root_readme", d) + if err != nil { + return err + } + return nil +} + +func getDirs(curDir dir) (dir, error) { + entries, err := os.ReadDir(curDir.Path) + if err != nil { + return dir{}, err + } + + for _, entry := range entries { + if entry.IsDir() { + subDir, err := getDirs(dir{ + Name: entry.Name(), + Path: path.Join(curDir.Path, entry.Name()), + }) + if err != nil { + return dir{}, err + } + curDir.SubDirs = append(curDir.SubDirs, subDir) + continue + } + + switch { + case strings.Contains(entry.Name(), "changelog.md"): + curDir.Changelog = entry.Name() + case strings.Contains(entry.Name(), "release_notes.md"): + curDir.ReleaseNotes = entry.Name() + case strings.Contains(entry.Name(), "team.md"): + curDir.Team = entry.Name() + } + } + sort.Slice(curDir.SubDirs, func(i, j int) bool { + if len(curDir.SubDirs[i].Name) < len(curDir.SubDirs[j].Name) { + return false + } + return curDir.SubDirs[i].Name > curDir.SubDirs[j].Name + }) + return curDir, nil +} diff --git a/go/trace/plugin_datadog.go b/go/trace/plugin_datadog.go index 25f0b6894cd..b101607592d 100644 --- a/go/trace/plugin_datadog.go +++ b/go/trace/plugin_datadog.go @@ -3,40 +3,58 @@ package trace import ( "fmt" "io" + "net" "github.com/opentracing/opentracing-go" "github.com/spf13/pflag" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer" ddtracer "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" + + "vitess.io/vitess/go/viperutil" ) var ( - dataDogHost string - dataDogPort string + dataDogConfigKey = viperutil.KeyPrefixFunc(configKey("datadog")) + + dataDogHost = viperutil.Configure( + dataDogConfigKey("agent.host"), + viperutil.Options[string]{ + FlagName: "datadog-agent-host", + }, + ) + dataDogPort = viperutil.Configure( + dataDogConfigKey("agent.port"), + viperutil.Options[string]{ + FlagName: "datadog-agent-port", + }, + ) ) func init() { // If compiled with plugin_datadaog, ensure that trace.RegisterFlags // includes datadaog tracing flags. pluginFlags = append(pluginFlags, func(fs *pflag.FlagSet) { - fs.StringVar(&dataDogHost, "datadog-agent-host", "", "host to send spans to. if empty, no tracing will be done") - fs.StringVar(&dataDogPort, "datadog-agent-port", "", "port to send spans to. if empty, no tracing will be done") + fs.String("datadog-agent-host", "", "host to send spans to. if empty, no tracing will be done") + fs.String("datadog-agent-port", "", "port to send spans to. if empty, no tracing will be done") + + viperutil.BindFlags(fs, dataDogHost, dataDogPort) }) } func newDatadogTracer(serviceName string) (tracingService, io.Closer, error) { - if dataDogHost == "" || dataDogPort == "" { + host, port := dataDogHost.Get(), dataDogPort.Get() + if host == "" || port == "" { return nil, nil, fmt.Errorf("need host and port to datadog agent to use datadog tracing") } opts := []ddtracer.StartOption{ - ddtracer.WithAgentAddr(dataDogHost + ":" + dataDogPort), + ddtracer.WithAgentAddr(net.JoinHostPort(host, port)), ddtracer.WithServiceName(serviceName), ddtracer.WithDebugMode(true), ddtracer.WithSampler(ddtracer.NewRateSampler(samplingRate.Get())), } - if enableLogging { + if enableLogging.Get() { opts = append(opts, ddtracer.WithLogger(&traceLogger{})) } diff --git a/go/trace/plugin_jaeger.go b/go/trace/plugin_jaeger.go index b1449bc576f..9da9546c2ff 100644 --- a/go/trace/plugin_jaeger.go +++ b/go/trace/plugin_jaeger.go @@ -18,14 +18,12 @@ package trace import ( "io" - "os" "github.com/opentracing/opentracing-go" "github.com/spf13/pflag" - "github.com/uber/jaeger-client-go" "github.com/uber/jaeger-client-go/config" - "vitess.io/vitess/go/flagutil" + "vitess.io/vitess/go/viperutil" "vitess.io/vitess/go/vt/log" ) @@ -36,18 +34,40 @@ included but nothing Jaeger specific. */ var ( - agentHost string - samplingType = flagutil.NewOptionalString("const") - samplingRate = flagutil.NewOptionalFloat64(0.1) + jaegerConfigKey = viperutil.KeyPrefixFunc(configKey("jaeger")) + agentHost = viperutil.Configure( + jaegerConfigKey("agent-host"), + viperutil.Options[string]{ + FlagName: "jaeger-agent-host", + }, + ) + samplingType = viperutil.Configure( + jaegerConfigKey("sampling_type"), + viperutil.Options[string]{ + Default: "const", + EnvVars: []string{"JAEGER_SAMPLER_TYPE"}, + FlagName: "tracing-sampling-type", + }, + ) + samplingRate = viperutil.Configure( + jaegerConfigKey("sampling_rate"), + viperutil.Options[float64]{ + Default: 0.1, + EnvVars: []string{"JAEGER_SAMPLER_PARAM"}, + FlagName: "tracing-sampling-rate", + }, + ) ) func init() { // If compiled with plugin_jaeger, ensure that trace.RegisterFlags includes // jaeger tracing flags. pluginFlags = append(pluginFlags, func(fs *pflag.FlagSet) { - fs.StringVar(&agentHost, "jaeger-agent-host", "", "host and port to send spans to. if empty, no tracing will be done") - fs.Var(samplingType, "tracing-sampling-type", "sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote'") - fs.Var(samplingRate, "tracing-sampling-rate", "sampling rate for the probabilistic jaeger sampler") + fs.String("jaeger-agent-host", agentHost.Default(), "host and port to send spans to. if empty, no tracing will be done") + fs.String("tracing-sampling-type", samplingType.Default(), "sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote'") + fs.Float64("tracing-sampling-rate", samplingRate.Default(), "sampling rate for the probabilistic jaeger sampler") + + viperutil.BindFlags(fs, agentHost, samplingRate, samplingType) }) } @@ -79,32 +99,17 @@ func newJagerTracerFromEnv(serviceName string) (tracingService, io.Closer, error } // Allow command line args to override environment variables. - if agentHost != "" { - cfg.Reporter.LocalAgentHostPort = agentHost + if host := agentHost.Get(); host != "" { + cfg.Reporter.LocalAgentHostPort = host } log.Infof("Tracing to: %v as %v", cfg.Reporter.LocalAgentHostPort, cfg.ServiceName) - if os.Getenv("JAEGER_SAMPLER_PARAM") == "" { - // If the environment variable was not set, we take the flag regardless - // of whether it was explicitly set on the command line. - cfg.Sampler.Param = samplingRate.Get() - } else if samplingRate.IsSet() { - // If the environment variable was set, but the user also explicitly - // passed the command line flag, the flag takes precedence. - cfg.Sampler.Param = samplingRate.Get() - } - - if samplingType.IsSet() { - cfg.Sampler.Type = samplingType.Get() - } else if cfg.Sampler.Type == "" { - log.Infof("--tracing-sampler-type was not set, and JAEGER_SAMPLER_TYPE was not set, defaulting to const sampler") - cfg.Sampler.Type = jaeger.SamplerTypeConst - } - + cfg.Sampler.Param = samplingRate.Get() + cfg.Sampler.Type = samplingType.Get() log.Infof("Tracing sampler type %v (param: %v)", cfg.Sampler.Type, cfg.Sampler.Param) var opts []config.Option - if enableLogging { + if enableLogging.Get() { opts = append(opts, config.Logger(&traceLogger{})) } else if cfg.Reporter.LogSpans { log.Warningf("JAEGER_REPORTER_LOG_SPANS was set, but --tracing-enable-logging was not; spans will not be logged") diff --git a/go/trace/trace.go b/go/trace/trace.go index dd0e1b56b62..7c43b4afedc 100644 --- a/go/trace/trace.go +++ b/go/trace/trace.go @@ -28,6 +28,7 @@ import ( "github.com/spf13/pflag" "google.golang.org/grpc" + "vitess.io/vitess/go/viperutil" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vterrors" ) @@ -125,6 +126,8 @@ type tracingService interface { // object to make sure that all spans are sent to the backend before the process exits. type TracerFactory func(serviceName string) (tracingService, io.Closer, error) +const configKeyPrefix = "trace" + var ( // tracingBackendFactories should be added to by a plugin during init() to install itself tracingBackendFactories = make(map[string]TracerFactory) @@ -133,15 +136,30 @@ var ( /* flags */ - tracingServer = "noop" - enableLogging bool + configKey = viperutil.KeyPrefixFunc(configKeyPrefix) + + tracingServer = viperutil.Configure( + configKey("service"), + viperutil.Options[string]{ + Default: "noop", + FlagName: "tracer", + }, + ) + enableLogging = viperutil.Configure( + configKey("enable-logging"), + viperutil.Options[bool]{ + FlagName: "tracing-enable-logging", + }, + ) pluginFlags []func(fs *pflag.FlagSet) ) func RegisterFlags(fs *pflag.FlagSet) { - fs.StringVar(&tracingServer, "tracer", "noop", "tracing service to use") - fs.BoolVar(&enableLogging, "tracing-enable-logging", false, "whether to enable logging in the tracing service") + fs.String("tracer", tracingServer.Default(), "tracing service to use") + fs.Bool("tracing-enable-logging", false, "whether to enable logging in the tracing service") + + viperutil.BindFlags(fs, tracingServer, enableLogging) for _, fn := range pluginFlags { fn(fs) @@ -150,20 +168,21 @@ func RegisterFlags(fs *pflag.FlagSet) { // StartTracing enables tracing for a named service func StartTracing(serviceName string) io.Closer { - factory, ok := tracingBackendFactories[tracingServer] + tracingBackend := tracingServer.Get() + factory, ok := tracingBackendFactories[tracingBackend] if !ok { return fail(serviceName) } tracer, closer, err := factory(serviceName) if err != nil { - log.Error(vterrors.Wrapf(err, "failed to create a %s tracer", tracingServer)) + log.Error(vterrors.Wrapf(err, "failed to create a %s tracer", tracingBackend)) return &nilCloser{} } currentTracer = tracer - if tracingServer != "noop" { - log.Infof("successfully started tracing with [%s]", tracingServer) + if tracingBackend != "noop" { + log.Infof("successfully started tracing with [%s]", tracingBackend) } return closer diff --git a/go/trace/trace_test.go b/go/trace/trace_test.go index 35500c93b4e..c98a47167a8 100644 --- a/go/trace/trace_test.go +++ b/go/trace/trace_test.go @@ -20,10 +20,12 @@ import ( "context" "fmt" "io" - "strings" "testing" + "github.com/spf13/viper" "google.golang.org/grpc" + + "vitess.io/vitess/go/viperutil/vipertest" ) func TestFakeSpan(t *testing.T) { @@ -49,13 +51,16 @@ func TestRegisterService(t *testing.T) { return tracer, tracer, nil } - tracingServer = fakeName + v := viper.New() + t.Cleanup(vipertest.Stub(t, v, tracingServer)) + + v.Set(tracingServer.Key(), fakeName) serviceName := "vtservice" closer := StartTracing(serviceName) tracer, ok := closer.(*fakeTracer) if !ok { - t.Fatalf("did not get the expected tracer") + t.Fatalf("did not get the expected tracer, got %+v (%T)", tracer, tracer) } if tracer.name != serviceName { @@ -98,15 +103,6 @@ func (f *fakeTracer) Close() error { panic("implement me") } -func (f *fakeTracer) assertNoSpanWith(t *testing.T, substr string) { - t.Helper() - for _, logLine := range f.log { - if strings.Contains(logLine, substr) { - t.Fatalf("expected to not find [%v] but found it in [%v]", substr, logLine) - } - } -} - type mockSpan struct { tracer *fakeTracer } diff --git a/go/viperutil/config.go b/go/viperutil/config.go new file mode 100644 index 00000000000..49e3f960875 --- /dev/null +++ b/go/viperutil/config.go @@ -0,0 +1,299 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package viperutil + +import ( + "context" + "fmt" + "os" + "reflect" + "sort" + "strings" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/spf13/pflag" + "github.com/spf13/viper" + + "vitess.io/vitess/go/viperutil/funcs" + "vitess.io/vitess/go/viperutil/internal/log" + "vitess.io/vitess/go/viperutil/internal/registry" + "vitess.io/vitess/go/viperutil/internal/value" +) + +var ( + configPaths = Configure( + "config.paths", + Options[[]string]{ + GetFunc: funcs.GetPath, + EnvVars: []string{"VT_CONFIG_PATH"}, + FlagName: "config-path", + }, + ) + configType = Configure( + "config.type", + Options[string]{ + EnvVars: []string{"VT_CONFIG_TYPE"}, + FlagName: "config-type", + }, + ) + configName = Configure( + "config.name", + Options[string]{ + Default: "vtconfig", + EnvVars: []string{"VT_CONFIG_NAME"}, + FlagName: "config-name", + }, + ) + configFile = Configure( + "config.file", + Options[string]{ + EnvVars: []string{"VT_CONFIG_FILE"}, + FlagName: "config-file", + }, + ) + configFileNotFoundHandling = Configure( + "config.notfound.handling", + Options[ConfigFileNotFoundHandling]{ + Default: WarnOnConfigFileNotFound, + GetFunc: getHandlingValue, + FlagName: "config-file-not-found-handling", + }, + ) + configPersistenceMinInterval = Configure( + "config.persistence.min_interval", + Options[time.Duration]{ + Default: time.Second, + EnvVars: []string{"VT_CONFIG_PERSISTENCE_MIN_INTERVAL"}, + FlagName: "config-persistence-min-interval", + }, + ) +) + +func init() { + wd, err := os.Getwd() + if err != nil { + log.WARN("failed to get working directory (err=%v), not appending to default config-paths", err) + return + } + + configPaths.(*value.Static[[]string]).DefaultVal = []string{wd} + // Need to re-trigger the SetDefault call done during Configure. + registry.Static.SetDefault(configPaths.Key(), configPaths.Default()) +} + +// RegisterFlags installs the flags that control viper config-loading behavior. +// It is exported to be called by servenv before parsing flags for all binaries. +// +// It cannot be registered here via servenv.OnParse since this causes an import +// cycle. +func RegisterFlags(fs *pflag.FlagSet) { + fs.StringSlice("config-path", configPaths.Default(), "Paths to search for config files in.") + fs.String("config-type", configType.Default(), "Config file type (omit to infer config type from file extension).") + fs.String("config-name", configName.Default(), "Name of the config file (without extension) to search for.") + fs.String("config-file", configFile.Default(), "Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored.") + fs.Duration("config-persistence-min-interval", configPersistenceMinInterval.Default(), "minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done).") + + var h = configFileNotFoundHandling.Default() + fs.Var(&h, "config-file-not-found-handling", fmt.Sprintf("Behavior when a config file is not found. (Options: %s)", strings.Join(handlingNames, ", "))) + + BindFlags(fs, configPaths, configType, configName, configFile, configFileNotFoundHandling, configPersistenceMinInterval) +} + +// LoadConfig attempts to find, and then load, a config file for viper-backed +// config values to use. +// +// Config searching follows the behavior used by viper [1], namely: +// - --config-file (full path, including extension) if set will be used to the +// exclusion of all other flags. +// - --config-type is required if the config file does not have one of viper's +// supported extensions (.yaml, .yml, .json, and so on) +// +// An additional --config-file-not-found-handling flag controls how to treat the +// situation where viper cannot find any config files in any of the provided +// paths (for ex, users may want to exit immediately if a config file that +// should exist doesn't for some reason, or may wish to operate with flags and +// environment variables alone, and not use config files at all). +// +// If a config file is successfully loaded, then the dynamic registry will also +// start watching that file for changes. In addition, in-memory changes to the +// config (for example, from a vtgate or vttablet's debugenv) will be persisted +// back to disk, with writes occuring no more frequently than the +// --config-persistence-min-interval flag. +// +// A cancel function is returned to stop the re-persistence background thread, +// if one was started. +// +// [1]: https://github.com/spf13/viper#reading-config-files. +func LoadConfig() (context.CancelFunc, error) { + var err error + switch file := configFile.Get(); file { + case "": + if name := configName.Get(); name != "" { + registry.Static.SetConfigName(name) + + for _, path := range configPaths.Get() { + registry.Static.AddConfigPath(path) + } + + if cfgType := configType.Get(); cfgType != "" { + registry.Static.SetConfigType(cfgType) + } + + err = registry.Static.ReadInConfig() + } + default: + registry.Static.SetConfigFile(file) + err = registry.Static.ReadInConfig() + } + + if err != nil { + if nferr, ok := err.(viper.ConfigFileNotFoundError); ok { + msg := "Failed to read in config %s: %s" + switch configFileNotFoundHandling.Get() { + case WarnOnConfigFileNotFound: + msg += ". This is optional, and can be ignored if you are not using config files. For a detailed explanation, see https://github.com/vitessio/vitess/blob/main/doc/viper/viper.md#config-files." + log.WARN(msg, registry.Static.ConfigFileUsed(), nferr.Error()) + fallthrough // after warning, ignore the error + case IgnoreConfigFileNotFound: + err = nil + case ErrorOnConfigFileNotFound: + log.ERROR(msg, registry.Static.ConfigFileUsed(), nferr.Error()) + case ExitOnConfigFileNotFound: + log.CRITICAL(msg, registry.Static.ConfigFileUsed(), nferr.Error()) + } + } + } + + if err != nil { + return nil, err + } + + return registry.Dynamic.Watch(context.Background(), registry.Static, configPersistenceMinInterval.Get()) +} + +// NotifyConfigReload adds a subscription that the dynamic registry will attempt +// to notify on config changes. The notification fires after the updated config +// has been loaded from disk into the live config. +// +// Analogous to signal.Notify, notifications are sent non-blocking, so users +// should account for this when writing code to consume from the channel. +// +// This function must be called prior to LoadConfig; it will panic if called +// after the dynamic registry has started watching the loaded config. +func NotifyConfigReload(ch chan<- struct{}) { + registry.Dynamic.Notify(ch) +} + +// ConfigFileNotFoundHandling is an enum to control how LoadConfig treats errors +// of type viper.ConfigFileNotFoundError when loading a config. +type ConfigFileNotFoundHandling int + +const ( + // IgnoreConfigFileNotFound causes LoadConfig to completely ignore a + // ConfigFileNotFoundError (i.e. not even logging it). + IgnoreConfigFileNotFound ConfigFileNotFoundHandling = iota + // WarnOnConfigFileNotFound causes LoadConfig to log a warning with details + // about the failed config load, but otherwise proceeds with the given + // process, which will get config values entirely from defaults, + // envirnoment variables, and flags. + WarnOnConfigFileNotFound + // ErrorOnConfigFileNotFound causes LoadConfig to return the + // ConfigFileNotFoundError after logging an error. + ErrorOnConfigFileNotFound + // ExitOnConfigFileNotFound causes LoadConfig to log.Fatal on a + // ConfigFileNotFoundError. + ExitOnConfigFileNotFound +) + +var ( + handlingNames []string + handlingNamesToValues = map[string]int{ + "ignore": int(IgnoreConfigFileNotFound), + "warn": int(WarnOnConfigFileNotFound), + "error": int(ErrorOnConfigFileNotFound), + "exit": int(ExitOnConfigFileNotFound), + } + handlingValuesToNames map[int]string +) + +func getHandlingValue(v *viper.Viper) func(key string) ConfigFileNotFoundHandling { + return func(key string) (h ConfigFileNotFoundHandling) { + if err := v.UnmarshalKey(key, &h, viper.DecodeHook(mapstructure.ComposeDecodeHookFunc(decodeHandlingValue))); err != nil { + h = IgnoreConfigFileNotFound + log.WARN("failed to unmarshal %s: %s; defaulting to %s", err.Error(), key, err.Error(), h.String()) + } + + return h + } +} + +func decodeHandlingValue(from, to reflect.Type, data any) (any, error) { + var h ConfigFileNotFoundHandling + if to != reflect.TypeOf(h) { + return data, nil + } + + switch { + case from == reflect.TypeOf(h): + return data.(ConfigFileNotFoundHandling), nil + case from.Kind() == reflect.Int: + return ConfigFileNotFoundHandling(data.(int)), nil + case from.Kind() == reflect.String: + if err := h.Set(data.(string)); err != nil { + return h, err + } + + return h, nil + } + + return data, fmt.Errorf("invalid value for ConfigHandlingType: %v", data) +} + +func init() { + handlingNames = make([]string, 0, len(handlingNamesToValues)) + handlingValuesToNames = make(map[int]string, len(handlingNamesToValues)) + + for name, val := range handlingNamesToValues { + handlingValuesToNames[val] = name + handlingNames = append(handlingNames, name) + } + + sort.Slice(handlingNames, func(i, j int) bool { + return handlingNames[i] < handlingNames[j] + }) +} + +func (h *ConfigFileNotFoundHandling) Set(arg string) error { + larg := strings.ToLower(arg) + if v, ok := handlingNamesToValues[larg]; ok { + *h = ConfigFileNotFoundHandling(v) + return nil + } + + return fmt.Errorf("unknown handling name %s", arg) +} + +func (h *ConfigFileNotFoundHandling) String() string { + if name, ok := handlingValuesToNames[int(*h)]; ok { + return name + } + + return "" +} + +func (h *ConfigFileNotFoundHandling) Type() string { return "ConfigFileNotFoundHandling" } diff --git a/go/viperutil/config_test.go b/go/viperutil/config_test.go new file mode 100644 index 00000000000..8e00a4700ac --- /dev/null +++ b/go/viperutil/config_test.go @@ -0,0 +1,49 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package viperutil + +import ( + "strings" + "testing" + + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGetConfigHandlingValue(t *testing.T) { + v := viper.New() + v.SetDefault("default", ExitOnConfigFileNotFound) + v.SetConfigType("yaml") + + cfg := ` +foo: 2 +bar: "2" # not valid, defaults to "ignore" (0) +baz: error +duration: 10h +` + err := v.ReadConfig(strings.NewReader(strings.NewReplacer("\t", " ").Replace(cfg))) + require.NoError(t, err) + + getHandlingValueFunc := getHandlingValue(v) + assert.Equal(t, ErrorOnConfigFileNotFound, getHandlingValueFunc("foo"), "failed to get int value") + assert.Equal(t, IgnoreConfigFileNotFound, getHandlingValueFunc("bar"), "failed to get int-like string value") + assert.Equal(t, ErrorOnConfigFileNotFound, getHandlingValueFunc("baz"), "failed to get string value") + assert.Equal(t, IgnoreConfigFileNotFound, getHandlingValueFunc("notset"), "failed to get value on unset key") + assert.Equal(t, IgnoreConfigFileNotFound, getHandlingValueFunc("duration"), "failed to get value on duration key") + assert.Equal(t, ExitOnConfigFileNotFound, getHandlingValueFunc("default"), "failed to get value on default key") +} diff --git a/tools/coverage-go/vtctl_test.go b/go/viperutil/debug/debug.go similarity index 64% rename from tools/coverage-go/vtctl_test.go rename to go/viperutil/debug/debug.go index db215e48a6f..66cbc7f2962 100644 --- a/tools/coverage-go/vtctl_test.go +++ b/go/viperutil/debug/debug.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,12 +13,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package main + +package debug import ( - "testing" + "vitess.io/vitess/go/viperutil/internal/registry" ) -func TestVtctl(t *testing.T) { - main() +// Debug provides the Debug functionality normally accessible to a given viper +// instance, but for a combination of the private static and dynamic registries. +func Debug() { + registry.Combined().Debug() } diff --git a/go/viperutil/debug/handler.go b/go/viperutil/debug/handler.go new file mode 100644 index 00000000000..b5730a2e41e --- /dev/null +++ b/go/viperutil/debug/handler.go @@ -0,0 +1,82 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package debug + +import ( + "fmt" + "io" + "net/http" + "os" + "strings" + + "github.com/spf13/viper" + + "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/viperutil/internal/registry" +) + +// HandlerFunc provides an http.HandlerFunc that renders the combined config +// registry (both static and dynamic) for debugging purposes. +// +// By default, this writes the config in viper's "debug" format (what you get +// if you call viper.Debug()). If the query parameter "format" is present, and +// matches one of viper's supported config extensions (case-insensitively), the +// combined config will be written to the response in that format. +// +// Example requests: +// - GET /debug/config +// - GET /debug/config?format=json +// - GET /debug/config?format=yaml +func HandlerFunc(w http.ResponseWriter, r *http.Request) { + if err := acl.CheckAccessHTTP(r, acl.DEBUGGING); err != nil { + acl.SendError(w, err) + return + } + + v := registry.Combined() + format := strings.ToLower(r.URL.Query().Get("format")) + switch { + case format == "": + v.DebugTo(w) + case slice.Any(viper.SupportedExts, func(ext string) bool { return ext == format }): + // Got a supported format; write the config to a tempfile in that format, + // then copy it to the response. + // + // (Sadly, viper does not yet have a WriteConfigTo(w io.Writer), so we have + // to do this little hacky workaround). + v.SetConfigType(format) + tmp, err := os.CreateTemp("", "viper_debug") + if err != nil { + http.Error(w, fmt.Sprintf("failed to render config to tempfile: %v", err), http.StatusInternalServerError) + return + } + defer os.Remove(tmp.Name()) + + if err := v.WriteConfigAs(tmp.Name()); err != nil { + http.Error(w, fmt.Sprintf("failed to render config to tempfile: %v", err), http.StatusInternalServerError) + return + } + + if _, err := io.Copy(w, tmp); err != nil { + http.Error(w, fmt.Sprintf("failed to write rendered config: %v", err), http.StatusInternalServerError) + return + } + default: + http.Error(w, "unsupported config format", http.StatusBadRequest) + } +} diff --git a/go/viperutil/errors.go b/go/viperutil/errors.go new file mode 100644 index 00000000000..5d18774f998 --- /dev/null +++ b/go/viperutil/errors.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package viperutil + +import ( + "vitess.io/vitess/go/viperutil/internal/sync" + "vitess.io/vitess/go/viperutil/internal/value" +) + +var ( + // ErrDuplicateWatch is returned when Watch is called multiple times on a + // single synced viper. Viper only supports reading/watching a single + // config file. + ErrDuplicateWatch = sync.ErrDuplicateWatch + // ErrNoFlagDefined is returned from Value's Flag method when the value was + // configured to bind to a given FlagName but the provided flag set does not + // define a flag with that name. + ErrNoFlagDefined = value.ErrNoFlagDefined +) diff --git a/go/viperutil/funcs/get.go b/go/viperutil/funcs/get.go new file mode 100644 index 00000000000..e33ffe9065f --- /dev/null +++ b/go/viperutil/funcs/get.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package funcs + +import ( + "strings" + + "github.com/spf13/viper" +) + +// GetPath returns a GetFunc that expands a slice of strings into individual +// paths based on standard POSIX shell $PATH separator parsing. +func GetPath(v *viper.Viper) func(key string) []string { + return func(key string) (paths []string) { + for _, val := range v.GetStringSlice(key) { + if val != "" { + paths = append(paths, strings.Split(val, ":")...) + } + } + + return paths + } +} diff --git a/go/viperutil/funcs/get_test.go b/go/viperutil/funcs/get_test.go new file mode 100644 index 00000000000..2af83e99aba --- /dev/null +++ b/go/viperutil/funcs/get_test.go @@ -0,0 +1,49 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package funcs_test + +import ( + "fmt" + + "github.com/spf13/viper" + + "vitess.io/vitess/go/viperutil" + "vitess.io/vitess/go/viperutil/funcs" + "vitess.io/vitess/go/viperutil/internal/value" +) + +func ExampleGetPath() { + v := viper.New() + + val := viperutil.Configure("path", viperutil.Options[[]string]{ + GetFunc: funcs.GetPath, + }) + + stub(val, v) + + v.Set(val.Key(), []string{"/var/www", "/usr:/usr/bin", "/vt"}) + fmt.Println(val.Get()) + // Output: [/var/www /usr /usr/bin /vt] +} + +func stub[T any](val viperutil.Value[T], v *viper.Viper) { + // N.B.: You cannot do this in normal code because these types are internal + // to viperutil, but you also will not need to do this. However it's + // necessary for the example to work here. + base := val.(*value.Static[T]).Base + base.BoundGetFunc = base.GetFunc(v) +} diff --git a/go/viperutil/get_func.go b/go/viperutil/get_func.go new file mode 100644 index 00000000000..82a34df3fac --- /dev/null +++ b/go/viperutil/get_func.go @@ -0,0 +1,223 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package viperutil + +import ( + "fmt" + "reflect" + "strconv" + "time" + + "github.com/spf13/viper" +) + +// GetFuncForType returns the default getter function for a given type T. A +// getter function is a function which takes a viper and returns a function that +// takes a key and (finally!) returns a value of type T. +// +// For example, the default getter for a value of type string is a function that +// takes a viper instance v and calls v.GetString with the provided key. +// +// In most cases, callers of Configure should be able to rely on the defaults +// provided here (and may refer to get_func_test.go for an up-to-date example +// of the provided functionalities), but if more fine-grained control is needed +// (this should be an **exceptional** circumstance), they may provide their own +// GetFunc as an option to Configure. +// +// This function may panic if called for an unsupported type. This is captured +// in the test code as well. +func GetFuncForType[T any]() func(v *viper.Viper) func(key string) T { + var ( + t T + f any + ) + + typ := reflect.TypeOf(t) + switch typ.Kind() { + case reflect.Bool: + f = func(v *viper.Viper) func(key string) bool { + return v.GetBool + } + case reflect.Int: + f = func(v *viper.Viper) func(key string) int { + return v.GetInt + } + case reflect.Int8: + f = getCastedInt[int8]() + case reflect.Int16: + f = getCastedInt[int16]() + case reflect.Int32: + f = func(v *viper.Viper) func(key string) int32 { + return v.GetInt32 + } + case reflect.Int64: + switch typ { + case reflect.TypeOf(time.Duration(0)): + f = func(v *viper.Viper) func(key string) time.Duration { + return v.GetDuration + } + default: + f = func(v *viper.Viper) func(key string) int64 { + return v.GetInt64 + } + } + case reflect.Uint: + f = func(v *viper.Viper) func(key string) uint { + return v.GetUint + } + case reflect.Uint8: + f = getCastedUint[uint8]() + case reflect.Uint16: + f = getCastedUint[uint16]() + case reflect.Uint32: + f = func(v *viper.Viper) func(key string) uint32 { + return v.GetUint32 + } + case reflect.Uint64: + f = func(v *viper.Viper) func(key string) uint64 { + return v.GetUint64 + } + case reflect.Uintptr: + // Unupported, fallthrough to `if f == nil` check below switch. + case reflect.Float32: + f = func(v *viper.Viper) func(key string) float32 { + return func(key string) float32 { + return float32(v.GetFloat64(key)) + } + } + case reflect.Float64: + f = func(v *viper.Viper) func(key string) float64 { + return v.GetFloat64 + } + case reflect.Complex64: + f = getComplex[complex64](64) + case reflect.Complex128: + f = getComplex[complex128](128) + case reflect.Array: + // Even though the code would be extremely similar to slice types, we + // cannot support arrays because there's no way to write a function that + // returns, say, [N]int, for some value of N which we only know at + // runtime. + panic("GetFuncForType does not support array types") + case reflect.Chan: + panic("GetFuncForType does not support channel types") + case reflect.Func: + panic("GetFuncForType does not support function types") + case reflect.Interface: + panic("GetFuncForType does not support interface types (specify a specific implementation type instead)") + case reflect.Map: + switch typ.Key().Kind() { + case reflect.String: + switch val := typ.Elem(); val.Kind() { + case reflect.String: + f = func(v *viper.Viper) func(key string) map[string]string { + return v.GetStringMapString + } + case reflect.Slice: + switch val.Elem().Kind() { + case reflect.String: + f = func(v *viper.Viper) func(key string) map[string][]string { + return v.GetStringMapStringSlice + } + } + case reflect.Interface: + f = func(v *viper.Viper) func(key string) map[string]interface{} { + return v.GetStringMap + } + } + } + case reflect.Pointer: + switch typ.Elem().Kind() { + case reflect.Struct: + f = unmarshalFunc[T]() + } + case reflect.Slice: + switch typ.Elem().Kind() { + case reflect.Int: + f = func(v *viper.Viper) func(key string) []int { + return v.GetIntSlice + } + case reflect.String: + f = func(v *viper.Viper) func(key string) []string { + return v.GetStringSlice + } + } + case reflect.String: + f = func(v *viper.Viper) func(key string) string { + return v.GetString + } + case reflect.Struct: + switch typ { + case reflect.TypeOf(time.Time{}): + f = func(v *viper.Viper) func(key string) time.Time { + return v.GetTime + } + default: + f2 := unmarshalFunc[*T]() + f = func(v *viper.Viper) func(key string) T { + getPointer := f2(v) + return func(key string) T { + return *(getPointer(key)) + } + } + } + } + + if f == nil { + panic(fmt.Sprintf("no default GetFunc for type %T; call Configure with a custom GetFunc", t)) + } + return f.(func(v *viper.Viper) func(key string) T) +} + +func unmarshalFunc[T any]() func(v *viper.Viper) func(key string) T { + return func(v *viper.Viper) func(key string) T { + return func(key string) T { + t := new(T) + _ = v.UnmarshalKey(key, t) // TODO: panic on this error + return *t + } + } +} + +func getCastedInt[T int8 | int16]() func(v *viper.Viper) func(key string) T { + return func(v *viper.Viper) func(key string) T { + return func(key string) T { + return T(v.GetInt(key)) + } + } +} + +func getCastedUint[T uint8 | uint16]() func(v *viper.Viper) func(key string) T { + return func(v *viper.Viper) func(key string) T { + return func(key string) T { + return T(v.GetUint(key)) + } + } +} + +func getComplex[T complex64 | complex128](bitSize int) func(v *viper.Viper) func(key string) T { + return func(v *viper.Viper) func(key string) T { + return func(key string) T { + x, err := strconv.ParseComplex(v.GetString(key), bitSize) + if err != nil { + panic(err) // TODO: wrap with more details (key, type (64 vs 128), etc) + } + + return T(x) + } + } +} diff --git a/go/viperutil/get_func_test.go b/go/viperutil/get_func_test.go new file mode 100644 index 00000000000..346573704f1 --- /dev/null +++ b/go/viperutil/get_func_test.go @@ -0,0 +1,175 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package viperutil + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" +) + +type myStruct struct { + Foo string + Bar int +} + +type myNestedStruct struct { + MyStruct *myStruct + Baz bool +} + +func TestGetFuncForType(t *testing.T) { + now := time.Now() + + v := viper.New() + v.Set("foo.bool", true) + v.Set("foo.int", 5) + v.Set("foo.duration", time.Second) + v.Set("foo.float", 5.1) + v.Set("foo.complex", fmt.Sprintf("%v", complex(1, 2))) + v.Set("foo.intslice", []int{1, 2, 3}) + v.Set("foo.stringslice", []string{"a", "b", "c"}) + v.Set("foo.string", "hello") + v.Set("foo.time", now) + + assert := assert.New(t) + + // Bool types + assert.Equal(true, get[bool](t, v, "foo.bool"), "GetFuncForType[bool](foo.bool)") + + // Int types + assert.Equal(5, get[int](t, v, "foo.int"), "GetFuncForType[int](foo.int)") + assert.Equal(int8(5), get[int8](t, v, "foo.int"), "GetFuncForType[int8](foo.int)") + assert.Equal(int16(5), get[int16](t, v, "foo.int"), "GetFuncForType[int16](foo.int)") + assert.Equal(int32(5), get[int32](t, v, "foo.int"), "GetFuncForType[int32](foo.int)") + assert.Equal(int64(5), get[int64](t, v, "foo.int"), "GetFuncForType[int64](foo.int)") + + // Duration types + assert.Equal(time.Second, get[time.Duration](t, v, "foo.duration"), "GetFuncForType[time.Duration](foo.duration)") + + // Uint types + assert.Equal(uint(5), get[uint](t, v, "foo.int"), "GetFuncForType[uint](foo.int)") + assert.Equal(uint8(5), get[uint8](t, v, "foo.int"), "GetFuncForType[uint8](foo.int)") + assert.Equal(uint16(5), get[uint16](t, v, "foo.int"), "GetFuncForType[uint16](foo.int)") + assert.Equal(uint32(5), get[uint32](t, v, "foo.int"), "GetFuncForType[uint32](foo.int)") + assert.Equal(uint64(5), get[uint64](t, v, "foo.int"), "GetFuncForType[uint64](foo.int)") + + // Float types + assert.Equal(5.1, get[float64](t, v, "foo.float"), "GetFuncForType[float64](foo.float)") + assert.Equal(float32(5.1), get[float32](t, v, "foo.float"), "GetFuncForType[float32](foo.float)") + assert.Equal(float64(5), get[float64](t, v, "foo.int"), "GetFuncForType[float64](foo.int)") + + // Complex types + assert.Equal(complex(5, 0), get[complex128](t, v, "foo.int"), "GetFuncForType[complex128](foo.int)") + assert.Equal(complex(5.1, 0), get[complex128](t, v, "foo.float"), "GetFuncForType[complex128](foo.float)") + assert.Equal(complex(1, 2), get[complex128](t, v, "foo.complex"), "GetFuncForType[complex128](foo.complex)") + assert.Equal(complex64(complex(5, 0)), get[complex64](t, v, "foo.int"), "GetFuncForType[complex64](foo.int)") + assert.Equal(complex64(complex(5.1, 0)), get[complex64](t, v, "foo.float"), "GetFuncForType[complex64](foo.float)") + assert.Equal(complex64(complex(1, 2)), get[complex64](t, v, "foo.complex"), "GetFuncForType[complex64](foo.complex)") + + // Slice types + assert.ElementsMatch([]int{1, 2, 3}, get[[]int](t, v, "foo.intslice"), "GetFuncForType[[]int](foo.intslice)") + assert.ElementsMatch([]string{"a", "b", "c"}, get[[]string](t, v, "foo.stringslice"), "GetFuncForType[[]string](foo.stringslice)") + + // String types + assert.Equal("hello", get[string](t, v, "foo.string"), "GetFuncForType[string](foo.string)") + + // Struct types + assert.Equal(now, get[time.Time](t, v, "foo.time"), "GetFuncForType[time.Time](foo.time)") + { + s := &myStruct{ + Foo: "hello", + Bar: 3, + } + v.Set("mystruct.foo", s.Foo) + v.Set("mystruct.bar", s.Bar) + + assert.Equal(s, get[*myStruct](t, v, "mystruct"), "GetFuncForType[*myStruct](mystruct)") + assert.IsType(&myStruct{}, get[*myStruct](t, v, "mystruct"), "GetFuncForType[*myStruct](mystruct) should return a pointer") + assert.Equal(*s, get[myStruct](t, v, "mystruct"), "GetFuncForType[myStruct](mystruct)") + assert.IsType(myStruct{}, get[myStruct](t, v, "mystruct"), "GetFuncForType[myStruct](mystruct) should return a struct (not pointer to struct)") + + s2 := &myNestedStruct{ + MyStruct: s, + Baz: true, + } + v.Set("mynestedstruct.mystruct.foo", s2.MyStruct.Foo) + v.Set("mynestedstruct.mystruct.bar", s2.MyStruct.Bar) + v.Set("mynestedstruct.baz", s2.Baz) + assert.Equal(*s2, get[myNestedStruct](t, v, "mynestedstruct"), "GetFuncForType[myNestedStruct](mynestedstruct)") + assert.IsType(myNestedStruct{}, get[myNestedStruct](t, v, "mynestedstruct"), "GetFuncForType[myNestedStruct](mynestedstruct) should return a struct (not pointer to struct)") + } + + // Map types. + v.Set("stringmap", map[string]string{ + "a": "A", + "b": "B", + }) + assert.Equal(map[string]string{"a": "A", "b": "B"}, get[map[string]string](t, v, "stringmap"), "GetFuncForType[map[string]string](stringmap)") + + v.Set("stringslicemap", map[string][]string{ + "uppers": strings.Split("ABCDEFG", ""), + "lowers": strings.Split("abcdefg", ""), + }) + assert.Equal(map[string][]string{"uppers": strings.Split("ABCDEFG", ""), "lowers": strings.Split("abcdefg", "")}, get[map[string][]string](t, v, "stringslicemap"), "GetFuncForType[map[string][]string](stringslicemap)") + + v.Set("anymap", map[string]any{ + "int": 5, + "bool": true, + "string": "hello", + }) + assert.Equal(map[string]any{"int": 5, "bool": true, "string": "hello"}, get[map[string]any](t, v, "anymap"), "GetFuncForType[map[string]any](anymap)") + + // Unsupported types. + t.Run("uintptr", func(t *testing.T) { + testPanic(t, GetFuncForType[uintptr], "GetFuncForType[uintptr]") + }) + t.Run("arrays", func(t *testing.T) { + testPanic(t, GetFuncForType[[5]int], "GetFuncForType[[5]int]") + testPanic(t, GetFuncForType[[3]string], "GetFuncForType[[3]string]") + }) + t.Run("channels", func(t *testing.T) { + testPanic(t, GetFuncForType[chan struct{}], "GetFuncForType[chan struct{}]") + testPanic(t, GetFuncForType[chan bool], "GetFuncForType[chan bool]") + testPanic(t, GetFuncForType[chan int], "GetFuncForType[chan int]") + testPanic(t, GetFuncForType[chan chan string], "GetFuncForType[chan chan string]") + }) + t.Run("funcs", func(t *testing.T) { + testPanic(t, GetFuncForType[func()], "GetFuncForType[func()]") + }) +} + +func testPanic[T any](t testing.TB, f func() func(v *viper.Viper) func(key string) T, fnName string) { + t.Helper() + + defer func() { + err := recover() + assert.NotNil(t, err, "%s should panic", fnName) + }() + + fn := f() + assert.Failf(t, fmt.Sprintf("%s should panic", fnName), "%s should panic; got %+v", fnName, fn) +} + +func get[T any](t testing.TB, v *viper.Viper, key string) T { + t.Helper() + return GetFuncForType[T]()(v)(key) +} diff --git a/go/viperutil/internal/log/log.go b/go/viperutil/internal/log/log.go new file mode 100644 index 00000000000..cded335cd47 --- /dev/null +++ b/go/viperutil/internal/log/log.go @@ -0,0 +1,57 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package log provides dual-use logging between vitess's vt/log package and +viper's jww log. +*/ +package log + +import ( + jww "github.com/spf13/jwalterweatherman" + + "vitess.io/vitess/go/vt/log" +) + +var ( + jwwlog = func(printer interface { + Printf(format string, args ...any) + }, vtlogger func(format string, args ...any)) func(format string, args ...any) { + switch vtlogger { + case nil: + return printer.Printf + default: + return func(format string, args ...any) { + printer.Printf(format, args...) + vtlogger(format, args...) + } + } + } + + // TRACE logs to viper's TRACE level, and nothing to vitess logs. + TRACE = jwwlog(jww.TRACE, nil) + // DEBUG logs to viper's DEBUG level, and nothing to vitess logs. + DEBUG = jwwlog(jww.DEBUG, nil) + // INFO logs to viper and vitess at INFO levels. + INFO = jwwlog(jww.INFO, log.Infof) + // WARN logs to viper and vitess at WARN/WARNING levels. + WARN = jwwlog(jww.WARN, log.Warningf) + // ERROR logs to viper and vitess at ERROR levels. + ERROR = jwwlog(jww.ERROR, log.Errorf) + // CRITICAL logs to viper at CRITICAL level, and then fatally logs to + // vitess, exiting the process. + CRITICAL = jwwlog(jww.CRITICAL, log.Fatalf) +) diff --git a/go/viperutil/internal/registry/registry.go b/go/viperutil/internal/registry/registry.go new file mode 100644 index 00000000000..10786a5f7b3 --- /dev/null +++ b/go/viperutil/internal/registry/registry.go @@ -0,0 +1,59 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "github.com/spf13/pflag" + "github.com/spf13/viper" + + "vitess.io/vitess/go/viperutil/internal/sync" +) + +var ( + // Static is the registry for static config variables. These variables will + // never be affected by a Watch-ed config, and maintain their original + // values for the lifetime of the process. + Static = viper.New() + // Dynamic is the registry for dynamic config variables. If a config file is + // found by viper, it will be watched by a threadsafe wrapper around a + // second viper (see sync.Viper), and variables registered to it will pick + // up changes to that config file throughout the lifetime of the process. + Dynamic = sync.New() + + _ Bindable = (*viper.Viper)(nil) + _ Bindable = (*sync.Viper)(nil) +) + +// Bindable represents the methods needed to bind a value.Value to a given +// registry. It exists primarly to allow us to treat a sync.Viper as a +// viper.Viper for configuration registration purposes. +type Bindable interface { + BindEnv(vars ...string) error + BindPFlag(key string, flag *pflag.Flag) error + RegisterAlias(alias string, key string) + SetDefault(key string, value any) +} + +// Combined returns a viper combining the Static and Dynamic registries. +func Combined() *viper.Viper { + v := viper.New() + _ = v.MergeConfigMap(Static.AllSettings()) + _ = v.MergeConfigMap(Dynamic.AllSettings()) + + v.SetConfigFile(Static.ConfigFileUsed()) + return v +} diff --git a/go/viperutil/internal/sync/sync.go b/go/viperutil/internal/sync/sync.go new file mode 100644 index 00000000000..a5d35c504cb --- /dev/null +++ b/go/viperutil/internal/sync/sync.go @@ -0,0 +1,325 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sync + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/spf13/afero" + "github.com/spf13/pflag" + "github.com/spf13/viper" + + "vitess.io/vitess/go/viperutil/internal/log" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +// Viper is a wrapper around a pair of viper.Viper instances to provide config- +// reloading in a threadsafe manner. +// +// It maintains one viper, called "disk", which does the actual config watch and +// reload (via viper's WatchConfig), and a second viper, called "live", which +// Values (registered via viperutil.Configure with Dynamic=true) access their +// settings from. The "live" config only updates after blocking all values from +// reading in order to swap in the most recently-loaded config from the "disk". +type Viper struct { + m sync.Mutex // prevents races between loadFromDisk and AllSettings + disk *viper.Viper + live *viper.Viper + keys map[string]*sync.RWMutex + + subscribers []chan<- struct{} + watchingConfig bool + + fs afero.Fs + + setCh chan struct{} + + // for testing purposes only + onConfigWrite func() +} + +func (v *Viper) SetFs(fs afero.Fs) { + v.fs = fs + v.disk.SetFs(fs) +} + +// New returns a new synced Viper. +func New() *Viper { + return &Viper{ + disk: viper.New(), + live: viper.New(), + keys: map[string]*sync.RWMutex{}, + fs: afero.NewOsFs(), // default Fs used by viper, but we need this set so loadFromDisk doesn't accidentally nil-out the live fs + setCh: make(chan struct{}, 1), + } +} + +// Set sets the given key to the given value, in both the disk and live vipers. +func (v *Viper) Set(key string, value any) { + m, ok := v.keys[key] + if !ok { + return + } + + m.Lock() + defer m.Unlock() + + v.m.Lock() + defer v.m.Unlock() + + // We must not update v.disk here; explicit calls to Set will supercede all + // future config reloads. + v.live.Set(key, value) + + // Do a non-blocking signal to persist here. Our channel has a buffer of 1, + // so if we've signalled for some other Set call that hasn't been persisted + // yet, this Set will get persisted along with that one and any other + // pending in-memory changes. + select { + case v.setCh <- struct{}{}: + default: + } +} + +// ErrDuplicateWatch is returned when Watch is called on a synced Viper which +// has already started a watch. +var ErrDuplicateWatch = vterrors.New(vtrpc.Code_FAILED_PRECONDITION, "duplicate watch") + +// Watch starts watching the config used by the passed-in Viper. Before starting +// the watch, the synced viper will perform an initial read and load from disk +// so that the live config is ready for use without requiring an initial config +// change. +// +// If the given static viper did not load a config file (and is instead relying +// purely on defaults, flags, and environment variables), then the settings of +// that viper are merged over, and this synced Viper may be used to set up an +// actual watch later. Additionally, this starts a background goroutine to +// persist changes made in-memory back to disk. It returns a cancel func to stop +// the persist loop, which the caller is responsible for calling during +// shutdown (see package servenv for an example). +// +// This does two things — one which is a nice-to-have, and another which is +// necessary for correctness. +// +// 1. Writing in-memory changes (which usually occur through a request to a +// /debug/env endpoint) ensures they are persisted across process restarts. +// 2. Writing in-memory changes ensures that subsequent modifications to the +// config file do not clobber those changes. Because viper loads the entire +// config on-change, rather than an incremental (diff) load, if a user were to +// edit an unrelated key (keyA) in the file, and we did not persist the +// in-memory change (keyB), then future calls to keyB.Get() would return the +// older value. +// +// If this synced viper is already watching a config file, this function returns +// an ErrDuplicateWatch. Other errors may be returned via underlying viper code +// to ensure the config file can be read in properly. +func (v *Viper) Watch(ctx context.Context, static *viper.Viper, minWaitInterval time.Duration) (cancel context.CancelFunc, err error) { + if v.watchingConfig { + return nil, vterrors.Wrapf(ErrDuplicateWatch, "%s: viper is already watching %s", ErrDuplicateWatch.Error(), v.disk.ConfigFileUsed()) + } + + ctx, cancel = context.WithCancel(ctx) + + cfg := static.ConfigFileUsed() + if cfg == "" { + // No config file to watch, just merge the settings and return. + return cancel, v.live.MergeConfigMap(static.AllSettings()) + } + + v.disk.SetConfigFile(cfg) + if err := v.disk.ReadInConfig(); err != nil { + return nil, err + } + + v.watchingConfig = true + v.loadFromDisk() + v.disk.OnConfigChange(func(in fsnotify.Event) { + for _, m := range v.keys { + m.Lock() + // This won't fire until after the config has been updated on v.live. + defer m.Unlock() + } + + v.loadFromDisk() + + for _, ch := range v.subscribers { + select { + case ch <- struct{}{}: + default: + } + } + }) + v.disk.WatchConfig() + + go v.persistChanges(ctx, minWaitInterval) + + return cancel, nil +} + +func (v *Viper) persistChanges(ctx context.Context, minWaitInterval time.Duration) { + defer close(v.setCh) + + var timer *time.Timer + if minWaitInterval > 0 { + timer = time.NewTimer(minWaitInterval) + } + + persistOnce := func() { + if err := v.WriteConfig(); err != nil { + log.ERROR("failed to persist config changes back to disk: %s", err.Error()) + // If we failed to persist, don't wait the entire interval before + // writing again, instead writing immediately on the next request. + if timer != nil { + if !timer.Stop() { + <-timer.C + } + + timer = nil + } + } + + switch { + case minWaitInterval == 0: + return + case timer == nil: + timer = time.NewTimer(minWaitInterval) + default: + timer.Reset(minWaitInterval) + } + } + + for { + select { + case <-ctx.Done(): + return + case <-v.setCh: + if timer == nil { + persistOnce() + continue + } + + select { + case <-ctx.Done(): + return + case <-timer.C: + persistOnce() + } + } + } +} + +// WriteConfig writes the live viper config back to disk. +func (v *Viper) WriteConfig() error { + if v.onConfigWrite != nil { + defer v.onConfigWrite() + } + + for _, m := range v.keys { + m.Lock() + // This won't fire until after the config has been written. + defer m.Unlock() + } + + v.m.Lock() + defer v.m.Unlock() + + v.live.SetConfigFile(v.disk.ConfigFileUsed()) + + return v.live.WriteConfig() +} + +// Notify adds a subscription that this synced viper will attempt to notify on +// config changes, after the updated config has been copied over from disk to +// live. +// +// Analogous to signal.Notify, notifications are sent non-blocking, so users +// should account for this when consuming from the channel they've provided. +// +// This function must be called prior to setting up a Watch; it will panic if a +// a watch has already been established on this synced Viper. +func (v *Viper) Notify(ch chan<- struct{}) { + if v.watchingConfig { + panic("cannot Notify after starting to watch a config") + } + + v.subscribers = append(v.subscribers, ch) +} + +// AllSettings returns the current live settings. +func (v *Viper) AllSettings() map[string]any { + v.m.Lock() + defer v.m.Unlock() + + return v.live.AllSettings() +} + +func (v *Viper) loadFromDisk() { + v.m.Lock() + defer v.m.Unlock() + + // Reset v.live so explicit Set calls don't win over what's just changed on + // disk. + v.live = viper.New() + v.live.SetFs(v.fs) + + // Fun fact! MergeConfigMap actually only ever returns nil. Maybe in an + // older version of viper it used to actually handle errors, but now it + // decidedly does not. See https://github.com/spf13/viper/blob/v1.8.1/viper.go#L1492-L1499. + _ = v.live.MergeConfigMap(v.disk.AllSettings()) +} + +// begin implementation of registry.Bindable for sync.Viper + +func (v *Viper) BindEnv(vars ...string) error { return v.disk.BindEnv(vars...) } +func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error { return v.disk.BindPFlag(key, flag) } +func (v *Viper) RegisterAlias(alias string, key string) { v.disk.RegisterAlias(alias, key) } +func (v *Viper) SetDefault(key string, value any) { v.disk.SetDefault(key, value) } + +// end implementation of registry.Bindable for sync.Viper + +// AdaptGetter wraps a get function (matching the signature of +// viperutil.Options.GetFunc) to be threadsafe with the passed-in synced Viper. +// +// It must be called prior to starting a watch on the synced Viper; it will +// panic if a watch has already been established. +// +// This function must be called at most once per key; it will panic if attempting +// to adapt multiple getters for the same key. +func AdaptGetter[T any](key string, getter func(v *viper.Viper) func(key string) T, v *Viper) func(key string) T { + if v.watchingConfig { + panic("cannot adapt getter to synchronized viper which is already watching a config") + } + + if _, ok := v.keys[key]; ok { + panic(fmt.Sprintf("already adapted a getter for key %s", key)) + } + + var m sync.RWMutex + v.keys[key] = &m + + return func(key string) T { + m.RLock() + defer m.RUnlock() + + return getter(v.live)(key) + } +} diff --git a/go/viperutil/internal/sync/sync_darwin_test.go b/go/viperutil/internal/sync/sync_darwin_test.go new file mode 100644 index 00000000000..3c27ed97616 --- /dev/null +++ b/go/viperutil/internal/sync/sync_darwin_test.go @@ -0,0 +1,34 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sync_test + +import "os" + +// atomicWrite overwrites a file in such a way as to produce exactly one +// filesystem event of the type CREATE or WRITE (which are tracked by viper) +// without producing any REMOVE events. +// +// At time of writing, this produces the following on darwin: +// CHMOD => WRITE => CHMOD. +func atomicWrite(path string, data []byte) error { + stat, err := os.Stat(path) + if err != nil { + return err + } + + return os.WriteFile(path, data, stat.Mode()) +} diff --git a/go/viperutil/internal/sync/sync_internal_test.go b/go/viperutil/internal/sync/sync_internal_test.go new file mode 100644 index 00000000000..cc8a163fa18 --- /dev/null +++ b/go/viperutil/internal/sync/sync_internal_test.go @@ -0,0 +1,136 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sync + +import ( + "context" + "encoding/json" + "math/rand" + "testing" + "time" + + "github.com/spf13/afero" + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPersistConfig(t *testing.T) { + type config struct { + Foo int `json:"foo"` + } + + loadConfig := func(t *testing.T, fs afero.Fs) config { + t.Helper() + + data, err := afero.ReadFile(fs, "config.json") + require.NoError(t, err) + + var cfg config + require.NoError(t, json.Unmarshal(data, &cfg)) + + return cfg + } + + setup := func(t *testing.T, v *Viper, minWaitInterval time.Duration) (afero.Fs, <-chan struct{}) { + t.Helper() + + fs := afero.NewMemMapFs() + cfg := config{ + Foo: jitter(1, 100), + } + + data, err := json.Marshal(&cfg) + require.NoError(t, err) + + err = afero.WriteFile(fs, "config.json", data, 0644) + require.NoError(t, err) + + static := viper.New() + static.SetFs(fs) + static.SetConfigFile("config.json") + + require.NoError(t, static.ReadInConfig()) + require.Equal(t, cfg.Foo, static.GetInt("foo")) + + ch := make(chan struct{}, 1) + v.onConfigWrite = func() { ch <- struct{}{} } + v.SetFs(fs) + + cancel, err := v.Watch(context.Background(), static, minWaitInterval) + require.NoError(t, err) + + t.Cleanup(cancel) + return fs, ch + } + + t.Run("basic", func(t *testing.T) { + v := New() + + minPersistWaitInterval := 10 * time.Second + get := AdaptGetter("foo", func(v *viper.Viper) func(key string) int { return v.GetInt }, v) + fs, ch := setup(t, v, minPersistWaitInterval) + + old := get("foo") + loadConfig(t, fs) + v.Set("foo", old+1) + // This should happen immediately in-memory and on-disk. + assert.Equal(t, old+1, get("foo")) + <-ch + assert.Equal(t, old+1, loadConfig(t, fs).Foo) + + v.Set("foo", old+2) + // This should _also_ happen immediately in-memory, but not on-disk. + // It will take up to 2 * minPersistWaitInterval to reach the disk. + assert.Equal(t, old+2, get("foo")) + assert.Equal(t, old+1, loadConfig(t, fs).Foo) + + select { + case <-ch: + case <-time.After(3 * minPersistWaitInterval): + assert.Fail(t, "config was not persisted quickly enough", "config took longer than %s to persist (minPersistWaitInterval = %s)", 3*minPersistWaitInterval, minPersistWaitInterval) + } + + assert.Equal(t, old+2, loadConfig(t, fs).Foo) + }) + + t.Run("no persist interval", func(t *testing.T) { + v := New() + + var minPersistWaitInterval time.Duration + get := AdaptGetter("foo", func(v *viper.Viper) func(key string) int { return v.GetInt }, v) + fs, ch := setup(t, v, minPersistWaitInterval) + + old := get("foo") + loadConfig(t, fs) + v.Set("foo", old+1) + // This should happen immediately in-memory and on-disk. + assert.Equal(t, old+1, get("foo")) + <-ch + assert.Equal(t, old+1, loadConfig(t, fs).Foo) + + v.Set("foo", old+2) + // This should _also_ happen immediately in-memory, and on-disk. + assert.Equal(t, old+2, get("foo")) + <-ch + assert.Equal(t, old+2, loadConfig(t, fs).Foo) + }) +} + +func jitter(min, max int) int { + return min + rand.Intn(max-min+1) +} diff --git a/go/viperutil/internal/sync/sync_linux_test.go b/go/viperutil/internal/sync/sync_linux_test.go new file mode 100644 index 00000000000..83ccfad66cc --- /dev/null +++ b/go/viperutil/internal/sync/sync_linux_test.go @@ -0,0 +1,39 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sync_test + +import "os" + +// atomicWrite overwrites a file in such a way as to produce exactly one +// filesystem event of the type CREATE or WRITE (which are tracked by viper) +// without producing any REMOVE events. +// +// At time of writing, this produces the following on x86_64 linux: +// CREATE. +func atomicWrite(path string, data []byte) error { + stat, err := os.Stat(path) + if err != nil { + return err + } + + tmp := path + ".tmp" + if err := os.WriteFile(tmp, data, stat.Mode()); err != nil { + return err + } + + return os.Rename(tmp, path) +} diff --git a/go/viperutil/internal/sync/sync_test.go b/go/viperutil/internal/sync/sync_test.go new file mode 100644 index 00000000000..5b1c5541896 --- /dev/null +++ b/go/viperutil/internal/sync/sync_test.go @@ -0,0 +1,156 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sync_test + +import ( + "context" + "encoding/json" + "math/rand" + "os" + "sync" + "testing" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/spf13/viper" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/viperutil" + vipersync "vitess.io/vitess/go/viperutil/internal/sync" + "vitess.io/vitess/go/viperutil/internal/value" +) + +func TestWatchConfig(t *testing.T) { + type config struct { + A, B int + } + + writeConfig := func(tmp *os.File, a, b int) error { + data, err := json.Marshal(&config{A: a, B: b}) + if err != nil { + return err + } + + // In order to guarantee viper's watcher detects exactly one config + // change, we perform a write specific to the platform we're executing + // on. + // + // Consequently, this test only supports linux and macos for now. + return atomicWrite(tmp.Name(), data) + } + writeRandomConfig := func(tmp *os.File) error { + a, b := rand.Intn(100), rand.Intn(100) + return writeConfig(tmp, a, b) + } + + tmp, err := os.CreateTemp(t.TempDir(), "TestWatchConfig_*.json") + require.NoError(t, err) + + require.NoError(t, writeRandomConfig(tmp)) + + v := viper.New() + v.SetConfigFile(tmp.Name()) + require.NoError(t, v.ReadInConfig()) + + wCh, rCh := make(chan struct{}), make(chan struct{}) + v.OnConfigChange(func(event fsnotify.Event) { + select { + case <-rCh: + return + default: + } + + wCh <- struct{}{} + // block forever to prevent this viper instance from double-updating. + <-rCh + }) + v.WatchConfig() + + // Make sure that basic, unsynchronized WatchConfig is set up before + // beginning the actual test. + a, b := v.GetInt("a"), v.GetInt("b") + require.NoError(t, writeConfig(tmp, a+1, b+1)) + <-wCh // wait for the update to finish + + require.Equal(t, a+1, v.GetInt("a")) + require.Equal(t, b+1, v.GetInt("b")) + + rCh <- struct{}{} + + sv := vipersync.New() + A := viperutil.Configure("a", viperutil.Options[int]{Dynamic: true}) + B := viperutil.Configure("b", viperutil.Options[int]{Dynamic: true}) + + A.(*value.Dynamic[int]).Base.BoundGetFunc = vipersync.AdaptGetter("a", func(v *viper.Viper) func(key string) int { + return v.GetInt + }, sv) + B.(*value.Dynamic[int]).Base.BoundGetFunc = vipersync.AdaptGetter("b", func(v *viper.Viper) func(key string) int { + return v.GetInt + }, sv) + + cancel, err := sv.Watch(context.Background(), v, 0) + require.NoError(t, err) + defer cancel() + + var wg sync.WaitGroup + ctx, cancel := context.WithCancel(context.Background()) + + // Sleep between 25 and 50ms between reads. + readJitter := func() time.Duration { + return time.Duration(jitter(25, 50)) * time.Millisecond + } + // Sleep between 75 and 125ms between writes. + writeJitter := func() time.Duration { + return time.Duration(jitter(75, 125)) * time.Millisecond + } + + for i := 0; i < 10; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + + for { + select { + case <-ctx.Done(): + return + default: + } + + switch i % 2 { + case 0: + A.Get() + case 1: + B.Get() + } + + time.Sleep(readJitter()) + } + }(i) + } + + for i := 0; i < 100; i++ { + require.NoError(t, writeRandomConfig(tmp)) + time.Sleep(writeJitter()) + } + + cancel() + wg.Wait() +} + +func jitter(min, max int) int { + return min + rand.Intn(max-min+1) +} diff --git a/go/viperutil/internal/value/value.go b/go/viperutil/internal/value/value.go new file mode 100644 index 00000000000..a958d642bcf --- /dev/null +++ b/go/viperutil/internal/value/value.go @@ -0,0 +1,170 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "fmt" + + "github.com/spf13/pflag" + "github.com/spf13/viper" + + "vitess.io/vitess/go/viperutil/internal/registry" + "vitess.io/vitess/go/viperutil/internal/sync" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +// Registerable is the subset of the interface exposed by Values (which is +// declared in the public viperutil package). +// +// We need a separate interface type because Go generics do not let you define +// a function that takes Value[T] for many, different T's, which we want to do +// for BindFlags. +type Registerable interface { + Key() string + Registry() registry.Bindable + Flag(fs *pflag.FlagSet) (*pflag.Flag, error) +} + +// Base is the base functionality shared by Static and Dynamic values. It +// implements viperutil.Value. +type Base[T any] struct { + KeyName string + DefaultVal T + + GetFunc func(v *viper.Viper) func(key string) T + BoundGetFunc func(key string) T + + Aliases []string + FlagName string + EnvVars []string +} + +func (val *Base[T]) Key() string { return val.KeyName } +func (val *Base[T]) Default() T { return val.DefaultVal } +func (val *Base[T]) Get() T { return val.BoundGetFunc(val.Key()) } + +// ErrNoFlagDefined is returned when a Value has a FlagName set, but the given +// FlagSet does not define a flag with that name. +var ErrNoFlagDefined = vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "flag not defined") + +// Flag is part of the Registerable interface. If the given flag set has a flag +// with the name of this value's configured flag, that flag is returned, along +// with a nil error. If no flag exists on the flag set with that name, an error +// is returned. +// +// If the value is not configured to correspond to a flag (FlagName == ""), then +// (nil, nil) is returned. +func (val *Base[T]) Flag(fs *pflag.FlagSet) (*pflag.Flag, error) { + if val.FlagName == "" { + return nil, nil + } + + flag := fs.Lookup(val.FlagName) + if flag == nil { + return nil, vterrors.Wrapf(ErrNoFlagDefined, "%s with name %s (for key %s)", ErrNoFlagDefined.Error(), val.FlagName, val.Key()) + } + + return flag, nil +} + +func (val *Base[T]) bind(v registry.Bindable) { + v.SetDefault(val.Key(), val.DefaultVal) + + for _, alias := range val.Aliases { + v.RegisterAlias(alias, val.Key()) + } + + if len(val.EnvVars) > 0 { + vars := append([]string{val.Key()}, val.EnvVars...) + _ = v.BindEnv(vars...) + } +} + +// BindFlags creates bindings between each value's registry and the given flag +// set. This function will panic if any of the values defines a flag that does +// not exist in the flag set. +func BindFlags(fs *pflag.FlagSet, values ...Registerable) { + for _, val := range values { + flag, err := val.Flag(fs) + switch { + case err != nil: + panic(fmt.Errorf("failed to load flag for %s: %w", val.Key(), err)) + case flag == nil: + continue + } + + _ = val.Registry().BindPFlag(val.Key(), flag) + if flag.Name != val.Key() { + val.Registry().RegisterAlias(flag.Name, val.Key()) + } + } +} + +// Static is a static value. Static values register to the Static registry, and +// do not respond to changes to config files. Their Get() method will return the +// same value for the lifetime of the process. +type Static[T any] struct { + *Base[T] +} + +// NewStatic returns a static value derived from the given base value, after +// binding it to the static registry. +func NewStatic[T any](base *Base[T]) *Static[T] { + base.bind(registry.Static) + base.BoundGetFunc = base.GetFunc(registry.Static) + + return &Static[T]{ + Base: base, + } +} + +func (val *Static[T]) Registry() registry.Bindable { + return registry.Static +} + +func (val *Static[T]) Set(v T) { + registry.Static.Set(val.KeyName, v) +} + +// Dynamic is a dynamic value. Dynamic values register to the Dynamic registry, +// and respond to changes to watched config files. Their Get() methods will +// return whatever value is currently live in the config, in a threadsafe +// manner. +type Dynamic[T any] struct { + *Base[T] +} + +// NewDynamic returns a dynamic value derived from the given base value, after +// binding it to the dynamic registry and wrapping its GetFunc to be threadsafe +// with respect to config reloading. +func NewDynamic[T any](base *Base[T]) *Dynamic[T] { + base.bind(registry.Dynamic) + base.BoundGetFunc = sync.AdaptGetter(base.Key(), base.GetFunc, registry.Dynamic) + + return &Dynamic[T]{ + Base: base, + } +} + +func (val *Dynamic[T]) Registry() registry.Bindable { + return registry.Dynamic +} + +func (val *Dynamic[T]) Set(v T) { + registry.Dynamic.Set(val.KeyName, v) +} diff --git a/go/viperutil/value.go b/go/viperutil/value.go new file mode 100644 index 00000000000..4433b53b05d --- /dev/null +++ b/go/viperutil/value.go @@ -0,0 +1,63 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package viperutil + +import ( + "github.com/spf13/pflag" + + "vitess.io/vitess/go/viperutil/internal/value" +) + +var ( + _ Value[int] = (*value.Static[int])(nil) + _ Value[int] = (*value.Dynamic[int])(nil) +) + +// Value represents the public API to access viper-backed config values. +// +// N.B. the embedded value.Registerable interface is necessary only for +// BindFlags and other mechanisms of binding Values to the internal registries +// to work. Users of Value objects should only need to call Get(), Set(v T), and +// Default(). +type Value[T any] interface { + value.Registerable + + // Get returns the current value. For static implementations, this will + // never change after the initial config load. For dynamic implementations, + // this may change throughout the lifetime of the vitess process. + Get() T + // Set sets the underlying value. For both static and dynamic + // implementations, this is reflected in subsequent calls to Get. + // + // If a config file was loaded, changes to dynamic values will be persisted + // back to the config file in the background, governed by the behavior of + // the --config-persistence-min-interval flag. + Set(v T) + // Default returns the default value configured for this Value. For both + // static and dynamic implementations, it should never change. + Default() T +} + +// BindFlags binds a set of Registerable values to the given flag set. +// +// This function will panic if any of the values was configured to map to a flag +// which is not defined on the flag set. Therefore, this function should usually +// be called in an OnParse or OnParseFor hook after defining the flags for the +// values in question. +func BindFlags(fs *pflag.FlagSet, values ...value.Registerable) { + value.BindFlags(fs, values...) +} diff --git a/go/viperutil/viper.go b/go/viperutil/viper.go new file mode 100644 index 00000000000..7d120059587 --- /dev/null +++ b/go/viperutil/viper.go @@ -0,0 +1,189 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package viperutil provides a utility layer to streamline and standardize +interacting with viper-backed configuration values across vitess components. + +The common pattern is for a given module to declare their values by declaring +variables that are the result of calling Configure, for example in package trace: + + package trace + + import "vitess.io/vitess/go/viperutil" + + var ( + modulePrefix = viperutil.KeyPrefixFunc("trace") + + tracingServer = viperutil.Configure( + modulePrefix("service"), + viperutil.Options[string]{ + Default: "noop", + FlagName: "tracer", + } + ) + enableLogging = viperutil.Configure( + modulePrefix("enable-logging"), + viperutil.Options[bool]{ + FlagName: "tracing-enable-logging", + } + ) + ) + +Then, in an OnParseFor or OnParse hook, declare any flags, and bind viper values +to those flags, as appropriate: + + package trace + + import ( + "github.com/spf13/pflag" + + "vitess.io/vitess/go/viperutil" + "vitess.io/vitess/go/vt/servenv" + ) + + func init() { + servenv.OnParse(func(fs *pflag.FlagSet) { + fs.String("tracer", tracingServer.Default(), "") + fs.Bool("tracing-enable-logging", enableLogging.Default(), "") + + viperutil.BindFlags(fs, tracingServer, enableLogging) + }) + } + +Finally, after a call to `viperutil.LoadConfig` (which is done as a part of +`servenv.ParseFlags`), values may be accessed by calling their `.Get()` methods. + +For more details, refer to the package documentation, as well as the documents +in doc/viper/. +*/ +package viperutil + +import ( + "strings" + + "github.com/spf13/viper" + + "vitess.io/vitess/go/viperutil/internal/value" +) + +// Options represents the various options used to control how Values are +// configured by viperutil. +type Options[T any] struct { + // Aliases, if set, configures the Value to be accessible via additional + // keys. + // + // This is useful for deprecating old names gracefully while maintaining + // backwards-compatibility. + Aliases []string + // FlagName, if set, allows a value to be configured to also check the + // named flag for its final config value. If depending on a flag, BindFlags + // must be called on the Value returned from Configure. In most cases, + // modules will do this in the same OnParse hook that defines their flags. + // + // Note that if the set FlagName does not match the Value's key, Configure + // will automatically register an alias to allow both names to be used as + // the key, which is necessary for the flag value to be discoverable by + // viper for the Value's actual key. + FlagName string + // EnvVars, if set, configures the Value to also check the given environment + // variables for its final config value. + // + // Note that unlike keys and aliases, environment variable names are + // case-sensitive. + EnvVars []string + // Default is the default value that will be set for the key. If not + // explicitly set during a call to Configure, the default value will be the + // zero value for the type T. This means if T is a pointer type, the default + // will be nil, not the zeroed out struct. + Default T + + // Dynamic, if set, configures a value to be backed by the dynamic registry. + // If a config file is used (via LoadConfig), that file will be watched for + // changes, and dynamic Values will reflect changes via their Get() methods + // (whereas static values will only ever return the value loaded initially). + Dynamic bool + + // GetFunc is the function used to get this value out of a viper. + // + // If omitted, GetFuncForType will attempt to provide a useful default for + // the given type T. For primitive types, this should be sufficient. For + // more fine-grained control over value retrieval and unmarshalling, callers + // should provide their own function. + // + // See GetFuncForType for further details. + GetFunc func(v *viper.Viper) func(key string) T +} + +// Configure configures a viper-backed value associated with the given key to +// either the static or dynamic internal registries, returning the resulting +// Value. This value is partially ready for use (it will be able to get values +// from environment variables and defaults), but file-based configs will not +// available until servenv calls LoadConfig, and flag-based configs will not be +// available until a combination of BindFlags and pflag.Parse have been called, +// usually by servenv. +// +// Exact behavior of how the key is bound to the registries depend on the +// Options provided, +func Configure[T any](key string, opts Options[T]) (v Value[T]) { + getfunc := opts.GetFunc + if getfunc == nil { + getfunc = GetFuncForType[T]() + } + + base := &value.Base[T]{ + KeyName: key, + DefaultVal: opts.Default, + GetFunc: getfunc, + Aliases: opts.Aliases, + FlagName: opts.FlagName, + EnvVars: opts.EnvVars, + } + + switch { + case opts.Dynamic: + v = value.NewDynamic(base) + default: + v = value.NewStatic(base) + } + + return v +} + +// KeyPrefixFunc is a helper function to allow modules to extract a common key +// prefix used by that module to avoid repitition (and typos, missed updates, +// and so on). +// +// For example, package go/vt/vttablet/schema may want to do: +// +// moduleKey := viperutil.KeyPrefixFunc("vttablet.schema") +// watch := viperutil.Configure(moduleKey("watch_interval"), ...) // => "vttablet.schema.watch_interval" +// // ... and so on +func KeyPrefixFunc(prefix string) func(subkey string) (fullkey string) { + var keyParts []string + if prefix != "" { + keyParts = append(keyParts, prefix) + } + + return func(subkey string) (fullkey string) { + tmp := keyParts + if subkey != "" { + tmp = append(tmp, subkey) + } + + return strings.Join(tmp, ".") + } +} diff --git a/go/viperutil/vipertest/stub.go b/go/viperutil/vipertest/stub.go new file mode 100644 index 00000000000..5c8d3e78b43 --- /dev/null +++ b/go/viperutil/vipertest/stub.go @@ -0,0 +1,60 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vipertest + +import ( + "testing" + + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/viperutil" + "vitess.io/vitess/go/viperutil/internal/value" +) + +// Stub stubs out a given value to use the passed-in viper to retrieve its +// config value for testing purposes. It returns a function to undo this, +// resetting the Value to whatever registry (Static, or Dynamic) it was +// originally bound to. +// +// It fails the test if a caller attempts to stub the same value multiple times +// to a particular viper. +func Stub[T any](t *testing.T, v *viper.Viper, val viperutil.Value[T]) func() { + t.Helper() + + if !assert.False(t, v.InConfig(val.Key()), "value for key %s already stubbed", val.Key()) { + return func() {} + } + + var base *value.Base[T] + switch val := val.(type) { + case *value.Static[T]: + base = val.Base + case *value.Dynamic[T]: + base = val.Base + default: + assert.Fail(t, "value %+v does not support stubbing", val) + return func() {} + } + + oldGet := base.BoundGetFunc + base.BoundGetFunc = base.GetFunc(v) + + return func() { + base.BoundGetFunc = oldGet + } +} diff --git a/go/vt/binlog/binlog_connection.go b/go/vt/binlog/binlog_connection.go index 1cdb2d6cacc..f7c7acd8e9c 100644 --- a/go/vt/binlog/binlog_connection.go +++ b/go/vt/binlog/binlog_connection.go @@ -17,15 +17,16 @@ limitations under the License. package binlog import ( + "context" crand "crypto/rand" "fmt" "math" "math/big" "sync" - "context" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/pools" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" @@ -99,12 +100,12 @@ func connectForReplication(cp dbconfigs.Connector) (*mysql.Conn, error) { // StartBinlogDumpFromCurrent requests a replication binlog dump from // the current position. -func (bc *BinlogConnection) StartBinlogDumpFromCurrent(ctx context.Context) (mysql.Position, <-chan mysql.BinlogEvent, <-chan error, error) { +func (bc *BinlogConnection) StartBinlogDumpFromCurrent(ctx context.Context) (replication.Position, <-chan mysql.BinlogEvent, <-chan error, error) { ctx, bc.cancel = context.WithCancel(ctx) position, err := bc.Conn.PrimaryPosition() if err != nil { - return mysql.Position{}, nil, nil, fmt.Errorf("failed to get primary position: %v", err) + return replication.Position{}, nil, nil, fmt.Errorf("failed to get primary position: %v", err) } c, e, err := bc.StartBinlogDumpFromPosition(ctx, "", position) @@ -120,7 +121,7 @@ func (bc *BinlogConnection) StartBinlogDumpFromCurrent(ctx context.Context) (mys // by canceling the context. // // Note the context is valid and used until eventChan is closed. -func (bc *BinlogConnection) StartBinlogDumpFromPosition(ctx context.Context, binlogFilename string, startPos mysql.Position) (<-chan mysql.BinlogEvent, <-chan error, error) { +func (bc *BinlogConnection) StartBinlogDumpFromPosition(ctx context.Context, binlogFilename string, startPos replication.Position) (<-chan mysql.BinlogEvent, <-chan error, error) { ctx, bc.cancel = context.WithCancel(ctx) log.Infof("sending binlog dump command: startPos=%v, serverID=%v", startPos, bc.serverID) @@ -156,7 +157,7 @@ func (bc *BinlogConnection) streamEvents(ctx context.Context) (chan mysql.Binlog case errChan <- err: case <-ctx.Done(): } - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.CRServerLost { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.CRServerLost { // CRServerLost = Lost connection to MySQL server during query // This is not necessarily an error. It could just be that we closed // the connection from outside. diff --git a/go/vt/binlog/binlog_streamer.go b/go/vt/binlog/binlog_streamer.go index 6bf2c26bf9c..abbf73ba506 100644 --- a/go/vt/binlog/binlog_streamer.go +++ b/go/vt/binlog/binlog_streamer.go @@ -18,15 +18,17 @@ package binlog import ( "bytes" + "context" "fmt" "io" "strings" "google.golang.org/protobuf/proto" - "context" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/binlog" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/dbconfigs" @@ -141,7 +143,7 @@ type Streamer struct { extractPK bool clientCharset *binlogdatapb.Charset - startPos mysql.Position + startPos replication.Position timestamp int64 sendTransaction sendTransactionFunc usePreviousGTIDs bool @@ -157,7 +159,7 @@ type Streamer struct { // startPos is the position to start streaming at. Incompatible with timestamp. // timestamp is the timestamp to start streaming at. Incompatible with startPos. // sendTransaction is called each time a transaction is committed or rolled back. -func NewStreamer(cp dbconfigs.Connector, se *schema.Engine, clientCharset *binlogdatapb.Charset, startPos mysql.Position, timestamp int64, sendTransaction sendTransactionFunc) *Streamer { +func NewStreamer(cp dbconfigs.Connector, se *schema.Engine, clientCharset *binlogdatapb.Charset, startPos replication.Position, timestamp int64, sendTransaction sendTransactionFunc) *Streamer { return &Streamer{ cp: cp, se: se, @@ -245,10 +247,10 @@ func (bls *Streamer) Stream(ctx context.Context) (err error) { // If the sendTransaction func returns io.EOF, parseEvents returns ErrClientEOF. // If the events channel is closed, parseEvents returns ErrServerEOF. // If the context is done, returns ctx.Err(). -func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.BinlogEvent, errs <-chan error) (mysql.Position, error) { +func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.BinlogEvent, errs <-chan error) (replication.Position, error) { var statements []FullBinlogStatement var format mysql.BinlogFormat - var gtid mysql.GTID + var gtid replication.GTID var pos = bls.startPos var autocommit = true var err error @@ -273,7 +275,7 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog if int64(timestamp) >= bls.timestamp { eventToken := &querypb.EventToken{ Timestamp: int64(timestamp), - Position: mysql.EncodePosition(pos), + Position: replication.EncodePosition(pos), } if err = bls.sendTransaction(eventToken, statements); err != nil { if err == io.EOF { @@ -347,7 +349,7 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog return pos, fmt.Errorf("can't get GTID from binlog event: %v, event data: %#v", err, ev) } oldpos := pos - pos = mysql.AppendGTID(pos, gtid) + pos = replication.AppendGTID(pos, gtid) // If the event is received outside of a transaction, it must // be sent. Otherwise, it will get lost and the targets will go out // of sync. @@ -362,7 +364,7 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog if err != nil { return pos, fmt.Errorf("can't get GTID from binlog event: %v, event data: %#v", err, ev) } - pos = mysql.AppendGTID(pos, gtid) + pos = replication.AppendGTID(pos, gtid) if hasBegin { begin() } @@ -758,7 +760,7 @@ func writeValuesAsSQL(sql *sqlparser.TrackedBuffer, tce *tableCacheEntry, rs *my } // We have real data. - value, l, err := mysql.CellValue(data, pos, tce.tm.Types[c], tce.tm.Metadata[c], &querypb.Field{Type: tce.ti.Fields[c].Type}) + value, l, err := binlog.CellValue(data, pos, tce.tm.Types[c], tce.tm.Metadata[c], &querypb.Field{Type: tce.ti.Fields[c].Type}) if err != nil { return keyspaceIDCell, nil, err } @@ -766,7 +768,7 @@ func writeValuesAsSQL(sql *sqlparser.TrackedBuffer, tce *tableCacheEntry, rs *my if err != nil { return sqltypes.Value{}, nil, err } - if value.Type() == querypb.Type_TIMESTAMP && !bytes.HasPrefix(vBytes, mysql.ZeroTimestamp) { + if value.Type() == querypb.Type_TIMESTAMP && !bytes.HasPrefix(vBytes, binlog.ZeroTimestamp) { // Values in the binary log are UTC. Let's convert them // to whatever timezone the connection is using, // so MySQL properly converts them back to UTC. @@ -823,7 +825,7 @@ func writeIdentifiersAsSQL(sql *sqlparser.TrackedBuffer, tce *tableCacheEntry, r sql.WriteByte('=') // We have real data. - value, l, err := mysql.CellValue(data, pos, tce.tm.Types[c], tce.tm.Metadata[c], &querypb.Field{Type: tce.ti.Fields[c].Type}) + value, l, err := binlog.CellValue(data, pos, tce.tm.Types[c], tce.tm.Metadata[c], &querypb.Field{Type: tce.ti.Fields[c].Type}) if err != nil { return keyspaceIDCell, nil, err } @@ -831,7 +833,7 @@ func writeIdentifiersAsSQL(sql *sqlparser.TrackedBuffer, tce *tableCacheEntry, r if err != nil { return keyspaceIDCell, nil, err } - if value.Type() == querypb.Type_TIMESTAMP && !bytes.HasPrefix(vBytes, mysql.ZeroTimestamp) { + if value.Type() == querypb.Type_TIMESTAMP && !bytes.HasPrefix(vBytes, binlog.ZeroTimestamp) { // Values in the binary log are UTC. Let's convert them // to whatever timezone the connection is using, // so MySQL properly converts them back to UTC. diff --git a/go/vt/binlog/binlog_streamer_rbr_test.go b/go/vt/binlog/binlog_streamer_rbr_test.go index 6a5c22723fd..d8481ca0665 100644 --- a/go/vt/binlog/binlog_streamer_rbr_test.go +++ b/go/vt/binlog/binlog_streamer_rbr_test.go @@ -17,12 +17,14 @@ limitations under the License. package binlog import ( + "context" "reflect" "testing" - "context" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/binlog" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" @@ -44,11 +46,14 @@ func TestStreamerParseRBREvents(t *testing.T) { se.SetTableForTests(&schema.Table{ Name: sqlparser.NewIdentifierCS("vt_a"), Fields: []*querypb.Field{{ - Name: "id", - Type: querypb.Type_INT64, + Name: "id", + Type: querypb.Type_INT64, + Charset: collations.CollationBinaryID, + Flags: uint32(querypb.MySqlFlag_NUM_FLAG), }, { - Name: "message", - Type: querypb.Type_VARCHAR, + Name: "message", + Type: querypb.Type_VARCHAR, + Charset: uint32(collations.Default()), }}, }) @@ -59,8 +64,8 @@ func TestStreamerParseRBREvents(t *testing.T) { Database: "vt_test_keyspace", Name: "vt_a", Types: []byte{ - mysql.TypeLong, - mysql.TypeVarchar, + binlog.TypeLong, + binlog.TypeVarchar, }, CanBeNull: mysql.NewServerBitmap(2), Metadata: []uint16{ @@ -163,7 +168,7 @@ func TestStreamerParseRBREvents(t *testing.T) { mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), mysql.NewTableMapEvent(f, s, tableID, tm), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: "vt_test_keyspace", SQL: "BEGIN"}), @@ -235,9 +240,9 @@ func TestStreamerParseRBREvents(t *testing.T) { }, eventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -261,7 +266,7 @@ func TestStreamerParseRBREvents(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, se, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, se, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -290,11 +295,14 @@ func TestStreamerParseRBRNameEscapes(t *testing.T) { se.SetTableForTests(&schema.Table{ Name: sqlparser.NewIdentifierCS("insert"), Fields: []*querypb.Field{{ - Name: "update", - Type: querypb.Type_INT64, + Name: "update", + Type: querypb.Type_INT64, + Charset: collations.CollationBinaryID, + Flags: uint32(querypb.MySqlFlag_NUM_FLAG), }, { - Name: "delete", - Type: querypb.Type_VARCHAR, + Name: "delete", + Type: querypb.Type_VARCHAR, + Charset: uint32(collations.Default()), }}, }) @@ -305,8 +313,8 @@ func TestStreamerParseRBRNameEscapes(t *testing.T) { Database: "vt_test_keyspace", Name: "insert", Types: []byte{ - mysql.TypeLong, - mysql.TypeVarchar, + binlog.TypeLong, + binlog.TypeVarchar, }, CanBeNull: mysql.NewServerBitmap(2), Metadata: []uint16{ @@ -409,7 +417,7 @@ func TestStreamerParseRBRNameEscapes(t *testing.T) { mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), mysql.NewTableMapEvent(f, s, tableID, tm), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: "vt_test_keyspace", SQL: "BEGIN"}), @@ -481,9 +489,9 @@ func TestStreamerParseRBRNameEscapes(t *testing.T) { }, eventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -507,7 +515,7 @@ func TestStreamerParseRBRNameEscapes(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, se, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, se, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) diff --git a/go/vt/binlog/binlog_streamer_test.go b/go/vt/binlog/binlog_streamer_test.go index df2af984e21..47be1e27b11 100644 --- a/go/vt/binlog/binlog_streamer_test.go +++ b/go/vt/binlog/binlog_streamer_test.go @@ -17,6 +17,7 @@ limitations under the License. package binlog import ( + "context" "fmt" "io" "strings" @@ -26,7 +27,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" - "context" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/mysql" @@ -85,7 +87,7 @@ func TestStreamerParseEventsXID(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: "vt_test_keyspace", SQL: "BEGIN"}), @@ -106,9 +108,9 @@ func TestStreamerParseEventsXID(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -126,7 +128,7 @@ func TestStreamerParseEventsXID(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -147,7 +149,7 @@ func TestStreamerParseEventsCommit(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: "vt_test_keyspace", SQL: "BEGIN"}), @@ -170,9 +172,9 @@ func TestStreamerParseEventsCommit(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -189,7 +191,7 @@ func TestStreamerParseEventsCommit(t *testing.T) { dbcfgs := dbconfigs.New(mcp) var got binlogStatements - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -216,7 +218,7 @@ func TestStreamerStop(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) // Start parseEvents(), but don't send it anything, so it just waits. ctx, cancel := context.WithCancel(context.Background()) @@ -269,7 +271,7 @@ func TestStreamerParseEventsClientEOF(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -294,7 +296,7 @@ func TestStreamerParseEventsServerEOF(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) _, err := bls.parseEvents(context.Background(), events, errs) if err != want { t.Errorf("wrong error, got %#v, want %#v", err, want) @@ -308,7 +310,7 @@ func TestStreamerParseEventsServerEOF(t *testing.T) { func TestStreamerParseEventsGTIDPurged(t *testing.T) { events := make(chan mysql.BinlogEvent) errs := make(chan error) - expectedStreamErr := mysql.NewSQLError(mysql.ERMasterFatalReadingBinlog, mysql.SSUnknownSQLState, + expectedStreamErr := sqlerror.NewSQLError(sqlerror.ERMasterFatalReadingBinlog, sqlerror.SSUnknownSQLState, "Cannot replicate because the master purged required binary logs.") sendTransaction := func(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { @@ -330,13 +332,13 @@ func TestStreamerParseEventsGTIDPurged(t *testing.T) { } }() - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) _, err := bls.parseEvents(context.Background(), events, errs) require.Error(t, err) - sqlErr, ok := err.(*mysql.SQLError) + sqlErr, ok := err.(*sqlerror.SQLError) require.True(t, ok, "expected SQLError, got %T", err) - require.True(t, sqlErr.Num == mysql.ERMasterFatalReadingBinlog, "expected ERMasterFatalReadingBinlog (%d), got %d", - mysql.ERMasterFatalReadingBinlog, sqlErr.Num) + require.True(t, sqlErr.Num == sqlerror.ERMasterFatalReadingBinlog, "expected ERMasterFatalReadingBinlog (%d), got %d", + sqlerror.ERMasterFatalReadingBinlog, sqlErr.Num) } func TestStreamerParseEventsSendErrorXID(t *testing.T) { @@ -369,7 +371,7 @@ func TestStreamerParseEventsSendErrorXID(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) @@ -415,7 +417,7 @@ func TestStreamerParseEventsSendErrorCommit(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -456,7 +458,7 @@ func TestStreamerParseEventsInvalid(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -499,7 +501,7 @@ func TestStreamerParseEventsInvalidFormat(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -542,7 +544,7 @@ func TestStreamerParseEventsNoFormat(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -583,7 +585,7 @@ func TestStreamerParseEventsInvalidQuery(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -604,7 +606,7 @@ func TestStreamerParseEventsRollback(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: "vt_test_keyspace", SQL: "BEGIN"}), @@ -634,9 +636,9 @@ func TestStreamerParseEventsRollback(t *testing.T) { Statements: nil, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -652,9 +654,9 @@ func TestStreamerParseEventsRollback(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -671,7 +673,7 @@ func TestStreamerParseEventsRollback(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF { @@ -691,7 +693,7 @@ func TestStreamerParseEventsDMLWithoutBegin(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: "vt_test_keyspace", SQL: "insert into vt_a(eid, id) values (1, 1) /* _stream vt_a (eid id ) (1 1 ); */"}), @@ -709,9 +711,9 @@ func TestStreamerParseEventsDMLWithoutBegin(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -724,9 +726,9 @@ func TestStreamerParseEventsDMLWithoutBegin(t *testing.T) { Statements: nil, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -744,7 +746,7 @@ func TestStreamerParseEventsDMLWithoutBegin(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF { @@ -764,7 +766,7 @@ func TestStreamerParseEventsBeginWithoutCommit(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: "vt_test_keyspace", SQL: "insert into vt_a(eid, id) values (1, 1) /* _stream vt_a (eid id ) (1 1 ); */"}), @@ -785,9 +787,9 @@ func TestStreamerParseEventsBeginWithoutCommit(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -800,9 +802,9 @@ func TestStreamerParseEventsBeginWithoutCommit(t *testing.T) { Statements: nil, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -820,7 +822,7 @@ func TestStreamerParseEventsBeginWithoutCommit(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF { @@ -840,7 +842,7 @@ func TestStreamerParseEventsSetInsertID(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: "vt_test_keyspace", SQL: "BEGIN"}), @@ -863,9 +865,9 @@ func TestStreamerParseEventsSetInsertID(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -882,7 +884,7 @@ func TestStreamerParseEventsSetInsertID(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF { @@ -924,7 +926,7 @@ func TestStreamerParseEventsInvalidIntVar(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) go sendTestEvents(events, input) _, err := bls.parseEvents(context.Background(), events, errs) @@ -945,7 +947,7 @@ func TestStreamerParseEventsOtherDB(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: "vt_test_keyspace", SQL: "BEGIN"}), @@ -969,9 +971,9 @@ func TestStreamerParseEventsOtherDB(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -988,7 +990,7 @@ func TestStreamerParseEventsOtherDB(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF { @@ -1008,7 +1010,7 @@ func TestStreamerParseEventsOtherDBBegin(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 0, ""), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0xd}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Database: "other", SQL: "BEGIN"}), // Check that this doesn't get filtered out. @@ -1032,9 +1034,9 @@ func TestStreamerParseEventsOtherDBBegin(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1407805592, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 0x0d, @@ -1051,7 +1053,7 @@ func TestStreamerParseEventsOtherDBBegin(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF { @@ -1093,7 +1095,7 @@ func TestStreamerParseEventsBeginAgain(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, sendTransaction) before := binlogStreamerErrors.Counts()["ParseEvents"] go sendTestEvents(events, input) @@ -1117,7 +1119,7 @@ func TestStreamerParseEventsMariadbBeginGTID(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 4, "filename.0001"), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 10}, true /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 10}, true /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Charset: &binlogdatapb.Charset{Client: 33, Conn: 33, Server: 33}, SQL: "insert into vt_insert_test(msg) values ('test 0') /* _stream vt_insert_test (id ) (null ); */", @@ -1144,9 +1146,9 @@ func TestStreamerParseEventsMariadbBeginGTID(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1409892744, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 10, @@ -1163,7 +1165,7 @@ func TestStreamerParseEventsMariadbBeginGTID(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF { @@ -1186,7 +1188,7 @@ func TestStreamerParseEventsMariadbStandaloneGTID(t *testing.T) { input := []mysql.BinlogEvent{ mysql.NewRotateEvent(f, s, 4, "filename.0001"), mysql.NewFormatDescriptionEvent(f, s), - mysql.NewMariaDBGTIDEvent(f, s, mysql.MariadbGTID{Domain: 0, Sequence: 9}, false /* hasBegin */), + mysql.NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 9}, false /* hasBegin */), mysql.NewQueryEvent(f, s, mysql.Query{ Charset: &binlogdatapb.Charset{Client: 8, Conn: 8, Server: 33}, SQL: "create table if not exists vt_insert_test (\nid bigint auto_increment,\nmsg varchar(64),\nprimary key (id)\n) Engine=InnoDB", @@ -1204,9 +1206,9 @@ func TestStreamerParseEventsMariadbStandaloneGTID(t *testing.T) { }, EventToken: &querypb.EventToken{ Timestamp: 1409892744, - Position: mysql.EncodePosition(mysql.Position{ - GTIDSet: mysql.MariadbGTIDSet{ - 0: mysql.MariadbGTID{ + Position: replication.EncodePosition(replication.Position{ + GTIDSet: replication.MariadbGTIDSet{ + 0: replication.MariadbGTID{ Domain: 0, Server: 62344, Sequence: 9, @@ -1223,7 +1225,7 @@ func TestStreamerParseEventsMariadbStandaloneGTID(t *testing.T) { } dbcfgs := dbconfigs.New(mcp) - bls := NewStreamer(dbcfgs, nil, nil, mysql.Position{}, 0, (&got).sendTransaction) + bls := NewStreamer(dbcfgs, nil, nil, replication.Position{}, 0, (&got).sendTransaction) go sendTestEvents(events, input) if _, err := bls.parseEvents(context.Background(), events, errs); err != ErrServerEOF { diff --git a/go/vt/binlog/binlogplayer/binlog_player.go b/go/vt/binlog/binlogplayer/binlog_player.go index cfefc275269..6d689bc5436 100644 --- a/go/vt/binlog/binlogplayer/binlog_player.go +++ b/go/vt/binlog/binlogplayer/binlog_player.go @@ -35,6 +35,9 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/history" @@ -42,10 +45,11 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/log" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/throttler" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) var ( @@ -58,17 +62,6 @@ var ( BlplQuery = "Query" // BlplTransaction is the key for the stats map. BlplTransaction = "Transaction" - - // VReplicationInit is for the Init state. - VReplicationInit = "Init" - // VReplicationCopying is for the Copying state. - VReplicationCopying = "Copying" - // BlpRunning is for the Running state. - BlpRunning = "Running" - // BlpStopped is for the Stopped state. - BlpStopped = "Stopped" - // BlpError is for the Error state. - BlpError = "Error" ) // Stats is the internal stats of a player. It is a different @@ -81,7 +74,7 @@ type Stats struct { // Last saved status lastPositionMutex sync.Mutex - lastPosition mysql.Position + lastPosition replication.Position heartbeatMutex sync.Mutex heartbeat int64 @@ -104,6 +97,9 @@ type Stats struct { TableCopyRowCounts *stats.CountersWithSingleLabel TableCopyTimings *stats.Timings + + PartialQueryCount *stats.CountersWithMultiLabels + PartialQueryCacheSize *stats.CountersWithMultiLabels } // RecordHeartbeat updates the time the last heartbeat from vstreamer was seen @@ -121,14 +117,14 @@ func (bps *Stats) Heartbeat() int64 { } // SetLastPosition sets the last replication position. -func (bps *Stats) SetLastPosition(pos mysql.Position) { +func (bps *Stats) SetLastPosition(pos replication.Position) { bps.lastPositionMutex.Lock() defer bps.lastPositionMutex.Unlock() bps.lastPosition = pos } // LastPosition gets the last replication position. -func (bps *Stats) LastPosition() mysql.Position { +func (bps *Stats) LastPosition() replication.Position { bps.lastPositionMutex.Lock() defer bps.lastPositionMutex.Unlock() return bps.lastPosition @@ -146,6 +142,11 @@ func (bps *Stats) MessageHistory() []string { return strs } +func (bps *Stats) Stop() { + bps.Rates.Stop() + bps.VReplicationLagRates.Stop() +} + // NewStats creates a new Stats structure. func NewStats() *Stats { bps := &Stats{} @@ -164,6 +165,8 @@ func NewStats() *Stats { bps.VReplicationLagRates = stats.NewRates("", bps.VReplicationLags, 15*60/5, 5*time.Second) bps.TableCopyRowCounts = stats.NewCountersWithSingleLabel("", "", "Table", "") bps.TableCopyTimings = stats.NewTimings("", "", "Table") + bps.PartialQueryCacheSize = stats.NewCountersWithMultiLabels("", "", []string{"type"}) + bps.PartialQueryCount = stats.NewCountersWithMultiLabels("", "", []string{"type"}) return bps } @@ -180,8 +183,8 @@ type BinlogPlayer struct { // common to all uid int32 - position mysql.Position - stopPosition mysql.Position + position replication.Position + stopPosition replication.Position blplStats *Stats defaultCharset *binlogdatapb.Charset currentCharset *binlogdatapb.Charset @@ -226,12 +229,12 @@ func NewBinlogPlayerTables(dbClient DBClient, tablet *topodatapb.Tablet, tables // If an error is encountered, it updates the vreplication state to "Error". // If a stop position was specified, and reached, the state is updated to "Stopped". func (blp *BinlogPlayer) ApplyBinlogEvents(ctx context.Context) error { - if err := blp.setVReplicationState(BlpRunning, ""); err != nil { + if err := blp.setVReplicationState(binlogdatapb.VReplicationWorkflowState_Running, ""); err != nil { log.Errorf("Error writing Running state: %v", err) } if err := blp.applyEvents(ctx); err != nil { - if err := blp.setVReplicationState(BlpError, err.Error()); err != nil { + if err := blp.setVReplicationState(binlogdatapb.VReplicationWorkflowState_Error, err.Error()); err != nil { log.Errorf("Error writing stop state: %v", err) } return err @@ -286,14 +289,14 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { case blp.position.Equal(blp.stopPosition): msg := fmt.Sprintf("not starting BinlogPlayer, we're already at the desired position %v", blp.stopPosition) log.Info(msg) - if err := blp.setVReplicationState(BlpStopped, msg); err != nil { + if err := blp.setVReplicationState(binlogdatapb.VReplicationWorkflowState_Stopped, msg); err != nil { log.Errorf("Error writing stop state: %v", err) } return nil case blp.position.AtLeast(blp.stopPosition): msg := fmt.Sprintf("starting point %v greater than stopping point %v", blp.position, blp.stopPosition) log.Error(msg) - if err := blp.setVReplicationState(BlpStopped, msg); err != nil { + if err := blp.setVReplicationState(binlogdatapb.VReplicationWorkflowState_Stopped, msg); err != nil { log.Errorf("Error writing stop state: %v", err) } // Don't return an error. Otherwise, it will keep retrying. @@ -342,9 +345,9 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { var stream BinlogTransactionStream if len(blp.tables) > 0 { - stream, err = blplClient.StreamTables(ctx, mysql.EncodePosition(blp.position), blp.tables, blp.defaultCharset) + stream, err = blplClient.StreamTables(ctx, replication.EncodePosition(blp.position), blp.tables, blp.defaultCharset) } else { - stream, err = blplClient.StreamKeyRange(ctx, mysql.EncodePosition(blp.position), blp.keyRange, blp.defaultCharset) + stream, err = blplClient.StreamKeyRange(ctx, replication.EncodePosition(blp.position), blp.keyRange, blp.defaultCharset) } if err != nil { err := fmt.Errorf("error sending streaming query to binlog server: %v", err) @@ -393,7 +396,7 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { if blp.position.AtLeast(blp.stopPosition) { msg := "Reached stopping position, done playing logs" log.Info(msg) - if err := blp.setVReplicationState(BlpStopped, msg); err != nil { + if err := blp.setVReplicationState(binlogdatapb.VReplicationWorkflowState_Stopped, msg); err != nil { log.Errorf("Error writing stop state: %v", err) } return nil @@ -439,7 +442,7 @@ func (blp *BinlogPlayer) processTransaction(tx *binlogdatapb.BinlogTransaction) if _, err = blp.exec(string(stmt.Sql)); err == nil { continue } - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.ERLockDeadlock { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERLockDeadlock { // Deadlock: ask for retry log.Infof("Deadlock: %v", err) if err = blp.dbClient.Rollback(); err != nil { @@ -508,15 +511,15 @@ func (blp *BinlogPlayer) writeRecoveryPosition(tx *binlogdatapb.BinlogTransactio return nil } -func (blp *BinlogPlayer) setVReplicationState(state, message string) error { +func (blp *BinlogPlayer) setVReplicationState(state binlogdatapb.VReplicationWorkflowState, message string) error { if message != "" { blp.blplStats.History.Add(&StatsHistoryRecord{ Time: time.Now(), Message: message, }) } - blp.blplStats.State.Store(state) - query := fmt.Sprintf("update _vt.vreplication set state='%v', message=%v where id=%v", state, encodeString(MessageTruncate(message)), blp.uid) + blp.blplStats.State.Store(state.String()) + query := fmt.Sprintf("update _vt.vreplication set state='%v', message=%v where id=%v", state.String(), encodeString(MessageTruncate(message)), blp.uid) if _, err := blp.dbClient.ExecuteFetch(query, 1); err != nil { return fmt.Errorf("could not set state: %v: %v", query, err) } @@ -525,11 +528,11 @@ func (blp *BinlogPlayer) setVReplicationState(state, message string) error { // VRSettings contains the settings of a vreplication table. type VRSettings struct { - StartPos mysql.Position - StopPos mysql.Position + StartPos replication.Position + StopPos replication.Position MaxTPS int64 MaxReplicationLag int64 - State string + State binlogdatapb.VReplicationWorkflowState WorkflowType binlogdatapb.VReplicationWorkflowType WorkflowSubType binlogdatapb.VReplicationWorkflowSubType WorkflowName string @@ -552,7 +555,7 @@ func ReadVRSettings(dbClient DBClient, uid int32) (VRSettings, error) { maxTPS, err := vrRow.ToInt64("max_tps") if err != nil { - return VRSettings{}, fmt.Errorf("failed to parse max_tps column2: %v", err) + return VRSettings{}, fmt.Errorf("failed to parse max_tps column: %v", err) } maxReplicationLag, err := vrRow.ToInt64("max_replication_lag") if err != nil { @@ -562,7 +565,7 @@ func ReadVRSettings(dbClient DBClient, uid int32) (VRSettings, error) { if err != nil { return VRSettings{}, fmt.Errorf("failed to parse pos column: %v", err) } - stopPos, err := mysql.DecodePosition(vrRow.AsString("stop_pos", "")) + stopPos, err := replication.DecodePosition(vrRow.AsString("stop_pos", "")) if err != nil { return VRSettings{}, fmt.Errorf("failed to parse stop_pos column: %v", err) } @@ -583,7 +586,7 @@ func ReadVRSettings(dbClient DBClient, uid int32) (VRSettings, error) { StopPos: stopPos, MaxTPS: maxTPS, MaxReplicationLag: maxReplicationLag, - State: vrRow.AsString("state", ""), + State: binlogdatapb.VReplicationWorkflowState(binlogdatapb.VReplicationWorkflowState_value[vrRow.AsString("state", "")]), WorkflowType: binlogdatapb.VReplicationWorkflowType(workflowType), WorkflowName: vrRow.AsString("workflow", ""), WorkflowSubType: binlogdatapb.VReplicationWorkflowSubType(workflowSubType), @@ -599,23 +602,23 @@ func CreateVReplication(workflow string, source *binlogdatapb.BinlogSource, posi "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys) "+ "values (%v, %v, %v, %v, %v, %v, 0, '%v', %v, %d, %d, %v)", encodeString(workflow), encodeString(source.String()), encodeString(position), maxTPS, maxReplicationLag, - timeUpdated, BlpRunning, encodeString(dbName), workflowType, workflowSubType, deferSecondaryKeys) + timeUpdated, binlogdatapb.VReplicationWorkflowState_Running.String(), encodeString(dbName), workflowType, workflowSubType, deferSecondaryKeys) } // CreateVReplicationState returns a statement to create a stopped vreplication. -func CreateVReplicationState(workflow string, source *binlogdatapb.BinlogSource, position, state string, dbName string, +func CreateVReplicationState(workflow string, source *binlogdatapb.BinlogSource, position string, state binlogdatapb.VReplicationWorkflowState, dbName string, workflowType binlogdatapb.VReplicationWorkflowType, workflowSubType binlogdatapb.VReplicationWorkflowSubType) string { return fmt.Sprintf("insert into _vt.vreplication "+ "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type) "+ "values (%v, %v, %v, %v, %v, %v, 0, '%v', %v, %d, %d)", encodeString(workflow), encodeString(source.String()), encodeString(position), throttler.MaxRateModuleDisabled, - throttler.ReplicationLagModuleDisabled, time.Now().Unix(), state, encodeString(dbName), + throttler.ReplicationLagModuleDisabled, time.Now().Unix(), state.String(), encodeString(dbName), workflowType, workflowSubType) } // GenerateUpdatePos returns a statement to record the latest processed gtid in the _vt.vreplication table. -func GenerateUpdatePos(uid int32, pos mysql.Position, timeUpdated int64, txTimestamp int64, rowsCopied int64, compress bool) string { - strGTID := encodeString(mysql.EncodePosition(pos)) +func GenerateUpdatePos(uid int32, pos replication.Position, timeUpdated int64, txTimestamp int64, rowsCopied int64, compress bool) string { + strGTID := encodeString(replication.EncodePosition(pos)) if compress { strGTID = fmt.Sprintf("compress(%s)", strGTID) } @@ -653,21 +656,21 @@ func GenerateUpdateTimeThrottled(uid int32, timeThrottledUnix int64, componentTh func StartVReplication(uid int32) string { return fmt.Sprintf( "update _vt.vreplication set state='%v', stop_pos=NULL where id=%v", - BlpRunning, uid) + binlogdatapb.VReplicationWorkflowState_Running.String(), uid) } // StartVReplicationUntil returns a statement to start the replication with a stop position. func StartVReplicationUntil(uid int32, pos string) string { return fmt.Sprintf( "update _vt.vreplication set state='%v', stop_pos=%v where id=%v", - BlpRunning, encodeString(pos), uid) + binlogdatapb.VReplicationWorkflowState_Running.String(), encodeString(pos), uid) } // StopVReplication returns a statement to stop the replication. func StopVReplication(uid int32, message string) string { return fmt.Sprintf( "update _vt.vreplication set state='%v', message=%v where id=%v", - BlpStopped, encodeString(MessageTruncate(message)), uid) + binlogdatapb.VReplicationWorkflowState_Stopped.String(), encodeString(MessageTruncate(message)), uid) } // DeleteVReplication returns a statement to delete the replication. @@ -736,12 +739,12 @@ func MysqlUncompress(input string) []byte { } // DecodePosition attempts to uncompress the passed value first and if it fails tries to decode it as a valid GTID -func DecodePosition(gtid string) (mysql.Position, error) { +func DecodePosition(gtid string) (replication.Position, error) { b := MysqlUncompress(gtid) if b != nil { gtid = string(b) } - return mysql.DecodePosition(gtid) + return replication.DecodePosition(gtid) } // StatsHistoryRecord is used to store a Message with timestamp diff --git a/go/vt/binlog/binlogplayer/binlog_player_test.go b/go/vt/binlog/binlogplayer/binlog_player_test.go index 20f75430644..148c4fb386b 100644 --- a/go/vt/binlog/binlogplayer/binlog_player_test.go +++ b/go/vt/binlog/binlogplayer/binlog_player_test.go @@ -17,15 +17,15 @@ limitations under the License. package binlogplayer import ( + "context" "errors" "testing" "time" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" querypb "vitess.io/vitess/go/vt/proto/query" - "context" - - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/throttler" @@ -54,11 +54,11 @@ var ( sqltypes.NULL, // stop_pos sqltypes.NewInt64(9223372036854775807), // max_tps sqltypes.NewInt64(9223372036854775807), // max_replication_lag - sqltypes.NewVarBinary("Running"), // state - sqltypes.NewInt64(1), // workflow_type - sqltypes.NewVarChar("wf"), // workflow - sqltypes.NewInt64(0), // workflow_sub_type - sqltypes.NewInt64(0), // defer_secondary_keys + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), // state + sqltypes.NewInt64(1), // workflow_type + sqltypes.NewVarChar("wf"), // workflow + sqltypes.NewInt64(0), // workflow_sub_type + sqltypes.NewInt64(0), // defer_secondary_keys }, }, } @@ -86,7 +86,9 @@ func TestNewBinlogPlayerKeyRange(t *testing.T) { } wantKeyRange := &topodatapb.KeyRange{End: []byte{0x80}} - blp := NewBinlogPlayerKeyRange(dbClient, wantTablet, wantKeyRange, 1, NewStats()) + stats := NewStats() + defer stats.Stop() + blp := NewBinlogPlayerKeyRange(dbClient, wantTablet, wantKeyRange, 1, stats) errfunc := applyEvents(blp) dbClient.Wait() @@ -117,7 +119,9 @@ func TestNewBinlogPlayerTables(t *testing.T) { } wantTables := []string{"a", "b"} - blp := NewBinlogPlayerTables(dbClient, wantTablet, wantTables, 1, NewStats()) + stats := NewStats() + defer stats.Stop() + blp := NewBinlogPlayerTables(dbClient, wantTablet, wantTables, 1, stats) errfunc := applyEvents(blp) dbClient.Wait() @@ -138,7 +142,9 @@ func TestApplyEventsFail(t *testing.T) { _ = newFakeBinlogClient() - blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, NewStats()) + stats := NewStats() + defer stats.Stop() + blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, stats) errfunc := applyEvents(blp) dbClient.Wait() @@ -171,15 +177,15 @@ func TestStopPosEqual(t *testing.T) { InsertID: 0, Rows: [][]sqltypes.Value{ { - sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos - sqltypes.NewVarBinary("MariaDB/0-1-1083"), // stop_pos - sqltypes.NewInt64(9223372036854775807), // max_tps - sqltypes.NewInt64(9223372036854775807), // max_replication_lag - sqltypes.NewVarBinary("Running"), // state - sqltypes.NewInt64(1), // workflow_type - sqltypes.NewVarChar("wf"), // workflow - sqltypes.NewInt64(1), // workflow_sub_type - sqltypes.NewInt64(1), // defer_secondary_keys + sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos + sqltypes.NewVarBinary("MariaDB/0-1-1083"), // stop_pos + sqltypes.NewInt64(9223372036854775807), // max_tps + sqltypes.NewInt64(9223372036854775807), // max_replication_lag + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), // state + sqltypes.NewInt64(1), // workflow_type + sqltypes.NewVarChar("wf"), // workflow + sqltypes.NewInt64(1), // workflow_sub_type + sqltypes.NewInt64(1), // defer_secondary_keys }, }, } @@ -188,7 +194,9 @@ func TestStopPosEqual(t *testing.T) { _ = newFakeBinlogClient() - blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, NewStats()) + stats := NewStats() + defer stats.Stop() + blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, stats) errfunc := applyEvents(blp) dbClient.Wait() @@ -208,15 +216,15 @@ func TestStopPosLess(t *testing.T) { InsertID: 0, Rows: [][]sqltypes.Value{ { - sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos - sqltypes.NewVarBinary("MariaDB/0-1-1082"), // stop_pos - sqltypes.NewInt64(9223372036854775807), // max_tps - sqltypes.NewInt64(9223372036854775807), // max_replication_lag - sqltypes.NewVarBinary("Running"), // state - sqltypes.NewInt64(1), // workflow_type - sqltypes.NewVarChar("wf"), // workflow - sqltypes.NewInt64(1), // workflow_sub_type - sqltypes.NewInt64(1), // defer_secondary_keys + sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos + sqltypes.NewVarBinary("MariaDB/0-1-1082"), // stop_pos + sqltypes.NewInt64(9223372036854775807), // max_tps + sqltypes.NewInt64(9223372036854775807), // max_replication_lag + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), // state + sqltypes.NewInt64(1), // workflow_type + sqltypes.NewVarChar("wf"), // workflow + sqltypes.NewInt64(1), // workflow_sub_type + sqltypes.NewInt64(1), // defer_secondary_keys }, }, } @@ -225,7 +233,9 @@ func TestStopPosLess(t *testing.T) { _ = newFakeBinlogClient() - blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, NewStats()) + stats := NewStats() + defer stats.Stop() + blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, stats) errfunc := applyEvents(blp) dbClient.Wait() @@ -245,15 +255,15 @@ func TestStopPosGreater(t *testing.T) { InsertID: 0, Rows: [][]sqltypes.Value{ { - sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos - sqltypes.NewVarBinary("MariaDB/0-1-1085"), // stop_pos - sqltypes.NewInt64(9223372036854775807), // max_tps - sqltypes.NewInt64(9223372036854775807), // max_replication_lag - sqltypes.NewVarBinary("Running"), // state - sqltypes.NewInt64(1), // workflow_type - sqltypes.NewVarChar("wf"), // workflow - sqltypes.NewInt64(1), // workflow_sub_type - sqltypes.NewInt64(1), // defer_secondary_keys + sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos + sqltypes.NewVarBinary("MariaDB/0-1-1085"), // stop_pos + sqltypes.NewInt64(9223372036854775807), // max_tps + sqltypes.NewInt64(9223372036854775807), // max_replication_lag + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), // state + sqltypes.NewInt64(1), // workflow_type + sqltypes.NewVarChar("wf"), // workflow + sqltypes.NewInt64(1), // workflow_sub_type + sqltypes.NewInt64(1), // defer_secondary_keys }, }, } @@ -266,7 +276,9 @@ func TestStopPosGreater(t *testing.T) { _ = newFakeBinlogClient() - blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, NewStats()) + stats := NewStats() + defer stats.Stop() + blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, stats) errfunc := applyEvents(blp) dbClient.Wait() @@ -286,15 +298,15 @@ func TestContextCancel(t *testing.T) { InsertID: 0, Rows: [][]sqltypes.Value{ { - sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos - sqltypes.NewVarBinary("MariaDB/0-1-1085"), // stop_pos - sqltypes.NewInt64(9223372036854775807), // max_tps - sqltypes.NewInt64(9223372036854775807), // max_replication_lag - sqltypes.NewVarBinary("Running"), // state - sqltypes.NewInt64(1), // workflow_type - sqltypes.NewVarChar("wf"), // workflow - sqltypes.NewInt64(1), // workflow_sub_type - sqltypes.NewInt64(1), // defer_secondary_keys + sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos + sqltypes.NewVarBinary("MariaDB/0-1-1085"), // stop_pos + sqltypes.NewInt64(9223372036854775807), // max_tps + sqltypes.NewInt64(9223372036854775807), // max_replication_lag + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), // state + sqltypes.NewInt64(1), // workflow_type + sqltypes.NewVarChar("wf"), // workflow + sqltypes.NewInt64(1), // workflow_sub_type + sqltypes.NewInt64(1), // defer_secondary_keys }, }, } @@ -307,7 +319,9 @@ func TestContextCancel(t *testing.T) { _ = newFakeBinlogClient() - blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, NewStats()) + stats := NewStats() + defer stats.Stop() + blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, stats) errfunc := applyEvents(blp) dbClient.Wait() @@ -326,7 +340,7 @@ func TestRetryOnDeadlock(t *testing.T) { dbClient := NewMockDBClient(t) dbClient.ExpectRequest("update _vt.vreplication set state='Running', message='' where id=1", testDMLResponse, nil) dbClient.ExpectRequest("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type, defer_secondary_keys from _vt.vreplication where id=1", testSettingsResponse, nil) - deadlocked := &mysql.SQLError{Num: 1213, Message: "deadlocked"} + deadlocked := &sqlerror.SQLError{Num: 1213, Message: "deadlocked"} dbClient.ExpectRequest("begin", nil, nil) dbClient.ExpectRequest("insert into t values(1)", nil, deadlocked) dbClient.ExpectRequest("rollback", nil, nil) @@ -335,7 +349,9 @@ func TestRetryOnDeadlock(t *testing.T) { dbClient.ExpectRequestRE("update _vt.vreplication set pos='MariaDB/0-1-1235', time_updated=.*", testDMLResponse, nil) dbClient.ExpectRequest("commit", nil, nil) - blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, NewStats()) + stats := NewStats() + defer stats.Stop() + blp := NewBinlogPlayerTables(dbClient, nil, []string{"a"}, 1, stats) blp.deadlockRetry = 10 * time.Millisecond errfunc := applyEvents(blp) @@ -400,24 +416,24 @@ func TestCreateVReplicationTables(t *testing.T) { } func TestUpdateVReplicationPos(t *testing.T) { - gtid := mysql.MustParseGTID("MariaDB", "0-1-8283") + gtid := replication.MustParseGTID("MariaDB", "0-1-8283") want := "update _vt.vreplication " + "set pos='MariaDB/0-1-8283', time_updated=88822, rows_copied=0, message='' " + "where id=78522" - got := GenerateUpdatePos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 0, 0, false) + got := GenerateUpdatePos(78522, replication.Position{GTIDSet: gtid.GTIDSet()}, 88822, 0, 0, false) if got != want { t.Errorf("updateVReplicationPos() = %#v, want %#v", got, want) } } func TestUpdateVReplicationTimestamp(t *testing.T) { - gtid := mysql.MustParseGTID("MariaDB", "0-2-582") + gtid := replication.MustParseGTID("MariaDB", "0-2-582") want := "update _vt.vreplication " + "set pos='MariaDB/0-2-582', time_updated=88822, transaction_timestamp=481828, rows_copied=0, message='' " + "where id=78522" - got := GenerateUpdatePos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 481828, 0, false) + got := GenerateUpdatePos(78522, replication.Position{GTIDSet: gtid.GTIDSet()}, 88822, 481828, 0, false) if got != want { t.Errorf("updateVReplicationPos() = %#v, want %#v", got, want) } diff --git a/go/vt/binlog/binlogplayer/dbclient.go b/go/vt/binlog/binlogplayer/dbclient.go index 5ad73b686a6..f9cd03691a5 100644 --- a/go/vt/binlog/binlogplayer/dbclient.go +++ b/go/vt/binlog/binlogplayer/dbclient.go @@ -20,10 +20,13 @@ import ( "context" "fmt" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" ) // DBClient is a high level interface to the database. @@ -43,15 +46,28 @@ type dbClientImpl struct { dbConn *mysql.Conn } +// dbClientImplWithSidecarDBReplacement is a DBClient implementation +// that serves primarily as a pass-through to dbClientImpl, with the +// exception of ExecuteFetch, where it first replaces any default +// sidecar database qualifiers with the actual one in use on the tablet. +type dbClientImplWithSidecarDBReplacement struct { + dbClientImpl +} + // NewDBClient creates a DBClient instance func NewDBClient(params dbconfigs.Connector) DBClient { + if sidecar.GetName() != sidecar.DefaultName { + return &dbClientImplWithSidecarDBReplacement{ + dbClientImpl{dbConfig: params}, + } + } return &dbClientImpl{ dbConfig: params, } } func (dc *dbClientImpl) handleError(err error) { - if mysql.IsConnErr(err) { + if sqlerror.IsConnErr(err) { dc.Close() } } @@ -123,3 +139,12 @@ func (dc *dbClientImpl) ExecuteFetch(query string, maxrows int) (*sqltypes.Resul } return mqr, nil } + +func (dcr *dbClientImplWithSidecarDBReplacement) ExecuteFetch(query string, maxrows int) (*sqltypes.Result, error) { + // Replace any provided sidecar database qualifiers with the correct one. + uq, err := sqlparser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) + if err != nil { + return nil, err + } + return dcr.dbClientImpl.ExecuteFetch(uq, maxrows) +} diff --git a/go/vt/binlog/binlogplayer/mock_dbclient.go b/go/vt/binlog/binlogplayer/mock_dbclient.go index 50df683976d..d64c4d40146 100644 --- a/go/vt/binlog/binlogplayer/mock_dbclient.go +++ b/go/vt/binlog/binlogplayer/mock_dbclient.go @@ -17,8 +17,10 @@ limitations under the License. package binlogplayer import ( + "fmt" "regexp" "strings" + "sync" "testing" "time" @@ -34,9 +36,11 @@ type MockDBClient struct { t *testing.T UName string expect []*mockExpect + expectMu sync.Mutex currentResult int done chan struct{} invariants map[string]*sqltypes.Result + Tag string } type mockExpect struct { @@ -56,6 +60,28 @@ func NewMockDBClient(t *testing.T) *MockDBClient { "CREATE TABLE IF NOT EXISTS _vt.vreplication_log": {}, "select id, type, state, message from _vt.vreplication_log": {}, "insert into _vt.vreplication_log": {}, + // The following statements don't have a deterministic order as they are + // executed in the normal program flow, but ALSO done in a defer as a protective + // measure as they are resetting the values back to the original one. This also + // means that the values they set are based on the session defaults, which can + // change. So we make these invariants for unit test stability. + "select @@foreign_key_checks": sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "@@foreign_key_checks", + "int64", + ), + "1", + ), + "set @@session.foreign_key_checks": {}, + "set foreign_key_checks": {}, + "select @@session.sql_mode": sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "sql_mode", "varchar", + ), + "ONLY_FULL_GROUP_BY,NO_AUTO_VALUE_ON_ZERO,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION", + ), + "set @@session.sql_mode": {}, + "set sql_mode": {}, }, } } @@ -77,6 +103,8 @@ func (dc *MockDBClient) ExpectRequest(query string, result *sqltypes.Result, err dc.done = make(chan struct{}) default: } + dc.expectMu.Lock() + defer dc.expectMu.Unlock() dc.expect = append(dc.expect, &mockExpect{ query: query, result: result, @@ -93,6 +121,8 @@ func (dc *MockDBClient) ExpectRequestRE(queryRE string, result *sqltypes.Result, dc.done = make(chan struct{}) default: } + dc.expectMu.Lock() + defer dc.expectMu.Unlock() dc.expect = append(dc.expect, &mockExpect{ query: queryRE, re: regexp.MustCompile(queryRE), @@ -149,25 +179,43 @@ func (dc *MockDBClient) Close() { // ExecuteFetch is part of the DBClient interface func (dc *MockDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Result, err error) { dc.t.Helper() - dc.t.Logf("DBClient query: %v", query) + msg := "DBClient query: %v" + if dc.Tag != "" { + msg = fmt.Sprintf("[%s] %s", dc.Tag, msg) + } + dc.t.Logf(msg, query) for q, result := range dc.invariants { - if strings.Contains(query, q) { + if strings.Contains(strings.ToLower(query), strings.ToLower(q)) { return result, nil } } + dc.expectMu.Lock() + defer dc.expectMu.Unlock() if dc.currentResult >= len(dc.expect) { - dc.t.Fatalf("DBClientMock: query: %s, no more requests are expected", query) + msg := "DBClientMock: query: %s, no more requests are expected" + if dc.Tag != "" { + msg = fmt.Sprintf("[%s] %s", dc.Tag, msg) + } + dc.t.Fatalf(msg, query) } result := dc.expect[dc.currentResult] if result.re == nil { if query != result.query { - dc.t.Fatalf("DBClientMock: query: %s, want %s", query, result.query) + msg := "DBClientMock: query: %s, want %s" + if dc.Tag != "" { + msg = fmt.Sprintf("[%s] %s", dc.Tag, msg) + } + dc.t.Fatalf(msg, query, result.query) } } else { if !result.re.MatchString(query) { - dc.t.Fatalf("DBClientMock: query: %s, must match %s", query, result.query) + msg := "DBClientMock: query: %s, must match %s" + if dc.Tag != "" { + msg = fmt.Sprintf("[%s] %s", dc.Tag, msg) + } + dc.t.Fatalf(msg, query, result.query) } } dc.currentResult++ diff --git a/go/vt/binlog/event_streamer.go b/go/vt/binlog/event_streamer.go index a8cce64a0c9..a872b089bff 100644 --- a/go/vt/binlog/event_streamer.go +++ b/go/vt/binlog/event_streamer.go @@ -17,14 +17,13 @@ limitations under the License. package binlog import ( + "context" "encoding/base64" "fmt" "strconv" "strings" - "context" - - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" @@ -52,7 +51,7 @@ type EventStreamer struct { } // NewEventStreamer returns a new EventStreamer on top of a Streamer -func NewEventStreamer(cp dbconfigs.Connector, se *schema.Engine, startPos mysql.Position, timestamp int64, sendEvent sendEventFunc) *EventStreamer { +func NewEventStreamer(cp dbconfigs.Connector, se *schema.Engine, startPos replication.Position, timestamp int64, sendEvent sendEventFunc) *EventStreamer { evs := &EventStreamer{ sendEvent: sendEvent, } diff --git a/go/vt/binlog/eventtoken/compare.go b/go/vt/binlog/eventtoken/compare.go index 2fe908527d2..e1c9501a8dc 100644 --- a/go/vt/binlog/eventtoken/compare.go +++ b/go/vt/binlog/eventtoken/compare.go @@ -19,7 +19,7 @@ limitations under the License. package eventtoken import ( - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -45,11 +45,11 @@ func Fresher(ev1, ev2 *querypb.EventToken) int { } // We can parse them. - pos1, err := mysql.DecodePosition(ev1.Position) + pos1, err := replication.DecodePosition(ev1.Position) if err != nil { return -1 } - pos2, err := mysql.DecodePosition(ev2.Position) + pos2, err := replication.DecodePosition(ev2.Position) if err != nil { return -1 } diff --git a/go/vt/binlog/updatestreamctl.go b/go/vt/binlog/updatestreamctl.go index 7ece45cda9c..78d61c0860c 100644 --- a/go/vt/binlog/updatestreamctl.go +++ b/go/vt/binlog/updatestreamctl.go @@ -17,13 +17,12 @@ limitations under the License. package binlog import ( + "context" "fmt" "sync" "sync/atomic" - "context" - - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/dbconfigs" @@ -250,7 +249,7 @@ func (updateStream *UpdateStreamImpl) IsEnabled() bool { // StreamKeyRange is part of the UpdateStream interface func (updateStream *UpdateStreamImpl) StreamKeyRange(ctx context.Context, position string, keyRange *topodatapb.KeyRange, charset *binlogdatapb.Charset, callback func(trans *binlogdatapb.BinlogTransaction) error) (err error) { - pos, err := mysql.DecodePosition(position) + pos, err := replication.DecodePosition(position) if err != nil { return err } @@ -290,7 +289,7 @@ func (updateStream *UpdateStreamImpl) StreamKeyRange(ctx context.Context, positi // StreamTables is part of the UpdateStream interface func (updateStream *UpdateStreamImpl) StreamTables(ctx context.Context, position string, tables []string, charset *binlogdatapb.Charset, callback func(trans *binlogdatapb.BinlogTransaction) error) (err error) { - pos, err := mysql.DecodePosition(position) + pos, err := replication.DecodePosition(position) if err != nil { return err } diff --git a/go/vt/callinfo/callinfo.go b/go/vt/callinfo/callinfo.go index 6d68a55a0bd..e20cd8a53a7 100644 --- a/go/vt/callinfo/callinfo.go +++ b/go/vt/callinfo/callinfo.go @@ -19,9 +19,9 @@ limitations under the License. package callinfo import ( - "html/template" - "context" + + "github.com/google/safehtml" ) // CallInfo is the extra data stored in the Context @@ -36,7 +36,7 @@ type CallInfo interface { Text() string // HTML represents this rpc call connection in a web-friendly way. - HTML() template.HTML + HTML() safehtml.HTML } // internal type and value @@ -57,8 +57,8 @@ func FromContext(ctx context.Context) (CallInfo, bool) { // HTMLFromContext returns that value of HTML() from the context, or "" if we're // not able to recover one -func HTMLFromContext(ctx context.Context) template.HTML { - var h template.HTML +func HTMLFromContext(ctx context.Context) safehtml.HTML { + var h safehtml.HTML ci, ok := FromContext(ctx) if ok { return ci.HTML() diff --git a/go/vt/callinfo/fakecallinfo/fakecallinfo.go b/go/vt/callinfo/fakecallinfo/fakecallinfo.go index 902f0723d77..3417f2195a6 100644 --- a/go/vt/callinfo/fakecallinfo/fakecallinfo.go +++ b/go/vt/callinfo/fakecallinfo/fakecallinfo.go @@ -18,7 +18,8 @@ package fakecallinfo import ( "fmt" - "html/template" + + "github.com/google/safehtml" ) // FakeCallInfo gives a fake Callinfo usable in callinfo @@ -26,7 +27,7 @@ type FakeCallInfo struct { Remote string Method string User string - Html string + Html safehtml.HTML } // RemoteAddr returns the remote address. @@ -45,6 +46,6 @@ func (fci *FakeCallInfo) Text() string { } // HTML returns the html. -func (fci *FakeCallInfo) HTML() template.HTML { - return template.HTML(fci.Html) +func (fci *FakeCallInfo) HTML() safehtml.HTML { + return fci.Html } diff --git a/go/vt/callinfo/plugin_grpc.go b/go/vt/callinfo/plugin_grpc.go index ebd0b797100..e402f2f7966 100644 --- a/go/vt/callinfo/plugin_grpc.go +++ b/go/vt/callinfo/plugin_grpc.go @@ -19,11 +19,11 @@ package callinfo // This file implements the CallInfo interface for gRPC contexts. import ( - "fmt" - "html/template" - "context" + "fmt" + "github.com/google/safehtml" + "github.com/google/safehtml/template" "google.golang.org/grpc" "google.golang.org/grpc/peer" ) @@ -64,6 +64,18 @@ func (gci *gRPCCallInfoImpl) Text() string { return fmt.Sprintf("%s:%s(gRPC)", gci.remoteAddr, gci.method) } -func (gci *gRPCCallInfoImpl) HTML() template.HTML { - return template.HTML("Method: " + gci.method + " Remote Addr: " + gci.remoteAddr) +var grpcTmpl = template.Must(template.New("tcs").Parse("Method: {{.Method}} Remote Addr: {{.RemoteAddr}}")) + +func (gci *gRPCCallInfoImpl) HTML() safehtml.HTML { + html, err := grpcTmpl.ExecuteToHTML(struct { + Method string + RemoteAddr string + }{ + Method: gci.method, + RemoteAddr: gci.remoteAddr, + }) + if err != nil { + panic(err) + } + return html } diff --git a/go/vt/callinfo/plugin_mysql.go b/go/vt/callinfo/plugin_mysql.go index b0a9a971279..37f743c89f0 100644 --- a/go/vt/callinfo/plugin_mysql.go +++ b/go/vt/callinfo/plugin_mysql.go @@ -19,10 +19,11 @@ package callinfo // This file implements the CallInfo interface for Mysql contexts. import ( + "context" "fmt" - "html/template" - "context" + "github.com/google/safehtml" + "github.com/google/safehtml/template" "vitess.io/vitess/go/mysql" ) @@ -53,6 +54,18 @@ func (mci *mysqlCallInfoImpl) Text() string { return fmt.Sprintf("%s@%s(Mysql)", mci.user, mci.remoteAddr) } -func (mci *mysqlCallInfoImpl) HTML() template.HTML { - return template.HTML("MySQL User: " + mci.user + " Remote Addr: " + mci.remoteAddr) +var mysqlTmpl = template.Must(template.New("tcs").Parse("MySQL User: {{.MySQLUser}} Remote Addr: {{.RemoteAddr}}")) + +func (mci *mysqlCallInfoImpl) HTML() safehtml.HTML { + html, err := mysqlTmpl.ExecuteToHTML(struct { + MySQLUser string + RemoteAddr string + }{ + MySQLUser: mci.user, + RemoteAddr: mci.remoteAddr, + }) + if err != nil { + panic(err) + } + return html } diff --git a/go/vt/dbconfigs/credentials.go b/go/vt/dbconfigs/credentials.go index 5a5dbc1c1a1..4e0e5518869 100644 --- a/go/vt/dbconfigs/credentials.go +++ b/go/vt/dbconfigs/credentials.go @@ -61,7 +61,6 @@ var ( "mysqlctld", "vtbackup", "vtcombo", - "vtgr", "vttablet", } ) diff --git a/go/vt/dbconfigs/dbconfigs.go b/go/vt/dbconfigs/dbconfigs.go index 371892144d3..fe3a228835c 100644 --- a/go/vt/dbconfigs/dbconfigs.go +++ b/go/vt/dbconfigs/dbconfigs.go @@ -216,12 +216,12 @@ func (dbcfgs *DBConfigs) AppDebugWithDB() Connector { return dbcfgs.makeParams(&dbcfgs.appdebugParams, true) } -// AllPrivsConnector returns connection parameters for appdebug with no dbname set. +// AllPrivsConnector returns connection parameters for allprivs with no dbname set. func (dbcfgs *DBConfigs) AllPrivsConnector() Connector { return dbcfgs.makeParams(&dbcfgs.allprivsParams, false) } -// AllPrivsWithDB returns connection parameters for appdebug with dbname set. +// AllPrivsWithDB returns connection parameters for allprivs with dbname set. func (dbcfgs *DBConfigs) AllPrivsWithDB() Connector { return dbcfgs.makeParams(&dbcfgs.allprivsParams, true) } @@ -416,6 +416,6 @@ func NewTestDBConfigs(genParams, appDebugParams mysql.ConnParams, dbname string) replParams: genParams, externalReplParams: genParams, DBName: dbname, - Charset: "utf8mb4_general_ci", + Charset: "", } } diff --git a/go/vt/dbconnpool/connection.go b/go/vt/dbconnpool/connection.go index bdf74b8a429..8e9a0f4a5c0 100644 --- a/go/vt/dbconnpool/connection.go +++ b/go/vt/dbconnpool/connection.go @@ -21,6 +21,7 @@ import ( "fmt" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" ) @@ -112,7 +113,7 @@ func (dbc *DBConnection) ExecuteStreamFetch(query string, callback func(*sqltype } func (dbc *DBConnection) handleError(err error) { - if mysql.IsConnErr(err) { + if sqlerror.IsConnErr(err) { dbc.Close() } } diff --git a/go/vt/dbconnpool/connection_pool.go b/go/vt/dbconnpool/connection_pool.go index bb339862dcf..e8e4acce017 100644 --- a/go/vt/dbconnpool/connection_pool.go +++ b/go/vt/dbconnpool/connection_pool.go @@ -22,6 +22,7 @@ object to pool these DBConnections. package dbconnpool import ( + "context" "errors" "net" "sync" @@ -29,8 +30,6 @@ import ( "vitess.io/vitess/go/netutil" - "context" - "vitess.io/vitess/go/pools" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/dbconfigs" diff --git a/go/vt/discovery/fake_healthcheck.go b/go/vt/discovery/fake_healthcheck.go index 014c6b69a61..cb959902c19 100644 --- a/go/vt/discovery/fake_healthcheck.go +++ b/go/vt/discovery/fake_healthcheck.go @@ -176,6 +176,11 @@ func (fhc *FakeHealthCheck) SetTabletType(tablet *topodatapb.Tablet, tabletType func (fhc *FakeHealthCheck) Unsubscribe(c chan *TabletHealth) { } +// GetLoadTabletsTrigger is not implemented. +func (fhc *FakeHealthCheck) GetLoadTabletsTrigger() chan struct{} { + return nil +} + // AddTablet adds the tablet. func (fhc *FakeHealthCheck) AddTablet(tablet *topodatapb.Tablet) { key := TabletToMapKey(tablet) @@ -360,12 +365,24 @@ func (fhc *FakeHealthCheck) GetAllTablets() map[string]*topodatapb.Tablet { return res } +// BroadcastAll broadcasts all the tablets' healthchecks +func (fhc *FakeHealthCheck) BroadcastAll() { + if fhc.ch == nil { + return + } + fhc.mu.Lock() + defer fhc.mu.Unlock() + for _, item := range fhc.items { + fhc.ch <- simpleCopy(item.ts) + } +} + func simpleCopy(th *TabletHealth) *TabletHealth { return &TabletHealth{ Conn: th.Conn, - Tablet: proto.Clone(th.Tablet).(*topodatapb.Tablet), - Target: proto.Clone(th.Target).(*querypb.Target), - Stats: proto.Clone(th.Stats).(*querypb.RealtimeStats), + Tablet: th.Tablet.CloneVT(), + Target: th.Target.CloneVT(), + Stats: th.Stats.CloneVT(), LastError: th.LastError, PrimaryTermStartTime: th.PrimaryTermStartTime, Serving: th.Serving, diff --git a/go/vt/discovery/healthcheck.go b/go/vt/discovery/healthcheck.go index f6eb896db72..9d17005d0ad 100644 --- a/go/vt/discovery/healthcheck.go +++ b/go/vt/discovery/healthcheck.go @@ -37,13 +37,14 @@ import ( "encoding/json" "fmt" "hash/crc32" - "html/template" "net/http" "sort" "strings" "sync" "time" + "github.com/google/safehtml/template" + "github.com/google/safehtml/template/uncheckedconversions" "github.com/spf13/pflag" "vitess.io/vitess/go/netutil" @@ -55,6 +56,7 @@ import ( "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/queryservice" ) @@ -126,8 +128,8 @@ const (
{{range $i, $ts := .}} - - + + @@ -140,7 +142,7 @@ const ( // ParseTabletURLTemplateFromFlag loads or reloads the URL template. func ParseTabletURLTemplateFromFlag() { tabletURLTemplate = template.New("") - _, err := tabletURLTemplate.Parse(TabletURLTemplateString) + _, err := tabletURLTemplate.ParseFromTrustedTemplate(uncheckedconversions.TrustedTemplateFromStringKnownToSatisfyTypeContract(TabletURLTemplateString)) if err != nil { log.Exitf("error parsing template: %v", err) } @@ -228,21 +230,19 @@ type HealthCheck interface { // Unsubscribe removes a listener. Unsubscribe(c chan *TabletHealth) + + // GetLoadTabletsTrigger returns a channel that is used to inform when to load tablets. + GetLoadTabletsTrigger() chan struct{} } var _ HealthCheck = (*HealthCheckImpl)(nil) -// Target includes cell which we ignore here +// KeyFromTarget includes cell which we ignore here // because tabletStatsCache is intended to be per-cell func KeyFromTarget(target *query.Target) KeyspaceShardTabletType { return KeyspaceShardTabletType(fmt.Sprintf("%s.%s.%s", target.Keyspace, target.Shard, topoproto.TabletTypeLString(target.TabletType))) } -// KeyFromTablet returns the KeyspaceShardTabletType that matches the given topodata.Tablet -func KeyFromTablet(tablet *topodata.Tablet) KeyspaceShardTabletType { - return KeyspaceShardTabletType(fmt.Sprintf("%s.%s.%s", tablet.Keyspace, tablet.Shard, topoproto.TabletTypeLString(tablet.Type))) -} - // HealthCheckImpl performs health checking and stores the results. // The goal of this object is to maintain a StreamHealth RPC // to a lot of tablets. Tablets are added / removed by calling the @@ -282,6 +282,8 @@ type HealthCheckImpl struct { subMu sync.Mutex // subscribers subscribers map[chan *TabletHealth]struct{} + // loadTablets trigger is used to immediately load a new primary tablet when the current one has been demoted + loadTabletsTrigger chan struct{} } // NewHealthCheck creates a new HealthCheck object. @@ -321,6 +323,7 @@ func NewHealthCheck(ctx context.Context, retryDelay, healthCheckTimeout time.Dur healthy: make(map[KeyspaceShardTabletType][]*TabletHealth), subscribers: make(map[chan *TabletHealth]struct{}), cellAliases: make(map[string]string), + loadTabletsTrigger: make(chan struct{}), } var topoWatchers []*TopologyWatcher var filter TabletFilter @@ -352,7 +355,7 @@ func NewHealthCheck(ctx context.Context, retryDelay, healthCheckTimeout time.Dur hc.topoWatchers = topoWatchers healthcheckOnce.Do(func() { - http.Handle("/debug/gateway", hc) + servenv.HTTPHandle("/debug/gateway", hc) }) // start the topo watches here @@ -491,6 +494,18 @@ func (hc *HealthCheckImpl) updateHealth(th *TabletHealth, prevTarget *query.Targ if !ok { hc.healthData[targetKey] = make(map[tabletAliasString]*TabletHealth) } + + // If the previous tablet type was primary, we need to check if the next new primary has already been assigned. + // If no new primary has been assigned, we will trigger a `loadTablets` call to immediately redirect traffic to the new primary. + // + // This is to avoid a situation where a newly primary tablet for a shard has just been started and the tableRefreshInterval has not yet passed, + // causing an interruption where no primary is assigned to the shard. + if prevTarget.TabletType == topodata.TabletType_PRIMARY { + if primaries := hc.healthData[oldTargetKey]; len(primaries) == 0 { + log.Infof("We will have no health data for the next new primary tablet after demoting the tablet: %v, so start loading tablets now", topotools.TabletIdent(th.Tablet)) + hc.loadTabletsTrigger <- struct{}{} + } + } } // add it to the map by target and create the map record if needed if _, ok := hc.healthData[targetKey]; !ok { @@ -591,6 +606,11 @@ func (hc *HealthCheckImpl) broadcast(th *TabletHealth) { } } +// GetLoadTabletsTrigger returns a channel that is used to inform when to load tablets. +func (hc *HealthCheckImpl) GetLoadTabletsTrigger() chan struct{} { + return hc.loadTabletsTrigger +} + // CacheStatus returns a displayable version of the cache. func (hc *HealthCheckImpl) CacheStatus() TabletsCacheStatusList { tcsMap := hc.CacheStatusMap() diff --git a/go/vt/discovery/healthcheck_test.go b/go/vt/discovery/healthcheck_test.go index 9d4325404b9..5fadc57eb2e 100644 --- a/go/vt/discovery/healthcheck_test.go +++ b/go/vt/discovery/healthcheck_test.go @@ -20,19 +20,18 @@ import ( "bytes" "context" "fmt" - "html/template" "io" "strings" "sync" "testing" "time" + "github.com/google/safehtml/template" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/grpcclient" - "vitess.io/vitess/go/vt/status" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" @@ -65,10 +64,12 @@ func init() { } func TestHealthCheck(t *testing.T) { + ctx := utils.LeakCheckContext(t) // reset error counters hcErrorCounters.ResetAll() - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) // close healthcheck defer hc.Close() tablet := createTestTablet(0, "cell", "a") @@ -98,8 +99,8 @@ func TestHealthCheck(t *testing.T) { Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.5}, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.5}, } input <- shr result = <-resultChan @@ -131,11 +132,11 @@ func TestHealthCheck(t *testing.T) { // TabletType changed, should get both old and new event shr = &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, - Serving: true, - TabletExternallyReparentedTimestamp: 10, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, + Serving: true, + PrimaryTermStartTimestamp: 10, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } want = &TabletHealth{ Tablet: tablet, @@ -160,11 +161,11 @@ func TestHealthCheck(t *testing.T) { // Serving & RealtimeStats changed shr = &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: false, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.3}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: false, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.3}, } want = &TabletHealth{ Tablet: tablet, @@ -180,11 +181,11 @@ func TestHealthCheck(t *testing.T) { // HealthError shr = &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{HealthError: "some error", ReplicationLagSeconds: 1, CpuUsage: 0.3}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{HealthError: "some error", ReplicationLagSeconds: 1, CpuUsage: 0.3}, } want = &TabletHealth{ Tablet: tablet, @@ -207,8 +208,11 @@ func TestHealthCheck(t *testing.T) { } func TestHealthCheckStreamError(t *testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) defer hc.Close() tablet := createTestTablet(0, "cell", "a") @@ -230,11 +234,11 @@ func TestHealthCheckStreamError(t *testing.T) { // one tablet after receiving a StreamHealthResponse shr := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } want = &TabletHealth{ Tablet: tablet, @@ -268,8 +272,11 @@ func TestHealthCheckStreamError(t *testing.T) { // TestHealthCheckErrorOnPrimary is the same as TestHealthCheckStreamError except for tablet type func TestHealthCheckErrorOnPrimary(t *testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) defer hc.Close() tablet := createTestTablet(0, "cell", "a") @@ -291,11 +298,11 @@ func TestHealthCheckErrorOnPrimary(t *testing.T) { // one tablet after receiving a StreamHealthResponse shr := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, - Serving: true, - TabletExternallyReparentedTimestamp: 10, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, + Serving: true, + PrimaryTermStartTimestamp: 10, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } want = &TabletHealth{ Tablet: tablet, @@ -328,8 +335,11 @@ func TestHealthCheckErrorOnPrimary(t *testing.T) { } func TestHealthCheckErrorOnPrimaryAfterExternalReparent(t *testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) defer hc.Close() resultChan := hc.Subscribe() @@ -349,20 +359,20 @@ func TestHealthCheckErrorOnPrimaryAfterExternalReparent(t *testing.T) { <-resultChan shr2 := &querypb.StreamHealthResponse{ - TabletAlias: tablet2.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, + TabletAlias: tablet2.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, } input2 <- shr2 <-resultChan shr1 := &querypb.StreamHealthResponse{ - TabletAlias: tablet1.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, - Serving: true, - TabletExternallyReparentedTimestamp: 10, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, + TabletAlias: tablet1.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, + Serving: true, + PrimaryTermStartTimestamp: 10, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, } input1 <- shr1 <-resultChan @@ -378,11 +388,11 @@ func TestHealthCheckErrorOnPrimaryAfterExternalReparent(t *testing.T) { mustMatch(t, health, a, "unexpected result") shr2 = &querypb.StreamHealthResponse{ - TabletAlias: tablet2.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, - Serving: true, - TabletExternallyReparentedTimestamp: 20, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, + TabletAlias: tablet2.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, + Serving: true, + PrimaryTermStartTimestamp: 20, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, } input2 <- shr2 <-resultChan @@ -406,8 +416,11 @@ func TestHealthCheckErrorOnPrimaryAfterExternalReparent(t *testing.T) { } func TestHealthCheckVerifiesTabletAlias(t *testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) defer hc.Close() tablet := createTestTablet(0, "cell", "a") @@ -428,11 +441,11 @@ func TestHealthCheckVerifiesTabletAlias(t *testing.T) { mustMatch(t, want, result, "Wrong TabletHealth data") input <- &querypb.StreamHealthResponse{ - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, - TabletAlias: &topodatapb.TabletAlias{Uid: 20, Cell: "cellb"}, - Serving: true, - TabletExternallyReparentedTimestamp: 10, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, + TabletAlias: &topodatapb.TabletAlias{Uid: 20, Cell: "cellb"}, + Serving: true, + PrimaryTermStartTimestamp: 10, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } ticker := time.NewTicker(1 * time.Second) @@ -449,8 +462,12 @@ func TestHealthCheckVerifiesTabletAlias(t *testing.T) { // TestHealthCheckCloseWaitsForGoRoutines tests that Close() waits for all Go // routines to finish and the listener won't be called anymore. func TestHealthCheckCloseWaitsForGoRoutines(t *testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) + defer hc.Close() tablet := createTestTablet(0, "cell", "a") input := make(chan *querypb.StreamHealthResponse, 1) createFakeConn(tablet, input) @@ -470,11 +487,11 @@ func TestHealthCheckCloseWaitsForGoRoutines(t *testing.T) { // one tablet after receiving a StreamHealthResponse shr := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } want = &TabletHealth{ Tablet: tablet, @@ -489,7 +506,7 @@ func TestHealthCheckCloseWaitsForGoRoutines(t *testing.T) { mustMatch(t, want, result, "Wrong TabletHealth data") // Change input to distinguish between stats sent before and after Close(). - shr.TabletExternallyReparentedTimestamp = 11 + shr.PrimaryTermStartTimestamp = 11 // Close the healthcheck. Tablet connections are closed asynchronously and // Close() will block until all Go routines (one per connection) are done. assert.Nil(t, hc.Close(), "Close returned error") @@ -509,10 +526,13 @@ func TestHealthCheckCloseWaitsForGoRoutines(t *testing.T) { } func TestHealthCheckTimeout(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // reset counters hcErrorCounters.ResetAll() - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) hc.healthCheckTimeout = 500 * time.Millisecond defer hc.Close() tablet := createTestTablet(0, "cell", "a") @@ -532,11 +552,11 @@ func TestHealthCheckTimeout(t *testing.T) { // one tablet after receiving a StreamHealthResponse shr := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } want = &TabletHealth{ Tablet: tablet, @@ -581,8 +601,11 @@ func TestHealthCheckTimeout(t *testing.T) { } func TestWaitForAllServingTablets(t *testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) defer hc.Close() tablet := createTestTablet(0, "cell", "a") tablet.Type = topodatapb.TabletType_REPLICA @@ -602,18 +625,19 @@ func TestWaitForAllServingTablets(t *testing.T) { // there will be a first result, get and discard it <-resultChan // empty - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, 1*time.Second) defer cancel() err := hc.WaitForAllServingTablets(ctx, targets) assert.NotNil(t, err, "error should not be nil") shr := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } input <- shr @@ -673,8 +697,11 @@ func TestWaitForAllServingTablets(t *testing.T) { // TestRemoveTablet tests the behavior when a tablet goes away. func TestRemoveTablet(t *testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) defer hc.Close() tablet := createTestTablet(0, "cell", "a") tablet.Type = topodatapb.TabletType_REPLICA @@ -688,11 +715,11 @@ func TestRemoveTablet(t *testing.T) { <-resultChan shrReplica := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } want := []*TabletHealth{{ Tablet: tablet, @@ -737,11 +764,11 @@ func TestRemoveTablet(t *testing.T) { // Change the tablet type to RDONLY. tablet.Type = topodatapb.TabletType_RDONLY shrRdonly := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_RDONLY}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 2, CpuUsage: 0.4}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_RDONLY}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 2, CpuUsage: 0.4}, } // Now Replace it, which does a Remove and Add. The tablet should be removed @@ -780,8 +807,11 @@ func TestRemoveTablet(t *testing.T) { // TestGetHealthyTablets tests the functionality of GetHealthyTabletStats. func TestGetHealthyTablets(t *testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) defer hc.Close() tablet := createTestTablet(0, "cell", "a") tablet.Type = topodatapb.TabletType_REPLICA @@ -798,11 +828,11 @@ func TestGetHealthyTablets(t *testing.T) { assert.Empty(t, a, "wrong result, expected empty list") shr := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.2}, } want := []*TabletHealth{{ Tablet: tablet, @@ -819,11 +849,11 @@ func TestGetHealthyTablets(t *testing.T) { // update health with a change that won't change health array shr = &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 2, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 2, CpuUsage: 0.2}, } input <- shr // wait for result before checking @@ -834,11 +864,11 @@ func TestGetHealthyTablets(t *testing.T) { // update stats with a change that will change health array shr = &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 35, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 35, CpuUsage: 0.2}, } want = []*TabletHealth{{ Tablet: tablet, @@ -864,11 +894,11 @@ func TestGetHealthyTablets(t *testing.T) { <-resultChan shr2 := &querypb.StreamHealthResponse{ - TabletAlias: tablet2.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, + TabletAlias: tablet2.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, } want2 := []*TabletHealth{{ Tablet: tablet, @@ -894,11 +924,11 @@ func TestGetHealthyTablets(t *testing.T) { mustMatch(t, want2, a, "unexpected result") shr2 = &querypb.StreamHealthResponse{ - TabletAlias: tablet2.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: false, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, + TabletAlias: tablet2.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: false, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, } input2 <- shr2 // wait for result @@ -912,7 +942,7 @@ func TestGetHealthyTablets(t *testing.T) { Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, Serving: true, - TabletExternallyReparentedTimestamp: 10, + PrimaryTermStartTimestamp: 10, RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, } @@ -936,11 +966,11 @@ func TestGetHealthyTablets(t *testing.T) { // reparent: old replica goes into primary shr = &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, - Serving: true, - TabletExternallyReparentedTimestamp: 20, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, + Serving: true, + PrimaryTermStartTimestamp: 20, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, } input <- shr <-resultChan @@ -966,8 +996,11 @@ func TestGetHealthyTablets(t *testing.T) { } func TestPrimaryInOtherCell(t *testing.T) { - ts := memorytopo.NewServer("cell1", "cell2") - hc := NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2") + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell1", "cell2") + defer ts.Close() + hc := NewHealthCheck(ctx, 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2") defer hc.Close() // add a tablet as primary in different cell @@ -990,11 +1023,11 @@ func TestPrimaryInOtherCell(t *testing.T) { } shr := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, - Serving: true, - TabletExternallyReparentedTimestamp: 20, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY}, + Serving: true, + PrimaryTermStartTimestamp: 20, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.2}, } want := &TabletHealth{ Tablet: tablet, @@ -1023,8 +1056,11 @@ func TestPrimaryInOtherCell(t *testing.T) { } func TestReplicaInOtherCell(t *testing.T) { - ts := memorytopo.NewServer("cell1", "cell2") - hc := NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2") + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell1", "cell2") + defer ts.Close() + hc := NewHealthCheck(ctx, 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2") defer hc.Close() // add a tablet as replica @@ -1046,11 +1082,11 @@ func TestReplicaInOtherCell(t *testing.T) { } shr := &querypb.StreamHealthResponse{ - TabletAlias: local.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, + TabletAlias: local.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, } want := &TabletHealth{ Tablet: local, @@ -1092,11 +1128,11 @@ func TestReplicaInOtherCell(t *testing.T) { } shr2 := &querypb.StreamHealthResponse{ - TabletAlias: remote.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, + TabletAlias: remote.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, } want2 := &TabletHealth{ Tablet: remote, @@ -1125,8 +1161,11 @@ func TestReplicaInOtherCell(t *testing.T) { } func TestCellAliases(t *testing.T) { - ts := memorytopo.NewServer("cell1", "cell2") - hc := NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2") + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell1", "cell2") + defer ts.Close() + hc := NewHealthCheck(ctx, 1*time.Millisecond, time.Hour, ts, "cell1", "cell1, cell2") defer hc.Close() cellsAlias := &topodatapb.CellsAlias{ @@ -1155,11 +1194,11 @@ func TestCellAliases(t *testing.T) { } shr := &querypb.StreamHealthResponse{ - TabletAlias: tablet.Alias, - Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, - Serving: true, - TabletExternallyReparentedTimestamp: 0, - RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, + TabletAlias: tablet.Alias, + Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, + Serving: true, + PrimaryTermStartTimestamp: 0, + RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 10, CpuUsage: 0.2}, } want := []*TabletHealth{{ Tablet: tablet, @@ -1185,8 +1224,11 @@ func TestCellAliases(t *testing.T) { } func TestHealthCheckChecksGrpcPort(t *testing.T) { - ts := memorytopo.NewServer("cell") - hc := createTestHc(ts) + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "cell") + defer ts.Close() + hc := createTestHc(ctx, ts) defer hc.Close() tablet := createTestTablet(0, "cell", "a") @@ -1205,6 +1247,7 @@ func TestHealthCheckChecksGrpcPort(t *testing.T) { } func TestTemplate(t *testing.T) { + defer utils.EnsureNoLeaks(t) TabletURLTemplateString = "http://{{.GetTabletHostPort}}" ParseTabletURLTemplateFromFlag() @@ -1223,7 +1266,7 @@ func TestTemplate(t *testing.T) { Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, TabletsStats: ts, } - templ := template.New("").Funcs(status.StatusFuncs) + templ := template.New("") templ, err := templ.Parse(HealthCheckTemplate) require.Nil(t, err, "error parsing template: %v", err) wr := &bytes.Buffer{} @@ -1232,6 +1275,7 @@ func TestTemplate(t *testing.T) { } func TestDebugURLFormatting(t *testing.T) { + defer utils.EnsureNoLeaks(t) TabletURLTemplateString = "https://{{.GetHostNameLevel 0}}.bastion.{{.Tablet.Alias.Cell}}.corp" ParseTabletURLTemplateFromFlag() @@ -1250,7 +1294,7 @@ func TestDebugURLFormatting(t *testing.T) { Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}, TabletsStats: ts, } - templ := template.New("").Funcs(status.StatusFuncs) + templ := template.New("") templ, err := templ.Parse(HealthCheckTemplate) require.Nil(t, err, "error parsing template") wr := &bytes.Buffer{} @@ -1271,8 +1315,8 @@ func tabletDialer(tablet *topodatapb.Tablet, _ grpcclient.FailFast) (queryservic return nil, fmt.Errorf("tablet %v not found", key) } -func createTestHc(ts *topo.Server) *HealthCheckImpl { - return NewHealthCheck(context.Background(), 1*time.Millisecond, time.Hour, ts, "cell", "") +func createTestHc(ctx context.Context, ts *topo.Server) *HealthCheckImpl { + return NewHealthCheck(ctx, 1*time.Millisecond, time.Hour, ts, "cell", "") } type fakeConn struct { diff --git a/go/vt/discovery/keyspace_events.go b/go/vt/discovery/keyspace_events.go index 0b3fa7e9efe..163f240de8c 100644 --- a/go/vt/discovery/keyspace_events.go +++ b/go/vt/discovery/keyspace_events.go @@ -23,12 +23,17 @@ import ( "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topotools" + + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" ) // KeyspaceEventWatcher is an auxiliary watcher that watches all availability incidents @@ -61,11 +66,14 @@ type KeyspaceEvent struct { // Shards is a list of all the shards in the keyspace, including their state after the event is resolved Shards []ShardEvent + + // MoveTablesState records the current state of an ongoing MoveTables workflow + MoveTablesState MoveTablesState } type ShardEvent struct { Tablet *topodatapb.TabletAlias - Target *query.Target + Target *querypb.Target Serving bool } @@ -85,6 +93,16 @@ func NewKeyspaceEventWatcher(ctx context.Context, topoServer srvtopo.Server, hc return kew } +type MoveTablesStatus int + +const ( + MoveTablesUnknown MoveTablesStatus = iota + // MoveTablesSwitching is set when the write traffic is the middle of being switched from the source to the target + MoveTablesSwitching + // MoveTablesSwitched is set when write traffic has been completely switched to the target + MoveTablesSwitched +) + // keyspaceState is the internal state for all the keyspaces that the KEW is // currently watching type keyspaceState struct { @@ -98,6 +116,8 @@ type keyspaceState struct { lastError error lastKeyspace *topodatapb.SrvKeyspace shards map[string]*shardState + + moveTablesState *MoveTablesState } // Format prints the internal state for this keyspace for debug purposes @@ -124,17 +144,27 @@ func (kss *keyspaceState) beingResharded(currentShard string) bool { kss.mu.Lock() defer kss.mu.Unlock() - // if the keyspace is gone, or if it has no known availability events, the keyspace - // cannot be in the middle of a resharding operation - if kss.deleted || kss.consistent { + // If the keyspace is gone, has no known availability events, or is in the middle of a + // MoveTables then the keyspace cannot be in the middle of a resharding operation. + if kss.deleted || kss.consistent || (kss.moveTablesState != nil && kss.moveTablesState.Typ != MoveTablesType(MoveTablesNone)) { return false } - // for all the known shards, try to find a primary shard besides the one we're trying to access - // and which is currently healthy. if there are other healthy primaries in the keyspace, it means - // we're in the middle of a resharding operation + // If there are unequal and overlapping shards in the keyspace and any of them are + // currently serving then we assume that we are in the middle of a Reshard. + _, ckr, err := topo.ValidateShardName(currentShard) + if err != nil || ckr == nil { // Assume not and avoid potential panic + return false + } for shard, sstate := range kss.shards { - if shard != currentShard && sstate.serving { + if !sstate.serving || shard == currentShard { + continue + } + _, skr, err := topo.ValidateShardName(shard) + if err != nil || skr == nil { // Assume not and avoid potential panic + return false + } + if key.KeyRangeIntersect(ckr, skr) { return true } } @@ -143,7 +173,7 @@ func (kss *keyspaceState) beingResharded(currentShard string) bool { } type shardState struct { - target *query.Target + target *querypb.Target serving bool externallyReparented int64 currentPrimary *topodatapb.TabletAlias @@ -191,7 +221,7 @@ func (kew *KeyspaceEventWatcher) run(ctx context.Context) { if result == nil { return } - kew.processHealthCheck(result) + kew.processHealthCheck(ctx, result) } } }() @@ -204,7 +234,7 @@ func (kew *KeyspaceEventWatcher) run(ctx context.Context) { return } for _, ks := range keyspaces { - kew.getKeyspaceStatus(ks) + kew.getKeyspaceStatus(ctx, ks) } }() } @@ -217,6 +247,10 @@ func (kss *keyspaceState) ensureConsistentLocked() { return } + if kss.moveTablesState != nil && kss.moveTablesState.Typ != MoveTablesNone && kss.moveTablesState.State != MoveTablesSwitched { + return + } + // get the topology metadata for our primary from `lastKeyspace`; this value is refreshed // from our topology watcher whenever a change is detected, so it should always be up to date primary := topoproto.SrvKeyspaceGetPartition(kss.lastKeyspace, topodatapb.TabletType_PRIMARY) @@ -251,16 +285,25 @@ func (kss *keyspaceState) ensureConsistentLocked() { } } + // clone the current moveTablesState, if any, to handle race conditions where it can get updated while we're broadcasting + var moveTablesState MoveTablesState + if kss.moveTablesState != nil { + moveTablesState = *kss.moveTablesState + } + + ksevent := &KeyspaceEvent{ + Cell: kss.kew.localCell, + Keyspace: kss.keyspace, + Shards: make([]ShardEvent, 0, len(kss.shards)), + MoveTablesState: moveTablesState, + } + // we haven't found any inconsistencies between the HealthCheck stream and the topology // watcher. this means the ongoing availability event has been resolved, so we can broadcast // a resolution event to all listeners kss.consistent = true - ksevent := &KeyspaceEvent{ - Cell: kss.kew.localCell, - Keyspace: kss.keyspace, - Shards: make([]ShardEvent, 0, len(kss.shards)), - } + kss.moveTablesState = nil for shard, sstate := range kss.shards { ksevent.Shards = append(ksevent.Shards, ShardEvent{ @@ -328,6 +371,97 @@ func (kss *keyspaceState) onHealthCheck(th *TabletHealth) { kss.ensureConsistentLocked() } +type MoveTablesType int + +const ( + MoveTablesNone MoveTablesType = iota + MoveTablesRegular + MoveTablesShardByShard +) + +type MoveTablesState struct { + Typ MoveTablesType + State MoveTablesStatus +} + +func (kss *keyspaceState) getMoveTablesStatus(vs *vschemapb.SrvVSchema) (*MoveTablesState, error) { + mtState := &MoveTablesState{ + Typ: MoveTablesNone, + State: MoveTablesUnknown, + } + + // if there are no routing rules defined, then movetables is not in progress, exit early + if (vs.RoutingRules != nil && len(vs.RoutingRules.Rules) == 0) && + (vs.ShardRoutingRules != nil && len(vs.ShardRoutingRules.Rules) == 0) { + return mtState, nil + } + + shortCtx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) + defer cancel() + ts, _ := kss.kew.ts.GetTopoServer() + + // collect all current shard information from the topo + var shardInfos []*topo.ShardInfo + for _, sstate := range kss.shards { + si, err := ts.GetShard(shortCtx, kss.keyspace, sstate.target.Shard) + if err != nil { + return nil, err + } + shardInfos = append(shardInfos, si) + } + + // check if any shard has denied tables and if so, record one of these to check where it currently points to + // using the (shard) routing rules + var shardsWithDeniedTables []string + var oneDeniedTable string + for _, si := range shardInfos { + for _, tc := range si.TabletControls { + if len(tc.DeniedTables) > 0 { + oneDeniedTable = tc.DeniedTables[0] + shardsWithDeniedTables = append(shardsWithDeniedTables, si.ShardName()) + } + } + } + if len(shardsWithDeniedTables) == 0 { + return mtState, nil + } + + // check if a shard by shard migration is in progress and if so detect if it has been switched + isPartialTables := vs.ShardRoutingRules != nil && len(vs.ShardRoutingRules.Rules) > 0 + + if isPartialTables { + srr := topotools.GetShardRoutingRulesMap(vs.ShardRoutingRules) + mtState.Typ = MoveTablesShardByShard + mtState.State = MoveTablesSwitched + for _, shard := range shardsWithDeniedTables { + ruleKey := topotools.GetShardRoutingRuleKey(kss.keyspace, shard) + if _, ok := srr[ruleKey]; ok { + // still pointing to the source shard + mtState.State = MoveTablesSwitching + break + } + } + log.Infof("getMoveTablesStatus: keyspace %s declaring partial move tables %v", kss.keyspace, mtState) + return mtState, nil + } + + // it wasn't a shard by shard migration, but since we have denied tables it must be a regular MoveTables + mtState.Typ = MoveTablesRegular + mtState.State = MoveTablesSwitching + rr := topotools.GetRoutingRulesMap(vs.RoutingRules) + if rr != nil { + r, ok := rr[oneDeniedTable] + // if a rule exists for the table and points to the target keyspace, writes have been switched + if ok && len(r) > 0 && r[0] != fmt.Sprintf("%s.%s", kss.keyspace, oneDeniedTable) { + mtState.State = MoveTablesSwitched + log.Infof("onSrvKeyspace:: keyspace %s writes have been switched for table %s, rule %v", kss.keyspace, oneDeniedTable, r[0]) + } + } + log.Infof("getMoveTablesStatus: keyspace %s declaring regular move tables %v", kss.keyspace, mtState) + + return mtState, nil +} + // onSrvKeyspace is the callback that updates this keyspace with fresh topology data from our topology server. // this callback is called from a Watcher in the topo server whenever a change to the topology for this keyspace // occurs. this watcher is dedicated to this keyspace, and will only yield topology metadata changes for as @@ -378,24 +512,54 @@ func (kss *keyspaceState) onSrvKeyspace(newKeyspace *topodatapb.SrvKeyspace, new return true } +// isServing returns whether a keyspace has at least one serving shard or not. +func (kss *keyspaceState) isServing() bool { + kss.mu.Lock() + defer kss.mu.Unlock() + for _, state := range kss.shards { + if state.serving { + return true + } + } + return false +} + +// onSrvVSchema is called from a Watcher in the topo server whenever the SrvVSchema is updated by Vitess. +// For the purposes here, we are interested in updates to the RoutingRules or ShardRoutingRules. +// In addition, the traffic switcher updates SrvVSchema when the DeniedTables attributes in a Shard record is +// modified. +func (kss *keyspaceState) onSrvVSchema(vs *vschemapb.SrvVSchema, err error) bool { + kss.mu.Lock() + defer kss.mu.Unlock() + kss.moveTablesState, _ = kss.getMoveTablesStatus(vs) + if kss.moveTablesState != nil && kss.moveTablesState.Typ != MoveTablesNone { + // mark the keyspace as inconsistent. ensureConsistentLocked() checks if the workflow is switched, + // and if so, it will send an event to the buffering subscribers to indicate that buffering can be stopped. + kss.consistent = false + kss.ensureConsistentLocked() + } + return true +} + // newKeyspaceState allocates the internal state required to keep track of availability incidents // in this keyspace, and starts up a SrvKeyspace watcher on our topology server which will update // our keyspaceState with any topology changes in real time. -func newKeyspaceState(kew *KeyspaceEventWatcher, cell, keyspace string) *keyspaceState { +func newKeyspaceState(ctx context.Context, kew *KeyspaceEventWatcher, cell, keyspace string) *keyspaceState { log.Infof("created dedicated watcher for keyspace %s/%s", cell, keyspace) kss := &keyspaceState{ kew: kew, keyspace: keyspace, shards: make(map[string]*shardState), } - kew.ts.WatchSrvKeyspace(context.Background(), cell, keyspace, kss.onSrvKeyspace) + kew.ts.WatchSrvKeyspace(ctx, cell, keyspace, kss.onSrvKeyspace) + kew.ts.WatchSrvVSchema(ctx, cell, kss.onSrvVSchema) return kss } // processHealthCheck is the callback that is called by the global HealthCheck stream that was initiated // by this KeyspaceEventWatcher. it redirects the TabletHealth event to the corresponding keyspaceState -func (kew *KeyspaceEventWatcher) processHealthCheck(th *TabletHealth) { - kss := kew.getKeyspaceStatus(th.Target.Keyspace) +func (kew *KeyspaceEventWatcher) processHealthCheck(ctx context.Context, th *TabletHealth) { + kss := kew.getKeyspaceStatus(ctx, th.Target.Keyspace) if kss == nil { return } @@ -405,18 +569,24 @@ func (kew *KeyspaceEventWatcher) processHealthCheck(th *TabletHealth) { // getKeyspaceStatus returns the keyspaceState object for the corresponding keyspace, allocating it // if we've never seen the keyspace before. -func (kew *KeyspaceEventWatcher) getKeyspaceStatus(keyspace string) *keyspaceState { +func (kew *KeyspaceEventWatcher) getKeyspaceStatus(ctx context.Context, keyspace string) *keyspaceState { kew.mu.Lock() defer kew.mu.Unlock() - kss := kew.keyspaces[keyspace] if kss == nil { - kss = newKeyspaceState(kew, kew.localCell, keyspace) + kss = newKeyspaceState(ctx, kew, kew.localCell, keyspace) kew.keyspaces[keyspace] = kss } if kss.deleted { kss = nil delete(kew.keyspaces, keyspace) + // Delete from the sidecar database identifier cache as well. + // Ignore any errors as they should all mean that the entry + // does not exist in the cache (which will be common). + sdbidc, _ := sidecardb.GetIdentifierCache() + if sdbidc != nil { + sdbidc.Delete(keyspace) + } } return kss } @@ -426,11 +596,11 @@ func (kew *KeyspaceEventWatcher) getKeyspaceStatus(keyspace string) *keyspaceSta // This is not a fully accurate heuristic, but it's good enough that we'd want to buffer the // request for the given target under the assumption that the reason why it cannot be completed // right now is transitory. -func (kew *KeyspaceEventWatcher) TargetIsBeingResharded(target *query.Target) bool { +func (kew *KeyspaceEventWatcher) TargetIsBeingResharded(ctx context.Context, target *querypb.Target) bool { if target.TabletType != topodatapb.TabletType_PRIMARY { return false } - ks := kew.getKeyspaceStatus(target.Keyspace) + ks := kew.getKeyspaceStatus(ctx, target.Keyspace) if ks == nil { return false } @@ -446,19 +616,34 @@ func (kew *KeyspaceEventWatcher) TargetIsBeingResharded(target *query.Target) bo // The shard state keeps track of the current primary and the last externally reparented time, which we can use // to determine that there was a serving primary which now became non serving. This is only possible in a DemotePrimary // RPC which are only called from ERS and PRS. So buffering will stop when these operations succeed. -func (kew *KeyspaceEventWatcher) PrimaryIsNotServing(target *query.Target) bool { +// We return the tablet alias of the primary if it is serving. +func (kew *KeyspaceEventWatcher) PrimaryIsNotServing(ctx context.Context, target *querypb.Target) (*topodatapb.TabletAlias, bool) { if target.TabletType != topodatapb.TabletType_PRIMARY { - return false + return nil, false } - ks := kew.getKeyspaceStatus(target.Keyspace) + ks := kew.getKeyspaceStatus(ctx, target.Keyspace) if ks == nil { - return false + return nil, false } ks.mu.Lock() defer ks.mu.Unlock() if state, ok := ks.shards[target.Shard]; ok { // If the primary tablet was present then externallyReparented will be non-zero and currentPrimary will be not nil - return !state.serving && !ks.consistent && state.externallyReparented != 0 && state.currentPrimary != nil + return state.currentPrimary, !state.serving && !ks.consistent && state.externallyReparented != 0 && state.currentPrimary != nil } - return false + return nil, false +} + +// GetServingKeyspaces gets the serving keyspaces from the keyspace event watcher. +func (kew *KeyspaceEventWatcher) GetServingKeyspaces() []string { + kew.mu.Lock() + defer kew.mu.Unlock() + + var servingKeyspaces []string + for ksName, state := range kew.keyspaces { + if state.isServing() { + servingKeyspaces = append(servingKeyspaces, ksName) + } + } + return servingKeyspaces } diff --git a/go/vt/discovery/keyspace_events_test.go b/go/vt/discovery/keyspace_events_test.go new file mode 100644 index 00000000000..43af4bf49de --- /dev/null +++ b/go/vt/discovery/keyspace_events_test.go @@ -0,0 +1,322 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "context" + "encoding/hex" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/faketopo" + + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" +) + +func TestSrvKeyspaceWithNilNewKeyspace(t *testing.T) { + ctx := utils.LeakCheckContext(t) + cell := "cell" + keyspace := "testks" + factory := faketopo.NewFakeTopoFactory() + factory.AddCell(cell) + ts := faketopo.NewFakeTopoServer(ctx, factory) + ts2 := &fakeTopoServer{} + hc := NewHealthCheck(ctx, 1*time.Millisecond, time.Hour, ts, cell, "") + defer hc.Close() + kew := NewKeyspaceEventWatcher(ctx, ts2, hc, cell) + kss := &keyspaceState{ + kew: kew, + keyspace: keyspace, + shards: make(map[string]*shardState), + } + kss.lastKeyspace = &topodatapb.SrvKeyspace{ + ServedFrom: []*topodatapb.SrvKeyspace_ServedFrom{ + { + TabletType: topodatapb.TabletType_PRIMARY, + Keyspace: keyspace, + }, + }, + } + require.True(t, kss.onSrvKeyspace(nil, nil)) +} + +// TestKeyspaceEventTypes confirms that the keyspace event watcher determines +// that the unavailability event is caused by the correct scenario. We should +// consider it to be caused by a resharding operation when the following +// conditions are present: +// 1. The keyspace is inconsistent (in the middle of an availability event) +// 2. The target tablet is a primary +// 3. The keyspace has overlapping shards +// 4. The overlapping shard's tablet is serving +// And we should consider the cause to be a primary not serving when the +// following conditions exist: +// 1. The keyspace is inconsistent (in the middle of an availability event) +// 2. The target tablet is a primary +// 3. The target tablet is not serving +// 4. The shard's externallyReparented time is not 0 +// 5. The shard's currentPrimary state is not nil +// We should never consider both as a possible cause given the same +// keyspace state. +func TestKeyspaceEventTypes(t *testing.T) { + utils.EnsureNoLeaks(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cell := "cell" + keyspace := "testks" + factory := faketopo.NewFakeTopoFactory() + factory.AddCell(cell) + ts := faketopo.NewFakeTopoServer(ctx, factory) + ts2 := &fakeTopoServer{} + hc := NewHealthCheck(ctx, 1*time.Millisecond, time.Hour, ts, cell, "") + defer hc.Close() + kew := NewKeyspaceEventWatcher(ctx, ts2, hc, cell) + + type testCase struct { + name string + kss *keyspaceState + shardToCheck string + expectResharding bool + expectPrimaryNotServing bool + } + + testCases := []testCase{ + { + name: "one to two resharding in progress", + kss: &keyspaceState{ + kew: kew, + keyspace: keyspace, + shards: map[string]*shardState{ + "-": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "-", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: false, + }, + "-80": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "-80", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: true, + }, + "80-": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "80-", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: false, + }, + }, + consistent: false, + }, + shardToCheck: "-", + expectResharding: true, + expectPrimaryNotServing: false, + }, + { + name: "two to four resharding in progress", + kss: &keyspaceState{ + kew: kew, + keyspace: keyspace, + shards: map[string]*shardState{ + "-80": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "-80", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: false, + }, + "80-": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "80-", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: true, + }, + "-40": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "-40", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: true, + }, + "40-80": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "40-80", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: true, + }, + "80-c0": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "80-c0", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: false, + }, + "c0-": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "c0-", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: false, + }, + }, + consistent: false, + }, + shardToCheck: "-80", + expectResharding: true, + expectPrimaryNotServing: false, + }, + { + name: "unsharded primary not serving", + kss: &keyspaceState{ + kew: kew, + keyspace: keyspace, + shards: map[string]*shardState{ + "-": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "-", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: false, + externallyReparented: time.Now().UnixNano(), + currentPrimary: &topodatapb.TabletAlias{ + Cell: cell, + Uid: 100, + }, + }, + }, + consistent: false, + }, + shardToCheck: "-", + expectResharding: false, + expectPrimaryNotServing: true, + }, + { + name: "sharded primary not serving", + kss: &keyspaceState{ + kew: kew, + keyspace: keyspace, + shards: map[string]*shardState{ + "-80": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "-80", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: false, + externallyReparented: time.Now().UnixNano(), + currentPrimary: &topodatapb.TabletAlias{ + Cell: cell, + Uid: 100, + }, + }, + "80-": { + target: &querypb.Target{ + Keyspace: keyspace, + Shard: "80-", + TabletType: topodatapb.TabletType_PRIMARY, + }, + serving: true, + }, + }, + consistent: false, + }, + shardToCheck: "-80", + expectResharding: false, + expectPrimaryNotServing: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + kew.mu.Lock() + kew.keyspaces[keyspace] = tc.kss + kew.mu.Unlock() + + require.NotNil(t, tc.kss.shards[tc.shardToCheck], "the specified shardToCheck of %q does not exist in the shardState", tc.shardToCheck) + + resharding := kew.TargetIsBeingResharded(ctx, tc.kss.shards[tc.shardToCheck].target) + require.Equal(t, resharding, tc.expectResharding, "TargetIsBeingResharded should return %t", tc.expectResharding) + + _, primaryDown := kew.PrimaryIsNotServing(ctx, tc.kss.shards[tc.shardToCheck].target) + require.Equal(t, primaryDown, tc.expectPrimaryNotServing, "PrimaryIsNotServing should return %t", tc.expectPrimaryNotServing) + }) + } +} + +type fakeTopoServer struct { +} + +// GetTopoServer returns the full topo.Server instance. +func (f *fakeTopoServer) GetTopoServer() (*topo.Server, error) { + return nil, nil +} + +// GetSrvKeyspaceNames returns the list of keyspaces served in +// the provided cell. +func (f *fakeTopoServer) GetSrvKeyspaceNames(ctx context.Context, cell string, staleOK bool) ([]string, error) { + return []string{"ks1"}, nil +} + +// GetSrvKeyspace returns the SrvKeyspace for a cell/keyspace. +func (f *fakeTopoServer) GetSrvKeyspace(ctx context.Context, cell, keyspace string) (*topodatapb.SrvKeyspace, error) { + zeroHexBytes, _ := hex.DecodeString("") + eightyHexBytes, _ := hex.DecodeString("80") + ks := &topodatapb.SrvKeyspace{ + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ + { + ServedType: topodatapb.TabletType_PRIMARY, + ShardReferences: []*topodatapb.ShardReference{ + {Name: "-80", KeyRange: &topodatapb.KeyRange{Start: zeroHexBytes, End: eightyHexBytes}}, + {Name: "80-", KeyRange: &topodatapb.KeyRange{Start: eightyHexBytes, End: zeroHexBytes}}, + }, + }, + }, + } + return ks, nil +} + +func (f *fakeTopoServer) WatchSrvKeyspace(ctx context.Context, cell, keyspace string, callback func(*topodatapb.SrvKeyspace, error) bool) { + ks, err := f.GetSrvKeyspace(ctx, cell, keyspace) + callback(ks, err) +} + +// WatchSrvVSchema starts watching the SrvVSchema object for +// the provided cell. It will call the callback when +// a new value or an error occurs. +func (f *fakeTopoServer) WatchSrvVSchema(ctx context.Context, cell string, callback func(*vschemapb.SrvVSchema, error) bool) { + +} diff --git a/go/vt/discovery/replicationlag.go b/go/vt/discovery/replicationlag.go index 71ab78fd15b..e7afa5ca844 100644 --- a/go/vt/discovery/replicationlag.go +++ b/go/vt/discovery/replicationlag.go @@ -23,15 +23,44 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/viperutil" "vitess.io/vitess/go/vt/servenv" ) var ( + configKey = viperutil.KeyPrefixFunc("discovery") // lowReplicationLag defines the duration that replication lag is low enough that the VTTablet is considered healthy. - lowReplicationLag time.Duration - highReplicationLagMinServing time.Duration - minNumTablets int - legacyReplicationLagAlgorithm bool + lowReplicationLag = viperutil.Configure( + configKey("low_replication_lag"), + viperutil.Options[time.Duration]{ + FlagName: "discovery_low_replication_lag", + Default: 30 * time.Second, + Dynamic: true, + }, + ) + highReplicationLagMinServing = viperutil.Configure( + configKey("high_replication_lag"), + viperutil.Options[time.Duration]{ + FlagName: "discovery_high_replication_lag_minimum_serving", + Default: 2 * time.Hour, + Dynamic: true, + }, + ) + minNumTablets = viperutil.Configure( + configKey("min_number_serving_vttablets"), + viperutil.Options[int]{ + FlagName: "min_number_serving_vttablets", + Default: 2, + Dynamic: true, + }, + ) + legacyReplicationLagAlgorithm = viperutil.Configure( + configKey("legacy_replication_lag_algorithm"), + viperutil.Options[bool]{ + FlagName: "legacy_replication_lag_algorithm", + Default: true, + }, + ) ) func init() { @@ -39,52 +68,59 @@ func init() { } func registerReplicationFlags(fs *pflag.FlagSet) { - fs.DurationVar(&lowReplicationLag, "discovery_low_replication_lag", 30*time.Second, "Threshold below which replication lag is considered low enough to be healthy.") - fs.DurationVar(&highReplicationLagMinServing, "discovery_high_replication_lag_minimum_serving", 2*time.Hour, "Threshold above which replication lag is considered too high when applying the min_number_serving_vttablets flag.") - fs.IntVar(&minNumTablets, "min_number_serving_vttablets", 2, "The minimum number of vttablets for each replicating tablet_type (e.g. replica, rdonly) that will be continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving.") - fs.BoolVar(&legacyReplicationLagAlgorithm, "legacy_replication_lag_algorithm", true, "Use the legacy algorithm when selecting vttablets for serving.") + fs.Duration("discovery_low_replication_lag", lowReplicationLag.Default(), "Threshold below which replication lag is considered low enough to be healthy.") + fs.Duration("discovery_high_replication_lag_minimum_serving", highReplicationLagMinServing.Default(), "Threshold above which replication lag is considered too high when applying the min_number_serving_vttablets flag.") + fs.Int("min_number_serving_vttablets", minNumTablets.Default(), "The minimum number of vttablets for each replicating tablet_type (e.g. replica, rdonly) that will be continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving.") + fs.Bool("legacy_replication_lag_algorithm", legacyReplicationLagAlgorithm.Default(), "Use the legacy algorithm when selecting vttablets for serving.") + + viperutil.BindFlags(fs, + lowReplicationLag, + highReplicationLagMinServing, + minNumTablets, + legacyReplicationLagAlgorithm, + ) } // GetLowReplicationLag getter for use by debugenv func GetLowReplicationLag() time.Duration { - return lowReplicationLag + return lowReplicationLag.Get() } // SetLowReplicationLag setter for use by debugenv func SetLowReplicationLag(lag time.Duration) { - lowReplicationLag = lag + lowReplicationLag.Set(lag) } // GetHighReplicationLagMinServing getter for use by debugenv func GetHighReplicationLagMinServing() time.Duration { - return highReplicationLagMinServing + return highReplicationLagMinServing.Get() } // SetHighReplicationLagMinServing setter for use by debugenv func SetHighReplicationLagMinServing(lag time.Duration) { - highReplicationLagMinServing = lag + highReplicationLagMinServing.Set(lag) } // GetMinNumTablets getter for use by debugenv func GetMinNumTablets() int { - return minNumTablets + return minNumTablets.Get() } // SetMinNumTablets setter for use by debugenv func SetMinNumTablets(numTablets int) { - minNumTablets = numTablets + minNumTablets.Set(numTablets) } // IsReplicationLagHigh verifies that the given LegacytabletHealth refers to a tablet with high // replication lag, i.e. higher than the configured discovery_low_replication_lag flag. func IsReplicationLagHigh(tabletHealth *TabletHealth) bool { - return float64(tabletHealth.Stats.ReplicationLagSeconds) > lowReplicationLag.Seconds() + return float64(tabletHealth.Stats.ReplicationLagSeconds) > lowReplicationLag.Get().Seconds() } // IsReplicationLagVeryHigh verifies that the given LegacytabletHealth refers to a tablet with very high // replication lag, i.e. higher than the configured discovery_high_replication_lag_minimum_serving flag. func IsReplicationLagVeryHigh(tabletHealth *TabletHealth) bool { - return float64(tabletHealth.Stats.ReplicationLagSeconds) > highReplicationLagMinServing.Seconds() + return float64(tabletHealth.Stats.ReplicationLagSeconds) > highReplicationLagMinServing.Get().Seconds() } // FilterStatsByReplicationLag filters the list of TabletHealth by TabletHealth.Stats.ReplicationLagSeconds. @@ -113,13 +149,13 @@ func IsReplicationLagVeryHigh(tabletHealth *TabletHealth) bool { // - degraded_threshold: this is only used by vttablet for display. It should match // discovery_low_replication_lag here, so the vttablet status display matches what vtgate will do of it. func FilterStatsByReplicationLag(tabletHealthList []*TabletHealth) []*TabletHealth { - if !legacyReplicationLagAlgorithm { + if !legacyReplicationLagAlgorithm.Get() { return filterStatsByLag(tabletHealthList) } res := filterStatsByLagWithLegacyAlgorithm(tabletHealthList) // run the filter again if exactly one tablet is removed, // and we have spare tablets. - if len(res) > minNumTablets && len(res) == len(tabletHealthList)-1 { + if len(res) > minNumTablets.Get() && len(res) == len(tabletHealthList)-1 { res = filterStatsByLagWithLegacyAlgorithm(res) } return res @@ -145,7 +181,7 @@ func filterStatsByLag(tabletHealthList []*TabletHealth) []*TabletHealth { // Pick those with low replication lag, but at least minNumTablets tablets regardless. res := make([]*TabletHealth, 0, len(list)) for i := 0; i < len(list); i++ { - if !IsReplicationLagHigh(list[i].ts) || i < minNumTablets { + if !IsReplicationLagHigh(list[i].ts) || i < minNumTablets.Get() { res = append(res, list[i].ts) } } @@ -186,7 +222,7 @@ func filterStatsByLagWithLegacyAlgorithm(tabletHealthList []*TabletHealth) []*Ta res = append(res, ts) } } - if len(res) >= minNumTablets { + if len(res) >= minNumTablets.Get() { return res } // return at least minNumTablets tablets to avoid over loading, @@ -219,8 +255,8 @@ func filterStatsByLagWithLegacyAlgorithm(tabletHealthList []*TabletHealth) []*Ta sort.Sort(byReplag(snapshots)) // Pick the first minNumTablets tablets. - res = make([]*TabletHealth, 0, minNumTablets) - for i := 0; i < min(minNumTablets, len(snapshots)); i++ { + res = make([]*TabletHealth, 0, minNumTablets.Get()) + for i := 0; i < min(minNumTablets.Get(), len(snapshots)); i++ { res = append(res, snapshots[i].ts) } return res @@ -242,13 +278,6 @@ func (a tabletLagSnapshotList) Len() int { return len(a) } func (a tabletLagSnapshotList) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a tabletLagSnapshotList) Less(i, j int) bool { return a[i].replag < a[j].replag } -func min(a, b int) int { - if a > b { - return b - } - return a -} - // mean calculates the mean value over the given list, // while excluding the item with the specified index. func mean(tabletHealthList []*TabletHealth, idxExclude int) (uint64, error) { diff --git a/go/vt/discovery/replicationlag_test.go b/go/vt/discovery/replicationlag_test.go index 80c3921d052..5cef05a3f4b 100644 --- a/go/vt/discovery/replicationlag_test.go +++ b/go/vt/discovery/replicationlag_test.go @@ -28,23 +28,24 @@ import ( ) func init() { - lowReplicationLag = 30 * time.Second - highReplicationLagMinServing = 2 * time.Hour - minNumTablets = 2 - legacyReplicationLagAlgorithm = true + lowReplicationLag.Set(30 * time.Second) + highReplicationLagMinServing.Set(2 * time.Hour) + minNumTablets.Set(2) + legacyReplicationLagAlgorithm.Set(true) } // testSetLegacyReplicationLagAlgorithm is a test helper function, if this is used by a production code path, something is wrong. func testSetLegacyReplicationLagAlgorithm(newLegacy bool) { - legacyReplicationLagAlgorithm = newLegacy + legacyReplicationLagAlgorithm.Set(newLegacy) } // testSetMinNumTablets is a test helper function, if this is used by a production code path, something is wrong. func testSetMinNumTablets(newMin int) { - minNumTablets = newMin + minNumTablets.Set(newMin) } func TestFilterByReplicationLagUnhealthy(t *testing.T) { + defer utils.EnsureNoLeaks(t) // 1 healthy serving tablet, 1 not healthy ts1 := &TabletHealth{ Tablet: topo.NewTablet(1, "cell", "host1"), @@ -62,6 +63,7 @@ func TestFilterByReplicationLagUnhealthy(t *testing.T) { } func TestFilterByReplicationLag(t *testing.T) { + defer utils.EnsureNoLeaks(t) // Use simplified logic testSetLegacyReplicationLagAlgorithm(false) @@ -138,6 +140,7 @@ func TestFilterByReplicationLag(t *testing.T) { } func TestFilterByReplicationLagThreeTabletMin(t *testing.T) { + defer utils.EnsureNoLeaks(t) // Use at least 3 tablets if possible testSetMinNumTablets(3) // lags of (1s, 1s, 10m, 11m) - returns at least32 items where the slightly delayed ones that are returned are the 10m and 11m ones. @@ -194,6 +197,7 @@ func TestFilterByReplicationLagThreeTabletMin(t *testing.T) { } func TestFilterStatsByReplicationLagOneTabletMin(t *testing.T) { + defer utils.EnsureNoLeaks(t) // Use at least 1 tablets if possible testSetMinNumTablets(1) // lags of (1s, 100m) - return only healthy tablet if that is all that is available. diff --git a/go/vt/discovery/tablet_health_check.go b/go/vt/discovery/tablet_health_check.go index b3cc06e2d10..24496155e74 100644 --- a/go/vt/discovery/tablet_health_check.go +++ b/go/vt/discovery/tablet_health_check.go @@ -189,7 +189,7 @@ func (thc *tabletHealthCheck) processResponse(hc *HealthCheckImpl, shr *query.St prevTarget.TabletType != topodata.TabletType_PRIMARY && prevTarget.TabletType == shr.Target.TabletType && thc.isTrivialReplagChange(shr.RealtimeStats) thc.lastResponseTimestamp = time.Now() thc.Target = shr.Target - thc.PrimaryTermStartTime = shr.TabletExternallyReparentedTimestamp + thc.PrimaryTermStartTime = shr.PrimaryTermStartTimestamp thc.Stats = shr.RealtimeStats thc.LastError = healthErr reason := "healthCheck update" @@ -212,7 +212,7 @@ func (thc *tabletHealthCheck) isTrivialReplagChange(newStats *query.RealtimeStat } // Skip replag filter when replag remains in the low rep lag range, // which should be the case majority of the time. - lowRepLag := lowReplicationLag.Seconds() + lowRepLag := lowReplicationLag.Get().Seconds() oldRepLag := float64(thc.Stats.ReplicationLagSeconds) newRepLag := float64(newStats.ReplicationLagSeconds) if oldRepLag <= lowRepLag && newRepLag <= lowRepLag { diff --git a/go/vt/discovery/tablet_picker.go b/go/vt/discovery/tablet_picker.go index cb0449c6191..99d95848d19 100644 --- a/go/vt/discovery/tablet_picker.go +++ b/go/vt/discovery/tablet_picker.go @@ -17,7 +17,9 @@ limitations under the License. package discovery import ( + "context" "fmt" + "io" "math/rand" "sort" "strings" @@ -25,27 +27,52 @@ import ( "time" "vitess.io/vitess/go/stats" - + "vitess.io/vitess/go/vt/grpcclient" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/tabletconn" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" +) - "vitess.io/vitess/go/vt/vttablet/tabletconn" +type TabletPickerCellPreference int - "vitess.io/vitess/go/vt/log" +const ( + // PreferLocalWithAlias gives preference to the local cell first, then specified cells, if any. + // This is the default when no other option is provided. + TabletPickerCellPreference_PreferLocalWithAlias TabletPickerCellPreference = iota + // OnlySpecified only picks tablets from the list of cells given. + TabletPickerCellPreference_OnlySpecified +) - "context" +type TabletPickerTabletOrder int - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vterrors" +const ( + // All provided tablet types are given equal priority. This is the default. + TabletPickerTabletOrder_Any TabletPickerTabletOrder = iota + // Provided tablet types are expected to be prioritized in the given order. + TabletPickerTabletOrder_InOrder + InOrderHint = "in_order:" ) var ( tabletPickerRetryDelay = 30 * time.Second muTabletPickerRetryDelay sync.Mutex globalTPStats *tabletPickerStats - inOrderHint = "in_order:" + + tabletPickerCellPreferenceMap = map[string]TabletPickerCellPreference{ + "preferlocalwithalias": TabletPickerCellPreference_PreferLocalWithAlias, + "onlyspecified": TabletPickerCellPreference_OnlySpecified, + } + + tabletPickerTabletOrderMap = map[string]TabletPickerTabletOrder{ + "any": TabletPickerTabletOrder_Any, + "inorder": TabletPickerTabletOrder_InOrder, + } ) // GetTabletPickerRetryDelay synchronizes changes to tabletPickerRetryDelay. Used in tests only at the moment @@ -62,18 +89,66 @@ func SetTabletPickerRetryDelay(delay time.Duration) { tabletPickerRetryDelay = delay } +type TabletPickerOptions struct { + CellPreference string + TabletOrder string +} + +func parseTabletPickerCellPreferenceString(str string) (TabletPickerCellPreference, error) { + // return default if blank + if str == "" { + return TabletPickerCellPreference_PreferLocalWithAlias, nil + } + + if c, ok := tabletPickerCellPreferenceMap[strings.ToLower(str)]; ok { + return c, nil + } + + return -1, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid cell preference: %v", str) +} + +func parseTabletPickerTabletOrderString(str string) (TabletPickerTabletOrder, error) { + // return default if blank + if str == "" { + return TabletPickerTabletOrder_Any, nil + } + + if o, ok := tabletPickerTabletOrderMap[strings.ToLower(str)]; ok { + return o, nil + } + + return -1, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid tablet order type: %v", str) +} + +type localCellInfo struct { + localCell string + cellsInAlias map[string]string +} + // TabletPicker gives a simplified API for picking tablets. type TabletPicker struct { - ts *topo.Server - cells []string - keyspace string - shard string - tabletTypes []topodatapb.TabletType - inOrder bool + ts *topo.Server + cells []string + keyspace string + shard string + tabletTypes []topodatapb.TabletType + inOrder bool + cellPref TabletPickerCellPreference + localCellInfo localCellInfo } // NewTabletPicker returns a TabletPicker. -func NewTabletPicker(ts *topo.Server, cells []string, keyspace, shard, tabletTypesStr string) (*TabletPicker, error) { +func NewTabletPicker( + ctx context.Context, + ts *topo.Server, + cells []string, + localCell, keyspace, shard, tabletTypesStr string, + options TabletPickerOptions, +) (*TabletPicker, error) { + // Keep inOrder parsing here for backward compatability until TabletPickerTabletOrder is fully adopted. + if tabletTypesStr == "" { + tabletTypesStr = "replica,rdonly,primary" + } tabletTypes, inOrder, err := ParseTabletTypesAndOrder(tabletTypesStr) if err != nil { return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "failed to parse list of tablet types: %v", tabletTypesStr) @@ -92,23 +167,126 @@ func NewTabletPicker(ts *topo.Server, cells []string, keyspace, shard, tabletTyp return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, fmt.Sprintf("Missing required field(s) for tablet picker: %s", strings.Join(missingFields, ", "))) } + + // Resolve tablet picker options + cellPref, err := parseTabletPickerCellPreferenceString(options.CellPreference) + if err != nil { + return nil, err + } + + // For backward compatibility only parse the options for tablet ordering + // if the in_order hint wasn't already specified. Otherwise it could be overridden. + // We can remove this check once the in_order hint is deprecated. + if !inOrder { + order, err := parseTabletPickerTabletOrderString(options.TabletOrder) + if err != nil { + return nil, err + } + switch order { + case TabletPickerTabletOrder_Any: + inOrder = false + case TabletPickerTabletOrder_InOrder: + inOrder = true + } + } + + aliasCellMap := make(map[string]string) + if cellPref == TabletPickerCellPreference_PreferLocalWithAlias { + if localCell == "" { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot have local cell preference without local cell") + } + + // Add local cell to the list of cells for tablet picking. + // This will be de-duped later if the local cell already exists in the original list - see: dedupeCells() + cells = append(cells, localCell) + aliasName := topo.GetAliasByCell(ctx, ts, localCell) + + // If an alias exists + if aliasName != localCell { + alias, err := ts.GetCellsAlias(ctx, aliasName, false) + if err != nil { + return nil, vterrors.Wrap(err, "error fetching local cell alias") + } + + // Add the aliasName to the list of cells for tablet picking. + cells = append(cells, aliasName) + + // Create a map of the cells in the alias to make lookup faster later when we're giving preference to these. + // see prioritizeTablets() + for _, c := range alias.Cells { + aliasCellMap[c] = c + } + } + } + return &TabletPicker{ - ts: ts, - cells: cells, - keyspace: keyspace, - shard: shard, - tabletTypes: tabletTypes, - inOrder: inOrder, + ts: ts, + cells: dedupeCells(cells), + localCellInfo: localCellInfo{localCell: localCell, cellsInAlias: aliasCellMap}, + keyspace: keyspace, + shard: shard, + tabletTypes: tabletTypes, + inOrder: inOrder, + cellPref: cellPref, }, nil } -// PickForStreaming picks an available tablet. -// All tablets that belong to tp.cells are evaluated and one is -// chosen at random. +// dedupeCells is used to remove duplicates in the cell list in case it is passed in +// and exists in the local cell's alias. Can happen if CellPreference is PreferLocalWithAlias. +func dedupeCells(cells []string) []string { + keys := make(map[string]bool) + dedupedCells := []string{} + + for _, c := range cells { + if _, value := keys[c]; !value { + keys[c] = true + dedupedCells = append(dedupedCells, c) + } + } + return dedupedCells +} + +// prioritizeTablets orders the candidate pool of tablets based on CellPreference. +// If CellPreference is PreferLocalWithAlias then tablets in the local cell will be prioritized for selection, +// followed by the tablets within the local cell's alias, and finally any others specified by the client. +// If CellPreference is OnlySpecified, then tablets will only be selected randomly from the cells specified by the client. +func (tp *TabletPicker) prioritizeTablets(candidates []*topo.TabletInfo) (sameCell, sameAlias, allOthers []*topo.TabletInfo) { + for _, c := range candidates { + if c.Alias.Cell == tp.localCellInfo.localCell { + sameCell = append(sameCell, c) + } else if _, ok := tp.localCellInfo.cellsInAlias[c.Alias.Cell]; ok { + sameAlias = append(sameAlias, c) + } else { + allOthers = append(allOthers, c) + } + } + + return sameCell, sameAlias, allOthers +} + +func (tp *TabletPicker) orderByTabletType(candidates []*topo.TabletInfo) []*topo.TabletInfo { + // Sort candidates slice such that tablets appear in same tablet type order as in tp.tabletTypes + orderMap := map[topodatapb.TabletType]int{} + for i, t := range tp.tabletTypes { + orderMap[t] = i + } + sort.Slice(candidates, func(i, j int) bool { + if orderMap[candidates[i].Type] == orderMap[candidates[j].Type] { + // identical tablet types: randomize order of tablets for this type + return rand.Intn(2) == 0 // 50% chance + } + return orderMap[candidates[i].Type] < orderMap[candidates[j].Type] + }) + + return candidates +} + +// PickForStreaming picks a tablet that is healthy and serving. +// Selection is based on CellPreference. +// See prioritizeTablets for prioritization logic. func (tp *TabletPicker) PickForStreaming(ctx context.Context) (*topodatapb.Tablet, error) { - rand.Seed(time.Now().UnixNano()) - // keep trying at intervals (tabletPickerRetryDelay) until a tablet is found - // or the context is canceled + // Keep trying at intervals (tabletPickerRetryDelay) until a healthy + // serving tablet is found or the context is cancelled. for { select { case <-ctx.Done(): @@ -116,29 +294,40 @@ func (tp *TabletPicker) PickForStreaming(ctx context.Context) (*topodatapb.Table default: } candidates := tp.GetMatchingTablets(ctx) - if tp.inOrder { - // Sort candidates slice such that tablets appear in same tablet type order as in tp.tabletTypes - orderMap := map[topodatapb.TabletType]int{} - for i, t := range tp.tabletTypes { - orderMap[t] = i + if tp.cellPref == TabletPickerCellPreference_PreferLocalWithAlias { + sameCellCandidates, sameAliasCandidates, allOtherCandidates := tp.prioritizeTablets(candidates) + + if tp.inOrder { + sameCellCandidates = tp.orderByTabletType(sameCellCandidates) + sameAliasCandidates = tp.orderByTabletType(sameAliasCandidates) + allOtherCandidates = tp.orderByTabletType(allOtherCandidates) + } else { + // Randomize candidates + rand.Shuffle(len(sameCellCandidates), func(i, j int) { + sameCellCandidates[i], sameCellCandidates[j] = sameCellCandidates[j], sameCellCandidates[i] + }) + rand.Shuffle(len(sameAliasCandidates), func(i, j int) { + sameAliasCandidates[i], sameAliasCandidates[j] = sameAliasCandidates[j], sameAliasCandidates[i] + }) + rand.Shuffle(len(allOtherCandidates), func(i, j int) { + allOtherCandidates[i], allOtherCandidates[j] = allOtherCandidates[j], allOtherCandidates[i] + }) } - sort.Slice(candidates, func(i, j int) bool { - if orderMap[candidates[i].Type] == orderMap[candidates[j].Type] { - // identical tablet types: randomize order of tablets for this type - return rand.Intn(2) == 0 // 50% chance - } - return orderMap[candidates[i].Type] < orderMap[candidates[j].Type] - }) + + candidates = append(sameCellCandidates, sameAliasCandidates...) + candidates = append(candidates, allOtherCandidates...) + } else if tp.inOrder { + candidates = tp.orderByTabletType(candidates) } else { - // Randomize candidates + // Randomize candidates. rand.Shuffle(len(candidates), func(i, j int) { candidates[i], candidates[j] = candidates[j], candidates[i] }) } if len(candidates) == 0 { - // if no candidates were found, sleep and try again + // If no viable candidates were found, sleep and try again. tp.incNoTabletFoundStat() - log.Infof("No tablet found for streaming, shard %s.%s, cells %v, tabletTypes %v, sleeping for %.3f seconds", + log.Infof("No healthy serving tablet found for streaming, shard %s.%s, cells %v, tabletTypes %v, sleeping for %.3f seconds.", tp.keyspace, tp.shard, tp.cells, tp.tabletTypes, float64(GetTabletPickerRetryDelay().Milliseconds())/1000.0) timer := time.NewTimer(GetTabletPickerRetryDelay()) select { @@ -149,70 +338,61 @@ func (tp *TabletPicker) PickForStreaming(ctx context.Context) (*topodatapb.Table } continue } - for _, ti := range candidates { - // try to connect to tablet - if conn, err := tabletconn.GetDialer()(ti.Tablet, true); err == nil { - // OK to use ctx here because it is not actually used by the underlying Close implementation - _ = conn.Close(ctx) - log.Infof("tablet picker found tablet %s", ti.Tablet.String()) - return ti.Tablet, nil - } - // err found - log.Warningf("unable to connect to tablet for alias %v", ti.Alias) - } - // Got here? Means we iterated all tablets and did not find a healthy one - tp.incNoTabletFoundStat() + log.Infof("Tablet picker found a healthy serving tablet for streaming: %s", candidates[0].Tablet.String()) + return candidates[0].Tablet, nil } } -// GetMatchingTablets returns a list of TabletInfo for tablets -// that match the cells, keyspace, shard and tabletTypes for this TabletPicker +// GetMatchingTablets returns a list of TabletInfo for healthy +// serving tablets that match the cells, keyspace, shard and +// tabletTypes for this TabletPicker. func (tp *TabletPicker) GetMatchingTablets(ctx context.Context) []*topo.TabletInfo { - // Special handling for PRIMARY tablet type - // Since there is only one primary, we ignore cell and find the primary + // Special handling for PRIMARY tablet type: since there is only + // one primary per shard, we ignore cell and find the primary. aliases := make([]*topodatapb.TabletAlias, 0) if len(tp.tabletTypes) == 1 && tp.tabletTypes[0] == topodatapb.TabletType_PRIMARY { shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancel() si, err := tp.ts.GetShard(shortCtx, tp.keyspace, tp.shard) if err != nil { - log.Errorf("error getting shard %s/%s: %s", tp.keyspace, tp.shard, err.Error()) + log.Errorf("Error getting shard %s/%s: %v", tp.keyspace, tp.shard, err) return nil } aliases = append(aliases, si.PrimaryAlias) } else { actualCells := make([]string, 0) for _, cell := range tp.cells { - // check if cell is actually an alias - // non-blocking read so that this is fast + // Check if cell is actually an alias; using a + // non-blocking read so that this is fast. shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancel() _, err := tp.ts.GetCellInfo(shortCtx, cell, false) if err != nil { - // not a valid cell, check whether it is a cell alias + // Not a valid cell, check whether it is a cell alias... shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancel() alias, err := tp.ts.GetCellsAlias(shortCtx, cell, false) - // if we get an error, either cellAlias doesn't exist or it isn't a cell alias at all. Ignore and continue + // If we get an error, either cellAlias doesn't exist or + // it isn't a cell alias at all; ignore and continue. if err == nil { actualCells = append(actualCells, alias.Cells...) } else { log.Infof("Unable to resolve cell %s, ignoring", cell) } } else { - // valid cell, add it to our list + // Valid cell, add it to our list. actualCells = append(actualCells, cell) } } + for _, cell := range actualCells { shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancel() - // match cell, keyspace and shard + // Match cell, keyspace, and shard. sri, err := tp.ts.GetShardReplication(shortCtx, cell, tp.keyspace, tp.shard) if err != nil { continue } - for _, node := range sri.Nodes { aliases = append(aliases, node.TabletAlias) } @@ -222,33 +402,47 @@ func (tp *TabletPicker) GetMatchingTablets(ctx context.Context) []*topo.TabletIn if len(aliases) == 0 { return nil } + shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancel() tabletMap, err := tp.ts.GetTabletMap(shortCtx, aliases) if err != nil { - log.Warningf("error fetching tablets from topo: %v", err) - // If we get a partial result we can still use it, otherwise return + log.Warningf("Error fetching tablets from topo: %v", err) + // If we get a partial result we can still use it, otherwise return. if len(tabletMap) == 0 { return nil } } + tablets := make([]*topo.TabletInfo, 0, len(aliases)) for _, tabletAlias := range aliases { tabletInfo, ok := tabletMap[topoproto.TabletAliasString(tabletAlias)] if !ok { - // Either tablet disappeared on us, or we got a partial result (GetTabletMap ignores - // topo.ErrNoNode). Just log a warning - log.Warningf("failed to load tablet %v", tabletAlias) + // Either tablet disappeared on us, or we got a partial result + // (GetTabletMap ignores topo.ErrNoNode); just log a warning. + log.Warningf("Tablet picker failed to load tablet %v", tabletAlias) } else if topoproto.IsTypeInList(tabletInfo.Type, tp.tabletTypes) { - tablets = append(tablets, tabletInfo) + // Try to connect to the tablet and confirm that it's usable. + if conn, err := tabletconn.GetDialer()(tabletInfo.Tablet, grpcclient.FailFast(true)); err == nil { + // Ensure that the tablet is healthy and serving. + shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) + defer cancel() + if err := conn.StreamHealth(shortCtx, func(shr *querypb.StreamHealthResponse) error { + if shr != nil && shr.Serving && shr.RealtimeStats != nil && shr.RealtimeStats.HealthError == "" { + return io.EOF // End the stream + } + return vterrors.New(vtrpcpb.Code_INTERNAL, "tablet is not healthy and serving") + }); err == nil || err == io.EOF { + tablets = append(tablets, tabletInfo) + } + _ = conn.Close(ctx) + } } } return tablets } func init() { - // TODO(sougou): consolidate this call to be once per process. - rand.Seed(time.Now().UnixNano()) globalTPStats = newTabletPickerStats() } @@ -267,7 +461,7 @@ func (tp *TabletPicker) incNoTabletFoundStat() { globalTPStats.mu.Lock() defer globalTPStats.mu.Unlock() cells := strings.Join(tp.cells, "_") - tabletTypes := strings.Join(topoproto.MakeStringTypeList(tp.tabletTypes), "_") + tabletTypes := strings.ReplaceAll(topoproto.MakeStringTypeCSV(tp.tabletTypes), ",", "_") labels := []string{cells, tp.keyspace, tp.shard, tabletTypes} globalTPStats.noTabletFoundError.Add(labels, 1) } diff --git a/go/vt/discovery/tablet_picker_test.go b/go/vt/discovery/tablet_picker_test.go index ed071af13ad..2999c251e93 100644 --- a/go/vt/discovery/tablet_picker_test.go +++ b/go/vt/discovery/tablet_picker_test.go @@ -1,12 +1,9 @@ /* Copyright 2019 The Vitess Authors. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -25,229 +22,315 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" + + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) -func TestPickSimple(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell"}) - want := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) +func TestPickPrimary(t *testing.T) { + defer utils.EnsureNoLeaks(t) + ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) + defer cancel() + + te := newPickerTestEnv(t, ctx, []string{"cell", "otherCell"}) + want := addTablet(ctx, te, 100, topodatapb.TabletType_PRIMARY, "cell", true, true) defer deleteTablet(t, te, want) + ctx, cancel = context.WithTimeout(ctx, 200*time.Millisecond) + defer cancel() + _, err := te.topoServ.UpdateShardFields(ctx, te.keyspace, te.shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = want.Alias + return nil + }) + require.NoError(t, err) - tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica") + tp, err := NewTabletPicker(ctx, te.topoServ, []string{"otherCell"}, "cell", te.keyspace, te.shard, "primary", TabletPickerOptions{}) require.NoError(t, err) - tablet, err := tp.PickForStreaming(context.Background()) + ctx2, cancel2 := context.WithTimeout(ctx, 200*time.Millisecond) + defer cancel2() + tablet, err := tp.PickForStreaming(ctx2) require.NoError(t, err) assert.True(t, proto.Equal(want, tablet), "Pick: %v, want %v", tablet, want) } -func TestPickFromTwoHealthy(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell"}) - want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) - defer deleteTablet(t, te, want1) - want2 := addTablet(te, 101, topodatapb.TabletType_RDONLY, "cell", true, true) - defer deleteTablet(t, te, want2) - - tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica,rdonly") - require.NoError(t, err) - - // In 20 attempts, both tablet types must be picked at least once. - var picked1, picked2 bool - for i := 0; i < 20; i++ { - tablet, err := tp.PickForStreaming(context.Background()) - require.NoError(t, err) - if proto.Equal(tablet, want1) { - picked1 = true - } - if proto.Equal(tablet, want2) { - picked2 = true - } +func TestPickLocalPreferences(t *testing.T) { + defer utils.EnsureNoLeaks(t) + type tablet struct { + id uint32 + typ topodatapb.TabletType + cell string } - assert.True(t, picked1) - assert.True(t, picked2) -} -func TestPickInOrder1(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell"}) - want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) - defer deleteTablet(t, te, want1) - want2 := addTablet(te, 101, topodatapb.TabletType_RDONLY, "cell", true, true) - defer deleteTablet(t, te, want2) + type testCase struct { + name string - tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "in_order:replica,rdonly") - require.NoError(t, err) + //inputs + tablets []tablet + envCells []string + inCells []string + localCell string + inTabletTypes string + options TabletPickerOptions - // In 20 attempts, we always pick the first healthy tablet in order - var picked1, picked2 bool - for i := 0; i < 20; i++ { - tablet, err := tp.PickForStreaming(context.Background()) - require.NoError(t, err) - if proto.Equal(tablet, want1) { - picked1 = true - } - if proto.Equal(tablet, want2) { - picked2 = true - } + //expected + tpCells []string + wantTablets []uint32 } - assert.True(t, picked1) - assert.False(t, picked2) -} -func TestPickInOrder2(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell"}) - want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) - defer deleteTablet(t, te, want1) - want2 := addTablet(te, 101, topodatapb.TabletType_RDONLY, "cell", true, true) - defer deleteTablet(t, te, want2) - - tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "in_order:rdonly,replica") - require.NoError(t, err) - - // In 20 attempts, we always pick the first healthy tablet in order - var picked1, picked2 bool - for i := 0; i < 20; i++ { - tablet, err := tp.PickForStreaming(context.Background()) - require.NoError(t, err) - if proto.Equal(tablet, want1) { - picked1 = true - } - if proto.Equal(tablet, want2) { - picked2 = true - } - } - assert.False(t, picked1) - assert.True(t, picked2) -} - -func TestPickInOrderMultipleInGroup(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell"}) - want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) - defer deleteTablet(t, te, want1) - want2 := addTablet(te, 101, topodatapb.TabletType_RDONLY, "cell", true, true) - defer deleteTablet(t, te, want2) - want3 := addTablet(te, 102, topodatapb.TabletType_RDONLY, "cell", true, true) - defer deleteTablet(t, te, want3) - want4 := addTablet(te, 103, topodatapb.TabletType_RDONLY, "cell", true, true) - defer deleteTablet(t, te, want4) - - tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "in_order:rdonly,replica") - require.NoError(t, err) - - // In 40 attempts, we pick each of the three RDONLY, but never the REPLICA - var picked1, picked2, picked3, picked4 bool - for i := 0; i < 40; i++ { - tablet, err := tp.PickForStreaming(context.Background()) - require.NoError(t, err) - if proto.Equal(tablet, want1) { - picked1 = true - } - if proto.Equal(tablet, want2) { - picked2 = true - } - if proto.Equal(tablet, want3) { - picked3 = true - } - if proto.Equal(tablet, want4) { - picked4 = true - } + tcases := []testCase{ + { + name: "pick simple", + tablets: []tablet{ + {100, topodatapb.TabletType_REPLICA, "cell"}, + }, + envCells: []string{"cell"}, + inCells: []string{"cell"}, + localCell: "cell", + inTabletTypes: "replica", + options: TabletPickerOptions{}, + tpCells: []string{"cell", "cella"}, + wantTablets: []uint32{100}, + }, { + name: "pick from two healthy", + tablets: []tablet{ + {100, topodatapb.TabletType_REPLICA, "cell"}, + {101, topodatapb.TabletType_RDONLY, "cell"}, + }, + envCells: []string{"cell"}, + inCells: []string{"cell"}, + localCell: "cell", + inTabletTypes: "replica,rdonly", + options: TabletPickerOptions{}, + tpCells: []string{"cell", "cella"}, + wantTablets: []uint32{100, 101}, + }, { + name: "pick in order replica", + tablets: []tablet{ + {100, topodatapb.TabletType_REPLICA, "cell"}, + {101, topodatapb.TabletType_RDONLY, "cell"}, + }, + envCells: []string{"cell"}, + inCells: []string{"cell"}, + localCell: "cell", + inTabletTypes: "in_order:replica,rdonly", + options: TabletPickerOptions{}, + tpCells: []string{"cell", "cella"}, + wantTablets: []uint32{100}, + }, { + name: "pick in order rdonly", + tablets: []tablet{ + {100, topodatapb.TabletType_REPLICA, "cell"}, + {101, topodatapb.TabletType_RDONLY, "cell"}, + }, + envCells: []string{"cell"}, + inCells: []string{"cell"}, + localCell: "cell", + inTabletTypes: "in_order:rdonly,replica", + options: TabletPickerOptions{}, + tpCells: []string{"cell", "cella"}, + wantTablets: []uint32{101}, + }, { + name: "pick in order multiple in group", + tablets: []tablet{ + {100, topodatapb.TabletType_REPLICA, "cell"}, + {101, topodatapb.TabletType_RDONLY, "cell"}, + {102, topodatapb.TabletType_RDONLY, "cell"}, + {103, topodatapb.TabletType_RDONLY, "cell"}, + }, + envCells: []string{"cell"}, + inCells: []string{"cell"}, + localCell: "cell", + inTabletTypes: "in_order:rdonly,replica", + options: TabletPickerOptions{}, + tpCells: []string{"cell", "cella"}, + wantTablets: []uint32{101, 102, 103}, + }, { + // Same test as above, except the in order preference is passed via the new TabletPickerOptions param. + // This will replace the above test when we deprecate the "in_order" hint in the tabletTypeStr + name: "pick in order multiple in group with new picker option", + tablets: []tablet{ + {100, topodatapb.TabletType_REPLICA, "cell"}, + {101, topodatapb.TabletType_RDONLY, "cell"}, + {102, topodatapb.TabletType_RDONLY, "cell"}, + {103, topodatapb.TabletType_RDONLY, "cell"}, + }, + envCells: []string{"cell"}, + inCells: []string{"cell"}, + localCell: "cell", + inTabletTypes: "rdonly,replica", + options: TabletPickerOptions{TabletOrder: "InOrder"}, + tpCells: []string{"cell", "cella"}, + wantTablets: []uint32{101, 102, 103}, + }, { + name: "picker respects tablet type", + tablets: []tablet{ + {100, topodatapb.TabletType_REPLICA, "cell"}, + {101, topodatapb.TabletType_PRIMARY, "cell"}, + }, + envCells: []string{"cell"}, + inCells: []string{"cell"}, + localCell: "cell", + inTabletTypes: "replica,rdonly", + options: TabletPickerOptions{}, + tpCells: []string{"cell", "cella"}, + wantTablets: []uint32{100}, + }, { + name: "pick multi cell", + tablets: []tablet{ + {100, topodatapb.TabletType_REPLICA, "cell"}, + }, + envCells: []string{"cell", "otherCell"}, + inCells: []string{"cell", "otherCell"}, + localCell: "cell", + inTabletTypes: "replica", + options: TabletPickerOptions{}, + tpCells: []string{"cell", "otherCell", "cella"}, + wantTablets: []uint32{100}, + }, { + name: "pick from other cell", + tablets: []tablet{ + {100, topodatapb.TabletType_REPLICA, "otherCell"}, + }, + envCells: []string{"cell", "otherCell"}, + inCells: []string{"cell", "otherCell"}, + localCell: "cell", + inTabletTypes: "replica", + options: TabletPickerOptions{}, + tpCells: []string{"cell", "otherCell", "cella"}, + wantTablets: []uint32{100}, + }, { + name: "don't pick from other cell", + tablets: []tablet{ + {100, topodatapb.TabletType_REPLICA, "cell"}, + {101, topodatapb.TabletType_REPLICA, "otherCell"}, + }, + envCells: []string{"cell", "otherCell"}, + inCells: []string{"cell"}, + localCell: "cell", + inTabletTypes: "replica", + options: TabletPickerOptions{}, + tpCells: []string{"cell", "cella"}, + wantTablets: []uint32{100}, + }, { + name: "multi cell two tablets, local preference default", + tablets: []tablet{ + {100, topodatapb.TabletType_REPLICA, "cell"}, + {101, topodatapb.TabletType_REPLICA, "otherCell"}, + }, + envCells: []string{"cell", "otherCell"}, + inCells: []string{"cell", "otherCell"}, + localCell: "cell", + inTabletTypes: "replica", + options: TabletPickerOptions{}, + tpCells: []string{"cell", "otherCell", "cella"}, + wantTablets: []uint32{100}, + }, { + name: "multi cell two tablets, only specified cells", + tablets: []tablet{ + {100, topodatapb.TabletType_REPLICA, "cell"}, + {101, topodatapb.TabletType_REPLICA, "otherCell"}, + }, + envCells: []string{"cell", "otherCell"}, + inCells: []string{"cell", "otherCell"}, + localCell: "cell", + inTabletTypes: "replica", + options: TabletPickerOptions{CellPreference: "OnlySpecified"}, + tpCells: []string{"cell", "otherCell"}, + wantTablets: []uint32{100, 101}, + }, { + name: "multi cell two tablet types, local preference default", + tablets: []tablet{ + {100, topodatapb.TabletType_REPLICA, "cell"}, + {101, topodatapb.TabletType_RDONLY, "otherCell"}, + }, + envCells: []string{"cell", "otherCell"}, + inCells: []string{"cell", "otherCell"}, + localCell: "cell", + inTabletTypes: "replica,rdonly", + options: TabletPickerOptions{}, + tpCells: []string{"cell", "otherCell", "cella"}, + wantTablets: []uint32{100}, + }, { + name: "multi cell two tablet types, only specified cells", + tablets: []tablet{ + {100, topodatapb.TabletType_REPLICA, "cell"}, + {101, topodatapb.TabletType_RDONLY, "otherCell"}, + }, + envCells: []string{"cell", "otherCell"}, + inCells: []string{"cell", "otherCell"}, + localCell: "cell", + inTabletTypes: "replica,rdonly", + options: TabletPickerOptions{CellPreference: "OnlySpecified"}, + tpCells: []string{"cell", "otherCell"}, + wantTablets: []uint32{100, 101}, + }, } - assert.False(t, picked1) - assert.True(t, picked2) - assert.True(t, picked3) - assert.True(t, picked4) -} -func TestPickRespectsTabletType(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell"}) - want := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) - defer deleteTablet(t, te, want) - dont := addTablet(te, 101, topodatapb.TabletType_PRIMARY, "cell", true, true) - defer deleteTablet(t, te, dont) - - tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica,rdonly") - require.NoError(t, err) - - // In 20 attempts, primary tablet must be never picked - for i := 0; i < 20; i++ { - tablet, err := tp.PickForStreaming(context.Background()) - require.NoError(t, err) - require.NotNil(t, tablet) - require.True(t, proto.Equal(tablet, want), "picked wrong tablet type") + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + te := newPickerTestEnv(t, ctx, tcase.envCells) + var testTablets []*topodatapb.Tablet + for _, tab := range tcase.tablets { + testTablets = append(testTablets, addTablet(ctx, te, int(tab.id), tab.typ, tab.cell, true, true)) + } + defer func() { + for _, tab := range testTablets { + deleteTablet(t, te, tab) + } + }() + tp, err := NewTabletPicker(ctx, te.topoServ, tcase.inCells, tcase.localCell, te.keyspace, te.shard, tcase.inTabletTypes, tcase.options) + require.NoError(t, err) + require.Equal(t, tp.localCellInfo.localCell, tcase.localCell) + require.ElementsMatch(t, tp.cells, tcase.tpCells) + + var selectedTablets []uint32 + selectedTabletMap := make(map[uint32]bool) + for i := 0; i < 40; i++ { + tab, err := tp.PickForStreaming(ctx) + require.NoError(t, err) + selectedTabletMap[tab.Alias.Uid] = true + } + for uid := range selectedTabletMap { + selectedTablets = append(selectedTablets, uid) + } + require.ElementsMatch(t, selectedTablets, tcase.wantTablets) + }) } } -func TestPickMultiCell(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell", "otherCell"}) - want := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) - defer deleteTablet(t, te, want) - - tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica") - require.NoError(t, err) - - ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) - defer cancel() - tablet, err := tp.PickForStreaming(ctx) - require.NoError(t, err) - assert.True(t, proto.Equal(want, tablet), "Pick: %v, want %v", tablet, want) -} +func TestPickCellPreferenceLocalCell(t *testing.T) { + ctx := utils.LeakCheckContext(t) -func TestPickPrimary(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell", "otherCell"}) - want := addTablet(te, 100, topodatapb.TabletType_PRIMARY, "cell", true, true) - defer deleteTablet(t, te, want) - ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) - defer cancel() - _, err := te.topoServ.UpdateShardFields(ctx, te.keyspace, te.shard, func(si *topo.ShardInfo) error { - si.PrimaryAlias = want.Alias - return nil - }) - require.NoError(t, err) - - tp, err := NewTabletPicker(te.topoServ, []string{"otherCell"}, te.keyspace, te.shard, "primary") - require.NoError(t, err) - - ctx2, cancel2 := context.WithTimeout(context.Background(), 200*time.Millisecond) - defer cancel2() - tablet, err := tp.PickForStreaming(ctx2) - require.NoError(t, err) - assert.True(t, proto.Equal(want, tablet), "Pick: %v, want %v", tablet, want) -} - -func TestPickFromOtherCell(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell", "otherCell"}) - want := addTablet(te, 100, topodatapb.TabletType_REPLICA, "otherCell", true, true) - defer deleteTablet(t, te, want) + // test env puts all cells into an alias called "cella" + te := newPickerTestEnv(t, ctx, []string{"cell", "otherCell"}) + want1 := addTablet(ctx, te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) + defer deleteTablet(t, te, want1) - tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica") + // Local cell preference is default + tp, err := NewTabletPicker(ctx, te.topoServ, []string{"cella"}, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{}) require.NoError(t, err) - ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) - defer cancel() tablet, err := tp.PickForStreaming(ctx) require.NoError(t, err) - assert.True(t, proto.Equal(want, tablet), "Pick: %v, want %v", tablet, want) -} + assert.True(t, proto.Equal(want1, tablet), "Pick: %v, want %v", tablet, want1) -func TestDontPickFromOtherCell(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell", "otherCell"}) - want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) - defer deleteTablet(t, te, want1) - want2 := addTablet(te, 101, topodatapb.TabletType_REPLICA, "otherCell", true, true) + // create a tablet in the other cell + want2 := addTablet(ctx, te, 101, topodatapb.TabletType_REPLICA, "otherCell", true, true) defer deleteTablet(t, te, want2) - tp, err := NewTabletPicker(te.topoServ, []string{"cell"}, te.keyspace, te.shard, "replica") - require.NoError(t, err) - - ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) - defer cancel() + ctx2, cancel2 := context.WithTimeout(ctx, 200*time.Millisecond) + defer cancel2() - // In 20 attempts, only want1 must be picked because TabletPicker.cells = "cell" + // In 20 attempts, only tablet in "cell" will be picked because we give local cell priority by default var picked1, picked2 bool for i := 0; i < 20; i++ { - tablet, err := tp.PickForStreaming(ctx) + tablet, err := tp.PickForStreaming(ctx2) require.NoError(t, err) if proto.Equal(tablet, want1) { picked1 = true @@ -260,95 +343,86 @@ func TestDontPickFromOtherCell(t *testing.T) { assert.False(t, picked2) } -func TestPickMultiCellTwoTablets(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell", "otherCell"}) - want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) - defer deleteTablet(t, te, want1) - want2 := addTablet(te, 101, topodatapb.TabletType_REPLICA, "otherCell", true, true) - defer deleteTablet(t, te, want2) +func TestPickCellPreferenceLocalAlias(t *testing.T) { + ctx := utils.LeakCheckContext(t) - tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica") + // test env puts all cells into an alias called "cella" + te := newPickerTestEnv(t, ctx, []string{"cell", "otherCell"}) + tp, err := NewTabletPicker(ctx, te.topoServ, []string{"cella"}, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{}) require.NoError(t, err) - ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) - defer cancel() - - // In 20 attempts, both tablet types must be picked at least once. - var picked1, picked2 bool - for i := 0; i < 20; i++ { - tablet, err := tp.PickForStreaming(ctx) - require.NoError(t, err) - if proto.Equal(tablet, want1) { - picked1 = true - } - if proto.Equal(tablet, want2) { - picked2 = true - } - } - assert.True(t, picked1) - assert.True(t, picked2) + // create a tablet in the other cell, it should be picked + want := addTablet(ctx, te, 101, topodatapb.TabletType_REPLICA, "otherCell", true, true) + defer deleteTablet(t, te, want) + tablet, err := tp.PickForStreaming(ctx) + require.NoError(t, err) + assert.True(t, proto.Equal(want, tablet), "Pick: %v, want %v", tablet, want) } -func TestPickMultiCellTwoTabletTypes(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell", "otherCell"}) - want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) - defer deleteTablet(t, te, want1) - want2 := addTablet(te, 101, topodatapb.TabletType_RDONLY, "otherCell", true, true) - defer deleteTablet(t, te, want2) - - tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica,rdonly") +// TestPickUsingCellAsAlias confirms that when the tablet picker is +// given a cell name that is an alias, it will choose a tablet that +// exists within a cell that is part of the alias. +func TestPickUsingCellAsAlias(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + // The test env puts all cells into an alias called "cella". + // We're also going to specify an optional extraCell that is NOT + // added to the alias. + te := newPickerTestEnv(t, ctx, []string{"cell1", "cell2", "cell3"}, "xtracell") + // Specify the alias as the cell. + tp, err := NewTabletPicker(ctx, te.topoServ, []string{"cella"}, "cell1", te.keyspace, te.shard, "replica", TabletPickerOptions{}) require.NoError(t, err) - ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) - defer cancel() - - // In 20 attempts, both tablet types must be picked at least once. - var picked1, picked2 bool - for i := 0; i < 20; i++ { + // Create a tablet in one of the main cells, it should be + // picked as it is part of the cella alias. This tablet is + // NOT part of the talbet picker's local cell (cell1) so it + // will not be given local preference. + want := addTablet(ctx, te, 101, topodatapb.TabletType_REPLICA, "cell2", true, true) + defer deleteTablet(t, te, want) + // Create a tablet in an extra cell which is thus NOT part of + // the cella alias so it should NOT be picked. + noWant := addTablet(ctx, te, 102, topodatapb.TabletType_REPLICA, "xtracell", true, true) + defer deleteTablet(t, te, noWant) + // Try it many times to be sure we don't ever pick the wrong one. + for i := 0; i < 100; i++ { tablet, err := tp.PickForStreaming(ctx) require.NoError(t, err) - if proto.Equal(tablet, want1) { - picked1 = true - } - if proto.Equal(tablet, want2) { - picked2 = true - } + assert.True(t, proto.Equal(want, tablet), "Pick: %v, want %v", tablet, want) } - assert.True(t, picked1) - assert.True(t, picked2) } -func TestPickUsingCellAlias(t *testing.T) { +func TestPickUsingCellAliasOnlySpecified(t *testing.T) { + ctx := utils.LeakCheckContextTimeout(t, 200*time.Millisecond) + // test env puts all cells into an alias called "cella" - te := newPickerTestEnv(t, []string{"cell", "otherCell"}) - want1 := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) + te := newPickerTestEnv(t, ctx, []string{"cell", "otherCell"}) + want1 := addTablet(ctx, te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) defer deleteTablet(t, te, want1) - tp, err := NewTabletPicker(te.topoServ, []string{"cella"}, te.keyspace, te.shard, "replica") + tp, err := NewTabletPicker(ctx, te.topoServ, []string{"cella"}, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{CellPreference: "OnlySpecified"}) require.NoError(t, err) - ctx1, cancel1 := context.WithTimeout(context.Background(), 200*time.Millisecond) - defer cancel1() - tablet, err := tp.PickForStreaming(ctx1) + tablet, err := tp.PickForStreaming(ctx) require.NoError(t, err) assert.True(t, proto.Equal(want1, tablet), "Pick: %v, want %v", tablet, want1) // create a tablet in the other cell, it should be picked deleteTablet(t, te, want1) - want2 := addTablet(te, 101, topodatapb.TabletType_REPLICA, "otherCell", true, true) + want2 := addTablet(ctx, te, 101, topodatapb.TabletType_REPLICA, "otherCell", true, true) defer deleteTablet(t, te, want2) - ctx2, cancel2 := context.WithTimeout(context.Background(), 200*time.Millisecond) + ctx2, cancel2 := context.WithTimeout(ctx, 200*time.Millisecond) defer cancel2() tablet, err = tp.PickForStreaming(ctx2) require.NoError(t, err) assert.True(t, proto.Equal(want2, tablet), "Pick: %v, want %v", tablet, want2) // addTablet again and test that both are picked at least once - want1 = addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) - ctx3, cancel3 := context.WithTimeout(context.Background(), 200*time.Millisecond) + want1 = addTablet(ctx, te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) + ctx3, cancel3 := context.WithTimeout(ctx, 200*time.Millisecond) defer cancel3() - // In 20 attempts, both tablet types must be picked at least once. + // In 20 attempts each of the tablets should get picked at least once. + // Local cell is not given preference var picked1, picked2 bool for i := 0; i < 20; i++ { tablet, err := tp.PickForStreaming(ctx3) @@ -365,8 +439,10 @@ func TestPickUsingCellAlias(t *testing.T) { } func TestTabletAppearsDuringSleep(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell"}) - tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica") + ctx := utils.LeakCheckContextTimeout(t, 200*time.Millisecond) + + te := newPickerTestEnv(t, ctx, []string{"cell"}) + tp, err := NewTabletPicker(ctx, te.topoServ, te.cells, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{}) require.NoError(t, err) delay := GetTabletPickerRetryDelay() @@ -378,26 +454,28 @@ func TestTabletAppearsDuringSleep(t *testing.T) { result := make(chan *topodatapb.Tablet) // start picker first, then add tablet go func() { - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond) + ctx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) defer cancel() tablet, err := tp.PickForStreaming(ctx) assert.NoError(t, err) result <- tablet }() - want := addTablet(te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) + want := addTablet(ctx, te, 100, topodatapb.TabletType_REPLICA, "cell", true, true) defer deleteTablet(t, te, want) got := <-result require.NotNil(t, got, "Tablet should not be nil") assert.True(t, proto.Equal(want, got), "Pick: %v, want %v", got, want) } -func TestPickError(t *testing.T) { - te := newPickerTestEnv(t, []string{"cell"}) - _, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "badtype") +func TestPickErrorLocalPreferenceDefault(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + te := newPickerTestEnv(t, ctx, []string{"cell"}) + _, err := NewTabletPicker(ctx, te.topoServ, te.cells, "cell", te.keyspace, te.shard, "badtype", TabletPickerOptions{}) assert.EqualError(t, err, "failed to parse list of tablet types: badtype") - tp, err := NewTabletPicker(te.topoServ, te.cells, te.keyspace, te.shard, "replica") + tp, err := NewTabletPicker(ctx, te.topoServ, te.cells, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{}) require.NoError(t, err) delay := GetTabletPickerRetryDelay() defer func() { @@ -405,20 +483,91 @@ func TestPickError(t *testing.T) { }() SetTabletPickerRetryDelay(11 * time.Millisecond) - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond) - defer cancel() + timeoutCtx, timeoutCancel := context.WithTimeout(ctx, 20*time.Millisecond) + defer timeoutCancel() // no tablets - _, err = tp.PickForStreaming(ctx) + _, err = tp.PickForStreaming(timeoutCtx) require.EqualError(t, err, "context has expired") // no tablets of the correct type - defer deleteTablet(t, te, addTablet(te, 200, topodatapb.TabletType_RDONLY, "cell", true, true)) - ctx, cancel = context.WithTimeout(context.Background(), 20*time.Millisecond) - defer cancel() - _, err = tp.PickForStreaming(ctx) + defer deleteTablet(t, te, addTablet(ctx, te, 200, topodatapb.TabletType_RDONLY, "cell", true, true)) + timeoutCtx, timeoutCancel = context.WithTimeout(ctx, 20*time.Millisecond) + defer timeoutCancel() + _, err = tp.PickForStreaming(timeoutCtx) require.EqualError(t, err, "context has expired") + // if local preference is selected, tp cells include's the local cell's alias + require.Greater(t, globalTPStats.noTabletFoundError.Counts()["cell_cella.ks.0.replica"], int64(0)) +} + +func TestPickErrorOnlySpecified(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + te := newPickerTestEnv(t, ctx, []string{"cell"}) + + tp, err := NewTabletPicker(ctx, te.topoServ, te.cells, "cell", te.keyspace, te.shard, "replica", TabletPickerOptions{CellPreference: "OnlySpecified"}) + require.NoError(t, err) + delay := GetTabletPickerRetryDelay() + defer func() { + SetTabletPickerRetryDelay(delay) + }() + SetTabletPickerRetryDelay(11 * time.Millisecond) + + timeoutCtx, timeoutCancel := context.WithTimeout(ctx, 20*time.Millisecond) + defer timeoutCancel() + // no tablets + _, err = tp.PickForStreaming(timeoutCtx) + require.EqualError(t, err, "context has expired") + // no tablets of the correct type + defer deleteTablet(t, te, addTablet(ctx, te, 200, topodatapb.TabletType_RDONLY, "cell", true, true)) + timeoutCtx, timeoutCancel = context.WithTimeout(ctx, 20*time.Millisecond) + defer timeoutCancel() + _, err = tp.PickForStreaming(timeoutCtx) + require.EqualError(t, err, "context has expired") + require.Greater(t, globalTPStats.noTabletFoundError.Counts()["cell.ks.0.replica"], int64(0)) } +// TestPickFallbackType tests that when providing a list of tablet types to +// pick from, with the list in preference order, that when the primary/first +// type has no available healthy serving tablets that we select a healthy +// serving tablet from the secondary/second type. +func TestPickFallbackType(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + cells := []string{"cell1", "cell2"} + localCell := cells[0] + tabletTypes := "replica,primary" + options := TabletPickerOptions{ + TabletOrder: "InOrder", + } + te := newPickerTestEnv(t, ctx, cells) + + // This one should be selected even though it's the secondary type + // as it is healthy and serving. + primaryTablet := addTablet(ctx, te, 100, topodatapb.TabletType_PRIMARY, localCell, true, true) + defer deleteTablet(t, te, primaryTablet) + + // Replica tablet should not be selected as it is unhealthy. + replicaTablet := addTablet(ctx, te, 200, topodatapb.TabletType_REPLICA, localCell, false, false) + defer deleteTablet(t, te, replicaTablet) + + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, 200*time.Millisecond) + defer cancel() + _, err := te.topoServ.UpdateShardFields(ctx, te.keyspace, te.shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = primaryTablet.Alias + return nil + }) + require.NoError(t, err) + + tp, err := NewTabletPicker(ctx, te.topoServ, cells, localCell, te.keyspace, te.shard, tabletTypes, options) + require.NoError(t, err) + ctx2, cancel2 := context.WithTimeout(ctx, 1*time.Second) + defer cancel2() + tablet, err := tp.PickForStreaming(ctx2) + require.NoError(t, err) + assert.True(t, proto.Equal(primaryTablet, tablet), "Pick: %v, want %v", tablet, primaryTablet) +} + type pickerTestEnv struct { t *testing.T keyspace string @@ -428,17 +577,20 @@ type pickerTestEnv struct { topoServ *topo.Server } -func newPickerTestEnv(t *testing.T, cells []string) *pickerTestEnv { - ctx := context.Background() - +// newPickerTestEnv creates a test environment for TabletPicker tests. +// It creates a cell alias called 'cella' which contains all of the +// provided cells. However, if any optional extraCells are provided, those +// are NOT added to the cell alias. +func newPickerTestEnv(t *testing.T, ctx context.Context, cells []string, extraCells ...string) *pickerTestEnv { + allCells := append(cells, extraCells...) te := &pickerTestEnv{ t: t, keyspace: "ks", shard: "0", cells: cells, - topoServ: memorytopo.NewServer(cells...), + topoServ: memorytopo.NewServer(ctx, allCells...), } - // create cell alias + // Create cell alias containing the cells (but NOT the extraCells). err := te.topoServ.CreateCellsAlias(ctx, "cella", &topodatapb.CellsAlias{ Cells: cells, }) @@ -450,7 +602,7 @@ func newPickerTestEnv(t *testing.T, cells []string) *pickerTestEnv { return te } -func addTablet(te *pickerTestEnv, id int, tabletType topodatapb.TabletType, cell string, serving, healthy bool) *topodatapb.Tablet { +func addTablet(ctx context.Context, te *pickerTestEnv, id int, tabletType topodatapb.TabletType, cell string, serving, healthy bool) *topodatapb.Tablet { tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: cell, @@ -464,21 +616,24 @@ func addTablet(te *pickerTestEnv, id int, tabletType topodatapb.TabletType, cell "test": int32(id), }, } - err := te.topoServ.CreateTablet(context.Background(), tablet) + err := te.topoServ.CreateTablet(ctx, tablet) require.NoError(te.t, err) + shr := &querypb.StreamHealthResponse{ + Serving: serving, + Target: &querypb.Target{ + Keyspace: te.keyspace, + Shard: te.shard, + TabletType: tabletType, + }, + RealtimeStats: &querypb.RealtimeStats{HealthError: "tablet is unhealthy"}, + } if healthy { - _ = createFixedHealthConn(tablet, &querypb.StreamHealthResponse{ - Serving: serving, - Target: &querypb.Target{ - Keyspace: te.keyspace, - Shard: te.shard, - TabletType: tabletType, - }, - RealtimeStats: &querypb.RealtimeStats{HealthError: ""}, - }) + shr.RealtimeStats.HealthError = "" } + _ = createFixedHealthConn(tablet, shr) + return tablet } diff --git a/go/vt/discovery/tablets_cache_status.go b/go/vt/discovery/tablets_cache_status.go index e0f9289a195..110974189f0 100644 --- a/go/vt/discovery/tablets_cache_status.go +++ b/go/vt/discovery/tablets_cache_status.go @@ -2,9 +2,10 @@ package discovery import ( "fmt" - "html/template" "sort" - "strings" + + "github.com/google/safehtml" + "github.com/google/safehtml/template" "google.golang.org/protobuf/proto" @@ -54,8 +55,20 @@ func (tsl TabletStatsList) deepEqual(other TabletStatsList) bool { } // StatusAsHTML returns an HTML version of the status. -func (tcs *TabletsCacheStatus) StatusAsHTML() template.HTML { - tLinks := make([]string, 0, 1) +func (tcs *TabletsCacheStatus) StatusAsHTML() safehtml.HTML { + linksTpl, err := template.New("tcs").Parse("{{ range . }}{{.Name}}{{.Extra}}
{{ end }}") + if err != nil { + panic(err) + } + + type link struct { + Link string + Color safehtml.Style + Name string + Extra string + } + + var tLinks []link if tcs.TabletsStats != nil { sort.Sort(tcs.TabletsStats) } @@ -74,9 +87,18 @@ func (tcs *TabletsCacheStatus) StatusAsHTML() template.HTML { extra = fmt.Sprintf(" (RepLag: %v)", ts.Stats.ReplicationLagSeconds) } name := topoproto.TabletAliasString(ts.Tablet.Alias) - tLinks = append(tLinks, fmt.Sprintf(`%v%v`, ts.getTabletDebugURL(), color, name, extra)) + tLinks = append(tLinks, link{ + Link: ts.getTabletDebugURL(), + Name: name, + Extra: extra, + Color: safehtml.StyleFromProperties(safehtml.StyleProperties{Color: color}), + }) + } + html, err := linksTpl.ExecuteToHTML(tLinks) + if err != nil { + panic(err) } - return template.HTML(strings.Join(tLinks, "
")) + return html } func (tcs *TabletsCacheStatus) deepEqual(otcs *TabletsCacheStatus) bool { diff --git a/go/vt/discovery/topology_watcher.go b/go/vt/discovery/topology_watcher.go index 5e5c54ab57a..d1bd2d3acf8 100644 --- a/go/vt/discovery/topology_watcher.go +++ b/go/vt/discovery/topology_watcher.go @@ -18,6 +18,7 @@ package discovery import ( "bytes" + "context" "fmt" "hash/crc32" "sort" @@ -29,8 +30,6 @@ import ( "vitess.io/vitess/go/vt/key" - "context" - "vitess.io/vitess/go/stats" "vitess.io/vitess/go/trace" @@ -134,6 +133,7 @@ func (tw *TopologyWatcher) Start() { select { case <-t.ctx.Done(): return + case <-tw.healthcheck.GetLoadTabletsTrigger(): case <-ticker.C: } } @@ -362,7 +362,7 @@ func (fbs *FilterByShard) IsIncluded(tablet *topodata.Tablet) bool { // Exact match (probably a non-sharded keyspace). return true } - if kr != nil && c.keyRange != nil && key.KeyRangeIncludes(c.keyRange, kr) { + if kr != nil && c.keyRange != nil && key.KeyRangeContainsKeyRange(c.keyRange, kr) { // Our filter's KeyRange includes the provided KeyRange return true } diff --git a/go/vt/discovery/topology_watcher_test.go b/go/vt/discovery/topology_watcher_test.go index dff8ba720c7..3ac567acef8 100644 --- a/go/vt/discovery/topology_watcher_test.go +++ b/go/vt/discovery/topology_watcher_test.go @@ -26,6 +26,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/logutil" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" @@ -56,8 +58,12 @@ func checkChecksum(t *testing.T, tw *TopologyWatcher, want uint32) { } func TestStartAndCloseTopoWatcher(t *testing.T) { - ts := memorytopo.NewServer("aa") + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "aa") + defer ts.Close() fhc := NewFakeHealthCheck(nil) + defer fhc.Close() topologyWatcherOperations.ZeroAll() tw := NewCellTabletsWatcher(context.Background(), ts, fhc, nil, "aa", 100*time.Microsecond, true, 5) @@ -110,8 +116,12 @@ func TestCellTabletsWatcherNoRefreshKnown(t *testing.T) { } func checkWatcher(t *testing.T, refreshKnownTablets bool) { - ts := memorytopo.NewServer("aa") + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "aa") + defer ts.Close() fhc := NewFakeHealthCheck(nil) + defer fhc.Close() logger := logutil.NewMemoryLogger() topologyWatcherOperations.ZeroAll() counts := topologyWatcherOperations.Counts() @@ -197,7 +207,7 @@ func checkWatcher(t *testing.T, refreshKnownTablets bool) { // if refreshKnownTablets is disabled, this case is *not* // detected and the tablet remains in the topo using the // old key - origTablet := proto.Clone(tablet).(*topodatapb.Tablet) + origTablet := tablet.CloneVT() origKey := TabletToMapKey(tablet) tablet.PortMap["vt"] = 456 if _, err := ts.UpdateTabletFields(context.Background(), tablet.Alias, func(t *topodatapb.Tablet) error { @@ -236,9 +246,8 @@ func checkWatcher(t *testing.T, refreshKnownTablets bool) { // tablet2 happens to land on the host:port that tablet 1 used to be on. // This can only be tested when we refresh known tablets. if refreshKnownTablets { - origTablet := proto.Clone(tablet).(*topodatapb.Tablet) - origTablet2 := proto.Clone(tablet2).(*topodatapb.Tablet) - + origTablet := tablet.CloneVT() + origTablet2 := tablet2.CloneVT() if _, err := ts.UpdateTabletFields(context.Background(), tablet2.Alias, func(t *topodatapb.Tablet) error { t.Hostname = tablet.Hostname t.PortMap = tablet.PortMap @@ -429,9 +438,12 @@ var ( ) func TestFilterByKeyspace(t *testing.T) { + ctx := utils.LeakCheckContext(t) + hc := NewFakeHealthCheck(nil) f := NewFilterByKeyspace(testKeyspacesToWatch) - ts := memorytopo.NewServer(testCell) + ts := memorytopo.NewServer(ctx, testCell) + defer ts.Close() tw := NewCellTabletsWatcher(context.Background(), ts, hc, f, testCell, 10*time.Minute, true, 5) for _, test := range testFilterByKeyspace { @@ -509,8 +521,12 @@ func TestFilterByKeyspace(t *testing.T) { // - does not continuosly call GetTablets for tablets that do not satisfy the filter // - does not add or remove these filtered out tablets from the its healtcheck func TestFilterByKeypsaceSkipsIgnoredTablets(t *testing.T) { - ts := memorytopo.NewServer("aa") + ctx := utils.LeakCheckContext(t) + + ts := memorytopo.NewServer(ctx, "aa") + defer ts.Close() fhc := NewFakeHealthCheck(nil) + defer fhc.Close() topologyWatcherOperations.ZeroAll() counts := topologyWatcherOperations.Counts() f := NewFilterByKeyspace(testKeyspacesToWatch) @@ -590,7 +606,7 @@ func TestFilterByKeypsaceSkipsIgnoredTablets(t *testing.T) { allTablets = fhc.GetAllTablets() assert.Len(t, allTablets, 1) origKey := TabletToMapKey(tablet) - tabletWithNewPort := proto.Clone(tablet).(*topodatapb.Tablet) + tabletWithNewPort := tablet.CloneVT() tabletWithNewPort.PortMap["vt"] = 456 keyWithNewPort := TabletToMapKey(tabletWithNewPort) assert.Contains(t, allTablets, origKey) diff --git a/go/vt/discovery/utils.go b/go/vt/discovery/utils.go index 02f3b7132af..3a601830d35 100644 --- a/go/vt/discovery/utils.go +++ b/go/vt/discovery/utils.go @@ -50,9 +50,9 @@ func RemoveUnhealthyTablets(tabletStatsList []TabletHealth) []TabletHealth { func ParseTabletTypesAndOrder(tabletTypesStr string) ([]topodatapb.TabletType, bool, error) { inOrder := false - if strings.HasPrefix(tabletTypesStr, inOrderHint) { + if strings.HasPrefix(tabletTypesStr, InOrderHint) { inOrder = true - tabletTypesStr = tabletTypesStr[len(inOrderHint):] + tabletTypesStr = tabletTypesStr[len(InOrderHint):] } tabletTypes, err := topoproto.ParseTabletTypes(tabletTypesStr) diff --git a/go/vt/external/golib/sqlutils/sqlutils.go b/go/vt/external/golib/sqlutils/sqlutils.go index c593d5f6319..eb1cb8c8941 100644 --- a/go/vt/external/golib/sqlutils/sqlutils.go +++ b/go/vt/external/golib/sqlutils/sqlutils.go @@ -25,7 +25,6 @@ import ( "encoding/json" "fmt" "strconv" - "strings" "sync" "time" @@ -78,35 +77,10 @@ func (this *RowData) MarshalJSON() ([]byte, error) { return json.Marshal(cells) } -func (this *RowData) Args() []any { - result := make([]any, len(*this)) - for i := range *this { - result[i] = (*(*this)[i].NullString()) - } - return result -} - -// ResultData is an ordered row set of RowData -type ResultData []RowData -type NamedResultData struct { - Columns []string - Data ResultData -} - -var EmptyResultData = ResultData{} - func (this *RowMap) GetString(key string) string { return (*this)[key].String } -// GetStringD returns a string from the map, or a default value if the key does not exist -func (this *RowMap) GetStringD(key string, def string) string { - if cell, ok := (*this)[key]; ok { - return cell.String - } - return def -} - func (this *RowMap) GetInt64(key string) int64 { res, _ := strconv.ParseInt(this.GetString(key), 10, 64) return res @@ -131,40 +105,16 @@ func (this *RowMap) GetInt(key string) int { return res } -func (this *RowMap) GetIntD(key string, def int) int { - res, err := strconv.Atoi(this.GetString(key)) - if err != nil { - return def - } - return res -} - func (this *RowMap) GetUint(key string) uint { res, _ := strconv.ParseUint(this.GetString(key), 10, 0) return uint(res) } -func (this *RowMap) GetUintD(key string, def uint) uint { - res, err := strconv.ParseUint(this.GetString(key), 10, 0) - if err != nil { - return def - } - return uint(res) -} - func (this *RowMap) GetUint64(key string) uint64 { res, _ := strconv.ParseUint(this.GetString(key), 10, 64) return res } -func (this *RowMap) GetUint64D(key string, def uint64) uint64 { - res, err := strconv.ParseUint(this.GetString(key), 10, 64) - if err != nil { - return def - } - return res -} - func (this *RowMap) GetUint32(key string) uint32 { res, _ := strconv.ParseUint(this.GetString(key), 10, 32) return uint32(res) @@ -182,10 +132,10 @@ func (this *RowMap) GetTime(key string) time.Time { } // knownDBs is a DB cache by uri -var knownDBs map[string]*sql.DB = make(map[string]*sql.DB) +var knownDBs = make(map[string]*sql.DB) var knownDBsMutex = &sync.Mutex{} -// GetDB returns a DB instance based on uri. +// GetGenericDB returns a DB instance based on uri. // bool result indicates whether the DB was returned from cache; err func GetGenericDB(driverName, dataSourceName string) (*sql.DB, bool, error) { knownDBsMutex.Lock() @@ -204,12 +154,6 @@ func GetGenericDB(driverName, dataSourceName string) (*sql.DB, bool, error) { return knownDBs[dataSourceName], exists, nil } -// GetDB returns a MySQL DB instance based on uri. -// bool result indicates whether the DB was returned from cache; err -func GetDB(mysql_uri string) (*sql.DB, bool, error) { - return GetGenericDB("mysql", mysql_uri) -} - // GetSQLiteDB returns a SQLite DB instance based on DB file name. // bool result indicates whether the DB was returned from cache; err func GetSQLiteDB(dbFile string) (*sql.DB, bool, error) { @@ -290,64 +234,6 @@ func QueryRowsMap(db *sql.DB, query string, on_row func(RowMap) error, args ...a return } -// queryResultData returns a raw array of rows for a given query, optionally reading and returning column names -func queryResultData(db *sql.DB, query string, retrieveColumns bool, args ...any) (resultData ResultData, columns []string, err error) { - defer func() { - if derr := recover(); derr != nil { - err = fmt.Errorf("QueryRowsMap unexpected error: %+v", derr) - } - }() - - var rows *sql.Rows - rows, err = db.Query(query, args...) - if err != nil && err != sql.ErrNoRows { - log.Error(err) - return EmptyResultData, columns, err - } - defer rows.Close() - - if retrieveColumns { - // Don't pay if you don't want to - columns, _ = rows.Columns() - } - resultData = ResultData{} - err = ScanRowsToArrays(rows, func(rowData []CellData) error { - resultData = append(resultData, rowData) - return nil - }) - return resultData, columns, err -} - -// QueryResultData returns a raw array of rows -func QueryResultData(db *sql.DB, query string, args ...any) (ResultData, error) { - resultData, _, err := queryResultData(db, query, false, args...) - return resultData, err -} - -// QueryResultDataNamed returns a raw array of rows, with column names -func QueryNamedResultData(db *sql.DB, query string, args ...any) (NamedResultData, error) { - resultData, columns, err := queryResultData(db, query, true, args...) - return NamedResultData{Columns: columns, Data: resultData}, err -} - -// QueryRowsMapBuffered reads data from the database into a buffer, and only then applies the given function per row. -// This allows the application to take its time with processing the data, albeit consuming as much memory as required by -// the result set. -func QueryRowsMapBuffered(db *sql.DB, query string, on_row func(RowMap) error, args ...any) error { - resultData, columns, err := queryResultData(db, query, true, args...) - if err != nil { - // Already logged - return err - } - for _, row := range resultData { - err = on_row(rowToMap(row, columns)) - if err != nil { - return err - } - } - return nil -} - // ExecNoPrepare executes given query using given args on given DB, without using prepared statements. func ExecNoPrepare(db *sql.DB, query string, args ...any) (res sql.Result, err error) { defer func() { @@ -363,46 +249,6 @@ func ExecNoPrepare(db *sql.DB, query string, args ...any) (res sql.Result, err e return res, err } -// ExecQuery executes given query using given args on given DB. It will safele prepare, execute and close -// the statement. -func execInternal(silent bool, db *sql.DB, query string, args ...any) (res sql.Result, err error) { - defer func() { - if derr := recover(); derr != nil { - err = fmt.Errorf("execInternal unexpected error: %+v", derr) - } - }() - var stmt *sql.Stmt - stmt, err = db.Prepare(query) - if err != nil { - return nil, err - } - defer stmt.Close() - res, err = stmt.Exec(args...) - if err != nil && !silent { - log.Error(err) - } - return res, err -} - -// Exec executes given query using given args on given DB. It will safele prepare, execute and close -// the statement. -func Exec(db *sql.DB, query string, args ...any) (sql.Result, error) { - return execInternal(false, db, query, args...) -} - -// ExecSilently acts like Exec but does not report any error -func ExecSilently(db *sql.DB, query string, args ...any) (sql.Result, error) { - return execInternal(true, db, query, args...) -} - -func InClauseStringValues(terms []string) string { - quoted := []string{} - for _, s := range terms { - quoted = append(quoted, fmt.Sprintf("'%s'", strings.Replace(s, ",", "''", -1))) - } - return strings.Join(quoted, ", ") -} - // Convert variable length arguments into arguments array func Args(args ...any) []any { return args @@ -414,33 +260,3 @@ func NilIfZero(i int64) any { } return i } - -func ScanTable(db *sql.DB, tableName string) (NamedResultData, error) { - query := fmt.Sprintf("select * from %s", tableName) - return QueryNamedResultData(db, query) -} - -func WriteTable(db *sql.DB, tableName string, data NamedResultData) (err error) { - if len(data.Data) == 0 { - return nil - } - if len(data.Columns) == 0 { - return nil - } - placeholders := make([]string, len(data.Columns)) - for i := range placeholders { - placeholders[i] = "?" - } - query := fmt.Sprintf( - `replace into %s (%s) values (%s)`, - tableName, - strings.Join(data.Columns, ","), - strings.Join(placeholders, ","), - ) - for _, rowData := range data.Data { - if _, execErr := db.Exec(query, rowData.Args()...); execErr != nil { - err = execErr - } - } - return err -} diff --git a/go/vt/grpcclient/client.go b/go/vt/grpcclient/client.go index d3865c88c84..b2ef0d4fb28 100644 --- a/go/vt/grpcclient/client.go +++ b/go/vt/grpcclient/client.go @@ -56,7 +56,6 @@ var ( "vtctld", "vtgate", "vtgateclienttest", - "vtgr", "vtorc", "vttablet", "vttestserver", diff --git a/go/vt/grpcclient/client_flaky_test.go b/go/vt/grpcclient/client_flaky_test.go index c6baad962de..edc6d9be98c 100644 --- a/go/vt/grpcclient/client_flaky_test.go +++ b/go/vt/grpcclient/client_flaky_test.go @@ -43,7 +43,7 @@ func TestDialErrors(t *testing.T) { t.Fatal(err) } vtg := vtgateservicepb.NewVitessClient(gconn) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) _, err = vtg.Execute(ctx, &vtgatepb.ExecuteRequest{}) cancel() gconn.Close() diff --git a/go/vt/key/destination.go b/go/vt/key/destination.go index c4fb37b7a9d..437e980f480 100644 --- a/go/vt/key/destination.go +++ b/go/vt/key/destination.go @@ -128,7 +128,7 @@ func (d DestinationExactKeyRange) String() string { func processExactKeyRange(allShards []*topodatapb.ShardReference, kr *topodatapb.KeyRange, addShard func(shard string) error) error { sort.SliceStable(allShards, func(i, j int) bool { - return KeyRangeStartSmaller(allShards[i].GetKeyRange(), allShards[j].GetKeyRange()) + return KeyRangeLess(allShards[i].GetKeyRange(), allShards[j].GetKeyRange()) }) shardnum := 0 @@ -139,7 +139,7 @@ func processExactKeyRange(allShards []*topodatapb.ShardReference, kr *topodatapb shardnum++ } for shardnum < len(allShards) { - if !KeyRangesIntersect(kr, allShards[shardnum].KeyRange) { + if !KeyRangeIntersect(kr, allShards[shardnum].KeyRange) { // If we are over the requested keyrange, we // can stop now, we won't find more. break @@ -215,7 +215,7 @@ func (d DestinationKeyRange) String() string { func processKeyRange(allShards []*topodatapb.ShardReference, kr *topodatapb.KeyRange, addShard func(shard string) error) error { for _, shard := range allShards { - if !KeyRangesIntersect(kr, shard.KeyRange) { + if !KeyRangeIntersect(kr, shard.KeyRange) { // We don't need that shard. continue } diff --git a/go/vt/key/key.go b/go/vt/key/key.go index fc603554ecf..dcdcda47f81 100644 --- a/go/vt/key/key.go +++ b/go/vt/key/key.go @@ -26,16 +26,18 @@ import ( "regexp" "strings" - "google.golang.org/protobuf/proto" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) +var ( + KeyRangePattern = regexp.MustCompile(`^(0|([0-9a-fA-F]{2})*-([0-9a-fA-F]{2})*)$`) +) + // // Uint64Key definitions // -// Uint64Key is a uint64 that can be converted into a KeyspaceId. +// Uint64Key is a uint64 that can be converted into a keyspace id. type Uint64Key uint64 func (i Uint64Key) String() string { @@ -49,261 +51,214 @@ func (i Uint64Key) Bytes() []byte { return buf } -// -// KeyRange helper methods -// +// Helper methods for keyspace id values. -// EvenShardsKeyRange returns a key range definition for a shard at index "i", -// assuming range based sharding with "n" equal-width shards in total. -// i starts at 0. -// -// Example: (1, 2) returns the second out of two shards in total i.e. "80-". -// -// This function must not be used in the Vitess code base because Vitess also -// supports shards with different widths. In that case, the output of this -// function would be wrong. -// -// Note: start and end values have trailing zero bytes omitted. -// For example, "80-" has only the first byte (0x80) set. -// We do this to produce the same KeyRange objects as ParseKeyRangeParts() does. -// Because it's using the Go hex methods, it's omitting trailing zero bytes as -// well. -func EvenShardsKeyRange(i, n int) (*topodatapb.KeyRange, error) { - if n <= 0 { - return nil, fmt.Errorf("the shard count must be > 0: %v", n) - } - if i >= n { - return nil, fmt.Errorf("the index of the shard must be less than the total number of shards: %v < %v", i, n) - } - if n&(n-1) != 0 { - return nil, fmt.Errorf("the shard count must be a power of two: %v", n) +// Normalize removes any trailing zero bytes from id. This allows two id values to be compared even if they are +// different lengths. +// From a key range perspective, -80 == 00-80 == 0000-8000 == 000000-800000, etc. and they should +// always be treated the same even if they are different lengths. +func Normalize(id []byte) []byte { + trailingZeroes := 0 + for i := len(id) - 1; i >= 0 && id[i] == 0x00; i-- { + trailingZeroes += 1 } - // Determine the number of bytes which are required to represent any - // KeyRange start or end for the given n. - // This is required to trim the returned values to the same length e.g. - // (256, 512) should return 8000-8080 as shard key range. - minBytes := 0 - for nn := Uint64Key(n - 1); nn > 0; nn >>= 8 { - minBytes++ - } + return id[:len(id)-trailingZeroes] +} - width := Uint64Key(math.MaxUint64)/Uint64Key(n) + 1 - start := Uint64Key(i) * width - end := start + width +// Compare compares two keyspace IDs while taking care to normalize them; returns -1 if ab, 0 if equal. +func Compare(a, b []byte) int { + return bytes.Compare(Normalize(a), Normalize(b)) +} - // Note: The byte value is empty if start or end is the min or the max - // respectively. - startBytes := start.Bytes()[:minBytes] - endBytes := end.Bytes()[:minBytes] - if start == 0 { - startBytes = []byte{} - } - if end == 0 { - // Always set the end except for the last shard. In that case, the - // end value (2^64) flows over and is the same as 0. - endBytes = []byte{} - } - return &topodatapb.KeyRange{Start: startBytes, End: endBytes}, nil +// Less returns true if a is less than b. +func Less(a, b []byte) bool { + return Compare(a, b) < 0 +} + +// Equal returns true if a is equal to b. +func Equal(a, b []byte) bool { + return Compare(a, b) == 0 +} + +// Empty returns true if id is an empty keyspace ID. +func Empty(id []byte) bool { + return len(Normalize(id)) == 0 } -// KeyRangeAdd adds two adjacent keyranges into a single value. -// If the values are not adjacent, it returns false. -func KeyRangeAdd(first, second *topodatapb.KeyRange) (*topodatapb.KeyRange, bool) { - if first == nil || second == nil { +// +// KeyRange helper methods +// + +// KeyRangeAdd adds two adjacent KeyRange values (in any order) into a single value. If the values are not adjacent, +// it returns false. +func KeyRangeAdd(a, b *topodatapb.KeyRange) (*topodatapb.KeyRange, bool) { + if a == nil || b == nil { return nil, false } - if len(first.End) != 0 && bytes.Equal(first.End, second.Start) { - return &topodatapb.KeyRange{Start: first.Start, End: second.End}, true + if !Empty(a.End) && Equal(a.End, b.Start) { + return &topodatapb.KeyRange{Start: a.Start, End: b.End}, true } - if len(second.End) != 0 && bytes.Equal(second.End, first.Start) { - return &topodatapb.KeyRange{Start: second.Start, End: first.End}, true + if !Empty(b.End) && Equal(b.End, a.Start) { + return &topodatapb.KeyRange{Start: b.Start, End: a.End}, true } return nil, false } // KeyRangeContains returns true if the provided id is in the keyrange. -func KeyRangeContains(kr *topodatapb.KeyRange, id []byte) bool { - if kr == nil { +func KeyRangeContains(keyRange *topodatapb.KeyRange, id []byte) bool { + if KeyRangeIsComplete(keyRange) { return true } - return bytes.Compare(kr.Start, id) <= 0 && - (len(kr.End) == 0 || bytes.Compare(id, kr.End) < 0) + return (Empty(keyRange.Start) || Compare(id, keyRange.Start) >= 0) && (Empty(keyRange.End) || Compare(id, keyRange.End) < 0) } -// ParseKeyRangeParts parses a start and end hex values and build a proto KeyRange +// ParseKeyRangeParts parses a Start and End as hex encoded strings and builds a proto KeyRange. func ParseKeyRangeParts(start, end string) (*topodatapb.KeyRange, error) { - s, err := hex.DecodeString(start) + startKey, err := hex.DecodeString(start) if err != nil { return nil, err } - e, err := hex.DecodeString(end) + endKey, err := hex.DecodeString(end) if err != nil { return nil, err } - return &topodatapb.KeyRange{Start: s, End: e}, nil + return &topodatapb.KeyRange{Start: startKey, End: endKey}, nil } -// KeyRangeString prints a topodatapb.KeyRange -func KeyRangeString(k *topodatapb.KeyRange) string { - if k == nil { +// KeyRangeString formats a topodatapb.KeyRange into a hex encoded string. +func KeyRangeString(keyRange *topodatapb.KeyRange) string { + if KeyRangeIsComplete(keyRange) { return "-" } - return hex.EncodeToString(k.Start) + "-" + hex.EncodeToString(k.End) + return hex.EncodeToString(keyRange.Start) + "-" + hex.EncodeToString(keyRange.End) } -// KeyRangeIsPartial returns true if the KeyRange does not cover the entire space. -func KeyRangeIsPartial(kr *topodatapb.KeyRange) bool { - if kr == nil { - return false +// KeyRangeStartCompare compares the Start of two KeyRange values using semantics unique to Start values where an +// empty Start means the *minimum* value; returns -1 if ab, 0 if equal. +func KeyRangeStartCompare(a, b *topodatapb.KeyRange) int { + aIsMinimum := a == nil || Empty(a.Start) + bIsMinimum := b == nil || Empty(b.Start) + + if aIsMinimum && bIsMinimum { + return 0 + } else if aIsMinimum { + return -1 + } else if bIsMinimum { + return 1 } - return !(len(kr.Start) == 0 && len(kr.End) == 0) + + return Compare(a.Start, b.Start) } -// KeyRangeEqual returns true if both key ranges cover the same area -func KeyRangeEqual(left, right *topodatapb.KeyRange) bool { - if left == nil { - return right == nil || (len(right.Start) == 0 && len(right.End) == 0) - } - if right == nil { - return len(left.Start) == 0 && len(left.End) == 0 - } - return bytes.Equal(addPadding(left.Start), addPadding(right.Start)) && - bytes.Equal(addPadding(left.End), addPadding(right.End)) +// KeyRangeStartEqual returns true if both KeyRange values have the same Start. +func KeyRangeStartEqual(a, b *topodatapb.KeyRange) bool { + return KeyRangeStartCompare(a, b) == 0 } -// addPadding adds padding to make sure keyrange represents an 8 byte integer. -// From Vitess docs: -// A hash vindex produces an 8-byte number. -// This means that all numbers less than 0x8000000000000000 will fall in shard -80. -// Any number with the highest bit set will be >= 0x8000000000000000, and will therefore -// belong to shard 80-. -// This means that from a keyrange perspective -80 == 00-80 == 0000-8000 == 000000-800000 -// If we don't add this padding, we could run into issues when transitioning from keyranges -// that use 2 bytes to 4 bytes. -func addPadding(kr []byte) []byte { - paddedKr := make([]byte, 8) - - for i := 0; i < len(kr); i++ { - paddedKr = append(paddedKr, kr[i]) - } +// KeyRangeEndCompare compares the End of two KeyRange values using semantics unique to End values where an +// empty End means the *maximum* value; returns -1 if ab, 0 if equal. +func KeyRangeEndCompare(a, b *topodatapb.KeyRange) int { + aIsMaximum := a == nil || Empty(a.End) + bIsMaximum := b == nil || Empty(b.End) - for i := len(kr); i < 8; i++ { - paddedKr = append(paddedKr, 0) + if aIsMaximum && bIsMaximum { + return 0 + } else if aIsMaximum { + return 1 + } else if bIsMaximum { + return -1 } - return paddedKr + + return Compare(a.End, b.End) } -// KeyRangeStartSmaller returns true if right's keyrange start is _after_ left's start -func KeyRangeStartSmaller(left, right *topodatapb.KeyRange) bool { - if left == nil { - return right != nil - } - if right == nil { - return false - } - return bytes.Compare(left.Start, right.Start) < 0 +// KeyRangeEndEqual returns true if both KeyRange values have the same End. +func KeyRangeEndEqual(a, b *topodatapb.KeyRange) bool { + return KeyRangeEndCompare(a, b) == 0 } -// KeyRangeStartEqual returns true if both key ranges have the same start -func KeyRangeStartEqual(left, right *topodatapb.KeyRange) bool { - if left == nil { - return right == nil || len(right.Start) == 0 - } - if right == nil { - return len(left.Start) == 0 - } - return bytes.Equal(addPadding(left.Start), addPadding(right.Start)) +// KeyRangeCompare compares two KeyRange values, taking into account both the Start and End fields and their +// field-specific comparison logic; returns -1 if ab, 0 if equal. Specifically: +// +// - The Start-specific KeyRangeStartCompare and End-specific KeyRangeEndCompare are used for proper comparison +// of an empty value for either Start or End. +// - The Start is compared first and End is only compared if Start is equal. +func KeyRangeCompare(a, b *topodatapb.KeyRange) int { + // First, compare the Start field. + if v := KeyRangeStartCompare(a, b); v != 0 { + // The Start field for a and b differ, and that is enough; return that comparison. + return v + } + + // The Start field was equal, so compare the End field and return that comparison. + return KeyRangeEndCompare(a, b) } -// KeyRangeContiguous returns true if the end of the left key range exactly -// matches the start of the right key range (i.e they are contigious) -func KeyRangeContiguous(left, right *topodatapb.KeyRange) bool { - if left == nil { - return right == nil || (len(right.Start) == 0 && len(right.End) == 0) - } - if right == nil { - return len(left.Start) == 0 && len(left.End) == 0 - } - return bytes.Equal(addPadding(left.End), addPadding(right.Start)) +// KeyRangeEqual returns true if a and b are equal. +func KeyRangeEqual(a, b *topodatapb.KeyRange) bool { + return KeyRangeCompare(a, b) == 0 } -// KeyRangeEndEqual returns true if both key ranges have the same end -func KeyRangeEndEqual(left, right *topodatapb.KeyRange) bool { - if left == nil { - return right == nil || len(right.End) == 0 - } - if right == nil { - return len(left.End) == 0 - } - return bytes.Equal(addPadding(left.End), addPadding(right.End)) +// KeyRangeLess returns true if a is less than b. +func KeyRangeLess(a, b *topodatapb.KeyRange) bool { + return KeyRangeCompare(a, b) < 0 } -// For more info on the following functions, see: -// See: http://stackoverflow.com/questions/4879315/what-is-a-tidy-algorithm-to-find-overlapping-intervals -// two segments defined as (a,b) and (c,d) (with a c) && (a < d) -// overlap = min(b, d) - max(c, a) - -// KeyRangesIntersect returns true if some Keyspace values exist in both ranges. -func KeyRangesIntersect(first, second *topodatapb.KeyRange) bool { - if first == nil || second == nil { - return true - } - return (len(first.End) == 0 || bytes.Compare(second.Start, first.End) < 0) && - (len(second.End) == 0 || bytes.Compare(first.Start, second.End) < 0) +// KeyRangeIsComplete returns true if the KeyRange covers the entire keyspace. +func KeyRangeIsComplete(keyRange *topodatapb.KeyRange) bool { + return keyRange == nil || (Empty(keyRange.Start) && Empty(keyRange.End)) } -// KeyRangesOverlap returns the overlap between two KeyRanges. -// They need to overlap, otherwise an error is returned. -func KeyRangesOverlap(first, second *topodatapb.KeyRange) (*topodatapb.KeyRange, error) { - if !KeyRangesIntersect(first, second) { - return nil, fmt.Errorf("KeyRanges %v and %v don't overlap", first, second) - } - if first == nil { - return second, nil - } - if second == nil { - return first, nil - } - // compute max(c,a) and min(b,d) - // start with (a,b) - result := proto.Clone(first).(*topodatapb.KeyRange) - // if c > a, then use c - if bytes.Compare(second.Start, first.Start) > 0 { - result.Start = second.Start +// KeyRangeIsPartial returns true if the KeyRange does not cover the entire keyspace. +func KeyRangeIsPartial(keyRange *topodatapb.KeyRange) bool { + return !KeyRangeIsComplete(keyRange) +} + +// KeyRangeContiguous returns true if the End of KeyRange a is equivalent to the Start of the KeyRange b, +// which means that they are contiguous. +func KeyRangeContiguous(a, b *topodatapb.KeyRange) bool { + if KeyRangeIsComplete(a) || KeyRangeIsComplete(b) { + return false // no two KeyRange values can be contiguous if either is the complete range } - // if b is maxed out, or - // (d is not maxed out and d < b) - // ^ valid test as neither b nor d are max - // then use d - if len(first.End) == 0 || (len(second.End) != 0 && bytes.Compare(second.End, first.End) < 0) { - result.End = second.End + + return Equal(a.End, b.Start) +} + +// For more info on the following functions, see: +// http://stackoverflow.com/questions/4879315/what-is-a-tidy-algorithm-to-find-overlapping-intervals +// Two segments defined as (a,b) and (c,d) (with a c) && (a < d) +// * overlap = min(b, d) - max(c, a) + +// KeyRangeIntersect returns true if some part of KeyRange a and b overlap, meaning that some keyspace ID values +// exist in both a and b. +func KeyRangeIntersect(a, b *topodatapb.KeyRange) bool { + if KeyRangeIsComplete(a) || KeyRangeIsComplete(b) { + return true // if either KeyRange is complete, there must be an intersection } - return result, nil + + return (Empty(a.End) || Less(b.Start, a.End)) && (Empty(b.End) || Less(a.Start, b.End)) } -// KeyRangeIncludes returns true if the first provided KeyRange, big, -// contains the second KeyRange, small. If they intersect, but small -// spills out, this returns false. -func KeyRangeIncludes(big, small *topodatapb.KeyRange) bool { - if big == nil { - // The outside one covers everything, we're good. +// KeyRangeContainsKeyRange returns true if KeyRange a fully contains KeyRange b. +func KeyRangeContainsKeyRange(a, b *topodatapb.KeyRange) bool { + // If a covers the entire KeyRange, it always contains b. + if KeyRangeIsComplete(a) { return true } - if small == nil { - // The smaller one covers everything, better have the - // bigger one also cover everything. - return len(big.Start) == 0 && len(big.End) == 0 - } - // Now we check small.Start >= big.Start, and small.End <= big.End - if len(big.Start) != 0 && bytes.Compare(small.Start, big.Start) < 0 { - return false + + // If b covers the entire KeyRange, a must also cover the entire KeyRange. + if KeyRangeIsComplete(b) { + return KeyRangeIsComplete(a) } - if len(big.End) != 0 && (len(small.End) == 0 || bytes.Compare(small.End, big.End) > 0) { - return false + + // Ensure b.Start >= a.Start and b.End <= a.End. + if KeyRangeStartCompare(b, a) >= 0 && KeyRangeEndCompare(b, a) <= 0 { + return true } - return true + + return false } // ParseShardingSpec parses a string that describes a sharding @@ -351,11 +306,63 @@ func ParseShardingSpec(spec string) ([]*topodatapb.KeyRange, error) { return ranges, nil } -var krRegexp = regexp.MustCompile(`^[0-9a-fA-F]*-[0-9a-fA-F]*$`) +// IsValidKeyRange returns true if the string represents a valid key range. +func IsValidKeyRange(keyRangeString string) bool { + return KeyRangePattern.MatchString(keyRangeString) +} -// IsKeyRange returns true if the string represents a keyrange. -func IsKeyRange(kr string) bool { - return krRegexp.MatchString(kr) +// EvenShardsKeyRange returns a key range definition for a shard at index "i", +// assuming range based sharding with "n" equal-width shards in total. +// i starts at 0. +// +// Example: (1, 2) returns the second out of two shards in total i.e. "80-". +// +// This function must not be used in the Vitess code base because Vitess also +// supports shards with different widths. In that case, the output of this +// function would be wrong. +// +// Note: start and end values have trailing zero bytes omitted. +// For example, "80-" has only the first byte (0x80) set. +// We do this to produce the same KeyRange objects as ParseKeyRangeParts() does. +// Because it's using the Go hex methods, it's omitting trailing zero bytes as +// well. +func EvenShardsKeyRange(i, n int) (*topodatapb.KeyRange, error) { + if n <= 0 { + return nil, fmt.Errorf("the shard count must be > 0: %v", n) + } + if i >= n { + return nil, fmt.Errorf("the index of the shard must be less than the total number of shards: %v < %v", i, n) + } + if n&(n-1) != 0 { + return nil, fmt.Errorf("the shard count must be a power of two: %v", n) + } + + // Determine the number of bytes which are required to represent any + // KeyRange start or end for the given n. + // This is required to trim the returned values to the same length e.g. + // (256, 512) should return 8000-8080 as shard key range. + minBytes := 0 + for nn := Uint64Key(n - 1); nn > 0; nn >>= 8 { + minBytes++ + } + + width := Uint64Key(math.MaxUint64)/Uint64Key(n) + 1 + start := Uint64Key(i) * width + end := start + width + + // Note: The byte value is empty if start or end is the min or the max + // respectively. + startBytes := start.Bytes()[:minBytes] + endBytes := end.Bytes()[:minBytes] + if start == 0 { + startBytes = []byte{} + } + if end == 0 { + // Always set the end except for the last shard. In that case, the + // end value (2^64) flows over and is the same as 0. + endBytes = []byte{} + } + return &topodatapb.KeyRange{Start: startBytes, End: endBytes}, nil } // GenerateShardRanges returns shard ranges assuming a keyspace with N shards. diff --git a/go/vt/key/key_test.go b/go/vt/key/key_test.go index 639b81c5f18..8db45aa79b9 100644 --- a/go/vt/key/key_test.go +++ b/go/vt/key/key_test.go @@ -28,7 +28,222 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) -func TestKey(t *testing.T) { +func TestNormalize(t *testing.T) { + type args struct { + id []byte + } + tests := []struct { + name string + args args + want []byte + }{ + { + "empty should empty", + args{[]byte{}}, + []byte{}, + }, + { + "one zero should be empty", + args{[]byte{0x00}}, + []byte{}, + }, + { + "any number of zeroes should be empty", + args{[]byte{0x00, 0x00, 0x00}}, + []byte{}, + }, + { + "one non-zero byte should be left alone", + args{[]byte{0x11}}, + []byte{0x11}, + }, + { + "multiple non-zero bytes should be left alone", + args{[]byte{0x11, 0x22, 0x33}}, + []byte{0x11, 0x22, 0x33}, + }, + { + "zeroes that aren't trailing should be left alone", + args{[]byte{0x11, 0x00, 0x22, 0x00, 0x33, 0x00}}, + []byte{0x11, 0x00, 0x22, 0x00, 0x33}, + }, + { + "excess zero bytes should be removed after a non-zero byte", + args{[]byte{0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + []byte{0x11}, + }, + { + "excess zero bytes should be removed after multiple non-zero bytes", + args{[]byte{0x11, 0x22, 0x00, 0x00, 0x00}}, + []byte{0x11, 0x22}, + }, + { + "values longer than eight bytes should be supported", + args{[]byte{0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0x00}}, + []byte{0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, Normalize(tt.args.id), "Normalize(%v)", tt.args.id) + }) + } +} + +func TestCompare(t *testing.T) { + type args struct { + a []byte + b []byte + } + tests := []struct { + name string + args args + want int + }{ + { + "empty ids are equal", + args{[]byte{}, []byte{}}, + 0, + }, + { + "equal full id values are equal", + args{ + []byte{0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88}, + []byte{0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88}, + }, + 0, + }, + { + "equal partial id values are equal", + args{ + []byte{0x11, 0x22}, + []byte{0x11, 0x22}, + }, + 0, + }, + { + "equal full and partial id values are equal", + args{[]byte{0x11, 0x22, 0x33, 0x44}, []byte{0x11, 0x22, 0x33, 0x44, 0x00, 0x00, 0x00, 0x00}}, + 0, + }, + { + "equal partial and full id values are equal", + args{[]byte{0x11, 0x22, 0x33, 0x44, 0x00, 0x00, 0x00, 0x00}, []byte{0x11, 0x22, 0x33, 0x44}}, + 0, + }, + {"a less than b", args{[]byte{0x01}, []byte{0x02}}, -1}, + {"a greater than b", args{[]byte{0x02}, []byte{0x01}}, +1}, + { + "equal partial a and b with different lengths", + args{[]byte{0x30, 0x00}, []byte{0x20}}, + 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, Compare(tt.args.a, tt.args.b), "Compare(%v, %v)", tt.args.a, tt.args.b) + }) + } +} + +func TestLess(t *testing.T) { + type args struct { + a []byte + b []byte + } + tests := []struct { + name string + args args + want bool + }{ + // Less uses Compare which is already robustly tested, so we're just aiming to ensure that the result + // of the Compare is used correctly in context and not e.g. reversed, so test a few obvious cases. + { + "a is less than b", + args{[]byte{0x01}, []byte{0x02}}, + true, + }, + { + "a is equal to b", + args{[]byte{0x01}, []byte{0x01}}, + false, + }, + { + "a is greater than b", + args{[]byte{0x02}, []byte{0x01}}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, Less(tt.args.a, tt.args.b), "Less(%v, %v)", tt.args.a, tt.args.b) + }) + } +} + +func TestEqual(t *testing.T) { + type args struct { + a []byte + b []byte + } + tests := []struct { + name string + args args + want bool + }{ + // Equal uses Compare which is already robustly tested, so we're just aiming to ensure that the result + // of the Compare is used correctly in context and not e.g. reversed, so test a few obvious cases. + { + "a is less than b", + args{[]byte{0x01}, []byte{0x02}}, + false, + }, + { + "a is equal to b", + args{[]byte{0x01}, []byte{0x01}}, + true, + }, + { + "a is greater than b", + args{[]byte{0x02}, []byte{0x01}}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, Equal(tt.args.a, tt.args.b), "Equal(%v, %v)", tt.args.a, tt.args.b) + }) + } +} + +func TestEmpty(t *testing.T) { + type args struct { + id []byte + } + tests := []struct { + name string + args args + want bool + }{ + { + "empty", + args{[]byte{}}, + true, + }, + { + "not empty", + args{[]byte{0x11, 0x22, 0x33}}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, Empty(tt.args.id), "Empty(%v)", tt.args.id) + }) + } +} + +func TestUint64Key(t *testing.T) { k0 := Uint64Key(0) k1 := Uint64Key(1) k2 := Uint64Key(0x7FFFFFFFFFFFFFFF) @@ -362,7 +577,7 @@ func TestKeyRangeContiguous(t *testing.T) { }, { first: "-", second: "-40", - out: true, + out: false, }, { first: "40-80", second: "c0-", @@ -460,7 +675,398 @@ func TestParseShardingSpec(t *testing.T) { } } -func TestContains(t *testing.T) { +func TestKeyRangeComparisons(t *testing.T) { + type args struct { + a *topodatapb.KeyRange + b *topodatapb.KeyRange + } + type wants struct { + wantStartCompare int + wantStartEqual bool + wantEndCompare int + wantEndEqual bool + wantCompare int + wantEqual bool + } + tests := []struct { + name string + args args + wants wants + }{ + { + name: "a and b are both full range", + args: args{ + a: stringToKeyRange("-"), + b: stringToKeyRange("-"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a is equal to b", + args: args{ + a: stringToKeyRange("10-30"), + b: stringToKeyRange("10-30"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (2 digit, end only) but equal to b (2 digits, end only)", + args: args{ + a: stringToKeyRange("-80"), + b: stringToKeyRange("-80"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (2 digit, end only) but equal to b (4 digits, end only)", + args: args{ + a: stringToKeyRange("-80"), + b: stringToKeyRange("-8000"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (2 digit, end only) but equal to b (6 digits, end only)", + args: args{ + a: stringToKeyRange("-80"), + b: stringToKeyRange("-800000"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (2 digit, end only) but equal to b (8 digits, end only)", + args: args{ + a: stringToKeyRange("-80"), + b: stringToKeyRange("-80000000"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (2 digit, start only) but equal to b (2 digits, start only)", + args: args{ + stringToKeyRange("80-"), + stringToKeyRange("80-"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (2 digit, start only) but equal to b (4 digits, start only)", + args: args{ + a: stringToKeyRange("80-"), + b: stringToKeyRange("8000-"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (2 digit, start only) but equal to b (6 digits, start only)", + args: args{ + a: stringToKeyRange("80-"), + b: stringToKeyRange("800000-"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (2 digit, start only) but equal to b (8 digits, start only)", + args: args{ + a: stringToKeyRange("80-"), + b: stringToKeyRange("80000000-"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (4 digits) but equal to b (2 digits)", + args: args{ + a: stringToKeyRange("1000-3000"), + b: stringToKeyRange("10-30"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a (8 digits) but equal to b (4 digits)", + args: args{ + a: stringToKeyRange("10000000-30000000"), + b: stringToKeyRange("1000-3000"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "b (4 digits) but equal to a (2 digits)", + args: args{ + a: stringToKeyRange("10-30"), + b: stringToKeyRange("1000-3000"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "b (8 digits) but equal to a (4 digits)", + args: args{ + a: stringToKeyRange("10-30"), + b: stringToKeyRange("10000000-30000000"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 0, + wantEqual: true, + }, + }, + { + name: "a is full range, b is not", + args: args{ + a: stringToKeyRange("-"), + b: stringToKeyRange("20-30"), + }, + wants: wants{ + wantStartCompare: -1, + wantStartEqual: false, + wantEndCompare: 1, + wantEndEqual: false, + wantCompare: -1, + wantEqual: false, + }, + }, + { + name: "b is full range, a is not", + args: args{ + a: stringToKeyRange("10-30"), + b: stringToKeyRange("-"), + }, + wants: wants{ + wantStartCompare: 1, + wantStartEqual: false, + wantEndCompare: -1, + wantEndEqual: false, + wantCompare: 1, + wantEqual: false, + }, + }, + { + name: "a start is greater than b start", + args: args{ + a: stringToKeyRange("10-30"), + b: stringToKeyRange("20-30"), + }, + wants: wants{ + wantStartCompare: -1, + wantStartEqual: false, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: -1, + wantEqual: false, + }, + }, + { + name: "b start is greater than a start", + args: args{ + a: stringToKeyRange("20-30"), + b: stringToKeyRange("10-30"), + }, + wants: wants{ + wantStartCompare: 1, + wantStartEqual: false, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 1, + wantEqual: false, + }, + }, + { + name: "a start is empty, b start is not", + args: args{ + a: stringToKeyRange("-30"), + b: stringToKeyRange("10-30"), + }, + wants: wants{ + wantStartCompare: -1, + wantStartEqual: false, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: -1, + wantEqual: false, + }, + }, + { + name: "b start is empty, a start is not", + args: args{ + a: stringToKeyRange("10-30"), + b: stringToKeyRange("-30"), + }, + wants: wants{ + wantStartCompare: 1, + wantStartEqual: false, + wantEndCompare: 0, + wantEndEqual: true, + wantCompare: 1, + wantEqual: false, + }, + }, + { + name: "a end is greater than b end", + args: args{ + a: stringToKeyRange("10-30"), + b: stringToKeyRange("10-20"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 1, + wantEndEqual: false, + wantCompare: 1, + wantEqual: false, + }, + }, + { + name: "b end is greater than a end", + args: args{ + a: stringToKeyRange("10-20"), + b: stringToKeyRange("10-30"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: -1, + wantEndEqual: false, + wantCompare: -1, + wantEqual: false, + }, + }, + { + name: "a end is empty, b end is not", + args: args{ + a: stringToKeyRange("10-"), + b: stringToKeyRange("10-30"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: 1, + wantEndEqual: false, + wantCompare: 1, + wantEqual: false, + }, + }, + { + name: "b end is empty, a end is not", + args: args{ + a: stringToKeyRange("10-30"), + b: stringToKeyRange("10-"), + }, + wants: wants{ + wantStartCompare: 0, + wantStartEqual: true, + wantEndCompare: -1, + wantEndEqual: false, + wantCompare: -1, + wantEqual: false, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.wants.wantStartCompare, KeyRangeStartCompare(tt.args.a, tt.args.b), "KeyRangeStartCompare(%v, %v)", tt.args.a, tt.args.b) + assert.Equalf(t, tt.wants.wantStartEqual, KeyRangeStartEqual(tt.args.a, tt.args.b), "KeyRangeStartEqual(%v, %v)", tt.args.a, tt.args.b) + assert.Equalf(t, tt.wants.wantEndCompare, KeyRangeEndCompare(tt.args.a, tt.args.b), "KeyRangeEndCompare(%v, %v)", tt.args.a, tt.args.b) + assert.Equalf(t, tt.wants.wantEndEqual, KeyRangeEndEqual(tt.args.a, tt.args.b), "KeyRangeEndEqual(%v, %v)", tt.args.a, tt.args.b) + assert.Equalf(t, tt.wants.wantCompare, KeyRangeCompare(tt.args.a, tt.args.b), "KeyRangeCompare(%v, %v)", tt.args.a, tt.args.b) + assert.Equalf(t, tt.wants.wantEqual, KeyRangeEqual(tt.args.a, tt.args.b), "KeyRangeEqual(%v, %v)", tt.args.a, tt.args.b) + }) + } +} + +func TestKeyRangeContains(t *testing.T) { var table = []struct { kid string start string @@ -499,120 +1105,321 @@ func TestContains(t *testing.T) { } } -func TestIntersectOverlap(t *testing.T) { - var table = []struct { - a string - b string - c string - d string - intersects bool - overlap string - }{ - {a: "40", b: "80", c: "c0", d: "d0", intersects: false}, - {a: "", b: "80", c: "80", d: "", intersects: false}, - {a: "", b: "80", c: "", d: "40", intersects: true, overlap: "-40"}, - {a: "80", b: "", c: "c0", d: "", intersects: true, overlap: "c0-"}, - {a: "", b: "80", c: "40", d: "80", intersects: true, overlap: "40-80"}, - {a: "40", b: "80", c: "60", d: "a0", intersects: true, overlap: "60-80"}, - {a: "40", b: "80", c: "50", d: "60", intersects: true, overlap: "50-60"}, - {a: "40", b: "80", c: "10", d: "50", intersects: true, overlap: "40-50"}, - {a: "40", b: "80", c: "40", d: "80", intersects: true, overlap: "40-80"}, - {a: "", b: "80", c: "", d: "80", intersects: true, overlap: "-80"}, - {a: "40", b: "", c: "40", d: "", intersects: true, overlap: "40-"}, - {a: "40", b: "80", c: "20", d: "40", intersects: false}, - {a: "80", b: "", c: "80", d: "c0", intersects: true, overlap: "80-c0"}, - {a: "", b: "", c: "c0", d: "d0", intersects: true, overlap: "c0-d0"}, +func TestKeyRangeIntersect(t *testing.T) { + type args struct { + a *topodatapb.KeyRange + b *topodatapb.KeyRange } + tests := []struct { + name string + args args + want bool + }{ + // non-intersecting cases + { + name: "typical half-range split, ascending order", + args: args{a: stringToKeyRange("-80"), b: stringToKeyRange("80-")}, + want: false, + }, + { + name: "typical half-range split, descending order", + args: args{a: stringToKeyRange("80-"), b: stringToKeyRange("-80")}, + want: false, + }, + { + name: "partial ranges, ascending order", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("c0-d0")}, + want: false, + }, + { + name: "partial ranges, descending order", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("20-40")}, + want: false, + }, + { + name: "partial ranges, different key lengths", + args: args{a: stringToKeyRange("4000-8000"), b: stringToKeyRange("20-40")}, + want: false, + }, - for _, el := range table { - a, err := hex.DecodeString(el.a) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - b, err := hex.DecodeString(el.b) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - left := &topodatapb.KeyRange{Start: a, End: b} - c, err := hex.DecodeString(el.c) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - d, err := hex.DecodeString(el.d) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - right := &topodatapb.KeyRange{Start: c, End: d} - if c := KeyRangesIntersect(left, right); c != el.intersects { - t.Errorf("Unexpected result: KeyRangesIntersect for %v and %v yields %v.", left, right, c) - } - overlap, err := KeyRangesOverlap(left, right) - if el.intersects { - if err != nil { - t.Errorf("Unexpected result: KeyRangesOverlap for overlapping %v and %v returned an error: %v", left, right, err) - } else { - got := hex.EncodeToString(overlap.Start) + "-" + hex.EncodeToString(overlap.End) - if got != el.overlap { - t.Errorf("Unexpected result: KeyRangesOverlap for overlapping %v and %v should have returned: %v but got: %v", left, right, el.overlap, got) - } - } - } else { - if err == nil { - t.Errorf("Unexpected result: KeyRangesOverlap for non-overlapping %v and %v should have returned an error", left, right) - } - } + // intersecting cases with a full range + { + name: "full range with full range", + args: args{a: stringToKeyRange("-"), b: stringToKeyRange("-")}, + want: true, + }, + { + name: "full range with maximum key partial range", + args: args{a: stringToKeyRange("-"), b: stringToKeyRange("80-")}, + want: true, + }, + { + name: "full range with partial range", + args: args{a: stringToKeyRange("-"), b: stringToKeyRange("c0-d0")}, + want: true, + }, + { + name: "minimum key partial range with full range", + args: args{a: stringToKeyRange("-80"), b: stringToKeyRange("-")}, + want: true, + }, + { + name: "partial range with full range", + args: args{a: stringToKeyRange("a0-b0"), b: stringToKeyRange("-")}, + want: true, + }, + + // intersecting cases with only partial ranges + { + name: "the same range, both from minimum key", + args: args{a: stringToKeyRange("-80"), b: stringToKeyRange("-80")}, + want: true, + }, + { + name: "the same range, both from minimum key, different key lengths", + args: args{a: stringToKeyRange("-8000"), b: stringToKeyRange("-80")}, + want: true, + }, + { + name: "the same range, both to maximum key", + args: args{a: stringToKeyRange("40-"), b: stringToKeyRange("40-")}, + want: true, + }, + { + name: "the same range, both to maximum key, different key lengths", + args: args{a: stringToKeyRange("4000-"), b: stringToKeyRange("40-")}, + want: true, + }, + { + name: "the same range, both with mid-range keys", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("40-80")}, + want: true, + }, + { + name: "the same range, both with mid-range keys, different key lengths", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("4000-8000")}, + want: true, + }, + { + name: "different-sized partial ranges, both from minimum key", + args: args{a: stringToKeyRange("-80"), b: stringToKeyRange("-40")}, + want: true, + }, + { + name: "different-sized partial ranges, both to maximum key", + args: args{a: stringToKeyRange("80-"), b: stringToKeyRange("c0-")}, + want: true, + }, + { + name: "different-sized partial ranges, from minimum key with mid-range key", + args: args{a: stringToKeyRange("-80"), b: stringToKeyRange("40-80")}, + want: true, + }, + { + name: "different-sized partial ranges, from minimum key with mid-range key, different key lengths", + args: args{a: stringToKeyRange("-80"), b: stringToKeyRange("4000-8000")}, + want: true, + }, + { + name: "different-sized partial ranges, to maximum key with mid-range key", + args: args{a: stringToKeyRange("80-"), b: stringToKeyRange("80-c0")}, + want: true, + }, + { + name: "different-sized partial ranges, to maximum key with mid-range key, different key lengths", + args: args{a: stringToKeyRange("80-"), b: stringToKeyRange("8000-c000")}, + want: true, + }, + { + name: "partially overlapping ranges, in ascending order", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("60-a0")}, + want: true, + }, + { + name: "partially overlapping ranges, in descending order", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("10-50")}, + want: true, + }, + { + name: "partially overlapping ranges, one fully containing the other, in ascending order", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("50-60")}, + want: true, + }, + { + name: "partially overlapping ranges, one fully containing the other, in descending order", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("30-90")}, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, KeyRangeIntersect(tt.args.a, tt.args.b), "KeyRangeIntersect(%v, %v)", tt.args.a, tt.args.b) + }) } } -func TestKeyRangeIncludes(t *testing.T) { - var table = []struct { - name string - big string - small string - expected bool +func TestKeyRangeContainsKeyRange(t *testing.T) { + type args struct { + a *topodatapb.KeyRange + b *topodatapb.KeyRange + } + var tests = []struct { + name string + args args + want bool }{ - {"big nil, small nil", "nil", "nil", true}, - {"big nil, small non nil, fully partial", "nil", "80-c0", true}, - {"big nil, small non nil, full start", "nil", "-c0", true}, - {"big nil, small non nil, full end", "nil", "80-", true}, - {"big non-nil, fully partial, small nil", "80-c0", "nil", false}, - {"big non-nil, full start, small nil", "-c0", "nil", false}, - {"big non-nil, full end, small nil", "80-", "nil", false}, - {"big full, small full", "-", "-", true}, - {"big full, small partial", "-", "40-60", true}, - {"big partial, small full", "40-60", "-", false}, - - {"big partial, small to the end", "40-60", "40-", false}, - {"big partial, small bigger to the right", "40-60", "40-80", false}, - {"big partial, small equal", "40-60", "40-60", true}, - {"big partial, small smaller right", "40-60", "40-50", true}, - - {"big partial, small to the beginning", "40-60", "-60", false}, - {"big partial, small smaller to the left", "40-60", "20-60", false}, - {"big partial, small bigger left", "40-60", "50-60", true}, - } - - var err error - for _, tc := range table { - var big, small *topodatapb.KeyRange - if tc.big != "nil" { - parts := strings.Split(tc.big, "-") - big, err = ParseKeyRangeParts(parts[0], parts[1]) - if err != nil { - t.Fatalf("test data error in %v: %v", tc.big, err) - } - } - if tc.small != "nil" { - parts := strings.Split(tc.small, "-") - small, err = ParseKeyRangeParts(parts[0], parts[1]) - if err != nil { - t.Fatalf("test data error in %v: %v", tc.small, err) - } - } - got := KeyRangeIncludes(big, small) - if got != tc.expected { - t.Errorf("KeyRangeIncludes for test case '%v' returned %v but expected %v", tc.name, got, tc.expected) - } + // full range contains itself + { + name: "both full range", + args: args{a: stringToKeyRange("-"), b: stringToKeyRange("-")}, + want: true, + }, + + // full range always contains a partial range + { + name: "full range, partial range from minimum key", + args: args{a: stringToKeyRange("-"), b: stringToKeyRange("-c0")}, + want: true, + }, + { + name: "full range, partial range to maximum key", + args: args{a: stringToKeyRange("-"), b: stringToKeyRange("80-")}, + want: true, + }, + { + name: "full range, partial mid-key range", + args: args{a: stringToKeyRange("-"), b: stringToKeyRange("80-c0")}, + want: true, + }, + + // equal partial ranges contain each other + { + name: "equal partial ranges", + args: args{a: stringToKeyRange("40-60"), b: stringToKeyRange("40-60")}, + want: true, + }, + { + name: "equal partial ranges, different size keys", + args: args{a: stringToKeyRange("40-60"), b: stringToKeyRange("4000-6000")}, + want: true, + }, + { + name: "equal partial ranges, different size keys", + args: args{a: stringToKeyRange("4000-6000"), b: stringToKeyRange("40-60")}, + want: true, + }, + + // partial ranges may contain smaller partial ranges + { + name: "partial range, partial touching start", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("40-50")}, + want: true, + }, + { + name: "partial range, partial touching start, different size keys", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("4000-5000")}, + want: true, + }, + { + name: "partial range, partial touching start, different size keys", + args: args{a: stringToKeyRange("4000-8000"), b: stringToKeyRange("40-50")}, + want: true, + }, + { + name: "partial range, partial touching end", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("70-80")}, + want: true, + }, + { + name: "partial range, partial touching end, different size keys", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("7000-8000")}, + want: true, + }, + { + name: "partial range, partial touching end, different size keys", + args: args{a: stringToKeyRange("4000-8000"), b: stringToKeyRange("70-80")}, + want: true, + }, + { + name: "partial range, partial in the middle", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("50-70")}, + want: true, + }, + { + name: "partial range, partial in the middle, different size keys", + args: args{a: stringToKeyRange("40-80"), b: stringToKeyRange("5000-7000")}, + want: true, + }, + { + name: "partial range, partial in the middle, different size keys", + args: args{a: stringToKeyRange("4000-8000"), b: stringToKeyRange("50-70")}, + want: true, + }, + + // partial ranges do not contain the full range + { + name: "partial range from minimum key, full range", + args: args{a: stringToKeyRange("-c0"), b: stringToKeyRange("-")}, + want: false, + }, + { + name: "partial range to maximum key, full range", + args: args{a: stringToKeyRange("80-"), b: stringToKeyRange("-")}, + want: false, + }, + { + name: "partial mid-key range, full range", + args: args{a: stringToKeyRange("80-c0"), b: stringToKeyRange("-")}, + want: false, + }, + + // partial ranges do not contain overlapping but boundary-crossing partial ranges + { + name: "partial range mid-key range, overlapping partial range to maximum key", + args: args{a: stringToKeyRange("40-60"), b: stringToKeyRange("50-")}, + want: false, + }, + { + name: "partial range mid-key range, overlapping partial range to maximum key", + args: args{a: stringToKeyRange("40-60"), b: stringToKeyRange("5000-")}, + want: false, + }, + { + name: "partial range mid-key range, overlapping partial range to maximum key, different size keys", + args: args{a: stringToKeyRange("4000-6000"), b: stringToKeyRange("50-")}, + want: false, + }, + { + name: "partial range mid-key range, overlapping partial range to maximum key, different size keys", + args: args{a: stringToKeyRange("40-60"), b: stringToKeyRange("5000-")}, + want: false, + }, + { + name: "partial range mid-key range, overlapping partial range from minimum key", + args: args{a: stringToKeyRange("40-60"), b: stringToKeyRange("-50")}, + want: false, + }, + { + name: "partial range mid-key range, overlapping partial range from minimum key", + args: args{a: stringToKeyRange("40-60"), b: stringToKeyRange("-5000")}, + want: false, + }, + { + name: "partial range mid-key range, overlapping partial range from minimum key, different size keys", + args: args{a: stringToKeyRange("4000-6000"), b: stringToKeyRange("-50")}, + want: false, + }, + { + name: "partial range mid-key range, overlapping partial range from minimum key, different size keys", + args: args{a: stringToKeyRange("40-60"), b: stringToKeyRange("-5000")}, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, KeyRangeContainsKeyRange(tt.args.a, tt.args.b), "KeyRangeContainsKeyRange(%v, %v)", tt.args.a, tt.args.b) + }) } } @@ -671,65 +1478,40 @@ func BenchmarkKeyRangesIntersect(b *testing.B) { } for i := 0; i < b.N; i++ { - KeyRangesIntersect(kr1, kr2) + KeyRangeIntersect(kr1, kr2) } } -func BenchmarkKeyRangesOverlap(b *testing.B) { - kr1 := &topodatapb.KeyRange{ - Start: []byte{0x40, 0, 0, 0, 0, 0, 0, 0}, - End: []byte{0x80, 0, 0, 0, 0, 0, 0, 0}, - } - kr2 := &topodatapb.KeyRange{ - Start: []byte{0x30, 0, 0, 0, 0, 0, 0, 0}, - End: []byte{0x50, 0, 0, 0, 0, 0, 0, 0}, - } - - for i := 0; i < b.N; i++ { - if _, err := KeyRangesOverlap(kr1, kr2); err != nil { - b.Fatal(err) - } - } -} +func TestIsValidKeyRange(t *testing.T) { + tests := []struct { + arg string + want bool + }{ + // normal cases + {"-", true}, + {"00-", true}, + {"-80", true}, + {"40-80", true}, + {"80-", true}, + {"a0-", true}, + {"-A0", true}, -func TestIsKeyRange(t *testing.T) { - testcases := []struct { - in string - out bool - }{{ - in: "-", - out: true, - }, { - in: "-80", - out: true, - }, { - in: "40-80", - out: true, - }, { - in: "80-", - out: true, - }, { - in: "a0-", - out: true, - }, { - in: "-A0", - out: true, - }, { - in: "", - out: false, - }, { - in: "x-80", - out: false, - }, { - in: "-80x", - out: false, - }, { - in: "select", - out: false, - }} + // special cases + {"0", true}, // equal to "-" - for _, tcase := range testcases { - assert.Equal(t, IsKeyRange(tcase.in), tcase.out, tcase.in) + // invalid cases + {"", false}, // empty is not allowed + {"11", false}, // no hyphen + {"-1", false}, // odd number of digits + {"-111", false}, // odd number of digits + {"1-2", false}, // odd number of digits + {"x-80", false}, // invalid character + {"-80x", false}, // invalid character + {"select", false}, // nonsense + {"+", false}, // nonsense + } + for _, tt := range tests { + assert.Equalf(t, tt.want, IsValidKeyRange(tt.arg), "IsValidKeyRange(%v)", tt.arg) } } diff --git a/go/vt/logutil/logger.go b/go/vt/logutil/logger.go index 8ebf88e085d..524ca4db4d7 100644 --- a/go/vt/logutil/logger.go +++ b/go/vt/logutil/logger.go @@ -25,6 +25,7 @@ import ( "sync" "time" + "vitess.io/vitess/go/protoutil" logutilpb "vitess.io/vitess/go/vt/proto/logutil" ) @@ -73,7 +74,7 @@ func EventToBuffer(event *logutilpb.Event, buf *bytes.Buffer) { return } - t := ProtoToTime(event.Time) + t := protoutil.TimeFromProto(event.Time).UTC() _, month, day := t.Date() hour, minute, second := t.Clock() twoDigits(buf, int(month)) @@ -137,7 +138,7 @@ func NewCallbackLogger(f func(*logutilpb.Event)) *CallbackLogger { func (cl *CallbackLogger) InfoDepth(depth int, s string) { file, line := fileAndLine(2 + depth) cl.f(&logutilpb.Event{ - Time: TimeToProto(time.Now()), + Time: protoutil.TimeToProto(time.Now()), Level: logutilpb.Level_INFO, File: file, Line: line, @@ -149,7 +150,7 @@ func (cl *CallbackLogger) InfoDepth(depth int, s string) { func (cl *CallbackLogger) WarningDepth(depth int, s string) { file, line := fileAndLine(2 + depth) cl.f(&logutilpb.Event{ - Time: TimeToProto(time.Now()), + Time: protoutil.TimeToProto(time.Now()), Level: logutilpb.Level_WARNING, File: file, Line: line, @@ -161,7 +162,7 @@ func (cl *CallbackLogger) WarningDepth(depth int, s string) { func (cl *CallbackLogger) ErrorDepth(depth int, s string) { file, line := fileAndLine(2 + depth) cl.f(&logutilpb.Event{ - Time: TimeToProto(time.Now()), + Time: protoutil.TimeToProto(time.Now()), Level: logutilpb.Level_ERROR, File: file, Line: line, @@ -198,7 +199,7 @@ func (cl *CallbackLogger) Error(err error) { func (cl *CallbackLogger) Printf(format string, v ...any) { file, line := fileAndLine(2) cl.f(&logutilpb.Event{ - Time: TimeToProto(time.Now()), + Time: protoutil.TimeToProto(time.Now()), Level: logutilpb.Level_CONSOLE, File: file, Line: line, diff --git a/go/vt/logutil/logger_test.go b/go/vt/logutil/logger_test.go index c34f8cf8ec3..0eb4edb2b93 100644 --- a/go/vt/logutil/logger_test.go +++ b/go/vt/logutil/logger_test.go @@ -20,6 +20,7 @@ import ( "testing" "time" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/race" logutilpb "vitess.io/vitess/go/vt/proto/logutil" ) @@ -31,7 +32,7 @@ func TestLogEvent(t *testing.T) { }{ { event: &logutilpb.Event{ - Time: TimeToProto(time.Date(2014, time.November, 10, 23, 30, 12, 123456000, time.UTC)), + Time: protoutil.TimeToProto(time.Date(2014, time.November, 10, 23, 30, 12, 123456000, time.UTC)), Level: logutilpb.Level_INFO, File: "file.go", Line: 123, @@ -41,7 +42,7 @@ func TestLogEvent(t *testing.T) { }, { event: &logutilpb.Event{ - Time: TimeToProto(time.Date(2014, time.January, 20, 23, 30, 12, 0, time.UTC)), + Time: protoutil.TimeToProto(time.Date(2014, time.January, 20, 23, 30, 12, 0, time.UTC)), Level: logutilpb.Level_WARNING, File: "file2.go", Line: 567, @@ -51,7 +52,7 @@ func TestLogEvent(t *testing.T) { }, { event: &logutilpb.Event{ - Time: TimeToProto(time.Date(2014, time.January, 20, 23, 30, 12, 0, time.UTC)), + Time: protoutil.TimeToProto(time.Date(2014, time.January, 20, 23, 30, 12, 0, time.UTC)), Level: logutilpb.Level_ERROR, File: "file2.go", Line: 567, @@ -61,7 +62,7 @@ func TestLogEvent(t *testing.T) { }, { event: &logutilpb.Event{ - Time: TimeToProto(time.Date(2014, time.January, 20, 23, 30, 12, 0, time.UTC)), + Time: protoutil.TimeToProto(time.Date(2014, time.January, 20, 23, 30, 12, 0, time.UTC)), Level: logutilpb.Level_CONSOLE, File: "file2.go", Line: 567, diff --git a/go/vt/logutil/proto3.go b/go/vt/logutil/proto3.go index 2bde4656dbd..b62ed8810da 100644 --- a/go/vt/logutil/proto3.go +++ b/go/vt/logutil/proto3.go @@ -17,37 +17,11 @@ limitations under the License. package logutil import ( - "time" - logutilpb "vitess.io/vitess/go/vt/proto/logutil" - vttimepb "vitess.io/vitess/go/vt/proto/vttime" ) // This file contains a few functions to help with proto3. -// ProtoToTime converts a vttimepb.Time to a time.Time. -// proto3 will eventually support timestamps, at which point we'll retire -// this. -// -// A nil pointer is like the empty timestamp. -func ProtoToTime(ts *vttimepb.Time) time.Time { - if ts == nil { - // treat nil like the empty Timestamp - return time.Time{} - } - return time.Unix(ts.Seconds, int64(ts.Nanoseconds)).UTC() -} - -// TimeToProto converts the time.Time to a vttimepb.Time. -func TimeToProto(t time.Time) *vttimepb.Time { - seconds := t.Unix() - nanos := int64(t.Sub(time.Unix(seconds, 0))) - return &vttimepb.Time{ - Seconds: seconds, - Nanoseconds: int32(nanos), - } -} - // EventStream is an interface used by RPC clients when the streaming // RPC returns a stream of log events. type EventStream interface { diff --git a/go/vt/logutil/proto3_test.go b/go/vt/logutil/proto3_test.go deleted file mode 100644 index 58a78dea2ef..00000000000 --- a/go/vt/logutil/proto3_test.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logutil - -import ( - "math" - "testing" - "time" - - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/vt/proto/vttime" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -func utcDate(year, month, day int) time.Time { - return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC) -} - -var tests = []struct { - pt *vttime.Time - t time.Time -}{ - // The timestamp representing the Unix epoch date. - {pt: &vttime.Time{Seconds: 0, Nanoseconds: 0}, - t: utcDate(1970, 1, 1)}, - - // The smallest representable timestamp with non-negative nanos. - {pt: &vttime.Time{Seconds: math.MinInt64, Nanoseconds: 0}, - t: time.Unix(math.MinInt64, 0).UTC()}, - - // The earliest valid timestamp. - {pt: &vttime.Time{Seconds: minValidSeconds, Nanoseconds: 0}, - t: utcDate(1, 1, 1)}, - - // The largest representable timestamp with nanos in range. - {pt: &vttime.Time{Seconds: math.MaxInt64, Nanoseconds: 1e9 - 1}, - t: time.Unix(math.MaxInt64, 1e9-1).UTC()}, - - // The largest valid timestamp. - {pt: &vttime.Time{Seconds: maxValidSeconds - 1, Nanoseconds: 1e9 - 1}, - t: time.Date(9999, 12, 31, 23, 59, 59, 1e9-1, time.UTC)}, - - // The smallest invalid timestamp that is larger than the valid range. - {pt: &vttime.Time{Seconds: maxValidSeconds, Nanoseconds: 0}, - t: time.Unix(maxValidSeconds, 0).UTC()}, - - // A date before the epoch. - {pt: &vttime.Time{Seconds: -281836800, Nanoseconds: 0}, - t: utcDate(1961, 1, 26)}, - - // A date after the epoch. - {pt: &vttime.Time{Seconds: 1296000000, Nanoseconds: 0}, - t: utcDate(2011, 1, 26)}, - - // A date after the epoch, in the middle of the day. - {pt: &vttime.Time{Seconds: 1296012345, Nanoseconds: 940483}, - t: time.Date(2011, 1, 26, 3, 25, 45, 940483, time.UTC)}, -} - -func TestProtoToTime(t *testing.T) { - for i, s := range tests { - got := ProtoToTime(s.pt) - if got != s.t { - t.Errorf("ProtoToTime[%v](%v) = %v, want %v", i, s.pt, got, s.t) - } - } -} - -func TestTimeToProto(t *testing.T) { - for i, s := range tests { - got := TimeToProto(s.t) - if !proto.Equal(got, s.pt) { - t.Errorf("TimeToProto[%v](%v) = %v, want %v", i, s.t, got, s.pt) - } - } -} diff --git a/go/vt/logutil/throttled.go b/go/vt/logutil/throttled.go index 917798626bb..4ee11912e71 100644 --- a/go/vt/logutil/throttled.go +++ b/go/vt/logutil/throttled.go @@ -69,7 +69,7 @@ func (tl *ThrottledLogger) log(logF logFunc, format string, v ...any) { // to log and reset skippedCount if tl.skippedCount == 0 { go func(d time.Duration) { - time.Sleep(d) + <-time.After(d) tl.mu.Lock() defer tl.mu.Unlock() // Because of the go func(), we lose the stack trace, diff --git a/go/vt/mysqlctl/azblobbackupstorage/azblob.go b/go/vt/mysqlctl/azblobbackupstorage/azblob.go index 08fa24643b7..7058745d6c6 100644 --- a/go/vt/mysqlctl/azblobbackupstorage/azblob.go +++ b/go/vt/mysqlctl/azblobbackupstorage/azblob.go @@ -32,6 +32,7 @@ import ( "github.com/Azure/azure-storage-blob-go/azblob" "github.com/spf13/pflag" + "vitess.io/vitess/go/viperutil" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" @@ -40,26 +41,68 @@ import ( var ( // This is the account name - accountName string + accountName = viperutil.Configure( + configKey("account.name"), + viperutil.Options[string]{ + EnvVars: []string{"VT_AZBLOB_ACCOUNT_NAME"}, + FlagName: "azblob_backup_account_name", + }, + ) // This is the private access key - accountKeyFile string + accountKeyFile = viperutil.Configure( + configKey("account.key_file"), + viperutil.Options[string]{ + FlagName: "azblob_backup_account_key_file", + }, + ) // This is the name of the container that will store the backups - containerName string + containerName = viperutil.Configure( + configKey("container_name"), + viperutil.Options[string]{ + FlagName: "azblob_backup_container_name", + }, + ) // This is an optional prefix to prepend to all files - storageRoot string + storageRoot = viperutil.Configure( + configKey("storage_root"), + viperutil.Options[string]{ + FlagName: "azblob_backup_storage_root", + }, + ) + + azBlobBufferSize = viperutil.Configure( + configKey("buffer_size"), + viperutil.Options[int]{ + Default: 100 << (10 * 2), // 100 MiB + FlagName: "azblob_buffer_size", + }, + ) - azBlobParallelism int + azBlobParallelism = viperutil.Configure( + configKey("parallelism"), + viperutil.Options[int]{ + Default: 1, + FlagName: "azblob_backup_parallelism", + }, + ) ) +const configKeyPrefix = "backup.storage.azblob" + +var configKey = viperutil.KeyPrefixFunc(configKeyPrefix) + func registerFlags(fs *pflag.FlagSet) { - fs.StringVar(&accountName, "azblob_backup_account_name", "", "Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used.") - fs.StringVar(&accountKeyFile, "azblob_backup_account_key_file", "", "Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path).") - fs.StringVar(&containerName, "azblob_backup_container_name", "", "Azure Blob Container Name.") - fs.StringVar(&storageRoot, "azblob_backup_storage_root", "", "Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/').") - fs.IntVar(&azBlobParallelism, "azblob_backup_parallelism", 1, "Azure Blob operation parallelism (requires extra memory when increased).") + fs.String("azblob_backup_account_name", accountName.Default(), "Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used.") + fs.String("azblob_backup_account_key_file", accountKeyFile.Default(), "Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path).") + fs.String("azblob_backup_container_name", containerName.Default(), "Azure Blob Container Name.") + fs.String("azblob_backup_storage_root", storageRoot.Default(), "Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/').") + fs.Int("azblob_backup_buffer_size", azBlobBufferSize.Default(), "The memory buffer size to use in bytes, per file or stripe, when streaming to Azure Blob Service.") + fs.Int("azblob_backup_parallelism", azBlobParallelism.Default(), "Azure Blob operation parallelism (requires extra memory when increased -- a multiple of azblob_backup_buffer_size).") + + viperutil.BindFlags(fs, accountName, accountKeyFile, containerName, storageRoot, azBlobParallelism) } func init() { @@ -79,16 +122,12 @@ const ( // 1. Direct Command Line Flag (azblob_backup_account_name, azblob_backup_account_key) // 2. Environment variables func azInternalCredentials() (string, string, error) { - actName := accountName - if actName == "" { - // Check the Environmental Value - actName = os.Getenv("VT_AZBLOB_ACCOUNT_NAME") - } + actName := accountName.Get() var actKey string - if accountKeyFile != "" { - log.Infof("Getting Azure Storage Account key from file: %s", accountKeyFile) - dat, err := os.ReadFile(accountKeyFile) + if keyFile := accountKeyFile.Get(); keyFile != "" { + log.Infof("Getting Azure Storage Account key from file: %s", keyFile) + dat, err := os.ReadFile(keyFile) if err != nil { return "", "", err } @@ -218,8 +257,8 @@ func (bh *AZBlobBackupHandle) AddFile(ctx context.Context, filename string, file go func() { defer bh.waitGroup.Done() _, err := azblob.UploadStreamToBlockBlob(bh.ctx, reader, blockBlobURL, azblob.UploadStreamToBlockBlobOptions{ - BufferSize: azblob.BlockBlobMaxStageBlockBytes, - MaxBuffers: azBlobParallelism, + BufferSize: azBlobBufferSize.Get(), + MaxBuffers: azBlobParallelism.Get(), }) if err != nil { reader.CloseWithError(err) @@ -286,7 +325,7 @@ func (bs *AZBlobBackupStorage) containerURL() (*azblob.ContainerURL, error) { if err != nil { return nil, err } - u := azServiceURL(credentials).NewContainerURL(containerName) + u := azServiceURL(credentials).NewContainerURL(containerName.Get()) return &u, nil } @@ -425,8 +464,8 @@ func (bs *AZBlobBackupStorage) WithParams(params backupstorage.Params) backupsto // Unlike path.Join, it doesn't collapse ".." or strip trailing slashes. // It also adds the value of the -azblob_backup_storage_root flag if set. func objName(parts ...string) string { - if storageRoot != "" { - return storageRoot + "/" + strings.Join(parts, "/") + if root := storageRoot.Get(); root != "" { + return root + "/" + strings.Join(parts, "/") } return strings.Join(parts, "/") } diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index 0865759f2a3..9a19175164a 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -25,19 +25,15 @@ import ( "strings" "time" + "github.com/spf13/pflag" "golang.org/x/text/cases" "golang.org/x/text/language" - "github.com/spf13/pflag" - - "vitess.io/vitess/go/vt/servenv" - - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/backupstats" - stats "vitess.io/vitess/go/vt/mysqlctl/backupstats" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" @@ -60,6 +56,11 @@ const ( RestoreState = "restore_in_progress" // BackupTimestampFormat is the format in which we save BackupTime and FinishedTime BackupTimestampFormat = "2006-01-02.150405" + + // closeTimeout is the timeout for closing backup files after writing. + // The value is a bit arbitrary. How long does it make sense to wait for a Close()? With a cloud-based implementation, + // network might be an issue. _Seconds_ are probably too short. The whereabouts of a minute us a reasonable value. + closeTimeout = 1 * time.Minute ) const ( @@ -96,6 +97,18 @@ func init() { } } +func FormatRFC3339(t time.Time) string { + return t.Format(time.RFC3339) +} + +func ParseRFC3339(timestamp string) (time.Time, error) { + return time.Parse(time.RFC3339, timestamp) +} + +func ParseBinlogTimestamp(timestamp string) (time.Time, error) { + return time.Parse("060102 15:04:05", timestamp) +} + func registerBackupFlags(fs *pflag.FlagSet) { fs.BoolVar(&backupStorageCompress, "backup_storage_compress", backupStorageCompress, "if set, the backup files will be compressed.") fs.IntVar(&backupCompressBlockSize, "backup_storage_block_size", backupCompressBlockSize, "if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000).") @@ -108,7 +121,7 @@ func registerBackupFlags(fs *pflag.FlagSet) { // - remember if we were replicating, restore the exact same state func Backup(ctx context.Context, params BackupParams) error { if params.Stats == nil { - params.Stats = stats.NoStats() + params.Stats = backupstats.NoStats() } startTs := time.Now() @@ -123,8 +136,8 @@ func Backup(ctx context.Context, params BackupParams) error { // Scope bsStats to selected storage engine. bsStats := params.Stats.Scope( - stats.Component(stats.BackupStorage), - stats.Implementation( + backupstats.Component(backupstats.BackupStorage), + backupstats.Implementation( titleCase(backupstorage.BackupStorageImplementation), ), ) @@ -137,17 +150,26 @@ func Backup(ctx context.Context, params BackupParams) error { if err != nil { return vterrors.Wrap(err, "StartBackup failed") } + params.Logger.Infof("Starting backup %v", bh.Name()) - be, err := GetBackupEngine() - if err != nil { - return vterrors.Wrap(err, "failed to find backup engine") - } // Scope stats to selected backup engine. beParams := params.Copy() beParams.Stats = params.Stats.Scope( - stats.Component(stats.BackupEngine), - stats.Implementation(titleCase(backupEngineImplementation)), + backupstats.Component(backupstats.BackupEngine), + backupstats.Implementation(titleCase(backupEngineImplementation)), ) + var be BackupEngine + if isIncrementalBackup(beParams) { + // Incremental backups are always done via 'builtin' engine, which copies + // appropriate binlog files. + be = BackupRestoreEngineMap[builtinBackupEngineName] + } else { + be, err = GetBackupEngine() + if err != nil { + return vterrors.Wrap(err, "failed to find backup engine") + } + } + // Take the backup, and either AbortBackup or EndBackup. usable, err := be.ExecuteBackup(ctx, beParams, bh) logger := params.Logger @@ -169,8 +191,8 @@ func Backup(ctx context.Context, params BackupParams) error { } // The backup worked, so just return the finish error, if any. - stats.DeprecatedBackupDurationS.Set(int64(time.Since(startTs).Seconds())) - params.Stats.Scope(stats.Operation("Backup")).TimedIncrement(time.Since(startTs)) + backupstats.DeprecatedBackupDurationS.Set(int64(time.Since(startTs).Seconds())) + params.Stats.Scope(backupstats.Operation("Backup")).TimedIncrement(time.Since(startTs)) return finishErr } @@ -294,12 +316,49 @@ func ShouldRestore(ctx context.Context, params RestoreParams) (bool, error) { return checkNoDB(ctx, params.Mysqld, params.DbName) } +// ensureRestoredGTIDPurgedMatchesManifest sees the following: when you restore a full backup, you want the MySQL server to have +// @@gtid_purged == . This then also implies that @@gtid_executed equals same value. This is because we restore without +// any binary logs. +func ensureRestoredGTIDPurgedMatchesManifest(ctx context.Context, manifest *BackupManifest, params *RestoreParams) error { + if manifest == nil { + return nil + } + if manifest.Position.GTIDSet == nil { + return nil + } + gtid := manifest.Position.GTIDSet.String() + if gtid == "" { + return nil + } + // Xtrabackup 2.4's restore seems to set @@gtid_purged to be the @@gtid_purged at the time of backup. But this is not + // the desired value. We want to set @@gtid_purged to be the @@gtid_executed of the backup. + // As reminder, when restoring from a full backup, setting @@gtid_purged also sets @@gtid_executed. + restoredGTIDPurgedPos, err := params.Mysqld.GetGTIDPurged(ctx) + if err != nil { + return vterrors.Wrapf(err, "failed to read gtid_purged after restore") + } + if restoredGTIDPurgedPos.Equal(manifest.Position) { + return nil + } + params.Logger.Infof("Restore: @@gtid_purged does not equal manifest's GTID position. Setting @@gtid_purged to %v", gtid) + // This is not good. We want to apply a new @@gtid_purged value. + query := "RESET MASTER" // required dialect in 5.7 + if _, err := params.Mysqld.FetchSuperQuery(ctx, query); err != nil { + return vterrors.Wrapf(err, "error issuing %v", query) + } + query = fmt.Sprintf("SET GLOBAL gtid_purged='%s'", gtid) + if _, err := params.Mysqld.FetchSuperQuery(ctx, query); err != nil { + return vterrors.Wrapf(err, "failed to apply `%s` after restore", query) + } + return nil +} + // Restore is the main entry point for backup restore. If there is no // appropriate backup on the BackupStorage, Restore logs an error // and returns ErrNoBackup. Any other error is returned. func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error) { if params.Stats == nil { - params.Stats = stats.NoStats() + params.Stats = backupstats.NoStats() } startTs := time.Now() @@ -313,8 +372,8 @@ func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error) // Scope bsStats to selected storage engine. bsStats := params.Stats.Scope( - stats.Component(backupstats.BackupStorage), - stats.Implementation( + backupstats.Component(backupstats.BackupStorage), + backupstats.Implementation( titleCase(backupstorage.BackupStorageImplementation), ), ) @@ -368,8 +427,8 @@ func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error) // Scope stats to selected backup engine. reParams := params.Copy() reParams.Stats = params.Stats.Scope( - stats.Component(backupstats.BackupEngine), - stats.Implementation(titleCase(backupEngineImplementation)), + backupstats.Component(backupstats.BackupEngine), + backupstats.Implementation(titleCase(backupEngineImplementation)), ) manifest, err := re.ExecuteRestore(ctx, reParams, bh) if err != nil { @@ -385,47 +444,35 @@ func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error) // of those who can connect. params.Logger.Infof("Restore: starting mysqld for mysql_upgrade") // Note Start will use dba user for waiting, this is fine, it will be allowed. - err = params.Mysqld.Start(context.Background(), params.Cnf, "--skip-grant-tables", "--skip-networking") - if err != nil { + if err := params.Mysqld.Start(context.Background(), params.Cnf, "--skip-grant-tables", "--skip-networking"); err != nil { return nil, err } - // We disable super_read_only, in case it is in the default MySQL startup - // parameters and will be blocking the writes we need to do in - // PopulateMetadataTables(). We do it blindly, since - // this will fail on MariaDB, which doesn't have super_read_only - // This is safe, since we're restarting MySQL after the restore anyway - params.Logger.Infof("Restore: disabling super_read_only") - if err := params.Mysqld.SetSuperReadOnly(false); err != nil { - if strings.Contains(err.Error(), mysql.ERUnknownSystemVariable.ToString()) { - params.Logger.Warningf("Restore: server does not know about super_read_only, continuing anyway...") - } else { - params.Logger.Errorf("Restore: unexpected error while trying to set super_read_only: %v", err) - return nil, err - } - } - params.Logger.Infof("Restore: running mysql_upgrade") - if err := params.Mysqld.RunMysqlUpgrade(); err != nil { + if err := params.Mysqld.RunMysqlUpgrade(ctx); err != nil { return nil, vterrors.Wrap(err, "mysql_upgrade failed") } // The MySQL manual recommends restarting mysqld after running mysql_upgrade, // so that any changes made to system tables take effect. params.Logger.Infof("Restore: restarting mysqld after mysql_upgrade") - err = params.Mysqld.Shutdown(context.Background(), params.Cnf, true) - if err != nil { + if err := params.Mysqld.Shutdown(context.Background(), params.Cnf, true); err != nil { return nil, err } - err = params.Mysqld.Start(context.Background(), params.Cnf) - if err != nil { + if err := params.Mysqld.Start(context.Background(), params.Cnf); err != nil { + return nil, err + } + if err = ensureRestoredGTIDPurgedMatchesManifest(ctx, manifest, ¶ms); err != nil { return nil, err } if handles := restorePath.IncrementalBackupHandles(); len(handles) > 0 { params.Logger.Infof("Restore: applying %v incremental backups", len(handles)) + // Incremental restores are always done via 'builtin' engine, which copies + // appropriate binlog files. + builtInRE := BackupRestoreEngineMap[builtinBackupEngineName] for _, bh := range handles { - manifest, err := re.ExecuteRestore(ctx, params, bh) + manifest, err := builtInRE.ExecuteRestore(ctx, params, bh) if err != nil { return nil, err } @@ -439,8 +486,8 @@ func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error) return nil, err } - stats.DeprecatedRestoreDurationS.Set(int64(time.Since(startTs).Seconds())) - params.Stats.Scope(stats.Operation("Restore")).TimedIncrement(time.Since(startTs)) + backupstats.DeprecatedRestoreDurationS.Set(int64(time.Since(startTs).Seconds())) + params.Stats.Scope(backupstats.Operation("Restore")).TimedIncrement(time.Since(startTs)) params.Logger.Infof("Restore: complete") return manifest, nil } diff --git a/go/vt/mysqlctl/backup_blackbox_test.go b/go/vt/mysqlctl/backup_blackbox_test.go new file mode 100644 index 00000000000..62b58f2a5c8 --- /dev/null +++ b/go/vt/mysqlctl/backup_blackbox_test.go @@ -0,0 +1,603 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package mysqlctl_test is the blackbox tests for package mysqlctl. +package mysqlctl_test + +import ( + "context" + "fmt" + "os" + "path" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/utils" + + "vitess.io/vitess/go/mysql/replication" + + "vitess.io/vitess/go/sqltypes" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/mysqlctl/backupstats" + "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage" + "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vttime" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" +) + +func setBuiltinBackupMysqldDeadline(t time.Duration) time.Duration { + old := mysqlctl.BuiltinBackupMysqldTimeout + mysqlctl.BuiltinBackupMysqldTimeout = t + + return old +} + +func createBackupDir(root string, dirs ...string) error { + for _, dir := range dirs { + if err := os.MkdirAll(path.Join(root, dir), 0755); err != nil { + return err + } + } + + return nil +} + +func createBackupFiles(root string, fileCount int, ext string) error { + for i := 0; i < fileCount; i++ { + f, err := os.Create(path.Join(root, fmt.Sprintf("%d.%s", i, ext))) + if err != nil { + return err + } + if _, err := f.Write([]byte("hello, world!")); err != nil { + return err + } + defer f.Close() + } + + return nil +} + +func TestExecuteBackup(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + // Set up local backup directory + backupRoot := "testdata/builtinbackup_test" + filebackupstorage.FileBackupStorageRoot = backupRoot + require.NoError(t, createBackupDir(backupRoot, "innodb", "log", "datadir")) + dataDir := path.Join(backupRoot, "datadir") + // Add some files under data directory to force backup to actually backup files. + require.NoError(t, createBackupDir(dataDir, "test1")) + require.NoError(t, createBackupDir(dataDir, "test2")) + require.NoError(t, createBackupFiles(path.Join(dataDir, "test1"), 2, "ibd")) + require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) + defer os.RemoveAll(backupRoot) + + needIt, err := needInnoDBRedoLogSubdir() + require.NoError(t, err) + if needIt { + fpath := path.Join("log", mysql.DynamicRedoLogSubdir) + if err := createBackupDir(backupRoot, fpath); err != nil { + require.Failf(t, err.Error(), "failed to create directory: %s", fpath) + } + } + + // Set up topo + keyspace, shard := "mykeyspace", "-80" + ts := memorytopo.NewServer(ctx, "cell1") + defer ts.Close() + + require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodata.Keyspace{})) + require.NoError(t, ts.CreateShard(ctx, keyspace, shard)) + + tablet := topo.NewTablet(100, "cell1", "mykeyspace-00-80-0100") + tablet.Keyspace = keyspace + tablet.Shard = shard + + require.NoError(t, ts.CreateTablet(ctx, tablet)) + + _, err = ts.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = &topodata.TabletAlias{Uid: 100, Cell: "cell1"} + + now := time.Now() + si.PrimaryTermStartTime = &vttime.Time{Seconds: int64(now.Second()), Nanoseconds: int32(now.Nanosecond())} + + return nil + }) + require.NoError(t, err) + + be := &mysqlctl.BuiltinBackupEngine{} + + // Configure a tight deadline to force a timeout + oldDeadline := setBuiltinBackupMysqldDeadline(time.Second) + defer setBuiltinBackupMysqldDeadline(oldDeadline) + + bh := filebackupstorage.NewBackupHandle(nil, "", "", false) + + // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: + // "STOP SLAVE", "START SLAVE", in that order. + fakedb := fakesqldb.New(t) + defer fakedb.Close() + mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} + // mysqld.ShutdownTime = time.Minute + + fakeStats := backupstats.NewFakeStats() + + ok, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + Concurrency: 2, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + Stats: fakeStats, + }, bh) + + require.NoError(t, err) + assert.True(t, ok) + + var destinationCloseStats int + var destinationOpenStats int + var destinationWriteStats int + var sourceCloseStats int + var sourceOpenStats int + var sourceReadStats int + + for _, sr := range fakeStats.ScopeReturns { + sfs := sr.(*backupstats.FakeStats) + switch sfs.ScopeV[backupstats.ScopeOperation] { + case "Destination:Close": + destinationCloseStats++ + require.Len(t, sfs.TimedIncrementCalls, 1) + case "Destination:Open": + destinationOpenStats++ + require.Len(t, sfs.TimedIncrementCalls, 1) + case "Destination:Write": + destinationWriteStats++ + require.GreaterOrEqual(t, len(sfs.TimedIncrementBytesCalls), 1) + case "Source:Close": + sourceCloseStats++ + require.Len(t, sfs.TimedIncrementCalls, 1) + case "Source:Open": + sourceOpenStats++ + require.Len(t, sfs.TimedIncrementCalls, 1) + case "Source:Read": + sourceReadStats++ + require.GreaterOrEqual(t, len(sfs.TimedIncrementBytesCalls), 1) + } + } + + require.Equal(t, 4, destinationCloseStats) + require.Equal(t, 4, destinationOpenStats) + require.Equal(t, 4, destinationWriteStats) + require.Equal(t, 4, sourceCloseStats) + require.Equal(t, 4, sourceOpenStats) + require.Equal(t, 4, sourceReadStats) + + mysqld.ExpectedExecuteSuperQueryCurrent = 0 // resest the index of what queries we've run + mysqld.ShutdownTime = time.Minute // reminder that shutdownDeadline is 1s + + ok, err = be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + }, bh) + + assert.Error(t, err) + assert.False(t, ok) +} + +func TestExecuteBackupWithSafeUpgrade(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + // Set up local backup directory + backupRoot := "testdata/builtinbackup_test" + filebackupstorage.FileBackupStorageRoot = backupRoot + require.NoError(t, createBackupDir(backupRoot, "innodb", "log", "datadir")) + dataDir := path.Join(backupRoot, "datadir") + // Add some files under data directory to force backup to actually backup files. + require.NoError(t, createBackupDir(dataDir, "test1")) + require.NoError(t, createBackupDir(dataDir, "test2")) + require.NoError(t, createBackupFiles(path.Join(dataDir, "test1"), 2, "ibd")) + require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) + defer os.RemoveAll(backupRoot) + + needIt, err := needInnoDBRedoLogSubdir() + require.NoError(t, err) + if needIt { + fpath := path.Join("log", mysql.DynamicRedoLogSubdir) + if err := createBackupDir(backupRoot, fpath); err != nil { + require.Failf(t, err.Error(), "failed to create directory: %s", fpath) + } + } + + // Set up topo + keyspace, shard := "mykeyspace", "-80" + ts := memorytopo.NewServer(ctx, "cell1") + defer ts.Close() + + require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodata.Keyspace{})) + require.NoError(t, ts.CreateShard(ctx, keyspace, shard)) + + tablet := topo.NewTablet(100, "cell1", "mykeyspace-00-80-0100") + tablet.Keyspace = keyspace + tablet.Shard = shard + + require.NoError(t, ts.CreateTablet(ctx, tablet)) + + _, err = ts.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = &topodata.TabletAlias{Uid: 100, Cell: "cell1"} + + now := time.Now() + si.PrimaryTermStartTime = &vttime.Time{Seconds: int64(now.Second()), Nanoseconds: int32(now.Nanosecond())} + + return nil + }) + require.NoError(t, err) + + be := &mysqlctl.BuiltinBackupEngine{} + + // Configure a tight deadline to force a timeout + oldDeadline := setBuiltinBackupMysqldDeadline(time.Second) + defer setBuiltinBackupMysqldDeadline(oldDeadline) + + bh := filebackupstorage.NewBackupHandle(nil, "", "", false) + + // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: + // "STOP SLAVE", "START SLAVE", in that order. + // It also needs to be allowed to receive the query to disable the innodb_fast_shutdown flag. + fakedb := fakesqldb.New(t) + defer fakedb.Close() + mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} + mysqld.FetchSuperQueryMap = map[string]*sqltypes.Result{ + "SET GLOBAL innodb_fast_shutdown=0": {}, + } + + ok, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + Concurrency: 2, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + Stats: backupstats.NewFakeStats(), + UpgradeSafe: true, + }, bh) + + require.NoError(t, err) + assert.True(t, ok) +} + +// TestExecuteBackupWithCanceledContext tests the ability of the backup function to gracefully handle cases where errors +// occur due to various reasons, such as context time cancel. The process should not panic in these situations. +func TestExecuteBackupWithCanceledContext(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + // Set up local backup directory + id := fmt.Sprintf("%d", time.Now().UnixNano()) + backupRoot := fmt.Sprintf("testdata/builtinbackup_test_%s", id) + filebackupstorage.FileBackupStorageRoot = backupRoot + require.NoError(t, createBackupDir(backupRoot, "innodb", "log", "datadir")) + dataDir := path.Join(backupRoot, "datadir") + // Add some files under data directory to force backup to execute semaphore acquire inside + // backupFiles() method (https://github.com/vitessio/vitess/blob/main/go/vt/mysqlctl/builtinbackupengine.go#L483). + require.NoError(t, createBackupDir(dataDir, "test1")) + require.NoError(t, createBackupDir(dataDir, "test2")) + require.NoError(t, createBackupFiles(path.Join(dataDir, "test1"), 2, "ibd")) + require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) + defer os.RemoveAll(backupRoot) + + needIt, err := needInnoDBRedoLogSubdir() + require.NoError(t, err) + if needIt { + fpath := path.Join("log", mysql.DynamicRedoLogSubdir) + if err := createBackupDir(backupRoot, fpath); err != nil { + require.Failf(t, err.Error(), "failed to create directory: %s", fpath) + } + } + + // Set up topo + keyspace, shard := "mykeyspace", "-80" + ts := memorytopo.NewServer(ctx, "cell1") + defer ts.Close() + + require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodata.Keyspace{})) + require.NoError(t, ts.CreateShard(ctx, keyspace, shard)) + + tablet := topo.NewTablet(100, "cell1", "mykeyspace-00-80-0100") + tablet.Keyspace = keyspace + tablet.Shard = shard + + require.NoError(t, ts.CreateTablet(ctx, tablet)) + + _, err = ts.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = &topodata.TabletAlias{Uid: 100, Cell: "cell1"} + + now := time.Now() + si.PrimaryTermStartTime = &vttime.Time{Seconds: int64(now.Second()), Nanoseconds: int32(now.Nanosecond())} + + return nil + }) + require.NoError(t, err) + + be := &mysqlctl.BuiltinBackupEngine{} + bh := filebackupstorage.NewBackupHandle(nil, "", "", false) + // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: + // "STOP SLAVE", "START SLAVE", in that order. + fakedb := fakesqldb.New(t) + defer fakedb.Close() + mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} + + // Cancel the context deliberately + cancelledCtx, cancelCtx := context.WithCancel(context.Background()) + cancelCtx() + + ok, err := be.ExecuteBackup(cancelledCtx, mysqlctl.BackupParams{ + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + Stats: backupstats.NewFakeStats(), + Concurrency: 2, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + }, bh) + + require.Error(t, err) + // all four files will fail + require.ErrorContains(t, err, "context canceled;context canceled;context canceled;context canceled") + assert.False(t, ok) +} + +// TestExecuteRestoreWithCanceledContext tests the ability of the restore function to gracefully handle cases where errors +// occur due to various reasons, such as context timed-out. The process should not panic in these situations. +func TestExecuteRestoreWithTimedOutContext(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + // Set up local backup directory + id := fmt.Sprintf("%d", time.Now().UnixNano()) + backupRoot := fmt.Sprintf("testdata/builtinbackup_test_%s", id) + filebackupstorage.FileBackupStorageRoot = backupRoot + require.NoError(t, createBackupDir(backupRoot, "innodb", "log", "datadir")) + dataDir := path.Join(backupRoot, "datadir") + // Add some files under data directory to force backup to execute semaphore acquire inside + // backupFiles() method (https://github.com/vitessio/vitess/blob/main/go/vt/mysqlctl/builtinbackupengine.go#L483). + require.NoError(t, createBackupDir(dataDir, "test1")) + require.NoError(t, createBackupDir(dataDir, "test2")) + require.NoError(t, createBackupFiles(path.Join(dataDir, "test1"), 2, "ibd")) + require.NoError(t, createBackupFiles(path.Join(dataDir, "test2"), 2, "ibd")) + defer os.RemoveAll(backupRoot) + + needIt, err := needInnoDBRedoLogSubdir() + require.NoError(t, err) + if needIt { + fpath := path.Join("log", mysql.DynamicRedoLogSubdir) + if err := createBackupDir(backupRoot, fpath); err != nil { + require.Failf(t, err.Error(), "failed to create directory: %s", fpath) + } + } + + // Set up topo + keyspace, shard := "mykeyspace", "-80" + ts := memorytopo.NewServer(ctx, "cell1") + defer ts.Close() + + require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodata.Keyspace{})) + require.NoError(t, ts.CreateShard(ctx, keyspace, shard)) + + tablet := topo.NewTablet(100, "cell1", "mykeyspace-00-80-0100") + tablet.Keyspace = keyspace + tablet.Shard = shard + + require.NoError(t, ts.CreateTablet(ctx, tablet)) + + _, err = ts.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = &topodata.TabletAlias{Uid: 100, Cell: "cell1"} + + now := time.Now() + si.PrimaryTermStartTime = &vttime.Time{Seconds: int64(now.Second()), Nanoseconds: int32(now.Nanosecond())} + + return nil + }) + require.NoError(t, err) + + be := &mysqlctl.BuiltinBackupEngine{} + bh := filebackupstorage.NewBackupHandle(nil, "", "", false) + // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: + // "STOP SLAVE", "START SLAVE", in that order. + fakedb := fakesqldb.New(t) + defer fakedb.Close() + mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} + + ok, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + }, + Stats: backupstats.NewFakeStats(), + Concurrency: 2, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + }, bh) + + require.NoError(t, err) + assert.True(t, ok) + + // Now try to restore the above backup. + bh = filebackupstorage.NewBackupHandle(nil, "", "", true) + fakedb = fakesqldb.New(t) + defer fakedb.Close() + mysqld = mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} + + fakeStats := backupstats.NewFakeStats() + + restoreParams := mysqlctl.RestoreParams{ + Cnf: &mysqlctl.Mycnf{ + InnodbDataHomeDir: path.Join(backupRoot, "innodb"), + InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), + DataDir: path.Join(backupRoot, "datadir"), + BinLogPath: path.Join(backupRoot, "binlog"), + RelayLogPath: path.Join(backupRoot, "relaylog"), + RelayLogIndexPath: path.Join(backupRoot, "relaylogindex"), + RelayLogInfoPath: path.Join(backupRoot, "relayloginfo"), + }, + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + Concurrency: 2, + HookExtraEnv: map[string]string{}, + DeleteBeforeRestore: false, + DbName: "test", + Keyspace: "test", + Shard: "-", + StartTime: time.Now(), + RestoreToPos: replication.Position{}, + RestoreToTimestamp: time.Time{}, + DryRun: false, + Stats: fakeStats, + } + + // Successful restore. + bm, err := be.ExecuteRestore(ctx, restoreParams, bh) + assert.NoError(t, err) + assert.NotNil(t, bm) + + var destinationCloseStats int + var destinationOpenStats int + var destinationWriteStats int + var sourceCloseStats int + var sourceOpenStats int + var sourceReadStats int + + for _, sr := range fakeStats.ScopeReturns { + sfs := sr.(*backupstats.FakeStats) + switch sfs.ScopeV[backupstats.ScopeOperation] { + case "Destination:Close": + destinationCloseStats++ + require.Len(t, sfs.TimedIncrementCalls, 1) + case "Destination:Open": + destinationOpenStats++ + require.Len(t, sfs.TimedIncrementCalls, 1) + case "Destination:Write": + destinationWriteStats++ + require.GreaterOrEqual(t, len(sfs.TimedIncrementBytesCalls), 1) + case "Source:Close": + sourceCloseStats++ + require.Len(t, sfs.TimedIncrementCalls, 1) + case "Source:Open": + sourceOpenStats++ + require.Len(t, sfs.TimedIncrementCalls, 1) + case "Source:Read": + sourceReadStats++ + require.GreaterOrEqual(t, len(sfs.TimedIncrementBytesCalls), 1) + } + } + + require.Equal(t, 4, destinationCloseStats) + require.Equal(t, 4, destinationOpenStats) + require.Equal(t, 4, destinationWriteStats) + require.Equal(t, 4, sourceCloseStats) + require.Equal(t, 4, sourceOpenStats) + require.Equal(t, 4, sourceReadStats) + + // Restore using timed-out context + fakedb = fakesqldb.New(t) + defer fakedb.Close() + mysqld = mysqlctl.NewFakeMysqlDaemon(fakedb) + defer mysqld.Close() + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} + restoreParams.Mysqld = mysqld + timedOutCtx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + // Let the context time out. + time.Sleep(1 * time.Second) + bm, err = be.ExecuteRestore(timedOutCtx, restoreParams, bh) + // ExecuteRestore should fail. + assert.Error(t, err) + assert.Nil(t, bm) + // error message can contain any combination of "context deadline exceeded" or "context canceled" + if !strings.Contains(err.Error(), "context canceled") && !strings.Contains(err.Error(), "context deadline exceeded") { + assert.Fail(t, "Test should fail with either `context canceled` or `context deadline exceeded`") + } +} + +// needInnoDBRedoLogSubdir indicates whether we need to create a redo log subdirectory. +// Starting with MySQL 8.0.30, the InnoDB redo logs are stored in a subdirectory of the +// (/. by default) called "#innodb_redo". See: +// +// https://dev.mysql.com/doc/refman/8.0/en/innodb-redo-log.html#innodb-modifying-redo-log-capacity +func needInnoDBRedoLogSubdir() (needIt bool, err error) { + mysqldVersionStr, err := mysqlctl.GetVersionString() + if err != nil { + return needIt, err + } + _, sv, err := mysqlctl.ParseVersionString(mysqldVersionStr) + if err != nil { + return needIt, err + } + versionStr := fmt.Sprintf("%d.%d.%d", sv.Major, sv.Minor, sv.Patch) + _, capableOf, _ := mysql.GetFlavor(versionStr, nil) + if capableOf == nil { + return needIt, fmt.Errorf("cannot determine database flavor details for version %s", versionStr) + } + return capableOf(mysql.DynamicRedoLogCapacityFlavorCapability) +} diff --git a/go/vt/mysqlctl/backup_test.go b/go/vt/mysqlctl/backup_test.go index d26ca873243..5a135c26a30 100644 --- a/go/vt/mysqlctl/backup_test.go +++ b/go/vt/mysqlctl/backup_test.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" "os" "path" @@ -30,6 +31,10 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/utils" + + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/vt/logutil" @@ -40,8 +45,7 @@ import ( // TestBackupExecutesBackupWithScopedParams tests that Backup passes // a Scope()-ed stats to backupengine ExecuteBackup. func TestBackupExecutesBackupWithScopedParams(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() + env := createFakeBackupRestoreEnv(t) require.Nil(t, Backup(env.ctx, env.backupParams), env.logger.Events) @@ -62,9 +66,7 @@ func TestBackupExecutesBackupWithScopedParams(t *testing.T) { // TestBackupNoStats tests that if BackupParams.Stats is nil, then Backup will // pass non-nil Stats to sub-components. func TestBackupNoStats(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() - + env := createFakeBackupRestoreEnv(t) env.setStats(nil) require.Nil(t, Backup(env.ctx, env.backupParams), env.logger.Events) @@ -77,8 +79,7 @@ func TestBackupNoStats(t *testing.T) { // TestBackupParameterizesBackupStorageWithScopedStats tests that Backup passes // a Scope()-ed stats to BackupStorage.WithParams. func TestBackupParameterizesBackupStorageWithScopedStats(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() + env := createFakeBackupRestoreEnv(t) require.Nil(t, Backup(env.ctx, env.backupParams), env.logger.Events) @@ -97,8 +98,7 @@ func TestBackupParameterizesBackupStorageWithScopedStats(t *testing.T) { // TestBackupEmitsStats tests that Backup emits stats. func TestBackupEmitsStats(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() + env := createFakeBackupRestoreEnv(t) // Force ExecuteBackup to take time so we can test stats emission. env.backupEngine.ExecuteBackupDuration = 1001 * time.Millisecond @@ -114,8 +114,7 @@ func TestBackupEmitsStats(t *testing.T) { // backupstorage.Params to backupstorage, but only if it responds to // backupstorage.WithParams. func TestBackupTriesToParameterizeBackupStorage(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() + env := createFakeBackupRestoreEnv(t) require.Nil(t, Backup(env.ctx, env.backupParams), env.logger.Events) @@ -319,8 +318,7 @@ func TestFindFilesToBackupWithRedoLog(t *testing.T) { // TestRestoreEmitsStats tests that Restore emits stats. func TestRestoreEmitsStats(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() + env := createFakeBackupRestoreEnv(t) // Force ExecuteRestore to take time so we can test stats emission. env.backupEngine.ExecuteRestoreDuration = 1001 * time.Millisecond @@ -336,8 +334,7 @@ func TestRestoreEmitsStats(t *testing.T) { // TestRestoreExecutesRestoreWithScopedParams tests that Restore passes // a Scope()-ed stats to backupengine ExecuteRestore. func TestRestoreExecutesRestoreWithScopedParams(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() + env := createFakeBackupRestoreEnv(t) _, err := Restore(env.ctx, env.restoreParams) require.Nil(t, err, env.logger.Events) @@ -359,9 +356,7 @@ func TestRestoreExecutesRestoreWithScopedParams(t *testing.T) { // TestRestoreNoStats tests that if RestoreParams.Stats is nil, then Restore will // pass non-nil Stats to sub-components. func TestRestoreNoStats(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() - + env := createFakeBackupRestoreEnv(t) env.setStats(nil) _, err := Restore(env.ctx, env.restoreParams) @@ -375,8 +370,7 @@ func TestRestoreNoStats(t *testing.T) { // TestRestoreParameterizesBackupStorageWithScopedStats tests that Restore passes // a Scope()-ed stats to BackupStorage.WithParams. func TestRestoreParameterizesBackupStorageWithScopedStats(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() + env := createFakeBackupRestoreEnv(t) _, err := Restore(env.ctx, env.restoreParams) require.Nil(t, err, env.logger.Events) @@ -398,8 +392,7 @@ func TestRestoreParameterizesBackupStorageWithScopedStats(t *testing.T) { // backupstorage.Params to backupstorage, but only if it responds to // backupstorage.WithParams. func TestRestoreTriesToParameterizeBackupStorage(t *testing.T) { - env, closer := createFakeBackupRestoreEnv(t) - defer closer() + env := createFakeBackupRestoreEnv(t) _, err := Restore(env.ctx, env.restoreParams) require.Nil(t, err, env.logger.Events) @@ -419,6 +412,133 @@ func TestRestoreTriesToParameterizeBackupStorage(t *testing.T) { require.NotNil(t, scopedStats) } +// TestRestoreManifestMySQLVersionValidation tests that Restore tries to validate +// the MySQL version and safe upgrade attribute. +func TestRestoreManifestMySQLVersionValidation(t *testing.T) { + testCases := []struct { + fromVersion, toVersion string + upgradeSafe bool + wantErr bool + }{ + { + fromVersion: "mysqld Ver 5.6.42", + toVersion: "mysqld Ver 5.7.40", + upgradeSafe: false, + wantErr: true, + }, + { + fromVersion: "mysqld Ver 5.6.42", + toVersion: "mysqld Ver 5.7.40", + upgradeSafe: true, + wantErr: false, + }, + { + fromVersion: "mysqld Ver 5.7.42", + toVersion: "mysqld Ver 8.0.32", + upgradeSafe: true, + wantErr: false, + }, + { + fromVersion: "mysqld Ver 5.7.42", + toVersion: "mysqld Ver 8.0.32", + upgradeSafe: false, + wantErr: true, + }, + { + fromVersion: "mysqld Ver 5.7.42", + toVersion: "mysqld Ver 8.0.32", + upgradeSafe: true, + wantErr: false, + }, + { + fromVersion: "mysqld Ver 8.0.32", + toVersion: "mysqld Ver 8.0.32", + upgradeSafe: false, + wantErr: false, + }, + { + fromVersion: "mysqld Ver 8.0.32", + toVersion: "mysqld Ver 8.0.32", + upgradeSafe: true, + wantErr: false, + }, + { + fromVersion: "mysqld Ver 8.0.32", + toVersion: "mysqld Ver 8.0.31", + upgradeSafe: false, + wantErr: true, + }, + { + fromVersion: "mysqld Ver 8.0.32", + toVersion: "mysqld Ver 8.0.31", + upgradeSafe: true, + wantErr: true, + }, + { + fromVersion: "mysqld Ver 8.0.32", + toVersion: "mysqld Ver 8.0.33", + upgradeSafe: false, + wantErr: true, + }, + { + fromVersion: "mysqld Ver 8.0.32", + toVersion: "mysqld Ver 8.0.33", + upgradeSafe: true, + wantErr: false, + }, + { + fromVersion: "", + toVersion: "mysqld Ver 8.0.33", + upgradeSafe: false, + wantErr: false, + }, + { + fromVersion: "", + toVersion: "mysqld Ver 8.0.33", + upgradeSafe: true, + wantErr: false, + }, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s->%s upgradeSafe=%t", tc.fromVersion, tc.toVersion, tc.upgradeSafe), func(t *testing.T) { + env := createFakeBackupRestoreEnv(t) + env.mysqld.Version = tc.toVersion + + manifest := BackupManifest{ + BackupTime: time.Now().Add(-1 * time.Hour).Format(time.RFC3339), + BackupMethod: "fake", + Keyspace: "test", + Shard: "-", + MySQLVersion: tc.fromVersion, + UpgradeSafe: tc.upgradeSafe, + } + + manifestBytes, err := json.Marshal(manifest) + require.Nil(t, err) + + env.backupEngine.ExecuteRestoreReturn = FakeBackupEngineExecuteRestoreReturn{&manifest, nil} + env.backupStorage.ListBackupsReturn = FakeBackupStorageListBackupsReturn{ + BackupHandles: []backupstorage.BackupHandle{ + &FakeBackupHandle{ + ReadFileReturnF: func(context.Context, string) (io.ReadCloser, error) { + return io.NopCloser(bytes.NewBuffer(manifestBytes)), nil + }, + }, + }, + } + + _, err = Restore(env.ctx, env.restoreParams) + if tc.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } + +} + type forTest []FileEntry func (f forTest) Len() int { return len(f) } @@ -436,7 +556,7 @@ type fakeBackupRestoreEnv struct { stats *backupstats.FakeStats } -func createFakeBackupRestoreEnv(t *testing.T) (*fakeBackupRestoreEnv, func()) { +func createFakeBackupRestoreEnv(t *testing.T) *fakeBackupRestoreEnv { ctx := context.Background() logger := logutil.NewMemoryLogger() @@ -444,7 +564,6 @@ func createFakeBackupRestoreEnv(t *testing.T) (*fakeBackupRestoreEnv, func()) { sqldb.SetNeverFail(true) mysqld := NewFakeMysqlDaemon(sqldb) require.Nil(t, mysqld.Shutdown(ctx, nil, false)) - defer mysqld.Close() dirName, err := os.MkdirTemp("", "vt_backup_test") require.Nil(t, err) @@ -480,16 +599,17 @@ func createFakeBackupRestoreEnv(t *testing.T) (*fakeBackupRestoreEnv, func()) { Keyspace: "test", Shard: "-", StartTime: time.Now(), - RestoreToPos: mysql.Position{}, + RestoreToPos: replication.Position{}, DryRun: false, Stats: stats, } manifest := BackupManifest{ - BackupTime: time.Now().Add(-1 * time.Hour).Format(time.RFC3339), + BackupTime: FormatRFC3339(time.Now().Add(-1 * time.Hour)), BackupMethod: "fake", Keyspace: "test", Shard: "-", + MySQLVersion: "8.0.32", } manifestBytes, err := json.Marshal(manifest) @@ -519,7 +639,12 @@ func createFakeBackupRestoreEnv(t *testing.T) (*fakeBackupRestoreEnv, func()) { previousBackupStorageImplementation := backupstorage.BackupStorageImplementation backupstorage.BackupStorageImplementation = "fake" - closer := func() { + // all restore integration tests must be leak checked + t.Cleanup(func() { + utils.EnsureNoLeaks(t) + }) + + t.Cleanup(func() { backupstats.DeprecatedBackupDurationS.Reset() backupstats.DeprecatedRestoreDurationS.Reset() @@ -528,7 +653,9 @@ func createFakeBackupRestoreEnv(t *testing.T) (*fakeBackupRestoreEnv, func()) { delete(backupstorage.BackupStorageMap, "fake") backupstorage.BackupStorageImplementation = previousBackupStorageImplementation - } + mysqld.Close() + sqldb.Close() + }) return &fakeBackupRestoreEnv{ backupEngine: &testBackupEngine, @@ -539,7 +666,7 @@ func createFakeBackupRestoreEnv(t *testing.T) (*fakeBackupRestoreEnv, func()) { mysqld: mysqld, restoreParams: restoreParams, stats: stats, - }, closer + } } func (fbe *fakeBackupRestoreEnv) setStats(stats *backupstats.FakeStats) { diff --git a/go/vt/mysqlctl/backupengine.go b/go/vt/mysqlctl/backupengine.go index d41780ca9e9..5a79edbdde0 100644 --- a/go/vt/mysqlctl/backupengine.go +++ b/go/vt/mysqlctl/backupengine.go @@ -29,9 +29,11 @@ import ( "github.com/spf13/pflag" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/backupstats" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" @@ -39,14 +41,14 @@ import ( ) var ( - // BackupEngineImplementation is the implementation to use for BackupEngine + // backupEngineImplementation is the implementation to use for BackupEngine backupEngineImplementation = builtinBackupEngineName ) // BackupEngine is the interface to take a backup with a given engine. type BackupEngine interface { ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (bool, error) - ShouldDrainForBackup() bool + ShouldDrainForBackup(req *tabletmanagerdatapb.BackupRequest) bool } // BackupParams is the struct that holds all params passed to ExecuteBackup @@ -73,22 +75,25 @@ type BackupParams struct { IncrementalFromPos string // Stats let's backup engines report detailed backup timings. Stats backupstats.Stats + // UpgradeSafe indicates whether the backup is safe for upgrade and created with innodb_fast_shutdown=0 + UpgradeSafe bool } -func (b BackupParams) Copy() BackupParams { +func (b *BackupParams) Copy() BackupParams { return BackupParams{ - b.Cnf, - b.Mysqld, - b.Logger, - b.Concurrency, - b.HookExtraEnv, - b.TopoServer, - b.Keyspace, - b.Shard, - b.TabletAlias, - b.BackupTime, - b.IncrementalFromPos, - b.Stats, + Cnf: b.Cnf, + Mysqld: b.Mysqld, + Logger: b.Logger, + Concurrency: b.Concurrency, + HookExtraEnv: b.HookExtraEnv, + TopoServer: b.TopoServer, + Keyspace: b.Keyspace, + Shard: b.Shard, + TabletAlias: b.TabletAlias, + BackupTime: b.BackupTime, + IncrementalFromPos: b.IncrementalFromPos, + Stats: b.Stats, + UpgradeSafe: b.UpgradeSafe, } } @@ -116,33 +121,44 @@ type RestoreParams struct { StartTime time.Time // RestoreToPos hints that a point in time recovery is requested, to recover up to the specific given pos. // When empty, the restore is a normal from full backup - RestoreToPos mysql.Position + RestoreToPos replication.Position + // RestoreToTimestamp hints that a point in time recovery is requested, to recover up to, and excluding, the + // given timestamp. + // RestoreToTimestamp and RestoreToPos are mutually exclusive. + RestoreToTimestamp time.Time // When DryRun is set, no restore actually takes place; but some of its steps are validated. DryRun bool // Stats let's restore engines report detailed restore timings. Stats backupstats.Stats } -func (p RestoreParams) Copy() RestoreParams { +func (p *RestoreParams) Copy() RestoreParams { return RestoreParams{ - p.Cnf, - p.Mysqld, - p.Logger, - p.Concurrency, - p.HookExtraEnv, - p.DeleteBeforeRestore, - p.DbName, - p.Keyspace, - p.Shard, - p.StartTime, - p.RestoreToPos, - p.DryRun, - p.Stats, + Cnf: p.Cnf, + Mysqld: p.Mysqld, + Logger: p.Logger, + Concurrency: p.Concurrency, + HookExtraEnv: p.HookExtraEnv, + DeleteBeforeRestore: p.DeleteBeforeRestore, + DbName: p.DbName, + Keyspace: p.Keyspace, + Shard: p.Shard, + StartTime: p.StartTime, + RestoreToPos: p.RestoreToPos, + RestoreToTimestamp: p.RestoreToTimestamp, + DryRun: p.DryRun, + Stats: p.Stats, } } func (p *RestoreParams) IsIncrementalRecovery() bool { - return !p.RestoreToPos.IsZero() + if !p.RestoreToPos.IsZero() { + return true + } + if !p.RestoreToTimestamp.IsZero() { + return true + } + return false } // RestoreEngine is the interface to restore a backup with a given engine. @@ -167,6 +183,11 @@ func init() { } } +// isIncrementalBackup is a convenience function to check whether the params indicate an incremental backup request +func isIncrementalBackup(params BackupParams) bool { + return params.IncrementalFromPos != "" +} + func registerBackupEngineFlags(fs *pflag.FlagSet) { fs.StringVar(&backupEngineImplementation, "backup_engine_implementation", backupEngineImplementation, "Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup.") } @@ -229,6 +250,14 @@ func getBackupManifestInto(ctx context.Context, backup backupstorage.BackupHandl return nil } +// IncrementalBackupDetails lists some incremental backup specific information +type IncrementalBackupDetails struct { + FirstTimestamp string + FirstTimestampBinlog string + LastTimestamp string + LastTimestampBinlog string +} + // BackupManifest defines the common fields in the MANIFEST file. // All backup engines must include at least these fields. They are free to add // their own custom fields by embedding this struct anonymously into their own @@ -241,14 +270,17 @@ type BackupManifest struct { BackupMethod string // Position is the replication position at which the backup was taken. - Position mysql.Position + Position replication.Position // PurgedPosition stands for purged GTIDs, information that is necessary for PITR recovery. This is specific to MySQL56 - PurgedPosition mysql.Position + PurgedPosition replication.Position // FromPosition is only applicable to incremental backups, and stands for the position from // which incremental changes are backed up. - FromPosition mysql.Position + FromPosition replication.Position + + // FromBackup indicates the backup name on which this incremental backup is based, assumign this is an incremental backup with "auto" pos`` + FromBackup string // Incremental indicates whether this is an incremental backup Incremental bool @@ -268,6 +300,15 @@ type BackupManifest struct { Keyspace string Shard string + + // MySQLversion is the version of MySQL when the backup was taken. + MySQLVersion string + + // UpgradeSafe indicates whether the backup is safe to use for an upgrade to a newer MySQL version + UpgradeSafe bool + + // IncrementalDetails is nil for non-incremental backups + IncrementalDetails *IncrementalBackupDetails } func (m *BackupManifest) HashKey() string { @@ -363,9 +404,16 @@ func (p *RestorePath) String() string { // FindLatestSuccessfulBackup returns the handle and manifest for the last good backup, // which can be either full or increment -func FindLatestSuccessfulBackup(ctx context.Context, logger logutil.Logger, bhs []backupstorage.BackupHandle) (backupstorage.BackupHandle, *BackupManifest, error) { +func FindLatestSuccessfulBackup(ctx context.Context, logger logutil.Logger, bhs []backupstorage.BackupHandle, excludeBackupName string) (backupstorage.BackupHandle, *BackupManifest, error) { for index := len(bhs) - 1; index >= 0; index-- { bh := bhs[index] + if bh.Name() == excludeBackupName { + // skip this bh. Use case: in an incremental backup, as we look for previous successful backups, + // the new incremental backup handle is partial: the directory exists, it will show in ListBackups, but + // the MANIFEST file does nto exist yet. So we avoid the errors/warnings associated with reading this partial backup, + // and just skip it. + continue + } // Check that the backup MANIFEST exists and can be successfully decoded. bm, err := GetBackupManifest(ctx, bh) if err != nil { @@ -377,9 +425,32 @@ func FindLatestSuccessfulBackup(ctx context.Context, logger logutil.Logger, bhs return nil, nil, ErrNoCompleteBackup } +// FindLatestSuccessfulBackupPosition returns the position of the last known successful backup +func FindLatestSuccessfulBackupPosition(ctx context.Context, params BackupParams, excludeBackupName string) (backupName string, pos replication.Position, err error) { + bs, err := backupstorage.GetBackupStorage() + if err != nil { + return "", pos, err + } + defer bs.Close() + + // Backups are stored in a directory structure that starts with + // / + backupDir := GetBackupDir(params.Keyspace, params.Shard) + bhs, err := bs.ListBackups(ctx, backupDir) + if err != nil { + return "", pos, vterrors.Wrap(err, "ListBackups failed") + } + bh, manifest, err := FindLatestSuccessfulBackup(ctx, params.Logger, bhs, excludeBackupName) + if err != nil { + return "", pos, vterrors.Wrap(err, "FindLatestSuccessfulBackup failed") + } + pos = manifest.Position + return bh.Name(), pos, nil +} + // FindBackupToRestore returns a path, a sequence of backup handles, to be restored. // The returned handles stand for valid backups with complete manifests. -func FindBackupToRestore(ctx context.Context, params RestoreParams, bhs []backupstorage.BackupHandle) (*RestorePath, error) { +func FindBackupToRestore(ctx context.Context, params RestoreParams, bhs []backupstorage.BackupHandle) (restorePath *RestorePath, err error) { // if a StartTime is provided in params, then find a backup that was taken at or before that time checkBackupTime := !params.StartTime.IsZero() backupDir := GetBackupDir(params.Keyspace, params.Shard) @@ -387,83 +458,132 @@ func FindBackupToRestore(ctx context.Context, params RestoreParams, bhs []backup manifests := make([]*BackupManifest, len(bhs)) manifestHandleMap := NewManifestHandleMap() - fullBackupIndex := func() int { - for index := len(bhs) - 1; index >= 0; index-- { - bh := bhs[index] - // Check that the backup MANIFEST exists and can be successfully decoded. - bm, err := GetBackupManifest(ctx, bh) - if err != nil { - params.Logger.Warningf("Possibly incomplete backup %v in directory %v on BackupStorage: can't read MANIFEST: %v)", bh.Name(), backupDir, err) - continue - } - // the manifest is valid - manifests[index] = bm // manifests's order is insignificant, it will be sorted later on - manifestHandleMap.Map(bm, bh) - if bm.Incremental { - // We're looking for a full backup - continue - } + mysqlVersion, err := params.Mysqld.GetVersionString(ctx) + if err != nil { + return nil, err + } - var backupTime time.Time - if checkBackupTime { - backupTime, err = time.Parse(time.RFC3339, bm.BackupTime) - if err != nil { - params.Logger.Warningf("Restore: skipping backup %v/%v with invalid time %v: %v", backupDir, bh.Name(), bm.BackupTime, err) + // Let's first populate the manifests + for i, bh := range bhs { + // Check that the backup MANIFEST exists and can be successfully decoded. + bm, err := GetBackupManifest(ctx, bh) + if err != nil { + params.Logger.Warningf("Possibly incomplete backup %v in directory %v on BackupStorage: can't read MANIFEST: %v)", bh.Name(), backupDir, err) + continue + } + // the manifest is valid + manifests[i] = bm // manifests's order is insignificant, it will be sorted later on + manifestHandleMap.Map(bm, bh) + } + restorePath = &RestorePath{ + manifestHandleMap: manifestHandleMap, + } + if !params.IsIncrementalRecovery() { + // incremental recovery has its own logic for searching the best full backup. Here we only deal with full backup recovery. + fullBackupIndex := func() int { + for index := len(manifests) - 1; index >= 0; index-- { + bm := manifests[index] + if bm == nil { continue } - } - - switch { - case checkBackupTime: - // restore to specific time - if backupTime.Equal(params.StartTime) || backupTime.Before(params.StartTime) { - params.Logger.Infof("Restore: found backup %v %v to restore using the specified timestamp of '%v'", bh.Directory(), bh.Name(), params.StartTime.Format(BackupTimestampFormat)) - return index + if bm.Incremental { + // We're looking for a full backup + continue } - case !params.RestoreToPos.IsZero(): - // restore to specific pos - if params.RestoreToPos.GTIDSet.Contains(bm.Position.GTIDSet) { - // this is the most recent backup which is <= desired position + bh := manifestHandleMap.Handle(bm) + + // check if the backup can be used with this MySQL version. + if bm.MySQLVersion != "" { + if err := validateMySQLVersionUpgradeCompatible(mysqlVersion, bm.MySQLVersion, bm.UpgradeSafe); err != nil { + params.Logger.Warningf("Skipping backup %v/%v with incompatible MySQL version %v (upgrade safe: %v): %v", backupDir, bh.Name(), bm.MySQLVersion, bm.UpgradeSafe, err) + continue + } + } + + switch { + case checkBackupTime: + backupTime, err := ParseRFC3339(bm.BackupTime) + if err != nil { + params.Logger.Warningf("Restore: skipping backup %v/%v with invalid time %v: %v", backupDir, bh.Name(), bm.BackupTime, err) + continue + } + // restore to specific time + if backupTime.Equal(params.StartTime) || backupTime.Before(params.StartTime) { + params.Logger.Infof("Restore: found backup %v %v to restore using the specified timestamp of '%v'", bh.Directory(), bh.Name(), params.StartTime.Format(BackupTimestampFormat)) + return index + } + default: + // restore latest full backup + params.Logger.Infof("Restore: found latest backup %v %v to restore", bh.Directory(), bh.Name()) return index } - default: - // restore latest full backup - params.Logger.Infof("Restore: found latest backup %v %v to restore", bh.Directory(), bh.Name()) - return index } + return -1 + }() + if fullBackupIndex < 0 { + if checkBackupTime { + params.Logger.Errorf("No valid backup found before time %v", params.StartTime.Format(BackupTimestampFormat)) + } + // There is at least one attempted backup, but none could be read. + // This implies there is data we ought to have, so it's not safe to start + // up empty. + return nil, ErrNoCompleteBackup } - return -1 - }() - if fullBackupIndex < 0 { - if checkBackupTime { - params.Logger.Errorf("No valid backup found before time %v", params.StartTime.Format(BackupTimestampFormat)) - } - // There is at least one attempted backup, but none could be read. - // This implies there is data we ought to have, so it's not safe to start - // up empty. - return nil, ErrNoCompleteBackup - } - // Anything taken before the full backup that we picked, is not of interest: - manifests = manifests[fullBackupIndex:] - restorePath := &RestorePath{ - manifestHandleMap: manifestHandleMap, - } - if params.RestoreToPos.IsZero() { // restoring from a single full backup: - restorePath.Add(manifests[0]) + restorePath.Add(manifests[fullBackupIndex]) return restorePath, nil } - // restore to a position (using incremental backups): + // restore to a position/timestamp (using incremental backups): // we calculate a possible restore path based on the manifests. The resulting manifests are // a sorted subsequence, with the full backup first, and zero or more incremental backups to follow. - manifests, err := FindPITRPath(params.RestoreToPos.GTIDSet, manifests) + switch { + case !params.RestoreToPos.IsZero(): + manifests, err = FindPITRPath(params.RestoreToPos.GTIDSet, manifests) + case !params.RestoreToTimestamp.IsZero(): + manifests, err = FindPITRToTimePath(params.RestoreToTimestamp, manifests) + } + restorePath.manifests = manifests if err != nil { return nil, err } - restorePath.manifests = manifests return restorePath, nil } +func validateMySQLVersionUpgradeCompatible(to string, from string, upgradeSafe bool) error { + // It's always safe to use the same version. + if to == from { + return nil + } + + flavorTo, parsedTo, err := ParseVersionString(to) + if err != nil { + return err + } + + flavorFrom, parsedFrom, err := ParseVersionString(from) + if err != nil { + return err + } + + if flavorTo != flavorFrom { + return fmt.Errorf("cannot use backup between different flavors: %q vs. %q", from, to) + } + + if parsedTo == parsedFrom { + return nil + } + + if !parsedTo.atLeast(parsedFrom) { + return fmt.Errorf("running MySQL version %q is older than backup MySQL version %q", to, from) + } + + if upgradeSafe { + return nil + } + + return fmt.Errorf("running MySQL version %q is newer than backup MySQL version %q which is not safe to upgrade", to, from) +} + func prepareToRestore(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger) error { // shutdown mysqld if it is running logger.Infof("Restore: shutdown mysqld") diff --git a/go/vt/mysqlctl/backupstats/fake_stats.go b/go/vt/mysqlctl/backupstats/fake_stats.go index 6f487659827..e8e84431eb9 100644 --- a/go/vt/mysqlctl/backupstats/fake_stats.go +++ b/go/vt/mysqlctl/backupstats/fake_stats.go @@ -1,6 +1,9 @@ package backupstats -import "time" +import ( + "sync" + "time" +) type FakeStats struct { ScopeV map[ScopeType]ScopeValue @@ -11,6 +14,7 @@ type FakeStats struct { } ScopeCalls [][]Scope ScopeReturns []Stats + mutex sync.Mutex } func NewFakeStats(scopes ...Scope) *FakeStats { @@ -27,6 +31,8 @@ func NewFakeStats(scopes ...Scope) *FakeStats { // scopes and provided scopes. It also records the return value in // ScopeReturns, for use in unit test assertions. func (fs *FakeStats) Scope(scopes ...Scope) Stats { + fs.mutex.Lock() + defer fs.mutex.Unlock() fs.ScopeCalls = append(fs.ScopeCalls, scopes) newScopeV := map[ScopeType]ScopeValue{} for t, v := range fs.ScopeV { @@ -49,12 +55,16 @@ func (fs *FakeStats) Scope(scopes ...Scope) Stats { // TimedIncrement does nothing except record calls made to this function in // TimedIncrementCalls, for use in unit test assertions. func (fs *FakeStats) TimedIncrement(d time.Duration) { + fs.mutex.Lock() + defer fs.mutex.Unlock() fs.TimedIncrementCalls = append(fs.TimedIncrementCalls, d) } -// TimedIncrement does nothing except record calls made to this function in +// TimedIncrementBytes does nothing except record calls made to this function in // TimedIncrementBytesCalls, for use in unit test assertions. func (fs *FakeStats) TimedIncrementBytes(b int, d time.Duration) { + fs.mutex.Lock() + defer fs.mutex.Unlock() fs.TimedIncrementBytesCalls = append(fs.TimedIncrementBytesCalls, struct { Bytes int Duration time.Duration diff --git a/go/vt/mysqlctl/backupstats/stats.go b/go/vt/mysqlctl/backupstats/stats.go index e81bd569a97..6f64dec864f 100644 --- a/go/vt/mysqlctl/backupstats/stats.go +++ b/go/vt/mysqlctl/backupstats/stats.go @@ -21,7 +21,6 @@ import ( "time" "vitess.io/vitess/go/stats" - vtstats "vitess.io/vitess/go/stats" ) // Stats is a reporting interface meant to be shared among backup and restore @@ -52,9 +51,9 @@ type Stats interface { type noStats struct{} type scopedStats struct { - bytes *vtstats.CountersWithMultiLabels - count *vtstats.CountersWithMultiLabels - durationNs *vtstats.CountersWithMultiLabels + bytes *stats.CountersWithMultiLabels + count *stats.CountersWithMultiLabels + durationNs *stats.CountersWithMultiLabels labelValues []string } @@ -194,6 +193,6 @@ func (s *scopedStats) TimedIncrement(d time.Duration) { // TimedIncrementBytes increments the byte-count and duration of the current scope. func (s *scopedStats) TimedIncrementBytes(b int, d time.Duration) { - s.bytes.Add(s.labelValues, 1) + s.bytes.Add(s.labelValues, int64(b)) s.durationNs.Add(s.labelValues, int64(d.Nanoseconds())) } diff --git a/go/vt/mysqlctl/backupstats/stats_test.go b/go/vt/mysqlctl/backupstats/stats_test.go index f88bfdf8fb6..7fe61f0be60 100644 --- a/go/vt/mysqlctl/backupstats/stats_test.go +++ b/go/vt/mysqlctl/backupstats/stats_test.go @@ -1,6 +1,7 @@ package backupstats import ( + "fmt" "strings" "testing" "time" @@ -11,6 +12,7 @@ import ( ) func TestBackupStats(t *testing.T) { + require.Nil(t, backupBytes) require.Nil(t, backupCount) require.Nil(t, backupDurationNs) require.Nil(t, restoreCount) @@ -19,13 +21,16 @@ func TestBackupStats(t *testing.T) { BackupStats() defer resetStats() + require.NotNil(t, backupBytes) require.NotNil(t, backupCount) require.NotNil(t, backupDurationNs) + require.Nil(t, restoreBytes) require.Nil(t, restoreCount) require.Nil(t, restoreDurationNs) } func TestRestoreStats(t *testing.T) { + require.Nil(t, backupBytes) require.Nil(t, backupCount) require.Nil(t, backupDurationNs) require.Nil(t, restoreCount) @@ -34,8 +39,10 @@ func TestRestoreStats(t *testing.T) { RestoreStats() defer resetStats() + require.Nil(t, backupBytes) require.Nil(t, backupCount) require.Nil(t, backupDurationNs) + require.NotNil(t, restoreBytes) require.NotNil(t, restoreCount) require.NotNil(t, restoreDurationNs) } @@ -94,16 +101,18 @@ func TestScope(t *testing.T) { } func TestStatsAreNotInitializedByDefault(t *testing.T) { + require.Nil(t, backupBytes) require.Nil(t, backupCount) require.Nil(t, backupDurationNs) + require.Nil(t, restoreBytes) require.Nil(t, restoreCount) require.Nil(t, restoreDurationNs) } func TestTimedIncrement(t *testing.T) { - bytes := stats.NewCountersWithMultiLabels("test_timed_increment_bytes", "", labels) - count := stats.NewCountersWithMultiLabels("test_timed_increment_count", "", labels) - durationNs := stats.NewCountersWithMultiLabels("test_timed_increment_duration_ns", "", labels) + bytes := stats.NewCountersWithMultiLabels(fmt.Sprintf("%s_test_timed_increment_bytes", t.Name()), "", labels) + count := stats.NewCountersWithMultiLabels(fmt.Sprintf("%s_test_timed_increment_count", t.Name()), "", labels) + durationNs := stats.NewCountersWithMultiLabels(fmt.Sprintf("%s_test_timed_increment_duration_ns", t.Name()), "", labels) stats := newScopedStats(bytes, count, durationNs, nil) @@ -112,6 +121,8 @@ func TestTimedIncrement(t *testing.T) { stats.TimedIncrement(duration) + require.Equal(t, 0, len(bytes.Counts())) + require.Equal(t, 1, len(count.Counts())) require.Equal(t, int64(1), count.Counts()[path]) @@ -120,6 +131,8 @@ func TestTimedIncrement(t *testing.T) { stats.TimedIncrement(duration) + require.Equal(t, 0, len(bytes.Counts())) + require.Equal(t, 1, len(count.Counts())) require.Equal(t, int64(2), count.Counts()[path]) @@ -127,9 +140,43 @@ func TestTimedIncrement(t *testing.T) { require.Equal(t, 2*duration.Nanoseconds(), durationNs.Counts()[path]) } +func TestTimedIncrementBytes(t *testing.T) { + bytes := stats.NewCountersWithMultiLabels(fmt.Sprintf("%s_test_timed_increment_bytes", t.Name()), "", labels) + count := stats.NewCountersWithMultiLabels(fmt.Sprintf("%s_test_timed_increment_count", t.Name()), "", labels) + durationNs := stats.NewCountersWithMultiLabels(fmt.Sprintf("%s_test_timed_increment_duration_ns", t.Name()), "", labels) + + stats := newScopedStats(bytes, count, durationNs, nil) + + incBytes := 1024 + duration := 10 * time.Second + path := strings.Join([]string{unscoped, unscoped, unscoped}, ".") + + stats.TimedIncrementBytes(incBytes, duration) + + require.Equal(t, 1, len(bytes.Counts())) + require.Equal(t, int64(incBytes), bytes.Counts()[path]) + + require.Equal(t, 0, len(count.Counts())) + + require.Equal(t, 1, len(durationNs.Counts())) + require.Equal(t, duration.Nanoseconds(), durationNs.Counts()[path]) + + stats.TimedIncrementBytes(incBytes, duration) + + require.Equal(t, 1, len(bytes.Counts())) + require.Equal(t, int64(2*incBytes), bytes.Counts()[path]) + + require.Equal(t, 0, len(count.Counts())) + + require.Equal(t, 1, len(durationNs.Counts())) + require.Equal(t, 2*duration.Nanoseconds(), durationNs.Counts()[path]) +} + func resetStats() { + backupBytes = nil backupCount = nil backupDurationNs = nil + restoreBytes = nil restoreCount = nil restoreDurationNs = nil } diff --git a/go/vt/mysqlctl/binlogs_gtid.go b/go/vt/mysqlctl/binlogs_gtid.go index 73a69feda0b..3ea48663578 100644 --- a/go/vt/mysqlctl/binlogs_gtid.go +++ b/go/vt/mysqlctl/binlogs_gtid.go @@ -21,8 +21,9 @@ import ( "fmt" "sort" "strings" + "time" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -54,52 +55,60 @@ func (p *BackupManifestPath) String() string { // possible, or is empty. func ChooseBinlogsForIncrementalBackup( ctx context.Context, - lookFromGTIDSet mysql.GTIDSet, + backupFromGTIDSet replication.GTIDSet, + purgedGTIDSet replication.GTIDSet, binaryLogs []string, pgtids func(ctx context.Context, binlog string) (gtids string, err error), - unionPreviousGTIDs bool, ) ( binaryLogsToBackup []string, incrementalBackupFromGTID string, incrementalBackupToGTID string, err error, ) { - - var prevGTIDsUnion mysql.GTIDSet + var prevGTIDsUnion replication.GTIDSet for i, binlog := range binaryLogs { previousGtids, err := pgtids(ctx, binlog) if err != nil { return nil, "", "", vterrors.Wrapf(err, "cannot get previous gtids for binlog %v", binlog) } - prevPos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, previousGtids) + previousGTIDsPos, err := replication.ParsePosition(replication.Mysql56FlavorID, previousGtids) if err != nil { - return nil, "", "", vterrors.Wrapf(err, "cannot decode binlog %s position in incremental backup: %v", binlog, prevPos) + return nil, "", "", vterrors.Wrapf(err, "cannot decode binlog %s position in incremental backup: %v", binlog, previousGTIDsPos) } if prevGTIDsUnion == nil { - prevGTIDsUnion = prevPos.GTIDSet + prevGTIDsUnion = previousGTIDsPos.GTIDSet } else { - prevGTIDsUnion = prevGTIDsUnion.Union(prevPos.GTIDSet) + prevGTIDsUnion = prevGTIDsUnion.Union(previousGTIDsPos.GTIDSet) } - containedInFromPos := lookFromGTIDSet.Contains(prevPos.GTIDSet) - // The binary logs are read in-order. They are build one on top of the other: we know - // the PreviousGTIDs of once binary log fully cover the previous binary log's. - if containedInFromPos { - // All previous binary logs are fully contained by backupPos. Carry on - continue - } - // We look for the first binary log whose "PreviousGTIDs" isn't already fully covered - // by "backupPos" (the position from which we want to create the inreemental backup). + // The binary logs are read in-order. They expand. For example, we know + // Previous-GTIDs of binlog file 0000018 contain those of binlog file 0000017. + // We look for the first binary log whose Previous-GTIDs isn't already fully covered + // by "backupPos" (the position from which we want to create the incremental backup). // That means the *previous* binary log is the first binary log to introduce GTID events on top // of "backupPos" - if i == 0 { - return nil, "", "", vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "the very first binlog file %v has PreviousGTIDs %s that exceed given incremental backup pos. There are GTID entries that are missing and this backup cannot run", binlog, prevPos) + if backupFromGTIDSet.Contains(previousGTIDsPos.GTIDSet) { + // Previous-GTIDs is contained by backupPos. So definitely all binlogs _prior_ to + // this binlog are not necessary. We still don't know about _this_ binlog. We can't tell yet if + // _this_ binlog introduces new GTID entries not covered by the last backup pos. But we will only + // know this when we look into the _next_ binlog file's Previous-GTIDs. + continue } - if unionPreviousGTIDs { - prevPos.GTIDSet = prevGTIDsUnion + // Got here? This means backupFromGTIDSet does not full contain the current binlog's Previous-GTIDs. + // In other words, Previoud-GTIDs have entries on top of backupFromGTIDSet. Which suggests that these + // entries were added by the previous binary log. + if i == 0 { + // Ummm... there _is no_ previous binary log. + return nil, "", "", vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "Required entries have been purged. Oldest binary log %v expects entries not found in backup pos. Expected pos=%v", binlog, previousGTIDsPos) } - if !prevPos.GTIDSet.Contains(lookFromGTIDSet) { - return nil, "", "", vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "binary log %v with previous GTIDS %s neither contains requested GTID %s nor contains it. Backup cannot take place", binlog, prevPos.GTIDSet, lookFromGTIDSet) + // The other thing to validate, is that we can't allow a situation where the backup-GTIDs have entries not covered + // by our binary log's Previous-GTIDs (padded with purged GTIDs). Because that means we can't possibly restore to + // such position. + prevGTIDsUnionPurged := prevGTIDsUnion.Union(purgedGTIDSet) + if !prevGTIDsUnionPurged.Contains(backupFromGTIDSet) { + return nil, "", "", vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, + "Mismatching GTID entries. Requested backup pos has entries not found in the binary logs, and binary logs have entries not found in the requested backup pos. Neither fully contains the other.\n- Requested pos=%v\n- binlog pos=%v\n- purgedGTIDSet=%v\n- union=%v\n- union purged=%v", + backupFromGTIDSet, previousGTIDsPos.GTIDSet, purgedGTIDSet, prevGTIDsUnion, prevGTIDsUnionPurged) } // We begin with the previous binary log, and we ignore the last binary log, because it's still open and being written to. binaryLogsToBackup = binaryLogs[i-1 : len(binaryLogs)-1] @@ -107,7 +116,16 @@ func ChooseBinlogsForIncrementalBackup( if err != nil { return nil, "", "", vterrors.Wrapf(err, "cannot evaluate incremental backup from pos") } - // The "previous GTIDs" of the binary logs that _follows_ our binary-logs-to-backup indicates + if incrementalBackupFromGTID == "" { + // This can happen on the very first binary log file. It happens in two scenarios: + // 1. This is the first binlog ever in the history of the mysql server; the GTID is truly empty + // 2. A full backup was taken and restored, with all binlog scrapped. + // We take for granted that the first binary log file covers the + // requested "from GTID" + incrementalBackupFromGTID = backupFromGTIDSet.String() + } + + // The Previous-GTIDs of the binary logs that _follows_ our binary-logs-to-backup indicates // the backup's position. incrementalBackupToGTID, err := pgtids(ctx, binaryLogs[len(binaryLogs)-1]) if err != nil { @@ -121,7 +139,7 @@ func ChooseBinlogsForIncrementalBackup( // IsValidIncrementalBakcup determines whether the given manifest can be used to extend a backup // based on baseGTIDSet. The manifest must be able to pick up from baseGTIDSet, and must extend it by at least // one entry. -func IsValidIncrementalBakcup(baseGTIDSet mysql.GTIDSet, purgedGTIDSet mysql.GTIDSet, manifest *BackupManifest) bool { +func IsValidIncrementalBakcup(baseGTIDSet replication.GTIDSet, purgedGTIDSet replication.GTIDSet, manifest *BackupManifest) bool { if manifest == nil { return false } @@ -150,7 +168,7 @@ func IsValidIncrementalBakcup(baseGTIDSet mysql.GTIDSet, purgedGTIDSet mysql.GTI // - zero or more incremental backups // The path ends with restoreToGTIDSet or goes beyond it. No shorter path will do the same. // The function returns an error when a path cannot be found. -func FindPITRPath(restoreToGTIDSet mysql.GTIDSet, manifests [](*BackupManifest)) (shortestPath [](*BackupManifest), err error) { +func FindPITRPath(restoreToGTIDSet replication.GTIDSet, manifests [](*BackupManifest)) (shortestPath [](*BackupManifest), err error) { sortedManifests := make([](*BackupManifest), 0, len(manifests)) for _, m := range manifests { if m != nil { @@ -190,8 +208,8 @@ func FindPITRPath(restoreToGTIDSet mysql.GTIDSet, manifests [](*BackupManifest)) var validRestorePaths []BackupManifestPath // recursive function that searches for all possible paths: - var findPaths func(baseGTIDSet mysql.GTIDSet, pathManifests []*BackupManifest, remainingManifests []*BackupManifest) - findPaths = func(baseGTIDSet mysql.GTIDSet, pathManifests []*BackupManifest, remainingManifests []*BackupManifest) { + var findPaths func(baseGTIDSet replication.GTIDSet, pathManifests []*BackupManifest, remainingManifests []*BackupManifest) + findPaths = func(baseGTIDSet replication.GTIDSet, pathManifests []*BackupManifest, remainingManifests []*BackupManifest) { // The algorithm was first designed to find all possible paths. But then we recognized that it will be // doing excessive work. At this time we choose to end the search once we find the first valid path, even if // it's not the most optimal. The next "if" statement is the addition to the algorithm, where we suffice with @@ -235,3 +253,148 @@ func FindPITRPath(restoreToGTIDSet mysql.GTIDSet, manifests [](*BackupManifest)) } return shortestPath, nil } + +// FindPITRToTimePath evaluates the shortest path to recover a restoreToGTIDSet. The past is composed of: +// - a full backup, followed by: +// - zero or more incremental backups +// The path ends with restoreToGTIDSet or goes beyond it. No shorter path will do the same. +// The function returns an error when a path cannot be found. +func FindPITRToTimePath(restoreToTime time.Time, manifests [](*BackupManifest)) (shortestPath [](*BackupManifest), err error) { + restoreToTimeStr := FormatRFC3339(restoreToTime) + sortedManifests := make([](*BackupManifest), 0, len(manifests)) + for _, m := range manifests { + if m != nil { + sortedManifests = append(sortedManifests, m) + } + } + sort.SliceStable(sortedManifests, func(i, j int) bool { + return sortedManifests[j].Position.GTIDSet.Union(sortedManifests[i].PurgedPosition.GTIDSet).Contains(sortedManifests[i].Position.GTIDSet) + }) + mostRelevantFullBackupIndex := -1 // an invalid value + for i, manifest := range sortedManifests { + if manifest.Incremental { + continue + } + startTime, err := ParseRFC3339(manifest.BackupTime) + if err != nil { + return nil, vterrors.Wrapf(err, "parsing manifest BackupTime %s", manifest.BackupTime) + } + finishedTime, err := ParseRFC3339(manifest.FinishedTime) + if err != nil { + return nil, vterrors.Wrapf(err, "parsing manifest FinishedTime %s", manifest.FinishedTime) + } + var compareWithTime time.Time + switch manifest.BackupMethod { + case xtrabackupEngineName: + // Xtrabackup backups are true to the time they complete (the snapshot is taken at the very end). + // Therefore the finish time best represents the backup time. + compareWithTime = finishedTime + case builtinBackupEngineName: + // Builtin takes down the MySQL server. Hence the _start time_ represents the backup time best + compareWithTime = startTime + default: + compareWithTime = startTime + } + if restoreToTime.Before(compareWithTime) { + // We want a bfull backup whose time is _before_ restore-to-time, and we will top it with + // inremental restore via binlogs. + continue + } + mostRelevantFullBackupIndex = i + } + + if mostRelevantFullBackupIndex < 0 { + // No full backup prior to desired restore point... + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no full backup found before timestmap %v", restoreToTimeStr) + } + // All that interests us starts with mostRelevantFullBackupIndex: that's where the full backup is, + // and any relevant incremental backups follow that point (because manifests are sorted by backup pos, ascending) + sortedManifests = sortedManifests[mostRelevantFullBackupIndex:] + // Of all relevant backups, we take the most recent one. + fullBackup := sortedManifests[0] + purgedGTIDSet := fullBackup.PurgedPosition.GTIDSet + + timeIsInRange := func(t, from, to time.Time) bool { + // integrity: + if to.Before(from) { + return false // bad input + } + if t.Before(from) { + return false + } + if t.After(to) { + return false + } + return true + } + + var validRestorePaths []BackupManifestPath + // recursive function that searches for all possible paths: + var findPaths func(baseGTIDSet replication.GTIDSet, pathManifests []*BackupManifest, remainingManifests []*BackupManifest) error + findPaths = func(baseGTIDSet replication.GTIDSet, pathManifests []*BackupManifest, remainingManifests []*BackupManifest) error { + // The algorithm was first designed to find all possible paths. But then we recognized that it will be + // doing excessive work. At this time we choose to end the search once we find the first valid path, even if + // it's not the most optimal. The next "if" statement is the addition to the algorithm, where we suffice with + // a single result. + if len(validRestorePaths) > 0 { + return nil + } + // remove the above if you wish to explore all paths. + lastManifest := pathManifests[len(pathManifests)-1] + if lastManifest.Incremental { + lastManifestIncrementalDetails := lastManifest.IncrementalDetails + + firstTimestamp, err := ParseRFC3339(lastManifestIncrementalDetails.FirstTimestamp) + if err != nil { + return err + } + if restoreToTime.Before(firstTimestamp) { + // the restore-to-time falls between previous manifest's timestamp (whether previous manifest is a + // full backup or incremental backup is not important), and this manifest's first-timestamp. + // This means the previous manifest is the end of a valid restore path. We couldn't know it back then. + validRestorePaths = append(validRestorePaths, pathManifests[0:len(pathManifests)-1]) + return nil + } + lastTimestamp, err := ParseRFC3339(lastManifestIncrementalDetails.LastTimestamp) + if err != nil { + return err + } + if timeIsInRange(restoreToTime, firstTimestamp, lastTimestamp) { + // successful end of path. Update list of successful paths + validRestorePaths = append(validRestorePaths, pathManifests) + return nil + } + } + if len(remainingManifests) == 0 { + // end of the road. No possibilities from here. + return nil + } + // if the next manifest is eligible to be part of the path, try it out + if IsValidIncrementalBakcup(baseGTIDSet, purgedGTIDSet, remainingManifests[0]) { + nextGTIDSet := baseGTIDSet.Union(remainingManifests[0].Position.GTIDSet) + findPaths(nextGTIDSet, append(pathManifests, remainingManifests[0]), remainingManifests[1:]) + } + // also, try without the next manifest + findPaths(baseGTIDSet, pathManifests, remainingManifests[1:]) + return nil + } + // find all paths, entry point + if err := findPaths(fullBackup.Position.GTIDSet, sortedManifests[0:1], sortedManifests[1:]); err != nil { + return nil, err + } + if len(validRestorePaths) == 0 { + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no path found that leads to timestamp %v", restoreToTimeStr) + } + // Now find a shortest path + for i := range validRestorePaths { + path := validRestorePaths[i] + if shortestPath == nil { + shortestPath = path + continue + } + if len(path) < len(shortestPath) { + shortestPath = path + } + } + return shortestPath, nil +} diff --git a/go/vt/mysqlctl/binlogs_gtid_test.go b/go/vt/mysqlctl/binlogs_gtid_test.go index 336b835e3bc..655208e908e 100644 --- a/go/vt/mysqlctl/binlogs_gtid_test.go +++ b/go/vt/mysqlctl/binlogs_gtid_test.go @@ -14,17 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package mysqlctl_test is the blackbox tests for package mysqlctl. +// Tests that need to use fakemysqldaemon must be written as blackbox tests; +// since fakemysqldaemon imports mysqlctl, importing fakemysqldaemon in +// a `package mysqlctl` test would cause a circular import. package mysqlctl import ( "context" "fmt" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" ) func TestChooseBinlogsForIncrementalBackup(t *testing.T) { @@ -45,47 +50,57 @@ func TestChooseBinlogsForIncrementalBackup(t *testing.T) { "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-331", } tt := []struct { + name string previousGTIDs map[string]string backupPos string + gtidPurged string expectBinlogs []string expectError string }{ { + name: "exact match", previousGTIDs: basePreviousGTIDs, backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-78", expectBinlogs: []string{"vt-bin.000004", "vt-bin.000005"}, }, { + name: "exact match, two binlogs with same previous GTIDs", previousGTIDs: basePreviousGTIDs, backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-60", expectBinlogs: []string{"vt-bin.000003", "vt-bin.000004", "vt-bin.000005"}, }, { + name: "inexact match", previousGTIDs: basePreviousGTIDs, backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-63", expectBinlogs: []string{"vt-bin.000003", "vt-bin.000004", "vt-bin.000005"}, }, { + name: "one binlog match", previousGTIDs: basePreviousGTIDs, backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-243", expectBinlogs: []string{"vt-bin.000005"}, }, { + name: "last binlog excluded, no binlogs found", previousGTIDs: basePreviousGTIDs, backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-331", expectError: "no binary logs to backup", }, { + name: "backup pos beyond all binlogs", previousGTIDs: basePreviousGTIDs, backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-630000", expectError: "no binary logs to backup", }, { + name: "missing GTID entries", previousGTIDs: basePreviousGTIDs, - backupPos: "16b1039f-22b6-11ed-b765-0a43f95f0000:1-63", - expectError: "There are GTID entries that are missing", + backupPos: "16b1039f-0000-0000-0000-000000000000:1-63", + expectError: "Required entries have been purged", }, { + name: "empty previous GTIDs in first binlog", previousGTIDs: map[string]string{ "vt-bin.000001": "", "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-60", @@ -94,10 +109,90 @@ func TestChooseBinlogsForIncrementalBackup(t *testing.T) { "vt-bin.000005": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-243", "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-331", }, - backupPos: "16b1039f-22b6-11ed-b765-0a43f95f0000:1-63", - expectError: "neither contains requested GTID", + backupPos: "16b1039f-0000-0000-0000-000000000000:1-63", + expectError: "Mismatching GTID entries", }, { + name: "empty previous GTIDs in first binlog with gap, with good backup pos", + previousGTIDs: map[string]string{ + "vt-bin.000001": "", + "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-60", + "vt-bin.000003": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-60", + "vt-bin.000004": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-78", + "vt-bin.000005": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-243", + "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-331", + }, + backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-78", + expectBinlogs: []string{"vt-bin.000004", "vt-bin.000005"}, + }, + { + name: "empty previous GTIDs in first binlog with gap, and without gtid_purged", + previousGTIDs: map[string]string{ + "vt-bin.000001": "", + "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-60", + "vt-bin.000003": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-60", + "vt-bin.000004": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-78", + "vt-bin.000005": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-243", + "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-331", + }, + backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-78", + expectError: "Mismatching GTID entries", + }, + { + name: "empty previous GTIDs in first binlog but with proper gtid_purged", + previousGTIDs: map[string]string{ + "vt-bin.000001": "", + "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-60", + "vt-bin.000003": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-60", + "vt-bin.000004": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-78", + "vt-bin.000005": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-243", + "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:40-331", + }, + backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-78", + gtidPurged: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-40", + expectBinlogs: []string{"vt-bin.000004", "vt-bin.000005"}, + }, + { + name: "empty previous GTIDs in first binlog covering backup pos", + previousGTIDs: map[string]string{ + "vt-bin.000001": "", + "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-60", + "vt-bin.000003": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-60", + "vt-bin.000004": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-78", + "vt-bin.000005": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-243", + "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-331", + }, + backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-30", + expectBinlogs: []string{"vt-bin.000001", "vt-bin.000002", "vt-bin.000003", "vt-bin.000004", "vt-bin.000005"}, + }, + { + name: "empty previous GTIDs in first binlog not covering backup pos", + previousGTIDs: map[string]string{ + "vt-bin.000001": "", + "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-60", + "vt-bin.000003": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-60", + "vt-bin.000004": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-78", + "vt-bin.000005": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-243", + "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-331", + }, + backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-65", + expectBinlogs: []string{"vt-bin.000003", "vt-bin.000004", "vt-bin.000005"}, + }, + { + name: "empty previous GTIDs in first binlog not covering backup pos, 2", + previousGTIDs: map[string]string{ + "vt-bin.000001": "", + "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-60", + "vt-bin.000003": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-60", + "vt-bin.000004": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-78", + "vt-bin.000005": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-243", + "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-331", + }, + backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-100", + expectBinlogs: []string{"vt-bin.000004", "vt-bin.000005"}, + }, + { + name: "match with non strictly monotonic sequence", previousGTIDs: map[string]string{ "vt-bin.000001": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-50", "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-60", @@ -109,15 +204,81 @@ func TestChooseBinlogsForIncrementalBackup(t *testing.T) { backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-63", expectBinlogs: []string{"vt-bin.000003", "vt-bin.000004", "vt-bin.000005"}, }, + { + name: "exact, gitd_purged", + previousGTIDs: map[string]string{ + "vt-bin.000001": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-78", + "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-90", + "vt-bin.000003": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-90", + "vt-bin.000004": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-100", + "vt-bin.000005": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-110", + "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:2-300", + }, + backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-78", + gtidPurged: "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-78", + expectBinlogs: []string{"vt-bin.000001", "vt-bin.000002", "vt-bin.000003", "vt-bin.000004", "vt-bin.000005"}, + }, + { + name: "exact, gitd_purged 2", + previousGTIDs: basePreviousGTIDs, + backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-50", + gtidPurged: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-50", + expectBinlogs: []string{"vt-bin.000001", "vt-bin.000002", "vt-bin.000003", "vt-bin.000004", "vt-bin.000005"}, + }, + { + name: "inexact, gitd_purged, missing", + previousGTIDs: map[string]string{ + "vt-bin.000001": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-78", + "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-90", + "vt-bin.000003": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-90", + "vt-bin.000004": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-100", + "vt-bin.000005": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-110", + "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:2-300", + }, + backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-63", + gtidPurged: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-2", + expectError: "Required entries have been purged", + }, + { + name: "inexact, gitd_purged, missing 2", + previousGTIDs: map[string]string{ + "vt-bin.000001": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-78", + "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-90", + "vt-bin.000003": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-90", + "vt-bin.000004": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-100", + "vt-bin.000005": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-110", + "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:2-300", + }, + backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-80", + gtidPurged: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1", + expectBinlogs: []string{"vt-bin.000001", "vt-bin.000002", "vt-bin.000003", "vt-bin.000004", "vt-bin.000005"}, + expectError: "Mismatching GTID entries", + }, + { + name: "inexact, gitd_purged, found", + previousGTIDs: map[string]string{ + "vt-bin.000001": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-78", + "vt-bin.000002": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-90", + "vt-bin.000003": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-90", + "vt-bin.000004": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-100", + "vt-bin.000005": "16b1039f-22b6-11ed-b765-0a43f95f28a3:3-110", + "vt-bin.000006": "16b1039f-22b6-11ed-b765-0a43f95f28a3:2-300", + }, + backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-84", + gtidPurged: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-2", + expectBinlogs: []string{"vt-bin.000001", "vt-bin.000002", "vt-bin.000003", "vt-bin.000004", "vt-bin.000005"}, + }, } - for i, tc := range tt { - t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - backupPos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, tc.backupPos) + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + backupPos, err := replication.ParsePosition(replication.Mysql56FlavorID, tc.backupPos) require.NoError(t, err) + gtidPurged, err := replication.ParsePosition(replication.Mysql56FlavorID, tc.gtidPurged) require.NoError(t, err) binlogsToBackup, fromGTID, toGTID, err := ChooseBinlogsForIncrementalBackup( context.Background(), backupPos.GTIDSet, + gtidPurged.GTIDSet, binlogs, func(ctx context.Context, binlog string) (gtids string, err error) { gtids, ok := tc.previousGTIDs[binlog] @@ -126,7 +287,6 @@ func TestChooseBinlogsForIncrementalBackup(t *testing.T) { } return gtids, nil }, - true, ) if tc.expectError != "" { require.Error(t, err) @@ -136,7 +296,9 @@ func TestChooseBinlogsForIncrementalBackup(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, binlogsToBackup) assert.Equal(t, tc.expectBinlogs, binlogsToBackup) - assert.Equal(t, tc.previousGTIDs[binlogsToBackup[0]], fromGTID) + if tc.previousGTIDs[binlogsToBackup[0]] != "" { + assert.Equal(t, tc.previousGTIDs[binlogsToBackup[0]], fromGTID) + } assert.Equal(t, tc.previousGTIDs[binlogs[len(binlogs)-1]], toGTID) assert.NotEqual(t, fromGTID, toGTID) }) @@ -146,8 +308,8 @@ func TestChooseBinlogsForIncrementalBackup(t *testing.T) { func TestIsValidIncrementalBakcup(t *testing.T) { incrementalManifest := func(backupPos string, backupFromPos string) *BackupManifest { return &BackupManifest{ - Position: mysql.MustParsePosition(mysql.Mysql56FlavorID, fmt.Sprintf("16b1039f-22b6-11ed-b765-0a43f95f28a3:%s", backupPos)), - FromPosition: mysql.MustParsePosition(mysql.Mysql56FlavorID, fmt.Sprintf("16b1039f-22b6-11ed-b765-0a43f95f28a3:%s", backupFromPos)), + Position: replication.MustParsePosition(replication.Mysql56FlavorID, fmt.Sprintf("16b1039f-22b6-11ed-b765-0a43f95f28a3:%s", backupPos)), + FromPosition: replication.MustParsePosition(replication.Mysql56FlavorID, fmt.Sprintf("16b1039f-22b6-11ed-b765-0a43f95f28a3:%s", backupFromPos)), Incremental: true, } } @@ -223,9 +385,9 @@ func TestIsValidIncrementalBakcup(t *testing.T) { } for i, tc := range tt { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { - basePos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, tc.baseGTID) + basePos, err := replication.ParsePosition(replication.Mysql56FlavorID, tc.baseGTID) require.NoError(t, err) - purgedPos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, tc.purgedGTID) + purgedPos, err := replication.ParsePosition(replication.Mysql56FlavorID, tc.purgedGTID) require.NoError(t, err) isValid := IsValidIncrementalBakcup(basePos.GTIDSet, purgedPos.GTIDSet, incrementalManifest(tc.backupPos, tc.backupFromPos)) assert.Equal(t, tc.expectIsValid, isValid) @@ -234,8 +396,8 @@ func TestIsValidIncrementalBakcup(t *testing.T) { } func TestFindPITRPath(t *testing.T) { - generatePosition := func(posRange string) mysql.Position { - return mysql.MustParsePosition(mysql.Mysql56FlavorID, fmt.Sprintf("16b1039f-22b6-11ed-b765-0a43f95f28a3:%s", posRange)) + generatePosition := func(posRange string) replication.Position { + return replication.MustParsePosition(replication.Mysql56FlavorID, fmt.Sprintf("16b1039f-22b6-11ed-b765-0a43f95f28a3:%s", posRange)) } fullManifest := func(backupPos string) *BackupManifest { return &BackupManifest{ @@ -425,17 +587,17 @@ func TestFindPITRPath(t *testing.T) { for i := range fullBackups { var err error fullBackup := fullBackups[i] - fullBackup.PurgedPosition, err = mysql.ParsePosition(mysql.Mysql56FlavorID, tc.purgedGTID) + fullBackup.PurgedPosition, err = replication.ParsePosition(replication.Mysql56FlavorID, tc.purgedGTID) require.NoError(t, err) defer func() { - fullBackup.PurgedPosition = mysql.Position{} + fullBackup.PurgedPosition = replication.Position{} }() } var manifests []*BackupManifest manifests = append(manifests, fullBackups...) manifests = append(manifests, tc.incrementalBackups...) - restorePos, err := mysql.ParsePosition(mysql.Mysql56FlavorID, tc.restoreGTID) + restorePos, err := replication.ParsePosition(replication.Mysql56FlavorID, tc.restoreGTID) require.NoErrorf(t, err, "%v", err) path, err := FindPITRPath(restorePos.GTIDSet, manifests) if tc.expectError != "" { @@ -461,3 +623,297 @@ func TestFindPITRPath(t *testing.T) { }) } } + +func TestFindPITRToTimePath(t *testing.T) { + generatePosition := func(posRange string) replication.Position { + return replication.MustParsePosition(replication.Mysql56FlavorID, fmt.Sprintf("16b1039f-22b6-11ed-b765-0a43f95f28a3:%s", posRange)) + } + fullManifest := func(backupPos string, timeStr string) *BackupManifest { + _, err := ParseRFC3339(timeStr) + require.NoError(t, err) + return &BackupManifest{ + BackupMethod: builtinBackupEngineName, + Position: generatePosition(backupPos), + BackupTime: timeStr, + FinishedTime: timeStr, + } + } + incrementalManifest := func(backupPos string, backupFromPos string, firstTimestampStr string, lastTimestampStr string) *BackupManifest { + firstTimestamp, err := ParseRFC3339(firstTimestampStr) + require.NoError(t, err) + lastTimestamp, err := ParseRFC3339(lastTimestampStr) + require.NoError(t, err) + + return &BackupManifest{ + Position: generatePosition(backupPos), + FromPosition: generatePosition(backupFromPos), + Incremental: true, + IncrementalDetails: &IncrementalBackupDetails{ + FirstTimestamp: FormatRFC3339(firstTimestamp), + LastTimestamp: FormatRFC3339(lastTimestamp), + }, + } + } + + fullManifests := map[string]*BackupManifest{ + "1-50": fullManifest("1-50", "2020-02-02T02:20:20.000000Z"), + "1-5": fullManifest("1-5", "2020-02-02T02:01:20.000000Z"), + "1-80": fullManifest("1-80", "2020-02-02T03:31:00.000000Z"), + "1-70": fullManifest("1-70", "2020-02-02T03:10:01.000000Z"), + "1-70b": fullManifest("1-70", "2020-02-02T03:10:11.000000Z"), + } + fullBackups := []*BackupManifest{ + fullManifests["1-50"], + fullManifests["1-5"], + fullManifests["1-80"], + fullManifests["1-70"], + fullManifests["1-70b"], + } + incrementalManifests := map[string]*BackupManifest{ + "1-34:1-5": incrementalManifest("1-34", "1-5", "2020-02-02T02:01:44.000000Z", "2020-02-02T02:17:00.000000Z"), + "1-38:1-34": incrementalManifest("1-38", "1-34", "2020-02-02T02:17:05.000000Z", "2020-02-02T02:18:00.000000Z"), + "1-52:1-35": incrementalManifest("1-52", "1-35", "2020-02-02T02:17:59.000000Z", "2020-02-02T02:22:00.000000Z"), + "1-60:1-50": incrementalManifest("1-60", "1-50", "2020-02-02T02:20:21.000000Z", "2020-02-02T02:47:20.000000Z"), + "1-70:1-60": incrementalManifest("1-70", "1-60", "2020-02-02T02:47:20.000000Z", "2020-02-02T03:10:00.700000Z"), + "1-82:1-70": incrementalManifest("1-82", "1-70", "2020-02-02T03:10:11.000000Z", "2020-02-02T03:39:09.000000Z"), + "1-92:1-79": incrementalManifest("1-92", "1-79", "2020-02-02T03:37:07.000000Z", "2020-02-02T04:04:04.000000Z"), + "1-95:1-89": incrementalManifest("1-95", "1-89", "2020-02-02T03:59:05.000000Z", "2020-02-02T04:15:00.000000Z"), + } + incrementalBackups := []*BackupManifest{ + incrementalManifests["1-34:1-5"], + incrementalManifests["1-38:1-34"], + incrementalManifests["1-52:1-35"], + incrementalManifests["1-60:1-50"], + incrementalManifests["1-70:1-60"], + incrementalManifests["1-82:1-70"], + incrementalManifests["1-92:1-79"], + incrementalManifests["1-95:1-89"], + } + incrementalBackupName := func(manifest *BackupManifest) string { + for k, v := range incrementalManifests { + if v == manifest { + return k + } + } + return "unknown" + } + tt := []struct { + name string + restoreToTimestamp string + purgedGTID string + incrementalBackups []*BackupManifest + expectFullManifest *BackupManifest + expectIncrementalManifests []*BackupManifest + expectError string + }{ + { + name: "full is enough", + restoreToTimestamp: "2020-02-02T02:01:20.000000Z", + expectFullManifest: fullManifests["1-5"], + expectIncrementalManifests: []*BackupManifest{}, + }, + { + name: "full is still enough", + restoreToTimestamp: "2020-02-02T02:01:41.000000Z", + expectFullManifest: fullManifests["1-5"], + expectIncrementalManifests: []*BackupManifest{}, + }, + { + name: "full is just not enough", + restoreToTimestamp: "2020-02-02T02:01:44.000000Z", + expectFullManifest: fullManifests["1-5"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-34:1-5"], + }, + }, + { + name: "just one", + restoreToTimestamp: "2020-02-02T02:20:21.000000Z", + expectFullManifest: fullManifests["1-50"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-52:1-35"], + }, + }, + { + name: "two", + restoreToTimestamp: "2020-02-02T02:23:23.000000Z", + expectFullManifest: fullManifests["1-50"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-52:1-35"], + incrementalManifests["1-60:1-50"], + }, + }, + { + name: "three", + restoreToTimestamp: "2020-02-02T02:55:55.000000Z", + expectFullManifest: fullManifests["1-50"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-52:1-35"], + incrementalManifests["1-60:1-50"], + incrementalManifests["1-70:1-60"], + }, + }, + { + name: "still three", + restoreToTimestamp: "2020-02-02T03:10:00.600000Z", + expectFullManifest: fullManifests["1-50"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-52:1-35"], + incrementalManifests["1-60:1-50"], + incrementalManifests["1-70:1-60"], + }, + }, + { + name: "and still three", + restoreToTimestamp: "2020-02-02T03:10:00.700000Z", + expectFullManifest: fullManifests["1-50"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-52:1-35"], + incrementalManifests["1-60:1-50"], + incrementalManifests["1-70:1-60"], + }, + }, + { + name: "and still three, exceeding binlog last timestamp", + restoreToTimestamp: "2020-02-02T03:10:00.800000Z", + expectFullManifest: fullManifests["1-50"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-52:1-35"], + incrementalManifests["1-60:1-50"], + incrementalManifests["1-70:1-60"], + }, + }, + { + name: "next backup 1-70", + restoreToTimestamp: "2020-02-02T03:10:01.000000Z", + expectFullManifest: fullManifests["1-70"], + expectIncrementalManifests: []*BackupManifest{}, + }, + { + name: "next backup 1-70 with one binlog", + restoreToTimestamp: "2020-02-02T03:10:13.000000Z", + expectFullManifest: fullManifests["1-70"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-82:1-70"], + }, + }, + { + name: "next backup 1-70b, included first binlog", + restoreToTimestamp: "2020-02-02T03:10:11.000000Z", + expectFullManifest: fullManifests["1-70b"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-82:1-70"], + }, + }, + { + name: "next backup 1-70b, still included first binlog", + restoreToTimestamp: "2020-02-02T03:20:11.000000Z", + expectFullManifest: fullManifests["1-70b"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-82:1-70"], + }, + }, + { + name: "1-80 and two binlogs", + restoreToTimestamp: "2020-02-02T04:00:00.000000Z", + expectFullManifest: fullManifests["1-80"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-82:1-70"], + incrementalManifests["1-92:1-79"], + }, + }, + { + name: "1-80 and all remaining binlogs", + restoreToTimestamp: "2020-02-02T04:10:00.000000Z", + expectFullManifest: fullManifests["1-80"], + expectIncrementalManifests: []*BackupManifest{ + incrementalManifests["1-82:1-70"], + incrementalManifests["1-92:1-79"], + incrementalManifests["1-95:1-89"], + }, + }, + { + name: "no incremental backup reaches this timestamp", + restoreToTimestamp: "2020-02-02T07:07:07.000000Z", + expectError: "no path found", + }, + { + name: "sooner than any full backup", + restoreToTimestamp: "2020-02-02T01:59:59.000000Z", + expectError: "no full backup", + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + if tc.incrementalBackups == nil { + tc.incrementalBackups = incrementalBackups + } + for i := range fullBackups { + var err error + fullBackup := fullBackups[i] + fullBackup.PurgedPosition, err = replication.ParsePosition(replication.Mysql56FlavorID, tc.purgedGTID) + require.NoError(t, err) + defer func() { + fullBackup.PurgedPosition = replication.Position{} + }() + } + var manifests []*BackupManifest + manifests = append(manifests, fullBackups...) + manifests = append(manifests, tc.incrementalBackups...) + + restoreToTime, err := ParseRFC3339(tc.restoreToTimestamp) + require.NoError(t, err) + require.False(t, restoreToTime.IsZero()) + + path, err := FindPITRToTimePath(restoreToTime, manifests) + if tc.expectError != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.expectError) + return + } + require.NoError(t, err) + require.NotEmpty(t, path) + // the path always consists of one full backup and zero or more incremental backups + fullBackup := path[0] + require.False(t, fullBackup.Incremental) + for _, manifest := range path[1:] { + require.True(t, manifest.Incremental) + } + assert.Equal(t, tc.expectFullManifest.Position.GTIDSet, fullBackup.Position.GTIDSet) + if tc.expectIncrementalManifests == nil { + tc.expectIncrementalManifests = []*BackupManifest{} + } + expected := BackupManifestPath(tc.expectIncrementalManifests) + got := BackupManifestPath(path[1:]) + gotNames := []string{} + for _, manifest := range got { + gotNames = append(gotNames, incrementalBackupName(manifest)) + } + assert.Equal(t, expected, got, "got names: %v", gotNames) + }) + } + t.Run("iterate all valid timestamps", func(t *testing.T) { + var manifests []*BackupManifest + manifests = append(manifests, fullBackups...) + manifests = append(manifests, incrementalBackups...) + + firstTimestamp, err := ParseRFC3339(fullManifests["1-5"].BackupTime) + require.NoError(t, err) + lastTimestamp, err := ParseRFC3339(incrementalManifests["1-95:1-89"].IncrementalDetails.LastTimestamp) + require.NoError(t, err) + + for restoreToTime := firstTimestamp; !restoreToTime.After(lastTimestamp); restoreToTime = restoreToTime.Add(10 * time.Second) { + testName := fmt.Sprintf("restore to %v", restoreToTime) + t.Run(testName, func(t *testing.T) { + path, err := FindPITRToTimePath(restoreToTime, manifests) + require.NoError(t, err) + require.NotEmpty(t, path) + fullBackup := path[0] + require.False(t, fullBackup.Incremental) + for _, manifest := range path[1:] { + require.True(t, manifest.Incremental) + } + }) + } + }) +} diff --git a/go/vt/mysqlctl/builtinbackupengine.go b/go/vt/mysqlctl/builtinbackupengine.go index cb41ae1dc66..33d4ce688fd 100644 --- a/go/vt/mysqlctl/builtinbackupengine.go +++ b/go/vt/mysqlctl/builtinbackupengine.go @@ -21,6 +21,7 @@ import ( "context" "encoding/hex" "encoding/json" + "errors" "fmt" "hash" "hash/crc32" @@ -37,17 +38,22 @@ import ( "vitess.io/vitess/go/ioutil" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" stats "vitess.io/vitess/go/vt/mysqlctl/backupstats" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" - "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tmclient" + + mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + "vitess.io/vitess/go/vt/proto/vtrpc" ) const ( @@ -103,6 +109,20 @@ type builtinBackupManifest struct { // false for backups that were created before the field existed, and those // backups all had compression enabled. SkipCompress bool + + // When CompressionEngine is "external", ExternalDecompressor may be + // consulted for the external decompressor command. + // + // When taking a backup with --compression-engine=external, + // ExternalDecompressor will be set to the value of + // --manifest-external-decompressor, if set, or else left as an empty + // string. + // + // When restoring from a backup with CompressionEngine "external", + // --external-decompressor will be consulted first and, if that is not set, + // ExternalDecompressor will be used. If neither are set, the restore will + // abort. + ExternalDecompressor string } // FileEntry is one file to backup @@ -139,11 +159,6 @@ func registerBuiltinBackupEngineFlags(fs *pflag.FlagSet) { fs.UintVar(&builtinBackupFileWriteBufferSize, "builtinbackup-file-write-buffer-size", builtinBackupFileWriteBufferSize, "write files using an IO buffer of this many bytes. Golang defaults are used when set to 0.") } -// isIncrementalBackup is a convenience function to check whether the params indicate an incremental backup request -func isIncrementalBackup(params BackupParams) bool { - return params.IncrementalFromPos != "" -} - // fullPath returns the full path of the entry, based on its type func (fe *FileEntry) fullPath(cnf *Mycnf) (string, error) { // find the root to use @@ -199,65 +214,75 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP return be.executeFullBackup(ctx, params, bh) } +// getIncrementalFromPosGTIDSet turns the given string into a valid Mysql56GTIDSet +func getIncrementalFromPosGTIDSet(incrementalFromPos string) (replication.Mysql56GTIDSet, error) { + pos, err := replication.DecodePositionDefaultFlavor(incrementalFromPos, replication.Mysql56FlavorID) + if err != nil { + return nil, vterrors.Wrapf(err, "cannot decode position in incremental backup: %v", incrementalFromPos) + } + if !pos.MatchesFlavor(replication.Mysql56FlavorID) { + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "incremental backup only supports MySQL GTID positions. Got: %v", incrementalFromPos) + } + ifPosGTIDSet, ok := pos.GTIDSet.(replication.Mysql56GTIDSet) + if !ok { + return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot get MySQL GTID value: %v", pos) + } + return ifPosGTIDSet, nil +} + // executeIncrementalBackup runs an incremental backup, based on given 'incremental_from_pos', which can be: // - A valid position // - "auto", indicating the incremental backup should begin with last successful backup end position. func (be *BuiltinBackupEngine) executeIncrementalBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (bool, error) { + // Collect MySQL status: + // UUID + serverUUID, err := params.Mysqld.GetServerUUID(ctx) + if err != nil { + return false, vterrors.Wrap(err, "can't get server uuid") + } + mysqlVersion, err := params.Mysqld.GetVersionString(ctx) + if err != nil { + return false, vterrors.Wrap(err, "can't get MySQL version") + } + + var fromBackupName string if params.IncrementalFromPos == autoIncrementalFromPos { params.Logger.Infof("auto evaluating incremental_from_pos") - bs, err := backupstorage.GetBackupStorage() + backupName, pos, err := FindLatestSuccessfulBackupPosition(ctx, params, bh.Name()) if err != nil { return false, err } - defer bs.Close() + fromBackupName = backupName + params.IncrementalFromPos = replication.EncodePosition(pos) + params.Logger.Infof("auto evaluated incremental_from_pos: %s", params.IncrementalFromPos) + } - // Backups are stored in a directory structure that starts with - // / - backupDir := GetBackupDir(params.Keyspace, params.Shard) - bhs, err := bs.ListBackups(ctx, backupDir) + // @@gtid_purged + getPurgedGTIDSet := func() (replication.Position, replication.Mysql56GTIDSet, error) { + gtidPurged, err := params.Mysqld.GetGTIDPurged(ctx) if err != nil { - return false, vterrors.Wrap(err, "ListBackups failed") + return gtidPurged, nil, vterrors.Wrap(err, "can't get @@gtid_purged") } - _, manifest, err := FindLatestSuccessfulBackup(ctx, params.Logger, bhs) - if err != nil { - return false, vterrors.Wrap(err, "FindLatestSuccessfulBackup failed") + purgedGTIDSet, ok := gtidPurged.GTIDSet.(replication.Mysql56GTIDSet) + if !ok { + return gtidPurged, nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot get MySQL GTID purged value: %v", gtidPurged) } - params.IncrementalFromPos = mysql.EncodePosition(manifest.Position) - params.Logger.Infof("auto evaluated incremental_from_pos: %s", params.IncrementalFromPos) + return gtidPurged, purgedGTIDSet, nil } - rp, err := mysql.DecodePosition(params.IncrementalFromPos) + // params.IncrementalFromPos is a string. We want to turn that into a MySQL GTID + backupFromGTIDSet, err := getIncrementalFromPosGTIDSet(params.IncrementalFromPos) if err != nil { - return false, vterrors.Wrapf(err, "cannot decode position in incremental backup: %v", params.IncrementalFromPos) - } - if !rp.MatchesFlavor(mysql.Mysql56FlavorID) { - // incrementalFromGtidSet, ok := rp.GTIDSet.(mysql.Mysql56GTIDSet) - // if !ok { - return false, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "incremental backup only supports MySQL GTID positions. Got: %v", params.IncrementalFromPos) - } - serverUUID, err := params.Mysqld.GetServerUUID(ctx) - if err != nil { - return false, vterrors.Wrap(err, "can't get server uuid") - } - gtidPurged, err := params.Mysqld.GetGTIDPurged(ctx) - if err != nil { - return false, vterrors.Wrap(err, "can't get gtid_purged") - } - rpGTID, ok := rp.GTIDSet.(mysql.Mysql56GTIDSet) - if !ok { - return false, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot get MySQL GTID value: %v", rpGTID) - } - purgedGTID, ok := gtidPurged.GTIDSet.(mysql.Mysql56GTIDSet) - if !ok { - return false, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot get MySQL GTID purged value: %v", rpGTID) + return false, err } + // OK, we now have the formal MySQL GTID from which we want to take the incremental backip. + // binlogs may not contain information about purged GTIDs. e.g. some binlog.000003 may have // previous GTIDs like 00021324-1111-1111-1111-111111111111:30-60, ie 1-29 range is missing. This can happen // when a server is restored from backup and set with gtid_purged != "". // This is fine! // Shortly we will compare a binlog's "Previous GTIDs" with the backup's position. For the purpose of comparison, we // ignore the purged GTIDs: - binlogCompareGTID := rpGTID.Difference(purgedGTID) if err := params.Mysqld.FlushBinaryLogs(ctx); err != nil { return false, vterrors.Wrapf(err, "cannot flush binary logs in incremental backup") @@ -266,8 +291,15 @@ func (be *BuiltinBackupEngine) executeIncrementalBackup(ctx context.Context, par if err != nil { return false, vterrors.Wrapf(err, "cannot get binary logs in incremental backup") } + // gtid_purged is important information. The restore flow uses this info to to complement binary logs' Previous-GTIDs. + // It is important to only get gtid_purged _after_ we've rotated into the new binary log, because the `FLUSH BINARY LOGS` + // command may also purge old logs, hence affecting the value of gtid_purged. + gtidPurged, purgedGTIDSet, err := getPurgedGTIDSet() + if err != nil { + return false, err + } previousGTIDs := map[string]string{} - getPreviousGTIDs := func(ctx context.Context, binlog string) (gtids string, err error) { + getBinlogPreviousGTIDs := func(ctx context.Context, binlog string) (gtids string, err error) { gtids, ok := previousGTIDs[binlog] if ok { // Found a cached entry! No need to query again @@ -280,18 +312,48 @@ func (be *BuiltinBackupEngine) executeIncrementalBackup(ctx context.Context, par previousGTIDs[binlog] = gtids return gtids, nil } - binaryLogsToBackup, incrementalBackupFromGTID, incrementalBackupToGTID, err := ChooseBinlogsForIncrementalBackup(ctx, binlogCompareGTID, binaryLogs, getPreviousGTIDs, true) + binaryLogsToBackup, incrementalBackupFromGTID, incrementalBackupToGTID, err := ChooseBinlogsForIncrementalBackup(ctx, backupFromGTIDSet, purgedGTIDSet, binaryLogs, getBinlogPreviousGTIDs) if err != nil { return false, vterrors.Wrapf(err, "cannot get binary logs to backup in incremental backup") } - incrementalBackupFromPosition, err := mysql.ParsePosition(mysql.Mysql56FlavorID, incrementalBackupFromGTID) + incrementalBackupFromPosition, err := replication.ParsePosition(replication.Mysql56FlavorID, incrementalBackupFromGTID) if err != nil { return false, vterrors.Wrapf(err, "cannot parse position %v", incrementalBackupFromGTID) } - incrementalBackupToPosition, err := mysql.ParsePosition(mysql.Mysql56FlavorID, incrementalBackupToGTID) + incrementalBackupToPosition, err := replication.ParsePosition(replication.Mysql56FlavorID, incrementalBackupToGTID) if err != nil { return false, vterrors.Wrapf(err, "cannot parse position %v", incrementalBackupToGTID) } + // The backup position is the GTISset of the last binary log (taken from Previous-GTIDs of the one-next binary log), and we + // also include gtid_purged ; this complies with the "standard" way MySQL "thinks" about GTIDs: there's gtid_executed, which includes + // everything that's ever been applied, and a subset of that is gtid_purged, which are the event no longer available in binary logs. + // When we consider Vitess incremental backups, what's important for us is "what's the GTIDSet that's true when this backup was taken, + // and which will be true when we restore this backup". The answer to this is the GTIDSet that includes the purged GTIDs. + // It's also nice for incremental backups that are taken on _other_ tablets, so that they don't need to understand what exactly was purged + // on _this_ tablet. They don't care, all they want to know is "what GTIDSet can we get from this". + incrementalBackupToPosition.GTIDSet = incrementalBackupToPosition.GTIDSet.Union(gtidPurged.GTIDSet) + req := &mysqlctlpb.ReadBinlogFilesTimestampsRequest{} + for _, binlogFile := range binaryLogsToBackup { + fe := FileEntry{Base: backupBinlogDir, Name: binlogFile} + fullPath, err := fe.fullPath(params.Cnf) + if err != nil { + return false, err + } + req.BinlogFileNames = append(req.BinlogFileNames, fullPath) + } + resp, err := params.Mysqld.ReadBinlogFilesTimestamps(ctx, req) + if err != nil { + return false, vterrors.Wrapf(err, "reading timestamps from binlog files %v", binaryLogsToBackup) + } + if resp.FirstTimestampBinlog == "" || resp.LastTimestampBinlog == "" { + return false, vterrors.Errorf(vtrpc.Code_ABORTED, "empty binlog name in response. Request=%v, Response=%v", req, resp) + } + incrDetails := &IncrementalBackupDetails{ + FirstTimestamp: FormatRFC3339(protoutil.TimeFromProto(resp.FirstTimestamp).UTC()), + FirstTimestampBinlog: filepath.Base(resp.FirstTimestampBinlog), + LastTimestamp: FormatRFC3339(protoutil.TimeFromProto(resp.LastTimestamp).UTC()), + LastTimestampBinlog: filepath.Base(resp.LastTimestampBinlog), + } // It's worthwhile we explain the difference between params.IncrementalFromPos and incrementalBackupFromPosition. // params.IncrementalFromPos is supplied by the user. They want an incremental backup that covers that position. // However, we implement incremental backups by copying complete binlog files. That position could potentially @@ -301,7 +363,7 @@ func (be *BuiltinBackupEngine) executeIncrementalBackup(ctx context.Context, par // incrementalBackupFromGTID is the "previous GTIDs" of the first binlog file we back up. // It is a fact that incrementalBackupFromGTID is earlier or equal to params.IncrementalFromPos. // In the backup manifest file, we document incrementalBackupFromGTID, not the user's requested position. - if err := be.backupFiles(ctx, params, bh, incrementalBackupToPosition, mysql.Position{}, incrementalBackupFromPosition, binaryLogsToBackup, serverUUID); err != nil { + if err := be.backupFiles(ctx, params, bh, incrementalBackupToPosition, gtidPurged, incrementalBackupFromPosition, fromBackupName, binaryLogsToBackup, serverUUID, mysqlVersion, incrDetails); err != nil { return false, err } return true, nil @@ -318,8 +380,9 @@ func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params Bac // Save initial state so we can restore. replicaStartRequired := false sourceIsPrimary := false - readOnly := true //nolint - var replicationPosition mysql.Position + superReadOnly := true //nolint + readOnly := true //nolint + var replicationPosition replication.Position semiSyncSource, semiSyncReplica := params.Mysqld.SemiSyncEnabled() // See if we need to restart replication after backup. @@ -338,16 +401,30 @@ func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params Bac // get the read-only flag readOnly, err = params.Mysqld.IsReadOnly() if err != nil { - return false, vterrors.Wrap(err, "can't get read-only status") + return false, vterrors.Wrap(err, "failed to get read_only status") + } + superReadOnly, err = params.Mysqld.IsSuperReadOnly() + if err != nil { + return false, vterrors.Wrap(err, "can't get super_read_only status") } + log.Infof("Flag values during full backup, read_only: %v, super_read_only:%t", readOnly, superReadOnly) // get the replication position if sourceIsPrimary { - if !readOnly { - params.Logger.Infof("turning primary read-only before backup") - if err = params.Mysqld.SetReadOnly(true); err != nil { - return false, vterrors.Wrap(err, "can't set read-only status") + // No need to set read_only because super_read_only will implicitly set read_only to true as well. + if !superReadOnly { + params.Logger.Infof("Enabling super_read_only on primary prior to backup") + if _, err = params.Mysqld.SetSuperReadOnly(true); err != nil { + return false, vterrors.Wrap(err, "failed to enable super_read_only") } + defer func() { + // Resetting super_read_only back to its original value + params.Logger.Infof("resetting mysqld super_read_only to %v", superReadOnly) + if _, err := params.Mysqld.SetSuperReadOnly(false); err != nil { + log.Error("Failed to set super_read_only back to its original value") + } + }() + } replicationPosition, err = params.Mysqld.PrimaryPosition() if err != nil { @@ -371,13 +448,21 @@ func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params Bac return false, vterrors.Wrap(err, "can't get gtid_purged") } + serverUUID, err := params.Mysqld.GetServerUUID(ctx) if err != nil { - return false, vterrors.Wrap(err, "can't get purged position") + return false, vterrors.Wrap(err, "can't get server uuid") } - serverUUID, err := params.Mysqld.GetServerUUID(ctx) + mysqlVersion, err := params.Mysqld.GetVersionString(ctx) if err != nil { - return false, vterrors.Wrap(err, "can't get server uuid") + return false, vterrors.Wrap(err, "can't get MySQL version") + } + + // check if we need to set innodb_fast_shutdown=0 for a backup safe for upgrades + if params.UpgradeSafe { + if _, err := params.Mysqld.FetchSuperQuery(ctx, "SET GLOBAL innodb_fast_shutdown=0"); err != nil { + return false, vterrors.Wrapf(err, "failed to disable fast shutdown") + } } // shutdown mysqld @@ -389,7 +474,7 @@ func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params Bac } // Backup everything, capture the error. - backupErr := be.backupFiles(ctx, params, bh, replicationPosition, gtidPurgedPosition, mysql.Position{}, nil, serverUUID) + backupErr := be.backupFiles(ctx, params, bh, replicationPosition, gtidPurgedPosition, replication.Position{}, "", nil, serverUUID, mysqlVersion, nil) usable := backupErr == nil // Try to restart mysqld, use background context in case we timed out the original context @@ -398,9 +483,9 @@ func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params Bac return usable, vterrors.Wrap(err, "can't restart mysqld") } - // And set read-only mode - params.Logger.Infof("resetting mysqld read-only to %v", readOnly) - if err := params.Mysqld.SetReadOnly(readOnly); err != nil { + // Resetting super_read_only back to its original value + params.Logger.Infof("resetting mysqld super_read_only to %v", superReadOnly) + if _, err := params.Mysqld.SetSuperReadOnly(superReadOnly); err != nil { return usable, err } @@ -469,13 +554,15 @@ func (be *BuiltinBackupEngine) backupFiles( ctx context.Context, params BackupParams, bh backupstorage.BackupHandle, - replicationPosition mysql.Position, - purgedPosition mysql.Position, - fromPosition mysql.Position, + backupPosition replication.Position, + purgedPosition replication.Position, + fromPosition replication.Position, + fromBackupName string, binlogFiles []string, serverUUID string, + mysqlVersion string, + incrDetails *IncrementalBackupDetails, ) (finalErr error) { - // Get the files to backup. // We don't care about totalSize because we add each file separately. var fes []FileEntry @@ -497,18 +584,35 @@ func (be *BuiltinBackupEngine) backupFiles( wg.Add(1) go func(i int) { defer wg.Done() - - // Wait until we are ready to go, skip if we already - // encountered an error. - sema.Acquire(ctx, 1) + fe := &fes[i] + // Wait until we are ready to go, return if we encounter an error + acqErr := sema.Acquire(ctx, 1) + if acqErr != nil { + log.Errorf("Unable to acquire semaphore needed to backup file: %s, err: %s", fe.Name, acqErr.Error()) + bh.RecordError(acqErr) + return + } defer sema.Release(1) + // Check for context cancellation explicitly because, the way semaphore code is written, theoretically we might + // end up not throwing an error even after cancellation. Please see https://cs.opensource.google/go/x/sync/+/refs/tags/v0.1.0:semaphore/semaphore.go;l=66, + // which suggests that if the context is already done, `Acquire()` may still succeed without blocking. This introduces + // unpredictability in my test cases, so in order to avoid that, I am adding this cancellation check. + select { + case <-ctx.Done(): + log.Errorf("Context canceled or timed out during %q backup", fe.Name) + bh.RecordError(vterrors.Errorf(vtrpc.Code_CANCELED, "context canceled")) + return + default: + } + if bh.HasErrors() { + params.Logger.Infof("failed to backup files due to error.") return } // Backup the individual file. name := fmt.Sprintf("%v", i) - bh.RecordError(be.backupFile(ctx, params, bh, &fes[i], name)) + bh.RecordError(be.backupFile(ctx, params, bh, fe, name)) }(i) } @@ -532,7 +636,8 @@ func (be *BuiltinBackupEngine) backupFiles( return vterrors.Wrapf(err, "cannot add %v to backup", backupManifestFileName) } defer func() { - if closeErr := wc.Close(); finalErr == nil { + closeErr := wc.Close() + if finalErr == nil { finalErr = closeErr } }() @@ -541,23 +646,28 @@ func (be *BuiltinBackupEngine) backupFiles( bm := &builtinBackupManifest{ // Common base fields BackupManifest: BackupManifest{ - BackupMethod: builtinBackupEngineName, - Position: replicationPosition, - PurgedPosition: purgedPosition, - FromPosition: fromPosition, - Incremental: !fromPosition.IsZero(), - ServerUUID: serverUUID, - TabletAlias: params.TabletAlias, - Keyspace: params.Keyspace, - Shard: params.Shard, - BackupTime: params.BackupTime.UTC().Format(time.RFC3339), - FinishedTime: time.Now().UTC().Format(time.RFC3339), + BackupMethod: builtinBackupEngineName, + Position: backupPosition, + PurgedPosition: purgedPosition, + FromPosition: fromPosition, + FromBackup: fromBackupName, + Incremental: !fromPosition.IsZero(), + ServerUUID: serverUUID, + TabletAlias: params.TabletAlias, + Keyspace: params.Keyspace, + Shard: params.Shard, + BackupTime: params.BackupTime.UTC().Format(time.RFC3339), + FinishedTime: time.Now().UTC().Format(time.RFC3339), + MySQLVersion: mysqlVersion, + UpgradeSafe: params.UpgradeSafe, + IncrementalDetails: incrDetails, }, // Builtin-specific fields - FileEntries: fes, - SkipCompress: !backupStorageCompress, - CompressionEngine: CompressionEngineName, + FileEntries: fes, + SkipCompress: !backupStorageCompress, + CompressionEngine: CompressionEngineName, + ExternalDecompressor: ManifestExternalDecompressorCmd, } data, err := json.MarshalIndent(bm, "", " ") if err != nil { @@ -655,6 +765,8 @@ func (bp *backupPipe) ReportProgress(period time.Duration, logger logutil.Logger // backupFile backs up an individual file. func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle, fe *FileEntry, name string) (finalErr error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() // Open the source file for reading. openSourceAt := time.Now() source, err := fe.open(params.Cnf, true) @@ -692,12 +804,9 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara defer func(name, fileName string) { closeDestAt := time.Now() if rerr := dest.Close(); rerr != nil { - if finalErr != nil { - // We already have an error, just log this one. - params.Logger.Errorf2(rerr, "failed to close file %v,%v", name, fe.Name) - } else { - finalErr = rerr - } + rerr = vterrors.Wrapf(rerr, "failed to close file %v,%v", name, fe.Name) + params.Logger.Error(rerr) + finalErr = errors.Join(finalErr, rerr) } params.Stats.Scope(stats.Operation("Destination:Close")).TimedIncrement(time.Since(closeDestAt)) }(name, fe.Name) @@ -707,57 +816,67 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara bw := newBackupWriter(fe.Name, builtinBackupStorageWriteBufferSize, fi.Size(), timedDest) - var reader io.Reader = br - var writer io.Writer = bw + // We create the following inner function because: + // - we must `defer` the compressor's Close() function + // - but it must take place before we close the pipe reader&writer + createAndCopy := func() (createAndCopyErr error) { + var reader io.Reader = br + var writer io.Writer = bw + + // Create the gzip compression pipe, if necessary. + if backupStorageCompress { + var compressor io.WriteCloser + if ExternalCompressorCmd != "" { + compressor, err = newExternalCompressor(ctx, ExternalCompressorCmd, writer, params.Logger) + } else { + compressor, err = newBuiltinCompressor(CompressionEngineName, writer, params.Logger) + } + if err != nil { + return vterrors.Wrap(err, "can't create compressor") + } - // Create the gzip compression pipe, if necessary. - var compressor io.WriteCloser - if backupStorageCompress { - if ExternalCompressorCmd != "" { - compressor, err = newExternalCompressor(ctx, ExternalCompressorCmd, writer, params.Logger) - } else { - compressor, err = newBuiltinCompressor(CompressionEngineName, writer, params.Logger) - } - if err != nil { - return vterrors.Wrap(err, "can't create compressor") + compressStats := params.Stats.Scope(stats.Operation("Compressor:Write")) + writer = ioutil.NewMeteredWriter(compressor, compressStats.TimedIncrementBytes) + + closer := ioutil.NewTimeoutCloser(ctx, compressor, closeTimeout) + defer func() { + // Close gzip to flush it, after that all data is sent to writer. + closeCompressorAt := time.Now() + params.Logger.Infof("closing compressor") + if cerr := closer.Close(); err != nil { + cerr = vterrors.Wrapf(cerr, "failed to close compressor %v", name) + params.Logger.Error(cerr) + createAndCopyErr = errors.Join(createAndCopyErr, cerr) + } + params.Stats.Scope(stats.Operation("Compressor:Close")).TimedIncrement(time.Since(closeCompressorAt)) + }() } - compressStats := params.Stats.Scope(stats.Operation("Compressor:Write")) - writer = ioutil.NewMeteredWriter(compressor, compressStats.TimedIncrementBytes) - } - - if builtinBackupFileReadBufferSize > 0 { - reader = bufio.NewReaderSize(br, int(builtinBackupFileReadBufferSize)) - } + if builtinBackupFileReadBufferSize > 0 { + reader = bufio.NewReaderSize(br, int(builtinBackupFileReadBufferSize)) + } - // Copy from the source file to writer (optional gzip, - // optional pipe, tee, output file and hasher). - _, err = io.Copy(writer, reader) - if err != nil { - return vterrors.Wrap(err, "cannot copy data") + // Copy from the source file to writer (optional gzip, + // optional pipe, tee, output file and hasher). + _, err = io.Copy(writer, reader) + if err != nil { + return vterrors.Wrap(err, "cannot copy data") + } + return nil } - // Close gzip to flush it, after that all data is sent to writer. - if compressor != nil { - closeCompressorAt := time.Now() - if err = compressor.Close(); err != nil { - return vterrors.Wrap(err, "cannot close compressor") - } - params.Stats.Scope(stats.Operation("Compressor:Close")).TimedIncrement(time.Since(closeCompressorAt)) + if err := createAndCopy(); err != nil { + return err } // Close the backupPipe to finish writing on destination. - closeWriterAt := time.Now() if err = bw.Close(); err != nil { return vterrors.Wrapf(err, "cannot flush destination: %v", name) } - params.Stats.Scope(stats.Operation("Destination:Close")).TimedIncrement(time.Since(closeWriterAt)) - closeReaderAt := time.Now() if err := br.Close(); err != nil { return vterrors.Wrap(err, "failed to close the source reader") } - params.Stats.Scope(stats.Operation("Source:Close")).TimedIncrement(time.Since(closeReaderAt)) // Save the hash. fe.Hash = bw.HashString() @@ -772,7 +891,7 @@ func (be *BuiltinBackupEngine) executeRestoreFullBackup(ctx context.Context, par params.Logger.Infof("Restore: copying %v files", len(bm.FileEntries)) - if _, err := be.restoreFiles(context.Background(), params, bh, bm); err != nil { + if _, err := be.restoreFiles(ctx, params, bh, bm); err != nil { // don't delete the file here because that is how we detect an interrupted restore return vterrors.Wrap(err, "failed to restore files") } @@ -785,8 +904,7 @@ func (be *BuiltinBackupEngine) executeRestoreFullBackup(ctx context.Context, par // The underlying mysql database is expected to be up and running. func (be *BuiltinBackupEngine) executeRestoreIncrementalBackup(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, bm builtinBackupManifest) error { params.Logger.Infof("Restoring incremental backup to position: %v", bm.Position) - - createdDir, err := be.restoreFiles(context.Background(), params, bh, bm) + createdDir, err := be.restoreFiles(ctx, params, bh, bm) defer os.RemoveAll(createdDir) mysqld, ok := params.Mysqld.(*Mysqld) if !ok { @@ -798,8 +916,15 @@ func (be *BuiltinBackupEngine) executeRestoreIncrementalBackup(ctx context.Conte if err != nil { return vterrors.Wrap(err, "failed to restore file") } - if err := mysqld.applyBinlogFile(binlogFile, params.RestoreToPos.GTIDSet); err != nil { - return vterrors.Wrap(err, "failed to extract binlog file") + req := &mysqlctlpb.ApplyBinlogFileRequest{ + BinlogFileName: binlogFile, + BinlogRestoreDatetime: protoutil.TimeToProto(params.RestoreToTimestamp), + } + if params.RestoreToPos.GTIDSet != nil { + req.BinlogRestorePosition = params.RestoreToPos.GTIDSet.String() + } + if err := mysqld.ApplyBinlogFile(ctx, req); err != nil { + return vterrors.Wrapf(err, "failed to apply binlog file %v", binlogFile) } defer os.Remove(binlogFile) params.Logger.Infof("Applied binlog file: %v", binlogFile) @@ -819,7 +944,6 @@ func (be *BuiltinBackupEngine) executeRestoreIncrementalBackup(ctx context.Conte func (be *BuiltinBackupEngine) ExecuteRestore(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle) (*BackupManifest, error) { var bm builtinBackupManifest - if err := getBackupManifestInto(ctx, bh, &bm); err != nil { return nil, err } @@ -869,16 +993,32 @@ func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, params RestoreP wg.Add(1) go func(i int) { defer wg.Done() - - // Wait until we are ready to go, skip if we already - // encountered an error. - sema.Acquire(ctx, 1) + fe := &fes[i] + // Wait until we are ready to go, return if we encounter an error + acqErr := sema.Acquire(ctx, 1) + if acqErr != nil { + log.Errorf("Unable to acquire semaphore needed to restore file: %s, err: %s", fe.Name, acqErr.Error()) + rec.RecordError(acqErr) + return + } defer sema.Release(1) + // Check for context cancellation explicitly because, the way semaphore code is written, theoretically we might + // end up not throwing an error even after cancellation. Please see https://cs.opensource.google/go/x/sync/+/refs/tags/v0.1.0:semaphore/semaphore.go;l=66, + // which suggests that if the context is already done, `Acquire()` may still succeed without blocking. This introduces + // unpredictability in my test cases, so in order to avoid that, I am adding this cancellation check. + select { + case <-ctx.Done(): + log.Errorf("Context canceled or timed out during %q restore", fe.Name) + rec.RecordError(vterrors.Errorf(vtrpc.Code_CANCELED, "context canceled")) + return + default: + } + if rec.HasErrors() { + params.Logger.Infof("Failed to restore files due to error.") return } - fe := &fes[i] fe.ParentPath = createdDir // And restore the file. name := fmt.Sprintf("%v", i) @@ -895,6 +1035,8 @@ func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, params RestoreP // restoreFile restores an individual file. func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, fe *FileEntry, bm builtinBackupManifest, name string) (finalErr error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() // Open the source file for reading. openSourceAt := time.Now() source, err := bh.ReadFile(ctx, name) @@ -927,12 +1069,7 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestorePa defer func() { closeDestAt := time.Now() if cerr := dest.Close(); cerr != nil { - if finalErr != nil { - // We already have an error, just log this one. - log.Errorf("failed to close file %v: %v", name, cerr) - } else { - finalErr = vterrors.Wrap(cerr, "failed to close destination file") - } + finalErr = errors.Join(finalErr, vterrors.Wrap(cerr, "failed to close destination file")) } params.Stats.Scope(stats.Operation("Destination:Close")).TimedIncrement(time.Since(closeDestAt)) }() @@ -951,9 +1088,13 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestorePa // for backward compatibility deCompressionEngine = PgzipCompressor } - if ExternalDecompressorCmd != "" { + externalDecompressorCmd := ExternalDecompressorCmd + if externalDecompressorCmd == "" && bm.ExternalDecompressor != "" { + externalDecompressorCmd = bm.ExternalDecompressor + } + if externalDecompressorCmd != "" { if deCompressionEngine == ExternalCompressor { - deCompressionEngine = ExternalDecompressorCmd + deCompressionEngine = externalDecompressorCmd decompressor, err = newExternalDecompressor(ctx, deCompressionEngine, reader, params.Logger) } else { decompressor, err = newBuiltinDecompressor(deCompressionEngine, reader, params.Logger) @@ -967,27 +1108,25 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestorePa if err != nil { return vterrors.Wrap(err, "can't create decompressor") } + closer := ioutil.NewTimeoutCloser(ctx, decompressor, closeTimeout) decompressStats := params.Stats.Scope(stats.Operation("Decompressor:Read")) reader = ioutil.NewMeteredReader(decompressor, decompressStats.TimedIncrementBytes) defer func() { closeDecompressorAt := time.Now() - if cerr := decompressor.Close(); cerr != nil { - params.Logger.Errorf("failed to close decompressor: %v", cerr) - if finalErr != nil { - // We already have an error, just log this one. - log.Errorf("failed to close decompressor %v: %v", name, cerr) - } else { - finalErr = vterrors.Wrap(cerr, "failed to close decompressor") - } + params.Logger.Infof("closing decompressor") + if cerr := closer.Close(); err != nil { + cerr = vterrors.Wrapf(cerr, "failed to close decompressor %v", name) + params.Logger.Error(cerr) + finalErr = errors.Join(finalErr, cerr) } params.Stats.Scope(stats.Operation("Decompressor:Close")).TimedIncrement(time.Since(closeDecompressorAt)) }() } // Copy the data. Will also write to the hasher. - if _, err = io.Copy(bufferedDest, reader); err != nil { + if _, err := io.Copy(bufferedDest, reader); err != nil { return vterrors.Wrap(err, "failed to copy file contents") } @@ -998,50 +1137,50 @@ func (be *BuiltinBackupEngine) restoreFile(ctx context.Context, params RestorePa } // Flush the buffer. - closeDestAt := time.Now() if err := bufferedDest.Flush(); err != nil { return vterrors.Wrap(err, "failed to flush destination buffer") } - params.Stats.Scope(stats.Operation("Destination:Close")).TimedIncrement(time.Since(closeDestAt)) - closeSourceAt := time.Now() if err := br.Close(); err != nil { return vterrors.Wrap(err, "failed to close the source reader") } - params.Stats.Scope(stats.Operation("Source:Close")).TimedIncrement(time.Since(closeSourceAt)) return nil } // ShouldDrainForBackup satisfies the BackupEngine interface // backup requires query service to be stopped, hence true -func (be *BuiltinBackupEngine) ShouldDrainForBackup() bool { +func (be *BuiltinBackupEngine) ShouldDrainForBackup(req *tabletmanagerdatapb.BackupRequest) bool { + if req != nil && req.IncrementalFromPos != "" { + // Incremental backup: we do not drain the tablet. + return false + } return true } -func getPrimaryPosition(ctx context.Context, tmc tmclient.TabletManagerClient, ts *topo.Server, keyspace, shard string) (mysql.Position, error) { +func getPrimaryPosition(ctx context.Context, tmc tmclient.TabletManagerClient, ts *topo.Server, keyspace, shard string) (replication.Position, error) { si, err := ts.GetShard(ctx, keyspace, shard) if err != nil { - return mysql.Position{}, vterrors.Wrap(err, "can't read shard") + return replication.Position{}, vterrors.Wrap(err, "can't read shard") } if topoproto.TabletAliasIsZero(si.PrimaryAlias) { - return mysql.Position{}, fmt.Errorf("shard %v/%v has no primary", keyspace, shard) + return replication.Position{}, fmt.Errorf("shard %v/%v has no primary", keyspace, shard) } ti, err := ts.GetTablet(ctx, si.PrimaryAlias) if err != nil { - return mysql.Position{}, fmt.Errorf("can't get primary tablet record %v: %v", topoproto.TabletAliasString(si.PrimaryAlias), err) + return replication.Position{}, fmt.Errorf("can't get primary tablet record %v: %v", topoproto.TabletAliasString(si.PrimaryAlias), err) } posStr, err := tmc.PrimaryPosition(ctx, ti.Tablet) if err != nil { - return mysql.Position{}, fmt.Errorf("can't get primary replication position: %v", err) + return replication.Position{}, fmt.Errorf("can't get primary replication position: %v", err) } - pos, err := mysql.DecodePosition(posStr) + pos, err := replication.DecodePosition(posStr) if err != nil { - return mysql.Position{}, fmt.Errorf("can't decode primary replication position %q: %v", posStr, err) + return replication.Position{}, fmt.Errorf("can't decode primary replication position %q: %v", posStr, err) } return pos, nil } func init() { - BackupRestoreEngineMap["builtin"] = &BuiltinBackupEngine{} + BackupRestoreEngineMap[builtinBackupEngineName] = &BuiltinBackupEngine{} } diff --git a/go/vt/mysqlctl/builtinbackupengine_test.go b/go/vt/mysqlctl/builtinbackupengine_test.go index c86086984f0..39e4aa7ae1c 100644 --- a/go/vt/mysqlctl/builtinbackupengine_test.go +++ b/go/vt/mysqlctl/builtinbackupengine_test.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,167 +15,66 @@ limitations under the License. */ // Package mysqlctl_test is the blackbox tests for package mysqlctl. -package mysqlctl_test +package mysqlctl import ( - "context" - "fmt" - "os" - "path" "testing" - "time" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/mysql/fakesqldb" - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/mysqlctl" - "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage" - "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/proto/vttime" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/vttablet/faketmclient" - "vitess.io/vitess/go/vt/vttablet/tmclient" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" ) -func setBuiltinBackupMysqldDeadline(t time.Duration) time.Duration { - old := mysqlctl.BuiltinBackupMysqldTimeout - mysqlctl.BuiltinBackupMysqldTimeout = t - - return old -} - -func createBackupDir(root string, dirs ...string) error { - for _, dir := range dirs { - if err := os.MkdirAll(path.Join(root, dir), 0755); err != nil { - return err - } - } - - return nil -} - -func TestExecuteBackup(t *testing.T) { - // Set up local backup directory - backupRoot := "testdata/builtinbackup_test" - filebackupstorage.FileBackupStorageRoot = backupRoot - require.NoError(t, createBackupDir(backupRoot, "innodb", "log", "datadir")) - defer os.RemoveAll(backupRoot) - - ctx := context.Background() - - needIt, err := needInnoDBRedoLogSubdir() - require.NoError(t, err) - if needIt { - fpath := path.Join("log", mysql.DynamicRedoLogSubdir) - if err := createBackupDir(backupRoot, fpath); err != nil { - t.Fatalf("failed to create directory %s: %v", fpath, err) - } - } - - // Set up topo - keyspace, shard := "mykeyspace", "-80" - ts := memorytopo.NewServer("cell1") - defer ts.Close() - - require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodata.Keyspace{})) - require.NoError(t, ts.CreateShard(ctx, keyspace, shard)) - - tablet := topo.NewTablet(100, "cell1", "mykeyspace-00-80-0100") - tablet.Keyspace = keyspace - tablet.Shard = shard - - require.NoError(t, ts.CreateTablet(ctx, tablet)) - - _, err = ts.UpdateShardFields(ctx, keyspace, shard, func(si *topo.ShardInfo) error { - si.PrimaryAlias = &topodata.TabletAlias{Uid: 100, Cell: "cell1"} - - now := time.Now() - si.PrimaryTermStartTime = &vttime.Time{Seconds: int64(now.Second()), Nanoseconds: int32(now.Nanosecond())} - - return nil - }) - require.NoError(t, err) - - // Set up tm client - // Note that using faketmclient.NewFakeTabletManagerClient will cause infinite recursion :shrug: - tmclient.RegisterTabletManagerClientFactory("grpc", - func() tmclient.TabletManagerClient { return &faketmclient.FakeTabletManagerClient{} }, - ) - - be := &mysqlctl.BuiltinBackupEngine{} - - // Configure a tight deadline to force a timeout - oldDeadline := setBuiltinBackupMysqldDeadline(time.Second) - defer setBuiltinBackupMysqldDeadline(oldDeadline) - - bh := filebackupstorage.NewBackupHandle(nil, "", "", false) - - // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: - // "STOP SLAVE", "START SLAVE", in that order. - mysqld := mysqlctl.NewFakeMysqlDaemon(fakesqldb.New(t)) - mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} - // mysqld.ShutdownTime = time.Minute - - ok, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ - Logger: logutil.NewConsoleLogger(), - Mysqld: mysqld, - Cnf: &mysqlctl.Mycnf{ - InnodbDataHomeDir: path.Join(backupRoot, "innodb"), - InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), - DataDir: path.Join(backupRoot, "datadir"), +func TestGetIncrementalFromPosGTIDSet(t *testing.T) { + tcases := []struct { + incrementalFromPos string + gtidSet string + expctError bool + }{ + { + "MySQL56/16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615", + "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615", + false, }, - HookExtraEnv: map[string]string{}, - TopoServer: ts, - Keyspace: keyspace, - Shard: shard, - }, bh) - - require.NoError(t, err) - assert.True(t, ok) - - mysqld.ExpectedExecuteSuperQueryCurrent = 0 // resest the index of what queries we've run - mysqld.ShutdownTime = time.Minute // reminder that shutdownDeadline is 1s - - ok, err = be.ExecuteBackup(ctx, mysqlctl.BackupParams{ - Logger: logutil.NewConsoleLogger(), - Mysqld: mysqld, - Cnf: &mysqlctl.Mycnf{ - InnodbDataHomeDir: path.Join(backupRoot, "innodb"), - InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), - DataDir: path.Join(backupRoot, "datadir"), + { + "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615", + "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615", + false, + }, + { + "MySQL56/16b1039f-22b6-11ed-b765-0a43f95f28a3", + "", + true, + }, + { + "MySQL56/invalid", + "", + true, + }, + { + "16b1039f-22b6-11ed-b765-0a43f95f28a3", + "", + true, }, - HookExtraEnv: map[string]string{}, - TopoServer: ts, - Keyspace: keyspace, - Shard: shard, - }, bh) - - assert.Error(t, err) - assert.False(t, ok) -} - -// needInnoDBRedoLogSubdir indicates whether we need to create a redo log subdirectory. -// Starting with MySQL 8.0.30, the InnoDB redo logs are stored in a subdirectory of the -// (/. by default) called "#innodb_redo". See: -// -// https://dev.mysql.com/doc/refman/8.0/en/innodb-redo-log.html#innodb-modifying-redo-log-capacity -func needInnoDBRedoLogSubdir() (needIt bool, err error) { - mysqldVersionStr, err := mysqlctl.GetVersionString() - if err != nil { - return needIt, err - } - _, sv, err := mysqlctl.ParseVersionString(mysqldVersionStr) - if err != nil { - return needIt, err } - versionStr := fmt.Sprintf("%d.%d.%d", sv.Major, sv.Minor, sv.Patch) - _, capableOf, _ := mysql.GetFlavor(versionStr, nil) - if capableOf == nil { - return needIt, fmt.Errorf("cannot determine database flavor details for version %s", versionStr) + for _, tcase := range tcases { + t.Run(tcase.incrementalFromPos, func(t *testing.T) { + gtidSet, err := getIncrementalFromPosGTIDSet(tcase.incrementalFromPos) + if tcase.expctError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tcase.gtidSet, gtidSet.String()) + } + }) } - return capableOf(mysql.DynamicRedoLogCapacityFlavorCapability) +} + +func TestShouldDrainForBackupBuiltIn(t *testing.T) { + be := &BuiltinBackupEngine{} + + assert.True(t, be.ShouldDrainForBackup(&tabletmanagerdatapb.BackupRequest{})) + assert.False(t, be.ShouldDrainForBackup(&tabletmanagerdatapb.BackupRequest{IncrementalFromPos: "auto"})) + assert.False(t, be.ShouldDrainForBackup(&tabletmanagerdatapb.BackupRequest{IncrementalFromPos: "99ca8ed4-399c-11ee-861b-0a43f95f28a3:1-197"})) + assert.False(t, be.ShouldDrainForBackup(&tabletmanagerdatapb.BackupRequest{IncrementalFromPos: "MySQL56/99ca8ed4-399c-11ee-861b-0a43f95f28a3:1-197"})) } diff --git a/go/vt/mysqlctl/capabilityset.go b/go/vt/mysqlctl/capabilityset.go index a9d655c2bc4..281bc41f53e 100644 --- a/go/vt/mysqlctl/capabilityset.go +++ b/go/vt/mysqlctl/capabilityset.go @@ -36,6 +36,7 @@ type capabilitySet struct { } func newCapabilitySet(f MySQLFlavor, v ServerVersion) (c capabilitySet) { + noSocketFile() c.flavor = f c.version = v return @@ -51,18 +52,6 @@ func (c *capabilitySet) hasMaria104InstallDb() bool { return c.isMariaDB() && c.version.atLeast(ServerVersion{Major: 10, Minor: 4, Patch: 0}) } -// hasDisableRedoLog tells you if the version of MySQL in use can disable redo logging. -// -// As of MySQL 8.0.21, you can disable redo logging using the ALTER INSTANCE -// DISABLE INNODB REDO_LOG statement. This functionality is intended for -// loading data into a new MySQL instance. Disabling redo logging speeds up -// data loading by avoiding redo log writes and doublewrite buffering. -// -// https://dev.mysql.com/doc/refman/8.0/en/innodb-redo-log.html#innodb-disable-redo-logging -func (c *capabilitySet) hasDisableRedoLog() bool { - return c.isMySQLLike() && c.version.atLeast(ServerVersion{Major: 8, Minor: 0, Patch: 21}) -} - // IsMySQLLike tests if the server is either MySQL // or Percona Server. At least currently, Vitess doesn't // make use of any specific Percona Server features. diff --git a/go/vt/mysqlctl/compression.go b/go/vt/mysqlctl/compression.go index ea8f96cc100..c2d3cbbe18b 100644 --- a/go/vt/mysqlctl/compression.go +++ b/go/vt/mysqlctl/compression.go @@ -49,9 +49,10 @@ var ( // CompressionEngineName specifies which compressor/decompressor to use CompressionEngineName = "pargzip" // ExternalCompressorCmd / ExternalDecompressorCmd specify the external commands compress/decompress the backups - ExternalCompressorCmd string - ExternalCompressorExt string - ExternalDecompressorCmd string + ExternalCompressorCmd string + ExternalCompressorExt string + ExternalDecompressorCmd string + ManifestExternalDecompressorCmd string errUnsupportedDeCompressionEngine = errors.New("unsupported engine in MANIFEST. You need to provide --external-decompressor if using 'external' compression engine") errUnsupportedCompressionEngine = errors.New("unsupported engine value for --compression-engine-name. supported values are 'external', 'pgzip', 'pargzip', 'zstd', 'lz4'") @@ -65,7 +66,7 @@ var ( ) func init() { - for _, cmd := range []string{"vtbackup", "vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient"} { + for _, cmd := range []string{"vtbackup", "vtcombo", "vttablet", "vttestserver"} { servenv.OnParseFor(cmd, registerBackupCompressionFlags) } } @@ -76,6 +77,7 @@ func registerBackupCompressionFlags(fs *pflag.FlagSet) { fs.StringVar(&ExternalCompressorCmd, "external-compressor", ExternalCompressorCmd, "command with arguments to use when compressing a backup.") fs.StringVar(&ExternalCompressorExt, "external-compressor-extension", ExternalCompressorExt, "extension to use when using an external compressor.") fs.StringVar(&ExternalDecompressorCmd, "external-decompressor", ExternalDecompressorCmd, "command with arguments to use when decompressing a backup.") + fs.StringVar(&ManifestExternalDecompressorCmd, "manifest-external-decompressor", ManifestExternalDecompressorCmd, "command with arguments to store in the backup manifest when compressing a backup with an external compression engine.") } func getExtensionFromEngine(engine string) (string, error) { @@ -204,9 +206,9 @@ func newBuiltinDecompressor(engine string, reader io.Reader, logger logutil.Logg return nil, err } decompressor = d - case "lz4": + case Lz4Compressor: decompressor = io.NopCloser(lz4.NewReader(reader)) - case "zstd": + case ZstdCompressor: d, err := zstd.NewReader(reader) if err != nil { return nil, err diff --git a/go/vt/mysqlctl/compression_benchmark_test.go b/go/vt/mysqlctl/compression_benchmark_test.go index 73cd684c719..de52519fa57 100644 --- a/go/vt/mysqlctl/compression_benchmark_test.go +++ b/go/vt/mysqlctl/compression_benchmark_test.go @@ -19,6 +19,8 @@ import ( "github.com/klauspost/compress/zstd" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/vt/logutil" ) @@ -372,6 +374,7 @@ func (tw *timedWriter) Write(p []byte) (nbytes int, err error) { } func TestMain(m *testing.M) { + hack.DisableProtoBufRandomness() code := m.Run() u, _ := dataURL() diff --git a/go/vt/mysqlctl/fakebackupengine.go b/go/vt/mysqlctl/fakebackupengine.go index c0fce435d35..2b8c3208ac5 100644 --- a/go/vt/mysqlctl/fakebackupengine.go +++ b/go/vt/mysqlctl/fakebackupengine.go @@ -21,6 +21,7 @@ import ( "time" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" ) type FakeBackupEngine struct { @@ -86,7 +87,7 @@ func (be *FakeBackupEngine) ExecuteRestore( return be.ExecuteRestoreReturn.Manifest, be.ExecuteRestoreReturn.Err } -func (be *FakeBackupEngine) ShouldDrainForBackup() bool { +func (be *FakeBackupEngine) ShouldDrainForBackup(req *tabletmanagerdatapb.BackupRequest) bool { be.ShouldDrainForBackupCalls = be.ShouldDrainForBackupCalls + 1 return be.ShouldDrainForBackupReturn } diff --git a/go/vt/mysqlctl/fakemysqldaemon.go b/go/vt/mysqlctl/fakemysqldaemon.go index 25b4f328de4..39ecca84156 100644 --- a/go/vt/mysqlctl/fakemysqldaemon.go +++ b/go/vt/mysqlctl/fakemysqldaemon.go @@ -20,17 +20,19 @@ import ( "context" "fmt" "reflect" + "regexp" "strings" "sync" "sync/atomic" "time" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/mysqlctl/tmutils" + mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" ) @@ -71,10 +73,10 @@ type FakeMysqlDaemon struct { // CurrentPrimaryPosition is returned by PrimaryPosition // and ReplicationStatus - CurrentPrimaryPosition mysql.Position + CurrentPrimaryPosition replication.Position // CurrentSourceFilePosition is used to determine the executed file based positioning of the replication source. - CurrentSourceFilePosition mysql.Position + CurrentSourceFilePosition replication.Position // ReplicationStatusError is used by ReplicationStatus ReplicationStatusError error @@ -101,14 +103,14 @@ type FakeMysqlDaemon struct { ReadOnly bool // SuperReadOnly is the current value of the flag - SuperReadOnly bool + SuperReadOnly atomic.Bool // SetReplicationPositionPos is matched against the input of SetReplicationPosition. // If it doesn't match, SetReplicationPosition will return an error. - SetReplicationPositionPos mysql.Position + SetReplicationPositionPos replication.Position // StartReplicationUntilAfterPos is matched against the input - StartReplicationUntilAfterPos mysql.Position + StartReplicationUntilAfterPos replication.Position // SetReplicationSourceInputs are matched against the input of SetReplicationSource // (as "%v:%v"). If all of them don't match, SetReplicationSource will return an error. @@ -117,12 +119,15 @@ type FakeMysqlDaemon struct { // SetReplicationSourceError is used by SetReplicationSource SetReplicationSourceError error + // StopReplicationError error is used by StopReplication + StopReplicationError error + // WaitPrimaryPositions is checked by WaitSourcePos, if the value is found // in it, then the function returns nil, else the function returns an error - WaitPrimaryPositions []mysql.Position + WaitPrimaryPositions []replication.Position // PromoteResult is returned by Promote - PromoteResult mysql.Position + PromoteResult replication.Position // PromoteError is used by Promote PromoteError error @@ -158,9 +163,6 @@ type FakeMysqlDaemon struct { // FetchSuperQueryResults is used by FetchSuperQuery FetchSuperQueryMap map[string]*sqltypes.Result - // BinlogPlayerEnabled is used by {Enable,Disable}BinlogPlayer - BinlogPlayerEnabled atomic.Bool - // SemiSyncPrimaryEnabled represents the state of rpl_semi_sync_master_enabled. SemiSyncPrimaryEnabled bool // SemiSyncReplicaEnabled represents the state of rpl_semi_sync_slave_enabled. @@ -169,6 +171,9 @@ type FakeMysqlDaemon struct { // TimeoutHook is a func that can be called at the beginning of any method to fake a timeout. // all a test needs to do is make it { return context.DeadlineExceeded } TimeoutHook func() error + + // Version is the version that will be returned by GetVersionString. + Version string } // NewFakeMysqlDaemon returns a FakeMysqlDaemon where mysqld appears @@ -179,6 +184,7 @@ func NewFakeMysqlDaemon(db *fakesqldb.DB) *FakeMysqlDaemon { db: db, Running: true, IOThreadRunning: true, + Version: "8.0.32", } if db != nil { result.appPool = dbconnpool.NewConnectionPool("AppConnPool", 5, time.Minute, 0, 0) @@ -224,10 +230,20 @@ func (fmd *FakeMysqlDaemon) Shutdown(ctx context.Context, cnf *Mycnf, waitForMys } // RunMysqlUpgrade is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) RunMysqlUpgrade() error { +func (fmd *FakeMysqlDaemon) RunMysqlUpgrade(ctx context.Context) error { + return nil +} + +// ApplyBinlogFile is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.ApplyBinlogFileRequest) error { return nil } +// ReadBinlogFilesTimestamps is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) ReadBinlogFilesTimestamps(ctx context.Context, req *mysqlctlpb.ReadBinlogFilesTimestampsRequest) (*mysqlctlpb.ReadBinlogFilesTimestampsResponse, error) { + return nil, nil +} + // ReinitConfig is part of the MysqlDaemon interface func (fmd *FakeMysqlDaemon) ReinitConfig(ctx context.Context, cnf *Mycnf) error { return nil @@ -262,47 +278,47 @@ func (fmd *FakeMysqlDaemon) GetServerUUID(ctx context.Context) (string, error) { } // CurrentPrimaryPositionLocked is thread-safe -func (fmd *FakeMysqlDaemon) CurrentPrimaryPositionLocked(pos mysql.Position) { +func (fmd *FakeMysqlDaemon) CurrentPrimaryPositionLocked(pos replication.Position) { fmd.mu.Lock() defer fmd.mu.Unlock() fmd.CurrentPrimaryPosition = pos } // ReplicationStatus is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) ReplicationStatus() (mysql.ReplicationStatus, error) { +func (fmd *FakeMysqlDaemon) ReplicationStatus() (replication.ReplicationStatus, error) { if fmd.ReplicationStatusError != nil { - return mysql.ReplicationStatus{}, fmd.ReplicationStatusError + return replication.ReplicationStatus{}, fmd.ReplicationStatusError } fmd.mu.Lock() defer fmd.mu.Unlock() - return mysql.ReplicationStatus{ + return replication.ReplicationStatus{ Position: fmd.CurrentPrimaryPosition, FilePosition: fmd.CurrentSourceFilePosition, RelayLogSourceBinlogEquivalentPosition: fmd.CurrentSourceFilePosition, ReplicationLagSeconds: fmd.ReplicationLagSeconds, // implemented as AND to avoid changing all tests that were // previously using Replicating = false - IOState: mysql.ReplicationStatusToState(fmt.Sprintf("%v", fmd.Replicating && fmd.IOThreadRunning)), - SQLState: mysql.ReplicationStatusToState(fmt.Sprintf("%v", fmd.Replicating)), + IOState: replication.ReplicationStatusToState(fmt.Sprintf("%v", fmd.Replicating && fmd.IOThreadRunning)), + SQLState: replication.ReplicationStatusToState(fmt.Sprintf("%v", fmd.Replicating)), SourceHost: fmd.CurrentSourceHost, SourcePort: fmd.CurrentSourcePort, }, nil } // PrimaryStatus is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) PrimaryStatus(ctx context.Context) (mysql.PrimaryStatus, error) { +func (fmd *FakeMysqlDaemon) PrimaryStatus(ctx context.Context) (replication.PrimaryStatus, error) { if fmd.PrimaryStatusError != nil { - return mysql.PrimaryStatus{}, fmd.PrimaryStatusError + return replication.PrimaryStatus{}, fmd.PrimaryStatusError } - return mysql.PrimaryStatus{ + return replication.PrimaryStatus{ Position: fmd.CurrentPrimaryPosition, FilePosition: fmd.CurrentSourceFilePosition, }, nil } // GetGTIDPurged is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) GetGTIDPurged(ctx context.Context) (mysql.Position, error) { - return mysql.Position{}, nil +func (fmd *FakeMysqlDaemon) GetGTIDPurged(ctx context.Context) (replication.Position, error) { + return replication.Position{}, nil } // ResetReplication is part of the MysqlDaemon interface. @@ -355,7 +371,7 @@ func (fmd *FakeMysqlDaemon) GetPreviousGTIDs(ctx context.Context, binlog string) } // PrimaryPosition is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) PrimaryPosition() (mysql.Position, error) { +func (fmd *FakeMysqlDaemon) PrimaryPosition() (replication.Position, error) { return fmd.CurrentPrimaryPosition, nil } @@ -364,6 +380,11 @@ func (fmd *FakeMysqlDaemon) IsReadOnly() (bool, error) { return fmd.ReadOnly, nil } +// IsSuperReadOnly is part of the MysqlDaemon interface +func (fmd *FakeMysqlDaemon) IsSuperReadOnly() (bool, error) { + return fmd.SuperReadOnly.Load(), nil +} + // SetReadOnly is part of the MysqlDaemon interface func (fmd *FakeMysqlDaemon) SetReadOnly(on bool) error { fmd.ReadOnly = on @@ -371,10 +392,10 @@ func (fmd *FakeMysqlDaemon) SetReadOnly(on bool) error { } // SetSuperReadOnly is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) SetSuperReadOnly(on bool) error { - fmd.SuperReadOnly = on +func (fmd *FakeMysqlDaemon) SetSuperReadOnly(on bool) (ResetSuperReadOnlyFunc, error) { + fmd.SuperReadOnly.Store(on) fmd.ReadOnly = on - return nil + return nil, nil } // StartReplication is part of the MysqlDaemon interface. @@ -397,7 +418,7 @@ func (fmd *FakeMysqlDaemon) RestartReplication(hookExtraEnv map[string]string) e } // StartReplicationUntilAfter is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) StartReplicationUntilAfter(ctx context.Context, pos mysql.Position) error { +func (fmd *FakeMysqlDaemon) StartReplicationUntilAfter(ctx context.Context, pos replication.Position) error { if !reflect.DeepEqual(fmd.StartReplicationUntilAfterPos, pos) { return fmt.Errorf("wrong pos for StartReplicationUntilAfter: expected %v got %v", fmd.SetReplicationPositionPos, pos) } @@ -409,6 +430,9 @@ func (fmd *FakeMysqlDaemon) StartReplicationUntilAfter(ctx context.Context, pos // StopReplication is part of the MysqlDaemon interface. func (fmd *FakeMysqlDaemon) StopReplication(hookExtraEnv map[string]string) error { + if fmd.StopReplicationError != nil { + return fmd.StopReplicationError + } return fmd.ExecuteSuperQueryList(context.Background(), []string{ "STOP SLAVE", }) @@ -422,7 +446,7 @@ func (fmd *FakeMysqlDaemon) StopIOThread(ctx context.Context) error { } // SetReplicationPosition is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) SetReplicationPosition(ctx context.Context, pos mysql.Position) error { +func (fmd *FakeMysqlDaemon) SetReplicationPosition(ctx context.Context, pos replication.Position) error { if !reflect.DeepEqual(fmd.SetReplicationPositionPos, pos) { return fmt.Errorf("wrong pos for SetReplicationPosition: expected %v got %v", fmd.SetReplicationPositionPos, pos) } @@ -450,11 +474,12 @@ func (fmd *FakeMysqlDaemon) SetReplicationSource(ctx context.Context, host strin if stopReplicationBefore { cmds = append(cmds, "STOP SLAVE") } - cmds = append(cmds, "RESET SLAVE ALL") cmds = append(cmds, "FAKE SET MASTER") if startReplicationAfter { cmds = append(cmds, "START SLAVE") } + fmd.CurrentSourceHost = host + fmd.CurrentSourcePort = port return fmd.ExecuteSuperQueryList(ctx, cmds) } @@ -464,7 +489,7 @@ func (fmd *FakeMysqlDaemon) WaitForReparentJournal(ctx context.Context, timeCrea } // WaitSourcePos is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) WaitSourcePos(_ context.Context, pos mysql.Position) error { +func (fmd *FakeMysqlDaemon) WaitSourcePos(_ context.Context, pos replication.Position) error { if fmd.TimeoutHook != nil { return fmd.TimeoutHook() } @@ -477,12 +502,12 @@ func (fmd *FakeMysqlDaemon) WaitSourcePos(_ context.Context, pos mysql.Position) } // Promote is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) Promote(hookExtraEnv map[string]string) (mysql.Position, error) { +func (fmd *FakeMysqlDaemon) Promote(hookExtraEnv map[string]string) (replication.Position, error) { if fmd.PromoteLag > 0 { time.Sleep(fmd.PromoteLag) } if fmd.PromoteError != nil { - return mysql.Position{}, fmd.PromoteError + return replication.Position{}, fmd.PromoteError } return fmd.PromoteResult, nil } @@ -527,23 +552,15 @@ func (fmd *FakeMysqlDaemon) FetchSuperQuery(ctx context.Context, query string) ( return nil, fmt.Errorf("unexpected query: %v", query) } - qr, ok := fmd.FetchSuperQueryMap[query] - if !ok { - return nil, fmt.Errorf("unexpected query: %v", query) + if qr, ok := fmd.FetchSuperQueryMap[query]; ok { + return qr, nil } - return qr, nil -} - -// EnableBinlogPlayback is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) EnableBinlogPlayback() error { - fmd.BinlogPlayerEnabled.Store(true) - return nil -} - -// DisableBinlogPlayback disable playback of binlog events -func (fmd *FakeMysqlDaemon) DisableBinlogPlayback() error { - fmd.BinlogPlayerEnabled.Store(false) - return nil + for k, qr := range fmd.FetchSuperQueryMap { + if ok, _ := regexp.MatchString(k, query); ok { + return qr, nil + } + } + return nil, fmt.Errorf("unexpected query: %v", query) } // Close is part of the MysqlDaemon interface @@ -662,6 +679,11 @@ func (fmd *FakeMysqlDaemon) SemiSyncClients() uint32 { return 0 } +// SemiSyncExtensionLoaded is part of the MysqlDaemon interface. +func (fmd *FakeMysqlDaemon) SemiSyncExtensionLoaded() (bool, error) { + return true, nil +} + // SemiSyncSettings is part of the MysqlDaemon interface. func (fmd *FakeMysqlDaemon) SemiSyncSettings() (timeout uint64, numReplicas uint32) { return 10000000, 1 @@ -673,12 +695,12 @@ func (fmd *FakeMysqlDaemon) SemiSyncReplicationStatus() (bool, error) { return fmd.SemiSyncReplicaEnabled, nil } -// GetVersionString is part of the MysqlDeamon interface. -func (fmd *FakeMysqlDaemon) GetVersionString() string { - return "" +// GetVersionString is part of the MysqlDaemon interface. +func (fmd *FakeMysqlDaemon) GetVersionString(ctx context.Context) (string, error) { + return fmd.Version, nil } -// GetVersionComment is part of the MysqlDeamon interface. -func (fmd *FakeMysqlDaemon) GetVersionComment(ctx context.Context) string { - return "" +// GetVersionComment is part of the MysqlDaemon interface. +func (fmd *FakeMysqlDaemon) GetVersionComment(ctx context.Context) (string, error) { + return "", nil } diff --git a/go/vt/mysqlctl/grpcmysqlctlclient/client.go b/go/vt/mysqlctl/grpcmysqlctlclient/client.go index af8d0ba7798..150402a8c44 100644 --- a/go/vt/mysqlctl/grpcmysqlctlclient/client.go +++ b/go/vt/mysqlctl/grpcmysqlctlclient/client.go @@ -19,16 +19,16 @@ limitations under the License. package grpcmysqlctlclient import ( + "context" "fmt" "net" "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/status" - "context" - "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/mysqlctl/mysqlctlclient" @@ -42,9 +42,14 @@ type client struct { func factory(network, addr string) (mysqlctlclient.MysqlctlClient, error) { // create the RPC client - cc, err := grpcclient.Dial(addr, grpcclient.FailFast(false), grpc.WithInsecure(), grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { //nolint:staticcheck - return net.DialTimeout(network, addr, timeout) - })) + cc, err := grpcclient.Dial( + addr, + grpcclient.FailFast(false), + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(func(ctx context.Context, addr string, + ) (net.Conn, error) { + return new(net.Dialer).DialContext(ctx, network, addr) + })) if err != nil { return nil, err } @@ -84,6 +89,23 @@ func (c *client) RunMysqlUpgrade(ctx context.Context) error { }) } +// ApplyBinlogFile is part of the MysqlctlClient interface. +func (c *client) ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.ApplyBinlogFileRequest) error { + return c.withRetry(ctx, func() error { + _, err := c.c.ApplyBinlogFile(ctx, req) + return err + }) +} + +// ReadBinlogFilesTimestamps is part of the MysqlctlClient interface. +func (c *client) ReadBinlogFilesTimestamps(ctx context.Context, req *mysqlctlpb.ReadBinlogFilesTimestampsRequest) (resp *mysqlctlpb.ReadBinlogFilesTimestampsResponse, err error) { + err = c.withRetry(ctx, func() error { + resp, err = c.c.ReadBinlogFilesTimestamps(ctx, req) + return err + }) + return resp, err +} + // ReinitConfig is part of the MysqlctlClient interface. func (c *client) ReinitConfig(ctx context.Context) error { return c.withRetry(ctx, func() error { @@ -100,6 +122,20 @@ func (c *client) RefreshConfig(ctx context.Context) error { }) } +// VersionString is part of the MysqlctlClient interface. +func (c *client) VersionString(ctx context.Context) (string, error) { + var version string + err := c.withRetry(ctx, func() error { + r, err := c.c.VersionString(ctx, &mysqlctlpb.VersionStringRequest{}) + if err != nil { + return err + } + version = r.Version + return nil + }) + return version, err +} + // Close is part of the MysqlctlClient interface. func (c *client) Close() { c.cc.Close() diff --git a/go/vt/mysqlctl/grpcmysqlctlserver/server.go b/go/vt/mysqlctl/grpcmysqlctlserver/server.go index 61903655044..84953020534 100644 --- a/go/vt/mysqlctl/grpcmysqlctlserver/server.go +++ b/go/vt/mysqlctl/grpcmysqlctlserver/server.go @@ -21,12 +21,11 @@ side of the remote execution of mysqlctl commands. package grpcmysqlctlserver import ( - "google.golang.org/grpc" - "context" - "vitess.io/vitess/go/vt/mysqlctl" + "google.golang.org/grpc" + "vitess.io/vitess/go/vt/mysqlctl" mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" ) @@ -48,8 +47,13 @@ func (s *server) Shutdown(ctx context.Context, request *mysqlctlpb.ShutdownReque } // RunMysqlUpgrade implements the server side of the MysqlctlClient interface. -func (s *server) RunMysqlUpgrade(ctx context.Context, request *mysqlctlpb.RunMysqlUpgradeRequest) (*mysqlctlpb.RunMysqlUpgradeResponse, error) { - return &mysqlctlpb.RunMysqlUpgradeResponse{}, s.mysqld.RunMysqlUpgrade() +func (s *server) RunMysqlUpgrade(ctx context.Context, _ *mysqlctlpb.RunMysqlUpgradeRequest) (*mysqlctlpb.RunMysqlUpgradeResponse, error) { + return &mysqlctlpb.RunMysqlUpgradeResponse{}, s.mysqld.RunMysqlUpgrade(ctx) +} + +// RunMysqlUpgrade implements the server side of the MysqlctlClient interface. +func (s *server) ApplyBinlogFile(ctx context.Context, request *mysqlctlpb.ApplyBinlogFileRequest) (*mysqlctlpb.ApplyBinlogFileResponse, error) { + return &mysqlctlpb.ApplyBinlogFileResponse{}, s.mysqld.ApplyBinlogFile(ctx, request) } // ReinitConfig implements the server side of the MysqlctlClient interface. @@ -62,6 +66,15 @@ func (s *server) RefreshConfig(ctx context.Context, request *mysqlctlpb.RefreshC return &mysqlctlpb.RefreshConfigResponse{}, s.mysqld.RefreshConfig(ctx, s.cnf) } +// VersionString registers the Server for RPCs. +func (s *server) VersionString(ctx context.Context, request *mysqlctlpb.VersionStringRequest) (*mysqlctlpb.VersionStringResponse, error) { + version, err := s.mysqld.GetVersionString(ctx) + if err != nil { + return nil, err + } + return &mysqlctlpb.VersionStringResponse{Version: version}, nil +} + // StartServer registers the Server for RPCs. func StartServer(s *grpc.Server, cnf *mysqlctl.Mycnf, mysqld *mysqlctl.Mysqld) { mysqlctlpb.RegisterMysqlCtlServer(s, &server{cnf: cnf, mysqld: mysqld}) diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index 020595b0277..c0f97d438e6 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -19,11 +19,12 @@ package mysqlctl import ( "context" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/mysqlctl/tmutils" + mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" ) @@ -33,7 +34,9 @@ type MysqlDaemon interface { // methods related to mysql running or not Start(ctx context.Context, cnf *Mycnf, mysqldArgs ...string) error Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bool) error - RunMysqlUpgrade() error + RunMysqlUpgrade(ctx context.Context) error + ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.ApplyBinlogFileRequest) error + ReadBinlogFilesTimestamps(ctx context.Context, req *mysqlctlpb.ReadBinlogFilesTimestampsRequest) (*mysqlctlpb.ReadBinlogFilesTimestampsResponse, error) ReinitConfig(ctx context.Context, cnf *Mycnf) error Wait(ctx context.Context, cnf *Mycnf) error @@ -49,14 +52,15 @@ type MysqlDaemon interface { // replication related methods StartReplication(hookExtraEnv map[string]string) error RestartReplication(hookExtraEnv map[string]string) error - StartReplicationUntilAfter(ctx context.Context, pos mysql.Position) error + StartReplicationUntilAfter(ctx context.Context, pos replication.Position) error StopReplication(hookExtraEnv map[string]string) error StopIOThread(ctx context.Context) error - ReplicationStatus() (mysql.ReplicationStatus, error) - PrimaryStatus(ctx context.Context) (mysql.PrimaryStatus, error) - GetGTIDPurged(ctx context.Context) (mysql.Position, error) + ReplicationStatus() (replication.ReplicationStatus, error) + PrimaryStatus(ctx context.Context) (replication.PrimaryStatus, error) + GetGTIDPurged(ctx context.Context) (replication.Position, error) SetSemiSyncEnabled(source, replica bool) error SemiSyncEnabled() (source, replica bool) + SemiSyncExtensionLoaded() (bool, error) SemiSyncStatus() (source, replica bool) SemiSyncClients() (count uint32) SemiSyncSettings() (timeout uint64, numReplicas uint32) @@ -70,19 +74,20 @@ type MysqlDaemon interface { // reparenting related methods ResetReplication(ctx context.Context) error - PrimaryPosition() (mysql.Position, error) + PrimaryPosition() (replication.Position, error) IsReadOnly() (bool, error) + IsSuperReadOnly() (bool, error) SetReadOnly(on bool) error - SetSuperReadOnly(on bool) error - SetReplicationPosition(ctx context.Context, pos mysql.Position) error + SetSuperReadOnly(on bool) (ResetSuperReadOnlyFunc, error) + SetReplicationPosition(ctx context.Context, pos replication.Position) error SetReplicationSource(ctx context.Context, host string, port int32, stopReplicationBefore bool, startReplicationAfter bool) error WaitForReparentJournal(ctx context.Context, timeCreatedNS int64) error - WaitSourcePos(context.Context, mysql.Position) error + WaitSourcePos(context.Context, replication.Position) error // Promote makes the current server the primary. It will not change // the read_only state of the server. - Promote(map[string]string) (mysql.Position, error) + Promote(map[string]string) (replication.Position, error) // Schema related methods GetSchema(ctx context.Context, dbName string, request *tabletmanagerdatapb.GetSchemaRequest) (*tabletmanagerdatapb.SchemaDefinition, error) @@ -100,10 +105,10 @@ type MysqlDaemon interface { GetAllPrivsConnection(ctx context.Context) (*dbconnpool.DBConnection, error) // GetVersionString returns the database version as a string - GetVersionString() string + GetVersionString(ctx context.Context) (string, error) // GetVersionComment returns the version comment - GetVersionComment(ctx context.Context) string + GetVersionComment(ctx context.Context) (string, error) // ExecuteSuperQueryList executes a list of queries, no result ExecuteSuperQueryList(ctx context.Context, queryList []string) error @@ -111,12 +116,6 @@ type MysqlDaemon interface { // FetchSuperQuery executes one query, returns the result FetchSuperQuery(ctx context.Context, query string) (*sqltypes.Result, error) - // EnableBinlogPlayback enables playback of binlog events - EnableBinlogPlayback() error - - // DisableBinlogPlayback disable playback of binlog events - DisableBinlogPlayback() error - // Close will close this instance of Mysqld. It will wait for all dba // queries to be finished. Close() diff --git a/go/vt/mysqlctl/mysqlctlclient/interface.go b/go/vt/mysqlctl/mysqlctlclient/interface.go index fd620acb5cb..4ab03a9df5b 100644 --- a/go/vt/mysqlctl/mysqlctlclient/interface.go +++ b/go/vt/mysqlctl/mysqlctlclient/interface.go @@ -25,6 +25,7 @@ import ( "github.com/spf13/pflag" "vitess.io/vitess/go/vt/log" + mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" "vitess.io/vitess/go/vt/servenv" ) @@ -49,12 +50,21 @@ type MysqlctlClient interface { // RunMysqlUpgrade calls Mysqld.RunMysqlUpgrade remotely. RunMysqlUpgrade(ctx context.Context) error + // ApplyBinlogFile calls Mysqld.ApplyBinlogFile remotely. + ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.ApplyBinlogFileRequest) error + + // ReadBinlogFilesTimestamps calls Mysqld.ReadBinlogFilesTimestamps remotely. + ReadBinlogFilesTimestamps(ctx context.Context, req *mysqlctlpb.ReadBinlogFilesTimestampsRequest) (*mysqlctlpb.ReadBinlogFilesTimestampsResponse, error) + // ReinitConfig calls Mysqld.ReinitConfig remotely. ReinitConfig(ctx context.Context) error // RefreshConfig calls Mysqld.RefreshConfig remotely. RefreshConfig(ctx context.Context) error + // VersionString calls Mysqld.VersionString remotely. + VersionString(ctx context.Context) (string, error) + // Close will terminate the connection. This object won't be used anymore. Close() } diff --git a/go/vt/mysqlctl/mysqld.go b/go/vt/mysqlctl/mysqld.go index f91d880bc5f..989963479f4 100644 --- a/go/vt/mysqlctl/mysqld.go +++ b/go/vt/mysqlctl/mysqld.go @@ -42,16 +42,23 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/config" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/hook" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/mysqlctlclient" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vterrors" vtenv "vitess.io/vitess/go/vt/env" + mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" + "vitess.io/vitess/go/vt/proto/vtrpc" ) var ( @@ -80,6 +87,9 @@ var ( replicationConnectRetry = 10 * time.Second versionRegex = regexp.MustCompile(`Ver ([0-9]+)\.([0-9]+)\.([0-9]+)`) + + binlogEntryCommittedTimestampRegex = regexp.MustCompile("original_committed_timestamp=([0-9]+)") + binlogEntryTimestampGTIDRegexp = regexp.MustCompile(`^#(.+) server id.*\bGTID\b`) ) // How many bytes from MySQL error log to sample for error messages @@ -145,57 +155,32 @@ func NewMysqld(dbcfgs *dbconfigs.DBConfigs) *Mysqld { result.appPool.Open(dbcfgs.AppWithDB()) /* - Unmanaged tablets are special because the MYSQL_FLAVOR detection - will not be accurate because the mysqld might not be the same - one as the server started. - - This skips the panic that checks that we can detect a server, - but also relies on none of the flavor detection features being - used at runtime. Currently this assumption is guaranteed true. + If we have an external unmanaged tablet, we can't do the flavor + detection here. We also won't need it, since mysqlctl itself is the only + one that needs capabilities and the flavor. */ if dbconfigs.GlobalDBConfigs.HasGlobalSettings() { log.Info("mysqld is unmanaged or remote. Skipping flavor detection") return result } - version, getErr := GetVersionString() - f, v, err := ParseVersionString(version) /* - By default Vitess searches in vtenv.VtMysqlRoot() for a mysqld binary. - This is historically the VT_MYSQL_ROOT env, but if it is unset or empty, - Vitess will search the PATH. See go/vt/env/env.go. - - A number of subdirs inside vtenv.VtMysqlRoot() will be searched, see - func binaryPath() for context. If no mysqld binary is found (possibly - because it is in a container or both VT_MYSQL_ROOT and VTROOT are set - incorrectly), there will be a fallback to using the MYSQL_FLAVOR env - variable. - - If MYSQL_FLAVOR is not defined, there will be a panic. - - Note: relying on MySQL_FLAVOR is not recommended, since for historical - purposes "MySQL56" actually means MySQL 5.7, which is a very strange - behavior. + If we have a socketFile here, it means we're not running inside mysqlctl. + This means we don't need the flavor and capability detection, since mysqlctl + itself is the only one that needs this. */ + if socketFile != "" { + log.Info("mysqld is remote. Skipping flavor detection") + return result + } - if getErr != nil || err != nil { - f, v, err = GetVersionFromEnv() - if err != nil { - vtenvMysqlRoot, _ := vtenv.VtMysqlRoot() - message := fmt.Sprintf(`could not auto-detect MySQL version. You may need to set your PATH so a mysqld binary can be found, or set the environment variable MYSQL_FLAVOR if mysqld is not available locally: - PATH: %s - VT_MYSQL_ROOT: %s - VTROOT: %s - vtenv.VtMysqlRoot(): %s - MYSQL_FLAVOR: %s - `, - os.Getenv("PATH"), - os.Getenv("VT_MYSQL_ROOT"), - os.Getenv("VTROOT"), - vtenvMysqlRoot, - os.Getenv("MYSQL_FLAVOR")) - panic(message) - } + version, err := GetVersionString() + if err != nil { + failVersionDetection(err) + } + f, v, err := ParseVersionString(version) + if err != nil { + failVersionDetection(err) } log.Infof("Using flavor: %v, version: %v", f, v) @@ -203,31 +188,9 @@ func NewMysqld(dbcfgs *dbconfigs.DBConfigs) *Mysqld { return result } -/* -GetVersionFromEnv returns the flavor and an assumed version based on the legacy -MYSQL_FLAVOR environment variable. - -The assumed version may not be accurate since the legacy variable only specifies -broad families of compatible versions. However, the differences between those -versions should only matter if Vitess is managing the lifecycle of mysqld, in which -case we should have a local copy of the mysqld binary from which we can fetch -the accurate version instead of falling back to this function (see GetVersionString). -*/ -func GetVersionFromEnv() (flavor MySQLFlavor, ver ServerVersion, err error) { - env := os.Getenv("MYSQL_FLAVOR") - switch env { - case "MariaDB": - return FlavorMariaDB, ServerVersion{10, 6, 11}, nil - case "MySQL80": - return FlavorMySQL, ServerVersion{8, 0, 11}, nil - case "MySQL56": - return FlavorMySQL, ServerVersion{5, 7, 10}, nil - } - return flavor, ver, fmt.Errorf("could not determine version from MYSQL_FLAVOR: %s", env) -} - // GetVersionString runs mysqld --version and returns its output as a string func GetVersionString() (string, error) { + noSocketFile() mysqlRoot, err := vtenv.VtMysqlRoot() if err != nil { return "", err @@ -277,7 +240,7 @@ func ParseVersionString(version string) (flavor MySQLFlavor, ver ServerVersion, // RunMysqlUpgrade will run the mysql_upgrade program on the current // install. Will be called only when mysqld is running with no // network and no grant tables. -func (mysqld *Mysqld) RunMysqlUpgrade() error { +func (mysqld *Mysqld) RunMysqlUpgrade(ctx context.Context) error { // Execute as remote action on mysqlctld if requested. if socketFile != "" { log.Infof("executing Mysqld.RunMysqlUpgrade() remotely via mysqlctld server: %v", socketFile) @@ -286,7 +249,7 @@ func (mysqld *Mysqld) RunMysqlUpgrade() error { return fmt.Errorf("can't dial mysqlctld: %v", err) } defer client.Close() - return client.RunMysqlUpgrade(context.TODO()) + return client.RunMysqlUpgrade(ctx) } if mysqld.capabilities.hasMySQLUpgradeInServer() { @@ -443,7 +406,7 @@ func (mysqld *Mysqld) startNoWait(ctx context.Context, cnf *Mycnf, mysqldArgs .. err := cmd.Wait() log.Infof("%v exit: %v", ts, err) - // The process exited. Trigger OnTerm callbacks, unless we were cancelled. + // The process exited. Trigger OnTerm callbacks, unless we were canceled. select { case <-cancel: default: @@ -614,7 +577,6 @@ func (mysqld *Mysqld) Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bo // If input is not nil, pipe it to the command's stdin. func execCmd(name string, args, env []string, dir string, input io.Reader) (cmd *exec.Cmd, output string, err error) { cmdPath, _ := exec.LookPath(name) - log.Infof("execCmd: %v %v %v", name, cmdPath, args) cmd = exec.Command(cmdPath, args...) cmd.Env = env @@ -625,16 +587,16 @@ func execCmd(name string, args, env []string, dir string, input io.Reader) (cmd out, err := cmd.CombinedOutput() output = string(out) if err != nil { - log.Infof("execCmd: %v failed: %v", name, err) - err = fmt.Errorf("%v: %v, output: %v", name, err, output) + log.Errorf("execCmd: %v failed: %v", name, err) + err = fmt.Errorf("%v: %w, output: %v", name, err, output) } - log.Infof("execCmd: %v output: %v", name, output) return cmd, output, err } // binaryPath does a limited path lookup for a command, // searching only within sbin and bin in the given root. func binaryPath(root, binary string) (string, error) { + noSocketFile() subdirs := []string{"sbin", "bin", "libexec", "scripts"} for _, subdir := range subdirs { binPath := path.Join(root, subdir, binary) @@ -667,7 +629,7 @@ func (mysqld *Mysqld) InitConfig(cnf *Mycnf) error { // generate / configure a my.cnf file install a skeleton database, // and apply the provided initial SQL file. func (mysqld *Mysqld) Init(ctx context.Context, cnf *Mycnf, initDBSQLFile string) error { - log.Infof("mysqlctl.Init") + log.Infof("mysqlctl.Init running with contents previously embedded from %s", initDBSQLFile) err := mysqld.InitConfig(cnf) if err != nil { log.Errorf("%s", err.Error()) @@ -695,9 +657,8 @@ func (mysqld *Mysqld) Init(ctx context.Context, cnf *Mycnf, initDBSQLFile string log.Errorf("failed starting mysqld in time: %v\n%v", err, readTailOfMysqldErrorLog(cnf.ErrorLogPath)) return err } - if initDBSQLFile == "" { // default to built-in - if err := mysqld.executeMysqlScript(params, strings.NewReader(config.DefaultInitDB)); err != nil { + if err := mysqld.executeMysqlScript(ctx, params, config.DefaultInitDB); err != nil { return fmt.Errorf("failed to initialize mysqld: %v", err) } return nil @@ -709,7 +670,11 @@ func (mysqld *Mysqld) Init(ctx context.Context, cnf *Mycnf, initDBSQLFile string return fmt.Errorf("can't open init_db_sql_file (%v): %v", initDBSQLFile, err) } defer sqlFile.Close() - if err := mysqld.executeMysqlScript(params, sqlFile); err != nil { + script, err := io.ReadAll(sqlFile) + if err != nil { + return fmt.Errorf("can't read init_db_sql_file (%v): %v", initDBSQLFile, err) + } + if err := mysqld.executeMysqlScript(ctx, params, string(script)); err != nil { return fmt.Errorf("can't run init_db_sql_file (%v): %v", initDBSQLFile, err) } return nil @@ -1052,33 +1017,25 @@ func deleteTopDir(dir string) (removalErr error) { return } -// executeMysqlScript executes a .sql script from an io.Reader with the mysql -// command line tool. It uses the connParams as is, not adding credentials. -func (mysqld *Mysqld) executeMysqlScript(connParams *mysql.ConnParams, sql io.Reader) error { - dir, err := vtenv.VtMysqlRoot() - if err != nil { - return err - } - name, err := binaryPath(dir, "mysql") +// executeMysqlScript executes the contents of an SQL script as a string. +// It uses the connParams as is, not adding credentials. +func (mysqld *Mysqld) executeMysqlScript(ctx context.Context, connParams *mysql.ConnParams, sql string) error { + connector := dbconfigs.New(connParams) + conn, err := connector.Connect(ctx) if err != nil { return err } - cnf, err := mysqld.defaultsExtraFile(connParams) - if err != nil { - return err - } - defer os.Remove(cnf) - args := []string{ - "--defaults-extra-file=" + cnf, - "--batch", - } - env, err := buildLdPaths() + defer conn.Close() + + _, more, err := conn.ExecuteFetchMulti(sql, -1, false) if err != nil { return err } - _, _, err = execCmd(name, args, env, dir, sql) - if err != nil { - return err + for more { + _, more, _, err = conn.ReadQueryResult(0, false) + if err != nil { + return err + } } return nil } @@ -1110,7 +1067,7 @@ socket=%v `, connParams.Uname, connParams.Pass, connParams.UnixSocket) } - tmpfile, err := os.CreateTemp("", "example") + tmpfile, err := os.CreateTemp("", "defaults-extra-file-") if err != nil { return "", err } @@ -1177,28 +1134,45 @@ func buildLdPaths() ([]string, error) { return ldPaths, nil } -// GetVersionString is part of the MysqlDeamon interface. -func (mysqld *Mysqld) GetVersionString() string { - return fmt.Sprintf("%d.%d.%d", mysqld.capabilities.version.Major, mysqld.capabilities.version.Minor, mysqld.capabilities.version.Patch) +// GetVersionString is part of the MysqlExecutor interface. +func (mysqld *Mysqld) GetVersionString(ctx context.Context) (string, error) { + // Execute as remote action on mysqlctld to ensure we get the actual running MySQL version. + if socketFile != "" { + client, err := mysqlctlclient.New("unix", socketFile) + if err != nil { + return "", fmt.Errorf("can't dial mysqlctld: %v", err) + } + defer client.Close() + return client.VersionString(ctx) + } + return GetVersionString() } // GetVersionComment gets the version comment. -func (mysqld *Mysqld) GetVersionComment(ctx context.Context) string { +func (mysqld *Mysqld) GetVersionComment(ctx context.Context) (string, error) { qr, err := mysqld.FetchSuperQuery(ctx, "select @@global.version_comment") if err != nil { - return "" + return "", err } if len(qr.Rows) != 1 { - return "" + return "", fmt.Errorf("unexpected result length: %v", len(qr.Rows)) } res := qr.Named().Row() - versionComment, _ := res.ToString("@@global.version_comment") - return versionComment + return res.ToString("@@global.version_comment") } -// applyBinlogFile extracts a binary log file and applies it to MySQL. It is the equivalent of: +// ApplyBinlogFile extracts a binary log file and applies it to MySQL. It is the equivalent of: // $ mysqlbinlog --include-gtids binlog.file | mysql -func (mysqld *Mysqld) applyBinlogFile(binlogFile string, includeGTIDs mysql.GTIDSet) error { +func (mysqld *Mysqld) ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.ApplyBinlogFileRequest) error { + if socketFile != "" { + log.Infof("executing Mysqld.ApplyBinlogFile() remotely via mysqlctld server: %v", socketFile) + client, err := mysqlctlclient.New("unix", socketFile) + if err != nil { + return fmt.Errorf("can't dial mysqlctld: %v", err) + } + defer client.Close() + return client.ApplyBinlogFile(ctx, req) + } var pipe io.ReadCloser var mysqlbinlogCmd *exec.Cmd var mysqlCmd *exec.Cmd @@ -1217,23 +1191,31 @@ func (mysqld *Mysqld) applyBinlogFile(binlogFile string, includeGTIDs mysql.GTID return err } args := []string{} - if gtids := includeGTIDs.String(); gtids != "" { + if gtids := req.BinlogRestorePosition; gtids != "" { args = append(args, "--include-gtids", gtids, ) } - args = append(args, binlogFile) + if restoreToTimestamp := protoutil.TimeFromProto(req.BinlogRestoreDatetime).UTC(); !restoreToTimestamp.IsZero() { + args = append(args, + "--stop-datetime", + restoreToTimestamp.Format(sqltypes.TimestampFormat), + ) + } + + args = append(args, req.BinlogFileName) mysqlbinlogCmd = exec.Command(name, args...) mysqlbinlogCmd.Dir = dir mysqlbinlogCmd.Env = env - log.Infof("applyBinlogFile: running %#v", mysqlbinlogCmd) + log.Infof("ApplyBinlogFile: running mysqlbinlog command: %#v", mysqlbinlogCmd) pipe, err = mysqlbinlogCmd.StdoutPipe() // to be piped into mysql if err != nil { return err } } + var mysqlErrFile *os.File { name, err := binaryPath(dir, "mysql") if err != nil { @@ -1245,30 +1227,235 @@ func (mysqld *Mysqld) applyBinlogFile(binlogFile string, includeGTIDs mysql.GTID } cnf, err := mysqld.defaultsExtraFile(params) if err != nil { - return err + return vterrors.Wrapf(err, "failed to create defaults extra file") } defer os.Remove(cnf) args := []string{ "--defaults-extra-file=" + cnf, } + + mysqlErrFile, err = os.CreateTemp("", "err-mysql-") + if err != nil { + return err + } + defer os.Remove(mysqlErrFile.Name()) + + // We disable super_read_only, in case it is in the default MySQL startup + // parameters. We do it blindly, since this will fail on MariaDB, which doesn't + // have super_read_only This is safe, since we're restarting MySQL after the restore anyway + log.Infof("ApplyBinlogFile: disabling super_read_only") + resetFunc, err := mysqld.SetSuperReadOnly(false) + if err != nil { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERUnknownSystemVariable { + log.Warningf("ApplyBinlogFile: server does not know about super_read_only, continuing anyway...") + } else { + log.Errorf("ApplyBinlogFile: unexpected error while trying to set super_read_only: %v", err) + return err + } + } + if resetFunc != nil { + defer func() { + err := resetFunc() + if err != nil { + log.Error("Not able to set super_read_only to its original value during ApplyBinlogFile.") + } + }() + } + mysqlCmd = exec.Command(name, args...) mysqlCmd.Dir = dir mysqlCmd.Env = env mysqlCmd.Stdin = pipe // piped from mysqlbinlog + + mysqlCmd.Stderr = mysqlErrFile + log.Infof("ApplyBinlogFile: running mysql command: %#v with errfile=%v", mysqlCmd, mysqlErrFile.Name()) } // Run both processes, piped: if err := mysqlbinlogCmd.Start(); err != nil { return err } if err := mysqlCmd.Start(); err != nil { - return err + return vterrors.Wrapf(err, "failed to start mysql") } // Wait for both to complete: if err := mysqlbinlogCmd.Wait(); err != nil { - return err + return vterrors.Wrapf(err, "mysqlbinlog command failed") } if err := mysqlCmd.Wait(); err != nil { - return err + if mysqlErrFile != nil { + errFileContent, _ := os.ReadFile(mysqlErrFile.Name()) + if len(errFileContent) > 0 { + err = vterrors.Wrapf(err, "with error output: %s", string(errFileContent)) + } + } + return vterrors.Wrapf(err, "waiting on mysql command") } return nil } + +// parseBinlogEntryTimestamp attempts to extract a timestamp from a binlog entry. +func parseBinlogEntryTimestamp(logEntry string) (found bool, t time.Time, err error) { + if len(logEntry) == 0 { + return false, t, nil + } + if logEntry[0] != '#' { + return false, t, nil + } + if submatch := binlogEntryCommittedTimestampRegex.FindStringSubmatch(logEntry); submatch != nil { + // MySQL 8.0 + binlogEntryCommittedTimestamp := submatch[1] + unixMicros, err := strconv.ParseInt(binlogEntryCommittedTimestamp, 10, 64) + if err != nil { + return false, t, err + } + return true, time.UnixMicro(unixMicros), nil + } + if submatch := binlogEntryTimestampGTIDRegexp.FindStringSubmatch(logEntry); submatch != nil { + // MySQL 5.7 + t, err = ParseBinlogTimestamp(submatch[1]) + if err != nil { + return false, t, err + } + return true, t, nil + } + return false, t, nil +} + +// scanBinlogTimestamp invokes a `mysqlbinlog` binary to look for a timestamp in the given binary. The function +// either looks for the first such timestamp or the last. +func (mysqld *Mysqld) scanBinlogTimestamp(mysqlbinlogDir string, mysqlbinlogEnv []string, mysqlbinlogName string, binlogFile string, stopAtFirst bool) (matchedTime time.Time, matchFound bool, err error) { + args := []string{binlogFile} + mysqlbinlogCmd := exec.Command(mysqlbinlogName, args...) + mysqlbinlogCmd.Dir = mysqlbinlogDir + mysqlbinlogCmd.Env = mysqlbinlogEnv + log.Infof("ApplyBinlogFile: running mysqlbinlog command: %#v", mysqlbinlogCmd) + pipe, err := mysqlbinlogCmd.StdoutPipe() // to be piped into mysql + if err != nil { + return matchedTime, false, err + } + scanComplete := make(chan error) + intentionalKill := false + scan := func() { + defer close(scanComplete) + defer func() { + intentionalKill = true + mysqlbinlogCmd.Process.Kill() // ensures the binlog file is released + }() + // Read line by line and process it + scanner := bufio.NewScanner(pipe) + for scanner.Scan() { + logEntry := scanner.Text() + + found, t, err := parseBinlogEntryTimestamp(logEntry) + if err != nil { + scanComplete <- err + return + } + if found { + matchedTime = t + matchFound = true + } + if found && stopAtFirst { + // Found the first timestamp and it's all we need. We won't scan any further and so we should also + // kill mysqlbinlog (otherwise it keeps waiting until we've read the entire pipe). + return + } + } + } + if err := mysqlbinlogCmd.Start(); err != nil { + return matchedTime, false, err + } + go scan() + if err := mysqlbinlogCmd.Wait(); err != nil && !intentionalKill { + return matchedTime, false, vterrors.Wrapf(err, "waiting on mysqlbinlog command in ReadBinlogFilesTimestamps") + } + if err := <-scanComplete; err != nil { + return matchedTime, false, vterrors.Wrapf(err, "scanning mysqlbinlog output in ReadBinlogFilesTimestamps ") + } + return matchedTime, matchFound, nil +} + +// ReadBinlogFilesTimestamps reads all given binlog files via `mysqlbinlog` command and returns the first and last found transaction timestamps +func (mysqld *Mysqld) ReadBinlogFilesTimestamps(ctx context.Context, req *mysqlctlpb.ReadBinlogFilesTimestampsRequest) (*mysqlctlpb.ReadBinlogFilesTimestampsResponse, error) { + if len(req.BinlogFileNames) == 0 { + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "empty binlog list in ReadBinlogFilesTimestampsRequest") + } + if socketFile != "" { + log.Infof("executing Mysqld.ReadBinlogFilesTimestamps() remotely via mysqlctld server: %v", socketFile) + client, err := mysqlctlclient.New("unix", socketFile) + if err != nil { + return nil, fmt.Errorf("can't dial mysqlctld: %v", err) + } + defer client.Close() + return client.ReadBinlogFilesTimestamps(ctx, req) + } + dir, err := vtenv.VtMysqlRoot() + if err != nil { + return nil, err + } + env, err := buildLdPaths() + if err != nil { + return nil, err + } + mysqlbinlogName, err := binaryPath(dir, "mysqlbinlog") + if err != nil { + return nil, err + } + + resp := &mysqlctlpb.ReadBinlogFilesTimestampsResponse{} + // Find first timestamp + for _, binlogFile := range req.BinlogFileNames { + t, found, err := mysqld.scanBinlogTimestamp(dir, env, mysqlbinlogName, binlogFile, true) + if err != nil { + return nil, err + } + if found { + resp.FirstTimestamp = protoutil.TimeToProto(t) + resp.FirstTimestampBinlog = binlogFile + break + } + } + // Find last timestamp + for i := len(req.BinlogFileNames) - 1; i >= 0; i-- { + binlogFile := req.BinlogFileNames[i] + t, found, err := mysqld.scanBinlogTimestamp(dir, env, mysqlbinlogName, binlogFile, false) + if err != nil { + return nil, err + } + if found { + resp.LastTimestamp = protoutil.TimeToProto(t) + resp.LastTimestampBinlog = binlogFile + break + } + } + return resp, nil +} + +// noSocketFile panics if socketFile is set. This is to prevent +// incorrect use of settings not supported when we're running +// remote through mysqlctl. +func noSocketFile() { + if socketFile != "" { + // We log an error for now until we fix the issue with ApplySchema surfacing in MoveTables. + // See https://github.com/vitessio/vitess/issues/13203 and https://github.com/vitessio/vitess/pull/13178 + // panic("Running remotely through mysqlctl, socketFile must not be set") + log.Warning("Running remotely through mysqlctl and thus socketFile should not be set") + } +} + +func failVersionDetection(err error) { + vtenvMysqlRoot, _ := vtenv.VtMysqlRoot() + message := fmt.Sprintf(`could not auto-detect MySQL version: %v +You may need to set your PATH so a mysqld binary can be found: + PATH: %s + VT_MYSQL_ROOT: %s + VTROOT: %s + vtenv.VtMysqlRoot(): %s + `, + err, + os.Getenv("PATH"), + os.Getenv("VT_MYSQL_ROOT"), + os.Getenv("VTROOT"), + vtenvMysqlRoot) + panic(message) +} diff --git a/go/vt/mysqlctl/mysqld_test.go b/go/vt/mysqlctl/mysqld_test.go index 33fe00727e4..2053c0f0cc9 100644 --- a/go/vt/mysqlctl/mysqld_test.go +++ b/go/vt/mysqlctl/mysqld_test.go @@ -17,8 +17,11 @@ limitations under the License. package mysqlctl import ( - "os" "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type testcase struct { @@ -107,35 +110,71 @@ func TestParseVersionString(t *testing.T) { } -func TestAssumeVersionString(t *testing.T) { +func TestRegexps(t *testing.T) { + { + submatch := binlogEntryTimestampGTIDRegexp.FindStringSubmatch(`#230608 13:14:31 server id 484362839 end_log_pos 259 CRC32 0xc07510d0 GTID last_committed=0 sequence_number=1 rbr_only=yes`) + require.NotEmpty(t, submatch) + assert.Equal(t, "230608 13:14:31", submatch[1]) + _, err := ParseBinlogTimestamp(submatch[1]) + assert.NoError(t, err) + } + { + submatch := binlogEntryTimestampGTIDRegexp.FindStringSubmatch(`#230608 13:14:31 server id 484362839 end_log_pos 322 CRC32 0x651af842 Query thread_id=62 exec_time=0 error_code=0`) + assert.Empty(t, submatch) + } + + { + submatch := binlogEntryCommittedTimestampRegex.FindStringSubmatch(`#230605 16:06:34 server id 22233 end_log_pos 1037 CRC32 0xa4707c5b GTID last_committed=4 sequence_number=5 rbr_only=no original_committed_timestamp=1685970394031366 immediate_commit_timestamp=1685970394032458 transaction_length=186`) + require.NotEmpty(t, submatch) + assert.Equal(t, "1685970394031366", submatch[1]) + } + { + submatch := binlogEntryCommittedTimestampRegex.FindStringSubmatch(`#230608 13:14:31 server id 484362839 end_log_pos 322 CRC32 0x651af842 Query thread_id=62 exec_time=0 error_code=0`) + assert.Empty(t, submatch) + } - // In these cases, the versionstring is nonsensical or unspecified. - // MYSQL_FLAVOR is used instead. +} - var testcases = []testcase{ +func TestParseBinlogEntryTimestamp(t *testing.T) { + tcases := []struct { + name string + entry string + found bool + tm time.Time + }{ { - versionString: "MySQL80", - version: ServerVersion{8, 0, 11}, - flavor: FlavorMySQL, + name: "empty", + entry: "", }, { - versionString: "MySQL56", - version: ServerVersion{5, 7, 10}, // Yes, this has to lie! - flavor: FlavorMySQL, // There was no MySQL57 option + name: "irrelevant", + entry: "/*!80001 SET @@session.original_commit_timestamp=1685970394031366*//*!*/;", }, { - versionString: "MariaDB", - version: ServerVersion{10, 6, 11}, - flavor: FlavorMariaDB, + name: "irrelevant 2", + entry: "#230605 16:06:34 server id 22233 end_log_pos 1139 CRC32 0x9fa6f3c8 Query thread_id=21 exec_time=0 error_code=0", + }, + { + name: "mysql80", + entry: "#230605 16:06:34 server id 22233 end_log_pos 1037 CRC32 0xa4707c5b GTID last_committed=4 sequence_number=5 rbr_only=no original_committed_timestamp=1685970394031366 immediate_commit_timestamp=1685970394032458 transaction_length=186", + found: true, + tm: time.UnixMicro(1685970394031366), + }, + { + name: "mysql57", + entry: "#230608 13:14:31 server id 484362839 end_log_pos 259 CRC32 0xc07510d0 GTID last_committed=0 sequence_number=1 rbr_only=yes", + found: true, + tm: time.Date(2023, time.June, 8, 13, 14, 31, 0, time.UTC), }, } - - for _, testcase := range testcases { - os.Setenv("MYSQL_FLAVOR", testcase.versionString) - f, v, err := GetVersionFromEnv() - if v != testcase.version || f != testcase.flavor || err != nil { - t.Errorf("GetVersionFromEnv() failed for: %#v, Got: %#v, %#v Expected: %#v, %#v", testcase.versionString, v, f, testcase.version, testcase.flavor) - } + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + found, tm, err := parseBinlogEntryTimestamp(tcase.entry) + assert.NoError(t, err) + assert.Equal(t, tcase.found, found) + if tcase.found { + assert.Equal(t, tcase.tm, tm) + } + }) } - } diff --git a/go/vt/mysqlctl/query.go b/go/vt/mysqlctl/query.go index 348600e28ae..ceed3f58e03 100644 --- a/go/vt/mysqlctl/query.go +++ b/go/vt/mysqlctl/query.go @@ -22,7 +22,7 @@ import ( "strings" "time" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/log" @@ -38,7 +38,7 @@ func getPoolReconnect(ctx context.Context, pool *dbconnpool.ConnectionPool) (*db // Run a test query to see if this connection is still good. if _, err := conn.ExecuteFetch("SELECT 1", 1, false); err != nil { // If we get a connection error, try to reconnect. - if sqlErr, ok := err.(*mysql.SQLError); ok && (sqlErr.Number() == mysql.CRServerGone || sqlErr.Number() == mysql.CRServerLost) { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && (sqlErr.Number() == sqlerror.CRServerGone || sqlErr.Number() == sqlerror.CRServerLost) { if err := conn.Reconnect(ctx); err != nil { conn.Recycle() return nil, err @@ -134,7 +134,7 @@ func (mysqld *Mysqld) executeFetchContext(ctx context.Context, conn *dbconnpool. default: } - // The context expired or was cancelled. + // The context expired or was canceled. // Try to kill the connection to effectively cancel the ExecuteFetch(). connID := conn.ID() log.Infof("Mysqld.executeFetchContext(): killing connID %v due to timeout of query: %v", connID, query) @@ -147,7 +147,7 @@ func (mysqld *Mysqld) executeFetchContext(ctx context.Context, conn *dbconnpool. // Close the connection. Upon Recycle() it will be thrown out. conn.Close() // ExecuteFetch() may have succeeded before we tried to kill it. - // If ExecuteFetch() had returned because we cancelled it, + // If ExecuteFetch() had returned because we canceled it, // then executeErr would be an error like "MySQL has gone away". if executeErr == nil { return qr, executeErr diff --git a/go/vt/mysqlctl/redo_log.go b/go/vt/mysqlctl/redo_log.go index e29a28ae49f..a0874c7b28d 100644 --- a/go/vt/mysqlctl/redo_log.go +++ b/go/vt/mysqlctl/redo_log.go @@ -21,10 +21,6 @@ import ( "fmt" ) -func (mysqld *Mysqld) BinaryHasDisableRedoLog() bool { - return mysqld.capabilities.hasDisableRedoLog() -} - func (mysqld *Mysqld) DisableRedoLog(ctx context.Context) error { return mysqld.ExecuteSuperQuery(ctx, "ALTER INSTANCE DISABLE INNODB REDO_LOG") } diff --git a/go/vt/mysqlctl/reparent.go b/go/vt/mysqlctl/reparent.go index 5a25cf8d7e3..b76e342d0cd 100644 --- a/go/vt/mysqlctl/reparent.go +++ b/go/vt/mysqlctl/reparent.go @@ -21,41 +21,43 @@ This file contains the reparenting methods for mysqlctl. */ import ( - "fmt" + "context" "time" - "vitess.io/vitess/go/vt/sidecardb" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/log" - - "context" ) -// GenerateInitialBinlogEntry is used to create a binlog entry when a primary comes up and we need to get a -// MySQL position so that we can set it as the starting position for replicas to do MySQL Replication from. +// GenerateInitialBinlogEntry is used to create a binlog entry when +// a primary comes up and we need to get a MySQL position so that we +// can set it as the starting position for replicas to start MySQL +// Replication from. func GenerateInitialBinlogEntry() string { - return sidecardb.CreateSidecarDatabaseQuery + return sidecar.GetCreateQuery() } // PopulateReparentJournal returns the SQL command to use to populate -// the _vt.reparent_journal table, as well as the time_created_ns +// the reparent_journal table, as well as the time_created_ns // value used. -func PopulateReparentJournal(timeCreatedNS int64, actionName, primaryAlias string, pos mysql.Position) string { - posStr := mysql.EncodePosition(pos) - if len(posStr) > mysql.MaximumPositionSize { - posStr = posStr[:mysql.MaximumPositionSize] +func PopulateReparentJournal(timeCreatedNS int64, actionName, primaryAlias string, pos replication.Position) string { + posStr := replication.EncodePosition(pos) + if len(posStr) > replication.MaximumPositionSize { + posStr = posStr[:replication.MaximumPositionSize] } - return fmt.Sprintf("INSERT INTO _vt.reparent_journal "+ + return sqlparser.BuildParsedQuery("INSERT INTO %s.reparent_journal "+ "(time_created_ns, action_name, primary_alias, replication_position) "+ - "VALUES (%v, '%v', '%v', '%v')", - timeCreatedNS, actionName, primaryAlias, posStr) + "VALUES (%d, '%s', '%s', '%s')", sidecar.GetIdentifier(), + timeCreatedNS, actionName, primaryAlias, posStr).Query } // queryReparentJournal returns the SQL query to use to query the database // for a reparent_journal row. func queryReparentJournal(timeCreatedNS int64) string { - return fmt.Sprintf("SELECT action_name, primary_alias, replication_position FROM _vt.reparent_journal WHERE time_created_ns=%v", timeCreatedNS) + return sqlparser.BuildParsedQuery("SELECT action_name, primary_alias, replication_position FROM %s.reparent_journal WHERE time_created_ns=%d", + sidecar.GetIdentifier(), timeCreatedNS).Query } // WaitForReparentJournal will wait until the context is done for @@ -83,11 +85,11 @@ func (mysqld *Mysqld) WaitForReparentJournal(ctx context.Context, timeCreatedNS } // Promote will promote this server to be the new primary. -func (mysqld *Mysqld) Promote(hookExtraEnv map[string]string) (mysql.Position, error) { +func (mysqld *Mysqld) Promote(hookExtraEnv map[string]string) (replication.Position, error) { ctx := context.TODO() conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { - return mysql.Position{}, err + return replication.Position{}, err } defer conn.Recycle() @@ -104,7 +106,7 @@ func (mysqld *Mysqld) Promote(hookExtraEnv map[string]string) (mysql.Position, e } if err := mysqld.executeSuperQueryListConn(ctx, conn, cmds); err != nil { - return mysql.Position{}, err + return replication.Position{}, err } return conn.PrimaryPosition() } diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index 3a4aee6e063..2b92f5d961d 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -29,14 +29,14 @@ import ( "strings" "time" - "vitess.io/vitess/go/vt/vtgate/evalengine" - - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/vt/hook" "vitess.io/vitess/go/vt/log" ) +type ResetSuperReadOnlyFunc func() error + // WaitForReplicationStart waits until the deadline for replication to start. // This validates the current primary is correct and can be connected to. func WaitForReplicationStart(mysqld MysqlDaemon, replicaStartDeadline int) error { @@ -85,7 +85,7 @@ func (mysqld *Mysqld) StartReplication(hookExtraEnv map[string]string) error { } // StartReplicationUntilAfter starts replication until replication has come to `targetPos`, then it stops replication -func (mysqld *Mysqld) StartReplicationUntilAfter(ctx context.Context, targetPos mysql.Position) error { +func (mysqld *Mysqld) StartReplicationUntilAfter(ctx context.Context, targetPos replication.Position) error { conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { return err @@ -98,7 +98,7 @@ func (mysqld *Mysqld) StartReplicationUntilAfter(ctx context.Context, targetPos } // StartSQLThreadUntilAfter starts replication's SQL thread(s) until replication has come to `targetPos`, then it stops it -func (mysqld *Mysqld) StartSQLThreadUntilAfter(ctx context.Context, targetPos mysql.Position) error { +func (mysqld *Mysqld) StartSQLThreadUntilAfter(ctx context.Context, targetPos replication.Position) error { conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { return err @@ -181,7 +181,7 @@ func (mysqld *Mysqld) GetMysqlPort() (int32, error) { if len(qr.Rows) != 1 { return 0, errors.New("no port variable in mysql") } - utemp, err := evalengine.ToUint64(qr.Rows[0][1]) + utemp, err := qr.Rows[0][1].ToCastUint64() if err != nil { return 0, err } @@ -197,7 +197,7 @@ func (mysqld *Mysqld) GetServerID(ctx context.Context) (uint32, error) { if len(qr.Rows) != 1 { return 0, errors.New("no server_id in mysql") } - utemp, err := evalengine.ToUint64(qr.Rows[0][0]) + utemp, err := qr.Rows[0][0].ToCastUint64() if err != nil { return 0, err } @@ -230,6 +230,22 @@ func (mysqld *Mysqld) IsReadOnly() (bool, error) { return false, nil } +// IsSuperReadOnly return true if the instance is super read only +func (mysqld *Mysqld) IsSuperReadOnly() (bool, error) { + qr, err := mysqld.FetchSuperQuery(context.TODO(), "SELECT @@global.super_read_only") + if err != nil { + return false, err + } + if err == nil && len(qr.Rows) == 1 { + sro := qr.Rows[0][0].ToString() + if sro == "1" || sro == "ON" { + return true, nil + } + } + + return false, nil +} + // SetReadOnly set/unset the read_only flag func (mysqld *Mysqld) SetReadOnly(on bool) error { // temp logging, to be removed in v17 @@ -240,8 +256,7 @@ func (mysqld *Mysqld) SetReadOnly(on bool) error { case true: newState = "ReadOnly" } - log.Infof("SetReadOnly setting connection setting of %s:%d to : %s", - mysqld.dbcfgs.Host, mysqld.dbcfgs.Port, newState) + log.Infof("SetReadOnly setting to : %s", newState) query := "SET GLOBAL read_only = " if on { @@ -252,19 +267,56 @@ func (mysqld *Mysqld) SetReadOnly(on bool) error { return mysqld.ExecuteSuperQuery(context.TODO(), query) } -// SetSuperReadOnly set/unset the super_read_only flag -func (mysqld *Mysqld) SetSuperReadOnly(on bool) error { +// SetSuperReadOnly set/unset the super_read_only flag. +// Returns a function which is called to set super_read_only back to its original value. +func (mysqld *Mysqld) SetSuperReadOnly(on bool) (ResetSuperReadOnlyFunc, error) { + // return function for switching `OFF` super_read_only + var resetFunc ResetSuperReadOnlyFunc + var disableFunc = func() error { + query := "SET GLOBAL super_read_only = 'OFF'" + err := mysqld.ExecuteSuperQuery(context.Background(), query) + return err + } + + // return function for switching `ON` super_read_only. + var enableFunc = func() error { + query := "SET GLOBAL super_read_only = 'ON'" + err := mysqld.ExecuteSuperQuery(context.Background(), query) + return err + } + + superReadOnlyEnabled, err := mysqld.IsSuperReadOnly() + if err != nil { + return nil, err + } + + // If non-idempotent then set the right call-back. + // We are asked to turn on super_read_only but original value is false, + // therefore return disableFunc, that can be used as defer by caller. + if on && !superReadOnlyEnabled { + resetFunc = disableFunc + } + // We are asked to turn off super_read_only but original value is true, + // therefore return enableFunc, that can be used as defer by caller. + if !on && superReadOnlyEnabled { + resetFunc = enableFunc + } + query := "SET GLOBAL super_read_only = " if on { - query += "ON" + query += "'ON'" } else { - query += "OFF" + query += "'OFF'" } - return mysqld.ExecuteSuperQuery(context.TODO(), query) + if err := mysqld.ExecuteSuperQuery(context.Background(), query); err != nil { + return nil, err + } + + return resetFunc, nil } // WaitSourcePos lets replicas wait to given replication position -func (mysqld *Mysqld) WaitSourcePos(ctx context.Context, targetPos mysql.Position) error { +func (mysqld *Mysqld) WaitSourcePos(ctx context.Context, targetPos replication.Position) error { // Get a connection. conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { @@ -276,7 +328,7 @@ func (mysqld *Mysqld) WaitSourcePos(ctx context.Context, targetPos mysql.Positio // unless that flavor is also filePos. waitCommandName := "WaitUntilPositionCommand" var query string - if targetPos.MatchesFlavor(mysql.FilePosFlavorID) { + if targetPos.MatchesFlavor(replication.FilePosFlavorID) { // If we are the primary, WaitUntilFilePositionCommand will fail. // But position is most likely reached. So, check the position // first. @@ -332,10 +384,10 @@ func (mysqld *Mysqld) WaitSourcePos(ctx context.Context, targetPos mysql.Positio } // ReplicationStatus returns the server replication status -func (mysqld *Mysqld) ReplicationStatus() (mysql.ReplicationStatus, error) { +func (mysqld *Mysqld) ReplicationStatus() (replication.ReplicationStatus, error) { conn, err := getPoolReconnect(context.TODO(), mysqld.dbaPool) if err != nil { - return mysql.ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } defer conn.Recycle() @@ -343,10 +395,10 @@ func (mysqld *Mysqld) ReplicationStatus() (mysql.ReplicationStatus, error) { } // PrimaryStatus returns the primary replication statuses -func (mysqld *Mysqld) PrimaryStatus(ctx context.Context) (mysql.PrimaryStatus, error) { +func (mysqld *Mysqld) PrimaryStatus(ctx context.Context) (replication.PrimaryStatus, error) { conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { - return mysql.PrimaryStatus{}, err + return replication.PrimaryStatus{}, err } defer conn.Recycle() @@ -354,10 +406,10 @@ func (mysqld *Mysqld) PrimaryStatus(ctx context.Context) (mysql.PrimaryStatus, e } // GetGTIDPurged returns the gtid purged statuses -func (mysqld *Mysqld) GetGTIDPurged(ctx context.Context) (mysql.Position, error) { +func (mysqld *Mysqld) GetGTIDPurged(ctx context.Context) (replication.Position, error) { conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { - return mysql.Position{}, err + return replication.Position{}, err } defer conn.Recycle() @@ -365,10 +417,10 @@ func (mysqld *Mysqld) GetGTIDPurged(ctx context.Context) (mysql.Position, error) } // PrimaryPosition returns the primary replication position. -func (mysqld *Mysqld) PrimaryPosition() (mysql.Position, error) { +func (mysqld *Mysqld) PrimaryPosition() (replication.Position, error) { conn, err := getPoolReconnect(context.TODO(), mysqld.dbaPool) if err != nil { - return mysql.Position{}, err + return replication.Position{}, err } defer conn.Recycle() @@ -377,7 +429,7 @@ func (mysqld *Mysqld) PrimaryPosition() (mysql.Position, error) { // SetReplicationPosition sets the replication position at which the replica will resume // when its replication is started. -func (mysqld *Mysqld) SetReplicationPosition(ctx context.Context, pos mysql.Position) error { +func (mysqld *Mysqld) SetReplicationPosition(ctx context.Context, pos replication.Position) error { conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { return err @@ -402,18 +454,10 @@ func (mysqld *Mysqld) SetReplicationSource(ctx context.Context, host string, por } defer conn.Recycle() - cmds := []string{} + var cmds []string if stopReplicationBefore { cmds = append(cmds, conn.StopReplicationCommand()) } - // Reset replication parameters commands makes the instance forget the source host port - // This is required because sometimes MySQL gets stuck due to improper initialization of - // master info structure or related failures and throws errors like - // ERROR 1201 (HY000): Could not initialize master info structure; more error messages can be found in the MySQL error log - // These errors can only be resolved by resetting the replication parameters, otherwise START SLAVE fails. - // Therefore, we have elected to always reset the replication parameters whenever we try to set the source host port - // Since there is no real overhead, but it makes this function robust enough to also handle failures like these. - cmds = append(cmds, conn.ResetReplicationParametersCommands()...) smc := conn.SetReplicationSourceCommand(params, host, port, int(replicationConnectRetry.Seconds())) cmds = append(cmds, smc) if startReplicationAfter { @@ -501,54 +545,6 @@ func FindReplicas(mysqld MysqlDaemon) ([]string, error) { return addrs, nil } -// EnableBinlogPlayback prepares the server to play back events from a binlog stream. -// Whatever it does for a given flavor, it must be idempotent. -func (mysqld *Mysqld) EnableBinlogPlayback() error { - // Get a connection. - conn, err := getPoolReconnect(context.TODO(), mysqld.dbaPool) - if err != nil { - return err - } - defer conn.Recycle() - - // See if we have a command to run, and run it. - cmd := conn.EnableBinlogPlaybackCommand() - if cmd == "" { - return nil - } - if err := mysqld.ExecuteSuperQuery(context.TODO(), cmd); err != nil { - log.Errorf("EnableBinlogPlayback: cannot run query '%v': %v", cmd, err) - return fmt.Errorf("EnableBinlogPlayback: cannot run query '%v': %v", cmd, err) - } - - log.Info("EnableBinlogPlayback: successfully ran %v", cmd) - return nil -} - -// DisableBinlogPlayback returns the server to the normal state after streaming. -// Whatever it does for a given flavor, it must be idempotent. -func (mysqld *Mysqld) DisableBinlogPlayback() error { - // Get a connection. - conn, err := getPoolReconnect(context.TODO(), mysqld.dbaPool) - if err != nil { - return err - } - defer conn.Recycle() - - // See if we have a command to run, and run it. - cmd := conn.DisableBinlogPlaybackCommand() - if cmd == "" { - return nil - } - if err := mysqld.ExecuteSuperQuery(context.TODO(), cmd); err != nil { - log.Errorf("DisableBinlogPlayback: cannot run query '%v': %v", cmd, err) - return fmt.Errorf("DisableBinlogPlayback: cannot run query '%v': %v", cmd, err) - } - - log.Info("DisableBinlogPlayback: successfully ran '%v'", cmd) - return nil -} - // GetBinlogInformation gets the binlog format, whether binlog is enabled and if updates on replica logging is enabled. func (mysqld *Mysqld) GetBinlogInformation(ctx context.Context) (string, bool, bool, string, error) { qr, err := mysqld.FetchSuperQuery(ctx, "select @@global.binlog_format, @@global.log_bin, @@global.log_slave_updates, @@global.binlog_row_image") @@ -657,8 +653,8 @@ func (mysqld *Mysqld) SemiSyncEnabled() (primary, replica bool) { if err != nil { return false, false } - primary = (vars["rpl_semi_sync_master_enabled"] == "ON") - replica = (vars["rpl_semi_sync_slave_enabled"] == "ON") + primary = vars["rpl_semi_sync_master_enabled"] == "ON" + replica = vars["rpl_semi_sync_slave_enabled"] == "ON" return primary, replica } @@ -712,3 +708,16 @@ func (mysqld *Mysqld) SemiSyncReplicationStatus() (bool, error) { } return false, nil } + +// SemiSyncExtensionLoaded returns whether semi-sync plugins are loaded. +func (mysqld *Mysqld) SemiSyncExtensionLoaded() (bool, error) { + qr, err := mysqld.FetchSuperQuery(context.Background(), "SELECT COUNT(*) > 0 AS plugin_loaded FROM information_schema.plugins WHERE plugin_name LIKE 'rpl_semi_sync%'") + if err != nil { + return false, err + } + pluginPresent, err := qr.Rows[0][0].ToBool() + if err != nil { + return false, err + } + return pluginPresent, nil +} diff --git a/go/vt/mysqlctl/schema.go b/go/vt/mysqlctl/schema.go index 518e9bf3ab5..397668145ef 100644 --- a/go/vt/mysqlctl/schema.go +++ b/go/vt/mysqlctl/schema.go @@ -22,38 +22,52 @@ import ( "regexp" "sort" "strings" - "sync" - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/concurrency" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine" + "golang.org/x/sync/errgroup" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqlescape" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/tmutils" - querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +const ( + // In a local environment and without latency, we have seen that an unbounded concurrency still translates to less than + // 20 concurrent MySQL connections. Which is why placing a limit of 20 concurrent goroutines (each mapped to a MySQL connection) + // is unlikely to affect optimal environments. + // In high latency environments, unbounded concurrency can translate to a very high number of concurrent MySQL connections. This + // is an undesirable behavior. We prefer to push back on GetSchema and make it run over longer time, instead. + getSchemaConcurrency = 20 ) var autoIncr = regexp.MustCompile(` AUTO_INCREMENT=\d+`) -// executeSchemaCommands executes some SQL commands, using the mysql -// command line tool. It uses the dba connection parameters, with credentials. -func (mysqld *Mysqld) executeSchemaCommands(sql string) error { +type EmptyColumnsErr struct { + dbName, tableName, query string +} + +func (e EmptyColumnsErr) Error() string { + return fmt.Sprintf("unable to get columns for table %s.%s using query %s", e.dbName, e.tableName, e.query) +} + +// executeSchemaCommands executes some SQL commands. It uses the dba connection parameters, with credentials. +func (mysqld *Mysqld) executeSchemaCommands(ctx context.Context, sql string) error { params, err := mysqld.dbcfgs.DbaConnector().MysqlParams() if err != nil { return err } - - return mysqld.executeMysqlScript(params, strings.NewReader(sql)) + return mysqld.executeMysqlScript(ctx, params, sql) } -func encodeTableName(tableName string) string { +func encodeEntityName(name string) string { var buf strings.Builder - sqltypes.NewVarChar(tableName).EncodeSQL(&buf) + sqltypes.NewVarChar(name).EncodeSQL(&buf) return buf.String() } @@ -65,7 +79,7 @@ func tableListSQL(tables []string) (string, error) { encodedTables := make([]string, len(tables)) for i, tableName := range tables { - encodedTables[i] = encodeTableName(tableName) + encodedTables[i] = encodeEntityName(tableName) } return "(" + strings.Join(encodedTables, ", ") + ")", nil @@ -95,49 +109,59 @@ func (mysqld *Mysqld) GetSchema(ctx context.Context, dbName string, request *tab ctx, cancel := context.WithCancel(ctx) defer cancel() - var wg sync.WaitGroup allErrors := &concurrency.AllErrorRecorder{} + eg, ctx := errgroup.WithContext(ctx) + eg.SetLimit(getSchemaConcurrency) + // Get per-table schema concurrently. tableNames := make([]string, 0, len(tds)) for _, td := range tds { tableNames = append(tableNames, td.Name) + td := td - wg.Add(1) - go func(td *tabletmanagerdatapb.TableDefinition) { - defer wg.Done() - + eg.Go(func() error { fields, columns, schema, err := mysqld.collectSchema(ctx, dbName, td.Name, td.Type, request.TableSchemaOnly) if err != nil { + // There's a possible race condition: it could happen that a table was dropped in between reading + // the list of tables (collectBasicTableData(), earlier) and the point above where we investigate + // the table. + // This is fine. We identify the situation and keep the table without any fields/columns/key information + sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) + if isSQLErr && sqlErr != nil && sqlErr.Number() == sqlerror.ERNoSuchTable { + return nil + } + allErrors.RecordError(err) cancel() - return + return err } td.Fields = fields td.Columns = columns td.Schema = schema - }(td) + return nil + }) } - // Get primary columns concurrently. colMap := map[string][]string{} - if len(tableNames) > 0 { - wg.Add(1) - go func() { - defer wg.Done() - + // Get primary columns concurrently. + // The below runs a single query on `INFORMATION_SCHEMA` and does not interact with the actual tables. + // It is therefore safe to run even if some tables are dropped in the interim. + if len(tableNames) > 0 && !request.TableSchemaOnly { + eg.Go(func() error { var err error colMap, err = mysqld.getPrimaryKeyColumns(ctx, dbName, tableNames...) if err != nil { allErrors.RecordError(err) cancel() - return + return err } - }() + return nil + }) } - wg.Wait() + eg.Wait() if err := allErrors.AggrError(vterrors.Aggregate); err != nil { return nil, err } @@ -147,8 +171,6 @@ func (mysqld *Mysqld) GetSchema(ctx context.Context, dbName string, request *tab } sd.TableDefinitions = tds - - tmutils.GenerateSchemaVersion(sd) return sd, nil } @@ -184,7 +206,7 @@ func (mysqld *Mysqld) collectBasicTableData(ctx context.Context, dbName string, var dataLength uint64 if !row[2].IsNull() { // dataLength is NULL for views, then we use 0 - dataLength, err = evalengine.ToUint64(row[2]) + dataLength, err = row[2].ToCastUint64() if err != nil { return nil, err } @@ -193,7 +215,7 @@ func (mysqld *Mysqld) collectBasicTableData(ctx context.Context, dbName string, // get row count var rowCount uint64 if !row[3].IsNull() { - rowCount, err = evalengine.ToUint64(row[3]) + rowCount, err = row[3].ToCastUint64() if err != nil { return nil, err } @@ -270,26 +292,30 @@ func ResolveTables(ctx context.Context, mysqld MysqlDaemon, dbName string, table const ( GetColumnNamesQuery = `SELECT COLUMN_NAME as column_name FROM INFORMATION_SCHEMA.COLUMNS - WHERE TABLE_SCHEMA = %s AND TABLE_NAME = '%s' + WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s ORDER BY ORDINAL_POSITION` GetFieldsQuery = "SELECT %s FROM %s WHERE 1 != 1" ) +// GetColumnsList returns the column names for a given table/view, using a query generating function. +// Returned values: +// - selectColumns: a string of comma delimited qualified names to be used in a SELECT query. e.g. "`id`, `name`, `val`" +// - err: error func GetColumnsList(dbName, tableName string, exec func(string, int, bool) (*sqltypes.Result, error)) (string, error) { var dbName2 string if dbName == "" { dbName2 = "database()" } else { - dbName2 = fmt.Sprintf("'%s'", dbName) + dbName2 = encodeEntityName(dbName) } - query := fmt.Sprintf(GetColumnNamesQuery, dbName2, sqlescape.UnescapeID(tableName)) + query := fmt.Sprintf(GetColumnNamesQuery, dbName2, encodeEntityName(sqlescape.UnescapeID(tableName))) qr, err := exec(query, -1, true) if err != nil { return "", err } if qr == nil || len(qr.Rows) == 0 { - err = fmt.Errorf("unable to get columns for table %s.%s using query %s", dbName, tableName, query) - log.Errorf("%s", fmt.Errorf("unable to get columns for table %s.%s using query %s", dbName, tableName, query)) + err := &EmptyColumnsErr{dbName: dbName, tableName: tableName, query: query} + log.Error(err.Error()) return "", err } selectColumns := "" @@ -367,9 +393,9 @@ func (mysqld *Mysqld) getPrimaryKeyColumns(ctx context.Context, dbName string, t sql := ` SELECT TABLE_NAME as table_name, COLUMN_NAME as column_name FROM information_schema.STATISTICS - WHERE TABLE_SCHEMA = '%s' AND TABLE_NAME IN %s AND LOWER(INDEX_NAME) = 'primary' + WHERE TABLE_SCHEMA = %s AND TABLE_NAME IN %s AND LOWER(INDEX_NAME) = 'primary' ORDER BY table_name, SEQ_IN_INDEX` - sql = fmt.Sprintf(sql, dbName, tableList) + sql = fmt.Sprintf(sql, encodeEntityName(dbName), tableList) qr, err := conn.ExecuteFetch(sql, len(tables)*100, true) if err != nil { return nil, err @@ -417,7 +443,7 @@ func (mysqld *Mysqld) PreflightSchemaChange(ctx context.Context, dbName string, initialCopySQL += s + ";\n" } } - if err = mysqld.executeSchemaCommands(initialCopySQL); err != nil { + if err = mysqld.executeSchemaCommands(ctx, initialCopySQL); err != nil { return nil, err } @@ -433,7 +459,7 @@ func (mysqld *Mysqld) PreflightSchemaChange(ctx context.Context, dbName string, sql := "SET sql_log_bin = 0;\n" sql += "USE _vt_preflight;\n" sql += change - if err = mysqld.executeSchemaCommands(sql); err != nil { + if err = mysqld.executeSchemaCommands(ctx, sql); err != nil { return nil, err } @@ -449,7 +475,7 @@ func (mysqld *Mysqld) PreflightSchemaChange(ctx context.Context, dbName string, // and clean up the extra database dropSQL := "SET sql_log_bin = 0;\n" dropSQL += "DROP DATABASE _vt_preflight;\n" - if err = mysqld.executeSchemaCommands(dropSQL); err != nil { + if err = mysqld.executeSchemaCommands(ctx, dropSQL); err != nil { return nil, err } @@ -509,7 +535,7 @@ func (mysqld *Mysqld) ApplySchemaChange(ctx context.Context, dbName string, chan // execute the schema change using an external mysql process // (to benefit from the extra commands in mysql cli) - if err = mysqld.executeSchemaCommands(sql); err != nil { + if err = mysqld.executeSchemaCommands(ctx, sql); err != nil { return nil, err } @@ -589,16 +615,18 @@ func (mysqld *Mysqld) GetPrimaryKeyEquivalentColumns(ctx context.Context, dbName END ) AS type_cost, COUNT(stats.COLUMN_NAME) AS col_count FROM information_schema.STATISTICS AS stats INNER JOIN information_schema.COLUMNS AS cols ON stats.TABLE_SCHEMA = cols.TABLE_SCHEMA AND stats.TABLE_NAME = cols.TABLE_NAME AND stats.COLUMN_NAME = cols.COLUMN_NAME - WHERE stats.TABLE_SCHEMA = '%s' AND stats.TABLE_NAME = '%s' AND stats.INDEX_NAME NOT IN + WHERE stats.TABLE_SCHEMA = %s AND stats.TABLE_NAME = %s AND stats.INDEX_NAME NOT IN ( SELECT DISTINCT INDEX_NAME FROM information_schema.STATISTICS - WHERE TABLE_SCHEMA = '%s' AND TABLE_NAME = '%s' AND (NON_UNIQUE = 1 OR NULLABLE = 'YES') + WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s AND (NON_UNIQUE = 1 OR NULLABLE = 'YES') ) GROUP BY INDEX_NAME ORDER BY type_cost ASC, col_count ASC LIMIT 1 ) AS pke ON index_cols.INDEX_NAME = pke.INDEX_NAME - WHERE index_cols.TABLE_SCHEMA = '%s' AND index_cols.TABLE_NAME = '%s' AND NON_UNIQUE = 0 AND NULLABLE != 'YES' + WHERE index_cols.TABLE_SCHEMA = %s AND index_cols.TABLE_NAME = %s AND NON_UNIQUE = 0 AND NULLABLE != 'YES' ORDER BY SEQ_IN_INDEX ASC` - sql = fmt.Sprintf(sql, dbName, table, dbName, table, dbName, table) + encodedDbName := encodeEntityName(dbName) + encodedTable := encodeEntityName(table) + sql = fmt.Sprintf(sql, encodedDbName, encodedTable, encodedDbName, encodedTable, encodedDbName, encodedTable) qr, err := conn.ExecuteFetch(sql, 1000, true) if err != nil { return nil, err diff --git a/go/vt/mysqlctl/schema_test.go b/go/vt/mysqlctl/schema_test.go index 5ec02be9960..fb64f8ca8ee 100644 --- a/go/vt/mysqlctl/schema_test.go +++ b/go/vt/mysqlctl/schema_test.go @@ -15,7 +15,7 @@ var queryMap map[string]*sqltypes.Result func mockExec(query string, maxRows int, wantFields bool) (*sqltypes.Result, error) { queryMap = make(map[string]*sqltypes.Result) - getColsQuery := fmt.Sprintf(GetColumnNamesQuery, "'test'", "t1") + getColsQuery := fmt.Sprintf(GetColumnNamesQuery, "'test'", "'t1'") queryMap[getColsQuery] = &sqltypes.Result{ Fields: []*querypb.Field{{ Name: "column_name", @@ -40,7 +40,7 @@ func mockExec(query string, maxRows int, wantFields bool) (*sqltypes.Result, err Type: sqltypes.VarBinary, }}, } - getColsQuery = fmt.Sprintf(GetColumnNamesQuery, "database()", "t2") + getColsQuery = fmt.Sprintf(GetColumnNamesQuery, "database()", "'t2'") queryMap[getColsQuery] = &sqltypes.Result{ Fields: []*querypb.Field{{ Name: "column_name", @@ -61,6 +61,29 @@ func mockExec(query string, maxRows int, wantFields bool) (*sqltypes.Result, err if ok { return result, nil } + + getColsQuery = fmt.Sprintf(GetColumnNamesQuery, "database()", "'with \\' quote'") + queryMap[getColsQuery] = &sqltypes.Result{ + Fields: []*querypb.Field{{ + Name: "column_name", + Type: sqltypes.VarChar, + }}, + Rows: [][]sqltypes.Value{ + {sqltypes.NewVarChar("col1")}, + }, + } + + queryMap["SELECT `col1` FROM `with ' quote` WHERE 1 != 1"] = &sqltypes.Result{ + Fields: []*querypb.Field{{ + Name: "col1", + Type: sqltypes.VarChar, + }}, + } + result, ok = queryMap[query] + if ok { + return result, nil + } + return nil, fmt.Errorf("query %s not found in mock setup", query) } @@ -74,4 +97,9 @@ func TestColumnList(t *testing.T) { fields, _, err = GetColumns("", "t2", mockExec) require.NoError(t, err) require.Equal(t, `[name:"col1" type:VARCHAR]`, fmt.Sprintf("%+v", fields)) + + fields, _, err = GetColumns("", "with ' quote", mockExec) + require.NoError(t, err) + require.Equal(t, `[name:"col1" type:VARCHAR]`, fmt.Sprintf("%+v", fields)) + } diff --git a/go/vt/mysqlctl/tmutils/schema.go b/go/vt/mysqlctl/tmutils/schema.go index 41842d40c07..aae529f89b0 100644 --- a/go/vt/mysqlctl/tmutils/schema.go +++ b/go/vt/mysqlctl/tmutils/schema.go @@ -17,8 +17,6 @@ limitations under the License. package tmutils import ( - "crypto/md5" - "encoding/hex" "fmt" "regexp" "strings" @@ -179,7 +177,7 @@ func (f *TableFilter) Includes(tableName string, tableType string) bool { // (tables), no denied tables (excludeTables) and optionally // views (includeViews). func FilterTables(sd *tabletmanagerdatapb.SchemaDefinition, tables, excludeTables []string, includeViews bool) (*tabletmanagerdatapb.SchemaDefinition, error) { - copy := proto.Clone(sd).(*tabletmanagerdatapb.SchemaDefinition) + copy := sd.CloneVT() copy.TableDefinitions = make([]*tabletmanagerdatapb.TableDefinition, 0, len(sd.TableDefinitions)) f, err := NewTableFilter(tables, excludeTables, includeViews) @@ -192,27 +190,9 @@ func FilterTables(sd *tabletmanagerdatapb.SchemaDefinition, tables, excludeTable copy.TableDefinitions = append(copy.TableDefinitions, table) } } - - // Regenerate hash over tables because it may have changed. - if copy.Version != "" { - GenerateSchemaVersion(copy) - } - return copy, nil } -// GenerateSchemaVersion return a unique schema version string based on -// its TableDefinitions. -func GenerateSchemaVersion(sd *tabletmanagerdatapb.SchemaDefinition) { - hasher := md5.New() - for _, td := range sd.TableDefinitions { - if _, err := hasher.Write([]byte(td.Schema)); err != nil { - panic(err) // extremely unlikely - } - } - sd.Version = hex.EncodeToString(hasher.Sum(nil)) -} - // SchemaDefinitionGetTable returns TableDefinition for a given table name. func SchemaDefinitionGetTable(sd *tabletmanagerdatapb.SchemaDefinition, table string) (td *tabletmanagerdatapb.TableDefinition, ok bool) { for _, td := range sd.TableDefinitions { diff --git a/go/vt/mysqlctl/tmutils/schema_test.go b/go/vt/mysqlctl/tmutils/schema_test.go index 472093cb869..0f3d9572107 100644 --- a/go/vt/mysqlctl/tmutils/schema_test.go +++ b/go/vt/mysqlctl/tmutils/schema_test.go @@ -234,7 +234,7 @@ func TestSchemaDiff(t *testing.T) { }) testDiff(t, sd4, sd5, "sd4", "sd5", []string{ - fmt.Sprintf("schemas differ on table type for table table2:\nsd4: VIEW\n differs from:\nsd5: BASE TABLE"), //nolint + "schemas differ on table type for table table2:\nsd4: VIEW\n differs from:\nsd5: BASE TABLE", }) sd1.DatabaseSchema = "CREATE DATABASE {{.DatabaseName}}" @@ -551,23 +551,6 @@ func TestFilterTables(t *testing.T) { }, }, }, - { - desc: "update schema version hash when list of tables has changed", - input: &tabletmanagerdatapb.SchemaDefinition{ - TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ - basicTable1, - basicTable2, - }, - Version: "dummy-version", - }, - excludeTables: []string{basicTable1.Name}, - want: &tabletmanagerdatapb.SchemaDefinition{ - TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ - basicTable2, - }, - Version: "6d1d294def9febdb21b35dd19a1dd4c6", - }, - }, { desc: "invalid regex for tables returns an error", input: &tabletmanagerdatapb.SchemaDefinition{ diff --git a/go/vt/mysqlctl/xtrabackupengine.go b/go/vt/mysqlctl/xtrabackupengine.go index f2bb50954dd..d11699167d9 100644 --- a/go/vt/mysqlctl/xtrabackupengine.go +++ b/go/vt/mysqlctl/xtrabackupengine.go @@ -32,9 +32,11 @@ import ( "github.com/spf13/pflag" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/ioutil" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" @@ -70,9 +72,6 @@ const ( xtrabackupBinaryName = "xtrabackup" xtrabackupEngineName = "xtrabackup" xbstream = "xbstream" - - // closeTimeout is the timeout for closing backup files after writing. - closeTimeout = 10 * time.Minute ) // xtraBackupManifest represents a backup. @@ -103,6 +102,20 @@ type xtraBackupManifest struct { // false for backups that were created before the field existed, and those // backups all had compression enabled. SkipCompress bool + + // When CompressionEngine is "external", ExternalDecompressor may be + // consulted for the external decompressor command. + // + // When taking a backup with --compression-engine=external, + // ExternalDecompressor will be set to the value of + // --manifest-external-decompressor, if set, or else left as an empty + // string. + // + // When restoring from a backup with CompressionEngine "external", + // --external-decompressor will be consulted first and, if that is not set, + // ExternalDecompressor will be used. If neither are set, the restore will + // abort. + ExternalDecompressor string } func init() { @@ -153,10 +166,18 @@ func closeFile(wc io.WriteCloser, fileName string, logger logutil.Logger, finalE } } -// ExecuteBackup returns a boolean that indicates if the backup is usable, -// and an overall error. -func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (complete bool, finalErr error) { +// ExecuteBackup runs a backup based on given params. This could be a full or incremental backup. +// The function returns a boolean that indicates if the backup is usable, and an overall error. +func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (bool, error) { + params.Logger.Infof("Executing Backup at %v for keyspace/shard %v/%v on tablet %v, concurrency: %v, compress: %v, incrementalFromPos: %v", + params.BackupTime, params.Keyspace, params.Shard, params.TabletAlias, params.Concurrency, backupStorageCompress, params.IncrementalFromPos) + + return be.executeFullBackup(ctx, params, bh) +} +// executeFullBackup returns a boolean that indicates if the backup is usable, +// and an overall error. +func (be *XtrabackupEngine) executeFullBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (complete bool, finalErr error) { if params.IncrementalFromPos != "" { return false, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "incremental backups not supported in xtrabackup engine.") } @@ -188,6 +209,11 @@ func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupPara return false, vterrors.Wrap(err, "can't get server uuid") } + mysqlVersion, err := params.Mysqld.GetVersionString(ctx) + if err != nil { + return false, vterrors.Wrap(err, "can't get MySQL version") + } + flavor := pos.GTIDSet.Flavor() params.Logger.Infof("Detected MySQL flavor: %v", flavor) @@ -218,14 +244,19 @@ func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupPara bm := &xtraBackupManifest{ // Common base fields BackupManifest: BackupManifest{ - BackupMethod: xtrabackupEngineName, - Position: replicationPosition, - ServerUUID: serverUUID, - TabletAlias: params.TabletAlias, - Keyspace: params.Keyspace, - Shard: params.Shard, - BackupTime: params.BackupTime.UTC().Format(time.RFC3339), - FinishedTime: time.Now().UTC().Format(time.RFC3339), + BackupMethod: xtrabackupEngineName, + Position: replicationPosition, + PurgedPosition: replicationPosition, + ServerUUID: serverUUID, + TabletAlias: params.TabletAlias, + Keyspace: params.Keyspace, + Shard: params.Shard, + BackupTime: FormatRFC3339(params.BackupTime.UTC()), + FinishedTime: FormatRFC3339(time.Now().UTC()), + MySQLVersion: mysqlVersion, + // xtrabackup backups are always created such that they + // are safe to use for upgrades later on. + UpgradeSafe: true, }, // XtraBackup-specific fields @@ -236,7 +267,8 @@ func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupPara NumStripes: int32(numStripes), StripeBlockSize: int32(xtrabackupStripeBlockSize), // builtin specific field - CompressionEngine: CompressionEngineName, + CompressionEngine: CompressionEngineName, + ExternalDecompressor: ManifestExternalDecompressorCmd, } data, err := json.MarshalIndent(bm, "", " ") @@ -251,7 +283,14 @@ func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupPara return true, nil } -func (be *XtrabackupEngine) backupFiles(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle, backupFileName string, numStripes int, flavor string) (replicationPosition mysql.Position, finalErr error) { +func (be *XtrabackupEngine) backupFiles( + ctx context.Context, + params BackupParams, + bh backupstorage.BackupHandle, + backupFileName string, + numStripes int, + flavor string, +) (replicationPosition replication.Position, finalErr error) { backupProgram := path.Join(xtrabackupEnginePath, xtrabackupBinaryName) flagsToExec := []string{"--defaults-file=" + params.Cnf.Path, @@ -323,7 +362,7 @@ func (be *XtrabackupEngine) backupFiles(ctx context.Context, params BackupParams destWriters := []io.Writer{} destBuffers := []*bufio.Writer{} - destCompressors := []io.WriteCloser{} + destCompressors := []io.Closer{} for _, file := range destFiles { buffer := bufio.NewWriterSize(file, writerBufferSize) destBuffers = append(destBuffers, buffer) @@ -343,7 +382,7 @@ func (be *XtrabackupEngine) backupFiles(ctx context.Context, params BackupParams } writer = compressor - destCompressors = append(destCompressors, compressor) + destCompressors = append(destCompressors, ioutil.NewTimeoutCloser(ctx, compressor, closeTimeout)) } destWriters = append(destWriters, writer) @@ -591,7 +630,7 @@ func (be *XtrabackupEngine) extractFiles(ctx context.Context, logger logutil.Log }() srcReaders := []io.Reader{} - srcDecompressors := []io.ReadCloser{} + srcDecompressors := []io.Closer{} for _, file := range srcFiles { reader := io.Reader(file) @@ -604,9 +643,13 @@ func (be *XtrabackupEngine) extractFiles(ctx context.Context, logger logutil.Log // then we assign the default value of compressionEngine. deCompressionEngine = PgzipCompressor } - if ExternalDecompressorCmd != "" { + externalDecompressorCmd := ExternalDecompressorCmd + if externalDecompressorCmd == "" && bm.ExternalDecompressor != "" { + externalDecompressorCmd = bm.ExternalDecompressor + } + if externalDecompressorCmd != "" { if deCompressionEngine == ExternalCompressor { - deCompressionEngine = ExternalDecompressorCmd + deCompressionEngine = externalDecompressorCmd decompressor, err = newExternalDecompressor(ctx, deCompressionEngine, reader, logger) } else { decompressor, err = newBuiltinDecompressor(deCompressionEngine, reader, logger) @@ -620,7 +663,7 @@ func (be *XtrabackupEngine) extractFiles(ctx context.Context, logger logutil.Log if err != nil { return vterrors.Wrap(err, "can't create decompressor") } - srcDecompressors = append(srcDecompressors, decompressor) + srcDecompressors = append(srcDecompressors, ioutil.NewTimeoutCloser(ctx, decompressor, closeTimeout)) reader = decompressor } @@ -709,10 +752,10 @@ func (be *XtrabackupEngine) extractFiles(ctx context.Context, logger logutil.Log var xtrabackupReplicationPositionRegexp = regexp.MustCompile(`GTID of the last change '([^']*)'`) -func findReplicationPosition(input, flavor string, logger logutil.Logger) (mysql.Position, error) { +func findReplicationPosition(input, flavor string, logger logutil.Logger) (replication.Position, error) { match := xtrabackupReplicationPositionRegexp.FindStringSubmatch(input) if match == nil || len(match) != 2 { - return mysql.Position{}, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "couldn't find replication position in xtrabackup stderr output") + return replication.Position{}, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "couldn't find replication position in xtrabackup stderr output") } position := match[1] // Remove all spaces, tabs, and newlines. @@ -721,13 +764,13 @@ func findReplicationPosition(input, flavor string, logger logutil.Logger) (mysql position = strings.Replace(position, "\n", "", -1) logger.Infof("Found position: %v", position) if position == "" { - return mysql.Position{}, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "empty replication position from xtrabackup") + return replication.Position{}, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "empty replication position from xtrabackup") } // flavor is required to parse a string into a mysql.Position - replicationPosition, err := mysql.ParsePosition(flavor, position) + replicationPosition, err := replication.ParsePosition(flavor, position) if err != nil { - return mysql.Position{}, vterrors.Wrapf(err, "can't parse replication position from xtrabackup: %v", position) + return replication.Position{}, vterrors.Wrapf(err, "can't parse replication position from xtrabackup: %v", position) } return replicationPosition, nil } @@ -908,7 +951,7 @@ func stripeReader(readers []io.Reader, blockSize int64) io.Reader { // ShouldDrainForBackup satisfies the BackupEngine interface // xtrabackup can run while tablet is serving, hence false -func (be *XtrabackupEngine) ShouldDrainForBackup() bool { +func (be *XtrabackupEngine) ShouldDrainForBackup(req *tabletmanagerdatapb.BackupRequest) bool { return false } diff --git a/go/vt/mysqlctl/xtrabackupengine_test.go b/go/vt/mysqlctl/xtrabackupengine_test.go index 26e53c6c949..7a829ce4ba0 100644 --- a/go/vt/mysqlctl/xtrabackupengine_test.go +++ b/go/vt/mysqlctl/xtrabackupengine_test.go @@ -22,7 +22,10 @@ import ( "math/rand" "testing" + "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/vt/logutil" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" ) func TestFindReplicationPosition(t *testing.T) { @@ -115,3 +118,10 @@ func TestStripeRoundTrip(t *testing.T) { // Test block size and stripe count that don't evenly divide data size. test(6000, 7) } + +func TestShouldDrainForBackupXtrabackup(t *testing.T) { + be := &XtrabackupEngine{} + + assert.False(t, be.ShouldDrainForBackup(nil)) + assert.False(t, be.ShouldDrainForBackup(&tabletmanagerdatapb.BackupRequest{})) +} diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go index df2467d3e3c..3da747d3832 100644 --- a/go/vt/proto/binlogdata/binlogdata.pb.go +++ b/go/vt/proto/binlogdata/binlogdata.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: binlogdata.proto @@ -158,8 +158,9 @@ func (VReplicationWorkflowType) EnumDescriptor() ([]byte, []int) { type VReplicationWorkflowSubType int32 const ( - VReplicationWorkflowSubType_None VReplicationWorkflowSubType = 0 - VReplicationWorkflowSubType_Partial VReplicationWorkflowSubType = 1 + VReplicationWorkflowSubType_None VReplicationWorkflowSubType = 0 + VReplicationWorkflowSubType_Partial VReplicationWorkflowSubType = 1 + VReplicationWorkflowSubType_AtomicCopy VReplicationWorkflowSubType = 2 ) // Enum value maps for VReplicationWorkflowSubType. @@ -167,10 +168,12 @@ var ( VReplicationWorkflowSubType_name = map[int32]string{ 0: "None", 1: "Partial", + 2: "AtomicCopy", } VReplicationWorkflowSubType_value = map[string]int32{ - "None": 0, - "Partial": 1, + "None": 0, + "Partial": 1, + "AtomicCopy": 2, } ) @@ -201,6 +204,68 @@ func (VReplicationWorkflowSubType) EnumDescriptor() ([]byte, []int) { return file_binlogdata_proto_rawDescGZIP(), []int{2} } +// VReplicationWorklfowState defines the valid states that a workflow can be in. +type VReplicationWorkflowState int32 + +const ( + VReplicationWorkflowState_Unknown VReplicationWorkflowState = 0 + VReplicationWorkflowState_Init VReplicationWorkflowState = 1 + VReplicationWorkflowState_Stopped VReplicationWorkflowState = 2 + VReplicationWorkflowState_Copying VReplicationWorkflowState = 3 + VReplicationWorkflowState_Running VReplicationWorkflowState = 4 + VReplicationWorkflowState_Error VReplicationWorkflowState = 5 + VReplicationWorkflowState_Lagging VReplicationWorkflowState = 6 +) + +// Enum value maps for VReplicationWorkflowState. +var ( + VReplicationWorkflowState_name = map[int32]string{ + 0: "Unknown", + 1: "Init", + 2: "Stopped", + 3: "Copying", + 4: "Running", + 5: "Error", + 6: "Lagging", + } + VReplicationWorkflowState_value = map[string]int32{ + "Unknown": 0, + "Init": 1, + "Stopped": 2, + "Copying": 3, + "Running": 4, + "Error": 5, + "Lagging": 6, + } +) + +func (x VReplicationWorkflowState) Enum() *VReplicationWorkflowState { + p := new(VReplicationWorkflowState) + *p = x + return p +} + +func (x VReplicationWorkflowState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (VReplicationWorkflowState) Descriptor() protoreflect.EnumDescriptor { + return file_binlogdata_proto_enumTypes[3].Descriptor() +} + +func (VReplicationWorkflowState) Type() protoreflect.EnumType { + return &file_binlogdata_proto_enumTypes[3] +} + +func (x VReplicationWorkflowState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use VReplicationWorkflowState.Descriptor instead. +func (VReplicationWorkflowState) EnumDescriptor() ([]byte, []int) { + return file_binlogdata_proto_rawDescGZIP(), []int{3} +} + // VEventType enumerates the event types. Many of these types // will not be encountered in RBR mode. type VEventType int32 @@ -301,11 +366,11 @@ func (x VEventType) String() string { } func (VEventType) Descriptor() protoreflect.EnumDescriptor { - return file_binlogdata_proto_enumTypes[3].Descriptor() + return file_binlogdata_proto_enumTypes[4].Descriptor() } func (VEventType) Type() protoreflect.EnumType { - return &file_binlogdata_proto_enumTypes[3] + return &file_binlogdata_proto_enumTypes[4] } func (x VEventType) Number() protoreflect.EnumNumber { @@ -314,7 +379,7 @@ func (x VEventType) Number() protoreflect.EnumNumber { // Deprecated: Use VEventType.Descriptor instead. func (VEventType) EnumDescriptor() ([]byte, []int) { - return file_binlogdata_proto_rawDescGZIP(), []int{3} + return file_binlogdata_proto_rawDescGZIP(), []int{4} } // MigrationType specifies the type of migration for the Journal. @@ -348,11 +413,11 @@ func (x MigrationType) String() string { } func (MigrationType) Descriptor() protoreflect.EnumDescriptor { - return file_binlogdata_proto_enumTypes[4].Descriptor() + return file_binlogdata_proto_enumTypes[5].Descriptor() } func (MigrationType) Type() protoreflect.EnumType { - return &file_binlogdata_proto_enumTypes[4] + return &file_binlogdata_proto_enumTypes[5] } func (x MigrationType) Number() protoreflect.EnumNumber { @@ -361,7 +426,7 @@ func (x MigrationType) Number() protoreflect.EnumNumber { // Deprecated: Use MigrationType.Descriptor instead. func (MigrationType) EnumDescriptor() ([]byte, []int) { - return file_binlogdata_proto_rawDescGZIP(), []int{4} + return file_binlogdata_proto_rawDescGZIP(), []int{5} } type BinlogTransaction_Statement_Category int32 @@ -419,11 +484,11 @@ func (x BinlogTransaction_Statement_Category) String() string { } func (BinlogTransaction_Statement_Category) Descriptor() protoreflect.EnumDescriptor { - return file_binlogdata_proto_enumTypes[5].Descriptor() + return file_binlogdata_proto_enumTypes[6].Descriptor() } func (BinlogTransaction_Statement_Category) Type() protoreflect.EnumType { - return &file_binlogdata_proto_enumTypes[5] + return &file_binlogdata_proto_enumTypes[6] } func (x BinlogTransaction_Statement_Category) Number() protoreflect.EnumNumber { @@ -465,11 +530,11 @@ func (x Filter_FieldEventMode) String() string { } func (Filter_FieldEventMode) Descriptor() protoreflect.EnumDescriptor { - return file_binlogdata_proto_enumTypes[6].Descriptor() + return file_binlogdata_proto_enumTypes[7].Descriptor() } func (Filter_FieldEventMode) Type() protoreflect.EnumType { - return &file_binlogdata_proto_enumTypes[6] + return &file_binlogdata_proto_enumTypes[7] } func (x Filter_FieldEventMode) Number() protoreflect.EnumNumber { @@ -1279,6 +1344,8 @@ type RowChange struct { Before *query.Row `protobuf:"bytes,1,opt,name=before,proto3" json:"before,omitempty"` After *query.Row `protobuf:"bytes,2,opt,name=after,proto3" json:"after,omitempty"` + // DataColumns is a bitmap of all columns: bit is set if column is present in the after image + DataColumns *RowChange_Bitmap `protobuf:"bytes,3,opt,name=data_columns,json=dataColumns,proto3" json:"data_columns,omitempty"` } func (x *RowChange) Reset() { @@ -1327,6 +1394,13 @@ func (x *RowChange) GetAfter() *query.Row { return nil } +func (x *RowChange) GetDataColumns() *RowChange_Bitmap { + if x != nil { + return x.DataColumns + } + return nil +} + // RowEvent represent row events for one table. type RowEvent struct { state protoimpl.MessageState @@ -1337,6 +1411,7 @@ type RowEvent struct { RowChanges []*RowChange `protobuf:"bytes,2,rep,name=row_changes,json=rowChanges,proto3" json:"row_changes,omitempty"` Keyspace string `protobuf:"bytes,3,opt,name=keyspace,proto3" json:"keyspace,omitempty"` Shard string `protobuf:"bytes,4,opt,name=shard,proto3" json:"shard,omitempty"` + Flags uint32 `protobuf:"varint,5,opt,name=flags,proto3" json:"flags,omitempty"` // https://dev.mysql.com/doc/dev/mysql-server/latest/classbinary__log_1_1Rows__event.html } func (x *RowEvent) Reset() { @@ -1399,6 +1474,13 @@ func (x *RowEvent) GetShard() string { return "" } +func (x *RowEvent) GetFlags() uint32 { + if x != nil { + return x.Flags + } + return 0 +} + // FieldEvent represents the field info for a table. type FieldEvent struct { state protoimpl.MessageState @@ -2363,6 +2445,158 @@ func (x *VStreamRowsResponse) GetHeartbeat() bool { return false } +// VStreamTablesRequest is the payload for VStreamTables +type VStreamTablesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId,proto3" json:"effective_caller_id,omitempty"` + ImmediateCallerId *query.VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId,proto3" json:"immediate_caller_id,omitempty"` + Target *query.Target `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` +} + +func (x *VStreamTablesRequest) Reset() { + *x = VStreamTablesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_binlogdata_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VStreamTablesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VStreamTablesRequest) ProtoMessage() {} + +func (x *VStreamTablesRequest) ProtoReflect() protoreflect.Message { + mi := &file_binlogdata_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VStreamTablesRequest.ProtoReflect.Descriptor instead. +func (*VStreamTablesRequest) Descriptor() ([]byte, []int) { + return file_binlogdata_proto_rawDescGZIP(), []int{24} +} + +func (x *VStreamTablesRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if x != nil { + return x.EffectiveCallerId + } + return nil +} + +func (x *VStreamTablesRequest) GetImmediateCallerId() *query.VTGateCallerID { + if x != nil { + return x.ImmediateCallerId + } + return nil +} + +func (x *VStreamTablesRequest) GetTarget() *query.Target { + if x != nil { + return x.Target + } + return nil +} + +// VStreamTablesResponse is the response from VStreamTables +type VStreamTablesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"` + Fields []*query.Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` + Pkfields []*query.Field `protobuf:"bytes,3,rep,name=pkfields,proto3" json:"pkfields,omitempty"` + Gtid string `protobuf:"bytes,4,opt,name=gtid,proto3" json:"gtid,omitempty"` + Rows []*query.Row `protobuf:"bytes,5,rep,name=rows,proto3" json:"rows,omitempty"` + Lastpk *query.Row `protobuf:"bytes,6,opt,name=lastpk,proto3" json:"lastpk,omitempty"` +} + +func (x *VStreamTablesResponse) Reset() { + *x = VStreamTablesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_binlogdata_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VStreamTablesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VStreamTablesResponse) ProtoMessage() {} + +func (x *VStreamTablesResponse) ProtoReflect() protoreflect.Message { + mi := &file_binlogdata_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VStreamTablesResponse.ProtoReflect.Descriptor instead. +func (*VStreamTablesResponse) Descriptor() ([]byte, []int) { + return file_binlogdata_proto_rawDescGZIP(), []int{25} +} + +func (x *VStreamTablesResponse) GetTableName() string { + if x != nil { + return x.TableName + } + return "" +} + +func (x *VStreamTablesResponse) GetFields() []*query.Field { + if x != nil { + return x.Fields + } + return nil +} + +func (x *VStreamTablesResponse) GetPkfields() []*query.Field { + if x != nil { + return x.Pkfields + } + return nil +} + +func (x *VStreamTablesResponse) GetGtid() string { + if x != nil { + return x.Gtid + } + return "" +} + +func (x *VStreamTablesResponse) GetRows() []*query.Row { + if x != nil { + return x.Rows + } + return nil +} + +func (x *VStreamTablesResponse) GetLastpk() *query.Row { + if x != nil { + return x.Lastpk + } + return nil +} + type LastPKEvent struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2375,7 +2609,7 @@ type LastPKEvent struct { func (x *LastPKEvent) Reset() { *x = LastPKEvent{} if protoimpl.UnsafeEnabled { - mi := &file_binlogdata_proto_msgTypes[24] + mi := &file_binlogdata_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2388,7 +2622,7 @@ func (x *LastPKEvent) String() string { func (*LastPKEvent) ProtoMessage() {} func (x *LastPKEvent) ProtoReflect() protoreflect.Message { - mi := &file_binlogdata_proto_msgTypes[24] + mi := &file_binlogdata_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2401,7 +2635,7 @@ func (x *LastPKEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use LastPKEvent.ProtoReflect.Descriptor instead. func (*LastPKEvent) Descriptor() ([]byte, []int) { - return file_binlogdata_proto_rawDescGZIP(), []int{24} + return file_binlogdata_proto_rawDescGZIP(), []int{26} } func (x *LastPKEvent) GetTableLastPK() *TableLastPK { @@ -2430,7 +2664,7 @@ type TableLastPK struct { func (x *TableLastPK) Reset() { *x = TableLastPK{} if protoimpl.UnsafeEnabled { - mi := &file_binlogdata_proto_msgTypes[25] + mi := &file_binlogdata_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2443,7 +2677,7 @@ func (x *TableLastPK) String() string { func (*TableLastPK) ProtoMessage() {} func (x *TableLastPK) ProtoReflect() protoreflect.Message { - mi := &file_binlogdata_proto_msgTypes[25] + mi := &file_binlogdata_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2456,7 +2690,7 @@ func (x *TableLastPK) ProtoReflect() protoreflect.Message { // Deprecated: Use TableLastPK.ProtoReflect.Descriptor instead. func (*TableLastPK) Descriptor() ([]byte, []int) { - return file_binlogdata_proto_rawDescGZIP(), []int{25} + return file_binlogdata_proto_rawDescGZIP(), []int{27} } func (x *TableLastPK) GetTableName() string { @@ -2490,7 +2724,7 @@ type VStreamResultsRequest struct { func (x *VStreamResultsRequest) Reset() { *x = VStreamResultsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_binlogdata_proto_msgTypes[26] + mi := &file_binlogdata_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2503,7 +2737,7 @@ func (x *VStreamResultsRequest) String() string { func (*VStreamResultsRequest) ProtoMessage() {} func (x *VStreamResultsRequest) ProtoReflect() protoreflect.Message { - mi := &file_binlogdata_proto_msgTypes[26] + mi := &file_binlogdata_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2516,7 +2750,7 @@ func (x *VStreamResultsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VStreamResultsRequest.ProtoReflect.Descriptor instead. func (*VStreamResultsRequest) Descriptor() ([]byte, []int) { - return file_binlogdata_proto_rawDescGZIP(), []int{26} + return file_binlogdata_proto_rawDescGZIP(), []int{28} } func (x *VStreamResultsRequest) GetEffectiveCallerId() *vtrpc.CallerID { @@ -2562,7 +2796,7 @@ type VStreamResultsResponse struct { func (x *VStreamResultsResponse) Reset() { *x = VStreamResultsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_binlogdata_proto_msgTypes[27] + mi := &file_binlogdata_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2575,7 +2809,7 @@ func (x *VStreamResultsResponse) String() string { func (*VStreamResultsResponse) ProtoMessage() {} func (x *VStreamResultsResponse) ProtoReflect() protoreflect.Message { - mi := &file_binlogdata_proto_msgTypes[27] + mi := &file_binlogdata_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2588,7 +2822,7 @@ func (x *VStreamResultsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VStreamResultsResponse.ProtoReflect.Descriptor instead. func (*VStreamResultsResponse) Descriptor() ([]byte, []int) { - return file_binlogdata_proto_rawDescGZIP(), []int{27} + return file_binlogdata_proto_rawDescGZIP(), []int{29} } func (x *VStreamResultsResponse) GetFields() []*query.Field { @@ -2628,7 +2862,7 @@ type BinlogTransaction_Statement struct { func (x *BinlogTransaction_Statement) Reset() { *x = BinlogTransaction_Statement{} if protoimpl.UnsafeEnabled { - mi := &file_binlogdata_proto_msgTypes[28] + mi := &file_binlogdata_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2641,7 +2875,7 @@ func (x *BinlogTransaction_Statement) String() string { func (*BinlogTransaction_Statement) ProtoMessage() {} func (x *BinlogTransaction_Statement) ProtoReflect() protoreflect.Message { - mi := &file_binlogdata_proto_msgTypes[28] + mi := &file_binlogdata_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2678,6 +2912,61 @@ func (x *BinlogTransaction_Statement) GetSql() []byte { return nil } +type RowChange_Bitmap struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + Cols []byte `protobuf:"bytes,2,opt,name=cols,proto3" json:"cols,omitempty"` +} + +func (x *RowChange_Bitmap) Reset() { + *x = RowChange_Bitmap{} + if protoimpl.UnsafeEnabled { + mi := &file_binlogdata_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RowChange_Bitmap) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RowChange_Bitmap) ProtoMessage() {} + +func (x *RowChange_Bitmap) ProtoReflect() protoreflect.Message { + mi := &file_binlogdata_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RowChange_Bitmap.ProtoReflect.Descriptor instead. +func (*RowChange_Bitmap) Descriptor() ([]byte, []int) { + return file_binlogdata_proto_rawDescGZIP(), []int{10, 0} +} + +func (x *RowChange_Bitmap) GetCount() int64 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *RowChange_Bitmap) GetCols() []byte { + if x != nil { + return x.Cols + } + return nil +} + var File_binlogdata_proto protoreflect.FileDescriptor var file_binlogdata_proto_rawDesc = []byte{ @@ -2849,181 +3138,145 @@ var file_binlogdata_proto_rawDesc = []byte{ 0x72, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x69, 0x6d, - 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x22, 0x51, 0x0a, 0x09, 0x52, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, - 0x67, 0x65, 0x12, 0x22, 0x0a, 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06, - 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x20, 0x0a, 0x05, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, - 0x77, 0x52, 0x05, 0x61, 0x66, 0x74, 0x65, 0x72, 0x22, 0x93, 0x01, 0x0a, 0x08, 0x52, 0x6f, 0x77, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x5f, 0x63, 0x68, 0x61, 0x6e, - 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, - 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x83, - 0x01, 0x0a, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, - 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, + 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x22, 0xc6, 0x01, 0x0a, 0x09, 0x52, 0x6f, 0x77, 0x43, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x22, 0x0a, 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, + 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x20, 0x0a, 0x05, 0x61, 0x66, 0x74, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, + 0x6f, 0x77, 0x52, 0x05, 0x61, 0x66, 0x74, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0c, 0x64, 0x61, 0x74, + 0x61, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x42, 0x69, 0x74, 0x6d, 0x61, 0x70, 0x52, 0x0b, 0x64, + 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x1a, 0x32, 0x0a, 0x06, 0x42, 0x69, + 0x74, 0x6d, 0x61, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, + 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0xa9, + 0x01, 0x0a, 0x08, 0x52, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x72, 0x6f, + 0x77, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x22, 0x88, 0x01, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, - 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x35, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, + 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x0a, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1a, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x22, 0x88, 0x01, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x12, 0x1a, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x67, 0x74, 0x69, 0x64, 0x12, 0x35, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, 0x5f, + 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, + 0x4b, 0x52, 0x08, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x4b, 0x73, 0x22, 0x3f, 0x0a, 0x05, 0x56, + 0x47, 0x74, 0x69, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x67, 0x74, + 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, + 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, + 0x52, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x73, 0x22, 0x41, 0x0a, 0x0d, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, + 0xbc, 0x02, 0x0a, 0x07, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x40, 0x0a, 0x0e, 0x6d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0d, + 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x0b, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x52, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x47, + 0x74, 0x69, 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, + 0x61, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, + 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x77, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0x8b, + 0x04, 0x0a, 0x06, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x09, 0x72, 0x6f, 0x77, 0x5f, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x08, + 0x72, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x0b, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x47, + 0x74, 0x69, 0x64, 0x52, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x12, 0x2d, 0x0a, 0x07, 0x6a, 0x6f, + 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x62, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, + 0x52, 0x07, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6d, 0x6c, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6d, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0b, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3c, + 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, + 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1c, + 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x18, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x22, 0x68, 0x0a, 0x0c, + 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0b, 0x70, 0x5f, 0x6b, 0x5f, 0x63, 0x6f, + 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x03, 0x52, 0x09, 0x70, 0x4b, 0x43, + 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0x41, 0x0a, 0x0d, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, + 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x30, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0xc7, 0x02, 0x0a, 0x0e, 0x56, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, + 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, + 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, + 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, + 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c, 0x61, 0x73, + 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, - 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x08, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x4b, 0x73, 0x22, - 0x3f, 0x0a, 0x05, 0x56, 0x47, 0x74, 0x69, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x47, 0x74, 0x69, 0x64, 0x52, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x73, - 0x22, 0x41, 0x0a, 0x0d, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x22, 0xbc, 0x02, 0x0a, 0x07, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x40, 0x0a, 0x0e, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x0d, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x36, 0x0a, 0x0b, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x52, 0x0a, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, - 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, - 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x73, 0x22, 0x8b, 0x04, 0x0a, 0x06, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2a, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x62, 0x69, - 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x09, 0x72, 0x6f, 0x77, - 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x62, - 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x52, 0x08, 0x72, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x0b, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x16, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x56, 0x47, 0x74, 0x69, 0x64, 0x52, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x12, 0x2d, - 0x0a, 0x07, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4a, 0x6f, 0x75, - 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x07, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x10, 0x0a, - 0x03, 0x64, 0x6d, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6d, 0x6c, 0x12, - 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x14, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x16, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, - 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, - 0x22, 0x68, 0x0a, 0x0c, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0b, 0x70, 0x5f, - 0x6b, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x03, 0x52, - 0x09, 0x70, 0x4b, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0x41, 0x0a, 0x0d, 0x4d, 0x69, - 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x30, 0x0a, 0x06, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x69, - 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0xc7, 0x02, - 0x0a, 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, - 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, - 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, - 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, - 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, - 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, - 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, - 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x73, 0x22, 0x3d, 0x0a, 0x0f, 0x56, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x85, 0x02, 0x0a, 0x12, 0x56, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, - 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, - 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, - 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, - 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, - 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, - 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x22, 0xf9, - 0x01, 0x0a, 0x13, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x28, 0x0a, 0x08, - 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x08, 0x70, 0x6b, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, - 0x77, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x6c, 0x61, - 0x73, 0x74, 0x70, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x12, 0x1c, - 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, - 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x09, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x22, 0x69, 0x0a, 0x0b, 0x4c, 0x61, - 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, - 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, - 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x58, 0x0a, 0x0b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, - 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x22, - 0xdc, 0x01, 0x0a, 0x15, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, + 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, + 0x50, 0x4b, 0x73, 0x22, 0x3d, 0x0a, 0x0f, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x22, 0x85, 0x02, 0x0a, 0x12, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, + 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, @@ -3035,53 +3288,132 @@ var file_binlogdata_proto_rawDesc = []byte{ 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x72, - 0x0a, 0x16, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, - 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, - 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, - 0x77, 0x73, 0x2a, 0x3e, 0x0a, 0x0b, 0x4f, 0x6e, 0x44, 0x44, 0x4c, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, 0x10, 0x00, 0x12, 0x08, 0x0a, - 0x04, 0x53, 0x54, 0x4f, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x45, 0x58, 0x45, 0x43, 0x10, - 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x45, 0x43, 0x5f, 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, - 0x10, 0x03, 0x2a, 0x7b, 0x0a, 0x18, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, - 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x10, 0x00, 0x12, - 0x0e, 0x0a, 0x0a, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x10, 0x01, 0x12, - 0x15, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x49, - 0x6e, 0x64, 0x65, 0x78, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, - 0x65, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x10, 0x04, - 0x12, 0x0d, 0x0a, 0x09, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x44, 0x44, 0x4c, 0x10, 0x05, 0x2a, - 0x34, 0x0a, 0x1b, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, - 0x0a, 0x04, 0x4e, 0x6f, 0x6e, 0x65, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x61, 0x72, 0x74, - 0x69, 0x61, 0x6c, 0x10, 0x01, 0x2a, 0x8d, 0x02, 0x0a, 0x0a, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x54, 0x49, 0x44, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x42, - 0x45, 0x47, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, - 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x04, - 0x12, 0x07, 0x0a, 0x03, 0x44, 0x44, 0x4c, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x4e, 0x53, - 0x45, 0x52, 0x54, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, - 0x10, 0x07, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x08, 0x12, 0x0a, - 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x09, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, - 0x54, 0x10, 0x0a, 0x12, 0x09, 0x0a, 0x05, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x10, 0x0b, 0x12, 0x07, - 0x0a, 0x03, 0x52, 0x4f, 0x57, 0x10, 0x0c, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x49, 0x45, 0x4c, 0x44, - 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x45, 0x41, 0x52, 0x54, 0x42, 0x45, 0x41, 0x54, 0x10, - 0x0e, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x47, 0x54, 0x49, 0x44, 0x10, 0x0f, 0x12, 0x0b, 0x0a, 0x07, - 0x4a, 0x4f, 0x55, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x10, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x45, 0x52, - 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x11, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x41, 0x53, 0x54, 0x50, 0x4b, - 0x10, 0x12, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x41, 0x56, 0x45, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, - 0x13, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4f, 0x50, 0x59, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, - 0x54, 0x45, 0x44, 0x10, 0x14, 0x2a, 0x27, 0x0a, 0x0d, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, - 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x48, 0x41, 0x52, 0x44, 0x53, 0x10, 0x01, 0x42, 0x29, - 0x5a, 0x27, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, - 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x62, - 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2a, + 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x22, 0xf9, 0x01, 0x0a, 0x13, 0x56, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x28, 0x0a, 0x08, 0x70, 0x6b, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, + 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, + 0x6f, 0x77, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, + 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x74, + 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x72, + 0x74, 0x62, 0x65, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x68, 0x65, 0x61, + 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x22, 0xc5, 0x01, 0x0a, 0x14, 0x56, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, + 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, + 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, + 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, + 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, + 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, + 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x22, 0xde, + 0x01, 0x0a, 0x15, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x28, 0x0a, + 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x08, 0x70, + 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, + 0x6f, 0x77, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x6c, + 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x22, + 0x69, 0x0a, 0x0b, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3c, + 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, + 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1c, 0x0a, 0x09, + 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x58, 0x0a, 0x0b, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, + 0x70, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, + 0x73, 0x74, 0x70, 0x6b, 0x22, 0xdc, 0x01, 0x0a, 0x15, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, + 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, + 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, + 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x14, 0x0a, + 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x22, 0x72, 0x0a, 0x16, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, + 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, + 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x2a, 0x3e, 0x0a, 0x0b, 0x4f, 0x6e, 0x44, 0x44, 0x4c, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, + 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x4f, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, + 0x45, 0x58, 0x45, 0x43, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x45, 0x43, 0x5f, 0x49, + 0x47, 0x4e, 0x4f, 0x52, 0x45, 0x10, 0x03, 0x2a, 0x7b, 0x0a, 0x18, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, + 0x7a, 0x65, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x44, + 0x44, 0x4c, 0x10, 0x05, 0x2a, 0x44, 0x0a, 0x1b, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x6f, 0x6e, 0x65, 0x10, 0x00, 0x12, 0x0b, 0x0a, + 0x07, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x74, + 0x6f, 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x70, 0x79, 0x10, 0x02, 0x2a, 0x71, 0x0a, 0x19, 0x56, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, + 0x77, 0x6e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x10, 0x01, 0x12, 0x0b, + 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x43, + 0x6f, 0x70, 0x79, 0x69, 0x6e, 0x67, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, 0x05, + 0x12, 0x0b, 0x0a, 0x07, 0x4c, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x10, 0x06, 0x2a, 0x8d, 0x02, + 0x0a, 0x0a, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x54, 0x49, + 0x44, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0a, + 0x0a, 0x06, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, + 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x04, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x44, 0x4c, 0x10, + 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x10, 0x06, 0x12, 0x0b, 0x0a, + 0x07, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x10, 0x07, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, + 0x44, 0x41, 0x54, 0x45, 0x10, 0x08, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, + 0x10, 0x09, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x0a, 0x12, 0x09, 0x0a, 0x05, 0x4f, + 0x54, 0x48, 0x45, 0x52, 0x10, 0x0b, 0x12, 0x07, 0x0a, 0x03, 0x52, 0x4f, 0x57, 0x10, 0x0c, 0x12, + 0x09, 0x0a, 0x05, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x45, + 0x41, 0x52, 0x54, 0x42, 0x45, 0x41, 0x54, 0x10, 0x0e, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x47, 0x54, + 0x49, 0x44, 0x10, 0x0f, 0x12, 0x0b, 0x0a, 0x07, 0x4a, 0x4f, 0x55, 0x52, 0x4e, 0x41, 0x4c, 0x10, + 0x10, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x11, 0x12, 0x0a, + 0x0a, 0x06, 0x4c, 0x41, 0x53, 0x54, 0x50, 0x4b, 0x10, 0x12, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x41, + 0x56, 0x45, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x13, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4f, 0x50, + 0x59, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x14, 0x2a, 0x27, 0x0a, + 0x0d, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x0a, 0x06, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x48, + 0x41, 0x52, 0x44, 0x53, 0x10, 0x01, 0x42, 0x29, 0x5a, 0x27, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, + 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, + 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3096,121 +3428,133 @@ func file_binlogdata_proto_rawDescGZIP() []byte { return file_binlogdata_proto_rawDescData } -var file_binlogdata_proto_enumTypes = make([]protoimpl.EnumInfo, 7) -var file_binlogdata_proto_msgTypes = make([]protoimpl.MessageInfo, 32) +var file_binlogdata_proto_enumTypes = make([]protoimpl.EnumInfo, 8) +var file_binlogdata_proto_msgTypes = make([]protoimpl.MessageInfo, 35) var file_binlogdata_proto_goTypes = []interface{}{ (OnDDLAction)(0), // 0: binlogdata.OnDDLAction (VReplicationWorkflowType)(0), // 1: binlogdata.VReplicationWorkflowType (VReplicationWorkflowSubType)(0), // 2: binlogdata.VReplicationWorkflowSubType - (VEventType)(0), // 3: binlogdata.VEventType - (MigrationType)(0), // 4: binlogdata.MigrationType - (BinlogTransaction_Statement_Category)(0), // 5: binlogdata.BinlogTransaction.Statement.Category - (Filter_FieldEventMode)(0), // 6: binlogdata.Filter.FieldEventMode - (*Charset)(nil), // 7: binlogdata.Charset - (*BinlogTransaction)(nil), // 8: binlogdata.BinlogTransaction - (*StreamKeyRangeRequest)(nil), // 9: binlogdata.StreamKeyRangeRequest - (*StreamKeyRangeResponse)(nil), // 10: binlogdata.StreamKeyRangeResponse - (*StreamTablesRequest)(nil), // 11: binlogdata.StreamTablesRequest - (*StreamTablesResponse)(nil), // 12: binlogdata.StreamTablesResponse - (*CharsetConversion)(nil), // 13: binlogdata.CharsetConversion - (*Rule)(nil), // 14: binlogdata.Rule - (*Filter)(nil), // 15: binlogdata.Filter - (*BinlogSource)(nil), // 16: binlogdata.BinlogSource - (*RowChange)(nil), // 17: binlogdata.RowChange - (*RowEvent)(nil), // 18: binlogdata.RowEvent - (*FieldEvent)(nil), // 19: binlogdata.FieldEvent - (*ShardGtid)(nil), // 20: binlogdata.ShardGtid - (*VGtid)(nil), // 21: binlogdata.VGtid - (*KeyspaceShard)(nil), // 22: binlogdata.KeyspaceShard - (*Journal)(nil), // 23: binlogdata.Journal - (*VEvent)(nil), // 24: binlogdata.VEvent - (*MinimalTable)(nil), // 25: binlogdata.MinimalTable - (*MinimalSchema)(nil), // 26: binlogdata.MinimalSchema - (*VStreamRequest)(nil), // 27: binlogdata.VStreamRequest - (*VStreamResponse)(nil), // 28: binlogdata.VStreamResponse - (*VStreamRowsRequest)(nil), // 29: binlogdata.VStreamRowsRequest - (*VStreamRowsResponse)(nil), // 30: binlogdata.VStreamRowsResponse - (*LastPKEvent)(nil), // 31: binlogdata.LastPKEvent - (*TableLastPK)(nil), // 32: binlogdata.TableLastPK - (*VStreamResultsRequest)(nil), // 33: binlogdata.VStreamResultsRequest - (*VStreamResultsResponse)(nil), // 34: binlogdata.VStreamResultsResponse - (*BinlogTransaction_Statement)(nil), // 35: binlogdata.BinlogTransaction.Statement - nil, // 36: binlogdata.Rule.ConvertEnumToTextEntry - nil, // 37: binlogdata.Rule.ConvertCharsetEntry - nil, // 38: binlogdata.Rule.ConvertIntToEnumEntry - (*query.EventToken)(nil), // 39: query.EventToken - (*topodata.KeyRange)(nil), // 40: topodata.KeyRange - (topodata.TabletType)(0), // 41: topodata.TabletType - (*query.Row)(nil), // 42: query.Row - (*query.Field)(nil), // 43: query.Field - (*vtrpc.CallerID)(nil), // 44: vtrpc.CallerID - (*query.VTGateCallerID)(nil), // 45: query.VTGateCallerID - (*query.Target)(nil), // 46: query.Target - (*query.QueryResult)(nil), // 47: query.QueryResult + (VReplicationWorkflowState)(0), // 3: binlogdata.VReplicationWorkflowState + (VEventType)(0), // 4: binlogdata.VEventType + (MigrationType)(0), // 5: binlogdata.MigrationType + (BinlogTransaction_Statement_Category)(0), // 6: binlogdata.BinlogTransaction.Statement.Category + (Filter_FieldEventMode)(0), // 7: binlogdata.Filter.FieldEventMode + (*Charset)(nil), // 8: binlogdata.Charset + (*BinlogTransaction)(nil), // 9: binlogdata.BinlogTransaction + (*StreamKeyRangeRequest)(nil), // 10: binlogdata.StreamKeyRangeRequest + (*StreamKeyRangeResponse)(nil), // 11: binlogdata.StreamKeyRangeResponse + (*StreamTablesRequest)(nil), // 12: binlogdata.StreamTablesRequest + (*StreamTablesResponse)(nil), // 13: binlogdata.StreamTablesResponse + (*CharsetConversion)(nil), // 14: binlogdata.CharsetConversion + (*Rule)(nil), // 15: binlogdata.Rule + (*Filter)(nil), // 16: binlogdata.Filter + (*BinlogSource)(nil), // 17: binlogdata.BinlogSource + (*RowChange)(nil), // 18: binlogdata.RowChange + (*RowEvent)(nil), // 19: binlogdata.RowEvent + (*FieldEvent)(nil), // 20: binlogdata.FieldEvent + (*ShardGtid)(nil), // 21: binlogdata.ShardGtid + (*VGtid)(nil), // 22: binlogdata.VGtid + (*KeyspaceShard)(nil), // 23: binlogdata.KeyspaceShard + (*Journal)(nil), // 24: binlogdata.Journal + (*VEvent)(nil), // 25: binlogdata.VEvent + (*MinimalTable)(nil), // 26: binlogdata.MinimalTable + (*MinimalSchema)(nil), // 27: binlogdata.MinimalSchema + (*VStreamRequest)(nil), // 28: binlogdata.VStreamRequest + (*VStreamResponse)(nil), // 29: binlogdata.VStreamResponse + (*VStreamRowsRequest)(nil), // 30: binlogdata.VStreamRowsRequest + (*VStreamRowsResponse)(nil), // 31: binlogdata.VStreamRowsResponse + (*VStreamTablesRequest)(nil), // 32: binlogdata.VStreamTablesRequest + (*VStreamTablesResponse)(nil), // 33: binlogdata.VStreamTablesResponse + (*LastPKEvent)(nil), // 34: binlogdata.LastPKEvent + (*TableLastPK)(nil), // 35: binlogdata.TableLastPK + (*VStreamResultsRequest)(nil), // 36: binlogdata.VStreamResultsRequest + (*VStreamResultsResponse)(nil), // 37: binlogdata.VStreamResultsResponse + (*BinlogTransaction_Statement)(nil), // 38: binlogdata.BinlogTransaction.Statement + nil, // 39: binlogdata.Rule.ConvertEnumToTextEntry + nil, // 40: binlogdata.Rule.ConvertCharsetEntry + nil, // 41: binlogdata.Rule.ConvertIntToEnumEntry + (*RowChange_Bitmap)(nil), // 42: binlogdata.RowChange.Bitmap + (*query.EventToken)(nil), // 43: query.EventToken + (*topodata.KeyRange)(nil), // 44: topodata.KeyRange + (topodata.TabletType)(0), // 45: topodata.TabletType + (*query.Row)(nil), // 46: query.Row + (*query.Field)(nil), // 47: query.Field + (*vtrpc.CallerID)(nil), // 48: vtrpc.CallerID + (*query.VTGateCallerID)(nil), // 49: query.VTGateCallerID + (*query.Target)(nil), // 50: query.Target + (*query.QueryResult)(nil), // 51: query.QueryResult } var file_binlogdata_proto_depIdxs = []int32{ - 35, // 0: binlogdata.BinlogTransaction.statements:type_name -> binlogdata.BinlogTransaction.Statement - 39, // 1: binlogdata.BinlogTransaction.event_token:type_name -> query.EventToken - 40, // 2: binlogdata.StreamKeyRangeRequest.key_range:type_name -> topodata.KeyRange - 7, // 3: binlogdata.StreamKeyRangeRequest.charset:type_name -> binlogdata.Charset - 8, // 4: binlogdata.StreamKeyRangeResponse.binlog_transaction:type_name -> binlogdata.BinlogTransaction - 7, // 5: binlogdata.StreamTablesRequest.charset:type_name -> binlogdata.Charset - 8, // 6: binlogdata.StreamTablesResponse.binlog_transaction:type_name -> binlogdata.BinlogTransaction - 36, // 7: binlogdata.Rule.convert_enum_to_text:type_name -> binlogdata.Rule.ConvertEnumToTextEntry - 37, // 8: binlogdata.Rule.convert_charset:type_name -> binlogdata.Rule.ConvertCharsetEntry - 38, // 9: binlogdata.Rule.convert_int_to_enum:type_name -> binlogdata.Rule.ConvertIntToEnumEntry - 14, // 10: binlogdata.Filter.rules:type_name -> binlogdata.Rule - 6, // 11: binlogdata.Filter.field_event_mode:type_name -> binlogdata.Filter.FieldEventMode - 41, // 12: binlogdata.BinlogSource.tablet_type:type_name -> topodata.TabletType - 40, // 13: binlogdata.BinlogSource.key_range:type_name -> topodata.KeyRange - 15, // 14: binlogdata.BinlogSource.filter:type_name -> binlogdata.Filter + 38, // 0: binlogdata.BinlogTransaction.statements:type_name -> binlogdata.BinlogTransaction.Statement + 43, // 1: binlogdata.BinlogTransaction.event_token:type_name -> query.EventToken + 44, // 2: binlogdata.StreamKeyRangeRequest.key_range:type_name -> topodata.KeyRange + 8, // 3: binlogdata.StreamKeyRangeRequest.charset:type_name -> binlogdata.Charset + 9, // 4: binlogdata.StreamKeyRangeResponse.binlog_transaction:type_name -> binlogdata.BinlogTransaction + 8, // 5: binlogdata.StreamTablesRequest.charset:type_name -> binlogdata.Charset + 9, // 6: binlogdata.StreamTablesResponse.binlog_transaction:type_name -> binlogdata.BinlogTransaction + 39, // 7: binlogdata.Rule.convert_enum_to_text:type_name -> binlogdata.Rule.ConvertEnumToTextEntry + 40, // 8: binlogdata.Rule.convert_charset:type_name -> binlogdata.Rule.ConvertCharsetEntry + 41, // 9: binlogdata.Rule.convert_int_to_enum:type_name -> binlogdata.Rule.ConvertIntToEnumEntry + 15, // 10: binlogdata.Filter.rules:type_name -> binlogdata.Rule + 7, // 11: binlogdata.Filter.field_event_mode:type_name -> binlogdata.Filter.FieldEventMode + 45, // 12: binlogdata.BinlogSource.tablet_type:type_name -> topodata.TabletType + 44, // 13: binlogdata.BinlogSource.key_range:type_name -> topodata.KeyRange + 16, // 14: binlogdata.BinlogSource.filter:type_name -> binlogdata.Filter 0, // 15: binlogdata.BinlogSource.on_ddl:type_name -> binlogdata.OnDDLAction - 42, // 16: binlogdata.RowChange.before:type_name -> query.Row - 42, // 17: binlogdata.RowChange.after:type_name -> query.Row - 17, // 18: binlogdata.RowEvent.row_changes:type_name -> binlogdata.RowChange - 43, // 19: binlogdata.FieldEvent.fields:type_name -> query.Field - 32, // 20: binlogdata.ShardGtid.table_p_ks:type_name -> binlogdata.TableLastPK - 20, // 21: binlogdata.VGtid.shard_gtids:type_name -> binlogdata.ShardGtid - 4, // 22: binlogdata.Journal.migration_type:type_name -> binlogdata.MigrationType - 20, // 23: binlogdata.Journal.shard_gtids:type_name -> binlogdata.ShardGtid - 22, // 24: binlogdata.Journal.participants:type_name -> binlogdata.KeyspaceShard - 3, // 25: binlogdata.VEvent.type:type_name -> binlogdata.VEventType - 18, // 26: binlogdata.VEvent.row_event:type_name -> binlogdata.RowEvent - 19, // 27: binlogdata.VEvent.field_event:type_name -> binlogdata.FieldEvent - 21, // 28: binlogdata.VEvent.vgtid:type_name -> binlogdata.VGtid - 23, // 29: binlogdata.VEvent.journal:type_name -> binlogdata.Journal - 31, // 30: binlogdata.VEvent.last_p_k_event:type_name -> binlogdata.LastPKEvent - 43, // 31: binlogdata.MinimalTable.fields:type_name -> query.Field - 25, // 32: binlogdata.MinimalSchema.tables:type_name -> binlogdata.MinimalTable - 44, // 33: binlogdata.VStreamRequest.effective_caller_id:type_name -> vtrpc.CallerID - 45, // 34: binlogdata.VStreamRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 46, // 35: binlogdata.VStreamRequest.target:type_name -> query.Target - 15, // 36: binlogdata.VStreamRequest.filter:type_name -> binlogdata.Filter - 32, // 37: binlogdata.VStreamRequest.table_last_p_ks:type_name -> binlogdata.TableLastPK - 24, // 38: binlogdata.VStreamResponse.events:type_name -> binlogdata.VEvent - 44, // 39: binlogdata.VStreamRowsRequest.effective_caller_id:type_name -> vtrpc.CallerID - 45, // 40: binlogdata.VStreamRowsRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 46, // 41: binlogdata.VStreamRowsRequest.target:type_name -> query.Target - 47, // 42: binlogdata.VStreamRowsRequest.lastpk:type_name -> query.QueryResult - 43, // 43: binlogdata.VStreamRowsResponse.fields:type_name -> query.Field - 43, // 44: binlogdata.VStreamRowsResponse.pkfields:type_name -> query.Field - 42, // 45: binlogdata.VStreamRowsResponse.rows:type_name -> query.Row - 42, // 46: binlogdata.VStreamRowsResponse.lastpk:type_name -> query.Row - 32, // 47: binlogdata.LastPKEvent.table_last_p_k:type_name -> binlogdata.TableLastPK - 47, // 48: binlogdata.TableLastPK.lastpk:type_name -> query.QueryResult - 44, // 49: binlogdata.VStreamResultsRequest.effective_caller_id:type_name -> vtrpc.CallerID - 45, // 50: binlogdata.VStreamResultsRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 46, // 51: binlogdata.VStreamResultsRequest.target:type_name -> query.Target - 43, // 52: binlogdata.VStreamResultsResponse.fields:type_name -> query.Field - 42, // 53: binlogdata.VStreamResultsResponse.rows:type_name -> query.Row - 5, // 54: binlogdata.BinlogTransaction.Statement.category:type_name -> binlogdata.BinlogTransaction.Statement.Category - 7, // 55: binlogdata.BinlogTransaction.Statement.charset:type_name -> binlogdata.Charset - 13, // 56: binlogdata.Rule.ConvertCharsetEntry.value:type_name -> binlogdata.CharsetConversion - 57, // [57:57] is the sub-list for method output_type - 57, // [57:57] is the sub-list for method input_type - 57, // [57:57] is the sub-list for extension type_name - 57, // [57:57] is the sub-list for extension extendee - 0, // [0:57] is the sub-list for field type_name + 46, // 16: binlogdata.RowChange.before:type_name -> query.Row + 46, // 17: binlogdata.RowChange.after:type_name -> query.Row + 42, // 18: binlogdata.RowChange.data_columns:type_name -> binlogdata.RowChange.Bitmap + 18, // 19: binlogdata.RowEvent.row_changes:type_name -> binlogdata.RowChange + 47, // 20: binlogdata.FieldEvent.fields:type_name -> query.Field + 35, // 21: binlogdata.ShardGtid.table_p_ks:type_name -> binlogdata.TableLastPK + 21, // 22: binlogdata.VGtid.shard_gtids:type_name -> binlogdata.ShardGtid + 5, // 23: binlogdata.Journal.migration_type:type_name -> binlogdata.MigrationType + 21, // 24: binlogdata.Journal.shard_gtids:type_name -> binlogdata.ShardGtid + 23, // 25: binlogdata.Journal.participants:type_name -> binlogdata.KeyspaceShard + 4, // 26: binlogdata.VEvent.type:type_name -> binlogdata.VEventType + 19, // 27: binlogdata.VEvent.row_event:type_name -> binlogdata.RowEvent + 20, // 28: binlogdata.VEvent.field_event:type_name -> binlogdata.FieldEvent + 22, // 29: binlogdata.VEvent.vgtid:type_name -> binlogdata.VGtid + 24, // 30: binlogdata.VEvent.journal:type_name -> binlogdata.Journal + 34, // 31: binlogdata.VEvent.last_p_k_event:type_name -> binlogdata.LastPKEvent + 47, // 32: binlogdata.MinimalTable.fields:type_name -> query.Field + 26, // 33: binlogdata.MinimalSchema.tables:type_name -> binlogdata.MinimalTable + 48, // 34: binlogdata.VStreamRequest.effective_caller_id:type_name -> vtrpc.CallerID + 49, // 35: binlogdata.VStreamRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 50, // 36: binlogdata.VStreamRequest.target:type_name -> query.Target + 16, // 37: binlogdata.VStreamRequest.filter:type_name -> binlogdata.Filter + 35, // 38: binlogdata.VStreamRequest.table_last_p_ks:type_name -> binlogdata.TableLastPK + 25, // 39: binlogdata.VStreamResponse.events:type_name -> binlogdata.VEvent + 48, // 40: binlogdata.VStreamRowsRequest.effective_caller_id:type_name -> vtrpc.CallerID + 49, // 41: binlogdata.VStreamRowsRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 50, // 42: binlogdata.VStreamRowsRequest.target:type_name -> query.Target + 51, // 43: binlogdata.VStreamRowsRequest.lastpk:type_name -> query.QueryResult + 47, // 44: binlogdata.VStreamRowsResponse.fields:type_name -> query.Field + 47, // 45: binlogdata.VStreamRowsResponse.pkfields:type_name -> query.Field + 46, // 46: binlogdata.VStreamRowsResponse.rows:type_name -> query.Row + 46, // 47: binlogdata.VStreamRowsResponse.lastpk:type_name -> query.Row + 48, // 48: binlogdata.VStreamTablesRequest.effective_caller_id:type_name -> vtrpc.CallerID + 49, // 49: binlogdata.VStreamTablesRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 50, // 50: binlogdata.VStreamTablesRequest.target:type_name -> query.Target + 47, // 51: binlogdata.VStreamTablesResponse.fields:type_name -> query.Field + 47, // 52: binlogdata.VStreamTablesResponse.pkfields:type_name -> query.Field + 46, // 53: binlogdata.VStreamTablesResponse.rows:type_name -> query.Row + 46, // 54: binlogdata.VStreamTablesResponse.lastpk:type_name -> query.Row + 35, // 55: binlogdata.LastPKEvent.table_last_p_k:type_name -> binlogdata.TableLastPK + 51, // 56: binlogdata.TableLastPK.lastpk:type_name -> query.QueryResult + 48, // 57: binlogdata.VStreamResultsRequest.effective_caller_id:type_name -> vtrpc.CallerID + 49, // 58: binlogdata.VStreamResultsRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 50, // 59: binlogdata.VStreamResultsRequest.target:type_name -> query.Target + 47, // 60: binlogdata.VStreamResultsResponse.fields:type_name -> query.Field + 46, // 61: binlogdata.VStreamResultsResponse.rows:type_name -> query.Row + 6, // 62: binlogdata.BinlogTransaction.Statement.category:type_name -> binlogdata.BinlogTransaction.Statement.Category + 8, // 63: binlogdata.BinlogTransaction.Statement.charset:type_name -> binlogdata.Charset + 14, // 64: binlogdata.Rule.ConvertCharsetEntry.value:type_name -> binlogdata.CharsetConversion + 65, // [65:65] is the sub-list for method output_type + 65, // [65:65] is the sub-list for method input_type + 65, // [65:65] is the sub-list for extension type_name + 65, // [65:65] is the sub-list for extension extendee + 0, // [0:65] is the sub-list for field type_name } func init() { file_binlogdata_proto_init() } @@ -3508,7 +3852,7 @@ func file_binlogdata_proto_init() { } } file_binlogdata_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LastPKEvent); i { + switch v := v.(*VStreamTablesRequest); i { case 0: return &v.state case 1: @@ -3520,7 +3864,7 @@ func file_binlogdata_proto_init() { } } file_binlogdata_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TableLastPK); i { + switch v := v.(*VStreamTablesResponse); i { case 0: return &v.state case 1: @@ -3532,7 +3876,7 @@ func file_binlogdata_proto_init() { } } file_binlogdata_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VStreamResultsRequest); i { + switch v := v.(*LastPKEvent); i { case 0: return &v.state case 1: @@ -3544,7 +3888,7 @@ func file_binlogdata_proto_init() { } } file_binlogdata_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VStreamResultsResponse); i { + switch v := v.(*TableLastPK); i { case 0: return &v.state case 1: @@ -3556,6 +3900,30 @@ func file_binlogdata_proto_init() { } } file_binlogdata_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VStreamResultsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_binlogdata_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VStreamResultsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_binlogdata_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BinlogTransaction_Statement); i { case 0: return &v.state @@ -3567,14 +3935,26 @@ func file_binlogdata_proto_init() { return nil } } + file_binlogdata_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RowChange_Bitmap); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_binlogdata_proto_rawDesc, - NumEnums: 7, - NumMessages: 32, + NumEnums: 8, + NumMessages: 35, NumExtensions: 0, NumServices: 0, }, diff --git a/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go b/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go index ab1c3bc2495..379583b0354 100644 --- a/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go +++ b/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: binlogdata.proto package binlogdata import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -22,327 +23,841 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -func (m *Charset) MarshalVT() (dAtA []byte, err error) { +func (m *Charset) CloneVT() *Charset { if m == nil { - return nil, nil + return (*Charset)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &Charset{ + Client: m.Client, + Conn: m.Conn, + Server: m.Server, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Charset) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *Charset) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Charset) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *BinlogTransaction_Statement) CloneVT() *BinlogTransaction_Statement { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*BinlogTransaction_Statement)(nil) } - if m.Server != 0 { - i = encodeVarint(dAtA, i, uint64(m.Server)) - i-- - dAtA[i] = 0x18 + r := &BinlogTransaction_Statement{ + Category: m.Category, + Charset: m.Charset.CloneVT(), } - if m.Conn != 0 { - i = encodeVarint(dAtA, i, uint64(m.Conn)) - i-- - dAtA[i] = 0x10 + if rhs := m.Sql; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Sql = tmpBytes } - if m.Client != 0 { - i = encodeVarint(dAtA, i, uint64(m.Client)) - i-- - dAtA[i] = 0x8 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *BinlogTransaction_Statement) MarshalVT() (dAtA []byte, err error) { +func (m *BinlogTransaction_Statement) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BinlogTransaction) CloneVT() *BinlogTransaction { if m == nil { - return nil, nil + return (*BinlogTransaction)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &BinlogTransaction{ + EventToken: m.EventToken.CloneVT(), } - return dAtA[:n], nil + if rhs := m.Statements; rhs != nil { + tmpContainer := make([]*BinlogTransaction_Statement, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Statements = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *BinlogTransaction_Statement) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *BinlogTransaction) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *BinlogTransaction_Statement) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StreamKeyRangeRequest) CloneVT() *StreamKeyRangeRequest { if m == nil { - return 0, nil + return (*StreamKeyRangeRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &StreamKeyRangeRequest{ + Position: m.Position, + KeyRange: m.KeyRange.CloneVT(), + Charset: m.Charset.CloneVT(), } - if len(m.Sql) > 0 { - i -= len(m.Sql) - copy(dAtA[i:], m.Sql) - i = encodeVarint(dAtA, i, uint64(len(m.Sql))) - i-- - dAtA[i] = 0x1a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.Charset != nil { - size, err := m.Charset.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + return r +} + +func (m *StreamKeyRangeRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamKeyRangeResponse) CloneVT() *StreamKeyRangeResponse { + if m == nil { + return (*StreamKeyRangeResponse)(nil) } - if m.Category != 0 { - i = encodeVarint(dAtA, i, uint64(m.Category)) - i-- - dAtA[i] = 0x8 + r := &StreamKeyRangeResponse{ + BinlogTransaction: m.BinlogTransaction.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *BinlogTransaction) MarshalVT() (dAtA []byte, err error) { +func (m *StreamKeyRangeResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamTablesRequest) CloneVT() *StreamTablesRequest { if m == nil { - return nil, nil + return (*StreamTablesRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &StreamTablesRequest{ + Position: m.Position, + Charset: m.Charset.CloneVT(), } - return dAtA[:n], nil + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *BinlogTransaction) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *StreamTablesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *BinlogTransaction) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StreamTablesResponse) CloneVT() *StreamTablesResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*StreamTablesResponse)(nil) } - if m.EventToken != nil { - size, err := m.EventToken.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 + r := &StreamTablesResponse{ + BinlogTransaction: m.BinlogTransaction.CloneVT(), } - if len(m.Statements) > 0 { - for iNdEx := len(m.Statements) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Statements[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *StreamKeyRangeRequest) MarshalVT() (dAtA []byte, err error) { +func (m *StreamTablesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CharsetConversion) CloneVT() *CharsetConversion { if m == nil { - return nil, nil + return (*CharsetConversion)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &CharsetConversion{ + FromCharset: m.FromCharset, + ToCharset: m.ToCharset, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *StreamKeyRangeRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *CharsetConversion) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *StreamKeyRangeRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Rule) CloneVT() *Rule { if m == nil { - return 0, nil + return (*Rule)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &Rule{ + Match: m.Match, + Filter: m.Filter, + SourceUniqueKeyColumns: m.SourceUniqueKeyColumns, + TargetUniqueKeyColumns: m.TargetUniqueKeyColumns, + SourceUniqueKeyTargetColumns: m.SourceUniqueKeyTargetColumns, } - if m.Charset != nil { - size, err := m.Charset.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if rhs := m.ConvertEnumToText; rhs != nil { + tmpContainer := make(map[string]string, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a + r.ConvertEnumToText = tmpContainer } - if m.KeyRange != nil { - size, err := m.KeyRange.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if rhs := m.ConvertCharset; rhs != nil { + tmpContainer := make(map[string]*CharsetConversion, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + r.ConvertCharset = tmpContainer } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) - i-- - dAtA[i] = 0xa + if rhs := m.ConvertIntToEnum; rhs != nil { + tmpContainer := make(map[string]bool, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.ConvertIntToEnum = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *StreamKeyRangeResponse) MarshalVT() (dAtA []byte, err error) { +func (m *Rule) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Filter) CloneVT() *Filter { if m == nil { - return nil, nil + return (*Filter)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &Filter{ + FieldEventMode: m.FieldEventMode, + WorkflowType: m.WorkflowType, + WorkflowName: m.WorkflowName, } - return dAtA[:n], nil + if rhs := m.Rules; rhs != nil { + tmpContainer := make([]*Rule, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Rules = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *StreamKeyRangeResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *Filter) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *StreamKeyRangeResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *BinlogSource) CloneVT() *BinlogSource { if m == nil { - return 0, nil + return (*BinlogSource)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &BinlogSource{ + Keyspace: m.Keyspace, + Shard: m.Shard, + TabletType: m.TabletType, + KeyRange: m.KeyRange.CloneVT(), + Filter: m.Filter.CloneVT(), + OnDdl: m.OnDdl, + ExternalMysql: m.ExternalMysql, + StopAfterCopy: m.StopAfterCopy, + ExternalCluster: m.ExternalCluster, + SourceTimeZone: m.SourceTimeZone, + TargetTimeZone: m.TargetTimeZone, } - if m.BinlogTransaction != nil { - size, err := m.BinlogTransaction.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tables = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *StreamTablesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *BinlogSource) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RowChange_Bitmap) CloneVT() *RowChange_Bitmap { if m == nil { - return nil, nil + return (*RowChange_Bitmap)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RowChange_Bitmap{ + Count: m.Count, } - return dAtA[:n], nil + if rhs := m.Cols; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Cols = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *StreamTablesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RowChange_Bitmap) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *StreamTablesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RowChange) CloneVT() *RowChange { if m == nil { - return 0, nil + return (*RowChange)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RowChange{ + Before: m.Before.CloneVT(), + After: m.After.CloneVT(), + DataColumns: m.DataColumns.CloneVT(), } - if m.Charset != nil { - size, err := m.Charset.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RowChange) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RowEvent) CloneVT() *RowEvent { + if m == nil { + return (*RowEvent)(nil) + } + r := &RowEvent{ + TableName: m.TableName, + Keyspace: m.Keyspace, + Shard: m.Shard, + Flags: m.Flags, + } + if rhs := m.RowChanges; rhs != nil { + tmpContainer := make([]*RowChange, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a + r.RowChanges = tmpContainer } - if len(m.Tables) > 0 { - for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Tables[iNdEx]) - copy(dAtA[i:], m.Tables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) - i-- - dAtA[i] = 0x12 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RowEvent) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *FieldEvent) CloneVT() *FieldEvent { + if m == nil { + return (*FieldEvent)(nil) + } + r := &FieldEvent{ + TableName: m.TableName, + Keyspace: m.Keyspace, + Shard: m.Shard, + } + if rhs := m.Fields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } + r.Fields = tmpContainer } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *StreamTablesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *FieldEvent) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardGtid) CloneVT() *ShardGtid { + if m == nil { + return (*ShardGtid)(nil) + } + r := &ShardGtid{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Gtid: m.Gtid, + } + if rhs := m.TablePKs; rhs != nil { + tmpContainer := make([]*TableLastPK, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TablePKs = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShardGtid) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VGtid) CloneVT() *VGtid { + if m == nil { + return (*VGtid)(nil) + } + r := &VGtid{} + if rhs := m.ShardGtids; rhs != nil { + tmpContainer := make([]*ShardGtid, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ShardGtids = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VGtid) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *KeyspaceShard) CloneVT() *KeyspaceShard { + if m == nil { + return (*KeyspaceShard)(nil) + } + r := &KeyspaceShard{ + Keyspace: m.Keyspace, + Shard: m.Shard, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *KeyspaceShard) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Journal) CloneVT() *Journal { + if m == nil { + return (*Journal)(nil) + } + r := &Journal{ + Id: m.Id, + MigrationType: m.MigrationType, + LocalPosition: m.LocalPosition, + } + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tables = tmpContainer + } + if rhs := m.ShardGtids; rhs != nil { + tmpContainer := make([]*ShardGtid, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ShardGtids = tmpContainer + } + if rhs := m.Participants; rhs != nil { + tmpContainer := make([]*KeyspaceShard, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Participants = tmpContainer + } + if rhs := m.SourceWorkflows; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.SourceWorkflows = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Journal) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VEvent) CloneVT() *VEvent { + if m == nil { + return (*VEvent)(nil) + } + r := &VEvent{ + Type: m.Type, + Timestamp: m.Timestamp, + Gtid: m.Gtid, + Statement: m.Statement, + RowEvent: m.RowEvent.CloneVT(), + FieldEvent: m.FieldEvent.CloneVT(), + Vgtid: m.Vgtid.CloneVT(), + Journal: m.Journal.CloneVT(), + Dml: m.Dml, + CurrentTime: m.CurrentTime, + LastPKEvent: m.LastPKEvent.CloneVT(), + Keyspace: m.Keyspace, + Shard: m.Shard, + Throttled: m.Throttled, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VEvent) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MinimalTable) CloneVT() *MinimalTable { + if m == nil { + return (*MinimalTable)(nil) + } + r := &MinimalTable{ + Name: m.Name, + } + if rhs := m.Fields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Fields = tmpContainer + } + if rhs := m.PKColumns; rhs != nil { + tmpContainer := make([]int64, len(rhs)) + copy(tmpContainer, rhs) + r.PKColumns = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MinimalTable) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MinimalSchema) CloneVT() *MinimalSchema { + if m == nil { + return (*MinimalSchema)(nil) + } + r := &MinimalSchema{} + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]*MinimalTable, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Tables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MinimalSchema) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamRequest) CloneVT() *VStreamRequest { + if m == nil { + return (*VStreamRequest)(nil) + } + r := &VStreamRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Position: m.Position, + Filter: m.Filter.CloneVT(), + } + if rhs := m.TableLastPKs; rhs != nil { + tmpContainer := make([]*TableLastPK, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TableLastPKs = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamResponse) CloneVT() *VStreamResponse { + if m == nil { + return (*VStreamResponse)(nil) + } + r := &VStreamResponse{} + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*VEvent, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamRowsRequest) CloneVT() *VStreamRowsRequest { + if m == nil { + return (*VStreamRowsRequest)(nil) + } + r := &VStreamRowsRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query, + Lastpk: m.Lastpk.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamRowsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamRowsResponse) CloneVT() *VStreamRowsResponse { + if m == nil { + return (*VStreamRowsResponse)(nil) + } + r := &VStreamRowsResponse{ + Gtid: m.Gtid, + Lastpk: m.Lastpk.CloneVT(), + Throttled: m.Throttled, + Heartbeat: m.Heartbeat, + } + if rhs := m.Fields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Fields = tmpContainer + } + if rhs := m.Pkfields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Pkfields = tmpContainer + } + if rhs := m.Rows; rhs != nil { + tmpContainer := make([]*query.Row, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Rows = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamRowsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamTablesRequest) CloneVT() *VStreamTablesRequest { + if m == nil { + return (*VStreamTablesRequest)(nil) + } + r := &VStreamTablesRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamTablesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamTablesResponse) CloneVT() *VStreamTablesResponse { + if m == nil { + return (*VStreamTablesResponse)(nil) + } + r := &VStreamTablesResponse{ + TableName: m.TableName, + Gtid: m.Gtid, + Lastpk: m.Lastpk.CloneVT(), + } + if rhs := m.Fields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Fields = tmpContainer + } + if rhs := m.Pkfields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Pkfields = tmpContainer + } + if rhs := m.Rows; rhs != nil { + tmpContainer := make([]*query.Row, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Rows = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamTablesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *LastPKEvent) CloneVT() *LastPKEvent { + if m == nil { + return (*LastPKEvent)(nil) + } + r := &LastPKEvent{ + TableLastPK: m.TableLastPK.CloneVT(), + Completed: m.Completed, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *LastPKEvent) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TableLastPK) CloneVT() *TableLastPK { + if m == nil { + return (*TableLastPK)(nil) + } + r := &TableLastPK{ + TableName: m.TableName, + Lastpk: m.Lastpk.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TableLastPK) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamResultsRequest) CloneVT() *VStreamResultsRequest { + if m == nil { + return (*VStreamResultsRequest)(nil) + } + r := &VStreamResultsRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamResultsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamResultsResponse) CloneVT() *VStreamResultsResponse { + if m == nil { + return (*VStreamResultsResponse)(nil) + } + r := &VStreamResultsResponse{ + Gtid: m.Gtid, + } + if rhs := m.Fields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Fields = tmpContainer + } + if rhs := m.Rows; rhs != nil { + tmpContainer := make([]*query.Row, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Rows = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamResultsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Charset) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -355,12 +870,12 @@ func (m *StreamTablesResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StreamTablesResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *Charset) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StreamTablesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Charset) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -372,20 +887,25 @@ func (m *StreamTablesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.BinlogTransaction != nil { - size, err := m.BinlogTransaction.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if m.Server != 0 { + i = encodeVarint(dAtA, i, uint64(m.Server)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x18 + } + if m.Conn != 0 { + i = encodeVarint(dAtA, i, uint64(m.Conn)) + i-- + dAtA[i] = 0x10 + } + if m.Client != 0 { + i = encodeVarint(dAtA, i, uint64(m.Client)) + i-- + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *CharsetConversion) MarshalVT() (dAtA []byte, err error) { +func (m *BinlogTransaction_Statement) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -398,12 +918,12 @@ func (m *CharsetConversion) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CharsetConversion) MarshalToVT(dAtA []byte) (int, error) { +func (m *BinlogTransaction_Statement) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *CharsetConversion) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *BinlogTransaction_Statement) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -415,24 +935,32 @@ func (m *CharsetConversion) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.ToCharset) > 0 { - i -= len(m.ToCharset) - copy(dAtA[i:], m.ToCharset) - i = encodeVarint(dAtA, i, uint64(len(m.ToCharset))) + if len(m.Sql) > 0 { + i -= len(m.Sql) + copy(dAtA[i:], m.Sql) + i = encodeVarint(dAtA, i, uint64(len(m.Sql))) + i-- + dAtA[i] = 0x1a + } + if m.Charset != nil { + size, err := m.Charset.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0x12 } - if len(m.FromCharset) > 0 { - i -= len(m.FromCharset) - copy(dAtA[i:], m.FromCharset) - i = encodeVarint(dAtA, i, uint64(len(m.FromCharset))) + if m.Category != 0 { + i = encodeVarint(dAtA, i, uint64(m.Category)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *Rule) MarshalVT() (dAtA []byte, err error) { +func (m *BinlogTransaction) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -445,12 +973,12 @@ func (m *Rule) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Rule) MarshalToVT(dAtA []byte) (int, error) { +func (m *BinlogTransaction) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *Rule) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *BinlogTransaction) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -462,108 +990,32 @@ func (m *Rule) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.ConvertIntToEnum) > 0 { - for k := range m.ConvertIntToEnum { - v := m.ConvertIntToEnum[k] - baseI := i - i-- - if v { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x42 + if m.EventToken != nil { + size, err := m.EventToken.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } - } - if len(m.SourceUniqueKeyTargetColumns) > 0 { - i -= len(m.SourceUniqueKeyTargetColumns) - copy(dAtA[i:], m.SourceUniqueKeyTargetColumns) - i = encodeVarint(dAtA, i, uint64(len(m.SourceUniqueKeyTargetColumns))) - i-- - dAtA[i] = 0x3a - } - if len(m.TargetUniqueKeyColumns) > 0 { - i -= len(m.TargetUniqueKeyColumns) - copy(dAtA[i:], m.TargetUniqueKeyColumns) - i = encodeVarint(dAtA, i, uint64(len(m.TargetUniqueKeyColumns))) - i-- - dAtA[i] = 0x32 - } - if len(m.SourceUniqueKeyColumns) > 0 { - i -= len(m.SourceUniqueKeyColumns) - copy(dAtA[i:], m.SourceUniqueKeyColumns) - i = encodeVarint(dAtA, i, uint64(len(m.SourceUniqueKeyColumns))) + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x22 } - if len(m.ConvertCharset) > 0 { - for k := range m.ConvertCharset { - v := m.ConvertCharset[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if len(m.Statements) > 0 { + for iNdEx := len(m.Statements) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Statements[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 - } - } - if len(m.ConvertEnumToText) > 0 { - for k := range m.ConvertEnumToText { - v := m.ConvertEnumToText[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarint(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a } } - if len(m.Filter) > 0 { - i -= len(m.Filter) - copy(dAtA[i:], m.Filter) - i = encodeVarint(dAtA, i, uint64(len(m.Filter))) - i-- - dAtA[i] = 0x12 - } - if len(m.Match) > 0 { - i -= len(m.Match) - copy(dAtA[i:], m.Match) - i = encodeVarint(dAtA, i, uint64(len(m.Match))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *Filter) MarshalVT() (dAtA []byte, err error) { +func (m *StreamKeyRangeRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -576,12 +1028,12 @@ func (m *Filter) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Filter) MarshalToVT(dAtA []byte) (int, error) { +func (m *StreamKeyRangeRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *Filter) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StreamKeyRangeRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -593,39 +1045,37 @@ func (m *Filter) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.WorkflowName) > 0 { - i -= len(m.WorkflowName) - copy(dAtA[i:], m.WorkflowName) - i = encodeVarint(dAtA, i, uint64(len(m.WorkflowName))) + if m.Charset != nil { + size, err := m.Charset.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x1a } - if m.WorkflowType != 0 { - i = encodeVarint(dAtA, i, uint64(m.WorkflowType)) + if m.KeyRange != nil { + size, err := m.KeyRange.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x18 + dAtA[i] = 0x12 } - if m.FieldEventMode != 0 { - i = encodeVarint(dAtA, i, uint64(m.FieldEventMode)) + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) i-- - dAtA[i] = 0x10 - } - if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Rules[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *BinlogSource) MarshalVT() (dAtA []byte, err error) { +func (m *StreamKeyRangeResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -638,12 +1088,12 @@ func (m *BinlogSource) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BinlogSource) MarshalToVT(dAtA []byte) (int, error) { +func (m *StreamKeyRangeResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *BinlogSource) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StreamKeyRangeResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -655,101 +1105,20 @@ func (m *BinlogSource) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.TargetTimeZone) > 0 { - i -= len(m.TargetTimeZone) - copy(dAtA[i:], m.TargetTimeZone) - i = encodeVarint(dAtA, i, uint64(len(m.TargetTimeZone))) - i-- - dAtA[i] = 0x62 - } - if len(m.SourceTimeZone) > 0 { - i -= len(m.SourceTimeZone) - copy(dAtA[i:], m.SourceTimeZone) - i = encodeVarint(dAtA, i, uint64(len(m.SourceTimeZone))) - i-- - dAtA[i] = 0x5a - } - if len(m.ExternalCluster) > 0 { - i -= len(m.ExternalCluster) - copy(dAtA[i:], m.ExternalCluster) - i = encodeVarint(dAtA, i, uint64(len(m.ExternalCluster))) - i-- - dAtA[i] = 0x52 - } - if m.StopAfterCopy { - i-- - if m.StopAfterCopy { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - } - if len(m.ExternalMysql) > 0 { - i -= len(m.ExternalMysql) - copy(dAtA[i:], m.ExternalMysql) - i = encodeVarint(dAtA, i, uint64(len(m.ExternalMysql))) - i-- - dAtA[i] = 0x42 - } - if m.OnDdl != 0 { - i = encodeVarint(dAtA, i, uint64(m.OnDdl)) - i-- - dAtA[i] = 0x38 - } - if m.Filter != nil { - size, err := m.Filter.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x32 - } - if len(m.Tables) > 0 { - for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Tables[iNdEx]) - copy(dAtA[i:], m.Tables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if m.KeyRange != nil { - size, err := m.KeyRange.MarshalToSizedBufferVT(dAtA[:i]) + if m.BinlogTransaction != nil { + size, err := m.BinlogTransaction.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x22 - } - if m.TabletType != 0 { - i = encodeVarint(dAtA, i, uint64(m.TabletType)) - i-- - dAtA[i] = 0x18 - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RowChange) MarshalVT() (dAtA []byte, err error) { +func (m *StreamTablesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -762,12 +1131,12 @@ func (m *RowChange) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RowChange) MarshalToVT(dAtA []byte) (int, error) { +func (m *StreamTablesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RowChange) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StreamTablesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -779,30 +1148,36 @@ func (m *RowChange) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.After != nil { - size, err := m.After.MarshalToSizedBufferVT(dAtA[:i]) + if m.Charset != nil { + size, err := m.Charset.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } - if m.Before != nil { - size, err := m.Before.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RowEvent) MarshalVT() (dAtA []byte, err error) { +func (m *StreamTablesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -815,12 +1190,12 @@ func (m *RowEvent) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RowEvent) MarshalToVT(dAtA []byte) (int, error) { +func (m *StreamTablesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RowEvent) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StreamTablesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -832,43 +1207,20 @@ func (m *RowEvent) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x22 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0x1a - } - if len(m.RowChanges) > 0 { - for iNdEx := len(m.RowChanges) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.RowChanges[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + if m.BinlogTransaction != nil { + size, err := m.BinlogTransaction.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } - } - if len(m.TableName) > 0 { - i -= len(m.TableName) - copy(dAtA[i:], m.TableName) - i = encodeVarint(dAtA, i, uint64(len(m.TableName))) + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *FieldEvent) MarshalVT() (dAtA []byte, err error) { +func (m *CharsetConversion) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -881,12 +1233,12 @@ func (m *FieldEvent) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *FieldEvent) MarshalToVT(dAtA []byte) (int, error) { +func (m *CharsetConversion) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *FieldEvent) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CharsetConversion) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -898,43 +1250,24 @@ func (m *FieldEvent) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x22 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if len(m.ToCharset) > 0 { + i -= len(m.ToCharset) + copy(dAtA[i:], m.ToCharset) + i = encodeVarint(dAtA, i, uint64(len(m.ToCharset))) i-- - dAtA[i] = 0x1a - } - if len(m.Fields) > 0 { - for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Fields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - } + dAtA[i] = 0x12 } - if len(m.TableName) > 0 { - i -= len(m.TableName) - copy(dAtA[i:], m.TableName) - i = encodeVarint(dAtA, i, uint64(len(m.TableName))) + if len(m.FromCharset) > 0 { + i -= len(m.FromCharset) + copy(dAtA[i:], m.FromCharset) + i = encodeVarint(dAtA, i, uint64(len(m.FromCharset))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ShardGtid) MarshalVT() (dAtA []byte, err error) { +func (m *Rule) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -947,12 +1280,12 @@ func (m *ShardGtid) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ShardGtid) MarshalToVT(dAtA []byte) (int, error) { +func (m *Rule) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ShardGtid) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Rule) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -964,43 +1297,108 @@ func (m *ShardGtid) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.TablePKs) > 0 { - for iNdEx := len(m.TablePKs) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.TablePKs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if len(m.ConvertIntToEnum) > 0 { + for k := range m.ConvertIntToEnum { + v := m.ConvertIntToEnum[k] + baseI := i + i-- + if v { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x42 + } + } + if len(m.SourceUniqueKeyTargetColumns) > 0 { + i -= len(m.SourceUniqueKeyTargetColumns) + copy(dAtA[i:], m.SourceUniqueKeyTargetColumns) + i = encodeVarint(dAtA, i, uint64(len(m.SourceUniqueKeyTargetColumns))) + i-- + dAtA[i] = 0x3a + } + if len(m.TargetUniqueKeyColumns) > 0 { + i -= len(m.TargetUniqueKeyColumns) + copy(dAtA[i:], m.TargetUniqueKeyColumns) + i = encodeVarint(dAtA, i, uint64(len(m.TargetUniqueKeyColumns))) + i-- + dAtA[i] = 0x32 + } + if len(m.SourceUniqueKeyColumns) > 0 { + i -= len(m.SourceUniqueKeyColumns) + copy(dAtA[i:], m.SourceUniqueKeyColumns) + i = encodeVarint(dAtA, i, uint64(len(m.SourceUniqueKeyColumns))) + i-- + dAtA[i] = 0x2a + } + if len(m.ConvertCharset) > 0 { + for k := range m.ConvertCharset { + v := m.ConvertCharset[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- dAtA[i] = 0x22 } } - if len(m.Gtid) > 0 { - i -= len(m.Gtid) - copy(dAtA[i:], m.Gtid) - i = encodeVarint(dAtA, i, uint64(len(m.Gtid))) - i-- - dAtA[i] = 0x1a + if len(m.ConvertEnumToText) > 0 { + for k := range m.ConvertEnumToText { + v := m.ConvertEnumToText[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if len(m.Filter) > 0 { + i -= len(m.Filter) + copy(dAtA[i:], m.Filter) + i = encodeVarint(dAtA, i, uint64(len(m.Filter))) i-- dAtA[i] = 0x12 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if len(m.Match) > 0 { + i -= len(m.Match) + copy(dAtA[i:], m.Match) + i = encodeVarint(dAtA, i, uint64(len(m.Match))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *VGtid) MarshalVT() (dAtA []byte, err error) { +func (m *Filter) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -1013,12 +1411,12 @@ func (m *VGtid) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VGtid) MarshalToVT(dAtA []byte) (int, error) { +func (m *Filter) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VGtid) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Filter) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -1030,9 +1428,26 @@ func (m *VGtid) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.ShardGtids) > 0 { - for iNdEx := len(m.ShardGtids) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.ShardGtids[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if len(m.WorkflowName) > 0 { + i -= len(m.WorkflowName) + copy(dAtA[i:], m.WorkflowName) + i = encodeVarint(dAtA, i, uint64(len(m.WorkflowName))) + i-- + dAtA[i] = 0x22 + } + if m.WorkflowType != 0 { + i = encodeVarint(dAtA, i, uint64(m.WorkflowType)) + i-- + dAtA[i] = 0x18 + } + if m.FieldEventMode != 0 { + i = encodeVarint(dAtA, i, uint64(m.FieldEventMode)) + i-- + dAtA[i] = 0x10 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rules[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -1045,7 +1460,7 @@ func (m *VGtid) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *KeyspaceShard) MarshalVT() (dAtA []byte, err error) { +func (m *BinlogSource) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -1058,12 +1473,12 @@ func (m *KeyspaceShard) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *KeyspaceShard) MarshalToVT(dAtA []byte) (int, error) { +func (m *BinlogSource) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *KeyspaceShard) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *BinlogSource) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -1075,24 +1490,101 @@ func (m *KeyspaceShard) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if len(m.TargetTimeZone) > 0 { + i -= len(m.TargetTimeZone) + copy(dAtA[i:], m.TargetTimeZone) + i = encodeVarint(dAtA, i, uint64(len(m.TargetTimeZone))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x62 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if len(m.SourceTimeZone) > 0 { + i -= len(m.SourceTimeZone) + copy(dAtA[i:], m.SourceTimeZone) + i = encodeVarint(dAtA, i, uint64(len(m.SourceTimeZone))) + i-- + dAtA[i] = 0x5a + } + if len(m.ExternalCluster) > 0 { + i -= len(m.ExternalCluster) + copy(dAtA[i:], m.ExternalCluster) + i = encodeVarint(dAtA, i, uint64(len(m.ExternalCluster))) + i-- + dAtA[i] = 0x52 + } + if m.StopAfterCopy { + i-- + if m.StopAfterCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if len(m.ExternalMysql) > 0 { + i -= len(m.ExternalMysql) + copy(dAtA[i:], m.ExternalMysql) + i = encodeVarint(dAtA, i, uint64(len(m.ExternalMysql))) + i-- + dAtA[i] = 0x42 + } + if m.OnDdl != 0 { + i = encodeVarint(dAtA, i, uint64(m.OnDdl)) + i-- + dAtA[i] = 0x38 + } + if m.Filter != nil { + size, err := m.Filter.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.KeyRange != nil { + size, err := m.KeyRange.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.TabletType != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletType)) + i-- + dAtA[i] = 0x18 + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *Journal) MarshalVT() (dAtA []byte, err error) { +func (m *RowChange_Bitmap) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -1105,12 +1597,12 @@ func (m *Journal) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Journal) MarshalToVT(dAtA []byte) (int, error) { +func (m *RowChange_Bitmap) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *Journal) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RowChange_Bitmap) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -1122,69 +1614,22 @@ func (m *Journal) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.SourceWorkflows) > 0 { - for iNdEx := len(m.SourceWorkflows) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.SourceWorkflows[iNdEx]) - copy(dAtA[i:], m.SourceWorkflows[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.SourceWorkflows[iNdEx]))) - i-- - dAtA[i] = 0x3a - } - } - if len(m.Participants) > 0 { - for iNdEx := len(m.Participants) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Participants[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x32 - } - } - if len(m.ShardGtids) > 0 { - for iNdEx := len(m.ShardGtids) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.ShardGtids[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a - } - } - if len(m.LocalPosition) > 0 { - i -= len(m.LocalPosition) - copy(dAtA[i:], m.LocalPosition) - i = encodeVarint(dAtA, i, uint64(len(m.LocalPosition))) - i-- - dAtA[i] = 0x22 - } - if len(m.Tables) > 0 { - for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Tables[iNdEx]) - copy(dAtA[i:], m.Tables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if m.MigrationType != 0 { - i = encodeVarint(dAtA, i, uint64(m.MigrationType)) + if len(m.Cols) > 0 { + i -= len(m.Cols) + copy(dAtA[i:], m.Cols) + i = encodeVarint(dAtA, i, uint64(len(m.Cols))) i-- - dAtA[i] = 0x10 + dAtA[i] = 0x12 } - if m.Id != 0 { - i = encodeVarint(dAtA, i, uint64(m.Id)) + if m.Count != 0 { + i = encodeVarint(dAtA, i, uint64(m.Count)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *VEvent) MarshalVT() (dAtA []byte, err error) { +func (m *RowChange) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -1197,12 +1642,12 @@ func (m *VEvent) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VEvent) MarshalToVT(dAtA []byte) (int, error) { +func (m *RowChange) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VEvent) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RowChange) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -1214,130 +1659,40 @@ func (m *VEvent) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Throttled { - i-- - if m.Throttled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc0 - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - } - if m.LastPKEvent != nil { - size, err := m.LastPKEvent.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } - if m.CurrentTime != 0 { - i = encodeVarint(dAtA, i, uint64(m.CurrentTime)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa0 - } - if len(m.Dml) > 0 { - i -= len(m.Dml) - copy(dAtA[i:], m.Dml) - i = encodeVarint(dAtA, i, uint64(len(m.Dml))) - i-- - dAtA[i] = 0x4a - } - if m.Journal != nil { - size, err := m.Journal.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x42 - } - if m.Vgtid != nil { - size, err := m.Vgtid.MarshalToSizedBufferVT(dAtA[:i]) + if m.DataColumns != nil { + size, err := m.DataColumns.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x3a + dAtA[i] = 0x1a } - if m.FieldEvent != nil { - size, err := m.FieldEvent.MarshalToSizedBufferVT(dAtA[:i]) + if m.After != nil { + size, err := m.After.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x32 + dAtA[i] = 0x12 } - if m.RowEvent != nil { - size, err := m.RowEvent.MarshalToSizedBufferVT(dAtA[:i]) + if m.Before != nil { + size, err := m.Before.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x2a - } - if len(m.Statement) > 0 { - i -= len(m.Statement) - copy(dAtA[i:], m.Statement) - i = encodeVarint(dAtA, i, uint64(len(m.Statement))) - i-- - dAtA[i] = 0x22 - } - if len(m.Gtid) > 0 { - i -= len(m.Gtid) - copy(dAtA[i:], m.Gtid) - i = encodeVarint(dAtA, i, uint64(len(m.Gtid))) - i-- - dAtA[i] = 0x1a - } - if m.Timestamp != 0 { - i = encodeVarint(dAtA, i, uint64(m.Timestamp)) - i-- - dAtA[i] = 0x10 - } - if m.Type != 0 { - i = encodeVarint(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *MinimalTable) MarshalVT() (dAtA []byte, err error) { +func (m *RowEvent) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -1350,12 +1705,12 @@ func (m *MinimalTable) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *MinimalTable) MarshalToVT(dAtA []byte) (int, error) { +func (m *RowEvent) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *MinimalTable) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RowEvent) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -1367,30 +1722,28 @@ func (m *MinimalTable) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.PKColumns) > 0 { - var pksize2 int - for _, num := range m.PKColumns { - pksize2 += sov(uint64(num)) - } - i -= pksize2 - j1 := i - for _, num1 := range m.PKColumns { - num := uint64(num1) - for num >= 1<<7 { - dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j1++ - } - dAtA[j1] = uint8(num) - j1++ - } - i = encodeVarint(dAtA, i, uint64(pksize2)) + if m.Flags != 0 { + i = encodeVarint(dAtA, i, uint64(m.Flags)) + i-- + dAtA[i] = 0x28 + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x22 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) i-- dAtA[i] = 0x1a } - if len(m.Fields) > 0 { - for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Fields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if len(m.RowChanges) > 0 { + for iNdEx := len(m.RowChanges) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.RowChanges[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -1400,17 +1753,17 @@ func (m *MinimalTable) MarshalToSizedBufferVT(dAtA []byte) (int, error) { dAtA[i] = 0x12 } } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) + if len(m.TableName) > 0 { + i -= len(m.TableName) + copy(dAtA[i:], m.TableName) + i = encodeVarint(dAtA, i, uint64(len(m.TableName))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *MinimalSchema) MarshalVT() (dAtA []byte, err error) { +func (m *FieldEvent) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -1423,12 +1776,12 @@ func (m *MinimalSchema) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *MinimalSchema) MarshalToVT(dAtA []byte) (int, error) { +func (m *FieldEvent) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *MinimalSchema) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *FieldEvent) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -1440,22 +1793,43 @@ func (m *MinimalSchema) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Tables) > 0 { - for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Tables[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x22 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x1a + } + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Fields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 } } + if len(m.TableName) > 0 { + i -= len(m.TableName) + copy(dAtA[i:], m.TableName) + i = encodeVarint(dAtA, i, uint64(len(m.TableName))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *VStreamRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ShardGtid) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -1468,12 +1842,12 @@ func (m *VStreamRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VStreamRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ShardGtid) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VStreamRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ShardGtid) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -1485,69 +1859,43 @@ func (m *VStreamRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.TableLastPKs) > 0 { - for iNdEx := len(m.TableLastPKs) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.TableLastPKs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if len(m.TablePKs) > 0 { + for iNdEx := len(m.TablePKs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TablePKs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x32 - } - } - if m.Filter != nil { - size, err := m.Filter.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + dAtA[i] = 0x22 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a - } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) - i-- - dAtA[i] = 0x22 } - if m.Target != nil { - size, err := m.Target.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.Gtid) > 0 { + i -= len(m.Gtid) + copy(dAtA[i:], m.Gtid) + i = encodeVarint(dAtA, i, uint64(len(m.Gtid))) i-- dAtA[i] = 0x1a } - if m.ImmediateCallerId != nil { - size, err := m.ImmediateCallerId.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) i-- dAtA[i] = 0x12 } - if m.EffectiveCallerId != nil { - size, err := m.EffectiveCallerId.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *VStreamResponse) MarshalVT() (dAtA []byte, err error) { +func (m *VGtid) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -1560,12 +1908,12 @@ func (m *VStreamResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VStreamResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *VGtid) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VStreamResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *VGtid) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -1577,9 +1925,9 @@ func (m *VStreamResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if len(m.ShardGtids) > 0 { + for iNdEx := len(m.ShardGtids) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ShardGtids[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -1592,7 +1940,7 @@ func (m *VStreamResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *VStreamRowsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *KeyspaceShard) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -1605,12 +1953,12 @@ func (m *VStreamRowsRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VStreamRowsRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *KeyspaceShard) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VStreamRowsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *KeyspaceShard) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -1622,57 +1970,24 @@ func (m *VStreamRowsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Lastpk != nil { - size, err := m.Lastpk.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a - } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarint(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0x22 - } - if m.Target != nil { - size, err := m.Target.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a - } - if m.ImmediateCallerId != nil { - size, err := m.ImmediateCallerId.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) i-- dAtA[i] = 0x12 } - if m.EffectiveCallerId != nil { - size, err := m.EffectiveCallerId.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *VStreamRowsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *Journal) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -1685,12 +2000,12 @@ func (m *VStreamRowsResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VStreamRowsResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *Journal) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VStreamRowsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Journal) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -1702,83 +2017,69 @@ func (m *VStreamRowsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Heartbeat { - i-- - if m.Heartbeat { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - } - if m.Throttled { - i-- - if m.Throttled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - } - if m.Lastpk != nil { - size, err := m.Lastpk.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if len(m.SourceWorkflows) > 0 { + for iNdEx := len(m.SourceWorkflows) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SourceWorkflows[iNdEx]) + copy(dAtA[i:], m.SourceWorkflows[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.SourceWorkflows[iNdEx]))) + i-- + dAtA[i] = 0x3a } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a } - if len(m.Rows) > 0 { - for iNdEx := len(m.Rows) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Rows[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if len(m.Participants) > 0 { + for iNdEx := len(m.Participants) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Participants[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x32 } } - if len(m.Gtid) > 0 { - i -= len(m.Gtid) - copy(dAtA[i:], m.Gtid) - i = encodeVarint(dAtA, i, uint64(len(m.Gtid))) - i-- - dAtA[i] = 0x1a - } - if len(m.Pkfields) > 0 { - for iNdEx := len(m.Pkfields) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Pkfields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if len(m.ShardGtids) > 0 { + for iNdEx := len(m.ShardGtids) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ShardGtids[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x2a } } - if len(m.Fields) > 0 { - for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Fields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.LocalPosition) > 0 { + i -= len(m.LocalPosition) + copy(dAtA[i:], m.LocalPosition) + i = encodeVarint(dAtA, i, uint64(len(m.LocalPosition))) + i-- + dAtA[i] = 0x22 + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) i-- - dAtA[i] = 0xa + dAtA[i] = 0x1a } } + if m.MigrationType != 0 { + i = encodeVarint(dAtA, i, uint64(m.MigrationType)) + i-- + dAtA[i] = 0x10 + } + if m.Id != 0 { + i = encodeVarint(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } return len(dAtA) - i, nil } -func (m *LastPKEvent) MarshalVT() (dAtA []byte, err error) { +func (m *VEvent) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -1791,12 +2092,12 @@ func (m *LastPKEvent) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *LastPKEvent) MarshalToVT(dAtA []byte) (int, error) { +func (m *VEvent) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *LastPKEvent) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *VEvent) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -1808,30 +2109,130 @@ func (m *LastPKEvent) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Completed { + if m.Throttled { i-- - if m.Completed { + if m.Throttled { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x10 + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc0 } - if m.TableLastPK != nil { - size, err := m.TableLastPK.MarshalToSizedBufferVT(dAtA[:i]) + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.LastPKEvent != nil { + size, err := m.LastPKEvent.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.CurrentTime != 0 { + i = encodeVarint(dAtA, i, uint64(m.CurrentTime)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if len(m.Dml) > 0 { + i -= len(m.Dml) + copy(dAtA[i:], m.Dml) + i = encodeVarint(dAtA, i, uint64(len(m.Dml))) + i-- + dAtA[i] = 0x4a + } + if m.Journal != nil { + size, err := m.Journal.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.Vgtid != nil { + size, err := m.Vgtid.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.FieldEvent != nil { + size, err := m.FieldEvent.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.RowEvent != nil { + size, err := m.RowEvent.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if len(m.Statement) > 0 { + i -= len(m.Statement) + copy(dAtA[i:], m.Statement) + i = encodeVarint(dAtA, i, uint64(len(m.Statement))) + i-- + dAtA[i] = 0x22 + } + if len(m.Gtid) > 0 { + i -= len(m.Gtid) + copy(dAtA[i:], m.Gtid) + i = encodeVarint(dAtA, i, uint64(len(m.Gtid))) + i-- + dAtA[i] = 0x1a + } + if m.Timestamp != 0 { + i = encodeVarint(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Type != 0 { + i = encodeVarint(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *TableLastPK) MarshalVT() (dAtA []byte, err error) { +func (m *MinimalTable) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -1844,12 +2245,12 @@ func (m *TableLastPK) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TableLastPK) MarshalToVT(dAtA []byte) (int, error) { +func (m *MinimalTable) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *TableLastPK) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MinimalTable) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -1861,27 +2262,50 @@ func (m *TableLastPK) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Lastpk != nil { - size, err := m.Lastpk.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if len(m.PKColumns) > 0 { + var pksize2 int + for _, num := range m.PKColumns { + pksize2 += sov(uint64(num)) } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + i -= pksize2 + j1 := i + for _, num1 := range m.PKColumns { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) i-- dAtA[i] = 0x1a } - if len(m.TableName) > 0 { - i -= len(m.TableName) - copy(dAtA[i:], m.TableName) - i = encodeVarint(dAtA, i, uint64(len(m.TableName))) + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Fields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *VStreamResultsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *MinimalSchema) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -1894,12 +2318,12 @@ func (m *VStreamResultsRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VStreamResultsRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *MinimalSchema) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VStreamResultsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MinimalSchema) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -1911,47 +2335,22 @@ func (m *VStreamResultsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarint(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0x22 - } - if m.Target != nil { - size, err := m.Target.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a - } - if m.ImmediateCallerId != nil { - size, err := m.ImmediateCallerId.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Tables[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - } - if m.EffectiveCallerId != nil { - size, err := m.EffectiveCallerId.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *VStreamResultsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *VStreamRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -1964,12 +2363,12 @@ func (m *VStreamResultsResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VStreamResultsResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *VStreamRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VStreamResultsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *VStreamRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -1981,28 +2380,101 @@ func (m *VStreamResultsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Rows) > 0 { - for iNdEx := len(m.Rows) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Rows[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if len(m.TableLastPKs) > 0 { + for iNdEx := len(m.TableLastPKs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TableLastPKs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x32 } } - if len(m.Gtid) > 0 { - i -= len(m.Gtid) - copy(dAtA[i:], m.Gtid) - i = encodeVarint(dAtA, i, uint64(len(m.Gtid))) + if m.Filter != nil { + size, err := m.Filter.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + size, err := m.Target.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0x1a } - if len(m.Fields) > 0 { - for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Fields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if m.ImmediateCallerId != nil { + size, err := m.ImmediateCallerId.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + size, err := m.EffectiveCallerId.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VStreamResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VStreamResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VStreamResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -2015,91 +2487,717 @@ func (m *VStreamResultsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error return len(dAtA) - i, nil } -func encodeVarint(dAtA []byte, offset int, v uint64) int { - offset -= sov(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *VStreamRowsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - dAtA[offset] = uint8(v) - return base + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -var vtprotoPool_VStreamRowsResponse = sync.Pool{ - New: func() interface{} { - return &VStreamRowsResponse{} - }, +func (m *VStreamRowsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VStreamRowsResponse) ResetVT() { - for _, mm := range m.Rows { - mm.ResetVT() - } - m.Lastpk.ReturnToVTPool() - m.Reset() -} -func (m *VStreamRowsResponse) ReturnToVTPool() { - if m != nil { - m.ResetVT() - vtprotoPool_VStreamRowsResponse.Put(m) - } -} -func VStreamRowsResponseFromVTPool() *VStreamRowsResponse { - return vtprotoPool_VStreamRowsResponse.Get().(*VStreamRowsResponse) -} -func (m *Charset) SizeVT() (n int) { +func (m *VStreamRowsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Client != 0 { - n += 1 + sov(uint64(m.Client)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.Conn != 0 { - n += 1 + sov(uint64(m.Conn)) + if m.Lastpk != nil { + size, err := m.Lastpk.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a } - if m.Server != 0 { - n += 1 + sov(uint64(m.Server)) + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0x22 } - n += len(m.unknownFields) - return n + if m.Target != nil { + size, err := m.Target.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + size, err := m.ImmediateCallerId.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + size, err := m.EffectiveCallerId.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *BinlogTransaction_Statement) SizeVT() (n int) { +func (m *VStreamRowsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.Category != 0 { - n += 1 + sov(uint64(m.Category)) - } - if m.Charset != nil { - l = m.Charset.SizeVT() - n += 1 + l + sov(uint64(l)) + return nil, nil } - l = len(m.Sql) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *BinlogTransaction) SizeVT() (n int) { +func (m *VStreamRowsResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VStreamRowsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Statements) > 0 { - for _, e := range m.Statements { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Heartbeat { + i-- + if m.Heartbeat { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.Throttled { + i-- + if m.Throttled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.Lastpk != nil { + size, err := m.Lastpk.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if len(m.Rows) > 0 { + for iNdEx := len(m.Rows) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rows[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Gtid) > 0 { + i -= len(m.Gtid) + copy(dAtA[i:], m.Gtid) + i = encodeVarint(dAtA, i, uint64(len(m.Gtid))) + i-- + dAtA[i] = 0x1a + } + if len(m.Pkfields) > 0 { + for iNdEx := len(m.Pkfields) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Pkfields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Fields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *VStreamTablesRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VStreamTablesRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VStreamTablesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Target != nil { + size, err := m.Target.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + size, err := m.ImmediateCallerId.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + size, err := m.EffectiveCallerId.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VStreamTablesResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VStreamTablesResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VStreamTablesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Lastpk != nil { + size, err := m.Lastpk.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.Rows) > 0 { + for iNdEx := len(m.Rows) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rows[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Gtid) > 0 { + i -= len(m.Gtid) + copy(dAtA[i:], m.Gtid) + i = encodeVarint(dAtA, i, uint64(len(m.Gtid))) + i-- + dAtA[i] = 0x22 + } + if len(m.Pkfields) > 0 { + for iNdEx := len(m.Pkfields) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Pkfields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Fields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.TableName) > 0 { + i -= len(m.TableName) + copy(dAtA[i:], m.TableName) + i = encodeVarint(dAtA, i, uint64(len(m.TableName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LastPKEvent) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LastPKEvent) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LastPKEvent) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Completed { + i-- + if m.Completed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.TableLastPK != nil { + size, err := m.TableLastPK.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TableLastPK) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TableLastPK) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TableLastPK) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Lastpk != nil { + size, err := m.Lastpk.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.TableName) > 0 { + i -= len(m.TableName) + copy(dAtA[i:], m.TableName) + i = encodeVarint(dAtA, i, uint64(len(m.TableName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VStreamResultsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VStreamResultsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VStreamResultsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0x22 + } + if m.Target != nil { + size, err := m.Target.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.ImmediateCallerId != nil { + size, err := m.ImmediateCallerId.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.EffectiveCallerId != nil { + size, err := m.EffectiveCallerId.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VStreamResultsResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VStreamResultsResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VStreamResultsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Rows) > 0 { + for iNdEx := len(m.Rows) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rows[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Gtid) > 0 { + i -= len(m.Gtid) + copy(dAtA[i:], m.Gtid) + i = encodeVarint(dAtA, i, uint64(len(m.Gtid))) + i-- + dAtA[i] = 0x1a + } + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Fields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} + +var vtprotoPool_VStreamRowsResponse = sync.Pool{ + New: func() interface{} { + return &VStreamRowsResponse{} + }, +} + +func (m *VStreamRowsResponse) ResetVT() { + for _, mm := range m.Fields { + mm.Reset() + } + f0 := m.Fields[:0] + for _, mm := range m.Pkfields { + mm.Reset() + } + f1 := m.Pkfields[:0] + for _, mm := range m.Rows { + mm.ResetVT() + } + f2 := m.Rows[:0] + m.Lastpk.ReturnToVTPool() + m.Reset() + m.Fields = f0 + m.Pkfields = f1 + m.Rows = f2 +} +func (m *VStreamRowsResponse) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_VStreamRowsResponse.Put(m) + } +} +func VStreamRowsResponseFromVTPool() *VStreamRowsResponse { + return vtprotoPool_VStreamRowsResponse.Get().(*VStreamRowsResponse) +} + +var vtprotoPool_VStreamTablesResponse = sync.Pool{ + New: func() interface{} { + return &VStreamTablesResponse{} + }, +} + +func (m *VStreamTablesResponse) ResetVT() { + for _, mm := range m.Fields { + mm.Reset() + } + f0 := m.Fields[:0] + for _, mm := range m.Pkfields { + mm.Reset() + } + f1 := m.Pkfields[:0] + for _, mm := range m.Rows { + mm.ResetVT() + } + f2 := m.Rows[:0] + m.Lastpk.ReturnToVTPool() + m.Reset() + m.Fields = f0 + m.Pkfields = f1 + m.Rows = f2 +} +func (m *VStreamTablesResponse) ReturnToVTPool() { + if m != nil { + m.ResetVT() + vtprotoPool_VStreamTablesResponse.Put(m) + } +} +func VStreamTablesResponseFromVTPool() *VStreamTablesResponse { + return vtprotoPool_VStreamTablesResponse.Get().(*VStreamTablesResponse) +} +func (m *Charset) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Client != 0 { + n += 1 + sov(uint64(m.Client)) + } + if m.Conn != 0 { + n += 1 + sov(uint64(m.Conn)) + } + if m.Server != 0 { + n += 1 + sov(uint64(m.Server)) + } + n += len(m.unknownFields) + return n +} + +func (m *BinlogTransaction_Statement) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Category != 0 { + n += 1 + sov(uint64(m.Category)) + } + if m.Charset != nil { + l = m.Charset.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Sql) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *BinlogTransaction) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Statements) > 0 { + for _, e := range m.Statements { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } } if m.EventToken != nil { l = m.EventToken.SizeVT() @@ -2343,6 +3441,23 @@ func (m *BinlogSource) SizeVT() (n int) { return n } +func (m *RowChange_Bitmap) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != 0 { + n += 1 + sov(uint64(m.Count)) + } + l = len(m.Cols) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + func (m *RowChange) SizeVT() (n int) { if m == nil { return 0 @@ -2357,6 +3472,10 @@ func (m *RowChange) SizeVT() (n int) { l = m.After.SizeVT() n += 1 + l + sov(uint64(l)) } + if m.DataColumns != nil { + l = m.DataColumns.SizeVT() + n += 1 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -2385,6 +3504,9 @@ func (m *RowEvent) SizeVT() (n int) { if l > 0 { n += 1 + l + sov(uint64(l)) } + if m.Flags != 0 { + n += 1 + sov(uint64(m.Flags)) + } n += len(m.unknownFields) return n } @@ -2752,6 +3874,68 @@ func (m *VStreamRowsResponse) SizeVT() (n int) { return n } +func (m *VStreamTablesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EffectiveCallerId != nil { + l = m.EffectiveCallerId.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.ImmediateCallerId != nil { + l = m.ImmediateCallerId.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Target != nil { + l = m.Target.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VStreamTablesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TableName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Pkfields) > 0 { + for _, e := range m.Pkfields { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.Gtid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Rows) > 0 { + for _, e := range m.Rows { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.Lastpk != nil { + l = m.Lastpk.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + func (m *LastPKEvent) SizeVT() (n int) { if m == nil { return 0 @@ -3694,7 +4878,122 @@ func (m *StreamTablesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *CharsetConversion) UnmarshalVT(dAtA []byte) error { +func (m *CharsetConversion) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CharsetConversion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CharsetConversion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FromCharset", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FromCharset = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ToCharset", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ToCharset = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3717,15 +5016,47 @@ func (m *CharsetConversion) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CharsetConversion: wiretype end group for non-group") + return fmt.Errorf("proto: Rule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CharsetConversion: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FromCharset", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Match", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Match = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3742,22 +5073,278 @@ func (m *CharsetConversion) UnmarshalVT(dAtA []byte) error { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FromCharset = string(dAtA[iNdEx:postIndex]) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConvertEnumToText", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConvertEnumToText == nil { + m.ConvertEnumToText = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ConvertEnumToText[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConvertCharset", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConvertCharset == nil { + m.ConvertCharset = make(map[string]*CharsetConversion) + } + var mapkey string + var mapvalue *CharsetConversion + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &CharsetConversion{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ConvertCharset[mapkey] = mapvalue iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ToCharset", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceUniqueKeyColumns", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3785,62 +5372,11 @@ func (m *CharsetConversion) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ToCharset = string(dAtA[iNdEx:postIndex]) + m.SourceUniqueKeyColumns = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Rule) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Rule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Match", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetUniqueKeyColumns", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3868,11 +5404,11 @@ func (m *Rule) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Match = string(dAtA[iNdEx:postIndex]) + m.TargetUniqueKeyColumns = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceUniqueKeyTargetColumns", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3900,11 +5436,11 @@ func (m *Rule) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Filter = string(dAtA[iNdEx:postIndex]) + m.SourceUniqueKeyTargetColumns = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConvertEnumToText", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConvertIntToEnum", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3931,11 +5467,11 @@ func (m *Rule) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConvertEnumToText == nil { - m.ConvertEnumToText = make(map[string]string) + if m.ConvertIntToEnum == nil { + m.ConvertIntToEnum = make(map[string]bool) } var mapkey string - var mapvalue string + var mapvalue bool for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -3984,7 +5520,7 @@ func (m *Rule) UnmarshalVT(dAtA []byte) error { mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { - var stringLenmapvalue uint64 + var mapvaluetemp int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -3994,24 +5530,12 @@ func (m *Rule) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift + mapvaluetemp |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLength - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLength - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue + mapvalue = bool(mapvaluetemp != 0) } else { iNdEx = entryPreIndex skippy, err := skip(dAtA[iNdEx:]) @@ -4027,13 +5551,136 @@ func (m *Rule) UnmarshalVT(dAtA []byte) error { iNdEx += skippy } } - m.ConvertEnumToText[mapkey] = mapvalue - iNdEx = postIndex + m.ConvertIntToEnum[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Filter) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, &Rule{}) + if err := m.Rules[len(m.Rules)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldEventMode", wireType) + } + m.FieldEventMode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FieldEventMode |= Filter_FieldEventMode(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowType", wireType) + } + m.WorkflowType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WorkflowType |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConvertCharset", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4043,124 +5690,78 @@ func (m *Rule) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConvertCharset == nil { - m.ConvertCharset = make(map[string]*CharsetConversion) + m.WorkflowName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - var mapkey string - var mapvalue *CharsetConversion - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &CharsetConversion{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength } - m.ConvertCharset[mapkey] = mapvalue - iNdEx = postIndex - case 5: + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BinlogSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BinlogSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceUniqueKeyColumns", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4188,11 +5789,11 @@ func (m *Rule) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceUniqueKeyColumns = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetUniqueKeyColumns", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4220,11 +5821,66 @@ func (m *Rule) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TargetUniqueKeyColumns = string(dAtA[iNdEx:postIndex]) + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + } + m.TabletType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletType |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KeyRange == nil { + m.KeyRange = &topodata.KeyRange{} + } + if err := m.KeyRange.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 7: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceUniqueKeyTargetColumns", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4252,11 +5908,11 @@ func (m *Rule) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceUniqueKeyTargetColumns = string(dAtA[iNdEx:postIndex]) + m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 8: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConvertIntToEnum", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4283,148 +5939,37 @@ func (m *Rule) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConvertIntToEnum == nil { - m.ConvertIntToEnum = make(map[string]bool) - } - var mapkey string - var mapvalue bool - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapvaluetemp int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapvaluetemp |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - mapvalue = bool(mapvaluetemp != 0) - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if m.Filter == nil { + m.Filter = &Filter{} } - m.ConvertIntToEnum[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { + if err := m.Filter.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Filter) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.OnDdl = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OnDdl |= OnDDLAction(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Filter: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Filter: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExternalMysql", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4434,31 +5979,29 @@ func (m *Filter) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, &Rule{}) - if err := m.Rules[len(m.Rules)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ExternalMysql = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 9: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldEventMode", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StopAfterCopy", wireType) } - m.FieldEventMode = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4468,16 +6011,17 @@ func (m *Filter) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FieldEventMode |= Filter_FieldEventMode(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkflowType", wireType) + m.StopAfterCopy = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalCluster", wireType) } - m.WorkflowType = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4487,14 +6031,59 @@ func (m *Filter) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.WorkflowType |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 4: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalCluster = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkflowName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceTimeZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceTimeZone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetTimeZone", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4522,7 +6111,7 @@ func (m *Filter) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.WorkflowName = string(dAtA[iNdEx:postIndex]) + m.TargetTimeZone = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4546,7 +6135,7 @@ func (m *Filter) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { +func (m *RowChange_Bitmap) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4569,17 +6158,17 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BinlogSource: wiretype end group for non-group") + return fmt.Errorf("proto: RowChange_Bitmap: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BinlogSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RowChange_Bitmap: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) } - var stringLen uint64 + m.Count = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4589,29 +6178,16 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Count |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cols", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4621,46 +6197,80 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.Cols = append(m.Cols[:0], dAtA[iNdEx:postIndex]...) + if m.Cols == nil { + m.Cols = []byte{} + } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - m.TabletType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TabletType |= topodata.TabletType(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength } - case 4: + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RowChange) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RowChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RowChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyRange", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Before", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4687,18 +6297,18 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.KeyRange == nil { - m.KeyRange = &topodata.KeyRange{} + if m.Before == nil { + m.Before = &query.Row{} } - if err := m.KeyRange.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Before.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field After", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4708,27 +6318,31 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) + if m.After == nil { + m.After = &query.Row{} + } + if err := m.After.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DataColumns", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4755,35 +6369,67 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Filter == nil { - m.Filter = &Filter{} + if m.DataColumns == nil { + m.DataColumns = &RowChange_Bitmap{} } - if err := m.Filter.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.DataColumns.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - m.OnDdl = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OnDdl |= OnDDLAction(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength } - case 8: + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RowEvent) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RowEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RowEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalMysql", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4811,33 +6457,13 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalMysql = string(dAtA[iNdEx:postIndex]) + m.TableName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StopAfterCopy", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.StopAfterCopy = bool(v != 0) - case 10: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalCluster", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowChanges", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -4847,27 +6473,29 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalCluster = string(dAtA[iNdEx:postIndex]) + m.RowChanges = append(m.RowChanges, &RowChange{}) + if err := m.RowChanges[len(m.RowChanges)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 11: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceTimeZone", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4895,11 +6523,11 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceTimeZone = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 12: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetTimeZone", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4927,8 +6555,27 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TargetTimeZone = string(dAtA[iNdEx:postIndex]) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + m.Flags = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Flags |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -4951,7 +6598,7 @@ func (m *BinlogSource) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RowChange) UnmarshalVT(dAtA []byte) error { +func (m *FieldEvent) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4974,15 +6621,47 @@ func (m *RowChange) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RowChange: wiretype end group for non-group") + return fmt.Errorf("proto: FieldEvent: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RowChange: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FieldEvent: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Before", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5009,18 +6688,16 @@ func (m *RowChange) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Before == nil { - m.Before = &query.Row{} - } - if err := m.Before.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Fields = append(m.Fields, &query.Field{}) + if err := m.Fields[len(m.Fields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field After", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5030,27 +6707,55 @@ func (m *RowChange) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.After == nil { - m.After = &query.Row{} + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - if err := m.After.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5074,7 +6779,7 @@ func (m *RowChange) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RowEvent) UnmarshalVT(dAtA []byte) error { +func (m *ShardGtid) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5097,15 +6802,15 @@ func (m *RowEvent) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RowEvent: wiretype end group for non-group") + return fmt.Errorf("proto: ShardGtid: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RowEvent: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardGtid: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5133,13 +6838,13 @@ func (m *RowEvent) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TableName = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RowChanges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5149,29 +6854,27 @@ func (m *RowEvent) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.RowChanges = append(m.RowChanges, &RowChange{}) - if err := m.RowChanges[len(m.RowChanges)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Gtid", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5199,13 +6902,13 @@ func (m *RowEvent) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Gtid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TablePKs", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5215,23 +6918,25 @@ func (m *RowEvent) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.TablePKs = append(m.TablePKs, &TableLastPK{}) + if err := m.TablePKs[len(m.TablePKs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -5255,7 +6960,7 @@ func (m *RowEvent) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *FieldEvent) UnmarshalVT(dAtA []byte) error { +func (m *VGtid) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5278,17 +6983,17 @@ func (m *FieldEvent) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FieldEvent: wiretype end group for non-group") + return fmt.Errorf("proto: VGtid: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FieldEvent: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VGtid: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShardGtids", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5298,59 +7003,78 @@ func (m *FieldEvent) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.TableName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + m.ShardGtids = append(m.ShardGtids, &ShardGtid{}) + if err := m.ShardGtids[len(m.ShardGtids)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - if msglen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyspaceShard) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Fields = append(m.Fields, &query.Field{}) - if err := m.Fields[len(m.Fields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 3: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyspaceShard: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyspaceShard: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } @@ -5382,7 +7106,7 @@ func (m *FieldEvent) UnmarshalVT(dAtA []byte) error { } m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } @@ -5436,7 +7160,7 @@ func (m *FieldEvent) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ShardGtid) UnmarshalVT(dAtA []byte) error { +func (m *Journal) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5459,15 +7183,53 @@ func (m *ShardGtid) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShardGtid: wiretype end group for non-group") + return fmt.Errorf("proto: Journal: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShardGtid: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Journal: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MigrationType", wireType) + } + m.MigrationType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MigrationType |= MigrationType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5495,11 +7257,11 @@ func (m *ShardGtid) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocalPosition", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5527,13 +7289,13 @@ func (m *ShardGtid) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.LocalPosition = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Gtid", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShardGtids", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5543,27 +7305,29 @@ func (m *ShardGtid) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Gtid = string(dAtA[iNdEx:postIndex]) + m.ShardGtids = append(m.ShardGtids, &ShardGtid{}) + if err := m.ShardGtids[len(m.ShardGtids)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TablePKs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Participants", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5590,67 +7354,16 @@ func (m *ShardGtid) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TablePKs = append(m.TablePKs, &TableLastPK{}) - if err := m.TablePKs[len(m.TablePKs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Participants = append(m.Participants, &KeyspaceShard{}) + if err := m.Participants[len(m.Participants)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VGtid) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VGtid: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VGtid: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardGtids", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceWorkflows", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5660,25 +7373,23 @@ func (m *VGtid) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ShardGtids = append(m.ShardGtids, &ShardGtid{}) - if err := m.ShardGtids[len(m.ShardGtids)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.SourceWorkflows = append(m.SourceWorkflows, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -5702,7 +7413,7 @@ func (m *VGtid) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *KeyspaceShard) UnmarshalVT(dAtA []byte) error { +func (m *VEvent) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5725,15 +7436,53 @@ func (m *KeyspaceShard) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KeyspaceShard: wiretype end group for non-group") + return fmt.Errorf("proto: VEvent: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KeyspaceShard: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VEvent: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= VEventType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Gtid", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5761,11 +7510,11 @@ func (m *KeyspaceShard) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Gtid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Statement", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5793,64 +7542,49 @@ func (m *KeyspaceShard) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.Statement = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RowEvent", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Journal) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if m.RowEvent == nil { + m.RowEvent = &RowEvent{} + } + if err := m.RowEvent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Journal: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Journal: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldEvent", wireType) } - m.Id = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5860,16 +7594,33 @@ func (m *Journal) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Id |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MigrationType", wireType) + if msglen < 0 { + return ErrInvalidLength } - m.MigrationType = 0 + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldEvent == nil { + m.FieldEvent = &FieldEvent{} + } + if err := m.FieldEvent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vgtid", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5879,16 +7630,33 @@ func (m *Journal) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MigrationType |= MigrationType(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 3: + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Vgtid == nil { + m.Vgtid = &VGtid{} + } + if err := m.Vgtid.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Journal", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5898,27 +7666,31 @@ func (m *Journal) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) + if m.Journal == nil { + m.Journal = &Journal{} + } + if err := m.Journal.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalPosition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Dml", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5946,11 +7718,30 @@ func (m *Journal) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.LocalPosition = string(dAtA[iNdEx:postIndex]) + m.Dml = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentTime", wireType) + } + m.CurrentTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 21: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardGtids", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastPKEvent", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5977,16 +7768,18 @@ func (m *Journal) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ShardGtids = append(m.ShardGtids, &ShardGtid{}) - if err := m.ShardGtids[len(m.ShardGtids)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.LastPKEvent == nil { + m.LastPKEvent = &LastPKEvent{} + } + if err := m.LastPKEvent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 22: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Participants", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -5996,29 +7789,27 @@ func (m *Journal) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Participants = append(m.Participants, &KeyspaceShard{}) - if err := m.Participants[len(m.Participants)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 23: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceWorkflows", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6046,8 +7837,28 @@ func (m *Journal) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceWorkflows = append(m.SourceWorkflows, string(dAtA[iNdEx:postIndex])) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 24: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Throttled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Throttled = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -6070,7 +7881,7 @@ func (m *Journal) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VEvent) UnmarshalVT(dAtA []byte) error { +func (m *MinimalTable) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6085,93 +7896,23 @@ func (m *VEvent) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VEvent: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VEvent: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= VEventType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - m.Timestamp = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Timestamp |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Gtid", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.Gtid = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MinimalTable: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MinimalTable: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Statement", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6199,11 +7940,11 @@ func (m *VEvent) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Statement = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RowEvent", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6230,88 +7971,141 @@ func (m *VEvent) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RowEvent == nil { - m.RowEvent = &RowEvent{} - } - if err := m.RowEvent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Fields = append(m.Fields, &query.Field{}) + if err := m.Fields[len(m.Fields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldEvent", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + case 3: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.PKColumns = append(m.PKColumns, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + if packedLen < 0 { + return ErrInvalidLength } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FieldEvent == nil { - m.FieldEvent = &FieldEvent{} - } - if err := m.FieldEvent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vgtid", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PKColumns) == 0 { + m.PKColumns = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PKColumns = append(m.PKColumns, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PKColumns", wireType) } - if msglen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.Vgtid == nil { - m.Vgtid = &VGtid{} + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MinimalSchema) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if err := m.Vgtid.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 8: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MinimalSchema: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MinimalSchema: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Journal", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6338,67 +8132,65 @@ func (m *VEvent) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Journal == nil { - m.Journal = &Journal{} - } - if err := m.Journal.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Tables = append(m.Tables, &MinimalTable{}) + if err := m.Tables[len(m.Tables)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dml", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VStreamRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Dml = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 20: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentTime", wireType) - } - m.CurrentTime = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentTime |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - case 21: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VStreamRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VStreamRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastPKEvent", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EffectiveCallerId", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6425,18 +8217,18 @@ func (m *VEvent) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.LastPKEvent == nil { - m.LastPKEvent = &LastPKEvent{} + if m.EffectiveCallerId == nil { + m.EffectiveCallerId = &vtrpc.CallerID{} } - if err := m.LastPKEvent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.EffectiveCallerId.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 22: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImmediateCallerId", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6446,29 +8238,33 @@ func (m *VEvent) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + if m.ImmediateCallerId == nil { + m.ImmediateCallerId = &query.VTGateCallerID{} + } + if err := m.ImmediateCallerId.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 23: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6478,98 +8274,31 @@ func (m *VEvent) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 24: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Throttled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if m.Target == nil { + m.Target = &query.Target{} } - m.Throttled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { + if err := m.Target.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MinimalTable) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MinimalTable: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MinimalTable: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6597,11 +8326,11 @@ func (m *MinimalTable) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Position = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6628,87 +8357,47 @@ func (m *MinimalTable) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Fields = append(m.Fields, &query.Field{}) - if err := m.Fields[len(m.Fields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Filter == nil { + m.Filter = &Filter{} + } + if err := m.Filter.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType == 0 { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PKColumns = append(m.PKColumns, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLength + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableLastPKs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.PKColumns) == 0 { - m.PKColumns = make([]int64, 0, elementCount) - } - for iNdEx < postIndex { - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PKColumns = append(m.PKColumns, v) + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field PKColumns", wireType) } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableLastPKs = append(m.TableLastPKs, &TableLastPK{}) + if err := m.TableLastPKs[len(m.TableLastPKs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -6731,7 +8420,7 @@ func (m *MinimalTable) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *MinimalSchema) UnmarshalVT(dAtA []byte) error { +func (m *VStreamResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6754,15 +8443,15 @@ func (m *MinimalSchema) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MinimalSchema: wiretype end group for non-group") + return fmt.Errorf("proto: VStreamResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MinimalSchema: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VStreamResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6789,8 +8478,8 @@ func (m *MinimalSchema) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tables = append(m.Tables, &MinimalTable{}) - if err := m.Tables[len(m.Tables)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Events = append(m.Events, &VEvent{}) + if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6816,7 +8505,7 @@ func (m *MinimalSchema) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VStreamRequest) UnmarshalVT(dAtA []byte) error { +func (m *VStreamRowsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6839,10 +8528,10 @@ func (m *VStreamRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VStreamRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VStreamRowsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VStreamRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VStreamRowsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -6955,7 +8644,7 @@ func (m *VStreamRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6983,11 +8672,11 @@ func (m *VStreamRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Position = string(dAtA[iNdEx:postIndex]) + m.Query = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Lastpk", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7014,16 +8703,222 @@ func (m *VStreamRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Filter == nil { - m.Filter = &Filter{} + if m.Lastpk == nil { + m.Lastpk = &query.QueryResult{} } - if err := m.Filter.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Lastpk.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VStreamRowsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VStreamRowsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if len(m.Fields) == cap(m.Fields) { + m.Fields = append(m.Fields, &query.Field{}) + } else { + m.Fields = m.Fields[:len(m.Fields)+1] + if m.Fields[len(m.Fields)-1] == nil { + m.Fields[len(m.Fields)-1] = &query.Field{} + } + } + if err := m.Fields[len(m.Fields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pkfields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if len(m.Pkfields) == cap(m.Pkfields) { + m.Pkfields = append(m.Pkfields, &query.Field{}) + } else { + m.Pkfields = m.Pkfields[:len(m.Pkfields)+1] + if m.Pkfields[len(m.Pkfields)-1] == nil { + m.Pkfields[len(m.Pkfields)-1] = &query.Field{} + } + } + if err := m.Pkfields[len(m.Pkfields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gtid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gtid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rows", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if len(m.Rows) == cap(m.Rows) { + m.Rows = append(m.Rows, &query.Row{}) + } else { + m.Rows = m.Rows[:len(m.Rows)+1] + if m.Rows[len(m.Rows)-1] == nil { + m.Rows[len(m.Rows)-1] = &query.Row{} + } + } + if err := m.Rows[len(m.Rows)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TableLastPKs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Lastpk", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7050,67 +8945,18 @@ func (m *VStreamRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TableLastPKs = append(m.TableLastPKs, &TableLastPK{}) - if err := m.TableLastPKs[len(m.TableLastPKs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Lastpk == nil { + m.Lastpk = query.RowFromVTPool() } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { + if err := m.Lastpk.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VStreamResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VStreamResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VStreamResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Throttled", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -7120,26 +8966,32 @@ func (m *VStreamResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF + m.Throttled = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Heartbeat", wireType) } - m.Events = append(m.Events, &VEvent{}) - if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex + m.Heartbeat = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -7162,7 +9014,7 @@ func (m *VStreamResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VStreamRowsRequest) UnmarshalVT(dAtA []byte) error { +func (m *VStreamTablesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7185,10 +9037,10 @@ func (m *VStreamRowsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VStreamRowsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VStreamTablesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VStreamRowsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VStreamTablesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -7299,74 +9151,6 @@ func (m *VStreamRowsRequest) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Query = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lastpk", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Lastpk == nil { - m.Lastpk = &query.QueryResult{} - } - if err := m.Lastpk.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -7389,7 +9173,7 @@ func (m *VStreamRowsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error { +func (m *VStreamTablesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7412,13 +9196,45 @@ func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VStreamRowsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: VStreamTablesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VStreamRowsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VStreamTablesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) } @@ -7459,7 +9275,7 @@ func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Pkfields", wireType) } @@ -7500,7 +9316,7 @@ func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Gtid", wireType) } @@ -7532,7 +9348,7 @@ func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error { } m.Gtid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Rows", wireType) } @@ -7573,7 +9389,7 @@ func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 5: + case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Lastpk", wireType) } @@ -7609,46 +9425,6 @@ func (m *VStreamRowsResponse) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Throttled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Throttled = bool(v != 0) - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Heartbeat", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Heartbeat = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/go/vt/proto/binlogservice/binlogservice.pb.go b/go/vt/proto/binlogservice/binlogservice.pb.go index 44b47ac33f7..4eac50296c1 100644 --- a/go/vt/proto/binlogservice/binlogservice.pb.go +++ b/go/vt/proto/binlogservice/binlogservice.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: binlogservice.proto diff --git a/go/vt/proto/logutil/logutil.pb.go b/go/vt/proto/logutil/logutil.pb.go index 4e41806d4a8..b2675716168 100644 --- a/go/vt/proto/logutil/logutil.pb.go +++ b/go/vt/proto/logutil/logutil.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: logutil.proto diff --git a/go/vt/proto/logutil/logutil_vtproto.pb.go b/go/vt/proto/logutil/logutil_vtproto.pb.go index 234c26eea93..1d3ccb74271 100644 --- a/go/vt/proto/logutil/logutil_vtproto.pb.go +++ b/go/vt/proto/logutil/logutil_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: logutil.proto package logutil import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -19,6 +20,28 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *Event) CloneVT() *Event { + if m == nil { + return (*Event)(nil) + } + r := &Event{ + Time: m.Time.CloneVT(), + Level: m.Level, + File: m.File, + Line: m.Line, + Value: m.Value, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Event) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Event) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil diff --git a/go/vt/proto/mysqlctl/mysqlctl.pb.go b/go/vt/proto/mysqlctl/mysqlctl.pb.go index b6a407ea601..19f70887681 100644 --- a/go/vt/proto/mysqlctl/mysqlctl.pb.go +++ b/go/vt/proto/mysqlctl/mysqlctl.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: mysqlctl.proto @@ -97,7 +97,7 @@ func (x BackupInfo_Status) Number() protoreflect.EnumNumber { // Deprecated: Use BackupInfo_Status.Descriptor instead. func (BackupInfo_Status) EnumDescriptor() ([]byte, []int) { - return file_mysqlctl_proto_rawDescGZIP(), []int{10, 0} + return file_mysqlctl_proto_rawDescGZIP(), []int{16, 0} } type StartRequest struct { @@ -346,6 +346,229 @@ func (*RunMysqlUpgradeResponse) Descriptor() ([]byte, []int) { return file_mysqlctl_proto_rawDescGZIP(), []int{5} } +type ApplyBinlogFileRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BinlogFileName string `protobuf:"bytes,1,opt,name=binlog_file_name,json=binlogFileName,proto3" json:"binlog_file_name,omitempty"` + BinlogRestorePosition string `protobuf:"bytes,2,opt,name=binlog_restore_position,json=binlogRestorePosition,proto3" json:"binlog_restore_position,omitempty"` + BinlogRestoreDatetime *vttime.Time `protobuf:"bytes,3,opt,name=binlog_restore_datetime,json=binlogRestoreDatetime,proto3" json:"binlog_restore_datetime,omitempty"` +} + +func (x *ApplyBinlogFileRequest) Reset() { + *x = ApplyBinlogFileRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_mysqlctl_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyBinlogFileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyBinlogFileRequest) ProtoMessage() {} + +func (x *ApplyBinlogFileRequest) ProtoReflect() protoreflect.Message { + mi := &file_mysqlctl_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyBinlogFileRequest.ProtoReflect.Descriptor instead. +func (*ApplyBinlogFileRequest) Descriptor() ([]byte, []int) { + return file_mysqlctl_proto_rawDescGZIP(), []int{6} +} + +func (x *ApplyBinlogFileRequest) GetBinlogFileName() string { + if x != nil { + return x.BinlogFileName + } + return "" +} + +func (x *ApplyBinlogFileRequest) GetBinlogRestorePosition() string { + if x != nil { + return x.BinlogRestorePosition + } + return "" +} + +func (x *ApplyBinlogFileRequest) GetBinlogRestoreDatetime() *vttime.Time { + if x != nil { + return x.BinlogRestoreDatetime + } + return nil +} + +type ApplyBinlogFileResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ApplyBinlogFileResponse) Reset() { + *x = ApplyBinlogFileResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_mysqlctl_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyBinlogFileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyBinlogFileResponse) ProtoMessage() {} + +func (x *ApplyBinlogFileResponse) ProtoReflect() protoreflect.Message { + mi := &file_mysqlctl_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyBinlogFileResponse.ProtoReflect.Descriptor instead. +func (*ApplyBinlogFileResponse) Descriptor() ([]byte, []int) { + return file_mysqlctl_proto_rawDescGZIP(), []int{7} +} + +type ReadBinlogFilesTimestampsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BinlogFileNames []string `protobuf:"bytes,1,rep,name=binlog_file_names,json=binlogFileNames,proto3" json:"binlog_file_names,omitempty"` +} + +func (x *ReadBinlogFilesTimestampsRequest) Reset() { + *x = ReadBinlogFilesTimestampsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_mysqlctl_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadBinlogFilesTimestampsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadBinlogFilesTimestampsRequest) ProtoMessage() {} + +func (x *ReadBinlogFilesTimestampsRequest) ProtoReflect() protoreflect.Message { + mi := &file_mysqlctl_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadBinlogFilesTimestampsRequest.ProtoReflect.Descriptor instead. +func (*ReadBinlogFilesTimestampsRequest) Descriptor() ([]byte, []int) { + return file_mysqlctl_proto_rawDescGZIP(), []int{8} +} + +func (x *ReadBinlogFilesTimestampsRequest) GetBinlogFileNames() []string { + if x != nil { + return x.BinlogFileNames + } + return nil +} + +type ReadBinlogFilesTimestampsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // FirstTimestamp is the timestamp of the first found transaction searching in order of given binlog files + FirstTimestamp *vttime.Time `protobuf:"bytes,1,opt,name=first_timestamp,json=firstTimestamp,proto3" json:"first_timestamp,omitempty"` + // FirstTimestampBinlog is the name of the binary log in which the first timestamp is found + FirstTimestampBinlog string `protobuf:"bytes,2,opt,name=first_timestamp_binlog,json=firstTimestampBinlog,proto3" json:"first_timestamp_binlog,omitempty"` + // LastTimestamp is the timestamp of the last found transaction in given binlog files + LastTimestamp *vttime.Time `protobuf:"bytes,3,opt,name=last_timestamp,json=lastTimestamp,proto3" json:"last_timestamp,omitempty"` + // LastTimestampBinlog is the name of the binary log in which the last timestamp is found + LastTimestampBinlog string `protobuf:"bytes,4,opt,name=last_timestamp_binlog,json=lastTimestampBinlog,proto3" json:"last_timestamp_binlog,omitempty"` +} + +func (x *ReadBinlogFilesTimestampsResponse) Reset() { + *x = ReadBinlogFilesTimestampsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_mysqlctl_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadBinlogFilesTimestampsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadBinlogFilesTimestampsResponse) ProtoMessage() {} + +func (x *ReadBinlogFilesTimestampsResponse) ProtoReflect() protoreflect.Message { + mi := &file_mysqlctl_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadBinlogFilesTimestampsResponse.ProtoReflect.Descriptor instead. +func (*ReadBinlogFilesTimestampsResponse) Descriptor() ([]byte, []int) { + return file_mysqlctl_proto_rawDescGZIP(), []int{9} +} + +func (x *ReadBinlogFilesTimestampsResponse) GetFirstTimestamp() *vttime.Time { + if x != nil { + return x.FirstTimestamp + } + return nil +} + +func (x *ReadBinlogFilesTimestampsResponse) GetFirstTimestampBinlog() string { + if x != nil { + return x.FirstTimestampBinlog + } + return "" +} + +func (x *ReadBinlogFilesTimestampsResponse) GetLastTimestamp() *vttime.Time { + if x != nil { + return x.LastTimestamp + } + return nil +} + +func (x *ReadBinlogFilesTimestampsResponse) GetLastTimestampBinlog() string { + if x != nil { + return x.LastTimestampBinlog + } + return "" +} + type ReinitConfigRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -355,7 +578,7 @@ type ReinitConfigRequest struct { func (x *ReinitConfigRequest) Reset() { *x = ReinitConfigRequest{} if protoimpl.UnsafeEnabled { - mi := &file_mysqlctl_proto_msgTypes[6] + mi := &file_mysqlctl_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -368,7 +591,7 @@ func (x *ReinitConfigRequest) String() string { func (*ReinitConfigRequest) ProtoMessage() {} func (x *ReinitConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_mysqlctl_proto_msgTypes[6] + mi := &file_mysqlctl_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -381,7 +604,7 @@ func (x *ReinitConfigRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReinitConfigRequest.ProtoReflect.Descriptor instead. func (*ReinitConfigRequest) Descriptor() ([]byte, []int) { - return file_mysqlctl_proto_rawDescGZIP(), []int{6} + return file_mysqlctl_proto_rawDescGZIP(), []int{10} } type ReinitConfigResponse struct { @@ -393,7 +616,7 @@ type ReinitConfigResponse struct { func (x *ReinitConfigResponse) Reset() { *x = ReinitConfigResponse{} if protoimpl.UnsafeEnabled { - mi := &file_mysqlctl_proto_msgTypes[7] + mi := &file_mysqlctl_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -406,7 +629,7 @@ func (x *ReinitConfigResponse) String() string { func (*ReinitConfigResponse) ProtoMessage() {} func (x *ReinitConfigResponse) ProtoReflect() protoreflect.Message { - mi := &file_mysqlctl_proto_msgTypes[7] + mi := &file_mysqlctl_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -419,7 +642,7 @@ func (x *ReinitConfigResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReinitConfigResponse.ProtoReflect.Descriptor instead. func (*ReinitConfigResponse) Descriptor() ([]byte, []int) { - return file_mysqlctl_proto_rawDescGZIP(), []int{7} + return file_mysqlctl_proto_rawDescGZIP(), []int{11} } type RefreshConfigRequest struct { @@ -431,7 +654,7 @@ type RefreshConfigRequest struct { func (x *RefreshConfigRequest) Reset() { *x = RefreshConfigRequest{} if protoimpl.UnsafeEnabled { - mi := &file_mysqlctl_proto_msgTypes[8] + mi := &file_mysqlctl_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -444,7 +667,7 @@ func (x *RefreshConfigRequest) String() string { func (*RefreshConfigRequest) ProtoMessage() {} func (x *RefreshConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_mysqlctl_proto_msgTypes[8] + mi := &file_mysqlctl_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -457,7 +680,7 @@ func (x *RefreshConfigRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RefreshConfigRequest.ProtoReflect.Descriptor instead. func (*RefreshConfigRequest) Descriptor() ([]byte, []int) { - return file_mysqlctl_proto_rawDescGZIP(), []int{8} + return file_mysqlctl_proto_rawDescGZIP(), []int{12} } type RefreshConfigResponse struct { @@ -469,7 +692,7 @@ type RefreshConfigResponse struct { func (x *RefreshConfigResponse) Reset() { *x = RefreshConfigResponse{} if protoimpl.UnsafeEnabled { - mi := &file_mysqlctl_proto_msgTypes[9] + mi := &file_mysqlctl_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -482,7 +705,7 @@ func (x *RefreshConfigResponse) String() string { func (*RefreshConfigResponse) ProtoMessage() {} func (x *RefreshConfigResponse) ProtoReflect() protoreflect.Message { - mi := &file_mysqlctl_proto_msgTypes[9] + mi := &file_mysqlctl_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -495,7 +718,92 @@ func (x *RefreshConfigResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RefreshConfigResponse.ProtoReflect.Descriptor instead. func (*RefreshConfigResponse) Descriptor() ([]byte, []int) { - return file_mysqlctl_proto_rawDescGZIP(), []int{9} + return file_mysqlctl_proto_rawDescGZIP(), []int{13} +} + +type VersionStringRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *VersionStringRequest) Reset() { + *x = VersionStringRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_mysqlctl_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VersionStringRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VersionStringRequest) ProtoMessage() {} + +func (x *VersionStringRequest) ProtoReflect() protoreflect.Message { + mi := &file_mysqlctl_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersionStringRequest.ProtoReflect.Descriptor instead. +func (*VersionStringRequest) Descriptor() ([]byte, []int) { + return file_mysqlctl_proto_rawDescGZIP(), []int{14} +} + +type VersionStringResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *VersionStringResponse) Reset() { + *x = VersionStringResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_mysqlctl_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VersionStringResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VersionStringResponse) ProtoMessage() {} + +func (x *VersionStringResponse) ProtoReflect() protoreflect.Message { + mi := &file_mysqlctl_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersionStringResponse.ProtoReflect.Descriptor instead. +func (*VersionStringResponse) Descriptor() ([]byte, []int) { + return file_mysqlctl_proto_rawDescGZIP(), []int{15} +} + +func (x *VersionStringResponse) GetVersion() string { + if x != nil { + return x.Version + } + return "" } // BackupInfo is the read-only attributes of a mysqlctl/backupstorage.BackupHandle. @@ -519,7 +827,7 @@ type BackupInfo struct { func (x *BackupInfo) Reset() { *x = BackupInfo{} if protoimpl.UnsafeEnabled { - mi := &file_mysqlctl_proto_msgTypes[10] + mi := &file_mysqlctl_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -532,7 +840,7 @@ func (x *BackupInfo) String() string { func (*BackupInfo) ProtoMessage() {} func (x *BackupInfo) ProtoReflect() protoreflect.Message { - mi := &file_mysqlctl_proto_msgTypes[10] + mi := &file_mysqlctl_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -545,7 +853,7 @@ func (x *BackupInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use BackupInfo.ProtoReflect.Descriptor instead. func (*BackupInfo) Descriptor() ([]byte, []int) { - return file_mysqlctl_proto_rawDescGZIP(), []int{10} + return file_mysqlctl_proto_rawDescGZIP(), []int{16} } func (x *BackupInfo) GetName() string { @@ -622,64 +930,121 @@ var file_mysqlctl_proto_rawDesc = []byte{ 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x15, - 0x0a, 0x13, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, - 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x17, 0x0a, 0x15, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xe6, - 0x02, 0x0a, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, - 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, - 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x20, 0x0a, 0x04, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, - 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, - 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, - 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x4b, 0x0a, 0x06, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, - 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x02, 0x12, - 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, - 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x04, 0x32, 0x8a, 0x03, 0x0a, 0x08, 0x4d, 0x79, 0x73, 0x71, - 0x6c, 0x43, 0x74, 0x6c, 0x12, 0x3a, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x16, 0x2e, - 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, - 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x43, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x19, 0x2e, 0x6d, + 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc0, + 0x01, 0x0a, 0x16, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x17, 0x62, + 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x64, 0x61, + 0x74, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, + 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x15, 0x62, 0x69, 0x6e, 0x6c, + 0x6f, 0x67, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x61, 0x74, 0x65, 0x74, 0x69, 0x6d, + 0x65, 0x22, 0x19, 0x0a, 0x17, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, + 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4e, 0x0a, 0x20, + 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x2a, 0x0a, 0x11, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0xf9, 0x01, 0x0a, + 0x21, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0e, 0x66, 0x69, 0x72, 0x73, 0x74, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x34, 0x0a, 0x16, 0x66, 0x69, 0x72, + 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x66, 0x69, 0x72, 0x73, 0x74, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x12, + 0x33, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x12, 0x32, 0x0a, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x69, 0x6e, + 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x16, 0x0a, 0x14, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x17, 0x0a, 0x15, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x22, 0x31, 0x0a, 0x15, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0xe6, 0x02, 0x0a, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x12, 0x20, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, + 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x04, 0x74, 0x69, + 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x6d, 0x79, 0x73, + 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, + 0x4b, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x43, 0x4f, 0x4d, 0x50, + 0x4c, 0x45, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, + 0x54, 0x45, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, + 0x03, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x04, 0x32, 0xb0, 0x05, 0x0a, + 0x08, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x43, 0x74, 0x6c, 0x12, 0x3a, 0x0a, 0x05, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x12, 0x16, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, 0x79, 0x73, + 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, + 0x6e, 0x12, 0x19, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x68, 0x75, + 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, - 0x74, 0x6c, 0x2e, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, - 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, - 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, - 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x79, 0x73, - 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, - 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x4f, 0x0a, 0x0c, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x1d, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x69, 0x6e, 0x69, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, - 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x52, 0x0a, 0x0d, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x1e, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x66, - 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x66, - 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x42, 0x27, 0x5a, 0x25, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, - 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x52, 0x75, + 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x20, 0x2e, + 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, + 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x21, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x75, 0x6e, 0x4d, 0x79, + 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, + 0x74, 0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x79, 0x73, 0x71, + 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, + 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x76, + 0x0a, 0x19, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, + 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x12, 0x2a, 0x2e, 0x6d, 0x79, + 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, + 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, + 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, + 0x6c, 0x2e, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, + 0x2e, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x52, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, + 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, + 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x2e, 0x6d, + 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, + 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, + 0x27, 0x5a, 0x25, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, + 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -695,42 +1060,57 @@ func file_mysqlctl_proto_rawDescGZIP() []byte { } var file_mysqlctl_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_mysqlctl_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_mysqlctl_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_mysqlctl_proto_goTypes = []interface{}{ - (BackupInfo_Status)(0), // 0: mysqlctl.BackupInfo.Status - (*StartRequest)(nil), // 1: mysqlctl.StartRequest - (*StartResponse)(nil), // 2: mysqlctl.StartResponse - (*ShutdownRequest)(nil), // 3: mysqlctl.ShutdownRequest - (*ShutdownResponse)(nil), // 4: mysqlctl.ShutdownResponse - (*RunMysqlUpgradeRequest)(nil), // 5: mysqlctl.RunMysqlUpgradeRequest - (*RunMysqlUpgradeResponse)(nil), // 6: mysqlctl.RunMysqlUpgradeResponse - (*ReinitConfigRequest)(nil), // 7: mysqlctl.ReinitConfigRequest - (*ReinitConfigResponse)(nil), // 8: mysqlctl.ReinitConfigResponse - (*RefreshConfigRequest)(nil), // 9: mysqlctl.RefreshConfigRequest - (*RefreshConfigResponse)(nil), // 10: mysqlctl.RefreshConfigResponse - (*BackupInfo)(nil), // 11: mysqlctl.BackupInfo - (*topodata.TabletAlias)(nil), // 12: topodata.TabletAlias - (*vttime.Time)(nil), // 13: vttime.Time + (BackupInfo_Status)(0), // 0: mysqlctl.BackupInfo.Status + (*StartRequest)(nil), // 1: mysqlctl.StartRequest + (*StartResponse)(nil), // 2: mysqlctl.StartResponse + (*ShutdownRequest)(nil), // 3: mysqlctl.ShutdownRequest + (*ShutdownResponse)(nil), // 4: mysqlctl.ShutdownResponse + (*RunMysqlUpgradeRequest)(nil), // 5: mysqlctl.RunMysqlUpgradeRequest + (*RunMysqlUpgradeResponse)(nil), // 6: mysqlctl.RunMysqlUpgradeResponse + (*ApplyBinlogFileRequest)(nil), // 7: mysqlctl.ApplyBinlogFileRequest + (*ApplyBinlogFileResponse)(nil), // 8: mysqlctl.ApplyBinlogFileResponse + (*ReadBinlogFilesTimestampsRequest)(nil), // 9: mysqlctl.ReadBinlogFilesTimestampsRequest + (*ReadBinlogFilesTimestampsResponse)(nil), // 10: mysqlctl.ReadBinlogFilesTimestampsResponse + (*ReinitConfigRequest)(nil), // 11: mysqlctl.ReinitConfigRequest + (*ReinitConfigResponse)(nil), // 12: mysqlctl.ReinitConfigResponse + (*RefreshConfigRequest)(nil), // 13: mysqlctl.RefreshConfigRequest + (*RefreshConfigResponse)(nil), // 14: mysqlctl.RefreshConfigResponse + (*VersionStringRequest)(nil), // 15: mysqlctl.VersionStringRequest + (*VersionStringResponse)(nil), // 16: mysqlctl.VersionStringResponse + (*BackupInfo)(nil), // 17: mysqlctl.BackupInfo + (*vttime.Time)(nil), // 18: vttime.Time + (*topodata.TabletAlias)(nil), // 19: topodata.TabletAlias } var file_mysqlctl_proto_depIdxs = []int32{ - 12, // 0: mysqlctl.BackupInfo.tablet_alias:type_name -> topodata.TabletAlias - 13, // 1: mysqlctl.BackupInfo.time:type_name -> vttime.Time - 0, // 2: mysqlctl.BackupInfo.status:type_name -> mysqlctl.BackupInfo.Status - 1, // 3: mysqlctl.MysqlCtl.Start:input_type -> mysqlctl.StartRequest - 3, // 4: mysqlctl.MysqlCtl.Shutdown:input_type -> mysqlctl.ShutdownRequest - 5, // 5: mysqlctl.MysqlCtl.RunMysqlUpgrade:input_type -> mysqlctl.RunMysqlUpgradeRequest - 7, // 6: mysqlctl.MysqlCtl.ReinitConfig:input_type -> mysqlctl.ReinitConfigRequest - 9, // 7: mysqlctl.MysqlCtl.RefreshConfig:input_type -> mysqlctl.RefreshConfigRequest - 2, // 8: mysqlctl.MysqlCtl.Start:output_type -> mysqlctl.StartResponse - 4, // 9: mysqlctl.MysqlCtl.Shutdown:output_type -> mysqlctl.ShutdownResponse - 6, // 10: mysqlctl.MysqlCtl.RunMysqlUpgrade:output_type -> mysqlctl.RunMysqlUpgradeResponse - 8, // 11: mysqlctl.MysqlCtl.ReinitConfig:output_type -> mysqlctl.ReinitConfigResponse - 10, // 12: mysqlctl.MysqlCtl.RefreshConfig:output_type -> mysqlctl.RefreshConfigResponse - 8, // [8:13] is the sub-list for method output_type - 3, // [3:8] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name + 18, // 0: mysqlctl.ApplyBinlogFileRequest.binlog_restore_datetime:type_name -> vttime.Time + 18, // 1: mysqlctl.ReadBinlogFilesTimestampsResponse.first_timestamp:type_name -> vttime.Time + 18, // 2: mysqlctl.ReadBinlogFilesTimestampsResponse.last_timestamp:type_name -> vttime.Time + 19, // 3: mysqlctl.BackupInfo.tablet_alias:type_name -> topodata.TabletAlias + 18, // 4: mysqlctl.BackupInfo.time:type_name -> vttime.Time + 0, // 5: mysqlctl.BackupInfo.status:type_name -> mysqlctl.BackupInfo.Status + 1, // 6: mysqlctl.MysqlCtl.Start:input_type -> mysqlctl.StartRequest + 3, // 7: mysqlctl.MysqlCtl.Shutdown:input_type -> mysqlctl.ShutdownRequest + 5, // 8: mysqlctl.MysqlCtl.RunMysqlUpgrade:input_type -> mysqlctl.RunMysqlUpgradeRequest + 7, // 9: mysqlctl.MysqlCtl.ApplyBinlogFile:input_type -> mysqlctl.ApplyBinlogFileRequest + 9, // 10: mysqlctl.MysqlCtl.ReadBinlogFilesTimestamps:input_type -> mysqlctl.ReadBinlogFilesTimestampsRequest + 11, // 11: mysqlctl.MysqlCtl.ReinitConfig:input_type -> mysqlctl.ReinitConfigRequest + 13, // 12: mysqlctl.MysqlCtl.RefreshConfig:input_type -> mysqlctl.RefreshConfigRequest + 15, // 13: mysqlctl.MysqlCtl.VersionString:input_type -> mysqlctl.VersionStringRequest + 2, // 14: mysqlctl.MysqlCtl.Start:output_type -> mysqlctl.StartResponse + 4, // 15: mysqlctl.MysqlCtl.Shutdown:output_type -> mysqlctl.ShutdownResponse + 6, // 16: mysqlctl.MysqlCtl.RunMysqlUpgrade:output_type -> mysqlctl.RunMysqlUpgradeResponse + 8, // 17: mysqlctl.MysqlCtl.ApplyBinlogFile:output_type -> mysqlctl.ApplyBinlogFileResponse + 10, // 18: mysqlctl.MysqlCtl.ReadBinlogFilesTimestamps:output_type -> mysqlctl.ReadBinlogFilesTimestampsResponse + 12, // 19: mysqlctl.MysqlCtl.ReinitConfig:output_type -> mysqlctl.ReinitConfigResponse + 14, // 20: mysqlctl.MysqlCtl.RefreshConfig:output_type -> mysqlctl.RefreshConfigResponse + 16, // 21: mysqlctl.MysqlCtl.VersionString:output_type -> mysqlctl.VersionStringResponse + 14, // [14:22] is the sub-list for method output_type + 6, // [6:14] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name } func init() { file_mysqlctl_proto_init() } @@ -812,7 +1192,7 @@ func file_mysqlctl_proto_init() { } } file_mysqlctl_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReinitConfigRequest); i { + switch v := v.(*ApplyBinlogFileRequest); i { case 0: return &v.state case 1: @@ -824,7 +1204,7 @@ func file_mysqlctl_proto_init() { } } file_mysqlctl_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReinitConfigResponse); i { + switch v := v.(*ApplyBinlogFileResponse); i { case 0: return &v.state case 1: @@ -836,7 +1216,7 @@ func file_mysqlctl_proto_init() { } } file_mysqlctl_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshConfigRequest); i { + switch v := v.(*ReadBinlogFilesTimestampsRequest); i { case 0: return &v.state case 1: @@ -848,7 +1228,7 @@ func file_mysqlctl_proto_init() { } } file_mysqlctl_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshConfigResponse); i { + switch v := v.(*ReadBinlogFilesTimestampsResponse); i { case 0: return &v.state case 1: @@ -860,6 +1240,78 @@ func file_mysqlctl_proto_init() { } } file_mysqlctl_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReinitConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mysqlctl_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReinitConfigResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mysqlctl_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RefreshConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mysqlctl_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RefreshConfigResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mysqlctl_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VersionStringRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mysqlctl_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VersionStringResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mysqlctl_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BackupInfo); i { case 0: return &v.state @@ -878,7 +1330,7 @@ func file_mysqlctl_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_mysqlctl_proto_rawDesc, NumEnums: 1, - NumMessages: 11, + NumMessages: 17, NumExtensions: 0, NumServices: 1, }, diff --git a/go/vt/proto/mysqlctl/mysqlctl_grpc.pb.go b/go/vt/proto/mysqlctl/mysqlctl_grpc.pb.go index de4c1fa518b..6d0fd1a28e8 100644 --- a/go/vt/proto/mysqlctl/mysqlctl_grpc.pb.go +++ b/go/vt/proto/mysqlctl/mysqlctl_grpc.pb.go @@ -25,8 +25,11 @@ type MysqlCtlClient interface { Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) RunMysqlUpgrade(ctx context.Context, in *RunMysqlUpgradeRequest, opts ...grpc.CallOption) (*RunMysqlUpgradeResponse, error) + ApplyBinlogFile(ctx context.Context, in *ApplyBinlogFileRequest, opts ...grpc.CallOption) (*ApplyBinlogFileResponse, error) + ReadBinlogFilesTimestamps(ctx context.Context, in *ReadBinlogFilesTimestampsRequest, opts ...grpc.CallOption) (*ReadBinlogFilesTimestampsResponse, error) ReinitConfig(ctx context.Context, in *ReinitConfigRequest, opts ...grpc.CallOption) (*ReinitConfigResponse, error) RefreshConfig(ctx context.Context, in *RefreshConfigRequest, opts ...grpc.CallOption) (*RefreshConfigResponse, error) + VersionString(ctx context.Context, in *VersionStringRequest, opts ...grpc.CallOption) (*VersionStringResponse, error) } type mysqlCtlClient struct { @@ -64,6 +67,24 @@ func (c *mysqlCtlClient) RunMysqlUpgrade(ctx context.Context, in *RunMysqlUpgrad return out, nil } +func (c *mysqlCtlClient) ApplyBinlogFile(ctx context.Context, in *ApplyBinlogFileRequest, opts ...grpc.CallOption) (*ApplyBinlogFileResponse, error) { + out := new(ApplyBinlogFileResponse) + err := c.cc.Invoke(ctx, "/mysqlctl.MysqlCtl/ApplyBinlogFile", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *mysqlCtlClient) ReadBinlogFilesTimestamps(ctx context.Context, in *ReadBinlogFilesTimestampsRequest, opts ...grpc.CallOption) (*ReadBinlogFilesTimestampsResponse, error) { + out := new(ReadBinlogFilesTimestampsResponse) + err := c.cc.Invoke(ctx, "/mysqlctl.MysqlCtl/ReadBinlogFilesTimestamps", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *mysqlCtlClient) ReinitConfig(ctx context.Context, in *ReinitConfigRequest, opts ...grpc.CallOption) (*ReinitConfigResponse, error) { out := new(ReinitConfigResponse) err := c.cc.Invoke(ctx, "/mysqlctl.MysqlCtl/ReinitConfig", in, out, opts...) @@ -82,6 +103,15 @@ func (c *mysqlCtlClient) RefreshConfig(ctx context.Context, in *RefreshConfigReq return out, nil } +func (c *mysqlCtlClient) VersionString(ctx context.Context, in *VersionStringRequest, opts ...grpc.CallOption) (*VersionStringResponse, error) { + out := new(VersionStringResponse) + err := c.cc.Invoke(ctx, "/mysqlctl.MysqlCtl/VersionString", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // MysqlCtlServer is the server API for MysqlCtl service. // All implementations must embed UnimplementedMysqlCtlServer // for forward compatibility @@ -89,8 +119,11 @@ type MysqlCtlServer interface { Start(context.Context, *StartRequest) (*StartResponse, error) Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error) RunMysqlUpgrade(context.Context, *RunMysqlUpgradeRequest) (*RunMysqlUpgradeResponse, error) + ApplyBinlogFile(context.Context, *ApplyBinlogFileRequest) (*ApplyBinlogFileResponse, error) + ReadBinlogFilesTimestamps(context.Context, *ReadBinlogFilesTimestampsRequest) (*ReadBinlogFilesTimestampsResponse, error) ReinitConfig(context.Context, *ReinitConfigRequest) (*ReinitConfigResponse, error) RefreshConfig(context.Context, *RefreshConfigRequest) (*RefreshConfigResponse, error) + VersionString(context.Context, *VersionStringRequest) (*VersionStringResponse, error) mustEmbedUnimplementedMysqlCtlServer() } @@ -107,12 +140,21 @@ func (UnimplementedMysqlCtlServer) Shutdown(context.Context, *ShutdownRequest) ( func (UnimplementedMysqlCtlServer) RunMysqlUpgrade(context.Context, *RunMysqlUpgradeRequest) (*RunMysqlUpgradeResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RunMysqlUpgrade not implemented") } +func (UnimplementedMysqlCtlServer) ApplyBinlogFile(context.Context, *ApplyBinlogFileRequest) (*ApplyBinlogFileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplyBinlogFile not implemented") +} +func (UnimplementedMysqlCtlServer) ReadBinlogFilesTimestamps(context.Context, *ReadBinlogFilesTimestampsRequest) (*ReadBinlogFilesTimestampsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadBinlogFilesTimestamps not implemented") +} func (UnimplementedMysqlCtlServer) ReinitConfig(context.Context, *ReinitConfigRequest) (*ReinitConfigResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ReinitConfig not implemented") } func (UnimplementedMysqlCtlServer) RefreshConfig(context.Context, *RefreshConfigRequest) (*RefreshConfigResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RefreshConfig not implemented") } +func (UnimplementedMysqlCtlServer) VersionString(context.Context, *VersionStringRequest) (*VersionStringResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VersionString not implemented") +} func (UnimplementedMysqlCtlServer) mustEmbedUnimplementedMysqlCtlServer() {} // UnsafeMysqlCtlServer may be embedded to opt out of forward compatibility for this service. @@ -180,6 +222,42 @@ func _MysqlCtl_RunMysqlUpgrade_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _MysqlCtl_ApplyBinlogFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ApplyBinlogFileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MysqlCtlServer).ApplyBinlogFile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mysqlctl.MysqlCtl/ApplyBinlogFile", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MysqlCtlServer).ApplyBinlogFile(ctx, req.(*ApplyBinlogFileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MysqlCtl_ReadBinlogFilesTimestamps_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadBinlogFilesTimestampsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MysqlCtlServer).ReadBinlogFilesTimestamps(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mysqlctl.MysqlCtl/ReadBinlogFilesTimestamps", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MysqlCtlServer).ReadBinlogFilesTimestamps(ctx, req.(*ReadBinlogFilesTimestampsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _MysqlCtl_ReinitConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ReinitConfigRequest) if err := dec(in); err != nil { @@ -216,6 +294,24 @@ func _MysqlCtl_RefreshConfig_Handler(srv interface{}, ctx context.Context, dec f return interceptor(ctx, in, info, handler) } +func _MysqlCtl_VersionString_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VersionStringRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MysqlCtlServer).VersionString(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mysqlctl.MysqlCtl/VersionString", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MysqlCtlServer).VersionString(ctx, req.(*VersionStringRequest)) + } + return interceptor(ctx, in, info, handler) +} + // MysqlCtl_ServiceDesc is the grpc.ServiceDesc for MysqlCtl service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -235,6 +331,14 @@ var MysqlCtl_ServiceDesc = grpc.ServiceDesc{ MethodName: "RunMysqlUpgrade", Handler: _MysqlCtl_RunMysqlUpgrade_Handler, }, + { + MethodName: "ApplyBinlogFile", + Handler: _MysqlCtl_ApplyBinlogFile_Handler, + }, + { + MethodName: "ReadBinlogFilesTimestamps", + Handler: _MysqlCtl_ReadBinlogFilesTimestamps_Handler, + }, { MethodName: "ReinitConfig", Handler: _MysqlCtl_ReinitConfig_Handler, @@ -243,6 +347,10 @@ var MysqlCtl_ServiceDesc = grpc.ServiceDesc{ MethodName: "RefreshConfig", Handler: _MysqlCtl_RefreshConfig_Handler, }, + { + MethodName: "VersionString", + Handler: _MysqlCtl_VersionString_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "mysqlctl.proto", diff --git a/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go b/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go index 00e37e145a6..bb2ec78e03a 100644 --- a/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go +++ b/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: mysqlctl.proto package mysqlctl import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -20,6 +21,310 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *StartRequest) CloneVT() *StartRequest { + if m == nil { + return (*StartRequest)(nil) + } + r := &StartRequest{} + if rhs := m.MysqldArgs; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.MysqldArgs = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StartRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StartResponse) CloneVT() *StartResponse { + if m == nil { + return (*StartResponse)(nil) + } + r := &StartResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StartResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShutdownRequest) CloneVT() *ShutdownRequest { + if m == nil { + return (*ShutdownRequest)(nil) + } + r := &ShutdownRequest{ + WaitForMysqld: m.WaitForMysqld, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShutdownRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShutdownResponse) CloneVT() *ShutdownResponse { + if m == nil { + return (*ShutdownResponse)(nil) + } + r := &ShutdownResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShutdownResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RunMysqlUpgradeRequest) CloneVT() *RunMysqlUpgradeRequest { + if m == nil { + return (*RunMysqlUpgradeRequest)(nil) + } + r := &RunMysqlUpgradeRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RunMysqlUpgradeRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RunMysqlUpgradeResponse) CloneVT() *RunMysqlUpgradeResponse { + if m == nil { + return (*RunMysqlUpgradeResponse)(nil) + } + r := &RunMysqlUpgradeResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RunMysqlUpgradeResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ApplyBinlogFileRequest) CloneVT() *ApplyBinlogFileRequest { + if m == nil { + return (*ApplyBinlogFileRequest)(nil) + } + r := &ApplyBinlogFileRequest{ + BinlogFileName: m.BinlogFileName, + BinlogRestorePosition: m.BinlogRestorePosition, + BinlogRestoreDatetime: m.BinlogRestoreDatetime.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ApplyBinlogFileRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ApplyBinlogFileResponse) CloneVT() *ApplyBinlogFileResponse { + if m == nil { + return (*ApplyBinlogFileResponse)(nil) + } + r := &ApplyBinlogFileResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ApplyBinlogFileResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReadBinlogFilesTimestampsRequest) CloneVT() *ReadBinlogFilesTimestampsRequest { + if m == nil { + return (*ReadBinlogFilesTimestampsRequest)(nil) + } + r := &ReadBinlogFilesTimestampsRequest{} + if rhs := m.BinlogFileNames; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.BinlogFileNames = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReadBinlogFilesTimestampsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReadBinlogFilesTimestampsResponse) CloneVT() *ReadBinlogFilesTimestampsResponse { + if m == nil { + return (*ReadBinlogFilesTimestampsResponse)(nil) + } + r := &ReadBinlogFilesTimestampsResponse{ + FirstTimestamp: m.FirstTimestamp.CloneVT(), + FirstTimestampBinlog: m.FirstTimestampBinlog, + LastTimestamp: m.LastTimestamp.CloneVT(), + LastTimestampBinlog: m.LastTimestampBinlog, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReadBinlogFilesTimestampsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReinitConfigRequest) CloneVT() *ReinitConfigRequest { + if m == nil { + return (*ReinitConfigRequest)(nil) + } + r := &ReinitConfigRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReinitConfigRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReinitConfigResponse) CloneVT() *ReinitConfigResponse { + if m == nil { + return (*ReinitConfigResponse)(nil) + } + r := &ReinitConfigResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReinitConfigResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshConfigRequest) CloneVT() *RefreshConfigRequest { + if m == nil { + return (*RefreshConfigRequest)(nil) + } + r := &RefreshConfigRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RefreshConfigRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshConfigResponse) CloneVT() *RefreshConfigResponse { + if m == nil { + return (*RefreshConfigResponse)(nil) + } + r := &RefreshConfigResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RefreshConfigResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VersionStringRequest) CloneVT() *VersionStringRequest { + if m == nil { + return (*VersionStringRequest)(nil) + } + r := &VersionStringRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VersionStringRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VersionStringResponse) CloneVT() *VersionStringResponse { + if m == nil { + return (*VersionStringResponse)(nil) + } + r := &VersionStringResponse{ + Version: m.Version, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VersionStringResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BackupInfo) CloneVT() *BackupInfo { + if m == nil { + return (*BackupInfo)(nil) + } + r := &BackupInfo{ + Name: m.Name, + Directory: m.Directory, + Keyspace: m.Keyspace, + Shard: m.Shard, + TabletAlias: m.TabletAlias.CloneVT(), + Time: m.Time.CloneVT(), + Engine: m.Engine, + Status: m.Status, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BackupInfo) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *StartRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -237,7 +542,7 @@ func (m *RunMysqlUpgradeResponse) MarshalToSizedBufferVT(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *ReinitConfigRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyBinlogFileRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -250,12 +555,12 @@ func (m *ReinitConfigRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReinitConfigRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplyBinlogFileRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReinitConfigRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyBinlogFileRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -267,10 +572,34 @@ func (m *ReinitConfigRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.BinlogRestoreDatetime != nil { + size, err := m.BinlogRestoreDatetime.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.BinlogRestorePosition) > 0 { + i -= len(m.BinlogRestorePosition) + copy(dAtA[i:], m.BinlogRestorePosition) + i = encodeVarint(dAtA, i, uint64(len(m.BinlogRestorePosition))) + i-- + dAtA[i] = 0x12 + } + if len(m.BinlogFileName) > 0 { + i -= len(m.BinlogFileName) + copy(dAtA[i:], m.BinlogFileName) + i = encodeVarint(dAtA, i, uint64(len(m.BinlogFileName))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *ReinitConfigResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyBinlogFileResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -283,12 +612,12 @@ func (m *ReinitConfigResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReinitConfigResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplyBinlogFileResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReinitConfigResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyBinlogFileResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -303,7 +632,7 @@ func (m *ReinitConfigResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *RefreshConfigRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ReadBinlogFilesTimestampsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -316,12 +645,12 @@ func (m *RefreshConfigRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RefreshConfigRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ReadBinlogFilesTimestampsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RefreshConfigRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReadBinlogFilesTimestampsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -333,10 +662,19 @@ func (m *RefreshConfigRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.BinlogFileNames) > 0 { + for iNdEx := len(m.BinlogFileNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.BinlogFileNames[iNdEx]) + copy(dAtA[i:], m.BinlogFileNames[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.BinlogFileNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *RefreshConfigResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ReadBinlogFilesTimestampsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -349,12 +687,12 @@ func (m *RefreshConfigResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RefreshConfigResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ReadBinlogFilesTimestampsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RefreshConfigResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReadBinlogFilesTimestampsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -366,10 +704,44 @@ func (m *RefreshConfigResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.LastTimestampBinlog) > 0 { + i -= len(m.LastTimestampBinlog) + copy(dAtA[i:], m.LastTimestampBinlog) + i = encodeVarint(dAtA, i, uint64(len(m.LastTimestampBinlog))) + i-- + dAtA[i] = 0x22 + } + if m.LastTimestamp != nil { + size, err := m.LastTimestamp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.FirstTimestampBinlog) > 0 { + i -= len(m.FirstTimestampBinlog) + copy(dAtA[i:], m.FirstTimestampBinlog) + i = encodeVarint(dAtA, i, uint64(len(m.FirstTimestampBinlog))) + i-- + dAtA[i] = 0x12 + } + if m.FirstTimestamp != nil { + size, err := m.FirstTimestamp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *BackupInfo) MarshalVT() (dAtA []byte, err error) { +func (m *ReinitConfigRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -382,12 +754,12 @@ func (m *BackupInfo) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BackupInfo) MarshalToVT(dAtA []byte) (int, error) { +func (m *ReinitConfigRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *BackupInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReinitConfigRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -399,237 +771,745 @@ func (m *BackupInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Status != 0 { - i = encodeVarint(dAtA, i, uint64(m.Status)) - i-- - dAtA[i] = 0x40 - } - if len(m.Engine) > 0 { - i -= len(m.Engine) - copy(dAtA[i:], m.Engine) - i = encodeVarint(dAtA, i, uint64(len(m.Engine))) - i-- - dAtA[i] = 0x3a - } - if m.Time != nil { - size, err := m.Time.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x32 - } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x22 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0x1a - } - if len(m.Directory) > 0 { - i -= len(m.Directory) - copy(dAtA[i:], m.Directory) - i = encodeVarint(dAtA, i, uint64(len(m.Directory))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func encodeVarint(dAtA []byte, offset int, v uint64) int { - offset -= sov(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *StartRequest) SizeVT() (n int) { +func (m *ReinitConfigResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.MysqldArgs) > 0 { - for _, s := range m.MysqldArgs { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *StartResponse) SizeVT() (n int) { +func (m *ReinitConfigResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReinitConfigResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil } -func (m *ShutdownRequest) SizeVT() (n int) { +func (m *RefreshConfigRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.WaitForMysqld { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ShutdownResponse) SizeVT() (n int) { +func (m *RefreshConfigRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RefreshConfigRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil } -func (m *RunMysqlUpgradeRequest) SizeVT() (n int) { +func (m *RefreshConfigResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *RunMysqlUpgradeResponse) SizeVT() (n int) { +func (m *RefreshConfigResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RefreshConfigResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil } -func (m *ReinitConfigRequest) SizeVT() (n int) { +func (m *VersionStringRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ReinitConfigResponse) SizeVT() (n int) { +func (m *VersionStringRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VersionStringRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil } -func (m *RefreshConfigRequest) SizeVT() (n int) { +func (m *VersionStringResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *RefreshConfigResponse) SizeVT() (n int) { +func (m *VersionStringResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VersionStringResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarint(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *BackupInfo) SizeVT() (n int) { +func (m *BackupInfo) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BackupInfo) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *BackupInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Directory) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.Status != 0 { + i = encodeVarint(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x40 } - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Engine) > 0 { + i -= len(m.Engine) + copy(dAtA[i:], m.Engine) + i = encodeVarint(dAtA, i, uint64(len(m.Engine))) + i-- + dAtA[i] = 0x3a } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.Time != nil { + size, err := m.Time.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 } if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.Time != nil { - l = m.Time.SizeVT() - n += 1 + l + sov(uint64(l)) + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a } - l = len(m.Engine) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x22 } - if m.Status != 0 { - n += 1 + sov(uint64(m.Status)) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x1a } - n += len(m.unknownFields) - return n -} - + if len(m.Directory) > 0 { + i -= len(m.Directory) + copy(dAtA[i:], m.Directory) + i = encodeVarint(dAtA, i, uint64(len(m.Directory))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *StartRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MysqldArgs) > 0 { + for _, s := range m.MysqldArgs { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *StartResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ShutdownRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.WaitForMysqld { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ShutdownResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RunMysqlUpgradeRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RunMysqlUpgradeResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ApplyBinlogFileRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BinlogFileName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.BinlogRestorePosition) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.BinlogRestoreDatetime != nil { + l = m.BinlogRestoreDatetime.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ApplyBinlogFileResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ReadBinlogFilesTimestampsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.BinlogFileNames) > 0 { + for _, s := range m.BinlogFileNames { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ReadBinlogFilesTimestampsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FirstTimestamp != nil { + l = m.FirstTimestamp.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.FirstTimestampBinlog) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.LastTimestamp != nil { + l = m.LastTimestamp.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.LastTimestampBinlog) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReinitConfigRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ReinitConfigResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RefreshConfigRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RefreshConfigResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *VersionStringRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *VersionStringResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *BackupInfo) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Directory) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Time != nil { + l = m.Time.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Engine) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Status != 0 { + n += 1 + sov(uint64(m.Status)) + } + n += len(m.unknownFields) + return n +} + func sov(x uint64) (n int) { return (bits.Len64(x|1) + 6) / 7 } -func soz(x uint64) (n int) { - return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *StartRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MysqldArgs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MysqldArgs = append(m.MysqldArgs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ShutdownRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ShutdownRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ShutdownRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitForMysqld", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.WaitForMysqld = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func (m *StartRequest) UnmarshalVT(dAtA []byte) error { +func (m *ShutdownResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -652,15 +1532,200 @@ func (m *StartRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StartRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ShutdownResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StartRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShutdownResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunMysqlUpgradeRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunMysqlUpgradeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunMysqlUpgradeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunMysqlUpgradeResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunMysqlUpgradeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunMysqlUpgradeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplyBinlogFileRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyBinlogFileRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyBinlogFileRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MysqldArgs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BinlogFileName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BinlogFileName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogRestorePosition", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -688,7 +1753,43 @@ func (m *StartRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MysqldArgs = append(m.MysqldArgs, string(dAtA[iNdEx:postIndex])) + m.BinlogRestorePosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogRestoreDatetime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BinlogRestoreDatetime == nil { + m.BinlogRestoreDatetime = &vttime.Time{} + } + if err := m.BinlogRestoreDatetime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -712,7 +1813,7 @@ func (m *StartRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StartResponse) UnmarshalVT(dAtA []byte) error { +func (m *ApplyBinlogFileResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -735,10 +1836,10 @@ func (m *StartResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StartResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ApplyBinlogFileResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StartResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplyBinlogFileResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -763,7 +1864,7 @@ func (m *StartResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ShutdownRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReadBinlogFilesTimestampsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -786,17 +1887,17 @@ func (m *ShutdownRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShutdownRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReadBinlogFilesTimestampsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShutdownRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReadBinlogFilesTimestampsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitForMysqld", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogFileNames", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -806,12 +1907,24 @@ func (m *ShutdownRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.WaitForMysqld = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BinlogFileNames = append(m.BinlogFileNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -834,7 +1947,7 @@ func (m *ShutdownRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ShutdownResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReadBinlogFilesTimestampsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -857,12 +1970,148 @@ func (m *ShutdownResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShutdownResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReadBinlogFilesTimestampsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShutdownResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReadBinlogFilesTimestampsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FirstTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FirstTimestamp == nil { + m.FirstTimestamp = &vttime.Time{} + } + if err := m.FirstTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FirstTimestampBinlog", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FirstTimestampBinlog = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastTimestamp == nil { + m.LastTimestamp = &vttime.Time{} + } + if err := m.LastTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTimestampBinlog", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastTimestampBinlog = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -885,7 +2134,7 @@ func (m *ShutdownResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RunMysqlUpgradeRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReinitConfigRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -908,10 +2157,10 @@ func (m *RunMysqlUpgradeRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunMysqlUpgradeRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReinitConfigRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunMysqlUpgradeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReinitConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -936,7 +2185,7 @@ func (m *RunMysqlUpgradeRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RunMysqlUpgradeResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReinitConfigResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -959,10 +2208,10 @@ func (m *RunMysqlUpgradeResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunMysqlUpgradeResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReinitConfigResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunMysqlUpgradeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReinitConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -987,7 +2236,7 @@ func (m *RunMysqlUpgradeResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReinitConfigRequest) UnmarshalVT(dAtA []byte) error { +func (m *RefreshConfigRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1010,10 +2259,10 @@ func (m *ReinitConfigRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReinitConfigRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RefreshConfigRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReinitConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RefreshConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -1038,7 +2287,7 @@ func (m *ReinitConfigRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReinitConfigResponse) UnmarshalVT(dAtA []byte) error { +func (m *RefreshConfigResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1061,10 +2310,10 @@ func (m *ReinitConfigResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReinitConfigResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RefreshConfigResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReinitConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RefreshConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -1089,7 +2338,7 @@ func (m *ReinitConfigResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RefreshConfigRequest) UnmarshalVT(dAtA []byte) error { +func (m *VersionStringRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1112,10 +2361,10 @@ func (m *RefreshConfigRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RefreshConfigRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VersionStringRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RefreshConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VersionStringRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -1140,7 +2389,7 @@ func (m *RefreshConfigRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RefreshConfigResponse) UnmarshalVT(dAtA []byte) error { +func (m *VersionStringResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1163,12 +2412,44 @@ func (m *RefreshConfigResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RefreshConfigResponse: wiretype end group for non-group") + return fmt.Errorf("proto: VersionStringResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RefreshConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VersionStringResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/go/vt/proto/query/cached_size.go b/go/vt/proto/query/cached_size.go index 735bd555e55..09f86d86903 100644 --- a/go/vt/proto/query/cached_size.go +++ b/go/vt/proto/query/cached_size.go @@ -112,7 +112,7 @@ func (cached *Value) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(80) + size += int64(96) } // field unknownFields []byte { @@ -122,5 +122,12 @@ func (cached *Value) CachedSize(alloc bool) int64 { { size += hack.RuntimeAllocSize(int64(cap(cached.Value))) } + // field Values []*vitess.io/vitess/go/vt/proto/query.Value + { + size += hack.RuntimeAllocSize(int64(cap(cached.Values)) * int64(8)) + for _, elem := range cached.Values { + size += elem.CachedSize(true) + } + } return size } diff --git a/go/vt/proto/query/query.pb.go b/go/vt/proto/query/query.pb.go index 29302b4e662..7dbeb1685ae 100644 --- a/go/vt/proto/query/query.pb.go +++ b/go/vt/proto/query/query.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: query.proto @@ -701,6 +701,7 @@ const ( ExecuteOptions_Gen4Left2Right ExecuteOptions_PlannerVersion = 4 ExecuteOptions_Gen4WithFallback ExecuteOptions_PlannerVersion = 5 ExecuteOptions_Gen4CompareV3 ExecuteOptions_PlannerVersion = 6 + ExecuteOptions_V3Insert ExecuteOptions_PlannerVersion = 7 ) // Enum value maps for ExecuteOptions_PlannerVersion. @@ -713,6 +714,7 @@ var ( 4: "Gen4Left2Right", 5: "Gen4WithFallback", 6: "Gen4CompareV3", + 7: "V3Insert", } ExecuteOptions_PlannerVersion_value = map[string]int32{ "DEFAULT_PLANNER": 0, @@ -722,6 +724,7 @@ var ( "Gen4Left2Right": 4, "Gen4WithFallback": 5, "Gen4CompareV3": 6, + "V3Insert": 7, } ) @@ -1121,6 +1124,8 @@ type Value struct { Type Type `protobuf:"varint,1,opt,name=type,proto3,enum=query.Type" json:"type,omitempty"` Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // values are set if type is TUPLE. + Values []*Value `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"` } func (x *Value) Reset() { @@ -1169,6 +1174,13 @@ func (x *Value) GetValue() []byte { return nil } +func (x *Value) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + // BindVariable represents a single bind variable in a Query. type BindVariable struct { state protoimpl.MessageState @@ -1333,6 +1345,12 @@ type ExecuteOptions struct { // TransactionAccessMode specifies the access modes to be used while starting the transaction i.e. READ WRITE/READ ONLY/WITH CONSISTENT SNAPSHOT // If not specified, the transaction will be started with the default access mode on the connection. TransactionAccessMode []ExecuteOptions_TransactionAccessMode `protobuf:"varint,14,rep,packed,name=transaction_access_mode,json=transactionAccessMode,proto3,enum=query.ExecuteOptions_TransactionAccessMode" json:"transaction_access_mode,omitempty"` + // WorkloadName specifies the name of the workload as indicated in query directives. This is used for instrumentation + // in metrics and tracing spans. + WorkloadName string `protobuf:"bytes,15,opt,name=WorkloadName,proto3" json:"WorkloadName,omitempty"` + // priority specifies the priority of the query, between 0 and 100. This is leveraged by the transaction + // throttler to determine whether, under resource contention, a query should or should not be throttled. + Priority string `protobuf:"bytes,16,opt,name=priority,proto3" json:"priority,omitempty"` } func (x *ExecuteOptions) Reset() { @@ -1437,6 +1455,20 @@ func (x *ExecuteOptions) GetTransactionAccessMode() []ExecuteOptions_Transaction return nil } +func (x *ExecuteOptions) GetWorkloadName() string { + if x != nil { + return x.WorkloadName + } + return "" +} + +func (x *ExecuteOptions) GetPriority() string { + if x != nil { + return x.Priority + } + return "" +} + // Field describes a single column returned by a query type Field struct { state protoimpl.MessageState @@ -5233,8 +5265,8 @@ type StreamHealthResponse struct { // if filtered replication is enabled on a primary for instance, // or if a replica should not be used because the keyspace is being resharded. Serving bool `protobuf:"varint,2,opt,name=serving,proto3" json:"serving,omitempty"` - // tablet_externally_reparented_timestamp can be interpreted as the - // last time we knew that this tablet was the PRIMARY of this shard + // primary_term_start_timestamp can be interpreted as the + // last time we knew that this tablet was promoted to a PRIMARY of this shard // (if StreamHealthResponse describes a group of tablets, between // two vtgates, only one primary will be present in the group, and // this is this primary's value). @@ -5264,8 +5296,8 @@ type StreamHealthResponse struct { // topology (see go/vt/vttablet/tabletmanager/init_tablet.go) // // OR - // d) 0 if the vttablet was never a PRIMARY. - TabletExternallyReparentedTimestamp int64 `protobuf:"varint,3,opt,name=tablet_externally_reparented_timestamp,json=tabletExternallyReparentedTimestamp,proto3" json:"tablet_externally_reparented_timestamp,omitempty"` + // d) 0 if the vttablet is not a PRIMARY. + PrimaryTermStartTimestamp int64 `protobuf:"varint,3,opt,name=primary_term_start_timestamp,json=primaryTermStartTimestamp,proto3" json:"primary_term_start_timestamp,omitempty"` // realtime_stats contains information about the tablet status. // It is only filled in if the information is about a tablet. RealtimeStats *RealtimeStats `protobuf:"bytes,4,opt,name=realtime_stats,json=realtimeStats,proto3" json:"realtime_stats,omitempty"` @@ -5322,9 +5354,9 @@ func (x *StreamHealthResponse) GetServing() bool { return false } -func (x *StreamHealthResponse) GetTabletExternallyReparentedTimestamp() int64 { +func (x *StreamHealthResponse) GetPrimaryTermStartTimestamp() int64 { if x != nil { - return x.TabletExternallyReparentedTimestamp + return x.PrimaryTermStartTimestamp } return 0 } @@ -5635,304 +5667,250 @@ var file_query_proto_rawDesc = []byte{ 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3e, 0x0a, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x64, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x6b, 0x0a, - 0x0c, 0x42, 0x69, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1f, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0xc2, 0x01, 0x0a, 0x0a, 0x42, - 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x12, 0x4b, 0x0a, 0x0e, 0x62, - 0x69, 0x6e, 0x64, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, - 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x62, 0x69, 0x6e, 0x64, 0x56, - 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x1a, 0x55, 0x0a, 0x12, 0x42, 0x69, 0x6e, 0x64, - 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x69, - 0x61, 0x62, 0x6c, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0xca, 0x0a, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x4d, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x5f, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, - 0x64, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x3a, 0x0a, - 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x1e, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x52, - 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x71, 0x6c, - 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0e, 0x73, 0x71, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x12, 0x5f, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x69, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x73, 0x6f, 0x6c, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x15, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x12, 0x73, 0x6b, 0x69, 0x70, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, - 0x61, 0x6e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x4d, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, - 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x24, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x17, 0x68, 0x61, 0x73, 0x5f, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x68, 0x61, 0x73, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x46, 0x0a, - 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x0d, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x6f, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x63, 0x0a, 0x17, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6d, 0x6f, 0x64, 0x65, - 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x72, - 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4d, - 0x6f, 0x64, 0x65, 0x52, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x3b, 0x0a, 0x0e, 0x49, 0x6e, - 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x11, 0x0a, 0x0d, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x07, - 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x02, 0x22, 0x38, 0x0a, 0x08, 0x57, 0x6f, 0x72, 0x6b, 0x6c, - 0x6f, 0x61, 0x64, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x4c, 0x54, 0x50, 0x10, 0x01, 0x12, 0x08, - 0x0a, 0x04, 0x4f, 0x4c, 0x41, 0x50, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x42, 0x41, 0x10, - 0x03, 0x22, 0xa7, 0x01, 0x0a, 0x14, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, - 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45, 0x50, 0x45, 0x41, - 0x54, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, - 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x54, 0x45, 0x44, 0x10, 0x02, - 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x55, 0x4e, 0x43, 0x4f, 0x4d, 0x4d, 0x49, - 0x54, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x45, 0x52, 0x49, 0x41, 0x4c, - 0x49, 0x5a, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x4f, 0x4e, 0x53, - 0x49, 0x53, 0x54, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x5f, - 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x05, 0x12, 0x0e, 0x0a, 0x0a, 0x41, - 0x55, 0x54, 0x4f, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x06, 0x22, 0x84, 0x01, 0x0a, 0x0e, - 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x13, - 0x0a, 0x0f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x50, 0x4c, 0x41, 0x4e, 0x4e, 0x45, - 0x52, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x33, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x47, - 0x65, 0x6e, 0x34, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x47, 0x65, 0x6e, 0x34, 0x47, 0x72, 0x65, - 0x65, 0x64, 0x79, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x47, 0x65, 0x6e, 0x34, 0x4c, 0x65, 0x66, - 0x74, 0x32, 0x52, 0x69, 0x67, 0x68, 0x74, 0x10, 0x04, 0x12, 0x14, 0x0a, 0x10, 0x47, 0x65, 0x6e, - 0x34, 0x57, 0x69, 0x74, 0x68, 0x46, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x10, 0x05, 0x12, - 0x11, 0x0a, 0x0d, 0x47, 0x65, 0x6e, 0x34, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x56, 0x33, - 0x10, 0x06, 0x22, 0x84, 0x01, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x4f, 0x4e, 0x53, 0x4f, 0x4c, 0x49, 0x44, 0x41, - 0x54, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4e, 0x53, 0x4f, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x4f, - 0x52, 0x5f, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, - 0x43, 0x4f, 0x4e, 0x53, 0x4f, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x4e, 0x41, - 0x42, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x4f, 0x4e, 0x53, 0x4f, 0x4c, - 0x49, 0x44, 0x41, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x4e, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x5f, 0x52, - 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x53, 0x10, 0x03, 0x22, 0x4f, 0x0a, 0x15, 0x54, 0x72, 0x61, - 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4d, 0x6f, - 0x64, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4e, 0x53, 0x49, 0x53, 0x54, 0x45, 0x4e, 0x54, - 0x5f, 0x53, 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x52, - 0x45, 0x41, 0x44, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x52, - 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, - 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0xb8, 0x02, 0x0a, - 0x05, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x72, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6f, 0x72, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1a, - 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, - 0x67, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, - 0x67, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, - 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x63, 0x6f, - 0x6c, 0x75, 0x6d, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x68, - 0x61, 0x72, 0x73, 0x65, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x63, 0x68, 0x61, - 0x72, 0x73, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x63, 0x69, 0x6d, 0x61, 0x6c, 0x73, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x64, 0x65, 0x63, 0x69, 0x6d, 0x61, 0x6c, 0x73, - 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6c, - 0x75, 0x6d, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x22, 0x37, 0x0a, 0x03, 0x52, 0x6f, 0x77, 0x12, 0x18, - 0x0a, 0x07, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x12, 0x52, - 0x07, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, - 0x22, 0xe3, 0x01, 0x0a, 0x0b, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, - 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x72, - 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x69, - 0x6e, 0x73, 0x65, 0x72, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, - 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, - 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x32, 0x0a, 0x15, - 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x3c, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, - 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x22, 0xa0, 0x03, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x1a, 0x9e, 0x02, 0x0a, 0x09, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x41, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, 0x08, 0x63, - 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x12, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x52, 0x10, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x73, 0x12, 0x38, 0x0a, 0x12, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, - 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x10, 0x70, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, - 0x73, 0x71, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x22, 0x27, - 0x0a, 0x08, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x72, - 0x72, 0x6f, 0x72, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x4d, 0x4c, 0x10, 0x01, 0x12, 0x07, - 0x0a, 0x03, 0x44, 0x44, 0x4c, 0x10, 0x02, 0x22, 0xe1, 0x02, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, - 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, - 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, - 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, - 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, - 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, - 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0x3d, 0x0a, 0x0f, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, - 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x64, 0x0a, 0x0f, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x57, 0x69, 0x74, 0x68, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x25, 0x0a, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, - 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x22, 0xe7, 0x02, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, - 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, - 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, - 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, - 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, - 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0x43, 0x0a, 0x15, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, - 0xee, 0x01, 0x0a, 0x0c, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, - 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, - 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, - 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, - 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, - 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x22, 0xa4, 0x01, 0x0a, 0x0d, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x6d, - 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, - 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, - 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, - 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, - 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, - 0x31, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x49, 0x64, 0x22, 0xe7, 0x01, 0x0a, 0x0f, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, - 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, - 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, - 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, - 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, - 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, - 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x33, 0x0a, 0x10, - 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, - 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x0e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x52, 0x65, 0x71, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x24, 0x0a, + 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x73, 0x22, 0x6b, 0x0a, 0x0c, 0x42, 0x69, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, + 0x62, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x0b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, + 0x22, 0xc2, 0x01, 0x0a, 0x0a, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, + 0x6c, 0x12, 0x4b, 0x0a, 0x0e, 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x69, 0x6e, + 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0d, 0x62, 0x69, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x1a, 0x55, + 0x0a, 0x12, 0x42, 0x69, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x69, + 0x6e, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x98, 0x0b, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4d, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x24, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, + 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, + 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x52, + 0x6f, 0x77, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x57, 0x6f, 0x72, + 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, + 0x28, 0x0a, 0x10, 0x73, 0x71, 0x6c, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x73, 0x71, 0x6c, 0x53, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x5f, 0x0a, 0x15, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x73, 0x6f, 0x6c, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x15, 0x73, 0x6b, + 0x69, 0x70, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x63, 0x61, + 0x63, 0x68, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x73, 0x6b, 0x69, 0x70, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x4d, 0x0a, + 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x50, 0x6c, + 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x70, 0x6c, + 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x17, + 0x68, 0x61, 0x73, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x65, 0x6d, 0x70, + 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x68, + 0x61, 0x73, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x65, 0x6d, 0x70, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x6f, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x0c, 0x63, + 0x6f, 0x6e, 0x73, 0x6f, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x63, 0x0a, 0x17, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4d, 0x6f, 0x64, 0x65, + 0x12, 0x22, 0x0a, 0x0c, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x4e, 0x61, 0x6d, 0x65, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, + 0x22, 0x3b, 0x0a, 0x0e, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x73, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x4e, + 0x41, 0x4d, 0x45, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, + 0x4c, 0x59, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x02, 0x22, 0x38, 0x0a, + 0x08, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x4c, + 0x54, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x4c, 0x41, 0x50, 0x10, 0x02, 0x12, 0x07, + 0x0a, 0x03, 0x44, 0x42, 0x41, 0x10, 0x03, 0x22, 0xa7, 0x01, 0x0a, 0x14, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x73, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x13, 0x0a, + 0x0f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x52, 0x45, 0x41, 0x44, + 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, 0x49, + 0x54, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x55, + 0x4e, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, + 0x53, 0x45, 0x52, 0x49, 0x41, 0x4c, 0x49, 0x5a, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x21, + 0x0a, 0x1d, 0x43, 0x4f, 0x4e, 0x53, 0x49, 0x53, 0x54, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x4e, 0x41, + 0x50, 0x53, 0x48, 0x4f, 0x54, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x05, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x55, 0x54, 0x4f, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, + 0x06, 0x22, 0x92, 0x01, 0x0a, 0x0e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, + 0x50, 0x4c, 0x41, 0x4e, 0x4e, 0x45, 0x52, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x33, 0x10, + 0x01, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x65, 0x6e, 0x34, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x47, + 0x65, 0x6e, 0x34, 0x47, 0x72, 0x65, 0x65, 0x64, 0x79, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x47, + 0x65, 0x6e, 0x34, 0x4c, 0x65, 0x66, 0x74, 0x32, 0x52, 0x69, 0x67, 0x68, 0x74, 0x10, 0x04, 0x12, + 0x14, 0x0a, 0x10, 0x47, 0x65, 0x6e, 0x34, 0x57, 0x69, 0x74, 0x68, 0x46, 0x61, 0x6c, 0x6c, 0x62, + 0x61, 0x63, 0x6b, 0x10, 0x05, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x65, 0x6e, 0x34, 0x43, 0x6f, 0x6d, + 0x70, 0x61, 0x72, 0x65, 0x56, 0x33, 0x10, 0x06, 0x12, 0x0c, 0x0a, 0x08, 0x56, 0x33, 0x49, 0x6e, + 0x73, 0x65, 0x72, 0x74, 0x10, 0x07, 0x22, 0x84, 0x01, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x73, 0x6f, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x4f, 0x4e, 0x53, 0x4f, + 0x4c, 0x49, 0x44, 0x41, 0x54, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4e, 0x53, 0x4f, 0x4c, 0x49, + 0x44, 0x41, 0x54, 0x4f, 0x52, 0x5f, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x18, 0x0a, 0x14, 0x43, 0x4f, 0x4e, 0x53, 0x4f, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x4f, 0x52, + 0x5f, 0x45, 0x4e, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x4f, + 0x4e, 0x53, 0x4f, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x4e, 0x41, 0x42, 0x4c, + 0x45, 0x44, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x53, 0x10, 0x03, 0x22, 0x4f, 0x0a, + 0x15, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4e, 0x53, 0x49, 0x53, + 0x54, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x10, 0x00, 0x12, + 0x0e, 0x0a, 0x0a, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, + 0x0d, 0x0a, 0x09, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x4a, 0x04, + 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, + 0x22, 0xb8, 0x02, 0x0a, 0x05, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, + 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x72, 0x67, 0x5f, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6f, 0x72, 0x67, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x19, + 0x0a, 0x08, 0x6f, 0x72, 0x67, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x6f, 0x72, 0x67, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6c, + 0x75, 0x6d, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x0c, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, + 0x0a, 0x07, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x07, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x63, 0x69, + 0x6d, 0x61, 0x6c, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x64, 0x65, 0x63, 0x69, + 0x6d, 0x61, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, + 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x22, 0x37, 0x0a, 0x03, 0x52, + 0x6f, 0x77, 0x12, 0x18, 0x0a, 0x07, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x12, 0x52, 0x07, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x73, 0x12, 0x16, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x22, 0xe3, 0x01, 0x0a, 0x0b, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x6f, + 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0c, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, + 0x1b, 0x0a, 0x09, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x04, + 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x12, 0x0a, 0x04, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, + 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x3c, 0x0a, 0x0c, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xa0, 0x03, 0x0a, 0x0b, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x0a, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x1a, 0x9e, 0x02, 0x0a, 0x09, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x41, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, + 0x67, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, + 0x79, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x12, 0x70, 0x72, + 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x52, 0x10, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x38, 0x0a, 0x12, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x10, + 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, + 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, + 0x71, 0x6c, 0x22, 0x27, 0x0a, 0x08, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x09, + 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x4d, 0x4c, + 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x44, 0x4c, 0x10, 0x02, 0x22, 0xe1, 0x02, 0x0a, 0x0e, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, + 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, + 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, + 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, + 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x2f, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, + 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, + 0x3d, 0x0a, 0x0f, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x64, + 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x57, 0x69, 0x74, 0x68, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x22, 0xe7, 0x02, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, + 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, + 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, + 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, + 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, + 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0x43, + 0x0a, 0x15, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x22, 0xee, 0x01, 0x0a, 0x0c, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, @@ -5944,13 +5922,108 @@ var file_query_proto_rawDesc = []byte{ 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, - 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, - 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x11, - 0x0a, 0x0f, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0xda, 0x01, 0x0a, 0x15, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x50, 0x72, 0x65, 0x70, - 0x61, 0x72, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, + 0x67, 0x65, 0x74, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa4, 0x01, 0x0a, 0x0d, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x38, 0x0a, + 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, + 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, + 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, + 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, + 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, + 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x22, 0x31, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0xe7, 0x01, 0x0a, 0x0f, 0x52, 0x6f, 0x6c, 0x6c, 0x62, + 0x61, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, + 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, + 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, + 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, + 0x22, 0x33, 0x0a, 0x10, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x0e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, + 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, + 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, + 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, + 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, + 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x12, + 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, + 0x69, 0x64, 0x22, 0x11, 0x0a, 0x0f, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xda, 0x01, 0x0a, 0x15, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, + 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, + 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, + 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, + 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, + 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, + 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, + 0x69, 0x64, 0x22, 0x18, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x50, 0x72, 0x65, 0x70, + 0x61, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x83, 0x02, 0x0a, + 0x17, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, + 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, + 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, + 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, + 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, + 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x12, + 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, + 0x69, 0x64, 0x22, 0x1a, 0x0a, 0x18, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x72, + 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x90, + 0x02, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, @@ -5962,117 +6035,234 @@ var file_query_proto_rawDesc = []byte{ 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, - 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x18, - 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x83, 0x02, 0x0a, 0x17, 0x52, 0x6f, 0x6c, - 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, - 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, - 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, - 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, - 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, - 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, - 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, - 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, - 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x1a, - 0x0a, 0x18, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x90, 0x02, 0x0a, 0x18, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, - 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, - 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, - 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, - 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, - 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, - 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x12, 0x31, 0x0a, 0x0c, 0x70, 0x61, - 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, + 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x12, 0x31, + 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, + 0x73, 0x22, 0x1b, 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xfe, + 0x01, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, + 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, + 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, + 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, + 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, + 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, + 0x74, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, + 0x15, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xfe, 0x01, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x52, 0x6f, + 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, + 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, + 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, + 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, + 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x6f, + 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xdf, + 0x01, 0x0a, 0x1a, 0x43, 0x6f, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, + 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, + 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, + 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, + 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x64, 0x74, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, + 0x22, 0x1d, 0x0a, 0x1b, 0x43, 0x6f, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0xdb, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x61, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, + 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, + 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, + 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x51, 0x0a, + 0x17, 0x52, 0x65, 0x61, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x22, 0xe0, 0x02, 0x0a, 0x13, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, + 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, + 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, + 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, + 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, - 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x22, 0x1b, 0x0a, - 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xfe, 0x01, 0x0a, 0x12, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, - 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, - 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, - 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, - 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, - 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, - 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0xfe, 0x01, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, - 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, - 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, - 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, - 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, - 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, - 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, - 0x74, 0x69, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, - 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xdf, 0x01, 0x0a, 0x1a, 0x43, - 0x6f, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, - 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, - 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, - 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, - 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x1d, 0x0a, 0x1b, - 0x43, 0x6f, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x16, - 0x52, 0x65, 0x61, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, - 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, - 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, - 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, - 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, - 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, - 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x51, 0x0a, 0x17, 0x52, 0x65, 0x61, - 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, - 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xe0, 0x02, 0x0a, - 0x13, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, + 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x51, 0x75, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x22, 0xfe, 0x01, 0x0a, 0x14, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x45, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, + 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, + 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x73, 0x22, 0xe6, 0x02, 0x0a, 0x19, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, + 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, + 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, + 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, + 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, + 0x72, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0a, 0x70, 0x72, 0x65, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0x84, 0x02, + 0x0a, 0x1a, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, + 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, + 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x73, 0x22, 0xd9, 0x01, 0x0a, 0x14, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, + 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, + 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, + 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, + 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x22, 0x43, 0x0a, 0x15, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xf6, 0x01, 0x0a, 0x11, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, + 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, + 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, + 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, + 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, + 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x40, + 0x0a, 0x12, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x22, 0xe8, 0x02, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x45, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, + 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, + 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, + 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, + 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0a, 0x70, 0x72, 0x65, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x16, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, + 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, + 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x22, 0xee, 0x02, 0x0a, 0x1b, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, @@ -6089,68 +6279,69 @@ var file_query_proto_rawDesc = []byte{ 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, - 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x1f, - 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, - 0xfe, 0x01, 0x0a, 0x14, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, - 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, - 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x32, 0x0a, 0x15, - 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, - 0x22, 0xe6, 0x02, 0x0a, 0x19, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, - 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, - 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, - 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, - 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, - 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, - 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, - 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, - 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, - 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, 0x71, - 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, - 0x65, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0x84, 0x02, 0x0a, 0x1a, 0x42, 0x65, - 0x67, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, - 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, - 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x32, 0x0a, 0x15, - 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, - 0x22, 0xd9, 0x01, 0x0a, 0x14, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, + 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x51, 0x75, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0xcc, 0x01, 0x0a, 0x1c, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, + 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, + 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x22, 0xf4, 0x02, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x42, 0x65, 0x67, 0x69, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, + 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, + 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, + 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x70, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2c, 0x0a, + 0x12, 0x70, 0x6f, 0x73, 0x74, 0x5f, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x71, 0x75, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x74, 0x42, + 0x65, 0x67, 0x69, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0xa6, 0x02, 0x0a, 0x1b, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x45, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, + 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x25, + 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x73, 0x22, 0xfa, 0x02, 0x0a, 0x20, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x42, 0x65, 0x67, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, @@ -6161,128 +6352,19 @@ var file_query_proto_rawDesc = []byte{ 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x43, 0x0a, 0x15, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x22, 0xf6, 0x01, 0x0a, 0x11, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x41, 0x63, 0x6b, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, - 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, - 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, - 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, - 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, - 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, - 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x03, 0x69, 0x64, - 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x40, 0x0a, 0x12, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xe8, 0x02, 0x0a, - 0x15, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, - 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, - 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, - 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, - 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, - 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, - 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, - 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x25, - 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, 0x71, 0x75, - 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x65, - 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x22, 0xee, 0x02, 0x0a, 0x1b, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, - 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, - 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, - 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, - 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, - 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, - 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, - 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, - 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, - 0x73, 0x22, 0xcc, 0x01, 0x0a, 0x1c, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x22, 0xf4, 0x02, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x42, 0x65, 0x67, 0x69, - 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, - 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, - 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, - 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, - 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, - 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, - 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, - 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, - 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, - 0x72, 0x65, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, - 0x74, 0x5f, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, - 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x74, 0x42, 0x65, 0x67, 0x69, 0x6e, - 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0xa6, 0x02, 0x0a, 0x1b, 0x52, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, + 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, + 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x51, 0x75, 0x65, 0x72, + 0x69, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x74, 0x5f, 0x62, 0x65, 0x67, 0x69, + 0x6e, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x10, 0x70, 0x6f, 0x73, 0x74, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x22, 0xac, 0x02, 0x0a, 0x21, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x42, 0x65, 0x67, + 0x69, 0x6e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, @@ -6300,118 +6382,74 @@ var file_query_proto_rawDesc = []byte{ 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, - 0x22, 0xfa, 0x02, 0x0a, 0x20, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x42, 0x65, 0x67, 0x69, - 0x6e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, - 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, - 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, - 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, - 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, - 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, - 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, - 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, - 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, - 0x0a, 0x0b, 0x70, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, - 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x74, 0x5f, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x5f, 0x71, 0x75, - 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x70, 0x6f, 0x73, - 0x74, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0xac, 0x02, - 0x0a, 0x21, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, - 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, - 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x38, - 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0x87, 0x02, 0x0a, - 0x0e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, - 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, - 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, - 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, - 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, - 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, - 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, - 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0x11, 0x0a, 0x0f, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x22, 0xf6, 0x02, 0x0a, 0x0d, 0x52, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x30, 0x0a, - 0x14, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x5f, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x62, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x50, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, - 0x47, 0x0a, 0x20, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, - 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1d, 0x66, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, - 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x70, 0x75, 0x5f, - 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x63, 0x70, 0x75, - 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x71, 0x70, 0x73, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x01, 0x52, 0x03, 0x71, 0x70, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x18, - 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x69, 0x65, - 0x77, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, - 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x76, 0x69, 0x65, 0x77, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x22, 0xf6, 0x01, 0x0a, 0x0e, 0x41, 0x67, - 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, - 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x68, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, - 0x0a, 0x16, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x14, - 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x1b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f, - 0x6d, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, - 0x4d, 0x69, 0x6e, 0x12, 0x3d, 0x0a, 0x1b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f, 0x6d, - 0x61, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x4d, - 0x61, 0x78, 0x22, 0xa9, 0x02, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x06, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, + 0x22, 0x87, 0x02, 0x0a, 0x0e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, + 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, + 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, + 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x53, 0x0a, 0x26, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, - 0x79, 0x5f, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x23, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0x11, 0x0a, 0x0f, 0x52, 0x65, + 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x15, 0x0a, + 0x13, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x22, 0xf6, 0x02, 0x0a, 0x0d, 0x52, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x17, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x73, 0x12, 0x30, 0x0a, 0x14, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x6c, 0x61, 0x79, + 0x65, 0x72, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x12, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x50, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x20, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1d, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x08, 0x63, 0x70, 0x75, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x71, 0x70, 0x73, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x71, 0x70, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x63, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x12, 0x2e, 0x0a, + 0x13, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x63, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x64, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x76, 0x69, 0x65, 0x77, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x22, 0xf6, 0x01, + 0x0a, 0x0e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x12, 0x30, 0x0a, 0x14, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, + 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x14, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x1b, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x5f, 0x6d, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x4d, 0x69, 0x6e, 0x12, 0x3d, 0x0a, 0x1b, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, + 0x64, 0x73, 0x5f, 0x6d, 0x61, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x4d, 0x61, 0x78, 0x22, 0x95, 0x02, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, + 0x12, 0x3f, 0x0a, 0x1c, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x74, 0x65, 0x72, 0x6d, + 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x19, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x54, + 0x65, 0x72, 0x6d, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x3b, 0x0a, 0x0e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, @@ -6628,148 +6666,149 @@ var file_query_proto_goTypes = []interface{}{ var file_query_proto_depIdxs = []int32{ 79, // 0: query.Target.tablet_type:type_name -> topodata.TabletType 2, // 1: query.Value.type:type_name -> query.Type - 2, // 2: query.BindVariable.type:type_name -> query.Type - 15, // 3: query.BindVariable.values:type_name -> query.Value - 76, // 4: query.BoundQuery.bind_variables:type_name -> query.BoundQuery.BindVariablesEntry - 5, // 5: query.ExecuteOptions.included_fields:type_name -> query.ExecuteOptions.IncludedFields - 6, // 6: query.ExecuteOptions.workload:type_name -> query.ExecuteOptions.Workload - 7, // 7: query.ExecuteOptions.transaction_isolation:type_name -> query.ExecuteOptions.TransactionIsolation - 8, // 8: query.ExecuteOptions.planner_version:type_name -> query.ExecuteOptions.PlannerVersion - 9, // 9: query.ExecuteOptions.consolidator:type_name -> query.ExecuteOptions.Consolidator - 10, // 10: query.ExecuteOptions.transaction_access_mode:type_name -> query.ExecuteOptions.TransactionAccessMode - 2, // 11: query.Field.type:type_name -> query.Type - 19, // 12: query.QueryResult.fields:type_name -> query.Field - 20, // 13: query.QueryResult.rows:type_name -> query.Row - 77, // 14: query.StreamEvent.statements:type_name -> query.StreamEvent.Statement - 14, // 15: query.StreamEvent.event_token:type_name -> query.EventToken - 80, // 16: query.ExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 17: query.ExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 18: query.ExecuteRequest.target:type_name -> query.Target - 17, // 19: query.ExecuteRequest.query:type_name -> query.BoundQuery - 18, // 20: query.ExecuteRequest.options:type_name -> query.ExecuteOptions - 21, // 21: query.ExecuteResponse.result:type_name -> query.QueryResult - 81, // 22: query.ResultWithError.error:type_name -> vtrpc.RPCError - 21, // 23: query.ResultWithError.result:type_name -> query.QueryResult - 80, // 24: query.StreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 25: query.StreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 26: query.StreamExecuteRequest.target:type_name -> query.Target - 17, // 27: query.StreamExecuteRequest.query:type_name -> query.BoundQuery - 18, // 28: query.StreamExecuteRequest.options:type_name -> query.ExecuteOptions - 21, // 29: query.StreamExecuteResponse.result:type_name -> query.QueryResult - 80, // 30: query.BeginRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 31: query.BeginRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 32: query.BeginRequest.target:type_name -> query.Target - 18, // 33: query.BeginRequest.options:type_name -> query.ExecuteOptions - 82, // 34: query.BeginResponse.tablet_alias:type_name -> topodata.TabletAlias - 80, // 35: query.CommitRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 36: query.CommitRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 37: query.CommitRequest.target:type_name -> query.Target - 80, // 38: query.RollbackRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 39: query.RollbackRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 40: query.RollbackRequest.target:type_name -> query.Target - 80, // 41: query.PrepareRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 42: query.PrepareRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 43: query.PrepareRequest.target:type_name -> query.Target - 80, // 44: query.CommitPreparedRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 45: query.CommitPreparedRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 46: query.CommitPreparedRequest.target:type_name -> query.Target - 80, // 47: query.RollbackPreparedRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 48: query.RollbackPreparedRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 49: query.RollbackPreparedRequest.target:type_name -> query.Target - 80, // 50: query.CreateTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 51: query.CreateTransactionRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 52: query.CreateTransactionRequest.target:type_name -> query.Target - 12, // 53: query.CreateTransactionRequest.participants:type_name -> query.Target - 80, // 54: query.StartCommitRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 55: query.StartCommitRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 56: query.StartCommitRequest.target:type_name -> query.Target - 80, // 57: query.SetRollbackRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 58: query.SetRollbackRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 59: query.SetRollbackRequest.target:type_name -> query.Target - 80, // 60: query.ConcludeTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 61: query.ConcludeTransactionRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 62: query.ConcludeTransactionRequest.target:type_name -> query.Target - 80, // 63: query.ReadTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 64: query.ReadTransactionRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 65: query.ReadTransactionRequest.target:type_name -> query.Target - 73, // 66: query.ReadTransactionResponse.metadata:type_name -> query.TransactionMetadata - 80, // 67: query.BeginExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 68: query.BeginExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 69: query.BeginExecuteRequest.target:type_name -> query.Target - 17, // 70: query.BeginExecuteRequest.query:type_name -> query.BoundQuery - 18, // 71: query.BeginExecuteRequest.options:type_name -> query.ExecuteOptions - 81, // 72: query.BeginExecuteResponse.error:type_name -> vtrpc.RPCError - 21, // 73: query.BeginExecuteResponse.result:type_name -> query.QueryResult - 82, // 74: query.BeginExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias - 80, // 75: query.BeginStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 76: query.BeginStreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 77: query.BeginStreamExecuteRequest.target:type_name -> query.Target - 17, // 78: query.BeginStreamExecuteRequest.query:type_name -> query.BoundQuery - 18, // 79: query.BeginStreamExecuteRequest.options:type_name -> query.ExecuteOptions - 81, // 80: query.BeginStreamExecuteResponse.error:type_name -> vtrpc.RPCError - 21, // 81: query.BeginStreamExecuteResponse.result:type_name -> query.QueryResult - 82, // 82: query.BeginStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias - 80, // 83: query.MessageStreamRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 84: query.MessageStreamRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 85: query.MessageStreamRequest.target:type_name -> query.Target - 21, // 86: query.MessageStreamResponse.result:type_name -> query.QueryResult - 80, // 87: query.MessageAckRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 88: query.MessageAckRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 89: query.MessageAckRequest.target:type_name -> query.Target - 15, // 90: query.MessageAckRequest.ids:type_name -> query.Value - 21, // 91: query.MessageAckResponse.result:type_name -> query.QueryResult - 80, // 92: query.ReserveExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 93: query.ReserveExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 94: query.ReserveExecuteRequest.target:type_name -> query.Target - 17, // 95: query.ReserveExecuteRequest.query:type_name -> query.BoundQuery - 18, // 96: query.ReserveExecuteRequest.options:type_name -> query.ExecuteOptions - 81, // 97: query.ReserveExecuteResponse.error:type_name -> vtrpc.RPCError - 21, // 98: query.ReserveExecuteResponse.result:type_name -> query.QueryResult - 82, // 99: query.ReserveExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias - 80, // 100: query.ReserveStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 101: query.ReserveStreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 102: query.ReserveStreamExecuteRequest.target:type_name -> query.Target - 17, // 103: query.ReserveStreamExecuteRequest.query:type_name -> query.BoundQuery - 18, // 104: query.ReserveStreamExecuteRequest.options:type_name -> query.ExecuteOptions - 81, // 105: query.ReserveStreamExecuteResponse.error:type_name -> vtrpc.RPCError - 21, // 106: query.ReserveStreamExecuteResponse.result:type_name -> query.QueryResult - 82, // 107: query.ReserveStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias - 80, // 108: query.ReserveBeginExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 109: query.ReserveBeginExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 110: query.ReserveBeginExecuteRequest.target:type_name -> query.Target - 17, // 111: query.ReserveBeginExecuteRequest.query:type_name -> query.BoundQuery - 18, // 112: query.ReserveBeginExecuteRequest.options:type_name -> query.ExecuteOptions - 81, // 113: query.ReserveBeginExecuteResponse.error:type_name -> vtrpc.RPCError - 21, // 114: query.ReserveBeginExecuteResponse.result:type_name -> query.QueryResult - 82, // 115: query.ReserveBeginExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias - 80, // 116: query.ReserveBeginStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 117: query.ReserveBeginStreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 118: query.ReserveBeginStreamExecuteRequest.target:type_name -> query.Target - 17, // 119: query.ReserveBeginStreamExecuteRequest.query:type_name -> query.BoundQuery - 18, // 120: query.ReserveBeginStreamExecuteRequest.options:type_name -> query.ExecuteOptions - 81, // 121: query.ReserveBeginStreamExecuteResponse.error:type_name -> vtrpc.RPCError - 21, // 122: query.ReserveBeginStreamExecuteResponse.result:type_name -> query.QueryResult - 82, // 123: query.ReserveBeginStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias - 80, // 124: query.ReleaseRequest.effective_caller_id:type_name -> vtrpc.CallerID - 13, // 125: query.ReleaseRequest.immediate_caller_id:type_name -> query.VTGateCallerID - 12, // 126: query.ReleaseRequest.target:type_name -> query.Target - 12, // 127: query.StreamHealthResponse.target:type_name -> query.Target - 70, // 128: query.StreamHealthResponse.realtime_stats:type_name -> query.RealtimeStats - 82, // 129: query.StreamHealthResponse.tablet_alias:type_name -> topodata.TabletAlias - 3, // 130: query.TransactionMetadata.state:type_name -> query.TransactionState - 12, // 131: query.TransactionMetadata.participants:type_name -> query.Target - 12, // 132: query.GetSchemaRequest.target:type_name -> query.Target - 4, // 133: query.GetSchemaRequest.table_type:type_name -> query.SchemaTableType - 78, // 134: query.GetSchemaResponse.table_definition:type_name -> query.GetSchemaResponse.TableDefinitionEntry - 16, // 135: query.BoundQuery.BindVariablesEntry.value:type_name -> query.BindVariable - 11, // 136: query.StreamEvent.Statement.category:type_name -> query.StreamEvent.Statement.Category - 19, // 137: query.StreamEvent.Statement.primary_key_fields:type_name -> query.Field - 20, // 138: query.StreamEvent.Statement.primary_key_values:type_name -> query.Row - 139, // [139:139] is the sub-list for method output_type - 139, // [139:139] is the sub-list for method input_type - 139, // [139:139] is the sub-list for extension type_name - 139, // [139:139] is the sub-list for extension extendee - 0, // [0:139] is the sub-list for field type_name + 15, // 2: query.Value.values:type_name -> query.Value + 2, // 3: query.BindVariable.type:type_name -> query.Type + 15, // 4: query.BindVariable.values:type_name -> query.Value + 76, // 5: query.BoundQuery.bind_variables:type_name -> query.BoundQuery.BindVariablesEntry + 5, // 6: query.ExecuteOptions.included_fields:type_name -> query.ExecuteOptions.IncludedFields + 6, // 7: query.ExecuteOptions.workload:type_name -> query.ExecuteOptions.Workload + 7, // 8: query.ExecuteOptions.transaction_isolation:type_name -> query.ExecuteOptions.TransactionIsolation + 8, // 9: query.ExecuteOptions.planner_version:type_name -> query.ExecuteOptions.PlannerVersion + 9, // 10: query.ExecuteOptions.consolidator:type_name -> query.ExecuteOptions.Consolidator + 10, // 11: query.ExecuteOptions.transaction_access_mode:type_name -> query.ExecuteOptions.TransactionAccessMode + 2, // 12: query.Field.type:type_name -> query.Type + 19, // 13: query.QueryResult.fields:type_name -> query.Field + 20, // 14: query.QueryResult.rows:type_name -> query.Row + 77, // 15: query.StreamEvent.statements:type_name -> query.StreamEvent.Statement + 14, // 16: query.StreamEvent.event_token:type_name -> query.EventToken + 80, // 17: query.ExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 18: query.ExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 19: query.ExecuteRequest.target:type_name -> query.Target + 17, // 20: query.ExecuteRequest.query:type_name -> query.BoundQuery + 18, // 21: query.ExecuteRequest.options:type_name -> query.ExecuteOptions + 21, // 22: query.ExecuteResponse.result:type_name -> query.QueryResult + 81, // 23: query.ResultWithError.error:type_name -> vtrpc.RPCError + 21, // 24: query.ResultWithError.result:type_name -> query.QueryResult + 80, // 25: query.StreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 26: query.StreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 27: query.StreamExecuteRequest.target:type_name -> query.Target + 17, // 28: query.StreamExecuteRequest.query:type_name -> query.BoundQuery + 18, // 29: query.StreamExecuteRequest.options:type_name -> query.ExecuteOptions + 21, // 30: query.StreamExecuteResponse.result:type_name -> query.QueryResult + 80, // 31: query.BeginRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 32: query.BeginRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 33: query.BeginRequest.target:type_name -> query.Target + 18, // 34: query.BeginRequest.options:type_name -> query.ExecuteOptions + 82, // 35: query.BeginResponse.tablet_alias:type_name -> topodata.TabletAlias + 80, // 36: query.CommitRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 37: query.CommitRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 38: query.CommitRequest.target:type_name -> query.Target + 80, // 39: query.RollbackRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 40: query.RollbackRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 41: query.RollbackRequest.target:type_name -> query.Target + 80, // 42: query.PrepareRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 43: query.PrepareRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 44: query.PrepareRequest.target:type_name -> query.Target + 80, // 45: query.CommitPreparedRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 46: query.CommitPreparedRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 47: query.CommitPreparedRequest.target:type_name -> query.Target + 80, // 48: query.RollbackPreparedRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 49: query.RollbackPreparedRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 50: query.RollbackPreparedRequest.target:type_name -> query.Target + 80, // 51: query.CreateTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 52: query.CreateTransactionRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 53: query.CreateTransactionRequest.target:type_name -> query.Target + 12, // 54: query.CreateTransactionRequest.participants:type_name -> query.Target + 80, // 55: query.StartCommitRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 56: query.StartCommitRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 57: query.StartCommitRequest.target:type_name -> query.Target + 80, // 58: query.SetRollbackRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 59: query.SetRollbackRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 60: query.SetRollbackRequest.target:type_name -> query.Target + 80, // 61: query.ConcludeTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 62: query.ConcludeTransactionRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 63: query.ConcludeTransactionRequest.target:type_name -> query.Target + 80, // 64: query.ReadTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 65: query.ReadTransactionRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 66: query.ReadTransactionRequest.target:type_name -> query.Target + 73, // 67: query.ReadTransactionResponse.metadata:type_name -> query.TransactionMetadata + 80, // 68: query.BeginExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 69: query.BeginExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 70: query.BeginExecuteRequest.target:type_name -> query.Target + 17, // 71: query.BeginExecuteRequest.query:type_name -> query.BoundQuery + 18, // 72: query.BeginExecuteRequest.options:type_name -> query.ExecuteOptions + 81, // 73: query.BeginExecuteResponse.error:type_name -> vtrpc.RPCError + 21, // 74: query.BeginExecuteResponse.result:type_name -> query.QueryResult + 82, // 75: query.BeginExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias + 80, // 76: query.BeginStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 77: query.BeginStreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 78: query.BeginStreamExecuteRequest.target:type_name -> query.Target + 17, // 79: query.BeginStreamExecuteRequest.query:type_name -> query.BoundQuery + 18, // 80: query.BeginStreamExecuteRequest.options:type_name -> query.ExecuteOptions + 81, // 81: query.BeginStreamExecuteResponse.error:type_name -> vtrpc.RPCError + 21, // 82: query.BeginStreamExecuteResponse.result:type_name -> query.QueryResult + 82, // 83: query.BeginStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias + 80, // 84: query.MessageStreamRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 85: query.MessageStreamRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 86: query.MessageStreamRequest.target:type_name -> query.Target + 21, // 87: query.MessageStreamResponse.result:type_name -> query.QueryResult + 80, // 88: query.MessageAckRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 89: query.MessageAckRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 90: query.MessageAckRequest.target:type_name -> query.Target + 15, // 91: query.MessageAckRequest.ids:type_name -> query.Value + 21, // 92: query.MessageAckResponse.result:type_name -> query.QueryResult + 80, // 93: query.ReserveExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 94: query.ReserveExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 95: query.ReserveExecuteRequest.target:type_name -> query.Target + 17, // 96: query.ReserveExecuteRequest.query:type_name -> query.BoundQuery + 18, // 97: query.ReserveExecuteRequest.options:type_name -> query.ExecuteOptions + 81, // 98: query.ReserveExecuteResponse.error:type_name -> vtrpc.RPCError + 21, // 99: query.ReserveExecuteResponse.result:type_name -> query.QueryResult + 82, // 100: query.ReserveExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias + 80, // 101: query.ReserveStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 102: query.ReserveStreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 103: query.ReserveStreamExecuteRequest.target:type_name -> query.Target + 17, // 104: query.ReserveStreamExecuteRequest.query:type_name -> query.BoundQuery + 18, // 105: query.ReserveStreamExecuteRequest.options:type_name -> query.ExecuteOptions + 81, // 106: query.ReserveStreamExecuteResponse.error:type_name -> vtrpc.RPCError + 21, // 107: query.ReserveStreamExecuteResponse.result:type_name -> query.QueryResult + 82, // 108: query.ReserveStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias + 80, // 109: query.ReserveBeginExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 110: query.ReserveBeginExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 111: query.ReserveBeginExecuteRequest.target:type_name -> query.Target + 17, // 112: query.ReserveBeginExecuteRequest.query:type_name -> query.BoundQuery + 18, // 113: query.ReserveBeginExecuteRequest.options:type_name -> query.ExecuteOptions + 81, // 114: query.ReserveBeginExecuteResponse.error:type_name -> vtrpc.RPCError + 21, // 115: query.ReserveBeginExecuteResponse.result:type_name -> query.QueryResult + 82, // 116: query.ReserveBeginExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias + 80, // 117: query.ReserveBeginStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 118: query.ReserveBeginStreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 119: query.ReserveBeginStreamExecuteRequest.target:type_name -> query.Target + 17, // 120: query.ReserveBeginStreamExecuteRequest.query:type_name -> query.BoundQuery + 18, // 121: query.ReserveBeginStreamExecuteRequest.options:type_name -> query.ExecuteOptions + 81, // 122: query.ReserveBeginStreamExecuteResponse.error:type_name -> vtrpc.RPCError + 21, // 123: query.ReserveBeginStreamExecuteResponse.result:type_name -> query.QueryResult + 82, // 124: query.ReserveBeginStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias + 80, // 125: query.ReleaseRequest.effective_caller_id:type_name -> vtrpc.CallerID + 13, // 126: query.ReleaseRequest.immediate_caller_id:type_name -> query.VTGateCallerID + 12, // 127: query.ReleaseRequest.target:type_name -> query.Target + 12, // 128: query.StreamHealthResponse.target:type_name -> query.Target + 70, // 129: query.StreamHealthResponse.realtime_stats:type_name -> query.RealtimeStats + 82, // 130: query.StreamHealthResponse.tablet_alias:type_name -> topodata.TabletAlias + 3, // 131: query.TransactionMetadata.state:type_name -> query.TransactionState + 12, // 132: query.TransactionMetadata.participants:type_name -> query.Target + 12, // 133: query.GetSchemaRequest.target:type_name -> query.Target + 4, // 134: query.GetSchemaRequest.table_type:type_name -> query.SchemaTableType + 78, // 135: query.GetSchemaResponse.table_definition:type_name -> query.GetSchemaResponse.TableDefinitionEntry + 16, // 136: query.BoundQuery.BindVariablesEntry.value:type_name -> query.BindVariable + 11, // 137: query.StreamEvent.Statement.category:type_name -> query.StreamEvent.Statement.Category + 19, // 138: query.StreamEvent.Statement.primary_key_fields:type_name -> query.Field + 20, // 139: query.StreamEvent.Statement.primary_key_values:type_name -> query.Row + 140, // [140:140] is the sub-list for method output_type + 140, // [140:140] is the sub-list for method input_type + 140, // [140:140] is the sub-list for extension type_name + 140, // [140:140] is the sub-list for extension extendee + 0, // [0:140] is the sub-list for field type_name } func init() { file_query_proto_init() } diff --git a/go/vt/proto/query/query_vtproto.pb.go b/go/vt/proto/query/query_vtproto.pb.go index 3abec4b7d1a..2da0324bab0 100644 --- a/go/vt/proto/query/query_vtproto.pb.go +++ b/go/vt/proto/query/query_vtproto.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: query.proto package query @@ -7,6 +7,7 @@ package query import ( binary "encoding/binary" fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" math "math" @@ -23,6 +24,1487 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *Target) CloneVT() *Target { + if m == nil { + return (*Target)(nil) + } + r := &Target{ + Keyspace: m.Keyspace, + Shard: m.Shard, + TabletType: m.TabletType, + Cell: m.Cell, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Target) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VTGateCallerID) CloneVT() *VTGateCallerID { + if m == nil { + return (*VTGateCallerID)(nil) + } + r := &VTGateCallerID{ + Username: m.Username, + } + if rhs := m.Groups; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Groups = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VTGateCallerID) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *EventToken) CloneVT() *EventToken { + if m == nil { + return (*EventToken)(nil) + } + r := &EventToken{ + Timestamp: m.Timestamp, + Shard: m.Shard, + Position: m.Position, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *EventToken) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Value) CloneVT() *Value { + if m == nil { + return (*Value)(nil) + } + r := &Value{ + Type: m.Type, + } + if rhs := m.Value; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Value = tmpBytes + } + if rhs := m.Values; rhs != nil { + tmpContainer := make([]*Value, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Values = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Value) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BindVariable) CloneVT() *BindVariable { + if m == nil { + return (*BindVariable)(nil) + } + r := &BindVariable{ + Type: m.Type, + } + if rhs := m.Value; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Value = tmpBytes + } + if rhs := m.Values; rhs != nil { + tmpContainer := make([]*Value, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Values = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BindVariable) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BoundQuery) CloneVT() *BoundQuery { + if m == nil { + return (*BoundQuery)(nil) + } + r := &BoundQuery{ + Sql: m.Sql, + } + if rhs := m.BindVariables; rhs != nil { + tmpContainer := make(map[string]*BindVariable, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.BindVariables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BoundQuery) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteOptions) CloneVT() *ExecuteOptions { + if m == nil { + return (*ExecuteOptions)(nil) + } + r := &ExecuteOptions{ + IncludedFields: m.IncludedFields, + ClientFoundRows: m.ClientFoundRows, + Workload: m.Workload, + SqlSelectLimit: m.SqlSelectLimit, + TransactionIsolation: m.TransactionIsolation, + SkipQueryPlanCache: m.SkipQueryPlanCache, + PlannerVersion: m.PlannerVersion, + HasCreatedTempTables: m.HasCreatedTempTables, + Consolidator: m.Consolidator, + WorkloadName: m.WorkloadName, + Priority: m.Priority, + } + if rhs := m.TransactionAccessMode; rhs != nil { + tmpContainer := make([]ExecuteOptions_TransactionAccessMode, len(rhs)) + copy(tmpContainer, rhs) + r.TransactionAccessMode = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteOptions) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Field) CloneVT() *Field { + if m == nil { + return (*Field)(nil) + } + r := &Field{ + Name: m.Name, + Type: m.Type, + Table: m.Table, + OrgTable: m.OrgTable, + Database: m.Database, + OrgName: m.OrgName, + ColumnLength: m.ColumnLength, + Charset: m.Charset, + Decimals: m.Decimals, + Flags: m.Flags, + ColumnType: m.ColumnType, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Field) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Row) CloneVT() *Row { + if m == nil { + return (*Row)(nil) + } + r := &Row{} + if rhs := m.Lengths; rhs != nil { + tmpContainer := make([]int64, len(rhs)) + copy(tmpContainer, rhs) + r.Lengths = tmpContainer + } + if rhs := m.Values; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Values = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Row) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *QueryResult) CloneVT() *QueryResult { + if m == nil { + return (*QueryResult)(nil) + } + r := &QueryResult{ + RowsAffected: m.RowsAffected, + InsertId: m.InsertId, + Info: m.Info, + SessionStateChanges: m.SessionStateChanges, + } + if rhs := m.Fields; rhs != nil { + tmpContainer := make([]*Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Fields = tmpContainer + } + if rhs := m.Rows; rhs != nil { + tmpContainer := make([]*Row, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Rows = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *QueryResult) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *QueryWarning) CloneVT() *QueryWarning { + if m == nil { + return (*QueryWarning)(nil) + } + r := &QueryWarning{ + Code: m.Code, + Message: m.Message, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *QueryWarning) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamEvent_Statement) CloneVT() *StreamEvent_Statement { + if m == nil { + return (*StreamEvent_Statement)(nil) + } + r := &StreamEvent_Statement{ + Category: m.Category, + TableName: m.TableName, + } + if rhs := m.PrimaryKeyFields; rhs != nil { + tmpContainer := make([]*Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.PrimaryKeyFields = tmpContainer + } + if rhs := m.PrimaryKeyValues; rhs != nil { + tmpContainer := make([]*Row, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.PrimaryKeyValues = tmpContainer + } + if rhs := m.Sql; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Sql = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamEvent_Statement) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamEvent) CloneVT() *StreamEvent { + if m == nil { + return (*StreamEvent)(nil) + } + r := &StreamEvent{ + EventToken: m.EventToken.CloneVT(), + } + if rhs := m.Statements; rhs != nil { + tmpContainer := make([]*StreamEvent_Statement, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Statements = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamEvent) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteRequest) CloneVT() *ExecuteRequest { + if m == nil { + return (*ExecuteRequest)(nil) + } + r := &ExecuteRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query.CloneVT(), + TransactionId: m.TransactionId, + Options: m.Options.CloneVT(), + ReservedId: m.ReservedId, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteResponse) CloneVT() *ExecuteResponse { + if m == nil { + return (*ExecuteResponse)(nil) + } + r := &ExecuteResponse{ + Result: m.Result.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResultWithError) CloneVT() *ResultWithError { + if m == nil { + return (*ResultWithError)(nil) + } + r := &ResultWithError{ + Error: m.Error.CloneVT(), + Result: m.Result.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ResultWithError) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamExecuteRequest) CloneVT() *StreamExecuteRequest { + if m == nil { + return (*StreamExecuteRequest)(nil) + } + r := &StreamExecuteRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query.CloneVT(), + Options: m.Options.CloneVT(), + TransactionId: m.TransactionId, + ReservedId: m.ReservedId, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamExecuteResponse) CloneVT() *StreamExecuteResponse { + if m == nil { + return (*StreamExecuteResponse)(nil) + } + r := &StreamExecuteResponse{ + Result: m.Result.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BeginRequest) CloneVT() *BeginRequest { + if m == nil { + return (*BeginRequest)(nil) + } + r := &BeginRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Options: m.Options.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BeginRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BeginResponse) CloneVT() *BeginResponse { + if m == nil { + return (*BeginResponse)(nil) + } + r := &BeginResponse{ + TransactionId: m.TransactionId, + TabletAlias: m.TabletAlias.CloneVT(), + SessionStateChanges: m.SessionStateChanges, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BeginResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CommitRequest) CloneVT() *CommitRequest { + if m == nil { + return (*CommitRequest)(nil) + } + r := &CommitRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + TransactionId: m.TransactionId, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CommitRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CommitResponse) CloneVT() *CommitResponse { + if m == nil { + return (*CommitResponse)(nil) + } + r := &CommitResponse{ + ReservedId: m.ReservedId, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CommitResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RollbackRequest) CloneVT() *RollbackRequest { + if m == nil { + return (*RollbackRequest)(nil) + } + r := &RollbackRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + TransactionId: m.TransactionId, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RollbackRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RollbackResponse) CloneVT() *RollbackResponse { + if m == nil { + return (*RollbackResponse)(nil) + } + r := &RollbackResponse{ + ReservedId: m.ReservedId, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RollbackResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PrepareRequest) CloneVT() *PrepareRequest { + if m == nil { + return (*PrepareRequest)(nil) + } + r := &PrepareRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + TransactionId: m.TransactionId, + Dtid: m.Dtid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PrepareRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PrepareResponse) CloneVT() *PrepareResponse { + if m == nil { + return (*PrepareResponse)(nil) + } + r := &PrepareResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PrepareResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CommitPreparedRequest) CloneVT() *CommitPreparedRequest { + if m == nil { + return (*CommitPreparedRequest)(nil) + } + r := &CommitPreparedRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Dtid: m.Dtid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CommitPreparedRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CommitPreparedResponse) CloneVT() *CommitPreparedResponse { + if m == nil { + return (*CommitPreparedResponse)(nil) + } + r := &CommitPreparedResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CommitPreparedResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RollbackPreparedRequest) CloneVT() *RollbackPreparedRequest { + if m == nil { + return (*RollbackPreparedRequest)(nil) + } + r := &RollbackPreparedRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + TransactionId: m.TransactionId, + Dtid: m.Dtid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RollbackPreparedRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RollbackPreparedResponse) CloneVT() *RollbackPreparedResponse { + if m == nil { + return (*RollbackPreparedResponse)(nil) + } + r := &RollbackPreparedResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RollbackPreparedResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CreateTransactionRequest) CloneVT() *CreateTransactionRequest { + if m == nil { + return (*CreateTransactionRequest)(nil) + } + r := &CreateTransactionRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Dtid: m.Dtid, + } + if rhs := m.Participants; rhs != nil { + tmpContainer := make([]*Target, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Participants = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CreateTransactionRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CreateTransactionResponse) CloneVT() *CreateTransactionResponse { + if m == nil { + return (*CreateTransactionResponse)(nil) + } + r := &CreateTransactionResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CreateTransactionResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StartCommitRequest) CloneVT() *StartCommitRequest { + if m == nil { + return (*StartCommitRequest)(nil) + } + r := &StartCommitRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + TransactionId: m.TransactionId, + Dtid: m.Dtid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StartCommitRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StartCommitResponse) CloneVT() *StartCommitResponse { + if m == nil { + return (*StartCommitResponse)(nil) + } + r := &StartCommitResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StartCommitResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetRollbackRequest) CloneVT() *SetRollbackRequest { + if m == nil { + return (*SetRollbackRequest)(nil) + } + r := &SetRollbackRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + TransactionId: m.TransactionId, + Dtid: m.Dtid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SetRollbackRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetRollbackResponse) CloneVT() *SetRollbackResponse { + if m == nil { + return (*SetRollbackResponse)(nil) + } + r := &SetRollbackResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SetRollbackResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ConcludeTransactionRequest) CloneVT() *ConcludeTransactionRequest { + if m == nil { + return (*ConcludeTransactionRequest)(nil) + } + r := &ConcludeTransactionRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Dtid: m.Dtid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ConcludeTransactionRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ConcludeTransactionResponse) CloneVT() *ConcludeTransactionResponse { + if m == nil { + return (*ConcludeTransactionResponse)(nil) + } + r := &ConcludeTransactionResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ConcludeTransactionResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReadTransactionRequest) CloneVT() *ReadTransactionRequest { + if m == nil { + return (*ReadTransactionRequest)(nil) + } + r := &ReadTransactionRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Dtid: m.Dtid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReadTransactionRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReadTransactionResponse) CloneVT() *ReadTransactionResponse { + if m == nil { + return (*ReadTransactionResponse)(nil) + } + r := &ReadTransactionResponse{ + Metadata: m.Metadata.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReadTransactionResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BeginExecuteRequest) CloneVT() *BeginExecuteRequest { + if m == nil { + return (*BeginExecuteRequest)(nil) + } + r := &BeginExecuteRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query.CloneVT(), + Options: m.Options.CloneVT(), + ReservedId: m.ReservedId, + } + if rhs := m.PreQueries; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PreQueries = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BeginExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BeginExecuteResponse) CloneVT() *BeginExecuteResponse { + if m == nil { + return (*BeginExecuteResponse)(nil) + } + r := &BeginExecuteResponse{ + Error: m.Error.CloneVT(), + Result: m.Result.CloneVT(), + TransactionId: m.TransactionId, + TabletAlias: m.TabletAlias.CloneVT(), + SessionStateChanges: m.SessionStateChanges, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BeginExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BeginStreamExecuteRequest) CloneVT() *BeginStreamExecuteRequest { + if m == nil { + return (*BeginStreamExecuteRequest)(nil) + } + r := &BeginStreamExecuteRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query.CloneVT(), + Options: m.Options.CloneVT(), + ReservedId: m.ReservedId, + } + if rhs := m.PreQueries; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PreQueries = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BeginStreamExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BeginStreamExecuteResponse) CloneVT() *BeginStreamExecuteResponse { + if m == nil { + return (*BeginStreamExecuteResponse)(nil) + } + r := &BeginStreamExecuteResponse{ + Error: m.Error.CloneVT(), + Result: m.Result.CloneVT(), + TransactionId: m.TransactionId, + TabletAlias: m.TabletAlias.CloneVT(), + SessionStateChanges: m.SessionStateChanges, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BeginStreamExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MessageStreamRequest) CloneVT() *MessageStreamRequest { + if m == nil { + return (*MessageStreamRequest)(nil) + } + r := &MessageStreamRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Name: m.Name, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MessageStreamRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MessageStreamResponse) CloneVT() *MessageStreamResponse { + if m == nil { + return (*MessageStreamResponse)(nil) + } + r := &MessageStreamResponse{ + Result: m.Result.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MessageStreamResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MessageAckRequest) CloneVT() *MessageAckRequest { + if m == nil { + return (*MessageAckRequest)(nil) + } + r := &MessageAckRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Name: m.Name, + } + if rhs := m.Ids; rhs != nil { + tmpContainer := make([]*Value, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Ids = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MessageAckRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MessageAckResponse) CloneVT() *MessageAckResponse { + if m == nil { + return (*MessageAckResponse)(nil) + } + r := &MessageAckResponse{ + Result: m.Result.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MessageAckResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReserveExecuteRequest) CloneVT() *ReserveExecuteRequest { + if m == nil { + return (*ReserveExecuteRequest)(nil) + } + r := &ReserveExecuteRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query.CloneVT(), + TransactionId: m.TransactionId, + Options: m.Options.CloneVT(), + } + if rhs := m.PreQueries; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PreQueries = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReserveExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReserveExecuteResponse) CloneVT() *ReserveExecuteResponse { + if m == nil { + return (*ReserveExecuteResponse)(nil) + } + r := &ReserveExecuteResponse{ + Error: m.Error.CloneVT(), + Result: m.Result.CloneVT(), + ReservedId: m.ReservedId, + TabletAlias: m.TabletAlias.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReserveExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReserveStreamExecuteRequest) CloneVT() *ReserveStreamExecuteRequest { + if m == nil { + return (*ReserveStreamExecuteRequest)(nil) + } + r := &ReserveStreamExecuteRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query.CloneVT(), + Options: m.Options.CloneVT(), + TransactionId: m.TransactionId, + } + if rhs := m.PreQueries; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PreQueries = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReserveStreamExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReserveStreamExecuteResponse) CloneVT() *ReserveStreamExecuteResponse { + if m == nil { + return (*ReserveStreamExecuteResponse)(nil) + } + r := &ReserveStreamExecuteResponse{ + Error: m.Error.CloneVT(), + Result: m.Result.CloneVT(), + ReservedId: m.ReservedId, + TabletAlias: m.TabletAlias.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReserveStreamExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReserveBeginExecuteRequest) CloneVT() *ReserveBeginExecuteRequest { + if m == nil { + return (*ReserveBeginExecuteRequest)(nil) + } + r := &ReserveBeginExecuteRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query.CloneVT(), + Options: m.Options.CloneVT(), + } + if rhs := m.PreQueries; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PreQueries = tmpContainer + } + if rhs := m.PostBeginQueries; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PostBeginQueries = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReserveBeginExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReserveBeginExecuteResponse) CloneVT() *ReserveBeginExecuteResponse { + if m == nil { + return (*ReserveBeginExecuteResponse)(nil) + } + r := &ReserveBeginExecuteResponse{ + Error: m.Error.CloneVT(), + Result: m.Result.CloneVT(), + TransactionId: m.TransactionId, + ReservedId: m.ReservedId, + TabletAlias: m.TabletAlias.CloneVT(), + SessionStateChanges: m.SessionStateChanges, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReserveBeginExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReserveBeginStreamExecuteRequest) CloneVT() *ReserveBeginStreamExecuteRequest { + if m == nil { + return (*ReserveBeginStreamExecuteRequest)(nil) + } + r := &ReserveBeginStreamExecuteRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + Query: m.Query.CloneVT(), + Options: m.Options.CloneVT(), + } + if rhs := m.PreQueries; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PreQueries = tmpContainer + } + if rhs := m.PostBeginQueries; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PostBeginQueries = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReserveBeginStreamExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReserveBeginStreamExecuteResponse) CloneVT() *ReserveBeginStreamExecuteResponse { + if m == nil { + return (*ReserveBeginStreamExecuteResponse)(nil) + } + r := &ReserveBeginStreamExecuteResponse{ + Error: m.Error.CloneVT(), + Result: m.Result.CloneVT(), + TransactionId: m.TransactionId, + ReservedId: m.ReservedId, + TabletAlias: m.TabletAlias.CloneVT(), + SessionStateChanges: m.SessionStateChanges, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReserveBeginStreamExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReleaseRequest) CloneVT() *ReleaseRequest { + if m == nil { + return (*ReleaseRequest)(nil) + } + r := &ReleaseRequest{ + EffectiveCallerId: m.EffectiveCallerId.CloneVT(), + ImmediateCallerId: m.ImmediateCallerId.CloneVT(), + Target: m.Target.CloneVT(), + TransactionId: m.TransactionId, + ReservedId: m.ReservedId, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReleaseRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReleaseResponse) CloneVT() *ReleaseResponse { + if m == nil { + return (*ReleaseResponse)(nil) + } + r := &ReleaseResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReleaseResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamHealthRequest) CloneVT() *StreamHealthRequest { + if m == nil { + return (*StreamHealthRequest)(nil) + } + r := &StreamHealthRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamHealthRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RealtimeStats) CloneVT() *RealtimeStats { + if m == nil { + return (*RealtimeStats)(nil) + } + r := &RealtimeStats{ + HealthError: m.HealthError, + ReplicationLagSeconds: m.ReplicationLagSeconds, + BinlogPlayersCount: m.BinlogPlayersCount, + FilteredReplicationLagSeconds: m.FilteredReplicationLagSeconds, + CpuUsage: m.CpuUsage, + Qps: m.Qps, + } + if rhs := m.TableSchemaChanged; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.TableSchemaChanged = tmpContainer + } + if rhs := m.ViewSchemaChanged; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ViewSchemaChanged = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RealtimeStats) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *AggregateStats) CloneVT() *AggregateStats { + if m == nil { + return (*AggregateStats)(nil) + } + r := &AggregateStats{ + HealthyTabletCount: m.HealthyTabletCount, + UnhealthyTabletCount: m.UnhealthyTabletCount, + ReplicationLagSecondsMin: m.ReplicationLagSecondsMin, + ReplicationLagSecondsMax: m.ReplicationLagSecondsMax, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *AggregateStats) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamHealthResponse) CloneVT() *StreamHealthResponse { + if m == nil { + return (*StreamHealthResponse)(nil) + } + r := &StreamHealthResponse{ + Target: m.Target.CloneVT(), + Serving: m.Serving, + PrimaryTermStartTimestamp: m.PrimaryTermStartTimestamp, + RealtimeStats: m.RealtimeStats.CloneVT(), + TabletAlias: m.TabletAlias.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamHealthResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TransactionMetadata) CloneVT() *TransactionMetadata { + if m == nil { + return (*TransactionMetadata)(nil) + } + r := &TransactionMetadata{ + Dtid: m.Dtid, + State: m.State, + TimeCreated: m.TimeCreated, + } + if rhs := m.Participants; rhs != nil { + tmpContainer := make([]*Target, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Participants = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TransactionMetadata) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemaRequest) CloneVT() *GetSchemaRequest { + if m == nil { + return (*GetSchemaRequest)(nil) + } + r := &GetSchemaRequest{ + Target: m.Target.CloneVT(), + TableType: m.TableType, + } + if rhs := m.TableNames; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.TableNames = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemaResponse) CloneVT() *GetSchemaResponse { + if m == nil { + return (*GetSchemaResponse)(nil) + } + r := &GetSchemaResponse{} + if rhs := m.TableDefinition; rhs != nil { + tmpContainer := make(map[string]string, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.TableDefinition = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Target) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -213,6 +1695,18 @@ func (m *Value) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Values[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } if len(m.Value) > 0 { i -= len(m.Value) copy(dAtA[i:], m.Value) @@ -377,6 +1871,22 @@ func (m *ExecuteOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Priority) > 0 { + i -= len(m.Priority) + copy(dAtA[i:], m.Priority) + i = encodeVarint(dAtA, i, uint64(len(m.Priority))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.WorkloadName) > 0 { + i -= len(m.WorkloadName) + copy(dAtA[i:], m.WorkloadName) + i = encodeVarint(dAtA, i, uint64(len(m.WorkloadName))) + i-- + dAtA[i] = 0x7a + } if len(m.TransactionAccessMode) > 0 { var pksize2 int for _, num := range m.TransactionAccessMode { @@ -4011,8 +5521,8 @@ func (m *StreamHealthResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i-- dAtA[i] = 0x22 } - if m.TabletExternallyReparentedTimestamp != 0 { - i = encodeVarint(dAtA, i, uint64(m.TabletExternallyReparentedTimestamp)) + if m.PrimaryTermStartTimestamp != 0 { + i = encodeVarint(dAtA, i, uint64(m.PrimaryTermStartTimestamp)) i-- dAtA[i] = 0x18 } @@ -4323,6 +5833,12 @@ func (m *Value) SizeVT() (n int) { if l > 0 { n += 1 + l + sov(uint64(l)) } + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } n += len(m.unknownFields) return n } @@ -4417,6 +5933,14 @@ func (m *ExecuteOptions) SizeVT() (n int) { } n += 1 + sov(uint64(l)) + l } + l = len(m.WorkloadName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Priority) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -5770,8 +7294,8 @@ func (m *StreamHealthResponse) SizeVT() (n int) { if m.Serving { n += 2 } - if m.TabletExternallyReparentedTimestamp != 0 { - n += 1 + sov(uint64(m.TabletExternallyReparentedTimestamp)) + if m.PrimaryTermStartTimestamp != 0 { + n += 1 + sov(uint64(m.PrimaryTermStartTimestamp)) } if m.RealtimeStats != nil { l = m.RealtimeStats.SizeVT() @@ -6355,6 +7879,40 @@ func (m *Value) UnmarshalVT(dAtA []byte) error { m.Value = []byte{} } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &Value{}) + if err := m.Values[len(m.Values)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -6999,6 +8557,70 @@ func (m *ExecuteOptions) UnmarshalVT(dAtA []byte) error { } else { return fmt.Errorf("proto: wrong wireType = %d for field TransactionAccessMode", wireType) } + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkloadName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkloadName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Priority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -16197,9 +17819,9 @@ func (m *StreamHealthResponse) UnmarshalVT(dAtA []byte) error { m.Serving = bool(v != 0) case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletExternallyReparentedTimestamp", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PrimaryTermStartTimestamp", wireType) } - m.TabletExternallyReparentedTimestamp = 0 + m.PrimaryTermStartTimestamp = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -16209,7 +17831,7 @@ func (m *StreamHealthResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TabletExternallyReparentedTimestamp |= int64(b&0x7F) << shift + m.PrimaryTermStartTimestamp |= int64(b&0x7F) << shift if b < 0x80 { break } diff --git a/go/vt/proto/queryservice/queryservice.pb.go b/go/vt/proto/queryservice/queryservice.pb.go index 3d72458cb27..babedcde966 100644 --- a/go/vt/proto/queryservice/queryservice.pb.go +++ b/go/vt/proto/queryservice/queryservice.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: queryservice.proto @@ -45,7 +45,7 @@ var file_queryservice_proto_rawDesc = []byte{ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x71, 0x75, 0x65, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x0b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x10, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x32, 0xd2, 0x10, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x07, 0x45, + 0x6f, 0x32, 0xac, 0x11, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x07, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x12, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, @@ -168,20 +168,26 @@ var file_queryservice_proto_rawDesc = []byte{ 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x5b, 0x0a, 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x21, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x62, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x30, 0x01, 0x12, 0x42, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, - 0x17, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x42, 0x2b, 0x5a, 0x29, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, - 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x58, 0x0a, 0x0d, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x20, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x5b, 0x0a, 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x12, 0x21, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x09, + 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x17, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, + 0x42, 0x2b, 0x5a, 0x29, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, + 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_queryservice_proto_goTypes = []interface{}{ @@ -210,35 +216,37 @@ var file_queryservice_proto_goTypes = []interface{}{ (*query.StreamHealthRequest)(nil), // 22: query.StreamHealthRequest (*binlogdata.VStreamRequest)(nil), // 23: binlogdata.VStreamRequest (*binlogdata.VStreamRowsRequest)(nil), // 24: binlogdata.VStreamRowsRequest - (*binlogdata.VStreamResultsRequest)(nil), // 25: binlogdata.VStreamResultsRequest - (*query.GetSchemaRequest)(nil), // 26: query.GetSchemaRequest - (*query.ExecuteResponse)(nil), // 27: query.ExecuteResponse - (*query.StreamExecuteResponse)(nil), // 28: query.StreamExecuteResponse - (*query.BeginResponse)(nil), // 29: query.BeginResponse - (*query.CommitResponse)(nil), // 30: query.CommitResponse - (*query.RollbackResponse)(nil), // 31: query.RollbackResponse - (*query.PrepareResponse)(nil), // 32: query.PrepareResponse - (*query.CommitPreparedResponse)(nil), // 33: query.CommitPreparedResponse - (*query.RollbackPreparedResponse)(nil), // 34: query.RollbackPreparedResponse - (*query.CreateTransactionResponse)(nil), // 35: query.CreateTransactionResponse - (*query.StartCommitResponse)(nil), // 36: query.StartCommitResponse - (*query.SetRollbackResponse)(nil), // 37: query.SetRollbackResponse - (*query.ConcludeTransactionResponse)(nil), // 38: query.ConcludeTransactionResponse - (*query.ReadTransactionResponse)(nil), // 39: query.ReadTransactionResponse - (*query.BeginExecuteResponse)(nil), // 40: query.BeginExecuteResponse - (*query.BeginStreamExecuteResponse)(nil), // 41: query.BeginStreamExecuteResponse - (*query.MessageStreamResponse)(nil), // 42: query.MessageStreamResponse - (*query.MessageAckResponse)(nil), // 43: query.MessageAckResponse - (*query.ReserveExecuteResponse)(nil), // 44: query.ReserveExecuteResponse - (*query.ReserveBeginExecuteResponse)(nil), // 45: query.ReserveBeginExecuteResponse - (*query.ReserveStreamExecuteResponse)(nil), // 46: query.ReserveStreamExecuteResponse - (*query.ReserveBeginStreamExecuteResponse)(nil), // 47: query.ReserveBeginStreamExecuteResponse - (*query.ReleaseResponse)(nil), // 48: query.ReleaseResponse - (*query.StreamHealthResponse)(nil), // 49: query.StreamHealthResponse - (*binlogdata.VStreamResponse)(nil), // 50: binlogdata.VStreamResponse - (*binlogdata.VStreamRowsResponse)(nil), // 51: binlogdata.VStreamRowsResponse - (*binlogdata.VStreamResultsResponse)(nil), // 52: binlogdata.VStreamResultsResponse - (*query.GetSchemaResponse)(nil), // 53: query.GetSchemaResponse + (*binlogdata.VStreamTablesRequest)(nil), // 25: binlogdata.VStreamTablesRequest + (*binlogdata.VStreamResultsRequest)(nil), // 26: binlogdata.VStreamResultsRequest + (*query.GetSchemaRequest)(nil), // 27: query.GetSchemaRequest + (*query.ExecuteResponse)(nil), // 28: query.ExecuteResponse + (*query.StreamExecuteResponse)(nil), // 29: query.StreamExecuteResponse + (*query.BeginResponse)(nil), // 30: query.BeginResponse + (*query.CommitResponse)(nil), // 31: query.CommitResponse + (*query.RollbackResponse)(nil), // 32: query.RollbackResponse + (*query.PrepareResponse)(nil), // 33: query.PrepareResponse + (*query.CommitPreparedResponse)(nil), // 34: query.CommitPreparedResponse + (*query.RollbackPreparedResponse)(nil), // 35: query.RollbackPreparedResponse + (*query.CreateTransactionResponse)(nil), // 36: query.CreateTransactionResponse + (*query.StartCommitResponse)(nil), // 37: query.StartCommitResponse + (*query.SetRollbackResponse)(nil), // 38: query.SetRollbackResponse + (*query.ConcludeTransactionResponse)(nil), // 39: query.ConcludeTransactionResponse + (*query.ReadTransactionResponse)(nil), // 40: query.ReadTransactionResponse + (*query.BeginExecuteResponse)(nil), // 41: query.BeginExecuteResponse + (*query.BeginStreamExecuteResponse)(nil), // 42: query.BeginStreamExecuteResponse + (*query.MessageStreamResponse)(nil), // 43: query.MessageStreamResponse + (*query.MessageAckResponse)(nil), // 44: query.MessageAckResponse + (*query.ReserveExecuteResponse)(nil), // 45: query.ReserveExecuteResponse + (*query.ReserveBeginExecuteResponse)(nil), // 46: query.ReserveBeginExecuteResponse + (*query.ReserveStreamExecuteResponse)(nil), // 47: query.ReserveStreamExecuteResponse + (*query.ReserveBeginStreamExecuteResponse)(nil), // 48: query.ReserveBeginStreamExecuteResponse + (*query.ReleaseResponse)(nil), // 49: query.ReleaseResponse + (*query.StreamHealthResponse)(nil), // 50: query.StreamHealthResponse + (*binlogdata.VStreamResponse)(nil), // 51: binlogdata.VStreamResponse + (*binlogdata.VStreamRowsResponse)(nil), // 52: binlogdata.VStreamRowsResponse + (*binlogdata.VStreamTablesResponse)(nil), // 53: binlogdata.VStreamTablesResponse + (*binlogdata.VStreamResultsResponse)(nil), // 54: binlogdata.VStreamResultsResponse + (*query.GetSchemaResponse)(nil), // 55: query.GetSchemaResponse } var file_queryservice_proto_depIdxs = []int32{ 0, // 0: queryservice.Query.Execute:input_type -> query.ExecuteRequest @@ -266,37 +274,39 @@ var file_queryservice_proto_depIdxs = []int32{ 22, // 22: queryservice.Query.StreamHealth:input_type -> query.StreamHealthRequest 23, // 23: queryservice.Query.VStream:input_type -> binlogdata.VStreamRequest 24, // 24: queryservice.Query.VStreamRows:input_type -> binlogdata.VStreamRowsRequest - 25, // 25: queryservice.Query.VStreamResults:input_type -> binlogdata.VStreamResultsRequest - 26, // 26: queryservice.Query.GetSchema:input_type -> query.GetSchemaRequest - 27, // 27: queryservice.Query.Execute:output_type -> query.ExecuteResponse - 28, // 28: queryservice.Query.StreamExecute:output_type -> query.StreamExecuteResponse - 29, // 29: queryservice.Query.Begin:output_type -> query.BeginResponse - 30, // 30: queryservice.Query.Commit:output_type -> query.CommitResponse - 31, // 31: queryservice.Query.Rollback:output_type -> query.RollbackResponse - 32, // 32: queryservice.Query.Prepare:output_type -> query.PrepareResponse - 33, // 33: queryservice.Query.CommitPrepared:output_type -> query.CommitPreparedResponse - 34, // 34: queryservice.Query.RollbackPrepared:output_type -> query.RollbackPreparedResponse - 35, // 35: queryservice.Query.CreateTransaction:output_type -> query.CreateTransactionResponse - 36, // 36: queryservice.Query.StartCommit:output_type -> query.StartCommitResponse - 37, // 37: queryservice.Query.SetRollback:output_type -> query.SetRollbackResponse - 38, // 38: queryservice.Query.ConcludeTransaction:output_type -> query.ConcludeTransactionResponse - 39, // 39: queryservice.Query.ReadTransaction:output_type -> query.ReadTransactionResponse - 40, // 40: queryservice.Query.BeginExecute:output_type -> query.BeginExecuteResponse - 41, // 41: queryservice.Query.BeginStreamExecute:output_type -> query.BeginStreamExecuteResponse - 42, // 42: queryservice.Query.MessageStream:output_type -> query.MessageStreamResponse - 43, // 43: queryservice.Query.MessageAck:output_type -> query.MessageAckResponse - 44, // 44: queryservice.Query.ReserveExecute:output_type -> query.ReserveExecuteResponse - 45, // 45: queryservice.Query.ReserveBeginExecute:output_type -> query.ReserveBeginExecuteResponse - 46, // 46: queryservice.Query.ReserveStreamExecute:output_type -> query.ReserveStreamExecuteResponse - 47, // 47: queryservice.Query.ReserveBeginStreamExecute:output_type -> query.ReserveBeginStreamExecuteResponse - 48, // 48: queryservice.Query.Release:output_type -> query.ReleaseResponse - 49, // 49: queryservice.Query.StreamHealth:output_type -> query.StreamHealthResponse - 50, // 50: queryservice.Query.VStream:output_type -> binlogdata.VStreamResponse - 51, // 51: queryservice.Query.VStreamRows:output_type -> binlogdata.VStreamRowsResponse - 52, // 52: queryservice.Query.VStreamResults:output_type -> binlogdata.VStreamResultsResponse - 53, // 53: queryservice.Query.GetSchema:output_type -> query.GetSchemaResponse - 27, // [27:54] is the sub-list for method output_type - 0, // [0:27] is the sub-list for method input_type + 25, // 25: queryservice.Query.VStreamTables:input_type -> binlogdata.VStreamTablesRequest + 26, // 26: queryservice.Query.VStreamResults:input_type -> binlogdata.VStreamResultsRequest + 27, // 27: queryservice.Query.GetSchema:input_type -> query.GetSchemaRequest + 28, // 28: queryservice.Query.Execute:output_type -> query.ExecuteResponse + 29, // 29: queryservice.Query.StreamExecute:output_type -> query.StreamExecuteResponse + 30, // 30: queryservice.Query.Begin:output_type -> query.BeginResponse + 31, // 31: queryservice.Query.Commit:output_type -> query.CommitResponse + 32, // 32: queryservice.Query.Rollback:output_type -> query.RollbackResponse + 33, // 33: queryservice.Query.Prepare:output_type -> query.PrepareResponse + 34, // 34: queryservice.Query.CommitPrepared:output_type -> query.CommitPreparedResponse + 35, // 35: queryservice.Query.RollbackPrepared:output_type -> query.RollbackPreparedResponse + 36, // 36: queryservice.Query.CreateTransaction:output_type -> query.CreateTransactionResponse + 37, // 37: queryservice.Query.StartCommit:output_type -> query.StartCommitResponse + 38, // 38: queryservice.Query.SetRollback:output_type -> query.SetRollbackResponse + 39, // 39: queryservice.Query.ConcludeTransaction:output_type -> query.ConcludeTransactionResponse + 40, // 40: queryservice.Query.ReadTransaction:output_type -> query.ReadTransactionResponse + 41, // 41: queryservice.Query.BeginExecute:output_type -> query.BeginExecuteResponse + 42, // 42: queryservice.Query.BeginStreamExecute:output_type -> query.BeginStreamExecuteResponse + 43, // 43: queryservice.Query.MessageStream:output_type -> query.MessageStreamResponse + 44, // 44: queryservice.Query.MessageAck:output_type -> query.MessageAckResponse + 45, // 45: queryservice.Query.ReserveExecute:output_type -> query.ReserveExecuteResponse + 46, // 46: queryservice.Query.ReserveBeginExecute:output_type -> query.ReserveBeginExecuteResponse + 47, // 47: queryservice.Query.ReserveStreamExecute:output_type -> query.ReserveStreamExecuteResponse + 48, // 48: queryservice.Query.ReserveBeginStreamExecute:output_type -> query.ReserveBeginStreamExecuteResponse + 49, // 49: queryservice.Query.Release:output_type -> query.ReleaseResponse + 50, // 50: queryservice.Query.StreamHealth:output_type -> query.StreamHealthResponse + 51, // 51: queryservice.Query.VStream:output_type -> binlogdata.VStreamResponse + 52, // 52: queryservice.Query.VStreamRows:output_type -> binlogdata.VStreamRowsResponse + 53, // 53: queryservice.Query.VStreamTables:output_type -> binlogdata.VStreamTablesResponse + 54, // 54: queryservice.Query.VStreamResults:output_type -> binlogdata.VStreamResultsResponse + 55, // 55: queryservice.Query.GetSchema:output_type -> query.GetSchemaResponse + 28, // [28:56] is the sub-list for method output_type + 0, // [0:28] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name diff --git a/go/vt/proto/queryservice/queryservice_grpc.pb.go b/go/vt/proto/queryservice/queryservice_grpc.pb.go index f9d596351e2..c05ea4f83a3 100644 --- a/go/vt/proto/queryservice/queryservice_grpc.pb.go +++ b/go/vt/proto/queryservice/queryservice_grpc.pb.go @@ -79,6 +79,8 @@ type QueryClient interface { VStream(ctx context.Context, in *binlogdata.VStreamRequest, opts ...grpc.CallOption) (Query_VStreamClient, error) // VStreamRows streams rows from the specified starting point. VStreamRows(ctx context.Context, in *binlogdata.VStreamRowsRequest, opts ...grpc.CallOption) (Query_VStreamRowsClient, error) + // VStreamTables streams rows from the specified starting point. + VStreamTables(ctx context.Context, in *binlogdata.VStreamTablesRequest, opts ...grpc.CallOption) (Query_VStreamTablesClient, error) // VStreamResults streams results along with the gtid of the snapshot. VStreamResults(ctx context.Context, in *binlogdata.VStreamResultsRequest, opts ...grpc.CallOption) (Query_VStreamResultsClient, error) // GetSchema returns the schema information. @@ -502,8 +504,40 @@ func (x *queryVStreamRowsClient) Recv() (*binlogdata.VStreamRowsResponse, error) return m, nil } +func (c *queryClient) VStreamTables(ctx context.Context, in *binlogdata.VStreamTablesRequest, opts ...grpc.CallOption) (Query_VStreamTablesClient, error) { + stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[8], "/queryservice.Query/VStreamTables", opts...) + if err != nil { + return nil, err + } + x := &queryVStreamTablesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Query_VStreamTablesClient interface { + Recv() (*binlogdata.VStreamTablesResponse, error) + grpc.ClientStream +} + +type queryVStreamTablesClient struct { + grpc.ClientStream +} + +func (x *queryVStreamTablesClient) Recv() (*binlogdata.VStreamTablesResponse, error) { + m := new(binlogdata.VStreamTablesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func (c *queryClient) VStreamResults(ctx context.Context, in *binlogdata.VStreamResultsRequest, opts ...grpc.CallOption) (Query_VStreamResultsClient, error) { - stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[8], "/queryservice.Query/VStreamResults", opts...) + stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[9], "/queryservice.Query/VStreamResults", opts...) if err != nil { return nil, err } @@ -535,7 +569,7 @@ func (x *queryVStreamResultsClient) Recv() (*binlogdata.VStreamResultsResponse, } func (c *queryClient) GetSchema(ctx context.Context, in *query.GetSchemaRequest, opts ...grpc.CallOption) (Query_GetSchemaClient, error) { - stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[9], "/queryservice.Query/GetSchema", opts...) + stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[10], "/queryservice.Query/GetSchema", opts...) if err != nil { return nil, err } @@ -625,6 +659,8 @@ type QueryServer interface { VStream(*binlogdata.VStreamRequest, Query_VStreamServer) error // VStreamRows streams rows from the specified starting point. VStreamRows(*binlogdata.VStreamRowsRequest, Query_VStreamRowsServer) error + // VStreamTables streams rows from the specified starting point. + VStreamTables(*binlogdata.VStreamTablesRequest, Query_VStreamTablesServer) error // VStreamResults streams results along with the gtid of the snapshot. VStreamResults(*binlogdata.VStreamResultsRequest, Query_VStreamResultsServer) error // GetSchema returns the schema information. @@ -711,6 +747,9 @@ func (UnimplementedQueryServer) VStream(*binlogdata.VStreamRequest, Query_VStrea func (UnimplementedQueryServer) VStreamRows(*binlogdata.VStreamRowsRequest, Query_VStreamRowsServer) error { return status.Errorf(codes.Unimplemented, "method VStreamRows not implemented") } +func (UnimplementedQueryServer) VStreamTables(*binlogdata.VStreamTablesRequest, Query_VStreamTablesServer) error { + return status.Errorf(codes.Unimplemented, "method VStreamTables not implemented") +} func (UnimplementedQueryServer) VStreamResults(*binlogdata.VStreamResultsRequest, Query_VStreamResultsServer) error { return status.Errorf(codes.Unimplemented, "method VStreamResults not implemented") } @@ -1204,6 +1243,27 @@ func (x *queryVStreamRowsServer) Send(m *binlogdata.VStreamRowsResponse) error { return x.ServerStream.SendMsg(m) } +func _Query_VStreamTables_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(binlogdata.VStreamTablesRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(QueryServer).VStreamTables(m, &queryVStreamTablesServer{stream}) +} + +type Query_VStreamTablesServer interface { + Send(*binlogdata.VStreamTablesResponse) error + grpc.ServerStream +} + +type queryVStreamTablesServer struct { + grpc.ServerStream +} + +func (x *queryVStreamTablesServer) Send(m *binlogdata.VStreamTablesResponse) error { + return x.ServerStream.SendMsg(m) +} + func _Query_VStreamResults_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(binlogdata.VStreamResultsRequest) if err := stream.RecvMsg(m); err != nil { @@ -1363,6 +1423,11 @@ var Query_ServiceDesc = grpc.ServiceDesc{ Handler: _Query_VStreamRows_Handler, ServerStreams: true, }, + { + StreamName: "VStreamTables", + Handler: _Query_VStreamTables_Handler, + ServerStreams: true, + }, { StreamName: "VStreamResults", Handler: _Query_VStreamResults_Handler, diff --git a/go/vt/proto/replicationdata/replicationdata.pb.go b/go/vt/proto/replicationdata/replicationdata.pb.go index 55bcdf99b55..ec90d6943ac 100644 --- a/go/vt/proto/replicationdata/replicationdata.pb.go +++ b/go/vt/proto/replicationdata/replicationdata.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: replicationdata.proto @@ -441,6 +441,7 @@ type FullStatus struct { SemiSyncPrimaryClients uint32 `protobuf:"varint,18,opt,name=semi_sync_primary_clients,json=semiSyncPrimaryClients,proto3" json:"semi_sync_primary_clients,omitempty"` SemiSyncPrimaryTimeout uint64 `protobuf:"varint,19,opt,name=semi_sync_primary_timeout,json=semiSyncPrimaryTimeout,proto3" json:"semi_sync_primary_timeout,omitempty"` SemiSyncWaitForReplicaCount uint32 `protobuf:"varint,20,opt,name=semi_sync_wait_for_replica_count,json=semiSyncWaitForReplicaCount,proto3" json:"semi_sync_wait_for_replica_count,omitempty"` + SuperReadOnly bool `protobuf:"varint,21,opt,name=super_read_only,json=superReadOnly,proto3" json:"super_read_only,omitempty"` } func (x *FullStatus) Reset() { @@ -615,6 +616,13 @@ func (x *FullStatus) GetSemiSyncWaitForReplicaCount() uint32 { return 0 } +func (x *FullStatus) GetSuperReadOnly() bool { + if x != nil { + return x.SuperReadOnly + } + return false +} + var File_replicationdata_proto protoreflect.FileDescriptor var file_replicationdata_proto_rawDesc = []byte{ @@ -690,7 +698,7 @@ var file_replicationdata_proto_rawDesc = []byte{ 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x66, 0x69, 0x6c, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc3, 0x07, 0x0a, + 0x66, 0x69, 0x6c, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xeb, 0x07, 0x0a, 0x0a, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, @@ -751,14 +759,16 @@ var file_replicationdata_proto_rawDesc = []byte{ 0x72, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x1b, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x2a, 0x3b, 0x0a, 0x13, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4f, 0x41, - 0x4e, 0x44, 0x53, 0x51, 0x4c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, - 0x0c, 0x49, 0x4f, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x42, - 0x2e, 0x5a, 0x2c, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, - 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x75, 0x70, + 0x65, 0x72, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x2a, 0x3b, 0x0a, 0x13, 0x53, 0x74, + 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, + 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4f, 0x41, 0x4e, 0x44, 0x53, 0x51, 0x4c, 0x54, 0x48, 0x52, + 0x45, 0x41, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x4f, 0x54, 0x48, 0x52, 0x45, 0x41, + 0x44, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x42, 0x2e, 0x5a, 0x2c, 0x76, 0x69, 0x74, 0x65, 0x73, + 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, + 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go b/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go index 350a733e865..f92a42b05e4 100644 --- a/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go +++ b/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: replicationdata.proto package replicationdata import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -18,6 +19,121 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *Status) CloneVT() *Status { + if m == nil { + return (*Status)(nil) + } + r := &Status{ + Position: m.Position, + ReplicationLagSeconds: m.ReplicationLagSeconds, + SourceHost: m.SourceHost, + SourcePort: m.SourcePort, + ConnectRetry: m.ConnectRetry, + RelayLogPosition: m.RelayLogPosition, + FilePosition: m.FilePosition, + RelayLogSourceBinlogEquivalentPosition: m.RelayLogSourceBinlogEquivalentPosition, + SourceServerId: m.SourceServerId, + SourceUuid: m.SourceUuid, + IoState: m.IoState, + LastIoError: m.LastIoError, + SqlState: m.SqlState, + LastSqlError: m.LastSqlError, + RelayLogFilePosition: m.RelayLogFilePosition, + SourceUser: m.SourceUser, + SqlDelay: m.SqlDelay, + AutoPosition: m.AutoPosition, + UsingGtid: m.UsingGtid, + HasReplicationFilters: m.HasReplicationFilters, + SslAllowed: m.SslAllowed, + ReplicationLagUnknown: m.ReplicationLagUnknown, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Status) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StopReplicationStatus) CloneVT() *StopReplicationStatus { + if m == nil { + return (*StopReplicationStatus)(nil) + } + r := &StopReplicationStatus{ + Before: m.Before.CloneVT(), + After: m.After.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StopReplicationStatus) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PrimaryStatus) CloneVT() *PrimaryStatus { + if m == nil { + return (*PrimaryStatus)(nil) + } + r := &PrimaryStatus{ + Position: m.Position, + FilePosition: m.FilePosition, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PrimaryStatus) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *FullStatus) CloneVT() *FullStatus { + if m == nil { + return (*FullStatus)(nil) + } + r := &FullStatus{ + ServerId: m.ServerId, + ServerUuid: m.ServerUuid, + ReplicationStatus: m.ReplicationStatus.CloneVT(), + PrimaryStatus: m.PrimaryStatus.CloneVT(), + GtidPurged: m.GtidPurged, + Version: m.Version, + VersionComment: m.VersionComment, + ReadOnly: m.ReadOnly, + GtidMode: m.GtidMode, + BinlogFormat: m.BinlogFormat, + BinlogRowImage: m.BinlogRowImage, + LogBinEnabled: m.LogBinEnabled, + LogReplicaUpdates: m.LogReplicaUpdates, + SemiSyncPrimaryEnabled: m.SemiSyncPrimaryEnabled, + SemiSyncReplicaEnabled: m.SemiSyncReplicaEnabled, + SemiSyncPrimaryStatus: m.SemiSyncPrimaryStatus, + SemiSyncReplicaStatus: m.SemiSyncReplicaStatus, + SemiSyncPrimaryClients: m.SemiSyncPrimaryClients, + SemiSyncPrimaryTimeout: m.SemiSyncPrimaryTimeout, + SemiSyncWaitForReplicaCount: m.SemiSyncWaitForReplicaCount, + SuperReadOnly: m.SuperReadOnly, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *FullStatus) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Status) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -354,6 +470,18 @@ func (m *FullStatus) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.SuperReadOnly { + i-- + if m.SuperReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } if m.SemiSyncWaitForReplicaCount != 0 { i = encodeVarint(dAtA, i, uint64(m.SemiSyncWaitForReplicaCount)) i-- @@ -734,6 +862,9 @@ func (m *FullStatus) SizeVT() (n int) { if m.SemiSyncWaitForReplicaCount != 0 { n += 2 + sov(uint64(m.SemiSyncWaitForReplicaCount)) } + if m.SuperReadOnly { + n += 3 + } n += len(m.unknownFields) return n } @@ -2127,6 +2258,26 @@ func (m *FullStatus) UnmarshalVT(dAtA []byte) error { break } } + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuperReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SuperReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/go/vt/proto/tableacl/tableacl.pb.go b/go/vt/proto/tableacl/tableacl.pb.go index bdf863601be..3b26ace8157 100644 --- a/go/vt/proto/tableacl/tableacl.pb.go +++ b/go/vt/proto/tableacl/tableacl.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: tableacl.proto diff --git a/go/vt/proto/tableacl/tableacl_vtproto.pb.go b/go/vt/proto/tableacl/tableacl_vtproto.pb.go index 462bf151230..8c9c9a97856 100644 --- a/go/vt/proto/tableacl/tableacl_vtproto.pb.go +++ b/go/vt/proto/tableacl/tableacl_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: tableacl.proto package tableacl import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -18,6 +19,67 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *TableGroupSpec) CloneVT() *TableGroupSpec { + if m == nil { + return (*TableGroupSpec)(nil) + } + r := &TableGroupSpec{ + Name: m.Name, + } + if rhs := m.TableNamesOrPrefixes; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.TableNamesOrPrefixes = tmpContainer + } + if rhs := m.Readers; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Readers = tmpContainer + } + if rhs := m.Writers; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Writers = tmpContainer + } + if rhs := m.Admins; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Admins = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TableGroupSpec) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Config) CloneVT() *Config { + if m == nil { + return (*Config)(nil) + } + r := &Config{} + if rhs := m.TableGroups; rhs != nil { + tmpContainer := make([]*TableGroupSpec, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TableGroups = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Config) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *TableGroupSpec) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go index 729ee18cb44..c9039a3cfd9 100644 --- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go +++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: tabletmanagerdata.proto @@ -29,6 +29,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + binlogdata "vitess.io/vitess/go/vt/proto/binlogdata" logutil "vitess.io/vitess/go/vt/proto/logutil" query "vitess.io/vitess/go/vt/proto/query" replicationdata "vitess.io/vitess/go/vt/proto/replicationdata" @@ -44,6 +45,57 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// This structure allows us to manage tablet selection preferences +// which are eventually passed to a TabletPicker. +type TabletSelectionPreference int32 + +const ( + TabletSelectionPreference_ANY TabletSelectionPreference = 0 + TabletSelectionPreference_INORDER TabletSelectionPreference = 1 + TabletSelectionPreference_UNKNOWN TabletSelectionPreference = 3 // Don't change any existing value +) + +// Enum value maps for TabletSelectionPreference. +var ( + TabletSelectionPreference_name = map[int32]string{ + 0: "ANY", + 1: "INORDER", + 3: "UNKNOWN", + } + TabletSelectionPreference_value = map[string]int32{ + "ANY": 0, + "INORDER": 1, + "UNKNOWN": 3, + } +) + +func (x TabletSelectionPreference) Enum() *TabletSelectionPreference { + p := new(TabletSelectionPreference) + *p = x + return p +} + +func (x TabletSelectionPreference) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TabletSelectionPreference) Descriptor() protoreflect.EnumDescriptor { + return file_tabletmanagerdata_proto_enumTypes[0].Descriptor() +} + +func (TabletSelectionPreference) Type() protoreflect.EnumType { + return &file_tabletmanagerdata_proto_enumTypes[0] +} + +func (x TabletSelectionPreference) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TabletSelectionPreference.Descriptor instead. +func (TabletSelectionPreference) EnumDescriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{0} +} + type TableDefinition struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -163,7 +215,6 @@ type SchemaDefinition struct { DatabaseSchema string `protobuf:"bytes,1,opt,name=database_schema,json=databaseSchema,proto3" json:"database_schema,omitempty"` TableDefinitions []*TableDefinition `protobuf:"bytes,2,rep,name=table_definitions,json=tableDefinitions,proto3" json:"table_definitions,omitempty"` - Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` } func (x *SchemaDefinition) Reset() { @@ -212,13 +263,6 @@ func (x *SchemaDefinition) GetTableDefinitions() []*TableDefinition { return nil } -func (x *SchemaDefinition) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - type SchemaChangeResult struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1583,6 +1627,8 @@ type ApplySchemaRequest struct { BeforeSchema *SchemaDefinition `protobuf:"bytes,4,opt,name=before_schema,json=beforeSchema,proto3" json:"before_schema,omitempty"` AfterSchema *SchemaDefinition `protobuf:"bytes,5,opt,name=after_schema,json=afterSchema,proto3" json:"after_schema,omitempty"` SqlMode string `protobuf:"bytes,6,opt,name=sql_mode,json=sqlMode,proto3" json:"sql_mode,omitempty"` + // BatchSize indicates how many queries to apply together + BatchSize int64 `protobuf:"varint,7,opt,name=batch_size,json=batchSize,proto3" json:"batch_size,omitempty"` } func (x *ApplySchemaRequest) Reset() { @@ -1659,6 +1705,13 @@ func (x *ApplySchemaRequest) GetSqlMode() string { return "" } +func (x *ApplySchemaRequest) GetBatchSize() int64 { + if x != nil { + return x.BatchSize + } + return 0 +} + type ApplySchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -4498,6 +4551,9 @@ type BackupRequest struct { // IncrementalFromPos indicates a position of a previous backup. When this value is non-empty // then the backup becomes incremental and applies as of given position. IncrementalFromPos string `protobuf:"bytes,3,opt,name=incremental_from_pos,json=incrementalFromPos,proto3" json:"incremental_from_pos,omitempty"` + // UpgradeSafe indicates if the backup should be taken with innodb_fast_shutdown=0 + // so that it's a backup that can be used for an upgrade. + UpgradeSafe bool `protobuf:"varint,4,opt,name=upgrade_safe,json=upgradeSafe,proto3" json:"upgrade_safe,omitempty"` } func (x *BackupRequest) Reset() { @@ -4553,6 +4609,13 @@ func (x *BackupRequest) GetIncrementalFromPos() string { return "" } +func (x *BackupRequest) GetUpgradeSafe() bool { + if x != nil { + return x.UpgradeSafe + } + return false +} + type BackupResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -4612,6 +4675,9 @@ type RestoreFromBackupRequest struct { RestoreToPos string `protobuf:"bytes,2,opt,name=restore_to_pos,json=restoreToPos,proto3" json:"restore_to_pos,omitempty"` // Dry run does not actually performs the restore, but validates the steps and availability of backups DryRun bool `protobuf:"varint,3,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` + // RestoreToTimestamp, if given, requested an inremental restore up to (and excluding) the given timestamp. + // RestoreToTimestamp and RestoreToPos are mutually exclusive. + RestoreToTimestamp *vttime.Time `protobuf:"bytes,4,opt,name=restore_to_timestamp,json=restoreToTimestamp,proto3" json:"restore_to_timestamp,omitempty"` } func (x *RestoreFromBackupRequest) Reset() { @@ -4667,6 +4733,13 @@ func (x *RestoreFromBackupRequest) GetDryRun() bool { return false } +func (x *RestoreFromBackupRequest) GetRestoreToTimestamp() *vttime.Time { + if x != nil { + return x.RestoreToTimestamp + } + return nil +} + type RestoreFromBackupResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -4714,21 +4787,31 @@ func (x *RestoreFromBackupResponse) GetEvent() *logutil.Event { return nil } -type VDiffRequest struct { +type CreateVReplicationWorkflowRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` - Action string `protobuf:"bytes,3,opt,name=action,proto3" json:"action,omitempty"` - ActionArg string `protobuf:"bytes,4,opt,name=action_arg,json=actionArg,proto3" json:"action_arg,omitempty"` - VdiffUuid string `protobuf:"bytes,5,opt,name=vdiff_uuid,json=vdiffUuid,proto3" json:"vdiff_uuid,omitempty"` - Options *VDiffOptions `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` -} - -func (x *VDiffRequest) Reset() { - *x = VDiffRequest{} + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + BinlogSource []*binlogdata.BinlogSource `protobuf:"bytes,2,rep,name=binlog_source,json=binlogSource,proto3" json:"binlog_source,omitempty"` + // Optional parameters. + Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` + // TabletTypes is the list of tablet types to use when selecting source tablets. + TabletTypes []topodata.TabletType `protobuf:"varint,4,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` + TabletSelectionPreference TabletSelectionPreference `protobuf:"varint,5,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` + WorkflowType binlogdata.VReplicationWorkflowType `protobuf:"varint,6,opt,name=workflow_type,json=workflowType,proto3,enum=binlogdata.VReplicationWorkflowType" json:"workflow_type,omitempty"` + WorkflowSubType binlogdata.VReplicationWorkflowSubType `protobuf:"varint,7,opt,name=workflow_sub_type,json=workflowSubType,proto3,enum=binlogdata.VReplicationWorkflowSubType" json:"workflow_sub_type,omitempty"` + // DeferSecondaryKeys specifies if secondary keys should be created in one shot after table + // copy finishes. + DeferSecondaryKeys bool `protobuf:"varint,8,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"` + // AutoStart specifies if the workflow should be started when created. + AutoStart bool `protobuf:"varint,9,opt,name=auto_start,json=autoStart,proto3" json:"auto_start,omitempty"` + // Should the workflow stop after the copy phase. + StopAfterCopy bool `protobuf:"varint,10,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` +} + +func (x *CreateVReplicationWorkflowRequest) Reset() { + *x = CreateVReplicationWorkflowRequest{} if protoimpl.UnsafeEnabled { mi := &file_tabletmanagerdata_proto_msgTypes[96] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4736,13 +4819,13 @@ func (x *VDiffRequest) Reset() { } } -func (x *VDiffRequest) String() string { +func (x *CreateVReplicationWorkflowRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VDiffRequest) ProtoMessage() {} +func (*CreateVReplicationWorkflowRequest) ProtoMessage() {} -func (x *VDiffRequest) ProtoReflect() protoreflect.Message { +func (x *CreateVReplicationWorkflowRequest) ProtoReflect() protoreflect.Message { mi := &file_tabletmanagerdata_proto_msgTypes[96] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4754,65 +4837,91 @@ func (x *VDiffRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use VDiffRequest.ProtoReflect.Descriptor instead. -func (*VDiffRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use CreateVReplicationWorkflowRequest.ProtoReflect.Descriptor instead. +func (*CreateVReplicationWorkflowRequest) Descriptor() ([]byte, []int) { return file_tabletmanagerdata_proto_rawDescGZIP(), []int{96} } -func (x *VDiffRequest) GetKeyspace() string { +func (x *CreateVReplicationWorkflowRequest) GetWorkflow() string { if x != nil { - return x.Keyspace + return x.Workflow } return "" } -func (x *VDiffRequest) GetWorkflow() string { +func (x *CreateVReplicationWorkflowRequest) GetBinlogSource() []*binlogdata.BinlogSource { if x != nil { - return x.Workflow + return x.BinlogSource } - return "" + return nil } -func (x *VDiffRequest) GetAction() string { +func (x *CreateVReplicationWorkflowRequest) GetCells() []string { if x != nil { - return x.Action + return x.Cells } - return "" + return nil } -func (x *VDiffRequest) GetActionArg() string { +func (x *CreateVReplicationWorkflowRequest) GetTabletTypes() []topodata.TabletType { if x != nil { - return x.ActionArg + return x.TabletTypes } - return "" + return nil } -func (x *VDiffRequest) GetVdiffUuid() string { +func (x *CreateVReplicationWorkflowRequest) GetTabletSelectionPreference() TabletSelectionPreference { if x != nil { - return x.VdiffUuid + return x.TabletSelectionPreference } - return "" + return TabletSelectionPreference_ANY } -func (x *VDiffRequest) GetOptions() *VDiffOptions { +func (x *CreateVReplicationWorkflowRequest) GetWorkflowType() binlogdata.VReplicationWorkflowType { if x != nil { - return x.Options + return x.WorkflowType } - return nil + return binlogdata.VReplicationWorkflowType(0) } -type VDiffResponse struct { +func (x *CreateVReplicationWorkflowRequest) GetWorkflowSubType() binlogdata.VReplicationWorkflowSubType { + if x != nil { + return x.WorkflowSubType + } + return binlogdata.VReplicationWorkflowSubType(0) +} + +func (x *CreateVReplicationWorkflowRequest) GetDeferSecondaryKeys() bool { + if x != nil { + return x.DeferSecondaryKeys + } + return false +} + +func (x *CreateVReplicationWorkflowRequest) GetAutoStart() bool { + if x != nil { + return x.AutoStart + } + return false +} + +func (x *CreateVReplicationWorkflowRequest) GetStopAfterCopy() bool { + if x != nil { + return x.StopAfterCopy + } + return false +} + +type CreateVReplicationWorkflowResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Output *query.QueryResult `protobuf:"bytes,2,opt,name=output,proto3" json:"output,omitempty"` - VdiffUuid string `protobuf:"bytes,3,opt,name=vdiff_uuid,json=vdiffUuid,proto3" json:"vdiff_uuid,omitempty"` + Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` } -func (x *VDiffResponse) Reset() { - *x = VDiffResponse{} +func (x *CreateVReplicationWorkflowResponse) Reset() { + *x = CreateVReplicationWorkflowResponse{} if protoimpl.UnsafeEnabled { mi := &file_tabletmanagerdata_proto_msgTypes[97] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4820,13 +4929,13 @@ func (x *VDiffResponse) Reset() { } } -func (x *VDiffResponse) String() string { +func (x *CreateVReplicationWorkflowResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VDiffResponse) ProtoMessage() {} +func (*CreateVReplicationWorkflowResponse) ProtoMessage() {} -func (x *VDiffResponse) ProtoReflect() protoreflect.Message { +func (x *CreateVReplicationWorkflowResponse) ProtoReflect() protoreflect.Message { mi := &file_tabletmanagerdata_proto_msgTypes[97] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4838,45 +4947,28 @@ func (x *VDiffResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use VDiffResponse.ProtoReflect.Descriptor instead. -func (*VDiffResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use CreateVReplicationWorkflowResponse.ProtoReflect.Descriptor instead. +func (*CreateVReplicationWorkflowResponse) Descriptor() ([]byte, []int) { return file_tabletmanagerdata_proto_rawDescGZIP(), []int{97} } -func (x *VDiffResponse) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *VDiffResponse) GetOutput() *query.QueryResult { +func (x *CreateVReplicationWorkflowResponse) GetResult() *query.QueryResult { if x != nil { - return x.Output + return x.Result } return nil } -func (x *VDiffResponse) GetVdiffUuid() string { - if x != nil { - return x.VdiffUuid - } - return "" -} - -// options that influence the tablet selected by the picker for streaming data from -type VDiffPickerOptions struct { +type DeleteVReplicationWorkflowRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletTypes string `protobuf:"bytes,1,opt,name=tablet_types,json=tabletTypes,proto3" json:"tablet_types,omitempty"` - SourceCell string `protobuf:"bytes,2,opt,name=source_cell,json=sourceCell,proto3" json:"source_cell,omitempty"` - TargetCell string `protobuf:"bytes,3,opt,name=target_cell,json=targetCell,proto3" json:"target_cell,omitempty"` + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` } -func (x *VDiffPickerOptions) Reset() { - *x = VDiffPickerOptions{} +func (x *DeleteVReplicationWorkflowRequest) Reset() { + *x = DeleteVReplicationWorkflowRequest{} if protoimpl.UnsafeEnabled { mi := &file_tabletmanagerdata_proto_msgTypes[98] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4884,13 +4976,13 @@ func (x *VDiffPickerOptions) Reset() { } } -func (x *VDiffPickerOptions) String() string { +func (x *DeleteVReplicationWorkflowRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VDiffPickerOptions) ProtoMessage() {} +func (*DeleteVReplicationWorkflowRequest) ProtoMessage() {} -func (x *VDiffPickerOptions) ProtoReflect() protoreflect.Message { +func (x *DeleteVReplicationWorkflowRequest) ProtoReflect() protoreflect.Message { mi := &file_tabletmanagerdata_proto_msgTypes[98] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4902,45 +4994,28 @@ func (x *VDiffPickerOptions) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use VDiffPickerOptions.ProtoReflect.Descriptor instead. -func (*VDiffPickerOptions) Descriptor() ([]byte, []int) { +// Deprecated: Use DeleteVReplicationWorkflowRequest.ProtoReflect.Descriptor instead. +func (*DeleteVReplicationWorkflowRequest) Descriptor() ([]byte, []int) { return file_tabletmanagerdata_proto_rawDescGZIP(), []int{98} } -func (x *VDiffPickerOptions) GetTabletTypes() string { - if x != nil { - return x.TabletTypes - } - return "" -} - -func (x *VDiffPickerOptions) GetSourceCell() string { - if x != nil { - return x.SourceCell - } - return "" -} - -func (x *VDiffPickerOptions) GetTargetCell() string { +func (x *DeleteVReplicationWorkflowRequest) GetWorkflow() string { if x != nil { - return x.TargetCell + return x.Workflow } return "" } -// options that only influence how vdiff differences are reported -type VDiffReportOptions struct { +type DeleteVReplicationWorkflowResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - OnlyPks bool `protobuf:"varint,1,opt,name=only_pks,json=onlyPks,proto3" json:"only_pks,omitempty"` - DebugQuery bool `protobuf:"varint,2,opt,name=debug_query,json=debugQuery,proto3" json:"debug_query,omitempty"` - Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"` + Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` } -func (x *VDiffReportOptions) Reset() { - *x = VDiffReportOptions{} +func (x *DeleteVReplicationWorkflowResponse) Reset() { + *x = DeleteVReplicationWorkflowResponse{} if protoimpl.UnsafeEnabled { mi := &file_tabletmanagerdata_proto_msgTypes[99] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4948,13 +5023,13 @@ func (x *VDiffReportOptions) Reset() { } } -func (x *VDiffReportOptions) String() string { +func (x *DeleteVReplicationWorkflowResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VDiffReportOptions) ProtoMessage() {} +func (*DeleteVReplicationWorkflowResponse) ProtoMessage() {} -func (x *VDiffReportOptions) ProtoReflect() protoreflect.Message { +func (x *DeleteVReplicationWorkflowResponse) ProtoReflect() protoreflect.Message { mi := &file_tabletmanagerdata_proto_msgTypes[99] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4966,63 +5041,99 @@ func (x *VDiffReportOptions) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use VDiffReportOptions.ProtoReflect.Descriptor instead. -func (*VDiffReportOptions) Descriptor() ([]byte, []int) { +// Deprecated: Use DeleteVReplicationWorkflowResponse.ProtoReflect.Descriptor instead. +func (*DeleteVReplicationWorkflowResponse) Descriptor() ([]byte, []int) { return file_tabletmanagerdata_proto_rawDescGZIP(), []int{99} } -func (x *VDiffReportOptions) GetOnlyPks() bool { +func (x *DeleteVReplicationWorkflowResponse) GetResult() *query.QueryResult { if x != nil { - return x.OnlyPks + return x.Result } - return false + return nil } -func (x *VDiffReportOptions) GetDebugQuery() bool { - if x != nil { - return x.DebugQuery +type ReadVReplicationWorkflowRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` +} + +func (x *ReadVReplicationWorkflowRequest) Reset() { + *x = ReadVReplicationWorkflowRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[100] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return false } -func (x *VDiffReportOptions) GetFormat() string { +func (x *ReadVReplicationWorkflowRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadVReplicationWorkflowRequest) ProtoMessage() {} + +func (x *ReadVReplicationWorkflowRequest) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[100] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadVReplicationWorkflowRequest.ProtoReflect.Descriptor instead. +func (*ReadVReplicationWorkflowRequest) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{100} +} + +func (x *ReadVReplicationWorkflowRequest) GetWorkflow() string { if x != nil { - return x.Format + return x.Workflow } return "" } -type VDiffCoreOptions struct { +type ReadVReplicationWorkflowResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Tables string `protobuf:"bytes,1,opt,name=tables,proto3" json:"tables,omitempty"` - AutoRetry bool `protobuf:"varint,2,opt,name=auto_retry,json=autoRetry,proto3" json:"auto_retry,omitempty"` - MaxRows int64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` - Checksum bool `protobuf:"varint,4,opt,name=checksum,proto3" json:"checksum,omitempty"` - SamplePct int64 `protobuf:"varint,5,opt,name=sample_pct,json=samplePct,proto3" json:"sample_pct,omitempty"` - TimeoutSeconds int64 `protobuf:"varint,6,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` - MaxExtraRowsToCompare int64 `protobuf:"varint,7,opt,name=max_extra_rows_to_compare,json=maxExtraRowsToCompare,proto3" json:"max_extra_rows_to_compare,omitempty"` + Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` + Cells string `protobuf:"bytes,3,opt,name=cells,proto3" json:"cells,omitempty"` + TabletTypes []topodata.TabletType `protobuf:"varint,4,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` + TabletSelectionPreference TabletSelectionPreference `protobuf:"varint,5,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` + DbName string `protobuf:"bytes,6,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"` + Tags string `protobuf:"bytes,7,opt,name=tags,proto3" json:"tags,omitempty"` + WorkflowType binlogdata.VReplicationWorkflowType `protobuf:"varint,8,opt,name=workflow_type,json=workflowType,proto3,enum=binlogdata.VReplicationWorkflowType" json:"workflow_type,omitempty"` + WorkflowSubType binlogdata.VReplicationWorkflowSubType `protobuf:"varint,9,opt,name=workflow_sub_type,json=workflowSubType,proto3,enum=binlogdata.VReplicationWorkflowSubType" json:"workflow_sub_type,omitempty"` + DeferSecondaryKeys bool `protobuf:"varint,10,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"` + Streams []*ReadVReplicationWorkflowResponse_Stream `protobuf:"bytes,11,rep,name=streams,proto3" json:"streams,omitempty"` } -func (x *VDiffCoreOptions) Reset() { - *x = VDiffCoreOptions{} +func (x *ReadVReplicationWorkflowResponse) Reset() { + *x = ReadVReplicationWorkflowResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[100] + mi := &file_tabletmanagerdata_proto_msgTypes[101] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *VDiffCoreOptions) String() string { +func (x *ReadVReplicationWorkflowResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VDiffCoreOptions) ProtoMessage() {} +func (*ReadVReplicationWorkflowResponse) ProtoMessage() {} -func (x *VDiffCoreOptions) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[100] +func (x *ReadVReplicationWorkflowResponse) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[101] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5033,87 +5144,923 @@ func (x *VDiffCoreOptions) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use VDiffCoreOptions.ProtoReflect.Descriptor instead. -func (*VDiffCoreOptions) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{100} +// Deprecated: Use ReadVReplicationWorkflowResponse.ProtoReflect.Descriptor instead. +func (*ReadVReplicationWorkflowResponse) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{101} } -func (x *VDiffCoreOptions) GetTables() string { +func (x *ReadVReplicationWorkflowResponse) GetWorkflow() string { if x != nil { - return x.Tables + return x.Workflow } return "" } -func (x *VDiffCoreOptions) GetAutoRetry() bool { +func (x *ReadVReplicationWorkflowResponse) GetCells() string { if x != nil { - return x.AutoRetry + return x.Cells } - return false + return "" } -func (x *VDiffCoreOptions) GetMaxRows() int64 { +func (x *ReadVReplicationWorkflowResponse) GetTabletTypes() []topodata.TabletType { if x != nil { - return x.MaxRows + return x.TabletTypes } - return 0 + return nil } -func (x *VDiffCoreOptions) GetChecksum() bool { +func (x *ReadVReplicationWorkflowResponse) GetTabletSelectionPreference() TabletSelectionPreference { if x != nil { - return x.Checksum + return x.TabletSelectionPreference } - return false + return TabletSelectionPreference_ANY } -func (x *VDiffCoreOptions) GetSamplePct() int64 { +func (x *ReadVReplicationWorkflowResponse) GetDbName() string { if x != nil { - return x.SamplePct + return x.DbName } - return 0 + return "" } -func (x *VDiffCoreOptions) GetTimeoutSeconds() int64 { +func (x *ReadVReplicationWorkflowResponse) GetTags() string { if x != nil { - return x.TimeoutSeconds + return x.Tags } - return 0 + return "" } -func (x *VDiffCoreOptions) GetMaxExtraRowsToCompare() int64 { +func (x *ReadVReplicationWorkflowResponse) GetWorkflowType() binlogdata.VReplicationWorkflowType { + if x != nil { + return x.WorkflowType + } + return binlogdata.VReplicationWorkflowType(0) +} + +func (x *ReadVReplicationWorkflowResponse) GetWorkflowSubType() binlogdata.VReplicationWorkflowSubType { + if x != nil { + return x.WorkflowSubType + } + return binlogdata.VReplicationWorkflowSubType(0) +} + +func (x *ReadVReplicationWorkflowResponse) GetDeferSecondaryKeys() bool { + if x != nil { + return x.DeferSecondaryKeys + } + return false +} + +func (x *ReadVReplicationWorkflowResponse) GetStreams() []*ReadVReplicationWorkflowResponse_Stream { + if x != nil { + return x.Streams + } + return nil +} + +type VDiffRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` + Action string `protobuf:"bytes,3,opt,name=action,proto3" json:"action,omitempty"` + ActionArg string `protobuf:"bytes,4,opt,name=action_arg,json=actionArg,proto3" json:"action_arg,omitempty"` + VdiffUuid string `protobuf:"bytes,5,opt,name=vdiff_uuid,json=vdiffUuid,proto3" json:"vdiff_uuid,omitempty"` + Options *VDiffOptions `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *VDiffRequest) Reset() { + *x = VDiffRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[102] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffRequest) ProtoMessage() {} + +func (x *VDiffRequest) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[102] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffRequest.ProtoReflect.Descriptor instead. +func (*VDiffRequest) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{102} +} + +func (x *VDiffRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *VDiffRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +func (x *VDiffRequest) GetAction() string { + if x != nil { + return x.Action + } + return "" +} + +func (x *VDiffRequest) GetActionArg() string { + if x != nil { + return x.ActionArg + } + return "" +} + +func (x *VDiffRequest) GetVdiffUuid() string { + if x != nil { + return x.VdiffUuid + } + return "" +} + +func (x *VDiffRequest) GetOptions() *VDiffOptions { + if x != nil { + return x.Options + } + return nil +} + +type VDiffResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Output *query.QueryResult `protobuf:"bytes,2,opt,name=output,proto3" json:"output,omitempty"` + VdiffUuid string `protobuf:"bytes,3,opt,name=vdiff_uuid,json=vdiffUuid,proto3" json:"vdiff_uuid,omitempty"` +} + +func (x *VDiffResponse) Reset() { + *x = VDiffResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[103] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffResponse) ProtoMessage() {} + +func (x *VDiffResponse) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[103] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffResponse.ProtoReflect.Descriptor instead. +func (*VDiffResponse) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{103} +} + +func (x *VDiffResponse) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *VDiffResponse) GetOutput() *query.QueryResult { + if x != nil { + return x.Output + } + return nil +} + +func (x *VDiffResponse) GetVdiffUuid() string { + if x != nil { + return x.VdiffUuid + } + return "" +} + +// options that influence the tablet selected by the picker for streaming data from +type VDiffPickerOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TabletTypes string `protobuf:"bytes,1,opt,name=tablet_types,json=tabletTypes,proto3" json:"tablet_types,omitempty"` + SourceCell string `protobuf:"bytes,2,opt,name=source_cell,json=sourceCell,proto3" json:"source_cell,omitempty"` + TargetCell string `protobuf:"bytes,3,opt,name=target_cell,json=targetCell,proto3" json:"target_cell,omitempty"` +} + +func (x *VDiffPickerOptions) Reset() { + *x = VDiffPickerOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[104] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffPickerOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffPickerOptions) ProtoMessage() {} + +func (x *VDiffPickerOptions) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[104] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffPickerOptions.ProtoReflect.Descriptor instead. +func (*VDiffPickerOptions) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{104} +} + +func (x *VDiffPickerOptions) GetTabletTypes() string { + if x != nil { + return x.TabletTypes + } + return "" +} + +func (x *VDiffPickerOptions) GetSourceCell() string { + if x != nil { + return x.SourceCell + } + return "" +} + +func (x *VDiffPickerOptions) GetTargetCell() string { + if x != nil { + return x.TargetCell + } + return "" +} + +// options that only influence how vdiff differences are reported +type VDiffReportOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OnlyPks bool `protobuf:"varint,1,opt,name=only_pks,json=onlyPks,proto3" json:"only_pks,omitempty"` + DebugQuery bool `protobuf:"varint,2,opt,name=debug_query,json=debugQuery,proto3" json:"debug_query,omitempty"` + Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"` +} + +func (x *VDiffReportOptions) Reset() { + *x = VDiffReportOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[105] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffReportOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffReportOptions) ProtoMessage() {} + +func (x *VDiffReportOptions) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[105] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffReportOptions.ProtoReflect.Descriptor instead. +func (*VDiffReportOptions) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{105} +} + +func (x *VDiffReportOptions) GetOnlyPks() bool { + if x != nil { + return x.OnlyPks + } + return false +} + +func (x *VDiffReportOptions) GetDebugQuery() bool { + if x != nil { + return x.DebugQuery + } + return false +} + +func (x *VDiffReportOptions) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +type VDiffCoreOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tables string `protobuf:"bytes,1,opt,name=tables,proto3" json:"tables,omitempty"` + AutoRetry bool `protobuf:"varint,2,opt,name=auto_retry,json=autoRetry,proto3" json:"auto_retry,omitempty"` + MaxRows int64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` + Checksum bool `protobuf:"varint,4,opt,name=checksum,proto3" json:"checksum,omitempty"` + SamplePct int64 `protobuf:"varint,5,opt,name=sample_pct,json=samplePct,proto3" json:"sample_pct,omitempty"` + TimeoutSeconds int64 `protobuf:"varint,6,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` + MaxExtraRowsToCompare int64 `protobuf:"varint,7,opt,name=max_extra_rows_to_compare,json=maxExtraRowsToCompare,proto3" json:"max_extra_rows_to_compare,omitempty"` + UpdateTableStats bool `protobuf:"varint,8,opt,name=update_table_stats,json=updateTableStats,proto3" json:"update_table_stats,omitempty"` +} + +func (x *VDiffCoreOptions) Reset() { + *x = VDiffCoreOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[106] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffCoreOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffCoreOptions) ProtoMessage() {} + +func (x *VDiffCoreOptions) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[106] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffCoreOptions.ProtoReflect.Descriptor instead. +func (*VDiffCoreOptions) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{106} +} + +func (x *VDiffCoreOptions) GetTables() string { + if x != nil { + return x.Tables + } + return "" +} + +func (x *VDiffCoreOptions) GetAutoRetry() bool { + if x != nil { + return x.AutoRetry + } + return false +} + +func (x *VDiffCoreOptions) GetMaxRows() int64 { + if x != nil { + return x.MaxRows + } + return 0 +} + +func (x *VDiffCoreOptions) GetChecksum() bool { + if x != nil { + return x.Checksum + } + return false +} + +func (x *VDiffCoreOptions) GetSamplePct() int64 { + if x != nil { + return x.SamplePct + } + return 0 +} + +func (x *VDiffCoreOptions) GetTimeoutSeconds() int64 { + if x != nil { + return x.TimeoutSeconds + } + return 0 +} + +func (x *VDiffCoreOptions) GetMaxExtraRowsToCompare() int64 { if x != nil { return x.MaxExtraRowsToCompare } - return 0 + return 0 +} + +func (x *VDiffCoreOptions) GetUpdateTableStats() bool { + if x != nil { + return x.UpdateTableStats + } + return false +} + +type VDiffOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PickerOptions *VDiffPickerOptions `protobuf:"bytes,1,opt,name=picker_options,json=pickerOptions,proto3" json:"picker_options,omitempty"` + CoreOptions *VDiffCoreOptions `protobuf:"bytes,2,opt,name=core_options,json=coreOptions,proto3" json:"core_options,omitempty"` + ReportOptions *VDiffReportOptions `protobuf:"bytes,3,opt,name=report_options,json=reportOptions,proto3" json:"report_options,omitempty"` +} + +func (x *VDiffOptions) Reset() { + *x = VDiffOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[107] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VDiffOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VDiffOptions) ProtoMessage() {} + +func (x *VDiffOptions) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[107] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VDiffOptions.ProtoReflect.Descriptor instead. +func (*VDiffOptions) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{107} +} + +func (x *VDiffOptions) GetPickerOptions() *VDiffPickerOptions { + if x != nil { + return x.PickerOptions + } + return nil +} + +func (x *VDiffOptions) GetCoreOptions() *VDiffCoreOptions { + if x != nil { + return x.CoreOptions + } + return nil +} + +func (x *VDiffOptions) GetReportOptions() *VDiffReportOptions { + if x != nil { + return x.ReportOptions + } + return nil +} + +type UpdateVReplicationWorkflowRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` + TabletTypes []topodata.TabletType `protobuf:"varint,3,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` + TabletSelectionPreference TabletSelectionPreference `protobuf:"varint,4,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` + OnDdl binlogdata.OnDDLAction `protobuf:"varint,5,opt,name=on_ddl,json=onDdl,proto3,enum=binlogdata.OnDDLAction" json:"on_ddl,omitempty"` + State binlogdata.VReplicationWorkflowState `protobuf:"varint,6,opt,name=state,proto3,enum=binlogdata.VReplicationWorkflowState" json:"state,omitempty"` +} + +func (x *UpdateVReplicationWorkflowRequest) Reset() { + *x = UpdateVReplicationWorkflowRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[108] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateVReplicationWorkflowRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateVReplicationWorkflowRequest) ProtoMessage() {} + +func (x *UpdateVReplicationWorkflowRequest) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[108] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateVReplicationWorkflowRequest.ProtoReflect.Descriptor instead. +func (*UpdateVReplicationWorkflowRequest) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{108} +} + +func (x *UpdateVReplicationWorkflowRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +func (x *UpdateVReplicationWorkflowRequest) GetCells() []string { + if x != nil { + return x.Cells + } + return nil +} + +func (x *UpdateVReplicationWorkflowRequest) GetTabletTypes() []topodata.TabletType { + if x != nil { + return x.TabletTypes + } + return nil +} + +func (x *UpdateVReplicationWorkflowRequest) GetTabletSelectionPreference() TabletSelectionPreference { + if x != nil { + return x.TabletSelectionPreference + } + return TabletSelectionPreference_ANY +} + +func (x *UpdateVReplicationWorkflowRequest) GetOnDdl() binlogdata.OnDDLAction { + if x != nil { + return x.OnDdl + } + return binlogdata.OnDDLAction(0) +} + +func (x *UpdateVReplicationWorkflowRequest) GetState() binlogdata.VReplicationWorkflowState { + if x != nil { + return x.State + } + return binlogdata.VReplicationWorkflowState(0) +} + +type UpdateVReplicationWorkflowResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` +} + +func (x *UpdateVReplicationWorkflowResponse) Reset() { + *x = UpdateVReplicationWorkflowResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[109] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateVReplicationWorkflowResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateVReplicationWorkflowResponse) ProtoMessage() {} + +func (x *UpdateVReplicationWorkflowResponse) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[109] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateVReplicationWorkflowResponse.ProtoReflect.Descriptor instead. +func (*UpdateVReplicationWorkflowResponse) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{109} +} + +func (x *UpdateVReplicationWorkflowResponse) GetResult() *query.QueryResult { + if x != nil { + return x.Result + } + return nil +} + +type ResetSequencesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tables []string `protobuf:"bytes,1,rep,name=tables,proto3" json:"tables,omitempty"` +} + +func (x *ResetSequencesRequest) Reset() { + *x = ResetSequencesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[110] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResetSequencesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResetSequencesRequest) ProtoMessage() {} + +func (x *ResetSequencesRequest) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[110] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResetSequencesRequest.ProtoReflect.Descriptor instead. +func (*ResetSequencesRequest) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{110} +} + +func (x *ResetSequencesRequest) GetTables() []string { + if x != nil { + return x.Tables + } + return nil +} + +type ResetSequencesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ResetSequencesResponse) Reset() { + *x = ResetSequencesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[111] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResetSequencesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResetSequencesResponse) ProtoMessage() {} + +func (x *ResetSequencesResponse) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[111] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResetSequencesResponse.ProtoReflect.Descriptor instead. +func (*ResetSequencesResponse) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{111} +} + +type CheckThrottlerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AppName string `protobuf:"bytes,1,opt,name=app_name,json=appName,proto3" json:"app_name,omitempty"` +} + +func (x *CheckThrottlerRequest) Reset() { + *x = CheckThrottlerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[112] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckThrottlerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckThrottlerRequest) ProtoMessage() {} + +func (x *CheckThrottlerRequest) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[112] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckThrottlerRequest.ProtoReflect.Descriptor instead. +func (*CheckThrottlerRequest) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{112} +} + +func (x *CheckThrottlerRequest) GetAppName() string { + if x != nil { + return x.AppName + } + return "" +} + +type CheckThrottlerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // StatusCode is HTTP compliant response code (e.g. 200 for OK) + StatusCode int32 `protobuf:"varint,1,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // Value is the metric value collected by the tablet + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + // Threshold is the throttling threshold the table was comparing the value with + Threshold float64 `protobuf:"fixed64,3,opt,name=threshold,proto3" json:"threshold,omitempty"` + // Error indicates an error retrieving the value + Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` + // Message + Message string `protobuf:"bytes,5,opt,name=message,proto3" json:"message,omitempty"` + // RecentlyChecked indicates that the tablet has been hit with a user-facing check, which can then imply + // that heartbeats lease should be renwed. + RecentlyChecked bool `protobuf:"varint,6,opt,name=recently_checked,json=recentlyChecked,proto3" json:"recently_checked,omitempty"` +} + +func (x *CheckThrottlerResponse) Reset() { + *x = CheckThrottlerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[113] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckThrottlerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckThrottlerResponse) ProtoMessage() {} + +func (x *CheckThrottlerResponse) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[113] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckThrottlerResponse.ProtoReflect.Descriptor instead. +func (*CheckThrottlerResponse) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{113} +} + +func (x *CheckThrottlerResponse) GetStatusCode() int32 { + if x != nil { + return x.StatusCode + } + return 0 +} + +func (x *CheckThrottlerResponse) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *CheckThrottlerResponse) GetThreshold() float64 { + if x != nil { + return x.Threshold + } + return 0 +} + +func (x *CheckThrottlerResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +func (x *CheckThrottlerResponse) GetMessage() string { + if x != nil { + return x.Message + } + return "" } -type VDiffOptions struct { +func (x *CheckThrottlerResponse) GetRecentlyChecked() bool { + if x != nil { + return x.RecentlyChecked + } + return false +} + +type ReadVReplicationWorkflowResponse_Stream struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - PickerOptions *VDiffPickerOptions `protobuf:"bytes,1,opt,name=picker_options,json=pickerOptions,proto3" json:"picker_options,omitempty"` - CoreOptions *VDiffCoreOptions `protobuf:"bytes,2,opt,name=core_options,json=coreOptions,proto3" json:"core_options,omitempty"` - ReportOptions *VDiffReportOptions `protobuf:"bytes,3,opt,name=report_options,json=reportOptions,proto3" json:"report_options,omitempty"` -} - -func (x *VDiffOptions) Reset() { - *x = VDiffOptions{} + Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Bls *binlogdata.BinlogSource `protobuf:"bytes,2,opt,name=bls,proto3" json:"bls,omitempty"` + Pos string `protobuf:"bytes,3,opt,name=pos,proto3" json:"pos,omitempty"` + StopPos string `protobuf:"bytes,4,opt,name=stop_pos,json=stopPos,proto3" json:"stop_pos,omitempty"` + MaxTps int64 `protobuf:"varint,5,opt,name=max_tps,json=maxTps,proto3" json:"max_tps,omitempty"` + MaxReplicationLag int64 `protobuf:"varint,6,opt,name=max_replication_lag,json=maxReplicationLag,proto3" json:"max_replication_lag,omitempty"` + TimeUpdated *vttime.Time `protobuf:"bytes,7,opt,name=time_updated,json=timeUpdated,proto3" json:"time_updated,omitempty"` + TransactionTimestamp *vttime.Time `protobuf:"bytes,8,opt,name=transaction_timestamp,json=transactionTimestamp,proto3" json:"transaction_timestamp,omitempty"` + State binlogdata.VReplicationWorkflowState `protobuf:"varint,9,opt,name=state,proto3,enum=binlogdata.VReplicationWorkflowState" json:"state,omitempty"` + Message string `protobuf:"bytes,10,opt,name=message,proto3" json:"message,omitempty"` + RowsCopied int64 `protobuf:"varint,11,opt,name=rows_copied,json=rowsCopied,proto3" json:"rows_copied,omitempty"` + TimeHeartbeat *vttime.Time `protobuf:"bytes,12,opt,name=time_heartbeat,json=timeHeartbeat,proto3" json:"time_heartbeat,omitempty"` + TimeThrottled *vttime.Time `protobuf:"bytes,13,opt,name=time_throttled,json=timeThrottled,proto3" json:"time_throttled,omitempty"` + ComponentThrottled string `protobuf:"bytes,14,opt,name=component_throttled,json=componentThrottled,proto3" json:"component_throttled,omitempty"` +} + +func (x *ReadVReplicationWorkflowResponse_Stream) Reset() { + *x = ReadVReplicationWorkflowResponse_Stream{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[101] + mi := &file_tabletmanagerdata_proto_msgTypes[117] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *VDiffOptions) String() string { +func (x *ReadVReplicationWorkflowResponse_Stream) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VDiffOptions) ProtoMessage() {} +func (*ReadVReplicationWorkflowResponse_Stream) ProtoMessage() {} -func (x *VDiffOptions) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[101] +func (x *ReadVReplicationWorkflowResponse_Stream) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[117] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5124,488 +6071,569 @@ func (x *VDiffOptions) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use VDiffOptions.ProtoReflect.Descriptor instead. -func (*VDiffOptions) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{101} +// Deprecated: Use ReadVReplicationWorkflowResponse_Stream.ProtoReflect.Descriptor instead. +func (*ReadVReplicationWorkflowResponse_Stream) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{101, 0} } -func (x *VDiffOptions) GetPickerOptions() *VDiffPickerOptions { +func (x *ReadVReplicationWorkflowResponse_Stream) GetId() int32 { if x != nil { - return x.PickerOptions + return x.Id + } + return 0 +} + +func (x *ReadVReplicationWorkflowResponse_Stream) GetBls() *binlogdata.BinlogSource { + if x != nil { + return x.Bls } return nil } -func (x *VDiffOptions) GetCoreOptions() *VDiffCoreOptions { +func (x *ReadVReplicationWorkflowResponse_Stream) GetPos() string { if x != nil { - return x.CoreOptions + return x.Pos + } + return "" +} + +func (x *ReadVReplicationWorkflowResponse_Stream) GetStopPos() string { + if x != nil { + return x.StopPos + } + return "" +} + +func (x *ReadVReplicationWorkflowResponse_Stream) GetMaxTps() int64 { + if x != nil { + return x.MaxTps + } + return 0 +} + +func (x *ReadVReplicationWorkflowResponse_Stream) GetMaxReplicationLag() int64 { + if x != nil { + return x.MaxReplicationLag + } + return 0 +} + +func (x *ReadVReplicationWorkflowResponse_Stream) GetTimeUpdated() *vttime.Time { + if x != nil { + return x.TimeUpdated } return nil } -func (x *VDiffOptions) GetReportOptions() *VDiffReportOptions { +func (x *ReadVReplicationWorkflowResponse_Stream) GetTransactionTimestamp() *vttime.Time { if x != nil { - return x.ReportOptions + return x.TransactionTimestamp + } + return nil +} + +func (x *ReadVReplicationWorkflowResponse_Stream) GetState() binlogdata.VReplicationWorkflowState { + if x != nil { + return x.State + } + return binlogdata.VReplicationWorkflowState(0) +} + +func (x *ReadVReplicationWorkflowResponse_Stream) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *ReadVReplicationWorkflowResponse_Stream) GetRowsCopied() int64 { + if x != nil { + return x.RowsCopied + } + return 0 +} + +func (x *ReadVReplicationWorkflowResponse_Stream) GetTimeHeartbeat() *vttime.Time { + if x != nil { + return x.TimeHeartbeat + } + return nil +} + +func (x *ReadVReplicationWorkflowResponse_Stream) GetTimeThrottled() *vttime.Time { + if x != nil { + return x.TimeThrottled } return nil } +func (x *ReadVReplicationWorkflowResponse_Stream) GetComponentThrottled() string { + if x != nil { + return x.ComponentThrottled + } + return "" +} + var File_tabletmanagerdata_proto protoreflect.FileDescriptor var file_tabletmanagerdata_proto_rawDesc = []byte{ 0x0a, 0x17, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x0b, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x0d, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x0c, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, 0x76, - 0x74, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xff, 0x01, 0x0a, 0x0f, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, - 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6c, - 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, - 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x11, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x43, 0x6f, 0x6c, - 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, - 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x64, - 0x61, 0x74, 0x61, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x77, - 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x72, 0x6f, - 0x77, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, - 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x22, 0xa6, 0x01, 0x0a, - 0x10, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x64, 0x61, 0x74, 0x61, - 0x62, 0x61, 0x73, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x4f, 0x0a, 0x11, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x44, - 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa6, 0x01, 0x0a, 0x12, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x48, 0x0a, 0x0d, - 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, - 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x46, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x10, 0x62, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x0d, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x0c, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x0b, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xff, 0x01, 0x0a, + 0x0f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x18, 0x0a, 0x07, + 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, + 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x11, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x43, + 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0a, 0x64, 0x61, 0x74, 0x61, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1b, 0x0a, 0x09, 0x72, + 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, + 0x72, 0x6f, 0x77, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x22, 0x92, + 0x01, 0x0a, 0x10, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x64, 0x61, + 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x4f, 0x0a, 0x11, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08, + 0x03, 0x10, 0x04, 0x22, 0xa6, 0x01, 0x0a, 0x12, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x48, 0x0a, 0x0d, 0x62, 0x65, + 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x12, 0x46, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0b, 0x61, 0x66, 0x74, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0xf7, 0x01, 0x0a, + 0x0e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x61, 0x73, 0x73, 0x77, + 0x6f, 0x72, 0x64, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x10, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x12, 0x51, 0x0a, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x73, 0x65, + 0x72, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x69, 0x76, + 0x69, 0x6c, 0x65, 0x67, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x72, 0x69, + 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x72, 0x69, 0x76, 0x69, + 0x6c, 0x65, 0x67, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x01, 0x0a, 0x0c, 0x44, 0x62, 0x50, 0x65, 0x72, + 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x64, + 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x64, 0x62, 0x12, 0x12, 0x0a, 0x04, 0x75, + 0x73, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, + 0x4f, 0x0a, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x62, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x73, + 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0xa3, 0x01, 0x0a, 0x0b, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x4c, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x73, + 0x65, 0x72, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x75, 0x73, + 0x65, 0x72, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, + 0x0e, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x62, 0x50, 0x65, 0x72, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x64, 0x62, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x27, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x28, + 0x0a, 0x0c, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x2a, 0x0a, 0x0c, 0x53, 0x6c, 0x65, 0x65, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x0f, 0x0a, 0x0d, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd7, 0x01, 0x0a, 0x12, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x50, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x65, 0x6e, 0x76, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, + 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, + 0x45, 0x6e, 0x76, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x74, 0x72, 0x61, 0x45, + 0x6e, 0x76, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x74, 0x72, 0x61, 0x45, 0x6e, 0x76, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x66, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x65, 0x78, 0x69, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x22, 0xa2, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, + 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x65, 0x0a, 0x11, + 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x50, 0x0a, 0x11, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x64, 0x65, 0x66, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x0b, 0x61, 0x66, 0x74, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0xf7, - 0x01, 0x0a, 0x0e, 0x55, 0x73, 0x65, 0x72, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x61, 0x73, - 0x73, 0x77, 0x6f, 0x72, 0x64, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x12, 0x51, 0x0a, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x69, 0x6c, - 0x65, 0x67, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, - 0x73, 0x65, 0x72, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, - 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, - 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x72, 0x69, - 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x01, 0x0a, 0x0c, 0x44, 0x62, 0x50, - 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x0e, 0x0a, - 0x02, 0x64, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x64, 0x62, 0x12, 0x12, 0x0a, - 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, - 0x72, 0x12, 0x4f, 0x0a, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x73, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x62, 0x50, 0x65, 0x72, 0x6d, - 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, - 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x50, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0xa3, 0x01, 0x0a, 0x0b, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x4c, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x74, 0x61, + 0x6e, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0x17, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5a, 0x0a, 0x16, + 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x55, 0x73, 0x65, 0x72, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, - 0x75, 0x73, 0x65, 0x72, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x46, 0x0a, 0x0e, 0x64, 0x62, 0x5f, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x62, 0x50, 0x65, - 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x64, 0x62, 0x50, 0x65, 0x72, 0x6d, - 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x27, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x22, 0x28, 0x0a, 0x0c, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x2a, 0x0a, 0x0c, 0x53, 0x6c, - 0x65, 0x65, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x0f, 0x0a, 0x0d, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd7, 0x01, 0x0a, 0x12, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x73, 0x12, 0x50, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x65, 0x6e, 0x76, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, 0x78, 0x74, - 0x72, 0x61, 0x45, 0x6e, 0x76, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x74, 0x72, - 0x61, 0x45, 0x6e, 0x76, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x74, 0x72, 0x61, 0x45, 0x6e, 0x76, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0x66, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x65, - 0x78, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, - 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, - 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x22, 0xa2, 0x01, 0x0a, 0x10, 0x47, 0x65, - 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, - 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, - 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, - 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x65, - 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x64, 0x65, - 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, - 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x17, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, - 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5a, - 0x0a, 0x16, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, - 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x70, - 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x53, 0x65, - 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x65, - 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x16, - 0x0a, 0x14, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x66, 0x0a, 0x11, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x0b, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x14, - 0x0a, 0x12, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x16, 0x0a, 0x14, 0x52, - 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x17, 0x0a, 0x15, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x18, 0x0a, 0x16, - 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x0a, 0x13, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, - 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x16, 0x50, 0x72, - 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0x67, - 0x0a, 0x17, 0x50, 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x0e, 0x63, 0x68, 0x61, - 0x6e, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x43, 0x68, 0x61, 0x6e, - 0x67, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x96, 0x02, 0x0a, 0x12, 0x41, 0x70, 0x70, 0x6c, - 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, - 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, - 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x0d, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x46, 0x0a, - 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, - 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x61, 0x66, 0x74, 0x65, 0x72, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x71, 0x6c, 0x5f, 0x6d, 0x6f, 0x64, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x71, 0x6c, 0x4d, 0x6f, 0x64, 0x65, - 0x22, 0xa7, 0x01, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0d, 0x62, 0x65, 0x66, 0x6f, - 0x72, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x12, 0x46, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x70, 0x65, 0x72, + 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x52, + 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x15, + 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x16, 0x0a, 0x14, + 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x66, 0x0a, 0x11, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x14, 0x0a, 0x12, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x66, + 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x17, 0x0a, 0x15, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x18, 0x0a, 0x16, 0x52, 0x75, + 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x0a, 0x13, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x77, + 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x16, 0x50, 0x72, 0x65, 0x66, + 0x6c, 0x69, 0x67, 0x68, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0x67, 0x0a, 0x17, + 0x50, 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x0e, 0x63, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x12, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x73, 0x71, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x12, 0x14, + 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, + 0x6f, 0x72, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x48, 0x0a, 0x0d, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x61, - 0x66, 0x74, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x13, 0x0a, 0x11, 0x4c, 0x6f, - 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x14, 0x0a, 0x12, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x16, 0x0a, 0x14, - 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8d, 0x01, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, - 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, - 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, - 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, - 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, - 0x65, 0x72, 0x49, 0x64, 0x22, 0x42, 0x0a, 0x14, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xb2, 0x01, 0x0a, 0x18, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, 0x65, + 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x62, + 0x65, 0x66, 0x6f, 0x72, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x46, 0x0a, 0x0c, 0x61, + 0x66, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x61, 0x66, 0x74, 0x65, 0x72, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x71, 0x6c, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x71, 0x6c, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x09, 0x62, 0x61, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xa7, 0x01, + 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0d, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, + 0x46, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x61, 0x66, 0x74, 0x65, + 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x13, 0x0a, 0x11, 0x4c, 0x6f, 0x63, 0x6b, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x14, 0x0a, 0x12, + 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x16, 0x0a, 0x14, 0x55, 0x6e, 0x6c, + 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x8d, 0x01, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, + 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, + 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, + 0x6f, 0x77, 0x73, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, + 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, + 0x64, 0x22, 0x42, 0x0a, 0x14, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xb2, 0x01, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, + 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x47, 0x0a, 0x19, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x1d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, + 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, - 0x27, 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, - 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, - 0x65, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0c, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x47, 0x0a, - 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, - 0x62, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x1d, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x17, - 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, - 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, - 0x77, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x6f, 0x61, - 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x4c, 0x0a, 0x1e, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x4b, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, - 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, - 0x6f, 0x77, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, - 0x77, 0x73, 0x22, 0x47, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, - 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x1a, 0x0a, 0x18, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4c, 0x0a, 0x19, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4f, 0x0a, - 0x15, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x18, - 0x0a, 0x16, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x35, 0x0a, 0x17, 0x50, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x34, 0x0a, 0x16, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x19, 0x0a, 0x17, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x22, 0x4c, 0x0a, 0x1e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, + 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x22, 0x4b, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, + 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x22, + 0x47, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, + 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x1a, 0x0a, 0x18, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x22, 0x4c, 0x0a, 0x19, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4f, 0x0a, 0x15, 0x50, 0x72, + 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x50, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x35, 0x0a, 0x17, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x18, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, - 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5e, 0x0a, 0x1d, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, + 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x34, 0x0a, 0x16, + 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x77, 0x61, 0x69, 0x74, 0x54, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x3c, 0x0a, 0x1e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, + 0x6f, 0x6e, 0x22, 0x19, 0x0a, 0x17, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x0a, + 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x5e, 0x0a, 0x1d, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x21, 0x0a, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x77, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x22, 0x3c, 0x0a, 0x1e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x35, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, + 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, + 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x62, 0x0a, 0x21, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x77, 0x61, 0x69, 0x74, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x24, 0x0a, 0x22, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, + 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x0a, + 0x12, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0x2b, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, + 0x64, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, + 0x22, 0x19, 0x0a, 0x17, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1a, 0x0a, 0x18, 0x52, + 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x0a, 0x17, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x46, 0x0a, 0x18, 0x56, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x22, 0x4b, 0x0a, 0x1d, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, + 0x1e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x69, + 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x30, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, + 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, + 0x63, 0x22, 0x31, 0x0a, 0x13, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd8, 0x01, 0x0a, 0x1e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4e, 0x73, 0x12, + 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x3a, 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0c, + 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x31, 0x0a, 0x14, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x21, 0x0a, 0x1f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0xba, 0x01, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x4e, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, + 0x15, 0x0a, 0x13, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x64, + 0x0a, 0x15, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0e, 0x70, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4a, 0x04, + 0x08, 0x01, 0x10, 0x02, 0x22, 0x36, 0x0a, 0x18, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x1b, 0x0a, 0x19, + 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x0a, 0x21, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x24, 0x0a, 0x22, 0x52, 0x65, 0x73, + 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x13, 0x0a, 0x11, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x22, 0x49, 0x0a, 0x12, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, + 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, + 0xed, 0x01, 0x0a, 0x1b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x26, + 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x64, 0x4e, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, + 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, + 0x1e, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x4b, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0x1d, 0x0a, 0x1b, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7e, 0x0a, 0x22, 0x53, + 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, + 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x58, 0x0a, 0x15, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x24, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x13, 0x73, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x6b, 0x0a, 0x23, 0x53, + 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, + 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x33, 0x0a, 0x15, 0x50, 0x72, 0x6f, 0x6d, + 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x34, 0x0a, + 0x16, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0x35, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, - 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x62, 0x0a, 0x21, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, - 0x66, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x77, - 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x24, 0x0a, 0x22, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, - 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x14, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2b, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x64, - 0x64, 0x72, 0x73, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1a, - 0x0a, 0x18, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x0a, 0x17, 0x56, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x46, 0x0a, 0x18, 0x56, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x22, 0x4b, 0x0a, 0x1d, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x20, 0x0a, 0x1e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x30, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, - 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, - 0x53, 0x79, 0x6e, 0x63, 0x22, 0x31, 0x0a, 0x13, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd8, 0x01, 0x0a, 0x1e, 0x50, 0x6f, 0x70, 0x75, - 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, - 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x69, - 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x4e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x61, - 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, - 0x31, 0x0a, 0x14, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x21, 0x0a, 0x1f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xba, 0x01, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, - 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x64, 0x4e, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, - 0x6e, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, - 0x6e, 0x63, 0x22, 0x15, 0x0a, 0x13, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x22, 0x64, 0x0a, 0x15, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0e, 0x70, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x36, 0x0a, 0x18, 0x55, 0x6e, 0x64, 0x6f, 0x44, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, - 0x1b, 0x0a, 0x19, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x0a, 0x19, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, - 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x0a, 0x21, 0x52, 0x65, 0x73, 0x65, 0x74, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x24, 0x0a, 0x22, - 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x13, 0x0a, 0x11, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x49, 0x0a, 0x12, 0x46, 0x75, 0x6c, 0x6c, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x22, 0xed, 0x01, 0x0a, 0x1b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4e, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x66, 0x6f, 0x72, - 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x66, 0x6f, 0x72, 0x63, - 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, - 0x6e, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, - 0x6e, 0x63, 0x22, 0x1e, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x4b, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, - 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x2d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, - 0x1d, 0x0a, 0x1b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7e, - 0x0a, 0x22, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x58, 0x0a, 0x15, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x72, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x13, 0x73, 0x74, 0x6f, 0x70, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x6b, - 0x0a, 0x23, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x33, 0x0a, 0x15, 0x50, - 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, - 0x22, 0x34, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x88, 0x01, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, - 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, - 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, - 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, - 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, - 0x73, 0x22, 0x36, 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x69, 0x6f, 0x6e, 0x22, 0xab, 0x01, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x14, + 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x72, 0x6f, 0x6d, + 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x73, 0x61, 0x66, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x61, 0x66, + 0x65, 0x22, 0x36, 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x88, 0x01, 0x0a, 0x18, 0x52, 0x65, + 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0xc8, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, @@ -5614,62 +6642,198 @@ var file_tabletmanagerdata_proto_rawDesc = []byte{ 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, - 0x79, 0x52, 0x75, 0x6e, 0x22, 0x41, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, + 0x79, 0x52, 0x75, 0x6e, 0x12, 0x3e, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, + 0x74, 0x6f, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x22, 0x41, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0xd7, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, - 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, - 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, - 0x66, 0x66, 0x55, 0x75, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, - 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0x6a, 0x0a, 0x0d, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1d, - 0x0a, 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, 0x66, 0x66, 0x55, 0x75, 0x69, 0x64, 0x22, 0x79, 0x0a, - 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x22, 0x68, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, - 0x66, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x19, - 0x0a, 0x08, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x70, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x07, 0x6f, 0x6e, 0x6c, 0x79, 0x50, 0x6b, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x65, 0x62, - 0x75, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x22, 0x82, 0x02, 0x0a, 0x10, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x72, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, - 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x52, 0x65, 0x74, 0x72, 0x79, 0x12, 0x19, - 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, - 0x70, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, - 0x65, 0x50, 0x63, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x74, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x38, 0x0a, - 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x5f, - 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x15, 0x6d, 0x61, 0x78, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x6f, 0x77, 0x73, 0x54, 0x6f, - 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x22, 0xf2, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, + 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0xd4, 0x04, 0x0a, 0x21, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x3d, 0x0a, 0x0d, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0c, 0x62, 0x69, 0x6e, 0x6c, + 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, + 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x62, + 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x53, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x75, 0x62, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x62, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, + 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, + 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, + 0x66, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x22, 0x50, + 0x0a, 0x22, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x22, 0x3f, 0x0a, 0x21, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x22, 0x50, 0x0a, 0x22, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x22, 0x3d, 0x0a, 0x1f, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x22, 0x94, 0x09, 0x0a, 0x20, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, + 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, + 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x49, 0x0a, + 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0f, 0x77, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, 0x0a, + 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, + 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, + 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, + 0x54, 0x0a, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x3a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x07, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x73, 0x1a, 0xc1, 0x04, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x2a, 0x0a, 0x03, 0x62, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, + 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x03, 0x62, 0x6c, 0x73, 0x12, 0x10, 0x0a, 0x03, + 0x70, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x6f, 0x73, 0x12, 0x19, + 0x0a, 0x08, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x74, 0x6f, 0x70, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x61, 0x78, + 0x5f, 0x74, 0x70, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x54, + 0x70, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x11, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, + 0x61, 0x67, 0x12, 0x2f, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x41, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x52, 0x14, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x3b, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, + 0x0b, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x12, 0x33, + 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, + 0x65, 0x61, 0x74, 0x12, 0x33, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x68, 0x72, 0x6f, + 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x54, + 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, + 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x22, 0xd7, 0x01, 0x0a, 0x0c, 0x56, 0x44, + 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69, + 0x66, 0x66, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, + 0x64, 0x69, 0x66, 0x66, 0x55, 0x75, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, + 0x69, 0x66, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x22, 0x6a, 0x0a, 0x0d, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, 0x66, 0x66, 0x55, 0x75, 0x69, 0x64, 0x22, + 0x79, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x22, 0x68, 0x0a, 0x12, 0x56, 0x44, + 0x69, 0x66, 0x66, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x70, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x6f, 0x6e, 0x6c, 0x79, 0x50, 0x6b, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x64, + 0x65, 0x62, 0x75, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x22, 0xb0, 0x02, 0x0a, 0x10, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, + 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x52, 0x65, 0x74, 0x72, 0x79, + 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x5f, 0x70, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x50, 0x63, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, + 0x38, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x6f, 0x77, + 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x6f, 0x77, 0x73, + 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0xf2, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x70, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, @@ -5684,11 +6848,63 @@ var file_tabletmanagerdata_proto_rawDesc = []byte{ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0d, 0x72, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x30, 0x5a, 0x2e, - 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, - 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe9, 0x02, 0x0a, + 0x21, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x14, + 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, + 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, + 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x6f, + 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x62, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4f, 0x6e, 0x44, 0x44, 0x4c, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, 0x6c, 0x12, 0x3b, 0x0a, 0x05, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x50, 0x0a, 0x22, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, + 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x2f, 0x0a, 0x15, 0x52, 0x65, + 0x73, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x52, + 0x65, 0x73, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x15, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, + 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, + 0x0a, 0x08, 0x61, 0x70, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xc8, 0x01, 0x0a, 0x16, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, + 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, + 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, + 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x65, 0x64, 0x2a, 0x3e, 0x0a, 0x19, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, + 0x4f, 0x52, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x03, 0x42, 0x30, 0x5a, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, + 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -5703,173 +6919,217 @@ func file_tabletmanagerdata_proto_rawDescGZIP() []byte { return file_tabletmanagerdata_proto_rawDescData } -var file_tabletmanagerdata_proto_msgTypes = make([]protoimpl.MessageInfo, 105) +var file_tabletmanagerdata_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_tabletmanagerdata_proto_msgTypes = make([]protoimpl.MessageInfo, 118) var file_tabletmanagerdata_proto_goTypes = []interface{}{ - (*TableDefinition)(nil), // 0: tabletmanagerdata.TableDefinition - (*SchemaDefinition)(nil), // 1: tabletmanagerdata.SchemaDefinition - (*SchemaChangeResult)(nil), // 2: tabletmanagerdata.SchemaChangeResult - (*UserPermission)(nil), // 3: tabletmanagerdata.UserPermission - (*DbPermission)(nil), // 4: tabletmanagerdata.DbPermission - (*Permissions)(nil), // 5: tabletmanagerdata.Permissions - (*PingRequest)(nil), // 6: tabletmanagerdata.PingRequest - (*PingResponse)(nil), // 7: tabletmanagerdata.PingResponse - (*SleepRequest)(nil), // 8: tabletmanagerdata.SleepRequest - (*SleepResponse)(nil), // 9: tabletmanagerdata.SleepResponse - (*ExecuteHookRequest)(nil), // 10: tabletmanagerdata.ExecuteHookRequest - (*ExecuteHookResponse)(nil), // 11: tabletmanagerdata.ExecuteHookResponse - (*GetSchemaRequest)(nil), // 12: tabletmanagerdata.GetSchemaRequest - (*GetSchemaResponse)(nil), // 13: tabletmanagerdata.GetSchemaResponse - (*GetPermissionsRequest)(nil), // 14: tabletmanagerdata.GetPermissionsRequest - (*GetPermissionsResponse)(nil), // 15: tabletmanagerdata.GetPermissionsResponse - (*SetReadOnlyRequest)(nil), // 16: tabletmanagerdata.SetReadOnlyRequest - (*SetReadOnlyResponse)(nil), // 17: tabletmanagerdata.SetReadOnlyResponse - (*SetReadWriteRequest)(nil), // 18: tabletmanagerdata.SetReadWriteRequest - (*SetReadWriteResponse)(nil), // 19: tabletmanagerdata.SetReadWriteResponse - (*ChangeTypeRequest)(nil), // 20: tabletmanagerdata.ChangeTypeRequest - (*ChangeTypeResponse)(nil), // 21: tabletmanagerdata.ChangeTypeResponse - (*RefreshStateRequest)(nil), // 22: tabletmanagerdata.RefreshStateRequest - (*RefreshStateResponse)(nil), // 23: tabletmanagerdata.RefreshStateResponse - (*RunHealthCheckRequest)(nil), // 24: tabletmanagerdata.RunHealthCheckRequest - (*RunHealthCheckResponse)(nil), // 25: tabletmanagerdata.RunHealthCheckResponse - (*ReloadSchemaRequest)(nil), // 26: tabletmanagerdata.ReloadSchemaRequest - (*ReloadSchemaResponse)(nil), // 27: tabletmanagerdata.ReloadSchemaResponse - (*PreflightSchemaRequest)(nil), // 28: tabletmanagerdata.PreflightSchemaRequest - (*PreflightSchemaResponse)(nil), // 29: tabletmanagerdata.PreflightSchemaResponse - (*ApplySchemaRequest)(nil), // 30: tabletmanagerdata.ApplySchemaRequest - (*ApplySchemaResponse)(nil), // 31: tabletmanagerdata.ApplySchemaResponse - (*LockTablesRequest)(nil), // 32: tabletmanagerdata.LockTablesRequest - (*LockTablesResponse)(nil), // 33: tabletmanagerdata.LockTablesResponse - (*UnlockTablesRequest)(nil), // 34: tabletmanagerdata.UnlockTablesRequest - (*UnlockTablesResponse)(nil), // 35: tabletmanagerdata.UnlockTablesResponse - (*ExecuteQueryRequest)(nil), // 36: tabletmanagerdata.ExecuteQueryRequest - (*ExecuteQueryResponse)(nil), // 37: tabletmanagerdata.ExecuteQueryResponse - (*ExecuteFetchAsDbaRequest)(nil), // 38: tabletmanagerdata.ExecuteFetchAsDbaRequest - (*ExecuteFetchAsDbaResponse)(nil), // 39: tabletmanagerdata.ExecuteFetchAsDbaResponse - (*ExecuteFetchAsAllPrivsRequest)(nil), // 40: tabletmanagerdata.ExecuteFetchAsAllPrivsRequest - (*ExecuteFetchAsAllPrivsResponse)(nil), // 41: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse - (*ExecuteFetchAsAppRequest)(nil), // 42: tabletmanagerdata.ExecuteFetchAsAppRequest - (*ExecuteFetchAsAppResponse)(nil), // 43: tabletmanagerdata.ExecuteFetchAsAppResponse - (*ReplicationStatusRequest)(nil), // 44: tabletmanagerdata.ReplicationStatusRequest - (*ReplicationStatusResponse)(nil), // 45: tabletmanagerdata.ReplicationStatusResponse - (*PrimaryStatusRequest)(nil), // 46: tabletmanagerdata.PrimaryStatusRequest - (*PrimaryStatusResponse)(nil), // 47: tabletmanagerdata.PrimaryStatusResponse - (*PrimaryPositionRequest)(nil), // 48: tabletmanagerdata.PrimaryPositionRequest - (*PrimaryPositionResponse)(nil), // 49: tabletmanagerdata.PrimaryPositionResponse - (*WaitForPositionRequest)(nil), // 50: tabletmanagerdata.WaitForPositionRequest - (*WaitForPositionResponse)(nil), // 51: tabletmanagerdata.WaitForPositionResponse - (*StopReplicationRequest)(nil), // 52: tabletmanagerdata.StopReplicationRequest - (*StopReplicationResponse)(nil), // 53: tabletmanagerdata.StopReplicationResponse - (*StopReplicationMinimumRequest)(nil), // 54: tabletmanagerdata.StopReplicationMinimumRequest - (*StopReplicationMinimumResponse)(nil), // 55: tabletmanagerdata.StopReplicationMinimumResponse - (*StartReplicationRequest)(nil), // 56: tabletmanagerdata.StartReplicationRequest - (*StartReplicationResponse)(nil), // 57: tabletmanagerdata.StartReplicationResponse - (*StartReplicationUntilAfterRequest)(nil), // 58: tabletmanagerdata.StartReplicationUntilAfterRequest - (*StartReplicationUntilAfterResponse)(nil), // 59: tabletmanagerdata.StartReplicationUntilAfterResponse - (*GetReplicasRequest)(nil), // 60: tabletmanagerdata.GetReplicasRequest - (*GetReplicasResponse)(nil), // 61: tabletmanagerdata.GetReplicasResponse - (*ResetReplicationRequest)(nil), // 62: tabletmanagerdata.ResetReplicationRequest - (*ResetReplicationResponse)(nil), // 63: tabletmanagerdata.ResetReplicationResponse - (*VReplicationExecRequest)(nil), // 64: tabletmanagerdata.VReplicationExecRequest - (*VReplicationExecResponse)(nil), // 65: tabletmanagerdata.VReplicationExecResponse - (*VReplicationWaitForPosRequest)(nil), // 66: tabletmanagerdata.VReplicationWaitForPosRequest - (*VReplicationWaitForPosResponse)(nil), // 67: tabletmanagerdata.VReplicationWaitForPosResponse - (*InitPrimaryRequest)(nil), // 68: tabletmanagerdata.InitPrimaryRequest - (*InitPrimaryResponse)(nil), // 69: tabletmanagerdata.InitPrimaryResponse - (*PopulateReparentJournalRequest)(nil), // 70: tabletmanagerdata.PopulateReparentJournalRequest - (*PopulateReparentJournalResponse)(nil), // 71: tabletmanagerdata.PopulateReparentJournalResponse - (*InitReplicaRequest)(nil), // 72: tabletmanagerdata.InitReplicaRequest - (*InitReplicaResponse)(nil), // 73: tabletmanagerdata.InitReplicaResponse - (*DemotePrimaryRequest)(nil), // 74: tabletmanagerdata.DemotePrimaryRequest - (*DemotePrimaryResponse)(nil), // 75: tabletmanagerdata.DemotePrimaryResponse - (*UndoDemotePrimaryRequest)(nil), // 76: tabletmanagerdata.UndoDemotePrimaryRequest - (*UndoDemotePrimaryResponse)(nil), // 77: tabletmanagerdata.UndoDemotePrimaryResponse - (*ReplicaWasPromotedRequest)(nil), // 78: tabletmanagerdata.ReplicaWasPromotedRequest - (*ReplicaWasPromotedResponse)(nil), // 79: tabletmanagerdata.ReplicaWasPromotedResponse - (*ResetReplicationParametersRequest)(nil), // 80: tabletmanagerdata.ResetReplicationParametersRequest - (*ResetReplicationParametersResponse)(nil), // 81: tabletmanagerdata.ResetReplicationParametersResponse - (*FullStatusRequest)(nil), // 82: tabletmanagerdata.FullStatusRequest - (*FullStatusResponse)(nil), // 83: tabletmanagerdata.FullStatusResponse - (*SetReplicationSourceRequest)(nil), // 84: tabletmanagerdata.SetReplicationSourceRequest - (*SetReplicationSourceResponse)(nil), // 85: tabletmanagerdata.SetReplicationSourceResponse - (*ReplicaWasRestartedRequest)(nil), // 86: tabletmanagerdata.ReplicaWasRestartedRequest - (*ReplicaWasRestartedResponse)(nil), // 87: tabletmanagerdata.ReplicaWasRestartedResponse - (*StopReplicationAndGetStatusRequest)(nil), // 88: tabletmanagerdata.StopReplicationAndGetStatusRequest - (*StopReplicationAndGetStatusResponse)(nil), // 89: tabletmanagerdata.StopReplicationAndGetStatusResponse - (*PromoteReplicaRequest)(nil), // 90: tabletmanagerdata.PromoteReplicaRequest - (*PromoteReplicaResponse)(nil), // 91: tabletmanagerdata.PromoteReplicaResponse - (*BackupRequest)(nil), // 92: tabletmanagerdata.BackupRequest - (*BackupResponse)(nil), // 93: tabletmanagerdata.BackupResponse - (*RestoreFromBackupRequest)(nil), // 94: tabletmanagerdata.RestoreFromBackupRequest - (*RestoreFromBackupResponse)(nil), // 95: tabletmanagerdata.RestoreFromBackupResponse - (*VDiffRequest)(nil), // 96: tabletmanagerdata.VDiffRequest - (*VDiffResponse)(nil), // 97: tabletmanagerdata.VDiffResponse - (*VDiffPickerOptions)(nil), // 98: tabletmanagerdata.VDiffPickerOptions - (*VDiffReportOptions)(nil), // 99: tabletmanagerdata.VDiffReportOptions - (*VDiffCoreOptions)(nil), // 100: tabletmanagerdata.VDiffCoreOptions - (*VDiffOptions)(nil), // 101: tabletmanagerdata.VDiffOptions - nil, // 102: tabletmanagerdata.UserPermission.PrivilegesEntry - nil, // 103: tabletmanagerdata.DbPermission.PrivilegesEntry - nil, // 104: tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry - (*query.Field)(nil), // 105: query.Field - (topodata.TabletType)(0), // 106: topodata.TabletType - (*vtrpc.CallerID)(nil), // 107: vtrpc.CallerID - (*query.QueryResult)(nil), // 108: query.QueryResult - (*replicationdata.Status)(nil), // 109: replicationdata.Status - (*replicationdata.PrimaryStatus)(nil), // 110: replicationdata.PrimaryStatus - (*topodata.TabletAlias)(nil), // 111: topodata.TabletAlias - (*replicationdata.FullStatus)(nil), // 112: replicationdata.FullStatus - (replicationdata.StopReplicationMode)(0), // 113: replicationdata.StopReplicationMode - (*replicationdata.StopReplicationStatus)(nil), // 114: replicationdata.StopReplicationStatus - (*logutil.Event)(nil), // 115: logutil.Event - (*vttime.Time)(nil), // 116: vttime.Time + (TabletSelectionPreference)(0), // 0: tabletmanagerdata.TabletSelectionPreference + (*TableDefinition)(nil), // 1: tabletmanagerdata.TableDefinition + (*SchemaDefinition)(nil), // 2: tabletmanagerdata.SchemaDefinition + (*SchemaChangeResult)(nil), // 3: tabletmanagerdata.SchemaChangeResult + (*UserPermission)(nil), // 4: tabletmanagerdata.UserPermission + (*DbPermission)(nil), // 5: tabletmanagerdata.DbPermission + (*Permissions)(nil), // 6: tabletmanagerdata.Permissions + (*PingRequest)(nil), // 7: tabletmanagerdata.PingRequest + (*PingResponse)(nil), // 8: tabletmanagerdata.PingResponse + (*SleepRequest)(nil), // 9: tabletmanagerdata.SleepRequest + (*SleepResponse)(nil), // 10: tabletmanagerdata.SleepResponse + (*ExecuteHookRequest)(nil), // 11: tabletmanagerdata.ExecuteHookRequest + (*ExecuteHookResponse)(nil), // 12: tabletmanagerdata.ExecuteHookResponse + (*GetSchemaRequest)(nil), // 13: tabletmanagerdata.GetSchemaRequest + (*GetSchemaResponse)(nil), // 14: tabletmanagerdata.GetSchemaResponse + (*GetPermissionsRequest)(nil), // 15: tabletmanagerdata.GetPermissionsRequest + (*GetPermissionsResponse)(nil), // 16: tabletmanagerdata.GetPermissionsResponse + (*SetReadOnlyRequest)(nil), // 17: tabletmanagerdata.SetReadOnlyRequest + (*SetReadOnlyResponse)(nil), // 18: tabletmanagerdata.SetReadOnlyResponse + (*SetReadWriteRequest)(nil), // 19: tabletmanagerdata.SetReadWriteRequest + (*SetReadWriteResponse)(nil), // 20: tabletmanagerdata.SetReadWriteResponse + (*ChangeTypeRequest)(nil), // 21: tabletmanagerdata.ChangeTypeRequest + (*ChangeTypeResponse)(nil), // 22: tabletmanagerdata.ChangeTypeResponse + (*RefreshStateRequest)(nil), // 23: tabletmanagerdata.RefreshStateRequest + (*RefreshStateResponse)(nil), // 24: tabletmanagerdata.RefreshStateResponse + (*RunHealthCheckRequest)(nil), // 25: tabletmanagerdata.RunHealthCheckRequest + (*RunHealthCheckResponse)(nil), // 26: tabletmanagerdata.RunHealthCheckResponse + (*ReloadSchemaRequest)(nil), // 27: tabletmanagerdata.ReloadSchemaRequest + (*ReloadSchemaResponse)(nil), // 28: tabletmanagerdata.ReloadSchemaResponse + (*PreflightSchemaRequest)(nil), // 29: tabletmanagerdata.PreflightSchemaRequest + (*PreflightSchemaResponse)(nil), // 30: tabletmanagerdata.PreflightSchemaResponse + (*ApplySchemaRequest)(nil), // 31: tabletmanagerdata.ApplySchemaRequest + (*ApplySchemaResponse)(nil), // 32: tabletmanagerdata.ApplySchemaResponse + (*LockTablesRequest)(nil), // 33: tabletmanagerdata.LockTablesRequest + (*LockTablesResponse)(nil), // 34: tabletmanagerdata.LockTablesResponse + (*UnlockTablesRequest)(nil), // 35: tabletmanagerdata.UnlockTablesRequest + (*UnlockTablesResponse)(nil), // 36: tabletmanagerdata.UnlockTablesResponse + (*ExecuteQueryRequest)(nil), // 37: tabletmanagerdata.ExecuteQueryRequest + (*ExecuteQueryResponse)(nil), // 38: tabletmanagerdata.ExecuteQueryResponse + (*ExecuteFetchAsDbaRequest)(nil), // 39: tabletmanagerdata.ExecuteFetchAsDbaRequest + (*ExecuteFetchAsDbaResponse)(nil), // 40: tabletmanagerdata.ExecuteFetchAsDbaResponse + (*ExecuteFetchAsAllPrivsRequest)(nil), // 41: tabletmanagerdata.ExecuteFetchAsAllPrivsRequest + (*ExecuteFetchAsAllPrivsResponse)(nil), // 42: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse + (*ExecuteFetchAsAppRequest)(nil), // 43: tabletmanagerdata.ExecuteFetchAsAppRequest + (*ExecuteFetchAsAppResponse)(nil), // 44: tabletmanagerdata.ExecuteFetchAsAppResponse + (*ReplicationStatusRequest)(nil), // 45: tabletmanagerdata.ReplicationStatusRequest + (*ReplicationStatusResponse)(nil), // 46: tabletmanagerdata.ReplicationStatusResponse + (*PrimaryStatusRequest)(nil), // 47: tabletmanagerdata.PrimaryStatusRequest + (*PrimaryStatusResponse)(nil), // 48: tabletmanagerdata.PrimaryStatusResponse + (*PrimaryPositionRequest)(nil), // 49: tabletmanagerdata.PrimaryPositionRequest + (*PrimaryPositionResponse)(nil), // 50: tabletmanagerdata.PrimaryPositionResponse + (*WaitForPositionRequest)(nil), // 51: tabletmanagerdata.WaitForPositionRequest + (*WaitForPositionResponse)(nil), // 52: tabletmanagerdata.WaitForPositionResponse + (*StopReplicationRequest)(nil), // 53: tabletmanagerdata.StopReplicationRequest + (*StopReplicationResponse)(nil), // 54: tabletmanagerdata.StopReplicationResponse + (*StopReplicationMinimumRequest)(nil), // 55: tabletmanagerdata.StopReplicationMinimumRequest + (*StopReplicationMinimumResponse)(nil), // 56: tabletmanagerdata.StopReplicationMinimumResponse + (*StartReplicationRequest)(nil), // 57: tabletmanagerdata.StartReplicationRequest + (*StartReplicationResponse)(nil), // 58: tabletmanagerdata.StartReplicationResponse + (*StartReplicationUntilAfterRequest)(nil), // 59: tabletmanagerdata.StartReplicationUntilAfterRequest + (*StartReplicationUntilAfterResponse)(nil), // 60: tabletmanagerdata.StartReplicationUntilAfterResponse + (*GetReplicasRequest)(nil), // 61: tabletmanagerdata.GetReplicasRequest + (*GetReplicasResponse)(nil), // 62: tabletmanagerdata.GetReplicasResponse + (*ResetReplicationRequest)(nil), // 63: tabletmanagerdata.ResetReplicationRequest + (*ResetReplicationResponse)(nil), // 64: tabletmanagerdata.ResetReplicationResponse + (*VReplicationExecRequest)(nil), // 65: tabletmanagerdata.VReplicationExecRequest + (*VReplicationExecResponse)(nil), // 66: tabletmanagerdata.VReplicationExecResponse + (*VReplicationWaitForPosRequest)(nil), // 67: tabletmanagerdata.VReplicationWaitForPosRequest + (*VReplicationWaitForPosResponse)(nil), // 68: tabletmanagerdata.VReplicationWaitForPosResponse + (*InitPrimaryRequest)(nil), // 69: tabletmanagerdata.InitPrimaryRequest + (*InitPrimaryResponse)(nil), // 70: tabletmanagerdata.InitPrimaryResponse + (*PopulateReparentJournalRequest)(nil), // 71: tabletmanagerdata.PopulateReparentJournalRequest + (*PopulateReparentJournalResponse)(nil), // 72: tabletmanagerdata.PopulateReparentJournalResponse + (*InitReplicaRequest)(nil), // 73: tabletmanagerdata.InitReplicaRequest + (*InitReplicaResponse)(nil), // 74: tabletmanagerdata.InitReplicaResponse + (*DemotePrimaryRequest)(nil), // 75: tabletmanagerdata.DemotePrimaryRequest + (*DemotePrimaryResponse)(nil), // 76: tabletmanagerdata.DemotePrimaryResponse + (*UndoDemotePrimaryRequest)(nil), // 77: tabletmanagerdata.UndoDemotePrimaryRequest + (*UndoDemotePrimaryResponse)(nil), // 78: tabletmanagerdata.UndoDemotePrimaryResponse + (*ReplicaWasPromotedRequest)(nil), // 79: tabletmanagerdata.ReplicaWasPromotedRequest + (*ReplicaWasPromotedResponse)(nil), // 80: tabletmanagerdata.ReplicaWasPromotedResponse + (*ResetReplicationParametersRequest)(nil), // 81: tabletmanagerdata.ResetReplicationParametersRequest + (*ResetReplicationParametersResponse)(nil), // 82: tabletmanagerdata.ResetReplicationParametersResponse + (*FullStatusRequest)(nil), // 83: tabletmanagerdata.FullStatusRequest + (*FullStatusResponse)(nil), // 84: tabletmanagerdata.FullStatusResponse + (*SetReplicationSourceRequest)(nil), // 85: tabletmanagerdata.SetReplicationSourceRequest + (*SetReplicationSourceResponse)(nil), // 86: tabletmanagerdata.SetReplicationSourceResponse + (*ReplicaWasRestartedRequest)(nil), // 87: tabletmanagerdata.ReplicaWasRestartedRequest + (*ReplicaWasRestartedResponse)(nil), // 88: tabletmanagerdata.ReplicaWasRestartedResponse + (*StopReplicationAndGetStatusRequest)(nil), // 89: tabletmanagerdata.StopReplicationAndGetStatusRequest + (*StopReplicationAndGetStatusResponse)(nil), // 90: tabletmanagerdata.StopReplicationAndGetStatusResponse + (*PromoteReplicaRequest)(nil), // 91: tabletmanagerdata.PromoteReplicaRequest + (*PromoteReplicaResponse)(nil), // 92: tabletmanagerdata.PromoteReplicaResponse + (*BackupRequest)(nil), // 93: tabletmanagerdata.BackupRequest + (*BackupResponse)(nil), // 94: tabletmanagerdata.BackupResponse + (*RestoreFromBackupRequest)(nil), // 95: tabletmanagerdata.RestoreFromBackupRequest + (*RestoreFromBackupResponse)(nil), // 96: tabletmanagerdata.RestoreFromBackupResponse + (*CreateVReplicationWorkflowRequest)(nil), // 97: tabletmanagerdata.CreateVReplicationWorkflowRequest + (*CreateVReplicationWorkflowResponse)(nil), // 98: tabletmanagerdata.CreateVReplicationWorkflowResponse + (*DeleteVReplicationWorkflowRequest)(nil), // 99: tabletmanagerdata.DeleteVReplicationWorkflowRequest + (*DeleteVReplicationWorkflowResponse)(nil), // 100: tabletmanagerdata.DeleteVReplicationWorkflowResponse + (*ReadVReplicationWorkflowRequest)(nil), // 101: tabletmanagerdata.ReadVReplicationWorkflowRequest + (*ReadVReplicationWorkflowResponse)(nil), // 102: tabletmanagerdata.ReadVReplicationWorkflowResponse + (*VDiffRequest)(nil), // 103: tabletmanagerdata.VDiffRequest + (*VDiffResponse)(nil), // 104: tabletmanagerdata.VDiffResponse + (*VDiffPickerOptions)(nil), // 105: tabletmanagerdata.VDiffPickerOptions + (*VDiffReportOptions)(nil), // 106: tabletmanagerdata.VDiffReportOptions + (*VDiffCoreOptions)(nil), // 107: tabletmanagerdata.VDiffCoreOptions + (*VDiffOptions)(nil), // 108: tabletmanagerdata.VDiffOptions + (*UpdateVReplicationWorkflowRequest)(nil), // 109: tabletmanagerdata.UpdateVReplicationWorkflowRequest + (*UpdateVReplicationWorkflowResponse)(nil), // 110: tabletmanagerdata.UpdateVReplicationWorkflowResponse + (*ResetSequencesRequest)(nil), // 111: tabletmanagerdata.ResetSequencesRequest + (*ResetSequencesResponse)(nil), // 112: tabletmanagerdata.ResetSequencesResponse + (*CheckThrottlerRequest)(nil), // 113: tabletmanagerdata.CheckThrottlerRequest + (*CheckThrottlerResponse)(nil), // 114: tabletmanagerdata.CheckThrottlerResponse + nil, // 115: tabletmanagerdata.UserPermission.PrivilegesEntry + nil, // 116: tabletmanagerdata.DbPermission.PrivilegesEntry + nil, // 117: tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry + (*ReadVReplicationWorkflowResponse_Stream)(nil), // 118: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + (*query.Field)(nil), // 119: query.Field + (topodata.TabletType)(0), // 120: topodata.TabletType + (*vtrpc.CallerID)(nil), // 121: vtrpc.CallerID + (*query.QueryResult)(nil), // 122: query.QueryResult + (*replicationdata.Status)(nil), // 123: replicationdata.Status + (*replicationdata.PrimaryStatus)(nil), // 124: replicationdata.PrimaryStatus + (*topodata.TabletAlias)(nil), // 125: topodata.TabletAlias + (*replicationdata.FullStatus)(nil), // 126: replicationdata.FullStatus + (replicationdata.StopReplicationMode)(0), // 127: replicationdata.StopReplicationMode + (*replicationdata.StopReplicationStatus)(nil), // 128: replicationdata.StopReplicationStatus + (*logutil.Event)(nil), // 129: logutil.Event + (*vttime.Time)(nil), // 130: vttime.Time + (*binlogdata.BinlogSource)(nil), // 131: binlogdata.BinlogSource + (binlogdata.VReplicationWorkflowType)(0), // 132: binlogdata.VReplicationWorkflowType + (binlogdata.VReplicationWorkflowSubType)(0), // 133: binlogdata.VReplicationWorkflowSubType + (binlogdata.OnDDLAction)(0), // 134: binlogdata.OnDDLAction + (binlogdata.VReplicationWorkflowState)(0), // 135: binlogdata.VReplicationWorkflowState } var file_tabletmanagerdata_proto_depIdxs = []int32{ - 105, // 0: tabletmanagerdata.TableDefinition.fields:type_name -> query.Field - 0, // 1: tabletmanagerdata.SchemaDefinition.table_definitions:type_name -> tabletmanagerdata.TableDefinition - 1, // 2: tabletmanagerdata.SchemaChangeResult.before_schema:type_name -> tabletmanagerdata.SchemaDefinition - 1, // 3: tabletmanagerdata.SchemaChangeResult.after_schema:type_name -> tabletmanagerdata.SchemaDefinition - 102, // 4: tabletmanagerdata.UserPermission.privileges:type_name -> tabletmanagerdata.UserPermission.PrivilegesEntry - 103, // 5: tabletmanagerdata.DbPermission.privileges:type_name -> tabletmanagerdata.DbPermission.PrivilegesEntry - 3, // 6: tabletmanagerdata.Permissions.user_permissions:type_name -> tabletmanagerdata.UserPermission - 4, // 7: tabletmanagerdata.Permissions.db_permissions:type_name -> tabletmanagerdata.DbPermission - 104, // 8: tabletmanagerdata.ExecuteHookRequest.extra_env:type_name -> tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry - 1, // 9: tabletmanagerdata.GetSchemaResponse.schema_definition:type_name -> tabletmanagerdata.SchemaDefinition - 5, // 10: tabletmanagerdata.GetPermissionsResponse.permissions:type_name -> tabletmanagerdata.Permissions - 106, // 11: tabletmanagerdata.ChangeTypeRequest.tablet_type:type_name -> topodata.TabletType - 2, // 12: tabletmanagerdata.PreflightSchemaResponse.change_results:type_name -> tabletmanagerdata.SchemaChangeResult - 1, // 13: tabletmanagerdata.ApplySchemaRequest.before_schema:type_name -> tabletmanagerdata.SchemaDefinition - 1, // 14: tabletmanagerdata.ApplySchemaRequest.after_schema:type_name -> tabletmanagerdata.SchemaDefinition - 1, // 15: tabletmanagerdata.ApplySchemaResponse.before_schema:type_name -> tabletmanagerdata.SchemaDefinition - 1, // 16: tabletmanagerdata.ApplySchemaResponse.after_schema:type_name -> tabletmanagerdata.SchemaDefinition - 107, // 17: tabletmanagerdata.ExecuteQueryRequest.caller_id:type_name -> vtrpc.CallerID - 108, // 18: tabletmanagerdata.ExecuteQueryResponse.result:type_name -> query.QueryResult - 108, // 19: tabletmanagerdata.ExecuteFetchAsDbaResponse.result:type_name -> query.QueryResult - 108, // 20: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse.result:type_name -> query.QueryResult - 108, // 21: tabletmanagerdata.ExecuteFetchAsAppResponse.result:type_name -> query.QueryResult - 109, // 22: tabletmanagerdata.ReplicationStatusResponse.status:type_name -> replicationdata.Status - 110, // 23: tabletmanagerdata.PrimaryStatusResponse.status:type_name -> replicationdata.PrimaryStatus - 108, // 24: tabletmanagerdata.VReplicationExecResponse.result:type_name -> query.QueryResult - 111, // 25: tabletmanagerdata.PopulateReparentJournalRequest.primary_alias:type_name -> topodata.TabletAlias - 111, // 26: tabletmanagerdata.InitReplicaRequest.parent:type_name -> topodata.TabletAlias - 110, // 27: tabletmanagerdata.DemotePrimaryResponse.primary_status:type_name -> replicationdata.PrimaryStatus - 112, // 28: tabletmanagerdata.FullStatusResponse.status:type_name -> replicationdata.FullStatus - 111, // 29: tabletmanagerdata.SetReplicationSourceRequest.parent:type_name -> topodata.TabletAlias - 111, // 30: tabletmanagerdata.ReplicaWasRestartedRequest.parent:type_name -> topodata.TabletAlias - 113, // 31: tabletmanagerdata.StopReplicationAndGetStatusRequest.stop_replication_mode:type_name -> replicationdata.StopReplicationMode - 114, // 32: tabletmanagerdata.StopReplicationAndGetStatusResponse.status:type_name -> replicationdata.StopReplicationStatus - 115, // 33: tabletmanagerdata.BackupResponse.event:type_name -> logutil.Event - 116, // 34: tabletmanagerdata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time - 115, // 35: tabletmanagerdata.RestoreFromBackupResponse.event:type_name -> logutil.Event - 101, // 36: tabletmanagerdata.VDiffRequest.options:type_name -> tabletmanagerdata.VDiffOptions - 108, // 37: tabletmanagerdata.VDiffResponse.output:type_name -> query.QueryResult - 98, // 38: tabletmanagerdata.VDiffOptions.picker_options:type_name -> tabletmanagerdata.VDiffPickerOptions - 100, // 39: tabletmanagerdata.VDiffOptions.core_options:type_name -> tabletmanagerdata.VDiffCoreOptions - 99, // 40: tabletmanagerdata.VDiffOptions.report_options:type_name -> tabletmanagerdata.VDiffReportOptions - 41, // [41:41] is the sub-list for method output_type - 41, // [41:41] is the sub-list for method input_type - 41, // [41:41] is the sub-list for extension type_name - 41, // [41:41] is the sub-list for extension extendee - 0, // [0:41] is the sub-list for field type_name + 119, // 0: tabletmanagerdata.TableDefinition.fields:type_name -> query.Field + 1, // 1: tabletmanagerdata.SchemaDefinition.table_definitions:type_name -> tabletmanagerdata.TableDefinition + 2, // 2: tabletmanagerdata.SchemaChangeResult.before_schema:type_name -> tabletmanagerdata.SchemaDefinition + 2, // 3: tabletmanagerdata.SchemaChangeResult.after_schema:type_name -> tabletmanagerdata.SchemaDefinition + 115, // 4: tabletmanagerdata.UserPermission.privileges:type_name -> tabletmanagerdata.UserPermission.PrivilegesEntry + 116, // 5: tabletmanagerdata.DbPermission.privileges:type_name -> tabletmanagerdata.DbPermission.PrivilegesEntry + 4, // 6: tabletmanagerdata.Permissions.user_permissions:type_name -> tabletmanagerdata.UserPermission + 5, // 7: tabletmanagerdata.Permissions.db_permissions:type_name -> tabletmanagerdata.DbPermission + 117, // 8: tabletmanagerdata.ExecuteHookRequest.extra_env:type_name -> tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry + 2, // 9: tabletmanagerdata.GetSchemaResponse.schema_definition:type_name -> tabletmanagerdata.SchemaDefinition + 6, // 10: tabletmanagerdata.GetPermissionsResponse.permissions:type_name -> tabletmanagerdata.Permissions + 120, // 11: tabletmanagerdata.ChangeTypeRequest.tablet_type:type_name -> topodata.TabletType + 3, // 12: tabletmanagerdata.PreflightSchemaResponse.change_results:type_name -> tabletmanagerdata.SchemaChangeResult + 2, // 13: tabletmanagerdata.ApplySchemaRequest.before_schema:type_name -> tabletmanagerdata.SchemaDefinition + 2, // 14: tabletmanagerdata.ApplySchemaRequest.after_schema:type_name -> tabletmanagerdata.SchemaDefinition + 2, // 15: tabletmanagerdata.ApplySchemaResponse.before_schema:type_name -> tabletmanagerdata.SchemaDefinition + 2, // 16: tabletmanagerdata.ApplySchemaResponse.after_schema:type_name -> tabletmanagerdata.SchemaDefinition + 121, // 17: tabletmanagerdata.ExecuteQueryRequest.caller_id:type_name -> vtrpc.CallerID + 122, // 18: tabletmanagerdata.ExecuteQueryResponse.result:type_name -> query.QueryResult + 122, // 19: tabletmanagerdata.ExecuteFetchAsDbaResponse.result:type_name -> query.QueryResult + 122, // 20: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse.result:type_name -> query.QueryResult + 122, // 21: tabletmanagerdata.ExecuteFetchAsAppResponse.result:type_name -> query.QueryResult + 123, // 22: tabletmanagerdata.ReplicationStatusResponse.status:type_name -> replicationdata.Status + 124, // 23: tabletmanagerdata.PrimaryStatusResponse.status:type_name -> replicationdata.PrimaryStatus + 122, // 24: tabletmanagerdata.VReplicationExecResponse.result:type_name -> query.QueryResult + 125, // 25: tabletmanagerdata.PopulateReparentJournalRequest.primary_alias:type_name -> topodata.TabletAlias + 125, // 26: tabletmanagerdata.InitReplicaRequest.parent:type_name -> topodata.TabletAlias + 124, // 27: tabletmanagerdata.DemotePrimaryResponse.primary_status:type_name -> replicationdata.PrimaryStatus + 126, // 28: tabletmanagerdata.FullStatusResponse.status:type_name -> replicationdata.FullStatus + 125, // 29: tabletmanagerdata.SetReplicationSourceRequest.parent:type_name -> topodata.TabletAlias + 125, // 30: tabletmanagerdata.ReplicaWasRestartedRequest.parent:type_name -> topodata.TabletAlias + 127, // 31: tabletmanagerdata.StopReplicationAndGetStatusRequest.stop_replication_mode:type_name -> replicationdata.StopReplicationMode + 128, // 32: tabletmanagerdata.StopReplicationAndGetStatusResponse.status:type_name -> replicationdata.StopReplicationStatus + 129, // 33: tabletmanagerdata.BackupResponse.event:type_name -> logutil.Event + 130, // 34: tabletmanagerdata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time + 130, // 35: tabletmanagerdata.RestoreFromBackupRequest.restore_to_timestamp:type_name -> vttime.Time + 129, // 36: tabletmanagerdata.RestoreFromBackupResponse.event:type_name -> logutil.Event + 131, // 37: tabletmanagerdata.CreateVReplicationWorkflowRequest.binlog_source:type_name -> binlogdata.BinlogSource + 120, // 38: tabletmanagerdata.CreateVReplicationWorkflowRequest.tablet_types:type_name -> topodata.TabletType + 0, // 39: tabletmanagerdata.CreateVReplicationWorkflowRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 132, // 40: tabletmanagerdata.CreateVReplicationWorkflowRequest.workflow_type:type_name -> binlogdata.VReplicationWorkflowType + 133, // 41: tabletmanagerdata.CreateVReplicationWorkflowRequest.workflow_sub_type:type_name -> binlogdata.VReplicationWorkflowSubType + 122, // 42: tabletmanagerdata.CreateVReplicationWorkflowResponse.result:type_name -> query.QueryResult + 122, // 43: tabletmanagerdata.DeleteVReplicationWorkflowResponse.result:type_name -> query.QueryResult + 120, // 44: tabletmanagerdata.ReadVReplicationWorkflowResponse.tablet_types:type_name -> topodata.TabletType + 0, // 45: tabletmanagerdata.ReadVReplicationWorkflowResponse.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 132, // 46: tabletmanagerdata.ReadVReplicationWorkflowResponse.workflow_type:type_name -> binlogdata.VReplicationWorkflowType + 133, // 47: tabletmanagerdata.ReadVReplicationWorkflowResponse.workflow_sub_type:type_name -> binlogdata.VReplicationWorkflowSubType + 118, // 48: tabletmanagerdata.ReadVReplicationWorkflowResponse.streams:type_name -> tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + 108, // 49: tabletmanagerdata.VDiffRequest.options:type_name -> tabletmanagerdata.VDiffOptions + 122, // 50: tabletmanagerdata.VDiffResponse.output:type_name -> query.QueryResult + 105, // 51: tabletmanagerdata.VDiffOptions.picker_options:type_name -> tabletmanagerdata.VDiffPickerOptions + 107, // 52: tabletmanagerdata.VDiffOptions.core_options:type_name -> tabletmanagerdata.VDiffCoreOptions + 106, // 53: tabletmanagerdata.VDiffOptions.report_options:type_name -> tabletmanagerdata.VDiffReportOptions + 120, // 54: tabletmanagerdata.UpdateVReplicationWorkflowRequest.tablet_types:type_name -> topodata.TabletType + 0, // 55: tabletmanagerdata.UpdateVReplicationWorkflowRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 134, // 56: tabletmanagerdata.UpdateVReplicationWorkflowRequest.on_ddl:type_name -> binlogdata.OnDDLAction + 135, // 57: tabletmanagerdata.UpdateVReplicationWorkflowRequest.state:type_name -> binlogdata.VReplicationWorkflowState + 122, // 58: tabletmanagerdata.UpdateVReplicationWorkflowResponse.result:type_name -> query.QueryResult + 131, // 59: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.bls:type_name -> binlogdata.BinlogSource + 130, // 60: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.time_updated:type_name -> vttime.Time + 130, // 61: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.transaction_timestamp:type_name -> vttime.Time + 135, // 62: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.state:type_name -> binlogdata.VReplicationWorkflowState + 130, // 63: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.time_heartbeat:type_name -> vttime.Time + 130, // 64: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.time_throttled:type_name -> vttime.Time + 65, // [65:65] is the sub-list for method output_type + 65, // [65:65] is the sub-list for method input_type + 65, // [65:65] is the sub-list for extension type_name + 65, // [65:65] is the sub-list for extension extendee + 0, // [0:65] is the sub-list for field type_name } func init() { file_tabletmanagerdata_proto_init() } @@ -7031,7 +8291,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VDiffRequest); i { + switch v := v.(*CreateVReplicationWorkflowRequest); i { case 0: return &v.state case 1: @@ -7043,7 +8303,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VDiffResponse); i { + switch v := v.(*CreateVReplicationWorkflowResponse); i { case 0: return &v.state case 1: @@ -7055,7 +8315,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VDiffPickerOptions); i { + switch v := v.(*DeleteVReplicationWorkflowRequest); i { case 0: return &v.state case 1: @@ -7067,7 +8327,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VDiffReportOptions); i { + switch v := v.(*DeleteVReplicationWorkflowResponse); i { case 0: return &v.state case 1: @@ -7079,7 +8339,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VDiffCoreOptions); i { + switch v := v.(*ReadVReplicationWorkflowRequest); i { case 0: return &v.state case 1: @@ -7091,6 +8351,78 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadVReplicationWorkflowResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[102].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffPickerOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffReportOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VDiffCoreOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[107].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VDiffOptions); i { case 0: return &v.state @@ -7102,19 +8434,104 @@ func file_tabletmanagerdata_proto_init() { return nil } } + file_tabletmanagerdata_proto_msgTypes[108].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateVReplicationWorkflowRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateVReplicationWorkflowResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[110].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResetSequencesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[111].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResetSequencesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[112].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckThrottlerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[113].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckThrottlerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[117].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadVReplicationWorkflowResponse_Stream); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_tabletmanagerdata_proto_rawDesc, - NumEnums: 0, - NumMessages: 105, + NumEnums: 1, + NumMessages: 118, NumExtensions: 0, NumServices: 0, }, GoTypes: file_tabletmanagerdata_proto_goTypes, DependencyIndexes: file_tabletmanagerdata_proto_depIdxs, + EnumInfos: file_tabletmanagerdata_proto_enumTypes, MessageInfos: file_tabletmanagerdata_proto_msgTypes, }.Build() File_tabletmanagerdata_proto = out.File diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go index 3a1c46f657d..502a4c17ff9 100644 --- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go +++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go @@ -1,14 +1,18 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: tabletmanagerdata.proto package tabletmanagerdata import ( + binary "encoding/binary" fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" + math "math" bits "math/bits" + binlogdata "vitess.io/vitess/go/vt/proto/binlogdata" logutil "vitess.io/vitess/go/vt/proto/logutil" query "vitess.io/vitess/go/vt/proto/query" replicationdata "vitess.io/vitess/go/vt/proto/replicationdata" @@ -24,2290 +28,2242 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -func (m *TableDefinition) MarshalVT() (dAtA []byte, err error) { +func (m *TableDefinition) CloneVT() *TableDefinition { if m == nil { - return nil, nil + return (*TableDefinition)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &TableDefinition{ + Name: m.Name, + Schema: m.Schema, + Type: m.Type, + DataLength: m.DataLength, + RowCount: m.RowCount, } - return dAtA[:n], nil -} - -func (m *TableDefinition) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *TableDefinition) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil + if rhs := m.Columns; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Columns = tmpContainer } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + if rhs := m.PrimaryKeyColumns; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.PrimaryKeyColumns = tmpContainer } - if len(m.Fields) > 0 { - for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Fields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x42 + if rhs := m.Fields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } + r.Fields = tmpContainer } - if m.RowCount != 0 { - i = encodeVarint(dAtA, i, uint64(m.RowCount)) - i-- - dAtA[i] = 0x38 - } - if m.DataLength != 0 { - i = encodeVarint(dAtA, i, uint64(m.DataLength)) - i-- - dAtA[i] = 0x30 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarint(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x2a + return r +} + +func (m *TableDefinition) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SchemaDefinition) CloneVT() *SchemaDefinition { + if m == nil { + return (*SchemaDefinition)(nil) } - if len(m.PrimaryKeyColumns) > 0 { - for iNdEx := len(m.PrimaryKeyColumns) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PrimaryKeyColumns[iNdEx]) - copy(dAtA[i:], m.PrimaryKeyColumns[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.PrimaryKeyColumns[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + r := &SchemaDefinition{ + DatabaseSchema: m.DatabaseSchema, } - if len(m.Columns) > 0 { - for iNdEx := len(m.Columns) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Columns[iNdEx]) - copy(dAtA[i:], m.Columns[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Columns[iNdEx]))) - i-- - dAtA[i] = 0x1a + if rhs := m.TableDefinitions; rhs != nil { + tmpContainer := make([]*TableDefinition, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } + r.TableDefinitions = tmpContainer } - if len(m.Schema) > 0 { - i -= len(m.Schema) - copy(dAtA[i:], m.Schema) - i = encodeVarint(dAtA, i, uint64(len(m.Schema))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *SchemaDefinition) MarshalVT() (dAtA []byte, err error) { +func (m *SchemaDefinition) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SchemaChangeResult) CloneVT() *SchemaChangeResult { if m == nil { - return nil, nil + return (*SchemaChangeResult)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SchemaChangeResult{ + BeforeSchema: m.BeforeSchema.CloneVT(), + AfterSchema: m.AfterSchema.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SchemaDefinition) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SchemaChangeResult) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *SchemaDefinition) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UserPermission) CloneVT() *UserPermission { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*UserPermission)(nil) } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarint(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x1a + r := &UserPermission{ + Host: m.Host, + User: m.User, + PasswordChecksum: m.PasswordChecksum, } - if len(m.TableDefinitions) > 0 { - for iNdEx := len(m.TableDefinitions) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.TableDefinitions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + if rhs := m.Privileges; rhs != nil { + tmpContainer := make(map[string]string, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v } + r.Privileges = tmpContainer } - if len(m.DatabaseSchema) > 0 { - i -= len(m.DatabaseSchema) - copy(dAtA[i:], m.DatabaseSchema) - i = encodeVarint(dAtA, i, uint64(len(m.DatabaseSchema))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *SchemaChangeResult) MarshalVT() (dAtA []byte, err error) { +func (m *UserPermission) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DbPermission) CloneVT() *DbPermission { if m == nil { - return nil, nil + return (*DbPermission)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DbPermission{ + Host: m.Host, + Db: m.Db, + User: m.User, } - return dAtA[:n], nil + if rhs := m.Privileges; rhs != nil { + tmpContainer := make(map[string]string, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.Privileges = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SchemaChangeResult) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DbPermission) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *SchemaChangeResult) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Permissions) CloneVT() *Permissions { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*Permissions)(nil) } - if m.AfterSchema != nil { - size, err := m.AfterSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + r := &Permissions{} + if rhs := m.UserPermissions; rhs != nil { + tmpContainer := make([]*UserPermission, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + r.UserPermissions = tmpContainer } - if m.BeforeSchema != nil { - size, err := m.BeforeSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if rhs := m.DbPermissions; rhs != nil { + tmpContainer := make([]*DbPermission, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r.DbPermissions = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *UserPermission) MarshalVT() (dAtA []byte, err error) { +func (m *Permissions) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PingRequest) CloneVT() *PingRequest { if m == nil { - return nil, nil + return (*PingRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &PingRequest{ + Payload: m.Payload, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *UserPermission) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *PingRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *UserPermission) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PingResponse) CloneVT() *PingResponse { if m == nil { - return 0, nil + return (*PingResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &PingResponse{ + Payload: m.Payload, } - if len(m.Privileges) > 0 { - for k := range m.Privileges { - v := m.Privileges[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarint(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.PasswordChecksum != 0 { - i = encodeVarint(dAtA, i, uint64(m.PasswordChecksum)) - i-- - dAtA[i] = 0x18 + return r +} + +func (m *PingResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SleepRequest) CloneVT() *SleepRequest { + if m == nil { + return (*SleepRequest)(nil) } - if len(m.User) > 0 { - i -= len(m.User) - copy(dAtA[i:], m.User) - i = encodeVarint(dAtA, i, uint64(len(m.User))) - i-- - dAtA[i] = 0x12 + r := &SleepRequest{ + Duration: m.Duration, } - if len(m.Host) > 0 { - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarint(dAtA, i, uint64(len(m.Host))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *DbPermission) MarshalVT() (dAtA []byte, err error) { +func (m *SleepRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SleepResponse) CloneVT() *SleepResponse { if m == nil { - return nil, nil + return (*SleepResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SleepResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *DbPermission) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SleepResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DbPermission) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteHookRequest) CloneVT() *ExecuteHookRequest { if m == nil { - return 0, nil + return (*ExecuteHookRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ExecuteHookRequest{ + Name: m.Name, } - if len(m.Privileges) > 0 { - for k := range m.Privileges { - v := m.Privileges[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarint(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 + if rhs := m.Parameters; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Parameters = tmpContainer + } + if rhs := m.ExtraEnv; rhs != nil { + tmpContainer := make(map[string]string, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v } + r.ExtraEnv = tmpContainer } - if len(m.User) > 0 { - i -= len(m.User) - copy(dAtA[i:], m.User) - i = encodeVarint(dAtA, i, uint64(len(m.User))) - i-- - dAtA[i] = 0x1a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Db) > 0 { - i -= len(m.Db) - copy(dAtA[i:], m.Db) - i = encodeVarint(dAtA, i, uint64(len(m.Db))) - i-- - dAtA[i] = 0x12 + return r +} + +func (m *ExecuteHookRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteHookResponse) CloneVT() *ExecuteHookResponse { + if m == nil { + return (*ExecuteHookResponse)(nil) } - if len(m.Host) > 0 { - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarint(dAtA, i, uint64(len(m.Host))) - i-- - dAtA[i] = 0xa + r := &ExecuteHookResponse{ + ExitStatus: m.ExitStatus, + Stdout: m.Stdout, + Stderr: m.Stderr, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Permissions) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteHookResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemaRequest) CloneVT() *GetSchemaRequest { if m == nil { - return nil, nil + return (*GetSchemaRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetSchemaRequest{ + IncludeViews: m.IncludeViews, + TableSchemaOnly: m.TableSchemaOnly, } - return dAtA[:n], nil + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tables = tmpContainer + } + if rhs := m.ExcludeTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ExcludeTables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Permissions) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Permissions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSchemaResponse) CloneVT() *GetSchemaResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*GetSchemaResponse)(nil) } - if len(m.DbPermissions) > 0 { - for iNdEx := len(m.DbPermissions) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.DbPermissions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - } + r := &GetSchemaResponse{ + SchemaDefinition: m.SchemaDefinition.CloneVT(), } - if len(m.UserPermissions) > 0 { - for iNdEx := len(m.UserPermissions) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.UserPermissions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *PingRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetPermissionsRequest) CloneVT() *GetPermissionsRequest { if m == nil { - return nil, nil + return (*GetPermissionsRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetPermissionsRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *PingRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetPermissionsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *PingRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetPermissionsResponse) CloneVT() *GetPermissionsResponse { if m == nil { - return 0, nil + return (*GetPermissionsResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetPermissionsResponse{ + Permissions: m.Permissions.CloneVT(), } - if len(m.Payload) > 0 { - i -= len(m.Payload) - copy(dAtA[i:], m.Payload) - i = encodeVarint(dAtA, i, uint64(len(m.Payload))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *PingResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetPermissionsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetReadOnlyRequest) CloneVT() *SetReadOnlyRequest { if m == nil { - return nil, nil + return (*SetReadOnlyRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SetReadOnlyRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *PingResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SetReadOnlyRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *PingResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetReadOnlyResponse) CloneVT() *SetReadOnlyResponse { if m == nil { - return 0, nil + return (*SetReadOnlyResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SetReadOnlyResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Payload) > 0 { - i -= len(m.Payload) - copy(dAtA[i:], m.Payload) - i = encodeVarint(dAtA, i, uint64(len(m.Payload))) - i-- - dAtA[i] = 0xa + return r +} + +func (m *SetReadOnlyResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetReadWriteRequest) CloneVT() *SetReadWriteRequest { + if m == nil { + return (*SetReadWriteRequest)(nil) } - return len(dAtA) - i, nil + r := &SetReadWriteRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SleepRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SetReadWriteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetReadWriteResponse) CloneVT() *SetReadWriteResponse { if m == nil { - return nil, nil + return (*SetReadWriteResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SetReadWriteResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *SleepRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SetReadWriteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *SleepRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ChangeTypeRequest) CloneVT() *ChangeTypeRequest { if m == nil { - return 0, nil + return (*ChangeTypeRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ChangeTypeRequest{ + TabletType: m.TabletType, + SemiSync: m.SemiSync, } - if m.Duration != 0 { - i = encodeVarint(dAtA, i, uint64(m.Duration)) - i-- - dAtA[i] = 0x8 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *SleepResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ChangeTypeRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ChangeTypeResponse) CloneVT() *ChangeTypeResponse { if m == nil { - return nil, nil + return (*ChangeTypeResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ChangeTypeResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *SleepResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ChangeTypeResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *SleepResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RefreshStateRequest) CloneVT() *RefreshStateRequest { if m == nil { - return 0, nil + return (*RefreshStateRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RefreshStateRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteHookRequest) MarshalVT() (dAtA []byte, err error) { +func (m *RefreshStateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshStateResponse) CloneVT() *RefreshStateResponse { if m == nil { - return nil, nil + return (*RefreshStateResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RefreshStateResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ExecuteHookRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RefreshStateResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteHookRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RunHealthCheckRequest) CloneVT() *RunHealthCheckRequest { if m == nil { - return 0, nil + return (*RunHealthCheckRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RunHealthCheckRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.ExtraEnv) > 0 { - for k := range m.ExtraEnv { - v := m.ExtraEnv[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarint(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } + return r +} + +func (m *RunHealthCheckRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RunHealthCheckResponse) CloneVT() *RunHealthCheckResponse { + if m == nil { + return (*RunHealthCheckResponse)(nil) } - if len(m.Parameters) > 0 { - for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Parameters[iNdEx]) - copy(dAtA[i:], m.Parameters[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Parameters[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + r := &RunHealthCheckResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteHookResponse) MarshalVT() (dAtA []byte, err error) { +func (m *RunHealthCheckResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemaRequest) CloneVT() *ReloadSchemaRequest { if m == nil { - return nil, nil + return (*ReloadSchemaRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ReloadSchemaRequest{ + WaitPosition: m.WaitPosition, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteHookResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ReloadSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteHookResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReloadSchemaResponse) CloneVT() *ReloadSchemaResponse { if m == nil { - return 0, nil + return (*ReloadSchemaResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReloadSchemaResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Stderr) > 0 { - i -= len(m.Stderr) - copy(dAtA[i:], m.Stderr) - i = encodeVarint(dAtA, i, uint64(len(m.Stderr))) - i-- - dAtA[i] = 0x1a + return r +} + +func (m *ReloadSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PreflightSchemaRequest) CloneVT() *PreflightSchemaRequest { + if m == nil { + return (*PreflightSchemaRequest)(nil) } - if len(m.Stdout) > 0 { - i -= len(m.Stdout) - copy(dAtA[i:], m.Stdout) - i = encodeVarint(dAtA, i, uint64(len(m.Stdout))) - i-- - dAtA[i] = 0x12 + r := &PreflightSchemaRequest{} + if rhs := m.Changes; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Changes = tmpContainer } - if m.ExitStatus != 0 { - i = encodeVarint(dAtA, i, uint64(m.ExitStatus)) - i-- - dAtA[i] = 0x8 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *PreflightSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PreflightSchemaResponse) CloneVT() *PreflightSchemaResponse { if m == nil { - return nil, nil + return (*PreflightSchemaResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &PreflightSchemaResponse{} + if rhs := m.ChangeResults; rhs != nil { + tmpContainer := make([]*SchemaChangeResult, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ChangeResults = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *PreflightSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplySchemaRequest) CloneVT() *ApplySchemaRequest { if m == nil { - return 0, nil + return (*ApplySchemaRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ApplySchemaRequest{ + Sql: m.Sql, + Force: m.Force, + AllowReplication: m.AllowReplication, + BeforeSchema: m.BeforeSchema.CloneVT(), + AfterSchema: m.AfterSchema.CloneVT(), + SqlMode: m.SqlMode, + BatchSize: m.BatchSize, } - if m.TableSchemaOnly { - i-- - if m.TableSchemaOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.ExcludeTables) > 0 { - for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ExcludeTables[iNdEx]) - copy(dAtA[i:], m.ExcludeTables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) - i-- - dAtA[i] = 0x1a - } + return r +} + +func (m *ApplySchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ApplySchemaResponse) CloneVT() *ApplySchemaResponse { + if m == nil { + return (*ApplySchemaResponse)(nil) } - if m.IncludeViews { - i-- - if m.IncludeViews { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + r := &ApplySchemaResponse{ + BeforeSchema: m.BeforeSchema.CloneVT(), + AfterSchema: m.AfterSchema.CloneVT(), } - if len(m.Tables) > 0 { - for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Tables[iNdEx]) - copy(dAtA[i:], m.Tables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ApplySchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *LockTablesRequest) CloneVT() *LockTablesRequest { if m == nil { - return nil, nil + return (*LockTablesRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &LockTablesRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *LockTablesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *LockTablesResponse) CloneVT() *LockTablesResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*LockTablesResponse)(nil) } - if m.SchemaDefinition != nil { - size, err := m.SchemaDefinition.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r := &LockTablesResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetPermissionsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *LockTablesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UnlockTablesRequest) CloneVT() *UnlockTablesRequest { if m == nil { - return nil, nil + return (*UnlockTablesRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &UnlockTablesRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetPermissionsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *UnlockTablesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetPermissionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UnlockTablesResponse) CloneVT() *UnlockTablesResponse { if m == nil { - return 0, nil + return (*UnlockTablesResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &UnlockTablesResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetPermissionsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *UnlockTablesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteQueryRequest) CloneVT() *ExecuteQueryRequest { if m == nil { - return nil, nil + return (*ExecuteQueryRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ExecuteQueryRequest{ + DbName: m.DbName, + MaxRows: m.MaxRows, + CallerId: m.CallerId.CloneVT(), } - return dAtA[:n], nil + if rhs := m.Query; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Query = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetPermissionsResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ExecuteQueryRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetPermissionsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteQueryResponse) CloneVT() *ExecuteQueryResponse { if m == nil { - return 0, nil + return (*ExecuteQueryResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ExecuteQueryResponse{ + Result: m.Result.CloneVT(), } - if m.Permissions != nil { - size, err := m.Permissions.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *SetReadOnlyRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteQueryResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteFetchAsDbaRequest) CloneVT() *ExecuteFetchAsDbaRequest { if m == nil { - return nil, nil + return (*ExecuteFetchAsDbaRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ExecuteFetchAsDbaRequest{ + DbName: m.DbName, + MaxRows: m.MaxRows, + DisableBinlogs: m.DisableBinlogs, + ReloadSchema: m.ReloadSchema, } - return dAtA[:n], nil + if rhs := m.Query; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Query = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SetReadOnlyRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ExecuteFetchAsDbaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *SetReadOnlyRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDbaResponse) CloneVT() *ExecuteFetchAsDbaResponse { if m == nil { - return 0, nil + return (*ExecuteFetchAsDbaResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ExecuteFetchAsDbaResponse{ + Result: m.Result.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SetReadOnlyResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsDbaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteFetchAsAllPrivsRequest) CloneVT() *ExecuteFetchAsAllPrivsRequest { if m == nil { - return nil, nil + return (*ExecuteFetchAsAllPrivsRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ExecuteFetchAsAllPrivsRequest{ + DbName: m.DbName, + MaxRows: m.MaxRows, + ReloadSchema: m.ReloadSchema, } - return dAtA[:n], nil + if rhs := m.Query; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Query = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SetReadOnlyResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ExecuteFetchAsAllPrivsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *SetReadOnlyResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAllPrivsResponse) CloneVT() *ExecuteFetchAsAllPrivsResponse { if m == nil { - return 0, nil + return (*ExecuteFetchAsAllPrivsResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ExecuteFetchAsAllPrivsResponse{ + Result: m.Result.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SetReadWriteRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAllPrivsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteFetchAsAppRequest) CloneVT() *ExecuteFetchAsAppRequest { if m == nil { - return nil, nil + return (*ExecuteFetchAsAppRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ExecuteFetchAsAppRequest{ + MaxRows: m.MaxRows, } - return dAtA[:n], nil + if rhs := m.Query; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Query = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SetReadWriteRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ExecuteFetchAsAppRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *SetReadWriteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppResponse) CloneVT() *ExecuteFetchAsAppResponse { if m == nil { - return 0, nil + return (*ExecuteFetchAsAppResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ExecuteFetchAsAppResponse{ + Result: m.Result.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *SetReadWriteResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAppResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReplicationStatusRequest) CloneVT() *ReplicationStatusRequest { if m == nil { - return nil, nil + return (*ReplicationStatusRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ReplicationStatusRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *SetReadWriteResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ReplicationStatusRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *SetReadWriteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReplicationStatusResponse) CloneVT() *ReplicationStatusResponse { if m == nil { - return 0, nil + return (*ReplicationStatusResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReplicationStatusResponse{ + Status: m.Status.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ChangeTypeRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ReplicationStatusResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PrimaryStatusRequest) CloneVT() *PrimaryStatusRequest { if m == nil { - return nil, nil + return (*PrimaryStatusRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &PrimaryStatusRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ChangeTypeRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *PrimaryStatusRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ChangeTypeRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PrimaryStatusResponse) CloneVT() *PrimaryStatusResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*PrimaryStatusResponse)(nil) } - if m.SemiSync { - i-- - if m.SemiSync { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + r := &PrimaryStatusResponse{ + Status: m.Status.CloneVT(), } - if m.TabletType != 0 { - i = encodeVarint(dAtA, i, uint64(m.TabletType)) - i-- - dAtA[i] = 0x8 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ChangeTypeResponse) MarshalVT() (dAtA []byte, err error) { +func (m *PrimaryStatusResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PrimaryPositionRequest) CloneVT() *PrimaryPositionRequest { if m == nil { - return nil, nil + return (*PrimaryPositionRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &PrimaryPositionRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ChangeTypeResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *PrimaryPositionRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ChangeTypeResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PrimaryPositionResponse) CloneVT() *PrimaryPositionResponse { if m == nil { - return 0, nil + return (*PrimaryPositionResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &PrimaryPositionResponse{ + Position: m.Position, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *RefreshStateRequest) MarshalVT() (dAtA []byte, err error) { +func (m *PrimaryPositionResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WaitForPositionRequest) CloneVT() *WaitForPositionRequest { if m == nil { - return nil, nil + return (*WaitForPositionRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &WaitForPositionRequest{ + Position: m.Position, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *RefreshStateRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *WaitForPositionRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *RefreshStateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *WaitForPositionResponse) CloneVT() *WaitForPositionResponse { if m == nil { - return 0, nil + return (*WaitForPositionResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &WaitForPositionResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *RefreshStateResponse) MarshalVT() (dAtA []byte, err error) { +func (m *WaitForPositionResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StopReplicationRequest) CloneVT() *StopReplicationRequest { if m == nil { - return nil, nil + return (*StopReplicationRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &StopReplicationRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *RefreshStateResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *StopReplicationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *RefreshStateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StopReplicationResponse) CloneVT() *StopReplicationResponse { if m == nil { - return 0, nil + return (*StopReplicationResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &StopReplicationResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *RunHealthCheckRequest) MarshalVT() (dAtA []byte, err error) { +func (m *StopReplicationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StopReplicationMinimumRequest) CloneVT() *StopReplicationMinimumRequest { if m == nil { - return nil, nil + return (*StopReplicationMinimumRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &StopReplicationMinimumRequest{ + Position: m.Position, + WaitTimeout: m.WaitTimeout, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *RunHealthCheckRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *StopReplicationMinimumRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *RunHealthCheckRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StopReplicationMinimumResponse) CloneVT() *StopReplicationMinimumResponse { if m == nil { - return 0, nil + return (*StopReplicationMinimumResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &StopReplicationMinimumResponse{ + Position: m.Position, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *RunHealthCheckResponse) MarshalVT() (dAtA []byte, err error) { +func (m *StopReplicationMinimumResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StartReplicationRequest) CloneVT() *StartReplicationRequest { if m == nil { - return nil, nil + return (*StartReplicationRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &StartReplicationRequest{ + SemiSync: m.SemiSync, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *RunHealthCheckResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *StartReplicationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *RunHealthCheckResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StartReplicationResponse) CloneVT() *StartReplicationResponse { if m == nil { - return 0, nil + return (*StartReplicationResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &StartReplicationResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ReloadSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *StartReplicationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StartReplicationUntilAfterRequest) CloneVT() *StartReplicationUntilAfterRequest { if m == nil { - return nil, nil + return (*StartReplicationUntilAfterRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &StartReplicationUntilAfterRequest{ + Position: m.Position, + WaitTimeout: m.WaitTimeout, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ReloadSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *StartReplicationUntilAfterRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ReloadSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StartReplicationUntilAfterResponse) CloneVT() *StartReplicationUntilAfterResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*StartReplicationUntilAfterResponse)(nil) } - if len(m.WaitPosition) > 0 { - i -= len(m.WaitPosition) - copy(dAtA[i:], m.WaitPosition) - i = encodeVarint(dAtA, i, uint64(len(m.WaitPosition))) - i-- - dAtA[i] = 0xa + r := &StartReplicationUntilAfterResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ReloadSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *StartReplicationUntilAfterResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetReplicasRequest) CloneVT() *GetReplicasRequest { if m == nil { - return nil, nil + return (*GetReplicasRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetReplicasRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ReloadSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetReplicasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ReloadSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetReplicasResponse) CloneVT() *GetReplicasResponse { if m == nil { - return 0, nil + return (*GetReplicasResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetReplicasResponse{} + if rhs := m.Addrs; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Addrs = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *PreflightSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetReplicasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResetReplicationRequest) CloneVT() *ResetReplicationRequest { if m == nil { - return nil, nil + return (*ResetReplicationRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ResetReplicationRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *PreflightSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ResetReplicationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *PreflightSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ResetReplicationResponse) CloneVT() *ResetReplicationResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*ResetReplicationResponse)(nil) } - if len(m.Changes) > 0 { - for iNdEx := len(m.Changes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Changes[iNdEx]) - copy(dAtA[i:], m.Changes[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Changes[iNdEx]))) - i-- - dAtA[i] = 0xa - } + r := &ResetReplicationResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *PreflightSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ResetReplicationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VReplicationExecRequest) CloneVT() *VReplicationExecRequest { if m == nil { - return nil, nil + return (*VReplicationExecRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &VReplicationExecRequest{ + Query: m.Query, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *PreflightSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *VReplicationExecRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *PreflightSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *VReplicationExecResponse) CloneVT() *VReplicationExecResponse { if m == nil { - return 0, nil + return (*VReplicationExecResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &VReplicationExecResponse{ + Result: m.Result.CloneVT(), } - if len(m.ChangeResults) > 0 { - for iNdEx := len(m.ChangeResults) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.ChangeResults[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ApplySchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *VReplicationExecResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VReplicationWaitForPosRequest) CloneVT() *VReplicationWaitForPosRequest { if m == nil { - return nil, nil + return (*VReplicationWaitForPosRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &VReplicationWaitForPosRequest{ + Id: m.Id, + Position: m.Position, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplySchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *VReplicationWaitForPosRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplySchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *VReplicationWaitForPosResponse) CloneVT() *VReplicationWaitForPosResponse { if m == nil { - return 0, nil + return (*VReplicationWaitForPosResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &VReplicationWaitForPosResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.SqlMode) > 0 { - i -= len(m.SqlMode) - copy(dAtA[i:], m.SqlMode) - i = encodeVarint(dAtA, i, uint64(len(m.SqlMode))) - i-- - dAtA[i] = 0x32 + return r +} + +func (m *VReplicationWaitForPosResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *InitPrimaryRequest) CloneVT() *InitPrimaryRequest { + if m == nil { + return (*InitPrimaryRequest)(nil) } - if m.AfterSchema != nil { - size, err := m.AfterSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a + r := &InitPrimaryRequest{ + SemiSync: m.SemiSync, } - if m.BeforeSchema != nil { - size, err := m.BeforeSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.AllowReplication { - i-- - if m.AllowReplication { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + return r +} + +func (m *InitPrimaryRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *InitPrimaryResponse) CloneVT() *InitPrimaryResponse { + if m == nil { + return (*InitPrimaryResponse)(nil) } - if m.Force { - i-- - if m.Force { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + r := &InitPrimaryResponse{ + Position: m.Position, } - if len(m.Sql) > 0 { - i -= len(m.Sql) - copy(dAtA[i:], m.Sql) - i = encodeVarint(dAtA, i, uint64(len(m.Sql))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ApplySchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *InitPrimaryResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PopulateReparentJournalRequest) CloneVT() *PopulateReparentJournalRequest { if m == nil { - return nil, nil + return (*PopulateReparentJournalRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &PopulateReparentJournalRequest{ + TimeCreatedNs: m.TimeCreatedNs, + ActionName: m.ActionName, + PrimaryAlias: m.PrimaryAlias.CloneVT(), + ReplicationPosition: m.ReplicationPosition, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplySchemaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *PopulateReparentJournalRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplySchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PopulateReparentJournalResponse) CloneVT() *PopulateReparentJournalResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if m.AfterSchema != nil { - size, err := m.AfterSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + return (*PopulateReparentJournalResponse)(nil) } - if m.BeforeSchema != nil { - size, err := m.BeforeSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r := &PopulateReparentJournalResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *LockTablesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *PopulateReparentJournalResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *InitReplicaRequest) CloneVT() *InitReplicaRequest { if m == nil { - return nil, nil + return (*InitReplicaRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &InitReplicaRequest{ + Parent: m.Parent.CloneVT(), + ReplicationPosition: m.ReplicationPosition, + TimeCreatedNs: m.TimeCreatedNs, + SemiSync: m.SemiSync, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *LockTablesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *InitReplicaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *LockTablesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *InitReplicaResponse) CloneVT() *InitReplicaResponse { if m == nil { - return 0, nil + return (*InitReplicaResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &InitReplicaResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *LockTablesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *InitReplicaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DemotePrimaryRequest) CloneVT() *DemotePrimaryRequest { if m == nil { - return nil, nil + return (*DemotePrimaryRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DemotePrimaryRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *LockTablesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DemotePrimaryRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *LockTablesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DemotePrimaryResponse) CloneVT() *DemotePrimaryResponse { if m == nil { - return 0, nil + return (*DemotePrimaryResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &DemotePrimaryResponse{ + PrimaryStatus: m.PrimaryStatus.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *UnlockTablesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DemotePrimaryResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UndoDemotePrimaryRequest) CloneVT() *UndoDemotePrimaryRequest { if m == nil { - return nil, nil + return (*UndoDemotePrimaryRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &UndoDemotePrimaryRequest{ + SemiSync: m.SemiSync, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *UnlockTablesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *UndoDemotePrimaryRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *UnlockTablesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UndoDemotePrimaryResponse) CloneVT() *UndoDemotePrimaryResponse { if m == nil { - return 0, nil + return (*UndoDemotePrimaryResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &UndoDemotePrimaryResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *UnlockTablesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *UndoDemotePrimaryResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReplicaWasPromotedRequest) CloneVT() *ReplicaWasPromotedRequest { if m == nil { - return nil, nil + return (*ReplicaWasPromotedRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ReplicaWasPromotedRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *UnlockTablesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ReplicaWasPromotedRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *UnlockTablesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReplicaWasPromotedResponse) CloneVT() *ReplicaWasPromotedResponse { if m == nil { - return 0, nil + return (*ReplicaWasPromotedResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReplicaWasPromotedResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteQueryRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ReplicaWasPromotedResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResetReplicationParametersRequest) CloneVT() *ResetReplicationParametersRequest { if m == nil { - return nil, nil + return (*ResetReplicationParametersRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ResetReplicationParametersRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ExecuteQueryRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ResetReplicationParametersRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteQueryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ResetReplicationParametersResponse) CloneVT() *ResetReplicationParametersResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if m.CallerId != nil { - size, err := m.CallerId.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 + return (*ResetReplicationParametersResponse)(nil) } - if m.MaxRows != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxRows)) - i-- - dAtA[i] = 0x18 + r := &ResetReplicationParametersResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.DbName) > 0 { - i -= len(m.DbName) - copy(dAtA[i:], m.DbName) - i = encodeVarint(dAtA, i, uint64(len(m.DbName))) - i-- - dAtA[i] = 0x12 + return r +} + +func (m *ResetReplicationParametersResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *FullStatusRequest) CloneVT() *FullStatusRequest { + if m == nil { + return (*FullStatusRequest)(nil) } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarint(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0xa + r := &FullStatusRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteQueryResponse) MarshalVT() (dAtA []byte, err error) { +func (m *FullStatusRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *FullStatusResponse) CloneVT() *FullStatusResponse { if m == nil { - return nil, nil + return (*FullStatusResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &FullStatusResponse{ + Status: m.Status.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteQueryResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *FullStatusResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteQueryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetReplicationSourceRequest) CloneVT() *SetReplicationSourceRequest { if m == nil { - return 0, nil + return (*SetReplicationSourceRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SetReplicationSourceRequest{ + Parent: m.Parent.CloneVT(), + TimeCreatedNs: m.TimeCreatedNs, + ForceStartReplication: m.ForceStartReplication, + WaitPosition: m.WaitPosition, + SemiSync: m.SemiSync, } - if m.Result != nil { - size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteFetchAsDbaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SetReplicationSourceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetReplicationSourceResponse) CloneVT() *SetReplicationSourceResponse { if m == nil { - return nil, nil + return (*SetReplicationSourceResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SetReplicationSourceResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ExecuteFetchAsDbaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SetReplicationSourceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsDbaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReplicaWasRestartedRequest) CloneVT() *ReplicaWasRestartedRequest { if m == nil { - return 0, nil + return (*ReplicaWasRestartedRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReplicaWasRestartedRequest{ + Parent: m.Parent.CloneVT(), } - if m.ReloadSchema { - i-- - if m.ReloadSchema { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.DisableBinlogs { - i-- - if m.DisableBinlogs { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + return r +} + +func (m *ReplicaWasRestartedRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReplicaWasRestartedResponse) CloneVT() *ReplicaWasRestartedResponse { + if m == nil { + return (*ReplicaWasRestartedResponse)(nil) } - if m.MaxRows != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxRows)) - i-- - dAtA[i] = 0x18 + r := &ReplicaWasRestartedResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.DbName) > 0 { - i -= len(m.DbName) - copy(dAtA[i:], m.DbName) - i = encodeVarint(dAtA, i, uint64(len(m.DbName))) - i-- - dAtA[i] = 0x12 + return r +} + +func (m *ReplicaWasRestartedResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StopReplicationAndGetStatusRequest) CloneVT() *StopReplicationAndGetStatusRequest { + if m == nil { + return (*StopReplicationAndGetStatusRequest)(nil) } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarint(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0xa + r := &StopReplicationAndGetStatusRequest{ + StopReplicationMode: m.StopReplicationMode, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsDbaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *StopReplicationAndGetStatusRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StopReplicationAndGetStatusResponse) CloneVT() *StopReplicationAndGetStatusResponse { if m == nil { - return nil, nil + return (*StopReplicationAndGetStatusResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &StopReplicationAndGetStatusResponse{ + Status: m.Status.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsDbaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *StopReplicationAndGetStatusResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsDbaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PromoteReplicaRequest) CloneVT() *PromoteReplicaRequest { if m == nil { - return 0, nil + return (*PromoteReplicaRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &PromoteReplicaRequest{ + SemiSync: m.SemiSync, } - if m.Result != nil { - size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteFetchAsAllPrivsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *PromoteReplicaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PromoteReplicaResponse) CloneVT() *PromoteReplicaResponse { if m == nil { - return nil, nil + return (*PromoteReplicaResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &PromoteReplicaResponse{ + Position: m.Position, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsAllPrivsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *PromoteReplicaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsAllPrivsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *BackupRequest) CloneVT() *BackupRequest { if m == nil { - return 0, nil + return (*BackupRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &BackupRequest{ + Concurrency: m.Concurrency, + AllowPrimary: m.AllowPrimary, + IncrementalFromPos: m.IncrementalFromPos, + UpgradeSafe: m.UpgradeSafe, } - if m.ReloadSchema { - i-- - if m.ReloadSchema { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.MaxRows != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxRows)) - i-- - dAtA[i] = 0x18 - } - if len(m.DbName) > 0 { - i -= len(m.DbName) - copy(dAtA[i:], m.DbName) - i = encodeVarint(dAtA, i, uint64(len(m.DbName))) - i-- - dAtA[i] = 0x12 - } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarint(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteFetchAsAllPrivsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *BackupRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BackupResponse) CloneVT() *BackupResponse { if m == nil { - return nil, nil + return (*BackupResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &BackupResponse{ + Event: m.Event.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsAllPrivsResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *BackupResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsAllPrivsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RestoreFromBackupRequest) CloneVT() *RestoreFromBackupRequest { if m == nil { - return 0, nil + return (*RestoreFromBackupRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RestoreFromBackupRequest{ + BackupTime: m.BackupTime.CloneVT(), + RestoreToPos: m.RestoreToPos, + DryRun: m.DryRun, + RestoreToTimestamp: m.RestoreToTimestamp.CloneVT(), } - if m.Result != nil { - size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteFetchAsAppRequest) MarshalVT() (dAtA []byte, err error) { +func (m *RestoreFromBackupRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RestoreFromBackupResponse) CloneVT() *RestoreFromBackupResponse { if m == nil { - return nil, nil + return (*RestoreFromBackupResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RestoreFromBackupResponse{ + Event: m.Event.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsAppRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RestoreFromBackupResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsAppRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CreateVReplicationWorkflowRequest) CloneVT() *CreateVReplicationWorkflowRequest { if m == nil { - return 0, nil + return (*CreateVReplicationWorkflowRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &CreateVReplicationWorkflowRequest{ + Workflow: m.Workflow, + TabletSelectionPreference: m.TabletSelectionPreference, + WorkflowType: m.WorkflowType, + WorkflowSubType: m.WorkflowSubType, + DeferSecondaryKeys: m.DeferSecondaryKeys, + AutoStart: m.AutoStart, + StopAfterCopy: m.StopAfterCopy, } - if m.MaxRows != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxRows)) - i-- - dAtA[i] = 0x10 + if rhs := m.BinlogSource; rhs != nil { + tmpContainer := make([]*binlogdata.BinlogSource, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.BinlogSource = tmpContainer } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarint(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0xa + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer } - return len(dAtA) - i, nil + if rhs := m.TabletTypes; rhs != nil { + tmpContainer := make([]topodata.TabletType, len(rhs)) + copy(tmpContainer, rhs) + r.TabletTypes = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsAppResponse) MarshalVT() (dAtA []byte, err error) { +func (m *CreateVReplicationWorkflowRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CreateVReplicationWorkflowResponse) CloneVT() *CreateVReplicationWorkflowResponse { if m == nil { - return nil, nil + return (*CreateVReplicationWorkflowResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &CreateVReplicationWorkflowResponse{ + Result: m.Result.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsAppResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *CreateVReplicationWorkflowResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsAppResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteVReplicationWorkflowRequest) CloneVT() *DeleteVReplicationWorkflowRequest { if m == nil { - return 0, nil + return (*DeleteVReplicationWorkflowRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &DeleteVReplicationWorkflowRequest{ + Workflow: m.Workflow, } - if m.Result != nil { - size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ReplicationStatusRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteVReplicationWorkflowRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteVReplicationWorkflowResponse) CloneVT() *DeleteVReplicationWorkflowResponse { if m == nil { - return nil, nil + return (*DeleteVReplicationWorkflowResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DeleteVReplicationWorkflowResponse{ + Result: m.Result.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ReplicationStatusRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DeleteVReplicationWorkflowResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ReplicationStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReadVReplicationWorkflowRequest) CloneVT() *ReadVReplicationWorkflowRequest { if m == nil { - return 0, nil + return (*ReadVReplicationWorkflowRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReadVReplicationWorkflowRequest{ + Workflow: m.Workflow, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ReplicationStatusResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ReadVReplicationWorkflowRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReadVReplicationWorkflowResponse_Stream) CloneVT() *ReadVReplicationWorkflowResponse_Stream { if m == nil { - return nil, nil + return (*ReadVReplicationWorkflowResponse_Stream)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ReadVReplicationWorkflowResponse_Stream{ + Id: m.Id, + Bls: m.Bls.CloneVT(), + Pos: m.Pos, + StopPos: m.StopPos, + MaxTps: m.MaxTps, + MaxReplicationLag: m.MaxReplicationLag, + TimeUpdated: m.TimeUpdated.CloneVT(), + TransactionTimestamp: m.TransactionTimestamp.CloneVT(), + State: m.State, + Message: m.Message, + RowsCopied: m.RowsCopied, + TimeHeartbeat: m.TimeHeartbeat.CloneVT(), + TimeThrottled: m.TimeThrottled.CloneVT(), + ComponentThrottled: m.ComponentThrottled, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ReplicationStatusResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ReadVReplicationWorkflowResponse_Stream) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ReplicationStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReadVReplicationWorkflowResponse) CloneVT() *ReadVReplicationWorkflowResponse { if m == nil { - return 0, nil + return (*ReadVReplicationWorkflowResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReadVReplicationWorkflowResponse{ + Workflow: m.Workflow, + Cells: m.Cells, + TabletSelectionPreference: m.TabletSelectionPreference, + DbName: m.DbName, + Tags: m.Tags, + WorkflowType: m.WorkflowType, + WorkflowSubType: m.WorkflowSubType, + DeferSecondaryKeys: m.DeferSecondaryKeys, } - if m.Status != nil { - size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if rhs := m.TabletTypes; rhs != nil { + tmpContainer := make([]topodata.TabletType, len(rhs)) + copy(tmpContainer, rhs) + r.TabletTypes = tmpContainer + } + if rhs := m.Streams; rhs != nil { + tmpContainer := make([]*ReadVReplicationWorkflowResponse_Stream, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r.Streams = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *PrimaryStatusRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ReadVReplicationWorkflowResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VDiffRequest) CloneVT() *VDiffRequest { if m == nil { - return nil, nil + return (*VDiffRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &VDiffRequest{ + Keyspace: m.Keyspace, + Workflow: m.Workflow, + Action: m.Action, + ActionArg: m.ActionArg, + VdiffUuid: m.VdiffUuid, + Options: m.Options.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *PrimaryStatusRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *VDiffRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *PrimaryStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *VDiffResponse) CloneVT() *VDiffResponse { if m == nil { - return 0, nil + return (*VDiffResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &VDiffResponse{ + Id: m.Id, + Output: m.Output.CloneVT(), + VdiffUuid: m.VdiffUuid, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *PrimaryStatusResponse) MarshalVT() (dAtA []byte, err error) { +func (m *VDiffResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VDiffPickerOptions) CloneVT() *VDiffPickerOptions { if m == nil { - return nil, nil + return (*VDiffPickerOptions)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &VDiffPickerOptions{ + TabletTypes: m.TabletTypes, + SourceCell: m.SourceCell, + TargetCell: m.TargetCell, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *PrimaryStatusResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *VDiffPickerOptions) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *PrimaryStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *VDiffReportOptions) CloneVT() *VDiffReportOptions { if m == nil { - return 0, nil + return (*VDiffReportOptions)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &VDiffReportOptions{ + OnlyPks: m.OnlyPks, + DebugQuery: m.DebugQuery, + Format: m.Format, } - if m.Status != nil { - size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *PrimaryPositionRequest) MarshalVT() (dAtA []byte, err error) { +func (m *VDiffReportOptions) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VDiffCoreOptions) CloneVT() *VDiffCoreOptions { if m == nil { - return nil, nil + return (*VDiffCoreOptions)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &VDiffCoreOptions{ + Tables: m.Tables, + AutoRetry: m.AutoRetry, + MaxRows: m.MaxRows, + Checksum: m.Checksum, + SamplePct: m.SamplePct, + TimeoutSeconds: m.TimeoutSeconds, + MaxExtraRowsToCompare: m.MaxExtraRowsToCompare, + UpdateTableStats: m.UpdateTableStats, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *PrimaryPositionRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *VDiffCoreOptions) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *PrimaryPositionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *VDiffOptions) CloneVT() *VDiffOptions { if m == nil { - return 0, nil + return (*VDiffOptions)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &VDiffOptions{ + PickerOptions: m.PickerOptions.CloneVT(), + CoreOptions: m.CoreOptions.CloneVT(), + ReportOptions: m.ReportOptions.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *PrimaryPositionResponse) MarshalVT() (dAtA []byte, err error) { +func (m *VDiffOptions) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UpdateVReplicationWorkflowRequest) CloneVT() *UpdateVReplicationWorkflowRequest { if m == nil { - return nil, nil + return (*UpdateVReplicationWorkflowRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &UpdateVReplicationWorkflowRequest{ + Workflow: m.Workflow, + TabletSelectionPreference: m.TabletSelectionPreference, + OnDdl: m.OnDdl, + State: m.State, } - return dAtA[:n], nil + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if rhs := m.TabletTypes; rhs != nil { + tmpContainer := make([]topodata.TabletType, len(rhs)) + copy(tmpContainer, rhs) + r.TabletTypes = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *PrimaryPositionResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *UpdateVReplicationWorkflowRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *PrimaryPositionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UpdateVReplicationWorkflowResponse) CloneVT() *UpdateVReplicationWorkflowResponse { if m == nil { - return 0, nil + return (*UpdateVReplicationWorkflowResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &UpdateVReplicationWorkflowResponse{ + Result: m.Result.CloneVT(), } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *WaitForPositionRequest) MarshalVT() (dAtA []byte, err error) { +func (m *UpdateVReplicationWorkflowResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResetSequencesRequest) CloneVT() *ResetSequencesRequest { + if m == nil { + return (*ResetSequencesRequest)(nil) + } + r := &ResetSequencesRequest{} + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ResetSequencesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResetSequencesResponse) CloneVT() *ResetSequencesResponse { + if m == nil { + return (*ResetSequencesResponse)(nil) + } + r := &ResetSequencesResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ResetSequencesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CheckThrottlerRequest) CloneVT() *CheckThrottlerRequest { + if m == nil { + return (*CheckThrottlerRequest)(nil) + } + r := &CheckThrottlerRequest{ + AppName: m.AppName, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CheckThrottlerRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CheckThrottlerResponse) CloneVT() *CheckThrottlerResponse { + if m == nil { + return (*CheckThrottlerResponse)(nil) + } + r := &CheckThrottlerResponse{ + StatusCode: m.StatusCode, + Value: m.Value, + Threshold: m.Threshold, + Error: m.Error, + Message: m.Message, + RecentlyChecked: m.RecentlyChecked, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CheckThrottlerResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TableDefinition) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2320,12 +2276,12 @@ func (m *WaitForPositionRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *WaitForPositionRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *TableDefinition) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *WaitForPositionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *TableDefinition) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2337,17 +2293,71 @@ func (m *WaitForPositionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Fields[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + } + if m.RowCount != 0 { + i = encodeVarint(dAtA, i, uint64(m.RowCount)) + i-- + dAtA[i] = 0x38 + } + if m.DataLength != 0 { + i = encodeVarint(dAtA, i, uint64(m.DataLength)) + i-- + dAtA[i] = 0x30 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarint(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x2a + } + if len(m.PrimaryKeyColumns) > 0 { + for iNdEx := len(m.PrimaryKeyColumns) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PrimaryKeyColumns[iNdEx]) + copy(dAtA[i:], m.PrimaryKeyColumns[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.PrimaryKeyColumns[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Columns) > 0 { + for iNdEx := len(m.Columns) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Columns[iNdEx]) + copy(dAtA[i:], m.Columns[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Columns[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Schema) > 0 { + i -= len(m.Schema) + copy(dAtA[i:], m.Schema) + i = encodeVarint(dAtA, i, uint64(len(m.Schema))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *WaitForPositionResponse) MarshalVT() (dAtA []byte, err error) { +func (m *SchemaDefinition) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2360,12 +2370,12 @@ func (m *WaitForPositionResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *WaitForPositionResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *SchemaDefinition) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *WaitForPositionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SchemaDefinition) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2377,10 +2387,29 @@ func (m *WaitForPositionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.TableDefinitions) > 0 { + for iNdEx := len(m.TableDefinitions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TableDefinitions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.DatabaseSchema) > 0 { + i -= len(m.DatabaseSchema) + copy(dAtA[i:], m.DatabaseSchema) + i = encodeVarint(dAtA, i, uint64(len(m.DatabaseSchema))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *StopReplicationRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SchemaChangeResult) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2393,12 +2422,12 @@ func (m *StopReplicationRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StopReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *SchemaChangeResult) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SchemaChangeResult) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2410,10 +2439,30 @@ func (m *StopReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.AfterSchema != nil { + size, err := m.AfterSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BeforeSchema != nil { + size, err := m.BeforeSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *StopReplicationResponse) MarshalVT() (dAtA []byte, err error) { +func (m *UserPermission) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2426,12 +2475,12 @@ func (m *StopReplicationResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StopReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *UserPermission) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UserPermission) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2443,10 +2492,48 @@ func (m *StopReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Privileges) > 0 { + for k := range m.Privileges { + v := m.Privileges[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.PasswordChecksum != 0 { + i = encodeVarint(dAtA, i, uint64(m.PasswordChecksum)) + i-- + dAtA[i] = 0x18 + } + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarint(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x12 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarint(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *StopReplicationMinimumRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DbPermission) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2459,12 +2546,12 @@ func (m *StopReplicationMinimumRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StopReplicationMinimumRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *DbPermission) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationMinimumRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DbPermission) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2476,22 +2563,50 @@ func (m *StopReplicationMinimumRequest) MarshalToSizedBufferVT(dAtA []byte) (int i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.WaitTimeout != 0 { - i = encodeVarint(dAtA, i, uint64(m.WaitTimeout)) - i-- - dAtA[i] = 0x10 + if len(m.Privileges) > 0 { + for k := range m.Privileges { + v := m.Privileges[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarint(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + } + if len(m.Db) > 0 { + i -= len(m.Db) + copy(dAtA[i:], m.Db) + i = encodeVarint(dAtA, i, uint64(len(m.Db))) + i-- + dAtA[i] = 0x12 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarint(dAtA, i, uint64(len(m.Host))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *StopReplicationMinimumResponse) MarshalVT() (dAtA []byte, err error) { +func (m *Permissions) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2504,12 +2619,12 @@ func (m *StopReplicationMinimumResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StopReplicationMinimumResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *Permissions) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationMinimumResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Permissions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2521,17 +2636,34 @@ func (m *StopReplicationMinimumResponse) MarshalToSizedBufferVT(dAtA []byte) (in i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) - i-- - dAtA[i] = 0xa + if len(m.DbPermissions) > 0 { + for iNdEx := len(m.DbPermissions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DbPermissions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.UserPermissions) > 0 { + for iNdEx := len(m.UserPermissions) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.UserPermissions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } } return len(dAtA) - i, nil } -func (m *StartReplicationRequest) MarshalVT() (dAtA []byte, err error) { +func (m *PingRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2544,12 +2676,12 @@ func (m *StartReplicationRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StartReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *PingRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StartReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PingRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2561,20 +2693,17 @@ func (m *StartReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.SemiSync { - i-- - if m.SemiSync { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarint(dAtA, i, uint64(len(m.Payload))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *StartReplicationResponse) MarshalVT() (dAtA []byte, err error) { +func (m *PingResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2587,12 +2716,12 @@ func (m *StartReplicationResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StartReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *PingResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StartReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PingResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2604,10 +2733,17 @@ func (m *StartReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarint(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *StartReplicationUntilAfterRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SleepRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2620,12 +2756,12 @@ func (m *StartReplicationUntilAfterRequest) MarshalVT() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *StartReplicationUntilAfterRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *SleepRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StartReplicationUntilAfterRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SleepRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2637,22 +2773,15 @@ func (m *StartReplicationUntilAfterRequest) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.WaitTimeout != 0 { - i = encodeVarint(dAtA, i, uint64(m.WaitTimeout)) - i-- - dAtA[i] = 0x10 - } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) + if m.Duration != 0 { + i = encodeVarint(dAtA, i, uint64(m.Duration)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *StartReplicationUntilAfterResponse) MarshalVT() (dAtA []byte, err error) { +func (m *SleepResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2665,12 +2794,12 @@ func (m *StartReplicationUntilAfterResponse) MarshalVT() (dAtA []byte, err error return dAtA[:n], nil } -func (m *StartReplicationUntilAfterResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *SleepResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StartReplicationUntilAfterResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SleepResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2685,7 +2814,7 @@ func (m *StartReplicationUntilAfterResponse) MarshalToSizedBufferVT(dAtA []byte) return len(dAtA) - i, nil } -func (m *GetReplicasRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteHookRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2698,12 +2827,12 @@ func (m *GetReplicasRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetReplicasRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteHookRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetReplicasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteHookRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2715,10 +2844,45 @@ func (m *GetReplicasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.ExtraEnv) > 0 { + for k := range m.ExtraEnv { + v := m.ExtraEnv[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Parameters) > 0 { + for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Parameters[iNdEx]) + copy(dAtA[i:], m.Parameters[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Parameters[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *GetReplicasResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteHookResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2731,12 +2895,12 @@ func (m *GetReplicasResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetReplicasResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteHookResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetReplicasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteHookResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2748,19 +2912,29 @@ func (m *GetReplicasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Addrs) > 0 { - for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Addrs[iNdEx]) - copy(dAtA[i:], m.Addrs[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Addrs[iNdEx]))) - i-- - dAtA[i] = 0xa - } + if len(m.Stderr) > 0 { + i -= len(m.Stderr) + copy(dAtA[i:], m.Stderr) + i = encodeVarint(dAtA, i, uint64(len(m.Stderr))) + i-- + dAtA[i] = 0x1a + } + if len(m.Stdout) > 0 { + i -= len(m.Stdout) + copy(dAtA[i:], m.Stdout) + i = encodeVarint(dAtA, i, uint64(len(m.Stdout))) + i-- + dAtA[i] = 0x12 + } + if m.ExitStatus != 0 { + i = encodeVarint(dAtA, i, uint64(m.ExitStatus)) + i-- + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *ResetReplicationRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetSchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2773,12 +2947,12 @@ func (m *ResetReplicationRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResetReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ResetReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2790,10 +2964,48 @@ func (m *ResetReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.TableSchemaOnly { + i-- + if m.TableSchemaOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.ExcludeTables) > 0 { + for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeTables[iNdEx]) + copy(dAtA[i:], m.ExcludeTables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.IncludeViews { + i-- + if m.IncludeViews { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *ResetReplicationResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2806,12 +3018,12 @@ func (m *ResetReplicationResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResetReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ResetReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2823,10 +3035,20 @@ func (m *ResetReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - return len(dAtA) - i, nil + if m.SchemaDefinition != nil { + size, err := m.SchemaDefinition.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *VReplicationExecRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetPermissionsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2839,12 +3061,12 @@ func (m *VReplicationExecRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VReplicationExecRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetPermissionsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VReplicationExecRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetPermissionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2856,17 +3078,10 @@ func (m *VReplicationExecRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarint(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *VReplicationExecResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetPermissionsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2879,12 +3094,12 @@ func (m *VReplicationExecResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VReplicationExecResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetPermissionsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VReplicationExecResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetPermissionsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2896,8 +3111,8 @@ func (m *VReplicationExecResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Result != nil { - size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) + if m.Permissions != nil { + size, err := m.Permissions.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -2909,7 +3124,7 @@ func (m *VReplicationExecResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err return len(dAtA) - i, nil } -func (m *VReplicationWaitForPosRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SetReadOnlyRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2922,12 +3137,12 @@ func (m *VReplicationWaitForPosRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VReplicationWaitForPosRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *SetReadOnlyRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VReplicationWaitForPosRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetReadOnlyRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2939,22 +3154,10 @@ func (m *VReplicationWaitForPosRequest) MarshalToSizedBufferVT(dAtA []byte) (int i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) - i-- - dAtA[i] = 0x12 - } - if m.Id != 0 { - i = encodeVarint(dAtA, i, uint64(m.Id)) - i-- - dAtA[i] = 0x8 - } return len(dAtA) - i, nil } -func (m *VReplicationWaitForPosResponse) MarshalVT() (dAtA []byte, err error) { +func (m *SetReadOnlyResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -2967,12 +3170,12 @@ func (m *VReplicationWaitForPosResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VReplicationWaitForPosResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *SetReadOnlyResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VReplicationWaitForPosResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetReadOnlyResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -2987,7 +3190,7 @@ func (m *VReplicationWaitForPosResponse) MarshalToSizedBufferVT(dAtA []byte) (in return len(dAtA) - i, nil } -func (m *InitPrimaryRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SetReadWriteRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3000,12 +3203,12 @@ func (m *InitPrimaryRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InitPrimaryRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *SetReadWriteRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *InitPrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetReadWriteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3017,20 +3220,10 @@ func (m *InitPrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.SemiSync { - i-- - if m.SemiSync { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } return len(dAtA) - i, nil } -func (m *InitPrimaryResponse) MarshalVT() (dAtA []byte, err error) { +func (m *SetReadWriteResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3043,12 +3236,12 @@ func (m *InitPrimaryResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InitPrimaryResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *SetReadWriteResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *InitPrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetReadWriteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3060,17 +3253,10 @@ func (m *InitPrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *PopulateReparentJournalRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ChangeTypeRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3083,12 +3269,12 @@ func (m *PopulateReparentJournalRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PopulateReparentJournalRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ChangeTypeRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PopulateReparentJournalRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ChangeTypeRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3100,39 +3286,25 @@ func (m *PopulateReparentJournalRequest) MarshalToSizedBufferVT(dAtA []byte) (in i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.ReplicationPosition) > 0 { - i -= len(m.ReplicationPosition) - copy(dAtA[i:], m.ReplicationPosition) - i = encodeVarint(dAtA, i, uint64(len(m.ReplicationPosition))) + if m.SemiSync { i-- - dAtA[i] = 0x22 - } - if m.PrimaryAlias != nil { - size, err := m.PrimaryAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if m.SemiSync { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a - } - if len(m.ActionName) > 0 { - i -= len(m.ActionName) - copy(dAtA[i:], m.ActionName) - i = encodeVarint(dAtA, i, uint64(len(m.ActionName))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x10 } - if m.TimeCreatedNs != 0 { - i = encodeVarint(dAtA, i, uint64(m.TimeCreatedNs)) + if m.TabletType != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletType)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *PopulateReparentJournalResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ChangeTypeResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3145,12 +3317,12 @@ func (m *PopulateReparentJournalResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PopulateReparentJournalResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ChangeTypeResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PopulateReparentJournalResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ChangeTypeResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3165,7 +3337,7 @@ func (m *PopulateReparentJournalResponse) MarshalToSizedBufferVT(dAtA []byte) (i return len(dAtA) - i, nil } -func (m *InitReplicaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *RefreshStateRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3178,12 +3350,12 @@ func (m *InitReplicaRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InitReplicaRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *RefreshStateRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *InitReplicaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RefreshStateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3195,42 +3367,10 @@ func (m *InitReplicaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.SemiSync { - i-- - if m.SemiSync { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.TimeCreatedNs != 0 { - i = encodeVarint(dAtA, i, uint64(m.TimeCreatedNs)) - i-- - dAtA[i] = 0x18 - } - if len(m.ReplicationPosition) > 0 { - i -= len(m.ReplicationPosition) - copy(dAtA[i:], m.ReplicationPosition) - i = encodeVarint(dAtA, i, uint64(len(m.ReplicationPosition))) - i-- - dAtA[i] = 0x12 - } - if m.Parent != nil { - size, err := m.Parent.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *InitReplicaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *RefreshStateResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3243,12 +3383,12 @@ func (m *InitReplicaResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InitReplicaResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *RefreshStateResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *InitReplicaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RefreshStateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3263,7 +3403,7 @@ func (m *InitReplicaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *DemotePrimaryRequest) MarshalVT() (dAtA []byte, err error) { +func (m *RunHealthCheckRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3276,12 +3416,12 @@ func (m *DemotePrimaryRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DemotePrimaryRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *RunHealthCheckRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *DemotePrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RunHealthCheckRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3296,7 +3436,7 @@ func (m *DemotePrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *DemotePrimaryResponse) MarshalVT() (dAtA []byte, err error) { +func (m *RunHealthCheckResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3309,12 +3449,12 @@ func (m *DemotePrimaryResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DemotePrimaryResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *RunHealthCheckResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *DemotePrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RunHealthCheckResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3326,20 +3466,10 @@ func (m *DemotePrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.PrimaryStatus != nil { - size, err := m.PrimaryStatus.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - } return len(dAtA) - i, nil } -func (m *UndoDemotePrimaryRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ReloadSchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3352,12 +3482,12 @@ func (m *UndoDemotePrimaryRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UndoDemotePrimaryRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ReloadSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UndoDemotePrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReloadSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3369,20 +3499,17 @@ func (m *UndoDemotePrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.SemiSync { - i-- - if m.SemiSync { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } + if len(m.WaitPosition) > 0 { + i -= len(m.WaitPosition) + copy(dAtA[i:], m.WaitPosition) + i = encodeVarint(dAtA, i, uint64(len(m.WaitPosition))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *UndoDemotePrimaryResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ReloadSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3395,12 +3522,12 @@ func (m *UndoDemotePrimaryResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UndoDemotePrimaryResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ReloadSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UndoDemotePrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReloadSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3415,7 +3542,7 @@ func (m *UndoDemotePrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er return len(dAtA) - i, nil } -func (m *ReplicaWasPromotedRequest) MarshalVT() (dAtA []byte, err error) { +func (m *PreflightSchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3428,12 +3555,12 @@ func (m *ReplicaWasPromotedRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaWasPromotedRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *PreflightSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReplicaWasPromotedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PreflightSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3445,10 +3572,19 @@ func (m *ReplicaWasPromotedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Changes) > 0 { + for iNdEx := len(m.Changes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Changes[iNdEx]) + copy(dAtA[i:], m.Changes[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Changes[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *ReplicaWasPromotedResponse) MarshalVT() (dAtA []byte, err error) { +func (m *PreflightSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3461,12 +3597,12 @@ func (m *ReplicaWasPromotedResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaWasPromotedResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *PreflightSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReplicaWasPromotedResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PreflightSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3478,10 +3614,22 @@ func (m *ReplicaWasPromotedResponse) MarshalToSizedBufferVT(dAtA []byte) (int, e i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.ChangeResults) > 0 { + for iNdEx := len(m.ChangeResults) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ChangeResults[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *ResetReplicationParametersRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ApplySchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3494,12 +3642,12 @@ func (m *ResetReplicationParametersRequest) MarshalVT() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *ResetReplicationParametersRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplySchemaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ResetReplicationParametersRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplySchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3511,10 +3659,69 @@ func (m *ResetReplicationParametersRequest) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.BatchSize != 0 { + i = encodeVarint(dAtA, i, uint64(m.BatchSize)) + i-- + dAtA[i] = 0x38 + } + if len(m.SqlMode) > 0 { + i -= len(m.SqlMode) + copy(dAtA[i:], m.SqlMode) + i = encodeVarint(dAtA, i, uint64(len(m.SqlMode))) + i-- + dAtA[i] = 0x32 + } + if m.AfterSchema != nil { + size, err := m.AfterSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if m.BeforeSchema != nil { + size, err := m.BeforeSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.AllowReplication { + i-- + if m.AllowReplication { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Sql) > 0 { + i -= len(m.Sql) + copy(dAtA[i:], m.Sql) + i = encodeVarint(dAtA, i, uint64(len(m.Sql))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *ResetReplicationParametersResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ApplySchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3527,12 +3734,12 @@ func (m *ResetReplicationParametersResponse) MarshalVT() (dAtA []byte, err error return dAtA[:n], nil } -func (m *ResetReplicationParametersResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplySchemaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ResetReplicationParametersResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplySchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3544,10 +3751,30 @@ func (m *ResetReplicationParametersResponse) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.AfterSchema != nil { + size, err := m.AfterSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BeforeSchema != nil { + size, err := m.BeforeSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *FullStatusRequest) MarshalVT() (dAtA []byte, err error) { +func (m *LockTablesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3560,12 +3787,12 @@ func (m *FullStatusRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *FullStatusRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *LockTablesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *FullStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *LockTablesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3580,7 +3807,7 @@ func (m *FullStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *FullStatusResponse) MarshalVT() (dAtA []byte, err error) { +func (m *LockTablesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3593,12 +3820,12 @@ func (m *FullStatusResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *FullStatusResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *LockTablesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *FullStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *LockTablesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3610,20 +3837,10 @@ func (m *FullStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Status != nil { - size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *SetReplicationSourceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *UnlockTablesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3636,12 +3853,12 @@ func (m *SetReplicationSourceRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetReplicationSourceRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *UnlockTablesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetReplicationSourceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UnlockTablesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3653,52 +3870,10 @@ func (m *SetReplicationSourceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.SemiSync { - i-- - if m.SemiSync { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if len(m.WaitPosition) > 0 { - i -= len(m.WaitPosition) - copy(dAtA[i:], m.WaitPosition) - i = encodeVarint(dAtA, i, uint64(len(m.WaitPosition))) - i-- - dAtA[i] = 0x22 - } - if m.ForceStartReplication { - i-- - if m.ForceStartReplication { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.TimeCreatedNs != 0 { - i = encodeVarint(dAtA, i, uint64(m.TimeCreatedNs)) - i-- - dAtA[i] = 0x10 - } - if m.Parent != nil { - size, err := m.Parent.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *SetReplicationSourceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *UnlockTablesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3711,12 +3886,12 @@ func (m *SetReplicationSourceResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetReplicationSourceResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *UnlockTablesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetReplicationSourceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UnlockTablesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3731,7 +3906,7 @@ func (m *SetReplicationSourceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *ReplicaWasRestartedRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteQueryRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3744,12 +3919,12 @@ func (m *ReplicaWasRestartedRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaWasRestartedRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteQueryRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReplicaWasRestartedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteQueryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3761,20 +3936,39 @@ func (m *ReplicaWasRestartedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, e i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Parent != nil { - size, err := m.Parent.MarshalToSizedBufferVT(dAtA[:i]) + if m.CallerId != nil { + size, err := m.CallerId.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- + dAtA[i] = 0x22 + } + if m.MaxRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxRows)) + i-- + dAtA[i] = 0x18 + } + if len(m.DbName) > 0 { + i -= len(m.DbName) + copy(dAtA[i:], m.DbName) + i = encodeVarint(dAtA, i, uint64(len(m.DbName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) + i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ReplicaWasRestartedResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteQueryResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3787,12 +3981,12 @@ func (m *ReplicaWasRestartedResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicaWasRestartedResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteQueryResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReplicaWasRestartedResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteQueryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3804,10 +3998,20 @@ func (m *ReplicaWasRestartedResponse) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *StopReplicationAndGetStatusRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsDbaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3820,12 +4024,12 @@ func (m *StopReplicationAndGetStatusRequest) MarshalVT() (dAtA []byte, err error return dAtA[:n], nil } -func (m *StopReplicationAndGetStatusRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDbaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationAndGetStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDbaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3837,15 +4041,49 @@ func (m *StopReplicationAndGetStatusRequest) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.StopReplicationMode != 0 { - i = encodeVarint(dAtA, i, uint64(m.StopReplicationMode)) + if m.ReloadSchema { i-- - dAtA[i] = 0x8 + if m.ReloadSchema { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.DisableBinlogs { + i-- + if m.DisableBinlogs { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.MaxRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxRows)) + i-- + dAtA[i] = 0x18 + } + if len(m.DbName) > 0 { + i -= len(m.DbName) + copy(dAtA[i:], m.DbName) + i = encodeVarint(dAtA, i, uint64(len(m.DbName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *StopReplicationAndGetStatusResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsDbaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3858,12 +4096,12 @@ func (m *StopReplicationAndGetStatusResponse) MarshalVT() (dAtA []byte, err erro return dAtA[:n], nil } -func (m *StopReplicationAndGetStatusResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDbaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationAndGetStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDbaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3875,20 +4113,20 @@ func (m *StopReplicationAndGetStatusResponse) MarshalToSizedBufferVT(dAtA []byte i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Status != nil { - size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *PromoteReplicaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAllPrivsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3901,12 +4139,12 @@ func (m *PromoteReplicaRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PromoteReplicaRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAllPrivsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PromoteReplicaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAllPrivsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3918,20 +4156,39 @@ func (m *PromoteReplicaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.SemiSync { + if m.ReloadSchema { i-- - if m.SemiSync { + if m.ReloadSchema { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x8 + dAtA[i] = 0x20 + } + if m.MaxRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxRows)) + i-- + dAtA[i] = 0x18 + } + if len(m.DbName) > 0 { + i -= len(m.DbName) + copy(dAtA[i:], m.DbName) + i = encodeVarint(dAtA, i, uint64(len(m.DbName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *PromoteReplicaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAllPrivsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3944,12 +4201,12 @@ func (m *PromoteReplicaResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PromoteReplicaResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAllPrivsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PromoteReplicaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAllPrivsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3961,17 +4218,20 @@ func (m *PromoteReplicaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *BackupRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAppRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3984,12 +4244,12 @@ func (m *BackupRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BackupRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *BackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4001,32 +4261,22 @@ func (m *BackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.IncrementalFromPos) > 0 { - i -= len(m.IncrementalFromPos) - copy(dAtA[i:], m.IncrementalFromPos) - i = encodeVarint(dAtA, i, uint64(len(m.IncrementalFromPos))) - i-- - dAtA[i] = 0x1a - } - if m.AllowPrimary { - i-- - if m.AllowPrimary { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } + if m.MaxRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxRows)) i-- dAtA[i] = 0x10 } - if m.Concurrency != 0 { - i = encodeVarint(dAtA, i, uint64(m.Concurrency)) + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *BackupResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAppResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4039,12 +4289,12 @@ func (m *BackupResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BackupResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *BackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4056,8 +4306,8 @@ func (m *BackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Event != nil { - size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -4069,7 +4319,7 @@ func (m *BackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RestoreFromBackupRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ReplicationStatusRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4082,12 +4332,12 @@ func (m *RestoreFromBackupRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RestoreFromBackupRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ReplicationStatusRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReplicationStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4099,37 +4349,10 @@ func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.DryRun { - i-- - if m.DryRun { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.RestoreToPos) > 0 { - i -= len(m.RestoreToPos) - copy(dAtA[i:], m.RestoreToPos) - i = encodeVarint(dAtA, i, uint64(len(m.RestoreToPos))) - i-- - dAtA[i] = 0x12 - } - if m.BackupTime != nil { - size, err := m.BackupTime.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *RestoreFromBackupResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ReplicationStatusResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4142,12 +4365,12 @@ func (m *RestoreFromBackupResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RestoreFromBackupResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ReplicationStatusResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RestoreFromBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReplicationStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4159,8 +4382,8 @@ func (m *RestoreFromBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Event != nil { - size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) + if m.Status != nil { + size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -4172,7 +4395,7 @@ func (m *RestoreFromBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er return len(dAtA) - i, nil } -func (m *VDiffRequest) MarshalVT() (dAtA []byte, err error) { +func (m *PrimaryStatusRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4185,12 +4408,12 @@ func (m *VDiffRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VDiffRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *PrimaryStatusRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VDiffRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PrimaryStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4202,55 +4425,10 @@ func (m *VDiffRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Options != nil { - size, err := m.Options.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x32 - } - if len(m.VdiffUuid) > 0 { - i -= len(m.VdiffUuid) - copy(dAtA[i:], m.VdiffUuid) - i = encodeVarint(dAtA, i, uint64(len(m.VdiffUuid))) - i-- - dAtA[i] = 0x2a - } - if len(m.ActionArg) > 0 { - i -= len(m.ActionArg) - copy(dAtA[i:], m.ActionArg) - i = encodeVarint(dAtA, i, uint64(len(m.ActionArg))) - i-- - dAtA[i] = 0x22 - } - if len(m.Action) > 0 { - i -= len(m.Action) - copy(dAtA[i:], m.Action) - i = encodeVarint(dAtA, i, uint64(len(m.Action))) - i-- - dAtA[i] = 0x1a - } - if len(m.Workflow) > 0 { - i -= len(m.Workflow) - copy(dAtA[i:], m.Workflow) - i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *VDiffResponse) MarshalVT() (dAtA []byte, err error) { +func (m *PrimaryStatusResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4263,12 +4441,12 @@ func (m *VDiffResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VDiffResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *PrimaryStatusResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VDiffResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PrimaryStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4280,32 +4458,20 @@ func (m *VDiffResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.VdiffUuid) > 0 { - i -= len(m.VdiffUuid) - copy(dAtA[i:], m.VdiffUuid) - i = encodeVarint(dAtA, i, uint64(len(m.VdiffUuid))) - i-- - dAtA[i] = 0x1a - } - if m.Output != nil { - size, err := m.Output.MarshalToSizedBufferVT(dAtA[:i]) + if m.Status != nil { + size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x12 - } - if m.Id != 0 { - i = encodeVarint(dAtA, i, uint64(m.Id)) - i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *VDiffPickerOptions) MarshalVT() (dAtA []byte, err error) { +func (m *PrimaryPositionRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4318,12 +4484,12 @@ func (m *VDiffPickerOptions) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VDiffPickerOptions) MarshalToVT(dAtA []byte) (int, error) { +func (m *PrimaryPositionRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VDiffPickerOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PrimaryPositionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4335,31 +4501,10 @@ func (m *VDiffPickerOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.TargetCell) > 0 { - i -= len(m.TargetCell) - copy(dAtA[i:], m.TargetCell) - i = encodeVarint(dAtA, i, uint64(len(m.TargetCell))) - i-- - dAtA[i] = 0x1a - } - if len(m.SourceCell) > 0 { - i -= len(m.SourceCell) - copy(dAtA[i:], m.SourceCell) - i = encodeVarint(dAtA, i, uint64(len(m.SourceCell))) - i-- - dAtA[i] = 0x12 - } - if len(m.TabletTypes) > 0 { - i -= len(m.TabletTypes) - copy(dAtA[i:], m.TabletTypes) - i = encodeVarint(dAtA, i, uint64(len(m.TabletTypes))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *VDiffReportOptions) MarshalVT() (dAtA []byte, err error) { +func (m *PrimaryPositionResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4372,12 +4517,12 @@ func (m *VDiffReportOptions) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VDiffReportOptions) MarshalToVT(dAtA []byte) (int, error) { +func (m *PrimaryPositionResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VDiffReportOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PrimaryPositionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4389,37 +4534,17 @@ func (m *VDiffReportOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Format) > 0 { - i -= len(m.Format) - copy(dAtA[i:], m.Format) - i = encodeVarint(dAtA, i, uint64(len(m.Format))) + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) i-- - dAtA[i] = 0x1a - } - if m.DebugQuery { - i-- - if m.DebugQuery { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.OnlyPks { - i-- - if m.OnlyPks { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *VDiffCoreOptions) MarshalVT() (dAtA []byte, err error) { +func (m *WaitForPositionRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4432,12 +4557,12 @@ func (m *VDiffCoreOptions) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VDiffCoreOptions) MarshalToVT(dAtA []byte) (int, error) { +func (m *WaitForPositionRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VDiffCoreOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *WaitForPositionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4449,57 +4574,17 @@ func (m *VDiffCoreOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.MaxExtraRowsToCompare != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxExtraRowsToCompare)) - i-- - dAtA[i] = 0x38 - } - if m.TimeoutSeconds != 0 { - i = encodeVarint(dAtA, i, uint64(m.TimeoutSeconds)) - i-- - dAtA[i] = 0x30 - } - if m.SamplePct != 0 { - i = encodeVarint(dAtA, i, uint64(m.SamplePct)) - i-- - dAtA[i] = 0x28 - } - if m.Checksum { - i-- - if m.Checksum { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.MaxRows != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxRows)) - i-- - dAtA[i] = 0x18 - } - if m.AutoRetry { - i-- - if m.AutoRetry { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.Tables) > 0 { - i -= len(m.Tables) - copy(dAtA[i:], m.Tables) - i = encodeVarint(dAtA, i, uint64(len(m.Tables))) + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *VDiffOptions) MarshalVT() (dAtA []byte, err error) { +func (m *WaitForPositionResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4512,12 +4597,12 @@ func (m *VDiffOptions) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VDiffOptions) MarshalToVT(dAtA []byte) (int, error) { +func (m *WaitForPositionResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VDiffOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *WaitForPositionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4529,1634 +4614,7300 @@ func (m *VDiffOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.ReportOptions != nil { - size, err := m.ReportOptions.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a - } - if m.CoreOptions != nil { - size, err := m.CoreOptions.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - } - if m.PickerOptions != nil { - size, err := m.PickerOptions.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func encodeVarint(dAtA []byte, offset int, v uint64) int { - offset -= sov(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *StopReplicationRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - dAtA[offset] = uint8(v) - return base + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *TableDefinition) SizeVT() (n int) { + +func (m *StopReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StopReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Schema) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if len(m.Columns) > 0 { - for _, s := range m.Columns { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - if len(m.PrimaryKeyColumns) > 0 { - for _, s := range m.PrimaryKeyColumns { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - l = len(m.Type) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.DataLength != 0 { - n += 1 + sov(uint64(m.DataLength)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.RowCount != 0 { - n += 1 + sov(uint64(m.RowCount)) + return len(dAtA) - i, nil +} + +func (m *StopReplicationResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if len(m.Fields) > 0 { - for _, e := range m.Fields { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SchemaDefinition) SizeVT() (n int) { +func (m *StopReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StopReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.DatabaseSchema) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if len(m.TableDefinitions) > 0 { - for _, e := range m.TableDefinitions { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *SchemaChangeResult) SizeVT() (n int) { +func (m *StopReplicationMinimumRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.BeforeSchema != nil { - l = m.BeforeSchema.SizeVT() - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.AfterSchema != nil { - l = m.AfterSchema.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *UserPermission) SizeVT() (n int) { +func (m *StopReplicationMinimumRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StopReplicationMinimumRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Host) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.User) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.WaitTimeout != 0 { + i = encodeVarint(dAtA, i, uint64(m.WaitTimeout)) + i-- + dAtA[i] = 0x10 } - if m.PasswordChecksum != 0 { - n += 1 + sov(uint64(m.PasswordChecksum)) + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa } - if len(m.Privileges) > 0 { - for k, v := range m.Privileges { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + return len(dAtA) - i, nil +} + +func (m *StopReplicationMinimumResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *DbPermission) SizeVT() (n int) { +func (m *StopReplicationMinimumResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StopReplicationMinimumResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Host) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Db) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa } - l = len(m.User) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *StartReplicationRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if len(m.Privileges) > 0 { - for k, v := range m.Privileges { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *Permissions) SizeVT() (n int) { +func (m *StartReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StartReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.UserPermissions) > 0 { - for _, e := range m.UserPermissions { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.DbPermissions) > 0 { - for _, e := range m.DbPermissions { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.SemiSync { + i-- + if m.SemiSync { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x8 } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *PingRequest) SizeVT() (n int) { +func (m *StartReplicationResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.Payload) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *PingResponse) SizeVT() (n int) { +func (m *StartReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StartReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Payload) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *SleepRequest) SizeVT() (n int) { +func (m *StartReplicationUntilAfterRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Duration != 0 { - n += 1 + sov(uint64(m.Duration)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SleepResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *StartReplicationUntilAfterRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ExecuteHookRequest) SizeVT() (n int) { +func (m *StartReplicationUntilAfterRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.Parameters) > 0 { - for _, s := range m.Parameters { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if m.WaitTimeout != 0 { + i = encodeVarint(dAtA, i, uint64(m.WaitTimeout)) + i-- + dAtA[i] = 0x10 } - if len(m.ExtraEnv) > 0 { - for k, v := range m.ExtraEnv { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ExecuteHookResponse) SizeVT() (n int) { +func (m *StartReplicationUntilAfterResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.ExitStatus != 0 { - n += 1 + sov(uint64(m.ExitStatus)) - } - l = len(m.Stdout) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - l = len(m.Stderr) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetSchemaRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Tables) > 0 { - for _, s := range m.Tables { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - if m.IncludeViews { - n += 2 - } - if len(m.ExcludeTables) > 0 { - for _, s := range m.ExcludeTables { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - if m.TableSchemaOnly { - n += 2 - } - n += len(m.unknownFields) - return n +func (m *StartReplicationUntilAfterResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetSchemaResponse) SizeVT() (n int) { +func (m *StartReplicationUntilAfterResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.SchemaDefinition != nil { - l = m.SchemaDefinition.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetPermissionsRequest) SizeVT() (n int) { - if m == nil { - return 0 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - var l int - _ = l - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetPermissionsResponse) SizeVT() (n int) { +func (m *GetReplicasRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Permissions != nil { - l = m.Permissions.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SetReadOnlyRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *GetReplicasRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetReadOnlyResponse) SizeVT() (n int) { +func (m *GetReplicasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil } -func (m *SetReadWriteRequest) SizeVT() (n int) { +func (m *GetReplicasResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *SetReadWriteResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *GetReplicasResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ChangeTypeRequest) SizeVT() (n int) { +func (m *GetReplicasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletType != 0 { - n += 1 + sov(uint64(m.TabletType)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.SemiSync { - n += 2 + if len(m.Addrs) > 0 { + for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Addrs[iNdEx]) + copy(dAtA[i:], m.Addrs[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Addrs[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ChangeTypeResponse) SizeVT() (n int) { +func (m *ResetReplicationRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *RefreshStateRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *ResetReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RefreshStateResponse) SizeVT() (n int) { +func (m *ResetReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil } -func (m *RunHealthCheckRequest) SizeVT() (n int) { +func (m *ResetReplicationResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *RunHealthCheckResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *ResetReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReloadSchemaRequest) SizeVT() (n int) { +func (m *ResetReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.WaitPosition) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ReloadSchemaResponse) SizeVT() (n int) { +func (m *VReplicationExecRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PreflightSchemaRequest) SizeVT() (n int) { +func (m *VReplicationExecRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VReplicationExecRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Changes) > 0 { - for _, s := range m.Changes { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n -} + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} -func (m *PreflightSchemaResponse) SizeVT() (n int) { +func (m *VReplicationExecResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.ChangeResults) > 0 { - for _, e := range m.ChangeResults { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ApplySchemaRequest) SizeVT() (n int) { +func (m *VReplicationExecResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VReplicationExecResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Sql) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Force { - n += 2 - } - if m.AllowReplication { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.BeforeSchema != nil { - l = m.BeforeSchema.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - if m.AfterSchema != nil { - l = m.AfterSchema.SizeVT() - n += 1 + l + sov(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *VReplicationWaitForPosRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - l = len(m.SqlMode) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ApplySchemaResponse) SizeVT() (n int) { +func (m *VReplicationWaitForPosRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VReplicationWaitForPosRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.BeforeSchema != nil { - l = m.BeforeSchema.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.AfterSchema != nil { - l = m.AfterSchema.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - return n + if m.Id != 0 { + i = encodeVarint(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *LockTablesRequest) SizeVT() (n int) { +func (m *VReplicationWaitForPosResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *LockTablesResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *VReplicationWaitForPosResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UnlockTablesRequest) SizeVT() (n int) { +func (m *VReplicationWaitForPosResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil } -func (m *UnlockTablesResponse) SizeVT() (n int) { +func (m *InitPrimaryRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ExecuteQueryRequest) SizeVT() (n int) { +func (m *InitPrimaryRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *InitPrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Query) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.DbName) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.MaxRows != 0 { - n += 1 + sov(uint64(m.MaxRows)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.CallerId != nil { - l = m.CallerId.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.SemiSync { + i-- + if m.SemiSync { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ExecuteQueryResponse) SizeVT() (n int) { +func (m *InitPrimaryResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Result != nil { - l = m.Result.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ExecuteFetchAsDbaRequest) SizeVT() (n int) { +func (m *InitPrimaryResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *InitPrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Query) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.DbName) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.MaxRows != 0 { - n += 1 + sov(uint64(m.MaxRows)) + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa } - if m.DisableBinlogs { - n += 2 - } - if m.ReloadSchema { - n += 2 - } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ExecuteFetchAsDbaResponse) SizeVT() (n int) { +func (m *PopulateReparentJournalRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Result != nil { - l = m.Result.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ExecuteFetchAsAllPrivsRequest) SizeVT() (n int) { +func (m *PopulateReparentJournalRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PopulateReparentJournalRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Query) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.DbName) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.ReplicationPosition) > 0 { + i -= len(m.ReplicationPosition) + copy(dAtA[i:], m.ReplicationPosition) + i = encodeVarint(dAtA, i, uint64(len(m.ReplicationPosition))) + i-- + dAtA[i] = 0x22 } - if m.MaxRows != 0 { - n += 1 + sov(uint64(m.MaxRows)) + if m.PrimaryAlias != nil { + size, err := m.PrimaryAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } - if m.ReloadSchema { - n += 2 + if len(m.ActionName) > 0 { + i -= len(m.ActionName) + copy(dAtA[i:], m.ActionName) + i = encodeVarint(dAtA, i, uint64(len(m.ActionName))) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - return n + if m.TimeCreatedNs != 0 { + i = encodeVarint(dAtA, i, uint64(m.TimeCreatedNs)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *ExecuteFetchAsAllPrivsResponse) SizeVT() (n int) { +func (m *PopulateReparentJournalResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Result != nil { - l = m.Result.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ExecuteFetchAsAppRequest) SizeVT() (n int) { +func (m *PopulateReparentJournalResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PopulateReparentJournalResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Query) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.MaxRows != 0 { - n += 1 + sov(uint64(m.MaxRows)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ExecuteFetchAsAppResponse) SizeVT() (n int) { +func (m *InitReplicaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Result != nil { - l = m.Result.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ReplicationStatusRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *InitReplicaRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReplicationStatusResponse) SizeVT() (n int) { +func (m *InitReplicaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Status != nil { - l = m.Status.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if m.SemiSync { + i-- + if m.SemiSync { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.TimeCreatedNs != 0 { + i = encodeVarint(dAtA, i, uint64(m.TimeCreatedNs)) + i-- + dAtA[i] = 0x18 + } + if len(m.ReplicationPosition) > 0 { + i -= len(m.ReplicationPosition) + copy(dAtA[i:], m.ReplicationPosition) + i = encodeVarint(dAtA, i, uint64(len(m.ReplicationPosition))) + i-- + dAtA[i] = 0x12 + } + if m.Parent != nil { + size, err := m.Parent.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *PrimaryStatusRequest) SizeVT() (n int) { +func (m *InitReplicaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PrimaryStatusResponse) SizeVT() (n int) { +func (m *InitReplicaResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *InitReplicaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Status != nil { - l = m.Status.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *PrimaryPositionRequest) SizeVT() (n int) { +func (m *DemotePrimaryRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PrimaryPositionResponse) SizeVT() (n int) { +func (m *DemotePrimaryRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DemotePrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *WaitForPositionRequest) SizeVT() (n int) { +func (m *DemotePrimaryResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *WaitForPositionResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *DemotePrimaryResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationRequest) SizeVT() (n int) { +func (m *DemotePrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PrimaryStatus != nil { + size, err := m.PrimaryStatus.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil } -func (m *StopReplicationResponse) SizeVT() (n int) { +func (m *UndoDemotePrimaryRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *StopReplicationMinimumRequest) SizeVT() (n int) { +func (m *UndoDemotePrimaryRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UndoDemotePrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.WaitTimeout != 0 { - n += 1 + sov(uint64(m.WaitTimeout)) + if m.SemiSync { + i-- + if m.SemiSync { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *StopReplicationMinimumResponse) SizeVT() (n int) { +func (m *UndoDemotePrimaryResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *StartReplicationRequest) SizeVT() (n int) { +func (m *UndoDemotePrimaryResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UndoDemotePrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.SemiSync { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *StartReplicationResponse) SizeVT() (n int) { +func (m *ReplicaWasPromotedRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *StartReplicationUntilAfterRequest) SizeVT() (n int) { +func (m *ReplicaWasPromotedRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReplicaWasPromotedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.WaitTimeout != 0 { - n += 1 + sov(uint64(m.WaitTimeout)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *StartReplicationUntilAfterResponse) SizeVT() (n int) { +func (m *ReplicaWasPromotedResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *GetReplicasRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *ReplicaWasPromotedResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetReplicasResponse) SizeVT() (n int) { +func (m *ReplicaWasPromotedResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Addrs) > 0 { - for _, s := range m.Addrs { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ResetReplicationRequest) SizeVT() (n int) { +func (m *ResetReplicationParametersRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ResetReplicationResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *ResetReplicationParametersRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *VReplicationExecRequest) SizeVT() (n int) { +func (m *ResetReplicationParametersRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Query) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *VReplicationExecResponse) SizeVT() (n int) { +func (m *ResetReplicationParametersResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Result != nil { - l = m.Result.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *VReplicationWaitForPosRequest) SizeVT() (n int) { +func (m *ResetReplicationParametersResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ResetReplicationParametersResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Id != 0 { - n += 1 + sov(uint64(m.Id)) - } - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *VReplicationWaitForPosResponse) SizeVT() (n int) { +func (m *FullStatusRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *InitPrimaryRequest) SizeVT() (n int) { +func (m *FullStatusRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *FullStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.SemiSync { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *InitPrimaryResponse) SizeVT() (n int) { +func (m *FullStatusResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *PopulateReparentJournalRequest) SizeVT() (n int) { +func (m *FullStatusResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *FullStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TimeCreatedNs != 0 { - n += 1 + sov(uint64(m.TimeCreatedNs)) - } - l = len(m.ActionName) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.PrimaryAlias != nil { - l = m.PrimaryAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.ReplicationPosition) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.Status != nil { + size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *PopulateReparentJournalResponse) SizeVT() (n int) { +func (m *SetReplicationSourceRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *InitReplicaRequest) SizeVT() (n int) { +func (m *SetReplicationSourceRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetReplicationSourceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Parent != nil { - l = m.Parent.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.ReplicationPosition) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.SemiSync { + i-- + if m.SemiSync { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.WaitPosition) > 0 { + i -= len(m.WaitPosition) + copy(dAtA[i:], m.WaitPosition) + i = encodeVarint(dAtA, i, uint64(len(m.WaitPosition))) + i-- + dAtA[i] = 0x22 + } + if m.ForceStartReplication { + i-- + if m.ForceStartReplication { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 } if m.TimeCreatedNs != 0 { - n += 1 + sov(uint64(m.TimeCreatedNs)) + i = encodeVarint(dAtA, i, uint64(m.TimeCreatedNs)) + i-- + dAtA[i] = 0x10 } - if m.SemiSync { - n += 2 + if m.Parent != nil { + size, err := m.Parent.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *InitReplicaResponse) SizeVT() (n int) { +func (m *SetReplicationSourceResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *DemotePrimaryRequest) SizeVT() (n int) { +func (m *SetReplicationSourceResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetReplicationSourceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil } -func (m *DemotePrimaryResponse) SizeVT() (n int) { +func (m *ReplicaWasRestartedRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.PrimaryStatus != nil { - l = m.PrimaryStatus.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *UndoDemotePrimaryRequest) SizeVT() (n int) { +func (m *ReplicaWasRestartedRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReplicaWasRestartedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.SemiSync { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if m.Parent != nil { + size, err := m.Parent.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *UndoDemotePrimaryResponse) SizeVT() (n int) { +func (m *ReplicaWasRestartedResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ReplicaWasPromotedRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *ReplicaWasRestartedResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReplicaWasPromotedResponse) SizeVT() (n int) { +func (m *ReplicaWasRestartedResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil } -func (m *ResetReplicationParametersRequest) SizeVT() (n int) { +func (m *StopReplicationAndGetStatusRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ResetReplicationParametersResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *StopReplicationAndGetStatusRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *FullStatusRequest) SizeVT() (n int) { +func (m *StopReplicationAndGetStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.StopReplicationMode != 0 { + i = encodeVarint(dAtA, i, uint64(m.StopReplicationMode)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *FullStatusResponse) SizeVT() (n int) { +func (m *StopReplicationAndGetStatusResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Status != nil { - l = m.Status.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SetReplicationSourceRequest) SizeVT() (n int) { +func (m *StopReplicationAndGetStatusResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StopReplicationAndGetStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Parent != nil { - l = m.Parent.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.TimeCreatedNs != 0 { - n += 1 + sov(uint64(m.TimeCreatedNs)) - } - if m.ForceStartReplication { - n += 2 - } - l = len(m.WaitPosition) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.SemiSync { - n += 2 + if m.Status != nil { + size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *SetReplicationSourceResponse) SizeVT() (n int) { +func (m *PromoteReplicaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ReplicaWasRestartedRequest) SizeVT() (n int) { +func (m *PromoteReplicaRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PromoteReplicaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Parent != nil { - l = m.Parent.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if m.SemiSync { + i-- + if m.SemiSync { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *ReplicaWasRestartedResponse) SizeVT() (n int) { +func (m *PromoteReplicaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *StopReplicationAndGetStatusRequest) SizeVT() (n int) { +func (m *PromoteReplicaResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PromoteReplicaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.StopReplicationMode != 0 { - n += 1 + sov(uint64(m.StopReplicationMode)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *StopReplicationAndGetStatusResponse) SizeVT() (n int) { +func (m *BackupRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Status != nil { - l = m.Status.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *PromoteReplicaRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.SemiSync { - n += 2 - } - n += len(m.unknownFields) - return n +func (m *BackupRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PromoteReplicaResponse) SizeVT() (n int) { +func (m *BackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n -} - -func (m *BackupRequest) SizeVT() (n int) { - if m == nil { - return 0 + if m.UpgradeSafe { + i-- + if m.UpgradeSafe { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } - var l int - _ = l - if m.Concurrency != 0 { - n += 1 + sov(uint64(m.Concurrency)) + if len(m.IncrementalFromPos) > 0 { + i -= len(m.IncrementalFromPos) + copy(dAtA[i:], m.IncrementalFromPos) + i = encodeVarint(dAtA, i, uint64(len(m.IncrementalFromPos))) + i-- + dAtA[i] = 0x1a } if m.AllowPrimary { - n += 2 + i-- + if m.AllowPrimary { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 } - l = len(m.IncrementalFromPos) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.Concurrency != 0 { + i = encodeVarint(dAtA, i, uint64(m.Concurrency)) + i-- + dAtA[i] = 0x8 } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *BackupResponse) SizeVT() (n int) { +func (m *BackupResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Event != nil { - l = m.Event.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *RestoreFromBackupRequest) SizeVT() (n int) { +func (m *BackupResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *BackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.BackupTime != nil { - l = m.BackupTime.SizeVT() - n += 1 + l + sov(uint64(l)) - } - l = len(m.RestoreToPos) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.DryRun { - n += 2 + if m.Event != nil { + size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *RestoreFromBackupResponse) SizeVT() (n int) { +func (m *RestoreFromBackupRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Event != nil { - l = m.Event.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *VDiffRequest) SizeVT() (n int) { +func (m *RestoreFromBackupRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Workflow) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Action) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.RestoreToTimestamp != nil { + size, err := m.RestoreToTimestamp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 } - l = len(m.ActionArg) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.DryRun { + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 } - l = len(m.VdiffUuid) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.RestoreToPos) > 0 { + i -= len(m.RestoreToPos) + copy(dAtA[i:], m.RestoreToPos) + i = encodeVarint(dAtA, i, uint64(len(m.RestoreToPos))) + i-- + dAtA[i] = 0x12 } - if m.Options != nil { - l = m.Options.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.BackupTime != nil { + size, err := m.BackupTime.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *VDiffResponse) SizeVT() (n int) { +func (m *RestoreFromBackupResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.Id != 0 { - n += 1 + sov(uint64(m.Id)) - } - if m.Output != nil { - l = m.Output.SizeVT() - n += 1 + l + sov(uint64(l)) + return nil, nil } - l = len(m.VdiffUuid) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *VDiffPickerOptions) SizeVT() (n int) { +func (m *RestoreFromBackupResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RestoreFromBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.TabletTypes) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.SourceCell) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.Event != nil { + size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - l = len(m.TargetCell) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *CreateVReplicationWorkflowRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *VDiffReportOptions) SizeVT() (n int) { +func (m *CreateVReplicationWorkflowRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CreateVReplicationWorkflowRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.OnlyPks { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.DebugQuery { - n += 2 + if m.StopAfterCopy { + i-- + if m.StopAfterCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 } - l = len(m.Format) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.AutoStart { + i-- + if m.AutoStart { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 } - n += len(m.unknownFields) - return n + if m.DeferSecondaryKeys { + i-- + if m.DeferSecondaryKeys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.WorkflowSubType != 0 { + i = encodeVarint(dAtA, i, uint64(m.WorkflowSubType)) + i-- + dAtA[i] = 0x38 + } + if m.WorkflowType != 0 { + i = encodeVarint(dAtA, i, uint64(m.WorkflowType)) + i-- + dAtA[i] = 0x30 + } + if m.TabletSelectionPreference != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletSelectionPreference)) + i-- + dAtA[i] = 0x28 + } + if len(m.TabletTypes) > 0 { + var pksize2 int + for _, num := range m.TabletTypes { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.TabletTypes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x22 + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.BinlogSource) > 0 { + for iNdEx := len(m.BinlogSource) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.BinlogSource[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *VDiffCoreOptions) SizeVT() (n int) { +func (m *CreateVReplicationWorkflowResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Tables) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.AutoRetry { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - if m.MaxRows != 0 { - n += 1 + sov(uint64(m.MaxRows)) + return dAtA[:n], nil +} + +func (m *CreateVReplicationWorkflowResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CreateVReplicationWorkflowResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil } - if m.Checksum { - n += 2 + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.SamplePct != 0 { - n += 1 + sov(uint64(m.SamplePct)) + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - if m.TimeoutSeconds != 0 { - n += 1 + sov(uint64(m.TimeoutSeconds)) + return len(dAtA) - i, nil +} + +func (m *DeleteVReplicationWorkflowRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if m.MaxExtraRowsToCompare != 0 { - n += 1 + sov(uint64(m.MaxExtraRowsToCompare)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *VDiffOptions) SizeVT() (n int) { +func (m *DeleteVReplicationWorkflowRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DeleteVReplicationWorkflowRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.PickerOptions != nil { - l = m.PickerOptions.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.CoreOptions != nil { - l = m.CoreOptions.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa } - if m.ReportOptions != nil { - l = m.ReportOptions.SizeVT() - n += 1 + l + sov(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *DeleteVReplicationWorkflowResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func sov(x uint64) (n int) { - return (bits.Len64(x|1) + 6) / 7 +func (m *DeleteVReplicationWorkflowResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func soz(x uint64) (n int) { - return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + +func (m *DeleteVReplicationWorkflowResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *TableDefinition) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) + +func (m *ReadVReplicationWorkflowRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadVReplicationWorkflowRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReadVReplicationWorkflowRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReadVReplicationWorkflowResponse_Stream) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadVReplicationWorkflowResponse_Stream) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReadVReplicationWorkflowResponse_Stream) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ComponentThrottled) > 0 { + i -= len(m.ComponentThrottled) + copy(dAtA[i:], m.ComponentThrottled) + i = encodeVarint(dAtA, i, uint64(len(m.ComponentThrottled))) + i-- + dAtA[i] = 0x72 + } + if m.TimeThrottled != nil { + size, err := m.TimeThrottled.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.TimeHeartbeat != nil { + size, err := m.TimeHeartbeat.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if m.RowsCopied != 0 { + i = encodeVarint(dAtA, i, uint64(m.RowsCopied)) + i-- + dAtA[i] = 0x58 + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarint(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x52 + } + if m.State != 0 { + i = encodeVarint(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x48 + } + if m.TransactionTimestamp != nil { + size, err := m.TransactionTimestamp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.TimeUpdated != nil { + size, err := m.TimeUpdated.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.MaxReplicationLag != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxReplicationLag)) + i-- + dAtA[i] = 0x30 + } + if m.MaxTps != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxTps)) + i-- + dAtA[i] = 0x28 + } + if len(m.StopPos) > 0 { + i -= len(m.StopPos) + copy(dAtA[i:], m.StopPos) + i = encodeVarint(dAtA, i, uint64(len(m.StopPos))) + i-- + dAtA[i] = 0x22 + } + if len(m.Pos) > 0 { + i -= len(m.Pos) + copy(dAtA[i:], m.Pos) + i = encodeVarint(dAtA, i, uint64(len(m.Pos))) + i-- + dAtA[i] = 0x1a + } + if m.Bls != nil { + size, err := m.Bls.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = encodeVarint(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ReadVReplicationWorkflowResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadVReplicationWorkflowResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReadVReplicationWorkflowResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Streams) > 0 { + for iNdEx := len(m.Streams) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Streams[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + } + if m.DeferSecondaryKeys { + i-- + if m.DeferSecondaryKeys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.WorkflowSubType != 0 { + i = encodeVarint(dAtA, i, uint64(m.WorkflowSubType)) + i-- + dAtA[i] = 0x48 + } + if m.WorkflowType != 0 { + i = encodeVarint(dAtA, i, uint64(m.WorkflowType)) + i-- + dAtA[i] = 0x40 + } + if len(m.Tags) > 0 { + i -= len(m.Tags) + copy(dAtA[i:], m.Tags) + i = encodeVarint(dAtA, i, uint64(len(m.Tags))) + i-- + dAtA[i] = 0x3a + } + if len(m.DbName) > 0 { + i -= len(m.DbName) + copy(dAtA[i:], m.DbName) + i = encodeVarint(dAtA, i, uint64(len(m.DbName))) + i-- + dAtA[i] = 0x32 + } + if m.TabletSelectionPreference != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletSelectionPreference)) + i-- + dAtA[i] = 0x28 + } + if len(m.TabletTypes) > 0 { + var pksize2 int + for _, num := range m.TabletTypes { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.TabletTypes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x22 + } + if len(m.Cells) > 0 { + i -= len(m.Cells) + copy(dAtA[i:], m.Cells) + i = encodeVarint(dAtA, i, uint64(len(m.Cells))) + i-- + dAtA[i] = 0x1a + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *VDiffRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Options != nil { + size, err := m.Options.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.VdiffUuid) > 0 { + i -= len(m.VdiffUuid) + copy(dAtA[i:], m.VdiffUuid) + i = encodeVarint(dAtA, i, uint64(len(m.VdiffUuid))) + i-- + dAtA[i] = 0x2a + } + if len(m.ActionArg) > 0 { + i -= len(m.ActionArg) + copy(dAtA[i:], m.ActionArg) + i = encodeVarint(dAtA, i, uint64(len(m.ActionArg))) + i-- + dAtA[i] = 0x22 + } + if len(m.Action) > 0 { + i -= len(m.Action) + copy(dAtA[i:], m.Action) + i = encodeVarint(dAtA, i, uint64(len(m.Action))) + i-- + dAtA[i] = 0x1a + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VDiffResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.VdiffUuid) > 0 { + i -= len(m.VdiffUuid) + copy(dAtA[i:], m.VdiffUuid) + i = encodeVarint(dAtA, i, uint64(len(m.VdiffUuid))) + i-- + dAtA[i] = 0x1a + } + if m.Output != nil { + size, err := m.Output.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = encodeVarint(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *VDiffPickerOptions) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffPickerOptions) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffPickerOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.TargetCell) > 0 { + i -= len(m.TargetCell) + copy(dAtA[i:], m.TargetCell) + i = encodeVarint(dAtA, i, uint64(len(m.TargetCell))) + i-- + dAtA[i] = 0x1a + } + if len(m.SourceCell) > 0 { + i -= len(m.SourceCell) + copy(dAtA[i:], m.SourceCell) + i = encodeVarint(dAtA, i, uint64(len(m.SourceCell))) + i-- + dAtA[i] = 0x12 + } + if len(m.TabletTypes) > 0 { + i -= len(m.TabletTypes) + copy(dAtA[i:], m.TabletTypes) + i = encodeVarint(dAtA, i, uint64(len(m.TabletTypes))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VDiffReportOptions) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffReportOptions) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffReportOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Format) > 0 { + i -= len(m.Format) + copy(dAtA[i:], m.Format) + i = encodeVarint(dAtA, i, uint64(len(m.Format))) + i-- + dAtA[i] = 0x1a + } + if m.DebugQuery { + i-- + if m.DebugQuery { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.OnlyPks { + i-- + if m.OnlyPks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *VDiffCoreOptions) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffCoreOptions) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffCoreOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UpdateTableStats { + i-- + if m.UpdateTableStats { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.MaxExtraRowsToCompare != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxExtraRowsToCompare)) + i-- + dAtA[i] = 0x38 + } + if m.TimeoutSeconds != 0 { + i = encodeVarint(dAtA, i, uint64(m.TimeoutSeconds)) + i-- + dAtA[i] = 0x30 + } + if m.SamplePct != 0 { + i = encodeVarint(dAtA, i, uint64(m.SamplePct)) + i-- + dAtA[i] = 0x28 + } + if m.Checksum { + i-- + if m.Checksum { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.MaxRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxRows)) + i-- + dAtA[i] = 0x18 + } + if m.AutoRetry { + i-- + if m.AutoRetry { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Tables) > 0 { + i -= len(m.Tables) + copy(dAtA[i:], m.Tables) + i = encodeVarint(dAtA, i, uint64(len(m.Tables))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VDiffOptions) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VDiffOptions) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *VDiffOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ReportOptions != nil { + size, err := m.ReportOptions.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.CoreOptions != nil { + size, err := m.CoreOptions.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.PickerOptions != nil { + size, err := m.PickerOptions.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateVReplicationWorkflowRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateVReplicationWorkflowRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateVReplicationWorkflowRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.State != 0 { + i = encodeVarint(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x30 + } + if m.OnDdl != 0 { + i = encodeVarint(dAtA, i, uint64(m.OnDdl)) + i-- + dAtA[i] = 0x28 + } + if m.TabletSelectionPreference != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletSelectionPreference)) + i-- + dAtA[i] = 0x20 + } + if len(m.TabletTypes) > 0 { + var pksize2 int + for _, num := range m.TabletTypes { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.TabletTypes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x1a + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateVReplicationWorkflowResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateVReplicationWorkflowResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateVReplicationWorkflowResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResetSequencesRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResetSequencesRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ResetSequencesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResetSequencesResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResetSequencesResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ResetSequencesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *CheckThrottlerRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckThrottlerRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CheckThrottlerRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.AppName) > 0 { + i -= len(m.AppName) + copy(dAtA[i:], m.AppName) + i = encodeVarint(dAtA, i, uint64(len(m.AppName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CheckThrottlerResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckThrottlerResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CheckThrottlerResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.RecentlyChecked { + i-- + if m.RecentlyChecked { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarint(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarint(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x22 + } + if m.Threshold != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Threshold)))) + i-- + dAtA[i] = 0x19 + } + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if m.StatusCode != 0 { + i = encodeVarint(dAtA, i, uint64(m.StatusCode)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *TableDefinition) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Schema) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Columns) > 0 { + for _, s := range m.Columns { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.PrimaryKeyColumns) > 0 { + for _, s := range m.PrimaryKeyColumns { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.DataLength != 0 { + n += 1 + sov(uint64(m.DataLength)) + } + if m.RowCount != 0 { + n += 1 + sov(uint64(m.RowCount)) + } + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *SchemaDefinition) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DatabaseSchema) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.TableDefinitions) > 0 { + for _, e := range m.TableDefinitions { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *SchemaChangeResult) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BeforeSchema != nil { + l = m.BeforeSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.AfterSchema != nil { + l = m.AfterSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UserPermission) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.PasswordChecksum != 0 { + n += 1 + sov(uint64(m.PasswordChecksum)) + } + if len(m.Privileges) > 0 { + for k, v := range m.Privileges { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *DbPermission) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Db) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Privileges) > 0 { + for k, v := range m.Privileges { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Permissions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.UserPermissions) > 0 { + for _, e := range m.UserPermissions { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.DbPermissions) > 0 { + for _, e := range m.DbPermissions { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *PingRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Payload) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PingResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Payload) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SleepRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Duration != 0 { + n += 1 + sov(uint64(m.Duration)) + } + n += len(m.unknownFields) + return n +} + +func (m *SleepResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ExecuteHookRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Parameters) > 0 { + for _, s := range m.Parameters { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ExtraEnv) > 0 { + for k, v := range m.ExtraEnv { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteHookResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExitStatus != 0 { + n += 1 + sov(uint64(m.ExitStatus)) + } + l = len(m.Stdout) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Stderr) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Tables) > 0 { + for _, s := range m.Tables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.IncludeViews { + n += 2 + } + if len(m.ExcludeTables) > 0 { + for _, s := range m.ExcludeTables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.TableSchemaOnly { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *GetSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SchemaDefinition != nil { + l = m.SchemaDefinition.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetPermissionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetPermissionsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Permissions != nil { + l = m.Permissions.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetReadOnlyRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SetReadOnlyResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SetReadWriteRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SetReadWriteResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ChangeTypeRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletType != 0 { + n += 1 + sov(uint64(m.TabletType)) + } + if m.SemiSync { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ChangeTypeResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RefreshStateRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RefreshStateResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RunHealthCheckRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RunHealthCheckResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ReloadSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.WaitPosition) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReloadSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PreflightSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Changes) > 0 { + for _, s := range m.Changes { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *PreflightSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ChangeResults) > 0 { + for _, e := range m.ChangeResults { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ApplySchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Sql) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Force { + n += 2 + } + if m.AllowReplication { + n += 2 + } + if m.BeforeSchema != nil { + l = m.BeforeSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.AfterSchema != nil { + l = m.AfterSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.SqlMode) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.BatchSize != 0 { + n += 1 + sov(uint64(m.BatchSize)) + } + n += len(m.unknownFields) + return n +} + +func (m *ApplySchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BeforeSchema != nil { + l = m.BeforeSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.AfterSchema != nil { + l = m.AfterSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LockTablesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *LockTablesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *UnlockTablesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *UnlockTablesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ExecuteQueryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.DbName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaxRows != 0 { + n += 1 + sov(uint64(m.MaxRows)) + } + if m.CallerId != nil { + l = m.CallerId.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteQueryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsDbaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.DbName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaxRows != 0 { + n += 1 + sov(uint64(m.MaxRows)) + } + if m.DisableBinlogs { + n += 2 + } + if m.ReloadSchema { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsDbaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsAllPrivsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.DbName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaxRows != 0 { + n += 1 + sov(uint64(m.MaxRows)) + } + if m.ReloadSchema { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsAllPrivsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsAppRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaxRows != 0 { + n += 1 + sov(uint64(m.MaxRows)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsAppResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReplicationStatusRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ReplicationStatusResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PrimaryStatusRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PrimaryStatusResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PrimaryPositionRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PrimaryPositionResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *WaitForPositionRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *WaitForPositionResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *StopReplicationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *StopReplicationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *StopReplicationMinimumRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.WaitTimeout != 0 { + n += 1 + sov(uint64(m.WaitTimeout)) + } + n += len(m.unknownFields) + return n +} + +func (m *StopReplicationMinimumResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StartReplicationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SemiSync { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *StartReplicationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *StartReplicationUntilAfterRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.WaitTimeout != 0 { + n += 1 + sov(uint64(m.WaitTimeout)) + } + n += len(m.unknownFields) + return n +} + +func (m *StartReplicationUntilAfterResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetReplicasRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetReplicasResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Addrs) > 0 { + for _, s := range m.Addrs { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ResetReplicationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ResetReplicationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *VReplicationExecRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Query) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VReplicationExecResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VReplicationWaitForPosRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sov(uint64(m.Id)) + } + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VReplicationWaitForPosResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *InitPrimaryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SemiSync { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *InitPrimaryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PopulateReparentJournalRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeCreatedNs != 0 { + n += 1 + sov(uint64(m.TimeCreatedNs)) + } + l = len(m.ActionName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.PrimaryAlias != nil { + l = m.PrimaryAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.ReplicationPosition) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PopulateReparentJournalResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *InitReplicaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Parent != nil { + l = m.Parent.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.ReplicationPosition) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TimeCreatedNs != 0 { + n += 1 + sov(uint64(m.TimeCreatedNs)) + } + if m.SemiSync { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *InitReplicaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *DemotePrimaryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *DemotePrimaryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PrimaryStatus != nil { + l = m.PrimaryStatus.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UndoDemotePrimaryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SemiSync { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *UndoDemotePrimaryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ReplicaWasPromotedRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ReplicaWasPromotedResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ResetReplicationParametersRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ResetReplicationParametersResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *FullStatusRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *FullStatusResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetReplicationSourceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Parent != nil { + l = m.Parent.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.TimeCreatedNs != 0 { + n += 1 + sov(uint64(m.TimeCreatedNs)) + } + if m.ForceStartReplication { + n += 2 + } + l = len(m.WaitPosition) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.SemiSync { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SetReplicationSourceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ReplicaWasRestartedRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Parent != nil { + l = m.Parent.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReplicaWasRestartedResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *StopReplicationAndGetStatusRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StopReplicationMode != 0 { + n += 1 + sov(uint64(m.StopReplicationMode)) + } + n += len(m.unknownFields) + return n +} + +func (m *StopReplicationAndGetStatusResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PromoteReplicaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SemiSync { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *PromoteReplicaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *BackupRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Concurrency != 0 { + n += 1 + sov(uint64(m.Concurrency)) + } + if m.AllowPrimary { + n += 2 + } + l = len(m.IncrementalFromPos) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.UpgradeSafe { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *BackupResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Event != nil { + l = m.Event.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RestoreFromBackupRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BackupTime != nil { + l = m.BackupTime.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.RestoreToPos) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.DryRun { + n += 2 + } + if m.RestoreToTimestamp != nil { + l = m.RestoreToTimestamp.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RestoreFromBackupResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Event != nil { + l = m.Event.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CreateVReplicationWorkflowRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.BinlogSource) > 0 { + for _, e := range m.BinlogSource { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.TabletTypes) > 0 { + l = 0 + for _, e := range m.TabletTypes { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if m.TabletSelectionPreference != 0 { + n += 1 + sov(uint64(m.TabletSelectionPreference)) + } + if m.WorkflowType != 0 { + n += 1 + sov(uint64(m.WorkflowType)) + } + if m.WorkflowSubType != 0 { + n += 1 + sov(uint64(m.WorkflowSubType)) + } + if m.DeferSecondaryKeys { + n += 2 + } + if m.AutoStart { + n += 2 + } + if m.StopAfterCopy { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *CreateVReplicationWorkflowResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteVReplicationWorkflowRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteVReplicationWorkflowResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReadVReplicationWorkflowRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReadVReplicationWorkflowResponse_Stream) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sov(uint64(m.Id)) + } + if m.Bls != nil { + l = m.Bls.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Pos) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.StopPos) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaxTps != 0 { + n += 1 + sov(uint64(m.MaxTps)) + } + if m.MaxReplicationLag != 0 { + n += 1 + sov(uint64(m.MaxReplicationLag)) + } + if m.TimeUpdated != nil { + l = m.TimeUpdated.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.TransactionTimestamp != nil { + l = m.TransactionTimestamp.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.State != 0 { + n += 1 + sov(uint64(m.State)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.RowsCopied != 0 { + n += 1 + sov(uint64(m.RowsCopied)) + } + if m.TimeHeartbeat != nil { + l = m.TimeHeartbeat.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.TimeThrottled != nil { + l = m.TimeThrottled.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.ComponentThrottled) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReadVReplicationWorkflowResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Cells) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.TabletTypes) > 0 { + l = 0 + for _, e := range m.TabletTypes { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if m.TabletSelectionPreference != 0 { + n += 1 + sov(uint64(m.TabletSelectionPreference)) + } + l = len(m.DbName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Tags) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.WorkflowType != 0 { + n += 1 + sov(uint64(m.WorkflowType)) + } + if m.WorkflowSubType != 0 { + n += 1 + sov(uint64(m.WorkflowSubType)) + } + if m.DeferSecondaryKeys { + n += 2 + } + if len(m.Streams) > 0 { + for _, e := range m.Streams { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Action) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ActionArg) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.VdiffUuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Options != nil { + l = m.Options.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sov(uint64(m.Id)) + } + if m.Output != nil { + l = m.Output.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.VdiffUuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffPickerOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TabletTypes) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SourceCell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetCell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffReportOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OnlyPks { + n += 2 + } + if m.DebugQuery { + n += 2 + } + l = len(m.Format) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffCoreOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tables) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.AutoRetry { + n += 2 + } + if m.MaxRows != 0 { + n += 1 + sov(uint64(m.MaxRows)) + } + if m.Checksum { + n += 2 + } + if m.SamplePct != 0 { + n += 1 + sov(uint64(m.SamplePct)) + } + if m.TimeoutSeconds != 0 { + n += 1 + sov(uint64(m.TimeoutSeconds)) + } + if m.MaxExtraRowsToCompare != 0 { + n += 1 + sov(uint64(m.MaxExtraRowsToCompare)) + } + if m.UpdateTableStats { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *VDiffOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PickerOptions != nil { + l = m.PickerOptions.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.CoreOptions != nil { + l = m.CoreOptions.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.ReportOptions != nil { + l = m.ReportOptions.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateVReplicationWorkflowRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.TabletTypes) > 0 { + l = 0 + for _, e := range m.TabletTypes { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if m.TabletSelectionPreference != 0 { + n += 1 + sov(uint64(m.TabletSelectionPreference)) + } + if m.OnDdl != 0 { + n += 1 + sov(uint64(m.OnDdl)) + } + if m.State != 0 { + n += 1 + sov(uint64(m.State)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateVReplicationWorkflowResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ResetSequencesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Tables) > 0 { + for _, s := range m.Tables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ResetSequencesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *CheckThrottlerRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AppName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CheckThrottlerResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StatusCode != 0 { + n += 1 + sov(uint64(m.StatusCode)) + } + if m.Value != 0 { + n += 9 + } + if m.Threshold != 0 { + n += 9 + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.RecentlyChecked { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *TableDefinition) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schema = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Columns", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Columns = append(m.Columns, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrimaryKeyColumns", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrimaryKeyColumns = append(m.PrimaryKeyColumns, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DataLength", wireType) + } + m.DataLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DataLength |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RowCount", wireType) + } + m.RowCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RowCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, &query.Field{}) + if err := m.Fields[len(m.Fields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SchemaDefinition) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchemaDefinition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchemaDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DatabaseSchema", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DatabaseSchema = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableDefinitions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableDefinitions = append(m.TableDefinitions, &TableDefinition{}) + if err := m.TableDefinitions[len(m.TableDefinitions)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SchemaChangeResult) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchemaChangeResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchemaChangeResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeforeSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BeforeSchema == nil { + m.BeforeSchema = &SchemaDefinition{} + } + if err := m.BeforeSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AfterSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AfterSchema == nil { + m.AfterSchema = &SchemaDefinition{} + } + if err := m.AfterSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserPermission) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserPermission: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserPermission: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PasswordChecksum", wireType) + } + m.PasswordChecksum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PasswordChecksum |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Privileges == nil { + m.Privileges = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Privileges[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DbPermission) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DbPermission: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DbPermission: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Db", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Db = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Privileges == nil { + m.Privileges = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Privileges[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Permissions) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Permissions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Permissions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserPermissions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserPermissions = append(m.UserPermissions, &UserPermission{}) + if err := m.UserPermissions[len(m.UserPermissions)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DbPermissions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DbPermissions = append(m.DbPermissions, &DbPermission{}) + if err := m.DbPermissions[len(m.DbPermissions)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PingRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PingResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SleepRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SleepRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SleepRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SleepResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SleepResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SleepResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteHookRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteHookRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parameters = append(m.Parameters, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtraEnv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExtraEnv == nil { + m.ExtraEnv = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExtraEnv[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteHookResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteHookResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) + } + m.ExitStatus = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitStatus |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdout = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stderr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeViews = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSchemaOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TableSchemaOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaDefinition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SchemaDefinition == nil { + m.SchemaDefinition = &SchemaDefinition{} + } + if err := m.SchemaDefinition.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetPermissionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetPermissionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetPermissionsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetPermissionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Permissions == nil { + m.Permissions = &Permissions{} + } + if err := m.Permissions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetReadOnlyRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReadOnlyRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReadOnlyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetReadOnlyResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -6178,113 +11929,170 @@ func (m *TableDefinition) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TableDefinition: wiretype end group for non-group") + return fmt.Errorf("proto: SetReadOnlyResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TableDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetReadOnlyResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetReadWriteRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReadWriteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReadWriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetReadWriteResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Schema = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Columns", wireType) + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetReadWriteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetReadWriteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChangeTypeRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Columns = append(m.Columns, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrimaryKeyColumns", wireType) + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChangeTypeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChangeTypeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) } - var stringLen uint64 + m.TabletType = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6294,29 +12102,16 @@ func (m *TableDefinition) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.TabletType |= topodata.TabletType(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PrimaryKeyColumns = append(m.PrimaryKeyColumns, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6326,96 +12121,267 @@ func (m *TableDefinition) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + m.SemiSync = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChangeTypeResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChangeTypeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChangeTypeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DataLength", wireType) + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RefreshStateRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - m.DataLength = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DataLength |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if iNdEx >= l { + return io.ErrUnexpectedEOF } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RowCount", wireType) + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.RowCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RowCount |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RefreshStateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RefreshStateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RefreshStateResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RefreshStateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RefreshStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunHealthCheckRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunHealthCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - if msglen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunHealthCheckResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Fields = append(m.Fields, &query.Field{}) - if err := m.Fields[len(m.Fields)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunHealthCheckResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunHealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -6438,7 +12404,7 @@ func (m *TableDefinition) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SchemaDefinition) UnmarshalVT(dAtA []byte) error { +func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6461,15 +12427,15 @@ func (m *SchemaDefinition) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SchemaDefinition: wiretype end group for non-group") + return fmt.Errorf("proto: ReloadSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SchemaDefinition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReloadSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DatabaseSchema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6497,74 +12463,59 @@ func (m *SchemaDefinition) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DatabaseSchema = string(dAtA[iNdEx:postIndex]) + m.WaitPosition = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TableDefinitions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.TableDefinitions = append(m.TableDefinitions, &TableDefinition{}) - if err := m.TableDefinitions[len(m.TableDefinitions)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReloadSchemaResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReloadSchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReloadSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -6587,7 +12538,7 @@ func (m *SchemaDefinition) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SchemaChangeResult) UnmarshalVT(dAtA []byte) error { +func (m *PreflightSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6610,17 +12561,17 @@ func (m *SchemaChangeResult) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SchemaChangeResult: wiretype end group for non-group") + return fmt.Errorf("proto: PreflightSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SchemaChangeResult: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PreflightSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeforeSchema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6630,31 +12581,78 @@ func (m *SchemaChangeResult) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.BeforeSchema == nil { - m.BeforeSchema = &SchemaDefinition{} - } - if err := m.BeforeSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Changes = append(m.Changes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 2: + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreflightSchemaResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreflightSchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreflightSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AfterSchema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ChangeResults", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6681,10 +12679,8 @@ func (m *SchemaChangeResult) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.AfterSchema == nil { - m.AfterSchema = &SchemaDefinition{} - } - if err := m.AfterSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.ChangeResults = append(m.ChangeResults, &SchemaChangeResult{}) + if err := m.ChangeResults[len(m.ChangeResults)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -6710,7 +12706,7 @@ func (m *SchemaChangeResult) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UserPermission) UnmarshalVT(dAtA []byte) error { +func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6733,15 +12729,15 @@ func (m *UserPermission) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UserPermission: wiretype end group for non-group") + return fmt.Errorf("proto: ApplySchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UserPermission: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplySchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6769,13 +12765,13 @@ func (m *UserPermission) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) + m.Sql = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6785,29 +12781,17 @@ func (m *UserPermission) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.Force = bool(v != 0) case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PasswordChecksum", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowReplication", wireType) } - m.PasswordChecksum = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -6817,14 +12801,15 @@ func (m *UserPermission) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PasswordChecksum |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } + m.AllowReplication = bool(v != 0) case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BeforeSchema", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6851,160 +12836,18 @@ func (m *UserPermission) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Privileges == nil { - m.Privileges = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLength - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLength - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if m.BeforeSchema == nil { + m.BeforeSchema = &SchemaDefinition{} } - m.Privileges[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { + if err := m.BeforeSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DbPermission) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DbPermission: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DbPermission: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AfterSchema", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -7014,59 +12857,31 @@ func (m *DbPermission) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Db", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if m.AfterSchema == nil { + m.AfterSchema = &SchemaDefinition{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.AfterSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Db = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SqlMode", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7094,13 +12909,13 @@ func (m *DbPermission) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.User = string(dAtA[iNdEx:postIndex]) + m.SqlMode = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BatchSize", wireType) } - var msglen int + m.BatchSize = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -7110,119 +12925,11 @@ func (m *DbPermission) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.BatchSize |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Privileges == nil { - m.Privileges = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLength - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLength - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Privileges[mapkey] = mapvalue - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -7245,7 +12952,7 @@ func (m *DbPermission) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Permissions) UnmarshalVT(dAtA []byte) error { +func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7268,15 +12975,15 @@ func (m *Permissions) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Permissions: wiretype end group for non-group") + return fmt.Errorf("proto: ApplySchemaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Permissions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplySchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserPermissions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BeforeSchema", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7303,14 +13010,16 @@ func (m *Permissions) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.UserPermissions = append(m.UserPermissions, &UserPermission{}) - if err := m.UserPermissions[len(m.UserPermissions)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.BeforeSchema == nil { + m.BeforeSchema = &SchemaDefinition{} + } + if err := m.BeforeSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DbPermissions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AfterSchema", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7337,8 +13046,10 @@ func (m *Permissions) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DbPermissions = append(m.DbPermissions, &DbPermission{}) - if err := m.DbPermissions[len(m.DbPermissions)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.AfterSchema == nil { + m.AfterSchema = &SchemaDefinition{} + } + if err := m.AfterSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -7364,7 +13075,7 @@ func (m *Permissions) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PingRequest) UnmarshalVT(dAtA []byte) error { +func (m *LockTablesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7387,44 +13098,12 @@ func (m *PingRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PingRequest: wiretype end group for non-group") + return fmt.Errorf("proto: LockTablesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PingRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LockTablesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Payload = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -7447,7 +13126,7 @@ func (m *PingRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PingResponse) UnmarshalVT(dAtA []byte) error { +func (m *LockTablesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7470,44 +13149,12 @@ func (m *PingResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PingResponse: wiretype end group for non-group") + return fmt.Errorf("proto: LockTablesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PingResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LockTablesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Payload = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -7530,7 +13177,7 @@ func (m *PingResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SleepRequest) UnmarshalVT(dAtA []byte) error { +func (m *UnlockTablesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7553,31 +13200,12 @@ func (m *SleepRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SleepRequest: wiretype end group for non-group") + return fmt.Errorf("proto: UnlockTablesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SleepRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UnlockTablesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) - } - m.Duration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Duration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -7600,7 +13228,7 @@ func (m *SleepRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SleepResponse) UnmarshalVT(dAtA []byte) error { +func (m *UnlockTablesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7623,10 +13251,10 @@ func (m *SleepResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SleepResponse: wiretype end group for non-group") + return fmt.Errorf("proto: UnlockTablesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SleepResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UnlockTablesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -7651,7 +13279,7 @@ func (m *SleepResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteQueryRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7674,17 +13302,17 @@ func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteHookRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteQueryRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteHookRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteQueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -7694,27 +13322,29 @@ func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) + if m.Query == nil { + m.Query = []byte{} + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7742,11 +13372,30 @@ func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Parameters = append(m.Parameters, string(dAtA[iNdEx:postIndex])) + m.DbName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + } + m.MaxRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRows |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExtraEnv", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CallerId", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7773,103 +13422,12 @@ func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ExtraEnv == nil { - m.ExtraEnv = make(map[string]string) + if m.CallerId == nil { + m.CallerId = &vtrpc.CallerID{} } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLength - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLength - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.CallerId.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.ExtraEnv[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -7893,7 +13451,7 @@ func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteQueryResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7916,36 +13474,17 @@ func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteHookResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteQueryResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteHookResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteQueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) - } - m.ExitStatus = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitStatus |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -7955,55 +13494,27 @@ func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Stdout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if m.Result == nil { + m.Result = &query.QueryResult{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Stderr = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -8027,7 +13538,7 @@ func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsDbaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8040,25 +13551,59 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteFetchAsDbaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteFetchAsDbaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) + if m.Query == nil { + m.Query = []byte{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8086,13 +13631,13 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) + m.DbName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) } - var v int + m.MaxRows = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -8102,17 +13647,16 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.MaxRows |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.IncludeViews = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DisableBinlogs", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -8122,27 +13666,15 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: + m.DisableBinlogs = bool(v != 0) + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TableSchemaOnly", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReloadSchema", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -8159,7 +13691,7 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { break } } - m.TableSchemaOnly = bool(v != 0) + m.ReloadSchema = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -8182,7 +13714,7 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsDbaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8205,15 +13737,15 @@ func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsDbaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsDbaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaDefinition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8240,10 +13772,10 @@ func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SchemaDefinition == nil { - m.SchemaDefinition = &SchemaDefinition{} + if m.Result == nil { + m.Result = &query.QueryResult{} } - if err := m.SchemaDefinition.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -8269,7 +13801,7 @@ func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8292,68 +13824,51 @@ func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetPermissionsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsAllPrivsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetPermissionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsAllPrivsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if byteLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) + if m.Query == nil { + m.Query = []byte{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetPermissionsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetPermissionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -8363,28 +13878,63 @@ func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Permissions == nil { - m.Permissions = &Permissions{} + m.DbName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) } - if err := m.Permissions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + m.MaxRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRows |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReloadSchema", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReloadSchema = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -8407,7 +13957,7 @@ func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetReadOnlyRequest) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsAllPrivsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8430,12 +13980,48 @@ func (m *SetReadOnlyRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetReadOnlyRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsAllPrivsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetReadOnlyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsAllPrivsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -8458,7 +14044,7 @@ func (m *SetReadOnlyRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetReadOnlyResponse) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8481,12 +14067,65 @@ func (m *SetReadOnlyResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetReadOnlyResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsAppRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetReadOnlyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsAppRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) + if m.Query == nil { + m.Query = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + } + m.MaxRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRows |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -8509,7 +14148,7 @@ func (m *SetReadOnlyResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetReadWriteRequest) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8532,12 +14171,48 @@ func (m *SetReadWriteRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetReadWriteRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsAppResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetReadWriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsAppResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -8560,7 +14235,7 @@ func (m *SetReadWriteRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetReadWriteResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReplicationStatusRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8583,10 +14258,10 @@ func (m *SetReadWriteResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetReadWriteResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicationStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetReadWriteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicationStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -8611,7 +14286,7 @@ func (m *SetReadWriteResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ChangeTypeRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReplicationStatusResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8634,17 +14309,17 @@ func (m *ChangeTypeRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ChangeTypeRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicationStatusResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ChangeTypeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicationStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - m.TabletType = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -8654,31 +14329,28 @@ func (m *ChangeTypeRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TabletType |= topodata.TabletType(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) + if msglen < 0 { + return ErrInvalidLength } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength } - m.SemiSync = bool(v != 0) + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &replicationdata.Status{} + } + if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -8701,7 +14373,7 @@ func (m *ChangeTypeRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ChangeTypeResponse) UnmarshalVT(dAtA []byte) error { +func (m *PrimaryStatusRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8724,10 +14396,10 @@ func (m *ChangeTypeResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ChangeTypeResponse: wiretype end group for non-group") + return fmt.Errorf("proto: PrimaryStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ChangeTypeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PrimaryStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -8752,7 +14424,7 @@ func (m *ChangeTypeResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RefreshStateRequest) UnmarshalVT(dAtA []byte) error { +func (m *PrimaryStatusResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8775,12 +14447,48 @@ func (m *RefreshStateRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RefreshStateRequest: wiretype end group for non-group") + return fmt.Errorf("proto: PrimaryStatusResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RefreshStateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PrimaryStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &replicationdata.PrimaryStatus{} + } + if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -8803,7 +14511,7 @@ func (m *RefreshStateRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RefreshStateResponse) UnmarshalVT(dAtA []byte) error { +func (m *PrimaryPositionRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8826,10 +14534,10 @@ func (m *RefreshStateResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RefreshStateResponse: wiretype end group for non-group") + return fmt.Errorf("proto: PrimaryPositionRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RefreshStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PrimaryPositionRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -8854,7 +14562,7 @@ func (m *RefreshStateResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { +func (m *PrimaryPositionResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8877,63 +14585,44 @@ func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunHealthCheckRequest: wiretype end group for non-group") + return fmt.Errorf("proto: PrimaryPositionResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunHealthCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PrimaryPositionResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RunHealthCheckResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RunHealthCheckResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RunHealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -8956,7 +14645,7 @@ func (m *RunHealthCheckResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *WaitForPositionRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8979,15 +14668,15 @@ func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReloadSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: WaitForPositionRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WaitForPositionRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -9015,7 +14704,7 @@ func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.WaitPosition = string(dAtA[iNdEx:postIndex]) + m.Position = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -9039,7 +14728,7 @@ func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReloadSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *WaitForPositionResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9062,10 +14751,10 @@ func (m *ReloadSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReloadSchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: WaitForPositionResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WaitForPositionResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -9090,7 +14779,7 @@ func (m *ReloadSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PreflightSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *StopReplicationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9113,44 +14802,12 @@ func (m *PreflightSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PreflightSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: StopReplicationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PreflightSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StopReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Changes = append(m.Changes, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -9173,7 +14830,7 @@ func (m *PreflightSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PreflightSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *StopReplicationResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9196,46 +14853,12 @@ func (m *PreflightSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PreflightSchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: StopReplicationResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PreflightSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StopReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChangeResults", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChangeResults = append(m.ChangeResults, &SchemaChangeResult{}) - if err := m.ChangeResults[len(m.ChangeResults)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -9258,7 +14881,7 @@ func (m *PreflightSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *StopReplicationMinimumRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9281,15 +14904,15 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplySchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: StopReplicationMinimumRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplySchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StopReplicationMinimumRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -9317,13 +14940,13 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Sql = string(dAtA[iNdEx:postIndex]) + m.Position = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WaitTimeout", wireType) } - var v int + m.WaitTimeout = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -9333,37 +14956,67 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.WaitTimeout |= int64(b&0x7F) << shift if b < 0x80 { break } } - m.Force = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowReplication", wireType) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength } - m.AllowReplication = bool(v != 0) - case 4: + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StopReplicationMinimumResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StopReplicationMinimumResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StopReplicationMinimumResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeforeSchema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -9373,69 +15026,80 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.BeforeSchema == nil { - m.BeforeSchema = &SchemaDefinition{} - } - if err := m.BeforeSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Position = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AfterSchema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.AfterSchema == nil { - m.AfterSchema = &SchemaDefinition{} + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartReplicationRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if err := m.AfterSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SqlMode", wireType) + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - var stringLen uint64 + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartReplicationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) + } + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -9445,24 +15109,63 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + m.SemiSync = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.SqlMode = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartReplicationResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartReplicationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -9485,7 +15188,7 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *StartReplicationUntilAfterRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9508,17 +15211,17 @@ func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplySchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: StartReplicationUntilAfterRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplySchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StartReplicationUntilAfterRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeforeSchema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -9528,33 +15231,29 @@ func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.BeforeSchema == nil { - m.BeforeSchema = &SchemaDefinition{} - } - if err := m.BeforeSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Position = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AfterSchema", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitTimeout", wireType) } - var msglen int + m.WaitTimeout = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -9564,28 +15263,62 @@ func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.WaitTimeout |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.AfterSchema == nil { - m.AfterSchema = &SchemaDefinition{} + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartReplicationUntilAfterResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if err := m.AfterSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartReplicationUntilAfterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartReplicationUntilAfterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -9608,7 +15341,7 @@ func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *LockTablesRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetReplicasRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9631,10 +15364,10 @@ func (m *LockTablesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LockTablesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetReplicasRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LockTablesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetReplicasRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -9659,7 +15392,7 @@ func (m *LockTablesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *LockTablesResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetReplicasResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9682,12 +15415,44 @@ func (m *LockTablesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LockTablesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetReplicasResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LockTablesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetReplicasResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addrs = append(m.Addrs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -9710,7 +15475,7 @@ func (m *LockTablesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UnlockTablesRequest) UnmarshalVT(dAtA []byte) error { +func (m *ResetReplicationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9733,10 +15498,10 @@ func (m *UnlockTablesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UnlockTablesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ResetReplicationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UnlockTablesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResetReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -9761,7 +15526,7 @@ func (m *UnlockTablesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UnlockTablesResponse) UnmarshalVT(dAtA []byte) error { +func (m *ResetReplicationResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9784,10 +15549,10 @@ func (m *UnlockTablesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UnlockTablesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ResetReplicationResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UnlockTablesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResetReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -9812,7 +15577,7 @@ func (m *UnlockTablesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteQueryRequest) UnmarshalVT(dAtA []byte) error { +func (m *VReplicationExecRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9835,50 +15600,16 @@ func (m *ExecuteQueryRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteQueryRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VReplicationExecRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteQueryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VReplicationExecRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) - if m.Query == nil { - m.Query = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) - } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { @@ -9905,62 +15636,7 @@ func (m *ExecuteQueryRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DbName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) - } - m.MaxRows = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxRows |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CallerId", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CallerId == nil { - m.CallerId = &vtrpc.CallerID{} - } - if err := m.CallerId.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Query = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -9984,7 +15660,7 @@ func (m *ExecuteQueryRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteQueryResponse) UnmarshalVT(dAtA []byte) error { +func (m *VReplicationExecResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10007,10 +15683,10 @@ func (m *ExecuteQueryResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteQueryResponse: wiretype end group for non-group") + return fmt.Errorf("proto: VReplicationExecResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteQueryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VReplicationExecResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -10071,7 +15747,7 @@ func (m *ExecuteQueryResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsDbaRequest) UnmarshalVT(dAtA []byte) error { +func (m *VReplicationWaitForPosRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10094,17 +15770,17 @@ func (m *ExecuteFetchAsDbaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsDbaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VReplicationWaitForPosRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsDbaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VReplicationWaitForPosRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) } - var byteLen int + m.Id = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -10114,29 +15790,14 @@ func (m *ExecuteFetchAsDbaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.Id |= int32(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) - if m.Query == nil { - m.Query = []byte{} - } - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10164,50 +15825,113 @@ func (m *ExecuteFetchAsDbaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DbName = string(dAtA[iNdEx:postIndex]) + m.Position = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - m.MaxRows = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxRows |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DisableBinlogs", wireType) + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VReplicationWaitForPosResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VReplicationWaitForPosResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VReplicationWaitForPosResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InitPrimaryRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.DisableBinlogs = bool(v != 0) - case 5: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InitPrimaryRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InitPrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReloadSchema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -10224,7 +15948,7 @@ func (m *ExecuteFetchAsDbaRequest) UnmarshalVT(dAtA []byte) error { break } } - m.ReloadSchema = bool(v != 0) + m.SemiSync = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -10247,7 +15971,7 @@ func (m *ExecuteFetchAsDbaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsDbaResponse) UnmarshalVT(dAtA []byte) error { +func (m *InitPrimaryResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10270,17 +15994,17 @@ func (m *ExecuteFetchAsDbaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsDbaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: InitPrimaryResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsDbaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InitPrimaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -10290,27 +16014,23 @@ func (m *ExecuteFetchAsDbaResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Result == nil { - m.Result = &query.QueryResult{} - } - if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Position = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -10334,7 +16054,7 @@ func (m *ExecuteFetchAsDbaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { +func (m *PopulateReparentJournalRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10357,17 +16077,17 @@ func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsAllPrivsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: PopulateReparentJournalRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsAllPrivsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PopulateReparentJournalRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeCreatedNs", wireType) } - var byteLen int + m.TimeCreatedNs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -10377,29 +16097,14 @@ func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + m.TimeCreatedNs |= int64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) - if m.Query == nil { - m.Query = []byte{} - } - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ActionName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10427,13 +16132,13 @@ func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DbName = string(dAtA[iNdEx:postIndex]) + m.ActionName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrimaryAlias", wireType) } - m.MaxRows = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -10443,16 +16148,33 @@ func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxRows |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrimaryAlias == nil { + m.PrimaryAlias = &topodata.TabletAlias{} + } + if err := m.PrimaryAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReloadSchema", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReplicationPosition", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -10462,12 +16184,24 @@ func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.ReloadSchema = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReplicationPosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -10490,7 +16224,7 @@ func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsAllPrivsResponse) UnmarshalVT(dAtA []byte) error { +func (m *PopulateReparentJournalResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10513,48 +16247,12 @@ func (m *ExecuteFetchAsAllPrivsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsAllPrivsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: PopulateReparentJournalResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsAllPrivsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PopulateReparentJournalResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Result == nil { - m.Result = &query.QueryResult{} - } - if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -10577,7 +16275,7 @@ func (m *ExecuteFetchAsAllPrivsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { +func (m *InitReplicaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10600,17 +16298,17 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsAppRequest: wiretype end group for non-group") + return fmt.Errorf("proto: InitReplicaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsAppRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InitReplicaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -10620,31 +16318,33 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) - if m.Query == nil { - m.Query = []byte{} + if m.Parent == nil { + m.Parent = &topodata.TabletAlias{} + } + if err := m.Parent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReplicationPosition", wireType) } - m.MaxRows = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -10654,67 +16354,29 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxRows |= uint64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsAppResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsAppResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + m.ReplicationPosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeCreatedNs", wireType) } - var msglen int + m.TimeCreatedNs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -10724,79 +16386,31 @@ func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.TimeCreatedNs |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Result == nil { - m.Result = &query.QueryResult{} - } - if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicationStatusRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicationStatusRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicationStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + m.SemiSync = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -10819,7 +16433,7 @@ func (m *ReplicationStatusRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReplicationStatusResponse) UnmarshalVT(dAtA []byte) error { +func (m *InitReplicaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10842,48 +16456,12 @@ func (m *ReplicationStatusResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicationStatusResponse: wiretype end group for non-group") + return fmt.Errorf("proto: InitReplicaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicationStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InitReplicaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Status == nil { - m.Status = &replicationdata.Status{} - } - if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -10906,7 +16484,7 @@ func (m *ReplicationStatusResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PrimaryStatusRequest) UnmarshalVT(dAtA []byte) error { +func (m *DemotePrimaryRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10929,10 +16507,10 @@ func (m *PrimaryStatusRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PrimaryStatusRequest: wiretype end group for non-group") + return fmt.Errorf("proto: DemotePrimaryRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PrimaryStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DemotePrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -10957,7 +16535,7 @@ func (m *PrimaryStatusRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PrimaryStatusResponse) UnmarshalVT(dAtA []byte) error { +func (m *DemotePrimaryResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10980,15 +16558,15 @@ func (m *PrimaryStatusResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PrimaryStatusResponse: wiretype end group for non-group") + return fmt.Errorf("proto: DemotePrimaryResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PrimaryStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DemotePrimaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PrimaryStatus", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11015,10 +16593,10 @@ func (m *PrimaryStatusResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Status == nil { - m.Status = &replicationdata.PrimaryStatus{} + if m.PrimaryStatus == nil { + m.PrimaryStatus = &replicationdata.PrimaryStatus{} } - if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.PrimaryStatus.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11044,7 +16622,7 @@ func (m *PrimaryStatusResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PrimaryPositionRequest) UnmarshalVT(dAtA []byte) error { +func (m *UndoDemotePrimaryRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11067,12 +16645,32 @@ func (m *PrimaryPositionRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PrimaryPositionRequest: wiretype end group for non-group") + return fmt.Errorf("proto: UndoDemotePrimaryRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PrimaryPositionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UndoDemotePrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SemiSync = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -11095,7 +16693,7 @@ func (m *PrimaryPositionRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PrimaryPositionResponse) UnmarshalVT(dAtA []byte) error { +func (m *UndoDemotePrimaryResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11118,44 +16716,12 @@ func (m *PrimaryPositionResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PrimaryPositionResponse: wiretype end group for non-group") + return fmt.Errorf("proto: UndoDemotePrimaryResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PrimaryPositionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UndoDemotePrimaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Position = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -11178,7 +16744,7 @@ func (m *PrimaryPositionResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *WaitForPositionRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReplicaWasPromotedRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11201,44 +16767,12 @@ func (m *WaitForPositionRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: WaitForPositionRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WaitForPositionRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Position = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + return fmt.Errorf("proto: ReplicaWasPromotedRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaWasPromotedRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -11261,7 +16795,7 @@ func (m *WaitForPositionRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *WaitForPositionResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReplicaWasPromotedResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11284,10 +16818,10 @@ func (m *WaitForPositionResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: WaitForPositionResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaWasPromotedResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: WaitForPositionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaWasPromotedResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -11312,7 +16846,7 @@ func (m *WaitForPositionResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StopReplicationRequest) UnmarshalVT(dAtA []byte) error { +func (m *ResetReplicationParametersRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11335,10 +16869,10 @@ func (m *StopReplicationRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StopReplicationRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ResetReplicationParametersRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StopReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResetReplicationParametersRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -11363,7 +16897,7 @@ func (m *StopReplicationRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StopReplicationResponse) UnmarshalVT(dAtA []byte) error { +func (m *ResetReplicationParametersResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11386,10 +16920,10 @@ func (m *StopReplicationResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StopReplicationResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ResetReplicationParametersResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StopReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResetReplicationParametersResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -11414,7 +16948,7 @@ func (m *StopReplicationResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StopReplicationMinimumRequest) UnmarshalVT(dAtA []byte) error { +func (m *FullStatusRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11437,63 +16971,12 @@ func (m *StopReplicationMinimumRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StopReplicationMinimumRequest: wiretype end group for non-group") + return fmt.Errorf("proto: FullStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StopReplicationMinimumRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FullStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Position = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitTimeout", wireType) - } - m.WaitTimeout = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WaitTimeout |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -11516,7 +16999,7 @@ func (m *StopReplicationMinimumRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StopReplicationMinimumResponse) UnmarshalVT(dAtA []byte) error { +func (m *FullStatusResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11539,17 +17022,17 @@ func (m *StopReplicationMinimumResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StopReplicationMinimumResponse: wiretype end group for non-group") + return fmt.Errorf("proto: FullStatusResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StopReplicationMinimumResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FullStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -11559,23 +17042,27 @@ func (m *StopReplicationMinimumResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Position = string(dAtA[iNdEx:postIndex]) + if m.Status == nil { + m.Status = &replicationdata.FullStatus{} + } + if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -11599,7 +17086,7 @@ func (m *StopReplicationMinimumResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StartReplicationRequest) UnmarshalVT(dAtA []byte) error { +func (m *SetReplicationSourceRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11622,17 +17109,17 @@ func (m *StartReplicationRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StartReplicationRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SetReplicationSourceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StartReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetReplicationSourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -11642,117 +17129,70 @@ func (m *StartReplicationRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.SemiSync = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if msglen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StartReplicationResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if m.Parent == nil { + m.Parent = &topodata.TabletAlias{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StartReplicationResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StartReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { + if err := m.Parent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeCreatedNs", wireType) } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StartReplicationUntilAfterRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + m.TimeCreatedNs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeCreatedNs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForceStartReplication", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StartReplicationUntilAfterRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StartReplicationUntilAfterRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.ForceStartReplication = bool(v != 0) + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -11780,13 +17220,13 @@ func (m *StartReplicationUntilAfterRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Position = string(dAtA[iNdEx:postIndex]) + m.WaitPosition = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitTimeout", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) } - m.WaitTimeout = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -11796,11 +17236,12 @@ func (m *StartReplicationUntilAfterRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.WaitTimeout |= int64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } + m.SemiSync = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -11823,58 +17264,7 @@ func (m *StartReplicationUntilAfterRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StartReplicationUntilAfterResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StartReplicationUntilAfterResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StartReplicationUntilAfterResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetReplicasRequest) UnmarshalVT(dAtA []byte) error { +func (m *SetReplicationSourceResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11897,10 +17287,10 @@ func (m *GetReplicasRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetReplicasRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SetReplicationSourceResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetReplicasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetReplicationSourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -11925,7 +17315,7 @@ func (m *GetReplicasRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetReplicasResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReplicaWasRestartedRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11948,17 +17338,17 @@ func (m *GetReplicasResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetReplicasResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaWasRestartedRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetReplicasResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaWasRestartedRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -11968,23 +17358,27 @@ func (m *GetReplicasResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Addrs = append(m.Addrs, string(dAtA[iNdEx:postIndex])) + if m.Parent == nil { + m.Parent = &topodata.TabletAlias{} + } + if err := m.Parent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -12008,7 +17402,7 @@ func (m *GetReplicasResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ResetReplicationRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReplicaWasRestartedResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12031,10 +17425,10 @@ func (m *ResetReplicationRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResetReplicationRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReplicaWasRestartedResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResetReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReplicaWasRestartedResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -12059,7 +17453,7 @@ func (m *ResetReplicationRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ResetReplicationResponse) UnmarshalVT(dAtA []byte) error { +func (m *StopReplicationAndGetStatusRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12082,12 +17476,31 @@ func (m *ResetReplicationResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResetReplicationResponse: wiretype end group for non-group") + return fmt.Errorf("proto: StopReplicationAndGetStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResetReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StopReplicationAndGetStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StopReplicationMode", wireType) + } + m.StopReplicationMode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StopReplicationMode |= replicationdata.StopReplicationMode(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -12110,7 +17523,7 @@ func (m *ResetReplicationResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VReplicationExecRequest) UnmarshalVT(dAtA []byte) error { +func (m *StopReplicationAndGetStatusResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12133,17 +17546,17 @@ func (m *VReplicationExecRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VReplicationExecRequest: wiretype end group for non-group") + return fmt.Errorf("proto: StopReplicationAndGetStatusResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VReplicationExecRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StopReplicationAndGetStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -12153,23 +17566,27 @@ func (m *VReplicationExecRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Query = string(dAtA[iNdEx:postIndex]) + if m.Status == nil { + m.Status = &replicationdata.StopReplicationStatus{} + } + if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -12193,7 +17610,7 @@ func (m *VReplicationExecRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VReplicationExecResponse) UnmarshalVT(dAtA []byte) error { +func (m *PromoteReplicaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12216,17 +17633,17 @@ func (m *VReplicationExecResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VReplicationExecResponse: wiretype end group for non-group") + return fmt.Errorf("proto: PromoteReplicaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VReplicationExecResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PromoteReplicaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -12236,28 +17653,12 @@ func (m *VReplicationExecResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Result == nil { - m.Result = &query.QueryResult{} - } - if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.SemiSync = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -12280,7 +17681,7 @@ func (m *VReplicationExecResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VReplicationWaitForPosRequest) UnmarshalVT(dAtA []byte) error { +func (m *PromoteReplicaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12303,32 +17704,13 @@ func (m *VReplicationWaitForPosRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VReplicationWaitForPosRequest: wiretype end group for non-group") + return fmt.Errorf("proto: PromoteReplicaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VReplicationWaitForPosRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PromoteReplicaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - m.Id = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Id |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) } @@ -12342,75 +17724,24 @@ func (m *VReplicationWaitForPosRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Position = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VReplicationWaitForPosResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VReplicationWaitForPosResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VReplicationWaitForPosResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -12433,7 +17764,7 @@ func (m *VReplicationWaitForPosResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *InitPrimaryRequest) UnmarshalVT(dAtA []byte) error { +func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12456,15 +17787,34 @@ func (m *InitPrimaryRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: InitPrimaryRequest: wiretype end group for non-group") + return fmt.Errorf("proto: BackupRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: InitPrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) + } + m.Concurrency = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Concurrency |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -12481,7 +17831,59 @@ func (m *InitPrimaryRequest) UnmarshalVT(dAtA []byte) error { break } } - m.SemiSync = bool(v != 0) + m.AllowPrimary = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncrementalFromPos", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IncrementalFromPos = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpgradeSafe", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UpgradeSafe = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -12504,7 +17906,7 @@ func (m *InitPrimaryRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *InitPrimaryResponse) UnmarshalVT(dAtA []byte) error { +func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12527,17 +17929,17 @@ func (m *InitPrimaryResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: InitPrimaryResponse: wiretype end group for non-group") + return fmt.Errorf("proto: BackupResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: InitPrimaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -12547,23 +17949,27 @@ func (m *InitPrimaryResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Position = string(dAtA[iNdEx:postIndex]) + if m.Event == nil { + m.Event = &logutil.Event{} + } + if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -12587,7 +17993,7 @@ func (m *InitPrimaryResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PopulateReparentJournalRequest) UnmarshalVT(dAtA []byte) error { +func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12610,17 +18016,17 @@ func (m *PopulateReparentJournalRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PopulateReparentJournalRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RestoreFromBackupRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PopulateReparentJournalRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RestoreFromBackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeCreatedNs", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BackupTime", wireType) } - m.TimeCreatedNs = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -12630,14 +18036,31 @@ func (m *PopulateReparentJournalRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimeCreatedNs |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BackupTime == nil { + m.BackupTime = &vttime.Time{} + } + if err := m.BackupTime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ActionName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestoreToPos", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -12665,13 +18088,13 @@ func (m *PopulateReparentJournalRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ActionName = string(dAtA[iNdEx:postIndex]) + m.RestoreToPos = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrimaryAlias", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -12681,33 +18104,17 @@ func (m *PopulateReparentJournalRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PrimaryAlias == nil { - m.PrimaryAlias = &topodata.TabletAlias{} - } - if err := m.PrimaryAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.DryRun = bool(v != 0) case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReplicationPosition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestoreToTimestamp", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -12717,23 +18124,27 @@ func (m *PopulateReparentJournalRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ReplicationPosition = string(dAtA[iNdEx:postIndex]) + if m.RestoreToTimestamp == nil { + m.RestoreToTimestamp = &vttime.Time{} + } + if err := m.RestoreToTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -12757,7 +18168,7 @@ func (m *PopulateReparentJournalRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PopulateReparentJournalResponse) UnmarshalVT(dAtA []byte) error { +func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12770,22 +18181,58 @@ func (m *PopulateReparentJournalResponse) UnmarshalVT(dAtA []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RestoreFromBackupResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RestoreFromBackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Event == nil { + m.Event = &logutil.Event{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PopulateReparentJournalResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PopulateReparentJournalResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -12808,7 +18255,7 @@ func (m *PopulateReparentJournalResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *InitReplicaRequest) UnmarshalVT(dAtA []byte) error { +func (m *CreateVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12831,15 +18278,47 @@ func (m *InitReplicaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: InitReplicaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: CreateVReplicationWorkflowRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: InitReplicaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CreateVReplicationWorkflowRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogSource", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12866,16 +18345,14 @@ func (m *InitReplicaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Parent == nil { - m.Parent = &topodata.TabletAlias{} - } - if err := m.Parent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.BinlogSource = append(m.BinlogSource, &binlogdata.BinlogSource{}) + if err := m.BinlogSource[len(m.BinlogSource)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReplicationPosition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -12903,13 +18380,82 @@ func (m *InitReplicaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ReplicationPosition = string(dAtA[iNdEx:postIndex]) + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: + case 4: + if wireType == 0 { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes = make([]topodata.TabletType, 0, elementCount) + } + for iNdEx < postIndex { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) + } + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeCreatedNs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) } - m.TimeCreatedNs = 0 + m.TabletSelectionPreference = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -12919,16 +18465,16 @@ func (m *InitReplicaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimeCreatedNs |= int64(b&0x7F) << shift + m.TabletSelectionPreference |= TabletSelectionPreference(b&0x7F) << shift if b < 0x80 { break } } - case 4: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowType", wireType) } - var v int + m.WorkflowType = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -12938,114 +18484,90 @@ func (m *InitReplicaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.WorkflowType |= binlogdata.VReplicationWorkflowType(b&0x7F) << shift if b < 0x80 { break } } - m.SemiSync = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InitReplicaResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowSubType", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InitReplicaResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InitReplicaResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + m.WorkflowSubType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WorkflowSubType |= binlogdata.VReplicationWorkflowSubType(b&0x7F) << shift + if b < 0x80 { + break + } } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeferSecondaryKeys", wireType) } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DemotePrimaryRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + m.DeferSecondaryKeys = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoStart", wireType) } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.AutoStart = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StopAfterCopy", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DemotePrimaryRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DemotePrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StopAfterCopy = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13068,7 +18590,7 @@ func (m *DemotePrimaryRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DemotePrimaryResponse) UnmarshalVT(dAtA []byte) error { +func (m *CreateVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13091,15 +18613,15 @@ func (m *DemotePrimaryResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DemotePrimaryResponse: wiretype end group for non-group") + return fmt.Errorf("proto: CreateVReplicationWorkflowResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DemotePrimaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CreateVReplicationWorkflowResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 2: + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrimaryStatus", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13126,10 +18648,10 @@ func (m *DemotePrimaryResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.PrimaryStatus == nil { - m.PrimaryStatus = &replicationdata.PrimaryStatus{} + if m.Result == nil { + m.Result = &query.QueryResult{} } - if err := m.PrimaryStatus.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13155,7 +18677,7 @@ func (m *DemotePrimaryResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UndoDemotePrimaryRequest) UnmarshalVT(dAtA []byte) error { +func (m *DeleteVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13178,17 +18700,17 @@ func (m *UndoDemotePrimaryRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UndoDemotePrimaryRequest: wiretype end group for non-group") + return fmt.Errorf("proto: DeleteVReplicationWorkflowRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UndoDemotePrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeleteVReplicationWorkflowRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13198,216 +18720,24 @@ func (m *UndoDemotePrimaryRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.SemiSync = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UndoDemotePrimaryResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UndoDemotePrimaryResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UndoDemotePrimaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaWasPromotedRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaWasPromotedRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaWasPromotedRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaWasPromotedResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaWasPromotedResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaWasPromotedResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResetReplicationParametersRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResetReplicationParametersRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResetReplicationParametersRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13430,7 +18760,7 @@ func (m *ResetReplicationParametersRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ResetReplicationParametersResponse) UnmarshalVT(dAtA []byte) error { +func (m *DeleteVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13453,12 +18783,48 @@ func (m *ResetReplicationParametersResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResetReplicationParametersResponse: wiretype end group for non-group") + return fmt.Errorf("proto: DeleteVReplicationWorkflowResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResetReplicationParametersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeleteVReplicationWorkflowResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13481,7 +18847,7 @@ func (m *ResetReplicationParametersResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *FullStatusRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReadVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13504,12 +18870,44 @@ func (m *FullStatusRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FullStatusRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReadVReplicationWorkflowRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FullStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReadVReplicationWorkflowRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13532,7 +18930,7 @@ func (m *FullStatusRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *FullStatusResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReadVReplicationWorkflowResponse_Stream) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13555,15 +18953,34 @@ func (m *FullStatusResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FullStatusResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReadVReplicationWorkflowResponse_Stream: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FullStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReadVReplicationWorkflowResponse_Stream: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Bls", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13590,67 +19007,118 @@ func (m *FullStatusResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Status == nil { - m.Status = &replicationdata.FullStatus{} + if m.Bls == nil { + m.Bls = &binlogdata.BinlogSource{} } - if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Bls.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pos", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { return io.ErrUnexpectedEOF } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SetReplicationSourceRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + m.Pos = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StopPos", wireType) } - if iNdEx >= l { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.StopPos = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTps", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SetReplicationSourceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SetReplicationSourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.MaxTps = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTps |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicationLag", wireType) + } + m.MaxReplicationLag = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxReplicationLag |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TimeUpdated", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13677,18 +19145,18 @@ func (m *SetReplicationSourceRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Parent == nil { - m.Parent = &topodata.TabletAlias{} + if m.TimeUpdated == nil { + m.TimeUpdated = &vttime.Time{} } - if err := m.Parent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TimeUpdated.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeCreatedNs", wireType) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionTimestamp", wireType) } - m.TimeCreatedNs = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13698,16 +19166,33 @@ func (m *SetReplicationSourceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimeCreatedNs |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 3: + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TransactionTimestamp == nil { + m.TransactionTimestamp = &vttime.Time{} + } + if err := m.TransactionTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ForceStartReplication", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) } - var v int + m.State = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13717,15 +19202,14 @@ func (m *SetReplicationSourceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.State |= binlogdata.VReplicationWorkflowState(b&0x7F) << shift if b < 0x80 { break } } - m.ForceStartReplication = bool(v != 0) - case 4: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13753,13 +19237,13 @@ func (m *SetReplicationSourceRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.WaitPosition = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 11: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowsCopied", wireType) } - var v int + m.RowsCopied = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13769,117 +19253,14 @@ func (m *SetReplicationSourceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.RowsCopied |= int64(b&0x7F) << shift if b < 0x80 { break } } - m.SemiSync = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SetReplicationSourceResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SetReplicationSourceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SetReplicationSourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaWasRestartedRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaWasRestartedRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaWasRestartedRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TimeHeartbeat", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13906,120 +19287,54 @@ func (m *ReplicaWasRestartedRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Parent == nil { - m.Parent = &topodata.TabletAlias{} + if m.TimeHeartbeat == nil { + m.TimeHeartbeat = &vttime.Time{} } - if err := m.Parent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TimeHeartbeat.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaWasRestartedResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeThrottled", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaWasRestartedResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaWasRestartedResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + if msglen < 0 { + return ErrInvalidLength } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StopReplicationAndGetStatusRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.TimeThrottled == nil { + m.TimeThrottled = &vttime.Time{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.TimeThrottled.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StopReplicationAndGetStatusRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StopReplicationAndGetStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StopReplicationMode", wireType) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ComponentThrottled", wireType) } - m.StopReplicationMode = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14029,11 +19344,24 @@ func (m *StopReplicationAndGetStatusRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.StopReplicationMode |= replicationdata.StopReplicationMode(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ComponentThrottled = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -14056,7 +19384,7 @@ func (m *StopReplicationAndGetStatusRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StopReplicationAndGetStatusResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReadVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14079,17 +19407,17 @@ func (m *StopReplicationAndGetStatusResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StopReplicationAndGetStatusResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReadVReplicationWorkflowResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StopReplicationAndGetStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReadVReplicationWorkflowResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14099,84 +19427,271 @@ func (m *StopReplicationAndGetStatusResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Status == nil { - m.Status = &replicationdata.StopReplicationStatus{} + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } - if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 4: + if wireType == 0 { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes = make([]topodata.TabletType, 0, elementCount) + } + for iNdEx < postIndex { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) + } + m.TabletSelectionPreference = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletSelectionPreference |= TabletSelectionPreference(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { return io.ErrUnexpectedEOF } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PromoteReplicaRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + m.DbName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) } - if iNdEx >= l { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Tags = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowType", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PromoteReplicaRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PromoteReplicaRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.WorkflowType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WorkflowType |= binlogdata.VReplicationWorkflowType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SemiSync", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowSubType", wireType) + } + m.WorkflowSubType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.WorkflowSubType |= binlogdata.VReplicationWorkflowSubType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeferSecondaryKeys", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - var v int + m.DeferSecondaryKeys = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14186,12 +19701,26 @@ func (m *PromoteReplicaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.SemiSync = bool(v != 0) + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Streams = append(m.Streams, &ReadVReplicationWorkflowResponse_Stream{}) + if err := m.Streams[len(m.Streams)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -14214,7 +19743,7 @@ func (m *PromoteReplicaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PromoteReplicaResponse) UnmarshalVT(dAtA []byte) error { +func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14237,15 +19766,15 @@ func (m *PromoteReplicaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PromoteReplicaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PromoteReplicaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14273,64 +19802,45 @@ func (m *PromoteReplicaResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Position = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BackupRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) } - m.Concurrency = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14340,16 +19850,29 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Concurrency |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - var v int + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActionArg", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14359,15 +19882,27 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.AllowPrimary = bool(v != 0) - case 3: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ActionArg = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IncrementalFromPos", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VdiffUuid", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14395,7 +19930,43 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IncrementalFromPos = string(dAtA[iNdEx:postIndex]) + m.VdiffUuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &VDiffOptions{} + } + if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -14419,7 +19990,7 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { +func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14442,15 +20013,34 @@ func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BackupResponse: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14477,13 +20067,45 @@ func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Event == nil { - m.Event = &logutil.Event{} + if m.Output == nil { + m.Output = &query.QueryResult{} } - if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Output.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VdiffUuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VdiffUuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -14506,7 +20128,7 @@ func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { +func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14529,17 +20151,17 @@ func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RestoreFromBackupRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffPickerOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RestoreFromBackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffPickerOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BackupTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14549,31 +20171,27 @@ func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.BackupTime == nil { - m.BackupTime = &vttime.Time{} - } - if err := m.BackupTime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.TabletTypes = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RestoreToPos", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceCell", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14601,13 +20219,13 @@ func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RestoreToPos = string(dAtA[iNdEx:postIndex]) + m.SourceCell = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetCell", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14617,12 +20235,24 @@ func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.DryRun = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetCell = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -14645,7 +20275,7 @@ func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { +func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14668,17 +20298,57 @@ func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RestoreFromBackupResponse: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffReportOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RestoreFromBackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffReportOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OnlyPks", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.OnlyPks = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DebugQuery", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DebugQuery = bool(v != 0) + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14688,27 +20358,23 @@ func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Event == nil { - m.Event = &logutil.Event{} - } - if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Format = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -14732,7 +20398,7 @@ func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { +func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14755,15 +20421,15 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffCoreOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffCoreOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14791,13 +20457,13 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Tables = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoRetry", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14807,29 +20473,17 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Workflow = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.AutoRetry = bool(v != 0) case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) } - var stringLen uint64 + m.MaxRows = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14839,29 +20493,16 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.MaxRows |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Action = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ActionArg", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14871,29 +20512,17 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ActionArg = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.Checksum = bool(v != 0) case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VdiffUuid", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SamplePct", wireType) } - var stringLen uint64 + m.SamplePct = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14903,29 +20532,16 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.SamplePct |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VdiffUuid = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) } - var msglen int + m.TimeoutSeconds = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14935,28 +20551,50 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.TimeoutSeconds |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxExtraRowsToCompare", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + m.MaxExtraRowsToCompare = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxExtraRowsToCompare |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - if m.Options == nil { - m.Options = &VDiffOptions{} + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateTableStats", wireType) } - if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex + m.UpdateTableStats = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -14979,7 +20617,7 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { +func (m *VDiffOptions) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15002,17 +20640,17 @@ func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffResponse: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PickerOptions", wireType) } - m.Id = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15022,14 +20660,31 @@ func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Id |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PickerOptions == nil { + m.PickerOptions = &VDiffPickerOptions{} + } + if err := m.PickerOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CoreOptions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15056,18 +20711,18 @@ func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Output == nil { - m.Output = &query.QueryResult{} + if m.CoreOptions == nil { + m.CoreOptions = &VDiffCoreOptions{} } - if err := m.Output.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.CoreOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VdiffUuid", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReportOptions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15077,23 +20732,27 @@ func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.VdiffUuid = string(dAtA[iNdEx:postIndex]) + if m.ReportOptions == nil { + m.ReportOptions = &VDiffReportOptions{} + } + if err := m.ReportOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -15117,7 +20776,7 @@ func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { +func (m *UpdateVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15140,15 +20799,15 @@ func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffPickerOptions: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateVReplicationWorkflowRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffPickerOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateVReplicationWorkflowRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15176,11 +20835,11 @@ func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TabletTypes = string(dAtA[iNdEx:postIndex]) + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceCell", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15208,13 +20867,82 @@ func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceCell = string(dAtA[iNdEx:postIndex]) + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetCell", wireType) + if wireType == 0 { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes = make([]topodata.TabletType, 0, elementCount) + } + for iNdEx < postIndex { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) } - var stringLen uint64 + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) + } + m.TabletSelectionPreference = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15224,24 +20952,49 @@ func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.TabletSelectionPreference |= TabletSelectionPreference(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + m.OnDdl = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OnDdl |= binlogdata.OnDDLAction(b&0x7F) << shift + if b < 0x80 { + break + } } - if postIndex > l { - return io.ErrUnexpectedEOF + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= binlogdata.VReplicationWorkflowState(b&0x7F) << shift + if b < 0x80 { + break + } } - m.TargetCell = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -15264,7 +21017,7 @@ func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { +func (m *UpdateVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15287,17 +21040,17 @@ func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffReportOptions: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateVReplicationWorkflowResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffReportOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateVReplicationWorkflowResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OnlyPks", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15307,35 +21060,82 @@ func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.OnlyPks = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DebugQuery", wireType) + if msglen < 0 { + return ErrInvalidLength } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength } - m.DebugQuery = bool(v != 0) - case 3: + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResetSequencesRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResetSequencesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResetSequencesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15363,7 +21163,7 @@ func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Format = string(dAtA[iNdEx:postIndex]) + m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -15387,7 +21187,7 @@ func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { +func (m *ResetSequencesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15410,15 +21210,66 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffCoreOptions: wiretype end group for non-group") + return fmt.Errorf("proto: ResetSequencesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffCoreOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResetSequencesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckThrottlerRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckThrottlerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckThrottlerRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AppName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15446,124 +21297,8 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tables = string(dAtA[iNdEx:postIndex]) + m.AppName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutoRetry", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AutoRetry = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) - } - m.MaxRows = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxRows |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Checksum = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SamplePct", wireType) - } - m.SamplePct = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SamplePct |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - m.TimeoutSeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TimeoutSeconds |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxExtraRowsToCompare", wireType) - } - m.MaxExtraRowsToCompare = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxExtraRowsToCompare |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -15586,7 +21321,7 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VDiffOptions) UnmarshalVT(dAtA []byte) error { +func (m *CheckThrottlerResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15609,17 +21344,17 @@ func (m *VDiffOptions) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffOptions: wiretype end group for non-group") + return fmt.Errorf("proto: CheckThrottlerResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CheckThrottlerResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PickerOptions", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StatusCode", wireType) } - var msglen int + m.StatusCode = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15629,33 +21364,38 @@ func (m *VDiffOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.StatusCode |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - if postIndex > l { + var v uint64 + if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - if m.PickerOptions == nil { - m.PickerOptions = &VDiffPickerOptions{} + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Threshold", wireType) } - if err := m.PickerOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 2: + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Threshold = float64(math.Float64frombits(v)) + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CoreOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15665,33 +21405,29 @@ func (m *VDiffOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.CoreOptions == nil { - m.CoreOptions = &VDiffCoreOptions{} - } - if err := m.CoreOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReportOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15701,28 +21437,44 @@ func (m *VDiffOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ReportOptions == nil { - m.ReportOptions = &VDiffReportOptions{} + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RecentlyChecked", wireType) } - if err := m.ReportOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex + m.RecentlyChecked = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go index 6002acc5c35..608282049ba 100644 --- a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go +++ b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: tabletmanagerservice.proto @@ -45,7 +45,7 @@ var file_tabletmanagerservice_proto_rawDesc = []byte{ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x17, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xc6, 0x26, 0x0a, 0x0d, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xca, 0x2c, 0x0a, 0x0d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x49, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, @@ -124,241 +124,289 @@ var file_tabletmanagerservice_proto_rawDesc = []byte{ 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x0a, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x12, 0x24, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x6f, 0x63, - 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x61, 0x0a, 0x0c, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x12, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x6e, - 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x61, 0x0a, 0x0c, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x12, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x12, 0x2b, 0x2e, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, - 0x62, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, - 0x69, 0x76, 0x73, 0x12, 0x30, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, - 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x67, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x65, 0x71, + 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, + 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, + 0x0a, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x24, 0x2e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x61, 0x0a, 0x0c, 0x55, 0x6e, + 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x26, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, + 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x61, 0x0a, + 0x0c, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x26, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x12, - 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, - 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, + 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x70, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, + 0x41, 0x73, 0x44, 0x62, 0x61, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, + 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, + 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x12, 0x30, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, - 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x64, - 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, + 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, + 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x64, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x0f, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x6a, 0x0a, 0x0f, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, - 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x0f, - 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, - 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, - 0x75, 0x6d, 0x12, 0x30, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x10, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x8b, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, - 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, - 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, + 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, + 0x0f, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x0f, 0x57, 0x61, 0x69, + 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x61, 0x69, 0x74, + 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, + 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x30, 0x2e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x8b, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, + 0x12, 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, + 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x5e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x25, + 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, + 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x8b, 0x01, 0x0a, 0x1a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x34, + 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x8b, 0x01, + 0x0a, 0x1a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x34, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x85, 0x01, 0x0a, 0x18, + 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x32, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x61, + 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x10, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x12, 0x2a, 0x2e, 0x74, 0x61, 0x62, + 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x10, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x12, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x12, 0x30, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x69, + 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, + 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, + 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x8b, 0x01, 0x0a, 0x1a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x12, 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x4c, 0x0a, 0x05, 0x56, 0x44, 0x69, 0x66, 0x66, 0x12, 0x1f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x12, - 0x30, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x31, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x05, 0x56, 0x44, 0x69, 0x66, 0x66, 0x12, - 0x1f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x20, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, - 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x12, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, + 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x6d, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, + 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x25, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x82, + 0x01, 0x0a, 0x17, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x31, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, + 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, + 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x12, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, - 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x82, 0x01, 0x0a, 0x17, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12, - 0x31, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, + 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x64, 0x0a, 0x0d, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x12, 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x55, 0x6e, 0x64, + 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x64, 0x0a, 0x0d, 0x44, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, - 0x0a, 0x11, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, - 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x73, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, - 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x8b, 0x01, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x12, 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, - 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x0a, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x24, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x79, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x2e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x76, 0x0a, 0x13, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x65, 0x64, 0x12, 0x2d, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, - 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x74, 0x61, 0x2e, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x73, 0x0a, 0x12, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, + 0x64, 0x12, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, - 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x8e, 0x01, 0x0a, 0x1b, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, - 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, - 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x67, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, - 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, - 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x20, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, - 0x12, 0x72, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, - 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x30, 0x01, 0x42, 0x33, 0x5a, 0x31, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, - 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2d, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, + 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x8b, 0x01, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, + 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, + 0x0a, 0x0a, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x24, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x79, 0x0a, 0x14, 0x53, + 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x2e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x76, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x2d, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x8e, + 0x01, 0x0a, 0x1b, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x35, + 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x67, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x12, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x12, 0x20, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x72, 0x0a, 0x11, 0x52, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x67, 0x0a, 0x0e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, + 0x72, 0x12, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, + 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x33, 0x5a, 0x31, 0x76, 0x69, 0x74, 0x65, + 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, + 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_tabletmanagerservice_proto_goTypes = []interface{}{ @@ -375,184 +423,208 @@ var file_tabletmanagerservice_proto_goTypes = []interface{}{ (*tabletmanagerdata.ReloadSchemaRequest)(nil), // 10: tabletmanagerdata.ReloadSchemaRequest (*tabletmanagerdata.PreflightSchemaRequest)(nil), // 11: tabletmanagerdata.PreflightSchemaRequest (*tabletmanagerdata.ApplySchemaRequest)(nil), // 12: tabletmanagerdata.ApplySchemaRequest - (*tabletmanagerdata.LockTablesRequest)(nil), // 13: tabletmanagerdata.LockTablesRequest - (*tabletmanagerdata.UnlockTablesRequest)(nil), // 14: tabletmanagerdata.UnlockTablesRequest - (*tabletmanagerdata.ExecuteQueryRequest)(nil), // 15: tabletmanagerdata.ExecuteQueryRequest - (*tabletmanagerdata.ExecuteFetchAsDbaRequest)(nil), // 16: tabletmanagerdata.ExecuteFetchAsDbaRequest - (*tabletmanagerdata.ExecuteFetchAsAllPrivsRequest)(nil), // 17: tabletmanagerdata.ExecuteFetchAsAllPrivsRequest - (*tabletmanagerdata.ExecuteFetchAsAppRequest)(nil), // 18: tabletmanagerdata.ExecuteFetchAsAppRequest - (*tabletmanagerdata.ReplicationStatusRequest)(nil), // 19: tabletmanagerdata.ReplicationStatusRequest - (*tabletmanagerdata.PrimaryStatusRequest)(nil), // 20: tabletmanagerdata.PrimaryStatusRequest - (*tabletmanagerdata.PrimaryPositionRequest)(nil), // 21: tabletmanagerdata.PrimaryPositionRequest - (*tabletmanagerdata.WaitForPositionRequest)(nil), // 22: tabletmanagerdata.WaitForPositionRequest - (*tabletmanagerdata.StopReplicationRequest)(nil), // 23: tabletmanagerdata.StopReplicationRequest - (*tabletmanagerdata.StopReplicationMinimumRequest)(nil), // 24: tabletmanagerdata.StopReplicationMinimumRequest - (*tabletmanagerdata.StartReplicationRequest)(nil), // 25: tabletmanagerdata.StartReplicationRequest - (*tabletmanagerdata.StartReplicationUntilAfterRequest)(nil), // 26: tabletmanagerdata.StartReplicationUntilAfterRequest - (*tabletmanagerdata.GetReplicasRequest)(nil), // 27: tabletmanagerdata.GetReplicasRequest - (*tabletmanagerdata.VReplicationExecRequest)(nil), // 28: tabletmanagerdata.VReplicationExecRequest - (*tabletmanagerdata.VReplicationWaitForPosRequest)(nil), // 29: tabletmanagerdata.VReplicationWaitForPosRequest - (*tabletmanagerdata.VDiffRequest)(nil), // 30: tabletmanagerdata.VDiffRequest - (*tabletmanagerdata.ResetReplicationRequest)(nil), // 31: tabletmanagerdata.ResetReplicationRequest - (*tabletmanagerdata.InitPrimaryRequest)(nil), // 32: tabletmanagerdata.InitPrimaryRequest - (*tabletmanagerdata.PopulateReparentJournalRequest)(nil), // 33: tabletmanagerdata.PopulateReparentJournalRequest - (*tabletmanagerdata.InitReplicaRequest)(nil), // 34: tabletmanagerdata.InitReplicaRequest - (*tabletmanagerdata.DemotePrimaryRequest)(nil), // 35: tabletmanagerdata.DemotePrimaryRequest - (*tabletmanagerdata.UndoDemotePrimaryRequest)(nil), // 36: tabletmanagerdata.UndoDemotePrimaryRequest - (*tabletmanagerdata.ReplicaWasPromotedRequest)(nil), // 37: tabletmanagerdata.ReplicaWasPromotedRequest - (*tabletmanagerdata.ResetReplicationParametersRequest)(nil), // 38: tabletmanagerdata.ResetReplicationParametersRequest - (*tabletmanagerdata.FullStatusRequest)(nil), // 39: tabletmanagerdata.FullStatusRequest - (*tabletmanagerdata.SetReplicationSourceRequest)(nil), // 40: tabletmanagerdata.SetReplicationSourceRequest - (*tabletmanagerdata.ReplicaWasRestartedRequest)(nil), // 41: tabletmanagerdata.ReplicaWasRestartedRequest - (*tabletmanagerdata.StopReplicationAndGetStatusRequest)(nil), // 42: tabletmanagerdata.StopReplicationAndGetStatusRequest - (*tabletmanagerdata.PromoteReplicaRequest)(nil), // 43: tabletmanagerdata.PromoteReplicaRequest - (*tabletmanagerdata.BackupRequest)(nil), // 44: tabletmanagerdata.BackupRequest - (*tabletmanagerdata.RestoreFromBackupRequest)(nil), // 45: tabletmanagerdata.RestoreFromBackupRequest - (*tabletmanagerdata.PingResponse)(nil), // 46: tabletmanagerdata.PingResponse - (*tabletmanagerdata.SleepResponse)(nil), // 47: tabletmanagerdata.SleepResponse - (*tabletmanagerdata.ExecuteHookResponse)(nil), // 48: tabletmanagerdata.ExecuteHookResponse - (*tabletmanagerdata.GetSchemaResponse)(nil), // 49: tabletmanagerdata.GetSchemaResponse - (*tabletmanagerdata.GetPermissionsResponse)(nil), // 50: tabletmanagerdata.GetPermissionsResponse - (*tabletmanagerdata.SetReadOnlyResponse)(nil), // 51: tabletmanagerdata.SetReadOnlyResponse - (*tabletmanagerdata.SetReadWriteResponse)(nil), // 52: tabletmanagerdata.SetReadWriteResponse - (*tabletmanagerdata.ChangeTypeResponse)(nil), // 53: tabletmanagerdata.ChangeTypeResponse - (*tabletmanagerdata.RefreshStateResponse)(nil), // 54: tabletmanagerdata.RefreshStateResponse - (*tabletmanagerdata.RunHealthCheckResponse)(nil), // 55: tabletmanagerdata.RunHealthCheckResponse - (*tabletmanagerdata.ReloadSchemaResponse)(nil), // 56: tabletmanagerdata.ReloadSchemaResponse - (*tabletmanagerdata.PreflightSchemaResponse)(nil), // 57: tabletmanagerdata.PreflightSchemaResponse - (*tabletmanagerdata.ApplySchemaResponse)(nil), // 58: tabletmanagerdata.ApplySchemaResponse - (*tabletmanagerdata.LockTablesResponse)(nil), // 59: tabletmanagerdata.LockTablesResponse - (*tabletmanagerdata.UnlockTablesResponse)(nil), // 60: tabletmanagerdata.UnlockTablesResponse - (*tabletmanagerdata.ExecuteQueryResponse)(nil), // 61: tabletmanagerdata.ExecuteQueryResponse - (*tabletmanagerdata.ExecuteFetchAsDbaResponse)(nil), // 62: tabletmanagerdata.ExecuteFetchAsDbaResponse - (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse)(nil), // 63: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse - (*tabletmanagerdata.ExecuteFetchAsAppResponse)(nil), // 64: tabletmanagerdata.ExecuteFetchAsAppResponse - (*tabletmanagerdata.ReplicationStatusResponse)(nil), // 65: tabletmanagerdata.ReplicationStatusResponse - (*tabletmanagerdata.PrimaryStatusResponse)(nil), // 66: tabletmanagerdata.PrimaryStatusResponse - (*tabletmanagerdata.PrimaryPositionResponse)(nil), // 67: tabletmanagerdata.PrimaryPositionResponse - (*tabletmanagerdata.WaitForPositionResponse)(nil), // 68: tabletmanagerdata.WaitForPositionResponse - (*tabletmanagerdata.StopReplicationResponse)(nil), // 69: tabletmanagerdata.StopReplicationResponse - (*tabletmanagerdata.StopReplicationMinimumResponse)(nil), // 70: tabletmanagerdata.StopReplicationMinimumResponse - (*tabletmanagerdata.StartReplicationResponse)(nil), // 71: tabletmanagerdata.StartReplicationResponse - (*tabletmanagerdata.StartReplicationUntilAfterResponse)(nil), // 72: tabletmanagerdata.StartReplicationUntilAfterResponse - (*tabletmanagerdata.GetReplicasResponse)(nil), // 73: tabletmanagerdata.GetReplicasResponse - (*tabletmanagerdata.VReplicationExecResponse)(nil), // 74: tabletmanagerdata.VReplicationExecResponse - (*tabletmanagerdata.VReplicationWaitForPosResponse)(nil), // 75: tabletmanagerdata.VReplicationWaitForPosResponse - (*tabletmanagerdata.VDiffResponse)(nil), // 76: tabletmanagerdata.VDiffResponse - (*tabletmanagerdata.ResetReplicationResponse)(nil), // 77: tabletmanagerdata.ResetReplicationResponse - (*tabletmanagerdata.InitPrimaryResponse)(nil), // 78: tabletmanagerdata.InitPrimaryResponse - (*tabletmanagerdata.PopulateReparentJournalResponse)(nil), // 79: tabletmanagerdata.PopulateReparentJournalResponse - (*tabletmanagerdata.InitReplicaResponse)(nil), // 80: tabletmanagerdata.InitReplicaResponse - (*tabletmanagerdata.DemotePrimaryResponse)(nil), // 81: tabletmanagerdata.DemotePrimaryResponse - (*tabletmanagerdata.UndoDemotePrimaryResponse)(nil), // 82: tabletmanagerdata.UndoDemotePrimaryResponse - (*tabletmanagerdata.ReplicaWasPromotedResponse)(nil), // 83: tabletmanagerdata.ReplicaWasPromotedResponse - (*tabletmanagerdata.ResetReplicationParametersResponse)(nil), // 84: tabletmanagerdata.ResetReplicationParametersResponse - (*tabletmanagerdata.FullStatusResponse)(nil), // 85: tabletmanagerdata.FullStatusResponse - (*tabletmanagerdata.SetReplicationSourceResponse)(nil), // 86: tabletmanagerdata.SetReplicationSourceResponse - (*tabletmanagerdata.ReplicaWasRestartedResponse)(nil), // 87: tabletmanagerdata.ReplicaWasRestartedResponse - (*tabletmanagerdata.StopReplicationAndGetStatusResponse)(nil), // 88: tabletmanagerdata.StopReplicationAndGetStatusResponse - (*tabletmanagerdata.PromoteReplicaResponse)(nil), // 89: tabletmanagerdata.PromoteReplicaResponse - (*tabletmanagerdata.BackupResponse)(nil), // 90: tabletmanagerdata.BackupResponse - (*tabletmanagerdata.RestoreFromBackupResponse)(nil), // 91: tabletmanagerdata.RestoreFromBackupResponse + (*tabletmanagerdata.ResetSequencesRequest)(nil), // 13: tabletmanagerdata.ResetSequencesRequest + (*tabletmanagerdata.LockTablesRequest)(nil), // 14: tabletmanagerdata.LockTablesRequest + (*tabletmanagerdata.UnlockTablesRequest)(nil), // 15: tabletmanagerdata.UnlockTablesRequest + (*tabletmanagerdata.ExecuteQueryRequest)(nil), // 16: tabletmanagerdata.ExecuteQueryRequest + (*tabletmanagerdata.ExecuteFetchAsDbaRequest)(nil), // 17: tabletmanagerdata.ExecuteFetchAsDbaRequest + (*tabletmanagerdata.ExecuteFetchAsAllPrivsRequest)(nil), // 18: tabletmanagerdata.ExecuteFetchAsAllPrivsRequest + (*tabletmanagerdata.ExecuteFetchAsAppRequest)(nil), // 19: tabletmanagerdata.ExecuteFetchAsAppRequest + (*tabletmanagerdata.ReplicationStatusRequest)(nil), // 20: tabletmanagerdata.ReplicationStatusRequest + (*tabletmanagerdata.PrimaryStatusRequest)(nil), // 21: tabletmanagerdata.PrimaryStatusRequest + (*tabletmanagerdata.PrimaryPositionRequest)(nil), // 22: tabletmanagerdata.PrimaryPositionRequest + (*tabletmanagerdata.WaitForPositionRequest)(nil), // 23: tabletmanagerdata.WaitForPositionRequest + (*tabletmanagerdata.StopReplicationRequest)(nil), // 24: tabletmanagerdata.StopReplicationRequest + (*tabletmanagerdata.StopReplicationMinimumRequest)(nil), // 25: tabletmanagerdata.StopReplicationMinimumRequest + (*tabletmanagerdata.StartReplicationRequest)(nil), // 26: tabletmanagerdata.StartReplicationRequest + (*tabletmanagerdata.StartReplicationUntilAfterRequest)(nil), // 27: tabletmanagerdata.StartReplicationUntilAfterRequest + (*tabletmanagerdata.GetReplicasRequest)(nil), // 28: tabletmanagerdata.GetReplicasRequest + (*tabletmanagerdata.CreateVReplicationWorkflowRequest)(nil), // 29: tabletmanagerdata.CreateVReplicationWorkflowRequest + (*tabletmanagerdata.DeleteVReplicationWorkflowRequest)(nil), // 30: tabletmanagerdata.DeleteVReplicationWorkflowRequest + (*tabletmanagerdata.ReadVReplicationWorkflowRequest)(nil), // 31: tabletmanagerdata.ReadVReplicationWorkflowRequest + (*tabletmanagerdata.VReplicationExecRequest)(nil), // 32: tabletmanagerdata.VReplicationExecRequest + (*tabletmanagerdata.VReplicationWaitForPosRequest)(nil), // 33: tabletmanagerdata.VReplicationWaitForPosRequest + (*tabletmanagerdata.UpdateVReplicationWorkflowRequest)(nil), // 34: tabletmanagerdata.UpdateVReplicationWorkflowRequest + (*tabletmanagerdata.VDiffRequest)(nil), // 35: tabletmanagerdata.VDiffRequest + (*tabletmanagerdata.ResetReplicationRequest)(nil), // 36: tabletmanagerdata.ResetReplicationRequest + (*tabletmanagerdata.InitPrimaryRequest)(nil), // 37: tabletmanagerdata.InitPrimaryRequest + (*tabletmanagerdata.PopulateReparentJournalRequest)(nil), // 38: tabletmanagerdata.PopulateReparentJournalRequest + (*tabletmanagerdata.InitReplicaRequest)(nil), // 39: tabletmanagerdata.InitReplicaRequest + (*tabletmanagerdata.DemotePrimaryRequest)(nil), // 40: tabletmanagerdata.DemotePrimaryRequest + (*tabletmanagerdata.UndoDemotePrimaryRequest)(nil), // 41: tabletmanagerdata.UndoDemotePrimaryRequest + (*tabletmanagerdata.ReplicaWasPromotedRequest)(nil), // 42: tabletmanagerdata.ReplicaWasPromotedRequest + (*tabletmanagerdata.ResetReplicationParametersRequest)(nil), // 43: tabletmanagerdata.ResetReplicationParametersRequest + (*tabletmanagerdata.FullStatusRequest)(nil), // 44: tabletmanagerdata.FullStatusRequest + (*tabletmanagerdata.SetReplicationSourceRequest)(nil), // 45: tabletmanagerdata.SetReplicationSourceRequest + (*tabletmanagerdata.ReplicaWasRestartedRequest)(nil), // 46: tabletmanagerdata.ReplicaWasRestartedRequest + (*tabletmanagerdata.StopReplicationAndGetStatusRequest)(nil), // 47: tabletmanagerdata.StopReplicationAndGetStatusRequest + (*tabletmanagerdata.PromoteReplicaRequest)(nil), // 48: tabletmanagerdata.PromoteReplicaRequest + (*tabletmanagerdata.BackupRequest)(nil), // 49: tabletmanagerdata.BackupRequest + (*tabletmanagerdata.RestoreFromBackupRequest)(nil), // 50: tabletmanagerdata.RestoreFromBackupRequest + (*tabletmanagerdata.CheckThrottlerRequest)(nil), // 51: tabletmanagerdata.CheckThrottlerRequest + (*tabletmanagerdata.PingResponse)(nil), // 52: tabletmanagerdata.PingResponse + (*tabletmanagerdata.SleepResponse)(nil), // 53: tabletmanagerdata.SleepResponse + (*tabletmanagerdata.ExecuteHookResponse)(nil), // 54: tabletmanagerdata.ExecuteHookResponse + (*tabletmanagerdata.GetSchemaResponse)(nil), // 55: tabletmanagerdata.GetSchemaResponse + (*tabletmanagerdata.GetPermissionsResponse)(nil), // 56: tabletmanagerdata.GetPermissionsResponse + (*tabletmanagerdata.SetReadOnlyResponse)(nil), // 57: tabletmanagerdata.SetReadOnlyResponse + (*tabletmanagerdata.SetReadWriteResponse)(nil), // 58: tabletmanagerdata.SetReadWriteResponse + (*tabletmanagerdata.ChangeTypeResponse)(nil), // 59: tabletmanagerdata.ChangeTypeResponse + (*tabletmanagerdata.RefreshStateResponse)(nil), // 60: tabletmanagerdata.RefreshStateResponse + (*tabletmanagerdata.RunHealthCheckResponse)(nil), // 61: tabletmanagerdata.RunHealthCheckResponse + (*tabletmanagerdata.ReloadSchemaResponse)(nil), // 62: tabletmanagerdata.ReloadSchemaResponse + (*tabletmanagerdata.PreflightSchemaResponse)(nil), // 63: tabletmanagerdata.PreflightSchemaResponse + (*tabletmanagerdata.ApplySchemaResponse)(nil), // 64: tabletmanagerdata.ApplySchemaResponse + (*tabletmanagerdata.ResetSequencesResponse)(nil), // 65: tabletmanagerdata.ResetSequencesResponse + (*tabletmanagerdata.LockTablesResponse)(nil), // 66: tabletmanagerdata.LockTablesResponse + (*tabletmanagerdata.UnlockTablesResponse)(nil), // 67: tabletmanagerdata.UnlockTablesResponse + (*tabletmanagerdata.ExecuteQueryResponse)(nil), // 68: tabletmanagerdata.ExecuteQueryResponse + (*tabletmanagerdata.ExecuteFetchAsDbaResponse)(nil), // 69: tabletmanagerdata.ExecuteFetchAsDbaResponse + (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse)(nil), // 70: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse + (*tabletmanagerdata.ExecuteFetchAsAppResponse)(nil), // 71: tabletmanagerdata.ExecuteFetchAsAppResponse + (*tabletmanagerdata.ReplicationStatusResponse)(nil), // 72: tabletmanagerdata.ReplicationStatusResponse + (*tabletmanagerdata.PrimaryStatusResponse)(nil), // 73: tabletmanagerdata.PrimaryStatusResponse + (*tabletmanagerdata.PrimaryPositionResponse)(nil), // 74: tabletmanagerdata.PrimaryPositionResponse + (*tabletmanagerdata.WaitForPositionResponse)(nil), // 75: tabletmanagerdata.WaitForPositionResponse + (*tabletmanagerdata.StopReplicationResponse)(nil), // 76: tabletmanagerdata.StopReplicationResponse + (*tabletmanagerdata.StopReplicationMinimumResponse)(nil), // 77: tabletmanagerdata.StopReplicationMinimumResponse + (*tabletmanagerdata.StartReplicationResponse)(nil), // 78: tabletmanagerdata.StartReplicationResponse + (*tabletmanagerdata.StartReplicationUntilAfterResponse)(nil), // 79: tabletmanagerdata.StartReplicationUntilAfterResponse + (*tabletmanagerdata.GetReplicasResponse)(nil), // 80: tabletmanagerdata.GetReplicasResponse + (*tabletmanagerdata.CreateVReplicationWorkflowResponse)(nil), // 81: tabletmanagerdata.CreateVReplicationWorkflowResponse + (*tabletmanagerdata.DeleteVReplicationWorkflowResponse)(nil), // 82: tabletmanagerdata.DeleteVReplicationWorkflowResponse + (*tabletmanagerdata.ReadVReplicationWorkflowResponse)(nil), // 83: tabletmanagerdata.ReadVReplicationWorkflowResponse + (*tabletmanagerdata.VReplicationExecResponse)(nil), // 84: tabletmanagerdata.VReplicationExecResponse + (*tabletmanagerdata.VReplicationWaitForPosResponse)(nil), // 85: tabletmanagerdata.VReplicationWaitForPosResponse + (*tabletmanagerdata.UpdateVReplicationWorkflowResponse)(nil), // 86: tabletmanagerdata.UpdateVReplicationWorkflowResponse + (*tabletmanagerdata.VDiffResponse)(nil), // 87: tabletmanagerdata.VDiffResponse + (*tabletmanagerdata.ResetReplicationResponse)(nil), // 88: tabletmanagerdata.ResetReplicationResponse + (*tabletmanagerdata.InitPrimaryResponse)(nil), // 89: tabletmanagerdata.InitPrimaryResponse + (*tabletmanagerdata.PopulateReparentJournalResponse)(nil), // 90: tabletmanagerdata.PopulateReparentJournalResponse + (*tabletmanagerdata.InitReplicaResponse)(nil), // 91: tabletmanagerdata.InitReplicaResponse + (*tabletmanagerdata.DemotePrimaryResponse)(nil), // 92: tabletmanagerdata.DemotePrimaryResponse + (*tabletmanagerdata.UndoDemotePrimaryResponse)(nil), // 93: tabletmanagerdata.UndoDemotePrimaryResponse + (*tabletmanagerdata.ReplicaWasPromotedResponse)(nil), // 94: tabletmanagerdata.ReplicaWasPromotedResponse + (*tabletmanagerdata.ResetReplicationParametersResponse)(nil), // 95: tabletmanagerdata.ResetReplicationParametersResponse + (*tabletmanagerdata.FullStatusResponse)(nil), // 96: tabletmanagerdata.FullStatusResponse + (*tabletmanagerdata.SetReplicationSourceResponse)(nil), // 97: tabletmanagerdata.SetReplicationSourceResponse + (*tabletmanagerdata.ReplicaWasRestartedResponse)(nil), // 98: tabletmanagerdata.ReplicaWasRestartedResponse + (*tabletmanagerdata.StopReplicationAndGetStatusResponse)(nil), // 99: tabletmanagerdata.StopReplicationAndGetStatusResponse + (*tabletmanagerdata.PromoteReplicaResponse)(nil), // 100: tabletmanagerdata.PromoteReplicaResponse + (*tabletmanagerdata.BackupResponse)(nil), // 101: tabletmanagerdata.BackupResponse + (*tabletmanagerdata.RestoreFromBackupResponse)(nil), // 102: tabletmanagerdata.RestoreFromBackupResponse + (*tabletmanagerdata.CheckThrottlerResponse)(nil), // 103: tabletmanagerdata.CheckThrottlerResponse } var file_tabletmanagerservice_proto_depIdxs = []int32{ - 0, // 0: tabletmanagerservice.TabletManager.Ping:input_type -> tabletmanagerdata.PingRequest - 1, // 1: tabletmanagerservice.TabletManager.Sleep:input_type -> tabletmanagerdata.SleepRequest - 2, // 2: tabletmanagerservice.TabletManager.ExecuteHook:input_type -> tabletmanagerdata.ExecuteHookRequest - 3, // 3: tabletmanagerservice.TabletManager.GetSchema:input_type -> tabletmanagerdata.GetSchemaRequest - 4, // 4: tabletmanagerservice.TabletManager.GetPermissions:input_type -> tabletmanagerdata.GetPermissionsRequest - 5, // 5: tabletmanagerservice.TabletManager.SetReadOnly:input_type -> tabletmanagerdata.SetReadOnlyRequest - 6, // 6: tabletmanagerservice.TabletManager.SetReadWrite:input_type -> tabletmanagerdata.SetReadWriteRequest - 7, // 7: tabletmanagerservice.TabletManager.ChangeType:input_type -> tabletmanagerdata.ChangeTypeRequest - 8, // 8: tabletmanagerservice.TabletManager.RefreshState:input_type -> tabletmanagerdata.RefreshStateRequest - 9, // 9: tabletmanagerservice.TabletManager.RunHealthCheck:input_type -> tabletmanagerdata.RunHealthCheckRequest - 10, // 10: tabletmanagerservice.TabletManager.ReloadSchema:input_type -> tabletmanagerdata.ReloadSchemaRequest - 11, // 11: tabletmanagerservice.TabletManager.PreflightSchema:input_type -> tabletmanagerdata.PreflightSchemaRequest - 12, // 12: tabletmanagerservice.TabletManager.ApplySchema:input_type -> tabletmanagerdata.ApplySchemaRequest - 13, // 13: tabletmanagerservice.TabletManager.LockTables:input_type -> tabletmanagerdata.LockTablesRequest - 14, // 14: tabletmanagerservice.TabletManager.UnlockTables:input_type -> tabletmanagerdata.UnlockTablesRequest - 15, // 15: tabletmanagerservice.TabletManager.ExecuteQuery:input_type -> tabletmanagerdata.ExecuteQueryRequest - 16, // 16: tabletmanagerservice.TabletManager.ExecuteFetchAsDba:input_type -> tabletmanagerdata.ExecuteFetchAsDbaRequest - 17, // 17: tabletmanagerservice.TabletManager.ExecuteFetchAsAllPrivs:input_type -> tabletmanagerdata.ExecuteFetchAsAllPrivsRequest - 18, // 18: tabletmanagerservice.TabletManager.ExecuteFetchAsApp:input_type -> tabletmanagerdata.ExecuteFetchAsAppRequest - 19, // 19: tabletmanagerservice.TabletManager.ReplicationStatus:input_type -> tabletmanagerdata.ReplicationStatusRequest - 20, // 20: tabletmanagerservice.TabletManager.PrimaryStatus:input_type -> tabletmanagerdata.PrimaryStatusRequest - 21, // 21: tabletmanagerservice.TabletManager.PrimaryPosition:input_type -> tabletmanagerdata.PrimaryPositionRequest - 22, // 22: tabletmanagerservice.TabletManager.WaitForPosition:input_type -> tabletmanagerdata.WaitForPositionRequest - 23, // 23: tabletmanagerservice.TabletManager.StopReplication:input_type -> tabletmanagerdata.StopReplicationRequest - 24, // 24: tabletmanagerservice.TabletManager.StopReplicationMinimum:input_type -> tabletmanagerdata.StopReplicationMinimumRequest - 25, // 25: tabletmanagerservice.TabletManager.StartReplication:input_type -> tabletmanagerdata.StartReplicationRequest - 26, // 26: tabletmanagerservice.TabletManager.StartReplicationUntilAfter:input_type -> tabletmanagerdata.StartReplicationUntilAfterRequest - 27, // 27: tabletmanagerservice.TabletManager.GetReplicas:input_type -> tabletmanagerdata.GetReplicasRequest - 28, // 28: tabletmanagerservice.TabletManager.VReplicationExec:input_type -> tabletmanagerdata.VReplicationExecRequest - 29, // 29: tabletmanagerservice.TabletManager.VReplicationWaitForPos:input_type -> tabletmanagerdata.VReplicationWaitForPosRequest - 30, // 30: tabletmanagerservice.TabletManager.VDiff:input_type -> tabletmanagerdata.VDiffRequest - 31, // 31: tabletmanagerservice.TabletManager.ResetReplication:input_type -> tabletmanagerdata.ResetReplicationRequest - 32, // 32: tabletmanagerservice.TabletManager.InitPrimary:input_type -> tabletmanagerdata.InitPrimaryRequest - 33, // 33: tabletmanagerservice.TabletManager.PopulateReparentJournal:input_type -> tabletmanagerdata.PopulateReparentJournalRequest - 34, // 34: tabletmanagerservice.TabletManager.InitReplica:input_type -> tabletmanagerdata.InitReplicaRequest - 35, // 35: tabletmanagerservice.TabletManager.DemotePrimary:input_type -> tabletmanagerdata.DemotePrimaryRequest - 36, // 36: tabletmanagerservice.TabletManager.UndoDemotePrimary:input_type -> tabletmanagerdata.UndoDemotePrimaryRequest - 37, // 37: tabletmanagerservice.TabletManager.ReplicaWasPromoted:input_type -> tabletmanagerdata.ReplicaWasPromotedRequest - 38, // 38: tabletmanagerservice.TabletManager.ResetReplicationParameters:input_type -> tabletmanagerdata.ResetReplicationParametersRequest - 39, // 39: tabletmanagerservice.TabletManager.FullStatus:input_type -> tabletmanagerdata.FullStatusRequest - 40, // 40: tabletmanagerservice.TabletManager.SetReplicationSource:input_type -> tabletmanagerdata.SetReplicationSourceRequest - 41, // 41: tabletmanagerservice.TabletManager.ReplicaWasRestarted:input_type -> tabletmanagerdata.ReplicaWasRestartedRequest - 42, // 42: tabletmanagerservice.TabletManager.StopReplicationAndGetStatus:input_type -> tabletmanagerdata.StopReplicationAndGetStatusRequest - 43, // 43: tabletmanagerservice.TabletManager.PromoteReplica:input_type -> tabletmanagerdata.PromoteReplicaRequest - 44, // 44: tabletmanagerservice.TabletManager.Backup:input_type -> tabletmanagerdata.BackupRequest - 45, // 45: tabletmanagerservice.TabletManager.RestoreFromBackup:input_type -> tabletmanagerdata.RestoreFromBackupRequest - 46, // 46: tabletmanagerservice.TabletManager.Ping:output_type -> tabletmanagerdata.PingResponse - 47, // 47: tabletmanagerservice.TabletManager.Sleep:output_type -> tabletmanagerdata.SleepResponse - 48, // 48: tabletmanagerservice.TabletManager.ExecuteHook:output_type -> tabletmanagerdata.ExecuteHookResponse - 49, // 49: tabletmanagerservice.TabletManager.GetSchema:output_type -> tabletmanagerdata.GetSchemaResponse - 50, // 50: tabletmanagerservice.TabletManager.GetPermissions:output_type -> tabletmanagerdata.GetPermissionsResponse - 51, // 51: tabletmanagerservice.TabletManager.SetReadOnly:output_type -> tabletmanagerdata.SetReadOnlyResponse - 52, // 52: tabletmanagerservice.TabletManager.SetReadWrite:output_type -> tabletmanagerdata.SetReadWriteResponse - 53, // 53: tabletmanagerservice.TabletManager.ChangeType:output_type -> tabletmanagerdata.ChangeTypeResponse - 54, // 54: tabletmanagerservice.TabletManager.RefreshState:output_type -> tabletmanagerdata.RefreshStateResponse - 55, // 55: tabletmanagerservice.TabletManager.RunHealthCheck:output_type -> tabletmanagerdata.RunHealthCheckResponse - 56, // 56: tabletmanagerservice.TabletManager.ReloadSchema:output_type -> tabletmanagerdata.ReloadSchemaResponse - 57, // 57: tabletmanagerservice.TabletManager.PreflightSchema:output_type -> tabletmanagerdata.PreflightSchemaResponse - 58, // 58: tabletmanagerservice.TabletManager.ApplySchema:output_type -> tabletmanagerdata.ApplySchemaResponse - 59, // 59: tabletmanagerservice.TabletManager.LockTables:output_type -> tabletmanagerdata.LockTablesResponse - 60, // 60: tabletmanagerservice.TabletManager.UnlockTables:output_type -> tabletmanagerdata.UnlockTablesResponse - 61, // 61: tabletmanagerservice.TabletManager.ExecuteQuery:output_type -> tabletmanagerdata.ExecuteQueryResponse - 62, // 62: tabletmanagerservice.TabletManager.ExecuteFetchAsDba:output_type -> tabletmanagerdata.ExecuteFetchAsDbaResponse - 63, // 63: tabletmanagerservice.TabletManager.ExecuteFetchAsAllPrivs:output_type -> tabletmanagerdata.ExecuteFetchAsAllPrivsResponse - 64, // 64: tabletmanagerservice.TabletManager.ExecuteFetchAsApp:output_type -> tabletmanagerdata.ExecuteFetchAsAppResponse - 65, // 65: tabletmanagerservice.TabletManager.ReplicationStatus:output_type -> tabletmanagerdata.ReplicationStatusResponse - 66, // 66: tabletmanagerservice.TabletManager.PrimaryStatus:output_type -> tabletmanagerdata.PrimaryStatusResponse - 67, // 67: tabletmanagerservice.TabletManager.PrimaryPosition:output_type -> tabletmanagerdata.PrimaryPositionResponse - 68, // 68: tabletmanagerservice.TabletManager.WaitForPosition:output_type -> tabletmanagerdata.WaitForPositionResponse - 69, // 69: tabletmanagerservice.TabletManager.StopReplication:output_type -> tabletmanagerdata.StopReplicationResponse - 70, // 70: tabletmanagerservice.TabletManager.StopReplicationMinimum:output_type -> tabletmanagerdata.StopReplicationMinimumResponse - 71, // 71: tabletmanagerservice.TabletManager.StartReplication:output_type -> tabletmanagerdata.StartReplicationResponse - 72, // 72: tabletmanagerservice.TabletManager.StartReplicationUntilAfter:output_type -> tabletmanagerdata.StartReplicationUntilAfterResponse - 73, // 73: tabletmanagerservice.TabletManager.GetReplicas:output_type -> tabletmanagerdata.GetReplicasResponse - 74, // 74: tabletmanagerservice.TabletManager.VReplicationExec:output_type -> tabletmanagerdata.VReplicationExecResponse - 75, // 75: tabletmanagerservice.TabletManager.VReplicationWaitForPos:output_type -> tabletmanagerdata.VReplicationWaitForPosResponse - 76, // 76: tabletmanagerservice.TabletManager.VDiff:output_type -> tabletmanagerdata.VDiffResponse - 77, // 77: tabletmanagerservice.TabletManager.ResetReplication:output_type -> tabletmanagerdata.ResetReplicationResponse - 78, // 78: tabletmanagerservice.TabletManager.InitPrimary:output_type -> tabletmanagerdata.InitPrimaryResponse - 79, // 79: tabletmanagerservice.TabletManager.PopulateReparentJournal:output_type -> tabletmanagerdata.PopulateReparentJournalResponse - 80, // 80: tabletmanagerservice.TabletManager.InitReplica:output_type -> tabletmanagerdata.InitReplicaResponse - 81, // 81: tabletmanagerservice.TabletManager.DemotePrimary:output_type -> tabletmanagerdata.DemotePrimaryResponse - 82, // 82: tabletmanagerservice.TabletManager.UndoDemotePrimary:output_type -> tabletmanagerdata.UndoDemotePrimaryResponse - 83, // 83: tabletmanagerservice.TabletManager.ReplicaWasPromoted:output_type -> tabletmanagerdata.ReplicaWasPromotedResponse - 84, // 84: tabletmanagerservice.TabletManager.ResetReplicationParameters:output_type -> tabletmanagerdata.ResetReplicationParametersResponse - 85, // 85: tabletmanagerservice.TabletManager.FullStatus:output_type -> tabletmanagerdata.FullStatusResponse - 86, // 86: tabletmanagerservice.TabletManager.SetReplicationSource:output_type -> tabletmanagerdata.SetReplicationSourceResponse - 87, // 87: tabletmanagerservice.TabletManager.ReplicaWasRestarted:output_type -> tabletmanagerdata.ReplicaWasRestartedResponse - 88, // 88: tabletmanagerservice.TabletManager.StopReplicationAndGetStatus:output_type -> tabletmanagerdata.StopReplicationAndGetStatusResponse - 89, // 89: tabletmanagerservice.TabletManager.PromoteReplica:output_type -> tabletmanagerdata.PromoteReplicaResponse - 90, // 90: tabletmanagerservice.TabletManager.Backup:output_type -> tabletmanagerdata.BackupResponse - 91, // 91: tabletmanagerservice.TabletManager.RestoreFromBackup:output_type -> tabletmanagerdata.RestoreFromBackupResponse - 46, // [46:92] is the sub-list for method output_type - 0, // [0:46] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 0, // 0: tabletmanagerservice.TabletManager.Ping:input_type -> tabletmanagerdata.PingRequest + 1, // 1: tabletmanagerservice.TabletManager.Sleep:input_type -> tabletmanagerdata.SleepRequest + 2, // 2: tabletmanagerservice.TabletManager.ExecuteHook:input_type -> tabletmanagerdata.ExecuteHookRequest + 3, // 3: tabletmanagerservice.TabletManager.GetSchema:input_type -> tabletmanagerdata.GetSchemaRequest + 4, // 4: tabletmanagerservice.TabletManager.GetPermissions:input_type -> tabletmanagerdata.GetPermissionsRequest + 5, // 5: tabletmanagerservice.TabletManager.SetReadOnly:input_type -> tabletmanagerdata.SetReadOnlyRequest + 6, // 6: tabletmanagerservice.TabletManager.SetReadWrite:input_type -> tabletmanagerdata.SetReadWriteRequest + 7, // 7: tabletmanagerservice.TabletManager.ChangeType:input_type -> tabletmanagerdata.ChangeTypeRequest + 8, // 8: tabletmanagerservice.TabletManager.RefreshState:input_type -> tabletmanagerdata.RefreshStateRequest + 9, // 9: tabletmanagerservice.TabletManager.RunHealthCheck:input_type -> tabletmanagerdata.RunHealthCheckRequest + 10, // 10: tabletmanagerservice.TabletManager.ReloadSchema:input_type -> tabletmanagerdata.ReloadSchemaRequest + 11, // 11: tabletmanagerservice.TabletManager.PreflightSchema:input_type -> tabletmanagerdata.PreflightSchemaRequest + 12, // 12: tabletmanagerservice.TabletManager.ApplySchema:input_type -> tabletmanagerdata.ApplySchemaRequest + 13, // 13: tabletmanagerservice.TabletManager.ResetSequences:input_type -> tabletmanagerdata.ResetSequencesRequest + 14, // 14: tabletmanagerservice.TabletManager.LockTables:input_type -> tabletmanagerdata.LockTablesRequest + 15, // 15: tabletmanagerservice.TabletManager.UnlockTables:input_type -> tabletmanagerdata.UnlockTablesRequest + 16, // 16: tabletmanagerservice.TabletManager.ExecuteQuery:input_type -> tabletmanagerdata.ExecuteQueryRequest + 17, // 17: tabletmanagerservice.TabletManager.ExecuteFetchAsDba:input_type -> tabletmanagerdata.ExecuteFetchAsDbaRequest + 18, // 18: tabletmanagerservice.TabletManager.ExecuteFetchAsAllPrivs:input_type -> tabletmanagerdata.ExecuteFetchAsAllPrivsRequest + 19, // 19: tabletmanagerservice.TabletManager.ExecuteFetchAsApp:input_type -> tabletmanagerdata.ExecuteFetchAsAppRequest + 20, // 20: tabletmanagerservice.TabletManager.ReplicationStatus:input_type -> tabletmanagerdata.ReplicationStatusRequest + 21, // 21: tabletmanagerservice.TabletManager.PrimaryStatus:input_type -> tabletmanagerdata.PrimaryStatusRequest + 22, // 22: tabletmanagerservice.TabletManager.PrimaryPosition:input_type -> tabletmanagerdata.PrimaryPositionRequest + 23, // 23: tabletmanagerservice.TabletManager.WaitForPosition:input_type -> tabletmanagerdata.WaitForPositionRequest + 24, // 24: tabletmanagerservice.TabletManager.StopReplication:input_type -> tabletmanagerdata.StopReplicationRequest + 25, // 25: tabletmanagerservice.TabletManager.StopReplicationMinimum:input_type -> tabletmanagerdata.StopReplicationMinimumRequest + 26, // 26: tabletmanagerservice.TabletManager.StartReplication:input_type -> tabletmanagerdata.StartReplicationRequest + 27, // 27: tabletmanagerservice.TabletManager.StartReplicationUntilAfter:input_type -> tabletmanagerdata.StartReplicationUntilAfterRequest + 28, // 28: tabletmanagerservice.TabletManager.GetReplicas:input_type -> tabletmanagerdata.GetReplicasRequest + 29, // 29: tabletmanagerservice.TabletManager.CreateVReplicationWorkflow:input_type -> tabletmanagerdata.CreateVReplicationWorkflowRequest + 30, // 30: tabletmanagerservice.TabletManager.DeleteVReplicationWorkflow:input_type -> tabletmanagerdata.DeleteVReplicationWorkflowRequest + 31, // 31: tabletmanagerservice.TabletManager.ReadVReplicationWorkflow:input_type -> tabletmanagerdata.ReadVReplicationWorkflowRequest + 32, // 32: tabletmanagerservice.TabletManager.VReplicationExec:input_type -> tabletmanagerdata.VReplicationExecRequest + 33, // 33: tabletmanagerservice.TabletManager.VReplicationWaitForPos:input_type -> tabletmanagerdata.VReplicationWaitForPosRequest + 34, // 34: tabletmanagerservice.TabletManager.UpdateVReplicationWorkflow:input_type -> tabletmanagerdata.UpdateVReplicationWorkflowRequest + 35, // 35: tabletmanagerservice.TabletManager.VDiff:input_type -> tabletmanagerdata.VDiffRequest + 36, // 36: tabletmanagerservice.TabletManager.ResetReplication:input_type -> tabletmanagerdata.ResetReplicationRequest + 37, // 37: tabletmanagerservice.TabletManager.InitPrimary:input_type -> tabletmanagerdata.InitPrimaryRequest + 38, // 38: tabletmanagerservice.TabletManager.PopulateReparentJournal:input_type -> tabletmanagerdata.PopulateReparentJournalRequest + 39, // 39: tabletmanagerservice.TabletManager.InitReplica:input_type -> tabletmanagerdata.InitReplicaRequest + 40, // 40: tabletmanagerservice.TabletManager.DemotePrimary:input_type -> tabletmanagerdata.DemotePrimaryRequest + 41, // 41: tabletmanagerservice.TabletManager.UndoDemotePrimary:input_type -> tabletmanagerdata.UndoDemotePrimaryRequest + 42, // 42: tabletmanagerservice.TabletManager.ReplicaWasPromoted:input_type -> tabletmanagerdata.ReplicaWasPromotedRequest + 43, // 43: tabletmanagerservice.TabletManager.ResetReplicationParameters:input_type -> tabletmanagerdata.ResetReplicationParametersRequest + 44, // 44: tabletmanagerservice.TabletManager.FullStatus:input_type -> tabletmanagerdata.FullStatusRequest + 45, // 45: tabletmanagerservice.TabletManager.SetReplicationSource:input_type -> tabletmanagerdata.SetReplicationSourceRequest + 46, // 46: tabletmanagerservice.TabletManager.ReplicaWasRestarted:input_type -> tabletmanagerdata.ReplicaWasRestartedRequest + 47, // 47: tabletmanagerservice.TabletManager.StopReplicationAndGetStatus:input_type -> tabletmanagerdata.StopReplicationAndGetStatusRequest + 48, // 48: tabletmanagerservice.TabletManager.PromoteReplica:input_type -> tabletmanagerdata.PromoteReplicaRequest + 49, // 49: tabletmanagerservice.TabletManager.Backup:input_type -> tabletmanagerdata.BackupRequest + 50, // 50: tabletmanagerservice.TabletManager.RestoreFromBackup:input_type -> tabletmanagerdata.RestoreFromBackupRequest + 51, // 51: tabletmanagerservice.TabletManager.CheckThrottler:input_type -> tabletmanagerdata.CheckThrottlerRequest + 52, // 52: tabletmanagerservice.TabletManager.Ping:output_type -> tabletmanagerdata.PingResponse + 53, // 53: tabletmanagerservice.TabletManager.Sleep:output_type -> tabletmanagerdata.SleepResponse + 54, // 54: tabletmanagerservice.TabletManager.ExecuteHook:output_type -> tabletmanagerdata.ExecuteHookResponse + 55, // 55: tabletmanagerservice.TabletManager.GetSchema:output_type -> tabletmanagerdata.GetSchemaResponse + 56, // 56: tabletmanagerservice.TabletManager.GetPermissions:output_type -> tabletmanagerdata.GetPermissionsResponse + 57, // 57: tabletmanagerservice.TabletManager.SetReadOnly:output_type -> tabletmanagerdata.SetReadOnlyResponse + 58, // 58: tabletmanagerservice.TabletManager.SetReadWrite:output_type -> tabletmanagerdata.SetReadWriteResponse + 59, // 59: tabletmanagerservice.TabletManager.ChangeType:output_type -> tabletmanagerdata.ChangeTypeResponse + 60, // 60: tabletmanagerservice.TabletManager.RefreshState:output_type -> tabletmanagerdata.RefreshStateResponse + 61, // 61: tabletmanagerservice.TabletManager.RunHealthCheck:output_type -> tabletmanagerdata.RunHealthCheckResponse + 62, // 62: tabletmanagerservice.TabletManager.ReloadSchema:output_type -> tabletmanagerdata.ReloadSchemaResponse + 63, // 63: tabletmanagerservice.TabletManager.PreflightSchema:output_type -> tabletmanagerdata.PreflightSchemaResponse + 64, // 64: tabletmanagerservice.TabletManager.ApplySchema:output_type -> tabletmanagerdata.ApplySchemaResponse + 65, // 65: tabletmanagerservice.TabletManager.ResetSequences:output_type -> tabletmanagerdata.ResetSequencesResponse + 66, // 66: tabletmanagerservice.TabletManager.LockTables:output_type -> tabletmanagerdata.LockTablesResponse + 67, // 67: tabletmanagerservice.TabletManager.UnlockTables:output_type -> tabletmanagerdata.UnlockTablesResponse + 68, // 68: tabletmanagerservice.TabletManager.ExecuteQuery:output_type -> tabletmanagerdata.ExecuteQueryResponse + 69, // 69: tabletmanagerservice.TabletManager.ExecuteFetchAsDba:output_type -> tabletmanagerdata.ExecuteFetchAsDbaResponse + 70, // 70: tabletmanagerservice.TabletManager.ExecuteFetchAsAllPrivs:output_type -> tabletmanagerdata.ExecuteFetchAsAllPrivsResponse + 71, // 71: tabletmanagerservice.TabletManager.ExecuteFetchAsApp:output_type -> tabletmanagerdata.ExecuteFetchAsAppResponse + 72, // 72: tabletmanagerservice.TabletManager.ReplicationStatus:output_type -> tabletmanagerdata.ReplicationStatusResponse + 73, // 73: tabletmanagerservice.TabletManager.PrimaryStatus:output_type -> tabletmanagerdata.PrimaryStatusResponse + 74, // 74: tabletmanagerservice.TabletManager.PrimaryPosition:output_type -> tabletmanagerdata.PrimaryPositionResponse + 75, // 75: tabletmanagerservice.TabletManager.WaitForPosition:output_type -> tabletmanagerdata.WaitForPositionResponse + 76, // 76: tabletmanagerservice.TabletManager.StopReplication:output_type -> tabletmanagerdata.StopReplicationResponse + 77, // 77: tabletmanagerservice.TabletManager.StopReplicationMinimum:output_type -> tabletmanagerdata.StopReplicationMinimumResponse + 78, // 78: tabletmanagerservice.TabletManager.StartReplication:output_type -> tabletmanagerdata.StartReplicationResponse + 79, // 79: tabletmanagerservice.TabletManager.StartReplicationUntilAfter:output_type -> tabletmanagerdata.StartReplicationUntilAfterResponse + 80, // 80: tabletmanagerservice.TabletManager.GetReplicas:output_type -> tabletmanagerdata.GetReplicasResponse + 81, // 81: tabletmanagerservice.TabletManager.CreateVReplicationWorkflow:output_type -> tabletmanagerdata.CreateVReplicationWorkflowResponse + 82, // 82: tabletmanagerservice.TabletManager.DeleteVReplicationWorkflow:output_type -> tabletmanagerdata.DeleteVReplicationWorkflowResponse + 83, // 83: tabletmanagerservice.TabletManager.ReadVReplicationWorkflow:output_type -> tabletmanagerdata.ReadVReplicationWorkflowResponse + 84, // 84: tabletmanagerservice.TabletManager.VReplicationExec:output_type -> tabletmanagerdata.VReplicationExecResponse + 85, // 85: tabletmanagerservice.TabletManager.VReplicationWaitForPos:output_type -> tabletmanagerdata.VReplicationWaitForPosResponse + 86, // 86: tabletmanagerservice.TabletManager.UpdateVReplicationWorkflow:output_type -> tabletmanagerdata.UpdateVReplicationWorkflowResponse + 87, // 87: tabletmanagerservice.TabletManager.VDiff:output_type -> tabletmanagerdata.VDiffResponse + 88, // 88: tabletmanagerservice.TabletManager.ResetReplication:output_type -> tabletmanagerdata.ResetReplicationResponse + 89, // 89: tabletmanagerservice.TabletManager.InitPrimary:output_type -> tabletmanagerdata.InitPrimaryResponse + 90, // 90: tabletmanagerservice.TabletManager.PopulateReparentJournal:output_type -> tabletmanagerdata.PopulateReparentJournalResponse + 91, // 91: tabletmanagerservice.TabletManager.InitReplica:output_type -> tabletmanagerdata.InitReplicaResponse + 92, // 92: tabletmanagerservice.TabletManager.DemotePrimary:output_type -> tabletmanagerdata.DemotePrimaryResponse + 93, // 93: tabletmanagerservice.TabletManager.UndoDemotePrimary:output_type -> tabletmanagerdata.UndoDemotePrimaryResponse + 94, // 94: tabletmanagerservice.TabletManager.ReplicaWasPromoted:output_type -> tabletmanagerdata.ReplicaWasPromotedResponse + 95, // 95: tabletmanagerservice.TabletManager.ResetReplicationParameters:output_type -> tabletmanagerdata.ResetReplicationParametersResponse + 96, // 96: tabletmanagerservice.TabletManager.FullStatus:output_type -> tabletmanagerdata.FullStatusResponse + 97, // 97: tabletmanagerservice.TabletManager.SetReplicationSource:output_type -> tabletmanagerdata.SetReplicationSourceResponse + 98, // 98: tabletmanagerservice.TabletManager.ReplicaWasRestarted:output_type -> tabletmanagerdata.ReplicaWasRestartedResponse + 99, // 99: tabletmanagerservice.TabletManager.StopReplicationAndGetStatus:output_type -> tabletmanagerdata.StopReplicationAndGetStatusResponse + 100, // 100: tabletmanagerservice.TabletManager.PromoteReplica:output_type -> tabletmanagerdata.PromoteReplicaResponse + 101, // 101: tabletmanagerservice.TabletManager.Backup:output_type -> tabletmanagerdata.BackupResponse + 102, // 102: tabletmanagerservice.TabletManager.RestoreFromBackup:output_type -> tabletmanagerdata.RestoreFromBackupResponse + 103, // 103: tabletmanagerservice.TabletManager.CheckThrottler:output_type -> tabletmanagerdata.CheckThrottlerResponse + 52, // [52:104] is the sub-list for method output_type + 0, // [0:52] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } func init() { file_tabletmanagerservice_proto_init() } diff --git a/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go b/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go index b556c447d95..f0665947007 100644 --- a/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go +++ b/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go @@ -42,6 +42,7 @@ type TabletManagerClient interface { ReloadSchema(ctx context.Context, in *tabletmanagerdata.ReloadSchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReloadSchemaResponse, error) PreflightSchema(ctx context.Context, in *tabletmanagerdata.PreflightSchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.PreflightSchemaResponse, error) ApplySchema(ctx context.Context, in *tabletmanagerdata.ApplySchemaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ApplySchemaResponse, error) + ResetSequences(ctx context.Context, in *tabletmanagerdata.ResetSequencesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ResetSequencesResponse, error) LockTables(ctx context.Context, in *tabletmanagerdata.LockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.LockTablesResponse, error) UnlockTables(ctx context.Context, in *tabletmanagerdata.UnlockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UnlockTablesResponse, error) ExecuteQuery(ctx context.Context, in *tabletmanagerdata.ExecuteQueryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteQueryResponse, error) @@ -69,8 +70,12 @@ type TabletManagerClient interface { // GetReplicas asks for the list of mysql replicas GetReplicas(ctx context.Context, in *tabletmanagerdata.GetReplicasRequest, opts ...grpc.CallOption) (*tabletmanagerdata.GetReplicasResponse, error) // VReplication API + CreateVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.CreateVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.CreateVReplicationWorkflowResponse, error) + DeleteVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.DeleteVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.DeleteVReplicationWorkflowResponse, error) + ReadVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.ReadVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReadVReplicationWorkflowResponse, error) VReplicationExec(ctx context.Context, in *tabletmanagerdata.VReplicationExecRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VReplicationExecResponse, error) VReplicationWaitForPos(ctx context.Context, in *tabletmanagerdata.VReplicationWaitForPosRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VReplicationWaitForPosResponse, error) + UpdateVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.UpdateVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UpdateVReplicationWorkflowResponse, error) // VDiff API VDiff(ctx context.Context, in *tabletmanagerdata.VDiffRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VDiffResponse, error) // ResetReplication makes the target not replicating @@ -104,6 +109,8 @@ type TabletManagerClient interface { Backup(ctx context.Context, in *tabletmanagerdata.BackupRequest, opts ...grpc.CallOption) (TabletManager_BackupClient, error) // RestoreFromBackup deletes all local data and restores it from the latest backup. RestoreFromBackup(ctx context.Context, in *tabletmanagerdata.RestoreFromBackupRequest, opts ...grpc.CallOption) (TabletManager_RestoreFromBackupClient, error) + // CheckThrottler issues a 'check' on a tablet's throttler + CheckThrottler(ctx context.Context, in *tabletmanagerdata.CheckThrottlerRequest, opts ...grpc.CallOption) (*tabletmanagerdata.CheckThrottlerResponse, error) } type tabletManagerClient struct { @@ -231,6 +238,15 @@ func (c *tabletManagerClient) ApplySchema(ctx context.Context, in *tabletmanager return out, nil } +func (c *tabletManagerClient) ResetSequences(ctx context.Context, in *tabletmanagerdata.ResetSequencesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ResetSequencesResponse, error) { + out := new(tabletmanagerdata.ResetSequencesResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ResetSequences", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *tabletManagerClient) LockTables(ctx context.Context, in *tabletmanagerdata.LockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.LockTablesResponse, error) { out := new(tabletmanagerdata.LockTablesResponse) err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/LockTables", in, out, opts...) @@ -366,6 +382,33 @@ func (c *tabletManagerClient) GetReplicas(ctx context.Context, in *tabletmanager return out, nil } +func (c *tabletManagerClient) CreateVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.CreateVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.CreateVReplicationWorkflowResponse, error) { + out := new(tabletmanagerdata.CreateVReplicationWorkflowResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/CreateVReplicationWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) DeleteVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.DeleteVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.DeleteVReplicationWorkflowResponse, error) { + out := new(tabletmanagerdata.DeleteVReplicationWorkflowResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/DeleteVReplicationWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tabletManagerClient) ReadVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.ReadVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReadVReplicationWorkflowResponse, error) { + out := new(tabletmanagerdata.ReadVReplicationWorkflowResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ReadVReplicationWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *tabletManagerClient) VReplicationExec(ctx context.Context, in *tabletmanagerdata.VReplicationExecRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VReplicationExecResponse, error) { out := new(tabletmanagerdata.VReplicationExecResponse) err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/VReplicationExec", in, out, opts...) @@ -384,6 +427,15 @@ func (c *tabletManagerClient) VReplicationWaitForPos(ctx context.Context, in *ta return out, nil } +func (c *tabletManagerClient) UpdateVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.UpdateVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UpdateVReplicationWorkflowResponse, error) { + out := new(tabletmanagerdata.UpdateVReplicationWorkflowResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/UpdateVReplicationWorkflow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *tabletManagerClient) VDiff(ctx context.Context, in *tabletmanagerdata.VDiffRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VDiffResponse, error) { out := new(tabletmanagerdata.VDiffResponse) err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/VDiff", in, out, opts...) @@ -574,6 +626,15 @@ func (x *tabletManagerRestoreFromBackupClient) Recv() (*tabletmanagerdata.Restor return m, nil } +func (c *tabletManagerClient) CheckThrottler(ctx context.Context, in *tabletmanagerdata.CheckThrottlerRequest, opts ...grpc.CallOption) (*tabletmanagerdata.CheckThrottlerResponse, error) { + out := new(tabletmanagerdata.CheckThrottlerResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/CheckThrottler", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // TabletManagerServer is the server API for TabletManager service. // All implementations must embed UnimplementedTabletManagerServer // for forward compatibility @@ -597,6 +658,7 @@ type TabletManagerServer interface { ReloadSchema(context.Context, *tabletmanagerdata.ReloadSchemaRequest) (*tabletmanagerdata.ReloadSchemaResponse, error) PreflightSchema(context.Context, *tabletmanagerdata.PreflightSchemaRequest) (*tabletmanagerdata.PreflightSchemaResponse, error) ApplySchema(context.Context, *tabletmanagerdata.ApplySchemaRequest) (*tabletmanagerdata.ApplySchemaResponse, error) + ResetSequences(context.Context, *tabletmanagerdata.ResetSequencesRequest) (*tabletmanagerdata.ResetSequencesResponse, error) LockTables(context.Context, *tabletmanagerdata.LockTablesRequest) (*tabletmanagerdata.LockTablesResponse, error) UnlockTables(context.Context, *tabletmanagerdata.UnlockTablesRequest) (*tabletmanagerdata.UnlockTablesResponse, error) ExecuteQuery(context.Context, *tabletmanagerdata.ExecuteQueryRequest) (*tabletmanagerdata.ExecuteQueryResponse, error) @@ -624,8 +686,12 @@ type TabletManagerServer interface { // GetReplicas asks for the list of mysql replicas GetReplicas(context.Context, *tabletmanagerdata.GetReplicasRequest) (*tabletmanagerdata.GetReplicasResponse, error) // VReplication API + CreateVReplicationWorkflow(context.Context, *tabletmanagerdata.CreateVReplicationWorkflowRequest) (*tabletmanagerdata.CreateVReplicationWorkflowResponse, error) + DeleteVReplicationWorkflow(context.Context, *tabletmanagerdata.DeleteVReplicationWorkflowRequest) (*tabletmanagerdata.DeleteVReplicationWorkflowResponse, error) + ReadVReplicationWorkflow(context.Context, *tabletmanagerdata.ReadVReplicationWorkflowRequest) (*tabletmanagerdata.ReadVReplicationWorkflowResponse, error) VReplicationExec(context.Context, *tabletmanagerdata.VReplicationExecRequest) (*tabletmanagerdata.VReplicationExecResponse, error) VReplicationWaitForPos(context.Context, *tabletmanagerdata.VReplicationWaitForPosRequest) (*tabletmanagerdata.VReplicationWaitForPosResponse, error) + UpdateVReplicationWorkflow(context.Context, *tabletmanagerdata.UpdateVReplicationWorkflowRequest) (*tabletmanagerdata.UpdateVReplicationWorkflowResponse, error) // VDiff API VDiff(context.Context, *tabletmanagerdata.VDiffRequest) (*tabletmanagerdata.VDiffResponse, error) // ResetReplication makes the target not replicating @@ -659,6 +725,8 @@ type TabletManagerServer interface { Backup(*tabletmanagerdata.BackupRequest, TabletManager_BackupServer) error // RestoreFromBackup deletes all local data and restores it from the latest backup. RestoreFromBackup(*tabletmanagerdata.RestoreFromBackupRequest, TabletManager_RestoreFromBackupServer) error + // CheckThrottler issues a 'check' on a tablet's throttler + CheckThrottler(context.Context, *tabletmanagerdata.CheckThrottlerRequest) (*tabletmanagerdata.CheckThrottlerResponse, error) mustEmbedUnimplementedTabletManagerServer() } @@ -705,6 +773,9 @@ func (UnimplementedTabletManagerServer) PreflightSchema(context.Context, *tablet func (UnimplementedTabletManagerServer) ApplySchema(context.Context, *tabletmanagerdata.ApplySchemaRequest) (*tabletmanagerdata.ApplySchemaResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ApplySchema not implemented") } +func (UnimplementedTabletManagerServer) ResetSequences(context.Context, *tabletmanagerdata.ResetSequencesRequest) (*tabletmanagerdata.ResetSequencesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResetSequences not implemented") +} func (UnimplementedTabletManagerServer) LockTables(context.Context, *tabletmanagerdata.LockTablesRequest) (*tabletmanagerdata.LockTablesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method LockTables not implemented") } @@ -750,12 +821,24 @@ func (UnimplementedTabletManagerServer) StartReplicationUntilAfter(context.Conte func (UnimplementedTabletManagerServer) GetReplicas(context.Context, *tabletmanagerdata.GetReplicasRequest) (*tabletmanagerdata.GetReplicasResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetReplicas not implemented") } +func (UnimplementedTabletManagerServer) CreateVReplicationWorkflow(context.Context, *tabletmanagerdata.CreateVReplicationWorkflowRequest) (*tabletmanagerdata.CreateVReplicationWorkflowResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateVReplicationWorkflow not implemented") +} +func (UnimplementedTabletManagerServer) DeleteVReplicationWorkflow(context.Context, *tabletmanagerdata.DeleteVReplicationWorkflowRequest) (*tabletmanagerdata.DeleteVReplicationWorkflowResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteVReplicationWorkflow not implemented") +} +func (UnimplementedTabletManagerServer) ReadVReplicationWorkflow(context.Context, *tabletmanagerdata.ReadVReplicationWorkflowRequest) (*tabletmanagerdata.ReadVReplicationWorkflowResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadVReplicationWorkflow not implemented") +} func (UnimplementedTabletManagerServer) VReplicationExec(context.Context, *tabletmanagerdata.VReplicationExecRequest) (*tabletmanagerdata.VReplicationExecResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VReplicationExec not implemented") } func (UnimplementedTabletManagerServer) VReplicationWaitForPos(context.Context, *tabletmanagerdata.VReplicationWaitForPosRequest) (*tabletmanagerdata.VReplicationWaitForPosResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VReplicationWaitForPos not implemented") } +func (UnimplementedTabletManagerServer) UpdateVReplicationWorkflow(context.Context, *tabletmanagerdata.UpdateVReplicationWorkflowRequest) (*tabletmanagerdata.UpdateVReplicationWorkflowResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateVReplicationWorkflow not implemented") +} func (UnimplementedTabletManagerServer) VDiff(context.Context, *tabletmanagerdata.VDiffRequest) (*tabletmanagerdata.VDiffResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VDiff not implemented") } @@ -804,6 +887,9 @@ func (UnimplementedTabletManagerServer) Backup(*tabletmanagerdata.BackupRequest, func (UnimplementedTabletManagerServer) RestoreFromBackup(*tabletmanagerdata.RestoreFromBackupRequest, TabletManager_RestoreFromBackupServer) error { return status.Errorf(codes.Unimplemented, "method RestoreFromBackup not implemented") } +func (UnimplementedTabletManagerServer) CheckThrottler(context.Context, *tabletmanagerdata.CheckThrottlerRequest) (*tabletmanagerdata.CheckThrottlerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckThrottler not implemented") +} func (UnimplementedTabletManagerServer) mustEmbedUnimplementedTabletManagerServer() {} // UnsafeTabletManagerServer may be embedded to opt out of forward compatibility for this service. @@ -1051,6 +1137,24 @@ func _TabletManager_ApplySchema_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +func _TabletManager_ResetSequences_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.ResetSequencesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).ResetSequences(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/ResetSequences", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).ResetSequences(ctx, req.(*tabletmanagerdata.ResetSequencesRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _TabletManager_LockTables_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(tabletmanagerdata.LockTablesRequest) if err := dec(in); err != nil { @@ -1321,6 +1425,60 @@ func _TabletManager_GetReplicas_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +func _TabletManager_CreateVReplicationWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.CreateVReplicationWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).CreateVReplicationWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/CreateVReplicationWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).CreateVReplicationWorkflow(ctx, req.(*tabletmanagerdata.CreateVReplicationWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_DeleteVReplicationWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.DeleteVReplicationWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).DeleteVReplicationWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/DeleteVReplicationWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).DeleteVReplicationWorkflow(ctx, req.(*tabletmanagerdata.DeleteVReplicationWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _TabletManager_ReadVReplicationWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.ReadVReplicationWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).ReadVReplicationWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/ReadVReplicationWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).ReadVReplicationWorkflow(ctx, req.(*tabletmanagerdata.ReadVReplicationWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _TabletManager_VReplicationExec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(tabletmanagerdata.VReplicationExecRequest) if err := dec(in); err != nil { @@ -1357,6 +1515,24 @@ func _TabletManager_VReplicationWaitForPos_Handler(srv interface{}, ctx context. return interceptor(ctx, in, info, handler) } +func _TabletManager_UpdateVReplicationWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.UpdateVReplicationWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).UpdateVReplicationWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/UpdateVReplicationWorkflow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).UpdateVReplicationWorkflow(ctx, req.(*tabletmanagerdata.UpdateVReplicationWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _TabletManager_VDiff_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(tabletmanagerdata.VDiffRequest) if err := dec(in); err != nil { @@ -1651,6 +1827,24 @@ func (x *tabletManagerRestoreFromBackupServer) Send(m *tabletmanagerdata.Restore return x.ServerStream.SendMsg(m) } +func _TabletManager_CheckThrottler_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.CheckThrottlerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).CheckThrottler(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/CheckThrottler", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).CheckThrottler(ctx, req.(*tabletmanagerdata.CheckThrottlerRequest)) + } + return interceptor(ctx, in, info, handler) +} + // TabletManager_ServiceDesc is the grpc.ServiceDesc for TabletManager service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -1710,6 +1904,10 @@ var TabletManager_ServiceDesc = grpc.ServiceDesc{ MethodName: "ApplySchema", Handler: _TabletManager_ApplySchema_Handler, }, + { + MethodName: "ResetSequences", + Handler: _TabletManager_ResetSequences_Handler, + }, { MethodName: "LockTables", Handler: _TabletManager_LockTables_Handler, @@ -1770,6 +1968,18 @@ var TabletManager_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetReplicas", Handler: _TabletManager_GetReplicas_Handler, }, + { + MethodName: "CreateVReplicationWorkflow", + Handler: _TabletManager_CreateVReplicationWorkflow_Handler, + }, + { + MethodName: "DeleteVReplicationWorkflow", + Handler: _TabletManager_DeleteVReplicationWorkflow_Handler, + }, + { + MethodName: "ReadVReplicationWorkflow", + Handler: _TabletManager_ReadVReplicationWorkflow_Handler, + }, { MethodName: "VReplicationExec", Handler: _TabletManager_VReplicationExec_Handler, @@ -1778,6 +1988,10 @@ var TabletManager_ServiceDesc = grpc.ServiceDesc{ MethodName: "VReplicationWaitForPos", Handler: _TabletManager_VReplicationWaitForPos_Handler, }, + { + MethodName: "UpdateVReplicationWorkflow", + Handler: _TabletManager_UpdateVReplicationWorkflow_Handler, + }, { MethodName: "VDiff", Handler: _TabletManager_VDiff_Handler, @@ -1834,6 +2048,10 @@ var TabletManager_ServiceDesc = grpc.ServiceDesc{ MethodName: "PromoteReplica", Handler: _TabletManager_PromoteReplica_Handler, }, + { + MethodName: "CheckThrottler", + Handler: _TabletManager_CheckThrottler_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/go/vt/proto/throttlerdata/throttlerdata.pb.go b/go/vt/proto/throttlerdata/throttlerdata.pb.go index 8a52d2344f2..fb12bc09ce8 100644 --- a/go/vt/proto/throttlerdata/throttlerdata.pb.go +++ b/go/vt/proto/throttlerdata/throttlerdata.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: throttlerdata.proto diff --git a/go/vt/proto/throttlerdata/throttlerdata_vtproto.pb.go b/go/vt/proto/throttlerdata/throttlerdata_vtproto.pb.go index 7a061d1fc38..e032b7db8e8 100644 --- a/go/vt/proto/throttlerdata/throttlerdata_vtproto.pb.go +++ b/go/vt/proto/throttlerdata/throttlerdata_vtproto.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: throttlerdata.proto package throttlerdata @@ -7,6 +7,7 @@ package throttlerdata import ( binary "encoding/binary" fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" math "math" @@ -20,6 +21,236 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *MaxRatesRequest) CloneVT() *MaxRatesRequest { + if m == nil { + return (*MaxRatesRequest)(nil) + } + r := &MaxRatesRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MaxRatesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MaxRatesResponse) CloneVT() *MaxRatesResponse { + if m == nil { + return (*MaxRatesResponse)(nil) + } + r := &MaxRatesResponse{} + if rhs := m.Rates; rhs != nil { + tmpContainer := make(map[string]int64, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.Rates = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MaxRatesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetMaxRateRequest) CloneVT() *SetMaxRateRequest { + if m == nil { + return (*SetMaxRateRequest)(nil) + } + r := &SetMaxRateRequest{ + Rate: m.Rate, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SetMaxRateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetMaxRateResponse) CloneVT() *SetMaxRateResponse { + if m == nil { + return (*SetMaxRateResponse)(nil) + } + r := &SetMaxRateResponse{} + if rhs := m.Names; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Names = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SetMaxRateResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Configuration) CloneVT() *Configuration { + if m == nil { + return (*Configuration)(nil) + } + r := &Configuration{ + TargetReplicationLagSec: m.TargetReplicationLagSec, + MaxReplicationLagSec: m.MaxReplicationLagSec, + InitialRate: m.InitialRate, + MaxIncrease: m.MaxIncrease, + EmergencyDecrease: m.EmergencyDecrease, + MinDurationBetweenIncreasesSec: m.MinDurationBetweenIncreasesSec, + MaxDurationBetweenIncreasesSec: m.MaxDurationBetweenIncreasesSec, + MinDurationBetweenDecreasesSec: m.MinDurationBetweenDecreasesSec, + SpreadBacklogAcrossSec: m.SpreadBacklogAcrossSec, + IgnoreNSlowestReplicas: m.IgnoreNSlowestReplicas, + IgnoreNSlowestRdonlys: m.IgnoreNSlowestRdonlys, + AgeBadRateAfterSec: m.AgeBadRateAfterSec, + BadRateIncrease: m.BadRateIncrease, + MaxRateApproachThreshold: m.MaxRateApproachThreshold, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Configuration) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetConfigurationRequest) CloneVT() *GetConfigurationRequest { + if m == nil { + return (*GetConfigurationRequest)(nil) + } + r := &GetConfigurationRequest{ + ThrottlerName: m.ThrottlerName, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetConfigurationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetConfigurationResponse) CloneVT() *GetConfigurationResponse { + if m == nil { + return (*GetConfigurationResponse)(nil) + } + r := &GetConfigurationResponse{} + if rhs := m.Configurations; rhs != nil { + tmpContainer := make(map[string]*Configuration, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Configurations = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetConfigurationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UpdateConfigurationRequest) CloneVT() *UpdateConfigurationRequest { + if m == nil { + return (*UpdateConfigurationRequest)(nil) + } + r := &UpdateConfigurationRequest{ + ThrottlerName: m.ThrottlerName, + Configuration: m.Configuration.CloneVT(), + CopyZeroValues: m.CopyZeroValues, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *UpdateConfigurationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UpdateConfigurationResponse) CloneVT() *UpdateConfigurationResponse { + if m == nil { + return (*UpdateConfigurationResponse)(nil) + } + r := &UpdateConfigurationResponse{} + if rhs := m.Names; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Names = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *UpdateConfigurationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResetConfigurationRequest) CloneVT() *ResetConfigurationRequest { + if m == nil { + return (*ResetConfigurationRequest)(nil) + } + r := &ResetConfigurationRequest{ + ThrottlerName: m.ThrottlerName, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ResetConfigurationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResetConfigurationResponse) CloneVT() *ResetConfigurationResponse { + if m == nil { + return (*ResetConfigurationResponse)(nil) + } + r := &ResetConfigurationResponse{} + if rhs := m.Names; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Names = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ResetConfigurationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *MaxRatesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil diff --git a/go/vt/proto/throttlerservice/throttlerservice.pb.go b/go/vt/proto/throttlerservice/throttlerservice.pb.go index 83d6f506a79..9bca73e067c 100644 --- a/go/vt/proto/throttlerservice/throttlerservice.pb.go +++ b/go/vt/proto/throttlerservice/throttlerservice.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: throttlerservice.proto diff --git a/go/vt/proto/topodata/cached_size.go b/go/vt/proto/topodata/cached_size.go index 92da50b703e..d06ebd0d3f0 100644 --- a/go/vt/proto/topodata/cached_size.go +++ b/go/vt/proto/topodata/cached_size.go @@ -41,3 +41,21 @@ func (cached *KeyRange) CachedSize(alloc bool) int64 { } return size } +func (cached *ThrottledAppRule) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(80) + } + // field unknownFields []byte + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownFields))) + } + // field Name string + size += hack.RuntimeAllocSize(int64(len(cached.Name))) + // field ExpiresAt *vitess.io/vitess/go/vt/proto/vttime.Time + size += cached.ExpiresAt.CachedSize(true) + return size +} diff --git a/go/vt/proto/topodata/topodata.pb.go b/go/vt/proto/topodata/topodata.pb.go index d763a5d5ec3..43ecdbce963 100644 --- a/go/vt/proto/topodata/topodata.pb.go +++ b/go/vt/proto/topodata/topodata.pb.go @@ -20,7 +20,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: topodata.proto @@ -403,8 +403,6 @@ type Tablet struct { // replication-management commands like PlannedReparentShard, // EmergencyReparentShard, and TabletExternallyReparented. PrimaryTermStartTime *vttime.Time `protobuf:"bytes,14,opt,name=primary_term_start_time,json=primaryTermStartTime,proto3" json:"primary_term_start_time,omitempty"` - // db_server_version represents the database version used by the tablet. - DbServerVersion string `protobuf:"bytes,15,opt,name=db_server_version,json=dbServerVersion,proto3" json:"db_server_version,omitempty"` // default_conn_collation is the default connection collation used by this tablet. DefaultConnCollation uint32 `protobuf:"varint,16,opt,name=default_conn_collation,json=defaultConnCollation,proto3" json:"default_conn_collation,omitempty"` } @@ -525,13 +523,6 @@ func (x *Tablet) GetPrimaryTermStartTime() *vttime.Time { return nil } -func (x *Tablet) GetDbServerVersion() string { - if x != nil { - return x.DbServerVersion - } - return "" -} - func (x *Tablet) GetDefaultConnCollation() uint32 { if x != nil { return x.DefaultConnCollation @@ -687,6 +678,10 @@ type Keyspace struct { // server's lag throttler, and applies to the entire // keyspace, across all shards and tablets. ThrottlerConfig *ThrottlerConfig `protobuf:"bytes,9,opt,name=throttler_config,json=throttlerConfig,proto3" json:"throttler_config,omitempty"` + // SidecarDBName is the name of the Vitess sidecar database + // used for various system metadata that is stored in each + // tablet's mysqld instance. + SidecarDbName string `protobuf:"bytes,10,opt,name=sidecar_db_name,json=sidecarDbName,proto3" json:"sidecar_db_name,omitempty"` } func (x *Keyspace) Reset() { @@ -763,6 +758,13 @@ func (x *Keyspace) GetThrottlerConfig() *ThrottlerConfig { return nil } +func (x *Keyspace) GetSidecarDbName() string { + if x != nil { + return x.SidecarDbName + } + return "" +} + // ShardReplication describes the MySQL replication relationships // whithin a cell. type ShardReplication struct { @@ -996,6 +998,83 @@ func (x *ShardTabletControl) GetQueryServiceDisabled() bool { return false } +// ThrottledAppRule defines an app-specific throttling rule, with expiration. +type ThrottledAppRule struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the app to be throttled, e.g. "vreplication" or "online-ddl" + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Ratio defines how much the app should be throttled, range [0.0...1.0]. 1.0 means fully throttled. 0.0 means not throttled at all. + // Negative values are reserved for a future implementation. + Ratio float64 `protobuf:"fixed64,2,opt,name=ratio,proto3" json:"ratio,omitempty"` + // ExpiresAt is the time at which the rule expires. + ExpiresAt *vttime.Time `protobuf:"bytes,3,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` + // Exempt indicates the app should never be throttled, even if the throttler is, in general, throttling other apps. + Exempt bool `protobuf:"varint,4,opt,name=exempt,proto3" json:"exempt,omitempty"` +} + +func (x *ThrottledAppRule) Reset() { + *x = ThrottledAppRule{} + if protoimpl.UnsafeEnabled { + mi := &file_topodata_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ThrottledAppRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ThrottledAppRule) ProtoMessage() {} + +func (x *ThrottledAppRule) ProtoReflect() protoreflect.Message { + mi := &file_topodata_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ThrottledAppRule.ProtoReflect.Descriptor instead. +func (*ThrottledAppRule) Descriptor() ([]byte, []int) { + return file_topodata_proto_rawDescGZIP(), []int{9} +} + +func (x *ThrottledAppRule) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ThrottledAppRule) GetRatio() float64 { + if x != nil { + return x.Ratio + } + return 0 +} + +func (x *ThrottledAppRule) GetExpiresAt() *vttime.Time { + if x != nil { + return x.ExpiresAt + } + return nil +} + +func (x *ThrottledAppRule) GetExempt() bool { + if x != nil { + return x.Exempt + } + return false +} + type ThrottlerConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1014,12 +1093,14 @@ type ThrottlerConfig struct { // CheckAsCheckSelf indicates whether a throttler /check request // should behave like a /check-self. CheckAsCheckSelf bool `protobuf:"varint,4,opt,name=check_as_check_self,json=checkAsCheckSelf,proto3" json:"check_as_check_self,omitempty"` + // ThrottledApps is a map of rules for app-specific throttling + ThrottledApps map[string]*ThrottledAppRule `protobuf:"bytes,5,rep,name=throttled_apps,json=throttledApps,proto3" json:"throttled_apps,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *ThrottlerConfig) Reset() { *x = ThrottlerConfig{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[9] + mi := &file_topodata_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1032,7 +1113,7 @@ func (x *ThrottlerConfig) String() string { func (*ThrottlerConfig) ProtoMessage() {} func (x *ThrottlerConfig) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[9] + mi := &file_topodata_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1045,7 +1126,7 @@ func (x *ThrottlerConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ThrottlerConfig.ProtoReflect.Descriptor instead. func (*ThrottlerConfig) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{9} + return file_topodata_proto_rawDescGZIP(), []int{10} } func (x *ThrottlerConfig) GetEnabled() bool { @@ -1076,6 +1157,13 @@ func (x *ThrottlerConfig) GetCheckAsCheckSelf() bool { return false } +func (x *ThrottlerConfig) GetThrottledApps() map[string]*ThrottledAppRule { + if x != nil { + return x.ThrottledApps + } + return nil +} + // SrvKeyspace is a rollup node for the keyspace itself. type SrvKeyspace struct { state protoimpl.MessageState @@ -1095,7 +1183,7 @@ type SrvKeyspace struct { func (x *SrvKeyspace) Reset() { *x = SrvKeyspace{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[10] + mi := &file_topodata_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1108,7 +1196,7 @@ func (x *SrvKeyspace) String() string { func (*SrvKeyspace) ProtoMessage() {} func (x *SrvKeyspace) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[10] + mi := &file_topodata_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1121,7 +1209,7 @@ func (x *SrvKeyspace) ProtoReflect() protoreflect.Message { // Deprecated: Use SrvKeyspace.ProtoReflect.Descriptor instead. func (*SrvKeyspace) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{10} + return file_topodata_proto_rawDescGZIP(), []int{11} } func (x *SrvKeyspace) GetPartitions() []*SrvKeyspace_KeyspacePartition { @@ -1166,7 +1254,7 @@ type CellInfo struct { func (x *CellInfo) Reset() { *x = CellInfo{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[11] + mi := &file_topodata_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1179,7 +1267,7 @@ func (x *CellInfo) String() string { func (*CellInfo) ProtoMessage() {} func (x *CellInfo) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[11] + mi := &file_topodata_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1192,7 +1280,7 @@ func (x *CellInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use CellInfo.ProtoReflect.Descriptor instead. func (*CellInfo) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{11} + return file_topodata_proto_rawDescGZIP(), []int{12} } func (x *CellInfo) GetServerAddress() string { @@ -1222,7 +1310,7 @@ type CellsAlias struct { func (x *CellsAlias) Reset() { *x = CellsAlias{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[12] + mi := &file_topodata_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1235,7 +1323,7 @@ func (x *CellsAlias) String() string { func (*CellsAlias) ProtoMessage() {} func (x *CellsAlias) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[12] + mi := &file_topodata_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1248,7 +1336,7 @@ func (x *CellsAlias) ProtoReflect() protoreflect.Message { // Deprecated: Use CellsAlias.ProtoReflect.Descriptor instead. func (*CellsAlias) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{12} + return file_topodata_proto_rawDescGZIP(), []int{13} } func (x *CellsAlias) GetCells() []string { @@ -1271,7 +1359,7 @@ type TopoConfig struct { func (x *TopoConfig) Reset() { *x = TopoConfig{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[13] + mi := &file_topodata_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1284,7 +1372,7 @@ func (x *TopoConfig) String() string { func (*TopoConfig) ProtoMessage() {} func (x *TopoConfig) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[13] + mi := &file_topodata_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1297,7 +1385,7 @@ func (x *TopoConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use TopoConfig.ProtoReflect.Descriptor instead. func (*TopoConfig) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{13} + return file_topodata_proto_rawDescGZIP(), []int{14} } func (x *TopoConfig) GetTopoType() string { @@ -1332,7 +1420,7 @@ type ExternalVitessCluster struct { func (x *ExternalVitessCluster) Reset() { *x = ExternalVitessCluster{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[14] + mi := &file_topodata_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1345,7 +1433,7 @@ func (x *ExternalVitessCluster) String() string { func (*ExternalVitessCluster) ProtoMessage() {} func (x *ExternalVitessCluster) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[14] + mi := &file_topodata_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1358,7 +1446,7 @@ func (x *ExternalVitessCluster) ProtoReflect() protoreflect.Message { // Deprecated: Use ExternalVitessCluster.ProtoReflect.Descriptor instead. func (*ExternalVitessCluster) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{14} + return file_topodata_proto_rawDescGZIP(), []int{15} } func (x *ExternalVitessCluster) GetTopoConfig() *TopoConfig { @@ -1380,7 +1468,7 @@ type ExternalClusters struct { func (x *ExternalClusters) Reset() { *x = ExternalClusters{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[15] + mi := &file_topodata_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1393,7 +1481,7 @@ func (x *ExternalClusters) String() string { func (*ExternalClusters) ProtoMessage() {} func (x *ExternalClusters) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[15] + mi := &file_topodata_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1406,7 +1494,7 @@ func (x *ExternalClusters) ProtoReflect() protoreflect.Message { // Deprecated: Use ExternalClusters.ProtoReflect.Descriptor instead. func (*ExternalClusters) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{15} + return file_topodata_proto_rawDescGZIP(), []int{16} } func (x *ExternalClusters) GetVitessCluster() []*ExternalVitessCluster { @@ -1439,7 +1527,7 @@ type Shard_SourceShard struct { func (x *Shard_SourceShard) Reset() { *x = Shard_SourceShard{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[18] + mi := &file_topodata_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1452,7 +1540,7 @@ func (x *Shard_SourceShard) String() string { func (*Shard_SourceShard) ProtoMessage() {} func (x *Shard_SourceShard) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[18] + mi := &file_topodata_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1521,7 +1609,7 @@ type Shard_TabletControl struct { func (x *Shard_TabletControl) Reset() { *x = Shard_TabletControl{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[19] + mi := &file_topodata_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1534,7 +1622,7 @@ func (x *Shard_TabletControl) String() string { func (*Shard_TabletControl) ProtoMessage() {} func (x *Shard_TabletControl) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[19] + mi := &file_topodata_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1596,7 +1684,7 @@ type Keyspace_ServedFrom struct { func (x *Keyspace_ServedFrom) Reset() { *x = Keyspace_ServedFrom{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[20] + mi := &file_topodata_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1609,7 +1697,7 @@ func (x *Keyspace_ServedFrom) String() string { func (*Keyspace_ServedFrom) ProtoMessage() {} func (x *Keyspace_ServedFrom) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[20] + mi := &file_topodata_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1658,7 +1746,7 @@ type ShardReplication_Node struct { func (x *ShardReplication_Node) Reset() { *x = ShardReplication_Node{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[21] + mi := &file_topodata_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1671,7 +1759,7 @@ func (x *ShardReplication_Node) String() string { func (*ShardReplication_Node) ProtoMessage() {} func (x *ShardReplication_Node) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[21] + mi := &file_topodata_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1710,7 +1798,7 @@ type SrvKeyspace_KeyspacePartition struct { func (x *SrvKeyspace_KeyspacePartition) Reset() { *x = SrvKeyspace_KeyspacePartition{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[22] + mi := &file_topodata_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1723,7 +1811,7 @@ func (x *SrvKeyspace_KeyspacePartition) String() string { func (*SrvKeyspace_KeyspacePartition) ProtoMessage() {} func (x *SrvKeyspace_KeyspacePartition) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[22] + mi := &file_topodata_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1736,7 +1824,7 @@ func (x *SrvKeyspace_KeyspacePartition) ProtoReflect() protoreflect.Message { // Deprecated: Use SrvKeyspace_KeyspacePartition.ProtoReflect.Descriptor instead. func (*SrvKeyspace_KeyspacePartition) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{10, 0} + return file_topodata_proto_rawDescGZIP(), []int{11, 0} } func (x *SrvKeyspace_KeyspacePartition) GetServedType() TabletType { @@ -1776,7 +1864,7 @@ type SrvKeyspace_ServedFrom struct { func (x *SrvKeyspace_ServedFrom) Reset() { *x = SrvKeyspace_ServedFrom{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[23] + mi := &file_topodata_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1789,7 +1877,7 @@ func (x *SrvKeyspace_ServedFrom) String() string { func (*SrvKeyspace_ServedFrom) ProtoMessage() {} func (x *SrvKeyspace_ServedFrom) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[23] + mi := &file_topodata_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1802,7 +1890,7 @@ func (x *SrvKeyspace_ServedFrom) ProtoReflect() protoreflect.Message { // Deprecated: Use SrvKeyspace_ServedFrom.ProtoReflect.Descriptor instead. func (*SrvKeyspace_ServedFrom) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{10, 1} + return file_topodata_proto_rawDescGZIP(), []int{11, 1} } func (x *SrvKeyspace_ServedFrom) GetTabletType() TabletType { @@ -1831,7 +1919,7 @@ var file_topodata_proto_rawDesc = []byte{ 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x75, 0x69, - 0x64, 0x22, 0xe0, 0x05, 0x0a, 0x06, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x2b, 0x0a, 0x05, + 0x64, 0x22, 0xba, 0x05, 0x0a, 0x06, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, @@ -1863,216 +1951,236 @@ var file_topodata_proto_rawDesc = []byte{ 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x14, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x54, 0x65, 0x72, 0x6d, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, - 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x64, 0x62, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x62, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, - 0x16, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x63, 0x6f, - 0x6c, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x14, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x43, 0x6f, 0x6c, 0x6c, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0x3a, 0x0a, 0x0c, 0x50, 0x6f, 0x72, 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, - 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, - 0x08, 0x0b, 0x10, 0x0c, 0x22, 0xbc, 0x05, 0x0a, 0x05, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x3a, - 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0c, 0x70, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x43, 0x0a, 0x17, 0x70, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x14, 0x70, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x54, 0x65, 0x72, 0x6d, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, - 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x40, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x73, 0x12, 0x46, 0x0a, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, - 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, - 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x1a, 0x9a, 0x01, 0x0a, 0x0b, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x09, - 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x1a, 0x9f, 0x01, 0x0a, 0x0d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, - 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, - 0x69, 0x65, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x72, 0x6f, - 0x7a, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x66, 0x72, 0x6f, 0x7a, 0x65, - 0x6e, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, - 0x05, 0x10, 0x06, 0x22, 0xdd, 0x03, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x40, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, - 0x6d, 0x73, 0x12, 0x3b, 0x0a, 0x0d, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x0c, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x31, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, - 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x10, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x44, 0x0a, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, - 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, - 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x74, 0x68, 0x72, 0x6f, 0x74, - 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x75, 0x0a, 0x0a, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, - 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, - 0x03, 0x10, 0x04, 0x22, 0x8b, 0x01, 0x0a, 0x10, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x1a, - 0x40, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x65, 0x12, 0x34, 0x0a, 0x16, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x14, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x43, 0x6f, + 0x6c, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3a, 0x0a, 0x0c, 0x50, 0x6f, 0x72, 0x74, 0x4d, + 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x03, + 0x10, 0x04, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x22, 0xbc, + 0x05, 0x0a, 0x05, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x3a, 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x12, 0x43, 0x0a, 0x17, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, + 0x74, 0x65, 0x72, 0x6d, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x52, 0x14, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x54, 0x65, 0x72, 0x6d, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x40, 0x0a, 0x0d, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x46, 0x0a, 0x0f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x69, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x6e, 0x67, 0x1a, 0x9a, 0x01, 0x0a, 0x0b, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x75, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x1a, + 0x9f, 0x01, 0x0a, 0x0d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x72, 0x6f, 0x7a, 0x65, 0x6e, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x06, 0x66, 0x72, 0x6f, 0x7a, 0x65, 0x6e, 0x4a, 0x04, 0x08, 0x03, 0x10, + 0x04, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x85, 0x04, + 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x40, 0x0a, 0x0c, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x73, 0x12, 0x3b, 0x0a, 0x0d, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61, 0x73, + 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x31, + 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x75, + 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x44, + 0x0a, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x0f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x5f, + 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, + 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x44, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x75, 0x0a, 0x0a, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, + 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x8b, 0x01, 0x0a, 0x10, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x05, 0x6e, 0x6f, + 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, + 0x73, 0x1a, 0x40, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x15, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x38, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x15, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x38, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, - 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, - 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, - 0x39, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, - 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, - 0x44, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x4f, 0x50, 0x4f, 0x4c, 0x4f, 0x47, 0x59, 0x5f, - 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0e, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, - 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x22, 0x8f, 0x01, 0x0a, 0x12, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, - 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x34, 0x0a, - 0x16, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x64, - 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x69, 0x73, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x22, 0x9b, 0x01, 0x0a, 0x0f, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, - 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, - 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x12, 0x2d, 0x0a, 0x13, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x61, 0x73, 0x5f, 0x63, - 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x65, 0x6c, 0x66, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x6c, - 0x66, 0x22, 0xb6, 0x04, 0x0a, 0x0b, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, - 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x0b, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x20, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, - 0x6d, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x44, 0x0a, - 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x0f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x1a, 0xe1, 0x01, 0x0a, 0x11, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x43, 0x0a, 0x10, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x50, 0x0a, 0x15, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x52, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x1a, 0x5f, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, - 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x4b, 0x0a, 0x08, 0x43, 0x65, - 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, - 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, - 0x74, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x22, 0x0a, 0x0a, 0x43, 0x65, 0x6c, 0x6c, 0x73, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x55, 0x0a, 0x0a, 0x54, - 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x70, - 0x6f, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, - 0x70, 0x6f, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x12, - 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, - 0x6f, 0x74, 0x22, 0x4e, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x56, 0x69, - 0x74, 0x65, 0x73, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x74, - 0x6f, 0x70, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x6f, 0x70, 0x6f, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x74, 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x22, 0x5a, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x46, 0x0a, 0x0e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, - 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x56, 0x69, 0x74, 0x65, 0x73, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, - 0x0d, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2a, 0x28, - 0x0a, 0x0c, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, - 0x0a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4e, - 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x10, 0x01, 0x2a, 0x9d, 0x01, 0x0a, 0x0a, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, - 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x49, 0x4d, 0x41, 0x52, 0x59, 0x10, - 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, 0x53, 0x54, 0x45, 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, - 0x07, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x44, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x41, 0x54, 0x43, 0x48, 0x10, - 0x03, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x41, 0x52, 0x45, 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, - 0x45, 0x58, 0x50, 0x45, 0x52, 0x49, 0x4d, 0x45, 0x4e, 0x54, 0x41, 0x4c, 0x10, 0x05, 0x12, 0x0a, - 0x0a, 0x06, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, - 0x53, 0x54, 0x4f, 0x52, 0x45, 0x10, 0x07, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x52, 0x41, 0x49, 0x4e, - 0x45, 0x44, 0x10, 0x08, 0x1a, 0x02, 0x10, 0x01, 0x42, 0x38, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, - 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x25, 0x76, 0x69, 0x74, - 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, - 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x22, 0x39, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, + 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x4f, 0x50, 0x4f, 0x4c, 0x4f, 0x47, + 0x59, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0e, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x22, 0x8f, 0x01, 0x0a, 0x12, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, + 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x34, 0x0a, 0x16, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x14, 0x71, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x81, 0x01, 0x0a, 0x10, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, + 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x12, 0x2b, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, + 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, + 0x74, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x06, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x22, 0xce, 0x02, 0x0a, 0x0f, 0x54, 0x68, + 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, + 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, + 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2d, 0x0a, 0x13, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x5f, 0x61, 0x73, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x65, 0x6c, 0x66, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x73, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x53, 0x65, 0x6c, 0x66, 0x12, 0x53, 0x0a, 0x0e, 0x74, 0x68, 0x72, 0x6f, 0x74, + 0x74, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x70, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, + 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, + 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x74, + 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x73, 0x1a, 0x5c, 0x0a, 0x12, + 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb6, 0x04, 0x0a, 0x0b, 0x53, + 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x70, 0x61, + 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x50, 0x61, + 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, + 0x6f, 0x6d, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x44, 0x0a, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, + 0x6c, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, + 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x74, 0x68, 0x72, + 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xe1, 0x01, 0x0a, + 0x11, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x50, + 0x0a, 0x15, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x13, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, + 0x1a, 0x5f, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x35, + 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, + 0x05, 0x10, 0x06, 0x22, 0x4b, 0x0a, 0x08, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, + 0x22, 0x22, 0x0a, 0x0a, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, + 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, + 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x55, 0x0a, 0x0a, 0x54, 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x70, 0x6f, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x22, 0x4e, 0x0a, 0x15, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x56, 0x69, 0x74, 0x65, 0x73, 0x73, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0a, 0x74, 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x5a, 0x0a, 0x10, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, + 0x46, 0x0a, 0x0e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x56, 0x69, 0x74, 0x65, 0x73, + 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x0d, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2a, 0x28, 0x0a, 0x0c, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, + 0x4c, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x10, + 0x01, 0x2a, 0x9d, 0x01, 0x0a, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, + 0x07, 0x50, 0x52, 0x49, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, + 0x53, 0x54, 0x45, 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, + 0x41, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x44, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, + 0x09, 0x0a, 0x05, 0x42, 0x41, 0x54, 0x43, 0x48, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, + 0x41, 0x52, 0x45, 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x45, 0x52, 0x49, 0x4d, + 0x45, 0x4e, 0x54, 0x41, 0x4c, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x42, 0x41, 0x43, 0x4b, 0x55, + 0x50, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x10, 0x07, + 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x08, 0x1a, 0x02, 0x10, + 0x01, 0x42, 0x38, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x25, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, + 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -2088,7 +2196,7 @@ func file_topodata_proto_rawDescGZIP() []byte { } var file_topodata_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_topodata_proto_msgTypes = make([]protoimpl.MessageInfo, 24) +var file_topodata_proto_msgTypes = make([]protoimpl.MessageInfo, 26) var file_topodata_proto_goTypes = []interface{}{ (KeyspaceType)(0), // 0: topodata.KeyspaceType (TabletType)(0), // 1: topodata.TabletType @@ -2102,62 +2210,67 @@ var file_topodata_proto_goTypes = []interface{}{ (*ShardReplicationError)(nil), // 9: topodata.ShardReplicationError (*ShardReference)(nil), // 10: topodata.ShardReference (*ShardTabletControl)(nil), // 11: topodata.ShardTabletControl - (*ThrottlerConfig)(nil), // 12: topodata.ThrottlerConfig - (*SrvKeyspace)(nil), // 13: topodata.SrvKeyspace - (*CellInfo)(nil), // 14: topodata.CellInfo - (*CellsAlias)(nil), // 15: topodata.CellsAlias - (*TopoConfig)(nil), // 16: topodata.TopoConfig - (*ExternalVitessCluster)(nil), // 17: topodata.ExternalVitessCluster - (*ExternalClusters)(nil), // 18: topodata.ExternalClusters - nil, // 19: topodata.Tablet.PortMapEntry - nil, // 20: topodata.Tablet.TagsEntry - (*Shard_SourceShard)(nil), // 21: topodata.Shard.SourceShard - (*Shard_TabletControl)(nil), // 22: topodata.Shard.TabletControl - (*Keyspace_ServedFrom)(nil), // 23: topodata.Keyspace.ServedFrom - (*ShardReplication_Node)(nil), // 24: topodata.ShardReplication.Node - (*SrvKeyspace_KeyspacePartition)(nil), // 25: topodata.SrvKeyspace.KeyspacePartition - (*SrvKeyspace_ServedFrom)(nil), // 26: topodata.SrvKeyspace.ServedFrom - (*vttime.Time)(nil), // 27: vttime.Time + (*ThrottledAppRule)(nil), // 12: topodata.ThrottledAppRule + (*ThrottlerConfig)(nil), // 13: topodata.ThrottlerConfig + (*SrvKeyspace)(nil), // 14: topodata.SrvKeyspace + (*CellInfo)(nil), // 15: topodata.CellInfo + (*CellsAlias)(nil), // 16: topodata.CellsAlias + (*TopoConfig)(nil), // 17: topodata.TopoConfig + (*ExternalVitessCluster)(nil), // 18: topodata.ExternalVitessCluster + (*ExternalClusters)(nil), // 19: topodata.ExternalClusters + nil, // 20: topodata.Tablet.PortMapEntry + nil, // 21: topodata.Tablet.TagsEntry + (*Shard_SourceShard)(nil), // 22: topodata.Shard.SourceShard + (*Shard_TabletControl)(nil), // 23: topodata.Shard.TabletControl + (*Keyspace_ServedFrom)(nil), // 24: topodata.Keyspace.ServedFrom + (*ShardReplication_Node)(nil), // 25: topodata.ShardReplication.Node + nil, // 26: topodata.ThrottlerConfig.ThrottledAppsEntry + (*SrvKeyspace_KeyspacePartition)(nil), // 27: topodata.SrvKeyspace.KeyspacePartition + (*SrvKeyspace_ServedFrom)(nil), // 28: topodata.SrvKeyspace.ServedFrom + (*vttime.Time)(nil), // 29: vttime.Time } var file_topodata_proto_depIdxs = []int32{ 4, // 0: topodata.Tablet.alias:type_name -> topodata.TabletAlias - 19, // 1: topodata.Tablet.port_map:type_name -> topodata.Tablet.PortMapEntry + 20, // 1: topodata.Tablet.port_map:type_name -> topodata.Tablet.PortMapEntry 3, // 2: topodata.Tablet.key_range:type_name -> topodata.KeyRange 1, // 3: topodata.Tablet.type:type_name -> topodata.TabletType - 20, // 4: topodata.Tablet.tags:type_name -> topodata.Tablet.TagsEntry - 27, // 5: topodata.Tablet.primary_term_start_time:type_name -> vttime.Time + 21, // 4: topodata.Tablet.tags:type_name -> topodata.Tablet.TagsEntry + 29, // 5: topodata.Tablet.primary_term_start_time:type_name -> vttime.Time 4, // 6: topodata.Shard.primary_alias:type_name -> topodata.TabletAlias - 27, // 7: topodata.Shard.primary_term_start_time:type_name -> vttime.Time + 29, // 7: topodata.Shard.primary_term_start_time:type_name -> vttime.Time 3, // 8: topodata.Shard.key_range:type_name -> topodata.KeyRange - 21, // 9: topodata.Shard.source_shards:type_name -> topodata.Shard.SourceShard - 22, // 10: topodata.Shard.tablet_controls:type_name -> topodata.Shard.TabletControl - 23, // 11: topodata.Keyspace.served_froms:type_name -> topodata.Keyspace.ServedFrom + 22, // 9: topodata.Shard.source_shards:type_name -> topodata.Shard.SourceShard + 23, // 10: topodata.Shard.tablet_controls:type_name -> topodata.Shard.TabletControl + 24, // 11: topodata.Keyspace.served_froms:type_name -> topodata.Keyspace.ServedFrom 0, // 12: topodata.Keyspace.keyspace_type:type_name -> topodata.KeyspaceType - 27, // 13: topodata.Keyspace.snapshot_time:type_name -> vttime.Time - 12, // 14: topodata.Keyspace.throttler_config:type_name -> topodata.ThrottlerConfig - 24, // 15: topodata.ShardReplication.nodes:type_name -> topodata.ShardReplication.Node + 29, // 13: topodata.Keyspace.snapshot_time:type_name -> vttime.Time + 13, // 14: topodata.Keyspace.throttler_config:type_name -> topodata.ThrottlerConfig + 25, // 15: topodata.ShardReplication.nodes:type_name -> topodata.ShardReplication.Node 2, // 16: topodata.ShardReplicationError.type:type_name -> topodata.ShardReplicationError.Type 4, // 17: topodata.ShardReplicationError.tablet_alias:type_name -> topodata.TabletAlias 3, // 18: topodata.ShardReference.key_range:type_name -> topodata.KeyRange 3, // 19: topodata.ShardTabletControl.key_range:type_name -> topodata.KeyRange - 25, // 20: topodata.SrvKeyspace.partitions:type_name -> topodata.SrvKeyspace.KeyspacePartition - 26, // 21: topodata.SrvKeyspace.served_from:type_name -> topodata.SrvKeyspace.ServedFrom - 12, // 22: topodata.SrvKeyspace.throttler_config:type_name -> topodata.ThrottlerConfig - 16, // 23: topodata.ExternalVitessCluster.topo_config:type_name -> topodata.TopoConfig - 17, // 24: topodata.ExternalClusters.vitess_cluster:type_name -> topodata.ExternalVitessCluster - 3, // 25: topodata.Shard.SourceShard.key_range:type_name -> topodata.KeyRange - 1, // 26: topodata.Shard.TabletControl.tablet_type:type_name -> topodata.TabletType - 1, // 27: topodata.Keyspace.ServedFrom.tablet_type:type_name -> topodata.TabletType - 4, // 28: topodata.ShardReplication.Node.tablet_alias:type_name -> topodata.TabletAlias - 1, // 29: topodata.SrvKeyspace.KeyspacePartition.served_type:type_name -> topodata.TabletType - 10, // 30: topodata.SrvKeyspace.KeyspacePartition.shard_references:type_name -> topodata.ShardReference - 11, // 31: topodata.SrvKeyspace.KeyspacePartition.shard_tablet_controls:type_name -> topodata.ShardTabletControl - 1, // 32: topodata.SrvKeyspace.ServedFrom.tablet_type:type_name -> topodata.TabletType - 33, // [33:33] is the sub-list for method output_type - 33, // [33:33] is the sub-list for method input_type - 33, // [33:33] is the sub-list for extension type_name - 33, // [33:33] is the sub-list for extension extendee - 0, // [0:33] is the sub-list for field type_name + 29, // 20: topodata.ThrottledAppRule.expires_at:type_name -> vttime.Time + 26, // 21: topodata.ThrottlerConfig.throttled_apps:type_name -> topodata.ThrottlerConfig.ThrottledAppsEntry + 27, // 22: topodata.SrvKeyspace.partitions:type_name -> topodata.SrvKeyspace.KeyspacePartition + 28, // 23: topodata.SrvKeyspace.served_from:type_name -> topodata.SrvKeyspace.ServedFrom + 13, // 24: topodata.SrvKeyspace.throttler_config:type_name -> topodata.ThrottlerConfig + 17, // 25: topodata.ExternalVitessCluster.topo_config:type_name -> topodata.TopoConfig + 18, // 26: topodata.ExternalClusters.vitess_cluster:type_name -> topodata.ExternalVitessCluster + 3, // 27: topodata.Shard.SourceShard.key_range:type_name -> topodata.KeyRange + 1, // 28: topodata.Shard.TabletControl.tablet_type:type_name -> topodata.TabletType + 1, // 29: topodata.Keyspace.ServedFrom.tablet_type:type_name -> topodata.TabletType + 4, // 30: topodata.ShardReplication.Node.tablet_alias:type_name -> topodata.TabletAlias + 12, // 31: topodata.ThrottlerConfig.ThrottledAppsEntry.value:type_name -> topodata.ThrottledAppRule + 1, // 32: topodata.SrvKeyspace.KeyspacePartition.served_type:type_name -> topodata.TabletType + 10, // 33: topodata.SrvKeyspace.KeyspacePartition.shard_references:type_name -> topodata.ShardReference + 11, // 34: topodata.SrvKeyspace.KeyspacePartition.shard_tablet_controls:type_name -> topodata.ShardTabletControl + 1, // 35: topodata.SrvKeyspace.ServedFrom.tablet_type:type_name -> topodata.TabletType + 36, // [36:36] is the sub-list for method output_type + 36, // [36:36] is the sub-list for method input_type + 36, // [36:36] is the sub-list for extension type_name + 36, // [36:36] is the sub-list for extension extendee + 0, // [0:36] is the sub-list for field type_name } func init() { file_topodata_proto_init() } @@ -2275,7 +2388,7 @@ func file_topodata_proto_init() { } } file_topodata_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ThrottlerConfig); i { + switch v := v.(*ThrottledAppRule); i { case 0: return &v.state case 1: @@ -2287,7 +2400,7 @@ func file_topodata_proto_init() { } } file_topodata_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SrvKeyspace); i { + switch v := v.(*ThrottlerConfig); i { case 0: return &v.state case 1: @@ -2299,7 +2412,7 @@ func file_topodata_proto_init() { } } file_topodata_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CellInfo); i { + switch v := v.(*SrvKeyspace); i { case 0: return &v.state case 1: @@ -2311,7 +2424,7 @@ func file_topodata_proto_init() { } } file_topodata_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CellsAlias); i { + switch v := v.(*CellInfo); i { case 0: return &v.state case 1: @@ -2323,7 +2436,7 @@ func file_topodata_proto_init() { } } file_topodata_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TopoConfig); i { + switch v := v.(*CellsAlias); i { case 0: return &v.state case 1: @@ -2335,7 +2448,7 @@ func file_topodata_proto_init() { } } file_topodata_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExternalVitessCluster); i { + switch v := v.(*TopoConfig); i { case 0: return &v.state case 1: @@ -2347,6 +2460,18 @@ func file_topodata_proto_init() { } } file_topodata_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExternalVitessCluster); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_topodata_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExternalClusters); i { case 0: return &v.state @@ -2358,7 +2483,7 @@ func file_topodata_proto_init() { return nil } } - file_topodata_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_topodata_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Shard_SourceShard); i { case 0: return &v.state @@ -2370,7 +2495,7 @@ func file_topodata_proto_init() { return nil } } - file_topodata_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_topodata_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Shard_TabletControl); i { case 0: return &v.state @@ -2382,7 +2507,7 @@ func file_topodata_proto_init() { return nil } } - file_topodata_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_topodata_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Keyspace_ServedFrom); i { case 0: return &v.state @@ -2394,7 +2519,7 @@ func file_topodata_proto_init() { return nil } } - file_topodata_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_topodata_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ShardReplication_Node); i { case 0: return &v.state @@ -2406,7 +2531,7 @@ func file_topodata_proto_init() { return nil } } - file_topodata_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_topodata_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SrvKeyspace_KeyspacePartition); i { case 0: return &v.state @@ -2418,7 +2543,7 @@ func file_topodata_proto_init() { return nil } } - file_topodata_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_topodata_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SrvKeyspace_ServedFrom); i { case 0: return &v.state @@ -2437,7 +2562,7 @@ func file_topodata_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_topodata_proto_rawDesc, NumEnums: 3, - NumMessages: 24, + NumMessages: 26, NumExtensions: 0, NumServices: 0, }, diff --git a/go/vt/proto/topodata/topodata_vtproto.pb.go b/go/vt/proto/topodata/topodata_vtproto.pb.go index 49d2379215a..5e675bb4ea0 100644 --- a/go/vt/proto/topodata/topodata_vtproto.pb.go +++ b/go/vt/proto/topodata/topodata_vtproto.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: topodata.proto package topodata @@ -7,6 +7,7 @@ package topodata import ( binary "encoding/binary" fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" math "math" @@ -21,6 +22,569 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *KeyRange) CloneVT() *KeyRange { + if m == nil { + return (*KeyRange)(nil) + } + r := &KeyRange{} + if rhs := m.Start; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Start = tmpBytes + } + if rhs := m.End; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.End = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *KeyRange) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TabletAlias) CloneVT() *TabletAlias { + if m == nil { + return (*TabletAlias)(nil) + } + r := &TabletAlias{ + Cell: m.Cell, + Uid: m.Uid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TabletAlias) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Tablet) CloneVT() *Tablet { + if m == nil { + return (*Tablet)(nil) + } + r := &Tablet{ + Alias: m.Alias.CloneVT(), + Hostname: m.Hostname, + Keyspace: m.Keyspace, + Shard: m.Shard, + KeyRange: m.KeyRange.CloneVT(), + Type: m.Type, + DbNameOverride: m.DbNameOverride, + MysqlHostname: m.MysqlHostname, + MysqlPort: m.MysqlPort, + PrimaryTermStartTime: m.PrimaryTermStartTime.CloneVT(), + DefaultConnCollation: m.DefaultConnCollation, + } + if rhs := m.PortMap; rhs != nil { + tmpContainer := make(map[string]int32, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.PortMap = tmpContainer + } + if rhs := m.Tags; rhs != nil { + tmpContainer := make(map[string]string, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.Tags = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Tablet) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Shard_SourceShard) CloneVT() *Shard_SourceShard { + if m == nil { + return (*Shard_SourceShard)(nil) + } + r := &Shard_SourceShard{ + Uid: m.Uid, + Keyspace: m.Keyspace, + Shard: m.Shard, + KeyRange: m.KeyRange.CloneVT(), + } + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Shard_SourceShard) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Shard_TabletControl) CloneVT() *Shard_TabletControl { + if m == nil { + return (*Shard_TabletControl)(nil) + } + r := &Shard_TabletControl{ + TabletType: m.TabletType, + Frozen: m.Frozen, + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if rhs := m.DeniedTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.DeniedTables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Shard_TabletControl) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Shard) CloneVT() *Shard { + if m == nil { + return (*Shard)(nil) + } + r := &Shard{ + PrimaryAlias: m.PrimaryAlias.CloneVT(), + PrimaryTermStartTime: m.PrimaryTermStartTime.CloneVT(), + KeyRange: m.KeyRange.CloneVT(), + IsPrimaryServing: m.IsPrimaryServing, + } + if rhs := m.SourceShards; rhs != nil { + tmpContainer := make([]*Shard_SourceShard, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.SourceShards = tmpContainer + } + if rhs := m.TabletControls; rhs != nil { + tmpContainer := make([]*Shard_TabletControl, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TabletControls = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Shard) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Keyspace_ServedFrom) CloneVT() *Keyspace_ServedFrom { + if m == nil { + return (*Keyspace_ServedFrom)(nil) + } + r := &Keyspace_ServedFrom{ + TabletType: m.TabletType, + Keyspace: m.Keyspace, + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Keyspace_ServedFrom) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Keyspace) CloneVT() *Keyspace { + if m == nil { + return (*Keyspace)(nil) + } + r := &Keyspace{ + KeyspaceType: m.KeyspaceType, + BaseKeyspace: m.BaseKeyspace, + SnapshotTime: m.SnapshotTime.CloneVT(), + DurabilityPolicy: m.DurabilityPolicy, + ThrottlerConfig: m.ThrottlerConfig.CloneVT(), + SidecarDbName: m.SidecarDbName, + } + if rhs := m.ServedFroms; rhs != nil { + tmpContainer := make([]*Keyspace_ServedFrom, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ServedFroms = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Keyspace) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardReplication_Node) CloneVT() *ShardReplication_Node { + if m == nil { + return (*ShardReplication_Node)(nil) + } + r := &ShardReplication_Node{ + TabletAlias: m.TabletAlias.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShardReplication_Node) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardReplication) CloneVT() *ShardReplication { + if m == nil { + return (*ShardReplication)(nil) + } + r := &ShardReplication{} + if rhs := m.Nodes; rhs != nil { + tmpContainer := make([]*ShardReplication_Node, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Nodes = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShardReplication) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardReplicationError) CloneVT() *ShardReplicationError { + if m == nil { + return (*ShardReplicationError)(nil) + } + r := &ShardReplicationError{ + Type: m.Type, + TabletAlias: m.TabletAlias.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShardReplicationError) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardReference) CloneVT() *ShardReference { + if m == nil { + return (*ShardReference)(nil) + } + r := &ShardReference{ + Name: m.Name, + KeyRange: m.KeyRange.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShardReference) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardTabletControl) CloneVT() *ShardTabletControl { + if m == nil { + return (*ShardTabletControl)(nil) + } + r := &ShardTabletControl{ + Name: m.Name, + KeyRange: m.KeyRange.CloneVT(), + QueryServiceDisabled: m.QueryServiceDisabled, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShardTabletControl) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ThrottledAppRule) CloneVT() *ThrottledAppRule { + if m == nil { + return (*ThrottledAppRule)(nil) + } + r := &ThrottledAppRule{ + Name: m.Name, + Ratio: m.Ratio, + ExpiresAt: m.ExpiresAt.CloneVT(), + Exempt: m.Exempt, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ThrottledAppRule) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ThrottlerConfig) CloneVT() *ThrottlerConfig { + if m == nil { + return (*ThrottlerConfig)(nil) + } + r := &ThrottlerConfig{ + Enabled: m.Enabled, + Threshold: m.Threshold, + CustomQuery: m.CustomQuery, + CheckAsCheckSelf: m.CheckAsCheckSelf, + } + if rhs := m.ThrottledApps; rhs != nil { + tmpContainer := make(map[string]*ThrottledAppRule, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ThrottledApps = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ThrottlerConfig) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SrvKeyspace_KeyspacePartition) CloneVT() *SrvKeyspace_KeyspacePartition { + if m == nil { + return (*SrvKeyspace_KeyspacePartition)(nil) + } + r := &SrvKeyspace_KeyspacePartition{ + ServedType: m.ServedType, + } + if rhs := m.ShardReferences; rhs != nil { + tmpContainer := make([]*ShardReference, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ShardReferences = tmpContainer + } + if rhs := m.ShardTabletControls; rhs != nil { + tmpContainer := make([]*ShardTabletControl, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ShardTabletControls = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SrvKeyspace_KeyspacePartition) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SrvKeyspace_ServedFrom) CloneVT() *SrvKeyspace_ServedFrom { + if m == nil { + return (*SrvKeyspace_ServedFrom)(nil) + } + r := &SrvKeyspace_ServedFrom{ + TabletType: m.TabletType, + Keyspace: m.Keyspace, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SrvKeyspace_ServedFrom) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SrvKeyspace) CloneVT() *SrvKeyspace { + if m == nil { + return (*SrvKeyspace)(nil) + } + r := &SrvKeyspace{ + ThrottlerConfig: m.ThrottlerConfig.CloneVT(), + } + if rhs := m.Partitions; rhs != nil { + tmpContainer := make([]*SrvKeyspace_KeyspacePartition, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Partitions = tmpContainer + } + if rhs := m.ServedFrom; rhs != nil { + tmpContainer := make([]*SrvKeyspace_ServedFrom, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ServedFrom = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SrvKeyspace) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CellInfo) CloneVT() *CellInfo { + if m == nil { + return (*CellInfo)(nil) + } + r := &CellInfo{ + ServerAddress: m.ServerAddress, + Root: m.Root, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CellInfo) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CellsAlias) CloneVT() *CellsAlias { + if m == nil { + return (*CellsAlias)(nil) + } + r := &CellsAlias{} + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CellsAlias) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TopoConfig) CloneVT() *TopoConfig { + if m == nil { + return (*TopoConfig)(nil) + } + r := &TopoConfig{ + TopoType: m.TopoType, + Server: m.Server, + Root: m.Root, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TopoConfig) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExternalVitessCluster) CloneVT() *ExternalVitessCluster { + if m == nil { + return (*ExternalVitessCluster)(nil) + } + r := &ExternalVitessCluster{ + TopoConfig: m.TopoConfig.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExternalVitessCluster) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExternalClusters) CloneVT() *ExternalClusters { + if m == nil { + return (*ExternalClusters)(nil) + } + r := &ExternalClusters{} + if rhs := m.VitessCluster; rhs != nil { + tmpContainer := make([]*ExternalVitessCluster, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.VitessCluster = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExternalClusters) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *KeyRange) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -150,13 +714,6 @@ func (m *Tablet) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i-- dAtA[i] = 0x80 } - if len(m.DbServerVersion) > 0 { - i -= len(m.DbServerVersion) - copy(dAtA[i:], m.DbServerVersion) - i = encodeVarint(dAtA, i, uint64(len(m.DbServerVersion))) - i-- - dAtA[i] = 0x7a - } if m.PrimaryTermStartTime != nil { size, err := m.PrimaryTermStartTime.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -589,6 +1146,13 @@ func (m *Keyspace) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.SidecarDbName) > 0 { + i -= len(m.SidecarDbName) + copy(dAtA[i:], m.SidecarDbName) + i = encodeVarint(dAtA, i, uint64(len(m.SidecarDbName))) + i-- + dAtA[i] = 0x52 + } if m.ThrottlerConfig != nil { size, err := m.ThrottlerConfig.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -889,7 +1453,7 @@ func (m *ShardTabletControl) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ThrottlerConfig) MarshalVT() (dAtA []byte, err error) { +func (m *ThrottledAppRule) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -902,12 +1466,12 @@ func (m *ThrottlerConfig) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ThrottlerConfig) MarshalToVT(dAtA []byte) (int, error) { +func (m *ThrottledAppRule) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ThrottlerConfig) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ThrottledAppRule) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -919,9 +1483,9 @@ func (m *ThrottlerConfig) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.CheckAsCheckSelf { + if m.Exempt { i-- - if m.CheckAsCheckSelf { + if m.Exempt { dAtA[i] = 1 } else { dAtA[i] = 0 @@ -929,33 +1493,33 @@ func (m *ThrottlerConfig) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i-- dAtA[i] = 0x20 } - if len(m.CustomQuery) > 0 { - i -= len(m.CustomQuery) - copy(dAtA[i:], m.CustomQuery) - i = encodeVarint(dAtA, i, uint64(len(m.CustomQuery))) + if m.ExpiresAt != nil { + size, err := m.ExpiresAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0x1a } - if m.Threshold != 0 { + if m.Ratio != 0 { i -= 8 - binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Threshold)))) + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Ratio)))) i-- dAtA[i] = 0x11 } - if m.Enabled { - i-- - if m.Enabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *SrvKeyspace_KeyspacePartition) MarshalVT() (dAtA []byte, err error) { +func (m *ThrottlerConfig) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -968,12 +1532,12 @@ func (m *SrvKeyspace_KeyspacePartition) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SrvKeyspace_KeyspacePartition) MarshalToVT(dAtA []byte) (int, error) { +func (m *ThrottlerConfig) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SrvKeyspace_KeyspacePartition) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ThrottlerConfig) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -985,24 +1549,112 @@ func (m *SrvKeyspace_KeyspacePartition) MarshalToSizedBufferVT(dAtA []byte) (int i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.ShardTabletControls) > 0 { - for iNdEx := len(m.ShardTabletControls) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.ShardTabletControls[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if len(m.ThrottledApps) > 0 { + for k := range m.ThrottledApps { + v := m.ThrottledApps[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x1a - } - } - if len(m.ShardReferences) > 0 { - for iNdEx := len(m.ShardReferences) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.ShardReferences[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.CheckAsCheckSelf { + i-- + if m.CheckAsCheckSelf { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.CustomQuery) > 0 { + i -= len(m.CustomQuery) + copy(dAtA[i:], m.CustomQuery) + i = encodeVarint(dAtA, i, uint64(len(m.CustomQuery))) + i-- + dAtA[i] = 0x1a + } + if m.Threshold != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Threshold)))) + i-- + dAtA[i] = 0x11 + } + if m.Enabled { + i-- + if m.Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SrvKeyspace_KeyspacePartition) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SrvKeyspace_KeyspacePartition) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SrvKeyspace_KeyspacePartition) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ShardTabletControls) > 0 { + for iNdEx := len(m.ShardTabletControls) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ShardTabletControls[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.ShardReferences) > 0 { + for iNdEx := len(m.ShardReferences) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ShardReferences[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- @@ -1466,10 +2118,6 @@ func (m *Tablet) SizeVT() (n int) { l = m.PrimaryTermStartTime.SizeVT() n += 1 + l + sov(uint64(l)) } - l = len(m.DbServerVersion) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } if m.DefaultConnCollation != 0 { n += 2 + sov(uint64(m.DefaultConnCollation)) } @@ -1627,6 +2275,10 @@ func (m *Keyspace) SizeVT() (n int) { l = m.ThrottlerConfig.SizeVT() n += 1 + l + sov(uint64(l)) } + l = len(m.SidecarDbName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -1717,6 +2369,30 @@ func (m *ShardTabletControl) SizeVT() (n int) { return n } +func (m *ThrottledAppRule) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Ratio != 0 { + n += 9 + } + if m.ExpiresAt != nil { + l = m.ExpiresAt.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Exempt { + n += 2 + } + n += len(m.unknownFields) + return n +} + func (m *ThrottlerConfig) SizeVT() (n int) { if m == nil { return 0 @@ -1736,6 +2412,19 @@ func (m *ThrottlerConfig) SizeVT() (n int) { if m.CheckAsCheckSelf { n += 2 } + if len(m.ThrottledApps) > 0 { + for k, v := range m.ThrottledApps { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } n += len(m.unknownFields) return n } @@ -2696,38 +3385,6 @@ func (m *Tablet) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DbServerVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DbServerVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 16: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field DefaultConnCollation", wireType) @@ -3724,6 +4381,38 @@ func (m *Keyspace) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SidecarDbName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SidecarDbName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -4282,6 +4971,156 @@ func (m *ShardTabletControl) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *ThrottledAppRule) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ThrottledAppRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ThrottledAppRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Ratio", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Ratio = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpiresAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExpiresAt == nil { + m.ExpiresAt = &vttime.Time{} + } + if err := m.ExpiresAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exempt", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Exempt = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ThrottlerConfig) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -4394,6 +5233,135 @@ func (m *ThrottlerConfig) UnmarshalVT(dAtA []byte) error { } } m.CheckAsCheckSelf = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ThrottledApps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ThrottledApps == nil { + m.ThrottledApps = make(map[string]*ThrottledAppRule) + } + var mapkey string + var mapvalue *ThrottledAppRule + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ThrottledAppRule{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ThrottledApps[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/go/vt/proto/vschema/vschema.pb.go b/go/vt/proto/vschema/vschema.pb.go index e86c1613682..4783fa51586 100644 --- a/go/vt/proto/vschema/vschema.pb.go +++ b/go/vt/proto/vschema/vschema.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: vschema.proto @@ -38,6 +38,58 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type Keyspace_ForeignKeyMode int32 + +const ( + Keyspace_FK_DEFAULT Keyspace_ForeignKeyMode = 0 + Keyspace_FK_DISALLOW Keyspace_ForeignKeyMode = 1 + Keyspace_FK_UNMANAGED Keyspace_ForeignKeyMode = 2 + Keyspace_FK_MANAGED Keyspace_ForeignKeyMode = 3 +) + +// Enum value maps for Keyspace_ForeignKeyMode. +var ( + Keyspace_ForeignKeyMode_name = map[int32]string{ + 0: "FK_DEFAULT", + 1: "FK_DISALLOW", + 2: "FK_UNMANAGED", + 3: "FK_MANAGED", + } + Keyspace_ForeignKeyMode_value = map[string]int32{ + "FK_DEFAULT": 0, + "FK_DISALLOW": 1, + "FK_UNMANAGED": 2, + "FK_MANAGED": 3, + } +) + +func (x Keyspace_ForeignKeyMode) Enum() *Keyspace_ForeignKeyMode { + p := new(Keyspace_ForeignKeyMode) + *p = x + return p +} + +func (x Keyspace_ForeignKeyMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Keyspace_ForeignKeyMode) Descriptor() protoreflect.EnumDescriptor { + return file_vschema_proto_enumTypes[0].Descriptor() +} + +func (Keyspace_ForeignKeyMode) Type() protoreflect.EnumType { + return &file_vschema_proto_enumTypes[0] +} + +func (x Keyspace_ForeignKeyMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Keyspace_ForeignKeyMode.Descriptor instead. +func (Keyspace_ForeignKeyMode) EnumDescriptor() ([]byte, []int) { + return file_vschema_proto_rawDescGZIP(), []int{2, 0} +} + // RoutingRules specify the high level routing rules for the VSchema. type RoutingRules struct { state protoimpl.MessageState @@ -157,6 +209,8 @@ type Keyspace struct { Tables map[string]*Table `protobuf:"bytes,3,rep,name=tables,proto3" json:"tables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // If require_explicit_routing is true, vindexes and tables are not added to global routing RequireExplicitRouting bool `protobuf:"varint,4,opt,name=require_explicit_routing,json=requireExplicitRouting,proto3" json:"require_explicit_routing,omitempty"` + // foreign_key_mode dictates how Vitess should handle foreign keys for this keyspace. + ForeignKeyMode Keyspace_ForeignKeyMode `protobuf:"varint,5,opt,name=foreign_key_mode,json=foreignKeyMode,proto3,enum=vschema.Keyspace_ForeignKeyMode" json:"foreign_key_mode,omitempty"` } func (x *Keyspace) Reset() { @@ -219,6 +273,13 @@ func (x *Keyspace) GetRequireExplicitRouting() bool { return false } +func (x *Keyspace) GetForeignKeyMode() Keyspace_ForeignKeyMode { + if x != nil { + return x.ForeignKeyMode + } + return Keyspace_FK_DEFAULT +} + // Vindex is the vindex info for a Keyspace. type Vindex struct { state protoimpl.MessageState @@ -779,7 +840,7 @@ var file_vschema_proto_rawDesc = []byte{ 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x66, 0x72, 0x6f, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0xeb, 0x02, 0x0a, + 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x8c, 0x04, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x65, 0x64, 0x12, 0x3b, 0x0a, 0x08, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, @@ -793,92 +854,102 @@ var file_vschema_proto_rawDesc = []byte{ 0x72, 0x65, 0x5f, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x45, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, - 0x67, 0x1a, 0x4c, 0x0a, 0x0d, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x56, 0x69, - 0x6e, 0x64, 0x65, 0x78, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, - 0x49, 0x0a, 0x0b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x67, 0x12, 0x4a, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x76, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x46, + 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0e, 0x66, + 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x1a, 0x4c, 0x0a, + 0x0d, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0e, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa2, 0x01, 0x0a, 0x06, 0x56, - 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x2e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x14, - 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, - 0x77, 0x6e, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0b, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0xb1, 0x02, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, 0x0a, - 0x0f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x0e, 0x63, - 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x3d, 0x0a, - 0x0e, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, - 0x41, 0x75, 0x74, 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0d, 0x61, - 0x75, 0x74, 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x29, 0x0a, 0x07, - 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x52, 0x07, - 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x69, 0x6e, 0x6e, 0x65, - 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x12, - 0x3a, 0x0a, 0x19, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x61, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x17, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x22, 0x54, 0x0a, 0x0c, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x56, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0x43, 0x0a, 0x0d, 0x41, 0x75, 0x74, - 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, - 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, - 0x6d, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x3d, - 0x0a, 0x06, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xa7, 0x02, - 0x0a, 0x0a, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x40, 0x0a, 0x09, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x3a, - 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, - 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, - 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, - 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, - 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x1a, 0x4f, 0x0a, 0x0e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x44, 0x0a, 0x11, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x05, - 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x76, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, - 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x6e, 0x0a, - 0x10, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, - 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x66, 0x72, 0x6f, 0x6d, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x6f, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x42, 0x26, 0x5a, - 0x24, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, - 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x49, 0x0a, 0x0b, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x76, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x53, 0x0a, 0x0e, 0x46, 0x6f, 0x72, 0x65, 0x69, 0x67, + 0x6e, 0x4b, 0x65, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x46, 0x4b, 0x5f, 0x44, + 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x46, 0x4b, 0x5f, 0x44, + 0x49, 0x53, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x4b, 0x5f, + 0x55, 0x4e, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x46, + 0x4b, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44, 0x10, 0x03, 0x22, 0xa2, 0x01, 0x0a, 0x06, + 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, + 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0b, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0xb1, 0x02, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, + 0x0a, 0x0f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x0e, + 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x3d, + 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0d, + 0x61, 0x75, 0x74, 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x29, 0x0a, + 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x52, + 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x69, 0x6e, 0x6e, + 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x69, 0x6e, 0x6e, 0x65, 0x64, + 0x12, 0x3a, 0x0a, 0x19, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, + 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x17, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x22, 0x54, 0x0a, 0x0c, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x56, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, 0x43, 0x0a, 0x0d, 0x41, 0x75, + 0x74, 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x63, + 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6c, + 0x75, 0x6d, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x22, + 0x3d, 0x0a, 0x06, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xa7, + 0x02, 0x0a, 0x0a, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x40, 0x0a, + 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, + 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x4a, 0x0a, 0x13, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x1a, 0x4f, 0x0a, 0x0e, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x44, 0x0a, 0x11, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2f, 0x0a, + 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x76, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x6e, + 0x0a, 0x10, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, + 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x66, 0x72, 0x6f, 0x6d, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x5f, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x6f, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x42, 0x26, + 0x5a, 0x24, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, + 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -893,46 +964,49 @@ func file_vschema_proto_rawDescGZIP() []byte { return file_vschema_proto_rawDescData } +var file_vschema_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_vschema_proto_msgTypes = make([]protoimpl.MessageInfo, 15) var file_vschema_proto_goTypes = []interface{}{ - (*RoutingRules)(nil), // 0: vschema.RoutingRules - (*RoutingRule)(nil), // 1: vschema.RoutingRule - (*Keyspace)(nil), // 2: vschema.Keyspace - (*Vindex)(nil), // 3: vschema.Vindex - (*Table)(nil), // 4: vschema.Table - (*ColumnVindex)(nil), // 5: vschema.ColumnVindex - (*AutoIncrement)(nil), // 6: vschema.AutoIncrement - (*Column)(nil), // 7: vschema.Column - (*SrvVSchema)(nil), // 8: vschema.SrvVSchema - (*ShardRoutingRules)(nil), // 9: vschema.ShardRoutingRules - (*ShardRoutingRule)(nil), // 10: vschema.ShardRoutingRule - nil, // 11: vschema.Keyspace.VindexesEntry - nil, // 12: vschema.Keyspace.TablesEntry - nil, // 13: vschema.Vindex.ParamsEntry - nil, // 14: vschema.SrvVSchema.KeyspacesEntry - (query.Type)(0), // 15: query.Type + (Keyspace_ForeignKeyMode)(0), // 0: vschema.Keyspace.ForeignKeyMode + (*RoutingRules)(nil), // 1: vschema.RoutingRules + (*RoutingRule)(nil), // 2: vschema.RoutingRule + (*Keyspace)(nil), // 3: vschema.Keyspace + (*Vindex)(nil), // 4: vschema.Vindex + (*Table)(nil), // 5: vschema.Table + (*ColumnVindex)(nil), // 6: vschema.ColumnVindex + (*AutoIncrement)(nil), // 7: vschema.AutoIncrement + (*Column)(nil), // 8: vschema.Column + (*SrvVSchema)(nil), // 9: vschema.SrvVSchema + (*ShardRoutingRules)(nil), // 10: vschema.ShardRoutingRules + (*ShardRoutingRule)(nil), // 11: vschema.ShardRoutingRule + nil, // 12: vschema.Keyspace.VindexesEntry + nil, // 13: vschema.Keyspace.TablesEntry + nil, // 14: vschema.Vindex.ParamsEntry + nil, // 15: vschema.SrvVSchema.KeyspacesEntry + (query.Type)(0), // 16: query.Type } var file_vschema_proto_depIdxs = []int32{ - 1, // 0: vschema.RoutingRules.rules:type_name -> vschema.RoutingRule - 11, // 1: vschema.Keyspace.vindexes:type_name -> vschema.Keyspace.VindexesEntry - 12, // 2: vschema.Keyspace.tables:type_name -> vschema.Keyspace.TablesEntry - 13, // 3: vschema.Vindex.params:type_name -> vschema.Vindex.ParamsEntry - 5, // 4: vschema.Table.column_vindexes:type_name -> vschema.ColumnVindex - 6, // 5: vschema.Table.auto_increment:type_name -> vschema.AutoIncrement - 7, // 6: vschema.Table.columns:type_name -> vschema.Column - 15, // 7: vschema.Column.type:type_name -> query.Type - 14, // 8: vschema.SrvVSchema.keyspaces:type_name -> vschema.SrvVSchema.KeyspacesEntry - 0, // 9: vschema.SrvVSchema.routing_rules:type_name -> vschema.RoutingRules - 9, // 10: vschema.SrvVSchema.shard_routing_rules:type_name -> vschema.ShardRoutingRules - 10, // 11: vschema.ShardRoutingRules.rules:type_name -> vschema.ShardRoutingRule - 3, // 12: vschema.Keyspace.VindexesEntry.value:type_name -> vschema.Vindex - 4, // 13: vschema.Keyspace.TablesEntry.value:type_name -> vschema.Table - 2, // 14: vschema.SrvVSchema.KeyspacesEntry.value:type_name -> vschema.Keyspace - 15, // [15:15] is the sub-list for method output_type - 15, // [15:15] is the sub-list for method input_type - 15, // [15:15] is the sub-list for extension type_name - 15, // [15:15] is the sub-list for extension extendee - 0, // [0:15] is the sub-list for field type_name + 2, // 0: vschema.RoutingRules.rules:type_name -> vschema.RoutingRule + 12, // 1: vschema.Keyspace.vindexes:type_name -> vschema.Keyspace.VindexesEntry + 13, // 2: vschema.Keyspace.tables:type_name -> vschema.Keyspace.TablesEntry + 0, // 3: vschema.Keyspace.foreign_key_mode:type_name -> vschema.Keyspace.ForeignKeyMode + 14, // 4: vschema.Vindex.params:type_name -> vschema.Vindex.ParamsEntry + 6, // 5: vschema.Table.column_vindexes:type_name -> vschema.ColumnVindex + 7, // 6: vschema.Table.auto_increment:type_name -> vschema.AutoIncrement + 8, // 7: vschema.Table.columns:type_name -> vschema.Column + 16, // 8: vschema.Column.type:type_name -> query.Type + 15, // 9: vschema.SrvVSchema.keyspaces:type_name -> vschema.SrvVSchema.KeyspacesEntry + 1, // 10: vschema.SrvVSchema.routing_rules:type_name -> vschema.RoutingRules + 10, // 11: vschema.SrvVSchema.shard_routing_rules:type_name -> vschema.ShardRoutingRules + 11, // 12: vschema.ShardRoutingRules.rules:type_name -> vschema.ShardRoutingRule + 4, // 13: vschema.Keyspace.VindexesEntry.value:type_name -> vschema.Vindex + 5, // 14: vschema.Keyspace.TablesEntry.value:type_name -> vschema.Table + 3, // 15: vschema.SrvVSchema.KeyspacesEntry.value:type_name -> vschema.Keyspace + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name } func init() { file_vschema_proto_init() } @@ -1079,13 +1153,14 @@ func file_vschema_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_vschema_proto_rawDesc, - NumEnums: 0, + NumEnums: 1, NumMessages: 15, NumExtensions: 0, NumServices: 0, }, GoTypes: file_vschema_proto_goTypes, DependencyIndexes: file_vschema_proto_depIdxs, + EnumInfos: file_vschema_proto_enumTypes, MessageInfos: file_vschema_proto_msgTypes, }.Build() File_vschema_proto = out.File diff --git a/go/vt/proto/vschema/vschema_vtproto.pb.go b/go/vt/proto/vschema/vschema_vtproto.pb.go index 1b461eba1ff..89f6e38a69f 100644 --- a/go/vt/proto/vschema/vschema_vtproto.pb.go +++ b/go/vt/proto/vschema/vschema_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: vschema.proto package vschema import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -19,6 +20,279 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *RoutingRules) CloneVT() *RoutingRules { + if m == nil { + return (*RoutingRules)(nil) + } + r := &RoutingRules{} + if rhs := m.Rules; rhs != nil { + tmpContainer := make([]*RoutingRule, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Rules = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RoutingRules) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RoutingRule) CloneVT() *RoutingRule { + if m == nil { + return (*RoutingRule)(nil) + } + r := &RoutingRule{ + FromTable: m.FromTable, + } + if rhs := m.ToTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ToTables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RoutingRule) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Keyspace) CloneVT() *Keyspace { + if m == nil { + return (*Keyspace)(nil) + } + r := &Keyspace{ + Sharded: m.Sharded, + RequireExplicitRouting: m.RequireExplicitRouting, + ForeignKeyMode: m.ForeignKeyMode, + } + if rhs := m.Vindexes; rhs != nil { + tmpContainer := make(map[string]*Vindex, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Vindexes = tmpContainer + } + if rhs := m.Tables; rhs != nil { + tmpContainer := make(map[string]*Table, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Tables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Keyspace) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Vindex) CloneVT() *Vindex { + if m == nil { + return (*Vindex)(nil) + } + r := &Vindex{ + Type: m.Type, + Owner: m.Owner, + } + if rhs := m.Params; rhs != nil { + tmpContainer := make(map[string]string, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.Params = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Vindex) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Table) CloneVT() *Table { + if m == nil { + return (*Table)(nil) + } + r := &Table{ + Type: m.Type, + AutoIncrement: m.AutoIncrement.CloneVT(), + Pinned: m.Pinned, + ColumnListAuthoritative: m.ColumnListAuthoritative, + Source: m.Source, + } + if rhs := m.ColumnVindexes; rhs != nil { + tmpContainer := make([]*ColumnVindex, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ColumnVindexes = tmpContainer + } + if rhs := m.Columns; rhs != nil { + tmpContainer := make([]*Column, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Columns = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Table) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ColumnVindex) CloneVT() *ColumnVindex { + if m == nil { + return (*ColumnVindex)(nil) + } + r := &ColumnVindex{ + Column: m.Column, + Name: m.Name, + } + if rhs := m.Columns; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Columns = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ColumnVindex) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *AutoIncrement) CloneVT() *AutoIncrement { + if m == nil { + return (*AutoIncrement)(nil) + } + r := &AutoIncrement{ + Column: m.Column, + Sequence: m.Sequence, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *AutoIncrement) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Column) CloneVT() *Column { + if m == nil { + return (*Column)(nil) + } + r := &Column{ + Name: m.Name, + Type: m.Type, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Column) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SrvVSchema) CloneVT() *SrvVSchema { + if m == nil { + return (*SrvVSchema)(nil) + } + r := &SrvVSchema{ + RoutingRules: m.RoutingRules.CloneVT(), + ShardRoutingRules: m.ShardRoutingRules.CloneVT(), + } + if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make(map[string]*Keyspace, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Keyspaces = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SrvVSchema) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardRoutingRules) CloneVT() *ShardRoutingRules { + if m == nil { + return (*ShardRoutingRules)(nil) + } + r := &ShardRoutingRules{} + if rhs := m.Rules; rhs != nil { + tmpContainer := make([]*ShardRoutingRule, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Rules = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShardRoutingRules) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardRoutingRule) CloneVT() *ShardRoutingRule { + if m == nil { + return (*ShardRoutingRule)(nil) + } + r := &ShardRoutingRule{ + FromKeyspace: m.FromKeyspace, + ToKeyspace: m.ToKeyspace, + Shard: m.Shard, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ShardRoutingRule) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *RoutingRules) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -143,6 +417,11 @@ func (m *Keyspace) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.ForeignKeyMode != 0 { + i = encodeVarint(dAtA, i, uint64(m.ForeignKeyMode)) + i-- + dAtA[i] = 0x28 + } if m.RequireExplicitRouting { i-- if m.RequireExplicitRouting { @@ -781,6 +1060,9 @@ func (m *Keyspace) SizeVT() (n int) { if m.RequireExplicitRouting { n += 2 } + if m.ForeignKeyMode != 0 { + n += 1 + sov(uint64(m.ForeignKeyMode)) + } n += len(m.unknownFields) return n } @@ -1513,6 +1795,25 @@ func (m *Keyspace) UnmarshalVT(dAtA []byte) error { } } m.RequireExplicitRouting = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForeignKeyMode", wireType) + } + m.ForeignKeyMode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ForeignKeyMode |= Keyspace_ForeignKeyMode(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/go/vt/proto/vtadmin/vtadmin.pb.go b/go/vt/proto/vtadmin/vtadmin.pb.go index 8b961cce4ac..3e41edd5f7e 100644 --- a/go/vt/proto/vtadmin/vtadmin.pb.go +++ b/go/vt/proto/vtadmin/vtadmin.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: vtadmin.proto @@ -2669,6 +2669,177 @@ func (x *GetShardReplicationPositionsResponse) GetReplicationPositions() []*Clus return nil } +type GetSrvKeyspaceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Cells is a list of cells to lookup a SrvKeyspace for. Leaving this empty is + // equivalent to specifying all cells in the topo. + Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` +} + +func (x *GetSrvKeyspaceRequest) Reset() { + *x = GetSrvKeyspaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtadmin_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSrvKeyspaceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSrvKeyspaceRequest) ProtoMessage() {} + +func (x *GetSrvKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSrvKeyspaceRequest.ProtoReflect.Descriptor instead. +func (*GetSrvKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{44} +} + +func (x *GetSrvKeyspaceRequest) GetClusterId() string { + if x != nil { + return x.ClusterId + } + return "" +} + +func (x *GetSrvKeyspaceRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *GetSrvKeyspaceRequest) GetCells() []string { + if x != nil { + return x.Cells + } + return nil +} + +type GetSrvKeyspacesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An optional list of cluster IDs to filter specific clusters + ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + // Cells is a list of cells to lookup a SrvKeyspace for. Leaving this empty is + // equivalent to specifying all cells in the topo. + Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` +} + +func (x *GetSrvKeyspacesRequest) Reset() { + *x = GetSrvKeyspacesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtadmin_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSrvKeyspacesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSrvKeyspacesRequest) ProtoMessage() {} + +func (x *GetSrvKeyspacesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSrvKeyspacesRequest.ProtoReflect.Descriptor instead. +func (*GetSrvKeyspacesRequest) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{45} +} + +func (x *GetSrvKeyspacesRequest) GetClusterIds() []string { + if x != nil { + return x.ClusterIds + } + return nil +} + +func (x *GetSrvKeyspacesRequest) GetCells() []string { + if x != nil { + return x.Cells + } + return nil +} + +type GetSrvKeyspacesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // GetSrvKeyspaces responses for each keyspace + SrvKeyspaces map[string]*vtctldata.GetSrvKeyspacesResponse `protobuf:"bytes,1,rep,name=srv_keyspaces,json=srvKeyspaces,proto3" json:"srv_keyspaces,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *GetSrvKeyspacesResponse) Reset() { + *x = GetSrvKeyspacesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtadmin_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSrvKeyspacesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSrvKeyspacesResponse) ProtoMessage() {} + +func (x *GetSrvKeyspacesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSrvKeyspacesResponse.ProtoReflect.Descriptor instead. +func (*GetSrvKeyspacesResponse) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{46} +} + +func (x *GetSrvKeyspacesResponse) GetSrvKeyspaces() map[string]*vtctldata.GetSrvKeyspacesResponse { + if x != nil { + return x.SrvKeyspaces + } + return nil +} + type GetSrvVSchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2681,7 +2852,7 @@ type GetSrvVSchemaRequest struct { func (x *GetSrvVSchemaRequest) Reset() { *x = GetSrvVSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[44] + mi := &file_vtadmin_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2694,7 +2865,7 @@ func (x *GetSrvVSchemaRequest) String() string { func (*GetSrvVSchemaRequest) ProtoMessage() {} func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[44] + mi := &file_vtadmin_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2707,7 +2878,7 @@ func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSrvVSchemaRequest.ProtoReflect.Descriptor instead. func (*GetSrvVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{44} + return file_vtadmin_proto_rawDescGZIP(), []int{47} } func (x *GetSrvVSchemaRequest) GetClusterId() string { @@ -2736,7 +2907,7 @@ type GetSrvVSchemasRequest struct { func (x *GetSrvVSchemasRequest) Reset() { *x = GetSrvVSchemasRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[45] + mi := &file_vtadmin_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2749,7 +2920,7 @@ func (x *GetSrvVSchemasRequest) String() string { func (*GetSrvVSchemasRequest) ProtoMessage() {} func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[45] + mi := &file_vtadmin_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2762,7 +2933,7 @@ func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSrvVSchemasRequest.ProtoReflect.Descriptor instead. func (*GetSrvVSchemasRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{45} + return file_vtadmin_proto_rawDescGZIP(), []int{48} } func (x *GetSrvVSchemasRequest) GetClusterIds() []string { @@ -2790,7 +2961,7 @@ type GetSrvVSchemasResponse struct { func (x *GetSrvVSchemasResponse) Reset() { *x = GetSrvVSchemasResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[46] + mi := &file_vtadmin_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2803,7 +2974,7 @@ func (x *GetSrvVSchemasResponse) String() string { func (*GetSrvVSchemasResponse) ProtoMessage() {} func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[46] + mi := &file_vtadmin_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2816,7 +2987,7 @@ func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSrvVSchemasResponse.ProtoReflect.Descriptor instead. func (*GetSrvVSchemasResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{46} + return file_vtadmin_proto_rawDescGZIP(), []int{49} } func (x *GetSrvVSchemasResponse) GetSrvVSchemas() []*SrvVSchema { @@ -2838,7 +3009,7 @@ type GetSchemaTableSizeOptions struct { func (x *GetSchemaTableSizeOptions) Reset() { *x = GetSchemaTableSizeOptions{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[47] + mi := &file_vtadmin_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2851,7 +3022,7 @@ func (x *GetSchemaTableSizeOptions) String() string { func (*GetSchemaTableSizeOptions) ProtoMessage() {} func (x *GetSchemaTableSizeOptions) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[47] + mi := &file_vtadmin_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2864,7 +3035,7 @@ func (x *GetSchemaTableSizeOptions) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSchemaTableSizeOptions.ProtoReflect.Descriptor instead. func (*GetSchemaTableSizeOptions) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{47} + return file_vtadmin_proto_rawDescGZIP(), []int{50} } func (x *GetSchemaTableSizeOptions) GetAggregateSizes() bool { @@ -2897,7 +3068,7 @@ type GetTabletRequest struct { func (x *GetTabletRequest) Reset() { *x = GetTabletRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[48] + mi := &file_vtadmin_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2910,7 +3081,7 @@ func (x *GetTabletRequest) String() string { func (*GetTabletRequest) ProtoMessage() {} func (x *GetTabletRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[48] + mi := &file_vtadmin_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2923,7 +3094,7 @@ func (x *GetTabletRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetTabletRequest.ProtoReflect.Descriptor instead. func (*GetTabletRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{48} + return file_vtadmin_proto_rawDescGZIP(), []int{51} } func (x *GetTabletRequest) GetAlias() *topodata.TabletAlias { @@ -2951,7 +3122,7 @@ type GetTabletsRequest struct { func (x *GetTabletsRequest) Reset() { *x = GetTabletsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[49] + mi := &file_vtadmin_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2964,7 +3135,7 @@ func (x *GetTabletsRequest) String() string { func (*GetTabletsRequest) ProtoMessage() {} func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[49] + mi := &file_vtadmin_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2977,7 +3148,7 @@ func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetTabletsRequest.ProtoReflect.Descriptor instead. func (*GetTabletsRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{49} + return file_vtadmin_proto_rawDescGZIP(), []int{52} } func (x *GetTabletsRequest) GetClusterIds() []string { @@ -2998,7 +3169,7 @@ type GetTabletsResponse struct { func (x *GetTabletsResponse) Reset() { *x = GetTabletsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[50] + mi := &file_vtadmin_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3011,7 +3182,7 @@ func (x *GetTabletsResponse) String() string { func (*GetTabletsResponse) ProtoMessage() {} func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[50] + mi := &file_vtadmin_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3024,7 +3195,7 @@ func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetTabletsResponse.ProtoReflect.Descriptor instead. func (*GetTabletsResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{50} + return file_vtadmin_proto_rawDescGZIP(), []int{53} } func (x *GetTabletsResponse) GetTablets() []*Tablet { @@ -3046,7 +3217,7 @@ type GetTopologyPathRequest struct { func (x *GetTopologyPathRequest) Reset() { *x = GetTopologyPathRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[51] + mi := &file_vtadmin_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3059,7 +3230,7 @@ func (x *GetTopologyPathRequest) String() string { func (*GetTopologyPathRequest) ProtoMessage() {} func (x *GetTopologyPathRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[51] + mi := &file_vtadmin_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3072,7 +3243,7 @@ func (x *GetTopologyPathRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetTopologyPathRequest.ProtoReflect.Descriptor instead. func (*GetTopologyPathRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{51} + return file_vtadmin_proto_rawDescGZIP(), []int{54} } func (x *GetTopologyPathRequest) GetClusterId() string { @@ -3101,7 +3272,7 @@ type GetVSchemaRequest struct { func (x *GetVSchemaRequest) Reset() { *x = GetVSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[52] + mi := &file_vtadmin_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3114,7 +3285,7 @@ func (x *GetVSchemaRequest) String() string { func (*GetVSchemaRequest) ProtoMessage() {} func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[52] + mi := &file_vtadmin_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3127,7 +3298,7 @@ func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetVSchemaRequest.ProtoReflect.Descriptor instead. func (*GetVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{52} + return file_vtadmin_proto_rawDescGZIP(), []int{55} } func (x *GetVSchemaRequest) GetClusterId() string { @@ -3155,7 +3326,7 @@ type GetVSchemasRequest struct { func (x *GetVSchemasRequest) Reset() { *x = GetVSchemasRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[53] + mi := &file_vtadmin_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3168,7 +3339,7 @@ func (x *GetVSchemasRequest) String() string { func (*GetVSchemasRequest) ProtoMessage() {} func (x *GetVSchemasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[53] + mi := &file_vtadmin_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3181,7 +3352,7 @@ func (x *GetVSchemasRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetVSchemasRequest.ProtoReflect.Descriptor instead. func (*GetVSchemasRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{53} + return file_vtadmin_proto_rawDescGZIP(), []int{56} } func (x *GetVSchemasRequest) GetClusterIds() []string { @@ -3202,7 +3373,7 @@ type GetVSchemasResponse struct { func (x *GetVSchemasResponse) Reset() { *x = GetVSchemasResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[54] + mi := &file_vtadmin_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3215,7 +3386,7 @@ func (x *GetVSchemasResponse) String() string { func (*GetVSchemasResponse) ProtoMessage() {} func (x *GetVSchemasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[54] + mi := &file_vtadmin_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3228,7 +3399,7 @@ func (x *GetVSchemasResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetVSchemasResponse.ProtoReflect.Descriptor instead. func (*GetVSchemasResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{54} + return file_vtadmin_proto_rawDescGZIP(), []int{57} } func (x *GetVSchemasResponse) GetVSchemas() []*VSchema { @@ -3249,7 +3420,7 @@ type GetVtctldsRequest struct { func (x *GetVtctldsRequest) Reset() { *x = GetVtctldsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[55] + mi := &file_vtadmin_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3262,7 +3433,7 @@ func (x *GetVtctldsRequest) String() string { func (*GetVtctldsRequest) ProtoMessage() {} func (x *GetVtctldsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[55] + mi := &file_vtadmin_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3275,7 +3446,7 @@ func (x *GetVtctldsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetVtctldsRequest.ProtoReflect.Descriptor instead. func (*GetVtctldsRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{55} + return file_vtadmin_proto_rawDescGZIP(), []int{58} } func (x *GetVtctldsRequest) GetClusterIds() []string { @@ -3296,7 +3467,7 @@ type GetVtctldsResponse struct { func (x *GetVtctldsResponse) Reset() { *x = GetVtctldsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[56] + mi := &file_vtadmin_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3309,7 +3480,7 @@ func (x *GetVtctldsResponse) String() string { func (*GetVtctldsResponse) ProtoMessage() {} func (x *GetVtctldsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[56] + mi := &file_vtadmin_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3322,7 +3493,7 @@ func (x *GetVtctldsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetVtctldsResponse.ProtoReflect.Descriptor instead. func (*GetVtctldsResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{56} + return file_vtadmin_proto_rawDescGZIP(), []int{59} } func (x *GetVtctldsResponse) GetVtctlds() []*Vtctld { @@ -3346,7 +3517,7 @@ type GetWorkflowRequest struct { func (x *GetWorkflowRequest) Reset() { *x = GetWorkflowRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[57] + mi := &file_vtadmin_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3359,7 +3530,7 @@ func (x *GetWorkflowRequest) String() string { func (*GetWorkflowRequest) ProtoMessage() {} func (x *GetWorkflowRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[57] + mi := &file_vtadmin_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3372,7 +3543,7 @@ func (x *GetWorkflowRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetWorkflowRequest.ProtoReflect.Descriptor instead. func (*GetWorkflowRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{57} + return file_vtadmin_proto_rawDescGZIP(), []int{60} } func (x *GetWorkflowRequest) GetClusterId() string { @@ -3432,7 +3603,7 @@ type GetWorkflowsRequest struct { func (x *GetWorkflowsRequest) Reset() { *x = GetWorkflowsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[58] + mi := &file_vtadmin_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3445,7 +3616,7 @@ func (x *GetWorkflowsRequest) String() string { func (*GetWorkflowsRequest) ProtoMessage() {} func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[58] + mi := &file_vtadmin_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3458,7 +3629,7 @@ func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetWorkflowsRequest.ProtoReflect.Descriptor instead. func (*GetWorkflowsRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{58} + return file_vtadmin_proto_rawDescGZIP(), []int{61} } func (x *GetWorkflowsRequest) GetClusterIds() []string { @@ -3500,7 +3671,7 @@ type GetWorkflowsResponse struct { func (x *GetWorkflowsResponse) Reset() { *x = GetWorkflowsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[59] + mi := &file_vtadmin_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3513,7 +3684,7 @@ func (x *GetWorkflowsResponse) String() string { func (*GetWorkflowsResponse) ProtoMessage() {} func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[59] + mi := &file_vtadmin_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3526,7 +3697,7 @@ func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetWorkflowsResponse.ProtoReflect.Descriptor instead. func (*GetWorkflowsResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{59} + return file_vtadmin_proto_rawDescGZIP(), []int{62} } func (x *GetWorkflowsResponse) GetWorkflowsByCluster() map[string]*ClusterWorkflows { @@ -3552,7 +3723,7 @@ type PingTabletRequest struct { func (x *PingTabletRequest) Reset() { *x = PingTabletRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[60] + mi := &file_vtadmin_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3565,7 +3736,7 @@ func (x *PingTabletRequest) String() string { func (*PingTabletRequest) ProtoMessage() {} func (x *PingTabletRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[60] + mi := &file_vtadmin_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3578,7 +3749,7 @@ func (x *PingTabletRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PingTabletRequest.ProtoReflect.Descriptor instead. func (*PingTabletRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{60} + return file_vtadmin_proto_rawDescGZIP(), []int{63} } func (x *PingTabletRequest) GetAlias() *topodata.TabletAlias { @@ -3607,7 +3778,7 @@ type PingTabletResponse struct { func (x *PingTabletResponse) Reset() { *x = PingTabletResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[61] + mi := &file_vtadmin_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3620,7 +3791,7 @@ func (x *PingTabletResponse) String() string { func (*PingTabletResponse) ProtoMessage() {} func (x *PingTabletResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[61] + mi := &file_vtadmin_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3633,7 +3804,7 @@ func (x *PingTabletResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PingTabletResponse.ProtoReflect.Descriptor instead. func (*PingTabletResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{61} + return file_vtadmin_proto_rawDescGZIP(), []int{64} } func (x *PingTabletResponse) GetStatus() string { @@ -3662,7 +3833,7 @@ type PlannedFailoverShardRequest struct { func (x *PlannedFailoverShardRequest) Reset() { *x = PlannedFailoverShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[62] + mi := &file_vtadmin_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3675,7 +3846,7 @@ func (x *PlannedFailoverShardRequest) String() string { func (*PlannedFailoverShardRequest) ProtoMessage() {} func (x *PlannedFailoverShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[62] + mi := &file_vtadmin_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3688,7 +3859,7 @@ func (x *PlannedFailoverShardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PlannedFailoverShardRequest.ProtoReflect.Descriptor instead. func (*PlannedFailoverShardRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{62} + return file_vtadmin_proto_rawDescGZIP(), []int{65} } func (x *PlannedFailoverShardRequest) GetClusterId() string { @@ -3724,7 +3895,7 @@ type PlannedFailoverShardResponse struct { func (x *PlannedFailoverShardResponse) Reset() { *x = PlannedFailoverShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[63] + mi := &file_vtadmin_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3737,7 +3908,7 @@ func (x *PlannedFailoverShardResponse) String() string { func (*PlannedFailoverShardResponse) ProtoMessage() {} func (x *PlannedFailoverShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[63] + mi := &file_vtadmin_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3750,7 +3921,7 @@ func (x *PlannedFailoverShardResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PlannedFailoverShardResponse.ProtoReflect.Descriptor instead. func (*PlannedFailoverShardResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{63} + return file_vtadmin_proto_rawDescGZIP(), []int{66} } func (x *PlannedFailoverShardResponse) GetCluster() *Cluster { @@ -3802,7 +3973,7 @@ type RebuildKeyspaceGraphRequest struct { func (x *RebuildKeyspaceGraphRequest) Reset() { *x = RebuildKeyspaceGraphRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[64] + mi := &file_vtadmin_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3815,7 +3986,7 @@ func (x *RebuildKeyspaceGraphRequest) String() string { func (*RebuildKeyspaceGraphRequest) ProtoMessage() {} func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[64] + mi := &file_vtadmin_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3828,7 +3999,7 @@ func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RebuildKeyspaceGraphRequest.ProtoReflect.Descriptor instead. func (*RebuildKeyspaceGraphRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{64} + return file_vtadmin_proto_rawDescGZIP(), []int{67} } func (x *RebuildKeyspaceGraphRequest) GetClusterId() string { @@ -3870,7 +4041,7 @@ type RebuildKeyspaceGraphResponse struct { func (x *RebuildKeyspaceGraphResponse) Reset() { *x = RebuildKeyspaceGraphResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[65] + mi := &file_vtadmin_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3883,7 +4054,7 @@ func (x *RebuildKeyspaceGraphResponse) String() string { func (*RebuildKeyspaceGraphResponse) ProtoMessage() {} func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[65] + mi := &file_vtadmin_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3896,7 +4067,7 @@ func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RebuildKeyspaceGraphResponse.ProtoReflect.Descriptor instead. func (*RebuildKeyspaceGraphResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{65} + return file_vtadmin_proto_rawDescGZIP(), []int{68} } func (x *RebuildKeyspaceGraphResponse) GetStatus() string { @@ -3918,7 +4089,7 @@ type RefreshStateRequest struct { func (x *RefreshStateRequest) Reset() { *x = RefreshStateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[66] + mi := &file_vtadmin_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3931,7 +4102,7 @@ func (x *RefreshStateRequest) String() string { func (*RefreshStateRequest) ProtoMessage() {} func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[66] + mi := &file_vtadmin_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3944,7 +4115,7 @@ func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RefreshStateRequest.ProtoReflect.Descriptor instead. func (*RefreshStateRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{66} + return file_vtadmin_proto_rawDescGZIP(), []int{69} } func (x *RefreshStateRequest) GetAlias() *topodata.TabletAlias { @@ -3973,7 +4144,7 @@ type RefreshStateResponse struct { func (x *RefreshStateResponse) Reset() { *x = RefreshStateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[67] + mi := &file_vtadmin_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3986,7 +4157,7 @@ func (x *RefreshStateResponse) String() string { func (*RefreshStateResponse) ProtoMessage() {} func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[67] + mi := &file_vtadmin_proto_msgTypes[70] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3999,7 +4170,7 @@ func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RefreshStateResponse.ProtoReflect.Descriptor instead. func (*RefreshStateResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{67} + return file_vtadmin_proto_rawDescGZIP(), []int{70} } func (x *RefreshStateResponse) GetStatus() string { @@ -4068,7 +4239,7 @@ type ReloadSchemasRequest struct { func (x *ReloadSchemasRequest) Reset() { *x = ReloadSchemasRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[68] + mi := &file_vtadmin_proto_msgTypes[71] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4081,7 +4252,7 @@ func (x *ReloadSchemasRequest) String() string { func (*ReloadSchemasRequest) ProtoMessage() {} func (x *ReloadSchemasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[68] + mi := &file_vtadmin_proto_msgTypes[71] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4094,7 +4265,7 @@ func (x *ReloadSchemasRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReloadSchemasRequest.ProtoReflect.Descriptor instead. func (*ReloadSchemasRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{68} + return file_vtadmin_proto_rawDescGZIP(), []int{71} } func (x *ReloadSchemasRequest) GetKeyspaces() []string { @@ -4168,7 +4339,7 @@ type ReloadSchemasResponse struct { func (x *ReloadSchemasResponse) Reset() { *x = ReloadSchemasResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[69] + mi := &file_vtadmin_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4181,7 +4352,7 @@ func (x *ReloadSchemasResponse) String() string { func (*ReloadSchemasResponse) ProtoMessage() {} func (x *ReloadSchemasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[69] + mi := &file_vtadmin_proto_msgTypes[72] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4194,7 +4365,7 @@ func (x *ReloadSchemasResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReloadSchemasResponse.ProtoReflect.Descriptor instead. func (*ReloadSchemasResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{69} + return file_vtadmin_proto_rawDescGZIP(), []int{72} } func (x *ReloadSchemasResponse) GetKeyspaceResults() []*ReloadSchemasResponse_KeyspaceResult { @@ -4234,7 +4405,7 @@ type ReloadSchemaShardRequest struct { func (x *ReloadSchemaShardRequest) Reset() { *x = ReloadSchemaShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[70] + mi := &file_vtadmin_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4247,7 +4418,7 @@ func (x *ReloadSchemaShardRequest) String() string { func (*ReloadSchemaShardRequest) ProtoMessage() {} func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[70] + mi := &file_vtadmin_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4260,7 +4431,7 @@ func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReloadSchemaShardRequest.ProtoReflect.Descriptor instead. func (*ReloadSchemaShardRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{70} + return file_vtadmin_proto_rawDescGZIP(), []int{73} } func (x *ReloadSchemaShardRequest) GetClusterId() string { @@ -4316,7 +4487,7 @@ type ReloadSchemaShardResponse struct { func (x *ReloadSchemaShardResponse) Reset() { *x = ReloadSchemaShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[71] + mi := &file_vtadmin_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4329,7 +4500,7 @@ func (x *ReloadSchemaShardResponse) String() string { func (*ReloadSchemaShardResponse) ProtoMessage() {} func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[71] + mi := &file_vtadmin_proto_msgTypes[74] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4342,7 +4513,7 @@ func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReloadSchemaShardResponse.ProtoReflect.Descriptor instead. func (*ReloadSchemaShardResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{71} + return file_vtadmin_proto_rawDescGZIP(), []int{74} } func (x *ReloadSchemaShardResponse) GetEvents() []*logutil.Event { @@ -4364,7 +4535,7 @@ type RefreshTabletReplicationSourceRequest struct { func (x *RefreshTabletReplicationSourceRequest) Reset() { *x = RefreshTabletReplicationSourceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[72] + mi := &file_vtadmin_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4377,7 +4548,7 @@ func (x *RefreshTabletReplicationSourceRequest) String() string { func (*RefreshTabletReplicationSourceRequest) ProtoMessage() {} func (x *RefreshTabletReplicationSourceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[72] + mi := &file_vtadmin_proto_msgTypes[75] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4390,7 +4561,7 @@ func (x *RefreshTabletReplicationSourceRequest) ProtoReflect() protoreflect.Mess // Deprecated: Use RefreshTabletReplicationSourceRequest.ProtoReflect.Descriptor instead. func (*RefreshTabletReplicationSourceRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{72} + return file_vtadmin_proto_rawDescGZIP(), []int{75} } func (x *RefreshTabletReplicationSourceRequest) GetAlias() *topodata.TabletAlias { @@ -4421,7 +4592,7 @@ type RefreshTabletReplicationSourceResponse struct { func (x *RefreshTabletReplicationSourceResponse) Reset() { *x = RefreshTabletReplicationSourceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[73] + mi := &file_vtadmin_proto_msgTypes[76] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4434,7 +4605,7 @@ func (x *RefreshTabletReplicationSourceResponse) String() string { func (*RefreshTabletReplicationSourceResponse) ProtoMessage() {} func (x *RefreshTabletReplicationSourceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[73] + mi := &file_vtadmin_proto_msgTypes[76] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4447,7 +4618,7 @@ func (x *RefreshTabletReplicationSourceResponse) ProtoReflect() protoreflect.Mes // Deprecated: Use RefreshTabletReplicationSourceResponse.ProtoReflect.Descriptor instead. func (*RefreshTabletReplicationSourceResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{73} + return file_vtadmin_proto_rawDescGZIP(), []int{76} } func (x *RefreshTabletReplicationSourceResponse) GetKeyspace() string { @@ -4493,7 +4664,7 @@ type RemoveKeyspaceCellRequest struct { func (x *RemoveKeyspaceCellRequest) Reset() { *x = RemoveKeyspaceCellRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[74] + mi := &file_vtadmin_proto_msgTypes[77] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4506,7 +4677,7 @@ func (x *RemoveKeyspaceCellRequest) String() string { func (*RemoveKeyspaceCellRequest) ProtoMessage() {} func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[74] + mi := &file_vtadmin_proto_msgTypes[77] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4519,7 +4690,7 @@ func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoveKeyspaceCellRequest.ProtoReflect.Descriptor instead. func (*RemoveKeyspaceCellRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{74} + return file_vtadmin_proto_rawDescGZIP(), []int{77} } func (x *RemoveKeyspaceCellRequest) GetClusterId() string { @@ -4568,7 +4739,7 @@ type RemoveKeyspaceCellResponse struct { func (x *RemoveKeyspaceCellResponse) Reset() { *x = RemoveKeyspaceCellResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[75] + mi := &file_vtadmin_proto_msgTypes[78] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4581,7 +4752,7 @@ func (x *RemoveKeyspaceCellResponse) String() string { func (*RemoveKeyspaceCellResponse) ProtoMessage() {} func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[75] + mi := &file_vtadmin_proto_msgTypes[78] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4594,7 +4765,7 @@ func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoveKeyspaceCellResponse.ProtoReflect.Descriptor instead. func (*RemoveKeyspaceCellResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{75} + return file_vtadmin_proto_rawDescGZIP(), []int{78} } func (x *RemoveKeyspaceCellResponse) GetStatus() string { @@ -4616,7 +4787,7 @@ type RunHealthCheckRequest struct { func (x *RunHealthCheckRequest) Reset() { *x = RunHealthCheckRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[76] + mi := &file_vtadmin_proto_msgTypes[79] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4629,7 +4800,7 @@ func (x *RunHealthCheckRequest) String() string { func (*RunHealthCheckRequest) ProtoMessage() {} func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[76] + mi := &file_vtadmin_proto_msgTypes[79] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4642,7 +4813,7 @@ func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RunHealthCheckRequest.ProtoReflect.Descriptor instead. func (*RunHealthCheckRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{76} + return file_vtadmin_proto_rawDescGZIP(), []int{79} } func (x *RunHealthCheckRequest) GetAlias() *topodata.TabletAlias { @@ -4671,7 +4842,7 @@ type RunHealthCheckResponse struct { func (x *RunHealthCheckResponse) Reset() { *x = RunHealthCheckResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[77] + mi := &file_vtadmin_proto_msgTypes[80] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4684,7 +4855,7 @@ func (x *RunHealthCheckResponse) String() string { func (*RunHealthCheckResponse) ProtoMessage() {} func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[77] + mi := &file_vtadmin_proto_msgTypes[80] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4697,7 +4868,7 @@ func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RunHealthCheckResponse.ProtoReflect.Descriptor instead. func (*RunHealthCheckResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{77} + return file_vtadmin_proto_rawDescGZIP(), []int{80} } func (x *RunHealthCheckResponse) GetStatus() string { @@ -4726,7 +4897,7 @@ type SetReadOnlyRequest struct { func (x *SetReadOnlyRequest) Reset() { *x = SetReadOnlyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[78] + mi := &file_vtadmin_proto_msgTypes[81] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4739,7 +4910,7 @@ func (x *SetReadOnlyRequest) String() string { func (*SetReadOnlyRequest) ProtoMessage() {} func (x *SetReadOnlyRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[78] + mi := &file_vtadmin_proto_msgTypes[81] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4752,7 +4923,7 @@ func (x *SetReadOnlyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SetReadOnlyRequest.ProtoReflect.Descriptor instead. func (*SetReadOnlyRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{78} + return file_vtadmin_proto_rawDescGZIP(), []int{81} } func (x *SetReadOnlyRequest) GetAlias() *topodata.TabletAlias { @@ -4778,7 +4949,7 @@ type SetReadOnlyResponse struct { func (x *SetReadOnlyResponse) Reset() { *x = SetReadOnlyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[79] + mi := &file_vtadmin_proto_msgTypes[82] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4791,7 +4962,7 @@ func (x *SetReadOnlyResponse) String() string { func (*SetReadOnlyResponse) ProtoMessage() {} func (x *SetReadOnlyResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[79] + mi := &file_vtadmin_proto_msgTypes[82] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4804,7 +4975,7 @@ func (x *SetReadOnlyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SetReadOnlyResponse.ProtoReflect.Descriptor instead. func (*SetReadOnlyResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{79} + return file_vtadmin_proto_rawDescGZIP(), []int{82} } type SetReadWriteRequest struct { @@ -4819,7 +4990,7 @@ type SetReadWriteRequest struct { func (x *SetReadWriteRequest) Reset() { *x = SetReadWriteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[80] + mi := &file_vtadmin_proto_msgTypes[83] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4832,7 +5003,7 @@ func (x *SetReadWriteRequest) String() string { func (*SetReadWriteRequest) ProtoMessage() {} func (x *SetReadWriteRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[80] + mi := &file_vtadmin_proto_msgTypes[83] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4845,7 +5016,7 @@ func (x *SetReadWriteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SetReadWriteRequest.ProtoReflect.Descriptor instead. func (*SetReadWriteRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{80} + return file_vtadmin_proto_rawDescGZIP(), []int{83} } func (x *SetReadWriteRequest) GetAlias() *topodata.TabletAlias { @@ -4871,7 +5042,7 @@ type SetReadWriteResponse struct { func (x *SetReadWriteResponse) Reset() { *x = SetReadWriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[81] + mi := &file_vtadmin_proto_msgTypes[84] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4884,7 +5055,7 @@ func (x *SetReadWriteResponse) String() string { func (*SetReadWriteResponse) ProtoMessage() {} func (x *SetReadWriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[81] + mi := &file_vtadmin_proto_msgTypes[84] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4897,7 +5068,7 @@ func (x *SetReadWriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SetReadWriteResponse.ProtoReflect.Descriptor instead. func (*SetReadWriteResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{81} + return file_vtadmin_proto_rawDescGZIP(), []int{84} } type StartReplicationRequest struct { @@ -4912,7 +5083,7 @@ type StartReplicationRequest struct { func (x *StartReplicationRequest) Reset() { *x = StartReplicationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[82] + mi := &file_vtadmin_proto_msgTypes[85] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4925,7 +5096,7 @@ func (x *StartReplicationRequest) String() string { func (*StartReplicationRequest) ProtoMessage() {} func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[82] + mi := &file_vtadmin_proto_msgTypes[85] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4938,7 +5109,7 @@ func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StartReplicationRequest.ProtoReflect.Descriptor instead. func (*StartReplicationRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{82} + return file_vtadmin_proto_rawDescGZIP(), []int{85} } func (x *StartReplicationRequest) GetAlias() *topodata.TabletAlias { @@ -4967,7 +5138,7 @@ type StartReplicationResponse struct { func (x *StartReplicationResponse) Reset() { *x = StartReplicationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[83] + mi := &file_vtadmin_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4980,7 +5151,7 @@ func (x *StartReplicationResponse) String() string { func (*StartReplicationResponse) ProtoMessage() {} func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[83] + mi := &file_vtadmin_proto_msgTypes[86] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4993,7 +5164,7 @@ func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StartReplicationResponse.ProtoReflect.Descriptor instead. func (*StartReplicationResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{83} + return file_vtadmin_proto_rawDescGZIP(), []int{86} } func (x *StartReplicationResponse) GetStatus() string { @@ -5022,7 +5193,7 @@ type StopReplicationRequest struct { func (x *StopReplicationRequest) Reset() { *x = StopReplicationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[84] + mi := &file_vtadmin_proto_msgTypes[87] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5035,7 +5206,7 @@ func (x *StopReplicationRequest) String() string { func (*StopReplicationRequest) ProtoMessage() {} func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[84] + mi := &file_vtadmin_proto_msgTypes[87] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5048,7 +5219,7 @@ func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StopReplicationRequest.ProtoReflect.Descriptor instead. func (*StopReplicationRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{84} + return file_vtadmin_proto_rawDescGZIP(), []int{87} } func (x *StopReplicationRequest) GetAlias() *topodata.TabletAlias { @@ -5077,7 +5248,7 @@ type StopReplicationResponse struct { func (x *StopReplicationResponse) Reset() { *x = StopReplicationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[85] + mi := &file_vtadmin_proto_msgTypes[88] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5090,7 +5261,7 @@ func (x *StopReplicationResponse) String() string { func (*StopReplicationResponse) ProtoMessage() {} func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[85] + mi := &file_vtadmin_proto_msgTypes[88] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5103,7 +5274,7 @@ func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StopReplicationResponse.ProtoReflect.Descriptor instead. func (*StopReplicationResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{85} + return file_vtadmin_proto_rawDescGZIP(), []int{88} } func (x *StopReplicationResponse) GetStatus() string { @@ -5134,7 +5305,7 @@ type TabletExternallyPromotedRequest struct { func (x *TabletExternallyPromotedRequest) Reset() { *x = TabletExternallyPromotedRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[86] + mi := &file_vtadmin_proto_msgTypes[89] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5147,7 +5318,7 @@ func (x *TabletExternallyPromotedRequest) String() string { func (*TabletExternallyPromotedRequest) ProtoMessage() {} func (x *TabletExternallyPromotedRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[86] + mi := &file_vtadmin_proto_msgTypes[89] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5160,7 +5331,7 @@ func (x *TabletExternallyPromotedRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use TabletExternallyPromotedRequest.ProtoReflect.Descriptor instead. func (*TabletExternallyPromotedRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{86} + return file_vtadmin_proto_rawDescGZIP(), []int{89} } func (x *TabletExternallyPromotedRequest) GetAlias() *topodata.TabletAlias { @@ -5192,7 +5363,7 @@ type TabletExternallyPromotedResponse struct { func (x *TabletExternallyPromotedResponse) Reset() { *x = TabletExternallyPromotedResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[87] + mi := &file_vtadmin_proto_msgTypes[90] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5205,7 +5376,7 @@ func (x *TabletExternallyPromotedResponse) String() string { func (*TabletExternallyPromotedResponse) ProtoMessage() {} func (x *TabletExternallyPromotedResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[87] + mi := &file_vtadmin_proto_msgTypes[90] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5218,7 +5389,7 @@ func (x *TabletExternallyPromotedResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use TabletExternallyPromotedResponse.ProtoReflect.Descriptor instead. func (*TabletExternallyPromotedResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{87} + return file_vtadmin_proto_rawDescGZIP(), []int{90} } func (x *TabletExternallyPromotedResponse) GetCluster() *Cluster { @@ -5268,7 +5439,7 @@ type TabletExternallyReparentedRequest struct { func (x *TabletExternallyReparentedRequest) Reset() { *x = TabletExternallyReparentedRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[88] + mi := &file_vtadmin_proto_msgTypes[91] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5281,7 +5452,7 @@ func (x *TabletExternallyReparentedRequest) String() string { func (*TabletExternallyReparentedRequest) ProtoMessage() {} func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[88] + mi := &file_vtadmin_proto_msgTypes[91] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5294,7 +5465,7 @@ func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message // Deprecated: Use TabletExternallyReparentedRequest.ProtoReflect.Descriptor instead. func (*TabletExternallyReparentedRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{88} + return file_vtadmin_proto_rawDescGZIP(), []int{91} } func (x *TabletExternallyReparentedRequest) GetAlias() *topodata.TabletAlias { @@ -5323,7 +5494,7 @@ type ValidateRequest struct { func (x *ValidateRequest) Reset() { *x = ValidateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[89] + mi := &file_vtadmin_proto_msgTypes[92] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5336,7 +5507,7 @@ func (x *ValidateRequest) String() string { func (*ValidateRequest) ProtoMessage() {} func (x *ValidateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[89] + mi := &file_vtadmin_proto_msgTypes[92] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5349,7 +5520,7 @@ func (x *ValidateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateRequest.ProtoReflect.Descriptor instead. func (*ValidateRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{89} + return file_vtadmin_proto_rawDescGZIP(), []int{92} } func (x *ValidateRequest) GetClusterId() string { @@ -5379,7 +5550,7 @@ type ValidateKeyspaceRequest struct { func (x *ValidateKeyspaceRequest) Reset() { *x = ValidateKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[90] + mi := &file_vtadmin_proto_msgTypes[93] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5392,7 +5563,7 @@ func (x *ValidateKeyspaceRequest) String() string { func (*ValidateKeyspaceRequest) ProtoMessage() {} func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[90] + mi := &file_vtadmin_proto_msgTypes[93] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5405,7 +5576,7 @@ func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateKeyspaceRequest.ProtoReflect.Descriptor instead. func (*ValidateKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{90} + return file_vtadmin_proto_rawDescGZIP(), []int{93} } func (x *ValidateKeyspaceRequest) GetClusterId() string { @@ -5441,7 +5612,7 @@ type ValidateSchemaKeyspaceRequest struct { func (x *ValidateSchemaKeyspaceRequest) Reset() { *x = ValidateSchemaKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[91] + mi := &file_vtadmin_proto_msgTypes[94] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5454,7 +5625,7 @@ func (x *ValidateSchemaKeyspaceRequest) String() string { func (*ValidateSchemaKeyspaceRequest) ProtoMessage() {} func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[91] + mi := &file_vtadmin_proto_msgTypes[94] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5467,7 +5638,7 @@ func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateSchemaKeyspaceRequest.ProtoReflect.Descriptor instead. func (*ValidateSchemaKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{91} + return file_vtadmin_proto_rawDescGZIP(), []int{94} } func (x *ValidateSchemaKeyspaceRequest) GetClusterId() string { @@ -5498,7 +5669,7 @@ type ValidateShardRequest struct { func (x *ValidateShardRequest) Reset() { *x = ValidateShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[92] + mi := &file_vtadmin_proto_msgTypes[95] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5511,7 +5682,7 @@ func (x *ValidateShardRequest) String() string { func (*ValidateShardRequest) ProtoMessage() {} func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[92] + mi := &file_vtadmin_proto_msgTypes[95] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5524,7 +5695,7 @@ func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateShardRequest.ProtoReflect.Descriptor instead. func (*ValidateShardRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{92} + return file_vtadmin_proto_rawDescGZIP(), []int{95} } func (x *ValidateShardRequest) GetClusterId() string { @@ -5567,7 +5738,7 @@ type ValidateVersionKeyspaceRequest struct { func (x *ValidateVersionKeyspaceRequest) Reset() { *x = ValidateVersionKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[93] + mi := &file_vtadmin_proto_msgTypes[96] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5580,7 +5751,7 @@ func (x *ValidateVersionKeyspaceRequest) String() string { func (*ValidateVersionKeyspaceRequest) ProtoMessage() {} func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[93] + mi := &file_vtadmin_proto_msgTypes[96] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5593,7 +5764,7 @@ func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateVersionKeyspaceRequest.ProtoReflect.Descriptor instead. func (*ValidateVersionKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{93} + return file_vtadmin_proto_rawDescGZIP(), []int{96} } func (x *ValidateVersionKeyspaceRequest) GetClusterId() string { @@ -5623,7 +5794,7 @@ type ValidateVersionShardRequest struct { func (x *ValidateVersionShardRequest) Reset() { *x = ValidateVersionShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[94] + mi := &file_vtadmin_proto_msgTypes[97] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5636,7 +5807,7 @@ func (x *ValidateVersionShardRequest) String() string { func (*ValidateVersionShardRequest) ProtoMessage() {} func (x *ValidateVersionShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[94] + mi := &file_vtadmin_proto_msgTypes[97] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5649,7 +5820,7 @@ func (x *ValidateVersionShardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateVersionShardRequest.ProtoReflect.Descriptor instead. func (*ValidateVersionShardRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{94} + return file_vtadmin_proto_rawDescGZIP(), []int{97} } func (x *ValidateVersionShardRequest) GetClusterId() string { @@ -5686,7 +5857,7 @@ type VTExplainRequest struct { func (x *VTExplainRequest) Reset() { *x = VTExplainRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[95] + mi := &file_vtadmin_proto_msgTypes[98] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5699,7 +5870,7 @@ func (x *VTExplainRequest) String() string { func (*VTExplainRequest) ProtoMessage() {} func (x *VTExplainRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[95] + mi := &file_vtadmin_proto_msgTypes[98] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5712,7 +5883,7 @@ func (x *VTExplainRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VTExplainRequest.ProtoReflect.Descriptor instead. func (*VTExplainRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{95} + return file_vtadmin_proto_rawDescGZIP(), []int{98} } func (x *VTExplainRequest) GetCluster() string { @@ -5747,7 +5918,7 @@ type VTExplainResponse struct { func (x *VTExplainResponse) Reset() { *x = VTExplainResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[96] + mi := &file_vtadmin_proto_msgTypes[99] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5760,7 +5931,7 @@ func (x *VTExplainResponse) String() string { func (*VTExplainResponse) ProtoMessage() {} func (x *VTExplainResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[96] + mi := &file_vtadmin_proto_msgTypes[99] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5773,7 +5944,7 @@ func (x *VTExplainResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VTExplainResponse.ProtoReflect.Descriptor instead. func (*VTExplainResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{96} + return file_vtadmin_proto_rawDescGZIP(), []int{99} } func (x *VTExplainResponse) GetResponse() string { @@ -5795,7 +5966,7 @@ type Schema_ShardTableSize struct { func (x *Schema_ShardTableSize) Reset() { *x = Schema_ShardTableSize{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[100] + mi := &file_vtadmin_proto_msgTypes[103] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5808,7 +5979,7 @@ func (x *Schema_ShardTableSize) String() string { func (*Schema_ShardTableSize) ProtoMessage() {} func (x *Schema_ShardTableSize) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[100] + mi := &file_vtadmin_proto_msgTypes[103] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5853,7 +6024,7 @@ type Schema_TableSize struct { func (x *Schema_TableSize) Reset() { *x = Schema_TableSize{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[101] + mi := &file_vtadmin_proto_msgTypes[104] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5866,7 +6037,7 @@ func (x *Schema_TableSize) String() string { func (*Schema_TableSize) ProtoMessage() {} func (x *Schema_TableSize) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[101] + mi := &file_vtadmin_proto_msgTypes[104] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5921,7 +6092,7 @@ type ReloadSchemasResponse_KeyspaceResult struct { func (x *ReloadSchemasResponse_KeyspaceResult) Reset() { *x = ReloadSchemasResponse_KeyspaceResult{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[104] + mi := &file_vtadmin_proto_msgTypes[108] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5934,7 +6105,7 @@ func (x *ReloadSchemasResponse_KeyspaceResult) String() string { func (*ReloadSchemasResponse_KeyspaceResult) ProtoMessage() {} func (x *ReloadSchemasResponse_KeyspaceResult) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[104] + mi := &file_vtadmin_proto_msgTypes[108] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5947,7 +6118,7 @@ func (x *ReloadSchemasResponse_KeyspaceResult) ProtoReflect() protoreflect.Messa // Deprecated: Use ReloadSchemasResponse_KeyspaceResult.ProtoReflect.Descriptor instead. func (*ReloadSchemasResponse_KeyspaceResult) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{69, 0} + return file_vtadmin_proto_rawDescGZIP(), []int{72, 0} } func (x *ReloadSchemasResponse_KeyspaceResult) GetKeyspace() *Keyspace { @@ -5982,7 +6153,7 @@ type ReloadSchemasResponse_ShardResult struct { func (x *ReloadSchemasResponse_ShardResult) Reset() { *x = ReloadSchemasResponse_ShardResult{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[105] + mi := &file_vtadmin_proto_msgTypes[109] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5995,7 +6166,7 @@ func (x *ReloadSchemasResponse_ShardResult) String() string { func (*ReloadSchemasResponse_ShardResult) ProtoMessage() {} func (x *ReloadSchemasResponse_ShardResult) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[105] + mi := &file_vtadmin_proto_msgTypes[109] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6008,7 +6179,7 @@ func (x *ReloadSchemasResponse_ShardResult) ProtoReflect() protoreflect.Message // Deprecated: Use ReloadSchemasResponse_ShardResult.ProtoReflect.Descriptor instead. func (*ReloadSchemasResponse_ShardResult) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{69, 1} + return file_vtadmin_proto_rawDescGZIP(), []int{72, 1} } func (x *ReloadSchemasResponse_ShardResult) GetShard() *Shard { @@ -6044,7 +6215,7 @@ type ReloadSchemasResponse_TabletResult struct { func (x *ReloadSchemasResponse_TabletResult) Reset() { *x = ReloadSchemasResponse_TabletResult{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[106] + mi := &file_vtadmin_proto_msgTypes[110] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6057,7 +6228,7 @@ func (x *ReloadSchemasResponse_TabletResult) String() string { func (*ReloadSchemasResponse_TabletResult) ProtoMessage() {} func (x *ReloadSchemasResponse_TabletResult) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[106] + mi := &file_vtadmin_proto_msgTypes[110] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6070,7 +6241,7 @@ func (x *ReloadSchemasResponse_TabletResult) ProtoReflect() protoreflect.Message // Deprecated: Use ReloadSchemasResponse_TabletResult.ProtoReflect.Descriptor instead. func (*ReloadSchemasResponse_TabletResult) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{69, 2} + return file_vtadmin_proto_rawDescGZIP(), []int{72, 2} } func (x *ReloadSchemasResponse_TabletResult) GetTablet() *Tablet { @@ -6456,658 +6627,694 @@ var file_vtadmin_proto_rawDesc = []byte{ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, - 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x4e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, + 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x68, 0x0a, 0x15, 0x47, 0x65, + 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, + 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x4f, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, - 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x51, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x37, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0b, 0x73, 0x72, 0x76, - 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x22, 0x81, 0x01, 0x0a, 0x19, 0x47, 0x65, 0x74, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x12, - 0x3b, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x17, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4e, 0x6f, 0x6e, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0x60, 0x0a, 0x10, - 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, - 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x34, - 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x49, 0x64, 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x07, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x4b, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, - 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12, - 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x22, 0x4e, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xd7, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x57, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x72, + 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x1a, 0x63, 0x0a, 0x11, 0x53, 0x72, + 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x22, 0x35, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x4e, 0x0a, 0x15, 0x47, 0x65, + 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x49, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x51, 0x0a, 0x16, 0x47, 0x65, + 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x52, 0x0b, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x22, 0x81, 0x01, + 0x0a, 0x19, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x61, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, + 0x6e, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x4e, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x22, 0x60, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, + 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x49, 0x64, 0x73, 0x22, 0x34, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x44, 0x0a, 0x13, 0x47, 0x65, 0x74, - 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x2d, 0x0a, 0x09, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x08, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x22, - 0x34, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, - 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x52, 0x07, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x29, 0x0a, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x4b, 0x0a, 0x16, 0x47, 0x65, + 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x4e, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x35, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, + 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x44, + 0x0a, 0x13, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x09, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x08, 0x76, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x73, 0x22, 0x34, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, + 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x29, 0x0a, 0x07, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x52, 0x07, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x12, + 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, + 0x6c, 0x79, 0x22, 0xa0, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1c, 0x0a, 0x09, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0xe1, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, + 0x0a, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x76, + 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x1a, 0x60, 0x0a, 0x17, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x61, 0x0a, 0x11, 0x50, 0x69, 0x6e, + 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, + 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x58, 0x0a, 0x12, + 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x7e, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, + 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe6, 0x01, 0x0a, 0x1c, 0x50, 0x6c, 0x61, 0x6e, 0x6e, + 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, + 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, + 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, + 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0x93, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, + 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, + 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x61, + 0x72, 0x74, 0x69, 0x61, 0x6c, 0x22, 0x36, 0x0a, 0x1c, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x63, 0x0a, + 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, + 0x64, 0x73, 0x22, 0x5a, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x9f, + 0x02, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x2f, + 0x0a, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, + 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, + 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x63, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, + 0x22, 0xad, 0x04, 0x0a, 0x15, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x10, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, + 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x4f, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x76, 0x74, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x52, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, 0x67, 0x0a, 0x0e, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, + 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, + 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x1a, 0x5b, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x12, 0x24, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, + 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, + 0x4f, 0x0a, 0x0c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, + 0x27, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x22, 0xdb, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0xa0, 0x01, - 0x0a, 0x13, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, - 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, - 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, - 0x22, 0xe1, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x14, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, - 0x42, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x12, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x1a, 0x60, 0x0a, 0x17, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, - 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x61, 0x0a, 0x11, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, + 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, + 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x43, + 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, + 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x73, 0x22, 0x75, 0x0a, 0x25, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, + 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0xb7, 0x01, 0x0a, 0x26, 0x52, + 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x58, 0x0a, 0x12, 0x50, 0x69, 0x6e, 0x67, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, + 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x22, 0x9e, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, + 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, + 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, + 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x34, 0x0a, 0x1a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x65, 0x0a, 0x15, 0x52, + 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, + 0x64, 0x73, 0x22, 0x5c, 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x22, 0x62, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, + 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x49, 0x64, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, + 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x63, 0x0a, 0x13, 0x53, + 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, + 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, + 0x22, 0x16, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x67, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, + 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, + 0x73, 0x22, 0x5e, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x22, 0x7e, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, - 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, - 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, - 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0xe6, 0x01, 0x0a, 0x1c, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, - 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, - 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, + 0x72, 0x22, 0x66, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, + 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x93, 0x01, 0x0a, 0x1b, 0x52, - 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, - 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, - 0x22, 0x36, 0x0a, 0x1c, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, - 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5a, 0x0a, - 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, - 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, - 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x9f, 0x02, 0x0a, 0x14, 0x52, 0x65, - 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, - 0x12, 0x27, 0x0a, 0x0f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, + 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5d, 0x0a, 0x17, 0x53, 0x74, 0x6f, + 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, + 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, + 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x1f, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, + 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, + 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x63, - 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x23, 0x0a, - 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, - 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0xad, 0x04, 0x0a, 0x15, - 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x2d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0f, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, - 0x4f, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, - 0x12, 0x52, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x73, 0x1a, 0x67, 0x0a, 0x0e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x5b, 0x0a, - 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x24, 0x0a, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x76, 0x74, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x4f, 0x0a, 0x0c, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x27, 0x0a, 0x06, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xdb, 0x01, 0x0a, 0x18, - 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, - 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, - 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, - 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, - 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x43, 0x0a, 0x19, 0x52, 0x65, 0x6c, - 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, - 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x75, - 0x0a, 0x25, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, - 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0xb7, 0x01, 0x0a, 0x26, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, - 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x70, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, - 0x9e, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, - 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, - 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, - 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, - 0x22, 0x34, 0x0a, 0x1a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x65, 0x0a, 0x15, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5c, 0x0a, - 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x62, 0x0a, 0x12, 0x53, - 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0xf0, 0x01, 0x0a, 0x20, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, + 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, + 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, + 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x12, 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x52, 0x0a, 0x6f, 0x6c, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x71, 0x0a, 0x21, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, + 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, - 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x63, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, - 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, - 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x53, - 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x67, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, - 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5e, 0x0a, 0x18, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x66, 0x0a, 0x16, - 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, - 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x49, 0x64, 0x73, 0x22, 0x5d, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x1f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, - 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x49, 0x64, 0x73, 0x22, 0xf0, 0x01, 0x0a, 0x20, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, - 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, - 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, - 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6f, 0x6c, 0x64, - 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x71, 0x0a, 0x21, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, - 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, - 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x53, 0x0a, 0x0f, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, - 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, - 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, - 0x77, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, - 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x5a, 0x0a, 0x1d, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x01, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, - 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x21, - 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x73, 0x22, 0x5b, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x6e, - 0x0a, 0x1b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, - 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x5a, - 0x0a, 0x10, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x22, 0x2f, 0x0a, 0x11, 0x56, 0x54, - 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xa9, 0x20, 0x0a, 0x07, - 0x56, 0x54, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x53, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1b, 0x2e, 0x76, 0x74, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x76, - 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, + 0x53, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x73, 0x22, 0x77, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, + 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x5a, 0x0a, + 0x1d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x01, 0x0a, 0x14, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x5b, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x22, 0x6e, 0x0a, 0x1b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x22, 0x5a, 0x0a, 0x10, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, + 0x03, 0x73, 0x71, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x22, + 0x2f, 0x0a, 0x11, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x32, 0xd9, 0x21, 0x0a, 0x07, 0x56, 0x54, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x53, 0x0a, 0x0e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, + 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, + 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x6b, 0x0a, 0x16, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, - 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, 0x74, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, - 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x6d, - 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3b, - 0x0a, 0x0a, 0x46, 0x69, 0x6e, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x2e, 0x76, - 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, - 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, - 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, - 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x55, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6b, 0x0a, 0x16, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, + 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x12, 0x26, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, + 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, + 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0a, 0x46, 0x69, 0x6e, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x46, 0x69, 0x6e, 0x64, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, + 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, + 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x1a, + 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, + 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, + 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x76, 0x74, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, + 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x4a, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, + 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, + 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, + 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1d, 0x2e, + 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x41, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x12, 0x18, 0x2e, 0x76, + 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, - 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0b, 0x47, - 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x46, 0x75, - 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x41, 0x0a, 0x08, 0x47, - 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x12, 0x18, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x47, - 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, - 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1b, 0x2e, - 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x00, 0x12, - 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, - 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, - 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, - 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x19, 0x2e, 0x76, 0x74, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, - 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x7d, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x2c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x12, 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x76, 0x74, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x47, + 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1a, 0x2e, 0x76, + 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7d, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x45, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, - 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x13, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x72, 0x76, 0x56, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x53, - 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, - 0x09, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x19, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, - 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, - 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x0a, 0x47, - 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0b, 0x47, 0x65, 0x74, - 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, - 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, - 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1b, 0x2e, - 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0x00, 0x12, - 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, - 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, - 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, - 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1a, 0x2e, 0x76, - 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x6e, 0x6e, - 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, - 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, - 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, + 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, + 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, + 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x53, 0x0a, + 0x0e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, + 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, + 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, + 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x39, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, + 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x76, 0x74, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0x00, 0x12, 0x47, 0x0a, + 0x0a, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, + 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, + 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, + 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x3c, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, + 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x76, 0x74, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x4a, + 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1b, 0x2e, + 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, + 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, + 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, + 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x67, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, + 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, - 0x0a, 0x14, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, - 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x83, 0x01, 0x0a, 0x1e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x2e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x50, 0x0a, 0x0d, 0x52, 0x65, - 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1d, 0x2e, 0x76, 0x74, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5c, 0x0a, 0x11, - 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, - 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, - 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x12, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, - 0x12, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, - 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, - 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x0e, 0x52, - 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1e, 0x2e, - 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, - 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x4a, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, - 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, - 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, - 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, - 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, - 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x76, - 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x59, 0x0a, 0x10, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, - 0x0a, 0x18, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, - 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x43, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e, - 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, + 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, + 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, + 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x14, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x24, 0x2e, 0x76, 0x74, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x62, 0x75, + 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x52, 0x65, + 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x83, 0x01, 0x0a, 0x1e, 0x52, 0x65, + 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x2e, 0x2e, 0x76, + 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, + 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x50, 0x0a, 0x0d, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, + 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, + 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x5c, 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x5f, 0x0a, 0x12, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, + 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x53, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x75, 0x6e, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x75, 0x6e, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, + 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, + 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, + 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, + 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, + 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x59, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x0f, 0x53, + 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, + 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x18, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x12, + 0x28, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, + 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x12, 0x18, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x2e, - 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x17, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x67, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x12, 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x44, 0x0a, 0x09, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x19, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x67, 0x0a, + 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x44, 0x0a, 0x09, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, + 0x61, 0x69, 0x6e, 0x12, 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x54, + 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, - 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x26, 0x5a, 0x24, 0x76, 0x69, 0x74, 0x65, 0x73, - 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, - 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x26, 0x5a, 0x24, + 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, + 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -7123,7 +7330,7 @@ func file_vtadmin_proto_rawDescGZIP() []byte { } var file_vtadmin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_vtadmin_proto_msgTypes = make([]protoimpl.MessageInfo, 107) +var file_vtadmin_proto_msgTypes = make([]protoimpl.MessageInfo, 111) var file_vtadmin_proto_goTypes = []interface{}{ (Tablet_ServingState)(0), // 0: vtadmin.Tablet.ServingState (*Cluster)(nil), // 1: vtadmin.Cluster @@ -7170,303 +7377,314 @@ var file_vtadmin_proto_goTypes = []interface{}{ (*GetSchemasResponse)(nil), // 42: vtadmin.GetSchemasResponse (*GetShardReplicationPositionsRequest)(nil), // 43: vtadmin.GetShardReplicationPositionsRequest (*GetShardReplicationPositionsResponse)(nil), // 44: vtadmin.GetShardReplicationPositionsResponse - (*GetSrvVSchemaRequest)(nil), // 45: vtadmin.GetSrvVSchemaRequest - (*GetSrvVSchemasRequest)(nil), // 46: vtadmin.GetSrvVSchemasRequest - (*GetSrvVSchemasResponse)(nil), // 47: vtadmin.GetSrvVSchemasResponse - (*GetSchemaTableSizeOptions)(nil), // 48: vtadmin.GetSchemaTableSizeOptions - (*GetTabletRequest)(nil), // 49: vtadmin.GetTabletRequest - (*GetTabletsRequest)(nil), // 50: vtadmin.GetTabletsRequest - (*GetTabletsResponse)(nil), // 51: vtadmin.GetTabletsResponse - (*GetTopologyPathRequest)(nil), // 52: vtadmin.GetTopologyPathRequest - (*GetVSchemaRequest)(nil), // 53: vtadmin.GetVSchemaRequest - (*GetVSchemasRequest)(nil), // 54: vtadmin.GetVSchemasRequest - (*GetVSchemasResponse)(nil), // 55: vtadmin.GetVSchemasResponse - (*GetVtctldsRequest)(nil), // 56: vtadmin.GetVtctldsRequest - (*GetVtctldsResponse)(nil), // 57: vtadmin.GetVtctldsResponse - (*GetWorkflowRequest)(nil), // 58: vtadmin.GetWorkflowRequest - (*GetWorkflowsRequest)(nil), // 59: vtadmin.GetWorkflowsRequest - (*GetWorkflowsResponse)(nil), // 60: vtadmin.GetWorkflowsResponse - (*PingTabletRequest)(nil), // 61: vtadmin.PingTabletRequest - (*PingTabletResponse)(nil), // 62: vtadmin.PingTabletResponse - (*PlannedFailoverShardRequest)(nil), // 63: vtadmin.PlannedFailoverShardRequest - (*PlannedFailoverShardResponse)(nil), // 64: vtadmin.PlannedFailoverShardResponse - (*RebuildKeyspaceGraphRequest)(nil), // 65: vtadmin.RebuildKeyspaceGraphRequest - (*RebuildKeyspaceGraphResponse)(nil), // 66: vtadmin.RebuildKeyspaceGraphResponse - (*RefreshStateRequest)(nil), // 67: vtadmin.RefreshStateRequest - (*RefreshStateResponse)(nil), // 68: vtadmin.RefreshStateResponse - (*ReloadSchemasRequest)(nil), // 69: vtadmin.ReloadSchemasRequest - (*ReloadSchemasResponse)(nil), // 70: vtadmin.ReloadSchemasResponse - (*ReloadSchemaShardRequest)(nil), // 71: vtadmin.ReloadSchemaShardRequest - (*ReloadSchemaShardResponse)(nil), // 72: vtadmin.ReloadSchemaShardResponse - (*RefreshTabletReplicationSourceRequest)(nil), // 73: vtadmin.RefreshTabletReplicationSourceRequest - (*RefreshTabletReplicationSourceResponse)(nil), // 74: vtadmin.RefreshTabletReplicationSourceResponse - (*RemoveKeyspaceCellRequest)(nil), // 75: vtadmin.RemoveKeyspaceCellRequest - (*RemoveKeyspaceCellResponse)(nil), // 76: vtadmin.RemoveKeyspaceCellResponse - (*RunHealthCheckRequest)(nil), // 77: vtadmin.RunHealthCheckRequest - (*RunHealthCheckResponse)(nil), // 78: vtadmin.RunHealthCheckResponse - (*SetReadOnlyRequest)(nil), // 79: vtadmin.SetReadOnlyRequest - (*SetReadOnlyResponse)(nil), // 80: vtadmin.SetReadOnlyResponse - (*SetReadWriteRequest)(nil), // 81: vtadmin.SetReadWriteRequest - (*SetReadWriteResponse)(nil), // 82: vtadmin.SetReadWriteResponse - (*StartReplicationRequest)(nil), // 83: vtadmin.StartReplicationRequest - (*StartReplicationResponse)(nil), // 84: vtadmin.StartReplicationResponse - (*StopReplicationRequest)(nil), // 85: vtadmin.StopReplicationRequest - (*StopReplicationResponse)(nil), // 86: vtadmin.StopReplicationResponse - (*TabletExternallyPromotedRequest)(nil), // 87: vtadmin.TabletExternallyPromotedRequest - (*TabletExternallyPromotedResponse)(nil), // 88: vtadmin.TabletExternallyPromotedResponse - (*TabletExternallyReparentedRequest)(nil), // 89: vtadmin.TabletExternallyReparentedRequest - (*ValidateRequest)(nil), // 90: vtadmin.ValidateRequest - (*ValidateKeyspaceRequest)(nil), // 91: vtadmin.ValidateKeyspaceRequest - (*ValidateSchemaKeyspaceRequest)(nil), // 92: vtadmin.ValidateSchemaKeyspaceRequest - (*ValidateShardRequest)(nil), // 93: vtadmin.ValidateShardRequest - (*ValidateVersionKeyspaceRequest)(nil), // 94: vtadmin.ValidateVersionKeyspaceRequest - (*ValidateVersionShardRequest)(nil), // 95: vtadmin.ValidateVersionShardRequest - (*VTExplainRequest)(nil), // 96: vtadmin.VTExplainRequest - (*VTExplainResponse)(nil), // 97: vtadmin.VTExplainResponse - nil, // 98: vtadmin.ClusterCellsAliases.AliasesEntry - nil, // 99: vtadmin.Keyspace.ShardsEntry - nil, // 100: vtadmin.Schema.TableSizesEntry - (*Schema_ShardTableSize)(nil), // 101: vtadmin.Schema.ShardTableSize - (*Schema_TableSize)(nil), // 102: vtadmin.Schema.TableSize - nil, // 103: vtadmin.Schema.TableSize.ByShardEntry - nil, // 104: vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry - (*ReloadSchemasResponse_KeyspaceResult)(nil), // 105: vtadmin.ReloadSchemasResponse.KeyspaceResult - (*ReloadSchemasResponse_ShardResult)(nil), // 106: vtadmin.ReloadSchemasResponse.ShardResult - (*ReloadSchemasResponse_TabletResult)(nil), // 107: vtadmin.ReloadSchemasResponse.TabletResult - (*mysqlctl.BackupInfo)(nil), // 108: mysqlctl.BackupInfo - (*topodata.CellInfo)(nil), // 109: topodata.CellInfo - (*vtctldata.ShardReplicationPositionsResponse)(nil), // 110: vtctldata.ShardReplicationPositionsResponse - (*vtctldata.Keyspace)(nil), // 111: vtctldata.Keyspace - (*tabletmanagerdata.TableDefinition)(nil), // 112: tabletmanagerdata.TableDefinition - (*vtctldata.Shard)(nil), // 113: vtctldata.Shard - (*vschema.SrvVSchema)(nil), // 114: vschema.SrvVSchema - (*topodata.Tablet)(nil), // 115: topodata.Tablet - (*vschema.Keyspace)(nil), // 116: vschema.Keyspace - (*vtctldata.Workflow)(nil), // 117: vtctldata.Workflow - (*vtctldata.CreateKeyspaceRequest)(nil), // 118: vtctldata.CreateKeyspaceRequest - (*vtctldata.CreateShardRequest)(nil), // 119: vtctldata.CreateShardRequest - (*vtctldata.DeleteKeyspaceRequest)(nil), // 120: vtctldata.DeleteKeyspaceRequest - (*vtctldata.DeleteShardsRequest)(nil), // 121: vtctldata.DeleteShardsRequest - (*topodata.TabletAlias)(nil), // 122: topodata.TabletAlias - (*vtctldata.EmergencyReparentShardRequest)(nil), // 123: vtctldata.EmergencyReparentShardRequest - (*logutil.Event)(nil), // 124: logutil.Event - (*vtctldata.GetBackupsRequest)(nil), // 125: vtctldata.GetBackupsRequest - (*vtctldata.PlannedReparentShardRequest)(nil), // 126: vtctldata.PlannedReparentShardRequest - (*topodata.CellsAlias)(nil), // 127: topodata.CellsAlias - (*vtctldata.CreateShardResponse)(nil), // 128: vtctldata.CreateShardResponse - (*vtctldata.DeleteKeyspaceResponse)(nil), // 129: vtctldata.DeleteKeyspaceResponse - (*vtctldata.DeleteShardsResponse)(nil), // 130: vtctldata.DeleteShardsResponse - (*vtctldata.GetFullStatusResponse)(nil), // 131: vtctldata.GetFullStatusResponse - (*vtctldata.GetTopologyPathResponse)(nil), // 132: vtctldata.GetTopologyPathResponse - (*vtctldata.ValidateResponse)(nil), // 133: vtctldata.ValidateResponse - (*vtctldata.ValidateKeyspaceResponse)(nil), // 134: vtctldata.ValidateKeyspaceResponse - (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 135: vtctldata.ValidateSchemaKeyspaceResponse - (*vtctldata.ValidateShardResponse)(nil), // 136: vtctldata.ValidateShardResponse - (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 137: vtctldata.ValidateVersionKeyspaceResponse - (*vtctldata.ValidateVersionShardResponse)(nil), // 138: vtctldata.ValidateVersionShardResponse + (*GetSrvKeyspaceRequest)(nil), // 45: vtadmin.GetSrvKeyspaceRequest + (*GetSrvKeyspacesRequest)(nil), // 46: vtadmin.GetSrvKeyspacesRequest + (*GetSrvKeyspacesResponse)(nil), // 47: vtadmin.GetSrvKeyspacesResponse + (*GetSrvVSchemaRequest)(nil), // 48: vtadmin.GetSrvVSchemaRequest + (*GetSrvVSchemasRequest)(nil), // 49: vtadmin.GetSrvVSchemasRequest + (*GetSrvVSchemasResponse)(nil), // 50: vtadmin.GetSrvVSchemasResponse + (*GetSchemaTableSizeOptions)(nil), // 51: vtadmin.GetSchemaTableSizeOptions + (*GetTabletRequest)(nil), // 52: vtadmin.GetTabletRequest + (*GetTabletsRequest)(nil), // 53: vtadmin.GetTabletsRequest + (*GetTabletsResponse)(nil), // 54: vtadmin.GetTabletsResponse + (*GetTopologyPathRequest)(nil), // 55: vtadmin.GetTopologyPathRequest + (*GetVSchemaRequest)(nil), // 56: vtadmin.GetVSchemaRequest + (*GetVSchemasRequest)(nil), // 57: vtadmin.GetVSchemasRequest + (*GetVSchemasResponse)(nil), // 58: vtadmin.GetVSchemasResponse + (*GetVtctldsRequest)(nil), // 59: vtadmin.GetVtctldsRequest + (*GetVtctldsResponse)(nil), // 60: vtadmin.GetVtctldsResponse + (*GetWorkflowRequest)(nil), // 61: vtadmin.GetWorkflowRequest + (*GetWorkflowsRequest)(nil), // 62: vtadmin.GetWorkflowsRequest + (*GetWorkflowsResponse)(nil), // 63: vtadmin.GetWorkflowsResponse + (*PingTabletRequest)(nil), // 64: vtadmin.PingTabletRequest + (*PingTabletResponse)(nil), // 65: vtadmin.PingTabletResponse + (*PlannedFailoverShardRequest)(nil), // 66: vtadmin.PlannedFailoverShardRequest + (*PlannedFailoverShardResponse)(nil), // 67: vtadmin.PlannedFailoverShardResponse + (*RebuildKeyspaceGraphRequest)(nil), // 68: vtadmin.RebuildKeyspaceGraphRequest + (*RebuildKeyspaceGraphResponse)(nil), // 69: vtadmin.RebuildKeyspaceGraphResponse + (*RefreshStateRequest)(nil), // 70: vtadmin.RefreshStateRequest + (*RefreshStateResponse)(nil), // 71: vtadmin.RefreshStateResponse + (*ReloadSchemasRequest)(nil), // 72: vtadmin.ReloadSchemasRequest + (*ReloadSchemasResponse)(nil), // 73: vtadmin.ReloadSchemasResponse + (*ReloadSchemaShardRequest)(nil), // 74: vtadmin.ReloadSchemaShardRequest + (*ReloadSchemaShardResponse)(nil), // 75: vtadmin.ReloadSchemaShardResponse + (*RefreshTabletReplicationSourceRequest)(nil), // 76: vtadmin.RefreshTabletReplicationSourceRequest + (*RefreshTabletReplicationSourceResponse)(nil), // 77: vtadmin.RefreshTabletReplicationSourceResponse + (*RemoveKeyspaceCellRequest)(nil), // 78: vtadmin.RemoveKeyspaceCellRequest + (*RemoveKeyspaceCellResponse)(nil), // 79: vtadmin.RemoveKeyspaceCellResponse + (*RunHealthCheckRequest)(nil), // 80: vtadmin.RunHealthCheckRequest + (*RunHealthCheckResponse)(nil), // 81: vtadmin.RunHealthCheckResponse + (*SetReadOnlyRequest)(nil), // 82: vtadmin.SetReadOnlyRequest + (*SetReadOnlyResponse)(nil), // 83: vtadmin.SetReadOnlyResponse + (*SetReadWriteRequest)(nil), // 84: vtadmin.SetReadWriteRequest + (*SetReadWriteResponse)(nil), // 85: vtadmin.SetReadWriteResponse + (*StartReplicationRequest)(nil), // 86: vtadmin.StartReplicationRequest + (*StartReplicationResponse)(nil), // 87: vtadmin.StartReplicationResponse + (*StopReplicationRequest)(nil), // 88: vtadmin.StopReplicationRequest + (*StopReplicationResponse)(nil), // 89: vtadmin.StopReplicationResponse + (*TabletExternallyPromotedRequest)(nil), // 90: vtadmin.TabletExternallyPromotedRequest + (*TabletExternallyPromotedResponse)(nil), // 91: vtadmin.TabletExternallyPromotedResponse + (*TabletExternallyReparentedRequest)(nil), // 92: vtadmin.TabletExternallyReparentedRequest + (*ValidateRequest)(nil), // 93: vtadmin.ValidateRequest + (*ValidateKeyspaceRequest)(nil), // 94: vtadmin.ValidateKeyspaceRequest + (*ValidateSchemaKeyspaceRequest)(nil), // 95: vtadmin.ValidateSchemaKeyspaceRequest + (*ValidateShardRequest)(nil), // 96: vtadmin.ValidateShardRequest + (*ValidateVersionKeyspaceRequest)(nil), // 97: vtadmin.ValidateVersionKeyspaceRequest + (*ValidateVersionShardRequest)(nil), // 98: vtadmin.ValidateVersionShardRequest + (*VTExplainRequest)(nil), // 99: vtadmin.VTExplainRequest + (*VTExplainResponse)(nil), // 100: vtadmin.VTExplainResponse + nil, // 101: vtadmin.ClusterCellsAliases.AliasesEntry + nil, // 102: vtadmin.Keyspace.ShardsEntry + nil, // 103: vtadmin.Schema.TableSizesEntry + (*Schema_ShardTableSize)(nil), // 104: vtadmin.Schema.ShardTableSize + (*Schema_TableSize)(nil), // 105: vtadmin.Schema.TableSize + nil, // 106: vtadmin.Schema.TableSize.ByShardEntry + nil, // 107: vtadmin.GetSrvKeyspacesResponse.SrvKeyspacesEntry + nil, // 108: vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry + (*ReloadSchemasResponse_KeyspaceResult)(nil), // 109: vtadmin.ReloadSchemasResponse.KeyspaceResult + (*ReloadSchemasResponse_ShardResult)(nil), // 110: vtadmin.ReloadSchemasResponse.ShardResult + (*ReloadSchemasResponse_TabletResult)(nil), // 111: vtadmin.ReloadSchemasResponse.TabletResult + (*mysqlctl.BackupInfo)(nil), // 112: mysqlctl.BackupInfo + (*topodata.CellInfo)(nil), // 113: topodata.CellInfo + (*vtctldata.ShardReplicationPositionsResponse)(nil), // 114: vtctldata.ShardReplicationPositionsResponse + (*vtctldata.Keyspace)(nil), // 115: vtctldata.Keyspace + (*tabletmanagerdata.TableDefinition)(nil), // 116: tabletmanagerdata.TableDefinition + (*vtctldata.Shard)(nil), // 117: vtctldata.Shard + (*vschema.SrvVSchema)(nil), // 118: vschema.SrvVSchema + (*topodata.Tablet)(nil), // 119: topodata.Tablet + (*vschema.Keyspace)(nil), // 120: vschema.Keyspace + (*vtctldata.Workflow)(nil), // 121: vtctldata.Workflow + (*vtctldata.CreateKeyspaceRequest)(nil), // 122: vtctldata.CreateKeyspaceRequest + (*vtctldata.CreateShardRequest)(nil), // 123: vtctldata.CreateShardRequest + (*vtctldata.DeleteKeyspaceRequest)(nil), // 124: vtctldata.DeleteKeyspaceRequest + (*vtctldata.DeleteShardsRequest)(nil), // 125: vtctldata.DeleteShardsRequest + (*topodata.TabletAlias)(nil), // 126: topodata.TabletAlias + (*vtctldata.EmergencyReparentShardRequest)(nil), // 127: vtctldata.EmergencyReparentShardRequest + (*logutil.Event)(nil), // 128: logutil.Event + (*vtctldata.GetBackupsRequest)(nil), // 129: vtctldata.GetBackupsRequest + (*vtctldata.PlannedReparentShardRequest)(nil), // 130: vtctldata.PlannedReparentShardRequest + (*topodata.CellsAlias)(nil), // 131: topodata.CellsAlias + (*vtctldata.GetSrvKeyspacesResponse)(nil), // 132: vtctldata.GetSrvKeyspacesResponse + (*vtctldata.CreateShardResponse)(nil), // 133: vtctldata.CreateShardResponse + (*vtctldata.DeleteKeyspaceResponse)(nil), // 134: vtctldata.DeleteKeyspaceResponse + (*vtctldata.DeleteShardsResponse)(nil), // 135: vtctldata.DeleteShardsResponse + (*vtctldata.GetFullStatusResponse)(nil), // 136: vtctldata.GetFullStatusResponse + (*vtctldata.GetTopologyPathResponse)(nil), // 137: vtctldata.GetTopologyPathResponse + (*vtctldata.ValidateResponse)(nil), // 138: vtctldata.ValidateResponse + (*vtctldata.ValidateKeyspaceResponse)(nil), // 139: vtctldata.ValidateKeyspaceResponse + (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 140: vtctldata.ValidateSchemaKeyspaceResponse + (*vtctldata.ValidateShardResponse)(nil), // 141: vtctldata.ValidateShardResponse + (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 142: vtctldata.ValidateVersionKeyspaceResponse + (*vtctldata.ValidateVersionShardResponse)(nil), // 143: vtctldata.ValidateVersionShardResponse } var file_vtadmin_proto_depIdxs = []int32{ 1, // 0: vtadmin.ClusterBackup.cluster:type_name -> vtadmin.Cluster - 108, // 1: vtadmin.ClusterBackup.backup:type_name -> mysqlctl.BackupInfo + 112, // 1: vtadmin.ClusterBackup.backup:type_name -> mysqlctl.BackupInfo 1, // 2: vtadmin.ClusterCellsAliases.cluster:type_name -> vtadmin.Cluster - 98, // 3: vtadmin.ClusterCellsAliases.aliases:type_name -> vtadmin.ClusterCellsAliases.AliasesEntry + 101, // 3: vtadmin.ClusterCellsAliases.aliases:type_name -> vtadmin.ClusterCellsAliases.AliasesEntry 1, // 4: vtadmin.ClusterCellInfo.cluster:type_name -> vtadmin.Cluster - 109, // 5: vtadmin.ClusterCellInfo.cell_info:type_name -> topodata.CellInfo + 113, // 5: vtadmin.ClusterCellInfo.cell_info:type_name -> topodata.CellInfo 1, // 6: vtadmin.ClusterShardReplicationPosition.cluster:type_name -> vtadmin.Cluster - 110, // 7: vtadmin.ClusterShardReplicationPosition.position_info:type_name -> vtctldata.ShardReplicationPositionsResponse + 114, // 7: vtadmin.ClusterShardReplicationPosition.position_info:type_name -> vtctldata.ShardReplicationPositionsResponse 15, // 8: vtadmin.ClusterWorkflows.workflows:type_name -> vtadmin.Workflow 1, // 9: vtadmin.Keyspace.cluster:type_name -> vtadmin.Cluster - 111, // 10: vtadmin.Keyspace.keyspace:type_name -> vtctldata.Keyspace - 99, // 11: vtadmin.Keyspace.shards:type_name -> vtadmin.Keyspace.ShardsEntry + 115, // 10: vtadmin.Keyspace.keyspace:type_name -> vtctldata.Keyspace + 102, // 11: vtadmin.Keyspace.shards:type_name -> vtadmin.Keyspace.ShardsEntry 1, // 12: vtadmin.Schema.cluster:type_name -> vtadmin.Cluster - 112, // 13: vtadmin.Schema.table_definitions:type_name -> tabletmanagerdata.TableDefinition - 100, // 14: vtadmin.Schema.table_sizes:type_name -> vtadmin.Schema.TableSizesEntry + 116, // 13: vtadmin.Schema.table_definitions:type_name -> tabletmanagerdata.TableDefinition + 103, // 14: vtadmin.Schema.table_sizes:type_name -> vtadmin.Schema.TableSizesEntry 1, // 15: vtadmin.Shard.cluster:type_name -> vtadmin.Cluster - 113, // 16: vtadmin.Shard.shard:type_name -> vtctldata.Shard + 117, // 16: vtadmin.Shard.shard:type_name -> vtctldata.Shard 1, // 17: vtadmin.SrvVSchema.cluster:type_name -> vtadmin.Cluster - 114, // 18: vtadmin.SrvVSchema.srv_v_schema:type_name -> vschema.SrvVSchema + 118, // 18: vtadmin.SrvVSchema.srv_v_schema:type_name -> vschema.SrvVSchema 1, // 19: vtadmin.Tablet.cluster:type_name -> vtadmin.Cluster - 115, // 20: vtadmin.Tablet.tablet:type_name -> topodata.Tablet + 119, // 20: vtadmin.Tablet.tablet:type_name -> topodata.Tablet 0, // 21: vtadmin.Tablet.state:type_name -> vtadmin.Tablet.ServingState 1, // 22: vtadmin.VSchema.cluster:type_name -> vtadmin.Cluster - 116, // 23: vtadmin.VSchema.v_schema:type_name -> vschema.Keyspace + 120, // 23: vtadmin.VSchema.v_schema:type_name -> vschema.Keyspace 1, // 24: vtadmin.Vtctld.cluster:type_name -> vtadmin.Cluster 1, // 25: vtadmin.VTGate.cluster:type_name -> vtadmin.Cluster 1, // 26: vtadmin.Workflow.cluster:type_name -> vtadmin.Cluster - 117, // 27: vtadmin.Workflow.workflow:type_name -> vtctldata.Workflow - 118, // 28: vtadmin.CreateKeyspaceRequest.options:type_name -> vtctldata.CreateKeyspaceRequest + 121, // 27: vtadmin.Workflow.workflow:type_name -> vtctldata.Workflow + 122, // 28: vtadmin.CreateKeyspaceRequest.options:type_name -> vtctldata.CreateKeyspaceRequest 7, // 29: vtadmin.CreateKeyspaceResponse.keyspace:type_name -> vtadmin.Keyspace - 119, // 30: vtadmin.CreateShardRequest.options:type_name -> vtctldata.CreateShardRequest - 120, // 31: vtadmin.DeleteKeyspaceRequest.options:type_name -> vtctldata.DeleteKeyspaceRequest - 121, // 32: vtadmin.DeleteShardsRequest.options:type_name -> vtctldata.DeleteShardsRequest - 122, // 33: vtadmin.DeleteTabletRequest.alias:type_name -> topodata.TabletAlias + 123, // 30: vtadmin.CreateShardRequest.options:type_name -> vtctldata.CreateShardRequest + 124, // 31: vtadmin.DeleteKeyspaceRequest.options:type_name -> vtctldata.DeleteKeyspaceRequest + 125, // 32: vtadmin.DeleteShardsRequest.options:type_name -> vtctldata.DeleteShardsRequest + 126, // 33: vtadmin.DeleteTabletRequest.alias:type_name -> topodata.TabletAlias 1, // 34: vtadmin.DeleteTabletResponse.cluster:type_name -> vtadmin.Cluster - 123, // 35: vtadmin.EmergencyFailoverShardRequest.options:type_name -> vtctldata.EmergencyReparentShardRequest + 127, // 35: vtadmin.EmergencyFailoverShardRequest.options:type_name -> vtctldata.EmergencyReparentShardRequest 1, // 36: vtadmin.EmergencyFailoverShardResponse.cluster:type_name -> vtadmin.Cluster - 122, // 37: vtadmin.EmergencyFailoverShardResponse.promoted_primary:type_name -> topodata.TabletAlias - 124, // 38: vtadmin.EmergencyFailoverShardResponse.events:type_name -> logutil.Event - 48, // 39: vtadmin.FindSchemaRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions - 125, // 40: vtadmin.GetBackupsRequest.request_options:type_name -> vtctldata.GetBackupsRequest + 126, // 37: vtadmin.EmergencyFailoverShardResponse.promoted_primary:type_name -> topodata.TabletAlias + 128, // 38: vtadmin.EmergencyFailoverShardResponse.events:type_name -> logutil.Event + 51, // 39: vtadmin.FindSchemaRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions + 129, // 40: vtadmin.GetBackupsRequest.request_options:type_name -> vtctldata.GetBackupsRequest 2, // 41: vtadmin.GetBackupsResponse.backups:type_name -> vtadmin.ClusterBackup 4, // 42: vtadmin.GetCellInfosResponse.cell_infos:type_name -> vtadmin.ClusterCellInfo 3, // 43: vtadmin.GetCellsAliasesResponse.aliases:type_name -> vtadmin.ClusterCellsAliases 1, // 44: vtadmin.GetClustersResponse.clusters:type_name -> vtadmin.Cluster - 122, // 45: vtadmin.GetFullStatusRequest.alias:type_name -> topodata.TabletAlias + 126, // 45: vtadmin.GetFullStatusRequest.alias:type_name -> topodata.TabletAlias 14, // 46: vtadmin.GetGatesResponse.gates:type_name -> vtadmin.VTGate 7, // 47: vtadmin.GetKeyspacesResponse.keyspaces:type_name -> vtadmin.Keyspace - 48, // 48: vtadmin.GetSchemaRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions - 48, // 49: vtadmin.GetSchemasRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions + 51, // 48: vtadmin.GetSchemaRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions + 51, // 49: vtadmin.GetSchemasRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions 8, // 50: vtadmin.GetSchemasResponse.schemas:type_name -> vtadmin.Schema 5, // 51: vtadmin.GetShardReplicationPositionsResponse.replication_positions:type_name -> vtadmin.ClusterShardReplicationPosition - 10, // 52: vtadmin.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtadmin.SrvVSchema - 122, // 53: vtadmin.GetTabletRequest.alias:type_name -> topodata.TabletAlias - 11, // 54: vtadmin.GetTabletsResponse.tablets:type_name -> vtadmin.Tablet - 12, // 55: vtadmin.GetVSchemasResponse.v_schemas:type_name -> vtadmin.VSchema - 13, // 56: vtadmin.GetVtctldsResponse.vtctlds:type_name -> vtadmin.Vtctld - 104, // 57: vtadmin.GetWorkflowsResponse.workflows_by_cluster:type_name -> vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry - 122, // 58: vtadmin.PingTabletRequest.alias:type_name -> topodata.TabletAlias - 1, // 59: vtadmin.PingTabletResponse.cluster:type_name -> vtadmin.Cluster - 126, // 60: vtadmin.PlannedFailoverShardRequest.options:type_name -> vtctldata.PlannedReparentShardRequest - 1, // 61: vtadmin.PlannedFailoverShardResponse.cluster:type_name -> vtadmin.Cluster - 122, // 62: vtadmin.PlannedFailoverShardResponse.promoted_primary:type_name -> topodata.TabletAlias - 124, // 63: vtadmin.PlannedFailoverShardResponse.events:type_name -> logutil.Event - 122, // 64: vtadmin.RefreshStateRequest.alias:type_name -> topodata.TabletAlias - 1, // 65: vtadmin.RefreshStateResponse.cluster:type_name -> vtadmin.Cluster - 122, // 66: vtadmin.ReloadSchemasRequest.tablets:type_name -> topodata.TabletAlias - 105, // 67: vtadmin.ReloadSchemasResponse.keyspace_results:type_name -> vtadmin.ReloadSchemasResponse.KeyspaceResult - 106, // 68: vtadmin.ReloadSchemasResponse.shard_results:type_name -> vtadmin.ReloadSchemasResponse.ShardResult - 107, // 69: vtadmin.ReloadSchemasResponse.tablet_results:type_name -> vtadmin.ReloadSchemasResponse.TabletResult - 124, // 70: vtadmin.ReloadSchemaShardResponse.events:type_name -> logutil.Event - 122, // 71: vtadmin.RefreshTabletReplicationSourceRequest.alias:type_name -> topodata.TabletAlias - 122, // 72: vtadmin.RefreshTabletReplicationSourceResponse.primary:type_name -> topodata.TabletAlias - 1, // 73: vtadmin.RefreshTabletReplicationSourceResponse.cluster:type_name -> vtadmin.Cluster - 122, // 74: vtadmin.RunHealthCheckRequest.alias:type_name -> topodata.TabletAlias - 1, // 75: vtadmin.RunHealthCheckResponse.cluster:type_name -> vtadmin.Cluster - 122, // 76: vtadmin.SetReadOnlyRequest.alias:type_name -> topodata.TabletAlias - 122, // 77: vtadmin.SetReadWriteRequest.alias:type_name -> topodata.TabletAlias - 122, // 78: vtadmin.StartReplicationRequest.alias:type_name -> topodata.TabletAlias - 1, // 79: vtadmin.StartReplicationResponse.cluster:type_name -> vtadmin.Cluster - 122, // 80: vtadmin.StopReplicationRequest.alias:type_name -> topodata.TabletAlias - 1, // 81: vtadmin.StopReplicationResponse.cluster:type_name -> vtadmin.Cluster - 122, // 82: vtadmin.TabletExternallyPromotedRequest.alias:type_name -> topodata.TabletAlias - 1, // 83: vtadmin.TabletExternallyPromotedResponse.cluster:type_name -> vtadmin.Cluster - 122, // 84: vtadmin.TabletExternallyPromotedResponse.new_primary:type_name -> topodata.TabletAlias - 122, // 85: vtadmin.TabletExternallyPromotedResponse.old_primary:type_name -> topodata.TabletAlias - 122, // 86: vtadmin.TabletExternallyReparentedRequest.alias:type_name -> topodata.TabletAlias - 127, // 87: vtadmin.ClusterCellsAliases.AliasesEntry.value:type_name -> topodata.CellsAlias - 113, // 88: vtadmin.Keyspace.ShardsEntry.value:type_name -> vtctldata.Shard - 102, // 89: vtadmin.Schema.TableSizesEntry.value:type_name -> vtadmin.Schema.TableSize - 103, // 90: vtadmin.Schema.TableSize.by_shard:type_name -> vtadmin.Schema.TableSize.ByShardEntry - 101, // 91: vtadmin.Schema.TableSize.ByShardEntry.value:type_name -> vtadmin.Schema.ShardTableSize - 6, // 92: vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry.value:type_name -> vtadmin.ClusterWorkflows - 7, // 93: vtadmin.ReloadSchemasResponse.KeyspaceResult.keyspace:type_name -> vtadmin.Keyspace - 124, // 94: vtadmin.ReloadSchemasResponse.KeyspaceResult.events:type_name -> logutil.Event - 9, // 95: vtadmin.ReloadSchemasResponse.ShardResult.shard:type_name -> vtadmin.Shard - 124, // 96: vtadmin.ReloadSchemasResponse.ShardResult.events:type_name -> logutil.Event - 11, // 97: vtadmin.ReloadSchemasResponse.TabletResult.tablet:type_name -> vtadmin.Tablet - 16, // 98: vtadmin.VTAdmin.CreateKeyspace:input_type -> vtadmin.CreateKeyspaceRequest - 18, // 99: vtadmin.VTAdmin.CreateShard:input_type -> vtadmin.CreateShardRequest - 19, // 100: vtadmin.VTAdmin.DeleteKeyspace:input_type -> vtadmin.DeleteKeyspaceRequest - 20, // 101: vtadmin.VTAdmin.DeleteShards:input_type -> vtadmin.DeleteShardsRequest - 21, // 102: vtadmin.VTAdmin.DeleteTablet:input_type -> vtadmin.DeleteTabletRequest - 23, // 103: vtadmin.VTAdmin.EmergencyFailoverShard:input_type -> vtadmin.EmergencyFailoverShardRequest - 25, // 104: vtadmin.VTAdmin.FindSchema:input_type -> vtadmin.FindSchemaRequest - 26, // 105: vtadmin.VTAdmin.GetBackups:input_type -> vtadmin.GetBackupsRequest - 28, // 106: vtadmin.VTAdmin.GetCellInfos:input_type -> vtadmin.GetCellInfosRequest - 30, // 107: vtadmin.VTAdmin.GetCellsAliases:input_type -> vtadmin.GetCellsAliasesRequest - 32, // 108: vtadmin.VTAdmin.GetClusters:input_type -> vtadmin.GetClustersRequest - 34, // 109: vtadmin.VTAdmin.GetFullStatus:input_type -> vtadmin.GetFullStatusRequest - 35, // 110: vtadmin.VTAdmin.GetGates:input_type -> vtadmin.GetGatesRequest - 37, // 111: vtadmin.VTAdmin.GetKeyspace:input_type -> vtadmin.GetKeyspaceRequest - 38, // 112: vtadmin.VTAdmin.GetKeyspaces:input_type -> vtadmin.GetKeyspacesRequest - 40, // 113: vtadmin.VTAdmin.GetSchema:input_type -> vtadmin.GetSchemaRequest - 41, // 114: vtadmin.VTAdmin.GetSchemas:input_type -> vtadmin.GetSchemasRequest - 43, // 115: vtadmin.VTAdmin.GetShardReplicationPositions:input_type -> vtadmin.GetShardReplicationPositionsRequest - 45, // 116: vtadmin.VTAdmin.GetSrvVSchema:input_type -> vtadmin.GetSrvVSchemaRequest - 46, // 117: vtadmin.VTAdmin.GetSrvVSchemas:input_type -> vtadmin.GetSrvVSchemasRequest - 49, // 118: vtadmin.VTAdmin.GetTablet:input_type -> vtadmin.GetTabletRequest - 50, // 119: vtadmin.VTAdmin.GetTablets:input_type -> vtadmin.GetTabletsRequest - 52, // 120: vtadmin.VTAdmin.GetTopologyPath:input_type -> vtadmin.GetTopologyPathRequest - 53, // 121: vtadmin.VTAdmin.GetVSchema:input_type -> vtadmin.GetVSchemaRequest - 54, // 122: vtadmin.VTAdmin.GetVSchemas:input_type -> vtadmin.GetVSchemasRequest - 56, // 123: vtadmin.VTAdmin.GetVtctlds:input_type -> vtadmin.GetVtctldsRequest - 58, // 124: vtadmin.VTAdmin.GetWorkflow:input_type -> vtadmin.GetWorkflowRequest - 59, // 125: vtadmin.VTAdmin.GetWorkflows:input_type -> vtadmin.GetWorkflowsRequest - 61, // 126: vtadmin.VTAdmin.PingTablet:input_type -> vtadmin.PingTabletRequest - 63, // 127: vtadmin.VTAdmin.PlannedFailoverShard:input_type -> vtadmin.PlannedFailoverShardRequest - 65, // 128: vtadmin.VTAdmin.RebuildKeyspaceGraph:input_type -> vtadmin.RebuildKeyspaceGraphRequest - 67, // 129: vtadmin.VTAdmin.RefreshState:input_type -> vtadmin.RefreshStateRequest - 73, // 130: vtadmin.VTAdmin.RefreshTabletReplicationSource:input_type -> vtadmin.RefreshTabletReplicationSourceRequest - 69, // 131: vtadmin.VTAdmin.ReloadSchemas:input_type -> vtadmin.ReloadSchemasRequest - 71, // 132: vtadmin.VTAdmin.ReloadSchemaShard:input_type -> vtadmin.ReloadSchemaShardRequest - 75, // 133: vtadmin.VTAdmin.RemoveKeyspaceCell:input_type -> vtadmin.RemoveKeyspaceCellRequest - 77, // 134: vtadmin.VTAdmin.RunHealthCheck:input_type -> vtadmin.RunHealthCheckRequest - 79, // 135: vtadmin.VTAdmin.SetReadOnly:input_type -> vtadmin.SetReadOnlyRequest - 81, // 136: vtadmin.VTAdmin.SetReadWrite:input_type -> vtadmin.SetReadWriteRequest - 83, // 137: vtadmin.VTAdmin.StartReplication:input_type -> vtadmin.StartReplicationRequest - 85, // 138: vtadmin.VTAdmin.StopReplication:input_type -> vtadmin.StopReplicationRequest - 87, // 139: vtadmin.VTAdmin.TabletExternallyPromoted:input_type -> vtadmin.TabletExternallyPromotedRequest - 90, // 140: vtadmin.VTAdmin.Validate:input_type -> vtadmin.ValidateRequest - 91, // 141: vtadmin.VTAdmin.ValidateKeyspace:input_type -> vtadmin.ValidateKeyspaceRequest - 92, // 142: vtadmin.VTAdmin.ValidateSchemaKeyspace:input_type -> vtadmin.ValidateSchemaKeyspaceRequest - 93, // 143: vtadmin.VTAdmin.ValidateShard:input_type -> vtadmin.ValidateShardRequest - 94, // 144: vtadmin.VTAdmin.ValidateVersionKeyspace:input_type -> vtadmin.ValidateVersionKeyspaceRequest - 95, // 145: vtadmin.VTAdmin.ValidateVersionShard:input_type -> vtadmin.ValidateVersionShardRequest - 96, // 146: vtadmin.VTAdmin.VTExplain:input_type -> vtadmin.VTExplainRequest - 17, // 147: vtadmin.VTAdmin.CreateKeyspace:output_type -> vtadmin.CreateKeyspaceResponse - 128, // 148: vtadmin.VTAdmin.CreateShard:output_type -> vtctldata.CreateShardResponse - 129, // 149: vtadmin.VTAdmin.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse - 130, // 150: vtadmin.VTAdmin.DeleteShards:output_type -> vtctldata.DeleteShardsResponse - 22, // 151: vtadmin.VTAdmin.DeleteTablet:output_type -> vtadmin.DeleteTabletResponse - 24, // 152: vtadmin.VTAdmin.EmergencyFailoverShard:output_type -> vtadmin.EmergencyFailoverShardResponse - 8, // 153: vtadmin.VTAdmin.FindSchema:output_type -> vtadmin.Schema - 27, // 154: vtadmin.VTAdmin.GetBackups:output_type -> vtadmin.GetBackupsResponse - 29, // 155: vtadmin.VTAdmin.GetCellInfos:output_type -> vtadmin.GetCellInfosResponse - 31, // 156: vtadmin.VTAdmin.GetCellsAliases:output_type -> vtadmin.GetCellsAliasesResponse - 33, // 157: vtadmin.VTAdmin.GetClusters:output_type -> vtadmin.GetClustersResponse - 131, // 158: vtadmin.VTAdmin.GetFullStatus:output_type -> vtctldata.GetFullStatusResponse - 36, // 159: vtadmin.VTAdmin.GetGates:output_type -> vtadmin.GetGatesResponse - 7, // 160: vtadmin.VTAdmin.GetKeyspace:output_type -> vtadmin.Keyspace - 39, // 161: vtadmin.VTAdmin.GetKeyspaces:output_type -> vtadmin.GetKeyspacesResponse - 8, // 162: vtadmin.VTAdmin.GetSchema:output_type -> vtadmin.Schema - 42, // 163: vtadmin.VTAdmin.GetSchemas:output_type -> vtadmin.GetSchemasResponse - 44, // 164: vtadmin.VTAdmin.GetShardReplicationPositions:output_type -> vtadmin.GetShardReplicationPositionsResponse - 10, // 165: vtadmin.VTAdmin.GetSrvVSchema:output_type -> vtadmin.SrvVSchema - 47, // 166: vtadmin.VTAdmin.GetSrvVSchemas:output_type -> vtadmin.GetSrvVSchemasResponse - 11, // 167: vtadmin.VTAdmin.GetTablet:output_type -> vtadmin.Tablet - 51, // 168: vtadmin.VTAdmin.GetTablets:output_type -> vtadmin.GetTabletsResponse - 132, // 169: vtadmin.VTAdmin.GetTopologyPath:output_type -> vtctldata.GetTopologyPathResponse - 12, // 170: vtadmin.VTAdmin.GetVSchema:output_type -> vtadmin.VSchema - 55, // 171: vtadmin.VTAdmin.GetVSchemas:output_type -> vtadmin.GetVSchemasResponse - 57, // 172: vtadmin.VTAdmin.GetVtctlds:output_type -> vtadmin.GetVtctldsResponse - 15, // 173: vtadmin.VTAdmin.GetWorkflow:output_type -> vtadmin.Workflow - 60, // 174: vtadmin.VTAdmin.GetWorkflows:output_type -> vtadmin.GetWorkflowsResponse - 62, // 175: vtadmin.VTAdmin.PingTablet:output_type -> vtadmin.PingTabletResponse - 64, // 176: vtadmin.VTAdmin.PlannedFailoverShard:output_type -> vtadmin.PlannedFailoverShardResponse - 66, // 177: vtadmin.VTAdmin.RebuildKeyspaceGraph:output_type -> vtadmin.RebuildKeyspaceGraphResponse - 68, // 178: vtadmin.VTAdmin.RefreshState:output_type -> vtadmin.RefreshStateResponse - 74, // 179: vtadmin.VTAdmin.RefreshTabletReplicationSource:output_type -> vtadmin.RefreshTabletReplicationSourceResponse - 70, // 180: vtadmin.VTAdmin.ReloadSchemas:output_type -> vtadmin.ReloadSchemasResponse - 72, // 181: vtadmin.VTAdmin.ReloadSchemaShard:output_type -> vtadmin.ReloadSchemaShardResponse - 76, // 182: vtadmin.VTAdmin.RemoveKeyspaceCell:output_type -> vtadmin.RemoveKeyspaceCellResponse - 78, // 183: vtadmin.VTAdmin.RunHealthCheck:output_type -> vtadmin.RunHealthCheckResponse - 80, // 184: vtadmin.VTAdmin.SetReadOnly:output_type -> vtadmin.SetReadOnlyResponse - 82, // 185: vtadmin.VTAdmin.SetReadWrite:output_type -> vtadmin.SetReadWriteResponse - 84, // 186: vtadmin.VTAdmin.StartReplication:output_type -> vtadmin.StartReplicationResponse - 86, // 187: vtadmin.VTAdmin.StopReplication:output_type -> vtadmin.StopReplicationResponse - 88, // 188: vtadmin.VTAdmin.TabletExternallyPromoted:output_type -> vtadmin.TabletExternallyPromotedResponse - 133, // 189: vtadmin.VTAdmin.Validate:output_type -> vtctldata.ValidateResponse - 134, // 190: vtadmin.VTAdmin.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse - 135, // 191: vtadmin.VTAdmin.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse - 136, // 192: vtadmin.VTAdmin.ValidateShard:output_type -> vtctldata.ValidateShardResponse - 137, // 193: vtadmin.VTAdmin.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse - 138, // 194: vtadmin.VTAdmin.ValidateVersionShard:output_type -> vtctldata.ValidateVersionShardResponse - 97, // 195: vtadmin.VTAdmin.VTExplain:output_type -> vtadmin.VTExplainResponse - 147, // [147:196] is the sub-list for method output_type - 98, // [98:147] is the sub-list for method input_type - 98, // [98:98] is the sub-list for extension type_name - 98, // [98:98] is the sub-list for extension extendee - 0, // [0:98] is the sub-list for field type_name + 107, // 52: vtadmin.GetSrvKeyspacesResponse.srv_keyspaces:type_name -> vtadmin.GetSrvKeyspacesResponse.SrvKeyspacesEntry + 10, // 53: vtadmin.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtadmin.SrvVSchema + 126, // 54: vtadmin.GetTabletRequest.alias:type_name -> topodata.TabletAlias + 11, // 55: vtadmin.GetTabletsResponse.tablets:type_name -> vtadmin.Tablet + 12, // 56: vtadmin.GetVSchemasResponse.v_schemas:type_name -> vtadmin.VSchema + 13, // 57: vtadmin.GetVtctldsResponse.vtctlds:type_name -> vtadmin.Vtctld + 108, // 58: vtadmin.GetWorkflowsResponse.workflows_by_cluster:type_name -> vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry + 126, // 59: vtadmin.PingTabletRequest.alias:type_name -> topodata.TabletAlias + 1, // 60: vtadmin.PingTabletResponse.cluster:type_name -> vtadmin.Cluster + 130, // 61: vtadmin.PlannedFailoverShardRequest.options:type_name -> vtctldata.PlannedReparentShardRequest + 1, // 62: vtadmin.PlannedFailoverShardResponse.cluster:type_name -> vtadmin.Cluster + 126, // 63: vtadmin.PlannedFailoverShardResponse.promoted_primary:type_name -> topodata.TabletAlias + 128, // 64: vtadmin.PlannedFailoverShardResponse.events:type_name -> logutil.Event + 126, // 65: vtadmin.RefreshStateRequest.alias:type_name -> topodata.TabletAlias + 1, // 66: vtadmin.RefreshStateResponse.cluster:type_name -> vtadmin.Cluster + 126, // 67: vtadmin.ReloadSchemasRequest.tablets:type_name -> topodata.TabletAlias + 109, // 68: vtadmin.ReloadSchemasResponse.keyspace_results:type_name -> vtadmin.ReloadSchemasResponse.KeyspaceResult + 110, // 69: vtadmin.ReloadSchemasResponse.shard_results:type_name -> vtadmin.ReloadSchemasResponse.ShardResult + 111, // 70: vtadmin.ReloadSchemasResponse.tablet_results:type_name -> vtadmin.ReloadSchemasResponse.TabletResult + 128, // 71: vtadmin.ReloadSchemaShardResponse.events:type_name -> logutil.Event + 126, // 72: vtadmin.RefreshTabletReplicationSourceRequest.alias:type_name -> topodata.TabletAlias + 126, // 73: vtadmin.RefreshTabletReplicationSourceResponse.primary:type_name -> topodata.TabletAlias + 1, // 74: vtadmin.RefreshTabletReplicationSourceResponse.cluster:type_name -> vtadmin.Cluster + 126, // 75: vtadmin.RunHealthCheckRequest.alias:type_name -> topodata.TabletAlias + 1, // 76: vtadmin.RunHealthCheckResponse.cluster:type_name -> vtadmin.Cluster + 126, // 77: vtadmin.SetReadOnlyRequest.alias:type_name -> topodata.TabletAlias + 126, // 78: vtadmin.SetReadWriteRequest.alias:type_name -> topodata.TabletAlias + 126, // 79: vtadmin.StartReplicationRequest.alias:type_name -> topodata.TabletAlias + 1, // 80: vtadmin.StartReplicationResponse.cluster:type_name -> vtadmin.Cluster + 126, // 81: vtadmin.StopReplicationRequest.alias:type_name -> topodata.TabletAlias + 1, // 82: vtadmin.StopReplicationResponse.cluster:type_name -> vtadmin.Cluster + 126, // 83: vtadmin.TabletExternallyPromotedRequest.alias:type_name -> topodata.TabletAlias + 1, // 84: vtadmin.TabletExternallyPromotedResponse.cluster:type_name -> vtadmin.Cluster + 126, // 85: vtadmin.TabletExternallyPromotedResponse.new_primary:type_name -> topodata.TabletAlias + 126, // 86: vtadmin.TabletExternallyPromotedResponse.old_primary:type_name -> topodata.TabletAlias + 126, // 87: vtadmin.TabletExternallyReparentedRequest.alias:type_name -> topodata.TabletAlias + 131, // 88: vtadmin.ClusterCellsAliases.AliasesEntry.value:type_name -> topodata.CellsAlias + 117, // 89: vtadmin.Keyspace.ShardsEntry.value:type_name -> vtctldata.Shard + 105, // 90: vtadmin.Schema.TableSizesEntry.value:type_name -> vtadmin.Schema.TableSize + 106, // 91: vtadmin.Schema.TableSize.by_shard:type_name -> vtadmin.Schema.TableSize.ByShardEntry + 104, // 92: vtadmin.Schema.TableSize.ByShardEntry.value:type_name -> vtadmin.Schema.ShardTableSize + 132, // 93: vtadmin.GetSrvKeyspacesResponse.SrvKeyspacesEntry.value:type_name -> vtctldata.GetSrvKeyspacesResponse + 6, // 94: vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry.value:type_name -> vtadmin.ClusterWorkflows + 7, // 95: vtadmin.ReloadSchemasResponse.KeyspaceResult.keyspace:type_name -> vtadmin.Keyspace + 128, // 96: vtadmin.ReloadSchemasResponse.KeyspaceResult.events:type_name -> logutil.Event + 9, // 97: vtadmin.ReloadSchemasResponse.ShardResult.shard:type_name -> vtadmin.Shard + 128, // 98: vtadmin.ReloadSchemasResponse.ShardResult.events:type_name -> logutil.Event + 11, // 99: vtadmin.ReloadSchemasResponse.TabletResult.tablet:type_name -> vtadmin.Tablet + 16, // 100: vtadmin.VTAdmin.CreateKeyspace:input_type -> vtadmin.CreateKeyspaceRequest + 18, // 101: vtadmin.VTAdmin.CreateShard:input_type -> vtadmin.CreateShardRequest + 19, // 102: vtadmin.VTAdmin.DeleteKeyspace:input_type -> vtadmin.DeleteKeyspaceRequest + 20, // 103: vtadmin.VTAdmin.DeleteShards:input_type -> vtadmin.DeleteShardsRequest + 21, // 104: vtadmin.VTAdmin.DeleteTablet:input_type -> vtadmin.DeleteTabletRequest + 23, // 105: vtadmin.VTAdmin.EmergencyFailoverShard:input_type -> vtadmin.EmergencyFailoverShardRequest + 25, // 106: vtadmin.VTAdmin.FindSchema:input_type -> vtadmin.FindSchemaRequest + 26, // 107: vtadmin.VTAdmin.GetBackups:input_type -> vtadmin.GetBackupsRequest + 28, // 108: vtadmin.VTAdmin.GetCellInfos:input_type -> vtadmin.GetCellInfosRequest + 30, // 109: vtadmin.VTAdmin.GetCellsAliases:input_type -> vtadmin.GetCellsAliasesRequest + 32, // 110: vtadmin.VTAdmin.GetClusters:input_type -> vtadmin.GetClustersRequest + 34, // 111: vtadmin.VTAdmin.GetFullStatus:input_type -> vtadmin.GetFullStatusRequest + 35, // 112: vtadmin.VTAdmin.GetGates:input_type -> vtadmin.GetGatesRequest + 37, // 113: vtadmin.VTAdmin.GetKeyspace:input_type -> vtadmin.GetKeyspaceRequest + 38, // 114: vtadmin.VTAdmin.GetKeyspaces:input_type -> vtadmin.GetKeyspacesRequest + 40, // 115: vtadmin.VTAdmin.GetSchema:input_type -> vtadmin.GetSchemaRequest + 41, // 116: vtadmin.VTAdmin.GetSchemas:input_type -> vtadmin.GetSchemasRequest + 43, // 117: vtadmin.VTAdmin.GetShardReplicationPositions:input_type -> vtadmin.GetShardReplicationPositionsRequest + 45, // 118: vtadmin.VTAdmin.GetSrvKeyspace:input_type -> vtadmin.GetSrvKeyspaceRequest + 46, // 119: vtadmin.VTAdmin.GetSrvKeyspaces:input_type -> vtadmin.GetSrvKeyspacesRequest + 48, // 120: vtadmin.VTAdmin.GetSrvVSchema:input_type -> vtadmin.GetSrvVSchemaRequest + 49, // 121: vtadmin.VTAdmin.GetSrvVSchemas:input_type -> vtadmin.GetSrvVSchemasRequest + 52, // 122: vtadmin.VTAdmin.GetTablet:input_type -> vtadmin.GetTabletRequest + 53, // 123: vtadmin.VTAdmin.GetTablets:input_type -> vtadmin.GetTabletsRequest + 55, // 124: vtadmin.VTAdmin.GetTopologyPath:input_type -> vtadmin.GetTopologyPathRequest + 56, // 125: vtadmin.VTAdmin.GetVSchema:input_type -> vtadmin.GetVSchemaRequest + 57, // 126: vtadmin.VTAdmin.GetVSchemas:input_type -> vtadmin.GetVSchemasRequest + 59, // 127: vtadmin.VTAdmin.GetVtctlds:input_type -> vtadmin.GetVtctldsRequest + 61, // 128: vtadmin.VTAdmin.GetWorkflow:input_type -> vtadmin.GetWorkflowRequest + 62, // 129: vtadmin.VTAdmin.GetWorkflows:input_type -> vtadmin.GetWorkflowsRequest + 64, // 130: vtadmin.VTAdmin.PingTablet:input_type -> vtadmin.PingTabletRequest + 66, // 131: vtadmin.VTAdmin.PlannedFailoverShard:input_type -> vtadmin.PlannedFailoverShardRequest + 68, // 132: vtadmin.VTAdmin.RebuildKeyspaceGraph:input_type -> vtadmin.RebuildKeyspaceGraphRequest + 70, // 133: vtadmin.VTAdmin.RefreshState:input_type -> vtadmin.RefreshStateRequest + 76, // 134: vtadmin.VTAdmin.RefreshTabletReplicationSource:input_type -> vtadmin.RefreshTabletReplicationSourceRequest + 72, // 135: vtadmin.VTAdmin.ReloadSchemas:input_type -> vtadmin.ReloadSchemasRequest + 74, // 136: vtadmin.VTAdmin.ReloadSchemaShard:input_type -> vtadmin.ReloadSchemaShardRequest + 78, // 137: vtadmin.VTAdmin.RemoveKeyspaceCell:input_type -> vtadmin.RemoveKeyspaceCellRequest + 80, // 138: vtadmin.VTAdmin.RunHealthCheck:input_type -> vtadmin.RunHealthCheckRequest + 82, // 139: vtadmin.VTAdmin.SetReadOnly:input_type -> vtadmin.SetReadOnlyRequest + 84, // 140: vtadmin.VTAdmin.SetReadWrite:input_type -> vtadmin.SetReadWriteRequest + 86, // 141: vtadmin.VTAdmin.StartReplication:input_type -> vtadmin.StartReplicationRequest + 88, // 142: vtadmin.VTAdmin.StopReplication:input_type -> vtadmin.StopReplicationRequest + 90, // 143: vtadmin.VTAdmin.TabletExternallyPromoted:input_type -> vtadmin.TabletExternallyPromotedRequest + 93, // 144: vtadmin.VTAdmin.Validate:input_type -> vtadmin.ValidateRequest + 94, // 145: vtadmin.VTAdmin.ValidateKeyspace:input_type -> vtadmin.ValidateKeyspaceRequest + 95, // 146: vtadmin.VTAdmin.ValidateSchemaKeyspace:input_type -> vtadmin.ValidateSchemaKeyspaceRequest + 96, // 147: vtadmin.VTAdmin.ValidateShard:input_type -> vtadmin.ValidateShardRequest + 97, // 148: vtadmin.VTAdmin.ValidateVersionKeyspace:input_type -> vtadmin.ValidateVersionKeyspaceRequest + 98, // 149: vtadmin.VTAdmin.ValidateVersionShard:input_type -> vtadmin.ValidateVersionShardRequest + 99, // 150: vtadmin.VTAdmin.VTExplain:input_type -> vtadmin.VTExplainRequest + 17, // 151: vtadmin.VTAdmin.CreateKeyspace:output_type -> vtadmin.CreateKeyspaceResponse + 133, // 152: vtadmin.VTAdmin.CreateShard:output_type -> vtctldata.CreateShardResponse + 134, // 153: vtadmin.VTAdmin.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse + 135, // 154: vtadmin.VTAdmin.DeleteShards:output_type -> vtctldata.DeleteShardsResponse + 22, // 155: vtadmin.VTAdmin.DeleteTablet:output_type -> vtadmin.DeleteTabletResponse + 24, // 156: vtadmin.VTAdmin.EmergencyFailoverShard:output_type -> vtadmin.EmergencyFailoverShardResponse + 8, // 157: vtadmin.VTAdmin.FindSchema:output_type -> vtadmin.Schema + 27, // 158: vtadmin.VTAdmin.GetBackups:output_type -> vtadmin.GetBackupsResponse + 29, // 159: vtadmin.VTAdmin.GetCellInfos:output_type -> vtadmin.GetCellInfosResponse + 31, // 160: vtadmin.VTAdmin.GetCellsAliases:output_type -> vtadmin.GetCellsAliasesResponse + 33, // 161: vtadmin.VTAdmin.GetClusters:output_type -> vtadmin.GetClustersResponse + 136, // 162: vtadmin.VTAdmin.GetFullStatus:output_type -> vtctldata.GetFullStatusResponse + 36, // 163: vtadmin.VTAdmin.GetGates:output_type -> vtadmin.GetGatesResponse + 7, // 164: vtadmin.VTAdmin.GetKeyspace:output_type -> vtadmin.Keyspace + 39, // 165: vtadmin.VTAdmin.GetKeyspaces:output_type -> vtadmin.GetKeyspacesResponse + 8, // 166: vtadmin.VTAdmin.GetSchema:output_type -> vtadmin.Schema + 42, // 167: vtadmin.VTAdmin.GetSchemas:output_type -> vtadmin.GetSchemasResponse + 44, // 168: vtadmin.VTAdmin.GetShardReplicationPositions:output_type -> vtadmin.GetShardReplicationPositionsResponse + 132, // 169: vtadmin.VTAdmin.GetSrvKeyspace:output_type -> vtctldata.GetSrvKeyspacesResponse + 47, // 170: vtadmin.VTAdmin.GetSrvKeyspaces:output_type -> vtadmin.GetSrvKeyspacesResponse + 10, // 171: vtadmin.VTAdmin.GetSrvVSchema:output_type -> vtadmin.SrvVSchema + 50, // 172: vtadmin.VTAdmin.GetSrvVSchemas:output_type -> vtadmin.GetSrvVSchemasResponse + 11, // 173: vtadmin.VTAdmin.GetTablet:output_type -> vtadmin.Tablet + 54, // 174: vtadmin.VTAdmin.GetTablets:output_type -> vtadmin.GetTabletsResponse + 137, // 175: vtadmin.VTAdmin.GetTopologyPath:output_type -> vtctldata.GetTopologyPathResponse + 12, // 176: vtadmin.VTAdmin.GetVSchema:output_type -> vtadmin.VSchema + 58, // 177: vtadmin.VTAdmin.GetVSchemas:output_type -> vtadmin.GetVSchemasResponse + 60, // 178: vtadmin.VTAdmin.GetVtctlds:output_type -> vtadmin.GetVtctldsResponse + 15, // 179: vtadmin.VTAdmin.GetWorkflow:output_type -> vtadmin.Workflow + 63, // 180: vtadmin.VTAdmin.GetWorkflows:output_type -> vtadmin.GetWorkflowsResponse + 65, // 181: vtadmin.VTAdmin.PingTablet:output_type -> vtadmin.PingTabletResponse + 67, // 182: vtadmin.VTAdmin.PlannedFailoverShard:output_type -> vtadmin.PlannedFailoverShardResponse + 69, // 183: vtadmin.VTAdmin.RebuildKeyspaceGraph:output_type -> vtadmin.RebuildKeyspaceGraphResponse + 71, // 184: vtadmin.VTAdmin.RefreshState:output_type -> vtadmin.RefreshStateResponse + 77, // 185: vtadmin.VTAdmin.RefreshTabletReplicationSource:output_type -> vtadmin.RefreshTabletReplicationSourceResponse + 73, // 186: vtadmin.VTAdmin.ReloadSchemas:output_type -> vtadmin.ReloadSchemasResponse + 75, // 187: vtadmin.VTAdmin.ReloadSchemaShard:output_type -> vtadmin.ReloadSchemaShardResponse + 79, // 188: vtadmin.VTAdmin.RemoveKeyspaceCell:output_type -> vtadmin.RemoveKeyspaceCellResponse + 81, // 189: vtadmin.VTAdmin.RunHealthCheck:output_type -> vtadmin.RunHealthCheckResponse + 83, // 190: vtadmin.VTAdmin.SetReadOnly:output_type -> vtadmin.SetReadOnlyResponse + 85, // 191: vtadmin.VTAdmin.SetReadWrite:output_type -> vtadmin.SetReadWriteResponse + 87, // 192: vtadmin.VTAdmin.StartReplication:output_type -> vtadmin.StartReplicationResponse + 89, // 193: vtadmin.VTAdmin.StopReplication:output_type -> vtadmin.StopReplicationResponse + 91, // 194: vtadmin.VTAdmin.TabletExternallyPromoted:output_type -> vtadmin.TabletExternallyPromotedResponse + 138, // 195: vtadmin.VTAdmin.Validate:output_type -> vtctldata.ValidateResponse + 139, // 196: vtadmin.VTAdmin.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse + 140, // 197: vtadmin.VTAdmin.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse + 141, // 198: vtadmin.VTAdmin.ValidateShard:output_type -> vtctldata.ValidateShardResponse + 142, // 199: vtadmin.VTAdmin.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse + 143, // 200: vtadmin.VTAdmin.ValidateVersionShard:output_type -> vtctldata.ValidateVersionShardResponse + 100, // 201: vtadmin.VTAdmin.VTExplain:output_type -> vtadmin.VTExplainResponse + 151, // [151:202] is the sub-list for method output_type + 100, // [100:151] is the sub-list for method input_type + 100, // [100:100] is the sub-list for extension type_name + 100, // [100:100] is the sub-list for extension extendee + 0, // [0:100] is the sub-list for field type_name } func init() { file_vtadmin_proto_init() } @@ -8004,7 +8222,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvVSchemaRequest); i { + switch v := v.(*GetSrvKeyspaceRequest); i { case 0: return &v.state case 1: @@ -8016,7 +8234,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvVSchemasRequest); i { + switch v := v.(*GetSrvKeyspacesRequest); i { case 0: return &v.state case 1: @@ -8028,7 +8246,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvVSchemasResponse); i { + switch v := v.(*GetSrvKeyspacesResponse); i { case 0: return &v.state case 1: @@ -8040,7 +8258,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSchemaTableSizeOptions); i { + switch v := v.(*GetSrvVSchemaRequest); i { case 0: return &v.state case 1: @@ -8052,7 +8270,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTabletRequest); i { + switch v := v.(*GetSrvVSchemasRequest); i { case 0: return &v.state case 1: @@ -8064,7 +8282,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTabletsRequest); i { + switch v := v.(*GetSrvVSchemasResponse); i { case 0: return &v.state case 1: @@ -8076,7 +8294,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTabletsResponse); i { + switch v := v.(*GetSchemaTableSizeOptions); i { case 0: return &v.state case 1: @@ -8088,7 +8306,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTopologyPathRequest); i { + switch v := v.(*GetTabletRequest); i { case 0: return &v.state case 1: @@ -8100,7 +8318,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetVSchemaRequest); i { + switch v := v.(*GetTabletsRequest); i { case 0: return &v.state case 1: @@ -8112,7 +8330,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetVSchemasRequest); i { + switch v := v.(*GetTabletsResponse); i { case 0: return &v.state case 1: @@ -8124,7 +8342,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetVSchemasResponse); i { + switch v := v.(*GetTopologyPathRequest); i { case 0: return &v.state case 1: @@ -8136,7 +8354,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetVtctldsRequest); i { + switch v := v.(*GetVSchemaRequest); i { case 0: return &v.state case 1: @@ -8148,7 +8366,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetVtctldsResponse); i { + switch v := v.(*GetVSchemasRequest); i { case 0: return &v.state case 1: @@ -8160,7 +8378,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowRequest); i { + switch v := v.(*GetVSchemasResponse); i { case 0: return &v.state case 1: @@ -8172,7 +8390,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowsRequest); i { + switch v := v.(*GetVtctldsRequest); i { case 0: return &v.state case 1: @@ -8184,7 +8402,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowsResponse); i { + switch v := v.(*GetVtctldsResponse); i { case 0: return &v.state case 1: @@ -8196,7 +8414,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PingTabletRequest); i { + switch v := v.(*GetWorkflowRequest); i { case 0: return &v.state case 1: @@ -8208,7 +8426,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PingTabletResponse); i { + switch v := v.(*GetWorkflowsRequest); i { case 0: return &v.state case 1: @@ -8220,7 +8438,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PlannedFailoverShardRequest); i { + switch v := v.(*GetWorkflowsResponse); i { case 0: return &v.state case 1: @@ -8232,7 +8450,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PlannedFailoverShardResponse); i { + switch v := v.(*PingTabletRequest); i { case 0: return &v.state case 1: @@ -8244,7 +8462,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RebuildKeyspaceGraphRequest); i { + switch v := v.(*PingTabletResponse); i { case 0: return &v.state case 1: @@ -8256,7 +8474,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RebuildKeyspaceGraphResponse); i { + switch v := v.(*PlannedFailoverShardRequest); i { case 0: return &v.state case 1: @@ -8268,7 +8486,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshStateRequest); i { + switch v := v.(*PlannedFailoverShardResponse); i { case 0: return &v.state case 1: @@ -8280,7 +8498,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshStateResponse); i { + switch v := v.(*RebuildKeyspaceGraphRequest); i { case 0: return &v.state case 1: @@ -8292,7 +8510,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReloadSchemasRequest); i { + switch v := v.(*RebuildKeyspaceGraphResponse); i { case 0: return &v.state case 1: @@ -8304,7 +8522,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReloadSchemasResponse); i { + switch v := v.(*RefreshStateRequest); i { case 0: return &v.state case 1: @@ -8316,7 +8534,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReloadSchemaShardRequest); i { + switch v := v.(*RefreshStateResponse); i { case 0: return &v.state case 1: @@ -8328,7 +8546,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReloadSchemaShardResponse); i { + switch v := v.(*ReloadSchemasRequest); i { case 0: return &v.state case 1: @@ -8340,7 +8558,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshTabletReplicationSourceRequest); i { + switch v := v.(*ReloadSchemasResponse); i { case 0: return &v.state case 1: @@ -8352,7 +8570,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshTabletReplicationSourceResponse); i { + switch v := v.(*ReloadSchemaShardRequest); i { case 0: return &v.state case 1: @@ -8364,7 +8582,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveKeyspaceCellRequest); i { + switch v := v.(*ReloadSchemaShardResponse); i { case 0: return &v.state case 1: @@ -8376,7 +8594,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveKeyspaceCellResponse); i { + switch v := v.(*RefreshTabletReplicationSourceRequest); i { case 0: return &v.state case 1: @@ -8388,7 +8606,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RunHealthCheckRequest); i { + switch v := v.(*RefreshTabletReplicationSourceResponse); i { case 0: return &v.state case 1: @@ -8400,7 +8618,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RunHealthCheckResponse); i { + switch v := v.(*RemoveKeyspaceCellRequest); i { case 0: return &v.state case 1: @@ -8412,7 +8630,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetReadOnlyRequest); i { + switch v := v.(*RemoveKeyspaceCellResponse); i { case 0: return &v.state case 1: @@ -8424,7 +8642,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetReadOnlyResponse); i { + switch v := v.(*RunHealthCheckRequest); i { case 0: return &v.state case 1: @@ -8436,7 +8654,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetReadWriteRequest); i { + switch v := v.(*RunHealthCheckResponse); i { case 0: return &v.state case 1: @@ -8448,7 +8666,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetReadWriteResponse); i { + switch v := v.(*SetReadOnlyRequest); i { case 0: return &v.state case 1: @@ -8460,7 +8678,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartReplicationRequest); i { + switch v := v.(*SetReadOnlyResponse); i { case 0: return &v.state case 1: @@ -8472,7 +8690,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartReplicationResponse); i { + switch v := v.(*SetReadWriteRequest); i { case 0: return &v.state case 1: @@ -8484,7 +8702,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopReplicationRequest); i { + switch v := v.(*SetReadWriteResponse); i { case 0: return &v.state case 1: @@ -8496,7 +8714,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopReplicationResponse); i { + switch v := v.(*StartReplicationRequest); i { case 0: return &v.state case 1: @@ -8508,7 +8726,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TabletExternallyPromotedRequest); i { + switch v := v.(*StartReplicationResponse); i { case 0: return &v.state case 1: @@ -8520,7 +8738,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TabletExternallyPromotedResponse); i { + switch v := v.(*StopReplicationRequest); i { case 0: return &v.state case 1: @@ -8532,7 +8750,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TabletExternallyReparentedRequest); i { + switch v := v.(*StopReplicationResponse); i { case 0: return &v.state case 1: @@ -8544,7 +8762,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateRequest); i { + switch v := v.(*TabletExternallyPromotedRequest); i { case 0: return &v.state case 1: @@ -8556,7 +8774,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateKeyspaceRequest); i { + switch v := v.(*TabletExternallyPromotedResponse); i { case 0: return &v.state case 1: @@ -8568,7 +8786,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateSchemaKeyspaceRequest); i { + switch v := v.(*TabletExternallyReparentedRequest); i { case 0: return &v.state case 1: @@ -8580,7 +8798,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateShardRequest); i { + switch v := v.(*ValidateRequest); i { case 0: return &v.state case 1: @@ -8592,7 +8810,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateVersionKeyspaceRequest); i { + switch v := v.(*ValidateKeyspaceRequest); i { case 0: return &v.state case 1: @@ -8604,7 +8822,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateVersionShardRequest); i { + switch v := v.(*ValidateSchemaKeyspaceRequest); i { case 0: return &v.state case 1: @@ -8616,7 +8834,7 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VTExplainRequest); i { + switch v := v.(*ValidateShardRequest); i { case 0: return &v.state case 1: @@ -8628,6 +8846,42 @@ func file_vtadmin_proto_init() { } } file_vtadmin_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateVersionKeyspaceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtadmin_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateVersionShardRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtadmin_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VTExplainRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtadmin_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VTExplainResponse); i { case 0: return &v.state @@ -8639,7 +8893,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Schema_ShardTableSize); i { case 0: return &v.state @@ -8651,7 +8905,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Schema_TableSize); i { case 0: return &v.state @@ -8663,7 +8917,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[108].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReloadSchemasResponse_KeyspaceResult); i { case 0: return &v.state @@ -8675,7 +8929,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReloadSchemasResponse_ShardResult); i { case 0: return &v.state @@ -8687,7 +8941,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[110].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReloadSchemasResponse_TabletResult); i { case 0: return &v.state @@ -8706,7 +8960,7 @@ func file_vtadmin_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_vtadmin_proto_rawDesc, NumEnums: 1, - NumMessages: 107, + NumMessages: 111, NumExtensions: 0, NumServices: 1, }, diff --git a/go/vt/proto/vtadmin/vtadmin_grpc.pb.go b/go/vt/proto/vtadmin/vtadmin_grpc.pb.go index fd6cda64704..e0e2ce2f44f 100644 --- a/go/vt/proto/vtadmin/vtadmin_grpc.pb.go +++ b/go/vt/proto/vtadmin/vtadmin_grpc.pb.go @@ -70,6 +70,10 @@ type VTAdminClient interface { // GetShardReplicationPositions returns shard replication positions grouped // by cluster. GetShardReplicationPositions(ctx context.Context, in *GetShardReplicationPositionsRequest, opts ...grpc.CallOption) (*GetShardReplicationPositionsResponse, error) + // GetSrvKeyspace returns the SrvKeyspace for a keyspace in one or more cells. + GetSrvKeyspace(ctx context.Context, in *GetSrvKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.GetSrvKeyspacesResponse, error) + // GetSrvKeyspaces returns the SrvKeyspaces for all keyspaces across all the specified clusters. + GetSrvKeyspaces(ctx context.Context, in *GetSrvKeyspacesRequest, opts ...grpc.CallOption) (*GetSrvKeyspacesResponse, error) // GetSrvVSchema returns the SrvVSchema for the given cluster and cell. GetSrvVSchema(ctx context.Context, in *GetSrvVSchemaRequest, opts ...grpc.CallOption) (*SrvVSchema, error) // GetSrvVSchemas returns all SrvVSchemas across all (or specified) clusters @@ -334,6 +338,24 @@ func (c *vTAdminClient) GetShardReplicationPositions(ctx context.Context, in *Ge return out, nil } +func (c *vTAdminClient) GetSrvKeyspace(ctx context.Context, in *GetSrvKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.GetSrvKeyspacesResponse, error) { + out := new(vtctldata.GetSrvKeyspacesResponse) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetSrvKeyspace", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vTAdminClient) GetSrvKeyspaces(ctx context.Context, in *GetSrvKeyspacesRequest, opts ...grpc.CallOption) (*GetSrvKeyspacesResponse, error) { + out := new(GetSrvKeyspacesResponse) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetSrvKeyspaces", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vTAdminClient) GetSrvVSchema(ctx context.Context, in *GetSrvVSchemaRequest, opts ...grpc.CallOption) (*SrvVSchema, error) { out := new(SrvVSchema) err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetSrvVSchema", in, out, opts...) @@ -664,6 +686,10 @@ type VTAdminServer interface { // GetShardReplicationPositions returns shard replication positions grouped // by cluster. GetShardReplicationPositions(context.Context, *GetShardReplicationPositionsRequest) (*GetShardReplicationPositionsResponse, error) + // GetSrvKeyspace returns the SrvKeyspace for a keyspace in one or more cells. + GetSrvKeyspace(context.Context, *GetSrvKeyspaceRequest) (*vtctldata.GetSrvKeyspacesResponse, error) + // GetSrvKeyspaces returns the SrvKeyspaces for all keyspaces across all the specified clusters. + GetSrvKeyspaces(context.Context, *GetSrvKeyspacesRequest) (*GetSrvKeyspacesResponse, error) // GetSrvVSchema returns the SrvVSchema for the given cluster and cell. GetSrvVSchema(context.Context, *GetSrvVSchemaRequest) (*SrvVSchema, error) // GetSrvVSchemas returns all SrvVSchemas across all (or specified) clusters @@ -817,6 +843,12 @@ func (UnimplementedVTAdminServer) GetSchemas(context.Context, *GetSchemasRequest func (UnimplementedVTAdminServer) GetShardReplicationPositions(context.Context, *GetShardReplicationPositionsRequest) (*GetShardReplicationPositionsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetShardReplicationPositions not implemented") } +func (UnimplementedVTAdminServer) GetSrvKeyspace(context.Context, *GetSrvKeyspaceRequest) (*vtctldata.GetSrvKeyspacesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSrvKeyspace not implemented") +} +func (UnimplementedVTAdminServer) GetSrvKeyspaces(context.Context, *GetSrvKeyspacesRequest) (*GetSrvKeyspacesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSrvKeyspaces not implemented") +} func (UnimplementedVTAdminServer) GetSrvVSchema(context.Context, *GetSrvVSchemaRequest) (*SrvVSchema, error) { return nil, status.Errorf(codes.Unimplemented, "method GetSrvVSchema not implemented") } @@ -1247,6 +1279,42 @@ func _VTAdmin_GetShardReplicationPositions_Handler(srv interface{}, ctx context. return interceptor(ctx, in, info, handler) } +func _VTAdmin_GetSrvKeyspace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSrvKeyspaceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).GetSrvKeyspace(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/GetSrvKeyspace", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).GetSrvKeyspace(ctx, req.(*GetSrvKeyspaceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VTAdmin_GetSrvKeyspaces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSrvKeyspacesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).GetSrvKeyspaces(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/GetSrvKeyspaces", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).GetSrvKeyspaces(ctx, req.(*GetSrvKeyspacesRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VTAdmin_GetSrvVSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetSrvVSchemaRequest) if err := dec(in); err != nil { @@ -1884,6 +1952,14 @@ var VTAdmin_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetShardReplicationPositions", Handler: _VTAdmin_GetShardReplicationPositions_Handler, }, + { + MethodName: "GetSrvKeyspace", + Handler: _VTAdmin_GetSrvKeyspace_Handler, + }, + { + MethodName: "GetSrvKeyspaces", + Handler: _VTAdmin_GetSrvKeyspaces_Handler, + }, { MethodName: "GetSrvVSchema", Handler: _VTAdmin_GetSrvVSchema_Handler, diff --git a/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go b/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go index 9706f67e07f..0e4b4c6e84b 100644 --- a/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go +++ b/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: vtadmin.proto package vtadmin import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -24,6 +25,2344 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *Cluster) CloneVT() *Cluster { + if m == nil { + return (*Cluster)(nil) + } + r := &Cluster{ + Id: m.Id, + Name: m.Name, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Cluster) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ClusterBackup) CloneVT() *ClusterBackup { + if m == nil { + return (*ClusterBackup)(nil) + } + r := &ClusterBackup{ + Cluster: m.Cluster.CloneVT(), + Backup: m.Backup.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ClusterBackup) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ClusterCellsAliases) CloneVT() *ClusterCellsAliases { + if m == nil { + return (*ClusterCellsAliases)(nil) + } + r := &ClusterCellsAliases{ + Cluster: m.Cluster.CloneVT(), + } + if rhs := m.Aliases; rhs != nil { + tmpContainer := make(map[string]*topodata.CellsAlias, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Aliases = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ClusterCellsAliases) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ClusterCellInfo) CloneVT() *ClusterCellInfo { + if m == nil { + return (*ClusterCellInfo)(nil) + } + r := &ClusterCellInfo{ + Cluster: m.Cluster.CloneVT(), + Name: m.Name, + CellInfo: m.CellInfo.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ClusterCellInfo) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ClusterShardReplicationPosition) CloneVT() *ClusterShardReplicationPosition { + if m == nil { + return (*ClusterShardReplicationPosition)(nil) + } + r := &ClusterShardReplicationPosition{ + Cluster: m.Cluster.CloneVT(), + Keyspace: m.Keyspace, + Shard: m.Shard, + PositionInfo: m.PositionInfo.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ClusterShardReplicationPosition) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ClusterWorkflows) CloneVT() *ClusterWorkflows { + if m == nil { + return (*ClusterWorkflows)(nil) + } + r := &ClusterWorkflows{} + if rhs := m.Workflows; rhs != nil { + tmpContainer := make([]*Workflow, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Workflows = tmpContainer + } + if rhs := m.Warnings; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Warnings = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ClusterWorkflows) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Keyspace) CloneVT() *Keyspace { + if m == nil { + return (*Keyspace)(nil) + } + r := &Keyspace{ + Cluster: m.Cluster.CloneVT(), + Keyspace: m.Keyspace.CloneVT(), + } + if rhs := m.Shards; rhs != nil { + tmpContainer := make(map[string]*vtctldata.Shard, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Shards = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Keyspace) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Schema_ShardTableSize) CloneVT() *Schema_ShardTableSize { + if m == nil { + return (*Schema_ShardTableSize)(nil) + } + r := &Schema_ShardTableSize{ + RowCount: m.RowCount, + DataLength: m.DataLength, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Schema_ShardTableSize) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Schema_TableSize) CloneVT() *Schema_TableSize { + if m == nil { + return (*Schema_TableSize)(nil) + } + r := &Schema_TableSize{ + RowCount: m.RowCount, + DataLength: m.DataLength, + } + if rhs := m.ByShard; rhs != nil { + tmpContainer := make(map[string]*Schema_ShardTableSize, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ByShard = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Schema_TableSize) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Schema) CloneVT() *Schema { + if m == nil { + return (*Schema)(nil) + } + r := &Schema{ + Cluster: m.Cluster.CloneVT(), + Keyspace: m.Keyspace, + } + if rhs := m.TableDefinitions; rhs != nil { + tmpContainer := make([]*tabletmanagerdata.TableDefinition, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TableDefinitions = tmpContainer + } + if rhs := m.TableSizes; rhs != nil { + tmpContainer := make(map[string]*Schema_TableSize, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TableSizes = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Schema) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Shard) CloneVT() *Shard { + if m == nil { + return (*Shard)(nil) + } + r := &Shard{ + Cluster: m.Cluster.CloneVT(), + Shard: m.Shard.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Shard) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SrvVSchema) CloneVT() *SrvVSchema { + if m == nil { + return (*SrvVSchema)(nil) + } + r := &SrvVSchema{ + Cell: m.Cell, + Cluster: m.Cluster.CloneVT(), + SrvVSchema: m.SrvVSchema.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SrvVSchema) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Tablet) CloneVT() *Tablet { + if m == nil { + return (*Tablet)(nil) + } + r := &Tablet{ + Cluster: m.Cluster.CloneVT(), + Tablet: m.Tablet.CloneVT(), + State: m.State, + FQDN: m.FQDN, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Tablet) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VSchema) CloneVT() *VSchema { + if m == nil { + return (*VSchema)(nil) + } + r := &VSchema{ + Cluster: m.Cluster.CloneVT(), + Name: m.Name, + VSchema: m.VSchema.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VSchema) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Vtctld) CloneVT() *Vtctld { + if m == nil { + return (*Vtctld)(nil) + } + r := &Vtctld{ + Hostname: m.Hostname, + Cluster: m.Cluster.CloneVT(), + FQDN: m.FQDN, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Vtctld) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VTGate) CloneVT() *VTGate { + if m == nil { + return (*VTGate)(nil) + } + r := &VTGate{ + Hostname: m.Hostname, + Pool: m.Pool, + Cell: m.Cell, + Cluster: m.Cluster.CloneVT(), + FQDN: m.FQDN, + } + if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Keyspaces = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VTGate) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Workflow) CloneVT() *Workflow { + if m == nil { + return (*Workflow)(nil) + } + r := &Workflow{ + Cluster: m.Cluster.CloneVT(), + Keyspace: m.Keyspace, + Workflow: m.Workflow.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Workflow) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CreateKeyspaceRequest) CloneVT() *CreateKeyspaceRequest { + if m == nil { + return (*CreateKeyspaceRequest)(nil) + } + r := &CreateKeyspaceRequest{ + ClusterId: m.ClusterId, + Options: m.Options.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CreateKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CreateKeyspaceResponse) CloneVT() *CreateKeyspaceResponse { + if m == nil { + return (*CreateKeyspaceResponse)(nil) + } + r := &CreateKeyspaceResponse{ + Keyspace: m.Keyspace.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CreateKeyspaceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CreateShardRequest) CloneVT() *CreateShardRequest { + if m == nil { + return (*CreateShardRequest)(nil) + } + r := &CreateShardRequest{ + ClusterId: m.ClusterId, + Options: m.Options.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CreateShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteKeyspaceRequest) CloneVT() *DeleteKeyspaceRequest { + if m == nil { + return (*DeleteKeyspaceRequest)(nil) + } + r := &DeleteKeyspaceRequest{ + ClusterId: m.ClusterId, + Options: m.Options.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *DeleteKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteShardsRequest) CloneVT() *DeleteShardsRequest { + if m == nil { + return (*DeleteShardsRequest)(nil) + } + r := &DeleteShardsRequest{ + ClusterId: m.ClusterId, + Options: m.Options.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *DeleteShardsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteTabletRequest) CloneVT() *DeleteTabletRequest { + if m == nil { + return (*DeleteTabletRequest)(nil) + } + r := &DeleteTabletRequest{ + Alias: m.Alias.CloneVT(), + AllowPrimary: m.AllowPrimary, + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *DeleteTabletRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteTabletResponse) CloneVT() *DeleteTabletResponse { + if m == nil { + return (*DeleteTabletResponse)(nil) + } + r := &DeleteTabletResponse{ + Status: m.Status, + Cluster: m.Cluster.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *DeleteTabletResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *EmergencyFailoverShardRequest) CloneVT() *EmergencyFailoverShardRequest { + if m == nil { + return (*EmergencyFailoverShardRequest)(nil) + } + r := &EmergencyFailoverShardRequest{ + ClusterId: m.ClusterId, + Options: m.Options.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *EmergencyFailoverShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *EmergencyFailoverShardResponse) CloneVT() *EmergencyFailoverShardResponse { + if m == nil { + return (*EmergencyFailoverShardResponse)(nil) + } + r := &EmergencyFailoverShardResponse{ + Cluster: m.Cluster.CloneVT(), + Keyspace: m.Keyspace, + Shard: m.Shard, + PromotedPrimary: m.PromotedPrimary.CloneVT(), + } + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *EmergencyFailoverShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *FindSchemaRequest) CloneVT() *FindSchemaRequest { + if m == nil { + return (*FindSchemaRequest)(nil) + } + r := &FindSchemaRequest{ + Table: m.Table, + TableSizeOptions: m.TableSizeOptions.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *FindSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetBackupsRequest) CloneVT() *GetBackupsRequest { + if m == nil { + return (*GetBackupsRequest)(nil) + } + r := &GetBackupsRequest{ + RequestOptions: m.RequestOptions.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Keyspaces = tmpContainer + } + if rhs := m.KeyspaceShards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.KeyspaceShards = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetBackupsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetBackupsResponse) CloneVT() *GetBackupsResponse { + if m == nil { + return (*GetBackupsResponse)(nil) + } + r := &GetBackupsResponse{} + if rhs := m.Backups; rhs != nil { + tmpContainer := make([]*ClusterBackup, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Backups = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetBackupsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCellInfosRequest) CloneVT() *GetCellInfosRequest { + if m == nil { + return (*GetCellInfosRequest)(nil) + } + r := &GetCellInfosRequest{ + NamesOnly: m.NamesOnly, + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetCellInfosRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCellInfosResponse) CloneVT() *GetCellInfosResponse { + if m == nil { + return (*GetCellInfosResponse)(nil) + } + r := &GetCellInfosResponse{} + if rhs := m.CellInfos; rhs != nil { + tmpContainer := make([]*ClusterCellInfo, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.CellInfos = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetCellInfosResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCellsAliasesRequest) CloneVT() *GetCellsAliasesRequest { + if m == nil { + return (*GetCellsAliasesRequest)(nil) + } + r := &GetCellsAliasesRequest{} + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetCellsAliasesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCellsAliasesResponse) CloneVT() *GetCellsAliasesResponse { + if m == nil { + return (*GetCellsAliasesResponse)(nil) + } + r := &GetCellsAliasesResponse{} + if rhs := m.Aliases; rhs != nil { + tmpContainer := make([]*ClusterCellsAliases, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Aliases = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetCellsAliasesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetClustersRequest) CloneVT() *GetClustersRequest { + if m == nil { + return (*GetClustersRequest)(nil) + } + r := &GetClustersRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetClustersRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetClustersResponse) CloneVT() *GetClustersResponse { + if m == nil { + return (*GetClustersResponse)(nil) + } + r := &GetClustersResponse{} + if rhs := m.Clusters; rhs != nil { + tmpContainer := make([]*Cluster, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Clusters = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetClustersResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetFullStatusRequest) CloneVT() *GetFullStatusRequest { + if m == nil { + return (*GetFullStatusRequest)(nil) + } + r := &GetFullStatusRequest{ + ClusterId: m.ClusterId, + Alias: m.Alias.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetFullStatusRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetGatesRequest) CloneVT() *GetGatesRequest { + if m == nil { + return (*GetGatesRequest)(nil) + } + r := &GetGatesRequest{} + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetGatesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetGatesResponse) CloneVT() *GetGatesResponse { + if m == nil { + return (*GetGatesResponse)(nil) + } + r := &GetGatesResponse{} + if rhs := m.Gates; rhs != nil { + tmpContainer := make([]*VTGate, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Gates = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetGatesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetKeyspaceRequest) CloneVT() *GetKeyspaceRequest { + if m == nil { + return (*GetKeyspaceRequest)(nil) + } + r := &GetKeyspaceRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetKeyspacesRequest) CloneVT() *GetKeyspacesRequest { + if m == nil { + return (*GetKeyspacesRequest)(nil) + } + r := &GetKeyspacesRequest{} + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetKeyspacesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetKeyspacesResponse) CloneVT() *GetKeyspacesResponse { + if m == nil { + return (*GetKeyspacesResponse)(nil) + } + r := &GetKeyspacesResponse{} + if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make([]*Keyspace, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Keyspaces = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetKeyspacesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemaRequest) CloneVT() *GetSchemaRequest { + if m == nil { + return (*GetSchemaRequest)(nil) + } + r := &GetSchemaRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + Table: m.Table, + TableSizeOptions: m.TableSizeOptions.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemasRequest) CloneVT() *GetSchemasRequest { + if m == nil { + return (*GetSchemasRequest)(nil) + } + r := &GetSchemasRequest{ + TableSizeOptions: m.TableSizeOptions.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSchemasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemasResponse) CloneVT() *GetSchemasResponse { + if m == nil { + return (*GetSchemasResponse)(nil) + } + r := &GetSchemasResponse{} + if rhs := m.Schemas; rhs != nil { + tmpContainer := make([]*Schema, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Schemas = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSchemasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetShardReplicationPositionsRequest) CloneVT() *GetShardReplicationPositionsRequest { + if m == nil { + return (*GetShardReplicationPositionsRequest)(nil) + } + r := &GetShardReplicationPositionsRequest{} + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Keyspaces = tmpContainer + } + if rhs := m.KeyspaceShards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.KeyspaceShards = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetShardReplicationPositionsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetShardReplicationPositionsResponse) CloneVT() *GetShardReplicationPositionsResponse { + if m == nil { + return (*GetShardReplicationPositionsResponse)(nil) + } + r := &GetShardReplicationPositionsResponse{} + if rhs := m.ReplicationPositions; rhs != nil { + tmpContainer := make([]*ClusterShardReplicationPosition, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ReplicationPositions = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetShardReplicationPositionsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvKeyspaceRequest) CloneVT() *GetSrvKeyspaceRequest { + if m == nil { + return (*GetSrvKeyspaceRequest)(nil) + } + r := &GetSrvKeyspaceRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSrvKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvKeyspacesRequest) CloneVT() *GetSrvKeyspacesRequest { + if m == nil { + return (*GetSrvKeyspacesRequest)(nil) + } + r := &GetSrvKeyspacesRequest{} + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSrvKeyspacesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvKeyspacesResponse) CloneVT() *GetSrvKeyspacesResponse { + if m == nil { + return (*GetSrvKeyspacesResponse)(nil) + } + r := &GetSrvKeyspacesResponse{} + if rhs := m.SrvKeyspaces; rhs != nil { + tmpContainer := make(map[string]*vtctldata.GetSrvKeyspacesResponse, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.SrvKeyspaces = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSrvKeyspacesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvVSchemaRequest) CloneVT() *GetSrvVSchemaRequest { + if m == nil { + return (*GetSrvVSchemaRequest)(nil) + } + r := &GetSrvVSchemaRequest{ + ClusterId: m.ClusterId, + Cell: m.Cell, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSrvVSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvVSchemasRequest) CloneVT() *GetSrvVSchemasRequest { + if m == nil { + return (*GetSrvVSchemasRequest)(nil) + } + r := &GetSrvVSchemasRequest{} + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSrvVSchemasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvVSchemasResponse) CloneVT() *GetSrvVSchemasResponse { + if m == nil { + return (*GetSrvVSchemasResponse)(nil) + } + r := &GetSrvVSchemasResponse{} + if rhs := m.SrvVSchemas; rhs != nil { + tmpContainer := make([]*SrvVSchema, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.SrvVSchemas = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSrvVSchemasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemaTableSizeOptions) CloneVT() *GetSchemaTableSizeOptions { + if m == nil { + return (*GetSchemaTableSizeOptions)(nil) + } + r := &GetSchemaTableSizeOptions{ + AggregateSizes: m.AggregateSizes, + IncludeNonServingShards: m.IncludeNonServingShards, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSchemaTableSizeOptions) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetTabletRequest) CloneVT() *GetTabletRequest { + if m == nil { + return (*GetTabletRequest)(nil) + } + r := &GetTabletRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetTabletRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetTabletsRequest) CloneVT() *GetTabletsRequest { + if m == nil { + return (*GetTabletsRequest)(nil) + } + r := &GetTabletsRequest{} + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetTabletsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetTabletsResponse) CloneVT() *GetTabletsResponse { + if m == nil { + return (*GetTabletsResponse)(nil) + } + r := &GetTabletsResponse{} + if rhs := m.Tablets; rhs != nil { + tmpContainer := make([]*Tablet, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Tablets = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetTabletsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetTopologyPathRequest) CloneVT() *GetTopologyPathRequest { + if m == nil { + return (*GetTopologyPathRequest)(nil) + } + r := &GetTopologyPathRequest{ + ClusterId: m.ClusterId, + Path: m.Path, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetTopologyPathRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetVSchemaRequest) CloneVT() *GetVSchemaRequest { + if m == nil { + return (*GetVSchemaRequest)(nil) + } + r := &GetVSchemaRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetVSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetVSchemasRequest) CloneVT() *GetVSchemasRequest { + if m == nil { + return (*GetVSchemasRequest)(nil) + } + r := &GetVSchemasRequest{} + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetVSchemasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetVSchemasResponse) CloneVT() *GetVSchemasResponse { + if m == nil { + return (*GetVSchemasResponse)(nil) + } + r := &GetVSchemasResponse{} + if rhs := m.VSchemas; rhs != nil { + tmpContainer := make([]*VSchema, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.VSchemas = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetVSchemasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetVtctldsRequest) CloneVT() *GetVtctldsRequest { + if m == nil { + return (*GetVtctldsRequest)(nil) + } + r := &GetVtctldsRequest{} + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetVtctldsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetVtctldsResponse) CloneVT() *GetVtctldsResponse { + if m == nil { + return (*GetVtctldsResponse)(nil) + } + r := &GetVtctldsResponse{} + if rhs := m.Vtctlds; rhs != nil { + tmpContainer := make([]*Vtctld, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Vtctlds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetVtctldsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetWorkflowRequest) CloneVT() *GetWorkflowRequest { + if m == nil { + return (*GetWorkflowRequest)(nil) + } + r := &GetWorkflowRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + Name: m.Name, + ActiveOnly: m.ActiveOnly, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetWorkflowRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetWorkflowsRequest) CloneVT() *GetWorkflowsRequest { + if m == nil { + return (*GetWorkflowsRequest)(nil) + } + r := &GetWorkflowsRequest{ + ActiveOnly: m.ActiveOnly, + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Keyspaces = tmpContainer + } + if rhs := m.IgnoreKeyspaces; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.IgnoreKeyspaces = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetWorkflowsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetWorkflowsResponse) CloneVT() *GetWorkflowsResponse { + if m == nil { + return (*GetWorkflowsResponse)(nil) + } + r := &GetWorkflowsResponse{} + if rhs := m.WorkflowsByCluster; rhs != nil { + tmpContainer := make(map[string]*ClusterWorkflows, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.WorkflowsByCluster = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetWorkflowsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PingTabletRequest) CloneVT() *PingTabletRequest { + if m == nil { + return (*PingTabletRequest)(nil) + } + r := &PingTabletRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PingTabletRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PingTabletResponse) CloneVT() *PingTabletResponse { + if m == nil { + return (*PingTabletResponse)(nil) + } + r := &PingTabletResponse{ + Status: m.Status, + Cluster: m.Cluster.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PingTabletResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PlannedFailoverShardRequest) CloneVT() *PlannedFailoverShardRequest { + if m == nil { + return (*PlannedFailoverShardRequest)(nil) + } + r := &PlannedFailoverShardRequest{ + ClusterId: m.ClusterId, + Options: m.Options.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PlannedFailoverShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PlannedFailoverShardResponse) CloneVT() *PlannedFailoverShardResponse { + if m == nil { + return (*PlannedFailoverShardResponse)(nil) + } + r := &PlannedFailoverShardResponse{ + Cluster: m.Cluster.CloneVT(), + Keyspace: m.Keyspace, + Shard: m.Shard, + PromotedPrimary: m.PromotedPrimary.CloneVT(), + } + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PlannedFailoverShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RebuildKeyspaceGraphRequest) CloneVT() *RebuildKeyspaceGraphRequest { + if m == nil { + return (*RebuildKeyspaceGraphRequest)(nil) + } + r := &RebuildKeyspaceGraphRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + AllowPartial: m.AllowPartial, + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RebuildKeyspaceGraphRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RebuildKeyspaceGraphResponse) CloneVT() *RebuildKeyspaceGraphResponse { + if m == nil { + return (*RebuildKeyspaceGraphResponse)(nil) + } + r := &RebuildKeyspaceGraphResponse{ + Status: m.Status, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RebuildKeyspaceGraphResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshStateRequest) CloneVT() *RefreshStateRequest { + if m == nil { + return (*RefreshStateRequest)(nil) + } + r := &RefreshStateRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RefreshStateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshStateResponse) CloneVT() *RefreshStateResponse { + if m == nil { + return (*RefreshStateResponse)(nil) + } + r := &RefreshStateResponse{ + Status: m.Status, + Cluster: m.Cluster.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RefreshStateResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemasRequest) CloneVT() *ReloadSchemasRequest { + if m == nil { + return (*ReloadSchemasRequest)(nil) + } + r := &ReloadSchemasRequest{ + Concurrency: m.Concurrency, + WaitPosition: m.WaitPosition, + IncludePrimary: m.IncludePrimary, + } + if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Keyspaces = tmpContainer + } + if rhs := m.KeyspaceShards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.KeyspaceShards = tmpContainer + } + if rhs := m.Tablets; rhs != nil { + tmpContainer := make([]*topodata.TabletAlias, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Tablets = tmpContainer + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReloadSchemasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemasResponse_KeyspaceResult) CloneVT() *ReloadSchemasResponse_KeyspaceResult { + if m == nil { + return (*ReloadSchemasResponse_KeyspaceResult)(nil) + } + r := &ReloadSchemasResponse_KeyspaceResult{ + Keyspace: m.Keyspace.CloneVT(), + } + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReloadSchemasResponse_KeyspaceResult) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemasResponse_ShardResult) CloneVT() *ReloadSchemasResponse_ShardResult { + if m == nil { + return (*ReloadSchemasResponse_ShardResult)(nil) + } + r := &ReloadSchemasResponse_ShardResult{ + Shard: m.Shard.CloneVT(), + } + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReloadSchemasResponse_ShardResult) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemasResponse_TabletResult) CloneVT() *ReloadSchemasResponse_TabletResult { + if m == nil { + return (*ReloadSchemasResponse_TabletResult)(nil) + } + r := &ReloadSchemasResponse_TabletResult{ + Tablet: m.Tablet.CloneVT(), + Result: m.Result, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReloadSchemasResponse_TabletResult) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemasResponse) CloneVT() *ReloadSchemasResponse { + if m == nil { + return (*ReloadSchemasResponse)(nil) + } + r := &ReloadSchemasResponse{} + if rhs := m.KeyspaceResults; rhs != nil { + tmpContainer := make([]*ReloadSchemasResponse_KeyspaceResult, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.KeyspaceResults = tmpContainer + } + if rhs := m.ShardResults; rhs != nil { + tmpContainer := make([]*ReloadSchemasResponse_ShardResult, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ShardResults = tmpContainer + } + if rhs := m.TabletResults; rhs != nil { + tmpContainer := make([]*ReloadSchemasResponse_TabletResult, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TabletResults = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReloadSchemasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemaShardRequest) CloneVT() *ReloadSchemaShardRequest { + if m == nil { + return (*ReloadSchemaShardRequest)(nil) + } + r := &ReloadSchemaShardRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + Shard: m.Shard, + WaitPosition: m.WaitPosition, + IncludePrimary: m.IncludePrimary, + Concurrency: m.Concurrency, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReloadSchemaShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemaShardResponse) CloneVT() *ReloadSchemaShardResponse { + if m == nil { + return (*ReloadSchemaShardResponse)(nil) + } + r := &ReloadSchemaShardResponse{} + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReloadSchemaShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshTabletReplicationSourceRequest) CloneVT() *RefreshTabletReplicationSourceRequest { + if m == nil { + return (*RefreshTabletReplicationSourceRequest)(nil) + } + r := &RefreshTabletReplicationSourceRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RefreshTabletReplicationSourceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshTabletReplicationSourceResponse) CloneVT() *RefreshTabletReplicationSourceResponse { + if m == nil { + return (*RefreshTabletReplicationSourceResponse)(nil) + } + r := &RefreshTabletReplicationSourceResponse{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Primary: m.Primary.CloneVT(), + Cluster: m.Cluster.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RefreshTabletReplicationSourceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RemoveKeyspaceCellRequest) CloneVT() *RemoveKeyspaceCellRequest { + if m == nil { + return (*RemoveKeyspaceCellRequest)(nil) + } + r := &RemoveKeyspaceCellRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + Cell: m.Cell, + Force: m.Force, + Recursive: m.Recursive, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RemoveKeyspaceCellRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RemoveKeyspaceCellResponse) CloneVT() *RemoveKeyspaceCellResponse { + if m == nil { + return (*RemoveKeyspaceCellResponse)(nil) + } + r := &RemoveKeyspaceCellResponse{ + Status: m.Status, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RemoveKeyspaceCellResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RunHealthCheckRequest) CloneVT() *RunHealthCheckRequest { + if m == nil { + return (*RunHealthCheckRequest)(nil) + } + r := &RunHealthCheckRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RunHealthCheckRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RunHealthCheckResponse) CloneVT() *RunHealthCheckResponse { + if m == nil { + return (*RunHealthCheckResponse)(nil) + } + r := &RunHealthCheckResponse{ + Status: m.Status, + Cluster: m.Cluster.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RunHealthCheckResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetReadOnlyRequest) CloneVT() *SetReadOnlyRequest { + if m == nil { + return (*SetReadOnlyRequest)(nil) + } + r := &SetReadOnlyRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SetReadOnlyRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetReadOnlyResponse) CloneVT() *SetReadOnlyResponse { + if m == nil { + return (*SetReadOnlyResponse)(nil) + } + r := &SetReadOnlyResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SetReadOnlyResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetReadWriteRequest) CloneVT() *SetReadWriteRequest { + if m == nil { + return (*SetReadWriteRequest)(nil) + } + r := &SetReadWriteRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SetReadWriteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetReadWriteResponse) CloneVT() *SetReadWriteResponse { + if m == nil { + return (*SetReadWriteResponse)(nil) + } + r := &SetReadWriteResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SetReadWriteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StartReplicationRequest) CloneVT() *StartReplicationRequest { + if m == nil { + return (*StartReplicationRequest)(nil) + } + r := &StartReplicationRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StartReplicationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StartReplicationResponse) CloneVT() *StartReplicationResponse { + if m == nil { + return (*StartReplicationResponse)(nil) + } + r := &StartReplicationResponse{ + Status: m.Status, + Cluster: m.Cluster.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StartReplicationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StopReplicationRequest) CloneVT() *StopReplicationRequest { + if m == nil { + return (*StopReplicationRequest)(nil) + } + r := &StopReplicationRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StopReplicationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StopReplicationResponse) CloneVT() *StopReplicationResponse { + if m == nil { + return (*StopReplicationResponse)(nil) + } + r := &StopReplicationResponse{ + Status: m.Status, + Cluster: m.Cluster.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StopReplicationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TabletExternallyPromotedRequest) CloneVT() *TabletExternallyPromotedRequest { + if m == nil { + return (*TabletExternallyPromotedRequest)(nil) + } + r := &TabletExternallyPromotedRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TabletExternallyPromotedRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TabletExternallyPromotedResponse) CloneVT() *TabletExternallyPromotedResponse { + if m == nil { + return (*TabletExternallyPromotedResponse)(nil) + } + r := &TabletExternallyPromotedResponse{ + Cluster: m.Cluster.CloneVT(), + Keyspace: m.Keyspace, + Shard: m.Shard, + NewPrimary: m.NewPrimary.CloneVT(), + OldPrimary: m.OldPrimary.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TabletExternallyPromotedResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TabletExternallyReparentedRequest) CloneVT() *TabletExternallyReparentedRequest { + if m == nil { + return (*TabletExternallyReparentedRequest)(nil) + } + r := &TabletExternallyReparentedRequest{ + Alias: m.Alias.CloneVT(), + } + if rhs := m.ClusterIds; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ClusterIds = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TabletExternallyReparentedRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateRequest) CloneVT() *ValidateRequest { + if m == nil { + return (*ValidateRequest)(nil) + } + r := &ValidateRequest{ + ClusterId: m.ClusterId, + PingTablets: m.PingTablets, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ValidateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateKeyspaceRequest) CloneVT() *ValidateKeyspaceRequest { + if m == nil { + return (*ValidateKeyspaceRequest)(nil) + } + r := &ValidateKeyspaceRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + PingTablets: m.PingTablets, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ValidateKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateSchemaKeyspaceRequest) CloneVT() *ValidateSchemaKeyspaceRequest { + if m == nil { + return (*ValidateSchemaKeyspaceRequest)(nil) + } + r := &ValidateSchemaKeyspaceRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ValidateSchemaKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateShardRequest) CloneVT() *ValidateShardRequest { + if m == nil { + return (*ValidateShardRequest)(nil) + } + r := &ValidateShardRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + Shard: m.Shard, + PingTablets: m.PingTablets, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ValidateShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateVersionKeyspaceRequest) CloneVT() *ValidateVersionKeyspaceRequest { + if m == nil { + return (*ValidateVersionKeyspaceRequest)(nil) + } + r := &ValidateVersionKeyspaceRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ValidateVersionKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateVersionShardRequest) CloneVT() *ValidateVersionShardRequest { + if m == nil { + return (*ValidateVersionShardRequest)(nil) + } + r := &ValidateVersionShardRequest{ + ClusterId: m.ClusterId, + Keyspace: m.Keyspace, + Shard: m.Shard, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ValidateVersionShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VTExplainRequest) CloneVT() *VTExplainRequest { + if m == nil { + return (*VTExplainRequest)(nil) + } + r := &VTExplainRequest{ + Cluster: m.Cluster, + Keyspace: m.Keyspace, + Sql: m.Sql, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VTExplainRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VTExplainResponse) CloneVT() *VTExplainResponse { + if m == nil { + return (*VTExplainResponse)(nil) + } + r := &VTExplainResponse{ + Response: m.Response, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VTExplainResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Cluster) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -2538,6 +4877,168 @@ func (m *GetShardReplicationPositionsResponse) MarshalToSizedBufferVT(dAtA []byt return len(dAtA) - i, nil } +func (m *GetSrvKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSrvKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetSrvKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x12 + } + if len(m.ClusterId) > 0 { + i -= len(m.ClusterId) + copy(dAtA[i:], m.ClusterId) + i = encodeVarint(dAtA, i, uint64(len(m.ClusterId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetSrvKeyspacesRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSrvKeyspacesRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetSrvKeyspacesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ClusterIds) > 0 { + for iNdEx := len(m.ClusterIds) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClusterIds[iNdEx]) + copy(dAtA[i:], m.ClusterIds[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.ClusterIds[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetSrvKeyspacesResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSrvKeyspacesResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetSrvKeyspacesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SrvKeyspaces) > 0 { + for k := range m.SrvKeyspaces { + v := m.SrvKeyspaces[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *GetSrvVSchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -6420,29 +8921,97 @@ func (m *GetSchemasRequest) SizeVT() (n int) { } var l int _ = l - if len(m.ClusterIds) > 0 { - for _, s := range m.ClusterIds { - l = len(s) + if len(m.ClusterIds) > 0 { + for _, s := range m.ClusterIds { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.TableSizeOptions != nil { + l = m.TableSizeOptions.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetSchemasResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Schemas) > 0 { + for _, e := range m.Schemas { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetShardReplicationPositionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ClusterIds) > 0 { + for _, s := range m.ClusterIds { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Keyspaces) > 0 { + for _, s := range m.Keyspaces { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.KeyspaceShards) > 0 { + for _, s := range m.KeyspaceShards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetShardReplicationPositionsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ReplicationPositions) > 0 { + for _, e := range m.ReplicationPositions { + l = e.SizeVT() n += 1 + l + sov(uint64(l)) } } - if m.TableSizeOptions != nil { - l = m.TableSizeOptions.SizeVT() - n += 1 + l + sov(uint64(l)) - } n += len(m.unknownFields) return n } -func (m *GetSchemasResponse) SizeVT() (n int) { +func (m *GetSrvKeyspaceRequest) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Schemas) > 0 { - for _, e := range m.Schemas { - l = e.SizeVT() + l = len(m.ClusterId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) n += 1 + l + sov(uint64(l)) } } @@ -6450,7 +9019,7 @@ func (m *GetSchemasResponse) SizeVT() (n int) { return n } -func (m *GetShardReplicationPositionsRequest) SizeVT() (n int) { +func (m *GetSrvKeyspacesRequest) SizeVT() (n int) { if m == nil { return 0 } @@ -6462,14 +9031,8 @@ func (m *GetShardReplicationPositionsRequest) SizeVT() (n int) { n += 1 + l + sov(uint64(l)) } } - if len(m.Keyspaces) > 0 { - for _, s := range m.Keyspaces { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - if len(m.KeyspaceShards) > 0 { - for _, s := range m.KeyspaceShards { + if len(m.Cells) > 0 { + for _, s := range m.Cells { l = len(s) n += 1 + l + sov(uint64(l)) } @@ -6478,16 +9041,23 @@ func (m *GetShardReplicationPositionsRequest) SizeVT() (n int) { return n } -func (m *GetShardReplicationPositionsResponse) SizeVT() (n int) { +func (m *GetSrvKeyspacesResponse) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.ReplicationPositions) > 0 { - for _, e := range m.ReplicationPositions { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.SrvKeyspaces) > 0 { + for k, v := range m.SrvKeyspaces { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) } } n += len(m.unknownFields) @@ -12023,7 +14593,227 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { if err := m.RequestOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } - iNdEx = postIndex + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetBackupsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetBackupsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Backups = append(m.Backups, &ClusterBackup{}) + if err := m.Backups[len(m.Backups)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellInfosRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellInfosRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellInfosRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NamesOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NamesOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -12046,7 +14836,7 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetCellInfosResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12069,15 +14859,15 @@ func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetBackupsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetCellInfosResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetBackupsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetCellInfosResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CellInfos", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12104,8 +14894,8 @@ func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Backups = append(m.Backups, &ClusterBackup{}) - if err := m.Backups[len(m.Backups)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.CellInfos = append(m.CellInfos, &ClusterCellInfo{}) + if err := m.CellInfos[len(m.CellInfos)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12131,7 +14921,7 @@ func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellInfosRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12154,10 +14944,10 @@ func (m *GetCellInfosRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellInfosRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetCellsAliasesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellInfosRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetCellsAliasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -12192,58 +14982,6 @@ func (m *GetCellInfosRequest) UnmarshalVT(dAtA []byte) error { } m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NamesOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.NamesOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -12266,7 +15004,7 @@ func (m *GetCellInfosRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellInfosResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12289,15 +15027,15 @@ func (m *GetCellInfosResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellInfosResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetCellsAliasesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellInfosResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetCellsAliasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CellInfos", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12324,8 +15062,8 @@ func (m *GetCellInfosResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.CellInfos = append(m.CellInfos, &ClusterCellInfo{}) - if err := m.CellInfos[len(m.CellInfos)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Aliases = append(m.Aliases, &ClusterCellsAliases{}) + if err := m.Aliases[len(m.Aliases)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12351,7 +15089,7 @@ func (m *GetCellInfosResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetClustersRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12374,17 +15112,68 @@ func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellsAliasesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetClustersRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellsAliasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetClustersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetClustersResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetClustersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetClustersResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Clusters", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -12394,23 +15183,25 @@ func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + m.Clusters = append(m.Clusters, &Cluster{}) + if err := m.Clusters[len(m.Clusters)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -12434,7 +15225,7 @@ func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12457,15 +15248,47 @@ func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellsAliasesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetFullStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellsAliasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetFullStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12492,8 +15315,10 @@ func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Aliases = append(m.Aliases, &ClusterCellsAliases{}) - if err := m.Aliases[len(m.Aliases)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Alias == nil { + m.Alias = &topodata.TabletAlias{} + } + if err := m.Alias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12519,7 +15344,7 @@ func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetClustersRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetGatesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12538,16 +15363,48 @@ func (m *GetClustersRequest) UnmarshalVT(dAtA []byte) error { if b < 0x80 { break } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetClustersRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetClustersRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetGatesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetGatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -12570,7 +15427,7 @@ func (m *GetClustersRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetClustersResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetGatesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12593,15 +15450,15 @@ func (m *GetClustersResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetClustersResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetGatesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetClustersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetGatesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Clusters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Gates", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12628,8 +15485,8 @@ func (m *GetClustersResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Clusters = append(m.Clusters, &Cluster{}) - if err := m.Clusters[len(m.Clusters)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Gates = append(m.Gates, &VTGate{}) + if err := m.Gates[len(m.Gates)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12655,7 +15512,7 @@ func (m *GetClustersResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12678,10 +15535,10 @@ func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetFullStatusRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetKeyspaceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetFullStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -12718,9 +15575,9 @@ func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Alias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -12730,27 +15587,23 @@ func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Alias == nil { - m.Alias = &topodata.TabletAlias{} - } - if err := m.Alias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -12774,7 +15627,7 @@ func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetGatesRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12797,10 +15650,10 @@ func (m *GetGatesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetGatesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetKeyspacesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetGatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -12857,7 +15710,7 @@ func (m *GetGatesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetGatesResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12880,15 +15733,15 @@ func (m *GetGatesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetGatesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetKeyspacesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetGatesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Gates", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12915,8 +15768,8 @@ func (m *GetGatesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Gates = append(m.Gates, &VTGate{}) - if err := m.Gates[len(m.Gates)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Keyspaces = append(m.Keyspaces, &Keyspace{}) + if err := m.Keyspaces[len(m.Keyspaces)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12942,7 +15795,7 @@ func (m *GetGatesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12965,10 +15818,10 @@ func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13035,6 +15888,74 @@ func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Table = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSizeOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TableSizeOptions == nil { + m.TableSizeOptions = &GetSchemaTableSizeOptions{} + } + if err := m.TableSizeOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13057,7 +15978,7 @@ func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemasRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13080,10 +16001,10 @@ func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetKeyspacesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemasRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemasRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13118,6 +16039,42 @@ func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { } m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSizeOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TableSizeOptions == nil { + m.TableSizeOptions = &GetSchemaTableSizeOptions{} + } + if err := m.TableSizeOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13140,7 +16097,7 @@ func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemasResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13163,15 +16120,15 @@ func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetKeyspacesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemasResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemasResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Schemas", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13198,8 +16155,8 @@ func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspaces = append(m.Keyspaces, &Keyspace{}) - if err := m.Keyspaces[len(m.Keyspaces)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Schemas = append(m.Schemas, &Schema{}) + if err := m.Schemas[len(m.Schemas)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13225,7 +16182,7 @@ func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetShardReplicationPositionsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13248,15 +16205,15 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetShardReplicationPositionsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetShardReplicationPositionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13284,11 +16241,11 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterId = string(dAtA[iNdEx:postIndex]) + m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13316,11 +16273,11 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Keyspaces = append(m.Keyspaces, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KeyspaceShards", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13348,11 +16305,62 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Table = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + m.KeyspaceShards = append(m.KeyspaceShards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetShardReplicationPositionsResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetShardReplicationPositionsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetShardReplicationPositionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TableSizeOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReplicationPositions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13379,10 +16387,8 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TableSizeOptions == nil { - m.TableSizeOptions = &GetSchemaTableSizeOptions{} - } - if err := m.TableSizeOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.ReplicationPositions = append(m.ReplicationPositions, &ClusterShardReplicationPosition{}) + if err := m.ReplicationPositions[len(m.ReplicationPositions)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13408,7 +16414,7 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSchemasRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvKeyspaceRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13431,15 +16437,15 @@ func (m *GetSchemasRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSchemasRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSrvKeyspaceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSchemasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSrvKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13467,13 +16473,13 @@ func (m *GetSchemasRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + m.ClusterId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TableSizeOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13483,84 +16489,29 @@ func (m *GetSchemasRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TableSizeOptions == nil { - m.TableSizeOptions = &GetSchemaTableSizeOptions{} - } - if err := m.TableSizeOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetSchemasResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetSchemasResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetSchemasResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schemas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13570,25 +16521,23 @@ func (m *GetSchemasResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Schemas = append(m.Schemas, &Schema{}) - if err := m.Schemas[len(m.Schemas)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -13612,7 +16561,7 @@ func (m *GetSchemasResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetShardReplicationPositionsRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvKeyspacesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13635,10 +16584,10 @@ func (m *GetShardReplicationPositionsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetShardReplicationPositionsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSrvKeyspacesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetShardReplicationPositionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSrvKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13675,39 +16624,7 @@ func (m *GetShardReplicationPositionsRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspaces = append(m.Keyspaces, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyspaceShards", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13735,7 +16652,7 @@ func (m *GetShardReplicationPositionsRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.KeyspaceShards = append(m.KeyspaceShards, string(dAtA[iNdEx:postIndex])) + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -13759,7 +16676,7 @@ func (m *GetShardReplicationPositionsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetShardReplicationPositionsResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvKeyspacesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13782,15 +16699,15 @@ func (m *GetShardReplicationPositionsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetShardReplicationPositionsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetSrvKeyspacesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetShardReplicationPositionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSrvKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReplicationPositions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SrvKeyspaces", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13817,10 +16734,105 @@ func (m *GetShardReplicationPositionsResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ReplicationPositions = append(m.ReplicationPositions, &ClusterShardReplicationPosition{}) - if err := m.ReplicationPositions[len(m.ReplicationPositions)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.SrvKeyspaces == nil { + m.SrvKeyspaces = make(map[string]*vtctldata.GetSrvKeyspacesResponse) + } + var mapkey string + var mapvalue *vtctldata.GetSrvKeyspacesResponse + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &vtctldata.GetSrvKeyspacesResponse{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.SrvKeyspaces[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex diff --git a/go/vt/proto/vtctldata/vtctldata.pb.go b/go/vt/proto/vtctldata/vtctldata.pb.go index fd1ab5e11e6..4f18f56bffb 100644 --- a/go/vt/proto/vtctldata/vtctldata.pb.go +++ b/go/vt/proto/vtctldata/vtctldata.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: vtctldata.proto @@ -101,6 +101,185 @@ func (MaterializationIntent) EnumDescriptor() ([]byte, []int) { return file_vtctldata_proto_rawDescGZIP(), []int{0} } +type QueryOrdering int32 + +const ( + QueryOrdering_NONE QueryOrdering = 0 + QueryOrdering_ASCENDING QueryOrdering = 1 + QueryOrdering_DESCENDING QueryOrdering = 2 +) + +// Enum value maps for QueryOrdering. +var ( + QueryOrdering_name = map[int32]string{ + 0: "NONE", + 1: "ASCENDING", + 2: "DESCENDING", + } + QueryOrdering_value = map[string]int32{ + "NONE": 0, + "ASCENDING": 1, + "DESCENDING": 2, + } +) + +func (x QueryOrdering) Enum() *QueryOrdering { + p := new(QueryOrdering) + *p = x + return p +} + +func (x QueryOrdering) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (QueryOrdering) Descriptor() protoreflect.EnumDescriptor { + return file_vtctldata_proto_enumTypes[1].Descriptor() +} + +func (QueryOrdering) Type() protoreflect.EnumType { + return &file_vtctldata_proto_enumTypes[1] +} + +func (x QueryOrdering) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use QueryOrdering.Descriptor instead. +func (QueryOrdering) EnumDescriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{1} +} + +type SchemaMigration_Strategy int32 + +const ( + // SchemaMigration_VITESS uses vreplication to run the schema migration. It is + // the default strategy for OnlineDDL requests. + // + // SchemaMigration_VITESS was also formerly called "ONLINE". + SchemaMigration_VITESS SchemaMigration_Strategy = 0 + SchemaMigration_ONLINE SchemaMigration_Strategy = 0 + SchemaMigration_GHOST SchemaMigration_Strategy = 1 + SchemaMigration_PTOSC SchemaMigration_Strategy = 2 + // SchemaMigration_DIRECT runs the migration directly against MySQL (e.g. `ALTER TABLE ...`), + // meaning it is not actually an "online" DDL migration. + SchemaMigration_DIRECT SchemaMigration_Strategy = 3 + // SchemaMigration_MYSQL is a managed migration (queued and executed by the + // scheduler) but runs through a MySQL `ALTER TABLE`. + SchemaMigration_MYSQL SchemaMigration_Strategy = 4 +) + +// Enum value maps for SchemaMigration_Strategy. +var ( + SchemaMigration_Strategy_name = map[int32]string{ + 0: "VITESS", + // Duplicate value: 0: "ONLINE", + 1: "GHOST", + 2: "PTOSC", + 3: "DIRECT", + 4: "MYSQL", + } + SchemaMigration_Strategy_value = map[string]int32{ + "VITESS": 0, + "ONLINE": 0, + "GHOST": 1, + "PTOSC": 2, + "DIRECT": 3, + "MYSQL": 4, + } +) + +func (x SchemaMigration_Strategy) Enum() *SchemaMigration_Strategy { + p := new(SchemaMigration_Strategy) + *p = x + return p +} + +func (x SchemaMigration_Strategy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SchemaMigration_Strategy) Descriptor() protoreflect.EnumDescriptor { + return file_vtctldata_proto_enumTypes[2].Descriptor() +} + +func (SchemaMigration_Strategy) Type() protoreflect.EnumType { + return &file_vtctldata_proto_enumTypes[2] +} + +func (x SchemaMigration_Strategy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SchemaMigration_Strategy.Descriptor instead. +func (SchemaMigration_Strategy) EnumDescriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{5, 0} +} + +type SchemaMigration_Status int32 + +const ( + SchemaMigration_UNKNOWN SchemaMigration_Status = 0 + SchemaMigration_REQUESTED SchemaMigration_Status = 1 + SchemaMigration_CANCELLED SchemaMigration_Status = 2 + SchemaMigration_QUEUED SchemaMigration_Status = 3 + SchemaMigration_READY SchemaMigration_Status = 4 + SchemaMigration_RUNNING SchemaMigration_Status = 5 + SchemaMigration_COMPLETE SchemaMigration_Status = 6 + SchemaMigration_FAILED SchemaMigration_Status = 7 +) + +// Enum value maps for SchemaMigration_Status. +var ( + SchemaMigration_Status_name = map[int32]string{ + 0: "UNKNOWN", + 1: "REQUESTED", + 2: "CANCELLED", + 3: "QUEUED", + 4: "READY", + 5: "RUNNING", + 6: "COMPLETE", + 7: "FAILED", + } + SchemaMigration_Status_value = map[string]int32{ + "UNKNOWN": 0, + "REQUESTED": 1, + "CANCELLED": 2, + "QUEUED": 3, + "READY": 4, + "RUNNING": 5, + "COMPLETE": 6, + "FAILED": 7, + } +) + +func (x SchemaMigration_Status) Enum() *SchemaMigration_Status { + p := new(SchemaMigration_Status) + *p = x + return p +} + +func (x SchemaMigration_Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SchemaMigration_Status) Descriptor() protoreflect.EnumDescriptor { + return file_vtctldata_proto_enumTypes[3].Descriptor() +} + +func (SchemaMigration_Status) Type() protoreflect.EnumType { + return &file_vtctldata_proto_enumTypes[3] +} + +func (x SchemaMigration_Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SchemaMigration_Status.Descriptor instead. +func (SchemaMigration_Status) EnumDescriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{5, 1} +} + // ExecuteVtctlCommandRequest is the payload for ExecuteVtctlCommand. // timeouts are in nanoseconds. type ExecuteVtctlCommandRequest struct { @@ -304,7 +483,9 @@ type MaterializeSettings struct { // OnDdl specifies the action to be taken when a DDL is encountered. OnDdl string `protobuf:"bytes,13,opt,name=on_ddl,json=onDdl,proto3" json:"on_ddl,omitempty"` // DeferSecondaryKeys specifies if secondary keys should be created in one shot after table copy finishes. - DeferSecondaryKeys bool `protobuf:"varint,14,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"` + DeferSecondaryKeys bool `protobuf:"varint,14,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"` + TabletSelectionPreference tabletmanagerdata.TabletSelectionPreference `protobuf:"varint,15,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` + AtomicCopy bool `protobuf:"varint,16,opt,name=atomic_copy,json=atomicCopy,proto3" json:"atomic_copy,omitempty"` } func (x *MaterializeSettings) Reset() { @@ -437,6 +618,20 @@ func (x *MaterializeSettings) GetDeferSecondaryKeys() bool { return false } +func (x *MaterializeSettings) GetTabletSelectionPreference() tabletmanagerdata.TabletSelectionPreference { + if x != nil { + return x.TabletSelectionPreference + } + return tabletmanagerdata.TabletSelectionPreference(0) +} + +func (x *MaterializeSettings) GetAtomicCopy() bool { + if x != nil { + return x.AtomicCopy + } + return false +} + type Keyspace struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -492,18 +687,69 @@ func (x *Keyspace) GetKeyspace() *topodata.Keyspace { return nil } -type Shard struct { +// SchemaMigration represents a row in the schema_migrations sidecar table. +type SchemaMigration struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Shard *topodata.Shard `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` -} - -func (x *Shard) Reset() { - *x = Shard{} + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` + Schema string `protobuf:"bytes,4,opt,name=schema,proto3" json:"schema,omitempty"` + Table string `protobuf:"bytes,5,opt,name=table,proto3" json:"table,omitempty"` + MigrationStatement string `protobuf:"bytes,6,opt,name=migration_statement,json=migrationStatement,proto3" json:"migration_statement,omitempty"` + Strategy SchemaMigration_Strategy `protobuf:"varint,7,opt,name=strategy,proto3,enum=vtctldata.SchemaMigration_Strategy" json:"strategy,omitempty"` + Options string `protobuf:"bytes,8,opt,name=options,proto3" json:"options,omitempty"` + AddedAt *vttime.Time `protobuf:"bytes,9,opt,name=added_at,json=addedAt,proto3" json:"added_at,omitempty"` + RequestedAt *vttime.Time `protobuf:"bytes,10,opt,name=requested_at,json=requestedAt,proto3" json:"requested_at,omitempty"` + ReadyAt *vttime.Time `protobuf:"bytes,11,opt,name=ready_at,json=readyAt,proto3" json:"ready_at,omitempty"` + StartedAt *vttime.Time `protobuf:"bytes,12,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + LivenessTimestamp *vttime.Time `protobuf:"bytes,13,opt,name=liveness_timestamp,json=livenessTimestamp,proto3" json:"liveness_timestamp,omitempty"` + CompletedAt *vttime.Time `protobuf:"bytes,14,opt,name=completed_at,json=completedAt,proto3" json:"completed_at,omitempty"` + CleanedUpAt *vttime.Time `protobuf:"bytes,15,opt,name=cleaned_up_at,json=cleanedUpAt,proto3" json:"cleaned_up_at,omitempty"` + Status SchemaMigration_Status `protobuf:"varint,16,opt,name=status,proto3,enum=vtctldata.SchemaMigration_Status" json:"status,omitempty"` + LogPath string `protobuf:"bytes,17,opt,name=log_path,json=logPath,proto3" json:"log_path,omitempty"` + Artifacts string `protobuf:"bytes,18,opt,name=artifacts,proto3" json:"artifacts,omitempty"` + Retries uint64 `protobuf:"varint,19,opt,name=retries,proto3" json:"retries,omitempty"` + Tablet *topodata.TabletAlias `protobuf:"bytes,20,opt,name=tablet,proto3" json:"tablet,omitempty"` + TabletFailure bool `protobuf:"varint,21,opt,name=tablet_failure,json=tabletFailure,proto3" json:"tablet_failure,omitempty"` + Progress float32 `protobuf:"fixed32,22,opt,name=progress,proto3" json:"progress,omitempty"` + MigrationContext string `protobuf:"bytes,23,opt,name=migration_context,json=migrationContext,proto3" json:"migration_context,omitempty"` + DdlAction string `protobuf:"bytes,24,opt,name=ddl_action,json=ddlAction,proto3" json:"ddl_action,omitempty"` + Message string `protobuf:"bytes,25,opt,name=message,proto3" json:"message,omitempty"` + EtaSeconds int64 `protobuf:"varint,26,opt,name=eta_seconds,json=etaSeconds,proto3" json:"eta_seconds,omitempty"` + RowsCopied uint64 `protobuf:"varint,27,opt,name=rows_copied,json=rowsCopied,proto3" json:"rows_copied,omitempty"` + TableRows int64 `protobuf:"varint,28,opt,name=table_rows,json=tableRows,proto3" json:"table_rows,omitempty"` + AddedUniqueKeys uint32 `protobuf:"varint,29,opt,name=added_unique_keys,json=addedUniqueKeys,proto3" json:"added_unique_keys,omitempty"` + RemovedUniqueKeys uint32 `protobuf:"varint,30,opt,name=removed_unique_keys,json=removedUniqueKeys,proto3" json:"removed_unique_keys,omitempty"` + LogFile string `protobuf:"bytes,31,opt,name=log_file,json=logFile,proto3" json:"log_file,omitempty"` + ArtifactRetention *vttime.Duration `protobuf:"bytes,32,opt,name=artifact_retention,json=artifactRetention,proto3" json:"artifact_retention,omitempty"` + PostponeCompletion bool `protobuf:"varint,33,opt,name=postpone_completion,json=postponeCompletion,proto3" json:"postpone_completion,omitempty"` + RemovedUniqueKeyNames string `protobuf:"bytes,34,opt,name=removed_unique_key_names,json=removedUniqueKeyNames,proto3" json:"removed_unique_key_names,omitempty"` + DroppedNoDefaultColumnNames string `protobuf:"bytes,35,opt,name=dropped_no_default_column_names,json=droppedNoDefaultColumnNames,proto3" json:"dropped_no_default_column_names,omitempty"` + ExpandedColumnNames string `protobuf:"bytes,36,opt,name=expanded_column_names,json=expandedColumnNames,proto3" json:"expanded_column_names,omitempty"` + RevertibleNotes string `protobuf:"bytes,37,opt,name=revertible_notes,json=revertibleNotes,proto3" json:"revertible_notes,omitempty"` + AllowConcurrent bool `protobuf:"varint,38,opt,name=allow_concurrent,json=allowConcurrent,proto3" json:"allow_concurrent,omitempty"` + RevertedUuid string `protobuf:"bytes,39,opt,name=reverted_uuid,json=revertedUuid,proto3" json:"reverted_uuid,omitempty"` + IsView bool `protobuf:"varint,40,opt,name=is_view,json=isView,proto3" json:"is_view,omitempty"` + ReadyToComplete bool `protobuf:"varint,41,opt,name=ready_to_complete,json=readyToComplete,proto3" json:"ready_to_complete,omitempty"` + VitessLivenessIndicator int64 `protobuf:"varint,42,opt,name=vitess_liveness_indicator,json=vitessLivenessIndicator,proto3" json:"vitess_liveness_indicator,omitempty"` + UserThrottleRatio float32 `protobuf:"fixed32,43,opt,name=user_throttle_ratio,json=userThrottleRatio,proto3" json:"user_throttle_ratio,omitempty"` + SpecialPlan string `protobuf:"bytes,44,opt,name=special_plan,json=specialPlan,proto3" json:"special_plan,omitempty"` + LastThrottledAt *vttime.Time `protobuf:"bytes,45,opt,name=last_throttled_at,json=lastThrottledAt,proto3" json:"last_throttled_at,omitempty"` + ComponentThrottled string `protobuf:"bytes,46,opt,name=component_throttled,json=componentThrottled,proto3" json:"component_throttled,omitempty"` + CancelledAt *vttime.Time `protobuf:"bytes,47,opt,name=cancelled_at,json=cancelledAt,proto3" json:"cancelled_at,omitempty"` + PostponeLaunch bool `protobuf:"varint,48,opt,name=postpone_launch,json=postponeLaunch,proto3" json:"postpone_launch,omitempty"` + Stage string `protobuf:"bytes,49,opt,name=stage,proto3" json:"stage,omitempty"` // enum? + CutoverAttempts uint32 `protobuf:"varint,50,opt,name=cutover_attempts,json=cutoverAttempts,proto3" json:"cutover_attempts,omitempty"` + IsImmediateOperation bool `protobuf:"varint,51,opt,name=is_immediate_operation,json=isImmediateOperation,proto3" json:"is_immediate_operation,omitempty"` + ReviewedAt *vttime.Time `protobuf:"bytes,52,opt,name=reviewed_at,json=reviewedAt,proto3" json:"reviewed_at,omitempty"` + ReadyToCompleteAt *vttime.Time `protobuf:"bytes,53,opt,name=ready_to_complete_at,json=readyToCompleteAt,proto3" json:"ready_to_complete_at,omitempty"` +} + +func (x *SchemaMigration) Reset() { + *x = SchemaMigration{} if protoimpl.UnsafeEnabled { mi := &file_vtctldata_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -511,13 +757,13 @@ func (x *Shard) Reset() { } } -func (x *Shard) String() string { +func (x *SchemaMigration) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Shard) ProtoMessage() {} +func (*SchemaMigration) ProtoMessage() {} -func (x *Shard) ProtoReflect() protoreflect.Message { +func (x *SchemaMigration) ProtoReflect() protoreflect.Message { mi := &file_vtctldata_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -529,406 +775,409 @@ func (x *Shard) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Shard.ProtoReflect.Descriptor instead. -func (*Shard) Descriptor() ([]byte, []int) { +// Deprecated: Use SchemaMigration.ProtoReflect.Descriptor instead. +func (*SchemaMigration) Descriptor() ([]byte, []int) { return file_vtctldata_proto_rawDescGZIP(), []int{5} } -func (x *Shard) GetKeyspace() string { +func (x *SchemaMigration) GetUuid() string { if x != nil { - return x.Keyspace + return x.Uuid } return "" } -func (x *Shard) GetName() string { +func (x *SchemaMigration) GetKeyspace() string { if x != nil { - return x.Name + return x.Keyspace } return "" } -func (x *Shard) GetShard() *topodata.Shard { +func (x *SchemaMigration) GetShard() string { if x != nil { return x.Shard } - return nil + return "" } -// TODO: comment the hell out of this. -type Workflow struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *SchemaMigration) GetSchema() string { + if x != nil { + return x.Schema + } + return "" +} - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Source *Workflow_ReplicationLocation `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` - Target *Workflow_ReplicationLocation `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` - MaxVReplicationLag int64 `protobuf:"varint,4,opt,name=max_v_replication_lag,json=maxVReplicationLag,proto3" json:"max_v_replication_lag,omitempty"` - ShardStreams map[string]*Workflow_ShardStream `protobuf:"bytes,5,rep,name=shard_streams,json=shardStreams,proto3" json:"shard_streams,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - WorkflowType string `protobuf:"bytes,6,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` - WorkflowSubType string `protobuf:"bytes,7,opt,name=workflow_sub_type,json=workflowSubType,proto3" json:"workflow_sub_type,omitempty"` +func (x *SchemaMigration) GetTable() string { + if x != nil { + return x.Table + } + return "" } -func (x *Workflow) Reset() { - *x = Workflow{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *SchemaMigration) GetMigrationStatement() string { + if x != nil { + return x.MigrationStatement } + return "" } -func (x *Workflow) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *SchemaMigration) GetStrategy() SchemaMigration_Strategy { + if x != nil { + return x.Strategy + } + return SchemaMigration_VITESS } -func (*Workflow) ProtoMessage() {} +func (x *SchemaMigration) GetOptions() string { + if x != nil { + return x.Options + } + return "" +} -func (x *Workflow) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *SchemaMigration) GetAddedAt() *vttime.Time { + if x != nil { + return x.AddedAt } - return mi.MessageOf(x) + return nil } -// Deprecated: Use Workflow.ProtoReflect.Descriptor instead. -func (*Workflow) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{6} +func (x *SchemaMigration) GetRequestedAt() *vttime.Time { + if x != nil { + return x.RequestedAt + } + return nil } -func (x *Workflow) GetName() string { +func (x *SchemaMigration) GetReadyAt() *vttime.Time { if x != nil { - return x.Name + return x.ReadyAt } - return "" + return nil } -func (x *Workflow) GetSource() *Workflow_ReplicationLocation { +func (x *SchemaMigration) GetStartedAt() *vttime.Time { if x != nil { - return x.Source + return x.StartedAt } return nil } -func (x *Workflow) GetTarget() *Workflow_ReplicationLocation { +func (x *SchemaMigration) GetLivenessTimestamp() *vttime.Time { if x != nil { - return x.Target + return x.LivenessTimestamp } return nil } -func (x *Workflow) GetMaxVReplicationLag() int64 { +func (x *SchemaMigration) GetCompletedAt() *vttime.Time { if x != nil { - return x.MaxVReplicationLag + return x.CompletedAt } - return 0 + return nil } -func (x *Workflow) GetShardStreams() map[string]*Workflow_ShardStream { +func (x *SchemaMigration) GetCleanedUpAt() *vttime.Time { if x != nil { - return x.ShardStreams + return x.CleanedUpAt } return nil } -func (x *Workflow) GetWorkflowType() string { +func (x *SchemaMigration) GetStatus() SchemaMigration_Status { if x != nil { - return x.WorkflowType + return x.Status } - return "" + return SchemaMigration_UNKNOWN } -func (x *Workflow) GetWorkflowSubType() string { +func (x *SchemaMigration) GetLogPath() string { if x != nil { - return x.WorkflowSubType + return x.LogPath } return "" } -type AddCellInfoRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - CellInfo *topodata.CellInfo `protobuf:"bytes,2,opt,name=cell_info,json=cellInfo,proto3" json:"cell_info,omitempty"` +func (x *SchemaMigration) GetArtifacts() string { + if x != nil { + return x.Artifacts + } + return "" } -func (x *AddCellInfoRequest) Reset() { - *x = AddCellInfoRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *SchemaMigration) GetRetries() uint64 { + if x != nil { + return x.Retries } + return 0 } -func (x *AddCellInfoRequest) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *SchemaMigration) GetTablet() *topodata.TabletAlias { + if x != nil { + return x.Tablet + } + return nil } -func (*AddCellInfoRequest) ProtoMessage() {} - -func (x *AddCellInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *SchemaMigration) GetTabletFailure() bool { + if x != nil { + return x.TabletFailure } - return mi.MessageOf(x) + return false } -// Deprecated: Use AddCellInfoRequest.ProtoReflect.Descriptor instead. -func (*AddCellInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{7} +func (x *SchemaMigration) GetProgress() float32 { + if x != nil { + return x.Progress + } + return 0 } -func (x *AddCellInfoRequest) GetName() string { +func (x *SchemaMigration) GetMigrationContext() string { if x != nil { - return x.Name + return x.MigrationContext } return "" } -func (x *AddCellInfoRequest) GetCellInfo() *topodata.CellInfo { +func (x *SchemaMigration) GetDdlAction() string { if x != nil { - return x.CellInfo + return x.DdlAction } - return nil + return "" } -type AddCellInfoResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *SchemaMigration) GetMessage() string { + if x != nil { + return x.Message + } + return "" } -func (x *AddCellInfoResponse) Reset() { - *x = AddCellInfoResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *SchemaMigration) GetEtaSeconds() int64 { + if x != nil { + return x.EtaSeconds } + return 0 } -func (x *AddCellInfoResponse) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *SchemaMigration) GetRowsCopied() uint64 { + if x != nil { + return x.RowsCopied + } + return 0 } -func (*AddCellInfoResponse) ProtoMessage() {} +func (x *SchemaMigration) GetTableRows() int64 { + if x != nil { + return x.TableRows + } + return 0 +} -func (x *AddCellInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *SchemaMigration) GetAddedUniqueKeys() uint32 { + if x != nil { + return x.AddedUniqueKeys } - return mi.MessageOf(x) + return 0 } -// Deprecated: Use AddCellInfoResponse.ProtoReflect.Descriptor instead. -func (*AddCellInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{8} +func (x *SchemaMigration) GetRemovedUniqueKeys() uint32 { + if x != nil { + return x.RemovedUniqueKeys + } + return 0 } -type AddCellsAliasRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` +func (x *SchemaMigration) GetLogFile() string { + if x != nil { + return x.LogFile + } + return "" } -func (x *AddCellsAliasRequest) Reset() { - *x = AddCellsAliasRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *SchemaMigration) GetArtifactRetention() *vttime.Duration { + if x != nil { + return x.ArtifactRetention } + return nil } -func (x *AddCellsAliasRequest) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *SchemaMigration) GetPostponeCompletion() bool { + if x != nil { + return x.PostponeCompletion + } + return false } -func (*AddCellsAliasRequest) ProtoMessage() {} - -func (x *AddCellsAliasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *SchemaMigration) GetRemovedUniqueKeyNames() string { + if x != nil { + return x.RemovedUniqueKeyNames } - return mi.MessageOf(x) + return "" } -// Deprecated: Use AddCellsAliasRequest.ProtoReflect.Descriptor instead. -func (*AddCellsAliasRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{9} +func (x *SchemaMigration) GetDroppedNoDefaultColumnNames() string { + if x != nil { + return x.DroppedNoDefaultColumnNames + } + return "" } -func (x *AddCellsAliasRequest) GetName() string { +func (x *SchemaMigration) GetExpandedColumnNames() string { if x != nil { - return x.Name + return x.ExpandedColumnNames } return "" } -func (x *AddCellsAliasRequest) GetCells() []string { +func (x *SchemaMigration) GetRevertibleNotes() string { if x != nil { - return x.Cells + return x.RevertibleNotes } - return nil + return "" } -type AddCellsAliasResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *SchemaMigration) GetAllowConcurrent() bool { + if x != nil { + return x.AllowConcurrent + } + return false } -func (x *AddCellsAliasResponse) Reset() { - *x = AddCellsAliasResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *SchemaMigration) GetRevertedUuid() string { + if x != nil { + return x.RevertedUuid } + return "" } -func (x *AddCellsAliasResponse) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *SchemaMigration) GetIsView() bool { + if x != nil { + return x.IsView + } + return false } -func (*AddCellsAliasResponse) ProtoMessage() {} - -func (x *AddCellsAliasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *SchemaMigration) GetReadyToComplete() bool { + if x != nil { + return x.ReadyToComplete } - return mi.MessageOf(x) + return false } -// Deprecated: Use AddCellsAliasResponse.ProtoReflect.Descriptor instead. -func (*AddCellsAliasResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{10} +func (x *SchemaMigration) GetVitessLivenessIndicator() int64 { + if x != nil { + return x.VitessLivenessIndicator + } + return 0 } -type ApplyRoutingRulesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *SchemaMigration) GetUserThrottleRatio() float32 { + if x != nil { + return x.UserThrottleRatio + } + return 0 +} - RoutingRules *vschema.RoutingRules `protobuf:"bytes,1,opt,name=routing_rules,json=routingRules,proto3" json:"routing_rules,omitempty"` - // SkipRebuild, if set, will cause ApplyRoutingRules to skip rebuilding the - // SrvVSchema objects in each cell in RebuildCells. - SkipRebuild bool `protobuf:"varint,2,opt,name=skip_rebuild,json=skipRebuild,proto3" json:"skip_rebuild,omitempty"` - // RebuildCells limits the SrvVSchema rebuild to the specified cells. If not - // provided the SrvVSchema will be rebuilt in every cell in the topology. - // - // Ignored if SkipRebuild is set. - RebuildCells []string `protobuf:"bytes,3,rep,name=rebuild_cells,json=rebuildCells,proto3" json:"rebuild_cells,omitempty"` +func (x *SchemaMigration) GetSpecialPlan() string { + if x != nil { + return x.SpecialPlan + } + return "" } -func (x *ApplyRoutingRulesRequest) Reset() { - *x = ApplyRoutingRulesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *SchemaMigration) GetLastThrottledAt() *vttime.Time { + if x != nil { + return x.LastThrottledAt } + return nil } -func (x *ApplyRoutingRulesRequest) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *SchemaMigration) GetComponentThrottled() string { + if x != nil { + return x.ComponentThrottled + } + return "" } -func (*ApplyRoutingRulesRequest) ProtoMessage() {} +func (x *SchemaMigration) GetCancelledAt() *vttime.Time { + if x != nil { + return x.CancelledAt + } + return nil +} -func (x *ApplyRoutingRulesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *SchemaMigration) GetPostponeLaunch() bool { + if x != nil { + return x.PostponeLaunch } - return mi.MessageOf(x) + return false } -// Deprecated: Use ApplyRoutingRulesRequest.ProtoReflect.Descriptor instead. -func (*ApplyRoutingRulesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{11} +func (x *SchemaMigration) GetStage() string { + if x != nil { + return x.Stage + } + return "" } -func (x *ApplyRoutingRulesRequest) GetRoutingRules() *vschema.RoutingRules { +func (x *SchemaMigration) GetCutoverAttempts() uint32 { if x != nil { - return x.RoutingRules + return x.CutoverAttempts } - return nil + return 0 } -func (x *ApplyRoutingRulesRequest) GetSkipRebuild() bool { +func (x *SchemaMigration) GetIsImmediateOperation() bool { if x != nil { - return x.SkipRebuild + return x.IsImmediateOperation } return false } -func (x *ApplyRoutingRulesRequest) GetRebuildCells() []string { +func (x *SchemaMigration) GetReviewedAt() *vttime.Time { if x != nil { - return x.RebuildCells + return x.ReviewedAt } return nil } -type ApplyRoutingRulesResponse struct { +func (x *SchemaMigration) GetReadyToCompleteAt() *vttime.Time { + if x != nil { + return x.ReadyToCompleteAt + } + return nil +} + +type Shard struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Shard *topodata.Shard `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` } -func (x *ApplyRoutingRulesResponse) Reset() { - *x = ApplyRoutingRulesResponse{} +func (x *Shard) Reset() { + *x = Shard{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[12] + mi := &file_vtctldata_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ApplyRoutingRulesResponse) String() string { +func (x *Shard) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ApplyRoutingRulesResponse) ProtoMessage() {} +func (*Shard) ProtoMessage() {} -func (x *ApplyRoutingRulesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[12] +func (x *Shard) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -939,44 +1188,64 @@ func (x *ApplyRoutingRulesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ApplyRoutingRulesResponse.ProtoReflect.Descriptor instead. -func (*ApplyRoutingRulesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{12} +// Deprecated: Use Shard.ProtoReflect.Descriptor instead. +func (*Shard) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{6} } -type ApplyShardRoutingRulesRequest struct { +func (x *Shard) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *Shard) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Shard) GetShard() *topodata.Shard { + if x != nil { + return x.Shard + } + return nil +} + +// TODO: comment the hell out of this. +type Workflow struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ShardRoutingRules *vschema.ShardRoutingRules `protobuf:"bytes,1,opt,name=shard_routing_rules,json=shardRoutingRules,proto3" json:"shard_routing_rules,omitempty"` - // SkipRebuild, if set, will cause ApplyShardRoutingRules to skip rebuilding the - // SrvVSchema objects in each cell in RebuildCells. - SkipRebuild bool `protobuf:"varint,2,opt,name=skip_rebuild,json=skipRebuild,proto3" json:"skip_rebuild,omitempty"` - // RebuildCells limits the SrvVSchema rebuild to the specified cells. If not - // provided the SrvVSchema will be rebuilt in every cell in the topology. - // - // Ignored if SkipRebuild is set. - RebuildCells []string `protobuf:"bytes,3,rep,name=rebuild_cells,json=rebuildCells,proto3" json:"rebuild_cells,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Source *Workflow_ReplicationLocation `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` + Target *Workflow_ReplicationLocation `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + MaxVReplicationLag int64 `protobuf:"varint,4,opt,name=max_v_replication_lag,json=maxVReplicationLag,proto3" json:"max_v_replication_lag,omitempty"` + ShardStreams map[string]*Workflow_ShardStream `protobuf:"bytes,5,rep,name=shard_streams,json=shardStreams,proto3" json:"shard_streams,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + WorkflowType string `protobuf:"bytes,6,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` + WorkflowSubType string `protobuf:"bytes,7,opt,name=workflow_sub_type,json=workflowSubType,proto3" json:"workflow_sub_type,omitempty"` } -func (x *ApplyShardRoutingRulesRequest) Reset() { - *x = ApplyShardRoutingRulesRequest{} +func (x *Workflow) Reset() { + *x = Workflow{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[13] + mi := &file_vtctldata_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ApplyShardRoutingRulesRequest) String() string { +func (x *Workflow) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ApplyShardRoutingRulesRequest) ProtoMessage() {} +func (*Workflow) ProtoMessage() {} -func (x *ApplyShardRoutingRulesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[13] +func (x *Workflow) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -987,115 +1256,86 @@ func (x *ApplyShardRoutingRulesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ApplyShardRoutingRulesRequest.ProtoReflect.Descriptor instead. -func (*ApplyShardRoutingRulesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{13} -} +// Deprecated: Use Workflow.ProtoReflect.Descriptor instead. +func (*Workflow) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{7} +} -func (x *ApplyShardRoutingRulesRequest) GetShardRoutingRules() *vschema.ShardRoutingRules { +func (x *Workflow) GetName() string { if x != nil { - return x.ShardRoutingRules + return x.Name } - return nil + return "" } -func (x *ApplyShardRoutingRulesRequest) GetSkipRebuild() bool { +func (x *Workflow) GetSource() *Workflow_ReplicationLocation { if x != nil { - return x.SkipRebuild + return x.Source } - return false + return nil } -func (x *ApplyShardRoutingRulesRequest) GetRebuildCells() []string { +func (x *Workflow) GetTarget() *Workflow_ReplicationLocation { if x != nil { - return x.RebuildCells + return x.Target } return nil } -type ApplyShardRoutingRulesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *ApplyShardRoutingRulesResponse) Reset() { - *x = ApplyShardRoutingRulesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *Workflow) GetMaxVReplicationLag() int64 { + if x != nil { + return x.MaxVReplicationLag } + return 0 } -func (x *ApplyShardRoutingRulesResponse) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *Workflow) GetShardStreams() map[string]*Workflow_ShardStream { + if x != nil { + return x.ShardStreams + } + return nil } -func (*ApplyShardRoutingRulesResponse) ProtoMessage() {} - -func (x *ApplyShardRoutingRulesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *Workflow) GetWorkflowType() string { + if x != nil { + return x.WorkflowType } - return mi.MessageOf(x) + return "" } -// Deprecated: Use ApplyShardRoutingRulesResponse.ProtoReflect.Descriptor instead. -func (*ApplyShardRoutingRulesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{14} +func (x *Workflow) GetWorkflowSubType() string { + if x != nil { + return x.WorkflowSubType + } + return "" } -type ApplySchemaRequest struct { +type AddCellInfoRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Allow large schema changes which incur a longer unavailability of the database. - AllowLongUnavailability bool `protobuf:"varint,2,opt,name=allow_long_unavailability,json=allowLongUnavailability,proto3" json:"allow_long_unavailability,omitempty"` - // SQL commands to run. - Sql []string `protobuf:"bytes,3,rep,name=sql,proto3" json:"sql,omitempty"` - // Online DDL strategy, compatible with @@ddl_strategy session variable (examples: 'gh-ost', 'pt-osc', 'gh-ost --max-load=Threads_running=100'") - DdlStrategy string `protobuf:"bytes,4,opt,name=ddl_strategy,json=ddlStrategy,proto3" json:"ddl_strategy,omitempty"` - // Optional: explicit UUIDs for migration. - // If given, must match number of DDL changes - UuidList []string `protobuf:"bytes,5,rep,name=uuid_list,json=uuidList,proto3" json:"uuid_list,omitempty"` - // For Online DDL, optionally supply a custom unique string used as context for the migration(s) in this command. - // By default a unique context is auto-generated by Vitess - MigrationContext string `protobuf:"bytes,6,opt,name=migration_context,json=migrationContext,proto3" json:"migration_context,omitempty"` - // WaitReplicasTimeout is the duration of time to wait for replicas to catch - // up in reparenting. - WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,7,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` - // Skip pre-apply schema checks, and directly forward schema change query to shards - SkipPreflight bool `protobuf:"varint,8,opt,name=skip_preflight,json=skipPreflight,proto3" json:"skip_preflight,omitempty"` - // caller_id identifies the caller. This is the effective caller ID, - // set by the application to further identify the caller. - CallerId *vtrpc.CallerID `protobuf:"bytes,9,opt,name=caller_id,json=callerId,proto3" json:"caller_id,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CellInfo *topodata.CellInfo `protobuf:"bytes,2,opt,name=cell_info,json=cellInfo,proto3" json:"cell_info,omitempty"` } -func (x *ApplySchemaRequest) Reset() { - *x = ApplySchemaRequest{} +func (x *AddCellInfoRequest) Reset() { + *x = AddCellInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[15] + mi := &file_vtctldata_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ApplySchemaRequest) String() string { +func (x *AddCellInfoRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ApplySchemaRequest) ProtoMessage() {} +func (*AddCellInfoRequest) ProtoMessage() {} -func (x *ApplySchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[15] +func (x *AddCellInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1106,99 +1346,89 @@ func (x *ApplySchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ApplySchemaRequest.ProtoReflect.Descriptor instead. -func (*ApplySchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{15} +// Deprecated: Use AddCellInfoRequest.ProtoReflect.Descriptor instead. +func (*AddCellInfoRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{8} } -func (x *ApplySchemaRequest) GetKeyspace() string { +func (x *AddCellInfoRequest) GetName() string { if x != nil { - return x.Keyspace + return x.Name } return "" } -func (x *ApplySchemaRequest) GetAllowLongUnavailability() bool { - if x != nil { - return x.AllowLongUnavailability - } - return false -} - -func (x *ApplySchemaRequest) GetSql() []string { +func (x *AddCellInfoRequest) GetCellInfo() *topodata.CellInfo { if x != nil { - return x.Sql + return x.CellInfo } return nil } -func (x *ApplySchemaRequest) GetDdlStrategy() string { - if x != nil { - return x.DdlStrategy - } - return "" +type AddCellInfoResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (x *ApplySchemaRequest) GetUuidList() []string { - if x != nil { - return x.UuidList +func (x *AddCellInfoResponse) Reset() { + *x = AddCellInfoResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (x *ApplySchemaRequest) GetMigrationContext() string { - if x != nil { - return x.MigrationContext - } - return "" +func (x *AddCellInfoResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *ApplySchemaRequest) GetWaitReplicasTimeout() *vttime.Duration { - if x != nil { - return x.WaitReplicasTimeout - } - return nil -} +func (*AddCellInfoResponse) ProtoMessage() {} -func (x *ApplySchemaRequest) GetSkipPreflight() bool { - if x != nil { - return x.SkipPreflight +func (x *AddCellInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return false + return mi.MessageOf(x) } -func (x *ApplySchemaRequest) GetCallerId() *vtrpc.CallerID { - if x != nil { - return x.CallerId - } - return nil +// Deprecated: Use AddCellInfoResponse.ProtoReflect.Descriptor instead. +func (*AddCellInfoResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{9} } -type ApplySchemaResponse struct { +type AddCellsAliasRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - UuidList []string `protobuf:"bytes,1,rep,name=uuid_list,json=uuidList,proto3" json:"uuid_list,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` } -func (x *ApplySchemaResponse) Reset() { - *x = ApplySchemaResponse{} +func (x *AddCellsAliasRequest) Reset() { + *x = AddCellsAliasRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[16] + mi := &file_vtctldata_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ApplySchemaResponse) String() string { +func (x *AddCellsAliasRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ApplySchemaResponse) ProtoMessage() {} +func (*AddCellsAliasRequest) ProtoMessage() {} -func (x *ApplySchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[16] +func (x *AddCellsAliasRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1209,48 +1439,48 @@ func (x *ApplySchemaResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ApplySchemaResponse.ProtoReflect.Descriptor instead. -func (*ApplySchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{16} +// Deprecated: Use AddCellsAliasRequest.ProtoReflect.Descriptor instead. +func (*AddCellsAliasRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{10} } -func (x *ApplySchemaResponse) GetUuidList() []string { +func (x *AddCellsAliasRequest) GetName() string { if x != nil { - return x.UuidList + return x.Name + } + return "" +} + +func (x *AddCellsAliasRequest) GetCells() []string { + if x != nil { + return x.Cells } return nil } -type ApplyVSchemaRequest struct { +type AddCellsAliasResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - SkipRebuild bool `protobuf:"varint,2,opt,name=skip_rebuild,json=skipRebuild,proto3" json:"skip_rebuild,omitempty"` - DryRun bool `protobuf:"varint,3,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` - Cells []string `protobuf:"bytes,4,rep,name=cells,proto3" json:"cells,omitempty"` - VSchema *vschema.Keyspace `protobuf:"bytes,5,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` - Sql string `protobuf:"bytes,6,opt,name=sql,proto3" json:"sql,omitempty"` } -func (x *ApplyVSchemaRequest) Reset() { - *x = ApplyVSchemaRequest{} +func (x *AddCellsAliasResponse) Reset() { + *x = AddCellsAliasResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[17] + mi := &file_vtctldata_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ApplyVSchemaRequest) String() string { +func (x *AddCellsAliasResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ApplyVSchemaRequest) ProtoMessage() {} +func (*AddCellsAliasResponse) ProtoMessage() {} -func (x *ApplyVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[17] +func (x *AddCellsAliasResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1261,78 +1491,103 @@ func (x *ApplyVSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ApplyVSchemaRequest.ProtoReflect.Descriptor instead. -func (*ApplyVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{17} +// Deprecated: Use AddCellsAliasResponse.ProtoReflect.Descriptor instead. +func (*AddCellsAliasResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{11} } -func (x *ApplyVSchemaRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} +type ApplyRoutingRulesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (x *ApplyVSchemaRequest) GetSkipRebuild() bool { - if x != nil { - return x.SkipRebuild - } - return false -} + RoutingRules *vschema.RoutingRules `protobuf:"bytes,1,opt,name=routing_rules,json=routingRules,proto3" json:"routing_rules,omitempty"` + // SkipRebuild, if set, will cause ApplyRoutingRules to skip rebuilding the + // SrvVSchema objects in each cell in RebuildCells. + SkipRebuild bool `protobuf:"varint,2,opt,name=skip_rebuild,json=skipRebuild,proto3" json:"skip_rebuild,omitempty"` + // RebuildCells limits the SrvVSchema rebuild to the specified cells. If not + // provided the SrvVSchema will be rebuilt in every cell in the topology. + // + // Ignored if SkipRebuild is set. + RebuildCells []string `protobuf:"bytes,3,rep,name=rebuild_cells,json=rebuildCells,proto3" json:"rebuild_cells,omitempty"` +} -func (x *ApplyVSchemaRequest) GetDryRun() bool { - if x != nil { - return x.DryRun +func (x *ApplyRoutingRulesRequest) Reset() { + *x = ApplyRoutingRulesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return false } -func (x *ApplyVSchemaRequest) GetCells() []string { +func (x *ApplyRoutingRulesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyRoutingRulesRequest) ProtoMessage() {} + +func (x *ApplyRoutingRulesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyRoutingRulesRequest.ProtoReflect.Descriptor instead. +func (*ApplyRoutingRulesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{12} +} + +func (x *ApplyRoutingRulesRequest) GetRoutingRules() *vschema.RoutingRules { if x != nil { - return x.Cells + return x.RoutingRules } return nil } -func (x *ApplyVSchemaRequest) GetVSchema() *vschema.Keyspace { +func (x *ApplyRoutingRulesRequest) GetSkipRebuild() bool { if x != nil { - return x.VSchema + return x.SkipRebuild } - return nil + return false } -func (x *ApplyVSchemaRequest) GetSql() string { +func (x *ApplyRoutingRulesRequest) GetRebuildCells() []string { if x != nil { - return x.Sql + return x.RebuildCells } - return "" + return nil } -type ApplyVSchemaResponse struct { +type ApplyRoutingRulesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - VSchema *vschema.Keyspace `protobuf:"bytes,1,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` } -func (x *ApplyVSchemaResponse) Reset() { - *x = ApplyVSchemaResponse{} +func (x *ApplyRoutingRulesResponse) Reset() { + *x = ApplyRoutingRulesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[18] + mi := &file_vtctldata_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ApplyVSchemaResponse) String() string { +func (x *ApplyRoutingRulesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ApplyVSchemaResponse) ProtoMessage() {} +func (*ApplyRoutingRulesResponse) ProtoMessage() {} -func (x *ApplyVSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[18] +func (x *ApplyRoutingRulesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1343,54 +1598,44 @@ func (x *ApplyVSchemaResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ApplyVSchemaResponse.ProtoReflect.Descriptor instead. -func (*ApplyVSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{18} -} - -func (x *ApplyVSchemaResponse) GetVSchema() *vschema.Keyspace { - if x != nil { - return x.VSchema - } - return nil +// Deprecated: Use ApplyRoutingRulesResponse.ProtoReflect.Descriptor instead. +func (*ApplyRoutingRulesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{13} } -type BackupRequest struct { +type ApplyShardRoutingRulesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - // AllowPrimary allows the backup to proceed if TabletAlias is a PRIMARY. + ShardRoutingRules *vschema.ShardRoutingRules `protobuf:"bytes,1,opt,name=shard_routing_rules,json=shardRoutingRules,proto3" json:"shard_routing_rules,omitempty"` + // SkipRebuild, if set, will cause ApplyShardRoutingRules to skip rebuilding the + // SrvVSchema objects in each cell in RebuildCells. + SkipRebuild bool `protobuf:"varint,2,opt,name=skip_rebuild,json=skipRebuild,proto3" json:"skip_rebuild,omitempty"` + // RebuildCells limits the SrvVSchema rebuild to the specified cells. If not + // provided the SrvVSchema will be rebuilt in every cell in the topology. // - // WARNING: If using the builtin backup engine, this will shutdown mysqld on - // the primary for the duration of the backup, and no writes will be possible. - AllowPrimary bool `protobuf:"varint,2,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` - // Concurrency specifies the number of compression/checksum jobs to run - // simultaneously. - Concurrency uint64 `protobuf:"varint,3,opt,name=concurrency,proto3" json:"concurrency,omitempty"` - // IncrementalFromPos indicates a position of a previous backup. When this value is non-empty - // then the backup becomes incremental and applies as of given position. - IncrementalFromPos string `protobuf:"bytes,4,opt,name=incremental_from_pos,json=incrementalFromPos,proto3" json:"incremental_from_pos,omitempty"` + // Ignored if SkipRebuild is set. + RebuildCells []string `protobuf:"bytes,3,rep,name=rebuild_cells,json=rebuildCells,proto3" json:"rebuild_cells,omitempty"` } -func (x *BackupRequest) Reset() { - *x = BackupRequest{} +func (x *ApplyShardRoutingRulesRequest) Reset() { + *x = ApplyShardRoutingRulesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[19] + mi := &file_vtctldata_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *BackupRequest) String() string { +func (x *ApplyShardRoutingRulesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*BackupRequest) ProtoMessage() {} +func (*ApplyShardRoutingRulesRequest) ProtoMessage() {} -func (x *BackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[19] +func (x *ApplyShardRoutingRulesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1401,68 +1646,55 @@ func (x *BackupRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BackupRequest.ProtoReflect.Descriptor instead. -func (*BackupRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{19} +// Deprecated: Use ApplyShardRoutingRulesRequest.ProtoReflect.Descriptor instead. +func (*ApplyShardRoutingRulesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{14} } -func (x *BackupRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *ApplyShardRoutingRulesRequest) GetShardRoutingRules() *vschema.ShardRoutingRules { if x != nil { - return x.TabletAlias + return x.ShardRoutingRules } return nil } -func (x *BackupRequest) GetAllowPrimary() bool { +func (x *ApplyShardRoutingRulesRequest) GetSkipRebuild() bool { if x != nil { - return x.AllowPrimary + return x.SkipRebuild } return false } -func (x *BackupRequest) GetConcurrency() uint64 { - if x != nil { - return x.Concurrency - } - return 0 -} - -func (x *BackupRequest) GetIncrementalFromPos() string { +func (x *ApplyShardRoutingRulesRequest) GetRebuildCells() []string { if x != nil { - return x.IncrementalFromPos + return x.RebuildCells } - return "" + return nil } -type BackupResponse struct { +type ApplyShardRoutingRulesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - // TabletAlias is the alias being used for the backup. - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` - Event *logutil.Event `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` } -func (x *BackupResponse) Reset() { - *x = BackupResponse{} +func (x *ApplyShardRoutingRulesResponse) Reset() { + *x = ApplyShardRoutingRulesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[20] + mi := &file_vtctldata_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *BackupResponse) String() string { +func (x *ApplyShardRoutingRulesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*BackupResponse) ProtoMessage() {} +func (*ApplyShardRoutingRulesResponse) ProtoMessage() {} -func (x *BackupResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[20] +func (x *ApplyShardRoutingRulesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1473,71 +1705,56 @@ func (x *BackupResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BackupResponse.ProtoReflect.Descriptor instead. -func (*BackupResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{20} -} - -func (x *BackupResponse) GetTabletAlias() *topodata.TabletAlias { - if x != nil { - return x.TabletAlias - } - return nil -} - -func (x *BackupResponse) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *BackupResponse) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *BackupResponse) GetEvent() *logutil.Event { - if x != nil { - return x.Event - } - return nil +// Deprecated: Use ApplyShardRoutingRulesResponse.ProtoReflect.Descriptor instead. +func (*ApplyShardRoutingRulesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{15} } -type BackupShardRequest struct { +type ApplySchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // AllowPrimary allows the backup to occur on a PRIMARY tablet. See - // BackupRequest.AllowPrimary for warnings and caveats. - AllowPrimary bool `protobuf:"varint,3,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` - // Concurrency specifies the number of compression/checksum jobs to run - // simultaneously. - Concurrency uint64 `protobuf:"varint,4,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + // SQL commands to run. + Sql []string `protobuf:"bytes,3,rep,name=sql,proto3" json:"sql,omitempty"` + // Online DDL strategy, compatible with @@ddl_strategy session variable (examples: 'gh-ost', 'pt-osc', 'gh-ost --max-load=Threads_running=100'") + DdlStrategy string `protobuf:"bytes,4,opt,name=ddl_strategy,json=ddlStrategy,proto3" json:"ddl_strategy,omitempty"` + // Optional: explicit UUIDs for migration. + // If given, must match number of DDL changes + UuidList []string `protobuf:"bytes,5,rep,name=uuid_list,json=uuidList,proto3" json:"uuid_list,omitempty"` + // For Online DDL, optionally supply a custom unique string used as context for the migration(s) in this command. + // By default a unique context is auto-generated by Vitess + MigrationContext string `protobuf:"bytes,6,opt,name=migration_context,json=migrationContext,proto3" json:"migration_context,omitempty"` + // WaitReplicasTimeout is the duration of time to wait for replicas to catch + // up in reparenting. + WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,7,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` + // Skip pre-apply schema checks, and directly forward schema change query to shards + SkipPreflight bool `protobuf:"varint,8,opt,name=skip_preflight,json=skipPreflight,proto3" json:"skip_preflight,omitempty"` + // caller_id identifies the caller. This is the effective caller ID, + // set by the application to further identify the caller. + CallerId *vtrpc.CallerID `protobuf:"bytes,9,opt,name=caller_id,json=callerId,proto3" json:"caller_id,omitempty"` + // BatchSize indicates how many queries to apply together + BatchSize int64 `protobuf:"varint,10,opt,name=batch_size,json=batchSize,proto3" json:"batch_size,omitempty"` } -func (x *BackupShardRequest) Reset() { - *x = BackupShardRequest{} +func (x *ApplySchemaRequest) Reset() { + *x = ApplySchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[21] + mi := &file_vtctldata_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *BackupShardRequest) String() string { +func (x *ApplySchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*BackupShardRequest) ProtoMessage() {} +func (*ApplySchemaRequest) ProtoMessage() {} -func (x *BackupShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[21] +func (x *ApplySchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1548,129 +1765,100 @@ func (x *BackupShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BackupShardRequest.ProtoReflect.Descriptor instead. -func (*BackupShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{21} +// Deprecated: Use ApplySchemaRequest.ProtoReflect.Descriptor instead. +func (*ApplySchemaRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{16} } -func (x *BackupShardRequest) GetKeyspace() string { +func (x *ApplySchemaRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *BackupShardRequest) GetShard() string { +func (x *ApplySchemaRequest) GetSql() []string { if x != nil { - return x.Shard + return x.Sql } - return "" + return nil } -func (x *BackupShardRequest) GetAllowPrimary() bool { +func (x *ApplySchemaRequest) GetDdlStrategy() string { if x != nil { - return x.AllowPrimary + return x.DdlStrategy } - return false + return "" } -func (x *BackupShardRequest) GetConcurrency() uint64 { +func (x *ApplySchemaRequest) GetUuidList() []string { if x != nil { - return x.Concurrency + return x.UuidList } - return 0 -} - -type ChangeTabletTypeRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - DbType topodata.TabletType `protobuf:"varint,2,opt,name=db_type,json=dbType,proto3,enum=topodata.TabletType" json:"db_type,omitempty"` - DryRun bool `protobuf:"varint,3,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` + return nil } -func (x *ChangeTabletTypeRequest) Reset() { - *x = ChangeTabletTypeRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *ApplySchemaRequest) GetMigrationContext() string { + if x != nil { + return x.MigrationContext } + return "" } -func (x *ChangeTabletTypeRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ChangeTabletTypeRequest) ProtoMessage() {} - -func (x *ChangeTabletTypeRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *ApplySchemaRequest) GetWaitReplicasTimeout() *vttime.Duration { + if x != nil { + return x.WaitReplicasTimeout } - return mi.MessageOf(x) -} - -// Deprecated: Use ChangeTabletTypeRequest.ProtoReflect.Descriptor instead. -func (*ChangeTabletTypeRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{22} + return nil } -func (x *ChangeTabletTypeRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *ApplySchemaRequest) GetSkipPreflight() bool { if x != nil { - return x.TabletAlias + return x.SkipPreflight } - return nil + return false } -func (x *ChangeTabletTypeRequest) GetDbType() topodata.TabletType { +func (x *ApplySchemaRequest) GetCallerId() *vtrpc.CallerID { if x != nil { - return x.DbType + return x.CallerId } - return topodata.TabletType(0) + return nil } -func (x *ChangeTabletTypeRequest) GetDryRun() bool { +func (x *ApplySchemaRequest) GetBatchSize() int64 { if x != nil { - return x.DryRun + return x.BatchSize } - return false + return 0 } -type ChangeTabletTypeResponse struct { +type ApplySchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - BeforeTablet *topodata.Tablet `protobuf:"bytes,1,opt,name=before_tablet,json=beforeTablet,proto3" json:"before_tablet,omitempty"` - AfterTablet *topodata.Tablet `protobuf:"bytes,2,opt,name=after_tablet,json=afterTablet,proto3" json:"after_tablet,omitempty"` - WasDryRun bool `protobuf:"varint,3,opt,name=was_dry_run,json=wasDryRun,proto3" json:"was_dry_run,omitempty"` + UuidList []string `protobuf:"bytes,1,rep,name=uuid_list,json=uuidList,proto3" json:"uuid_list,omitempty"` + RowsAffectedByShard map[string]uint64 `protobuf:"bytes,2,rep,name=rows_affected_by_shard,json=rowsAffectedByShard,proto3" json:"rows_affected_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } -func (x *ChangeTabletTypeResponse) Reset() { - *x = ChangeTabletTypeResponse{} +func (x *ApplySchemaResponse) Reset() { + *x = ApplySchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[23] + mi := &file_vtctldata_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ChangeTabletTypeResponse) String() string { +func (x *ApplySchemaResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ChangeTabletTypeResponse) ProtoMessage() {} +func (*ApplySchemaResponse) ProtoMessage() {} -func (x *ChangeTabletTypeResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[23] +func (x *ApplySchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1681,76 +1869,55 @@ func (x *ChangeTabletTypeResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ChangeTabletTypeResponse.ProtoReflect.Descriptor instead. -func (*ChangeTabletTypeResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{23} +// Deprecated: Use ApplySchemaResponse.ProtoReflect.Descriptor instead. +func (*ApplySchemaResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{17} } -func (x *ChangeTabletTypeResponse) GetBeforeTablet() *topodata.Tablet { +func (x *ApplySchemaResponse) GetUuidList() []string { if x != nil { - return x.BeforeTablet + return x.UuidList } return nil } -func (x *ChangeTabletTypeResponse) GetAfterTablet() *topodata.Tablet { +func (x *ApplySchemaResponse) GetRowsAffectedByShard() map[string]uint64 { if x != nil { - return x.AfterTablet + return x.RowsAffectedByShard } return nil } -func (x *ChangeTabletTypeResponse) GetWasDryRun() bool { - if x != nil { - return x.WasDryRun - } - return false -} - -type CreateKeyspaceRequest struct { +type ApplyVSchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Name is the name of the keyspace. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Force proceeds with the request even if the keyspace already exists. - Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` - // AllowEmptyVSchema allows a keyspace to be created with no vschema. - AllowEmptyVSchema bool `protobuf:"varint,3,opt,name=allow_empty_v_schema,json=allowEmptyVSchema,proto3" json:"allow_empty_v_schema,omitempty"` - // ServedFroms specifies a set of db_type:keyspace pairs used to serve - // traffic for the keyspace. - ServedFroms []*topodata.Keyspace_ServedFrom `protobuf:"bytes,6,rep,name=served_froms,json=servedFroms,proto3" json:"served_froms,omitempty"` - // Type is the type of the keyspace to create. - Type topodata.KeyspaceType `protobuf:"varint,7,opt,name=type,proto3,enum=topodata.KeyspaceType" json:"type,omitempty"` - // BaseKeyspace specifies the base keyspace for SNAPSHOT keyspaces. It is - // required to create a SNAPSHOT keyspace. - BaseKeyspace string `protobuf:"bytes,8,opt,name=base_keyspace,json=baseKeyspace,proto3" json:"base_keyspace,omitempty"` - // SnapshotTime specifies the snapshot time for this keyspace. It is required - // to create a SNAPSHOT keyspace. - SnapshotTime *vttime.Time `protobuf:"bytes,9,opt,name=snapshot_time,json=snapshotTime,proto3" json:"snapshot_time,omitempty"` - // DurabilityPolicy is the durability policy to be - // used for this keyspace. - DurabilityPolicy string `protobuf:"bytes,10,opt,name=durability_policy,json=durabilityPolicy,proto3" json:"durability_policy,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + SkipRebuild bool `protobuf:"varint,2,opt,name=skip_rebuild,json=skipRebuild,proto3" json:"skip_rebuild,omitempty"` + DryRun bool `protobuf:"varint,3,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` + Cells []string `protobuf:"bytes,4,rep,name=cells,proto3" json:"cells,omitempty"` + VSchema *vschema.Keyspace `protobuf:"bytes,5,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` + Sql string `protobuf:"bytes,6,opt,name=sql,proto3" json:"sql,omitempty"` } -func (x *CreateKeyspaceRequest) Reset() { - *x = CreateKeyspaceRequest{} +func (x *ApplyVSchemaRequest) Reset() { + *x = ApplyVSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[24] + mi := &file_vtctldata_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *CreateKeyspaceRequest) String() string { +func (x *ApplyVSchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateKeyspaceRequest) ProtoMessage() {} +func (*ApplyVSchemaRequest) ProtoMessage() {} -func (x *CreateKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[24] +func (x *ApplyVSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1761,93 +1928,78 @@ func (x *CreateKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*CreateKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{24} +// Deprecated: Use ApplyVSchemaRequest.ProtoReflect.Descriptor instead. +func (*ApplyVSchemaRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{18} } -func (x *CreateKeyspaceRequest) GetName() string { +func (x *ApplyVSchemaRequest) GetKeyspace() string { if x != nil { - return x.Name + return x.Keyspace } return "" } -func (x *CreateKeyspaceRequest) GetForce() bool { +func (x *ApplyVSchemaRequest) GetSkipRebuild() bool { if x != nil { - return x.Force + return x.SkipRebuild } return false } -func (x *CreateKeyspaceRequest) GetAllowEmptyVSchema() bool { +func (x *ApplyVSchemaRequest) GetDryRun() bool { if x != nil { - return x.AllowEmptyVSchema + return x.DryRun } return false } -func (x *CreateKeyspaceRequest) GetServedFroms() []*topodata.Keyspace_ServedFrom { +func (x *ApplyVSchemaRequest) GetCells() []string { if x != nil { - return x.ServedFroms + return x.Cells } return nil } -func (x *CreateKeyspaceRequest) GetType() topodata.KeyspaceType { - if x != nil { - return x.Type - } - return topodata.KeyspaceType(0) -} - -func (x *CreateKeyspaceRequest) GetBaseKeyspace() string { - if x != nil { - return x.BaseKeyspace - } - return "" -} - -func (x *CreateKeyspaceRequest) GetSnapshotTime() *vttime.Time { +func (x *ApplyVSchemaRequest) GetVSchema() *vschema.Keyspace { if x != nil { - return x.SnapshotTime + return x.VSchema } return nil } -func (x *CreateKeyspaceRequest) GetDurabilityPolicy() string { +func (x *ApplyVSchemaRequest) GetSql() string { if x != nil { - return x.DurabilityPolicy + return x.Sql } return "" } -type CreateKeyspaceResponse struct { +type ApplyVSchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the newly-created keyspace. - Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + VSchema *vschema.Keyspace `protobuf:"bytes,1,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` } -func (x *CreateKeyspaceResponse) Reset() { - *x = CreateKeyspaceResponse{} +func (x *ApplyVSchemaResponse) Reset() { + *x = ApplyVSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[25] + mi := &file_vtctldata_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *CreateKeyspaceResponse) String() string { +func (x *ApplyVSchemaResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateKeyspaceResponse) ProtoMessage() {} +func (*ApplyVSchemaResponse) ProtoMessage() {} -func (x *CreateKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[25] +func (x *ApplyVSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1858,52 +2010,57 @@ func (x *CreateKeyspaceResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateKeyspaceResponse.ProtoReflect.Descriptor instead. -func (*CreateKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{25} +// Deprecated: Use ApplyVSchemaResponse.ProtoReflect.Descriptor instead. +func (*ApplyVSchemaResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{19} } -func (x *CreateKeyspaceResponse) GetKeyspace() *Keyspace { +func (x *ApplyVSchemaResponse) GetVSchema() *vschema.Keyspace { if x != nil { - return x.Keyspace + return x.VSchema } return nil } -type CreateShardRequest struct { +type BackupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the name of the keyspace to create the shard in. - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // ShardName is the name of the shard to create. E.g. "-" or "-80". - ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` - // Force treats an attempt to create a shard that already exists as a - // non-error. - Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` - // IncludeParent creates the parent keyspace as an empty BASE keyspace, if it - // doesn't already exist. - IncludeParent bool `protobuf:"varint,4,opt,name=include_parent,json=includeParent,proto3" json:"include_parent,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + // AllowPrimary allows the backup to proceed if TabletAlias is a PRIMARY. + // + // WARNING: If using the builtin backup engine, this will shutdown mysqld on + // the primary for the duration of the backup, and no writes will be possible. + AllowPrimary bool `protobuf:"varint,2,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` + // Concurrency specifies the number of compression/checksum jobs to run + // simultaneously. + Concurrency uint64 `protobuf:"varint,3,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + // IncrementalFromPos indicates a position of a previous backup. When this value is non-empty + // then the backup becomes incremental and applies as of given position. + IncrementalFromPos string `protobuf:"bytes,4,opt,name=incremental_from_pos,json=incrementalFromPos,proto3" json:"incremental_from_pos,omitempty"` + // UpgradeSafe indicates if the backup should be taken with innodb_fast_shutdown=0 + // so that it's a backup that can be used for an upgrade. + UpgradeSafe bool `protobuf:"varint,5,opt,name=upgrade_safe,json=upgradeSafe,proto3" json:"upgrade_safe,omitempty"` } -func (x *CreateShardRequest) Reset() { - *x = CreateShardRequest{} +func (x *BackupRequest) Reset() { + *x = BackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[26] + mi := &file_vtctldata_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *CreateShardRequest) String() string { +func (x *BackupRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateShardRequest) ProtoMessage() {} +func (*BackupRequest) ProtoMessage() {} -func (x *CreateShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[26] +func (x *BackupRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1914,71 +2071,75 @@ func (x *CreateShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateShardRequest.ProtoReflect.Descriptor instead. -func (*CreateShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{26} +// Deprecated: Use BackupRequest.ProtoReflect.Descriptor instead. +func (*BackupRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{20} } -func (x *CreateShardRequest) GetKeyspace() string { +func (x *BackupRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Keyspace + return x.TabletAlias } - return "" + return nil } -func (x *CreateShardRequest) GetShardName() string { +func (x *BackupRequest) GetAllowPrimary() bool { if x != nil { - return x.ShardName + return x.AllowPrimary } - return "" + return false } -func (x *CreateShardRequest) GetForce() bool { +func (x *BackupRequest) GetConcurrency() uint64 { if x != nil { - return x.Force + return x.Concurrency } - return false + return 0 } -func (x *CreateShardRequest) GetIncludeParent() bool { +func (x *BackupRequest) GetIncrementalFromPos() string { if x != nil { - return x.IncludeParent + return x.IncrementalFromPos + } + return "" +} + +func (x *BackupRequest) GetUpgradeSafe() bool { + if x != nil { + return x.UpgradeSafe } return false } -type CreateShardResponse struct { +type BackupResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the created keyspace. It is set only if IncludeParent was - // specified in the request and the parent keyspace needed to be created. - Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Shard is the newly-created shard object. - Shard *Shard `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // ShardAlreadyExists is set if Force was specified in the request and the - // shard already existed. - ShardAlreadyExists bool `protobuf:"varint,3,opt,name=shard_already_exists,json=shardAlreadyExists,proto3" json:"shard_already_exists,omitempty"` + // TabletAlias is the alias being used for the backup. + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` + Event *logutil.Event `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` } -func (x *CreateShardResponse) Reset() { - *x = CreateShardResponse{} +func (x *BackupResponse) Reset() { + *x = BackupResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[27] + mi := &file_vtctldata_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *CreateShardResponse) String() string { +func (x *BackupResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateShardResponse) ProtoMessage() {} +func (*BackupResponse) ProtoMessage() {} -func (x *CreateShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[27] +func (x *BackupResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1989,58 +2150,77 @@ func (x *CreateShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateShardResponse.ProtoReflect.Descriptor instead. -func (*CreateShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{27} +// Deprecated: Use BackupResponse.ProtoReflect.Descriptor instead. +func (*BackupResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{21} } -func (x *CreateShardResponse) GetKeyspace() *Keyspace { +func (x *BackupResponse) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Keyspace + return x.TabletAlias } return nil } -func (x *CreateShardResponse) GetShard() *Shard { +func (x *BackupResponse) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *BackupResponse) GetShard() string { if x != nil { return x.Shard } - return nil + return "" } -func (x *CreateShardResponse) GetShardAlreadyExists() bool { +func (x *BackupResponse) GetEvent() *logutil.Event { if x != nil { - return x.ShardAlreadyExists + return x.Event } - return false + return nil } -type DeleteCellInfoRequest struct { +type BackupShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // AllowPrimary allows the backup to occur on a PRIMARY tablet. See + // BackupRequest.AllowPrimary for warnings and caveats. + AllowPrimary bool `protobuf:"varint,3,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` + // Concurrency specifies the number of compression/checksum jobs to run + // simultaneously. + Concurrency uint64 `protobuf:"varint,4,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + // UpgradeSafe indicates if the backup should be taken with innodb_fast_shutdown=0 + // so that it's a backup that can be used for an upgrade. + UpgradeSafe bool `protobuf:"varint,5,opt,name=upgrade_safe,json=upgradeSafe,proto3" json:"upgrade_safe,omitempty"` + // IncrementalFromPos indicates a position of a previous backup. When this value is non-empty + // then the backup becomes incremental and applies as of given position. + IncrementalFromPos string `protobuf:"bytes,6,opt,name=incremental_from_pos,json=incrementalFromPos,proto3" json:"incremental_from_pos,omitempty"` } -func (x *DeleteCellInfoRequest) Reset() { - *x = DeleteCellInfoRequest{} +func (x *BackupShardRequest) Reset() { + *x = BackupShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[28] + mi := &file_vtctldata_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteCellInfoRequest) String() string { +func (x *BackupShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteCellInfoRequest) ProtoMessage() {} +func (*BackupShardRequest) ProtoMessage() {} -func (x *DeleteCellInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[28] +func (x *BackupShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2051,48 +2231,79 @@ func (x *DeleteCellInfoRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteCellInfoRequest.ProtoReflect.Descriptor instead. -func (*DeleteCellInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{28} +// Deprecated: Use BackupShardRequest.ProtoReflect.Descriptor instead. +func (*BackupShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{22} } -func (x *DeleteCellInfoRequest) GetName() string { +func (x *BackupShardRequest) GetKeyspace() string { if x != nil { - return x.Name + return x.Keyspace } return "" } -func (x *DeleteCellInfoRequest) GetForce() bool { +func (x *BackupShardRequest) GetShard() string { if x != nil { - return x.Force + return x.Shard + } + return "" +} + +func (x *BackupShardRequest) GetAllowPrimary() bool { + if x != nil { + return x.AllowPrimary } return false } -type DeleteCellInfoResponse struct { +func (x *BackupShardRequest) GetConcurrency() uint64 { + if x != nil { + return x.Concurrency + } + return 0 +} + +func (x *BackupShardRequest) GetUpgradeSafe() bool { + if x != nil { + return x.UpgradeSafe + } + return false +} + +func (x *BackupShardRequest) GetIncrementalFromPos() string { + if x != nil { + return x.IncrementalFromPos + } + return "" +} + +type CancelSchemaMigrationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` } -func (x *DeleteCellInfoResponse) Reset() { - *x = DeleteCellInfoResponse{} +func (x *CancelSchemaMigrationRequest) Reset() { + *x = CancelSchemaMigrationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[29] + mi := &file_vtctldata_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteCellInfoResponse) String() string { +func (x *CancelSchemaMigrationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteCellInfoResponse) ProtoMessage() {} +func (*CancelSchemaMigrationRequest) ProtoMessage() {} -func (x *DeleteCellInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[29] +func (x *CancelSchemaMigrationRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2103,36 +2314,50 @@ func (x *DeleteCellInfoResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteCellInfoResponse.ProtoReflect.Descriptor instead. -func (*DeleteCellInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{29} +// Deprecated: Use CancelSchemaMigrationRequest.ProtoReflect.Descriptor instead. +func (*CancelSchemaMigrationRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{23} } -type DeleteCellsAliasRequest struct { +func (x *CancelSchemaMigrationRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *CancelSchemaMigrationRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +type CancelSchemaMigrationResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + RowsAffectedByShard map[string]uint64 `protobuf:"bytes,1,rep,name=rows_affected_by_shard,json=rowsAffectedByShard,proto3" json:"rows_affected_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } -func (x *DeleteCellsAliasRequest) Reset() { - *x = DeleteCellsAliasRequest{} +func (x *CancelSchemaMigrationResponse) Reset() { + *x = CancelSchemaMigrationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[30] + mi := &file_vtctldata_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteCellsAliasRequest) String() string { +func (x *CancelSchemaMigrationResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteCellsAliasRequest) ProtoMessage() {} +func (*CancelSchemaMigrationResponse) ProtoMessage() {} -func (x *DeleteCellsAliasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[30] +func (x *CancelSchemaMigrationResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2143,41 +2368,45 @@ func (x *DeleteCellsAliasRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteCellsAliasRequest.ProtoReflect.Descriptor instead. -func (*DeleteCellsAliasRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{30} +// Deprecated: Use CancelSchemaMigrationResponse.ProtoReflect.Descriptor instead. +func (*CancelSchemaMigrationResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{24} } -func (x *DeleteCellsAliasRequest) GetName() string { +func (x *CancelSchemaMigrationResponse) GetRowsAffectedByShard() map[string]uint64 { if x != nil { - return x.Name + return x.RowsAffectedByShard } - return "" + return nil } -type DeleteCellsAliasResponse struct { +type ChangeTabletTypeRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + DbType topodata.TabletType `protobuf:"varint,2,opt,name=db_type,json=dbType,proto3,enum=topodata.TabletType" json:"db_type,omitempty"` + DryRun bool `protobuf:"varint,3,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` } -func (x *DeleteCellsAliasResponse) Reset() { - *x = DeleteCellsAliasResponse{} +func (x *ChangeTabletTypeRequest) Reset() { + *x = ChangeTabletTypeRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[31] + mi := &file_vtctldata_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteCellsAliasResponse) String() string { +func (x *ChangeTabletTypeRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteCellsAliasResponse) ProtoMessage() {} +func (*ChangeTabletTypeRequest) ProtoMessage() {} -func (x *DeleteCellsAliasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[31] +func (x *ChangeTabletTypeRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2188,44 +2417,59 @@ func (x *DeleteCellsAliasResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteCellsAliasResponse.ProtoReflect.Descriptor instead. -func (*DeleteCellsAliasResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{31} +// Deprecated: Use ChangeTabletTypeRequest.ProtoReflect.Descriptor instead. +func (*ChangeTabletTypeRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{25} } -type DeleteKeyspaceRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *ChangeTabletTypeRequest) GetTabletAlias() *topodata.TabletAlias { + if x != nil { + return x.TabletAlias + } + return nil +} - // Keyspace is the name of the keyspace to delete. - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Recursive causes all shards in the keyspace to be recursively deleted - // before deleting the keyspace. It is an error to call DeleteKeyspace on a - // non-empty keyspace without also specifying Recursive. - Recursive bool `protobuf:"varint,2,opt,name=recursive,proto3" json:"recursive,omitempty"` - // Force allows a keyspace to be deleted even if the keyspace lock cannot be - // obtained. This should only be used to force-clean a keyspace. - Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` +func (x *ChangeTabletTypeRequest) GetDbType() topodata.TabletType { + if x != nil { + return x.DbType + } + return topodata.TabletType(0) } -func (x *DeleteKeyspaceRequest) Reset() { - *x = DeleteKeyspaceRequest{} +func (x *ChangeTabletTypeRequest) GetDryRun() bool { + if x != nil { + return x.DryRun + } + return false +} + +type ChangeTabletTypeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BeforeTablet *topodata.Tablet `protobuf:"bytes,1,opt,name=before_tablet,json=beforeTablet,proto3" json:"before_tablet,omitempty"` + AfterTablet *topodata.Tablet `protobuf:"bytes,2,opt,name=after_tablet,json=afterTablet,proto3" json:"after_tablet,omitempty"` + WasDryRun bool `protobuf:"varint,3,opt,name=was_dry_run,json=wasDryRun,proto3" json:"was_dry_run,omitempty"` +} + +func (x *ChangeTabletTypeResponse) Reset() { + *x = ChangeTabletTypeResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[32] + mi := &file_vtctldata_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteKeyspaceRequest) String() string { +func (x *ChangeTabletTypeResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteKeyspaceRequest) ProtoMessage() {} +func (*ChangeTabletTypeResponse) ProtoMessage() {} -func (x *DeleteKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[32] +func (x *ChangeTabletTypeResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2236,55 +2480,58 @@ func (x *DeleteKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*DeleteKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{32} +// Deprecated: Use ChangeTabletTypeResponse.ProtoReflect.Descriptor instead. +func (*ChangeTabletTypeResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{26} } -func (x *DeleteKeyspaceRequest) GetKeyspace() string { +func (x *ChangeTabletTypeResponse) GetBeforeTablet() *topodata.Tablet { if x != nil { - return x.Keyspace + return x.BeforeTablet } - return "" + return nil } -func (x *DeleteKeyspaceRequest) GetRecursive() bool { +func (x *ChangeTabletTypeResponse) GetAfterTablet() *topodata.Tablet { if x != nil { - return x.Recursive + return x.AfterTablet } - return false + return nil } -func (x *DeleteKeyspaceRequest) GetForce() bool { +func (x *ChangeTabletTypeResponse) GetWasDryRun() bool { if x != nil { - return x.Force + return x.WasDryRun } return false } -type DeleteKeyspaceResponse struct { +type CleanupSchemaMigrationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` } -func (x *DeleteKeyspaceResponse) Reset() { - *x = DeleteKeyspaceResponse{} +func (x *CleanupSchemaMigrationRequest) Reset() { + *x = CleanupSchemaMigrationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[33] + mi := &file_vtctldata_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteKeyspaceResponse) String() string { +func (x *CleanupSchemaMigrationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteKeyspaceResponse) ProtoMessage() {} +func (*CleanupSchemaMigrationRequest) ProtoMessage() {} -func (x *DeleteKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[33] +func (x *CleanupSchemaMigrationRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2295,48 +2542,50 @@ func (x *DeleteKeyspaceResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteKeyspaceResponse.ProtoReflect.Descriptor instead. -func (*DeleteKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{33} +// Deprecated: Use CleanupSchemaMigrationRequest.ProtoReflect.Descriptor instead. +func (*CleanupSchemaMigrationRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{27} } -type DeleteShardsRequest struct { +func (x *CleanupSchemaMigrationRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *CleanupSchemaMigrationRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +type CleanupSchemaMigrationResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Shards is the list of shards to delete. The nested topodatapb.Shard field - // is not required for DeleteShard, but the Keyspace and Shard fields are. - Shards []*Shard `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty"` - // Recursive also deletes all tablets belonging to the shard(s). It is an - // error to call DeleteShard on a non-empty shard without also specificying - // Recursive. - Recursive bool `protobuf:"varint,2,opt,name=recursive,proto3" json:"recursive,omitempty"` - // EvenIfServing allows a shard to be deleted even if it is serving, which is - // normally an error. Use with caution. - EvenIfServing bool `protobuf:"varint,4,opt,name=even_if_serving,json=evenIfServing,proto3" json:"even_if_serving,omitempty"` - // Force allows a shard to be deleted even if the shard lock cannot be - // obtained. This should only be used to force-clean a shard. - Force bool `protobuf:"varint,5,opt,name=force,proto3" json:"force,omitempty"` + RowsAffectedByShard map[string]uint64 `protobuf:"bytes,1,rep,name=rows_affected_by_shard,json=rowsAffectedByShard,proto3" json:"rows_affected_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } -func (x *DeleteShardsRequest) Reset() { - *x = DeleteShardsRequest{} +func (x *CleanupSchemaMigrationResponse) Reset() { + *x = CleanupSchemaMigrationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[34] + mi := &file_vtctldata_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteShardsRequest) String() string { +func (x *CleanupSchemaMigrationResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteShardsRequest) ProtoMessage() {} +func (*CleanupSchemaMigrationResponse) ProtoMessage() {} -func (x *DeleteShardsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[34] +func (x *CleanupSchemaMigrationResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2347,62 +2596,44 @@ func (x *DeleteShardsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteShardsRequest.ProtoReflect.Descriptor instead. -func (*DeleteShardsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{34} +// Deprecated: Use CleanupSchemaMigrationResponse.ProtoReflect.Descriptor instead. +func (*CleanupSchemaMigrationResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{28} } -func (x *DeleteShardsRequest) GetShards() []*Shard { +func (x *CleanupSchemaMigrationResponse) GetRowsAffectedByShard() map[string]uint64 { if x != nil { - return x.Shards + return x.RowsAffectedByShard } return nil } -func (x *DeleteShardsRequest) GetRecursive() bool { - if x != nil { - return x.Recursive - } - return false -} - -func (x *DeleteShardsRequest) GetEvenIfServing() bool { - if x != nil { - return x.EvenIfServing - } - return false -} - -func (x *DeleteShardsRequest) GetForce() bool { - if x != nil { - return x.Force - } - return false -} - -type DeleteShardsResponse struct { +type CompleteSchemaMigrationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` } -func (x *DeleteShardsResponse) Reset() { - *x = DeleteShardsResponse{} +func (x *CompleteSchemaMigrationRequest) Reset() { + *x = CompleteSchemaMigrationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[35] + mi := &file_vtctldata_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteShardsResponse) String() string { +func (x *CompleteSchemaMigrationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteShardsResponse) ProtoMessage() {} +func (*CompleteSchemaMigrationRequest) ProtoMessage() {} -func (x *DeleteShardsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[35] +func (x *CompleteSchemaMigrationRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2413,36 +2644,50 @@ func (x *DeleteShardsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteShardsResponse.ProtoReflect.Descriptor instead. -func (*DeleteShardsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{35} +// Deprecated: Use CompleteSchemaMigrationRequest.ProtoReflect.Descriptor instead. +func (*CompleteSchemaMigrationRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{29} } -type DeleteSrvVSchemaRequest struct { +func (x *CompleteSchemaMigrationRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *CompleteSchemaMigrationRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +type CompleteSchemaMigrationResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Cell string `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` + RowsAffectedByShard map[string]uint64 `protobuf:"bytes,1,rep,name=rows_affected_by_shard,json=rowsAffectedByShard,proto3" json:"rows_affected_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } -func (x *DeleteSrvVSchemaRequest) Reset() { - *x = DeleteSrvVSchemaRequest{} +func (x *CompleteSchemaMigrationResponse) Reset() { + *x = CompleteSchemaMigrationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[36] + mi := &file_vtctldata_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteSrvVSchemaRequest) String() string { +func (x *CompleteSchemaMigrationResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteSrvVSchemaRequest) ProtoMessage() {} +func (*CompleteSchemaMigrationResponse) ProtoMessage() {} -func (x *DeleteSrvVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[36] +func (x *CompleteSchemaMigrationResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2453,41 +2698,65 @@ func (x *DeleteSrvVSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteSrvVSchemaRequest.ProtoReflect.Descriptor instead. -func (*DeleteSrvVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{36} +// Deprecated: Use CompleteSchemaMigrationResponse.ProtoReflect.Descriptor instead. +func (*CompleteSchemaMigrationResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{30} } -func (x *DeleteSrvVSchemaRequest) GetCell() string { +func (x *CompleteSchemaMigrationResponse) GetRowsAffectedByShard() map[string]uint64 { if x != nil { - return x.Cell + return x.RowsAffectedByShard } - return "" + return nil } -type DeleteSrvVSchemaResponse struct { +type CreateKeyspaceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + // Name is the name of the keyspace. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Force proceeds with the request even if the keyspace already exists. + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` + // AllowEmptyVSchema allows a keyspace to be created with no vschema. + AllowEmptyVSchema bool `protobuf:"varint,3,opt,name=allow_empty_v_schema,json=allowEmptyVSchema,proto3" json:"allow_empty_v_schema,omitempty"` + // ServedFroms specifies a set of db_type:keyspace pairs used to serve + // traffic for the keyspace. + ServedFroms []*topodata.Keyspace_ServedFrom `protobuf:"bytes,6,rep,name=served_froms,json=servedFroms,proto3" json:"served_froms,omitempty"` + // Type is the type of the keyspace to create. + Type topodata.KeyspaceType `protobuf:"varint,7,opt,name=type,proto3,enum=topodata.KeyspaceType" json:"type,omitempty"` + // BaseKeyspace specifies the base keyspace for SNAPSHOT keyspaces. It is + // required to create a SNAPSHOT keyspace. + BaseKeyspace string `protobuf:"bytes,8,opt,name=base_keyspace,json=baseKeyspace,proto3" json:"base_keyspace,omitempty"` + // SnapshotTime specifies the snapshot time for this keyspace. It is required + // to create a SNAPSHOT keyspace. + SnapshotTime *vttime.Time `protobuf:"bytes,9,opt,name=snapshot_time,json=snapshotTime,proto3" json:"snapshot_time,omitempty"` + // DurabilityPolicy is the durability policy to be + // used for this keyspace. + DurabilityPolicy string `protobuf:"bytes,10,opt,name=durability_policy,json=durabilityPolicy,proto3" json:"durability_policy,omitempty"` + // SidecarDBName is the name of the sidecar database that + // each vttablet in the keyspace will use. + SidecarDbName string `protobuf:"bytes,11,opt,name=sidecar_db_name,json=sidecarDbName,proto3" json:"sidecar_db_name,omitempty"` } -func (x *DeleteSrvVSchemaResponse) Reset() { - *x = DeleteSrvVSchemaResponse{} +func (x *CreateKeyspaceRequest) Reset() { + *x = CreateKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[37] + mi := &file_vtctldata_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteSrvVSchemaResponse) String() string { +func (x *CreateKeyspaceRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteSrvVSchemaResponse) ProtoMessage() {} +func (*CreateKeyspaceRequest) ProtoMessage() {} -func (x *DeleteSrvVSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[37] +func (x *CreateKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2498,92 +2767,100 @@ func (x *DeleteSrvVSchemaResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteSrvVSchemaResponse.ProtoReflect.Descriptor instead. -func (*DeleteSrvVSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{37} +// Deprecated: Use CreateKeyspaceRequest.ProtoReflect.Descriptor instead. +func (*CreateKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{31} } -type DeleteTabletsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // TabletAliases is the list of tablets to delete. - TabletAliases []*topodata.TabletAlias `protobuf:"bytes,1,rep,name=tablet_aliases,json=tabletAliases,proto3" json:"tablet_aliases,omitempty"` - // AllowPrimary allows for the primary tablet of a shard to be deleted. - // Use with caution. - AllowPrimary bool `protobuf:"varint,2,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` +func (x *CreateKeyspaceRequest) GetName() string { + if x != nil { + return x.Name + } + return "" } -func (x *DeleteTabletsRequest) Reset() { - *x = DeleteTabletsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *CreateKeyspaceRequest) GetForce() bool { + if x != nil { + return x.Force } + return false } -func (x *DeleteTabletsRequest) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *CreateKeyspaceRequest) GetAllowEmptyVSchema() bool { + if x != nil { + return x.AllowEmptyVSchema + } + return false } -func (*DeleteTabletsRequest) ProtoMessage() {} +func (x *CreateKeyspaceRequest) GetServedFroms() []*topodata.Keyspace_ServedFrom { + if x != nil { + return x.ServedFroms + } + return nil +} -func (x *DeleteTabletsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *CreateKeyspaceRequest) GetType() topodata.KeyspaceType { + if x != nil { + return x.Type } - return mi.MessageOf(x) + return topodata.KeyspaceType(0) } -// Deprecated: Use DeleteTabletsRequest.ProtoReflect.Descriptor instead. -func (*DeleteTabletsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{38} +func (x *CreateKeyspaceRequest) GetBaseKeyspace() string { + if x != nil { + return x.BaseKeyspace + } + return "" } -func (x *DeleteTabletsRequest) GetTabletAliases() []*topodata.TabletAlias { +func (x *CreateKeyspaceRequest) GetSnapshotTime() *vttime.Time { if x != nil { - return x.TabletAliases + return x.SnapshotTime } return nil } -func (x *DeleteTabletsRequest) GetAllowPrimary() bool { +func (x *CreateKeyspaceRequest) GetDurabilityPolicy() string { if x != nil { - return x.AllowPrimary + return x.DurabilityPolicy } - return false + return "" } -type DeleteTabletsResponse struct { +func (x *CreateKeyspaceRequest) GetSidecarDbName() string { + if x != nil { + return x.SidecarDbName + } + return "" +} + +type CreateKeyspaceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + // Keyspace is the newly-created keyspace. + Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *DeleteTabletsResponse) Reset() { - *x = DeleteTabletsResponse{} +func (x *CreateKeyspaceResponse) Reset() { + *x = CreateKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[39] + mi := &file_vtctldata_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *DeleteTabletsResponse) String() string { +func (x *CreateKeyspaceResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteTabletsResponse) ProtoMessage() {} +func (*CreateKeyspaceResponse) ProtoMessage() {} -func (x *DeleteTabletsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[39] +func (x *CreateKeyspaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2594,53 +2871,52 @@ func (x *DeleteTabletsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteTabletsResponse.ProtoReflect.Descriptor instead. -func (*DeleteTabletsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{39} +// Deprecated: Use CreateKeyspaceResponse.ProtoReflect.Descriptor instead. +func (*CreateKeyspaceResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{32} } -type EmergencyReparentShardRequest struct { +func (x *CreateKeyspaceResponse) GetKeyspace() *Keyspace { + if x != nil { + return x.Keyspace + } + return nil +} + +type CreateShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the name of the keyspace to perform the Emergency Reparent in. + // Keyspace is the name of the keyspace to create the shard in. Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Shard is the name of the shard to perform the Emergency Reparent in. - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // Optional alias of a tablet that should become the new shard primary. If not - // not specified, the vtctld will select the most up-to-date canditate to - // promote. - NewPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=new_primary,json=newPrimary,proto3" json:"new_primary,omitempty"` - // List of replica aliases to ignore during the Emergency Reparent. The vtctld - // will not attempt to stop replication on these tablets, nor attempt to - // demote any that may think they are the shard primary. - IgnoreReplicas []*topodata.TabletAlias `protobuf:"bytes,4,rep,name=ignore_replicas,json=ignoreReplicas,proto3" json:"ignore_replicas,omitempty"` - // WaitReplicasTimeout is the duration of time to wait for replicas to catch - // up in reparenting. - WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` - // PreventCrossCellPromotion is used to only promote the new primary from the same cell - // as the failed primary. - PreventCrossCellPromotion bool `protobuf:"varint,6,opt,name=prevent_cross_cell_promotion,json=preventCrossCellPromotion,proto3" json:"prevent_cross_cell_promotion,omitempty"` + // ShardName is the name of the shard to create. E.g. "-" or "-80". + ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` + // Force treats an attempt to create a shard that already exists as a + // non-error. + Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` + // IncludeParent creates the parent keyspace as an empty BASE keyspace, if it + // doesn't already exist. + IncludeParent bool `protobuf:"varint,4,opt,name=include_parent,json=includeParent,proto3" json:"include_parent,omitempty"` } -func (x *EmergencyReparentShardRequest) Reset() { - *x = EmergencyReparentShardRequest{} +func (x *CreateShardRequest) Reset() { + *x = CreateShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[40] + mi := &file_vtctldata_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *EmergencyReparentShardRequest) String() string { +func (x *CreateShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*EmergencyReparentShardRequest) ProtoMessage() {} +func (*CreateShardRequest) ProtoMessage() {} -func (x *EmergencyReparentShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[40] +func (x *CreateShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2651,87 +2927,71 @@ func (x *EmergencyReparentShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use EmergencyReparentShardRequest.ProtoReflect.Descriptor instead. -func (*EmergencyReparentShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{40} +// Deprecated: Use CreateShardRequest.ProtoReflect.Descriptor instead. +func (*CreateShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{33} } -func (x *EmergencyReparentShardRequest) GetKeyspace() string { +func (x *CreateShardRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *EmergencyReparentShardRequest) GetShard() string { +func (x *CreateShardRequest) GetShardName() string { if x != nil { - return x.Shard + return x.ShardName } return "" } -func (x *EmergencyReparentShardRequest) GetNewPrimary() *topodata.TabletAlias { - if x != nil { - return x.NewPrimary - } - return nil -} - -func (x *EmergencyReparentShardRequest) GetIgnoreReplicas() []*topodata.TabletAlias { - if x != nil { - return x.IgnoreReplicas - } - return nil -} - -func (x *EmergencyReparentShardRequest) GetWaitReplicasTimeout() *vttime.Duration { +func (x *CreateShardRequest) GetForce() bool { if x != nil { - return x.WaitReplicasTimeout + return x.Force } - return nil + return false } -func (x *EmergencyReparentShardRequest) GetPreventCrossCellPromotion() bool { +func (x *CreateShardRequest) GetIncludeParent() bool { if x != nil { - return x.PreventCrossCellPromotion + return x.IncludeParent } return false } -type EmergencyReparentShardResponse struct { +type CreateShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the name of the keyspace the Emergency Reparent took place in. - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Shard is the name of the shard the Emergency Reparent took place in. - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // PromotedPrimary is the alias of the tablet that was promoted to shard - // primary. If NewPrimary was set in the request, then this will be the same - // alias. Otherwise, it will be the alias of the tablet found to be most - // up-to-date. - PromotedPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=promoted_primary,json=promotedPrimary,proto3" json:"promoted_primary,omitempty"` - Events []*logutil.Event `protobuf:"bytes,4,rep,name=events,proto3" json:"events,omitempty"` + // Keyspace is the created keyspace. It is set only if IncludeParent was + // specified in the request and the parent keyspace needed to be created. + Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the newly-created shard object. + Shard *Shard `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // ShardAlreadyExists is set if Force was specified in the request and the + // shard already existed. + ShardAlreadyExists bool `protobuf:"varint,3,opt,name=shard_already_exists,json=shardAlreadyExists,proto3" json:"shard_already_exists,omitempty"` } -func (x *EmergencyReparentShardResponse) Reset() { - *x = EmergencyReparentShardResponse{} +func (x *CreateShardResponse) Reset() { + *x = CreateShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[41] + mi := &file_vtctldata_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *EmergencyReparentShardResponse) String() string { +func (x *CreateShardResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*EmergencyReparentShardResponse) ProtoMessage() {} +func (*CreateShardResponse) ProtoMessage() {} -func (x *EmergencyReparentShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[41] +func (x *CreateShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2742,74 +3002,58 @@ func (x *EmergencyReparentShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use EmergencyReparentShardResponse.ProtoReflect.Descriptor instead. -func (*EmergencyReparentShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{41} +// Deprecated: Use CreateShardResponse.ProtoReflect.Descriptor instead. +func (*CreateShardResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{34} } -func (x *EmergencyReparentShardResponse) GetKeyspace() string { +func (x *CreateShardResponse) GetKeyspace() *Keyspace { if x != nil { return x.Keyspace } - return "" + return nil } -func (x *EmergencyReparentShardResponse) GetShard() string { +func (x *CreateShardResponse) GetShard() *Shard { if x != nil { return x.Shard } - return "" -} - -func (x *EmergencyReparentShardResponse) GetPromotedPrimary() *topodata.TabletAlias { - if x != nil { - return x.PromotedPrimary - } return nil } -func (x *EmergencyReparentShardResponse) GetEvents() []*logutil.Event { +func (x *CreateShardResponse) GetShardAlreadyExists() bool { if x != nil { - return x.Events + return x.ShardAlreadyExists } - return nil + return false } -type ExecuteFetchAsAppRequest struct { +type DeleteCellInfoRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"` - // MaxRows is an optional parameter to limit the number of rows read into the - // QueryResult. Note that this does not apply a LIMIT to the query, just how - // many rows are read from the MySQL server on the tablet side. - // - // This field is optional. Specifying a non-positive value will use whatever - // default is configured in the VtctldService. - MaxRows int64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` - // UsePool causes the query to be run with a pooled connection to the tablet. - UsePool bool `protobuf:"varint,4,opt,name=use_pool,json=usePool,proto3" json:"use_pool,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` } -func (x *ExecuteFetchAsAppRequest) Reset() { - *x = ExecuteFetchAsAppRequest{} +func (x *DeleteCellInfoRequest) Reset() { + *x = DeleteCellInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[42] + mi := &file_vtctldata_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ExecuteFetchAsAppRequest) String() string { +func (x *DeleteCellInfoRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ExecuteFetchAsAppRequest) ProtoMessage() {} +func (*DeleteCellInfoRequest) ProtoMessage() {} -func (x *ExecuteFetchAsAppRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[42] +func (x *DeleteCellInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2820,64 +3064,48 @@ func (x *ExecuteFetchAsAppRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExecuteFetchAsAppRequest.ProtoReflect.Descriptor instead. -func (*ExecuteFetchAsAppRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{42} +// Deprecated: Use DeleteCellInfoRequest.ProtoReflect.Descriptor instead. +func (*DeleteCellInfoRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{35} } -func (x *ExecuteFetchAsAppRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *DeleteCellInfoRequest) GetName() string { if x != nil { - return x.TabletAlias - } - return nil -} - -func (x *ExecuteFetchAsAppRequest) GetQuery() string { - if x != nil { - return x.Query + return x.Name } return "" } -func (x *ExecuteFetchAsAppRequest) GetMaxRows() int64 { - if x != nil { - return x.MaxRows - } - return 0 -} - -func (x *ExecuteFetchAsAppRequest) GetUsePool() bool { +func (x *DeleteCellInfoRequest) GetForce() bool { if x != nil { - return x.UsePool + return x.Force } return false } -type ExecuteFetchAsAppResponse struct { +type DeleteCellInfoResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` } -func (x *ExecuteFetchAsAppResponse) Reset() { - *x = ExecuteFetchAsAppResponse{} +func (x *DeleteCellInfoResponse) Reset() { + *x = DeleteCellInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[43] + mi := &file_vtctldata_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ExecuteFetchAsAppResponse) String() string { +func (x *DeleteCellInfoResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ExecuteFetchAsAppResponse) ProtoMessage() {} +func (*DeleteCellInfoResponse) ProtoMessage() {} -func (x *ExecuteFetchAsAppResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[43] +func (x *DeleteCellInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2888,57 +3116,36 @@ func (x *ExecuteFetchAsAppResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExecuteFetchAsAppResponse.ProtoReflect.Descriptor instead. -func (*ExecuteFetchAsAppResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{43} -} - -func (x *ExecuteFetchAsAppResponse) GetResult() *query.QueryResult { - if x != nil { - return x.Result - } - return nil +// Deprecated: Use DeleteCellInfoResponse.ProtoReflect.Descriptor instead. +func (*DeleteCellInfoResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{36} } -type ExecuteFetchAsDBARequest struct { +type DeleteCellsAliasRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"` - // MaxRows is an optional parameter to limit the number of rows read into the - // QueryResult. Note that this does not apply a LIMIT to the query, just how - // many rows are read from the MySQL server on the tablet side. - // - // This field is optional. Specifying a non-positive value will use whatever - // default is configured in the VtctldService. - MaxRows int64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` - // DisableBinlogs instructs the tablet not to use binary logging when - // executing the query. - DisableBinlogs bool `protobuf:"varint,4,opt,name=disable_binlogs,json=disableBinlogs,proto3" json:"disable_binlogs,omitempty"` - // ReloadSchema instructs the tablet to reload its schema after executing the - // query. - ReloadSchema bool `protobuf:"varint,5,opt,name=reload_schema,json=reloadSchema,proto3" json:"reload_schema,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -func (x *ExecuteFetchAsDBARequest) Reset() { - *x = ExecuteFetchAsDBARequest{} +func (x *DeleteCellsAliasRequest) Reset() { + *x = DeleteCellsAliasRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[44] + mi := &file_vtctldata_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ExecuteFetchAsDBARequest) String() string { +func (x *DeleteCellsAliasRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ExecuteFetchAsDBARequest) ProtoMessage() {} +func (*DeleteCellsAliasRequest) ProtoMessage() {} -func (x *ExecuteFetchAsDBARequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[44] +func (x *DeleteCellsAliasRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2949,71 +3156,41 @@ func (x *ExecuteFetchAsDBARequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExecuteFetchAsDBARequest.ProtoReflect.Descriptor instead. -func (*ExecuteFetchAsDBARequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{44} -} - -func (x *ExecuteFetchAsDBARequest) GetTabletAlias() *topodata.TabletAlias { - if x != nil { - return x.TabletAlias - } - return nil +// Deprecated: Use DeleteCellsAliasRequest.ProtoReflect.Descriptor instead. +func (*DeleteCellsAliasRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{37} } -func (x *ExecuteFetchAsDBARequest) GetQuery() string { +func (x *DeleteCellsAliasRequest) GetName() string { if x != nil { - return x.Query + return x.Name } return "" } -func (x *ExecuteFetchAsDBARequest) GetMaxRows() int64 { - if x != nil { - return x.MaxRows - } - return 0 -} - -func (x *ExecuteFetchAsDBARequest) GetDisableBinlogs() bool { - if x != nil { - return x.DisableBinlogs - } - return false -} - -func (x *ExecuteFetchAsDBARequest) GetReloadSchema() bool { - if x != nil { - return x.ReloadSchema - } - return false -} - -type ExecuteFetchAsDBAResponse struct { +type DeleteCellsAliasResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` } -func (x *ExecuteFetchAsDBAResponse) Reset() { - *x = ExecuteFetchAsDBAResponse{} +func (x *DeleteCellsAliasResponse) Reset() { + *x = DeleteCellsAliasResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[45] + mi := &file_vtctldata_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ExecuteFetchAsDBAResponse) String() string { +func (x *DeleteCellsAliasResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ExecuteFetchAsDBAResponse) ProtoMessage() {} +func (*DeleteCellsAliasResponse) ProtoMessage() {} -func (x *ExecuteFetchAsDBAResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[45] +func (x *DeleteCellsAliasResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3024,44 +3201,44 @@ func (x *ExecuteFetchAsDBAResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExecuteFetchAsDBAResponse.ProtoReflect.Descriptor instead. -func (*ExecuteFetchAsDBAResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{45} -} - -func (x *ExecuteFetchAsDBAResponse) GetResult() *query.QueryResult { - if x != nil { - return x.Result - } - return nil +// Deprecated: Use DeleteCellsAliasResponse.ProtoReflect.Descriptor instead. +func (*DeleteCellsAliasResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{38} } -type ExecuteHookRequest struct { +type DeleteKeyspaceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - TabletHookRequest *tabletmanagerdata.ExecuteHookRequest `protobuf:"bytes,2,opt,name=tablet_hook_request,json=tabletHookRequest,proto3" json:"tablet_hook_request,omitempty"` + // Keyspace is the name of the keyspace to delete. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Recursive causes all shards in the keyspace to be recursively deleted + // before deleting the keyspace. It is an error to call DeleteKeyspace on a + // non-empty keyspace without also specifying Recursive. + Recursive bool `protobuf:"varint,2,opt,name=recursive,proto3" json:"recursive,omitempty"` + // Force allows a keyspace to be deleted even if the keyspace lock cannot be + // obtained. This should only be used to force-clean a keyspace. + Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` } -func (x *ExecuteHookRequest) Reset() { - *x = ExecuteHookRequest{} +func (x *DeleteKeyspaceRequest) Reset() { + *x = DeleteKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[46] + mi := &file_vtctldata_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ExecuteHookRequest) String() string { +func (x *DeleteKeyspaceRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ExecuteHookRequest) ProtoMessage() {} +func (*DeleteKeyspaceRequest) ProtoMessage() {} -func (x *ExecuteHookRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[46] +func (x *DeleteKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3072,50 +3249,55 @@ func (x *ExecuteHookRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExecuteHookRequest.ProtoReflect.Descriptor instead. -func (*ExecuteHookRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{46} +// Deprecated: Use DeleteKeyspaceRequest.ProtoReflect.Descriptor instead. +func (*DeleteKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{39} } -func (x *ExecuteHookRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *DeleteKeyspaceRequest) GetKeyspace() string { if x != nil { - return x.TabletAlias + return x.Keyspace } - return nil + return "" } -func (x *ExecuteHookRequest) GetTabletHookRequest() *tabletmanagerdata.ExecuteHookRequest { +func (x *DeleteKeyspaceRequest) GetRecursive() bool { if x != nil { - return x.TabletHookRequest + return x.Recursive } - return nil + return false } -type ExecuteHookResponse struct { +func (x *DeleteKeyspaceRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +type DeleteKeyspaceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - HookResult *tabletmanagerdata.ExecuteHookResponse `protobuf:"bytes,1,opt,name=hook_result,json=hookResult,proto3" json:"hook_result,omitempty"` } -func (x *ExecuteHookResponse) Reset() { - *x = ExecuteHookResponse{} +func (x *DeleteKeyspaceResponse) Reset() { + *x = DeleteKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[47] + mi := &file_vtctldata_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ExecuteHookResponse) String() string { +func (x *DeleteKeyspaceResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ExecuteHookResponse) ProtoMessage() {} +func (*DeleteKeyspaceResponse) ProtoMessage() {} -func (x *ExecuteHookResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[47] +func (x *DeleteKeyspaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3126,43 +3308,48 @@ func (x *ExecuteHookResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ExecuteHookResponse.ProtoReflect.Descriptor instead. -func (*ExecuteHookResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{47} -} - -func (x *ExecuteHookResponse) GetHookResult() *tabletmanagerdata.ExecuteHookResponse { - if x != nil { - return x.HookResult - } - return nil +// Deprecated: Use DeleteKeyspaceResponse.ProtoReflect.Descriptor instead. +func (*DeleteKeyspaceResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{40} } -type FindAllShardsInKeyspaceRequest struct { +type DeleteShardsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shards is the list of shards to delete. The nested topodatapb.Shard field + // is not required for DeleteShard, but the Keyspace and Shard fields are. + Shards []*Shard `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty"` + // Recursive also deletes all tablets belonging to the shard(s). It is an + // error to call DeleteShard on a non-empty shard without also specificying + // Recursive. + Recursive bool `protobuf:"varint,2,opt,name=recursive,proto3" json:"recursive,omitempty"` + // EvenIfServing allows a shard to be deleted even if it is serving, which is + // normally an error. Use with caution. + EvenIfServing bool `protobuf:"varint,4,opt,name=even_if_serving,json=evenIfServing,proto3" json:"even_if_serving,omitempty"` + // Force allows a shard to be deleted even if the shard lock cannot be + // obtained. This should only be used to force-clean a shard. + Force bool `protobuf:"varint,5,opt,name=force,proto3" json:"force,omitempty"` } -func (x *FindAllShardsInKeyspaceRequest) Reset() { - *x = FindAllShardsInKeyspaceRequest{} +func (x *DeleteShardsRequest) Reset() { + *x = DeleteShardsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[48] + mi := &file_vtctldata_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *FindAllShardsInKeyspaceRequest) String() string { +func (x *DeleteShardsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*FindAllShardsInKeyspaceRequest) ProtoMessage() {} +func (*DeleteShardsRequest) ProtoMessage() {} -func (x *FindAllShardsInKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[48] +func (x *DeleteShardsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3173,43 +3360,62 @@ func (x *FindAllShardsInKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use FindAllShardsInKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*FindAllShardsInKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{48} +// Deprecated: Use DeleteShardsRequest.ProtoReflect.Descriptor instead. +func (*DeleteShardsRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{41} } -func (x *FindAllShardsInKeyspaceRequest) GetKeyspace() string { +func (x *DeleteShardsRequest) GetShards() []*Shard { if x != nil { - return x.Keyspace + return x.Shards } - return "" + return nil } -type FindAllShardsInKeyspaceResponse struct { +func (x *DeleteShardsRequest) GetRecursive() bool { + if x != nil { + return x.Recursive + } + return false +} + +func (x *DeleteShardsRequest) GetEvenIfServing() bool { + if x != nil { + return x.EvenIfServing + } + return false +} + +func (x *DeleteShardsRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +type DeleteShardsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Shards map[string]*Shard `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *FindAllShardsInKeyspaceResponse) Reset() { - *x = FindAllShardsInKeyspaceResponse{} +func (x *DeleteShardsResponse) Reset() { + *x = DeleteShardsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[49] + mi := &file_vtctldata_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *FindAllShardsInKeyspaceResponse) String() string { +func (x *DeleteShardsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*FindAllShardsInKeyspaceResponse) ProtoMessage() {} +func (*DeleteShardsResponse) ProtoMessage() {} -func (x *FindAllShardsInKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[49] +func (x *DeleteShardsResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3220,58 +3426,36 @@ func (x *FindAllShardsInKeyspaceResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use FindAllShardsInKeyspaceResponse.ProtoReflect.Descriptor instead. -func (*FindAllShardsInKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{49} -} - -func (x *FindAllShardsInKeyspaceResponse) GetShards() map[string]*Shard { - if x != nil { - return x.Shards - } - return nil +// Deprecated: Use DeleteShardsResponse.ProtoReflect.Descriptor instead. +func (*DeleteShardsResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{42} } -type GetBackupsRequest struct { +type DeleteSrvVSchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // Limit, if nonzero, will return only the most N recent backups. - Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` - // Detailed indicates whether to use the backupengine, if supported, to - // populate additional fields, such as Engine and Status, on BackupInfo - // objects in the response. If not set, or if the backupengine does not - // support populating these fields, Engine will always be empty, and Status - // will always be UNKNOWN. - Detailed bool `protobuf:"varint,4,opt,name=detailed,proto3" json:"detailed,omitempty"` - // DetailedLimit, if nonzero, will only populate additional fields (see Detailed) - // on the N most recent backups. The Limit field still dictates the total - // number of backup info objects returned, so, in reality, min(Limit, DetailedLimit) - // backup infos will have additional fields set, and any remaining backups - // will not. - DetailedLimit uint32 `protobuf:"varint,5,opt,name=detailed_limit,json=detailedLimit,proto3" json:"detailed_limit,omitempty"` + Cell string `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` } -func (x *GetBackupsRequest) Reset() { - *x = GetBackupsRequest{} +func (x *DeleteSrvVSchemaRequest) Reset() { + *x = DeleteSrvVSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[50] + mi := &file_vtctldata_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetBackupsRequest) String() string { +func (x *DeleteSrvVSchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetBackupsRequest) ProtoMessage() {} +func (*DeleteSrvVSchemaRequest) ProtoMessage() {} -func (x *GetBackupsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[50] +func (x *DeleteSrvVSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3282,71 +3466,41 @@ func (x *GetBackupsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetBackupsRequest.ProtoReflect.Descriptor instead. -func (*GetBackupsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{50} -} - -func (x *GetBackupsRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" +// Deprecated: Use DeleteSrvVSchemaRequest.ProtoReflect.Descriptor instead. +func (*DeleteSrvVSchemaRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{43} } -func (x *GetBackupsRequest) GetShard() string { +func (x *DeleteSrvVSchemaRequest) GetCell() string { if x != nil { - return x.Shard + return x.Cell } return "" } -func (x *GetBackupsRequest) GetLimit() uint32 { - if x != nil { - return x.Limit - } - return 0 -} - -func (x *GetBackupsRequest) GetDetailed() bool { - if x != nil { - return x.Detailed - } - return false -} - -func (x *GetBackupsRequest) GetDetailedLimit() uint32 { - if x != nil { - return x.DetailedLimit - } - return 0 -} - -type GetBackupsResponse struct { +type DeleteSrvVSchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Backups []*mysqlctl.BackupInfo `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` } -func (x *GetBackupsResponse) Reset() { - *x = GetBackupsResponse{} +func (x *DeleteSrvVSchemaResponse) Reset() { + *x = DeleteSrvVSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[51] + mi := &file_vtctldata_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetBackupsResponse) String() string { +func (x *DeleteSrvVSchemaResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetBackupsResponse) ProtoMessage() {} +func (*DeleteSrvVSchemaResponse) ProtoMessage() {} -func (x *GetBackupsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[51] +func (x *DeleteSrvVSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3357,43 +3511,40 @@ func (x *GetBackupsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetBackupsResponse.ProtoReflect.Descriptor instead. -func (*GetBackupsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{51} -} - -func (x *GetBackupsResponse) GetBackups() []*mysqlctl.BackupInfo { - if x != nil { - return x.Backups - } - return nil +// Deprecated: Use DeleteSrvVSchemaResponse.ProtoReflect.Descriptor instead. +func (*DeleteSrvVSchemaResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{44} } -type GetCellInfoRequest struct { +type DeleteTabletsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Cell string `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` + // TabletAliases is the list of tablets to delete. + TabletAliases []*topodata.TabletAlias `protobuf:"bytes,1,rep,name=tablet_aliases,json=tabletAliases,proto3" json:"tablet_aliases,omitempty"` + // AllowPrimary allows for the primary tablet of a shard to be deleted. + // Use with caution. + AllowPrimary bool `protobuf:"varint,2,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` } -func (x *GetCellInfoRequest) Reset() { - *x = GetCellInfoRequest{} +func (x *DeleteTabletsRequest) Reset() { + *x = DeleteTabletsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[52] + mi := &file_vtctldata_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetCellInfoRequest) String() string { +func (x *DeleteTabletsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetCellInfoRequest) ProtoMessage() {} +func (*DeleteTabletsRequest) ProtoMessage() {} -func (x *GetCellInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[52] +func (x *DeleteTabletsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3404,43 +3555,48 @@ func (x *GetCellInfoRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetCellInfoRequest.ProtoReflect.Descriptor instead. -func (*GetCellInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{52} +// Deprecated: Use DeleteTabletsRequest.ProtoReflect.Descriptor instead. +func (*DeleteTabletsRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{45} } -func (x *GetCellInfoRequest) GetCell() string { +func (x *DeleteTabletsRequest) GetTabletAliases() []*topodata.TabletAlias { if x != nil { - return x.Cell + return x.TabletAliases } - return "" + return nil } -type GetCellInfoResponse struct { +func (x *DeleteTabletsRequest) GetAllowPrimary() bool { + if x != nil { + return x.AllowPrimary + } + return false +} + +type DeleteTabletsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - CellInfo *topodata.CellInfo `protobuf:"bytes,1,opt,name=cell_info,json=cellInfo,proto3" json:"cell_info,omitempty"` } -func (x *GetCellInfoResponse) Reset() { - *x = GetCellInfoResponse{} +func (x *DeleteTabletsResponse) Reset() { + *x = DeleteTabletsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[53] + mi := &file_vtctldata_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetCellInfoResponse) String() string { +func (x *DeleteTabletsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetCellInfoResponse) ProtoMessage() {} +func (*DeleteTabletsResponse) ProtoMessage() {} -func (x *GetCellInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[53] +func (x *DeleteTabletsResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3451,41 +3607,56 @@ func (x *GetCellInfoResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetCellInfoResponse.ProtoReflect.Descriptor instead. -func (*GetCellInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{53} -} - -func (x *GetCellInfoResponse) GetCellInfo() *topodata.CellInfo { - if x != nil { - return x.CellInfo - } - return nil +// Deprecated: Use DeleteTabletsResponse.ProtoReflect.Descriptor instead. +func (*DeleteTabletsResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{46} } -type GetCellInfoNamesRequest struct { +type EmergencyReparentShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields -} -func (x *GetCellInfoNamesRequest) Reset() { - *x = GetCellInfoNamesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} + // Keyspace is the name of the keyspace to perform the Emergency Reparent in. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the name of the shard to perform the Emergency Reparent in. + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // Optional alias of a tablet that should become the new shard primary. If not + // not specified, the vtctld will select the most up-to-date canditate to + // promote. + NewPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=new_primary,json=newPrimary,proto3" json:"new_primary,omitempty"` + // List of replica aliases to ignore during the Emergency Reparent. The vtctld + // will not attempt to stop replication on these tablets, nor attempt to + // demote any that may think they are the shard primary. + IgnoreReplicas []*topodata.TabletAlias `protobuf:"bytes,4,rep,name=ignore_replicas,json=ignoreReplicas,proto3" json:"ignore_replicas,omitempty"` + // WaitReplicasTimeout is the duration of time to wait for replicas to catch + // up in reparenting. + WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` + // PreventCrossCellPromotion is used to only promote the new primary from the same cell + // as the failed primary. + PreventCrossCellPromotion bool `protobuf:"varint,6,opt,name=prevent_cross_cell_promotion,json=preventCrossCellPromotion,proto3" json:"prevent_cross_cell_promotion,omitempty"` + // WaitForAllTablets makes ERS wait for a response from all the tablets before proceeding. + // Useful when all the tablets are up and reachable. + WaitForAllTablets bool `protobuf:"varint,7,opt,name=wait_for_all_tablets,json=waitForAllTablets,proto3" json:"wait_for_all_tablets,omitempty"` +} -func (x *GetCellInfoNamesRequest) String() string { +func (x *EmergencyReparentShardRequest) Reset() { + *x = EmergencyReparentShardRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EmergencyReparentShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetCellInfoNamesRequest) ProtoMessage() {} +func (*EmergencyReparentShardRequest) ProtoMessage() {} -func (x *GetCellInfoNamesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[54] +func (x *EmergencyReparentShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3496,81 +3667,94 @@ func (x *GetCellInfoNamesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetCellInfoNamesRequest.ProtoReflect.Descriptor instead. -func (*GetCellInfoNamesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{54} +// Deprecated: Use EmergencyReparentShardRequest.ProtoReflect.Descriptor instead. +func (*EmergencyReparentShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{47} } -type GetCellInfoNamesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` +func (x *EmergencyReparentShardRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" } -func (x *GetCellInfoNamesResponse) Reset() { - *x = GetCellInfoNamesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *EmergencyReparentShardRequest) GetShard() string { + if x != nil { + return x.Shard } + return "" } -func (x *GetCellInfoNamesResponse) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *EmergencyReparentShardRequest) GetNewPrimary() *topodata.TabletAlias { + if x != nil { + return x.NewPrimary + } + return nil } -func (*GetCellInfoNamesResponse) ProtoMessage() {} +func (x *EmergencyReparentShardRequest) GetIgnoreReplicas() []*topodata.TabletAlias { + if x != nil { + return x.IgnoreReplicas + } + return nil +} -func (x *GetCellInfoNamesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[55] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *EmergencyReparentShardRequest) GetWaitReplicasTimeout() *vttime.Duration { + if x != nil { + return x.WaitReplicasTimeout } - return mi.MessageOf(x) + return nil } -// Deprecated: Use GetCellInfoNamesResponse.ProtoReflect.Descriptor instead. -func (*GetCellInfoNamesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{55} +func (x *EmergencyReparentShardRequest) GetPreventCrossCellPromotion() bool { + if x != nil { + return x.PreventCrossCellPromotion + } + return false } -func (x *GetCellInfoNamesResponse) GetNames() []string { +func (x *EmergencyReparentShardRequest) GetWaitForAllTablets() bool { if x != nil { - return x.Names + return x.WaitForAllTablets } - return nil + return false } -type GetCellsAliasesRequest struct { +type EmergencyReparentShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + // Keyspace is the name of the keyspace the Emergency Reparent took place in. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the name of the shard the Emergency Reparent took place in. + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // PromotedPrimary is the alias of the tablet that was promoted to shard + // primary. If NewPrimary was set in the request, then this will be the same + // alias. Otherwise, it will be the alias of the tablet found to be most + // up-to-date. + PromotedPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=promoted_primary,json=promotedPrimary,proto3" json:"promoted_primary,omitempty"` + Events []*logutil.Event `protobuf:"bytes,4,rep,name=events,proto3" json:"events,omitempty"` } -func (x *GetCellsAliasesRequest) Reset() { - *x = GetCellsAliasesRequest{} +func (x *EmergencyReparentShardResponse) Reset() { + *x = EmergencyReparentShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[56] + mi := &file_vtctldata_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetCellsAliasesRequest) String() string { +func (x *EmergencyReparentShardResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetCellsAliasesRequest) ProtoMessage() {} +func (*EmergencyReparentShardResponse) ProtoMessage() {} -func (x *GetCellsAliasesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[56] +func (x *EmergencyReparentShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3581,36 +3765,74 @@ func (x *GetCellsAliasesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetCellsAliasesRequest.ProtoReflect.Descriptor instead. -func (*GetCellsAliasesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{56} +// Deprecated: Use EmergencyReparentShardResponse.ProtoReflect.Descriptor instead. +func (*EmergencyReparentShardResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{48} } -type GetCellsAliasesResponse struct { +func (x *EmergencyReparentShardResponse) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *EmergencyReparentShardResponse) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *EmergencyReparentShardResponse) GetPromotedPrimary() *topodata.TabletAlias { + if x != nil { + return x.PromotedPrimary + } + return nil +} + +func (x *EmergencyReparentShardResponse) GetEvents() []*logutil.Event { + if x != nil { + return x.Events + } + return nil +} + +type ExecuteFetchAsAppRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Aliases map[string]*topodata.CellsAlias `protobuf:"bytes,1,rep,name=aliases,proto3" json:"aliases,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"` + // MaxRows is an optional parameter to limit the number of rows read into the + // QueryResult. Note that this does not apply a LIMIT to the query, just how + // many rows are read from the MySQL server on the tablet side. + // + // This field is optional. Specifying a non-positive value will use whatever + // default is configured in the VtctldService. + MaxRows int64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` + // UsePool causes the query to be run with a pooled connection to the tablet. + UsePool bool `protobuf:"varint,4,opt,name=use_pool,json=usePool,proto3" json:"use_pool,omitempty"` } -func (x *GetCellsAliasesResponse) Reset() { - *x = GetCellsAliasesResponse{} +func (x *ExecuteFetchAsAppRequest) Reset() { + *x = ExecuteFetchAsAppRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[57] + mi := &file_vtctldata_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetCellsAliasesResponse) String() string { +func (x *ExecuteFetchAsAppRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetCellsAliasesResponse) ProtoMessage() {} +func (*ExecuteFetchAsAppRequest) ProtoMessage() {} -func (x *GetCellsAliasesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[57] +func (x *ExecuteFetchAsAppRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3621,43 +3843,64 @@ func (x *GetCellsAliasesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetCellsAliasesResponse.ProtoReflect.Descriptor instead. -func (*GetCellsAliasesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{57} +// Deprecated: Use ExecuteFetchAsAppRequest.ProtoReflect.Descriptor instead. +func (*ExecuteFetchAsAppRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{49} } -func (x *GetCellsAliasesResponse) GetAliases() map[string]*topodata.CellsAlias { +func (x *ExecuteFetchAsAppRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Aliases + return x.TabletAlias } return nil } -type GetFullStatusRequest struct { +func (x *ExecuteFetchAsAppRequest) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +func (x *ExecuteFetchAsAppRequest) GetMaxRows() int64 { + if x != nil { + return x.MaxRows + } + return 0 +} + +func (x *ExecuteFetchAsAppRequest) GetUsePool() bool { + if x != nil { + return x.UsePool + } + return false +} + +type ExecuteFetchAsAppResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` } -func (x *GetFullStatusRequest) Reset() { - *x = GetFullStatusRequest{} +func (x *ExecuteFetchAsAppResponse) Reset() { + *x = ExecuteFetchAsAppResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[58] + mi := &file_vtctldata_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetFullStatusRequest) String() string { +func (x *ExecuteFetchAsAppResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetFullStatusRequest) ProtoMessage() {} +func (*ExecuteFetchAsAppResponse) ProtoMessage() {} -func (x *GetFullStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[58] +func (x *ExecuteFetchAsAppResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3668,43 +3911,57 @@ func (x *GetFullStatusRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetFullStatusRequest.ProtoReflect.Descriptor instead. -func (*GetFullStatusRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{58} +// Deprecated: Use ExecuteFetchAsAppResponse.ProtoReflect.Descriptor instead. +func (*ExecuteFetchAsAppResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{50} } -func (x *GetFullStatusRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *ExecuteFetchAsAppResponse) GetResult() *query.QueryResult { if x != nil { - return x.TabletAlias + return x.Result } return nil } -type GetFullStatusResponse struct { +type ExecuteFetchAsDBARequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Status *replicationdata.FullStatus `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` -} - -func (x *GetFullStatusResponse) Reset() { - *x = GetFullStatusResponse{} + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Query string `protobuf:"bytes,2,opt,name=query,proto3" json:"query,omitempty"` + // MaxRows is an optional parameter to limit the number of rows read into the + // QueryResult. Note that this does not apply a LIMIT to the query, just how + // many rows are read from the MySQL server on the tablet side. + // + // This field is optional. Specifying a non-positive value will use whatever + // default is configured in the VtctldService. + MaxRows int64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` + // DisableBinlogs instructs the tablet not to use binary logging when + // executing the query. + DisableBinlogs bool `protobuf:"varint,4,opt,name=disable_binlogs,json=disableBinlogs,proto3" json:"disable_binlogs,omitempty"` + // ReloadSchema instructs the tablet to reload its schema after executing the + // query. + ReloadSchema bool `protobuf:"varint,5,opt,name=reload_schema,json=reloadSchema,proto3" json:"reload_schema,omitempty"` +} + +func (x *ExecuteFetchAsDBARequest) Reset() { + *x = ExecuteFetchAsDBARequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[59] + mi := &file_vtctldata_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetFullStatusResponse) String() string { +func (x *ExecuteFetchAsDBARequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetFullStatusResponse) ProtoMessage() {} +func (*ExecuteFetchAsDBARequest) ProtoMessage() {} -func (x *GetFullStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[59] +func (x *ExecuteFetchAsDBARequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3715,41 +3972,71 @@ func (x *GetFullStatusResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetFullStatusResponse.ProtoReflect.Descriptor instead. -func (*GetFullStatusResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{59} +// Deprecated: Use ExecuteFetchAsDBARequest.ProtoReflect.Descriptor instead. +func (*ExecuteFetchAsDBARequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{51} } -func (x *GetFullStatusResponse) GetStatus() *replicationdata.FullStatus { +func (x *ExecuteFetchAsDBARequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Status + return x.TabletAlias } return nil } -type GetKeyspacesRequest struct { +func (x *ExecuteFetchAsDBARequest) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +func (x *ExecuteFetchAsDBARequest) GetMaxRows() int64 { + if x != nil { + return x.MaxRows + } + return 0 +} + +func (x *ExecuteFetchAsDBARequest) GetDisableBinlogs() bool { + if x != nil { + return x.DisableBinlogs + } + return false +} + +func (x *ExecuteFetchAsDBARequest) GetReloadSchema() bool { + if x != nil { + return x.ReloadSchema + } + return false +} + +type ExecuteFetchAsDBAResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` } -func (x *GetKeyspacesRequest) Reset() { - *x = GetKeyspacesRequest{} +func (x *ExecuteFetchAsDBAResponse) Reset() { + *x = ExecuteFetchAsDBAResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[60] + mi := &file_vtctldata_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetKeyspacesRequest) String() string { +func (x *ExecuteFetchAsDBAResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetKeyspacesRequest) ProtoMessage() {} +func (*ExecuteFetchAsDBAResponse) ProtoMessage() {} -func (x *GetKeyspacesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[60] +func (x *ExecuteFetchAsDBAResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3760,36 +4047,44 @@ func (x *GetKeyspacesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetKeyspacesRequest.ProtoReflect.Descriptor instead. -func (*GetKeyspacesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{60} +// Deprecated: Use ExecuteFetchAsDBAResponse.ProtoReflect.Descriptor instead. +func (*ExecuteFetchAsDBAResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{52} } -type GetKeyspacesResponse struct { +func (x *ExecuteFetchAsDBAResponse) GetResult() *query.QueryResult { + if x != nil { + return x.Result + } + return nil +} + +type ExecuteHookRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspaces []*Keyspace `protobuf:"bytes,1,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + TabletHookRequest *tabletmanagerdata.ExecuteHookRequest `protobuf:"bytes,2,opt,name=tablet_hook_request,json=tabletHookRequest,proto3" json:"tablet_hook_request,omitempty"` } -func (x *GetKeyspacesResponse) Reset() { - *x = GetKeyspacesResponse{} +func (x *ExecuteHookRequest) Reset() { + *x = ExecuteHookRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[61] + mi := &file_vtctldata_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetKeyspacesResponse) String() string { +func (x *ExecuteHookRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetKeyspacesResponse) ProtoMessage() {} +func (*ExecuteHookRequest) ProtoMessage() {} -func (x *GetKeyspacesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[61] +func (x *ExecuteHookRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3800,43 +4095,50 @@ func (x *GetKeyspacesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetKeyspacesResponse.ProtoReflect.Descriptor instead. -func (*GetKeyspacesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{61} +// Deprecated: Use ExecuteHookRequest.ProtoReflect.Descriptor instead. +func (*ExecuteHookRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{53} } -func (x *GetKeyspacesResponse) GetKeyspaces() []*Keyspace { +func (x *ExecuteHookRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Keyspaces + return x.TabletAlias } return nil } -type GetKeyspaceRequest struct { +func (x *ExecuteHookRequest) GetTabletHookRequest() *tabletmanagerdata.ExecuteHookRequest { + if x != nil { + return x.TabletHookRequest + } + return nil +} + +type ExecuteHookResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + HookResult *tabletmanagerdata.ExecuteHookResponse `protobuf:"bytes,1,opt,name=hook_result,json=hookResult,proto3" json:"hook_result,omitempty"` } -func (x *GetKeyspaceRequest) Reset() { - *x = GetKeyspaceRequest{} +func (x *ExecuteHookResponse) Reset() { + *x = ExecuteHookResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[62] + mi := &file_vtctldata_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetKeyspaceRequest) String() string { +func (x *ExecuteHookResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetKeyspaceRequest) ProtoMessage() {} +func (*ExecuteHookResponse) ProtoMessage() {} -func (x *GetKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[62] +func (x *ExecuteHookResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3847,43 +4149,43 @@ func (x *GetKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*GetKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{62} +// Deprecated: Use ExecuteHookResponse.ProtoReflect.Descriptor instead. +func (*ExecuteHookResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{54} } -func (x *GetKeyspaceRequest) GetKeyspace() string { +func (x *ExecuteHookResponse) GetHookResult() *tabletmanagerdata.ExecuteHookResponse { if x != nil { - return x.Keyspace + return x.HookResult } - return "" + return nil } -type GetKeyspaceResponse struct { +type FindAllShardsInKeyspaceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *GetKeyspaceResponse) Reset() { - *x = GetKeyspaceResponse{} +func (x *FindAllShardsInKeyspaceRequest) Reset() { + *x = FindAllShardsInKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[63] + mi := &file_vtctldata_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetKeyspaceResponse) String() string { +func (x *FindAllShardsInKeyspaceRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetKeyspaceResponse) ProtoMessage() {} +func (*FindAllShardsInKeyspaceRequest) ProtoMessage() {} -func (x *GetKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[63] +func (x *FindAllShardsInKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3894,43 +4196,43 @@ func (x *GetKeyspaceResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetKeyspaceResponse.ProtoReflect.Descriptor instead. -func (*GetKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{63} +// Deprecated: Use FindAllShardsInKeyspaceRequest.ProtoReflect.Descriptor instead. +func (*FindAllShardsInKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{55} } -func (x *GetKeyspaceResponse) GetKeyspace() *Keyspace { +func (x *FindAllShardsInKeyspaceRequest) GetKeyspace() string { if x != nil { return x.Keyspace } - return nil + return "" } -type GetPermissionsRequest struct { +type FindAllShardsInKeyspaceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Shards map[string]*Shard `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *GetPermissionsRequest) Reset() { - *x = GetPermissionsRequest{} +func (x *FindAllShardsInKeyspaceResponse) Reset() { + *x = FindAllShardsInKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[64] + mi := &file_vtctldata_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetPermissionsRequest) String() string { +func (x *FindAllShardsInKeyspaceResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetPermissionsRequest) ProtoMessage() {} +func (*FindAllShardsInKeyspaceResponse) ProtoMessage() {} -func (x *GetPermissionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[64] +func (x *FindAllShardsInKeyspaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3941,43 +4243,58 @@ func (x *GetPermissionsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetPermissionsRequest.ProtoReflect.Descriptor instead. -func (*GetPermissionsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{64} +// Deprecated: Use FindAllShardsInKeyspaceResponse.ProtoReflect.Descriptor instead. +func (*FindAllShardsInKeyspaceResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{56} } -func (x *GetPermissionsRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *FindAllShardsInKeyspaceResponse) GetShards() map[string]*Shard { if x != nil { - return x.TabletAlias + return x.Shards } return nil } -type GetPermissionsResponse struct { +type GetBackupsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Permissions *tabletmanagerdata.Permissions `protobuf:"bytes,1,opt,name=permissions,proto3" json:"permissions,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // Limit, if nonzero, will return only the most N recent backups. + Limit uint32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` + // Detailed indicates whether to use the backupengine, if supported, to + // populate additional fields, such as Engine and Status, on BackupInfo + // objects in the response. If not set, or if the backupengine does not + // support populating these fields, Engine will always be empty, and Status + // will always be UNKNOWN. + Detailed bool `protobuf:"varint,4,opt,name=detailed,proto3" json:"detailed,omitempty"` + // DetailedLimit, if nonzero, will only populate additional fields (see Detailed) + // on the N most recent backups. The Limit field still dictates the total + // number of backup info objects returned, so, in reality, min(Limit, DetailedLimit) + // backup infos will have additional fields set, and any remaining backups + // will not. + DetailedLimit uint32 `protobuf:"varint,5,opt,name=detailed_limit,json=detailedLimit,proto3" json:"detailed_limit,omitempty"` } -func (x *GetPermissionsResponse) Reset() { - *x = GetPermissionsResponse{} +func (x *GetBackupsRequest) Reset() { + *x = GetBackupsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[65] + mi := &file_vtctldata_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetPermissionsResponse) String() string { +func (x *GetBackupsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetPermissionsResponse) ProtoMessage() {} +func (*GetBackupsRequest) ProtoMessage() {} -func (x *GetPermissionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[65] +func (x *GetBackupsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3988,41 +4305,71 @@ func (x *GetPermissionsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetPermissionsResponse.ProtoReflect.Descriptor instead. -func (*GetPermissionsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{65} +// Deprecated: Use GetBackupsRequest.ProtoReflect.Descriptor instead. +func (*GetBackupsRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{57} } -func (x *GetPermissionsResponse) GetPermissions() *tabletmanagerdata.Permissions { +func (x *GetBackupsRequest) GetKeyspace() string { if x != nil { - return x.Permissions + return x.Keyspace } - return nil + return "" } -type GetRoutingRulesRequest struct { +func (x *GetBackupsRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *GetBackupsRequest) GetLimit() uint32 { + if x != nil { + return x.Limit + } + return 0 +} + +func (x *GetBackupsRequest) GetDetailed() bool { + if x != nil { + return x.Detailed + } + return false +} + +func (x *GetBackupsRequest) GetDetailedLimit() uint32 { + if x != nil { + return x.DetailedLimit + } + return 0 +} + +type GetBackupsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Backups []*mysqlctl.BackupInfo `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` } -func (x *GetRoutingRulesRequest) Reset() { - *x = GetRoutingRulesRequest{} +func (x *GetBackupsResponse) Reset() { + *x = GetBackupsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[66] + mi := &file_vtctldata_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetRoutingRulesRequest) String() string { +func (x *GetBackupsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetRoutingRulesRequest) ProtoMessage() {} +func (*GetBackupsResponse) ProtoMessage() {} -func (x *GetRoutingRulesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[66] +func (x *GetBackupsResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4033,36 +4380,43 @@ func (x *GetRoutingRulesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetRoutingRulesRequest.ProtoReflect.Descriptor instead. -func (*GetRoutingRulesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{66} +// Deprecated: Use GetBackupsResponse.ProtoReflect.Descriptor instead. +func (*GetBackupsResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{58} } -type GetRoutingRulesResponse struct { +func (x *GetBackupsResponse) GetBackups() []*mysqlctl.BackupInfo { + if x != nil { + return x.Backups + } + return nil +} + +type GetCellInfoRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - RoutingRules *vschema.RoutingRules `protobuf:"bytes,1,opt,name=routing_rules,json=routingRules,proto3" json:"routing_rules,omitempty"` + Cell string `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` } -func (x *GetRoutingRulesResponse) Reset() { - *x = GetRoutingRulesResponse{} +func (x *GetCellInfoRequest) Reset() { + *x = GetCellInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[67] + mi := &file_vtctldata_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetRoutingRulesResponse) String() string { +func (x *GetCellInfoRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetRoutingRulesResponse) ProtoMessage() {} +func (*GetCellInfoRequest) ProtoMessage() {} -func (x *GetRoutingRulesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[67] +func (x *GetCellInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4073,61 +4427,43 @@ func (x *GetRoutingRulesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetRoutingRulesResponse.ProtoReflect.Descriptor instead. -func (*GetRoutingRulesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{67} +// Deprecated: Use GetCellInfoRequest.ProtoReflect.Descriptor instead. +func (*GetCellInfoRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{59} } -func (x *GetRoutingRulesResponse) GetRoutingRules() *vschema.RoutingRules { +func (x *GetCellInfoRequest) GetCell() string { if x != nil { - return x.RoutingRules + return x.Cell } - return nil + return "" } -type GetSchemaRequest struct { +type GetCellInfoResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - // Tables is a list of tables for which we should gather information. Each is - // either an exact match, or a regular expression of the form /regexp/. - Tables []string `protobuf:"bytes,2,rep,name=tables,proto3" json:"tables,omitempty"` - // ExcludeTables is a list of tables to exclude from the result. Each is - // either an exact match, or a regular expression of the form /regexp/. - ExcludeTables []string `protobuf:"bytes,3,rep,name=exclude_tables,json=excludeTables,proto3" json:"exclude_tables,omitempty"` - // IncludeViews specifies whether to include views in the result. - IncludeViews bool `protobuf:"varint,4,opt,name=include_views,json=includeViews,proto3" json:"include_views,omitempty"` - // TableNamesOnly specifies whether to limit the results to just table names, - // rather than full schema information for each table. - TableNamesOnly bool `protobuf:"varint,5,opt,name=table_names_only,json=tableNamesOnly,proto3" json:"table_names_only,omitempty"` - // TableSizesOnly specifies whether to limit the results to just table sizes, - // rather than full schema information for each table. It is ignored if - // TableNamesOnly is set to true. - TableSizesOnly bool `protobuf:"varint,6,opt,name=table_sizes_only,json=tableSizesOnly,proto3" json:"table_sizes_only,omitempty"` - // TableSchemaOnly specifies whether to limit the results to just table/view - // schema definition (CREATE TABLE/VIEW statements) and skip column/field information - TableSchemaOnly bool `protobuf:"varint,7,opt,name=table_schema_only,json=tableSchemaOnly,proto3" json:"table_schema_only,omitempty"` + CellInfo *topodata.CellInfo `protobuf:"bytes,1,opt,name=cell_info,json=cellInfo,proto3" json:"cell_info,omitempty"` } -func (x *GetSchemaRequest) Reset() { - *x = GetSchemaRequest{} +func (x *GetCellInfoResponse) Reset() { + *x = GetCellInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[68] + mi := &file_vtctldata_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSchemaRequest) String() string { +func (x *GetCellInfoResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSchemaRequest) ProtoMessage() {} +func (*GetCellInfoResponse) ProtoMessage() {} -func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[68] +func (x *GetCellInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4138,85 +4474,81 @@ func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSchemaRequest.ProtoReflect.Descriptor instead. -func (*GetSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{68} +// Deprecated: Use GetCellInfoResponse.ProtoReflect.Descriptor instead. +func (*GetCellInfoResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{60} } -func (x *GetSchemaRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *GetCellInfoResponse) GetCellInfo() *topodata.CellInfo { if x != nil { - return x.TabletAlias + return x.CellInfo } return nil } -func (x *GetSchemaRequest) GetTables() []string { - if x != nil { - return x.Tables - } - return nil +type GetCellInfoNamesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (x *GetSchemaRequest) GetExcludeTables() []string { - if x != nil { - return x.ExcludeTables +func (x *GetCellInfoNamesRequest) Reset() { + *x = GetCellInfoNamesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (x *GetSchemaRequest) GetIncludeViews() bool { - if x != nil { - return x.IncludeViews - } - return false +func (x *GetCellInfoNamesRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *GetSchemaRequest) GetTableNamesOnly() bool { - if x != nil { - return x.TableNamesOnly - } - return false -} +func (*GetCellInfoNamesRequest) ProtoMessage() {} -func (x *GetSchemaRequest) GetTableSizesOnly() bool { - if x != nil { - return x.TableSizesOnly +func (x *GetCellInfoNamesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[61] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return false + return mi.MessageOf(x) } -func (x *GetSchemaRequest) GetTableSchemaOnly() bool { - if x != nil { - return x.TableSchemaOnly - } - return false +// Deprecated: Use GetCellInfoNamesRequest.ProtoReflect.Descriptor instead. +func (*GetCellInfoNamesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{61} } -type GetSchemaResponse struct { +type GetCellInfoNamesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Schema *tabletmanagerdata.SchemaDefinition `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"` + Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` } -func (x *GetSchemaResponse) Reset() { - *x = GetSchemaResponse{} +func (x *GetCellInfoNamesResponse) Reset() { + *x = GetCellInfoNamesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[69] + mi := &file_vtctldata_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSchemaResponse) String() string { +func (x *GetCellInfoNamesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSchemaResponse) ProtoMessage() {} +func (*GetCellInfoNamesResponse) ProtoMessage() {} -func (x *GetSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[69] +func (x *GetCellInfoNamesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4227,44 +4559,41 @@ func (x *GetSchemaResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSchemaResponse.ProtoReflect.Descriptor instead. -func (*GetSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{69} +// Deprecated: Use GetCellInfoNamesResponse.ProtoReflect.Descriptor instead. +func (*GetCellInfoNamesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{62} } -func (x *GetSchemaResponse) GetSchema() *tabletmanagerdata.SchemaDefinition { +func (x *GetCellInfoNamesResponse) GetNames() []string { if x != nil { - return x.Schema + return x.Names } return nil } -type GetShardRequest struct { +type GetCellsAliasesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` } -func (x *GetShardRequest) Reset() { - *x = GetShardRequest{} +func (x *GetCellsAliasesRequest) Reset() { + *x = GetCellsAliasesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[70] + mi := &file_vtctldata_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetShardRequest) String() string { +func (x *GetCellsAliasesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetShardRequest) ProtoMessage() {} +func (*GetCellsAliasesRequest) ProtoMessage() {} -func (x *GetShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[70] +func (x *GetCellsAliasesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4275,50 +4604,36 @@ func (x *GetShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetShardRequest.ProtoReflect.Descriptor instead. -func (*GetShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{70} +// Deprecated: Use GetCellsAliasesRequest.ProtoReflect.Descriptor instead. +func (*GetCellsAliasesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{63} } -func (x *GetShardRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *GetShardRequest) GetShardName() string { - if x != nil { - return x.ShardName - } - return "" -} - -type GetShardResponse struct { +type GetCellsAliasesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Shard *Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` + Aliases map[string]*topodata.CellsAlias `protobuf:"bytes,1,rep,name=aliases,proto3" json:"aliases,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *GetShardResponse) Reset() { - *x = GetShardResponse{} +func (x *GetCellsAliasesResponse) Reset() { + *x = GetCellsAliasesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[71] + mi := &file_vtctldata_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetShardResponse) String() string { +func (x *GetCellsAliasesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetShardResponse) ProtoMessage() {} +func (*GetCellsAliasesResponse) ProtoMessage() {} -func (x *GetShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[71] +func (x *GetCellsAliasesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4329,41 +4644,43 @@ func (x *GetShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetShardResponse.ProtoReflect.Descriptor instead. -func (*GetShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{71} +// Deprecated: Use GetCellsAliasesResponse.ProtoReflect.Descriptor instead. +func (*GetCellsAliasesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{64} } -func (x *GetShardResponse) GetShard() *Shard { +func (x *GetCellsAliasesResponse) GetAliases() map[string]*topodata.CellsAlias { if x != nil { - return x.Shard + return x.Aliases } return nil } -type GetShardRoutingRulesRequest struct { +type GetFullStatusRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *GetShardRoutingRulesRequest) Reset() { - *x = GetShardRoutingRulesRequest{} +func (x *GetFullStatusRequest) Reset() { + *x = GetFullStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[72] + mi := &file_vtctldata_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetShardRoutingRulesRequest) String() string { +func (x *GetFullStatusRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetShardRoutingRulesRequest) ProtoMessage() {} +func (*GetFullStatusRequest) ProtoMessage() {} -func (x *GetShardRoutingRulesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[72] +func (x *GetFullStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4374,36 +4691,43 @@ func (x *GetShardRoutingRulesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetShardRoutingRulesRequest.ProtoReflect.Descriptor instead. -func (*GetShardRoutingRulesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{72} +// Deprecated: Use GetFullStatusRequest.ProtoReflect.Descriptor instead. +func (*GetFullStatusRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{65} } -type GetShardRoutingRulesResponse struct { +func (x *GetFullStatusRequest) GetTabletAlias() *topodata.TabletAlias { + if x != nil { + return x.TabletAlias + } + return nil +} + +type GetFullStatusResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ShardRoutingRules *vschema.ShardRoutingRules `protobuf:"bytes,1,opt,name=shard_routing_rules,json=shardRoutingRules,proto3" json:"shard_routing_rules,omitempty"` + Status *replicationdata.FullStatus `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` } -func (x *GetShardRoutingRulesResponse) Reset() { - *x = GetShardRoutingRulesResponse{} +func (x *GetFullStatusResponse) Reset() { + *x = GetFullStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[73] + mi := &file_vtctldata_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetShardRoutingRulesResponse) String() string { +func (x *GetFullStatusResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetShardRoutingRulesResponse) ProtoMessage() {} +func (*GetFullStatusResponse) ProtoMessage() {} -func (x *GetShardRoutingRulesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[73] +func (x *GetFullStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4414,43 +4738,41 @@ func (x *GetShardRoutingRulesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetShardRoutingRulesResponse.ProtoReflect.Descriptor instead. -func (*GetShardRoutingRulesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{73} +// Deprecated: Use GetFullStatusResponse.ProtoReflect.Descriptor instead. +func (*GetFullStatusResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{66} } -func (x *GetShardRoutingRulesResponse) GetShardRoutingRules() *vschema.ShardRoutingRules { +func (x *GetFullStatusResponse) GetStatus() *replicationdata.FullStatus { if x != nil { - return x.ShardRoutingRules + return x.Status } return nil } -type GetSrvKeyspaceNamesRequest struct { +type GetKeyspacesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Cells []string `protobuf:"bytes,1,rep,name=cells,proto3" json:"cells,omitempty"` } -func (x *GetSrvKeyspaceNamesRequest) Reset() { - *x = GetSrvKeyspaceNamesRequest{} +func (x *GetKeyspacesRequest) Reset() { + *x = GetKeyspacesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[74] + mi := &file_vtctldata_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSrvKeyspaceNamesRequest) String() string { +func (x *GetKeyspacesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvKeyspaceNamesRequest) ProtoMessage() {} +func (*GetKeyspacesRequest) ProtoMessage() {} -func (x *GetSrvKeyspaceNamesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[74] +func (x *GetKeyspacesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4461,44 +4783,36 @@ func (x *GetSrvKeyspaceNamesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvKeyspaceNamesRequest.ProtoReflect.Descriptor instead. -func (*GetSrvKeyspaceNamesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{74} -} - -func (x *GetSrvKeyspaceNamesRequest) GetCells() []string { - if x != nil { - return x.Cells - } - return nil +// Deprecated: Use GetKeyspacesRequest.ProtoReflect.Descriptor instead. +func (*GetKeyspacesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{67} } -type GetSrvKeyspaceNamesResponse struct { +type GetKeyspacesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Names is a mapping of cell name to a list of SrvKeyspace names. - Names map[string]*GetSrvKeyspaceNamesResponse_NameList `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Keyspaces []*Keyspace `protobuf:"bytes,1,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` } -func (x *GetSrvKeyspaceNamesResponse) Reset() { - *x = GetSrvKeyspaceNamesResponse{} +func (x *GetKeyspacesResponse) Reset() { + *x = GetKeyspacesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[75] + mi := &file_vtctldata_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSrvKeyspaceNamesResponse) String() string { +func (x *GetKeyspacesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvKeyspaceNamesResponse) ProtoMessage() {} +func (*GetKeyspacesResponse) ProtoMessage() {} -func (x *GetSrvKeyspaceNamesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[75] +func (x *GetKeyspacesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4509,46 +4823,43 @@ func (x *GetSrvKeyspaceNamesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvKeyspaceNamesResponse.ProtoReflect.Descriptor instead. -func (*GetSrvKeyspaceNamesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{75} +// Deprecated: Use GetKeyspacesResponse.ProtoReflect.Descriptor instead. +func (*GetKeyspacesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{68} } -func (x *GetSrvKeyspaceNamesResponse) GetNames() map[string]*GetSrvKeyspaceNamesResponse_NameList { +func (x *GetKeyspacesResponse) GetKeyspaces() []*Keyspace { if x != nil { - return x.Names + return x.Keyspaces } return nil } -type GetSrvKeyspacesRequest struct { +type GetKeyspaceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Cells is a list of cells to lookup a SrvKeyspace for. Leaving this empty is - // equivalent to specifying all cells in the topo. - Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` } -func (x *GetSrvKeyspacesRequest) Reset() { - *x = GetSrvKeyspacesRequest{} +func (x *GetKeyspaceRequest) Reset() { + *x = GetKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[76] + mi := &file_vtctldata_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSrvKeyspacesRequest) String() string { +func (x *GetKeyspaceRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvKeyspacesRequest) ProtoMessage() {} +func (*GetKeyspaceRequest) ProtoMessage() {} -func (x *GetSrvKeyspacesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[76] +func (x *GetKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4559,51 +4870,43 @@ func (x *GetSrvKeyspacesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvKeyspacesRequest.ProtoReflect.Descriptor instead. -func (*GetSrvKeyspacesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{76} +// Deprecated: Use GetKeyspaceRequest.ProtoReflect.Descriptor instead. +func (*GetKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{69} } -func (x *GetSrvKeyspacesRequest) GetKeyspace() string { +func (x *GetKeyspaceRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *GetSrvKeyspacesRequest) GetCells() []string { - if x != nil { - return x.Cells - } - return nil -} - -type GetSrvKeyspacesResponse struct { +type GetKeyspaceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // SrvKeyspaces is a mapping of cell name to SrvKeyspace. - SrvKeyspaces map[string]*topodata.SrvKeyspace `protobuf:"bytes,1,rep,name=srv_keyspaces,json=srvKeyspaces,proto3" json:"srv_keyspaces,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *GetSrvKeyspacesResponse) Reset() { - *x = GetSrvKeyspacesResponse{} +func (x *GetKeyspaceResponse) Reset() { + *x = GetKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[77] + mi := &file_vtctldata_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSrvKeyspacesResponse) String() string { +func (x *GetKeyspaceResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvKeyspacesResponse) ProtoMessage() {} +func (*GetKeyspaceResponse) ProtoMessage() {} -func (x *GetSrvKeyspacesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[77] +func (x *GetKeyspaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[70] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4614,57 +4917,43 @@ func (x *GetSrvKeyspacesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvKeyspacesResponse.ProtoReflect.Descriptor instead. -func (*GetSrvKeyspacesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{77} +// Deprecated: Use GetKeyspaceResponse.ProtoReflect.Descriptor instead. +func (*GetKeyspaceResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{70} } -func (x *GetSrvKeyspacesResponse) GetSrvKeyspaces() map[string]*topodata.SrvKeyspace { +func (x *GetKeyspaceResponse) GetKeyspace() *Keyspace { if x != nil { - return x.SrvKeyspaces + return x.Keyspace } return nil } -type UpdateThrottlerConfigRequest struct { +type GetPermissionsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Enable instructs to enable the throttler - Enable bool `protobuf:"varint,2,opt,name=enable,proto3" json:"enable,omitempty"` - // Disable instructs to disable the throttler - Disable bool `protobuf:"varint,3,opt,name=disable,proto3" json:"disable,omitempty"` - // Threshold for throttler (with no custom query, ie using default query, only positive values are considered) - Threshold float64 `protobuf:"fixed64,4,opt,name=threshold,proto3" json:"threshold,omitempty"` - // CustomQuery replaces the default replication lag query - CustomQuery string `protobuf:"bytes,5,opt,name=custom_query,json=customQuery,proto3" json:"custom_query,omitempty"` - // CustomQuerySet indicates that the value of CustomQuery has changed - CustomQuerySet bool `protobuf:"varint,6,opt,name=custom_query_set,json=customQuerySet,proto3" json:"custom_query_set,omitempty"` - // CheckAsCheckSelf instructs the throttler to respond to /check requests by checking the tablet's own health - CheckAsCheckSelf bool `protobuf:"varint,7,opt,name=check_as_check_self,json=checkAsCheckSelf,proto3" json:"check_as_check_self,omitempty"` - // CheckAsCheckShard instructs the throttler to respond to /check requests by checking the shard's health (this is the default behavior) - CheckAsCheckShard bool `protobuf:"varint,8,opt,name=check_as_check_shard,json=checkAsCheckShard,proto3" json:"check_as_check_shard,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *UpdateThrottlerConfigRequest) Reset() { - *x = UpdateThrottlerConfigRequest{} +func (x *GetPermissionsRequest) Reset() { + *x = GetPermissionsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[78] + mi := &file_vtctldata_proto_msgTypes[71] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *UpdateThrottlerConfigRequest) String() string { +func (x *GetPermissionsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateThrottlerConfigRequest) ProtoMessage() {} +func (*GetPermissionsRequest) ProtoMessage() {} -func (x *UpdateThrottlerConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[78] +func (x *GetPermissionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[71] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4675,90 +4964,88 @@ func (x *UpdateThrottlerConfigRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateThrottlerConfigRequest.ProtoReflect.Descriptor instead. -func (*UpdateThrottlerConfigRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{78} -} - -func (x *UpdateThrottlerConfigRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" +// Deprecated: Use GetPermissionsRequest.ProtoReflect.Descriptor instead. +func (*GetPermissionsRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{71} } -func (x *UpdateThrottlerConfigRequest) GetEnable() bool { +func (x *GetPermissionsRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Enable + return x.TabletAlias } - return false + return nil } -func (x *UpdateThrottlerConfigRequest) GetDisable() bool { - if x != nil { - return x.Disable - } - return false +type GetPermissionsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Permissions *tabletmanagerdata.Permissions `protobuf:"bytes,1,opt,name=permissions,proto3" json:"permissions,omitempty"` } -func (x *UpdateThrottlerConfigRequest) GetThreshold() float64 { - if x != nil { - return x.Threshold +func (x *GetPermissionsResponse) Reset() { + *x = GetPermissionsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[72] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func (x *UpdateThrottlerConfigRequest) GetCustomQuery() string { - if x != nil { - return x.CustomQuery - } - return "" +func (x *GetPermissionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *UpdateThrottlerConfigRequest) GetCustomQuerySet() bool { - if x != nil { - return x.CustomQuerySet +func (*GetPermissionsResponse) ProtoMessage() {} + +func (x *GetPermissionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[72] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return false + return mi.MessageOf(x) } -func (x *UpdateThrottlerConfigRequest) GetCheckAsCheckSelf() bool { - if x != nil { - return x.CheckAsCheckSelf - } - return false +// Deprecated: Use GetPermissionsResponse.ProtoReflect.Descriptor instead. +func (*GetPermissionsResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{72} } -func (x *UpdateThrottlerConfigRequest) GetCheckAsCheckShard() bool { +func (x *GetPermissionsResponse) GetPermissions() *tabletmanagerdata.Permissions { if x != nil { - return x.CheckAsCheckShard + return x.Permissions } - return false + return nil } -type UpdateThrottlerConfigResponse struct { +type GetRoutingRulesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } -func (x *UpdateThrottlerConfigResponse) Reset() { - *x = UpdateThrottlerConfigResponse{} +func (x *GetRoutingRulesRequest) Reset() { + *x = GetRoutingRulesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[79] + mi := &file_vtctldata_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *UpdateThrottlerConfigResponse) String() string { +func (x *GetRoutingRulesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateThrottlerConfigResponse) ProtoMessage() {} +func (*GetRoutingRulesRequest) ProtoMessage() {} -func (x *UpdateThrottlerConfigResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[79] +func (x *GetRoutingRulesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4769,36 +5056,36 @@ func (x *UpdateThrottlerConfigResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateThrottlerConfigResponse.ProtoReflect.Descriptor instead. -func (*UpdateThrottlerConfigResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{79} +// Deprecated: Use GetRoutingRulesRequest.ProtoReflect.Descriptor instead. +func (*GetRoutingRulesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{73} } -type GetSrvVSchemaRequest struct { +type GetRoutingRulesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Cell string `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` + RoutingRules *vschema.RoutingRules `protobuf:"bytes,1,opt,name=routing_rules,json=routingRules,proto3" json:"routing_rules,omitempty"` } -func (x *GetSrvVSchemaRequest) Reset() { - *x = GetSrvVSchemaRequest{} +func (x *GetRoutingRulesResponse) Reset() { + *x = GetRoutingRulesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[80] + mi := &file_vtctldata_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSrvVSchemaRequest) String() string { +func (x *GetRoutingRulesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvVSchemaRequest) ProtoMessage() {} +func (*GetRoutingRulesResponse) ProtoMessage() {} -func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[80] +func (x *GetRoutingRulesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[74] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4809,43 +5096,61 @@ func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvVSchemaRequest.ProtoReflect.Descriptor instead. -func (*GetSrvVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{80} +// Deprecated: Use GetRoutingRulesResponse.ProtoReflect.Descriptor instead. +func (*GetRoutingRulesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{74} } -func (x *GetSrvVSchemaRequest) GetCell() string { +func (x *GetRoutingRulesResponse) GetRoutingRules() *vschema.RoutingRules { if x != nil { - return x.Cell + return x.RoutingRules } - return "" + return nil } -type GetSrvVSchemaResponse struct { +type GetSchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - SrvVSchema *vschema.SrvVSchema `protobuf:"bytes,1,opt,name=srv_v_schema,json=srvVSchema,proto3" json:"srv_v_schema,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + // Tables is a list of tables for which we should gather information. Each is + // either an exact match, or a regular expression of the form /regexp/. + Tables []string `protobuf:"bytes,2,rep,name=tables,proto3" json:"tables,omitempty"` + // ExcludeTables is a list of tables to exclude from the result. Each is + // either an exact match, or a regular expression of the form /regexp/. + ExcludeTables []string `protobuf:"bytes,3,rep,name=exclude_tables,json=excludeTables,proto3" json:"exclude_tables,omitempty"` + // IncludeViews specifies whether to include views in the result. + IncludeViews bool `protobuf:"varint,4,opt,name=include_views,json=includeViews,proto3" json:"include_views,omitempty"` + // TableNamesOnly specifies whether to limit the results to just table names, + // rather than full schema information for each table. + TableNamesOnly bool `protobuf:"varint,5,opt,name=table_names_only,json=tableNamesOnly,proto3" json:"table_names_only,omitempty"` + // TableSizesOnly specifies whether to limit the results to just table sizes, + // rather than full schema information for each table. It is ignored if + // TableNamesOnly is set to true. + TableSizesOnly bool `protobuf:"varint,6,opt,name=table_sizes_only,json=tableSizesOnly,proto3" json:"table_sizes_only,omitempty"` + // TableSchemaOnly specifies whether to limit the results to just table/view + // schema definition (CREATE TABLE/VIEW statements) and skip column/field information + TableSchemaOnly bool `protobuf:"varint,7,opt,name=table_schema_only,json=tableSchemaOnly,proto3" json:"table_schema_only,omitempty"` } -func (x *GetSrvVSchemaResponse) Reset() { - *x = GetSrvVSchemaResponse{} +func (x *GetSchemaRequest) Reset() { + *x = GetSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[81] + mi := &file_vtctldata_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSrvVSchemaResponse) String() string { +func (x *GetSchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvVSchemaResponse) ProtoMessage() {} +func (*GetSchemaRequest) ProtoMessage() {} -func (x *GetSrvVSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[81] +func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[75] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4856,91 +5161,85 @@ func (x *GetSrvVSchemaResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvVSchemaResponse.ProtoReflect.Descriptor instead. -func (*GetSrvVSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{81} +// Deprecated: Use GetSchemaRequest.ProtoReflect.Descriptor instead. +func (*GetSchemaRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{75} } -func (x *GetSrvVSchemaResponse) GetSrvVSchema() *vschema.SrvVSchema { +func (x *GetSchemaRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.SrvVSchema + return x.TabletAlias } return nil } -type GetSrvVSchemasRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` +func (x *GetSchemaRequest) GetTables() []string { + if x != nil { + return x.Tables + } + return nil } -func (x *GetSrvVSchemasRequest) Reset() { - *x = GetSrvVSchemasRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[82] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *GetSchemaRequest) GetExcludeTables() []string { + if x != nil { + return x.ExcludeTables } + return nil } -func (x *GetSrvVSchemasRequest) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *GetSchemaRequest) GetIncludeViews() bool { + if x != nil { + return x.IncludeViews + } + return false } -func (*GetSrvVSchemasRequest) ProtoMessage() {} - -func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[82] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *GetSchemaRequest) GetTableNamesOnly() bool { + if x != nil { + return x.TableNamesOnly } - return mi.MessageOf(x) + return false } -// Deprecated: Use GetSrvVSchemasRequest.ProtoReflect.Descriptor instead. -func (*GetSrvVSchemasRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{82} +func (x *GetSchemaRequest) GetTableSizesOnly() bool { + if x != nil { + return x.TableSizesOnly + } + return false } -func (x *GetSrvVSchemasRequest) GetCells() []string { +func (x *GetSchemaRequest) GetTableSchemaOnly() bool { if x != nil { - return x.Cells + return x.TableSchemaOnly } - return nil + return false } -type GetSrvVSchemasResponse struct { +type GetSchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // SrvVSchemas is a mapping of cell name to SrvVSchema - SrvVSchemas map[string]*vschema.SrvVSchema `protobuf:"bytes,1,rep,name=srv_v_schemas,json=srvVSchemas,proto3" json:"srv_v_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Schema *tabletmanagerdata.SchemaDefinition `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"` } -func (x *GetSrvVSchemasResponse) Reset() { - *x = GetSrvVSchemasResponse{} +func (x *GetSchemaResponse) Reset() { + *x = GetSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[83] + mi := &file_vtctldata_proto_msgTypes[76] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSrvVSchemasResponse) String() string { +func (x *GetSchemaResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvVSchemasResponse) ProtoMessage() {} +func (*GetSchemaResponse) ProtoMessage() {} -func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[83] +func (x *GetSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[76] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4951,43 +5250,68 @@ func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvVSchemasResponse.ProtoReflect.Descriptor instead. -func (*GetSrvVSchemasResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{83} +// Deprecated: Use GetSchemaResponse.ProtoReflect.Descriptor instead. +func (*GetSchemaResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{76} } -func (x *GetSrvVSchemasResponse) GetSrvVSchemas() map[string]*vschema.SrvVSchema { +func (x *GetSchemaResponse) GetSchema() *tabletmanagerdata.SchemaDefinition { if x != nil { - return x.SrvVSchemas + return x.Schema } return nil } -type GetTabletRequest struct { +// GetSchemaMigrationsRequest controls the behavior of the GetSchemaMigrations +// rpc. +// +// Keyspace is a required field, while all other fields are optional. +// +// If UUID is set, other optional fields will be ignored, since there will be at +// most one migration with that UUID. Furthermore, if no migration with that +// UUID exists, an empty response, not an error, is returned. +// +// MigrationContext, Status, and Recent are mutually exclusive. +type GetSchemaMigrationsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` -} - -func (x *GetTabletRequest) Reset() { - *x = GetTabletRequest{} + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Uuid, if set, will cause GetSchemaMigrations to return exactly 1 migration, + // namely the one with that UUID. If no migration exists, the response will + // be an empty slice, not an error. + // + // If this field is set, other fields (status filters, limit, skip, order) are + // ignored. + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` + MigrationContext string `protobuf:"bytes,3,opt,name=migration_context,json=migrationContext,proto3" json:"migration_context,omitempty"` + Status SchemaMigration_Status `protobuf:"varint,4,opt,name=status,proto3,enum=vtctldata.SchemaMigration_Status" json:"status,omitempty"` + // Recent, if set, returns migrations requested between now and the provided + // value. + Recent *vttime.Duration `protobuf:"bytes,5,opt,name=recent,proto3" json:"recent,omitempty"` + Order QueryOrdering `protobuf:"varint,6,opt,name=order,proto3,enum=vtctldata.QueryOrdering" json:"order,omitempty"` + Limit uint64 `protobuf:"varint,7,opt,name=limit,proto3" json:"limit,omitempty"` + Skip uint64 `protobuf:"varint,8,opt,name=skip,proto3" json:"skip,omitempty"` +} + +func (x *GetSchemaMigrationsRequest) Reset() { + *x = GetSchemaMigrationsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[84] + mi := &file_vtctldata_proto_msgTypes[77] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetTabletRequest) String() string { +func (x *GetSchemaMigrationsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetTabletRequest) ProtoMessage() {} +func (*GetSchemaMigrationsRequest) ProtoMessage() {} -func (x *GetTabletRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[84] +func (x *GetSchemaMigrationsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[77] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4998,43 +5322,92 @@ func (x *GetTabletRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetTabletRequest.ProtoReflect.Descriptor instead. -func (*GetTabletRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{84} +// Deprecated: Use GetSchemaMigrationsRequest.ProtoReflect.Descriptor instead. +func (*GetSchemaMigrationsRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{77} } -func (x *GetTabletRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *GetSchemaMigrationsRequest) GetKeyspace() string { if x != nil { - return x.TabletAlias + return x.Keyspace + } + return "" +} + +func (x *GetSchemaMigrationsRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *GetSchemaMigrationsRequest) GetMigrationContext() string { + if x != nil { + return x.MigrationContext + } + return "" +} + +func (x *GetSchemaMigrationsRequest) GetStatus() SchemaMigration_Status { + if x != nil { + return x.Status + } + return SchemaMigration_UNKNOWN +} + +func (x *GetSchemaMigrationsRequest) GetRecent() *vttime.Duration { + if x != nil { + return x.Recent } return nil } -type GetTabletResponse struct { +func (x *GetSchemaMigrationsRequest) GetOrder() QueryOrdering { + if x != nil { + return x.Order + } + return QueryOrdering_NONE +} + +func (x *GetSchemaMigrationsRequest) GetLimit() uint64 { + if x != nil { + return x.Limit + } + return 0 +} + +func (x *GetSchemaMigrationsRequest) GetSkip() uint64 { + if x != nil { + return x.Skip + } + return 0 +} + +type GetSchemaMigrationsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Tablet *topodata.Tablet `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` + Migrations []*SchemaMigration `protobuf:"bytes,1,rep,name=migrations,proto3" json:"migrations,omitempty"` } -func (x *GetTabletResponse) Reset() { - *x = GetTabletResponse{} +func (x *GetSchemaMigrationsResponse) Reset() { + *x = GetSchemaMigrationsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[85] + mi := &file_vtctldata_proto_msgTypes[78] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetTabletResponse) String() string { +func (x *GetSchemaMigrationsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetTabletResponse) ProtoMessage() {} +func (*GetSchemaMigrationsResponse) ProtoMessage() {} -func (x *GetTabletResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[85] +func (x *GetSchemaMigrationsResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[78] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5045,64 +5418,44 @@ func (x *GetTabletResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetTabletResponse.ProtoReflect.Descriptor instead. -func (*GetTabletResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{85} +// Deprecated: Use GetSchemaMigrationsResponse.ProtoReflect.Descriptor instead. +func (*GetSchemaMigrationsResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{78} } -func (x *GetTabletResponse) GetTablet() *topodata.Tablet { +func (x *GetSchemaMigrationsResponse) GetMigrations() []*SchemaMigration { if x != nil { - return x.Tablet + return x.Migrations } return nil } -type GetTabletsRequest struct { +type GetShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the name of the keyspace to return tablets for. Omit to return - // tablets from all keyspaces. - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Shard is the name of the shard to return tablets for. This field is ignored - // if Keyspace is not set. - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // Cells is an optional set of cells to return tablets for. - Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` - // Strict specifies how the server should treat failures from individual - // cells. - // - // When false (the default), GetTablets will return data from any cells that - // return successfully, but will fail the request if all cells fail. When - // true, any individual cell can fail the full request. - Strict bool `protobuf:"varint,4,opt,name=strict,proto3" json:"strict,omitempty"` - // TabletAliases is an optional list of tablet aliases to fetch Tablet objects - // for. If specified, Keyspace, Shard, and Cells are ignored, and tablets are - // looked up by their respective aliases' Cells directly. - TabletAliases []*topodata.TabletAlias `protobuf:"bytes,5,rep,name=tablet_aliases,json=tabletAliases,proto3" json:"tablet_aliases,omitempty"` - // tablet_type specifies the type of tablets to return. Omit to return all - // tablet types. - TabletType topodata.TabletType `protobuf:"varint,6,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` } -func (x *GetTabletsRequest) Reset() { - *x = GetTabletsRequest{} +func (x *GetShardRequest) Reset() { + *x = GetShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[86] + mi := &file_vtctldata_proto_msgTypes[79] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetTabletsRequest) String() string { +func (x *GetShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetTabletsRequest) ProtoMessage() {} +func (*GetShardRequest) ProtoMessage() {} -func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[86] +func (x *GetShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[79] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5113,78 +5466,50 @@ func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetTabletsRequest.ProtoReflect.Descriptor instead. -func (*GetTabletsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{86} +// Deprecated: Use GetShardRequest.ProtoReflect.Descriptor instead. +func (*GetShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{79} } -func (x *GetTabletsRequest) GetKeyspace() string { +func (x *GetShardRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *GetTabletsRequest) GetShard() string { +func (x *GetShardRequest) GetShardName() string { if x != nil { - return x.Shard + return x.ShardName } return "" } -func (x *GetTabletsRequest) GetCells() []string { - if x != nil { - return x.Cells - } - return nil -} - -func (x *GetTabletsRequest) GetStrict() bool { - if x != nil { - return x.Strict - } - return false -} - -func (x *GetTabletsRequest) GetTabletAliases() []*topodata.TabletAlias { - if x != nil { - return x.TabletAliases - } - return nil -} - -func (x *GetTabletsRequest) GetTabletType() topodata.TabletType { - if x != nil { - return x.TabletType - } - return topodata.TabletType(0) -} - -type GetTabletsResponse struct { +type GetShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Tablets []*topodata.Tablet `protobuf:"bytes,1,rep,name=tablets,proto3" json:"tablets,omitempty"` + Shard *Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` } -func (x *GetTabletsResponse) Reset() { - *x = GetTabletsResponse{} +func (x *GetShardResponse) Reset() { + *x = GetShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[87] + mi := &file_vtctldata_proto_msgTypes[80] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetTabletsResponse) String() string { +func (x *GetShardResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetTabletsResponse) ProtoMessage() {} +func (*GetShardResponse) ProtoMessage() {} -func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[87] +func (x *GetShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[80] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5195,43 +5520,41 @@ func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetTabletsResponse.ProtoReflect.Descriptor instead. -func (*GetTabletsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{87} +// Deprecated: Use GetShardResponse.ProtoReflect.Descriptor instead. +func (*GetShardResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{80} } -func (x *GetTabletsResponse) GetTablets() []*topodata.Tablet { +func (x *GetShardResponse) GetShard() *Shard { if x != nil { - return x.Tablets + return x.Shard } return nil } -type GetTopologyPathRequest struct { +type GetShardRoutingRulesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` } -func (x *GetTopologyPathRequest) Reset() { - *x = GetTopologyPathRequest{} +func (x *GetShardRoutingRulesRequest) Reset() { + *x = GetShardRoutingRulesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[88] + mi := &file_vtctldata_proto_msgTypes[81] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetTopologyPathRequest) String() string { +func (x *GetShardRoutingRulesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetTopologyPathRequest) ProtoMessage() {} +func (*GetShardRoutingRulesRequest) ProtoMessage() {} -func (x *GetTopologyPathRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[88] +func (x *GetShardRoutingRulesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[81] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5242,43 +5565,36 @@ func (x *GetTopologyPathRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetTopologyPathRequest.ProtoReflect.Descriptor instead. -func (*GetTopologyPathRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{88} -} - -func (x *GetTopologyPathRequest) GetPath() string { - if x != nil { - return x.Path - } - return "" +// Deprecated: Use GetShardRoutingRulesRequest.ProtoReflect.Descriptor instead. +func (*GetShardRoutingRulesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{81} } -type GetTopologyPathResponse struct { +type GetShardRoutingRulesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Cell *TopologyCell `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` + ShardRoutingRules *vschema.ShardRoutingRules `protobuf:"bytes,1,opt,name=shard_routing_rules,json=shardRoutingRules,proto3" json:"shard_routing_rules,omitempty"` } -func (x *GetTopologyPathResponse) Reset() { - *x = GetTopologyPathResponse{} +func (x *GetShardRoutingRulesResponse) Reset() { + *x = GetShardRoutingRulesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[89] + mi := &file_vtctldata_proto_msgTypes[82] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetTopologyPathResponse) String() string { +func (x *GetShardRoutingRulesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetTopologyPathResponse) ProtoMessage() {} +func (*GetShardRoutingRulesResponse) ProtoMessage() {} -func (x *GetTopologyPathResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[89] +func (x *GetShardRoutingRulesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[82] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5289,48 +5605,43 @@ func (x *GetTopologyPathResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetTopologyPathResponse.ProtoReflect.Descriptor instead. -func (*GetTopologyPathResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{89} +// Deprecated: Use GetShardRoutingRulesResponse.ProtoReflect.Descriptor instead. +func (*GetShardRoutingRulesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{82} } -func (x *GetTopologyPathResponse) GetCell() *TopologyCell { +func (x *GetShardRoutingRulesResponse) GetShardRoutingRules() *vschema.ShardRoutingRules { if x != nil { - return x.Cell + return x.ShardRoutingRules } return nil } -type TopologyCell struct { +type GetSrvKeyspaceNamesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` - // Data is the file contents of the cell located at path. - // It is only populated if the cell is a terminal node. - Data string `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` - Children []string `protobuf:"bytes,4,rep,name=children,proto3" json:"children,omitempty"` + Cells []string `protobuf:"bytes,1,rep,name=cells,proto3" json:"cells,omitempty"` } -func (x *TopologyCell) Reset() { - *x = TopologyCell{} +func (x *GetSrvKeyspaceNamesRequest) Reset() { + *x = GetSrvKeyspaceNamesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[90] + mi := &file_vtctldata_proto_msgTypes[83] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *TopologyCell) String() string { +func (x *GetSrvKeyspaceNamesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*TopologyCell) ProtoMessage() {} +func (*GetSrvKeyspaceNamesRequest) ProtoMessage() {} -func (x *TopologyCell) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[90] +func (x *GetSrvKeyspaceNamesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[83] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5341,64 +5652,44 @@ func (x *TopologyCell) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TopologyCell.ProtoReflect.Descriptor instead. -func (*TopologyCell) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{90} -} - -func (x *TopologyCell) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *TopologyCell) GetPath() string { - if x != nil { - return x.Path - } - return "" -} - -func (x *TopologyCell) GetData() string { - if x != nil { - return x.Data - } - return "" +// Deprecated: Use GetSrvKeyspaceNamesRequest.ProtoReflect.Descriptor instead. +func (*GetSrvKeyspaceNamesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{83} } -func (x *TopologyCell) GetChildren() []string { +func (x *GetSrvKeyspaceNamesRequest) GetCells() []string { if x != nil { - return x.Children + return x.Cells } return nil } -type GetVSchemaRequest struct { +type GetSrvKeyspaceNamesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Names is a mapping of cell name to a list of SrvKeyspace names. + Names map[string]*GetSrvKeyspaceNamesResponse_NameList `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *GetVSchemaRequest) Reset() { - *x = GetVSchemaRequest{} +func (x *GetSrvKeyspaceNamesResponse) Reset() { + *x = GetSrvKeyspaceNamesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[91] + mi := &file_vtctldata_proto_msgTypes[84] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetVSchemaRequest) String() string { +func (x *GetSrvKeyspaceNamesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetVSchemaRequest) ProtoMessage() {} +func (*GetSrvKeyspaceNamesResponse) ProtoMessage() {} -func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[91] +func (x *GetSrvKeyspaceNamesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[84] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5409,43 +5700,46 @@ func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetVSchemaRequest.ProtoReflect.Descriptor instead. -func (*GetVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{91} +// Deprecated: Use GetSrvKeyspaceNamesResponse.ProtoReflect.Descriptor instead. +func (*GetSrvKeyspaceNamesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{84} } -func (x *GetVSchemaRequest) GetKeyspace() string { +func (x *GetSrvKeyspaceNamesResponse) GetNames() map[string]*GetSrvKeyspaceNamesResponse_NameList { if x != nil { - return x.Keyspace + return x.Names } - return "" + return nil } -type GetVersionRequest struct { +type GetSrvKeyspacesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Cells is a list of cells to lookup a SrvKeyspace for. Leaving this empty is + // equivalent to specifying all cells in the topo. + Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` } -func (x *GetVersionRequest) Reset() { - *x = GetVersionRequest{} +func (x *GetSrvKeyspacesRequest) Reset() { + *x = GetSrvKeyspacesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[92] + mi := &file_vtctldata_proto_msgTypes[85] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetVersionRequest) String() string { +func (x *GetSrvKeyspacesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetVersionRequest) ProtoMessage() {} +func (*GetSrvKeyspacesRequest) ProtoMessage() {} -func (x *GetVersionRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[92] +func (x *GetSrvKeyspacesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[85] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5456,43 +5750,51 @@ func (x *GetVersionRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetVersionRequest.ProtoReflect.Descriptor instead. -func (*GetVersionRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{92} +// Deprecated: Use GetSrvKeyspacesRequest.ProtoReflect.Descriptor instead. +func (*GetSrvKeyspacesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{85} } -func (x *GetVersionRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *GetSrvKeyspacesRequest) GetKeyspace() string { if x != nil { - return x.TabletAlias + return x.Keyspace + } + return "" +} + +func (x *GetSrvKeyspacesRequest) GetCells() []string { + if x != nil { + return x.Cells } return nil } -type GetVersionResponse struct { +type GetSrvKeyspacesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // SrvKeyspaces is a mapping of cell name to SrvKeyspace. + SrvKeyspaces map[string]*topodata.SrvKeyspace `protobuf:"bytes,1,rep,name=srv_keyspaces,json=srvKeyspaces,proto3" json:"srv_keyspaces,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *GetVersionResponse) Reset() { - *x = GetVersionResponse{} +func (x *GetSrvKeyspacesResponse) Reset() { + *x = GetSrvKeyspacesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[93] + mi := &file_vtctldata_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetVersionResponse) String() string { +func (x *GetSrvKeyspacesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetVersionResponse) ProtoMessage() {} +func (*GetSrvKeyspacesResponse) ProtoMessage() {} -func (x *GetVersionResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[93] +func (x *GetSrvKeyspacesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[86] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5503,43 +5805,59 @@ func (x *GetVersionResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetVersionResponse.ProtoReflect.Descriptor instead. -func (*GetVersionResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{93} +// Deprecated: Use GetSrvKeyspacesResponse.ProtoReflect.Descriptor instead. +func (*GetSrvKeyspacesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{86} } -func (x *GetVersionResponse) GetVersion() string { +func (x *GetSrvKeyspacesResponse) GetSrvKeyspaces() map[string]*topodata.SrvKeyspace { if x != nil { - return x.Version + return x.SrvKeyspaces } - return "" + return nil } -type GetVSchemaResponse struct { +type UpdateThrottlerConfigRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - VSchema *vschema.Keyspace `protobuf:"bytes,1,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Enable instructs to enable the throttler + Enable bool `protobuf:"varint,2,opt,name=enable,proto3" json:"enable,omitempty"` + // Disable instructs to disable the throttler + Disable bool `protobuf:"varint,3,opt,name=disable,proto3" json:"disable,omitempty"` + // Threshold for throttler (with no custom query, ie using default query, only positive values are considered) + Threshold float64 `protobuf:"fixed64,4,opt,name=threshold,proto3" json:"threshold,omitempty"` + // CustomQuery replaces the default replication lag query + CustomQuery string `protobuf:"bytes,5,opt,name=custom_query,json=customQuery,proto3" json:"custom_query,omitempty"` + // CustomQuerySet indicates that the value of CustomQuery has changed + CustomQuerySet bool `protobuf:"varint,6,opt,name=custom_query_set,json=customQuerySet,proto3" json:"custom_query_set,omitempty"` + // CheckAsCheckSelf instructs the throttler to respond to /check requests by checking the tablet's own health + CheckAsCheckSelf bool `protobuf:"varint,7,opt,name=check_as_check_self,json=checkAsCheckSelf,proto3" json:"check_as_check_self,omitempty"` + // CheckAsCheckShard instructs the throttler to respond to /check requests by checking the shard's health (this is the default behavior) + CheckAsCheckShard bool `protobuf:"varint,8,opt,name=check_as_check_shard,json=checkAsCheckShard,proto3" json:"check_as_check_shard,omitempty"` + // ThrottledApp indicates a single throttled app rule (ignored if name is empty) + ThrottledApp *topodata.ThrottledAppRule `protobuf:"bytes,9,opt,name=throttled_app,json=throttledApp,proto3" json:"throttled_app,omitempty"` } -func (x *GetVSchemaResponse) Reset() { - *x = GetVSchemaResponse{} +func (x *UpdateThrottlerConfigRequest) Reset() { + *x = UpdateThrottlerConfigRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[94] + mi := &file_vtctldata_proto_msgTypes[87] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetVSchemaResponse) String() string { +func (x *UpdateThrottlerConfigRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetVSchemaResponse) ProtoMessage() {} +func (*UpdateThrottlerConfigRequest) ProtoMessage() {} -func (x *GetVSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[94] +func (x *UpdateThrottlerConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[87] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5550,98 +5868,97 @@ func (x *GetVSchemaResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetVSchemaResponse.ProtoReflect.Descriptor instead. -func (*GetVSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{94} +// Deprecated: Use UpdateThrottlerConfigRequest.ProtoReflect.Descriptor instead. +func (*UpdateThrottlerConfigRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{87} } -func (x *GetVSchemaResponse) GetVSchema() *vschema.Keyspace { +func (x *UpdateThrottlerConfigRequest) GetKeyspace() string { if x != nil { - return x.VSchema + return x.Keyspace } - return nil + return "" } -type GetWorkflowsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - ActiveOnly bool `protobuf:"varint,2,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"` +func (x *UpdateThrottlerConfigRequest) GetEnable() bool { + if x != nil { + return x.Enable + } + return false } -func (x *GetWorkflowsRequest) Reset() { - *x = GetWorkflowsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[95] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *UpdateThrottlerConfigRequest) GetDisable() bool { + if x != nil { + return x.Disable } + return false } -func (x *GetWorkflowsRequest) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *UpdateThrottlerConfigRequest) GetThreshold() float64 { + if x != nil { + return x.Threshold + } + return 0 } -func (*GetWorkflowsRequest) ProtoMessage() {} - -func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[95] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *UpdateThrottlerConfigRequest) GetCustomQuery() string { + if x != nil { + return x.CustomQuery } - return mi.MessageOf(x) + return "" } -// Deprecated: Use GetWorkflowsRequest.ProtoReflect.Descriptor instead. -func (*GetWorkflowsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{95} +func (x *UpdateThrottlerConfigRequest) GetCustomQuerySet() bool { + if x != nil { + return x.CustomQuerySet + } + return false } -func (x *GetWorkflowsRequest) GetKeyspace() string { +func (x *UpdateThrottlerConfigRequest) GetCheckAsCheckSelf() bool { if x != nil { - return x.Keyspace + return x.CheckAsCheckSelf } - return "" + return false } -func (x *GetWorkflowsRequest) GetActiveOnly() bool { +func (x *UpdateThrottlerConfigRequest) GetCheckAsCheckShard() bool { if x != nil { - return x.ActiveOnly + return x.CheckAsCheckShard } return false } -type GetWorkflowsResponse struct { +func (x *UpdateThrottlerConfigRequest) GetThrottledApp() *topodata.ThrottledAppRule { + if x != nil { + return x.ThrottledApp + } + return nil +} + +type UpdateThrottlerConfigResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Workflows []*Workflow `protobuf:"bytes,1,rep,name=workflows,proto3" json:"workflows,omitempty"` } -func (x *GetWorkflowsResponse) Reset() { - *x = GetWorkflowsResponse{} +func (x *UpdateThrottlerConfigResponse) Reset() { + *x = UpdateThrottlerConfigResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[96] + mi := &file_vtctldata_proto_msgTypes[88] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetWorkflowsResponse) String() string { +func (x *UpdateThrottlerConfigResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetWorkflowsResponse) ProtoMessage() {} +func (*UpdateThrottlerConfigResponse) ProtoMessage() {} -func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[96] +func (x *UpdateThrottlerConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[88] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5652,47 +5969,36 @@ func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetWorkflowsResponse.ProtoReflect.Descriptor instead. -func (*GetWorkflowsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{96} -} - -func (x *GetWorkflowsResponse) GetWorkflows() []*Workflow { - if x != nil { - return x.Workflows - } - return nil +// Deprecated: Use UpdateThrottlerConfigResponse.ProtoReflect.Descriptor instead. +func (*UpdateThrottlerConfigResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{88} } -type InitShardPrimaryRequest struct { +type GetSrvVSchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - PrimaryElectTabletAlias *topodata.TabletAlias `protobuf:"bytes,3,opt,name=primary_elect_tablet_alias,json=primaryElectTabletAlias,proto3" json:"primary_elect_tablet_alias,omitempty"` - Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` - WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` + Cell string `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` } -func (x *InitShardPrimaryRequest) Reset() { - *x = InitShardPrimaryRequest{} +func (x *GetSrvVSchemaRequest) Reset() { + *x = GetSrvVSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[97] + mi := &file_vtctldata_proto_msgTypes[89] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *InitShardPrimaryRequest) String() string { +func (x *GetSrvVSchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*InitShardPrimaryRequest) ProtoMessage() {} +func (*GetSrvVSchemaRequest) ProtoMessage() {} -func (x *InitShardPrimaryRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[97] +func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[89] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5703,71 +6009,90 @@ func (x *InitShardPrimaryRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use InitShardPrimaryRequest.ProtoReflect.Descriptor instead. -func (*InitShardPrimaryRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{97} +// Deprecated: Use GetSrvVSchemaRequest.ProtoReflect.Descriptor instead. +func (*GetSrvVSchemaRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{89} } -func (x *InitShardPrimaryRequest) GetKeyspace() string { +func (x *GetSrvVSchemaRequest) GetCell() string { if x != nil { - return x.Keyspace + return x.Cell } return "" } -func (x *InitShardPrimaryRequest) GetShard() string { - if x != nil { - return x.Shard - } - return "" +type GetSrvVSchemaResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SrvVSchema *vschema.SrvVSchema `protobuf:"bytes,1,opt,name=srv_v_schema,json=srvVSchema,proto3" json:"srv_v_schema,omitempty"` } -func (x *InitShardPrimaryRequest) GetPrimaryElectTabletAlias() *topodata.TabletAlias { - if x != nil { - return x.PrimaryElectTabletAlias +func (x *GetSrvVSchemaResponse) Reset() { + *x = GetSrvVSchemaResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[90] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (x *InitShardPrimaryRequest) GetForce() bool { - if x != nil { - return x.Force +func (x *GetSrvVSchemaResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSrvVSchemaResponse) ProtoMessage() {} + +func (x *GetSrvVSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[90] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return false + return mi.MessageOf(x) } -func (x *InitShardPrimaryRequest) GetWaitReplicasTimeout() *vttime.Duration { +// Deprecated: Use GetSrvVSchemaResponse.ProtoReflect.Descriptor instead. +func (*GetSrvVSchemaResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{90} +} + +func (x *GetSrvVSchemaResponse) GetSrvVSchema() *vschema.SrvVSchema { if x != nil { - return x.WaitReplicasTimeout + return x.SrvVSchema } return nil } -type InitShardPrimaryResponse struct { +type GetSrvVSchemasRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Events []*logutil.Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` + Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` } -func (x *InitShardPrimaryResponse) Reset() { - *x = InitShardPrimaryResponse{} +func (x *GetSrvVSchemasRequest) Reset() { + *x = GetSrvVSchemasRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[98] + mi := &file_vtctldata_proto_msgTypes[91] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *InitShardPrimaryResponse) String() string { +func (x *GetSrvVSchemasRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*InitShardPrimaryResponse) ProtoMessage() {} +func (*GetSrvVSchemasRequest) ProtoMessage() {} -func (x *InitShardPrimaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[98] +func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[91] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5778,43 +6103,44 @@ func (x *InitShardPrimaryResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use InitShardPrimaryResponse.ProtoReflect.Descriptor instead. -func (*InitShardPrimaryResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{98} +// Deprecated: Use GetSrvVSchemasRequest.ProtoReflect.Descriptor instead. +func (*GetSrvVSchemasRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{91} } -func (x *InitShardPrimaryResponse) GetEvents() []*logutil.Event { +func (x *GetSrvVSchemasRequest) GetCells() []string { if x != nil { - return x.Events + return x.Cells } return nil } -type PingTabletRequest struct { +type GetSrvVSchemasResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + // SrvVSchemas is a mapping of cell name to SrvVSchema + SrvVSchemas map[string]*vschema.SrvVSchema `protobuf:"bytes,1,rep,name=srv_v_schemas,json=srvVSchemas,proto3" json:"srv_v_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *PingTabletRequest) Reset() { - *x = PingTabletRequest{} +func (x *GetSrvVSchemasResponse) Reset() { + *x = GetSrvVSchemasResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[99] + mi := &file_vtctldata_proto_msgTypes[92] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *PingTabletRequest) String() string { +func (x *GetSrvVSchemasResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PingTabletRequest) ProtoMessage() {} +func (*GetSrvVSchemasResponse) ProtoMessage() {} -func (x *PingTabletRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[99] +func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[92] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5825,41 +6151,43 @@ func (x *PingTabletRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PingTabletRequest.ProtoReflect.Descriptor instead. -func (*PingTabletRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{99} +// Deprecated: Use GetSrvVSchemasResponse.ProtoReflect.Descriptor instead. +func (*GetSrvVSchemasResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{92} } -func (x *PingTabletRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *GetSrvVSchemasResponse) GetSrvVSchemas() map[string]*vschema.SrvVSchema { if x != nil { - return x.TabletAlias + return x.SrvVSchemas } return nil } -type PingTabletResponse struct { +type GetTabletRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *PingTabletResponse) Reset() { - *x = PingTabletResponse{} +func (x *GetTabletRequest) Reset() { + *x = GetTabletRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[100] + mi := &file_vtctldata_proto_msgTypes[93] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *PingTabletResponse) String() string { +func (x *GetTabletRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PingTabletResponse) ProtoMessage() {} +func (*GetTabletRequest) ProtoMessage() {} -func (x *PingTabletResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[100] +func (x *GetTabletRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[93] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5870,57 +6198,43 @@ func (x *PingTabletResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PingTabletResponse.ProtoReflect.Descriptor instead. -func (*PingTabletResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{100} +// Deprecated: Use GetTabletRequest.ProtoReflect.Descriptor instead. +func (*GetTabletRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{93} } -type PlannedReparentShardRequest struct { +func (x *GetTabletRequest) GetTabletAlias() *topodata.TabletAlias { + if x != nil { + return x.TabletAlias + } + return nil +} + +type GetTabletResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the name of the keyspace to perform the Planned Reparent in. - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Shard is the name of the shard to perform teh Planned Reparent in. - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // NewPrimary is the alias of the tablet to promote to shard primary. If not - // specified, the vtctld will select the most up-to-date candidate to promote. - // - // It is an error to set NewPrimary and AvoidPrimary to the same alias. - NewPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=new_primary,json=newPrimary,proto3" json:"new_primary,omitempty"` - // AvoidPrimary is the alias of the tablet to demote. In other words, - // specifying an AvoidPrimary alias tells the vtctld to promote any replica - // other than this one. A shard whose current primary is not this one is then - // a no-op. - // - // It is an error to set NewPrimary and AvoidPrimary to the same alias. - AvoidPrimary *topodata.TabletAlias `protobuf:"bytes,4,opt,name=avoid_primary,json=avoidPrimary,proto3" json:"avoid_primary,omitempty"` - // WaitReplicasTimeout is the duration of time to wait for replicas to catch - // up in replication both before and after the reparent. The timeout is not - // cumulative across both wait periods, meaning that the replicas have - // WaitReplicasTimeout time to catch up before the reparent, and an additional - // WaitReplicasTimeout time to catch up after the reparent. - WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` + Tablet *topodata.Tablet `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` } -func (x *PlannedReparentShardRequest) Reset() { - *x = PlannedReparentShardRequest{} +func (x *GetTabletResponse) Reset() { + *x = GetTabletResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[101] + mi := &file_vtctldata_proto_msgTypes[94] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *PlannedReparentShardRequest) String() string { +func (x *GetTabletResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PlannedReparentShardRequest) ProtoMessage() {} +func (*GetTabletResponse) ProtoMessage() {} -func (x *PlannedReparentShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[101] +func (x *GetTabletResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[94] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5931,80 +6245,64 @@ func (x *PlannedReparentShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PlannedReparentShardRequest.ProtoReflect.Descriptor instead. -func (*PlannedReparentShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{101} -} - -func (x *PlannedReparentShardRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" +// Deprecated: Use GetTabletResponse.ProtoReflect.Descriptor instead. +func (*GetTabletResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{94} } -func (x *PlannedReparentShardRequest) GetShard() string { +func (x *GetTabletResponse) GetTablet() *topodata.Tablet { if x != nil { - return x.Shard + return x.Tablet } - return "" + return nil } -func (x *PlannedReparentShardRequest) GetNewPrimary() *topodata.TabletAlias { - if x != nil { - return x.NewPrimary - } - return nil -} - -func (x *PlannedReparentShardRequest) GetAvoidPrimary() *topodata.TabletAlias { - if x != nil { - return x.AvoidPrimary - } - return nil -} - -func (x *PlannedReparentShardRequest) GetWaitReplicasTimeout() *vttime.Duration { - if x != nil { - return x.WaitReplicasTimeout - } - return nil -} - -type PlannedReparentShardResponse struct { +type GetTabletsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the name of the keyspace the Planned Reparent took place in. + // Keyspace is the name of the keyspace to return tablets for. Omit to return + // tablets from all keyspaces. Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Shard is the name of the shard the Planned Reparent took place in. + // Shard is the name of the shard to return tablets for. This field is ignored + // if Keyspace is not set. Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // PromotedPrimary is the alias of the tablet that was promoted to shard - // primary. If NewPrimary was set in the request, then this will be the same - // alias. Otherwise, it will be the alias of the tablet found to be most - // up-to-date. - PromotedPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=promoted_primary,json=promotedPrimary,proto3" json:"promoted_primary,omitempty"` - Events []*logutil.Event `protobuf:"bytes,4,rep,name=events,proto3" json:"events,omitempty"` + // Cells is an optional set of cells to return tablets for. + Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` + // Strict specifies how the server should treat failures from individual + // cells. + // + // When false (the default), GetTablets will return data from any cells that + // return successfully, but will fail the request if all cells fail. When + // true, any individual cell can fail the full request. + Strict bool `protobuf:"varint,4,opt,name=strict,proto3" json:"strict,omitempty"` + // TabletAliases is an optional list of tablet aliases to fetch Tablet objects + // for. If specified, Keyspace, Shard, and Cells are ignored, and tablets are + // looked up by their respective aliases' Cells directly. + TabletAliases []*topodata.TabletAlias `protobuf:"bytes,5,rep,name=tablet_aliases,json=tabletAliases,proto3" json:"tablet_aliases,omitempty"` + // tablet_type specifies the type of tablets to return. Omit to return all + // tablet types. + TabletType topodata.TabletType `protobuf:"varint,6,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` } -func (x *PlannedReparentShardResponse) Reset() { - *x = PlannedReparentShardResponse{} +func (x *GetTabletsRequest) Reset() { + *x = GetTabletsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[102] + mi := &file_vtctldata_proto_msgTypes[95] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *PlannedReparentShardResponse) String() string { +func (x *GetTabletsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PlannedReparentShardResponse) ProtoMessage() {} +func (*GetTabletsRequest) ProtoMessage() {} -func (x *PlannedReparentShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[102] +func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[95] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6015,68 +6313,78 @@ func (x *PlannedReparentShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PlannedReparentShardResponse.ProtoReflect.Descriptor instead. -func (*PlannedReparentShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{102} +// Deprecated: Use GetTabletsRequest.ProtoReflect.Descriptor instead. +func (*GetTabletsRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{95} } -func (x *PlannedReparentShardResponse) GetKeyspace() string { +func (x *GetTabletsRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *PlannedReparentShardResponse) GetShard() string { +func (x *GetTabletsRequest) GetShard() string { if x != nil { return x.Shard } return "" } -func (x *PlannedReparentShardResponse) GetPromotedPrimary() *topodata.TabletAlias { +func (x *GetTabletsRequest) GetCells() []string { if x != nil { - return x.PromotedPrimary + return x.Cells } return nil } -func (x *PlannedReparentShardResponse) GetEvents() []*logutil.Event { +func (x *GetTabletsRequest) GetStrict() bool { if x != nil { - return x.Events + return x.Strict + } + return false +} + +func (x *GetTabletsRequest) GetTabletAliases() []*topodata.TabletAlias { + if x != nil { + return x.TabletAliases } return nil } -type RebuildKeyspaceGraphRequest struct { +func (x *GetTabletsRequest) GetTabletType() topodata.TabletType { + if x != nil { + return x.TabletType + } + return topodata.TabletType(0) +} + +type GetTabletsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` - // AllowPartial, when set, allows a SNAPSHOT keyspace to serve with an - // incomplete set of shards. It is ignored for all other keyspace types. - AllowPartial bool `protobuf:"varint,3,opt,name=allow_partial,json=allowPartial,proto3" json:"allow_partial,omitempty"` + Tablets []*topodata.Tablet `protobuf:"bytes,1,rep,name=tablets,proto3" json:"tablets,omitempty"` } -func (x *RebuildKeyspaceGraphRequest) Reset() { - *x = RebuildKeyspaceGraphRequest{} +func (x *GetTabletsResponse) Reset() { + *x = GetTabletsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[103] + mi := &file_vtctldata_proto_msgTypes[96] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RebuildKeyspaceGraphRequest) String() string { +func (x *GetTabletsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RebuildKeyspaceGraphRequest) ProtoMessage() {} +func (*GetTabletsResponse) ProtoMessage() {} -func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[103] +func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[96] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6087,55 +6395,43 @@ func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RebuildKeyspaceGraphRequest.ProtoReflect.Descriptor instead. -func (*RebuildKeyspaceGraphRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{103} -} - -func (x *RebuildKeyspaceGraphRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" +// Deprecated: Use GetTabletsResponse.ProtoReflect.Descriptor instead. +func (*GetTabletsResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{96} } -func (x *RebuildKeyspaceGraphRequest) GetCells() []string { +func (x *GetTabletsResponse) GetTablets() []*topodata.Tablet { if x != nil { - return x.Cells + return x.Tablets } return nil } -func (x *RebuildKeyspaceGraphRequest) GetAllowPartial() bool { - if x != nil { - return x.AllowPartial - } - return false -} - -type RebuildKeyspaceGraphResponse struct { +type GetTopologyPathRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` } -func (x *RebuildKeyspaceGraphResponse) Reset() { - *x = RebuildKeyspaceGraphResponse{} +func (x *GetTopologyPathRequest) Reset() { + *x = GetTopologyPathRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[104] + mi := &file_vtctldata_proto_msgTypes[97] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RebuildKeyspaceGraphResponse) String() string { +func (x *GetTopologyPathRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RebuildKeyspaceGraphResponse) ProtoMessage() {} +func (*GetTopologyPathRequest) ProtoMessage() {} -func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[104] +func (x *GetTopologyPathRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[97] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6146,38 +6442,43 @@ func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RebuildKeyspaceGraphResponse.ProtoReflect.Descriptor instead. -func (*RebuildKeyspaceGraphResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{104} +// Deprecated: Use GetTopologyPathRequest.ProtoReflect.Descriptor instead. +func (*GetTopologyPathRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{97} } -type RebuildVSchemaGraphRequest struct { +func (x *GetTopologyPathRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +type GetTopologyPathResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Cells specifies the cells to rebuild the SrvVSchema objects for. If empty, - // RebuildVSchemaGraph rebuilds the SrvVSchema for every cell in the topo. - Cells []string `protobuf:"bytes,1,rep,name=cells,proto3" json:"cells,omitempty"` + Cell *TopologyCell `protobuf:"bytes,1,opt,name=cell,proto3" json:"cell,omitempty"` } -func (x *RebuildVSchemaGraphRequest) Reset() { - *x = RebuildVSchemaGraphRequest{} +func (x *GetTopologyPathResponse) Reset() { + *x = GetTopologyPathResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[105] + mi := &file_vtctldata_proto_msgTypes[98] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RebuildVSchemaGraphRequest) String() string { +func (x *GetTopologyPathResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RebuildVSchemaGraphRequest) ProtoMessage() {} +func (*GetTopologyPathResponse) ProtoMessage() {} -func (x *RebuildVSchemaGraphRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[105] +func (x *GetTopologyPathResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[98] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6188,41 +6489,48 @@ func (x *RebuildVSchemaGraphRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RebuildVSchemaGraphRequest.ProtoReflect.Descriptor instead. -func (*RebuildVSchemaGraphRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{105} +// Deprecated: Use GetTopologyPathResponse.ProtoReflect.Descriptor instead. +func (*GetTopologyPathResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{98} } -func (x *RebuildVSchemaGraphRequest) GetCells() []string { +func (x *GetTopologyPathResponse) GetCell() *TopologyCell { if x != nil { - return x.Cells + return x.Cell } return nil } -type RebuildVSchemaGraphResponse struct { +type TopologyCell struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + // Data is the file contents of the cell located at path. + // It is only populated if the cell is a terminal node. + Data string `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + Children []string `protobuf:"bytes,4,rep,name=children,proto3" json:"children,omitempty"` } -func (x *RebuildVSchemaGraphResponse) Reset() { - *x = RebuildVSchemaGraphResponse{} +func (x *TopologyCell) Reset() { + *x = TopologyCell{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[106] + mi := &file_vtctldata_proto_msgTypes[99] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RebuildVSchemaGraphResponse) String() string { +func (x *TopologyCell) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RebuildVSchemaGraphResponse) ProtoMessage() {} +func (*TopologyCell) ProtoMessage() {} -func (x *RebuildVSchemaGraphResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[106] +func (x *TopologyCell) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[99] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6233,81 +6541,64 @@ func (x *RebuildVSchemaGraphResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RebuildVSchemaGraphResponse.ProtoReflect.Descriptor instead. -func (*RebuildVSchemaGraphResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{106} +// Deprecated: Use TopologyCell.ProtoReflect.Descriptor instead. +func (*TopologyCell) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{99} } -type RefreshStateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` +func (x *TopologyCell) GetName() string { + if x != nil { + return x.Name + } + return "" } -func (x *RefreshStateRequest) Reset() { - *x = RefreshStateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[107] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *TopologyCell) GetPath() string { + if x != nil { + return x.Path } + return "" } -func (x *RefreshStateRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RefreshStateRequest) ProtoMessage() {} - -func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[107] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *TopologyCell) GetData() string { + if x != nil { + return x.Data } - return mi.MessageOf(x) -} - -// Deprecated: Use RefreshStateRequest.ProtoReflect.Descriptor instead. -func (*RefreshStateRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{107} + return "" } -func (x *RefreshStateRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *TopologyCell) GetChildren() []string { if x != nil { - return x.TabletAlias + return x.Children } return nil } -type RefreshStateResponse struct { +type GetVSchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *RefreshStateResponse) Reset() { - *x = RefreshStateResponse{} +func (x *GetVSchemaRequest) Reset() { + *x = GetVSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[108] + mi := &file_vtctldata_proto_msgTypes[100] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RefreshStateResponse) String() string { +func (x *GetVSchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RefreshStateResponse) ProtoMessage() {} +func (*GetVSchemaRequest) ProtoMessage() {} -func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[108] +func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[100] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6318,38 +6609,43 @@ func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RefreshStateResponse.ProtoReflect.Descriptor instead. -func (*RefreshStateResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{108} +// Deprecated: Use GetVSchemaRequest.ProtoReflect.Descriptor instead. +func (*GetVSchemaRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{100} } -type RefreshStateByShardRequest struct { +func (x *GetVSchemaRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +type GetVersionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *RefreshStateByShardRequest) Reset() { - *x = RefreshStateByShardRequest{} +func (x *GetVersionRequest) Reset() { + *x = GetVersionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[109] + mi := &file_vtctldata_proto_msgTypes[101] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RefreshStateByShardRequest) String() string { +func (x *GetVersionRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RefreshStateByShardRequest) ProtoMessage() {} +func (*GetVersionRequest) ProtoMessage() {} -func (x *RefreshStateByShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[109] +func (x *GetVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[101] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6360,59 +6656,43 @@ func (x *RefreshStateByShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RefreshStateByShardRequest.ProtoReflect.Descriptor instead. -func (*RefreshStateByShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{109} -} - -func (x *RefreshStateByShardRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *RefreshStateByShardRequest) GetShard() string { - if x != nil { - return x.Shard - } - return "" +// Deprecated: Use GetVersionRequest.ProtoReflect.Descriptor instead. +func (*GetVersionRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{101} } -func (x *RefreshStateByShardRequest) GetCells() []string { +func (x *GetVersionRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Cells + return x.TabletAlias } return nil } -type RefreshStateByShardResponse struct { +type GetVersionResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - IsPartialRefresh bool `protobuf:"varint,1,opt,name=is_partial_refresh,json=isPartialRefresh,proto3" json:"is_partial_refresh,omitempty"` - // This explains why we had a partial refresh (if we did) - PartialRefreshDetails string `protobuf:"bytes,2,opt,name=partial_refresh_details,json=partialRefreshDetails,proto3" json:"partial_refresh_details,omitempty"` + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` } -func (x *RefreshStateByShardResponse) Reset() { - *x = RefreshStateByShardResponse{} +func (x *GetVersionResponse) Reset() { + *x = GetVersionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[110] + mi := &file_vtctldata_proto_msgTypes[102] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RefreshStateByShardResponse) String() string { +func (x *GetVersionResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RefreshStateByShardResponse) ProtoMessage() {} +func (*GetVersionResponse) ProtoMessage() {} -func (x *RefreshStateByShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[110] +func (x *GetVersionResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[102] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6423,50 +6703,43 @@ func (x *RefreshStateByShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RefreshStateByShardResponse.ProtoReflect.Descriptor instead. -func (*RefreshStateByShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{110} -} - -func (x *RefreshStateByShardResponse) GetIsPartialRefresh() bool { - if x != nil { - return x.IsPartialRefresh - } - return false +// Deprecated: Use GetVersionResponse.ProtoReflect.Descriptor instead. +func (*GetVersionResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{102} } -func (x *RefreshStateByShardResponse) GetPartialRefreshDetails() string { +func (x *GetVersionResponse) GetVersion() string { if x != nil { - return x.PartialRefreshDetails + return x.Version } return "" } -type ReloadSchemaRequest struct { +type GetVSchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + VSchema *vschema.Keyspace `protobuf:"bytes,1,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` } -func (x *ReloadSchemaRequest) Reset() { - *x = ReloadSchemaRequest{} +func (x *GetVSchemaResponse) Reset() { + *x = GetVSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[111] + mi := &file_vtctldata_proto_msgTypes[103] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReloadSchemaRequest) String() string { +func (x *GetVSchemaResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReloadSchemaRequest) ProtoMessage() {} +func (*GetVSchemaResponse) ProtoMessage() {} -func (x *ReloadSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[111] +func (x *GetVSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[103] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6477,41 +6750,47 @@ func (x *ReloadSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReloadSchemaRequest.ProtoReflect.Descriptor instead. -func (*ReloadSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{111} +// Deprecated: Use GetVSchemaResponse.ProtoReflect.Descriptor instead. +func (*GetVSchemaResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{103} } -func (x *ReloadSchemaRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *GetVSchemaResponse) GetVSchema() *vschema.Keyspace { if x != nil { - return x.TabletAlias + return x.VSchema } return nil } -type ReloadSchemaResponse struct { +type GetWorkflowsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + ActiveOnly bool `protobuf:"varint,2,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"` + NameOnly bool `protobuf:"varint,3,opt,name=name_only,json=nameOnly,proto3" json:"name_only,omitempty"` + // If you only want a specific workflow then set this field. + Workflow string `protobuf:"bytes,4,opt,name=workflow,proto3" json:"workflow,omitempty"` } -func (x *ReloadSchemaResponse) Reset() { - *x = ReloadSchemaResponse{} +func (x *GetWorkflowsRequest) Reset() { + *x = GetWorkflowsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[112] + mi := &file_vtctldata_proto_msgTypes[104] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReloadSchemaResponse) String() string { +func (x *GetWorkflowsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReloadSchemaResponse) ProtoMessage() {} +func (*GetWorkflowsRequest) ProtoMessage() {} -func (x *ReloadSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[112] +func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[104] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6522,42 +6801,64 @@ func (x *ReloadSchemaResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReloadSchemaResponse.ProtoReflect.Descriptor instead. -func (*ReloadSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{112} +// Deprecated: Use GetWorkflowsRequest.ProtoReflect.Descriptor instead. +func (*GetWorkflowsRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{104} } -type ReloadSchemaKeyspaceRequest struct { +func (x *GetWorkflowsRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *GetWorkflowsRequest) GetActiveOnly() bool { + if x != nil { + return x.ActiveOnly + } + return false +} + +func (x *GetWorkflowsRequest) GetNameOnly() bool { + if x != nil { + return x.NameOnly + } + return false +} + +func (x *GetWorkflowsRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +type GetWorkflowsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - WaitPosition string `protobuf:"bytes,2,opt,name=wait_position,json=waitPosition,proto3" json:"wait_position,omitempty"` - IncludePrimary bool `protobuf:"varint,3,opt,name=include_primary,json=includePrimary,proto3" json:"include_primary,omitempty"` - // Concurrency is the global concurrency across all shards in the keyspace - // (so, at most this many tablets will be reloaded across the keyspace at any - // given point). - Concurrency uint32 `protobuf:"varint,4,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + Workflows []*Workflow `protobuf:"bytes,1,rep,name=workflows,proto3" json:"workflows,omitempty"` } -func (x *ReloadSchemaKeyspaceRequest) Reset() { - *x = ReloadSchemaKeyspaceRequest{} +func (x *GetWorkflowsResponse) Reset() { + *x = GetWorkflowsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[113] + mi := &file_vtctldata_proto_msgTypes[105] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReloadSchemaKeyspaceRequest) String() string { +func (x *GetWorkflowsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReloadSchemaKeyspaceRequest) ProtoMessage() {} +func (*GetWorkflowsResponse) ProtoMessage() {} -func (x *ReloadSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[113] +func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[105] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6568,116 +6869,47 @@ func (x *ReloadSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReloadSchemaKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*ReloadSchemaKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{113} +// Deprecated: Use GetWorkflowsResponse.ProtoReflect.Descriptor instead. +func (*GetWorkflowsResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{105} } -func (x *ReloadSchemaKeyspaceRequest) GetKeyspace() string { +func (x *GetWorkflowsResponse) GetWorkflows() []*Workflow { if x != nil { - return x.Keyspace + return x.Workflows } - return "" + return nil } -func (x *ReloadSchemaKeyspaceRequest) GetWaitPosition() string { - if x != nil { - return x.WaitPosition - } - return "" -} - -func (x *ReloadSchemaKeyspaceRequest) GetIncludePrimary() bool { - if x != nil { - return x.IncludePrimary - } - return false -} - -func (x *ReloadSchemaKeyspaceRequest) GetConcurrency() uint32 { - if x != nil { - return x.Concurrency - } - return 0 -} - -type ReloadSchemaKeyspaceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Events []*logutil.Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` -} - -func (x *ReloadSchemaKeyspaceResponse) Reset() { - *x = ReloadSchemaKeyspaceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[114] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ReloadSchemaKeyspaceResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ReloadSchemaKeyspaceResponse) ProtoMessage() {} - -func (x *ReloadSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[114] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ReloadSchemaKeyspaceResponse.ProtoReflect.Descriptor instead. -func (*ReloadSchemaKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{114} -} - -func (x *ReloadSchemaKeyspaceResponse) GetEvents() []*logutil.Event { - if x != nil { - return x.Events - } - return nil -} - -type ReloadSchemaShardRequest struct { +type InitShardPrimaryRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - WaitPosition string `protobuf:"bytes,3,opt,name=wait_position,json=waitPosition,proto3" json:"wait_position,omitempty"` - IncludePrimary bool `protobuf:"varint,4,opt,name=include_primary,json=includePrimary,proto3" json:"include_primary,omitempty"` - // Concurrency is the maximum number of tablets to reload at one time. - Concurrency uint32 `protobuf:"varint,5,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + PrimaryElectTabletAlias *topodata.TabletAlias `protobuf:"bytes,3,opt,name=primary_elect_tablet_alias,json=primaryElectTabletAlias,proto3" json:"primary_elect_tablet_alias,omitempty"` + Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` + WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` } -func (x *ReloadSchemaShardRequest) Reset() { - *x = ReloadSchemaShardRequest{} +func (x *InitShardPrimaryRequest) Reset() { + *x = InitShardPrimaryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[115] + mi := &file_vtctldata_proto_msgTypes[106] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReloadSchemaShardRequest) String() string { +func (x *InitShardPrimaryRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReloadSchemaShardRequest) ProtoMessage() {} +func (*InitShardPrimaryRequest) ProtoMessage() {} -func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[115] +func (x *InitShardPrimaryRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[106] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6688,71 +6920,71 @@ func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReloadSchemaShardRequest.ProtoReflect.Descriptor instead. -func (*ReloadSchemaShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{115} +// Deprecated: Use InitShardPrimaryRequest.ProtoReflect.Descriptor instead. +func (*InitShardPrimaryRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{106} } -func (x *ReloadSchemaShardRequest) GetKeyspace() string { +func (x *InitShardPrimaryRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *ReloadSchemaShardRequest) GetShard() string { +func (x *InitShardPrimaryRequest) GetShard() string { if x != nil { return x.Shard } return "" } -func (x *ReloadSchemaShardRequest) GetWaitPosition() string { +func (x *InitShardPrimaryRequest) GetPrimaryElectTabletAlias() *topodata.TabletAlias { if x != nil { - return x.WaitPosition + return x.PrimaryElectTabletAlias } - return "" + return nil } -func (x *ReloadSchemaShardRequest) GetIncludePrimary() bool { +func (x *InitShardPrimaryRequest) GetForce() bool { if x != nil { - return x.IncludePrimary + return x.Force } return false } -func (x *ReloadSchemaShardRequest) GetConcurrency() uint32 { +func (x *InitShardPrimaryRequest) GetWaitReplicasTimeout() *vttime.Duration { if x != nil { - return x.Concurrency + return x.WaitReplicasTimeout } - return 0 + return nil } -type ReloadSchemaShardResponse struct { +type InitShardPrimaryResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Events []*logutil.Event `protobuf:"bytes,2,rep,name=events,proto3" json:"events,omitempty"` + Events []*logutil.Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` } -func (x *ReloadSchemaShardResponse) Reset() { - *x = ReloadSchemaShardResponse{} +func (x *InitShardPrimaryResponse) Reset() { + *x = InitShardPrimaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[116] + mi := &file_vtctldata_proto_msgTypes[107] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReloadSchemaShardResponse) String() string { +func (x *InitShardPrimaryResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReloadSchemaShardResponse) ProtoMessage() {} +func (*InitShardPrimaryResponse) ProtoMessage() {} -func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[116] +func (x *InitShardPrimaryResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[107] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6763,45 +6995,44 @@ func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReloadSchemaShardResponse.ProtoReflect.Descriptor instead. -func (*ReloadSchemaShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{116} +// Deprecated: Use InitShardPrimaryResponse.ProtoReflect.Descriptor instead. +func (*InitShardPrimaryResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{107} } -func (x *ReloadSchemaShardResponse) GetEvents() []*logutil.Event { +func (x *InitShardPrimaryResponse) GetEvents() []*logutil.Event { if x != nil { return x.Events } return nil } -type RemoveBackupRequest struct { +type LaunchSchemaMigrationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` } -func (x *RemoveBackupRequest) Reset() { - *x = RemoveBackupRequest{} +func (x *LaunchSchemaMigrationRequest) Reset() { + *x = LaunchSchemaMigrationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[117] + mi := &file_vtctldata_proto_msgTypes[108] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RemoveBackupRequest) String() string { +func (x *LaunchSchemaMigrationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RemoveBackupRequest) ProtoMessage() {} +func (*LaunchSchemaMigrationRequest) ProtoMessage() {} -func (x *RemoveBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[117] +func (x *LaunchSchemaMigrationRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[108] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6812,55 +7043,50 @@ func (x *RemoveBackupRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RemoveBackupRequest.ProtoReflect.Descriptor instead. -func (*RemoveBackupRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{117} +// Deprecated: Use LaunchSchemaMigrationRequest.ProtoReflect.Descriptor instead. +func (*LaunchSchemaMigrationRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{108} } -func (x *RemoveBackupRequest) GetKeyspace() string { +func (x *LaunchSchemaMigrationRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *RemoveBackupRequest) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *RemoveBackupRequest) GetName() string { +func (x *LaunchSchemaMigrationRequest) GetUuid() string { if x != nil { - return x.Name + return x.Uuid } return "" } -type RemoveBackupResponse struct { +type LaunchSchemaMigrationResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + RowsAffectedByShard map[string]uint64 `protobuf:"bytes,1,rep,name=rows_affected_by_shard,json=rowsAffectedByShard,proto3" json:"rows_affected_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } -func (x *RemoveBackupResponse) Reset() { - *x = RemoveBackupResponse{} +func (x *LaunchSchemaMigrationResponse) Reset() { + *x = LaunchSchemaMigrationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[118] + mi := &file_vtctldata_proto_msgTypes[109] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RemoveBackupResponse) String() string { +func (x *LaunchSchemaMigrationResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RemoveBackupResponse) ProtoMessage() {} +func (*LaunchSchemaMigrationResponse) ProtoMessage() {} -func (x *RemoveBackupResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[118] +func (x *LaunchSchemaMigrationResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[109] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6871,44 +7097,72 @@ func (x *RemoveBackupResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RemoveBackupResponse.ProtoReflect.Descriptor instead. -func (*RemoveBackupResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{118} +// Deprecated: Use LaunchSchemaMigrationResponse.ProtoReflect.Descriptor instead. +func (*LaunchSchemaMigrationResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{109} } -type RemoveKeyspaceCellRequest struct { +func (x *LaunchSchemaMigrationResponse) GetRowsAffectedByShard() map[string]uint64 { + if x != nil { + return x.RowsAffectedByShard + } + return nil +} + +type MoveTablesCreateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Cell string `protobuf:"bytes,2,opt,name=cell,proto3" json:"cell,omitempty"` - // Force proceeds even if the cell's topology server cannot be reached. This - // should only be set if a cell has been shut down entirely, and the global - // topology data just needs to be updated. - Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` - // Recursive also deletes all tablets in that cell belonging to the specified - // keyspace. - Recursive bool `protobuf:"varint,4,opt,name=recursive,proto3" json:"recursive,omitempty"` + // The necessary info gets passed on to each primary tablet involved + // in the workflow via the CreateVReplicationWorkflow tabletmanager RPC. + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + SourceKeyspace string `protobuf:"bytes,2,opt,name=source_keyspace,json=sourceKeyspace,proto3" json:"source_keyspace,omitempty"` + TargetKeyspace string `protobuf:"bytes,3,opt,name=target_keyspace,json=targetKeyspace,proto3" json:"target_keyspace,omitempty"` + Cells []string `protobuf:"bytes,4,rep,name=cells,proto3" json:"cells,omitempty"` + TabletTypes []topodata.TabletType `protobuf:"varint,5,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` + TabletSelectionPreference tabletmanagerdata.TabletSelectionPreference `protobuf:"varint,6,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` + SourceShards []string `protobuf:"bytes,7,rep,name=source_shards,json=sourceShards,proto3" json:"source_shards,omitempty"` + AllTables bool `protobuf:"varint,8,opt,name=all_tables,json=allTables,proto3" json:"all_tables,omitempty"` + IncludeTables []string `protobuf:"bytes,9,rep,name=include_tables,json=includeTables,proto3" json:"include_tables,omitempty"` + ExcludeTables []string `protobuf:"bytes,10,rep,name=exclude_tables,json=excludeTables,proto3" json:"exclude_tables,omitempty"` + // The name of the external cluster mounted in topo server. + ExternalClusterName string `protobuf:"bytes,11,opt,name=external_cluster_name,json=externalClusterName,proto3" json:"external_cluster_name,omitempty"` + // SourceTimeZone is the time zone in which datetimes on the source were stored, provided as an option in MoveTables + SourceTimeZone string `protobuf:"bytes,12,opt,name=source_time_zone,json=sourceTimeZone,proto3" json:"source_time_zone,omitempty"` + // OnDdl specifies the action to be taken when a DDL is encountered. + OnDdl string `protobuf:"bytes,13,opt,name=on_ddl,json=onDdl,proto3" json:"on_ddl,omitempty"` + // StopAfterCopy specifies if vreplication should be stopped after copying. + StopAfterCopy bool `protobuf:"varint,14,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` + // DropForeignKeys specifies if foreign key constraints should be elided on the target. + DropForeignKeys bool `protobuf:"varint,15,opt,name=drop_foreign_keys,json=dropForeignKeys,proto3" json:"drop_foreign_keys,omitempty"` + // DeferSecondaryKeys specifies if secondary keys should be created in one shot after table copy finishes. + DeferSecondaryKeys bool `protobuf:"varint,16,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"` + // Start the workflow after creating it. + AutoStart bool `protobuf:"varint,17,opt,name=auto_start,json=autoStart,proto3" json:"auto_start,omitempty"` + // NoRoutingRules is set to true if routing rules should not be created on the target when the workflow is created. + NoRoutingRules bool `protobuf:"varint,18,opt,name=no_routing_rules,json=noRoutingRules,proto3" json:"no_routing_rules,omitempty"` + // Run a single copy phase for the entire database. + AtomicCopy bool `protobuf:"varint,19,opt,name=atomic_copy,json=atomicCopy,proto3" json:"atomic_copy,omitempty"` } -func (x *RemoveKeyspaceCellRequest) Reset() { - *x = RemoveKeyspaceCellRequest{} +func (x *MoveTablesCreateRequest) Reset() { + *x = MoveTablesCreateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[119] + mi := &file_vtctldata_proto_msgTypes[110] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RemoveKeyspaceCellRequest) String() string { +func (x *MoveTablesCreateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RemoveKeyspaceCellRequest) ProtoMessage() {} +func (*MoveTablesCreateRequest) ProtoMessage() {} -func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[119] +func (x *MoveTablesCreateRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[110] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6919,226 +7173,170 @@ func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RemoveKeyspaceCellRequest.ProtoReflect.Descriptor instead. -func (*RemoveKeyspaceCellRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{119} +// Deprecated: Use MoveTablesCreateRequest.ProtoReflect.Descriptor instead. +func (*MoveTablesCreateRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{110} } -func (x *RemoveKeyspaceCellRequest) GetKeyspace() string { +func (x *MoveTablesCreateRequest) GetWorkflow() string { if x != nil { - return x.Keyspace + return x.Workflow } return "" } -func (x *RemoveKeyspaceCellRequest) GetCell() string { +func (x *MoveTablesCreateRequest) GetSourceKeyspace() string { if x != nil { - return x.Cell + return x.SourceKeyspace } return "" } -func (x *RemoveKeyspaceCellRequest) GetForce() bool { +func (x *MoveTablesCreateRequest) GetTargetKeyspace() string { if x != nil { - return x.Force + return x.TargetKeyspace } - return false + return "" } -func (x *RemoveKeyspaceCellRequest) GetRecursive() bool { +func (x *MoveTablesCreateRequest) GetCells() []string { if x != nil { - return x.Recursive + return x.Cells } - return false + return nil } -type RemoveKeyspaceCellResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *MoveTablesCreateRequest) GetTabletTypes() []topodata.TabletType { + if x != nil { + return x.TabletTypes + } + return nil } -func (x *RemoveKeyspaceCellResponse) Reset() { - *x = RemoveKeyspaceCellResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[120] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *MoveTablesCreateRequest) GetTabletSelectionPreference() tabletmanagerdata.TabletSelectionPreference { + if x != nil { + return x.TabletSelectionPreference } + return tabletmanagerdata.TabletSelectionPreference(0) } -func (x *RemoveKeyspaceCellResponse) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *MoveTablesCreateRequest) GetSourceShards() []string { + if x != nil { + return x.SourceShards + } + return nil } -func (*RemoveKeyspaceCellResponse) ProtoMessage() {} - -func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[120] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *MoveTablesCreateRequest) GetAllTables() bool { + if x != nil { + return x.AllTables } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveKeyspaceCellResponse.ProtoReflect.Descriptor instead. -func (*RemoveKeyspaceCellResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{120} -} - -type RemoveShardCellRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` - Cell string `protobuf:"bytes,3,opt,name=cell,proto3" json:"cell,omitempty"` - // Force proceeds even if the cell's topology server cannot be reached. This - // should only be set if a cell has been shut down entirely, and the global - // topology data just needs to be updated. - Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` - // Recursive also deletes all tablets in that cell belonging to the specified - // keyspace and shard. - Recursive bool `protobuf:"varint,5,opt,name=recursive,proto3" json:"recursive,omitempty"` + return false } -func (x *RemoveShardCellRequest) Reset() { - *x = RemoveShardCellRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[121] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *MoveTablesCreateRequest) GetIncludeTables() []string { + if x != nil { + return x.IncludeTables } + return nil } -func (x *RemoveShardCellRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveShardCellRequest) ProtoMessage() {} - -func (x *RemoveShardCellRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[121] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *MoveTablesCreateRequest) GetExcludeTables() []string { + if x != nil { + return x.ExcludeTables } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveShardCellRequest.ProtoReflect.Descriptor instead. -func (*RemoveShardCellRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{121} + return nil } -func (x *RemoveShardCellRequest) GetKeyspace() string { +func (x *MoveTablesCreateRequest) GetExternalClusterName() string { if x != nil { - return x.Keyspace + return x.ExternalClusterName } return "" } -func (x *RemoveShardCellRequest) GetShardName() string { +func (x *MoveTablesCreateRequest) GetSourceTimeZone() string { if x != nil { - return x.ShardName + return x.SourceTimeZone } return "" } -func (x *RemoveShardCellRequest) GetCell() string { +func (x *MoveTablesCreateRequest) GetOnDdl() string { if x != nil { - return x.Cell + return x.OnDdl } return "" } -func (x *RemoveShardCellRequest) GetForce() bool { +func (x *MoveTablesCreateRequest) GetStopAfterCopy() bool { if x != nil { - return x.Force + return x.StopAfterCopy } return false } -func (x *RemoveShardCellRequest) GetRecursive() bool { +func (x *MoveTablesCreateRequest) GetDropForeignKeys() bool { if x != nil { - return x.Recursive + return x.DropForeignKeys } return false } -type RemoveShardCellResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *RemoveShardCellResponse) Reset() { - *x = RemoveShardCellResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[122] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *MoveTablesCreateRequest) GetDeferSecondaryKeys() bool { + if x != nil { + return x.DeferSecondaryKeys } + return false } -func (x *RemoveShardCellResponse) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *MoveTablesCreateRequest) GetAutoStart() bool { + if x != nil { + return x.AutoStart + } + return false } -func (*RemoveShardCellResponse) ProtoMessage() {} - -func (x *RemoveShardCellResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[122] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *MoveTablesCreateRequest) GetNoRoutingRules() bool { + if x != nil { + return x.NoRoutingRules } - return mi.MessageOf(x) + return false } -// Deprecated: Use RemoveShardCellResponse.ProtoReflect.Descriptor instead. -func (*RemoveShardCellResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{122} +func (x *MoveTablesCreateRequest) GetAtomicCopy() bool { + if x != nil { + return x.AtomicCopy + } + return false } -type ReparentTabletRequest struct { +type MoveTablesCreateResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Tablet is the alias of the tablet that should be reparented under the - // current shard primary. - Tablet *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` + Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` + Details []*MoveTablesCreateResponse_TabletInfo `protobuf:"bytes,2,rep,name=details,proto3" json:"details,omitempty"` } -func (x *ReparentTabletRequest) Reset() { - *x = ReparentTabletRequest{} +func (x *MoveTablesCreateResponse) Reset() { + *x = MoveTablesCreateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[123] + mi := &file_vtctldata_proto_msgTypes[111] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReparentTabletRequest) String() string { +func (x *MoveTablesCreateResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReparentTabletRequest) ProtoMessage() {} +func (*MoveTablesCreateResponse) ProtoMessage() {} -func (x *ReparentTabletRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[123] +func (x *MoveTablesCreateResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[111] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7149,48 +7347,55 @@ func (x *ReparentTabletRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReparentTabletRequest.ProtoReflect.Descriptor instead. -func (*ReparentTabletRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{123} +// Deprecated: Use MoveTablesCreateResponse.ProtoReflect.Descriptor instead. +func (*MoveTablesCreateResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{111} } -func (x *ReparentTabletRequest) GetTablet() *topodata.TabletAlias { +func (x *MoveTablesCreateResponse) GetSummary() string { if x != nil { - return x.Tablet + return x.Summary + } + return "" +} + +func (x *MoveTablesCreateResponse) GetDetails() []*MoveTablesCreateResponse_TabletInfo { + if x != nil { + return x.Details } return nil } -type ReparentTabletResponse struct { +type MoveTablesCompleteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the name of the keyspace the tablet was reparented in. - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Shard is the name of the shard the tablet was reparented in. - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - // Primary is the alias of the tablet that the tablet was reparented under. - Primary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=primary,proto3" json:"primary,omitempty"` + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + TargetKeyspace string `protobuf:"bytes,3,opt,name=target_keyspace,json=targetKeyspace,proto3" json:"target_keyspace,omitempty"` + KeepData bool `protobuf:"varint,4,opt,name=keep_data,json=keepData,proto3" json:"keep_data,omitempty"` + KeepRoutingRules bool `protobuf:"varint,5,opt,name=keep_routing_rules,json=keepRoutingRules,proto3" json:"keep_routing_rules,omitempty"` + RenameTables bool `protobuf:"varint,6,opt,name=rename_tables,json=renameTables,proto3" json:"rename_tables,omitempty"` + DryRun bool `protobuf:"varint,7,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` } -func (x *ReparentTabletResponse) Reset() { - *x = ReparentTabletResponse{} +func (x *MoveTablesCompleteRequest) Reset() { + *x = MoveTablesCompleteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[124] + mi := &file_vtctldata_proto_msgTypes[112] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReparentTabletResponse) String() string { +func (x *MoveTablesCompleteRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReparentTabletResponse) ProtoMessage() {} +func (*MoveTablesCompleteRequest) ProtoMessage() {} -func (x *ReparentTabletResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[124] +func (x *MoveTablesCompleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[112] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7201,66 +7406,79 @@ func (x *ReparentTabletResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReparentTabletResponse.ProtoReflect.Descriptor instead. -func (*ReparentTabletResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{124} +// Deprecated: Use MoveTablesCompleteRequest.ProtoReflect.Descriptor instead. +func (*MoveTablesCompleteRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{112} } -func (x *ReparentTabletResponse) GetKeyspace() string { +func (x *MoveTablesCompleteRequest) GetWorkflow() string { if x != nil { - return x.Keyspace + return x.Workflow } return "" } -func (x *ReparentTabletResponse) GetShard() string { +func (x *MoveTablesCompleteRequest) GetTargetKeyspace() string { if x != nil { - return x.Shard + return x.TargetKeyspace } return "" } -func (x *ReparentTabletResponse) GetPrimary() *topodata.TabletAlias { +func (x *MoveTablesCompleteRequest) GetKeepData() bool { if x != nil { - return x.Primary + return x.KeepData } - return nil + return false } -type RestoreFromBackupRequest struct { +func (x *MoveTablesCompleteRequest) GetKeepRoutingRules() bool { + if x != nil { + return x.KeepRoutingRules + } + return false +} + +func (x *MoveTablesCompleteRequest) GetRenameTables() bool { + if x != nil { + return x.RenameTables + } + return false +} + +func (x *MoveTablesCompleteRequest) GetDryRun() bool { + if x != nil { + return x.DryRun + } + return false +} + +type MoveTablesCompleteResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - // BackupTime, if set, will use the backup taken most closely at or before - // this time. If nil, the latest backup will be restored on the tablet. - BackupTime *vttime.Time `protobuf:"bytes,2,opt,name=backup_time,json=backupTime,proto3" json:"backup_time,omitempty"` - // RestoreToPos indicates a position for a point-in-time recovery. The recovery - // is expected to utilize one full backup, followed by zero or more incremental backups, - // that reach the precise desired position - RestoreToPos string `protobuf:"bytes,3,opt,name=restore_to_pos,json=restoreToPos,proto3" json:"restore_to_pos,omitempty"` - // Dry run does not actually performs the restore, but validates the steps and availability of backups - DryRun bool `protobuf:"varint,4,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` + Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` + DryRunResults []string `protobuf:"bytes,2,rep,name=dry_run_results,json=dryRunResults,proto3" json:"dry_run_results,omitempty"` } -func (x *RestoreFromBackupRequest) Reset() { - *x = RestoreFromBackupRequest{} +func (x *MoveTablesCompleteResponse) Reset() { + *x = MoveTablesCompleteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[125] + mi := &file_vtctldata_proto_msgTypes[113] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RestoreFromBackupRequest) String() string { +func (x *MoveTablesCompleteResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RestoreFromBackupRequest) ProtoMessage() {} +func (*MoveTablesCompleteResponse) ProtoMessage() {} -func (x *RestoreFromBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[125] +func (x *MoveTablesCompleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[113] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7271,68 +7489,50 @@ func (x *RestoreFromBackupRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RestoreFromBackupRequest.ProtoReflect.Descriptor instead. -func (*RestoreFromBackupRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{125} -} - -func (x *RestoreFromBackupRequest) GetTabletAlias() *topodata.TabletAlias { - if x != nil { - return x.TabletAlias - } - return nil -} - -func (x *RestoreFromBackupRequest) GetBackupTime() *vttime.Time { - if x != nil { - return x.BackupTime - } - return nil +// Deprecated: Use MoveTablesCompleteResponse.ProtoReflect.Descriptor instead. +func (*MoveTablesCompleteResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{113} } -func (x *RestoreFromBackupRequest) GetRestoreToPos() string { +func (x *MoveTablesCompleteResponse) GetSummary() string { if x != nil { - return x.RestoreToPos + return x.Summary } return "" } -func (x *RestoreFromBackupRequest) GetDryRun() bool { +func (x *MoveTablesCompleteResponse) GetDryRunResults() []string { if x != nil { - return x.DryRun + return x.DryRunResults } - return false + return nil } -type RestoreFromBackupResponse struct { +type PingTabletRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // TabletAlias is the alias of the tablet doing the restore. TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` - Event *logutil.Event `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` } -func (x *RestoreFromBackupResponse) Reset() { - *x = RestoreFromBackupResponse{} +func (x *PingTabletRequest) Reset() { + *x = PingTabletRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[126] + mi := &file_vtctldata_proto_msgTypes[114] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RestoreFromBackupResponse) String() string { +func (x *PingTabletRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RestoreFromBackupResponse) ProtoMessage() {} +func (*PingTabletRequest) ProtoMessage() {} -func (x *RestoreFromBackupResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[126] +func (x *PingTabletRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[114] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7343,64 +7543,41 @@ func (x *RestoreFromBackupResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RestoreFromBackupResponse.ProtoReflect.Descriptor instead. -func (*RestoreFromBackupResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{126} +// Deprecated: Use PingTabletRequest.ProtoReflect.Descriptor instead. +func (*PingTabletRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{114} } -func (x *RestoreFromBackupResponse) GetTabletAlias() *topodata.TabletAlias { +func (x *PingTabletRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { return x.TabletAlias } return nil } -func (x *RestoreFromBackupResponse) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *RestoreFromBackupResponse) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *RestoreFromBackupResponse) GetEvent() *logutil.Event { - if x != nil { - return x.Event - } - return nil -} - -type RunHealthCheckRequest struct { +type PingTabletResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *RunHealthCheckRequest) Reset() { - *x = RunHealthCheckRequest{} +func (x *PingTabletResponse) Reset() { + *x = PingTabletResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[127] + mi := &file_vtctldata_proto_msgTypes[115] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RunHealthCheckRequest) String() string { +func (x *PingTabletResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RunHealthCheckRequest) ProtoMessage() {} +func (*PingTabletResponse) ProtoMessage() {} -func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[127] +func (x *PingTabletResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[115] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7411,41 +7588,57 @@ func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RunHealthCheckRequest.ProtoReflect.Descriptor instead. -func (*RunHealthCheckRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{127} -} - -func (x *RunHealthCheckRequest) GetTabletAlias() *topodata.TabletAlias { - if x != nil { - return x.TabletAlias - } - return nil +// Deprecated: Use PingTabletResponse.ProtoReflect.Descriptor instead. +func (*PingTabletResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{115} } -type RunHealthCheckResponse struct { +type PlannedReparentShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + // Keyspace is the name of the keyspace to perform the Planned Reparent in. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the name of the shard to perform teh Planned Reparent in. + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // NewPrimary is the alias of the tablet to promote to shard primary. If not + // specified, the vtctld will select the most up-to-date candidate to promote. + // + // It is an error to set NewPrimary and AvoidPrimary to the same alias. + NewPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=new_primary,json=newPrimary,proto3" json:"new_primary,omitempty"` + // AvoidPrimary is the alias of the tablet to demote. In other words, + // specifying an AvoidPrimary alias tells the vtctld to promote any replica + // other than this one. A shard whose current primary is not this one is then + // a no-op. + // + // It is an error to set NewPrimary and AvoidPrimary to the same alias. + AvoidPrimary *topodata.TabletAlias `protobuf:"bytes,4,opt,name=avoid_primary,json=avoidPrimary,proto3" json:"avoid_primary,omitempty"` + // WaitReplicasTimeout is the duration of time to wait for replicas to catch + // up in replication both before and after the reparent. The timeout is not + // cumulative across both wait periods, meaning that the replicas have + // WaitReplicasTimeout time to catch up before the reparent, and an additional + // WaitReplicasTimeout time to catch up after the reparent. + WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` } -func (x *RunHealthCheckResponse) Reset() { - *x = RunHealthCheckResponse{} +func (x *PlannedReparentShardRequest) Reset() { + *x = PlannedReparentShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[128] + mi := &file_vtctldata_proto_msgTypes[116] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *RunHealthCheckResponse) String() string { +func (x *PlannedReparentShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RunHealthCheckResponse) ProtoMessage() {} +func (*PlannedReparentShardRequest) ProtoMessage() {} -func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[128] +func (x *PlannedReparentShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[116] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7456,37 +7649,80 @@ func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RunHealthCheckResponse.ProtoReflect.Descriptor instead. -func (*RunHealthCheckResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{128} +// Deprecated: Use PlannedReparentShardRequest.ProtoReflect.Descriptor instead. +func (*PlannedReparentShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{116} } -type SetKeyspaceDurabilityPolicyRequest struct { +func (x *PlannedReparentShardRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *PlannedReparentShardRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *PlannedReparentShardRequest) GetNewPrimary() *topodata.TabletAlias { + if x != nil { + return x.NewPrimary + } + return nil +} + +func (x *PlannedReparentShardRequest) GetAvoidPrimary() *topodata.TabletAlias { + if x != nil { + return x.AvoidPrimary + } + return nil +} + +func (x *PlannedReparentShardRequest) GetWaitReplicasTimeout() *vttime.Duration { + if x != nil { + return x.WaitReplicasTimeout + } + return nil +} + +type PlannedReparentShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - DurabilityPolicy string `protobuf:"bytes,2,opt,name=durability_policy,json=durabilityPolicy,proto3" json:"durability_policy,omitempty"` + // Keyspace is the name of the keyspace the Planned Reparent took place in. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the name of the shard the Planned Reparent took place in. + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // PromotedPrimary is the alias of the tablet that was promoted to shard + // primary. If NewPrimary was set in the request, then this will be the same + // alias. Otherwise, it will be the alias of the tablet found to be most + // up-to-date. + PromotedPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=promoted_primary,json=promotedPrimary,proto3" json:"promoted_primary,omitempty"` + Events []*logutil.Event `protobuf:"bytes,4,rep,name=events,proto3" json:"events,omitempty"` } -func (x *SetKeyspaceDurabilityPolicyRequest) Reset() { - *x = SetKeyspaceDurabilityPolicyRequest{} +func (x *PlannedReparentShardResponse) Reset() { + *x = PlannedReparentShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[129] + mi := &file_vtctldata_proto_msgTypes[117] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetKeyspaceDurabilityPolicyRequest) String() string { +func (x *PlannedReparentShardResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetKeyspaceDurabilityPolicyRequest) ProtoMessage() {} +func (*PlannedReparentShardResponse) ProtoMessage() {} -func (x *SetKeyspaceDurabilityPolicyRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[129] +func (x *PlannedReparentShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[117] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7497,51 +7733,68 @@ func (x *SetKeyspaceDurabilityPolicyRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use SetKeyspaceDurabilityPolicyRequest.ProtoReflect.Descriptor instead. -func (*SetKeyspaceDurabilityPolicyRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{129} +// Deprecated: Use PlannedReparentShardResponse.ProtoReflect.Descriptor instead. +func (*PlannedReparentShardResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{117} } -func (x *SetKeyspaceDurabilityPolicyRequest) GetKeyspace() string { +func (x *PlannedReparentShardResponse) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *SetKeyspaceDurabilityPolicyRequest) GetDurabilityPolicy() string { +func (x *PlannedReparentShardResponse) GetShard() string { if x != nil { - return x.DurabilityPolicy + return x.Shard } return "" } -type SetKeyspaceDurabilityPolicyResponse struct { +func (x *PlannedReparentShardResponse) GetPromotedPrimary() *topodata.TabletAlias { + if x != nil { + return x.PromotedPrimary + } + return nil +} + +func (x *PlannedReparentShardResponse) GetEvents() []*logutil.Event { + if x != nil { + return x.Events + } + return nil +} + +type RebuildKeyspaceGraphRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspace is the updated keyspace record. - Keyspace *topodata.Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` + // AllowPartial, when set, allows a SNAPSHOT keyspace to serve with an + // incomplete set of shards. It is ignored for all other keyspace types. + AllowPartial bool `protobuf:"varint,3,opt,name=allow_partial,json=allowPartial,proto3" json:"allow_partial,omitempty"` } -func (x *SetKeyspaceDurabilityPolicyResponse) Reset() { - *x = SetKeyspaceDurabilityPolicyResponse{} +func (x *RebuildKeyspaceGraphRequest) Reset() { + *x = RebuildKeyspaceGraphRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[130] + mi := &file_vtctldata_proto_msgTypes[118] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetKeyspaceDurabilityPolicyResponse) String() string { +func (x *RebuildKeyspaceGraphRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetKeyspaceDurabilityPolicyResponse) ProtoMessage() {} +func (*RebuildKeyspaceGraphRequest) ProtoMessage() {} -func (x *SetKeyspaceDurabilityPolicyResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[130] +func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[118] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7552,47 +7805,55 @@ func (x *SetKeyspaceDurabilityPolicyResponse) ProtoReflect() protoreflect.Messag return mi.MessageOf(x) } -// Deprecated: Use SetKeyspaceDurabilityPolicyResponse.ProtoReflect.Descriptor instead. -func (*SetKeyspaceDurabilityPolicyResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{130} +// Deprecated: Use RebuildKeyspaceGraphRequest.ProtoReflect.Descriptor instead. +func (*RebuildKeyspaceGraphRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{118} } -func (x *SetKeyspaceDurabilityPolicyResponse) GetKeyspace() *topodata.Keyspace { +func (x *RebuildKeyspaceGraphRequest) GetKeyspace() string { if x != nil { return x.Keyspace } + return "" +} + +func (x *RebuildKeyspaceGraphRequest) GetCells() []string { + if x != nil { + return x.Cells + } return nil } -type SetKeyspaceServedFromRequest struct { +func (x *RebuildKeyspaceGraphRequest) GetAllowPartial() bool { + if x != nil { + return x.AllowPartial + } + return false +} + +type RebuildKeyspaceGraphResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - TabletType topodata.TabletType `protobuf:"varint,2,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` - Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` - Remove bool `protobuf:"varint,4,opt,name=remove,proto3" json:"remove,omitempty"` - SourceKeyspace string `protobuf:"bytes,5,opt,name=source_keyspace,json=sourceKeyspace,proto3" json:"source_keyspace,omitempty"` } -func (x *SetKeyspaceServedFromRequest) Reset() { - *x = SetKeyspaceServedFromRequest{} +func (x *RebuildKeyspaceGraphResponse) Reset() { + *x = RebuildKeyspaceGraphResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[131] + mi := &file_vtctldata_proto_msgTypes[119] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetKeyspaceServedFromRequest) String() string { +func (x *RebuildKeyspaceGraphResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetKeyspaceServedFromRequest) ProtoMessage() {} +func (*RebuildKeyspaceGraphResponse) ProtoMessage() {} -func (x *SetKeyspaceServedFromRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[131] +func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[119] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7603,72 +7864,83 @@ func (x *SetKeyspaceServedFromRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetKeyspaceServedFromRequest.ProtoReflect.Descriptor instead. -func (*SetKeyspaceServedFromRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{131} +// Deprecated: Use RebuildKeyspaceGraphResponse.ProtoReflect.Descriptor instead. +func (*RebuildKeyspaceGraphResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{119} } -func (x *SetKeyspaceServedFromRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" +type RebuildVSchemaGraphRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Cells specifies the cells to rebuild the SrvVSchema objects for. If empty, + // RebuildVSchemaGraph rebuilds the SrvVSchema for every cell in the topo. + Cells []string `protobuf:"bytes,1,rep,name=cells,proto3" json:"cells,omitempty"` } -func (x *SetKeyspaceServedFromRequest) GetTabletType() topodata.TabletType { - if x != nil { - return x.TabletType +func (x *RebuildVSchemaGraphRequest) Reset() { + *x = RebuildVSchemaGraphRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[120] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return topodata.TabletType(0) } -func (x *SetKeyspaceServedFromRequest) GetCells() []string { - if x != nil { - return x.Cells - } - return nil +func (x *RebuildVSchemaGraphRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *SetKeyspaceServedFromRequest) GetRemove() bool { - if x != nil { - return x.Remove +func (*RebuildVSchemaGraphRequest) ProtoMessage() {} + +func (x *RebuildVSchemaGraphRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[120] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return false + return mi.MessageOf(x) } -func (x *SetKeyspaceServedFromRequest) GetSourceKeyspace() string { +// Deprecated: Use RebuildVSchemaGraphRequest.ProtoReflect.Descriptor instead. +func (*RebuildVSchemaGraphRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{120} +} + +func (x *RebuildVSchemaGraphRequest) GetCells() []string { if x != nil { - return x.SourceKeyspace + return x.Cells } - return "" + return nil } -type SetKeyspaceServedFromResponse struct { +type RebuildVSchemaGraphResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - // Keyspace is the updated keyspace record. - Keyspace *topodata.Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *SetKeyspaceServedFromResponse) Reset() { - *x = SetKeyspaceServedFromResponse{} +func (x *RebuildVSchemaGraphResponse) Reset() { + *x = RebuildVSchemaGraphResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[132] + mi := &file_vtctldata_proto_msgTypes[121] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetKeyspaceServedFromResponse) String() string { +func (x *RebuildVSchemaGraphResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetKeyspaceServedFromResponse) ProtoMessage() {} +func (*RebuildVSchemaGraphResponse) ProtoMessage() {} -func (x *SetKeyspaceServedFromResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[132] +func (x *RebuildVSchemaGraphResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[121] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7679,44 +7951,36 @@ func (x *SetKeyspaceServedFromResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetKeyspaceServedFromResponse.ProtoReflect.Descriptor instead. -func (*SetKeyspaceServedFromResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{132} -} - -func (x *SetKeyspaceServedFromResponse) GetKeyspace() *topodata.Keyspace { - if x != nil { - return x.Keyspace - } - return nil +// Deprecated: Use RebuildVSchemaGraphResponse.ProtoReflect.Descriptor instead. +func (*RebuildVSchemaGraphResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{121} } -type SetKeyspaceShardingInfoRequest struct { +type RefreshStateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *SetKeyspaceShardingInfoRequest) Reset() { - *x = SetKeyspaceShardingInfoRequest{} +func (x *RefreshStateRequest) Reset() { + *x = RefreshStateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[133] + mi := &file_vtctldata_proto_msgTypes[122] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetKeyspaceShardingInfoRequest) String() string { +func (x *RefreshStateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetKeyspaceShardingInfoRequest) ProtoMessage() {} +func (*RefreshStateRequest) ProtoMessage() {} -func (x *SetKeyspaceShardingInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[133] +func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[122] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7727,51 +7991,41 @@ func (x *SetKeyspaceShardingInfoRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetKeyspaceShardingInfoRequest.ProtoReflect.Descriptor instead. -func (*SetKeyspaceShardingInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{133} -} - -func (x *SetKeyspaceShardingInfoRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" +// Deprecated: Use RefreshStateRequest.ProtoReflect.Descriptor instead. +func (*RefreshStateRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{122} } -func (x *SetKeyspaceShardingInfoRequest) GetForce() bool { +func (x *RefreshStateRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Force + return x.TabletAlias } - return false + return nil } -type SetKeyspaceShardingInfoResponse struct { +type RefreshStateResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - // Keyspace is the updated keyspace record. - Keyspace *topodata.Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *SetKeyspaceShardingInfoResponse) Reset() { - *x = SetKeyspaceShardingInfoResponse{} +func (x *RefreshStateResponse) Reset() { + *x = RefreshStateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[134] + mi := &file_vtctldata_proto_msgTypes[123] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetKeyspaceShardingInfoResponse) String() string { +func (x *RefreshStateResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetKeyspaceShardingInfoResponse) ProtoMessage() {} +func (*RefreshStateResponse) ProtoMessage() {} -func (x *SetKeyspaceShardingInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[134] +func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[123] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7782,45 +8036,38 @@ func (x *SetKeyspaceShardingInfoResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetKeyspaceShardingInfoResponse.ProtoReflect.Descriptor instead. -func (*SetKeyspaceShardingInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{134} -} - -func (x *SetKeyspaceShardingInfoResponse) GetKeyspace() *topodata.Keyspace { - if x != nil { - return x.Keyspace - } - return nil +// Deprecated: Use RefreshStateResponse.ProtoReflect.Descriptor instead. +func (*RefreshStateResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{123} } -type SetShardIsPrimaryServingRequest struct { +type RefreshStateByShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - IsServing bool `protobuf:"varint,3,opt,name=is_serving,json=isServing,proto3" json:"is_serving,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` } -func (x *SetShardIsPrimaryServingRequest) Reset() { - *x = SetShardIsPrimaryServingRequest{} +func (x *RefreshStateByShardRequest) Reset() { + *x = RefreshStateByShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[135] + mi := &file_vtctldata_proto_msgTypes[124] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetShardIsPrimaryServingRequest) String() string { +func (x *RefreshStateByShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetShardIsPrimaryServingRequest) ProtoMessage() {} +func (*RefreshStateByShardRequest) ProtoMessage() {} -func (x *SetShardIsPrimaryServingRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[135] +func (x *RefreshStateByShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[124] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7831,58 +8078,59 @@ func (x *SetShardIsPrimaryServingRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetShardIsPrimaryServingRequest.ProtoReflect.Descriptor instead. -func (*SetShardIsPrimaryServingRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{135} +// Deprecated: Use RefreshStateByShardRequest.ProtoReflect.Descriptor instead. +func (*RefreshStateByShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{124} } -func (x *SetShardIsPrimaryServingRequest) GetKeyspace() string { +func (x *RefreshStateByShardRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *SetShardIsPrimaryServingRequest) GetShard() string { +func (x *RefreshStateByShardRequest) GetShard() string { if x != nil { return x.Shard } return "" } -func (x *SetShardIsPrimaryServingRequest) GetIsServing() bool { +func (x *RefreshStateByShardRequest) GetCells() []string { if x != nil { - return x.IsServing + return x.Cells } - return false + return nil } -type SetShardIsPrimaryServingResponse struct { +type RefreshStateByShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Shard is the updated shard record. - Shard *topodata.Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` + IsPartialRefresh bool `protobuf:"varint,1,opt,name=is_partial_refresh,json=isPartialRefresh,proto3" json:"is_partial_refresh,omitempty"` + // This explains why we had a partial refresh (if we did) + PartialRefreshDetails string `protobuf:"bytes,2,opt,name=partial_refresh_details,json=partialRefreshDetails,proto3" json:"partial_refresh_details,omitempty"` } -func (x *SetShardIsPrimaryServingResponse) Reset() { - *x = SetShardIsPrimaryServingResponse{} +func (x *RefreshStateByShardResponse) Reset() { + *x = RefreshStateByShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[136] + mi := &file_vtctldata_proto_msgTypes[125] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetShardIsPrimaryServingResponse) String() string { +func (x *RefreshStateByShardResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetShardIsPrimaryServingResponse) ProtoMessage() {} +func (*RefreshStateByShardResponse) ProtoMessage() {} -func (x *SetShardIsPrimaryServingResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[136] +func (x *RefreshStateByShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[125] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7893,63 +8141,50 @@ func (x *SetShardIsPrimaryServingResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetShardIsPrimaryServingResponse.ProtoReflect.Descriptor instead. -func (*SetShardIsPrimaryServingResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{136} +// Deprecated: Use RefreshStateByShardResponse.ProtoReflect.Descriptor instead. +func (*RefreshStateByShardResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{125} } -func (x *SetShardIsPrimaryServingResponse) GetShard() *topodata.Shard { +func (x *RefreshStateByShardResponse) GetIsPartialRefresh() bool { if x != nil { - return x.Shard + return x.IsPartialRefresh } - return nil + return false } -type SetShardTabletControlRequest struct { +func (x *RefreshStateByShardResponse) GetPartialRefreshDetails() string { + if x != nil { + return x.PartialRefreshDetails + } + return "" +} + +type ReloadSchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - TabletType topodata.TabletType `protobuf:"varint,3,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` - Cells []string `protobuf:"bytes,4,rep,name=cells,proto3" json:"cells,omitempty"` - // DeniedTables updates the list of denied tables the shard will serve for - // the given tablet type. This is useful to fix tables that are being blocked - // after a MoveTables operation. - // - // NOTE: Setting this field will cause DisableQueryService to be ignored. - DeniedTables []string `protobuf:"bytes,5,rep,name=denied_tables,json=deniedTables,proto3" json:"denied_tables,omitempty"` - // DisableQueryService instructs whether to enable the query service on - // tablets of the given type in the shard. This is useful to fix Reshard - // operations gone awry. - // - // NOTE: this is ignored if DeniedTables is not empty. - DisableQueryService bool `protobuf:"varint,6,opt,name=disable_query_service,json=disableQueryService,proto3" json:"disable_query_service,omitempty"` - // Remove removes the ShardTabletControl record entirely. If set, this takes - // precedence over DeniedTables and DisableQueryService fields, and is useful - // to manually remove serving restrictions after a completed MoveTables - // operation. - Remove bool `protobuf:"varint,7,opt,name=remove,proto3" json:"remove,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *SetShardTabletControlRequest) Reset() { - *x = SetShardTabletControlRequest{} +func (x *ReloadSchemaRequest) Reset() { + *x = ReloadSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[137] + mi := &file_vtctldata_proto_msgTypes[126] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetShardTabletControlRequest) String() string { +func (x *ReloadSchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetShardTabletControlRequest) ProtoMessage() {} +func (*ReloadSchemaRequest) ProtoMessage() {} -func (x *SetShardTabletControlRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[137] +func (x *ReloadSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[126] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7960,86 +8195,41 @@ func (x *SetShardTabletControlRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetShardTabletControlRequest.ProtoReflect.Descriptor instead. -func (*SetShardTabletControlRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{137} -} - -func (x *SetShardTabletControlRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *SetShardTabletControlRequest) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *SetShardTabletControlRequest) GetTabletType() topodata.TabletType { - if x != nil { - return x.TabletType - } - return topodata.TabletType(0) -} - -func (x *SetShardTabletControlRequest) GetCells() []string { - if x != nil { - return x.Cells - } - return nil +// Deprecated: Use ReloadSchemaRequest.ProtoReflect.Descriptor instead. +func (*ReloadSchemaRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{126} } -func (x *SetShardTabletControlRequest) GetDeniedTables() []string { +func (x *ReloadSchemaRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.DeniedTables + return x.TabletAlias } return nil } -func (x *SetShardTabletControlRequest) GetDisableQueryService() bool { - if x != nil { - return x.DisableQueryService - } - return false -} - -func (x *SetShardTabletControlRequest) GetRemove() bool { - if x != nil { - return x.Remove - } - return false -} - -type SetShardTabletControlResponse struct { +type ReloadSchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - // Shard is the updated shard record. - Shard *topodata.Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` } -func (x *SetShardTabletControlResponse) Reset() { - *x = SetShardTabletControlResponse{} +func (x *ReloadSchemaResponse) Reset() { + *x = ReloadSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[138] + mi := &file_vtctldata_proto_msgTypes[127] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetShardTabletControlResponse) String() string { +func (x *ReloadSchemaResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetShardTabletControlResponse) ProtoMessage() {} +func (*ReloadSchemaResponse) ProtoMessage() {} -func (x *SetShardTabletControlResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[138] +func (x *ReloadSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[127] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8050,44 +8240,42 @@ func (x *SetShardTabletControlResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetShardTabletControlResponse.ProtoReflect.Descriptor instead. -func (*SetShardTabletControlResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{138} -} - -func (x *SetShardTabletControlResponse) GetShard() *topodata.Shard { - if x != nil { - return x.Shard - } - return nil +// Deprecated: Use ReloadSchemaResponse.ProtoReflect.Descriptor instead. +func (*ReloadSchemaResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{127} } -type SetWritableRequest struct { +type ReloadSchemaKeyspaceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - Writable bool `protobuf:"varint,2,opt,name=writable,proto3" json:"writable,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + WaitPosition string `protobuf:"bytes,2,opt,name=wait_position,json=waitPosition,proto3" json:"wait_position,omitempty"` + IncludePrimary bool `protobuf:"varint,3,opt,name=include_primary,json=includePrimary,proto3" json:"include_primary,omitempty"` + // Concurrency is the global concurrency across all shards in the keyspace + // (so, at most this many tablets will be reloaded across the keyspace at any + // given point). + Concurrency uint32 `protobuf:"varint,4,opt,name=concurrency,proto3" json:"concurrency,omitempty"` } -func (x *SetWritableRequest) Reset() { - *x = SetWritableRequest{} +func (x *ReloadSchemaKeyspaceRequest) Reset() { + *x = ReloadSchemaKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[139] + mi := &file_vtctldata_proto_msgTypes[128] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetWritableRequest) String() string { +func (x *ReloadSchemaKeyspaceRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetWritableRequest) ProtoMessage() {} +func (*ReloadSchemaKeyspaceRequest) ProtoMessage() {} -func (x *SetWritableRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[139] +func (x *ReloadSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[128] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8098,48 +8286,64 @@ func (x *SetWritableRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetWritableRequest.ProtoReflect.Descriptor instead. -func (*SetWritableRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{139} +// Deprecated: Use ReloadSchemaKeyspaceRequest.ProtoReflect.Descriptor instead. +func (*ReloadSchemaKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{128} } -func (x *SetWritableRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *ReloadSchemaKeyspaceRequest) GetKeyspace() string { if x != nil { - return x.TabletAlias + return x.Keyspace } - return nil + return "" } -func (x *SetWritableRequest) GetWritable() bool { +func (x *ReloadSchemaKeyspaceRequest) GetWaitPosition() string { if x != nil { - return x.Writable + return x.WaitPosition + } + return "" +} + +func (x *ReloadSchemaKeyspaceRequest) GetIncludePrimary() bool { + if x != nil { + return x.IncludePrimary } return false } -type SetWritableResponse struct { +func (x *ReloadSchemaKeyspaceRequest) GetConcurrency() uint32 { + if x != nil { + return x.Concurrency + } + return 0 +} + +type ReloadSchemaKeyspaceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Events []*logutil.Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` } -func (x *SetWritableResponse) Reset() { - *x = SetWritableResponse{} +func (x *ReloadSchemaKeyspaceResponse) Reset() { + *x = ReloadSchemaKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[140] + mi := &file_vtctldata_proto_msgTypes[129] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetWritableResponse) String() string { +func (x *ReloadSchemaKeyspaceResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetWritableResponse) ProtoMessage() {} +func (*ReloadSchemaKeyspaceResponse) ProtoMessage() {} -func (x *SetWritableResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[140] +func (x *ReloadSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[129] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8150,38 +8354,48 @@ func (x *SetWritableResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetWritableResponse.ProtoReflect.Descriptor instead. -func (*SetWritableResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{140} +// Deprecated: Use ReloadSchemaKeyspaceResponse.ProtoReflect.Descriptor instead. +func (*ReloadSchemaKeyspaceResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{129} } -type ShardReplicationAddRequest struct { +func (x *ReloadSchemaKeyspaceResponse) GetEvents() []*logutil.Event { + if x != nil { + return x.Events + } + return nil +} + +type ReloadSchemaShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - TabletAlias *topodata.TabletAlias `protobuf:"bytes,3,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + WaitPosition string `protobuf:"bytes,3,opt,name=wait_position,json=waitPosition,proto3" json:"wait_position,omitempty"` + IncludePrimary bool `protobuf:"varint,4,opt,name=include_primary,json=includePrimary,proto3" json:"include_primary,omitempty"` + // Concurrency is the maximum number of tablets to reload at one time. + Concurrency uint32 `protobuf:"varint,5,opt,name=concurrency,proto3" json:"concurrency,omitempty"` } -func (x *ShardReplicationAddRequest) Reset() { - *x = ShardReplicationAddRequest{} +func (x *ReloadSchemaShardRequest) Reset() { + *x = ReloadSchemaShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[141] + mi := &file_vtctldata_proto_msgTypes[130] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ShardReplicationAddRequest) String() string { +func (x *ReloadSchemaShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ShardReplicationAddRequest) ProtoMessage() {} +func (*ReloadSchemaShardRequest) ProtoMessage() {} -func (x *ShardReplicationAddRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[141] +func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[130] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8192,55 +8406,71 @@ func (x *ShardReplicationAddRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationAddRequest.ProtoReflect.Descriptor instead. -func (*ShardReplicationAddRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{141} +// Deprecated: Use ReloadSchemaShardRequest.ProtoReflect.Descriptor instead. +func (*ReloadSchemaShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{130} } -func (x *ShardReplicationAddRequest) GetKeyspace() string { +func (x *ReloadSchemaShardRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *ShardReplicationAddRequest) GetShard() string { +func (x *ReloadSchemaShardRequest) GetShard() string { if x != nil { return x.Shard } return "" } -func (x *ShardReplicationAddRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *ReloadSchemaShardRequest) GetWaitPosition() string { if x != nil { - return x.TabletAlias + return x.WaitPosition } - return nil + return "" } -type ShardReplicationAddResponse struct { +func (x *ReloadSchemaShardRequest) GetIncludePrimary() bool { + if x != nil { + return x.IncludePrimary + } + return false +} + +func (x *ReloadSchemaShardRequest) GetConcurrency() uint32 { + if x != nil { + return x.Concurrency + } + return 0 +} + +type ReloadSchemaShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Events []*logutil.Event `protobuf:"bytes,2,rep,name=events,proto3" json:"events,omitempty"` } -func (x *ShardReplicationAddResponse) Reset() { - *x = ShardReplicationAddResponse{} +func (x *ReloadSchemaShardResponse) Reset() { + *x = ReloadSchemaShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[142] + mi := &file_vtctldata_proto_msgTypes[131] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ShardReplicationAddResponse) String() string { +func (x *ReloadSchemaShardResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ShardReplicationAddResponse) ProtoMessage() {} +func (*ReloadSchemaShardResponse) ProtoMessage() {} -func (x *ShardReplicationAddResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[142] +func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[131] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8251,38 +8481,45 @@ func (x *ShardReplicationAddResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationAddResponse.ProtoReflect.Descriptor instead. -func (*ShardReplicationAddResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{142} +// Deprecated: Use ReloadSchemaShardResponse.ProtoReflect.Descriptor instead. +func (*ReloadSchemaShardResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{131} } -type ShardReplicationFixRequest struct { +func (x *ReloadSchemaShardResponse) GetEvents() []*logutil.Event { + if x != nil { + return x.Events + } + return nil +} + +type RemoveBackupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - Cell string `protobuf:"bytes,3,opt,name=cell,proto3" json:"cell,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` } -func (x *ShardReplicationFixRequest) Reset() { - *x = ShardReplicationFixRequest{} +func (x *RemoveBackupRequest) Reset() { + *x = RemoveBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[143] + mi := &file_vtctldata_proto_msgTypes[132] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ShardReplicationFixRequest) String() string { +func (x *RemoveBackupRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ShardReplicationFixRequest) ProtoMessage() {} +func (*RemoveBackupRequest) ProtoMessage() {} -func (x *ShardReplicationFixRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[143] +func (x *RemoveBackupRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[132] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8293,60 +8530,55 @@ func (x *ShardReplicationFixRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationFixRequest.ProtoReflect.Descriptor instead. -func (*ShardReplicationFixRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{143} +// Deprecated: Use RemoveBackupRequest.ProtoReflect.Descriptor instead. +func (*RemoveBackupRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{132} } -func (x *ShardReplicationFixRequest) GetKeyspace() string { +func (x *RemoveBackupRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *ShardReplicationFixRequest) GetShard() string { +func (x *RemoveBackupRequest) GetShard() string { if x != nil { return x.Shard } return "" } -func (x *ShardReplicationFixRequest) GetCell() string { +func (x *RemoveBackupRequest) GetName() string { if x != nil { - return x.Cell + return x.Name } return "" } -type ShardReplicationFixResponse struct { +type RemoveBackupResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - // Error contains information about the error fixed by a - // ShardReplicationFix RPC. If there were no errors to fix (i.e. all nodes - // in the replication graph are valid), this field is nil. - Error *topodata.ShardReplicationError `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` } -func (x *ShardReplicationFixResponse) Reset() { - *x = ShardReplicationFixResponse{} +func (x *RemoveBackupResponse) Reset() { + *x = RemoveBackupResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[144] + mi := &file_vtctldata_proto_msgTypes[133] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ShardReplicationFixResponse) String() string { +func (x *RemoveBackupResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ShardReplicationFixResponse) ProtoMessage() {} +func (*RemoveBackupResponse) ProtoMessage() {} -func (x *ShardReplicationFixResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[144] +func (x *RemoveBackupResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[133] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8357,44 +8589,44 @@ func (x *ShardReplicationFixResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationFixResponse.ProtoReflect.Descriptor instead. -func (*ShardReplicationFixResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{144} -} - -func (x *ShardReplicationFixResponse) GetError() *topodata.ShardReplicationError { - if x != nil { - return x.Error - } - return nil +// Deprecated: Use RemoveBackupResponse.ProtoReflect.Descriptor instead. +func (*RemoveBackupResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{133} } -type ShardReplicationPositionsRequest struct { +type RemoveKeyspaceCellRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + Cell string `protobuf:"bytes,2,opt,name=cell,proto3" json:"cell,omitempty"` + // Force proceeds even if the cell's topology server cannot be reached. This + // should only be set if a cell has been shut down entirely, and the global + // topology data just needs to be updated. + Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` + // Recursive also deletes all tablets in that cell belonging to the specified + // keyspace. + Recursive bool `protobuf:"varint,4,opt,name=recursive,proto3" json:"recursive,omitempty"` } -func (x *ShardReplicationPositionsRequest) Reset() { - *x = ShardReplicationPositionsRequest{} +func (x *RemoveKeyspaceCellRequest) Reset() { + *x = RemoveKeyspaceCellRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[145] + mi := &file_vtctldata_proto_msgTypes[134] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ShardReplicationPositionsRequest) String() string { +func (x *RemoveKeyspaceCellRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ShardReplicationPositionsRequest) ProtoMessage() {} +func (*RemoveKeyspaceCellRequest) ProtoMessage() {} -func (x *ShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[145] +func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[134] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8405,55 +8637,62 @@ func (x *ShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationPositionsRequest.ProtoReflect.Descriptor instead. -func (*ShardReplicationPositionsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{145} +// Deprecated: Use RemoveKeyspaceCellRequest.ProtoReflect.Descriptor instead. +func (*RemoveKeyspaceCellRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{134} } -func (x *ShardReplicationPositionsRequest) GetKeyspace() string { +func (x *RemoveKeyspaceCellRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *ShardReplicationPositionsRequest) GetShard() string { +func (x *RemoveKeyspaceCellRequest) GetCell() string { if x != nil { - return x.Shard + return x.Cell } return "" } -type ShardReplicationPositionsResponse struct { +func (x *RemoveKeyspaceCellRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +func (x *RemoveKeyspaceCellRequest) GetRecursive() bool { + if x != nil { + return x.Recursive + } + return false +} + +type RemoveKeyspaceCellResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - // ReplicationStatuses is a mapping of tablet alias string to replication - // status for that tablet. - ReplicationStatuses map[string]*replicationdata.Status `protobuf:"bytes,1,rep,name=replication_statuses,json=replicationStatuses,proto3" json:"replication_statuses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // TabletMap is the set of tablets whose replication statuses were queried, - // keyed by tablet alias. - TabletMap map[string]*topodata.Tablet `protobuf:"bytes,2,rep,name=tablet_map,json=tabletMap,proto3" json:"tablet_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *ShardReplicationPositionsResponse) Reset() { - *x = ShardReplicationPositionsResponse{} +func (x *RemoveKeyspaceCellResponse) Reset() { + *x = RemoveKeyspaceCellResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[146] + mi := &file_vtctldata_proto_msgTypes[135] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ShardReplicationPositionsResponse) String() string { +func (x *RemoveKeyspaceCellResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ShardReplicationPositionsResponse) ProtoMessage() {} +func (*RemoveKeyspaceCellResponse) ProtoMessage() {} -func (x *ShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[146] +func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[135] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8464,52 +8703,45 @@ func (x *ShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationPositionsResponse.ProtoReflect.Descriptor instead. -func (*ShardReplicationPositionsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{146} -} - -func (x *ShardReplicationPositionsResponse) GetReplicationStatuses() map[string]*replicationdata.Status { - if x != nil { - return x.ReplicationStatuses - } - return nil -} - -func (x *ShardReplicationPositionsResponse) GetTabletMap() map[string]*topodata.Tablet { - if x != nil { - return x.TabletMap - } - return nil +// Deprecated: Use RemoveKeyspaceCellResponse.ProtoReflect.Descriptor instead. +func (*RemoveKeyspaceCellResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{135} } -type ShardReplicationRemoveRequest struct { +type RemoveShardCellRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - TabletAlias *topodata.TabletAlias `protobuf:"bytes,3,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + ShardName string `protobuf:"bytes,2,opt,name=shard_name,json=shardName,proto3" json:"shard_name,omitempty"` + Cell string `protobuf:"bytes,3,opt,name=cell,proto3" json:"cell,omitempty"` + // Force proceeds even if the cell's topology server cannot be reached. This + // should only be set if a cell has been shut down entirely, and the global + // topology data just needs to be updated. + Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` + // Recursive also deletes all tablets in that cell belonging to the specified + // keyspace and shard. + Recursive bool `protobuf:"varint,5,opt,name=recursive,proto3" json:"recursive,omitempty"` } -func (x *ShardReplicationRemoveRequest) Reset() { - *x = ShardReplicationRemoveRequest{} +func (x *RemoveShardCellRequest) Reset() { + *x = RemoveShardCellRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[147] + mi := &file_vtctldata_proto_msgTypes[136] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ShardReplicationRemoveRequest) String() string { +func (x *RemoveShardCellRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ShardReplicationRemoveRequest) ProtoMessage() {} +func (*RemoveShardCellRequest) ProtoMessage() {} -func (x *ShardReplicationRemoveRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[147] +func (x *RemoveShardCellRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[136] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8520,55 +8752,69 @@ func (x *ShardReplicationRemoveRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationRemoveRequest.ProtoReflect.Descriptor instead. -func (*ShardReplicationRemoveRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{147} +// Deprecated: Use RemoveShardCellRequest.ProtoReflect.Descriptor instead. +func (*RemoveShardCellRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{136} } -func (x *ShardReplicationRemoveRequest) GetKeyspace() string { +func (x *RemoveShardCellRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *ShardReplicationRemoveRequest) GetShard() string { +func (x *RemoveShardCellRequest) GetShardName() string { if x != nil { - return x.Shard + return x.ShardName } return "" } -func (x *ShardReplicationRemoveRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *RemoveShardCellRequest) GetCell() string { if x != nil { - return x.TabletAlias + return x.Cell } - return nil + return "" } -type ShardReplicationRemoveResponse struct { +func (x *RemoveShardCellRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +func (x *RemoveShardCellRequest) GetRecursive() bool { + if x != nil { + return x.Recursive + } + return false +} + +type RemoveShardCellResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } -func (x *ShardReplicationRemoveResponse) Reset() { - *x = ShardReplicationRemoveResponse{} +func (x *RemoveShardCellResponse) Reset() { + *x = RemoveShardCellResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[148] + mi := &file_vtctldata_proto_msgTypes[137] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ShardReplicationRemoveResponse) String() string { +func (x *RemoveShardCellResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ShardReplicationRemoveResponse) ProtoMessage() {} +func (*RemoveShardCellResponse) ProtoMessage() {} -func (x *ShardReplicationRemoveResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[148] +func (x *RemoveShardCellResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[137] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8579,37 +8825,38 @@ func (x *ShardReplicationRemoveResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationRemoveResponse.ProtoReflect.Descriptor instead. -func (*ShardReplicationRemoveResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{148} +// Deprecated: Use RemoveShardCellResponse.ProtoReflect.Descriptor instead. +func (*RemoveShardCellResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{137} } -type SleepTabletRequest struct { +type ReparentTabletRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` - Duration *vttime.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` + // Tablet is the alias of the tablet that should be reparented under the + // current shard primary. + Tablet *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` } -func (x *SleepTabletRequest) Reset() { - *x = SleepTabletRequest{} +func (x *ReparentTabletRequest) Reset() { + *x = ReparentTabletRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[149] + mi := &file_vtctldata_proto_msgTypes[138] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SleepTabletRequest) String() string { +func (x *ReparentTabletRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SleepTabletRequest) ProtoMessage() {} +func (*ReparentTabletRequest) ProtoMessage() {} -func (x *SleepTabletRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[149] +func (x *ReparentTabletRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[138] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8620,48 +8867,48 @@ func (x *SleepTabletRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SleepTabletRequest.ProtoReflect.Descriptor instead. -func (*SleepTabletRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{149} -} - -func (x *SleepTabletRequest) GetTabletAlias() *topodata.TabletAlias { - if x != nil { - return x.TabletAlias - } - return nil +// Deprecated: Use ReparentTabletRequest.ProtoReflect.Descriptor instead. +func (*ReparentTabletRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{138} } -func (x *SleepTabletRequest) GetDuration() *vttime.Duration { +func (x *ReparentTabletRequest) GetTablet() *topodata.TabletAlias { if x != nil { - return x.Duration + return x.Tablet } return nil } -type SleepTabletResponse struct { +type ReparentTabletResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + // Keyspace is the name of the keyspace the tablet was reparented in. + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Shard is the name of the shard the tablet was reparented in. + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // Primary is the alias of the tablet that the tablet was reparented under. + Primary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=primary,proto3" json:"primary,omitempty"` } -func (x *SleepTabletResponse) Reset() { - *x = SleepTabletResponse{} +func (x *ReparentTabletResponse) Reset() { + *x = ReparentTabletResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[150] + mi := &file_vtctldata_proto_msgTypes[139] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SleepTabletResponse) String() string { +func (x *ReparentTabletResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SleepTabletResponse) ProtoMessage() {} +func (*ReparentTabletResponse) ProtoMessage() {} -func (x *SleepTabletResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[150] +func (x *ReparentTabletResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[139] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8672,46 +8919,74 @@ func (x *SleepTabletResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SleepTabletResponse.ProtoReflect.Descriptor instead. -func (*SleepTabletResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{150} +// Deprecated: Use ReparentTabletResponse.ProtoReflect.Descriptor instead. +func (*ReparentTabletResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{139} } -type SourceShardAddRequest struct { +func (x *ReparentTabletResponse) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ReparentTabletResponse) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *ReparentTabletResponse) GetPrimary() *topodata.TabletAlias { + if x != nil { + return x.Primary + } + return nil +} + +type ReshardCreateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - Uid int32 `protobuf:"varint,3,opt,name=uid,proto3" json:"uid,omitempty"` - SourceKeyspace string `protobuf:"bytes,4,opt,name=source_keyspace,json=sourceKeyspace,proto3" json:"source_keyspace,omitempty"` - SourceShard string `protobuf:"bytes,5,opt,name=source_shard,json=sourceShard,proto3" json:"source_shard,omitempty"` - // KeyRange identifies the key range to use for the SourceShard. This field is - // optional. - KeyRange *topodata.KeyRange `protobuf:"bytes,6,opt,name=key_range,json=keyRange,proto3" json:"key_range,omitempty"` - // Tables is a list of tables replicate (for MoveTables). Each "table" can be - // either an exact match or a regular expression of the form "/regexp/". - Tables []string `protobuf:"bytes,7,rep,name=tables,proto3" json:"tables,omitempty"` + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + SourceShards []string `protobuf:"bytes,3,rep,name=source_shards,json=sourceShards,proto3" json:"source_shards,omitempty"` + TargetShards []string `protobuf:"bytes,4,rep,name=target_shards,json=targetShards,proto3" json:"target_shards,omitempty"` + Cells []string `protobuf:"bytes,5,rep,name=cells,proto3" json:"cells,omitempty"` + TabletTypes []topodata.TabletType `protobuf:"varint,6,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` + TabletSelectionPreference tabletmanagerdata.TabletSelectionPreference `protobuf:"varint,7,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` + // SkipSchemaCopy specifies if the schema should be copied from the source shard, set false if + // schema is already created on the target shard before Reshard is invoked. + SkipSchemaCopy bool `protobuf:"varint,8,opt,name=skip_schema_copy,json=skipSchemaCopy,proto3" json:"skip_schema_copy,omitempty"` + // OnDdl specifies the action to be taken when a DDL is encountered. + OnDdl string `protobuf:"bytes,9,opt,name=on_ddl,json=onDdl,proto3" json:"on_ddl,omitempty"` + // StopAfterCopy specifies if vreplication should be stopped after copying. + StopAfterCopy bool `protobuf:"varint,10,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` + // DeferSecondaryKeys specifies if secondary keys should be created in one shot after table copy finishes. + DeferSecondaryKeys bool `protobuf:"varint,11,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"` + // Start the workflow after creating it. + AutoStart bool `protobuf:"varint,12,opt,name=auto_start,json=autoStart,proto3" json:"auto_start,omitempty"` } -func (x *SourceShardAddRequest) Reset() { - *x = SourceShardAddRequest{} +func (x *ReshardCreateRequest) Reset() { + *x = ReshardCreateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[151] + mi := &file_vtctldata_proto_msgTypes[140] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SourceShardAddRequest) String() string { +func (x *ReshardCreateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SourceShardAddRequest) ProtoMessage() {} +func (*ReshardCreateRequest) ProtoMessage() {} -func (x *SourceShardAddRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[151] +func (x *ReshardCreateRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[140] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8722,135 +8997,132 @@ func (x *SourceShardAddRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SourceShardAddRequest.ProtoReflect.Descriptor instead. -func (*SourceShardAddRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{151} +// Deprecated: Use ReshardCreateRequest.ProtoReflect.Descriptor instead. +func (*ReshardCreateRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{140} } -func (x *SourceShardAddRequest) GetKeyspace() string { +func (x *ReshardCreateRequest) GetWorkflow() string { if x != nil { - return x.Keyspace + return x.Workflow } return "" } -func (x *SourceShardAddRequest) GetShard() string { +func (x *ReshardCreateRequest) GetKeyspace() string { if x != nil { - return x.Shard + return x.Keyspace } return "" } -func (x *SourceShardAddRequest) GetUid() int32 { +func (x *ReshardCreateRequest) GetSourceShards() []string { if x != nil { - return x.Uid + return x.SourceShards } - return 0 + return nil } -func (x *SourceShardAddRequest) GetSourceKeyspace() string { +func (x *ReshardCreateRequest) GetTargetShards() []string { if x != nil { - return x.SourceKeyspace + return x.TargetShards } - return "" + return nil } -func (x *SourceShardAddRequest) GetSourceShard() string { +func (x *ReshardCreateRequest) GetCells() []string { if x != nil { - return x.SourceShard + return x.Cells } - return "" + return nil } -func (x *SourceShardAddRequest) GetKeyRange() *topodata.KeyRange { +func (x *ReshardCreateRequest) GetTabletTypes() []topodata.TabletType { if x != nil { - return x.KeyRange + return x.TabletTypes } return nil } -func (x *SourceShardAddRequest) GetTables() []string { +func (x *ReshardCreateRequest) GetTabletSelectionPreference() tabletmanagerdata.TabletSelectionPreference { if x != nil { - return x.Tables + return x.TabletSelectionPreference } - return nil -} - -type SourceShardAddResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Shard is the updated shard record. - Shard *topodata.Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` + return tabletmanagerdata.TabletSelectionPreference(0) } -func (x *SourceShardAddResponse) Reset() { - *x = SourceShardAddResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[152] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *ReshardCreateRequest) GetSkipSchemaCopy() bool { + if x != nil { + return x.SkipSchemaCopy } + return false } -func (x *SourceShardAddResponse) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *ReshardCreateRequest) GetOnDdl() string { + if x != nil { + return x.OnDdl + } + return "" } -func (*SourceShardAddResponse) ProtoMessage() {} - -func (x *SourceShardAddResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[152] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *ReshardCreateRequest) GetStopAfterCopy() bool { + if x != nil { + return x.StopAfterCopy } - return mi.MessageOf(x) + return false } -// Deprecated: Use SourceShardAddResponse.ProtoReflect.Descriptor instead. -func (*SourceShardAddResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{152} +func (x *ReshardCreateRequest) GetDeferSecondaryKeys() bool { + if x != nil { + return x.DeferSecondaryKeys + } + return false } -func (x *SourceShardAddResponse) GetShard() *topodata.Shard { +func (x *ReshardCreateRequest) GetAutoStart() bool { if x != nil { - return x.Shard + return x.AutoStart } - return nil + return false } -type SourceShardDeleteRequest struct { +type RestoreFromBackupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - Uid int32 `protobuf:"varint,3,opt,name=uid,proto3" json:"uid,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + // BackupTime, if set, will use the backup taken most closely at or before + // this time. If nil, the latest backup will be restored on the tablet. + BackupTime *vttime.Time `protobuf:"bytes,2,opt,name=backup_time,json=backupTime,proto3" json:"backup_time,omitempty"` + // RestoreToPos indicates a position for a point-in-time recovery. The recovery + // is expected to utilize one full backup, followed by zero or more incremental backups, + // that reach the precise desired position + RestoreToPos string `protobuf:"bytes,3,opt,name=restore_to_pos,json=restoreToPos,proto3" json:"restore_to_pos,omitempty"` + // Dry run does not actually performs the restore, but validates the steps and availability of backups + DryRun bool `protobuf:"varint,4,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` + // RestoreToTimestamp, if given, requested an inremental restore up to (and excluding) the given timestamp. + // RestoreToTimestamp and RestoreToPos are mutually exclusive. + RestoreToTimestamp *vttime.Time `protobuf:"bytes,5,opt,name=restore_to_timestamp,json=restoreToTimestamp,proto3" json:"restore_to_timestamp,omitempty"` } -func (x *SourceShardDeleteRequest) Reset() { - *x = SourceShardDeleteRequest{} +func (x *RestoreFromBackupRequest) Reset() { + *x = RestoreFromBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[153] + mi := &file_vtctldata_proto_msgTypes[141] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SourceShardDeleteRequest) String() string { +func (x *RestoreFromBackupRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SourceShardDeleteRequest) ProtoMessage() {} +func (*RestoreFromBackupRequest) ProtoMessage() {} -func (x *SourceShardDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[153] +func (x *RestoreFromBackupRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[141] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8861,58 +9133,75 @@ func (x *SourceShardDeleteRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SourceShardDeleteRequest.ProtoReflect.Descriptor instead. -func (*SourceShardDeleteRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{153} +// Deprecated: Use RestoreFromBackupRequest.ProtoReflect.Descriptor instead. +func (*RestoreFromBackupRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{141} } -func (x *SourceShardDeleteRequest) GetKeyspace() string { +func (x *RestoreFromBackupRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Keyspace + return x.TabletAlias } - return "" + return nil } -func (x *SourceShardDeleteRequest) GetShard() string { +func (x *RestoreFromBackupRequest) GetBackupTime() *vttime.Time { if x != nil { - return x.Shard + return x.BackupTime + } + return nil +} + +func (x *RestoreFromBackupRequest) GetRestoreToPos() string { + if x != nil { + return x.RestoreToPos } return "" } -func (x *SourceShardDeleteRequest) GetUid() int32 { +func (x *RestoreFromBackupRequest) GetDryRun() bool { if x != nil { - return x.Uid + return x.DryRun } - return 0 + return false } -type SourceShardDeleteResponse struct { +func (x *RestoreFromBackupRequest) GetRestoreToTimestamp() *vttime.Time { + if x != nil { + return x.RestoreToTimestamp + } + return nil +} + +type RestoreFromBackupResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Shard is the updated shard record. - Shard *topodata.Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` + // TabletAlias is the alias of the tablet doing the restore. + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` + Event *logutil.Event `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` } -func (x *SourceShardDeleteResponse) Reset() { - *x = SourceShardDeleteResponse{} +func (x *RestoreFromBackupResponse) Reset() { + *x = RestoreFromBackupResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[154] + mi := &file_vtctldata_proto_msgTypes[142] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SourceShardDeleteResponse) String() string { +func (x *RestoreFromBackupResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SourceShardDeleteResponse) ProtoMessage() {} +func (*RestoreFromBackupResponse) ProtoMessage() {} -func (x *SourceShardDeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[154] +func (x *RestoreFromBackupResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[142] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8923,43 +9212,65 @@ func (x *SourceShardDeleteResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SourceShardDeleteResponse.ProtoReflect.Descriptor instead. -func (*SourceShardDeleteResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{154} +// Deprecated: Use RestoreFromBackupResponse.ProtoReflect.Descriptor instead. +func (*RestoreFromBackupResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{142} } -func (x *SourceShardDeleteResponse) GetShard() *topodata.Shard { +func (x *RestoreFromBackupResponse) GetTabletAlias() *topodata.TabletAlias { + if x != nil { + return x.TabletAlias + } + return nil +} + +func (x *RestoreFromBackupResponse) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *RestoreFromBackupResponse) GetShard() string { if x != nil { return x.Shard } + return "" +} + +func (x *RestoreFromBackupResponse) GetEvent() *logutil.Event { + if x != nil { + return x.Event + } return nil } -type StartReplicationRequest struct { +type RetrySchemaMigrationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` } -func (x *StartReplicationRequest) Reset() { - *x = StartReplicationRequest{} +func (x *RetrySchemaMigrationRequest) Reset() { + *x = RetrySchemaMigrationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[155] + mi := &file_vtctldata_proto_msgTypes[143] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *StartReplicationRequest) String() string { +func (x *RetrySchemaMigrationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StartReplicationRequest) ProtoMessage() {} +func (*RetrySchemaMigrationRequest) ProtoMessage() {} -func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[155] +func (x *RetrySchemaMigrationRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[143] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8970,41 +9281,50 @@ func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StartReplicationRequest.ProtoReflect.Descriptor instead. -func (*StartReplicationRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{155} +// Deprecated: Use RetrySchemaMigrationRequest.ProtoReflect.Descriptor instead. +func (*RetrySchemaMigrationRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{143} } -func (x *StartReplicationRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *RetrySchemaMigrationRequest) GetKeyspace() string { if x != nil { - return x.TabletAlias + return x.Keyspace } - return nil + return "" } -type StartReplicationResponse struct { +func (x *RetrySchemaMigrationRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +type RetrySchemaMigrationResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + RowsAffectedByShard map[string]uint64 `protobuf:"bytes,1,rep,name=rows_affected_by_shard,json=rowsAffectedByShard,proto3" json:"rows_affected_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` } -func (x *StartReplicationResponse) Reset() { - *x = StartReplicationResponse{} +func (x *RetrySchemaMigrationResponse) Reset() { + *x = RetrySchemaMigrationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[156] + mi := &file_vtctldata_proto_msgTypes[144] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *StartReplicationResponse) String() string { +func (x *RetrySchemaMigrationResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StartReplicationResponse) ProtoMessage() {} +func (*RetrySchemaMigrationResponse) ProtoMessage() {} -func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[156] +func (x *RetrySchemaMigrationResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[144] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9015,12 +9335,19 @@ func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StartReplicationResponse.ProtoReflect.Descriptor instead. -func (*StartReplicationResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{156} +// Deprecated: Use RetrySchemaMigrationResponse.ProtoReflect.Descriptor instead. +func (*RetrySchemaMigrationResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{144} } -type StopReplicationRequest struct { +func (x *RetrySchemaMigrationResponse) GetRowsAffectedByShard() map[string]uint64 { + if x != nil { + return x.RowsAffectedByShard + } + return nil +} + +type RunHealthCheckRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -9028,23 +9355,23 @@ type StopReplicationRequest struct { TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *StopReplicationRequest) Reset() { - *x = StopReplicationRequest{} +func (x *RunHealthCheckRequest) Reset() { + *x = RunHealthCheckRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[157] + mi := &file_vtctldata_proto_msgTypes[145] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *StopReplicationRequest) String() string { +func (x *RunHealthCheckRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StopReplicationRequest) ProtoMessage() {} +func (*RunHealthCheckRequest) ProtoMessage() {} -func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[157] +func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[145] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9055,41 +9382,41 @@ func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StopReplicationRequest.ProtoReflect.Descriptor instead. -func (*StopReplicationRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{157} +// Deprecated: Use RunHealthCheckRequest.ProtoReflect.Descriptor instead. +func (*RunHealthCheckRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{145} } -func (x *StopReplicationRequest) GetTabletAlias() *topodata.TabletAlias { +func (x *RunHealthCheckRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { return x.TabletAlias } return nil } -type StopReplicationResponse struct { +type RunHealthCheckResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } -func (x *StopReplicationResponse) Reset() { - *x = StopReplicationResponse{} +func (x *RunHealthCheckResponse) Reset() { + *x = RunHealthCheckResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[158] + mi := &file_vtctldata_proto_msgTypes[146] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *StopReplicationResponse) String() string { +func (x *RunHealthCheckResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StopReplicationResponse) ProtoMessage() {} +func (*RunHealthCheckResponse) ProtoMessage() {} -func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[158] +func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[146] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9100,38 +9427,37 @@ func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StopReplicationResponse.ProtoReflect.Descriptor instead. -func (*StopReplicationResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{158} +// Deprecated: Use RunHealthCheckResponse.ProtoReflect.Descriptor instead. +func (*RunHealthCheckResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{146} } -type TabletExternallyReparentedRequest struct { +type SetKeyspaceDurabilityPolicyRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Tablet is the alias of the tablet that was promoted externally and should - // be updated to the shard primary in the topo. - Tablet *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + DurabilityPolicy string `protobuf:"bytes,2,opt,name=durability_policy,json=durabilityPolicy,proto3" json:"durability_policy,omitempty"` } -func (x *TabletExternallyReparentedRequest) Reset() { - *x = TabletExternallyReparentedRequest{} +func (x *SetKeyspaceDurabilityPolicyRequest) Reset() { + *x = SetKeyspaceDurabilityPolicyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[159] + mi := &file_vtctldata_proto_msgTypes[147] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *TabletExternallyReparentedRequest) String() string { +func (x *SetKeyspaceDurabilityPolicyRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*TabletExternallyReparentedRequest) ProtoMessage() {} +func (*SetKeyspaceDurabilityPolicyRequest) ProtoMessage() {} -func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[159] +func (x *SetKeyspaceDurabilityPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[147] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9142,46 +9468,51 @@ func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use TabletExternallyReparentedRequest.ProtoReflect.Descriptor instead. -func (*TabletExternallyReparentedRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{159} +// Deprecated: Use SetKeyspaceDurabilityPolicyRequest.ProtoReflect.Descriptor instead. +func (*SetKeyspaceDurabilityPolicyRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{147} } -func (x *TabletExternallyReparentedRequest) GetTablet() *topodata.TabletAlias { +func (x *SetKeyspaceDurabilityPolicyRequest) GetKeyspace() string { if x != nil { - return x.Tablet + return x.Keyspace } - return nil + return "" } -type TabletExternallyReparentedResponse struct { +func (x *SetKeyspaceDurabilityPolicyRequest) GetDurabilityPolicy() string { + if x != nil { + return x.DurabilityPolicy + } + return "" +} + +type SetKeyspaceDurabilityPolicyResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - NewPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=new_primary,json=newPrimary,proto3" json:"new_primary,omitempty"` - OldPrimary *topodata.TabletAlias `protobuf:"bytes,4,opt,name=old_primary,json=oldPrimary,proto3" json:"old_primary,omitempty"` + // Keyspace is the updated keyspace record. + Keyspace *topodata.Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *TabletExternallyReparentedResponse) Reset() { - *x = TabletExternallyReparentedResponse{} +func (x *SetKeyspaceDurabilityPolicyResponse) Reset() { + *x = SetKeyspaceDurabilityPolicyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[160] + mi := &file_vtctldata_proto_msgTypes[148] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *TabletExternallyReparentedResponse) String() string { +func (x *SetKeyspaceDurabilityPolicyResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*TabletExternallyReparentedResponse) ProtoMessage() {} +func (*SetKeyspaceDurabilityPolicyResponse) ProtoMessage() {} -func (x *TabletExternallyReparentedResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[160] +func (x *SetKeyspaceDurabilityPolicyResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[148] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9192,65 +9523,47 @@ func (x *TabletExternallyReparentedResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use TabletExternallyReparentedResponse.ProtoReflect.Descriptor instead. -func (*TabletExternallyReparentedResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{160} +// Deprecated: Use SetKeyspaceDurabilityPolicyResponse.ProtoReflect.Descriptor instead. +func (*SetKeyspaceDurabilityPolicyResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{148} } -func (x *TabletExternallyReparentedResponse) GetKeyspace() string { +func (x *SetKeyspaceDurabilityPolicyResponse) GetKeyspace() *topodata.Keyspace { if x != nil { return x.Keyspace } - return "" -} - -func (x *TabletExternallyReparentedResponse) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *TabletExternallyReparentedResponse) GetNewPrimary() *topodata.TabletAlias { - if x != nil { - return x.NewPrimary - } return nil } -func (x *TabletExternallyReparentedResponse) GetOldPrimary() *topodata.TabletAlias { - if x != nil { - return x.OldPrimary - } - return nil -} - -type UpdateCellInfoRequest struct { +type SetKeyspaceServedFromRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - CellInfo *topodata.CellInfo `protobuf:"bytes,2,opt,name=cell_info,json=cellInfo,proto3" json:"cell_info,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + TabletType topodata.TabletType `protobuf:"varint,2,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` + Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` + Remove bool `protobuf:"varint,4,opt,name=remove,proto3" json:"remove,omitempty"` + SourceKeyspace string `protobuf:"bytes,5,opt,name=source_keyspace,json=sourceKeyspace,proto3" json:"source_keyspace,omitempty"` } -func (x *UpdateCellInfoRequest) Reset() { - *x = UpdateCellInfoRequest{} +func (x *SetKeyspaceServedFromRequest) Reset() { + *x = SetKeyspaceServedFromRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[161] + mi := &file_vtctldata_proto_msgTypes[149] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *UpdateCellInfoRequest) String() string { +func (x *SetKeyspaceServedFromRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateCellInfoRequest) ProtoMessage() {} +func (*SetKeyspaceServedFromRequest) ProtoMessage() {} -func (x *UpdateCellInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[161] +func (x *SetKeyspaceServedFromRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[149] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9261,106 +9574,72 @@ func (x *UpdateCellInfoRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateCellInfoRequest.ProtoReflect.Descriptor instead. -func (*UpdateCellInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{161} +// Deprecated: Use SetKeyspaceServedFromRequest.ProtoReflect.Descriptor instead. +func (*SetKeyspaceServedFromRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{149} } -func (x *UpdateCellInfoRequest) GetName() string { +func (x *SetKeyspaceServedFromRequest) GetKeyspace() string { if x != nil { - return x.Name + return x.Keyspace } return "" } -func (x *UpdateCellInfoRequest) GetCellInfo() *topodata.CellInfo { +func (x *SetKeyspaceServedFromRequest) GetTabletType() topodata.TabletType { if x != nil { - return x.CellInfo - } - return nil -} - -type UpdateCellInfoResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - CellInfo *topodata.CellInfo `protobuf:"bytes,2,opt,name=cell_info,json=cellInfo,proto3" json:"cell_info,omitempty"` -} - -func (x *UpdateCellInfoResponse) Reset() { - *x = UpdateCellInfoResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[162] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + return x.TabletType } + return topodata.TabletType(0) } -func (x *UpdateCellInfoResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateCellInfoResponse) ProtoMessage() {} - -func (x *UpdateCellInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[162] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *SetKeyspaceServedFromRequest) GetCells() []string { + if x != nil { + return x.Cells } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateCellInfoResponse.ProtoReflect.Descriptor instead. -func (*UpdateCellInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{162} + return nil } -func (x *UpdateCellInfoResponse) GetName() string { +func (x *SetKeyspaceServedFromRequest) GetRemove() bool { if x != nil { - return x.Name + return x.Remove } - return "" + return false } -func (x *UpdateCellInfoResponse) GetCellInfo() *topodata.CellInfo { +func (x *SetKeyspaceServedFromRequest) GetSourceKeyspace() string { if x != nil { - return x.CellInfo + return x.SourceKeyspace } - return nil + return "" } -type UpdateCellsAliasRequest struct { +type SetKeyspaceServedFromResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - CellsAlias *topodata.CellsAlias `protobuf:"bytes,2,opt,name=cells_alias,json=cellsAlias,proto3" json:"cells_alias,omitempty"` + // Keyspace is the updated keyspace record. + Keyspace *topodata.Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *UpdateCellsAliasRequest) Reset() { - *x = UpdateCellsAliasRequest{} +func (x *SetKeyspaceServedFromResponse) Reset() { + *x = SetKeyspaceServedFromResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[163] + mi := &file_vtctldata_proto_msgTypes[150] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *UpdateCellsAliasRequest) String() string { +func (x *SetKeyspaceServedFromResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateCellsAliasRequest) ProtoMessage() {} +func (*SetKeyspaceServedFromResponse) ProtoMessage() {} -func (x *UpdateCellsAliasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[163] +func (x *SetKeyspaceServedFromResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[150] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9371,51 +9650,44 @@ func (x *UpdateCellsAliasRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateCellsAliasRequest.ProtoReflect.Descriptor instead. -func (*UpdateCellsAliasRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{163} -} - -func (x *UpdateCellsAliasRequest) GetName() string { - if x != nil { - return x.Name - } - return "" +// Deprecated: Use SetKeyspaceServedFromResponse.ProtoReflect.Descriptor instead. +func (*SetKeyspaceServedFromResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{150} } -func (x *UpdateCellsAliasRequest) GetCellsAlias() *topodata.CellsAlias { +func (x *SetKeyspaceServedFromResponse) GetKeyspace() *topodata.Keyspace { if x != nil { - return x.CellsAlias + return x.Keyspace } return nil } -type UpdateCellsAliasResponse struct { +type SetKeyspaceShardingInfoRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - CellsAlias *topodata.CellsAlias `protobuf:"bytes,2,opt,name=cells_alias,json=cellsAlias,proto3" json:"cells_alias,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` } -func (x *UpdateCellsAliasResponse) Reset() { - *x = UpdateCellsAliasResponse{} +func (x *SetKeyspaceShardingInfoRequest) Reset() { + *x = SetKeyspaceShardingInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[164] + mi := &file_vtctldata_proto_msgTypes[151] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *UpdateCellsAliasResponse) String() string { +func (x *SetKeyspaceShardingInfoRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateCellsAliasResponse) ProtoMessage() {} +func (*SetKeyspaceShardingInfoRequest) ProtoMessage() {} -func (x *UpdateCellsAliasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[164] +func (x *SetKeyspaceShardingInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[151] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9426,50 +9698,51 @@ func (x *UpdateCellsAliasResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateCellsAliasResponse.ProtoReflect.Descriptor instead. -func (*UpdateCellsAliasResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{164} +// Deprecated: Use SetKeyspaceShardingInfoRequest.ProtoReflect.Descriptor instead. +func (*SetKeyspaceShardingInfoRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{151} } -func (x *UpdateCellsAliasResponse) GetName() string { +func (x *SetKeyspaceShardingInfoRequest) GetKeyspace() string { if x != nil { - return x.Name + return x.Keyspace } return "" } -func (x *UpdateCellsAliasResponse) GetCellsAlias() *topodata.CellsAlias { +func (x *SetKeyspaceShardingInfoRequest) GetForce() bool { if x != nil { - return x.CellsAlias + return x.Force } - return nil + return false } -type ValidateRequest struct { +type SetKeyspaceShardingInfoResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - PingTablets bool `protobuf:"varint,1,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` + // Keyspace is the updated keyspace record. + Keyspace *topodata.Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *ValidateRequest) Reset() { - *x = ValidateRequest{} +func (x *SetKeyspaceShardingInfoResponse) Reset() { + *x = SetKeyspaceShardingInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[165] + mi := &file_vtctldata_proto_msgTypes[152] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateRequest) String() string { +func (x *SetKeyspaceShardingInfoResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateRequest) ProtoMessage() {} +func (*SetKeyspaceShardingInfoResponse) ProtoMessage() {} -func (x *ValidateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[165] +func (x *SetKeyspaceShardingInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[152] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9480,44 +9753,45 @@ func (x *ValidateRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateRequest.ProtoReflect.Descriptor instead. -func (*ValidateRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{165} +// Deprecated: Use SetKeyspaceShardingInfoResponse.ProtoReflect.Descriptor instead. +func (*SetKeyspaceShardingInfoResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{152} } -func (x *ValidateRequest) GetPingTablets() bool { +func (x *SetKeyspaceShardingInfoResponse) GetKeyspace() *topodata.Keyspace { if x != nil { - return x.PingTablets + return x.Keyspace } - return false + return nil } -type ValidateResponse struct { +type SetShardIsPrimaryServingRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` - ResultsByKeyspace map[string]*ValidateKeyspaceResponse `protobuf:"bytes,2,rep,name=results_by_keyspace,json=resultsByKeyspace,proto3" json:"results_by_keyspace,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + IsServing bool `protobuf:"varint,3,opt,name=is_serving,json=isServing,proto3" json:"is_serving,omitempty"` } -func (x *ValidateResponse) Reset() { - *x = ValidateResponse{} +func (x *SetShardIsPrimaryServingRequest) Reset() { + *x = SetShardIsPrimaryServingRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[166] + mi := &file_vtctldata_proto_msgTypes[153] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateResponse) String() string { +func (x *SetShardIsPrimaryServingRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateResponse) ProtoMessage() {} +func (*SetShardIsPrimaryServingRequest) ProtoMessage() {} -func (x *ValidateResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[166] +func (x *SetShardIsPrimaryServingRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[153] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9528,51 +9802,58 @@ func (x *ValidateResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateResponse.ProtoReflect.Descriptor instead. -func (*ValidateResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{166} +// Deprecated: Use SetShardIsPrimaryServingRequest.ProtoReflect.Descriptor instead. +func (*SetShardIsPrimaryServingRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{153} } -func (x *ValidateResponse) GetResults() []string { +func (x *SetShardIsPrimaryServingRequest) GetKeyspace() string { if x != nil { - return x.Results + return x.Keyspace } - return nil + return "" } -func (x *ValidateResponse) GetResultsByKeyspace() map[string]*ValidateKeyspaceResponse { +func (x *SetShardIsPrimaryServingRequest) GetShard() string { if x != nil { - return x.ResultsByKeyspace + return x.Shard } - return nil + return "" } -type ValidateKeyspaceRequest struct { +func (x *SetShardIsPrimaryServingRequest) GetIsServing() bool { + if x != nil { + return x.IsServing + } + return false +} + +type SetShardIsPrimaryServingResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - PingTablets bool `protobuf:"varint,2,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` + // Shard is the updated shard record. + Shard *topodata.Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` } -func (x *ValidateKeyspaceRequest) Reset() { - *x = ValidateKeyspaceRequest{} +func (x *SetShardIsPrimaryServingResponse) Reset() { + *x = SetShardIsPrimaryServingResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[167] + mi := &file_vtctldata_proto_msgTypes[154] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateKeyspaceRequest) String() string { +func (x *SetShardIsPrimaryServingResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateKeyspaceRequest) ProtoMessage() {} +func (*SetShardIsPrimaryServingResponse) ProtoMessage() {} -func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[167] +func (x *SetShardIsPrimaryServingResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[154] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9583,51 +9864,63 @@ func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*ValidateKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{167} +// Deprecated: Use SetShardIsPrimaryServingResponse.ProtoReflect.Descriptor instead. +func (*SetShardIsPrimaryServingResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{154} } -func (x *ValidateKeyspaceRequest) GetKeyspace() string { +func (x *SetShardIsPrimaryServingResponse) GetShard() *topodata.Shard { if x != nil { - return x.Keyspace + return x.Shard } - return "" -} - -func (x *ValidateKeyspaceRequest) GetPingTablets() bool { - if x != nil { - return x.PingTablets - } - return false + return nil } -type ValidateKeyspaceResponse struct { +type SetShardTabletControlRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` - ResultsByShard map[string]*ValidateShardResponse `protobuf:"bytes,2,rep,name=results_by_shard,json=resultsByShard,proto3" json:"results_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + TabletType topodata.TabletType `protobuf:"varint,3,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` + Cells []string `protobuf:"bytes,4,rep,name=cells,proto3" json:"cells,omitempty"` + // DeniedTables updates the list of denied tables the shard will serve for + // the given tablet type. This is useful to fix tables that are being blocked + // after a MoveTables operation. + // + // NOTE: Setting this field will cause DisableQueryService to be ignored. + DeniedTables []string `protobuf:"bytes,5,rep,name=denied_tables,json=deniedTables,proto3" json:"denied_tables,omitempty"` + // DisableQueryService instructs whether to enable the query service on + // tablets of the given type in the shard. This is useful to fix Reshard + // operations gone awry. + // + // NOTE: this is ignored if DeniedTables is not empty. + DisableQueryService bool `protobuf:"varint,6,opt,name=disable_query_service,json=disableQueryService,proto3" json:"disable_query_service,omitempty"` + // Remove removes the ShardTabletControl record entirely. If set, this takes + // precedence over DeniedTables and DisableQueryService fields, and is useful + // to manually remove serving restrictions after a completed MoveTables + // operation. + Remove bool `protobuf:"varint,7,opt,name=remove,proto3" json:"remove,omitempty"` } -func (x *ValidateKeyspaceResponse) Reset() { - *x = ValidateKeyspaceResponse{} +func (x *SetShardTabletControlRequest) Reset() { + *x = SetShardTabletControlRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[168] + mi := &file_vtctldata_proto_msgTypes[155] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateKeyspaceResponse) String() string { +func (x *SetShardTabletControlRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateKeyspaceResponse) ProtoMessage() {} +func (*SetShardTabletControlRequest) ProtoMessage() {} -func (x *ValidateKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[168] +func (x *SetShardTabletControlRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[155] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9638,130 +9931,86 @@ func (x *ValidateKeyspaceResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateKeyspaceResponse.ProtoReflect.Descriptor instead. -func (*ValidateKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{168} +// Deprecated: Use SetShardTabletControlRequest.ProtoReflect.Descriptor instead. +func (*SetShardTabletControlRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{155} } -func (x *ValidateKeyspaceResponse) GetResults() []string { +func (x *SetShardTabletControlRequest) GetKeyspace() string { if x != nil { - return x.Results + return x.Keyspace } - return nil + return "" } -func (x *ValidateKeyspaceResponse) GetResultsByShard() map[string]*ValidateShardResponse { +func (x *SetShardTabletControlRequest) GetShard() string { if x != nil { - return x.ResultsByShard - } - return nil -} - -type ValidateSchemaKeyspaceRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - ExcludeTables []string `protobuf:"bytes,2,rep,name=exclude_tables,json=excludeTables,proto3" json:"exclude_tables,omitempty"` - IncludeViews bool `protobuf:"varint,3,opt,name=include_views,json=includeViews,proto3" json:"include_views,omitempty"` - SkipNoPrimary bool `protobuf:"varint,4,opt,name=skip_no_primary,json=skipNoPrimary,proto3" json:"skip_no_primary,omitempty"` - IncludeVschema bool `protobuf:"varint,5,opt,name=include_vschema,json=includeVschema,proto3" json:"include_vschema,omitempty"` -} - -func (x *ValidateSchemaKeyspaceRequest) Reset() { - *x = ValidateSchemaKeyspaceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[169] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ValidateSchemaKeyspaceRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidateSchemaKeyspaceRequest) ProtoMessage() {} - -func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[169] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms + return x.Shard } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidateSchemaKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*ValidateSchemaKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{169} + return "" } -func (x *ValidateSchemaKeyspaceRequest) GetKeyspace() string { +func (x *SetShardTabletControlRequest) GetTabletType() topodata.TabletType { if x != nil { - return x.Keyspace + return x.TabletType } - return "" + return topodata.TabletType(0) } -func (x *ValidateSchemaKeyspaceRequest) GetExcludeTables() []string { +func (x *SetShardTabletControlRequest) GetCells() []string { if x != nil { - return x.ExcludeTables + return x.Cells } return nil } -func (x *ValidateSchemaKeyspaceRequest) GetIncludeViews() bool { +func (x *SetShardTabletControlRequest) GetDeniedTables() []string { if x != nil { - return x.IncludeViews + return x.DeniedTables } - return false + return nil } -func (x *ValidateSchemaKeyspaceRequest) GetSkipNoPrimary() bool { +func (x *SetShardTabletControlRequest) GetDisableQueryService() bool { if x != nil { - return x.SkipNoPrimary + return x.DisableQueryService } return false } -func (x *ValidateSchemaKeyspaceRequest) GetIncludeVschema() bool { +func (x *SetShardTabletControlRequest) GetRemove() bool { if x != nil { - return x.IncludeVschema + return x.Remove } return false } -type ValidateSchemaKeyspaceResponse struct { +type SetShardTabletControlResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` - ResultsByShard map[string]*ValidateShardResponse `protobuf:"bytes,2,rep,name=results_by_shard,json=resultsByShard,proto3" json:"results_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Shard is the updated shard record. + Shard *topodata.Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` } -func (x *ValidateSchemaKeyspaceResponse) Reset() { - *x = ValidateSchemaKeyspaceResponse{} +func (x *SetShardTabletControlResponse) Reset() { + *x = SetShardTabletControlResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[170] + mi := &file_vtctldata_proto_msgTypes[156] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateSchemaKeyspaceResponse) String() string { +func (x *SetShardTabletControlResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateSchemaKeyspaceResponse) ProtoMessage() {} +func (*SetShardTabletControlResponse) ProtoMessage() {} -func (x *ValidateSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[170] +func (x *SetShardTabletControlResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[156] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9772,52 +10021,44 @@ func (x *ValidateSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateSchemaKeyspaceResponse.ProtoReflect.Descriptor instead. -func (*ValidateSchemaKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{170} -} - -func (x *ValidateSchemaKeyspaceResponse) GetResults() []string { - if x != nil { - return x.Results - } - return nil +// Deprecated: Use SetShardTabletControlResponse.ProtoReflect.Descriptor instead. +func (*SetShardTabletControlResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{156} } -func (x *ValidateSchemaKeyspaceResponse) GetResultsByShard() map[string]*ValidateShardResponse { +func (x *SetShardTabletControlResponse) GetShard() *topodata.Shard { if x != nil { - return x.ResultsByShard + return x.Shard } return nil } -type ValidateShardRequest struct { +type SetWritableRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - PingTablets bool `protobuf:"varint,3,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Writable bool `protobuf:"varint,2,opt,name=writable,proto3" json:"writable,omitempty"` } -func (x *ValidateShardRequest) Reset() { - *x = ValidateShardRequest{} +func (x *SetWritableRequest) Reset() { + *x = SetWritableRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[171] + mi := &file_vtctldata_proto_msgTypes[157] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateShardRequest) String() string { +func (x *SetWritableRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateShardRequest) ProtoMessage() {} +func (*SetWritableRequest) ProtoMessage() {} -func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[171] +func (x *SetWritableRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[157] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9828,57 +10069,48 @@ func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateShardRequest.ProtoReflect.Descriptor instead. -func (*ValidateShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{171} -} - -func (x *ValidateShardRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" +// Deprecated: Use SetWritableRequest.ProtoReflect.Descriptor instead. +func (*SetWritableRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{157} } -func (x *ValidateShardRequest) GetShard() string { +func (x *SetWritableRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Shard + return x.TabletAlias } - return "" + return nil } -func (x *ValidateShardRequest) GetPingTablets() bool { +func (x *SetWritableRequest) GetWritable() bool { if x != nil { - return x.PingTablets + return x.Writable } return false } -type ValidateShardResponse struct { +type SetWritableResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` } -func (x *ValidateShardResponse) Reset() { - *x = ValidateShardResponse{} +func (x *SetWritableResponse) Reset() { + *x = SetWritableResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[172] + mi := &file_vtctldata_proto_msgTypes[158] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateShardResponse) String() string { +func (x *SetWritableResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateShardResponse) ProtoMessage() {} +func (*SetWritableResponse) ProtoMessage() {} -func (x *ValidateShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[172] +func (x *SetWritableResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[158] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9889,43 +10121,38 @@ func (x *ValidateShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateShardResponse.ProtoReflect.Descriptor instead. -func (*ValidateShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{172} -} - -func (x *ValidateShardResponse) GetResults() []string { - if x != nil { - return x.Results - } - return nil +// Deprecated: Use SetWritableResponse.ProtoReflect.Descriptor instead. +func (*SetWritableResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{158} } -type ValidateVersionKeyspaceRequest struct { +type ShardReplicationAddRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,3,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *ValidateVersionKeyspaceRequest) Reset() { - *x = ValidateVersionKeyspaceRequest{} +func (x *ShardReplicationAddRequest) Reset() { + *x = ShardReplicationAddRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[173] + mi := &file_vtctldata_proto_msgTypes[159] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateVersionKeyspaceRequest) String() string { +func (x *ShardReplicationAddRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateVersionKeyspaceRequest) ProtoMessage() {} +func (*ShardReplicationAddRequest) ProtoMessage() {} -func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[173] +func (x *ShardReplicationAddRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[159] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9936,44 +10163,55 @@ func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateVersionKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*ValidateVersionKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{173} +// Deprecated: Use ShardReplicationAddRequest.ProtoReflect.Descriptor instead. +func (*ShardReplicationAddRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{159} } -func (x *ValidateVersionKeyspaceRequest) GetKeyspace() string { +func (x *ShardReplicationAddRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -type ValidateVersionKeyspaceResponse struct { +func (x *ShardReplicationAddRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *ShardReplicationAddRequest) GetTabletAlias() *topodata.TabletAlias { + if x != nil { + return x.TabletAlias + } + return nil +} + +type ShardReplicationAddResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` - ResultsByShard map[string]*ValidateShardResponse `protobuf:"bytes,2,rep,name=results_by_shard,json=resultsByShard,proto3" json:"results_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *ValidateVersionKeyspaceResponse) Reset() { - *x = ValidateVersionKeyspaceResponse{} +func (x *ShardReplicationAddResponse) Reset() { + *x = ShardReplicationAddResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[174] + mi := &file_vtctldata_proto_msgTypes[160] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateVersionKeyspaceResponse) String() string { +func (x *ShardReplicationAddResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateVersionKeyspaceResponse) ProtoMessage() {} +func (*ShardReplicationAddResponse) ProtoMessage() {} -func (x *ValidateVersionKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[174] +func (x *ShardReplicationAddResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[160] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9984,51 +10222,38 @@ func (x *ValidateVersionKeyspaceResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateVersionKeyspaceResponse.ProtoReflect.Descriptor instead. -func (*ValidateVersionKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{174} -} - -func (x *ValidateVersionKeyspaceResponse) GetResults() []string { - if x != nil { - return x.Results - } - return nil -} - -func (x *ValidateVersionKeyspaceResponse) GetResultsByShard() map[string]*ValidateShardResponse { - if x != nil { - return x.ResultsByShard - } - return nil +// Deprecated: Use ShardReplicationAddResponse.ProtoReflect.Descriptor instead. +func (*ShardReplicationAddResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{160} } -type ValidateVersionShardRequest struct { +type ShardReplicationFixRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + Cell string `protobuf:"bytes,3,opt,name=cell,proto3" json:"cell,omitempty"` } -func (x *ValidateVersionShardRequest) Reset() { - *x = ValidateVersionShardRequest{} +func (x *ShardReplicationFixRequest) Reset() { + *x = ShardReplicationFixRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[175] + mi := &file_vtctldata_proto_msgTypes[161] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateVersionShardRequest) String() string { +func (x *ShardReplicationFixRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateVersionShardRequest) ProtoMessage() {} +func (*ShardReplicationFixRequest) ProtoMessage() {} -func (x *ValidateVersionShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[175] +func (x *ShardReplicationFixRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[161] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10039,50 +10264,60 @@ func (x *ValidateVersionShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateVersionShardRequest.ProtoReflect.Descriptor instead. -func (*ValidateVersionShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{175} +// Deprecated: Use ShardReplicationFixRequest.ProtoReflect.Descriptor instead. +func (*ShardReplicationFixRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{161} } -func (x *ValidateVersionShardRequest) GetKeyspace() string { +func (x *ShardReplicationFixRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *ValidateVersionShardRequest) GetShard() string { +func (x *ShardReplicationFixRequest) GetShard() string { if x != nil { return x.Shard } return "" } -type ValidateVersionShardResponse struct { +func (x *ShardReplicationFixRequest) GetCell() string { + if x != nil { + return x.Cell + } + return "" +} + +type ShardReplicationFixResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + // Error contains information about the error fixed by a + // ShardReplicationFix RPC. If there were no errors to fix (i.e. all nodes + // in the replication graph are valid), this field is nil. + Error *topodata.ShardReplicationError `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` } -func (x *ValidateVersionShardResponse) Reset() { - *x = ValidateVersionShardResponse{} +func (x *ShardReplicationFixResponse) Reset() { + *x = ShardReplicationFixResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[176] + mi := &file_vtctldata_proto_msgTypes[162] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateVersionShardResponse) String() string { +func (x *ShardReplicationFixResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateVersionShardResponse) ProtoMessage() {} +func (*ShardReplicationFixResponse) ProtoMessage() {} -func (x *ValidateVersionShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[176] +func (x *ShardReplicationFixResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[162] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10093,46 +10328,44 @@ func (x *ValidateVersionShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateVersionShardResponse.ProtoReflect.Descriptor instead. -func (*ValidateVersionShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{176} +// Deprecated: Use ShardReplicationFixResponse.ProtoReflect.Descriptor instead. +func (*ShardReplicationFixResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{162} } -func (x *ValidateVersionShardResponse) GetResults() []string { +func (x *ShardReplicationFixResponse) GetError() *topodata.ShardReplicationError { if x != nil { - return x.Results + return x.Error } return nil } -type ValidateVSchemaRequest struct { +type ShardReplicationPositionsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shards []string `protobuf:"bytes,2,rep,name=shards,proto3" json:"shards,omitempty"` - ExcludeTables []string `protobuf:"bytes,3,rep,name=exclude_tables,json=excludeTables,proto3" json:"exclude_tables,omitempty"` - IncludeViews bool `protobuf:"varint,4,opt,name=include_views,json=includeViews,proto3" json:"include_views,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` } -func (x *ValidateVSchemaRequest) Reset() { - *x = ValidateVSchemaRequest{} +func (x *ShardReplicationPositionsRequest) Reset() { + *x = ShardReplicationPositionsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[177] + mi := &file_vtctldata_proto_msgTypes[163] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateVSchemaRequest) String() string { +func (x *ShardReplicationPositionsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateVSchemaRequest) ProtoMessage() {} +func (*ShardReplicationPositionsRequest) ProtoMessage() {} -func (x *ValidateVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[177] +func (x *ShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[163] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10143,65 +10376,55 @@ func (x *ValidateVSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateVSchemaRequest.ProtoReflect.Descriptor instead. -func (*ValidateVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{177} +// Deprecated: Use ShardReplicationPositionsRequest.ProtoReflect.Descriptor instead. +func (*ShardReplicationPositionsRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{163} } -func (x *ValidateVSchemaRequest) GetKeyspace() string { +func (x *ShardReplicationPositionsRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *ValidateVSchemaRequest) GetShards() []string { - if x != nil { - return x.Shards - } - return nil -} - -func (x *ValidateVSchemaRequest) GetExcludeTables() []string { - if x != nil { - return x.ExcludeTables - } - return nil -} - -func (x *ValidateVSchemaRequest) GetIncludeViews() bool { +func (x *ShardReplicationPositionsRequest) GetShard() string { if x != nil { - return x.IncludeViews + return x.Shard } - return false + return "" } -type ValidateVSchemaResponse struct { +type ShardReplicationPositionsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` - ResultsByShard map[string]*ValidateShardResponse `protobuf:"bytes,2,rep,name=results_by_shard,json=resultsByShard,proto3" json:"results_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ReplicationStatuses is a mapping of tablet alias string to replication + // status for that tablet. + ReplicationStatuses map[string]*replicationdata.Status `protobuf:"bytes,1,rep,name=replication_statuses,json=replicationStatuses,proto3" json:"replication_statuses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // TabletMap is the set of tablets whose replication statuses were queried, + // keyed by tablet alias. + TabletMap map[string]*topodata.Tablet `protobuf:"bytes,2,rep,name=tablet_map,json=tabletMap,proto3" json:"tablet_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *ValidateVSchemaResponse) Reset() { - *x = ValidateVSchemaResponse{} +func (x *ShardReplicationPositionsResponse) Reset() { + *x = ShardReplicationPositionsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[178] + mi := &file_vtctldata_proto_msgTypes[164] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidateVSchemaResponse) String() string { +func (x *ShardReplicationPositionsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateVSchemaResponse) ProtoMessage() {} +func (*ShardReplicationPositionsResponse) ProtoMessage() {} -func (x *ValidateVSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[178] +func (x *ShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[164] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10212,51 +10435,52 @@ func (x *ValidateVSchemaResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateVSchemaResponse.ProtoReflect.Descriptor instead. -func (*ValidateVSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{178} +// Deprecated: Use ShardReplicationPositionsResponse.ProtoReflect.Descriptor instead. +func (*ShardReplicationPositionsResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{164} } -func (x *ValidateVSchemaResponse) GetResults() []string { +func (x *ShardReplicationPositionsResponse) GetReplicationStatuses() map[string]*replicationdata.Status { if x != nil { - return x.Results + return x.ReplicationStatuses } return nil } -func (x *ValidateVSchemaResponse) GetResultsByShard() map[string]*ValidateShardResponse { +func (x *ShardReplicationPositionsResponse) GetTabletMap() map[string]*topodata.Tablet { if x != nil { - return x.ResultsByShard + return x.TabletMap } return nil } -type Workflow_ReplicationLocation struct { +type ShardReplicationRemoveRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shards []string `protobuf:"bytes,2,rep,name=shards,proto3" json:"shards,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,3,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *Workflow_ReplicationLocation) Reset() { - *x = Workflow_ReplicationLocation{} +func (x *ShardReplicationRemoveRequest) Reset() { + *x = ShardReplicationRemoveRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[180] + mi := &file_vtctldata_proto_msgTypes[165] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *Workflow_ReplicationLocation) String() string { +func (x *ShardReplicationRemoveRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Workflow_ReplicationLocation) ProtoMessage() {} +func (*ShardReplicationRemoveRequest) ProtoMessage() {} -func (x *Workflow_ReplicationLocation) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[180] +func (x *ShardReplicationRemoveRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[165] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10267,52 +10491,55 @@ func (x *Workflow_ReplicationLocation) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Workflow_ReplicationLocation.ProtoReflect.Descriptor instead. -func (*Workflow_ReplicationLocation) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{6, 1} +// Deprecated: Use ShardReplicationRemoveRequest.ProtoReflect.Descriptor instead. +func (*ShardReplicationRemoveRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{165} } -func (x *Workflow_ReplicationLocation) GetKeyspace() string { +func (x *ShardReplicationRemoveRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *Workflow_ReplicationLocation) GetShards() []string { +func (x *ShardReplicationRemoveRequest) GetShard() string { if x != nil { - return x.Shards + return x.Shard + } + return "" +} + +func (x *ShardReplicationRemoveRequest) GetTabletAlias() *topodata.TabletAlias { + if x != nil { + return x.TabletAlias } return nil } -type Workflow_ShardStream struct { +type ShardReplicationRemoveResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Streams []*Workflow_Stream `protobuf:"bytes,1,rep,name=streams,proto3" json:"streams,omitempty"` - TabletControls []*topodata.Shard_TabletControl `protobuf:"bytes,2,rep,name=tablet_controls,json=tabletControls,proto3" json:"tablet_controls,omitempty"` - IsPrimaryServing bool `protobuf:"varint,3,opt,name=is_primary_serving,json=isPrimaryServing,proto3" json:"is_primary_serving,omitempty"` } -func (x *Workflow_ShardStream) Reset() { - *x = Workflow_ShardStream{} +func (x *ShardReplicationRemoveResponse) Reset() { + *x = ShardReplicationRemoveResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[181] + mi := &file_vtctldata_proto_msgTypes[166] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *Workflow_ShardStream) String() string { +func (x *ShardReplicationRemoveResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Workflow_ShardStream) ProtoMessage() {} +func (*ShardReplicationRemoveResponse) ProtoMessage() {} -func (x *Workflow_ShardStream) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[181] +func (x *ShardReplicationRemoveResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[166] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10323,79 +10550,89 @@ func (x *Workflow_ShardStream) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Workflow_ShardStream.ProtoReflect.Descriptor instead. -func (*Workflow_ShardStream) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{6, 2} +// Deprecated: Use ShardReplicationRemoveResponse.ProtoReflect.Descriptor instead. +func (*ShardReplicationRemoveResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{166} } -func (x *Workflow_ShardStream) GetStreams() []*Workflow_Stream { - if x != nil { - return x.Streams +type SleepTabletRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + Duration *vttime.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"` +} + +func (x *SleepTabletRequest) Reset() { + *x = SleepTabletRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[167] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (x *Workflow_ShardStream) GetTabletControls() []*topodata.Shard_TabletControl { +func (x *SleepTabletRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SleepTabletRequest) ProtoMessage() {} + +func (x *SleepTabletRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[167] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SleepTabletRequest.ProtoReflect.Descriptor instead. +func (*SleepTabletRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{167} +} + +func (x *SleepTabletRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.TabletControls + return x.TabletAlias } return nil } -func (x *Workflow_ShardStream) GetIsPrimaryServing() bool { +func (x *SleepTabletRequest) GetDuration() *vttime.Duration { if x != nil { - return x.IsPrimaryServing + return x.Duration } - return false + return nil } -type Workflow_Stream struct { +type SleepTabletResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - Tablet *topodata.TabletAlias `protobuf:"bytes,3,opt,name=tablet,proto3" json:"tablet,omitempty"` - BinlogSource *binlogdata.BinlogSource `protobuf:"bytes,4,opt,name=binlog_source,json=binlogSource,proto3" json:"binlog_source,omitempty"` - Position string `protobuf:"bytes,5,opt,name=position,proto3" json:"position,omitempty"` - StopPosition string `protobuf:"bytes,6,opt,name=stop_position,json=stopPosition,proto3" json:"stop_position,omitempty"` - State string `protobuf:"bytes,7,opt,name=state,proto3" json:"state,omitempty"` - DbName string `protobuf:"bytes,8,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"` - TransactionTimestamp *vttime.Time `protobuf:"bytes,9,opt,name=transaction_timestamp,json=transactionTimestamp,proto3" json:"transaction_timestamp,omitempty"` - TimeUpdated *vttime.Time `protobuf:"bytes,10,opt,name=time_updated,json=timeUpdated,proto3" json:"time_updated,omitempty"` - Message string `protobuf:"bytes,11,opt,name=message,proto3" json:"message,omitempty"` - CopyStates []*Workflow_Stream_CopyState `protobuf:"bytes,12,rep,name=copy_states,json=copyStates,proto3" json:"copy_states,omitempty"` - Logs []*Workflow_Stream_Log `protobuf:"bytes,13,rep,name=logs,proto3" json:"logs,omitempty"` - // LogFetchError is set if we fail to fetch some logs for this stream. We - // will never fail to fetch workflows because we cannot fetch the logs, but - // we will still forward log-fetch errors to the caller, should that be - // relevant to the context in which they are fetching workflows. - // - // Note that this field being set does not necessarily mean that Logs is nil; - // if there are N logs that exist for the stream, and we fail to fetch the - // ith log, we will still return logs in [0, i) + (i, N]. - LogFetchError string `protobuf:"bytes,14,opt,name=log_fetch_error,json=logFetchError,proto3" json:"log_fetch_error,omitempty"` - Tags []string `protobuf:"bytes,15,rep,name=tags,proto3" json:"tags,omitempty"` } -func (x *Workflow_Stream) Reset() { - *x = Workflow_Stream{} +func (x *SleepTabletResponse) Reset() { + *x = SleepTabletResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[182] + mi := &file_vtctldata_proto_msgTypes[168] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *Workflow_Stream) String() string { +func (x *SleepTabletResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Workflow_Stream) ProtoMessage() {} +func (*SleepTabletResponse) ProtoMessage() {} -func (x *Workflow_Stream) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[182] +func (x *SleepTabletResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[168] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10406,142 +10643,136 @@ func (x *Workflow_Stream) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Workflow_Stream.ProtoReflect.Descriptor instead. -func (*Workflow_Stream) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{6, 3} +// Deprecated: Use SleepTabletResponse.ProtoReflect.Descriptor instead. +func (*SleepTabletResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{168} } -func (x *Workflow_Stream) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} +type SourceShardAddRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (x *Workflow_Stream) GetShard() string { - if x != nil { - return x.Shard - } - return "" + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + Uid int32 `protobuf:"varint,3,opt,name=uid,proto3" json:"uid,omitempty"` + SourceKeyspace string `protobuf:"bytes,4,opt,name=source_keyspace,json=sourceKeyspace,proto3" json:"source_keyspace,omitempty"` + SourceShard string `protobuf:"bytes,5,opt,name=source_shard,json=sourceShard,proto3" json:"source_shard,omitempty"` + // KeyRange identifies the key range to use for the SourceShard. This field is + // optional. + KeyRange *topodata.KeyRange `protobuf:"bytes,6,opt,name=key_range,json=keyRange,proto3" json:"key_range,omitempty"` + // Tables is a list of tables replicate (for MoveTables). Each "table" can be + // either an exact match or a regular expression of the form "/regexp/". + Tables []string `protobuf:"bytes,7,rep,name=tables,proto3" json:"tables,omitempty"` } -func (x *Workflow_Stream) GetTablet() *topodata.TabletAlias { - if x != nil { - return x.Tablet +func (x *SourceShardAddRequest) Reset() { + *x = SourceShardAddRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[169] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (x *Workflow_Stream) GetBinlogSource() *binlogdata.BinlogSource { - if x != nil { - return x.BinlogSource - } - return nil +func (x *SourceShardAddRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *Workflow_Stream) GetPosition() string { - if x != nil { - return x.Position - } - return "" -} +func (*SourceShardAddRequest) ProtoMessage() {} -func (x *Workflow_Stream) GetStopPosition() string { - if x != nil { - return x.StopPosition +func (x *SourceShardAddRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[169] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (x *Workflow_Stream) GetState() string { - if x != nil { - return x.State - } - return "" +// Deprecated: Use SourceShardAddRequest.ProtoReflect.Descriptor instead. +func (*SourceShardAddRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{169} } -func (x *Workflow_Stream) GetDbName() string { +func (x *SourceShardAddRequest) GetKeyspace() string { if x != nil { - return x.DbName + return x.Keyspace } return "" } -func (x *Workflow_Stream) GetTransactionTimestamp() *vttime.Time { +func (x *SourceShardAddRequest) GetShard() string { if x != nil { - return x.TransactionTimestamp + return x.Shard } - return nil + return "" } -func (x *Workflow_Stream) GetTimeUpdated() *vttime.Time { +func (x *SourceShardAddRequest) GetUid() int32 { if x != nil { - return x.TimeUpdated + return x.Uid } - return nil + return 0 } -func (x *Workflow_Stream) GetMessage() string { +func (x *SourceShardAddRequest) GetSourceKeyspace() string { if x != nil { - return x.Message + return x.SourceKeyspace } return "" } -func (x *Workflow_Stream) GetCopyStates() []*Workflow_Stream_CopyState { +func (x *SourceShardAddRequest) GetSourceShard() string { if x != nil { - return x.CopyStates + return x.SourceShard } - return nil + return "" } -func (x *Workflow_Stream) GetLogs() []*Workflow_Stream_Log { +func (x *SourceShardAddRequest) GetKeyRange() *topodata.KeyRange { if x != nil { - return x.Logs + return x.KeyRange } return nil } -func (x *Workflow_Stream) GetLogFetchError() string { - if x != nil { - return x.LogFetchError - } - return "" -} - -func (x *Workflow_Stream) GetTags() []string { +func (x *SourceShardAddRequest) GetTables() []string { if x != nil { - return x.Tags + return x.Tables } return nil } -type Workflow_Stream_CopyState struct { +type SourceShardAddResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` - LastPk string `protobuf:"bytes,2,opt,name=last_pk,json=lastPk,proto3" json:"last_pk,omitempty"` + // Shard is the updated shard record. + Shard *topodata.Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` } -func (x *Workflow_Stream_CopyState) Reset() { - *x = Workflow_Stream_CopyState{} +func (x *SourceShardAddResponse) Reset() { + *x = SourceShardAddResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[183] + mi := &file_vtctldata_proto_msgTypes[170] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *Workflow_Stream_CopyState) String() string { +func (x *SourceShardAddResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Workflow_Stream_CopyState) ProtoMessage() {} +func (*SourceShardAddResponse) ProtoMessage() {} -func (x *Workflow_Stream_CopyState) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[183] +func (x *SourceShardAddResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[170] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10552,57 +10783,45 @@ func (x *Workflow_Stream_CopyState) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Workflow_Stream_CopyState.ProtoReflect.Descriptor instead. -func (*Workflow_Stream_CopyState) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{6, 3, 0} -} - -func (x *Workflow_Stream_CopyState) GetTable() string { - if x != nil { - return x.Table - } - return "" +// Deprecated: Use SourceShardAddResponse.ProtoReflect.Descriptor instead. +func (*SourceShardAddResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{170} } -func (x *Workflow_Stream_CopyState) GetLastPk() string { +func (x *SourceShardAddResponse) GetShard() *topodata.Shard { if x != nil { - return x.LastPk + return x.Shard } - return "" + return nil } -type Workflow_Stream_Log struct { +type SourceShardDeleteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - StreamId int64 `protobuf:"varint,2,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"` - Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` - State string `protobuf:"bytes,4,opt,name=state,proto3" json:"state,omitempty"` - CreatedAt *vttime.Time `protobuf:"bytes,5,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - UpdatedAt *vttime.Time `protobuf:"bytes,6,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` - Message string `protobuf:"bytes,7,opt,name=message,proto3" json:"message,omitempty"` - Count int64 `protobuf:"varint,8,opt,name=count,proto3" json:"count,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + Uid int32 `protobuf:"varint,3,opt,name=uid,proto3" json:"uid,omitempty"` } -func (x *Workflow_Stream_Log) Reset() { - *x = Workflow_Stream_Log{} +func (x *SourceShardDeleteRequest) Reset() { + *x = SourceShardDeleteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[184] + mi := &file_vtctldata_proto_msgTypes[171] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *Workflow_Stream_Log) String() string { +func (x *SourceShardDeleteRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Workflow_Stream_Log) ProtoMessage() {} +func (*SourceShardDeleteRequest) ProtoMessage() {} -func (x *Workflow_Stream_Log) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[184] +func (x *SourceShardDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[171] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10613,92 +10832,190 @@ func (x *Workflow_Stream_Log) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Workflow_Stream_Log.ProtoReflect.Descriptor instead. -func (*Workflow_Stream_Log) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{6, 3, 1} +// Deprecated: Use SourceShardDeleteRequest.ProtoReflect.Descriptor instead. +func (*SourceShardDeleteRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{171} } -func (x *Workflow_Stream_Log) GetId() int64 { +func (x *SourceShardDeleteRequest) GetKeyspace() string { if x != nil { - return x.Id + return x.Keyspace } - return 0 + return "" } -func (x *Workflow_Stream_Log) GetStreamId() int64 { +func (x *SourceShardDeleteRequest) GetShard() string { if x != nil { - return x.StreamId + return x.Shard } - return 0 + return "" } -func (x *Workflow_Stream_Log) GetType() string { +func (x *SourceShardDeleteRequest) GetUid() int32 { if x != nil { - return x.Type + return x.Uid } - return "" + return 0 } -func (x *Workflow_Stream_Log) GetState() string { - if x != nil { - return x.State +type SourceShardDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Shard is the updated shard record. + Shard *topodata.Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` +} + +func (x *SourceShardDeleteResponse) Reset() { + *x = SourceShardDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[172] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (x *Workflow_Stream_Log) GetCreatedAt() *vttime.Time { +func (x *SourceShardDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceShardDeleteResponse) ProtoMessage() {} + +func (x *SourceShardDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[172] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceShardDeleteResponse.ProtoReflect.Descriptor instead. +func (*SourceShardDeleteResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{172} +} + +func (x *SourceShardDeleteResponse) GetShard() *topodata.Shard { if x != nil { - return x.CreatedAt + return x.Shard } return nil } -func (x *Workflow_Stream_Log) GetUpdatedAt() *vttime.Time { +type StartReplicationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` +} + +func (x *StartReplicationRequest) Reset() { + *x = StartReplicationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[173] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartReplicationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartReplicationRequest) ProtoMessage() {} + +func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[173] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartReplicationRequest.ProtoReflect.Descriptor instead. +func (*StartReplicationRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{173} +} + +func (x *StartReplicationRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.UpdatedAt + return x.TabletAlias } return nil } -func (x *Workflow_Stream_Log) GetMessage() string { - if x != nil { - return x.Message +type StartReplicationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StartReplicationResponse) Reset() { + *x = StartReplicationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[174] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (x *Workflow_Stream_Log) GetCount() int64 { - if x != nil { - return x.Count +func (x *StartReplicationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartReplicationResponse) ProtoMessage() {} + +func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[174] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -type GetSrvKeyspaceNamesResponse_NameList struct { +// Deprecated: Use StartReplicationResponse.ProtoReflect.Descriptor instead. +func (*StartReplicationResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{174} +} + +type StopReplicationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` } -func (x *GetSrvKeyspaceNamesResponse_NameList) Reset() { - *x = GetSrvKeyspaceNamesResponse_NameList{} +func (x *StopReplicationRequest) Reset() { + *x = StopReplicationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[188] + mi := &file_vtctldata_proto_msgTypes[175] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetSrvKeyspaceNamesResponse_NameList) String() string { +func (x *StopReplicationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvKeyspaceNamesResponse_NameList) ProtoMessage() {} +func (*StopReplicationRequest) ProtoMessage() {} -func (x *GetSrvKeyspaceNamesResponse_NameList) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[188] +func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[175] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10709,1795 +11026,5363 @@ func (x *GetSrvKeyspaceNamesResponse_NameList) ProtoReflect() protoreflect.Messa return mi.MessageOf(x) } -// Deprecated: Use GetSrvKeyspaceNamesResponse_NameList.ProtoReflect.Descriptor instead. -func (*GetSrvKeyspaceNamesResponse_NameList) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{75, 1} +// Deprecated: Use StopReplicationRequest.ProtoReflect.Descriptor instead. +func (*StopReplicationRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{175} } -func (x *GetSrvKeyspaceNamesResponse_NameList) GetNames() []string { +func (x *StopReplicationRequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Names + return x.TabletAlias } return nil } -var File_vtctldata_proto protoreflect.FileDescriptor +type StopReplicationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} -var file_vtctldata_proto_rawDesc = []byte{ - 0x0a, 0x0f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x09, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x10, 0x62, 0x69, - 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0d, - 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x6d, - 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x17, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0d, 0x76, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, 0x76, 0x74, 0x72, 0x70, 0x63, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0c, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x57, 0x0a, 0x1a, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x56, - 0x74, 0x63, 0x74, 0x6c, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x43, 0x0a, - 0x1b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x43, 0x6f, 0x6d, - 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, - 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x22, 0x89, 0x01, 0x0a, 0x18, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x74, 0x65, - 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, - 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x78, 0x70, - 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x64, 0x6c, 0x22, 0xf4, - 0x04, 0x0a, 0x13, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, - 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, - 0x74, 0x6f, 0x70, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x4a, 0x0a, 0x0e, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, - 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x21, 0x0a, 0x0c, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, - 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x57, 0x0a, 0x16, 0x6d, 0x61, - 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x15, 0x6d, 0x61, - 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, - 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x28, 0x0a, - 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, - 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, - 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x15, 0x0a, 0x06, - 0x6f, 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x6e, - 0x44, 0x64, 0x6c, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, - 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, - 0x79, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x4e, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5e, 0x0a, 0x05, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, - 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, - 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xd2, 0x0c, 0x0a, 0x08, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x5f, - 0x76, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, - 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x56, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x12, 0x4a, 0x0a, 0x0d, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x11, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x60, 0x0a, 0x11, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x49, 0x0a, 0x13, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x73, 0x1a, 0xb9, 0x01, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x34, 0x0a, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x52, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x46, 0x0a, 0x0f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x10, 0x69, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, - 0x67, 0x1a, 0xf6, 0x06, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x12, 0x3d, 0x0a, 0x0d, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, - 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x52, 0x0c, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, - 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x70, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x41, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x14, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x12, 0x2f, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, - 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x45, - 0x0a, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, - 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x70, 0x79, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x0d, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, - 0x4c, 0x6f, 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6c, 0x6f, 0x67, - 0x5f, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0e, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x67, 0x46, 0x65, 0x74, 0x63, 0x68, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x04, 0x74, 0x61, 0x67, 0x73, 0x1a, 0x3a, 0x0a, 0x09, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x6c, 0x61, 0x73, 0x74, - 0x5f, 0x70, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x50, - 0x6b, 0x1a, 0xe6, 0x01, 0x0a, 0x03, 0x4c, 0x6f, 0x67, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x2b, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2b, 0x0a, - 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, - 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x59, 0x0a, 0x12, 0x41, 0x64, - 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, - 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, - 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x40, 0x0a, 0x14, - 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x17, - 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9e, 0x01, 0x0a, 0x18, 0x41, 0x70, 0x70, 0x6c, - 0x79, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, - 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, - 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, - 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63, - 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x1b, 0x0a, 0x19, 0x41, 0x70, 0x70, 0x6c, - 0x79, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb3, 0x01, 0x0a, 0x1d, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, - 0x52, 0x11, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, - 0x6c, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, - 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, - 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x20, 0x0a, 0x1e, 0x41, - 0x70, 0x70, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, - 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x86, 0x03, - 0x0a, 0x12, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x3a, 0x0a, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x75, - 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x17, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4c, 0x6f, 0x6e, 0x67, 0x55, 0x6e, - 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x73, 0x71, 0x6c, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x12, 0x21, - 0x0a, 0x0c, 0x64, 0x64, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x64, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, - 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x75, 0x69, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x75, 0x75, 0x69, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2b, - 0x0a, 0x11, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6d, 0x69, 0x67, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x44, 0x0a, 0x15, 0x77, - 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, - 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, - 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x6c, 0x69, - 0x67, 0x68, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x50, - 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, - 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, - 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x22, 0x32, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, - 0x09, 0x75, 0x75, 0x69, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x08, 0x75, 0x75, 0x69, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x22, 0xc3, 0x01, 0x0a, 0x13, 0x41, - 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, - 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, - 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, - 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, - 0x22, 0x44, 0x0a, 0x14, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0xc2, 0x01, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, - 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x63, - 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0e, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, - 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, - 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x22, 0x8d, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, - 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, - 0x22, 0x9b, 0x01, 0x0a, 0x17, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x64, - 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0xa6, - 0x01, 0x0a, 0x18, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0d, 0x62, - 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x12, 0x33, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0b, 0x61, 0x66, 0x74, 0x65, - 0x72, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1e, 0x0a, 0x0b, 0x77, 0x61, 0x73, 0x5f, 0x64, - 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x77, 0x61, - 0x73, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0xf1, 0x02, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x2f, 0x0a, 0x14, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x40, 0x0a, 0x0c, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, - 0x6d, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x73, 0x12, 0x2a, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61, - 0x73, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x31, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, - 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, - 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x49, 0x0a, 0x16, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8c, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x25, - 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0xa0, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, - 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, - 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x73, 0x68, 0x61, 0x72, 0x64, 0x41, 0x6c, 0x72, 0x65, 0x61, - 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x41, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, - 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, - 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x67, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, - 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, - 0x69, 0x76, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x06, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x06, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, - 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, - 0x69, 0x76, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x65, 0x76, 0x65, 0x6e, 0x5f, 0x69, 0x66, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, 0x76, - 0x65, 0x6e, 0x49, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x66, - 0x6f, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, - 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x0a, 0x17, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x79, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0e, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0d, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, - 0x17, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd0, 0x02, 0x0a, 0x1d, 0x45, 0x6d, 0x65, - 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, - 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3e, 0x0a, 0x0f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x73, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3f, 0x0a, 0x1c, 0x70, 0x72, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x63, 0x65, 0x6c, 0x6c, - 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x19, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x65, - 0x6c, 0x6c, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbc, 0x01, 0x0a, 0x1e, - 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, - 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x18, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, - 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, - 0x77, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x75, 0x73, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x22, 0x47, 0x0a, - 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, - 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xd3, 0x01, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, - 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, - 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x27, - 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x6f, 0x61, - 0x64, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, - 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x47, 0x0a, 0x19, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, - 0x41, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xa5, 0x01, 0x0a, 0x12, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x55, 0x0a, 0x13, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x5f, 0x68, 0x6f, 0x6f, 0x6b, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, - 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x11, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x5e, 0x0a, - 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0b, 0x68, 0x6f, 0x6f, 0x6b, 0x5f, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x52, 0x0a, 0x68, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x3c, 0x0a, - 0x1e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xbe, 0x01, 0x0a, 0x1f, - 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x4e, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x36, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, - 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x1a, - 0x4b, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9e, 0x01, 0x0a, - 0x11, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, - 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, - 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x44, 0x0a, - 0x12, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x73, 0x22, 0x28, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x46, 0x0a, - 0x13, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, - 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, - 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x19, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, - 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x22, 0x30, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xb6, 0x01, 0x0a, - 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, - 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, - 0x73, 0x65, 0x73, 0x1a, 0x50, 0x0a, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, - 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x4c, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x46, 0x75, - 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x49, 0x0a, 0x14, - 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0x30, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x22, 0x51, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x22, 0x5a, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, - 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, - 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x55, 0x0a, 0x17, 0x47, 0x65, - 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, - 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, - 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, - 0x73, 0x22, 0xb0, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, - 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, - 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, - 0x69, 0x65, 0x77, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x28, - 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x73, 0x5f, 0x6f, 0x6e, - 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, - 0x69, 0x7a, 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x50, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x06, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x4c, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x22, 0x1d, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, - 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x6a, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, - 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, - 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, - 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x32, 0x0a, 0x1a, 0x47, - 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, - 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, - 0xf3, 0x01, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x47, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, - 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x1a, 0x69, 0x0a, 0x0a, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x4e, 0x61, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x20, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, - 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x4a, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, - 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x22, 0xcc, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, - 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x72, 0x76, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x1a, 0x56, 0x0a, 0x11, 0x53, 0x72, 0x76, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0xb7, 0x02, 0x0a, 0x1c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, - 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, - 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x21, 0x0a, - 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x12, 0x28, 0x0a, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x5f, 0x73, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x63, 0x68, - 0x65, 0x63, 0x6b, 0x5f, 0x61, 0x73, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x65, 0x6c, - 0x66, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x73, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x6c, 0x66, 0x12, 0x2f, 0x0a, 0x14, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x5f, 0x61, 0x73, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x73, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x22, 0x1f, 0x0a, 0x1d, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2a, 0x0a, 0x14, 0x47, - 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x4e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, - 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x35, 0x0a, 0x0c, 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x73, 0x72, 0x76, - 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x2d, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, - 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xc5, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, - 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x56, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x56, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x73, 0x72, - 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x1a, 0x53, 0x0a, 0x10, 0x53, 0x72, 0x76, - 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, - 0x0a, 0x10, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x3d, 0x0a, 0x11, - 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x28, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0xe8, 0x01, 0x0a, 0x11, - 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x72, - 0x69, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, - 0x74, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, - 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, - 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x40, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, - 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x2c, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, - 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x46, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, - 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x6f, 0x70, 0x6f, - 0x6c, 0x6f, 0x67, 0x79, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x66, - 0x0a, 0x0c, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, - 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x68, - 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x22, 0x2f, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x4d, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x2e, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x42, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, - 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, - 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x52, 0x0a, 0x13, 0x47, 0x65, - 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a, - 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x49, - 0x0a, 0x14, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x09, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0xfb, 0x01, 0x0a, 0x17, 0x49, 0x6e, - 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x52, 0x0a, 0x1a, 0x70, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x5f, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, - 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, - 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x52, 0x17, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x45, 0x6c, 0x65, 0x63, 0x74, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, - 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, - 0x65, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, - 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x42, 0x0a, 0x18, 0x49, 0x6e, 0x69, 0x74, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x4d, 0x0a, 0x11, 0x50, - 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x50, 0x69, - 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x89, 0x02, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, - 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x0d, 0x61, 0x76, - 0x6f, 0x69, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0c, 0x61, 0x76, 0x6f, 0x69, 0x64, 0x50, - 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xba, 0x01, 0x0a, - 0x1c, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, - 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x74, 0x0a, 0x1b, 0x52, 0x65, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, - 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x22, - 0x1e, 0x0a, 0x1c, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x32, 0x0a, 0x1a, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, - 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, - 0x6c, 0x6c, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x4f, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x64, 0x0a, 0x1a, 0x52, - 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, - 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x22, 0x83, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, - 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, - 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, - 0x36, 0x0a, 0x17, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, - 0x73, 0x68, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x15, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, - 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x4f, 0x0a, 0x13, 0x52, 0x65, 0x6c, 0x6f, 0x61, - 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, - 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, - 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0xa9, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, - 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, - 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x46, 0x0a, 0x1c, - 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, - 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x73, 0x22, 0xbc, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, - 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, - 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, - 0x6e, 0x63, 0x79, 0x22, 0x43, 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x5b, 0x0a, 0x13, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7f, 0x0a, - 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, - 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, - 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, - 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x1c, - 0x0a, 0x1a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, - 0x16, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, - 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x46, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, - 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0x7b, 0x0a, - 0x16, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0xc2, 0x01, 0x0a, 0x18, 0x52, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x12, 0x2d, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x70, - 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x54, 0x6f, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, - 0xad, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, - 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, - 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, - 0x51, 0x0a, 0x15, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6d, 0x0a, 0x22, - 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, - 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b, - 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x75, 0x72, 0x61, 0x62, - 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x55, 0x0a, 0x23, 0x53, - 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, - 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x22, 0xc8, 0x01, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06, - 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x4f, 0x0a, - 0x1d, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, - 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5e, - 0x0a, 0x1e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, - 0x63, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x51, - 0x0a, 0x1f, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x22, 0x72, 0x0a, 0x1f, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, - 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x6e, 0x67, 0x22, 0x49, 0x0a, 0x20, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, - 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x22, 0x8e, 0x02, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, - 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, - 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, - 0x65, 0x22, 0x46, 0x0a, 0x1d, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x6a, 0x0a, 0x12, 0x53, 0x65, 0x74, - 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x72, 0x69, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x77, 0x72, 0x69, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x88, 0x01, 0x0a, - 0x1a, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, - 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x62, 0x0a, 0x1a, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x54, 0x0a, 0x1b, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, - 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x22, 0x54, 0x0a, 0x20, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xaa, 0x03, 0x0a, 0x21, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x14, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x5a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, - 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, - 0x61, 0x70, 0x1a, 0x5f, 0x0a, 0x18, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x4e, 0x0a, 0x0e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x8b, 0x01, 0x0a, 0x1d, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x22, 0x20, 0x0a, 0x1e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x7c, 0x0a, 0x12, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x12, 0x2c, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf0, 0x01, 0x0a, 0x15, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x07, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x3f, 0x0a, 0x16, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x5e, 0x0a, 0x18, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x75, 0x69, 0x64, 0x22, 0x42, 0x0a, 0x19, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x22, 0x53, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x52, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x52, 0x0a, 0x21, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x22, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, - 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, - 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x0a, 0x6f, 0x6c, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5c, 0x0a, - 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, - 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x5d, 0x0a, 0x16, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, - 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x64, 0x0a, 0x17, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, - 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x22, 0x65, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, - 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x34, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, - 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfb, 0x01, - 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x1a, 0x69, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x58, 0x0a, 0x17, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfc, 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x10, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, - 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd8, 0x01, 0x0a, 0x1d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, - 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, - 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x26, - 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x6e, 0x6f, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x4e, 0x6f, 0x50, - 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x5f, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, - 0x88, 0x02, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x10, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, - 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6b, 0x0a, 0x14, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x31, 0x0a, 0x15, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x3c, 0x0a, 0x1e, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x02, 0x0a, 0x1f, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x68, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x3e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4f, 0x0a, 0x1b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x38, 0x0a, 0x1c, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, - 0x22, 0x98, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, - 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, - 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x22, 0xfa, 0x01, 0x0a, 0x17, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x73, 0x12, 0x60, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x4a, 0x0a, 0x15, 0x4d, 0x61, 0x74, 0x65, - 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x00, 0x12, 0x0e, 0x0a, - 0x0a, 0x4d, 0x4f, 0x56, 0x45, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a, - 0x11, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x49, 0x4e, 0x44, - 0x45, 0x58, 0x10, 0x02, 0x42, 0x28, 0x5a, 0x26, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, - 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +func (x *StopReplicationResponse) Reset() { + *x = StopReplicationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[176] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -var ( - file_vtctldata_proto_rawDescOnce sync.Once - file_vtctldata_proto_rawDescData = file_vtctldata_proto_rawDesc -) +func (x *StopReplicationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} -func file_vtctldata_proto_rawDescGZIP() []byte { - file_vtctldata_proto_rawDescOnce.Do(func() { - file_vtctldata_proto_rawDescData = protoimpl.X.CompressGZIP(file_vtctldata_proto_rawDescData) - }) - return file_vtctldata_proto_rawDescData +func (*StopReplicationResponse) ProtoMessage() {} + +func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[176] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var file_vtctldata_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_vtctldata_proto_msgTypes = make([]protoimpl.MessageInfo, 198) -var file_vtctldata_proto_goTypes = []interface{}{ - (MaterializationIntent)(0), // 0: vtctldata.MaterializationIntent - (*ExecuteVtctlCommandRequest)(nil), // 1: vtctldata.ExecuteVtctlCommandRequest - (*ExecuteVtctlCommandResponse)(nil), // 2: vtctldata.ExecuteVtctlCommandResponse - (*TableMaterializeSettings)(nil), // 3: vtctldata.TableMaterializeSettings - (*MaterializeSettings)(nil), // 4: vtctldata.MaterializeSettings - (*Keyspace)(nil), // 5: vtctldata.Keyspace - (*Shard)(nil), // 6: vtctldata.Shard - (*Workflow)(nil), // 7: vtctldata.Workflow - (*AddCellInfoRequest)(nil), // 8: vtctldata.AddCellInfoRequest - (*AddCellInfoResponse)(nil), // 9: vtctldata.AddCellInfoResponse - (*AddCellsAliasRequest)(nil), // 10: vtctldata.AddCellsAliasRequest - (*AddCellsAliasResponse)(nil), // 11: vtctldata.AddCellsAliasResponse - (*ApplyRoutingRulesRequest)(nil), // 12: vtctldata.ApplyRoutingRulesRequest - (*ApplyRoutingRulesResponse)(nil), // 13: vtctldata.ApplyRoutingRulesResponse - (*ApplyShardRoutingRulesRequest)(nil), // 14: vtctldata.ApplyShardRoutingRulesRequest - (*ApplyShardRoutingRulesResponse)(nil), // 15: vtctldata.ApplyShardRoutingRulesResponse - (*ApplySchemaRequest)(nil), // 16: vtctldata.ApplySchemaRequest - (*ApplySchemaResponse)(nil), // 17: vtctldata.ApplySchemaResponse - (*ApplyVSchemaRequest)(nil), // 18: vtctldata.ApplyVSchemaRequest - (*ApplyVSchemaResponse)(nil), // 19: vtctldata.ApplyVSchemaResponse - (*BackupRequest)(nil), // 20: vtctldata.BackupRequest - (*BackupResponse)(nil), // 21: vtctldata.BackupResponse - (*BackupShardRequest)(nil), // 22: vtctldata.BackupShardRequest - (*ChangeTabletTypeRequest)(nil), // 23: vtctldata.ChangeTabletTypeRequest - (*ChangeTabletTypeResponse)(nil), // 24: vtctldata.ChangeTabletTypeResponse - (*CreateKeyspaceRequest)(nil), // 25: vtctldata.CreateKeyspaceRequest - (*CreateKeyspaceResponse)(nil), // 26: vtctldata.CreateKeyspaceResponse - (*CreateShardRequest)(nil), // 27: vtctldata.CreateShardRequest - (*CreateShardResponse)(nil), // 28: vtctldata.CreateShardResponse - (*DeleteCellInfoRequest)(nil), // 29: vtctldata.DeleteCellInfoRequest - (*DeleteCellInfoResponse)(nil), // 30: vtctldata.DeleteCellInfoResponse - (*DeleteCellsAliasRequest)(nil), // 31: vtctldata.DeleteCellsAliasRequest - (*DeleteCellsAliasResponse)(nil), // 32: vtctldata.DeleteCellsAliasResponse - (*DeleteKeyspaceRequest)(nil), // 33: vtctldata.DeleteKeyspaceRequest - (*DeleteKeyspaceResponse)(nil), // 34: vtctldata.DeleteKeyspaceResponse - (*DeleteShardsRequest)(nil), // 35: vtctldata.DeleteShardsRequest - (*DeleteShardsResponse)(nil), // 36: vtctldata.DeleteShardsResponse - (*DeleteSrvVSchemaRequest)(nil), // 37: vtctldata.DeleteSrvVSchemaRequest - (*DeleteSrvVSchemaResponse)(nil), // 38: vtctldata.DeleteSrvVSchemaResponse - (*DeleteTabletsRequest)(nil), // 39: vtctldata.DeleteTabletsRequest - (*DeleteTabletsResponse)(nil), // 40: vtctldata.DeleteTabletsResponse - (*EmergencyReparentShardRequest)(nil), // 41: vtctldata.EmergencyReparentShardRequest - (*EmergencyReparentShardResponse)(nil), // 42: vtctldata.EmergencyReparentShardResponse - (*ExecuteFetchAsAppRequest)(nil), // 43: vtctldata.ExecuteFetchAsAppRequest - (*ExecuteFetchAsAppResponse)(nil), // 44: vtctldata.ExecuteFetchAsAppResponse - (*ExecuteFetchAsDBARequest)(nil), // 45: vtctldata.ExecuteFetchAsDBARequest - (*ExecuteFetchAsDBAResponse)(nil), // 46: vtctldata.ExecuteFetchAsDBAResponse - (*ExecuteHookRequest)(nil), // 47: vtctldata.ExecuteHookRequest - (*ExecuteHookResponse)(nil), // 48: vtctldata.ExecuteHookResponse - (*FindAllShardsInKeyspaceRequest)(nil), // 49: vtctldata.FindAllShardsInKeyspaceRequest - (*FindAllShardsInKeyspaceResponse)(nil), // 50: vtctldata.FindAllShardsInKeyspaceResponse - (*GetBackupsRequest)(nil), // 51: vtctldata.GetBackupsRequest - (*GetBackupsResponse)(nil), // 52: vtctldata.GetBackupsResponse - (*GetCellInfoRequest)(nil), // 53: vtctldata.GetCellInfoRequest - (*GetCellInfoResponse)(nil), // 54: vtctldata.GetCellInfoResponse - (*GetCellInfoNamesRequest)(nil), // 55: vtctldata.GetCellInfoNamesRequest - (*GetCellInfoNamesResponse)(nil), // 56: vtctldata.GetCellInfoNamesResponse - (*GetCellsAliasesRequest)(nil), // 57: vtctldata.GetCellsAliasesRequest - (*GetCellsAliasesResponse)(nil), // 58: vtctldata.GetCellsAliasesResponse - (*GetFullStatusRequest)(nil), // 59: vtctldata.GetFullStatusRequest - (*GetFullStatusResponse)(nil), // 60: vtctldata.GetFullStatusResponse - (*GetKeyspacesRequest)(nil), // 61: vtctldata.GetKeyspacesRequest - (*GetKeyspacesResponse)(nil), // 62: vtctldata.GetKeyspacesResponse - (*GetKeyspaceRequest)(nil), // 63: vtctldata.GetKeyspaceRequest - (*GetKeyspaceResponse)(nil), // 64: vtctldata.GetKeyspaceResponse - (*GetPermissionsRequest)(nil), // 65: vtctldata.GetPermissionsRequest - (*GetPermissionsResponse)(nil), // 66: vtctldata.GetPermissionsResponse - (*GetRoutingRulesRequest)(nil), // 67: vtctldata.GetRoutingRulesRequest - (*GetRoutingRulesResponse)(nil), // 68: vtctldata.GetRoutingRulesResponse - (*GetSchemaRequest)(nil), // 69: vtctldata.GetSchemaRequest - (*GetSchemaResponse)(nil), // 70: vtctldata.GetSchemaResponse - (*GetShardRequest)(nil), // 71: vtctldata.GetShardRequest - (*GetShardResponse)(nil), // 72: vtctldata.GetShardResponse - (*GetShardRoutingRulesRequest)(nil), // 73: vtctldata.GetShardRoutingRulesRequest - (*GetShardRoutingRulesResponse)(nil), // 74: vtctldata.GetShardRoutingRulesResponse - (*GetSrvKeyspaceNamesRequest)(nil), // 75: vtctldata.GetSrvKeyspaceNamesRequest - (*GetSrvKeyspaceNamesResponse)(nil), // 76: vtctldata.GetSrvKeyspaceNamesResponse - (*GetSrvKeyspacesRequest)(nil), // 77: vtctldata.GetSrvKeyspacesRequest - (*GetSrvKeyspacesResponse)(nil), // 78: vtctldata.GetSrvKeyspacesResponse - (*UpdateThrottlerConfigRequest)(nil), // 79: vtctldata.UpdateThrottlerConfigRequest - (*UpdateThrottlerConfigResponse)(nil), // 80: vtctldata.UpdateThrottlerConfigResponse - (*GetSrvVSchemaRequest)(nil), // 81: vtctldata.GetSrvVSchemaRequest - (*GetSrvVSchemaResponse)(nil), // 82: vtctldata.GetSrvVSchemaResponse - (*GetSrvVSchemasRequest)(nil), // 83: vtctldata.GetSrvVSchemasRequest - (*GetSrvVSchemasResponse)(nil), // 84: vtctldata.GetSrvVSchemasResponse - (*GetTabletRequest)(nil), // 85: vtctldata.GetTabletRequest - (*GetTabletResponse)(nil), // 86: vtctldata.GetTabletResponse - (*GetTabletsRequest)(nil), // 87: vtctldata.GetTabletsRequest - (*GetTabletsResponse)(nil), // 88: vtctldata.GetTabletsResponse - (*GetTopologyPathRequest)(nil), // 89: vtctldata.GetTopologyPathRequest - (*GetTopologyPathResponse)(nil), // 90: vtctldata.GetTopologyPathResponse - (*TopologyCell)(nil), // 91: vtctldata.TopologyCell - (*GetVSchemaRequest)(nil), // 92: vtctldata.GetVSchemaRequest - (*GetVersionRequest)(nil), // 93: vtctldata.GetVersionRequest - (*GetVersionResponse)(nil), // 94: vtctldata.GetVersionResponse - (*GetVSchemaResponse)(nil), // 95: vtctldata.GetVSchemaResponse - (*GetWorkflowsRequest)(nil), // 96: vtctldata.GetWorkflowsRequest - (*GetWorkflowsResponse)(nil), // 97: vtctldata.GetWorkflowsResponse - (*InitShardPrimaryRequest)(nil), // 98: vtctldata.InitShardPrimaryRequest - (*InitShardPrimaryResponse)(nil), // 99: vtctldata.InitShardPrimaryResponse - (*PingTabletRequest)(nil), // 100: vtctldata.PingTabletRequest - (*PingTabletResponse)(nil), // 101: vtctldata.PingTabletResponse - (*PlannedReparentShardRequest)(nil), // 102: vtctldata.PlannedReparentShardRequest - (*PlannedReparentShardResponse)(nil), // 103: vtctldata.PlannedReparentShardResponse - (*RebuildKeyspaceGraphRequest)(nil), // 104: vtctldata.RebuildKeyspaceGraphRequest - (*RebuildKeyspaceGraphResponse)(nil), // 105: vtctldata.RebuildKeyspaceGraphResponse - (*RebuildVSchemaGraphRequest)(nil), // 106: vtctldata.RebuildVSchemaGraphRequest - (*RebuildVSchemaGraphResponse)(nil), // 107: vtctldata.RebuildVSchemaGraphResponse - (*RefreshStateRequest)(nil), // 108: vtctldata.RefreshStateRequest - (*RefreshStateResponse)(nil), // 109: vtctldata.RefreshStateResponse - (*RefreshStateByShardRequest)(nil), // 110: vtctldata.RefreshStateByShardRequest - (*RefreshStateByShardResponse)(nil), // 111: vtctldata.RefreshStateByShardResponse - (*ReloadSchemaRequest)(nil), // 112: vtctldata.ReloadSchemaRequest - (*ReloadSchemaResponse)(nil), // 113: vtctldata.ReloadSchemaResponse - (*ReloadSchemaKeyspaceRequest)(nil), // 114: vtctldata.ReloadSchemaKeyspaceRequest - (*ReloadSchemaKeyspaceResponse)(nil), // 115: vtctldata.ReloadSchemaKeyspaceResponse - (*ReloadSchemaShardRequest)(nil), // 116: vtctldata.ReloadSchemaShardRequest - (*ReloadSchemaShardResponse)(nil), // 117: vtctldata.ReloadSchemaShardResponse - (*RemoveBackupRequest)(nil), // 118: vtctldata.RemoveBackupRequest - (*RemoveBackupResponse)(nil), // 119: vtctldata.RemoveBackupResponse - (*RemoveKeyspaceCellRequest)(nil), // 120: vtctldata.RemoveKeyspaceCellRequest - (*RemoveKeyspaceCellResponse)(nil), // 121: vtctldata.RemoveKeyspaceCellResponse - (*RemoveShardCellRequest)(nil), // 122: vtctldata.RemoveShardCellRequest - (*RemoveShardCellResponse)(nil), // 123: vtctldata.RemoveShardCellResponse - (*ReparentTabletRequest)(nil), // 124: vtctldata.ReparentTabletRequest - (*ReparentTabletResponse)(nil), // 125: vtctldata.ReparentTabletResponse - (*RestoreFromBackupRequest)(nil), // 126: vtctldata.RestoreFromBackupRequest - (*RestoreFromBackupResponse)(nil), // 127: vtctldata.RestoreFromBackupResponse - (*RunHealthCheckRequest)(nil), // 128: vtctldata.RunHealthCheckRequest - (*RunHealthCheckResponse)(nil), // 129: vtctldata.RunHealthCheckResponse - (*SetKeyspaceDurabilityPolicyRequest)(nil), // 130: vtctldata.SetKeyspaceDurabilityPolicyRequest - (*SetKeyspaceDurabilityPolicyResponse)(nil), // 131: vtctldata.SetKeyspaceDurabilityPolicyResponse - (*SetKeyspaceServedFromRequest)(nil), // 132: vtctldata.SetKeyspaceServedFromRequest - (*SetKeyspaceServedFromResponse)(nil), // 133: vtctldata.SetKeyspaceServedFromResponse - (*SetKeyspaceShardingInfoRequest)(nil), // 134: vtctldata.SetKeyspaceShardingInfoRequest - (*SetKeyspaceShardingInfoResponse)(nil), // 135: vtctldata.SetKeyspaceShardingInfoResponse - (*SetShardIsPrimaryServingRequest)(nil), // 136: vtctldata.SetShardIsPrimaryServingRequest - (*SetShardIsPrimaryServingResponse)(nil), // 137: vtctldata.SetShardIsPrimaryServingResponse - (*SetShardTabletControlRequest)(nil), // 138: vtctldata.SetShardTabletControlRequest - (*SetShardTabletControlResponse)(nil), // 139: vtctldata.SetShardTabletControlResponse - (*SetWritableRequest)(nil), // 140: vtctldata.SetWritableRequest - (*SetWritableResponse)(nil), // 141: vtctldata.SetWritableResponse - (*ShardReplicationAddRequest)(nil), // 142: vtctldata.ShardReplicationAddRequest - (*ShardReplicationAddResponse)(nil), // 143: vtctldata.ShardReplicationAddResponse - (*ShardReplicationFixRequest)(nil), // 144: vtctldata.ShardReplicationFixRequest - (*ShardReplicationFixResponse)(nil), // 145: vtctldata.ShardReplicationFixResponse - (*ShardReplicationPositionsRequest)(nil), // 146: vtctldata.ShardReplicationPositionsRequest - (*ShardReplicationPositionsResponse)(nil), // 147: vtctldata.ShardReplicationPositionsResponse - (*ShardReplicationRemoveRequest)(nil), // 148: vtctldata.ShardReplicationRemoveRequest - (*ShardReplicationRemoveResponse)(nil), // 149: vtctldata.ShardReplicationRemoveResponse - (*SleepTabletRequest)(nil), // 150: vtctldata.SleepTabletRequest - (*SleepTabletResponse)(nil), // 151: vtctldata.SleepTabletResponse - (*SourceShardAddRequest)(nil), // 152: vtctldata.SourceShardAddRequest - (*SourceShardAddResponse)(nil), // 153: vtctldata.SourceShardAddResponse - (*SourceShardDeleteRequest)(nil), // 154: vtctldata.SourceShardDeleteRequest - (*SourceShardDeleteResponse)(nil), // 155: vtctldata.SourceShardDeleteResponse - (*StartReplicationRequest)(nil), // 156: vtctldata.StartReplicationRequest - (*StartReplicationResponse)(nil), // 157: vtctldata.StartReplicationResponse - (*StopReplicationRequest)(nil), // 158: vtctldata.StopReplicationRequest - (*StopReplicationResponse)(nil), // 159: vtctldata.StopReplicationResponse - (*TabletExternallyReparentedRequest)(nil), // 160: vtctldata.TabletExternallyReparentedRequest - (*TabletExternallyReparentedResponse)(nil), // 161: vtctldata.TabletExternallyReparentedResponse - (*UpdateCellInfoRequest)(nil), // 162: vtctldata.UpdateCellInfoRequest - (*UpdateCellInfoResponse)(nil), // 163: vtctldata.UpdateCellInfoResponse - (*UpdateCellsAliasRequest)(nil), // 164: vtctldata.UpdateCellsAliasRequest - (*UpdateCellsAliasResponse)(nil), // 165: vtctldata.UpdateCellsAliasResponse - (*ValidateRequest)(nil), // 166: vtctldata.ValidateRequest - (*ValidateResponse)(nil), // 167: vtctldata.ValidateResponse - (*ValidateKeyspaceRequest)(nil), // 168: vtctldata.ValidateKeyspaceRequest - (*ValidateKeyspaceResponse)(nil), // 169: vtctldata.ValidateKeyspaceResponse - (*ValidateSchemaKeyspaceRequest)(nil), // 170: vtctldata.ValidateSchemaKeyspaceRequest - (*ValidateSchemaKeyspaceResponse)(nil), // 171: vtctldata.ValidateSchemaKeyspaceResponse - (*ValidateShardRequest)(nil), // 172: vtctldata.ValidateShardRequest - (*ValidateShardResponse)(nil), // 173: vtctldata.ValidateShardResponse - (*ValidateVersionKeyspaceRequest)(nil), // 174: vtctldata.ValidateVersionKeyspaceRequest - (*ValidateVersionKeyspaceResponse)(nil), // 175: vtctldata.ValidateVersionKeyspaceResponse - (*ValidateVersionShardRequest)(nil), // 176: vtctldata.ValidateVersionShardRequest - (*ValidateVersionShardResponse)(nil), // 177: vtctldata.ValidateVersionShardResponse - (*ValidateVSchemaRequest)(nil), // 178: vtctldata.ValidateVSchemaRequest - (*ValidateVSchemaResponse)(nil), // 179: vtctldata.ValidateVSchemaResponse - nil, // 180: vtctldata.Workflow.ShardStreamsEntry - (*Workflow_ReplicationLocation)(nil), // 181: vtctldata.Workflow.ReplicationLocation - (*Workflow_ShardStream)(nil), // 182: vtctldata.Workflow.ShardStream - (*Workflow_Stream)(nil), // 183: vtctldata.Workflow.Stream - (*Workflow_Stream_CopyState)(nil), // 184: vtctldata.Workflow.Stream.CopyState - (*Workflow_Stream_Log)(nil), // 185: vtctldata.Workflow.Stream.Log - nil, // 186: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry - nil, // 187: vtctldata.GetCellsAliasesResponse.AliasesEntry - nil, // 188: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry - (*GetSrvKeyspaceNamesResponse_NameList)(nil), // 189: vtctldata.GetSrvKeyspaceNamesResponse.NameList - nil, // 190: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry - nil, // 191: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry - nil, // 192: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry - nil, // 193: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry - nil, // 194: vtctldata.ValidateResponse.ResultsByKeyspaceEntry - nil, // 195: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry - nil, // 196: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry - nil, // 197: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry - nil, // 198: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry - (*logutil.Event)(nil), // 199: logutil.Event - (*topodata.Keyspace)(nil), // 200: topodata.Keyspace - (*topodata.Shard)(nil), // 201: topodata.Shard - (*topodata.CellInfo)(nil), // 202: topodata.CellInfo - (*vschema.RoutingRules)(nil), // 203: vschema.RoutingRules - (*vschema.ShardRoutingRules)(nil), // 204: vschema.ShardRoutingRules - (*vttime.Duration)(nil), // 205: vttime.Duration - (*vtrpc.CallerID)(nil), // 206: vtrpc.CallerID - (*vschema.Keyspace)(nil), // 207: vschema.Keyspace - (*topodata.TabletAlias)(nil), // 208: topodata.TabletAlias - (topodata.TabletType)(0), // 209: topodata.TabletType - (*topodata.Tablet)(nil), // 210: topodata.Tablet - (*topodata.Keyspace_ServedFrom)(nil), // 211: topodata.Keyspace.ServedFrom - (topodata.KeyspaceType)(0), // 212: topodata.KeyspaceType - (*vttime.Time)(nil), // 213: vttime.Time - (*query.QueryResult)(nil), // 214: query.QueryResult - (*tabletmanagerdata.ExecuteHookRequest)(nil), // 215: tabletmanagerdata.ExecuteHookRequest - (*tabletmanagerdata.ExecuteHookResponse)(nil), // 216: tabletmanagerdata.ExecuteHookResponse - (*mysqlctl.BackupInfo)(nil), // 217: mysqlctl.BackupInfo - (*replicationdata.FullStatus)(nil), // 218: replicationdata.FullStatus - (*tabletmanagerdata.Permissions)(nil), // 219: tabletmanagerdata.Permissions - (*tabletmanagerdata.SchemaDefinition)(nil), // 220: tabletmanagerdata.SchemaDefinition - (*vschema.SrvVSchema)(nil), // 221: vschema.SrvVSchema - (*topodata.ShardReplicationError)(nil), // 222: topodata.ShardReplicationError - (*topodata.KeyRange)(nil), // 223: topodata.KeyRange - (*topodata.CellsAlias)(nil), // 224: topodata.CellsAlias - (*topodata.Shard_TabletControl)(nil), // 225: topodata.Shard.TabletControl - (*binlogdata.BinlogSource)(nil), // 226: binlogdata.BinlogSource - (*topodata.SrvKeyspace)(nil), // 227: topodata.SrvKeyspace - (*replicationdata.Status)(nil), // 228: replicationdata.Status +// Deprecated: Use StopReplicationResponse.ProtoReflect.Descriptor instead. +func (*StopReplicationResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{176} } -var file_vtctldata_proto_depIdxs = []int32{ - 199, // 0: vtctldata.ExecuteVtctlCommandResponse.event:type_name -> logutil.Event - 3, // 1: vtctldata.MaterializeSettings.table_settings:type_name -> vtctldata.TableMaterializeSettings - 0, // 2: vtctldata.MaterializeSettings.materialization_intent:type_name -> vtctldata.MaterializationIntent - 200, // 3: vtctldata.Keyspace.keyspace:type_name -> topodata.Keyspace - 201, // 4: vtctldata.Shard.shard:type_name -> topodata.Shard - 181, // 5: vtctldata.Workflow.source:type_name -> vtctldata.Workflow.ReplicationLocation - 181, // 6: vtctldata.Workflow.target:type_name -> vtctldata.Workflow.ReplicationLocation - 180, // 7: vtctldata.Workflow.shard_streams:type_name -> vtctldata.Workflow.ShardStreamsEntry - 202, // 8: vtctldata.AddCellInfoRequest.cell_info:type_name -> topodata.CellInfo - 203, // 9: vtctldata.ApplyRoutingRulesRequest.routing_rules:type_name -> vschema.RoutingRules - 204, // 10: vtctldata.ApplyShardRoutingRulesRequest.shard_routing_rules:type_name -> vschema.ShardRoutingRules - 205, // 11: vtctldata.ApplySchemaRequest.wait_replicas_timeout:type_name -> vttime.Duration - 206, // 12: vtctldata.ApplySchemaRequest.caller_id:type_name -> vtrpc.CallerID - 207, // 13: vtctldata.ApplyVSchemaRequest.v_schema:type_name -> vschema.Keyspace - 207, // 14: vtctldata.ApplyVSchemaResponse.v_schema:type_name -> vschema.Keyspace - 208, // 15: vtctldata.BackupRequest.tablet_alias:type_name -> topodata.TabletAlias - 208, // 16: vtctldata.BackupResponse.tablet_alias:type_name -> topodata.TabletAlias - 199, // 17: vtctldata.BackupResponse.event:type_name -> logutil.Event - 208, // 18: vtctldata.ChangeTabletTypeRequest.tablet_alias:type_name -> topodata.TabletAlias - 209, // 19: vtctldata.ChangeTabletTypeRequest.db_type:type_name -> topodata.TabletType - 210, // 20: vtctldata.ChangeTabletTypeResponse.before_tablet:type_name -> topodata.Tablet - 210, // 21: vtctldata.ChangeTabletTypeResponse.after_tablet:type_name -> topodata.Tablet - 211, // 22: vtctldata.CreateKeyspaceRequest.served_froms:type_name -> topodata.Keyspace.ServedFrom - 212, // 23: vtctldata.CreateKeyspaceRequest.type:type_name -> topodata.KeyspaceType - 213, // 24: vtctldata.CreateKeyspaceRequest.snapshot_time:type_name -> vttime.Time - 5, // 25: vtctldata.CreateKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace - 5, // 26: vtctldata.CreateShardResponse.keyspace:type_name -> vtctldata.Keyspace - 6, // 27: vtctldata.CreateShardResponse.shard:type_name -> vtctldata.Shard - 6, // 28: vtctldata.DeleteShardsRequest.shards:type_name -> vtctldata.Shard - 208, // 29: vtctldata.DeleteTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias - 208, // 30: vtctldata.EmergencyReparentShardRequest.new_primary:type_name -> topodata.TabletAlias - 208, // 31: vtctldata.EmergencyReparentShardRequest.ignore_replicas:type_name -> topodata.TabletAlias - 205, // 32: vtctldata.EmergencyReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration - 208, // 33: vtctldata.EmergencyReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias - 199, // 34: vtctldata.EmergencyReparentShardResponse.events:type_name -> logutil.Event - 208, // 35: vtctldata.ExecuteFetchAsAppRequest.tablet_alias:type_name -> topodata.TabletAlias - 214, // 36: vtctldata.ExecuteFetchAsAppResponse.result:type_name -> query.QueryResult - 208, // 37: vtctldata.ExecuteFetchAsDBARequest.tablet_alias:type_name -> topodata.TabletAlias - 214, // 38: vtctldata.ExecuteFetchAsDBAResponse.result:type_name -> query.QueryResult - 208, // 39: vtctldata.ExecuteHookRequest.tablet_alias:type_name -> topodata.TabletAlias - 215, // 40: vtctldata.ExecuteHookRequest.tablet_hook_request:type_name -> tabletmanagerdata.ExecuteHookRequest - 216, // 41: vtctldata.ExecuteHookResponse.hook_result:type_name -> tabletmanagerdata.ExecuteHookResponse - 186, // 42: vtctldata.FindAllShardsInKeyspaceResponse.shards:type_name -> vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry - 217, // 43: vtctldata.GetBackupsResponse.backups:type_name -> mysqlctl.BackupInfo - 202, // 44: vtctldata.GetCellInfoResponse.cell_info:type_name -> topodata.CellInfo - 187, // 45: vtctldata.GetCellsAliasesResponse.aliases:type_name -> vtctldata.GetCellsAliasesResponse.AliasesEntry - 208, // 46: vtctldata.GetFullStatusRequest.tablet_alias:type_name -> topodata.TabletAlias - 218, // 47: vtctldata.GetFullStatusResponse.status:type_name -> replicationdata.FullStatus - 5, // 48: vtctldata.GetKeyspacesResponse.keyspaces:type_name -> vtctldata.Keyspace - 5, // 49: vtctldata.GetKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace - 208, // 50: vtctldata.GetPermissionsRequest.tablet_alias:type_name -> topodata.TabletAlias - 219, // 51: vtctldata.GetPermissionsResponse.permissions:type_name -> tabletmanagerdata.Permissions - 203, // 52: vtctldata.GetRoutingRulesResponse.routing_rules:type_name -> vschema.RoutingRules - 208, // 53: vtctldata.GetSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias - 220, // 54: vtctldata.GetSchemaResponse.schema:type_name -> tabletmanagerdata.SchemaDefinition - 6, // 55: vtctldata.GetShardResponse.shard:type_name -> vtctldata.Shard - 204, // 56: vtctldata.GetShardRoutingRulesResponse.shard_routing_rules:type_name -> vschema.ShardRoutingRules - 188, // 57: vtctldata.GetSrvKeyspaceNamesResponse.names:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry - 190, // 58: vtctldata.GetSrvKeyspacesResponse.srv_keyspaces:type_name -> vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry - 221, // 59: vtctldata.GetSrvVSchemaResponse.srv_v_schema:type_name -> vschema.SrvVSchema - 191, // 60: vtctldata.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry - 208, // 61: vtctldata.GetTabletRequest.tablet_alias:type_name -> topodata.TabletAlias - 210, // 62: vtctldata.GetTabletResponse.tablet:type_name -> topodata.Tablet - 208, // 63: vtctldata.GetTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias - 209, // 64: vtctldata.GetTabletsRequest.tablet_type:type_name -> topodata.TabletType - 210, // 65: vtctldata.GetTabletsResponse.tablets:type_name -> topodata.Tablet - 91, // 66: vtctldata.GetTopologyPathResponse.cell:type_name -> vtctldata.TopologyCell - 208, // 67: vtctldata.GetVersionRequest.tablet_alias:type_name -> topodata.TabletAlias - 207, // 68: vtctldata.GetVSchemaResponse.v_schema:type_name -> vschema.Keyspace - 7, // 69: vtctldata.GetWorkflowsResponse.workflows:type_name -> vtctldata.Workflow - 208, // 70: vtctldata.InitShardPrimaryRequest.primary_elect_tablet_alias:type_name -> topodata.TabletAlias - 205, // 71: vtctldata.InitShardPrimaryRequest.wait_replicas_timeout:type_name -> vttime.Duration - 199, // 72: vtctldata.InitShardPrimaryResponse.events:type_name -> logutil.Event - 208, // 73: vtctldata.PingTabletRequest.tablet_alias:type_name -> topodata.TabletAlias - 208, // 74: vtctldata.PlannedReparentShardRequest.new_primary:type_name -> topodata.TabletAlias - 208, // 75: vtctldata.PlannedReparentShardRequest.avoid_primary:type_name -> topodata.TabletAlias - 205, // 76: vtctldata.PlannedReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration - 208, // 77: vtctldata.PlannedReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias - 199, // 78: vtctldata.PlannedReparentShardResponse.events:type_name -> logutil.Event - 208, // 79: vtctldata.RefreshStateRequest.tablet_alias:type_name -> topodata.TabletAlias - 208, // 80: vtctldata.ReloadSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias - 199, // 81: vtctldata.ReloadSchemaKeyspaceResponse.events:type_name -> logutil.Event - 199, // 82: vtctldata.ReloadSchemaShardResponse.events:type_name -> logutil.Event - 208, // 83: vtctldata.ReparentTabletRequest.tablet:type_name -> topodata.TabletAlias - 208, // 84: vtctldata.ReparentTabletResponse.primary:type_name -> topodata.TabletAlias - 208, // 85: vtctldata.RestoreFromBackupRequest.tablet_alias:type_name -> topodata.TabletAlias - 213, // 86: vtctldata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time - 208, // 87: vtctldata.RestoreFromBackupResponse.tablet_alias:type_name -> topodata.TabletAlias - 199, // 88: vtctldata.RestoreFromBackupResponse.event:type_name -> logutil.Event - 208, // 89: vtctldata.RunHealthCheckRequest.tablet_alias:type_name -> topodata.TabletAlias - 200, // 90: vtctldata.SetKeyspaceDurabilityPolicyResponse.keyspace:type_name -> topodata.Keyspace - 209, // 91: vtctldata.SetKeyspaceServedFromRequest.tablet_type:type_name -> topodata.TabletType - 200, // 92: vtctldata.SetKeyspaceServedFromResponse.keyspace:type_name -> topodata.Keyspace - 200, // 93: vtctldata.SetKeyspaceShardingInfoResponse.keyspace:type_name -> topodata.Keyspace - 201, // 94: vtctldata.SetShardIsPrimaryServingResponse.shard:type_name -> topodata.Shard - 209, // 95: vtctldata.SetShardTabletControlRequest.tablet_type:type_name -> topodata.TabletType - 201, // 96: vtctldata.SetShardTabletControlResponse.shard:type_name -> topodata.Shard - 208, // 97: vtctldata.SetWritableRequest.tablet_alias:type_name -> topodata.TabletAlias - 208, // 98: vtctldata.ShardReplicationAddRequest.tablet_alias:type_name -> topodata.TabletAlias - 222, // 99: vtctldata.ShardReplicationFixResponse.error:type_name -> topodata.ShardReplicationError - 192, // 100: vtctldata.ShardReplicationPositionsResponse.replication_statuses:type_name -> vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry - 193, // 101: vtctldata.ShardReplicationPositionsResponse.tablet_map:type_name -> vtctldata.ShardReplicationPositionsResponse.TabletMapEntry - 208, // 102: vtctldata.ShardReplicationRemoveRequest.tablet_alias:type_name -> topodata.TabletAlias - 208, // 103: vtctldata.SleepTabletRequest.tablet_alias:type_name -> topodata.TabletAlias - 205, // 104: vtctldata.SleepTabletRequest.duration:type_name -> vttime.Duration - 223, // 105: vtctldata.SourceShardAddRequest.key_range:type_name -> topodata.KeyRange - 201, // 106: vtctldata.SourceShardAddResponse.shard:type_name -> topodata.Shard - 201, // 107: vtctldata.SourceShardDeleteResponse.shard:type_name -> topodata.Shard - 208, // 108: vtctldata.StartReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias - 208, // 109: vtctldata.StopReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias - 208, // 110: vtctldata.TabletExternallyReparentedRequest.tablet:type_name -> topodata.TabletAlias - 208, // 111: vtctldata.TabletExternallyReparentedResponse.new_primary:type_name -> topodata.TabletAlias - 208, // 112: vtctldata.TabletExternallyReparentedResponse.old_primary:type_name -> topodata.TabletAlias - 202, // 113: vtctldata.UpdateCellInfoRequest.cell_info:type_name -> topodata.CellInfo - 202, // 114: vtctldata.UpdateCellInfoResponse.cell_info:type_name -> topodata.CellInfo - 224, // 115: vtctldata.UpdateCellsAliasRequest.cells_alias:type_name -> topodata.CellsAlias - 224, // 116: vtctldata.UpdateCellsAliasResponse.cells_alias:type_name -> topodata.CellsAlias - 194, // 117: vtctldata.ValidateResponse.results_by_keyspace:type_name -> vtctldata.ValidateResponse.ResultsByKeyspaceEntry - 195, // 118: vtctldata.ValidateKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry - 196, // 119: vtctldata.ValidateSchemaKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry - 197, // 120: vtctldata.ValidateVersionKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry - 198, // 121: vtctldata.ValidateVSchemaResponse.results_by_shard:type_name -> vtctldata.ValidateVSchemaResponse.ResultsByShardEntry - 182, // 122: vtctldata.Workflow.ShardStreamsEntry.value:type_name -> vtctldata.Workflow.ShardStream - 183, // 123: vtctldata.Workflow.ShardStream.streams:type_name -> vtctldata.Workflow.Stream - 225, // 124: vtctldata.Workflow.ShardStream.tablet_controls:type_name -> topodata.Shard.TabletControl - 208, // 125: vtctldata.Workflow.Stream.tablet:type_name -> topodata.TabletAlias - 226, // 126: vtctldata.Workflow.Stream.binlog_source:type_name -> binlogdata.BinlogSource - 213, // 127: vtctldata.Workflow.Stream.transaction_timestamp:type_name -> vttime.Time - 213, // 128: vtctldata.Workflow.Stream.time_updated:type_name -> vttime.Time - 184, // 129: vtctldata.Workflow.Stream.copy_states:type_name -> vtctldata.Workflow.Stream.CopyState - 185, // 130: vtctldata.Workflow.Stream.logs:type_name -> vtctldata.Workflow.Stream.Log - 213, // 131: vtctldata.Workflow.Stream.Log.created_at:type_name -> vttime.Time - 213, // 132: vtctldata.Workflow.Stream.Log.updated_at:type_name -> vttime.Time - 6, // 133: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry.value:type_name -> vtctldata.Shard - 224, // 134: vtctldata.GetCellsAliasesResponse.AliasesEntry.value:type_name -> topodata.CellsAlias - 189, // 135: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry.value:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NameList - 227, // 136: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry.value:type_name -> topodata.SrvKeyspace - 221, // 137: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry.value:type_name -> vschema.SrvVSchema - 228, // 138: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry.value:type_name -> replicationdata.Status - 210, // 139: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry.value:type_name -> topodata.Tablet - 169, // 140: vtctldata.ValidateResponse.ResultsByKeyspaceEntry.value:type_name -> vtctldata.ValidateKeyspaceResponse - 173, // 141: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse - 173, // 142: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse - 173, // 143: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse - 173, // 144: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse - 145, // [145:145] is the sub-list for method output_type - 145, // [145:145] is the sub-list for method input_type - 145, // [145:145] is the sub-list for extension type_name - 145, // [145:145] is the sub-list for extension extendee - 0, // [0:145] is the sub-list for field type_name + +type TabletExternallyReparentedRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Tablet is the alias of the tablet that was promoted externally and should + // be updated to the shard primary in the topo. + Tablet *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` } -func init() { file_vtctldata_proto_init() } -func file_vtctldata_proto_init() { - if File_vtctldata_proto != nil { - return +func (x *TabletExternallyReparentedRequest) Reset() { + *x = TabletExternallyReparentedRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[177] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - if !protoimpl.UnsafeEnabled { - file_vtctldata_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteVtctlCommandRequest); i { +} + +func (x *TabletExternallyReparentedRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TabletExternallyReparentedRequest) ProtoMessage() {} + +func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[177] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TabletExternallyReparentedRequest.ProtoReflect.Descriptor instead. +func (*TabletExternallyReparentedRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{177} +} + +func (x *TabletExternallyReparentedRequest) GetTablet() *topodata.TabletAlias { + if x != nil { + return x.Tablet + } + return nil +} + +type TabletExternallyReparentedResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + NewPrimary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=new_primary,json=newPrimary,proto3" json:"new_primary,omitempty"` + OldPrimary *topodata.TabletAlias `protobuf:"bytes,4,opt,name=old_primary,json=oldPrimary,proto3" json:"old_primary,omitempty"` +} + +func (x *TabletExternallyReparentedResponse) Reset() { + *x = TabletExternallyReparentedResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[178] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TabletExternallyReparentedResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TabletExternallyReparentedResponse) ProtoMessage() {} + +func (x *TabletExternallyReparentedResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[178] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TabletExternallyReparentedResponse.ProtoReflect.Descriptor instead. +func (*TabletExternallyReparentedResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{178} +} + +func (x *TabletExternallyReparentedResponse) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *TabletExternallyReparentedResponse) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *TabletExternallyReparentedResponse) GetNewPrimary() *topodata.TabletAlias { + if x != nil { + return x.NewPrimary + } + return nil +} + +func (x *TabletExternallyReparentedResponse) GetOldPrimary() *topodata.TabletAlias { + if x != nil { + return x.OldPrimary + } + return nil +} + +type UpdateCellInfoRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CellInfo *topodata.CellInfo `protobuf:"bytes,2,opt,name=cell_info,json=cellInfo,proto3" json:"cell_info,omitempty"` +} + +func (x *UpdateCellInfoRequest) Reset() { + *x = UpdateCellInfoRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[179] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateCellInfoRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCellInfoRequest) ProtoMessage() {} + +func (x *UpdateCellInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[179] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCellInfoRequest.ProtoReflect.Descriptor instead. +func (*UpdateCellInfoRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{179} +} + +func (x *UpdateCellInfoRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UpdateCellInfoRequest) GetCellInfo() *topodata.CellInfo { + if x != nil { + return x.CellInfo + } + return nil +} + +type UpdateCellInfoResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CellInfo *topodata.CellInfo `protobuf:"bytes,2,opt,name=cell_info,json=cellInfo,proto3" json:"cell_info,omitempty"` +} + +func (x *UpdateCellInfoResponse) Reset() { + *x = UpdateCellInfoResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[180] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateCellInfoResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCellInfoResponse) ProtoMessage() {} + +func (x *UpdateCellInfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[180] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCellInfoResponse.ProtoReflect.Descriptor instead. +func (*UpdateCellInfoResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{180} +} + +func (x *UpdateCellInfoResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UpdateCellInfoResponse) GetCellInfo() *topodata.CellInfo { + if x != nil { + return x.CellInfo + } + return nil +} + +type UpdateCellsAliasRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CellsAlias *topodata.CellsAlias `protobuf:"bytes,2,opt,name=cells_alias,json=cellsAlias,proto3" json:"cells_alias,omitempty"` +} + +func (x *UpdateCellsAliasRequest) Reset() { + *x = UpdateCellsAliasRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[181] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateCellsAliasRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCellsAliasRequest) ProtoMessage() {} + +func (x *UpdateCellsAliasRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[181] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCellsAliasRequest.ProtoReflect.Descriptor instead. +func (*UpdateCellsAliasRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{181} +} + +func (x *UpdateCellsAliasRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UpdateCellsAliasRequest) GetCellsAlias() *topodata.CellsAlias { + if x != nil { + return x.CellsAlias + } + return nil +} + +type UpdateCellsAliasResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CellsAlias *topodata.CellsAlias `protobuf:"bytes,2,opt,name=cells_alias,json=cellsAlias,proto3" json:"cells_alias,omitempty"` +} + +func (x *UpdateCellsAliasResponse) Reset() { + *x = UpdateCellsAliasResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[182] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateCellsAliasResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCellsAliasResponse) ProtoMessage() {} + +func (x *UpdateCellsAliasResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[182] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCellsAliasResponse.ProtoReflect.Descriptor instead. +func (*UpdateCellsAliasResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{182} +} + +func (x *UpdateCellsAliasResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UpdateCellsAliasResponse) GetCellsAlias() *topodata.CellsAlias { + if x != nil { + return x.CellsAlias + } + return nil +} + +type ValidateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PingTablets bool `protobuf:"varint,1,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` +} + +func (x *ValidateRequest) Reset() { + *x = ValidateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[183] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateRequest) ProtoMessage() {} + +func (x *ValidateRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[183] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateRequest.ProtoReflect.Descriptor instead. +func (*ValidateRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{183} +} + +func (x *ValidateRequest) GetPingTablets() bool { + if x != nil { + return x.PingTablets + } + return false +} + +type ValidateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + ResultsByKeyspace map[string]*ValidateKeyspaceResponse `protobuf:"bytes,2,rep,name=results_by_keyspace,json=resultsByKeyspace,proto3" json:"results_by_keyspace,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ValidateResponse) Reset() { + *x = ValidateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[184] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateResponse) ProtoMessage() {} + +func (x *ValidateResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[184] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateResponse.ProtoReflect.Descriptor instead. +func (*ValidateResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{184} +} + +func (x *ValidateResponse) GetResults() []string { + if x != nil { + return x.Results + } + return nil +} + +func (x *ValidateResponse) GetResultsByKeyspace() map[string]*ValidateKeyspaceResponse { + if x != nil { + return x.ResultsByKeyspace + } + return nil +} + +type ValidateKeyspaceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + PingTablets bool `protobuf:"varint,2,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` +} + +func (x *ValidateKeyspaceRequest) Reset() { + *x = ValidateKeyspaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[185] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateKeyspaceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateKeyspaceRequest) ProtoMessage() {} + +func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[185] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateKeyspaceRequest.ProtoReflect.Descriptor instead. +func (*ValidateKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{185} +} + +func (x *ValidateKeyspaceRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ValidateKeyspaceRequest) GetPingTablets() bool { + if x != nil { + return x.PingTablets + } + return false +} + +type ValidateKeyspaceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + ResultsByShard map[string]*ValidateShardResponse `protobuf:"bytes,2,rep,name=results_by_shard,json=resultsByShard,proto3" json:"results_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ValidateKeyspaceResponse) Reset() { + *x = ValidateKeyspaceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[186] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateKeyspaceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateKeyspaceResponse) ProtoMessage() {} + +func (x *ValidateKeyspaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[186] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateKeyspaceResponse.ProtoReflect.Descriptor instead. +func (*ValidateKeyspaceResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{186} +} + +func (x *ValidateKeyspaceResponse) GetResults() []string { + if x != nil { + return x.Results + } + return nil +} + +func (x *ValidateKeyspaceResponse) GetResultsByShard() map[string]*ValidateShardResponse { + if x != nil { + return x.ResultsByShard + } + return nil +} + +type ValidateSchemaKeyspaceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + ExcludeTables []string `protobuf:"bytes,2,rep,name=exclude_tables,json=excludeTables,proto3" json:"exclude_tables,omitempty"` + IncludeViews bool `protobuf:"varint,3,opt,name=include_views,json=includeViews,proto3" json:"include_views,omitempty"` + SkipNoPrimary bool `protobuf:"varint,4,opt,name=skip_no_primary,json=skipNoPrimary,proto3" json:"skip_no_primary,omitempty"` + IncludeVschema bool `protobuf:"varint,5,opt,name=include_vschema,json=includeVschema,proto3" json:"include_vschema,omitempty"` +} + +func (x *ValidateSchemaKeyspaceRequest) Reset() { + *x = ValidateSchemaKeyspaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[187] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateSchemaKeyspaceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateSchemaKeyspaceRequest) ProtoMessage() {} + +func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[187] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateSchemaKeyspaceRequest.ProtoReflect.Descriptor instead. +func (*ValidateSchemaKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{187} +} + +func (x *ValidateSchemaKeyspaceRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ValidateSchemaKeyspaceRequest) GetExcludeTables() []string { + if x != nil { + return x.ExcludeTables + } + return nil +} + +func (x *ValidateSchemaKeyspaceRequest) GetIncludeViews() bool { + if x != nil { + return x.IncludeViews + } + return false +} + +func (x *ValidateSchemaKeyspaceRequest) GetSkipNoPrimary() bool { + if x != nil { + return x.SkipNoPrimary + } + return false +} + +func (x *ValidateSchemaKeyspaceRequest) GetIncludeVschema() bool { + if x != nil { + return x.IncludeVschema + } + return false +} + +type ValidateSchemaKeyspaceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + ResultsByShard map[string]*ValidateShardResponse `protobuf:"bytes,2,rep,name=results_by_shard,json=resultsByShard,proto3" json:"results_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ValidateSchemaKeyspaceResponse) Reset() { + *x = ValidateSchemaKeyspaceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[188] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateSchemaKeyspaceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateSchemaKeyspaceResponse) ProtoMessage() {} + +func (x *ValidateSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[188] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateSchemaKeyspaceResponse.ProtoReflect.Descriptor instead. +func (*ValidateSchemaKeyspaceResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{188} +} + +func (x *ValidateSchemaKeyspaceResponse) GetResults() []string { + if x != nil { + return x.Results + } + return nil +} + +func (x *ValidateSchemaKeyspaceResponse) GetResultsByShard() map[string]*ValidateShardResponse { + if x != nil { + return x.ResultsByShard + } + return nil +} + +type ValidateShardRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + PingTablets bool `protobuf:"varint,3,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` +} + +func (x *ValidateShardRequest) Reset() { + *x = ValidateShardRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[189] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateShardRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateShardRequest) ProtoMessage() {} + +func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[189] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateShardRequest.ProtoReflect.Descriptor instead. +func (*ValidateShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{189} +} + +func (x *ValidateShardRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ValidateShardRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *ValidateShardRequest) GetPingTablets() bool { + if x != nil { + return x.PingTablets + } + return false +} + +type ValidateShardResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *ValidateShardResponse) Reset() { + *x = ValidateShardResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[190] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateShardResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateShardResponse) ProtoMessage() {} + +func (x *ValidateShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[190] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateShardResponse.ProtoReflect.Descriptor instead. +func (*ValidateShardResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{190} +} + +func (x *ValidateShardResponse) GetResults() []string { + if x != nil { + return x.Results + } + return nil +} + +type ValidateVersionKeyspaceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` +} + +func (x *ValidateVersionKeyspaceRequest) Reset() { + *x = ValidateVersionKeyspaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[191] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateVersionKeyspaceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateVersionKeyspaceRequest) ProtoMessage() {} + +func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[191] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateVersionKeyspaceRequest.ProtoReflect.Descriptor instead. +func (*ValidateVersionKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{191} +} + +func (x *ValidateVersionKeyspaceRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +type ValidateVersionKeyspaceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + ResultsByShard map[string]*ValidateShardResponse `protobuf:"bytes,2,rep,name=results_by_shard,json=resultsByShard,proto3" json:"results_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ValidateVersionKeyspaceResponse) Reset() { + *x = ValidateVersionKeyspaceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[192] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateVersionKeyspaceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateVersionKeyspaceResponse) ProtoMessage() {} + +func (x *ValidateVersionKeyspaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[192] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateVersionKeyspaceResponse.ProtoReflect.Descriptor instead. +func (*ValidateVersionKeyspaceResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{192} +} + +func (x *ValidateVersionKeyspaceResponse) GetResults() []string { + if x != nil { + return x.Results + } + return nil +} + +func (x *ValidateVersionKeyspaceResponse) GetResultsByShard() map[string]*ValidateShardResponse { + if x != nil { + return x.ResultsByShard + } + return nil +} + +type ValidateVersionShardRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` +} + +func (x *ValidateVersionShardRequest) Reset() { + *x = ValidateVersionShardRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[193] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateVersionShardRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateVersionShardRequest) ProtoMessage() {} + +func (x *ValidateVersionShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[193] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateVersionShardRequest.ProtoReflect.Descriptor instead. +func (*ValidateVersionShardRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{193} +} + +func (x *ValidateVersionShardRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ValidateVersionShardRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +type ValidateVersionShardResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *ValidateVersionShardResponse) Reset() { + *x = ValidateVersionShardResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[194] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateVersionShardResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateVersionShardResponse) ProtoMessage() {} + +func (x *ValidateVersionShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[194] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateVersionShardResponse.ProtoReflect.Descriptor instead. +func (*ValidateVersionShardResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{194} +} + +func (x *ValidateVersionShardResponse) GetResults() []string { + if x != nil { + return x.Results + } + return nil +} + +type ValidateVSchemaRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shards []string `protobuf:"bytes,2,rep,name=shards,proto3" json:"shards,omitempty"` + ExcludeTables []string `protobuf:"bytes,3,rep,name=exclude_tables,json=excludeTables,proto3" json:"exclude_tables,omitempty"` + IncludeViews bool `protobuf:"varint,4,opt,name=include_views,json=includeViews,proto3" json:"include_views,omitempty"` +} + +func (x *ValidateVSchemaRequest) Reset() { + *x = ValidateVSchemaRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[195] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateVSchemaRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateVSchemaRequest) ProtoMessage() {} + +func (x *ValidateVSchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[195] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateVSchemaRequest.ProtoReflect.Descriptor instead. +func (*ValidateVSchemaRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{195} +} + +func (x *ValidateVSchemaRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ValidateVSchemaRequest) GetShards() []string { + if x != nil { + return x.Shards + } + return nil +} + +func (x *ValidateVSchemaRequest) GetExcludeTables() []string { + if x != nil { + return x.ExcludeTables + } + return nil +} + +func (x *ValidateVSchemaRequest) GetIncludeViews() bool { + if x != nil { + return x.IncludeViews + } + return false +} + +type ValidateVSchemaResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []string `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + ResultsByShard map[string]*ValidateShardResponse `protobuf:"bytes,2,rep,name=results_by_shard,json=resultsByShard,proto3" json:"results_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ValidateVSchemaResponse) Reset() { + *x = ValidateVSchemaResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[196] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateVSchemaResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateVSchemaResponse) ProtoMessage() {} + +func (x *ValidateVSchemaResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[196] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateVSchemaResponse.ProtoReflect.Descriptor instead. +func (*ValidateVSchemaResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{196} +} + +func (x *ValidateVSchemaResponse) GetResults() []string { + if x != nil { + return x.Results + } + return nil +} + +func (x *ValidateVSchemaResponse) GetResultsByShard() map[string]*ValidateShardResponse { + if x != nil { + return x.ResultsByShard + } + return nil +} + +type WorkflowDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` + KeepData bool `protobuf:"varint,3,opt,name=keep_data,json=keepData,proto3" json:"keep_data,omitempty"` + KeepRoutingRules bool `protobuf:"varint,4,opt,name=keep_routing_rules,json=keepRoutingRules,proto3" json:"keep_routing_rules,omitempty"` +} + +func (x *WorkflowDeleteRequest) Reset() { + *x = WorkflowDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[197] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowDeleteRequest) ProtoMessage() {} + +func (x *WorkflowDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[197] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowDeleteRequest.ProtoReflect.Descriptor instead. +func (*WorkflowDeleteRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{197} +} + +func (x *WorkflowDeleteRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *WorkflowDeleteRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +func (x *WorkflowDeleteRequest) GetKeepData() bool { + if x != nil { + return x.KeepData + } + return false +} + +func (x *WorkflowDeleteRequest) GetKeepRoutingRules() bool { + if x != nil { + return x.KeepRoutingRules + } + return false +} + +type WorkflowDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` + Details []*WorkflowDeleteResponse_TabletInfo `protobuf:"bytes,2,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *WorkflowDeleteResponse) Reset() { + *x = WorkflowDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[198] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowDeleteResponse) ProtoMessage() {} + +func (x *WorkflowDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[198] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowDeleteResponse.ProtoReflect.Descriptor instead. +func (*WorkflowDeleteResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{198} +} + +func (x *WorkflowDeleteResponse) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *WorkflowDeleteResponse) GetDetails() []*WorkflowDeleteResponse_TabletInfo { + if x != nil { + return x.Details + } + return nil +} + +type WorkflowStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` +} + +func (x *WorkflowStatusRequest) Reset() { + *x = WorkflowStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[199] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowStatusRequest) ProtoMessage() {} + +func (x *WorkflowStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[199] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowStatusRequest.ProtoReflect.Descriptor instead. +func (*WorkflowStatusRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{199} +} + +func (x *WorkflowStatusRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *WorkflowStatusRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +type WorkflowStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key is keyspace/shard. + TableCopyState map[string]*WorkflowStatusResponse_TableCopyState `protobuf:"bytes,1,rep,name=table_copy_state,json=tableCopyState,proto3" json:"table_copy_state,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ShardStreams map[string]*WorkflowStatusResponse_ShardStreams `protobuf:"bytes,2,rep,name=shard_streams,json=shardStreams,proto3" json:"shard_streams,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *WorkflowStatusResponse) Reset() { + *x = WorkflowStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[200] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowStatusResponse) ProtoMessage() {} + +func (x *WorkflowStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[200] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowStatusResponse.ProtoReflect.Descriptor instead. +func (*WorkflowStatusResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{200} +} + +func (x *WorkflowStatusResponse) GetTableCopyState() map[string]*WorkflowStatusResponse_TableCopyState { + if x != nil { + return x.TableCopyState + } + return nil +} + +func (x *WorkflowStatusResponse) GetShardStreams() map[string]*WorkflowStatusResponse_ShardStreams { + if x != nil { + return x.ShardStreams + } + return nil +} + +type WorkflowSwitchTrafficRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` + Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` + TabletTypes []topodata.TabletType `protobuf:"varint,4,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` + MaxReplicationLagAllowed *vttime.Duration `protobuf:"bytes,5,opt,name=max_replication_lag_allowed,json=maxReplicationLagAllowed,proto3" json:"max_replication_lag_allowed,omitempty"` + EnableReverseReplication bool `protobuf:"varint,6,opt,name=enable_reverse_replication,json=enableReverseReplication,proto3" json:"enable_reverse_replication,omitempty"` + Direction int32 `protobuf:"varint,7,opt,name=direction,proto3" json:"direction,omitempty"` + Timeout *vttime.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"` + DryRun bool `protobuf:"varint,9,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` + InitializeTargetSequences bool `protobuf:"varint,10,opt,name=initialize_target_sequences,json=initializeTargetSequences,proto3" json:"initialize_target_sequences,omitempty"` +} + +func (x *WorkflowSwitchTrafficRequest) Reset() { + *x = WorkflowSwitchTrafficRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[201] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowSwitchTrafficRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowSwitchTrafficRequest) ProtoMessage() {} + +func (x *WorkflowSwitchTrafficRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[201] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowSwitchTrafficRequest.ProtoReflect.Descriptor instead. +func (*WorkflowSwitchTrafficRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{201} +} + +func (x *WorkflowSwitchTrafficRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *WorkflowSwitchTrafficRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +func (x *WorkflowSwitchTrafficRequest) GetCells() []string { + if x != nil { + return x.Cells + } + return nil +} + +func (x *WorkflowSwitchTrafficRequest) GetTabletTypes() []topodata.TabletType { + if x != nil { + return x.TabletTypes + } + return nil +} + +func (x *WorkflowSwitchTrafficRequest) GetMaxReplicationLagAllowed() *vttime.Duration { + if x != nil { + return x.MaxReplicationLagAllowed + } + return nil +} + +func (x *WorkflowSwitchTrafficRequest) GetEnableReverseReplication() bool { + if x != nil { + return x.EnableReverseReplication + } + return false +} + +func (x *WorkflowSwitchTrafficRequest) GetDirection() int32 { + if x != nil { + return x.Direction + } + return 0 +} + +func (x *WorkflowSwitchTrafficRequest) GetTimeout() *vttime.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +func (x *WorkflowSwitchTrafficRequest) GetDryRun() bool { + if x != nil { + return x.DryRun + } + return false +} + +func (x *WorkflowSwitchTrafficRequest) GetInitializeTargetSequences() bool { + if x != nil { + return x.InitializeTargetSequences + } + return false +} + +type WorkflowSwitchTrafficResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` + StartState string `protobuf:"bytes,2,opt,name=start_state,json=startState,proto3" json:"start_state,omitempty"` + CurrentState string `protobuf:"bytes,3,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` + DryRunResults []string `protobuf:"bytes,4,rep,name=dry_run_results,json=dryRunResults,proto3" json:"dry_run_results,omitempty"` +} + +func (x *WorkflowSwitchTrafficResponse) Reset() { + *x = WorkflowSwitchTrafficResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[202] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowSwitchTrafficResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowSwitchTrafficResponse) ProtoMessage() {} + +func (x *WorkflowSwitchTrafficResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[202] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowSwitchTrafficResponse.ProtoReflect.Descriptor instead. +func (*WorkflowSwitchTrafficResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{202} +} + +func (x *WorkflowSwitchTrafficResponse) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *WorkflowSwitchTrafficResponse) GetStartState() string { + if x != nil { + return x.StartState + } + return "" +} + +func (x *WorkflowSwitchTrafficResponse) GetCurrentState() string { + if x != nil { + return x.CurrentState + } + return "" +} + +func (x *WorkflowSwitchTrafficResponse) GetDryRunResults() []string { + if x != nil { + return x.DryRunResults + } + return nil +} + +type WorkflowUpdateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // TabletRequest gets passed on to each primary tablet involved + // in the workflow via the UpdateVReplicationWorkflow tabletmanager RPC. + TabletRequest *tabletmanagerdata.UpdateVReplicationWorkflowRequest `protobuf:"bytes,2,opt,name=tablet_request,json=tabletRequest,proto3" json:"tablet_request,omitempty"` +} + +func (x *WorkflowUpdateRequest) Reset() { + *x = WorkflowUpdateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[203] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowUpdateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowUpdateRequest) ProtoMessage() {} + +func (x *WorkflowUpdateRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[203] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowUpdateRequest.ProtoReflect.Descriptor instead. +func (*WorkflowUpdateRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{203} +} + +func (x *WorkflowUpdateRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *WorkflowUpdateRequest) GetTabletRequest() *tabletmanagerdata.UpdateVReplicationWorkflowRequest { + if x != nil { + return x.TabletRequest + } + return nil +} + +type WorkflowUpdateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Summary string `protobuf:"bytes,1,opt,name=summary,proto3" json:"summary,omitempty"` + Details []*WorkflowUpdateResponse_TabletInfo `protobuf:"bytes,2,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *WorkflowUpdateResponse) Reset() { + *x = WorkflowUpdateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[204] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowUpdateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowUpdateResponse) ProtoMessage() {} + +func (x *WorkflowUpdateResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[204] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowUpdateResponse.ProtoReflect.Descriptor instead. +func (*WorkflowUpdateResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{204} +} + +func (x *WorkflowUpdateResponse) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *WorkflowUpdateResponse) GetDetails() []*WorkflowUpdateResponse_TabletInfo { + if x != nil { + return x.Details + } + return nil +} + +type Workflow_ReplicationLocation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shards []string `protobuf:"bytes,2,rep,name=shards,proto3" json:"shards,omitempty"` +} + +func (x *Workflow_ReplicationLocation) Reset() { + *x = Workflow_ReplicationLocation{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[206] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Workflow_ReplicationLocation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Workflow_ReplicationLocation) ProtoMessage() {} + +func (x *Workflow_ReplicationLocation) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[206] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Workflow_ReplicationLocation.ProtoReflect.Descriptor instead. +func (*Workflow_ReplicationLocation) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{7, 1} +} + +func (x *Workflow_ReplicationLocation) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *Workflow_ReplicationLocation) GetShards() []string { + if x != nil { + return x.Shards + } + return nil +} + +type Workflow_ShardStream struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Streams []*Workflow_Stream `protobuf:"bytes,1,rep,name=streams,proto3" json:"streams,omitempty"` + TabletControls []*topodata.Shard_TabletControl `protobuf:"bytes,2,rep,name=tablet_controls,json=tabletControls,proto3" json:"tablet_controls,omitempty"` + IsPrimaryServing bool `protobuf:"varint,3,opt,name=is_primary_serving,json=isPrimaryServing,proto3" json:"is_primary_serving,omitempty"` +} + +func (x *Workflow_ShardStream) Reset() { + *x = Workflow_ShardStream{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[207] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Workflow_ShardStream) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Workflow_ShardStream) ProtoMessage() {} + +func (x *Workflow_ShardStream) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[207] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Workflow_ShardStream.ProtoReflect.Descriptor instead. +func (*Workflow_ShardStream) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{7, 2} +} + +func (x *Workflow_ShardStream) GetStreams() []*Workflow_Stream { + if x != nil { + return x.Streams + } + return nil +} + +func (x *Workflow_ShardStream) GetTabletControls() []*topodata.Shard_TabletControl { + if x != nil { + return x.TabletControls + } + return nil +} + +func (x *Workflow_ShardStream) GetIsPrimaryServing() bool { + if x != nil { + return x.IsPrimaryServing + } + return false +} + +type Workflow_Stream struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + Tablet *topodata.TabletAlias `protobuf:"bytes,3,opt,name=tablet,proto3" json:"tablet,omitempty"` + BinlogSource *binlogdata.BinlogSource `protobuf:"bytes,4,opt,name=binlog_source,json=binlogSource,proto3" json:"binlog_source,omitempty"` + Position string `protobuf:"bytes,5,opt,name=position,proto3" json:"position,omitempty"` + StopPosition string `protobuf:"bytes,6,opt,name=stop_position,json=stopPosition,proto3" json:"stop_position,omitempty"` + State string `protobuf:"bytes,7,opt,name=state,proto3" json:"state,omitempty"` + DbName string `protobuf:"bytes,8,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"` + TransactionTimestamp *vttime.Time `protobuf:"bytes,9,opt,name=transaction_timestamp,json=transactionTimestamp,proto3" json:"transaction_timestamp,omitempty"` + TimeUpdated *vttime.Time `protobuf:"bytes,10,opt,name=time_updated,json=timeUpdated,proto3" json:"time_updated,omitempty"` + Message string `protobuf:"bytes,11,opt,name=message,proto3" json:"message,omitempty"` + CopyStates []*Workflow_Stream_CopyState `protobuf:"bytes,12,rep,name=copy_states,json=copyStates,proto3" json:"copy_states,omitempty"` + Logs []*Workflow_Stream_Log `protobuf:"bytes,13,rep,name=logs,proto3" json:"logs,omitempty"` + // LogFetchError is set if we fail to fetch some logs for this stream. We + // will never fail to fetch workflows because we cannot fetch the logs, but + // we will still forward log-fetch errors to the caller, should that be + // relevant to the context in which they are fetching workflows. + // + // Note that this field being set does not necessarily mean that Logs is nil; + // if there are N logs that exist for the stream, and we fail to fetch the + // ith log, we will still return logs in [0, i) + (i, N]. + LogFetchError string `protobuf:"bytes,14,opt,name=log_fetch_error,json=logFetchError,proto3" json:"log_fetch_error,omitempty"` + Tags []string `protobuf:"bytes,15,rep,name=tags,proto3" json:"tags,omitempty"` +} + +func (x *Workflow_Stream) Reset() { + *x = Workflow_Stream{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[208] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Workflow_Stream) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Workflow_Stream) ProtoMessage() {} + +func (x *Workflow_Stream) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[208] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Workflow_Stream.ProtoReflect.Descriptor instead. +func (*Workflow_Stream) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{7, 3} +} + +func (x *Workflow_Stream) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *Workflow_Stream) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *Workflow_Stream) GetTablet() *topodata.TabletAlias { + if x != nil { + return x.Tablet + } + return nil +} + +func (x *Workflow_Stream) GetBinlogSource() *binlogdata.BinlogSource { + if x != nil { + return x.BinlogSource + } + return nil +} + +func (x *Workflow_Stream) GetPosition() string { + if x != nil { + return x.Position + } + return "" +} + +func (x *Workflow_Stream) GetStopPosition() string { + if x != nil { + return x.StopPosition + } + return "" +} + +func (x *Workflow_Stream) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *Workflow_Stream) GetDbName() string { + if x != nil { + return x.DbName + } + return "" +} + +func (x *Workflow_Stream) GetTransactionTimestamp() *vttime.Time { + if x != nil { + return x.TransactionTimestamp + } + return nil +} + +func (x *Workflow_Stream) GetTimeUpdated() *vttime.Time { + if x != nil { + return x.TimeUpdated + } + return nil +} + +func (x *Workflow_Stream) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Workflow_Stream) GetCopyStates() []*Workflow_Stream_CopyState { + if x != nil { + return x.CopyStates + } + return nil +} + +func (x *Workflow_Stream) GetLogs() []*Workflow_Stream_Log { + if x != nil { + return x.Logs + } + return nil +} + +func (x *Workflow_Stream) GetLogFetchError() string { + if x != nil { + return x.LogFetchError + } + return "" +} + +func (x *Workflow_Stream) GetTags() []string { + if x != nil { + return x.Tags + } + return nil +} + +type Workflow_Stream_CopyState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` + LastPk string `protobuf:"bytes,2,opt,name=last_pk,json=lastPk,proto3" json:"last_pk,omitempty"` +} + +func (x *Workflow_Stream_CopyState) Reset() { + *x = Workflow_Stream_CopyState{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[209] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Workflow_Stream_CopyState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Workflow_Stream_CopyState) ProtoMessage() {} + +func (x *Workflow_Stream_CopyState) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[209] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Workflow_Stream_CopyState.ProtoReflect.Descriptor instead. +func (*Workflow_Stream_CopyState) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{7, 3, 0} +} + +func (x *Workflow_Stream_CopyState) GetTable() string { + if x != nil { + return x.Table + } + return "" +} + +func (x *Workflow_Stream_CopyState) GetLastPk() string { + if x != nil { + return x.LastPk + } + return "" +} + +type Workflow_Stream_Log struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + StreamId int64 `protobuf:"varint,2,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"` + Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` + State string `protobuf:"bytes,4,opt,name=state,proto3" json:"state,omitempty"` + CreatedAt *vttime.Time `protobuf:"bytes,5,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + UpdatedAt *vttime.Time `protobuf:"bytes,6,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + Message string `protobuf:"bytes,7,opt,name=message,proto3" json:"message,omitempty"` + Count int64 `protobuf:"varint,8,opt,name=count,proto3" json:"count,omitempty"` +} + +func (x *Workflow_Stream_Log) Reset() { + *x = Workflow_Stream_Log{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[210] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Workflow_Stream_Log) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Workflow_Stream_Log) ProtoMessage() {} + +func (x *Workflow_Stream_Log) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[210] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Workflow_Stream_Log.ProtoReflect.Descriptor instead. +func (*Workflow_Stream_Log) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{7, 3, 1} +} + +func (x *Workflow_Stream_Log) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *Workflow_Stream_Log) GetStreamId() int64 { + if x != nil { + return x.StreamId + } + return 0 +} + +func (x *Workflow_Stream_Log) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Workflow_Stream_Log) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *Workflow_Stream_Log) GetCreatedAt() *vttime.Time { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *Workflow_Stream_Log) GetUpdatedAt() *vttime.Time { + if x != nil { + return x.UpdatedAt + } + return nil +} + +func (x *Workflow_Stream_Log) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Workflow_Stream_Log) GetCount() int64 { + if x != nil { + return x.Count + } + return 0 +} + +type GetSrvKeyspaceNamesResponse_NameList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Names []string `protobuf:"bytes,1,rep,name=names,proto3" json:"names,omitempty"` +} + +func (x *GetSrvKeyspaceNamesResponse_NameList) Reset() { + *x = GetSrvKeyspaceNamesResponse_NameList{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[218] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSrvKeyspaceNamesResponse_NameList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSrvKeyspaceNamesResponse_NameList) ProtoMessage() {} + +func (x *GetSrvKeyspaceNamesResponse_NameList) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[218] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSrvKeyspaceNamesResponse_NameList.ProtoReflect.Descriptor instead. +func (*GetSrvKeyspaceNamesResponse_NameList) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{84, 1} +} + +func (x *GetSrvKeyspaceNamesResponse_NameList) GetNames() []string { + if x != nil { + return x.Names + } + return nil +} + +type MoveTablesCreateResponse_TabletInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tablet *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` + // Created is set if the workflow was created on this tablet or not. + Created bool `protobuf:"varint,2,opt,name=created,proto3" json:"created,omitempty"` +} + +func (x *MoveTablesCreateResponse_TabletInfo) Reset() { + *x = MoveTablesCreateResponse_TabletInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[222] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MoveTablesCreateResponse_TabletInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MoveTablesCreateResponse_TabletInfo) ProtoMessage() {} + +func (x *MoveTablesCreateResponse_TabletInfo) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[222] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MoveTablesCreateResponse_TabletInfo.ProtoReflect.Descriptor instead. +func (*MoveTablesCreateResponse_TabletInfo) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{111, 0} +} + +func (x *MoveTablesCreateResponse_TabletInfo) GetTablet() *topodata.TabletAlias { + if x != nil { + return x.Tablet + } + return nil +} + +func (x *MoveTablesCreateResponse_TabletInfo) GetCreated() bool { + if x != nil { + return x.Created + } + return false +} + +type WorkflowDeleteResponse_TabletInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tablet *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` + // Delete is set if the workflow was deleted on this tablet. + Deleted bool `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"` +} + +func (x *WorkflowDeleteResponse_TabletInfo) Reset() { + *x = WorkflowDeleteResponse_TabletInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[231] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowDeleteResponse_TabletInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowDeleteResponse_TabletInfo) ProtoMessage() {} + +func (x *WorkflowDeleteResponse_TabletInfo) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[231] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowDeleteResponse_TabletInfo.ProtoReflect.Descriptor instead. +func (*WorkflowDeleteResponse_TabletInfo) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{198, 0} +} + +func (x *WorkflowDeleteResponse_TabletInfo) GetTablet() *topodata.TabletAlias { + if x != nil { + return x.Tablet + } + return nil +} + +func (x *WorkflowDeleteResponse_TabletInfo) GetDeleted() bool { + if x != nil { + return x.Deleted + } + return false +} + +type WorkflowStatusResponse_TableCopyState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RowsCopied int64 `protobuf:"varint,1,opt,name=rows_copied,json=rowsCopied,proto3" json:"rows_copied,omitempty"` + RowsTotal int64 `protobuf:"varint,2,opt,name=rows_total,json=rowsTotal,proto3" json:"rows_total,omitempty"` + RowsPercentage float32 `protobuf:"fixed32,3,opt,name=rows_percentage,json=rowsPercentage,proto3" json:"rows_percentage,omitempty"` + BytesCopied int64 `protobuf:"varint,4,opt,name=bytes_copied,json=bytesCopied,proto3" json:"bytes_copied,omitempty"` + BytesTotal int64 `protobuf:"varint,5,opt,name=bytes_total,json=bytesTotal,proto3" json:"bytes_total,omitempty"` + BytesPercentage float32 `protobuf:"fixed32,6,opt,name=bytes_percentage,json=bytesPercentage,proto3" json:"bytes_percentage,omitempty"` +} + +func (x *WorkflowStatusResponse_TableCopyState) Reset() { + *x = WorkflowStatusResponse_TableCopyState{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[232] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowStatusResponse_TableCopyState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowStatusResponse_TableCopyState) ProtoMessage() {} + +func (x *WorkflowStatusResponse_TableCopyState) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[232] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowStatusResponse_TableCopyState.ProtoReflect.Descriptor instead. +func (*WorkflowStatusResponse_TableCopyState) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{200, 0} +} + +func (x *WorkflowStatusResponse_TableCopyState) GetRowsCopied() int64 { + if x != nil { + return x.RowsCopied + } + return 0 +} + +func (x *WorkflowStatusResponse_TableCopyState) GetRowsTotal() int64 { + if x != nil { + return x.RowsTotal + } + return 0 +} + +func (x *WorkflowStatusResponse_TableCopyState) GetRowsPercentage() float32 { + if x != nil { + return x.RowsPercentage + } + return 0 +} + +func (x *WorkflowStatusResponse_TableCopyState) GetBytesCopied() int64 { + if x != nil { + return x.BytesCopied + } + return 0 +} + +func (x *WorkflowStatusResponse_TableCopyState) GetBytesTotal() int64 { + if x != nil { + return x.BytesTotal + } + return 0 +} + +func (x *WorkflowStatusResponse_TableCopyState) GetBytesPercentage() float32 { + if x != nil { + return x.BytesPercentage + } + return 0 +} + +type WorkflowStatusResponse_ShardStreamState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Tablet *topodata.TabletAlias `protobuf:"bytes,2,opt,name=tablet,proto3" json:"tablet,omitempty"` + SourceShard string `protobuf:"bytes,3,opt,name=source_shard,json=sourceShard,proto3" json:"source_shard,omitempty"` + Position string `protobuf:"bytes,4,opt,name=position,proto3" json:"position,omitempty"` + Status string `protobuf:"bytes,5,opt,name=status,proto3" json:"status,omitempty"` + Info string `protobuf:"bytes,6,opt,name=info,proto3" json:"info,omitempty"` +} + +func (x *WorkflowStatusResponse_ShardStreamState) Reset() { + *x = WorkflowStatusResponse_ShardStreamState{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[233] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowStatusResponse_ShardStreamState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowStatusResponse_ShardStreamState) ProtoMessage() {} + +func (x *WorkflowStatusResponse_ShardStreamState) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[233] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowStatusResponse_ShardStreamState.ProtoReflect.Descriptor instead. +func (*WorkflowStatusResponse_ShardStreamState) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{200, 1} +} + +func (x *WorkflowStatusResponse_ShardStreamState) GetId() int32 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *WorkflowStatusResponse_ShardStreamState) GetTablet() *topodata.TabletAlias { + if x != nil { + return x.Tablet + } + return nil +} + +func (x *WorkflowStatusResponse_ShardStreamState) GetSourceShard() string { + if x != nil { + return x.SourceShard + } + return "" +} + +func (x *WorkflowStatusResponse_ShardStreamState) GetPosition() string { + if x != nil { + return x.Position + } + return "" +} + +func (x *WorkflowStatusResponse_ShardStreamState) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *WorkflowStatusResponse_ShardStreamState) GetInfo() string { + if x != nil { + return x.Info + } + return "" +} + +type WorkflowStatusResponse_ShardStreams struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Streams []*WorkflowStatusResponse_ShardStreamState `protobuf:"bytes,2,rep,name=streams,proto3" json:"streams,omitempty"` +} + +func (x *WorkflowStatusResponse_ShardStreams) Reset() { + *x = WorkflowStatusResponse_ShardStreams{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[234] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowStatusResponse_ShardStreams) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowStatusResponse_ShardStreams) ProtoMessage() {} + +func (x *WorkflowStatusResponse_ShardStreams) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[234] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowStatusResponse_ShardStreams.ProtoReflect.Descriptor instead. +func (*WorkflowStatusResponse_ShardStreams) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{200, 2} +} + +func (x *WorkflowStatusResponse_ShardStreams) GetStreams() []*WorkflowStatusResponse_ShardStreamState { + if x != nil { + return x.Streams + } + return nil +} + +type WorkflowUpdateResponse_TabletInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tablet *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` + // Changed is true if any of the provided values were different + // than what was already stored on this tablet. + Changed bool `protobuf:"varint,2,opt,name=changed,proto3" json:"changed,omitempty"` +} + +func (x *WorkflowUpdateResponse_TabletInfo) Reset() { + *x = WorkflowUpdateResponse_TabletInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[237] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowUpdateResponse_TabletInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowUpdateResponse_TabletInfo) ProtoMessage() {} + +func (x *WorkflowUpdateResponse_TabletInfo) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[237] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowUpdateResponse_TabletInfo.ProtoReflect.Descriptor instead. +func (*WorkflowUpdateResponse_TabletInfo) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{204, 0} +} + +func (x *WorkflowUpdateResponse_TabletInfo) GetTablet() *topodata.TabletAlias { + if x != nil { + return x.Tablet + } + return nil +} + +func (x *WorkflowUpdateResponse_TabletInfo) GetChanged() bool { + if x != nil { + return x.Changed + } + return false +} + +var File_vtctldata_proto protoreflect.FileDescriptor + +var file_vtctldata_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x09, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x10, 0x62, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0d, + 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x6d, + 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x17, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0d, 0x76, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, 0x76, 0x74, 0x72, 0x70, 0x63, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0c, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x57, 0x0a, 0x1a, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x56, + 0x74, 0x63, 0x74, 0x6c, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x43, 0x0a, + 0x1b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x43, 0x6f, 0x6d, + 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, + 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x22, 0x89, 0x01, 0x0a, 0x18, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x74, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x78, 0x70, + 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x64, 0x6c, 0x22, 0x83, + 0x06, 0x0a, 0x13, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, + 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, + 0x74, 0x6f, 0x70, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x4a, 0x0a, 0x0e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x21, 0x0a, 0x0c, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, + 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x57, 0x0a, 0x16, 0x6d, 0x61, + 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, + 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x15, 0x6d, 0x61, + 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x28, 0x0a, + 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, + 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, + 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x15, 0x0a, 0x06, + 0x6f, 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x6e, + 0x44, 0x64, 0x6c, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, + 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x5f, 0x63, 0x6f, + 0x70, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x74, 0x6f, 0x6d, 0x69, 0x63, + 0x43, 0x6f, 0x70, 0x79, 0x22, 0x4e, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x22, 0x85, 0x13, 0x0a, 0x0f, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x16, + 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2f, 0x0a, 0x13, + 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x3f, 0x0a, + 0x08, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x72, 0x61, + 0x74, 0x65, 0x67, 0x79, 0x52, 0x08, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x18, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x08, 0x61, 0x64, 0x64, 0x65, + 0x64, 0x5f, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, + 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x07, 0x61, 0x64, 0x64, 0x65, 0x64, 0x41, + 0x74, 0x12, 0x2f, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, + 0x41, 0x74, 0x12, 0x27, 0x0a, 0x08, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x61, 0x74, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x52, 0x07, 0x72, 0x65, 0x61, 0x64, 0x79, 0x41, 0x74, 0x12, 0x2b, 0x0a, 0x0a, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3b, 0x0a, 0x12, 0x6c, 0x69, 0x76, 0x65, + 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x52, 0x11, 0x6c, 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2f, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6c, + 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x30, 0x0a, 0x0d, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x65, + 0x64, 0x5f, 0x75, 0x70, 0x5f, 0x61, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, + 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x63, 0x6c, 0x65, + 0x61, 0x6e, 0x65, 0x64, 0x55, 0x70, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1c, + 0x0a, 0x09, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, + 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x72, + 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x6d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x17, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x10, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x64, 0x6c, 0x5f, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x64, 0x6c, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x19, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x65, 0x74, 0x61, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x1a, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0a, 0x65, 0x74, 0x61, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, + 0x1f, 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x1b, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x1c, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x12, + 0x2a, 0x0a, 0x11, 0x61, 0x64, 0x64, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, + 0x6b, 0x65, 0x79, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x61, 0x64, 0x64, 0x65, + 0x64, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, + 0x79, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, + 0x64, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6c, + 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, + 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x12, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x20, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, + 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x13, 0x70, 0x6f, 0x73, 0x74, 0x70, + 0x6f, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x21, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x70, 0x6f, 0x73, 0x74, 0x70, 0x6f, 0x6e, 0x65, 0x43, 0x6f, + 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x18, 0x72, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x72, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x64, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x12, 0x44, 0x0a, 0x1f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x5f, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1b, 0x64, 0x72, 0x6f, 0x70, + 0x70, 0x65, 0x64, 0x4e, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x43, 0x6f, 0x6c, 0x75, + 0x6d, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x61, 0x6e, + 0x64, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x65, 0x64, + 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, + 0x65, 0x76, 0x65, 0x72, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x6f, 0x74, 0x65, 0x73, 0x18, + 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x69, 0x62, 0x6c, + 0x65, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, + 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x26, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x75, 0x75, + 0x69, 0x64, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, + 0x65, 0x64, 0x55, 0x75, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x76, 0x69, 0x65, + 0x77, 0x18, 0x28, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x69, 0x73, 0x56, 0x69, 0x65, 0x77, 0x12, + 0x2a, 0x0a, 0x11, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x74, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x72, 0x65, 0x61, 0x64, + 0x79, 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x3a, 0x0a, 0x19, 0x76, + 0x69, 0x74, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x69, + 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x17, + 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x49, 0x6e, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x2e, 0x0a, 0x13, 0x75, 0x73, 0x65, 0x72, 0x5f, + 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x2b, + 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x75, 0x73, 0x65, 0x72, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, + 0x6c, 0x65, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x61, 0x6c, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x61, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x38, 0x0a, 0x11, 0x6c, 0x61, + 0x73, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, + 0x2d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x52, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, + 0x65, 0x64, 0x41, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x2e, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x54, 0x68, 0x72, 0x6f, + 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x2f, 0x0a, 0x0c, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, + 0x6c, 0x6c, 0x65, 0x64, 0x41, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6f, 0x73, 0x74, 0x70, 0x6f, + 0x6e, 0x65, 0x5f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x18, 0x30, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0e, 0x70, 0x6f, 0x73, 0x74, 0x70, 0x6f, 0x6e, 0x65, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x31, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x74, 0x6f, 0x76, 0x65, 0x72, + 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0f, 0x63, 0x75, 0x74, 0x6f, 0x76, 0x65, 0x72, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, + 0x12, 0x34, 0x0a, 0x16, 0x69, 0x73, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, + 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x33, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x14, 0x69, 0x73, 0x49, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x0b, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x34, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x76, 0x69, 0x65, + 0x77, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3d, 0x0a, 0x14, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x74, + 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x61, 0x74, 0x18, 0x35, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x52, 0x11, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x41, 0x74, 0x22, 0x53, 0x0a, 0x08, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, + 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x49, 0x54, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, + 0x4f, 0x4e, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x48, 0x4f, 0x53, + 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x54, 0x4f, 0x53, 0x43, 0x10, 0x02, 0x12, 0x0a, + 0x0a, 0x06, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x59, + 0x53, 0x51, 0x4c, 0x10, 0x04, 0x1a, 0x02, 0x10, 0x01, 0x22, 0x71, 0x0a, 0x06, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, + 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0a, + 0x0a, 0x06, 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, + 0x41, 0x44, 0x59, 0x10, 0x04, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, + 0x10, 0x05, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x06, + 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x07, 0x22, 0x5e, 0x0a, 0x05, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xd2, 0x0c, 0x0a, + 0x08, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, + 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x3f, + 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, + 0x31, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, + 0x6d, 0x61, 0x78, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, + 0x61, 0x67, 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, + 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x1a, + 0x60, 0x0a, 0x11, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x49, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x1a, 0xb9, 0x01, 0x0a, + 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x34, 0x0a, 0x07, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x73, 0x12, 0x46, 0x0a, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, + 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x1a, 0xf6, 0x06, 0x0a, 0x06, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x3d, 0x0a, 0x0d, 0x62, 0x69, 0x6e, 0x6c, + 0x6f, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0c, 0x62, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x70, + 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x17, + 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x52, 0x14, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2f, 0x0a, 0x0c, 0x74, 0x69, + 0x6d, 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, + 0x74, 0x69, 0x6d, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x45, 0x0a, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x0a, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x04, + 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x4c, 0x6f, 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, + 0x12, 0x26, 0x0a, 0x0f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x67, 0x46, 0x65, + 0x74, 0x63, 0x68, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, + 0x18, 0x0f, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x1a, 0x3a, 0x0a, 0x09, + 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, + 0x17, 0x0a, 0x07, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x6b, 0x1a, 0xe6, 0x01, 0x0a, 0x03, 0x4c, 0x6f, 0x67, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x41, 0x74, 0x12, 0x2b, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, + 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x22, 0x59, 0x0a, 0x12, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, + 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x15, 0x0a, 0x13, + 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x40, 0x0a, 0x14, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, + 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x17, 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, + 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9e, + 0x01, 0x0a, 0x18, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x0d, 0x72, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, + 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, + 0x6b, 0x69, 0x70, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0c, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x22, + 0x1b, 0x0a, 0x19, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb3, 0x01, 0x0a, + 0x1d, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, + 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, + 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, + 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x23, 0x0a, + 0x0d, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, 0x65, 0x6c, + 0x6c, 0x73, 0x22, 0x20, 0x0a, 0x1e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xef, 0x02, 0x0a, 0x12, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x64, 0x6c, + 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x64, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x1b, 0x0a, 0x09, + 0x75, 0x75, 0x69, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x08, 0x75, 0x75, 0x69, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x6d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x25, 0x0a, 0x0e, + 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x50, 0x72, 0x65, 0x66, 0x6c, 0x69, + 0x67, 0x68, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, + 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x62, 0x61, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, + 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0xe8, 0x01, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x75, 0x75, 0x69, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x08, 0x75, 0x75, 0x69, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x6c, 0x0a, 0x16, 0x72, + 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, + 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, + 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, + 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xc3, 0x01, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, + 0x70, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, + 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, + 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x22, 0x44, 0x0a, 0x14, 0x41, 0x70, 0x70, 0x6c, 0x79, + 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0xe5, 0x01, + 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, + 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, + 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, + 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x72, 0x6f, 0x6d, 0x50, + 0x6f, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x73, 0x61, + 0x66, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x53, 0x61, 0x66, 0x65, 0x22, 0xa2, 0x01, 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0xe2, 0x01, 0x0a, 0x12, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, + 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, + 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x73, 0x61, 0x66, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x61, 0x66, 0x65, 0x12, 0x30, 0x0a, + 0x14, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x72, 0x6f, + 0x6d, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, + 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x73, 0x22, + 0x4e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, + 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, + 0xdf, 0x01, 0x0a, 0x1d, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x76, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, + 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x41, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x61, + 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, + 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, + 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, + 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x9b, 0x01, 0x0a, 0x17, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, + 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, + 0x64, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, + 0xa6, 0x01, 0x0a, 0x18, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0d, + 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x12, 0x33, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0b, 0x61, 0x66, 0x74, + 0x65, 0x72, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1e, 0x0a, 0x0b, 0x77, 0x61, 0x73, 0x5f, + 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x77, + 0x61, 0x73, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0x4f, 0x0a, 0x1d, 0x43, 0x6c, 0x65, 0x61, + 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0xe1, 0x01, 0x0a, 0x1e, 0x43, 0x6c, + 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x16, + 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, + 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, + 0x1e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, + 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, + 0xe3, 0x01, 0x0a, 0x1f, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, + 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x99, 0x03, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x2f, 0x0a, 0x14, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x40, 0x0a, 0x0c, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x73, 0x12, 0x2a, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, + 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x62, 0x61, 0x73, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x31, 0x0a, + 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x75, 0x72, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x26, 0x0a, + 0x0f, 0x73, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x5f, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x44, + 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, + 0x06, 0x22, 0x49, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8c, 0x01, 0x0a, + 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, + 0x6f, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0xa0, 0x01, 0x0a, 0x13, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x30, 0x0a, 0x14, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x65, 0x78, + 0x69, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x41, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x41, + 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, + 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, + 0x65, 0x22, 0x18, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x0a, 0x17, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x67, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, + 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, + 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, + 0x18, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x13, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x28, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x72, + 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x65, 0x76, 0x65, + 0x6e, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x49, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x2d, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, + 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x1a, + 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x79, 0x0a, 0x14, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, + 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, + 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, + 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x17, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x81, + 0x03, 0x0a, 0x1d, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, + 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3e, 0x0a, 0x0f, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0e, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, + 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, + 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, + 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x12, 0x3f, 0x0a, 0x1c, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x72, 0x6f, 0x73, + 0x73, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x43, + 0x72, 0x6f, 0x73, 0x73, 0x43, 0x65, 0x6c, 0x6c, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x2f, 0x0a, 0x14, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x61, 0x6c, + 0x6c, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x11, 0x77, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x41, 0x6c, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x73, 0x22, 0xbc, 0x01, 0x0a, 0x1e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, + 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, + 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, + 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, + 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, + 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, + 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, + 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, 0x65, + 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x75, 0x73, 0x65, + 0x50, 0x6f, 0x6f, 0x6c, 0x22, 0x47, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, + 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xd3, 0x01, + 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, + 0x44, 0x42, 0x41, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, + 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, + 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x22, 0x47, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, + 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xa5, 0x01, 0x0a, + 0x12, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, + 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x55, 0x0a, + 0x13, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x68, 0x6f, 0x6f, 0x6b, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x52, 0x11, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x22, 0x5e, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, + 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0b, 0x68, + 0x6f, 0x6f, 0x6b, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0a, 0x68, 0x6f, 0x6f, 0x6b, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x22, 0x3c, 0x0a, 0x1e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x22, 0xbe, 0x01, 0x0a, 0x1f, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x1a, 0x4b, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x9e, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x25, 0x0a, + 0x0e, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x22, 0x44, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x79, + 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x22, 0x28, 0x0a, 0x12, 0x47, 0x65, + 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, + 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x19, 0x0a, 0x17, + 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x30, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x43, 0x65, + 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, + 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0xb6, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x49, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x1a, 0x50, 0x0a, 0x0c, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, 0x14, + 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, + 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x4c, + 0x0a, 0x15, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x15, 0x0a, 0x13, + 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0x30, + 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x22, 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x51, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, + 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x5a, 0x0a, 0x16, 0x47, + 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, + 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x55, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0d, + 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, 0xb0, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, + 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, + 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, + 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x12, + 0x2a, 0x0a, 0x11, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, + 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x50, 0x0a, 0x11, 0x47, + 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x3b, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0xb8, 0x02, + 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x11, + 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x28, 0x0a, 0x06, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x2e, + 0x0a, 0x05, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4f, + 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x05, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x14, + 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x6b, 0x69, 0x70, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x04, 0x73, 0x6b, 0x69, 0x70, 0x22, 0x59, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0a, 0x6d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x22, 0x4c, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, + 0x65, 0x22, 0x3a, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x1d, 0x0a, + 0x1b, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x6a, 0x0a, 0x1c, + 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x13, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, + 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x32, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x53, + 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xf3, 0x01, 0x0a, + 0x1b, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x05, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x1a, 0x69, 0x0a, 0x0a, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x20, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x22, 0x4a, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xcc, + 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x0d, 0x73, 0x72, + 0x76, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x34, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, + 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x73, 0x1a, 0x56, 0x0a, 0x11, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2b, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf8, 0x02, + 0x0a, 0x1c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, + 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x28, 0x0a, + 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x5f, 0x61, 0x73, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x65, 0x6c, 0x66, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x73, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x53, 0x65, 0x6c, 0x66, 0x12, 0x2f, 0x0a, 0x14, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, + 0x61, 0x73, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x73, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x3f, 0x0a, 0x0d, 0x74, 0x68, 0x72, 0x6f, 0x74, + 0x74, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x70, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, + 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0c, 0x74, 0x68, 0x72, 0x6f, + 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x22, 0x1f, 0x0a, 0x1d, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2a, 0x0a, 0x14, 0x47, 0x65, 0x74, + 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x4e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, + 0x0a, 0x0c, 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, + 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x73, 0x72, 0x76, 0x56, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x2d, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, + 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xc5, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x56, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x73, 0x72, 0x76, 0x56, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x1a, 0x53, 0x0a, 0x10, 0x53, 0x72, 0x76, 0x56, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x10, + 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x3d, 0x0a, 0x11, 0x47, 0x65, + 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x28, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0xe8, 0x01, 0x0a, 0x11, 0x47, 0x65, + 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x12, + 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0d, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, + 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x22, 0x40, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x07, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x2c, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, + 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x22, 0x46, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, + 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2b, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, + 0x67, 0x79, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x66, 0x0a, 0x0c, + 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, + 0x64, 0x72, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, + 0x64, 0x72, 0x65, 0x6e, 0x22, 0x2f, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x4d, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x22, 0x2e, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x42, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x8b, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1b, 0x0a, + 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, + 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x73, 0x22, 0xfb, 0x01, 0x0a, 0x17, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x52, 0x0a, 0x1a, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x17, 0x70, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x45, 0x6c, 0x65, 0x63, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, + 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, + 0x42, 0x0a, 0x18, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, + 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x73, 0x22, 0x4e, 0x0a, 0x1c, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, + 0x75, 0x69, 0x64, 0x22, 0xdf, 0x01, 0x0a, 0x1d, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x76, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, + 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xbb, 0x06, 0x0a, 0x17, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, + 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, + 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, + 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x6c, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x32, + 0x0a, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x15, 0x0a, 0x06, + 0x6f, 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x6e, + 0x44, 0x64, 0x6c, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, 0x65, + 0x72, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, + 0x6f, 0x70, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x64, + 0x72, 0x6f, 0x70, 0x5f, 0x66, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x73, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x72, 0x6f, 0x70, 0x46, 0x6f, 0x72, 0x65, + 0x69, 0x67, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, + 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, + 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, + 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, + 0x75, 0x74, 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x6e, 0x6f, 0x5f, 0x72, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0e, 0x6e, 0x6f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, + 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x5f, 0x63, 0x6f, 0x70, + 0x79, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x43, + 0x6f, 0x70, 0x79, 0x22, 0xd5, 0x01, 0x0a, 0x18, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x48, 0x0a, 0x07, 0x64, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x55, 0x0a, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0xe9, 0x01, 0x0a, 0x19, + 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x08, 0x6b, 0x65, 0x65, 0x70, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2c, 0x0a, 0x12, 0x6b, + 0x65, 0x65, 0x70, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6e, + 0x61, 0x6d, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0c, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x17, + 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0x5e, 0x0a, 0x1a, 0x4d, 0x6f, 0x76, 0x65, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x26, 0x0a, 0x0f, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x4d, 0x0a, 0x11, 0x50, 0x69, 0x6e, 0x67, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x89, 0x02, 0x0a, + 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, + 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x0d, 0x61, 0x76, 0x6f, 0x69, 0x64, 0x5f, + 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0c, 0x61, 0x76, 0x6f, 0x69, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xba, 0x01, 0x0a, 0x1c, 0x50, 0x6c, 0x61, + 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, + 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, + 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, + 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x74, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, + 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x22, 0x1e, 0x0a, 0x1c, 0x52, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, + 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x1a, 0x52, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, + 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, + 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, + 0x1d, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4f, + 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, + 0x16, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x64, 0x0a, 0x1a, 0x52, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x83, 0x01, + 0x0a, 0x1b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x69, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x50, 0x61, 0x72, + 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x36, 0x0a, 0x17, 0x70, + 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x70, 0x61, + 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x22, 0x4f, 0x0a, 0x13, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa9, 0x01, 0x0a, + 0x1b, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, + 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, + 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, + 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x46, 0x0a, 0x1c, 0x52, 0x65, 0x6c, 0x6f, + 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, + 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x22, 0xbc, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, + 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, + 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, + 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, + 0x43, 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, + 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x73, 0x22, 0x5b, 0x0a, 0x13, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7f, 0x0a, 0x19, 0x52, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, + 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, + 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, + 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, + 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x46, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0x7b, 0x0a, 0x16, 0x52, 0x65, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x70, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x8f, 0x04, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x23, 0x0a, 0x0d, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, + 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x28, + 0x0a, 0x10, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x63, 0x6f, + 0x70, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x15, 0x0a, 0x06, 0x6f, 0x6e, 0x5f, 0x64, + 0x64, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, 0x6c, 0x12, + 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, + 0x70, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x41, 0x66, + 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, + 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, + 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, + 0x75, 0x74, 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x22, 0x82, 0x02, 0x0a, 0x18, 0x52, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, + 0x2d, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x24, + 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, + 0x6f, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x3e, 0x0a, + 0x14, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x54, 0x6f, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xad, 0x01, + 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, + 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x4d, 0x0a, + 0x1b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0xdd, 0x01, 0x0a, + 0x1c, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, + 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, + 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, + 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x51, 0x0a, 0x15, + 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, + 0x18, 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6d, 0x0a, 0x22, 0x53, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, + 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x55, 0x0a, 0x23, 0x53, 0x65, 0x74, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, + 0xc8, 0x01, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x35, 0x0a, 0x0b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, + 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x4f, 0x0a, 0x1d, 0x53, 0x65, + 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, + 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5e, 0x0a, 0x1e, 0x53, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x69, + 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, + 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x4a, + 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x51, 0x0a, 0x1f, 0x53, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x69, + 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x72, + 0x0a, 0x1f, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x6e, 0x67, 0x22, 0x49, 0x0a, 0x20, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, + 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x8e, 0x02, + 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, + 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x22, 0x46, + 0x0a, 0x1d, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x6a, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x72, 0x69, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x77, 0x72, 0x69, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x88, 0x01, 0x0a, 0x1a, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x62, 0x0a, 0x1a, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x54, 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x54, 0x0a, + 0x20, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x22, 0xaa, 0x03, 0x0a, 0x21, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x14, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x65, 0x73, 0x12, 0x5a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x6d, 0x61, + 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x1a, + 0x5f, 0x0a, 0x18, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x4e, 0x0a, 0x0e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x8b, 0x01, 0x0a, 0x1d, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, + 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x20, + 0x0a, 0x1e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x7c, 0x0a, 0x12, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x12, 0x2c, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x15, + 0x0a, 0x13, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf0, 0x01, 0x0a, 0x15, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, + 0x75, 0x69, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, + 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x3f, 0x0a, 0x16, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x5e, 0x0a, 0x18, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x75, 0x69, 0x64, 0x22, 0x42, 0x0a, 0x19, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x53, 0x0a, + 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, + 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a, + 0x21, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, + 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x22, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, + 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, + 0x6f, 0x6c, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5c, 0x0a, 0x15, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, + 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x5d, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, + 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, + 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x64, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, + 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x65, 0x0a, + 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, + 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, + 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x22, 0x34, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, + 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfb, 0x01, 0x0a, 0x10, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x1a, 0x69, 0x0a, + 0x16, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x58, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x73, 0x22, 0xfc, 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x10, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xd8, 0x01, 0x0a, 0x1d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, + 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x73, + 0x6b, 0x69, 0x70, 0x5f, 0x6e, 0x6f, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x4e, 0x6f, 0x50, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x88, 0x02, 0x0a, + 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x10, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6b, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x73, 0x22, 0x31, 0x0a, 0x15, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x3c, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x02, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x73, 0x12, 0x68, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, + 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, + 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x4f, 0x0a, 0x1b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x22, 0x38, 0x0a, 0x1c, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x98, 0x01, + 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x25, 0x0a, 0x0e, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, + 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x22, 0xfa, 0x01, 0x0a, 0x17, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x60, + 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9a, 0x01, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x5f, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6b, 0x65, 0x65, 0x70, + 0x44, 0x61, 0x74, 0x61, 0x12, 0x2c, 0x0a, 0x12, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x72, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, + 0x65, 0x73, 0x22, 0xd1, 0x01, 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x46, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, + 0x55, 0x0a, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x0a, + 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x4f, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0xc1, 0x07, 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x70, 0x79, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x58, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x1a, 0xe8, 0x01, + 0x0a, 0x0e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, + 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, + 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x72, 0x6f, 0x77, 0x73, 0x50, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x29, 0x0a, + 0x10, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, + 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x1a, 0xbc, 0x01, 0x0a, 0x10, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2d, 0x0a, + 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x1a, 0x5c, 0x0a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x4c, 0x0a, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x73, 0x1a, 0x73, 0x0a, 0x13, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, + 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x46, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x6f, 0x0a, 0x11, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x44, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7, 0x03, 0x0a, 0x1c, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, + 0x61, 0x66, 0x66, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x41, 0x6c, 0x6c, + 0x6f, 0x77, 0x65, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, + 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x52, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x2a, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x17, 0x0a, 0x07, + 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, + 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x3e, 0x0a, 0x1b, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x1d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x72, 0x79, 0x5f, 0x72, + 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0d, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, + 0x90, 0x01, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5b, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x22, 0xd1, 0x01, 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x46, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, + 0x55, 0x0a, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x0a, + 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, + 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x63, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x2a, 0x4a, 0x0a, 0x15, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, + 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, + 0x4f, 0x56, 0x45, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, + 0x52, 0x45, 0x41, 0x54, 0x45, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x49, 0x4e, 0x44, 0x45, 0x58, + 0x10, 0x02, 0x2a, 0x38, 0x0a, 0x0d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4f, 0x72, 0x64, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0d, 0x0a, + 0x09, 0x41, 0x53, 0x43, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, + 0x44, 0x45, 0x53, 0x43, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x42, 0x28, 0x5a, 0x26, + 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, + 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_vtctldata_proto_rawDescOnce sync.Once + file_vtctldata_proto_rawDescData = file_vtctldata_proto_rawDesc +) + +func file_vtctldata_proto_rawDescGZIP() []byte { + file_vtctldata_proto_rawDescOnce.Do(func() { + file_vtctldata_proto_rawDescData = protoimpl.X.CompressGZIP(file_vtctldata_proto_rawDescData) + }) + return file_vtctldata_proto_rawDescData +} + +var file_vtctldata_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_vtctldata_proto_msgTypes = make([]protoimpl.MessageInfo, 238) +var file_vtctldata_proto_goTypes = []interface{}{ + (MaterializationIntent)(0), // 0: vtctldata.MaterializationIntent + (QueryOrdering)(0), // 1: vtctldata.QueryOrdering + (SchemaMigration_Strategy)(0), // 2: vtctldata.SchemaMigration.Strategy + (SchemaMigration_Status)(0), // 3: vtctldata.SchemaMigration.Status + (*ExecuteVtctlCommandRequest)(nil), // 4: vtctldata.ExecuteVtctlCommandRequest + (*ExecuteVtctlCommandResponse)(nil), // 5: vtctldata.ExecuteVtctlCommandResponse + (*TableMaterializeSettings)(nil), // 6: vtctldata.TableMaterializeSettings + (*MaterializeSettings)(nil), // 7: vtctldata.MaterializeSettings + (*Keyspace)(nil), // 8: vtctldata.Keyspace + (*SchemaMigration)(nil), // 9: vtctldata.SchemaMigration + (*Shard)(nil), // 10: vtctldata.Shard + (*Workflow)(nil), // 11: vtctldata.Workflow + (*AddCellInfoRequest)(nil), // 12: vtctldata.AddCellInfoRequest + (*AddCellInfoResponse)(nil), // 13: vtctldata.AddCellInfoResponse + (*AddCellsAliasRequest)(nil), // 14: vtctldata.AddCellsAliasRequest + (*AddCellsAliasResponse)(nil), // 15: vtctldata.AddCellsAliasResponse + (*ApplyRoutingRulesRequest)(nil), // 16: vtctldata.ApplyRoutingRulesRequest + (*ApplyRoutingRulesResponse)(nil), // 17: vtctldata.ApplyRoutingRulesResponse + (*ApplyShardRoutingRulesRequest)(nil), // 18: vtctldata.ApplyShardRoutingRulesRequest + (*ApplyShardRoutingRulesResponse)(nil), // 19: vtctldata.ApplyShardRoutingRulesResponse + (*ApplySchemaRequest)(nil), // 20: vtctldata.ApplySchemaRequest + (*ApplySchemaResponse)(nil), // 21: vtctldata.ApplySchemaResponse + (*ApplyVSchemaRequest)(nil), // 22: vtctldata.ApplyVSchemaRequest + (*ApplyVSchemaResponse)(nil), // 23: vtctldata.ApplyVSchemaResponse + (*BackupRequest)(nil), // 24: vtctldata.BackupRequest + (*BackupResponse)(nil), // 25: vtctldata.BackupResponse + (*BackupShardRequest)(nil), // 26: vtctldata.BackupShardRequest + (*CancelSchemaMigrationRequest)(nil), // 27: vtctldata.CancelSchemaMigrationRequest + (*CancelSchemaMigrationResponse)(nil), // 28: vtctldata.CancelSchemaMigrationResponse + (*ChangeTabletTypeRequest)(nil), // 29: vtctldata.ChangeTabletTypeRequest + (*ChangeTabletTypeResponse)(nil), // 30: vtctldata.ChangeTabletTypeResponse + (*CleanupSchemaMigrationRequest)(nil), // 31: vtctldata.CleanupSchemaMigrationRequest + (*CleanupSchemaMigrationResponse)(nil), // 32: vtctldata.CleanupSchemaMigrationResponse + (*CompleteSchemaMigrationRequest)(nil), // 33: vtctldata.CompleteSchemaMigrationRequest + (*CompleteSchemaMigrationResponse)(nil), // 34: vtctldata.CompleteSchemaMigrationResponse + (*CreateKeyspaceRequest)(nil), // 35: vtctldata.CreateKeyspaceRequest + (*CreateKeyspaceResponse)(nil), // 36: vtctldata.CreateKeyspaceResponse + (*CreateShardRequest)(nil), // 37: vtctldata.CreateShardRequest + (*CreateShardResponse)(nil), // 38: vtctldata.CreateShardResponse + (*DeleteCellInfoRequest)(nil), // 39: vtctldata.DeleteCellInfoRequest + (*DeleteCellInfoResponse)(nil), // 40: vtctldata.DeleteCellInfoResponse + (*DeleteCellsAliasRequest)(nil), // 41: vtctldata.DeleteCellsAliasRequest + (*DeleteCellsAliasResponse)(nil), // 42: vtctldata.DeleteCellsAliasResponse + (*DeleteKeyspaceRequest)(nil), // 43: vtctldata.DeleteKeyspaceRequest + (*DeleteKeyspaceResponse)(nil), // 44: vtctldata.DeleteKeyspaceResponse + (*DeleteShardsRequest)(nil), // 45: vtctldata.DeleteShardsRequest + (*DeleteShardsResponse)(nil), // 46: vtctldata.DeleteShardsResponse + (*DeleteSrvVSchemaRequest)(nil), // 47: vtctldata.DeleteSrvVSchemaRequest + (*DeleteSrvVSchemaResponse)(nil), // 48: vtctldata.DeleteSrvVSchemaResponse + (*DeleteTabletsRequest)(nil), // 49: vtctldata.DeleteTabletsRequest + (*DeleteTabletsResponse)(nil), // 50: vtctldata.DeleteTabletsResponse + (*EmergencyReparentShardRequest)(nil), // 51: vtctldata.EmergencyReparentShardRequest + (*EmergencyReparentShardResponse)(nil), // 52: vtctldata.EmergencyReparentShardResponse + (*ExecuteFetchAsAppRequest)(nil), // 53: vtctldata.ExecuteFetchAsAppRequest + (*ExecuteFetchAsAppResponse)(nil), // 54: vtctldata.ExecuteFetchAsAppResponse + (*ExecuteFetchAsDBARequest)(nil), // 55: vtctldata.ExecuteFetchAsDBARequest + (*ExecuteFetchAsDBAResponse)(nil), // 56: vtctldata.ExecuteFetchAsDBAResponse + (*ExecuteHookRequest)(nil), // 57: vtctldata.ExecuteHookRequest + (*ExecuteHookResponse)(nil), // 58: vtctldata.ExecuteHookResponse + (*FindAllShardsInKeyspaceRequest)(nil), // 59: vtctldata.FindAllShardsInKeyspaceRequest + (*FindAllShardsInKeyspaceResponse)(nil), // 60: vtctldata.FindAllShardsInKeyspaceResponse + (*GetBackupsRequest)(nil), // 61: vtctldata.GetBackupsRequest + (*GetBackupsResponse)(nil), // 62: vtctldata.GetBackupsResponse + (*GetCellInfoRequest)(nil), // 63: vtctldata.GetCellInfoRequest + (*GetCellInfoResponse)(nil), // 64: vtctldata.GetCellInfoResponse + (*GetCellInfoNamesRequest)(nil), // 65: vtctldata.GetCellInfoNamesRequest + (*GetCellInfoNamesResponse)(nil), // 66: vtctldata.GetCellInfoNamesResponse + (*GetCellsAliasesRequest)(nil), // 67: vtctldata.GetCellsAliasesRequest + (*GetCellsAliasesResponse)(nil), // 68: vtctldata.GetCellsAliasesResponse + (*GetFullStatusRequest)(nil), // 69: vtctldata.GetFullStatusRequest + (*GetFullStatusResponse)(nil), // 70: vtctldata.GetFullStatusResponse + (*GetKeyspacesRequest)(nil), // 71: vtctldata.GetKeyspacesRequest + (*GetKeyspacesResponse)(nil), // 72: vtctldata.GetKeyspacesResponse + (*GetKeyspaceRequest)(nil), // 73: vtctldata.GetKeyspaceRequest + (*GetKeyspaceResponse)(nil), // 74: vtctldata.GetKeyspaceResponse + (*GetPermissionsRequest)(nil), // 75: vtctldata.GetPermissionsRequest + (*GetPermissionsResponse)(nil), // 76: vtctldata.GetPermissionsResponse + (*GetRoutingRulesRequest)(nil), // 77: vtctldata.GetRoutingRulesRequest + (*GetRoutingRulesResponse)(nil), // 78: vtctldata.GetRoutingRulesResponse + (*GetSchemaRequest)(nil), // 79: vtctldata.GetSchemaRequest + (*GetSchemaResponse)(nil), // 80: vtctldata.GetSchemaResponse + (*GetSchemaMigrationsRequest)(nil), // 81: vtctldata.GetSchemaMigrationsRequest + (*GetSchemaMigrationsResponse)(nil), // 82: vtctldata.GetSchemaMigrationsResponse + (*GetShardRequest)(nil), // 83: vtctldata.GetShardRequest + (*GetShardResponse)(nil), // 84: vtctldata.GetShardResponse + (*GetShardRoutingRulesRequest)(nil), // 85: vtctldata.GetShardRoutingRulesRequest + (*GetShardRoutingRulesResponse)(nil), // 86: vtctldata.GetShardRoutingRulesResponse + (*GetSrvKeyspaceNamesRequest)(nil), // 87: vtctldata.GetSrvKeyspaceNamesRequest + (*GetSrvKeyspaceNamesResponse)(nil), // 88: vtctldata.GetSrvKeyspaceNamesResponse + (*GetSrvKeyspacesRequest)(nil), // 89: vtctldata.GetSrvKeyspacesRequest + (*GetSrvKeyspacesResponse)(nil), // 90: vtctldata.GetSrvKeyspacesResponse + (*UpdateThrottlerConfigRequest)(nil), // 91: vtctldata.UpdateThrottlerConfigRequest + (*UpdateThrottlerConfigResponse)(nil), // 92: vtctldata.UpdateThrottlerConfigResponse + (*GetSrvVSchemaRequest)(nil), // 93: vtctldata.GetSrvVSchemaRequest + (*GetSrvVSchemaResponse)(nil), // 94: vtctldata.GetSrvVSchemaResponse + (*GetSrvVSchemasRequest)(nil), // 95: vtctldata.GetSrvVSchemasRequest + (*GetSrvVSchemasResponse)(nil), // 96: vtctldata.GetSrvVSchemasResponse + (*GetTabletRequest)(nil), // 97: vtctldata.GetTabletRequest + (*GetTabletResponse)(nil), // 98: vtctldata.GetTabletResponse + (*GetTabletsRequest)(nil), // 99: vtctldata.GetTabletsRequest + (*GetTabletsResponse)(nil), // 100: vtctldata.GetTabletsResponse + (*GetTopologyPathRequest)(nil), // 101: vtctldata.GetTopologyPathRequest + (*GetTopologyPathResponse)(nil), // 102: vtctldata.GetTopologyPathResponse + (*TopologyCell)(nil), // 103: vtctldata.TopologyCell + (*GetVSchemaRequest)(nil), // 104: vtctldata.GetVSchemaRequest + (*GetVersionRequest)(nil), // 105: vtctldata.GetVersionRequest + (*GetVersionResponse)(nil), // 106: vtctldata.GetVersionResponse + (*GetVSchemaResponse)(nil), // 107: vtctldata.GetVSchemaResponse + (*GetWorkflowsRequest)(nil), // 108: vtctldata.GetWorkflowsRequest + (*GetWorkflowsResponse)(nil), // 109: vtctldata.GetWorkflowsResponse + (*InitShardPrimaryRequest)(nil), // 110: vtctldata.InitShardPrimaryRequest + (*InitShardPrimaryResponse)(nil), // 111: vtctldata.InitShardPrimaryResponse + (*LaunchSchemaMigrationRequest)(nil), // 112: vtctldata.LaunchSchemaMigrationRequest + (*LaunchSchemaMigrationResponse)(nil), // 113: vtctldata.LaunchSchemaMigrationResponse + (*MoveTablesCreateRequest)(nil), // 114: vtctldata.MoveTablesCreateRequest + (*MoveTablesCreateResponse)(nil), // 115: vtctldata.MoveTablesCreateResponse + (*MoveTablesCompleteRequest)(nil), // 116: vtctldata.MoveTablesCompleteRequest + (*MoveTablesCompleteResponse)(nil), // 117: vtctldata.MoveTablesCompleteResponse + (*PingTabletRequest)(nil), // 118: vtctldata.PingTabletRequest + (*PingTabletResponse)(nil), // 119: vtctldata.PingTabletResponse + (*PlannedReparentShardRequest)(nil), // 120: vtctldata.PlannedReparentShardRequest + (*PlannedReparentShardResponse)(nil), // 121: vtctldata.PlannedReparentShardResponse + (*RebuildKeyspaceGraphRequest)(nil), // 122: vtctldata.RebuildKeyspaceGraphRequest + (*RebuildKeyspaceGraphResponse)(nil), // 123: vtctldata.RebuildKeyspaceGraphResponse + (*RebuildVSchemaGraphRequest)(nil), // 124: vtctldata.RebuildVSchemaGraphRequest + (*RebuildVSchemaGraphResponse)(nil), // 125: vtctldata.RebuildVSchemaGraphResponse + (*RefreshStateRequest)(nil), // 126: vtctldata.RefreshStateRequest + (*RefreshStateResponse)(nil), // 127: vtctldata.RefreshStateResponse + (*RefreshStateByShardRequest)(nil), // 128: vtctldata.RefreshStateByShardRequest + (*RefreshStateByShardResponse)(nil), // 129: vtctldata.RefreshStateByShardResponse + (*ReloadSchemaRequest)(nil), // 130: vtctldata.ReloadSchemaRequest + (*ReloadSchemaResponse)(nil), // 131: vtctldata.ReloadSchemaResponse + (*ReloadSchemaKeyspaceRequest)(nil), // 132: vtctldata.ReloadSchemaKeyspaceRequest + (*ReloadSchemaKeyspaceResponse)(nil), // 133: vtctldata.ReloadSchemaKeyspaceResponse + (*ReloadSchemaShardRequest)(nil), // 134: vtctldata.ReloadSchemaShardRequest + (*ReloadSchemaShardResponse)(nil), // 135: vtctldata.ReloadSchemaShardResponse + (*RemoveBackupRequest)(nil), // 136: vtctldata.RemoveBackupRequest + (*RemoveBackupResponse)(nil), // 137: vtctldata.RemoveBackupResponse + (*RemoveKeyspaceCellRequest)(nil), // 138: vtctldata.RemoveKeyspaceCellRequest + (*RemoveKeyspaceCellResponse)(nil), // 139: vtctldata.RemoveKeyspaceCellResponse + (*RemoveShardCellRequest)(nil), // 140: vtctldata.RemoveShardCellRequest + (*RemoveShardCellResponse)(nil), // 141: vtctldata.RemoveShardCellResponse + (*ReparentTabletRequest)(nil), // 142: vtctldata.ReparentTabletRequest + (*ReparentTabletResponse)(nil), // 143: vtctldata.ReparentTabletResponse + (*ReshardCreateRequest)(nil), // 144: vtctldata.ReshardCreateRequest + (*RestoreFromBackupRequest)(nil), // 145: vtctldata.RestoreFromBackupRequest + (*RestoreFromBackupResponse)(nil), // 146: vtctldata.RestoreFromBackupResponse + (*RetrySchemaMigrationRequest)(nil), // 147: vtctldata.RetrySchemaMigrationRequest + (*RetrySchemaMigrationResponse)(nil), // 148: vtctldata.RetrySchemaMigrationResponse + (*RunHealthCheckRequest)(nil), // 149: vtctldata.RunHealthCheckRequest + (*RunHealthCheckResponse)(nil), // 150: vtctldata.RunHealthCheckResponse + (*SetKeyspaceDurabilityPolicyRequest)(nil), // 151: vtctldata.SetKeyspaceDurabilityPolicyRequest + (*SetKeyspaceDurabilityPolicyResponse)(nil), // 152: vtctldata.SetKeyspaceDurabilityPolicyResponse + (*SetKeyspaceServedFromRequest)(nil), // 153: vtctldata.SetKeyspaceServedFromRequest + (*SetKeyspaceServedFromResponse)(nil), // 154: vtctldata.SetKeyspaceServedFromResponse + (*SetKeyspaceShardingInfoRequest)(nil), // 155: vtctldata.SetKeyspaceShardingInfoRequest + (*SetKeyspaceShardingInfoResponse)(nil), // 156: vtctldata.SetKeyspaceShardingInfoResponse + (*SetShardIsPrimaryServingRequest)(nil), // 157: vtctldata.SetShardIsPrimaryServingRequest + (*SetShardIsPrimaryServingResponse)(nil), // 158: vtctldata.SetShardIsPrimaryServingResponse + (*SetShardTabletControlRequest)(nil), // 159: vtctldata.SetShardTabletControlRequest + (*SetShardTabletControlResponse)(nil), // 160: vtctldata.SetShardTabletControlResponse + (*SetWritableRequest)(nil), // 161: vtctldata.SetWritableRequest + (*SetWritableResponse)(nil), // 162: vtctldata.SetWritableResponse + (*ShardReplicationAddRequest)(nil), // 163: vtctldata.ShardReplicationAddRequest + (*ShardReplicationAddResponse)(nil), // 164: vtctldata.ShardReplicationAddResponse + (*ShardReplicationFixRequest)(nil), // 165: vtctldata.ShardReplicationFixRequest + (*ShardReplicationFixResponse)(nil), // 166: vtctldata.ShardReplicationFixResponse + (*ShardReplicationPositionsRequest)(nil), // 167: vtctldata.ShardReplicationPositionsRequest + (*ShardReplicationPositionsResponse)(nil), // 168: vtctldata.ShardReplicationPositionsResponse + (*ShardReplicationRemoveRequest)(nil), // 169: vtctldata.ShardReplicationRemoveRequest + (*ShardReplicationRemoveResponse)(nil), // 170: vtctldata.ShardReplicationRemoveResponse + (*SleepTabletRequest)(nil), // 171: vtctldata.SleepTabletRequest + (*SleepTabletResponse)(nil), // 172: vtctldata.SleepTabletResponse + (*SourceShardAddRequest)(nil), // 173: vtctldata.SourceShardAddRequest + (*SourceShardAddResponse)(nil), // 174: vtctldata.SourceShardAddResponse + (*SourceShardDeleteRequest)(nil), // 175: vtctldata.SourceShardDeleteRequest + (*SourceShardDeleteResponse)(nil), // 176: vtctldata.SourceShardDeleteResponse + (*StartReplicationRequest)(nil), // 177: vtctldata.StartReplicationRequest + (*StartReplicationResponse)(nil), // 178: vtctldata.StartReplicationResponse + (*StopReplicationRequest)(nil), // 179: vtctldata.StopReplicationRequest + (*StopReplicationResponse)(nil), // 180: vtctldata.StopReplicationResponse + (*TabletExternallyReparentedRequest)(nil), // 181: vtctldata.TabletExternallyReparentedRequest + (*TabletExternallyReparentedResponse)(nil), // 182: vtctldata.TabletExternallyReparentedResponse + (*UpdateCellInfoRequest)(nil), // 183: vtctldata.UpdateCellInfoRequest + (*UpdateCellInfoResponse)(nil), // 184: vtctldata.UpdateCellInfoResponse + (*UpdateCellsAliasRequest)(nil), // 185: vtctldata.UpdateCellsAliasRequest + (*UpdateCellsAliasResponse)(nil), // 186: vtctldata.UpdateCellsAliasResponse + (*ValidateRequest)(nil), // 187: vtctldata.ValidateRequest + (*ValidateResponse)(nil), // 188: vtctldata.ValidateResponse + (*ValidateKeyspaceRequest)(nil), // 189: vtctldata.ValidateKeyspaceRequest + (*ValidateKeyspaceResponse)(nil), // 190: vtctldata.ValidateKeyspaceResponse + (*ValidateSchemaKeyspaceRequest)(nil), // 191: vtctldata.ValidateSchemaKeyspaceRequest + (*ValidateSchemaKeyspaceResponse)(nil), // 192: vtctldata.ValidateSchemaKeyspaceResponse + (*ValidateShardRequest)(nil), // 193: vtctldata.ValidateShardRequest + (*ValidateShardResponse)(nil), // 194: vtctldata.ValidateShardResponse + (*ValidateVersionKeyspaceRequest)(nil), // 195: vtctldata.ValidateVersionKeyspaceRequest + (*ValidateVersionKeyspaceResponse)(nil), // 196: vtctldata.ValidateVersionKeyspaceResponse + (*ValidateVersionShardRequest)(nil), // 197: vtctldata.ValidateVersionShardRequest + (*ValidateVersionShardResponse)(nil), // 198: vtctldata.ValidateVersionShardResponse + (*ValidateVSchemaRequest)(nil), // 199: vtctldata.ValidateVSchemaRequest + (*ValidateVSchemaResponse)(nil), // 200: vtctldata.ValidateVSchemaResponse + (*WorkflowDeleteRequest)(nil), // 201: vtctldata.WorkflowDeleteRequest + (*WorkflowDeleteResponse)(nil), // 202: vtctldata.WorkflowDeleteResponse + (*WorkflowStatusRequest)(nil), // 203: vtctldata.WorkflowStatusRequest + (*WorkflowStatusResponse)(nil), // 204: vtctldata.WorkflowStatusResponse + (*WorkflowSwitchTrafficRequest)(nil), // 205: vtctldata.WorkflowSwitchTrafficRequest + (*WorkflowSwitchTrafficResponse)(nil), // 206: vtctldata.WorkflowSwitchTrafficResponse + (*WorkflowUpdateRequest)(nil), // 207: vtctldata.WorkflowUpdateRequest + (*WorkflowUpdateResponse)(nil), // 208: vtctldata.WorkflowUpdateResponse + nil, // 209: vtctldata.Workflow.ShardStreamsEntry + (*Workflow_ReplicationLocation)(nil), // 210: vtctldata.Workflow.ReplicationLocation + (*Workflow_ShardStream)(nil), // 211: vtctldata.Workflow.ShardStream + (*Workflow_Stream)(nil), // 212: vtctldata.Workflow.Stream + (*Workflow_Stream_CopyState)(nil), // 213: vtctldata.Workflow.Stream.CopyState + (*Workflow_Stream_Log)(nil), // 214: vtctldata.Workflow.Stream.Log + nil, // 215: vtctldata.ApplySchemaResponse.RowsAffectedByShardEntry + nil, // 216: vtctldata.CancelSchemaMigrationResponse.RowsAffectedByShardEntry + nil, // 217: vtctldata.CleanupSchemaMigrationResponse.RowsAffectedByShardEntry + nil, // 218: vtctldata.CompleteSchemaMigrationResponse.RowsAffectedByShardEntry + nil, // 219: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry + nil, // 220: vtctldata.GetCellsAliasesResponse.AliasesEntry + nil, // 221: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry + (*GetSrvKeyspaceNamesResponse_NameList)(nil), // 222: vtctldata.GetSrvKeyspaceNamesResponse.NameList + nil, // 223: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry + nil, // 224: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry + nil, // 225: vtctldata.LaunchSchemaMigrationResponse.RowsAffectedByShardEntry + (*MoveTablesCreateResponse_TabletInfo)(nil), // 226: vtctldata.MoveTablesCreateResponse.TabletInfo + nil, // 227: vtctldata.RetrySchemaMigrationResponse.RowsAffectedByShardEntry + nil, // 228: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry + nil, // 229: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry + nil, // 230: vtctldata.ValidateResponse.ResultsByKeyspaceEntry + nil, // 231: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry + nil, // 232: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry + nil, // 233: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry + nil, // 234: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry + (*WorkflowDeleteResponse_TabletInfo)(nil), // 235: vtctldata.WorkflowDeleteResponse.TabletInfo + (*WorkflowStatusResponse_TableCopyState)(nil), // 236: vtctldata.WorkflowStatusResponse.TableCopyState + (*WorkflowStatusResponse_ShardStreamState)(nil), // 237: vtctldata.WorkflowStatusResponse.ShardStreamState + (*WorkflowStatusResponse_ShardStreams)(nil), // 238: vtctldata.WorkflowStatusResponse.ShardStreams + nil, // 239: vtctldata.WorkflowStatusResponse.TableCopyStateEntry + nil, // 240: vtctldata.WorkflowStatusResponse.ShardStreamsEntry + (*WorkflowUpdateResponse_TabletInfo)(nil), // 241: vtctldata.WorkflowUpdateResponse.TabletInfo + (*logutil.Event)(nil), // 242: logutil.Event + (tabletmanagerdata.TabletSelectionPreference)(0), // 243: tabletmanagerdata.TabletSelectionPreference + (*topodata.Keyspace)(nil), // 244: topodata.Keyspace + (*vttime.Time)(nil), // 245: vttime.Time + (*topodata.TabletAlias)(nil), // 246: topodata.TabletAlias + (*vttime.Duration)(nil), // 247: vttime.Duration + (*topodata.Shard)(nil), // 248: topodata.Shard + (*topodata.CellInfo)(nil), // 249: topodata.CellInfo + (*vschema.RoutingRules)(nil), // 250: vschema.RoutingRules + (*vschema.ShardRoutingRules)(nil), // 251: vschema.ShardRoutingRules + (*vtrpc.CallerID)(nil), // 252: vtrpc.CallerID + (*vschema.Keyspace)(nil), // 253: vschema.Keyspace + (topodata.TabletType)(0), // 254: topodata.TabletType + (*topodata.Tablet)(nil), // 255: topodata.Tablet + (*topodata.Keyspace_ServedFrom)(nil), // 256: topodata.Keyspace.ServedFrom + (topodata.KeyspaceType)(0), // 257: topodata.KeyspaceType + (*query.QueryResult)(nil), // 258: query.QueryResult + (*tabletmanagerdata.ExecuteHookRequest)(nil), // 259: tabletmanagerdata.ExecuteHookRequest + (*tabletmanagerdata.ExecuteHookResponse)(nil), // 260: tabletmanagerdata.ExecuteHookResponse + (*mysqlctl.BackupInfo)(nil), // 261: mysqlctl.BackupInfo + (*replicationdata.FullStatus)(nil), // 262: replicationdata.FullStatus + (*tabletmanagerdata.Permissions)(nil), // 263: tabletmanagerdata.Permissions + (*tabletmanagerdata.SchemaDefinition)(nil), // 264: tabletmanagerdata.SchemaDefinition + (*topodata.ThrottledAppRule)(nil), // 265: topodata.ThrottledAppRule + (*vschema.SrvVSchema)(nil), // 266: vschema.SrvVSchema + (*topodata.ShardReplicationError)(nil), // 267: topodata.ShardReplicationError + (*topodata.KeyRange)(nil), // 268: topodata.KeyRange + (*topodata.CellsAlias)(nil), // 269: topodata.CellsAlias + (*tabletmanagerdata.UpdateVReplicationWorkflowRequest)(nil), // 270: tabletmanagerdata.UpdateVReplicationWorkflowRequest + (*topodata.Shard_TabletControl)(nil), // 271: topodata.Shard.TabletControl + (*binlogdata.BinlogSource)(nil), // 272: binlogdata.BinlogSource + (*topodata.SrvKeyspace)(nil), // 273: topodata.SrvKeyspace + (*replicationdata.Status)(nil), // 274: replicationdata.Status +} +var file_vtctldata_proto_depIdxs = []int32{ + 242, // 0: vtctldata.ExecuteVtctlCommandResponse.event:type_name -> logutil.Event + 6, // 1: vtctldata.MaterializeSettings.table_settings:type_name -> vtctldata.TableMaterializeSettings + 0, // 2: vtctldata.MaterializeSettings.materialization_intent:type_name -> vtctldata.MaterializationIntent + 243, // 3: vtctldata.MaterializeSettings.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 244, // 4: vtctldata.Keyspace.keyspace:type_name -> topodata.Keyspace + 2, // 5: vtctldata.SchemaMigration.strategy:type_name -> vtctldata.SchemaMigration.Strategy + 245, // 6: vtctldata.SchemaMigration.added_at:type_name -> vttime.Time + 245, // 7: vtctldata.SchemaMigration.requested_at:type_name -> vttime.Time + 245, // 8: vtctldata.SchemaMigration.ready_at:type_name -> vttime.Time + 245, // 9: vtctldata.SchemaMigration.started_at:type_name -> vttime.Time + 245, // 10: vtctldata.SchemaMigration.liveness_timestamp:type_name -> vttime.Time + 245, // 11: vtctldata.SchemaMigration.completed_at:type_name -> vttime.Time + 245, // 12: vtctldata.SchemaMigration.cleaned_up_at:type_name -> vttime.Time + 3, // 13: vtctldata.SchemaMigration.status:type_name -> vtctldata.SchemaMigration.Status + 246, // 14: vtctldata.SchemaMigration.tablet:type_name -> topodata.TabletAlias + 247, // 15: vtctldata.SchemaMigration.artifact_retention:type_name -> vttime.Duration + 245, // 16: vtctldata.SchemaMigration.last_throttled_at:type_name -> vttime.Time + 245, // 17: vtctldata.SchemaMigration.cancelled_at:type_name -> vttime.Time + 245, // 18: vtctldata.SchemaMigration.reviewed_at:type_name -> vttime.Time + 245, // 19: vtctldata.SchemaMigration.ready_to_complete_at:type_name -> vttime.Time + 248, // 20: vtctldata.Shard.shard:type_name -> topodata.Shard + 210, // 21: vtctldata.Workflow.source:type_name -> vtctldata.Workflow.ReplicationLocation + 210, // 22: vtctldata.Workflow.target:type_name -> vtctldata.Workflow.ReplicationLocation + 209, // 23: vtctldata.Workflow.shard_streams:type_name -> vtctldata.Workflow.ShardStreamsEntry + 249, // 24: vtctldata.AddCellInfoRequest.cell_info:type_name -> topodata.CellInfo + 250, // 25: vtctldata.ApplyRoutingRulesRequest.routing_rules:type_name -> vschema.RoutingRules + 251, // 26: vtctldata.ApplyShardRoutingRulesRequest.shard_routing_rules:type_name -> vschema.ShardRoutingRules + 247, // 27: vtctldata.ApplySchemaRequest.wait_replicas_timeout:type_name -> vttime.Duration + 252, // 28: vtctldata.ApplySchemaRequest.caller_id:type_name -> vtrpc.CallerID + 215, // 29: vtctldata.ApplySchemaResponse.rows_affected_by_shard:type_name -> vtctldata.ApplySchemaResponse.RowsAffectedByShardEntry + 253, // 30: vtctldata.ApplyVSchemaRequest.v_schema:type_name -> vschema.Keyspace + 253, // 31: vtctldata.ApplyVSchemaResponse.v_schema:type_name -> vschema.Keyspace + 246, // 32: vtctldata.BackupRequest.tablet_alias:type_name -> topodata.TabletAlias + 246, // 33: vtctldata.BackupResponse.tablet_alias:type_name -> topodata.TabletAlias + 242, // 34: vtctldata.BackupResponse.event:type_name -> logutil.Event + 216, // 35: vtctldata.CancelSchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.CancelSchemaMigrationResponse.RowsAffectedByShardEntry + 246, // 36: vtctldata.ChangeTabletTypeRequest.tablet_alias:type_name -> topodata.TabletAlias + 254, // 37: vtctldata.ChangeTabletTypeRequest.db_type:type_name -> topodata.TabletType + 255, // 38: vtctldata.ChangeTabletTypeResponse.before_tablet:type_name -> topodata.Tablet + 255, // 39: vtctldata.ChangeTabletTypeResponse.after_tablet:type_name -> topodata.Tablet + 217, // 40: vtctldata.CleanupSchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.CleanupSchemaMigrationResponse.RowsAffectedByShardEntry + 218, // 41: vtctldata.CompleteSchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.CompleteSchemaMigrationResponse.RowsAffectedByShardEntry + 256, // 42: vtctldata.CreateKeyspaceRequest.served_froms:type_name -> topodata.Keyspace.ServedFrom + 257, // 43: vtctldata.CreateKeyspaceRequest.type:type_name -> topodata.KeyspaceType + 245, // 44: vtctldata.CreateKeyspaceRequest.snapshot_time:type_name -> vttime.Time + 8, // 45: vtctldata.CreateKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace + 8, // 46: vtctldata.CreateShardResponse.keyspace:type_name -> vtctldata.Keyspace + 10, // 47: vtctldata.CreateShardResponse.shard:type_name -> vtctldata.Shard + 10, // 48: vtctldata.DeleteShardsRequest.shards:type_name -> vtctldata.Shard + 246, // 49: vtctldata.DeleteTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias + 246, // 50: vtctldata.EmergencyReparentShardRequest.new_primary:type_name -> topodata.TabletAlias + 246, // 51: vtctldata.EmergencyReparentShardRequest.ignore_replicas:type_name -> topodata.TabletAlias + 247, // 52: vtctldata.EmergencyReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration + 246, // 53: vtctldata.EmergencyReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias + 242, // 54: vtctldata.EmergencyReparentShardResponse.events:type_name -> logutil.Event + 246, // 55: vtctldata.ExecuteFetchAsAppRequest.tablet_alias:type_name -> topodata.TabletAlias + 258, // 56: vtctldata.ExecuteFetchAsAppResponse.result:type_name -> query.QueryResult + 246, // 57: vtctldata.ExecuteFetchAsDBARequest.tablet_alias:type_name -> topodata.TabletAlias + 258, // 58: vtctldata.ExecuteFetchAsDBAResponse.result:type_name -> query.QueryResult + 246, // 59: vtctldata.ExecuteHookRequest.tablet_alias:type_name -> topodata.TabletAlias + 259, // 60: vtctldata.ExecuteHookRequest.tablet_hook_request:type_name -> tabletmanagerdata.ExecuteHookRequest + 260, // 61: vtctldata.ExecuteHookResponse.hook_result:type_name -> tabletmanagerdata.ExecuteHookResponse + 219, // 62: vtctldata.FindAllShardsInKeyspaceResponse.shards:type_name -> vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry + 261, // 63: vtctldata.GetBackupsResponse.backups:type_name -> mysqlctl.BackupInfo + 249, // 64: vtctldata.GetCellInfoResponse.cell_info:type_name -> topodata.CellInfo + 220, // 65: vtctldata.GetCellsAliasesResponse.aliases:type_name -> vtctldata.GetCellsAliasesResponse.AliasesEntry + 246, // 66: vtctldata.GetFullStatusRequest.tablet_alias:type_name -> topodata.TabletAlias + 262, // 67: vtctldata.GetFullStatusResponse.status:type_name -> replicationdata.FullStatus + 8, // 68: vtctldata.GetKeyspacesResponse.keyspaces:type_name -> vtctldata.Keyspace + 8, // 69: vtctldata.GetKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace + 246, // 70: vtctldata.GetPermissionsRequest.tablet_alias:type_name -> topodata.TabletAlias + 263, // 71: vtctldata.GetPermissionsResponse.permissions:type_name -> tabletmanagerdata.Permissions + 250, // 72: vtctldata.GetRoutingRulesResponse.routing_rules:type_name -> vschema.RoutingRules + 246, // 73: vtctldata.GetSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias + 264, // 74: vtctldata.GetSchemaResponse.schema:type_name -> tabletmanagerdata.SchemaDefinition + 3, // 75: vtctldata.GetSchemaMigrationsRequest.status:type_name -> vtctldata.SchemaMigration.Status + 247, // 76: vtctldata.GetSchemaMigrationsRequest.recent:type_name -> vttime.Duration + 1, // 77: vtctldata.GetSchemaMigrationsRequest.order:type_name -> vtctldata.QueryOrdering + 9, // 78: vtctldata.GetSchemaMigrationsResponse.migrations:type_name -> vtctldata.SchemaMigration + 10, // 79: vtctldata.GetShardResponse.shard:type_name -> vtctldata.Shard + 251, // 80: vtctldata.GetShardRoutingRulesResponse.shard_routing_rules:type_name -> vschema.ShardRoutingRules + 221, // 81: vtctldata.GetSrvKeyspaceNamesResponse.names:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry + 223, // 82: vtctldata.GetSrvKeyspacesResponse.srv_keyspaces:type_name -> vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry + 265, // 83: vtctldata.UpdateThrottlerConfigRequest.throttled_app:type_name -> topodata.ThrottledAppRule + 266, // 84: vtctldata.GetSrvVSchemaResponse.srv_v_schema:type_name -> vschema.SrvVSchema + 224, // 85: vtctldata.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry + 246, // 86: vtctldata.GetTabletRequest.tablet_alias:type_name -> topodata.TabletAlias + 255, // 87: vtctldata.GetTabletResponse.tablet:type_name -> topodata.Tablet + 246, // 88: vtctldata.GetTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias + 254, // 89: vtctldata.GetTabletsRequest.tablet_type:type_name -> topodata.TabletType + 255, // 90: vtctldata.GetTabletsResponse.tablets:type_name -> topodata.Tablet + 103, // 91: vtctldata.GetTopologyPathResponse.cell:type_name -> vtctldata.TopologyCell + 246, // 92: vtctldata.GetVersionRequest.tablet_alias:type_name -> topodata.TabletAlias + 253, // 93: vtctldata.GetVSchemaResponse.v_schema:type_name -> vschema.Keyspace + 11, // 94: vtctldata.GetWorkflowsResponse.workflows:type_name -> vtctldata.Workflow + 246, // 95: vtctldata.InitShardPrimaryRequest.primary_elect_tablet_alias:type_name -> topodata.TabletAlias + 247, // 96: vtctldata.InitShardPrimaryRequest.wait_replicas_timeout:type_name -> vttime.Duration + 242, // 97: vtctldata.InitShardPrimaryResponse.events:type_name -> logutil.Event + 225, // 98: vtctldata.LaunchSchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.LaunchSchemaMigrationResponse.RowsAffectedByShardEntry + 254, // 99: vtctldata.MoveTablesCreateRequest.tablet_types:type_name -> topodata.TabletType + 243, // 100: vtctldata.MoveTablesCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 226, // 101: vtctldata.MoveTablesCreateResponse.details:type_name -> vtctldata.MoveTablesCreateResponse.TabletInfo + 246, // 102: vtctldata.PingTabletRequest.tablet_alias:type_name -> topodata.TabletAlias + 246, // 103: vtctldata.PlannedReparentShardRequest.new_primary:type_name -> topodata.TabletAlias + 246, // 104: vtctldata.PlannedReparentShardRequest.avoid_primary:type_name -> topodata.TabletAlias + 247, // 105: vtctldata.PlannedReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration + 246, // 106: vtctldata.PlannedReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias + 242, // 107: vtctldata.PlannedReparentShardResponse.events:type_name -> logutil.Event + 246, // 108: vtctldata.RefreshStateRequest.tablet_alias:type_name -> topodata.TabletAlias + 246, // 109: vtctldata.ReloadSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias + 242, // 110: vtctldata.ReloadSchemaKeyspaceResponse.events:type_name -> logutil.Event + 242, // 111: vtctldata.ReloadSchemaShardResponse.events:type_name -> logutil.Event + 246, // 112: vtctldata.ReparentTabletRequest.tablet:type_name -> topodata.TabletAlias + 246, // 113: vtctldata.ReparentTabletResponse.primary:type_name -> topodata.TabletAlias + 254, // 114: vtctldata.ReshardCreateRequest.tablet_types:type_name -> topodata.TabletType + 243, // 115: vtctldata.ReshardCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 246, // 116: vtctldata.RestoreFromBackupRequest.tablet_alias:type_name -> topodata.TabletAlias + 245, // 117: vtctldata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time + 245, // 118: vtctldata.RestoreFromBackupRequest.restore_to_timestamp:type_name -> vttime.Time + 246, // 119: vtctldata.RestoreFromBackupResponse.tablet_alias:type_name -> topodata.TabletAlias + 242, // 120: vtctldata.RestoreFromBackupResponse.event:type_name -> logutil.Event + 227, // 121: vtctldata.RetrySchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.RetrySchemaMigrationResponse.RowsAffectedByShardEntry + 246, // 122: vtctldata.RunHealthCheckRequest.tablet_alias:type_name -> topodata.TabletAlias + 244, // 123: vtctldata.SetKeyspaceDurabilityPolicyResponse.keyspace:type_name -> topodata.Keyspace + 254, // 124: vtctldata.SetKeyspaceServedFromRequest.tablet_type:type_name -> topodata.TabletType + 244, // 125: vtctldata.SetKeyspaceServedFromResponse.keyspace:type_name -> topodata.Keyspace + 244, // 126: vtctldata.SetKeyspaceShardingInfoResponse.keyspace:type_name -> topodata.Keyspace + 248, // 127: vtctldata.SetShardIsPrimaryServingResponse.shard:type_name -> topodata.Shard + 254, // 128: vtctldata.SetShardTabletControlRequest.tablet_type:type_name -> topodata.TabletType + 248, // 129: vtctldata.SetShardTabletControlResponse.shard:type_name -> topodata.Shard + 246, // 130: vtctldata.SetWritableRequest.tablet_alias:type_name -> topodata.TabletAlias + 246, // 131: vtctldata.ShardReplicationAddRequest.tablet_alias:type_name -> topodata.TabletAlias + 267, // 132: vtctldata.ShardReplicationFixResponse.error:type_name -> topodata.ShardReplicationError + 228, // 133: vtctldata.ShardReplicationPositionsResponse.replication_statuses:type_name -> vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry + 229, // 134: vtctldata.ShardReplicationPositionsResponse.tablet_map:type_name -> vtctldata.ShardReplicationPositionsResponse.TabletMapEntry + 246, // 135: vtctldata.ShardReplicationRemoveRequest.tablet_alias:type_name -> topodata.TabletAlias + 246, // 136: vtctldata.SleepTabletRequest.tablet_alias:type_name -> topodata.TabletAlias + 247, // 137: vtctldata.SleepTabletRequest.duration:type_name -> vttime.Duration + 268, // 138: vtctldata.SourceShardAddRequest.key_range:type_name -> topodata.KeyRange + 248, // 139: vtctldata.SourceShardAddResponse.shard:type_name -> topodata.Shard + 248, // 140: vtctldata.SourceShardDeleteResponse.shard:type_name -> topodata.Shard + 246, // 141: vtctldata.StartReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias + 246, // 142: vtctldata.StopReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias + 246, // 143: vtctldata.TabletExternallyReparentedRequest.tablet:type_name -> topodata.TabletAlias + 246, // 144: vtctldata.TabletExternallyReparentedResponse.new_primary:type_name -> topodata.TabletAlias + 246, // 145: vtctldata.TabletExternallyReparentedResponse.old_primary:type_name -> topodata.TabletAlias + 249, // 146: vtctldata.UpdateCellInfoRequest.cell_info:type_name -> topodata.CellInfo + 249, // 147: vtctldata.UpdateCellInfoResponse.cell_info:type_name -> topodata.CellInfo + 269, // 148: vtctldata.UpdateCellsAliasRequest.cells_alias:type_name -> topodata.CellsAlias + 269, // 149: vtctldata.UpdateCellsAliasResponse.cells_alias:type_name -> topodata.CellsAlias + 230, // 150: vtctldata.ValidateResponse.results_by_keyspace:type_name -> vtctldata.ValidateResponse.ResultsByKeyspaceEntry + 231, // 151: vtctldata.ValidateKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry + 232, // 152: vtctldata.ValidateSchemaKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry + 233, // 153: vtctldata.ValidateVersionKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry + 234, // 154: vtctldata.ValidateVSchemaResponse.results_by_shard:type_name -> vtctldata.ValidateVSchemaResponse.ResultsByShardEntry + 235, // 155: vtctldata.WorkflowDeleteResponse.details:type_name -> vtctldata.WorkflowDeleteResponse.TabletInfo + 239, // 156: vtctldata.WorkflowStatusResponse.table_copy_state:type_name -> vtctldata.WorkflowStatusResponse.TableCopyStateEntry + 240, // 157: vtctldata.WorkflowStatusResponse.shard_streams:type_name -> vtctldata.WorkflowStatusResponse.ShardStreamsEntry + 254, // 158: vtctldata.WorkflowSwitchTrafficRequest.tablet_types:type_name -> topodata.TabletType + 247, // 159: vtctldata.WorkflowSwitchTrafficRequest.max_replication_lag_allowed:type_name -> vttime.Duration + 247, // 160: vtctldata.WorkflowSwitchTrafficRequest.timeout:type_name -> vttime.Duration + 270, // 161: vtctldata.WorkflowUpdateRequest.tablet_request:type_name -> tabletmanagerdata.UpdateVReplicationWorkflowRequest + 241, // 162: vtctldata.WorkflowUpdateResponse.details:type_name -> vtctldata.WorkflowUpdateResponse.TabletInfo + 211, // 163: vtctldata.Workflow.ShardStreamsEntry.value:type_name -> vtctldata.Workflow.ShardStream + 212, // 164: vtctldata.Workflow.ShardStream.streams:type_name -> vtctldata.Workflow.Stream + 271, // 165: vtctldata.Workflow.ShardStream.tablet_controls:type_name -> topodata.Shard.TabletControl + 246, // 166: vtctldata.Workflow.Stream.tablet:type_name -> topodata.TabletAlias + 272, // 167: vtctldata.Workflow.Stream.binlog_source:type_name -> binlogdata.BinlogSource + 245, // 168: vtctldata.Workflow.Stream.transaction_timestamp:type_name -> vttime.Time + 245, // 169: vtctldata.Workflow.Stream.time_updated:type_name -> vttime.Time + 213, // 170: vtctldata.Workflow.Stream.copy_states:type_name -> vtctldata.Workflow.Stream.CopyState + 214, // 171: vtctldata.Workflow.Stream.logs:type_name -> vtctldata.Workflow.Stream.Log + 245, // 172: vtctldata.Workflow.Stream.Log.created_at:type_name -> vttime.Time + 245, // 173: vtctldata.Workflow.Stream.Log.updated_at:type_name -> vttime.Time + 10, // 174: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry.value:type_name -> vtctldata.Shard + 269, // 175: vtctldata.GetCellsAliasesResponse.AliasesEntry.value:type_name -> topodata.CellsAlias + 222, // 176: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry.value:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NameList + 273, // 177: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry.value:type_name -> topodata.SrvKeyspace + 266, // 178: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry.value:type_name -> vschema.SrvVSchema + 246, // 179: vtctldata.MoveTablesCreateResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias + 274, // 180: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry.value:type_name -> replicationdata.Status + 255, // 181: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry.value:type_name -> topodata.Tablet + 190, // 182: vtctldata.ValidateResponse.ResultsByKeyspaceEntry.value:type_name -> vtctldata.ValidateKeyspaceResponse + 194, // 183: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse + 194, // 184: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse + 194, // 185: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse + 194, // 186: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse + 246, // 187: vtctldata.WorkflowDeleteResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias + 246, // 188: vtctldata.WorkflowStatusResponse.ShardStreamState.tablet:type_name -> topodata.TabletAlias + 237, // 189: vtctldata.WorkflowStatusResponse.ShardStreams.streams:type_name -> vtctldata.WorkflowStatusResponse.ShardStreamState + 236, // 190: vtctldata.WorkflowStatusResponse.TableCopyStateEntry.value:type_name -> vtctldata.WorkflowStatusResponse.TableCopyState + 238, // 191: vtctldata.WorkflowStatusResponse.ShardStreamsEntry.value:type_name -> vtctldata.WorkflowStatusResponse.ShardStreams + 246, // 192: vtctldata.WorkflowUpdateResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias + 193, // [193:193] is the sub-list for method output_type + 193, // [193:193] is the sub-list for method input_type + 193, // [193:193] is the sub-list for extension type_name + 193, // [193:193] is the sub-list for extension extendee + 0, // [0:193] is the sub-list for field type_name +} + +func init() { file_vtctldata_proto_init() } +func file_vtctldata_proto_init() { + if File_vtctldata_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_vtctldata_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteVtctlCommandRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteVtctlCommandResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TableMaterializeSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MaterializeSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Keyspace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SchemaMigration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Shard); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Workflow); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddCellInfoRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddCellInfoResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddCellsAliasRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddCellsAliasResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyRoutingRulesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyRoutingRulesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyShardRoutingRulesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyShardRoutingRulesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplySchemaRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplySchemaResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyVSchemaRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyVSchemaResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BackupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BackupResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BackupShardRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelSchemaMigrationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CancelSchemaMigrationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ChangeTabletTypeRequest); i { case 0: return &v.state case 1: @@ -12508,8 +16393,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteVtctlCommandResponse); i { + file_vtctldata_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ChangeTabletTypeResponse); i { case 0: return &v.state case 1: @@ -12520,8 +16405,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TableMaterializeSettings); i { + file_vtctldata_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CleanupSchemaMigrationRequest); i { case 0: return &v.state case 1: @@ -12532,8 +16417,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MaterializeSettings); i { + file_vtctldata_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CleanupSchemaMigrationResponse); i { case 0: return &v.state case 1: @@ -12544,8 +16429,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Keyspace); i { + file_vtctldata_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CompleteSchemaMigrationRequest); i { case 0: return &v.state case 1: @@ -12556,8 +16441,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Shard); i { + file_vtctldata_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CompleteSchemaMigrationResponse); i { case 0: return &v.state case 1: @@ -12568,8 +16453,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Workflow); i { + file_vtctldata_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateKeyspaceRequest); i { case 0: return &v.state case 1: @@ -12580,8 +16465,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCellInfoRequest); i { + file_vtctldata_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateKeyspaceResponse); i { case 0: return &v.state case 1: @@ -12592,8 +16477,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCellInfoResponse); i { + file_vtctldata_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateShardRequest); i { case 0: return &v.state case 1: @@ -12604,8 +16489,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCellsAliasRequest); i { + file_vtctldata_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateShardResponse); i { case 0: return &v.state case 1: @@ -12616,8 +16501,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCellsAliasResponse); i { + file_vtctldata_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCellInfoRequest); i { case 0: return &v.state case 1: @@ -12628,8 +16513,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyRoutingRulesRequest); i { + file_vtctldata_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCellInfoResponse); i { case 0: return &v.state case 1: @@ -12640,8 +16525,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyRoutingRulesResponse); i { + file_vtctldata_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCellsAliasRequest); i { case 0: return &v.state case 1: @@ -12652,8 +16537,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyShardRoutingRulesRequest); i { + file_vtctldata_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCellsAliasResponse); i { case 0: return &v.state case 1: @@ -12664,8 +16549,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyShardRoutingRulesResponse); i { + file_vtctldata_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteKeyspaceRequest); i { case 0: return &v.state case 1: @@ -12676,8 +16561,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplySchemaRequest); i { + file_vtctldata_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteKeyspaceResponse); i { case 0: return &v.state case 1: @@ -12688,8 +16573,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplySchemaResponse); i { + file_vtctldata_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteShardsRequest); i { case 0: return &v.state case 1: @@ -12700,8 +16585,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyVSchemaRequest); i { + file_vtctldata_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteShardsResponse); i { case 0: return &v.state case 1: @@ -12712,8 +16597,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyVSchemaResponse); i { + file_vtctldata_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteSrvVSchemaRequest); i { case 0: return &v.state case 1: @@ -12724,8 +16609,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BackupRequest); i { + file_vtctldata_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteSrvVSchemaResponse); i { case 0: return &v.state case 1: @@ -12736,8 +16621,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BackupResponse); i { + file_vtctldata_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteTabletsRequest); i { case 0: return &v.state case 1: @@ -12748,8 +16633,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BackupShardRequest); i { + file_vtctldata_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteTabletsResponse); i { case 0: return &v.state case 1: @@ -12760,8 +16645,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChangeTabletTypeRequest); i { + file_vtctldata_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EmergencyReparentShardRequest); i { case 0: return &v.state case 1: @@ -12772,8 +16657,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChangeTabletTypeResponse); i { + file_vtctldata_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EmergencyReparentShardResponse); i { case 0: return &v.state case 1: @@ -12784,8 +16669,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteFetchAsAppRequest); i { case 0: return &v.state case 1: @@ -12796,8 +16681,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateKeyspaceResponse); i { + file_vtctldata_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteFetchAsAppResponse); i { case 0: return &v.state case 1: @@ -12808,8 +16693,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateShardRequest); i { + file_vtctldata_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteFetchAsDBARequest); i { case 0: return &v.state case 1: @@ -12820,8 +16705,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateShardResponse); i { + file_vtctldata_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteFetchAsDBAResponse); i { case 0: return &v.state case 1: @@ -12832,8 +16717,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteCellInfoRequest); i { + file_vtctldata_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteHookRequest); i { case 0: return &v.state case 1: @@ -12844,8 +16729,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteCellInfoResponse); i { + file_vtctldata_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteHookResponse); i { case 0: return &v.state case 1: @@ -12856,8 +16741,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteCellsAliasRequest); i { + file_vtctldata_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FindAllShardsInKeyspaceRequest); i { case 0: return &v.state case 1: @@ -12868,8 +16753,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteCellsAliasResponse); i { + file_vtctldata_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FindAllShardsInKeyspaceResponse); i { case 0: return &v.state case 1: @@ -12880,8 +16765,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBackupsRequest); i { case 0: return &v.state case 1: @@ -12892,8 +16777,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteKeyspaceResponse); i { + file_vtctldata_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetBackupsResponse); i { case 0: return &v.state case 1: @@ -12904,8 +16789,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteShardsRequest); i { + file_vtctldata_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCellInfoRequest); i { case 0: return &v.state case 1: @@ -12916,8 +16801,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteShardsResponse); i { + file_vtctldata_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCellInfoResponse); i { case 0: return &v.state case 1: @@ -12928,8 +16813,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteSrvVSchemaRequest); i { + file_vtctldata_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCellInfoNamesRequest); i { case 0: return &v.state case 1: @@ -12940,8 +16825,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteSrvVSchemaResponse); i { + file_vtctldata_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCellInfoNamesResponse); i { case 0: return &v.state case 1: @@ -12952,8 +16837,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteTabletsRequest); i { + file_vtctldata_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCellsAliasesRequest); i { case 0: return &v.state case 1: @@ -12964,8 +16849,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteTabletsResponse); i { + file_vtctldata_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCellsAliasesResponse); i { case 0: return &v.state case 1: @@ -12976,8 +16861,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EmergencyReparentShardRequest); i { + file_vtctldata_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFullStatusRequest); i { case 0: return &v.state case 1: @@ -12988,8 +16873,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EmergencyReparentShardResponse); i { + file_vtctldata_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFullStatusResponse); i { case 0: return &v.state case 1: @@ -13000,8 +16885,92 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteFetchAsAppRequest); i { + file_vtctldata_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetKeyspacesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetKeyspacesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetKeyspaceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetKeyspaceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetPermissionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetPermissionsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRoutingRulesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRoutingRulesResponse); i { case 0: return &v.state case 1: @@ -13012,8 +16981,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteFetchAsAppResponse); i { + file_vtctldata_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSchemaRequest); i { case 0: return &v.state case 1: @@ -13024,8 +16993,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteFetchAsDBARequest); i { + file_vtctldata_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSchemaResponse); i { case 0: return &v.state case 1: @@ -13036,8 +17005,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteFetchAsDBAResponse); i { + file_vtctldata_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSchemaMigrationsRequest); i { case 0: return &v.state case 1: @@ -13048,8 +17017,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteHookRequest); i { + file_vtctldata_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSchemaMigrationsResponse); i { case 0: return &v.state case 1: @@ -13060,8 +17029,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteHookResponse); i { + file_vtctldata_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetShardRequest); i { case 0: return &v.state case 1: @@ -13072,8 +17041,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FindAllShardsInKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetShardResponse); i { case 0: return &v.state case 1: @@ -13084,8 +17053,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FindAllShardsInKeyspaceResponse); i { + file_vtctldata_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetShardRoutingRulesRequest); i { case 0: return &v.state case 1: @@ -13096,8 +17065,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBackupsRequest); i { + file_vtctldata_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetShardRoutingRulesResponse); i { case 0: return &v.state case 1: @@ -13108,8 +17077,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBackupsResponse); i { + file_vtctldata_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvKeyspaceNamesRequest); i { case 0: return &v.state case 1: @@ -13120,8 +17089,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellInfoRequest); i { + file_vtctldata_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvKeyspaceNamesResponse); i { case 0: return &v.state case 1: @@ -13132,8 +17101,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellInfoResponse); i { + file_vtctldata_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvKeyspacesRequest); i { case 0: return &v.state case 1: @@ -13144,8 +17113,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellInfoNamesRequest); i { + file_vtctldata_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvKeyspacesResponse); i { case 0: return &v.state case 1: @@ -13156,8 +17125,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellInfoNamesResponse); i { + file_vtctldata_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateThrottlerConfigRequest); i { case 0: return &v.state case 1: @@ -13168,8 +17137,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellsAliasesRequest); i { + file_vtctldata_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateThrottlerConfigResponse); i { case 0: return &v.state case 1: @@ -13180,8 +17149,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetCellsAliasesResponse); i { + file_vtctldata_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvVSchemaRequest); i { case 0: return &v.state case 1: @@ -13192,8 +17161,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetFullStatusRequest); i { + file_vtctldata_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvVSchemaResponse); i { case 0: return &v.state case 1: @@ -13204,8 +17173,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetFullStatusResponse); i { + file_vtctldata_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvVSchemasRequest); i { case 0: return &v.state case 1: @@ -13216,8 +17185,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyspacesRequest); i { + file_vtctldata_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvVSchemasResponse); i { case 0: return &v.state case 1: @@ -13228,8 +17197,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyspacesResponse); i { + file_vtctldata_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTabletRequest); i { case 0: return &v.state case 1: @@ -13240,8 +17209,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTabletResponse); i { case 0: return &v.state case 1: @@ -13252,8 +17221,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetKeyspaceResponse); i { + file_vtctldata_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTabletsRequest); i { case 0: return &v.state case 1: @@ -13264,8 +17233,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetPermissionsRequest); i { + file_vtctldata_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTabletsResponse); i { case 0: return &v.state case 1: @@ -13276,8 +17245,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetPermissionsResponse); i { + file_vtctldata_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTopologyPathRequest); i { case 0: return &v.state case 1: @@ -13288,8 +17257,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetRoutingRulesRequest); i { + file_vtctldata_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTopologyPathResponse); i { case 0: return &v.state case 1: @@ -13300,8 +17269,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetRoutingRulesResponse); i { + file_vtctldata_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TopologyCell); i { case 0: return &v.state case 1: @@ -13312,8 +17281,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSchemaRequest); i { + file_vtctldata_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVSchemaRequest); i { case 0: return &v.state case 1: @@ -13324,8 +17293,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSchemaResponse); i { + file_vtctldata_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVersionRequest); i { case 0: return &v.state case 1: @@ -13336,8 +17305,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardRequest); i { + file_vtctldata_proto_msgTypes[102].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVersionResponse); i { case 0: return &v.state case 1: @@ -13348,8 +17317,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardResponse); i { + file_vtctldata_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetVSchemaResponse); i { case 0: return &v.state case 1: @@ -13360,8 +17329,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardRoutingRulesRequest); i { + file_vtctldata_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetWorkflowsRequest); i { case 0: return &v.state case 1: @@ -13372,8 +17341,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardRoutingRulesResponse); i { + file_vtctldata_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetWorkflowsResponse); i { case 0: return &v.state case 1: @@ -13384,8 +17353,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvKeyspaceNamesRequest); i { + file_vtctldata_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitShardPrimaryRequest); i { case 0: return &v.state case 1: @@ -13396,8 +17365,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvKeyspaceNamesResponse); i { + file_vtctldata_proto_msgTypes[107].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitShardPrimaryResponse); i { case 0: return &v.state case 1: @@ -13408,8 +17377,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvKeyspacesRequest); i { + file_vtctldata_proto_msgTypes[108].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LaunchSchemaMigrationRequest); i { case 0: return &v.state case 1: @@ -13420,8 +17389,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvKeyspacesResponse); i { + file_vtctldata_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LaunchSchemaMigrationResponse); i { case 0: return &v.state case 1: @@ -13432,8 +17401,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateThrottlerConfigRequest); i { + file_vtctldata_proto_msgTypes[110].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveTablesCreateRequest); i { case 0: return &v.state case 1: @@ -13444,8 +17413,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateThrottlerConfigResponse); i { + file_vtctldata_proto_msgTypes[111].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveTablesCreateResponse); i { case 0: return &v.state case 1: @@ -13456,8 +17425,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvVSchemaRequest); i { + file_vtctldata_proto_msgTypes[112].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveTablesCompleteRequest); i { case 0: return &v.state case 1: @@ -13468,8 +17437,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvVSchemaResponse); i { + file_vtctldata_proto_msgTypes[113].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveTablesCompleteResponse); i { case 0: return &v.state case 1: @@ -13480,8 +17449,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvVSchemasRequest); i { + file_vtctldata_proto_msgTypes[114].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PingTabletRequest); i { case 0: return &v.state case 1: @@ -13492,8 +17461,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvVSchemasResponse); i { + file_vtctldata_proto_msgTypes[115].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PingTabletResponse); i { case 0: return &v.state case 1: @@ -13504,8 +17473,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTabletRequest); i { + file_vtctldata_proto_msgTypes[116].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlannedReparentShardRequest); i { case 0: return &v.state case 1: @@ -13516,8 +17485,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTabletResponse); i { + file_vtctldata_proto_msgTypes[117].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlannedReparentShardResponse); i { case 0: return &v.state case 1: @@ -13528,8 +17497,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTabletsRequest); i { + file_vtctldata_proto_msgTypes[118].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RebuildKeyspaceGraphRequest); i { case 0: return &v.state case 1: @@ -13540,8 +17509,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTabletsResponse); i { + file_vtctldata_proto_msgTypes[119].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RebuildKeyspaceGraphResponse); i { case 0: return &v.state case 1: @@ -13552,8 +17521,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTopologyPathRequest); i { + file_vtctldata_proto_msgTypes[120].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RebuildVSchemaGraphRequest); i { case 0: return &v.state case 1: @@ -13564,8 +17533,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTopologyPathResponse); i { + file_vtctldata_proto_msgTypes[121].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RebuildVSchemaGraphResponse); i { case 0: return &v.state case 1: @@ -13576,8 +17545,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TopologyCell); i { + file_vtctldata_proto_msgTypes[122].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RefreshStateRequest); i { case 0: return &v.state case 1: @@ -13588,8 +17557,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetVSchemaRequest); i { + file_vtctldata_proto_msgTypes[123].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RefreshStateResponse); i { case 0: return &v.state case 1: @@ -13600,8 +17569,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetVersionRequest); i { + file_vtctldata_proto_msgTypes[124].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RefreshStateByShardRequest); i { case 0: return &v.state case 1: @@ -13612,8 +17581,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetVersionResponse); i { + file_vtctldata_proto_msgTypes[125].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RefreshStateByShardResponse); i { case 0: return &v.state case 1: @@ -13624,8 +17593,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetVSchemaResponse); i { + file_vtctldata_proto_msgTypes[126].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReloadSchemaRequest); i { case 0: return &v.state case 1: @@ -13636,8 +17605,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowsRequest); i { + file_vtctldata_proto_msgTypes[127].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReloadSchemaResponse); i { case 0: return &v.state case 1: @@ -13648,8 +17617,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowsResponse); i { + file_vtctldata_proto_msgTypes[128].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReloadSchemaKeyspaceRequest); i { case 0: return &v.state case 1: @@ -13660,8 +17629,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InitShardPrimaryRequest); i { + file_vtctldata_proto_msgTypes[129].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReloadSchemaKeyspaceResponse); i { case 0: return &v.state case 1: @@ -13672,8 +17641,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InitShardPrimaryResponse); i { + file_vtctldata_proto_msgTypes[130].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReloadSchemaShardRequest); i { case 0: return &v.state case 1: @@ -13684,8 +17653,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PingTabletRequest); i { + file_vtctldata_proto_msgTypes[131].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReloadSchemaShardResponse); i { case 0: return &v.state case 1: @@ -13696,8 +17665,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PingTabletResponse); i { + file_vtctldata_proto_msgTypes[132].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveBackupRequest); i { case 0: return &v.state case 1: @@ -13708,8 +17677,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PlannedReparentShardRequest); i { + file_vtctldata_proto_msgTypes[133].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveBackupResponse); i { case 0: return &v.state case 1: @@ -13720,8 +17689,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[102].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PlannedReparentShardResponse); i { + file_vtctldata_proto_msgTypes[134].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveKeyspaceCellRequest); i { case 0: return &v.state case 1: @@ -13732,8 +17701,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RebuildKeyspaceGraphRequest); i { + file_vtctldata_proto_msgTypes[135].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveKeyspaceCellResponse); i { case 0: return &v.state case 1: @@ -13744,8 +17713,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RebuildKeyspaceGraphResponse); i { + file_vtctldata_proto_msgTypes[136].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveShardCellRequest); i { case 0: return &v.state case 1: @@ -13756,8 +17725,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RebuildVSchemaGraphRequest); i { + file_vtctldata_proto_msgTypes[137].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveShardCellResponse); i { case 0: return &v.state case 1: @@ -13768,8 +17737,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RebuildVSchemaGraphResponse); i { + file_vtctldata_proto_msgTypes[138].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReparentTabletRequest); i { case 0: return &v.state case 1: @@ -13780,8 +17749,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[107].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshStateRequest); i { + file_vtctldata_proto_msgTypes[139].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReparentTabletResponse); i { case 0: return &v.state case 1: @@ -13792,8 +17761,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[108].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshStateResponse); i { + file_vtctldata_proto_msgTypes[140].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReshardCreateRequest); i { case 0: return &v.state case 1: @@ -13804,8 +17773,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshStateByShardRequest); i { + file_vtctldata_proto_msgTypes[141].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RestoreFromBackupRequest); i { case 0: return &v.state case 1: @@ -13816,8 +17785,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[110].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshStateByShardResponse); i { + file_vtctldata_proto_msgTypes[142].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RestoreFromBackupResponse); i { case 0: return &v.state case 1: @@ -13828,8 +17797,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[111].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReloadSchemaRequest); i { + file_vtctldata_proto_msgTypes[143].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetrySchemaMigrationRequest); i { case 0: return &v.state case 1: @@ -13840,8 +17809,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[112].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReloadSchemaResponse); i { + file_vtctldata_proto_msgTypes[144].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetrySchemaMigrationResponse); i { case 0: return &v.state case 1: @@ -13852,8 +17821,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[113].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReloadSchemaKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[145].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunHealthCheckRequest); i { case 0: return &v.state case 1: @@ -13864,8 +17833,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[114].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReloadSchemaKeyspaceResponse); i { + file_vtctldata_proto_msgTypes[146].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunHealthCheckResponse); i { case 0: return &v.state case 1: @@ -13876,8 +17845,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[115].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReloadSchemaShardRequest); i { + file_vtctldata_proto_msgTypes[147].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetKeyspaceDurabilityPolicyRequest); i { case 0: return &v.state case 1: @@ -13888,8 +17857,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[116].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReloadSchemaShardResponse); i { + file_vtctldata_proto_msgTypes[148].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetKeyspaceDurabilityPolicyResponse); i { case 0: return &v.state case 1: @@ -13900,8 +17869,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[117].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveBackupRequest); i { + file_vtctldata_proto_msgTypes[149].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetKeyspaceServedFromRequest); i { case 0: return &v.state case 1: @@ -13912,8 +17881,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[118].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveBackupResponse); i { + file_vtctldata_proto_msgTypes[150].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetKeyspaceServedFromResponse); i { case 0: return &v.state case 1: @@ -13924,8 +17893,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[119].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveKeyspaceCellRequest); i { + file_vtctldata_proto_msgTypes[151].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetKeyspaceShardingInfoRequest); i { case 0: return &v.state case 1: @@ -13936,8 +17905,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[120].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveKeyspaceCellResponse); i { + file_vtctldata_proto_msgTypes[152].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetKeyspaceShardingInfoResponse); i { case 0: return &v.state case 1: @@ -13948,8 +17917,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[121].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveShardCellRequest); i { + file_vtctldata_proto_msgTypes[153].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetShardIsPrimaryServingRequest); i { case 0: return &v.state case 1: @@ -13960,8 +17929,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[122].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveShardCellResponse); i { + file_vtctldata_proto_msgTypes[154].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetShardIsPrimaryServingResponse); i { case 0: return &v.state case 1: @@ -13972,8 +17941,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[123].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReparentTabletRequest); i { + file_vtctldata_proto_msgTypes[155].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetShardTabletControlRequest); i { case 0: return &v.state case 1: @@ -13984,8 +17953,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[124].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReparentTabletResponse); i { + file_vtctldata_proto_msgTypes[156].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetShardTabletControlResponse); i { case 0: return &v.state case 1: @@ -13996,8 +17965,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[125].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RestoreFromBackupRequest); i { + file_vtctldata_proto_msgTypes[157].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetWritableRequest); i { case 0: return &v.state case 1: @@ -14008,8 +17977,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[126].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RestoreFromBackupResponse); i { + file_vtctldata_proto_msgTypes[158].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetWritableResponse); i { case 0: return &v.state case 1: @@ -14020,8 +17989,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[127].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RunHealthCheckRequest); i { + file_vtctldata_proto_msgTypes[159].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShardReplicationAddRequest); i { case 0: return &v.state case 1: @@ -14032,8 +18001,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[128].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RunHealthCheckResponse); i { + file_vtctldata_proto_msgTypes[160].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShardReplicationAddResponse); i { case 0: return &v.state case 1: @@ -14044,8 +18013,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[129].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetKeyspaceDurabilityPolicyRequest); i { + file_vtctldata_proto_msgTypes[161].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShardReplicationFixRequest); i { case 0: return &v.state case 1: @@ -14056,8 +18025,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[130].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetKeyspaceDurabilityPolicyResponse); i { + file_vtctldata_proto_msgTypes[162].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShardReplicationFixResponse); i { case 0: return &v.state case 1: @@ -14068,8 +18037,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[131].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetKeyspaceServedFromRequest); i { + file_vtctldata_proto_msgTypes[163].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShardReplicationPositionsRequest); i { case 0: return &v.state case 1: @@ -14080,8 +18049,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[132].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetKeyspaceServedFromResponse); i { + file_vtctldata_proto_msgTypes[164].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShardReplicationPositionsResponse); i { case 0: return &v.state case 1: @@ -14092,8 +18061,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[133].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetKeyspaceShardingInfoRequest); i { + file_vtctldata_proto_msgTypes[165].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShardReplicationRemoveRequest); i { case 0: return &v.state case 1: @@ -14104,8 +18073,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[134].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetKeyspaceShardingInfoResponse); i { + file_vtctldata_proto_msgTypes[166].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShardReplicationRemoveResponse); i { case 0: return &v.state case 1: @@ -14116,8 +18085,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[135].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetShardIsPrimaryServingRequest); i { + file_vtctldata_proto_msgTypes[167].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SleepTabletRequest); i { case 0: return &v.state case 1: @@ -14128,8 +18097,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[136].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetShardIsPrimaryServingResponse); i { + file_vtctldata_proto_msgTypes[168].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SleepTabletResponse); i { case 0: return &v.state case 1: @@ -14140,8 +18109,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[137].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetShardTabletControlRequest); i { + file_vtctldata_proto_msgTypes[169].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceShardAddRequest); i { case 0: return &v.state case 1: @@ -14152,8 +18121,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[138].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetShardTabletControlResponse); i { + file_vtctldata_proto_msgTypes[170].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceShardAddResponse); i { case 0: return &v.state case 1: @@ -14164,8 +18133,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[139].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetWritableRequest); i { + file_vtctldata_proto_msgTypes[171].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceShardDeleteRequest); i { case 0: return &v.state case 1: @@ -14176,8 +18145,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[140].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetWritableResponse); i { + file_vtctldata_proto_msgTypes[172].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceShardDeleteResponse); i { case 0: return &v.state case 1: @@ -14188,8 +18157,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[141].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationAddRequest); i { + file_vtctldata_proto_msgTypes[173].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartReplicationRequest); i { case 0: return &v.state case 1: @@ -14200,8 +18169,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[142].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationAddResponse); i { + file_vtctldata_proto_msgTypes[174].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartReplicationResponse); i { case 0: return &v.state case 1: @@ -14212,8 +18181,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[143].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationFixRequest); i { + file_vtctldata_proto_msgTypes[175].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopReplicationRequest); i { case 0: return &v.state case 1: @@ -14224,8 +18193,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[144].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationFixResponse); i { + file_vtctldata_proto_msgTypes[176].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopReplicationResponse); i { case 0: return &v.state case 1: @@ -14236,8 +18205,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[145].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationPositionsRequest); i { + file_vtctldata_proto_msgTypes[177].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TabletExternallyReparentedRequest); i { case 0: return &v.state case 1: @@ -14248,8 +18217,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[146].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationPositionsResponse); i { + file_vtctldata_proto_msgTypes[178].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TabletExternallyReparentedResponse); i { case 0: return &v.state case 1: @@ -14260,8 +18229,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[147].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationRemoveRequest); i { + file_vtctldata_proto_msgTypes[179].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateCellInfoRequest); i { case 0: return &v.state case 1: @@ -14272,8 +18241,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[148].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationRemoveResponse); i { + file_vtctldata_proto_msgTypes[180].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateCellInfoResponse); i { case 0: return &v.state case 1: @@ -14284,8 +18253,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[149].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SleepTabletRequest); i { + file_vtctldata_proto_msgTypes[181].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateCellsAliasRequest); i { case 0: return &v.state case 1: @@ -14296,8 +18265,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[150].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SleepTabletResponse); i { + file_vtctldata_proto_msgTypes[182].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateCellsAliasResponse); i { case 0: return &v.state case 1: @@ -14308,8 +18277,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[151].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceShardAddRequest); i { + file_vtctldata_proto_msgTypes[183].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateRequest); i { case 0: return &v.state case 1: @@ -14320,8 +18289,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[152].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceShardAddResponse); i { + file_vtctldata_proto_msgTypes[184].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateResponse); i { case 0: return &v.state case 1: @@ -14332,8 +18301,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[153].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceShardDeleteRequest); i { + file_vtctldata_proto_msgTypes[185].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateKeyspaceRequest); i { case 0: return &v.state case 1: @@ -14344,8 +18313,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[154].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceShardDeleteResponse); i { + file_vtctldata_proto_msgTypes[186].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateKeyspaceResponse); i { case 0: return &v.state case 1: @@ -14356,8 +18325,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[155].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartReplicationRequest); i { + file_vtctldata_proto_msgTypes[187].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateSchemaKeyspaceRequest); i { case 0: return &v.state case 1: @@ -14368,8 +18337,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[156].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartReplicationResponse); i { + file_vtctldata_proto_msgTypes[188].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateSchemaKeyspaceResponse); i { case 0: return &v.state case 1: @@ -14380,8 +18349,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[157].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopReplicationRequest); i { + file_vtctldata_proto_msgTypes[189].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateShardRequest); i { case 0: return &v.state case 1: @@ -14392,8 +18361,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[158].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopReplicationResponse); i { + file_vtctldata_proto_msgTypes[190].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateShardResponse); i { case 0: return &v.state case 1: @@ -14404,8 +18373,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[159].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TabletExternallyReparentedRequest); i { + file_vtctldata_proto_msgTypes[191].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateVersionKeyspaceRequest); i { case 0: return &v.state case 1: @@ -14416,8 +18385,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[160].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TabletExternallyReparentedResponse); i { + file_vtctldata_proto_msgTypes[192].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateVersionKeyspaceResponse); i { case 0: return &v.state case 1: @@ -14428,8 +18397,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[161].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateCellInfoRequest); i { + file_vtctldata_proto_msgTypes[193].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateVersionShardRequest); i { case 0: return &v.state case 1: @@ -14440,8 +18409,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[162].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateCellInfoResponse); i { + file_vtctldata_proto_msgTypes[194].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateVersionShardResponse); i { case 0: return &v.state case 1: @@ -14452,8 +18421,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[163].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateCellsAliasRequest); i { + file_vtctldata_proto_msgTypes[195].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateVSchemaRequest); i { case 0: return &v.state case 1: @@ -14464,8 +18433,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[164].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateCellsAliasResponse); i { + file_vtctldata_proto_msgTypes[196].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateVSchemaResponse); i { case 0: return &v.state case 1: @@ -14476,8 +18445,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[165].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateRequest); i { + file_vtctldata_proto_msgTypes[197].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowDeleteRequest); i { case 0: return &v.state case 1: @@ -14488,8 +18457,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[166].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateResponse); i { + file_vtctldata_proto_msgTypes[198].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowDeleteResponse); i { case 0: return &v.state case 1: @@ -14500,8 +18469,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[167].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[199].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowStatusRequest); i { case 0: return &v.state case 1: @@ -14512,8 +18481,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[168].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateKeyspaceResponse); i { + file_vtctldata_proto_msgTypes[200].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowStatusResponse); i { case 0: return &v.state case 1: @@ -14524,8 +18493,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[169].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateSchemaKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[201].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowSwitchTrafficRequest); i { case 0: return &v.state case 1: @@ -14536,8 +18505,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[170].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateSchemaKeyspaceResponse); i { + file_vtctldata_proto_msgTypes[202].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowSwitchTrafficResponse); i { case 0: return &v.state case 1: @@ -14548,8 +18517,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[171].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateShardRequest); i { + file_vtctldata_proto_msgTypes[203].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowUpdateRequest); i { case 0: return &v.state case 1: @@ -14560,8 +18529,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[172].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateShardResponse); i { + file_vtctldata_proto_msgTypes[204].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowUpdateResponse); i { case 0: return &v.state case 1: @@ -14572,8 +18541,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[173].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateVersionKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[206].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Workflow_ReplicationLocation); i { case 0: return &v.state case 1: @@ -14584,8 +18553,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[174].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateVersionKeyspaceResponse); i { + file_vtctldata_proto_msgTypes[207].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Workflow_ShardStream); i { case 0: return &v.state case 1: @@ -14596,8 +18565,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[175].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateVersionShardRequest); i { + file_vtctldata_proto_msgTypes[208].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Workflow_Stream); i { case 0: return &v.state case 1: @@ -14608,8 +18577,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[176].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateVersionShardResponse); i { + file_vtctldata_proto_msgTypes[209].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Workflow_Stream_CopyState); i { case 0: return &v.state case 1: @@ -14620,8 +18589,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[177].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateVSchemaRequest); i { + file_vtctldata_proto_msgTypes[210].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Workflow_Stream_Log); i { case 0: return &v.state case 1: @@ -14632,8 +18601,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[178].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateVSchemaResponse); i { + file_vtctldata_proto_msgTypes[218].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSrvKeyspaceNamesResponse_NameList); i { case 0: return &v.state case 1: @@ -14644,8 +18613,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[180].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Workflow_ReplicationLocation); i { + file_vtctldata_proto_msgTypes[222].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MoveTablesCreateResponse_TabletInfo); i { case 0: return &v.state case 1: @@ -14656,8 +18625,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[181].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Workflow_ShardStream); i { + file_vtctldata_proto_msgTypes[231].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowDeleteResponse_TabletInfo); i { case 0: return &v.state case 1: @@ -14668,8 +18637,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[182].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Workflow_Stream); i { + file_vtctldata_proto_msgTypes[232].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowStatusResponse_TableCopyState); i { case 0: return &v.state case 1: @@ -14680,8 +18649,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[183].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Workflow_Stream_CopyState); i { + file_vtctldata_proto_msgTypes[233].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowStatusResponse_ShardStreamState); i { case 0: return &v.state case 1: @@ -14692,8 +18661,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[184].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Workflow_Stream_Log); i { + file_vtctldata_proto_msgTypes[234].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowStatusResponse_ShardStreams); i { case 0: return &v.state case 1: @@ -14704,8 +18673,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[188].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSrvKeyspaceNamesResponse_NameList); i { + file_vtctldata_proto_msgTypes[237].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowUpdateResponse_TabletInfo); i { case 0: return &v.state case 1: @@ -14722,8 +18691,8 @@ func file_vtctldata_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_vtctldata_proto_rawDesc, - NumEnums: 1, - NumMessages: 198, + NumEnums: 4, + NumMessages: 238, NumExtensions: 0, NumServices: 0, }, diff --git a/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go b/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go index 3ef886f8115..e7aef138889 100644 --- a/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go +++ b/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: vtctldata.proto package vtctldata @@ -7,6 +7,7 @@ package vtctldata import ( binary "encoding/binary" fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" math "math" @@ -30,4689 +31,4672 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -func (m *ExecuteVtctlCommandRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteVtctlCommandRequest) CloneVT() *ExecuteVtctlCommandRequest { if m == nil { - return nil, nil + return (*ExecuteVtctlCommandRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ExecuteVtctlCommandRequest{ + ActionTimeout: m.ActionTimeout, } - return dAtA[:n], nil + if rhs := m.Args; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Args = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteVtctlCommandRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ExecuteVtctlCommandRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteVtctlCommandRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteVtctlCommandResponse) CloneVT() *ExecuteVtctlCommandResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*ExecuteVtctlCommandResponse)(nil) } - if m.ActionTimeout != 0 { - i = encodeVarint(dAtA, i, uint64(m.ActionTimeout)) - i-- - dAtA[i] = 0x10 + r := &ExecuteVtctlCommandResponse{ + Event: m.Event.CloneVT(), } - if len(m.Args) > 0 { - for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Args[iNdEx]) - copy(dAtA[i:], m.Args[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Args[iNdEx]))) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteVtctlCommandResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteVtctlCommandResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TableMaterializeSettings) CloneVT() *TableMaterializeSettings { if m == nil { - return nil, nil + return (*TableMaterializeSettings)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &TableMaterializeSettings{ + TargetTable: m.TargetTable, + SourceExpression: m.SourceExpression, + CreateDdl: m.CreateDdl, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteVtctlCommandResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *TableMaterializeSettings) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteVtctlCommandResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MaterializeSettings) CloneVT() *MaterializeSettings { if m == nil { - return 0, nil + return (*MaterializeSettings)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &MaterializeSettings{ + Workflow: m.Workflow, + SourceKeyspace: m.SourceKeyspace, + TargetKeyspace: m.TargetKeyspace, + StopAfterCopy: m.StopAfterCopy, + Cell: m.Cell, + TabletTypes: m.TabletTypes, + ExternalCluster: m.ExternalCluster, + MaterializationIntent: m.MaterializationIntent, + SourceTimeZone: m.SourceTimeZone, + TargetTimeZone: m.TargetTimeZone, + OnDdl: m.OnDdl, + DeferSecondaryKeys: m.DeferSecondaryKeys, + TabletSelectionPreference: m.TabletSelectionPreference, + AtomicCopy: m.AtomicCopy, } - if m.Event != nil { - size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if rhs := m.TableSettings; rhs != nil { + tmpContainer := make([]*TableMaterializeSettings, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r.TableSettings = tmpContainer } - return len(dAtA) - i, nil + if rhs := m.SourceShards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.SourceShards = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *TableMaterializeSettings) MarshalVT() (dAtA []byte, err error) { +func (m *MaterializeSettings) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Keyspace) CloneVT() *Keyspace { if m == nil { - return nil, nil + return (*Keyspace)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &Keyspace{ + Name: m.Name, + Keyspace: m.Keyspace.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *TableMaterializeSettings) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *Keyspace) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *TableMaterializeSettings) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SchemaMigration) CloneVT() *SchemaMigration { if m == nil { - return 0, nil + return (*SchemaMigration)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SchemaMigration{ + Uuid: m.Uuid, + Keyspace: m.Keyspace, + Shard: m.Shard, + Schema: m.Schema, + Table: m.Table, + MigrationStatement: m.MigrationStatement, + Strategy: m.Strategy, + Options: m.Options, + AddedAt: m.AddedAt.CloneVT(), + RequestedAt: m.RequestedAt.CloneVT(), + ReadyAt: m.ReadyAt.CloneVT(), + StartedAt: m.StartedAt.CloneVT(), + LivenessTimestamp: m.LivenessTimestamp.CloneVT(), + CompletedAt: m.CompletedAt.CloneVT(), + CleanedUpAt: m.CleanedUpAt.CloneVT(), + Status: m.Status, + LogPath: m.LogPath, + Artifacts: m.Artifacts, + Retries: m.Retries, + Tablet: m.Tablet.CloneVT(), + TabletFailure: m.TabletFailure, + Progress: m.Progress, + MigrationContext: m.MigrationContext, + DdlAction: m.DdlAction, + Message: m.Message, + EtaSeconds: m.EtaSeconds, + RowsCopied: m.RowsCopied, + TableRows: m.TableRows, + AddedUniqueKeys: m.AddedUniqueKeys, + RemovedUniqueKeys: m.RemovedUniqueKeys, + LogFile: m.LogFile, + ArtifactRetention: m.ArtifactRetention.CloneVT(), + PostponeCompletion: m.PostponeCompletion, + RemovedUniqueKeyNames: m.RemovedUniqueKeyNames, + DroppedNoDefaultColumnNames: m.DroppedNoDefaultColumnNames, + ExpandedColumnNames: m.ExpandedColumnNames, + RevertibleNotes: m.RevertibleNotes, + AllowConcurrent: m.AllowConcurrent, + RevertedUuid: m.RevertedUuid, + IsView: m.IsView, + ReadyToComplete: m.ReadyToComplete, + VitessLivenessIndicator: m.VitessLivenessIndicator, + UserThrottleRatio: m.UserThrottleRatio, + SpecialPlan: m.SpecialPlan, + LastThrottledAt: m.LastThrottledAt.CloneVT(), + ComponentThrottled: m.ComponentThrottled, + CancelledAt: m.CancelledAt.CloneVT(), + PostponeLaunch: m.PostponeLaunch, + Stage: m.Stage, + CutoverAttempts: m.CutoverAttempts, + IsImmediateOperation: m.IsImmediateOperation, + ReviewedAt: m.ReviewedAt.CloneVT(), + ReadyToCompleteAt: m.ReadyToCompleteAt.CloneVT(), } - if len(m.CreateDdl) > 0 { - i -= len(m.CreateDdl) - copy(dAtA[i:], m.CreateDdl) - i = encodeVarint(dAtA, i, uint64(len(m.CreateDdl))) - i-- - dAtA[i] = 0x1a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.SourceExpression) > 0 { - i -= len(m.SourceExpression) - copy(dAtA[i:], m.SourceExpression) - i = encodeVarint(dAtA, i, uint64(len(m.SourceExpression))) - i-- - dAtA[i] = 0x12 + return r +} + +func (m *SchemaMigration) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Shard) CloneVT() *Shard { + if m == nil { + return (*Shard)(nil) } - if len(m.TargetTable) > 0 { - i -= len(m.TargetTable) - copy(dAtA[i:], m.TargetTable) - i = encodeVarint(dAtA, i, uint64(len(m.TargetTable))) - i-- - dAtA[i] = 0xa + r := &Shard{ + Keyspace: m.Keyspace, + Name: m.Name, + Shard: m.Shard.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *MaterializeSettings) MarshalVT() (dAtA []byte, err error) { +func (m *Shard) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Workflow_ReplicationLocation) CloneVT() *Workflow_ReplicationLocation { if m == nil { - return nil, nil + return (*Workflow_ReplicationLocation)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &Workflow_ReplicationLocation{ + Keyspace: m.Keyspace, } - return dAtA[:n], nil + if rhs := m.Shards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Shards = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *MaterializeSettings) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *Workflow_ReplicationLocation) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *MaterializeSettings) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Workflow_ShardStream) CloneVT() *Workflow_ShardStream { if m == nil { - return 0, nil + return (*Workflow_ShardStream)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &Workflow_ShardStream{ + IsPrimaryServing: m.IsPrimaryServing, } - if m.DeferSecondaryKeys { - i-- - if m.DeferSecondaryKeys { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if rhs := m.Streams; rhs != nil { + tmpContainer := make([]*Workflow_Stream, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x70 - } - if len(m.OnDdl) > 0 { - i -= len(m.OnDdl) - copy(dAtA[i:], m.OnDdl) - i = encodeVarint(dAtA, i, uint64(len(m.OnDdl))) - i-- - dAtA[i] = 0x6a + r.Streams = tmpContainer } - if len(m.SourceShards) > 0 { - for iNdEx := len(m.SourceShards) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.SourceShards[iNdEx]) - copy(dAtA[i:], m.SourceShards[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.SourceShards[iNdEx]))) - i-- - dAtA[i] = 0x62 + if rhs := m.TabletControls; rhs != nil { + tmpContainer := make([]*topodata.Shard_TabletControl, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } + r.TabletControls = tmpContainer } - if len(m.TargetTimeZone) > 0 { - i -= len(m.TargetTimeZone) - copy(dAtA[i:], m.TargetTimeZone) - i = encodeVarint(dAtA, i, uint64(len(m.TargetTimeZone))) - i-- - dAtA[i] = 0x5a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.SourceTimeZone) > 0 { - i -= len(m.SourceTimeZone) - copy(dAtA[i:], m.SourceTimeZone) - i = encodeVarint(dAtA, i, uint64(len(m.SourceTimeZone))) - i-- - dAtA[i] = 0x52 + return r +} + +func (m *Workflow_ShardStream) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Workflow_Stream_CopyState) CloneVT() *Workflow_Stream_CopyState { + if m == nil { + return (*Workflow_Stream_CopyState)(nil) } - if m.MaterializationIntent != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaterializationIntent)) - i-- - dAtA[i] = 0x48 + r := &Workflow_Stream_CopyState{ + Table: m.Table, + LastPk: m.LastPk, } - if len(m.ExternalCluster) > 0 { - i -= len(m.ExternalCluster) - copy(dAtA[i:], m.ExternalCluster) - i = encodeVarint(dAtA, i, uint64(len(m.ExternalCluster))) - i-- - dAtA[i] = 0x42 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.TabletTypes) > 0 { - i -= len(m.TabletTypes) - copy(dAtA[i:], m.TabletTypes) - i = encodeVarint(dAtA, i, uint64(len(m.TabletTypes))) - i-- - dAtA[i] = 0x3a + return r +} + +func (m *Workflow_Stream_CopyState) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Workflow_Stream_Log) CloneVT() *Workflow_Stream_Log { + if m == nil { + return (*Workflow_Stream_Log)(nil) } - if len(m.Cell) > 0 { - i -= len(m.Cell) - copy(dAtA[i:], m.Cell) - i = encodeVarint(dAtA, i, uint64(len(m.Cell))) - i-- - dAtA[i] = 0x32 + r := &Workflow_Stream_Log{ + Id: m.Id, + StreamId: m.StreamId, + Type: m.Type, + State: m.State, + CreatedAt: m.CreatedAt.CloneVT(), + UpdatedAt: m.UpdatedAt.CloneVT(), + Message: m.Message, + Count: m.Count, } - if len(m.TableSettings) > 0 { - for iNdEx := len(m.TableSettings) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.TableSettings[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.StopAfterCopy { - i-- - if m.StopAfterCopy { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + return r +} + +func (m *Workflow_Stream_Log) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Workflow_Stream) CloneVT() *Workflow_Stream { + if m == nil { + return (*Workflow_Stream)(nil) + } + r := &Workflow_Stream{ + Id: m.Id, + Shard: m.Shard, + Tablet: m.Tablet.CloneVT(), + BinlogSource: m.BinlogSource.CloneVT(), + Position: m.Position, + StopPosition: m.StopPosition, + State: m.State, + DbName: m.DbName, + TransactionTimestamp: m.TransactionTimestamp.CloneVT(), + TimeUpdated: m.TimeUpdated.CloneVT(), + Message: m.Message, + LogFetchError: m.LogFetchError, + } + if rhs := m.CopyStates; rhs != nil { + tmpContainer := make([]*Workflow_Stream_CopyState, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x20 + r.CopyStates = tmpContainer } - if len(m.TargetKeyspace) > 0 { - i -= len(m.TargetKeyspace) - copy(dAtA[i:], m.TargetKeyspace) - i = encodeVarint(dAtA, i, uint64(len(m.TargetKeyspace))) - i-- - dAtA[i] = 0x1a + if rhs := m.Logs; rhs != nil { + tmpContainer := make([]*Workflow_Stream_Log, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Logs = tmpContainer } - if len(m.SourceKeyspace) > 0 { - i -= len(m.SourceKeyspace) - copy(dAtA[i:], m.SourceKeyspace) - i = encodeVarint(dAtA, i, uint64(len(m.SourceKeyspace))) - i-- - dAtA[i] = 0x12 + if rhs := m.Tags; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tags = tmpContainer } - if len(m.Workflow) > 0 { - i -= len(m.Workflow) - copy(dAtA[i:], m.Workflow) - i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *Keyspace) MarshalVT() (dAtA []byte, err error) { +func (m *Workflow_Stream) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Workflow) CloneVT() *Workflow { if m == nil { - return nil, nil + return (*Workflow)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &Workflow{ + Name: m.Name, + Source: m.Source.CloneVT(), + Target: m.Target.CloneVT(), + MaxVReplicationLag: m.MaxVReplicationLag, + WorkflowType: m.WorkflowType, + WorkflowSubType: m.WorkflowSubType, } - return dAtA[:n], nil + if rhs := m.ShardStreams; rhs != nil { + tmpContainer := make(map[string]*Workflow_ShardStream, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ShardStreams = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Keyspace) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *Workflow) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Keyspace) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *AddCellInfoRequest) CloneVT() *AddCellInfoRequest { if m == nil { - return 0, nil + return (*AddCellInfoRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &AddCellInfoRequest{ + Name: m.Name, + CellInfo: m.CellInfo.CloneVT(), } - if m.Keyspace != nil { - size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + return r +} + +func (m *AddCellInfoRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *AddCellInfoResponse) CloneVT() *AddCellInfoResponse { + if m == nil { + return (*AddCellInfoResponse)(nil) } - return len(dAtA) - i, nil + r := &AddCellInfoResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Shard) MarshalVT() (dAtA []byte, err error) { +func (m *AddCellInfoResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *AddCellsAliasRequest) CloneVT() *AddCellsAliasRequest { if m == nil { - return nil, nil + return (*AddCellsAliasRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &AddCellsAliasRequest{ + Name: m.Name, } - return dAtA[:n], nil + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Shard) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *AddCellsAliasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Shard) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *AddCellsAliasResponse) CloneVT() *AddCellsAliasResponse { if m == nil { - return 0, nil + return (*AddCellsAliasResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &AddCellsAliasResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.Shard != nil { - size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a + return r +} + +func (m *AddCellsAliasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ApplyRoutingRulesRequest) CloneVT() *ApplyRoutingRulesRequest { + if m == nil { + return (*ApplyRoutingRulesRequest)(nil) } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 + r := &ApplyRoutingRulesRequest{ + RoutingRules: m.RoutingRules.CloneVT(), + SkipRebuild: m.SkipRebuild, } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if rhs := m.RebuildCells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.RebuildCells = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Workflow_ReplicationLocation) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyRoutingRulesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ApplyRoutingRulesResponse) CloneVT() *ApplyRoutingRulesResponse { if m == nil { - return nil, nil + return (*ApplyRoutingRulesResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ApplyRoutingRulesResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *Workflow_ReplicationLocation) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ApplyRoutingRulesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Workflow_ReplicationLocation) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyShardRoutingRulesRequest) CloneVT() *ApplyShardRoutingRulesRequest { if m == nil { - return 0, nil + return (*ApplyShardRoutingRulesRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ApplyShardRoutingRulesRequest{ + ShardRoutingRules: m.ShardRoutingRules.CloneVT(), + SkipRebuild: m.SkipRebuild, } - if len(m.Shards) > 0 { - for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Shards[iNdEx]) - copy(dAtA[i:], m.Shards[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Shards[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + if rhs := m.RebuildCells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.RebuildCells = tmpContainer } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *Workflow_ShardStream) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyShardRoutingRulesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ApplyShardRoutingRulesResponse) CloneVT() *ApplyShardRoutingRulesResponse { if m == nil { - return nil, nil + return (*ApplyShardRoutingRulesResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ApplyShardRoutingRulesResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *Workflow_ShardStream) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ApplyShardRoutingRulesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Workflow_ShardStream) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplySchemaRequest) CloneVT() *ApplySchemaRequest { if m == nil { - return 0, nil + return (*ApplySchemaRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ApplySchemaRequest{ + Keyspace: m.Keyspace, + DdlStrategy: m.DdlStrategy, + MigrationContext: m.MigrationContext, + WaitReplicasTimeout: m.WaitReplicasTimeout.CloneVT(), + SkipPreflight: m.SkipPreflight, + CallerId: m.CallerId.CloneVT(), + BatchSize: m.BatchSize, } - if m.IsPrimaryServing { - i-- - if m.IsPrimaryServing { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + if rhs := m.Sql; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Sql = tmpContainer } - if len(m.TabletControls) > 0 { - for iNdEx := len(m.TabletControls) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.TabletControls[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - } + if rhs := m.UuidList; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.UuidList = tmpContainer } - if len(m.Streams) > 0 { - for iNdEx := len(m.Streams) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Streams[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *Workflow_Stream_CopyState) MarshalVT() (dAtA []byte, err error) { +func (m *ApplySchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ApplySchemaResponse) CloneVT() *ApplySchemaResponse { if m == nil { - return nil, nil + return (*ApplySchemaResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ApplySchemaResponse{} + if rhs := m.UuidList; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.UuidList = tmpContainer } - return dAtA[:n], nil + if rhs := m.RowsAffectedByShard; rhs != nil { + tmpContainer := make(map[string]uint64, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.RowsAffectedByShard = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Workflow_Stream_CopyState) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ApplySchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Workflow_Stream_CopyState) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyVSchemaRequest) CloneVT() *ApplyVSchemaRequest { if m == nil { - return 0, nil + return (*ApplyVSchemaRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ApplyVSchemaRequest{ + Keyspace: m.Keyspace, + SkipRebuild: m.SkipRebuild, + DryRun: m.DryRun, + VSchema: m.VSchema.CloneVT(), + Sql: m.Sql, } - if len(m.LastPk) > 0 { - i -= len(m.LastPk) - copy(dAtA[i:], m.LastPk) - i = encodeVarint(dAtA, i, uint64(len(m.LastPk))) - i-- - dAtA[i] = 0x12 + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer } - if len(m.Table) > 0 { - i -= len(m.Table) - copy(dAtA[i:], m.Table) - i = encodeVarint(dAtA, i, uint64(len(m.Table))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *Workflow_Stream_Log) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyVSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ApplyVSchemaResponse) CloneVT() *ApplyVSchemaResponse { if m == nil { - return nil, nil + return (*ApplyVSchemaResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ApplyVSchemaResponse{ + VSchema: m.VSchema.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Workflow_Stream_Log) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ApplyVSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Workflow_Stream_Log) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *BackupRequest) CloneVT() *BackupRequest { if m == nil { - return 0, nil + return (*BackupRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &BackupRequest{ + TabletAlias: m.TabletAlias.CloneVT(), + AllowPrimary: m.AllowPrimary, + Concurrency: m.Concurrency, + IncrementalFromPos: m.IncrementalFromPos, + UpgradeSafe: m.UpgradeSafe, } - if m.Count != 0 { - i = encodeVarint(dAtA, i, uint64(m.Count)) - i-- - dAtA[i] = 0x40 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Message) > 0 { - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarint(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x3a - } - if m.UpdatedAt != nil { - size, err := m.UpdatedAt.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x32 + return r +} + +func (m *BackupRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BackupResponse) CloneVT() *BackupResponse { + if m == nil { + return (*BackupResponse)(nil) } - if m.CreatedAt != nil { - size, err := m.CreatedAt.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a + r := &BackupResponse{ + TabletAlias: m.TabletAlias.CloneVT(), + Keyspace: m.Keyspace, + Shard: m.Shard, + Event: m.Event.CloneVT(), } - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarint(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x22 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarint(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x1a + return r +} + +func (m *BackupResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *BackupShardRequest) CloneVT() *BackupShardRequest { + if m == nil { + return (*BackupShardRequest)(nil) } - if m.StreamId != 0 { - i = encodeVarint(dAtA, i, uint64(m.StreamId)) - i-- - dAtA[i] = 0x10 + r := &BackupShardRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + AllowPrimary: m.AllowPrimary, + Concurrency: m.Concurrency, + UpgradeSafe: m.UpgradeSafe, + IncrementalFromPos: m.IncrementalFromPos, } - if m.Id != 0 { - i = encodeVarint(dAtA, i, uint64(m.Id)) - i-- - dAtA[i] = 0x8 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *Workflow_Stream) MarshalVT() (dAtA []byte, err error) { +func (m *BackupShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CancelSchemaMigrationRequest) CloneVT() *CancelSchemaMigrationRequest { if m == nil { - return nil, nil + return (*CancelSchemaMigrationRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &CancelSchemaMigrationRequest{ + Keyspace: m.Keyspace, + Uuid: m.Uuid, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Workflow_Stream) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *CancelSchemaMigrationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Workflow_Stream) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CancelSchemaMigrationResponse) CloneVT() *CancelSchemaMigrationResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*CancelSchemaMigrationResponse)(nil) } - if len(m.Tags) > 0 { - for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Tags[iNdEx]) - copy(dAtA[i:], m.Tags[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Tags[iNdEx]))) - i-- - dAtA[i] = 0x7a + r := &CancelSchemaMigrationResponse{} + if rhs := m.RowsAffectedByShard; rhs != nil { + tmpContainer := make(map[string]uint64, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v } + r.RowsAffectedByShard = tmpContainer } - if len(m.LogFetchError) > 0 { - i -= len(m.LogFetchError) - copy(dAtA[i:], m.LogFetchError) - i = encodeVarint(dAtA, i, uint64(len(m.LogFetchError))) - i-- - dAtA[i] = 0x72 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Logs) > 0 { - for iNdEx := len(m.Logs) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Logs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x6a - } + return r +} + +func (m *CancelSchemaMigrationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ChangeTabletTypeRequest) CloneVT() *ChangeTabletTypeRequest { + if m == nil { + return (*ChangeTabletTypeRequest)(nil) } - if len(m.CopyStates) > 0 { - for iNdEx := len(m.CopyStates) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.CopyStates[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x62 - } + r := &ChangeTabletTypeRequest{ + TabletAlias: m.TabletAlias.CloneVT(), + DbType: m.DbType, + DryRun: m.DryRun, } - if len(m.Message) > 0 { - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarint(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x5a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.TimeUpdated != nil { - size, err := m.TimeUpdated.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x52 + return r +} + +func (m *ChangeTabletTypeRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ChangeTabletTypeResponse) CloneVT() *ChangeTabletTypeResponse { + if m == nil { + return (*ChangeTabletTypeResponse)(nil) } - if m.TransactionTimestamp != nil { - size, err := m.TransactionTimestamp.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x4a + r := &ChangeTabletTypeResponse{ + BeforeTablet: m.BeforeTablet.CloneVT(), + AfterTablet: m.AfterTablet.CloneVT(), + WasDryRun: m.WasDryRun, } - if len(m.DbName) > 0 { - i -= len(m.DbName) - copy(dAtA[i:], m.DbName) - i = encodeVarint(dAtA, i, uint64(len(m.DbName))) - i-- - dAtA[i] = 0x42 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarint(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x3a + return r +} + +func (m *ChangeTabletTypeResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CleanupSchemaMigrationRequest) CloneVT() *CleanupSchemaMigrationRequest { + if m == nil { + return (*CleanupSchemaMigrationRequest)(nil) } - if len(m.StopPosition) > 0 { - i -= len(m.StopPosition) - copy(dAtA[i:], m.StopPosition) - i = encodeVarint(dAtA, i, uint64(len(m.StopPosition))) - i-- - dAtA[i] = 0x32 + r := &CleanupSchemaMigrationRequest{ + Keyspace: m.Keyspace, + Uuid: m.Uuid, } - if len(m.Position) > 0 { - i -= len(m.Position) - copy(dAtA[i:], m.Position) - i = encodeVarint(dAtA, i, uint64(len(m.Position))) - i-- - dAtA[i] = 0x2a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.BinlogSource != nil { - size, err := m.BinlogSource.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 + return r +} + +func (m *CleanupSchemaMigrationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CleanupSchemaMigrationResponse) CloneVT() *CleanupSchemaMigrationResponse { + if m == nil { + return (*CleanupSchemaMigrationResponse)(nil) } - if m.Tablet != nil { - size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + r := &CleanupSchemaMigrationResponse{} + if rhs := m.RowsAffectedByShard; rhs != nil { + tmpContainer := make(map[string]uint64, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 + r.RowsAffectedByShard = tmpContainer } - if m.Id != 0 { - i = encodeVarint(dAtA, i, uint64(m.Id)) - i-- - dAtA[i] = 0x8 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *Workflow) MarshalVT() (dAtA []byte, err error) { +func (m *CleanupSchemaMigrationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CompleteSchemaMigrationRequest) CloneVT() *CompleteSchemaMigrationRequest { if m == nil { - return nil, nil + return (*CompleteSchemaMigrationRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &CompleteSchemaMigrationRequest{ + Keyspace: m.Keyspace, + Uuid: m.Uuid, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *Workflow) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *CompleteSchemaMigrationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *Workflow) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CompleteSchemaMigrationResponse) CloneVT() *CompleteSchemaMigrationResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.WorkflowSubType) > 0 { - i -= len(m.WorkflowSubType) - copy(dAtA[i:], m.WorkflowSubType) - i = encodeVarint(dAtA, i, uint64(len(m.WorkflowSubType))) - i-- - dAtA[i] = 0x3a + return (*CompleteSchemaMigrationResponse)(nil) } - if len(m.WorkflowType) > 0 { - i -= len(m.WorkflowType) - copy(dAtA[i:], m.WorkflowType) - i = encodeVarint(dAtA, i, uint64(len(m.WorkflowType))) - i-- - dAtA[i] = 0x32 - } - if len(m.ShardStreams) > 0 { - for k := range m.ShardStreams { - v := m.ShardStreams[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a + r := &CompleteSchemaMigrationResponse{} + if rhs := m.RowsAffectedByShard; rhs != nil { + tmpContainer := make(map[string]uint64, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v } + r.RowsAffectedByShard = tmpContainer } - if m.MaxVReplicationLag != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxVReplicationLag)) - i-- - dAtA[i] = 0x20 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.Target != nil { - size, err := m.Target.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a + return r +} + +func (m *CompleteSchemaMigrationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CreateKeyspaceRequest) CloneVT() *CreateKeyspaceRequest { + if m == nil { + return (*CreateKeyspaceRequest)(nil) } - if m.Source != nil { - size, err := m.Source.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + r := &CreateKeyspaceRequest{ + Name: m.Name, + Force: m.Force, + AllowEmptyVSchema: m.AllowEmptyVSchema, + Type: m.Type, + BaseKeyspace: m.BaseKeyspace, + SnapshotTime: m.SnapshotTime.CloneVT(), + DurabilityPolicy: m.DurabilityPolicy, + SidecarDbName: m.SidecarDbName, + } + if rhs := m.ServedFroms; rhs != nil { + tmpContainer := make([]*topodata.Keyspace_ServedFrom, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + r.ServedFroms = tmpContainer } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *AddCellInfoRequest) MarshalVT() (dAtA []byte, err error) { +func (m *CreateKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CreateKeyspaceResponse) CloneVT() *CreateKeyspaceResponse { if m == nil { - return nil, nil + return (*CreateKeyspaceResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &CreateKeyspaceResponse{ + Keyspace: m.Keyspace.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *AddCellInfoRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *CreateKeyspaceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *AddCellInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CreateShardRequest) CloneVT() *CreateShardRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*CreateShardRequest)(nil) } - if m.CellInfo != nil { - size, err := m.CellInfo.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + r := &CreateShardRequest{ + Keyspace: m.Keyspace, + ShardName: m.ShardName, + Force: m.Force, + IncludeParent: m.IncludeParent, } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *AddCellInfoResponse) MarshalVT() (dAtA []byte, err error) { +func (m *CreateShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CreateShardResponse) CloneVT() *CreateShardResponse { if m == nil { - return nil, nil + return (*CreateShardResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &CreateShardResponse{ + Keyspace: m.Keyspace.CloneVT(), + Shard: m.Shard.CloneVT(), + ShardAlreadyExists: m.ShardAlreadyExists, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *AddCellInfoResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *CreateShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *AddCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteCellInfoRequest) CloneVT() *DeleteCellInfoRequest { if m == nil { - return 0, nil + return (*DeleteCellInfoRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &DeleteCellInfoRequest{ + Name: m.Name, + Force: m.Force, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *AddCellsAliasRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteCellInfoRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteCellInfoResponse) CloneVT() *DeleteCellInfoResponse { if m == nil { - return nil, nil + return (*DeleteCellInfoResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DeleteCellInfoResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *AddCellsAliasRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DeleteCellInfoResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *AddCellsAliasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteCellsAliasRequest) CloneVT() *DeleteCellsAliasRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*DeleteCellsAliasRequest)(nil) } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + r := &DeleteCellsAliasRequest{ + Name: m.Name, } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *AddCellsAliasResponse) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteCellsAliasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteCellsAliasResponse) CloneVT() *DeleteCellsAliasResponse { if m == nil { - return nil, nil + return (*DeleteCellsAliasResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DeleteCellsAliasResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *AddCellsAliasResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DeleteCellsAliasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *AddCellsAliasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteKeyspaceRequest) CloneVT() *DeleteKeyspaceRequest { if m == nil { - return 0, nil + return (*DeleteKeyspaceRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &DeleteKeyspaceRequest{ + Keyspace: m.Keyspace, + Recursive: m.Recursive, + Force: m.Force, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplyRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteKeyspaceResponse) CloneVT() *DeleteKeyspaceResponse { if m == nil { - return nil, nil + return (*DeleteKeyspaceResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DeleteKeyspaceResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ApplyRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DeleteKeyspaceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplyRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteShardsRequest) CloneVT() *DeleteShardsRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*DeleteShardsRequest)(nil) } - if len(m.RebuildCells) > 0 { - for iNdEx := len(m.RebuildCells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RebuildCells[iNdEx]) - copy(dAtA[i:], m.RebuildCells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.RebuildCells[iNdEx]))) - i-- - dAtA[i] = 0x1a - } + r := &DeleteShardsRequest{ + Recursive: m.Recursive, + EvenIfServing: m.EvenIfServing, + Force: m.Force, } - if m.SkipRebuild { - i-- - if m.SkipRebuild { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if rhs := m.Shards; rhs != nil { + tmpContainer := make([]*Shard, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x10 + r.Shards = tmpContainer } - if m.RoutingRules != nil { - size, err := m.RoutingRules.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ApplyRoutingRulesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteShardsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteShardsResponse) CloneVT() *DeleteShardsResponse { if m == nil { - return nil, nil + return (*DeleteShardsResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DeleteShardsResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ApplyRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DeleteShardsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplyRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteSrvVSchemaRequest) CloneVT() *DeleteSrvVSchemaRequest { if m == nil { - return 0, nil + return (*DeleteSrvVSchemaRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &DeleteSrvVSchemaRequest{ + Cell: m.Cell, } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplyShardRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteSrvVSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteSrvVSchemaResponse) CloneVT() *DeleteSrvVSchemaResponse { if m == nil { - return nil, nil + return (*DeleteSrvVSchemaResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DeleteSrvVSchemaResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ApplyShardRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DeleteSrvVSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplyShardRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteTabletsRequest) CloneVT() *DeleteTabletsRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*DeleteTabletsRequest)(nil) } - if len(m.RebuildCells) > 0 { - for iNdEx := len(m.RebuildCells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RebuildCells[iNdEx]) - copy(dAtA[i:], m.RebuildCells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.RebuildCells[iNdEx]))) - i-- - dAtA[i] = 0x1a - } + r := &DeleteTabletsRequest{ + AllowPrimary: m.AllowPrimary, } - if m.SkipRebuild { - i-- - if m.SkipRebuild { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if rhs := m.TabletAliases; rhs != nil { + tmpContainer := make([]*topodata.TabletAlias, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x10 + r.TabletAliases = tmpContainer } - if m.ShardRoutingRules != nil { - size, err := m.ShardRoutingRules.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ApplyShardRoutingRulesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteTabletsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *DeleteTabletsResponse) CloneVT() *DeleteTabletsResponse { if m == nil { - return nil, nil + return (*DeleteTabletsResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &DeleteTabletsResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ApplyShardRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *DeleteTabletsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplyShardRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *EmergencyReparentShardRequest) CloneVT() *EmergencyReparentShardRequest { if m == nil { - return 0, nil + return (*EmergencyReparentShardRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &EmergencyReparentShardRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + NewPrimary: m.NewPrimary.CloneVT(), + WaitReplicasTimeout: m.WaitReplicasTimeout.CloneVT(), + PreventCrossCellPromotion: m.PreventCrossCellPromotion, + WaitForAllTablets: m.WaitForAllTablets, } - return len(dAtA) - i, nil + if rhs := m.IgnoreReplicas; rhs != nil { + tmpContainer := make([]*topodata.TabletAlias, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.IgnoreReplicas = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplySchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *EmergencyReparentShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *EmergencyReparentShardResponse) CloneVT() *EmergencyReparentShardResponse { if m == nil { - return nil, nil + return (*EmergencyReparentShardResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &EmergencyReparentShardResponse{ + Keyspace: m.Keyspace, + Shard: m.Shard, + PromotedPrimary: m.PromotedPrimary.CloneVT(), } - return dAtA[:n], nil + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplySchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *EmergencyReparentShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplySchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppRequest) CloneVT() *ExecuteFetchAsAppRequest { if m == nil { - return 0, nil + return (*ExecuteFetchAsAppRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ExecuteFetchAsAppRequest{ + TabletAlias: m.TabletAlias.CloneVT(), + Query: m.Query, + MaxRows: m.MaxRows, + UsePool: m.UsePool, } - if m.CallerId != nil { - size, err := m.CallerId.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x4a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.SkipPreflight { - i-- - if m.SkipPreflight { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 + return r +} + +func (m *ExecuteFetchAsAppRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteFetchAsAppResponse) CloneVT() *ExecuteFetchAsAppResponse { + if m == nil { + return (*ExecuteFetchAsAppResponse)(nil) } - if m.WaitReplicasTimeout != nil { - size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x3a + r := &ExecuteFetchAsAppResponse{ + Result: m.Result.CloneVT(), } - if len(m.MigrationContext) > 0 { - i -= len(m.MigrationContext) - copy(dAtA[i:], m.MigrationContext) - i = encodeVarint(dAtA, i, uint64(len(m.MigrationContext))) - i-- - dAtA[i] = 0x32 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.UuidList) > 0 { - for iNdEx := len(m.UuidList) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.UuidList[iNdEx]) - copy(dAtA[i:], m.UuidList[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.UuidList[iNdEx]))) - i-- - dAtA[i] = 0x2a - } + return r +} + +func (m *ExecuteFetchAsAppResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteFetchAsDBARequest) CloneVT() *ExecuteFetchAsDBARequest { + if m == nil { + return (*ExecuteFetchAsDBARequest)(nil) } - if len(m.DdlStrategy) > 0 { - i -= len(m.DdlStrategy) - copy(dAtA[i:], m.DdlStrategy) - i = encodeVarint(dAtA, i, uint64(len(m.DdlStrategy))) - i-- - dAtA[i] = 0x22 + r := &ExecuteFetchAsDBARequest{ + TabletAlias: m.TabletAlias.CloneVT(), + Query: m.Query, + MaxRows: m.MaxRows, + DisableBinlogs: m.DisableBinlogs, + ReloadSchema: m.ReloadSchema, } - if len(m.Sql) > 0 { - for iNdEx := len(m.Sql) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Sql[iNdEx]) - copy(dAtA[i:], m.Sql[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Sql[iNdEx]))) - i-- - dAtA[i] = 0x1a - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.AllowLongUnavailability { - i-- - if m.AllowLongUnavailability { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + return r +} + +func (m *ExecuteFetchAsDBARequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteFetchAsDBAResponse) CloneVT() *ExecuteFetchAsDBAResponse { + if m == nil { + return (*ExecuteFetchAsDBAResponse)(nil) } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + r := &ExecuteFetchAsDBAResponse{ + Result: m.Result.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplySchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsDBAResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteHookRequest) CloneVT() *ExecuteHookRequest { if m == nil { - return nil, nil + return (*ExecuteHookRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ExecuteHookRequest{ + TabletAlias: m.TabletAlias.CloneVT(), + TabletHookRequest: m.TabletHookRequest.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplySchemaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ExecuteHookRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplySchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteHookResponse) CloneVT() *ExecuteHookResponse { if m == nil { - return 0, nil + return (*ExecuteHookResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ExecuteHookResponse{ + HookResult: m.HookResult.CloneVT(), } - if len(m.UuidList) > 0 { - for iNdEx := len(m.UuidList) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.UuidList[iNdEx]) - copy(dAtA[i:], m.UuidList[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.UuidList[iNdEx]))) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ApplyVSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteHookResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *FindAllShardsInKeyspaceRequest) CloneVT() *FindAllShardsInKeyspaceRequest { if m == nil { - return nil, nil + return (*FindAllShardsInKeyspaceRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &FindAllShardsInKeyspaceRequest{ + Keyspace: m.Keyspace, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplyVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *FindAllShardsInKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplyVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *FindAllShardsInKeyspaceResponse) CloneVT() *FindAllShardsInKeyspaceResponse { if m == nil { - return 0, nil + return (*FindAllShardsInKeyspaceResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &FindAllShardsInKeyspaceResponse{} + if rhs := m.Shards; rhs != nil { + tmpContainer := make(map[string]*Shard, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Shards = tmpContainer } - if len(m.Sql) > 0 { - i -= len(m.Sql) - copy(dAtA[i:], m.Sql) - i = encodeVarint(dAtA, i, uint64(len(m.Sql))) - i-- - dAtA[i] = 0x32 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.VSchema != nil { - size, err := m.VSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a + return r +} + +func (m *FindAllShardsInKeyspaceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetBackupsRequest) CloneVT() *GetBackupsRequest { + if m == nil { + return (*GetBackupsRequest)(nil) } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + r := &GetBackupsRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Limit: m.Limit, + Detailed: m.Detailed, + DetailedLimit: m.DetailedLimit, } - if m.DryRun { - i-- - if m.DryRun { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.SkipRebuild { - i-- - if m.SkipRebuild { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + return r +} + +func (m *GetBackupsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetBackupsResponse) CloneVT() *GetBackupsResponse { + if m == nil { + return (*GetBackupsResponse)(nil) + } + r := &GetBackupsResponse{} + if rhs := m.Backups; rhs != nil { + tmpContainer := make([]*mysqlctl.BackupInfo, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x10 + r.Backups = tmpContainer } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ApplyVSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetBackupsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCellInfoRequest) CloneVT() *GetCellInfoRequest { if m == nil { - return nil, nil + return (*GetCellInfoRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetCellInfoRequest{ + Cell: m.Cell, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ApplyVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetCellInfoRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ApplyVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetCellInfoResponse) CloneVT() *GetCellInfoResponse { if m == nil { - return 0, nil + return (*GetCellInfoResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetCellInfoResponse{ + CellInfo: m.CellInfo.CloneVT(), } - if m.VSchema != nil { - size, err := m.VSchema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *BackupRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetCellInfoResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCellInfoNamesRequest) CloneVT() *GetCellInfoNamesRequest { if m == nil { - return nil, nil + return (*GetCellInfoNamesRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetCellInfoNamesRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *BackupRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetCellInfoNamesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *BackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetCellInfoNamesResponse) CloneVT() *GetCellInfoNamesResponse { if m == nil { - return 0, nil + return (*GetCellInfoNamesResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.IncrementalFromPos) > 0 { - i -= len(m.IncrementalFromPos) - copy(dAtA[i:], m.IncrementalFromPos) - i = encodeVarint(dAtA, i, uint64(len(m.IncrementalFromPos))) - i-- - dAtA[i] = 0x22 + r := &GetCellInfoNamesResponse{} + if rhs := m.Names; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Names = tmpContainer } - if m.Concurrency != 0 { - i = encodeVarint(dAtA, i, uint64(m.Concurrency)) - i-- - dAtA[i] = 0x18 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.AllowPrimary { - i-- - if m.AllowPrimary { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + return r +} + +func (m *GetCellInfoNamesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCellsAliasesRequest) CloneVT() *GetCellsAliasesRequest { + if m == nil { + return (*GetCellsAliasesRequest)(nil) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r := &GetCellsAliasesRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *BackupResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetCellsAliasesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetCellsAliasesResponse) CloneVT() *GetCellsAliasesResponse { if m == nil { - return nil, nil + return (*GetCellsAliasesResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetCellsAliasesResponse{} + if rhs := m.Aliases; rhs != nil { + tmpContainer := make(map[string]*topodata.CellsAlias, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Aliases = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *BackupResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetCellsAliasesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *BackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetFullStatusRequest) CloneVT() *GetFullStatusRequest { if m == nil { - return 0, nil + return (*GetFullStatusRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetFullStatusRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - if m.Event != nil { - size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x1a + return r +} + +func (m *GetFullStatusRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetFullStatusResponse) CloneVT() *GetFullStatusResponse { + if m == nil { + return (*GetFullStatusResponse)(nil) } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0x12 + r := &GetFullStatusResponse{ + Status: m.Status.CloneVT(), } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *BackupShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetFullStatusResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetKeyspacesRequest) CloneVT() *GetKeyspacesRequest { if m == nil { - return nil, nil + return (*GetKeyspacesRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetKeyspacesRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *BackupShardRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetKeyspacesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *BackupShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetKeyspacesResponse) CloneVT() *GetKeyspacesResponse { if m == nil { - return 0, nil + return (*GetKeyspacesResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetKeyspacesResponse{} + if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make([]*Keyspace, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Keyspaces = tmpContainer } - if m.Concurrency != 0 { - i = encodeVarint(dAtA, i, uint64(m.Concurrency)) - i-- - dAtA[i] = 0x20 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.AllowPrimary { - i-- - if m.AllowPrimary { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + return r +} + +func (m *GetKeyspacesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetKeyspaceRequest) CloneVT() *GetKeyspaceRequest { + if m == nil { + return (*GetKeyspaceRequest)(nil) } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 + r := &GetKeyspaceRequest{ + Keyspace: m.Keyspace, } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ChangeTabletTypeRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetKeyspaceResponse) CloneVT() *GetKeyspaceResponse { if m == nil { - return nil, nil + return (*GetKeyspaceResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetKeyspaceResponse{ + Keyspace: m.Keyspace.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ChangeTabletTypeRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetKeyspaceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ChangeTabletTypeRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetPermissionsRequest) CloneVT() *GetPermissionsRequest { if m == nil { - return 0, nil + return (*GetPermissionsRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if m.DryRun { - i-- - if m.DryRun { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.DbType != 0 { - i = encodeVarint(dAtA, i, uint64(m.DbType)) - i-- - dAtA[i] = 0x10 + r := &GetPermissionsRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ChangeTabletTypeResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetPermissionsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetPermissionsResponse) CloneVT() *GetPermissionsResponse { if m == nil { - return nil, nil + return (*GetPermissionsResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetPermissionsResponse{ + Permissions: m.Permissions.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ChangeTabletTypeResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetPermissionsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ChangeTabletTypeResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetRoutingRulesRequest) CloneVT() *GetRoutingRulesRequest { if m == nil { - return 0, nil + return (*GetRoutingRulesRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetRoutingRulesRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.WasDryRun { - i-- - if m.WasDryRun { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + return r +} + +func (m *GetRoutingRulesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetRoutingRulesResponse) CloneVT() *GetRoutingRulesResponse { + if m == nil { + return (*GetRoutingRulesResponse)(nil) } - if m.AfterTablet != nil { - size, err := m.AfterTablet.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + r := &GetRoutingRulesResponse{ + RoutingRules: m.RoutingRules.CloneVT(), } - if m.BeforeTablet != nil { - size, err := m.BeforeTablet.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *CreateKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetRoutingRulesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemaRequest) CloneVT() *GetSchemaRequest { if m == nil { - return nil, nil + return (*GetSchemaRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetSchemaRequest{ + TabletAlias: m.TabletAlias.CloneVT(), + IncludeViews: m.IncludeViews, + TableNamesOnly: m.TableNamesOnly, + TableSizesOnly: m.TableSizesOnly, + TableSchemaOnly: m.TableSchemaOnly, } - return dAtA[:n], nil + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tables = tmpContainer + } + if rhs := m.ExcludeTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ExcludeTables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *CreateKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *CreateKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSchemaResponse) CloneVT() *GetSchemaResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*GetSchemaResponse)(nil) } - if len(m.DurabilityPolicy) > 0 { - i -= len(m.DurabilityPolicy) - copy(dAtA[i:], m.DurabilityPolicy) - i = encodeVarint(dAtA, i, uint64(len(m.DurabilityPolicy))) - i-- - dAtA[i] = 0x52 + r := &GetSchemaResponse{ + Schema: m.Schema.CloneVT(), } - if m.SnapshotTime != nil { - size, err := m.SnapshotTime.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x4a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.BaseKeyspace) > 0 { - i -= len(m.BaseKeyspace) - copy(dAtA[i:], m.BaseKeyspace) - i = encodeVarint(dAtA, i, uint64(len(m.BaseKeyspace))) - i-- - dAtA[i] = 0x42 + return r +} + +func (m *GetSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemaMigrationsRequest) CloneVT() *GetSchemaMigrationsRequest { + if m == nil { + return (*GetSchemaMigrationsRequest)(nil) } - if m.Type != 0 { - i = encodeVarint(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x38 + r := &GetSchemaMigrationsRequest{ + Keyspace: m.Keyspace, + Uuid: m.Uuid, + MigrationContext: m.MigrationContext, + Status: m.Status, + Recent: m.Recent.CloneVT(), + Order: m.Order, + Limit: m.Limit, + Skip: m.Skip, } - if len(m.ServedFroms) > 0 { - for iNdEx := len(m.ServedFroms) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.ServedFroms[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x32 - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.AllowEmptyVSchema { - i-- - if m.AllowEmptyVSchema { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + return r +} + +func (m *GetSchemaMigrationsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemaMigrationsResponse) CloneVT() *GetSchemaMigrationsResponse { + if m == nil { + return (*GetSchemaMigrationsResponse)(nil) } - if m.Force { - i-- - if m.Force { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + r := &GetSchemaMigrationsResponse{} + if rhs := m.Migrations; rhs != nil { + tmpContainer := make([]*SchemaMigration, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x10 + r.Migrations = tmpContainer } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *CreateKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSchemaMigrationsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetShardRequest) CloneVT() *GetShardRequest { if m == nil { - return nil, nil + return (*GetShardRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetShardRequest{ + Keyspace: m.Keyspace, + ShardName: m.ShardName, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *CreateKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *CreateKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetShardResponse) CloneVT() *GetShardResponse { if m == nil { - return 0, nil + return (*GetShardResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetShardResponse{ + Shard: m.Shard.CloneVT(), } - if m.Keyspace != nil { - size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *CreateShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetShardRoutingRulesRequest) CloneVT() *GetShardRoutingRulesRequest { if m == nil { - return nil, nil + return (*GetShardRoutingRulesRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetShardRoutingRulesRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *CreateShardRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetShardRoutingRulesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *CreateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetShardRoutingRulesResponse) CloneVT() *GetShardRoutingRulesResponse { if m == nil { - return 0, nil + return (*GetShardRoutingRulesResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetShardRoutingRulesResponse{ + ShardRoutingRules: m.ShardRoutingRules.CloneVT(), } - if m.IncludeParent { - i-- - if m.IncludeParent { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.Force { - i-- - if m.Force { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + return r +} + +func (m *GetShardRoutingRulesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvKeyspaceNamesRequest) CloneVT() *GetSrvKeyspaceNamesRequest { + if m == nil { + return (*GetSrvKeyspaceNamesRequest)(nil) } - if len(m.ShardName) > 0 { - i -= len(m.ShardName) - copy(dAtA[i:], m.ShardName) - i = encodeVarint(dAtA, i, uint64(len(m.ShardName))) - i-- - dAtA[i] = 0x12 + r := &GetSrvKeyspaceNamesRequest{} + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *CreateShardResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSrvKeyspaceNamesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvKeyspaceNamesResponse_NameList) CloneVT() *GetSrvKeyspaceNamesResponse_NameList { if m == nil { - return nil, nil + return (*GetSrvKeyspaceNamesResponse_NameList)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetSrvKeyspaceNamesResponse_NameList{} + if rhs := m.Names; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Names = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *CreateShardResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetSrvKeyspaceNamesResponse_NameList) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *CreateShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSrvKeyspaceNamesResponse) CloneVT() *GetSrvKeyspaceNamesResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if m.ShardAlreadyExists { - i-- - if m.ShardAlreadyExists { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + return (*GetSrvKeyspaceNamesResponse)(nil) } - if m.Shard != nil { - size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + r := &GetSrvKeyspaceNamesResponse{} + if rhs := m.Names; rhs != nil { + tmpContainer := make(map[string]*GetSrvKeyspaceNamesResponse_NameList, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + r.Names = tmpContainer } - if m.Keyspace != nil { - size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *DeleteCellInfoRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetSrvKeyspaceNamesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvKeyspacesRequest) CloneVT() *GetSrvKeyspacesRequest { if m == nil { - return nil, nil + return (*GetSrvKeyspacesRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetSrvKeyspacesRequest{ + Keyspace: m.Keyspace, } - return dAtA[:n], nil + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteCellInfoRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetSrvKeyspacesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteCellInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSrvKeyspacesResponse) CloneVT() *GetSrvKeyspacesResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*GetSrvKeyspacesResponse)(nil) } - if m.Force { - i-- - if m.Force { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + r := &GetSrvKeyspacesResponse{} + if rhs := m.SrvKeyspaces; rhs != nil { + tmpContainer := make(map[string]*topodata.SrvKeyspace, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x10 + r.SrvKeyspaces = tmpContainer } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *DeleteCellInfoResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSrvKeyspacesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UpdateThrottlerConfigRequest) CloneVT() *UpdateThrottlerConfigRequest { if m == nil { - return nil, nil + return (*UpdateThrottlerConfigRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &UpdateThrottlerConfigRequest{ + Keyspace: m.Keyspace, + Enable: m.Enable, + Disable: m.Disable, + Threshold: m.Threshold, + CustomQuery: m.CustomQuery, + CustomQuerySet: m.CustomQuerySet, + CheckAsCheckSelf: m.CheckAsCheckSelf, + CheckAsCheckShard: m.CheckAsCheckShard, + ThrottledApp: m.ThrottledApp.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteCellInfoResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *UpdateThrottlerConfigRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UpdateThrottlerConfigResponse) CloneVT() *UpdateThrottlerConfigResponse { if m == nil { - return 0, nil + return (*UpdateThrottlerConfigResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &UpdateThrottlerConfigResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *DeleteCellsAliasRequest) MarshalVT() (dAtA []byte, err error) { +func (m *UpdateThrottlerConfigResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvVSchemaRequest) CloneVT() *GetSrvVSchemaRequest { if m == nil { - return nil, nil + return (*GetSrvVSchemaRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetSrvVSchemaRequest{ + Cell: m.Cell, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteCellsAliasRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetSrvVSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteCellsAliasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSrvVSchemaResponse) CloneVT() *GetSrvVSchemaResponse { if m == nil { - return 0, nil + return (*GetSrvVSchemaResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetSrvVSchemaResponse{ + SrvVSchema: m.SrvVSchema.CloneVT(), } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *DeleteCellsAliasResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSrvVSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSrvVSchemasRequest) CloneVT() *GetSrvVSchemasRequest { if m == nil { - return nil, nil + return (*GetSrvVSchemasRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetSrvVSchemasRequest{} + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteCellsAliasResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetSrvVSchemasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteCellsAliasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSrvVSchemasResponse) CloneVT() *GetSrvVSchemasResponse { if m == nil { - return 0, nil + return (*GetSrvVSchemasResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetSrvVSchemasResponse{} + if rhs := m.SrvVSchemas; rhs != nil { + tmpContainer := make(map[string]*vschema.SrvVSchema, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.SrvVSchemas = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetSrvVSchemasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetTabletRequest) CloneVT() *GetTabletRequest { if m == nil { - return nil, nil + return (*GetTabletRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetTabletRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetTabletRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetTabletResponse) CloneVT() *GetTabletResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if m.Force { - i-- - if m.Force { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + return (*GetTabletResponse)(nil) } - if m.Recursive { - i-- - if m.Recursive { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + r := &GetTabletResponse{ + Tablet: m.Tablet.CloneVT(), } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *DeleteKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetTabletResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetTabletsRequest) CloneVT() *GetTabletsRequest { if m == nil { - return nil, nil + return (*GetTabletsRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetTabletsRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Strict: m.Strict, + TabletType: m.TabletType, } - return dAtA[:n], nil + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if rhs := m.TabletAliases; rhs != nil { + tmpContainer := make([]*topodata.TabletAlias, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TabletAliases = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetTabletsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetTabletsResponse) CloneVT() *GetTabletsResponse { if m == nil { - return 0, nil + return (*GetTabletsResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetTabletsResponse{} + if rhs := m.Tablets; rhs != nil { + tmpContainer := make([]*topodata.Tablet, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Tablets = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteShardsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetTabletsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetTopologyPathRequest) CloneVT() *GetTopologyPathRequest { if m == nil { - return nil, nil + return (*GetTopologyPathRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetTopologyPathRequest{ + Path: m.Path, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteShardsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetTopologyPathRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteShardsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetTopologyPathResponse) CloneVT() *GetTopologyPathResponse { if m == nil { - return 0, nil + return (*GetTopologyPathResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetTopologyPathResponse{ + Cell: m.Cell.CloneVT(), } - if m.Force { - i-- - if m.Force { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.EvenIfServing { - i-- - if m.EvenIfServing { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + return r +} + +func (m *GetTopologyPathResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TopologyCell) CloneVT() *TopologyCell { + if m == nil { + return (*TopologyCell)(nil) } - if m.Recursive { - i-- - if m.Recursive { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + r := &TopologyCell{ + Name: m.Name, + Path: m.Path, + Data: m.Data, } - if len(m.Shards) > 0 { - for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Shards[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } + if rhs := m.Children; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Children = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteShardsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *TopologyCell) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetVSchemaRequest) CloneVT() *GetVSchemaRequest { if m == nil { - return nil, nil + return (*GetVSchemaRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetVSchemaRequest{ + Keyspace: m.Keyspace, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteShardsResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetVSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteShardsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetVersionRequest) CloneVT() *GetVersionRequest { if m == nil { - return 0, nil + return (*GetVersionRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetVersionRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteSrvVSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetVersionRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetVersionResponse) CloneVT() *GetVersionResponse { if m == nil { - return nil, nil + return (*GetVersionResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetVersionResponse{ + Version: m.Version, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteSrvVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetVersionResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteSrvVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetVSchemaResponse) CloneVT() *GetVSchemaResponse { if m == nil { - return 0, nil + return (*GetVSchemaResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetVSchemaResponse{ + VSchema: m.VSchema.CloneVT(), } - if len(m.Cell) > 0 { - i -= len(m.Cell) - copy(dAtA[i:], m.Cell) - i = encodeVarint(dAtA, i, uint64(len(m.Cell))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *DeleteSrvVSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetVSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetWorkflowsRequest) CloneVT() *GetWorkflowsRequest { if m == nil { - return nil, nil + return (*GetWorkflowsRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &GetWorkflowsRequest{ + Keyspace: m.Keyspace, + ActiveOnly: m.ActiveOnly, + NameOnly: m.NameOnly, + Workflow: m.Workflow, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteSrvVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *GetWorkflowsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteSrvVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetWorkflowsResponse) CloneVT() *GetWorkflowsResponse { if m == nil { - return 0, nil + return (*GetWorkflowsResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &GetWorkflowsResponse{} + if rhs := m.Workflows; rhs != nil { + tmpContainer := make([]*Workflow, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Workflows = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteTabletsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetWorkflowsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *InitShardPrimaryRequest) CloneVT() *InitShardPrimaryRequest { if m == nil { - return nil, nil + return (*InitShardPrimaryRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &InitShardPrimaryRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + PrimaryElectTabletAlias: m.PrimaryElectTabletAlias.CloneVT(), + Force: m.Force, + WaitReplicasTimeout: m.WaitReplicasTimeout.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteTabletsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *InitShardPrimaryRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteTabletsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *InitShardPrimaryResponse) CloneVT() *InitShardPrimaryResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*InitShardPrimaryResponse)(nil) } - if m.AllowPrimary { - i-- - if m.AllowPrimary { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + r := &InitShardPrimaryResponse{} + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x10 + r.Events = tmpContainer } - if len(m.TabletAliases) > 0 { - for iNdEx := len(m.TabletAliases) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.TabletAliases[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *DeleteTabletsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *InitShardPrimaryResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *LaunchSchemaMigrationRequest) CloneVT() *LaunchSchemaMigrationRequest { if m == nil { - return nil, nil + return (*LaunchSchemaMigrationRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &LaunchSchemaMigrationRequest{ + Keyspace: m.Keyspace, + Uuid: m.Uuid, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *DeleteTabletsResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *LaunchSchemaMigrationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *DeleteTabletsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *LaunchSchemaMigrationResponse) CloneVT() *LaunchSchemaMigrationResponse { if m == nil { - return 0, nil + return (*LaunchSchemaMigrationResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &LaunchSchemaMigrationResponse{} + if rhs := m.RowsAffectedByShard; rhs != nil { + tmpContainer := make(map[string]uint64, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.RowsAffectedByShard = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *EmergencyReparentShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *LaunchSchemaMigrationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MoveTablesCreateRequest) CloneVT() *MoveTablesCreateRequest { if m == nil { - return nil, nil + return (*MoveTablesCreateRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &MoveTablesCreateRequest{ + Workflow: m.Workflow, + SourceKeyspace: m.SourceKeyspace, + TargetKeyspace: m.TargetKeyspace, + TabletSelectionPreference: m.TabletSelectionPreference, + AllTables: m.AllTables, + ExternalClusterName: m.ExternalClusterName, + SourceTimeZone: m.SourceTimeZone, + OnDdl: m.OnDdl, + StopAfterCopy: m.StopAfterCopy, + DropForeignKeys: m.DropForeignKeys, + DeferSecondaryKeys: m.DeferSecondaryKeys, + AutoStart: m.AutoStart, + NoRoutingRules: m.NoRoutingRules, + AtomicCopy: m.AtomicCopy, } - return dAtA[:n], nil + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if rhs := m.TabletTypes; rhs != nil { + tmpContainer := make([]topodata.TabletType, len(rhs)) + copy(tmpContainer, rhs) + r.TabletTypes = tmpContainer + } + if rhs := m.SourceShards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.SourceShards = tmpContainer + } + if rhs := m.IncludeTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.IncludeTables = tmpContainer + } + if rhs := m.ExcludeTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ExcludeTables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *EmergencyReparentShardRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *MoveTablesCreateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *EmergencyReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MoveTablesCreateResponse_TabletInfo) CloneVT() *MoveTablesCreateResponse_TabletInfo { if m == nil { - return 0, nil + return (*MoveTablesCreateResponse_TabletInfo)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &MoveTablesCreateResponse_TabletInfo{ + Tablet: m.Tablet.CloneVT(), + Created: m.Created, } - if m.PreventCrossCellPromotion { - i-- - if m.PreventCrossCellPromotion { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.WaitReplicasTimeout != nil { - size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a + return r +} + +func (m *MoveTablesCreateResponse_TabletInfo) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MoveTablesCreateResponse) CloneVT() *MoveTablesCreateResponse { + if m == nil { + return (*MoveTablesCreateResponse)(nil) } - if len(m.IgnoreReplicas) > 0 { - for iNdEx := len(m.IgnoreReplicas) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.IgnoreReplicas[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 - } + r := &MoveTablesCreateResponse{ + Summary: m.Summary, } - if m.NewPrimary != nil { - size, err := m.NewPrimary.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if rhs := m.Details; rhs != nil { + tmpContainer := make([]*MoveTablesCreateResponse_TabletInfo, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 + r.Details = tmpContainer } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *EmergencyReparentShardResponse) MarshalVT() (dAtA []byte, err error) { +func (m *MoveTablesCreateResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *MoveTablesCompleteRequest) CloneVT() *MoveTablesCompleteRequest { if m == nil { - return nil, nil + return (*MoveTablesCompleteRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &MoveTablesCompleteRequest{ + Workflow: m.Workflow, + TargetKeyspace: m.TargetKeyspace, + KeepData: m.KeepData, + KeepRoutingRules: m.KeepRoutingRules, + RenameTables: m.RenameTables, + DryRun: m.DryRun, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *EmergencyReparentShardResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *MoveTablesCompleteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *EmergencyReparentShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MoveTablesCompleteResponse) CloneVT() *MoveTablesCompleteResponse { if m == nil { - return 0, nil + return (*MoveTablesCompleteResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &MoveTablesCompleteResponse{ + Summary: m.Summary, } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 - } + if rhs := m.DryRunResults; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.DryRunResults = tmpContainer } - if m.PromotedPrimary != nil { - size, err := m.PromotedPrimary.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 + return r +} + +func (m *MoveTablesCompleteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PingTabletRequest) CloneVT() *PingTabletRequest { + if m == nil { + return (*PingTabletRequest)(nil) } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + r := &PingTabletRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsAppRequest) MarshalVT() (dAtA []byte, err error) { +func (m *PingTabletRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PingTabletResponse) CloneVT() *PingTabletResponse { if m == nil { - return nil, nil + return (*PingTabletResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &PingTabletResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ExecuteFetchAsAppRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *PingTabletResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsAppRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PlannedReparentShardRequest) CloneVT() *PlannedReparentShardRequest { if m == nil { - return 0, nil + return (*PlannedReparentShardRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &PlannedReparentShardRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + NewPrimary: m.NewPrimary.CloneVT(), + AvoidPrimary: m.AvoidPrimary.CloneVT(), + WaitReplicasTimeout: m.WaitReplicasTimeout.CloneVT(), } - if m.UsePool { - i-- - if m.UsePool { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.MaxRows != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxRows)) - i-- - dAtA[i] = 0x18 + return r +} + +func (m *PlannedReparentShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PlannedReparentShardResponse) CloneVT() *PlannedReparentShardResponse { + if m == nil { + return (*PlannedReparentShardResponse)(nil) } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarint(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0x12 + r := &PlannedReparentShardResponse{ + Keyspace: m.Keyspace, + Shard: m.Shard, + PromotedPrimary: m.PromotedPrimary.CloneVT(), } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r.Events = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsAppResponse) MarshalVT() (dAtA []byte, err error) { +func (m *PlannedReparentShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RebuildKeyspaceGraphRequest) CloneVT() *RebuildKeyspaceGraphRequest { if m == nil { - return nil, nil + return (*RebuildKeyspaceGraphRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RebuildKeyspaceGraphRequest{ + Keyspace: m.Keyspace, + AllowPartial: m.AllowPartial, } - return dAtA[:n], nil + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsAppResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RebuildKeyspaceGraphRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsAppResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RebuildKeyspaceGraphResponse) CloneVT() *RebuildKeyspaceGraphResponse { if m == nil { - return 0, nil + return (*RebuildKeyspaceGraphResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if m.Result != nil { - size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r := &RebuildKeyspaceGraphResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteFetchAsDBARequest) MarshalVT() (dAtA []byte, err error) { +func (m *RebuildKeyspaceGraphResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RebuildVSchemaGraphRequest) CloneVT() *RebuildVSchemaGraphRequest { if m == nil { - return nil, nil + return (*RebuildVSchemaGraphRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RebuildVSchemaGraphRequest{} + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteFetchAsDBARequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RebuildVSchemaGraphRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsDBARequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RebuildVSchemaGraphResponse) CloneVT() *RebuildVSchemaGraphResponse { if m == nil { - return 0, nil + return (*RebuildVSchemaGraphResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RebuildVSchemaGraphResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.ReloadSchema { - i-- - if m.ReloadSchema { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 + return r +} + +func (m *RebuildVSchemaGraphResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshStateRequest) CloneVT() *RefreshStateRequest { + if m == nil { + return (*RefreshStateRequest)(nil) } - if m.DisableBinlogs { - i-- - if m.DisableBinlogs { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + r := &RefreshStateRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - if m.MaxRows != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxRows)) - i-- - dAtA[i] = 0x18 - } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarint(dAtA, i, uint64(len(m.Query))) - i-- - dAtA[i] = 0x12 - } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteFetchAsDBAResponse) MarshalVT() (dAtA []byte, err error) { +func (m *RefreshStateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshStateResponse) CloneVT() *RefreshStateResponse { if m == nil { - return nil, nil + return (*RefreshStateResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RefreshStateResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ExecuteFetchAsDBAResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RefreshStateResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteFetchAsDBAResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RefreshStateByShardRequest) CloneVT() *RefreshStateByShardRequest { if m == nil { - return 0, nil + return (*RefreshStateByShardRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RefreshStateByShardRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, } - if m.Result != nil { - size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteHookRequest) MarshalVT() (dAtA []byte, err error) { +func (m *RefreshStateByShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RefreshStateByShardResponse) CloneVT() *RefreshStateByShardResponse { if m == nil { - return nil, nil + return (*RefreshStateByShardResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RefreshStateByShardResponse{ + IsPartialRefresh: m.IsPartialRefresh, + PartialRefreshDetails: m.PartialRefreshDetails, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *ExecuteHookRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RefreshStateByShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteHookRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReloadSchemaRequest) CloneVT() *ReloadSchemaRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*ReloadSchemaRequest)(nil) } - if m.TabletHookRequest != nil { - size, err := m.TabletHookRequest.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 + r := &ReloadSchemaRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *ExecuteHookResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ReloadSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemaResponse) CloneVT() *ReloadSchemaResponse { if m == nil { - return nil, nil + return (*ReloadSchemaResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ReloadSchemaResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *ExecuteHookResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ReloadSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *ExecuteHookResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReloadSchemaKeyspaceRequest) CloneVT() *ReloadSchemaKeyspaceRequest { if m == nil { - return 0, nil + return (*ReloadSchemaKeyspaceRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReloadSchemaKeyspaceRequest{ + Keyspace: m.Keyspace, + WaitPosition: m.WaitPosition, + IncludePrimary: m.IncludePrimary, + Concurrency: m.Concurrency, } - if m.HookResult != nil { - size, err := m.HookResult.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *FindAllShardsInKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ReloadSchemaKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemaKeyspaceResponse) CloneVT() *ReloadSchemaKeyspaceResponse { if m == nil { - return nil, nil + return (*ReloadSchemaKeyspaceResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ReloadSchemaKeyspaceResponse{} + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *FindAllShardsInKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ReloadSchemaKeyspaceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *FindAllShardsInKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReloadSchemaShardRequest) CloneVT() *ReloadSchemaShardRequest { if m == nil { - return 0, nil + return (*ReloadSchemaShardRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReloadSchemaShardRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + WaitPosition: m.WaitPosition, + IncludePrimary: m.IncludePrimary, + Concurrency: m.Concurrency, } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *FindAllShardsInKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ReloadSchemaShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReloadSchemaShardResponse) CloneVT() *ReloadSchemaShardResponse { if m == nil { - return nil, nil + return (*ReloadSchemaShardResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ReloadSchemaShardResponse{} + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*logutil.Event, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *FindAllShardsInKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ReloadSchemaShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *FindAllShardsInKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RemoveBackupRequest) CloneVT() *RemoveBackupRequest { if m == nil { - return 0, nil + return (*RemoveBackupRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RemoveBackupRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Name: m.Name, } - if len(m.Shards) > 0 { - for k := range m.Shards { - v := m.Shards[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetBackupsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *RemoveBackupRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RemoveBackupResponse) CloneVT() *RemoveBackupResponse { if m == nil { - return nil, nil + return (*RemoveBackupResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RemoveBackupResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetBackupsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RemoveBackupResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetBackupsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RemoveKeyspaceCellRequest) CloneVT() *RemoveKeyspaceCellRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if m.DetailedLimit != 0 { - i = encodeVarint(dAtA, i, uint64(m.DetailedLimit)) - i-- - dAtA[i] = 0x28 - } - if m.Detailed { - i-- - if m.Detailed { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.Limit != 0 { - i = encodeVarint(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x18 + return (*RemoveKeyspaceCellRequest)(nil) } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 + r := &RemoveKeyspaceCellRequest{ + Keyspace: m.Keyspace, + Cell: m.Cell, + Force: m.Force, + Recursive: m.Recursive, } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetBackupsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *RemoveKeyspaceCellRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RemoveKeyspaceCellResponse) CloneVT() *RemoveKeyspaceCellResponse { if m == nil { - return nil, nil + return (*RemoveKeyspaceCellResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RemoveKeyspaceCellResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetBackupsResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RemoveKeyspaceCellResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetBackupsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RemoveShardCellRequest) CloneVT() *RemoveShardCellRequest { if m == nil { - return 0, nil + return (*RemoveShardCellRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RemoveShardCellRequest{ + Keyspace: m.Keyspace, + ShardName: m.ShardName, + Cell: m.Cell, + Force: m.Force, + Recursive: m.Recursive, } - if len(m.Backups) > 0 { - for iNdEx := len(m.Backups) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Backups[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetCellInfoRequest) MarshalVT() (dAtA []byte, err error) { +func (m *RemoveShardCellRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RemoveShardCellResponse) CloneVT() *RemoveShardCellResponse { if m == nil { - return nil, nil + return (*RemoveShardCellResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RemoveShardCellResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetCellInfoRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RemoveShardCellResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetCellInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReparentTabletRequest) CloneVT() *ReparentTabletRequest { if m == nil { - return 0, nil + return (*ReparentTabletRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReparentTabletRequest{ + Tablet: m.Tablet.CloneVT(), } - if len(m.Cell) > 0 { - i -= len(m.Cell) - copy(dAtA[i:], m.Cell) - i = encodeVarint(dAtA, i, uint64(len(m.Cell))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetCellInfoResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ReparentTabletRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReparentTabletResponse) CloneVT() *ReparentTabletResponse { if m == nil { - return nil, nil + return (*ReparentTabletResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ReparentTabletResponse{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Primary: m.Primary.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetCellInfoResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ReparentTabletResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReshardCreateRequest) CloneVT() *ReshardCreateRequest { if m == nil { - return 0, nil + return (*ReshardCreateRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ReshardCreateRequest{ + Workflow: m.Workflow, + Keyspace: m.Keyspace, + TabletSelectionPreference: m.TabletSelectionPreference, + SkipSchemaCopy: m.SkipSchemaCopy, + OnDdl: m.OnDdl, + StopAfterCopy: m.StopAfterCopy, + DeferSecondaryKeys: m.DeferSecondaryKeys, + AutoStart: m.AutoStart, } - if m.CellInfo != nil { - size, err := m.CellInfo.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if rhs := m.SourceShards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.SourceShards = tmpContainer } - return len(dAtA) - i, nil + if rhs := m.TargetShards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.TargetShards = tmpContainer + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if rhs := m.TabletTypes; rhs != nil { + tmpContainer := make([]topodata.TabletType, len(rhs)) + copy(tmpContainer, rhs) + r.TabletTypes = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetCellInfoNamesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ReshardCreateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RestoreFromBackupRequest) CloneVT() *RestoreFromBackupRequest { if m == nil { - return nil, nil + return (*RestoreFromBackupRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RestoreFromBackupRequest{ + TabletAlias: m.TabletAlias.CloneVT(), + BackupTime: m.BackupTime.CloneVT(), + RestoreToPos: m.RestoreToPos, + DryRun: m.DryRun, + RestoreToTimestamp: m.RestoreToTimestamp.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetCellInfoNamesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RestoreFromBackupRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetCellInfoNamesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RestoreFromBackupResponse) CloneVT() *RestoreFromBackupResponse { if m == nil { - return 0, nil + return (*RestoreFromBackupResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RestoreFromBackupResponse{ + TabletAlias: m.TabletAlias.CloneVT(), + Keyspace: m.Keyspace, + Shard: m.Shard, + Event: m.Event.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetCellInfoNamesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *RestoreFromBackupResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RetrySchemaMigrationRequest) CloneVT() *RetrySchemaMigrationRequest { if m == nil { - return nil, nil + return (*RetrySchemaMigrationRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RetrySchemaMigrationRequest{ + Keyspace: m.Keyspace, + Uuid: m.Uuid, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetCellInfoNamesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RetrySchemaMigrationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetCellInfoNamesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RetrySchemaMigrationResponse) CloneVT() *RetrySchemaMigrationResponse { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*RetrySchemaMigrationResponse)(nil) } - if len(m.Names) > 0 { - for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Names[iNdEx]) - copy(dAtA[i:], m.Names[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Names[iNdEx]))) - i-- - dAtA[i] = 0xa + r := &RetrySchemaMigrationResponse{} + if rhs := m.RowsAffectedByShard; rhs != nil { + tmpContainer := make(map[string]uint64, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v } + r.RowsAffectedByShard = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetCellsAliasesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *RetrySchemaMigrationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RunHealthCheckRequest) CloneVT() *RunHealthCheckRequest { if m == nil { - return nil, nil + return (*RunHealthCheckRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &RunHealthCheckRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetCellsAliasesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *RunHealthCheckRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetCellsAliasesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *RunHealthCheckResponse) CloneVT() *RunHealthCheckResponse { if m == nil { - return 0, nil + return (*RunHealthCheckResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &RunHealthCheckResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetCellsAliasesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *RunHealthCheckResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetKeyspaceDurabilityPolicyRequest) CloneVT() *SetKeyspaceDurabilityPolicyRequest { if m == nil { - return nil, nil + return (*SetKeyspaceDurabilityPolicyRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SetKeyspaceDurabilityPolicyRequest{ + Keyspace: m.Keyspace, + DurabilityPolicy: m.DurabilityPolicy, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetCellsAliasesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SetKeyspaceDurabilityPolicyRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetCellsAliasesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetKeyspaceDurabilityPolicyResponse) CloneVT() *SetKeyspaceDurabilityPolicyResponse { if m == nil { - return 0, nil + return (*SetKeyspaceDurabilityPolicyResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SetKeyspaceDurabilityPolicyResponse{ + Keyspace: m.Keyspace.CloneVT(), } - if len(m.Aliases) > 0 { - for k := range m.Aliases { - v := m.Aliases[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetFullStatusRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SetKeyspaceDurabilityPolicyResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetKeyspaceServedFromRequest) CloneVT() *SetKeyspaceServedFromRequest { if m == nil { - return nil, nil + return (*SetKeyspaceServedFromRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SetKeyspaceServedFromRequest{ + Keyspace: m.Keyspace, + TabletType: m.TabletType, + Remove: m.Remove, + SourceKeyspace: m.SourceKeyspace, } - return dAtA[:n], nil + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetFullStatusRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SetKeyspaceServedFromRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetFullStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetKeyspaceServedFromResponse) CloneVT() *SetKeyspaceServedFromResponse { if m == nil { - return 0, nil + return (*SetKeyspaceServedFromResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SetKeyspaceServedFromResponse{ + Keyspace: m.Keyspace.CloneVT(), } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetFullStatusResponse) MarshalVT() (dAtA []byte, err error) { +func (m *SetKeyspaceServedFromResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetKeyspaceShardingInfoRequest) CloneVT() *SetKeyspaceShardingInfoRequest { if m == nil { - return nil, nil + return (*SetKeyspaceShardingInfoRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SetKeyspaceShardingInfoRequest{ + Keyspace: m.Keyspace, + Force: m.Force, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetFullStatusResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SetKeyspaceShardingInfoRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetFullStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetKeyspaceShardingInfoResponse) CloneVT() *SetKeyspaceShardingInfoResponse { if m == nil { - return 0, nil + return (*SetKeyspaceShardingInfoResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SetKeyspaceShardingInfoResponse{ + Keyspace: m.Keyspace.CloneVT(), } - if m.Status != nil { - size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetKeyspacesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SetKeyspaceShardingInfoResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetShardIsPrimaryServingRequest) CloneVT() *SetShardIsPrimaryServingRequest { if m == nil { - return nil, nil + return (*SetShardIsPrimaryServingRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SetShardIsPrimaryServingRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + IsServing: m.IsServing, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetKeyspacesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SetShardIsPrimaryServingRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetKeyspacesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetShardIsPrimaryServingResponse) CloneVT() *SetShardIsPrimaryServingResponse { if m == nil { - return 0, nil + return (*SetShardIsPrimaryServingResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SetShardIsPrimaryServingResponse{ + Shard: m.Shard.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetKeyspacesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *SetShardIsPrimaryServingResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetShardTabletControlRequest) CloneVT() *SetShardTabletControlRequest { if m == nil { - return nil, nil + return (*SetShardTabletControlRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SetShardTabletControlRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + TabletType: m.TabletType, + DisableQueryService: m.DisableQueryService, + Remove: m.Remove, } - return dAtA[:n], nil + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if rhs := m.DeniedTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.DeniedTables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetKeyspacesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SetShardTabletControlRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetKeyspacesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetShardTabletControlResponse) CloneVT() *SetShardTabletControlResponse { if m == nil { - return 0, nil + return (*SetShardTabletControlResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SetShardTabletControlResponse{ + Shard: m.Shard.CloneVT(), } - if len(m.Keyspaces) > 0 { - for iNdEx := len(m.Keyspaces) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Keyspaces[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SetShardTabletControlResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SetWritableRequest) CloneVT() *SetWritableRequest { if m == nil { - return nil, nil + return (*SetWritableRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SetWritableRequest{ + TabletAlias: m.TabletAlias.CloneVT(), + Writable: m.Writable, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SetWritableRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SetWritableResponse) CloneVT() *SetWritableResponse { if m == nil { - return 0, nil + return (*SetWritableResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SetWritableResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + return r +} + +func (m *SetWritableResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardReplicationAddRequest) CloneVT() *ShardReplicationAddRequest { + if m == nil { + return (*ShardReplicationAddRequest)(nil) } - return len(dAtA) - i, nil + r := &ShardReplicationAddRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + TabletAlias: m.TabletAlias.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ShardReplicationAddRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardReplicationAddResponse) CloneVT() *ShardReplicationAddResponse { if m == nil { - return nil, nil + return (*ShardReplicationAddResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ShardReplicationAddResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ShardReplicationAddResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ShardReplicationFixRequest) CloneVT() *ShardReplicationFixRequest { if m == nil { - return 0, nil + return (*ShardReplicationFixRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ShardReplicationFixRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Cell: m.Cell, } - if m.Keyspace != nil { - size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetPermissionsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ShardReplicationFixRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardReplicationFixResponse) CloneVT() *ShardReplicationFixResponse { if m == nil { - return nil, nil + return (*ShardReplicationFixResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ShardReplicationFixResponse{ + Error: m.Error.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetPermissionsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ShardReplicationFixResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetPermissionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ShardReplicationPositionsRequest) CloneVT() *ShardReplicationPositionsRequest { if m == nil { - return 0, nil + return (*ShardReplicationPositionsRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ShardReplicationPositionsRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetPermissionsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ShardReplicationPositionsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardReplicationPositionsResponse) CloneVT() *ShardReplicationPositionsResponse { if m == nil { - return nil, nil + return (*ShardReplicationPositionsResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ShardReplicationPositionsResponse{} + if rhs := m.ReplicationStatuses; rhs != nil { + tmpContainer := make(map[string]*replicationdata.Status, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ReplicationStatuses = tmpContainer } - return dAtA[:n], nil + if rhs := m.TabletMap; rhs != nil { + tmpContainer := make(map[string]*topodata.Tablet, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.TabletMap = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetPermissionsResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ShardReplicationPositionsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetPermissionsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ShardReplicationRemoveRequest) CloneVT() *ShardReplicationRemoveRequest { if m == nil { - return 0, nil + return (*ShardReplicationRemoveRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ShardReplicationRemoveRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + TabletAlias: m.TabletAlias.CloneVT(), } - if m.Permissions != nil { - size, err := m.Permissions.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ShardReplicationRemoveRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ShardReplicationRemoveResponse) CloneVT() *ShardReplicationRemoveResponse { if m == nil { - return nil, nil + return (*ShardReplicationRemoveResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ShardReplicationRemoveResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ShardReplicationRemoveResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SleepTabletRequest) CloneVT() *SleepTabletRequest { if m == nil { - return 0, nil + return (*SleepTabletRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SleepTabletRequest{ + TabletAlias: m.TabletAlias.CloneVT(), + Duration: m.Duration.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetRoutingRulesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *SleepTabletRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SleepTabletResponse) CloneVT() *SleepTabletResponse { if m == nil { - return nil, nil + return (*SleepTabletResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SleepTabletResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SleepTabletResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SourceShardAddRequest) CloneVT() *SourceShardAddRequest { if m == nil { - return 0, nil + return (*SourceShardAddRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SourceShardAddRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Uid: m.Uid, + SourceKeyspace: m.SourceKeyspace, + SourceShard: m.SourceShard, + KeyRange: m.KeyRange.CloneVT(), } - if m.RoutingRules != nil { - size, err := m.RoutingRules.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if rhs := m.Tables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Tables = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *SourceShardAddRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SourceShardAddResponse) CloneVT() *SourceShardAddResponse { if m == nil { - return nil, nil + return (*SourceShardAddResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &SourceShardAddResponse{ + Shard: m.Shard.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *SourceShardAddResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SourceShardDeleteRequest) CloneVT() *SourceShardDeleteRequest { if m == nil { - return 0, nil + return (*SourceShardDeleteRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &SourceShardDeleteRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + Uid: m.Uid, } - if m.TableSchemaOnly { - i-- - if m.TableSchemaOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.TableSizesOnly { - i-- - if m.TableSizesOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 + return r +} + +func (m *SourceShardDeleteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *SourceShardDeleteResponse) CloneVT() *SourceShardDeleteResponse { + if m == nil { + return (*SourceShardDeleteResponse)(nil) } - if m.TableNamesOnly { - i-- - if m.TableNamesOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 + r := &SourceShardDeleteResponse{ + Shard: m.Shard.CloneVT(), } - if m.IncludeViews { - i-- - if m.IncludeViews { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if len(m.ExcludeTables) > 0 { - for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ExcludeTables[iNdEx]) - copy(dAtA[i:], m.ExcludeTables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) - i-- - dAtA[i] = 0x1a - } + return r +} + +func (m *SourceShardDeleteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StartReplicationRequest) CloneVT() *StartReplicationRequest { + if m == nil { + return (*StartReplicationRequest)(nil) } - if len(m.Tables) > 0 { - for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Tables[iNdEx]) - copy(dAtA[i:], m.Tables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + r := &StartReplicationRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *StartReplicationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StartReplicationResponse) CloneVT() *StartReplicationResponse { if m == nil { - return nil, nil + return (*StartReplicationResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &StartReplicationResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *StartReplicationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *StopReplicationRequest) CloneVT() *StopReplicationRequest { if m == nil { - return 0, nil + return (*StopReplicationRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &StopReplicationRequest{ + TabletAlias: m.TabletAlias.CloneVT(), } - if m.Schema != nil { - size, err := m.Schema.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *StopReplicationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StopReplicationResponse) CloneVT() *StopReplicationResponse { if m == nil { - return nil, nil + return (*StopReplicationResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &StopReplicationResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return dAtA[:n], nil + return r } -func (m *GetShardRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *StopReplicationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *TabletExternallyReparentedRequest) CloneVT() *TabletExternallyReparentedRequest { if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + return (*TabletExternallyReparentedRequest)(nil) } - if len(m.ShardName) > 0 { - i -= len(m.ShardName) - copy(dAtA[i:], m.ShardName) - i = encodeVarint(dAtA, i, uint64(len(m.ShardName))) - i-- - dAtA[i] = 0x12 + r := &TabletExternallyReparentedRequest{ + Tablet: m.Tablet.CloneVT(), } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetShardResponse) MarshalVT() (dAtA []byte, err error) { +func (m *TabletExternallyReparentedRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TabletExternallyReparentedResponse) CloneVT() *TabletExternallyReparentedResponse { if m == nil { - return nil, nil + return (*TabletExternallyReparentedResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &TabletExternallyReparentedResponse{ + Keyspace: m.Keyspace, + Shard: m.Shard, + NewPrimary: m.NewPrimary.CloneVT(), + OldPrimary: m.OldPrimary.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetShardResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *TabletExternallyReparentedResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UpdateCellInfoRequest) CloneVT() *UpdateCellInfoRequest { if m == nil { - return 0, nil + return (*UpdateCellInfoRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &UpdateCellInfoRequest{ + Name: m.Name, + CellInfo: m.CellInfo.CloneVT(), } - if m.Shard != nil { - size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetShardRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *UpdateCellInfoRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UpdateCellInfoResponse) CloneVT() *UpdateCellInfoResponse { if m == nil { - return nil, nil + return (*UpdateCellInfoResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &UpdateCellInfoResponse{ + Name: m.Name, + CellInfo: m.CellInfo.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetShardRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *UpdateCellInfoResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetShardRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UpdateCellsAliasRequest) CloneVT() *UpdateCellsAliasRequest { if m == nil { - return 0, nil + return (*UpdateCellsAliasRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &UpdateCellsAliasRequest{ + Name: m.Name, + CellsAlias: m.CellsAlias.CloneVT(), } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetShardRoutingRulesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *UpdateCellsAliasRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UpdateCellsAliasResponse) CloneVT() *UpdateCellsAliasResponse { if m == nil { - return nil, nil + return (*UpdateCellsAliasResponse)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &UpdateCellsAliasResponse{ + Name: m.Name, + CellsAlias: m.CellsAlias.CloneVT(), } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetShardRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *UpdateCellsAliasResponse) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetShardRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ValidateRequest) CloneVT() *ValidateRequest { if m == nil { - return 0, nil + return (*ValidateRequest)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ValidateRequest{ + PingTablets: m.PingTablets, } - if m.ShardRoutingRules != nil { - size, err := m.ShardRoutingRules.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ValidateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateResponse) CloneVT() *ValidateResponse { + if m == nil { + return (*ValidateResponse)(nil) + } + r := &ValidateResponse{} + if rhs := m.Results; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Results = tmpContainer + } + if rhs := m.ResultsByKeyspace; rhs != nil { + tmpContainer := make(map[string]*ValidateKeyspaceResponse, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + r.ResultsByKeyspace = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvKeyspaceNamesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ValidateResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateKeyspaceRequest) CloneVT() *ValidateKeyspaceRequest { if m == nil { - return nil, nil + return (*ValidateKeyspaceRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ValidateKeyspaceRequest{ + Keyspace: m.Keyspace, + PingTablets: m.PingTablets, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvKeyspaceNamesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ValidateKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSrvKeyspaceNamesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ValidateKeyspaceResponse) CloneVT() *ValidateKeyspaceResponse { if m == nil { - return 0, nil + return (*ValidateKeyspaceResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ValidateKeyspaceResponse{} + if rhs := m.Results; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Results = tmpContainer } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0xa + if rhs := m.ResultsByShard; rhs != nil { + tmpContainer := make(map[string]*ValidateShardResponse, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } + r.ResultsByShard = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvKeyspaceNamesResponse_NameList) MarshalVT() (dAtA []byte, err error) { +func (m *ValidateKeyspaceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateSchemaKeyspaceRequest) CloneVT() *ValidateSchemaKeyspaceRequest { if m == nil { - return nil, nil + return (*ValidateSchemaKeyspaceRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ValidateSchemaKeyspaceRequest{ + Keyspace: m.Keyspace, + IncludeViews: m.IncludeViews, + SkipNoPrimary: m.SkipNoPrimary, + IncludeVschema: m.IncludeVschema, } - return dAtA[:n], nil + if rhs := m.ExcludeTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ExcludeTables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvKeyspaceNamesResponse_NameList) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ValidateSchemaKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSrvKeyspaceNamesResponse_NameList) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ValidateSchemaKeyspaceResponse) CloneVT() *ValidateSchemaKeyspaceResponse { if m == nil { - return 0, nil + return (*ValidateSchemaKeyspaceResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ValidateSchemaKeyspaceResponse{} + if rhs := m.Results; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Results = tmpContainer } - if len(m.Names) > 0 { - for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Names[iNdEx]) - copy(dAtA[i:], m.Names[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Names[iNdEx]))) - i-- - dAtA[i] = 0xa + if rhs := m.ResultsByShard; rhs != nil { + tmpContainer := make(map[string]*ValidateShardResponse, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } + r.ResultsByShard = tmpContainer } - return len(dAtA) - i, nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvKeyspaceNamesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ValidateSchemaKeyspaceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateShardRequest) CloneVT() *ValidateShardRequest { if m == nil { - return nil, nil + return (*ValidateShardRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ValidateShardRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + PingTablets: m.PingTablets, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvKeyspaceNamesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ValidateShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSrvKeyspaceNamesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ValidateShardResponse) CloneVT() *ValidateShardResponse { if m == nil { - return 0, nil + return (*ValidateShardResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ValidateShardResponse{} + if rhs := m.Results; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Results = tmpContainer } - if len(m.Names) > 0 { - for k := range m.Names { - v := m.Names[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetSrvKeyspacesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ValidateShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateVersionKeyspaceRequest) CloneVT() *ValidateVersionKeyspaceRequest { if m == nil { - return nil, nil + return (*ValidateVersionKeyspaceRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ValidateVersionKeyspaceRequest{ + Keyspace: m.Keyspace, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvKeyspacesRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ValidateVersionKeyspaceRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSrvKeyspacesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ValidateVersionKeyspaceResponse) CloneVT() *ValidateVersionKeyspaceResponse { if m == nil { - return 0, nil + return (*ValidateVersionKeyspaceResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ValidateVersionKeyspaceResponse{} + if rhs := m.Results; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Results = tmpContainer } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x12 + if rhs := m.ResultsByShard; rhs != nil { + tmpContainer := make(map[string]*ValidateShardResponse, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } + r.ResultsByShard = tmpContainer } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *GetSrvKeyspacesResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ValidateVersionKeyspaceResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateVersionShardRequest) CloneVT() *ValidateVersionShardRequest { if m == nil { - return nil, nil + return (*ValidateVersionShardRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ValidateVersionShardRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, } - return dAtA[:n], nil + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvKeyspacesResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ValidateVersionShardRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *GetSrvKeyspacesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ValidateVersionShardResponse) CloneVT() *ValidateVersionShardResponse { if m == nil { - return 0, nil + return (*ValidateVersionShardResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ValidateVersionShardResponse{} + if rhs := m.Results; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Results = tmpContainer } - if len(m.SrvKeyspaces) > 0 { - for k := range m.SrvKeyspaces { - v := m.SrvKeyspaces[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *UpdateThrottlerConfigRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ValidateVersionShardResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ValidateVSchemaRequest) CloneVT() *ValidateVSchemaRequest { if m == nil { - return nil, nil + return (*ValidateVSchemaRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &ValidateVSchemaRequest{ + Keyspace: m.Keyspace, + IncludeViews: m.IncludeViews, } - return dAtA[:n], nil + if rhs := m.Shards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Shards = tmpContainer + } + if rhs := m.ExcludeTables; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ExcludeTables = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *UpdateThrottlerConfigRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *ValidateVSchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *UpdateThrottlerConfigRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ValidateVSchemaResponse) CloneVT() *ValidateVSchemaResponse { if m == nil { - return 0, nil + return (*ValidateVSchemaResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &ValidateVSchemaResponse{} + if rhs := m.Results; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Results = tmpContainer } - if m.CheckAsCheckShard { - i-- - if m.CheckAsCheckShard { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if rhs := m.ResultsByShard; rhs != nil { + tmpContainer := make(map[string]*ValidateShardResponse, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x40 + r.ResultsByShard = tmpContainer } - if m.CheckAsCheckSelf { - i-- - if m.CheckAsCheckSelf { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ValidateVSchemaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowDeleteRequest) CloneVT() *WorkflowDeleteRequest { + if m == nil { + return (*WorkflowDeleteRequest)(nil) + } + r := &WorkflowDeleteRequest{ + Keyspace: m.Keyspace, + Workflow: m.Workflow, + KeepData: m.KeepData, + KeepRoutingRules: m.KeepRoutingRules, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *WorkflowDeleteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowDeleteResponse_TabletInfo) CloneVT() *WorkflowDeleteResponse_TabletInfo { + if m == nil { + return (*WorkflowDeleteResponse_TabletInfo)(nil) + } + r := &WorkflowDeleteResponse_TabletInfo{ + Tablet: m.Tablet.CloneVT(), + Deleted: m.Deleted, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *WorkflowDeleteResponse_TabletInfo) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowDeleteResponse) CloneVT() *WorkflowDeleteResponse { + if m == nil { + return (*WorkflowDeleteResponse)(nil) + } + r := &WorkflowDeleteResponse{ + Summary: m.Summary, + } + if rhs := m.Details; rhs != nil { + tmpContainer := make([]*WorkflowDeleteResponse_TabletInfo, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x38 + r.Details = tmpContainer } - if m.CustomQuerySet { - i-- - if m.CustomQuerySet { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *WorkflowDeleteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowStatusRequest) CloneVT() *WorkflowStatusRequest { + if m == nil { + return (*WorkflowStatusRequest)(nil) + } + r := &WorkflowStatusRequest{ + Keyspace: m.Keyspace, + Workflow: m.Workflow, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *WorkflowStatusRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowStatusResponse_TableCopyState) CloneVT() *WorkflowStatusResponse_TableCopyState { + if m == nil { + return (*WorkflowStatusResponse_TableCopyState)(nil) + } + r := &WorkflowStatusResponse_TableCopyState{ + RowsCopied: m.RowsCopied, + RowsTotal: m.RowsTotal, + RowsPercentage: m.RowsPercentage, + BytesCopied: m.BytesCopied, + BytesTotal: m.BytesTotal, + BytesPercentage: m.BytesPercentage, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *WorkflowStatusResponse_TableCopyState) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowStatusResponse_ShardStreamState) CloneVT() *WorkflowStatusResponse_ShardStreamState { + if m == nil { + return (*WorkflowStatusResponse_ShardStreamState)(nil) + } + r := &WorkflowStatusResponse_ShardStreamState{ + Id: m.Id, + Tablet: m.Tablet.CloneVT(), + SourceShard: m.SourceShard, + Position: m.Position, + Status: m.Status, + Info: m.Info, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *WorkflowStatusResponse_ShardStreamState) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowStatusResponse_ShardStreams) CloneVT() *WorkflowStatusResponse_ShardStreams { + if m == nil { + return (*WorkflowStatusResponse_ShardStreams)(nil) + } + r := &WorkflowStatusResponse_ShardStreams{} + if rhs := m.Streams; rhs != nil { + tmpContainer := make([]*WorkflowStatusResponse_ShardStreamState, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x30 + r.Streams = tmpContainer } - if len(m.CustomQuery) > 0 { - i -= len(m.CustomQuery) - copy(dAtA[i:], m.CustomQuery) - i = encodeVarint(dAtA, i, uint64(len(m.CustomQuery))) - i-- - dAtA[i] = 0x2a + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - if m.Threshold != 0 { - i -= 8 - binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Threshold)))) - i-- - dAtA[i] = 0x21 + return r +} + +func (m *WorkflowStatusResponse_ShardStreams) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowStatusResponse) CloneVT() *WorkflowStatusResponse { + if m == nil { + return (*WorkflowStatusResponse)(nil) } - if m.Disable { - i-- - if m.Disable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + r := &WorkflowStatusResponse{} + if rhs := m.TableCopyState; rhs != nil { + tmpContainer := make(map[string]*WorkflowStatusResponse_TableCopyState, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x18 + r.TableCopyState = tmpContainer } - if m.Enable { - i-- - if m.Enable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if rhs := m.ShardStreams; rhs != nil { + tmpContainer := make(map[string]*WorkflowStatusResponse_ShardStreams, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() } - i-- - dAtA[i] = 0x10 + r.ShardStreams = tmpContainer } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) } - return len(dAtA) - i, nil + return r } -func (m *UpdateThrottlerConfigResponse) MarshalVT() (dAtA []byte, err error) { +func (m *WorkflowStatusResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowSwitchTrafficRequest) CloneVT() *WorkflowSwitchTrafficRequest { if m == nil { - return nil, nil + return (*WorkflowSwitchTrafficRequest)(nil) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + r := &WorkflowSwitchTrafficRequest{ + Keyspace: m.Keyspace, + Workflow: m.Workflow, + MaxReplicationLagAllowed: m.MaxReplicationLagAllowed.CloneVT(), + EnableReverseReplication: m.EnableReverseReplication, + Direction: m.Direction, + Timeout: m.Timeout.CloneVT(), + DryRun: m.DryRun, + InitializeTargetSequences: m.InitializeTargetSequences, } - return dAtA[:n], nil + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if rhs := m.TabletTypes; rhs != nil { + tmpContainer := make([]topodata.TabletType, len(rhs)) + copy(tmpContainer, rhs) + r.TabletTypes = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *UpdateThrottlerConfigResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) +func (m *WorkflowSwitchTrafficRequest) CloneMessageVT() proto.Message { + return m.CloneVT() } -func (m *UpdateThrottlerConfigResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *WorkflowSwitchTrafficResponse) CloneVT() *WorkflowSwitchTrafficResponse { if m == nil { - return 0, nil + return (*WorkflowSwitchTrafficResponse)(nil) } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + r := &WorkflowSwitchTrafficResponse{ + Summary: m.Summary, + StartState: m.StartState, + CurrentState: m.CurrentState, } - return len(dAtA) - i, nil + if rhs := m.DryRunResults; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.DryRunResults = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r } -func (m *GetSrvVSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *WorkflowSwitchTrafficResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowUpdateRequest) CloneVT() *WorkflowUpdateRequest { + if m == nil { + return (*WorkflowUpdateRequest)(nil) + } + r := &WorkflowUpdateRequest{ + Keyspace: m.Keyspace, + TabletRequest: m.TabletRequest.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *WorkflowUpdateRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowUpdateResponse_TabletInfo) CloneVT() *WorkflowUpdateResponse_TabletInfo { + if m == nil { + return (*WorkflowUpdateResponse_TabletInfo)(nil) + } + r := &WorkflowUpdateResponse_TabletInfo{ + Tablet: m.Tablet.CloneVT(), + Changed: m.Changed, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *WorkflowUpdateResponse_TabletInfo) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *WorkflowUpdateResponse) CloneVT() *WorkflowUpdateResponse { + if m == nil { + return (*WorkflowUpdateResponse)(nil) + } + r := &WorkflowUpdateResponse{ + Summary: m.Summary, + } + if rhs := m.Details; rhs != nil { + tmpContainer := make([]*WorkflowUpdateResponse_TabletInfo, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Details = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *WorkflowUpdateResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteVtctlCommandRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4725,12 +4709,12 @@ func (m *GetSrvVSchemaRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetSrvVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteVtctlCommandRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetSrvVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteVtctlCommandRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4742,17 +4726,24 @@ func (m *GetSrvVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Cell) > 0 { - i -= len(m.Cell) - copy(dAtA[i:], m.Cell) - i = encodeVarint(dAtA, i, uint64(len(m.Cell))) + if m.ActionTimeout != 0 { + i = encodeVarint(dAtA, i, uint64(m.ActionTimeout)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x10 + } + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0xa + } } return len(dAtA) - i, nil } -func (m *GetSrvVSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteVtctlCommandResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4765,12 +4756,12 @@ func (m *GetSrvVSchemaResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetSrvVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteVtctlCommandResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetSrvVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteVtctlCommandResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4782,8 +4773,8 @@ func (m *GetSrvVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.SrvVSchema != nil { - size, err := m.SrvVSchema.MarshalToSizedBufferVT(dAtA[:i]) + if m.Event != nil { + size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -4795,7 +4786,7 @@ func (m *GetSrvVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *GetSrvVSchemasRequest) MarshalVT() (dAtA []byte, err error) { +func (m *TableMaterializeSettings) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4808,12 +4799,12 @@ func (m *GetSrvVSchemasRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetSrvVSchemasRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *TableMaterializeSettings) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetSrvVSchemasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *TableMaterializeSettings) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4825,19 +4816,31 @@ func (m *GetSrvVSchemasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + if len(m.CreateDdl) > 0 { + i -= len(m.CreateDdl) + copy(dAtA[i:], m.CreateDdl) + i = encodeVarint(dAtA, i, uint64(len(m.CreateDdl))) + i-- + dAtA[i] = 0x1a + } + if len(m.SourceExpression) > 0 { + i -= len(m.SourceExpression) + copy(dAtA[i:], m.SourceExpression) + i = encodeVarint(dAtA, i, uint64(len(m.SourceExpression))) + i-- + dAtA[i] = 0x12 + } + if len(m.TargetTable) > 0 { + i -= len(m.TargetTable) + copy(dAtA[i:], m.TargetTable) + i = encodeVarint(dAtA, i, uint64(len(m.TargetTable))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *GetSrvVSchemasResponse) MarshalVT() (dAtA []byte, err error) { +func (m *MaterializeSettings) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4850,12 +4853,12 @@ func (m *GetSrvVSchemasResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetSrvVSchemasResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *MaterializeSettings) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetSrvVSchemasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *MaterializeSettings) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4867,32 +4870,136 @@ func (m *GetSrvVSchemasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.SrvVSchemas) > 0 { - for k := range m.SrvVSchemas { - v := m.SrvVSchemas[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if m.AtomicCopy { + i-- + if m.AtomicCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.TabletSelectionPreference != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletSelectionPreference)) + i-- + dAtA[i] = 0x78 + } + if m.DeferSecondaryKeys { + i-- + if m.DeferSecondaryKeys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if len(m.OnDdl) > 0 { + i -= len(m.OnDdl) + copy(dAtA[i:], m.OnDdl) + i = encodeVarint(dAtA, i, uint64(len(m.OnDdl))) + i-- + dAtA[i] = 0x6a + } + if len(m.SourceShards) > 0 { + for iNdEx := len(m.SourceShards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SourceShards[iNdEx]) + copy(dAtA[i:], m.SourceShards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.SourceShards[iNdEx]))) + i-- + dAtA[i] = 0x62 + } + } + if len(m.TargetTimeZone) > 0 { + i -= len(m.TargetTimeZone) + copy(dAtA[i:], m.TargetTimeZone) + i = encodeVarint(dAtA, i, uint64(len(m.TargetTimeZone))) + i-- + dAtA[i] = 0x5a + } + if len(m.SourceTimeZone) > 0 { + i -= len(m.SourceTimeZone) + copy(dAtA[i:], m.SourceTimeZone) + i = encodeVarint(dAtA, i, uint64(len(m.SourceTimeZone))) + i-- + dAtA[i] = 0x52 + } + if m.MaterializationIntent != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaterializationIntent)) + i-- + dAtA[i] = 0x48 + } + if len(m.ExternalCluster) > 0 { + i -= len(m.ExternalCluster) + copy(dAtA[i:], m.ExternalCluster) + i = encodeVarint(dAtA, i, uint64(len(m.ExternalCluster))) + i-- + dAtA[i] = 0x42 + } + if len(m.TabletTypes) > 0 { + i -= len(m.TabletTypes) + copy(dAtA[i:], m.TabletTypes) + i = encodeVarint(dAtA, i, uint64(len(m.TabletTypes))) + i-- + dAtA[i] = 0x3a + } + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarint(dAtA, i, uint64(len(m.Cell))) + i-- + dAtA[i] = 0x32 + } + if len(m.TableSettings) > 0 { + for iNdEx := len(m.TableSettings) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TableSettings[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa + dAtA[i] = 0x2a + } + } + if m.StopAfterCopy { + i-- + if m.StopAfterCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x20 + } + if len(m.TargetKeyspace) > 0 { + i -= len(m.TargetKeyspace) + copy(dAtA[i:], m.TargetKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.TargetKeyspace))) + i-- + dAtA[i] = 0x1a + } + if len(m.SourceKeyspace) > 0 { + i -= len(m.SourceKeyspace) + copy(dAtA[i:], m.SourceKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.SourceKeyspace))) + i-- + dAtA[i] = 0x12 + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *GetTabletRequest) MarshalVT() (dAtA []byte, err error) { +func (m *Keyspace) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4905,12 +5012,12 @@ func (m *GetTabletRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetTabletRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *Keyspace) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Keyspace) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4922,20 +5029,27 @@ func (m *GetTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if m.Keyspace != nil { + size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *GetTabletResponse) MarshalVT() (dAtA []byte, err error) { +func (m *SchemaMigration) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4948,12 +5062,12 @@ func (m *GetTabletResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetTabletResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *SchemaMigration) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *SchemaMigration) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4965,103 +5079,495 @@ func (m *GetTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Tablet != nil { - size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if m.ReadyToCompleteAt != nil { + size, err := m.ReadyToCompleteAt.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xaa } - return len(dAtA) - i, nil -} - -func (m *GetTabletsRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil + if m.ReviewedAt != nil { + size, err := m.ReviewedAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0xa2 } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + if m.IsImmediateOperation { + i-- + if m.IsImmediateOperation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x98 } - return dAtA[:n], nil -} - -func (m *GetTabletsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetTabletsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil + if m.CutoverAttempts != 0 { + i = encodeVarint(dAtA, i, uint64(m.CutoverAttempts)) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x90 } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + if len(m.Stage) > 0 { + i -= len(m.Stage) + copy(dAtA[i:], m.Stage) + i = encodeVarint(dAtA, i, uint64(len(m.Stage))) + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x8a } - if m.TabletType != 0 { - i = encodeVarint(dAtA, i, uint64(m.TabletType)) + if m.PostponeLaunch { i-- - dAtA[i] = 0x30 + if m.PostponeLaunch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3 + i-- + dAtA[i] = 0x80 } - if len(m.TabletAliases) > 0 { - for iNdEx := len(m.TabletAliases) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.TabletAliases[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a + if m.CancelledAt != nil { + size, err := m.CancelledAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xfa + } + if len(m.ComponentThrottled) > 0 { + i -= len(m.ComponentThrottled) + copy(dAtA[i:], m.ComponentThrottled) + i = encodeVarint(dAtA, i, uint64(len(m.ComponentThrottled))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xf2 + } + if m.LastThrottledAt != nil { + size, err := m.LastThrottledAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xea } - if m.Strict { + if len(m.SpecialPlan) > 0 { + i -= len(m.SpecialPlan) + copy(dAtA[i:], m.SpecialPlan) + i = encodeVarint(dAtA, i, uint64(len(m.SpecialPlan))) i-- - if m.Strict { + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xe2 + } + if m.UserThrottleRatio != 0 { + i -= 4 + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.UserThrottleRatio)))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xdd + } + if m.VitessLivenessIndicator != 0 { + i = encodeVarint(dAtA, i, uint64(m.VitessLivenessIndicator)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xd0 + } + if m.ReadyToComplete { + i-- + if m.ReadyToComplete { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x20 + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc8 } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x1a + if m.IsView { + i-- + if m.IsView { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xc0 + } + if len(m.RevertedUuid) > 0 { + i -= len(m.RevertedUuid) + copy(dAtA[i:], m.RevertedUuid) + i = encodeVarint(dAtA, i, uint64(len(m.RevertedUuid))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xba + } + if m.AllowConcurrent { + i-- + if m.AllowConcurrent { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xb0 + } + if len(m.RevertibleNotes) > 0 { + i -= len(m.RevertibleNotes) + copy(dAtA[i:], m.RevertibleNotes) + i = encodeVarint(dAtA, i, uint64(len(m.RevertibleNotes))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xaa + } + if len(m.ExpandedColumnNames) > 0 { + i -= len(m.ExpandedColumnNames) + copy(dAtA[i:], m.ExpandedColumnNames) + i = encodeVarint(dAtA, i, uint64(len(m.ExpandedColumnNames))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0xa2 + } + if len(m.DroppedNoDefaultColumnNames) > 0 { + i -= len(m.DroppedNoDefaultColumnNames) + copy(dAtA[i:], m.DroppedNoDefaultColumnNames) + i = encodeVarint(dAtA, i, uint64(len(m.DroppedNoDefaultColumnNames))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x9a + } + if len(m.RemovedUniqueKeyNames) > 0 { + i -= len(m.RemovedUniqueKeyNames) + copy(dAtA[i:], m.RemovedUniqueKeyNames) + i = encodeVarint(dAtA, i, uint64(len(m.RemovedUniqueKeyNames))) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + if m.PostponeCompletion { + i-- + if m.PostponeCompletion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x88 + } + if m.ArtifactRetention != nil { + size, err := m.ArtifactRetention.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + if len(m.LogFile) > 0 { + i -= len(m.LogFile) + copy(dAtA[i:], m.LogFile) + i = encodeVarint(dAtA, i, uint64(len(m.LogFile))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + if m.RemovedUniqueKeys != 0 { + i = encodeVarint(dAtA, i, uint64(m.RemovedUniqueKeys)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 + } + if m.AddedUniqueKeys != 0 { + i = encodeVarint(dAtA, i, uint64(m.AddedUniqueKeys)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe8 + } + if m.TableRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.TableRows)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe0 + } + if m.RowsCopied != 0 { + i = encodeVarint(dAtA, i, uint64(m.RowsCopied)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if m.EtaSeconds != 0 { + i = encodeVarint(dAtA, i, uint64(m.EtaSeconds)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarint(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if len(m.DdlAction) > 0 { + i -= len(m.DdlAction) + copy(dAtA[i:], m.DdlAction) + i = encodeVarint(dAtA, i, uint64(len(m.DdlAction))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.MigrationContext) > 0 { + i -= len(m.MigrationContext) + copy(dAtA[i:], m.MigrationContext) + i = encodeVarint(dAtA, i, uint64(len(m.MigrationContext))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if m.Progress != 0 { + i -= 4 + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Progress)))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb5 + } + if m.TabletFailure { + i-- + if m.TabletFailure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if m.Tablet != nil { + size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.Retries != 0 { + i = encodeVarint(dAtA, i, uint64(m.Retries)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if len(m.Artifacts) > 0 { + i -= len(m.Artifacts) + copy(dAtA[i:], m.Artifacts) + i = encodeVarint(dAtA, i, uint64(len(m.Artifacts))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if len(m.LogPath) > 0 { + i -= len(m.LogPath) + copy(dAtA[i:], m.LogPath) + i = encodeVarint(dAtA, i, uint64(len(m.LogPath))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.Status != 0 { + i = encodeVarint(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.CleanedUpAt != nil { + size, err := m.CleanedUpAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + if m.CompletedAt != nil { + size, err := m.CompletedAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x72 + } + if m.LivenessTimestamp != nil { + size, err := m.LivenessTimestamp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.StartedAt != nil { + size, err := m.StartedAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if m.ReadyAt != nil { + size, err := m.ReadyAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + if m.RequestedAt != nil { + size, err := m.RequestedAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + if m.AddedAt != nil { + size, err := m.AddedAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if len(m.Options) > 0 { + i -= len(m.Options) + copy(dAtA[i:], m.Options) + i = encodeVarint(dAtA, i, uint64(len(m.Options))) + i-- + dAtA[i] = 0x42 + } + if m.Strategy != 0 { + i = encodeVarint(dAtA, i, uint64(m.Strategy)) + i-- + dAtA[i] = 0x38 + } + if len(m.MigrationStatement) > 0 { + i -= len(m.MigrationStatement) + copy(dAtA[i:], m.MigrationStatement) + i = encodeVarint(dAtA, i, uint64(len(m.MigrationStatement))) + i-- + dAtA[i] = 0x32 + } + if len(m.Table) > 0 { + i -= len(m.Table) + copy(dAtA[i:], m.Table) + i = encodeVarint(dAtA, i, uint64(len(m.Table))) + i-- + dAtA[i] = 0x2a + } + if len(m.Schema) > 0 { + i -= len(m.Schema) + copy(dAtA[i:], m.Schema) + i = encodeVarint(dAtA, i, uint64(len(m.Schema))) + i-- + dAtA[i] = 0x22 } if len(m.Shard) > 0 { i -= len(m.Shard) copy(dAtA[i:], m.Shard) i = encodeVarint(dAtA, i, uint64(len(m.Shard))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } if len(m.Keyspace) > 0 { i -= len(m.Keyspace) copy(dAtA[i:], m.Keyspace) i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) i-- + dAtA[i] = 0x12 + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, uint64(len(m.Uuid))) + i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *GetTabletsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *Shard) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5074,12 +5580,12 @@ func (m *GetTabletsResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetTabletsResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *Shard) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetTabletsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Shard) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5091,22 +5597,34 @@ func (m *GetTabletsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Tablets) > 0 { - for iNdEx := len(m.Tablets) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Tablets[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + if m.Shard != nil { + size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *GetTopologyPathRequest) MarshalVT() (dAtA []byte, err error) { +func (m *Workflow_ReplicationLocation) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5119,12 +5637,12 @@ func (m *GetTopologyPathRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetTopologyPathRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *Workflow_ReplicationLocation) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetTopologyPathRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Workflow_ReplicationLocation) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5136,17 +5654,26 @@ func (m *GetTopologyPathRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarint(dAtA, i, uint64(len(m.Path))) + if len(m.Shards) > 0 { + for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Shards[iNdEx]) + copy(dAtA[i:], m.Shards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Shards[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *GetTopologyPathResponse) MarshalVT() (dAtA []byte, err error) { +func (m *Workflow_ShardStream) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5159,12 +5686,12 @@ func (m *GetTopologyPathResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetTopologyPathResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *Workflow_ShardStream) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetTopologyPathResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Workflow_ShardStream) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5176,20 +5703,44 @@ func (m *GetTopologyPathResponse) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Cell != nil { - size, err := m.Cell.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if m.IsPrimaryServing { + i-- + if m.IsPrimaryServing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x18 + } + if len(m.TabletControls) > 0 { + for iNdEx := len(m.TabletControls) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TabletControls[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Streams) > 0 { + for iNdEx := len(m.Streams) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Streams[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } } return len(dAtA) - i, nil } -func (m *TopologyCell) MarshalVT() (dAtA []byte, err error) { +func (m *Workflow_Stream_CopyState) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5202,12 +5753,12 @@ func (m *TopologyCell) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TopologyCell) MarshalToVT(dAtA []byte) (int, error) { +func (m *Workflow_Stream_CopyState) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *TopologyCell) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Workflow_Stream_CopyState) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5219,40 +5770,24 @@ func (m *TopologyCell) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Children) > 0 { - for iNdEx := len(m.Children) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Children[iNdEx]) - copy(dAtA[i:], m.Children[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Children[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarint(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x1a - } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarint(dAtA, i, uint64(len(m.Path))) + if len(m.LastPk) > 0 { + i -= len(m.LastPk) + copy(dAtA[i:], m.LastPk) + i = encodeVarint(dAtA, i, uint64(len(m.LastPk))) i-- dAtA[i] = 0x12 } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) + if len(m.Table) > 0 { + i -= len(m.Table) + copy(dAtA[i:], m.Table) + i = encodeVarint(dAtA, i, uint64(len(m.Table))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *GetVSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *Workflow_Stream_Log) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5265,12 +5800,12 @@ func (m *GetVSchemaRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *Workflow_Stream_Log) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Workflow_Stream_Log) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5282,17 +5817,66 @@ func (m *GetVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if m.Count != 0 { + i = encodeVarint(dAtA, i, uint64(m.Count)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x40 + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarint(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x3a + } + if m.UpdatedAt != nil { + size, err := m.UpdatedAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.CreatedAt != nil { + size, err := m.CreatedAt.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarint(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x22 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarint(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x1a + } + if m.StreamId != 0 { + i = encodeVarint(dAtA, i, uint64(m.StreamId)) + i-- + dAtA[i] = 0x10 + } + if m.Id != 0 { + i = encodeVarint(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *GetVersionRequest) MarshalVT() (dAtA []byte, err error) { +func (m *Workflow_Stream) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5305,12 +5889,12 @@ func (m *GetVersionRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetVersionRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *Workflow_Stream) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetVersionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Workflow_Stream) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5322,20 +5906,137 @@ func (m *GetVersionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if len(m.Tags) > 0 { + for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tags[iNdEx]) + copy(dAtA[i:], m.Tags[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tags[iNdEx]))) + i-- + dAtA[i] = 0x7a + } + } + if len(m.LogFetchError) > 0 { + i -= len(m.LogFetchError) + copy(dAtA[i:], m.LogFetchError) + i = encodeVarint(dAtA, i, uint64(len(m.LogFetchError))) + i-- + dAtA[i] = 0x72 + } + if len(m.Logs) > 0 { + for iNdEx := len(m.Logs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Logs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + } + if len(m.CopyStates) > 0 { + for iNdEx := len(m.CopyStates) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.CopyStates[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarint(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x5a + } + if m.TimeUpdated != nil { + size, err := m.TimeUpdated.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x52 + } + if m.TransactionTimestamp != nil { + size, err := m.TransactionTimestamp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if len(m.DbName) > 0 { + i -= len(m.DbName) + copy(dAtA[i:], m.DbName) + i = encodeVarint(dAtA, i, uint64(len(m.DbName))) + i-- + dAtA[i] = 0x42 + } + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarint(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x3a + } + if len(m.StopPosition) > 0 { + i -= len(m.StopPosition) + copy(dAtA[i:], m.StopPosition) + i = encodeVarint(dAtA, i, uint64(len(m.StopPosition))) + i-- + dAtA[i] = 0x32 + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0x2a + } + if m.BinlogSource != nil { + size, err := m.BinlogSource.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if m.Tablet != nil { + size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = encodeVarint(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *GetVersionResponse) MarshalVT() (dAtA []byte, err error) { +func (m *Workflow) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5348,12 +6049,12 @@ func (m *GetVersionResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetVersionResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *Workflow) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetVersionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Workflow) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5365,17 +6066,78 @@ func (m *GetVersionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarint(dAtA, i, uint64(len(m.Version))) + if len(m.WorkflowSubType) > 0 { + i -= len(m.WorkflowSubType) + copy(dAtA[i:], m.WorkflowSubType) + i = encodeVarint(dAtA, i, uint64(len(m.WorkflowSubType))) + i-- + dAtA[i] = 0x3a + } + if len(m.WorkflowType) > 0 { + i -= len(m.WorkflowType) + copy(dAtA[i:], m.WorkflowType) + i = encodeVarint(dAtA, i, uint64(len(m.WorkflowType))) + i-- + dAtA[i] = 0x32 + } + if len(m.ShardStreams) > 0 { + for k := range m.ShardStreams { + v := m.ShardStreams[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.MaxVReplicationLag != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxVReplicationLag)) + i-- + dAtA[i] = 0x20 + } + if m.Target != nil { + size, err := m.Target.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Source != nil { + size, err := m.Source.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *GetVSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *AddCellInfoRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5388,12 +6150,12 @@ func (m *GetVSchemaResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *AddCellInfoRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *AddCellInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5405,20 +6167,27 @@ func (m *GetVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.VSchema != nil { - size, err := m.VSchema.MarshalToSizedBufferVT(dAtA[:i]) + if m.CellInfo != nil { + size, err := m.CellInfo.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *GetWorkflowsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *AddCellInfoResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5431,12 +6200,12 @@ func (m *GetWorkflowsRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetWorkflowsRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *AddCellInfoResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetWorkflowsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *AddCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5448,27 +6217,59 @@ func (m *GetWorkflowsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.ActiveOnly { - i-- - if m.ActiveOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + return len(dAtA) - i, nil +} + +func (m *AddCellsAliasRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AddCellsAliasRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AddCellsAliasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x12 } - i-- - dAtA[i] = 0x10 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *GetWorkflowsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *AddCellsAliasResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5481,12 +6282,12 @@ func (m *GetWorkflowsResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *GetWorkflowsResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *AddCellsAliasResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetWorkflowsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *AddCellsAliasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5498,22 +6299,10 @@ func (m *GetWorkflowsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Workflows) > 0 { - for iNdEx := len(m.Workflows) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Workflows[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } - } return len(dAtA) - i, nil } -func (m *InitShardPrimaryRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5526,12 +6315,12 @@ func (m *InitShardPrimaryRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InitShardPrimaryRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplyRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *InitShardPrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5543,54 +6332,39 @@ func (m *InitShardPrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.WaitReplicasTimeout != nil { - size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if len(m.RebuildCells) > 0 { + for iNdEx := len(m.RebuildCells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RebuildCells[iNdEx]) + copy(dAtA[i:], m.RebuildCells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.RebuildCells[iNdEx]))) + i-- + dAtA[i] = 0x1a } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x2a } - if m.Force { + if m.SkipRebuild { i-- - if m.Force { + if m.SkipRebuild { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x20 + dAtA[i] = 0x10 } - if m.PrimaryElectTabletAlias != nil { - size, err := m.PrimaryElectTabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if m.RoutingRules != nil { + size, err := m.RoutingRules.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x1a - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *InitShardPrimaryResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyRoutingRulesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5603,12 +6377,12 @@ func (m *InitShardPrimaryResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *InitShardPrimaryResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplyRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *InitShardPrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5620,22 +6394,10 @@ func (m *InitShardPrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } - } return len(dAtA) - i, nil } -func (m *PingTabletRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyShardRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5648,12 +6410,12 @@ func (m *PingTabletRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PingTabletRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplyShardRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PingTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyShardRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5665,8 +6427,27 @@ func (m *PingTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if len(m.RebuildCells) > 0 { + for iNdEx := len(m.RebuildCells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RebuildCells[iNdEx]) + copy(dAtA[i:], m.RebuildCells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.RebuildCells[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.SkipRebuild { + i-- + if m.SkipRebuild { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.ShardRoutingRules != nil { + size, err := m.ShardRoutingRules.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -5678,7 +6459,7 @@ func (m *PingTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PingTabletResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyShardRoutingRulesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5691,12 +6472,12 @@ func (m *PingTabletResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PingTabletResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplyShardRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PingTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyShardRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5711,7 +6492,7 @@ func (m *PingTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PlannedReparentShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ApplySchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5724,12 +6505,12 @@ func (m *PlannedReparentShardRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PlannedReparentShardRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplySchemaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PlannedReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplySchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5741,42 +6522,72 @@ func (m *PlannedReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.WaitReplicasTimeout != nil { - size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) + if m.BatchSize != 0 { + i = encodeVarint(dAtA, i, uint64(m.BatchSize)) + i-- + dAtA[i] = 0x50 + } + if m.CallerId != nil { + size, err := m.CallerId.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x4a } - if m.AvoidPrimary != nil { - size, err := m.AvoidPrimary.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if m.SkipPreflight { + i-- + if m.SkipPreflight { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x40 } - if m.NewPrimary != nil { - size, err := m.NewPrimary.MarshalToSizedBufferVT(dAtA[:i]) + if m.WaitReplicasTimeout != nil { + size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x3a } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if len(m.MigrationContext) > 0 { + i -= len(m.MigrationContext) + copy(dAtA[i:], m.MigrationContext) + i = encodeVarint(dAtA, i, uint64(len(m.MigrationContext))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x32 + } + if len(m.UuidList) > 0 { + for iNdEx := len(m.UuidList) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.UuidList[iNdEx]) + copy(dAtA[i:], m.UuidList[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.UuidList[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.DdlStrategy) > 0 { + i -= len(m.DdlStrategy) + copy(dAtA[i:], m.DdlStrategy) + i = encodeVarint(dAtA, i, uint64(len(m.DdlStrategy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Sql) > 0 { + for iNdEx := len(m.Sql) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Sql[iNdEx]) + copy(dAtA[i:], m.Sql[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Sql[iNdEx]))) + i-- + dAtA[i] = 0x1a + } } if len(m.Keyspace) > 0 { i -= len(m.Keyspace) @@ -5788,7 +6599,7 @@ func (m *PlannedReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *PlannedReparentShardResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ApplySchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5801,12 +6612,12 @@ func (m *PlannedReparentShardResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PlannedReparentShardResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplySchemaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *PlannedReparentShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplySchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5818,46 +6629,36 @@ func (m *PlannedReparentShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.RowsAffectedByShard) > 0 { + for k := range m.RowsAffectedByShard { + v := m.RowsAffectedByShard[k] + baseI := i + i = encodeVarint(dAtA, i, uint64(v)) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - if m.PromotedPrimary != nil { - size, err := m.PromotedPrimary.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if len(m.UuidList) > 0 { + for iNdEx := len(m.UuidList) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.UuidList[iNdEx]) + copy(dAtA[i:], m.UuidList[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.UuidList[iNdEx]))) + i-- + dAtA[i] = 0xa } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RebuildKeyspaceGraphRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyVSchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5870,12 +6671,12 @@ func (m *RebuildKeyspaceGraphRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RebuildKeyspaceGraphRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplyVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RebuildKeyspaceGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5887,15 +6688,22 @@ func (m *RebuildKeyspaceGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.AllowPartial { + if len(m.Sql) > 0 { + i -= len(m.Sql) + copy(dAtA[i:], m.Sql) + i = encodeVarint(dAtA, i, uint64(len(m.Sql))) i-- - if m.AllowPartial { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x32 + } + if m.VSchema != nil { + size, err := m.VSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x18 + dAtA[i] = 0x2a } if len(m.Cells) > 0 { for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { @@ -5903,8 +6711,28 @@ func (m *RebuildKeyspaceGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, copy(dAtA[i:], m.Cells[iNdEx]) i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x22 + } + } + if m.DryRun { + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.SkipRebuild { + i-- + if m.SkipRebuild { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x10 } if len(m.Keyspace) > 0 { i -= len(m.Keyspace) @@ -5916,7 +6744,7 @@ func (m *RebuildKeyspaceGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *RebuildKeyspaceGraphResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ApplyVSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5929,12 +6757,12 @@ func (m *RebuildKeyspaceGraphResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RebuildKeyspaceGraphResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplyVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RebuildKeyspaceGraphResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplyVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5946,10 +6774,20 @@ func (m *RebuildKeyspaceGraphResponse) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.VSchema != nil { + size, err := m.VSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *RebuildVSchemaGraphRequest) MarshalVT() (dAtA []byte, err error) { +func (m *BackupRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -5962,12 +6800,12 @@ func (m *RebuildVSchemaGraphRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RebuildVSchemaGraphRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *BackupRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RebuildVSchemaGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *BackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -5979,52 +6817,52 @@ func (m *RebuildVSchemaGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, e i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0xa + if m.UpgradeSafe { + i-- + if m.UpgradeSafe { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x28 } - return len(dAtA) - i, nil -} - -func (m *RebuildVSchemaGraphResponse) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil + if len(m.IncrementalFromPos) > 0 { + i -= len(m.IncrementalFromPos) + copy(dAtA[i:], m.IncrementalFromPos) + i = encodeVarint(dAtA, i, uint64(len(m.IncrementalFromPos))) + i-- + dAtA[i] = 0x22 } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + if m.Concurrency != 0 { + i = encodeVarint(dAtA, i, uint64(m.Concurrency)) + i-- + dAtA[i] = 0x18 } - return dAtA[:n], nil -} - -func (m *RebuildVSchemaGraphResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *RebuildVSchemaGraphResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil + if m.AllowPrimary { + i-- + if m.AllowPrimary { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RefreshStateRequest) MarshalVT() (dAtA []byte, err error) { +func (m *BackupResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6037,12 +6875,12 @@ func (m *RefreshStateRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RefreshStateRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *BackupResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RefreshStateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *BackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6054,6 +6892,30 @@ func (m *RefreshStateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.Event != nil { + size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x1a + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x12 + } if m.TabletAlias != nil { size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -6067,7 +6929,7 @@ func (m *RefreshStateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *RefreshStateResponse) MarshalVT() (dAtA []byte, err error) { +func (m *BackupShardRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6080,12 +6942,12 @@ func (m *RefreshStateResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RefreshStateResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *BackupShardRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RefreshStateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *BackupShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6097,10 +6959,56 @@ func (m *RefreshStateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.IncrementalFromPos) > 0 { + i -= len(m.IncrementalFromPos) + copy(dAtA[i:], m.IncrementalFromPos) + i = encodeVarint(dAtA, i, uint64(len(m.IncrementalFromPos))) + i-- + dAtA[i] = 0x32 + } + if m.UpgradeSafe { + i-- + if m.UpgradeSafe { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Concurrency != 0 { + i = encodeVarint(dAtA, i, uint64(m.Concurrency)) + i-- + dAtA[i] = 0x20 + } + if m.AllowPrimary { + i-- + if m.AllowPrimary { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *RefreshStateByShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *CancelSchemaMigrationRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6113,12 +7021,12 @@ func (m *RefreshStateByShardRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RefreshStateByShardRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *CancelSchemaMigrationRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RefreshStateByShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CancelSchemaMigrationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6130,19 +7038,10 @@ func (m *RefreshStateByShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, e i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, uint64(len(m.Uuid))) i-- dAtA[i] = 0x12 } @@ -6156,7 +7055,7 @@ func (m *RefreshStateByShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, e return len(dAtA) - i, nil } -func (m *RefreshStateByShardResponse) MarshalVT() (dAtA []byte, err error) { +func (m *CancelSchemaMigrationResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6169,12 +7068,12 @@ func (m *RefreshStateByShardResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RefreshStateByShardResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *CancelSchemaMigrationResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RefreshStateByShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CancelSchemaMigrationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6186,27 +7085,27 @@ func (m *RefreshStateByShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.PartialRefreshDetails) > 0 { - i -= len(m.PartialRefreshDetails) - copy(dAtA[i:], m.PartialRefreshDetails) - i = encodeVarint(dAtA, i, uint64(len(m.PartialRefreshDetails))) - i-- - dAtA[i] = 0x12 - } - if m.IsPartialRefresh { - i-- - if m.IsPartialRefresh { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.RowsAffectedByShard) > 0 { + for k := range m.RowsAffectedByShard { + v := m.RowsAffectedByShard[k] + baseI := i + i = encodeVarint(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func (m *ReloadSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ChangeTabletTypeRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6219,12 +7118,12 @@ func (m *ReloadSchemaRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReloadSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ChangeTabletTypeRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReloadSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ChangeTabletTypeRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6236,6 +7135,21 @@ func (m *ReloadSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.DryRun { + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.DbType != 0 { + i = encodeVarint(dAtA, i, uint64(m.DbType)) + i-- + dAtA[i] = 0x10 + } if m.TabletAlias != nil { size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -6249,7 +7163,7 @@ func (m *ReloadSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ReloadSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ChangeTabletTypeResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6262,12 +7176,12 @@ func (m *ReloadSchemaResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReloadSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ChangeTabletTypeResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReloadSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ChangeTabletTypeResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6279,10 +7193,40 @@ func (m *ReloadSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.WasDryRun { + i-- + if m.WasDryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.AfterTablet != nil { + size, err := m.AfterTablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.BeforeTablet != nil { + size, err := m.BeforeTablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *ReloadSchemaKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *CleanupSchemaMigrationRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6295,12 +7239,12 @@ func (m *ReloadSchemaKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReloadSchemaKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *CleanupSchemaMigrationRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReloadSchemaKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CleanupSchemaMigrationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6312,25 +7256,10 @@ func (m *ReloadSchemaKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Concurrency != 0 { - i = encodeVarint(dAtA, i, uint64(m.Concurrency)) - i-- - dAtA[i] = 0x20 - } - if m.IncludePrimary { - i-- - if m.IncludePrimary { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.WaitPosition) > 0 { - i -= len(m.WaitPosition) - copy(dAtA[i:], m.WaitPosition) - i = encodeVarint(dAtA, i, uint64(len(m.WaitPosition))) + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, uint64(len(m.Uuid))) i-- dAtA[i] = 0x12 } @@ -6344,7 +7273,7 @@ func (m *ReloadSchemaKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, return len(dAtA) - i, nil } -func (m *ReloadSchemaKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *CleanupSchemaMigrationResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6357,12 +7286,12 @@ func (m *ReloadSchemaKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReloadSchemaKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *CleanupSchemaMigrationResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReloadSchemaKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CleanupSchemaMigrationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6374,22 +7303,27 @@ func (m *ReloadSchemaKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.RowsAffectedByShard) > 0 { + for k := range m.RowsAffectedByShard { + v := m.RowsAffectedByShard[k] + baseI := i + i = encodeVarint(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) i-- dAtA[i] = 0xa - } - } + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *ReloadSchemaShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *CompleteSchemaMigrationRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6402,12 +7336,12 @@ func (m *ReloadSchemaShardRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReloadSchemaShardRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *CompleteSchemaMigrationRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReloadSchemaShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CompleteSchemaMigrationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6419,32 +7353,10 @@ func (m *ReloadSchemaShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Concurrency != 0 { - i = encodeVarint(dAtA, i, uint64(m.Concurrency)) - i-- - dAtA[i] = 0x28 - } - if m.IncludePrimary { - i-- - if m.IncludePrimary { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.WaitPosition) > 0 { - i -= len(m.WaitPosition) - copy(dAtA[i:], m.WaitPosition) - i = encodeVarint(dAtA, i, uint64(len(m.WaitPosition))) - i-- - dAtA[i] = 0x1a - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, uint64(len(m.Uuid))) i-- dAtA[i] = 0x12 } @@ -6458,7 +7370,7 @@ func (m *ReloadSchemaShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err return len(dAtA) - i, nil } -func (m *ReloadSchemaShardResponse) MarshalVT() (dAtA []byte, err error) { +func (m *CompleteSchemaMigrationResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6471,12 +7383,12 @@ func (m *ReloadSchemaShardResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReloadSchemaShardResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *CompleteSchemaMigrationResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReloadSchemaShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CompleteSchemaMigrationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6488,22 +7400,27 @@ func (m *ReloadSchemaShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.RowsAffectedByShard) > 0 { + for k := range m.RowsAffectedByShard { + v := m.RowsAffectedByShard[k] + baseI := i + i = encodeVarint(dAtA, i, uint64(v)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } return len(dAtA) - i, nil } -func (m *RemoveBackupRequest) MarshalVT() (dAtA []byte, err error) { +func (m *CreateKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6516,12 +7433,12 @@ func (m *RemoveBackupRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveBackupRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *CreateKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RemoveBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CreateKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6533,31 +7450,85 @@ func (m *RemoveBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) + if len(m.SidecarDbName) > 0 { + i -= len(m.SidecarDbName) + copy(dAtA[i:], m.SidecarDbName) + i = encodeVarint(dAtA, i, uint64(len(m.SidecarDbName))) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x5a } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if len(m.DurabilityPolicy) > 0 { + i -= len(m.DurabilityPolicy) + copy(dAtA[i:], m.DurabilityPolicy) + i = encodeVarint(dAtA, i, uint64(len(m.DurabilityPolicy))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x52 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if m.SnapshotTime != nil { + size, err := m.SnapshotTime.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if len(m.BaseKeyspace) > 0 { + i -= len(m.BaseKeyspace) + copy(dAtA[i:], m.BaseKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.BaseKeyspace))) + i-- + dAtA[i] = 0x42 + } + if m.Type != 0 { + i = encodeVarint(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x38 + } + if len(m.ServedFroms) > 0 { + for iNdEx := len(m.ServedFroms) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ServedFroms[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if m.AllowEmptyVSchema { + i-- + if m.AllowEmptyVSchema { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RemoveBackupResponse) MarshalVT() (dAtA []byte, err error) { +func (m *CreateKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6570,12 +7541,12 @@ func (m *RemoveBackupResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveBackupResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *CreateKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RemoveBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CreateKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6587,10 +7558,20 @@ func (m *RemoveBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.Keyspace != nil { + size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *RemoveKeyspaceCellRequest) MarshalVT() (dAtA []byte, err error) { +func (m *CreateShardRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6603,12 +7584,12 @@ func (m *RemoveKeyspaceCellRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveKeyspaceCellRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *CreateShardRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RemoveKeyspaceCellRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CreateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6620,9 +7601,9 @@ func (m *RemoveKeyspaceCellRequest) MarshalToSizedBufferVT(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Recursive { + if m.IncludeParent { i-- - if m.Recursive { + if m.IncludeParent { dAtA[i] = 1 } else { dAtA[i] = 0 @@ -6640,10 +7621,10 @@ func (m *RemoveKeyspaceCellRequest) MarshalToSizedBufferVT(dAtA []byte) (int, er i-- dAtA[i] = 0x18 } - if len(m.Cell) > 0 { - i -= len(m.Cell) - copy(dAtA[i:], m.Cell) - i = encodeVarint(dAtA, i, uint64(len(m.Cell))) + if len(m.ShardName) > 0 { + i -= len(m.ShardName) + copy(dAtA[i:], m.ShardName) + i = encodeVarint(dAtA, i, uint64(len(m.ShardName))) i-- dAtA[i] = 0x12 } @@ -6657,7 +7638,7 @@ func (m *RemoveKeyspaceCellRequest) MarshalToSizedBufferVT(dAtA []byte) (int, er return len(dAtA) - i, nil } -func (m *RemoveKeyspaceCellResponse) MarshalVT() (dAtA []byte, err error) { +func (m *CreateShardResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6670,12 +7651,12 @@ func (m *RemoveKeyspaceCellResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveKeyspaceCellResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *CreateShardResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RemoveKeyspaceCellResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CreateShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6687,10 +7668,40 @@ func (m *RemoveKeyspaceCellResponse) MarshalToSizedBufferVT(dAtA []byte) (int, e i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.ShardAlreadyExists { + i-- + if m.ShardAlreadyExists { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Shard != nil { + size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Keyspace != nil { + size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *RemoveShardCellRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteCellInfoRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6703,12 +7714,12 @@ func (m *RemoveShardCellRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveShardCellRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteCellInfoRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RemoveShardCellRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteCellInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6720,16 +7731,6 @@ func (m *RemoveShardCellRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Recursive { - i-- - if m.Recursive { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } if m.Force { i-- if m.Force { @@ -6738,33 +7739,19 @@ func (m *RemoveShardCellRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error dAtA[i] = 0 } i-- - dAtA[i] = 0x20 - } - if len(m.Cell) > 0 { - i -= len(m.Cell) - copy(dAtA[i:], m.Cell) - i = encodeVarint(dAtA, i, uint64(len(m.Cell))) - i-- - dAtA[i] = 0x1a - } - if len(m.ShardName) > 0 { - i -= len(m.ShardName) - copy(dAtA[i:], m.ShardName) - i = encodeVarint(dAtA, i, uint64(len(m.ShardName))) - i-- - dAtA[i] = 0x12 + dAtA[i] = 0x10 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RemoveShardCellResponse) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteCellInfoResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6777,12 +7764,12 @@ func (m *RemoveShardCellResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RemoveShardCellResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteCellInfoResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RemoveShardCellResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6797,7 +7784,7 @@ func (m *RemoveShardCellResponse) MarshalToSizedBufferVT(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *ReparentTabletRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteCellsAliasRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6810,12 +7797,12 @@ func (m *ReparentTabletRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReparentTabletRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteCellsAliasRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReparentTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteCellsAliasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6827,20 +7814,17 @@ func (m *ReparentTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Tablet != nil { - size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ReparentTabletResponse) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteCellsAliasResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6853,12 +7837,12 @@ func (m *ReparentTabletResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReparentTabletResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteCellsAliasResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReparentTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteCellsAliasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6870,34 +7854,10 @@ func (m *ReparentTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Primary != nil { - size, err := m.Primary.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *RestoreFromBackupRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6910,12 +7870,12 @@ func (m *RestoreFromBackupRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RestoreFromBackupRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6927,47 +7887,37 @@ func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.DryRun { + if m.Force { i-- - if m.DryRun { + if m.Force { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x20 + dAtA[i] = 0x18 } - if len(m.RestoreToPos) > 0 { - i -= len(m.RestoreToPos) - copy(dAtA[i:], m.RestoreToPos) - i = encodeVarint(dAtA, i, uint64(len(m.RestoreToPos))) + if m.Recursive { i-- - dAtA[i] = 0x1a - } - if m.BackupTime != nil { - size, err := m.BackupTime.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if m.Recursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x10 } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *RestoreFromBackupResponse) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6980,12 +7930,12 @@ func (m *RestoreFromBackupResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RestoreFromBackupResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RestoreFromBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6997,44 +7947,10 @@ func (m *RestoreFromBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Event != nil { - size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x1a - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0x12 - } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *RunHealthCheckRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteShardsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7047,12 +7963,12 @@ func (m *RunHealthCheckRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RunHealthCheckRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteShardsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RunHealthCheckRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteShardsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7064,20 +7980,52 @@ func (m *RunHealthCheckRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x28 + } + if m.EvenIfServing { + i-- + if m.EvenIfServing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Recursive { + i-- + if m.Recursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Shards) > 0 { + for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Shards[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } } return len(dAtA) - i, nil } -func (m *RunHealthCheckResponse) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteShardsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7090,12 +8038,12 @@ func (m *RunHealthCheckResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RunHealthCheckResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteShardsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RunHealthCheckResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteShardsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7110,7 +8058,7 @@ func (m *RunHealthCheckResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *SetKeyspaceDurabilityPolicyRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteSrvVSchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7123,12 +8071,12 @@ func (m *SetKeyspaceDurabilityPolicyRequest) MarshalVT() (dAtA []byte, err error return dAtA[:n], nil } -func (m *SetKeyspaceDurabilityPolicyRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteSrvVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetKeyspaceDurabilityPolicyRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteSrvVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7140,24 +8088,17 @@ func (m *SetKeyspaceDurabilityPolicyRequest) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.DurabilityPolicy) > 0 { - i -= len(m.DurabilityPolicy) - copy(dAtA[i:], m.DurabilityPolicy) - i = encodeVarint(dAtA, i, uint64(len(m.DurabilityPolicy))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarint(dAtA, i, uint64(len(m.Cell))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *SetKeyspaceDurabilityPolicyResponse) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteSrvVSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7170,12 +8111,12 @@ func (m *SetKeyspaceDurabilityPolicyResponse) MarshalVT() (dAtA []byte, err erro return dAtA[:n], nil } -func (m *SetKeyspaceDurabilityPolicyResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteSrvVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetKeyspaceDurabilityPolicyResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteSrvVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7187,20 +8128,10 @@ func (m *SetKeyspaceDurabilityPolicyResponse) MarshalToSizedBufferVT(dAtA []byte i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Keyspace != nil { - size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *SetKeyspaceServedFromRequest) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteTabletsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7213,12 +8144,12 @@ func (m *SetKeyspaceServedFromRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetKeyspaceServedFromRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteTabletsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetKeyspaceServedFromRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteTabletsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7230,48 +8161,32 @@ func (m *SetKeyspaceServedFromRequest) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.SourceKeyspace) > 0 { - i -= len(m.SourceKeyspace) - copy(dAtA[i:], m.SourceKeyspace) - i = encodeVarint(dAtA, i, uint64(len(m.SourceKeyspace))) - i-- - dAtA[i] = 0x2a - } - if m.Remove { + if m.AllowPrimary { i-- - if m.Remove { + if m.AllowPrimary { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x20 - } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if m.TabletType != 0 { - i = encodeVarint(dAtA, i, uint64(m.TabletType)) - i-- dAtA[i] = 0x10 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + if len(m.TabletAliases) > 0 { + for iNdEx := len(m.TabletAliases) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TabletAliases[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } } return len(dAtA) - i, nil } -func (m *SetKeyspaceServedFromResponse) MarshalVT() (dAtA []byte, err error) { +func (m *DeleteTabletsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7284,12 +8199,12 @@ func (m *SetKeyspaceServedFromResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetKeyspaceServedFromResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *DeleteTabletsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetKeyspaceServedFromResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *DeleteTabletsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7301,20 +8216,10 @@ func (m *SetKeyspaceServedFromResponse) MarshalToSizedBufferVT(dAtA []byte) (int i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Keyspace != nil { - size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *SetKeyspaceShardingInfoRequest) MarshalVT() (dAtA []byte, err error) { +func (m *EmergencyReparentShardRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7327,12 +8232,12 @@ func (m *SetKeyspaceShardingInfoRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetKeyspaceShardingInfoRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *EmergencyReparentShardRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetKeyspaceShardingInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *EmergencyReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7344,15 +8249,64 @@ func (m *SetKeyspaceShardingInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (in i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Force { + if m.WaitForAllTablets { i-- - if m.Force { + if m.WaitForAllTablets { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x20 + dAtA[i] = 0x38 + } + if m.PreventCrossCellPromotion { + i-- + if m.PreventCrossCellPromotion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.WaitReplicasTimeout != nil { + size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if len(m.IgnoreReplicas) > 0 { + for iNdEx := len(m.IgnoreReplicas) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.IgnoreReplicas[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.NewPrimary != nil { + size, err := m.NewPrimary.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } if len(m.Keyspace) > 0 { i -= len(m.Keyspace) @@ -7364,7 +8318,7 @@ func (m *SetKeyspaceShardingInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (in return len(dAtA) - i, nil } -func (m *SetKeyspaceShardingInfoResponse) MarshalVT() (dAtA []byte, err error) { +func (m *EmergencyReparentShardResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7377,12 +8331,12 @@ func (m *SetKeyspaceShardingInfoResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetKeyspaceShardingInfoResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *EmergencyReparentShardResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetKeyspaceShardingInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *EmergencyReparentShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7394,20 +8348,46 @@ func (m *SetKeyspaceShardingInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (i i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Keyspace != nil { - size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if m.PromotedPrimary != nil { + size, err := m.PromotedPrimary.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *SetShardIsPrimaryServingRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAppRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7420,12 +8400,12 @@ func (m *SetShardIsPrimaryServingRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetShardIsPrimaryServingRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetShardIsPrimaryServingRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7437,34 +8417,42 @@ func (m *SetShardIsPrimaryServingRequest) MarshalToSizedBufferVT(dAtA []byte) (i i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.IsServing { + if m.UsePool { i-- - if m.IsServing { + if m.UsePool { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- + dAtA[i] = 0x20 + } + if m.MaxRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxRows)) + i-- dAtA[i] = 0x18 } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) i-- dAtA[i] = 0x12 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *SetShardIsPrimaryServingResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAppResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7477,12 +8465,12 @@ func (m *SetShardIsPrimaryServingResponse) MarshalVT() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *SetShardIsPrimaryServingResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetShardIsPrimaryServingResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7494,8 +8482,8 @@ func (m *SetShardIsPrimaryServingResponse) MarshalToSizedBufferVT(dAtA []byte) ( i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Shard != nil { - size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -7507,7 +8495,7 @@ func (m *SetShardIsPrimaryServingResponse) MarshalToSizedBufferVT(dAtA []byte) ( return len(dAtA) - i, nil } -func (m *SetShardTabletControlRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsDBARequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7520,12 +8508,12 @@ func (m *SetShardTabletControlRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetShardTabletControlRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDBARequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetShardTabletControlRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDBARequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7537,67 +8525,52 @@ func (m *SetShardTabletControlRequest) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Remove { + if m.ReloadSchema { i-- - if m.Remove { + if m.ReloadSchema { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x38 + dAtA[i] = 0x28 } - if m.DisableQueryService { + if m.DisableBinlogs { i-- - if m.DisableQueryService { + if m.DisableBinlogs { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x30 - } - if len(m.DeniedTables) > 0 { - for iNdEx := len(m.DeniedTables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DeniedTables[iNdEx]) - copy(dAtA[i:], m.DeniedTables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.DeniedTables[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + dAtA[i] = 0x20 } - if m.TabletType != 0 { - i = encodeVarint(dAtA, i, uint64(m.TabletType)) + if m.MaxRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxRows)) i-- dAtA[i] = 0x18 } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) i-- dAtA[i] = 0x12 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *SetShardTabletControlResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsDBAResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7610,12 +8583,12 @@ func (m *SetShardTabletControlResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetShardTabletControlResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDBAResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetShardTabletControlResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsDBAResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7627,8 +8600,8 @@ func (m *SetShardTabletControlResponse) MarshalToSizedBufferVT(dAtA []byte) (int i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Shard != nil { - size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -7640,7 +8613,7 @@ func (m *SetShardTabletControlResponse) MarshalToSizedBufferVT(dAtA []byte) (int return len(dAtA) - i, nil } -func (m *SetWritableRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteHookRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7653,12 +8626,12 @@ func (m *SetWritableRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetWritableRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteHookRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetWritableRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteHookRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7670,15 +8643,15 @@ func (m *SetWritableRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Writable { - i-- - if m.Writable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.TabletHookRequest != nil { + size, err := m.TabletHookRequest.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x10 + dAtA[i] = 0x12 } if m.TabletAlias != nil { size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) @@ -7693,7 +8666,7 @@ func (m *SetWritableRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *SetWritableResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteHookResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7706,12 +8679,12 @@ func (m *SetWritableResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SetWritableResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteHookResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetWritableResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteHookResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7723,10 +8696,20 @@ func (m *SetWritableResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.HookResult != nil { + size, err := m.HookResult.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *ShardReplicationAddRequest) MarshalVT() (dAtA []byte, err error) { +func (m *FindAllShardsInKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7739,12 +8722,12 @@ func (m *ShardReplicationAddRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ShardReplicationAddRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *FindAllShardsInKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ShardReplicationAddRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *FindAllShardsInKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7756,23 +8739,6 @@ func (m *ShardReplicationAddRequest) MarshalToSizedBufferVT(dAtA []byte) (int, e i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 - } if len(m.Keyspace) > 0 { i -= len(m.Keyspace) copy(dAtA[i:], m.Keyspace) @@ -7783,7 +8749,7 @@ func (m *ShardReplicationAddRequest) MarshalToSizedBufferVT(dAtA []byte) (int, e return len(dAtA) - i, nil } -func (m *ShardReplicationAddResponse) MarshalVT() (dAtA []byte, err error) { +func (m *FindAllShardsInKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7796,12 +8762,12 @@ func (m *ShardReplicationAddResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ShardReplicationAddResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *FindAllShardsInKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ShardReplicationAddResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *FindAllShardsInKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7813,10 +8779,32 @@ func (m *ShardReplicationAddResponse) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Shards) > 0 { + for k := range m.Shards { + v := m.Shards[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *ShardReplicationFixRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetBackupsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7829,12 +8817,12 @@ func (m *ShardReplicationFixRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ShardReplicationFixRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetBackupsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ShardReplicationFixRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetBackupsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7846,12 +8834,25 @@ func (m *ShardReplicationFixRequest) MarshalToSizedBufferVT(dAtA []byte) (int, e i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Cell) > 0 { - i -= len(m.Cell) - copy(dAtA[i:], m.Cell) - i = encodeVarint(dAtA, i, uint64(len(m.Cell))) + if m.DetailedLimit != 0 { + i = encodeVarint(dAtA, i, uint64(m.DetailedLimit)) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x28 + } + if m.Detailed { + i-- + if m.Detailed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Limit != 0 { + i = encodeVarint(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x18 } if len(m.Shard) > 0 { i -= len(m.Shard) @@ -7870,7 +8871,7 @@ func (m *ShardReplicationFixRequest) MarshalToSizedBufferVT(dAtA []byte) (int, e return len(dAtA) - i, nil } -func (m *ShardReplicationFixResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetBackupsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7883,12 +8884,12 @@ func (m *ShardReplicationFixResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ShardReplicationFixResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetBackupsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ShardReplicationFixResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetBackupsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7900,20 +8901,22 @@ func (m *ShardReplicationFixResponse) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Error != nil { - size, err := m.Error.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Backups) > 0 { + for iNdEx := len(m.Backups) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Backups[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ShardReplicationPositionsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetCellInfoRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7926,12 +8929,12 @@ func (m *ShardReplicationPositionsRequest) MarshalVT() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *ShardReplicationPositionsRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetCellInfoRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ShardReplicationPositionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetCellInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7943,24 +8946,17 @@ func (m *ShardReplicationPositionsRequest) MarshalToSizedBufferVT(dAtA []byte) ( i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarint(dAtA, i, uint64(len(m.Cell))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ShardReplicationPositionsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetCellInfoResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7973,12 +8969,12 @@ func (m *ShardReplicationPositionsResponse) MarshalVT() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *ShardReplicationPositionsResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetCellInfoResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ShardReplicationPositionsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7990,54 +8986,20 @@ func (m *ShardReplicationPositionsResponse) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.TabletMap) > 0 { - for k := range m.TabletMap { - v := m.TabletMap[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.ReplicationStatuses) > 0 { - for k := range m.ReplicationStatuses { - v := m.ReplicationStatuses[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa + if m.CellInfo != nil { + size, err := m.CellInfo.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ShardReplicationRemoveRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetCellInfoNamesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8050,12 +9012,12 @@ func (m *ShardReplicationRemoveRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ShardReplicationRemoveRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetCellInfoNamesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ShardReplicationRemoveRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetCellInfoNamesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8067,34 +9029,10 @@ func (m *ShardReplicationRemoveRequest) MarshalToSizedBufferVT(dAtA []byte) (int i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x1a - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *ShardReplicationRemoveResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetCellInfoNamesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8107,12 +9045,12 @@ func (m *ShardReplicationRemoveResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ShardReplicationRemoveResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetCellInfoNamesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ShardReplicationRemoveResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetCellInfoNamesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8124,10 +9062,19 @@ func (m *ShardReplicationRemoveResponse) MarshalToSizedBufferVT(dAtA []byte) (in i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *SleepTabletRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetCellsAliasesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8140,12 +9087,12 @@ func (m *SleepTabletRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SleepTabletRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetCellsAliasesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SleepTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetCellsAliasesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8157,30 +9104,10 @@ func (m *SleepTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Duration != nil { - size, err := m.Duration.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *SleepTabletResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetCellsAliasesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8193,12 +9120,12 @@ func (m *SleepTabletResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SleepTabletResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetCellsAliasesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SleepTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetCellsAliasesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8210,10 +9137,32 @@ func (m *SleepTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Aliases) > 0 { + for k := range m.Aliases { + v := m.Aliases[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } -func (m *SourceShardAddRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetFullStatusRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8226,12 +9175,12 @@ func (m *SourceShardAddRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SourceShardAddRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetFullStatusRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SourceShardAddRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetFullStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8243,62 +9192,20 @@ func (m *SourceShardAddRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Tables) > 0 { - for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Tables[iNdEx]) - copy(dAtA[i:], m.Tables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) - i-- - dAtA[i] = 0x3a - } - } - if m.KeyRange != nil { - size, err := m.KeyRange.MarshalToSizedBufferVT(dAtA[:i]) + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x32 - } - if len(m.SourceShard) > 0 { - i -= len(m.SourceShard) - copy(dAtA[i:], m.SourceShard) - i = encodeVarint(dAtA, i, uint64(len(m.SourceShard))) - i-- - dAtA[i] = 0x2a - } - if len(m.SourceKeyspace) > 0 { - i -= len(m.SourceKeyspace) - copy(dAtA[i:], m.SourceKeyspace) - i = encodeVarint(dAtA, i, uint64(len(m.SourceKeyspace))) - i-- - dAtA[i] = 0x22 - } - if m.Uid != 0 { - i = encodeVarint(dAtA, i, uint64(m.Uid)) - i-- - dAtA[i] = 0x18 - } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *SourceShardAddResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetFullStatusResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8311,12 +9218,12 @@ func (m *SourceShardAddResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SourceShardAddResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetFullStatusResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SourceShardAddResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetFullStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8328,8 +9235,8 @@ func (m *SourceShardAddResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Shard != nil { - size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if m.Status != nil { + size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -8341,7 +9248,7 @@ func (m *SourceShardAddResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *SourceShardDeleteRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetKeyspacesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8354,12 +9261,12 @@ func (m *SourceShardDeleteRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SourceShardDeleteRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetKeyspacesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SourceShardDeleteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetKeyspacesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8371,29 +9278,55 @@ func (m *SourceShardDeleteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Uid != 0 { - i = encodeVarint(dAtA, i, uint64(m.Uid)) - i-- - dAtA[i] = 0x18 + return len(dAtA) - i, nil +} + +func (m *GetKeyspacesResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa + return dAtA[:n], nil +} + +func (m *GetKeyspacesResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetKeyspacesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Keyspaces) > 0 { + for iNdEx := len(m.Keyspaces) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Keyspaces[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } } return len(dAtA) - i, nil } -func (m *SourceShardDeleteResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8406,12 +9339,12 @@ func (m *SourceShardDeleteResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SourceShardDeleteResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SourceShardDeleteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8423,20 +9356,17 @@ func (m *SourceShardDeleteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Shard != nil { - size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *StartReplicationRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8449,12 +9379,12 @@ func (m *StartReplicationRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StartReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StartReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8466,8 +9396,8 @@ func (m *StartReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if m.Keyspace != nil { + size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -8479,7 +9409,7 @@ func (m *StartReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *StartReplicationResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetPermissionsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8492,12 +9422,12 @@ func (m *StartReplicationResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StartReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetPermissionsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StartReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetPermissionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8509,10 +9439,20 @@ func (m *StartReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *StopReplicationRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetPermissionsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8525,12 +9465,12 @@ func (m *StopReplicationRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StopReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetPermissionsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetPermissionsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8542,8 +9482,8 @@ func (m *StopReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.TabletAlias != nil { - size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if m.Permissions != nil { + size, err := m.Permissions.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -8555,7 +9495,7 @@ func (m *StopReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *StopReplicationResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8568,12 +9508,12 @@ func (m *StopReplicationResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StopReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *StopReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8588,7 +9528,7 @@ func (m *StopReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *TabletExternallyReparentedRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetRoutingRulesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8601,12 +9541,12 @@ func (m *TabletExternallyReparentedRequest) MarshalVT() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *TabletExternallyReparentedRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *TabletExternallyReparentedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8618,8 +9558,8 @@ func (m *TabletExternallyReparentedRequest) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Tablet != nil { - size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if m.RoutingRules != nil { + size, err := m.RoutingRules.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -8631,7 +9571,7 @@ func (m *TabletExternallyReparentedRequest) MarshalToSizedBufferVT(dAtA []byte) return len(dAtA) - i, nil } -func (m *TabletExternallyReparentedResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8644,12 +9584,12 @@ func (m *TabletExternallyReparentedResponse) MarshalVT() (dAtA []byte, err error return dAtA[:n], nil } -func (m *TabletExternallyReparentedResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *TabletExternallyReparentedResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8661,44 +9601,78 @@ func (m *TabletExternallyReparentedResponse) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.OldPrimary != nil { - size, err := m.OldPrimary.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if m.TableSchemaOnly { + i-- + if m.TableSchemaOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x38 } - if m.NewPrimary != nil { - size, err := m.NewPrimary.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if m.TableSizesOnly { + i-- + if m.TableSizesOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x30 } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if m.TableNamesOnly { i-- - dAtA[i] = 0x12 + if m.TableNamesOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if m.IncludeViews { + i-- + if m.IncludeViews { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.ExcludeTables) > 0 { + for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeTables[iNdEx]) + copy(dAtA[i:], m.ExcludeTables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *UpdateCellInfoRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8711,12 +9685,12 @@ func (m *UpdateCellInfoRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UpdateCellInfoRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UpdateCellInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8728,27 +9702,20 @@ func (m *UpdateCellInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.CellInfo != nil { - size, err := m.CellInfo.MarshalToSizedBufferVT(dAtA[:i]) + if m.Schema != nil { + size, err := m.Schema.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *UpdateCellInfoResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSchemaMigrationsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8761,12 +9728,12 @@ func (m *UpdateCellInfoResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UpdateCellInfoResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSchemaMigrationsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UpdateCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSchemaMigrationsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8778,27 +9745,61 @@ func (m *UpdateCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.CellInfo != nil { - size, err := m.CellInfo.MarshalToSizedBufferVT(dAtA[:i]) + if m.Skip != 0 { + i = encodeVarint(dAtA, i, uint64(m.Skip)) + i-- + dAtA[i] = 0x40 + } + if m.Limit != 0 { + i = encodeVarint(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x38 + } + if m.Order != 0 { + i = encodeVarint(dAtA, i, uint64(m.Order)) + i-- + dAtA[i] = 0x30 + } + if m.Recent != nil { + size, err := m.Recent.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- + dAtA[i] = 0x2a + } + if m.Status != 0 { + i = encodeVarint(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x20 + } + if len(m.MigrationContext) > 0 { + i -= len(m.MigrationContext) + copy(dAtA[i:], m.MigrationContext) + i = encodeVarint(dAtA, i, uint64(len(m.MigrationContext))) + i-- + dAtA[i] = 0x1a + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, uint64(len(m.Uuid))) + i-- dAtA[i] = 0x12 } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *UpdateCellsAliasRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetSchemaMigrationsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8811,12 +9812,12 @@ func (m *UpdateCellsAliasRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UpdateCellsAliasRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSchemaMigrationsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UpdateCellsAliasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSchemaMigrationsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8828,27 +9829,22 @@ func (m *UpdateCellsAliasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.CellsAlias != nil { - size, err := m.CellsAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Migrations) > 0 { + for iNdEx := len(m.Migrations) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Migrations[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *UpdateCellsAliasResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetShardRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8861,12 +9857,12 @@ func (m *UpdateCellsAliasResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *UpdateCellsAliasResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetShardRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UpdateCellsAliasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8878,27 +9874,24 @@ func (m *UpdateCellsAliasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.CellsAlias != nil { - size, err := m.CellsAlias.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if len(m.ShardName) > 0 { + i -= len(m.ShardName) + copy(dAtA[i:], m.ShardName) + i = encodeVarint(dAtA, i, uint64(len(m.ShardName))) i-- dAtA[i] = 0x12 } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ValidateRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetShardResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8911,12 +9904,12 @@ func (m *ValidateRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetShardResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8928,20 +9921,20 @@ func (m *ValidateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.PingTablets { - i-- - if m.PingTablets { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.Shard != nil { + size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ValidateResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetShardRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -8954,12 +9947,12 @@ func (m *ValidateResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetShardRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetShardRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -8971,41 +9964,10 @@ func (m *ValidateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.ResultsByKeyspace) > 0 { - for k := range m.ResultsByKeyspace { - v := m.ResultsByKeyspace[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Results[iNdEx]) - copy(dAtA[i:], m.Results[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } return len(dAtA) - i, nil } -func (m *ValidateKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetShardRoutingRulesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9018,12 +9980,12 @@ func (m *ValidateKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetShardRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetShardRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9035,27 +9997,20 @@ func (m *ValidateKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.PingTablets { - i-- - if m.PingTablets { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.ShardRoutingRules != nil { + size, err := m.ShardRoutingRules.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x10 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ValidateKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSrvKeyspaceNamesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9068,12 +10023,12 @@ func (m *ValidateKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSrvKeyspaceNamesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSrvKeyspaceNamesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9085,9 +10040,93 @@ func (m *ValidateKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.ResultsByShard) > 0 { - for k := range m.ResultsByShard { - v := m.ResultsByShard[k] + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetSrvKeyspaceNamesResponse_NameList) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSrvKeyspaceNamesResponse_NameList) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetSrvKeyspaceNamesResponse_NameList) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetSrvKeyspaceNamesResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSrvKeyspaceNamesResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetSrvKeyspaceNamesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Names) > 0 { + for k := range m.Names { + v := m.Names[k] baseI := i size, err := v.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -9104,22 +10143,13 @@ func (m *ValidateKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, err dAtA[i] = 0xa i = encodeVarint(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x12 - } - } - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Results[iNdEx]) - copy(dAtA[i:], m.Results[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) - i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } -func (m *ValidateSchemaKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetSrvKeyspacesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9132,12 +10162,12 @@ func (m *ValidateSchemaKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateSchemaKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSrvKeyspacesRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateSchemaKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSrvKeyspacesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9149,41 +10179,11 @@ func (m *ValidateSchemaKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.IncludeVschema { - i-- - if m.IncludeVschema { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.SkipNoPrimary { - i-- - if m.SkipNoPrimary { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.IncludeViews { - i-- - if m.IncludeViews { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.ExcludeTables) > 0 { - for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ExcludeTables[iNdEx]) - copy(dAtA[i:], m.ExcludeTables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) i-- dAtA[i] = 0x12 } @@ -9198,7 +10198,7 @@ func (m *ValidateSchemaKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int return len(dAtA) - i, nil } -func (m *ValidateSchemaKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSrvKeyspacesResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9211,12 +10211,12 @@ func (m *ValidateSchemaKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateSchemaKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSrvKeyspacesResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateSchemaKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSrvKeyspacesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9228,9 +10228,9 @@ func (m *ValidateSchemaKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (in i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.ResultsByShard) > 0 { - for k := range m.ResultsByShard { - v := m.ResultsByShard[k] + if len(m.SrvKeyspaces) > 0 { + for k := range m.SrvKeyspaces { + v := m.SrvKeyspaces[k] baseI := i size, err := v.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -9247,22 +10247,13 @@ func (m *ValidateSchemaKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (in dAtA[i] = 0xa i = encodeVarint(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x12 - } - } - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Results[iNdEx]) - copy(dAtA[i:], m.Results[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) - i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } -func (m *ValidateShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *UpdateThrottlerConfigRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9275,12 +10266,12 @@ func (m *ValidateShardRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateShardRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *UpdateThrottlerConfigRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UpdateThrottlerConfigRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9292,9 +10283,62 @@ func (m *ValidateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.PingTablets { + if m.ThrottledApp != nil { + size, err := m.ThrottledApp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- - if m.PingTablets { + dAtA[i] = 0x4a + } + if m.CheckAsCheckShard { + i-- + if m.CheckAsCheckShard { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.CheckAsCheckSelf { + i-- + if m.CheckAsCheckSelf { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.CustomQuerySet { + i-- + if m.CustomQuerySet { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.CustomQuery) > 0 { + i -= len(m.CustomQuery) + copy(dAtA[i:], m.CustomQuery) + i = encodeVarint(dAtA, i, uint64(len(m.CustomQuery))) + i-- + dAtA[i] = 0x2a + } + if m.Threshold != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Threshold)))) + i-- + dAtA[i] = 0x21 + } + if m.Disable { + i-- + if m.Disable { dAtA[i] = 1 } else { dAtA[i] = 0 @@ -9302,12 +10346,15 @@ func (m *ValidateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i-- dAtA[i] = 0x18 } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + if m.Enable { i-- - dAtA[i] = 0x12 + if m.Enable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 } if len(m.Keyspace) > 0 { i -= len(m.Keyspace) @@ -9319,7 +10366,7 @@ func (m *ValidateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *ValidateShardResponse) MarshalVT() (dAtA []byte, err error) { +func (m *UpdateThrottlerConfigResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9332,12 +10379,12 @@ func (m *ValidateShardResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateShardResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *UpdateThrottlerConfigResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UpdateThrottlerConfigResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9349,19 +10396,50 @@ func (m *ValidateShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Results[iNdEx]) - copy(dAtA[i:], m.Results[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) - i-- - dAtA[i] = 0xa - } + return len(dAtA) - i, nil +} + +func (m *GetSrvVSchemaRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSrvVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetSrvVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarint(dAtA, i, uint64(len(m.Cell))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ValidateVersionKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetSrvVSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9374,12 +10452,12 @@ func (m *ValidateVersionKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateVersionKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSrvVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateVersionKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSrvVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9391,17 +10469,20 @@ func (m *ValidateVersionKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (in i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if m.SrvVSchema != nil { + size, err := m.SrvVSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ValidateVersionKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSrvVSchemasRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9414,12 +10495,12 @@ func (m *ValidateVersionKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateVersionKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSrvVSchemasRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateVersionKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSrvVSchemasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9431,9 +10512,51 @@ func (m *ValidateVersionKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (i i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.ResultsByShard) > 0 { - for k := range m.ResultsByShard { - v := m.ResultsByShard[k] + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *GetSrvVSchemasResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSrvVSchemasResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetSrvVSchemasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SrvVSchemas) > 0 { + for k := range m.SrvVSchemas { + v := m.SrvVSchemas[k] baseI := i size, err := v.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -9450,22 +10573,13 @@ func (m *ValidateVersionKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (i dAtA[i] = 0xa i = encodeVarint(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x12 - } - } - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Results[iNdEx]) - copy(dAtA[i:], m.Results[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) - i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } -func (m *ValidateVersionShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetTabletRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9478,12 +10592,12 @@ func (m *ValidateVersionShardRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateVersionShardRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetTabletRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateVersionShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9495,24 +10609,20 @@ func (m *ValidateVersionShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Shard) > 0 { - i -= len(m.Shard) - copy(dAtA[i:], m.Shard) - i = encodeVarint(dAtA, i, uint64(len(m.Shard))) - i-- - dAtA[i] = 0x12 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ValidateVersionShardResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetTabletResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9525,12 +10635,12 @@ func (m *ValidateVersionShardResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateVersionShardResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetTabletResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateVersionShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9542,19 +10652,20 @@ func (m *ValidateVersionShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Results[iNdEx]) - copy(dAtA[i:], m.Results[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) - i-- - dAtA[i] = 0xa + if m.Tablet != nil { + size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ValidateVSchemaRequest) MarshalVT() (dAtA []byte, err error) { +func (m *GetTabletsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9567,12 +10678,12 @@ func (m *ValidateVSchemaRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetTabletsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetTabletsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9584,9 +10695,26 @@ func (m *ValidateVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.IncludeViews { + if m.TabletType != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletType)) i-- - if m.IncludeViews { + dAtA[i] = 0x30 + } + if len(m.TabletAliases) > 0 { + for iNdEx := len(m.TabletAliases) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.TabletAliases[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.Strict { + i-- + if m.Strict { dAtA[i] = 1 } else { dAtA[i] = 0 @@ -9594,23 +10722,21 @@ func (m *ValidateVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error i-- dAtA[i] = 0x20 } - if len(m.ExcludeTables) > 0 { - for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ExcludeTables[iNdEx]) - copy(dAtA[i:], m.ExcludeTables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) i-- dAtA[i] = 0x1a } } - if len(m.Shards) > 0 { - for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Shards[iNdEx]) - copy(dAtA[i:], m.Shards[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Shards[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } if len(m.Keyspace) > 0 { i -= len(m.Keyspace) @@ -9622,7 +10748,7 @@ func (m *ValidateVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error return len(dAtA) - i, nil } -func (m *ValidateVSchemaResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetTabletsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -9635,12 +10761,12 @@ func (m *ValidateVSchemaResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidateVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetTabletsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetTabletsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -9652,3626 +10778,17755 @@ func (m *ValidateVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.ResultsByShard) > 0 { - for k := range m.ResultsByShard { - v := m.ResultsByShard[k] - baseI := i - size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if len(m.Tablets) > 0 { + for iNdEx := len(m.Tablets) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Tablets[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarint(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarint(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Results) > 0 { - for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Results[iNdEx]) - copy(dAtA[i:], m.Results[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) - i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } -func encodeVarint(dAtA []byte, offset int, v uint64) int { - offset -= sov(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base +func (m *GetTopologyPathRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ExecuteVtctlCommandRequest) SizeVT() (n int) { + +func (m *GetTopologyPathRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetTopologyPathRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.ActionTimeout != 0 { - n += 1 + sov(uint64(m.ActionTimeout)) + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ExecuteVtctlCommandResponse) SizeVT() (n int) { +func (m *GetTopologyPathResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Event != nil { - l = m.Event.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *TableMaterializeSettings) SizeVT() (n int) { +func (m *GetTopologyPathResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetTopologyPathResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.TargetTable) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.SourceExpression) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.Cell != nil { + size, err := m.Cell.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - l = len(m.CreateDdl) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *TopologyCell) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *MaterializeSettings) SizeVT() (n int) { +func (m *TopologyCell) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TopologyCell) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Workflow) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.SourceKeyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.TargetKeyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.StopAfterCopy { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.TableSettings) > 0 { - for _, e := range m.TableSettings { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Children) > 0 { + for iNdEx := len(m.Children) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Children[iNdEx]) + copy(dAtA[i:], m.Children[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Children[iNdEx]))) + i-- + dAtA[i] = 0x22 } } - l = len(m.Cell) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.TabletTypes) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.ExternalCluster) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.MaterializationIntent != 0 { - n += 1 + sov(uint64(m.MaterializationIntent)) - } - l = len(m.SourceTimeZone) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarint(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x1a } - l = len(m.TargetTimeZone) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarint(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 } - if len(m.SourceShards) > 0 { - for _, s := range m.SourceShards { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa } - l = len(m.OnDdl) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *GetVSchemaRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if m.DeferSecondaryKeys { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *Keyspace) SizeVT() (n int) { +func (m *GetVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.Keyspace != nil { - l = m.Keyspace.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *Shard) SizeVT() (n int) { +func (m *GetVersionRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.Shard != nil { - l = m.Shard.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *Workflow_ReplicationLocation) SizeVT() (n int) { +func (m *GetVersionRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetVersionRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.Shards) > 0 { - for _, s := range m.Shards { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *Workflow_ShardStream) SizeVT() (n int) { +func (m *GetVersionResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetVersionResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetVersionResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Streams) > 0 { - for _, e := range m.Streams { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.TabletControls) > 0 { - for _, e := range m.TabletControls { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarint(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa } - if m.IsPrimaryServing { - n += 2 + return len(dAtA) - i, nil +} + +func (m *GetVSchemaResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *Workflow_Stream_CopyState) SizeVT() (n int) { +func (m *GetVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Table) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.LastPk) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.VSchema != nil { + size, err := m.VSchema.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *Workflow_Stream_Log) SizeVT() (n int) { +func (m *GetWorkflowsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetWorkflowsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetWorkflowsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Id != 0 { - n += 1 + sov(uint64(m.Id)) - } - if m.StreamId != 0 { - n += 1 + sov(uint64(m.StreamId)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Type) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0x22 } - l = len(m.State) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.NameOnly { + i-- + if m.NameOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 } - if m.CreatedAt != nil { - l = m.CreatedAt.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.ActiveOnly { + i-- + if m.ActiveOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 } - if m.UpdatedAt != nil { - l = m.UpdatedAt.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - l = len(m.Message) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *GetWorkflowsResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if m.Count != 0 { - n += 1 + sov(uint64(m.Count)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *Workflow_Stream) SizeVT() (n int) { +func (m *GetWorkflowsResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetWorkflowsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Id != 0 { - n += 1 + sov(uint64(m.Id)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Workflows) > 0 { + for iNdEx := len(m.Workflows) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Workflows[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } } - if m.Tablet != nil { - l = m.Tablet.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.BinlogSource != nil { - l = m.BinlogSource.SizeVT() - n += 1 + l + sov(uint64(l)) - } - l = len(m.Position) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.StopPosition) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.DbName) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.TransactionTimestamp != nil { - l = m.TransactionTimestamp.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.TimeUpdated != nil { - l = m.TimeUpdated.SizeVT() - n += 1 + l + sov(uint64(l)) - } - l = len(m.Message) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if len(m.CopyStates) > 0 { - for _, e := range m.CopyStates { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - if len(m.Logs) > 0 { - for _, e := range m.Logs { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - l = len(m.LogFetchError) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *InitShardPrimaryRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if len(m.Tags) > 0 { - for _, s := range m.Tags { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *Workflow) SizeVT() (n int) { +func (m *InitShardPrimaryRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *InitShardPrimaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Source != nil { - l = m.Source.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.Target != nil { - l = m.Target.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.WaitReplicasTimeout != nil { + size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a } - if m.MaxVReplicationLag != 0 { - n += 1 + sov(uint64(m.MaxVReplicationLag)) + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } - if len(m.ShardStreams) > 0 { - for k, v := range m.ShardStreams { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + if m.PrimaryElectTabletAlias != nil { + size, err := m.PrimaryElectTabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } - l = len(m.WorkflowType) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } - l = len(m.WorkflowSubType) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *AddCellInfoRequest) SizeVT() (n int) { +func (m *InitShardPrimaryResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.CellInfo != nil { - l = m.CellInfo.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *AddCellInfoResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *InitShardPrimaryResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *AddCellsAliasRequest) SizeVT() (n int) { +func (m *InitShardPrimaryResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *AddCellsAliasResponse) SizeVT() (n int) { +func (m *LaunchSchemaMigrationRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ApplyRoutingRulesRequest) SizeVT() (n int) { +func (m *LaunchSchemaMigrationRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LaunchSchemaMigrationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.RoutingRules != nil { - l = m.RoutingRules.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.SkipRebuild { - n += 2 + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0x12 } - if len(m.RebuildCells) > 0 { - for _, s := range m.RebuildCells { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ApplyRoutingRulesResponse) SizeVT() (n int) { +func (m *LaunchSchemaMigrationResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ApplyShardRoutingRulesRequest) SizeVT() (n int) { +func (m *LaunchSchemaMigrationResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LaunchSchemaMigrationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.ShardRoutingRules != nil { - l = m.ShardRoutingRules.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.SkipRebuild { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.RebuildCells) > 0 { - for _, s := range m.RebuildCells { - l = len(s) - n += 1 + l + sov(uint64(l)) + if len(m.RowsAffectedByShard) > 0 { + for k := range m.RowsAffectedByShard { + v := m.RowsAffectedByShard[k] + baseI := i + i = encodeVarint(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ApplyShardRoutingRulesResponse) SizeVT() (n int) { +func (m *MoveTablesCreateRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ApplySchemaRequest) SizeVT() (n int) { +func (m *MoveTablesCreateRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MoveTablesCreateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.AllowLongUnavailability { - n += 2 + if m.AtomicCopy { + i-- + if m.AtomicCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 } - if len(m.Sql) > 0 { - for _, s := range m.Sql { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.NoRoutingRules { + i-- + if m.NoRoutingRules { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 } - l = len(m.DdlStrategy) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.AutoStart { + i-- + if m.AutoStart { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 } - if len(m.UuidList) > 0 { - for _, s := range m.UuidList { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.DeferSecondaryKeys { + i-- + if m.DeferSecondaryKeys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 } - l = len(m.MigrationContext) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.DropForeignKeys { + i-- + if m.DropForeignKeys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 } - if m.WaitReplicasTimeout != nil { - l = m.WaitReplicasTimeout.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.StopAfterCopy { + i-- + if m.StopAfterCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 } - if m.SkipPreflight { - n += 2 + if len(m.OnDdl) > 0 { + i -= len(m.OnDdl) + copy(dAtA[i:], m.OnDdl) + i = encodeVarint(dAtA, i, uint64(len(m.OnDdl))) + i-- + dAtA[i] = 0x6a } - if m.CallerId != nil { - l = m.CallerId.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.SourceTimeZone) > 0 { + i -= len(m.SourceTimeZone) + copy(dAtA[i:], m.SourceTimeZone) + i = encodeVarint(dAtA, i, uint64(len(m.SourceTimeZone))) + i-- + dAtA[i] = 0x62 } - n += len(m.unknownFields) - return n -} - -func (m *ApplySchemaResponse) SizeVT() (n int) { - if m == nil { - return 0 + if len(m.ExternalClusterName) > 0 { + i -= len(m.ExternalClusterName) + copy(dAtA[i:], m.ExternalClusterName) + i = encodeVarint(dAtA, i, uint64(len(m.ExternalClusterName))) + i-- + dAtA[i] = 0x5a } - var l int - _ = l - if len(m.UuidList) > 0 { - for _, s := range m.UuidList { - l = len(s) - n += 1 + l + sov(uint64(l)) + if len(m.ExcludeTables) > 0 { + for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeTables[iNdEx]) + copy(dAtA[i:], m.ExcludeTables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) + i-- + dAtA[i] = 0x52 } } - n += len(m.unknownFields) - return n -} - -func (m *ApplyVSchemaRequest) SizeVT() (n int) { - if m == nil { - return 0 + if len(m.IncludeTables) > 0 { + for iNdEx := len(m.IncludeTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.IncludeTables[iNdEx]) + copy(dAtA[i:], m.IncludeTables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.IncludeTables[iNdEx]))) + i-- + dAtA[i] = 0x4a + } } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.AllTables { + i-- + if m.AllTables { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 } - if m.SkipRebuild { - n += 2 + if len(m.SourceShards) > 0 { + for iNdEx := len(m.SourceShards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SourceShards[iNdEx]) + copy(dAtA[i:], m.SourceShards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.SourceShards[iNdEx]))) + i-- + dAtA[i] = 0x3a + } } - if m.DryRun { - n += 2 + if m.TabletSelectionPreference != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletSelectionPreference)) + i-- + dAtA[i] = 0x30 } - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) + if len(m.TabletTypes) > 0 { + var pksize2 int + for _, num := range m.TabletTypes { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.TabletTypes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x2a } - if m.VSchema != nil { - l = m.VSchema.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x22 + } } - l = len(m.Sql) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.TargetKeyspace) > 0 { + i -= len(m.TargetKeyspace) + copy(dAtA[i:], m.TargetKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.TargetKeyspace))) + i-- + dAtA[i] = 0x1a } - n += len(m.unknownFields) - return n -} - -func (m *ApplyVSchemaResponse) SizeVT() (n int) { - if m == nil { - return 0 + if len(m.SourceKeyspace) > 0 { + i -= len(m.SourceKeyspace) + copy(dAtA[i:], m.SourceKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.SourceKeyspace))) + i-- + dAtA[i] = 0x12 } - var l int - _ = l - if m.VSchema != nil { - l = m.VSchema.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *BackupRequest) SizeVT() (n int) { +func (m *MoveTablesCreateResponse_TabletInfo) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.AllowPrimary { - n += 2 - } - if m.Concurrency != 0 { - n += 1 + sov(uint64(m.Concurrency)) + return nil, nil } - l = len(m.IncrementalFromPos) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *BackupResponse) SizeVT() (n int) { +func (m *MoveTablesCreateResponse_TabletInfo) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MoveTablesCreateResponse_TabletInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) - } - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.Created { + i-- + if m.Created { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 } - if m.Event != nil { - l = m.Event.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.Tablet != nil { + size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *BackupShardRequest) SizeVT() (n int) { +func (m *MoveTablesCreateResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.AllowPrimary { - n += 2 + return nil, nil } - if m.Concurrency != 0 { - n += 1 + sov(uint64(m.Concurrency)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ChangeTabletTypeRequest) SizeVT() (n int) { +func (m *MoveTablesCreateResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MoveTablesCreateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.DbType != 0 { - n += 1 + sov(uint64(m.DbType)) + if len(m.Details) > 0 { + for iNdEx := len(m.Details) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Details[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } } - if m.DryRun { - n += 2 + if len(m.Summary) > 0 { + i -= len(m.Summary) + copy(dAtA[i:], m.Summary) + i = encodeVarint(dAtA, i, uint64(len(m.Summary))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ChangeTabletTypeResponse) SizeVT() (n int) { +func (m *MoveTablesCompleteRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.BeforeTablet != nil { - l = m.BeforeTablet.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.AfterTablet != nil { - l = m.AfterTablet.SizeVT() - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.WasDryRun { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *CreateKeyspaceRequest) SizeVT() (n int) { +func (m *MoveTablesCompleteRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MoveTablesCompleteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Force { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.AllowEmptyVSchema { - n += 2 + if m.DryRun { + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 } - if len(m.ServedFroms) > 0 { - for _, e := range m.ServedFroms { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.RenameTables { + i-- + if m.RenameTables { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x30 } - if m.Type != 0 { - n += 1 + sov(uint64(m.Type)) + if m.KeepRoutingRules { + i-- + if m.KeepRoutingRules { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 } - l = len(m.BaseKeyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.KeepData { + i-- + if m.KeepData { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } - if m.SnapshotTime != nil { - l = m.SnapshotTime.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.TargetKeyspace) > 0 { + i -= len(m.TargetKeyspace) + copy(dAtA[i:], m.TargetKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.TargetKeyspace))) + i-- + dAtA[i] = 0x1a } - l = len(m.DurabilityPolicy) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *CreateKeyspaceResponse) SizeVT() (n int) { +func (m *MoveTablesCompleteResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Keyspace != nil { - l = m.Keyspace.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *CreateShardRequest) SizeVT() (n int) { +func (m *MoveTablesCompleteResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MoveTablesCompleteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.ShardName) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.Force { - n += 2 + if len(m.DryRunResults) > 0 { + for iNdEx := len(m.DryRunResults) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRunResults[iNdEx]) + copy(dAtA[i:], m.DryRunResults[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.DryRunResults[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } - if m.IncludeParent { - n += 2 + if len(m.Summary) > 0 { + i -= len(m.Summary) + copy(dAtA[i:], m.Summary) + i = encodeVarint(dAtA, i, uint64(len(m.Summary))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *CreateShardResponse) SizeVT() (n int) { +func (m *PingTabletRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.Keyspace != nil { - l = m.Keyspace.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.Shard != nil { - l = m.Shard.SizeVT() - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.ShardAlreadyExists { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *DeleteCellInfoRequest) SizeVT() (n int) { +func (m *PingTabletRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PingTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.Force { - n += 2 + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *DeleteCellInfoResponse) SizeVT() (n int) { +func (m *PingTabletResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *DeleteCellsAliasRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n +func (m *PingTabletResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *DeleteCellsAliasResponse) SizeVT() (n int) { +func (m *PingTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil } -func (m *DeleteKeyspaceRequest) SizeVT() (n int) { +func (m *PlannedReparentShardRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Recursive { - n += 2 + return nil, nil } - if m.Force { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *DeleteKeyspaceResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *PlannedReparentShardRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *DeleteShardsRequest) SizeVT() (n int) { +func (m *PlannedReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Shards) > 0 { - for _, e := range m.Shards { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.WaitReplicasTimeout != nil { + size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a } - if m.Recursive { - n += 2 + if m.AvoidPrimary != nil { + size, err := m.AvoidPrimary.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 } - if m.EvenIfServing { - n += 2 + if m.NewPrimary != nil { + size, err := m.NewPrimary.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } - if m.Force { - n += 2 + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - return n + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *DeleteShardsResponse) SizeVT() (n int) { +func (m *PlannedReparentShardResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *DeleteSrvVSchemaRequest) SizeVT() (n int) { +func (m *PlannedReparentShardResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PlannedReparentShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Cell) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *DeleteSrvVSchemaResponse) SizeVT() (n int) { - if m == nil { - return 0 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *DeleteTabletsRequest) SizeVT() (n int) { - if m == nil { - return 0 + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } } - var l int - _ = l - if len(m.TabletAliases) > 0 { - for _, e := range m.TabletAliases { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.PromotedPrimary != nil { + size, err := m.PromotedPrimary.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } - if m.AllowPrimary { - n += 2 + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - return n + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *DeleteTabletsResponse) SizeVT() (n int) { +func (m *RebuildKeyspaceGraphRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *EmergencyReparentShardRequest) SizeVT() (n int) { +func (m *RebuildKeyspaceGraphRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RebuildKeyspaceGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.NewPrimary != nil { - l = m.NewPrimary.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.IgnoreReplicas) > 0 { - for _, e := range m.IgnoreReplicas { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.AllowPartial { + i-- + if m.AllowPartial { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 } - if m.WaitReplicasTimeout != nil { - l = m.WaitReplicasTimeout.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } - if m.PreventCrossCellPromotion { - n += 2 + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *EmergencyReparentShardResponse) SizeVT() (n int) { +func (m *RebuildKeyspaceGraphResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.PromotedPrimary != nil { - l = m.PromotedPrimary.SizeVT() - n += 1 + l + sov(uint64(l)) + return nil, nil } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ExecuteFetchAsAppRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) - } - l = len(m.Query) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.MaxRows != 0 { - n += 1 + sov(uint64(m.MaxRows)) - } - if m.UsePool { - n += 2 - } - n += len(m.unknownFields) - return n +func (m *RebuildKeyspaceGraphResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ExecuteFetchAsAppResponse) SizeVT() (n int) { +func (m *RebuildKeyspaceGraphResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Result != nil { - l = m.Result.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ExecuteFetchAsDBARequest) SizeVT() (n int) { +func (m *RebuildVSchemaGraphRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) - } - l = len(m.Query) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.MaxRows != 0 { - n += 1 + sov(uint64(m.MaxRows)) - } - if m.DisableBinlogs { - n += 2 + return nil, nil } - if m.ReloadSchema { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ExecuteFetchAsDBAResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Result != nil { - l = m.Result.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n +func (m *RebuildVSchemaGraphRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ExecuteHookRequest) SizeVT() (n int) { +func (m *RebuildVSchemaGraphRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.TabletHookRequest != nil { - l = m.TabletHookRequest.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ExecuteHookResponse) SizeVT() (n int) { +func (m *RebuildVSchemaGraphResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.HookResult != nil { - l = m.HookResult.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *FindAllShardsInKeyspaceRequest) SizeVT() (n int) { +func (m *RebuildVSchemaGraphResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RebuildVSchemaGraphResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *FindAllShardsInKeyspaceResponse) SizeVT() (n int) { +func (m *RefreshStateRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.Shards) > 0 { - for k, v := range m.Shards { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetBackupsRequest) SizeVT() (n int) { +func (m *RefreshStateRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RefreshStateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Limit != 0 { - n += 1 + sov(uint64(m.Limit)) - } - if m.Detailed { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.DetailedLimit != 0 { - n += 1 + sov(uint64(m.DetailedLimit)) + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetBackupsResponse) SizeVT() (n int) { +func (m *RefreshStateResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.Backups) > 0 { - for _, e := range m.Backups { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetCellInfoRequest) SizeVT() (n int) { +func (m *RefreshStateResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RefreshStateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Cell) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetCellInfoResponse) SizeVT() (n int) { +func (m *RefreshStateByShardRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.CellInfo != nil { - l = m.CellInfo.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetCellInfoNamesRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *RefreshStateByShardRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetCellInfoNamesResponse) SizeVT() (n int) { +func (m *RefreshStateByShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Names) > 0 { - for _, s := range m.Names { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - n += len(m.unknownFields) - return n + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *GetCellsAliasesRequest) SizeVT() (n int) { +func (m *RefreshStateByShardResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *GetCellsAliasesResponse) SizeVT() (n int) { +func (m *RefreshStateByShardResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RefreshStateByShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Aliases) > 0 { - for k, v := range m.Aliases { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.PartialRefreshDetails) > 0 { + i -= len(m.PartialRefreshDetails) + copy(dAtA[i:], m.PartialRefreshDetails) + i = encodeVarint(dAtA, i, uint64(len(m.PartialRefreshDetails))) + i-- + dAtA[i] = 0x12 + } + if m.IsPartialRefresh { + i-- + if m.IsPartialRefresh { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x8 } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetFullStatusRequest) SizeVT() (n int) { +func (m *ReloadSchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetFullStatusResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != nil { - l = m.Status.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n +func (m *ReloadSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetKeyspacesRequest) SizeVT() (n int) { +func (m *ReloadSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *GetKeyspacesResponse) SizeVT() (n int) { +func (m *ReloadSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.Keyspaces) > 0 { - for _, e := range m.Keyspaces { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetKeyspaceRequest) SizeVT() (n int) { +func (m *ReloadSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReloadSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetKeyspaceResponse) SizeVT() (n int) { +func (m *ReloadSchemaKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Keyspace != nil { - l = m.Keyspace.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetPermissionsRequest) SizeVT() (n int) { +func (m *ReloadSchemaKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReloadSchemaKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if m.Concurrency != 0 { + i = encodeVarint(dAtA, i, uint64(m.Concurrency)) + i-- + dAtA[i] = 0x20 + } + if m.IncludePrimary { + i-- + if m.IncludePrimary { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.WaitPosition) > 0 { + i -= len(m.WaitPosition) + copy(dAtA[i:], m.WaitPosition) + i = encodeVarint(dAtA, i, uint64(len(m.WaitPosition))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *GetPermissionsResponse) SizeVT() (n int) { +func (m *ReloadSchemaKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Permissions != nil { - l = m.Permissions.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetRoutingRulesRequest) SizeVT() (n int) { +func (m *ReloadSchemaKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReloadSchemaKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *GetRoutingRulesResponse) SizeVT() (n int) { +func (m *ReloadSchemaShardRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.RoutingRules != nil { - l = m.RoutingRules.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetSchemaRequest) SizeVT() (n int) { +func (m *ReloadSchemaShardRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReloadSchemaShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.Tables) > 0 { - for _, s := range m.Tables { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if m.Concurrency != 0 { + i = encodeVarint(dAtA, i, uint64(m.Concurrency)) + i-- + dAtA[i] = 0x28 } - if len(m.ExcludeTables) > 0 { - for _, s := range m.ExcludeTables { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.IncludePrimary { + i-- + if m.IncludePrimary { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x20 } - if m.IncludeViews { - n += 2 - } - if m.TableNamesOnly { - n += 2 + if len(m.WaitPosition) > 0 { + i -= len(m.WaitPosition) + copy(dAtA[i:], m.WaitPosition) + i = encodeVarint(dAtA, i, uint64(len(m.WaitPosition))) + i-- + dAtA[i] = 0x1a } - if m.TableSizesOnly { - n += 2 + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } - if m.TableSchemaOnly { - n += 2 + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetSchemaResponse) SizeVT() (n int) { +func (m *ReloadSchemaShardResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Schema != nil { - l = m.Schema.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetShardRequest) SizeVT() (n int) { +func (m *ReloadSchemaShardResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReloadSchemaShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.ShardName) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Events[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetShardResponse) SizeVT() (n int) { +func (m *RemoveBackupRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Shard != nil { - l = m.Shard.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetShardRoutingRulesRequest) SizeVT() (n int) { +func (m *RemoveBackupRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *GetShardRoutingRulesResponse) SizeVT() (n int) { +func (m *RemoveBackupResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.ShardRoutingRules != nil { - l = m.ShardRoutingRules.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetSrvKeyspaceNamesRequest) SizeVT() (n int) { +func (m *RemoveBackupResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetSrvKeyspaceNamesResponse_NameList) SizeVT() (n int) { +func (m *RemoveKeyspaceCellRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.Names) > 0 { - for _, s := range m.Names { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetSrvKeyspaceNamesResponse) SizeVT() (n int) { +func (m *RemoveKeyspaceCellRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveKeyspaceCellRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Names) > 0 { - for k, v := range m.Names { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Recursive { + i-- + if m.Recursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x18 } - n += len(m.unknownFields) - return n + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarint(dAtA, i, uint64(len(m.Cell))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *GetSrvKeyspacesRequest) SizeVT() (n int) { +func (m *RemoveKeyspaceCellResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetSrvKeyspacesResponse) SizeVT() (n int) { +func (m *RemoveKeyspaceCellResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveKeyspaceCellResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.SrvKeyspaces) > 0 { - for k, v := range m.SrvKeyspaces { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *UpdateThrottlerConfigRequest) SizeVT() (n int) { +func (m *RemoveShardCellRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - if m.Enable { - n += 2 + return dAtA[:n], nil +} + +func (m *RemoveShardCellRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveShardCellRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil } - if m.Disable { - n += 2 + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.Threshold != 0 { - n += 9 + if m.Recursive { + i-- + if m.Recursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 } - l = len(m.CustomQuery) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } - if m.CustomQuerySet { - n += 2 + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarint(dAtA, i, uint64(len(m.Cell))) + i-- + dAtA[i] = 0x1a } - if m.CheckAsCheckSelf { - n += 2 + if len(m.ShardName) > 0 { + i -= len(m.ShardName) + copy(dAtA[i:], m.ShardName) + i = encodeVarint(dAtA, i, uint64(len(m.ShardName))) + i-- + dAtA[i] = 0x12 } - if m.CheckAsCheckShard { - n += 2 + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *UpdateThrottlerConfigResponse) SizeVT() (n int) { +func (m *RemoveShardCellResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *GetSrvVSchemaRequest) SizeVT() (n int) { +func (m *RemoveShardCellResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveShardCellResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Cell) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetSrvVSchemaResponse) SizeVT() (n int) { +func (m *ReparentTabletRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.SrvVSchema != nil { - l = m.SrvVSchema.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetSrvVSchemasRequest) SizeVT() (n int) { +func (m *ReparentTabletRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReparentTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Tablet != nil { + size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetSrvVSchemasResponse) SizeVT() (n int) { +func (m *ReparentTabletResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.SrvVSchemas) > 0 { - for k, v := range m.SrvVSchemas { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetTabletRequest) SizeVT() (n int) { +func (m *ReparentTabletResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReparentTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if m.Primary != nil { + size, err := m.Primary.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *GetTabletResponse) SizeVT() (n int) { +func (m *ReshardCreateRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Tablet != nil { - l = m.Tablet.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetTabletsRequest) SizeVT() (n int) { +func (m *ReshardCreateRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReshardCreateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.AutoStart { + i-- + if m.AutoStart { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 } - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.DeferSecondaryKeys { + i-- + if m.DeferSecondaryKeys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + if m.StopAfterCopy { + i-- + if m.StopAfterCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x50 } - if m.Strict { - n += 2 + if len(m.OnDdl) > 0 { + i -= len(m.OnDdl) + copy(dAtA[i:], m.OnDdl) + i = encodeVarint(dAtA, i, uint64(len(m.OnDdl))) + i-- + dAtA[i] = 0x4a } - if len(m.TabletAliases) > 0 { - for _, e := range m.TabletAliases { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.SkipSchemaCopy { + i-- + if m.SkipSchemaCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x40 } - if m.TabletType != 0 { - n += 1 + sov(uint64(m.TabletType)) + if m.TabletSelectionPreference != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletSelectionPreference)) + i-- + dAtA[i] = 0x38 } - n += len(m.unknownFields) - return n -} - -func (m *GetTabletsResponse) SizeVT() (n int) { - if m == nil { - return 0 + if len(m.TabletTypes) > 0 { + var pksize2 int + for _, num := range m.TabletTypes { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.TabletTypes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x32 } - var l int - _ = l - if len(m.Tablets) > 0 { - for _, e := range m.Tablets { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x2a } } - n += len(m.unknownFields) - return n -} - -func (m *GetTopologyPathRequest) SizeVT() (n int) { - if m == nil { - return 0 + if len(m.TargetShards) > 0 { + for iNdEx := len(m.TargetShards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TargetShards[iNdEx]) + copy(dAtA[i:], m.TargetShards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.TargetShards[iNdEx]))) + i-- + dAtA[i] = 0x22 + } } - var l int - _ = l - l = len(m.Path) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.SourceShards) > 0 { + for iNdEx := len(m.SourceShards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SourceShards[iNdEx]) + copy(dAtA[i:], m.SourceShards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.SourceShards[iNdEx]))) + i-- + dAtA[i] = 0x1a + } } - n += len(m.unknownFields) - return n + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x12 + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *GetTopologyPathResponse) SizeVT() (n int) { +func (m *RestoreFromBackupRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Cell != nil { - l = m.Cell.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *TopologyCell) SizeVT() (n int) { +func (m *RestoreFromBackupRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RestoreFromBackupRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Path) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.RestoreToTimestamp != nil { + size, err := m.RestoreToTimestamp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a } - l = len(m.Data) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.DryRun { + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } - if len(m.Children) > 0 { - for _, s := range m.Children { - l = len(s) - n += 1 + l + sov(uint64(l)) + if len(m.RestoreToPos) > 0 { + i -= len(m.RestoreToPos) + copy(dAtA[i:], m.RestoreToPos) + i = encodeVarint(dAtA, i, uint64(len(m.RestoreToPos))) + i-- + dAtA[i] = 0x1a + } + if m.BackupTime != nil { + size, err := m.BackupTime.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - return n + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *GetVSchemaRequest) SizeVT() (n int) { +func (m *RestoreFromBackupResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetVersionRequest) SizeVT() (n int) { +func (m *RestoreFromBackupResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RestoreFromBackupResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Event != nil { + size, err := m.Event.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x1a + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0x12 + } if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *GetVersionResponse) SizeVT() (n int) { +func (m *RetrySchemaMigrationRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - l = len(m.Version) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *GetVSchemaResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.VSchema != nil { - l = m.VSchema.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n +func (m *RetrySchemaMigrationRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetWorkflowsRequest) SizeVT() (n int) { +func (m *RetrySchemaMigrationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.ActiveOnly { - n += 2 + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - return n + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *GetWorkflowsResponse) SizeVT() (n int) { +func (m *RetrySchemaMigrationResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.Workflows) > 0 { - for _, e := range m.Workflows { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *InitShardPrimaryRequest) SizeVT() (n int) { +func (m *RetrySchemaMigrationResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RetrySchemaMigrationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.PrimaryElectTabletAlias != nil { - l = m.PrimaryElectTabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.Force { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.WaitReplicasTimeout != nil { - l = m.WaitReplicasTimeout.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.RowsAffectedByShard) > 0 { + for k := range m.RowsAffectedByShard { + v := m.RowsAffectedByShard[k] + baseI := i + i = encodeVarint(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *InitShardPrimaryResponse) SizeVT() (n int) { +func (m *RunHealthCheckRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *PingTabletRequest) SizeVT() (n int) { +func (m *RunHealthCheckRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RunHealthCheckRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *PingTabletResponse) SizeVT() (n int) { +func (m *RunHealthCheckResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PlannedReparentShardRequest) SizeVT() (n int) { +func (m *RunHealthCheckResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RunHealthCheckResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.NewPrimary != nil { - l = m.NewPrimary.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.AvoidPrimary != nil { - l = m.AvoidPrimary.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.WaitReplicasTimeout != nil { - l = m.WaitReplicasTimeout.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *PlannedReparentShardResponse) SizeVT() (n int) { +func (m *SetKeyspaceDurabilityPolicyRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.PromotedPrimary != nil { - l = m.PromotedPrimary.SizeVT() - n += 1 + l + sov(uint64(l)) + return nil, nil } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *RebuildKeyspaceGraphRequest) SizeVT() (n int) { +func (m *SetKeyspaceDurabilityPolicyRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetKeyspaceDurabilityPolicyRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if len(m.DurabilityPolicy) > 0 { + i -= len(m.DurabilityPolicy) + copy(dAtA[i:], m.DurabilityPolicy) + i = encodeVarint(dAtA, i, uint64(len(m.DurabilityPolicy))) + i-- + dAtA[i] = 0x12 } - if m.AllowPartial { - n += 2 + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *RebuildKeyspaceGraphResponse) SizeVT() (n int) { +func (m *SetKeyspaceDurabilityPolicyResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *RebuildVSchemaGraphRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n +func (m *SetKeyspaceDurabilityPolicyResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RebuildVSchemaGraphResponse) SizeVT() (n int) { +func (m *SetKeyspaceDurabilityPolicyResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - n += len(m.unknownFields) - return n + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Keyspace != nil { + size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *RefreshStateRequest) SizeVT() (n int) { +func (m *SetKeyspaceServedFromRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *RefreshStateResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *SetKeyspaceServedFromRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *RefreshStateByShardRequest) SizeVT() (n int) { +func (m *SetKeyspaceServedFromRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) + if len(m.SourceKeyspace) > 0 { + i -= len(m.SourceKeyspace) + copy(dAtA[i:], m.SourceKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.SourceKeyspace))) + i-- + dAtA[i] = 0x2a + } + if m.Remove { + i-- + if m.Remove { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x1a } } - n += len(m.unknownFields) - return n + if m.TabletType != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletType)) + i-- + dAtA[i] = 0x10 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *RefreshStateByShardResponse) SizeVT() (n int) { +func (m *SetKeyspaceServedFromResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if m.IsPartialRefresh { - n += 2 + return nil, nil } - l = len(m.PartialRefreshDetails) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ReloadSchemaRequest) SizeVT() (n int) { +func (m *SetKeyspaceServedFromResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetKeyspaceServedFromResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if m.Keyspace != nil { + size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ReloadSchemaResponse) SizeVT() (n int) { +func (m *SetKeyspaceShardingInfoRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ReloadSchemaKeyspaceRequest) SizeVT() (n int) { +func (m *SetKeyspaceShardingInfoRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetKeyspaceShardingInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.WaitPosition) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.IncludePrimary { - n += 2 + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 } - if m.Concurrency != 0 { - n += 1 + sov(uint64(m.Concurrency)) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ReloadSchemaKeyspaceResponse) SizeVT() (n int) { +func (m *SetKeyspaceShardingInfoResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ReloadSchemaShardRequest) SizeVT() (n int) { +func (m *SetKeyspaceShardingInfoResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetKeyspaceShardingInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.WaitPosition) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.IncludePrimary { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.Concurrency != 0 { - n += 1 + sov(uint64(m.Concurrency)) + if m.Keyspace != nil { + size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ReloadSchemaShardResponse) SizeVT() (n int) { +func (m *SetShardIsPrimaryServingRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *RemoveBackupRequest) SizeVT() (n int) { +func (m *SetShardIsPrimaryServingRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetShardIsPrimaryServingRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.IsServing { + i-- + if m.IsServing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 } - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - return n + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *RemoveBackupResponse) SizeVT() (n int) { +func (m *SetShardIsPrimaryServingResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *RemoveKeyspaceCellRequest) SizeVT() (n int) { +func (m *SetShardIsPrimaryServingResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetShardIsPrimaryServingResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Cell) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Force { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.Recursive { - n += 2 + if m.Shard != nil { + size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *RemoveKeyspaceCellResponse) SizeVT() (n int) { +func (m *SetShardTabletControlRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *RemoveShardCellRequest) SizeVT() (n int) { +func (m *SetShardTabletControlRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetShardTabletControlRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.ShardName) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.Remove { + i-- + if m.Remove { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 } - l = len(m.Cell) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.DisableQueryService { + i-- + if m.DisableQueryService { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 } - if m.Force { - n += 2 + if len(m.DeniedTables) > 0 { + for iNdEx := len(m.DeniedTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DeniedTables[iNdEx]) + copy(dAtA[i:], m.DeniedTables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.DeniedTables[iNdEx]))) + i-- + dAtA[i] = 0x2a + } } - if m.Recursive { - n += 2 + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x22 + } } - n += len(m.unknownFields) - return n + if m.TabletType != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletType)) + i-- + dAtA[i] = 0x18 + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *RemoveShardCellResponse) SizeVT() (n int) { +func (m *SetShardTabletControlResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ReparentTabletRequest) SizeVT() (n int) { +func (m *SetShardTabletControlResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetShardTabletControlResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Tablet != nil { - l = m.Tablet.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if m.Shard != nil { + size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ReparentTabletResponse) SizeVT() (n int) { +func (m *SetWritableRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.Primary != nil { - l = m.Primary.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *RestoreFromBackupRequest) SizeVT() (n int) { +func (m *SetWritableRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetWritableRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.BackupTime != nil { - l = m.BackupTime.SizeVT() - n += 1 + l + sov(uint64(l)) - } - l = len(m.RestoreToPos) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.DryRun { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n -} - -func (m *RestoreFromBackupResponse) SizeVT() (n int) { - if m == nil { - return 0 + if m.Writable { + i-- + if m.Writable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 } - var l int - _ = l if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) - } - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Event != nil { - l = m.Event.SizeVT() - n += 1 + l + sov(uint64(l)) + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *RunHealthCheckRequest) SizeVT() (n int) { +func (m *SetWritableResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *RunHealthCheckResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n +func (m *SetWritableResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *SetKeyspaceDurabilityPolicyRequest) SizeVT() (n int) { +func (m *SetWritableResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.DurabilityPolicy) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *SetKeyspaceDurabilityPolicyResponse) SizeVT() (n int) { +func (m *ShardReplicationAddRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Keyspace != nil { - l = m.Keyspace.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SetKeyspaceServedFromRequest) SizeVT() (n int) { +func (m *ShardReplicationAddRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ShardReplicationAddRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.TabletType != 0 { - n += 1 + sov(uint64(m.TabletType)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } - if m.Remove { - n += 2 + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } - l = len(m.SourceKeyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *SetKeyspaceServedFromResponse) SizeVT() (n int) { +func (m *ShardReplicationAddResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Keyspace != nil { - l = m.Keyspace.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SetKeyspaceShardingInfoRequest) SizeVT() (n int) { +func (m *ShardReplicationAddResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ShardReplicationAddResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Force { - n += 2 + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *SetKeyspaceShardingInfoResponse) SizeVT() (n int) { +func (m *ShardReplicationFixRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Keyspace != nil { - l = m.Keyspace.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SetShardIsPrimaryServingRequest) SizeVT() (n int) { +func (m *ShardReplicationFixRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ShardReplicationFixRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Cell) > 0 { + i -= len(m.Cell) + copy(dAtA[i:], m.Cell) + i = encodeVarint(dAtA, i, uint64(len(m.Cell))) + i-- + dAtA[i] = 0x1a } - if m.IsServing { - n += 2 + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - return n + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *SetShardIsPrimaryServingResponse) SizeVT() (n int) { +func (m *ShardReplicationFixResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Shard != nil { - l = m.Shard.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SetShardTabletControlRequest) SizeVT() (n int) { +func (m *ShardReplicationFixResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ShardReplicationFixResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.TabletType != 0 { - n += 1 + sov(uint64(m.TabletType)) - } - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.DeniedTables) > 0 { - for _, s := range m.DeniedTables { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.Error != nil { + size, err := m.Error.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - if m.DisableQueryService { - n += 2 - } - if m.Remove { - n += 2 - } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *SetShardTabletControlResponse) SizeVT() (n int) { +func (m *ShardReplicationPositionsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Shard != nil { - l = m.Shard.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SetWritableRequest) SizeVT() (n int) { +func (m *ShardReplicationPositionsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ShardReplicationPositionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.Writable { - n += 2 + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - return n + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *SetWritableResponse) SizeVT() (n int) { +func (m *ShardReplicationPositionsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ShardReplicationAddRequest) SizeVT() (n int) { +func (m *ShardReplicationPositionsResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ShardReplicationPositionsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.TabletMap) > 0 { + for k := range m.TabletMap { + v := m.TabletMap[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } } - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if len(m.ReplicationStatuses) > 0 { + for k := range m.ReplicationStatuses { + v := m.ReplicationStatuses[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ShardReplicationAddResponse) SizeVT() (n int) { +func (m *ShardReplicationRemoveRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ShardReplicationFixRequest) SizeVT() (n int) { +func (m *ShardReplicationRemoveRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ShardReplicationRemoveRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Cell) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } - n += len(m.unknownFields) - return n + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ShardReplicationFixResponse) SizeVT() (n int) { +func (m *ShardReplicationRemoveResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Error != nil { - l = m.Error.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ShardReplicationPositionsRequest) SizeVT() (n int) { +func (m *ShardReplicationRemoveResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ShardReplicationRemoveResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ShardReplicationPositionsResponse) SizeVT() (n int) { +func (m *SleepTabletRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if len(m.ReplicationStatuses) > 0 { - for k, v := range m.ReplicationStatuses { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + return nil, nil } - if len(m.TabletMap) > 0 { - for k, v := range m.TabletMap { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ShardReplicationRemoveRequest) SizeVT() (n int) { +func (m *SleepTabletRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SleepTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.Duration != nil { + size, err := m.Duration.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 } if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ShardReplicationRemoveResponse) SizeVT() (n int) { +func (m *SleepTabletResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *SleepTabletRequest) SizeVT() (n int) { +func (m *SleepTabletResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SleepTabletResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if m.Duration != nil { - l = m.Duration.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *SleepTabletResponse) SizeVT() (n int) { +func (m *SourceShardAddRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *SourceShardAddRequest) SizeVT() (n int) { +func (m *SourceShardAddRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SourceShardAddRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if m.KeyRange != nil { + size, err := m.KeyRange.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if len(m.SourceShard) > 0 { + i -= len(m.SourceShard) + copy(dAtA[i:], m.SourceShard) + i = encodeVarint(dAtA, i, uint64(len(m.SourceShard))) + i-- + dAtA[i] = 0x2a + } + if len(m.SourceKeyspace) > 0 { + i -= len(m.SourceKeyspace) + copy(dAtA[i:], m.SourceKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.SourceKeyspace))) + i-- + dAtA[i] = 0x22 } if m.Uid != 0 { - n += 1 + sov(uint64(m.Uid)) + i = encodeVarint(dAtA, i, uint64(m.Uid)) + i-- + dAtA[i] = 0x18 } - l = len(m.SourceKeyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } - l = len(m.SourceShard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - if m.KeyRange != nil { - l = m.KeyRange.SizeVT() - n += 1 + l + sov(uint64(l)) + return len(dAtA) - i, nil +} + +func (m *SourceShardAddResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if len(m.Tables) > 0 { - for _, s := range m.Tables { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *SourceShardAddResponse) SizeVT() (n int) { +func (m *SourceShardAddResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SourceShardAddResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } if m.Shard != nil { - l = m.Shard.SizeVT() - n += 1 + l + sov(uint64(l)) + size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *SourceShardDeleteRequest) SizeVT() (n int) { +func (m *SourceShardDeleteRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceShardDeleteRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SourceShardDeleteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } if m.Uid != 0 { - n += 1 + sov(uint64(m.Uid)) + i = encodeVarint(dAtA, i, uint64(m.Uid)) + i-- + dAtA[i] = 0x18 } - n += len(m.unknownFields) - return n + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *SourceShardDeleteResponse) SizeVT() (n int) { +func (m *SourceShardDeleteResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.Shard != nil { - l = m.Shard.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *StartReplicationRequest) SizeVT() (n int) { +func (m *SourceShardDeleteResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SourceShardDeleteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + if m.Shard != nil { + size, err := m.Shard.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *StartReplicationResponse) SizeVT() (n int) { +func (m *StartReplicationRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *StopReplicationRequest) SizeVT() (n int) { +func (m *StartReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StartReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } if m.TabletAlias != nil { - l = m.TabletAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *StopReplicationResponse) SizeVT() (n int) { +func (m *StartReplicationResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - n += len(m.unknownFields) - return n + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *TabletExternallyReparentedRequest) SizeVT() (n int) { +func (m *StartReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StartReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if m.Tablet != nil { - l = m.Tablet.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *TabletExternallyReparentedResponse) SizeVT() (n int) { +func (m *StopReplicationRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.NewPrimary != nil { - l = m.NewPrimary.SizeVT() - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.OldPrimary != nil { - l = m.OldPrimary.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *UpdateCellInfoRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.CellInfo != nil { - l = m.CellInfo.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n +func (m *StopReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *UpdateCellInfoResponse) SizeVT() (n int) { +func (m *StopReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.CellInfo != nil { - l = m.CellInfo.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *UpdateCellsAliasRequest) SizeVT() (n int) { +func (m *StopReplicationResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.CellsAlias != nil { - l = m.CellsAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *UpdateCellsAliasResponse) SizeVT() (n int) { +func (m *StopReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *StopReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.CellsAlias != nil { - l = m.CellsAlias.SizeVT() - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ValidateRequest) SizeVT() (n int) { +func (m *TabletExternallyReparentedRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if m.PingTablets { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ValidateResponse) SizeVT() (n int) { +func (m *TabletExternallyReparentedRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TabletExternallyReparentedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Results) > 0 { - for _, s := range m.Results { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.ResultsByKeyspace) > 0 { - for k, v := range m.ResultsByKeyspace { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + if m.Tablet != nil { + size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ValidateKeyspaceRequest) SizeVT() (n int) { +func (m *TabletExternallyReparentedResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - if m.PingTablets { - n += 2 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ValidateKeyspaceResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Results) > 0 { - for _, s := range m.Results { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - if len(m.ResultsByShard) > 0 { - for k, v := range m.ResultsByShard { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } - } - n += len(m.unknownFields) - return n +func (m *TabletExternallyReparentedResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateSchemaKeyspaceRequest) SizeVT() (n int) { +func (m *TabletExternallyReparentedResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.ExcludeTables) > 0 { - for _, s := range m.ExcludeTables { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.OldPrimary != nil { + size, err := m.OldPrimary.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 } - if m.IncludeViews { - n += 2 + if m.NewPrimary != nil { + size, err := m.NewPrimary.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a } - if m.SkipNoPrimary { - n += 2 + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 } - if m.IncludeVschema { - n += 2 + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ValidateSchemaKeyspaceResponse) SizeVT() (n int) { +func (m *UpdateCellInfoRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - if len(m.Results) > 0 { - for _, s := range m.Results { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + return nil, nil } - if len(m.ResultsByShard) > 0 { - for k, v := range m.ResultsByShard { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ValidateShardRequest) SizeVT() (n int) { +func (m *UpdateCellInfoRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateCellInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + if m.CellInfo != nil { + size, err := m.CellInfo.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 } - if m.PingTablets { - n += 2 + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ValidateShardResponse) SizeVT() (n int) { +func (m *UpdateCellInfoResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil } - var l int - _ = l - if len(m.Results) > 0 { - for _, s := range m.Results { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ValidateVersionKeyspaceRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n +func (m *UpdateCellInfoResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateVersionKeyspaceResponse) SizeVT() (n int) { +func (m *UpdateCellInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - if len(m.Results) > 0 { - for _, s := range m.Results { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.ResultsByShard) > 0 { - for k, v := range m.ResultsByShard { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() - } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + if m.CellInfo != nil { + size, err := m.CellInfo.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 } - n += len(m.unknownFields) - return n + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ValidateVersionShardRequest) SizeVT() (n int) { +func (m *UpdateCellsAliasRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) + return nil, nil } - l = len(m.Shard) - if l > 0 { - n += 1 + l + sov(uint64(l)) + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - n += len(m.unknownFields) - return n + return dAtA[:n], nil } -func (m *ValidateVersionShardResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Results) > 0 { - for _, s := range m.Results { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n +func (m *UpdateCellsAliasRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ValidateVSchemaRequest) SizeVT() (n int) { +func (m *UpdateCellsAliasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { - return 0 + return 0, nil } + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if len(m.Shards) > 0 { - for _, s := range m.Shards { - l = len(s) - n += 1 + l + sov(uint64(l)) - } + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if len(m.ExcludeTables) > 0 { - for _, s := range m.ExcludeTables { - l = len(s) - n += 1 + l + sov(uint64(l)) + if m.CellsAlias != nil { + size, err := m.CellsAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 } - if m.IncludeViews { - n += 2 + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa } - n += len(m.unknownFields) - return n + return len(dAtA) - i, nil } -func (m *ValidateVSchemaResponse) SizeVT() (n int) { +func (m *UpdateCellsAliasResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { - return 0 + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateCellsAliasResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateCellsAliasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.CellsAlias != nil { + size, err := m.CellsAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidateRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PingTablets { + i-- + if m.PingTablets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ValidateResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResultsByKeyspace) > 0 { + for k := range m.ResultsByKeyspace { + v := m.ResultsByKeyspace[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } } - var l int - _ = l if len(m.Results) > 0 { - for _, s := range m.Results { - l = len(s) - n += 1 + l + sov(uint64(l)) + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Results[iNdEx]) + copy(dAtA[i:], m.Results[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidateKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PingTablets { + i-- + if m.PingTablets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x10 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidateKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } if len(m.ResultsByShard) > 0 { - for k, v := range m.ResultsByShard { - _ = k - _ = v - l = 0 - if v != nil { - l = v.SizeVT() + for k := range m.ResultsByShard { + v := m.ResultsByShard[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Results[iNdEx]) + copy(dAtA[i:], m.Results[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidateSchemaKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateSchemaKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateSchemaKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IncludeVschema { + i-- + if m.IncludeVschema { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.SkipNoPrimary { + i-- + if m.SkipNoPrimary { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.IncludeViews { + i-- + if m.IncludeViews { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.ExcludeTables) > 0 { + for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeTables[iNdEx]) + copy(dAtA[i:], m.ExcludeTables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidateSchemaKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateSchemaKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateSchemaKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResultsByShard) > 0 { + for k := range m.ResultsByShard { + v := m.ResultsByShard[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Results[iNdEx]) + copy(dAtA[i:], m.Results[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidateShardRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateShardRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.PingTablets { + i-- + if m.PingTablets { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidateShardResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateShardResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Results[iNdEx]) + copy(dAtA[i:], m.Results[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidateVersionKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateVersionKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateVersionKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidateVersionKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateVersionKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateVersionKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResultsByShard) > 0 { + for k := range m.ResultsByShard { + v := m.ResultsByShard[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Results[iNdEx]) + copy(dAtA[i:], m.Results[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidateVersionShardRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateVersionShardRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateVersionShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidateVersionShardResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateVersionShardResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateVersionShardResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Results[iNdEx]) + copy(dAtA[i:], m.Results[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidateVSchemaRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateVSchemaRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IncludeViews { + i-- + if m.IncludeViews { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.ExcludeTables) > 0 { + for iNdEx := len(m.ExcludeTables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeTables[iNdEx]) + copy(dAtA[i:], m.ExcludeTables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.ExcludeTables[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Shards) > 0 { + for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Shards[iNdEx]) + copy(dAtA[i:], m.Shards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Shards[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidateVSchemaResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidateVSchemaResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ValidateVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResultsByShard) > 0 { + for k := range m.ResultsByShard { + v := m.ResultsByShard[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Results[iNdEx]) + copy(dAtA[i:], m.Results[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Results[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *WorkflowDeleteRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowDeleteRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowDeleteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.KeepRoutingRules { + i-- + if m.KeepRoutingRules { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.KeepData { + i-- + if m.KeepData { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowDeleteResponse_TabletInfo) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowDeleteResponse_TabletInfo) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowDeleteResponse_TabletInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Deleted { + i-- + if m.Deleted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Tablet != nil { + size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowDeleteResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowDeleteResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowDeleteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Details) > 0 { + for iNdEx := len(m.Details) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Details[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Summary) > 0 { + i -= len(m.Summary) + copy(dAtA[i:], m.Summary) + i = encodeVarint(dAtA, i, uint64(len(m.Summary))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowStatusRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowStatusRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowStatusResponse_TableCopyState) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowStatusResponse_TableCopyState) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowStatusResponse_TableCopyState) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.BytesPercentage != 0 { + i -= 4 + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.BytesPercentage)))) + i-- + dAtA[i] = 0x35 + } + if m.BytesTotal != 0 { + i = encodeVarint(dAtA, i, uint64(m.BytesTotal)) + i-- + dAtA[i] = 0x28 + } + if m.BytesCopied != 0 { + i = encodeVarint(dAtA, i, uint64(m.BytesCopied)) + i-- + dAtA[i] = 0x20 + } + if m.RowsPercentage != 0 { + i -= 4 + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.RowsPercentage)))) + i-- + dAtA[i] = 0x1d + } + if m.RowsTotal != 0 { + i = encodeVarint(dAtA, i, uint64(m.RowsTotal)) + i-- + dAtA[i] = 0x10 + } + if m.RowsCopied != 0 { + i = encodeVarint(dAtA, i, uint64(m.RowsCopied)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *WorkflowStatusResponse_ShardStreamState) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowStatusResponse_ShardStreamState) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowStatusResponse_ShardStreamState) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Info) > 0 { + i -= len(m.Info) + copy(dAtA[i:], m.Info) + i = encodeVarint(dAtA, i, uint64(len(m.Info))) + i-- + dAtA[i] = 0x32 + } + if len(m.Status) > 0 { + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarint(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x2a + } + if len(m.Position) > 0 { + i -= len(m.Position) + copy(dAtA[i:], m.Position) + i = encodeVarint(dAtA, i, uint64(len(m.Position))) + i-- + dAtA[i] = 0x22 + } + if len(m.SourceShard) > 0 { + i -= len(m.SourceShard) + copy(dAtA[i:], m.SourceShard) + i = encodeVarint(dAtA, i, uint64(len(m.SourceShard))) + i-- + dAtA[i] = 0x1a + } + if m.Tablet != nil { + size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = encodeVarint(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *WorkflowStatusResponse_ShardStreams) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowStatusResponse_ShardStreams) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowStatusResponse_ShardStreams) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Streams) > 0 { + for iNdEx := len(m.Streams) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Streams[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *WorkflowStatusResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowStatusResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ShardStreams) > 0 { + for k := range m.ShardStreams { + v := m.ShardStreams[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.TableCopyState) > 0 { + for k := range m.TableCopyState { + v := m.TableCopyState[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *WorkflowSwitchTrafficRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowSwitchTrafficRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowSwitchTrafficRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.InitializeTargetSequences { + i-- + if m.InitializeTargetSequences { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.DryRun { + i-- + if m.DryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if m.Timeout != nil { + size, err := m.Timeout.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.Direction != 0 { + i = encodeVarint(dAtA, i, uint64(m.Direction)) + i-- + dAtA[i] = 0x38 + } + if m.EnableReverseReplication { + i-- + if m.EnableReverseReplication { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.MaxReplicationLagAllowed != nil { + size, err := m.MaxReplicationLagAllowed.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if len(m.TabletTypes) > 0 { + var pksize2 int + for _, num := range m.TabletTypes { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.TabletTypes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x22 + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowSwitchTrafficResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowSwitchTrafficResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowSwitchTrafficResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DryRunResults) > 0 { + for iNdEx := len(m.DryRunResults) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRunResults[iNdEx]) + copy(dAtA[i:], m.DryRunResults[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.DryRunResults[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.CurrentState) > 0 { + i -= len(m.CurrentState) + copy(dAtA[i:], m.CurrentState) + i = encodeVarint(dAtA, i, uint64(len(m.CurrentState))) + i-- + dAtA[i] = 0x1a + } + if len(m.StartState) > 0 { + i -= len(m.StartState) + copy(dAtA[i:], m.StartState) + i = encodeVarint(dAtA, i, uint64(len(m.StartState))) + i-- + dAtA[i] = 0x12 + } + if len(m.Summary) > 0 { + i -= len(m.Summary) + copy(dAtA[i:], m.Summary) + i = encodeVarint(dAtA, i, uint64(len(m.Summary))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowUpdateRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowUpdateRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowUpdateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TabletRequest != nil { + size, err := m.TabletRequest.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowUpdateResponse_TabletInfo) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowUpdateResponse_TabletInfo) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowUpdateResponse_TabletInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Changed { + i-- + if m.Changed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Tablet != nil { + size, err := m.Tablet.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowUpdateResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowUpdateResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowUpdateResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Details) > 0 { + for iNdEx := len(m.Details) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Details[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Summary) > 0 { + i -= len(m.Summary) + copy(dAtA[i:], m.Summary) + i = encodeVarint(dAtA, i, uint64(len(m.Summary))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ExecuteVtctlCommandRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.ActionTimeout != 0 { + n += 1 + sov(uint64(m.ActionTimeout)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteVtctlCommandResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Event != nil { + l = m.Event.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TableMaterializeSettings) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TargetTable) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SourceExpression) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.CreateDdl) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *MaterializeSettings) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SourceKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.StopAfterCopy { + n += 2 + } + if len(m.TableSettings) > 0 { + for _, e := range m.TableSettings { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.Cell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TabletTypes) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ExternalCluster) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaterializationIntent != 0 { + n += 1 + sov(uint64(m.MaterializationIntent)) + } + l = len(m.SourceTimeZone) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetTimeZone) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.SourceShards) > 0 { + for _, s := range m.SourceShards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.OnDdl) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.DeferSecondaryKeys { + n += 2 + } + if m.TabletSelectionPreference != 0 { + n += 1 + sov(uint64(m.TabletSelectionPreference)) + } + if m.AtomicCopy { + n += 3 + } + n += len(m.unknownFields) + return n +} + +func (m *Keyspace) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Keyspace != nil { + l = m.Keyspace.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SchemaMigration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Schema) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Table) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.MigrationStatement) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Strategy != 0 { + n += 1 + sov(uint64(m.Strategy)) + } + l = len(m.Options) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.AddedAt != nil { + l = m.AddedAt.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.RequestedAt != nil { + l = m.RequestedAt.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.ReadyAt != nil { + l = m.ReadyAt.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.StartedAt != nil { + l = m.StartedAt.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.LivenessTimestamp != nil { + l = m.LivenessTimestamp.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.CompletedAt != nil { + l = m.CompletedAt.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.CleanedUpAt != nil { + l = m.CleanedUpAt.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Status != 0 { + n += 2 + sov(uint64(m.Status)) + } + l = len(m.LogPath) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.Artifacts) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + if m.Retries != 0 { + n += 2 + sov(uint64(m.Retries)) + } + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 2 + l + sov(uint64(l)) + } + if m.TabletFailure { + n += 3 + } + if m.Progress != 0 { + n += 6 + } + l = len(m.MigrationContext) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.DdlAction) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + if m.EtaSeconds != 0 { + n += 2 + sov(uint64(m.EtaSeconds)) + } + if m.RowsCopied != 0 { + n += 2 + sov(uint64(m.RowsCopied)) + } + if m.TableRows != 0 { + n += 2 + sov(uint64(m.TableRows)) + } + if m.AddedUniqueKeys != 0 { + n += 2 + sov(uint64(m.AddedUniqueKeys)) + } + if m.RemovedUniqueKeys != 0 { + n += 2 + sov(uint64(m.RemovedUniqueKeys)) + } + l = len(m.LogFile) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + if m.ArtifactRetention != nil { + l = m.ArtifactRetention.SizeVT() + n += 2 + l + sov(uint64(l)) + } + if m.PostponeCompletion { + n += 3 + } + l = len(m.RemovedUniqueKeyNames) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.DroppedNoDefaultColumnNames) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.ExpandedColumnNames) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + l = len(m.RevertibleNotes) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + if m.AllowConcurrent { + n += 3 + } + l = len(m.RevertedUuid) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + if m.IsView { + n += 3 + } + if m.ReadyToComplete { + n += 3 + } + if m.VitessLivenessIndicator != 0 { + n += 2 + sov(uint64(m.VitessLivenessIndicator)) + } + if m.UserThrottleRatio != 0 { + n += 6 + } + l = len(m.SpecialPlan) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + if m.LastThrottledAt != nil { + l = m.LastThrottledAt.SizeVT() + n += 2 + l + sov(uint64(l)) + } + l = len(m.ComponentThrottled) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + if m.CancelledAt != nil { + l = m.CancelledAt.SizeVT() + n += 2 + l + sov(uint64(l)) + } + if m.PostponeLaunch { + n += 3 + } + l = len(m.Stage) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } + if m.CutoverAttempts != 0 { + n += 2 + sov(uint64(m.CutoverAttempts)) + } + if m.IsImmediateOperation { + n += 3 + } + if m.ReviewedAt != nil { + l = m.ReviewedAt.SizeVT() + n += 2 + l + sov(uint64(l)) + } + if m.ReadyToCompleteAt != nil { + l = m.ReadyToCompleteAt.SizeVT() + n += 2 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Shard) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Shard != nil { + l = m.Shard.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Workflow_ReplicationLocation) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Shards) > 0 { + for _, s := range m.Shards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Workflow_ShardStream) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Streams) > 0 { + for _, e := range m.Streams { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.TabletControls) > 0 { + for _, e := range m.TabletControls { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.IsPrimaryServing { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Workflow_Stream_CopyState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Table) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.LastPk) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Workflow_Stream_Log) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sov(uint64(m.Id)) + } + if m.StreamId != 0 { + n += 1 + sov(uint64(m.StreamId)) + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.CreatedAt != nil { + l = m.CreatedAt.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.UpdatedAt != nil { + l = m.UpdatedAt.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Count != 0 { + n += 1 + sov(uint64(m.Count)) + } + n += len(m.unknownFields) + return n +} + +func (m *Workflow_Stream) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sov(uint64(m.Id)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.BinlogSource != nil { + l = m.BinlogSource.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.StopPosition) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.DbName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TransactionTimestamp != nil { + l = m.TransactionTimestamp.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.TimeUpdated != nil { + l = m.TimeUpdated.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.CopyStates) > 0 { + for _, e := range m.CopyStates { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Logs) > 0 { + for _, e := range m.Logs { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.LogFetchError) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Tags) > 0 { + for _, s := range m.Tags { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Workflow) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Source != nil { + l = m.Source.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Target != nil { + l = m.Target.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.MaxVReplicationLag != 0 { + n += 1 + sov(uint64(m.MaxVReplicationLag)) + } + if len(m.ShardStreams) > 0 { + for k, v := range m.ShardStreams { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + l = len(m.WorkflowType) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.WorkflowSubType) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *AddCellInfoRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.CellInfo != nil { + l = m.CellInfo.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *AddCellInfoResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *AddCellsAliasRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *AddCellsAliasResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ApplyRoutingRulesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RoutingRules != nil { + l = m.RoutingRules.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.SkipRebuild { + n += 2 + } + if len(m.RebuildCells) > 0 { + for _, s := range m.RebuildCells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ApplyRoutingRulesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ApplyShardRoutingRulesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ShardRoutingRules != nil { + l = m.ShardRoutingRules.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.SkipRebuild { + n += 2 + } + if len(m.RebuildCells) > 0 { + for _, s := range m.RebuildCells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ApplyShardRoutingRulesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ApplySchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Sql) > 0 { + for _, s := range m.Sql { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.DdlStrategy) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.UuidList) > 0 { + for _, s := range m.UuidList { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.MigrationContext) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.WaitReplicasTimeout != nil { + l = m.WaitReplicasTimeout.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.SkipPreflight { + n += 2 + } + if m.CallerId != nil { + l = m.CallerId.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.BatchSize != 0 { + n += 1 + sov(uint64(m.BatchSize)) + } + n += len(m.unknownFields) + return n +} + +func (m *ApplySchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.UuidList) > 0 { + for _, s := range m.UuidList { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.RowsAffectedByShard) > 0 { + for k, v := range m.RowsAffectedByShard { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + sov(uint64(v)) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ApplyVSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.SkipRebuild { + n += 2 + } + if m.DryRun { + n += 2 + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.VSchema != nil { + l = m.VSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Sql) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ApplyVSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VSchema != nil { + l = m.VSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *BackupRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.AllowPrimary { + n += 2 + } + if m.Concurrency != 0 { + n += 1 + sov(uint64(m.Concurrency)) + } + l = len(m.IncrementalFromPos) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.UpgradeSafe { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *BackupResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Event != nil { + l = m.Event.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *BackupShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.AllowPrimary { + n += 2 + } + if m.Concurrency != 0 { + n += 1 + sov(uint64(m.Concurrency)) + } + if m.UpgradeSafe { + n += 2 + } + l = len(m.IncrementalFromPos) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CancelSchemaMigrationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CancelSchemaMigrationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RowsAffectedByShard) > 0 { + for k, v := range m.RowsAffectedByShard { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + sov(uint64(v)) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ChangeTabletTypeRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.DbType != 0 { + n += 1 + sov(uint64(m.DbType)) + } + if m.DryRun { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ChangeTabletTypeResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BeforeTablet != nil { + l = m.BeforeTablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.AfterTablet != nil { + l = m.AfterTablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.WasDryRun { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *CleanupSchemaMigrationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CleanupSchemaMigrationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RowsAffectedByShard) > 0 { + for k, v := range m.RowsAffectedByShard { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + sov(uint64(v)) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CompleteSchemaMigrationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CompleteSchemaMigrationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RowsAffectedByShard) > 0 { + for k, v := range m.RowsAffectedByShard { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + sov(uint64(v)) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *CreateKeyspaceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Force { + n += 2 + } + if m.AllowEmptyVSchema { + n += 2 + } + if len(m.ServedFroms) > 0 { + for _, e := range m.ServedFroms { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.Type != 0 { + n += 1 + sov(uint64(m.Type)) + } + l = len(m.BaseKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.SnapshotTime != nil { + l = m.SnapshotTime.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.DurabilityPolicy) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SidecarDbName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CreateKeyspaceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Keyspace != nil { + l = m.Keyspace.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CreateShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ShardName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Force { + n += 2 + } + if m.IncludeParent { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *CreateShardResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Keyspace != nil { + l = m.Keyspace.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Shard != nil { + l = m.Shard.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.ShardAlreadyExists { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteCellInfoRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Force { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteCellInfoResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *DeleteCellsAliasRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteCellsAliasResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *DeleteKeyspaceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Recursive { + n += 2 + } + if m.Force { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteKeyspaceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *DeleteShardsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Shards) > 0 { + for _, e := range m.Shards { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.Recursive { + n += 2 + } + if m.EvenIfServing { + n += 2 + } + if m.Force { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteShardsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *DeleteSrvVSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteSrvVSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *DeleteTabletsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TabletAliases) > 0 { + for _, e := range m.TabletAliases { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.AllowPrimary { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *DeleteTabletsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *EmergencyReparentShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.NewPrimary != nil { + l = m.NewPrimary.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.IgnoreReplicas) > 0 { + for _, e := range m.IgnoreReplicas { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.WaitReplicasTimeout != nil { + l = m.WaitReplicasTimeout.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.PreventCrossCellPromotion { + n += 2 + } + if m.WaitForAllTablets { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *EmergencyReparentShardResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.PromotedPrimary != nil { + l = m.PromotedPrimary.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsAppRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Query) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaxRows != 0 { + n += 1 + sov(uint64(m.MaxRows)) + } + if m.UsePool { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsAppResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsDBARequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Query) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaxRows != 0 { + n += 1 + sov(uint64(m.MaxRows)) + } + if m.DisableBinlogs { + n += 2 + } + if m.ReloadSchema { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteFetchAsDBAResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteHookRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.TabletHookRequest != nil { + l = m.TabletHookRequest.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteHookResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HookResult != nil { + l = m.HookResult.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FindAllShardsInKeyspaceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *FindAllShardsInKeyspaceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Shards) > 0 { + for k, v := range m.Shards { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetBackupsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sov(uint64(m.Limit)) + } + if m.Detailed { + n += 2 + } + if m.DetailedLimit != 0 { + n += 1 + sov(uint64(m.DetailedLimit)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetBackupsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Backups) > 0 { + for _, e := range m.Backups { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetCellInfoRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetCellInfoResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CellInfo != nil { + l = m.CellInfo.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetCellInfoNamesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetCellInfoNamesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetCellsAliasesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetCellsAliasesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Aliases) > 0 { + for k, v := range m.Aliases { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetFullStatusRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetFullStatusResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetKeyspacesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetKeyspacesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Keyspaces) > 0 { + for _, e := range m.Keyspaces { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetKeyspaceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetKeyspaceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Keyspace != nil { + l = m.Keyspace.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetPermissionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetPermissionsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Permissions != nil { + l = m.Permissions.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetRoutingRulesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetRoutingRulesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RoutingRules != nil { + l = m.RoutingRules.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.Tables) > 0 { + for _, s := range m.Tables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ExcludeTables) > 0 { + for _, s := range m.ExcludeTables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.IncludeViews { + n += 2 + } + if m.TableNamesOnly { + n += 2 + } + if m.TableSizesOnly { + n += 2 + } + if m.TableSchemaOnly { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *GetSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Schema != nil { + l = m.Schema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetSchemaMigrationsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.MigrationContext) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Status != 0 { + n += 1 + sov(uint64(m.Status)) + } + if m.Recent != nil { + l = m.Recent.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Order != 0 { + n += 1 + sov(uint64(m.Order)) + } + if m.Limit != 0 { + n += 1 + sov(uint64(m.Limit)) + } + if m.Skip != 0 { + n += 1 + sov(uint64(m.Skip)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetSchemaMigrationsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Migrations) > 0 { + for _, e := range m.Migrations { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ShardName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetShardResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Shard != nil { + l = m.Shard.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetShardRoutingRulesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetShardRoutingRulesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ShardRoutingRules != nil { + l = m.ShardRoutingRules.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetSrvKeyspaceNamesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetSrvKeyspaceNamesResponse_NameList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetSrvKeyspaceNamesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Names) > 0 { + for k, v := range m.Names { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetSrvKeyspacesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetSrvKeyspacesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.SrvKeyspaces) > 0 { + for k, v := range m.SrvKeyspaces { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateThrottlerConfigRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Enable { + n += 2 + } + if m.Disable { + n += 2 + } + if m.Threshold != 0 { + n += 9 + } + l = len(m.CustomQuery) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.CustomQuerySet { + n += 2 + } + if m.CheckAsCheckSelf { + n += 2 + } + if m.CheckAsCheckShard { + n += 2 + } + if m.ThrottledApp != nil { + l = m.ThrottledApp.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateThrottlerConfigResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetSrvVSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetSrvVSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SrvVSchema != nil { + l = m.SrvVSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetSrvVSchemasRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetSrvVSchemasResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.SrvVSchemas) > 0 { + for k, v := range m.SrvVSchemas { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetTabletRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetTabletResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetTabletsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.Strict { + n += 2 + } + if len(m.TabletAliases) > 0 { + for _, e := range m.TabletAliases { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.TabletType != 0 { + n += 1 + sov(uint64(m.TabletType)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetTabletsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Tablets) > 0 { + for _, e := range m.Tablets { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetTopologyPathRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetTopologyPathResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cell != nil { + l = m.Cell.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TopologyCell) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Children) > 0 { + for _, s := range m.Children { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetVSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetVersionRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetVersionResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetVSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VSchema != nil { + l = m.VSchema.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetWorkflowsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.ActiveOnly { + n += 2 + } + if m.NameOnly { + n += 2 + } + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetWorkflowsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Workflows) > 0 { + for _, e := range m.Workflows { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *InitShardPrimaryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.PrimaryElectTabletAlias != nil { + l = m.PrimaryElectTabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Force { + n += 2 + } + if m.WaitReplicasTimeout != nil { + l = m.WaitReplicasTimeout.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *InitShardPrimaryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *LaunchSchemaMigrationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *LaunchSchemaMigrationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RowsAffectedByShard) > 0 { + for k, v := range m.RowsAffectedByShard { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + sov(uint64(v)) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *MoveTablesCreateRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SourceKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.TabletTypes) > 0 { + l = 0 + for _, e := range m.TabletTypes { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if m.TabletSelectionPreference != 0 { + n += 1 + sov(uint64(m.TabletSelectionPreference)) + } + if len(m.SourceShards) > 0 { + for _, s := range m.SourceShards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.AllTables { + n += 2 + } + if len(m.IncludeTables) > 0 { + for _, s := range m.IncludeTables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ExcludeTables) > 0 { + for _, s := range m.ExcludeTables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.ExternalClusterName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SourceTimeZone) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.OnDdl) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.StopAfterCopy { + n += 2 + } + if m.DropForeignKeys { + n += 2 + } + if m.DeferSecondaryKeys { + n += 3 + } + if m.AutoStart { + n += 3 + } + if m.NoRoutingRules { + n += 3 + } + if m.AtomicCopy { + n += 3 + } + n += len(m.unknownFields) + return n +} + +func (m *MoveTablesCreateResponse_TabletInfo) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Created { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *MoveTablesCreateResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Summary) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Details) > 0 { + for _, e := range m.Details { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *MoveTablesCompleteRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TargetKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.KeepData { + n += 2 + } + if m.KeepRoutingRules { + n += 2 + } + if m.RenameTables { + n += 2 + } + if m.DryRun { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *MoveTablesCompleteResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Summary) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.DryRunResults) > 0 { + for _, s := range m.DryRunResults { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *PingTabletRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PingTabletResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PlannedReparentShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.NewPrimary != nil { + l = m.NewPrimary.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.AvoidPrimary != nil { + l = m.AvoidPrimary.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.WaitReplicasTimeout != nil { + l = m.WaitReplicasTimeout.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *PlannedReparentShardResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.PromotedPrimary != nil { + l = m.PromotedPrimary.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RebuildKeyspaceGraphRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.AllowPartial { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RebuildKeyspaceGraphResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RebuildVSchemaGraphRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RebuildVSchemaGraphResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RefreshStateRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RefreshStateResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RefreshStateByShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RefreshStateByShardResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IsPartialRefresh { + n += 2 + } + l = len(m.PartialRefreshDetails) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReloadSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReloadSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ReloadSchemaKeyspaceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.WaitPosition) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.IncludePrimary { + n += 2 + } + if m.Concurrency != 0 { + n += 1 + sov(uint64(m.Concurrency)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReloadSchemaKeyspaceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ReloadSchemaShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.WaitPosition) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.IncludePrimary { + n += 2 + } + if m.Concurrency != 0 { + n += 1 + sov(uint64(m.Concurrency)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReloadSchemaShardResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveBackupRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveBackupResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RemoveKeyspaceCellRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Cell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Force { + n += 2 + } + if m.Recursive { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveKeyspaceCellResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *RemoveShardCellRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ShardName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Cell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Force { + n += 2 + } + if m.Recursive { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveShardCellResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ReparentTabletRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReparentTabletResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Primary != nil { + l = m.Primary.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ReshardCreateRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.SourceShards) > 0 { + for _, s := range m.SourceShards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.TargetShards) > 0 { + for _, s := range m.TargetShards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.TabletTypes) > 0 { + l = 0 + for _, e := range m.TabletTypes { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if m.TabletSelectionPreference != 0 { + n += 1 + sov(uint64(m.TabletSelectionPreference)) + } + if m.SkipSchemaCopy { + n += 2 + } + l = len(m.OnDdl) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.StopAfterCopy { + n += 2 + } + if m.DeferSecondaryKeys { + n += 2 + } + if m.AutoStart { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *RestoreFromBackupRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.BackupTime != nil { + l = m.BackupTime.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.RestoreToPos) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.DryRun { + n += 2 + } + if m.RestoreToTimestamp != nil { + l = m.RestoreToTimestamp.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RestoreFromBackupResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Event != nil { + l = m.Event.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RetrySchemaMigrationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RetrySchemaMigrationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RowsAffectedByShard) > 0 { + for k, v := range m.RowsAffectedByShard { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + sov(uint64(v)) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *RunHealthCheckRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RunHealthCheckResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SetKeyspaceDurabilityPolicyRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.DurabilityPolicy) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetKeyspaceDurabilityPolicyResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Keyspace != nil { + l = m.Keyspace.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetKeyspaceServedFromRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TabletType != 0 { + n += 1 + sov(uint64(m.TabletType)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.Remove { + n += 2 + } + l = len(m.SourceKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetKeyspaceServedFromResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Keyspace != nil { + l = m.Keyspace.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetKeyspaceShardingInfoRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Force { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SetKeyspaceShardingInfoResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Keyspace != nil { + l = m.Keyspace.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetShardIsPrimaryServingRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.IsServing { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SetShardIsPrimaryServingResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Shard != nil { + l = m.Shard.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetShardTabletControlRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TabletType != 0 { + n += 1 + sov(uint64(m.TabletType)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.DeniedTables) > 0 { + for _, s := range m.DeniedTables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.DisableQueryService { + n += 2 + } + if m.Remove { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SetShardTabletControlResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Shard != nil { + l = m.Shard.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetWritableRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Writable { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SetWritableResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ShardReplicationAddRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ShardReplicationAddResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *ShardReplicationFixRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Cell) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ShardReplicationFixResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Error != nil { + l = m.Error.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ShardReplicationPositionsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ShardReplicationPositionsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ReplicationStatuses) > 0 { + for k, v := range m.ReplicationStatuses { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + if len(m.TabletMap) > 0 { + for k, v := range m.TabletMap { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ShardReplicationRemoveRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ShardReplicationRemoveResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SleepTabletRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Duration != nil { + l = m.Duration.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SleepTabletResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *SourceShardAddRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Uid != 0 { + n += 1 + sov(uint64(m.Uid)) + } + l = len(m.SourceKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.SourceShard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.KeyRange != nil { + l = m.KeyRange.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.Tables) > 0 { + for _, s := range m.Tables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *SourceShardAddResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Shard != nil { + l = m.Shard.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SourceShardDeleteRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Uid != 0 { + n += 1 + sov(uint64(m.Uid)) + } + n += len(m.unknownFields) + return n +} + +func (m *SourceShardDeleteResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Shard != nil { + l = m.Shard.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StartReplicationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StartReplicationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *StopReplicationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *StopReplicationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *TabletExternallyReparentedRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TabletExternallyReparentedResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.NewPrimary != nil { + l = m.NewPrimary.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.OldPrimary != nil { + l = m.OldPrimary.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateCellInfoRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.CellInfo != nil { + l = m.CellInfo.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateCellInfoResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.CellInfo != nil { + l = m.CellInfo.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateCellsAliasRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.CellsAlias != nil { + l = m.CellsAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateCellsAliasResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.CellsAlias != nil { + l = m.CellsAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PingTablets { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, s := range m.Results { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ResultsByKeyspace) > 0 { + for k, v := range m.ResultsByKeyspace { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateKeyspaceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.PingTablets { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateKeyspaceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, s := range m.Results { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ResultsByShard) > 0 { + for k, v := range m.ResultsByShard { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateSchemaKeyspaceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.ExcludeTables) > 0 { + for _, s := range m.ExcludeTables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.IncludeViews { + n += 2 + } + if m.SkipNoPrimary { + n += 2 + } + if m.IncludeVschema { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateSchemaKeyspaceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, s := range m.Results { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ResultsByShard) > 0 { + for k, v := range m.ResultsByShard { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.PingTablets { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateShardResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, s := range m.Results { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateVersionKeyspaceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateVersionKeyspaceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, s := range m.Results { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ResultsByShard) > 0 { + for k, v := range m.ResultsByShard { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateVersionShardRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateVersionShardResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, s := range m.Results { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateVSchemaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Shards) > 0 { + for _, s := range m.Shards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ExcludeTables) > 0 { + for _, s := range m.ExcludeTables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if m.IncludeViews { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ValidateVSchemaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, s := range m.Results { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ResultsByShard) > 0 { + for k, v := range m.ResultsByShard { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowDeleteRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.KeepData { + n += 2 + } + if m.KeepRoutingRules { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowDeleteResponse_TabletInfo) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Deleted { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowDeleteResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Summary) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Details) > 0 { + for _, e := range m.Details { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowStatusRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowStatusResponse_TableCopyState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RowsCopied != 0 { + n += 1 + sov(uint64(m.RowsCopied)) + } + if m.RowsTotal != 0 { + n += 1 + sov(uint64(m.RowsTotal)) + } + if m.RowsPercentage != 0 { + n += 5 + } + if m.BytesCopied != 0 { + n += 1 + sov(uint64(m.BytesCopied)) + } + if m.BytesTotal != 0 { + n += 1 + sov(uint64(m.BytesTotal)) + } + if m.BytesPercentage != 0 { + n += 5 + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowStatusResponse_ShardStreamState) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sov(uint64(m.Id)) + } + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.SourceShard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Position) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Status) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Info) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowStatusResponse_ShardStreams) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Streams) > 0 { + for _, e := range m.Streams { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowStatusResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TableCopyState) > 0 { + for k, v := range m.TableCopyState { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + if len(m.ShardStreams) > 0 { + for k, v := range m.ShardStreams { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowSwitchTrafficRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Workflow) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.TabletTypes) > 0 { + l = 0 + for _, e := range m.TabletTypes { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if m.MaxReplicationLagAllowed != nil { + l = m.MaxReplicationLagAllowed.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.EnableReverseReplication { + n += 2 + } + if m.Direction != 0 { + n += 1 + sov(uint64(m.Direction)) + } + if m.Timeout != nil { + l = m.Timeout.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.DryRun { + n += 2 + } + if m.InitializeTargetSequences { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowSwitchTrafficResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Summary) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.StartState) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.CurrentState) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.DryRunResults) > 0 { + for _, s := range m.DryRunResults { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowUpdateRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TabletRequest != nil { + l = m.TabletRequest.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowUpdateResponse_TabletInfo) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tablet != nil { + l = m.Tablet.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Changed { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *WorkflowUpdateResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Summary) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Details) > 0 { + for _, e := range m.Details { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ExecuteVtctlCommandRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteVtctlCommandRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteVtctlCommandRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActionTimeout", wireType) + } + m.ActionTimeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ActionTimeout |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteVtctlCommandResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteVtctlCommandResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteVtctlCommandResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Event == nil { + m.Event = &logutil.Event{} + } + if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TableMaterializeSettings) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableMaterializeSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableMaterializeSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetTable", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetTable = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateDdl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CreateDdl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MaterializeSettings: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MaterializeSettings: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceKeyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetKeyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StopAfterCopy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StopAfterCopy = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSettings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableSettings = append(m.TableSettings, &TableMaterializeSettings{}) + if err := m.TableSettings[len(m.TableSettings)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cell = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TabletTypes = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalCluster", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalCluster = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaterializationIntent", wireType) + } + m.MaterializationIntent = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaterializationIntent |= MaterializationIntent(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceTimeZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceTimeZone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetTimeZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetTimeZone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceShards", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceShards = append(m.SourceShards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OnDdl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeferSecondaryKeys", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DeferSecondaryKeys = bool(v != 0) + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) + } + m.TabletSelectionPreference = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletSelectionPreference |= tabletmanagerdata.TabletSelectionPreference(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AtomicCopy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AtomicCopy = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Keyspace) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Keyspace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Keyspace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Keyspace == nil { + m.Keyspace = &topodata.Keyspace{} + } + if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SchemaMigration) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchemaMigration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchemaMigration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schema = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Table = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MigrationStatement", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MigrationStatement = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + m.Strategy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Strategy |= SchemaMigration_Strategy(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AddedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AddedAt == nil { + m.AddedAt = &vttime.Time{} + } + if err := m.AddedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestedAt == nil { + m.RequestedAt = &vttime.Time{} + } + if err := m.RequestedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReadyAt == nil { + m.ReadyAt = &vttime.Time{} + } + if err := m.ReadyAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartedAt == nil { + m.StartedAt = &vttime.Time{} + } + if err := m.StartedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LivenessTimestamp == nil { + m.LivenessTimestamp = &vttime.Time{} + } + if err := m.LivenessTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletedAt == nil { + m.CompletedAt = &vttime.Time{} + } + if err := m.CompletedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CleanedUpAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CleanedUpAt == nil { + m.CleanedUpAt = &vttime.Time{} + } + if err := m.CleanedUpAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= SchemaMigration_Status(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artifacts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Artifacts = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Retries", wireType) + } + m.Retries = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Retries |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} + } + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletFailure", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletFailure = bool(v != 0) + case 22: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Progress", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Progress = float32(math.Float32frombits(v)) + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MigrationContext", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MigrationContext = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DdlAction", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DdlAction = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 26: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EtaSeconds", wireType) + } + m.EtaSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EtaSeconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 27: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsCopied", wireType) + } + m.RowsCopied = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RowsCopied |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 28: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TableRows", wireType) + } + m.TableRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TableRows |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 29: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AddedUniqueKeys", wireType) + } + m.AddedUniqueKeys = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AddedUniqueKeys |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 30: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RemovedUniqueKeys", wireType) + } + m.RemovedUniqueKeys = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RemovedUniqueKeys |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 31: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 32: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArtifactRetention", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ArtifactRetention == nil { + m.ArtifactRetention = &vttime.Duration{} + } + if err := m.ArtifactRetention.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 33: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PostponeCompletion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PostponeCompletion = bool(v != 0) + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RemovedUniqueKeyNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RemovedUniqueKeyNames = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 35: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedNoDefaultColumnNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DroppedNoDefaultColumnNames = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 36: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpandedColumnNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExpandedColumnNames = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 37: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RevertibleNotes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RevertibleNotes = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 38: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowConcurrent", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowConcurrent = bool(v != 0) + case 39: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RevertedUuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RevertedUuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 40: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsView", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsView = bool(v != 0) + case 41: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyToComplete", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadyToComplete = bool(v != 0) + case 42: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VitessLivenessIndicator", wireType) + } + m.VitessLivenessIndicator = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.VitessLivenessIndicator |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 43: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field UserThrottleRatio", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.UserThrottleRatio = float32(math.Float32frombits(v)) + case 44: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpecialPlan", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpecialPlan = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 45: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastThrottledAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastThrottledAt == nil { + m.LastThrottledAt = &vttime.Time{} + } + if err := m.LastThrottledAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 46: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ComponentThrottled", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ComponentThrottled = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 47: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CancelledAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CancelledAt == nil { + m.CancelledAt = &vttime.Time{} + } + if err := m.CancelledAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 48: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PostponeLaunch", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PostponeLaunch = bool(v != 0) + case 49: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 50: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CutoverAttempts", wireType) + } + m.CutoverAttempts = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CutoverAttempts |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 51: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsImmediateOperation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsImmediateOperation = bool(v != 0) + case 52: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReviewedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReviewedAt == nil { + m.ReviewedAt = &vttime.Time{} + } + if err := m.ReviewedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 53: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyToCompleteAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReadyToCompleteAt == nil { + m.ReadyToCompleteAt = &vttime.Time{} + } + if err := m.ReadyToCompleteAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Shard) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Shard: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Shard: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Shard == nil { + m.Shard = &topodata.Shard{} + } + if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow_ReplicationLocation) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow_ReplicationLocation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow_ReplicationLocation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow_ShardStream) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow_ShardStream: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow_ShardStream: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Streams = append(m.Streams, &Workflow_Stream{}) + if err := m.Streams[len(m.Streams)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletControls", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TabletControls = append(m.TabletControls, &topodata.Shard_TabletControl{}) + if err := m.TabletControls[len(m.TabletControls)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsPrimaryServing", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsPrimaryServing = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow_Stream_CopyState) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow_Stream_CopyState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow_Stream_CopyState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Table = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastPk", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastPk = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow_Stream_Log) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow_Stream_Log: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow_Stream_Log: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamId", wireType) + } + m.StreamId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StreamId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedAt == nil { + m.CreatedAt = &vttime.Time{} + } + if err := m.CreatedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdatedAt == nil { + m.UpdatedAt = &vttime.Time{} + } + if err := m.UpdatedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow_Stream: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow_Stream: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} + } + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinlogSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BinlogSource == nil { + m.BinlogSource = &binlogdata.BinlogSource{} + } + if err := m.BinlogSource.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StopPosition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StopPosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DbName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TransactionTimestamp == nil { + m.TransactionTimestamp = &vttime.Time{} + } + if err := m.TransactionTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUpdated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TimeUpdated == nil { + m.TimeUpdated = &vttime.Time{} + } + if err := m.TimeUpdated.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CopyStates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CopyStates = append(m.CopyStates, &Workflow_Stream_CopyState{}) + if err := m.CopyStates[len(m.CopyStates)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Logs = append(m.Logs, &Workflow_Stream_Log{}) + if err := m.Logs[len(m.Logs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogFetchError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogFetchError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Source == nil { + m.Source = &Workflow_ReplicationLocation{} + } + if err := m.Source.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &Workflow_ReplicationLocation{} + } + if err := m.Target.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxVReplicationLag", wireType) + } + m.MaxVReplicationLag = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxVReplicationLag |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardStreams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ShardStreams == nil { + m.ShardStreams = make(map[string]*Workflow_ShardStream) + } + var mapkey string + var mapvalue *Workflow_ShardStream + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Workflow_ShardStream{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ShardStreams[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkflowType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowSubType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkflowSubType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddCellInfoRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddCellInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CellInfo == nil { + m.CellInfo = &topodata.CellInfo{} + } + if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddCellInfoResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddCellInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddCellsAliasRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddCellsAliasRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddCellsAliasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AddCellsAliasResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AddCellsAliasResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AddCellsAliasResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplyRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyRoutingRulesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoutingRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RoutingRules == nil { + m.RoutingRules = &vschema.RoutingRules{} + } + if err := m.RoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipRebuild", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipRebuild = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RebuildCells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RebuildCells = append(m.RebuildCells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplyRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyRoutingRulesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplyShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyShardRoutingRulesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyShardRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardRoutingRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ShardRoutingRules == nil { + m.ShardRoutingRules = &vschema.ShardRoutingRules{} + } + if err := m.ShardRoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipRebuild", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipRebuild = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RebuildCells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RebuildCells = append(m.RebuildCells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplyShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyShardRoutingRulesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyShardRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplySchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplySchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sql = append(m.Sql, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DdlStrategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DdlStrategy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UuidList", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UuidList = append(m.UuidList, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MigrationContext", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MigrationContext = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WaitReplicasTimeout == nil { + m.WaitReplicasTimeout = &vttime.Duration{} + } + if err := m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipPreflight", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipPreflight = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CallerId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CallerId == nil { + m.CallerId = &vtrpc.CallerID{} + } + if err := m.CallerId.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BatchSize", wireType) + } + m.BatchSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BatchSize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplySchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplySchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UuidList", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UuidList = append(m.UuidList, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsAffectedByShard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RowsAffectedByShard == nil { + m.RowsAffectedByShard = make(map[string]uint64) + } + var mapkey string + var mapvalue uint64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.RowsAffectedByShard[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyVSchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipRebuild", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipRebuild = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DryRun = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VSchema == nil { + m.VSchema = &vschema.Keyspace{} + } + if err := m.VSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sql = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplyVSchemaResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyVSchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VSchema == nil { + m.VSchema = &vschema.Keyspace{} + } + if err := m.VSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BackupRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowPrimary = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) + } + m.Concurrency = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Concurrency |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncrementalFromPos", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IncrementalFromPos = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpgradeSafe", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UpgradeSafe = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BackupResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Event == nil { + m.Event = &logutil.Event{} + } + if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BackupShardRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BackupShardRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BackupShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowPrimary = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) + } + m.Concurrency = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Concurrency |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpgradeSafe", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UpgradeSafe = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncrementalFromPos", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IncrementalFromPos = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CancelSchemaMigrationRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CancelSchemaMigrationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CancelSchemaMigrationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CancelSchemaMigrationResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CancelSchemaMigrationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CancelSchemaMigrationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsAffectedByShard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RowsAffectedByShard == nil { + m.RowsAffectedByShard = make(map[string]uint64) + } + var mapkey string + var mapvalue uint64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.RowsAffectedByShard[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChangeTabletTypeRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChangeTabletTypeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChangeTabletTypeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DbType", wireType) + } + m.DbType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DbType |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DryRun = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChangeTabletTypeResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChangeTabletTypeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChangeTabletTypeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeforeTablet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BeforeTablet == nil { + m.BeforeTablet = &topodata.Tablet{} + } + if err := m.BeforeTablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AfterTablet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - l += 1 + sov(uint64(l)) - mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l - n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AfterTablet == nil { + m.AfterTablet = &topodata.Tablet{} + } + if err := m.AfterTablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WasDryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.WasDryRun = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy } } - n += len(m.unknownFields) - return n -} -func sov(x uint64) (n int) { - return (bits.Len64(x|1) + 6) / 7 -} -func soz(x uint64) (n int) { - return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func (m *ExecuteVtctlCommandRequest) UnmarshalVT(dAtA []byte) error { +func (m *CleanupSchemaMigrationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13294,15 +28549,15 @@ func (m *ExecuteVtctlCommandRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteVtctlCommandRequest: wiretype end group for non-group") + return fmt.Errorf("proto: CleanupSchemaMigrationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteVtctlCommandRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CleanupSchemaMigrationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13330,13 +28585,13 @@ func (m *ExecuteVtctlCommandRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActionTimeout", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) } - m.ActionTimeout = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13346,11 +28601,24 @@ func (m *ExecuteVtctlCommandRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ActionTimeout |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13373,7 +28641,7 @@ func (m *ExecuteVtctlCommandRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteVtctlCommandResponse) UnmarshalVT(dAtA []byte) error { +func (m *CleanupSchemaMigrationResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13396,15 +28664,15 @@ func (m *ExecuteVtctlCommandResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteVtctlCommandResponse: wiretype end group for non-group") + return fmt.Errorf("proto: CleanupSchemaMigrationResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteVtctlCommandResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CleanupSchemaMigrationResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowsAffectedByShard", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13431,12 +28699,89 @@ func (m *ExecuteVtctlCommandResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Event == nil { - m.Event = &logutil.Event{} + if m.RowsAffectedByShard == nil { + m.RowsAffectedByShard = make(map[string]uint64) } - if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + var mapvalue uint64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.RowsAffectedByShard[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -13460,7 +28805,7 @@ func (m *ExecuteVtctlCommandResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *TableMaterializeSettings) UnmarshalVT(dAtA []byte) error { +func (m *CompleteSchemaMigrationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13483,15 +28828,15 @@ func (m *TableMaterializeSettings) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TableMaterializeSettings: wiretype end group for non-group") + return fmt.Errorf("proto: CompleteSchemaMigrationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TableMaterializeSettings: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CompleteSchemaMigrationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetTable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13519,43 +28864,11 @@ func (m *TableMaterializeSettings) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TargetTable = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceExpression", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SourceExpression = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateDdl", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13583,7 +28896,7 @@ func (m *TableMaterializeSettings) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.CreateDdl = string(dAtA[iNdEx:postIndex]) + m.Uuid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -13607,7 +28920,7 @@ func (m *TableMaterializeSettings) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { +func (m *CompleteSchemaMigrationResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13630,17 +28943,17 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MaterializeSettings: wiretype end group for non-group") + return fmt.Errorf("proto: CompleteSchemaMigrationResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MaterializeSettings: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CompleteSchemaMigrationResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowsAffectedByShard", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13650,59 +28963,159 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Workflow = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceKeyspace", wireType) + if m.RowsAffectedByShard == nil { + m.RowsAffectedByShard = make(map[string]uint64) } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue uint64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + m.RowsAffectedByShard[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.SourceKeyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateKeyspaceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetKeyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13730,11 +29143,11 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StopAfterCopy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -13751,12 +29164,12 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { break } } - m.StopAfterCopy = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TableSettings", wireType) + m.Force = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowEmptyVSchema", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13766,63 +29179,17 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TableSettings = append(m.TableSettings, &TableMaterializeSettings{}) - if err := m.TableSettings[len(m.TableSettings)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.AllowEmptyVSchema = bool(v != 0) case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cell = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServedFroms", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13832,61 +29199,31 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.TabletTypes = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalCluster", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF + m.ServedFroms = append(m.ServedFroms, &topodata.Keyspace_ServedFrom{}) + if err := m.ServedFroms[len(m.ServedFroms)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.ExternalCluster = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 9: + case 7: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaterializationIntent", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.MaterializationIntent = 0 + m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13896,14 +29233,14 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaterializationIntent |= MaterializationIntent(b&0x7F) << shift + m.Type |= topodata.KeyspaceType(b&0x7F) << shift if b < 0x80 { break } } - case 10: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceTimeZone", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BaseKeyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13931,13 +29268,13 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceTimeZone = string(dAtA[iNdEx:postIndex]) + m.BaseKeyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 11: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetTimeZone", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SnapshotTime", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13947,27 +29284,31 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.TargetTimeZone = string(dAtA[iNdEx:postIndex]) + if m.SnapshotTime == nil { + m.SnapshotTime = &vttime.Time{} + } + if err := m.SnapshotTime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 12: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceShards", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DurabilityPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13995,11 +29336,11 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceShards = append(m.SourceShards, string(dAtA[iNdEx:postIndex])) + m.DurabilityPolicy = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 13: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SidecarDbName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14027,28 +29368,8 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.OnDdl = string(dAtA[iNdEx:postIndex]) + m.SidecarDbName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeferSecondaryKeys", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DeferSecondaryKeys = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -14071,7 +29392,7 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Keyspace) UnmarshalVT(dAtA []byte) error { +func (m *CreateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14094,45 +29415,13 @@ func (m *Keyspace) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Keyspace: wiretype end group for non-group") + return fmt.Errorf("proto: CreateKeyspaceResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Keyspace: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CreateKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } @@ -14162,7 +29451,7 @@ func (m *Keyspace) UnmarshalVT(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Keyspace == nil { - m.Keyspace = &topodata.Keyspace{} + m.Keyspace = &Keyspace{} } if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err @@ -14190,7 +29479,7 @@ func (m *Keyspace) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Shard) UnmarshalVT(dAtA []byte) error { +func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14213,10 +29502,10 @@ func (m *Shard) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Shard: wiretype end group for non-group") + return fmt.Errorf("proto: CreateShardRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Shard: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CreateShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -14253,7 +29542,7 @@ func (m *Shard) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14281,100 +29570,13 @@ func (m *Shard) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.ShardName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Shard == nil { - m.Shard = &topodata.Shard{} - } - if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Workflow_ReplicationLocation) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Workflow_ReplicationLocation: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Workflow_ReplicationLocation: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14384,29 +29586,17 @@ func (m *Workflow_ReplicationLocation) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + m.Force = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeParent", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14416,24 +29606,12 @@ func (m *Workflow_ReplicationLocation) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + m.IncludeParent = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -14456,7 +29634,7 @@ func (m *Workflow_ReplicationLocation) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Workflow_ShardStream) UnmarshalVT(dAtA []byte) error { +func (m *CreateShardResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14479,15 +29657,15 @@ func (m *Workflow_ShardStream) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Workflow_ShardStream: wiretype end group for non-group") + return fmt.Errorf("proto: CreateShardResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Workflow_ShardStream: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CreateShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14514,14 +29692,16 @@ func (m *Workflow_ShardStream) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Streams = append(m.Streams, &Workflow_Stream{}) - if err := m.Streams[len(m.Streams)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Keyspace == nil { + m.Keyspace = &Keyspace{} + } + if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletControls", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14548,14 +29728,16 @@ func (m *Workflow_ShardStream) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TabletControls = append(m.TabletControls, &topodata.Shard_TabletControl{}) - if err := m.TabletControls[len(m.TabletControls)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Shard == nil { + m.Shard = &Shard{} + } + if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IsPrimaryServing", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShardAlreadyExists", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -14572,7 +29754,7 @@ func (m *Workflow_ShardStream) UnmarshalVT(dAtA []byte) error { break } } - m.IsPrimaryServing = bool(v != 0) + m.ShardAlreadyExists = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -14595,7 +29777,7 @@ func (m *Workflow_ShardStream) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Workflow_Stream_CopyState) UnmarshalVT(dAtA []byte) error { +func (m *DeleteCellInfoRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14618,15 +29800,15 @@ func (m *Workflow_Stream_CopyState) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Workflow_Stream_CopyState: wiretype end group for non-group") + return fmt.Errorf("proto: DeleteCellInfoRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Workflow_Stream_CopyState: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeleteCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14654,13 +29836,13 @@ func (m *Workflow_Stream_CopyState) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Table = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastPk", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14670,24 +29852,12 @@ func (m *Workflow_Stream_CopyState) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LastPk = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.Force = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -14710,7 +29880,7 @@ func (m *Workflow_Stream_CopyState) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Workflow_Stream_Log) UnmarshalVT(dAtA []byte) error { +func (m *DeleteCellInfoResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14733,85 +29903,66 @@ func (m *Workflow_Stream_Log) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Workflow_Stream_Log: wiretype end group for non-group") + return fmt.Errorf("proto: DeleteCellInfoResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Workflow_Stream_Log: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeleteCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - m.Id = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Id |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamId", wireType) - } - m.StreamId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StreamId |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteCellsAliasRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteCellsAliasRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteCellsAliasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14839,131 +29990,59 @@ func (m *Workflow_Stream_Log) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CreatedAt == nil { - m.CreatedAt = &vttime.Time{} - } - if err := m.CreatedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UpdatedAt == nil { - m.UpdatedAt = &vttime.Time{} - } - if err := m.UpdatedAt.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteCellsAliasResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteCellsAliasResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteCellsAliasResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -14986,7 +30065,7 @@ func (m *Workflow_Stream_Log) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { +func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15009,34 +30088,15 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Workflow_Stream: wiretype end group for non-group") + return fmt.Errorf("proto: DeleteKeyspaceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Workflow_Stream: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeleteKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - m.Id = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Id |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15064,13 +30124,13 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15080,33 +30140,17 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Tablet == nil { - m.Tablet = &topodata.TabletAlias{} - } - if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BinlogSource", wireType) + m.Recursive = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15116,65 +30160,119 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength + m.Force = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.BinlogSource == nil { - m.BinlogSource = &binlogdata.BinlogSource{} + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteKeyspaceResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if err := m.BinlogSource.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteKeyspaceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Position = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteShardsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteShardsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StopPosition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15184,29 +30282,31 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.StopPosition = string(dAtA[iNdEx:postIndex]) + m.Shards = append(m.Shards, &Shard{}) + if err := m.Shards[len(m.Shards)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15216,29 +30316,17 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) + m.Recursive = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EvenIfServing", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15248,29 +30336,17 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DbName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TransactionTimestamp", wireType) + m.EvenIfServing = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15280,67 +30356,117 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength + m.Force = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.TransactionTimestamp == nil { - m.TransactionTimestamp = &vttime.Time{} - } - if err := m.TransactionTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteShardsResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeUpdated", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - if msglen < 0 { - return ErrInvalidLength + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteShardsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteShardsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.TimeUpdated == nil { - m.TimeUpdated = &vttime.Time{} + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if err := m.TimeUpdated.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 11: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteSrvVSchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteSrvVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15368,45 +30494,113 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Cell = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CopyStates", wireType) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteSrvVSchemaResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - if msglen < 0 { - return ErrInvalidLength + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteSrvVSchemaResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteSrvVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.CopyStates = append(m.CopyStates, &Workflow_Stream_CopyState{}) - if err := m.CopyStates[len(m.CopyStates)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteTabletsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - iNdEx = postIndex - case 13: + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteTabletsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteTabletsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAliases", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15433,16 +30627,16 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Logs = append(m.Logs, &Workflow_Stream_Log{}) - if err := m.Logs[len(m.Logs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.TabletAliases = append(m.TabletAliases, &topodata.TabletAlias{}) + if err := m.TabletAliases[len(m.TabletAliases)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LogFetchError", wireType) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15452,56 +30646,63 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + m.AllowPrimary = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.LogFetchError = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteTabletsResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Tags = append(m.Tags, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteTabletsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteTabletsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -15524,7 +30725,7 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Workflow) UnmarshalVT(dAtA []byte) error { +func (m *EmergencyReparentShardRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15547,15 +30748,15 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Workflow: wiretype end group for non-group") + return fmt.Errorf("proto: EmergencyReparentShardRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Workflow: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EmergencyReparentShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15583,13 +30784,13 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15599,31 +30800,27 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Source == nil { - m.Source = &Workflow_ReplicationLocation{} - } - if err := m.Source.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NewPrimary", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15650,18 +30847,18 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Target == nil { - m.Target = &Workflow_ReplicationLocation{} + if m.NewPrimary == nil { + m.NewPrimary = &topodata.TabletAlias{} } - if err := m.Target.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.NewPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxVReplicationLag", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreReplicas", wireType) } - m.MaxVReplicationLag = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15671,14 +30868,29 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxVReplicationLag |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IgnoreReplicas = append(m.IgnoreReplicas, &topodata.TabletAlias{}) + if err := m.IgnoreReplicas[len(m.IgnoreReplicas)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardStreams", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15705,111 +30917,18 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ShardStreams == nil { - m.ShardStreams = make(map[string]*Workflow_ShardStream) + if m.WaitReplicasTimeout == nil { + m.WaitReplicasTimeout = &vttime.Duration{} } - var mapkey string - var mapvalue *Workflow_ShardStream - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &Workflow_ShardStream{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.ShardStreams[mapkey] = mapvalue iNdEx = postIndex case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkflowType", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreventCrossCellPromotion", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15819,29 +30938,17 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WorkflowType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.PreventCrossCellPromotion = bool(v != 0) case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkflowSubType", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitForAllTablets", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15851,24 +30958,12 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WorkflowSubType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.WaitForAllTablets = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -15891,7 +30986,7 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *AddCellInfoRequest) UnmarshalVT(dAtA []byte) error { +func (m *EmergencyReparentShardResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15914,15 +31009,15 @@ func (m *AddCellInfoRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AddCellInfoRequest: wiretype end group for non-group") + return fmt.Errorf("proto: EmergencyReparentShardResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AddCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EmergencyReparentShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15950,13 +31045,13 @@ func (m *AddCellInfoRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15966,135 +31061,29 @@ func (m *AddCellInfoRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.CellInfo == nil { - m.CellInfo = &topodata.CellInfo{} - } - if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddCellInfoResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddCellInfoResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddCellsAliasRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddCellsAliasRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddCellsAliasRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PromotedPrimary", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -16104,29 +31093,33 @@ func (m *AddCellsAliasRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if m.PromotedPrimary == nil { + m.PromotedPrimary = &topodata.TabletAlias{} + } + if err := m.PromotedPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -16136,75 +31129,26 @@ func (m *AddCellsAliasRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { + m.Events = append(m.Events, &logutil.Event{}) + if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddCellsAliasResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddCellsAliasResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddCellsAliasResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -16227,7 +31171,7 @@ func (m *AddCellsAliasResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplyRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16250,15 +31194,15 @@ func (m *ApplyRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplyRoutingRulesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsAppRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsAppRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoutingRules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16285,18 +31229,18 @@ func (m *ApplyRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RoutingRules == nil { - m.RoutingRules = &vschema.RoutingRules{} + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - if err := m.RoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SkipRebuild", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -16306,17 +31250,29 @@ func (m *ApplyRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.SkipRebuild = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RebuildCells", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) } - var stringLen uint64 + m.MaxRows = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -16326,24 +31282,31 @@ func (m *ApplyRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.MaxRows |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UsePool", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - m.RebuildCells = append(m.RebuildCells, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + m.UsePool = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -16366,7 +31329,7 @@ func (m *ApplyRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplyRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16389,12 +31352,48 @@ func (m *ApplyRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplyRoutingRulesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsAppResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsAppResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -16417,7 +31416,7 @@ func (m *ApplyRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplyShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsDBARequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16440,15 +31439,15 @@ func (m *ApplyShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplyShardRoutingRulesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsDBARequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyShardRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsDBARequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardRoutingRules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16475,18 +31474,18 @@ func (m *ApplyShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ShardRoutingRules == nil { - m.ShardRoutingRules = &vschema.ShardRoutingRules{} + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - if err := m.ShardRoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SkipRebuild", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -16496,17 +31495,29 @@ func (m *ApplyShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.SkipRebuild = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Query = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RebuildCells", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) } - var stringLen uint64 + m.MaxRows = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -16516,24 +31527,51 @@ func (m *ApplyShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.MaxRows |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DisableBinlogs", wireType) } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if postIndex > l { - return io.ErrUnexpectedEOF + m.DisableBinlogs = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReloadSchema", wireType) } - m.RebuildCells = append(m.RebuildCells, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReloadSchema = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -16556,7 +31594,7 @@ func (m *ApplyShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplyShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsDBAResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16579,12 +31617,48 @@ func (m *ApplyShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplyShardRoutingRulesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsDBAResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyShardRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsDBAResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -16607,7 +31681,7 @@ func (m *ApplyShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16630,17 +31704,17 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplySchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteHookRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplySchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteHookRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -16650,81 +31724,33 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowLongUnavailability", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowLongUnavailability = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Sql = append(m.Sql, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DdlStrategy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletHookRequest", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -16734,91 +31760,82 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.DdlStrategy = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UuidList", wireType) + if m.TabletHookRequest == nil { + m.TabletHookRequest = &tabletmanagerdata.ExecuteHookRequest{} } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.TabletHookRequest.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.UuidList = append(m.UuidList, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MigrationContext", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.MigrationContext = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteHookResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteHookResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HookResult", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16845,38 +31862,69 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.WaitReplicasTimeout == nil { - m.WaitReplicasTimeout = &vttime.Duration{} + if m.HookResult == nil { + m.HookResult = &tabletmanagerdata.ExecuteHookResponse{} } - if err := m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.HookResult.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SkipPreflight", wireType) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength } - m.SkipPreflight = bool(v != 0) - case 9: + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FindAllShardsInKeyspaceRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FindAllShardsInKeyspaceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FindAllShardsInKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CallerId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -16886,27 +31934,23 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.CallerId == nil { - m.CallerId = &vtrpc.CallerID{} - } - if err := m.CallerId.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -16930,7 +31974,7 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *FindAllShardsInKeyspaceResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16953,17 +31997,17 @@ func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplySchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: FindAllShardsInKeyspaceResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplySchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FindAllShardsInKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UuidList", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -16973,23 +32017,120 @@ func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.UuidList = append(m.UuidList, string(dAtA[iNdEx:postIndex])) + if m.Shards == nil { + m.Shards = make(map[string]*Shard) + } + var mapkey string + var mapvalue *Shard + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Shard{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Shards[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -17013,7 +32154,7 @@ func (m *ApplySchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17036,10 +32177,10 @@ func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplyVSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetBackupsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetBackupsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -17075,10 +32216,10 @@ func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SkipRebuild", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -17088,17 +32229,29 @@ func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.SkipRebuild = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } - var v int + m.Limit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -17108,17 +32261,16 @@ func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.Limit |= uint32(b&0x7F) << shift if b < 0x80 { break } } - m.DryRun = bool(v != 0) case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Detailed", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -17128,29 +32280,17 @@ func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + m.Detailed = bool(v != 0) case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DetailedLimit", wireType) } - var msglen int + m.DetailedLimit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -17160,33 +32300,67 @@ func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.DetailedLimit |= uint32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.VSchema == nil { - m.VSchema = &vschema.Keyspace{} + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if err := m.VSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 6: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetBackupsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetBackupsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Backups", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -17196,23 +32370,25 @@ func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Sql = string(dAtA[iNdEx:postIndex]) + m.Backups = append(m.Backups, &mysqlctl.BackupInfo{}) + if err := m.Backups[len(m.Backups)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -17236,7 +32412,7 @@ func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplyVSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetCellInfoRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17259,17 +32435,17 @@ func (m *ApplyVSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplyVSchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetCellInfoRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -17279,27 +32455,23 @@ func (m *ApplyVSchemaResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.VSchema == nil { - m.VSchema = &vschema.Keyspace{} - } - if err := m.VSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Cell = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -17323,7 +32495,7 @@ func (m *ApplyVSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetCellInfoResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17346,15 +32518,15 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BackupRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetCellInfoResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17381,55 +32553,118 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} + if m.CellInfo == nil { + m.CellInfo = &topodata.CellInfo{} } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength } - m.AllowPrimary = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - m.Concurrency = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Concurrency |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellInfoNamesRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - case 4: + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellInfoNamesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellInfoNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellInfoNamesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellInfoNamesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellInfoNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IncrementalFromPos", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17457,7 +32692,7 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IncrementalFromPos = string(dAtA[iNdEx:postIndex]) + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -17481,7 +32716,7 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17504,115 +32739,66 @@ func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BackupResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetCellsAliasesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetCellsAliasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellsAliasesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellsAliasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17639,12 +32825,105 @@ func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Event == nil { - m.Event = &logutil.Event{} + if m.Aliases == nil { + m.Aliases = make(map[string]*topodata.CellsAlias) } - if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + var mapvalue *topodata.CellsAlias + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &topodata.CellsAlias{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.Aliases[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -17668,7 +32947,7 @@ func (m *BackupResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *BackupShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17691,17 +32970,17 @@ func (m *BackupShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BackupShardRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetFullStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BackupShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetFullStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -17711,95 +32990,28 @@ func (m *BackupShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowPrimary = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) - } - m.Concurrency = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Concurrency |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -17822,7 +33034,7 @@ func (m *BackupShardRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ChangeTabletTypeRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetFullStatusResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17845,15 +33057,15 @@ func (m *ChangeTabletTypeRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ChangeTabletTypeRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetFullStatusResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ChangeTabletTypeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetFullStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17880,52 +33092,13 @@ func (m *ChangeTabletTypeRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} + if m.Status == nil { + m.Status = &replicationdata.FullStatus{} } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DbType", wireType) - } - m.DbType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DbType |= topodata.TabletType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DryRun = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -17948,7 +33121,7 @@ func (m *ChangeTabletTypeRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ChangeTabletTypeResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17971,51 +33144,66 @@ func (m *ChangeTabletTypeResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ChangeTabletTypeResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetKeyspacesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ChangeTabletTypeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeforeTablet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.BeforeTablet == nil { - m.BeforeTablet = &topodata.Tablet{} + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if err := m.BeforeTablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 2: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetKeyspacesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AfterTablet", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18042,33 +33230,11 @@ func (m *ChangeTabletTypeResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.AfterTablet == nil { - m.AfterTablet = &topodata.Tablet{} - } - if err := m.AfterTablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Keyspaces = append(m.Keyspaces, &Keyspace{}) + if err := m.Keyspaces[len(m.Keyspaces)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WasDryRun", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.WasDryRun = bool(v != 0) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -18091,7 +33257,7 @@ func (m *ChangeTabletTypeResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18114,15 +33280,15 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetKeyspaceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18150,51 +33316,62 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength } - m.Force = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowEmptyVSchema", wireType) + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetKeyspaceResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - m.AllowEmptyVSchema = bool(v != 0) - case 6: + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetKeyspaceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServedFroms", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18221,65 +33398,67 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ServedFroms = append(m.ServedFroms, &topodata.Keyspace_ServedFrom{}) - if err := m.ServedFroms[len(m.ServedFroms)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Keyspace == nil { + m.Keyspace = &Keyspace{} + } + if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= topodata.KeyspaceType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BaseKeyspace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.BaseKeyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetPermissionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetPermissionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SnapshotTime", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18306,45 +33485,13 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SnapshotTime == nil { - m.SnapshotTime = &vttime.Time{} + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - if err := m.SnapshotTime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DurabilityPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DurabilityPolicy = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -18367,7 +33514,7 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *CreateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18390,15 +33537,15 @@ func (m *CreateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateKeyspaceResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetPermissionsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetPermissionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18425,10 +33572,10 @@ func (m *CreateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Keyspace == nil { - m.Keyspace = &Keyspace{} + if m.Permissions == nil { + m.Permissions = &tabletmanagerdata.Permissions{} } - if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Permissions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -18454,7 +33601,7 @@ func (m *CreateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18477,49 +33624,68 @@ func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateShardRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetRoutingRulesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRoutingRulesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RoutingRules", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -18529,64 +33695,28 @@ func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ShardName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Force = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeParent", wireType) + if m.RoutingRules == nil { + m.RoutingRules = &vschema.RoutingRules{} } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.RoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.IncludeParent = bool(v != 0) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -18609,7 +33739,7 @@ func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *CreateShardResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18632,15 +33762,15 @@ func (m *CreateShardResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateShardResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18667,18 +33797,18 @@ func (m *CreateShardResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Keyspace == nil { - m.Keyspace = &Keyspace{} + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -18688,33 +33818,29 @@ func (m *CreateShardResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Shard == nil { - m.Shard = &Shard{} - } - if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardAlreadyExists", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -18724,68 +33850,69 @@ func (m *CreateShardResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.ShardAlreadyExists = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteCellInfoRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.IncludeViews = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TableNamesOnly", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteCellInfoRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - var stringLen uint64 + m.TableNamesOnly = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSizesOnly", wireType) + } + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -18795,27 +33922,15 @@ func (m *DeleteCellInfoRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + m.TableSizesOnly = bool(v != 0) + case 7: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TableSchemaOnly", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -18832,7 +33947,7 @@ func (m *DeleteCellInfoRequest) UnmarshalVT(dAtA []byte) error { break } } - m.Force = bool(v != 0) + m.TableSchemaOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -18855,7 +33970,7 @@ func (m *DeleteCellInfoRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteCellInfoResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18878,12 +33993,48 @@ func (m *DeleteCellInfoResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteCellInfoResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Schema == nil { + m.Schema = &tabletmanagerdata.SchemaDefinition{} + } + if err := m.Schema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -18906,7 +34057,7 @@ func (m *DeleteCellInfoResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteCellsAliasRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemaMigrationsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18929,15 +34080,15 @@ func (m *DeleteCellsAliasRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteCellsAliasRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemaMigrationsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteCellsAliasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemaMigrationsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18965,115 +34116,96 @@ func (m *DeleteCellsAliasRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteCellsAliasResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MigrationContext", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteCellsAliasResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteCellsAliasResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.MigrationContext = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteKeyspaceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= SchemaMigration_Status(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Recent", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19083,29 +34215,33 @@ func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + if m.Recent == nil { + m.Recent = &vttime.Duration{} + } + if err := m.Recent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) } - var v int + m.Order = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19115,17 +34251,16 @@ func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.Order |= QueryOrdering(b&0x7F) << shift if b < 0x80 { break } } - m.Recursive = bool(v != 0) - case 3: + case 7: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } - var v int + m.Limit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19135,12 +34270,30 @@ func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.Limit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Skip", wireType) + } + m.Skip = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Skip |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.Force = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -19163,7 +34316,7 @@ func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteKeyspaceResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemaMigrationsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19186,12 +34339,46 @@ func (m *DeleteKeyspaceResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteKeyspaceResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemaMigrationsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemaMigrationsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Migrations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Migrations = append(m.Migrations, &SchemaMigration{}) + if err := m.Migrations[len(m.Migrations)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -19214,7 +34401,7 @@ func (m *DeleteKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetShardRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19237,17 +34424,17 @@ func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteShardsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetShardRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteShardsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19257,71 +34444,29 @@ func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Shards = append(m.Shards, &Shard{}) - if err := m.Shards[len(m.Shards)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Recursive = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EvenIfServing", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EvenIfServing = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19331,63 +34476,24 @@ func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.Force = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteShardsResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteShardsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteShardsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + m.ShardName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -19410,7 +34516,7 @@ func (m *DeleteShardsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetShardResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19433,17 +34539,17 @@ func (m *DeleteSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteSrvVSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetShardResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteSrvVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19453,23 +34559,27 @@ func (m *DeleteSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Cell = string(dAtA[iNdEx:postIndex]) + if m.Shard == nil { + m.Shard = &Shard{} + } + if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -19493,7 +34603,7 @@ func (m *DeleteSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteSrvVSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19516,10 +34626,10 @@ func (m *DeleteSrvVSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteSrvVSchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetShardRoutingRulesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteSrvVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetShardRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -19544,7 +34654,7 @@ func (m *DeleteSrvVSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteTabletsRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19567,51 +34677,17 @@ func (m *DeleteTabletsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteTabletsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetShardRoutingRulesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteTabletsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetShardRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAliases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TabletAliases = append(m.TabletAliases, &topodata.TabletAlias{}) - if err := m.TabletAliases[len(m.TabletAliases)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardRoutingRules", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19621,12 +34697,28 @@ func (m *DeleteTabletsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.AllowPrimary = bool(v != 0) + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ShardRoutingRules == nil { + m.ShardRoutingRules = &vschema.ShardRoutingRules{} + } + if err := m.ShardRoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -19649,7 +34741,7 @@ func (m *DeleteTabletsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteTabletsResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvKeyspaceNamesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19672,12 +34764,44 @@ func (m *DeleteTabletsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteTabletsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetSrvKeyspaceNamesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteTabletsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSrvKeyspaceNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -19700,7 +34824,7 @@ func (m *DeleteTabletsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *EmergencyReparentShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvKeyspaceNamesResponse_NameList) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19723,15 +34847,15 @@ func (m *EmergencyReparentShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EmergencyReparentShardRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSrvKeyspaceNamesResponse_NameList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EmergencyReparentShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSrvKeyspaceNamesResponse_NameList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19759,79 +34883,62 @@ func (m *EmergencyReparentShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NewPrimary", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSrvKeyspaceNamesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - if m.NewPrimary == nil { - m.NewPrimary = &topodata.TabletAlias{} - } - if err := m.NewPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 4: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSrvKeyspaceNamesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSrvKeyspaceNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IgnoreReplicas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19858,67 +34965,106 @@ func (m *EmergencyReparentShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IgnoreReplicas = append(m.IgnoreReplicas, &topodata.TabletAlias{}) - if err := m.IgnoreReplicas[len(m.IgnoreReplicas)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) + if m.Names == nil { + m.Names = make(map[string]*GetSrvKeyspaceNamesResponse_NameList) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue *GetSrvKeyspaceNamesResponse_NameList + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &GetSrvKeyspaceNamesResponse_NameList{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.WaitReplicasTimeout == nil { - m.WaitReplicasTimeout = &vttime.Duration{} - } - if err := m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Names[mapkey] = mapvalue iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PreventCrossCellPromotion", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PreventCrossCellPromotion = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -19941,7 +35087,7 @@ func (m *EmergencyReparentShardRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *EmergencyReparentShardResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvKeyspacesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19964,10 +35110,10 @@ func (m *EmergencyReparentShardResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EmergencyReparentShardResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetSrvKeyspacesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EmergencyReparentShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSrvKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -20004,7 +35150,7 @@ func (m *EmergencyReparentShardResponse) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20032,47 +35178,62 @@ func (m *EmergencyReparentShardResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PromotedPrimary", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.PromotedPrimary == nil { - m.PromotedPrimary = &topodata.TabletAlias{} + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSrvKeyspacesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if err := m.PromotedPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 4: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSrvKeyspacesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSrvKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SrvKeyspaces", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20099,10 +35260,105 @@ func (m *EmergencyReparentShardResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, &logutil.Event{}) - if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.SrvKeyspaces == nil { + m.SrvKeyspaces = make(map[string]*topodata.SrvKeyspace) + } + var mapkey string + var mapvalue *topodata.SrvKeyspace + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &topodata.SrvKeyspace{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.SrvKeyspaces[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -20126,7 +35382,7 @@ func (m *EmergencyReparentShardResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { +func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20149,17 +35405,17 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsAppRequest: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateThrottlerConfigRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsAppRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateThrottlerConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20169,31 +35425,78 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Enable = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Disable = bool(v != 0) + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Threshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Threshold = float64(math.Float64frombits(v)) + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CustomQuery", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20221,13 +35524,13 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Query = string(dAtA[iNdEx:postIndex]) + m.CustomQuery = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CustomQuerySet", wireType) } - m.MaxRows = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20237,14 +35540,15 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxRows |= int64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - case 4: + m.CustomQuerySet = bool(v != 0) + case 7: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UsePool", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CheckAsCheckSelf", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -20261,7 +35565,63 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { break } } - m.UsePool = bool(v != 0) + m.CheckAsCheckSelf = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckAsCheckShard", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CheckAsCheckShard = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ThrottledApp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ThrottledApp == nil { + m.ThrottledApp = &topodata.ThrottledAppRule{} + } + if err := m.ThrottledApp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -20284,7 +35644,7 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { +func (m *UpdateThrottlerConfigResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20307,48 +35667,12 @@ func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsAppResponse: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateThrottlerConfigResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsAppResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateThrottlerConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Result == nil { - m.Result = &query.QueryResult{} - } - if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -20371,7 +35695,7 @@ func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsDBARequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20394,51 +35718,15 @@ func (m *ExecuteFetchAsDBARequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsDBARequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSrvVSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsDBARequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSrvVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20466,67 +35754,8 @@ func (m *ExecuteFetchAsDBARequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Query = string(dAtA[iNdEx:postIndex]) + m.Cell = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) - } - m.MaxRows = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxRows |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DisableBinlogs", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DisableBinlogs = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReloadSchema", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReloadSchema = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -20549,7 +35778,7 @@ func (m *ExecuteFetchAsDBARequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsDBAResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvVSchemaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20572,15 +35801,15 @@ func (m *ExecuteFetchAsDBAResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsDBAResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetSrvVSchemaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsDBAResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSrvVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SrvVSchema", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20607,10 +35836,10 @@ func (m *ExecuteFetchAsDBAResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Result == nil { - m.Result = &query.QueryResult{} + if m.SrvVSchema == nil { + m.SrvVSchema = &vschema.SrvVSchema{} } - if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SrvVSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -20636,7 +35865,7 @@ func (m *ExecuteFetchAsDBAResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvVSchemasRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20659,53 +35888,17 @@ func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteHookRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSrvVSchemasRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteHookRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSrvVSchemasRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletHookRequest", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20715,27 +35908,23 @@ func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletHookRequest == nil { - m.TabletHookRequest = &tabletmanagerdata.ExecuteHookRequest{} - } - if err := m.TabletHookRequest.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -20759,7 +35948,7 @@ func (m *ExecuteHookRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetSrvVSchemasResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20782,15 +35971,15 @@ func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteHookResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetSrvVSchemasResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteHookResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSrvVSchemasResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HookResult", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SrvVSchemas", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20817,12 +36006,105 @@ func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.HookResult == nil { - m.HookResult = &tabletmanagerdata.ExecuteHookResponse{} + if m.SrvVSchemas == nil { + m.SrvVSchemas = make(map[string]*vschema.SrvVSchema) } - if err := m.HookResult.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + var mapvalue *vschema.SrvVSchema + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &vschema.SrvVSchema{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.SrvVSchemas[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -20846,7 +36128,7 @@ func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *FindAllShardsInKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetTabletRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20869,17 +36151,17 @@ func (m *FindAllShardsInKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FindAllShardsInKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetTabletRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FindAllShardsInKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20889,23 +36171,27 @@ func (m *FindAllShardsInKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -20929,7 +36215,7 @@ func (m *FindAllShardsInKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *FindAllShardsInKeyspaceResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetTabletResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20952,15 +36238,15 @@ func (m *FindAllShardsInKeyspaceResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FindAllShardsInKeyspaceResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetTabletResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FindAllShardsInKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20987,105 +36273,12 @@ func (m *FindAllShardsInKeyspaceResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Shards == nil { - m.Shards = make(map[string]*Shard) + if m.Tablet == nil { + m.Tablet = &topodata.Tablet{} } - var mapkey string - var mapvalue *Shard - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &Shard{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Shards[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -21109,7 +36302,7 @@ func (m *FindAllShardsInKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21132,10 +36325,10 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetBackupsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetTabletsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetBackupsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetTabletsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -21203,10 +36396,10 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } - m.Limit = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -21216,14 +36409,27 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Limit |= uint32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Detailed", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Strict", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -21240,12 +36446,46 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { break } } - m.Detailed = bool(v != 0) + m.Strict = bool(v != 0) case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAliases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TabletAliases = append(m.TabletAliases, &topodata.TabletAlias{}) + if err := m.TabletAliases[len(m.TabletAliases)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DetailedLimit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) } - m.DetailedLimit = 0 + m.TabletType = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -21255,7 +36495,7 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DetailedLimit |= uint32(b&0x7F) << shift + m.TabletType |= topodata.TabletType(b&0x7F) << shift if b < 0x80 { break } @@ -21282,7 +36522,7 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetTabletsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21305,15 +36545,15 @@ func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetBackupsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetTabletsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetBackupsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetTabletsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tablets", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21340,8 +36580,8 @@ func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Backups = append(m.Backups, &mysqlctl.BackupInfo{}) - if err := m.Backups[len(m.Backups)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Tablets = append(m.Tablets, &topodata.Tablet{}) + if err := m.Tablets[len(m.Tablets)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -21367,7 +36607,7 @@ func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellInfoRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetTopologyPathRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21390,15 +36630,15 @@ func (m *GetCellInfoRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellInfoRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetTopologyPathRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetTopologyPathRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21426,7 +36666,7 @@ func (m *GetCellInfoRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cell = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -21450,7 +36690,7 @@ func (m *GetCellInfoRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellInfoResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetTopologyPathResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21473,15 +36713,15 @@ func (m *GetCellInfoResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellInfoResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetTopologyPathResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetTopologyPathResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21508,10 +36748,10 @@ func (m *GetCellInfoResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.CellInfo == nil { - m.CellInfo = &topodata.CellInfo{} + if m.Cell == nil { + m.Cell = &TopologyCell{} } - if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Cell.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -21537,7 +36777,7 @@ func (m *GetCellInfoResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellInfoNamesRequest) UnmarshalVT(dAtA []byte) error { +func (m *TopologyCell) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21560,66 +36800,111 @@ func (m *GetCellInfoNamesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellInfoNamesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: TopologyCell: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellInfoNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: TopologyCell: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetCellInfoNamesResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetCellInfoNamesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellInfoNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Data = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Children", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -21647,7 +36932,7 @@ func (m *GetCellInfoNamesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + m.Children = append(m.Children, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -21671,7 +36956,7 @@ func (m *GetCellInfoNamesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21694,12 +36979,44 @@ func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellsAliasesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetVSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellsAliasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -21722,7 +37039,7 @@ func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetVersionRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21745,15 +37062,15 @@ func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellsAliasesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetVersionRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellsAliasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetVersionRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -21780,105 +37097,12 @@ func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Aliases == nil { - m.Aliases = make(map[string]*topodata.CellsAlias) + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - var mapkey string - var mapvalue *topodata.CellsAlias - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &topodata.CellsAlias{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Aliases[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -21902,7 +37126,7 @@ func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetVersionResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21925,17 +37149,17 @@ func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetFullStatusRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetVersionResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetFullStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetVersionResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -21945,27 +37169,23 @@ func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -21989,7 +37209,7 @@ func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetFullStatusResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetVSchemaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22012,15 +37232,15 @@ func (m *GetFullStatusResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetFullStatusResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetVSchemaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetFullStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22047,10 +37267,10 @@ func (m *GetFullStatusResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Status == nil { - m.Status = &replicationdata.FullStatus{} + if m.VSchema == nil { + m.VSchema = &vschema.Keyspace{} } - if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.VSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -22076,7 +37296,7 @@ func (m *GetFullStatusResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22099,12 +37319,116 @@ func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetKeyspacesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetWorkflowsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetWorkflowsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveOnly = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NameOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NameOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -22127,7 +37451,7 @@ func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetWorkflowsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22150,15 +37474,15 @@ func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetKeyspacesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetWorkflowsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetWorkflowsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflows", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22185,8 +37509,8 @@ func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspaces = append(m.Keyspaces, &Keyspace{}) - if err := m.Keyspaces[len(m.Keyspaces)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Workflows = append(m.Workflows, &Workflow{}) + if err := m.Workflows[len(m.Workflows)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -22212,7 +37536,7 @@ func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22235,10 +37559,10 @@ func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: InitShardPrimaryRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InitShardPrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -22273,60 +37597,41 @@ func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetKeyspaceResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetKeyspaceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PrimaryElectTabletAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22353,67 +37658,36 @@ func (m *GetKeyspaceResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Keyspace == nil { - m.Keyspace = &Keyspace{} + if m.PrimaryElectTabletAlias == nil { + m.PrimaryElectTabletAlias = &topodata.TabletAlias{} } - if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.PrimaryElectTabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetPermissionsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetPermissionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Force = bool(v != 0) + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22440,10 +37714,10 @@ func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} + if m.WaitReplicasTimeout == nil { + m.WaitReplicasTimeout = &vttime.Duration{} } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -22469,7 +37743,7 @@ func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { +func (m *InitShardPrimaryResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22492,15 +37766,15 @@ func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetPermissionsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: InitShardPrimaryResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetPermissionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InitShardPrimaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22527,10 +37801,8 @@ func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Permissions == nil { - m.Permissions = &tabletmanagerdata.Permissions{} - } - if err := m.Permissions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Events = append(m.Events, &logutil.Event{}) + if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -22556,7 +37828,7 @@ func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { +func (m *LaunchSchemaMigrationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22579,12 +37851,76 @@ func (m *GetRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetRoutingRulesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: LaunchSchemaMigrationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LaunchSchemaMigrationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -22607,7 +37943,7 @@ func (m *GetRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { +func (m *LaunchSchemaMigrationResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22630,15 +37966,15 @@ func (m *GetRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetRoutingRulesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: LaunchSchemaMigrationResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LaunchSchemaMigrationResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoutingRules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowsAffectedByShard", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -22665,12 +38001,89 @@ func (m *GetRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RoutingRules == nil { - m.RoutingRules = &vschema.RoutingRules{} + if m.RowsAffectedByShard == nil { + m.RowsAffectedByShard = make(map[string]uint64) } - if err := m.RoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + var mapvalue uint64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.RowsAffectedByShard[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -22694,7 +38107,7 @@ func (m *GetRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *MoveTablesCreateRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -22717,17 +38130,17 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MoveTablesCreateRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MoveTablesCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -22737,31 +38150,27 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceKeyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -22789,9 +38198,245 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) + m.SourceKeyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetKeyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType == 0 { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes = make([]topodata.TabletType, 0, elementCount) + } + for iNdEx < postIndex { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) + } + m.TabletSelectionPreference = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletSelectionPreference |= tabletmanagerdata.TabletSelectionPreference(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceShards", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceShards = append(m.SourceShards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllTables", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllTables = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeTables", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IncludeTables = append(m.IncludeTables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 10: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) } @@ -22821,11 +38466,107 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) + m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalClusterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalClusterName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceTimeZone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceTimeZone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OnDdl = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 14: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StopAfterCopy", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -22842,10 +38583,10 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { break } } - m.IncludeViews = bool(v != 0) - case 5: + m.StopAfterCopy = bool(v != 0) + case 15: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TableNamesOnly", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DropForeignKeys", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -22862,10 +38603,10 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { break } } - m.TableNamesOnly = bool(v != 0) - case 6: + m.DropForeignKeys = bool(v != 0) + case 16: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TableSizesOnly", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeferSecondaryKeys", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -22882,10 +38623,10 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { break } } - m.TableSizesOnly = bool(v != 0) - case 7: + m.DeferSecondaryKeys = bool(v != 0) + case 17: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TableSchemaOnly", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AutoStart", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -22902,63 +38643,12 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { break } } - m.TableSchemaOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetSchemaResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + m.AutoStart = bool(v != 0) + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoRoutingRules", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -22968,28 +38658,32 @@ func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Schema == nil { - m.Schema = &tabletmanagerdata.SchemaDefinition{} + m.NoRoutingRules = bool(v != 0) + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AtomicCopy", wireType) } - if err := m.Schema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex + m.AtomicCopy = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -23012,7 +38706,7 @@ func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *MoveTablesCreateResponse_TabletInfo) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23035,17 +38729,17 @@ func (m *GetShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetShardRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MoveTablesCreateResponse_TabletInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MoveTablesCreateResponse_TabletInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -23055,29 +38749,33 @@ func (m *GetShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} + } + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -23087,24 +38785,12 @@ func (m *GetShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ShardName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.Created = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -23127,7 +38813,7 @@ func (m *GetShardRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetShardResponse) UnmarshalVT(dAtA []byte) error { +func (m *MoveTablesCreateResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23150,17 +38836,17 @@ func (m *GetShardResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetShardResponse: wiretype end group for non-group") + return fmt.Errorf("proto: MoveTablesCreateResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MoveTablesCreateResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -23170,133 +38856,27 @@ func (m *GetShardResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Shard == nil { - m.Shard = &Shard{} - } - if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Summary = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetShardRoutingRulesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetShardRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetShardRoutingRulesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetShardRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardRoutingRules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23323,10 +38903,8 @@ func (m *GetShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ShardRoutingRules == nil { - m.ShardRoutingRules = &vschema.ShardRoutingRules{} - } - if err := m.ShardRoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Details = append(m.Details, &MoveTablesCreateResponse_TabletInfo{}) + if err := m.Details[len(m.Details)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -23352,7 +38930,7 @@ func (m *GetShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSrvKeyspaceNamesRequest) UnmarshalVT(dAtA []byte) error { +func (m *MoveTablesCompleteRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23375,15 +38953,15 @@ func (m *GetSrvKeyspaceNamesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSrvKeyspaceNamesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MoveTablesCompleteRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSrvKeyspaceNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MoveTablesCompleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23411,62 +38989,11 @@ func (m *GetSrvKeyspaceNamesRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetSrvKeyspaceNamesResponse_NameList) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetSrvKeyspaceNamesResponse_NameList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetSrvKeyspaceNamesResponse_NameList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetKeyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23494,64 +39021,13 @@ func (m *GetSrvKeyspaceNamesResponse_NameList) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetSrvKeyspaceNamesResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetSrvKeyspaceNamesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetSrvKeyspaceNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepData", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -23560,122 +39036,73 @@ func (m *GetSrvKeyspaceNamesResponse) UnmarshalVT(dAtA []byte) error { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Names == nil { - m.Names = make(map[string]*GetSrvKeyspaceNamesResponse_NameList) - } - var mapkey string - var mapvalue *GetSrvKeyspaceNamesResponse_NameList - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &GetSrvKeyspaceNamesResponse_NameList{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.Names[mapkey] = mapvalue - iNdEx = postIndex + m.KeepData = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepRoutingRules", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.KeepRoutingRules = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RenameTables", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RenameTables = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DryRun = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -23698,7 +39125,7 @@ func (m *GetSrvKeyspaceNamesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSrvKeyspacesRequest) UnmarshalVT(dAtA []byte) error { +func (m *MoveTablesCompleteResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23721,15 +39148,15 @@ func (m *GetSrvKeyspacesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSrvKeyspacesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: MoveTablesCompleteResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSrvKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MoveTablesCompleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23757,11 +39184,11 @@ func (m *GetSrvKeyspacesRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Summary = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DryRunResults", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -23789,7 +39216,7 @@ func (m *GetSrvKeyspacesRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + m.DryRunResults = append(m.DryRunResults, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -23813,7 +39240,7 @@ func (m *GetSrvKeyspacesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSrvKeyspacesResponse) UnmarshalVT(dAtA []byte) error { +func (m *PingTabletRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23836,15 +39263,15 @@ func (m *GetSrvKeyspacesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSrvKeyspacesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: PingTabletRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSrvKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PingTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SrvKeyspaces", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23871,105 +39298,12 @@ func (m *GetSrvKeyspacesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SrvKeyspaces == nil { - m.SrvKeyspaces = make(map[string]*topodata.SrvKeyspace) + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - var mapkey string - var mapvalue *topodata.SrvKeyspace - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &topodata.SrvKeyspace{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.SrvKeyspaces[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -23993,7 +39327,7 @@ func (m *GetSrvKeyspacesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error { +func (m *PingTabletResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24016,10 +39350,61 @@ func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UpdateThrottlerConfigRequest: wiretype end group for non-group") + return fmt.Errorf("proto: PingTabletResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateThrottlerConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PingTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PlannedReparentShardRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PlannedReparentShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -24055,10 +39440,10 @@ func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error { m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enable", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -24068,17 +39453,29 @@ func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.Enable = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disable", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewPrimary", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -24088,28 +39485,33 @@ func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Disable = bool(v != 0) - case 4: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field Threshold", wireType) + if msglen < 0 { + return ErrInvalidLength } - var v uint64 - if (iNdEx + 8) > l { + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { return io.ErrUnexpectedEOF } - v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - m.Threshold = float64(math.Float64frombits(v)) - case 5: + if m.NewPrimary == nil { + m.NewPrimary = &topodata.TabletAlias{} + } + if err := m.NewPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CustomQuery", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AvoidPrimary", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -24119,29 +39521,33 @@ func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.CustomQuery = string(dAtA[iNdEx:postIndex]) + if m.AvoidPrimary == nil { + m.AvoidPrimary = &topodata.TabletAlias{} + } + if err := m.AvoidPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CustomQuerySet", wireType) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -24151,52 +39557,28 @@ func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.CustomQuerySet = bool(v != 0) - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckAsCheckSelf", wireType) + if msglen < 0 { + return ErrInvalidLength } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength } - m.CheckAsCheckSelf = bool(v != 0) - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckAsCheckShard", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if m.WaitReplicasTimeout == nil { + m.WaitReplicasTimeout = &vttime.Duration{} } - m.CheckAsCheckShard = bool(v != 0) + if err := m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -24219,7 +39601,7 @@ func (m *UpdateThrottlerConfigRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UpdateThrottlerConfigResponse) UnmarshalVT(dAtA []byte) error { +func (m *PlannedReparentShardResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24238,16 +39620,150 @@ func (m *UpdateThrottlerConfigResponse) UnmarshalVT(dAtA []byte) error { if b < 0x80 { break } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateThrottlerConfigResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateThrottlerConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PlannedReparentShardResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PlannedReparentShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PromotedPrimary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PromotedPrimary == nil { + m.PromotedPrimary = &topodata.TabletAlias{} + } + if err := m.PromotedPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &logutil.Event{}) + if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -24270,7 +39786,7 @@ func (m *UpdateThrottlerConfigResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *RebuildKeyspaceGraphRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24293,15 +39809,15 @@ func (m *GetSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSrvVSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RebuildKeyspaceGraphRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSrvVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RebuildKeyspaceGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24329,8 +39845,60 @@ func (m *GetSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cell = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPartial", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowPartial = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -24353,7 +39921,7 @@ func (m *GetSrvVSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSrvVSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *RebuildKeyspaceGraphResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24376,48 +39944,12 @@ func (m *GetSrvVSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSrvVSchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RebuildKeyspaceGraphResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSrvVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RebuildKeyspaceGraphResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SrvVSchema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SrvVSchema == nil { - m.SrvVSchema = &vschema.SrvVSchema{} - } - if err := m.SrvVSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -24440,7 +39972,7 @@ func (m *GetSrvVSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSrvVSchemasRequest) UnmarshalVT(dAtA []byte) error { +func (m *RebuildVSchemaGraphRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24463,13 +39995,13 @@ func (m *GetSrvVSchemasRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSrvVSchemasRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RebuildVSchemaGraphRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSrvVSchemasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RebuildVSchemaGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 2: + case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } @@ -24523,7 +40055,7 @@ func (m *GetSrvVSchemasRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSrvVSchemasResponse) UnmarshalVT(dAtA []byte) error { +func (m *RebuildVSchemaGraphResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24546,141 +40078,12 @@ func (m *GetSrvVSchemasResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSrvVSchemasResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RebuildVSchemaGraphResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSrvVSchemasResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RebuildVSchemaGraphResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SrvVSchemas", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SrvVSchemas == nil { - m.SrvVSchemas = make(map[string]*vschema.SrvVSchema) - } - var mapkey string - var mapvalue *vschema.SrvVSchema - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &vschema.SrvVSchema{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.SrvVSchemas[mapkey] = mapvalue - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -24703,7 +40106,7 @@ func (m *GetSrvVSchemasResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetTabletRequest) UnmarshalVT(dAtA []byte) error { +func (m *RefreshStateRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24726,10 +40129,10 @@ func (m *GetTabletRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetTabletRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RefreshStateRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RefreshStateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -24790,7 +40193,7 @@ func (m *GetTabletRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetTabletResponse) UnmarshalVT(dAtA []byte) error { +func (m *RefreshStateResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24813,48 +40216,12 @@ func (m *GetTabletResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetTabletResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RefreshStateResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RefreshStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Tablet == nil { - m.Tablet = &topodata.Tablet{} - } - if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -24877,7 +40244,7 @@ func (m *GetTabletResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error { +func (m *RefreshStateByShardRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24900,10 +40267,10 @@ func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetTabletsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RefreshStateByShardRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetTabletsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RefreshStateByShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -25002,9 +40369,60 @@ func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error { } m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RefreshStateByShardResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RefreshStateByShardResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RefreshStateByShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Strict", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IsPartialRefresh", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -25021,12 +40439,12 @@ func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error { break } } - m.Strict = bool(v != 0) - case 5: + m.IsPartialRefresh = bool(v != 0) + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAliases", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PartialRefreshDetails", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -25036,45 +40454,24 @@ func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.TabletAliases = append(m.TabletAliases, &topodata.TabletAlias{}) - if err := m.TabletAliases[len(m.TabletAliases)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.PartialRefreshDetails = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) - } - m.TabletType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TabletType |= topodata.TabletType(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -25097,7 +40494,7 @@ func (m *GetTabletsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetTabletsResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25120,15 +40517,15 @@ func (m *GetTabletsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetTabletsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReloadSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetTabletsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReloadSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tablets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25155,8 +40552,10 @@ func (m *GetTabletsResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tablets = append(m.Tablets, &topodata.Tablet{}) - if err := m.Tablets[len(m.Tablets)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25182,7 +40581,7 @@ func (m *GetTabletsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetTopologyPathRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReloadSchemaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25205,15 +40604,98 @@ func (m *GetTopologyPathRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetTopologyPathRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReloadSchemaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetTopologyPathRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReloadSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReloadSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReloadSchemaKeyspaceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReloadSchemaKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25241,8 +40723,47 @@ func (m *GetTopologyPathRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.WaitPosition = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludePrimary", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludePrimary = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) + } + m.Concurrency = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Concurrency |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -25265,7 +40786,7 @@ func (m *GetTopologyPathRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetTopologyPathResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReloadSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25288,15 +40809,15 @@ func (m *GetTopologyPathResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetTopologyPathResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReloadSchemaKeyspaceResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetTopologyPathResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReloadSchemaKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -25323,10 +40844,8 @@ func (m *GetTopologyPathResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Cell == nil { - m.Cell = &TopologyCell{} - } - if err := m.Cell.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Events = append(m.Events, &logutil.Event{}) + if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -25352,7 +40871,7 @@ func (m *GetTopologyPathResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *TopologyCell) UnmarshalVT(dAtA []byte) error { +func (m *ReloadSchemaShardRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25375,15 +40894,15 @@ func (m *TopologyCell) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TopologyCell: wiretype end group for non-group") + return fmt.Errorf("proto: ReloadSchemaShardRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TopologyCell: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReloadSchemaShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25411,11 +40930,11 @@ func (m *TopologyCell) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25443,11 +40962,11 @@ func (m *TopologyCell) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25475,13 +40994,13 @@ func (m *TopologyCell) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = string(dAtA[iNdEx:postIndex]) + m.WaitPosition = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Children", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludePrimary", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -25491,24 +41010,31 @@ func (m *TopologyCell) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + m.IncludePrimary = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + m.Concurrency = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Concurrency |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } } - m.Children = append(m.Children, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -25531,7 +41057,7 @@ func (m *TopologyCell) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReloadSchemaShardResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25554,17 +41080,17 @@ func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetVSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReloadSchemaShardResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReloadSchemaShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -25574,23 +41100,25 @@ func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Events = append(m.Events, &logutil.Event{}) + if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -25614,7 +41142,7 @@ func (m *GetVSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetVersionRequest) UnmarshalVT(dAtA []byte) error { +func (m *RemoveBackupRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25637,17 +41165,17 @@ func (m *GetVersionRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetVersionRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RemoveBackupRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetVersionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RemoveBackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -25657,82 +41185,59 @@ func (m *GetVersionRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetVersionResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetVersionResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetVersionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -25760,7 +41265,7 @@ func (m *GetVersionResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -25784,7 +41289,7 @@ func (m *GetVersionResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetVSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *RemoveBackupResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25794,61 +41299,25 @@ func (m *GetVSchemaResponse) UnmarshalVT(dAtA []byte) error { if shift >= 64 { return ErrIntOverflow } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetVSchemaResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VSchema", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.VSchema == nil { - m.VSchema = &vschema.Keyspace{} + if iNdEx >= l { + return io.ErrUnexpectedEOF } - if err := m.VSchema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveBackupResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveBackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -25871,7 +41340,7 @@ func (m *GetVSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error { +func (m *RemoveKeyspaceCellRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25894,10 +41363,10 @@ func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetWorkflowsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RemoveKeyspaceCellRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetWorkflowsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RemoveKeyspaceCellRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -25933,8 +41402,40 @@ func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error { m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cell = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveOnly", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -25951,7 +41452,27 @@ func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error { break } } - m.ActiveOnly = bool(v != 0) + m.Force = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Recursive = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -25974,7 +41495,7 @@ func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetWorkflowsResponse) UnmarshalVT(dAtA []byte) error { +func (m *RemoveKeyspaceCellResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -25997,46 +41518,12 @@ func (m *GetWorkflowsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetWorkflowsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RemoveKeyspaceCellResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetWorkflowsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RemoveKeyspaceCellResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workflows", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Workflows = append(m.Workflows, &Workflow{}) - if err := m.Workflows[len(m.Workflows)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -26059,7 +41546,7 @@ func (m *GetWorkflowsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error { +func (m *RemoveShardCellRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26082,10 +41569,10 @@ func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: InitShardPrimaryRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RemoveShardCellRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: InitShardPrimaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RemoveShardCellRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -26122,7 +41609,7 @@ func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26150,13 +41637,13 @@ func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.ShardName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PrimaryElectTabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -26166,27 +41653,23 @@ func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PrimaryElectTabletAlias == nil { - m.PrimaryElectTabletAlias = &topodata.TabletAlias{} - } - if err := m.PrimaryElectTabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Cell = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 0 { @@ -26209,10 +41692,10 @@ func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error { } m.Force = bool(v != 0) case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -26222,28 +41705,12 @@ func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.WaitReplicasTimeout == nil { - m.WaitReplicasTimeout = &vttime.Duration{} - } - if err := m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.Recursive = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -26266,7 +41733,7 @@ func (m *InitShardPrimaryRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *InitShardPrimaryResponse) UnmarshalVT(dAtA []byte) error { +func (m *RemoveShardCellResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26289,46 +41756,12 @@ func (m *InitShardPrimaryResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: InitShardPrimaryResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RemoveShardCellResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: InitShardPrimaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RemoveShardCellResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, &logutil.Event{}) - if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -26351,7 +41784,7 @@ func (m *InitShardPrimaryResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PingTabletRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReparentTabletRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26374,15 +41807,15 @@ func (m *PingTabletRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PingTabletRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReparentTabletRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PingTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReparentTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26409,10 +41842,10 @@ func (m *PingTabletRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -26438,7 +41871,7 @@ func (m *PingTabletRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PingTabletResponse) UnmarshalVT(dAtA []byte) error { +func (m *ReparentTabletResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26461,12 +41894,112 @@ func (m *PingTabletResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PingTabletResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ReparentTabletResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PingTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReparentTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Primary == nil { + m.Primary = &topodata.TabletAlias{} + } + if err := m.Primary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -26489,7 +42022,7 @@ func (m *PingTabletResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *ReshardCreateRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26512,15 +42045,15 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PlannedReparentShardRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReshardCreateRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PlannedReparentShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReshardCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26548,11 +42081,11 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26580,13 +42113,13 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NewPrimary", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceShards", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -26596,33 +42129,201 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.NewPrimary == nil { - m.NewPrimary = &topodata.TabletAlias{} + m.SourceShards = append(m.SourceShards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetShards", wireType) } - if err := m.NewPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.TargetShards = append(m.TargetShards, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AvoidPrimary", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } - var msglen int + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType == 0 { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes = make([]topodata.TabletType, 0, elementCount) + } + for iNdEx < postIndex { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) + } + m.TabletSelectionPreference = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletSelectionPreference |= tabletmanagerdata.TabletSelectionPreference(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipSchemaCopy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipSchemaCopy = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -26632,33 +42333,29 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.AvoidPrimary == nil { - m.AvoidPrimary = &topodata.TabletAlias{} - } - if err := m.AvoidPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.OnDdl = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitReplicasTimeout", wireType) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StopAfterCopy", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -26668,28 +42365,52 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength + m.StopAfterCopy = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeferSecondaryKeys", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if m.WaitReplicasTimeout == nil { - m.WaitReplicasTimeout = &vttime.Duration{} + m.DeferSecondaryKeys = bool(v != 0) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoStart", wireType) } - if err := m.WaitReplicasTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex + m.AutoStart = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -26712,7 +42433,7 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *PlannedReparentShardResponse) UnmarshalVT(dAtA []byte) error { +func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26735,17 +42456,17 @@ func (m *PlannedReparentShardResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PlannedReparentShardResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RestoreFromBackupRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PlannedReparentShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RestoreFromBackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -26755,29 +42476,33 @@ func (m *PlannedReparentShardResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BackupTime", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -26787,29 +42512,33 @@ func (m *PlannedReparentShardResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + if m.BackupTime == nil { + m.BackupTime = &vttime.Time{} + } + if err := m.BackupTime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PromotedPrimary", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestoreToPos", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -26819,31 +42548,47 @@ func (m *PlannedReparentShardResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PromotedPrimary == nil { - m.PromotedPrimary = &topodata.TabletAlias{} - } - if err := m.PromotedPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.RestoreToPos = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DryRun = bool(v != 0) + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RestoreToTimestamp", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26870,8 +42615,10 @@ func (m *PlannedReparentShardResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, &logutil.Event{}) - if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.RestoreToTimestamp == nil { + m.RestoreToTimestamp = &vttime.Time{} + } + if err := m.RestoreToTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -26897,7 +42644,7 @@ func (m *PlannedReparentShardResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RebuildKeyspaceGraphRequest) UnmarshalVT(dAtA []byte) error { +func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26920,17 +42667,17 @@ func (m *RebuildKeyspaceGraphRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RebuildKeyspaceGraphRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RestoreFromBackupResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RebuildKeyspaceGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RestoreFromBackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -26940,27 +42687,31 @@ func (m *RebuildKeyspaceGraphRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26988,133 +42739,11 @@ func (m *RebuildKeyspaceGraphRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPartial", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowPartial = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RebuildKeyspaceGraphResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RebuildKeyspaceGraphResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RebuildKeyspaceGraphResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RebuildVSchemaGraphRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RebuildVSchemaGraphRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RebuildVSchemaGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -27142,113 +42771,11 @@ func (m *RebuildVSchemaGraphRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RebuildVSchemaGraphResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RebuildVSchemaGraphResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RebuildVSchemaGraphResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RefreshStateRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RefreshStateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RefreshStateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27275,10 +42802,10 @@ func (m *RefreshStateRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} + if m.Event == nil { + m.Event = &logutil.Event{} } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -27304,58 +42831,7 @@ func (m *RefreshStateRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RefreshStateResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RefreshStateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RefreshStateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RefreshStateByShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *RetrySchemaMigrationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27378,10 +42854,10 @@ func (m *RefreshStateByShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RefreshStateByShardRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RetrySchemaMigrationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RefreshStateByShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RetrySchemaMigrationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -27418,39 +42894,7 @@ func (m *RefreshStateByShardRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -27478,7 +42922,7 @@ func (m *RefreshStateByShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + m.Uuid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -27502,7 +42946,7 @@ func (m *RefreshStateByShardRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RefreshStateByShardResponse) UnmarshalVT(dAtA []byte) error { +func (m *RetrySchemaMigrationResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27525,37 +42969,17 @@ func (m *RefreshStateByShardResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RefreshStateByShardResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RetrySchemaMigrationResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RefreshStateByShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RetrySchemaMigrationResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IsPartialRefresh", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IsPartialRefresh = bool(v != 0) - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PartialRefreshDetails", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RowsAffectedByShard", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -27565,23 +42989,104 @@ func (m *RefreshStateByShardResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.PartialRefreshDetails = string(dAtA[iNdEx:postIndex]) + if m.RowsAffectedByShard == nil { + m.RowsAffectedByShard = make(map[string]uint64) + } + var mapkey string + var mapvalue uint64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.RowsAffectedByShard[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -27605,7 +43110,7 @@ func (m *RefreshStateByShardResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27628,10 +43133,10 @@ func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReloadSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: RunHealthCheckRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RunHealthCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -27692,7 +43197,7 @@ func (m *ReloadSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReloadSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *RunHealthCheckResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27715,10 +43220,10 @@ func (m *ReloadSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReloadSchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RunHealthCheckResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RunHealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -27743,7 +43248,7 @@ func (m *ReloadSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReloadSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *SetKeyspaceDurabilityPolicyRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27766,10 +43271,10 @@ func (m *ReloadSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReloadSchemaKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadSchemaKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -27806,7 +43311,7 @@ func (m *ReloadSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DurabilityPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -27834,47 +43339,8 @@ func (m *ReloadSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.WaitPosition = string(dAtA[iNdEx:postIndex]) + m.DurabilityPolicy = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludePrimary", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IncludePrimary = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) - } - m.Concurrency = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Concurrency |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -27897,7 +43363,7 @@ func (m *ReloadSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReloadSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { +func (m *SetKeyspaceDurabilityPolicyResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27920,15 +43386,15 @@ func (m *ReloadSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReloadSchemaKeyspaceResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadSchemaKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27955,8 +43421,10 @@ func (m *ReloadSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, &logutil.Event{}) - if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Keyspace == nil { + m.Keyspace = &topodata.Keyspace{} + } + if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -27982,7 +43450,7 @@ func (m *ReloadSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReloadSchemaShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28005,10 +43473,10 @@ func (m *ReloadSchemaShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReloadSchemaShardRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SetKeyspaceServedFromRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadSchemaShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetKeyspaceServedFromRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -28044,10 +43512,10 @@ func (m *ReloadSchemaShardRequest) UnmarshalVT(dAtA []byte) error { m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) } - var stringLen uint64 + m.TabletType = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -28057,27 +43525,14 @@ func (m *ReloadSchemaShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.TabletType |= topodata.TabletType(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitPosition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -28105,11 +43560,11 @@ func (m *ReloadSchemaShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.WaitPosition = string(dAtA[iNdEx:postIndex]) + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludePrimary", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Remove", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -28126,12 +43581,12 @@ func (m *ReloadSchemaShardRequest) UnmarshalVT(dAtA []byte) error { break } } - m.IncludePrimary = bool(v != 0) + m.Remove = bool(v != 0) case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Concurrency", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceKeyspace", wireType) } - m.Concurrency = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -28141,11 +43596,24 @@ func (m *ReloadSchemaShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Concurrency |= uint32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -28168,7 +43636,7 @@ func (m *ReloadSchemaShardRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReloadSchemaShardResponse) UnmarshalVT(dAtA []byte) error { +func (m *SetKeyspaceServedFromResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28191,15 +43659,15 @@ func (m *ReloadSchemaShardResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReloadSchemaShardResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SetKeyspaceServedFromResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReloadSchemaShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetKeyspaceServedFromResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 2: + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -28226,8 +43694,10 @@ func (m *ReloadSchemaShardResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, &logutil.Event{}) - if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Keyspace == nil { + m.Keyspace = &topodata.Keyspace{} + } + if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -28253,7 +43723,7 @@ func (m *ReloadSchemaShardResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RemoveBackupRequest) UnmarshalVT(dAtA []byte) error { +func (m *SetKeyspaceShardingInfoRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28276,10 +43746,10 @@ func (m *RemoveBackupRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RemoveBackupRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SetKeyspaceShardingInfoRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveBackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetKeyspaceShardingInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -28314,43 +43784,11 @@ func (m *RemoveBackupRequest) UnmarshalVT(dAtA []byte) error { } m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -28360,24 +43798,12 @@ func (m *RemoveBackupRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.Force = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -28400,7 +43826,7 @@ func (m *RemoveBackupRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RemoveBackupResponse) UnmarshalVT(dAtA []byte) error { +func (m *SetKeyspaceShardingInfoResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28423,12 +43849,48 @@ func (m *RemoveBackupResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RemoveBackupResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SetKeyspaceShardingInfoResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveBackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetKeyspaceShardingInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Keyspace == nil { + m.Keyspace = &topodata.Keyspace{} + } + if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -28451,7 +43913,7 @@ func (m *RemoveBackupResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RemoveKeyspaceCellRequest) UnmarshalVT(dAtA []byte) error { +func (m *SetShardIsPrimaryServingRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28474,10 +43936,10 @@ func (m *RemoveKeyspaceCellRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RemoveKeyspaceCellRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SetShardIsPrimaryServingRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveKeyspaceCellRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetShardIsPrimaryServingRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -28514,7 +43976,7 @@ func (m *RemoveKeyspaceCellRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -28542,31 +44004,11 @@ func (m *RemoveKeyspaceCellRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cell = string(dAtA[iNdEx:postIndex]) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Force = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IsServing", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -28583,7 +44025,7 @@ func (m *RemoveKeyspaceCellRequest) UnmarshalVT(dAtA []byte) error { break } } - m.Recursive = bool(v != 0) + m.IsServing = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -28606,7 +44048,7 @@ func (m *RemoveKeyspaceCellRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RemoveKeyspaceCellResponse) UnmarshalVT(dAtA []byte) error { +func (m *SetShardIsPrimaryServingResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28629,12 +44071,48 @@ func (m *RemoveKeyspaceCellResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RemoveKeyspaceCellResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SetShardIsPrimaryServingResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveKeyspaceCellResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetShardIsPrimaryServingResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Shard == nil { + m.Shard = &topodata.Shard{} + } + if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -28657,7 +44135,7 @@ func (m *RemoveKeyspaceCellResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RemoveShardCellRequest) UnmarshalVT(dAtA []byte) error { +func (m *SetShardTabletControlRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28680,10 +44158,10 @@ func (m *RemoveShardCellRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RemoveShardCellRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SetShardTabletControlRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveShardCellRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetShardTabletControlRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -28720,7 +44198,7 @@ func (m *RemoveShardCellRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -28748,11 +44226,30 @@ func (m *RemoveShardCellRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ShardName = string(dAtA[iNdEx:postIndex]) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) + } + m.TabletType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletType |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -28780,11 +44277,43 @@ func (m *RemoveShardCellRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cell = string(dAtA[iNdEx:postIndex]) + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeniedTables", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeniedTables = append(m.DeniedTables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DisableQueryService", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -28801,10 +44330,10 @@ func (m *RemoveShardCellRequest) UnmarshalVT(dAtA []byte) error { break } } - m.Force = bool(v != 0) - case 5: + m.DisableQueryService = bool(v != 0) + case 7: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Remove", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -28821,7 +44350,7 @@ func (m *RemoveShardCellRequest) UnmarshalVT(dAtA []byte) error { break } } - m.Recursive = bool(v != 0) + m.Remove = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -28844,7 +44373,7 @@ func (m *RemoveShardCellRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RemoveShardCellResponse) UnmarshalVT(dAtA []byte) error { +func (m *SetShardTabletControlResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28867,12 +44396,48 @@ func (m *RemoveShardCellResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RemoveShardCellResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SetShardTabletControlResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveShardCellResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetShardTabletControlResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Shard == nil { + m.Shard = &topodata.Shard{} + } + if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -28895,7 +44460,7 @@ func (m *RemoveShardCellResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReparentTabletRequest) UnmarshalVT(dAtA []byte) error { +func (m *SetWritableRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28918,15 +44483,15 @@ func (m *ReparentTabletRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReparentTabletRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SetWritableRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReparentTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetWritableRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -28950,16 +44515,87 @@ func (m *ReparentTabletRequest) UnmarshalVT(dAtA []byte) error { if postIndex < 0 { return ErrInvalidLength } - if postIndex > l { + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Writable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Writable = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetWritableResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - if m.Tablet == nil { - m.Tablet = &topodata.TabletAlias{} - } - if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetWritableResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetWritableResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -28982,7 +44618,7 @@ func (m *ReparentTabletRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReparentTabletResponse) UnmarshalVT(dAtA []byte) error { +func (m *ShardReplicationAddRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29005,10 +44641,10 @@ func (m *ReparentTabletResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReparentTabletResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ShardReplicationAddRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReparentTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardReplicationAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -29077,7 +44713,7 @@ func (m *ReparentTabletResponse) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Primary", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29104,10 +44740,10 @@ func (m *ReparentTabletResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Primary == nil { - m.Primary = &topodata.TabletAlias{} + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} } - if err := m.Primary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29133,7 +44769,7 @@ func (m *ReparentTabletResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { +func (m *ShardReplicationAddResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29156,136 +44792,12 @@ func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RestoreFromBackupRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ShardReplicationAddResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RestoreFromBackupRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardReplicationAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BackupTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.BackupTime == nil { - m.BackupTime = &vttime.Time{} - } - if err := m.BackupTime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RestoreToPos", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RestoreToPos = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DryRun = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -29308,7 +44820,7 @@ func (m *RestoreFromBackupRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { +func (m *ShardReplicationFixRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29331,49 +44843,13 @@ func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RestoreFromBackupResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ShardReplicationFixRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RestoreFromBackupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardReplicationFixRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } @@ -29405,7 +44881,7 @@ func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { } m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } @@ -29437,11 +44913,11 @@ func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { } m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -29451,27 +44927,23 @@ func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Event == nil { - m.Event = &logutil.Event{} - } - if err := m.Event.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Cell = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -29495,7 +44967,7 @@ func (m *RestoreFromBackupResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { +func (m *ShardReplicationFixResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29518,15 +44990,15 @@ func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunHealthCheckRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ShardReplicationFixResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunHealthCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardReplicationFixResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29553,10 +45025,10 @@ func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} + if m.Error == nil { + m.Error = &topodata.ShardReplicationError{} } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Error.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29582,58 +45054,7 @@ func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RunHealthCheckResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RunHealthCheckResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RunHealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SetKeyspaceDurabilityPolicyRequest) UnmarshalVT(dAtA []byte) error { +func (m *ShardReplicationPositionsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29656,10 +45077,10 @@ func (m *SetKeyspaceDurabilityPolicyRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ShardReplicationPositionsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardReplicationPositionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -29696,7 +45117,7 @@ func (m *SetKeyspaceDurabilityPolicyRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DurabilityPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29724,7 +45145,7 @@ func (m *SetKeyspaceDurabilityPolicyRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DurabilityPolicy = string(dAtA[iNdEx:postIndex]) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -29748,7 +45169,7 @@ func (m *SetKeyspaceDurabilityPolicyRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetKeyspaceDurabilityPolicyResponse) UnmarshalVT(dAtA []byte) error { +func (m *ShardReplicationPositionsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29771,15 +45192,144 @@ func (m *SetKeyspaceDurabilityPolicyResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ShardReplicationPositionsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardReplicationPositionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReplicationStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReplicationStatuses == nil { + m.ReplicationStatuses = make(map[string]*replicationdata.Status) + } + var mapkey string + var mapvalue *replicationdata.Status + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &replicationdata.Status{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ReplicationStatuses[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletMap", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29796,22 +45346,115 @@ func (m *SetKeyspaceDurabilityPolicyResponse) UnmarshalVT(dAtA []byte) error { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Keyspace == nil { - m.Keyspace = &topodata.Keyspace{} - } - if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletMap == nil { + m.TabletMap = make(map[string]*topodata.Tablet) + } + var mapkey string + var mapvalue *topodata.Tablet + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &topodata.Tablet{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.TabletMap[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -29835,7 +45478,7 @@ func (m *SetKeyspaceDurabilityPolicyResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { +func (m *ShardReplicationRemoveRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29858,10 +45501,10 @@ func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetKeyspaceServedFromRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ShardReplicationRemoveRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetKeyspaceServedFromRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardReplicationRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -29897,27 +45540,8 @@ func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) - } - m.TabletType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TabletType |= topodata.TabletType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29945,33 +45569,13 @@ func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Remove", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Remove = bool(v != 0) - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceKeyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -29981,23 +45585,27 @@ func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceKeyspace = string(dAtA[iNdEx:postIndex]) + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -30021,7 +45629,7 @@ func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetKeyspaceServedFromResponse) UnmarshalVT(dAtA []byte) error { +func (m *ShardReplicationRemoveResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30044,48 +45652,12 @@ func (m *SetKeyspaceServedFromResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetKeyspaceServedFromResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ShardReplicationRemoveResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetKeyspaceServedFromResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ShardReplicationRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Keyspace == nil { - m.Keyspace = &topodata.Keyspace{} - } - if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -30108,7 +45680,7 @@ func (m *SetKeyspaceServedFromResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetKeyspaceShardingInfoRequest) UnmarshalVT(dAtA []byte) error { +func (m *SleepTabletRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30131,17 +45703,17 @@ func (m *SetKeyspaceShardingInfoRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetKeyspaceShardingInfoRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SleepTabletRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetKeyspaceShardingInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SleepTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -30151,29 +45723,33 @@ func (m *SetKeyspaceShardingInfoRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -30183,12 +45759,28 @@ func (m *SetKeyspaceShardingInfoRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Force = bool(v != 0) + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Duration == nil { + m.Duration = &vttime.Duration{} + } + if err := m.Duration.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -30211,7 +45803,7 @@ func (m *SetKeyspaceShardingInfoRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetKeyspaceShardingInfoResponse) UnmarshalVT(dAtA []byte) error { +func (m *SleepTabletResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30234,48 +45826,12 @@ func (m *SetKeyspaceShardingInfoResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetKeyspaceShardingInfoResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SleepTabletResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetKeyspaceShardingInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SleepTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Keyspace == nil { - m.Keyspace = &topodata.Keyspace{} - } - if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -30298,7 +45854,7 @@ func (m *SetKeyspaceShardingInfoResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetShardIsPrimaryServingRequest) UnmarshalVT(dAtA []byte) error { +func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30321,10 +45877,10 @@ func (m *SetShardIsPrimaryServingRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetShardIsPrimaryServingRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SourceShardAddRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetShardIsPrimaryServingRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SourceShardAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -30393,9 +45949,9 @@ func (m *SetShardIsPrimaryServingRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IsServing", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) } - var v int + m.Uid = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -30405,12 +45961,143 @@ func (m *SetShardIsPrimaryServingRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.Uid |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.IsServing = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceKeyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceShard", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceShard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KeyRange == nil { + m.KeyRange = &topodata.KeyRange{} + } + if err := m.KeyRange.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -30433,7 +46120,7 @@ func (m *SetShardIsPrimaryServingRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetShardIsPrimaryServingResponse) UnmarshalVT(dAtA []byte) error { +func (m *SourceShardAddResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30456,10 +46143,10 @@ func (m *SetShardIsPrimaryServingResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetShardIsPrimaryServingResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SourceShardAddResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetShardIsPrimaryServingResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SourceShardAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -30520,7 +46207,7 @@ func (m *SetShardIsPrimaryServingResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetShardTabletControlRequest) UnmarshalVT(dAtA []byte) error { +func (m *SourceShardDeleteRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30543,10 +46230,10 @@ func (m *SetShardTabletControlRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetShardTabletControlRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SourceShardDeleteRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetShardTabletControlRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SourceShardDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -30611,116 +46298,13 @@ func (m *SetShardTabletControlRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) - } - m.TabletType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TabletType |= topodata.TabletType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeniedTables", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeniedTables = append(m.DeniedTables, string(dAtA[iNdEx:postIndex])) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DisableQueryService", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DisableQueryService = bool(v != 0) - case 7: + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Remove", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) } - var v int + m.Uid = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -30730,12 +46314,11 @@ func (m *SetShardTabletControlRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.Uid |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.Remove = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -30758,7 +46341,7 @@ func (m *SetShardTabletControlRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetShardTabletControlResponse) UnmarshalVT(dAtA []byte) error { +func (m *SourceShardDeleteResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30781,10 +46364,10 @@ func (m *SetShardTabletControlResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetShardTabletControlResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SourceShardDeleteResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetShardTabletControlResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SourceShardDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -30845,7 +46428,7 @@ func (m *SetShardTabletControlResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetWritableRequest) UnmarshalVT(dAtA []byte) error { +func (m *StartReplicationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30868,10 +46451,10 @@ func (m *SetWritableRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetWritableRequest: wiretype end group for non-group") + return fmt.Errorf("proto: StartReplicationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetWritableRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StartReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -30910,26 +46493,6 @@ func (m *SetWritableRequest) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Writable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Writable = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -30952,7 +46515,7 @@ func (m *SetWritableRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetWritableResponse) UnmarshalVT(dAtA []byte) error { +func (m *StartReplicationResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30975,10 +46538,10 @@ func (m *SetWritableResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetWritableResponse: wiretype end group for non-group") + return fmt.Errorf("proto: StartReplicationResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetWritableResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StartReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -31003,7 +46566,7 @@ func (m *SetWritableResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ShardReplicationAddRequest) UnmarshalVT(dAtA []byte) error { +func (m *StopReplicationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31026,77 +46589,13 @@ func (m *ShardReplicationAddRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShardReplicationAddRequest: wiretype end group for non-group") + return fmt.Errorf("proto: StopReplicationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShardReplicationAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StopReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) } @@ -31154,7 +46653,7 @@ func (m *ShardReplicationAddRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ShardReplicationAddResponse) UnmarshalVT(dAtA []byte) error { +func (m *StopReplicationResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31177,10 +46676,10 @@ func (m *ShardReplicationAddResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShardReplicationAddResponse: wiretype end group for non-group") + return fmt.Errorf("proto: StopReplicationResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShardReplicationAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StopReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -31205,7 +46704,7 @@ func (m *ShardReplicationAddResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ShardReplicationFixRequest) UnmarshalVT(dAtA []byte) error { +func (m *TabletExternallyReparentedRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31228,49 +46727,17 @@ func (m *ShardReplicationFixRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShardReplicationFixRequest: wiretype end group for non-group") + return fmt.Errorf("proto: TabletExternallyReparentedRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShardReplicationFixRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: TabletExternallyReparentedRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -31280,55 +46747,27 @@ func (m *ShardReplicationFixRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cell", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Cell = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -31352,7 +46791,7 @@ func (m *ShardReplicationFixRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ShardReplicationFixResponse) UnmarshalVT(dAtA []byte) error { +func (m *TabletExternallyReparentedResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31375,17 +46814,17 @@ func (m *ShardReplicationFixResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShardReplicationFixResponse: wiretype end group for non-group") + return fmt.Errorf("proto: TabletExternallyReparentedResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShardReplicationFixResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: TabletExternallyReparentedResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -31395,82 +46834,27 @@ func (m *ShardReplicationFixResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Error == nil { - m.Error = &topodata.ShardReplicationError{} - } - if err := m.Error.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ShardReplicationPositionsRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ShardReplicationPositionsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ShardReplicationPositionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31498,13 +46882,13 @@ func (m *ShardReplicationPositionsRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NewPrimary", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -31514,78 +46898,31 @@ func (m *ShardReplicationPositionsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ShardReplicationPositionsResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.NewPrimary == nil { + m.NewPrimary = &topodata.TabletAlias{} } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if err := m.NewPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ShardReplicationPositionsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ShardReplicationPositionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReplicationStatuses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OldPrimary", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -31612,109 +46949,99 @@ func (m *ShardReplicationPositionsResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ReplicationStatuses == nil { - m.ReplicationStatuses = make(map[string]*replicationdata.Status) + if m.OldPrimary == nil { + m.OldPrimary = &topodata.TabletAlias{} } - var mapkey string - var mapvalue *replicationdata.Status - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &replicationdata.Status{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if err := m.OldPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateCellInfoRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateCellInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break } } - m.ReplicationStatuses[mapkey] = mapvalue + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletMap", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -31741,105 +47068,12 @@ func (m *ShardReplicationPositionsResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletMap == nil { - m.TabletMap = make(map[string]*topodata.Tablet) + if m.CellInfo == nil { + m.CellInfo = &topodata.CellInfo{} } - var mapkey string - var mapvalue *topodata.Tablet - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &topodata.Tablet{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.TabletMap[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -31863,7 +47097,7 @@ func (m *ShardReplicationPositionsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ShardReplicationRemoveRequest) UnmarshalVT(dAtA []byte) error { +func (m *UpdateCellInfoResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31886,15 +47120,15 @@ func (m *ShardReplicationRemoveRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShardReplicationRemoveRequest: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateCellInfoResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShardReplicationRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31922,43 +47156,11 @@ func (m *ShardReplicationRemoveRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -31985,10 +47187,10 @@ func (m *ShardReplicationRemoveRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} + if m.CellInfo == nil { + m.CellInfo = &topodata.CellInfo{} } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -32014,7 +47216,7 @@ func (m *ShardReplicationRemoveRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ShardReplicationRemoveResponse) UnmarshalVT(dAtA []byte) error { +func (m *UpdateCellsAliasRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32037,12 +47239,80 @@ func (m *ShardReplicationRemoveResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ShardReplicationRemoveResponse: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateCellsAliasRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ShardReplicationRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateCellsAliasRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellsAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CellsAlias == nil { + m.CellsAlias = &topodata.CellsAlias{} + } + if err := m.CellsAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -32065,7 +47335,7 @@ func (m *ShardReplicationRemoveResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SleepTabletRequest) UnmarshalVT(dAtA []byte) error { +func (m *UpdateCellsAliasResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32088,17 +47358,17 @@ func (m *SleepTabletRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SleepTabletRequest: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateCellsAliasResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SleepTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateCellsAliasResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -32108,31 +47378,27 @@ func (m *SleepTabletRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CellsAlias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -32159,10 +47425,10 @@ func (m *SleepTabletRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Duration == nil { - m.Duration = &vttime.Duration{} + if m.CellsAlias == nil { + m.CellsAlias = &topodata.CellsAlias{} } - if err := m.Duration.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.CellsAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -32188,7 +47454,7 @@ func (m *SleepTabletRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SleepTabletResponse) UnmarshalVT(dAtA []byte) error { +func (m *ValidateRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32211,12 +47477,32 @@ func (m *SleepTabletResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SleepTabletResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SleepTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PingTablets", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PingTablets = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -32239,7 +47525,7 @@ func (m *SleepTabletResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { +func (m *ValidateResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32262,15 +47548,15 @@ func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SourceShardAddRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SourceShardAddRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32298,13 +47584,13 @@ func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResultsByKeyspace", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -32314,78 +47600,175 @@ func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + if m.ResultsByKeyspace == nil { + m.ResultsByKeyspace = make(map[string]*ValidateKeyspaceResponse) } - m.Uid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue *ValidateKeyspaceResponse + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - m.Uid |= int32(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ValidateKeyspaceResponse{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceKeyspace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + m.ResultsByKeyspace[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.SourceKeyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidateKeyspaceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidateKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceShard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32413,49 +47796,13 @@ func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceShard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyRange", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.KeyRange == nil { - m.KeyRange = &topodata.KeyRange{} - } - if err := m.KeyRange.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PingTablets", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -32465,24 +47812,12 @@ func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + m.PingTablets = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -32505,7 +47840,7 @@ func (m *SourceShardAddRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SourceShardAddResponse) UnmarshalVT(dAtA []byte) error { +func (m *ValidateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32528,15 +47863,47 @@ func (m *SourceShardAddResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SourceShardAddResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateKeyspaceResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SourceShardAddResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResultsByShard", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -32563,12 +47930,105 @@ func (m *SourceShardAddResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Shard == nil { - m.Shard = &topodata.Shard{} + if m.ResultsByShard == nil { + m.ResultsByShard = make(map[string]*ValidateShardResponse) } - if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + var mapvalue *ValidateShardResponse + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ValidateShardResponse{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.ResultsByShard[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -32592,7 +48052,7 @@ func (m *SourceShardAddResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SourceShardDeleteRequest) UnmarshalVT(dAtA []byte) error { +func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32615,10 +48075,10 @@ func (m *SourceShardDeleteRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SourceShardDeleteRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateSchemaKeyspaceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SourceShardDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateSchemaKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -32655,7 +48115,7 @@ func (m *SourceShardDeleteRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -32683,13 +48143,13 @@ func (m *SourceShardDeleteRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) } - m.Uid = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -32699,11 +48159,52 @@ func (m *SourceShardDeleteRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Uid |= int32(b&0x7F) << shift + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeViews = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipNoPrimary", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipNoPrimary = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeVschema", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift if b < 0x80 { break } } + m.IncludeVschema = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -32726,7 +48227,7 @@ func (m *SourceShardDeleteRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SourceShardDeleteResponse) UnmarshalVT(dAtA []byte) error { +func (m *ValidateSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32749,15 +48250,47 @@ func (m *SourceShardDeleteResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SourceShardDeleteResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateSchemaKeyspaceResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SourceShardDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateSchemaKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResultsByShard", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -32784,12 +48317,105 @@ func (m *SourceShardDeleteResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Shard == nil { - m.Shard = &topodata.Shard{} + if m.ResultsByShard == nil { + m.ResultsByShard = make(map[string]*ValidateShardResponse) } - if err := m.Shard.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + var mapvalue *ValidateShardResponse + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ValidateShardResponse{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.ResultsByShard[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -32813,7 +48439,7 @@ func (m *SourceShardDeleteResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StartReplicationRequest) UnmarshalVT(dAtA []byte) error { +func (m *ValidateShardRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32836,17 +48462,17 @@ func (m *StartReplicationRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StartReplicationRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateShardRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StartReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -32856,79 +48482,76 @@ func (m *StartReplicationRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StartReplicationResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Shard = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PingTablets", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StartReplicationResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StartReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PingTablets = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -32951,7 +48574,7 @@ func (m *StartReplicationResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StopReplicationRequest) UnmarshalVT(dAtA []byte) error { +func (m *ValidateShardResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -32974,17 +48597,17 @@ func (m *StopReplicationRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StopReplicationRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateShardResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StopReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -32994,27 +48617,23 @@ func (m *StopReplicationRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -33038,7 +48657,7 @@ func (m *StopReplicationRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *StopReplicationResponse) UnmarshalVT(dAtA []byte) error { +func (m *ValidateVersionKeyspaceRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33061,12 +48680,44 @@ func (m *StopReplicationResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StopReplicationResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateVersionKeyspaceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StopReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateVersionKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -33089,7 +48740,7 @@ func (m *StopReplicationResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *TabletExternallyReparentedRequest) UnmarshalVT(dAtA []byte) error { +func (m *ValidateVersionKeyspaceResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33112,15 +48763,47 @@ func (m *TabletExternallyReparentedRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TabletExternallyReparentedRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateVersionKeyspaceResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TabletExternallyReparentedRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateVersionKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResultsByShard", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -33147,12 +48830,105 @@ func (m *TabletExternallyReparentedRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Tablet == nil { - m.Tablet = &topodata.TabletAlias{} + if m.ResultsByShard == nil { + m.ResultsByShard = make(map[string]*ValidateShardResponse) } - if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + var mapvalue *ValidateShardResponse + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ValidateShardResponse{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.ResultsByShard[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -33176,7 +48952,7 @@ func (m *TabletExternallyReparentedRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *TabletExternallyReparentedResponse) UnmarshalVT(dAtA []byte) error { +func (m *ValidateVersionShardRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33199,10 +48975,10 @@ func (m *TabletExternallyReparentedResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TabletExternallyReparentedResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateVersionShardRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TabletExternallyReparentedResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateVersionShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -33269,47 +49045,62 @@ func (m *TabletExternallyReparentedResponse) UnmarshalVT(dAtA []byte) error { } m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NewPrimary", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.NewPrimary == nil { - m.NewPrimary = &topodata.TabletAlias{} + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidateVersionShardResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if err := m.NewPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 4: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidateVersionShardResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidateVersionShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OldPrimary", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -33319,27 +49110,23 @@ func (m *TabletExternallyReparentedResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.OldPrimary == nil { - m.OldPrimary = &topodata.TabletAlias{} - } - if err := m.OldPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -33363,7 +49150,7 @@ func (m *TabletExternallyReparentedResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UpdateCellInfoRequest) UnmarshalVT(dAtA []byte) error { +func (m *ValidateVSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33386,15 +49173,15 @@ func (m *UpdateCellInfoRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UpdateCellInfoRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateVSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateCellInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -33422,13 +49209,13 @@ func (m *UpdateCellInfoRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -33438,28 +49225,76 @@ func (m *UpdateCellInfoRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.CellInfo == nil { - m.CellInfo = &topodata.CellInfo{} + m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) } - if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeViews = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -33482,7 +49317,7 @@ func (m *UpdateCellInfoRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UpdateCellInfoResponse) UnmarshalVT(dAtA []byte) error { +func (m *ValidateVSchemaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33505,15 +49340,15 @@ func (m *UpdateCellInfoResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UpdateCellInfoResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ValidateVSchemaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidateVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -33541,11 +49376,11 @@ func (m *UpdateCellInfoResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResultsByShard", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -33562,22 +49397,115 @@ func (m *UpdateCellInfoResponse) UnmarshalVT(dAtA []byte) error { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CellInfo == nil { - m.CellInfo = &topodata.CellInfo{} - } - if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResultsByShard == nil { + m.ResultsByShard = make(map[string]*ValidateShardResponse) + } + var mapkey string + var mapvalue *ValidateShardResponse + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ValidateShardResponse{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ResultsByShard[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -33601,7 +49529,7 @@ func (m *UpdateCellInfoResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UpdateCellsAliasRequest) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowDeleteRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33624,15 +49552,15 @@ func (m *UpdateCellsAliasRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UpdateCellsAliasRequest: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowDeleteRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateCellsAliasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -33660,13 +49588,13 @@ func (m *UpdateCellsAliasRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CellsAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -33676,28 +49604,64 @@ func (m *UpdateCellsAliasRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.CellsAlias == nil { - m.CellsAlias = &topodata.CellsAlias{} + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepData", wireType) } - if err := m.CellsAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex + m.KeepData = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepRoutingRules", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.KeepRoutingRules = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -33720,7 +49684,7 @@ func (m *UpdateCellsAliasRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UpdateCellsAliasResponse) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowDeleteResponse_TabletInfo) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33743,47 +49707,15 @@ func (m *UpdateCellsAliasResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UpdateCellsAliasResponse: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowDeleteResponse_TabletInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateCellsAliasResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowDeleteResponse_TabletInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CellsAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -33810,67 +49742,16 @@ func (m *UpdateCellsAliasResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.CellsAlias == nil { - m.CellsAlias = &topodata.CellsAlias{} + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} } - if err := m.CellsAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValidateRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValidateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PingTablets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -33887,7 +49768,7 @@ func (m *ValidateRequest) UnmarshalVT(dAtA []byte) error { break } } - m.PingTablets = bool(v != 0) + m.Deleted = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -33910,7 +49791,7 @@ func (m *ValidateRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateResponse) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowDeleteResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33933,15 +49814,15 @@ func (m *ValidateResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidateResponse: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowDeleteResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -33969,11 +49850,11 @@ func (m *ValidateResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) + m.Summary = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResultsByKeyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34000,105 +49881,10 @@ func (m *ValidateResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResultsByKeyspace == nil { - m.ResultsByKeyspace = make(map[string]*ValidateKeyspaceResponse) - } - var mapkey string - var mapvalue *ValidateKeyspaceResponse - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &ValidateKeyspaceResponse{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + m.Details = append(m.Details, &WorkflowDeleteResponse_TabletInfo{}) + if err := m.Details[len(m.Details)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.ResultsByKeyspace[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -34122,7 +49908,7 @@ func (m *ValidateResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowStatusRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34145,10 +49931,10 @@ func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidateKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -34184,10 +49970,10 @@ func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PingTablets", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -34197,12 +49983,24 @@ func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.PingTablets = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -34225,7 +50023,7 @@ func (m *ValidateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowStatusResponse_TableCopyState) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34248,17 +50046,17 @@ func (m *ValidateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidateKeyspaceResponse: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowStatusResponse_TableCopyState: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowStatusResponse_TableCopyState: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsCopied", wireType) } - var stringLen uint64 + m.RowsCopied = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -34268,29 +50066,16 @@ func (m *ValidateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.RowsCopied |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResultsByShard", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsTotal", wireType) } - var msglen int + m.RowsTotal = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -34300,121 +50085,71 @@ func (m *ValidateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.RowsTotal |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength + case 3: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsPercentage", wireType) } - if postIndex > l { + var v uint32 + if (iNdEx + 4) > l { return io.ErrUnexpectedEOF } - if m.ResultsByShard == nil { - m.ResultsByShard = make(map[string]*ValidateShardResponse) + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.RowsPercentage = float32(math.Float32frombits(v)) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BytesCopied", wireType) } - var mapkey string - var mapvalue *ValidateShardResponse - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + m.BytesCopied = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &ValidateShardResponse{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BytesCopied |= int64(b&0x7F) << shift + if b < 0x80 { + break } } - m.ResultsByShard[mapkey] = mapvalue - iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BytesTotal", wireType) + } + m.BytesTotal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BytesTotal |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field BytesPercentage", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.BytesPercentage = float32(math.Float32frombits(v)) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -34437,7 +50172,7 @@ func (m *ValidateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowStatusResponse_ShardStreamState) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34460,17 +50195,36 @@ func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidateSchemaKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowStatusResponse_ShardStreamState: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateSchemaKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowStatusResponse_ShardStreamState: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -34480,27 +50234,31 @@ func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} + } + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceShard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34528,13 +50286,13 @@ func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) + m.SourceShard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -34544,17 +50302,29 @@ func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.IncludeViews = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SkipNoPrimary", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - var v int + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Position = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -34564,17 +50334,29 @@ func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.SkipNoPrimary = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeVschema", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - var v int + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -34584,12 +50366,24 @@ func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.IncludeVschema = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -34612,7 +50406,7 @@ func (m *ValidateSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowStatusResponse_ShardStreams) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34635,17 +50429,17 @@ func (m *ValidateSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidateSchemaKeyspaceResponse: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowStatusResponse_ShardStreams: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateSchemaKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowStatusResponse_ShardStreams: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -34655,27 +50449,80 @@ func (m *ValidateSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) + m.Streams = append(m.Streams, &WorkflowStatusResponse_ShardStreamState{}) + if err := m.Streams[len(m.Streams)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowStatusResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResultsByShard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TableCopyState", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34702,11 +50549,11 @@ func (m *ValidateSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResultsByShard == nil { - m.ResultsByShard = make(map[string]*ValidateShardResponse) + if m.TableCopyState == nil { + m.TableCopyState = make(map[string]*WorkflowStatusResponse_TableCopyState) } var mapkey string - var mapvalue *ValidateShardResponse + var mapvalue *WorkflowStatusResponse_TableCopyState for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -34780,7 +50627,7 @@ func (m *ValidateSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &ValidateShardResponse{} + mapvalue = &WorkflowStatusResponse_TableCopyState{} if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -34800,96 +50647,13 @@ func (m *ValidateSchemaKeyspaceResponse) UnmarshalVT(dAtA []byte) error { iNdEx += skippy } } - m.ResultsByShard[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValidateShardRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValidateShardRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.TableCopyState[mapkey] = mapvalue iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShardStreams", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -34899,126 +50663,120 @@ func (m *ValidateShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PingTablets", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PingTablets = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValidateShardResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValidateShardResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + if m.ShardStreams == nil { + m.ShardStreams = make(map[string]*WorkflowStatusResponse_ShardStreams) } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue *WorkflowStatusResponse_ShardStreams + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &WorkflowStatusResponse_ShardStreams{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) + m.ShardStreams[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -35042,7 +50800,7 @@ func (m *ValidateShardResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateVersionKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowSwitchTrafficRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35065,10 +50823,10 @@ func (m *ValidateVersionKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidateVersionKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowSwitchTrafficRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateVersionKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowSwitchTrafficRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -35103,60 +50861,9 @@ func (m *ValidateVersionKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValidateVersionKeyspaceResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValidateVersionKeyspaceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateVersionKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35184,13 +50891,13 @@ func (m *ValidateVersionKeyspaceResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResultsByShard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -35200,29 +50907,27 @@ func (m *ValidateVersionKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResultsByShard == nil { - m.ResultsByShard = make(map[string]*ValidateShardResponse) - } - var mapkey string - var mapvalue *ValidateShardResponse - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType == 0 { + var v topodata.TabletType for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -35232,43 +50937,44 @@ func (m *ValidateVersionKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + v |= topodata.TabletType(b&0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - if postStringIndexmapkey > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes = make([]topodata.TabletType, 0, elementCount) + } + for iNdEx < postIndex { + var v topodata.TabletType for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -35278,43 +50984,167 @@ func (m *ValidateVersionKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= int(b&0x7F) << shift + v |= topodata.TabletType(b&0x7F) << shift if b < 0x80 { break } } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &ValidateShardResponse{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + m.TabletTypes = append(m.TabletTypes, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicationLagAllowed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxReplicationLagAllowed == nil { + m.MaxReplicationLagAllowed = &vttime.Duration{} + } + if err := m.MaxReplicationLagAllowed.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EnableReverseReplication", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EnableReverseReplication = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Direction", wireType) + } + m.Direction = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Direction |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timeout == nil { + m.Timeout = &vttime.Duration{} + } + if err := m.Timeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.ResultsByShard[mapkey] = mapvalue iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DryRun = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitializeTargetSequences", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.InitializeTargetSequences = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -35337,7 +51167,7 @@ func (m *ValidateVersionKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateVersionShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowSwitchTrafficResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35360,15 +51190,15 @@ func (m *ValidateVersionShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidateVersionShardRequest: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowSwitchTrafficResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateVersionShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowSwitchTrafficResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35396,11 +51226,11 @@ func (m *ValidateVersionShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Summary = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartState", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35428,62 +51258,43 @@ func (m *ValidateVersionShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) + m.StartState = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentState", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValidateVersionShardResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValidateVersionShardResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateVersionShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.CurrentState = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DryRunResults", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35511,7 +51322,7 @@ func (m *ValidateVersionShardResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) + m.DryRunResults = append(m.DryRunResults, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -35535,7 +51346,7 @@ func (m *ValidateVersionShardResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateVSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowUpdateRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35558,10 +51369,10 @@ func (m *ValidateVSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidateVSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowUpdateRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateVSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -35598,9 +51409,9 @@ func (m *ValidateVSchemaRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletRequest", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -35610,29 +51421,84 @@ func (m *ValidateVSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) + if m.TabletRequest == nil { + m.TabletRequest = &tabletmanagerdata.UpdateVReplicationWorkflowRequest{} + } + if err := m.TabletRequest.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowUpdateResponse_TabletInfo) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowUpdateResponse_TabletInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowUpdateResponse_TabletInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tablet", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -35642,27 +51508,31 @@ func (m *ValidateVSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) + if m.Tablet == nil { + m.Tablet = &topodata.TabletAlias{} + } + if err := m.Tablet.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Changed", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -35679,7 +51549,7 @@ func (m *ValidateVSchemaRequest) UnmarshalVT(dAtA []byte) error { break } } - m.IncludeViews = bool(v != 0) + m.Changed = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -35702,7 +51572,7 @@ func (m *ValidateVSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ValidateVSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *WorkflowUpdateResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35725,15 +51595,15 @@ func (m *ValidateVSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidateVSchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowUpdateResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidateVSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35761,11 +51631,11 @@ func (m *ValidateVSchemaResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Results = append(m.Results, string(dAtA[iNdEx:postIndex])) + m.Summary = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResultsByShard", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35792,105 +51662,10 @@ func (m *ValidateVSchemaResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResultsByShard == nil { - m.ResultsByShard = make(map[string]*ValidateShardResponse) - } - var mapkey string - var mapvalue *ValidateShardResponse - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &ValidateShardResponse{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + m.Details = append(m.Details, &WorkflowUpdateResponse_TabletInfo{}) + if err := m.Details[len(m.Details)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.ResultsByShard[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex diff --git a/go/vt/proto/vtctlservice/vtctlservice.pb.go b/go/vt/proto/vtctlservice/vtctlservice.pb.go index 615622a97aa..753ab4f0c2e 100644 --- a/go/vt/proto/vtctlservice/vtctlservice.pb.go +++ b/go/vt/proto/vtctlservice/vtctlservice.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: vtctlservice.proto @@ -51,7 +51,7 @@ var file_vtctlservice_proto_rawDesc = []byte{ 0x61, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x32, 0x91, 0x3d, 0x0a, 0x06, 0x56, 0x74, 0x63, 0x74, 0x6c, + 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x32, 0xb7, 0x47, 0x0a, 0x06, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x0b, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, @@ -94,456 +94,539 @@ var file_vtctlservice_proto_rawDesc = []byte{ 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x5d, 0x0a, - 0x10, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x20, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, - 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x6c, 0x0a, + 0x15, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x43, 0x6c, + 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, + 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x17, 0x43, + 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6f, + 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x57, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, + 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, + 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, - 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, - 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x22, 0x2e, + 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x57, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, - 0x0a, 0x16, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, - 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x60, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, - 0x73, 0x41, 0x70, 0x70, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, - 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x28, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, + 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, + 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, + 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, + 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, + 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, - 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x60, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, - 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, - 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, - 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, - 0x6f, 0x6b, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x72, 0x0a, 0x17, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x29, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, - 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, - 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, - 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, - 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, - 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, - 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x17, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, + 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, + 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, + 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, + 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, - 0x0d, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, - 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x46, - 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, - 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, - 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, - 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, - 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, - 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x54, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, + 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, + 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, + 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x20, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, + 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x47, - 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, + 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1b, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, - 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, - 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, - 0x75, 0x6c, 0x65, 0x73, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, - 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x45, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x72, - 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, + 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, + 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x21, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, + 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, + 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x20, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, - 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, - 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x47, 0x65, 0x74, - 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x73, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, - 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, - 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, - 0x74, 0x68, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, - 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, - 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x49, 0x6e, 0x69, 0x74, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x22, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, - 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, - 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, - 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x69, 0x0a, 0x14, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x52, 0x65, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, - 0x68, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, - 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, - 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, - 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x25, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, - 0x0c, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1e, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, + 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, + 0x0a, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, + 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x12, 0x21, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, + 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, + 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, + 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, + 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x5b, 0x0a, 0x10, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x63, 0x0a, + 0x12, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, + 0x65, 0x74, 0x65, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x69, 0x6e, + 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x69, 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, + 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, + 0x70, 0x68, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, + 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x25, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, + 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, + 0x0c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1e, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, + 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, + 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x66, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x6c, 0x6f, + 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, - 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x52, - 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, + 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x52, + 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, - 0x0c, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x1e, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x63, 0x0a, 0x12, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x11, 0x52, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, - 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x57, - 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x75, 0x6e, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, - 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7e, 0x0a, 0x1b, 0x53, 0x65, 0x74, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, - 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, - 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x75, 0x0a, 0x18, 0x53, 0x65, 0x74, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x6e, 0x67, 0x12, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, - 0x0a, 0x15, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, - 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1d, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x41, 0x64, 0x64, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x12, 0x25, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, - 0x69, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x78, 0x0a, 0x19, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x12, 0x52, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, + 0x6c, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x5a, 0x0a, 0x0f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, + 0x65, 0x6c, 0x6c, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, + 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, + 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x20, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x11, + 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, + 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x52, + 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x20, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x48, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x7e, 0x0a, 0x1b, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x2d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x75, 0x0a, 0x18, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, + 0x12, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x53, + 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x53, 0x65, 0x74, + 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, + 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x66, 0x0a, 0x13, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x78, 0x0a, 0x19, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, + 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x28, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x53, 0x6c, 0x65, 0x65, 0x70, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x60, 0x0a, 0x11, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, + 0x11, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x5d, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, + 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, + 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7b, 0x0a, - 0x1a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, - 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x20, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, - 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, - 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7b, 0x0a, 0x1a, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, - 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x45, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, + 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x5d, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x45, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x22, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x72, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x29, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, - 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x2b, 0x5a, 0x29, 0x76, 0x69, - 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, - 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x17, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x69, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x21, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x57, 0x0a, 0x0e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, 0x61, 0x66, 0x66, + 0x69, 0x63, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, 0x61, + 0x66, 0x66, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x42, 0x2b, 0x5a, 0x29, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, + 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_vtctlservice_proto_goTypes = []interface{}{ @@ -556,166 +639,190 @@ var file_vtctlservice_proto_goTypes = []interface{}{ (*vtctldata.ApplyVSchemaRequest)(nil), // 6: vtctldata.ApplyVSchemaRequest (*vtctldata.BackupRequest)(nil), // 7: vtctldata.BackupRequest (*vtctldata.BackupShardRequest)(nil), // 8: vtctldata.BackupShardRequest - (*vtctldata.ChangeTabletTypeRequest)(nil), // 9: vtctldata.ChangeTabletTypeRequest - (*vtctldata.CreateKeyspaceRequest)(nil), // 10: vtctldata.CreateKeyspaceRequest - (*vtctldata.CreateShardRequest)(nil), // 11: vtctldata.CreateShardRequest - (*vtctldata.DeleteCellInfoRequest)(nil), // 12: vtctldata.DeleteCellInfoRequest - (*vtctldata.DeleteCellsAliasRequest)(nil), // 13: vtctldata.DeleteCellsAliasRequest - (*vtctldata.DeleteKeyspaceRequest)(nil), // 14: vtctldata.DeleteKeyspaceRequest - (*vtctldata.DeleteShardsRequest)(nil), // 15: vtctldata.DeleteShardsRequest - (*vtctldata.DeleteSrvVSchemaRequest)(nil), // 16: vtctldata.DeleteSrvVSchemaRequest - (*vtctldata.DeleteTabletsRequest)(nil), // 17: vtctldata.DeleteTabletsRequest - (*vtctldata.EmergencyReparentShardRequest)(nil), // 18: vtctldata.EmergencyReparentShardRequest - (*vtctldata.ExecuteFetchAsAppRequest)(nil), // 19: vtctldata.ExecuteFetchAsAppRequest - (*vtctldata.ExecuteFetchAsDBARequest)(nil), // 20: vtctldata.ExecuteFetchAsDBARequest - (*vtctldata.ExecuteHookRequest)(nil), // 21: vtctldata.ExecuteHookRequest - (*vtctldata.FindAllShardsInKeyspaceRequest)(nil), // 22: vtctldata.FindAllShardsInKeyspaceRequest - (*vtctldata.GetBackupsRequest)(nil), // 23: vtctldata.GetBackupsRequest - (*vtctldata.GetCellInfoRequest)(nil), // 24: vtctldata.GetCellInfoRequest - (*vtctldata.GetCellInfoNamesRequest)(nil), // 25: vtctldata.GetCellInfoNamesRequest - (*vtctldata.GetCellsAliasesRequest)(nil), // 26: vtctldata.GetCellsAliasesRequest - (*vtctldata.GetFullStatusRequest)(nil), // 27: vtctldata.GetFullStatusRequest - (*vtctldata.GetKeyspaceRequest)(nil), // 28: vtctldata.GetKeyspaceRequest - (*vtctldata.GetKeyspacesRequest)(nil), // 29: vtctldata.GetKeyspacesRequest - (*vtctldata.GetPermissionsRequest)(nil), // 30: vtctldata.GetPermissionsRequest - (*vtctldata.GetRoutingRulesRequest)(nil), // 31: vtctldata.GetRoutingRulesRequest - (*vtctldata.GetSchemaRequest)(nil), // 32: vtctldata.GetSchemaRequest - (*vtctldata.GetShardRequest)(nil), // 33: vtctldata.GetShardRequest - (*vtctldata.GetShardRoutingRulesRequest)(nil), // 34: vtctldata.GetShardRoutingRulesRequest - (*vtctldata.GetSrvKeyspaceNamesRequest)(nil), // 35: vtctldata.GetSrvKeyspaceNamesRequest - (*vtctldata.GetSrvKeyspacesRequest)(nil), // 36: vtctldata.GetSrvKeyspacesRequest - (*vtctldata.UpdateThrottlerConfigRequest)(nil), // 37: vtctldata.UpdateThrottlerConfigRequest - (*vtctldata.GetSrvVSchemaRequest)(nil), // 38: vtctldata.GetSrvVSchemaRequest - (*vtctldata.GetSrvVSchemasRequest)(nil), // 39: vtctldata.GetSrvVSchemasRequest - (*vtctldata.GetTabletRequest)(nil), // 40: vtctldata.GetTabletRequest - (*vtctldata.GetTabletsRequest)(nil), // 41: vtctldata.GetTabletsRequest - (*vtctldata.GetTopologyPathRequest)(nil), // 42: vtctldata.GetTopologyPathRequest - (*vtctldata.GetVersionRequest)(nil), // 43: vtctldata.GetVersionRequest - (*vtctldata.GetVSchemaRequest)(nil), // 44: vtctldata.GetVSchemaRequest - (*vtctldata.GetWorkflowsRequest)(nil), // 45: vtctldata.GetWorkflowsRequest - (*vtctldata.InitShardPrimaryRequest)(nil), // 46: vtctldata.InitShardPrimaryRequest - (*vtctldata.PingTabletRequest)(nil), // 47: vtctldata.PingTabletRequest - (*vtctldata.PlannedReparentShardRequest)(nil), // 48: vtctldata.PlannedReparentShardRequest - (*vtctldata.RebuildKeyspaceGraphRequest)(nil), // 49: vtctldata.RebuildKeyspaceGraphRequest - (*vtctldata.RebuildVSchemaGraphRequest)(nil), // 50: vtctldata.RebuildVSchemaGraphRequest - (*vtctldata.RefreshStateRequest)(nil), // 51: vtctldata.RefreshStateRequest - (*vtctldata.RefreshStateByShardRequest)(nil), // 52: vtctldata.RefreshStateByShardRequest - (*vtctldata.ReloadSchemaRequest)(nil), // 53: vtctldata.ReloadSchemaRequest - (*vtctldata.ReloadSchemaKeyspaceRequest)(nil), // 54: vtctldata.ReloadSchemaKeyspaceRequest - (*vtctldata.ReloadSchemaShardRequest)(nil), // 55: vtctldata.ReloadSchemaShardRequest - (*vtctldata.RemoveBackupRequest)(nil), // 56: vtctldata.RemoveBackupRequest - (*vtctldata.RemoveKeyspaceCellRequest)(nil), // 57: vtctldata.RemoveKeyspaceCellRequest - (*vtctldata.RemoveShardCellRequest)(nil), // 58: vtctldata.RemoveShardCellRequest - (*vtctldata.ReparentTabletRequest)(nil), // 59: vtctldata.ReparentTabletRequest - (*vtctldata.RestoreFromBackupRequest)(nil), // 60: vtctldata.RestoreFromBackupRequest - (*vtctldata.RunHealthCheckRequest)(nil), // 61: vtctldata.RunHealthCheckRequest - (*vtctldata.SetKeyspaceDurabilityPolicyRequest)(nil), // 62: vtctldata.SetKeyspaceDurabilityPolicyRequest - (*vtctldata.SetShardIsPrimaryServingRequest)(nil), // 63: vtctldata.SetShardIsPrimaryServingRequest - (*vtctldata.SetShardTabletControlRequest)(nil), // 64: vtctldata.SetShardTabletControlRequest - (*vtctldata.SetWritableRequest)(nil), // 65: vtctldata.SetWritableRequest - (*vtctldata.ShardReplicationAddRequest)(nil), // 66: vtctldata.ShardReplicationAddRequest - (*vtctldata.ShardReplicationFixRequest)(nil), // 67: vtctldata.ShardReplicationFixRequest - (*vtctldata.ShardReplicationPositionsRequest)(nil), // 68: vtctldata.ShardReplicationPositionsRequest - (*vtctldata.ShardReplicationRemoveRequest)(nil), // 69: vtctldata.ShardReplicationRemoveRequest - (*vtctldata.SleepTabletRequest)(nil), // 70: vtctldata.SleepTabletRequest - (*vtctldata.SourceShardAddRequest)(nil), // 71: vtctldata.SourceShardAddRequest - (*vtctldata.SourceShardDeleteRequest)(nil), // 72: vtctldata.SourceShardDeleteRequest - (*vtctldata.StartReplicationRequest)(nil), // 73: vtctldata.StartReplicationRequest - (*vtctldata.StopReplicationRequest)(nil), // 74: vtctldata.StopReplicationRequest - (*vtctldata.TabletExternallyReparentedRequest)(nil), // 75: vtctldata.TabletExternallyReparentedRequest - (*vtctldata.UpdateCellInfoRequest)(nil), // 76: vtctldata.UpdateCellInfoRequest - (*vtctldata.UpdateCellsAliasRequest)(nil), // 77: vtctldata.UpdateCellsAliasRequest - (*vtctldata.ValidateRequest)(nil), // 78: vtctldata.ValidateRequest - (*vtctldata.ValidateKeyspaceRequest)(nil), // 79: vtctldata.ValidateKeyspaceRequest - (*vtctldata.ValidateSchemaKeyspaceRequest)(nil), // 80: vtctldata.ValidateSchemaKeyspaceRequest - (*vtctldata.ValidateShardRequest)(nil), // 81: vtctldata.ValidateShardRequest - (*vtctldata.ValidateVersionKeyspaceRequest)(nil), // 82: vtctldata.ValidateVersionKeyspaceRequest - (*vtctldata.ValidateVersionShardRequest)(nil), // 83: vtctldata.ValidateVersionShardRequest - (*vtctldata.ValidateVSchemaRequest)(nil), // 84: vtctldata.ValidateVSchemaRequest - (*vtctldata.ExecuteVtctlCommandResponse)(nil), // 85: vtctldata.ExecuteVtctlCommandResponse - (*vtctldata.AddCellInfoResponse)(nil), // 86: vtctldata.AddCellInfoResponse - (*vtctldata.AddCellsAliasResponse)(nil), // 87: vtctldata.AddCellsAliasResponse - (*vtctldata.ApplyRoutingRulesResponse)(nil), // 88: vtctldata.ApplyRoutingRulesResponse - (*vtctldata.ApplySchemaResponse)(nil), // 89: vtctldata.ApplySchemaResponse - (*vtctldata.ApplyShardRoutingRulesResponse)(nil), // 90: vtctldata.ApplyShardRoutingRulesResponse - (*vtctldata.ApplyVSchemaResponse)(nil), // 91: vtctldata.ApplyVSchemaResponse - (*vtctldata.BackupResponse)(nil), // 92: vtctldata.BackupResponse - (*vtctldata.ChangeTabletTypeResponse)(nil), // 93: vtctldata.ChangeTabletTypeResponse - (*vtctldata.CreateKeyspaceResponse)(nil), // 94: vtctldata.CreateKeyspaceResponse - (*vtctldata.CreateShardResponse)(nil), // 95: vtctldata.CreateShardResponse - (*vtctldata.DeleteCellInfoResponse)(nil), // 96: vtctldata.DeleteCellInfoResponse - (*vtctldata.DeleteCellsAliasResponse)(nil), // 97: vtctldata.DeleteCellsAliasResponse - (*vtctldata.DeleteKeyspaceResponse)(nil), // 98: vtctldata.DeleteKeyspaceResponse - (*vtctldata.DeleteShardsResponse)(nil), // 99: vtctldata.DeleteShardsResponse - (*vtctldata.DeleteSrvVSchemaResponse)(nil), // 100: vtctldata.DeleteSrvVSchemaResponse - (*vtctldata.DeleteTabletsResponse)(nil), // 101: vtctldata.DeleteTabletsResponse - (*vtctldata.EmergencyReparentShardResponse)(nil), // 102: vtctldata.EmergencyReparentShardResponse - (*vtctldata.ExecuteFetchAsAppResponse)(nil), // 103: vtctldata.ExecuteFetchAsAppResponse - (*vtctldata.ExecuteFetchAsDBAResponse)(nil), // 104: vtctldata.ExecuteFetchAsDBAResponse - (*vtctldata.ExecuteHookResponse)(nil), // 105: vtctldata.ExecuteHookResponse - (*vtctldata.FindAllShardsInKeyspaceResponse)(nil), // 106: vtctldata.FindAllShardsInKeyspaceResponse - (*vtctldata.GetBackupsResponse)(nil), // 107: vtctldata.GetBackupsResponse - (*vtctldata.GetCellInfoResponse)(nil), // 108: vtctldata.GetCellInfoResponse - (*vtctldata.GetCellInfoNamesResponse)(nil), // 109: vtctldata.GetCellInfoNamesResponse - (*vtctldata.GetCellsAliasesResponse)(nil), // 110: vtctldata.GetCellsAliasesResponse - (*vtctldata.GetFullStatusResponse)(nil), // 111: vtctldata.GetFullStatusResponse - (*vtctldata.GetKeyspaceResponse)(nil), // 112: vtctldata.GetKeyspaceResponse - (*vtctldata.GetKeyspacesResponse)(nil), // 113: vtctldata.GetKeyspacesResponse - (*vtctldata.GetPermissionsResponse)(nil), // 114: vtctldata.GetPermissionsResponse - (*vtctldata.GetRoutingRulesResponse)(nil), // 115: vtctldata.GetRoutingRulesResponse - (*vtctldata.GetSchemaResponse)(nil), // 116: vtctldata.GetSchemaResponse - (*vtctldata.GetShardResponse)(nil), // 117: vtctldata.GetShardResponse - (*vtctldata.GetShardRoutingRulesResponse)(nil), // 118: vtctldata.GetShardRoutingRulesResponse - (*vtctldata.GetSrvKeyspaceNamesResponse)(nil), // 119: vtctldata.GetSrvKeyspaceNamesResponse - (*vtctldata.GetSrvKeyspacesResponse)(nil), // 120: vtctldata.GetSrvKeyspacesResponse - (*vtctldata.UpdateThrottlerConfigResponse)(nil), // 121: vtctldata.UpdateThrottlerConfigResponse - (*vtctldata.GetSrvVSchemaResponse)(nil), // 122: vtctldata.GetSrvVSchemaResponse - (*vtctldata.GetSrvVSchemasResponse)(nil), // 123: vtctldata.GetSrvVSchemasResponse - (*vtctldata.GetTabletResponse)(nil), // 124: vtctldata.GetTabletResponse - (*vtctldata.GetTabletsResponse)(nil), // 125: vtctldata.GetTabletsResponse - (*vtctldata.GetTopologyPathResponse)(nil), // 126: vtctldata.GetTopologyPathResponse - (*vtctldata.GetVersionResponse)(nil), // 127: vtctldata.GetVersionResponse - (*vtctldata.GetVSchemaResponse)(nil), // 128: vtctldata.GetVSchemaResponse - (*vtctldata.GetWorkflowsResponse)(nil), // 129: vtctldata.GetWorkflowsResponse - (*vtctldata.InitShardPrimaryResponse)(nil), // 130: vtctldata.InitShardPrimaryResponse - (*vtctldata.PingTabletResponse)(nil), // 131: vtctldata.PingTabletResponse - (*vtctldata.PlannedReparentShardResponse)(nil), // 132: vtctldata.PlannedReparentShardResponse - (*vtctldata.RebuildKeyspaceGraphResponse)(nil), // 133: vtctldata.RebuildKeyspaceGraphResponse - (*vtctldata.RebuildVSchemaGraphResponse)(nil), // 134: vtctldata.RebuildVSchemaGraphResponse - (*vtctldata.RefreshStateResponse)(nil), // 135: vtctldata.RefreshStateResponse - (*vtctldata.RefreshStateByShardResponse)(nil), // 136: vtctldata.RefreshStateByShardResponse - (*vtctldata.ReloadSchemaResponse)(nil), // 137: vtctldata.ReloadSchemaResponse - (*vtctldata.ReloadSchemaKeyspaceResponse)(nil), // 138: vtctldata.ReloadSchemaKeyspaceResponse - (*vtctldata.ReloadSchemaShardResponse)(nil), // 139: vtctldata.ReloadSchemaShardResponse - (*vtctldata.RemoveBackupResponse)(nil), // 140: vtctldata.RemoveBackupResponse - (*vtctldata.RemoveKeyspaceCellResponse)(nil), // 141: vtctldata.RemoveKeyspaceCellResponse - (*vtctldata.RemoveShardCellResponse)(nil), // 142: vtctldata.RemoveShardCellResponse - (*vtctldata.ReparentTabletResponse)(nil), // 143: vtctldata.ReparentTabletResponse - (*vtctldata.RestoreFromBackupResponse)(nil), // 144: vtctldata.RestoreFromBackupResponse - (*vtctldata.RunHealthCheckResponse)(nil), // 145: vtctldata.RunHealthCheckResponse - (*vtctldata.SetKeyspaceDurabilityPolicyResponse)(nil), // 146: vtctldata.SetKeyspaceDurabilityPolicyResponse - (*vtctldata.SetShardIsPrimaryServingResponse)(nil), // 147: vtctldata.SetShardIsPrimaryServingResponse - (*vtctldata.SetShardTabletControlResponse)(nil), // 148: vtctldata.SetShardTabletControlResponse - (*vtctldata.SetWritableResponse)(nil), // 149: vtctldata.SetWritableResponse - (*vtctldata.ShardReplicationAddResponse)(nil), // 150: vtctldata.ShardReplicationAddResponse - (*vtctldata.ShardReplicationFixResponse)(nil), // 151: vtctldata.ShardReplicationFixResponse - (*vtctldata.ShardReplicationPositionsResponse)(nil), // 152: vtctldata.ShardReplicationPositionsResponse - (*vtctldata.ShardReplicationRemoveResponse)(nil), // 153: vtctldata.ShardReplicationRemoveResponse - (*vtctldata.SleepTabletResponse)(nil), // 154: vtctldata.SleepTabletResponse - (*vtctldata.SourceShardAddResponse)(nil), // 155: vtctldata.SourceShardAddResponse - (*vtctldata.SourceShardDeleteResponse)(nil), // 156: vtctldata.SourceShardDeleteResponse - (*vtctldata.StartReplicationResponse)(nil), // 157: vtctldata.StartReplicationResponse - (*vtctldata.StopReplicationResponse)(nil), // 158: vtctldata.StopReplicationResponse - (*vtctldata.TabletExternallyReparentedResponse)(nil), // 159: vtctldata.TabletExternallyReparentedResponse - (*vtctldata.UpdateCellInfoResponse)(nil), // 160: vtctldata.UpdateCellInfoResponse - (*vtctldata.UpdateCellsAliasResponse)(nil), // 161: vtctldata.UpdateCellsAliasResponse - (*vtctldata.ValidateResponse)(nil), // 162: vtctldata.ValidateResponse - (*vtctldata.ValidateKeyspaceResponse)(nil), // 163: vtctldata.ValidateKeyspaceResponse - (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 164: vtctldata.ValidateSchemaKeyspaceResponse - (*vtctldata.ValidateShardResponse)(nil), // 165: vtctldata.ValidateShardResponse - (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 166: vtctldata.ValidateVersionKeyspaceResponse - (*vtctldata.ValidateVersionShardResponse)(nil), // 167: vtctldata.ValidateVersionShardResponse - (*vtctldata.ValidateVSchemaResponse)(nil), // 168: vtctldata.ValidateVSchemaResponse + (*vtctldata.CancelSchemaMigrationRequest)(nil), // 9: vtctldata.CancelSchemaMigrationRequest + (*vtctldata.ChangeTabletTypeRequest)(nil), // 10: vtctldata.ChangeTabletTypeRequest + (*vtctldata.CleanupSchemaMigrationRequest)(nil), // 11: vtctldata.CleanupSchemaMigrationRequest + (*vtctldata.CompleteSchemaMigrationRequest)(nil), // 12: vtctldata.CompleteSchemaMigrationRequest + (*vtctldata.CreateKeyspaceRequest)(nil), // 13: vtctldata.CreateKeyspaceRequest + (*vtctldata.CreateShardRequest)(nil), // 14: vtctldata.CreateShardRequest + (*vtctldata.DeleteCellInfoRequest)(nil), // 15: vtctldata.DeleteCellInfoRequest + (*vtctldata.DeleteCellsAliasRequest)(nil), // 16: vtctldata.DeleteCellsAliasRequest + (*vtctldata.DeleteKeyspaceRequest)(nil), // 17: vtctldata.DeleteKeyspaceRequest + (*vtctldata.DeleteShardsRequest)(nil), // 18: vtctldata.DeleteShardsRequest + (*vtctldata.DeleteSrvVSchemaRequest)(nil), // 19: vtctldata.DeleteSrvVSchemaRequest + (*vtctldata.DeleteTabletsRequest)(nil), // 20: vtctldata.DeleteTabletsRequest + (*vtctldata.EmergencyReparentShardRequest)(nil), // 21: vtctldata.EmergencyReparentShardRequest + (*vtctldata.ExecuteFetchAsAppRequest)(nil), // 22: vtctldata.ExecuteFetchAsAppRequest + (*vtctldata.ExecuteFetchAsDBARequest)(nil), // 23: vtctldata.ExecuteFetchAsDBARequest + (*vtctldata.ExecuteHookRequest)(nil), // 24: vtctldata.ExecuteHookRequest + (*vtctldata.FindAllShardsInKeyspaceRequest)(nil), // 25: vtctldata.FindAllShardsInKeyspaceRequest + (*vtctldata.GetBackupsRequest)(nil), // 26: vtctldata.GetBackupsRequest + (*vtctldata.GetCellInfoRequest)(nil), // 27: vtctldata.GetCellInfoRequest + (*vtctldata.GetCellInfoNamesRequest)(nil), // 28: vtctldata.GetCellInfoNamesRequest + (*vtctldata.GetCellsAliasesRequest)(nil), // 29: vtctldata.GetCellsAliasesRequest + (*vtctldata.GetFullStatusRequest)(nil), // 30: vtctldata.GetFullStatusRequest + (*vtctldata.GetKeyspaceRequest)(nil), // 31: vtctldata.GetKeyspaceRequest + (*vtctldata.GetKeyspacesRequest)(nil), // 32: vtctldata.GetKeyspacesRequest + (*vtctldata.GetPermissionsRequest)(nil), // 33: vtctldata.GetPermissionsRequest + (*vtctldata.GetRoutingRulesRequest)(nil), // 34: vtctldata.GetRoutingRulesRequest + (*vtctldata.GetSchemaRequest)(nil), // 35: vtctldata.GetSchemaRequest + (*vtctldata.GetSchemaMigrationsRequest)(nil), // 36: vtctldata.GetSchemaMigrationsRequest + (*vtctldata.GetShardRequest)(nil), // 37: vtctldata.GetShardRequest + (*vtctldata.GetShardRoutingRulesRequest)(nil), // 38: vtctldata.GetShardRoutingRulesRequest + (*vtctldata.GetSrvKeyspaceNamesRequest)(nil), // 39: vtctldata.GetSrvKeyspaceNamesRequest + (*vtctldata.GetSrvKeyspacesRequest)(nil), // 40: vtctldata.GetSrvKeyspacesRequest + (*vtctldata.UpdateThrottlerConfigRequest)(nil), // 41: vtctldata.UpdateThrottlerConfigRequest + (*vtctldata.GetSrvVSchemaRequest)(nil), // 42: vtctldata.GetSrvVSchemaRequest + (*vtctldata.GetSrvVSchemasRequest)(nil), // 43: vtctldata.GetSrvVSchemasRequest + (*vtctldata.GetTabletRequest)(nil), // 44: vtctldata.GetTabletRequest + (*vtctldata.GetTabletsRequest)(nil), // 45: vtctldata.GetTabletsRequest + (*vtctldata.GetTopologyPathRequest)(nil), // 46: vtctldata.GetTopologyPathRequest + (*vtctldata.GetVersionRequest)(nil), // 47: vtctldata.GetVersionRequest + (*vtctldata.GetVSchemaRequest)(nil), // 48: vtctldata.GetVSchemaRequest + (*vtctldata.GetWorkflowsRequest)(nil), // 49: vtctldata.GetWorkflowsRequest + (*vtctldata.InitShardPrimaryRequest)(nil), // 50: vtctldata.InitShardPrimaryRequest + (*vtctldata.LaunchSchemaMigrationRequest)(nil), // 51: vtctldata.LaunchSchemaMigrationRequest + (*vtctldata.MoveTablesCreateRequest)(nil), // 52: vtctldata.MoveTablesCreateRequest + (*vtctldata.MoveTablesCompleteRequest)(nil), // 53: vtctldata.MoveTablesCompleteRequest + (*vtctldata.PingTabletRequest)(nil), // 54: vtctldata.PingTabletRequest + (*vtctldata.PlannedReparentShardRequest)(nil), // 55: vtctldata.PlannedReparentShardRequest + (*vtctldata.RebuildKeyspaceGraphRequest)(nil), // 56: vtctldata.RebuildKeyspaceGraphRequest + (*vtctldata.RebuildVSchemaGraphRequest)(nil), // 57: vtctldata.RebuildVSchemaGraphRequest + (*vtctldata.RefreshStateRequest)(nil), // 58: vtctldata.RefreshStateRequest + (*vtctldata.RefreshStateByShardRequest)(nil), // 59: vtctldata.RefreshStateByShardRequest + (*vtctldata.ReloadSchemaRequest)(nil), // 60: vtctldata.ReloadSchemaRequest + (*vtctldata.ReloadSchemaKeyspaceRequest)(nil), // 61: vtctldata.ReloadSchemaKeyspaceRequest + (*vtctldata.ReloadSchemaShardRequest)(nil), // 62: vtctldata.ReloadSchemaShardRequest + (*vtctldata.RemoveBackupRequest)(nil), // 63: vtctldata.RemoveBackupRequest + (*vtctldata.RemoveKeyspaceCellRequest)(nil), // 64: vtctldata.RemoveKeyspaceCellRequest + (*vtctldata.RemoveShardCellRequest)(nil), // 65: vtctldata.RemoveShardCellRequest + (*vtctldata.ReparentTabletRequest)(nil), // 66: vtctldata.ReparentTabletRequest + (*vtctldata.ReshardCreateRequest)(nil), // 67: vtctldata.ReshardCreateRequest + (*vtctldata.RestoreFromBackupRequest)(nil), // 68: vtctldata.RestoreFromBackupRequest + (*vtctldata.RetrySchemaMigrationRequest)(nil), // 69: vtctldata.RetrySchemaMigrationRequest + (*vtctldata.RunHealthCheckRequest)(nil), // 70: vtctldata.RunHealthCheckRequest + (*vtctldata.SetKeyspaceDurabilityPolicyRequest)(nil), // 71: vtctldata.SetKeyspaceDurabilityPolicyRequest + (*vtctldata.SetShardIsPrimaryServingRequest)(nil), // 72: vtctldata.SetShardIsPrimaryServingRequest + (*vtctldata.SetShardTabletControlRequest)(nil), // 73: vtctldata.SetShardTabletControlRequest + (*vtctldata.SetWritableRequest)(nil), // 74: vtctldata.SetWritableRequest + (*vtctldata.ShardReplicationAddRequest)(nil), // 75: vtctldata.ShardReplicationAddRequest + (*vtctldata.ShardReplicationFixRequest)(nil), // 76: vtctldata.ShardReplicationFixRequest + (*vtctldata.ShardReplicationPositionsRequest)(nil), // 77: vtctldata.ShardReplicationPositionsRequest + (*vtctldata.ShardReplicationRemoveRequest)(nil), // 78: vtctldata.ShardReplicationRemoveRequest + (*vtctldata.SleepTabletRequest)(nil), // 79: vtctldata.SleepTabletRequest + (*vtctldata.SourceShardAddRequest)(nil), // 80: vtctldata.SourceShardAddRequest + (*vtctldata.SourceShardDeleteRequest)(nil), // 81: vtctldata.SourceShardDeleteRequest + (*vtctldata.StartReplicationRequest)(nil), // 82: vtctldata.StartReplicationRequest + (*vtctldata.StopReplicationRequest)(nil), // 83: vtctldata.StopReplicationRequest + (*vtctldata.TabletExternallyReparentedRequest)(nil), // 84: vtctldata.TabletExternallyReparentedRequest + (*vtctldata.UpdateCellInfoRequest)(nil), // 85: vtctldata.UpdateCellInfoRequest + (*vtctldata.UpdateCellsAliasRequest)(nil), // 86: vtctldata.UpdateCellsAliasRequest + (*vtctldata.ValidateRequest)(nil), // 87: vtctldata.ValidateRequest + (*vtctldata.ValidateKeyspaceRequest)(nil), // 88: vtctldata.ValidateKeyspaceRequest + (*vtctldata.ValidateSchemaKeyspaceRequest)(nil), // 89: vtctldata.ValidateSchemaKeyspaceRequest + (*vtctldata.ValidateShardRequest)(nil), // 90: vtctldata.ValidateShardRequest + (*vtctldata.ValidateVersionKeyspaceRequest)(nil), // 91: vtctldata.ValidateVersionKeyspaceRequest + (*vtctldata.ValidateVersionShardRequest)(nil), // 92: vtctldata.ValidateVersionShardRequest + (*vtctldata.ValidateVSchemaRequest)(nil), // 93: vtctldata.ValidateVSchemaRequest + (*vtctldata.WorkflowDeleteRequest)(nil), // 94: vtctldata.WorkflowDeleteRequest + (*vtctldata.WorkflowStatusRequest)(nil), // 95: vtctldata.WorkflowStatusRequest + (*vtctldata.WorkflowSwitchTrafficRequest)(nil), // 96: vtctldata.WorkflowSwitchTrafficRequest + (*vtctldata.WorkflowUpdateRequest)(nil), // 97: vtctldata.WorkflowUpdateRequest + (*vtctldata.ExecuteVtctlCommandResponse)(nil), // 98: vtctldata.ExecuteVtctlCommandResponse + (*vtctldata.AddCellInfoResponse)(nil), // 99: vtctldata.AddCellInfoResponse + (*vtctldata.AddCellsAliasResponse)(nil), // 100: vtctldata.AddCellsAliasResponse + (*vtctldata.ApplyRoutingRulesResponse)(nil), // 101: vtctldata.ApplyRoutingRulesResponse + (*vtctldata.ApplySchemaResponse)(nil), // 102: vtctldata.ApplySchemaResponse + (*vtctldata.ApplyShardRoutingRulesResponse)(nil), // 103: vtctldata.ApplyShardRoutingRulesResponse + (*vtctldata.ApplyVSchemaResponse)(nil), // 104: vtctldata.ApplyVSchemaResponse + (*vtctldata.BackupResponse)(nil), // 105: vtctldata.BackupResponse + (*vtctldata.CancelSchemaMigrationResponse)(nil), // 106: vtctldata.CancelSchemaMigrationResponse + (*vtctldata.ChangeTabletTypeResponse)(nil), // 107: vtctldata.ChangeTabletTypeResponse + (*vtctldata.CleanupSchemaMigrationResponse)(nil), // 108: vtctldata.CleanupSchemaMigrationResponse + (*vtctldata.CompleteSchemaMigrationResponse)(nil), // 109: vtctldata.CompleteSchemaMigrationResponse + (*vtctldata.CreateKeyspaceResponse)(nil), // 110: vtctldata.CreateKeyspaceResponse + (*vtctldata.CreateShardResponse)(nil), // 111: vtctldata.CreateShardResponse + (*vtctldata.DeleteCellInfoResponse)(nil), // 112: vtctldata.DeleteCellInfoResponse + (*vtctldata.DeleteCellsAliasResponse)(nil), // 113: vtctldata.DeleteCellsAliasResponse + (*vtctldata.DeleteKeyspaceResponse)(nil), // 114: vtctldata.DeleteKeyspaceResponse + (*vtctldata.DeleteShardsResponse)(nil), // 115: vtctldata.DeleteShardsResponse + (*vtctldata.DeleteSrvVSchemaResponse)(nil), // 116: vtctldata.DeleteSrvVSchemaResponse + (*vtctldata.DeleteTabletsResponse)(nil), // 117: vtctldata.DeleteTabletsResponse + (*vtctldata.EmergencyReparentShardResponse)(nil), // 118: vtctldata.EmergencyReparentShardResponse + (*vtctldata.ExecuteFetchAsAppResponse)(nil), // 119: vtctldata.ExecuteFetchAsAppResponse + (*vtctldata.ExecuteFetchAsDBAResponse)(nil), // 120: vtctldata.ExecuteFetchAsDBAResponse + (*vtctldata.ExecuteHookResponse)(nil), // 121: vtctldata.ExecuteHookResponse + (*vtctldata.FindAllShardsInKeyspaceResponse)(nil), // 122: vtctldata.FindAllShardsInKeyspaceResponse + (*vtctldata.GetBackupsResponse)(nil), // 123: vtctldata.GetBackupsResponse + (*vtctldata.GetCellInfoResponse)(nil), // 124: vtctldata.GetCellInfoResponse + (*vtctldata.GetCellInfoNamesResponse)(nil), // 125: vtctldata.GetCellInfoNamesResponse + (*vtctldata.GetCellsAliasesResponse)(nil), // 126: vtctldata.GetCellsAliasesResponse + (*vtctldata.GetFullStatusResponse)(nil), // 127: vtctldata.GetFullStatusResponse + (*vtctldata.GetKeyspaceResponse)(nil), // 128: vtctldata.GetKeyspaceResponse + (*vtctldata.GetKeyspacesResponse)(nil), // 129: vtctldata.GetKeyspacesResponse + (*vtctldata.GetPermissionsResponse)(nil), // 130: vtctldata.GetPermissionsResponse + (*vtctldata.GetRoutingRulesResponse)(nil), // 131: vtctldata.GetRoutingRulesResponse + (*vtctldata.GetSchemaResponse)(nil), // 132: vtctldata.GetSchemaResponse + (*vtctldata.GetSchemaMigrationsResponse)(nil), // 133: vtctldata.GetSchemaMigrationsResponse + (*vtctldata.GetShardResponse)(nil), // 134: vtctldata.GetShardResponse + (*vtctldata.GetShardRoutingRulesResponse)(nil), // 135: vtctldata.GetShardRoutingRulesResponse + (*vtctldata.GetSrvKeyspaceNamesResponse)(nil), // 136: vtctldata.GetSrvKeyspaceNamesResponse + (*vtctldata.GetSrvKeyspacesResponse)(nil), // 137: vtctldata.GetSrvKeyspacesResponse + (*vtctldata.UpdateThrottlerConfigResponse)(nil), // 138: vtctldata.UpdateThrottlerConfigResponse + (*vtctldata.GetSrvVSchemaResponse)(nil), // 139: vtctldata.GetSrvVSchemaResponse + (*vtctldata.GetSrvVSchemasResponse)(nil), // 140: vtctldata.GetSrvVSchemasResponse + (*vtctldata.GetTabletResponse)(nil), // 141: vtctldata.GetTabletResponse + (*vtctldata.GetTabletsResponse)(nil), // 142: vtctldata.GetTabletsResponse + (*vtctldata.GetTopologyPathResponse)(nil), // 143: vtctldata.GetTopologyPathResponse + (*vtctldata.GetVersionResponse)(nil), // 144: vtctldata.GetVersionResponse + (*vtctldata.GetVSchemaResponse)(nil), // 145: vtctldata.GetVSchemaResponse + (*vtctldata.GetWorkflowsResponse)(nil), // 146: vtctldata.GetWorkflowsResponse + (*vtctldata.InitShardPrimaryResponse)(nil), // 147: vtctldata.InitShardPrimaryResponse + (*vtctldata.LaunchSchemaMigrationResponse)(nil), // 148: vtctldata.LaunchSchemaMigrationResponse + (*vtctldata.WorkflowStatusResponse)(nil), // 149: vtctldata.WorkflowStatusResponse + (*vtctldata.MoveTablesCompleteResponse)(nil), // 150: vtctldata.MoveTablesCompleteResponse + (*vtctldata.PingTabletResponse)(nil), // 151: vtctldata.PingTabletResponse + (*vtctldata.PlannedReparentShardResponse)(nil), // 152: vtctldata.PlannedReparentShardResponse + (*vtctldata.RebuildKeyspaceGraphResponse)(nil), // 153: vtctldata.RebuildKeyspaceGraphResponse + (*vtctldata.RebuildVSchemaGraphResponse)(nil), // 154: vtctldata.RebuildVSchemaGraphResponse + (*vtctldata.RefreshStateResponse)(nil), // 155: vtctldata.RefreshStateResponse + (*vtctldata.RefreshStateByShardResponse)(nil), // 156: vtctldata.RefreshStateByShardResponse + (*vtctldata.ReloadSchemaResponse)(nil), // 157: vtctldata.ReloadSchemaResponse + (*vtctldata.ReloadSchemaKeyspaceResponse)(nil), // 158: vtctldata.ReloadSchemaKeyspaceResponse + (*vtctldata.ReloadSchemaShardResponse)(nil), // 159: vtctldata.ReloadSchemaShardResponse + (*vtctldata.RemoveBackupResponse)(nil), // 160: vtctldata.RemoveBackupResponse + (*vtctldata.RemoveKeyspaceCellResponse)(nil), // 161: vtctldata.RemoveKeyspaceCellResponse + (*vtctldata.RemoveShardCellResponse)(nil), // 162: vtctldata.RemoveShardCellResponse + (*vtctldata.ReparentTabletResponse)(nil), // 163: vtctldata.ReparentTabletResponse + (*vtctldata.RestoreFromBackupResponse)(nil), // 164: vtctldata.RestoreFromBackupResponse + (*vtctldata.RetrySchemaMigrationResponse)(nil), // 165: vtctldata.RetrySchemaMigrationResponse + (*vtctldata.RunHealthCheckResponse)(nil), // 166: vtctldata.RunHealthCheckResponse + (*vtctldata.SetKeyspaceDurabilityPolicyResponse)(nil), // 167: vtctldata.SetKeyspaceDurabilityPolicyResponse + (*vtctldata.SetShardIsPrimaryServingResponse)(nil), // 168: vtctldata.SetShardIsPrimaryServingResponse + (*vtctldata.SetShardTabletControlResponse)(nil), // 169: vtctldata.SetShardTabletControlResponse + (*vtctldata.SetWritableResponse)(nil), // 170: vtctldata.SetWritableResponse + (*vtctldata.ShardReplicationAddResponse)(nil), // 171: vtctldata.ShardReplicationAddResponse + (*vtctldata.ShardReplicationFixResponse)(nil), // 172: vtctldata.ShardReplicationFixResponse + (*vtctldata.ShardReplicationPositionsResponse)(nil), // 173: vtctldata.ShardReplicationPositionsResponse + (*vtctldata.ShardReplicationRemoveResponse)(nil), // 174: vtctldata.ShardReplicationRemoveResponse + (*vtctldata.SleepTabletResponse)(nil), // 175: vtctldata.SleepTabletResponse + (*vtctldata.SourceShardAddResponse)(nil), // 176: vtctldata.SourceShardAddResponse + (*vtctldata.SourceShardDeleteResponse)(nil), // 177: vtctldata.SourceShardDeleteResponse + (*vtctldata.StartReplicationResponse)(nil), // 178: vtctldata.StartReplicationResponse + (*vtctldata.StopReplicationResponse)(nil), // 179: vtctldata.StopReplicationResponse + (*vtctldata.TabletExternallyReparentedResponse)(nil), // 180: vtctldata.TabletExternallyReparentedResponse + (*vtctldata.UpdateCellInfoResponse)(nil), // 181: vtctldata.UpdateCellInfoResponse + (*vtctldata.UpdateCellsAliasResponse)(nil), // 182: vtctldata.UpdateCellsAliasResponse + (*vtctldata.ValidateResponse)(nil), // 183: vtctldata.ValidateResponse + (*vtctldata.ValidateKeyspaceResponse)(nil), // 184: vtctldata.ValidateKeyspaceResponse + (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 185: vtctldata.ValidateSchemaKeyspaceResponse + (*vtctldata.ValidateShardResponse)(nil), // 186: vtctldata.ValidateShardResponse + (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 187: vtctldata.ValidateVersionKeyspaceResponse + (*vtctldata.ValidateVersionShardResponse)(nil), // 188: vtctldata.ValidateVersionShardResponse + (*vtctldata.ValidateVSchemaResponse)(nil), // 189: vtctldata.ValidateVSchemaResponse + (*vtctldata.WorkflowDeleteResponse)(nil), // 190: vtctldata.WorkflowDeleteResponse + (*vtctldata.WorkflowSwitchTrafficResponse)(nil), // 191: vtctldata.WorkflowSwitchTrafficResponse + (*vtctldata.WorkflowUpdateResponse)(nil), // 192: vtctldata.WorkflowUpdateResponse } var file_vtctlservice_proto_depIdxs = []int32{ 0, // 0: vtctlservice.Vtctl.ExecuteVtctlCommand:input_type -> vtctldata.ExecuteVtctlCommandRequest @@ -727,169 +834,195 @@ var file_vtctlservice_proto_depIdxs = []int32{ 6, // 6: vtctlservice.Vtctld.ApplyVSchema:input_type -> vtctldata.ApplyVSchemaRequest 7, // 7: vtctlservice.Vtctld.Backup:input_type -> vtctldata.BackupRequest 8, // 8: vtctlservice.Vtctld.BackupShard:input_type -> vtctldata.BackupShardRequest - 9, // 9: vtctlservice.Vtctld.ChangeTabletType:input_type -> vtctldata.ChangeTabletTypeRequest - 10, // 10: vtctlservice.Vtctld.CreateKeyspace:input_type -> vtctldata.CreateKeyspaceRequest - 11, // 11: vtctlservice.Vtctld.CreateShard:input_type -> vtctldata.CreateShardRequest - 12, // 12: vtctlservice.Vtctld.DeleteCellInfo:input_type -> vtctldata.DeleteCellInfoRequest - 13, // 13: vtctlservice.Vtctld.DeleteCellsAlias:input_type -> vtctldata.DeleteCellsAliasRequest - 14, // 14: vtctlservice.Vtctld.DeleteKeyspace:input_type -> vtctldata.DeleteKeyspaceRequest - 15, // 15: vtctlservice.Vtctld.DeleteShards:input_type -> vtctldata.DeleteShardsRequest - 16, // 16: vtctlservice.Vtctld.DeleteSrvVSchema:input_type -> vtctldata.DeleteSrvVSchemaRequest - 17, // 17: vtctlservice.Vtctld.DeleteTablets:input_type -> vtctldata.DeleteTabletsRequest - 18, // 18: vtctlservice.Vtctld.EmergencyReparentShard:input_type -> vtctldata.EmergencyReparentShardRequest - 19, // 19: vtctlservice.Vtctld.ExecuteFetchAsApp:input_type -> vtctldata.ExecuteFetchAsAppRequest - 20, // 20: vtctlservice.Vtctld.ExecuteFetchAsDBA:input_type -> vtctldata.ExecuteFetchAsDBARequest - 21, // 21: vtctlservice.Vtctld.ExecuteHook:input_type -> vtctldata.ExecuteHookRequest - 22, // 22: vtctlservice.Vtctld.FindAllShardsInKeyspace:input_type -> vtctldata.FindAllShardsInKeyspaceRequest - 23, // 23: vtctlservice.Vtctld.GetBackups:input_type -> vtctldata.GetBackupsRequest - 24, // 24: vtctlservice.Vtctld.GetCellInfo:input_type -> vtctldata.GetCellInfoRequest - 25, // 25: vtctlservice.Vtctld.GetCellInfoNames:input_type -> vtctldata.GetCellInfoNamesRequest - 26, // 26: vtctlservice.Vtctld.GetCellsAliases:input_type -> vtctldata.GetCellsAliasesRequest - 27, // 27: vtctlservice.Vtctld.GetFullStatus:input_type -> vtctldata.GetFullStatusRequest - 28, // 28: vtctlservice.Vtctld.GetKeyspace:input_type -> vtctldata.GetKeyspaceRequest - 29, // 29: vtctlservice.Vtctld.GetKeyspaces:input_type -> vtctldata.GetKeyspacesRequest - 30, // 30: vtctlservice.Vtctld.GetPermissions:input_type -> vtctldata.GetPermissionsRequest - 31, // 31: vtctlservice.Vtctld.GetRoutingRules:input_type -> vtctldata.GetRoutingRulesRequest - 32, // 32: vtctlservice.Vtctld.GetSchema:input_type -> vtctldata.GetSchemaRequest - 33, // 33: vtctlservice.Vtctld.GetShard:input_type -> vtctldata.GetShardRequest - 34, // 34: vtctlservice.Vtctld.GetShardRoutingRules:input_type -> vtctldata.GetShardRoutingRulesRequest - 35, // 35: vtctlservice.Vtctld.GetSrvKeyspaceNames:input_type -> vtctldata.GetSrvKeyspaceNamesRequest - 36, // 36: vtctlservice.Vtctld.GetSrvKeyspaces:input_type -> vtctldata.GetSrvKeyspacesRequest - 37, // 37: vtctlservice.Vtctld.UpdateThrottlerConfig:input_type -> vtctldata.UpdateThrottlerConfigRequest - 38, // 38: vtctlservice.Vtctld.GetSrvVSchema:input_type -> vtctldata.GetSrvVSchemaRequest - 39, // 39: vtctlservice.Vtctld.GetSrvVSchemas:input_type -> vtctldata.GetSrvVSchemasRequest - 40, // 40: vtctlservice.Vtctld.GetTablet:input_type -> vtctldata.GetTabletRequest - 41, // 41: vtctlservice.Vtctld.GetTablets:input_type -> vtctldata.GetTabletsRequest - 42, // 42: vtctlservice.Vtctld.GetTopologyPath:input_type -> vtctldata.GetTopologyPathRequest - 43, // 43: vtctlservice.Vtctld.GetVersion:input_type -> vtctldata.GetVersionRequest - 44, // 44: vtctlservice.Vtctld.GetVSchema:input_type -> vtctldata.GetVSchemaRequest - 45, // 45: vtctlservice.Vtctld.GetWorkflows:input_type -> vtctldata.GetWorkflowsRequest - 46, // 46: vtctlservice.Vtctld.InitShardPrimary:input_type -> vtctldata.InitShardPrimaryRequest - 47, // 47: vtctlservice.Vtctld.PingTablet:input_type -> vtctldata.PingTabletRequest - 48, // 48: vtctlservice.Vtctld.PlannedReparentShard:input_type -> vtctldata.PlannedReparentShardRequest - 49, // 49: vtctlservice.Vtctld.RebuildKeyspaceGraph:input_type -> vtctldata.RebuildKeyspaceGraphRequest - 50, // 50: vtctlservice.Vtctld.RebuildVSchemaGraph:input_type -> vtctldata.RebuildVSchemaGraphRequest - 51, // 51: vtctlservice.Vtctld.RefreshState:input_type -> vtctldata.RefreshStateRequest - 52, // 52: vtctlservice.Vtctld.RefreshStateByShard:input_type -> vtctldata.RefreshStateByShardRequest - 53, // 53: vtctlservice.Vtctld.ReloadSchema:input_type -> vtctldata.ReloadSchemaRequest - 54, // 54: vtctlservice.Vtctld.ReloadSchemaKeyspace:input_type -> vtctldata.ReloadSchemaKeyspaceRequest - 55, // 55: vtctlservice.Vtctld.ReloadSchemaShard:input_type -> vtctldata.ReloadSchemaShardRequest - 56, // 56: vtctlservice.Vtctld.RemoveBackup:input_type -> vtctldata.RemoveBackupRequest - 57, // 57: vtctlservice.Vtctld.RemoveKeyspaceCell:input_type -> vtctldata.RemoveKeyspaceCellRequest - 58, // 58: vtctlservice.Vtctld.RemoveShardCell:input_type -> vtctldata.RemoveShardCellRequest - 59, // 59: vtctlservice.Vtctld.ReparentTablet:input_type -> vtctldata.ReparentTabletRequest - 60, // 60: vtctlservice.Vtctld.RestoreFromBackup:input_type -> vtctldata.RestoreFromBackupRequest - 61, // 61: vtctlservice.Vtctld.RunHealthCheck:input_type -> vtctldata.RunHealthCheckRequest - 62, // 62: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:input_type -> vtctldata.SetKeyspaceDurabilityPolicyRequest - 63, // 63: vtctlservice.Vtctld.SetShardIsPrimaryServing:input_type -> vtctldata.SetShardIsPrimaryServingRequest - 64, // 64: vtctlservice.Vtctld.SetShardTabletControl:input_type -> vtctldata.SetShardTabletControlRequest - 65, // 65: vtctlservice.Vtctld.SetWritable:input_type -> vtctldata.SetWritableRequest - 66, // 66: vtctlservice.Vtctld.ShardReplicationAdd:input_type -> vtctldata.ShardReplicationAddRequest - 67, // 67: vtctlservice.Vtctld.ShardReplicationFix:input_type -> vtctldata.ShardReplicationFixRequest - 68, // 68: vtctlservice.Vtctld.ShardReplicationPositions:input_type -> vtctldata.ShardReplicationPositionsRequest - 69, // 69: vtctlservice.Vtctld.ShardReplicationRemove:input_type -> vtctldata.ShardReplicationRemoveRequest - 70, // 70: vtctlservice.Vtctld.SleepTablet:input_type -> vtctldata.SleepTabletRequest - 71, // 71: vtctlservice.Vtctld.SourceShardAdd:input_type -> vtctldata.SourceShardAddRequest - 72, // 72: vtctlservice.Vtctld.SourceShardDelete:input_type -> vtctldata.SourceShardDeleteRequest - 73, // 73: vtctlservice.Vtctld.StartReplication:input_type -> vtctldata.StartReplicationRequest - 74, // 74: vtctlservice.Vtctld.StopReplication:input_type -> vtctldata.StopReplicationRequest - 75, // 75: vtctlservice.Vtctld.TabletExternallyReparented:input_type -> vtctldata.TabletExternallyReparentedRequest - 76, // 76: vtctlservice.Vtctld.UpdateCellInfo:input_type -> vtctldata.UpdateCellInfoRequest - 77, // 77: vtctlservice.Vtctld.UpdateCellsAlias:input_type -> vtctldata.UpdateCellsAliasRequest - 78, // 78: vtctlservice.Vtctld.Validate:input_type -> vtctldata.ValidateRequest - 79, // 79: vtctlservice.Vtctld.ValidateKeyspace:input_type -> vtctldata.ValidateKeyspaceRequest - 80, // 80: vtctlservice.Vtctld.ValidateSchemaKeyspace:input_type -> vtctldata.ValidateSchemaKeyspaceRequest - 81, // 81: vtctlservice.Vtctld.ValidateShard:input_type -> vtctldata.ValidateShardRequest - 82, // 82: vtctlservice.Vtctld.ValidateVersionKeyspace:input_type -> vtctldata.ValidateVersionKeyspaceRequest - 83, // 83: vtctlservice.Vtctld.ValidateVersionShard:input_type -> vtctldata.ValidateVersionShardRequest - 84, // 84: vtctlservice.Vtctld.ValidateVSchema:input_type -> vtctldata.ValidateVSchemaRequest - 85, // 85: vtctlservice.Vtctl.ExecuteVtctlCommand:output_type -> vtctldata.ExecuteVtctlCommandResponse - 86, // 86: vtctlservice.Vtctld.AddCellInfo:output_type -> vtctldata.AddCellInfoResponse - 87, // 87: vtctlservice.Vtctld.AddCellsAlias:output_type -> vtctldata.AddCellsAliasResponse - 88, // 88: vtctlservice.Vtctld.ApplyRoutingRules:output_type -> vtctldata.ApplyRoutingRulesResponse - 89, // 89: vtctlservice.Vtctld.ApplySchema:output_type -> vtctldata.ApplySchemaResponse - 90, // 90: vtctlservice.Vtctld.ApplyShardRoutingRules:output_type -> vtctldata.ApplyShardRoutingRulesResponse - 91, // 91: vtctlservice.Vtctld.ApplyVSchema:output_type -> vtctldata.ApplyVSchemaResponse - 92, // 92: vtctlservice.Vtctld.Backup:output_type -> vtctldata.BackupResponse - 92, // 93: vtctlservice.Vtctld.BackupShard:output_type -> vtctldata.BackupResponse - 93, // 94: vtctlservice.Vtctld.ChangeTabletType:output_type -> vtctldata.ChangeTabletTypeResponse - 94, // 95: vtctlservice.Vtctld.CreateKeyspace:output_type -> vtctldata.CreateKeyspaceResponse - 95, // 96: vtctlservice.Vtctld.CreateShard:output_type -> vtctldata.CreateShardResponse - 96, // 97: vtctlservice.Vtctld.DeleteCellInfo:output_type -> vtctldata.DeleteCellInfoResponse - 97, // 98: vtctlservice.Vtctld.DeleteCellsAlias:output_type -> vtctldata.DeleteCellsAliasResponse - 98, // 99: vtctlservice.Vtctld.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse - 99, // 100: vtctlservice.Vtctld.DeleteShards:output_type -> vtctldata.DeleteShardsResponse - 100, // 101: vtctlservice.Vtctld.DeleteSrvVSchema:output_type -> vtctldata.DeleteSrvVSchemaResponse - 101, // 102: vtctlservice.Vtctld.DeleteTablets:output_type -> vtctldata.DeleteTabletsResponse - 102, // 103: vtctlservice.Vtctld.EmergencyReparentShard:output_type -> vtctldata.EmergencyReparentShardResponse - 103, // 104: vtctlservice.Vtctld.ExecuteFetchAsApp:output_type -> vtctldata.ExecuteFetchAsAppResponse - 104, // 105: vtctlservice.Vtctld.ExecuteFetchAsDBA:output_type -> vtctldata.ExecuteFetchAsDBAResponse - 105, // 106: vtctlservice.Vtctld.ExecuteHook:output_type -> vtctldata.ExecuteHookResponse - 106, // 107: vtctlservice.Vtctld.FindAllShardsInKeyspace:output_type -> vtctldata.FindAllShardsInKeyspaceResponse - 107, // 108: vtctlservice.Vtctld.GetBackups:output_type -> vtctldata.GetBackupsResponse - 108, // 109: vtctlservice.Vtctld.GetCellInfo:output_type -> vtctldata.GetCellInfoResponse - 109, // 110: vtctlservice.Vtctld.GetCellInfoNames:output_type -> vtctldata.GetCellInfoNamesResponse - 110, // 111: vtctlservice.Vtctld.GetCellsAliases:output_type -> vtctldata.GetCellsAliasesResponse - 111, // 112: vtctlservice.Vtctld.GetFullStatus:output_type -> vtctldata.GetFullStatusResponse - 112, // 113: vtctlservice.Vtctld.GetKeyspace:output_type -> vtctldata.GetKeyspaceResponse - 113, // 114: vtctlservice.Vtctld.GetKeyspaces:output_type -> vtctldata.GetKeyspacesResponse - 114, // 115: vtctlservice.Vtctld.GetPermissions:output_type -> vtctldata.GetPermissionsResponse - 115, // 116: vtctlservice.Vtctld.GetRoutingRules:output_type -> vtctldata.GetRoutingRulesResponse - 116, // 117: vtctlservice.Vtctld.GetSchema:output_type -> vtctldata.GetSchemaResponse - 117, // 118: vtctlservice.Vtctld.GetShard:output_type -> vtctldata.GetShardResponse - 118, // 119: vtctlservice.Vtctld.GetShardRoutingRules:output_type -> vtctldata.GetShardRoutingRulesResponse - 119, // 120: vtctlservice.Vtctld.GetSrvKeyspaceNames:output_type -> vtctldata.GetSrvKeyspaceNamesResponse - 120, // 121: vtctlservice.Vtctld.GetSrvKeyspaces:output_type -> vtctldata.GetSrvKeyspacesResponse - 121, // 122: vtctlservice.Vtctld.UpdateThrottlerConfig:output_type -> vtctldata.UpdateThrottlerConfigResponse - 122, // 123: vtctlservice.Vtctld.GetSrvVSchema:output_type -> vtctldata.GetSrvVSchemaResponse - 123, // 124: vtctlservice.Vtctld.GetSrvVSchemas:output_type -> vtctldata.GetSrvVSchemasResponse - 124, // 125: vtctlservice.Vtctld.GetTablet:output_type -> vtctldata.GetTabletResponse - 125, // 126: vtctlservice.Vtctld.GetTablets:output_type -> vtctldata.GetTabletsResponse - 126, // 127: vtctlservice.Vtctld.GetTopologyPath:output_type -> vtctldata.GetTopologyPathResponse - 127, // 128: vtctlservice.Vtctld.GetVersion:output_type -> vtctldata.GetVersionResponse - 128, // 129: vtctlservice.Vtctld.GetVSchema:output_type -> vtctldata.GetVSchemaResponse - 129, // 130: vtctlservice.Vtctld.GetWorkflows:output_type -> vtctldata.GetWorkflowsResponse - 130, // 131: vtctlservice.Vtctld.InitShardPrimary:output_type -> vtctldata.InitShardPrimaryResponse - 131, // 132: vtctlservice.Vtctld.PingTablet:output_type -> vtctldata.PingTabletResponse - 132, // 133: vtctlservice.Vtctld.PlannedReparentShard:output_type -> vtctldata.PlannedReparentShardResponse - 133, // 134: vtctlservice.Vtctld.RebuildKeyspaceGraph:output_type -> vtctldata.RebuildKeyspaceGraphResponse - 134, // 135: vtctlservice.Vtctld.RebuildVSchemaGraph:output_type -> vtctldata.RebuildVSchemaGraphResponse - 135, // 136: vtctlservice.Vtctld.RefreshState:output_type -> vtctldata.RefreshStateResponse - 136, // 137: vtctlservice.Vtctld.RefreshStateByShard:output_type -> vtctldata.RefreshStateByShardResponse - 137, // 138: vtctlservice.Vtctld.ReloadSchema:output_type -> vtctldata.ReloadSchemaResponse - 138, // 139: vtctlservice.Vtctld.ReloadSchemaKeyspace:output_type -> vtctldata.ReloadSchemaKeyspaceResponse - 139, // 140: vtctlservice.Vtctld.ReloadSchemaShard:output_type -> vtctldata.ReloadSchemaShardResponse - 140, // 141: vtctlservice.Vtctld.RemoveBackup:output_type -> vtctldata.RemoveBackupResponse - 141, // 142: vtctlservice.Vtctld.RemoveKeyspaceCell:output_type -> vtctldata.RemoveKeyspaceCellResponse - 142, // 143: vtctlservice.Vtctld.RemoveShardCell:output_type -> vtctldata.RemoveShardCellResponse - 143, // 144: vtctlservice.Vtctld.ReparentTablet:output_type -> vtctldata.ReparentTabletResponse - 144, // 145: vtctlservice.Vtctld.RestoreFromBackup:output_type -> vtctldata.RestoreFromBackupResponse - 145, // 146: vtctlservice.Vtctld.RunHealthCheck:output_type -> vtctldata.RunHealthCheckResponse - 146, // 147: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:output_type -> vtctldata.SetKeyspaceDurabilityPolicyResponse - 147, // 148: vtctlservice.Vtctld.SetShardIsPrimaryServing:output_type -> vtctldata.SetShardIsPrimaryServingResponse - 148, // 149: vtctlservice.Vtctld.SetShardTabletControl:output_type -> vtctldata.SetShardTabletControlResponse - 149, // 150: vtctlservice.Vtctld.SetWritable:output_type -> vtctldata.SetWritableResponse - 150, // 151: vtctlservice.Vtctld.ShardReplicationAdd:output_type -> vtctldata.ShardReplicationAddResponse - 151, // 152: vtctlservice.Vtctld.ShardReplicationFix:output_type -> vtctldata.ShardReplicationFixResponse - 152, // 153: vtctlservice.Vtctld.ShardReplicationPositions:output_type -> vtctldata.ShardReplicationPositionsResponse - 153, // 154: vtctlservice.Vtctld.ShardReplicationRemove:output_type -> vtctldata.ShardReplicationRemoveResponse - 154, // 155: vtctlservice.Vtctld.SleepTablet:output_type -> vtctldata.SleepTabletResponse - 155, // 156: vtctlservice.Vtctld.SourceShardAdd:output_type -> vtctldata.SourceShardAddResponse - 156, // 157: vtctlservice.Vtctld.SourceShardDelete:output_type -> vtctldata.SourceShardDeleteResponse - 157, // 158: vtctlservice.Vtctld.StartReplication:output_type -> vtctldata.StartReplicationResponse - 158, // 159: vtctlservice.Vtctld.StopReplication:output_type -> vtctldata.StopReplicationResponse - 159, // 160: vtctlservice.Vtctld.TabletExternallyReparented:output_type -> vtctldata.TabletExternallyReparentedResponse - 160, // 161: vtctlservice.Vtctld.UpdateCellInfo:output_type -> vtctldata.UpdateCellInfoResponse - 161, // 162: vtctlservice.Vtctld.UpdateCellsAlias:output_type -> vtctldata.UpdateCellsAliasResponse - 162, // 163: vtctlservice.Vtctld.Validate:output_type -> vtctldata.ValidateResponse - 163, // 164: vtctlservice.Vtctld.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse - 164, // 165: vtctlservice.Vtctld.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse - 165, // 166: vtctlservice.Vtctld.ValidateShard:output_type -> vtctldata.ValidateShardResponse - 166, // 167: vtctlservice.Vtctld.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse - 167, // 168: vtctlservice.Vtctld.ValidateVersionShard:output_type -> vtctldata.ValidateVersionShardResponse - 168, // 169: vtctlservice.Vtctld.ValidateVSchema:output_type -> vtctldata.ValidateVSchemaResponse - 85, // [85:170] is the sub-list for method output_type - 0, // [0:85] is the sub-list for method input_type + 9, // 9: vtctlservice.Vtctld.CancelSchemaMigration:input_type -> vtctldata.CancelSchemaMigrationRequest + 10, // 10: vtctlservice.Vtctld.ChangeTabletType:input_type -> vtctldata.ChangeTabletTypeRequest + 11, // 11: vtctlservice.Vtctld.CleanupSchemaMigration:input_type -> vtctldata.CleanupSchemaMigrationRequest + 12, // 12: vtctlservice.Vtctld.CompleteSchemaMigration:input_type -> vtctldata.CompleteSchemaMigrationRequest + 13, // 13: vtctlservice.Vtctld.CreateKeyspace:input_type -> vtctldata.CreateKeyspaceRequest + 14, // 14: vtctlservice.Vtctld.CreateShard:input_type -> vtctldata.CreateShardRequest + 15, // 15: vtctlservice.Vtctld.DeleteCellInfo:input_type -> vtctldata.DeleteCellInfoRequest + 16, // 16: vtctlservice.Vtctld.DeleteCellsAlias:input_type -> vtctldata.DeleteCellsAliasRequest + 17, // 17: vtctlservice.Vtctld.DeleteKeyspace:input_type -> vtctldata.DeleteKeyspaceRequest + 18, // 18: vtctlservice.Vtctld.DeleteShards:input_type -> vtctldata.DeleteShardsRequest + 19, // 19: vtctlservice.Vtctld.DeleteSrvVSchema:input_type -> vtctldata.DeleteSrvVSchemaRequest + 20, // 20: vtctlservice.Vtctld.DeleteTablets:input_type -> vtctldata.DeleteTabletsRequest + 21, // 21: vtctlservice.Vtctld.EmergencyReparentShard:input_type -> vtctldata.EmergencyReparentShardRequest + 22, // 22: vtctlservice.Vtctld.ExecuteFetchAsApp:input_type -> vtctldata.ExecuteFetchAsAppRequest + 23, // 23: vtctlservice.Vtctld.ExecuteFetchAsDBA:input_type -> vtctldata.ExecuteFetchAsDBARequest + 24, // 24: vtctlservice.Vtctld.ExecuteHook:input_type -> vtctldata.ExecuteHookRequest + 25, // 25: vtctlservice.Vtctld.FindAllShardsInKeyspace:input_type -> vtctldata.FindAllShardsInKeyspaceRequest + 26, // 26: vtctlservice.Vtctld.GetBackups:input_type -> vtctldata.GetBackupsRequest + 27, // 27: vtctlservice.Vtctld.GetCellInfo:input_type -> vtctldata.GetCellInfoRequest + 28, // 28: vtctlservice.Vtctld.GetCellInfoNames:input_type -> vtctldata.GetCellInfoNamesRequest + 29, // 29: vtctlservice.Vtctld.GetCellsAliases:input_type -> vtctldata.GetCellsAliasesRequest + 30, // 30: vtctlservice.Vtctld.GetFullStatus:input_type -> vtctldata.GetFullStatusRequest + 31, // 31: vtctlservice.Vtctld.GetKeyspace:input_type -> vtctldata.GetKeyspaceRequest + 32, // 32: vtctlservice.Vtctld.GetKeyspaces:input_type -> vtctldata.GetKeyspacesRequest + 33, // 33: vtctlservice.Vtctld.GetPermissions:input_type -> vtctldata.GetPermissionsRequest + 34, // 34: vtctlservice.Vtctld.GetRoutingRules:input_type -> vtctldata.GetRoutingRulesRequest + 35, // 35: vtctlservice.Vtctld.GetSchema:input_type -> vtctldata.GetSchemaRequest + 36, // 36: vtctlservice.Vtctld.GetSchemaMigrations:input_type -> vtctldata.GetSchemaMigrationsRequest + 37, // 37: vtctlservice.Vtctld.GetShard:input_type -> vtctldata.GetShardRequest + 38, // 38: vtctlservice.Vtctld.GetShardRoutingRules:input_type -> vtctldata.GetShardRoutingRulesRequest + 39, // 39: vtctlservice.Vtctld.GetSrvKeyspaceNames:input_type -> vtctldata.GetSrvKeyspaceNamesRequest + 40, // 40: vtctlservice.Vtctld.GetSrvKeyspaces:input_type -> vtctldata.GetSrvKeyspacesRequest + 41, // 41: vtctlservice.Vtctld.UpdateThrottlerConfig:input_type -> vtctldata.UpdateThrottlerConfigRequest + 42, // 42: vtctlservice.Vtctld.GetSrvVSchema:input_type -> vtctldata.GetSrvVSchemaRequest + 43, // 43: vtctlservice.Vtctld.GetSrvVSchemas:input_type -> vtctldata.GetSrvVSchemasRequest + 44, // 44: vtctlservice.Vtctld.GetTablet:input_type -> vtctldata.GetTabletRequest + 45, // 45: vtctlservice.Vtctld.GetTablets:input_type -> vtctldata.GetTabletsRequest + 46, // 46: vtctlservice.Vtctld.GetTopologyPath:input_type -> vtctldata.GetTopologyPathRequest + 47, // 47: vtctlservice.Vtctld.GetVersion:input_type -> vtctldata.GetVersionRequest + 48, // 48: vtctlservice.Vtctld.GetVSchema:input_type -> vtctldata.GetVSchemaRequest + 49, // 49: vtctlservice.Vtctld.GetWorkflows:input_type -> vtctldata.GetWorkflowsRequest + 50, // 50: vtctlservice.Vtctld.InitShardPrimary:input_type -> vtctldata.InitShardPrimaryRequest + 51, // 51: vtctlservice.Vtctld.LaunchSchemaMigration:input_type -> vtctldata.LaunchSchemaMigrationRequest + 52, // 52: vtctlservice.Vtctld.MoveTablesCreate:input_type -> vtctldata.MoveTablesCreateRequest + 53, // 53: vtctlservice.Vtctld.MoveTablesComplete:input_type -> vtctldata.MoveTablesCompleteRequest + 54, // 54: vtctlservice.Vtctld.PingTablet:input_type -> vtctldata.PingTabletRequest + 55, // 55: vtctlservice.Vtctld.PlannedReparentShard:input_type -> vtctldata.PlannedReparentShardRequest + 56, // 56: vtctlservice.Vtctld.RebuildKeyspaceGraph:input_type -> vtctldata.RebuildKeyspaceGraphRequest + 57, // 57: vtctlservice.Vtctld.RebuildVSchemaGraph:input_type -> vtctldata.RebuildVSchemaGraphRequest + 58, // 58: vtctlservice.Vtctld.RefreshState:input_type -> vtctldata.RefreshStateRequest + 59, // 59: vtctlservice.Vtctld.RefreshStateByShard:input_type -> vtctldata.RefreshStateByShardRequest + 60, // 60: vtctlservice.Vtctld.ReloadSchema:input_type -> vtctldata.ReloadSchemaRequest + 61, // 61: vtctlservice.Vtctld.ReloadSchemaKeyspace:input_type -> vtctldata.ReloadSchemaKeyspaceRequest + 62, // 62: vtctlservice.Vtctld.ReloadSchemaShard:input_type -> vtctldata.ReloadSchemaShardRequest + 63, // 63: vtctlservice.Vtctld.RemoveBackup:input_type -> vtctldata.RemoveBackupRequest + 64, // 64: vtctlservice.Vtctld.RemoveKeyspaceCell:input_type -> vtctldata.RemoveKeyspaceCellRequest + 65, // 65: vtctlservice.Vtctld.RemoveShardCell:input_type -> vtctldata.RemoveShardCellRequest + 66, // 66: vtctlservice.Vtctld.ReparentTablet:input_type -> vtctldata.ReparentTabletRequest + 67, // 67: vtctlservice.Vtctld.ReshardCreate:input_type -> vtctldata.ReshardCreateRequest + 68, // 68: vtctlservice.Vtctld.RestoreFromBackup:input_type -> vtctldata.RestoreFromBackupRequest + 69, // 69: vtctlservice.Vtctld.RetrySchemaMigration:input_type -> vtctldata.RetrySchemaMigrationRequest + 70, // 70: vtctlservice.Vtctld.RunHealthCheck:input_type -> vtctldata.RunHealthCheckRequest + 71, // 71: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:input_type -> vtctldata.SetKeyspaceDurabilityPolicyRequest + 72, // 72: vtctlservice.Vtctld.SetShardIsPrimaryServing:input_type -> vtctldata.SetShardIsPrimaryServingRequest + 73, // 73: vtctlservice.Vtctld.SetShardTabletControl:input_type -> vtctldata.SetShardTabletControlRequest + 74, // 74: vtctlservice.Vtctld.SetWritable:input_type -> vtctldata.SetWritableRequest + 75, // 75: vtctlservice.Vtctld.ShardReplicationAdd:input_type -> vtctldata.ShardReplicationAddRequest + 76, // 76: vtctlservice.Vtctld.ShardReplicationFix:input_type -> vtctldata.ShardReplicationFixRequest + 77, // 77: vtctlservice.Vtctld.ShardReplicationPositions:input_type -> vtctldata.ShardReplicationPositionsRequest + 78, // 78: vtctlservice.Vtctld.ShardReplicationRemove:input_type -> vtctldata.ShardReplicationRemoveRequest + 79, // 79: vtctlservice.Vtctld.SleepTablet:input_type -> vtctldata.SleepTabletRequest + 80, // 80: vtctlservice.Vtctld.SourceShardAdd:input_type -> vtctldata.SourceShardAddRequest + 81, // 81: vtctlservice.Vtctld.SourceShardDelete:input_type -> vtctldata.SourceShardDeleteRequest + 82, // 82: vtctlservice.Vtctld.StartReplication:input_type -> vtctldata.StartReplicationRequest + 83, // 83: vtctlservice.Vtctld.StopReplication:input_type -> vtctldata.StopReplicationRequest + 84, // 84: vtctlservice.Vtctld.TabletExternallyReparented:input_type -> vtctldata.TabletExternallyReparentedRequest + 85, // 85: vtctlservice.Vtctld.UpdateCellInfo:input_type -> vtctldata.UpdateCellInfoRequest + 86, // 86: vtctlservice.Vtctld.UpdateCellsAlias:input_type -> vtctldata.UpdateCellsAliasRequest + 87, // 87: vtctlservice.Vtctld.Validate:input_type -> vtctldata.ValidateRequest + 88, // 88: vtctlservice.Vtctld.ValidateKeyspace:input_type -> vtctldata.ValidateKeyspaceRequest + 89, // 89: vtctlservice.Vtctld.ValidateSchemaKeyspace:input_type -> vtctldata.ValidateSchemaKeyspaceRequest + 90, // 90: vtctlservice.Vtctld.ValidateShard:input_type -> vtctldata.ValidateShardRequest + 91, // 91: vtctlservice.Vtctld.ValidateVersionKeyspace:input_type -> vtctldata.ValidateVersionKeyspaceRequest + 92, // 92: vtctlservice.Vtctld.ValidateVersionShard:input_type -> vtctldata.ValidateVersionShardRequest + 93, // 93: vtctlservice.Vtctld.ValidateVSchema:input_type -> vtctldata.ValidateVSchemaRequest + 94, // 94: vtctlservice.Vtctld.WorkflowDelete:input_type -> vtctldata.WorkflowDeleteRequest + 95, // 95: vtctlservice.Vtctld.WorkflowStatus:input_type -> vtctldata.WorkflowStatusRequest + 96, // 96: vtctlservice.Vtctld.WorkflowSwitchTraffic:input_type -> vtctldata.WorkflowSwitchTrafficRequest + 97, // 97: vtctlservice.Vtctld.WorkflowUpdate:input_type -> vtctldata.WorkflowUpdateRequest + 98, // 98: vtctlservice.Vtctl.ExecuteVtctlCommand:output_type -> vtctldata.ExecuteVtctlCommandResponse + 99, // 99: vtctlservice.Vtctld.AddCellInfo:output_type -> vtctldata.AddCellInfoResponse + 100, // 100: vtctlservice.Vtctld.AddCellsAlias:output_type -> vtctldata.AddCellsAliasResponse + 101, // 101: vtctlservice.Vtctld.ApplyRoutingRules:output_type -> vtctldata.ApplyRoutingRulesResponse + 102, // 102: vtctlservice.Vtctld.ApplySchema:output_type -> vtctldata.ApplySchemaResponse + 103, // 103: vtctlservice.Vtctld.ApplyShardRoutingRules:output_type -> vtctldata.ApplyShardRoutingRulesResponse + 104, // 104: vtctlservice.Vtctld.ApplyVSchema:output_type -> vtctldata.ApplyVSchemaResponse + 105, // 105: vtctlservice.Vtctld.Backup:output_type -> vtctldata.BackupResponse + 105, // 106: vtctlservice.Vtctld.BackupShard:output_type -> vtctldata.BackupResponse + 106, // 107: vtctlservice.Vtctld.CancelSchemaMigration:output_type -> vtctldata.CancelSchemaMigrationResponse + 107, // 108: vtctlservice.Vtctld.ChangeTabletType:output_type -> vtctldata.ChangeTabletTypeResponse + 108, // 109: vtctlservice.Vtctld.CleanupSchemaMigration:output_type -> vtctldata.CleanupSchemaMigrationResponse + 109, // 110: vtctlservice.Vtctld.CompleteSchemaMigration:output_type -> vtctldata.CompleteSchemaMigrationResponse + 110, // 111: vtctlservice.Vtctld.CreateKeyspace:output_type -> vtctldata.CreateKeyspaceResponse + 111, // 112: vtctlservice.Vtctld.CreateShard:output_type -> vtctldata.CreateShardResponse + 112, // 113: vtctlservice.Vtctld.DeleteCellInfo:output_type -> vtctldata.DeleteCellInfoResponse + 113, // 114: vtctlservice.Vtctld.DeleteCellsAlias:output_type -> vtctldata.DeleteCellsAliasResponse + 114, // 115: vtctlservice.Vtctld.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse + 115, // 116: vtctlservice.Vtctld.DeleteShards:output_type -> vtctldata.DeleteShardsResponse + 116, // 117: vtctlservice.Vtctld.DeleteSrvVSchema:output_type -> vtctldata.DeleteSrvVSchemaResponse + 117, // 118: vtctlservice.Vtctld.DeleteTablets:output_type -> vtctldata.DeleteTabletsResponse + 118, // 119: vtctlservice.Vtctld.EmergencyReparentShard:output_type -> vtctldata.EmergencyReparentShardResponse + 119, // 120: vtctlservice.Vtctld.ExecuteFetchAsApp:output_type -> vtctldata.ExecuteFetchAsAppResponse + 120, // 121: vtctlservice.Vtctld.ExecuteFetchAsDBA:output_type -> vtctldata.ExecuteFetchAsDBAResponse + 121, // 122: vtctlservice.Vtctld.ExecuteHook:output_type -> vtctldata.ExecuteHookResponse + 122, // 123: vtctlservice.Vtctld.FindAllShardsInKeyspace:output_type -> vtctldata.FindAllShardsInKeyspaceResponse + 123, // 124: vtctlservice.Vtctld.GetBackups:output_type -> vtctldata.GetBackupsResponse + 124, // 125: vtctlservice.Vtctld.GetCellInfo:output_type -> vtctldata.GetCellInfoResponse + 125, // 126: vtctlservice.Vtctld.GetCellInfoNames:output_type -> vtctldata.GetCellInfoNamesResponse + 126, // 127: vtctlservice.Vtctld.GetCellsAliases:output_type -> vtctldata.GetCellsAliasesResponse + 127, // 128: vtctlservice.Vtctld.GetFullStatus:output_type -> vtctldata.GetFullStatusResponse + 128, // 129: vtctlservice.Vtctld.GetKeyspace:output_type -> vtctldata.GetKeyspaceResponse + 129, // 130: vtctlservice.Vtctld.GetKeyspaces:output_type -> vtctldata.GetKeyspacesResponse + 130, // 131: vtctlservice.Vtctld.GetPermissions:output_type -> vtctldata.GetPermissionsResponse + 131, // 132: vtctlservice.Vtctld.GetRoutingRules:output_type -> vtctldata.GetRoutingRulesResponse + 132, // 133: vtctlservice.Vtctld.GetSchema:output_type -> vtctldata.GetSchemaResponse + 133, // 134: vtctlservice.Vtctld.GetSchemaMigrations:output_type -> vtctldata.GetSchemaMigrationsResponse + 134, // 135: vtctlservice.Vtctld.GetShard:output_type -> vtctldata.GetShardResponse + 135, // 136: vtctlservice.Vtctld.GetShardRoutingRules:output_type -> vtctldata.GetShardRoutingRulesResponse + 136, // 137: vtctlservice.Vtctld.GetSrvKeyspaceNames:output_type -> vtctldata.GetSrvKeyspaceNamesResponse + 137, // 138: vtctlservice.Vtctld.GetSrvKeyspaces:output_type -> vtctldata.GetSrvKeyspacesResponse + 138, // 139: vtctlservice.Vtctld.UpdateThrottlerConfig:output_type -> vtctldata.UpdateThrottlerConfigResponse + 139, // 140: vtctlservice.Vtctld.GetSrvVSchema:output_type -> vtctldata.GetSrvVSchemaResponse + 140, // 141: vtctlservice.Vtctld.GetSrvVSchemas:output_type -> vtctldata.GetSrvVSchemasResponse + 141, // 142: vtctlservice.Vtctld.GetTablet:output_type -> vtctldata.GetTabletResponse + 142, // 143: vtctlservice.Vtctld.GetTablets:output_type -> vtctldata.GetTabletsResponse + 143, // 144: vtctlservice.Vtctld.GetTopologyPath:output_type -> vtctldata.GetTopologyPathResponse + 144, // 145: vtctlservice.Vtctld.GetVersion:output_type -> vtctldata.GetVersionResponse + 145, // 146: vtctlservice.Vtctld.GetVSchema:output_type -> vtctldata.GetVSchemaResponse + 146, // 147: vtctlservice.Vtctld.GetWorkflows:output_type -> vtctldata.GetWorkflowsResponse + 147, // 148: vtctlservice.Vtctld.InitShardPrimary:output_type -> vtctldata.InitShardPrimaryResponse + 148, // 149: vtctlservice.Vtctld.LaunchSchemaMigration:output_type -> vtctldata.LaunchSchemaMigrationResponse + 149, // 150: vtctlservice.Vtctld.MoveTablesCreate:output_type -> vtctldata.WorkflowStatusResponse + 150, // 151: vtctlservice.Vtctld.MoveTablesComplete:output_type -> vtctldata.MoveTablesCompleteResponse + 151, // 152: vtctlservice.Vtctld.PingTablet:output_type -> vtctldata.PingTabletResponse + 152, // 153: vtctlservice.Vtctld.PlannedReparentShard:output_type -> vtctldata.PlannedReparentShardResponse + 153, // 154: vtctlservice.Vtctld.RebuildKeyspaceGraph:output_type -> vtctldata.RebuildKeyspaceGraphResponse + 154, // 155: vtctlservice.Vtctld.RebuildVSchemaGraph:output_type -> vtctldata.RebuildVSchemaGraphResponse + 155, // 156: vtctlservice.Vtctld.RefreshState:output_type -> vtctldata.RefreshStateResponse + 156, // 157: vtctlservice.Vtctld.RefreshStateByShard:output_type -> vtctldata.RefreshStateByShardResponse + 157, // 158: vtctlservice.Vtctld.ReloadSchema:output_type -> vtctldata.ReloadSchemaResponse + 158, // 159: vtctlservice.Vtctld.ReloadSchemaKeyspace:output_type -> vtctldata.ReloadSchemaKeyspaceResponse + 159, // 160: vtctlservice.Vtctld.ReloadSchemaShard:output_type -> vtctldata.ReloadSchemaShardResponse + 160, // 161: vtctlservice.Vtctld.RemoveBackup:output_type -> vtctldata.RemoveBackupResponse + 161, // 162: vtctlservice.Vtctld.RemoveKeyspaceCell:output_type -> vtctldata.RemoveKeyspaceCellResponse + 162, // 163: vtctlservice.Vtctld.RemoveShardCell:output_type -> vtctldata.RemoveShardCellResponse + 163, // 164: vtctlservice.Vtctld.ReparentTablet:output_type -> vtctldata.ReparentTabletResponse + 149, // 165: vtctlservice.Vtctld.ReshardCreate:output_type -> vtctldata.WorkflowStatusResponse + 164, // 166: vtctlservice.Vtctld.RestoreFromBackup:output_type -> vtctldata.RestoreFromBackupResponse + 165, // 167: vtctlservice.Vtctld.RetrySchemaMigration:output_type -> vtctldata.RetrySchemaMigrationResponse + 166, // 168: vtctlservice.Vtctld.RunHealthCheck:output_type -> vtctldata.RunHealthCheckResponse + 167, // 169: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:output_type -> vtctldata.SetKeyspaceDurabilityPolicyResponse + 168, // 170: vtctlservice.Vtctld.SetShardIsPrimaryServing:output_type -> vtctldata.SetShardIsPrimaryServingResponse + 169, // 171: vtctlservice.Vtctld.SetShardTabletControl:output_type -> vtctldata.SetShardTabletControlResponse + 170, // 172: vtctlservice.Vtctld.SetWritable:output_type -> vtctldata.SetWritableResponse + 171, // 173: vtctlservice.Vtctld.ShardReplicationAdd:output_type -> vtctldata.ShardReplicationAddResponse + 172, // 174: vtctlservice.Vtctld.ShardReplicationFix:output_type -> vtctldata.ShardReplicationFixResponse + 173, // 175: vtctlservice.Vtctld.ShardReplicationPositions:output_type -> vtctldata.ShardReplicationPositionsResponse + 174, // 176: vtctlservice.Vtctld.ShardReplicationRemove:output_type -> vtctldata.ShardReplicationRemoveResponse + 175, // 177: vtctlservice.Vtctld.SleepTablet:output_type -> vtctldata.SleepTabletResponse + 176, // 178: vtctlservice.Vtctld.SourceShardAdd:output_type -> vtctldata.SourceShardAddResponse + 177, // 179: vtctlservice.Vtctld.SourceShardDelete:output_type -> vtctldata.SourceShardDeleteResponse + 178, // 180: vtctlservice.Vtctld.StartReplication:output_type -> vtctldata.StartReplicationResponse + 179, // 181: vtctlservice.Vtctld.StopReplication:output_type -> vtctldata.StopReplicationResponse + 180, // 182: vtctlservice.Vtctld.TabletExternallyReparented:output_type -> vtctldata.TabletExternallyReparentedResponse + 181, // 183: vtctlservice.Vtctld.UpdateCellInfo:output_type -> vtctldata.UpdateCellInfoResponse + 182, // 184: vtctlservice.Vtctld.UpdateCellsAlias:output_type -> vtctldata.UpdateCellsAliasResponse + 183, // 185: vtctlservice.Vtctld.Validate:output_type -> vtctldata.ValidateResponse + 184, // 186: vtctlservice.Vtctld.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse + 185, // 187: vtctlservice.Vtctld.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse + 186, // 188: vtctlservice.Vtctld.ValidateShard:output_type -> vtctldata.ValidateShardResponse + 187, // 189: vtctlservice.Vtctld.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse + 188, // 190: vtctlservice.Vtctld.ValidateVersionShard:output_type -> vtctldata.ValidateVersionShardResponse + 189, // 191: vtctlservice.Vtctld.ValidateVSchema:output_type -> vtctldata.ValidateVSchemaResponse + 190, // 192: vtctlservice.Vtctld.WorkflowDelete:output_type -> vtctldata.WorkflowDeleteResponse + 149, // 193: vtctlservice.Vtctld.WorkflowStatus:output_type -> vtctldata.WorkflowStatusResponse + 191, // 194: vtctlservice.Vtctld.WorkflowSwitchTraffic:output_type -> vtctldata.WorkflowSwitchTrafficResponse + 192, // 195: vtctlservice.Vtctld.WorkflowUpdate:output_type -> vtctldata.WorkflowUpdateResponse + 98, // [98:196] is the sub-list for method output_type + 0, // [0:98] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name diff --git a/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go b/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go index c97a10edd16..005c73af849 100644 --- a/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go +++ b/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go @@ -159,12 +159,18 @@ type VtctldClient interface { Backup(ctx context.Context, in *vtctldata.BackupRequest, opts ...grpc.CallOption) (Vtctld_BackupClient, error) // BackupShard chooses a tablet in the shard and uses it to create a backup. BackupShard(ctx context.Context, in *vtctldata.BackupShardRequest, opts ...grpc.CallOption) (Vtctld_BackupShardClient, error) + // CancelSchemaMigration cancels one or all migrations, terminating any runnign ones as needed. + CancelSchemaMigration(ctx context.Context, in *vtctldata.CancelSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CancelSchemaMigrationResponse, error) // ChangeTabletType changes the db type for the specified tablet, if possible. // This is used primarily to arrange replicas, and it will not convert a // primary. For that, use InitShardPrimary. // // NOTE: This command automatically updates the serving graph. ChangeTabletType(ctx context.Context, in *vtctldata.ChangeTabletTypeRequest, opts ...grpc.CallOption) (*vtctldata.ChangeTabletTypeResponse, error) + // CleanupSchemaMigration marks a schema migration as ready for artifact cleanup. + CleanupSchemaMigration(ctx context.Context, in *vtctldata.CleanupSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CleanupSchemaMigrationResponse, error) + // CompleteSchemaMigration completes one or all migrations executed with --postpone-completion. + CompleteSchemaMigration(ctx context.Context, in *vtctldata.CompleteSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CompleteSchemaMigrationResponse, error) // CreateKeyspace creates the specified keyspace in the topology. For a // SNAPSHOT keyspace, the request must specify the name of a base keyspace, // as well as a snapshot time. @@ -225,6 +231,12 @@ type VtctldClient interface { // GetSchema returns the schema for a tablet, or just the schema for the // specified tables in that tablet. GetSchema(ctx context.Context, in *vtctldata.GetSchemaRequest, opts ...grpc.CallOption) (*vtctldata.GetSchemaResponse, error) + // GetSchemaMigrations returns one or more online schema migrations for the + // specified keyspace, analagous to `SHOW VITESS_MIGRATIONS`. + // + // Different fields in the request message result in different filtering + // behaviors. See the documentation on GetSchemaMigrationsRequest for details. + GetSchemaMigrations(ctx context.Context, in *vtctldata.GetSchemaMigrationsRequest, opts ...grpc.CallOption) (*vtctldata.GetSchemaMigrationsResponse, error) // GetShard returns information about a shard in the topology. GetShard(ctx context.Context, in *vtctldata.GetShardRequest, opts ...grpc.CallOption) (*vtctldata.GetShardResponse, error) // GetShardRoutingRules returns the VSchema shard routing rules. @@ -261,6 +273,14 @@ type VtctldClient interface { // PlannedReparentShard or EmergencyReparentShard should be used in those // cases instead. InitShardPrimary(ctx context.Context, in *vtctldata.InitShardPrimaryRequest, opts ...grpc.CallOption) (*vtctldata.InitShardPrimaryResponse, error) + // LaunchSchemaMigration launches one or all migrations executed with --postpone-launch. + LaunchSchemaMigration(ctx context.Context, in *vtctldata.LaunchSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.LaunchSchemaMigrationResponse, error) + // MoveTablesCreate creates a workflow which moves one or more tables from a + // source keyspace to a target keyspace. + MoveTablesCreate(ctx context.Context, in *vtctldata.MoveTablesCreateRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowStatusResponse, error) + // MoveTablesComplete completes the move and cleans up the workflow and + // its related artifacts. + MoveTablesComplete(ctx context.Context, in *vtctldata.MoveTablesCompleteRequest, opts ...grpc.CallOption) (*vtctldata.MoveTablesCompleteResponse, error) // PingTablet checks that the specified tablet is awake and responding to RPCs. // This command can be blocked by other in-flight operations. PingTablet(ctx context.Context, in *vtctldata.PingTabletRequest, opts ...grpc.CallOption) (*vtctldata.PingTabletResponse, error) @@ -308,8 +328,12 @@ type VtctldClient interface { // only works if the current replica position matches the last known reparent // action. ReparentTablet(ctx context.Context, in *vtctldata.ReparentTabletRequest, opts ...grpc.CallOption) (*vtctldata.ReparentTabletResponse, error) + // ReshardCreate creates a workflow to reshard a keyspace. + ReshardCreate(ctx context.Context, in *vtctldata.ReshardCreateRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowStatusResponse, error) // RestoreFromBackup stops mysqld for the given tablet and restores a backup. RestoreFromBackup(ctx context.Context, in *vtctldata.RestoreFromBackupRequest, opts ...grpc.CallOption) (Vtctld_RestoreFromBackupClient, error) + // RetrySchemaMigration marks a given schema migration for retry. + RetrySchemaMigration(ctx context.Context, in *vtctldata.RetrySchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.RetrySchemaMigrationResponse, error) // RunHealthCheck runs a healthcheck on the remote tablet. RunHealthCheck(ctx context.Context, in *vtctldata.RunHealthCheckRequest, opts ...grpc.CallOption) (*vtctldata.RunHealthCheckResponse, error) // SetKeyspaceDurabilityPolicy updates the DurabilityPolicy for a keyspace. @@ -398,6 +422,13 @@ type VtctldClient interface { ValidateVersionShard(ctx context.Context, in *vtctldata.ValidateVersionShardRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVersionShardResponse, error) // ValidateVSchema compares the schema of each primary tablet in "keyspace/shards..." to the vschema and errs if there are differences. ValidateVSchema(ctx context.Context, in *vtctldata.ValidateVSchemaRequest, opts ...grpc.CallOption) (*vtctldata.ValidateVSchemaResponse, error) + // WorkflowDelete deletes a vreplication workflow. + WorkflowDelete(ctx context.Context, in *vtctldata.WorkflowDeleteRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowDeleteResponse, error) + WorkflowStatus(ctx context.Context, in *vtctldata.WorkflowStatusRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowStatusResponse, error) + WorkflowSwitchTraffic(ctx context.Context, in *vtctldata.WorkflowSwitchTrafficRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowSwitchTrafficResponse, error) + // WorkflowUpdate updates the configuration of a vreplication workflow + // using the provided updated parameters. + WorkflowUpdate(ctx context.Context, in *vtctldata.WorkflowUpdateRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowUpdateResponse, error) } type vtctldClient struct { @@ -526,6 +557,15 @@ func (x *vtctldBackupShardClient) Recv() (*vtctldata.BackupResponse, error) { return m, nil } +func (c *vtctldClient) CancelSchemaMigration(ctx context.Context, in *vtctldata.CancelSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CancelSchemaMigrationResponse, error) { + out := new(vtctldata.CancelSchemaMigrationResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/CancelSchemaMigration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) ChangeTabletType(ctx context.Context, in *vtctldata.ChangeTabletTypeRequest, opts ...grpc.CallOption) (*vtctldata.ChangeTabletTypeResponse, error) { out := new(vtctldata.ChangeTabletTypeResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/ChangeTabletType", in, out, opts...) @@ -535,6 +575,24 @@ func (c *vtctldClient) ChangeTabletType(ctx context.Context, in *vtctldata.Chang return out, nil } +func (c *vtctldClient) CleanupSchemaMigration(ctx context.Context, in *vtctldata.CleanupSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CleanupSchemaMigrationResponse, error) { + out := new(vtctldata.CleanupSchemaMigrationResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/CleanupSchemaMigration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) CompleteSchemaMigration(ctx context.Context, in *vtctldata.CompleteSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CompleteSchemaMigrationResponse, error) { + out := new(vtctldata.CompleteSchemaMigrationResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/CompleteSchemaMigration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) CreateKeyspace(ctx context.Context, in *vtctldata.CreateKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.CreateKeyspaceResponse, error) { out := new(vtctldata.CreateKeyspaceResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/CreateKeyspace", in, out, opts...) @@ -742,6 +800,15 @@ func (c *vtctldClient) GetSchema(ctx context.Context, in *vtctldata.GetSchemaReq return out, nil } +func (c *vtctldClient) GetSchemaMigrations(ctx context.Context, in *vtctldata.GetSchemaMigrationsRequest, opts ...grpc.CallOption) (*vtctldata.GetSchemaMigrationsResponse, error) { + out := new(vtctldata.GetSchemaMigrationsResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetSchemaMigrations", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) GetShard(ctx context.Context, in *vtctldata.GetShardRequest, opts ...grpc.CallOption) (*vtctldata.GetShardResponse, error) { out := new(vtctldata.GetShardResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetShard", in, out, opts...) @@ -868,6 +935,33 @@ func (c *vtctldClient) InitShardPrimary(ctx context.Context, in *vtctldata.InitS return out, nil } +func (c *vtctldClient) LaunchSchemaMigration(ctx context.Context, in *vtctldata.LaunchSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.LaunchSchemaMigrationResponse, error) { + out := new(vtctldata.LaunchSchemaMigrationResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/LaunchSchemaMigration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) MoveTablesCreate(ctx context.Context, in *vtctldata.MoveTablesCreateRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowStatusResponse, error) { + out := new(vtctldata.WorkflowStatusResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/MoveTablesCreate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) MoveTablesComplete(ctx context.Context, in *vtctldata.MoveTablesCompleteRequest, opts ...grpc.CallOption) (*vtctldata.MoveTablesCompleteResponse, error) { + out := new(vtctldata.MoveTablesCompleteResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/MoveTablesComplete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) PingTablet(ctx context.Context, in *vtctldata.PingTabletRequest, opts ...grpc.CallOption) (*vtctldata.PingTabletResponse, error) { out := new(vtctldata.PingTabletResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/PingTablet", in, out, opts...) @@ -985,6 +1079,15 @@ func (c *vtctldClient) ReparentTablet(ctx context.Context, in *vtctldata.Reparen return out, nil } +func (c *vtctldClient) ReshardCreate(ctx context.Context, in *vtctldata.ReshardCreateRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowStatusResponse, error) { + out := new(vtctldata.WorkflowStatusResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/ReshardCreate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) RestoreFromBackup(ctx context.Context, in *vtctldata.RestoreFromBackupRequest, opts ...grpc.CallOption) (Vtctld_RestoreFromBackupClient, error) { stream, err := c.cc.NewStream(ctx, &Vtctld_ServiceDesc.Streams[2], "/vtctlservice.Vtctld/RestoreFromBackup", opts...) if err != nil { @@ -1017,6 +1120,15 @@ func (x *vtctldRestoreFromBackupClient) Recv() (*vtctldata.RestoreFromBackupResp return m, nil } +func (c *vtctldClient) RetrySchemaMigration(ctx context.Context, in *vtctldata.RetrySchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.RetrySchemaMigrationResponse, error) { + out := new(vtctldata.RetrySchemaMigrationResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/RetrySchemaMigration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) RunHealthCheck(ctx context.Context, in *vtctldata.RunHealthCheckRequest, opts ...grpc.CallOption) (*vtctldata.RunHealthCheckResponse, error) { out := new(vtctldata.RunHealthCheckResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/RunHealthCheck", in, out, opts...) @@ -1233,6 +1345,42 @@ func (c *vtctldClient) ValidateVSchema(ctx context.Context, in *vtctldata.Valida return out, nil } +func (c *vtctldClient) WorkflowDelete(ctx context.Context, in *vtctldata.WorkflowDeleteRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowDeleteResponse, error) { + out := new(vtctldata.WorkflowDeleteResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/WorkflowDelete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) WorkflowStatus(ctx context.Context, in *vtctldata.WorkflowStatusRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowStatusResponse, error) { + out := new(vtctldata.WorkflowStatusResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/WorkflowStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) WorkflowSwitchTraffic(ctx context.Context, in *vtctldata.WorkflowSwitchTrafficRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowSwitchTrafficResponse, error) { + out := new(vtctldata.WorkflowSwitchTrafficResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/WorkflowSwitchTraffic", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vtctldClient) WorkflowUpdate(ctx context.Context, in *vtctldata.WorkflowUpdateRequest, opts ...grpc.CallOption) (*vtctldata.WorkflowUpdateResponse, error) { + out := new(vtctldata.WorkflowUpdateResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/WorkflowUpdate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // VtctldServer is the server API for Vtctld service. // All implementations must embed UnimplementedVtctldServer // for forward compatibility @@ -1260,12 +1408,18 @@ type VtctldServer interface { Backup(*vtctldata.BackupRequest, Vtctld_BackupServer) error // BackupShard chooses a tablet in the shard and uses it to create a backup. BackupShard(*vtctldata.BackupShardRequest, Vtctld_BackupShardServer) error + // CancelSchemaMigration cancels one or all migrations, terminating any runnign ones as needed. + CancelSchemaMigration(context.Context, *vtctldata.CancelSchemaMigrationRequest) (*vtctldata.CancelSchemaMigrationResponse, error) // ChangeTabletType changes the db type for the specified tablet, if possible. // This is used primarily to arrange replicas, and it will not convert a // primary. For that, use InitShardPrimary. // // NOTE: This command automatically updates the serving graph. ChangeTabletType(context.Context, *vtctldata.ChangeTabletTypeRequest) (*vtctldata.ChangeTabletTypeResponse, error) + // CleanupSchemaMigration marks a schema migration as ready for artifact cleanup. + CleanupSchemaMigration(context.Context, *vtctldata.CleanupSchemaMigrationRequest) (*vtctldata.CleanupSchemaMigrationResponse, error) + // CompleteSchemaMigration completes one or all migrations executed with --postpone-completion. + CompleteSchemaMigration(context.Context, *vtctldata.CompleteSchemaMigrationRequest) (*vtctldata.CompleteSchemaMigrationResponse, error) // CreateKeyspace creates the specified keyspace in the topology. For a // SNAPSHOT keyspace, the request must specify the name of a base keyspace, // as well as a snapshot time. @@ -1326,6 +1480,12 @@ type VtctldServer interface { // GetSchema returns the schema for a tablet, or just the schema for the // specified tables in that tablet. GetSchema(context.Context, *vtctldata.GetSchemaRequest) (*vtctldata.GetSchemaResponse, error) + // GetSchemaMigrations returns one or more online schema migrations for the + // specified keyspace, analagous to `SHOW VITESS_MIGRATIONS`. + // + // Different fields in the request message result in different filtering + // behaviors. See the documentation on GetSchemaMigrationsRequest for details. + GetSchemaMigrations(context.Context, *vtctldata.GetSchemaMigrationsRequest) (*vtctldata.GetSchemaMigrationsResponse, error) // GetShard returns information about a shard in the topology. GetShard(context.Context, *vtctldata.GetShardRequest) (*vtctldata.GetShardResponse, error) // GetShardRoutingRules returns the VSchema shard routing rules. @@ -1362,6 +1522,14 @@ type VtctldServer interface { // PlannedReparentShard or EmergencyReparentShard should be used in those // cases instead. InitShardPrimary(context.Context, *vtctldata.InitShardPrimaryRequest) (*vtctldata.InitShardPrimaryResponse, error) + // LaunchSchemaMigration launches one or all migrations executed with --postpone-launch. + LaunchSchemaMigration(context.Context, *vtctldata.LaunchSchemaMigrationRequest) (*vtctldata.LaunchSchemaMigrationResponse, error) + // MoveTablesCreate creates a workflow which moves one or more tables from a + // source keyspace to a target keyspace. + MoveTablesCreate(context.Context, *vtctldata.MoveTablesCreateRequest) (*vtctldata.WorkflowStatusResponse, error) + // MoveTablesComplete completes the move and cleans up the workflow and + // its related artifacts. + MoveTablesComplete(context.Context, *vtctldata.MoveTablesCompleteRequest) (*vtctldata.MoveTablesCompleteResponse, error) // PingTablet checks that the specified tablet is awake and responding to RPCs. // This command can be blocked by other in-flight operations. PingTablet(context.Context, *vtctldata.PingTabletRequest) (*vtctldata.PingTabletResponse, error) @@ -1409,8 +1577,12 @@ type VtctldServer interface { // only works if the current replica position matches the last known reparent // action. ReparentTablet(context.Context, *vtctldata.ReparentTabletRequest) (*vtctldata.ReparentTabletResponse, error) + // ReshardCreate creates a workflow to reshard a keyspace. + ReshardCreate(context.Context, *vtctldata.ReshardCreateRequest) (*vtctldata.WorkflowStatusResponse, error) // RestoreFromBackup stops mysqld for the given tablet and restores a backup. RestoreFromBackup(*vtctldata.RestoreFromBackupRequest, Vtctld_RestoreFromBackupServer) error + // RetrySchemaMigration marks a given schema migration for retry. + RetrySchemaMigration(context.Context, *vtctldata.RetrySchemaMigrationRequest) (*vtctldata.RetrySchemaMigrationResponse, error) // RunHealthCheck runs a healthcheck on the remote tablet. RunHealthCheck(context.Context, *vtctldata.RunHealthCheckRequest) (*vtctldata.RunHealthCheckResponse, error) // SetKeyspaceDurabilityPolicy updates the DurabilityPolicy for a keyspace. @@ -1499,6 +1671,13 @@ type VtctldServer interface { ValidateVersionShard(context.Context, *vtctldata.ValidateVersionShardRequest) (*vtctldata.ValidateVersionShardResponse, error) // ValidateVSchema compares the schema of each primary tablet in "keyspace/shards..." to the vschema and errs if there are differences. ValidateVSchema(context.Context, *vtctldata.ValidateVSchemaRequest) (*vtctldata.ValidateVSchemaResponse, error) + // WorkflowDelete deletes a vreplication workflow. + WorkflowDelete(context.Context, *vtctldata.WorkflowDeleteRequest) (*vtctldata.WorkflowDeleteResponse, error) + WorkflowStatus(context.Context, *vtctldata.WorkflowStatusRequest) (*vtctldata.WorkflowStatusResponse, error) + WorkflowSwitchTraffic(context.Context, *vtctldata.WorkflowSwitchTrafficRequest) (*vtctldata.WorkflowSwitchTrafficResponse, error) + // WorkflowUpdate updates the configuration of a vreplication workflow + // using the provided updated parameters. + WorkflowUpdate(context.Context, *vtctldata.WorkflowUpdateRequest) (*vtctldata.WorkflowUpdateResponse, error) mustEmbedUnimplementedVtctldServer() } @@ -1530,9 +1709,18 @@ func (UnimplementedVtctldServer) Backup(*vtctldata.BackupRequest, Vtctld_BackupS func (UnimplementedVtctldServer) BackupShard(*vtctldata.BackupShardRequest, Vtctld_BackupShardServer) error { return status.Errorf(codes.Unimplemented, "method BackupShard not implemented") } +func (UnimplementedVtctldServer) CancelSchemaMigration(context.Context, *vtctldata.CancelSchemaMigrationRequest) (*vtctldata.CancelSchemaMigrationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelSchemaMigration not implemented") +} func (UnimplementedVtctldServer) ChangeTabletType(context.Context, *vtctldata.ChangeTabletTypeRequest) (*vtctldata.ChangeTabletTypeResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ChangeTabletType not implemented") } +func (UnimplementedVtctldServer) CleanupSchemaMigration(context.Context, *vtctldata.CleanupSchemaMigrationRequest) (*vtctldata.CleanupSchemaMigrationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CleanupSchemaMigration not implemented") +} +func (UnimplementedVtctldServer) CompleteSchemaMigration(context.Context, *vtctldata.CompleteSchemaMigrationRequest) (*vtctldata.CompleteSchemaMigrationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CompleteSchemaMigration not implemented") +} func (UnimplementedVtctldServer) CreateKeyspace(context.Context, *vtctldata.CreateKeyspaceRequest) (*vtctldata.CreateKeyspaceResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateKeyspace not implemented") } @@ -1602,6 +1790,9 @@ func (UnimplementedVtctldServer) GetRoutingRules(context.Context, *vtctldata.Get func (UnimplementedVtctldServer) GetSchema(context.Context, *vtctldata.GetSchemaRequest) (*vtctldata.GetSchemaResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") } +func (UnimplementedVtctldServer) GetSchemaMigrations(context.Context, *vtctldata.GetSchemaMigrationsRequest) (*vtctldata.GetSchemaMigrationsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSchemaMigrations not implemented") +} func (UnimplementedVtctldServer) GetShard(context.Context, *vtctldata.GetShardRequest) (*vtctldata.GetShardResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetShard not implemented") } @@ -1644,6 +1835,15 @@ func (UnimplementedVtctldServer) GetWorkflows(context.Context, *vtctldata.GetWor func (UnimplementedVtctldServer) InitShardPrimary(context.Context, *vtctldata.InitShardPrimaryRequest) (*vtctldata.InitShardPrimaryResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method InitShardPrimary not implemented") } +func (UnimplementedVtctldServer) LaunchSchemaMigration(context.Context, *vtctldata.LaunchSchemaMigrationRequest) (*vtctldata.LaunchSchemaMigrationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LaunchSchemaMigration not implemented") +} +func (UnimplementedVtctldServer) MoveTablesCreate(context.Context, *vtctldata.MoveTablesCreateRequest) (*vtctldata.WorkflowStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MoveTablesCreate not implemented") +} +func (UnimplementedVtctldServer) MoveTablesComplete(context.Context, *vtctldata.MoveTablesCompleteRequest) (*vtctldata.MoveTablesCompleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MoveTablesComplete not implemented") +} func (UnimplementedVtctldServer) PingTablet(context.Context, *vtctldata.PingTabletRequest) (*vtctldata.PingTabletResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method PingTablet not implemented") } @@ -1683,9 +1883,15 @@ func (UnimplementedVtctldServer) RemoveShardCell(context.Context, *vtctldata.Rem func (UnimplementedVtctldServer) ReparentTablet(context.Context, *vtctldata.ReparentTabletRequest) (*vtctldata.ReparentTabletResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ReparentTablet not implemented") } +func (UnimplementedVtctldServer) ReshardCreate(context.Context, *vtctldata.ReshardCreateRequest) (*vtctldata.WorkflowStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReshardCreate not implemented") +} func (UnimplementedVtctldServer) RestoreFromBackup(*vtctldata.RestoreFromBackupRequest, Vtctld_RestoreFromBackupServer) error { return status.Errorf(codes.Unimplemented, "method RestoreFromBackup not implemented") } +func (UnimplementedVtctldServer) RetrySchemaMigration(context.Context, *vtctldata.RetrySchemaMigrationRequest) (*vtctldata.RetrySchemaMigrationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RetrySchemaMigration not implemented") +} func (UnimplementedVtctldServer) RunHealthCheck(context.Context, *vtctldata.RunHealthCheckRequest) (*vtctldata.RunHealthCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RunHealthCheck not implemented") } @@ -1758,6 +1964,18 @@ func (UnimplementedVtctldServer) ValidateVersionShard(context.Context, *vtctldat func (UnimplementedVtctldServer) ValidateVSchema(context.Context, *vtctldata.ValidateVSchemaRequest) (*vtctldata.ValidateVSchemaResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ValidateVSchema not implemented") } +func (UnimplementedVtctldServer) WorkflowDelete(context.Context, *vtctldata.WorkflowDeleteRequest) (*vtctldata.WorkflowDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WorkflowDelete not implemented") +} +func (UnimplementedVtctldServer) WorkflowStatus(context.Context, *vtctldata.WorkflowStatusRequest) (*vtctldata.WorkflowStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WorkflowStatus not implemented") +} +func (UnimplementedVtctldServer) WorkflowSwitchTraffic(context.Context, *vtctldata.WorkflowSwitchTrafficRequest) (*vtctldata.WorkflowSwitchTrafficResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WorkflowSwitchTraffic not implemented") +} +func (UnimplementedVtctldServer) WorkflowUpdate(context.Context, *vtctldata.WorkflowUpdateRequest) (*vtctldata.WorkflowUpdateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WorkflowUpdate not implemented") +} func (UnimplementedVtctldServer) mustEmbedUnimplementedVtctldServer() {} // UnsafeVtctldServer may be embedded to opt out of forward compatibility for this service. @@ -1921,6 +2139,24 @@ func (x *vtctldBackupShardServer) Send(m *vtctldata.BackupResponse) error { return x.ServerStream.SendMsg(m) } +func _Vtctld_CancelSchemaMigration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.CancelSchemaMigrationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).CancelSchemaMigration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/CancelSchemaMigration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).CancelSchemaMigration(ctx, req.(*vtctldata.CancelSchemaMigrationRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Vtctld_ChangeTabletType_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(vtctldata.ChangeTabletTypeRequest) if err := dec(in); err != nil { @@ -1939,6 +2175,42 @@ func _Vtctld_ChangeTabletType_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _Vtctld_CleanupSchemaMigration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.CleanupSchemaMigrationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).CleanupSchemaMigration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/CleanupSchemaMigration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).CleanupSchemaMigration(ctx, req.(*vtctldata.CleanupSchemaMigrationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_CompleteSchemaMigration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.CompleteSchemaMigrationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).CompleteSchemaMigration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/CompleteSchemaMigration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).CompleteSchemaMigration(ctx, req.(*vtctldata.CompleteSchemaMigrationRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Vtctld_CreateKeyspace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(vtctldata.CreateKeyspaceRequest) if err := dec(in); err != nil { @@ -2353,6 +2625,24 @@ func _Vtctld_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(in return interceptor(ctx, in, info, handler) } +func _Vtctld_GetSchemaMigrations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.GetSchemaMigrationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetSchemaMigrations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/GetSchemaMigrations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetSchemaMigrations(ctx, req.(*vtctldata.GetSchemaMigrationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Vtctld_GetShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(vtctldata.GetShardRequest) if err := dec(in); err != nil { @@ -2605,6 +2895,60 @@ func _Vtctld_InitShardPrimary_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _Vtctld_LaunchSchemaMigration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.LaunchSchemaMigrationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).LaunchSchemaMigration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/LaunchSchemaMigration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).LaunchSchemaMigration(ctx, req.(*vtctldata.LaunchSchemaMigrationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_MoveTablesCreate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.MoveTablesCreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).MoveTablesCreate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/MoveTablesCreate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).MoveTablesCreate(ctx, req.(*vtctldata.MoveTablesCreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_MoveTablesComplete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.MoveTablesCompleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).MoveTablesComplete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/MoveTablesComplete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).MoveTablesComplete(ctx, req.(*vtctldata.MoveTablesCompleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Vtctld_PingTablet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(vtctldata.PingTabletRequest) if err := dec(in); err != nil { @@ -2839,6 +3183,24 @@ func _Vtctld_ReparentTablet_Handler(srv interface{}, ctx context.Context, dec fu return interceptor(ctx, in, info, handler) } +func _Vtctld_ReshardCreate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.ReshardCreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).ReshardCreate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/ReshardCreate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).ReshardCreate(ctx, req.(*vtctldata.ReshardCreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Vtctld_RestoreFromBackup_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(vtctldata.RestoreFromBackupRequest) if err := stream.RecvMsg(m); err != nil { @@ -2860,6 +3222,24 @@ func (x *vtctldRestoreFromBackupServer) Send(m *vtctldata.RestoreFromBackupRespo return x.ServerStream.SendMsg(m) } +func _Vtctld_RetrySchemaMigration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.RetrySchemaMigrationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).RetrySchemaMigration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/RetrySchemaMigration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).RetrySchemaMigration(ctx, req.(*vtctldata.RetrySchemaMigrationRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Vtctld_RunHealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(vtctldata.RunHealthCheckRequest) if err := dec(in); err != nil { @@ -3292,6 +3672,78 @@ func _Vtctld_ValidateVSchema_Handler(srv interface{}, ctx context.Context, dec f return interceptor(ctx, in, info, handler) } +func _Vtctld_WorkflowDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.WorkflowDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).WorkflowDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/WorkflowDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).WorkflowDelete(ctx, req.(*vtctldata.WorkflowDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_WorkflowStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.WorkflowStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).WorkflowStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/WorkflowStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).WorkflowStatus(ctx, req.(*vtctldata.WorkflowStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_WorkflowSwitchTraffic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.WorkflowSwitchTrafficRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).WorkflowSwitchTraffic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/WorkflowSwitchTraffic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).WorkflowSwitchTraffic(ctx, req.(*vtctldata.WorkflowSwitchTrafficRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Vtctld_WorkflowUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.WorkflowUpdateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).WorkflowUpdate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/WorkflowUpdate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).WorkflowUpdate(ctx, req.(*vtctldata.WorkflowUpdateRequest)) + } + return interceptor(ctx, in, info, handler) +} + // Vtctld_ServiceDesc is the grpc.ServiceDesc for Vtctld service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -3323,10 +3775,22 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{ MethodName: "ApplyVSchema", Handler: _Vtctld_ApplyVSchema_Handler, }, + { + MethodName: "CancelSchemaMigration", + Handler: _Vtctld_CancelSchemaMigration_Handler, + }, { MethodName: "ChangeTabletType", Handler: _Vtctld_ChangeTabletType_Handler, }, + { + MethodName: "CleanupSchemaMigration", + Handler: _Vtctld_CleanupSchemaMigration_Handler, + }, + { + MethodName: "CompleteSchemaMigration", + Handler: _Vtctld_CompleteSchemaMigration_Handler, + }, { MethodName: "CreateKeyspace", Handler: _Vtctld_CreateKeyspace_Handler, @@ -3419,6 +3883,10 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetSchema", Handler: _Vtctld_GetSchema_Handler, }, + { + MethodName: "GetSchemaMigrations", + Handler: _Vtctld_GetSchemaMigrations_Handler, + }, { MethodName: "GetShard", Handler: _Vtctld_GetShard_Handler, @@ -3475,6 +3943,18 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{ MethodName: "InitShardPrimary", Handler: _Vtctld_InitShardPrimary_Handler, }, + { + MethodName: "LaunchSchemaMigration", + Handler: _Vtctld_LaunchSchemaMigration_Handler, + }, + { + MethodName: "MoveTablesCreate", + Handler: _Vtctld_MoveTablesCreate_Handler, + }, + { + MethodName: "MoveTablesComplete", + Handler: _Vtctld_MoveTablesComplete_Handler, + }, { MethodName: "PingTablet", Handler: _Vtctld_PingTablet_Handler, @@ -3527,6 +4007,14 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{ MethodName: "ReparentTablet", Handler: _Vtctld_ReparentTablet_Handler, }, + { + MethodName: "ReshardCreate", + Handler: _Vtctld_ReshardCreate_Handler, + }, + { + MethodName: "RetrySchemaMigration", + Handler: _Vtctld_RetrySchemaMigration_Handler, + }, { MethodName: "RunHealthCheck", Handler: _Vtctld_RunHealthCheck_Handler, @@ -3623,6 +4111,22 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{ MethodName: "ValidateVSchema", Handler: _Vtctld_ValidateVSchema_Handler, }, + { + MethodName: "WorkflowDelete", + Handler: _Vtctld_WorkflowDelete_Handler, + }, + { + MethodName: "WorkflowStatus", + Handler: _Vtctld_WorkflowStatus_Handler, + }, + { + MethodName: "WorkflowSwitchTraffic", + Handler: _Vtctld_WorkflowSwitchTraffic_Handler, + }, + { + MethodName: "WorkflowUpdate", + Handler: _Vtctld_WorkflowUpdate_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/go/vt/proto/vtgate/vtgate.pb.go b/go/vt/proto/vtgate/vtgate.pb.go index 8f06d90a76c..aee90d134a4 100644 --- a/go/vt/proto/vtgate/vtgate.pb.go +++ b/go/vt/proto/vtgate/vtgate.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: vtgate.proto @@ -223,7 +223,10 @@ type Session struct { EnableSystemSettings bool `protobuf:"varint,23,opt,name=enable_system_settings,json=enableSystemSettings,proto3" json:"enable_system_settings,omitempty"` AdvisoryLock map[string]int64 `protobuf:"bytes,24,rep,name=advisory_lock,json=advisoryLock,proto3" json:"advisory_lock,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` // query_timeout is the maximum amount of time a query is permitted to run - QueryTimeout int64 `protobuf:"varint,25,opt,name=query_timeout,json=queryTimeout,proto3" json:"query_timeout,omitempty"` + QueryTimeout int64 `protobuf:"varint,25,opt,name=query_timeout,json=queryTimeout,proto3" json:"query_timeout,omitempty"` + PrepareStatement map[string]*PrepareData `protobuf:"bytes,26,rep,name=prepare_statement,json=prepareStatement,proto3" json:"prepare_statement,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // MigrationContext + MigrationContext string `protobuf:"bytes,27,opt,name=migration_context,json=migrationContext,proto3" json:"migration_context,omitempty"` } func (x *Session) Reset() { @@ -426,6 +429,76 @@ func (x *Session) GetQueryTimeout() int64 { return 0 } +func (x *Session) GetPrepareStatement() map[string]*PrepareData { + if x != nil { + return x.PrepareStatement + } + return nil +} + +func (x *Session) GetMigrationContext() string { + if x != nil { + return x.MigrationContext + } + return "" +} + +// PrepareData keeps the prepared statement and other information related for execution of it. +type PrepareData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PrepareStatement string `protobuf:"bytes,1,opt,name=prepare_statement,json=prepareStatement,proto3" json:"prepare_statement,omitempty"` + ParamsCount int32 `protobuf:"varint,2,opt,name=params_count,json=paramsCount,proto3" json:"params_count,omitempty"` +} + +func (x *PrepareData) Reset() { + *x = PrepareData{} + if protoimpl.UnsafeEnabled { + mi := &file_vtgate_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrepareData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrepareData) ProtoMessage() {} + +func (x *PrepareData) ProtoReflect() protoreflect.Message { + mi := &file_vtgate_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrepareData.ProtoReflect.Descriptor instead. +func (*PrepareData) Descriptor() ([]byte, []int) { + return file_vtgate_proto_rawDescGZIP(), []int{1} +} + +func (x *PrepareData) GetPrepareStatement() string { + if x != nil { + return x.PrepareStatement + } + return "" +} + +func (x *PrepareData) GetParamsCount() int32 { + if x != nil { + return x.ParamsCount + } + return 0 +} + // ReadAfterWrite contains information regarding gtid set and timeout // Also if the gtid information needs to be passed to client. type ReadAfterWrite struct { @@ -441,7 +514,7 @@ type ReadAfterWrite struct { func (x *ReadAfterWrite) Reset() { *x = ReadAfterWrite{} if protoimpl.UnsafeEnabled { - mi := &file_vtgate_proto_msgTypes[1] + mi := &file_vtgate_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -454,7 +527,7 @@ func (x *ReadAfterWrite) String() string { func (*ReadAfterWrite) ProtoMessage() {} func (x *ReadAfterWrite) ProtoReflect() protoreflect.Message { - mi := &file_vtgate_proto_msgTypes[1] + mi := &file_vtgate_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -467,7 +540,7 @@ func (x *ReadAfterWrite) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadAfterWrite.ProtoReflect.Descriptor instead. func (*ReadAfterWrite) Descriptor() ([]byte, []int) { - return file_vtgate_proto_rawDescGZIP(), []int{1} + return file_vtgate_proto_rawDescGZIP(), []int{2} } func (x *ReadAfterWrite) GetReadAfterWriteGtid() string { @@ -509,7 +582,7 @@ type ExecuteRequest struct { func (x *ExecuteRequest) Reset() { *x = ExecuteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtgate_proto_msgTypes[2] + mi := &file_vtgate_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -522,7 +595,7 @@ func (x *ExecuteRequest) String() string { func (*ExecuteRequest) ProtoMessage() {} func (x *ExecuteRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtgate_proto_msgTypes[2] + mi := &file_vtgate_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -535,7 +608,7 @@ func (x *ExecuteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteRequest.ProtoReflect.Descriptor instead. func (*ExecuteRequest) Descriptor() ([]byte, []int) { - return file_vtgate_proto_rawDescGZIP(), []int{2} + return file_vtgate_proto_rawDescGZIP(), []int{3} } func (x *ExecuteRequest) GetCallerId() *vtrpc.CallerID { @@ -578,7 +651,7 @@ type ExecuteResponse struct { func (x *ExecuteResponse) Reset() { *x = ExecuteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtgate_proto_msgTypes[3] + mi := &file_vtgate_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -591,7 +664,7 @@ func (x *ExecuteResponse) String() string { func (*ExecuteResponse) ProtoMessage() {} func (x *ExecuteResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtgate_proto_msgTypes[3] + mi := &file_vtgate_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -604,7 +677,7 @@ func (x *ExecuteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteResponse.ProtoReflect.Descriptor instead. func (*ExecuteResponse) Descriptor() ([]byte, []int) { - return file_vtgate_proto_rawDescGZIP(), []int{3} + return file_vtgate_proto_rawDescGZIP(), []int{4} } func (x *ExecuteResponse) GetError() *vtrpc.RPCError { @@ -646,7 +719,7 @@ type ExecuteBatchRequest struct { func (x *ExecuteBatchRequest) Reset() { *x = ExecuteBatchRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtgate_proto_msgTypes[4] + mi := &file_vtgate_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -659,7 +732,7 @@ func (x *ExecuteBatchRequest) String() string { func (*ExecuteBatchRequest) ProtoMessage() {} func (x *ExecuteBatchRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtgate_proto_msgTypes[4] + mi := &file_vtgate_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -672,7 +745,7 @@ func (x *ExecuteBatchRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteBatchRequest.ProtoReflect.Descriptor instead. func (*ExecuteBatchRequest) Descriptor() ([]byte, []int) { - return file_vtgate_proto_rawDescGZIP(), []int{4} + return file_vtgate_proto_rawDescGZIP(), []int{5} } func (x *ExecuteBatchRequest) GetCallerId() *vtrpc.CallerID { @@ -715,7 +788,7 @@ type ExecuteBatchResponse struct { func (x *ExecuteBatchResponse) Reset() { *x = ExecuteBatchResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtgate_proto_msgTypes[5] + mi := &file_vtgate_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -728,7 +801,7 @@ func (x *ExecuteBatchResponse) String() string { func (*ExecuteBatchResponse) ProtoMessage() {} func (x *ExecuteBatchResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtgate_proto_msgTypes[5] + mi := &file_vtgate_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -741,7 +814,7 @@ func (x *ExecuteBatchResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteBatchResponse.ProtoReflect.Descriptor instead. func (*ExecuteBatchResponse) Descriptor() ([]byte, []int) { - return file_vtgate_proto_rawDescGZIP(), []int{5} + return file_vtgate_proto_rawDescGZIP(), []int{6} } func (x *ExecuteBatchResponse) GetError() *vtrpc.RPCError { @@ -783,7 +856,7 @@ type StreamExecuteRequest struct { func (x *StreamExecuteRequest) Reset() { *x = StreamExecuteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtgate_proto_msgTypes[6] + mi := &file_vtgate_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -796,7 +869,7 @@ func (x *StreamExecuteRequest) String() string { func (*StreamExecuteRequest) ProtoMessage() {} func (x *StreamExecuteRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtgate_proto_msgTypes[6] + mi := &file_vtgate_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -809,7 +882,7 @@ func (x *StreamExecuteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamExecuteRequest.ProtoReflect.Descriptor instead. func (*StreamExecuteRequest) Descriptor() ([]byte, []int) { - return file_vtgate_proto_rawDescGZIP(), []int{6} + return file_vtgate_proto_rawDescGZIP(), []int{7} } func (x *StreamExecuteRequest) GetCallerId() *vtrpc.CallerID { @@ -845,12 +918,14 @@ type StreamExecuteResponse struct { // The first value contains only Fields information. // The next values contain the actual rows, a few values per result. Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + // session is the updated session information. + Session *Session `protobuf:"bytes,2,opt,name=session,proto3" json:"session,omitempty"` } func (x *StreamExecuteResponse) Reset() { *x = StreamExecuteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtgate_proto_msgTypes[7] + mi := &file_vtgate_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -863,7 +938,7 @@ func (x *StreamExecuteResponse) String() string { func (*StreamExecuteResponse) ProtoMessage() {} func (x *StreamExecuteResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtgate_proto_msgTypes[7] + mi := &file_vtgate_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -876,7 +951,7 @@ func (x *StreamExecuteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamExecuteResponse.ProtoReflect.Descriptor instead. func (*StreamExecuteResponse) Descriptor() ([]byte, []int) { - return file_vtgate_proto_rawDescGZIP(), []int{7} + return file_vtgate_proto_rawDescGZIP(), []int{8} } func (x *StreamExecuteResponse) GetResult() *query.QueryResult { @@ -886,6 +961,13 @@ func (x *StreamExecuteResponse) GetResult() *query.QueryResult { return nil } +func (x *StreamExecuteResponse) GetSession() *Session { + if x != nil { + return x.Session + } + return nil +} + // ResolveTransactionRequest is the payload to ResolveTransaction. type ResolveTransactionRequest struct { state protoimpl.MessageState @@ -902,7 +984,7 @@ type ResolveTransactionRequest struct { func (x *ResolveTransactionRequest) Reset() { *x = ResolveTransactionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtgate_proto_msgTypes[8] + mi := &file_vtgate_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -915,7 +997,7 @@ func (x *ResolveTransactionRequest) String() string { func (*ResolveTransactionRequest) ProtoMessage() {} func (x *ResolveTransactionRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtgate_proto_msgTypes[8] + mi := &file_vtgate_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -928,7 +1010,7 @@ func (x *ResolveTransactionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ResolveTransactionRequest.ProtoReflect.Descriptor instead. func (*ResolveTransactionRequest) Descriptor() ([]byte, []int) { - return file_vtgate_proto_rawDescGZIP(), []int{8} + return file_vtgate_proto_rawDescGZIP(), []int{9} } func (x *ResolveTransactionRequest) GetCallerId() *vtrpc.CallerID { @@ -955,7 +1037,7 @@ type ResolveTransactionResponse struct { func (x *ResolveTransactionResponse) Reset() { *x = ResolveTransactionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtgate_proto_msgTypes[9] + mi := &file_vtgate_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -968,7 +1050,7 @@ func (x *ResolveTransactionResponse) String() string { func (*ResolveTransactionResponse) ProtoMessage() {} func (x *ResolveTransactionResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtgate_proto_msgTypes[9] + mi := &file_vtgate_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -981,7 +1063,7 @@ func (x *ResolveTransactionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ResolveTransactionResponse.ProtoReflect.Descriptor instead. func (*ResolveTransactionResponse) Descriptor() ([]byte, []int) { - return file_vtgate_proto_rawDescGZIP(), []int{9} + return file_vtgate_proto_rawDescGZIP(), []int{10} } type VStreamFlags struct { @@ -997,13 +1079,15 @@ type VStreamFlags struct { StopOnReshard bool `protobuf:"varint,3,opt,name=stop_on_reshard,json=stopOnReshard,proto3" json:"stop_on_reshard,omitempty"` // if specified, these cells (comma-separated) are used to pick source tablets from. // defaults to the cell of the vtgate serving the VStream API. - Cells string `protobuf:"bytes,4,opt,name=cells,proto3" json:"cells,omitempty"` + Cells string `protobuf:"bytes,4,opt,name=cells,proto3" json:"cells,omitempty"` + CellPreference string `protobuf:"bytes,5,opt,name=cell_preference,json=cellPreference,proto3" json:"cell_preference,omitempty"` + TabletOrder string `protobuf:"bytes,6,opt,name=tablet_order,json=tabletOrder,proto3" json:"tablet_order,omitempty"` } func (x *VStreamFlags) Reset() { *x = VStreamFlags{} if protoimpl.UnsafeEnabled { - mi := &file_vtgate_proto_msgTypes[10] + mi := &file_vtgate_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1016,7 +1100,7 @@ func (x *VStreamFlags) String() string { func (*VStreamFlags) ProtoMessage() {} func (x *VStreamFlags) ProtoReflect() protoreflect.Message { - mi := &file_vtgate_proto_msgTypes[10] + mi := &file_vtgate_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1029,7 +1113,7 @@ func (x *VStreamFlags) ProtoReflect() protoreflect.Message { // Deprecated: Use VStreamFlags.ProtoReflect.Descriptor instead. func (*VStreamFlags) Descriptor() ([]byte, []int) { - return file_vtgate_proto_rawDescGZIP(), []int{10} + return file_vtgate_proto_rawDescGZIP(), []int{11} } func (x *VStreamFlags) GetMinimizeSkew() bool { @@ -1060,6 +1144,20 @@ func (x *VStreamFlags) GetCells() string { return "" } +func (x *VStreamFlags) GetCellPreference() string { + if x != nil { + return x.CellPreference + } + return "" +} + +func (x *VStreamFlags) GetTabletOrder() string { + if x != nil { + return x.TabletOrder + } + return "" +} + // VStreamRequest is the payload for VStream. type VStreamRequest struct { state protoimpl.MessageState @@ -1079,7 +1177,7 @@ type VStreamRequest struct { func (x *VStreamRequest) Reset() { *x = VStreamRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtgate_proto_msgTypes[11] + mi := &file_vtgate_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1092,7 +1190,7 @@ func (x *VStreamRequest) String() string { func (*VStreamRequest) ProtoMessage() {} func (x *VStreamRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtgate_proto_msgTypes[11] + mi := &file_vtgate_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1105,7 +1203,7 @@ func (x *VStreamRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VStreamRequest.ProtoReflect.Descriptor instead. func (*VStreamRequest) Descriptor() ([]byte, []int) { - return file_vtgate_proto_rawDescGZIP(), []int{11} + return file_vtgate_proto_rawDescGZIP(), []int{12} } func (x *VStreamRequest) GetCallerId() *vtrpc.CallerID { @@ -1155,7 +1253,7 @@ type VStreamResponse struct { func (x *VStreamResponse) Reset() { *x = VStreamResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtgate_proto_msgTypes[12] + mi := &file_vtgate_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1168,7 +1266,7 @@ func (x *VStreamResponse) String() string { func (*VStreamResponse) ProtoMessage() {} func (x *VStreamResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtgate_proto_msgTypes[12] + mi := &file_vtgate_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1181,7 +1279,7 @@ func (x *VStreamResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VStreamResponse.ProtoReflect.Descriptor instead. func (*VStreamResponse) Descriptor() ([]byte, []int) { - return file_vtgate_proto_rawDescGZIP(), []int{12} + return file_vtgate_proto_rawDescGZIP(), []int{13} } func (x *VStreamResponse) GetEvents() []*binlogdata.VEvent { @@ -1209,7 +1307,7 @@ type PrepareRequest struct { func (x *PrepareRequest) Reset() { *x = PrepareRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtgate_proto_msgTypes[13] + mi := &file_vtgate_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1222,7 +1320,7 @@ func (x *PrepareRequest) String() string { func (*PrepareRequest) ProtoMessage() {} func (x *PrepareRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtgate_proto_msgTypes[13] + mi := &file_vtgate_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1235,7 +1333,7 @@ func (x *PrepareRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PrepareRequest.ProtoReflect.Descriptor instead. func (*PrepareRequest) Descriptor() ([]byte, []int) { - return file_vtgate_proto_rawDescGZIP(), []int{13} + return file_vtgate_proto_rawDescGZIP(), []int{14} } func (x *PrepareRequest) GetCallerId() *vtrpc.CallerID { @@ -1278,7 +1376,7 @@ type PrepareResponse struct { func (x *PrepareResponse) Reset() { *x = PrepareResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtgate_proto_msgTypes[14] + mi := &file_vtgate_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1291,7 +1389,7 @@ func (x *PrepareResponse) String() string { func (*PrepareResponse) ProtoMessage() {} func (x *PrepareResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtgate_proto_msgTypes[14] + mi := &file_vtgate_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1304,7 +1402,7 @@ func (x *PrepareResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PrepareResponse.ProtoReflect.Descriptor instead. func (*PrepareResponse) Descriptor() ([]byte, []int) { - return file_vtgate_proto_rawDescGZIP(), []int{14} + return file_vtgate_proto_rawDescGZIP(), []int{15} } func (x *PrepareResponse) GetError() *vtrpc.RPCError { @@ -1344,7 +1442,7 @@ type CloseSessionRequest struct { func (x *CloseSessionRequest) Reset() { *x = CloseSessionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtgate_proto_msgTypes[15] + mi := &file_vtgate_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1357,7 +1455,7 @@ func (x *CloseSessionRequest) String() string { func (*CloseSessionRequest) ProtoMessage() {} func (x *CloseSessionRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtgate_proto_msgTypes[15] + mi := &file_vtgate_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1370,7 +1468,7 @@ func (x *CloseSessionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CloseSessionRequest.ProtoReflect.Descriptor instead. func (*CloseSessionRequest) Descriptor() ([]byte, []int) { - return file_vtgate_proto_rawDescGZIP(), []int{15} + return file_vtgate_proto_rawDescGZIP(), []int{16} } func (x *CloseSessionRequest) GetCallerId() *vtrpc.CallerID { @@ -1402,7 +1500,7 @@ type CloseSessionResponse struct { func (x *CloseSessionResponse) Reset() { *x = CloseSessionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtgate_proto_msgTypes[16] + mi := &file_vtgate_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1415,7 +1513,7 @@ func (x *CloseSessionResponse) String() string { func (*CloseSessionResponse) ProtoMessage() {} func (x *CloseSessionResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtgate_proto_msgTypes[16] + mi := &file_vtgate_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1428,7 +1526,7 @@ func (x *CloseSessionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CloseSessionResponse.ProtoReflect.Descriptor instead. func (*CloseSessionResponse) Descriptor() ([]byte, []int) { - return file_vtgate_proto_rawDescGZIP(), []int{16} + return file_vtgate_proto_rawDescGZIP(), []int{17} } func (x *CloseSessionResponse) GetError() *vtrpc.RPCError { @@ -1454,7 +1552,7 @@ type Session_ShardSession struct { func (x *Session_ShardSession) Reset() { *x = Session_ShardSession{} if protoimpl.UnsafeEnabled { - mi := &file_vtgate_proto_msgTypes[17] + mi := &file_vtgate_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1467,7 +1565,7 @@ func (x *Session_ShardSession) String() string { func (*Session_ShardSession) ProtoMessage() {} func (x *Session_ShardSession) ProtoReflect() protoreflect.Message { - mi := &file_vtgate_proto_msgTypes[17] + mi := &file_vtgate_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1526,7 +1624,7 @@ var file_vtgate_proto_rawDesc = []byte{ 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0b, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0xa0, 0x0d, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, + 0x74, 0x6f, 0x22, 0xfb, 0x0e, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x6e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x0e, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x73, @@ -1604,181 +1702,208 @@ var file_vtgate_proto_rawDesc = []byte{ 0x61, 0x64, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x79, 0x4c, 0x6f, 0x63, 0x6b, 0x12, 0x23, 0x0a, 0x0d, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x71, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x1a, 0xd8, 0x01, 0x0a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, 0x61, - 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, - 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, - 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0a, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x5c, 0x0a, 0x19, - 0x55, 0x73, 0x65, 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x53, 0x79, - 0x73, 0x74, 0x65, 0x6d, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3f, - 0x0a, 0x11, 0x41, 0x64, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x79, 0x4c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, + 0x74, 0x12, 0x52, 0x0a, 0x11, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, + 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, + 0x65, 0x70, 0x61, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x10, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x10, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x1a, 0xd8, 0x01, 0x0a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, + 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x5c, 0x0a, + 0x19, 0x55, 0x73, 0x65, 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x56, 0x61, 0x72, 0x69, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x53, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, - 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0xac, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x41, 0x66, - 0x74, 0x65, 0x72, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, 0x61, 0x64, - 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x67, 0x74, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x61, 0x64, 0x41, 0x66, 0x74, - 0x65, 0x72, 0x57, 0x72, 0x69, 0x74, 0x65, 0x47, 0x74, 0x69, 0x64, 0x12, 0x37, 0x0a, 0x18, 0x72, - 0x65, 0x61, 0x64, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x15, 0x72, - 0x65, 0x61, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2e, 0x0a, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, - 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x11, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x47, - 0x74, 0x69, 0x64, 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, - 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, - 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, - 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, - 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, - 0x08, 0x22, 0x8f, 0x01, 0x0a, 0x0f, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x07, - 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, - 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x22, 0xb3, 0x01, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x42, - 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, - 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, - 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, - 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, - 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, - 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, - 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0x9a, 0x01, 0x0a, 0x14, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, - 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x57, 0x69, 0x74, 0x68, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x07, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, - 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x27, 0x0a, - 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, - 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0x3f, 0x0a, 0x11, 0x41, 0x64, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x79, 0x4c, 0x6f, 0x63, 0x6b, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x58, 0x0a, 0x15, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x67, + 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, + 0x22, 0x5d, 0x0a, 0x0b, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, + 0x2b, 0x0a, 0x11, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x70, 0x72, 0x65, 0x70, + 0x61, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, + 0xac, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, + 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x12, 0x72, 0x65, 0x61, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x47, 0x74, 0x69, 0x64, 0x12, 0x37, 0x0a, 0x18, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x61, 0x66, + 0x74, 0x65, 0x72, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x15, 0x72, 0x65, 0x61, 0x64, 0x41, 0x66, 0x74, + 0x65, 0x72, 0x57, 0x72, 0x69, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2e, + 0x0a, 0x13, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, + 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x73, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x47, 0x74, 0x69, 0x64, 0x73, 0x22, 0xaa, + 0x01, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, + 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0x8f, 0x01, 0x0a, 0x0f, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, - 0x05, 0x10, 0x06, 0x22, 0x43, 0x0a, 0x15, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x5d, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x6f, - 0x6c, 0x76, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xb3, 0x01, + 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, - 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x6f, 0x6c, - 0x76, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa0, 0x01, 0x0a, 0x0c, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x69, - 0x7a, 0x65, 0x5f, 0x73, 0x6b, 0x65, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6d, - 0x69, 0x6e, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x53, 0x6b, 0x65, 0x77, 0x12, 0x2d, 0x0a, 0x12, 0x68, - 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, - 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, - 0x61, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, - 0x6f, 0x70, 0x5f, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x4f, 0x6e, 0x52, 0x65, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xf6, 0x01, 0x0a, 0x0e, 0x56, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, - 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, - 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x27, 0x0a, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x11, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x47, 0x74, - 0x69, 0x64, 0x52, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, - 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x56, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, - 0x73, 0x22, 0x3d, 0x0a, 0x0f, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x22, 0x92, 0x01, 0x0a, 0x0e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x05, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x89, 0x01, 0x0a, 0x0f, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, - 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2b, + 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, + 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, + 0x07, 0x10, 0x08, 0x22, 0x9a, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, + 0x72, 0x70, 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, + 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x57, 0x69, + 0x74, 0x68, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x22, 0xaa, 0x01, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, + 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, + 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x27, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, + 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x06, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x22, 0x6e, 0x0a, 0x13, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, - 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, - 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, - 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x22, 0x3d, 0x0a, 0x14, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, - 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x2a, 0x44, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, 0x01, - 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x54, - 0x57, 0x4f, 0x50, 0x43, 0x10, 0x03, 0x2a, 0x3c, 0x0a, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, - 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, - 0x00, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x52, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4f, - 0x53, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x55, 0x54, 0x4f, 0x43, 0x4f, 0x4d, 0x4d, - 0x49, 0x54, 0x10, 0x03, 0x42, 0x36, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, - 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x23, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, - 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4a, 0x04, 0x08, 0x03, 0x10, + 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x6e, 0x0a, + 0x15, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5d, 0x0a, + 0x19, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, + 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, + 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x22, 0x1c, 0x0a, 0x1a, + 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xec, 0x01, 0x0a, 0x0c, 0x56, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x6d, + 0x69, 0x6e, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x73, 0x6b, 0x65, 0x77, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0c, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x53, 0x6b, 0x65, 0x77, + 0x12, 0x2d, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x68, 0x65, + 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, + 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x4f, 0x6e, + 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x27, 0x0a, + 0x0f, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x65, 0x6c, 0x6c, 0x50, 0x72, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0xf6, 0x01, 0x0a, 0x0e, 0x56, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, + 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, + 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x47, + 0x74, 0x69, 0x64, 0x52, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x56, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x05, 0x66, 0x6c, 0x61, + 0x67, 0x73, 0x22, 0x3d, 0x0a, 0x0f, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x22, 0x92, 0x01, 0x0a, 0x0e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, + 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, + 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x89, 0x01, 0x0a, 0x0f, 0x50, 0x72, 0x65, 0x70, 0x61, + 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, + 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x06, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x73, 0x22, 0x6e, 0x0a, 0x13, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, + 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, + 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x67, 0x61, 0x74, + 0x65, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x3d, 0x0a, 0x14, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, + 0x63, 0x2e, 0x52, 0x50, 0x43, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x2a, 0x44, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, + 0x01, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, + 0x54, 0x57, 0x4f, 0x50, 0x43, 0x10, 0x03, 0x2a, 0x3c, 0x0a, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, + 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x52, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x50, + 0x4f, 0x53, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x55, 0x54, 0x4f, 0x43, 0x4f, 0x4d, + 0x4d, 0x49, 0x54, 0x10, 0x03, 0x42, 0x36, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, + 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x23, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, + 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x67, 0x61, 0x74, 0x65, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1794,99 +1919,104 @@ func file_vtgate_proto_rawDescGZIP() []byte { } var file_vtgate_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_vtgate_proto_msgTypes = make([]protoimpl.MessageInfo, 21) +var file_vtgate_proto_msgTypes = make([]protoimpl.MessageInfo, 23) var file_vtgate_proto_goTypes = []interface{}{ (TransactionMode)(0), // 0: vtgate.TransactionMode (CommitOrder)(0), // 1: vtgate.CommitOrder (*Session)(nil), // 2: vtgate.Session - (*ReadAfterWrite)(nil), // 3: vtgate.ReadAfterWrite - (*ExecuteRequest)(nil), // 4: vtgate.ExecuteRequest - (*ExecuteResponse)(nil), // 5: vtgate.ExecuteResponse - (*ExecuteBatchRequest)(nil), // 6: vtgate.ExecuteBatchRequest - (*ExecuteBatchResponse)(nil), // 7: vtgate.ExecuteBatchResponse - (*StreamExecuteRequest)(nil), // 8: vtgate.StreamExecuteRequest - (*StreamExecuteResponse)(nil), // 9: vtgate.StreamExecuteResponse - (*ResolveTransactionRequest)(nil), // 10: vtgate.ResolveTransactionRequest - (*ResolveTransactionResponse)(nil), // 11: vtgate.ResolveTransactionResponse - (*VStreamFlags)(nil), // 12: vtgate.VStreamFlags - (*VStreamRequest)(nil), // 13: vtgate.VStreamRequest - (*VStreamResponse)(nil), // 14: vtgate.VStreamResponse - (*PrepareRequest)(nil), // 15: vtgate.PrepareRequest - (*PrepareResponse)(nil), // 16: vtgate.PrepareResponse - (*CloseSessionRequest)(nil), // 17: vtgate.CloseSessionRequest - (*CloseSessionResponse)(nil), // 18: vtgate.CloseSessionResponse - (*Session_ShardSession)(nil), // 19: vtgate.Session.ShardSession - nil, // 20: vtgate.Session.UserDefinedVariablesEntry - nil, // 21: vtgate.Session.SystemVariablesEntry - nil, // 22: vtgate.Session.AdvisoryLockEntry - (*query.ExecuteOptions)(nil), // 23: query.ExecuteOptions - (*query.QueryWarning)(nil), // 24: query.QueryWarning - (*vtrpc.CallerID)(nil), // 25: vtrpc.CallerID - (*query.BoundQuery)(nil), // 26: query.BoundQuery - (*vtrpc.RPCError)(nil), // 27: vtrpc.RPCError - (*query.QueryResult)(nil), // 28: query.QueryResult - (*query.ResultWithError)(nil), // 29: query.ResultWithError - (topodata.TabletType)(0), // 30: topodata.TabletType - (*binlogdata.VGtid)(nil), // 31: binlogdata.VGtid - (*binlogdata.Filter)(nil), // 32: binlogdata.Filter - (*binlogdata.VEvent)(nil), // 33: binlogdata.VEvent - (*query.Field)(nil), // 34: query.Field - (*query.Target)(nil), // 35: query.Target - (*topodata.TabletAlias)(nil), // 36: topodata.TabletAlias - (*query.BindVariable)(nil), // 37: query.BindVariable + (*PrepareData)(nil), // 3: vtgate.PrepareData + (*ReadAfterWrite)(nil), // 4: vtgate.ReadAfterWrite + (*ExecuteRequest)(nil), // 5: vtgate.ExecuteRequest + (*ExecuteResponse)(nil), // 6: vtgate.ExecuteResponse + (*ExecuteBatchRequest)(nil), // 7: vtgate.ExecuteBatchRequest + (*ExecuteBatchResponse)(nil), // 8: vtgate.ExecuteBatchResponse + (*StreamExecuteRequest)(nil), // 9: vtgate.StreamExecuteRequest + (*StreamExecuteResponse)(nil), // 10: vtgate.StreamExecuteResponse + (*ResolveTransactionRequest)(nil), // 11: vtgate.ResolveTransactionRequest + (*ResolveTransactionResponse)(nil), // 12: vtgate.ResolveTransactionResponse + (*VStreamFlags)(nil), // 13: vtgate.VStreamFlags + (*VStreamRequest)(nil), // 14: vtgate.VStreamRequest + (*VStreamResponse)(nil), // 15: vtgate.VStreamResponse + (*PrepareRequest)(nil), // 16: vtgate.PrepareRequest + (*PrepareResponse)(nil), // 17: vtgate.PrepareResponse + (*CloseSessionRequest)(nil), // 18: vtgate.CloseSessionRequest + (*CloseSessionResponse)(nil), // 19: vtgate.CloseSessionResponse + (*Session_ShardSession)(nil), // 20: vtgate.Session.ShardSession + nil, // 21: vtgate.Session.UserDefinedVariablesEntry + nil, // 22: vtgate.Session.SystemVariablesEntry + nil, // 23: vtgate.Session.AdvisoryLockEntry + nil, // 24: vtgate.Session.PrepareStatementEntry + (*query.ExecuteOptions)(nil), // 25: query.ExecuteOptions + (*query.QueryWarning)(nil), // 26: query.QueryWarning + (*vtrpc.CallerID)(nil), // 27: vtrpc.CallerID + (*query.BoundQuery)(nil), // 28: query.BoundQuery + (*vtrpc.RPCError)(nil), // 29: vtrpc.RPCError + (*query.QueryResult)(nil), // 30: query.QueryResult + (*query.ResultWithError)(nil), // 31: query.ResultWithError + (topodata.TabletType)(0), // 32: topodata.TabletType + (*binlogdata.VGtid)(nil), // 33: binlogdata.VGtid + (*binlogdata.Filter)(nil), // 34: binlogdata.Filter + (*binlogdata.VEvent)(nil), // 35: binlogdata.VEvent + (*query.Field)(nil), // 36: query.Field + (*query.Target)(nil), // 37: query.Target + (*topodata.TabletAlias)(nil), // 38: topodata.TabletAlias + (*query.BindVariable)(nil), // 39: query.BindVariable } var file_vtgate_proto_depIdxs = []int32{ - 19, // 0: vtgate.Session.shard_sessions:type_name -> vtgate.Session.ShardSession - 23, // 1: vtgate.Session.options:type_name -> query.ExecuteOptions + 20, // 0: vtgate.Session.shard_sessions:type_name -> vtgate.Session.ShardSession + 25, // 1: vtgate.Session.options:type_name -> query.ExecuteOptions 0, // 2: vtgate.Session.transaction_mode:type_name -> vtgate.TransactionMode - 24, // 3: vtgate.Session.warnings:type_name -> query.QueryWarning - 19, // 4: vtgate.Session.pre_sessions:type_name -> vtgate.Session.ShardSession - 19, // 5: vtgate.Session.post_sessions:type_name -> vtgate.Session.ShardSession - 20, // 6: vtgate.Session.user_defined_variables:type_name -> vtgate.Session.UserDefinedVariablesEntry - 21, // 7: vtgate.Session.system_variables:type_name -> vtgate.Session.SystemVariablesEntry - 19, // 8: vtgate.Session.lock_session:type_name -> vtgate.Session.ShardSession - 3, // 9: vtgate.Session.read_after_write:type_name -> vtgate.ReadAfterWrite - 22, // 10: vtgate.Session.advisory_lock:type_name -> vtgate.Session.AdvisoryLockEntry - 25, // 11: vtgate.ExecuteRequest.caller_id:type_name -> vtrpc.CallerID - 2, // 12: vtgate.ExecuteRequest.session:type_name -> vtgate.Session - 26, // 13: vtgate.ExecuteRequest.query:type_name -> query.BoundQuery - 27, // 14: vtgate.ExecuteResponse.error:type_name -> vtrpc.RPCError - 2, // 15: vtgate.ExecuteResponse.session:type_name -> vtgate.Session - 28, // 16: vtgate.ExecuteResponse.result:type_name -> query.QueryResult - 25, // 17: vtgate.ExecuteBatchRequest.caller_id:type_name -> vtrpc.CallerID - 2, // 18: vtgate.ExecuteBatchRequest.session:type_name -> vtgate.Session - 26, // 19: vtgate.ExecuteBatchRequest.queries:type_name -> query.BoundQuery - 27, // 20: vtgate.ExecuteBatchResponse.error:type_name -> vtrpc.RPCError - 2, // 21: vtgate.ExecuteBatchResponse.session:type_name -> vtgate.Session - 29, // 22: vtgate.ExecuteBatchResponse.results:type_name -> query.ResultWithError - 25, // 23: vtgate.StreamExecuteRequest.caller_id:type_name -> vtrpc.CallerID - 26, // 24: vtgate.StreamExecuteRequest.query:type_name -> query.BoundQuery - 2, // 25: vtgate.StreamExecuteRequest.session:type_name -> vtgate.Session - 28, // 26: vtgate.StreamExecuteResponse.result:type_name -> query.QueryResult - 25, // 27: vtgate.ResolveTransactionRequest.caller_id:type_name -> vtrpc.CallerID - 25, // 28: vtgate.VStreamRequest.caller_id:type_name -> vtrpc.CallerID - 30, // 29: vtgate.VStreamRequest.tablet_type:type_name -> topodata.TabletType - 31, // 30: vtgate.VStreamRequest.vgtid:type_name -> binlogdata.VGtid - 32, // 31: vtgate.VStreamRequest.filter:type_name -> binlogdata.Filter - 12, // 32: vtgate.VStreamRequest.flags:type_name -> vtgate.VStreamFlags - 33, // 33: vtgate.VStreamResponse.events:type_name -> binlogdata.VEvent - 25, // 34: vtgate.PrepareRequest.caller_id:type_name -> vtrpc.CallerID - 2, // 35: vtgate.PrepareRequest.session:type_name -> vtgate.Session - 26, // 36: vtgate.PrepareRequest.query:type_name -> query.BoundQuery - 27, // 37: vtgate.PrepareResponse.error:type_name -> vtrpc.RPCError - 2, // 38: vtgate.PrepareResponse.session:type_name -> vtgate.Session - 34, // 39: vtgate.PrepareResponse.fields:type_name -> query.Field - 25, // 40: vtgate.CloseSessionRequest.caller_id:type_name -> vtrpc.CallerID - 2, // 41: vtgate.CloseSessionRequest.session:type_name -> vtgate.Session - 27, // 42: vtgate.CloseSessionResponse.error:type_name -> vtrpc.RPCError - 35, // 43: vtgate.Session.ShardSession.target:type_name -> query.Target - 36, // 44: vtgate.Session.ShardSession.tablet_alias:type_name -> topodata.TabletAlias - 37, // 45: vtgate.Session.UserDefinedVariablesEntry.value:type_name -> query.BindVariable - 46, // [46:46] is the sub-list for method output_type - 46, // [46:46] is the sub-list for method input_type - 46, // [46:46] is the sub-list for extension type_name - 46, // [46:46] is the sub-list for extension extendee - 0, // [0:46] is the sub-list for field type_name + 26, // 3: vtgate.Session.warnings:type_name -> query.QueryWarning + 20, // 4: vtgate.Session.pre_sessions:type_name -> vtgate.Session.ShardSession + 20, // 5: vtgate.Session.post_sessions:type_name -> vtgate.Session.ShardSession + 21, // 6: vtgate.Session.user_defined_variables:type_name -> vtgate.Session.UserDefinedVariablesEntry + 22, // 7: vtgate.Session.system_variables:type_name -> vtgate.Session.SystemVariablesEntry + 20, // 8: vtgate.Session.lock_session:type_name -> vtgate.Session.ShardSession + 4, // 9: vtgate.Session.read_after_write:type_name -> vtgate.ReadAfterWrite + 23, // 10: vtgate.Session.advisory_lock:type_name -> vtgate.Session.AdvisoryLockEntry + 24, // 11: vtgate.Session.prepare_statement:type_name -> vtgate.Session.PrepareStatementEntry + 27, // 12: vtgate.ExecuteRequest.caller_id:type_name -> vtrpc.CallerID + 2, // 13: vtgate.ExecuteRequest.session:type_name -> vtgate.Session + 28, // 14: vtgate.ExecuteRequest.query:type_name -> query.BoundQuery + 29, // 15: vtgate.ExecuteResponse.error:type_name -> vtrpc.RPCError + 2, // 16: vtgate.ExecuteResponse.session:type_name -> vtgate.Session + 30, // 17: vtgate.ExecuteResponse.result:type_name -> query.QueryResult + 27, // 18: vtgate.ExecuteBatchRequest.caller_id:type_name -> vtrpc.CallerID + 2, // 19: vtgate.ExecuteBatchRequest.session:type_name -> vtgate.Session + 28, // 20: vtgate.ExecuteBatchRequest.queries:type_name -> query.BoundQuery + 29, // 21: vtgate.ExecuteBatchResponse.error:type_name -> vtrpc.RPCError + 2, // 22: vtgate.ExecuteBatchResponse.session:type_name -> vtgate.Session + 31, // 23: vtgate.ExecuteBatchResponse.results:type_name -> query.ResultWithError + 27, // 24: vtgate.StreamExecuteRequest.caller_id:type_name -> vtrpc.CallerID + 28, // 25: vtgate.StreamExecuteRequest.query:type_name -> query.BoundQuery + 2, // 26: vtgate.StreamExecuteRequest.session:type_name -> vtgate.Session + 30, // 27: vtgate.StreamExecuteResponse.result:type_name -> query.QueryResult + 2, // 28: vtgate.StreamExecuteResponse.session:type_name -> vtgate.Session + 27, // 29: vtgate.ResolveTransactionRequest.caller_id:type_name -> vtrpc.CallerID + 27, // 30: vtgate.VStreamRequest.caller_id:type_name -> vtrpc.CallerID + 32, // 31: vtgate.VStreamRequest.tablet_type:type_name -> topodata.TabletType + 33, // 32: vtgate.VStreamRequest.vgtid:type_name -> binlogdata.VGtid + 34, // 33: vtgate.VStreamRequest.filter:type_name -> binlogdata.Filter + 13, // 34: vtgate.VStreamRequest.flags:type_name -> vtgate.VStreamFlags + 35, // 35: vtgate.VStreamResponse.events:type_name -> binlogdata.VEvent + 27, // 36: vtgate.PrepareRequest.caller_id:type_name -> vtrpc.CallerID + 2, // 37: vtgate.PrepareRequest.session:type_name -> vtgate.Session + 28, // 38: vtgate.PrepareRequest.query:type_name -> query.BoundQuery + 29, // 39: vtgate.PrepareResponse.error:type_name -> vtrpc.RPCError + 2, // 40: vtgate.PrepareResponse.session:type_name -> vtgate.Session + 36, // 41: vtgate.PrepareResponse.fields:type_name -> query.Field + 27, // 42: vtgate.CloseSessionRequest.caller_id:type_name -> vtrpc.CallerID + 2, // 43: vtgate.CloseSessionRequest.session:type_name -> vtgate.Session + 29, // 44: vtgate.CloseSessionResponse.error:type_name -> vtrpc.RPCError + 37, // 45: vtgate.Session.ShardSession.target:type_name -> query.Target + 38, // 46: vtgate.Session.ShardSession.tablet_alias:type_name -> topodata.TabletAlias + 39, // 47: vtgate.Session.UserDefinedVariablesEntry.value:type_name -> query.BindVariable + 3, // 48: vtgate.Session.PrepareStatementEntry.value:type_name -> vtgate.PrepareData + 49, // [49:49] is the sub-list for method output_type + 49, // [49:49] is the sub-list for method input_type + 49, // [49:49] is the sub-list for extension type_name + 49, // [49:49] is the sub-list for extension extendee + 0, // [0:49] is the sub-list for field type_name } func init() { file_vtgate_proto_init() } @@ -1908,7 +2038,7 @@ func file_vtgate_proto_init() { } } file_vtgate_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadAfterWrite); i { + switch v := v.(*PrepareData); i { case 0: return &v.state case 1: @@ -1920,7 +2050,7 @@ func file_vtgate_proto_init() { } } file_vtgate_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteRequest); i { + switch v := v.(*ReadAfterWrite); i { case 0: return &v.state case 1: @@ -1932,7 +2062,7 @@ func file_vtgate_proto_init() { } } file_vtgate_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteResponse); i { + switch v := v.(*ExecuteRequest); i { case 0: return &v.state case 1: @@ -1944,7 +2074,7 @@ func file_vtgate_proto_init() { } } file_vtgate_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteBatchRequest); i { + switch v := v.(*ExecuteResponse); i { case 0: return &v.state case 1: @@ -1956,7 +2086,7 @@ func file_vtgate_proto_init() { } } file_vtgate_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteBatchResponse); i { + switch v := v.(*ExecuteBatchRequest); i { case 0: return &v.state case 1: @@ -1968,7 +2098,7 @@ func file_vtgate_proto_init() { } } file_vtgate_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StreamExecuteRequest); i { + switch v := v.(*ExecuteBatchResponse); i { case 0: return &v.state case 1: @@ -1980,7 +2110,7 @@ func file_vtgate_proto_init() { } } file_vtgate_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StreamExecuteResponse); i { + switch v := v.(*StreamExecuteRequest); i { case 0: return &v.state case 1: @@ -1992,7 +2122,7 @@ func file_vtgate_proto_init() { } } file_vtgate_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResolveTransactionRequest); i { + switch v := v.(*StreamExecuteResponse); i { case 0: return &v.state case 1: @@ -2004,7 +2134,7 @@ func file_vtgate_proto_init() { } } file_vtgate_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResolveTransactionResponse); i { + switch v := v.(*ResolveTransactionRequest); i { case 0: return &v.state case 1: @@ -2016,7 +2146,7 @@ func file_vtgate_proto_init() { } } file_vtgate_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VStreamFlags); i { + switch v := v.(*ResolveTransactionResponse); i { case 0: return &v.state case 1: @@ -2028,7 +2158,7 @@ func file_vtgate_proto_init() { } } file_vtgate_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VStreamRequest); i { + switch v := v.(*VStreamFlags); i { case 0: return &v.state case 1: @@ -2040,7 +2170,7 @@ func file_vtgate_proto_init() { } } file_vtgate_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VStreamResponse); i { + switch v := v.(*VStreamRequest); i { case 0: return &v.state case 1: @@ -2052,7 +2182,7 @@ func file_vtgate_proto_init() { } } file_vtgate_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PrepareRequest); i { + switch v := v.(*VStreamResponse); i { case 0: return &v.state case 1: @@ -2064,7 +2194,7 @@ func file_vtgate_proto_init() { } } file_vtgate_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PrepareResponse); i { + switch v := v.(*PrepareRequest); i { case 0: return &v.state case 1: @@ -2076,7 +2206,7 @@ func file_vtgate_proto_init() { } } file_vtgate_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CloseSessionRequest); i { + switch v := v.(*PrepareResponse); i { case 0: return &v.state case 1: @@ -2088,7 +2218,7 @@ func file_vtgate_proto_init() { } } file_vtgate_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CloseSessionResponse); i { + switch v := v.(*CloseSessionRequest); i { case 0: return &v.state case 1: @@ -2100,6 +2230,18 @@ func file_vtgate_proto_init() { } } file_vtgate_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloseSessionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtgate_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Session_ShardSession); i { case 0: return &v.state @@ -2118,7 +2260,7 @@ func file_vtgate_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_vtgate_proto_rawDesc, NumEnums: 2, - NumMessages: 21, + NumMessages: 23, NumExtensions: 0, NumServices: 0, }, diff --git a/go/vt/proto/vtgate/vtgate_vtproto.pb.go b/go/vt/proto/vtgate/vtgate_vtproto.pb.go index 97f92f5e873..bec24472760 100644 --- a/go/vt/proto/vtgate/vtgate_vtproto.pb.go +++ b/go/vt/proto/vtgate/vtgate_vtproto.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: vtgate.proto package vtgate @@ -7,6 +7,7 @@ package vtgate import ( binary "encoding/binary" fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" math "math" @@ -24,6 +25,479 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *Session_ShardSession) CloneVT() *Session_ShardSession { + if m == nil { + return (*Session_ShardSession)(nil) + } + r := &Session_ShardSession{ + Target: m.Target.CloneVT(), + TransactionId: m.TransactionId, + TabletAlias: m.TabletAlias.CloneVT(), + ReservedId: m.ReservedId, + VindexOnly: m.VindexOnly, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Session_ShardSession) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Session) CloneVT() *Session { + if m == nil { + return (*Session)(nil) + } + r := &Session{ + InTransaction: m.InTransaction, + Autocommit: m.Autocommit, + TargetString: m.TargetString, + Options: m.Options.CloneVT(), + TransactionMode: m.TransactionMode, + LastInsertId: m.LastInsertId, + FoundRows: m.FoundRows, + RowCount: m.RowCount, + InReservedConn: m.InReservedConn, + LockSession: m.LockSession.CloneVT(), + LastLockHeartbeat: m.LastLockHeartbeat, + ReadAfterWrite: m.ReadAfterWrite.CloneVT(), + DDLStrategy: m.DDLStrategy, + SessionUUID: m.SessionUUID, + EnableSystemSettings: m.EnableSystemSettings, + QueryTimeout: m.QueryTimeout, + MigrationContext: m.MigrationContext, + } + if rhs := m.ShardSessions; rhs != nil { + tmpContainer := make([]*Session_ShardSession, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ShardSessions = tmpContainer + } + if rhs := m.Warnings; rhs != nil { + tmpContainer := make([]*query.QueryWarning, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Warnings = tmpContainer + } + if rhs := m.PreSessions; rhs != nil { + tmpContainer := make([]*Session_ShardSession, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.PreSessions = tmpContainer + } + if rhs := m.PostSessions; rhs != nil { + tmpContainer := make([]*Session_ShardSession, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.PostSessions = tmpContainer + } + if rhs := m.UserDefinedVariables; rhs != nil { + tmpContainer := make(map[string]*query.BindVariable, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.UserDefinedVariables = tmpContainer + } + if rhs := m.SystemVariables; rhs != nil { + tmpContainer := make(map[string]string, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.SystemVariables = tmpContainer + } + if rhs := m.Savepoints; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Savepoints = tmpContainer + } + if rhs := m.AdvisoryLock; rhs != nil { + tmpContainer := make(map[string]int64, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.AdvisoryLock = tmpContainer + } + if rhs := m.PrepareStatement; rhs != nil { + tmpContainer := make(map[string]*PrepareData, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.PrepareStatement = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Session) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PrepareData) CloneVT() *PrepareData { + if m == nil { + return (*PrepareData)(nil) + } + r := &PrepareData{ + PrepareStatement: m.PrepareStatement, + ParamsCount: m.ParamsCount, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PrepareData) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReadAfterWrite) CloneVT() *ReadAfterWrite { + if m == nil { + return (*ReadAfterWrite)(nil) + } + r := &ReadAfterWrite{ + ReadAfterWriteGtid: m.ReadAfterWriteGtid, + ReadAfterWriteTimeout: m.ReadAfterWriteTimeout, + SessionTrackGtids: m.SessionTrackGtids, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReadAfterWrite) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteRequest) CloneVT() *ExecuteRequest { + if m == nil { + return (*ExecuteRequest)(nil) + } + r := &ExecuteRequest{ + CallerId: m.CallerId.CloneVT(), + Session: m.Session.CloneVT(), + Query: m.Query.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteResponse) CloneVT() *ExecuteResponse { + if m == nil { + return (*ExecuteResponse)(nil) + } + r := &ExecuteResponse{ + Error: m.Error.CloneVT(), + Session: m.Session.CloneVT(), + Result: m.Result.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteBatchRequest) CloneVT() *ExecuteBatchRequest { + if m == nil { + return (*ExecuteBatchRequest)(nil) + } + r := &ExecuteBatchRequest{ + CallerId: m.CallerId.CloneVT(), + Session: m.Session.CloneVT(), + } + if rhs := m.Queries; rhs != nil { + tmpContainer := make([]*query.BoundQuery, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Queries = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteBatchRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteBatchResponse) CloneVT() *ExecuteBatchResponse { + if m == nil { + return (*ExecuteBatchResponse)(nil) + } + r := &ExecuteBatchResponse{ + Error: m.Error.CloneVT(), + Session: m.Session.CloneVT(), + } + if rhs := m.Results; rhs != nil { + tmpContainer := make([]*query.ResultWithError, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Results = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteBatchResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamExecuteRequest) CloneVT() *StreamExecuteRequest { + if m == nil { + return (*StreamExecuteRequest)(nil) + } + r := &StreamExecuteRequest{ + CallerId: m.CallerId.CloneVT(), + Query: m.Query.CloneVT(), + Session: m.Session.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamExecuteRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *StreamExecuteResponse) CloneVT() *StreamExecuteResponse { + if m == nil { + return (*StreamExecuteResponse)(nil) + } + r := &StreamExecuteResponse{ + Result: m.Result.CloneVT(), + Session: m.Session.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *StreamExecuteResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResolveTransactionRequest) CloneVT() *ResolveTransactionRequest { + if m == nil { + return (*ResolveTransactionRequest)(nil) + } + r := &ResolveTransactionRequest{ + CallerId: m.CallerId.CloneVT(), + Dtid: m.Dtid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ResolveTransactionRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ResolveTransactionResponse) CloneVT() *ResolveTransactionResponse { + if m == nil { + return (*ResolveTransactionResponse)(nil) + } + r := &ResolveTransactionResponse{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ResolveTransactionResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamFlags) CloneVT() *VStreamFlags { + if m == nil { + return (*VStreamFlags)(nil) + } + r := &VStreamFlags{ + MinimizeSkew: m.MinimizeSkew, + HeartbeatInterval: m.HeartbeatInterval, + StopOnReshard: m.StopOnReshard, + Cells: m.Cells, + CellPreference: m.CellPreference, + TabletOrder: m.TabletOrder, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamFlags) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamRequest) CloneVT() *VStreamRequest { + if m == nil { + return (*VStreamRequest)(nil) + } + r := &VStreamRequest{ + CallerId: m.CallerId.CloneVT(), + TabletType: m.TabletType, + Vgtid: m.Vgtid.CloneVT(), + Filter: m.Filter.CloneVT(), + Flags: m.Flags.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VStreamResponse) CloneVT() *VStreamResponse { + if m == nil { + return (*VStreamResponse)(nil) + } + r := &VStreamResponse{} + if rhs := m.Events; rhs != nil { + tmpContainer := make([]*binlogdata.VEvent, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Events = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VStreamResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PrepareRequest) CloneVT() *PrepareRequest { + if m == nil { + return (*PrepareRequest)(nil) + } + r := &PrepareRequest{ + CallerId: m.CallerId.CloneVT(), + Session: m.Session.CloneVT(), + Query: m.Query.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PrepareRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *PrepareResponse) CloneVT() *PrepareResponse { + if m == nil { + return (*PrepareResponse)(nil) + } + r := &PrepareResponse{ + Error: m.Error.CloneVT(), + Session: m.Session.CloneVT(), + } + if rhs := m.Fields; rhs != nil { + tmpContainer := make([]*query.Field, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Fields = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *PrepareResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CloseSessionRequest) CloneVT() *CloseSessionRequest { + if m == nil { + return (*CloseSessionRequest)(nil) + } + r := &CloseSessionRequest{ + CallerId: m.CallerId.CloneVT(), + Session: m.Session.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CloseSessionRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CloseSessionResponse) CloneVT() *CloseSessionResponse { + if m == nil { + return (*CloseSessionResponse)(nil) + } + r := &CloseSessionResponse{ + Error: m.Error.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CloseSessionResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Session_ShardSession) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -127,6 +601,39 @@ func (m *Session) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.MigrationContext) > 0 { + i -= len(m.MigrationContext) + copy(dAtA[i:], m.MigrationContext) + i = encodeVarint(dAtA, i, uint64(len(m.MigrationContext))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + if len(m.PrepareStatement) > 0 { + for k := range m.PrepareStatement { + v := m.PrepareStatement[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + } if m.QueryTimeout != 0 { i = encodeVarint(dAtA, i, uint64(m.QueryTimeout)) i-- @@ -386,6 +893,51 @@ func (m *Session) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PrepareData) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PrepareData) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *PrepareData) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ParamsCount != 0 { + i = encodeVarint(dAtA, i, uint64(m.ParamsCount)) + i-- + dAtA[i] = 0x10 + } + if len(m.PrepareStatement) > 0 { + i -= len(m.PrepareStatement) + copy(dAtA[i:], m.PrepareStatement) + i = encodeVarint(dAtA, i, uint64(len(m.PrepareStatement))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *ReadAfterWrite) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -791,6 +1343,16 @@ func (m *StreamExecuteResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.Session != nil { + size, err := m.Session.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } if m.Result != nil { size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -917,6 +1479,20 @@ func (m *VStreamFlags) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.TabletOrder) > 0 { + i -= len(m.TabletOrder) + copy(dAtA[i:], m.TabletOrder) + i = encodeVarint(dAtA, i, uint64(len(m.TabletOrder))) + i-- + dAtA[i] = 0x32 + } + if len(m.CellPreference) > 0 { + i -= len(m.CellPreference) + copy(dAtA[i:], m.CellPreference) + i = encodeVarint(dAtA, i, uint64(len(m.CellPreference))) + i-- + dAtA[i] = 0x2a + } if len(m.Cells) > 0 { i -= len(m.Cells) copy(dAtA[i:], m.Cells) @@ -1456,38 +2032,72 @@ func (m *Session) SizeVT() (n int) { if m.QueryTimeout != 0 { n += 2 + sov(uint64(m.QueryTimeout)) } + if len(m.PrepareStatement) > 0 { + for k, v := range m.PrepareStatement { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 2 + sov(uint64(mapEntrySize)) + } + } + l = len(m.MigrationContext) + if l > 0 { + n += 2 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } -func (m *ReadAfterWrite) SizeVT() (n int) { +func (m *PrepareData) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.ReadAfterWriteGtid) + l = len(m.PrepareStatement) if l > 0 { n += 1 + l + sov(uint64(l)) } - if m.ReadAfterWriteTimeout != 0 { - n += 9 - } - if m.SessionTrackGtids { - n += 2 + if m.ParamsCount != 0 { + n += 1 + sov(uint64(m.ParamsCount)) } n += len(m.unknownFields) return n } -func (m *ExecuteRequest) SizeVT() (n int) { +func (m *ReadAfterWrite) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - if m.CallerId != nil { - l = m.CallerId.SizeVT() + l = len(m.ReadAfterWriteGtid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.ReadAfterWriteTimeout != 0 { + n += 9 + } + if m.SessionTrackGtids { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CallerId != nil { + l = m.CallerId.SizeVT() n += 1 + l + sov(uint64(l)) } if m.Session != nil { @@ -1604,6 +2214,10 @@ func (m *StreamExecuteResponse) SizeVT() (n int) { l = m.Result.SizeVT() n += 1 + l + sov(uint64(l)) } + if m.Session != nil { + l = m.Session.SizeVT() + n += 1 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -1655,6 +2269,14 @@ func (m *VStreamFlags) SizeVT() (n int) { if l > 0 { n += 1 + l + sov(uint64(l)) } + l = len(m.CellPreference) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TabletOrder) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -2933,6 +3555,269 @@ func (m *Session) UnmarshalVT(dAtA []byte) error { break } } + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrepareStatement", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrepareStatement == nil { + m.PrepareStatement = make(map[string]*PrepareData) + } + var mapkey string + var mapvalue *PrepareData + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &PrepareData{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PrepareStatement[mapkey] = mapvalue + iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MigrationContext", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MigrationContext = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PrepareData) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PrepareData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PrepareData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrepareStatement", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PrepareStatement = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ParamsCount", wireType) + } + m.ParamsCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ParamsCount |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -3925,6 +4810,42 @@ func (m *StreamExecuteResponse) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Session", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Session == nil { + m.Session = &Session{} + } + if err := m.Session.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -4237,6 +5158,70 @@ func (m *VStreamFlags) UnmarshalVT(dAtA []byte) error { } m.Cells = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellPreference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellPreference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletOrder", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TabletOrder = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/go/vt/proto/vtgateservice/vtgateservice.pb.go b/go/vt/proto/vtgateservice/vtgateservice.pb.go index 7c7d049867d..2008d486dc9 100644 --- a/go/vt/proto/vtgateservice/vtgateservice.pb.go +++ b/go/vt/proto/vtgateservice/vtgateservice.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: vtgateservice.proto diff --git a/go/vt/proto/vtrpc/vtrpc.pb.go b/go/vt/proto/vtrpc/vtrpc.pb.go index abde129b474..0c82dc34bf5 100644 --- a/go/vt/proto/vtrpc/vtrpc.pb.go +++ b/go/vt/proto/vtrpc/vtrpc.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: vtrpc.proto diff --git a/go/vt/proto/vtrpc/vtrpc_vtproto.pb.go b/go/vt/proto/vtrpc/vtrpc_vtproto.pb.go index 008fe7aa100..36fb8ba8627 100644 --- a/go/vt/proto/vtrpc/vtrpc_vtproto.pb.go +++ b/go/vt/proto/vtrpc/vtrpc_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: vtrpc.proto package vtrpc import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -18,6 +19,50 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *CallerID) CloneVT() *CallerID { + if m == nil { + return (*CallerID)(nil) + } + r := &CallerID{ + Principal: m.Principal, + Component: m.Component, + Subcomponent: m.Subcomponent, + } + if rhs := m.Groups; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Groups = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CallerID) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *RPCError) CloneVT() *RPCError { + if m == nil { + return (*RPCError)(nil) + } + r := &RPCError{ + Message: m.Message, + Code: m.Code, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RPCError) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *CallerID) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil diff --git a/go/vt/proto/vttest/vttest.pb.go b/go/vt/proto/vttest/vttest.pb.go index 0ab40f16929..4b4f269d38c 100644 --- a/go/vt/proto/vttest/vttest.pb.go +++ b/go/vt/proto/vttest/vttest.pb.go @@ -41,7 +41,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: vttest.proto diff --git a/go/vt/proto/vttest/vttest_vtproto.pb.go b/go/vt/proto/vttest/vttest_vtproto.pb.go index a7474446a26..f1dee298011 100644 --- a/go/vt/proto/vttest/vttest_vtproto.pb.go +++ b/go/vt/proto/vttest/vttest_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: vttest.proto package vttest import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -19,6 +20,83 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *Shard) CloneVT() *Shard { + if m == nil { + return (*Shard)(nil) + } + r := &Shard{ + Name: m.Name, + DbNameOverride: m.DbNameOverride, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Shard) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Keyspace) CloneVT() *Keyspace { + if m == nil { + return (*Keyspace)(nil) + } + r := &Keyspace{ + Name: m.Name, + ServedFrom: m.ServedFrom, + ReplicaCount: m.ReplicaCount, + RdonlyCount: m.RdonlyCount, + } + if rhs := m.Shards; rhs != nil { + tmpContainer := make([]*Shard, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Shards = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Keyspace) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *VTTestTopology) CloneVT() *VTTestTopology { + if m == nil { + return (*VTTestTopology)(nil) + } + r := &VTTestTopology{ + RoutingRules: m.RoutingRules.CloneVT(), + } + if rhs := m.Keyspaces; rhs != nil { + tmpContainer := make([]*Keyspace, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Keyspaces = tmpContainer + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *VTTestTopology) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Shard) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil diff --git a/go/vt/vtgr/controller/controller.go b/go/vt/proto/vttime/cached_size.go similarity index 61% rename from go/vt/vtgr/controller/controller.go rename to go/vt/proto/vttime/cached_size.go index 2b2c36cd320..e34da16852c 100644 --- a/go/vt/vtgr/controller/controller.go +++ b/go/vt/proto/vttime/cached_size.go @@ -13,14 +13,23 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by Sizegen. DO NOT EDIT. -package controller +package vttime -import ( - "math/rand" - "time" -) +import hack "vitess.io/vitess/go/hack" -func init() { - rand.Seed(time.Now().UnixNano()) +func (cached *Time) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(64) + } + // field unknownFields []byte + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownFields))) + } + return size } diff --git a/go/vt/proto/vttime/vttime.pb.go b/go/vt/proto/vttime/vttime.pb.go index 96ac33eecd9..5cdf3f616ce 100644 --- a/go/vt/proto/vttime/vttime.pb.go +++ b/go/vt/proto/vttime/vttime.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.21.3 // source: vttime.proto diff --git a/go/vt/proto/vttime/vttime_vtproto.pb.go b/go/vt/proto/vttime/vttime_vtproto.pb.go index d1e1ce8a4cc..aa53a902df5 100644 --- a/go/vt/proto/vttime/vttime_vtproto.pb.go +++ b/go/vt/proto/vttime/vttime_vtproto.pb.go @@ -1,11 +1,12 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: vttime.proto package vttime import ( fmt "fmt" + proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" bits "math/bits" @@ -18,6 +19,44 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +func (m *Time) CloneVT() *Time { + if m == nil { + return (*Time)(nil) + } + r := &Time{ + Seconds: m.Seconds, + Nanoseconds: m.Nanoseconds, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Time) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Duration) CloneVT() *Duration { + if m == nil { + return (*Duration)(nil) + } + r := &Duration{ + Seconds: m.Seconds, + Nanos: m.Nanos, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Duration) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Time) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil diff --git a/go/vt/schema/ddl_strategy.go b/go/vt/schema/ddl_strategy.go index d56b8004ab8..88400d423fd 100644 --- a/go/vt/schema/ddl_strategy.go +++ b/go/vt/schema/ddl_strategy.go @@ -19,12 +19,16 @@ package schema import ( "fmt" "regexp" + "strconv" + "time" "github.com/google/shlex" ) var ( - strategyParserRegexp = regexp.MustCompile(`^([\S]+)\s+(.*)$`) + strategyParserRegexp = regexp.MustCompile(`^([\S]+)\s+(.*)$`) + cutOverThresholdFlagRegexp = regexp.MustCompile(fmt.Sprintf(`^[-]{1,2}%s=(.*?)$`, cutOverThresholdFlag)) + retainArtifactsFlagRegexp = regexp.MustCompile(fmt.Sprintf(`^[-]{1,2}%s=(.*?)$`, retainArtifactsFlag)) ) const ( @@ -39,8 +43,11 @@ const ( allowConcurrentFlag = "allow-concurrent" preferInstantDDL = "prefer-instant-ddl" fastRangeRotationFlag = "fast-range-rotation" + cutOverThresholdFlag = "cut-over-threshold" + retainArtifactsFlag = "retain-artifacts" vreplicationTestSuite = "vreplication-test-suite" allowForeignKeysFlag = "unsafe-allow-foreign-keys" + analyzeTableFlag = "analyze-table" ) // DDLStrategy suggests how an ALTER TABLE should run (e.g. "direct", "online", "gh-ost" or "pt-osc") @@ -102,6 +109,12 @@ func ParseDDLStrategy(strategyVariable string) (*DDLStrategySetting, error) { default: return nil, fmt.Errorf("Unknown online DDL strategy: '%v'", strategy) } + if _, err := setting.CutOverThreshold(); err != nil { + return nil, err + } + if _, err := setting.RetainArtifactsDuration(); err != nil { + return nil, err + } return setting, nil } @@ -177,6 +190,60 @@ func (setting *DDLStrategySetting) IsFastRangeRotationFlag() bool { return setting.hasFlag(fastRangeRotationFlag) } +// isCutOverThresholdFlag returns true when given option denotes a `--cut-over-threshold=[...]` flag +func isCutOverThresholdFlag(opt string) (string, bool) { + submatch := cutOverThresholdFlagRegexp.FindStringSubmatch(opt) + if len(submatch) == 0 { + return "", false + } + return submatch[1], true +} + +// isRetainArtifactsFlag returns true when given option denotes a `--retain-artifacts=[...]` flag +func isRetainArtifactsFlag(opt string) (string, bool) { + submatch := retainArtifactsFlagRegexp.FindStringSubmatch(opt) + if len(submatch) == 0 { + return "", false + } + return submatch[1], true +} + +// CutOverThreshold returns a the duration threshold indicated by --cut-over-threshold +func (setting *DDLStrategySetting) CutOverThreshold() (d time.Duration, err error) { + // We do some ugly manual parsing of --cut-over-threshold value + opts, _ := shlex.Split(setting.Options) + for _, opt := range opts { + if val, isCutOver := isCutOverThresholdFlag(opt); isCutOver { + // value is possibly quoted + if s, err := strconv.Unquote(val); err == nil { + val = s + } + if val != "" { + d, err = time.ParseDuration(val) + } + } + } + return d, err +} + +// RetainArtifactsDuration returns a the duration indicated by --retain-artifacts +func (setting *DDLStrategySetting) RetainArtifactsDuration() (d time.Duration, err error) { + // We do some ugly manual parsing of --retain-artifacts + opts, _ := shlex.Split(setting.Options) + for _, opt := range opts { + if val, isRetainArtifacts := isRetainArtifactsFlag(opt); isRetainArtifacts { + // value is possibly quoted + if s, err := strconv.Unquote(val); err == nil { + val = s + } + if val != "" { + d, err = time.ParseDuration(val) + } + } + } + return d, err +} + // IsVreplicationTestSuite checks if strategy options include --vreplicatoin-test-suite func (setting *DDLStrategySetting) IsVreplicationTestSuite() bool { return setting.hasFlag(vreplicationTestSuite) @@ -187,11 +254,22 @@ func (setting *DDLStrategySetting) IsAllowForeignKeysFlag() bool { return setting.hasFlag(allowForeignKeysFlag) } +// IsAnalyzeTableFlag checks if strategy options include --analyze-table +func (setting *DDLStrategySetting) IsAnalyzeTableFlag() bool { + return setting.hasFlag(analyzeTableFlag) +} + // RuntimeOptions returns the options used as runtime flags for given strategy, removing any internal hint options func (setting *DDLStrategySetting) RuntimeOptions() []string { opts, _ := shlex.Split(setting.Options) validOpts := []string{} for _, opt := range opts { + if _, ok := isCutOverThresholdFlag(opt); ok { + continue + } + if _, ok := isRetainArtifactsFlag(opt); ok { + continue + } switch { case isFlag(opt, declarativeFlag): case isFlag(opt, skipTopoFlag): @@ -206,6 +284,7 @@ func (setting *DDLStrategySetting) RuntimeOptions() []string { case isFlag(opt, fastRangeRotationFlag): case isFlag(opt, vreplicationTestSuite): case isFlag(opt, allowForeignKeysFlag): + case isFlag(opt, analyzeTableFlag): default: validOpts = append(validOpts, opt) } diff --git a/go/vt/schema/ddl_strategy_test.go b/go/vt/schema/ddl_strategy_test.go index 610cb8b9ed3..8ad6ff592dc 100644 --- a/go/vt/schema/ddl_strategy_test.go +++ b/go/vt/schema/ddl_strategy_test.go @@ -19,6 +19,7 @@ package schema import ( "strings" "testing" + "time" "github.com/stretchr/testify/assert" ) @@ -38,6 +39,132 @@ func TestIsDirect(t *testing.T) { assert.True(t, DDLStrategy("something").IsDirect()) } +func TestIsCutOverThresholdFlag(t *testing.T) { + tt := []struct { + s string + expect bool + val string + d time.Duration + }{ + { + s: "something", + }, + { + s: "-cut-over-threshold", + }, + { + s: "--cut-over-threshold", + }, + { + s: "--cut-over-threshold=", + expect: true, + }, + { + s: "--cut-over-threshold=0", + expect: true, + val: "0", + d: 0, + }, + { + s: "-cut-over-threshold=0", + expect: true, + val: "0", + d: 0, + }, + { + s: "--cut-over-threshold=1m", + expect: true, + val: "1m", + d: time.Minute, + }, + { + s: `--cut-over-threshold="1m"`, + expect: true, + val: `"1m"`, + d: time.Minute, + }, + } + for _, ts := range tt { + t.Run(ts.s, func(t *testing.T) { + setting, err := ParseDDLStrategy("online " + ts.s) + assert.NoError(t, err) + + val, isCutOver := isCutOverThresholdFlag(ts.s) + assert.Equal(t, ts.expect, isCutOver) + assert.Equal(t, ts.val, val) + + if ts.expect { + d, err := setting.CutOverThreshold() + assert.NoError(t, err) + assert.Equal(t, ts.d, d) + } + }) + } +} + +func TestIsExpireArtifactsFlag(t *testing.T) { + tt := []struct { + s string + expect bool + val string + d time.Duration + }{ + { + s: "something", + }, + { + s: "-retain-artifacts", + }, + { + s: "--retain-artifacts", + }, + { + s: "--retain-artifacts=", + expect: true, + }, + { + s: "--retain-artifacts=0", + expect: true, + val: "0", + d: 0, + }, + { + s: "-retain-artifacts=0", + expect: true, + val: "0", + d: 0, + }, + { + s: "--retain-artifacts=1m", + expect: true, + val: "1m", + d: time.Minute, + }, + { + s: `--retain-artifacts="1m"`, + expect: true, + val: `"1m"`, + d: time.Minute, + }, + } + for _, ts := range tt { + t.Run(ts.s, func(t *testing.T) { + setting, err := ParseDDLStrategy("online " + ts.s) + assert.NoError(t, err) + + val, isRetainArtifacts := isRetainArtifactsFlag(ts.s) + assert.Equal(t, ts.expect, isRetainArtifacts) + assert.Equal(t, ts.val, val) + + if ts.expect { + d, err := setting.RetainArtifactsDuration() + assert.NoError(t, err) + assert.Equal(t, ts.d, d) + } + }) + } +} + func TestParseDDLStrategy(t *testing.T) { tt := []struct { strategyVariable string @@ -52,6 +179,9 @@ func TestParseDDLStrategy(t *testing.T) { fastOverRevertible bool fastRangeRotation bool allowForeignKeys bool + analyzeTable bool + cutOverThreshold time.Duration + expireArtifacts time.Duration runtimeOptions string err error }{ @@ -166,6 +296,27 @@ func TestParseDDLStrategy(t *testing.T) { runtimeOptions: "", allowForeignKeys: true, }, + { + strategyVariable: "vitess --cut-over-threshold=5m", + strategy: DDLStrategyVitess, + options: "--cut-over-threshold=5m", + runtimeOptions: "", + cutOverThreshold: 5 * time.Minute, + }, + { + strategyVariable: "vitess --retain-artifacts=4m", + strategy: DDLStrategyVitess, + options: "--retain-artifacts=4m", + runtimeOptions: "", + expireArtifacts: 4 * time.Minute, + }, + { + strategyVariable: "vitess --analyze-table", + strategy: DDLStrategyVitess, + options: "--analyze-table", + runtimeOptions: "", + analyzeTable: true, + }, } for _, ts := range tt { t.Run(ts.strategyVariable, func(t *testing.T) { @@ -181,6 +332,10 @@ func TestParseDDLStrategy(t *testing.T) { assert.Equal(t, ts.fastOverRevertible, setting.IsPreferInstantDDL()) assert.Equal(t, ts.fastRangeRotation, setting.IsFastRangeRotationFlag()) assert.Equal(t, ts.allowForeignKeys, setting.IsAllowForeignKeysFlag()) + assert.Equal(t, ts.analyzeTable, setting.IsAnalyzeTableFlag()) + cutOverThreshold, err := setting.CutOverThreshold() + assert.NoError(t, err) + assert.Equal(t, ts.cutOverThreshold, cutOverThreshold) runtimeOptions := strings.Join(setting.RuntimeOptions(), " ") assert.Equal(t, ts.runtimeOptions, runtimeOptions) @@ -190,4 +345,16 @@ func TestParseDDLStrategy(t *testing.T) { _, err := ParseDDLStrategy("other") assert.Error(t, err) } + { + _, err := ParseDDLStrategy("online --cut-over-threshold=X") + assert.Error(t, err) + } + { + _, err := ParseDDLStrategy("online --cut-over-threshold=3") + assert.Error(t, err) + } + { + _, err := ParseDDLStrategy("online --retain-artifacts=3") + assert.Error(t, err) + } } diff --git a/go/vt/schema/online_ddl.go b/go/vt/schema/online_ddl.go index 7141d9ec71b..a06866e996a 100644 --- a/go/vt/schema/online_ddl.go +++ b/go/vt/schema/online_ddl.go @@ -24,7 +24,6 @@ import ( "regexp" "strconv" "strings" - "time" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" @@ -35,6 +34,7 @@ var ( onlineDdlUUIDRegexp = regexp.MustCompile(`^[0-f]{8}_[0-f]{4}_[0-f]{4}_[0-f]{4}_[0-f]{12}$`) onlineDDLGeneratedTableNameRegexp = regexp.MustCompile(`^_[0-f]{8}_[0-f]{4}_[0-f]{4}_[0-f]{4}_[0-f]{12}_([0-9]{14})_(gho|ghc|del|new|vrepl)$`) ptOSCGeneratedTableNameRegexp = regexp.MustCompile(`^_.*_old$`) + migrationContextValidatorRegexp = regexp.MustCompile(`^[\w:-]*$`) ) var ( @@ -53,6 +53,14 @@ const ( RevertActionStr = "revert" ) +// ValidateMigrationContext validates that the given migration context only uses valid characters +func ValidateMigrationContext(migrationContext string) error { + if migrationContextValidatorRegexp.MatchString(migrationContext) { + return nil + } + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid characters in migration_context %v. Use alphanumeric, dash, underscore and colon only", migrationContext) +} + // when validateWalk returns true, then the child nodes are also visited func validateWalk(node sqlparser.SQLNode, allowForeignKeys bool) (kontinue bool, err error) { switch node.(type) { @@ -84,19 +92,20 @@ const ( // OnlineDDL encapsulates the relevant information in an online schema change request type OnlineDDL struct { - Keyspace string `json:"keyspace,omitempty"` - Table string `json:"table,omitempty"` - Schema string `json:"schema,omitempty"` - SQL string `json:"sql,omitempty"` - UUID string `json:"uuid,omitempty"` - Strategy DDLStrategy `json:"strategy,omitempty"` - Options string `json:"options,omitempty"` - RequestTime int64 `json:"time_created,omitempty"` - MigrationContext string `json:"context,omitempty"` - Status OnlineDDLStatus `json:"status,omitempty"` - TabletAlias string `json:"tablet,omitempty"` - Retries int64 `json:"retries,omitempty"` - ReadyToComplete int64 `json:"ready_to_complete,omitempty"` + Keyspace string `json:"keyspace,omitempty"` + Table string `json:"table,omitempty"` + Schema string `json:"schema,omitempty"` + SQL string `json:"sql,omitempty"` + UUID string `json:"uuid,omitempty"` + Strategy DDLStrategy `json:"strategy,omitempty"` + Options string `json:"options,omitempty"` + // Stateful fields: + MigrationContext string `json:"context,omitempty"` + Status OnlineDDLStatus `json:"status,omitempty"` + TabletAlias string `json:"tablet,omitempty"` + Retries int64 `json:"retries,omitempty"` + ReadyToComplete int64 `json:"ready_to_complete,omitempty"` + WasReadyToComplete int64 `json:"was_ready_to_complete,omitempty"` } // FromJSON creates an OnlineDDL from json @@ -249,7 +258,6 @@ func NewOnlineDDL(keyspace string, table string, sql string, ddlStrategySetting UUID: onlineDDLUUID, Strategy: ddlStrategySetting.Strategy, Options: ddlStrategySetting.Options, - RequestTime: time.Now().UnixNano(), MigrationContext: migrationContext, Status: OnlineDDLStatusRequested, }, nil @@ -274,6 +282,11 @@ func OnlineDDLFromCommentedStatement(stmt sqlparser.Statement) (onlineDDL *Onlin default: return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported statement for Online DDL: %v", sqlparser.String(stmt)) } + // We clone the comments because they will end up being cached by the query planner. Then, the Directive() function actually modifies the comments. + // If comments are shared in cache, and Directive() modifies it, then we have a concurrency issue when someone else wants to read the comments. + // By cloning the comments we remove the concurrency problem. + comments = sqlparser.CloneRefOfParsedComments(comments) + comments.ResetDirectives() if comments.Length() == 0 { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "no comments found in statement: %v", sqlparser.String(stmt)) @@ -328,11 +341,6 @@ func (onlineDDL *OnlineDDL) StrategySetting() *DDLStrategySetting { return NewDDLStrategySetting(onlineDDL.Strategy, onlineDDL.Options) } -// RequestTimeSeconds converts request time to seconds (losing nano precision) -func (onlineDDL *OnlineDDL) RequestTimeSeconds() int64 { - return onlineDDL.RequestTime / int64(time.Second) -} - // ToJSON exports this onlineDDL to JSON func (onlineDDL *OnlineDDL) ToJSON() ([]byte, error) { return json.Marshal(onlineDDL) diff --git a/go/vt/schema/online_ddl_test.go b/go/vt/schema/online_ddl_test.go index c559c1e75f1..dbcad5454dc 100644 --- a/go/vt/schema/online_ddl_test.go +++ b/go/vt/schema/online_ddl_test.go @@ -322,14 +322,6 @@ func TestNewOnlineDDLs(t *testing.T) { } func TestNewOnlineDDLsForeignKeys(t *testing.T) { - type expect struct { - sqls []string - notDDL bool - parseError bool - isError bool - expectErrorText string - isView bool - } queries := []string{ "alter table corder add FOREIGN KEY my_fk(customer_id) references customer(customer_id)", "create table t1 (id int primary key, i int, foreign key (i) references parent(id))", @@ -400,3 +392,30 @@ func TestOnlineDDLFromCommentedStatement(t *testing.T) { }) } } + +func TestValidateMigrationContext(t *testing.T) { + tcases := []struct { + m string + expectError bool + }{ + {"", false}, + {"abc", false}, + {"abc-def", false}, + {"abc-DEF", false}, + {"abc-def-123", false}, + {"under_score:abc-DEF-123", false}, + {"~", true}, + {",", true}, + {"abc^def", true}, + } + for _, tcase := range tcases { + t.Run(tcase.m, func(t *testing.T) { + err := ValidateMigrationContext(tcase.m) + if tcase.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/go/vt/schemadiff/diff.go b/go/vt/schemadiff/diff.go index cd89770386e..fce1e5e99db 100644 --- a/go/vt/schemadiff/diff.go +++ b/go/vt/schemadiff/diff.go @@ -148,10 +148,10 @@ func DiffViews(create1 *sqlparser.CreateView, create2 *sqlparser.CreateView, hin } } -// DiffSchemasSQL compares two schemas and returns the list of diffs that turn +// DiffSchemasSQL compares two schemas and returns the rich diff that turns // 1st schema into 2nd. Schemas are build from SQL, each of which can contain an arbitrary number of // CREATE TABLE and CREATE VIEW statements. -func DiffSchemasSQL(sql1 string, sql2 string, hints *DiffHints) ([]EntityDiff, error) { +func DiffSchemasSQL(sql1 string, sql2 string, hints *DiffHints) (*SchemaDiff, error) { schema1, err := NewSchemaFromSQL(sql1) if err != nil { return nil, err @@ -160,17 +160,17 @@ func DiffSchemasSQL(sql1 string, sql2 string, hints *DiffHints) ([]EntityDiff, e if err != nil { return nil, err } - return schema1.Diff(schema2, hints) + return schema1.SchemaDiff(schema2, hints) } -// DiffSchemasSQL compares two schemas and returns the list of diffs that turn +// DiffSchemas compares two schemas and returns the list of diffs that turn // 1st schema into 2nd. Any of the schemas may be nil. -func DiffSchemas(schema1 *Schema, schema2 *Schema, hints *DiffHints) ([]EntityDiff, error) { +func DiffSchemas(schema1 *Schema, schema2 *Schema, hints *DiffHints) (*SchemaDiff, error) { if schema1 == nil { schema1 = newEmptySchema() } if schema2 == nil { schema2 = newEmptySchema() } - return schema1.Diff(schema2, hints) + return schema1.SchemaDiff(schema2, hints) } diff --git a/go/vt/schemadiff/diff_test.go b/go/vt/schemadiff/diff_test.go index 0b0251561d5..2f8d913f042 100644 --- a/go/vt/schemadiff/diff_test.go +++ b/go/vt/schemadiff/diff_test.go @@ -599,21 +599,21 @@ func TestDiffSchemas(t *testing.T) { tableRename: TableRenameHeuristicStatement, }, { - name: "identical tables: drop and create", + name: "drop and create all", from: "create table t1a(id int primary key); create table t2a(id int unsigned primary key); create table t3a(id smallint primary key); ", to: "create table t1b(id bigint primary key); create table t2b(id int unsigned primary key); create table t3b(id int primary key); ", diffs: []string{ - "drop table t1a", - "drop table t2a", "drop table t3a", + "drop table t2a", + "drop table t1a", "create table t1b (\n\tid bigint,\n\tprimary key (id)\n)", "create table t2b (\n\tid int unsigned,\n\tprimary key (id)\n)", "create table t3b (\n\tid int,\n\tprimary key (id)\n)", }, cdiffs: []string{ - "DROP TABLE `t1a`", - "DROP TABLE `t2a`", "DROP TABLE `t3a`", + "DROP TABLE `t2a`", + "DROP TABLE `t1a`", "CREATE TABLE `t1b` (\n\t`id` bigint,\n\tPRIMARY KEY (`id`)\n)", "CREATE TABLE `t2b` (\n\t`id` int unsigned,\n\tPRIMARY KEY (`id`)\n)", "CREATE TABLE `t3b` (\n\t`id` int,\n\tPRIMARY KEY (`id`)\n)", @@ -626,17 +626,59 @@ func TestDiffSchemas(t *testing.T) { diffs: []string{ "drop table t3a", "create table t1b (\n\tid bigint,\n\tprimary key (id)\n)", - "rename table t1a to t3b", "rename table t2a to t2b", + "rename table t1a to t3b", }, cdiffs: []string{ "DROP TABLE `t3a`", "CREATE TABLE `t1b` (\n\t`id` bigint,\n\tPRIMARY KEY (`id`)\n)", - "RENAME TABLE `t1a` TO `t3b`", "RENAME TABLE `t2a` TO `t2b`", + "RENAME TABLE `t1a` TO `t3b`", }, tableRename: TableRenameHeuristicStatement, }, + { + name: "tables with irregular names", + from: "create table `t.2`(id int primary key); create table t3(`i.d` int primary key)", + to: "create table `t.2` (id bigint primary key); create table t3(`i.d` int unsigned primary key)", + diffs: []string{ + "alter table `t.2` modify column id bigint", + "alter table t3 modify column `i.d` int unsigned", + }, + cdiffs: []string{ + "ALTER TABLE `t.2` MODIFY COLUMN `id` bigint", + "ALTER TABLE `t3` MODIFY COLUMN `i.d` int unsigned", + }, + }, + // Foreign keys + { + name: "create tables with foreign keys, expect specific order", + to: "create table t7(id int primary key); create table t5 (id int primary key, i int, constraint f5 foreign key (i) references t7(id)); create table t4 (id int primary key, i int, constraint f4 foreign key (i) references t7(id));", + diffs: []string{ + "create table t7 (\n\tid int,\n\tprimary key (id)\n)", + "create table t4 (\n\tid int,\n\ti int,\n\tprimary key (id),\n\tkey f4 (i),\n\tconstraint f4 foreign key (i) references t7 (id)\n)", + "create table t5 (\n\tid int,\n\ti int,\n\tprimary key (id),\n\tkey f5 (i),\n\tconstraint f5 foreign key (i) references t7 (id)\n)", + }, + cdiffs: []string{ + "CREATE TABLE `t7` (\n\t`id` int,\n\tPRIMARY KEY (`id`)\n)", + "CREATE TABLE `t4` (\n\t`id` int,\n\t`i` int,\n\tPRIMARY KEY (`id`),\n\tKEY `f4` (`i`),\n\tCONSTRAINT `f4` FOREIGN KEY (`i`) REFERENCES `t7` (`id`)\n)", + "CREATE TABLE `t5` (\n\t`id` int,\n\t`i` int,\n\tPRIMARY KEY (`id`),\n\tKEY `f5` (`i`),\n\tCONSTRAINT `f5` FOREIGN KEY (`i`) REFERENCES `t7` (`id`)\n)", + }, + }, + { + name: "drop tables with foreign keys, expect specific order", + from: "create table t7(id int primary key); create table t5 (id int primary key, i int, constraint f5 foreign key (i) references t7(id)); create table t4 (id int primary key, i int, constraint f4 foreign key (i) references t7(id));", + diffs: []string{ + "drop table t5", + "drop table t4", + "drop table t7", + }, + cdiffs: []string{ + "DROP TABLE `t5`", + "DROP TABLE `t4`", + "DROP TABLE `t7`", + }, + }, // Views { name: "identical views", @@ -719,20 +761,36 @@ func TestDiffSchemas(t *testing.T) { from: "create view v1 as select * from t1; create table t1(id int primary key); create table t2(id int primary key); create view v2 as select * from t2; create table t3(id int primary key);", to: "create view v0 as select * from v2, t2; create table t4(id int primary key); create view v2 as select id from t2; create table t2(id bigint primary key); create table t3(id int primary key)", diffs: []string{ - "drop table t1", "drop view v1", + "drop table t1", "alter table t2 modify column id bigint", "alter view v2 as select id from t2", - "create table t4 (\n\tid int,\n\tprimary key (id)\n)", "create view v0 as select * from v2, t2", + "create table t4 (\n\tid int,\n\tprimary key (id)\n)", }, cdiffs: []string{ - "DROP TABLE `t1`", "DROP VIEW `v1`", + "DROP TABLE `t1`", "ALTER TABLE `t2` MODIFY COLUMN `id` bigint", "ALTER VIEW `v2` AS SELECT `id` FROM `t2`", - "CREATE TABLE `t4` (\n\t`id` int,\n\tPRIMARY KEY (`id`)\n)", "CREATE VIEW `v0` AS SELECT * FROM `v2`, `t2`", + "CREATE TABLE `t4` (\n\t`id` int,\n\tPRIMARY KEY (`id`)\n)", + }, + }, + { + // Making sure schemadiff distinguishes between VIEWs with different casing + name: "case insensitive views", + from: "create view v1 as select * from t; create table t(id int primary key); create view V1 as select * from t", + to: "", + diffs: []string{ + "drop view v1", + "drop view V1", + "drop table t", + }, + cdiffs: []string{ + "DROP VIEW `v1`", + "DROP VIEW `V1`", + "DROP TABLE `t`", }, }, } @@ -741,13 +799,15 @@ func TestDiffSchemas(t *testing.T) { hints := &DiffHints{ TableRenameStrategy: ts.tableRename, } - diffs, err := DiffSchemasSQL(ts.from, ts.to, hints) + diff, err := DiffSchemasSQL(ts.from, ts.to, hints) if ts.expectError != "" { require.Error(t, err) assert.Contains(t, err.Error(), ts.expectError) } else { assert.NoError(t, err) + diffs, err := diff.OrderedDiffs() + assert.NoError(t, err) statements := []string{} cstatements := []string{} for _, d := range diffs { @@ -776,20 +836,20 @@ func TestDiffSchemas(t *testing.T) { { // Validate "apply()" on "from" converges with "to" schema1, err := NewSchemaFromSQL(ts.from) - assert.NoError(t, err) + require.NoError(t, err) schema1SQL := schema1.ToSQL() schema2, err := NewSchemaFromSQL(ts.to) - assert.NoError(t, err) + require.NoError(t, err) applied, err := schema1.Apply(diffs) require.NoError(t, err) // validate schema1 unaffected by Apply assert.Equal(t, schema1SQL, schema1.ToSQL()) - appliedDiff, err := schema2.Diff(applied, hints) + appliedDiff, err := schema2.SchemaDiff(applied, hints) require.NoError(t, err) - assert.Empty(t, appliedDiff) + assert.True(t, appliedDiff.Empty()) assert.Equal(t, schema2.ToQueries(), applied.ToQueries()) } } @@ -838,7 +898,9 @@ func TestSchemaApplyError(t *testing.T) { assert.NoError(t, err) { - diffs, err := schema1.Diff(schema2, hints) + diff, err := schema1.SchemaDiff(schema2, hints) + require.NoError(t, err) + diffs, err := diff.OrderedDiffs() assert.NoError(t, err) assert.NotEmpty(t, diffs) _, err = schema1.Apply(diffs) @@ -847,7 +909,9 @@ func TestSchemaApplyError(t *testing.T) { require.Error(t, err, "expected error applying to schema2. diffs: %v", diffs) } { - diffs, err := schema2.Diff(schema1, hints) + diff, err := schema2.SchemaDiff(schema1, hints) + require.NoError(t, err) + diffs, err := diff.OrderedDiffs() assert.NoError(t, err) assert.NotEmpty(t, diffs, "schema1: %v, schema2: %v", schema1.ToSQL(), schema2.ToSQL()) _, err = schema2.Apply(diffs) diff --git a/go/vt/schemadiff/errors.go b/go/vt/schemadiff/errors.go index 42dd304e75a..771c650e51d 100644 --- a/go/vt/schemadiff/errors.go +++ b/go/vt/schemadiff/errors.go @@ -1,8 +1,25 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package schemadiff import ( "errors" "fmt" + "strings" "vitess.io/vitess/go/sqlescape" ) @@ -16,6 +33,28 @@ var ( ErrExpectedCreateView = errors.New("expected a CREATE VIEW statement") ) +type ImpossibleApplyDiffOrderError struct { + UnorderedDiffs []EntityDiff + ConflictingDiffs []EntityDiff +} + +func (e *ImpossibleApplyDiffOrderError) Error() string { + var b strings.Builder + b.WriteString("no valid applicable order for diffs. Diffs found conflicting:") + for _, s := range e.ConflictingStatements() { + b.WriteString("\n") + b.WriteString(s) + } + return b.String() +} + +func (e *ImpossibleApplyDiffOrderError) ConflictingStatements() (result []string) { + for _, diff := range e.ConflictingDiffs { + result = append(result, diff.CanonicalStatementString()) + } + return result +} + type UnsupportedEntityError struct { Entity string Statement string @@ -334,3 +373,32 @@ type ViewDependencyUnresolvedError struct { func (e *ViewDependencyUnresolvedError) Error() string { return fmt.Sprintf("view %s has unresolved/loop dependencies", sqlescape.EscapeID(e.View)) } + +type InvalidColumnReferencedInViewError struct { + View string + Column string + Ambiguous bool +} + +func (e *InvalidColumnReferencedInViewError) Error() string { + if e.Ambiguous { + return fmt.Sprintf("view %s references unqualified but non unique column %s", sqlescape.EscapeID(e.View), sqlescape.EscapeID(e.Column)) + } + return fmt.Sprintf("view %s references unqualified but non-existent column %s", sqlescape.EscapeID(e.View), sqlescape.EscapeID(e.Column)) +} + +type InvalidStarExprInViewError struct { + View string +} + +func (e *InvalidStarExprInViewError) Error() string { + return fmt.Sprintf("view %s has invalid star expression", sqlescape.EscapeID(e.View)) +} + +type EntityNotFoundError struct { + Name string +} + +func (e *EntityNotFoundError) Error() string { + return fmt.Sprintf("entity %s not found", sqlescape.EscapeID(e.Name)) +} diff --git a/go/vt/schemadiff/schema.go b/go/vt/schemadiff/schema.go index 0e9ae4c4df1..a9ef60fbb27 100644 --- a/go/vt/schemadiff/schema.go +++ b/go/vt/schemadiff/schema.go @@ -24,7 +24,10 @@ import ( "sort" "strings" + "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/semantics" ) // Schema represents a database schema, which may contain entities such as tables and views. @@ -67,10 +70,8 @@ func NewSchemaFromEntities(entities []Entity) (*Schema, error) { return nil, &UnsupportedEntityError{Entity: c.Name(), Statement: c.Create().CanonicalStatementString()} } } - if err := schema.normalize(); err != nil { - return nil, err - } - return schema, nil + err := schema.normalize() + return schema, err } // NewSchemaFromStatements creates a valid and normalized schema based on list of valid statements @@ -129,20 +130,20 @@ func NewSchemaFromSQL(sql string) (*Schema, error) { } // getForeignKeyParentTableNames analyzes a CREATE TABLE definition and extracts all referened foreign key tables names. -// A table name may appear twice in the result output, it it is referenced by more than one foreign key -func getForeignKeyParentTableNames(createTable *sqlparser.CreateTable) (names []string, err error) { +// A table name may appear twice in the result output, if it is referenced by more than one foreign key +func getForeignKeyParentTableNames(createTable *sqlparser.CreateTable) (names []string) { for _, cs := range createTable.TableSpec.Constraints { if check, ok := cs.Details.(*sqlparser.ForeignKeyDefinition); ok { parentTableName := check.ReferenceDefinition.ReferencedTable.Name.String() names = append(names, parentTableName) } } - return names, err + return names } // getViewDependentTableNames analyzes a CREATE VIEW definition and extracts all tables/views read by this view -func getViewDependentTableNames(createView *sqlparser.CreateView) (names []string, err error) { - err = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { +func getViewDependentTableNames(createView *sqlparser.CreateView) (names []string) { + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { switch node := node.(type) { case *sqlparser.TableName: names = append(names, node.Name.String()) @@ -155,12 +156,14 @@ func getViewDependentTableNames(createView *sqlparser.CreateView) (names []strin } return true, nil }, createView) - return names, err + return names } // normalize is called as part of Schema creation process. The user may only get a hold of normalized schema. // It validates some cross-entity constraints, and orders entity based on dependencies (e.g. tables, views that read from tables, 2nd level views, etc.) func (s *Schema) normalize() error { + var errs error + s.named = make(map[string]Entity, len(s.tables)+len(s.views)) s.sorted = make([]Entity, 0, len(s.tables)+len(s.views)) // Verify no two entities share same name @@ -228,10 +231,7 @@ func (s *Schema) normalize() error { continue } // Not handled. Is this view dependent on already handled objects? - referencedTableNames, err := getForeignKeyParentTableNames(t.CreateTable) - if err != nil { - return err - } + referencedTableNames := getForeignKeyParentTableNames(t.CreateTable) if len(referencedTableNames) > 0 { s.foreignKeyChildren = append(s.foreignKeyChildren, t) } @@ -281,10 +281,7 @@ func (s *Schema) normalize() error { continue } // Not handled. Is this view dependent on already handled objects? - dependentNames, err := getViewDependentTableNames(v.CreateView) - if err != nil { - return err - } + dependentNames := getViewDependentTableNames(v.CreateView) if allNamesFoundInLowerLevel(dependentNames, iterationLevel) { s.sorted = append(s.sorted, v) dependencyLevels[v.Name()] = iterationLevel @@ -304,7 +301,7 @@ func (s *Schema) normalize() error { // - two or more views have a circular dependency for _, t := range s.tables { if _, ok := dependencyLevels[t.Name()]; !ok { - // We _know_ that in this iteration, at least one view is found unassigned a dependency level. + // We _know_ that in this iteration, at least one foreign key is not found. // We return the first one. return &ForeignKeyDependencyUnresolvedError{Table: t.Name()} } @@ -312,16 +309,23 @@ func (s *Schema) normalize() error { for _, v := range s.views { if _, ok := dependencyLevels[v.Name()]; !ok { // We _know_ that in this iteration, at least one view is found unassigned a dependency level. - // We return the first one. - return &ViewDependencyUnresolvedError{View: v.ViewName.Name.String()} + // We gather all the errors. + errs = errors.Join(errs, &ViewDependencyUnresolvedError{View: v.ViewName.Name.String()}) + // We still add it so it shows up in the output if that is used for anything. + s.sorted = append(s.sorted, v) } } } + // Validate views' referenced columns: do these columns actually exist in referenced tables/views? + if err := s.ValidateViewReferences(); err != nil { + errs = errors.Join(errs, err) + } + // Validate table definitions for _, t := range s.tables { if err := t.validate(); err != nil { - return err + return errors.Join(errs, err) } } colTypeEqualForForeignKey := func(a, b *sqlparser.ColumnType) bool { @@ -365,24 +369,24 @@ func (s *Schema) normalize() error { for i, col := range check.Source { coveredColumn, ok := tableColumns[col.Lowered()] if !ok { - return &InvalidColumnInForeignKeyConstraintError{Table: t.Name(), Constraint: cs.Name.String(), Column: col.String()} + return errors.Join(errs, &InvalidColumnInForeignKeyConstraintError{Table: t.Name(), Constraint: cs.Name.String(), Column: col.String()}) } referencedColumnName := check.ReferenceDefinition.ReferencedColumns[i].Lowered() referencedColumn, ok := referencedColumns[referencedColumnName] if !ok { - return &InvalidReferencedColumnInForeignKeyConstraintError{Table: t.Name(), Constraint: cs.Name.String(), ReferencedTable: referencedTableName, ReferencedColumn: referencedColumnName} + return errors.Join(errs, &InvalidReferencedColumnInForeignKeyConstraintError{Table: t.Name(), Constraint: cs.Name.String(), ReferencedTable: referencedTableName, ReferencedColumn: referencedColumnName}) } if !colTypeEqualForForeignKey(coveredColumn.Type, referencedColumn.Type) { - return &ForeignKeyColumnTypeMismatchError{Table: t.Name(), Constraint: cs.Name.String(), Column: coveredColumn.Name.String(), ReferencedTable: referencedTableName, ReferencedColumn: referencedColumnName} + return errors.Join(errs, &ForeignKeyColumnTypeMismatchError{Table: t.Name(), Constraint: cs.Name.String(), Column: coveredColumn.Name.String(), ReferencedTable: referencedTableName, ReferencedColumn: referencedColumnName}) } } if !referencedTable.columnsCoveredByInOrderIndex(check.ReferenceDefinition.ReferencedColumns) { - return &MissingForeignKeyReferencedIndexError{Table: t.Name(), Constraint: cs.Name.String(), ReferencedTable: referencedTableName} + return errors.Join(errs, &MissingForeignKeyReferencedIndexError{Table: t.Name(), Constraint: cs.Name.String(), ReferencedTable: referencedTableName}) } } } - return nil + return errs } // Entities returns this schema's entities in good order (may be applied without error) @@ -441,13 +445,17 @@ func (s *Schema) ViewNames() []string { // Diff compares this schema with another schema, and sees what it takes to make this schema look // like the other. It returns a list of diffs. -func (s *Schema) Diff(other *Schema, hints *DiffHints) (diffs []EntityDiff, err error) { +func (s *Schema) diff(other *Schema, hints *DiffHints) (diffs []EntityDiff, err error) { // dropped entities var dropDiffs []EntityDiff for _, e := range s.Entities() { if _, ok := other.named[e.Name()]; !ok { // other schema does not have the entity - dropDiffs = append(dropDiffs, e.Drop()) + // Entities are sorted in foreign key CREATE TABLE valid order (create parents first, then children). + // When issuing DROPs, we want to reverse that order. We want to first frop children, then parents. + // Instead of analyzing all relationships again, we just reverse the entire order of DROPs, foreign key + // related or not. + dropDiffs = append([]EntityDiff{e.Drop()}, dropDiffs...) } } // We iterate by order of "other" schema because we need to construct queries that will be valid @@ -753,11 +761,248 @@ func (s *Schema) apply(diffs []EntityDiff) error { // The operation does not modify this object. Instead, if successful, a new (modified) Schema is returned. func (s *Schema) Apply(diffs []EntityDiff) (*Schema, error) { dup := s.copy() - for k, v := range s.named { - dup.named[k] = v - } if err := dup.apply(diffs); err != nil { return nil, err } return dup, nil } + +// SchemaDiff calulates a rich diff between this schema and the given schema. It builds on top of diff(): +// on top of the list of diffs that can take this schema into the given schema, this function also +// evaluates the dependencies between those diffs, if any, and the resulting SchemaDiff object offers OrderedDiffs(), +// the safe ordering of diffs that, when appleid sequentially, does not produce any conflicts and keeps schema valid +// at each step. +func (s *Schema) SchemaDiff(other *Schema, hints *DiffHints) (*SchemaDiff, error) { + diffs, err := s.diff(other, hints) + if err != nil { + return nil, err + } + schemaDiff := NewSchemaDiff(s) + schemaDiff.loadDiffs(diffs) + + // Utility function to see whether the given diff has dependencies on diffs that operate on any of the given named entities, + // and if so, record that dependency + checkDependencies := func(diff EntityDiff, dependentNames []string) (dependentDiffs []EntityDiff, relationsMade bool) { + for _, dependentName := range dependentNames { + dependentDiffs = schemaDiff.diffsByEntityName(dependentName) + for _, dependentDiff := range dependentDiffs { + // 'diff' refers to an entity (call it "e") that has changed. But here we find that one of the + // entities that "e" depends on, has also changed. + relationsMade = true + schemaDiff.addDep(diff, dependentDiff, DiffDependencyOrderUnknown) + } + } + return dependentDiffs, relationsMade + } + + for _, diff := range schemaDiff.UnorderedDiffs() { + switch diff := diff.(type) { + case *CreateViewEntityDiff: + checkDependencies(diff, getViewDependentTableNames(diff.createView)) + case *AlterViewEntityDiff: + checkDependencies(diff, getViewDependentTableNames(diff.from.CreateView)) + checkDependencies(diff, getViewDependentTableNames(diff.to.CreateView)) + case *DropViewEntityDiff: + checkDependencies(diff, getViewDependentTableNames(diff.from.CreateView)) + case *CreateTableEntityDiff: + checkDependencies(diff, getForeignKeyParentTableNames(diff.CreateTable())) + case *AlterTableEntityDiff: + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + case *sqlparser.AddConstraintDefinition: + // Only interested in adding a foreign key + fk, ok := node.ConstraintDefinition.Details.(*sqlparser.ForeignKeyDefinition) + if !ok { + return true, nil + } + // We add a foreign key. Normally that's fine, expect for a couple specific scenarios + parentTableName := fk.ReferenceDefinition.ReferencedTable.Name.String() + dependentDiffs, ok := checkDependencies(diff, []string{parentTableName}) + if !ok { + // No dependency. Not interesting + return true, nil + } + for _, parentDiff := range dependentDiffs { + switch parentDiff := parentDiff.(type) { + case *CreateTableEntityDiff: + // We add a foreign key constraint onto a new table... That table must therefore be first created, + // and only then can we proceed to add the FK + schemaDiff.addDep(diff, parentDiff, DiffDependencySequentialExecution) + case *AlterTableEntityDiff: + // The current diff is ALTER TABLE ... ADD FOREIGN KEY + // and the parent table also has an ALTER TABLE. + // so if the parent's ALTER in any way modifies the referenced FK columns, that's + // a sequential execution dependency + referencedColumnNames := map[string]bool{} + for _, referencedColumn := range fk.ReferenceDefinition.ReferencedColumns { + referencedColumnNames[referencedColumn.Lowered()] = true + } + // Walk parentDiff.Statement() + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + case *sqlparser.ModifyColumn: + if referencedColumnNames[node.NewColDefinition.Name.Lowered()] { + schemaDiff.addDep(diff, parentDiff, DiffDependencySequentialExecution) + } + case *sqlparser.AddColumns: + for _, col := range node.Columns { + if referencedColumnNames[col.Name.Lowered()] { + schemaDiff.addDep(diff, parentDiff, DiffDependencySequentialExecution) + } + } + case *sqlparser.DropColumn: + if referencedColumnNames[node.Name.Name.Lowered()] { + schemaDiff.addDep(diff, parentDiff, DiffDependencySequentialExecution) + } + } + return true, nil + }, parentDiff.Statement()) + } + } + + case *sqlparser.DropKey: + if node.Type != sqlparser.ForeignKeyType { + // Not interesting + return true, nil + } + // Dropping a foreign key; we need to understand which table this foreign key used to reference. + // The DropKey statement itself only _names_ the constraint, but does not have information + // about the parent, columns, etc. So we need to find the constraint in the CreateTable statement. + for _, cs := range diff.from.CreateTable.TableSpec.Constraints { + if strings.EqualFold(cs.Name.String(), node.Name.String()) { + if check, ok := cs.Details.(*sqlparser.ForeignKeyDefinition); ok { + parentTableName := check.ReferenceDefinition.ReferencedTable.Name.String() + checkDependencies(diff, []string{parentTableName}) + } + } + } + } + + return true, nil + }, diff.Statement()) + case *DropTableEntityDiff: + // No need to handle. Any dependencies will be resolved by any of the other cases + } + } + return schemaDiff, nil +} + +func (s *Schema) ValidateViewReferences() error { + var errs error + schemaInformation := newDeclarativeSchemaInformation() + + // Remember that s.Entities() is already ordered by dependency. ie. tables first, then views + // that only depend on those tables (or on dual), then 2nd tier views, etc. + // Thus, the order of iteration below is valid and sufficient, to build + for _, e := range s.Entities() { + entityColumns, err := s.getEntityColumnNames(e.Name(), schemaInformation) + if err != nil { + errs = errors.Join(errs, err) + continue + } + schemaInformation.addTable(e.Name()) + for _, col := range entityColumns { + schemaInformation.addColumn(e.Name(), col.Lowered()) + } + } + + // Add dual table with no explicit columns for dual style expressions in views. + schemaInformation.addTable("dual") + + for _, view := range s.Views() { + sel := sqlparser.CloneSelectStatement(view.CreateView.Select) // Analyze(), below, rewrites the select; we don't want to actually modify the schema + _, err := semantics.AnalyzeStrict(sel, semanticKS.Name, schemaInformation) + formalizeErr := func(err error) error { + if err == nil { + return nil + } + switch e := err.(type) { + case *semantics.AmbiguousColumnError: + return &InvalidColumnReferencedInViewError{ + View: view.Name(), + Column: e.Column, + Ambiguous: true, + } + case *semantics.ColumnNotFoundError: + return &InvalidColumnReferencedInViewError{ + View: view.Name(), + Column: e.Column.Name.String(), + } + } + return err + } + errs = errors.Join(errs, formalizeErr(err)) + } + return errs +} + +// getEntityColumnNames returns the names of columns in given entity (either a table or a view) +func (s *Schema) getEntityColumnNames(entityName string, schemaInformation *declarativeSchemaInformation) ( + columnNames []*sqlparser.IdentifierCI, + err error, +) { + entity := s.Entity(entityName) + if entity == nil { + if strings.ToLower(entityName) == "dual" { + // this is fine. DUAL does not exist but is allowed + return nil, nil + } + return nil, &EntityNotFoundError{Name: entityName} + } + // The entity is either a table or a view + switch entity := entity.(type) { + case *CreateTableEntity: + return s.getTableColumnNames(entity), nil + case *CreateViewEntity: + return s.getViewColumnNames(entity, schemaInformation) + } + return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected entity type for %v", entityName) +} + +// getTableColumnNames returns the names of columns in given table. +func (s *Schema) getTableColumnNames(t *CreateTableEntity) (columnNames []*sqlparser.IdentifierCI) { + for _, c := range t.TableSpec.Columns { + columnNames = append(columnNames, &c.Name) + } + return columnNames +} + +// getViewColumnNames returns the names of aliased columns returned by a given view. +func (s *Schema) getViewColumnNames(v *CreateViewEntity, schemaInformation *declarativeSchemaInformation) ( + columnNames []*sqlparser.IdentifierCI, + err error, +) { + for _, node := range v.Select.GetColumns() { + switch node := node.(type) { + case *sqlparser.StarExpr: + if tableName := node.TableName.Name.String(); tableName != "" { + for _, col := range schemaInformation.Tables[tableName].Columns { + name := sqlparser.CloneRefOfIdentifierCI(&col.Name) + columnNames = append(columnNames, name) + } + } else { + dependentNames := getViewDependentTableNames(v.CreateView) + // add all columns from all referenced tables and views + for _, entityName := range dependentNames { + if schemaInformation.Tables[entityName] != nil { // is nil for dual/DUAL + for _, col := range schemaInformation.Tables[entityName].Columns { + name := sqlparser.CloneRefOfIdentifierCI(&col.Name) + columnNames = append(columnNames, name) + } + } + } + } + if len(columnNames) == 0 { + return nil, &InvalidStarExprInViewError{View: v.Name()} + } + case *sqlparser.AliasedExpr: + ci := sqlparser.NewIdentifierCI(node.ColumnName()) + columnNames = append(columnNames, &ci) + } + } + + if err != nil { + return nil, err + } + return columnNames, nil +} diff --git a/go/vt/schemadiff/schema_diff.go b/go/vt/schemadiff/schema_diff.go new file mode 100644 index 00000000000..b6c539aea95 --- /dev/null +++ b/go/vt/schemadiff/schema_diff.go @@ -0,0 +1,278 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemadiff + +import ( + "fmt" + + "vitess.io/vitess/go/mathutil" +) + +type DiffDependencyType int + +// diff dependencies in increasing restriction severity +const ( + DiffDependencyNone DiffDependencyType = iota // not a dependency + DiffDependencyOrderUnknown + DiffDependencyInOrderCompletion + DiffDependencySequentialExecution +) + +// DiffDependency indicates a dependency between two diffs, and the type of that dependency +type DiffDependency struct { + diff EntityDiff + dependentDiff EntityDiff // depends on the above diff + typ DiffDependencyType +} + +// NewDiffDependency returns a new diff dependency pairing. +func NewDiffDependency(diff EntityDiff, dependentDiff EntityDiff, typ DiffDependencyType) *DiffDependency { + return &DiffDependency{ + diff: diff, + dependentDiff: dependentDiff, + typ: typ, + } +} + +func (d *DiffDependency) hashKey() string { + return d.diff.CanonicalStatementString() + "/" + d.dependentDiff.CanonicalStatementString() +} + +// Diff returns the "benefactor" diff, on which DependentDiff() depends on, ie, should run 1st. +func (d *DiffDependency) Diff() EntityDiff { + return d.diff +} + +// DependentDiff returns the diff that depends on the "benefactor" diff, ie must run 2nd +func (d *DiffDependency) DependentDiff() EntityDiff { + return d.dependentDiff +} + +// Type returns the dependency type. Types are numeric and comparable: the higher the value, the +// stricter, or more constrained, the dependency is. +func (d *DiffDependency) Type() DiffDependencyType { + return d.typ +} + +/* +The below is adapted from https://yourbasic.org/golang/generate-permutation-slice-string/ +Licensed under https://creativecommons.org/licenses/by/3.0/ +Modified to have an early break +*/ + +// permutateDiffs calls `callback` with each permutation of a. If the function returns `true`, that means +// the callback has returned `true` for an early break, thus possibly not all permutations have been evaluated. +func permutateDiffs(a []EntityDiff, callback func([]EntityDiff) (earlyBreak bool)) (earlyBreak bool) { + if len(a) == 0 { + return false + } + return permDiff(a, callback, 0) +} + +// permDiff is a recursive function to permutate given `a` and call `callback` for each permutation. +// If `callback` returns `true`, then so does this function, and this indicates a request for an early +// break, in which case this function will not be called again. +func permDiff(a []EntityDiff, callback func([]EntityDiff) (earlyBreak bool), i int) (earlyBreak bool) { + if i > len(a) { + return callback(a) + } + if permDiff(a, callback, i+1) { + return true + } + for j := i + 1; j < len(a); j++ { + a[i], a[j] = a[j], a[i] + if permDiff(a, callback, i+1) { + return true + } + a[i], a[j] = a[j], a[i] + } + return false +} + +// SchemaDiff is a rich diff between two schemas. It includes the following: +// - The source schema (on which the diff would operate) +// - A list of SQL diffs (e.g. CREATE VIEW, ALTER TABLE, ...) +// - A map of dependencies between the diffs +// Operations on SchemaDiff are not concurrency-safe. +type SchemaDiff struct { + schema *Schema + diffs []EntityDiff + + diffMap map[string]EntityDiff // key is diff's CanonicalStatementString() + dependencies map[string]*DiffDependency + + r *mathutil.EquivalenceRelation // internal structure to help determine diffs +} + +func NewSchemaDiff(schema *Schema) *SchemaDiff { + return &SchemaDiff{ + schema: schema, + dependencies: make(map[string]*DiffDependency), + diffMap: make(map[string]EntityDiff), + r: mathutil.NewEquivalenceRelation(), + } +} + +// loadDiffs loads a list of diffs, as generated by Schema.Diff(other) function. It explodes all subsequent diffs +// into distinct diffs (which then have no subsequent diffs). Thus, the list of diffs loaded can be longer than the +// list of diffs received. +func (d *SchemaDiff) loadDiffs(diffs []EntityDiff) { + for _, diff := range diffs { + allSubsequent := AllSubsequent(diff) + for i, sdiff := range allSubsequent { + d.diffs = append(d.diffs, sdiff) + d.diffMap[sdiff.CanonicalStatementString()] = sdiff + if i > 0 { + // So this is a 2nd, 3rd etc. diff operating on same table + // Two migrations on same entity (table in our case) must run sequentially. + d.addDep(sdiff, allSubsequent[0], DiffDependencySequentialExecution) + } + d.r.Add(sdiff.CanonicalStatementString()) + // since we've exploded the subsequent diffs, we now clear any subsequent diffs + // so that they do not auto-Apply() when we compute a valid path. + sdiff.SetSubsequentDiff(nil) + } + } +} + +// addDep adds a dependency: `dependentDiff` depends on `diff`, with given `depType`. If there's an +// already existing dependency between the two diffs, then we compare the dependency type; if the new +// type has a higher order (ie stricter) then we replace the existing dependency with the new one. +func (d *SchemaDiff) addDep(diff EntityDiff, dependentDiff EntityDiff, typ DiffDependencyType) *DiffDependency { + _, _ = d.r.Relate(diff.CanonicalStatementString(), dependentDiff.CanonicalStatementString()) + diffDep := NewDiffDependency(diff, dependentDiff, typ) + if existingDep, ok := d.dependencies[diffDep.hashKey()]; ok { + if existingDep.typ >= diffDep.typ { + // nothing new here, the new dependency is weaker or equals to an existing dependency + return existingDep + } + } + // Either the dep wasn't found, or we've just introduced a dep with a more severe type + d.dependencies[diffDep.hashKey()] = diffDep + return diffDep +} + +// diffByStatementString is a utility function that returns a diff by its canonical statement string +func (d *SchemaDiff) diffByStatementString(s string) (EntityDiff, bool) { + diff, ok := d.diffMap[s] + return diff, ok +} + +// diffsByEntityName returns all diffs that apply to a given entity (table/view) +func (d *SchemaDiff) diffsByEntityName(name string) (diffs []EntityDiff) { + for _, diff := range d.diffs { + if diff.EntityName() == name { + diffs = append(diffs, diff) + } + } + return diffs +} + +// Empty returns 'true' when there are no diff entries +func (d *SchemaDiff) Empty() bool { + return len(d.diffs) == 0 +} + +// UnorderedDiffs returns all the diffs. These are not sorted by dependencies. These are basically +// the original diffs, but "flattening" any subsequent diffs they may have. as result: +// - Diffs in the returned slice have no subsequent diffs +// - The returned slice may be longer than the number of diffs supplied by loadDiffs() +func (d *SchemaDiff) UnorderedDiffs() []EntityDiff { + return d.diffs +} + +// AllDependenciess returns all known dependencies +func (d *SchemaDiff) AllDependenciess() (deps []*DiffDependency) { + for _, dep := range d.dependencies { + deps = append(deps, dep) + } + return deps +} + +// HasDependencies returns `true` if there is at least one known diff dependency. +// If this function returns `false` then that means there is no restriction whatsoever to the order of diffs. +func (d *SchemaDiff) HasDependencies() bool { + return len(d.dependencies) > 0 +} + +// AllSequentialExecutionDependencies returns all diffs that are of "sequential execution" type. +func (d *SchemaDiff) AllSequentialExecutionDependencies() (deps []*DiffDependency) { + for _, dep := range d.dependencies { + if dep.typ >= DiffDependencySequentialExecution { + deps = append(deps, dep) + } + } + return deps +} + +// HasSequentialExecutionDependencies return `true` if there is at least one "subsequential execution" type diff. +// If not, that means all diffs can be applied in parallel. +func (d *SchemaDiff) HasSequentialExecutionDependencies() bool { + for _, dep := range d.dependencies { + if dep.typ >= DiffDependencySequentialExecution { + return true + } + } + return false +} + +// OrderedDiffs returns the list of diff in applicable order, if possible. This is a linearized representation +// where diffs may be applied in-order one after another, keeping the schema in valid state at all times. +func (d *SchemaDiff) OrderedDiffs() ([]EntityDiff, error) { + lastGoodSchema := d.schema + var orderedDiffs []EntityDiff + m := d.r.Map() + // The order of classes in the quivalence relation is, generally speaking, loyal to the order of original diffs. + for _, class := range d.r.OrderedClasses() { + classDiffs := []EntityDiff{} + // Which diffs are in this equivalence class? + for _, statementString := range m[class] { + diff, ok := d.diffByStatementString(statementString) + if !ok { + return nil, fmt.Errorf("unexpected error: cannot find diff: %v", statementString) + } + classDiffs = append(classDiffs, diff) + } + // We will now permutate the diffs in this equivalence class, and hopefully find + // a valid permutation (one where if we apply the diffs in-order, the schema remains valid throughout the process) + foundValidPathForClass := permutateDiffs(classDiffs, func(permutatedDiffs []EntityDiff) bool { + permutationSchema := lastGoodSchema + // We want to apply the changes one by one, and validate the schema after each change + var err error + for i := range permutatedDiffs { + permutationSchema, err = permutationSchema.Apply(permutatedDiffs[i : i+1]) + if err != nil { + // permutation is invalid + return false // continue searching + } + } + // Good news, we managed to apply all of the permutations! + orderedDiffs = append(orderedDiffs, permutatedDiffs...) + lastGoodSchema = permutationSchema + return true // early break! No need to keep searching + }) + if !foundValidPathForClass { + // In this equivalence class, there is no valid permutation. We cannot linearize the diffs. + return nil, &ImpossibleApplyDiffOrderError{ + UnorderedDiffs: d.UnorderedDiffs(), + ConflictingDiffs: classDiffs, + } + } + // Done taking care of this equivalence class. + } + return orderedDiffs, nil +} diff --git a/go/vt/schemadiff/schema_diff_test.go b/go/vt/schemadiff/schema_diff_test.go new file mode 100644 index 00000000000..670e84c6f1a --- /dev/null +++ b/go/vt/schemadiff/schema_diff_test.go @@ -0,0 +1,718 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemadiff + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPermutations(t *testing.T) { + tt := []struct { + name string + fromQueries []string + toQueries []string + expectDiffs int + expectPermutations int + }{ + { + name: "no diff", + fromQueries: []string{ + "create table t1 (id int primary key, info int not null);", + }, + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + }, + expectDiffs: 0, + expectPermutations: 0, + }, + { + name: "single diff", + fromQueries: []string{ + "create table t1 (id int primary key, info int not null);", + }, + toQueries: []string{ + "create table t1 (id int primary key, info int not null, i int);", + }, + expectDiffs: 1, + expectPermutations: 1, + }, + { + name: "two diffs", + fromQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create view v1 as select id from t1", + }, + toQueries: []string{ + "create table t1 (id int primary key, info int not null, i int);", + "create view v1 as select id, info from t1", + }, + expectDiffs: 2, + expectPermutations: 2, + }, + { + name: "multiple diffs", + fromQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp);", + "create view v1 as select id from t1", + }, + toQueries: []string{ + "create table t1 (id int primary key, info int not null, i int);", + "create table t2 (id int primary key);", + "create view v1 as select id, info from t1", + "create view v2 as select id from t2", + }, + expectDiffs: 4, + expectPermutations: 24, + }, + } + hints := &DiffHints{RangeRotationStrategy: RangeRotationDistinctStatements} + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + + fromSchema, err := NewSchemaFromQueries(tc.fromQueries) + require.NoError(t, err) + require.NotNil(t, fromSchema) + + toSchema, err := NewSchemaFromQueries(tc.toQueries) + require.NoError(t, err) + require.NotNil(t, toSchema) + + schemaDiff, err := fromSchema.SchemaDiff(toSchema, hints) + require.NoError(t, err) + + allDiffs := schemaDiff.UnorderedDiffs() + require.Equal(t, tc.expectDiffs, len(allDiffs)) + + toSingleString := func(diffs []EntityDiff) string { + res := "" + for _, diff := range diffs { + res = res + diff.CanonicalStatementString() + ";" + } + return res + } + t.Run("no early break", func(t *testing.T) { + iteration := 0 + allPerms := map[string]bool{} + allDiffs := schemaDiff.UnorderedDiffs() + originalSingleString := toSingleString(allDiffs) + earlyBreak := permutateDiffs(allDiffs, func(pdiffs []EntityDiff) (earlyBreak bool) { + // cover all permutations + allPerms[toSingleString(pdiffs)] = true + if iteration == 0 { + // First permutation should be the same as original + require.Equal(t, originalSingleString, toSingleString(pdiffs)) + } else { + // rest of permutations must be different than original (later we also verify they are all unique) + require.NotEqualf(t, originalSingleString, toSingleString(pdiffs), "in iteration %d", iteration) + } + iteration++ + return false + }) + assert.False(t, earlyBreak) + assert.Equal(t, tc.expectPermutations, len(allPerms)) + }) + t.Run("early break", func(t *testing.T) { + allPerms := map[string]bool{} + allDiffs := schemaDiff.UnorderedDiffs() + originalSingleString := toSingleString(allDiffs) + earlyBreak := permutateDiffs(allDiffs, func(pdiffs []EntityDiff) (earlyBreak bool) { + // Single visit + allPerms[toSingleString(pdiffs)] = true + // First permutation should be the same as original + require.Equal(t, originalSingleString, toSingleString(pdiffs)) + // early break; this callback function should not be invoked again + return true + }) + if len(allDiffs) > 0 { + assert.True(t, earlyBreak) + assert.Equal(t, 1, len(allPerms)) + } else { + // no diffs means no permutations, and no call to the callback function + assert.False(t, earlyBreak) + assert.Equal(t, 0, len(allPerms)) + } + }) + }) + } +} + +func TestSchemaDiff(t *testing.T) { + var ( + createQueries = []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp);", + "create view v1 as select id from t1", + } + ) + tt := []struct { + name string + fromQueries []string + toQueries []string + expectDiffs int + expectDeps int + sequential bool + conflictingDiffs int + entityOrder []string // names of tables/views in expected diff order + }{ + { + name: "no change", + toQueries: createQueries, + entityOrder: []string{}, + }, + { + name: "three unrelated changes", + toQueries: []string{ + "create table t1 (id int primary key, info int not null, ts timestamp);", + "create table t2 (id int primary key, ts timestamp, v varchar);", + "create view v1 as select id from t1", + "create view v2 as select 1 from dual", + }, + expectDiffs: 3, + entityOrder: []string{"t1", "t2", "v2"}, + }, + { + name: "three unrelated changes 2", + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp, v varchar);", + "create view v2 as select 1 from dual", + }, + expectDiffs: 3, + entityOrder: []string{"v1", "t2", "v2"}, + }, + // Subsequent + { + name: "add one fulltext key", + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp, v varchar, fulltext key ftk1 (v));", + "create view v1 as select id from t1", + }, + expectDiffs: 1, + expectDeps: 0, + entityOrder: []string{"t2"}, + }, + { + // MySQL limitation: you cannot add two FULLTEXT keys in a single statement. `schemadiff` complies + // with that limitation and turns such a request into two distinct statements. + name: "add two fulltext keys", + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp, v varchar, fulltext key ftk1 (v), fulltext key ftk2 (v));", + "create view v1 as select id from t1", + }, + expectDiffs: 2, + expectDeps: 1, + sequential: true, + entityOrder: []string{"t2", "t2"}, + }, + { + name: "add partition", + fromQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp) partition by range (id) (partition p0 values less than (0), partition p1 values less than (1));", + "create view v1 as select id from t1", + }, + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp) partition by range (id) (partition p0 values less than (0), partition p1 values less than (1), partition p2 values less than (2));", + "create view v1 as select id from t1", + }, + expectDiffs: 1, + expectDeps: 0, + entityOrder: []string{"t2"}, + }, + { + // In MySQL, you cannot ALTER TABLE ADD COLUMN ..., ADD PARTITION in a single statement + name: "add column, add partition", + fromQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp) partition by range (id) (partition p0 values less than (0), partition p1 values less than (1));", + "create view v1 as select id from t1", + }, + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp, v varchar) partition by range (id) (partition p0 values less than (0), partition p1 values less than (1), partition p2 values less than (2));", + "create view v1 as select id from t1", + }, + expectDiffs: 2, + expectDeps: 1, + sequential: true, + entityOrder: []string{"t2", "t2"}, + }, + { + name: "add view", + toQueries: append( + createQueries, + "create view v2 as select id from t2", + ), + expectDiffs: 1, + entityOrder: []string{"v2"}, + }, + { + name: "add view, alter table", + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp, v varchar);", + "create view v1 as select id from t1", + "create view v2 as select id from t2", + }, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t2", "v2"}, + }, + { + name: "alter view, alter table", + toQueries: []string{ + "create table t1 (the_id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp);", + "create view v1 as select the_id from t1", + }, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t1", "v1"}, + conflictingDiffs: 2, + }, + { + name: "alter table, add view", + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp, v varchar);", + "create view v1 as select id from t1", + "create view v2 as select id, v from t2", + }, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t2", "v2"}, + }, + { + name: "create view depending on 2 tables, alter table", + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp, v varchar);", + "create view v1 as select id from t1", + "create view v2 as select info, v from t1, t2", + }, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t2", "v2"}, + }, + { + name: "create view depending on 2 tables, alter other table", + toQueries: []string{ + "create table t1 (id int primary key, info int not null, dt datetime);", + "create table t2 (id int primary key, ts timestamp);", + "create view v1 as select id from t1", + // "create view v2 as select id from t1", + "create view v2 as select info, ts from t1, t2", + // "create view v2 as select info, ts from t1, t2", + }, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t1", "v2"}, + }, + { + name: "create view depending on 2 tables, alter both tables", + toQueries: []string{ + "create table t1 (id int primary key, info int not null, dt datetime);", + "create table t2 (id int primary key, ts timestamp, v varchar);", + "create view v1 as select id from t1", + // "create view v2 as select id from t1", + "create view v2 as select info, ts from t1, t2", + // "create view v2 as select info, ts from t1, t2", + }, + expectDiffs: 3, + expectDeps: 2, + entityOrder: []string{"t1", "v2", "t2"}, + }, + { + name: "alter view depending on 2 tables, uses new column, alter tables", + toQueries: []string{ + "create table t1 (id int primary key, info int not null, dt datetime);", + "create table t2 (id int primary key, ts timestamp, v varchar);", + "create view v1 as select id from t1", + "create view v2 as select info, v from t1, t2", + }, + expectDiffs: 3, + expectDeps: 2, + entityOrder: []string{"t1", "t2", "v2"}, + }, + { + name: "drop view", + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp);", + }, + expectDiffs: 1, + expectDeps: 0, + entityOrder: []string{"v1"}, + }, + { + name: "drop view, alter dependent table", + toQueries: []string{ + "create table t1 (id int primary key, info int not null, dt datetime);", + "create table t2 (id int primary key, ts timestamp);", + }, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"v1", "t1"}, + }, + { + name: "drop view, drop dependent table", + toQueries: []string{ + "create table t2 (id int primary key, ts timestamp);", + }, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"v1", "t1"}, + }, + { + name: "drop view, drop unrelated table", + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + }, + expectDiffs: 2, + expectDeps: 0, + entityOrder: []string{"v1", "t2"}, + }, + { + name: "alter view, drop table", + toQueries: []string{ + "create table t2 (id int primary key, ts timestamp);", + "create view v1 as select id from t2", + }, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"v1", "t1"}, + }, + { + name: "alter view, add view", + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp);", + "create view v1 as select id, info from t1", + "create view v2 as select info from v1", + }, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"v1", "v2"}, + }, + { + name: "alter view, add view, 2", + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp);", + "create view v1 as select id, ts from v2", + "create view v2 as select id, ts from t2", + }, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"v2", "v1"}, + }, + { + name: "alter table, alter view, add view", + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp, v varchar);", + "create view v1 as select ts from t2", + "create view v2 as select v from t2", + }, + expectDiffs: 3, + expectDeps: 2, + entityOrder: []string{"t2", "v1", "v2"}, + }, + { + name: "alter table, alter view, impossible sequence", + fromQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create view v1 as select id, info from t1", + }, + toQueries: []string{ + "create table t1 (id int primary key, newcol int not null);", + "create view v1 as select id, newcol from t1", + }, + expectDiffs: 2, + expectDeps: 1, + conflictingDiffs: 2, + }, + + // FKs + { + name: "create table with fk", + toQueries: append( + createQueries, + "create table t3 (id int primary key, ts timestamp, t1_id int, foreign key (t1_id) references t1 (id) on delete no action);", + ), + expectDiffs: 1, + entityOrder: []string{"t3"}, + }, + { + name: "create two table with fk", + toQueries: append( + createQueries, + "create table tp (id int primary key, info int not null);", + "create table t3 (id int primary key, ts timestamp, tp_id int, foreign key (tp_id) references tp (id) on delete no action);", + ), + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"tp", "t3"}, + }, + { + name: "add FK", + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp, t1_id int, foreign key (t1_id) references t1 (id) on delete no action);", + "create view v1 as select id from t1", + }, + expectDiffs: 1, + expectDeps: 0, + entityOrder: []string{"t2"}, + }, + { + name: "add FK pointing to new table", + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp, tp_id int, foreign key (tp_id) references tp (id) on delete no action);", + "create table tp (id int primary key, info int not null);", + "create view v1 as select id from t1", + }, + expectDiffs: 2, + expectDeps: 1, + sequential: true, + entityOrder: []string{"tp", "t2"}, + }, + { + name: "add FK, unrelated alter", + toQueries: []string{ + "create table t1 (id int primary key, info int not null, key info_idx(info));", + "create table t2 (id int primary key, ts timestamp, t1_id int, foreign key (t1_id) references t1 (id) on delete no action);", + "create view v1 as select id from t1", + }, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t1", "t2"}, + }, + { + name: "add FK, add unrelated column", + toQueries: []string{ + "create table t1 (id int primary key, info int not null, dt datetime);", + "create table t2 (id int primary key, ts timestamp, t1_id int, foreign key (t1_id) references t1 (id) on delete no action);", + "create view v1 as select id from t1", + }, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t1", "t2"}, + }, + { + name: "add FK, alter unrelated column", + toQueries: []string{ + "create table t1 (id int primary key, info bigint not null);", + "create table t2 (id int primary key, ts timestamp, t1_id int, foreign key (t1_id) references t1 (id) on delete no action);", + "create view v1 as select id from t1", + }, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t1", "t2"}, + }, + { + name: "add FK, alter referenced column", + toQueries: []string{ + "create table t1 (id bigint primary key, info bigint not null);", + "create table t2 (id int primary key, ts timestamp, t1_id bigint, foreign key (t1_id) references t1 (id) on delete no action);", + "create view v1 as select id from t1", + }, + expectDiffs: 2, + expectDeps: 1, + sequential: true, + entityOrder: []string{"t1", "t2"}, + }, + { + name: "add column. create FK table referencing new column", + toQueries: []string{ + "create table t1 (id int primary key, info int not null, p int, key p_idx (p));", + "create table t2 (id int primary key, ts timestamp);", + "create view v1 as select id from t1", + "create table t3 (id int primary key, ts timestamp, t1_p int, foreign key (t1_p) references t1 (p) on delete no action);", + }, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t1", "t3"}, + }, + { + name: "add column. add FK referencing new column", + toQueries: []string{ + "create table t1 (id int primary key, info int not null, p int, key p_idx (p));", + "create table t2 (id int primary key, ts timestamp, t1_p int, foreign key (t1_p) references t1 (p) on delete no action);", + "create view v1 as select id from t1", + }, + expectDiffs: 2, + expectDeps: 1, + sequential: true, + entityOrder: []string{"t1", "t2"}, + }, + { + name: "add column. add FK referencing new column, alphabetically desc", + toQueries: []string{ + "create table t1 (id int primary key, info int not null, t2_p int, foreign key (t2_p) references t2 (p) on delete no action);", + "create table t2 (id int primary key, ts timestamp, p int, key p_idx (p));", + "create view v1 as select id from t1", + }, + expectDiffs: 2, + expectDeps: 1, + sequential: true, + entityOrder: []string{"t2", "t1"}, + }, + { + name: "drop fk", + fromQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp, t1_id int, foreign key (t1_id) references t1 (id) on delete no action);", + "create view v1 as select id from t1", + }, + toQueries: createQueries, + expectDiffs: 1, + expectDeps: 0, + entityOrder: []string{"t2"}, + }, + { + name: "drop fk, drop table", + fromQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp, t1_id int, foreign key (t1_id) references t1 (id) on delete no action);", + }, + toQueries: []string{ + "create table t2 (id int primary key, ts timestamp, t1_id int);", + }, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t2", "t1"}, + }, + { + name: "drop fk, drop column", + fromQueries: []string{ + "create table t1 (id int primary key, info int not null, p int, key p_idx (p));", + "create table t2 (id int primary key, ts timestamp, t1_p int, foreign key (t1_p) references t1 (p) on delete no action);", + }, + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp, t1_p int);", + }, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t2", "t1"}, + }, + { + name: "reverse fk", + fromQueries: []string{ + "create table t1 (id int primary key, p int, key p_idx (p));", + "create table t2 (id int primary key, p int, key p_idx (p), foreign key (p) references t1 (p) on delete no action);", + }, + toQueries: []string{ + "create table t1 (id int primary key, p int, key p_idx (p), foreign key (p) references t2 (p) on delete no action);", + "create table t2 (id int primary key, p int, key p_idx (p));", + }, + expectDiffs: 2, + expectDeps: 2, + entityOrder: []string{"t2", "t1"}, + }, + { + name: "add and drop FK, add and drop column, impossible order", + fromQueries: []string{ + "create table t1 (id int primary key, p int, key p_idx (p));", + "create table t2 (id int primary key, p int, key p_idx (p), foreign key (p) references t1 (p) on delete no action);", + }, + toQueries: []string{ + "create table t1 (id int primary key, q int, key q_idx (q));", + "create table t2 (id int primary key, q int, key q_idx (q), foreign key (q) references t1 (q) on delete no action);", + }, + expectDiffs: 2, + expectDeps: 1, + sequential: true, + conflictingDiffs: 2, + }, + } + hints := &DiffHints{RangeRotationStrategy: RangeRotationDistinctStatements} + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + if tc.fromQueries == nil { + tc.fromQueries = createQueries + } + fromSchema, err := NewSchemaFromQueries(tc.fromQueries) + require.NoError(t, err) + require.NotNil(t, fromSchema) + + toSchema, err := NewSchemaFromQueries(tc.toQueries) + require.NoError(t, err) + require.NotNil(t, toSchema) + + schemaDiff, err := fromSchema.SchemaDiff(toSchema, hints) + require.NoError(t, err) + + allDiffs := schemaDiff.UnorderedDiffs() + allDiffsStatements := []string{} + for _, diff := range allDiffs { + allDiffsStatements = append(allDiffsStatements, diff.CanonicalStatementString()) + } + assert.Equalf(t, tc.expectDiffs, len(allDiffs), "found diffs: %v", allDiffsStatements) + + deps := schemaDiff.AllDependenciess() + depsKeys := []string{} + for _, dep := range deps { + depsKeys = append(depsKeys, dep.hashKey()) + } + assert.Equalf(t, tc.expectDeps, len(deps), "found deps: %v", depsKeys) + assert.Equal(t, tc.sequential, schemaDiff.HasSequentialExecutionDependencies()) + + orderedDiffs, err := schemaDiff.OrderedDiffs() + if tc.conflictingDiffs > 0 { + require.Greater(t, tc.conflictingDiffs, 1) // self integrity. If there's a conflict, then obviously there's at least two conflicting diffs (a single diff has nothing to conflict with) + assert.Error(t, err) + impossibleOrderErr, ok := err.(*ImpossibleApplyDiffOrderError) + assert.True(t, ok) + assert.Equal(t, tc.conflictingDiffs, len(impossibleOrderErr.ConflictingDiffs)) + } else { + require.NoError(t, err) + } + diffStatementStrings := []string{} + for _, diff := range orderedDiffs { + diffStatementStrings = append(diffStatementStrings, diff.CanonicalStatementString()) + } + if tc.conflictingDiffs == 0 { + // validate that the order of diffs is as expected (we don't check for the full diff statement, + // just for the order of affected tables/views) + require.NotNil(t, tc.entityOrder) // making sure we explicitly specified expected order + assert.Equalf(t, len(tc.entityOrder), len(orderedDiffs), "expected %d diffs/entities per %v", len(tc.entityOrder), tc.entityOrder) + diffEntities := []string{} + for _, diff := range orderedDiffs { + diffEntities = append(diffEntities, diff.EntityName()) + } + assert.Equalf(t, tc.entityOrder, diffEntities, "diffs: %v", strings.Join(diffStatementStrings, ";\n")) + } + for _, diff := range orderedDiffs { + s := diff.CanonicalStatementString() + // Internal integrity, while we're here: see that the equivalence relation has entries for all diffs. + _, err := schemaDiff.r.ElementClass(s) + require.NoError(t, err) + } + }) + } +} diff --git a/go/vt/schemadiff/schema_test.go b/go/vt/schemadiff/schema_test.go index 1a24b862b1c..79bf44117e2 100644 --- a/go/vt/schemadiff/schema_test.go +++ b/go/vt/schemadiff/schema_test.go @@ -17,16 +17,22 @@ limitations under the License. package schemadiff import ( + "fmt" + "math/rand" + "sort" "strings" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/errors" + "vitess.io/vitess/go/vt/sqlparser" ) -var createQueries = []string{ +var schemaTestCreateQueries = []string{ "create view v5 as select * from t1, (select * from v3) as some_alias", "create table t3(id int, type enum('foo', 'bar') NOT NULL DEFAULT 'foo')", "create table t1(id int)", @@ -36,12 +42,12 @@ var createQueries = []string{ "create table t5(id int)", "create view v2 as select * from v3, t2", "create view v1 as select * from v3", - "create view v3 as select * from t3 as t3", + "create view v3 as select *, id+1 as id_plus, id+2 from t3 as t3", "create view v0 as select 1 from DUAL", "create view v9 as select 1", } -var expectSortedNames = []string{ +var schemaTestExpectSortedNames = []string{ "t1", "t2", "t3", @@ -56,14 +62,14 @@ var expectSortedNames = []string{ "v6", // level 3 } -var expectSortedTableNames = []string{ +var schemaTestExpectSortedTableNames = []string{ "t1", "t2", "t3", "t5", } -var expectSortedViewNames = []string{ +var schemaTestExpectSortedViewNames = []string{ "v0", // level 1 ("dual" is an implicit table) "v3", // level 1 "v9", // level 1 (no source table) @@ -74,31 +80,31 @@ var expectSortedViewNames = []string{ "v6", // level 3 } -var toSQL = "CREATE TABLE `t1` (\n\t`id` int\n);\nCREATE TABLE `t2` (\n\t`id` int\n);\nCREATE TABLE `t3` (\n\t`id` int,\n\t`type` enum('foo', 'bar') NOT NULL DEFAULT 'foo'\n);\nCREATE TABLE `t5` (\n\t`id` int\n);\nCREATE VIEW `v0` AS SELECT 1 FROM `dual`;\nCREATE VIEW `v3` AS SELECT * FROM `t3` AS `t3`;\nCREATE VIEW `v9` AS SELECT 1 FROM `dual`;\nCREATE VIEW `v1` AS SELECT * FROM `v3`;\nCREATE VIEW `v2` AS SELECT * FROM `v3`, `t2`;\nCREATE VIEW `v4` AS SELECT * FROM `t2` AS `something_else`, `v3`;\nCREATE VIEW `v5` AS SELECT * FROM `t1`, (SELECT * FROM `v3`) AS `some_alias`;\nCREATE VIEW `v6` AS SELECT * FROM `v4`;\n" +var schemaTestToSQL = "CREATE TABLE `t1` (\n\t`id` int\n);\nCREATE TABLE `t2` (\n\t`id` int\n);\nCREATE TABLE `t3` (\n\t`id` int,\n\t`type` enum('foo', 'bar') NOT NULL DEFAULT 'foo'\n);\nCREATE TABLE `t5` (\n\t`id` int\n);\nCREATE VIEW `v0` AS SELECT 1 FROM `dual`;\nCREATE VIEW `v3` AS SELECT *, `id` + 1 AS `id_plus`, `id` + 2 FROM `t3` AS `t3`;\nCREATE VIEW `v9` AS SELECT 1 FROM `dual`;\nCREATE VIEW `v1` AS SELECT * FROM `v3`;\nCREATE VIEW `v2` AS SELECT * FROM `v3`, `t2`;\nCREATE VIEW `v4` AS SELECT * FROM `t2` AS `something_else`, `v3`;\nCREATE VIEW `v5` AS SELECT * FROM `t1`, (SELECT * FROM `v3`) AS `some_alias`;\nCREATE VIEW `v6` AS SELECT * FROM `v4`;\n" func TestNewSchemaFromQueries(t *testing.T) { - schema, err := NewSchemaFromQueries(createQueries) + schema, err := NewSchemaFromQueries(schemaTestCreateQueries) assert.NoError(t, err) - assert.NotNil(t, schema) + require.NotNil(t, schema) - assert.Equal(t, expectSortedNames, schema.EntityNames()) - assert.Equal(t, expectSortedTableNames, schema.TableNames()) - assert.Equal(t, expectSortedViewNames, schema.ViewNames()) + assert.Equal(t, schemaTestExpectSortedNames, schema.EntityNames()) + assert.Equal(t, schemaTestExpectSortedTableNames, schema.TableNames()) + assert.Equal(t, schemaTestExpectSortedViewNames, schema.ViewNames()) } func TestNewSchemaFromSQL(t *testing.T) { - schema, err := NewSchemaFromSQL(strings.Join(createQueries, ";")) + schema, err := NewSchemaFromSQL(strings.Join(schemaTestCreateQueries, ";")) assert.NoError(t, err) - assert.NotNil(t, schema) + require.NotNil(t, schema) - assert.Equal(t, expectSortedNames, schema.EntityNames()) - assert.Equal(t, expectSortedTableNames, schema.TableNames()) - assert.Equal(t, expectSortedViewNames, schema.ViewNames()) + assert.Equal(t, schemaTestExpectSortedNames, schema.EntityNames()) + assert.Equal(t, schemaTestExpectSortedTableNames, schema.TableNames()) + assert.Equal(t, schemaTestExpectSortedViewNames, schema.ViewNames()) } func TestNewSchemaFromQueriesWithDuplicate(t *testing.T) { // v2 already exists - queries := append(createQueries, + queries := append(schemaTestCreateQueries, "create view v2 as select * from v1, t2", ) _, err := NewSchemaFromQueries(queries) @@ -108,17 +114,20 @@ func TestNewSchemaFromQueriesWithDuplicate(t *testing.T) { func TestNewSchemaFromQueriesUnresolved(t *testing.T) { // v8 does not exist - queries := append(createQueries, + queries := append(schemaTestCreateQueries, "create view v7 as select * from v8, t2", ) - _, err := NewSchemaFromQueries(queries) + schema, err := NewSchemaFromQueries(queries) assert.Error(t, err) assert.EqualError(t, err, (&ViewDependencyUnresolvedError{View: "v7"}).Error()) + v := schema.sorted[len(schema.sorted)-1] + assert.IsType(t, &CreateViewEntity{}, v) + assert.Equal(t, "CREATE VIEW `v7` AS SELECT * FROM `v8`, `t2`", v.Create().CanonicalStatementString()) } func TestNewSchemaFromQueriesUnresolvedAlias(t *testing.T) { // v8 does not exist - queries := append(createQueries, + queries := append(schemaTestCreateQueries, "create view v7 as select * from something_else as t1, t2", ) _, err := NewSchemaFromQueries(queries) @@ -146,28 +155,29 @@ func TestNewSchemaFromQueriesViewFromDualImplicit(t *testing.T) { func TestNewSchemaFromQueriesLoop(t *testing.T) { // v7 and v8 depend on each other - queries := append(createQueries, + queries := append(schemaTestCreateQueries, "create view v7 as select * from v8, t2", "create view v8 as select * from t1, v7", ) _, err := NewSchemaFromQueries(queries) - assert.Error(t, err) + require.Error(t, err) + err = errors.UnwrapFirst(err) assert.EqualError(t, err, (&ViewDependencyUnresolvedError{View: "v7"}).Error()) } func TestToSQL(t *testing.T) { - schema, err := NewSchemaFromQueries(createQueries) + schema, err := NewSchemaFromQueries(schemaTestCreateQueries) assert.NoError(t, err) - assert.NotNil(t, schema) + require.NotNil(t, schema) sql := schema.ToSQL() - assert.Equal(t, toSQL, sql) + assert.Equal(t, schemaTestToSQL, sql) } func TestCopy(t *testing.T) { - schema, err := NewSchemaFromQueries(createQueries) + schema, err := NewSchemaFromQueries(schemaTestCreateQueries) assert.NoError(t, err) - assert.NotNil(t, schema) + require.NotNil(t, schema) schemaClone := schema.copy() assert.Equal(t, schema, schemaClone) @@ -217,8 +227,7 @@ func TestGetViewDependentTableNames(t *testing.T) { createView, ok := stmt.(*sqlparser.CreateView) require.True(t, ok) - tables, err := getViewDependentTableNames(createView) - assert.NoError(t, err) + tables := getViewDependentTableNames(createView) assert.Equal(t, ts.tables, tables) }) } @@ -258,8 +267,7 @@ func TestGetForeignKeyParentTableNames(t *testing.T) { createTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) - tables, err := getForeignKeyParentTableNames(createTable) - assert.NoError(t, err) + tables := getForeignKeyParentTableNames(createTable) assert.Equal(t, ts.tables, tables) }) } @@ -398,3 +406,420 @@ func TestInvalidTableForeignKeyReference(t *testing.T) { assert.EqualError(t, err, (&ForeignKeyDependencyUnresolvedError{Table: "t11"}).Error()) } } + +func TestGetEntityColumnNames(t *testing.T) { + var queries = []string{ + "create table t1(id int, state int, some char(5))", + "create table t2(id int primary key, c char(5))", + "create view v1 as select id as id from t1", + "create view v2 as select 3+1 as `id`, state as state, some as thing from t1", + "create view v3 as select `id` as `id`, state as state, thing as another from v2", + "create view v4 as select 1 as `ok` from dual", + "create view v5 as select 1 as `ok` from DUAL", + "create view v6 as select ok as `ok` from v5", + "create view v7 as select * from t1", + "create view v8 as select * from v7", + "create view v9 as select * from v8, v6", + "create view va as select * from v6, v8", + "create view vb as select *, now() from v8", + } + + schema, err := NewSchemaFromQueries(queries) + require.NoError(t, err) + require.NotNil(t, schema) + + expectedColNames := map[string]([]string){ + "t1": []string{"id", "state", "some"}, + "t2": []string{"id", "c"}, + "v1": []string{"id"}, + "v2": []string{"id", "state", "thing"}, + "v3": []string{"id", "state", "another"}, + "v4": []string{"ok"}, + "v5": []string{"ok"}, + "v6": []string{"ok"}, + "v7": []string{"id", "state", "some"}, + "v8": []string{"id", "state", "some"}, + "v9": []string{"id", "state", "some", "ok"}, + "va": []string{"ok", "id", "state", "some"}, + "vb": []string{"id", "state", "some", "now()"}, + } + entities := schema.Entities() + require.Equal(t, len(entities), len(expectedColNames)) + + tcmap := newDeclarativeSchemaInformation() + // we test by order of dependency: + for _, e := range entities { + tbl := e.Name() + t.Run(tbl, func(t *testing.T) { + identifiers, err := schema.getEntityColumnNames(tbl, tcmap) + assert.NoError(t, err) + names := []string{} + for _, ident := range identifiers { + names = append(names, ident.String()) + } + // compare columns. We disregard order. + expectNames := expectedColNames[tbl][:] + sort.Strings(names) + sort.Strings(expectNames) + assert.Equal(t, expectNames, names) + // emulate the logic that fills known columns for known entities: + tcmap.addTable(tbl) + for _, name := range names { + tcmap.addColumn(tbl, name) + } + }) + } +} + +func TestViewReferences(t *testing.T) { + tt := []struct { + name string + queries []string + expectErr error + }{ + { + name: "valid", + queries: []string{ + "create table t1(id int, state int, some char(5))", + "create table t2(id int primary key, c char(5))", + "create view v1 as select id as id from t1", + "create view v2 as select 3+1 as `id`, state as state, some as thing from t1", + "create view v3 as select `id` as `id`, state as state, thing as another from v2", + "create view v4 as select 1 as `ok` from dual", + "create view v5 as select 1 as `ok` from DUAL", + "create view v6 as select ok as `ok` from v5", + }, + }, + { + name: "valid WHERE", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create table t2(id int primary key, c char(5))", + "create view v1 as select c from t1 where id=3", + }, + }, + { + name: "invalid unqualified referenced column", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create table t2(id int primary key, c char(5))", + "create view v1 as select unexpected from t1", + }, + expectErr: &InvalidColumnReferencedInViewError{View: "v1", Column: "unexpected"}, + }, + { + name: "invalid unqualified referenced column in where clause", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create table t2(id int primary key, c char(5))", + "create view v1 as select 1 from t1 where unexpected=3", + }, + expectErr: &InvalidColumnReferencedInViewError{View: "v1", Column: "unexpected"}, + }, + { + name: "valid qualified", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create table t2(id int primary key, c char(5))", + "create view v1 as select t1.c from t1 where t1.id=3", + }, + }, + { + name: "valid qualified, multiple tables", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create table t2(id int primary key, c char(5))", + "create view v1 as select t1.c from t1, t2 where t2.id=3", + }, + }, + { + name: "invalid unqualified, multiple tables", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create table t2(id int primary key, c char(5))", + "create view v1 as select c from t1, t2 where t2.id=3", + }, + expectErr: &InvalidColumnReferencedInViewError{View: "v1", Column: "c", Ambiguous: true}, + }, + { + name: "invalid unqualified in WHERE clause, multiple tables", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create table t2(id int primary key, c char(5))", + "create view v1 as select t2.c from t1, t2 where id=3", + }, + expectErr: &InvalidColumnReferencedInViewError{View: "v1", Column: "id", Ambiguous: true}, + }, + { + name: "valid unqualified, multiple tables", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create table t2(id int primary key, c char(5), only_in_t2 int)", + "create view v1 as select only_in_t2 from t1, t2 where t1.id=3", + }, + }, + { + name: "valid unqualified in WHERE clause, multiple tables", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create table t2(id int primary key, c char(5), only_in_t2 int)", + "create view v1 as select t1.id from t1, t2 where only_in_t2=3", + }, + }, + { + name: "valid cascaded views", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create view v1 as select id, c from t1 where id > 0", + "create view v2 as select * from v1 where id > 0", + }, + }, + { + name: "valid cascaded views, column aliases", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create view v1 as select id, c as ch from t1 where id > 0", + "create view v2 as select ch from v1 where id > 0", + }, + }, + { + name: "valid cascaded views, column aliases in WHERE", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create view v1 as select id as counter, c as ch from t1 where id > 0", + "create view v2 as select ch from v1 where counter > 0", + }, + }, + { + name: "valid cascaded views, aliased expression", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create view v1 as select id+1 as counter, c as ch from t1 where id > 0", + "create view v2 as select ch from v1 where counter > 0", + }, + }, + { + name: "valid cascaded views, non aliased expression", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create view v1 as select id+1, c as ch from t1 where id > 0", + "create view v2 as select ch from v1 where `id + 1` > 0", + }, + }, + { + name: "cascaded views, invalid column aliases", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create view v1 as select id, c as ch from t1 where id > 0", + "create view v2 as select c from v1 where id > 0", + }, + expectErr: &InvalidColumnReferencedInViewError{View: "v2", Column: "c"}, + }, + { + name: "cascaded views, column not in view", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create view v1 as select id from t1 where c='x'", + "create view v2 as select c from v1 where id > 0", + }, + expectErr: &InvalidColumnReferencedInViewError{View: "v2", Column: "c"}, + }, + { + name: "complex cascade", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create table t2(id int primary key, n int, info int)", + "create view v1 as select id, c as ch from t1 where id > 0", + "create view v2 as select n as num, info from t2", + "create view v3 as select num, v1.id, ch from v1 join v2 on v1.id = v2.num where info > 5", + }, + }, + { + name: "valid dual", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create view v1 as select 1 from dual", + }, + }, + { + name: "invalid dual column", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create view v1 as select id from dual", + }, + expectErr: &InvalidColumnReferencedInViewError{View: "v1", Column: "id"}, + }, + { + name: "invalid dual star", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create view v1 as select * from dual", + }, + expectErr: &InvalidStarExprInViewError{View: "v1"}, + }, + { + name: "valid star", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create view v1 as select * from t1 where id > 0", + }, + }, + { + name: "valid star, cascaded", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create view v1 as select t1.* from t1 where id > 0", + "create view v2 as select * from v1 where id > 0", + }, + }, + { + name: "valid star, two tables, cascaded", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create table t2(id int primary key, ts timestamp)", + "create view v1 as select t1.* from t1, t2 where t1.id > 0", + "create view v2 as select * from v1 where c > 0", + }, + }, + { + name: "valid two star, two tables, cascaded", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create table t2(id int primary key, ts timestamp)", + "create view v1 as select t1.*, t2.* from t1, t2 where t1.id > 0", + "create view v2 as select * from v1 where c > 0 and ts is not null", + }, + }, + { + name: "valid unqualified star, cascaded", + queries: []string{ + "create table t1(id int primary key, c char(5))", + "create table t2(id int primary key, ts timestamp)", + "create view v1 as select * from t1, t2 where t1.id > 0", + "create view v2 as select * from v1 where c > 0 and ts is not null", + }, + }, + } + for _, ts := range tt { + t.Run(ts.name, func(t *testing.T) { + schema, err := NewSchemaFromQueries(ts.queries) + if ts.expectErr == nil { + require.NoError(t, err) + require.NotNil(t, schema) + } else { + require.Error(t, err) + err = errors.UnwrapFirst(err) + require.Equal(t, ts.expectErr, err, "received error: %v", err) + } + }) + } +} + +// TestMassiveSchema loads thousands of tables into one schema, and thousands of tables, some of which are different, into another schema. +// It compares the two shemas. +// The objective of this test is to verify that execution time is _reasonable_. Since this will run in GitHub CI, which is very slow, we allow +// for 1 minute total for all operations. +func TestMassiveSchema(t *testing.T) { + tableBase := ` + CREATE TABLE IF NOT EXISTS placeholder + ( + id int NOT NULL AUTO_INCREMENT, + workflow varbinary(1000) DEFAULT NULL, + source mediumblob NOT NULL, + pos varbinary(10000) NOT NULL, + stop_pos varbinary(10000) DEFAULT NULL, + max_tps bigint NOT NULL, + max_replication_lag bigint NOT NULL, + cell varbinary(1000) DEFAULT NULL, + tablet_types varbinary(100) DEFAULT NULL, + time_updated bigint NOT NULL, + transaction_timestamp bigint NOT NULL, + state varbinary(100) NOT NULL, + message varbinary(1000) DEFAULT NULL, + db_name varbinary(255) NOT NULL, + rows_copied bigint NOT NULL DEFAULT '0', + tags varbinary(1024) NOT NULL DEFAULT '', + time_heartbeat bigint NOT NULL DEFAULT '0', + workflow_type int NOT NULL DEFAULT '0', + time_throttled bigint NOT NULL DEFAULT '0', + component_throttled varchar(255) NOT NULL DEFAULT '', + workflow_sub_type int NOT NULL DEFAULT '0', + defer_secondary_keys tinyint(1) NOT NULL DEFAULT '0', + PRIMARY KEY (id), + KEY workflow_idx (workflow(64)), + KEY time_heartbeat_idx (time_heartbeat) + ) ENGINE = InnoDB + ` + // Remove a couple columns into a modified table + modifiedTable := tableBase + for _, s := range []string{ + "workflow varbinary(1000) DEFAULT NULL,\n", + "KEY workflow_idx (workflow(64)),\n", + } { + require.Contains(t, tableBase, s) + modifiedTable = strings.Replace(modifiedTable, s, "", -1) + } + require.NotEqual(t, tableBase, modifiedTable) + + var schema0 *Schema + var schema1 *Schema + var err error + numTables := 8192 + modifyTables := 500 + countModifiedTables := 0 + tableNames := map[string]bool{} + + startTime := time.Now() + + // Load thousands of tables into each schema + t.Run(fmt.Sprintf("load %d tables into schemas", numTables), func(t *testing.T) { + modifiedTableIndexes := map[int]bool{} + for i, index := range rand.Perm(numTables) { + if i >= modifyTables { + break + } + modifiedTableIndexes[index] = true + } + queries0 := make([]string, 0, numTables) // to be loaded into schema0 + queries1 := make([]string, 0, numTables) // to be loaded into schema1 + for i := 0; i < numTables; i++ { + tableName := fmt.Sprintf("tbl_%05d", i) + query := strings.Replace(tableBase, "placeholder", tableName, -1) + queries0 = append(queries0, query) + if modifiedTableIndexes[i] { + // Some tables in schema1 are changed + query = strings.Replace(modifiedTable, "placeholder", tableName, -1) + countModifiedTables++ + } + queries1 = append(queries1, query) + tableNames[tableName] = true + } + schema0, err = NewSchemaFromQueries(queries0) + require.NoError(t, err) + schema1, err = NewSchemaFromQueries(queries1) + require.NoError(t, err) + + require.Equal(t, countModifiedTables, modifyTables) + }) + t.Run(fmt.Sprintf("validate loaded %d tables", numTables), func(t *testing.T) { + for _, schema := range []*Schema{schema0, schema1} { + entities := schema.Entities() + assert.Equal(t, numTables, len(entities)) // all tables are there + for _, e := range entities { + _, ok := tableNames[e.Name()] + assert.True(t, ok) + } + } + }) + + t.Run("evaluating diff", func(t *testing.T) { + schemaDiff, err := schema0.SchemaDiff(schema1, &DiffHints{}) + require.NoError(t, err) + diffs := schemaDiff.UnorderedDiffs() + require.NotEmpty(t, diffs) + require.Equal(t, len(diffs), countModifiedTables) + }) + + elapsed := time.Since(startTime) + assert.Less(t, elapsed, time.Minute) +} diff --git a/go/vt/schemadiff/semantics.go b/go/vt/schemadiff/semantics.go new file mode 100644 index 00000000000..ef9017d3b25 --- /dev/null +++ b/go/vt/schemadiff/semantics.go @@ -0,0 +1,75 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemadiff + +import ( + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/key" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/semantics" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +// semanticKS is a bogus keyspace, used for consistency purposes. The name is not important +var semanticKS = &vindexes.Keyspace{ + Name: "ks", + Sharded: false, +} + +var _ semantics.SchemaInformation = (*declarativeSchemaInformation)(nil) + +// declarativeSchemaInformation is a utility wrapper arounf FakeSI, and adds a few utility functions +// to make it more simple and accessible to schemadiff's logic. +type declarativeSchemaInformation struct { + Tables map[string]*vindexes.Table +} + +func newDeclarativeSchemaInformation() *declarativeSchemaInformation { + return &declarativeSchemaInformation{ + Tables: make(map[string]*vindexes.Table), + } +} + +// FindTableOrVindex implements the SchemaInformation interface +func (si *declarativeSchemaInformation) FindTableOrVindex(tablename sqlparser.TableName) (*vindexes.Table, vindexes.Vindex, string, topodatapb.TabletType, key.Destination, error) { + table := si.Tables[sqlparser.String(tablename)] + return table, nil, "", 0, nil, nil +} + +func (si *declarativeSchemaInformation) ConnCollation() collations.ID { + return 45 +} + +// addTable adds a fake table with an empty column list +func (si *declarativeSchemaInformation) addTable(tableName string) { + tbl := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS(tableName), + Columns: []vindexes.Column{}, + ColumnListAuthoritative: true, + Keyspace: semanticKS, + } + si.Tables[tableName] = tbl +} + +// addColumn adds a fake column with no type. It assumes the table already exists +func (si *declarativeSchemaInformation) addColumn(tableName string, columnName string) { + col := &vindexes.Column{ + Name: sqlparser.NewIdentifierCI(columnName), + } + si.Tables[tableName].Columns = append(si.Tables[tableName].Columns, *col) +} diff --git a/go/vt/schemadiff/table.go b/go/vt/schemadiff/table.go index 5f57c9cd631..dbc01ec315c 100644 --- a/go/vt/schemadiff/table.go +++ b/go/vt/schemadiff/table.go @@ -25,6 +25,8 @@ import ( golcs "github.com/yudai/golcs" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/sqlparser" ) @@ -42,6 +44,11 @@ func (d *AlterTableEntityDiff) IsEmpty() bool { return d.Statement() == nil } +// EntityName implements EntityDiff +func (d *AlterTableEntityDiff) EntityName() string { + return d.from.Name() +} + // Entities implements EntityDiff func (d *AlterTableEntityDiff) Entities() (from Entity, to Entity) { return d.from, d.to @@ -118,6 +125,11 @@ func (d *CreateTableEntityDiff) IsEmpty() bool { return d.Statement() == nil } +// EntityName implements EntityDiff +func (d *CreateTableEntityDiff) EntityName() string { + return d.to.Name() +} + // Entities implements EntityDiff func (d *CreateTableEntityDiff) Entities() (from Entity, to Entity) { return nil, &CreateTableEntity{CreateTable: d.createTable} @@ -174,6 +186,11 @@ func (d *DropTableEntityDiff) IsEmpty() bool { return d.Statement() == nil } +// EntityName implements EntityDiff +func (d *DropTableEntityDiff) EntityName() string { + return d.from.Name() +} + // Entities implements EntityDiff func (d *DropTableEntityDiff) Entities() (from Entity, to Entity) { return d.from, nil @@ -231,6 +248,11 @@ func (d *RenameTableEntityDiff) IsEmpty() bool { return d.Statement() == nil } +// EntityName implements EntityDiff +func (d *RenameTableEntityDiff) EntityName() string { + return d.from.Name() +} + // Entities implements EntityDiff func (d *RenameTableEntityDiff) Entities() (from Entity, to Entity) { return d.from, d.to @@ -332,6 +354,36 @@ func (c *CreateTableEntity) normalizeTableOptions() { } } +// GetCharset returns the explicit character set name specified +// in the CREATE TABLE statement (if any). +func (c *CreateTableEntity) GetCharset() string { + for _, opt := range c.CreateTable.TableSpec.Options { + if strings.ToLower(opt.Name) == "charset" { + opt.String = strings.ToLower(opt.String) + if charsetName, ok := collationEnv.CharsetAlias(opt.String); ok { + return charsetName + } + return opt.String + } + } + return "" +} + +// GetCollation returns the explicit collation name specified +// in the CREATE TABLE statement (if any). +func (c *CreateTableEntity) GetCollation() string { + for _, opt := range c.CreateTable.TableSpec.Options { + if strings.ToLower(opt.Name) == "collate" { + opt.String = strings.ToLower(opt.String) + if collationName, ok := collationEnv.CollationAlias(opt.String); ok { + return collationName + } + return opt.String + } + } + return "" +} + func (c *CreateTableEntity) Clone() Entity { return &CreateTableEntity{CreateTable: sqlparser.CloneRefOfCreateTable(c.CreateTable)} } @@ -342,7 +394,7 @@ const mysqlCollationVersion = "8.0.0" var collationEnv = collations.NewEnvironment(mysqlCollationVersion) func defaultCharset() string { - collation := collations.ID(collationEnv.DefaultConnectionCharset()).Get() + collation := colldata.Lookup(collations.ID(collationEnv.DefaultConnectionCharset())) if collation == nil { return "" } @@ -351,10 +403,10 @@ func defaultCharset() string { func defaultCharsetCollation(charset string) string { collation := collationEnv.DefaultCollationForCharset(charset) - if collation == nil { + if collation == collations.Unknown { return "" } - return collation.Name() + return collationEnv.LookupName(collation) } func (c *CreateTableEntity) normalizeColumnOptions() { @@ -407,6 +459,7 @@ func (c *CreateTableEntity) normalizeColumnOptions() { // See also https://dev.mysql.com/doc/refman/8.0/en/data-type-defaults.html if _, ok := col.Type.Options.Default.(*sqlparser.NullVal); ok { col.Type.Options.Default = nil + col.Type.Options.DefaultLiteral = false } } @@ -457,6 +510,7 @@ func (c *CreateTableEntity) normalizeColumnOptions() { Type: sqlparser.StrVal, Val: defaultVal, } + col.Type.Options.DefaultLiteral = true } else { col.Type.Options.Default = nil } @@ -728,6 +782,7 @@ func (c *CreateTableEntity) Diff(other Entity, hints *DiffHints) (EntityDiff, er if err != nil { return nil, err } + return d, nil } @@ -826,7 +881,6 @@ func (c *CreateTableEntity) TableDiff(other *CreateTableEntity, hints *DiffHints } if tableSpecHasChanged { parentAlterTableEntityDiff = newAlterTableEntityDiff(alterTable) - } for _, superfluousFulltextKey := range superfluousFulltextKeys { alterTable := &sqlparser.AlterTable{ @@ -850,6 +904,8 @@ func (c *CreateTableEntity) TableDiff(other *CreateTableEntity, hints *DiffHints parentAlterTableEntityDiff.addSubsequentDiff(diff) } } + sortAlterOptions(parentAlterTableEntityDiff) + return parentAlterTableEntityDiff, nil } @@ -1668,24 +1724,30 @@ func (c *CreateTableEntity) Drop() EntityDiff { } func sortAlterOptions(diff *AlterTableEntityDiff) { + if diff == nil { + return + } optionOrder := func(opt sqlparser.AlterOption) int { - switch opt.(type) { + switch opt := opt.(type) { case *sqlparser.DropKey: - return 1 - case *sqlparser.DropColumn: + if opt.Type == sqlparser.ForeignKeyType { + return 1 + } return 2 - case *sqlparser.ModifyColumn: + case *sqlparser.DropColumn: return 3 - case *sqlparser.RenameColumn: + case *sqlparser.ModifyColumn: return 4 - case *sqlparser.AddColumns: + case *sqlparser.RenameColumn: return 5 - case *sqlparser.AddIndexDefinition: + case *sqlparser.AddColumns: return 6 - case *sqlparser.AddConstraintDefinition: + case *sqlparser.AddIndexDefinition: return 7 - case sqlparser.TableOptions, *sqlparser.TableOptions: + case *sqlparser.AddConstraintDefinition: return 8 + case sqlparser.TableOptions, *sqlparser.TableOptions: + return 9 default: return math.MaxInt } @@ -1988,8 +2050,10 @@ func (c *CreateTableEntity) apply(diff *AlterTableEntityDiff) error { found = true if opt.DropDefault { col.Type.Options.Default = nil + col.Type.Options.DefaultLiteral = false } else if opt.DefaultVal != nil { col.Type.Options.Default = opt.DefaultVal + col.Type.Options.DefaultLiteral = opt.DefaultLiteral } col.Type.Options.Invisible = opt.Invisible break diff --git a/go/vt/schemadiff/table_test.go b/go/vt/schemadiff/table_test.go index 547d0102427..633fdc9a5d6 100644 --- a/go/vt/schemadiff/table_test.go +++ b/go/vt/schemadiff/table_test.go @@ -390,6 +390,15 @@ func TestCreateTableDiff(t *testing.T) { from: "create table t1 (`id` int primary key, i int)", to: "create table t2 (`id` int, i int, primary key (id))", }, + { + // Note our DiffHints uses the default `ColumnRenameAssumeDifferent` value for `ColumnRenameStrategy`, + // therefore the diff is expected to drop and recreate the PK column. + name: "change primary key column", + from: "create table t1 (id int primary key, info int not null);", + to: "create table t1 (the_id int primary key, info int not null);", + diff: "alter table t1 drop primary key, drop column id, add column the_id int first, add primary key (the_id)", + cdiff: "ALTER TABLE `t1` DROP PRIMARY KEY, DROP COLUMN `id`, ADD COLUMN `the_id` int FIRST, ADD PRIMARY KEY (`the_id`)", + }, { name: "reordered key, no diff", from: "create table t1 (`id` int primary key, i int, key i_idx(i), key i2_idx(i, `id`))", diff --git a/go/vt/schemadiff/types.go b/go/vt/schemadiff/types.go index f5ec3aa31e1..86e5a8d06bf 100644 --- a/go/vt/schemadiff/types.go +++ b/go/vt/schemadiff/types.go @@ -40,6 +40,8 @@ type Entity interface { type EntityDiff interface { // IsEmpty returns true when the two entities are considered identical IsEmpty() bool + // EntityName returns the name of affected entity + EntityName() string // Entities returns the two diffed entitied, aka "from" and "to" Entities() (from Entity, to Entity) // Statement returns a valid SQL statement that applies the diff, e.g. an ALTER TABLE ... @@ -119,3 +121,9 @@ type DiffHints struct { TableQualifierHint int AlterTableAlgorithmStrategy int } + +const ( + ApplyDiffsNoConstraint = "ApplyDiffsNoConstraint" + ApplyDiffsInOrder = "ApplyDiffsInOrder" + ApplyDiffsSequential = "ApplyDiffsSequential" +) diff --git a/go/vt/schemadiff/view.go b/go/vt/schemadiff/view.go index 5be5386c106..1937200e5f9 100644 --- a/go/vt/schemadiff/view.go +++ b/go/vt/schemadiff/view.go @@ -33,6 +33,11 @@ func (d *AlterViewEntityDiff) IsEmpty() bool { return d.Statement() == nil } +// EntityName implements EntityDiff +func (d *AlterViewEntityDiff) EntityName() string { + return d.from.Name() +} + // Entities implements EntityDiff func (d *AlterViewEntityDiff) Entities() (from Entity, to Entity) { return d.from, d.to @@ -88,6 +93,12 @@ func (d *CreateViewEntityDiff) IsEmpty() bool { return d.Statement() == nil } +// EntityName implements EntityDiff +func (d *CreateViewEntityDiff) EntityName() string { + _, to := d.Entities() + return to.Name() +} + // Entities implements EntityDiff func (d *CreateViewEntityDiff) Entities() (from Entity, to Entity) { return nil, &CreateViewEntity{CreateView: d.createView} @@ -144,6 +155,11 @@ func (d *DropViewEntityDiff) IsEmpty() bool { return d.Statement() == nil } +// EntityName implements EntityDiff +func (d *DropViewEntityDiff) EntityName() string { + return d.from.Name() +} + // Entities implements EntityDiff func (d *DropViewEntityDiff) Entities() (from Entity, to Entity) { return d.from, nil diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index 8786f9c2b28..154d985bba4 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -23,6 +23,8 @@ import ( "strings" "testing" + "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/topo" @@ -92,7 +94,7 @@ func TestSchemaManagerExecutorOpenFail(t *testing.T) { controller := newFakeController( []string{"create table test_table (pk int);"}, false, false, false) controller.SetKeyspace("unknown_keyspace") - executor := NewTabletExecutor("TestSchemaManagerExecutorOpenFail", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout) + executor := NewTabletExecutor("TestSchemaManagerExecutorOpenFail", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) ctx := context.Background() _, err := Run(ctx, controller, executor) @@ -101,22 +103,62 @@ func TestSchemaManagerExecutorOpenFail(t *testing.T) { } } -func TestSchemaManagerExecutorExecuteFail(t *testing.T) { - controller := newFakeController( - []string{"create table test_table (pk int);"}, false, false, false) - executor := NewTabletExecutor("TestSchemaManagerExecutorExecuteFail", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout) - ctx := context.Background() +func TestSchemaManagerRun(t *testing.T) { + for _, batchSize := range []int{0, 1, 10} { + t.Run(fmt.Sprintf("batch-size=%d", batchSize), func(t *testing.T) { + sql := "create table test_table (pk int)" + controller := newFakeController( + []string{sql}, false, false, false) + fakeTmc := newFakeTabletManagerClient() + fakeTmc.AddSchemaChange(sql, &tabletmanagerdatapb.SchemaChangeResult{ + BeforeSchema: &tabletmanagerdatapb.SchemaDefinition{}, + AfterSchema: &tabletmanagerdatapb.SchemaDefinition{ + DatabaseSchema: "CREATE DATABASE `{{.DatabaseName}}` /*!40100 DEFAULT CHARACTER SET utf8 */", + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "test_table", + Schema: sql, + Type: tmutils.TableBaseTable, + }, + }, + }, + }) - _, err := Run(ctx, controller, executor) - if err == nil || !strings.Contains(err.Error(), "unknown database: vt_test_keyspace") { - t.Fatalf("run schema change should fail due to executor.Execute fail, but got: %v", err) + fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) + executor := NewTabletExecutor("TestSchemaManagerRun", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + + ctx := context.Background() + resp, err := Run(ctx, controller, executor) + + if len(resp.UUIDs) > 0 { + t.Fatalf("response should contain an empty list of UUIDs, found %v", len(resp.UUIDs)) + } + + if err != nil { + t.Fatalf("schema change should success but get error: %v", err) + } + if !controller.onReadSuccessTriggered { + t.Fatalf("OnReadSuccess should be called") + } + if controller.onReadFailTriggered { + t.Fatalf("OnReadFail should not be called") + } + if !controller.onValidationSuccessTriggered { + t.Fatalf("OnValidateSuccess should be called") + } + if controller.onValidationFailTriggered { + t.Fatalf("OnValidationFail should not be called") + } + if !controller.onExecutorCompleteTriggered { + t.Fatalf("OnExecutorComplete should be called") + } + }) } } -func TestSchemaManagerRun(t *testing.T) { +func TestSchemaManagerExecutorFail(t *testing.T) { sql := "create table test_table (pk int)" - controller := newFakeController( - []string{sql}, false, false, false) + controller := newFakeController([]string{sql}, false, false, false) fakeTmc := newFakeTabletManagerClient() fakeTmc.AddSchemaChange(sql, &tabletmanagerdatapb.SchemaChangeResult{ BeforeSchema: &tabletmanagerdatapb.SchemaDefinition{}, @@ -133,66 +175,67 @@ func TestSchemaManagerRun(t *testing.T) { }) fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) - executor := NewTabletExecutor("TestSchemaManagerRun", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout) + fakeTmc.EnableExecuteFetchAsDbaError = true + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) ctx := context.Background() resp, err := Run(ctx, controller, executor) - if len(resp.UUIDs) > 0 { t.Fatalf("response should contain an empty list of UUIDs, found %v", len(resp.UUIDs)) } - if err != nil { - t.Fatalf("schema change should success but get error: %v", err) - } - if !controller.onReadSuccessTriggered { - t.Fatalf("OnReadSuccess should be called") - } - if controller.onReadFailTriggered { - t.Fatalf("OnReadFail should not be called") - } - if !controller.onValidationSuccessTriggered { - t.Fatalf("OnValidateSuccess should be called") - } - if controller.onValidationFailTriggered { - t.Fatalf("OnValidationFail should not be called") - } - if !controller.onExecutorCompleteTriggered { - t.Fatalf("OnExecutorComplete should be called") + if err == nil || !strings.Contains(err.Error(), "schema change failed") { + t.Fatalf("schema change should fail, but got err: %v", err) } } -func TestSchemaManagerExecutorFail(t *testing.T) { +func TestSchemaManagerExecutorBatchVsStrategyFail(t *testing.T) { sql := "create table test_table (pk int)" controller := newFakeController([]string{sql}, false, false, false) fakeTmc := newFakeTabletManagerClient() - fakeTmc.AddSchemaChange(sql, &tabletmanagerdatapb.SchemaChangeResult{ - BeforeSchema: &tabletmanagerdatapb.SchemaDefinition{}, - AfterSchema: &tabletmanagerdatapb.SchemaDefinition{ - DatabaseSchema: "CREATE DATABASE `{{.DatabaseName}}` /*!40100 DEFAULT CHARACTER SET utf8 */", - TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ - { - Name: "test_table", - Schema: sql, - Type: tmutils.TableBaseTable, - }, - }, - }, - }) fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) fakeTmc.EnableExecuteFetchAsDbaError = true - executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout) + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10) + executor.SetDDLStrategy("online") ctx := context.Background() - resp, err := Run(ctx, controller, executor) - if len(resp.UUIDs) > 0 { - t.Fatalf("response should contain an empty list of UUIDs, found %v", len(resp.UUIDs)) - } + _, err := Run(ctx, controller, executor) - if err == nil || !strings.Contains(err.Error(), "schema change failed") { - t.Fatalf("schema change should fail, but got err: %v", err) - } + assert.ErrorContains(t, err, "--batch-size requires 'direct'") +} + +func TestSchemaManagerExecutorBatchVsQueriesFail(t *testing.T) { + sql := "alter table test_table force" + controller := newFakeController([]string{sql}, false, false, false) + fakeTmc := newFakeTabletManagerClient() + + fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) + fakeTmc.EnableExecuteFetchAsDbaError = true + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10) + executor.SetDDLStrategy("direct") + + ctx := context.Background() + _, err := Run(ctx, controller, executor) + + assert.ErrorContains(t, err, "--batch-size only allowed when all queries are CREATE") +} + +func TestSchemaManagerExecutorBatchVsUUIDsFail(t *testing.T) { + sql := "create table test_table (pk int)" + controller := newFakeController([]string{sql}, false, false, false) + fakeTmc := newFakeTabletManagerClient() + + fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) + fakeTmc.EnableExecuteFetchAsDbaError = true + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10) + executor.SetDDLStrategy("direct") + executor.SetUUIDList([]string{"4e5dcf80_354b_11eb_82cd_f875a4d24e90"}) + + ctx := context.Background() + _, err := Run(ctx, controller, executor) + + assert.ErrorContains(t, err, "--batch-size conflicts with --uuid-list") } func TestSchemaManagerRegisterControllerFactory(t *testing.T) { @@ -201,7 +244,6 @@ func TestSchemaManagerRegisterControllerFactory(t *testing.T) { "test_controller", func(params map[string]string) (Controller, error) { return newFakeController([]string{sql}, false, false, false), nil - }) _, err := GetControllerFactory("unknown") @@ -229,7 +271,7 @@ func TestSchemaManagerRegisterControllerFactory(t *testing.T) { } func newFakeExecutor(t *testing.T) *TabletExecutor { - return NewTabletExecutor("newFakeExecutor", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout) + return NewTabletExecutor("newFakeExecutor", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) } func newFakeTabletManagerClient() *fakeTabletManagerClient { @@ -289,8 +331,9 @@ func (client *fakeTabletManagerClient) ExecuteFetchAsDba(ctx context.Context, ta // - 3 shards named '1', '2', '3'. // - A primary tablet for each shard. func newFakeTopo(t *testing.T) *topo.Server { - ts := memorytopo.NewServer("test_cell") - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") if err := ts.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace failed: %v", err) } diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index 3ca154b77b4..a56a95d5034 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -19,6 +19,7 @@ package schemamanager import ( "context" "fmt" + "strings" "sync" "time" @@ -36,48 +37,37 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vtrpc" ) // TabletExecutor applies schema changes to all tablets. type TabletExecutor struct { - migrationContext string - ts *topo.Server - tmc tmclient.TabletManagerClient - logger logutil.Logger - tablets []*topodatapb.Tablet - isClosed bool - allowBigSchemaChange bool - keyspace string - waitReplicasTimeout time.Duration - ddlStrategySetting *schema.DDLStrategySetting - uuids []string + migrationContext string + ts *topo.Server + tmc tmclient.TabletManagerClient + logger logutil.Logger + tablets []*topodatapb.Tablet + isClosed bool + keyspace string + waitReplicasTimeout time.Duration + ddlStrategySetting *schema.DDLStrategySetting + uuids []string + batchSize int64 } // NewTabletExecutor creates a new TabletExecutor instance -func NewTabletExecutor(migrationContext string, ts *topo.Server, tmc tmclient.TabletManagerClient, logger logutil.Logger, waitReplicasTimeout time.Duration) *TabletExecutor { +func NewTabletExecutor(migrationContext string, ts *topo.Server, tmc tmclient.TabletManagerClient, logger logutil.Logger, waitReplicasTimeout time.Duration, batchSize int64) *TabletExecutor { return &TabletExecutor{ - ts: ts, - tmc: tmc, - logger: logger, - isClosed: true, - allowBigSchemaChange: false, - waitReplicasTimeout: waitReplicasTimeout, - migrationContext: migrationContext, + ts: ts, + tmc: tmc, + logger: logger, + isClosed: true, + waitReplicasTimeout: waitReplicasTimeout, + migrationContext: migrationContext, + batchSize: batchSize, } } -// AllowBigSchemaChange changes TabletExecutor such that big schema changes -// will no longer be rejected. -func (exec *TabletExecutor) AllowBigSchemaChange() { - exec.allowBigSchemaChange = true -} - -// DisallowBigSchemaChange enables the check for big schema changes such that -// TabletExecutor will reject these. -func (exec *TabletExecutor) DisallowBigSchemaChange() { - exec.allowBigSchemaChange = false -} - // SetDDLStrategy applies ddl_strategy from command line flags func (exec *TabletExecutor) SetDDLStrategy(ddlStrategy string) error { ddlStrategySetting, err := schema.ParseDDLStrategy(ddlStrategy) @@ -147,58 +137,49 @@ func (exec *TabletExecutor) Validate(ctx context.Context, sqls []string) error { if exec.isClosed { return fmt.Errorf("executor is closed") } - - // We ignore DATABASE-level DDLs here because detectBigSchemaChanges doesn't - // look at them anyway. - parsedDDLs, _, _, _, err := exec.parseDDLs(sqls) - if err != nil { + if err := exec.parseDDLs(sqls); err != nil { return err } - bigSchemaChange, err := exec.detectBigSchemaChanges(ctx, parsedDDLs) - if bigSchemaChange && exec.allowBigSchemaChange { - exec.logger.Warningf("Processing big schema change. This may cause visible MySQL downtime.") - return nil - } - return err + return nil } -func (exec *TabletExecutor) parseDDLs(sqls []string) ([]sqlparser.DDLStatement, []sqlparser.DBDDLStatement, [](*sqlparser.RevertMigration), [](*sqlparser.AlterMigration), error) { - parsedDDLs := make([]sqlparser.DDLStatement, 0) - parsedDBDDLs := make([]sqlparser.DBDDLStatement, 0) - revertStatements := make([](*sqlparser.RevertMigration), 0) - alterMigrationStatements := make([](*sqlparser.AlterMigration), 0) +func (exec *TabletExecutor) parseDDLs(sqls []string) error { for _, sql := range sqls { stmt, err := sqlparser.Parse(sql) if err != nil { - return nil, nil, nil, nil, fmt.Errorf("failed to parse sql: %s, got error: %v", sql, err) + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "failed to parse sql: %s, got error: %v", sql, err) } - switch stmt := stmt.(type) { + switch stmt.(type) { case sqlparser.DDLStatement: - parsedDDLs = append(parsedDDLs, stmt) case sqlparser.DBDDLStatement: - parsedDBDDLs = append(parsedDBDDLs, stmt) case *sqlparser.RevertMigration: - revertStatements = append(revertStatements, stmt) case *sqlparser.AlterMigration: - alterMigrationStatements = append(alterMigrationStatements, stmt) default: if len(exec.tablets) != 1 { - return nil, nil, nil, nil, fmt.Errorf("non-ddl statements can only be executed for single shard keyspaces: %s", sql) + return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "non-ddl statements can only be executed for single shard keyspaces: %s", sql) } } } - return parsedDDLs, parsedDBDDLs, revertStatements, alterMigrationStatements, nil + return nil +} + +// isDirectStrategy returns 'true' when the ddl_strategy configuration implies 'direct' +func (exec *TabletExecutor) isDirectStrategy() (isDirect bool) { + if exec.ddlStrategySetting == nil { + return true + } + if exec.ddlStrategySetting.Strategy.IsDirect() { + return true + } + return false } // IsOnlineSchemaDDL returns true if we expect to run a online schema change DDL func (exec *TabletExecutor) isOnlineSchemaDDL(stmt sqlparser.Statement) (isOnline bool) { switch stmt := stmt.(type) { case sqlparser.DDLStatement: - if exec.ddlStrategySetting == nil { - return false - } - if exec.ddlStrategySetting.Strategy.IsDirect() { + if exec.isDirectStrategy() { return false } switch stmt.GetAction() { @@ -211,62 +192,18 @@ func (exec *TabletExecutor) isOnlineSchemaDDL(stmt sqlparser.Statement) (isOnlin return false } -// a schema change that satisfies any following condition is considered -// to be a big schema change and will be rejected. -// 1. Alter more than 100,000 rows. -// 2. Change a table with more than 2,000,000 rows (Drops are fine). -func (exec *TabletExecutor) detectBigSchemaChanges(ctx context.Context, parsedDDLs []sqlparser.DDLStatement) (bool, error) { - // We want to avoid any overhead if possible. If all DDLs are online schema changes, then we want to - // skip GetSchema altogether. - foundAnyNonOnlineDDL := false - for _, ddl := range parsedDDLs { - if !exec.isOnlineSchemaDDL(ddl) { - foundAnyNonOnlineDDL = true - } - } - if !foundAnyNonOnlineDDL { - return false, nil - } - // exec.tablets is guaranteed to have at least one element; - // Otherwise, Open should fail and executor should fail. - primaryTabletInfo := exec.tablets[0] - // get database schema, excluding views. - req := &tabletmanagerdatapb.GetSchemaRequest{Tables: []string{}, ExcludeTables: []string{}, TableSchemaOnly: true} - dbSchema, err := exec.tmc.GetSchema(ctx, primaryTabletInfo, req) - if err != nil { - return false, fmt.Errorf("unable to get database schema, error: %v", err) - } - tableWithCount := make(map[string]uint64, len(dbSchema.TableDefinitions)) - for _, tableSchema := range dbSchema.TableDefinitions { - tableWithCount[tableSchema.Name] = tableSchema.RowCount - } - for _, ddl := range parsedDDLs { - if exec.isOnlineSchemaDDL(ddl) { - // Since this is an online schema change, there is no need to worry about big changes - continue - } - switch ddl.GetAction() { - case sqlparser.DropDDLAction, sqlparser.CreateDDLAction, sqlparser.TruncateDDLAction, sqlparser.RenameDDLAction: - continue - } - tableName := ddl.GetTable().Name.String() - if rowCount, ok := tableWithCount[tableName]; ok { - if rowCount > 100000 && ddl.GetAction() == sqlparser.AlterDDLAction { - return true, fmt.Errorf( - "big schema change detected. Disable check with -allow_long_unavailability. ddl: %s alters a table with more than 100 thousand rows", sqlparser.String(ddl)) - } - if rowCount > 2000000 { - return true, fmt.Errorf( - "big schema change detected. Disable check with -allow_long_unavailability. ddl: %s changes a table with more than 2 million rows", sqlparser.String(ddl)) - } - } - } - return false, nil -} - // executeSQL executes a single SQL statement either as online DDL or synchronously on all tablets. // In online DDL case, the query may be exploded into multiple queries during func (exec *TabletExecutor) executeSQL(ctx context.Context, sql string, providedUUID string, execResult *ExecuteResult) (executedAsynchronously bool, err error) { + executeViaFetch := func() (bool, error) { + exec.executeOnAllTablets(ctx, execResult, sql, false) + return false, nil + } + if exec.batchSize > 1 { + // Batched writes only ever work with 'direct' strategy and appleid directly to the mysql servers + return executeViaFetch() + } + // Analyze what type of query this is: stmt, err := sqlparser.Parse(sql) if err != nil { return false, err @@ -303,17 +240,61 @@ func (exec *TabletExecutor) executeSQL(ctx context.Context, sql string, provided exec.executeOnAllTablets(ctx, execResult, sql, true) return true, nil } - exec.executeOnAllTablets(ctx, execResult, sql, false) - return false, nil + // Got here? The statement needs to be executed directly. + return executeViaFetch() +} + +// batchSQLs combines SQLs into batches, delimited by ';' +func batchSQLs(sqls []string, batchSize int) (batchedSQLs []string) { + if batchSize <= 1 { + return sqls + } + for len(sqls) > 0 { + nextBatchSize := batchSize + if nextBatchSize > len(sqls) { + nextBatchSize = len(sqls) + } + nextBatch := sqls[0:nextBatchSize] + nextBatchSql := strings.Join(nextBatch, ";") + batchedSQLs = append(batchedSQLs, nextBatchSql) + sqls = sqls[nextBatchSize:] + } + return batchedSQLs +} + +// allSQLsAreCreateQueries returns 'true' when all given queries are CREATE TABLE|VIEW +// This function runs pretty fast even for thousands of tables (its overhead is insignificant compared with +// the time it would take to apply the changes). +func allSQLsAreCreateQueries(sqls []string) (bool, error) { + for _, sql := range sqls { + stmt, err := sqlparser.Parse(sql) + if err != nil { + return false, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "failed to parse sql: %s, got error: %v", sql, err) + } + switch stmt.(type) { + case *sqlparser.CreateTable, *sqlparser.CreateView: + default: + return false, nil + } + } + return true, nil } // Execute applies schema changes func (exec *TabletExecutor) Execute(ctx context.Context, sqls []string) *ExecuteResult { execResult := ExecuteResult{} + + // errorExecResult is a utility function that populates the execResult with the given error, and returns it. Used to quickly bail out of + // this function. + errorExecResult := func(err error) *ExecuteResult { + if err != nil { + execResult.ExecutorErr = err.Error() + } + return &execResult + } execResult.Sqls = sqls if exec.isClosed { - execResult.ExecutorErr = "executor is closed" - return &execResult + return errorExecResult(fmt.Errorf("executor is closed")) } startTime := time.Now() defer func() { execResult.TotalTimeSpent = time.Since(startTime) }() @@ -322,8 +303,7 @@ func (exec *TabletExecutor) Execute(ctx context.Context, sqls []string) *Execute // keyspace-wide operations like resharding migrations. ctx, unlock, lockErr := exec.ts.LockKeyspace(ctx, exec.keyspace, "ApplySchemaKeyspace") if lockErr != nil { - execResult.ExecutorErr = vterrors.Wrapf(lockErr, "lockErr in ApplySchemaKeyspace %v", exec.keyspace).Error() - return &execResult + return errorExecResult(vterrors.Wrapf(lockErr, "lockErr in ApplySchemaKeyspace %v", exec.keyspace)) } defer func() { // This is complicated because execResult.ExecutorErr @@ -336,8 +316,7 @@ func (exec *TabletExecutor) Execute(ctx context.Context, sqls []string) *Execute }() if exec.hasProvidedUUIDs() && len(exec.uuids) != len(sqls) { - execResult.ExecutorErr = fmt.Sprintf("provided %v UUIDs do not match number of DDLs %v", len(exec.uuids), len(sqls)) - return &execResult + return errorExecResult(fmt.Errorf("provided %v UUIDs do not match number of DDLs %v", len(exec.uuids), len(sqls))) } providedUUID := "" @@ -390,11 +369,28 @@ func (exec *TabletExecutor) Execute(ctx context.Context, sqls []string) *Execute wg.Wait() }() + if exec.batchSize > 1 { + // Before we proceed to batch, we need to validate there's no conflicts. + if !exec.isDirectStrategy() { + return errorExecResult(fmt.Errorf("--batch-size requires 'direct' ddl-strategy")) + } + if exec.hasProvidedUUIDs() { + return errorExecResult(fmt.Errorf("--batch-size conflicts with --uuid-list. Batching does not support UUIDs.")) + } + allSQLsAreCreate, err := allSQLsAreCreateQueries(sqls) + if err != nil { + return errorExecResult(err) + } + if !allSQLsAreCreate { + return errorExecResult(fmt.Errorf("--batch-size only allowed when all queries are CREATE TABLE|VIEW")) + } + + sqls = batchSQLs(sqls, int(exec.batchSize)) + } for index, sql := range sqls { // Attempt to renew lease: if err := rl.Do(func() error { return topo.CheckKeyspaceLockedAndRenew(ctx, exec.keyspace) }); err != nil { - execResult.ExecutorErr = vterrors.Wrapf(err, "CheckKeyspaceLocked in ApplySchemaKeyspace %v", exec.keyspace).Error() - return &execResult + return errorExecResult(vterrors.Wrapf(err, "CheckKeyspaceLocked in ApplySchemaKeyspace %v", exec.keyspace)) } execResult.CurSQLIndex = index if exec.hasProvidedUUIDs() { @@ -402,8 +398,7 @@ func (exec *TabletExecutor) Execute(ctx context.Context, sqls []string) *Execute } executedAsynchronously, err := exec.executeSQL(ctx, sql, providedUUID, &execResult) if err != nil { - execResult.ExecutorErr = err.Error() - return &execResult + return errorExecResult(err) } if !executedAsynchronously { syncOperationExecuted = true @@ -446,6 +441,33 @@ func (exec *TabletExecutor) executeOnAllTablets(ctx context.Context, execResult } } +// applyAllowZeroInDate takes a SQL string which may contain one or more statements, +// and, assuming those are DDLs, adds a /*vt+ allowZeroInDate=true */ directive to all of them, +// returning the result again as one long SQL. +func applyAllowZeroInDate(sql string) (string, error) { + // sql may be a batch of multiple statements + sqls, err := sqlparser.SplitStatementToPieces(sql) + if err != nil { + return sql, err + } + var modifiedSqls []string + for _, singleSQL := range sqls { + // --allow-zero-in-date Applies to DDLs + stmt, err := sqlparser.Parse(singleSQL) + if err != nil { + return sql, err + } + if ddlStmt, ok := stmt.(sqlparser.DDLStatement); ok { + // Add comments directive to allow zero in date + const directive = `/*vt+ allowZeroInDate=true */` + ddlStmt.SetComments(ddlStmt.GetParsedComments().Prepend(directive)) + singleSQL = sqlparser.String(ddlStmt) + } + modifiedSqls = append(modifiedSqls, singleSQL) + } + return strings.Join(modifiedSqls, ";"), err +} + func (exec *TabletExecutor) executeOneTablet( ctx context.Context, tablet *topodatapb.Tablet, @@ -464,22 +486,17 @@ func (exec *TabletExecutor) executeOneTablet( } else { if exec.ddlStrategySetting != nil && exec.ddlStrategySetting.IsAllowZeroInDateFlag() { // --allow-zero-in-date Applies to DDLs - stmt, err := sqlparser.Parse(string(sql)) + sql, err = applyAllowZeroInDate(sql) if err != nil { errChan <- ShardWithError{Shard: tablet.Shard, Err: err.Error()} return } - if ddlStmt, ok := stmt.(sqlparser.DDLStatement); ok { - // Add comments directive to allow zero in date - const directive = `/*vt+ allowZeroInDate=true */` - ddlStmt.SetComments(ddlStmt.GetParsedComments().Prepend(directive)) - sql = sqlparser.String(ddlStmt) - } } result, err = exec.tmc.ExecuteFetchAsDba(ctx, tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ Query: []byte(sql), MaxRows: 10, }) + } if err != nil { errChan <- ShardWithError{Shard: tablet.Shard, Err: err.Error()} diff --git a/go/vt/schemamanager/tablet_executor_test.go b/go/vt/schemamanager/tablet_executor_test.go index 00485d24723..175e10dfb66 100644 --- a/go/vt/schemamanager/tablet_executor_test.go +++ b/go/vt/schemamanager/tablet_executor_test.go @@ -18,6 +18,7 @@ package schemamanager import ( "context" + "fmt" "strings" "testing" "time" @@ -54,8 +55,9 @@ func TestTabletExecutorOpen(t *testing.T) { } func TestTabletExecutorOpenWithEmptyPrimaryAlias(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "test_cell", @@ -70,7 +72,7 @@ func TestTabletExecutorOpenWithEmptyPrimaryAlias(t *testing.T) { if err := ts.InitTablet(ctx, tablet, false /*allowPrimaryOverride*/, true /*createShardAndKeyspace*/, false /*allowUpdate*/); err != nil { t.Fatalf("InitTablet failed: %v", err) } - executor := NewTabletExecutor("TestTabletExecutorOpenWithEmptyPrimaryAlias", ts, newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout) + executor := NewTabletExecutor("TestTabletExecutorOpenWithEmptyPrimaryAlias", ts, newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) if err := executor.Open(ctx, "test_keyspace"); err == nil || !strings.Contains(err.Error(), "does not have a primary") { t.Fatalf("executor.Open() = '%v', want error", err) } @@ -103,7 +105,7 @@ func TestTabletExecutorValidate(t *testing.T) { }, }) - executor := NewTabletExecutor("TestTabletExecutorValidate", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout) + executor := NewTabletExecutor("TestTabletExecutorValidate", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) ctx := context.Background() sqls := []string{ @@ -134,8 +136,8 @@ func TestTabletExecutorValidate(t *testing.T) { // alter a table with more than 100,000 rows if err := executor.Validate(ctx, []string{ "ALTER TABLE test_table_03 ADD COLUMN new_id bigint(20)", - }); err == nil { - t.Fatalf("executor.Validate should fail, alter a table more than 100,000 rows") + }); err != nil { + t.Fatalf("executor.Validate should not fail, even for a table with more than 100,000 rows") } if err := executor.Validate(ctx, []string{ @@ -149,21 +151,6 @@ func TestTabletExecutorValidate(t *testing.T) { }); err != nil { t.Fatalf("executor.Validate should succeed, drop a table with more than 2,000,000 rows is allowed") } - - executor.AllowBigSchemaChange() - // alter a table with more than 100,000 rows - if err := executor.Validate(ctx, []string{ - "ALTER TABLE test_table_03 ADD COLUMN new_id bigint(20)", - }); err != nil { - t.Fatalf("executor.Validate should succeed, big schema change is disabled") - } - - executor.DisallowBigSchemaChange() - if err := executor.Validate(ctx, []string{ - "ALTER TABLE test_table_03 ADD COLUMN new_id bigint(20)", - }); err == nil { - t.Fatalf("executor.Validate should fail, alter a table more than 100,000 rows") - } } func TestTabletExecutorDML(t *testing.T) { @@ -192,7 +179,7 @@ func TestTabletExecutorDML(t *testing.T) { }, }) - executor := NewTabletExecutor("TestTabletExecutorDML", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout) + executor := NewTabletExecutor("TestTabletExecutorDML", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) ctx := context.Background() executor.Open(ctx, "unsharded_keyspace") @@ -301,3 +288,158 @@ func TestIsOnlineSchemaDDL(t *testing.T) { } } } + +func TestBatchSQLs(t *testing.T) { + sqls := []string{ + "create table t1(id int primary key)", + "create table t2(id int primary key)", + "create table t3(id int primary key)", + "create table t4(id int primary key)", + "create view v as select id from t", + } + tcases := []struct { + batchSize int + expectSQLs []string + }{ + { + batchSize: 0, + expectSQLs: sqls, + }, + { + batchSize: 1, + expectSQLs: sqls, + }, + { + batchSize: 2, + expectSQLs: []string{ + "create table t1(id int primary key);create table t2(id int primary key)", + "create table t3(id int primary key);create table t4(id int primary key)", + "create view v as select id from t", + }, + }, + { + batchSize: 3, + expectSQLs: []string{ + "create table t1(id int primary key);create table t2(id int primary key);create table t3(id int primary key)", + "create table t4(id int primary key);create view v as select id from t", + }, + }, + { + batchSize: 4, + expectSQLs: []string{ + "create table t1(id int primary key);create table t2(id int primary key);create table t3(id int primary key);create table t4(id int primary key)", + "create view v as select id from t", + }, + }, + { + batchSize: 5, + expectSQLs: []string{ + "create table t1(id int primary key);create table t2(id int primary key);create table t3(id int primary key);create table t4(id int primary key);create view v as select id from t", + }, + }, + { + batchSize: 6, + expectSQLs: []string{ + "create table t1(id int primary key);create table t2(id int primary key);create table t3(id int primary key);create table t4(id int primary key);create view v as select id from t", + }, + }, + } + for _, tcase := range tcases { + t.Run(fmt.Sprintf("%d", tcase.batchSize), func(t *testing.T) { + batchedSQLs := batchSQLs(sqls, tcase.batchSize) + assert.Equal(t, tcase.expectSQLs, batchedSQLs) + }) + } +} + +func TestAllSQLsAreCreateQueries(t *testing.T) { + tcases := []struct { + name string + sqls []string + expect bool + }{ + { + name: "empty", + expect: true, + }, + { + name: "single, yes", + sqls: []string{"create table t1 (id int primary key)"}, + expect: true, + }, + { + name: "single, no", + sqls: []string{"alter table t1 force"}, + expect: false, + }, + { + name: "multi, no", + sqls: []string{ + "create table t1 (id int primary key)", + "alter table t1 force", + }, + expect: false, + }, + { + name: "multi, no", + sqls: []string{ + "alter table t1 force", + "create table t1 (id int primary key)", + }, + expect: false, + }, + { + name: "multi, yes", + sqls: []string{ + "create table t1 (id int primary key)", + "create table t2 (id int primary key)", + "create table t3 (id int primary key)", + "create view v1 as select id from t1", + }, + expect: true, + }, + } + + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + result, err := allSQLsAreCreateQueries(tcase.sqls) + assert.NoError(t, err) + assert.Equal(t, tcase.expect, result) + }) + } +} + +func TestApplyAllowZeroInDate(t *testing.T) { + tcases := []struct { + sql string + expect string + }{ + { + "create table t1(id int primary key); ", + "create /*vt+ allowZeroInDate=true */ table t1 (\n\tid int primary key\n)", + }, + { + "create table t1(id int primary key)", + "create /*vt+ allowZeroInDate=true */ table t1 (\n\tid int primary key\n)", + }, + { + "create table t1(id int primary key);select 1 from dual", + "create /*vt+ allowZeroInDate=true */ table t1 (\n\tid int primary key\n);select 1 from dual", + }, + { + "create table t1(id int primary key); alter table t2 add column id2 int", + "create /*vt+ allowZeroInDate=true */ table t1 (\n\tid int primary key\n);alter /*vt+ allowZeroInDate=true */ table t2 add column id2 int", + }, + { + " ; ; ;;; create table t1(id int primary key); ;; alter table t2 add column id2 int ;;", + "create /*vt+ allowZeroInDate=true */ table t1 (\n\tid int primary key\n);alter /*vt+ allowZeroInDate=true */ table t2 add column id2 int", + }, + } + for _, tcase := range tcases { + t.Run(tcase.sql, func(t *testing.T) { + result, err := applyAllowZeroInDate(tcase.sql) + assert.NoError(t, err) + assert.Equal(t, tcase.expect, result) + }) + } +} diff --git a/go/vt/servenv/buildinfo_test.go b/go/vt/servenv/buildinfo_test.go index e6793c915d0..be35511a036 100644 --- a/go/vt/servenv/buildinfo_test.go +++ b/go/vt/servenv/buildinfo_test.go @@ -33,17 +33,17 @@ func TestVersionString(t *testing.T) { buildTimePretty: "time is now", buildGitRev: "d54b87ca0be09b678bb4490060e8f23f890ddb92", buildGitBranch: "gitBranch", - goVersion: "1.19.3", + goVersion: "1.20.2", goOS: "amiga", goArch: "amd64", version: "v1.2.3-SNAPSHOT", } - assert.Equal(t, "Version: v1.2.3-SNAPSHOT (Git revision d54b87ca0be09b678bb4490060e8f23f890ddb92 branch 'gitBranch') built on time is now by user@host using 1.19.3 amiga/amd64", v.String()) + assert.Equal(t, "Version: v1.2.3-SNAPSHOT (Git revision d54b87ca0be09b678bb4490060e8f23f890ddb92 branch 'gitBranch') built on time is now by user@host using 1.20.2 amiga/amd64", v.String()) v.jenkinsBuildNumber = 422 - assert.Equal(t, "Version: v1.2.3-SNAPSHOT (Jenkins build 422) (Git revision d54b87ca0be09b678bb4490060e8f23f890ddb92 branch 'gitBranch') built on time is now by user@host using 1.19.3 amiga/amd64", v.String()) + assert.Equal(t, "Version: v1.2.3-SNAPSHOT (Jenkins build 422) (Git revision d54b87ca0be09b678bb4490060e8f23f890ddb92 branch 'gitBranch') built on time is now by user@host using 1.20.2 amiga/amd64", v.String()) assert.Equal(t, "8.0.30-Vitess", v.MySQLVersion()) } diff --git a/go/vt/servenv/exporter.go b/go/vt/servenv/exporter.go index d8eb4ef428d..a3d23dc4b74 100644 --- a/go/vt/servenv/exporter.go +++ b/go/vt/servenv/exporter.go @@ -105,6 +105,10 @@ type Exporter struct { mu sync.Mutex } +func init() { + HTTPHandle("/debug/vars", expvar.Handler()) +} + // NewExporter creates a new Exporter with name as namespace. // label is the name of the additional dimension for the stats vars. func NewExporter(name, label string) *Exporter { @@ -153,12 +157,12 @@ func (e *Exporter) URLPrefix() string { // HandleFunc sets or overwrites the handler for url. If Exporter has a name, // url remapped from /path to /name/path. If name is empty, the request -// is passed through to http.HandleFunc. +// is passed through to HTTPHandleFunc. func (e *Exporter) HandleFunc(url string, f func(w http.ResponseWriter, r *http.Request)) { e.mu.Lock() defer e.mu.Unlock() if e.name == "" { - http.HandleFunc(url, f) + HTTPHandleFunc(url, f) return } @@ -169,7 +173,7 @@ func (e *Exporter) HandleFunc(url string, f func(w http.ResponseWriter, r *http. hf := &handleFunc{f: f} e.handleFuncs[url] = hf - http.HandleFunc(e.URLPrefix()+url, func(w http.ResponseWriter, r *http.Request) { + HTTPHandleFunc(e.URLPrefix()+url, func(w http.ResponseWriter, r *http.Request) { if f := hf.Get(); f != nil { f(w, r) } diff --git a/go/vt/servenv/exporter_test.go b/go/vt/servenv/exporter_test.go index 4a9edf4da6e..f692e7d5d03 100644 --- a/go/vt/servenv/exporter_test.go +++ b/go/vt/servenv/exporter_test.go @@ -43,7 +43,12 @@ func TestHandleFunc(t *testing.T) { } defer listener.Close() port := listener.Addr().(*net.TCPAddr).Port - go http.Serve(listener, nil) + go func() { + err := HTTPServe(listener) + if err != nil { + t.Errorf("HTTPServe returned: %v", err) + } + }() ebd := NewExporter("", "") ebd.HandleFunc("/path", func(w http.ResponseWriter, r *http.Request) { diff --git a/go/vt/servenv/flushlogs.go b/go/vt/servenv/flushlogs.go index 6b88e137654..d3ba162249a 100644 --- a/go/vt/servenv/flushlogs.go +++ b/go/vt/servenv/flushlogs.go @@ -25,7 +25,7 @@ import ( func init() { OnInit(func() { - http.HandleFunc("/debug/flushlogs", func(w http.ResponseWriter, r *http.Request) { + HTTPHandleFunc("/debug/flushlogs", func(w http.ResponseWriter, r *http.Request) { logutil.Flush() fmt.Fprint(w, "flushed") }) diff --git a/go/vt/servenv/grpc_codec.go b/go/vt/servenv/grpc_codec.go index 4376783de20..7d2b6364d3b 100644 --- a/go/vt/servenv/grpc_codec.go +++ b/go/vt/servenv/grpc_codec.go @@ -38,29 +38,25 @@ type vtprotoMessage interface { } func (vtprotoCodec) Marshal(v any) ([]byte, error) { - vt, ok := v.(vtprotoMessage) - if ok { - return vt.MarshalVT() + switch v := v.(type) { + case vtprotoMessage: + return v.MarshalVT() + case proto.Message: + return proto.Marshal(v) + default: + return nil, fmt.Errorf("failed to marshal, message is %T, must satisfy the vtprotoMessage interface or want proto.Message", v) } - - vv, ok := v.(proto.Message) - if !ok { - return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) - } - return proto.Marshal(vv) } func (vtprotoCodec) Unmarshal(data []byte, v any) error { - vt, ok := v.(vtprotoMessage) - if ok { - return vt.UnmarshalVT(data) - } - - vv, ok := v.(proto.Message) - if !ok { - return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) + switch v := v.(type) { + case vtprotoMessage: + return v.UnmarshalVT(data) + case proto.Message: + return proto.Unmarshal(data, v) + default: + return fmt.Errorf("failed to unmarshal, message is %T, must satisfy the vtprotoMessage interface or want proto.Message", v) } - return proto.Unmarshal(data, vv) } func (vtprotoCodec) Name() string { diff --git a/go/vt/servenv/http.go b/go/vt/servenv/http.go new file mode 100644 index 00000000000..f4b001383d1 --- /dev/null +++ b/go/vt/servenv/http.go @@ -0,0 +1,54 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package servenv + +import ( + "errors" + "net" + "net/http" + "net/http/pprof" + + "vitess.io/vitess/go/vt/servenv/internal/mux" +) + +// HTTPHandle registers the given handler for the internal servenv mux. +func HTTPHandle(pattern string, handler http.Handler) { + mux.Mux.Handle(pattern, handler) +} + +// HTTPHandleFunc registers the given handler func for the internal servenv mux. +func HTTPHandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) { + mux.Mux.HandleFunc(pattern, handler) +} + +// HTTPServe starts the HTTP server for the internal servenv mux on the listener. +func HTTPServe(l net.Listener) error { + err := http.Serve(l, mux.Mux) + if errors.Is(err, http.ErrServerClosed) || errors.Is(err, net.ErrClosed) { + return nil + } + return err +} + +// HTTPRegisterProfile registers the default pprof HTTP endpoints with the internal servenv mux. +func HTTPRegisterProfile() { + HTTPHandleFunc("/debug/pprof/", pprof.Index) + HTTPHandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + HTTPHandleFunc("/debug/pprof/profile", pprof.Profile) + HTTPHandleFunc("/debug/pprof/symbol", pprof.Symbol) + HTTPHandleFunc("/debug/pprof/trace", pprof.Trace) +} diff --git a/go/vt/topo/k8stopo/boilerplate.go.txt b/go/vt/servenv/internal/mux/mux.go similarity index 84% rename from go/vt/topo/k8stopo/boilerplate.go.txt rename to go/vt/servenv/internal/mux/mux.go index 3f6ccc17d97..1079f493ff9 100644 --- a/go/vt/topo/k8stopo/boilerplate.go.txt +++ b/go/vt/servenv/internal/mux/mux.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,3 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +package mux + +import "net/http" + +var Mux = http.NewServeMux() diff --git a/go/vt/servenv/liveness.go b/go/vt/servenv/liveness.go index 1b3365501a1..5acd08edf60 100644 --- a/go/vt/servenv/liveness.go +++ b/go/vt/servenv/liveness.go @@ -29,7 +29,7 @@ import ( // further behind on its backlog. func init() { - http.HandleFunc("/debug/liveness", func(rw http.ResponseWriter, r *http.Request) { + HTTPHandleFunc("/debug/liveness", func(rw http.ResponseWriter, r *http.Request) { // Do nothing. Return success immediately. }) } diff --git a/go/vt/servenv/liveness_test.go b/go/vt/servenv/liveness_test.go index 38662e9150a..80c3befe829 100644 --- a/go/vt/servenv/liveness_test.go +++ b/go/vt/servenv/liveness_test.go @@ -19,12 +19,13 @@ package servenv import ( "io" "net/http" - "net/http/httptest" "testing" + + "vitess.io/vitess/go/vt/servenv/testutils" ) func TestLivenessHandler(t *testing.T) { - server := httptest.NewServer(nil) + server := testutils.HTTPTestServer() defer server.Close() resp, err := http.Get(server.URL + "/debug/liveness") diff --git a/go/vt/servenv/pprof.go b/go/vt/servenv/pprof.go index d431dd9239b..d1d8e99588f 100644 --- a/go/vt/servenv/pprof.go +++ b/go/vt/servenv/pprof.go @@ -342,4 +342,5 @@ func init() { fs.StringSliceVar(&pprofFlag, "pprof", pprofFlag, "enable profiling") }) OnInit(pprofInit) + OnInit(HTTPRegisterProfile) } diff --git a/go/vt/servenv/run.go b/go/vt/servenv/run.go index 82dfc285efb..5b585184331 100644 --- a/go/vt/servenv/run.go +++ b/go/vt/servenv/run.go @@ -19,7 +19,6 @@ package servenv import ( "fmt" "net" - "net/http" "net/url" "os" "os/signal" @@ -49,7 +48,12 @@ func Run(port int) { if err != nil { log.Exit(err) } - go http.Serve(l, nil) + go func() { + err := HTTPServe(l) + if err != nil { + log.Errorf("http serve returned unexpected error: %v", err) + } + }() ExitChan = make(chan os.Signal, 1) signal.Notify(ExitChan, syscall.SIGTERM, syscall.SIGINT) diff --git a/go/vt/servenv/servenv.go b/go/vt/servenv/servenv.go index 13e9f13c2de..e115989af12 100644 --- a/go/vt/servenv/servenv.go +++ b/go/vt/servenv/servenv.go @@ -29,8 +29,8 @@ limitations under the License. package servenv import ( - // register the HTTP handlers for profiling - _ "net/http/pprof" + "flag" + "fmt" "net/url" "os" "os/signal" @@ -40,20 +40,20 @@ import ( "syscall" "time" + "github.com/spf13/cobra" "github.com/spf13/pflag" "vitess.io/vitess/go/event" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/trace" + "vitess.io/vitess/go/viperutil" + viperdebug "vitess.io/vitess/go/viperutil/debug" "vitess.io/vitess/go/vt/grpccommon" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/vterrors" - // register the proper init and shutdown hooks for logging - _ "vitess.io/vitess/go/vt/logutil" - // Include deprecation warnings for soon-to-be-unsupported flag invocations. _flag "vitess.io/vitess/go/internal/flag" ) @@ -116,6 +116,11 @@ func Init() { defer mu.Unlock() initStartTime = time.Now() + // Uptime metric + _ = stats.NewGaugeFunc("Uptime", "Uptime in nanoseconds", func() int64 { + return int64(time.Since(serverStart).Nanoseconds()) + }) + // Ignore SIGPIPE if specified // The Go runtime catches SIGPIPE for us on all fds except stdout/stderr // See https://golang.org/pkg/os/signal/#hdr-SIGPIPE @@ -320,11 +325,17 @@ func getFlagHooksFor(cmd string) (hooks []func(fs *pflag.FlagSet)) { return hooks } +// Needed because some tests require multiple parse passes, so we guard against +// that here. +var debugConfigRegisterOnce sync.Once + // ParseFlags initializes flags and handles the common case when no positional // arguments are expected. func ParseFlags(cmd string) { fs := GetFlagSetFor(cmd) + viperutil.BindFlags(fs) + _flag.Parse(fs) if version { @@ -338,9 +349,58 @@ func ParseFlags(cmd string) { log.Exitf("%s doesn't take any positional arguments, got '%s'", cmd, strings.Join(args, " ")) } + loadViper(cmd) + logutil.PurgeLogs() } +// ParseFlagsForTests initializes flags but skips the version, filesystem +// args and go flag related work. +// Note: this should not be used outside of unit tests. +func ParseFlagsForTests(cmd string) { + fs := GetFlagSetFor(cmd) + pflag.CommandLine = fs + pflag.Parse() + viperutil.BindFlags(fs) + loadViper(cmd) +} + +// MoveFlagsToCobraCommand moves the servenv-registered flags to the flagset of +// the given cobra command, then copies over the glog flags that otherwise +// require manual transferring. +func MoveFlagsToCobraCommand(cmd *cobra.Command) { + cmd.Flags().AddFlagSet(GetFlagSetFor(cmd.Use)) + // glog flags, no better way to do this + _flag.PreventGlogVFlagFromClobberingVersionFlagShorthand(cmd.Flags()) + cmd.Flags().AddGoFlag(flag.Lookup("logtostderr")) + cmd.Flags().AddGoFlag(flag.Lookup("log_backtrace_at")) + cmd.Flags().AddGoFlag(flag.Lookup("alsologtostderr")) + cmd.Flags().AddGoFlag(flag.Lookup("stderrthreshold")) + cmd.Flags().AddGoFlag(flag.Lookup("log_dir")) + cmd.Flags().AddGoFlag(flag.Lookup("vmodule")) + + pflag.CommandLine = cmd.Flags() +} + +// CobraPreRunE returns the common function that commands will need to load +// viper infrastructure. It matches the signature of cobra's (Pre|Post)RunE-type +// functions. +func CobraPreRunE(cmd *cobra.Command, args []string) error { + _flag.TrickGlog() + + watchCancel, err := viperutil.LoadConfig() + if err != nil { + return fmt.Errorf("%s: failed to read in config: %s", cmd.Name(), err) + } + + OnTerm(watchCancel) + HTTPHandleFunc("/debug/config", viperdebug.HandlerFunc) + + logutil.PurgeLogs() + + return nil +} + // GetFlagSetFor returns the flag set for a given command. // This has to exported for the Vitess-operator to use func GetFlagSetFor(cmd string) *pflag.FlagSet { @@ -356,6 +416,8 @@ func GetFlagSetFor(cmd string) *pflag.FlagSet { func ParseFlagsWithArgs(cmd string) []string { fs := GetFlagSetFor(cmd) + viperutil.BindFlags(fs) + _flag.Parse(fs) if version { @@ -368,11 +430,24 @@ func ParseFlagsWithArgs(cmd string) []string { log.Exitf("%s expected at least one positional argument", cmd) } + loadViper(cmd) + logutil.PurgeLogs() return args } +func loadViper(cmd string) { + watchCancel, err := viperutil.LoadConfig() + if err != nil { + log.Exitf("%s: failed to read in config: %s", cmd, err.Error()) + } + OnTerm(watchCancel) + debugConfigRegisterOnce.Do(func() { + HTTPHandleFunc("/debug/config", viperdebug.HandlerFunc) + }) +} + // Flag installations for packages that servenv imports. We need to register // here rather than in those packages (which is what we would normally do) // because that would create a dependency cycle. @@ -400,7 +475,7 @@ func init() { "vtctld", "vtgate", "vtgateclienttest", - "vtgr", + "vtorc", "vttablet", "vttestserver", } { @@ -413,7 +488,6 @@ func init() { "vtcombo", "vtctld", "vtgate", - "vtgr", "vttablet", "vtorc", } { @@ -424,6 +498,8 @@ func init() { OnParse(log.RegisterFlags) // Flags in package logutil are installed for all binaries. OnParse(logutil.RegisterFlags) + // Flags in package viperutil/config are installed for all binaries. + OnParse(viperutil.RegisterFlags) } func RegisterFlagsForTopoBinaries(registerFlags func(fs *pflag.FlagSet)) { @@ -433,7 +509,6 @@ func RegisterFlagsForTopoBinaries(registerFlags func(fs *pflag.FlagSet)) { "vtctl", "vtctld", "vtgate", - "vtgr", "vttablet", "vttestserver", "zk", @@ -443,3 +518,10 @@ func RegisterFlagsForTopoBinaries(registerFlags func(fs *pflag.FlagSet)) { OnParseFor(cmd, registerFlags) } } + +// TestingEndtoend is true when this Vitess binary is being ran as part of an endtoend test suite +var TestingEndtoend = false + +func init() { + TestingEndtoend = os.Getenv("VTTEST") == "endtoend" +} diff --git a/go/vt/servenv/status.go b/go/vt/servenv/status.go index 409b2050181..ac912fd881e 100644 --- a/go/vt/servenv/status.go +++ b/go/vt/servenv/status.go @@ -19,8 +19,6 @@ package servenv import ( "bytes" "fmt" - "html" - "html/template" "io" "net" "net/http" @@ -32,6 +30,10 @@ import ( "sync" "time" + "github.com/google/safehtml" + "github.com/google/safehtml/template" + "github.com/google/safehtml/template/uncheckedconversions" + "vitess.io/vitess/go/acl" "vitess.io/vitess/go/vt/log" ) @@ -165,12 +167,12 @@ func newStatusPage(name string) *statusPage { } sp.tmpl = template.Must(sp.reparse(nil)) if name == "" { - http.HandleFunc(StatusURLPath(), sp.statusHandler) + HTTPHandleFunc(StatusURLPath(), sp.statusHandler) // Debug profiles are only supported for the top level status page. registerDebugBlockProfileRate() registerDebugMutexProfileFraction() } else { - http.HandleFunc("/"+name+StatusURLPath(), sp.statusHandler) + HTTPHandleFunc("/"+name+StatusURLPath(), sp.statusHandler) } return sp } @@ -260,7 +262,7 @@ func (sp *statusPage) reparse(sections []section) (*template.Template, error) { io.WriteString(&buf, statusHTML) for i, sec := range sections { - fmt.Fprintf(&buf, "

%s

\n", html.EscapeString(sec.Banner)) + fmt.Fprintf(&buf, "

%s

\n", safehtml.HTMLEscaped(sec.Banner)) fmt.Fprintf(&buf, "{{$sec := index .Sections %d}}\n", i) fmt.Fprintf(&buf, `{{template "sec-%d" call $sec.F}}`+"\n", i) } @@ -270,12 +272,12 @@ func (sp *statusPage) reparse(sections []section) (*template.Template, error) { for i, sec := range sections { fmt.Fprintf(&buf, `{{define "sec-%d"}}%s{{end}}\n`, i, sec.Fragment) } - return template.New("").Funcs(sp.funcMap).Parse(buf.String()) + return template.New("").Funcs(sp.funcMap).ParseFromTrustedTemplate(uncheckedconversions.TrustedTemplateFromStringKnownToSatisfyTypeContract(buf.String())) } // Toggle the block profile rate to/from 100%, unless specific rate is passed in func registerDebugBlockProfileRate() { - http.HandleFunc("/debug/blockprofilerate", func(w http.ResponseWriter, r *http.Request) { + HTTPHandleFunc("/debug/blockprofilerate", func(w http.ResponseWriter, r *http.Request) { if err := acl.CheckAccessHTTP(r, acl.DEBUGGING); err != nil { acl.SendError(w, err) return @@ -305,7 +307,7 @@ func registerDebugBlockProfileRate() { // Toggle the mutex profiling fraction to/from 100%, unless specific fraction is passed in func registerDebugMutexProfileFraction() { - http.HandleFunc("/debug/mutexprofilefraction", func(w http.ResponseWriter, r *http.Request) { + HTTPHandleFunc("/debug/mutexprofilefraction", func(w http.ResponseWriter, r *http.Request) { if err := acl.CheckAccessHTTP(r, acl.DEBUGGING); err != nil { acl.SendError(w, err) return diff --git a/go/vt/servenv/status_test.go b/go/vt/servenv/status_test.go index 61ea1ad82e1..b020f9cfc83 100644 --- a/go/vt/servenv/status_test.go +++ b/go/vt/servenv/status_test.go @@ -17,15 +17,16 @@ limitations under the License. package servenv import ( - "html/template" "io" "net/http" - "net/http/httptest" "regexp" "strings" "testing" + "github.com/google/safehtml/template" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/servenv/testutils" ) func init() { @@ -43,7 +44,7 @@ func init() { } func TestStatus(t *testing.T) { - server := httptest.NewServer(nil) + server := testutils.HTTPTestServer() defer server.Close() resp, err := http.Get(server.URL + StatusURLPath()) @@ -68,7 +69,7 @@ func TestStatus(t *testing.T) { } func TestNamedStatus(t *testing.T) { - server := httptest.NewServer(nil) + server := testutils.HTTPTestServer() defer server.Close() name := "test" diff --git a/go/vt/servenv/testutils/testutils.go b/go/vt/servenv/testutils/testutils.go new file mode 100644 index 00000000000..de716bcd94e --- /dev/null +++ b/go/vt/servenv/testutils/testutils.go @@ -0,0 +1,28 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutils + +import ( + "net/http/httptest" + + "vitess.io/vitess/go/vt/servenv/internal/mux" +) + +// HTTPTestServer returns a httptest.Server for the internal servenv mux. +func HTTPTestServer() *httptest.Server { + return httptest.NewServer(mux.Mux) +} diff --git a/go/vt/servenv/version.go b/go/vt/servenv/version.go index bedd2de8d26..710c0c36452 100644 --- a/go/vt/servenv/version.go +++ b/go/vt/servenv/version.go @@ -19,4 +19,4 @@ limitations under the License. package servenv -const versionName = "17.0.0-SNAPSHOT" +const versionName = "18.0.0-SNAPSHOT" diff --git a/go/vt/sidecardb/identifier_cache.go b/go/vt/sidecardb/identifier_cache.go new file mode 100644 index 00000000000..002d8750ba1 --- /dev/null +++ b/go/vt/sidecardb/identifier_cache.go @@ -0,0 +1,131 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sidecardb + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "vitess.io/vitess/go/constants/sidecar" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" +) + +// IdentifierCache provides a read through cache of sidecar database +// identifiers; loading the values from an opaque backend database +// using a provided load function. +type IdentifierCache struct { + // Lazily loaded cache of sidecar database identifiers by keyspace. + // The key is a keyspace name string and the val is an sqlparser + // string built from an IdentifierCS using the sidecar database + // name stored in the backend database read in the provided load + // function. + sidecarDBIdentifiers sync.Map + + // The callback used to load the values from the database into + // the cache. + load func(context.Context, string) (string, error) +} + +const ( + errIdentifierCacheUninitialized = "sidecar database identifier cache is not initialized" + errIdentifierCacheNoLoadFunction = "the load from database function has not been set" + identifierCacheLoadTimeout = 30 * time.Second +) + +var identifierCache atomic.Value // *IdentifierCache singleton + +// NewIdentifierCache returns an initialized cache. This is a +// singleton so if you call New multiple times you will get the +// same instance. If the cache has already been initialized then +// it will return false indicating that your New call did not +// create a new instance. +func NewIdentifierCache(loadFunc func(context.Context, string) (string, error)) (*IdentifierCache, bool) { + created := identifierCache.CompareAndSwap(nil, &IdentifierCache{ + load: loadFunc, + sidecarDBIdentifiers: sync.Map{}, + }) + return identifierCache.Load().(*IdentifierCache), created +} + +func GetIdentifierCache() (*IdentifierCache, error) { + if identifierCache.Load() == nil { + return nil, vterrors.New(vtrpcpb.Code_INTERNAL, errIdentifierCacheUninitialized) + } + return identifierCache.Load().(*IdentifierCache), nil +} + +// Get returns an sqlparser string built from an IdentifierCS using +// the sidecar database name stored in the database. This provides a +// read through cache. +func (ic *IdentifierCache) Get(keyspace string) (string, error) { + if ic.load == nil { + return "", vterrors.New(vtrpcpb.Code_INTERNAL, errIdentifierCacheNoLoadFunction) + } + sdbid, ok := ic.sidecarDBIdentifiers.Load(keyspace) + if !ok || sdbid == nil || sdbid == "" { + ctx, cancel := context.WithTimeout(context.Background(), identifierCacheLoadTimeout) + defer cancel() + + sdbname, err := ic.load(ctx, keyspace) + if err != nil { + return "", err + } + if sdbname == "" { + sdbname = sidecar.DefaultName + } + + sdbid = sqlparser.String(sqlparser.NewIdentifierCS(sdbname)) + ic.sidecarDBIdentifiers.Store(keyspace, sdbid) + } + return sdbid.(string), nil +} + +// GetIdentifierForKeyspace is a convenience function -- combining +// GetIdentifierCache() and IdentifierCache.Get(keyspace) -- which +// returns the sidecar database identifier as an sqlparser string +// for the provided keyspace. +func GetIdentifierForKeyspace(keyspace string) (string, error) { + cache, err := GetIdentifierCache() + if err != nil { + return "", err + } + return cache.Get(keyspace) +} + +// Delete removes an entry from the cache. It is idempotent and +// will always delete the entry IF it exists. +func (ic *IdentifierCache) Delete(keyspace string) { + ic.sidecarDBIdentifiers.Delete(keyspace) +} + +// Clear empties out the cache. +func (ic *IdentifierCache) Clear() { + ic.sidecarDBIdentifiers = sync.Map{} +} + +// Destroy clears the existing cache and sets the singleton instance +// to nil so that a new cache can be created. +// NOTE: this should ONLY be used in unit tests and NOT in production +// as it breaks the singleton pattern! +func (ic *IdentifierCache) Destroy() { + ic.Clear() + identifierCache = atomic.Value{} +} diff --git a/go/vt/sidecardb/identifier_cache_test.go b/go/vt/sidecardb/identifier_cache_test.go new file mode 100644 index 00000000000..2c4a16d1ced --- /dev/null +++ b/go/vt/sidecardb/identifier_cache_test.go @@ -0,0 +1,168 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sidecardb + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +func TestIdentifierCache(t *testing.T) { + sidecarDBIdentifierMap := map[string]string{} + loadFunc := func(ctx context.Context, keyspace string) (string, error) { + val, ok := sidecarDBIdentifierMap[keyspace] + if !ok { + return "", fmt.Errorf("keyspace %s not found", keyspace) + } + return val, nil + } + // Test using the cache before it's been initialized + cache, err := GetIdentifierCache() + require.Error(t, err) + require.Nil(t, cache) + require.Equal(t, err.Error(), errIdentifierCacheUninitialized) + // Create the cache to use for lookups of the sidecar database + // identifier in use by each keyspace. + var created bool + cache, created = NewIdentifierCache(loadFunc) + require.True(t, created) + var emptyErr error + tests := []struct { + name string + keyspace string + sidecardbname string + preHook func() error + postHook func() error + want string + wantErr error + }{ + { + name: "calling New twice should return the same instance", + preHook: func() error { + newcache, created := NewIdentifierCache(loadFunc) // should work fine + require.False(t, created) + if newcache != cache { + return fmt.Errorf("cache should be singleton") + } + return nil + }, + keyspace: "ks1", + sidecardbname: "_vt", + want: "_vt", + }, + { + name: "keyspace doesn't exist", + keyspace: "ks2", + preHook: func() error { + delete(sidecarDBIdentifierMap, "ks2") + return nil + }, + wantErr: errors.New("keyspace ks2 not found"), + }, + { + name: "uninitialized load func", + keyspace: "ks3", + preHook: func() error { + cache.load = nil + return nil + }, + postHook: func() error { + cache.load = loadFunc + return nil + }, + wantErr: vterrors.New(vtrpcpb.Code_INTERNAL, errIdentifierCacheNoLoadFunction), + }, + { + name: "delete keyspace", + keyspace: "ksdel", + preHook: func() error { + cache.Delete("ksdel") // delete from the cache so we re-load + delete(sidecarDBIdentifierMap, "ksdel") // delete from the backing database + return nil + }, + wantErr: errors.New("keyspace ksdel not found"), + }, + { + name: "clear cache", + keyspace: "ksalldel", + preHook: func() error { + cache.Clear() // clear the cache so we re-load + sidecarDBIdentifierMap = map[string]string{} // clear the backing database + return nil + }, + postHook: func() error { + // Make sure previous entries are also now gone + _, err := cache.Get("ks1") + require.Equal(t, err, errors.New("keyspace ks1 not found")) + _, err = cache.Get("ks3") + require.Equal(t, err, errors.New("keyspace ks3 not found")) + return nil + }, + wantErr: errors.New("keyspace ksalldel not found"), + }, + { + name: "sidecar database name that needs escaping", + keyspace: "ks4", + sidecardbname: "_vt-test", + want: "`_vt-test`", + }, + { + name: "destroy cache and create a new one", + keyspace: "ks5", + preHook: func() error { + cache.Destroy() // clears the cache and will require a re-load + delete(sidecarDBIdentifierMap, "ks5") // delete from the backing database + newcache, created := NewIdentifierCache(loadFunc) + require.True(t, created) + if newcache == cache { + return fmt.Errorf("cache should have been destroyed") + } + cache = newcache + return nil + }, + wantErr: errors.New("keyspace ks5 not found"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sidecarDBIdentifierMap[tt.keyspace] = tt.sidecardbname + if tt.preHook != nil { + err := tt.preHook() + require.NoError(t, err) + } + got, err := cache.Get(tt.keyspace) + if tt.postHook != nil { + err := tt.postHook() + require.NoError(t, err) + } + if tt.wantErr != emptyErr && (err == nil || tt.wantErr.Error() != err.Error()) { + t.Errorf("cache.Get() produced error: %v, wanted error: %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("cache.Get() returned: %v, wanted: %v", got, tt.want) + } + }) + } +} diff --git a/go/vt/sidecardb/schema/misc/heartbeat.sql b/go/vt/sidecardb/schema/misc/heartbeat.sql index cacd80529b5..35668f2c0ab 100644 --- a/go/vt/sidecardb/schema/misc/heartbeat.sql +++ b/go/vt/sidecardb/schema/misc/heartbeat.sql @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.heartbeat +CREATE TABLE IF NOT EXISTS heartbeat ( keyspaceShard VARBINARY(256) NOT NULL, tabletUid INT UNSIGNED NOT NULL, diff --git a/go/vt/sidecardb/schema/misc/reparent_journal.sql b/go/vt/sidecardb/schema/misc/reparent_journal.sql index 74534f57098..81e47c69dc5 100644 --- a/go/vt/sidecardb/schema/misc/reparent_journal.sql +++ b/go/vt/sidecardb/schema/misc/reparent_journal.sql @@ -14,11 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.reparent_journal +CREATE TABLE IF NOT EXISTS reparent_journal ( `time_created_ns` bigint(20) unsigned NOT NULL, - `action_name` varbinary(250) NOT NULL, - `primary_alias` varbinary(32) NOT NULL, + `action_name` varbinary(255) NOT NULL, + `primary_alias` varbinary(255) NOT NULL, `replication_position` varbinary(64000) DEFAULT NULL, PRIMARY KEY (`time_created_ns`) diff --git a/go/vt/sidecardb/schema/onlineddl/schema_migrations.sql b/go/vt/sidecardb/schema/onlineddl/schema_migrations.sql index 7e3f64ba185..60cd4abcefa 100644 --- a/go/vt/sidecardb/schema/onlineddl/schema_migrations.sql +++ b/go/vt/sidecardb/schema/onlineddl/schema_migrations.sql @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.schema_migrations +CREATE TABLE IF NOT EXISTS schema_migrations ( `id` bigint unsigned NOT NULL AUTO_INCREMENT, `migration_uuid` varchar(64) NOT NULL, @@ -69,6 +69,7 @@ CREATE TABLE IF NOT EXISTS _vt.schema_migrations `cutover_attempts` int unsigned NOT NULL DEFAULT '0', `is_immediate_operation` tinyint unsigned NOT NULL DEFAULT '0', `reviewed_timestamp` timestamp NULL DEFAULT NULL, + `ready_to_complete_timestamp` timestamp NULL DEFAULT NULL, PRIMARY KEY (`id`), UNIQUE KEY `uuid_idx` (`migration_uuid`), KEY `keyspace_shard_idx` (`keyspace`(64), `shard`(64)), diff --git a/go/cmd/topo2topo/plugin_kubernetestopo.go b/go/vt/sidecardb/schema/schemaengine/tables.sql similarity index 66% rename from go/cmd/topo2topo/plugin_kubernetestopo.go rename to go/vt/sidecardb/schema/schemaengine/tables.sql index 671d0c8321f..00fd0194d67 100644 --- a/go/cmd/topo2topo/plugin_kubernetestopo.go +++ b/go/vt/sidecardb/schema/schemaengine/tables.sql @@ -1,5 +1,5 @@ /* -Copyright 2020 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,10 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main - -// This plugin imports k8stopo to register the kubernetes implementation of TopoServer. - -import ( - _ "vitess.io/vitess/go/vt/topo/k8stopo" -) +CREATE TABLE IF NOT EXISTS tables +( + TABLE_SCHEMA varchar(64) NOT NULL, + TABLE_NAME varchar(64) NOT NULL, + CREATE_STATEMENT longtext, + CREATE_TIME BIGINT, + PRIMARY KEY (TABLE_SCHEMA, TABLE_NAME) +) engine = InnoDB diff --git a/go/vt/sidecardb/schema/misc/views.sql b/go/vt/sidecardb/schema/schemaengine/views.sql similarity index 80% rename from go/vt/sidecardb/schema/misc/views.sql rename to go/vt/sidecardb/schema/schemaengine/views.sql index b70d9bb41df..1fee077202f 100644 --- a/go/vt/sidecardb/schema/misc/views.sql +++ b/go/vt/sidecardb/schema/schemaengine/views.sql @@ -14,11 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.views +CREATE TABLE IF NOT EXISTS views ( TABLE_SCHEMA varchar(64) NOT NULL, TABLE_NAME varchar(64) NOT NULL, - CREATE_STATEMENT longtext NOT NULL, - UPDATED_AT TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + CREATE_STATEMENT longtext, + VIEW_DEFINITION longtext NOT NULL, PRIMARY KEY (TABLE_SCHEMA, TABLE_NAME) ) engine = InnoDB diff --git a/go/vt/sidecardb/schema/schematracker/schemacopy.sql b/go/vt/sidecardb/schema/schematracker/schemacopy.sql index 95cd7c34f3f..296bb34df14 100644 --- a/go/vt/sidecardb/schema/schematracker/schemacopy.sql +++ b/go/vt/sidecardb/schema/schematracker/schemacopy.sql @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.schemacopy +CREATE TABLE IF NOT EXISTS schemacopy ( `table_schema` varchar(64) NOT NULL, `table_name` varchar(64) NOT NULL, diff --git a/go/vt/sidecardb/schema/twopc/dt_participant.sql b/go/vt/sidecardb/schema/twopc/dt_participant.sql index 66ff4bda987..9f2408497eb 100644 --- a/go/vt/sidecardb/schema/twopc/dt_participant.sql +++ b/go/vt/sidecardb/schema/twopc/dt_participant.sql @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.dt_participant +CREATE TABLE IF NOT EXISTS dt_participant ( dtid varbinary(512) NOT NULL, id bigint NOT NULL, diff --git a/go/vt/sidecardb/schema/twopc/dt_state.sql b/go/vt/sidecardb/schema/twopc/dt_state.sql index e877a31a75f..dff9e4c3770 100644 --- a/go/vt/sidecardb/schema/twopc/dt_state.sql +++ b/go/vt/sidecardb/schema/twopc/dt_state.sql @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.dt_state +CREATE TABLE IF NOT EXISTS dt_state ( dtid varbinary(512) NOT NULL, state bigint NOT NULL, diff --git a/go/vt/sidecardb/schema/twopc/redo_state.sql b/go/vt/sidecardb/schema/twopc/redo_state.sql index a1122b0ac8f..7e583d7fdcd 100644 --- a/go/vt/sidecardb/schema/twopc/redo_state.sql +++ b/go/vt/sidecardb/schema/twopc/redo_state.sql @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.redo_state( +CREATE TABLE IF NOT EXISTS redo_state( dtid varbinary(512) NOT NULL, state bigint NOT NULL, time_created bigint NOT NULL, diff --git a/go/vt/sidecardb/schema/twopc/redo_statement.sql b/go/vt/sidecardb/schema/twopc/redo_statement.sql index 148cc0bb3c0..9208a0fce65 100644 --- a/go/vt/sidecardb/schema/twopc/redo_statement.sql +++ b/go/vt/sidecardb/schema/twopc/redo_statement.sql @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.redo_statement( +CREATE TABLE IF NOT EXISTS redo_statement( dtid varbinary(512) NOT NULL, id bigint NOT NULL, statement mediumblob NOT NULL, diff --git a/go/vt/sidecardb/schema/vdiff/vdiff.sql b/go/vt/sidecardb/schema/vdiff/vdiff.sql index 24f5cf6e7ab..5eae9270460 100644 --- a/go/vt/sidecardb/schema/vdiff/vdiff.sql +++ b/go/vt/sidecardb/schema/vdiff/vdiff.sql @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.vdiff +CREATE TABLE IF NOT EXISTS vdiff ( `id` bigint(20) NOT NULL AUTO_INCREMENT, `vdiff_uuid` varchar(64) NOT NULL, diff --git a/go/vt/sidecardb/schema/vdiff/vdiff_log.sql b/go/vt/sidecardb/schema/vdiff/vdiff_log.sql index 2935baf9b24..dbc110e5b3a 100644 --- a/go/vt/sidecardb/schema/vdiff/vdiff_log.sql +++ b/go/vt/sidecardb/schema/vdiff/vdiff_log.sql @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.vdiff_log +CREATE TABLE IF NOT EXISTS vdiff_log ( `id` int(11) NOT NULL AUTO_INCREMENT, `vdiff_id` int(11) NOT NULL, diff --git a/go/vt/sidecardb/schema/vdiff/vdiff_table.sql b/go/vt/sidecardb/schema/vdiff/vdiff_table.sql index 81f0ba17599..580f1ba96ee 100644 --- a/go/vt/sidecardb/schema/vdiff/vdiff_table.sql +++ b/go/vt/sidecardb/schema/vdiff/vdiff_table.sql @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.vdiff_table +CREATE TABLE IF NOT EXISTS vdiff_table ( `vdiff_id` varchar(64) NOT NULL, `table_name` varbinary(128) NOT NULL, diff --git a/go/vt/sidecardb/schema/vreplication/copy_state.sql b/go/vt/sidecardb/schema/vreplication/copy_state.sql index f7005135aba..8f27bc9dc86 100644 --- a/go/vt/sidecardb/schema/vreplication/copy_state.sql +++ b/go/vt/sidecardb/schema/vreplication/copy_state.sql @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.copy_state +CREATE TABLE IF NOT EXISTS copy_state ( `id` bigint unsigned NOT NULL AUTO_INCREMENT, `vrepl_id` int NOT NULL, diff --git a/go/vt/sidecardb/schema/vreplication/post_copy_action.sql b/go/vt/sidecardb/schema/vreplication/post_copy_action.sql index 8ca979fc15d..85bb44923b0 100644 --- a/go/vt/sidecardb/schema/vreplication/post_copy_action.sql +++ b/go/vt/sidecardb/schema/vreplication/post_copy_action.sql @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.post_copy_action( +CREATE TABLE IF NOT EXISTS post_copy_action( id BIGINT NOT NULL auto_increment, vrepl_id INT NOT NULL, table_name VARBINARY(128) NOT NULL, diff --git a/go/vt/sidecardb/schema/vreplication/resharding_journal.sql b/go/vt/sidecardb/schema/vreplication/resharding_journal.sql index b5b960c92aa..5a3dbd64890 100644 --- a/go/vt/sidecardb/schema/vreplication/resharding_journal.sql +++ b/go/vt/sidecardb/schema/vreplication/resharding_journal.sql @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.resharding_journal +CREATE TABLE IF NOT EXISTS resharding_journal ( `id` bigint NOT NULL, `db_name` varbinary(255) DEFAULT NULL, diff --git a/go/vt/sidecardb/schema/vreplication/schema_version.sql b/go/vt/sidecardb/schema/vreplication/schema_version.sql index 86f782ddac1..2b7cbc08dec 100644 --- a/go/vt/sidecardb/schema/vreplication/schema_version.sql +++ b/go/vt/sidecardb/schema/vreplication/schema_version.sql @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.schema_version +CREATE TABLE IF NOT EXISTS schema_version ( id INT NOT NULL AUTO_INCREMENT, pos VARBINARY(10000) NOT NULL, diff --git a/go/vt/sidecardb/schema/vreplication/vreplication.sql b/go/vt/sidecardb/schema/vreplication/vreplication.sql index 3b30d1250c9..ce9badfd98f 100644 --- a/go/vt/sidecardb/schema/vreplication/vreplication.sql +++ b/go/vt/sidecardb/schema/vreplication/vreplication.sql @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.vreplication +CREATE TABLE IF NOT EXISTS vreplication ( `id` int NOT NULL AUTO_INCREMENT, `workflow` varbinary(1000) DEFAULT NULL, diff --git a/go/vt/sidecardb/schema/vreplication/vreplication_log.sql b/go/vt/sidecardb/schema/vreplication/vreplication_log.sql index 6700ede3c47..175e6db2bce 100644 --- a/go/vt/sidecardb/schema/vreplication/vreplication_log.sql +++ b/go/vt/sidecardb/schema/vreplication/vreplication_log.sql @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -CREATE TABLE IF NOT EXISTS _vt.vreplication_log +CREATE TABLE IF NOT EXISTS vreplication_log ( `id` bigint NOT NULL AUTO_INCREMENT, `vrepl_id` int NOT NULL, diff --git a/go/vt/sidecardb/sidecardb.go b/go/vt/sidecardb/sidecardb.go index 3d955995a6a..0bb64611607 100644 --- a/go/vt/sidecardb/sidecardb.go +++ b/go/vt/sidecardb/sidecardb.go @@ -27,8 +27,9 @@ import ( "strings" "sync" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/history" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/mysql/fakesqldb" @@ -44,21 +45,43 @@ import ( ) const ( - SidecarDBName = "_vt" - CreateSidecarDatabaseQuery = "create database if not exists _vt" - UseSidecarDatabaseQuery = "use _vt" - ShowSidecarDatabasesQuery = "SHOW DATABASES LIKE '\\_vt'" - SelectCurrentDatabaseQuery = "select database()" - ShowCreateTableQuery = "show create table _vt.%s" - - CreateTableRegexp = "CREATE TABLE .* `\\_vt`\\..*" - AlterTableRegexp = "ALTER TABLE `\\_vt`\\..*" + sidecarDBExistsQuery = "select 'true' as 'dbexists' from information_schema.SCHEMATA where SCHEMA_NAME = %a" + showCreateTableQuery = "show create table %s.%s" + + maxDDLErrorHistoryLength = 100 + + // failOnSchemaInitError decides whether we fail the schema init process when we encounter an error while + // applying a table schema upgrade DDL or continue with the next table. + // If true, tablets will not launch. The cluster will not come up until the issue is resolved. + // If false, the init process will continue trying to upgrade other tables. So some functionality might be broken + // due to an incorrect schema, but the cluster should come up and serve queries. + // This is an operational trade-off: if we always fail it could cause a major incident since the entire cluster will be down. + // If we are more permissive, it could cause hard-to-detect errors, because a module + // doesn't load or behaves incorrectly due to an incomplete upgrade. Errors however will be reported and if the + // related stats endpoints are monitored we should be able to diagnose/get alerted in a timely fashion. + failOnSchemaInitError = false + + StatsKeyPrefix = "SidecarDBDDL" + StatsKeyQueryCount = StatsKeyPrefix + "QueryCount" + StatsKeyErrorCount = StatsKeyPrefix + "ErrorCount" + StatsKeyErrors = StatsKeyPrefix + "Errors" ) -// All tables needed in the sidecar database have their schema in the schema subdirectory. -// -//go:embed schema/* -var schemaLocation embed.FS +var ( + sidecarTables []*sidecarTable + + // All tables needed in the sidecar database have + // their schema in the schema subdirectory. + //go:embed schema/* + schemaLocation embed.FS + // Load the schema definitions one time. + once sync.Once + + ddlCount *stats.Counter + ddlErrorCount *stats.Counter + ddlErrorHistory *history.History + mu sync.Mutex +) type sidecarTable struct { module string // which module uses this table @@ -67,41 +90,12 @@ type sidecarTable struct { schema string // create table dml } -func (t *sidecarTable) String() string { - return fmt.Sprintf("%s.%s (%s)", SidecarDBName, t.name, t.module) -} - -var sidecarTables []*sidecarTable -var ddlCount *stats.Counter -var ddlErrorCount *stats.Counter -var ddlErrorHistory *history.History -var mu sync.Mutex - type ddlError struct { tableName string err error } -const maxDDLErrorHistoryLength = 100 - -// failOnSchemaInitError decides whether we fail the schema init process when we encounter an error while -// applying a table schema upgrade DDL or continue with the next table. -// If true, tablets will not launch. The cluster will not come up until the issue is resolved. -// If false, the init process will continue trying to upgrade other tables. So some functionality might be broken -// due to an incorrect schema, but the cluster should come up and serve queries. -// This is an operational trade-off: if we always fail it could cause a major incident since the entire cluster will be down. -// If we are more permissive, it could cause hard-to-detect errors, because a module -// doesn't load or behaves incorrectly due to an incomplete upgrade. Errors however will be reported and if the -// related stats endpoints are monitored we should be able to diagnose/get alerted in a timely fashion. -const failOnSchemaInitError = false - -const StatsKeyPrefix = "SidecarDBDDL" -const StatsKeyQueryCount = StatsKeyPrefix + "QueryCount" -const StatsKeyErrorCount = StatsKeyPrefix + "ErrorCount" -const StatsKeyErrors = StatsKeyPrefix + "Errors" - func init() { - initSchemaFiles() ddlCount = stats.NewCounter(StatsKeyQueryCount, "Number of queries executed") ddlErrorCount = stats.NewCounter(StatsKeyErrorCount, "Number of errors during sidecar schema upgrade") ddlErrorHistory = history.New(maxDDLErrorHistoryLength) @@ -130,11 +124,12 @@ func validateSchemaDefinition(name, schema string) (string, error) { return "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "expected CREATE TABLE. Got %v", sqlparser.CanonicalString(stmt)) } tableName := createTable.Table.Name.String() + // The database qualifier should be configured externally. qualifier := createTable.Table.Qualifier.String() - if qualifier != SidecarDBName { - return "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "database qualifier specified for the %s table is %s rather than the expected value of %s", - name, qualifier, SidecarDBName) + if qualifier != "" { + return "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "database qualifier of %s specified for the %s table when there should not be one", qualifier, name) } + createTable.Table.Qualifier = sqlparser.NewIdentifierCS(sidecar.GetName()) if !strings.EqualFold(tableName, name) { return "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "table name of %s does not match the table name specified within the file: %s", name, tableName) } @@ -145,7 +140,9 @@ func validateSchemaDefinition(name, schema string) (string, error) { return normalizedSchema, nil } -func initSchemaFiles() { +// loadSchemaDefinitions loads the embedded schema definitions +// into a slice of sidecarTables for processing. +func loadSchemaDefinitions() { sqlFileExtension := ".sql" err := fs.WalkDir(schemaLocation, ".", func(path string, entry fs.DirEntry, err error) error { if err != nil { @@ -161,9 +158,9 @@ func initSchemaFiles() { dirparts := strings.Split(strings.Trim(dir, "/"), "/") switch len(dirparts) { case 1: - module = dir + module = dirparts[0] case 2: - module = fmt.Sprintf("%s/%s", dirparts[0], dirparts[1]) + module = dirparts[1] default: return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected path value of %s specified for sidecar schema table; expected structure is [/]/.sql", dir) } @@ -191,32 +188,33 @@ func printCallerDetails() { pc, _, line, ok := runtime.Caller(2) details := runtime.FuncForPC(pc) if ok && details != nil { - log.Infof("%s schema init called from %s:%d\n", SidecarDBName, details.Name(), line) + log.Infof("%s schema init called from %s:%d\n", sidecar.GetName(), details.Name(), line) } } type schemaInit struct { - ctx context.Context - exec Exec - existingTables map[string]bool - dbCreated bool // The first upgrade/create query will also create the sidecar database if required. + ctx context.Context + exec Exec + dbCreated bool // The first upgrade/create query will also create the sidecar database if required. } -// Exec is a callback that has to be passed to Init() to execute the specified query in the database. +// Exec is a callback that has to be passed to Init() to +// execute the specified query within the database. type Exec func(ctx context.Context, query string, maxRows int, useDB bool) (*sqltypes.Result, error) -// GetDDLCount returns the count of sidecardb DDLs that have been run as part of this vttablet's init process. -func GetDDLCount() int64 { +// GetDDLCount metric returns the count of sidecardb DDLs that +// have been run as part of this vttablet's init process. +func getDDLCount() int64 { return ddlCount.Get() } // GetDDLErrorCount returns the count of sidecardb DDLs that have been errored out as part of this vttablet's init process. -func GetDDLErrorCount() int64 { +func getDDLErrorCount() int64 { return ddlErrorCount.Get() } // GetDDLErrorHistory returns the errors encountered as part of this vttablet's init process.. -func GetDDLErrorHistory() []*ddlError { +func getDDLErrorHistory() []*ddlError { var errors []*ddlError for _, e := range ddlErrorHistory.Records() { ddle, ok := e.(*ddlError) @@ -227,17 +225,24 @@ func GetDDLErrorHistory() []*ddlError { return errors } -// Init creates or upgrades the sidecar database based on declarative schema for all tables in the schema. +// Init creates or upgrades the sidecar database based on +// the declarative schema defined for all tables. func Init(ctx context.Context, exec Exec) error { printCallerDetails() // for debug purposes only, remove in v17 log.Infof("Starting sidecardb.Init()") + + once.Do(loadSchemaDefinitions) + si := &schemaInit{ ctx: ctx, exec: exec, } - // There are paths in the tablet initialization where we are in read-only mode but the schema is already updated. - // Hence, we should not always try to create the database, since it will then error out as the db is read-only. + // There are paths in the tablet initialization where we + // are in read-only mode but the schema is already updated. + // Hence, we should not always try to CREATE the + // database, since it will then error out as the instance + // is read-only. dbExists, err := si.doesSidecarDBExist() if err != nil { return err @@ -249,7 +254,7 @@ func Init(ctx context.Context, exec Exec) error { si.dbCreated = true } - if _, err := si.setCurrentDatabase(SidecarDBName); err != nil { + if err := si.setCurrentDatabase(sidecar.GetIdentifier()); err != nil { return err } @@ -293,7 +298,11 @@ func (si *schemaInit) setPermissiveSQLMode() (func(), error) { } func (si *schemaInit) doesSidecarDBExist() (bool, error) { - rs, err := si.exec(si.ctx, ShowSidecarDatabasesQuery, 2, false) + query, err := sqlparser.ParseAndBind(sidecarDBExistsQuery, sqltypes.StringBindVariable(sidecar.GetName())) + if err != nil { + return false, err + } + rs, err := si.exec(si.ctx, query, 2, false) if err != nil { log.Error(err) return false, err @@ -301,53 +310,44 @@ func (si *schemaInit) doesSidecarDBExist() (bool, error) { switch len(rs.Rows) { case 0: - log.Infof("doesSidecarDBExist: not found") + log.Infof("doesSidecarDBExist: %s not found", sidecar.GetName()) return false, nil case 1: - log.Infof("doesSidecarDBExist: found") + log.Infof("doesSidecarDBExist: found %s", sidecar.GetName()) return true, nil default: - log.Errorf("found too many rows for sidecarDB %s: %d", SidecarDBName, len(rs.Rows)) - return false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "found too many rows for sidecarDB %s: %d", SidecarDBName, len(rs.Rows)) + // This should never happen. + return false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid results for SidecarDB query %q as it produced %d rows", query, len(rs.Rows)) } } func (si *schemaInit) createSidecarDB() error { - _, err := si.exec(si.ctx, CreateSidecarDatabaseQuery, 1, false) + _, err := si.exec(si.ctx, sidecar.GetCreateQuery(), 1, false) if err != nil { log.Error(err) return err } - log.Infof("createSidecarDB: %s", CreateSidecarDatabaseQuery) + log.Infof("createSidecarDB: %s", sidecar.GetName()) return nil } -// Sets db of current connection, returning the currently selected database. -func (si *schemaInit) setCurrentDatabase(dbName string) (string, error) { - rs, err := si.exec(si.ctx, SelectCurrentDatabaseQuery, 1, false) - if err != nil { - return "", err - } - if rs == nil || rs.Rows == nil { // we get this in tests - return "", nil - } - currentDB := rs.Rows[0][0].ToString() - if currentDB != "" { // while running tests we can get currentDB as empty - _, err = si.exec(si.ctx, fmt.Sprintf("use %s", dbName), 1, false) - if err != nil { - return "", err - } - } - return currentDB, nil +// Sets the current db in the used connection. +func (si *schemaInit) setCurrentDatabase(dbName string) error { + _, err := si.exec(si.ctx, fmt.Sprintf("use %s", dbName), 1, false) + return err } // Gets existing schema of a table in the sidecar database. func (si *schemaInit) getCurrentSchema(tableName string) (string, error) { var currentTableSchema string - rs, err := si.exec(si.ctx, fmt.Sprintf(ShowCreateTableQuery, tableName), 1, false) + // We escape the tableName because it can be a keyword. + // Converting the tableName to a case-sensitive identifier and converting back to a string using the + // sqlparser package, ensures that the table name is escaped with backticks if required. + escapedTableName := sqlparser.String(sqlparser.NewIdentifierCS(tableName)) + rs, err := si.exec(si.ctx, sqlparser.BuildParsedQuery(showCreateTableQuery, sidecar.GetIdentifier(), escapedTableName).Query, 1, false) if err != nil { - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.ERNoSuchTable { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERNoSuchTable { // table does not exist in the sidecar database return "", nil } @@ -360,8 +360,11 @@ func (si *schemaInit) getCurrentSchema(tableName string) (string, error) { return currentTableSchema, nil } -// findTableSchemaDiff gets the diff that needs to be applied to current table schema to get the desired one. Will be an empty string if they match. -// This could be a CREATE statement if the table does not exist or an ALTER if table exists but has a different schema. +// findTableSchemaDiff gets the diff which needs to be applied +// to the current table schema in order toreach the desired one. +// The result will be an empty string if they match. +// This will be a CREATE statement if the table does not exist +// or an ALTER if the table exists but has a different schema. func (si *schemaInit) findTableSchemaDiff(tableName, current, desired string) (string, error) { hints := &schemadiff.DiffHints{ TableCharsetCollateStrategy: schemadiff.TableCharsetCollateIgnoreAlways, @@ -376,8 +379,6 @@ func (si *schemaInit) findTableSchemaDiff(tableName, current, desired string) (s if diff != nil { ddl = diff.CanonicalStatementString() - // Temporary logging to debug any eventual issues around the new schema init, should be removed in v17. - log.Infof("Current schema for table %s:\n%s", tableName, current) if ddl == "" { log.Infof("No changes needed for table %s", tableName) } else { @@ -388,9 +389,9 @@ func (si *schemaInit) findTableSchemaDiff(tableName, current, desired string) (s return ddl, nil } -// ensureSchema first checks if the table exist, in which case it runs the create script provided in -// the schema directory. If the table exists, schemadiff is used to compare the existing schema with the desired one. -// If it needs to be altered then we run the alter script. +// ensureSchema uses schemadiff to compare the live schema +// with the desired one and applies any DDL statements +// necessary to converge on the desired schema. func (si *schemaInit) ensureSchema(table *sidecarTable) error { ctx := si.ctx desiredTableSchema := table.schema @@ -407,9 +408,12 @@ func (si *schemaInit) ensureSchema(table *sidecarTable) error { if ddl != "" { if !si.dbCreated { - // We use CreateSidecarDatabaseQuery to also create the first binlog entry when a primary comes up. - // That statement doesn't make it to the replicas, so we run the query again so that it is replicated - // to the replicas so that the replicas can create the sidecar database. + // We use createSidecarDB to also create the + // first binlog entry when a primary comes up. + // That statement doesn't make it to the + // replicas, so we run the query again so that + // it is replicated to the replicas so that the + // replicas can create the sidecar database. if err := si.createSidecarDB(); err != nil { return err } @@ -429,7 +433,7 @@ func (si *schemaInit) ensureSchema(table *sidecarTable) error { ddlCount.Add(1) return nil } - log.Infof("Table schema was already up to date for the %s table in the %s sidecar database", table.name, SidecarDBName) + log.Infof("Table schema was already up to date for the %s table in the %s sidecar database", table.name, sidecar.GetName()) return nil } @@ -442,32 +446,29 @@ func recordDDLError(tableName string, err error) { }) } -// region unit-test-only -// This section uses helpers used in tests, but also in the go/vt/vtexplain/vtexplain_vttablet.go. -// Hence, it is here and not in the _test.go file. - -// Query patterns to handle in mocks. -var sidecarDBInitQueries = []string{ - ShowSidecarDatabasesQuery, - SelectCurrentDatabaseQuery, - CreateSidecarDatabaseQuery, - UseSidecarDatabaseQuery, +func (t *sidecarTable) String() string { + return fmt.Sprintf("%s.%s (%s)", sidecar.GetIdentifier(), sqlparser.String(sqlparser.NewIdentifierCS(t.name)), t.module) } -var sidecarDBInitQueryPatterns = []string{ - CreateTableRegexp, - AlterTableRegexp, -} +// region unit-test-only +// This section uses helpers used in tests, but also in +// go/vt/vtexplain/vtexplain_vttablet.go. +// Hence, it is here and not in the _test.go file. -// AddSchemaInitQueries adds sidecar database schema related queries to a mock db. +// AddSchemaInitQueries adds sidecar database schema related +// queries to a mock db. +// This is for unit tests only! func AddSchemaInitQueries(db *fakesqldb.DB, populateTables bool) { + once.Do(loadSchemaDefinitions) result := &sqltypes.Result{} - for _, q := range sidecarDBInitQueryPatterns { + for _, q := range sidecar.DBInitQueryPatterns { db.AddQueryPattern(q, result) } - for _, q := range sidecarDBInitQueries { - db.AddQuery(q, result) + for _, q := range sidecar.DBInitQueries { + db.AddQuery(sqlparser.BuildParsedQuery(q, sidecar.GetIdentifier()).Query, result) } + sdbe, _ := sqlparser.ParseAndBind(sidecarDBExistsQuery, sqltypes.StringBindVariable(sidecar.GetName())) + db.AddQuery(sdbe, result) for _, table := range sidecarTables { result = &sqltypes.Result{} if populateTables { @@ -477,7 +478,8 @@ func AddSchemaInitQueries(db *fakesqldb.DB, populateTables bool) { fmt.Sprintf("%s|%s", table.name, table.schema), ) } - db.AddQuery(fmt.Sprintf(ShowCreateTableQuery, table.name), result) + db.AddQuery(sqlparser.BuildParsedQuery(showCreateTableQuery, sidecar.GetIdentifier(), + sqlparser.String(sqlparser.NewIdentifierCS(table.name))).Query, result) } sqlModeResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields( @@ -490,15 +492,21 @@ func AddSchemaInitQueries(db *fakesqldb.DB, populateTables bool) { db.AddQuery("set @@session.sql_mode=''", &sqltypes.Result{}) } -// MatchesInitQuery returns true if query has one of the test patterns as a substring, or it matches a provided regexp. +// MatchesInitQuery returns true if the query has one of the +// test patterns as a substring or it matches a provided regexp. +// This is for unit tests only! func MatchesInitQuery(query string) bool { query = strings.ToLower(query) - for _, q := range sidecarDBInitQueries { - if strings.EqualFold(q, query) { + for _, q := range sidecar.DBInitQueries { + if strings.EqualFold(sqlparser.BuildParsedQuery(q, sidecar.GetIdentifier()).Query, query) { return true } } - for _, q := range sidecarDBInitQueryPatterns { + sdbe, _ := sqlparser.ParseAndBind(sidecarDBExistsQuery, sqltypes.StringBindVariable(sidecar.GetName())) + if strings.EqualFold(sdbe, query) { + return true + } + for _, q := range sidecar.DBInitQueryPatterns { q = strings.ToLower(q) if strings.Contains(query, q) { return true diff --git a/go/vt/sidecardb/sidecardb_test.go b/go/vt/sidecardb/sidecardb_test.go index d7d1f2ed650..22147c960e9 100644 --- a/go/vt/sidecardb/sidecardb_test.go +++ b/go/vt/sidecardb/sidecardb_test.go @@ -24,6 +24,7 @@ import ( "strings" "testing" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/vt/sqlparser" "github.com/stretchr/testify/require" @@ -41,14 +42,6 @@ func TestInitErrors(t *testing.T) { db := fakesqldb.New(t) defer db.Close() AddSchemaInitQueries(db, false) - db.AddQuery("use dbname", &sqltypes.Result{}) - sqlMode := sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "sql_mode", - "varchar"), - "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION", - ) - db.AddQuery("select @@session.sql_mode as sql_mode", sqlMode) - db.AddQueryPattern("set @@session.sql_mode=.*", &sqltypes.Result{}) ddlErrorCount.Set(0) ddlCount.Set(0) @@ -70,7 +63,7 @@ func TestInitErrors(t *testing.T) { exec := func(ctx context.Context, query string, maxRows int, useDB bool) (*sqltypes.Result, error) { if useDB { - if _, err := conn.ExecuteFetch(UseSidecarDatabaseQuery, maxRows, true); err != nil { + if _, err := conn.ExecuteFetch(fmt.Sprintf("use %s", sidecar.GetIdentifier()), maxRows, true); err != nil { return nil, err } } @@ -91,11 +84,11 @@ func TestInitErrors(t *testing.T) { return conn.ExecuteFetch(query, maxRows, true) } - require.Equal(t, int64(0), GetDDLCount()) + require.Equal(t, int64(0), getDDLCount()) err = Init(ctx, exec) require.NoError(t, err) - require.Equal(t, int64(len(sidecarTables)-len(schemaErrors)), GetDDLCount()) - require.Equal(t, int64(len(schemaErrors)), GetDDLErrorCount()) + require.Equal(t, int64(len(sidecarTables)-len(schemaErrors)), getDDLCount()) + require.Equal(t, int64(len(schemaErrors)), getDDLErrorCount()) var want []string for _, e := range schemaErrors { @@ -103,7 +96,7 @@ func TestInitErrors(t *testing.T) { } // sort expected and reported errors for easy comparison sort.Strings(want) - got := GetDDLErrorHistory() + got := getDDLErrorHistory() sort.Slice(got, func(i, j int) bool { return got[i].tableName < got[j].tableName }) @@ -125,34 +118,6 @@ func TestInitErrors(t *testing.T) { } } -// test the logic that confirms that the user defined schema's table name and qualifier are valid -func TestValidateSchema(t *testing.T) { - type testCase struct { - testName string - name string - schema string - mustError bool - } - testCases := []testCase{ - {"valid", "t1", "create table if not exists _vt.t1(i int)", false}, - {"no if not exists", "t1", "create table _vt.t1(i int)", true}, - {"invalid table name", "t2", "create table if not exists _vt.t1(i int)", true}, - {"invalid table name", "t1", "create table if not exists _vt.t2(i int)", true}, - {"invalid qualifier", "t1", "create table if not exists vt_product.t1(i int)", true}, - {"invalid qualifier", "t1", "create table if not exists t1(i int)", true}, - } - for _, tc := range testCases { - t.Run(tc.testName, func(t *testing.T) { - _, err := validateSchemaDefinition(tc.name, tc.schema) - if tc.mustError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - }) - } -} - // Tests various non-error code paths in sidecardb func TestMiscSidecarDB(t *testing.T) { ctx := context.Background() @@ -168,46 +133,79 @@ func TestMiscSidecarDB(t *testing.T) { require.NoError(t, err) exec := func(ctx context.Context, query string, maxRows int, useDB bool) (*sqltypes.Result, error) { if useDB { - if _, err := conn.ExecuteFetch(UseSidecarDatabaseQuery, maxRows, true); err != nil { + if _, err := conn.ExecuteFetch(fmt.Sprintf("use %s", sidecar.GetIdentifier()), maxRows, true); err != nil { return nil, err } } return conn.ExecuteFetch(query, maxRows, true) } + result := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "dbexists", + "int64"), + sidecar.GetName(), + ) + dbeq, err := sqlparser.ParseAndBind(sidecarDBExistsQuery, sqltypes.StringBindVariable(sidecar.GetName())) + require.NoError(t, err) + db.AddQuery(dbeq, result) + db.AddQuery(sidecar.GetCreateQuery(), &sqltypes.Result{}) + AddSchemaInitQueries(db, false) + // tests init on empty db ddlErrorCount.Set(0) ddlCount.Set(0) - require.Equal(t, int64(0), GetDDLCount()) + require.Equal(t, int64(0), getDDLCount()) err = Init(ctx, exec) require.NoError(t, err) - require.Equal(t, int64(len(sidecarTables)), GetDDLCount()) + require.Equal(t, int64(len(sidecarTables)), getDDLCount()) - // tests init on already inited db + // Include the table DDLs in the expected queries. + // This causes them to NOT be created again. AddSchemaInitQueries(db, true) + + // tests init on already inited db err = Init(ctx, exec) require.NoError(t, err) - require.Equal(t, int64(len(sidecarTables)), GetDDLCount()) + require.Equal(t, int64(len(sidecarTables)), getDDLCount()) // tests misc paths not covered above si := &schemaInit{ ctx: ctx, exec: exec, } - result := sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "Database", - "varchar"), - "currentDB", - ) - db.AddQuery(SelectCurrentDatabaseQuery, result) - currentDB, err := si.setCurrentDatabase("dbname") + err = si.setCurrentDatabase(sidecar.GetIdentifier()) require.NoError(t, err) - require.Equal(t, "currentDB", currentDB) require.False(t, MatchesInitQuery("abc")) - require.True(t, MatchesInitQuery(SelectCurrentDatabaseQuery)) - require.True(t, MatchesInitQuery("CREATE TABLE IF NOT EXISTS `_vt`.vreplication")) + require.True(t, MatchesInitQuery("CREATE TABLE IF NOT EXISTS _vt.vreplication")) +} + +// test the logic that confirms that the user defined schema's table name and qualifier are valid +func TestValidateSchema(t *testing.T) { + type testCase struct { + testName string + name string + schema string + mustError bool + } + testCases := []testCase{ + {"valid", "t1", "create table if not exists t1(i int)", false}, + {"no if not exists", "t1", "create table t1(i int)", true}, + {"invalid table name", "t2", "create table if not exists t1(i int)", true}, + {"invalid table name", "t1", "create table if not exists t2(i int)", true}, + {"qualifier", "t1", "create table if not exists vt_product.t1(i int)", true}, + } + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + _, err := validateSchemaDefinition(tc.name, tc.schema) + if tc.mustError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } } // TestAlterTableAlgorithm confirms that we use ALGORITHM=COPY during alter tables diff --git a/go/vt/sqlparser/analyzer.go b/go/vt/sqlparser/analyzer.go index a9900f39044..59cc1c1834d 100644 --- a/go/vt/sqlparser/analyzer.go +++ b/go/vt/sqlparser/analyzer.go @@ -59,6 +59,10 @@ const ( StmtRevert StmtShowMigrationLogs StmtCommentOnly + StmtPrepare + StmtExecute + StmtDeallocate + StmtKill ) // ASTToStatementType returns a StatementType from an AST stmt @@ -114,6 +118,14 @@ func ASTToStatementType(stmt Statement) StatementType { return StmtVStream case *CommentOnly: return StmtCommentOnly + case *PrepareStmt: + return StmtPrepare + case *ExecuteStmt: + return StmtExecute + case *DeallocateStmt: + return StmtDeallocate + case *Kill: + return StmtKill default: return StmtUnknown } @@ -241,6 +253,8 @@ func Preview(sql string) StatementType { return StmtRelease case "rollback": return StmtSRollback + case "kill": + return StmtKill } return StmtUnknown } @@ -299,6 +313,14 @@ func (s StatementType) String() string { return "CALL_PROC" case StmtCommentOnly: return "COMMENT_ONLY" + case StmtPrepare: + return "PREPARE" + case StmtExecute: + return "EXECUTE" + case StmtDeallocate: + return "DEALLOCATE PREPARE" + case StmtKill: + return "KILL" default: return "UNKNOWN" } @@ -368,7 +390,7 @@ func IsColName(node Expr) bool { // NULL is not considered to be a value. func IsValue(node Expr) bool { switch v := node.(type) { - case Argument: + case *Argument: return true case *Literal: switch v.Type { diff --git a/go/vt/sqlparser/ast.go b/go/vt/sqlparser/ast.go index 63285aa79d6..00db3b93436 100644 --- a/go/vt/sqlparser/ast.go +++ b/go/vt/sqlparser/ast.go @@ -16,6 +16,11 @@ limitations under the License. package sqlparser +import ( + "vitess.io/vitess/go/mysql/datetime" + "vitess.io/vitess/go/sqltypes" +) + /* This is the Vitess AST. This file should only contain pure struct declarations, or methods used to mark a struct as implementing an interface. All other methods @@ -58,6 +63,7 @@ type ( GetOrderBy() OrderBy GetLimit() *Limit SetLimit(*Limit) + GetLock() Lock SetLock(lock Lock) SetInto(into *SelectInto) SetWith(with *With) @@ -65,6 +71,7 @@ type ( GetColumnCount() int GetColumns() SelectExprs Commented + IsDistinct() bool } // DDLStatement represents any DDL Statement @@ -132,10 +139,11 @@ type ( // AlterColumn is used to add or drop defaults & visibility to columns in alter table command AlterColumn struct { - Column *ColName - DropDefault bool - DefaultVal Expr - Invisible *bool + Column *ColName + DropDefault bool + DefaultVal Expr + DefaultLiteral bool + Invisible *bool } // With contains the lists of common table expression and specifies if it is recursive or not @@ -320,10 +328,12 @@ type ( // of the implications the deletion part may have on vindexes. // If you add fields here, consider adding them to calls to validateUnshardedRoute. Insert struct { - Action InsertAction - Comments *ParsedComments - Ignore Ignore - Table TableName + Action InsertAction + Comments *ParsedComments + Ignore Ignore + // The Insert as syntax still take TableName. + // The change is made for semantic analyzer as it takes AliasedTableExpr to provide TableInfo + Table *AliasedTableExpr Partitions Partitions Columns Columns Rows InsertRows @@ -552,6 +562,12 @@ type ( Load struct { } + // PurgeBinaryLogs represents a PURGE BINARY LOGS statement + PurgeBinaryLogs struct { + To string + Before string + } + // Show represents a show statement. Show struct { Internal ShowInternal @@ -662,17 +678,10 @@ type ( // DeallocateStmt represents a Deallocate Statement // More info available on https://dev.mysql.com/doc/refman/8.0/en/deallocate-prepare.html DeallocateStmt struct { - Type DeallocateStmtType Comments *ParsedComments Name IdentifierCI } - // DeallocateStmtType is an enum to get types of deallocate - DeallocateStmtType int8 - - // IntervalTypes is an enum to get types of intervals - IntervalTypes int8 - // OtherRead represents a DESCRIBE, or EXPLAIN statement. // It should be used only as an indicator. It does not contain // the full AST for the statement. @@ -688,6 +697,15 @@ type ( CommentOnly struct { Comments []string } + + // KillType is an enum for Kill.Type + KillType int8 + + // Kill represents a kill statement + Kill struct { + Type KillType + ProcesslistID uint64 + } ) func (*Union) iStatement() {} @@ -739,6 +757,8 @@ func (*ExplainTab) iStatement() {} func (*PrepareStmt) iStatement() {} func (*ExecuteStmt) iStatement() {} func (*DeallocateStmt) iStatement() {} +func (*PurgeBinaryLogs) iStatement() {} +func (*Kill) iStatement() {} func (*CreateView) iDDLStatement() {} func (*AlterView) iDDLStatement() {} @@ -1817,14 +1837,15 @@ type ColumnTypeOptions struct { The complexity arises from the fact that we do not know whether the column will be nullable or not if nothing is specified. Therefore we do not know whether the column is nullable or not in case 3. */ - Null *bool - Autoincrement bool - Default Expr - OnUpdate Expr - As Expr - Comment *Literal - Storage ColumnStorage - Collate string + Null *bool + Autoincrement bool + Default Expr + DefaultLiteral bool + OnUpdate Expr + As Expr + Comment *Literal + Storage ColumnStorage + Collate string // Reference stores a foreign key constraint for the given column Reference *ReferenceDefinition @@ -2036,7 +2057,7 @@ type ( SQLNode } - // TableName represents a table name. + // TableName represents a table name. // Qualifier, if specified, represents a database or keyspace. // TableName is a value struct whose fields are case sensitive. // This means two TableName vars can be compared for equality @@ -2149,6 +2170,7 @@ type ( // More information available here: https://dev.mysql.com/doc/refman/8.0/en/window-functions-frames.html FramePoint struct { Type FramePointType + Unit IntervalType Expr Expr } @@ -2256,6 +2278,11 @@ type ( Subquery *Subquery } + // AssignmentExpr represents an expression of type @value := x. + AssignmentExpr struct { + Left, Right Expr + } + // Literal represents a fixed value. Literal struct { Type ValType @@ -2263,7 +2290,10 @@ type ( } // Argument represents bindvariable expression - Argument string + Argument struct { + Name string + Type sqltypes.Type + } // NullVal represents a NULL value. NullVal struct{} @@ -2273,11 +2303,6 @@ type ( // ColName represents a column name. ColName struct { - // Metadata is not populated by the parser. - // It's a placeholder for analyzers to store - // additional data, typically info about which - // table or column this node references. - Metadata any Name IdentifierCI Qualifier TableName } @@ -2327,24 +2352,17 @@ type ( Expr Expr } - // IntervalExpr represents a date-time INTERVAL expression. - IntervalExpr struct { - Expr Expr - Unit string - } - - // TimestampFuncExpr represents the function and arguments for TIMESTAMP{ADD,DIFF} functions. - TimestampFuncExpr struct { - Name string + // TimestampDiffExpr represents the function and arguments for TIMESTAMPDIFF functions. + TimestampDiffExpr struct { Expr1 Expr Expr2 Expr - Unit string + Unit IntervalType } // ExtractFuncExpr represents the function and arguments for EXTRACT(YEAR FROM '2019-07-02') type functions. ExtractFuncExpr struct { - IntervalTypes IntervalTypes - Expr Expr + IntervalType IntervalType + Expr Expr } // CollateExpr represents dynamic collate operator. @@ -2463,7 +2481,7 @@ type ( // supported functions are documented in the grammar CurTimeFuncExpr struct { Name IdentifierCI - Fsp Expr // fractional seconds precision, integer from 0 to 6 or an Argument + Fsp int // fractional seconds precision, integer from 0 to 6 or an Argument } // ExtractedSubquery is a subquery that has been extracted from the original AST @@ -2503,7 +2521,7 @@ type ( // it is the column offset from the incoming result stream Offset struct { V int - Original string + Original Expr } // JSONArrayExpr represents JSON_ARRAY() @@ -2704,28 +2722,153 @@ type ( JSONValue Expr } - //PointExpr represents POINT(x,y) expression + // PointExpr represents POINT(x,y) expression PointExpr struct { XCordinate Expr YCordinate Expr } - //LineString represents LineString(POINT(x,y), POINT(x,y), ..) expression + // LineString represents LineString(POINT(x,y), POINT(x,y), ..) expression LineStringExpr struct { PointParams Exprs } - //PolygonExpr represents Polygon(LineString(POINT(x,y), POINT(x,y), ..)) expressions + // PolygonExpr represents Polygon(LineString(POINT(x,y), POINT(x,y), ..)) expressions PolygonExpr struct { LinestringParams Exprs } + // MultiPoint represents a geometry collection for points + MultiPointExpr struct { + PointParams Exprs + } + + // MultiPoint represents a geometry collection for linestrings + MultiLinestringExpr struct { + LinestringParams Exprs + } + + // MultiPolygon represents a geometry collection for polygons + MultiPolygonExpr struct { + PolygonParams Exprs + } + + // GeomFromWktType is an enum to get the types of wkt functions with possible values: GeometryFromText GeometryCollectionFromText PointFromText LineStringFromText PolygonFromText MultiPointFromText MultiPolygonFromText MultiLinestringFromText + GeomFromWktType int8 + + GeomFromTextExpr struct { + Type GeomFromWktType + WktText Expr + Srid Expr + AxisOrderOpt Expr + } + + // GeomFromWkbType is an enum to get the types of wkb functions with possible values: GeometryFromWKB GeometryCollectionFromWKB PointFromWKB LineStringFromWKB PolygonFromWKB MultiPointFromWKB MultiPolygonFromWKB MultiLinestringFromWKB + GeomFromWkbType int8 + + GeomFromWKBExpr struct { + Type GeomFromWkbType + WkbBlob Expr + Srid Expr + AxisOrderOpt Expr + } + + // GeomFormatType is an enum to get the types of geom format functions with possible values: BinaryFormat TextFormat + GeomFormatType int8 + + GeomFormatExpr struct { + FormatType GeomFormatType + Geom Expr + AxisOrderOpt Expr + } + + // GeomPropertyType is an enum to get the types of geom property functions with possible values: Dimension Envelope IsSimple IsEmpty GeometryType + GeomPropertyType int8 + + GeomPropertyFuncExpr struct { + Property GeomPropertyType + Geom Expr + } + + // PointPropertyType is an that enumerates the kind of point property functions: XCordinate YCordinate Latitude Longitude + PointPropertyType int8 + + PointPropertyFuncExpr struct { + Property PointPropertyType + Point Expr + ValueToSet Expr + } + + // LinestrPropType is an enum that enumerates the kind of line string property functions: EndPoint IsClosed Length NumPoints PointN StartPoint + LinestrPropType int8 + + LinestrPropertyFuncExpr struct { + Property LinestrPropType + Linestring Expr + PropertyDefArg Expr + } + + // PolygonPropType is an enum that enumerates the kind of polygon property functions: Area Centroid ExteriorRing InteriorRingN NumInteriorRing + PolygonPropType int8 + + PolygonPropertyFuncExpr struct { + Property PolygonPropType + Polygon Expr + PropertyDefArg Expr + } + + // GeomCollPropType is an enumthat enumerates the kind of geom coll property functions with possible values: GeometryN NumGeometries + GeomCollPropType int8 + + GeomCollPropertyFuncExpr struct { + Property GeomCollPropType + GeomColl Expr + PropertyDefArg Expr + } + + GeoHashFromLatLongExpr struct { + Latitude Expr + Longitude Expr + MaxLength Expr + } + + GeoHashFromPointExpr struct { + Point Expr + MaxLength Expr + } + + // GeomFromHashType is an enum that determines what kind geom being retireived from hash + GeomFromHashType int8 + + GeomFromGeoHashExpr struct { + GeomType GeomFromHashType + GeoHash Expr + SridOpt Expr + } + + GeoJSONFromGeomExpr struct { + Geom Expr + MaxDecimalDigits Expr + Bitmask Expr + } + + GeomFromGeoJSONExpr struct { + GeoJSON Expr + HigherDimHandlerOpt Expr // This value determine how the higher dimensions are handled while converting json to geometry + Srid Expr + } + AggrFunc interface { Expr - AggrName() string GetArg() Expr - IsDistinct() bool GetArgs() Exprs + // AggrName returns the lower case string representing this aggregation function + AggrName() string + } + + DistinctableAggr interface { + IsDistinct() bool + SetDistinct(bool) } Count struct { @@ -2734,6 +2877,33 @@ type ( } CountStar struct { + _ bool + // TL;DR; This makes sure that reference equality checks works as expected + // + // You're correct that this might seem a bit strange at first glance. + // It's a quirk of Go's handling of empty structs. In Go, two instances of an empty struct are considered + // identical, which can be problematic when using these as keys in maps. + // They would be treated as the same key and potentially lead to incorrect map behavior. + // + // Here's a brief example: + // + // ```golang + // func TestWeirdGo(t *testing.T) { + // type CountStar struct{} + // + // cs1 := &CountStar{} + // cs2 := &CountStar{} + // if cs1 == cs2 { + // panic("what the what!?") + // } + // } + // ``` + // + // In the above code, cs1 and cs2, despite being distinct variables, would be treated as the same object. + // + // The solution we employed was to add a dummy field `_ bool` to the otherwise empty struct `CountStar`. + // This ensures that each instance of `CountStar` is treated as a separate object, + // even in the context of out semantic state which uses these objects as map keys. } Avg struct { @@ -2805,8 +2975,15 @@ type ( Limit *Limit } + // AnyValue is an aggregation function in Vitess, even if the MySQL manual explicitly says it's not + // It's just simpler to treat it as one + // see https://dev.mysql.com/doc/refman/8.0/en/miscellaneous-functions.html#function_any-value + AnyValue struct { + Arg Expr + } + // RegexpInstrExpr represents REGEXP_INSTR() - // For more information, postVisit https://dev.mysql.com/doc/refman/8.0/en/regexp.html#function_regexp-instr + // For more information, see https://dev.mysql.com/doc/refman/8.0/en/regexp.html#function_regexp-instr RegexpInstrExpr struct { Expr Expr Pattern Expr @@ -2817,7 +2994,7 @@ type ( } // RegexpLikeExpr represents REGEXP_LIKE() - // For more information, postVisit https://dev.mysql.com/doc/refman/8.0/en/regexp.html#function_regexp-like + // For more information, see https://dev.mysql.com/doc/refman/8.0/en/regexp.html#function_regexp-like RegexpLikeExpr struct { Expr Expr Pattern Expr @@ -2825,7 +3002,7 @@ type ( } // RegexpReplaceExpr represents REGEXP_REPLACE() - // For more information, postVisit https://dev.mysql.com/doc/refman/8.0/en/regexp.html#function_regexp-replace + // For more information, see https://dev.mysql.com/doc/refman/8.0/en/regexp.html#function_regexp-replace RegexpReplaceExpr struct { Expr Expr Pattern Expr @@ -2836,7 +3013,7 @@ type ( } // RegexpSubstrExpr represents REGEXP_SUBSTR() - // For more information, postVisit https://dev.mysql.com/doc/refman/8.0/en/regexp.html#function_regexp-substr + // For more information, see https://dev.mysql.com/doc/refman/8.0/en/regexp.html#function_regexp-substr RegexpSubstrExpr struct { Expr Expr Pattern Expr @@ -2845,6 +3022,16 @@ type ( MatchType Expr } + IntervalType = datetime.IntervalType + + // IntervalDateExpr represents ADDDATE(), DATE_ADD() + IntervalDateExpr struct { + Syntax IntervalExprSyntax + Date Expr + Interval Expr + Unit IntervalType + } + // ArgumentLessWindowExpr stands for the following window_functions: CUME_DIST, DENSE_RANK, PERCENT_RANK, RANK, ROW_NUMBER // These functions do not take any argument. ArgumentLessWindowExpr struct { @@ -2959,8 +3146,9 @@ func (*ComparisonExpr) iExpr() {} func (*BetweenExpr) iExpr() {} func (*IsExpr) iExpr() {} func (*ExistsExpr) iExpr() {} +func (*AssignmentExpr) iExpr() {} func (*Literal) iExpr() {} -func (Argument) iExpr() {} +func (*Argument) iExpr() {} func (*NullVal) iExpr() {} func (BoolVal) iExpr() {} func (*ColName) iExpr() {} @@ -2970,10 +3158,9 @@ func (ListArg) iExpr() {} func (*BinaryExpr) iExpr() {} func (*UnaryExpr) iExpr() {} func (*IntroducerExpr) iExpr() {} -func (*IntervalExpr) iExpr() {} func (*CollateExpr) iExpr() {} func (*FuncExpr) iExpr() {} -func (*TimestampFuncExpr) iExpr() {} +func (*TimestampDiffExpr) iExpr() {} func (*ExtractFuncExpr) iExpr() {} func (*WeightStringFuncExpr) iExpr() {} func (*CurTimeFuncExpr) iExpr() {} @@ -3017,6 +3204,7 @@ func (*RegexpInstrExpr) iExpr() {} func (*RegexpLikeExpr) iExpr() {} func (*RegexpReplaceExpr) iExpr() {} func (*RegexpSubstrExpr) iExpr() {} +func (*IntervalDateExpr) iExpr() {} func (*ArgumentLessWindowExpr) iExpr() {} func (*FirstOrLastValueExpr) iExpr() {} func (*NtileExpr) iExpr() {} @@ -3035,6 +3223,7 @@ func (*Avg) iExpr() {} func (*CountStar) iExpr() {} func (*Count) iExpr() {} func (*GroupConcatExpr) iExpr() {} +func (*AnyValue) iExpr() {} func (*BitAnd) iExpr() {} func (*BitOr) iExpr() {} func (*BitXor) iExpr() {} @@ -3049,10 +3238,26 @@ func (*Variable) iExpr() {} func (*PointExpr) iExpr() {} func (*LineStringExpr) iExpr() {} func (*PolygonExpr) iExpr() {} +func (*MultiPolygonExpr) iExpr() {} +func (*MultiPointExpr) iExpr() {} +func (*MultiLinestringExpr) iExpr() {} +func (*GeomFromTextExpr) iExpr() {} +func (*GeomFromWKBExpr) iExpr() {} +func (*GeomFormatExpr) iExpr() {} +func (*GeomPropertyFuncExpr) iExpr() {} +func (*PointPropertyFuncExpr) iExpr() {} +func (*LinestrPropertyFuncExpr) iExpr() {} +func (*PolygonPropertyFuncExpr) iExpr() {} +func (*GeomCollPropertyFuncExpr) iExpr() {} +func (*GeoHashFromLatLongExpr) iExpr() {} +func (*GeoHashFromPointExpr) iExpr() {} +func (*GeomFromGeoHashExpr) iExpr() {} +func (*GeoJSONFromGeomExpr) iExpr() {} +func (*GeomFromGeoJSONExpr) iExpr() {} // iCallable marks all expressions that represent function calls func (*FuncExpr) iCallable() {} -func (*TimestampFuncExpr) iCallable() {} +func (*TimestampDiffExpr) iCallable() {} func (*ExtractFuncExpr) iCallable() {} func (*WeightStringFuncExpr) iCallable() {} func (*CurTimeFuncExpr) iCallable() {} @@ -3067,6 +3272,7 @@ func (*CharExpr) iCallable() {} func (*ConvertUsingExpr) iCallable() {} func (*MatchExpr) iCallable() {} func (*GroupConcatExpr) iCallable() {} +func (*AnyValue) iCallable() {} func (*JSONSchemaValidFuncExpr) iCallable() {} func (*JSONSchemaValidationReportFuncExpr) iCallable() {} func (*JSONPrettyExpr) iCallable() {} @@ -3092,6 +3298,7 @@ func (*RegexpInstrExpr) iCallable() {} func (*RegexpLikeExpr) iCallable() {} func (*RegexpReplaceExpr) iCallable() {} func (*RegexpSubstrExpr) iCallable() {} +func (*IntervalDateExpr) iCallable() {} func (*ArgumentLessWindowExpr) iCallable() {} func (*FirstOrLastValueExpr) iCallable() {} func (*NtileExpr) iCallable() {} @@ -3105,6 +3312,22 @@ func (*GTIDFuncExpr) iCallable() {} func (*PointExpr) iCallable() {} func (*LineStringExpr) iCallable() {} func (*PolygonExpr) iCallable() {} +func (*MultiPolygonExpr) iCallable() {} +func (*MultiPointExpr) iCallable() {} +func (*MultiLinestringExpr) iCallable() {} +func (*GeomFromTextExpr) iCallable() {} +func (*GeomFromWKBExpr) iCallable() {} +func (*GeomFormatExpr) iCallable() {} +func (*GeomPropertyFuncExpr) iCallable() {} +func (*PointPropertyFuncExpr) iCallable() {} +func (*LinestrPropertyFuncExpr) iCallable() {} +func (*PolygonPropertyFuncExpr) iCallable() {} +func (*GeomCollPropertyFuncExpr) iCallable() {} +func (*GeoHashFromLatLongExpr) iCallable() {} +func (*GeoHashFromPointExpr) iCallable() {} +func (*GeomFromGeoHashExpr) iCallable() {} +func (*GeoJSONFromGeomExpr) iCallable() {} +func (*GeomFromGeoJSONExpr) iCallable() {} func (*Sum) iCallable() {} func (*Min) iCallable() {} @@ -3130,6 +3353,7 @@ func (stdS *StdSamp) GetArg() Expr { return stdS.Arg } func (varP *VarPop) GetArg() Expr { return varP.Arg } func (varS *VarSamp) GetArg() Expr { return varS.Arg } func (variance *Variance) GetArg() Expr { return variance.Arg } +func (av *AnyValue) GetArg() Expr { return av.Arg } func (sum *Sum) GetArgs() Exprs { return Exprs{sum.Arg} } func (min *Min) GetArgs() Exprs { return Exprs{min.Arg} } @@ -3148,42 +3372,40 @@ func (stdS *StdSamp) GetArgs() Exprs { return Exprs{stdS.Arg} } func (varP *VarPop) GetArgs() Exprs { return Exprs{varP.Arg} } func (varS *VarSamp) GetArgs() Exprs { return Exprs{varS.Arg} } func (variance *Variance) GetArgs() Exprs { return Exprs{variance.Arg} } +func (av *AnyValue) GetArgs() Exprs { return Exprs{av.Arg} } func (sum *Sum) IsDistinct() bool { return sum.Distinct } func (min *Min) IsDistinct() bool { return min.Distinct } func (max *Max) IsDistinct() bool { return max.Distinct } func (avg *Avg) IsDistinct() bool { return avg.Distinct } -func (cStar *CountStar) IsDistinct() bool { return false } func (count *Count) IsDistinct() bool { return count.Distinct } func (grpConcat *GroupConcatExpr) IsDistinct() bool { return grpConcat.Distinct } -func (bAnd *BitAnd) IsDistinct() bool { return false } -func (bOr *BitOr) IsDistinct() bool { return false } -func (bXor *BitXor) IsDistinct() bool { return false } -func (std *Std) IsDistinct() bool { return false } -func (stdD *StdDev) IsDistinct() bool { return false } -func (stdP *StdPop) IsDistinct() bool { return false } -func (stdS *StdSamp) IsDistinct() bool { return false } -func (varP *VarPop) IsDistinct() bool { return false } -func (varS *VarSamp) IsDistinct() bool { return false } -func (variance *Variance) IsDistinct() bool { return false } - -func (sum *Sum) AggrName() string { return "sum" } -func (min *Min) AggrName() string { return "min" } -func (max *Max) AggrName() string { return "max" } -func (avg *Avg) AggrName() string { return "avg" } -func (cStar *CountStar) AggrName() string { return "count" } -func (count *Count) AggrName() string { return "count" } -func (grpConcat *GroupConcatExpr) AggrName() string { return "group_concat" } -func (bAnd *BitAnd) AggrName() string { return "bit_and" } -func (bOr *BitOr) AggrName() string { return "bit_or" } -func (bXor *BitXor) AggrName() string { return "bit_xor" } -func (std *Std) AggrName() string { return "std" } -func (stdD *StdDev) AggrName() string { return "stddev" } -func (stdP *StdPop) AggrName() string { return "stddev_pop" } -func (stdS *StdSamp) AggrName() string { return "stddev_samp" } -func (varP *VarPop) AggrName() string { return "var_pop" } -func (varS *VarSamp) AggrName() string { return "var_samp" } -func (variance *Variance) AggrName() string { return "variance" } + +func (sum *Sum) SetDistinct(distinct bool) { sum.Distinct = distinct } +func (min *Min) SetDistinct(distinct bool) { min.Distinct = distinct } +func (max *Max) SetDistinct(distinct bool) { max.Distinct = distinct } +func (avg *Avg) SetDistinct(distinct bool) { avg.Distinct = distinct } +func (count *Count) SetDistinct(distinct bool) { count.Distinct = distinct } +func (grpConcat *GroupConcatExpr) SetDistinct(distinct bool) { grpConcat.Distinct = distinct } + +func (*Sum) AggrName() string { return "sum" } +func (*Min) AggrName() string { return "min" } +func (*Max) AggrName() string { return "max" } +func (*Avg) AggrName() string { return "avg" } +func (*CountStar) AggrName() string { return "count" } +func (*Count) AggrName() string { return "count" } +func (*GroupConcatExpr) AggrName() string { return "group_concat" } +func (*BitAnd) AggrName() string { return "bit_and" } +func (*BitOr) AggrName() string { return "bit_or" } +func (*BitXor) AggrName() string { return "bit_xor" } +func (*Std) AggrName() string { return "std" } +func (*StdDev) AggrName() string { return "stddev" } +func (*StdPop) AggrName() string { return "stddev_pop" } +func (*StdSamp) AggrName() string { return "stddev_samp" } +func (*VarPop) AggrName() string { return "var_pop" } +func (*VarSamp) AggrName() string { return "var_samp" } +func (*Variance) AggrName() string { return "variance" } +func (*AnyValue) AggrName() string { return "any_value" } // Exprs represents a list of value expressions. // It's not a valid expression because it's not parenthesized. diff --git a/go/vt/sqlparser/ast_clone.go b/go/vt/sqlparser/ast_clone.go index 95000f87388..f98cb44fab8 100644 --- a/go/vt/sqlparser/ast_clone.go +++ b/go/vt/sqlparser/ast_clone.go @@ -55,10 +55,14 @@ func CloneSQLNode(in SQLNode) SQLNode { return CloneRefOfAlterVschema(in) case *AndExpr: return CloneRefOfAndExpr(in) - case Argument: - return in + case *AnyValue: + return CloneRefOfAnyValue(in) + case *Argument: + return CloneRefOfArgument(in) case *ArgumentLessWindowExpr: return CloneRefOfArgumentLessWindowExpr(in) + case *AssignmentExpr: + return CloneRefOfAssignmentExpr(in) case *AutoIncSpec: return CloneRefOfAutoIncSpec(in) case *Avg: @@ -181,6 +185,26 @@ func CloneSQLNode(in SQLNode) SQLNode { return CloneRefOfFuncExpr(in) case *GTIDFuncExpr: return CloneRefOfGTIDFuncExpr(in) + case *GeoHashFromLatLongExpr: + return CloneRefOfGeoHashFromLatLongExpr(in) + case *GeoHashFromPointExpr: + return CloneRefOfGeoHashFromPointExpr(in) + case *GeoJSONFromGeomExpr: + return CloneRefOfGeoJSONFromGeomExpr(in) + case *GeomCollPropertyFuncExpr: + return CloneRefOfGeomCollPropertyFuncExpr(in) + case *GeomFormatExpr: + return CloneRefOfGeomFormatExpr(in) + case *GeomFromGeoHashExpr: + return CloneRefOfGeomFromGeoHashExpr(in) + case *GeomFromGeoJSONExpr: + return CloneRefOfGeomFromGeoJSONExpr(in) + case *GeomFromTextExpr: + return CloneRefOfGeomFromTextExpr(in) + case *GeomFromWKBExpr: + return CloneRefOfGeomFromWKBExpr(in) + case *GeomPropertyFuncExpr: + return CloneRefOfGeomPropertyFuncExpr(in) case GroupBy: return CloneGroupBy(in) case *GroupConcatExpr: @@ -201,8 +225,8 @@ func CloneSQLNode(in SQLNode) SQLNode { return CloneRefOfInsert(in) case *InsertExpr: return CloneRefOfInsertExpr(in) - case *IntervalExpr: - return CloneRefOfIntervalExpr(in) + case *IntervalDateExpr: + return CloneRefOfIntervalDateExpr(in) case *IntervalFuncExpr: return CloneRefOfIntervalFuncExpr(in) case *IntroducerExpr: @@ -263,12 +287,16 @@ func CloneSQLNode(in SQLNode) SQLNode { return CloneRefOfJtOnResponse(in) case *KeyState: return CloneRefOfKeyState(in) + case *Kill: + return CloneRefOfKill(in) case *LagLeadExpr: return CloneRefOfLagLeadExpr(in) case *Limit: return CloneRefOfLimit(in) case *LineStringExpr: return CloneRefOfLineStringExpr(in) + case *LinestrPropertyFuncExpr: + return CloneRefOfLinestrPropertyFuncExpr(in) case ListArg: return in case *Literal: @@ -295,6 +323,12 @@ func CloneSQLNode(in SQLNode) SQLNode { return CloneRefOfMin(in) case *ModifyColumn: return CloneRefOfModifyColumn(in) + case *MultiLinestringExpr: + return CloneRefOfMultiLinestringExpr(in) + case *MultiPointExpr: + return CloneRefOfMultiPointExpr(in) + case *MultiPolygonExpr: + return CloneRefOfMultiPolygonExpr(in) case *NTHValueExpr: return CloneRefOfNTHValueExpr(in) case *NamedWindow: @@ -353,10 +387,16 @@ func CloneSQLNode(in SQLNode) SQLNode { return CloneRefOfPerformanceSchemaFuncExpr(in) case *PointExpr: return CloneRefOfPointExpr(in) + case *PointPropertyFuncExpr: + return CloneRefOfPointPropertyFuncExpr(in) case *PolygonExpr: return CloneRefOfPolygonExpr(in) + case *PolygonPropertyFuncExpr: + return CloneRefOfPolygonPropertyFuncExpr(in) case *PrepareStmt: return CloneRefOfPrepareStmt(in) + case *PurgeBinaryLogs: + return CloneRefOfPurgeBinaryLogs(in) case ReferenceAction: return in case *ReferenceDefinition: @@ -455,8 +495,8 @@ func CloneSQLNode(in SQLNode) SQLNode { return CloneRefOfTableSpec(in) case *TablespaceOperation: return CloneRefOfTablespaceOperation(in) - case *TimestampFuncExpr: - return CloneRefOfTimestampFuncExpr(in) + case *TimestampDiffExpr: + return CloneRefOfTimestampDiffExpr(in) case *TrimFuncExpr: return CloneRefOfTrimFuncExpr(in) case *TruncateTable: @@ -693,6 +733,25 @@ func CloneRefOfAndExpr(n *AndExpr) *AndExpr { return &out } +// CloneRefOfAnyValue creates a deep clone of the input. +func CloneRefOfAnyValue(n *AnyValue) *AnyValue { + if n == nil { + return nil + } + out := *n + out.Arg = CloneExpr(n.Arg) + return &out +} + +// CloneRefOfArgument creates a deep clone of the input. +func CloneRefOfArgument(n *Argument) *Argument { + if n == nil { + return nil + } + out := *n + return &out +} + // CloneRefOfArgumentLessWindowExpr creates a deep clone of the input. func CloneRefOfArgumentLessWindowExpr(n *ArgumentLessWindowExpr) *ArgumentLessWindowExpr { if n == nil { @@ -703,6 +762,17 @@ func CloneRefOfArgumentLessWindowExpr(n *ArgumentLessWindowExpr) *ArgumentLessWi return &out } +// CloneRefOfAssignmentExpr creates a deep clone of the input. +func CloneRefOfAssignmentExpr(n *AssignmentExpr) *AssignmentExpr { + if n == nil { + return nil + } + out := *n + out.Left = CloneExpr(n.Left) + out.Right = CloneExpr(n.Right) + return &out +} + // CloneRefOfAutoIncSpec creates a deep clone of the input. func CloneRefOfAutoIncSpec(n *AutoIncSpec) *AutoIncSpec { if n == nil { @@ -1057,7 +1127,6 @@ func CloneRefOfCurTimeFuncExpr(n *CurTimeFuncExpr) *CurTimeFuncExpr { } out := *n out.Name = CloneIdentifierCI(n.Name) - out.Fsp = CloneExpr(n.Fsp) return &out } @@ -1358,6 +1427,120 @@ func CloneRefOfGTIDFuncExpr(n *GTIDFuncExpr) *GTIDFuncExpr { return &out } +// CloneRefOfGeoHashFromLatLongExpr creates a deep clone of the input. +func CloneRefOfGeoHashFromLatLongExpr(n *GeoHashFromLatLongExpr) *GeoHashFromLatLongExpr { + if n == nil { + return nil + } + out := *n + out.Latitude = CloneExpr(n.Latitude) + out.Longitude = CloneExpr(n.Longitude) + out.MaxLength = CloneExpr(n.MaxLength) + return &out +} + +// CloneRefOfGeoHashFromPointExpr creates a deep clone of the input. +func CloneRefOfGeoHashFromPointExpr(n *GeoHashFromPointExpr) *GeoHashFromPointExpr { + if n == nil { + return nil + } + out := *n + out.Point = CloneExpr(n.Point) + out.MaxLength = CloneExpr(n.MaxLength) + return &out +} + +// CloneRefOfGeoJSONFromGeomExpr creates a deep clone of the input. +func CloneRefOfGeoJSONFromGeomExpr(n *GeoJSONFromGeomExpr) *GeoJSONFromGeomExpr { + if n == nil { + return nil + } + out := *n + out.Geom = CloneExpr(n.Geom) + out.MaxDecimalDigits = CloneExpr(n.MaxDecimalDigits) + out.Bitmask = CloneExpr(n.Bitmask) + return &out +} + +// CloneRefOfGeomCollPropertyFuncExpr creates a deep clone of the input. +func CloneRefOfGeomCollPropertyFuncExpr(n *GeomCollPropertyFuncExpr) *GeomCollPropertyFuncExpr { + if n == nil { + return nil + } + out := *n + out.GeomColl = CloneExpr(n.GeomColl) + out.PropertyDefArg = CloneExpr(n.PropertyDefArg) + return &out +} + +// CloneRefOfGeomFormatExpr creates a deep clone of the input. +func CloneRefOfGeomFormatExpr(n *GeomFormatExpr) *GeomFormatExpr { + if n == nil { + return nil + } + out := *n + out.Geom = CloneExpr(n.Geom) + out.AxisOrderOpt = CloneExpr(n.AxisOrderOpt) + return &out +} + +// CloneRefOfGeomFromGeoHashExpr creates a deep clone of the input. +func CloneRefOfGeomFromGeoHashExpr(n *GeomFromGeoHashExpr) *GeomFromGeoHashExpr { + if n == nil { + return nil + } + out := *n + out.GeoHash = CloneExpr(n.GeoHash) + out.SridOpt = CloneExpr(n.SridOpt) + return &out +} + +// CloneRefOfGeomFromGeoJSONExpr creates a deep clone of the input. +func CloneRefOfGeomFromGeoJSONExpr(n *GeomFromGeoJSONExpr) *GeomFromGeoJSONExpr { + if n == nil { + return nil + } + out := *n + out.GeoJSON = CloneExpr(n.GeoJSON) + out.HigherDimHandlerOpt = CloneExpr(n.HigherDimHandlerOpt) + out.Srid = CloneExpr(n.Srid) + return &out +} + +// CloneRefOfGeomFromTextExpr creates a deep clone of the input. +func CloneRefOfGeomFromTextExpr(n *GeomFromTextExpr) *GeomFromTextExpr { + if n == nil { + return nil + } + out := *n + out.WktText = CloneExpr(n.WktText) + out.Srid = CloneExpr(n.Srid) + out.AxisOrderOpt = CloneExpr(n.AxisOrderOpt) + return &out +} + +// CloneRefOfGeomFromWKBExpr creates a deep clone of the input. +func CloneRefOfGeomFromWKBExpr(n *GeomFromWKBExpr) *GeomFromWKBExpr { + if n == nil { + return nil + } + out := *n + out.WkbBlob = CloneExpr(n.WkbBlob) + out.Srid = CloneExpr(n.Srid) + out.AxisOrderOpt = CloneExpr(n.AxisOrderOpt) + return &out +} + +// CloneRefOfGeomPropertyFuncExpr creates a deep clone of the input. +func CloneRefOfGeomPropertyFuncExpr(n *GeomPropertyFuncExpr) *GeomPropertyFuncExpr { + if n == nil { + return nil + } + out := *n + out.Geom = CloneExpr(n.Geom) + return &out +} + // CloneGroupBy creates a deep clone of the input. func CloneGroupBy(n GroupBy) GroupBy { if n == nil { @@ -1444,7 +1627,7 @@ func CloneRefOfInsert(n *Insert) *Insert { } out := *n out.Comments = CloneRefOfParsedComments(n.Comments) - out.Table = CloneTableName(n.Table) + out.Table = CloneRefOfAliasedTableExpr(n.Table) out.Partitions = ClonePartitions(n.Partitions) out.Columns = CloneColumns(n.Columns) out.Rows = CloneInsertRows(n.Rows) @@ -1465,13 +1648,14 @@ func CloneRefOfInsertExpr(n *InsertExpr) *InsertExpr { return &out } -// CloneRefOfIntervalExpr creates a deep clone of the input. -func CloneRefOfIntervalExpr(n *IntervalExpr) *IntervalExpr { +// CloneRefOfIntervalDateExpr creates a deep clone of the input. +func CloneRefOfIntervalDateExpr(n *IntervalDateExpr) *IntervalDateExpr { if n == nil { return nil } out := *n - out.Expr = CloneExpr(n.Expr) + out.Date = CloneExpr(n.Date) + out.Interval = CloneExpr(n.Interval) return &out } @@ -1805,6 +1989,15 @@ func CloneRefOfKeyState(n *KeyState) *KeyState { return &out } +// CloneRefOfKill creates a deep clone of the input. +func CloneRefOfKill(n *Kill) *Kill { + if n == nil { + return nil + } + out := *n + return &out +} + // CloneRefOfLagLeadExpr creates a deep clone of the input. func CloneRefOfLagLeadExpr(n *LagLeadExpr) *LagLeadExpr { if n == nil { @@ -1840,6 +2033,17 @@ func CloneRefOfLineStringExpr(n *LineStringExpr) *LineStringExpr { return &out } +// CloneRefOfLinestrPropertyFuncExpr creates a deep clone of the input. +func CloneRefOfLinestrPropertyFuncExpr(n *LinestrPropertyFuncExpr) *LinestrPropertyFuncExpr { + if n == nil { + return nil + } + out := *n + out.Linestring = CloneExpr(n.Linestring) + out.PropertyDefArg = CloneExpr(n.PropertyDefArg) + return &out +} + // CloneRefOfLiteral creates a deep clone of the input. func CloneRefOfLiteral(n *Literal) *Literal { if n == nil { @@ -1953,6 +2157,36 @@ func CloneRefOfModifyColumn(n *ModifyColumn) *ModifyColumn { return &out } +// CloneRefOfMultiLinestringExpr creates a deep clone of the input. +func CloneRefOfMultiLinestringExpr(n *MultiLinestringExpr) *MultiLinestringExpr { + if n == nil { + return nil + } + out := *n + out.LinestringParams = CloneExprs(n.LinestringParams) + return &out +} + +// CloneRefOfMultiPointExpr creates a deep clone of the input. +func CloneRefOfMultiPointExpr(n *MultiPointExpr) *MultiPointExpr { + if n == nil { + return nil + } + out := *n + out.PointParams = CloneExprs(n.PointParams) + return &out +} + +// CloneRefOfMultiPolygonExpr creates a deep clone of the input. +func CloneRefOfMultiPolygonExpr(n *MultiPolygonExpr) *MultiPolygonExpr { + if n == nil { + return nil + } + out := *n + out.PolygonParams = CloneExprs(n.PolygonParams) + return &out +} + // CloneRefOfNTHValueExpr creates a deep clone of the input. func CloneRefOfNTHValueExpr(n *NTHValueExpr) *NTHValueExpr { if n == nil { @@ -2044,6 +2278,7 @@ func CloneRefOfOffset(n *Offset) *Offset { return nil } out := *n + out.Original = CloneExpr(n.Original) return &out } @@ -2267,6 +2502,17 @@ func CloneRefOfPointExpr(n *PointExpr) *PointExpr { return &out } +// CloneRefOfPointPropertyFuncExpr creates a deep clone of the input. +func CloneRefOfPointPropertyFuncExpr(n *PointPropertyFuncExpr) *PointPropertyFuncExpr { + if n == nil { + return nil + } + out := *n + out.Point = CloneExpr(n.Point) + out.ValueToSet = CloneExpr(n.ValueToSet) + return &out +} + // CloneRefOfPolygonExpr creates a deep clone of the input. func CloneRefOfPolygonExpr(n *PolygonExpr) *PolygonExpr { if n == nil { @@ -2277,6 +2523,17 @@ func CloneRefOfPolygonExpr(n *PolygonExpr) *PolygonExpr { return &out } +// CloneRefOfPolygonPropertyFuncExpr creates a deep clone of the input. +func CloneRefOfPolygonPropertyFuncExpr(n *PolygonPropertyFuncExpr) *PolygonPropertyFuncExpr { + if n == nil { + return nil + } + out := *n + out.Polygon = CloneExpr(n.Polygon) + out.PropertyDefArg = CloneExpr(n.PropertyDefArg) + return &out +} + // CloneRefOfPrepareStmt creates a deep clone of the input. func CloneRefOfPrepareStmt(n *PrepareStmt) *PrepareStmt { if n == nil { @@ -2289,6 +2546,15 @@ func CloneRefOfPrepareStmt(n *PrepareStmt) *PrepareStmt { return &out } +// CloneRefOfPurgeBinaryLogs creates a deep clone of the input. +func CloneRefOfPurgeBinaryLogs(n *PurgeBinaryLogs) *PurgeBinaryLogs { + if n == nil { + return nil + } + out := *n + return &out +} + // CloneRefOfReferenceDefinition creates a deep clone of the input. func CloneRefOfReferenceDefinition(n *ReferenceDefinition) *ReferenceDefinition { if n == nil { @@ -2817,8 +3083,8 @@ func CloneRefOfTablespaceOperation(n *TablespaceOperation) *TablespaceOperation return &out } -// CloneRefOfTimestampFuncExpr creates a deep clone of the input. -func CloneRefOfTimestampFuncExpr(n *TimestampFuncExpr) *TimestampFuncExpr { +// CloneRefOfTimestampDiffExpr creates a deep clone of the input. +func CloneRefOfTimestampDiffExpr(n *TimestampDiffExpr) *TimestampDiffExpr { if n == nil { return nil } @@ -3164,6 +3430,8 @@ func CloneAggrFunc(in AggrFunc) AggrFunc { return nil } switch in := in.(type) { + case *AnyValue: + return CloneRefOfAnyValue(in) case *Avg: return CloneRefOfAvg(in) case *BitAnd: @@ -3266,6 +3534,8 @@ func CloneCallable(in Callable) Callable { return nil } switch in := in.(type) { + case *AnyValue: + return CloneRefOfAnyValue(in) case *ArgumentLessWindowExpr: return CloneRefOfArgumentLessWindowExpr(in) case *Avg: @@ -3292,10 +3562,32 @@ func CloneCallable(in Callable) Callable { return CloneRefOfFuncExpr(in) case *GTIDFuncExpr: return CloneRefOfGTIDFuncExpr(in) + case *GeoHashFromLatLongExpr: + return CloneRefOfGeoHashFromLatLongExpr(in) + case *GeoHashFromPointExpr: + return CloneRefOfGeoHashFromPointExpr(in) + case *GeoJSONFromGeomExpr: + return CloneRefOfGeoJSONFromGeomExpr(in) + case *GeomCollPropertyFuncExpr: + return CloneRefOfGeomCollPropertyFuncExpr(in) + case *GeomFormatExpr: + return CloneRefOfGeomFormatExpr(in) + case *GeomFromGeoHashExpr: + return CloneRefOfGeomFromGeoHashExpr(in) + case *GeomFromGeoJSONExpr: + return CloneRefOfGeomFromGeoJSONExpr(in) + case *GeomFromTextExpr: + return CloneRefOfGeomFromTextExpr(in) + case *GeomFromWKBExpr: + return CloneRefOfGeomFromWKBExpr(in) + case *GeomPropertyFuncExpr: + return CloneRefOfGeomPropertyFuncExpr(in) case *GroupConcatExpr: return CloneRefOfGroupConcatExpr(in) case *InsertExpr: return CloneRefOfInsertExpr(in) + case *IntervalDateExpr: + return CloneRefOfIntervalDateExpr(in) case *IntervalFuncExpr: return CloneRefOfIntervalFuncExpr(in) case *JSONArrayExpr: @@ -3342,6 +3634,8 @@ func CloneCallable(in Callable) Callable { return CloneRefOfLagLeadExpr(in) case *LineStringExpr: return CloneRefOfLineStringExpr(in) + case *LinestrPropertyFuncExpr: + return CloneRefOfLinestrPropertyFuncExpr(in) case *LocateExpr: return CloneRefOfLocateExpr(in) case *MatchExpr: @@ -3352,6 +3646,12 @@ func CloneCallable(in Callable) Callable { return CloneRefOfMemberOfExpr(in) case *Min: return CloneRefOfMin(in) + case *MultiLinestringExpr: + return CloneRefOfMultiLinestringExpr(in) + case *MultiPointExpr: + return CloneRefOfMultiPointExpr(in) + case *MultiPolygonExpr: + return CloneRefOfMultiPolygonExpr(in) case *NTHValueExpr: return CloneRefOfNTHValueExpr(in) case *NamedWindow: @@ -3362,8 +3662,12 @@ func CloneCallable(in Callable) Callable { return CloneRefOfPerformanceSchemaFuncExpr(in) case *PointExpr: return CloneRefOfPointExpr(in) + case *PointPropertyFuncExpr: + return CloneRefOfPointPropertyFuncExpr(in) case *PolygonExpr: return CloneRefOfPolygonExpr(in) + case *PolygonPropertyFuncExpr: + return CloneRefOfPolygonPropertyFuncExpr(in) case *RegexpInstrExpr: return CloneRefOfRegexpInstrExpr(in) case *RegexpLikeExpr: @@ -3376,8 +3680,8 @@ func CloneCallable(in Callable) Callable { return CloneRefOfSubstrExpr(in) case *Sum: return CloneRefOfSum(in) - case *TimestampFuncExpr: - return CloneRefOfTimestampFuncExpr(in) + case *TimestampDiffExpr: + return CloneRefOfTimestampDiffExpr(in) case *TrimFuncExpr: return CloneRefOfTrimFuncExpr(in) case *UpdateXMLExpr: @@ -3496,10 +3800,14 @@ func CloneExpr(in Expr) Expr { switch in := in.(type) { case *AndExpr: return CloneRefOfAndExpr(in) - case Argument: - return in + case *AnyValue: + return CloneRefOfAnyValue(in) + case *Argument: + return CloneRefOfArgument(in) case *ArgumentLessWindowExpr: return CloneRefOfArgumentLessWindowExpr(in) + case *AssignmentExpr: + return CloneRefOfAssignmentExpr(in) case *Avg: return CloneRefOfAvg(in) case *BetweenExpr: @@ -3552,12 +3860,32 @@ func CloneExpr(in Expr) Expr { return CloneRefOfFuncExpr(in) case *GTIDFuncExpr: return CloneRefOfGTIDFuncExpr(in) + case *GeoHashFromLatLongExpr: + return CloneRefOfGeoHashFromLatLongExpr(in) + case *GeoHashFromPointExpr: + return CloneRefOfGeoHashFromPointExpr(in) + case *GeoJSONFromGeomExpr: + return CloneRefOfGeoJSONFromGeomExpr(in) + case *GeomCollPropertyFuncExpr: + return CloneRefOfGeomCollPropertyFuncExpr(in) + case *GeomFormatExpr: + return CloneRefOfGeomFormatExpr(in) + case *GeomFromGeoHashExpr: + return CloneRefOfGeomFromGeoHashExpr(in) + case *GeomFromGeoJSONExpr: + return CloneRefOfGeomFromGeoJSONExpr(in) + case *GeomFromTextExpr: + return CloneRefOfGeomFromTextExpr(in) + case *GeomFromWKBExpr: + return CloneRefOfGeomFromWKBExpr(in) + case *GeomPropertyFuncExpr: + return CloneRefOfGeomPropertyFuncExpr(in) case *GroupConcatExpr: return CloneRefOfGroupConcatExpr(in) case *InsertExpr: return CloneRefOfInsertExpr(in) - case *IntervalExpr: - return CloneRefOfIntervalExpr(in) + case *IntervalDateExpr: + return CloneRefOfIntervalDateExpr(in) case *IntervalFuncExpr: return CloneRefOfIntervalFuncExpr(in) case *IntroducerExpr: @@ -3608,6 +3936,8 @@ func CloneExpr(in Expr) Expr { return CloneRefOfLagLeadExpr(in) case *LineStringExpr: return CloneRefOfLineStringExpr(in) + case *LinestrPropertyFuncExpr: + return CloneRefOfLinestrPropertyFuncExpr(in) case ListArg: return in case *Literal: @@ -3624,6 +3954,12 @@ func CloneExpr(in Expr) Expr { return CloneRefOfMemberOfExpr(in) case *Min: return CloneRefOfMin(in) + case *MultiLinestringExpr: + return CloneRefOfMultiLinestringExpr(in) + case *MultiPointExpr: + return CloneRefOfMultiPointExpr(in) + case *MultiPolygonExpr: + return CloneRefOfMultiPolygonExpr(in) case *NTHValueExpr: return CloneRefOfNTHValueExpr(in) case *NamedWindow: @@ -3642,8 +3978,12 @@ func CloneExpr(in Expr) Expr { return CloneRefOfPerformanceSchemaFuncExpr(in) case *PointExpr: return CloneRefOfPointExpr(in) + case *PointPropertyFuncExpr: + return CloneRefOfPointPropertyFuncExpr(in) case *PolygonExpr: return CloneRefOfPolygonExpr(in) + case *PolygonPropertyFuncExpr: + return CloneRefOfPolygonPropertyFuncExpr(in) case *RegexpInstrExpr: return CloneRefOfRegexpInstrExpr(in) case *RegexpLikeExpr: @@ -3666,8 +4006,8 @@ func CloneExpr(in Expr) Expr { return CloneRefOfSubstrExpr(in) case *Sum: return CloneRefOfSum(in) - case *TimestampFuncExpr: - return CloneRefOfTimestampFuncExpr(in) + case *TimestampDiffExpr: + return CloneRefOfTimestampDiffExpr(in) case *TrimFuncExpr: return CloneRefOfTrimFuncExpr(in) case *UnaryExpr: @@ -3832,6 +4172,8 @@ func CloneStatement(in Statement) Statement { return CloneRefOfFlush(in) case *Insert: return CloneRefOfInsert(in) + case *Kill: + return CloneRefOfKill(in) case *Load: return CloneRefOfLoad(in) case *LockTables: @@ -3842,6 +4184,8 @@ func CloneStatement(in Statement) Statement { return CloneRefOfOtherRead(in) case *PrepareStmt: return CloneRefOfPrepareStmt(in) + case *PurgeBinaryLogs: + return CloneRefOfPurgeBinaryLogs(in) case *Release: return CloneRefOfRelease(in) case *RenameTable: diff --git a/go/vt/sqlparser/ast_copy_on_rewrite.go b/go/vt/sqlparser/ast_copy_on_rewrite.go index 6816f82f42f..fed49abba0b 100644 --- a/go/vt/sqlparser/ast_copy_on_rewrite.go +++ b/go/vt/sqlparser/ast_copy_on_rewrite.go @@ -54,10 +54,14 @@ func (c *cow) copyOnRewriteSQLNode(n SQLNode, parent SQLNode) (out SQLNode, chan return c.copyOnRewriteRefOfAlterVschema(n, parent) case *AndExpr: return c.copyOnRewriteRefOfAndExpr(n, parent) - case Argument: - return c.copyOnRewriteArgument(n, parent) + case *AnyValue: + return c.copyOnRewriteRefOfAnyValue(n, parent) + case *Argument: + return c.copyOnRewriteRefOfArgument(n, parent) case *ArgumentLessWindowExpr: return c.copyOnRewriteRefOfArgumentLessWindowExpr(n, parent) + case *AssignmentExpr: + return c.copyOnRewriteRefOfAssignmentExpr(n, parent) case *AutoIncSpec: return c.copyOnRewriteRefOfAutoIncSpec(n, parent) case *Avg: @@ -180,6 +184,26 @@ func (c *cow) copyOnRewriteSQLNode(n SQLNode, parent SQLNode) (out SQLNode, chan return c.copyOnRewriteRefOfFuncExpr(n, parent) case *GTIDFuncExpr: return c.copyOnRewriteRefOfGTIDFuncExpr(n, parent) + case *GeoHashFromLatLongExpr: + return c.copyOnRewriteRefOfGeoHashFromLatLongExpr(n, parent) + case *GeoHashFromPointExpr: + return c.copyOnRewriteRefOfGeoHashFromPointExpr(n, parent) + case *GeoJSONFromGeomExpr: + return c.copyOnRewriteRefOfGeoJSONFromGeomExpr(n, parent) + case *GeomCollPropertyFuncExpr: + return c.copyOnRewriteRefOfGeomCollPropertyFuncExpr(n, parent) + case *GeomFormatExpr: + return c.copyOnRewriteRefOfGeomFormatExpr(n, parent) + case *GeomFromGeoHashExpr: + return c.copyOnRewriteRefOfGeomFromGeoHashExpr(n, parent) + case *GeomFromGeoJSONExpr: + return c.copyOnRewriteRefOfGeomFromGeoJSONExpr(n, parent) + case *GeomFromTextExpr: + return c.copyOnRewriteRefOfGeomFromTextExpr(n, parent) + case *GeomFromWKBExpr: + return c.copyOnRewriteRefOfGeomFromWKBExpr(n, parent) + case *GeomPropertyFuncExpr: + return c.copyOnRewriteRefOfGeomPropertyFuncExpr(n, parent) case GroupBy: return c.copyOnRewriteGroupBy(n, parent) case *GroupConcatExpr: @@ -200,8 +224,8 @@ func (c *cow) copyOnRewriteSQLNode(n SQLNode, parent SQLNode) (out SQLNode, chan return c.copyOnRewriteRefOfInsert(n, parent) case *InsertExpr: return c.copyOnRewriteRefOfInsertExpr(n, parent) - case *IntervalExpr: - return c.copyOnRewriteRefOfIntervalExpr(n, parent) + case *IntervalDateExpr: + return c.copyOnRewriteRefOfIntervalDateExpr(n, parent) case *IntervalFuncExpr: return c.copyOnRewriteRefOfIntervalFuncExpr(n, parent) case *IntroducerExpr: @@ -262,12 +286,16 @@ func (c *cow) copyOnRewriteSQLNode(n SQLNode, parent SQLNode) (out SQLNode, chan return c.copyOnRewriteRefOfJtOnResponse(n, parent) case *KeyState: return c.copyOnRewriteRefOfKeyState(n, parent) + case *Kill: + return c.copyOnRewriteRefOfKill(n, parent) case *LagLeadExpr: return c.copyOnRewriteRefOfLagLeadExpr(n, parent) case *Limit: return c.copyOnRewriteRefOfLimit(n, parent) case *LineStringExpr: return c.copyOnRewriteRefOfLineStringExpr(n, parent) + case *LinestrPropertyFuncExpr: + return c.copyOnRewriteRefOfLinestrPropertyFuncExpr(n, parent) case ListArg: return c.copyOnRewriteListArg(n, parent) case *Literal: @@ -294,6 +322,12 @@ func (c *cow) copyOnRewriteSQLNode(n SQLNode, parent SQLNode) (out SQLNode, chan return c.copyOnRewriteRefOfMin(n, parent) case *ModifyColumn: return c.copyOnRewriteRefOfModifyColumn(n, parent) + case *MultiLinestringExpr: + return c.copyOnRewriteRefOfMultiLinestringExpr(n, parent) + case *MultiPointExpr: + return c.copyOnRewriteRefOfMultiPointExpr(n, parent) + case *MultiPolygonExpr: + return c.copyOnRewriteRefOfMultiPolygonExpr(n, parent) case *NTHValueExpr: return c.copyOnRewriteRefOfNTHValueExpr(n, parent) case *NamedWindow: @@ -352,10 +386,16 @@ func (c *cow) copyOnRewriteSQLNode(n SQLNode, parent SQLNode) (out SQLNode, chan return c.copyOnRewriteRefOfPerformanceSchemaFuncExpr(n, parent) case *PointExpr: return c.copyOnRewriteRefOfPointExpr(n, parent) + case *PointPropertyFuncExpr: + return c.copyOnRewriteRefOfPointPropertyFuncExpr(n, parent) case *PolygonExpr: return c.copyOnRewriteRefOfPolygonExpr(n, parent) + case *PolygonPropertyFuncExpr: + return c.copyOnRewriteRefOfPolygonPropertyFuncExpr(n, parent) case *PrepareStmt: return c.copyOnRewriteRefOfPrepareStmt(n, parent) + case *PurgeBinaryLogs: + return c.copyOnRewriteRefOfPurgeBinaryLogs(n, parent) case ReferenceAction: return c.copyOnRewriteReferenceAction(n, parent) case *ReferenceDefinition: @@ -454,8 +494,8 @@ func (c *cow) copyOnRewriteSQLNode(n SQLNode, parent SQLNode) (out SQLNode, chan return c.copyOnRewriteRefOfTableSpec(n, parent) case *TablespaceOperation: return c.copyOnRewriteRefOfTablespaceOperation(n, parent) - case *TimestampFuncExpr: - return c.copyOnRewriteRefOfTimestampFuncExpr(n, parent) + case *TimestampDiffExpr: + return c.copyOnRewriteRefOfTimestampDiffExpr(n, parent) case *TrimFuncExpr: return c.copyOnRewriteRefOfTrimFuncExpr(n, parent) case *TruncateTable: @@ -903,6 +943,40 @@ func (c *cow) copyOnRewriteRefOfAndExpr(n *AndExpr, parent SQLNode) (out SQLNode } return } +func (c *cow) copyOnRewriteRefOfAnyValue(n *AnyValue, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n) + if changedArg { + res := *n + res.Arg, _ = _Arg.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} +func (c *cow) copyOnRewriteRefOfArgument(n *Argument, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} func (c *cow) copyOnRewriteRefOfArgumentLessWindowExpr(n *ArgumentLessWindowExpr, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false @@ -925,6 +999,30 @@ func (c *cow) copyOnRewriteRefOfArgumentLessWindowExpr(n *ArgumentLessWindowExpr } return } +func (c *cow) copyOnRewriteRefOfAssignmentExpr(n *AssignmentExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _Left, changedLeft := c.copyOnRewriteExpr(n.Left, n) + _Right, changedRight := c.copyOnRewriteExpr(n.Right, n) + if changedLeft || changedRight { + res := *n + res.Left, _ = _Left.(Expr) + res.Right, _ = _Right.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} func (c *cow) copyOnRewriteRefOfAutoIncSpec(n *AutoIncSpec, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false @@ -1661,11 +1759,9 @@ func (c *cow) copyOnRewriteRefOfCurTimeFuncExpr(n *CurTimeFuncExpr, parent SQLNo out = n if c.pre == nil || c.pre(n, parent) { _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n) - _Fsp, changedFsp := c.copyOnRewriteExpr(n.Fsp, n) - if changedName || changedFsp { + if changedName { res := *n res.Name, _ = _Name.(IdentifierCI) - res.Fsp, _ = _Fsp.(Expr) out = &res if c.cloned != nil { c.cloned(n, out) @@ -2297,6 +2393,254 @@ func (c *cow) copyOnRewriteRefOfGTIDFuncExpr(n *GTIDFuncExpr, parent SQLNode) (o } return } +func (c *cow) copyOnRewriteRefOfGeoHashFromLatLongExpr(n *GeoHashFromLatLongExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _Latitude, changedLatitude := c.copyOnRewriteExpr(n.Latitude, n) + _Longitude, changedLongitude := c.copyOnRewriteExpr(n.Longitude, n) + _MaxLength, changedMaxLength := c.copyOnRewriteExpr(n.MaxLength, n) + if changedLatitude || changedLongitude || changedMaxLength { + res := *n + res.Latitude, _ = _Latitude.(Expr) + res.Longitude, _ = _Longitude.(Expr) + res.MaxLength, _ = _MaxLength.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} +func (c *cow) copyOnRewriteRefOfGeoHashFromPointExpr(n *GeoHashFromPointExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _Point, changedPoint := c.copyOnRewriteExpr(n.Point, n) + _MaxLength, changedMaxLength := c.copyOnRewriteExpr(n.MaxLength, n) + if changedPoint || changedMaxLength { + res := *n + res.Point, _ = _Point.(Expr) + res.MaxLength, _ = _MaxLength.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} +func (c *cow) copyOnRewriteRefOfGeoJSONFromGeomExpr(n *GeoJSONFromGeomExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _Geom, changedGeom := c.copyOnRewriteExpr(n.Geom, n) + _MaxDecimalDigits, changedMaxDecimalDigits := c.copyOnRewriteExpr(n.MaxDecimalDigits, n) + _Bitmask, changedBitmask := c.copyOnRewriteExpr(n.Bitmask, n) + if changedGeom || changedMaxDecimalDigits || changedBitmask { + res := *n + res.Geom, _ = _Geom.(Expr) + res.MaxDecimalDigits, _ = _MaxDecimalDigits.(Expr) + res.Bitmask, _ = _Bitmask.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} +func (c *cow) copyOnRewriteRefOfGeomCollPropertyFuncExpr(n *GeomCollPropertyFuncExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _GeomColl, changedGeomColl := c.copyOnRewriteExpr(n.GeomColl, n) + _PropertyDefArg, changedPropertyDefArg := c.copyOnRewriteExpr(n.PropertyDefArg, n) + if changedGeomColl || changedPropertyDefArg { + res := *n + res.GeomColl, _ = _GeomColl.(Expr) + res.PropertyDefArg, _ = _PropertyDefArg.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} +func (c *cow) copyOnRewriteRefOfGeomFormatExpr(n *GeomFormatExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _Geom, changedGeom := c.copyOnRewriteExpr(n.Geom, n) + _AxisOrderOpt, changedAxisOrderOpt := c.copyOnRewriteExpr(n.AxisOrderOpt, n) + if changedGeom || changedAxisOrderOpt { + res := *n + res.Geom, _ = _Geom.(Expr) + res.AxisOrderOpt, _ = _AxisOrderOpt.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} +func (c *cow) copyOnRewriteRefOfGeomFromGeoHashExpr(n *GeomFromGeoHashExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _GeoHash, changedGeoHash := c.copyOnRewriteExpr(n.GeoHash, n) + _SridOpt, changedSridOpt := c.copyOnRewriteExpr(n.SridOpt, n) + if changedGeoHash || changedSridOpt { + res := *n + res.GeoHash, _ = _GeoHash.(Expr) + res.SridOpt, _ = _SridOpt.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} +func (c *cow) copyOnRewriteRefOfGeomFromGeoJSONExpr(n *GeomFromGeoJSONExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _GeoJSON, changedGeoJSON := c.copyOnRewriteExpr(n.GeoJSON, n) + _HigherDimHandlerOpt, changedHigherDimHandlerOpt := c.copyOnRewriteExpr(n.HigherDimHandlerOpt, n) + _Srid, changedSrid := c.copyOnRewriteExpr(n.Srid, n) + if changedGeoJSON || changedHigherDimHandlerOpt || changedSrid { + res := *n + res.GeoJSON, _ = _GeoJSON.(Expr) + res.HigherDimHandlerOpt, _ = _HigherDimHandlerOpt.(Expr) + res.Srid, _ = _Srid.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} +func (c *cow) copyOnRewriteRefOfGeomFromTextExpr(n *GeomFromTextExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _WktText, changedWktText := c.copyOnRewriteExpr(n.WktText, n) + _Srid, changedSrid := c.copyOnRewriteExpr(n.Srid, n) + _AxisOrderOpt, changedAxisOrderOpt := c.copyOnRewriteExpr(n.AxisOrderOpt, n) + if changedWktText || changedSrid || changedAxisOrderOpt { + res := *n + res.WktText, _ = _WktText.(Expr) + res.Srid, _ = _Srid.(Expr) + res.AxisOrderOpt, _ = _AxisOrderOpt.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} +func (c *cow) copyOnRewriteRefOfGeomFromWKBExpr(n *GeomFromWKBExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _WkbBlob, changedWkbBlob := c.copyOnRewriteExpr(n.WkbBlob, n) + _Srid, changedSrid := c.copyOnRewriteExpr(n.Srid, n) + _AxisOrderOpt, changedAxisOrderOpt := c.copyOnRewriteExpr(n.AxisOrderOpt, n) + if changedWkbBlob || changedSrid || changedAxisOrderOpt { + res := *n + res.WkbBlob, _ = _WkbBlob.(Expr) + res.Srid, _ = _Srid.(Expr) + res.AxisOrderOpt, _ = _AxisOrderOpt.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} +func (c *cow) copyOnRewriteRefOfGeomPropertyFuncExpr(n *GeomPropertyFuncExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _Geom, changedGeom := c.copyOnRewriteExpr(n.Geom, n) + if changedGeom { + res := *n + res.Geom, _ = _Geom.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} func (c *cow) copyOnRewriteGroupBy(n GroupBy, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false @@ -2470,7 +2814,7 @@ func (c *cow) copyOnRewriteRefOfInsert(n *Insert, parent SQLNode) (out SQLNode, out = n if c.pre == nil || c.pre(n, parent) { _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n) - _Table, changedTable := c.copyOnRewriteTableName(n.Table, n) + _Table, changedTable := c.copyOnRewriteRefOfAliasedTableExpr(n.Table, n) _Partitions, changedPartitions := c.copyOnRewritePartitions(n.Partitions, n) _Columns, changedColumns := c.copyOnRewriteColumns(n.Columns, n) _Rows, changedRows := c.copyOnRewriteInsertRows(n.Rows, n) @@ -2478,7 +2822,7 @@ func (c *cow) copyOnRewriteRefOfInsert(n *Insert, parent SQLNode) (out SQLNode, if changedComments || changedTable || changedPartitions || changedColumns || changedRows || changedOnDup { res := *n res.Comments, _ = _Comments.(*ParsedComments) - res.Table, _ = _Table.(TableName) + res.Table, _ = _Table.(*AliasedTableExpr) res.Partitions, _ = _Partitions.(Partitions) res.Columns, _ = _Columns.(Columns) res.Rows, _ = _Rows.(InsertRows) @@ -2523,16 +2867,18 @@ func (c *cow) copyOnRewriteRefOfInsertExpr(n *InsertExpr, parent SQLNode) (out S } return } -func (c *cow) copyOnRewriteRefOfIntervalExpr(n *IntervalExpr, parent SQLNode) (out SQLNode, changed bool) { +func (c *cow) copyOnRewriteRefOfIntervalDateExpr(n *IntervalDateExpr, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false } out = n if c.pre == nil || c.pre(n, parent) { - _Expr, changedExpr := c.copyOnRewriteExpr(n.Expr, n) - if changedExpr { + _Date, changedDate := c.copyOnRewriteExpr(n.Date, n) + _Interval, changedInterval := c.copyOnRewriteExpr(n.Interval, n) + if changedDate || changedInterval { res := *n - res.Expr, _ = _Expr.(Expr) + res.Date, _ = _Date.(Expr) + res.Interval, _ = _Interval.(Expr) out = &res if c.cloned != nil { c.cloned(n, out) @@ -3299,6 +3645,18 @@ func (c *cow) copyOnRewriteRefOfKeyState(n *KeyState, parent SQLNode) (out SQLNo } return } +func (c *cow) copyOnRewriteRefOfKill(n *Kill, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} func (c *cow) copyOnRewriteRefOfLagLeadExpr(n *LagLeadExpr, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false @@ -3375,6 +3733,30 @@ func (c *cow) copyOnRewriteRefOfLineStringExpr(n *LineStringExpr, parent SQLNode } return } +func (c *cow) copyOnRewriteRefOfLinestrPropertyFuncExpr(n *LinestrPropertyFuncExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _Linestring, changedLinestring := c.copyOnRewriteExpr(n.Linestring, n) + _PropertyDefArg, changedPropertyDefArg := c.copyOnRewriteExpr(n.PropertyDefArg, n) + if changedLinestring || changedPropertyDefArg { + res := *n + res.Linestring, _ = _Linestring.(Expr) + res.PropertyDefArg, _ = _PropertyDefArg.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} func (c *cow) copyOnRewriteRefOfLiteral(n *Literal, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false @@ -3597,6 +3979,72 @@ func (c *cow) copyOnRewriteRefOfModifyColumn(n *ModifyColumn, parent SQLNode) (o } return } +func (c *cow) copyOnRewriteRefOfMultiLinestringExpr(n *MultiLinestringExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _LinestringParams, changedLinestringParams := c.copyOnRewriteExprs(n.LinestringParams, n) + if changedLinestringParams { + res := *n + res.LinestringParams, _ = _LinestringParams.(Exprs) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} +func (c *cow) copyOnRewriteRefOfMultiPointExpr(n *MultiPointExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _PointParams, changedPointParams := c.copyOnRewriteExprs(n.PointParams, n) + if changedPointParams { + res := *n + res.PointParams, _ = _PointParams.(Exprs) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} +func (c *cow) copyOnRewriteRefOfMultiPolygonExpr(n *MultiPolygonExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _PolygonParams, changedPolygonParams := c.copyOnRewriteExprs(n.PolygonParams, n) + if changedPolygonParams { + res := *n + res.PolygonParams, _ = _PolygonParams.(Exprs) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} func (c *cow) copyOnRewriteRefOfNTHValueExpr(n *NTHValueExpr, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false @@ -3770,6 +4218,16 @@ func (c *cow) copyOnRewriteRefOfOffset(n *Offset, parent SQLNode) (out SQLNode, } out = n if c.pre == nil || c.pre(n, parent) { + _Original, changedOriginal := c.copyOnRewriteExpr(n.Original, n) + if changedOriginal { + res := *n + res.Original, _ = _Original.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } } if c.post != nil { out, changed = c.postVisit(out, parent, changed) @@ -4225,6 +4683,30 @@ func (c *cow) copyOnRewriteRefOfPointExpr(n *PointExpr, parent SQLNode) (out SQL } return } +func (c *cow) copyOnRewriteRefOfPointPropertyFuncExpr(n *PointPropertyFuncExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _Point, changedPoint := c.copyOnRewriteExpr(n.Point, n) + _ValueToSet, changedValueToSet := c.copyOnRewriteExpr(n.ValueToSet, n) + if changedPoint || changedValueToSet { + res := *n + res.Point, _ = _Point.(Expr) + res.ValueToSet, _ = _ValueToSet.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} func (c *cow) copyOnRewriteRefOfPolygonExpr(n *PolygonExpr, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false @@ -4247,6 +4729,30 @@ func (c *cow) copyOnRewriteRefOfPolygonExpr(n *PolygonExpr, parent SQLNode) (out } return } +func (c *cow) copyOnRewriteRefOfPolygonPropertyFuncExpr(n *PolygonPropertyFuncExpr, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _Polygon, changedPolygon := c.copyOnRewriteExpr(n.Polygon, n) + _PropertyDefArg, changedPropertyDefArg := c.copyOnRewriteExpr(n.PropertyDefArg, n) + if changedPolygon || changedPropertyDefArg { + res := *n + res.Polygon, _ = _Polygon.(Expr) + res.PropertyDefArg, _ = _PropertyDefArg.(Expr) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} func (c *cow) copyOnRewriteRefOfPrepareStmt(n *PrepareStmt, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false @@ -4273,6 +4779,18 @@ func (c *cow) copyOnRewriteRefOfPrepareStmt(n *PrepareStmt, parent SQLNode) (out } return } +func (c *cow) copyOnRewriteRefOfPurgeBinaryLogs(n *PurgeBinaryLogs, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} func (c *cow) copyOnRewriteRefOfReferenceDefinition(n *ReferenceDefinition, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false @@ -5381,7 +5899,7 @@ func (c *cow) copyOnRewriteRefOfTablespaceOperation(n *TablespaceOperation, pare } return } -func (c *cow) copyOnRewriteRefOfTimestampFuncExpr(n *TimestampFuncExpr, parent SQLNode) (out SQLNode, changed bool) { +func (c *cow) copyOnRewriteRefOfTimestampDiffExpr(n *TimestampDiffExpr, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false } @@ -6125,6 +6643,8 @@ func (c *cow) copyOnRewriteAggrFunc(n AggrFunc, parent SQLNode) (out SQLNode, ch return n, false } switch n := n.(type) { + case *AnyValue: + return c.copyOnRewriteRefOfAnyValue(n, parent) case *Avg: return c.copyOnRewriteRefOfAvg(n, parent) case *BitAnd: @@ -6223,6 +6743,8 @@ func (c *cow) copyOnRewriteCallable(n Callable, parent SQLNode) (out SQLNode, ch return n, false } switch n := n.(type) { + case *AnyValue: + return c.copyOnRewriteRefOfAnyValue(n, parent) case *ArgumentLessWindowExpr: return c.copyOnRewriteRefOfArgumentLessWindowExpr(n, parent) case *Avg: @@ -6249,10 +6771,32 @@ func (c *cow) copyOnRewriteCallable(n Callable, parent SQLNode) (out SQLNode, ch return c.copyOnRewriteRefOfFuncExpr(n, parent) case *GTIDFuncExpr: return c.copyOnRewriteRefOfGTIDFuncExpr(n, parent) + case *GeoHashFromLatLongExpr: + return c.copyOnRewriteRefOfGeoHashFromLatLongExpr(n, parent) + case *GeoHashFromPointExpr: + return c.copyOnRewriteRefOfGeoHashFromPointExpr(n, parent) + case *GeoJSONFromGeomExpr: + return c.copyOnRewriteRefOfGeoJSONFromGeomExpr(n, parent) + case *GeomCollPropertyFuncExpr: + return c.copyOnRewriteRefOfGeomCollPropertyFuncExpr(n, parent) + case *GeomFormatExpr: + return c.copyOnRewriteRefOfGeomFormatExpr(n, parent) + case *GeomFromGeoHashExpr: + return c.copyOnRewriteRefOfGeomFromGeoHashExpr(n, parent) + case *GeomFromGeoJSONExpr: + return c.copyOnRewriteRefOfGeomFromGeoJSONExpr(n, parent) + case *GeomFromTextExpr: + return c.copyOnRewriteRefOfGeomFromTextExpr(n, parent) + case *GeomFromWKBExpr: + return c.copyOnRewriteRefOfGeomFromWKBExpr(n, parent) + case *GeomPropertyFuncExpr: + return c.copyOnRewriteRefOfGeomPropertyFuncExpr(n, parent) case *GroupConcatExpr: return c.copyOnRewriteRefOfGroupConcatExpr(n, parent) case *InsertExpr: return c.copyOnRewriteRefOfInsertExpr(n, parent) + case *IntervalDateExpr: + return c.copyOnRewriteRefOfIntervalDateExpr(n, parent) case *IntervalFuncExpr: return c.copyOnRewriteRefOfIntervalFuncExpr(n, parent) case *JSONArrayExpr: @@ -6299,6 +6843,8 @@ func (c *cow) copyOnRewriteCallable(n Callable, parent SQLNode) (out SQLNode, ch return c.copyOnRewriteRefOfLagLeadExpr(n, parent) case *LineStringExpr: return c.copyOnRewriteRefOfLineStringExpr(n, parent) + case *LinestrPropertyFuncExpr: + return c.copyOnRewriteRefOfLinestrPropertyFuncExpr(n, parent) case *LocateExpr: return c.copyOnRewriteRefOfLocateExpr(n, parent) case *MatchExpr: @@ -6309,6 +6855,12 @@ func (c *cow) copyOnRewriteCallable(n Callable, parent SQLNode) (out SQLNode, ch return c.copyOnRewriteRefOfMemberOfExpr(n, parent) case *Min: return c.copyOnRewriteRefOfMin(n, parent) + case *MultiLinestringExpr: + return c.copyOnRewriteRefOfMultiLinestringExpr(n, parent) + case *MultiPointExpr: + return c.copyOnRewriteRefOfMultiPointExpr(n, parent) + case *MultiPolygonExpr: + return c.copyOnRewriteRefOfMultiPolygonExpr(n, parent) case *NTHValueExpr: return c.copyOnRewriteRefOfNTHValueExpr(n, parent) case *NamedWindow: @@ -6319,8 +6871,12 @@ func (c *cow) copyOnRewriteCallable(n Callable, parent SQLNode) (out SQLNode, ch return c.copyOnRewriteRefOfPerformanceSchemaFuncExpr(n, parent) case *PointExpr: return c.copyOnRewriteRefOfPointExpr(n, parent) + case *PointPropertyFuncExpr: + return c.copyOnRewriteRefOfPointPropertyFuncExpr(n, parent) case *PolygonExpr: return c.copyOnRewriteRefOfPolygonExpr(n, parent) + case *PolygonPropertyFuncExpr: + return c.copyOnRewriteRefOfPolygonPropertyFuncExpr(n, parent) case *RegexpInstrExpr: return c.copyOnRewriteRefOfRegexpInstrExpr(n, parent) case *RegexpLikeExpr: @@ -6333,8 +6889,8 @@ func (c *cow) copyOnRewriteCallable(n Callable, parent SQLNode) (out SQLNode, ch return c.copyOnRewriteRefOfSubstrExpr(n, parent) case *Sum: return c.copyOnRewriteRefOfSum(n, parent) - case *TimestampFuncExpr: - return c.copyOnRewriteRefOfTimestampFuncExpr(n, parent) + case *TimestampDiffExpr: + return c.copyOnRewriteRefOfTimestampDiffExpr(n, parent) case *TrimFuncExpr: return c.copyOnRewriteRefOfTrimFuncExpr(n, parent) case *UpdateXMLExpr: @@ -6441,10 +6997,14 @@ func (c *cow) copyOnRewriteExpr(n Expr, parent SQLNode) (out SQLNode, changed bo switch n := n.(type) { case *AndExpr: return c.copyOnRewriteRefOfAndExpr(n, parent) - case Argument: - return c.copyOnRewriteArgument(n, parent) + case *AnyValue: + return c.copyOnRewriteRefOfAnyValue(n, parent) + case *Argument: + return c.copyOnRewriteRefOfArgument(n, parent) case *ArgumentLessWindowExpr: return c.copyOnRewriteRefOfArgumentLessWindowExpr(n, parent) + case *AssignmentExpr: + return c.copyOnRewriteRefOfAssignmentExpr(n, parent) case *Avg: return c.copyOnRewriteRefOfAvg(n, parent) case *BetweenExpr: @@ -6497,12 +7057,32 @@ func (c *cow) copyOnRewriteExpr(n Expr, parent SQLNode) (out SQLNode, changed bo return c.copyOnRewriteRefOfFuncExpr(n, parent) case *GTIDFuncExpr: return c.copyOnRewriteRefOfGTIDFuncExpr(n, parent) + case *GeoHashFromLatLongExpr: + return c.copyOnRewriteRefOfGeoHashFromLatLongExpr(n, parent) + case *GeoHashFromPointExpr: + return c.copyOnRewriteRefOfGeoHashFromPointExpr(n, parent) + case *GeoJSONFromGeomExpr: + return c.copyOnRewriteRefOfGeoJSONFromGeomExpr(n, parent) + case *GeomCollPropertyFuncExpr: + return c.copyOnRewriteRefOfGeomCollPropertyFuncExpr(n, parent) + case *GeomFormatExpr: + return c.copyOnRewriteRefOfGeomFormatExpr(n, parent) + case *GeomFromGeoHashExpr: + return c.copyOnRewriteRefOfGeomFromGeoHashExpr(n, parent) + case *GeomFromGeoJSONExpr: + return c.copyOnRewriteRefOfGeomFromGeoJSONExpr(n, parent) + case *GeomFromTextExpr: + return c.copyOnRewriteRefOfGeomFromTextExpr(n, parent) + case *GeomFromWKBExpr: + return c.copyOnRewriteRefOfGeomFromWKBExpr(n, parent) + case *GeomPropertyFuncExpr: + return c.copyOnRewriteRefOfGeomPropertyFuncExpr(n, parent) case *GroupConcatExpr: return c.copyOnRewriteRefOfGroupConcatExpr(n, parent) case *InsertExpr: return c.copyOnRewriteRefOfInsertExpr(n, parent) - case *IntervalExpr: - return c.copyOnRewriteRefOfIntervalExpr(n, parent) + case *IntervalDateExpr: + return c.copyOnRewriteRefOfIntervalDateExpr(n, parent) case *IntervalFuncExpr: return c.copyOnRewriteRefOfIntervalFuncExpr(n, parent) case *IntroducerExpr: @@ -6553,6 +7133,8 @@ func (c *cow) copyOnRewriteExpr(n Expr, parent SQLNode) (out SQLNode, changed bo return c.copyOnRewriteRefOfLagLeadExpr(n, parent) case *LineStringExpr: return c.copyOnRewriteRefOfLineStringExpr(n, parent) + case *LinestrPropertyFuncExpr: + return c.copyOnRewriteRefOfLinestrPropertyFuncExpr(n, parent) case ListArg: return c.copyOnRewriteListArg(n, parent) case *Literal: @@ -6569,6 +7151,12 @@ func (c *cow) copyOnRewriteExpr(n Expr, parent SQLNode) (out SQLNode, changed bo return c.copyOnRewriteRefOfMemberOfExpr(n, parent) case *Min: return c.copyOnRewriteRefOfMin(n, parent) + case *MultiLinestringExpr: + return c.copyOnRewriteRefOfMultiLinestringExpr(n, parent) + case *MultiPointExpr: + return c.copyOnRewriteRefOfMultiPointExpr(n, parent) + case *MultiPolygonExpr: + return c.copyOnRewriteRefOfMultiPolygonExpr(n, parent) case *NTHValueExpr: return c.copyOnRewriteRefOfNTHValueExpr(n, parent) case *NamedWindow: @@ -6587,8 +7175,12 @@ func (c *cow) copyOnRewriteExpr(n Expr, parent SQLNode) (out SQLNode, changed bo return c.copyOnRewriteRefOfPerformanceSchemaFuncExpr(n, parent) case *PointExpr: return c.copyOnRewriteRefOfPointExpr(n, parent) + case *PointPropertyFuncExpr: + return c.copyOnRewriteRefOfPointPropertyFuncExpr(n, parent) case *PolygonExpr: return c.copyOnRewriteRefOfPolygonExpr(n, parent) + case *PolygonPropertyFuncExpr: + return c.copyOnRewriteRefOfPolygonPropertyFuncExpr(n, parent) case *RegexpInstrExpr: return c.copyOnRewriteRefOfRegexpInstrExpr(n, parent) case *RegexpLikeExpr: @@ -6611,8 +7203,8 @@ func (c *cow) copyOnRewriteExpr(n Expr, parent SQLNode) (out SQLNode, changed bo return c.copyOnRewriteRefOfSubstrExpr(n, parent) case *Sum: return c.copyOnRewriteRefOfSum(n, parent) - case *TimestampFuncExpr: - return c.copyOnRewriteRefOfTimestampFuncExpr(n, parent) + case *TimestampDiffExpr: + return c.copyOnRewriteRefOfTimestampDiffExpr(n, parent) case *TrimFuncExpr: return c.copyOnRewriteRefOfTrimFuncExpr(n, parent) case *UnaryExpr: @@ -6765,6 +7357,8 @@ func (c *cow) copyOnRewriteStatement(n Statement, parent SQLNode) (out SQLNode, return c.copyOnRewriteRefOfFlush(n, parent) case *Insert: return c.copyOnRewriteRefOfInsert(n, parent) + case *Kill: + return c.copyOnRewriteRefOfKill(n, parent) case *Load: return c.copyOnRewriteRefOfLoad(n, parent) case *LockTables: @@ -6775,6 +7369,8 @@ func (c *cow) copyOnRewriteStatement(n Statement, parent SQLNode) (out SQLNode, return c.copyOnRewriteRefOfOtherRead(n, parent) case *PrepareStmt: return c.copyOnRewriteRefOfPrepareStmt(n, parent) + case *PurgeBinaryLogs: + return c.copyOnRewriteRefOfPurgeBinaryLogs(n, parent) case *Release: return c.copyOnRewriteRefOfRelease(n, parent) case *RenameTable: @@ -6852,20 +7448,6 @@ func (c *cow) copyOnRewriteAlgorithmValue(n AlgorithmValue, parent SQLNode) (out } return } -func (c *cow) copyOnRewriteArgument(n Argument, parent SQLNode) (out SQLNode, changed bool) { - if c.cursor.stop { - return n, false - } - if c.pre != nil { - c.pre(n, parent) - } - if c.post != nil { - out, changed = c.postVisit(n, parent, changed) - } else { - out = n - } - return -} func (c *cow) copyOnRewriteBoolVal(n BoolVal, parent SQLNode) (out SQLNode, changed bool) { if c.cursor.stop { return n, false diff --git a/go/vt/sqlparser/ast_equals.go b/go/vt/sqlparser/ast_equals.go index 5595a4eb524..1b6ba48cb80 100644 --- a/go/vt/sqlparser/ast_equals.go +++ b/go/vt/sqlparser/ast_equals.go @@ -122,18 +122,30 @@ func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool { return false } return cmp.RefOfAndExpr(a, b) - case Argument: - b, ok := inB.(Argument) + case *AnyValue: + b, ok := inB.(*AnyValue) if !ok { return false } - return a == b + return cmp.RefOfAnyValue(a, b) + case *Argument: + b, ok := inB.(*Argument) + if !ok { + return false + } + return cmp.RefOfArgument(a, b) case *ArgumentLessWindowExpr: b, ok := inB.(*ArgumentLessWindowExpr) if !ok { return false } return cmp.RefOfArgumentLessWindowExpr(a, b) + case *AssignmentExpr: + b, ok := inB.(*AssignmentExpr) + if !ok { + return false + } + return cmp.RefOfAssignmentExpr(a, b) case *AutoIncSpec: b, ok := inB.(*AutoIncSpec) if !ok { @@ -500,6 +512,66 @@ func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool { return false } return cmp.RefOfGTIDFuncExpr(a, b) + case *GeoHashFromLatLongExpr: + b, ok := inB.(*GeoHashFromLatLongExpr) + if !ok { + return false + } + return cmp.RefOfGeoHashFromLatLongExpr(a, b) + case *GeoHashFromPointExpr: + b, ok := inB.(*GeoHashFromPointExpr) + if !ok { + return false + } + return cmp.RefOfGeoHashFromPointExpr(a, b) + case *GeoJSONFromGeomExpr: + b, ok := inB.(*GeoJSONFromGeomExpr) + if !ok { + return false + } + return cmp.RefOfGeoJSONFromGeomExpr(a, b) + case *GeomCollPropertyFuncExpr: + b, ok := inB.(*GeomCollPropertyFuncExpr) + if !ok { + return false + } + return cmp.RefOfGeomCollPropertyFuncExpr(a, b) + case *GeomFormatExpr: + b, ok := inB.(*GeomFormatExpr) + if !ok { + return false + } + return cmp.RefOfGeomFormatExpr(a, b) + case *GeomFromGeoHashExpr: + b, ok := inB.(*GeomFromGeoHashExpr) + if !ok { + return false + } + return cmp.RefOfGeomFromGeoHashExpr(a, b) + case *GeomFromGeoJSONExpr: + b, ok := inB.(*GeomFromGeoJSONExpr) + if !ok { + return false + } + return cmp.RefOfGeomFromGeoJSONExpr(a, b) + case *GeomFromTextExpr: + b, ok := inB.(*GeomFromTextExpr) + if !ok { + return false + } + return cmp.RefOfGeomFromTextExpr(a, b) + case *GeomFromWKBExpr: + b, ok := inB.(*GeomFromWKBExpr) + if !ok { + return false + } + return cmp.RefOfGeomFromWKBExpr(a, b) + case *GeomPropertyFuncExpr: + b, ok := inB.(*GeomPropertyFuncExpr) + if !ok { + return false + } + return cmp.RefOfGeomPropertyFuncExpr(a, b) case GroupBy: b, ok := inB.(GroupBy) if !ok { @@ -560,12 +632,12 @@ func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool { return false } return cmp.RefOfInsertExpr(a, b) - case *IntervalExpr: - b, ok := inB.(*IntervalExpr) + case *IntervalDateExpr: + b, ok := inB.(*IntervalDateExpr) if !ok { return false } - return cmp.RefOfIntervalExpr(a, b) + return cmp.RefOfIntervalDateExpr(a, b) case *IntervalFuncExpr: b, ok := inB.(*IntervalFuncExpr) if !ok { @@ -746,6 +818,12 @@ func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool { return false } return cmp.RefOfKeyState(a, b) + case *Kill: + b, ok := inB.(*Kill) + if !ok { + return false + } + return cmp.RefOfKill(a, b) case *LagLeadExpr: b, ok := inB.(*LagLeadExpr) if !ok { @@ -764,6 +842,12 @@ func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool { return false } return cmp.RefOfLineStringExpr(a, b) + case *LinestrPropertyFuncExpr: + b, ok := inB.(*LinestrPropertyFuncExpr) + if !ok { + return false + } + return cmp.RefOfLinestrPropertyFuncExpr(a, b) case ListArg: b, ok := inB.(ListArg) if !ok { @@ -842,6 +926,24 @@ func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool { return false } return cmp.RefOfModifyColumn(a, b) + case *MultiLinestringExpr: + b, ok := inB.(*MultiLinestringExpr) + if !ok { + return false + } + return cmp.RefOfMultiLinestringExpr(a, b) + case *MultiPointExpr: + b, ok := inB.(*MultiPointExpr) + if !ok { + return false + } + return cmp.RefOfMultiPointExpr(a, b) + case *MultiPolygonExpr: + b, ok := inB.(*MultiPolygonExpr) + if !ok { + return false + } + return cmp.RefOfMultiPolygonExpr(a, b) case *NTHValueExpr: b, ok := inB.(*NTHValueExpr) if !ok { @@ -1016,18 +1118,36 @@ func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool { return false } return cmp.RefOfPointExpr(a, b) + case *PointPropertyFuncExpr: + b, ok := inB.(*PointPropertyFuncExpr) + if !ok { + return false + } + return cmp.RefOfPointPropertyFuncExpr(a, b) case *PolygonExpr: b, ok := inB.(*PolygonExpr) if !ok { return false } return cmp.RefOfPolygonExpr(a, b) + case *PolygonPropertyFuncExpr: + b, ok := inB.(*PolygonPropertyFuncExpr) + if !ok { + return false + } + return cmp.RefOfPolygonPropertyFuncExpr(a, b) case *PrepareStmt: b, ok := inB.(*PrepareStmt) if !ok { return false } return cmp.RefOfPrepareStmt(a, b) + case *PurgeBinaryLogs: + b, ok := inB.(*PurgeBinaryLogs) + if !ok { + return false + } + return cmp.RefOfPurgeBinaryLogs(a, b) case ReferenceAction: b, ok := inB.(ReferenceAction) if !ok { @@ -1322,12 +1442,12 @@ func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool { return false } return cmp.RefOfTablespaceOperation(a, b) - case *TimestampFuncExpr: - b, ok := inB.(*TimestampFuncExpr) + case *TimestampDiffExpr: + b, ok := inB.(*TimestampDiffExpr) if !ok { return false } - return cmp.RefOfTimestampFuncExpr(a, b) + return cmp.RefOfTimestampDiffExpr(a, b) case *TrimFuncExpr: b, ok := inB.(*TrimFuncExpr) if !ok { @@ -1609,6 +1729,7 @@ func (cmp *Comparator) RefOfAlterColumn(a, b *AlterColumn) bool { return false } return a.DropDefault == b.DropDefault && + a.DefaultLiteral == b.DefaultLiteral && cmp.RefOfColName(a.Column, b.Column) && cmp.Expr(a.DefaultVal, b.DefaultVal) && cmp.RefOfBool(a.Invisible, b.Invisible) @@ -1716,6 +1837,29 @@ func (cmp *Comparator) RefOfAndExpr(a, b *AndExpr) bool { cmp.Expr(a.Right, b.Right) } +// RefOfAnyValue does deep equals between the two objects. +func (cmp *Comparator) RefOfAnyValue(a, b *AnyValue) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return cmp.Expr(a.Arg, b.Arg) +} + +// RefOfArgument does deep equals between the two objects. +func (cmp *Comparator) RefOfArgument(a, b *Argument) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Name == b.Name && + a.Type == b.Type +} + // RefOfArgumentLessWindowExpr does deep equals between the two objects. func (cmp *Comparator) RefOfArgumentLessWindowExpr(a, b *ArgumentLessWindowExpr) bool { if a == b { @@ -1728,6 +1872,18 @@ func (cmp *Comparator) RefOfArgumentLessWindowExpr(a, b *ArgumentLessWindowExpr) cmp.RefOfOverClause(a.OverClause, b.OverClause) } +// RefOfAssignmentExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfAssignmentExpr(a, b *AssignmentExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return cmp.Expr(a.Left, b.Left) && + cmp.Expr(a.Right, b.Right) +} + // RefOfAutoIncSpec does deep equals between the two objects. func (cmp *Comparator) RefOfAutoIncSpec(a, b *AutoIncSpec) bool { if a == b { @@ -2150,8 +2306,8 @@ func (cmp *Comparator) RefOfCurTimeFuncExpr(a, b *CurTimeFuncExpr) bool { if a == nil || b == nil { return false } - return cmp.IdentifierCI(a.Name, b.Name) && - cmp.Expr(a.Fsp, b.Fsp) + return a.Fsp == b.Fsp && + cmp.IdentifierCI(a.Name, b.Name) } // RefOfDeallocateStmt does deep equals between the two objects. @@ -2162,8 +2318,7 @@ func (cmp *Comparator) RefOfDeallocateStmt(a, b *DeallocateStmt) bool { if a == nil || b == nil { return false } - return a.Type == b.Type && - cmp.RefOfParsedComments(a.Comments, b.Comments) && + return cmp.RefOfParsedComments(a.Comments, b.Comments) && cmp.IdentifierCI(a.Name, b.Name) } @@ -2354,7 +2509,7 @@ func (cmp *Comparator) RefOfExtractFuncExpr(a, b *ExtractFuncExpr) bool { if a == nil || b == nil { return false } - return a.IntervalTypes == b.IntervalTypes && + return a.IntervalType == b.IntervalType && cmp.Expr(a.Expr, b.Expr) } @@ -2463,6 +2618,7 @@ func (cmp *Comparator) RefOfFramePoint(a, b *FramePoint) bool { return false } return a.Type == b.Type && + a.Unit == b.Unit && cmp.Expr(a.Expr, b.Expr) } @@ -2505,6 +2661,136 @@ func (cmp *Comparator) RefOfGTIDFuncExpr(a, b *GTIDFuncExpr) bool { cmp.Expr(a.Channel, b.Channel) } +// RefOfGeoHashFromLatLongExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfGeoHashFromLatLongExpr(a, b *GeoHashFromLatLongExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return cmp.Expr(a.Latitude, b.Latitude) && + cmp.Expr(a.Longitude, b.Longitude) && + cmp.Expr(a.MaxLength, b.MaxLength) +} + +// RefOfGeoHashFromPointExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfGeoHashFromPointExpr(a, b *GeoHashFromPointExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return cmp.Expr(a.Point, b.Point) && + cmp.Expr(a.MaxLength, b.MaxLength) +} + +// RefOfGeoJSONFromGeomExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfGeoJSONFromGeomExpr(a, b *GeoJSONFromGeomExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return cmp.Expr(a.Geom, b.Geom) && + cmp.Expr(a.MaxDecimalDigits, b.MaxDecimalDigits) && + cmp.Expr(a.Bitmask, b.Bitmask) +} + +// RefOfGeomCollPropertyFuncExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfGeomCollPropertyFuncExpr(a, b *GeomCollPropertyFuncExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Property == b.Property && + cmp.Expr(a.GeomColl, b.GeomColl) && + cmp.Expr(a.PropertyDefArg, b.PropertyDefArg) +} + +// RefOfGeomFormatExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfGeomFormatExpr(a, b *GeomFormatExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.FormatType == b.FormatType && + cmp.Expr(a.Geom, b.Geom) && + cmp.Expr(a.AxisOrderOpt, b.AxisOrderOpt) +} + +// RefOfGeomFromGeoHashExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfGeomFromGeoHashExpr(a, b *GeomFromGeoHashExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.GeomType == b.GeomType && + cmp.Expr(a.GeoHash, b.GeoHash) && + cmp.Expr(a.SridOpt, b.SridOpt) +} + +// RefOfGeomFromGeoJSONExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfGeomFromGeoJSONExpr(a, b *GeomFromGeoJSONExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return cmp.Expr(a.GeoJSON, b.GeoJSON) && + cmp.Expr(a.HigherDimHandlerOpt, b.HigherDimHandlerOpt) && + cmp.Expr(a.Srid, b.Srid) +} + +// RefOfGeomFromTextExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfGeomFromTextExpr(a, b *GeomFromTextExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Type == b.Type && + cmp.Expr(a.WktText, b.WktText) && + cmp.Expr(a.Srid, b.Srid) && + cmp.Expr(a.AxisOrderOpt, b.AxisOrderOpt) +} + +// RefOfGeomFromWKBExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfGeomFromWKBExpr(a, b *GeomFromWKBExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Type == b.Type && + cmp.Expr(a.WkbBlob, b.WkbBlob) && + cmp.Expr(a.Srid, b.Srid) && + cmp.Expr(a.AxisOrderOpt, b.AxisOrderOpt) +} + +// RefOfGeomPropertyFuncExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfGeomPropertyFuncExpr(a, b *GeomPropertyFuncExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Property == b.Property && + cmp.Expr(a.Geom, b.Geom) +} + // GroupBy does deep equals between the two objects. func (cmp *Comparator) GroupBy(a, b GroupBy) bool { if len(a) != len(b) { @@ -2611,7 +2897,7 @@ func (cmp *Comparator) RefOfInsert(a, b *Insert) bool { return a.Action == b.Action && cmp.RefOfParsedComments(a.Comments, b.Comments) && a.Ignore == b.Ignore && - cmp.TableName(a.Table, b.Table) && + cmp.RefOfAliasedTableExpr(a.Table, b.Table) && cmp.Partitions(a.Partitions, b.Partitions) && cmp.Columns(a.Columns, b.Columns) && cmp.InsertRows(a.Rows, b.Rows) && @@ -2632,16 +2918,18 @@ func (cmp *Comparator) RefOfInsertExpr(a, b *InsertExpr) bool { cmp.Expr(a.NewStr, b.NewStr) } -// RefOfIntervalExpr does deep equals between the two objects. -func (cmp *Comparator) RefOfIntervalExpr(a, b *IntervalExpr) bool { +// RefOfIntervalDateExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfIntervalDateExpr(a, b *IntervalDateExpr) bool { if a == b { return true } if a == nil || b == nil { return false } - return a.Unit == b.Unit && - cmp.Expr(a.Expr, b.Expr) + return a.Syntax == b.Syntax && + cmp.Expr(a.Date, b.Date) && + cmp.Expr(a.Interval, b.Interval) && + a.Unit == b.Unit } // RefOfIntervalFuncExpr does deep equals between the two objects. @@ -3012,6 +3300,18 @@ func (cmp *Comparator) RefOfKeyState(a, b *KeyState) bool { return a.Enable == b.Enable } +// RefOfKill does deep equals between the two objects. +func (cmp *Comparator) RefOfKill(a, b *Kill) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.ProcesslistID == b.ProcesslistID && + a.Type == b.Type +} + // RefOfLagLeadExpr does deep equals between the two objects. func (cmp *Comparator) RefOfLagLeadExpr(a, b *LagLeadExpr) bool { if a == b { @@ -3051,6 +3351,19 @@ func (cmp *Comparator) RefOfLineStringExpr(a, b *LineStringExpr) bool { return cmp.Exprs(a.PointParams, b.PointParams) } +// RefOfLinestrPropertyFuncExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfLinestrPropertyFuncExpr(a, b *LinestrPropertyFuncExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Property == b.Property && + cmp.Expr(a.Linestring, b.Linestring) && + cmp.Expr(a.PropertyDefArg, b.PropertyDefArg) +} + // RefOfLiteral does deep equals between the two objects. func (cmp *Comparator) RefOfLiteral(a, b *Literal) bool { if a == b { @@ -3184,6 +3497,39 @@ func (cmp *Comparator) RefOfModifyColumn(a, b *ModifyColumn) bool { cmp.RefOfColName(a.After, b.After) } +// RefOfMultiLinestringExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfMultiLinestringExpr(a, b *MultiLinestringExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return cmp.Exprs(a.LinestringParams, b.LinestringParams) +} + +// RefOfMultiPointExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfMultiPointExpr(a, b *MultiPointExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return cmp.Exprs(a.PointParams, b.PointParams) +} + +// RefOfMultiPolygonExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfMultiPolygonExpr(a, b *MultiPolygonExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return cmp.Exprs(a.PolygonParams, b.PolygonParams) +} + // RefOfNTHValueExpr does deep equals between the two objects. func (cmp *Comparator) RefOfNTHValueExpr(a, b *NTHValueExpr) bool { if a == b { @@ -3288,7 +3634,7 @@ func (cmp *Comparator) RefOfOffset(a, b *Offset) bool { return false } return a.V == b.V && - a.Original == b.Original + cmp.Expr(a.Original, b.Original) } // OnDup does deep equals between the two objects. @@ -3547,6 +3893,19 @@ func (cmp *Comparator) RefOfPointExpr(a, b *PointExpr) bool { cmp.Expr(a.YCordinate, b.YCordinate) } +// RefOfPointPropertyFuncExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfPointPropertyFuncExpr(a, b *PointPropertyFuncExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Property == b.Property && + cmp.Expr(a.Point, b.Point) && + cmp.Expr(a.ValueToSet, b.ValueToSet) +} + // RefOfPolygonExpr does deep equals between the two objects. func (cmp *Comparator) RefOfPolygonExpr(a, b *PolygonExpr) bool { if a == b { @@ -3558,6 +3917,19 @@ func (cmp *Comparator) RefOfPolygonExpr(a, b *PolygonExpr) bool { return cmp.Exprs(a.LinestringParams, b.LinestringParams) } +// RefOfPolygonPropertyFuncExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfPolygonPropertyFuncExpr(a, b *PolygonPropertyFuncExpr) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.Property == b.Property && + cmp.Expr(a.Polygon, b.Polygon) && + cmp.Expr(a.PropertyDefArg, b.PropertyDefArg) +} + // RefOfPrepareStmt does deep equals between the two objects. func (cmp *Comparator) RefOfPrepareStmt(a, b *PrepareStmt) bool { if a == b { @@ -3571,6 +3943,18 @@ func (cmp *Comparator) RefOfPrepareStmt(a, b *PrepareStmt) bool { cmp.RefOfParsedComments(a.Comments, b.Comments) } +// RefOfPurgeBinaryLogs does deep equals between the two objects. +func (cmp *Comparator) RefOfPurgeBinaryLogs(a, b *PurgeBinaryLogs) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.To == b.To && + a.Before == b.Before +} + // RefOfReferenceDefinition does deep equals between the two objects. func (cmp *Comparator) RefOfReferenceDefinition(a, b *ReferenceDefinition) bool { if a == b { @@ -4174,18 +4558,17 @@ func (cmp *Comparator) RefOfTablespaceOperation(a, b *TablespaceOperation) bool return a.Import == b.Import } -// RefOfTimestampFuncExpr does deep equals between the two objects. -func (cmp *Comparator) RefOfTimestampFuncExpr(a, b *TimestampFuncExpr) bool { +// RefOfTimestampDiffExpr does deep equals between the two objects. +func (cmp *Comparator) RefOfTimestampDiffExpr(a, b *TimestampDiffExpr) bool { if a == b { return true } if a == nil || b == nil { return false } - return a.Name == b.Name && - a.Unit == b.Unit && - cmp.Expr(a.Expr1, b.Expr1) && - cmp.Expr(a.Expr2, b.Expr2) + return cmp.Expr(a.Expr1, b.Expr1) && + cmp.Expr(a.Expr2, b.Expr2) && + a.Unit == b.Unit } // RefOfTrimFuncExpr does deep equals between the two objects. @@ -4569,6 +4952,12 @@ func (cmp *Comparator) AggrFunc(inA, inB AggrFunc) bool { return false } switch a := inA.(type) { + case *AnyValue: + b, ok := inB.(*AnyValue) + if !ok { + return false + } + return cmp.RefOfAnyValue(a, b) case *Avg: b, ok := inB.(*Avg) if !ok { @@ -4833,6 +5222,12 @@ func (cmp *Comparator) Callable(inA, inB Callable) bool { return false } switch a := inA.(type) { + case *AnyValue: + b, ok := inB.(*AnyValue) + if !ok { + return false + } + return cmp.RefOfAnyValue(a, b) case *ArgumentLessWindowExpr: b, ok := inB.(*ArgumentLessWindowExpr) if !ok { @@ -4911,6 +5306,66 @@ func (cmp *Comparator) Callable(inA, inB Callable) bool { return false } return cmp.RefOfGTIDFuncExpr(a, b) + case *GeoHashFromLatLongExpr: + b, ok := inB.(*GeoHashFromLatLongExpr) + if !ok { + return false + } + return cmp.RefOfGeoHashFromLatLongExpr(a, b) + case *GeoHashFromPointExpr: + b, ok := inB.(*GeoHashFromPointExpr) + if !ok { + return false + } + return cmp.RefOfGeoHashFromPointExpr(a, b) + case *GeoJSONFromGeomExpr: + b, ok := inB.(*GeoJSONFromGeomExpr) + if !ok { + return false + } + return cmp.RefOfGeoJSONFromGeomExpr(a, b) + case *GeomCollPropertyFuncExpr: + b, ok := inB.(*GeomCollPropertyFuncExpr) + if !ok { + return false + } + return cmp.RefOfGeomCollPropertyFuncExpr(a, b) + case *GeomFormatExpr: + b, ok := inB.(*GeomFormatExpr) + if !ok { + return false + } + return cmp.RefOfGeomFormatExpr(a, b) + case *GeomFromGeoHashExpr: + b, ok := inB.(*GeomFromGeoHashExpr) + if !ok { + return false + } + return cmp.RefOfGeomFromGeoHashExpr(a, b) + case *GeomFromGeoJSONExpr: + b, ok := inB.(*GeomFromGeoJSONExpr) + if !ok { + return false + } + return cmp.RefOfGeomFromGeoJSONExpr(a, b) + case *GeomFromTextExpr: + b, ok := inB.(*GeomFromTextExpr) + if !ok { + return false + } + return cmp.RefOfGeomFromTextExpr(a, b) + case *GeomFromWKBExpr: + b, ok := inB.(*GeomFromWKBExpr) + if !ok { + return false + } + return cmp.RefOfGeomFromWKBExpr(a, b) + case *GeomPropertyFuncExpr: + b, ok := inB.(*GeomPropertyFuncExpr) + if !ok { + return false + } + return cmp.RefOfGeomPropertyFuncExpr(a, b) case *GroupConcatExpr: b, ok := inB.(*GroupConcatExpr) if !ok { @@ -4923,6 +5378,12 @@ func (cmp *Comparator) Callable(inA, inB Callable) bool { return false } return cmp.RefOfInsertExpr(a, b) + case *IntervalDateExpr: + b, ok := inB.(*IntervalDateExpr) + if !ok { + return false + } + return cmp.RefOfIntervalDateExpr(a, b) case *IntervalFuncExpr: b, ok := inB.(*IntervalFuncExpr) if !ok { @@ -5061,6 +5522,12 @@ func (cmp *Comparator) Callable(inA, inB Callable) bool { return false } return cmp.RefOfLineStringExpr(a, b) + case *LinestrPropertyFuncExpr: + b, ok := inB.(*LinestrPropertyFuncExpr) + if !ok { + return false + } + return cmp.RefOfLinestrPropertyFuncExpr(a, b) case *LocateExpr: b, ok := inB.(*LocateExpr) if !ok { @@ -5091,6 +5558,24 @@ func (cmp *Comparator) Callable(inA, inB Callable) bool { return false } return cmp.RefOfMin(a, b) + case *MultiLinestringExpr: + b, ok := inB.(*MultiLinestringExpr) + if !ok { + return false + } + return cmp.RefOfMultiLinestringExpr(a, b) + case *MultiPointExpr: + b, ok := inB.(*MultiPointExpr) + if !ok { + return false + } + return cmp.RefOfMultiPointExpr(a, b) + case *MultiPolygonExpr: + b, ok := inB.(*MultiPolygonExpr) + if !ok { + return false + } + return cmp.RefOfMultiPolygonExpr(a, b) case *NTHValueExpr: b, ok := inB.(*NTHValueExpr) if !ok { @@ -5121,12 +5606,24 @@ func (cmp *Comparator) Callable(inA, inB Callable) bool { return false } return cmp.RefOfPointExpr(a, b) + case *PointPropertyFuncExpr: + b, ok := inB.(*PointPropertyFuncExpr) + if !ok { + return false + } + return cmp.RefOfPointPropertyFuncExpr(a, b) case *PolygonExpr: b, ok := inB.(*PolygonExpr) if !ok { return false } return cmp.RefOfPolygonExpr(a, b) + case *PolygonPropertyFuncExpr: + b, ok := inB.(*PolygonPropertyFuncExpr) + if !ok { + return false + } + return cmp.RefOfPolygonPropertyFuncExpr(a, b) case *RegexpInstrExpr: b, ok := inB.(*RegexpInstrExpr) if !ok { @@ -5163,12 +5660,12 @@ func (cmp *Comparator) Callable(inA, inB Callable) bool { return false } return cmp.RefOfSum(a, b) - case *TimestampFuncExpr: - b, ok := inB.(*TimestampFuncExpr) + case *TimestampDiffExpr: + b, ok := inB.(*TimestampDiffExpr) if !ok { return false } - return cmp.RefOfTimestampFuncExpr(a, b) + return cmp.RefOfTimestampDiffExpr(a, b) case *TrimFuncExpr: b, ok := inB.(*TrimFuncExpr) if !ok { @@ -5397,18 +5894,30 @@ func (cmp *Comparator) Expr(inA, inB Expr) bool { return false } return cmp.RefOfAndExpr(a, b) - case Argument: - b, ok := inB.(Argument) + case *AnyValue: + b, ok := inB.(*AnyValue) if !ok { return false } - return a == b + return cmp.RefOfAnyValue(a, b) + case *Argument: + b, ok := inB.(*Argument) + if !ok { + return false + } + return cmp.RefOfArgument(a, b) case *ArgumentLessWindowExpr: b, ok := inB.(*ArgumentLessWindowExpr) if !ok { return false } return cmp.RefOfArgumentLessWindowExpr(a, b) + case *AssignmentExpr: + b, ok := inB.(*AssignmentExpr) + if !ok { + return false + } + return cmp.RefOfAssignmentExpr(a, b) case *Avg: b, ok := inB.(*Avg) if !ok { @@ -5565,6 +6074,66 @@ func (cmp *Comparator) Expr(inA, inB Expr) bool { return false } return cmp.RefOfGTIDFuncExpr(a, b) + case *GeoHashFromLatLongExpr: + b, ok := inB.(*GeoHashFromLatLongExpr) + if !ok { + return false + } + return cmp.RefOfGeoHashFromLatLongExpr(a, b) + case *GeoHashFromPointExpr: + b, ok := inB.(*GeoHashFromPointExpr) + if !ok { + return false + } + return cmp.RefOfGeoHashFromPointExpr(a, b) + case *GeoJSONFromGeomExpr: + b, ok := inB.(*GeoJSONFromGeomExpr) + if !ok { + return false + } + return cmp.RefOfGeoJSONFromGeomExpr(a, b) + case *GeomCollPropertyFuncExpr: + b, ok := inB.(*GeomCollPropertyFuncExpr) + if !ok { + return false + } + return cmp.RefOfGeomCollPropertyFuncExpr(a, b) + case *GeomFormatExpr: + b, ok := inB.(*GeomFormatExpr) + if !ok { + return false + } + return cmp.RefOfGeomFormatExpr(a, b) + case *GeomFromGeoHashExpr: + b, ok := inB.(*GeomFromGeoHashExpr) + if !ok { + return false + } + return cmp.RefOfGeomFromGeoHashExpr(a, b) + case *GeomFromGeoJSONExpr: + b, ok := inB.(*GeomFromGeoJSONExpr) + if !ok { + return false + } + return cmp.RefOfGeomFromGeoJSONExpr(a, b) + case *GeomFromTextExpr: + b, ok := inB.(*GeomFromTextExpr) + if !ok { + return false + } + return cmp.RefOfGeomFromTextExpr(a, b) + case *GeomFromWKBExpr: + b, ok := inB.(*GeomFromWKBExpr) + if !ok { + return false + } + return cmp.RefOfGeomFromWKBExpr(a, b) + case *GeomPropertyFuncExpr: + b, ok := inB.(*GeomPropertyFuncExpr) + if !ok { + return false + } + return cmp.RefOfGeomPropertyFuncExpr(a, b) case *GroupConcatExpr: b, ok := inB.(*GroupConcatExpr) if !ok { @@ -5577,12 +6146,12 @@ func (cmp *Comparator) Expr(inA, inB Expr) bool { return false } return cmp.RefOfInsertExpr(a, b) - case *IntervalExpr: - b, ok := inB.(*IntervalExpr) + case *IntervalDateExpr: + b, ok := inB.(*IntervalDateExpr) if !ok { return false } - return cmp.RefOfIntervalExpr(a, b) + return cmp.RefOfIntervalDateExpr(a, b) case *IntervalFuncExpr: b, ok := inB.(*IntervalFuncExpr) if !ok { @@ -5733,6 +6302,12 @@ func (cmp *Comparator) Expr(inA, inB Expr) bool { return false } return cmp.RefOfLineStringExpr(a, b) + case *LinestrPropertyFuncExpr: + b, ok := inB.(*LinestrPropertyFuncExpr) + if !ok { + return false + } + return cmp.RefOfLinestrPropertyFuncExpr(a, b) case ListArg: b, ok := inB.(ListArg) if !ok { @@ -5781,6 +6356,24 @@ func (cmp *Comparator) Expr(inA, inB Expr) bool { return false } return cmp.RefOfMin(a, b) + case *MultiLinestringExpr: + b, ok := inB.(*MultiLinestringExpr) + if !ok { + return false + } + return cmp.RefOfMultiLinestringExpr(a, b) + case *MultiPointExpr: + b, ok := inB.(*MultiPointExpr) + if !ok { + return false + } + return cmp.RefOfMultiPointExpr(a, b) + case *MultiPolygonExpr: + b, ok := inB.(*MultiPolygonExpr) + if !ok { + return false + } + return cmp.RefOfMultiPolygonExpr(a, b) case *NTHValueExpr: b, ok := inB.(*NTHValueExpr) if !ok { @@ -5835,12 +6428,24 @@ func (cmp *Comparator) Expr(inA, inB Expr) bool { return false } return cmp.RefOfPointExpr(a, b) + case *PointPropertyFuncExpr: + b, ok := inB.(*PointPropertyFuncExpr) + if !ok { + return false + } + return cmp.RefOfPointPropertyFuncExpr(a, b) case *PolygonExpr: b, ok := inB.(*PolygonExpr) if !ok { return false } return cmp.RefOfPolygonExpr(a, b) + case *PolygonPropertyFuncExpr: + b, ok := inB.(*PolygonPropertyFuncExpr) + if !ok { + return false + } + return cmp.RefOfPolygonPropertyFuncExpr(a, b) case *RegexpInstrExpr: b, ok := inB.(*RegexpInstrExpr) if !ok { @@ -5907,12 +6512,12 @@ func (cmp *Comparator) Expr(inA, inB Expr) bool { return false } return cmp.RefOfSum(a, b) - case *TimestampFuncExpr: - b, ok := inB.(*TimestampFuncExpr) + case *TimestampDiffExpr: + b, ok := inB.(*TimestampDiffExpr) if !ok { return false } - return cmp.RefOfTimestampFuncExpr(a, b) + return cmp.RefOfTimestampDiffExpr(a, b) case *TrimFuncExpr: b, ok := inB.(*TrimFuncExpr) if !ok { @@ -6279,6 +6884,12 @@ func (cmp *Comparator) Statement(inA, inB Statement) bool { return false } return cmp.RefOfInsert(a, b) + case *Kill: + b, ok := inB.(*Kill) + if !ok { + return false + } + return cmp.RefOfKill(a, b) case *Load: b, ok := inB.(*Load) if !ok { @@ -6309,6 +6920,12 @@ func (cmp *Comparator) Statement(inA, inB Statement) bool { return false } return cmp.RefOfPrepareStmt(a, b) + case *PurgeBinaryLogs: + b, ok := inB.(*PurgeBinaryLogs) + if !ok { + return false + } + return cmp.RefOfPurgeBinaryLogs(a, b) case *Release: b, ok := inB.(*Release) if !ok { @@ -6572,6 +7189,7 @@ func (cmp *Comparator) RefOfColumnTypeOptions(a, b *ColumnTypeOptions) bool { return false } return a.Autoincrement == b.Autoincrement && + a.DefaultLiteral == b.DefaultLiteral && a.Collate == b.Collate && cmp.RefOfBool(a.Null, b.Null) && cmp.Expr(a.Default, b.Default) && diff --git a/go/vt/sqlparser/ast_format.go b/go/vt/sqlparser/ast_format.go index f6d631c4303..67941cf0345 100644 --- a/go/vt/sqlparser/ast_format.go +++ b/go/vt/sqlparser/ast_format.go @@ -116,17 +116,17 @@ func (node *Insert) Format(buf *TrackedBuffer) { buf.astPrintf(node, "%s %v%sinto %v%v%v %v%v", InsertStr, node.Comments, node.Ignore.ToString(), - node.Table, node.Partitions, node.Columns, node.Rows, node.OnDup) + node.Table.Expr, node.Partitions, node.Columns, node.Rows, node.OnDup) case ReplaceAct: buf.astPrintf(node, "%s %v%sinto %v%v%v %v%v", ReplaceStr, node.Comments, node.Ignore.ToString(), - node.Table, node.Partitions, node.Columns, node.Rows, node.OnDup) + node.Table.Expr, node.Partitions, node.Columns, node.Rows, node.OnDup) default: buf.astPrintf(node, "%s %v%sinto %v%v%v %v%v", "Unkown Insert Action", node.Comments, node.Ignore.ToString(), - node.Table, node.Partitions, node.Columns, node.Rows, node.OnDup) + node.Table.Expr, node.Partitions, node.Columns, node.Rows, node.OnDup) } } @@ -717,10 +717,10 @@ func (ct *ColumnType) Format(buf *TrackedBuffer) { } if ct.Options.Default != nil { buf.astPrintf(ct, " %s", keywordStrings[DEFAULT]) - if defaultRequiresParens(ct) { - buf.astPrintf(ct, " (%v)", ct.Options.Default) - } else { + if ct.Options.DefaultLiteral { buf.astPrintf(ct, " %v", ct.Options.Default) + } else { + buf.astPrintf(ct, " (%v)", ct.Options.Default) } } if ct.Options.OnUpdate != nil { @@ -1047,7 +1047,7 @@ func (node *ExecuteStmt) Format(buf *TrackedBuffer) { // Format formats the node. func (node *DeallocateStmt) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s %vprepare %v", node.Type.ToString(), node.Comments, node.Name) + buf.astPrintf(node, "deallocate %vprepare %v", node.Comments, node.Name) } // Format formats the node. @@ -1288,6 +1288,11 @@ func (node *ExistsExpr) Format(buf *TrackedBuffer) { buf.astPrintf(node, "exists %v", node.Subquery) } +// Format formats the node. +func (node *AssignmentExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%l := %r", node.Left, node.Right) +} + // Format formats the node. func (node *Literal) Format(buf *TrackedBuffer) { switch node.Type { @@ -1311,8 +1316,14 @@ func (node *Literal) Format(buf *TrackedBuffer) { } // Format formats the node. -func (node Argument) Format(buf *TrackedBuffer) { - buf.WriteArg(":", string(node)) +func (node *Argument) Format(buf *TrackedBuffer) { + buf.WriteArg(":", node.Name) + if node.Type >= 0 { + // For bind variables that are statically typed, emit their type as an adjacent comment. + // This comment will be ignored by older versions of Vitess (and by MySQL) but will provide + // type safety when using the query as a cache key. + buf.astPrintf(node, " /* %s */", node.Type.String()) + } } // Format formats the node. @@ -1381,18 +1392,13 @@ func (node *IntroducerExpr) Format(buf *TrackedBuffer) { } // Format formats the node. -func (node *IntervalExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "interval %v %#s", node.Expr, node.Unit) -} - -// Format formats the node. -func (node *TimestampFuncExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%#s(%#s, %v, %v)", node.Name, node.Unit, node.Expr1, node.Expr2) +func (node *TimestampDiffExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "timestampdiff(%#s, %v, %v)", node.Unit.ToString(), node.Expr1, node.Expr2) } // Format formats the node. func (node *ExtractFuncExpr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "extract(%#s from %v)", node.IntervalTypes.ToString(), node.Expr) + buf.astPrintf(node, "extract(%#s from %v)", node.IntervalType.ToString(), node.Expr) } // Format formats the node @@ -1452,18 +1458,45 @@ func (node *RegexpSubstrExpr) Format(buf *TrackedBuffer) { buf.WriteByte(')') } +// Format formats the node +func (node *IntervalDateExpr) Format(buf *TrackedBuffer) { + switch node.Syntax { + case IntervalDateExprAdddate, IntervalDateExprSubdate: + if node.Unit == IntervalNone { + buf.astPrintf(node, "%s(%v, %v)", node.FnName(), node.Date, node.Interval) + return + } + fallthrough + case IntervalDateExprDateAdd, IntervalDateExprDateSub: + buf.astPrintf(node, "%s(%v, interval %v %#s)", node.FnName(), node.Date, node.Interval, node.Unit.ToString()) + case IntervalDateExprBinaryAdd: + buf.astPrintf(node, "%l + interval %r %#s", node.Date, node.Interval, node.Unit.ToString()) + case IntervalDateExprBinaryAddLeft: + buf.astPrintf(node, "interval %l %#s + %r", node.Interval, node.Unit.ToString(), node.Date) + case IntervalDateExprBinarySub: + buf.astPrintf(node, "%l - interval %r %#s", node.Date, node.Interval, node.Unit.ToString()) + case IntervalDateExprTimestampadd: + buf.astPrintf(node, "timestampadd(%#s, %v, %v)", node.Unit.ToString(), node.Interval, node.Date) + } +} + // Format formats the node. func (node *TrimFuncExpr) Format(buf *TrackedBuffer) { buf.astPrintf(node, "%s(", node.TrimFuncType.ToString()) - if node.Type.ToString() != "" { - buf.astPrintf(node, "%s ", node.Type.ToString()) - } - if node.TrimArg != nil { - buf.astPrintf(node, "%v ", node.TrimArg) - } + if node.TrimFuncType == NormalTrimType { + var from bool + if node.Type != NoTrimType { + buf.astPrintf(node, "%s ", node.Type.ToString()) + from = true + } + if node.TrimArg != nil { + buf.astPrintf(node, "%v ", node.TrimArg) + from = true + } - if (node.Type.ToString() != "") || (node.TrimArg != nil) { - buf.literal("from ") + if from { + buf.literal("from ") + } } buf.astPrintf(node, "%v", node.StringArg) buf.WriteByte(')') @@ -1480,8 +1513,8 @@ func (node *WeightStringFuncExpr) Format(buf *TrackedBuffer) { // Format formats the node. func (node *CurTimeFuncExpr) Format(buf *TrackedBuffer) { - if node.Fsp != nil { - buf.astPrintf(node, "%#s(%v)", node.Name.String(), node.Fsp) + if node.Fsp > 0 { + buf.astPrintf(node, "%#s(%d)", node.Name.String(), node.Fsp) } else { buf.astPrintf(node, "%#s()", node.Name.String()) } @@ -1595,7 +1628,11 @@ func (node *FromFirstLastClause) Format(buf *TrackedBuffer) { // Format formats the node func (node *FramePoint) Format(buf *TrackedBuffer) { if node.Expr != nil { - buf.astPrintf(node, " %v", node.Expr) + if node.Unit != IntervalNone { + buf.astPrintf(node, " interval %v %#s", node.Expr, node.Unit.ToString()) + } else { + buf.astPrintf(node, " %v", node.Expr) + } } buf.astPrintf(node, " %s", node.Type.ToString()) } @@ -2213,7 +2250,11 @@ func (node *AlterColumn) Format(buf *TrackedBuffer) { if node.DropDefault { buf.astPrintf(node, " drop default") } else if node.DefaultVal != nil { - buf.astPrintf(node, " set default %v", node.DefaultVal) + if node.DefaultLiteral { + buf.astPrintf(node, " set default %v", node.DefaultVal) + } else { + buf.astPrintf(node, " set default (%v)", node.DefaultVal) + } } if node.Invisible != nil { if *node.Invisible { @@ -2638,12 +2679,15 @@ func (node *Count) Format(buf *TrackedBuffer) { } func (node *CountStar) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.WriteString("*)") + buf.WriteString("count(*)") +} + +func (node *AnyValue) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "any_value(%v)", node.Arg) } func (node *Avg) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) + buf.WriteString("avg(") if node.Distinct { buf.literal(DistinctStr) } @@ -2651,7 +2695,7 @@ func (node *Avg) Format(buf *TrackedBuffer) { } func (node *Max) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) + buf.WriteString("max(") if node.Distinct { buf.literal(DistinctStr) } @@ -2659,7 +2703,7 @@ func (node *Max) Format(buf *TrackedBuffer) { } func (node *Min) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) + buf.WriteString("min(") if node.Distinct { buf.literal(DistinctStr) } @@ -2667,7 +2711,7 @@ func (node *Min) Format(buf *TrackedBuffer) { } func (node *Sum) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) + buf.WriteString("sum(") if node.Distinct { buf.literal(DistinctStr) } @@ -2675,53 +2719,43 @@ func (node *Sum) Format(buf *TrackedBuffer) { } func (node *BitAnd) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "bit_and(%v)", node.Arg) } func (node *BitOr) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "bit_or(%v)", node.Arg) } func (node *BitXor) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "bit_xor(%v)", node.Arg) } func (node *Std) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "std(%v)", node.Arg) } func (node *StdDev) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "stddev(%v)", node.Arg) } func (node *StdPop) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "stddev_pop(%v)", node.Arg) } func (node *StdSamp) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "stddev_samp(%v)", node.Arg) } func (node *VarPop) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "var_pop(%v)", node.Arg) } func (node *VarSamp) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "var_samp(%v)", node.Arg) } func (node *Variance) Format(buf *TrackedBuffer) { - buf.astPrintf(node, "%s(", node.AggrName()) - buf.astPrintf(node, "%v)", node.Arg) + buf.astPrintf(node, "variance(%v)", node.Arg) } // Format formats the node. @@ -2771,3 +2805,149 @@ func (node *LineStringExpr) Format(buf *TrackedBuffer) { func (node *PolygonExpr) Format(buf *TrackedBuffer) { buf.astPrintf(node, "polygon(%v)", node.LinestringParams) } + +// Format formats the node. +func (node *PurgeBinaryLogs) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "purge binary logs") + if node.To != "" { + buf.astPrintf(node, " to '%#s'", node.To) + } else { + buf.astPrintf(node, " before '%#s'", node.Before) + } +} + +func (node *MultiPolygonExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "multipolygon(%v)", node.PolygonParams) +} + +// Format formats the node. +func (node *MultiPointExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "multipoint(%v)", node.PointParams) +} + +// Format formats the node. +func (node *MultiLinestringExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "multilinestring(%v)", node.LinestringParams) +} + +// Format formats the node +func (node *GeomFromTextExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s(%v", node.Type.ToString(), node.WktText) + if node.Srid != nil { + buf.astPrintf(node, ", %v", node.Srid) + } + if node.AxisOrderOpt != nil { + buf.astPrintf(node, ", %v", node.AxisOrderOpt) + } + buf.WriteByte(')') +} + +// Format formats the node +func (node *GeomFromWKBExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s(%v", node.Type.ToString(), node.WkbBlob) + if node.Srid != nil { + buf.astPrintf(node, ", %v", node.Srid) + } + if node.AxisOrderOpt != nil { + buf.astPrintf(node, ", %v", node.AxisOrderOpt) + } + buf.WriteByte(')') +} + +// Format formats the node +func (node *GeomFormatExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s(%v", node.FormatType.ToString(), node.Geom) + if node.AxisOrderOpt != nil { + buf.astPrintf(node, ", %v", node.AxisOrderOpt) + } + buf.WriteByte(')') +} + +// Format formats the node +func (node *GeomPropertyFuncExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s(%v)", node.Property.ToString(), node.Geom) +} + +// Format formats the node +func (node *PointPropertyFuncExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s(%v", node.Property.ToString(), node.Point) + if node.ValueToSet != nil { + buf.astPrintf(node, ", %v", node.ValueToSet) + } + buf.WriteByte(')') +} + +// Format formats the node +func (node *LinestrPropertyFuncExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s(%v", node.Property.ToString(), node.Linestring) + if node.PropertyDefArg != nil { + buf.astPrintf(node, ", %v", node.PropertyDefArg) + } + buf.WriteByte(')') +} + +// Format formats the node +func (node *PolygonPropertyFuncExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s(%v", node.Property.ToString(), node.Polygon) + if node.PropertyDefArg != nil { + buf.astPrintf(node, ", %v", node.PropertyDefArg) + } + buf.WriteByte(')') +} + +// Format formats the node +func (node *GeomCollPropertyFuncExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s(%v", node.Property.ToString(), node.GeomColl) + if node.PropertyDefArg != nil { + buf.astPrintf(node, ", %v", node.PropertyDefArg) + } + buf.WriteByte(')') +} + +// Format formats the node +func (node *GeomFromGeoHashExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "%s(%v", node.GeomType.ToString(), node.GeoHash) + if node.SridOpt != nil { + buf.astPrintf(node, ", %v", node.SridOpt) + } + buf.WriteByte(')') +} + +// Format formats the node +func (node *GeoHashFromLatLongExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "st_geohash(%v, %v, %v)", node.Longitude, node.Latitude, node.MaxLength) +} + +// Format formats the node +func (node *GeoHashFromPointExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "st_geohash(%v, %v)", node.Point, node.MaxLength) +} + +// Format formats the node +func (node *GeoJSONFromGeomExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "st_asgeojson(%v", node.Geom) + if node.MaxDecimalDigits != nil { + buf.astPrintf(node, ", %v", node.MaxDecimalDigits) + } + if node.Bitmask != nil { + buf.astPrintf(node, ", %v", node.Bitmask) + } + buf.WriteByte(')') +} + +// Format formats the node +func (node *GeomFromGeoJSONExpr) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "st_geomfromgeojson(%v", node.GeoJSON) + if node.HigherDimHandlerOpt != nil { + buf.astPrintf(node, ", %v", node.HigherDimHandlerOpt) + } + if node.Srid != nil { + buf.astPrintf(node, ", %v", node.Srid) + } + buf.WriteByte(')') +} + +// Format formats the kill statement +func (node *Kill) Format(buf *TrackedBuffer) { + buf.astPrintf(node, "kill %s %d", node.Type.ToString(), node.ProcesslistID) +} diff --git a/go/vt/sqlparser/ast_format_fast.go b/go/vt/sqlparser/ast_format_fast.go index ac8c70554dc..c424fe6e3d7 100644 --- a/go/vt/sqlparser/ast_format_fast.go +++ b/go/vt/sqlparser/ast_format_fast.go @@ -146,7 +146,7 @@ func (node *Insert) formatFast(buf *TrackedBuffer) { buf.WriteString(node.Ignore.ToString()) buf.WriteString("into ") - node.Table.formatFast(buf) + node.Table.Expr.formatFast(buf) node.Partitions.formatFast(buf) @@ -165,7 +165,7 @@ func (node *Insert) formatFast(buf *TrackedBuffer) { buf.WriteString(node.Ignore.ToString()) buf.WriteString("into ") - node.Table.formatFast(buf) + node.Table.Expr.formatFast(buf) node.Partitions.formatFast(buf) @@ -184,7 +184,7 @@ func (node *Insert) formatFast(buf *TrackedBuffer) { buf.WriteString(node.Ignore.ToString()) buf.WriteString("into ") - node.Table.formatFast(buf) + node.Table.Expr.formatFast(buf) node.Partitions.formatFast(buf) @@ -951,13 +951,13 @@ func (ct *ColumnType) formatFast(buf *TrackedBuffer) { if ct.Options.Default != nil { buf.WriteByte(' ') buf.WriteString(keywordStrings[DEFAULT]) - if defaultRequiresParens(ct) { - buf.WriteString(" (") + if ct.Options.DefaultLiteral { + buf.WriteByte(' ') ct.Options.Default.formatFast(buf) - buf.WriteByte(')') } else { - buf.WriteByte(' ') + buf.WriteString(" (") ct.Options.Default.formatFast(buf) + buf.WriteByte(')') } } if ct.Options.OnUpdate != nil { @@ -1384,8 +1384,7 @@ func (node *ExecuteStmt) formatFast(buf *TrackedBuffer) { // formatFast formats the node. func (node *DeallocateStmt) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.Type.ToString()) - buf.WriteByte(' ') + buf.WriteString("deallocate ") node.Comments.formatFast(buf) buf.WriteString("prepare ") node.Name.formatFast(buf) @@ -1687,6 +1686,13 @@ func (node *ExistsExpr) formatFast(buf *TrackedBuffer) { buf.printExpr(node, node.Subquery, true) } +// formatFast formats the node. +func (node *AssignmentExpr) formatFast(buf *TrackedBuffer) { + buf.printExpr(node, node.Left, true) + buf.WriteString(" := ") + buf.printExpr(node, node.Right, false) +} + // formatFast formats the node. func (node *Literal) formatFast(buf *TrackedBuffer) { switch node.Type { @@ -1720,8 +1726,16 @@ func (node *Literal) formatFast(buf *TrackedBuffer) { } // formatFast formats the node. -func (node Argument) formatFast(buf *TrackedBuffer) { - buf.WriteArg(":", string(node)) +func (node *Argument) formatFast(buf *TrackedBuffer) { + buf.WriteArg(":", node.Name) + if node.Type >= 0 { + // For bind variables that are statically typed, emit their type as an adjacent comment. + // This comment will be ignored by older versions of Vitess (and by MySQL) but will provide + // type safety when using the query as a cache key. + buf.WriteString(" /* ") + buf.WriteString(node.Type.String()) + buf.WriteString(" */") + } } // formatFast formats the node. @@ -1806,18 +1820,9 @@ func (node *IntroducerExpr) formatFast(buf *TrackedBuffer) { } // formatFast formats the node. -func (node *IntervalExpr) formatFast(buf *TrackedBuffer) { - buf.WriteString("interval ") - buf.printExpr(node, node.Expr, true) - buf.WriteByte(' ') - buf.WriteString(node.Unit) -} - -// formatFast formats the node. -func (node *TimestampFuncExpr) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.Name) - buf.WriteByte('(') - buf.WriteString(node.Unit) +func (node *TimestampDiffExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString("timestampdiff(") + buf.WriteString(node.Unit.ToString()) buf.WriteString(", ") buf.printExpr(node, node.Expr1, true) buf.WriteString(", ") @@ -1828,7 +1833,7 @@ func (node *TimestampFuncExpr) formatFast(buf *TrackedBuffer) { // formatFast formats the node. func (node *ExtractFuncExpr) formatFast(buf *TrackedBuffer) { buf.WriteString("extract(") - buf.WriteString(node.IntervalTypes.ToString()) + buf.WriteString(node.IntervalType.ToString()) buf.WriteString(" from ") buf.printExpr(node, node.Expr, true) buf.WriteByte(')') @@ -1916,21 +1921,79 @@ func (node *RegexpSubstrExpr) formatFast(buf *TrackedBuffer) { buf.WriteByte(')') } +// formatFast formats the node +func (node *IntervalDateExpr) formatFast(buf *TrackedBuffer) { + switch node.Syntax { + case IntervalDateExprAdddate, IntervalDateExprSubdate: + if node.Unit == IntervalNone { + buf.WriteString(node.FnName()) + buf.WriteByte('(') + buf.printExpr(node, node.Date, true) + buf.WriteString(", ") + buf.printExpr(node, node.Interval, true) + buf.WriteByte(')') + return + } + fallthrough + case IntervalDateExprDateAdd, IntervalDateExprDateSub: + buf.WriteString(node.FnName()) + buf.WriteByte('(') + buf.printExpr(node, node.Date, true) + buf.WriteString(", interval ") + buf.printExpr(node, node.Interval, true) + buf.WriteByte(' ') + buf.WriteString(node.Unit.ToString()) + buf.WriteByte(')') + case IntervalDateExprBinaryAdd: + buf.printExpr(node, node.Date, true) + buf.WriteString(" + interval ") + buf.printExpr(node, node.Interval, false) + buf.WriteByte(' ') + buf.WriteString(node.Unit.ToString()) + case IntervalDateExprBinaryAddLeft: + buf.WriteString("interval ") + buf.printExpr(node, node.Interval, true) + buf.WriteByte(' ') + buf.WriteString(node.Unit.ToString()) + buf.WriteString(" + ") + buf.printExpr(node, node.Date, false) + case IntervalDateExprBinarySub: + buf.printExpr(node, node.Date, true) + buf.WriteString(" - interval ") + buf.printExpr(node, node.Interval, false) + buf.WriteByte(' ') + buf.WriteString(node.Unit.ToString()) + case IntervalDateExprTimestampadd: + buf.WriteString("timestampadd(") + buf.WriteString(node.Unit.ToString()) + buf.WriteString(", ") + buf.printExpr(node, node.Interval, true) + buf.WriteString(", ") + buf.printExpr(node, node.Date, true) + buf.WriteByte(')') + } +} + // formatFast formats the node. func (node *TrimFuncExpr) formatFast(buf *TrackedBuffer) { buf.WriteString(node.TrimFuncType.ToString()) buf.WriteByte('(') - if node.Type.ToString() != "" { - buf.WriteString(node.Type.ToString()) - buf.WriteByte(' ') - } - if node.TrimArg != nil { - buf.printExpr(node, node.TrimArg, true) - buf.WriteByte(' ') - } + if node.TrimFuncType == NormalTrimType { + var from bool + if node.Type != NoTrimType { + buf.WriteString(node.Type.ToString()) + buf.WriteByte(' ') + from = true + } + if node.TrimArg != nil { + buf.printExpr(node, node.TrimArg, true) + buf.WriteByte(' ') + from = true + } - if (node.Type.ToString() != "") || (node.TrimArg != nil) { - buf.WriteString("from ") + if from { + buf.WriteString("from ") + } } buf.printExpr(node, node.StringArg, true) buf.WriteByte(')') @@ -1953,10 +2016,10 @@ func (node *WeightStringFuncExpr) formatFast(buf *TrackedBuffer) { // formatFast formats the node. func (node *CurTimeFuncExpr) formatFast(buf *TrackedBuffer) { - if node.Fsp != nil { + if node.Fsp > 0 { buf.WriteString(node.Name.String()) buf.WriteByte('(') - buf.printExpr(node, node.Fsp, true) + buf.WriteString(fmt.Sprintf("%d", node.Fsp)) buf.WriteByte(')') } else { buf.WriteString(node.Name.String()) @@ -2105,8 +2168,15 @@ func (node *FromFirstLastClause) formatFast(buf *TrackedBuffer) { // formatFast formats the node func (node *FramePoint) formatFast(buf *TrackedBuffer) { if node.Expr != nil { - buf.WriteByte(' ') - node.Expr.formatFast(buf) + if node.Unit != IntervalNone { + buf.WriteString(" interval ") + node.Expr.formatFast(buf) + buf.WriteByte(' ') + buf.WriteString(node.Unit.ToString()) + } else { + buf.WriteByte(' ') + node.Expr.formatFast(buf) + } } buf.WriteByte(' ') buf.WriteString(node.Type.ToString()) @@ -2916,8 +2986,14 @@ func (node *AlterColumn) formatFast(buf *TrackedBuffer) { if node.DropDefault { buf.WriteString(" drop default") } else if node.DefaultVal != nil { - buf.WriteString(" set default ") - node.DefaultVal.formatFast(buf) + if node.DefaultLiteral { + buf.WriteString(" set default ") + node.DefaultVal.formatFast(buf) + } else { + buf.WriteString(" set default (") + node.DefaultVal.formatFast(buf) + buf.WriteByte(')') + } } if node.Invisible != nil { if *node.Invisible { @@ -3463,14 +3539,17 @@ func (node *Count) formatFast(buf *TrackedBuffer) { } func (node *CountStar) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') - buf.WriteString("*)") + buf.WriteString("count(*)") +} + +func (node *AnyValue) formatFast(buf *TrackedBuffer) { + buf.WriteString("any_value(") + buf.printExpr(node, node.Arg, true) + buf.WriteByte(')') } func (node *Avg) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("avg(") if node.Distinct { buf.WriteString(DistinctStr) } @@ -3479,8 +3558,7 @@ func (node *Avg) formatFast(buf *TrackedBuffer) { } func (node *Max) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("max(") if node.Distinct { buf.WriteString(DistinctStr) } @@ -3489,8 +3567,7 @@ func (node *Max) formatFast(buf *TrackedBuffer) { } func (node *Min) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("min(") if node.Distinct { buf.WriteString(DistinctStr) } @@ -3499,8 +3576,7 @@ func (node *Min) formatFast(buf *TrackedBuffer) { } func (node *Sum) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("sum(") if node.Distinct { buf.WriteString(DistinctStr) } @@ -3509,71 +3585,61 @@ func (node *Sum) formatFast(buf *TrackedBuffer) { } func (node *BitAnd) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("bit_and(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } func (node *BitOr) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("bit_or(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } func (node *BitXor) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("bit_xor(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } func (node *Std) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("std(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } func (node *StdDev) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("stddev(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } func (node *StdPop) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("stddev_pop(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } func (node *StdSamp) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("stddev_samp(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } func (node *VarPop) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("var_pop(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } func (node *VarSamp) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("var_samp(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } func (node *Variance) formatFast(buf *TrackedBuffer) { - buf.WriteString(node.AggrName()) - buf.WriteByte('(') + buf.WriteString("variance(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') } @@ -3638,3 +3704,207 @@ func (node *PolygonExpr) formatFast(buf *TrackedBuffer) { node.LinestringParams.formatFast(buf) buf.WriteByte(')') } + +// formatFast formats the node. +func (node *PurgeBinaryLogs) formatFast(buf *TrackedBuffer) { + buf.WriteString("purge binary logs") + if node.To != "" { + buf.WriteString(" to '") + buf.WriteString(node.To) + buf.WriteByte('\'') + } else { + buf.WriteString(" before '") + buf.WriteString(node.Before) + buf.WriteByte('\'') + } +} + +func (node *MultiPolygonExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString("multipolygon(") + node.PolygonParams.formatFast(buf) + buf.WriteByte(')') +} + +// formatFast formats the node. +func (node *MultiPointExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString("multipoint(") + node.PointParams.formatFast(buf) + buf.WriteByte(')') +} + +// formatFast formats the node. +func (node *MultiLinestringExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString("multilinestring(") + node.LinestringParams.formatFast(buf) + buf.WriteByte(')') +} + +// formatFast formats the node +func (node *GeomFromTextExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString(node.Type.ToString()) + buf.WriteByte('(') + buf.printExpr(node, node.WktText, true) + if node.Srid != nil { + buf.WriteString(", ") + buf.printExpr(node, node.Srid, true) + } + if node.AxisOrderOpt != nil { + buf.WriteString(", ") + buf.printExpr(node, node.AxisOrderOpt, true) + } + buf.WriteByte(')') +} + +// formatFast formats the node +func (node *GeomFromWKBExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString(node.Type.ToString()) + buf.WriteByte('(') + buf.printExpr(node, node.WkbBlob, true) + if node.Srid != nil { + buf.WriteString(", ") + buf.printExpr(node, node.Srid, true) + } + if node.AxisOrderOpt != nil { + buf.WriteString(", ") + buf.printExpr(node, node.AxisOrderOpt, true) + } + buf.WriteByte(')') +} + +// formatFast formats the node +func (node *GeomFormatExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString(node.FormatType.ToString()) + buf.WriteByte('(') + buf.printExpr(node, node.Geom, true) + if node.AxisOrderOpt != nil { + buf.WriteString(", ") + buf.printExpr(node, node.AxisOrderOpt, true) + } + buf.WriteByte(')') +} + +// formatFast formats the node +func (node *GeomPropertyFuncExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString(node.Property.ToString()) + buf.WriteByte('(') + buf.printExpr(node, node.Geom, true) + buf.WriteByte(')') +} + +// formatFast formats the node +func (node *PointPropertyFuncExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString(node.Property.ToString()) + buf.WriteByte('(') + buf.printExpr(node, node.Point, true) + if node.ValueToSet != nil { + buf.WriteString(", ") + buf.printExpr(node, node.ValueToSet, true) + } + buf.WriteByte(')') +} + +// formatFast formats the node +func (node *LinestrPropertyFuncExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString(node.Property.ToString()) + buf.WriteByte('(') + buf.printExpr(node, node.Linestring, true) + if node.PropertyDefArg != nil { + buf.WriteString(", ") + buf.printExpr(node, node.PropertyDefArg, true) + } + buf.WriteByte(')') +} + +// formatFast formats the node +func (node *PolygonPropertyFuncExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString(node.Property.ToString()) + buf.WriteByte('(') + buf.printExpr(node, node.Polygon, true) + if node.PropertyDefArg != nil { + buf.WriteString(", ") + buf.printExpr(node, node.PropertyDefArg, true) + } + buf.WriteByte(')') +} + +// formatFast formats the node +func (node *GeomCollPropertyFuncExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString(node.Property.ToString()) + buf.WriteByte('(') + buf.printExpr(node, node.GeomColl, true) + if node.PropertyDefArg != nil { + buf.WriteString(", ") + buf.printExpr(node, node.PropertyDefArg, true) + } + buf.WriteByte(')') +} + +// formatFast formats the node +func (node *GeomFromGeoHashExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString(node.GeomType.ToString()) + buf.WriteByte('(') + buf.printExpr(node, node.GeoHash, true) + if node.SridOpt != nil { + buf.WriteString(", ") + buf.printExpr(node, node.SridOpt, true) + } + buf.WriteByte(')') +} + +// formatFast formats the node +func (node *GeoHashFromLatLongExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString("st_geohash(") + buf.printExpr(node, node.Longitude, true) + buf.WriteString(", ") + buf.printExpr(node, node.Latitude, true) + buf.WriteString(", ") + buf.printExpr(node, node.MaxLength, true) + buf.WriteByte(')') +} + +// formatFast formats the node +func (node *GeoHashFromPointExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString("st_geohash(") + buf.printExpr(node, node.Point, true) + buf.WriteString(", ") + buf.printExpr(node, node.MaxLength, true) + buf.WriteByte(')') +} + +// formatFast formats the node +func (node *GeoJSONFromGeomExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString("st_asgeojson(") + buf.printExpr(node, node.Geom, true) + if node.MaxDecimalDigits != nil { + buf.WriteString(", ") + buf.printExpr(node, node.MaxDecimalDigits, true) + } + if node.Bitmask != nil { + buf.WriteString(", ") + buf.printExpr(node, node.Bitmask, true) + } + buf.WriteByte(')') +} + +// formatFast formats the node +func (node *GeomFromGeoJSONExpr) formatFast(buf *TrackedBuffer) { + buf.WriteString("st_geomfromgeojson(") + buf.printExpr(node, node.GeoJSON, true) + if node.HigherDimHandlerOpt != nil { + buf.WriteString(", ") + buf.printExpr(node, node.HigherDimHandlerOpt, true) + } + if node.Srid != nil { + buf.WriteString(", ") + buf.printExpr(node, node.Srid, true) + } + buf.WriteByte(')') +} + +// formatFast formats the kill statement +func (node *Kill) formatFast(buf *TrackedBuffer) { + buf.WriteString("kill ") + buf.WriteString(node.Type.ToString()) + buf.WriteByte(' ') + buf.WriteString(fmt.Sprintf("%d", node.ProcesslistID)) +} diff --git a/go/vt/sqlparser/ast_funcs.go b/go/vt/sqlparser/ast_funcs.go index eaef3d992b0..20088bee795 100644 --- a/go/vt/sqlparser/ast_funcs.go +++ b/go/vt/sqlparser/ast_funcs.go @@ -17,11 +17,10 @@ limitations under the License. package sqlparser import ( - "bytes" "encoding/hex" "encoding/json" "fmt" - "regexp" + "io" "strconv" "strings" @@ -131,7 +130,7 @@ const ( type MatchAction int const ( - // DefaultAction indicates no action was explicitly specified. + // DefaultMatch indicates no action was explicitly specified. DefaultMatch MatchAction = iota Full Partial @@ -415,16 +414,6 @@ func (node TableName) IsEmpty() bool { return node.Name.IsEmpty() } -// ToViewName returns a TableName acceptable for use as a VIEW. VIEW names are -// always lowercase, so ToViewName lowercasese the name. Databases are case-sensitive -// so Qualifier is left untouched. -func (node TableName) ToViewName() TableName { - return TableName{ - Qualifier: node.Qualifier, - Name: NewIdentifierCS(strings.ToLower(node.Name.v)), - } -} - // NewWhere creates a WHERE or HAVING clause out // of a Expr. If the expression is nil, it returns nil. func NewWhere(typ WhereType, expr Expr) *Where { @@ -543,8 +532,17 @@ func NewTimestampLiteral(in string) *Literal { } // NewArgument builds a new ValArg. -func NewArgument(in string) Argument { - return Argument(in) +func NewArgument(in string) *Argument { + return &Argument{Name: in, Type: sqltypes.Unknown} +} + +func parseBindVariable(yylex yyLexer, bvar string) *Argument { + markBindVariable(yylex, bvar) + return NewArgument(bvar) +} + +func NewTypedArgument(in string, t sqltypes.Type) *Argument { + return &Argument{Name: in, Type: t} } // NewListArg builds a new ListArg. @@ -567,36 +565,31 @@ func (node *Literal) HexDecode() ([]byte, error) { return hex.DecodeString(node.Val) } -// encodeHexOrBitValToMySQLQueryFormat encodes the hexval or bitval back into the query format -// for passing on to MySQL as a bind var -func (node *Literal) encodeHexOrBitValToMySQLQueryFormat() ([]byte, error) { - nb := node.Bytes() - if node.Type != HexVal && node.Type != BitVal { - return nb, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Literal value is not a HexVal") - } - - prefix := 'x' - regex := "^x'.*'$" - if node.Type == BitVal { - prefix = 'b' - regex = "^b'.*'$" - } - // Let's make this idempotent in case it's called more than once - match, err := regexp.Match(regex, nb) - if err != nil { - return nb, err - } - if match { - return nb, nil +func (node *Literal) SQLType() sqltypes.Type { + switch node.Type { + case StrVal: + return sqltypes.VarChar + case IntVal: + return sqltypes.Int64 + case FloatVal: + return sqltypes.Float64 + case DecimalVal: + return sqltypes.Decimal + case HexNum: + return sqltypes.HexNum + case HexVal: + return sqltypes.HexVal + case BitVal: + return sqltypes.HexNum + case DateVal: + return sqltypes.Date + case TimeVal: + return sqltypes.Time + case TimestampVal: + return sqltypes.Datetime + default: + return -1 } - - var bb bytes.Buffer - bb.WriteByte(byte(prefix)) - bb.WriteByte('\'') - bb.WriteString(string(nb)) - bb.WriteByte('\'') - nb = bb.Bytes() - return nb, nil } // Equal returns true if the column names match. @@ -608,31 +601,6 @@ func (node *ColName) Equal(c *ColName) bool { return node.Name.Equal(c.Name) && node.Qualifier == c.Qualifier } -// Aggregates is a map of all aggregate functions. -var Aggregates = map[string]bool{ - "avg": true, - "bit_and": true, - "bit_or": true, - "bit_xor": true, - "count": true, - "group_concat": true, - "max": true, - "min": true, - "std": true, - "stddev_pop": true, - "stddev_samp": true, - "stddev": true, - "sum": true, - "var_pop": true, - "var_samp": true, - "variance": true, -} - -// IsAggregate returns true if the function is an aggregate. -func (node *FuncExpr) IsAggregate() bool { - return Aggregates[node.Name.Lowered()] -} - // NewIdentifierCI makes a new IdentifierCI. func NewIdentifierCI(str string) IdentifierCI { return IdentifierCI{ @@ -658,6 +626,134 @@ func NewColNameWithQualifier(identifier string, table TableName) *ColName { } } +// NewTableName makes a new TableName +func NewTableName(name string) TableName { + return TableName{ + Name: NewIdentifierCS(name), + } +} + +// NewTableNameWithQualifier makes a new TableName with a qualifier +func NewTableNameWithQualifier(name, qualifier string) TableName { + return TableName{ + Name: NewIdentifierCS(name), + Qualifier: NewIdentifierCS(qualifier), + } +} + +// NewSubquery makes a new Subquery +func NewSubquery(selectStatement SelectStatement) *Subquery { + return &Subquery{Select: selectStatement} +} + +// NewDerivedTable makes a new DerivedTable +func NewDerivedTable(lateral bool, selectStatement SelectStatement) *DerivedTable { + return &DerivedTable{ + Lateral: lateral, + Select: selectStatement, + } +} + +// NewAliasedTableExpr makes a new AliasedTableExpr with an alias +func NewAliasedTableExpr(simpleTableExpr SimpleTableExpr, alias string) *AliasedTableExpr { + return &AliasedTableExpr{ + Expr: simpleTableExpr, + As: NewIdentifierCS(alias), + } +} + +// NewJoinTableExpr makes a new JoinTableExpr +func NewJoinTableExpr(leftExpr TableExpr, join JoinType, rightExpr TableExpr, condition *JoinCondition) *JoinTableExpr { + return &JoinTableExpr{ + LeftExpr: leftExpr, + Join: join, + RightExpr: rightExpr, + Condition: condition, + } +} + +// NewJoinCondition makes a new JoinCondition +func NewJoinCondition(on Expr, using Columns) *JoinCondition { + return &JoinCondition{ + On: on, + Using: using, + } +} + +// NewAliasedExpr makes a new AliasedExpr +func NewAliasedExpr(expr Expr, alias string) *AliasedExpr { + return &AliasedExpr{ + Expr: expr, + As: NewIdentifierCI(alias), + } +} + +func (ae *AliasedExpr) SetAlias(alias string) { + ae.As = NewIdentifierCI(alias) +} + +// NewOrder makes a new Order +func NewOrder(expr Expr, direction OrderDirection) *Order { + return &Order{ + Expr: expr, + Direction: direction, + } +} + +// NewNotExpr makes a new NotExpr +func NewNotExpr(expr Expr) *NotExpr { + return &NotExpr{Expr: expr} +} + +// NewComparisonExpr makes a new ComparisonExpr +func NewComparisonExpr(operator ComparisonExprOperator, left, right, escape Expr) *ComparisonExpr { + return &ComparisonExpr{ + Operator: operator, + Left: left, + Right: right, + Escape: escape, + } +} + +// NewExistsExpr makes a new ExistsExpr +func NewExistsExpr(subquery *Subquery) *ExistsExpr { + return &ExistsExpr{Subquery: subquery} +} + +// NewCaseExpr makes a new CaseExpr +func NewCaseExpr(expr Expr, whens []*When, elseExpr Expr) *CaseExpr { + return &CaseExpr{ + Expr: expr, + Whens: whens, + Else: elseExpr, + } +} + +// NewLimit makes a new Limit +func NewLimit(offset, rowCount int) *Limit { + return &Limit{ + Offset: &Literal{ + Type: IntVal, + Val: fmt.Sprint(offset), + }, + Rowcount: &Literal{ + Type: IntVal, + Val: fmt.Sprint(rowCount), + }, + } +} + +// NewLimitWithoutOffset makes a new Limit without an offset +func NewLimitWithoutOffset(rowCount int) *Limit { + return &Limit{ + Offset: nil, + Rowcount: &Literal{ + Type: IntVal, + Val: fmt.Sprint(rowCount), + }, + } +} + // NewSelect is used to create a select statement func NewSelect(comments Comments, exprs SelectExprs, selectOptions []string, into *SelectInto, from TableExprs, where *Where, groupBy GroupBy, having *Where, windows NamedWindows) *Select { var cache *bool @@ -760,7 +856,7 @@ func createIdentifierCI(str string) IdentifierCI { // NewOffset creates an offset and returns it func NewOffset(v int, original Expr) *Offset { - return &Offset{V: v, Original: String(original)} + return &Offset{V: v, Original: original} } // IsEmpty returns true if the name is empty. @@ -883,8 +979,12 @@ func containEscapableChars(s string, at AtCount) bool { } func formatID(buf *TrackedBuffer, original string, at AtCount) { + if buf.escape == escapeNoIdentifiers { + buf.WriteString(original) + return + } _, isKeyword := keywordLookupTable.LookupString(original) - if buf.escape || isKeyword || containEscapableChars(original, at) { + if buf.escape == escapeAllIdentifiers || isKeyword || containEscapableChars(original, at) { writeEscapedString(buf, original) } else { buf.WriteString(original) @@ -902,6 +1002,11 @@ func writeEscapedString(buf *TrackedBuffer, original string) { buf.WriteByte('`') } +func CompliantString(in SQLNode) string { + s := String(in) + return compliantName(s) +} + func compliantName(in string) string { var buf strings.Builder for i, c := range in { @@ -916,6 +1021,10 @@ func compliantName(in string) string { return buf.String() } +func (node *Select) AddSelectExprs(selectExprs SelectExprs) { + node.SelectExprs = append(node.SelectExprs, selectExprs...) +} + // AddOrder adds an order by element func (node *Select) AddOrder(order *Order) { node.OrderBy = append(node.OrderBy, order) @@ -941,6 +1050,11 @@ func (node *Select) GetLimit() *Limit { return node.Limit } +// GetLock returns the lock clause +func (node *Select) GetLock() Lock { + return node.Lock +} + // SetLock sets the lock clause func (node *Select) SetLock(lock Lock) { node.Lock = lock @@ -961,6 +1075,11 @@ func (node *Select) MakeDistinct() { node.Distinct = true } +// IsDistinct implements the SelectStatement interface +func (node *Select) IsDistinct() bool { + return node.Distinct +} + // GetColumnCount return SelectExprs count. func (node *Select) GetColumnCount() int { return len(node.SelectExprs) @@ -971,12 +1090,12 @@ func (node *Select) GetColumns() SelectExprs { return node.SelectExprs } -// SetComments implements the SelectStatement interface +// SetComments implements the Commented interface func (node *Select) SetComments(comments Comments) { node.Comments = comments.Parsed() } -// GetComments implements the SelectStatement interface +// GetParsedComments implements the Commented interface func (node *Select) GetParsedComments() *ParsedComments { return node.Comments } @@ -984,31 +1103,14 @@ func (node *Select) GetParsedComments() *ParsedComments { // AddWhere adds the boolean expression to the // WHERE clause as an AND condition. func (node *Select) AddWhere(expr Expr) { - if node.Where == nil { - node.Where = &Where{ - Type: WhereClause, - Expr: expr, - } - return - } - exprs := SplitAndExpression(nil, node.Where.Expr) - node.Where.Expr = AndExpressions(append(exprs, expr)...) + node.Where = addPredicate(node.Where, expr) } // AddHaving adds the boolean expression to the // HAVING clause as an AND condition. func (node *Select) AddHaving(expr Expr) { - if node.Having == nil { - node.Having = &Where{ - Type: HavingClause, - Expr: expr, - } - return - } - node.Having.Expr = &AndExpr{ - Left: node.Having.Expr, - Right: expr, - } + node.Having = addPredicate(node.Having, expr) + node.Having.Type = HavingClause } // AddGroupBy adds a grouping expression, unless it's already present @@ -1025,17 +1127,27 @@ func (node *Select) AddGroupBy(expr Expr) { // AddWhere adds the boolean expression to the // WHERE clause as an AND condition. func (node *Update) AddWhere(expr Expr) { - if node.Where == nil { - node.Where = &Where{ + node.Where = addPredicate(node.Where, expr) +} + +func addPredicate(where *Where, pred Expr) *Where { + if where == nil { + return &Where{ Type: WhereClause, - Expr: expr, + Expr: pred, } - return } - node.Where.Expr = &AndExpr{ - Left: node.Where.Expr, - Right: expr, + where.Expr = &AndExpr{ + Left: where.Expr, + Right: pred, } + return where +} + +// AddWhere adds the boolean expression to the +// WHERE clause as an AND condition. +func (node *Delete) AddWhere(expr Expr) { + node.Where = addPredicate(node.Where, expr) } // AddOrder adds an order by element @@ -1068,6 +1180,11 @@ func (node *Union) GetColumns() SelectExprs { return node.Left.GetColumns() } +// GetLock returns the lock clause +func (node *Union) GetLock() Lock { + return node.Lock +} + // SetLock sets the lock clause func (node *Union) SetLock(lock Lock) { node.Lock = lock @@ -1088,6 +1205,11 @@ func (node *Union) MakeDistinct() { node.Distinct = true } +// IsDistinct implements the SelectStatement interface +func (node *Union) IsDistinct() bool { + return node.Distinct +} + // GetColumnCount implements the SelectStatement interface func (node *Union) GetColumnCount() int { return node.Left.GetColumnCount() @@ -1098,7 +1220,7 @@ func (node *Union) SetComments(comments Comments) { node.Left.SetComments(comments) } -// GetComments implements the SelectStatement interface +// GetParsedComments implements the SelectStatement interface func (node *Union) GetParsedComments() *ParsedComments { return node.Left.GetParsedComments() } @@ -1404,18 +1526,6 @@ func (ty IndexHintType) ToString() string { } } -// ToString returns the type as a string -func (ty DeallocateStmtType) ToString() string { - switch ty { - case DeallocateType: - return DeallocateStr - case DropType: - return DropStr - default: - return "Unknown Deallocate Statement Type" - } -} - // ToString returns the type as a string func (ty IndexHintForType) ToString() string { switch ty { @@ -1692,54 +1802,6 @@ func (ty VExplainType) ToString() string { } } -// ToString returns the type as a string -func (ty IntervalTypes) ToString() string { - switch ty { - case IntervalYear: - return YearStr - case IntervalQuarter: - return QuarterStr - case IntervalMonth: - return MonthStr - case IntervalWeek: - return WeekStr - case IntervalDay: - return DayStr - case IntervalHour: - return HourStr - case IntervalMinute: - return MinuteStr - case IntervalSecond: - return SecondStr - case IntervalMicrosecond: - return MicrosecondStr - case IntervalYearMonth: - return YearMonthStr - case IntervalDayHour: - return DayHourStr - case IntervalDayMinute: - return DayMinuteStr - case IntervalDaySecond: - return DaySecondStr - case IntervalHourMinute: - return HourMinuteStr - case IntervalHourSecond: - return HourSecondStr - case IntervalMinuteSecond: - return MinuteSecondStr - case IntervalDayMicrosecond: - return DayMicrosecondStr - case IntervalHourMicrosecond: - return HourMicrosecondStr - case IntervalMinuteMicrosecond: - return MinuteMicrosecondStr - case IntervalSecondMicrosecond: - return SecondMicrosecondStr - default: - return "Unknown IntervalType" - } -} - // ToString returns the type as a string func (sel SelectIntoType) ToString() string { switch sel { @@ -1939,17 +2001,6 @@ func (node *ColName) CompliantName() string { return node.Name.CompliantName() } -// isExprAliasForCurrentTimeStamp returns true if the Expr provided is an alias for CURRENT_TIMESTAMP -func isExprAliasForCurrentTimeStamp(expr Expr) bool { - switch node := expr.(type) { - case *FuncExpr: - return node.Name.EqualString("current_timestamp") || node.Name.EqualString("now") || node.Name.EqualString("localtimestamp") || node.Name.EqualString("localtime") - case *CurTimeFuncExpr: - return node.Name.EqualString("current_timestamp") || node.Name.EqualString("now") || node.Name.EqualString("localtimestamp") || node.Name.EqualString("localtime") - } - return false -} - // AtCount represents the '@' count in IdentifierCI type AtCount int @@ -1998,10 +2049,16 @@ func formatAddress(address string) string { func ContainsAggregation(e SQLNode) bool { hasAggregates := false _ = Walk(func(node SQLNode) (kontinue bool, err error) { - if _, isAggregate := node.(AggrFunc); isAggregate { - hasAggregates = true + switch node.(type) { + case *Offset: + // offsets here indicate that a possible aggregation has already been handled by an input + // so we don't need to worry about aggregation in the original return false, nil + case AggrFunc: + hasAggregates = true + return false, io.EOF } + return true, nil }, e) return hasAggregates @@ -2108,41 +2165,6 @@ func (s SelectExprs) AllAggregation() bool { return true } -func isExprLiteral(expr Expr) bool { - switch expr := expr.(type) { - case *Literal: - return true - case BoolVal: - return true - case *UnaryExpr: - return isExprLiteral(expr.Expr) - default: - return false - } -} - -func defaultRequiresParens(ct *ColumnType) bool { - // in 5.7 null value should be without parenthesis, in 8.0 it is allowed either way. - // so it is safe to not keep parenthesis around null. - if _, isNullVal := ct.Options.Default.(*NullVal); isNullVal { - return false - } - - switch strings.ToUpper(ct.Type) { - case "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT", "TINYBLOB", "BLOB", "MEDIUMBLOB", - "LONGBLOB", "JSON", "GEOMETRY", "POINT", - "LINESTRING", "POLYGON", "MULTIPOINT", "MULTILINESTRING", - "MULTIPOLYGON", "GEOMETRYCOLLECTION": - return true - } - - if isExprLiteral(ct.Options.Default) || isExprAliasForCurrentTimeStamp(ct.Options.Default) { - return false - } - - return true -} - // RemoveKeyspaceFromColName removes the Qualifier.Qualifier on all ColNames in the expression tree func RemoveKeyspaceFromColName(expr Expr) { RemoveKeyspace(expr) @@ -2167,6 +2189,11 @@ func convertStringToInt(integer string) int { return val } +func convertStringToUInt64(integer string) uint64 { + val, _ := strconv.ParseUint(integer, 10, 64) + return val +} + // SplitAndExpression breaks up the Expr into AND-separated conditions // and appends them to filters. Outer parenthesis are removed. Precedence // should be taken into account if expressions are recombined. @@ -2215,3 +2242,302 @@ func AndExpressions(exprs ...Expr) Expr { // Equals is the default Comparator for AST expressions. var Equals = &Comparator{} + +// ToString returns the type as a string +func (ty GeomPropertyType) ToString() string { + switch ty { + case IsEmpty: + return IsEmptyStr + case IsSimple: + return IsSimpleStr + case Envelope: + return EnvelopeStr + case GeometryType: + return GeometryTypeStr + case Dimension: + return DimensionStr + default: + return "Unknown GeomPropertyType" + } +} + +// ToString returns the type as a string +func (ty PointPropertyType) ToString() string { + switch ty { + case XCordinate: + return XCordinateStr + case YCordinate: + return YCordinateStr + case Latitude: + return LatitudeStr + case Longitude: + return LongitudeStr + default: + return "Unknown PointPropertyType" + } +} + +// ToString returns the type as a string +func (ty LinestrPropType) ToString() string { + switch ty { + case EndPoint: + return EndPointStr + case IsClosed: + return IsClosedStr + case Length: + return LengthStr + case NumPoints: + return NumPointsStr + case PointN: + return PointNStr + case StartPoint: + return StartPointStr + default: + return "Unknown LinestrPropType" + } +} + +// ToString returns the type as a string +func (ty PolygonPropType) ToString() string { + switch ty { + case Area: + return AreaStr + case Centroid: + return CentroidStr + case ExteriorRing: + return ExteriorRingStr + case InteriorRingN: + return InteriorRingNStr + case NumInteriorRings: + return NumInteriorRingsStr + default: + return "Unknown PolygonPropType" + } +} + +// ToString returns the type as a string +func (ty GeomCollPropType) ToString() string { + switch ty { + case GeometryN: + return GeometryNStr + case NumGeometries: + return NumGeometriesStr + default: + return "Unknown GeomCollPropType" + } +} + +// ToString returns the type as a string +func (ty GeomFromHashType) ToString() string { + switch ty { + case LatitudeFromHash: + return LatitudeFromHashStr + case LongitudeFromHash: + return LongitudeFromHashStr + case PointFromHash: + return PointFromHashStr + default: + return "Unknown GeomFromGeoHashType" + } +} + +// ToString returns the type as a string +func (ty GeomFormatType) ToString() string { + switch ty { + case BinaryFormat: + return BinaryFormatStr + case TextFormat: + return TextFormatStr + default: + return "Unknown GeomFormatType" + } +} + +// ToString returns the type as a string +func (ty GeomFromWktType) ToString() string { + switch ty { + case GeometryFromText: + return GeometryFromTextStr + case GeometryCollectionFromText: + return GeometryCollectionFromTextStr + case PointFromText: + return PointFromTextStr + case PolygonFromText: + return PolygonFromTextStr + case LineStringFromText: + return LineStringFromTextStr + case MultiPointFromText: + return MultiPointFromTextStr + case MultiLinestringFromText: + return MultiLinestringFromTextStr + case MultiPolygonFromText: + return MultiPolygonFromTextStr + default: + return "Unknown GeomFromWktType" + } +} + +// ToString returns the type as a string +func (ty GeomFromWkbType) ToString() string { + switch ty { + case GeometryFromWKB: + return GeometryFromWKBStr + case GeometryCollectionFromWKB: + return GeometryCollectionFromWKBStr + case PointFromWKB: + return PointFromWKBStr + case PolygonFromWKB: + return PolygonFromWKBStr + case LineStringFromWKB: + return LineStringFromWKBStr + case MultiPointFromWKB: + return MultiPointFromWKBStr + case MultiLinestringFromWKB: + return MultiLinestringFromWKBStr + case MultiPolygonFromWKB: + return MultiPolygonFromWKBStr + default: + return "Unknown GeomFromWktType" + } +} + +func getAliasedTableExprFromTableName(tblName TableName) *AliasedTableExpr { + return &AliasedTableExpr{ + Expr: tblName, + } +} + +func (node *IntervalDateExpr) IsSubtraction() bool { + switch node.Syntax { + case IntervalDateExprDateAdd, IntervalDateExprAdddate, IntervalDateExprBinaryAdd, IntervalDateExprBinaryAddLeft, IntervalDateExprTimestampadd: + return false + case IntervalDateExprDateSub, IntervalDateExprSubdate, IntervalDateExprBinarySub: + return true + default: + panic("invalid IntervalDateExpr syntax") + } +} + +func (node *IntervalDateExpr) NormalizedUnit() IntervalType { + if node.Unit == IntervalNone { + if node.Syntax == IntervalDateExprAdddate || node.Syntax == IntervalDateExprSubdate { + return IntervalDay + } + panic("IntervalDateExpr.Unit is not set") + } + return node.Unit +} + +func (node *IntervalDateExpr) FnName() string { + switch node.Syntax { + case IntervalDateExprDateAdd: + return "date_add" + case IntervalDateExprDateSub: + return "date_sub" + case IntervalDateExprAdddate: + return "adddate" + case IntervalDateExprSubdate: + return "subdate" + case IntervalDateExprTimestampadd: + return "timestampadd" + case IntervalDateExprBinaryAdd, IntervalDateExprBinaryAddLeft: + return "" + case IntervalDateExprBinarySub: + return "" + default: + return "" + } +} + +func IsDistinct(f AggrFunc) bool { + da, ok := f.(DistinctableAggr) + if !ok { + return false + } + return da.IsDistinct() +} + +// ToString returns the type as a string +func (ty KillType) ToString() string { + switch ty { + case QueryType: + return QueryStr + default: + return ConnectionStr + } +} + +// Indexes returns true, if the list of columns contains all the elements in the other list. +// It also returns the indexes of the columns in the list. +func (cols Columns) Indexes(subSetCols Columns) (bool, []int) { + var indexes []int + for _, subSetCol := range subSetCols { + colFound := false + for idx, col := range cols { + if col.Equal(subSetCol) { + colFound = true + indexes = append(indexes, idx) + break + } + } + if !colFound { + return false, nil + } + } + return true, indexes +} + +// MakeColumns is used to make a list of columns from a list of strings. +// This function is meant to be used in testing code. +func MakeColumns(colNames ...string) Columns { + var cols Columns + for _, name := range colNames { + cols = append(cols, NewIdentifierCI(name)) + } + return cols +} + +func VisitAllSelects(in SelectStatement, f func(p *Select, idx int) error) error { + v := visitor{} + return v.visitAllSelects(in, f) +} + +type visitor struct { + idx int +} + +func (v *visitor) visitAllSelects(in SelectStatement, f func(p *Select, idx int) error) error { + switch sel := in.(type) { + case *Select: + err := f(sel, v.idx) + v.idx++ + return err + case *Union: + err := v.visitAllSelects(sel.Left, f) + if err != nil { + return err + } + return v.visitAllSelects(sel.Right, f) + } + panic("switch should be exhaustive") +} + +// IsRestrict returns true if the reference action is of restrict type. +func (ra ReferenceAction) IsRestrict() bool { + switch ra { + case Restrict, NoAction, DefaultAction: + return true + default: + return false + } +} + +// IsLiteral returns true if the expression is of a literal type. +func IsLiteral(expr Expr) bool { + switch expr.(type) { + case *Argument, *NullVal, BoolVal, *Literal: + return true + default: + return false + } +} diff --git a/go/vt/sqlparser/ast_funcs_test.go b/go/vt/sqlparser/ast_funcs_test.go index b6a79da45ab..7bec47df96f 100644 --- a/go/vt/sqlparser/ast_funcs_test.go +++ b/go/vt/sqlparser/ast_funcs_test.go @@ -134,3 +134,41 @@ func TestSQLTypeToQueryType(t *testing.T) { }) } } + +// TestColumns_Indexes verifies the functionality of Indexes method on Columns. +func TestColumns_Indexes(t *testing.T) { + tests := []struct { + name string + cols Columns + subSetCols Columns + indexesWanted []int + }{ + { + name: "Not a subset", + cols: MakeColumns("col1", "col2", "col3"), + subSetCols: MakeColumns("col2", "col4"), + }, { + name: "Subset with 1 value", + cols: MakeColumns("col1", "col2", "col3"), + subSetCols: MakeColumns("col2"), + indexesWanted: []int{1}, + }, { + name: "Subset with multiple values", + cols: MakeColumns("col1", "col2", "col3", "col4", "col5"), + subSetCols: MakeColumns("col3", "col5", "col1"), + indexesWanted: []int{2, 4, 0}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + isSubset, indexes := tt.cols.Indexes(tt.subSetCols) + if tt.indexesWanted == nil { + require.False(t, isSubset) + require.Nil(t, indexes) + return + } + require.True(t, isSubset) + require.EqualValues(t, tt.indexesWanted, indexes) + }) + } +} diff --git a/go/vt/sqlparser/ast_rewrite.go b/go/vt/sqlparser/ast_rewrite.go index d81b147bb7a..0266876e201 100644 --- a/go/vt/sqlparser/ast_rewrite.go +++ b/go/vt/sqlparser/ast_rewrite.go @@ -54,10 +54,14 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl return a.rewriteRefOfAlterVschema(parent, node, replacer) case *AndExpr: return a.rewriteRefOfAndExpr(parent, node, replacer) - case Argument: - return a.rewriteArgument(parent, node, replacer) + case *AnyValue: + return a.rewriteRefOfAnyValue(parent, node, replacer) + case *Argument: + return a.rewriteRefOfArgument(parent, node, replacer) case *ArgumentLessWindowExpr: return a.rewriteRefOfArgumentLessWindowExpr(parent, node, replacer) + case *AssignmentExpr: + return a.rewriteRefOfAssignmentExpr(parent, node, replacer) case *AutoIncSpec: return a.rewriteRefOfAutoIncSpec(parent, node, replacer) case *Avg: @@ -180,6 +184,26 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl return a.rewriteRefOfFuncExpr(parent, node, replacer) case *GTIDFuncExpr: return a.rewriteRefOfGTIDFuncExpr(parent, node, replacer) + case *GeoHashFromLatLongExpr: + return a.rewriteRefOfGeoHashFromLatLongExpr(parent, node, replacer) + case *GeoHashFromPointExpr: + return a.rewriteRefOfGeoHashFromPointExpr(parent, node, replacer) + case *GeoJSONFromGeomExpr: + return a.rewriteRefOfGeoJSONFromGeomExpr(parent, node, replacer) + case *GeomCollPropertyFuncExpr: + return a.rewriteRefOfGeomCollPropertyFuncExpr(parent, node, replacer) + case *GeomFormatExpr: + return a.rewriteRefOfGeomFormatExpr(parent, node, replacer) + case *GeomFromGeoHashExpr: + return a.rewriteRefOfGeomFromGeoHashExpr(parent, node, replacer) + case *GeomFromGeoJSONExpr: + return a.rewriteRefOfGeomFromGeoJSONExpr(parent, node, replacer) + case *GeomFromTextExpr: + return a.rewriteRefOfGeomFromTextExpr(parent, node, replacer) + case *GeomFromWKBExpr: + return a.rewriteRefOfGeomFromWKBExpr(parent, node, replacer) + case *GeomPropertyFuncExpr: + return a.rewriteRefOfGeomPropertyFuncExpr(parent, node, replacer) case GroupBy: return a.rewriteGroupBy(parent, node, replacer) case *GroupConcatExpr: @@ -200,8 +224,8 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl return a.rewriteRefOfInsert(parent, node, replacer) case *InsertExpr: return a.rewriteRefOfInsertExpr(parent, node, replacer) - case *IntervalExpr: - return a.rewriteRefOfIntervalExpr(parent, node, replacer) + case *IntervalDateExpr: + return a.rewriteRefOfIntervalDateExpr(parent, node, replacer) case *IntervalFuncExpr: return a.rewriteRefOfIntervalFuncExpr(parent, node, replacer) case *IntroducerExpr: @@ -262,12 +286,16 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl return a.rewriteRefOfJtOnResponse(parent, node, replacer) case *KeyState: return a.rewriteRefOfKeyState(parent, node, replacer) + case *Kill: + return a.rewriteRefOfKill(parent, node, replacer) case *LagLeadExpr: return a.rewriteRefOfLagLeadExpr(parent, node, replacer) case *Limit: return a.rewriteRefOfLimit(parent, node, replacer) case *LineStringExpr: return a.rewriteRefOfLineStringExpr(parent, node, replacer) + case *LinestrPropertyFuncExpr: + return a.rewriteRefOfLinestrPropertyFuncExpr(parent, node, replacer) case ListArg: return a.rewriteListArg(parent, node, replacer) case *Literal: @@ -294,6 +322,12 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl return a.rewriteRefOfMin(parent, node, replacer) case *ModifyColumn: return a.rewriteRefOfModifyColumn(parent, node, replacer) + case *MultiLinestringExpr: + return a.rewriteRefOfMultiLinestringExpr(parent, node, replacer) + case *MultiPointExpr: + return a.rewriteRefOfMultiPointExpr(parent, node, replacer) + case *MultiPolygonExpr: + return a.rewriteRefOfMultiPolygonExpr(parent, node, replacer) case *NTHValueExpr: return a.rewriteRefOfNTHValueExpr(parent, node, replacer) case *NamedWindow: @@ -352,10 +386,16 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl return a.rewriteRefOfPerformanceSchemaFuncExpr(parent, node, replacer) case *PointExpr: return a.rewriteRefOfPointExpr(parent, node, replacer) + case *PointPropertyFuncExpr: + return a.rewriteRefOfPointPropertyFuncExpr(parent, node, replacer) case *PolygonExpr: return a.rewriteRefOfPolygonExpr(parent, node, replacer) + case *PolygonPropertyFuncExpr: + return a.rewriteRefOfPolygonPropertyFuncExpr(parent, node, replacer) case *PrepareStmt: return a.rewriteRefOfPrepareStmt(parent, node, replacer) + case *PurgeBinaryLogs: + return a.rewriteRefOfPurgeBinaryLogs(parent, node, replacer) case ReferenceAction: return a.rewriteReferenceAction(parent, node, replacer) case *ReferenceDefinition: @@ -454,8 +494,8 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl return a.rewriteRefOfTableSpec(parent, node, replacer) case *TablespaceOperation: return a.rewriteRefOfTablespaceOperation(parent, node, replacer) - case *TimestampFuncExpr: - return a.rewriteRefOfTimestampFuncExpr(parent, node, replacer) + case *TimestampDiffExpr: + return a.rewriteRefOfTimestampDiffExpr(parent, node, replacer) case *TrimFuncExpr: return a.rewriteRefOfTrimFuncExpr(parent, node, replacer) case *TruncateTable: @@ -1030,6 +1070,57 @@ func (a *application) rewriteRefOfAndExpr(parent SQLNode, node *AndExpr, replace } return true } +func (a *application) rewriteRefOfAnyValue(parent SQLNode, node *AnyValue, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Arg, func(newNode, parent SQLNode) { + parent.(*AnyValue).Arg = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfArgument(parent SQLNode, node *Argument, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} func (a *application) rewriteRefOfArgumentLessWindowExpr(parent SQLNode, node *ArgumentLessWindowExpr, replacer replacerFunc) bool { if node == nil { return true @@ -1057,6 +1148,38 @@ func (a *application) rewriteRefOfArgumentLessWindowExpr(parent SQLNode, node *A } return true } +func (a *application) rewriteRefOfAssignmentExpr(parent SQLNode, node *AssignmentExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Left, func(newNode, parent SQLNode) { + parent.(*AssignmentExpr).Left = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Right, func(newNode, parent SQLNode) { + parent.(*AssignmentExpr).Right = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} func (a *application) rewriteRefOfAutoIncSpec(parent SQLNode, node *AutoIncSpec, replacer replacerFunc) bool { if node == nil { return true @@ -2080,11 +2203,6 @@ func (a *application) rewriteRefOfCurTimeFuncExpr(parent SQLNode, node *CurTimeF }) { return false } - if !a.rewriteExpr(node, node.Fsp, func(newNode, parent SQLNode) { - parent.(*CurTimeFuncExpr).Fsp = newNode.(Expr) - }) { - return false - } if a.post != nil { a.cur.replacer = replacer a.cur.parent = parent @@ -2924,7 +3042,362 @@ func (a *application) rewriteRefOfFuncExpr(parent SQLNode, node *FuncExpr, repla } return true } -func (a *application) rewriteRefOfGTIDFuncExpr(parent SQLNode, node *GTIDFuncExpr, replacer replacerFunc) bool { +func (a *application) rewriteRefOfGTIDFuncExpr(parent SQLNode, node *GTIDFuncExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Set1, func(newNode, parent SQLNode) { + parent.(*GTIDFuncExpr).Set1 = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Set2, func(newNode, parent SQLNode) { + parent.(*GTIDFuncExpr).Set2 = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Timeout, func(newNode, parent SQLNode) { + parent.(*GTIDFuncExpr).Timeout = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Channel, func(newNode, parent SQLNode) { + parent.(*GTIDFuncExpr).Channel = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfGeoHashFromLatLongExpr(parent SQLNode, node *GeoHashFromLatLongExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Latitude, func(newNode, parent SQLNode) { + parent.(*GeoHashFromLatLongExpr).Latitude = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Longitude, func(newNode, parent SQLNode) { + parent.(*GeoHashFromLatLongExpr).Longitude = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.MaxLength, func(newNode, parent SQLNode) { + parent.(*GeoHashFromLatLongExpr).MaxLength = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfGeoHashFromPointExpr(parent SQLNode, node *GeoHashFromPointExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Point, func(newNode, parent SQLNode) { + parent.(*GeoHashFromPointExpr).Point = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.MaxLength, func(newNode, parent SQLNode) { + parent.(*GeoHashFromPointExpr).MaxLength = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfGeoJSONFromGeomExpr(parent SQLNode, node *GeoJSONFromGeomExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Geom, func(newNode, parent SQLNode) { + parent.(*GeoJSONFromGeomExpr).Geom = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.MaxDecimalDigits, func(newNode, parent SQLNode) { + parent.(*GeoJSONFromGeomExpr).MaxDecimalDigits = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Bitmask, func(newNode, parent SQLNode) { + parent.(*GeoJSONFromGeomExpr).Bitmask = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfGeomCollPropertyFuncExpr(parent SQLNode, node *GeomCollPropertyFuncExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.GeomColl, func(newNode, parent SQLNode) { + parent.(*GeomCollPropertyFuncExpr).GeomColl = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.PropertyDefArg, func(newNode, parent SQLNode) { + parent.(*GeomCollPropertyFuncExpr).PropertyDefArg = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfGeomFormatExpr(parent SQLNode, node *GeomFormatExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Geom, func(newNode, parent SQLNode) { + parent.(*GeomFormatExpr).Geom = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.AxisOrderOpt, func(newNode, parent SQLNode) { + parent.(*GeomFormatExpr).AxisOrderOpt = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfGeomFromGeoHashExpr(parent SQLNode, node *GeomFromGeoHashExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.GeoHash, func(newNode, parent SQLNode) { + parent.(*GeomFromGeoHashExpr).GeoHash = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.SridOpt, func(newNode, parent SQLNode) { + parent.(*GeomFromGeoHashExpr).SridOpt = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfGeomFromGeoJSONExpr(parent SQLNode, node *GeomFromGeoJSONExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.GeoJSON, func(newNode, parent SQLNode) { + parent.(*GeomFromGeoJSONExpr).GeoJSON = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.HigherDimHandlerOpt, func(newNode, parent SQLNode) { + parent.(*GeomFromGeoJSONExpr).HigherDimHandlerOpt = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Srid, func(newNode, parent SQLNode) { + parent.(*GeomFromGeoJSONExpr).Srid = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfGeomFromTextExpr(parent SQLNode, node *GeomFromTextExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.WktText, func(newNode, parent SQLNode) { + parent.(*GeomFromTextExpr).WktText = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Srid, func(newNode, parent SQLNode) { + parent.(*GeomFromTextExpr).Srid = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.AxisOrderOpt, func(newNode, parent SQLNode) { + parent.(*GeomFromTextExpr).AxisOrderOpt = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfGeomFromWKBExpr(parent SQLNode, node *GeomFromWKBExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.WkbBlob, func(newNode, parent SQLNode) { + parent.(*GeomFromWKBExpr).WkbBlob = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Srid, func(newNode, parent SQLNode) { + parent.(*GeomFromWKBExpr).Srid = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.AxisOrderOpt, func(newNode, parent SQLNode) { + parent.(*GeomFromWKBExpr).AxisOrderOpt = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfGeomPropertyFuncExpr(parent SQLNode, node *GeomPropertyFuncExpr, replacer replacerFunc) bool { if node == nil { return true } @@ -2936,23 +3409,8 @@ func (a *application) rewriteRefOfGTIDFuncExpr(parent SQLNode, node *GTIDFuncExp return true } } - if !a.rewriteExpr(node, node.Set1, func(newNode, parent SQLNode) { - parent.(*GTIDFuncExpr).Set1 = newNode.(Expr) - }) { - return false - } - if !a.rewriteExpr(node, node.Set2, func(newNode, parent SQLNode) { - parent.(*GTIDFuncExpr).Set2 = newNode.(Expr) - }) { - return false - } - if !a.rewriteExpr(node, node.Timeout, func(newNode, parent SQLNode) { - parent.(*GTIDFuncExpr).Timeout = newNode.(Expr) - }) { - return false - } - if !a.rewriteExpr(node, node.Channel, func(newNode, parent SQLNode) { - parent.(*GTIDFuncExpr).Channel = newNode.(Expr) + if !a.rewriteExpr(node, node.Geom, func(newNode, parent SQLNode) { + parent.(*GeomPropertyFuncExpr).Geom = newNode.(Expr) }) { return false } @@ -3226,8 +3684,8 @@ func (a *application) rewriteRefOfInsert(parent SQLNode, node *Insert, replacer }) { return false } - if !a.rewriteTableName(node, node.Table, func(newNode, parent SQLNode) { - parent.(*Insert).Table = newNode.(TableName) + if !a.rewriteRefOfAliasedTableExpr(node, node.Table, func(newNode, parent SQLNode) { + parent.(*Insert).Table = newNode.(*AliasedTableExpr) }) { return false } @@ -3303,7 +3761,7 @@ func (a *application) rewriteRefOfInsertExpr(parent SQLNode, node *InsertExpr, r } return true } -func (a *application) rewriteRefOfIntervalExpr(parent SQLNode, node *IntervalExpr, replacer replacerFunc) bool { +func (a *application) rewriteRefOfIntervalDateExpr(parent SQLNode, node *IntervalDateExpr, replacer replacerFunc) bool { if node == nil { return true } @@ -3315,8 +3773,13 @@ func (a *application) rewriteRefOfIntervalExpr(parent SQLNode, node *IntervalExp return true } } - if !a.rewriteExpr(node, node.Expr, func(newNode, parent SQLNode) { - parent.(*IntervalExpr).Expr = newNode.(Expr) + if !a.rewriteExpr(node, node.Date, func(newNode, parent SQLNode) { + parent.(*IntervalDateExpr).Date = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.Interval, func(newNode, parent SQLNode) { + parent.(*IntervalDateExpr).Interval = newNode.(Expr) }) { return false } @@ -4307,6 +4770,30 @@ func (a *application) rewriteRefOfKeyState(parent SQLNode, node *KeyState, repla } return true } +func (a *application) rewriteRefOfKill(parent SQLNode, node *Kill, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} func (a *application) rewriteRefOfLagLeadExpr(parent SQLNode, node *LagLeadExpr, replacer replacerFunc) bool { if node == nil { return true @@ -4413,6 +4900,38 @@ func (a *application) rewriteRefOfLineStringExpr(parent SQLNode, node *LineStrin } return true } +func (a *application) rewriteRefOfLinestrPropertyFuncExpr(parent SQLNode, node *LinestrPropertyFuncExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Linestring, func(newNode, parent SQLNode) { + parent.(*LinestrPropertyFuncExpr).Linestring = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.PropertyDefArg, func(newNode, parent SQLNode) { + parent.(*LinestrPropertyFuncExpr).PropertyDefArg = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} func (a *application) rewriteRefOfLiteral(parent SQLNode, node *Literal, replacer replacerFunc) bool { if node == nil { return true @@ -4732,6 +5251,87 @@ func (a *application) rewriteRefOfModifyColumn(parent SQLNode, node *ModifyColum } return true } +func (a *application) rewriteRefOfMultiLinestringExpr(parent SQLNode, node *MultiLinestringExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExprs(node, node.LinestringParams, func(newNode, parent SQLNode) { + parent.(*MultiLinestringExpr).LinestringParams = newNode.(Exprs) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfMultiPointExpr(parent SQLNode, node *MultiPointExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExprs(node, node.PointParams, func(newNode, parent SQLNode) { + parent.(*MultiPointExpr).PointParams = newNode.(Exprs) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} +func (a *application) rewriteRefOfMultiPolygonExpr(parent SQLNode, node *MultiPolygonExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExprs(node, node.PolygonParams, func(newNode, parent SQLNode) { + parent.(*MultiPolygonExpr).PolygonParams = newNode.(Exprs) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} func (a *application) rewriteRefOfNTHValueExpr(parent SQLNode, node *NTHValueExpr, replacer replacerFunc) bool { if node == nil { return true @@ -4989,12 +5589,15 @@ func (a *application) rewriteRefOfOffset(parent SQLNode, node *Offset, replacer return true } } + if !a.rewriteExpr(node, node.Original, func(newNode, parent SQLNode) { + parent.(*Offset).Original = newNode.(Expr) + }) { + return false + } if a.post != nil { - if a.pre == nil { - a.cur.replacer = replacer - a.cur.parent = parent - a.cur.node = node - } + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node if !a.post(&a.cur) { return false } @@ -5642,6 +6245,38 @@ func (a *application) rewriteRefOfPointExpr(parent SQLNode, node *PointExpr, rep } return true } +func (a *application) rewriteRefOfPointPropertyFuncExpr(parent SQLNode, node *PointPropertyFuncExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Point, func(newNode, parent SQLNode) { + parent.(*PointPropertyFuncExpr).Point = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.ValueToSet, func(newNode, parent SQLNode) { + parent.(*PointPropertyFuncExpr).ValueToSet = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} func (a *application) rewriteRefOfPolygonExpr(parent SQLNode, node *PolygonExpr, replacer replacerFunc) bool { if node == nil { return true @@ -5669,6 +6304,38 @@ func (a *application) rewriteRefOfPolygonExpr(parent SQLNode, node *PolygonExpr, } return true } +func (a *application) rewriteRefOfPolygonPropertyFuncExpr(parent SQLNode, node *PolygonPropertyFuncExpr, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteExpr(node, node.Polygon, func(newNode, parent SQLNode) { + parent.(*PolygonPropertyFuncExpr).Polygon = newNode.(Expr) + }) { + return false + } + if !a.rewriteExpr(node, node.PropertyDefArg, func(newNode, parent SQLNode) { + parent.(*PolygonPropertyFuncExpr).PropertyDefArg = newNode.(Expr) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} func (a *application) rewriteRefOfPrepareStmt(parent SQLNode, node *PrepareStmt, replacer replacerFunc) bool { if node == nil { return true @@ -5706,6 +6373,30 @@ func (a *application) rewriteRefOfPrepareStmt(parent SQLNode, node *PrepareStmt, } return true } +func (a *application) rewriteRefOfPurgeBinaryLogs(parent SQLNode, node *PurgeBinaryLogs, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if a.post != nil { + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } + if !a.post(&a.cur) { + return false + } + } + return true +} func (a *application) rewriteRefOfReferenceDefinition(parent SQLNode, node *ReferenceDefinition, replacer replacerFunc) bool { if node == nil { return true @@ -7294,7 +7985,7 @@ func (a *application) rewriteRefOfTablespaceOperation(parent SQLNode, node *Tabl } return true } -func (a *application) rewriteRefOfTimestampFuncExpr(parent SQLNode, node *TimestampFuncExpr, replacer replacerFunc) bool { +func (a *application) rewriteRefOfTimestampDiffExpr(parent SQLNode, node *TimestampDiffExpr, replacer replacerFunc) bool { if node == nil { return true } @@ -7307,12 +7998,12 @@ func (a *application) rewriteRefOfTimestampFuncExpr(parent SQLNode, node *Timest } } if !a.rewriteExpr(node, node.Expr1, func(newNode, parent SQLNode) { - parent.(*TimestampFuncExpr).Expr1 = newNode.(Expr) + parent.(*TimestampDiffExpr).Expr1 = newNode.(Expr) }) { return false } if !a.rewriteExpr(node, node.Expr2, func(newNode, parent SQLNode) { - parent.(*TimestampFuncExpr).Expr2 = newNode.(Expr) + parent.(*TimestampDiffExpr).Expr2 = newNode.(Expr) }) { return false } @@ -8325,6 +9016,8 @@ func (a *application) rewriteAggrFunc(parent SQLNode, node AggrFunc, replacer re return true } switch node := node.(type) { + case *AnyValue: + return a.rewriteRefOfAnyValue(parent, node, replacer) case *Avg: return a.rewriteRefOfAvg(parent, node, replacer) case *BitAnd: @@ -8423,6 +9116,8 @@ func (a *application) rewriteCallable(parent SQLNode, node Callable, replacer re return true } switch node := node.(type) { + case *AnyValue: + return a.rewriteRefOfAnyValue(parent, node, replacer) case *ArgumentLessWindowExpr: return a.rewriteRefOfArgumentLessWindowExpr(parent, node, replacer) case *Avg: @@ -8449,10 +9144,32 @@ func (a *application) rewriteCallable(parent SQLNode, node Callable, replacer re return a.rewriteRefOfFuncExpr(parent, node, replacer) case *GTIDFuncExpr: return a.rewriteRefOfGTIDFuncExpr(parent, node, replacer) + case *GeoHashFromLatLongExpr: + return a.rewriteRefOfGeoHashFromLatLongExpr(parent, node, replacer) + case *GeoHashFromPointExpr: + return a.rewriteRefOfGeoHashFromPointExpr(parent, node, replacer) + case *GeoJSONFromGeomExpr: + return a.rewriteRefOfGeoJSONFromGeomExpr(parent, node, replacer) + case *GeomCollPropertyFuncExpr: + return a.rewriteRefOfGeomCollPropertyFuncExpr(parent, node, replacer) + case *GeomFormatExpr: + return a.rewriteRefOfGeomFormatExpr(parent, node, replacer) + case *GeomFromGeoHashExpr: + return a.rewriteRefOfGeomFromGeoHashExpr(parent, node, replacer) + case *GeomFromGeoJSONExpr: + return a.rewriteRefOfGeomFromGeoJSONExpr(parent, node, replacer) + case *GeomFromTextExpr: + return a.rewriteRefOfGeomFromTextExpr(parent, node, replacer) + case *GeomFromWKBExpr: + return a.rewriteRefOfGeomFromWKBExpr(parent, node, replacer) + case *GeomPropertyFuncExpr: + return a.rewriteRefOfGeomPropertyFuncExpr(parent, node, replacer) case *GroupConcatExpr: return a.rewriteRefOfGroupConcatExpr(parent, node, replacer) case *InsertExpr: return a.rewriteRefOfInsertExpr(parent, node, replacer) + case *IntervalDateExpr: + return a.rewriteRefOfIntervalDateExpr(parent, node, replacer) case *IntervalFuncExpr: return a.rewriteRefOfIntervalFuncExpr(parent, node, replacer) case *JSONArrayExpr: @@ -8499,6 +9216,8 @@ func (a *application) rewriteCallable(parent SQLNode, node Callable, replacer re return a.rewriteRefOfLagLeadExpr(parent, node, replacer) case *LineStringExpr: return a.rewriteRefOfLineStringExpr(parent, node, replacer) + case *LinestrPropertyFuncExpr: + return a.rewriteRefOfLinestrPropertyFuncExpr(parent, node, replacer) case *LocateExpr: return a.rewriteRefOfLocateExpr(parent, node, replacer) case *MatchExpr: @@ -8509,6 +9228,12 @@ func (a *application) rewriteCallable(parent SQLNode, node Callable, replacer re return a.rewriteRefOfMemberOfExpr(parent, node, replacer) case *Min: return a.rewriteRefOfMin(parent, node, replacer) + case *MultiLinestringExpr: + return a.rewriteRefOfMultiLinestringExpr(parent, node, replacer) + case *MultiPointExpr: + return a.rewriteRefOfMultiPointExpr(parent, node, replacer) + case *MultiPolygonExpr: + return a.rewriteRefOfMultiPolygonExpr(parent, node, replacer) case *NTHValueExpr: return a.rewriteRefOfNTHValueExpr(parent, node, replacer) case *NamedWindow: @@ -8519,8 +9244,12 @@ func (a *application) rewriteCallable(parent SQLNode, node Callable, replacer re return a.rewriteRefOfPerformanceSchemaFuncExpr(parent, node, replacer) case *PointExpr: return a.rewriteRefOfPointExpr(parent, node, replacer) + case *PointPropertyFuncExpr: + return a.rewriteRefOfPointPropertyFuncExpr(parent, node, replacer) case *PolygonExpr: return a.rewriteRefOfPolygonExpr(parent, node, replacer) + case *PolygonPropertyFuncExpr: + return a.rewriteRefOfPolygonPropertyFuncExpr(parent, node, replacer) case *RegexpInstrExpr: return a.rewriteRefOfRegexpInstrExpr(parent, node, replacer) case *RegexpLikeExpr: @@ -8533,8 +9262,8 @@ func (a *application) rewriteCallable(parent SQLNode, node Callable, replacer re return a.rewriteRefOfSubstrExpr(parent, node, replacer) case *Sum: return a.rewriteRefOfSum(parent, node, replacer) - case *TimestampFuncExpr: - return a.rewriteRefOfTimestampFuncExpr(parent, node, replacer) + case *TimestampDiffExpr: + return a.rewriteRefOfTimestampDiffExpr(parent, node, replacer) case *TrimFuncExpr: return a.rewriteRefOfTrimFuncExpr(parent, node, replacer) case *UpdateXMLExpr: @@ -8641,10 +9370,14 @@ func (a *application) rewriteExpr(parent SQLNode, node Expr, replacer replacerFu switch node := node.(type) { case *AndExpr: return a.rewriteRefOfAndExpr(parent, node, replacer) - case Argument: - return a.rewriteArgument(parent, node, replacer) + case *AnyValue: + return a.rewriteRefOfAnyValue(parent, node, replacer) + case *Argument: + return a.rewriteRefOfArgument(parent, node, replacer) case *ArgumentLessWindowExpr: return a.rewriteRefOfArgumentLessWindowExpr(parent, node, replacer) + case *AssignmentExpr: + return a.rewriteRefOfAssignmentExpr(parent, node, replacer) case *Avg: return a.rewriteRefOfAvg(parent, node, replacer) case *BetweenExpr: @@ -8697,12 +9430,32 @@ func (a *application) rewriteExpr(parent SQLNode, node Expr, replacer replacerFu return a.rewriteRefOfFuncExpr(parent, node, replacer) case *GTIDFuncExpr: return a.rewriteRefOfGTIDFuncExpr(parent, node, replacer) + case *GeoHashFromLatLongExpr: + return a.rewriteRefOfGeoHashFromLatLongExpr(parent, node, replacer) + case *GeoHashFromPointExpr: + return a.rewriteRefOfGeoHashFromPointExpr(parent, node, replacer) + case *GeoJSONFromGeomExpr: + return a.rewriteRefOfGeoJSONFromGeomExpr(parent, node, replacer) + case *GeomCollPropertyFuncExpr: + return a.rewriteRefOfGeomCollPropertyFuncExpr(parent, node, replacer) + case *GeomFormatExpr: + return a.rewriteRefOfGeomFormatExpr(parent, node, replacer) + case *GeomFromGeoHashExpr: + return a.rewriteRefOfGeomFromGeoHashExpr(parent, node, replacer) + case *GeomFromGeoJSONExpr: + return a.rewriteRefOfGeomFromGeoJSONExpr(parent, node, replacer) + case *GeomFromTextExpr: + return a.rewriteRefOfGeomFromTextExpr(parent, node, replacer) + case *GeomFromWKBExpr: + return a.rewriteRefOfGeomFromWKBExpr(parent, node, replacer) + case *GeomPropertyFuncExpr: + return a.rewriteRefOfGeomPropertyFuncExpr(parent, node, replacer) case *GroupConcatExpr: return a.rewriteRefOfGroupConcatExpr(parent, node, replacer) case *InsertExpr: return a.rewriteRefOfInsertExpr(parent, node, replacer) - case *IntervalExpr: - return a.rewriteRefOfIntervalExpr(parent, node, replacer) + case *IntervalDateExpr: + return a.rewriteRefOfIntervalDateExpr(parent, node, replacer) case *IntervalFuncExpr: return a.rewriteRefOfIntervalFuncExpr(parent, node, replacer) case *IntroducerExpr: @@ -8753,6 +9506,8 @@ func (a *application) rewriteExpr(parent SQLNode, node Expr, replacer replacerFu return a.rewriteRefOfLagLeadExpr(parent, node, replacer) case *LineStringExpr: return a.rewriteRefOfLineStringExpr(parent, node, replacer) + case *LinestrPropertyFuncExpr: + return a.rewriteRefOfLinestrPropertyFuncExpr(parent, node, replacer) case ListArg: return a.rewriteListArg(parent, node, replacer) case *Literal: @@ -8769,6 +9524,12 @@ func (a *application) rewriteExpr(parent SQLNode, node Expr, replacer replacerFu return a.rewriteRefOfMemberOfExpr(parent, node, replacer) case *Min: return a.rewriteRefOfMin(parent, node, replacer) + case *MultiLinestringExpr: + return a.rewriteRefOfMultiLinestringExpr(parent, node, replacer) + case *MultiPointExpr: + return a.rewriteRefOfMultiPointExpr(parent, node, replacer) + case *MultiPolygonExpr: + return a.rewriteRefOfMultiPolygonExpr(parent, node, replacer) case *NTHValueExpr: return a.rewriteRefOfNTHValueExpr(parent, node, replacer) case *NamedWindow: @@ -8787,8 +9548,12 @@ func (a *application) rewriteExpr(parent SQLNode, node Expr, replacer replacerFu return a.rewriteRefOfPerformanceSchemaFuncExpr(parent, node, replacer) case *PointExpr: return a.rewriteRefOfPointExpr(parent, node, replacer) + case *PointPropertyFuncExpr: + return a.rewriteRefOfPointPropertyFuncExpr(parent, node, replacer) case *PolygonExpr: return a.rewriteRefOfPolygonExpr(parent, node, replacer) + case *PolygonPropertyFuncExpr: + return a.rewriteRefOfPolygonPropertyFuncExpr(parent, node, replacer) case *RegexpInstrExpr: return a.rewriteRefOfRegexpInstrExpr(parent, node, replacer) case *RegexpLikeExpr: @@ -8811,8 +9576,8 @@ func (a *application) rewriteExpr(parent SQLNode, node Expr, replacer replacerFu return a.rewriteRefOfSubstrExpr(parent, node, replacer) case *Sum: return a.rewriteRefOfSum(parent, node, replacer) - case *TimestampFuncExpr: - return a.rewriteRefOfTimestampFuncExpr(parent, node, replacer) + case *TimestampDiffExpr: + return a.rewriteRefOfTimestampDiffExpr(parent, node, replacer) case *TrimFuncExpr: return a.rewriteRefOfTrimFuncExpr(parent, node, replacer) case *UnaryExpr: @@ -8965,6 +9730,8 @@ func (a *application) rewriteStatement(parent SQLNode, node Statement, replacer return a.rewriteRefOfFlush(parent, node, replacer) case *Insert: return a.rewriteRefOfInsert(parent, node, replacer) + case *Kill: + return a.rewriteRefOfKill(parent, node, replacer) case *Load: return a.rewriteRefOfLoad(parent, node, replacer) case *LockTables: @@ -8975,6 +9742,8 @@ func (a *application) rewriteStatement(parent SQLNode, node Statement, replacer return a.rewriteRefOfOtherRead(parent, node, replacer) case *PrepareStmt: return a.rewriteRefOfPrepareStmt(parent, node, replacer) + case *PurgeBinaryLogs: + return a.rewriteRefOfPurgeBinaryLogs(parent, node, replacer) case *Release: return a.rewriteRefOfRelease(parent, node, replacer) case *RenameTable: @@ -9059,27 +9828,6 @@ func (a *application) rewriteAlgorithmValue(parent SQLNode, node AlgorithmValue, } return true } -func (a *application) rewriteArgument(parent SQLNode, node Argument, replacer replacerFunc) bool { - if a.pre != nil { - a.cur.replacer = replacer - a.cur.parent = parent - a.cur.node = node - if !a.pre(&a.cur) { - return true - } - } - if a.post != nil { - if a.pre == nil { - a.cur.replacer = replacer - a.cur.parent = parent - a.cur.node = node - } - if !a.post(&a.cur) { - return false - } - } - return true -} func (a *application) rewriteBoolVal(parent SQLNode, node BoolVal, replacer replacerFunc) bool { if a.pre != nil { a.cur.replacer = replacer diff --git a/go/vt/sqlparser/ast_rewriting.go b/go/vt/sqlparser/ast_rewriting.go index 9a66202e7be..37d2e04abce 100644 --- a/go/vt/sqlparser/ast_rewriting.go +++ b/go/vt/sqlparser/ast_rewriting.go @@ -75,19 +75,24 @@ func (r *ReservedVars) ReserveAll(names ...string) bool { // with the same name already exists, it'll be suffixed with a numberic identifier // to make it unique. func (r *ReservedVars) ReserveColName(col *ColName) string { - compliantName := col.CompliantName() - if r.fast && strings.HasPrefix(compliantName, r.prefix) { - compliantName = "_" + compliantName + reserveName := col.CompliantName() + if r.fast && strings.HasPrefix(reserveName, r.prefix) { + reserveName = "_" + reserveName } + return r.ReserveVariable(reserveName) +} + +func (r *ReservedVars) ReserveVariable(compliantName string) string { joinVar := []byte(compliantName) baseLen := len(joinVar) i := int64(1) for { if _, ok := r.reserved[string(joinVar)]; !ok { - r.reserved[string(joinVar)] = struct{}{} - return string(joinVar) + bvar := string(joinVar) + r.reserved[bvar] = struct{}{} + return bvar } joinVar = strconv.AppendInt(joinVar[:baseLen], i, 10) i++ @@ -128,8 +133,9 @@ func (r *ReservedVars) ReserveHasValuesSubQuery() string { r.sqNext++ joinVar := strconv.AppendInt(HasValueSubQueryBaseName, r.sqNext, 10) if _, ok := r.reserved[string(joinVar)]; !ok { - r.reserved[string(joinVar)] = struct{}{} - return string(joinVar) + bvar := string(joinVar) + r.reserved[bvar] = struct{}{} + return bvar } } } @@ -159,7 +165,6 @@ func (r *ReservedVars) nextUnusedVar() string { for { r.counter++ r.next = strconv.AppendInt(r.next[:len(r.prefix)], int64(r.counter), 10) - if _, ok := r.reserved[string(r.next)]; !ok { bvar := string(r.next) r.reserved[bvar] = struct{}{} @@ -325,6 +330,8 @@ func (er *astRewriter) rewriteDown(node SQLNode, _ SQLNode) bool { switch node := node.(type) { case *Select: er.visitSelect(node) + case *PrepareStmt, *ExecuteStmt: + return false // nothing to rewrite here. } return true } @@ -357,6 +364,8 @@ func (er *astRewriter) rewriteUp(cursor *Cursor) bool { er.rewriteShowBasic(node) case *ExistsExpr: er.existsRewrite(cursor, node) + case DistinctableAggr: + er.rewriteDistinctableAggr(cursor, node) } return true } @@ -526,6 +535,7 @@ func (er *astRewriter) sysVarRewrite(cursor *Cursor, node *Variable) { sysvars.Charset.Name, sysvars.ClientFoundRows.Name, sysvars.DDLStrategy.Name, + sysvars.MigrationContext.Name, sysvars.Names.Name, sysvars.TransactionMode.Name, sysvars.ReadAfterWriteGTID.Name, @@ -564,12 +574,19 @@ var funcRewrites = map[string]string{ } func (er *astRewriter) funcRewrite(cursor *Cursor, node *FuncExpr) { - bindVar, found := funcRewrites[node.Name.Lowered()] + lowered := node.Name.Lowered() + if lowered == "last_insert_id" && len(node.Exprs) > 0 { + // if we are dealing with is LAST_INSERT_ID() with an argument, we don't need to rewrite it. + // with an argument, this is an identity function that will update the session state and + // sets the correct fields in the OK TCP packet that we send back + return + } + bindVar, found := funcRewrites[lowered] if !found || (bindVar == DBVarName && !er.shouldRewriteDatabaseFunc) { return } if len(node.Exprs) > 0 { - er.err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Argument to %s() not supported", node.Name.Lowered()) + er.err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Argument to %s() not supported", lowered) return } cursor.Replace(bindVarExpression(bindVar)) @@ -669,6 +686,18 @@ func (er *astRewriter) existsRewrite(cursor *Cursor, node *ExistsExpr) { sel.GroupBy = nil } +// rewriteDistinctableAggr removed Distinct from Max and Min Aggregations as it does not impact the result. But, makes the plan simpler. +func (er *astRewriter) rewriteDistinctableAggr(cursor *Cursor, node DistinctableAggr) { + if !node.IsDistinct() { + return + } + switch aggr := node.(type) { + case *Max, *Min: + aggr.SetDistinct(false) + er.bindVars.NoteRewrite() + } +} + func bindVarExpression(name string) Expr { return NewArgument(name) } diff --git a/go/vt/sqlparser/ast_rewriting_test.go b/go/vt/sqlparser/ast_rewriting_test.go index 6fe59acbc85..c116960d139 100644 --- a/go/vt/sqlparser/ast_rewriting_test.go +++ b/go/vt/sqlparser/ast_rewriting_test.go @@ -37,13 +37,12 @@ type testCaseSysVar struct { } type myTestCase struct { - in, expected string - liid, db, foundRows, rowCount, rawGTID, rawTimeout, sessTrackGTID bool - ddlStrategy, sessionUUID, sessionEnableSystemSettings bool - udv int - autocommit, clientFoundRows, skipQueryPlanCache, socket, queryTimeout bool - sqlSelectLimit, transactionMode, workload, version, versionComment bool - txIsolation bool + in, expected string + liid, db, foundRows, rowCount, rawGTID, rawTimeout, sessTrackGTID bool + ddlStrategy, migrationContext, sessionUUID, sessionEnableSystemSettings bool + udv int + autocommit, clientFoundRows, skipQueryPlanCache, socket, queryTimeout bool + sqlSelectLimit, transactionMode, workload, version, versionComment bool } func TestRewrites(in *testing.T) { @@ -189,6 +188,10 @@ func TestRewrites(in *testing.T) { in: `select * from user where col = @@ddl_strategy`, expected: "select * from user where col = :__vtddl_strategy", ddlStrategy: true, + }, { + in: `select * from user where col = @@migration_context`, + expected: "select * from user where col = :__vtmigration_context", + migrationContext: true, }, { in: `select * from user where col = @@read_after_write_gtid OR col = @@read_after_write_timeout OR col = @@session_track_gtids`, expected: "select * from user where col = :__vtread_after_write_gtid or col = :__vtread_after_write_timeout or col = :__vtsession_track_gtids", @@ -289,6 +292,9 @@ func TestRewrites(in *testing.T) { }, { in: "SELECT id, name, salary FROM user_details", expected: "SELECT id, name, salary FROM (select user.id, user.name, user_extra.salary from user join user_extra where user.id = user_extra.user_id) as user_details", + }, { + in: "select max(distinct c1), min(distinct c2), avg(distinct c3), sum(distinct c4), count(distinct c5), group_concat(distinct c6) from tbl", + expected: "select max(c1) as `max(distinct c1)`, min(c2) as `min(distinct c2)`, avg(distinct c3), sum(distinct c4), count(distinct c5), group_concat(distinct c6) from tbl", }, { in: "SHOW VARIABLES", expected: "SHOW VARIABLES", @@ -301,6 +307,7 @@ func TestRewrites(in *testing.T) { version: true, versionComment: true, ddlStrategy: true, + migrationContext: true, sessionUUID: true, sessionEnableSystemSettings: true, rawGTID: true, @@ -320,6 +327,7 @@ func TestRewrites(in *testing.T) { version: true, versionComment: true, ddlStrategy: true, + migrationContext: true, sessionUUID: true, sessionEnableSystemSettings: true, rawGTID: true, @@ -364,6 +372,7 @@ func TestRewrites(in *testing.T) { assert.Equal(tc.workload, result.NeedsSysVar(sysvars.Workload.Name), "should need :__vtworkload") assert.Equal(tc.queryTimeout, result.NeedsSysVar(sysvars.QueryTimeout.Name), "should need :__vtquery_timeout") assert.Equal(tc.ddlStrategy, result.NeedsSysVar(sysvars.DDLStrategy.Name), "should need ddlStrategy") + assert.Equal(tc.migrationContext, result.NeedsSysVar(sysvars.MigrationContext.Name), "should need migrationContext") assert.Equal(tc.sessionUUID, result.NeedsSysVar(sysvars.SessionUUID.Name), "should need sessionUUID") assert.Equal(tc.sessionEnableSystemSettings, result.NeedsSysVar(sysvars.SessionEnableSystemSettings.Name), "should need sessionEnableSystemSettings") assert.Equal(tc.rawGTID, result.NeedsSysVar(sysvars.ReadAfterWriteGTID.Name), "should need rawGTID") diff --git a/go/vt/sqlparser/ast_test.go b/go/vt/sqlparser/ast_test.go index 71c56594875..7c957674b99 100644 --- a/go/vt/sqlparser/ast_test.go +++ b/go/vt/sqlparser/ast_test.go @@ -49,60 +49,35 @@ func TestAppend(t *testing.T) { } func TestSelect(t *testing.T) { - tree, err := Parse("select * from t where a = 1") + e1, err := ParseExpr("a = 1") require.NoError(t, err) - expr := tree.(*Select).Where.Expr + e2, err := ParseExpr("b = 2") + require.NoError(t, err) + t.Run("single predicate where", func(t *testing.T) { + sel := &Select{} + sel.AddWhere(e1) + assert.Equal(t, " where a = 1", String(sel.Where)) + }) - sel := &Select{} - sel.AddWhere(expr) - buf := NewTrackedBuffer(nil) - sel.Where.Format(buf) - want := " where a = 1" - if buf.String() != want { - t.Errorf("where: %q, want %s", buf.String(), want) - } - sel.AddWhere(expr) - buf = NewTrackedBuffer(nil) - sel.Where.Format(buf) - want = " where a = 1" - if buf.String() != want { - t.Errorf("where: %q, want %s", buf.String(), want) - } - sel = &Select{} - sel.AddHaving(expr) - buf = NewTrackedBuffer(nil) - sel.Having.Format(buf) - want = " having a = 1" - if buf.String() != want { - t.Errorf("having: %q, want %s", buf.String(), want) - } - sel.AddHaving(expr) - buf = NewTrackedBuffer(nil) - sel.Having.Format(buf) - want = " having a = 1 and a = 1" - if buf.String() != want { - t.Errorf("having: %q, want %s", buf.String(), want) - } + t.Run("single predicate having", func(t *testing.T) { + sel := &Select{} + sel.AddHaving(e1) + assert.Equal(t, " having a = 1", String(sel.Having)) + }) - tree, err = Parse("select * from t where a = 1 or b = 1") - require.NoError(t, err) - expr = tree.(*Select).Where.Expr - sel = &Select{} - sel.AddWhere(expr) - buf = NewTrackedBuffer(nil) - sel.Where.Format(buf) - want = " where a = 1 or b = 1" - if buf.String() != want { - t.Errorf("where: %q, want %s", buf.String(), want) - } - sel = &Select{} - sel.AddHaving(expr) - buf = NewTrackedBuffer(nil) - sel.Having.Format(buf) - want = " having a = 1 or b = 1" - if buf.String() != want { - t.Errorf("having: %q, want %s", buf.String(), want) - } + t.Run("double predicate where", func(t *testing.T) { + sel := &Select{} + sel.AddWhere(e1) + sel.AddWhere(e2) + assert.Equal(t, " where a = 1 and b = 2", String(sel.Where)) + }) + + t.Run("double predicate having", func(t *testing.T) { + sel := &Select{} + sel.AddHaving(e1) + sel.AddHaving(e2) + assert.Equal(t, " having a = 1 and b = 2", String(sel.Having)) + }) } func TestUpdate(t *testing.T) { @@ -275,7 +250,7 @@ func TestSetAutocommitON(t *testing.T) { t.Errorf("SET statement value is not StrVal: %T", v) } - if "on" != v.Val { + if v.Val != "on" { t.Errorf("SET statement value want: on, got: %s", v.Val) } default: @@ -300,7 +275,7 @@ func TestSetAutocommitON(t *testing.T) { t.Errorf("SET statement value is not StrVal: %T", v) } - if "on" != v.Val { + if v.Val != "on" { t.Errorf("SET statement value want: on, got: %s", v.Val) } default: @@ -327,7 +302,7 @@ func TestSetAutocommitOFF(t *testing.T) { t.Errorf("SET statement value is not StrVal: %T", v) } - if "off" != v.Val { + if v.Val != "off" { t.Errorf("SET statement value want: on, got: %s", v.Val) } default: @@ -352,7 +327,7 @@ func TestSetAutocommitOFF(t *testing.T) { t.Errorf("SET statement value is not StrVal: %T", v) } - if "off" != v.Val { + if v.Val != "off" { t.Errorf("SET statement value want: on, got: %s", v.Val) } default: @@ -376,23 +351,6 @@ func TestWhere(t *testing.T) { } } -func TestIsAggregate(t *testing.T) { - f := FuncExpr{Name: NewIdentifierCI("avg")} - if !f.IsAggregate() { - t.Error("IsAggregate: false, want true") - } - - f = FuncExpr{Name: NewIdentifierCI("Avg")} - if !f.IsAggregate() { - t.Error("IsAggregate: false, want true") - } - - f = FuncExpr{Name: NewIdentifierCI("foo")} - if f.IsAggregate() { - t.Error("IsAggregate: true, want false") - } -} - func TestIsImpossible(t *testing.T) { f := ComparisonExpr{ Operator: NotEqualOp, @@ -486,9 +444,6 @@ func TestReplaceExpr(t *testing.T) { }, { in: "select * from t where -(select a from b)", out: "-:a", - }, { - in: "select * from t where interval (select a from b) aa", - out: "interval :a aa", }, { in: "select * from t where (select a from b) collate utf8", out: ":a collate utf8", @@ -771,7 +726,22 @@ func TestSplitStatementToPieces(t *testing.T) { "`createtime` datetime NOT NULL DEFAULT NOW() COMMENT 'create time;'," + "`comment` varchar(100) NOT NULL DEFAULT '' COMMENT 'comment'," + "PRIMARY KEY (`id`))", - }} + }, { + input: "create table t1 (id int primary key); create table t2 (id int primary key);", + output: "create table t1 (id int primary key); create table t2 (id int primary key)", + }, { + input: ";;; create table t1 (id int primary key);;; ;create table t2 (id int primary key);", + output: " create table t1 (id int primary key);create table t2 (id int primary key)", + }, { + // The input doesn't have to be valid SQL statements! + input: ";create table t1 ;create table t2 (id;", + output: "create table t1 ;create table t2 (id", + }, { + // Ignore quoted semicolon + input: ";create table t1 ';';;;create table t2 (id;", + output: "create table t1 ';';create table t2 (id", + }, + } for _, tcase := range testcases { t.Run(tcase.input, func(t *testing.T) { @@ -835,3 +805,32 @@ func BenchmarkStringTraces(b *testing.B) { }) } } + +func TestCloneComments(t *testing.T) { + c := []string{"/*vt+ a=b */"} + parsedComments := Comments(c).Parsed() + directives := parsedComments.Directives() + { + assert.NotEmpty(t, directives.m) + val, ok := directives.m["a"] + assert.Truef(t, ok, "directives map: %v", directives.m) + assert.Equal(t, "b", val) + } + cloned := CloneRefOfParsedComments(parsedComments) + cloned.ResetDirectives() + clonedDirectives := cloned.Directives() + { + assert.NotEmpty(t, clonedDirectives.m) + val, ok := clonedDirectives.m["a"] + assert.Truef(t, ok, "directives map: %v", directives.m) + assert.Equal(t, "b", val) + } + { + delete(directives.m, "a") + assert.Empty(t, directives.m) + + val, ok := clonedDirectives.m["a"] + assert.Truef(t, ok, "directives map: %v", directives.m) + assert.Equal(t, "b", val) + } +} diff --git a/go/vt/sqlparser/ast_visit.go b/go/vt/sqlparser/ast_visit.go index 569f245b0ec..d791700d656 100644 --- a/go/vt/sqlparser/ast_visit.go +++ b/go/vt/sqlparser/ast_visit.go @@ -54,10 +54,14 @@ func VisitSQLNode(in SQLNode, f Visit) error { return VisitRefOfAlterVschema(in, f) case *AndExpr: return VisitRefOfAndExpr(in, f) - case Argument: - return VisitArgument(in, f) + case *AnyValue: + return VisitRefOfAnyValue(in, f) + case *Argument: + return VisitRefOfArgument(in, f) case *ArgumentLessWindowExpr: return VisitRefOfArgumentLessWindowExpr(in, f) + case *AssignmentExpr: + return VisitRefOfAssignmentExpr(in, f) case *AutoIncSpec: return VisitRefOfAutoIncSpec(in, f) case *Avg: @@ -180,6 +184,26 @@ func VisitSQLNode(in SQLNode, f Visit) error { return VisitRefOfFuncExpr(in, f) case *GTIDFuncExpr: return VisitRefOfGTIDFuncExpr(in, f) + case *GeoHashFromLatLongExpr: + return VisitRefOfGeoHashFromLatLongExpr(in, f) + case *GeoHashFromPointExpr: + return VisitRefOfGeoHashFromPointExpr(in, f) + case *GeoJSONFromGeomExpr: + return VisitRefOfGeoJSONFromGeomExpr(in, f) + case *GeomCollPropertyFuncExpr: + return VisitRefOfGeomCollPropertyFuncExpr(in, f) + case *GeomFormatExpr: + return VisitRefOfGeomFormatExpr(in, f) + case *GeomFromGeoHashExpr: + return VisitRefOfGeomFromGeoHashExpr(in, f) + case *GeomFromGeoJSONExpr: + return VisitRefOfGeomFromGeoJSONExpr(in, f) + case *GeomFromTextExpr: + return VisitRefOfGeomFromTextExpr(in, f) + case *GeomFromWKBExpr: + return VisitRefOfGeomFromWKBExpr(in, f) + case *GeomPropertyFuncExpr: + return VisitRefOfGeomPropertyFuncExpr(in, f) case GroupBy: return VisitGroupBy(in, f) case *GroupConcatExpr: @@ -200,8 +224,8 @@ func VisitSQLNode(in SQLNode, f Visit) error { return VisitRefOfInsert(in, f) case *InsertExpr: return VisitRefOfInsertExpr(in, f) - case *IntervalExpr: - return VisitRefOfIntervalExpr(in, f) + case *IntervalDateExpr: + return VisitRefOfIntervalDateExpr(in, f) case *IntervalFuncExpr: return VisitRefOfIntervalFuncExpr(in, f) case *IntroducerExpr: @@ -262,12 +286,16 @@ func VisitSQLNode(in SQLNode, f Visit) error { return VisitRefOfJtOnResponse(in, f) case *KeyState: return VisitRefOfKeyState(in, f) + case *Kill: + return VisitRefOfKill(in, f) case *LagLeadExpr: return VisitRefOfLagLeadExpr(in, f) case *Limit: return VisitRefOfLimit(in, f) case *LineStringExpr: return VisitRefOfLineStringExpr(in, f) + case *LinestrPropertyFuncExpr: + return VisitRefOfLinestrPropertyFuncExpr(in, f) case ListArg: return VisitListArg(in, f) case *Literal: @@ -294,6 +322,12 @@ func VisitSQLNode(in SQLNode, f Visit) error { return VisitRefOfMin(in, f) case *ModifyColumn: return VisitRefOfModifyColumn(in, f) + case *MultiLinestringExpr: + return VisitRefOfMultiLinestringExpr(in, f) + case *MultiPointExpr: + return VisitRefOfMultiPointExpr(in, f) + case *MultiPolygonExpr: + return VisitRefOfMultiPolygonExpr(in, f) case *NTHValueExpr: return VisitRefOfNTHValueExpr(in, f) case *NamedWindow: @@ -352,10 +386,16 @@ func VisitSQLNode(in SQLNode, f Visit) error { return VisitRefOfPerformanceSchemaFuncExpr(in, f) case *PointExpr: return VisitRefOfPointExpr(in, f) + case *PointPropertyFuncExpr: + return VisitRefOfPointPropertyFuncExpr(in, f) case *PolygonExpr: return VisitRefOfPolygonExpr(in, f) + case *PolygonPropertyFuncExpr: + return VisitRefOfPolygonPropertyFuncExpr(in, f) case *PrepareStmt: return VisitRefOfPrepareStmt(in, f) + case *PurgeBinaryLogs: + return VisitRefOfPurgeBinaryLogs(in, f) case ReferenceAction: return VisitReferenceAction(in, f) case *ReferenceDefinition: @@ -454,8 +494,8 @@ func VisitSQLNode(in SQLNode, f Visit) error { return VisitRefOfTableSpec(in, f) case *TablespaceOperation: return VisitRefOfTablespaceOperation(in, f) - case *TimestampFuncExpr: - return VisitRefOfTimestampFuncExpr(in, f) + case *TimestampDiffExpr: + return VisitRefOfTimestampDiffExpr(in, f) case *TrimFuncExpr: return VisitRefOfTrimFuncExpr(in, f) case *TruncateTable: @@ -761,6 +801,27 @@ func VisitRefOfAndExpr(in *AndExpr, f Visit) error { } return nil } +func VisitRefOfAnyValue(in *AnyValue, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Arg, f); err != nil { + return err + } + return nil +} +func VisitRefOfArgument(in *Argument, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} func VisitRefOfArgumentLessWindowExpr(in *ArgumentLessWindowExpr, f Visit) error { if in == nil { return nil @@ -773,6 +834,21 @@ func VisitRefOfArgumentLessWindowExpr(in *ArgumentLessWindowExpr, f Visit) error } return nil } +func VisitRefOfAssignmentExpr(in *AssignmentExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Left, f); err != nil { + return err + } + if err := VisitExpr(in.Right, f); err != nil { + return err + } + return nil +} func VisitRefOfAutoIncSpec(in *AutoIncSpec, f Visit) error { if in == nil { return nil @@ -1243,9 +1319,6 @@ func VisitRefOfCurTimeFuncExpr(in *CurTimeFuncExpr, f Visit) error { if err := VisitIdentifierCI(in.Name, f); err != nil { return err } - if err := VisitExpr(in.Fsp, f); err != nil { - return err - } return nil } func VisitRefOfDeallocateStmt(in *DeallocateStmt, f Visit) error { @@ -1648,6 +1721,168 @@ func VisitRefOfGTIDFuncExpr(in *GTIDFuncExpr, f Visit) error { } return nil } +func VisitRefOfGeoHashFromLatLongExpr(in *GeoHashFromLatLongExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Latitude, f); err != nil { + return err + } + if err := VisitExpr(in.Longitude, f); err != nil { + return err + } + if err := VisitExpr(in.MaxLength, f); err != nil { + return err + } + return nil +} +func VisitRefOfGeoHashFromPointExpr(in *GeoHashFromPointExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Point, f); err != nil { + return err + } + if err := VisitExpr(in.MaxLength, f); err != nil { + return err + } + return nil +} +func VisitRefOfGeoJSONFromGeomExpr(in *GeoJSONFromGeomExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Geom, f); err != nil { + return err + } + if err := VisitExpr(in.MaxDecimalDigits, f); err != nil { + return err + } + if err := VisitExpr(in.Bitmask, f); err != nil { + return err + } + return nil +} +func VisitRefOfGeomCollPropertyFuncExpr(in *GeomCollPropertyFuncExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.GeomColl, f); err != nil { + return err + } + if err := VisitExpr(in.PropertyDefArg, f); err != nil { + return err + } + return nil +} +func VisitRefOfGeomFormatExpr(in *GeomFormatExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Geom, f); err != nil { + return err + } + if err := VisitExpr(in.AxisOrderOpt, f); err != nil { + return err + } + return nil +} +func VisitRefOfGeomFromGeoHashExpr(in *GeomFromGeoHashExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.GeoHash, f); err != nil { + return err + } + if err := VisitExpr(in.SridOpt, f); err != nil { + return err + } + return nil +} +func VisitRefOfGeomFromGeoJSONExpr(in *GeomFromGeoJSONExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.GeoJSON, f); err != nil { + return err + } + if err := VisitExpr(in.HigherDimHandlerOpt, f); err != nil { + return err + } + if err := VisitExpr(in.Srid, f); err != nil { + return err + } + return nil +} +func VisitRefOfGeomFromTextExpr(in *GeomFromTextExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.WktText, f); err != nil { + return err + } + if err := VisitExpr(in.Srid, f); err != nil { + return err + } + if err := VisitExpr(in.AxisOrderOpt, f); err != nil { + return err + } + return nil +} +func VisitRefOfGeomFromWKBExpr(in *GeomFromWKBExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.WkbBlob, f); err != nil { + return err + } + if err := VisitExpr(in.Srid, f); err != nil { + return err + } + if err := VisitExpr(in.AxisOrderOpt, f); err != nil { + return err + } + return nil +} +func VisitRefOfGeomPropertyFuncExpr(in *GeomPropertyFuncExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Geom, f); err != nil { + return err + } + return nil +} func VisitGroupBy(in GroupBy, f Visit) error { if in == nil { return nil @@ -1757,7 +1992,7 @@ func VisitRefOfInsert(in *Insert, f Visit) error { if err := VisitRefOfParsedComments(in.Comments, f); err != nil { return err } - if err := VisitTableName(in.Table, f); err != nil { + if err := VisitRefOfAliasedTableExpr(in.Table, f); err != nil { return err } if err := VisitPartitions(in.Partitions, f); err != nil { @@ -1795,14 +2030,17 @@ func VisitRefOfInsertExpr(in *InsertExpr, f Visit) error { } return nil } -func VisitRefOfIntervalExpr(in *IntervalExpr, f Visit) error { +func VisitRefOfIntervalDateExpr(in *IntervalDateExpr, f Visit) error { if in == nil { return nil } if cont, err := f(in); err != nil || !cont { return err } - if err := VisitExpr(in.Expr, f); err != nil { + if err := VisitExpr(in.Date, f); err != nil { + return err + } + if err := VisitExpr(in.Interval, f); err != nil { return err } return nil @@ -2262,6 +2500,15 @@ func VisitRefOfKeyState(in *KeyState, f Visit) error { } return nil } +func VisitRefOfKill(in *Kill, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} func VisitRefOfLagLeadExpr(in *LagLeadExpr, f Visit) error { if in == nil { return nil @@ -2313,6 +2560,21 @@ func VisitRefOfLineStringExpr(in *LineStringExpr, f Visit) error { } return nil } +func VisitRefOfLinestrPropertyFuncExpr(in *LinestrPropertyFuncExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Linestring, f); err != nil { + return err + } + if err := VisitExpr(in.PropertyDefArg, f); err != nil { + return err + } + return nil +} func VisitRefOfLiteral(in *Literal, f Visit) error { if in == nil { return nil @@ -2453,6 +2715,42 @@ func VisitRefOfModifyColumn(in *ModifyColumn, f Visit) error { } return nil } +func VisitRefOfMultiLinestringExpr(in *MultiLinestringExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExprs(in.LinestringParams, f); err != nil { + return err + } + return nil +} +func VisitRefOfMultiPointExpr(in *MultiPointExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExprs(in.PointParams, f); err != nil { + return err + } + return nil +} +func VisitRefOfMultiPolygonExpr(in *MultiPolygonExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExprs(in.PolygonParams, f); err != nil { + return err + } + return nil +} func VisitRefOfNTHValueExpr(in *NTHValueExpr, f Visit) error { if in == nil { return nil @@ -2567,6 +2865,9 @@ func VisitRefOfOffset(in *Offset, f Visit) error { if cont, err := f(in); err != nil || !cont { return err } + if err := VisitExpr(in.Original, f); err != nil { + return err + } return nil } func VisitOnDup(in OnDup, f Visit) error { @@ -2852,6 +3153,21 @@ func VisitRefOfPointExpr(in *PointExpr, f Visit) error { } return nil } +func VisitRefOfPointPropertyFuncExpr(in *PointPropertyFuncExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Point, f); err != nil { + return err + } + if err := VisitExpr(in.ValueToSet, f); err != nil { + return err + } + return nil +} func VisitRefOfPolygonExpr(in *PolygonExpr, f Visit) error { if in == nil { return nil @@ -2864,6 +3180,21 @@ func VisitRefOfPolygonExpr(in *PolygonExpr, f Visit) error { } return nil } +func VisitRefOfPolygonPropertyFuncExpr(in *PolygonPropertyFuncExpr, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitExpr(in.Polygon, f); err != nil { + return err + } + if err := VisitExpr(in.PropertyDefArg, f); err != nil { + return err + } + return nil +} func VisitRefOfPrepareStmt(in *PrepareStmt, f Visit) error { if in == nil { return nil @@ -2882,6 +3213,15 @@ func VisitRefOfPrepareStmt(in *PrepareStmt, f Visit) error { } return nil } +func VisitRefOfPurgeBinaryLogs(in *PurgeBinaryLogs, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + return nil +} func VisitRefOfReferenceDefinition(in *ReferenceDefinition, f Visit) error { if in == nil { return nil @@ -3591,7 +3931,7 @@ func VisitRefOfTablespaceOperation(in *TablespaceOperation, f Visit) error { } return nil } -func VisitRefOfTimestampFuncExpr(in *TimestampFuncExpr, f Visit) error { +func VisitRefOfTimestampDiffExpr(in *TimestampDiffExpr, f Visit) error { if in == nil { return nil } @@ -4061,6 +4401,8 @@ func VisitAggrFunc(in AggrFunc, f Visit) error { return nil } switch in := in.(type) { + case *AnyValue: + return VisitRefOfAnyValue(in, f) case *Avg: return VisitRefOfAvg(in, f) case *BitAnd: @@ -4159,6 +4501,8 @@ func VisitCallable(in Callable, f Visit) error { return nil } switch in := in.(type) { + case *AnyValue: + return VisitRefOfAnyValue(in, f) case *ArgumentLessWindowExpr: return VisitRefOfArgumentLessWindowExpr(in, f) case *Avg: @@ -4185,10 +4529,32 @@ func VisitCallable(in Callable, f Visit) error { return VisitRefOfFuncExpr(in, f) case *GTIDFuncExpr: return VisitRefOfGTIDFuncExpr(in, f) + case *GeoHashFromLatLongExpr: + return VisitRefOfGeoHashFromLatLongExpr(in, f) + case *GeoHashFromPointExpr: + return VisitRefOfGeoHashFromPointExpr(in, f) + case *GeoJSONFromGeomExpr: + return VisitRefOfGeoJSONFromGeomExpr(in, f) + case *GeomCollPropertyFuncExpr: + return VisitRefOfGeomCollPropertyFuncExpr(in, f) + case *GeomFormatExpr: + return VisitRefOfGeomFormatExpr(in, f) + case *GeomFromGeoHashExpr: + return VisitRefOfGeomFromGeoHashExpr(in, f) + case *GeomFromGeoJSONExpr: + return VisitRefOfGeomFromGeoJSONExpr(in, f) + case *GeomFromTextExpr: + return VisitRefOfGeomFromTextExpr(in, f) + case *GeomFromWKBExpr: + return VisitRefOfGeomFromWKBExpr(in, f) + case *GeomPropertyFuncExpr: + return VisitRefOfGeomPropertyFuncExpr(in, f) case *GroupConcatExpr: return VisitRefOfGroupConcatExpr(in, f) case *InsertExpr: return VisitRefOfInsertExpr(in, f) + case *IntervalDateExpr: + return VisitRefOfIntervalDateExpr(in, f) case *IntervalFuncExpr: return VisitRefOfIntervalFuncExpr(in, f) case *JSONArrayExpr: @@ -4235,6 +4601,8 @@ func VisitCallable(in Callable, f Visit) error { return VisitRefOfLagLeadExpr(in, f) case *LineStringExpr: return VisitRefOfLineStringExpr(in, f) + case *LinestrPropertyFuncExpr: + return VisitRefOfLinestrPropertyFuncExpr(in, f) case *LocateExpr: return VisitRefOfLocateExpr(in, f) case *MatchExpr: @@ -4245,6 +4613,12 @@ func VisitCallable(in Callable, f Visit) error { return VisitRefOfMemberOfExpr(in, f) case *Min: return VisitRefOfMin(in, f) + case *MultiLinestringExpr: + return VisitRefOfMultiLinestringExpr(in, f) + case *MultiPointExpr: + return VisitRefOfMultiPointExpr(in, f) + case *MultiPolygonExpr: + return VisitRefOfMultiPolygonExpr(in, f) case *NTHValueExpr: return VisitRefOfNTHValueExpr(in, f) case *NamedWindow: @@ -4255,8 +4629,12 @@ func VisitCallable(in Callable, f Visit) error { return VisitRefOfPerformanceSchemaFuncExpr(in, f) case *PointExpr: return VisitRefOfPointExpr(in, f) + case *PointPropertyFuncExpr: + return VisitRefOfPointPropertyFuncExpr(in, f) case *PolygonExpr: return VisitRefOfPolygonExpr(in, f) + case *PolygonPropertyFuncExpr: + return VisitRefOfPolygonPropertyFuncExpr(in, f) case *RegexpInstrExpr: return VisitRefOfRegexpInstrExpr(in, f) case *RegexpLikeExpr: @@ -4269,8 +4647,8 @@ func VisitCallable(in Callable, f Visit) error { return VisitRefOfSubstrExpr(in, f) case *Sum: return VisitRefOfSum(in, f) - case *TimestampFuncExpr: - return VisitRefOfTimestampFuncExpr(in, f) + case *TimestampDiffExpr: + return VisitRefOfTimestampDiffExpr(in, f) case *TrimFuncExpr: return VisitRefOfTrimFuncExpr(in, f) case *UpdateXMLExpr: @@ -4377,10 +4755,14 @@ func VisitExpr(in Expr, f Visit) error { switch in := in.(type) { case *AndExpr: return VisitRefOfAndExpr(in, f) - case Argument: - return VisitArgument(in, f) + case *AnyValue: + return VisitRefOfAnyValue(in, f) + case *Argument: + return VisitRefOfArgument(in, f) case *ArgumentLessWindowExpr: return VisitRefOfArgumentLessWindowExpr(in, f) + case *AssignmentExpr: + return VisitRefOfAssignmentExpr(in, f) case *Avg: return VisitRefOfAvg(in, f) case *BetweenExpr: @@ -4433,12 +4815,32 @@ func VisitExpr(in Expr, f Visit) error { return VisitRefOfFuncExpr(in, f) case *GTIDFuncExpr: return VisitRefOfGTIDFuncExpr(in, f) + case *GeoHashFromLatLongExpr: + return VisitRefOfGeoHashFromLatLongExpr(in, f) + case *GeoHashFromPointExpr: + return VisitRefOfGeoHashFromPointExpr(in, f) + case *GeoJSONFromGeomExpr: + return VisitRefOfGeoJSONFromGeomExpr(in, f) + case *GeomCollPropertyFuncExpr: + return VisitRefOfGeomCollPropertyFuncExpr(in, f) + case *GeomFormatExpr: + return VisitRefOfGeomFormatExpr(in, f) + case *GeomFromGeoHashExpr: + return VisitRefOfGeomFromGeoHashExpr(in, f) + case *GeomFromGeoJSONExpr: + return VisitRefOfGeomFromGeoJSONExpr(in, f) + case *GeomFromTextExpr: + return VisitRefOfGeomFromTextExpr(in, f) + case *GeomFromWKBExpr: + return VisitRefOfGeomFromWKBExpr(in, f) + case *GeomPropertyFuncExpr: + return VisitRefOfGeomPropertyFuncExpr(in, f) case *GroupConcatExpr: return VisitRefOfGroupConcatExpr(in, f) case *InsertExpr: return VisitRefOfInsertExpr(in, f) - case *IntervalExpr: - return VisitRefOfIntervalExpr(in, f) + case *IntervalDateExpr: + return VisitRefOfIntervalDateExpr(in, f) case *IntervalFuncExpr: return VisitRefOfIntervalFuncExpr(in, f) case *IntroducerExpr: @@ -4489,6 +4891,8 @@ func VisitExpr(in Expr, f Visit) error { return VisitRefOfLagLeadExpr(in, f) case *LineStringExpr: return VisitRefOfLineStringExpr(in, f) + case *LinestrPropertyFuncExpr: + return VisitRefOfLinestrPropertyFuncExpr(in, f) case ListArg: return VisitListArg(in, f) case *Literal: @@ -4505,6 +4909,12 @@ func VisitExpr(in Expr, f Visit) error { return VisitRefOfMemberOfExpr(in, f) case *Min: return VisitRefOfMin(in, f) + case *MultiLinestringExpr: + return VisitRefOfMultiLinestringExpr(in, f) + case *MultiPointExpr: + return VisitRefOfMultiPointExpr(in, f) + case *MultiPolygonExpr: + return VisitRefOfMultiPolygonExpr(in, f) case *NTHValueExpr: return VisitRefOfNTHValueExpr(in, f) case *NamedWindow: @@ -4523,8 +4933,12 @@ func VisitExpr(in Expr, f Visit) error { return VisitRefOfPerformanceSchemaFuncExpr(in, f) case *PointExpr: return VisitRefOfPointExpr(in, f) + case *PointPropertyFuncExpr: + return VisitRefOfPointPropertyFuncExpr(in, f) case *PolygonExpr: return VisitRefOfPolygonExpr(in, f) + case *PolygonPropertyFuncExpr: + return VisitRefOfPolygonPropertyFuncExpr(in, f) case *RegexpInstrExpr: return VisitRefOfRegexpInstrExpr(in, f) case *RegexpLikeExpr: @@ -4547,8 +4961,8 @@ func VisitExpr(in Expr, f Visit) error { return VisitRefOfSubstrExpr(in, f) case *Sum: return VisitRefOfSum(in, f) - case *TimestampFuncExpr: - return VisitRefOfTimestampFuncExpr(in, f) + case *TimestampDiffExpr: + return VisitRefOfTimestampDiffExpr(in, f) case *TrimFuncExpr: return VisitRefOfTrimFuncExpr(in, f) case *UnaryExpr: @@ -4701,6 +5115,8 @@ func VisitStatement(in Statement, f Visit) error { return VisitRefOfFlush(in, f) case *Insert: return VisitRefOfInsert(in, f) + case *Kill: + return VisitRefOfKill(in, f) case *Load: return VisitRefOfLoad(in, f) case *LockTables: @@ -4711,6 +5127,8 @@ func VisitStatement(in Statement, f Visit) error { return VisitRefOfOtherRead(in, f) case *PrepareStmt: return VisitRefOfPrepareStmt(in, f) + case *PurgeBinaryLogs: + return VisitRefOfPurgeBinaryLogs(in, f) case *Release: return VisitRefOfRelease(in, f) case *RenameTable: @@ -4778,10 +5196,6 @@ func VisitAlgorithmValue(in AlgorithmValue, f Visit) error { _, err := f(in) return err } -func VisitArgument(in Argument, f Visit) error { - _, err := f(in) - return err -} func VisitBoolVal(in BoolVal, f Visit) error { _, err := f(in) return err diff --git a/go/vt/sqlparser/cached_size.go b/go/vt/sqlparser/cached_size.go index eecfd2f5ee9..ae413e61617 100644 --- a/go/vt/sqlparser/cached_size.go +++ b/go/vt/sqlparser/cached_size.go @@ -319,6 +319,32 @@ func (cached *AndExpr) CachedSize(alloc bool) int64 { } return size } +func (cached *AnyValue) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field Arg vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Arg.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *Argument) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Name string + size += hack.RuntimeAllocSize(int64(len(cached.Name))) + return size +} func (cached *ArgumentLessWindowExpr) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -331,6 +357,24 @@ func (cached *ArgumentLessWindowExpr) CachedSize(alloc bool) int64 { size += cached.OverClause.CachedSize(true) return size } +func (cached *AssignmentExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Left vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Left.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Right vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Right.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} func (cached *AutoIncSpec) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -605,7 +649,7 @@ func (cached *ColName) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(80) + size += int64(64) } // field Name vitess.io/vitess/go/vt/sqlparser.IdentifierCI size += cached.Name.CachedSize(false) @@ -892,6 +936,16 @@ func (cached *Count) CachedSize(alloc bool) int64 { } return size } +func (cached *CountStar) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + return size +} func (cached *CreateDatabase) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -974,10 +1028,6 @@ func (cached *CurTimeFuncExpr) CachedSize(alloc bool) int64 { } // field Name vitess.io/vitess/go/vt/sqlparser.IdentifierCI size += cached.Name.CachedSize(false) - // field Fsp vitess.io/vitess/go/vt/sqlparser.Expr - if cc, ok := cached.Fsp.(cachedObject); ok { - size += cc.CachedSize(true) - } return size } func (cached *DatabaseOption) CachedSize(alloc bool) int64 { @@ -1435,6 +1485,202 @@ func (cached *GTIDFuncExpr) CachedSize(alloc bool) int64 { } return size } +func (cached *GeoHashFromLatLongExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Latitude vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Latitude.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Longitude vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Longitude.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field MaxLength vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.MaxLength.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *GeoHashFromPointExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Point vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Point.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field MaxLength vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.MaxLength.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *GeoJSONFromGeomExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Geom vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Geom.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field MaxDecimalDigits vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.MaxDecimalDigits.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Bitmask vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Bitmask.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *GeomCollPropertyFuncExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field GeomColl vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.GeomColl.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field PropertyDefArg vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.PropertyDefArg.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *GeomFormatExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Geom vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Geom.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field AxisOrderOpt vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.AxisOrderOpt.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *GeomFromGeoHashExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field GeoHash vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.GeoHash.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field SridOpt vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.SridOpt.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *GeomFromGeoJSONExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field GeoJSON vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.GeoJSON.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field HigherDimHandlerOpt vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.HigherDimHandlerOpt.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Srid vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Srid.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *GeomFromTextExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(64) + } + // field WktText vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.WktText.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Srid vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Srid.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field AxisOrderOpt vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.AxisOrderOpt.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *GeomFromWKBExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(64) + } + // field WkbBlob vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.WkbBlob.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Srid vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Srid.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field AxisOrderOpt vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.AxisOrderOpt.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *GeomPropertyFuncExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Geom vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Geom.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} func (cached *GroupConcatExpr) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1590,12 +1836,12 @@ func (cached *Insert) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(144) + size += int64(128) } // field Comments *vitess.io/vitess/go/vt/sqlparser.ParsedComments size += cached.Comments.CachedSize(true) - // field Table vitess.io/vitess/go/vt/sqlparser.TableName - size += cached.Table.CachedSize(false) + // field Table *vitess.io/vitess/go/vt/sqlparser.AliasedTableExpr + size += cached.Table.CachedSize(true) // field Partitions vitess.io/vitess/go/vt/sqlparser.Partitions { size += hack.RuntimeAllocSize(int64(cap(cached.Partitions)) * int64(32)) @@ -1649,20 +1895,22 @@ func (cached *InsertExpr) CachedSize(alloc bool) int64 { } return size } -func (cached *IntervalExpr) CachedSize(alloc bool) int64 { +func (cached *IntervalDateExpr) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } size := int64(0) if alloc { - size += int64(32) + size += int64(48) } - // field Expr vitess.io/vitess/go/vt/sqlparser.Expr - if cc, ok := cached.Expr.(cachedObject); ok { + // field Date vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Date.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Interval vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Interval.(cachedObject); ok { size += cc.CachedSize(true) } - // field Unit string - size += hack.RuntimeAllocSize(int64(len(cached.Unit))) return size } func (cached *IntervalFuncExpr) CachedSize(alloc bool) int64 { @@ -2298,6 +2546,16 @@ func (cached *KeyState) CachedSize(alloc bool) int64 { } return size } +func (cached *Kill) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + return size +} func (cached *LagLeadExpr) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -2363,6 +2621,24 @@ func (cached *LineStringExpr) CachedSize(alloc bool) int64 { } return size } +func (cached *LinestrPropertyFuncExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Linestring vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Linestring.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field PropertyDefArg vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.PropertyDefArg.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} func (cached *Literal) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -2523,6 +2799,63 @@ func (cached *ModifyColumn) CachedSize(alloc bool) int64 { size += cached.After.CachedSize(true) return size } +func (cached *MultiLinestringExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field LinestringParams vitess.io/vitess/go/vt/sqlparser.Exprs + { + size += hack.RuntimeAllocSize(int64(cap(cached.LinestringParams)) * int64(16)) + for _, elem := range cached.LinestringParams { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + return size +} +func (cached *MultiPointExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field PointParams vitess.io/vitess/go/vt/sqlparser.Exprs + { + size += hack.RuntimeAllocSize(int64(cap(cached.PointParams)) * int64(16)) + for _, elem := range cached.PointParams { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + return size +} +func (cached *MultiPolygonExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field PolygonParams vitess.io/vitess/go/vt/sqlparser.Exprs + { + size += hack.RuntimeAllocSize(int64(cap(cached.PolygonParams)) * int64(16)) + for _, elem := range cached.PolygonParams { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + return size +} func (cached *NTHValueExpr) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -2620,8 +2953,10 @@ func (cached *Offset) CachedSize(alloc bool) int64 { if alloc { size += int64(24) } - // field Original string - size += hack.RuntimeAllocSize(int64(len(cached.Original))) + // field Original vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Original.(cachedObject); ok { + size += cc.CachedSize(true) + } return size } func (cached *OptLike) CachedSize(alloc bool) int64 { @@ -2921,6 +3256,24 @@ func (cached *PointExpr) CachedSize(alloc bool) int64 { } return size } +func (cached *PointPropertyFuncExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Point vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Point.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field ValueToSet vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.ValueToSet.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} func (cached *PolygonExpr) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -2940,6 +3293,24 @@ func (cached *PolygonExpr) CachedSize(alloc bool) int64 { } return size } +func (cached *PolygonPropertyFuncExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Polygon vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.Polygon.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field PropertyDefArg vitess.io/vitess/go/vt/sqlparser.Expr + if cc, ok := cached.PropertyDefArg.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} func (cached *PrepareStmt) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -2958,6 +3329,20 @@ func (cached *PrepareStmt) CachedSize(alloc bool) int64 { size += cached.Comments.CachedSize(true) return size } +func (cached *PurgeBinaryLogs) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field To string + size += hack.RuntimeAllocSize(int64(len(cached.To))) + // field Before string + size += hack.RuntimeAllocSize(int64(len(cached.Before))) + return size +} func (cached *ReferenceDefinition) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -3754,16 +4139,14 @@ func (cached *TablespaceOperation) CachedSize(alloc bool) int64 { } return size } -func (cached *TimestampFuncExpr) CachedSize(alloc bool) int64 { +func (cached *TimestampDiffExpr) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } size := int64(0) if alloc { - size += int64(64) + size += int64(48) } - // field Name string - size += hack.RuntimeAllocSize(int64(len(cached.Name))) // field Expr1 vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Expr1.(cachedObject); ok { size += cc.CachedSize(true) @@ -3772,8 +4155,6 @@ func (cached *TimestampFuncExpr) CachedSize(alloc bool) int64 { if cc, ok := cached.Expr2.(cachedObject); ok { size += cc.CachedSize(true) } - // field Unit string - size += hack.RuntimeAllocSize(int64(len(cached.Unit))) return size } func (cached *TrimFuncExpr) CachedSize(alloc bool) int64 { diff --git a/go/vt/sqlparser/comments.go b/go/vt/sqlparser/comments.go index 7e17e58e66b..4ecf7b1b293 100644 --- a/go/vt/sqlparser/comments.go +++ b/go/vt/sqlparser/comments.go @@ -21,6 +21,9 @@ import ( "strings" "unicode" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -48,8 +51,19 @@ const ( DirectiveVExplainRunDMLQueries = "EXECUTE_DML_QUERIES" // DirectiveConsolidator enables the query consolidator. DirectiveConsolidator = "CONSOLIDATOR" + // DirectiveWorkloadName specifies the name of the client application workload issuing the query. + DirectiveWorkloadName = "WORKLOAD_NAME" + // DirectivePriority specifies the priority of a workload. It should be an integer between 0 and MaxPriorityValue, + // where 0 is the highest priority, and MaxPriorityValue is the lowest one. + DirectivePriority = "PRIORITY" + + // MaxPriorityValue specifies the maximum value allowed for the priority query directive. Valid priority values are + // between zero and MaxPriorityValue. + MaxPriorityValue = 100 ) +var ErrInvalidPriority = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Invalid priority value specified in query") + func isNonSpace(r rune) bool { return !unicode.IsSpace(r) } @@ -210,6 +224,15 @@ type CommentDirectives struct { m map[string]string } +// ResetDirectives sets the _directives member to `nil`, which means the next call to Directives() +// will re-evaluate it. +func (c *ParsedComments) ResetDirectives() { + if c == nil { + return + } + c._directives = nil +} + // Directives parses the comment list for any execution directives // of the form: // @@ -250,6 +273,13 @@ func (c *ParsedComments) Length() int { return len(c.comments) } +func (c *ParsedComments) GetComments() Comments { + if c != nil { + return c.comments + } + return nil +} + func (c *ParsedComments) Prepend(comment string) Comments { if c == nil { return Comments{comment} @@ -374,6 +404,29 @@ func AllowScatterDirective(stmt Statement) bool { return comments != nil && comments.Directives().IsSet(DirectiveAllowScatter) } +// GetPriorityFromStatement gets the priority from the provided Statement, using DirectivePriority +func GetPriorityFromStatement(statement Statement) (string, error) { + commentedStatement, ok := statement.(Commented) + // This would mean that the statement lacks comments, so we can't obtain the workload from it. Hence default to + // empty priority + if !ok { + return "", nil + } + + directives := commentedStatement.GetParsedComments().Directives() + priority, ok := directives.GetString(DirectivePriority, "") + if !ok || priority == "" { + return "", nil + } + + intPriority, err := strconv.Atoi(priority) + if err != nil || intPriority < 0 || intPriority > MaxPriorityValue { + return "", ErrInvalidPriority + } + + return priority, nil +} + // Consolidator returns the consolidator option. func Consolidator(stmt Statement) querypb.ExecuteOptions_Consolidator { var comments *ParsedComments @@ -396,3 +449,19 @@ func Consolidator(stmt Statement) querypb.ExecuteOptions_Consolidator { } return querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED } + +// GetWorkloadNameFromStatement gets the workload name from the provided Statement, using workloadLabel as the name of +// the query directive that specifies it. +func GetWorkloadNameFromStatement(statement Statement) string { + commentedStatement, ok := statement.(Commented) + // This would mean that the statement lacks comments, so we can't obtain the workload from it. Hence default to + // empty workload name + if !ok { + return "" + } + + directives := commentedStatement.GetParsedComments().Directives() + workloadName, _ := directives.GetString(DirectiveWorkloadName, "") + + return workloadName +} diff --git a/go/vt/sqlparser/comments_test.go b/go/vt/sqlparser/comments_test.go index 7200e3828bd..a1530cc3812 100644 --- a/go/vt/sqlparser/comments_test.go +++ b/go/vt/sqlparser/comments_test.go @@ -496,3 +496,68 @@ func TestConsolidator(t *testing.T) { }) } } + +func TestGetPriorityFromStatement(t *testing.T) { + testCases := []struct { + query string + expectedPriority string + expectedError error + }{ + { + query: "select * from a_table", + expectedPriority: "", + expectedError: nil, + }, + { + query: "select /*vt+ ANOTHER_DIRECTIVE=324 */ * from another_table", + expectedPriority: "", + expectedError: nil, + }, + { + query: "select /*vt+ PRIORITY=33 */ * from another_table", + expectedPriority: "33", + expectedError: nil, + }, + { + query: "select /*vt+ PRIORITY=200 */ * from another_table", + expectedPriority: "", + expectedError: ErrInvalidPriority, + }, + { + query: "select /*vt+ PRIORITY=-1 */ * from another_table", + expectedPriority: "", + expectedError: ErrInvalidPriority, + }, + { + query: "select /*vt+ PRIORITY=some_text */ * from another_table", + expectedPriority: "", + expectedError: ErrInvalidPriority, + }, + { + query: "select /*vt+ PRIORITY=0 */ * from another_table", + expectedPriority: "0", + expectedError: nil, + }, + { + query: "select /*vt+ PRIORITY=100 */ * from another_table", + expectedPriority: "100", + expectedError: nil, + }, + } + + for _, testCase := range testCases { + theThestCase := testCase + t.Run(theThestCase.query, func(t *testing.T) { + t.Parallel() + stmt, err := Parse(theThestCase.query) + assert.NoError(t, err) + actualPriority, actualError := GetPriorityFromStatement(stmt) + if theThestCase.expectedError != nil { + assert.ErrorIs(t, actualError, theThestCase.expectedError) + } else { + assert.NoError(t, err) + assert.Equal(t, theThestCase.expectedPriority, actualPriority) + } + }) + } +} diff --git a/go/vt/sqlparser/constants.go b/go/vt/sqlparser/constants.go index 81f2e067563..450522fb8d5 100644 --- a/go/vt/sqlparser/constants.go +++ b/go/vt/sqlparser/constants.go @@ -16,6 +16,8 @@ limitations under the License. package sqlparser +import "vitess.io/vitess/go/mysql/datetime" + // String constants to be used in ast. const ( // Select.Distinct @@ -209,7 +211,7 @@ const ( Utf16Str = "_utf16" Utf16leStr = "_utf16le" Utf32Str = "_utf32" - Utf8Str = "_utf8" + Utf8mb3Str = "_utf8mb3" Utf8mb4Str = "_utf8mb4" NStringStr = "N" @@ -401,27 +403,70 @@ const ( DefaultTypeStr = "default" ExclusiveTypeStr = "exclusive" - // IntervalTypes strings - DayStr = "day" - WeekStr = "week" - MonthStr = "month" - YearStr = "year" - DayHourStr = "day_hour" - DayMicrosecondStr = "day_microsecond" - DayMinuteStr = "day_minute" - DaySecondStr = "day_second" - HourStr = "hour" - HourMicrosecondStr = "hour_microsecond" - HourMinuteStr = "hour_minute" - HourSecondStr = "hour_second" - MicrosecondStr = "microsecond" - MinuteStr = "minute" - MinuteMicrosecondStr = "minute_microsecond" - MinuteSecondStr = "minute_second" - QuarterStr = "quarter" - SecondStr = "second" - SecondMicrosecondStr = "second_microsecond" - YearMonthStr = "year_month" + // GeomeFromWktType strings + GeometryFromTextStr = "st_geometryfromtext" + GeometryCollectionFromTextStr = "st_geometrycollectionfromtext" + PointFromTextStr = "st_pointfromtext" + MultiPointFromTextStr = "st_multipointfromtext" + LineStringFromTextStr = "st_linestringfromtext" + MultiLinestringFromTextStr = "st_multilinestringfromtext" + PolygonFromTextStr = "st_polygonfromtext" + MultiPolygonFromTextStr = "st_multipolygonfromtext" + + // GeomeFromWktType strings + GeometryFromWKBStr = "st_geometryfromwkb" + GeometryCollectionFromWKBStr = "st_geometrycollectionfromwkb" + PointFromWKBStr = "st_pointfromwkb" + MultiPointFromWKBStr = "st_multipointfromwkb" + LineStringFromWKBStr = "st_linestringfromwkb" + MultiLinestringFromWKBStr = "st_multilinestringfromwkb" + PolygonFromWKBStr = "st_polygonfromwkb" + MultiPolygonFromWKBStr = "st_multipolygonfromwkb" + + // GeomFormatExpr strings + TextFormatStr = "st_astext" + BinaryFormatStr = "st_asbinary" + + // GeomPropertyType strings + IsSimpleStr = "st_issimple" + IsEmptyStr = "st_isempty" + EnvelopeStr = "st_envelope" + DimensionStr = "st_dimension" + GeometryTypeStr = "st_geometrytype" + + // PointPropertyType strings + XCordinateStr = "st_x" + YCordinateStr = "st_y" + LatitudeStr = "st_latitude" + LongitudeStr = "st_longitude" + + // LinestringPropertyType strings + EndPointStr = "st_endpoint" + IsClosedStr = "st_isclosed" + LengthStr = "st_length" + NumPointsStr = "st_numpoints" + PointNStr = "st_pointn" + StartPointStr = "st_startpoint" + + // PolygonPropertyType strings + AreaStr = "st_area" + CentroidStr = "st_centroid" + ExteriorRingStr = "st_exteriorring" + InteriorRingNStr = "st_interiorringN" + NumInteriorRingsStr = "st_numinteriorrings" + + // GeomCollPropType strings + NumGeometriesStr = "st_numgeometries" + GeometryNStr = "st_geometryn" + + // GeomFromGeoHash strings + LatitudeFromHashStr = "st_latfromgeohash" + LongitudeFromHashStr = "st_longfromgeohash" + PointFromHashStr = "st_pointfromgeohash" + + // KillType strings + ConnectionStr = "connection" + QueryStr = "query" ) // Constants for Enum Type - Insert.Action @@ -738,12 +783,6 @@ const ( IntoDumpfile ) -// Constant for Enum Type - DeallocateStmtType -const ( - DeallocateType DeallocateStmtType = iota - DropType -) - // Constant for Enum Type - JtOnResponseType const ( ErrorJSONType JtOnResponseType = iota @@ -857,33 +896,133 @@ const ( DefaultFormat ) -// IntervalTypes constants -const ( - IntervalYear IntervalTypes = iota - IntervalQuarter - IntervalMonth - IntervalWeek - IntervalDay - IntervalHour - IntervalMinute - IntervalSecond - IntervalMicrosecond - IntervalYearMonth - IntervalDayHour - IntervalDayMinute - IntervalDaySecond - IntervalHourMinute - IntervalHourSecond - IntervalMinuteSecond - IntervalDayMicrosecond - IntervalHourMicrosecond - IntervalMinuteMicrosecond - IntervalSecondMicrosecond -) - // Transaction access mode const ( WithConsistentSnapshot TxAccessMode = iota ReadWrite ReadOnly ) + +// Enum Types of WKT functions +const ( + GeometryFromText GeomFromWktType = iota + GeometryCollectionFromText + PointFromText + LineStringFromText + PolygonFromText + MultiPointFromText + MultiPolygonFromText + MultiLinestringFromText +) + +// Enum Types of WKT functions +const ( + GeometryFromWKB GeomFromWkbType = iota + GeometryCollectionFromWKB + PointFromWKB + LineStringFromWKB + PolygonFromWKB + MultiPointFromWKB + MultiPolygonFromWKB + MultiLinestringFromWKB +) + +// Enum Types of spatial format functions +const ( + TextFormat GeomFormatType = iota + BinaryFormat +) + +// Enum Types of spatial property functions +const ( + IsSimple GeomPropertyType = iota + IsEmpty + Dimension + GeometryType + Envelope +) + +// Enum Types of point property functions +const ( + XCordinate PointPropertyType = iota + YCordinate + Latitude + Longitude +) + +// Enum Types of linestring property functions +const ( + EndPoint LinestrPropType = iota + IsClosed + Length + NumPoints + PointN + StartPoint +) + +// Enum Types of linestring property functions +const ( + Area PolygonPropType = iota + Centroid + ExteriorRing + InteriorRingN + NumInteriorRings +) + +// Enum Types of geom collection property functions +const ( + GeometryN GeomCollPropType = iota + NumGeometries +) + +// Enum Types of geom from geohash functions +const ( + LatitudeFromHash GeomFromHashType = iota + LongitudeFromHash + PointFromHash +) + +// IntervalType constants +const ( + IntervalNone = datetime.IntervalNone + IntervalMicrosecond = datetime.IntervalMicrosecond + IntervalSecond = datetime.IntervalSecond + IntervalMinute = datetime.IntervalMinute + IntervalHour = datetime.IntervalHour + IntervalDay = datetime.IntervalDay + IntervalWeek = datetime.IntervalWeek + IntervalMonth = datetime.IntervalMonth + IntervalQuarter = datetime.IntervalQuarter + IntervalYear = datetime.IntervalYear + + IntervalSecondMicrosecond = datetime.IntervalSecondMicrosecond + IntervalMinuteMicrosecond = datetime.IntervalMinuteMicrosecond + IntervalMinuteSecond = datetime.IntervalMinuteSecond + IntervalHourMicrosecond = datetime.IntervalHourMicrosecond + IntervalHourSecond = datetime.IntervalHourSecond + IntervalHourMinute = datetime.IntervalHourMinute + IntervalDayMicrosecond = datetime.IntervalDayMicrosecond + IntervalDaySecond = datetime.IntervalDaySecond + IntervalDayMinute = datetime.IntervalDayMinute + IntervalDayHour = datetime.IntervalDayHour + IntervalYearMonth = datetime.IntervalYearMonth +) + +type IntervalExprSyntax int8 + +const ( + IntervalDateExprDateAdd IntervalExprSyntax = iota + IntervalDateExprDateSub + IntervalDateExprAdddate + IntervalDateExprSubdate + IntervalDateExprBinaryAdd + IntervalDateExprBinaryAddLeft + IntervalDateExprBinarySub + IntervalDateExprTimestampadd +) + +// Constant for Enum Type - KillType +const ( + ConnectionType KillType = iota + QueryType +) diff --git a/go/vt/sqlparser/goyacc/goyacc.go b/go/vt/sqlparser/goyacc/goyacc.go index c7614a700f2..5864b5090b4 100644 --- a/go/vt/sqlparser/goyacc/goyacc.go +++ b/go/vt/sqlparser/goyacc/goyacc.go @@ -1206,7 +1206,9 @@ func emitcode(code []rune, lineno int) { if !writtenImports && isPackageClause(line) { fmt.Fprintln(ftable, `import (`) fmt.Fprintln(ftable, `__yyfmt__ "fmt"`) - fmt.Fprintln(ftable, `__yyunsafe__ "unsafe"`) + if allowFastAppend { + fmt.Fprintln(ftable, `__yyunsafe__ "unsafe"`) + } fmt.Fprintln(ftable, `)`) if !lflag { fmt.Fprintf(ftable, "//line %v:%v\n\t\t", infile, lineno+i) @@ -3047,9 +3049,14 @@ func others() { ch = getrune(finput) } + if allowFastAppend { + fastAppendHelper := strings.Replace(fastAppendHelperText, "$$", prefix, -1) + fmt.Fprint(ftable, fastAppendHelper) + } + // copy yaccpar if !lflag { - fmt.Fprintf(ftable, "\n//line yaccpar:1\n") + fmt.Fprint(ftable, "\n//line yaccpar:1\n") } parts := strings.SplitN(yaccpar, prefix+"run()", 2) @@ -3344,10 +3351,7 @@ func gofmt() { os.WriteFile(oflag, src, 0666) } -var yaccpar string // will be processed version of yaccpartext: s/$$/prefix/g -var yaccpartext = ` -/* parser for yacc output */ - +const fastAppendHelperText = ` func $$Iaddr(v any) __yyunsafe__.Pointer { type h struct { t __yyunsafe__.Pointer @@ -3355,6 +3359,11 @@ func $$Iaddr(v any) __yyunsafe__.Pointer { } return (*h)(__yyunsafe__.Pointer(&v)).p } +` + +var yaccpar string // will be processed version of yaccpartext: s/$$/prefix/g +const yaccpartext = ` +/* parser for yacc output */ var ( $$Debug = 0 diff --git a/go/vt/sqlparser/keywords.go b/go/vt/sqlparser/keywords.go index 54cf96c5011..36c329d8e0a 100644 --- a/go/vt/sqlparser/keywords.go +++ b/go/vt/sqlparser/keywords.go @@ -117,6 +117,7 @@ var keywords = []keyword{ {"accessible", UNUSED}, {"action", ACTION}, {"add", ADD}, + {"adddate", ADDDATE}, {"after", AFTER}, {"against", AGAINST}, {"algorithm", ALGORITHM}, @@ -125,6 +126,7 @@ var keywords = []keyword{ {"always", ALWAYS}, {"analyze", ANALYZE}, {"and", AND}, + {"any_value", ANY_VALUE}, {"array", ARRAY}, {"as", AS}, {"asc", ASC}, @@ -134,7 +136,7 @@ var keywords = []keyword{ {"autoextend_size", AUTOEXTEND_SIZE}, {"avg", AVG}, {"avg_row_length", AVG_ROW_LENGTH}, - {"before", UNUSED}, + {"before", BEFORE}, {"begin", BEGIN}, {"between", BETWEEN}, {"bigint", BIGINT}, @@ -196,7 +198,9 @@ var keywords = []keyword{ {"csv", CSV}, {"current", CURRENT}, {"current_date", CURRENT_DATE}, + {"curdate", CURDATE}, {"current_time", CURRENT_TIME}, + {"curtime", CURTIME}, {"current_timestamp", CURRENT_TIMESTAMP}, {"current_user", CURRENT_USER}, {"cursor", UNUSED}, @@ -210,6 +214,8 @@ var keywords = []keyword{ {"day_second", DAY_SECOND}, {"date", DATE}, {"datetime", DATETIME}, + {"date_add", DATE_ADD}, + {"date_sub", DATE_SUB}, {"deallocate", DEALLOCATE}, {"dec", UNUSED}, {"decimal", DECIMAL_TYPE}, @@ -244,6 +250,7 @@ var keywords = []keyword{ {"enclosed", ENCLOSED}, {"encryption", ENCRYPTION}, {"end", END}, + {"endpoint", ST_EndPoint}, {"enforced", ENFORCED}, {"engine", ENGINE}, {"engine_attribute", ENGINE_ATTRIBUTE}, @@ -293,6 +300,7 @@ var keywords = []keyword{ {"geometrycollection", GEOMETRYCOLLECTION}, {"get", UNUSED}, {"get_lock", GET_LOCK}, + {"glength", ST_Length}, {"global", GLOBAL}, {"gtid_executed", GTID_EXECUTED}, {"gtid_subset", GTID_SUBSET}, @@ -337,6 +345,7 @@ var keywords = []keyword{ {"into", INTO}, {"io_after_gtids", UNUSED}, {"is", IS}, + {"isclosed", ST_IsClosed}, {"is_free_lock", IS_FREE_LOCK}, {"is_used_lock", IS_USED_LOCK}, {"isolation", ISOLATION}, @@ -378,7 +387,7 @@ var keywords = []keyword{ {"keys", KEYS}, {"keyspaces", KEYSPACES}, {"key_block_size", KEY_BLOCK_SIZE}, - {"kill", UNUSED}, + {"kill", KILL}, {"lag", LAG}, {"language", LANGUAGE}, {"last", LAST}, @@ -454,6 +463,7 @@ var keywords = []keyword{ {"null", NULL}, {"nulls", NULLS}, {"numeric", NUMERIC}, + {"numpoints", ST_NumPoints}, {"of", OF}, {"off", OFF}, {"offset", OFFSET}, @@ -484,6 +494,7 @@ var keywords = []keyword{ {"plan", PLAN}, {"plugins", PLUGINS}, {"point", POINT}, + {"pointn", ST_PointN}, {"polygon", POLYGON}, {"position", POSITION}, {"preceding", PRECEDING}, @@ -491,6 +502,7 @@ var keywords = []keyword{ {"prepare", PREPARE}, {"primary", PRIMARY}, {"privileges", PRIVILEGES}, + {"purge", PURGE}, {"processlist", PROCESSLIST}, {"procedure", PROCEDURE}, {"ps_current_thread_id", PS_CURRENT_THREAD_ID}, @@ -577,8 +589,18 @@ var keywords = []keyword{ {"sql_calc_found_rows", SQL_CALC_FOUND_ROWS}, {"sql_no_cache", SQL_NO_CACHE}, {"sql_small_result", UNUSED}, + {"sql_tsi_day", SQL_TSI_DAY}, + {"sql_tsi_week", SQL_TSI_WEEK}, + {"sql_tsi_hour", SQL_TSI_HOUR}, + {"sql_tsi_minute", SQL_TSI_MINUTE}, + {"sql_tsi_month", SQL_TSI_MONTH}, + {"sql_tsi_quarter", SQL_TSI_QUARTER}, + {"sql_tsi_second", SQL_TSI_SECOND}, + {"sql_tsi_microsecond", SQL_TSI_MICROSECOND}, + {"sql_tsi_year", SQL_TSI_YEAR}, {"ssl", UNUSED}, {"start", START}, + {"startpoint", ST_StartPoint}, {"starting", STARTING}, {"stats_auto_recalc", STATS_AUTO_RECALC}, {"stats_persistent", STATS_PERSISTENT}, @@ -592,7 +614,73 @@ var keywords = []keyword{ {"stored", STORED}, {"straight_join", STRAIGHT_JOIN}, {"stream", STREAM}, + {"st_area", ST_Area}, + {"st_asbinary", ST_AsBinary}, + {"st_asgeojson", ST_AsGeoJSON}, + {"st_astext", ST_AsText}, + {"st_aswkb", ST_AsBinary}, + {"st_aswkt", ST_AsText}, + {"st_centroid", ST_Centroid}, + {"st_dimension", ST_Dimension}, + {"st_endpoint", ST_EndPoint}, + {"st_envelope", ST_Envelope}, + {"st_exteriorring", ST_ExteriorRing}, + {"st_geohash", ST_GeoHash}, + {"st_geomcollfromtext", ST_GeometryCollectionFromText}, + {"st_geomcollfromtxt", ST_GeometryCollectionFromText}, + {"st_geomcollfromwkb", ST_GeometryCollectionFromWKB}, + {"st_geometrycollectionfromtext", ST_GeometryCollectionFromText}, + {"st_geometrycollectionfromwkb", ST_GeometryCollectionFromWKB}, + {"st_geometryfromtext", ST_GeometryFromText}, + {"st_geometryfromwkb", ST_GeometryFromWKB}, + {"st_geometryn", ST_GeometryN}, + {"st_geometrytype", ST_GeometryType}, + {"st_geomfromgeojson", ST_GeomFromGeoJSON}, + {"st_geomfromtext", ST_GeometryFromText}, + {"st_geomfromwkb", ST_GeometryFromWKB}, + {"st_interiorringn", ST_InteriorRingN}, + {"st_isclosed", ST_IsClosed}, + {"st_isempty", ST_IsEmpty}, + {"st_issimple", ST_IsSimple}, + {"st_latfromgeohash", ST_LatFromGeoHash}, + {"st_latitude", ST_Latitude}, + {"st_length", ST_Length}, + {"st_linefromtext", ST_LineStringFromText}, + {"st_linefromwkb", ST_LineStringFromWKB}, + {"st_linestringfromtext", ST_LineStringFromText}, + {"st_linestringfromwkb", ST_LineStringFromWKB}, + {"st_longfromgeohash", ST_LongFromGeoHash}, + {"st_longitude", ST_Longitude}, + {"st_mlinefromtext", ST_MultiLineStringFromText}, + {"st_mlinefromwkb", ST_MultiLineStringFromWKB}, + {"st_mpointfromtext", ST_MultiPointFromText}, + {"st_mpointfromwkb", ST_MultiPointFromWKB}, + {"st_mpolyfromtext", ST_MultiPolygonFromText}, + {"st_mpolyfromwkb", ST_MultiPolygonFromWKB}, + {"st_multilinestringfromtext", ST_MultiLineStringFromText}, + {"st_multilinestringfromwkb", ST_MultiLineStringFromWKB}, + {"st_multipointfromtext", ST_MultiPointFromText}, + {"st_multipointfromwkb", ST_MultiPointFromWKB}, + {"st_multipolygonfromtext", ST_MultiPolygonFromText}, + {"st_multipolygonfromwkb", ST_MultiPolygonFromWKB}, + {"st_numgeometries", ST_NumGeometries}, + {"st_numpoints", ST_NumPoints}, + {"st_numinteriorring", ST_NumInteriorRings}, + {"st_numinteriorrings", ST_NumInteriorRings}, + {"st_pointfromgeohash", ST_PointFromGeoHash}, + {"st_pointfromtext", ST_PointFromText}, + {"st_pointfromwkb", ST_PointFromWKB}, + {"st_pointn", ST_PointN}, + {"st_polyfromtext", ST_PolygonFromText}, + {"st_polyfromwkb", ST_PolygonFromWKB}, + {"st_polygonfromtext", ST_PolygonFromText}, + {"st_polygonfromwkb", ST_PolygonFromWKB}, + {"st_startpoint", ST_StartPoint}, + {"st_x", ST_X}, + {"st_y", ST_Y}, + {"subdate", SUBDATE}, {"sum", SUM}, + {"sysdate", SYSDATE}, {"system", UNUSED}, {"table", TABLE}, {"tables", TABLES}, @@ -675,6 +763,7 @@ var keywords = []keyword{ {"warnings", WARNINGS}, {"wait_for_executed_gtid_set", WAIT_FOR_EXECUTED_GTID_SET}, {"wait_until_sql_thread_after_gtids", WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS}, + {"week", WEEK}, {"weight_string", WEIGHT_STRING}, {"when", WHEN}, {"where", WHERE}, diff --git a/go/vt/sqlparser/keywords_test.go b/go/vt/sqlparser/keywords_test.go index be7e5349318..0209ee20352 100644 --- a/go/vt/sqlparser/keywords_test.go +++ b/go/vt/sqlparser/keywords_test.go @@ -20,13 +20,11 @@ func TestKeywordTable(t *testing.T) { } var vitessReserved = map[string]bool{ - "ESCAPE": true, - "NEXT": true, - "OFF": true, - "SAVEPOINT": true, - "SQL_NO_CACHE": true, - "TIMESTAMPADD": true, - "TIMESTAMPDIFF": true, + "ESCAPE": true, + "NEXT": true, + "OFF": true, + "SAVEPOINT": true, + "SQL_NO_CACHE": true, } func TestCompatibility(t *testing.T) { diff --git a/go/vt/sqlparser/literal.go b/go/vt/sqlparser/literal.go new file mode 100644 index 00000000000..24613ff6e05 --- /dev/null +++ b/go/vt/sqlparser/literal.go @@ -0,0 +1,123 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "errors" + "fmt" + "math" + "math/big" + + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/mysql/datetime" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/fastparse" + "vitess.io/vitess/go/mysql/hex" + "vitess.io/vitess/go/sqltypes" +) + +func LiteralToValue(lit *Literal) (sqltypes.Value, error) { + switch lit.Type { + case IntVal: + uval, err := fastparse.ParseUint64(lit.Val, 10) + if err != nil { + if errors.Is(err, fastparse.ErrOverflow) { + return sqltypes.NewDecimal(lit.Val), nil + } + return sqltypes.Value{}, err + } + if uval <= math.MaxInt64 { + return sqltypes.NewInt64(int64(uval)), nil + } + return sqltypes.NewUint64(uval), nil + case FloatVal: + fval, err := fastparse.ParseFloat64(lit.Val) + if err != nil { + return sqltypes.Value{}, err + } + return sqltypes.NewFloat64(fval), nil + case DecimalVal: + dec, err := decimal.NewFromMySQL(lit.Bytes()) + if err != nil { + return sqltypes.Value{}, err + } + return sqltypes.NewDecimal(hack.String(dec.FormatMySQL(0))), nil + case StrVal: + return sqltypes.NewVarChar(lit.Val), nil + case HexNum: + b := lit.Bytes() + if b[0] != '0' || b[1] != 'x' { + return sqltypes.Value{}, fmt.Errorf("invalid hex literal: %v", lit.Val) + } + if len(lit.Val)%2 == 0 { + return parseHexLiteral(b[2:]) + } + // If the hex literal doesn't have an even amount of hex digits, we need + // to pad it with a '0' in the left. Instead of allocating a new slice + // for padding pad in-place by replacing the 'x' in the original slice with + // a '0', and clean it up after parsing. + b[1] = '0' + defer func() { + b[1] = 'x' + }() + return parseHexLiteral(b[1:]) + case HexVal: + return parseHexLiteral(lit.Bytes()) + case BitVal: + return parseBitLiteral(lit.Bytes()) + case DateVal: + d, ok := datetime.ParseDate(lit.Val) + if !ok { + return sqltypes.Value{}, fmt.Errorf("invalid time literal: %v", lit.Val) + } + buf := datetime.Date_YYYY_MM_DD.Format(datetime.DateTime{Date: d}, 0) + return sqltypes.NewDate(hack.String(buf)), nil + case TimeVal: + t, l, ok := datetime.ParseTime(lit.Val, -1) + if !ok { + return sqltypes.Value{}, fmt.Errorf("invalid time literal: %v", lit.Val) + } + buf := datetime.Time_hh_mm_ss.Format(datetime.DateTime{Time: t}, uint8(l)) + return sqltypes.NewTime(hack.String(buf)), nil + case TimestampVal: + dt, l, ok := datetime.ParseDateTime(lit.Val, -1) + if !ok { + return sqltypes.Value{}, fmt.Errorf("invalid time literal: %v", lit.Val) + } + buf := datetime.DateTime_YYYY_MM_DD_hh_mm_ss.Format(dt, uint8(l)) + return sqltypes.NewDatetime(hack.String(buf)), nil + default: + return sqltypes.Value{}, fmt.Errorf("unsupported literal type: %v", lit.Type) + } +} + +func parseHexLiteral(val []byte) (sqltypes.Value, error) { + raw := make([]byte, hex.DecodedLen(val)) + if err := hex.DecodeBytes(raw, val); err != nil { + return sqltypes.Value{}, err + } + return sqltypes.NewVarBinary(hack.String(raw)), nil +} + +func parseBitLiteral(val []byte) (sqltypes.Value, error) { + var i big.Int + _, ok := i.SetString(string(val), 2) + if !ok { + return sqltypes.Value{}, fmt.Errorf("invalid bit literal: %v", val) + } + return sqltypes.NewVarBinary(hack.String(i.Bytes())), nil +} diff --git a/go/vt/sqlparser/normalizer.go b/go/vt/sqlparser/normalizer.go index a71b8d9fbd6..299f58e016d 100644 --- a/go/vt/sqlparser/normalizer.go +++ b/go/vt/sqlparser/normalizer.go @@ -17,10 +17,14 @@ limitations under the License. package sqlparser import ( - "fmt" + "bytes" "math/big" + "vitess.io/vitess/go/mysql/datetime" + "vitess.io/vitess/go/mysql/hex" "vitess.io/vitess/go/sqltypes" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -156,16 +160,22 @@ func (nz *normalizer) walkUpSelect(cursor *Cursor) bool { return nz.err == nil // only continue if we haven't found any errors } -func validateLiteral(node *Literal) (err error) { +func validateLiteral(node *Literal) error { switch node.Type { case DateVal: - _, err = ParseDate(node.Val) + if _, ok := datetime.ParseDate(node.Val); !ok { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Incorrect DATE value: '%s'", node.Val) + } case TimeVal: - _, err = ParseTime(node.Val) + if _, _, ok := datetime.ParseTime(node.Val, -1); !ok { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Incorrect TIME value: '%s'", node.Val) + } case TimestampVal: - _, err = ParseDateTime(node.Val) + if _, _, ok := datetime.ParseDateTime(node.Val, -1); !ok { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Incorrect DATETIME value: '%s'", node.Val) + } } - return err + return nil } func (nz *normalizer) convertLiteralDedup(node *Literal, cursor *Cursor) { @@ -200,7 +210,7 @@ func (nz *normalizer) convertLiteralDedup(node *Literal, cursor *Cursor) { } // Modify the AST node to a bindvar. - cursor.Replace(NewArgument(bvname)) + cursor.Replace(NewTypedArgument(bvname, node.SQLType())) } func keyFor(bval *querypb.BindVariable, lit *Literal) string { @@ -212,7 +222,6 @@ func keyFor(bval *querypb.BindVariable, lit *Literal) string { // and number that have the same representation don't // collide. return "'" + lit.Val - } // convertLiteral converts an Literal without the dedup. @@ -229,8 +238,7 @@ func (nz *normalizer) convertLiteral(node *Literal, cursor *Cursor) { bvname := nz.reserved.nextUnusedVar() nz.bindVars[bvname] = bval - - cursor.Replace(NewArgument(bvname)) + cursor.Replace(NewTypedArgument(bvname, node.SQLType())) } // convertComparison attempts to convert IN clauses to @@ -275,7 +283,7 @@ func (nz *normalizer) parameterize(left, right Expr) Expr { } key := keyFor(bval, lit) bvname := nz.decideBindVarName(key, lit, col, bval) - return Argument(bvname) + return NewTypedArgument(bvname, lit.SQLType()) } func (nz *normalizer) decideBindVarName(key string, lit *Literal, col *ColName, bval *querypb.BindVariable) string { @@ -345,16 +353,18 @@ func SQLToBindvar(node SQLNode) *querypb.BindVariable { case DecimalVal: v, err = sqltypes.NewValue(sqltypes.Decimal, node.Bytes()) case HexNum: - v, err = sqltypes.NewValue(sqltypes.HexNum, node.Bytes()) + buf := make([]byte, 0, len(node.Bytes())) + buf = append(buf, "0x"...) + buf = append(buf, bytes.ToUpper(node.Bytes()[2:])...) + v, err = sqltypes.NewValue(sqltypes.HexNum, buf) case HexVal: // We parse the `x'7b7d'` string literal into a hex encoded string of `7b7d` in the parser // We need to re-encode it back to the original MySQL query format before passing it on as a bindvar value to MySQL - var vbytes []byte - vbytes, err = node.encodeHexOrBitValToMySQLQueryFormat() - if err != nil { - return nil - } - v, err = sqltypes.NewValue(sqltypes.HexVal, vbytes) + buf := make([]byte, 0, len(node.Bytes())+3) + buf = append(buf, 'x', '\'') + buf = append(buf, bytes.ToUpper(node.Bytes())...) + buf = append(buf, '\'') + v, err = sqltypes.NewValue(sqltypes.HexVal, buf) case BitVal: // Convert bit value to hex number in parameterized query format var i big.Int @@ -362,7 +372,12 @@ func SQLToBindvar(node SQLNode) *querypb.BindVariable { if !ok { return nil } - v, err = sqltypes.NewValue(sqltypes.HexNum, []byte(fmt.Sprintf("0x%s", i.Text(16)))) + + buf := i.Bytes() + out := make([]byte, 0, (len(buf)*2)+2) + out = append(out, '0', 'x') + out = append(out, hex.EncodeBytes(buf)...) + v, err = sqltypes.NewValue(sqltypes.HexNum, out) case DateVal: v, err = sqltypes.NewValue(sqltypes.Date, node.Bytes()) case TimeVal: @@ -392,8 +407,8 @@ func GetBindvars(stmt Statement) map[string]struct{} { // Common node types that never contain expressions but create a lot of object // allocations. return false, nil - case Argument: - bindvars[string(node)] = struct{}{} + case *Argument: + bindvars[node.Name] = struct{}{} case ListArg: bindvars[string(node)] = struct{}{} } diff --git a/go/vt/sqlparser/normalizer_test.go b/go/vt/sqlparser/normalizer_test.go index 9fe0c929256..2b0a4b52122 100644 --- a/go/vt/sqlparser/normalizer_test.go +++ b/go/vt/sqlparser/normalizer_test.go @@ -44,7 +44,7 @@ func TestNormalize(t *testing.T) { }{{ // str val in: "select * from t where foobar = 'aa'", - outstmt: "select * from t where foobar = :foobar", + outstmt: "select * from t where foobar = :foobar /* VARCHAR */", outbv: map[string]*querypb.BindVariable{ "foobar": sqltypes.StringBindVariable("aa"), }, @@ -61,50 +61,50 @@ func TestNormalize(t *testing.T) { }, { // str val in select in: "select 'aa' from t", - outstmt: "select :bv1 from t", + outstmt: "select :bv1 /* VARCHAR */ from t", outbv: map[string]*querypb.BindVariable{ "bv1": sqltypes.StringBindVariable("aa"), }, }, { // int val in: "select * from t where foobar = 1", - outstmt: "select * from t where foobar = :foobar", + outstmt: "select * from t where foobar = :foobar /* INT64 */", outbv: map[string]*querypb.BindVariable{ "foobar": sqltypes.Int64BindVariable(1), }, }, { // float val in: "select * from t where foobar = 1.2", - outstmt: "select * from t where foobar = :foobar", + outstmt: "select * from t where foobar = :foobar /* DECIMAL */", outbv: map[string]*querypb.BindVariable{ - "foobar": sqltypes.DecimalBindVariable(1.2), + "foobar": sqltypes.DecimalBindVariable("1.2"), }, }, { // multiple vals in: "select * from t where foo = 1.2 and bar = 2", - outstmt: "select * from t where foo = :foo and bar = :bar", + outstmt: "select * from t where foo = :foo /* DECIMAL */ and bar = :bar /* INT64 */", outbv: map[string]*querypb.BindVariable{ - "foo": sqltypes.DecimalBindVariable(1.2), + "foo": sqltypes.DecimalBindVariable("1.2"), "bar": sqltypes.Int64BindVariable(2), }, }, { // bv collision in: "select * from t where foo = :bar and bar = 12", - outstmt: "select * from t where foo = :bar and bar = :bar1", + outstmt: "select * from t where foo = :bar and bar = :bar1 /* INT64 */", outbv: map[string]*querypb.BindVariable{ "bar1": sqltypes.Int64BindVariable(12), }, }, { // val reuse in: "select * from t where foo = 1 and bar = 1", - outstmt: "select * from t where foo = :foo and bar = :foo", + outstmt: "select * from t where foo = :foo /* INT64 */ and bar = :foo /* INT64 */", outbv: map[string]*querypb.BindVariable{ "foo": sqltypes.Int64BindVariable(1), }, }, { // ints and strings are different in: "select * from t where foo = 1 and bar = '1'", - outstmt: "select * from t where foo = :foo and bar = :bar", + outstmt: "select * from t where foo = :foo /* INT64 */ and bar = :bar /* VARCHAR */", outbv: map[string]*querypb.BindVariable{ "foo": sqltypes.Int64BindVariable(1), "bar": sqltypes.StringBindVariable("1"), @@ -112,7 +112,7 @@ func TestNormalize(t *testing.T) { }, { // val should not be reused for non-select statements in: "insert into a values(1, 1)", - outstmt: "insert into a values (:bv1, :bv2)", + outstmt: "insert into a values (:bv1 /* INT64 */, :bv2 /* INT64 */)", outbv: map[string]*querypb.BindVariable{ "bv1": sqltypes.Int64BindVariable(1), "bv2": sqltypes.Int64BindVariable(1), @@ -120,14 +120,14 @@ func TestNormalize(t *testing.T) { }, { // val should be reused only in subqueries of DMLs in: "update a set v1=(select 5 from t), v2=5, v3=(select 5 from t), v4=5", - outstmt: "update a set v1 = (select :bv1 from t), v2 = :bv1, v3 = (select :bv1 from t), v4 = :bv1", + outstmt: "update a set v1 = (select :bv1 /* INT64 */ from t), v2 = :bv1 /* INT64 */, v3 = (select :bv1 /* INT64 */ from t), v4 = :bv1 /* INT64 */", outbv: map[string]*querypb.BindVariable{ "bv1": sqltypes.Int64BindVariable(5), }, }, { // list vars should work for DMLs also in: "update a set v1=5 where v2 in (1, 4, 5)", - outstmt: "update a set v1 = :v1 where v2 in ::bv1", + outstmt: "update a set v1 = :v1 /* INT64 */ where v2 in ::bv1", outbv: map[string]*querypb.BindVariable{ "v1": sqltypes.Int64BindVariable(5), "bv1": sqltypes.TestBindVariable([]any{1, 4, 5}), @@ -135,51 +135,79 @@ func TestNormalize(t *testing.T) { }, { // Hex number values should work for selects in: "select * from t where foo = 0x1234", - outstmt: "select * from t where foo = :foo", + outstmt: "select * from t where foo = :foo /* HEXNUM */", outbv: map[string]*querypb.BindVariable{ "foo": sqltypes.HexNumBindVariable([]byte("0x1234")), }, + }, { + // Hex number values are normalized to a consistent case + in: "select * from t where foo = 0xdeadbeef", + outstmt: "select * from t where foo = :foo /* HEXNUM */", + outbv: map[string]*querypb.BindVariable{ + "foo": sqltypes.HexNumBindVariable([]byte("0xDEADBEEF")), + }, + }, { + // Hex number values are normalized to a consistent case + in: "select * from t where foo = 0xDEADBEEF", + outstmt: "select * from t where foo = :foo /* HEXNUM */", + outbv: map[string]*querypb.BindVariable{ + "foo": sqltypes.HexNumBindVariable([]byte("0xDEADBEEF")), + }, }, { // Hex encoded string values should work for selects in: "select * from t where foo = x'7b7d'", - outstmt: "select * from t where foo = :foo", + outstmt: "select * from t where foo = :foo /* HEXVAL */", outbv: map[string]*querypb.BindVariable{ - "foo": sqltypes.HexValBindVariable([]byte("x'7b7d'")), + "foo": sqltypes.HexValBindVariable([]byte("x'7B7D'")), + }, + }, { + // Hex encoded string are converted to a consistent case + in: "select * from t where foo = x'7b7D'", + outstmt: "select * from t where foo = :foo /* HEXVAL */", + outbv: map[string]*querypb.BindVariable{ + "foo": sqltypes.HexValBindVariable([]byte("x'7B7D'")), + }, + }, { + // Hex encoded string values should work for selects + in: "select * from t where foo = x'7B7D'", + outstmt: "select * from t where foo = :foo /* HEXVAL */", + outbv: map[string]*querypb.BindVariable{ + "foo": sqltypes.HexValBindVariable([]byte("x'7B7D'")), }, }, { // Ensure that hex notation bind vars work with collation based conversions in: "select convert(x'7b7d' using utf8mb4) from dual", - outstmt: "select convert(:bv1 using utf8mb4) from dual", + outstmt: "select convert(:bv1 /* HEXVAL */ using utf8mb4) from dual", outbv: map[string]*querypb.BindVariable{ - "bv1": sqltypes.HexValBindVariable([]byte("x'7b7d'")), + "bv1": sqltypes.HexValBindVariable([]byte("x'7B7D'")), }, }, { // Hex number values should work for DMLs in: "update a set foo = 0x12", - outstmt: "update a set foo = :foo", + outstmt: "update a set foo = :foo /* HEXNUM */", outbv: map[string]*querypb.BindVariable{ "foo": sqltypes.HexNumBindVariable([]byte("0x12")), }, }, { // Bin values work fine in: "select * from t where foo = b'11'", - outstmt: "select * from t where foo = :foo", + outstmt: "select * from t where foo = :foo /* HEXNUM */", outbv: map[string]*querypb.BindVariable{ - "foo": sqltypes.HexNumBindVariable([]byte("0x3")), + "foo": sqltypes.HexNumBindVariable([]byte("0x03")), }, }, { // Large bin values work fine in: "select * from t where foo = b'11101010100101010010101010101010101010101000100100100100100101001101010101010101000001'", - outstmt: "select * from t where foo = :foo", + outstmt: "select * from t where foo = :foo /* HEXNUM */", outbv: map[string]*querypb.BindVariable{ - "foo": sqltypes.HexNumBindVariable([]byte("0x3aa54aaaaaa24925355541")), + "foo": sqltypes.HexNumBindVariable([]byte("0x3AA54AAAAAA24925355541")), }, }, { // Bin value does not convert for DMLs in: "update a set v1 = b'11'", - outstmt: "update a set v1 = :v1", + outstmt: "update a set v1 = :v1 /* HEXNUM */", outbv: map[string]*querypb.BindVariable{ - "v1": sqltypes.HexNumBindVariable([]byte("0x3")), + "v1": sqltypes.HexNumBindVariable([]byte("0x03")), }, }, { // ORDER BY column_position @@ -194,7 +222,7 @@ func TestNormalize(t *testing.T) { }, { // ORDER BY with literal inside complex expression in: "select a, b from t order by field(a,1,2,3) asc", - outstmt: "select a, b from t order by field(a, :bv1, :bv2, :bv3) asc", + outstmt: "select a, b from t order by field(a, :bv1 /* INT64 */, :bv2 /* INT64 */, :bv3 /* INT64 */) asc", outbv: map[string]*querypb.BindVariable{ "bv1": sqltypes.Int64BindVariable(1), "bv2": sqltypes.Int64BindVariable(2), @@ -208,14 +236,14 @@ func TestNormalize(t *testing.T) { }, { // Values up to len 256 will reuse. in: fmt.Sprintf("select * from t where foo = '%256s' and bar = '%256s'", "a", "a"), - outstmt: "select * from t where foo = :foo and bar = :foo", + outstmt: "select * from t where foo = :foo /* VARCHAR */ and bar = :foo /* VARCHAR */", outbv: map[string]*querypb.BindVariable{ "foo": sqltypes.StringBindVariable(fmt.Sprintf("%256s", "a")), }, }, { // Values greater than len 256 will not reuse. in: fmt.Sprintf("select * from t where foo = '%257s' and bar = '%257s'", "b", "b"), - outstmt: "select * from t where foo = :foo and bar = :bar", + outstmt: "select * from t where foo = :foo /* VARCHAR */ and bar = :bar /* VARCHAR */", outbv: map[string]*querypb.BindVariable{ "foo": sqltypes.StringBindVariable(fmt.Sprintf("%257s", "b")), "bar": sqltypes.StringBindVariable(fmt.Sprintf("%257s", "b")), @@ -238,7 +266,7 @@ func TestNormalize(t *testing.T) { }, { // IN clause with non-val values in: "select * from t where v1 in (1, a)", - outstmt: "select * from t where v1 in (:bv1, a)", + outstmt: "select * from t where v1 in (:bv1 /* INT64 */, a)", outbv: map[string]*querypb.BindVariable{ "bv1": sqltypes.Int64BindVariable(1), }, @@ -266,14 +294,14 @@ func TestNormalize(t *testing.T) { }, { // Do not normalize cast/convert types in: `select CAST("test" AS CHAR(60))`, - outstmt: `select cast(:bv1 as CHAR(60)) from dual`, + outstmt: `select cast(:bv1 /* VARCHAR */ as CHAR(60)) from dual`, outbv: map[string]*querypb.BindVariable{ "bv1": sqltypes.StringBindVariable("test"), }, }, { // insert syntax in: "insert into a (v1, v2, v3) values (1, '2', 3)", - outstmt: "insert into a(v1, v2, v3) values (:bv1, :bv2, :bv3)", + outstmt: "insert into a(v1, v2, v3) values (:bv1 /* INT64 */, :bv2 /* VARCHAR */, :bv3 /* INT64 */)", outbv: map[string]*querypb.BindVariable{ "bv1": sqltypes.Int64BindVariable(1), "bv2": sqltypes.StringBindVariable("2"), @@ -282,38 +310,38 @@ func TestNormalize(t *testing.T) { }, { // BitVal should also be normalized in: `select b'1', 0b01, b'1010', 0b1111111`, - outstmt: `select :bv1, :bv2, :bv3, :bv4 from dual`, + outstmt: `select :bv1 /* HEXNUM */, :bv2 /* HEXNUM */, :bv3 /* HEXNUM */, :bv4 /* HEXNUM */ from dual`, outbv: map[string]*querypb.BindVariable{ - "bv1": sqltypes.HexNumBindVariable([]byte("0x1")), - "bv2": sqltypes.HexNumBindVariable([]byte("0x1")), - "bv3": sqltypes.HexNumBindVariable([]byte("0xa")), - "bv4": sqltypes.HexNumBindVariable([]byte("0x7f")), + "bv1": sqltypes.HexNumBindVariable([]byte("0x01")), + "bv2": sqltypes.HexNumBindVariable([]byte("0x01")), + "bv3": sqltypes.HexNumBindVariable([]byte("0x0A")), + "bv4": sqltypes.HexNumBindVariable([]byte("0x7F")), }, }, { // DateVal should also be normalized in: `select date'2022-08-06'`, - outstmt: `select :bv1 from dual`, + outstmt: `select :bv1 /* DATE */ from dual`, outbv: map[string]*querypb.BindVariable{ "bv1": sqltypes.ValueBindVariable(sqltypes.MakeTrusted(sqltypes.Date, []byte("2022-08-06"))), }, }, { // TimeVal should also be normalized in: `select time'17:05:12'`, - outstmt: `select :bv1 from dual`, + outstmt: `select :bv1 /* TIME */ from dual`, outbv: map[string]*querypb.BindVariable{ "bv1": sqltypes.ValueBindVariable(sqltypes.MakeTrusted(sqltypes.Time, []byte("17:05:12"))), }, }, { // TimestampVal should also be normalized in: `select timestamp'2022-08-06 17:05:12'`, - outstmt: `select :bv1 from dual`, + outstmt: `select :bv1 /* DATETIME */ from dual`, outbv: map[string]*querypb.BindVariable{ "bv1": sqltypes.ValueBindVariable(sqltypes.MakeTrusted(sqltypes.Datetime, []byte("2022-08-06 17:05:12"))), }, }, { // TimestampVal should also be normalized in: `explain select comms_by_companies.* from comms_by_companies where comms_by_companies.id = 'rjve634shXzaavKHbAH16ql6OrxJ' limit 1,1`, - outstmt: `explain select comms_by_companies.* from comms_by_companies where comms_by_companies.id = :comms_by_companies_id limit :bv1, :bv2`, + outstmt: `explain select comms_by_companies.* from comms_by_companies where comms_by_companies.id = :comms_by_companies_id /* VARCHAR */ limit :bv1 /* INT64 */, :bv2 /* INT64 */`, outbv: map[string]*querypb.BindVariable{ "bv1": sqltypes.Int64BindVariable(1), "bv2": sqltypes.Int64BindVariable(1), @@ -322,14 +350,14 @@ func TestNormalize(t *testing.T) { }, { // Int leading with zero should also be normalized in: `select * from t where zipcode = 01001900`, - outstmt: `select * from t where zipcode = :zipcode`, + outstmt: `select * from t where zipcode = :zipcode /* INT64 */`, outbv: map[string]*querypb.BindVariable{ "zipcode": sqltypes.ValueBindVariable(sqltypes.MakeTrusted(sqltypes.Int64, []byte("01001900"))), }, }, { // literals in limit and offset should not reuse bindvars in: `select * from t where id = 10 limit 10 offset 10`, - outstmt: `select * from t where id = :id limit :bv1, :bv2`, + outstmt: `select * from t where id = :id /* INT64 */ limit :bv1 /* INT64 */, :bv2 /* INT64 */`, outbv: map[string]*querypb.BindVariable{ "bv1": sqltypes.Int64BindVariable(10), "bv2": sqltypes.Int64BindVariable(10), @@ -363,13 +391,13 @@ func TestNormalizeInvalidDates(t *testing.T) { err error }{{ in: "select date'foo'", - err: vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "incorrect DATE value: '%s'", "foo"), + err: vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "Incorrect DATE value: '%s'", "foo"), }, { in: "select time'foo'", - err: vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "incorrect TIME value: '%s'", "foo"), + err: vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "Incorrect TIME value: '%s'", "foo"), }, { in: "select timestamp'foo'", - err: vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "incorrect DATETIME value: '%s'", "foo"), + err: vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "Incorrect DATETIME value: '%s'", "foo"), }} for _, tc := range testcases { t.Run(tc.in, func(t *testing.T) { @@ -408,6 +436,34 @@ func TestNormalizeValidSQL(t *testing.T) { } } +func TestNormalizeOneCasae(t *testing.T) { + testOne := struct { + input, output string + }{ + input: "", + output: "", + } + if testOne.input == "" { + t.Skip("empty test case") + } + tree, err := Parse(testOne.input) + require.NoError(t, err, testOne.input) + // Skip the test for the queries that do not run the normalizer + if !CanNormalize(tree) { + return + } + bv := make(map[string]*querypb.BindVariable) + known := make(BindVars) + err = Normalize(tree, NewReservedVars("vtg", known), bv) + require.NoError(t, err) + normalizerOutput := String(tree) + if normalizerOutput == "otheradmin" || normalizerOutput == "otherread" { + return + } + _, err = Parse(normalizerOutput) + require.NoError(t, err, normalizerOutput) +} + func TestGetBindVars(t *testing.T) { stmt, err := Parse("select * from t where :v1 = :v2 and :v2 = :v3 and :v4 in ::v5") if err != nil { diff --git a/go/vt/sqlparser/parse_date.go b/go/vt/sqlparser/parse_date.go deleted file mode 100644 index a82c334aaf5..00000000000 --- a/go/vt/sqlparser/parse_date.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sqlparser - -import ( - "fmt" - "strconv" - "strings" - "time" - - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" -) - -var dateFormats = []string{"2006-01-02", "06-01-02", "20060102", "060102"} -var datetimeFormats = []string{"2006-01-02 15:04:05.9", "06-01-02 15:04:05.9", "20060102150405.9", "060102150405.9"} -var timeWithDayFormats = []string{"15:04:05.9", "15:04", "15"} -var timeWithoutDayFormats = []string{"15:04:05.9", "15:04", "150405.9", "0405", "05"} - -func ParseDate(in string) (t time.Time, err error) { - for _, f := range dateFormats { - t, err = time.Parse(f, in) - if err == nil { - return t, nil - } - } - return t, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "incorrect DATE value: '%s'", in) -} - -func ParseTime(in string) (t time.Time, err error) { - // ParseTime is right now only excepting on specific - // time format and doesn't accept all formats MySQL accepts. - // Can be improved in the future as needed. - if in == "" { - return t, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "incorrect TIME value: '%s'", in) - } - start := 0 - neg := in[start] == '-' - if neg { - start++ - } - - parts := strings.Split(in[start:], " ") - if len(parts) > 2 { - return t, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "incorrect TIME value: '%s'", in) - } - days := 0 - hourMinuteSeconds := parts[0] - if len(parts) == 2 { - days, err = strconv.Atoi(parts[0]) - if err != nil { - fmt.Printf("atoi failed: %+v\n", err) - return t, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "incorrect TIME value: '%s'", in) - } - if days < 0 { - // Double negative which is not allowed - return t, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "incorrect TIME value: '%s'", in) - } - if days > 34 { - return t, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "incorrect TIME value: '%s'", in) - } - for _, f := range timeWithDayFormats { - t, err = time.Parse(f, parts[1]) - if err == nil { - break - } - } - } else { - for _, f := range timeWithoutDayFormats { - t, err = time.Parse(f, hourMinuteSeconds) - if err == nil { - break - } - } - } - - if err != nil { - return t, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "incorrect TIME value: '%s'", in) - } - - // setting the date to today's date, because t is "0000-01-01 xx:xx:xx" - now := time.Now() - year, month, day := now.Date() - if neg { - // If we have a negative time, we start with the start of today - // and substract the total duration of the parsed time. - today := time.Date(year, month, day, 0, 0, 0, 0, t.Location()) - duration := time.Duration(days)*24*time.Hour + - time.Duration(t.Hour())*time.Hour + - time.Duration(t.Minute())*time.Minute + - time.Duration(t.Second())*time.Second + - time.Duration(t.Nanosecond())*time.Nanosecond - t = today.Add(-duration) - } else { - // In case of a positive time, we can take a quicker - // shortcut and add the date of today. - t = t.AddDate(year, int(month-1), day-1+days) - } - return t, nil -} - -func ParseDateTime(in string) (t time.Time, err error) { - for _, f := range datetimeFormats { - t, err = time.Parse(f, in) - if err == nil { - return t, nil - } - } - return t, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "incorrect DATETIME value: '%s'", in) -} diff --git a/go/vt/sqlparser/parse_date_test.go b/go/vt/sqlparser/parse_date_test.go deleted file mode 100644 index 4a6206cb5cf..00000000000 --- a/go/vt/sqlparser/parse_date_test.go +++ /dev/null @@ -1,195 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sqlparser - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestParseDate(t *testing.T) { - type date struct { - year int - month time.Month - day int - } - tests := []struct { - input string - output date - err bool - }{{ - input: "2022-10-12", - output: date{2022, time.October, 12}, - }, { - input: "22-10-12", - output: date{2022, time.October, 12}, - }, { - input: "20221012", - output: date{2022, time.October, 12}, - }, { - input: "221012", - output: date{2022, time.October, 12}, - }, { - input: "2022", - err: true, - }} - - for _, test := range tests { - t.Run(test.input, func(t *testing.T) { - got, err := ParseDate(test.input) - if test.err { - assert.Error(t, err) - return - } - - require.NoError(t, err) - assert.Equal(t, test.output.year, got.Year()) - assert.Equal(t, test.output.month, got.Month()) - assert.Equal(t, test.output.day, got.Day()) - }) - } -} - -func TestParseTime(t *testing.T) { - type testTime struct { - hour int - minute int - second int - nanosecond int - } - tests := []struct { - input string - output testTime - err bool - }{{ - input: "11:12:13", - output: testTime{11, 12, 13, 0}, - }, { - input: "11:12:13.123456", - output: testTime{11, 12, 13, 123456000}, - }, { - input: "3 11:12:13", - output: testTime{3*24 + 11, 12, 13, 0}, - }, { - input: "35 11:12:13", - err: true, - }, { - input: "11:12", - output: testTime{11, 12, 0, 0}, - }, { - input: "5 11:12", - output: testTime{5*24 + 11, 12, 0, 0}, - }, { - input: "-2 11:12", - output: testTime{-2*24 - 11, -12, 0, 0}, - }, { - input: "--2 11:12", - err: true, - }, { - input: "2 11", - output: testTime{2*24 + 11, 0, 0, 0}, - }, { - input: "2 -11", - err: true, - }, { - input: "13", - output: testTime{0, 0, 13, 0}, - }, { - input: "111213", - output: testTime{11, 12, 13, 0}, - }, { - input: "111213.123456", - output: testTime{11, 12, 13, 123456000}, - }, { - input: "-111213", - output: testTime{-11, -12, -13, 0}, - }, { - input: "1213", - output: testTime{0, 12, 13, 0}, - }, { - input: "25:12:13", - err: true, - }} - - for _, test := range tests { - t.Run(test.input, func(t *testing.T) { - got, err := ParseTime(test.input) - if test.err { - assert.Errorf(t, err, "got: %s", got) - return - } - - require.NoError(t, err) - now := time.Now() - startOfToday := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC) - expected := startOfToday.Add(time.Duration(test.output.hour)*time.Hour + - time.Duration(test.output.minute)*time.Minute + - time.Duration(test.output.second)*time.Second + - time.Duration(test.output.nanosecond)*time.Nanosecond) - - assert.Equal(t, expected, got) - }) - } -} - -func TestParseDateTime(t *testing.T) { - type datetime struct { - year int - month time.Month - day int - hour int - minute int - second int - nanosecond int - } - tests := []struct { - input string - output datetime - err bool - }{{ - input: "2022-10-12 11:12:13", - output: datetime{2022, time.October, 12, 11, 12, 13, 0}, - }, { - input: "2022-10-12 11:12:13.123456", - output: datetime{2022, time.October, 12, 11, 12, 13, 123456000}, - }, { - input: "20221012111213.123456", - output: datetime{2022, time.October, 12, 11, 12, 13, 123456000}, - }} - - for _, test := range tests { - t.Run(test.input, func(t *testing.T) { - got, err := ParseDateTime(test.input) - if test.err { - assert.Error(t, err) - return - } - - require.NoError(t, err) - assert.Equal(t, test.output.year, got.Year()) - assert.Equal(t, test.output.month, got.Month()) - assert.Equal(t, test.output.day, got.Day()) - assert.Equal(t, test.output.hour, got.Hour()) - assert.Equal(t, test.output.minute, got.Minute()) - assert.Equal(t, test.output.second, got.Second()) - assert.Equal(t, test.output.nanosecond, got.Nanosecond()) - }) - } -} diff --git a/go/vt/sqlparser/parse_test.go b/go/vt/sqlparser/parse_test.go index 2717eb3d9c9..d48f8c84140 100644 --- a/go/vt/sqlparser/parse_test.go +++ b/go/vt/sqlparser/parse_test.go @@ -44,17 +44,25 @@ var ( partialDDL bool ignoreNormalizerTest bool }{{ + input: "select * from foo limit 5 + 5", + }, { input: "create table x(location GEOMETRYCOLLECTION DEFAULT (POINT(7.0, 3.0)))", output: "create table x (\n\tlocation GEOMETRYCOLLECTION default (point(7.0, 3.0))\n)", }, { input: "create table t (id int primary key, dt datetime DEFAULT (CURRENT_TIMESTAMP))", - output: "create table t (\n\tid int primary key,\n\tdt datetime default current_timestamp()\n)", + output: "create table t (\n\tid int primary key,\n\tdt datetime default (current_timestamp())\n)", }, { input: "create table t (id int primary key, dt datetime DEFAULT now())", output: "create table t (\n\tid int primary key,\n\tdt datetime default now()\n)", }, { input: "create table t (id int primary key, dt datetime DEFAULT (now()))", - output: "create table t (\n\tid int primary key,\n\tdt datetime default now()\n)", + output: "create table t (\n\tid int primary key,\n\tdt datetime default (now())\n)", + }, { + input: "create table t (id int primary key, dt datetime(6) DEFAULT (now()))", + output: "create table t (\n\tid int primary key,\n\tdt datetime(6) default (now())\n)", + }, { + input: "create table t (id int primary key, dt datetime DEFAULT (now() + 1))", + output: "create table t (\n\tid int primary key,\n\tdt datetime default (now() + 1)\n)", }, { input: "create table x (e enum('red','yellow') null collate 'utf8_bin')", output: "create table x (\n\te enum('red', 'yellow') collate 'utf8_bin' null\n)", @@ -93,52 +101,52 @@ var ( output: "select extract(microsecond from '2003-01-02 10:30:00.000123') from dual", }, { input: "CREATE TABLE t2 (b BLOB DEFAULT 'abc')", - output: "create table t2 (\n\tb BLOB default ('abc')\n)", + output: "create table t2 (\n\tb BLOB default 'abc'\n)", }, { input: "CREATE TABLE t2 (b blob DEFAULT 'abc')", - output: "create table t2 (\n\tb blob default ('abc')\n)", + output: "create table t2 (\n\tb blob default 'abc'\n)", }, { input: "CREATE TABLE t2 (b BLOB DEFAULT ('abc'))", output: "create table t2 (\n\tb BLOB default ('abc')\n)", }, { input: "CREATE TABLE t2 (b TINYBLOB DEFAULT 'abc')", - output: "create table t2 (\n\tb TINYBLOB default ('abc')\n)", + output: "create table t2 (\n\tb TINYBLOB default 'abc'\n)", }, { input: "CREATE TABLE t2 (b TINYBLOB DEFAULT ('abc'))", output: "create table t2 (\n\tb TINYBLOB default ('abc')\n)", }, { input: "CREATE TABLE t2 (b MEDIUMBLOB DEFAULT 'abc')", - output: "create table t2 (\n\tb MEDIUMBLOB default ('abc')\n)", + output: "create table t2 (\n\tb MEDIUMBLOB default 'abc'\n)", }, { input: "CREATE TABLE t2 (b MEDIUMBLOB DEFAULT ('abc'))", output: "create table t2 (\n\tb MEDIUMBLOB default ('abc')\n)", }, { input: "CREATE TABLE t2 (b LONGBLOB DEFAULT 'abc')", - output: "create table t2 (\n\tb LONGBLOB default ('abc')\n)", + output: "create table t2 (\n\tb LONGBLOB default 'abc'\n)", }, { input: "CREATE TABLE t2 (b LONGBLOB DEFAULT ('abc'))", output: "create table t2 (\n\tb LONGBLOB default ('abc')\n)", }, { input: "CREATE TABLE t2 (b TEXT DEFAULT 'abc')", - output: "create table t2 (\n\tb TEXT default ('abc')\n)", + output: "create table t2 (\n\tb TEXT default 'abc'\n)", }, { input: "CREATE TABLE t2 (b TEXT DEFAULT ('abc'))", output: "create table t2 (\n\tb TEXT default ('abc')\n)", }, { input: "CREATE TABLE t2 (b TINYTEXT DEFAULT 'abc')", - output: "create table t2 (\n\tb TINYTEXT default ('abc')\n)", + output: "create table t2 (\n\tb TINYTEXT default 'abc'\n)", }, { input: "CREATE TABLE t2 (b TINYTEXT DEFAULT ('abc'))", output: "create table t2 (\n\tb TINYTEXT default ('abc')\n)", }, { input: "CREATE TABLE t2 (b MEDIUMTEXT DEFAULT 'abc')", - output: "create table t2 (\n\tb MEDIUMTEXT default ('abc')\n)", + output: "create table t2 (\n\tb MEDIUMTEXT default 'abc'\n)", }, { input: "CREATE TABLE t2 (b MEDIUMTEXT DEFAULT ('abc'))", output: "create table t2 (\n\tb MEDIUMTEXT default ('abc')\n)", }, { input: "CREATE TABLE t2 (b LONGTEXT DEFAULT 'abc')", - output: "create table t2 (\n\tb LONGTEXT default ('abc')\n)", + output: "create table t2 (\n\tb LONGTEXT default 'abc'\n)", }, { input: "CREATE TABLE t2 (b LONGTEXT DEFAULT ('abc'))", output: "create table t2 (\n\tb LONGTEXT default ('abc')\n)", @@ -147,16 +155,16 @@ var ( output: "create table t2 (\n\tb JSON default null\n)", }, { input: "CREATE TABLE t2 (b JSON DEFAULT (null))", - output: "create table t2 (\n\tb JSON default null\n)", + output: "create table t2 (\n\tb JSON default (null)\n)", }, { input: "CREATE TABLE t2 (b JSON DEFAULT '{name:abc}')", - output: "create table t2 (\n\tb JSON default ('{name:abc}')\n)", + output: "create table t2 (\n\tb JSON default '{name:abc}'\n)", }, { input: "CREATE TABLE t2 (b JSON DEFAULT ('{name:abc}'))", output: "create table t2 (\n\tb JSON default ('{name:abc}')\n)", }, { input: "create table x(location POINT DEFAULT 7.0)", - output: "create table x (\n\tlocation POINT default (7.0)\n)", + output: "create table x (\n\tlocation POINT default 7.0\n)", }, { input: "create table x(location POINT DEFAULT (7.0))", output: "create table x (\n\tlocation POINT default (7.0)\n)", @@ -192,7 +200,307 @@ var ( output: "create table x (\n\tlocation GEOMETRYCOLLECTION default (polygon(linestring(point(4, 5), point(4.6, 7.9), point(4.6, 7.9))))\n)", }, { input: "select ST_ASTEXT(POLYGON(linestrings)) from linestringTable", - output: "select ST_ASTEXT(polygon(linestrings)) from linestringTable", + output: "select st_astext(polygon(linestrings)) from linestringTable", + }, { + input: "create table x(location GEOMETRYCOLLECTION DEFAULT (MULTIPOINT(POINT(4, 5), POINT(4.6, 7.9), POINT(4.6, 7.9))))", + output: "create table x (\n\tlocation GEOMETRYCOLLECTION default (multipoint(point(4, 5), point(4.6, 7.9), point(4.6, 7.9)))\n)", + }, { + input: "select ST_ASTEXT(MULTIPOINT(points)) from pointsTable", + output: "select st_astext(multipoint(points)) from pointsTable", + }, { + input: "create table x(location GEOMETRYCOLLECTION DEFAULT (MULTILINESTRING(LINESTRING(POINT(8,9), POINT(8,9)))))", + output: "create table x (\n\tlocation GEOMETRYCOLLECTION default (multilinestring(linestring(point(8, 9), point(8, 9))))\n)", + }, { + input: "select ST_ASTEXT(MULTILINESTRING(linestrings)) from linestringsTable", + output: "select st_astext(multilinestring(linestrings)) from linestringsTable", + }, { + input: "create table x(location GEOMETRYCOLLECTION DEFAULT (MULTIPOLYGON(POINT(7.0, 3.0), POINT(7.0, 3.0))))", + output: "create table x (\n\tlocation GEOMETRYCOLLECTION default (multipolygon(point(7.0, 3.0), point(7.0, 3.0)))\n)", + }, { + input: "select ST_ASTEXT(MULTIPOLYGON(polygons)) from polygonTable", + output: "select st_astext(multipolygon(polygons)) from polygonTable", + }, { + input: "SELECT ST_AsText(ST_GeomCollFromText('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))'))", + output: "select st_astext(st_geometrycollectionfromtext('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))')) from dual", + }, { + input: "SELECT ST_AsText(ST_GeomCollFromText('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))', 4326))", + output: "select st_astext(st_geometrycollectionfromtext('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))', 4326)) from dual", + }, { + input: "SELECT ST_AsText(ST_GeomCollFromText('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))', 4326, 'axis-order=lat-long'))", + output: "select st_astext(st_geometrycollectionfromtext('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))', 4326, 'axis-order=lat-long')) from dual", + }, { + input: "SELECT ST_AsText(ST_GeomFromText('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))'))", + output: "select st_astext(st_geometryfromtext('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))')) from dual", + }, { + input: "SELECT ST_AsText(ST_GeomFromText('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))', 4326))", + output: "select st_astext(st_geometryfromtext('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))', 4326)) from dual", + }, { + input: "SELECT ST_AsText(ST_GeomFromText('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))', 4326, 'axis-order=lat-long'))", + output: "select st_astext(st_geometryfromtext('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))', 4326, 'axis-order=lat-long')) from dual", + }, { + input: "SELECT ST_AsText(ST_MultilinestringFromText('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))'))", + output: "select st_astext(st_multilinestringfromtext('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))')) from dual", + }, { + input: "SELECT ST_AsText(ST_MultilinestringFromText('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))', 4326))", + output: "select st_astext(st_multilinestringfromtext('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))', 4326)) from dual", + }, { + input: "SELECT ST_AsText(ST_MultilinestringFromText('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))', 4326, 'axis-order=lat-long'))", + output: "select st_astext(st_multilinestringfromtext('MULTILINESTRING((10 10, 11 11), (9 9, 10 10))', 4326, 'axis-order=lat-long')) from dual", + }, { + input: "SELECT ST_AsText(ST_LinestringFromText('LINESTRING((10 10, 11 11))'))", + output: "select st_astext(st_linestringfromtext('LINESTRING((10 10, 11 11))')) from dual", + }, { + input: "SELECT ST_AsText(ST_LinestringFromText('LINESTRING((10 10, 11 11))', 4326))", + output: "select st_astext(st_linestringfromtext('LINESTRING((10 10, 11 11))', 4326)) from dual", + }, { + input: "SELECT ST_AsText(ST_LinestringFromText('LINESTRING((10 10, 11 11))', 4326, 'axis-order=lat-long'))", + output: "select st_astext(st_linestringfromtext('LINESTRING((10 10, 11 11))', 4326, 'axis-order=lat-long')) from dual", + }, { + input: "SELECT ST_AsText(ST_PointFromText('POINT(10 10)'))", + output: "select st_astext(st_pointfromtext('POINT(10 10)')) from dual", + }, { + input: "SELECT ST_AsText(ST_PointFromText('POINT(10 10)', 4326))", + output: "select st_astext(st_pointfromtext('POINT(10 10)', 4326)) from dual", + }, { + input: "SELECT ST_AsText(ST_MultiPointFromText('MULTIPOINT((10 10, 11 11))'))", + output: "select st_astext(st_multipointfromtext('MULTIPOINT((10 10, 11 11))')) from dual", + }, { + input: "SELECT ST_AsText(ST_MultiPointFromText('MULTIPOINT((10 10, 11 11))', 4326))", + output: "select st_astext(st_multipointfromtext('MULTIPOINT((10 10, 11 11))', 4326)) from dual", + }, { + input: "SELECT ST_AsText(ST_MultiPointFromText('MULTIPOINT((10 10, 11 11))', 4326, 'axis-order=lat-long'))", + output: "select st_astext(st_multipointfromtext('MULTIPOINT((10 10, 11 11))', 4326, 'axis-order=lat-long')) from dual", + }, { + input: "SELECT ST_AsText(ST_MultiPolygonFromText(@g))", + output: "select st_astext(st_multipolygonfromtext(@g)) from dual", + }, { + input: "SELECT ST_AsText(ST_MultiPolygonFromText(@g, 4326))", + output: "select st_astext(st_multipolygonfromtext(@g, 4326)) from dual", + }, { + input: "SELECT ST_AsText(ST_MultiPolygonFromText(@g, 4326, 'axis-order=lat-long'))", + output: "select st_astext(st_multipolygonfromtext(@g, 4326, 'axis-order=lat-long')) from dual", + }, { + input: "SELECT ST_AsText(ST_PolygonFromText(@g))", + output: "select st_astext(st_polygonfromtext(@g)) from dual", + }, { + input: "SELECT ST_AsText(ST_PolygonFromText(@g, 4326))", + output: "select st_astext(st_polygonfromtext(@g, 4326)) from dual", + }, { + input: "SELECT ST_AsText(ST_PolygonFromText(@g, 4326, 'axis-order=long-lat'))", + output: "select st_astext(st_polygonfromtext(@g, 4326, 'axis-order=long-lat')) from dual", + }, { + input: "SELECT ST_AsText(ST_GeomCollFromWKB(0x010100000000000000000022400000000000002240))", + output: "select st_astext(st_geometrycollectionfromwkb(0x010100000000000000000022400000000000002240)) from dual", + }, { + input: "SELECT ST_AsText(ST_GeomCollFromWKB(g, 4326))", + output: "select st_astext(st_geometrycollectionfromwkb(g, 4326)) from dual", + }, { + input: "SELECT ST_AsText(ST_GeomCollFromText(g, 4326, 'axis-order=lat-long'))", + output: "select st_astext(st_geometrycollectionfromtext(g, 4326, 'axis-order=lat-long')) from dual", + }, { + input: "SELECT ST_AsText(ST_GeomFromWKB(g))", + output: "select st_astext(st_geometryfromwkb(g)) from dual", + }, { + input: "SELECT ST_AsText(ST_GeomFromWKB(g, 4326))", + output: "select st_astext(st_geometryfromwkb(g, 4326)) from dual", + }, { + input: "SELECT ST_AsText(ST_GeomFromWKB(g, 4326, 'axis-order=lat-long'))", + output: "select st_astext(st_geometryfromwkb(g, 4326, 'axis-order=lat-long')) from dual", + }, { + input: "SELECT ST_AsText(ST_MultilinestringFromWKB(g))", + output: "select st_astext(st_multilinestringfromwkb(g)) from dual", + }, { + input: "SELECT ST_AsText(ST_MultilinestringFromWKB(g, 4326))", + output: "select st_astext(st_multilinestringfromwkb(g, 4326)) from dual", + }, { + input: "SELECT ST_AsText(ST_MultilinestringFromWKB(0x01050000000200000001020000000200000000000000000024400000000000002440000000000000264000000000000026400102000000020000000000000000002240000000000000224000000000000024400000000000002440, 4326, 'axis-order=lat-long'))", + output: "select st_astext(st_multilinestringfromwkb(0x01050000000200000001020000000200000000000000000024400000000000002440000000000000264000000000000026400102000000020000000000000000002240000000000000224000000000000024400000000000002440, 4326, 'axis-order=lat-long')) from dual", + }, { + input: "SELECT ST_AsText(ST_LinestringFromWKB(g))", + output: "select st_astext(st_linestringfromwkb(g)) from dual", + }, { + input: "SELECT ST_AsText(ST_LinestringFromWKB(g, 4326))", + output: "select st_astext(st_linestringfromwkb(g, 4326)) from dual", + }, { + input: "SELECT ST_AsText(ST_LinestringFromWKB(g, 4326, 'axis-order=lat-long'))", + output: "select st_astext(st_linestringfromwkb(g, 4326, 'axis-order=lat-long')) from dual", + }, { + input: "SELECT ST_AsText(ST_PointFromWKB(mp))", + output: "select st_astext(st_pointfromwkb(mp)) from dual", + }, { + input: "SELECT ST_AsText(ST_PointFromWKB(mp, 4326))", + output: "select st_astext(st_pointfromwkb(mp, 4326)) from dual", + }, { + input: "SELECT ST_AsText(ST_MultiPointFromWKB(mp))", + output: "select st_astext(st_multipointfromwkb(mp)) from dual", + }, { + input: "SELECT ST_AsText(ST_MultiPointFromWKB(mp, 4326))", + output: "select st_astext(st_multipointfromwkb(mp, 4326)) from dual", + }, { + input: "SELECT ST_AsText(ST_MultiPointFromWKB(mp, 4326, 'axis-order=lat-long'))", + output: "select st_astext(st_multipointfromwkb(mp, 4326, 'axis-order=lat-long')) from dual", + }, { + input: "SELECT ST_AsText(ST_MultiPolygonFromText(g))", + output: "select st_astext(st_multipolygonfromtext(g)) from dual", + }, { + input: "SELECT ST_AsText(ST_MultiPolygonFromText(g, 4326))", + output: "select st_astext(st_multipolygonfromtext(g, 4326)) from dual", + }, { + input: "SELECT ST_AsText(ST_MultiPolygonFromText(g, 4326, 'axis-order=lat-long'))", + output: "select st_astext(st_multipolygonfromtext(g, 4326, 'axis-order=lat-long')) from dual", + }, { + input: "SELECT ST_AsText(ST_PolygonFromText(g))", + output: "select st_astext(st_polygonfromtext(g)) from dual", + }, { + input: "SELECT ST_AsText(ST_PolygonFromText(g, 4326))", + output: "select st_astext(st_polygonfromtext(g, 4326)) from dual", + }, { + input: "SELECT ST_AsText(ST_PolygonFromText(g, 4326, 'axis-order=long-lat'), 'axis-order=long-lat')", + output: "select st_astext(st_polygonfromtext(g, 4326, 'axis-order=long-lat'), 'axis-order=long-lat') from dual", + }, { + input: "SELECT ST_AsWKT(ST_GeomCollFromText(g, 4326, 'axis-order=lat-long'))", + output: "select st_astext(st_geometrycollectionfromtext(g, 4326, 'axis-order=lat-long')) from dual", + }, { + input: "SELECT ST_AsBinary(ST_PolygonFromText(g, 4326, 'axis-order=long-lat'), 'axis-order=long-lat')", + output: "select st_asbinary(st_polygonfromtext(g, 4326, 'axis-order=long-lat'), 'axis-order=long-lat') from dual", + }, { + input: "SELECT ST_AsWKB(ST_GeomCollFromText(g, 4326, 'axis-order=lat-long'))", + output: "select st_asbinary(st_geometrycollectionfromtext(g, 4326, 'axis-order=lat-long')) from dual", + }, { + input: "SELECT ST_Dimension(ST_GeomFromText('LineString(1 1,2 2)'))", + output: "select st_dimension(st_geometryfromtext('LineString(1 1,2 2)')) from dual", + }, { + input: "SELECT ST_AsText(ST_Envelope(ST_GeomFromText('LineString(1 1,2 2)')))", + output: "select st_astext(st_envelope(st_geometryfromtext('LineString(1 1,2 2)'))) from dual", + }, { + input: "SELECT ST_IsSimple(ST_GeomFromText('POINT(1 1)'))", + output: "select st_issimple(st_geometryfromtext('POINT(1 1)')) from dual", + }, { + input: "SELECT ST_GeometryType(ST_GeomFromText('POINT(1 1)'))", + output: "select st_geometrytype(st_geometryfromtext('POINT(1 1)')) from dual", + }, { + input: "SELECT ST_IsEmpty(ST_GeomFromText('POINT(1 1)'))", + output: "select st_isempty(st_geometryfromtext('POINT(1 1)')) from dual", + }, { + input: "SELECT ST_Latitude(@pt);", + output: "select st_latitude(@pt) from dual", + }, { + input: "SELECT ST_AsText(ST_Latitude(@pt, 10));", + output: "select st_astext(st_latitude(@pt, 10)) from dual", + }, { + input: "SELECT ST_Longitude(@pt);", + output: "select st_longitude(@pt) from dual", + }, { + input: "SELECT ST_AsText(ST_Longitude(@pt, 10));", + output: "select st_astext(st_longitude(@pt, 10)) from dual", + }, { + input: "SELECT ST_X(@pt);", + output: "select st_x(@pt) from dual", + }, { + input: "SELECT ST_AsText(ST_X(@pt, 10));", + output: "select st_astext(st_x(@pt, 10)) from dual", + }, { + input: "SELECT ST_Y(@pt);", + output: "select st_y(@pt) from dual", + }, { + input: "SELECT ST_AsText(ST_Y(@pt, 10));", + output: "select st_astext(st_y(@pt, 10)) from dual", + }, { + input: "SELECT ST_AsText(ST_EndPoint(ST_GeomFromText(@ls)));", + output: "select st_astext(st_endpoint(st_geometryfromtext(@ls))) from dual", + }, { + input: "SELECT ST_IsClosed(ST_GeomFromText(@ls1));", + output: "select st_isclosed(st_geometryfromtext(@ls1)) from dual", + }, { + input: "SELECT IsClosed(ST_GeomFromText(@ls1));", + output: "select st_isclosed(st_geometryfromtext(@ls1)) from dual", + }, { + input: "SELECT ST_Length(@ls);", + output: "select st_length(@ls) from dual", + }, { + input: "SELECT ST_Length(@ls, 'metre');", + output: "select st_length(@ls, 'metre') from dual", + }, { + input: "SELECT GLength(@ls);", + output: "select st_length(@ls) from dual", + }, { + input: "SELECT GLength(@ls, 'metre');", + output: "select st_length(@ls, 'metre') from dual", + }, { + input: "SELECT ST_NumPoints(ST_GeomFromText(@ls));", + output: "select st_numpoints(st_geometryfromtext(@ls)) from dual", + }, { + input: "SELECT Numpoints(ST_GeomFromText(@ls));", + output: "select st_numpoints(st_geometryfromtext(@ls)) from dual", + }, { + input: "SELECT ST_AsText(ST_PointN(ST_GeomFromText(@ls),2));", + output: "select st_astext(st_pointn(st_geometryfromtext(@ls), 2)) from dual", + }, { + input: "SELECT ST_AsText(PointN(ST_GeomFromText(@ls),2));", + output: "select st_astext(st_pointn(st_geometryfromtext(@ls), 2)) from dual", + }, { + input: "SELECT ST_AsText(ST_StartPoint(ST_GeomFromText(@ls)));", + output: "select st_astext(st_startpoint(st_geometryfromtext(@ls))) from dual", + }, { + input: "SELECT ST_AsText(StartPoint(ST_GeomFromText(@ls)));", + output: "select st_astext(st_startpoint(st_geometryfromtext(@ls))) from dual", + }, { + input: "SELECT ST_Area(ST_GeomFromText(@mpoly));", + output: "select st_area(st_geometryfromtext(@mpoly)) from dual", + }, { + input: "SELECT ST_AsText(ST_GeometryN(ST_GeomFromText(@gc),1));", + output: "select st_astext(st_geometryn(st_geometryfromtext(@gc), 1)) from dual", + }, { + input: "SELECT ST_NumGeometries(ST_GeomFromText(@gc));", + output: "select st_numgeometries(st_geometryfromtext(@gc)) from dual", + }, { + input: "SELECT ST_GeometryType(@poly),ST_AsText(ST_Centroid(@poly));", + output: "select st_geometrytype(@poly), st_astext(st_centroid(@poly)) from dual", + }, { + input: "SELECT ST_AsText(ST_ExteriorRing(ST_GeomFromText(@poly)));", + output: "select st_astext(st_exteriorring(st_geometryfromtext(@poly))) from dual", + }, { + input: "SELECT ST_AsText(ST_InteriorRingN(ST_GeomFromText(@poly),1));", + output: "select st_astext(st_interiorringN(st_geometryfromtext(@poly), 1)) from dual", + }, { + input: "SELECT ST_NumInteriorRings(ST_GeomFromText(@poly));", + output: "select st_numinteriorrings(st_geometryfromtext(@poly)) from dual", + }, { + input: "SELECT ST_NumInteriorRing(ST_GeomFromText(@poly));", + output: "select st_numinteriorrings(st_geometryfromtext(@poly)) from dual", + }, { + input: "SELECT ST_GeoHash(180,0,10), ST_GeoHash(-180,-90,15);", + output: "select st_geohash(180, 0, 10), st_geohash(-180, -90, 15) from dual", + }, { + input: "SELECT ST_GeoHash(@p,10);", + output: "select st_geohash(@p, 10) from dual", + }, { + input: "SELECT ST_LatFromGeoHash(ST_GeoHash(45,-20,10));", + output: "select st_latfromgeohash(st_geohash(45, -20, 10)) from dual", + }, { + input: "SELECT ST_LongFromGeoHash(ST_GeoHash(45,-20,10));", + output: "select st_longfromgeohash(st_geohash(45, -20, 10)) from dual", + }, { + input: "SELECT ST_AsText(ST_PointFromGeoHash(@gh,0));", + output: "select st_astext(st_pointfromgeohash(@gh, 0)) from dual", + }, { + input: "SELECT ST_AsGeoJSON(ST_GeomFromText('POINT(11.11111 12.22222)'));", + output: "select st_asgeojson(st_geometryfromtext('POINT(11.11111 12.22222)')) from dual", + }, { + input: "SELECT ST_AsGeoJSON(ST_GeomFromText('POINT(11.11111 12.22222)'),2);", + output: "select st_asgeojson(st_geometryfromtext('POINT(11.11111 12.22222)'), 2) from dual", + }, { + input: "SELECT ST_AsGeoJSON(ST_GeomFromText('POINT(11.11111 12.22222)'),2,0);", + output: "select st_asgeojson(st_geometryfromtext('POINT(11.11111 12.22222)'), 2, 0) from dual", + }, { + input: "SELECT ST_AsText(ST_GeomFromGeoJSON(@json));", + output: "select st_astext(st_geomfromgeojson(@`json`)) from dual", + }, { + input: "SELECT ST_AsText(ST_SRID(ST_GeomFromGeoJSON(@json, 0),0));", + output: "select st_astext(ST_SRID(st_geomfromgeojson(@`json`, 0), 0)) from dual", + }, { + input: "SELECT ST_AsText(ST_SRID(ST_GeomFromGeoJSON(@json),1,4326));", + output: "select st_astext(ST_SRID(st_geomfromgeojson(@`json`), 1, 4326)) from dual", }, { input: "WITH RECURSIVE odd_num_cte (id, n) AS (SELECT 1, 1 union all SELECT id+1, n+2 from odd_num_cte where id < 5) SELECT * FROM odd_num_cte", output: "with recursive odd_num_cte(id, n) as (select 1, 1 from dual union all select id + 1, n + 2 from odd_num_cte where id < 5) select * from odd_num_cte", @@ -723,7 +1031,7 @@ var ( }, { input: "select /* utc_timestamp as func */ utc_timestamp() from t", }, { - input: "select /* utc_timestamp with fsp */ utc_timestamp(0) from t", + input: "select /* utc_timestamp with fsp */ utc_timestamp(1) from t", }, { input: "select /* utc_time */ utc_time() from t", }, { @@ -860,16 +1168,16 @@ var ( input: "select /* interval keyword */ adddate('2008-01-02', interval 1 year) from t", }, { input: "select /* TIMESTAMPADD */ TIMESTAMPADD(MINUTE, 1, '2008-01-04') from t", - output: "select /* TIMESTAMPADD */ timestampadd(MINUTE, 1, '2008-01-04') from t", + output: "select /* TIMESTAMPADD */ timestampadd(minute, 1, '2008-01-04') from t", }, { input: "select /* TIMESTAMPDIFF */ TIMESTAMPDIFF(MINUTE, '2008-01-02', '2008-01-04') from t", - output: "select /* TIMESTAMPDIFF */ timestampdiff(MINUTE, '2008-01-02', '2008-01-04') from t", + output: "select /* TIMESTAMPDIFF */ timestampdiff(minute, '2008-01-02', '2008-01-04') from t", }, { input: "select DATE_ADD(MIN(FROM_UNIXTIME(1673444922)),interval -DAYOFWEEK(MIN(FROM_UNIXTIME(1673444922)))+1 DAY)", - output: "select DATE_ADD(min(FROM_UNIXTIME(1673444922)), interval (-DAYOFWEEK(min(FROM_UNIXTIME(1673444922))) + 1) DAY) from dual", + output: "select date_add(min(FROM_UNIXTIME(1673444922)), interval -DAYOFWEEK(min(FROM_UNIXTIME(1673444922))) + 1 day) from dual", }, { input: "select '2020-01-01' + interval month(DATE_SUB(FROM_UNIXTIME(1234), interval 1 month))-1 month", - output: "select '2020-01-01' + interval (month(DATE_SUB(FROM_UNIXTIME(1234), interval 1 month)) - 1) month from dual", + output: "select '2020-01-01' + interval month(date_sub(FROM_UNIXTIME(1234), interval 1 month)) - 1 month from dual", }, { input: "select /* dual */ 1 from dual", }, { @@ -991,6 +1299,11 @@ var ( input: "update /* simple */ a set b = 3", }, { input: "update /* a.b */ a.b set b = 3", + }, { + input: "update a.b set d = @v := d + 7 where u = 42", + }, { + input: "select @topic3_id:= 10103;", + output: "select @topic3_id := 10103 from dual", }, { input: "update /* list */ a set b = 3, c = 4", }, { @@ -1771,7 +2084,7 @@ var ( output: "rename table x.a to b, b to c", }, { input: "drop view a,B,c", - output: "drop view a, b, c", + output: "drop view a, B, c", }, { input: "drop /*vt+ strategy=online */ view if exists v", }, { @@ -1816,11 +2129,14 @@ var ( input: "flush no_write_to_binlog slow logs, status, user_resources, relay logs, relay logs for channel s", output: "flush local slow logs, status, user_resources, relay logs, relay logs for channel s", }, { - input: "show binary logs", - output: "show binary logs", + input: "show binary logs", }, { input: "show binlog events", output: "show binlog", + }, { + input: "purge binary logs to 'x'", + }, { + input: "purge binary logs before '2020-02-02 20:20:20'", }, { input: "show character set", output: "show charset", @@ -2291,7 +2607,8 @@ var ( }, { input: "select 1 from t where foo = _binary 'bar'", }, { - input: "select 1 from t where foo = _utf8 'bar' and bar = _latin1 'sjösjuk'", + input: "select 1 from t where foo = _utf8 'bar' and bar = _latin1 'sjösjuk'", + output: "select 1 from t where foo = _utf8mb3 'bar' and bar = _latin1 'sjösjuk'", }, { input: "select 1 from t where foo = _binary'bar'", output: "select 1 from t where foo = _binary 'bar'", @@ -2302,10 +2619,10 @@ var ( output: "select 1 from t where foo = _utf8mb4 'bar'", }, { input: "select 1 from t where foo = _utf8mb3 'bar'", - output: "select 1 from t where foo = _utf8 'bar'", + output: "select 1 from t where foo = _utf8mb3 'bar'", }, { - input: "select 1 from t where foo = _utf8mb3'bar'", - output: "select 1 from t where foo = _utf8 'bar'", + input: "select 1 from t where foo = _utf8'bar'", + output: "select 1 from t where foo = _utf8mb3 'bar'", }, { input: "select match(a) against ('foo') from t", }, { @@ -2333,6 +2650,8 @@ var ( }, { input: "select name, group_concat(distinct id, score order by id desc separator ':' limit 10, 2) from t group by name", output: "select `name`, group_concat(distinct id, score order by id desc separator ':' limit 10, 2) from t group by `name`", + }, { + input: "select foo, any_value(id) from tbl group by foo", }, { input: "select * from t partition (p0)", }, { @@ -2539,10 +2858,10 @@ var ( output: "deallocate prepare stmt1", }, { input: "DROP PREPARE stmt1", - output: "drop prepare stmt1", + output: "deallocate prepare stmt1", }, { input: "DROP /* comment */ PREPARE stmt1", - output: "drop /* comment */ prepare stmt1", + output: "deallocate /* comment */ prepare stmt1", }, { input: `SELECT JSON_PRETTY('{"a":"10","b":"15","x":"25"}')`, output: `select json_pretty('{\"a\":\"10\",\"b\":\"15\",\"x\":\"25\"}') from dual`, @@ -2702,7 +3021,7 @@ var ( output: "select json_array(BIN(11)) from dual", }, { input: `SELECT JSON_ARRAY(1, "abc", NULL, TRUE, CURTIME());`, - output: `select json_array(1, 'abc', null, true, CURTIME()) from dual`, + output: `select json_array(1, 'abc', null, true, curtime()) from dual`, }, { input: "SELECT JSON_OBJECT(1,2)", output: "select json_object(1, 2) from dual", @@ -3203,13 +3522,13 @@ var ( output: "select `time`, subject, val, first_value(val) over w as `first`, last_value(val) over w as `last`, nth_value(val, 2) over w as `second`, nth_value(val, 4) over w as fourth from observations window w AS ( partition by subject order by `time` asc range 10 preceding)", }, { input: "SELECT time, subject, val, FIRST_VALUE(val) OVER w AS 'first', LAST_VALUE(val) OVER w AS 'last', NTH_VALUE(val, 2) OVER w AS 'second', NTH_VALUE(val, 4) OVER w AS 'fourth' FROM observations WINDOW w AS (PARTITION BY subject ORDER BY time ROWS INTERVAL 5 DAY PRECEDING);", - output: "select `time`, subject, val, first_value(val) over w as `first`, last_value(val) over w as `last`, nth_value(val, 2) over w as `second`, nth_value(val, 4) over w as fourth from observations window w AS ( partition by subject order by `time` asc rows interval 5 DAY preceding)", + output: "select `time`, subject, val, first_value(val) over w as `first`, last_value(val) over w as `last`, nth_value(val, 2) over w as `second`, nth_value(val, 4) over w as fourth from observations window w AS ( partition by subject order by `time` asc rows interval 5 day preceding)", }, { input: "SELECT time, subject, val, FIRST_VALUE(val) OVER w AS 'first', LAST_VALUE(val) OVER w AS 'last', NTH_VALUE(val, 2) OVER w AS 'second', NTH_VALUE(val, 4) OVER w AS 'fourth' FROM observations WINDOW w AS (PARTITION BY subject ORDER BY time RANGE 5 FOLLOWING);", output: "select `time`, subject, val, first_value(val) over w as `first`, last_value(val) over w as `last`, nth_value(val, 2) over w as `second`, nth_value(val, 4) over w as fourth from observations window w AS ( partition by subject order by `time` asc range 5 following)", }, { input: "SELECT time, subject, val, FIRST_VALUE(val) OVER w AS 'first', LAST_VALUE(val) OVER w AS 'last', NTH_VALUE(val, 2) OVER w AS 'second', NTH_VALUE(val, 4) OVER w AS 'fourth' FROM observations WINDOW w AS (PARTITION BY subject ORDER BY time ROWS INTERVAL '2:30' MINUTE_SECOND FOLLOWING);", - output: "select `time`, subject, val, first_value(val) over w as `first`, last_value(val) over w as `last`, nth_value(val, 2) over w as `second`, nth_value(val, 4) over w as fourth from observations window w AS ( partition by subject order by `time` asc rows interval '2:30' MINUTE_SECOND following)", + output: "select `time`, subject, val, first_value(val) over w as `first`, last_value(val) over w as `last`, nth_value(val, 2) over w as `second`, nth_value(val, 4) over w as fourth from observations window w AS ( partition by subject order by `time` asc rows interval '2:30' minute_second following)", }, { input: "SELECT time, subject, val, FIRST_VALUE(val) OVER w AS 'first', LAST_VALUE(val) OVER w AS 'last', NTH_VALUE(val, 2) OVER w AS 'second', NTH_VALUE(val, 4) OVER w AS 'fourth' FROM observations WINDOW w AS (PARTITION BY subject ORDER BY time ASC RANGE BETWEEN 10 PRECEDING AND 10 FOLLOWING);", output: "select `time`, subject, val, first_value(val) over w as `first`, last_value(val) over w as `last`, nth_value(val, 2) over w as `second`, nth_value(val, 4) over w as fourth from observations window w AS ( partition by subject order by `time` asc range between 10 preceding and 10 following)", @@ -3339,6 +3658,13 @@ var ( }, { input: `select * from t1 where col1 like 'ks\_' and col2 = 'ks\_' and col1 like 'ks_' and col2 = 'ks_'`, output: `select * from t1 where col1 like 'ks\_' and col2 = 'ks\_' and col1 like 'ks_' and col2 = 'ks_'`, + }, { + input: `kill connection 18446744073709551615`, + }, { + input: `kill query 18446744073709551615`, + }, { + input: `kill 18446744073709551615`, + output: `kill connection 18446744073709551615`, }} ) @@ -3719,13 +4045,13 @@ func TestIntroducers(t *testing.T) { output: "select _utf32 'x' from dual", }, { input: "select _utf8 'x'", - output: "select _utf8 'x' from dual", + output: "select _utf8mb3 'x' from dual", }, { input: "select _utf8mb4 'x'", output: "select _utf8mb4 'x' from dual", }, { input: "select _utf8mb3 'x'", - output: "select _utf8 'x' from dual", + output: "select _utf8mb3 'x' from dual", }} for _, tcase := range validSQL { t.Run(tcase.input, func(t *testing.T) { @@ -3757,9 +4083,7 @@ func TestCaseSensitivity(t *testing.T) { input: "alter table A convert unparsable", output: "alter table A", }, { - // View names get lower-cased. - input: "alter view A as select * from t", - output: "alter view a as select * from t", + input: "alter view A as select * from t", }, { input: "alter table A rename to B", output: "alter table A rename B", @@ -3809,14 +4133,11 @@ func TestCaseSensitivity(t *testing.T) { input: "CREATE TABLE A (\n\t`A` int\n)", output: "create table A (\n\tA int\n)", }, { - input: "create view A as select * from b", - output: "create view a as select * from b", + input: "create view A as select * from b", }, { - input: "drop view A", - output: "drop view a", + input: "drop view A", }, { - input: "drop view if exists A", - output: "drop view if exists a", + input: "drop view if exists A", }, { input: "select /* lock in SHARE MODE */ 1 from t lock in SHARE MODE", output: "select /* lock in SHARE MODE */ 1 from t lock in share mode", @@ -3894,7 +4215,7 @@ func TestKeywords(t *testing.T) { }, { input: "select left(a, 5) from t", }, { - input: "update t set d = adddate(date('2003-12-31 01:02:03'), interval 5 days)", + input: "update t set d = adddate(date('2003-12-31 01:02:03'), interval 5 day)", }, { input: "insert into t(a, b) values (left('foo', 1), 'b')", }, { @@ -4688,7 +5009,7 @@ func TestCreateTable(t *testing.T) { output: `create table t ( time1 timestamp default now(), time2 timestamp default now(), - time3 timestamp default now(), + time3 timestamp default (now()), time4 timestamp default now() on update now(), time5 timestamp default now() on update now(), time6 timestamp(3) default now(3) on update now(3) @@ -5494,17 +5815,7 @@ var ( "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + "(F(F(F(F(F(F(F(F(F(F(F(F(", - output: "max nesting level reached at position 406", - }, { - input: "select(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + - "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + - "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + - "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + - "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + - "(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(" + - "F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F(F" + - "(F(F(F(F(F(F(F(F(F(F(F(", - output: "syntax error at position 404", + output: "syntax error at position 406", }, { // This construct is considered invalid due to a grammar conflict. input: "insert into a select * from b join c on duplicate key update d=e", @@ -5884,7 +6195,7 @@ func testFile(t *testing.T, filename, tempDir string) { if fail && tempDir != "" { gotFile := fmt.Sprintf("%s/%s", tempDir, filename) _ = os.WriteFile(gotFile, []byte(strings.TrimSpace(expected.String())+"\n"), 0644) - fmt.Println(fmt.Sprintf("Errors found in parse tests. If the output is correct, run `cp %s/* testdata/` to update test expectations", tempDir)) // nolint + fmt.Printf("Errors found in parse tests. If the output is correct, run `cp %s/* testdata/` to update test expectations\n", tempDir) } }) } diff --git a/go/vt/sqlparser/parsed_query.go b/go/vt/sqlparser/parsed_query.go index feeb34ce676..b6b03a1901a 100644 --- a/go/vt/sqlparser/parsed_query.go +++ b/go/vt/sqlparser/parsed_query.go @@ -21,12 +21,11 @@ import ( "fmt" "strings" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/bytes2" - + vjson "vitess.io/vitess/go/mysql/json" "vitess.io/vitess/go/sqltypes" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -124,21 +123,31 @@ func (pq *ParsedQuery) AppendFromRow(buf *bytes2.Buffer, fields []*querypb.Field for i, loc := range pq.bindLocations { col := rowInfo[i] buf.WriteString(pq.Query[offsetQuery:loc.offset]) - typ := col.typ - if typ == querypb.Type_TUPLE { - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected Type_TUPLE for value %d", i) - } - length := col.length - if length < 0 { - // -1 means a null variable; serialize it directly - buf.WriteString("null") - } else { - vv := sqltypes.MakeTrusted(typ, row.Values[col.offset:col.offset+col.length]) - vv.EncodeSQLBytes2(buf) + switch typ { + case querypb.Type_TUPLE: + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected Type_TUPLE for value %d", i) + case querypb.Type_JSON: + if col.length < 0 { // An SQL NULL and not an actual JSON value + buf.WriteString(sqltypes.NullStr) + } else { // A JSON value (which may be a JSON null literal value) + buf2 := row.Values[col.offset : col.offset+col.length] + vv, err := vjson.MarshalSQLValue(buf2) + if err != nil { + return err + } + buf.WriteString(vv.RawStr()) + } + default: + if col.length < 0 { + // -1 means a null variable; serialize it directly + buf.WriteString(sqltypes.NullStr) + } else { + vv := sqltypes.MakeTrusted(typ, row.Values[col.offset:col.offset+col.length]) + vv.EncodeSQLBytes2(buf) + } } - offsetQuery = loc.offset + loc.length } buf.WriteString(pq.Query[offsetQuery:]) @@ -153,22 +162,23 @@ func (pq *ParsedQuery) MarshalJSON() ([]byte, error) { // EncodeValue encodes one bind variable value into the query. func EncodeValue(buf *strings.Builder, value *querypb.BindVariable) { - if value.Type != querypb.Type_TUPLE { - // Since we already check for TUPLE, we don't expect an error. + switch value.Type { + case querypb.Type_TUPLE: + buf.WriteByte('(') + for i, bv := range value.Values { + if i != 0 { + buf.WriteString(", ") + } + sqltypes.ProtoToValue(bv).EncodeSQLStringBuilder(buf) + } + buf.WriteByte(')') + case querypb.Type_JSON: + v, _ := sqltypes.BindVariableToValue(value) + buf.Write(v.Raw()) + default: v, _ := sqltypes.BindVariableToValue(value) v.EncodeSQLStringBuilder(buf) - return } - - // It's a TUPLE. - buf.WriteByte('(') - for i, bv := range value.Values { - if i != 0 { - buf.WriteString(", ") - } - sqltypes.ProtoToValue(bv).EncodeSQLStringBuilder(buf) - } - buf.WriteByte(')') } // FetchBindVar resolves the bind variable by fetching it from bindVariables. @@ -207,14 +217,24 @@ func FetchBindVar(name string, bindVariables map[string]*querypb.BindVariable) ( // query, err := ParseAndBind("select * from tbl where name=%a", sqltypes.StringBindVariable("it's me")) func ParseAndBind(in string, binds ...*querypb.BindVariable) (query string, err error) { vars := make([]any, len(binds)) - for i := range binds { - vars[i] = fmt.Sprintf(":var%d", i) + for i, bv := range binds { + switch bv.Type { + case querypb.Type_TUPLE: + vars[i] = fmt.Sprintf("::vars%d", i) + default: + vars[i] = fmt.Sprintf(":var%d", i) + } } parsed := BuildParsedQuery(in, vars...) bindVars := map[string]*querypb.BindVariable{} - for i := range binds { - bindVars[fmt.Sprintf("var%d", i)] = binds[i] + for i, bv := range binds { + switch bv.Type { + case querypb.Type_TUPLE: + bindVars[fmt.Sprintf("vars%d", i)] = binds[i] + default: + bindVars[fmt.Sprintf("var%d", i)] = binds[i] + } } return parsed.GenerateQuery(bindVars, nil) } diff --git a/go/vt/sqlparser/precedence.go b/go/vt/sqlparser/precedence.go index d63a56b62ef..cadf0d38261 100644 --- a/go/vt/sqlparser/precedence.go +++ b/go/vt/sqlparser/precedence.go @@ -86,8 +86,6 @@ func precedenceFor(in Expr) Precendence { case BangOp: return P3 } - case *IntervalExpr: - return P1 case *ExtractedSubquery: return precedenceFor(node.alternative) } diff --git a/go/vt/sqlparser/precedence_test.go b/go/vt/sqlparser/precedence_test.go index 215c9480823..66546eeebd0 100644 --- a/go/vt/sqlparser/precedence_test.go +++ b/go/vt/sqlparser/precedence_test.go @@ -18,6 +18,7 @@ package sqlparser import ( "fmt" + "math/rand" "testing" "time" @@ -215,8 +216,9 @@ func TestRandom(t *testing.T) { // The purpose of this test is to find discrepancies between Format and parsing. If for example our precedence rules are not consistent between the two, this test should find it. // The idea is to generate random queries, and pass them through the parser and then the unparser, and one more time. The result of the first unparse should be the same as the second result. seed := time.Now().UnixNano() - fmt.Println(fmt.Sprintf("seed is %d", seed)) // nolint - g := newGenerator(seed, 5) + r := rand.New(rand.NewSource(seed)) + fmt.Printf("seed is %d\n", seed) + g := NewGenerator(r, 5) endBy := time.Now().Add(1 * time.Second) for { @@ -224,7 +226,7 @@ func TestRandom(t *testing.T) { break } // Given a random expression - randomExpr := g.expression() + randomExpr := g.Expression(ExprGeneratorConfig{}) inputQ := "select " + String(randomExpr) + " from t" // When it's parsed and unparsed diff --git a/go/vt/sqlparser/predicate_rewriting.go b/go/vt/sqlparser/predicate_rewriting.go index eb772191a13..40e9a953f57 100644 --- a/go/vt/sqlparser/predicate_rewriting.go +++ b/go/vt/sqlparser/predicate_rewriting.go @@ -16,41 +16,43 @@ limitations under the License. package sqlparser -const ( - Changed RewriteState = true - NoChange RewriteState = false +import ( + "vitess.io/vitess/go/vt/log" ) -type RewriteState bool - // RewritePredicate walks the input AST and rewrites any boolean logic into a simpler form // This simpler form is CNF plus logic for extracting predicates from OR, plus logic for turning ORs into IN // Note: In order to re-plan, we need to empty the accumulated metadata in the AST, // so ColName.Metadata will be nil:ed out as part of this rewrite func RewritePredicate(ast SQLNode) SQLNode { for { - finishedRewrite := true - ast = SafeRewrite(ast, nil, func(cursor *Cursor) bool { - if e, isExpr := cursor.node.(Expr); isExpr { - rewritten, state := simplifyExpression(e) - if state == Changed { - finishedRewrite = false - cursor.Replace(rewritten) - } + printExpr(ast) + exprChanged := false + stopOnChange := func(SQLNode, SQLNode) bool { + return !exprChanged + } + ast = SafeRewrite(ast, stopOnChange, func(cursor *Cursor) bool { + e, isExpr := cursor.node.(Expr) + if !isExpr { + return true } - if col, isCol := cursor.node.(*ColName); isCol { - col.Metadata = nil + + rewritten, state := simplifyExpression(e) + if ch, isChange := state.(changed); isChange { + printRule(ch.rule, ch.exprMatched) + exprChanged = true + cursor.Replace(rewritten) } - return true + return !exprChanged }) - if finishedRewrite { + if !exprChanged { return ast } } } -func simplifyExpression(expr Expr) (Expr, RewriteState) { +func simplifyExpression(expr Expr) (Expr, rewriteState) { switch expr := expr.(type) { case *NotExpr: return simplifyNot(expr) @@ -61,24 +63,22 @@ func simplifyExpression(expr Expr) (Expr, RewriteState) { case *AndExpr: return simplifyAnd(expr) } - return expr, NoChange + return expr, noChange{} } -func simplifyNot(expr *NotExpr) (Expr, RewriteState) { +func simplifyNot(expr *NotExpr) (Expr, rewriteState) { switch child := expr.Expr.(type) { case *NotExpr: - // NOT NOT A => A - return child.Expr, Changed + return child.Expr, + newChange("NOT NOT A => A", f(expr)) case *OrExpr: - // DeMorgan Rewriter - // NOT (A OR B) => NOT A AND NOT B - return &AndExpr{Right: &NotExpr{Expr: child.Right}, Left: &NotExpr{Expr: child.Left}}, Changed + return &AndExpr{Right: &NotExpr{Expr: child.Right}, Left: &NotExpr{Expr: child.Left}}, + newChange("NOT (A OR B) => NOT A AND NOT B", f(expr)) case *AndExpr: - // DeMorgan Rewriter - // NOT (A AND B) => NOT A OR NOT B - return &OrExpr{Right: &NotExpr{Expr: child.Right}, Left: &NotExpr{Expr: child.Left}}, Changed + return &OrExpr{Right: &NotExpr{Expr: child.Right}, Left: &NotExpr{Expr: child.Left}}, + newChange("NOT (A AND B) => NOT A OR NOT B", f(expr)) } - return expr, NoChange + return expr, noChange{} } // ExtractINFromOR will add additional predicated to an OR. @@ -104,16 +104,16 @@ func ExtractINFromOR(expr *OrExpr) []Expr { continue } in, state := tryTurningOrIntoIn(l, r) - if state == Changed { + if state.changed() { ins = append(ins, in) } } } - return ins + return uniquefy(ins) } -func simplifyOr(expr *OrExpr) (Expr, RewriteState) { +func simplifyOr(expr *OrExpr) (Expr, rewriteState) { or := expr // first we search for ANDs and see how they can be simplified @@ -121,42 +121,47 @@ func simplifyOr(expr *OrExpr) (Expr, RewriteState) { rand, rok := or.Right.(*AndExpr) switch { case lok && rok: + // (<> AND <>) OR (<> AND <>) var a, b, c Expr + var change changed switch { - // (A and B) or (A and C) => A AND (B OR C) case Equals.Expr(land.Left, rand.Left): + change = newChange("(A and B) or (A and C) => A AND (B OR C)", f(expr)) a, b, c = land.Left, land.Right, rand.Right - // (A and B) or (C and A) => A AND (B OR C) case Equals.Expr(land.Left, rand.Right): + change = newChange("(A and B) or (C and A) => A AND (B OR C)", f(expr)) a, b, c = land.Left, land.Right, rand.Left - // (B and A) or (A and C) => A AND (B OR C) case Equals.Expr(land.Right, rand.Left): + change = newChange("(B and A) or (A and C) => A AND (B OR C)", f(expr)) a, b, c = land.Right, land.Left, rand.Right - // (B and A) or (C and A) => A AND (B OR C) case Equals.Expr(land.Right, rand.Right): + change = newChange("(B and A) or (C and A) => A AND (B OR C)", f(expr)) a, b, c = land.Right, land.Left, rand.Left default: - return expr, NoChange + return expr, noChange{} } - return &AndExpr{Left: a, Right: &OrExpr{Left: b, Right: c}}, Changed + return &AndExpr{Left: a, Right: &OrExpr{Left: b, Right: c}}, change case lok: + // (<> AND <>) OR <> // Simplification - // (A AND B) OR A => A if Equals.Expr(or.Right, land.Left) || Equals.Expr(or.Right, land.Right) { - return or.Right, Changed + return or.Right, newChange("(A AND B) OR A => A", f(expr)) } // Distribution Law - // (A AND B) OR C => (A OR C) AND (B OR C) - return &AndExpr{Left: &OrExpr{Left: land.Left, Right: or.Right}, Right: &OrExpr{Left: land.Right, Right: or.Right}}, Changed + return &AndExpr{Left: &OrExpr{Left: land.Left, Right: or.Right}, Right: &OrExpr{Left: land.Right, Right: or.Right}}, + newChange("(A AND B) OR C => (A OR C) AND (B OR C)", f(expr)) case rok: + // <> OR (<> AND <>) // Simplification - // A OR (A AND B) => A if Equals.Expr(or.Left, rand.Left) || Equals.Expr(or.Left, rand.Right) { - return or.Left, Changed + return or.Left, newChange("A OR (A AND B) => A", f(expr)) } // Distribution Law - // C OR (A AND B) => (C OR A) AND (C OR B) - return &AndExpr{Left: &OrExpr{Left: or.Left, Right: rand.Left}, Right: &OrExpr{Left: or.Left, Right: rand.Right}}, Changed + return &AndExpr{ + Left: &OrExpr{Left: or.Left, Right: rand.Left}, + Right: &OrExpr{Left: or.Left, Right: rand.Right}, + }, + newChange("C OR (A AND B) => (C OR A) AND (C OR B)", f(expr)) } // next, we want to try to turn multiple ORs into an IN when possible @@ -164,8 +169,8 @@ func simplifyOr(expr *OrExpr) (Expr, RewriteState) { rgtCmp, rok := or.Right.(*ComparisonExpr) if lok && rok { newExpr, rewritten := tryTurningOrIntoIn(lftCmp, rgtCmp) - if rewritten { - return newExpr, Changed + if rewritten.changed() { + return newExpr, rewritten } } @@ -173,46 +178,54 @@ func simplifyOr(expr *OrExpr) (Expr, RewriteState) { return distinctOr(expr) } -func tryTurningOrIntoIn(l, r *ComparisonExpr) (Expr, RewriteState) { +func tryTurningOrIntoIn(l, r *ComparisonExpr) (Expr, rewriteState) { // looks for A = X OR A = Y and turns them into A IN (X, Y) col, ok := l.Left.(*ColName) if !ok || !Equals.Expr(col, r.Left) { - return nil, NoChange + return nil, noChange{} } var tuple ValTuple - + var ruleStr string switch l.Operator { case EqualOp: tuple = ValTuple{l.Right} + ruleStr = "A = <>" case InOp: lft, ok := l.Right.(ValTuple) if !ok { - return nil, NoChange + return nil, noChange{} } tuple = lft + ruleStr = "A IN (<>, <>)" default: - return nil, NoChange + return nil, noChange{} } + ruleStr += " OR " + switch r.Operator { case EqualOp: tuple = append(tuple, r.Right) + ruleStr += "A = <>" case InOp: lft, ok := r.Right.(ValTuple) if !ok { - return nil, NoChange + return nil, noChange{} } tuple = append(tuple, lft...) + ruleStr += "A IN (<>, <>)" default: - return nil, NoChange + return nil, noChange{} } + ruleStr += " => A IN (<>, <>)" + return &ComparisonExpr{ Operator: InOp, Left: col, Right: uniquefy(tuple), - }, Changed + }, newChange(ruleStr, f(&OrExpr{Left: l, Right: r})) } func uniquefy(tuple ValTuple) (output ValTuple) { @@ -228,37 +241,45 @@ outer: return } -func simplifyXor(expr *XorExpr) (Expr, RewriteState) { +func simplifyXor(expr *XorExpr) (Expr, rewriteState) { // DeMorgan Rewriter - // (A XOR B) => (A OR B) AND NOT (A AND B) - return &AndExpr{Left: &OrExpr{Left: expr.Left, Right: expr.Right}, Right: &NotExpr{Expr: &AndExpr{Left: expr.Left, Right: expr.Right}}}, Changed + return &AndExpr{ + Left: &OrExpr{Left: expr.Left, Right: expr.Right}, + Right: &NotExpr{Expr: &AndExpr{Left: expr.Left, Right: expr.Right}}, + }, newChange("(A XOR B) => (A OR B) AND NOT (A AND B)", f(expr)) } -func simplifyAnd(expr *AndExpr) (Expr, RewriteState) { +func simplifyAnd(expr *AndExpr) (Expr, rewriteState) { res, rewritten := distinctAnd(expr) - if rewritten { + if rewritten.changed() { return res, rewritten } and := expr if or, ok := and.Left.(*OrExpr); ok { // Simplification - // (A OR B) AND A => A - if Equals.Expr(or.Left, and.Right) || Equals.Expr(or.Right, and.Right) { - return and.Right, Changed + + if Equals.Expr(or.Left, and.Right) { + return and.Right, newChange("(A OR B) AND A => A", f(expr)) + } + if Equals.Expr(or.Right, and.Right) { + return and.Right, newChange("(A OR B) AND B => B", f(expr)) } } if or, ok := and.Right.(*OrExpr); ok { // Simplification - // A OR (A AND B) => A - if Equals.Expr(or.Left, and.Left) || Equals.Expr(or.Right, and.Left) { - return or.Left, Changed + if Equals.Expr(or.Left, and.Left) { + return and.Left, newChange("A AND (A OR B) => A", f(expr)) + } + if Equals.Expr(or.Right, and.Left) { + return and.Left, newChange("A AND (B OR A) => A", f(expr)) } } - return expr, NoChange + return expr, noChange{} } -func distinctOr(in *OrExpr) (Expr, RewriteState) { +func distinctOr(in *OrExpr) (Expr, rewriteState) { + var skipped []*OrExpr todo := []*OrExpr{in} var leaves []Expr for len(todo) > 0 { @@ -284,13 +305,16 @@ outer1: leaves = leaves[1:] for _, alreadyIn := range predicates { if Equals.Expr(alreadyIn, curr) { + if log.V(0) { + skipped = append(skipped, &OrExpr{Left: alreadyIn, Right: curr}) + } continue outer1 } } predicates = append(predicates, curr) } if original == len(predicates) { - return in, NoChange + return in, noChange{} } var result Expr for i, curr := range predicates { @@ -300,42 +324,58 @@ outer1: } result = &OrExpr{Left: result, Right: curr} } - return result, Changed + + return result, newChange("A OR A => A", func() Expr { + var result Expr + for _, orExpr := range skipped { + if result == nil { + result = orExpr + continue + } + + result = &OrExpr{ + Left: result, + Right: orExpr, + } + } + return result + }) } -func distinctAnd(in *AndExpr) (Expr, RewriteState) { +func distinctAnd(in *AndExpr) (Expr, rewriteState) { + var skipped []*AndExpr todo := []*AndExpr{in} var leaves []Expr for len(todo) > 0 { curr := todo[0] todo = todo[1:] - addAnd := func(in Expr) { - and, ok := in.(*AndExpr) - if ok { + addExpr := func(in Expr) { + if and, ok := in.(*AndExpr); ok { todo = append(todo, and) } else { leaves = append(leaves, in) } } - addAnd(curr.Left) - addAnd(curr.Right) + addExpr(curr.Left) + addExpr(curr.Right) } original := len(leaves) var predicates []Expr outer1: - for len(leaves) > 0 { - curr := leaves[0] - leaves = leaves[1:] + for _, curr := range leaves { for _, alreadyIn := range predicates { if Equals.Expr(alreadyIn, curr) { + if log.V(0) { + skipped = append(skipped, &AndExpr{Left: alreadyIn, Right: curr}) + } continue outer1 } } predicates = append(predicates, curr) } if original == len(predicates) { - return in, NoChange + return in, noChange{} } var result Expr for i, curr := range predicates { @@ -345,5 +385,62 @@ outer1: } result = &AndExpr{Left: result, Right: curr} } - return result, Changed + return AndExpressions(leaves...), newChange("A AND A => A", func() Expr { + var result Expr + for _, andExpr := range skipped { + if result == nil { + result = andExpr + continue + } + + result = &AndExpr{ + Left: result, + Right: andExpr, + } + } + return result + }) +} + +type ( + rewriteState interface { + changed() bool + } + noChange struct{} + + // changed makes it possible to make sure we have a rule string for each change we do in the expression tree + changed struct { + rule string + + // ExprMatched is a function here so building of this expression can be paid only when we are debug logging + exprMatched func() Expr + } +) + +func (noChange) changed() bool { return false } +func (changed) changed() bool { return true } + +// f returns a function that returns the expression. It's short by design, so it interferes minimally +// used for logging +func f(e Expr) func() Expr { + return func() Expr { return e } +} + +func printRule(rule string, expr func() Expr) { + if log.V(10) { + log.Infof("Rule: %s ON %s", rule, String(expr())) + } +} + +func printExpr(expr SQLNode) { + if log.V(10) { + log.Infof("Current: %s", String(expr)) + } +} + +func newChange(rule string, exprMatched func() Expr) changed { + return changed{ + rule: rule, + exprMatched: exprMatched, + } } diff --git a/go/vt/sqlparser/predicate_rewriting_test.go b/go/vt/sqlparser/predicate_rewriting_test.go index 1c86dec61a2..34e23597894 100644 --- a/go/vt/sqlparser/predicate_rewriting_test.go +++ b/go/vt/sqlparser/predicate_rewriting_test.go @@ -92,7 +92,7 @@ func TestSimplifyExpression(in *testing.T) { require.NoError(t, err) expr, didRewrite := simplifyExpression(expr) - assert.True(t, didRewrite == Changed) + assert.True(t, didRewrite.changed()) assert.Equal(t, tc.expected, String(expr)) }) } @@ -114,12 +114,21 @@ func TestRewritePredicate(in *testing.T) { }, { in: "(A and B) OR (A and C)", expected: "A and (B or C)", + }, { + in: "(A and B) OR (C and A)", + expected: "A and (B or C)", + }, { + in: "(B and A) OR (A and C)", + expected: "A and (B or C)", }, { in: "(A and B) or (A and C) or (A and D)", expected: "A and (B or C or D)", }, { in: "(a=1 or a IN (1,2)) or (a = 2 or a = 3)", expected: "a in (1, 2, 3)", + }, { + in: "A and (B or A)", + expected: "A", }} for _, tc := range tests { diff --git a/go/vt/sqlparser/random_expr.go b/go/vt/sqlparser/random_expr.go index e2725f37a37..6eed8145ed2 100644 --- a/go/vt/sqlparser/random_expr.go +++ b/go/vt/sqlparser/random_expr.go @@ -23,34 +23,124 @@ import ( // This file is used to generate random expressions to be used for testing -func newGenerator(seed int64, maxDepth int) *generator { - g := generator{ - seed: seed, - r: rand.New(rand.NewSource(seed)), - maxDepth: maxDepth, +// Constants for Enum Type - AggregateRule +const ( + CannotAggregate AggregateRule = iota + CanAggregate + IsAggregate +) + +type ( + ExprGenerator interface { + Generate(r *rand.Rand, config ExprGeneratorConfig) Expr } - return &g + + QueryGenerator interface { + IsQueryGenerator() + ExprGenerator + } + + AggregateRule int8 + + ExprGeneratorConfig struct { + // AggrRule determines if the random expression can, cannot, or must be an aggregation expression + AggrRule AggregateRule + Type string + // MaxCols = 0 indicates no limit + NumCols int + // SingleRow indicates that the query must have at most one row + SingleRow bool + } + + Generator struct { + r *rand.Rand + depth int + maxDepth int + isAggregate bool + exprGenerators []ExprGenerator + } +) + +func NewExprGeneratorConfig(aggrRule AggregateRule, typ string, numCols int, singleRow bool) ExprGeneratorConfig { + return ExprGeneratorConfig{ + AggrRule: aggrRule, + Type: typ, + NumCols: numCols, + SingleRow: singleRow, + } +} + +func (egc ExprGeneratorConfig) SingleRowConfig() ExprGeneratorConfig { + egc.SingleRow = true + return egc +} + +func (egc ExprGeneratorConfig) MultiRowConfig() ExprGeneratorConfig { + egc.SingleRow = false + return egc +} + +func (egc ExprGeneratorConfig) SetNumCols(numCols int) ExprGeneratorConfig { + egc.NumCols = numCols + return egc +} + +func (egc ExprGeneratorConfig) boolTypeConfig() ExprGeneratorConfig { + egc.Type = "tinyint" + return egc +} + +func (egc ExprGeneratorConfig) intTypeConfig() ExprGeneratorConfig { + egc.Type = "bigint" + return egc +} + +func (egc ExprGeneratorConfig) stringTypeConfig() ExprGeneratorConfig { + egc.Type = "varchar" + return egc +} + +func (egc ExprGeneratorConfig) anyTypeConfig() ExprGeneratorConfig { + egc.Type = "" + return egc +} + +func (egc ExprGeneratorConfig) CannotAggregateConfig() ExprGeneratorConfig { + egc.AggrRule = CannotAggregate + return egc +} + +func (egc ExprGeneratorConfig) CanAggregateConfig() ExprGeneratorConfig { + egc.AggrRule = CanAggregate + return egc } -type generator struct { - seed int64 - r *rand.Rand - depth int - maxDepth int +func (egc ExprGeneratorConfig) IsAggregateConfig() ExprGeneratorConfig { + egc.AggrRule = IsAggregate + return egc +} + +func NewGenerator(r *rand.Rand, maxDepth int, exprGenerators ...ExprGenerator) *Generator { + g := Generator{ + r: r, + maxDepth: maxDepth, + exprGenerators: exprGenerators, + } + return &g } // enter should be called whenever we are producing an intermediate node. it should be followed by a `defer g.exit()` -func (g *generator) enter() { +func (g *Generator) enter() { g.depth++ } // exit should be called when exiting an intermediate node -func (g *generator) exit() { +func (g *Generator) exit() { g.depth-- } // atMaxDepth returns true if we have reached the maximum allowed depth or the expression tree -func (g *generator) atMaxDepth() bool { +func (g *Generator) atMaxDepth() bool { return g.depth >= g.maxDepth } @@ -58,147 +148,299 @@ func (g *generator) atMaxDepth() bool { Creates a random expression. It builds an expression tree using the following constructs: - true/false - AND/OR/NOT - - string literalrs, numeric literals (-/+ 1000) + - string literals, numeric literals (-/+ 1000) + - columns of types bigint and varchar + - scalar and tuple subqueries - =, >, <, >=, <=, <=>, != - &, |, ^, +, -, *, /, div, %, <<, >> - IN, BETWEEN and CASE - IS NULL, IS NOT NULL, IS TRUE, IS NOT TRUE, IS FALSE, IS NOT FALSE + Returns the random expression (Expr) and its type (string) Note: It's important to update this method so that it produces all expressions that need precedence checking. It's currently missing function calls and string operators */ -func (g *generator) expression() Expr { - if g.randomBool() { - return g.booleanExpr() +func (g *Generator) Expression(genConfig ExprGeneratorConfig) Expr { + var options []exprF + // this will only be used for tuple expressions, everything else will need genConfig.NumCols = 1 + numCols := genConfig.NumCols + genConfig = genConfig.SetNumCols(1) + + switch genConfig.Type { + case "bigint": + options = append(options, func() Expr { return g.intExpr(genConfig) }) + case "varchar": + options = append(options, func() Expr { return g.stringExpr(genConfig) }) + case "tinyint": + options = append(options, func() Expr { return g.booleanExpr(genConfig) }) + case "": + options = append(options, []exprF{ + func() Expr { return g.intExpr(genConfig) }, + func() Expr { return g.stringExpr(genConfig) }, + func() Expr { return g.booleanExpr(genConfig) }, + }...) + } + + for i := range g.exprGenerators { + generator := g.exprGenerators[i] + if generator == nil { + continue + } + + // don't create expressions from the expression exprGenerators if we haven't created an aggregation yet + if _, ok := generator.(QueryGenerator); ok || genConfig.AggrRule != IsAggregate { + options = append(options, func() Expr { + expr := generator.Generate(g.r, genConfig) + if expr == nil { + return g.randomLiteral() + } + return expr + }) + } + } + + if genConfig.AggrRule != CannotAggregate { + options = append(options, func() Expr { + g.isAggregate = true + return g.randomAggregate(genConfig.CannotAggregateConfig()) + }) + } + + // if an arbitrary number of columns may be generated, randomly choose 1-3 columns + if numCols == 0 { + numCols = g.r.Intn(3) + 1 + } + + if numCols == 1 { + return g.makeAggregateIfNecessary(genConfig, g.randomOf(options)) + } + + // with 1/5 probability choose a tuple subquery + if g.randomBool(0.2) { + return g.subqueryExpr(genConfig.SetNumCols(numCols)) + } + + tuple := ValTuple{} + for i := 0; i < numCols; i++ { + tuple = append(tuple, g.makeAggregateIfNecessary(genConfig, g.randomOf(options))) + } + + return tuple +} + +// makeAggregateIfNecessary is a failsafe to make sure an IsAggregate expression is in fact an aggregation +func (g *Generator) makeAggregateIfNecessary(genConfig ExprGeneratorConfig, expr Expr) Expr { + // if the generated expression must be an aggregate, and it is not, + // tack on an extra "and count(*)" to make it aggregate + if genConfig.AggrRule == IsAggregate && !g.isAggregate && g.depth == 0 { + expr = &AndExpr{ + Left: expr, + Right: &CountStar{}, + } + g.isAggregate = true } + + return expr +} + +func (g *Generator) randomAggregate(genConfig ExprGeneratorConfig) Expr { + isDistinct := g.r.Intn(10) < 1 + options := []exprF{ - func() Expr { return g.intExpr() }, - func() Expr { return g.stringExpr() }, - func() Expr { return g.booleanExpr() }, + func() Expr { return &CountStar{} }, + func() Expr { return &Count{Args: Exprs{g.Expression(genConfig.anyTypeConfig())}, Distinct: isDistinct} }, + func() Expr { return &Sum{Arg: g.Expression(genConfig), Distinct: isDistinct} }, + func() Expr { return &Min{Arg: g.Expression(genConfig), Distinct: isDistinct} }, + func() Expr { return &Max{Arg: g.Expression(genConfig), Distinct: isDistinct} }, } + g.isAggregate = true return g.randomOf(options) } -func (g *generator) booleanExpr() Expr { +func (g *Generator) booleanExpr(genConfig ExprGeneratorConfig) Expr { if g.atMaxDepth() { return g.booleanLiteral() } + genConfig = genConfig.boolTypeConfig() + options := []exprF{ - func() Expr { return g.andExpr() }, - func() Expr { return g.xorExpr() }, - func() Expr { return g.orExpr() }, - func() Expr { return g.comparison(g.intExpr) }, - func() Expr { return g.comparison(g.stringExpr) }, - //func() Expr { return g.comparison(g.booleanExpr) }, // this is not accepted by the parser - func() Expr { return g.inExpr() }, - func() Expr { return g.between() }, - func() Expr { return g.isExpr() }, - func() Expr { return g.notExpr() }, - func() Expr { return g.likeExpr() }, + func() Expr { return g.andExpr(genConfig) }, + func() Expr { return g.xorExpr(genConfig) }, + func() Expr { return g.orExpr(genConfig) }, + func() Expr { return g.comparison(genConfig.intTypeConfig()) }, + func() Expr { return g.comparison(genConfig.stringTypeConfig()) }, + //func() Expr { return g.comparison(genConfig) }, // this is not accepted by the parser + func() Expr { return g.inExpr(genConfig) }, + func() Expr { return g.existsExpr(genConfig) }, + func() Expr { return g.between(genConfig.intTypeConfig()) }, + func() Expr { return g.isExpr(genConfig) }, + func() Expr { return g.notExpr(genConfig) }, + func() Expr { return g.likeExpr(genConfig.stringTypeConfig()) }, } return g.randomOf(options) } -func (g *generator) intExpr() Expr { +func (g *Generator) intExpr(genConfig ExprGeneratorConfig) Expr { if g.atMaxDepth() { return g.intLiteral() } + genConfig = genConfig.intTypeConfig() + options := []exprF{ - func() Expr { return g.arithmetic() }, - func() Expr { return g.intLiteral() }, - func() Expr { return g.caseExpr(g.intExpr) }, + g.intLiteral, + func() Expr { return g.arithmetic(genConfig) }, + func() Expr { return g.caseExpr(genConfig) }, } return g.randomOf(options) } -func (g *generator) booleanLiteral() Expr { - return BoolVal(g.randomBool()) -} +func (g *Generator) stringExpr(genConfig ExprGeneratorConfig) Expr { + if g.atMaxDepth() { + return g.stringLiteral() + } -func (g *generator) randomBool() bool { - return g.r.Float32() < 0.5 -} + genConfig = genConfig.stringTypeConfig() -func (g *generator) intLiteral() Expr { - t := fmt.Sprintf("%d", g.r.Intn(1000)-g.r.Intn((1000))) + options := []exprF{ + g.stringLiteral, + func() Expr { return g.caseExpr(genConfig) }, + } - return NewIntLiteral(t) + return g.randomOf(options) } -var words = []string{"ox", "ant", "ape", "asp", "bat", "bee", "boa", "bug", "cat", "cod", "cow", "cub", "doe", "dog", "eel", "eft", "elf", "elk", "emu", "ewe", "fly", "fox", "gar", "gnu", "hen", "hog", "imp", "jay", "kid", "kit", "koi", "lab", "man", "owl", "pig", "pug", "pup", "ram", "rat", "ray", "yak", "bass", "bear", "bird", "boar", "buck", "bull", "calf", "chow", "clam", "colt", "crab", "crow", "dane", "deer", "dodo", "dory", "dove", "drum", "duck", "fawn", "fish", "flea", "foal", "fowl", "frog", "gnat", "goat", "grub", "gull", "hare", "hawk", "ibex", "joey", "kite", "kiwi", "lamb", "lark", "lion", "loon", "lynx", "mako", "mink", "mite", "mole", "moth", "mule", "mutt", "newt", "orca", "oryx", "pika", "pony", "puma", "seal", "shad", "slug", "sole", "stag", "stud", "swan", "tahr", "teal", "tick", "toad", "tuna", "wasp", "wolf", "worm", "wren", "yeti", "adder", "akita", "alien", "aphid", "bison", "boxer", "bream", "bunny", "burro", "camel", "chimp", "civet", "cobra", "coral", "corgi", "crane", "dingo", "drake", "eagle", "egret", "filly", "finch", "gator", "gecko", "ghost", "ghoul", "goose", "guppy", "heron", "hippo", "horse", "hound", "husky", "hyena", "koala", "krill", "leech", "lemur", "liger", "llama", "louse", "macaw", "midge", "molly", "moose", "moray", "mouse", "panda", "perch", "prawn", "quail", "racer", "raven", "rhino", "robin", "satyr", "shark", "sheep", "shrew", "skink", "skunk", "sloth", "snail", "snake", "snipe", "squid", "stork", "swift", "swine", "tapir", "tetra", "tiger", "troll", "trout", "viper", "wahoo", "whale", "zebra", "alpaca", "amoeba", "baboon", "badger", "beagle", "bedbug", "beetle", "bengal", "bobcat", "caiman", "cattle", "cicada", "collie", "condor", "cougar", "coyote", "dassie", "donkey", "dragon", "earwig", "falcon", "feline", "ferret", "gannet", "gibbon", "glider", "goblin", "gopher", "grouse", "guinea", "hermit", "hornet", "iguana", "impala", "insect", "jackal", "jaguar", "jennet", "kitten", "kodiak", "lizard", "locust", "maggot", "magpie", "mammal", "mantis", "marlin", "marmot", "marten", "martin", "mayfly", "minnow", "monkey", "mullet", "muskox", "ocelot", "oriole", "osprey", "oyster", "parrot", "pigeon", "piglet", "poodle", "possum", "python", "quagga", "rabbit", "raptor", "rodent", "roughy", "salmon", "sawfly", "serval", "shiner", "shrimp", "spider", "sponge", "tarpon", "thrush", "tomcat", "toucan", "turkey", "turtle", "urchin", "vervet", "walrus", "weasel", "weevil", "wombat", "anchovy", "anemone", "bluejay", "buffalo", "bulldog", "buzzard", "caribou", "catfish", "chamois", "cheetah", "chicken", "chigger", "cowbird", "crappie", "crawdad", "cricket", "dogfish", "dolphin", "firefly", "garfish", "gazelle", "gelding", "giraffe", "gobbler", "gorilla", "goshawk", "grackle", "griffon", "grizzly", "grouper", "haddock", "hagfish", "halibut", "hamster", "herring", "jackass", "javelin", "jawfish", "jaybird", "katydid", "ladybug", "lamprey", "lemming", "leopard", "lioness", "lobster", "macaque", "mallard", "mammoth", "manatee", "mastiff", "meerkat", "mollusk", "monarch", "mongrel", "monitor", "monster", "mudfish", "muskrat", "mustang", "narwhal", "oarfish", "octopus", "opossum", "ostrich", "panther", "peacock", "pegasus", "pelican", "penguin", "phoenix", "piranha", "polecat", "primate", "quetzal", "raccoon", "rattler", "redbird", "redfish", "reptile", "rooster", "sawfish", "sculpin", "seagull", "skylark", "snapper", "spaniel", "sparrow", "sunbeam", "sunbird", "sunfish", "tadpole", "termite", "terrier", "unicorn", "vulture", "wallaby", "walleye", "warthog", "whippet", "wildcat", "aardvark", "airedale", "albacore", "anteater", "antelope", "arachnid", "barnacle", "basilisk", "blowfish", "bluebird", "bluegill", "bonefish", "bullfrog", "cardinal", "chipmunk", "cockatoo", "crayfish", "dinosaur", "doberman", "duckling", "elephant", "escargot", "flamingo", "flounder", "foxhound", "glowworm", "goldfish", "grubworm", "hedgehog", "honeybee", "hookworm", "humpback", "kangaroo", "killdeer", "kingfish", "labrador", "lacewing", "ladybird", "lionfish", "longhorn", "mackerel", "malamute", "marmoset", "mastodon", "moccasin", "mongoose", "monkfish", "mosquito", "pangolin", "parakeet", "pheasant", "pipefish", "platypus", "polliwog", "porpoise", "reindeer", "ringtail", "sailfish", "scorpion", "seahorse", "seasnail", "sheepdog", "shepherd", "silkworm", "squirrel", "stallion", "starfish", "starling", "stingray", "stinkbug", "sturgeon", "terrapin", "titmouse", "tortoise", "treefrog", "werewolf", "woodcock"} +func (g *Generator) subqueryExpr(genConfig ExprGeneratorConfig) Expr { + if g.atMaxDepth() { + return g.makeAggregateIfNecessary(genConfig, g.randomTupleLiteral(genConfig)) + } -func (g *generator) stringLiteral() Expr { - return NewStrLiteral(g.randomOfS(words)) + var options []exprF + + for _, generator := range g.exprGenerators { + if qg, ok := generator.(QueryGenerator); ok { + options = append(options, func() Expr { + expr := qg.Generate(g.r, genConfig) + if expr == nil { + return g.randomTupleLiteral(genConfig) + } + return expr + }) + } + } + + if len(options) == 0 { + return g.Expression(genConfig) + } + + return g.randomOf(options) } -func (g *generator) stringExpr() Expr { - if g.atMaxDepth() { - return g.stringLiteral() +func (g *Generator) randomTupleLiteral(genConfig ExprGeneratorConfig) Expr { + if genConfig.NumCols == 0 { + genConfig.NumCols = g.r.Intn(3) + 1 + } + + tuple := ValTuple{} + for i := 0; i < genConfig.NumCols; i++ { + tuple = append(tuple, g.randomLiteral()) } + return tuple +} + +func (g *Generator) randomLiteral() Expr { options := []exprF{ - func() Expr { return g.stringLiteral() }, - func() Expr { return g.caseExpr(g.stringExpr) }, + g.intLiteral, + g.stringLiteral, + g.booleanLiteral, } return g.randomOf(options) } -func (g *generator) likeExpr() Expr { +func (g *Generator) booleanLiteral() Expr { + return BoolVal(g.randomBool(0.5)) +} + +// randomBool returns true with probability prob +func (g *Generator) randomBool(prob float32) bool { + if prob < 0 || prob > 1 { + prob = 0.5 + } + return g.r.Float32() < prob +} + +func (g *Generator) intLiteral() Expr { + t := fmt.Sprintf("%d", g.r.Intn(100)-g.r.Intn(100)) + + return NewIntLiteral(t) +} + +var words = []string{"ox", "ant", "ape", "asp", "bat", "bee", "boa", "bug", "cat", "cod", "cow", "cub", "doe", "dog", "eel", "eft", "elf", "elk", "emu", "ewe", "fly", "fox", "gar", "gnu", "hen", "hog", "imp", "jay", "kid", "kit", "koi", "lab", "man", "owl", "pig", "pug", "pup", "ram", "rat", "ray", "yak", "bass", "bear", "bird", "boar", "buck", "bull", "calf", "chow", "clam", "colt", "crab", "crow", "dane", "deer", "dodo", "dory", "dove", "drum", "duck", "fawn", "fish", "flea", "foal", "fowl", "frog", "gnat", "goat", "grub", "gull", "hare", "hawk", "ibex", "joey", "kite", "kiwi", "lamb", "lark", "lion", "loon", "lynx", "mako", "mink", "mite", "mole", "moth", "mule", "mutt", "newt", "orca", "oryx", "pika", "pony", "puma", "seal", "shad", "slug", "sole", "stag", "stud", "swan", "tahr", "teal", "tick", "toad", "tuna", "wasp", "wolf", "worm", "wren", "yeti", "adder", "akita", "alien", "aphid", "bison", "boxer", "bream", "bunny", "burro", "camel", "chimp", "civet", "cobra", "coral", "corgi", "crane", "dingo", "drake", "eagle", "egret", "filly", "finch", "gator", "gecko", "ghost", "ghoul", "goose", "guppy", "heron", "hippo", "horse", "hound", "husky", "hyena", "koala", "krill", "leech", "lemur", "liger", "llama", "louse", "macaw", "midge", "molly", "moose", "moray", "mouse", "panda", "perch", "prawn", "quail", "racer", "raven", "rhino", "robin", "satyr", "shark", "sheep", "shrew", "skink", "skunk", "sloth", "snail", "snake", "snipe", "squid", "stork", "swift", "swine", "tapir", "tetra", "tiger", "troll", "trout", "viper", "wahoo", "whale", "zebra", "alpaca", "amoeba", "baboon", "badger", "beagle", "bedbug", "beetle", "bengal", "bobcat", "caiman", "cattle", "cicada", "collie", "condor", "cougar", "coyote", "dassie", "donkey", "dragon", "earwig", "falcon", "feline", "ferret", "gannet", "gibbon", "glider", "goblin", "gopher", "grouse", "guinea", "hermit", "hornet", "iguana", "impala", "insect", "jackal", "jaguar", "jennet", "kitten", "kodiak", "lizard", "locust", "maggot", "magpie", "mammal", "mantis", "marlin", "marmot", "marten", "martin", "mayfly", "minnow", "monkey", "mullet", "muskox", "ocelot", "oriole", "osprey", "oyster", "parrot", "pigeon", "piglet", "poodle", "possum", "python", "quagga", "rabbit", "raptor", "rodent", "roughy", "salmon", "sawfly", "serval", "shiner", "shrimp", "spider", "sponge", "tarpon", "thrush", "tomcat", "toucan", "turkey", "turtle", "urchin", "vervet", "walrus", "weasel", "weevil", "wombat", "anchovy", "anemone", "bluejay", "buffalo", "bulldog", "buzzard", "caribou", "catfish", "chamois", "cheetah", "chicken", "chigger", "cowbird", "crappie", "crawdad", "cricket", "dogfish", "dolphin", "firefly", "garfish", "gazelle", "gelding", "giraffe", "gobbler", "gorilla", "goshawk", "grackle", "griffon", "grizzly", "grouper", "haddock", "hagfish", "halibut", "hamster", "herring", "jackass", "javelin", "jawfish", "jaybird", "katydid", "ladybug", "lamprey", "lemming", "leopard", "lioness", "lobster", "macaque", "mallard", "mammoth", "manatee", "mastiff", "meerkat", "mollusk", "monarch", "mongrel", "monitor", "monster", "mudfish", "muskrat", "mustang", "narwhal", "oarfish", "octopus", "opossum", "ostrich", "panther", "peacock", "pegasus", "pelican", "penguin", "phoenix", "piranha", "polecat", "primate", "quetzal", "raccoon", "rattler", "redbird", "redfish", "reptile", "rooster", "sawfish", "sculpin", "seagull", "skylark", "snapper", "spaniel", "sparrow", "sunbeam", "sunbird", "sunfish", "tadpole", "termite", "terrier", "unicorn", "vulture", "wallaby", "walleye", "warthog", "whippet", "wildcat", "aardvark", "airedale", "albacore", "anteater", "antelope", "arachnid", "barnacle", "basilisk", "blowfish", "bluebird", "bluegill", "bonefish", "bullfrog", "cardinal", "chipmunk", "cockatoo", "crayfish", "dinosaur", "doberman", "duckling", "elephant", "escargot", "flamingo", "flounder", "foxhound", "glowworm", "goldfish", "grubworm", "hedgehog", "honeybee", "hookworm", "humpback", "kangaroo", "killdeer", "kingfish", "labrador", "lacewing", "ladybird", "lionfish", "longhorn", "mackerel", "malamute", "marmoset", "mastodon", "moccasin", "mongoose", "monkfish", "mosquito", "pangolin", "parakeet", "pheasant", "pipefish", "platypus", "polliwog", "porpoise", "reindeer", "ringtail", "sailfish", "scorpion", "seahorse", "seasnail", "sheepdog", "shepherd", "silkworm", "squirrel", "stallion", "starfish", "starling", "stingray", "stinkbug", "sturgeon", "terrapin", "titmouse", "tortoise", "treefrog", "werewolf", "woodcock"} + +func (g *Generator) stringLiteral() Expr { + return NewStrLiteral(g.randomOfS(words)) +} + +func (g *Generator) likeExpr(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() return &ComparisonExpr{ Operator: LikeOp, - Left: g.stringExpr(), - Right: g.stringExpr(), + Left: g.Expression(genConfig), + Right: g.Expression(genConfig), } } var comparisonOps = []ComparisonExprOperator{EqualOp, LessThanOp, GreaterThanOp, LessEqualOp, GreaterEqualOp, NotEqualOp, NullSafeEqualOp} -func (g *generator) comparison(f func() Expr) Expr { +func (g *Generator) comparison(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() + // specifc 1-3 columns + numCols := g.r.Intn(3) + 1 + cmp := &ComparisonExpr{ Operator: comparisonOps[g.r.Intn(len(comparisonOps))], - Left: f(), - Right: f(), + Left: g.Expression(genConfig.SetNumCols(numCols)), + Right: g.Expression(genConfig.SetNumCols(numCols)), } return cmp } -func (g *generator) caseExpr(valueF func() Expr) Expr { +func (g *Generator) caseExpr(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() var exp Expr var elseExpr Expr - if g.randomBool() { - exp = valueF() + if g.randomBool(0.5) { + exp = g.Expression(genConfig.anyTypeConfig()) } - if g.randomBool() { - elseExpr = valueF() + if g.randomBool(0.5) { + elseExpr = g.Expression(genConfig) } - size := g.r.Intn(5) + 2 + size := g.r.Intn(2) + 1 var whens []*When for i := 0; i < size; i++ { var cond Expr if exp == nil { - cond = g.booleanExpr() + cond = g.Expression(genConfig.boolTypeConfig()) } else { - cond = g.expression() + cond = g.Expression(genConfig) } + val := g.Expression(genConfig) whens = append(whens, &When{ Cond: cond, - Val: g.expression(), + Val: val, }) } @@ -211,7 +453,7 @@ func (g *generator) caseExpr(valueF func() Expr) Expr { var arithmeticOps = []BinaryExprOperator{BitAndOp, BitOrOp, BitXorOp, PlusOp, MinusOp, MultOp, DivOp, IntDivOp, ModOp, ShiftRightOp, ShiftLeftOp} -func (g *generator) arithmetic() Expr { +func (g *Generator) arithmetic(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() @@ -219,82 +461,81 @@ func (g *generator) arithmetic() Expr { return &BinaryExpr{ Operator: op, - Left: g.intExpr(), - Right: g.intExpr(), + Left: g.Expression(genConfig), + Right: g.Expression(genConfig), } } type exprF func() Expr -func (g *generator) randomOf(options []exprF) Expr { +func (g *Generator) randomOf(options []exprF) Expr { return options[g.r.Intn(len(options))]() } -func (g *generator) randomOfS(options []string) string { +func (g *Generator) randomOfS(options []string) string { return options[g.r.Intn(len(options))] } -func (g *generator) andExpr() Expr { +func (g *Generator) andExpr(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() return &AndExpr{ - Left: g.booleanExpr(), - Right: g.booleanExpr(), + Left: g.Expression(genConfig), + Right: g.Expression(genConfig), } } -func (g *generator) orExpr() Expr { +func (g *Generator) orExpr(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() return &OrExpr{ - Left: g.booleanExpr(), - Right: g.booleanExpr(), + Left: g.Expression(genConfig), + Right: g.Expression(genConfig), } } -func (g *generator) xorExpr() Expr { +func (g *Generator) xorExpr(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() return &XorExpr{ - Left: g.booleanExpr(), - Right: g.booleanExpr(), + Left: g.Expression(genConfig), + Right: g.Expression(genConfig), } } -func (g *generator) notExpr() Expr { +func (g *Generator) notExpr(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() - return &NotExpr{g.booleanExpr()} + return &NotExpr{g.Expression(genConfig)} } -func (g *generator) inExpr() Expr { +func (g *Generator) inExpr(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() - expr := g.intExpr() - size := g.r.Intn(5) + 2 - tuples := ValTuple{} - for i := 0; i < size; i++ { - tuples = append(tuples, g.intExpr()) - } + size := g.r.Intn(3) + 2 + inExprGenConfig := NewExprGeneratorConfig(genConfig.AggrRule, "", size, true) + tuple1 := g.Expression(inExprGenConfig) + tuple2 := ValTuple{g.Expression(inExprGenConfig)} + op := InOp - if g.randomBool() { + if g.randomBool(0.5) { op = NotInOp } return &ComparisonExpr{ Operator: op, - Left: expr, - Right: tuples, + Left: tuple1, + Right: tuple2, } } -func (g *generator) between() Expr { +func (g *Generator) between(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() var IsBetween bool - if g.randomBool() { + if g.randomBool(0.5) { IsBetween = true } else { IsBetween = false @@ -302,13 +543,13 @@ func (g *generator) between() Expr { return &BetweenExpr{ IsBetween: IsBetween, - Left: g.intExpr(), - From: g.intExpr(), - To: g.intExpr(), + Left: g.Expression(genConfig), + From: g.Expression(genConfig), + To: g.Expression(genConfig), } } -func (g *generator) isExpr() Expr { +func (g *Generator) isExpr(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() @@ -316,6 +557,26 @@ func (g *generator) isExpr() Expr { return &IsExpr{ Right: ops[g.r.Intn(len(ops))], - Left: g.booleanExpr(), + Left: g.Expression(genConfig), } } + +func (g *Generator) existsExpr(genConfig ExprGeneratorConfig) Expr { + expr := g.subqueryExpr(genConfig.MultiRowConfig().SetNumCols(0)) + if subquery, ok := expr.(*Subquery); ok { + expr = NewExistsExpr(subquery) + } else { + // if g.subqueryExpr doesn't return a valid subquery, replace with + // select 1 + selectExprs := SelectExprs{NewAliasedExpr(NewIntLiteral("1"), "")} + from := TableExprs{NewAliasedTableExpr(NewTableName("dual"), "")} + expr = NewExistsExpr(NewSubquery(NewSelect(nil, selectExprs, nil, nil, from, nil, nil, nil, nil))) + } + + // not exists + if g.randomBool(0.5) { + expr = NewNotExpr(expr) + } + + return expr +} diff --git a/go/vt/sqlparser/redact_query_test.go b/go/vt/sqlparser/redact_query_test.go index 029a307e7c8..1cfd6d83af3 100644 --- a/go/vt/sqlparser/redact_query_test.go +++ b/go/vt/sqlparser/redact_query_test.go @@ -29,5 +29,5 @@ func TestRedactSQLStatements(t *testing.T) { t.Fatalf("redacting sql failed: %v", err) } - require.Equal(t, "select a, b, c from t where x = :x and y = :x and z = :z", redactedSQL) + require.Equal(t, "select a, b, c from t where x = :x /* INT64 */ and y = :x /* INT64 */ and z = :z /* VARCHAR */", redactedSQL) } diff --git a/go/vt/sqlparser/rewriter_test.go b/go/vt/sqlparser/rewriter_test.go index dadd2c501df..3044e04f8b0 100644 --- a/go/vt/sqlparser/rewriter_test.go +++ b/go/vt/sqlparser/rewriter_test.go @@ -17,6 +17,7 @@ limitations under the License. package sqlparser import ( + "math/rand" "testing" "github.com/stretchr/testify/assert" @@ -25,8 +26,8 @@ import ( ) func BenchmarkVisitLargeExpression(b *testing.B) { - gen := newGenerator(1, 5) - exp := gen.expression() + gen := NewGenerator(rand.New(rand.NewSource(1)), 5) + exp := gen.Expression(ExprGeneratorConfig{}) depth := 0 for i := 0; i < b.N; i++ { diff --git a/go/vt/sqlparser/sql.go b/go/vt/sqlparser/sql.go index fa71623e2e6..3e801c79658 100644 --- a/go/vt/sqlparser/sql.go +++ b/go/vt/sqlparser/sql.go @@ -22,18 +22,6 @@ func setDDL(yylex yyLexer, node Statement) { yylex.(*Tokenizer).partialDDL = node } -func incNesting(yylex yyLexer) bool { - yylex.(*Tokenizer).nesting++ - if yylex.(*Tokenizer).nesting == 200 { - return true - } - return false -} - -func decNesting(yylex yyLexer) { - yylex.(*Tokenizer).nesting-- -} - // skipToEnd forces the lexer to end prematurely. Not all SQL statements // are supported by the Parser, thus calling skipToEnd will make the lexer // return EOF early. @@ -41,7 +29,7 @@ func skipToEnd(yylex yyLexer) { yylex.(*Tokenizer).SkipToEnd = true } -func bindVariable(yylex yyLexer, bvar string) { +func markBindVariable(yylex yyLexer, bvar string) { yylex.(*Tokenizer).BindVars[bvar] = struct{}{} } @@ -171,524 +159,591 @@ const STORED = 57468 const BOTH = 57469 const LEADING = 57470 const TRAILING = 57471 -const EMPTY_FROM_CLAUSE = 57472 -const LOWER_THAN_CHARSET = 57473 -const CHARSET = 57474 -const UNIQUE = 57475 -const KEY = 57476 -const EXPRESSION_PREC_SETTER = 57477 -const OR = 57478 -const XOR = 57479 -const AND = 57480 -const NOT = 57481 -const BETWEEN = 57482 -const CASE = 57483 -const WHEN = 57484 -const THEN = 57485 -const ELSE = 57486 -const END = 57487 -const LE = 57488 -const GE = 57489 -const NE = 57490 -const NULL_SAFE_EQUAL = 57491 -const IS = 57492 -const LIKE = 57493 -const REGEXP = 57494 -const RLIKE = 57495 -const IN = 57496 -const SHIFT_LEFT = 57497 -const SHIFT_RIGHT = 57498 -const DIV = 57499 -const MOD = 57500 -const UNARY = 57501 -const COLLATE = 57502 -const BINARY = 57503 -const UNDERSCORE_ARMSCII8 = 57504 -const UNDERSCORE_ASCII = 57505 -const UNDERSCORE_BIG5 = 57506 -const UNDERSCORE_BINARY = 57507 -const UNDERSCORE_CP1250 = 57508 -const UNDERSCORE_CP1251 = 57509 -const UNDERSCORE_CP1256 = 57510 -const UNDERSCORE_CP1257 = 57511 -const UNDERSCORE_CP850 = 57512 -const UNDERSCORE_CP852 = 57513 -const UNDERSCORE_CP866 = 57514 -const UNDERSCORE_CP932 = 57515 -const UNDERSCORE_DEC8 = 57516 -const UNDERSCORE_EUCJPMS = 57517 -const UNDERSCORE_EUCKR = 57518 -const UNDERSCORE_GB18030 = 57519 -const UNDERSCORE_GB2312 = 57520 -const UNDERSCORE_GBK = 57521 -const UNDERSCORE_GEOSTD8 = 57522 -const UNDERSCORE_GREEK = 57523 -const UNDERSCORE_HEBREW = 57524 -const UNDERSCORE_HP8 = 57525 -const UNDERSCORE_KEYBCS2 = 57526 -const UNDERSCORE_KOI8R = 57527 -const UNDERSCORE_KOI8U = 57528 -const UNDERSCORE_LATIN1 = 57529 -const UNDERSCORE_LATIN2 = 57530 -const UNDERSCORE_LATIN5 = 57531 -const UNDERSCORE_LATIN7 = 57532 -const UNDERSCORE_MACCE = 57533 -const UNDERSCORE_MACROMAN = 57534 -const UNDERSCORE_SJIS = 57535 -const UNDERSCORE_SWE7 = 57536 -const UNDERSCORE_TIS620 = 57537 -const UNDERSCORE_UCS2 = 57538 -const UNDERSCORE_UJIS = 57539 -const UNDERSCORE_UTF16 = 57540 -const UNDERSCORE_UTF16LE = 57541 -const UNDERSCORE_UTF32 = 57542 -const UNDERSCORE_UTF8 = 57543 -const UNDERSCORE_UTF8MB4 = 57544 -const UNDERSCORE_UTF8MB3 = 57545 -const INTERVAL = 57546 -const WINDOW_EXPR = 57547 -const JSON_EXTRACT_OP = 57548 -const JSON_UNQUOTE_EXTRACT_OP = 57549 -const CREATE = 57550 -const ALTER = 57551 -const DROP = 57552 -const RENAME = 57553 -const ANALYZE = 57554 -const ADD = 57555 -const FLUSH = 57556 -const CHANGE = 57557 -const MODIFY = 57558 -const DEALLOCATE = 57559 -const REVERT = 57560 -const QUERIES = 57561 -const SCHEMA = 57562 -const TABLE = 57563 -const INDEX = 57564 -const VIEW = 57565 -const TO = 57566 -const IGNORE = 57567 -const IF = 57568 -const PRIMARY = 57569 -const COLUMN = 57570 -const SPATIAL = 57571 -const FULLTEXT = 57572 -const KEY_BLOCK_SIZE = 57573 -const CHECK = 57574 -const INDEXES = 57575 -const ACTION = 57576 -const CASCADE = 57577 -const CONSTRAINT = 57578 -const FOREIGN = 57579 -const NO = 57580 -const REFERENCES = 57581 -const RESTRICT = 57582 -const SHOW = 57583 -const DESCRIBE = 57584 -const EXPLAIN = 57585 -const DATE = 57586 -const ESCAPE = 57587 -const REPAIR = 57588 -const OPTIMIZE = 57589 -const TRUNCATE = 57590 -const COALESCE = 57591 -const EXCHANGE = 57592 -const REBUILD = 57593 -const PARTITIONING = 57594 -const REMOVE = 57595 -const PREPARE = 57596 -const EXECUTE = 57597 -const MAXVALUE = 57598 -const PARTITION = 57599 -const REORGANIZE = 57600 -const LESS = 57601 -const THAN = 57602 -const PROCEDURE = 57603 -const TRIGGER = 57604 -const VINDEX = 57605 -const VINDEXES = 57606 -const DIRECTORY = 57607 -const NAME = 57608 -const UPGRADE = 57609 -const STATUS = 57610 -const VARIABLES = 57611 -const WARNINGS = 57612 -const CASCADED = 57613 -const DEFINER = 57614 -const OPTION = 57615 -const SQL = 57616 -const UNDEFINED = 57617 -const SEQUENCE = 57618 -const MERGE = 57619 -const TEMPORARY = 57620 -const TEMPTABLE = 57621 -const INVOKER = 57622 -const SECURITY = 57623 -const FIRST = 57624 -const AFTER = 57625 -const LAST = 57626 -const VITESS_MIGRATION = 57627 -const CANCEL = 57628 -const RETRY = 57629 -const LAUNCH = 57630 -const COMPLETE = 57631 -const CLEANUP = 57632 -const THROTTLE = 57633 -const UNTHROTTLE = 57634 -const EXPIRE = 57635 -const RATIO = 57636 -const VITESS_THROTTLER = 57637 -const BEGIN = 57638 -const START = 57639 -const TRANSACTION = 57640 -const COMMIT = 57641 -const ROLLBACK = 57642 -const SAVEPOINT = 57643 -const RELEASE = 57644 -const WORK = 57645 -const CONSISTENT = 57646 -const SNAPSHOT = 57647 -const BIT = 57648 -const TINYINT = 57649 -const SMALLINT = 57650 -const MEDIUMINT = 57651 -const INT = 57652 -const INTEGER = 57653 -const BIGINT = 57654 -const INTNUM = 57655 -const REAL = 57656 -const DOUBLE = 57657 -const FLOAT_TYPE = 57658 -const FLOAT4_TYPE = 57659 -const FLOAT8_TYPE = 57660 -const DECIMAL_TYPE = 57661 -const NUMERIC = 57662 -const TIME = 57663 -const TIMESTAMP = 57664 -const DATETIME = 57665 -const YEAR = 57666 -const CHAR = 57667 -const VARCHAR = 57668 -const BOOL = 57669 -const CHARACTER = 57670 -const VARBINARY = 57671 -const NCHAR = 57672 -const TEXT = 57673 -const TINYTEXT = 57674 -const MEDIUMTEXT = 57675 -const LONGTEXT = 57676 -const BLOB = 57677 -const TINYBLOB = 57678 -const MEDIUMBLOB = 57679 -const LONGBLOB = 57680 -const JSON = 57681 -const JSON_SCHEMA_VALID = 57682 -const JSON_SCHEMA_VALIDATION_REPORT = 57683 -const ENUM = 57684 -const GEOMETRY = 57685 -const POINT = 57686 -const LINESTRING = 57687 -const POLYGON = 57688 -const GEOMCOLLECTION = 57689 -const GEOMETRYCOLLECTION = 57690 -const MULTIPOINT = 57691 -const MULTILINESTRING = 57692 -const MULTIPOLYGON = 57693 -const ASCII = 57694 -const UNICODE = 57695 -const NULLX = 57696 -const AUTO_INCREMENT = 57697 -const APPROXNUM = 57698 -const SIGNED = 57699 -const UNSIGNED = 57700 -const ZEROFILL = 57701 -const CODE = 57702 -const COLLATION = 57703 -const COLUMNS = 57704 -const DATABASES = 57705 -const ENGINES = 57706 -const EVENT = 57707 -const EXTENDED = 57708 -const FIELDS = 57709 -const FULL = 57710 -const FUNCTION = 57711 -const GTID_EXECUTED = 57712 -const KEYSPACES = 57713 -const OPEN = 57714 -const PLUGINS = 57715 -const PRIVILEGES = 57716 -const PROCESSLIST = 57717 -const SCHEMAS = 57718 -const TABLES = 57719 -const TRIGGERS = 57720 -const USER = 57721 -const VGTID_EXECUTED = 57722 -const VITESS_KEYSPACES = 57723 -const VITESS_METADATA = 57724 -const VITESS_MIGRATIONS = 57725 -const VITESS_REPLICATION_STATUS = 57726 -const VITESS_SHARDS = 57727 -const VITESS_TABLETS = 57728 -const VITESS_TARGET = 57729 -const VSCHEMA = 57730 -const VITESS_THROTTLED_APPS = 57731 -const NAMES = 57732 -const GLOBAL = 57733 -const SESSION = 57734 -const ISOLATION = 57735 -const LEVEL = 57736 -const READ = 57737 -const WRITE = 57738 -const ONLY = 57739 -const REPEATABLE = 57740 -const COMMITTED = 57741 -const UNCOMMITTED = 57742 -const SERIALIZABLE = 57743 -const CURRENT_TIMESTAMP = 57744 -const DATABASE = 57745 -const CURRENT_DATE = 57746 -const NOW = 57747 -const CURRENT_TIME = 57748 -const LOCALTIME = 57749 -const LOCALTIMESTAMP = 57750 -const CURRENT_USER = 57751 -const UTC_DATE = 57752 -const UTC_TIME = 57753 -const UTC_TIMESTAMP = 57754 -const DAY = 57755 -const DAY_HOUR = 57756 -const DAY_MICROSECOND = 57757 -const DAY_MINUTE = 57758 -const DAY_SECOND = 57759 -const HOUR = 57760 -const HOUR_MICROSECOND = 57761 -const HOUR_MINUTE = 57762 -const HOUR_SECOND = 57763 -const MICROSECOND = 57764 -const MINUTE = 57765 -const MINUTE_MICROSECOND = 57766 -const MINUTE_SECOND = 57767 -const MONTH = 57768 -const QUARTER = 57769 -const SECOND = 57770 -const SECOND_MICROSECOND = 57771 -const YEAR_MONTH = 57772 -const WEEK = 57773 -const REPLACE = 57774 -const CONVERT = 57775 -const CAST = 57776 -const SUBSTR = 57777 -const SUBSTRING = 57778 -const SEPARATOR = 57779 -const TIMESTAMPADD = 57780 -const TIMESTAMPDIFF = 57781 -const WEIGHT_STRING = 57782 -const LTRIM = 57783 -const RTRIM = 57784 -const TRIM = 57785 -const JSON_ARRAY = 57786 -const JSON_OBJECT = 57787 -const JSON_QUOTE = 57788 -const JSON_DEPTH = 57789 -const JSON_TYPE = 57790 -const JSON_LENGTH = 57791 -const JSON_VALID = 57792 -const JSON_ARRAY_APPEND = 57793 -const JSON_ARRAY_INSERT = 57794 -const JSON_INSERT = 57795 -const JSON_MERGE = 57796 -const JSON_MERGE_PATCH = 57797 -const JSON_MERGE_PRESERVE = 57798 -const JSON_REMOVE = 57799 -const JSON_REPLACE = 57800 -const JSON_SET = 57801 -const JSON_UNQUOTE = 57802 -const COUNT = 57803 -const AVG = 57804 -const MAX = 57805 -const MIN = 57806 -const SUM = 57807 -const GROUP_CONCAT = 57808 -const BIT_AND = 57809 -const BIT_OR = 57810 -const BIT_XOR = 57811 -const STD = 57812 -const STDDEV = 57813 -const STDDEV_POP = 57814 -const STDDEV_SAMP = 57815 -const VAR_POP = 57816 -const VAR_SAMP = 57817 -const VARIANCE = 57818 -const REGEXP_INSTR = 57819 -const REGEXP_LIKE = 57820 -const REGEXP_REPLACE = 57821 -const REGEXP_SUBSTR = 57822 -const ExtractValue = 57823 -const UpdateXML = 57824 -const GET_LOCK = 57825 -const RELEASE_LOCK = 57826 -const RELEASE_ALL_LOCKS = 57827 -const IS_FREE_LOCK = 57828 -const IS_USED_LOCK = 57829 -const LOCATE = 57830 -const POSITION = 57831 -const MATCH = 57832 -const AGAINST = 57833 -const BOOLEAN = 57834 -const LANGUAGE = 57835 -const WITH = 57836 -const QUERY = 57837 -const EXPANSION = 57838 -const WITHOUT = 57839 -const VALIDATION = 57840 -const UNUSED = 57841 -const ARRAY = 57842 -const BYTE = 57843 -const CUME_DIST = 57844 -const DESCRIPTION = 57845 -const DENSE_RANK = 57846 -const EMPTY = 57847 -const EXCEPT = 57848 -const FIRST_VALUE = 57849 -const GROUPING = 57850 -const GROUPS = 57851 -const JSON_TABLE = 57852 -const LAG = 57853 -const LAST_VALUE = 57854 -const LATERAL = 57855 -const LEAD = 57856 -const NTH_VALUE = 57857 -const NTILE = 57858 -const OF = 57859 -const OVER = 57860 -const PERCENT_RANK = 57861 -const RANK = 57862 -const RECURSIVE = 57863 -const ROW_NUMBER = 57864 -const SYSTEM = 57865 -const WINDOW = 57866 -const ACTIVE = 57867 -const ADMIN = 57868 -const AUTOEXTEND_SIZE = 57869 -const BUCKETS = 57870 -const CLONE = 57871 -const COLUMN_FORMAT = 57872 -const COMPONENT = 57873 -const DEFINITION = 57874 -const ENFORCED = 57875 -const ENGINE_ATTRIBUTE = 57876 -const EXCLUDE = 57877 -const FOLLOWING = 57878 -const GET_MASTER_PUBLIC_KEY = 57879 -const HISTOGRAM = 57880 -const HISTORY = 57881 -const INACTIVE = 57882 -const INVISIBLE = 57883 -const LOCKED = 57884 -const MASTER_COMPRESSION_ALGORITHMS = 57885 -const MASTER_PUBLIC_KEY_PATH = 57886 -const MASTER_TLS_CIPHERSUITES = 57887 -const MASTER_ZSTD_COMPRESSION_LEVEL = 57888 -const NESTED = 57889 -const NETWORK_NAMESPACE = 57890 -const NOWAIT = 57891 -const NULLS = 57892 -const OJ = 57893 -const OLD = 57894 -const OPTIONAL = 57895 -const ORDINALITY = 57896 -const ORGANIZATION = 57897 -const OTHERS = 57898 -const PARTIAL = 57899 -const PATH = 57900 -const PERSIST = 57901 -const PERSIST_ONLY = 57902 -const PRECEDING = 57903 -const PRIVILEGE_CHECKS_USER = 57904 -const PROCESS = 57905 -const RANDOM = 57906 -const REFERENCE = 57907 -const REQUIRE_ROW_FORMAT = 57908 -const RESOURCE = 57909 -const RESPECT = 57910 -const RESTART = 57911 -const RETAIN = 57912 -const REUSE = 57913 -const ROLE = 57914 -const SECONDARY = 57915 -const SECONDARY_ENGINE = 57916 -const SECONDARY_ENGINE_ATTRIBUTE = 57917 -const SECONDARY_LOAD = 57918 -const SECONDARY_UNLOAD = 57919 -const SIMPLE = 57920 -const SKIP = 57921 -const SRID = 57922 -const THREAD_PRIORITY = 57923 -const TIES = 57924 -const UNBOUNDED = 57925 -const VCPU = 57926 -const VISIBLE = 57927 -const RETURNING = 57928 -const FORMAT_BYTES = 57929 -const FORMAT_PICO_TIME = 57930 -const PS_CURRENT_THREAD_ID = 57931 -const PS_THREAD_ID = 57932 -const GTID_SUBSET = 57933 -const GTID_SUBTRACT = 57934 -const WAIT_FOR_EXECUTED_GTID_SET = 57935 -const WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS = 57936 -const FORMAT = 57937 -const TREE = 57938 -const VITESS = 57939 -const TRADITIONAL = 57940 -const VTEXPLAIN = 57941 -const VEXPLAIN = 57942 -const PLAN = 57943 -const LOCAL = 57944 -const LOW_PRIORITY = 57945 -const NO_WRITE_TO_BINLOG = 57946 -const LOGS = 57947 -const ERROR = 57948 -const GENERAL = 57949 -const HOSTS = 57950 -const OPTIMIZER_COSTS = 57951 -const USER_RESOURCES = 57952 -const SLOW = 57953 -const CHANNEL = 57954 -const RELAY = 57955 -const EXPORT = 57956 -const CURRENT = 57957 -const ROW = 57958 -const ROWS = 57959 -const AVG_ROW_LENGTH = 57960 -const CONNECTION = 57961 -const CHECKSUM = 57962 -const DELAY_KEY_WRITE = 57963 -const ENCRYPTION = 57964 -const ENGINE = 57965 -const INSERT_METHOD = 57966 -const MAX_ROWS = 57967 -const MIN_ROWS = 57968 -const PACK_KEYS = 57969 -const PASSWORD = 57970 -const FIXED = 57971 -const DYNAMIC = 57972 -const COMPRESSED = 57973 -const REDUNDANT = 57974 -const COMPACT = 57975 -const ROW_FORMAT = 57976 -const STATS_AUTO_RECALC = 57977 -const STATS_PERSISTENT = 57978 -const STATS_SAMPLE_PAGES = 57979 -const STORAGE = 57980 -const MEMORY = 57981 -const DISK = 57982 -const PARTITIONS = 57983 -const LINEAR = 57984 -const RANGE = 57985 -const LIST = 57986 -const SUBPARTITION = 57987 -const SUBPARTITIONS = 57988 -const HASH = 57989 +const KILL = 57472 +const EMPTY_FROM_CLAUSE = 57473 +const LOWER_THAN_CHARSET = 57474 +const CHARSET = 57475 +const UNIQUE = 57476 +const KEY = 57477 +const EXPRESSION_PREC_SETTER = 57478 +const OR = 57479 +const XOR = 57480 +const AND = 57481 +const NOT = 57482 +const BETWEEN = 57483 +const CASE = 57484 +const WHEN = 57485 +const THEN = 57486 +const ELSE = 57487 +const END = 57488 +const LE = 57489 +const GE = 57490 +const NE = 57491 +const NULL_SAFE_EQUAL = 57492 +const IS = 57493 +const LIKE = 57494 +const REGEXP = 57495 +const RLIKE = 57496 +const IN = 57497 +const ASSIGNMENT_OPT = 57498 +const SHIFT_LEFT = 57499 +const SHIFT_RIGHT = 57500 +const DIV = 57501 +const MOD = 57502 +const UNARY = 57503 +const COLLATE = 57504 +const BINARY = 57505 +const UNDERSCORE_ARMSCII8 = 57506 +const UNDERSCORE_ASCII = 57507 +const UNDERSCORE_BIG5 = 57508 +const UNDERSCORE_BINARY = 57509 +const UNDERSCORE_CP1250 = 57510 +const UNDERSCORE_CP1251 = 57511 +const UNDERSCORE_CP1256 = 57512 +const UNDERSCORE_CP1257 = 57513 +const UNDERSCORE_CP850 = 57514 +const UNDERSCORE_CP852 = 57515 +const UNDERSCORE_CP866 = 57516 +const UNDERSCORE_CP932 = 57517 +const UNDERSCORE_DEC8 = 57518 +const UNDERSCORE_EUCJPMS = 57519 +const UNDERSCORE_EUCKR = 57520 +const UNDERSCORE_GB18030 = 57521 +const UNDERSCORE_GB2312 = 57522 +const UNDERSCORE_GBK = 57523 +const UNDERSCORE_GEOSTD8 = 57524 +const UNDERSCORE_GREEK = 57525 +const UNDERSCORE_HEBREW = 57526 +const UNDERSCORE_HP8 = 57527 +const UNDERSCORE_KEYBCS2 = 57528 +const UNDERSCORE_KOI8R = 57529 +const UNDERSCORE_KOI8U = 57530 +const UNDERSCORE_LATIN1 = 57531 +const UNDERSCORE_LATIN2 = 57532 +const UNDERSCORE_LATIN5 = 57533 +const UNDERSCORE_LATIN7 = 57534 +const UNDERSCORE_MACCE = 57535 +const UNDERSCORE_MACROMAN = 57536 +const UNDERSCORE_SJIS = 57537 +const UNDERSCORE_SWE7 = 57538 +const UNDERSCORE_TIS620 = 57539 +const UNDERSCORE_UCS2 = 57540 +const UNDERSCORE_UJIS = 57541 +const UNDERSCORE_UTF16 = 57542 +const UNDERSCORE_UTF16LE = 57543 +const UNDERSCORE_UTF32 = 57544 +const UNDERSCORE_UTF8 = 57545 +const UNDERSCORE_UTF8MB4 = 57546 +const UNDERSCORE_UTF8MB3 = 57547 +const INTERVAL = 57548 +const WINDOW_EXPR = 57549 +const JSON_EXTRACT_OP = 57550 +const JSON_UNQUOTE_EXTRACT_OP = 57551 +const CREATE = 57552 +const ALTER = 57553 +const DROP = 57554 +const RENAME = 57555 +const ANALYZE = 57556 +const ADD = 57557 +const FLUSH = 57558 +const CHANGE = 57559 +const MODIFY = 57560 +const DEALLOCATE = 57561 +const REVERT = 57562 +const QUERIES = 57563 +const SCHEMA = 57564 +const TABLE = 57565 +const INDEX = 57566 +const VIEW = 57567 +const TO = 57568 +const IGNORE = 57569 +const IF = 57570 +const PRIMARY = 57571 +const COLUMN = 57572 +const SPATIAL = 57573 +const FULLTEXT = 57574 +const KEY_BLOCK_SIZE = 57575 +const CHECK = 57576 +const INDEXES = 57577 +const ACTION = 57578 +const CASCADE = 57579 +const CONSTRAINT = 57580 +const FOREIGN = 57581 +const NO = 57582 +const REFERENCES = 57583 +const RESTRICT = 57584 +const SHOW = 57585 +const DESCRIBE = 57586 +const EXPLAIN = 57587 +const DATE = 57588 +const ESCAPE = 57589 +const REPAIR = 57590 +const OPTIMIZE = 57591 +const TRUNCATE = 57592 +const COALESCE = 57593 +const EXCHANGE = 57594 +const REBUILD = 57595 +const PARTITIONING = 57596 +const REMOVE = 57597 +const PREPARE = 57598 +const EXECUTE = 57599 +const MAXVALUE = 57600 +const PARTITION = 57601 +const REORGANIZE = 57602 +const LESS = 57603 +const THAN = 57604 +const PROCEDURE = 57605 +const TRIGGER = 57606 +const VINDEX = 57607 +const VINDEXES = 57608 +const DIRECTORY = 57609 +const NAME = 57610 +const UPGRADE = 57611 +const STATUS = 57612 +const VARIABLES = 57613 +const WARNINGS = 57614 +const CASCADED = 57615 +const DEFINER = 57616 +const OPTION = 57617 +const SQL = 57618 +const UNDEFINED = 57619 +const SEQUENCE = 57620 +const MERGE = 57621 +const TEMPORARY = 57622 +const TEMPTABLE = 57623 +const INVOKER = 57624 +const SECURITY = 57625 +const FIRST = 57626 +const AFTER = 57627 +const LAST = 57628 +const VITESS_MIGRATION = 57629 +const CANCEL = 57630 +const RETRY = 57631 +const LAUNCH = 57632 +const COMPLETE = 57633 +const CLEANUP = 57634 +const THROTTLE = 57635 +const UNTHROTTLE = 57636 +const EXPIRE = 57637 +const RATIO = 57638 +const VITESS_THROTTLER = 57639 +const BEGIN = 57640 +const START = 57641 +const TRANSACTION = 57642 +const COMMIT = 57643 +const ROLLBACK = 57644 +const SAVEPOINT = 57645 +const RELEASE = 57646 +const WORK = 57647 +const CONSISTENT = 57648 +const SNAPSHOT = 57649 +const BIT = 57650 +const TINYINT = 57651 +const SMALLINT = 57652 +const MEDIUMINT = 57653 +const INT = 57654 +const INTEGER = 57655 +const BIGINT = 57656 +const INTNUM = 57657 +const REAL = 57658 +const DOUBLE = 57659 +const FLOAT_TYPE = 57660 +const FLOAT4_TYPE = 57661 +const FLOAT8_TYPE = 57662 +const DECIMAL_TYPE = 57663 +const NUMERIC = 57664 +const TIME = 57665 +const TIMESTAMP = 57666 +const DATETIME = 57667 +const YEAR = 57668 +const CHAR = 57669 +const VARCHAR = 57670 +const BOOL = 57671 +const CHARACTER = 57672 +const VARBINARY = 57673 +const NCHAR = 57674 +const TEXT = 57675 +const TINYTEXT = 57676 +const MEDIUMTEXT = 57677 +const LONGTEXT = 57678 +const BLOB = 57679 +const TINYBLOB = 57680 +const MEDIUMBLOB = 57681 +const LONGBLOB = 57682 +const JSON = 57683 +const JSON_SCHEMA_VALID = 57684 +const JSON_SCHEMA_VALIDATION_REPORT = 57685 +const ENUM = 57686 +const GEOMETRY = 57687 +const POINT = 57688 +const LINESTRING = 57689 +const POLYGON = 57690 +const GEOMCOLLECTION = 57691 +const GEOMETRYCOLLECTION = 57692 +const MULTIPOINT = 57693 +const MULTILINESTRING = 57694 +const MULTIPOLYGON = 57695 +const ASCII = 57696 +const UNICODE = 57697 +const NULLX = 57698 +const AUTO_INCREMENT = 57699 +const APPROXNUM = 57700 +const SIGNED = 57701 +const UNSIGNED = 57702 +const ZEROFILL = 57703 +const PURGE = 57704 +const BEFORE = 57705 +const CODE = 57706 +const COLLATION = 57707 +const COLUMNS = 57708 +const DATABASES = 57709 +const ENGINES = 57710 +const EVENT = 57711 +const EXTENDED = 57712 +const FIELDS = 57713 +const FULL = 57714 +const FUNCTION = 57715 +const GTID_EXECUTED = 57716 +const KEYSPACES = 57717 +const OPEN = 57718 +const PLUGINS = 57719 +const PRIVILEGES = 57720 +const PROCESSLIST = 57721 +const SCHEMAS = 57722 +const TABLES = 57723 +const TRIGGERS = 57724 +const USER = 57725 +const VGTID_EXECUTED = 57726 +const VITESS_KEYSPACES = 57727 +const VITESS_METADATA = 57728 +const VITESS_MIGRATIONS = 57729 +const VITESS_REPLICATION_STATUS = 57730 +const VITESS_SHARDS = 57731 +const VITESS_TABLETS = 57732 +const VITESS_TARGET = 57733 +const VSCHEMA = 57734 +const VITESS_THROTTLED_APPS = 57735 +const NAMES = 57736 +const GLOBAL = 57737 +const SESSION = 57738 +const ISOLATION = 57739 +const LEVEL = 57740 +const READ = 57741 +const WRITE = 57742 +const ONLY = 57743 +const REPEATABLE = 57744 +const COMMITTED = 57745 +const UNCOMMITTED = 57746 +const SERIALIZABLE = 57747 +const ADDDATE = 57748 +const CURRENT_TIMESTAMP = 57749 +const DATABASE = 57750 +const CURRENT_DATE = 57751 +const CURDATE = 57752 +const DATE_ADD = 57753 +const DATE_SUB = 57754 +const NOW = 57755 +const SUBDATE = 57756 +const CURTIME = 57757 +const CURRENT_TIME = 57758 +const LOCALTIME = 57759 +const LOCALTIMESTAMP = 57760 +const CURRENT_USER = 57761 +const UTC_DATE = 57762 +const UTC_TIME = 57763 +const UTC_TIMESTAMP = 57764 +const SYSDATE = 57765 +const DAY = 57766 +const DAY_HOUR = 57767 +const DAY_MICROSECOND = 57768 +const DAY_MINUTE = 57769 +const DAY_SECOND = 57770 +const HOUR = 57771 +const HOUR_MICROSECOND = 57772 +const HOUR_MINUTE = 57773 +const HOUR_SECOND = 57774 +const MICROSECOND = 57775 +const MINUTE = 57776 +const MINUTE_MICROSECOND = 57777 +const MINUTE_SECOND = 57778 +const MONTH = 57779 +const QUARTER = 57780 +const SECOND = 57781 +const SECOND_MICROSECOND = 57782 +const YEAR_MONTH = 57783 +const WEEK = 57784 +const SQL_TSI_DAY = 57785 +const SQL_TSI_WEEK = 57786 +const SQL_TSI_HOUR = 57787 +const SQL_TSI_MINUTE = 57788 +const SQL_TSI_MONTH = 57789 +const SQL_TSI_QUARTER = 57790 +const SQL_TSI_SECOND = 57791 +const SQL_TSI_MICROSECOND = 57792 +const SQL_TSI_YEAR = 57793 +const REPLACE = 57794 +const CONVERT = 57795 +const CAST = 57796 +const SUBSTR = 57797 +const SUBSTRING = 57798 +const SEPARATOR = 57799 +const TIMESTAMPADD = 57800 +const TIMESTAMPDIFF = 57801 +const WEIGHT_STRING = 57802 +const LTRIM = 57803 +const RTRIM = 57804 +const TRIM = 57805 +const JSON_ARRAY = 57806 +const JSON_OBJECT = 57807 +const JSON_QUOTE = 57808 +const JSON_DEPTH = 57809 +const JSON_TYPE = 57810 +const JSON_LENGTH = 57811 +const JSON_VALID = 57812 +const JSON_ARRAY_APPEND = 57813 +const JSON_ARRAY_INSERT = 57814 +const JSON_INSERT = 57815 +const JSON_MERGE = 57816 +const JSON_MERGE_PATCH = 57817 +const JSON_MERGE_PRESERVE = 57818 +const JSON_REMOVE = 57819 +const JSON_REPLACE = 57820 +const JSON_SET = 57821 +const JSON_UNQUOTE = 57822 +const COUNT = 57823 +const AVG = 57824 +const MAX = 57825 +const MIN = 57826 +const SUM = 57827 +const GROUP_CONCAT = 57828 +const BIT_AND = 57829 +const BIT_OR = 57830 +const BIT_XOR = 57831 +const STD = 57832 +const STDDEV = 57833 +const STDDEV_POP = 57834 +const STDDEV_SAMP = 57835 +const VAR_POP = 57836 +const VAR_SAMP = 57837 +const VARIANCE = 57838 +const ANY_VALUE = 57839 +const REGEXP_INSTR = 57840 +const REGEXP_LIKE = 57841 +const REGEXP_REPLACE = 57842 +const REGEXP_SUBSTR = 57843 +const ExtractValue = 57844 +const UpdateXML = 57845 +const GET_LOCK = 57846 +const RELEASE_LOCK = 57847 +const RELEASE_ALL_LOCKS = 57848 +const IS_FREE_LOCK = 57849 +const IS_USED_LOCK = 57850 +const LOCATE = 57851 +const POSITION = 57852 +const ST_GeometryCollectionFromText = 57853 +const ST_GeometryFromText = 57854 +const ST_LineStringFromText = 57855 +const ST_MultiLineStringFromText = 57856 +const ST_MultiPointFromText = 57857 +const ST_MultiPolygonFromText = 57858 +const ST_PointFromText = 57859 +const ST_PolygonFromText = 57860 +const ST_GeometryCollectionFromWKB = 57861 +const ST_GeometryFromWKB = 57862 +const ST_LineStringFromWKB = 57863 +const ST_MultiLineStringFromWKB = 57864 +const ST_MultiPointFromWKB = 57865 +const ST_MultiPolygonFromWKB = 57866 +const ST_PointFromWKB = 57867 +const ST_PolygonFromWKB = 57868 +const ST_AsBinary = 57869 +const ST_AsText = 57870 +const ST_Dimension = 57871 +const ST_Envelope = 57872 +const ST_IsSimple = 57873 +const ST_IsEmpty = 57874 +const ST_GeometryType = 57875 +const ST_X = 57876 +const ST_Y = 57877 +const ST_Latitude = 57878 +const ST_Longitude = 57879 +const ST_EndPoint = 57880 +const ST_IsClosed = 57881 +const ST_Length = 57882 +const ST_NumPoints = 57883 +const ST_StartPoint = 57884 +const ST_PointN = 57885 +const ST_Area = 57886 +const ST_Centroid = 57887 +const ST_ExteriorRing = 57888 +const ST_InteriorRingN = 57889 +const ST_NumInteriorRings = 57890 +const ST_NumGeometries = 57891 +const ST_GeometryN = 57892 +const ST_LongFromGeoHash = 57893 +const ST_PointFromGeoHash = 57894 +const ST_LatFromGeoHash = 57895 +const ST_GeoHash = 57896 +const ST_AsGeoJSON = 57897 +const ST_GeomFromGeoJSON = 57898 +const MATCH = 57899 +const AGAINST = 57900 +const BOOLEAN = 57901 +const LANGUAGE = 57902 +const WITH = 57903 +const QUERY = 57904 +const EXPANSION = 57905 +const WITHOUT = 57906 +const VALIDATION = 57907 +const UNUSED = 57908 +const ARRAY = 57909 +const BYTE = 57910 +const CUME_DIST = 57911 +const DESCRIPTION = 57912 +const DENSE_RANK = 57913 +const EMPTY = 57914 +const EXCEPT = 57915 +const FIRST_VALUE = 57916 +const GROUPING = 57917 +const GROUPS = 57918 +const JSON_TABLE = 57919 +const LAG = 57920 +const LAST_VALUE = 57921 +const LATERAL = 57922 +const LEAD = 57923 +const NTH_VALUE = 57924 +const NTILE = 57925 +const OF = 57926 +const OVER = 57927 +const PERCENT_RANK = 57928 +const RANK = 57929 +const RECURSIVE = 57930 +const ROW_NUMBER = 57931 +const SYSTEM = 57932 +const WINDOW = 57933 +const ACTIVE = 57934 +const ADMIN = 57935 +const AUTOEXTEND_SIZE = 57936 +const BUCKETS = 57937 +const CLONE = 57938 +const COLUMN_FORMAT = 57939 +const COMPONENT = 57940 +const DEFINITION = 57941 +const ENFORCED = 57942 +const ENGINE_ATTRIBUTE = 57943 +const EXCLUDE = 57944 +const FOLLOWING = 57945 +const GET_MASTER_PUBLIC_KEY = 57946 +const HISTOGRAM = 57947 +const HISTORY = 57948 +const INACTIVE = 57949 +const INVISIBLE = 57950 +const LOCKED = 57951 +const MASTER_COMPRESSION_ALGORITHMS = 57952 +const MASTER_PUBLIC_KEY_PATH = 57953 +const MASTER_TLS_CIPHERSUITES = 57954 +const MASTER_ZSTD_COMPRESSION_LEVEL = 57955 +const NESTED = 57956 +const NETWORK_NAMESPACE = 57957 +const NOWAIT = 57958 +const NULLS = 57959 +const OJ = 57960 +const OLD = 57961 +const OPTIONAL = 57962 +const ORDINALITY = 57963 +const ORGANIZATION = 57964 +const OTHERS = 57965 +const PARTIAL = 57966 +const PATH = 57967 +const PERSIST = 57968 +const PERSIST_ONLY = 57969 +const PRECEDING = 57970 +const PRIVILEGE_CHECKS_USER = 57971 +const PROCESS = 57972 +const RANDOM = 57973 +const REFERENCE = 57974 +const REQUIRE_ROW_FORMAT = 57975 +const RESOURCE = 57976 +const RESPECT = 57977 +const RESTART = 57978 +const RETAIN = 57979 +const REUSE = 57980 +const ROLE = 57981 +const SECONDARY = 57982 +const SECONDARY_ENGINE = 57983 +const SECONDARY_ENGINE_ATTRIBUTE = 57984 +const SECONDARY_LOAD = 57985 +const SECONDARY_UNLOAD = 57986 +const SIMPLE = 57987 +const SKIP = 57988 +const SRID = 57989 +const THREAD_PRIORITY = 57990 +const TIES = 57991 +const UNBOUNDED = 57992 +const VCPU = 57993 +const VISIBLE = 57994 +const RETURNING = 57995 +const FORMAT_BYTES = 57996 +const FORMAT_PICO_TIME = 57997 +const PS_CURRENT_THREAD_ID = 57998 +const PS_THREAD_ID = 57999 +const GTID_SUBSET = 58000 +const GTID_SUBTRACT = 58001 +const WAIT_FOR_EXECUTED_GTID_SET = 58002 +const WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS = 58003 +const FORMAT = 58004 +const TREE = 58005 +const VITESS = 58006 +const TRADITIONAL = 58007 +const VTEXPLAIN = 58008 +const VEXPLAIN = 58009 +const PLAN = 58010 +const LOCAL = 58011 +const LOW_PRIORITY = 58012 +const NO_WRITE_TO_BINLOG = 58013 +const LOGS = 58014 +const ERROR = 58015 +const GENERAL = 58016 +const HOSTS = 58017 +const OPTIMIZER_COSTS = 58018 +const USER_RESOURCES = 58019 +const SLOW = 58020 +const CHANNEL = 58021 +const RELAY = 58022 +const EXPORT = 58023 +const CURRENT = 58024 +const ROW = 58025 +const ROWS = 58026 +const AVG_ROW_LENGTH = 58027 +const CONNECTION = 58028 +const CHECKSUM = 58029 +const DELAY_KEY_WRITE = 58030 +const ENCRYPTION = 58031 +const ENGINE = 58032 +const INSERT_METHOD = 58033 +const MAX_ROWS = 58034 +const MIN_ROWS = 58035 +const PACK_KEYS = 58036 +const PASSWORD = 58037 +const FIXED = 58038 +const DYNAMIC = 58039 +const COMPRESSED = 58040 +const REDUNDANT = 58041 +const COMPACT = 58042 +const ROW_FORMAT = 58043 +const STATS_AUTO_RECALC = 58044 +const STATS_PERSISTENT = 58045 +const STATS_SAMPLE_PAGES = 58046 +const STORAGE = 58047 +const MEMORY = 58048 +const DISK = 58049 +const PARTITIONS = 58050 +const LINEAR = 58051 +const RANGE = 58052 +const LIST = 58053 +const SUBPARTITION = 58054 +const SUBPARTITIONS = 58055 +const HASH = 58056 var yyToknames = [...]string{ "$end", @@ -823,6 +878,7 @@ var yyToknames = [...]string{ "BOTH", "LEADING", "TRAILING", + "KILL", "EMPTY_FROM_CLAUSE", "LOWER_THAN_CHARSET", "CHARSET", @@ -853,6 +909,7 @@ var yyToknames = [...]string{ "REGEXP", "RLIKE", "IN", + "ASSIGNMENT_OPT", "'&'", "SHIFT_LEFT", "SHIFT_RIGHT", @@ -1067,6 +1124,8 @@ var yyToknames = [...]string{ "SIGNED", "UNSIGNED", "ZEROFILL", + "PURGE", + "BEFORE", "CODE", "COLLATION", "COLUMNS", @@ -1109,10 +1168,16 @@ var yyToknames = [...]string{ "COMMITTED", "UNCOMMITTED", "SERIALIZABLE", + "ADDDATE", "CURRENT_TIMESTAMP", "DATABASE", "CURRENT_DATE", + "CURDATE", + "DATE_ADD", + "DATE_SUB", "NOW", + "SUBDATE", + "CURTIME", "CURRENT_TIME", "LOCALTIME", "LOCALTIMESTAMP", @@ -1120,6 +1185,7 @@ var yyToknames = [...]string{ "UTC_DATE", "UTC_TIME", "UTC_TIMESTAMP", + "SYSDATE", "DAY", "DAY_HOUR", "DAY_MICROSECOND", @@ -1139,6 +1205,15 @@ var yyToknames = [...]string{ "SECOND_MICROSECOND", "YEAR_MONTH", "WEEK", + "SQL_TSI_DAY", + "SQL_TSI_WEEK", + "SQL_TSI_HOUR", + "SQL_TSI_MINUTE", + "SQL_TSI_MONTH", + "SQL_TSI_QUARTER", + "SQL_TSI_SECOND", + "SQL_TSI_MICROSECOND", + "SQL_TSI_YEAR", "REPLACE", "CONVERT", "CAST", @@ -1184,6 +1259,7 @@ var yyToknames = [...]string{ "VAR_POP", "VAR_SAMP", "VARIANCE", + "ANY_VALUE", "REGEXP_INSTR", "REGEXP_LIKE", "REGEXP_REPLACE", @@ -1197,6 +1273,52 @@ var yyToknames = [...]string{ "IS_USED_LOCK", "LOCATE", "POSITION", + "ST_GeometryCollectionFromText", + "ST_GeometryFromText", + "ST_LineStringFromText", + "ST_MultiLineStringFromText", + "ST_MultiPointFromText", + "ST_MultiPolygonFromText", + "ST_PointFromText", + "ST_PolygonFromText", + "ST_GeometryCollectionFromWKB", + "ST_GeometryFromWKB", + "ST_LineStringFromWKB", + "ST_MultiLineStringFromWKB", + "ST_MultiPointFromWKB", + "ST_MultiPolygonFromWKB", + "ST_PointFromWKB", + "ST_PolygonFromWKB", + "ST_AsBinary", + "ST_AsText", + "ST_Dimension", + "ST_Envelope", + "ST_IsSimple", + "ST_IsEmpty", + "ST_GeometryType", + "ST_X", + "ST_Y", + "ST_Latitude", + "ST_Longitude", + "ST_EndPoint", + "ST_IsClosed", + "ST_Length", + "ST_NumPoints", + "ST_StartPoint", + "ST_PointN", + "ST_Area", + "ST_Centroid", + "ST_ExteriorRing", + "ST_InteriorRingN", + "ST_NumInteriorRings", + "ST_NumGeometries", + "ST_GeometryN", + "ST_LongFromGeoHash", + "ST_PointFromGeoHash", + "ST_LatFromGeoHash", + "ST_GeoHash", + "ST_AsGeoJSON", + "ST_GeomFromGeoJSON", "MATCH", "AGAINST", "BOOLEAN", @@ -1370,4939 +1492,5719 @@ var yyExca = [...]int{ 1, -1, -2, 0, -1, 2, - 13, 49, - 14, 49, - -2, 38, - -1, 50, - 1, 157, - 665, 157, - -2, 165, - -1, 51, - 135, 165, - 176, 165, - 345, 165, - -2, 519, - -1, 58, - 36, 766, - 239, 766, - 250, 766, - 285, 780, - 286, 780, - -2, 768, - -1, 63, - 241, 804, - -2, 802, - -1, 118, - 238, 1459, - -2, 131, - -1, 120, - 1, 158, - 665, 158, - -2, 165, - -1, 131, - 136, 405, - 244, 405, - -2, 508, - -1, 150, - 135, 165, - 176, 165, - 345, 165, - -2, 528, - -1, 813, - 87, 1476, - -2, 1330, - -1, 814, - 87, 1477, - 221, 1481, - -2, 1331, - -1, 815, - 221, 1480, + 13, 51, + 14, 51, -2, 40, - -1, 895, - 60, 878, - -2, 893, - -1, 981, - 249, 41, - 254, 41, - -2, 416, - -1, 1066, - 1, 576, - 665, 576, - -2, 165, - -1, 1365, - 221, 1481, - -2, 1331, - -1, 1516, - 60, 879, - -2, 898, - -1, 1517, - 60, 880, + -1, 52, + 1, 159, + 732, 159, + -2, 167, + -1, 53, + 136, 167, + 178, 167, + 347, 167, + -2, 523, + -1, 61, + 36, 772, + 241, 772, + 252, 772, + 287, 786, + 288, 786, + -2, 774, + -1, 66, + 243, 810, + -2, 808, + -1, 122, + 240, 1585, + -2, 133, + -1, 124, + 1, 160, + 732, 160, + -2, 167, + -1, 135, + 137, 408, + 246, 408, + -2, 512, + -1, 154, + 136, 167, + 178, 167, + 347, 167, + -2, 532, + -1, 733, + 164, 41, + -2, 45, + -1, 939, + 87, 1602, + -2, 1456, + -1, 940, + 87, 1603, + 223, 1607, + -2, 1457, + -1, 941, + 223, 1606, + -2, 42, + -1, 1024, + 60, 884, -2, 899, - -1, 1568, - 135, 165, - 176, 165, - 345, 165, - -2, 455, - -1, 1649, - 136, 405, - 244, 405, - -2, 508, - -1, 1658, - 249, 42, - 254, 42, - -2, 417, - -1, 2020, - 221, 1485, - -2, 1479, - -1, 2021, - 221, 1481, - -2, 1477, - -1, 2121, - 135, 165, - 176, 165, - 345, 165, - -2, 456, - -1, 2128, - 26, 186, - -2, 188, - -1, 2498, - 78, 96, - 88, 96, - -2, 957, - -1, 2567, - 640, 692, - -2, 666, - -1, 2735, - 50, 1427, - -2, 1421, - -1, 3392, - 640, 692, - -2, 680, - -1, 3480, - 90, 624, - 95, 624, - 105, 624, - 178, 624, - 179, 624, - 180, 624, - 181, 624, - 182, 624, - 183, 624, - 184, 624, - 185, 624, - 186, 624, - 187, 624, - 188, 624, - 189, 624, - 190, 624, - 191, 624, - 192, 624, - 193, 624, - 194, 624, - 195, 624, - 196, 624, - 197, 624, - 198, 624, - 199, 624, - 200, 624, - 201, 624, - 202, 624, - 203, 624, - 204, 624, - 205, 624, - 206, 624, - 207, 624, - 208, 624, - 209, 624, - 210, 624, - 211, 624, - 212, 624, - 213, 624, - 214, 624, - 215, 624, - 216, 624, - 217, 624, - 218, 624, - 219, 624, - -2, 1838, + -1, 1111, + 251, 43, + 256, 43, + -2, 419, + -1, 1196, + 1, 580, + 732, 580, + -2, 167, + -1, 1498, + 223, 1607, + -2, 1457, + -1, 1707, + 60, 885, + -2, 904, + -1, 1708, + 60, 886, + -2, 905, + -1, 1759, + 136, 167, + 178, 167, + 347, 167, + -2, 458, + -1, 1840, + 137, 408, + 246, 408, + -2, 512, + -1, 1849, + 251, 44, + 256, 44, + -2, 420, + -1, 2286, + 223, 1611, + -2, 1605, + -1, 2287, + 223, 1607, + -2, 1603, + -1, 2387, + 136, 167, + 178, 167, + 347, 167, + -2, 459, + -1, 2394, + 26, 188, + -2, 190, + -1, 2847, + 78, 98, + 88, 98, + -2, 963, + -1, 2916, + 707, 696, + -2, 670, + -1, 3123, + 50, 1553, + -2, 1547, + -1, 3937, + 707, 696, + -2, 684, + -1, 4024, + 90, 628, + 95, 628, + 105, 628, + 180, 628, + 181, 628, + 182, 628, + 183, 628, + 184, 628, + 185, 628, + 186, 628, + 187, 628, + 188, 628, + 189, 628, + 190, 628, + 191, 628, + 192, 628, + 193, 628, + 194, 628, + 195, 628, + 196, 628, + 197, 628, + 198, 628, + 199, 628, + 200, 628, + 201, 628, + 202, 628, + 203, 628, + 204, 628, + 205, 628, + 206, 628, + 207, 628, + 208, 628, + 209, 628, + 210, 628, + 211, 628, + 212, 628, + 213, 628, + 214, 628, + 215, 628, + 216, 628, + 217, 628, + 218, 628, + 219, 628, + 220, 628, + 221, 628, + -2, 1974, } const yyPrivate = 57344 -const yyLast = 47490 +const yyLast = 55209 var yyAct = [...]int{ - 1524, 829, 3137, 824, 83, 3138, 3551, 3136, 816, 1884, - 3562, 3373, 817, 684, 3457, 3520, 2118, 3521, 2964, 3478, - 1571, 3107, 3423, 2787, 3446, 2885, 2794, 3357, 3305, 1830, - 2836, 2049, 5, 2845, 2850, 2748, 2847, 2846, 783, 2844, - 1130, 40, 3355, 3094, 2849, 2848, 2802, 2398, 2051, 2192, - 1531, 666, 2752, 2865, 2864, 2471, 2749, 3003, 2628, 3171, - 2997, 2073, 778, 777, 2089, 694, 2867, 3023, 2746, 2736, - 2458, 2092, 2989, 2011, 2155, 662, 2564, 2533, 2223, 2180, - 1013, 2069, 2532, 893, 2612, 83, 1627, 663, 779, 911, - 2891, 2751, 2160, 2534, 943, 2106, 912, 2483, 890, 2093, - 39, 1132, 2094, 41, 893, 1518, 784, 2450, 3166, 2434, - 664, 2464, 1880, 1899, 3345, 2432, 1838, 159, 2016, 953, - 892, 2604, 896, 2008, 1979, 1674, 2201, 145, 2179, 2081, - 1656, 2240, 2162, 2525, 2694, 971, 1560, 100, 2500, 101, - 1540, 914, 96, 1903, 888, 676, 2096, 1498, 1377, 1857, - 1305, 1290, 1107, 976, 1777, 1663, 950, 947, 1494, 989, - 1773, 982, 2177, 951, 979, 2151, 1755, 977, 978, 1559, - 2152, 1545, 671, 929, 2074, 931, 902, 2017, 1975, 1361, - 95, 1829, 1337, 103, 897, 899, 1128, 898, 81, 1121, - 1622, 128, 1782, 129, 1912, 1062, 900, 80, 163, 123, - 121, 122, 89, 102, 924, 653, 670, 94, 3552, 3095, - 1385, 1381, 2194, 2195, 2196, 2194, 2833, 2555, 1648, 2587, - 2586, 919, 923, 2238, 3408, 2855, 1740, 3087, 1015, 3504, - 658, 3050, 1978, 598, 91, 91, 2620, 2621, 905, 3141, - 3404, 1032, 1033, 1034, 3403, 1037, 1038, 1039, 1040, 124, - 3409, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051, - 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1018, 944, - 130, 3382, 2557, 2855, 91, 654, 1078, 906, 3141, 891, - 2, 2853, 2046, 2047, 889, 938, 2852, 939, 638, 1845, - 632, 1844, 993, 1860, 638, 1843, 1525, 992, 1842, 968, - 937, 781, 782, 1841, 1840, 913, 1813, 2859, 656, 3499, - 657, 2283, 2430, 186, 1026, 91, 1019, 1022, 1023, 124, - 967, 966, 965, 107, 108, 109, 3140, 112, 1301, 2853, - 118, 969, 632, 187, 2227, 2732, 593, 125, 1035, 937, - 781, 782, 3524, 955, 632, 3404, 3508, 651, 652, 3572, - 168, 2577, 2460, 2698, 3506, 2859, 1322, 884, 885, 886, - 887, 3519, 3542, 895, 2969, 3140, 2968, 960, 833, 834, - 835, 3507, 3488, 2580, 629, 2225, 3358, 2399, 2226, 3505, - 833, 834, 835, 632, 1850, 2910, 3301, 124, 3300, 1017, - 3486, 926, 927, 2514, 3100, 1016, 3502, 3101, 3533, 3492, - 3493, 3311, 3119, 1292, 82, 2292, 165, 2804, 2805, 166, - 3108, 3447, 1306, 3454, 3487, 2220, 3310, 3458, 3118, 2856, - 3387, 1889, 614, 82, 3483, 2930, 1637, 2431, 964, 82, - 1071, 1072, 185, 2112, 82, 612, 2509, 84, 2783, 2508, - 2113, 2114, 2510, 2594, 2595, 2784, 2785, 2474, 1822, 1823, - 1561, 2619, 1562, 2171, 2289, 2603, 1085, 1097, 882, 881, - 1125, 1086, 1074, 1085, 1306, 3374, 1098, 2856, 1086, 1084, - 2290, 1083, 2475, 1091, 2521, 609, 2165, 1114, 3184, 1116, - 2131, 2130, 91, 2281, 624, 2918, 962, 2467, 2468, 2662, - 2048, 1319, 3464, 1320, 1321, 2916, 1821, 2887, 646, 619, - 633, 91, 1291, 2558, 3464, 1730, 2605, 91, 1825, 622, - 1102, 1103, 91, 650, 1502, 2803, 644, 1113, 1115, 3000, - 1316, 2892, 2565, 3333, 930, 3334, 2202, 2806, 2590, 2246, - 3554, 3525, 2253, 2249, 2251, 2252, 2250, 2254, 2255, 2261, - 1302, 2262, 633, 2263, 169, 2241, 1557, 1756, 2880, 1731, - 1118, 1732, 3526, 175, 633, 1061, 2881, 1067, 1099, 632, - 1100, 1101, 1123, 1106, 2607, 1092, 2245, 3089, 2284, 2285, - 2287, 2286, 1316, 1124, 3088, 599, 2264, 601, 615, 1042, - 635, 1041, 634, 605, 2247, 603, 607, 616, 608, 2888, - 602, 2889, 613, 633, 2243, 604, 617, 618, 621, 625, - 626, 627, 623, 620, 3285, 611, 636, 2205, 2244, 972, - 3145, 963, 1104, 973, 2090, 973, 940, 934, 932, 1338, - 1011, 1111, 1105, 2164, 1010, 1112, 1312, 1505, 632, 1304, - 1129, 1009, 1129, 1129, 1008, 1117, 1007, 1006, 1005, 1004, - 3500, 2907, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1347, - 1346, 1348, 1349, 999, 2663, 940, 934, 932, 1641, 1110, - 1012, 2806, 3573, 984, 948, 1774, 948, 2290, 946, 1036, - 3531, 985, 1002, 2178, 1000, 948, 925, 160, 1312, 2608, - 893, 1362, 1367, 1368, 2231, 1371, 3085, 1372, 1374, 1375, - 1376, 2230, 1379, 1380, 1382, 1382, 2697, 1382, 1386, 1386, - 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, - 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, 1406, 1407, - 1408, 1409, 1410, 1411, 1412, 1413, 1414, 1415, 1416, 1417, - 1418, 1419, 1420, 1421, 1422, 1423, 1424, 1425, 1426, 1427, - 1428, 1429, 1430, 1431, 1432, 1433, 1434, 1435, 1436, 1437, - 1438, 1439, 1440, 1441, 1442, 1443, 1444, 1445, 1446, 1447, - 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1363, 633, - 1558, 1119, 1456, 1373, 1458, 1459, 1460, 1461, 1462, 2592, - 830, 830, 2224, 1355, 1356, 1357, 1358, 1386, 1386, 1386, - 1386, 1386, 632, 1369, 970, 2559, 1761, 1770, 3381, 2556, - 3084, 1468, 1469, 1470, 1471, 1472, 1473, 1474, 1475, 1476, - 1477, 1478, 1479, 1480, 1481, 3001, 637, 1284, 1285, 1003, - 830, 1001, 2857, 2858, 1359, 1283, 1065, 1095, 933, 3491, - 3139, 2614, 1495, 1293, 2077, 2861, 2613, 630, 633, 2611, - 1742, 1741, 1743, 1744, 1745, 3048, 3049, 1029, 1352, 161, - 632, 1028, 631, 3117, 2826, 1662, 173, 1311, 1308, 1309, - 1310, 1315, 1317, 1314, 991, 1313, 1352, 933, 1492, 3139, - 2857, 2858, 3460, 3490, 85, 1307, 1081, 2579, 1087, 1088, - 1089, 1090, 3566, 2861, 3460, 1501, 2168, 2589, 2435, 2437, - 1021, 893, 2077, 2291, 1635, 893, 984, 181, 1526, 1528, - 1020, 893, 1126, 1127, 3459, 1383, 90, 1384, 1387, 1311, - 1308, 1309, 1310, 1315, 1317, 1314, 3459, 1313, 1634, 1633, - 2575, 2578, 1300, 1771, 1631, 90, 2169, 1307, 1508, 597, - 1493, 90, 1512, 2167, 2624, 592, 90, 1070, 892, 3370, - 162, 167, 164, 170, 171, 172, 174, 176, 177, 178, - 179, 959, 2304, 1082, 961, 2952, 180, 182, 183, 184, - 2222, 1073, 991, 1661, 3037, 2602, 990, 2170, 2601, 1353, - 1354, 2790, 120, 3019, 2505, 2470, 2407, 2166, 1506, 1892, - 1549, 964, 1060, 1457, 1076, 2465, 2119, 1352, 2782, 1510, - 2908, 1511, 1349, 100, 1493, 101, 1463, 1464, 1465, 1466, - 1467, 908, 633, 2614, 1122, 115, 3395, 1014, 2613, 1764, - 1108, 1762, 1763, 1080, 1765, 1766, 2791, 1760, 1342, 1343, - 1344, 1345, 1347, 1346, 1348, 1349, 2075, 2076, 1499, 3080, - 1094, 1486, 1783, 3013, 2242, 1834, 1509, 1767, 1563, 103, - 2793, 1096, 2648, 991, 1904, 1904, 1064, 2321, 2548, 1638, - 1639, 1640, 964, 3534, 956, 2436, 1321, 1913, 2788, 3180, - 633, 958, 957, 2214, 990, 3055, 1027, 1320, 1321, 991, - 1024, 1914, 3054, 2209, 1654, 2804, 2805, 116, 1527, 1671, - 991, 1668, 2789, 1670, 2075, 2076, 1496, 1660, 2219, 2214, - 2629, 2217, 1507, 1002, 1725, 889, 1000, 891, 1530, 3527, - 3574, 3564, 2218, 1647, 3565, 1322, 3563, 3038, 1703, 904, - 962, 1706, 3114, 1708, 3115, 2795, 1715, 1716, 1707, 1129, - 1666, 1953, 1721, 1722, 3425, 1676, 3568, 1677, 2216, 1679, - 1681, 1554, 1555, 1685, 1687, 1689, 1691, 1693, 1344, 1345, - 1347, 1346, 1348, 1349, 1665, 990, 3363, 1664, 1664, 1063, - 1630, 984, 987, 988, 2221, 948, 1079, 1109, 3293, 981, - 985, 1066, 3292, 3283, 963, 1757, 1645, 1758, 1643, 3426, - 1759, 990, 1657, 3130, 2631, 1513, 994, 984, 1779, 1784, - 980, 996, 990, 2803, 2013, 997, 995, 3575, 984, 987, - 988, 3364, 948, 1644, 3129, 2806, 981, 985, 991, 3062, - 1911, 2296, 2297, 2298, 1711, 3061, 998, 1852, 1854, 1855, - 1945, 1934, 1935, 1936, 1937, 1947, 1938, 1939, 1940, 1952, - 1948, 1941, 1942, 1949, 1950, 1951, 1943, 1944, 1946, 3051, - 2834, 1853, 1322, 2523, 1775, 963, 1785, 1786, 2822, 2530, - 1319, 1636, 1320, 1321, 2641, 2640, 2639, 1862, 1338, 2633, - 1790, 2637, 1750, 2632, 2529, 2630, 2528, 1797, 1798, 1799, - 2635, 1863, 1350, 1351, 1861, 124, 967, 966, 965, 2634, - 2360, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1347, 1346, - 1348, 1349, 2174, 1751, 1129, 1129, 1789, 2636, 2638, 1326, - 1327, 1328, 1329, 1330, 1331, 1332, 1324, 1748, 83, 2013, - 990, 83, 1525, 2010, 1810, 994, 984, 1322, 1811, 1909, - 996, 2792, 2012, 1735, 997, 995, 1749, 1737, 1910, 1734, - 1338, 1733, 1723, 1334, 638, 1335, 828, 1717, 1714, 1322, - 833, 834, 835, 1713, 1712, 40, 1683, 1322, 40, 1336, - 1350, 1351, 1333, 1339, 1340, 1341, 1342, 1343, 1344, 1345, - 1347, 1346, 1348, 1349, 1322, 2884, 2925, 1887, 1887, 1287, - 106, 1747, 1885, 1885, 1888, 1322, 1557, 1319, 1787, 1320, - 1321, 105, 1534, 104, 3528, 1791, 2650, 1793, 1794, 1795, - 1796, 1736, 99, 1907, 1800, 3390, 1322, 1908, 3045, 638, - 3389, 3538, 1525, 2456, 3553, 106, 1812, 2356, 2512, 638, - 3367, 1856, 2190, 2189, 2188, 2187, 105, 3366, 104, 1322, - 3365, 1492, 3288, 3536, 1525, 3012, 2186, 2185, 1535, 3272, - 2318, 1971, 1525, 1865, 3271, 1867, 1868, 1869, 1870, 1871, - 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1879, 3470, 1525, - 1858, 1525, 1319, 3179, 1320, 1321, 3177, 1866, 3126, 3468, - 1525, 1363, 2006, 1339, 1340, 1341, 1342, 1343, 1344, 1345, - 1347, 1346, 1348, 1349, 1319, 1322, 1320, 1321, 1905, 1818, - 1819, 1525, 1319, 1493, 1320, 1321, 1525, 2035, 1322, 1859, - 1491, 1835, 81, 1490, 1322, 81, 2796, 3515, 1525, 1319, - 2800, 1320, 1321, 2317, 1526, 2042, 1322, 2799, 2018, 1489, - 1319, 3059, 1320, 1321, 1967, 1340, 1341, 1342, 1343, 1344, - 1345, 1347, 1346, 1348, 1349, 1322, 2009, 1864, 1318, 1525, - 1322, 1319, 3044, 1320, 1321, 2893, 97, 2890, 2066, 2825, - 186, 2801, 2824, 99, 2456, 3453, 2797, 98, 2539, 2082, - 2083, 2798, 1891, 2526, 1319, 1488, 1320, 1321, 2236, 3466, - 1525, 1525, 1898, 1900, 125, 2358, 2098, 1322, 2235, 2020, - 2456, 3433, 3342, 1525, 3383, 1318, 1525, 168, 3340, 1525, - 2456, 3429, 3319, 1965, 3416, 1525, 3098, 3380, 2023, 2024, - 3337, 1525, 3318, 1976, 1915, 1916, 1917, 1918, 2128, 100, - 2018, 101, 1525, 2100, 3296, 1525, 2456, 3284, 1929, 2059, - 1319, 2060, 1320, 1321, 3323, 1525, 1322, 2003, 2004, 100, - 2072, 101, 2019, 1319, 1322, 1320, 1321, 2054, 1322, 1319, - 99, 1320, 1321, 165, 2065, 1814, 166, 1322, 953, 1780, - 2175, 1319, 1746, 1320, 1321, 1738, 2022, 1322, 1728, 2025, - 2026, 2988, 1525, 3276, 2137, 2138, 2139, 2140, 97, 185, - 1319, 2020, 1320, 1321, 2043, 1319, 1724, 1320, 1321, 98, - 1976, 953, 1720, 2132, 2123, 2133, 2134, 2135, 2136, 1719, - 905, 1718, 2104, 1322, 2041, 3098, 1525, 2456, 3096, 1525, - 2053, 2143, 2144, 2145, 2146, 1536, 2122, 1120, 1322, 3275, - 2981, 1525, 1319, 2064, 1320, 1321, 2157, 3106, 2978, 1525, - 2214, 1525, 2976, 1525, 2087, 2163, 2067, 3017, 1525, 2203, - 105, 2940, 1525, 2566, 1525, 2369, 1525, 2472, 2085, 2815, - 2814, 2923, 1525, 2126, 2812, 2813, 2810, 2811, 2110, 938, - 2109, 939, 2108, 2810, 2809, 2480, 1525, 2125, 2124, 2290, - 2588, 1319, 2472, 1320, 1321, 1626, 2569, 2544, 2200, 1319, - 2479, 1320, 1321, 1319, 2127, 1320, 1321, 2173, 1525, 2562, - 2563, 169, 1319, 1318, 1320, 1321, 3014, 1322, 2456, 2455, - 175, 3421, 1319, 3529, 1320, 1321, 2314, 1525, 2837, 1318, - 2158, 1322, 1890, 1525, 2747, 2154, 2147, 2149, 2150, 2480, - 2172, 2208, 1626, 1625, 2211, 3012, 2212, 2176, 3394, 2184, - 1322, 2228, 1569, 1568, 1322, 2480, 2456, 2480, 1319, 1322, - 1320, 1321, 993, 1338, 3012, 2158, 2207, 992, 2210, 2206, - 2973, 2777, 1664, 1319, 2232, 1320, 1321, 2501, 2233, 2234, - 1338, 2290, 2623, 2812, 2229, 2501, 1339, 1340, 1341, 1342, - 1343, 1344, 1345, 1347, 1346, 1348, 1349, 2720, 1338, 2215, - 2303, 3314, 2295, 1339, 1340, 1341, 1342, 1343, 1344, 1345, - 1347, 1346, 1348, 1349, 1322, 2427, 1525, 2452, 2111, 2314, - 2239, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1347, 1346, - 1348, 1349, 99, 2311, 2425, 1525, 2369, 2345, 2404, 1525, - 2502, 2305, 2307, 2310, 160, 1322, 2312, 2344, 2502, 2315, - 2504, 2316, 1319, 2214, 1320, 1321, 2323, 2214, 2290, 2197, - 2325, 2326, 2327, 1322, 2080, 1529, 1319, 2044, 1320, 1321, - 2333, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2341, 2342, - 1890, 1836, 2267, 2308, 1820, 1319, 1769, 1320, 1321, 1319, - 2314, 1320, 1321, 1556, 1319, 1322, 1320, 1321, 2384, 1525, - 2300, 975, 2302, 894, 974, 91, 2349, 2350, 2351, 2352, - 2353, 1322, 2355, 1538, 3496, 3436, 2357, 3307, 1858, 1532, - 2362, 2363, 2280, 2364, 2301, 1322, 2367, 3273, 2368, 2306, - 3378, 3191, 2371, 1322, 3079, 3076, 2375, 2288, 1322, 3057, - 2380, 2381, 2382, 2383, 2020, 1322, 2536, 2376, 1525, 1319, - 2935, 1320, 1321, 2394, 2395, 2396, 1322, 1859, 2400, 2401, - 1322, 2299, 2934, 1628, 2156, 2882, 2403, 2405, 1322, 2839, - 2406, 2835, 2570, 2408, 2409, 2410, 2411, 2412, 1065, 1537, - 1319, 91, 1320, 1321, 2419, 2420, 2153, 2421, 2148, 2142, - 2424, 2426, 2066, 1322, 2428, 3032, 2320, 2019, 1319, 2141, - 1320, 1321, 2438, 1753, 2440, 3063, 1659, 1655, 2886, 1887, - 3280, 1624, 2274, 2275, 1885, 2441, 161, 2277, 3081, 117, - 893, 2328, 2983, 173, 3308, 1322, 2278, 3024, 3025, 2979, - 1319, 2171, 1320, 1321, 1816, 2057, 3548, 1322, 2343, 3546, - 2950, 2477, 2478, 1322, 2946, 3522, 1319, 2348, 1320, 1321, - 2098, 2439, 2932, 893, 2497, 1322, 3064, 3065, 3066, 2354, - 1319, 3402, 1320, 1321, 181, 3328, 3030, 1322, 1319, 2476, - 1320, 1321, 1322, 1319, 3027, 1320, 1321, 2931, 40, 2442, - 1319, 2444, 1320, 1321, 2457, 1322, 2535, 2494, 3067, 2831, - 2496, 1319, 2830, 1320, 1321, 1319, 1817, 1320, 1321, 1322, - 2829, 1699, 2747, 1319, 1322, 1320, 1321, 162, 167, 164, - 170, 171, 172, 174, 176, 177, 178, 179, 2549, 2268, - 2495, 2453, 2531, 180, 182, 183, 184, 2928, 1319, 1322, - 1320, 1321, 3029, 1322, 2536, 3068, 3069, 3070, 2466, 2423, - 2766, 1499, 2429, 2769, 659, 2522, 2524, 2765, 2770, 1493, - 2767, 2422, 1700, 1701, 1702, 2768, 2418, 2449, 1533, 1322, - 1319, 3398, 1320, 1321, 2454, 3309, 1322, 2574, 2071, 2417, - 2499, 2469, 1319, 1322, 1320, 1321, 2063, 3018, 1319, 1322, - 1320, 1321, 2725, 2416, 1695, 3161, 1322, 3160, 2415, 2515, - 1319, 2163, 1320, 1321, 2503, 2538, 2724, 3362, 2506, 2585, - 2541, 2542, 1319, 1322, 1320, 1321, 2513, 1319, 2309, 1320, - 1321, 1322, 3170, 2414, 3172, 1322, 2771, 2413, 2489, 2490, - 1319, 3008, 1320, 1321, 2734, 1322, 2527, 2516, 1768, 1322, - 2561, 1696, 1697, 1698, 1319, 3159, 1320, 1321, 880, 1319, - 3005, 1320, 1321, 2402, 2537, 2808, 1322, 2519, 3004, 915, - 2397, 1322, 921, 921, 2540, 2545, 2582, 2393, 2546, 1031, - 2550, 2551, 2552, 2392, 1319, 909, 1320, 1321, 1319, 2583, - 1320, 1321, 1322, 910, 1030, 1647, 2737, 2739, 1913, 2652, - 2653, 2654, 2655, 2656, 2901, 2740, 2535, 2391, 1322, 97, - 2571, 2572, 1914, 2617, 1319, 2390, 1320, 1321, 2661, 2389, - 98, 1319, 1286, 1320, 1321, 2581, 2576, 125, 1319, 2388, - 1320, 1321, 1322, 2387, 1319, 106, 1320, 1321, 3010, 2642, - 99, 1319, 1322, 1320, 1321, 97, 105, 3560, 104, 2625, - 2386, 2606, 99, 2827, 2609, 2385, 98, 99, 1319, 2271, - 1320, 1321, 2990, 2627, 2082, 2083, 1319, 1322, 1320, 1321, - 1319, 2692, 1320, 1321, 3475, 3379, 2379, 3303, 2807, 1322, - 1319, 2493, 1320, 1321, 1319, 2626, 1320, 1321, 2068, 917, - 918, 2260, 2378, 2259, 2643, 2258, 2615, 2723, 2645, 2616, - 2257, 1319, 2256, 1320, 1321, 2722, 1319, 2294, 1320, 1321, - 106, 104, 3350, 3349, 3331, 3178, 2377, 3176, 2704, 2699, - 3175, 105, 105, 104, 2098, 3168, 2374, 1319, 3077, 1320, - 1321, 3009, 3007, 2840, 1322, 2009, 2198, 2009, 1642, 106, - 916, 2666, 2644, 1319, 3167, 1320, 1321, 2754, 2756, 83, - 105, 2373, 2098, 2098, 2098, 2098, 2098, 2998, 2657, 2472, - 3149, 2100, 2452, 2372, 2672, 2664, 2774, 1319, 2346, 1320, - 1321, 2055, 2098, 3550, 3549, 2098, 2727, 1319, 1550, 1320, - 1321, 2704, 1542, 110, 111, 2759, 896, 3549, 3550, 2100, - 2100, 2100, 2100, 2100, 2728, 2703, 2700, 3368, 2702, 3043, - 2776, 2674, 1319, 2676, 1320, 1321, 907, 1833, 2701, 2100, - 10, 3, 2100, 2715, 1319, 93, 1320, 1321, 2370, 2687, - 2688, 2689, 2690, 1831, 1, 1832, 9, 2726, 8, 2729, - 1289, 2744, 1288, 2750, 2719, 3047, 3485, 610, 2750, 2860, - 2045, 1497, 2778, 2741, 2742, 2779, 3523, 3481, 897, 2868, - 2760, 898, 2753, 2763, 2761, 2762, 3482, 2764, 1739, 2772, - 1322, 100, 1729, 101, 3109, 2716, 2717, 2718, 1977, 1319, - 2780, 1320, 1321, 3304, 2758, 2843, 2204, 1779, 3075, 2161, - 2817, 983, 2819, 150, 2120, 2121, 2820, 2821, 2682, 2683, - 2684, 2685, 2686, 2903, 3449, 114, 941, 2818, 113, 986, - 1093, 2199, 2786, 3099, 1322, 2520, 2129, 1575, 1573, 1322, - 2870, 2871, 1574, 2920, 2921, 2922, 1322, 2924, 2926, 2163, - 1322, 1572, 2863, 1577, 2842, 1576, 2909, 2841, 2877, 2347, - 1322, 2933, 2862, 2951, 1824, 645, 2937, 2938, 2939, 2941, - 2942, 2943, 2944, 2492, 2366, 2945, 639, 2947, 2948, 2949, - 188, 1564, 2953, 2954, 2955, 2956, 2957, 2958, 2959, 2960, - 2961, 2962, 2963, 2897, 2896, 1543, 2965, 2894, 1025, 2904, - 600, 2970, 2816, 2237, 2974, 606, 2975, 2977, 2914, 2980, - 2982, 1370, 2984, 2985, 2986, 2987, 2911, 2912, 2365, 2913, - 2993, 1815, 2915, 2361, 2917, 1319, 2919, 1320, 1321, 2721, - 2359, 2507, 936, 928, 2324, 2905, 2056, 2443, 935, 3281, - 2755, 3002, 2967, 2733, 2313, 2735, 2459, 2738, 2731, 2971, - 3361, 3169, 3434, 2517, 1539, 3015, 3016, 2972, 2319, 3020, - 2485, 2488, 2489, 2490, 2486, 1902, 2487, 2491, 1360, 1319, - 2097, 1320, 1321, 3144, 1319, 1851, 1320, 1321, 668, 2098, - 667, 1319, 2996, 1320, 1321, 1319, 665, 1320, 1321, 2445, - 2991, 2992, 3039, 2473, 2994, 1319, 1325, 1320, 1321, 818, - 2433, 1551, 2999, 1523, 1519, 3006, 2484, 2482, 2481, 2269, - 2105, 3026, 3022, 3477, 2099, 3021, 2100, 3011, 1520, 2485, - 2488, 2489, 2490, 2486, 3031, 2487, 2491, 2095, 2451, 3024, - 3025, 3028, 769, 768, 3035, 3036, 1523, 1519, 677, 669, - 661, 3033, 767, 2061, 2062, 1522, 766, 1521, 3040, 3034, - 2869, 1520, 3461, 2870, 2871, 1323, 3041, 3042, 2591, 2883, - 2593, 2518, 2879, 1303, 1515, 3097, 655, 954, 2906, 3385, - 2293, 3058, 2929, 3060, 1514, 1932, 1516, 1517, 1522, 1933, - 1521, 3052, 3053, 3392, 1378, 3103, 3104, 2851, 3093, 2832, - 2567, 2191, 2899, 2900, 66, 44, 3356, 3422, 765, 762, - 3146, 3147, 3148, 2695, 2696, 3405, 3406, 3116, 761, 3407, - 3120, 1960, 1299, 1296, 3498, 1826, 92, 35, 34, 33, - 32, 3082, 3083, 31, 25, 24, 23, 22, 21, 3105, - 28, 20, 19, 18, 3086, 2854, 3518, 3131, 3090, 3091, - 3092, 3559, 119, 53, 50, 48, 127, 126, 51, 47, - 1068, 45, 3135, 30, 29, 17, 16, 15, 14, 13, - 12, 11, 7, 6, 38, 3143, 37, 27, 36, 26, - 4, 2554, 2193, 3150, 3121, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 3158, 0, 3162, 3163, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 3142, 0, 0, 0, 0, 2754, 0, 83, 3125, - 2754, 0, 0, 0, 0, 3164, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1887, 0, 0, 0, - 0, 1885, 3193, 0, 0, 0, 3185, 0, 0, 0, - 0, 0, 0, 0, 0, 40, 0, 3165, 0, 0, - 3174, 3173, 0, 0, 0, 0, 0, 3183, 0, 3181, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 3287, 0, 3194, - 3195, 0, 0, 0, 3295, 3197, 0, 0, 0, 0, - 0, 2750, 0, 3302, 0, 3133, 0, 0, 0, 0, - 3187, 1541, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 2753, 3277, 3312, 3313, 2753, 3315, 3279, 3316, 3317, - 0, 3278, 0, 3320, 3321, 3322, 0, 3324, 3327, 3325, - 0, 3294, 3326, 3298, 0, 1887, 0, 0, 1629, 3306, - 1885, 3329, 3299, 3336, 3338, 3339, 3341, 3343, 3344, 3346, - 0, 3282, 0, 0, 0, 0, 3189, 0, 3289, 3290, - 3291, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 2754, 0, 0, 0, - 0, 0, 3286, 0, 0, 0, 0, 3376, 0, 0, - 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, - 1398, 1399, 1400, 1401, 1402, 1403, 1407, 1408, 1409, 1410, - 1411, 1412, 1413, 1414, 1415, 1416, 1417, 1418, 1419, 1420, - 1421, 1422, 1423, 1424, 1425, 1426, 1427, 1428, 1429, 1430, - 1431, 1432, 1433, 1434, 1435, 1436, 1438, 1439, 1440, 1441, - 1442, 1443, 1444, 1445, 1446, 1447, 1448, 1449, 1450, 1468, - 1469, 1470, 1471, 1472, 1473, 1474, 1475, 1476, 1477, 1478, - 1479, 1480, 1481, 3372, 3354, 1781, 3369, 3351, 3352, 3371, - 3353, 3332, 0, 0, 0, 3335, 0, 3386, 0, 3330, - 0, 2753, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 83, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3401, 3360, 0, 0, - 0, 0, 0, 3393, 0, 0, 0, 0, 3388, 3375, - 3391, 0, 40, 0, 0, 0, 3417, 0, 0, 0, - 0, 0, 3418, 3419, 0, 3377, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 814, 0, - 0, 0, 0, 0, 3430, 0, 3431, 0, 0, 0, - 3411, 83, 0, 3412, 0, 0, 0, 0, 0, 3396, - 0, 0, 3384, 0, 0, 0, 0, 3420, 0, 0, - 3455, 3456, 0, 0, 3427, 0, 0, 0, 0, 3435, - 0, 0, 0, 3437, 3465, 3467, 3469, 0, 40, 0, - 3462, 3463, 191, 3448, 0, 191, 3440, 3445, 643, 3442, - 3441, 0, 3439, 649, 0, 3306, 3450, 3444, 3443, 3497, - 0, 0, 0, 0, 191, 0, 0, 0, 0, 3473, - 0, 0, 0, 0, 0, 0, 0, 2750, 3484, 191, - 3494, 3489, 3476, 0, 0, 3432, 0, 0, 0, 0, - 0, 3462, 3463, 3503, 0, 0, 0, 3513, 3514, 3501, - 0, 0, 3400, 0, 649, 191, 649, 0, 0, 0, - 3410, 0, 0, 0, 0, 0, 0, 3512, 0, 0, - 3517, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 3535, 3537, 3539, 1846, 1847, 1848, 1849, 0, 0, 0, - 0, 0, 1887, 3532, 0, 0, 0, 1885, 3544, 0, - 0, 0, 3540, 0, 3543, 3541, 0, 3547, 3545, 0, - 0, 3558, 0, 0, 0, 0, 0, 3462, 3463, 3555, - 0, 0, 1373, 0, 0, 0, 3561, 3570, 3571, 0, - 3569, 3567, 1893, 1894, 0, 0, 0, 1896, 0, 921, - 921, 1901, 0, 0, 0, 1906, 1887, 0, 3530, 3578, - 3579, 1885, 3576, 3326, 3577, 0, 0, 0, 1919, 1920, - 1921, 1922, 1923, 1924, 1925, 1926, 1927, 1928, 0, 0, - 0, 0, 1954, 1955, 1956, 1957, 1958, 1959, 1961, 0, - 1966, 0, 1968, 1969, 1970, 0, 1972, 1973, 1974, 0, - 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, - 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, - 2000, 2001, 2002, 0, 0, 2005, 0, 2007, 0, 2014, - 2015, 921, 0, 921, 921, 921, 921, 921, 0, 0, - 0, 0, 0, 2027, 2028, 2029, 2030, 2031, 2032, 2033, - 2034, 0, 2036, 2037, 2038, 2039, 2040, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 186, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 921, 0, 0, 0, 0, 0, 125, 0, 147, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 168, - 0, 0, 2078, 2079, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 82, 42, 43, 84, 2117, 0, - 158, 0, 0, 0, 0, 0, 146, 0, 0, 0, - 0, 0, 0, 0, 88, 0, 0, 0, 46, 73, - 74, 0, 71, 75, 0, 165, 0, 0, 166, 0, - 0, 72, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 186, 0, 0, 0, 134, 135, 157, - 156, 185, 0, 0, 2560, 0, 0, 0, 0, 2159, - 59, 0, 0, 0, 0, 0, 0, 125, 0, 147, - 0, 0, 91, 0, 0, 0, 0, 0, 0, 0, - 168, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 158, 0, 0, 0, 0, 0, 146, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 165, 0, 0, 166, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 151, 132, 154, 139, 131, 0, 152, 153, 1650, 1651, - 157, 156, 185, 169, 0, 0, 0, 0, 0, 0, - 0, 0, 175, 140, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 143, 141, 136, - 137, 138, 142, 0, 0, 0, 0, 0, 0, 133, - 0, 191, 0, 191, 0, 0, 191, 0, 144, 0, - 49, 52, 55, 54, 57, 0, 70, 0, 0, 79, - 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 649, 0, 649, - 649, 0, 0, 58, 87, 86, 0, 0, 68, 69, - 56, 0, 0, 0, 0, 0, 77, 78, 0, 649, - 191, 151, 1652, 154, 0, 1649, 0, 152, 153, 0, - 0, 0, 0, 0, 169, 0, 0, 0, 0, 0, - 0, 0, 0, 175, 0, 0, 0, 0, 1365, 0, - 0, 0, 0, 0, 0, 0, 160, 0, 60, 61, - 0, 62, 63, 64, 65, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 2322, + 955, 3599, 3600, 87, 3598, 4099, 3918, 4003, 3275, 4112, + 4066, 2092, 3550, 4067, 950, 1263, 942, 2080, 3991, 2384, + 2315, 3175, 4022, 3902, 3182, 3403, 3233, 908, 3827, 3238, + 42, 1261, 3235, 3224, 2023, 3234, 3232, 5, 3237, 3236, + 3136, 1762, 1968, 3900, 3537, 2317, 3253, 2743, 3076, 3190, + 2458, 737, 3252, 943, 3140, 3137, 3448, 3442, 3637, 2339, + 2979, 3124, 904, 764, 903, 2355, 3255, 2421, 3968, 2881, + 1722, 2358, 732, 1818, 3434, 2961, 3281, 2913, 2446, 2426, + 2882, 1073, 3468, 2372, 2883, 1041, 1022, 2807, 87, 2489, + 163, 2832, 1019, 41, 2360, 43, 2813, 1709, 2799, 2783, + 2359, 2238, 2076, 2115, 2953, 2031, 1865, 1022, 2282, 3134, + 2237, 149, 2467, 1021, 1847, 1025, 2445, 2270, 2428, 2347, + 2506, 2874, 1083, 1101, 1751, 2849, 1106, 1731, 104, 2362, + 100, 1143, 1688, 1510, 1043, 105, 2335, 2119, 2051, 1437, + 731, 1422, 1964, 1854, 3139, 747, 1077, 1080, 2443, 1081, + 1112, 1946, 2417, 1107, 1108, 2340, 1750, 742, 1736, 1058, + 2820, 1060, 2146, 735, 2188, 1494, 3632, 1031, 2127, 99, + 3890, 1040, 2781, 107, 2418, 1470, 1028, 2022, 1252, 85, + 1975, 1026, 1813, 1027, 1839, 132, 133, 1192, 1029, 1017, + 1109, 1119, 106, 722, 734, 905, 1053, 93, 741, 167, + 84, 1514, 98, 4100, 1259, 1238, 2283, 127, 1519, 125, + 126, 2460, 2461, 2462, 3953, 1048, 1052, 667, 3538, 3221, + 2460, 2936, 2935, 2504, 2904, 3530, 4049, 1016, 2969, 2970, + 1034, 3949, 2312, 2313, 2038, 3948, 2037, 1931, 134, 2036, + 3954, 2035, 2034, 2033, 2006, 1148, 1074, 1208, 725, 2551, + 726, 1684, 128, 2779, 3120, 2493, 4043, 3080, 4122, 3493, + 4070, 4065, 4090, 3408, 4105, 4053, 723, 4051, 3407, 2929, + 1035, 2491, 1068, 2809, 1067, 3903, 2744, 1018, 2043, 3300, + 3823, 95, 1090, 1145, 1098, 3822, 1020, 3243, 1433, 4104, + 4052, 1085, 4050, 95, 1147, 1146, 1162, 1163, 1164, 2492, + 1167, 1168, 1169, 1170, 1042, 3927, 1173, 1174, 1175, 1176, + 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, + 1187, 1188, 1189, 1165, 128, 1209, 727, 1122, 1123, 707, + 2906, 2, 1097, 3603, 1096, 1095, 3949, 909, 4080, 3543, + 3243, 95, 3544, 3241, 3603, 3833, 1149, 1152, 1153, 707, + 1156, 1015, 1454, 3240, 4047, 701, 3562, 1424, 3551, 3992, + 2054, 4000, 2486, 190, 3832, 2085, 4027, 3320, 1828, 3247, + 2780, 959, 960, 961, 701, 3171, 111, 112, 113, 2968, + 116, 1195, 95, 122, 3172, 3173, 191, 129, 2926, 661, + 1716, 2557, 128, 1066, 1070, 907, 3241, 1099, 2379, 2380, + 172, 720, 721, 2560, 959, 960, 961, 86, 2943, 2944, + 2378, 1010, 1011, 1012, 1013, 86, 2952, 698, 1024, 1256, + 3602, 4004, 3247, 1228, 4032, 2015, 2016, 1066, 1070, 907, + 1245, 3602, 1247, 1752, 2858, 1753, 1008, 2857, 701, 2823, + 2859, 1007, 4030, 2863, 1233, 1234, 1055, 1056, 3919, 1216, + 2907, 4036, 4037, 3561, 1217, 1229, 169, 86, 1971, 170, + 3192, 3193, 1222, 2870, 2824, 683, 701, 4031, 2558, 3932, + 1244, 1246, 701, 3277, 1191, 2397, 2396, 3308, 681, 1216, + 2816, 2817, 189, 3244, 1217, 95, 701, 2437, 1451, 3014, + 1452, 1453, 1215, 95, 1214, 3306, 2549, 4008, 713, 2014, + 715, 86, 1434, 2314, 88, 2018, 3650, 719, 1166, 3282, + 2431, 2954, 4071, 1921, 1748, 3874, 1692, 3875, 678, 1423, + 1094, 2468, 1201, 1202, 2914, 2939, 1438, 693, 2512, 2507, + 4008, 4102, 3270, 4072, 1255, 95, 3244, 1947, 1094, 1190, + 3271, 2527, 688, 2528, 1254, 2529, 1231, 1232, 1235, 1230, + 1249, 1197, 691, 1237, 1204, 2956, 1223, 1922, 1236, 1923, + 3532, 3531, 2530, 1172, 1171, 3278, 702, 2511, 3279, 3191, + 3807, 2552, 2553, 2555, 2554, 2509, 1242, 1695, 1092, 95, + 1243, 3194, 2471, 2513, 3528, 702, 4123, 1102, 1485, 3607, + 1248, 1103, 1132, 2356, 1130, 1972, 173, 1103, 3445, 1114, + 1141, 1140, 1139, 1194, 1138, 179, 1054, 1137, 1136, 2510, + 1135, 1134, 1832, 1129, 2155, 1241, 1142, 1485, 3194, 1059, + 668, 1078, 670, 684, 4077, 704, 1115, 703, 674, 1078, + 672, 676, 685, 677, 1448, 671, 1965, 682, 2444, 2957, + 673, 686, 687, 690, 694, 695, 696, 692, 689, 702, + 680, 705, 4044, 1078, 3015, 2497, 1853, 1076, 2496, 2430, + 1961, 1425, 1471, 1159, 3214, 3079, 2519, 2515, 2517, 2518, + 2516, 2520, 2521, 2938, 2973, 1151, 1826, 702, 1825, 1824, + 2924, 1114, 1962, 702, 3297, 1150, 1472, 1473, 1474, 1475, + 1476, 1477, 1478, 1480, 1479, 1481, 1482, 702, 1260, 3527, + 1260, 1260, 1822, 2572, 665, 4045, 1093, 2951, 1193, 660, + 2950, 3915, 2147, 1069, 1063, 1061, 3088, 2149, 1486, 1487, + 3482, 2154, 2150, 3464, 1093, 2151, 2152, 2153, 2854, 164, + 2148, 2156, 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, + 1749, 1133, 1444, 1131, 2490, 1436, 2908, 1069, 1063, 1061, + 1022, 1495, 1500, 1501, 2941, 1504, 1506, 1507, 1508, 1509, + 2488, 1512, 1513, 1515, 1515, 1852, 1515, 1515, 1520, 1520, + 1520, 1523, 1524, 1525, 1526, 1527, 1528, 1529, 1530, 1531, + 1532, 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, 1541, + 1542, 1543, 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551, + 1552, 1553, 1554, 1555, 1556, 1557, 1558, 1559, 1560, 1561, + 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, + 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1581, + 1582, 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590, 1591, + 1592, 1593, 1594, 1595, 1596, 1597, 1598, 1599, 1600, 1601, + 1602, 1603, 1604, 1605, 1606, 1607, 1608, 1609, 1610, 1611, + 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, + 1622, 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, + 1632, 1633, 1634, 1635, 1636, 1637, 1638, 1639, 1640, 1641, + 1642, 1643, 1644, 1250, 956, 1492, 3446, 1645, 3926, 1647, + 1648, 1649, 1650, 1651, 1416, 1417, 956, 1488, 1489, 1490, + 1491, 1520, 1520, 1520, 1520, 1520, 1520, 1502, 1933, 1932, + 1934, 1935, 1936, 2905, 706, 1100, 1658, 1659, 1660, 1661, + 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, + 3491, 3492, 4006, 1213, 1203, 699, 1496, 1415, 4035, 1432, + 1505, 3245, 3246, 1200, 956, 3560, 2487, 1685, 2559, 1226, + 700, 89, 2819, 701, 3249, 1516, 165, 1517, 1518, 701, + 2756, 2980, 1438, 177, 4005, 4006, 94, 1521, 1522, 1089, + 2558, 2928, 1091, 1682, 94, 2784, 2786, 2434, 1062, 2960, + 1121, 3601, 4034, 2088, 1212, 1121, 1218, 1219, 1220, 1221, + 1740, 1646, 3601, 1715, 3245, 3246, 2343, 4005, 1206, 3087, + 1691, 2814, 2343, 124, 185, 666, 2385, 3249, 3391, 1022, + 1257, 1258, 1062, 1022, 1485, 2927, 94, 2435, 1482, 1022, + 4116, 2128, 3170, 2583, 2433, 1121, 1443, 1440, 1441, 1442, + 1447, 1449, 1446, 1952, 1445, 2129, 1699, 1239, 1465, 1683, + 1703, 1037, 1716, 1976, 1439, 2982, 1021, 166, 171, 168, + 174, 175, 176, 178, 180, 181, 182, 183, 2436, 1121, + 94, 1158, 119, 184, 186, 187, 188, 1253, 2432, 1211, + 1448, 1094, 3940, 1086, 1144, 3523, 2056, 1951, 3458, 2508, + 1088, 1087, 3000, 1120, 2027, 1958, 1754, 3178, 1120, 3298, + 2057, 1483, 1484, 2055, 1114, 1117, 1118, 1701, 1078, 2120, + 2897, 104, 1111, 1115, 1702, 2120, 1683, 2592, 105, 1652, + 1653, 1654, 1655, 1656, 1657, 2992, 2991, 2990, 1452, 1453, + 2984, 2583, 2988, 1110, 2983, 4081, 2981, 1689, 1120, 1092, + 3646, 2986, 3179, 1124, 1114, 120, 2963, 1453, 1126, 1676, + 2985, 2962, 1127, 1125, 2785, 2963, 107, 1454, 3498, 3497, + 2962, 2475, 1862, 1861, 1225, 1121, 3181, 1851, 2987, 2989, + 2485, 2483, 1120, 1128, 702, 1227, 2126, 1124, 1114, 2480, + 702, 1454, 1126, 1121, 3176, 2480, 1127, 1125, 1444, 1697, + 4124, 1845, 4073, 1132, 1130, 1240, 1718, 3483, 2341, 2342, + 1686, 1977, 3192, 3193, 2341, 2342, 1196, 1033, 1716, 3177, + 1700, 4118, 3970, 1018, 3815, 2125, 1698, 1838, 2484, 1721, + 1941, 1916, 1970, 1210, 2482, 3908, 2112, 3814, 3805, 1020, + 1829, 1830, 1831, 3573, 1857, 1898, 1867, 1948, 1868, 1949, + 1870, 1872, 1950, 3183, 1876, 1878, 1880, 1882, 1884, 1260, + 1939, 4114, 1745, 1746, 4115, 1454, 4113, 3971, 1955, 1454, + 1953, 1954, 1856, 1956, 1957, 4086, 1716, 1093, 1120, 3557, + 3909, 3558, 1821, 2275, 1114, 1117, 1118, 4125, 1078, 2348, + 2349, 3572, 1111, 1115, 1940, 1928, 1120, 3505, 1157, 1835, + 1836, 1834, 1154, 1451, 1848, 1452, 1453, 1906, 1907, 2564, + 2565, 2566, 3002, 1912, 1913, 959, 960, 961, 3504, 1855, + 1855, 3191, 3494, 3222, 1938, 1902, 3210, 1451, 1859, 1452, + 1453, 2879, 2878, 3194, 2104, 2093, 2094, 2095, 2096, 2106, + 2097, 2098, 2099, 2111, 2107, 2100, 2101, 2108, 2109, 2110, + 2102, 2103, 2105, 4084, 1716, 1894, 1966, 4074, 1897, 1927, + 1899, 1471, 2877, 2972, 2440, 1704, 1477, 1478, 1480, 1479, + 1481, 1482, 190, 1473, 1474, 1475, 1476, 1477, 1478, 1480, + 1479, 1481, 1482, 2909, 1942, 1472, 1473, 1474, 1475, 1476, + 1477, 1478, 1480, 1479, 1481, 1482, 129, 1926, 151, 1454, + 1925, 1451, 1924, 1452, 1453, 1451, 128, 1452, 1453, 172, + 2045, 2047, 2048, 1914, 1097, 1982, 1096, 1095, 1908, 1905, + 2275, 1454, 2872, 1904, 2272, 1260, 1260, 1903, 1874, 1696, + 1978, 1979, 1827, 2274, 3274, 2046, 1419, 1454, 2004, 87, + 162, 707, 87, 1748, 1983, 1454, 150, 954, 1471, 3180, + 3935, 1990, 1991, 1992, 1475, 1476, 1477, 1478, 1480, 1479, + 1481, 1482, 3934, 2003, 3912, 169, 42, 2619, 170, 42, + 2631, 1725, 1472, 1473, 1474, 1475, 1476, 1477, 1478, 1480, + 1479, 1481, 1482, 4014, 1716, 2805, 4101, 1841, 1842, 161, + 160, 189, 1443, 1440, 1441, 1442, 1447, 1449, 1446, 1471, + 1445, 3911, 1467, 1454, 1468, 4012, 1716, 3457, 3488, 707, + 1439, 2083, 2083, 2081, 2081, 2084, 1454, 1726, 1469, 1483, + 1484, 1466, 1471, 1472, 1473, 1474, 1475, 1476, 1477, 1478, + 1480, 1479, 1481, 1482, 3910, 1451, 3810, 1452, 1453, 3794, + 2049, 3793, 190, 3645, 1682, 3643, 1472, 1473, 1474, 1475, + 1476, 1477, 1478, 1480, 1479, 1481, 1482, 1451, 1980, 1452, + 1453, 2861, 707, 3569, 2166, 1984, 129, 1986, 1987, 1988, + 1989, 2456, 2455, 1451, 1993, 1452, 1453, 1681, 110, 172, + 1680, 1451, 1716, 1452, 1453, 1679, 2005, 4010, 1716, 109, + 1454, 108, 155, 1843, 158, 3502, 1840, 101, 156, 157, + 3887, 1716, 101, 2454, 2453, 173, 2452, 2451, 102, 103, + 1683, 4061, 1716, 102, 179, 85, 2629, 3487, 85, 2028, + 1450, 1716, 2053, 1454, 2805, 3999, 1450, 1716, 3928, 2011, + 2012, 2805, 3978, 2805, 3974, 169, 3961, 1716, 170, 1451, + 3283, 1452, 1453, 2060, 1458, 1459, 1460, 1461, 1462, 1463, + 1464, 1456, 1451, 2058, 1452, 1453, 3541, 3925, 1454, 1716, + 3841, 189, 3280, 1716, 2581, 3818, 1716, 3840, 1716, 2805, + 3806, 2087, 2286, 2059, 2580, 2061, 2062, 2063, 2064, 2065, + 2066, 2068, 2070, 2071, 2072, 2073, 2074, 2075, 2284, 3184, + 3213, 1454, 3212, 3188, 3541, 1716, 3798, 1454, 2888, 2285, + 3187, 1496, 1454, 2130, 2131, 2132, 2133, 3885, 1716, 3797, + 3315, 2271, 2805, 3539, 2165, 1729, 2875, 2144, 2121, 110, + 1678, 1471, 2801, 2571, 2114, 2116, 1451, 2540, 1452, 1453, + 109, 2539, 108, 2502, 3189, 2480, 1716, 103, 164, 3185, + 1454, 103, 3882, 1716, 3186, 1472, 1473, 1474, 1475, 1476, + 1477, 1478, 1480, 1479, 1481, 1482, 2364, 2180, 2501, 1451, + 2338, 1452, 1453, 1454, 3462, 1716, 2286, 2289, 2290, 2711, + 1716, 3203, 3202, 1454, 2320, 173, 1716, 3549, 104, 3200, + 3201, 1728, 2284, 2366, 179, 105, 3864, 1716, 2915, 2273, + 1454, 103, 2394, 2353, 1451, 2585, 1452, 1453, 2007, 104, + 1716, 1454, 3198, 3199, 2893, 1454, 105, 3198, 3197, 2829, + 1716, 2393, 2178, 1973, 2052, 1937, 2589, 2331, 2558, 2937, + 1817, 2918, 2189, 1929, 3433, 1716, 1450, 1451, 1083, 1452, + 1453, 2911, 2912, 1451, 159, 1452, 1453, 1919, 1451, 1915, + 1452, 1453, 1911, 2403, 2404, 2405, 2406, 3426, 1716, 1454, + 1716, 2398, 2389, 2399, 2400, 2401, 2402, 3423, 1716, 1034, + 2388, 1083, 2370, 2307, 2288, 1454, 2319, 2291, 2292, 2409, + 2410, 2411, 2412, 1910, 3421, 1716, 1451, 1909, 1452, 1453, + 2805, 2804, 2325, 1727, 2326, 3383, 1716, 2585, 1716, 2588, + 2423, 3459, 2262, 2263, 2264, 2265, 2266, 2392, 2333, 1451, + 1251, 1452, 1453, 2469, 3966, 2429, 2351, 3135, 164, 1451, + 1454, 1452, 1453, 1068, 1454, 1067, 2376, 2375, 3457, 2374, + 2086, 1716, 3165, 2330, 2391, 2390, 1451, 2850, 1452, 1453, + 1817, 1816, 2558, 3381, 1716, 3939, 2466, 1451, 2481, 1452, + 1453, 1451, 152, 1452, 1453, 153, 2821, 2309, 2439, 3377, + 1716, 1454, 2821, 2189, 1472, 1473, 1474, 1475, 1476, 1477, + 1478, 1480, 1479, 1481, 1482, 109, 2850, 1760, 1759, 2805, + 2424, 2828, 1454, 2420, 2829, 165, 2474, 2438, 3412, 2477, + 1454, 2478, 177, 2442, 3200, 1451, 2450, 1452, 1453, 2494, + 2851, 2413, 2415, 2416, 3374, 1716, 2480, 3108, 3372, 1716, + 2853, 1451, 2377, 1452, 1453, 2424, 2585, 2476, 2473, 2472, + 1454, 2711, 2616, 2615, 2480, 2498, 2463, 1454, 2829, 2499, + 2500, 2495, 1454, 185, 3457, 2346, 2829, 1720, 1454, 2851, + 2310, 2086, 2029, 2013, 1450, 3370, 1716, 1454, 1960, 2558, + 1717, 1719, 1454, 1747, 1122, 1123, 1451, 1454, 1452, 1453, + 1451, 1454, 1452, 1453, 2563, 1855, 3368, 1716, 1454, 1023, + 2505, 1105, 1104, 1454, 3366, 1716, 166, 171, 168, 174, + 175, 176, 178, 180, 181, 182, 183, 95, 1506, 4040, + 1506, 3981, 184, 186, 187, 188, 1454, 1451, 3829, 1452, + 1453, 1723, 3795, 3276, 3364, 1716, 2575, 3657, 3522, 3519, + 2329, 3362, 1716, 3500, 3325, 3225, 3360, 1716, 1451, 3324, + 1452, 1453, 3358, 1716, 2286, 2533, 1451, 1819, 1452, 1453, + 3830, 3356, 1716, 2422, 3272, 4096, 3354, 1716, 3227, 1454, + 2578, 3352, 1716, 3223, 2884, 3350, 1716, 95, 2919, 2419, + 2414, 2285, 3348, 1716, 1454, 165, 1451, 3836, 1452, 1453, + 1454, 2408, 177, 1451, 3506, 1452, 1453, 2407, 1451, 1944, + 1452, 1453, 1850, 1454, 1451, 2548, 1452, 1453, 1846, 3510, + 3346, 1716, 1454, 1451, 1815, 1452, 1453, 121, 1451, 2556, + 1452, 1453, 2885, 1451, 1454, 1452, 1453, 1451, 2885, 1452, + 1453, 1195, 2437, 185, 1451, 2627, 1452, 1453, 2323, 1451, + 1454, 1452, 1453, 4094, 2567, 3507, 3508, 3509, 3469, 3470, + 4068, 2053, 3947, 3344, 1716, 1454, 3511, 3512, 3513, 1454, + 3869, 3472, 1451, 3943, 1452, 1453, 3219, 3218, 3330, 1716, + 3217, 2569, 2009, 3135, 3313, 1716, 166, 171, 168, 174, + 175, 176, 178, 180, 181, 182, 183, 2776, 1716, 1454, + 2898, 2534, 184, 186, 187, 188, 2774, 1716, 1454, 3157, + 3475, 2568, 3474, 2570, 3158, 1451, 2591, 1452, 1453, 1716, + 3154, 3155, 2573, 3153, 2574, 1454, 3156, 3831, 1890, 1454, + 1451, 2337, 1452, 1453, 2749, 1716, 1451, 1454, 1452, 1453, + 1724, 3627, 2576, 3626, 2010, 2755, 3125, 3127, 3463, 1451, + 1716, 1452, 1453, 2726, 1716, 3128, 1454, 3113, 1451, 3112, + 1452, 1453, 3907, 3450, 2542, 2543, 3636, 2625, 3453, 2545, + 1451, 3449, 1452, 1453, 1038, 1454, 3638, 2787, 2546, 1891, + 1892, 1893, 1039, 2718, 1716, 1886, 1451, 1454, 1452, 1453, + 3122, 3625, 2709, 1716, 1959, 1022, 2083, 2868, 2081, 2790, + 1006, 1451, 3196, 1452, 1453, 1451, 2889, 1452, 1453, 2707, + 1716, 1161, 2128, 2694, 1716, 1160, 2826, 2827, 3291, 101, + 1454, 2692, 1716, 2884, 2788, 2364, 2129, 2966, 1022, 2846, + 102, 1418, 1887, 1888, 1889, 1451, 2925, 1452, 1453, 2598, + 2690, 1716, 129, 42, 1451, 3455, 1452, 1453, 2348, 2349, + 2825, 103, 2843, 2052, 1471, 2845, 2613, 4110, 2806, 2688, + 1716, 1451, 1454, 1452, 1453, 1451, 1454, 1452, 1453, 3215, + 2537, 2686, 1716, 1451, 4019, 1452, 1453, 3924, 1472, 1473, + 1474, 1475, 1476, 1477, 1478, 1480, 1479, 1481, 1482, 2802, + 101, 3825, 1451, 2526, 1452, 1453, 2844, 103, 1454, 2815, + 1689, 102, 1454, 2778, 2684, 1716, 3159, 3195, 2838, 2839, + 2842, 1451, 2334, 1452, 1453, 1046, 1047, 110, 1683, 2871, + 2873, 2798, 2791, 1451, 2793, 1452, 1453, 1454, 109, 3111, + 108, 2525, 1454, 3435, 2818, 2848, 2524, 3110, 2864, 103, + 2803, 2523, 2923, 2522, 2562, 1454, 2682, 1716, 108, 3895, + 2680, 1716, 110, 3894, 3872, 3644, 1451, 2852, 1452, 1453, + 3642, 109, 2855, 109, 3633, 108, 2429, 3641, 3634, 1454, + 2862, 3520, 3454, 1454, 3452, 3228, 2865, 1454, 2934, 2123, + 2464, 1833, 2678, 1716, 2124, 110, 2676, 1716, 1454, 3443, + 2876, 1045, 2821, 1454, 4098, 4097, 109, 3611, 1451, 4097, + 1452, 1453, 1451, 1454, 1452, 1453, 2886, 1454, 2801, 3016, + 2617, 2674, 1716, 2321, 4098, 1741, 2672, 1716, 3913, 1733, + 2184, 2894, 2887, 2895, 2899, 2900, 2901, 2890, 2891, 2670, + 1716, 114, 115, 2931, 1451, 1838, 1452, 1453, 1451, 3486, + 1452, 1453, 1036, 3, 2026, 2976, 2977, 10, 97, 1454, + 2920, 2921, 2024, 2668, 1716, 9, 2112, 2666, 1716, 1, + 1014, 2910, 4075, 1451, 1454, 1452, 1453, 2930, 1451, 1421, + 1452, 1453, 2664, 1716, 1420, 3490, 2025, 2662, 1716, 8, + 4029, 1451, 679, 1452, 1453, 2311, 2579, 2660, 1716, 1454, + 2955, 2655, 1716, 1454, 1687, 4069, 4025, 4026, 1454, 2958, + 2268, 2993, 1930, 1920, 2974, 1451, 1454, 1452, 1453, 1451, + 3552, 1452, 1453, 1451, 1454, 1452, 1453, 2236, 3826, 3231, + 1454, 2470, 3518, 2427, 1451, 1113, 1452, 1453, 1454, 1451, + 2301, 1452, 1453, 2651, 1716, 154, 2386, 2387, 1454, 1451, + 3994, 1452, 1453, 1451, 2994, 1452, 1453, 1717, 2308, 2997, + 118, 1071, 2932, 117, 2104, 2093, 2094, 2095, 2096, 2106, + 2097, 2098, 2099, 2111, 2107, 2100, 2101, 2108, 2109, 2110, + 2102, 2103, 2105, 2649, 1716, 1116, 1224, 2642, 1716, 2465, + 3542, 2869, 2332, 3923, 2395, 1451, 1766, 1452, 1453, 1764, + 2640, 1716, 3018, 1765, 1763, 1768, 2975, 1767, 3299, 3802, + 1451, 2618, 1452, 1453, 3477, 3074, 3390, 2964, 2017, 714, + 2965, 2841, 708, 3524, 192, 1755, 1734, 3404, 1155, 669, + 1454, 3204, 3428, 2503, 675, 1451, 1503, 1452, 1453, 1451, + 2978, 1452, 1453, 2008, 1451, 3109, 1452, 1453, 2995, 2856, + 1065, 1057, 1451, 2322, 1452, 1453, 2792, 1064, 3092, 3803, + 1451, 3143, 1452, 1453, 3081, 3447, 1451, 3083, 1452, 1453, + 3121, 3123, 2364, 3009, 1451, 1454, 1452, 1453, 2808, 1454, + 2271, 3126, 2271, 3054, 1451, 3119, 1452, 1453, 3906, 1454, + 3635, 3979, 2866, 1730, 2441, 3142, 3411, 87, 2590, 2366, + 2364, 2364, 2364, 2364, 2364, 1454, 2996, 2118, 3064, 3065, + 3066, 3067, 3068, 1493, 3424, 2363, 3606, 2044, 739, 738, + 2364, 3092, 736, 2364, 1025, 2794, 2822, 2366, 2366, 2366, + 2366, 2366, 1457, 944, 3091, 3082, 1454, 3084, 3164, 3147, + 1970, 1454, 2782, 3116, 1742, 1454, 2833, 2366, 2831, 2830, + 2366, 2535, 3103, 2371, 3471, 3467, 4021, 2365, 2273, 3389, + 2273, 2361, 2800, 3385, 3107, 895, 1451, 3114, 1452, 1453, + 3117, 894, 748, 3322, 1454, 740, 730, 3129, 3130, 893, + 1454, 892, 3258, 3259, 2940, 3273, 2942, 3248, 2867, 3321, + 1026, 3166, 1027, 3269, 3167, 3149, 3150, 3256, 3152, 104, + 3148, 1454, 3160, 3151, 3146, 1435, 105, 3168, 1454, 1706, + 724, 1451, 1454, 1452, 1453, 1451, 1454, 1452, 1453, 1084, + 3318, 3296, 3930, 2561, 3174, 1451, 2880, 1452, 1453, 2772, + 3115, 3319, 1705, 1454, 3207, 3937, 3056, 3206, 3058, 1454, + 3205, 1451, 3239, 1452, 1453, 1454, 3104, 3105, 3106, 3536, + 3220, 2916, 2457, 69, 3069, 3070, 3071, 3072, 2771, 3260, + 3257, 3261, 3132, 3229, 2767, 46, 3901, 2429, 3250, 3967, + 887, 884, 1451, 3608, 1452, 1453, 3609, 1451, 3267, 1452, + 1453, 1451, 1454, 1452, 1453, 2766, 3208, 3209, 3610, 3138, + 1454, 3077, 2765, 3078, 3138, 1454, 2764, 3950, 3951, 3287, + 2763, 1454, 3284, 883, 3286, 3952, 2173, 1431, 1428, 4042, + 1451, 2019, 1452, 1453, 96, 3294, 1451, 2762, 1452, 1453, + 36, 35, 3304, 2753, 3301, 3302, 3251, 3303, 34, 2752, + 3305, 33, 3307, 32, 3309, 26, 25, 1451, 24, 1452, + 1453, 23, 22, 29, 1451, 19, 1452, 1453, 1451, 21, + 1452, 1453, 1451, 20, 1452, 1453, 1506, 18, 3242, 4064, + 1506, 2577, 4109, 123, 55, 2582, 2751, 52, 50, 1451, + 131, 1452, 1453, 130, 2750, 1451, 53, 1452, 1453, 2747, + 3230, 1451, 49, 1452, 1453, 2742, 1198, 47, 2586, 31, + 2587, 30, 17, 16, 3406, 2594, 15, 14, 13, 2596, + 2597, 3410, 12, 11, 7, 6, 39, 3295, 2603, 2604, + 2605, 2606, 2607, 2608, 2609, 2610, 2611, 2612, 1451, 2614, + 1452, 1453, 38, 37, 28, 27, 1451, 40, 1452, 1453, + 4, 1451, 3141, 1452, 1453, 2903, 2364, 1451, 2459, 1452, + 1453, 0, 2620, 2621, 2622, 2623, 2624, 0, 2626, 3484, + 3436, 3437, 2628, 0, 0, 3451, 2633, 2634, 728, 2635, + 0, 0, 2638, 2366, 2639, 2641, 2643, 2644, 2645, 2646, + 2647, 2648, 2650, 2652, 2653, 2654, 2656, 3444, 2658, 2659, + 2661, 2663, 2665, 2667, 2669, 2671, 2673, 2675, 2677, 2679, + 2681, 2683, 2685, 2687, 2689, 2691, 2693, 2695, 2696, 2697, + 3478, 2699, 3476, 2701, 3473, 2703, 2704, 3479, 2706, 2708, + 2710, 3260, 3257, 3261, 2713, 3485, 3456, 1454, 2717, 3439, + 0, 1454, 2722, 2723, 2724, 2725, 1454, 3501, 0, 3503, + 1454, 0, 0, 0, 1454, 2736, 2737, 2738, 2739, 2740, + 2741, 1454, 0, 2745, 2746, 3441, 1454, 0, 0, 0, + 3413, 2748, 3415, 3416, 3417, 1454, 2754, 0, 0, 0, + 3289, 3290, 2757, 2758, 2759, 2760, 2761, 1044, 0, 0, + 1050, 1050, 0, 2768, 2769, 0, 2770, 1454, 3466, 2773, + 2775, 2332, 0, 2777, 0, 0, 3546, 3547, 0, 0, + 3495, 3496, 0, 2789, 0, 0, 0, 3480, 3481, 1454, + 0, 2735, 0, 1454, 0, 2734, 0, 0, 1454, 3548, + 2733, 0, 1454, 0, 2732, 0, 1454, 0, 2731, 0, + 2834, 2837, 2838, 2839, 2835, 2730, 2836, 2840, 0, 0, + 2729, 0, 3564, 0, 3525, 3526, 0, 0, 0, 2728, + 1454, 0, 0, 3529, 0, 0, 0, 3533, 3534, 3535, + 0, 0, 1454, 1451, 0, 1452, 1453, 1451, 0, 1452, + 1453, 2727, 1451, 0, 1452, 1453, 1451, 0, 1452, 1453, + 1451, 0, 1452, 1453, 1454, 0, 0, 1451, 0, 1452, + 1453, 0, 1451, 2721, 1452, 1453, 1454, 2720, 0, 0, + 0, 1451, 2719, 1452, 1453, 3614, 2716, 3615, 3616, 3617, + 2715, 1454, 0, 0, 0, 3624, 0, 3628, 3629, 1454, + 0, 0, 0, 1451, 0, 1452, 1453, 0, 1454, 0, + 0, 0, 3604, 0, 2714, 1454, 0, 0, 0, 3630, + 3142, 0, 87, 0, 3142, 1451, 2712, 1452, 1453, 1451, + 0, 1452, 1453, 1454, 1451, 0, 1452, 1453, 1451, 0, + 1452, 1453, 1451, 0, 1452, 1453, 0, 0, 2705, 42, + 1454, 3568, 2083, 0, 2081, 3659, 3651, 0, 0, 3631, + 2702, 0, 0, 0, 3640, 3639, 1451, 0, 1452, 1453, + 0, 1454, 0, 3647, 3649, 2700, 0, 0, 1451, 0, + 1452, 1453, 0, 2698, 2834, 2837, 2838, 2839, 2835, 3809, + 2836, 2840, 2657, 0, 3469, 3470, 0, 0, 3663, 2637, + 1451, 0, 1452, 1453, 0, 0, 0, 0, 0, 0, + 0, 0, 1451, 0, 1452, 1453, 0, 2636, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1451, 0, 1452, + 1453, 3801, 3800, 0, 2632, 1451, 0, 1452, 1453, 0, + 0, 3816, 0, 3828, 1451, 3799, 1452, 1453, 3821, 3820, + 0, 1451, 0, 1452, 1453, 2630, 0, 0, 0, 0, + 3866, 3867, 3004, 3005, 3006, 3007, 3008, 0, 3653, 1451, + 0, 1452, 1453, 0, 0, 0, 0, 0, 2083, 0, + 2081, 3870, 3013, 0, 0, 0, 1451, 0, 1452, 1453, + 0, 0, 0, 0, 0, 0, 0, 3595, 0, 3660, + 3661, 0, 0, 0, 0, 0, 0, 1451, 0, 1452, + 1453, 3142, 0, 3138, 0, 0, 3655, 3873, 0, 3804, + 0, 3876, 3811, 3812, 3813, 1523, 1524, 1525, 1526, 1527, + 1528, 1529, 1530, 1531, 1532, 1533, 1534, 1535, 1536, 1537, + 1538, 1539, 1540, 1541, 1543, 1544, 1545, 1546, 1547, 1548, + 1549, 1550, 1551, 1552, 1553, 1554, 1555, 1556, 1557, 1558, + 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, + 1569, 1570, 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, + 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, + 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 1598, + 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1608, + 1609, 1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, + 1620, 1621, 1622, 1623, 1624, 1625, 1626, 1627, 1628, 1629, + 1630, 1631, 1632, 1633, 1634, 1635, 1641, 1642, 1643, 1644, + 1658, 1659, 1660, 1661, 1662, 1663, 1664, 1665, 1666, 1667, + 1668, 1669, 1670, 1671, 3917, 3914, 3871, 3141, 3899, 3896, + 3897, 3141, 0, 3898, 0, 0, 1454, 0, 3931, 0, + 0, 1454, 0, 0, 1714, 1710, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 87, 3916, 0, 1711, + 3144, 0, 1714, 1710, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1711, 3162, 0, + 3920, 4062, 0, 42, 2327, 2328, 1713, 3933, 1712, 0, + 3938, 1783, 0, 3936, 0, 0, 3808, 0, 0, 0, + 0, 0, 1707, 1708, 1713, 0, 1712, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3905, 0, 0, 0, + 2595, 0, 0, 0, 0, 2584, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1455, 0, + 3976, 3956, 0, 0, 3957, 87, 0, 0, 0, 0, + 0, 0, 3922, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1451, 3965, 1452, 1453, 0, 1451, 1511, 1452, + 1453, 0, 42, 0, 3972, 0, 0, 0, 0, 3980, + 0, 3982, 0, 0, 3985, 0, 3941, 3990, 3993, 0, + 3987, 3828, 3996, 3986, 3984, 3293, 3989, 3988, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 3141, 0, + 4017, 0, 0, 0, 0, 0, 4020, 3310, 3311, 4038, + 3312, 4028, 3314, 3316, 4033, 0, 4007, 1771, 4046, 0, + 0, 0, 4048, 0, 0, 0, 3323, 0, 0, 0, + 4059, 3327, 3328, 3329, 3331, 3332, 3333, 3334, 3335, 3336, + 3337, 3338, 3339, 3340, 3341, 3342, 3343, 3345, 3347, 3349, + 3351, 3353, 3355, 3357, 3359, 3361, 3363, 3365, 3367, 3369, + 3371, 3373, 3375, 3376, 3378, 3379, 3380, 3382, 4007, 4079, + 3384, 1970, 3386, 3387, 3388, 4063, 4082, 3392, 3393, 3394, + 3395, 3396, 3397, 3398, 3399, 3400, 3401, 3402, 4095, 2083, + 4093, 2081, 4092, 4091, 4089, 4088, 3409, 4058, 4078, 4103, + 3414, 1784, 3977, 0, 3418, 3419, 0, 3420, 3422, 4111, + 3425, 3427, 0, 3429, 3430, 3431, 3432, 4119, 4117, 3138, + 0, 3438, 0, 0, 0, 0, 3945, 0, 0, 0, + 0, 0, 0, 0, 3955, 1783, 0, 0, 4128, 4129, + 3867, 4127, 0, 0, 0, 4007, 0, 2083, 0, 2081, + 4126, 0, 0, 0, 0, 3929, 3460, 3461, 0, 0, + 3465, 0, 0, 1797, 1800, 1801, 1802, 1803, 1804, 1805, + 0, 1806, 1807, 1809, 1810, 1808, 1811, 1812, 1785, 1786, + 1787, 1788, 1769, 1770, 1798, 0, 1772, 4076, 1773, 1774, + 1775, 1776, 1777, 1778, 1779, 1780, 1781, 0, 0, 1782, + 1789, 1790, 1791, 1792, 0, 1793, 1794, 1795, 1796, 0, + 0, 0, 0, 4054, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1732, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3540, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1771, 1820, 0, 0, 940, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3559, 0, 0, 3563, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3574, 195, 0, 0, 195, 0, 0, 0, 712, 0, + 0, 0, 0, 718, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 195, 1784, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 195, 0, 0, 957, 0, 2275, 0, 0, 958, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 2082, 0, + 0, 0, 0, 0, 3597, 718, 195, 718, 0, 0, + 1974, 0, 0, 0, 1799, 0, 0, 3605, 0, 0, + 0, 0, 0, 0, 0, 3612, 0, 1797, 1800, 1801, + 1802, 1803, 1804, 1805, 0, 1806, 1807, 1809, 1810, 1808, + 1811, 1812, 1785, 1786, 1787, 1788, 1769, 1770, 1798, 0, + 1772, 0, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1780, + 1781, 0, 0, 1782, 1789, 1790, 1791, 1792, 0, 1793, + 1794, 1795, 1796, 964, 965, 966, 967, 968, 969, 970, + 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, + 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, + 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, + 1001, 1002, 1003, 1004, 1005, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3817, 0, 0, 0, 0, 0, 0, 0, 0, 3824, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 3834, + 3835, 0, 3837, 0, 3838, 3839, 0, 0, 0, 3842, + 3843, 3844, 3845, 3846, 3847, 3848, 3849, 3850, 3851, 3852, + 3853, 3854, 3855, 3856, 3857, 3858, 3859, 3860, 3861, 3862, + 3863, 0, 3865, 3868, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 3877, 3878, + 3879, 3880, 3881, 3883, 3884, 3886, 3888, 3889, 3891, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 2039, + 2040, 2041, 2042, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 2050, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3921, 0, 1799, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 2089, + 2090, 0, 0, 0, 0, 2113, 1050, 1050, 2117, 0, + 0, 0, 2122, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 2134, 2135, 2136, + 2137, 2138, 2139, 2140, 2141, 2142, 2143, 0, 2145, 0, + 0, 0, 2167, 2168, 2169, 2170, 2171, 2172, 2174, 0, + 2179, 0, 2181, 2182, 2183, 0, 2185, 2186, 2187, 0, + 2190, 2191, 2192, 2193, 2194, 2195, 2196, 2197, 2198, 2199, + 2200, 2201, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, + 2210, 2211, 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2219, + 2220, 2221, 2222, 2223, 2224, 2225, 2226, 2227, 2228, 2229, + 2230, 2231, 2232, 2233, 2234, 2235, 2239, 2240, 2241, 2242, + 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, + 2253, 2254, 2255, 2256, 2257, 2258, 2259, 2260, 2261, 0, + 0, 0, 0, 0, 2267, 0, 2269, 0, 2276, 2277, + 2278, 2279, 2280, 2281, 1050, 0, 1050, 1050, 1050, 1050, + 1050, 0, 0, 0, 0, 0, 0, 2293, 2294, 2295, + 2296, 2297, 2298, 2299, 2300, 0, 2302, 2303, 2304, 2305, + 2306, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3946, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3962, 1050, 0, 190, 0, 0, + 3963, 3964, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 2344, 2345, 0, + 0, 129, 3975, 151, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 172, 0, 195, 0, 195, 0, + 0, 195, 0, 2383, 0, 0, 0, 0, 4001, 4002, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 4009, 4011, 4013, 162, 0, 0, 0, 0, + 0, 150, 0, 718, 0, 718, 718, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 4041, 0, 0, + 169, 0, 0, 170, 0, 718, 195, 0, 0, 0, + 0, 0, 0, 0, 2425, 0, 0, 0, 0, 0, + 0, 0, 138, 139, 161, 160, 189, 0, 0, 0, + 0, 86, 44, 45, 88, 4060, 1498, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 92, 0, 0, 0, 48, 76, 77, 0, 74, + 78, 0, 0, 0, 0, 0, 0, 0, 75, 4083, + 4085, 4087, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 957, 62, 0, 0, + 0, 958, 4108, 0, 0, 0, 0, 0, 0, 95, + 0, 2082, 0, 0, 0, 0, 0, 0, 0, 0, + 4120, 4121, 0, 0, 0, 0, 0, 155, 136, 158, + 143, 135, 0, 156, 157, 0, 0, 0, 0, 0, + 173, 0, 0, 0, 0, 0, 0, 0, 0, 179, + 144, 0, 0, 0, 0, 83, 0, 0, 0, 0, + 0, 0, 0, 0, 147, 145, 140, 141, 142, 146, + 0, 0, 0, 0, 0, 0, 137, 0, 0, 0, + 0, 0, 0, 0, 0, 148, 964, 965, 966, 967, + 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, + 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, + 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, + 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 0, 0, + 0, 0, 0, 0, 0, 0, 1498, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, + 54, 57, 56, 59, 0, 73, 0, 0, 82, 79, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 164, 0, 0, 0, 0, 0, 0, + 0, 0, 61, 91, 90, 0, 0, 71, 72, 58, + 0, 0, 0, 195, 0, 80, 81, 718, 718, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 195, 0, 0, 2593, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 2599, + 2600, 2601, 2602, 0, 0, 0, 718, 63, 64, 195, + 65, 66, 67, 68, 0, 0, 0, 0, 0, 0, + 0, 718, 0, 0, 0, 0, 0, 0, 195, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 159, + 0, 0, 1511, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 718, 0, 0, 0, 0, + 0, 60, 0, 0, 0, 0, 0, 0, 1498, 0, + 0, 0, 0, 0, 718, 718, 0, 718, 0, 718, + 718, 0, 718, 718, 718, 718, 718, 718, 0, 0, + 0, 0, 0, 0, 0, 1498, 0, 0, 1498, 718, + 1498, 195, 0, 0, 0, 190, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1837, 0, 0, 0, + 0, 195, 0, 0, 0, 0, 0, 0, 0, 129, + 0, 151, 0, 0, 718, 0, 195, 152, 0, 0, + 153, 0, 172, 0, 0, 0, 0, 0, 0, 0, + 718, 89, 195, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 195, + 165, 0, 0, 162, 0, 0, 195, 177, 0, 150, + 0, 0, 0, 0, 0, 195, 195, 195, 195, 195, + 195, 195, 195, 195, 718, 0, 0, 0, 169, 0, + 0, 170, 0, 0, 0, 1732, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 185, 0, + 1841, 1842, 161, 160, 189, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 94, 166, 171, 168, 174, 175, 176, 178, 180, 181, + 182, 183, 0, 0, 0, 0, 0, 184, 186, 187, + 188, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 155, 1843, 158, 0, 1840, + 0, 156, 157, 0, 0, 0, 0, 0, 173, 0, + 0, 0, 0, 0, 0, 0, 0, 179, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 718, 718, 0, 0, 0, 0, 70, 0, 0, 0, + 0, 0, 0, 718, 0, 0, 0, 0, 0, 0, + 0, 0, 195, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 2971, 0, 0, 0, 0, 0, 0, 0, + 0, 718, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1498, 0, 0, 1050, 0, 0, 2998, 2999, 0, + 0, 3001, 0, 0, 3003, 0, 0, 0, 1498, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 164, 0, 0, 3010, 3011, 3012, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3017, 0, 0, 3019, + 3020, 3021, 0, 0, 0, 3022, 3023, 0, 0, 3024, + 0, 3025, 0, 0, 0, 0, 0, 0, 3026, 0, + 3027, 0, 0, 0, 3028, 0, 3029, 0, 0, 3030, + 0, 3031, 0, 3032, 0, 3033, 0, 3034, 0, 3035, + 0, 3036, 0, 3037, 0, 3038, 0, 3039, 0, 3040, + 0, 3041, 0, 3042, 0, 3043, 0, 3044, 0, 3045, + 0, 3046, 0, 3047, 0, 0, 0, 3048, 0, 3049, + 0, 3050, 0, 0, 3051, 0, 3052, 159, 3053, 0, + 2239, 3055, 0, 0, 3057, 0, 0, 3059, 3060, 3061, + 3062, 0, 0, 0, 0, 3063, 2239, 2239, 2239, 2239, + 2239, 0, 2287, 0, 0, 0, 0, 0, 0, 0, + 0, 3073, 0, 0, 0, 0, 0, 0, 0, 3086, + 0, 0, 3090, 0, 1050, 0, 0, 0, 0, 0, + 0, 3093, 3094, 3095, 3096, 3097, 3098, 0, 0, 0, + 3099, 3100, 0, 3101, 0, 3102, 195, 0, 0, 0, + 0, 718, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 195, 0, 0, 718, + 0, 0, 0, 0, 0, 152, 0, 0, 153, 195, + 3133, 0, 0, 718, 0, 0, 2287, 195, 0, 195, + 0, 195, 195, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 3163, 718, 0, 165, 0, + 0, 0, 0, 0, 0, 177, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 185, 0, 0, 0, + 0, 0, 0, 718, 0, 0, 3226, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 718, 0, 0, 0, 0, 0, 718, 0, 0, 166, + 171, 168, 174, 175, 176, 178, 180, 181, 182, 183, + 0, 0, 939, 0, 0, 184, 186, 187, 188, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 718, 0, 0, 0, 0, 718, 0, + 0, 0, 718, 718, 0, 0, 0, 1690, 0, 0, + 0, 3317, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 697, 3326, 0, 0, 0, 0, + 717, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 195, 0, 0, 0, 0, 0, 0, 195, 0, 0, + 0, 0, 0, 0, 0, 0, 195, 195, 0, 0, + 195, 0, 195, 0, 0, 0, 663, 0, 0, 0, + 0, 195, 0, 0, 0, 0, 0, 0, 195, 0, + 0, 0, 717, 0, 717, 0, 1009, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 195, 0, 0, 0, 0, 0, + 0, 0, 0, 718, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1079, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1498, 0, 2287, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 3521, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 3545, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 896, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 3565, 0, 3566, 0, 0, 3567, 0, 0, + 3570, 3571, 0, 0, 0, 0, 0, 0, 0, 3575, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 716, 0, 3576, 0, 3577, 0, 3578, 0, 3579, + 0, 3580, 0, 3581, 0, 3582, 0, 3583, 0, 3584, + 0, 3585, 0, 3586, 0, 3587, 0, 3588, 0, 3589, + 0, 3590, 0, 3591, 0, 0, 3592, 0, 0, 0, + 3593, 0, 3594, 0, 0, 0, 0, 0, 3596, 0, + 0, 0, 0, 1075, 0, 1082, 195, 0, 0, 0, + 0, 0, 0, 0, 195, 0, 0, 0, 0, 0, + 0, 3613, 0, 0, 0, 718, 0, 0, 0, 0, + 3618, 0, 3619, 3620, 0, 3621, 718, 3622, 0, 0, + 0, 0, 3623, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 195, 0, 0, 0, 0, 195, 0, 3648, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 3656, 0, + 0, 3658, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 3662, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 3796, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 897, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 718, 0, 0, 0, 0, 0, 195, + 0, 0, 0, 0, 0, 0, 195, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 718, 0, 0, 0, 0, 0, 0, 718, 0, 0, + 0, 0, 0, 0, 0, 193, 718, 0, 664, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1498, 0, 0, 0, 0, 0, 664, 0, + 0, 0, 0, 0, 0, 195, 195, 195, 195, 195, + 0, 0, 0, 0, 1032, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 195, + 195, 1051, 1051, 0, 0, 0, 0, 0, 0, 0, + 664, 0, 3904, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 195, 0, 0, 0, 0, 0, + 717, 1414, 717, 717, 0, 0, 0, 0, 1199, 0, + 1205, 0, 0, 1207, 0, 0, 0, 718, 0, 0, + 0, 0, 717, 0, 0, 0, 0, 0, 95, 0, + 0, 957, 0, 0, 0, 945, 958, 959, 960, 961, + 946, 0, 0, 947, 948, 0, 949, 0, 0, 0, + 0, 0, 0, 1497, 0, 0, 0, 0, 0, 0, + 954, 962, 963, 0, 0, 718, 0, 0, 1427, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 3262, + 3263, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 964, 965, 966, 967, 968, 969, 970, 971, 972, + 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, + 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, + 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, + 1003, 1004, 1005, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 718, 0, 0, 0, 0, 3944, 0, 0, 0, 0, + 0, 0, 718, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3264, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 718, 0, 0, 0, 0, 0, 0, + 0, 3958, 0, 0, 3959, 0, 3960, 195, 0, 0, + 0, 718, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1497, 0, 718, 0, 0, 0, 1498, + 0, 0, 718, 718, 1498, 195, 195, 195, 195, 195, + 0, 0, 0, 0, 0, 0, 0, 195, 0, 0, + 0, 3265, 3266, 195, 0, 195, 0, 0, 195, 195, + 195, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 717, 717, 0, 0, 0, 0, + 0, 1262, 0, 1262, 1262, 0, 0, 0, 0, 0, + 4039, 0, 0, 0, 195, 0, 0, 0, 0, 0, + 0, 0, 0, 1426, 0, 0, 0, 718, 0, 0, + 1498, 0, 0, 717, 0, 718, 0, 0, 0, 4055, + 195, 4056, 0, 4057, 0, 910, 0, 0, 717, 0, + 0, 914, 0, 0, 195, 911, 912, 0, 0, 1814, + 913, 915, 0, 0, 0, 0, 0, 0, 0, 1823, + 0, 0, 0, 195, 0, 0, 195, 0, 0, 0, + 0, 1744, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 717, 0, 1849, 0, 0, 0, 0, 0, + 1761, 0, 1858, 0, 0, 1497, 1860, 0, 0, 1863, + 1864, 717, 717, 4106, 717, 4107, 717, 717, 0, 717, + 717, 717, 717, 717, 717, 0, 0, 0, 0, 0, + 0, 0, 1497, 1895, 1896, 1497, 717, 1497, 0, 1901, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 717, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1900, 1963, 0, 0, 717, 0, 0, + 718, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 664, 0, 664, 0, 0, 664, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1945, 0, + 0, 0, 0, 0, 0, 195, 0, 0, 0, 0, + 0, 717, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1981, 0, 0, 0, 0, 0, 0, 1985, 0, + 664, 0, 0, 0, 0, 0, 0, 0, 0, 1996, + 1997, 1998, 1999, 2000, 2001, 2002, 0, 0, 0, 0, + 0, 195, 0, 0, 0, 0, 0, 0, 0, 0, + 1499, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 195, 0, 0, 0, 0, 1693, 1694, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 195, 0, 0, 195, 195, 195, 0, 0, 0, 0, + 0, 0, 0, 718, 718, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1738, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1756, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 718, 718, 718, 718, 0, 717, 717, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 717, 0, 0, 1075, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1866, 1866, 0, 1866, 0, 1866, 1866, 0, + 1875, 1866, 1866, 1866, 1866, 1866, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1075, 0, 0, + 0, 0, 0, 0, 2032, 0, 0, 0, 717, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1497, 0, + 0, 0, 0, 0, 0, 0, 0, 2091, 0, 0, + 1499, 0, 1943, 0, 0, 1497, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1967, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 664, 0, 0, + 0, 0, 1262, 0, 195, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1032, 0, 0, 1498, 0, 0, 0, 0, 718, 0, + 718, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 664, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 664, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 717, + 0, 0, 0, 0, 0, 0, 0, 0, 718, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 195, 0, 0, 718, 0, 0, 0, 0, 0, + 0, 0, 1499, 0, 0, 0, 0, 718, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 717, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1499, + 0, 0, 1499, 0, 1499, 664, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 717, 0, 0, 0, + 0, 0, 0, 0, 0, 1917, 0, 0, 1262, 1262, + 717, 0, 0, 717, 0, 0, 0, 0, 0, 0, + 664, 2020, 0, 0, 0, 0, 0, 0, 0, 0, + 718, 0, 0, 717, 718, 718, 1969, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 2350, 0, 664, 0, 0, 0, 0, 0, 2354, + 664, 2357, 0, 718, 2032, 0, 0, 0, 0, 1994, + 1995, 664, 664, 664, 664, 664, 664, 664, 0, 2077, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 717, 0, 0, 0, 0, 0, 0, 2447, 2448, 2449, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 717, 0, 0, + 0, 0, 0, 717, 1858, 0, 0, 1858, 0, 1858, + 0, 0, 0, 0, 0, 2479, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 717, 0, 0, 0, 0, 717, 0, 0, 0, 717, + 717, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 718, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 195, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 718, 195, 0, 0, 0, + 1262, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 2032, 0, 0, 0, 664, 0, 0, 2514, + 0, 0, 0, 0, 0, 0, 0, 0, 2531, 2532, + 0, 0, 2536, 0, 0, 0, 0, 0, 0, 2324, + 717, 0, 0, 2541, 0, 0, 0, 0, 0, 0, + 2544, 0, 0, 0, 0, 0, 0, 0, 0, 718, + 0, 0, 0, 0, 0, 0, 0, 2336, 0, 1498, + 0, 718, 0, 0, 0, 1499, 2547, 0, 0, 0, + 0, 1738, 0, 0, 1262, 0, 0, 1051, 1051, 0, + 0, 0, 1499, 0, 0, 718, 2287, 0, 0, 0, + 0, 0, 0, 0, 1075, 0, 0, 0, 0, 1497, + 0, 717, 0, 0, 0, 0, 0, 718, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 195, 718, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1082, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 718, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1075, 0, + 0, 0, 0, 0, 1082, 0, 718, 0, 0, 0, + 0, 195, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 718, 0, 718, 0, 0, + 0, 0, 0, 0, 0, 1051, 1969, 1051, 1051, 1051, + 1051, 1051, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1075, 0, 0, 0, 0, 2077, 0, 0, 0, + 2077, 2077, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1917, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1051, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1032, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 664, 0, 0, 0, 0, 0, 0, + 1969, 664, 0, 664, 0, 664, 2373, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 717, 0, 0, 0, 0, 0, 0, 0, + 0, 2550, 0, 717, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 2860, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1262, 0, 0, 0, 0, 2847, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 717, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 717, 0, 0, + 0, 0, 0, 0, 717, 0, 0, 0, 1858, 1858, + 0, 0, 0, 717, 0, 0, 0, 0, 0, 0, + 0, 2896, 0, 0, 0, 0, 0, 0, 0, 1497, + 2933, 0, 0, 0, 664, 0, 0, 0, 0, 0, + 0, 664, 0, 0, 0, 0, 0, 0, 0, 0, + 664, 664, 0, 0, 664, 0, 2538, 0, 0, 0, + 0, 0, 0, 0, 0, 664, 0, 0, 0, 0, + 0, 0, 664, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 2945, 2946, 2947, + 2948, 2949, 0, 0, 0, 0, 0, 0, 664, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 2032, 2959, 0, 717, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 2967, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 717, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1499, 0, 1969, 0, + 0, 0, 0, 2795, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 2810, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 2329, 2330, 2331, 2332, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 155, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1378, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 160, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1365, 85, 148, 0, 0, 149, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 161, 0, - 0, 0, 0, 0, 0, 173, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 155, 0, 0, 0, 0, - 191, 0, 0, 0, 649, 649, 90, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 191, 0, 0, 0, 0, 1541, 181, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 649, 0, 0, 191, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 649, 148, 0, 0, 149, - 0, 191, 0, 0, 0, 0, 0, 0, 0, 162, - 167, 164, 170, 171, 172, 174, 176, 177, 178, 179, - 0, 0, 0, 1500, 0, 180, 182, 183, 184, 161, - 0, 0, 0, 0, 0, 0, 173, 0, 649, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1365, 67, 0, 0, 0, 0, 649, 649, 0, - 649, 0, 649, 649, 0, 649, 649, 649, 649, 649, - 649, 0, 0, 0, 0, 0, 0, 181, 1365, 0, - 595, 1365, 649, 1365, 191, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 883, - 0, 0, 0, 0, 191, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 649, 0, 191, - 162, 167, 164, 170, 171, 172, 174, 176, 177, 178, - 179, 0, 0, 649, 0, 191, 180, 182, 183, 184, - 949, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 191, 0, 0, 0, 0, 0, 0, 191, 0, - 0, 0, 0, 0, 0, 0, 0, 191, 191, 191, - 191, 191, 191, 191, 191, 191, 649, 0, 0, 0, - 2622, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 921, - 0, 0, 2646, 2647, 0, 0, 2649, 0, 0, 2651, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 2658, - 2659, 2660, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 2665, 0, 0, 2667, 2668, 2669, 0, 0, 0, - 2670, 2671, 0, 0, 1980, 2673, 0, 0, 2675, 0, - 0, 2677, 2678, 2679, 2680, 0, 0, 0, 0, 2681, - 1980, 1980, 1980, 1980, 1980, 0, 0, 0, 0, 0, - 0, 0, 2691, 0, 0, 0, 0, 0, 0, 0, - 0, 921, 0, 0, 0, 0, 0, 0, 2705, 2706, - 2707, 2708, 2709, 2710, 0, 0, 0, 2711, 2712, 0, - 2713, 0, 2714, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 649, 649, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 649, 0, 0, 2745, 0, 0, - 0, 0, 0, 191, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 2775, 0, 0, 813, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 649, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1365, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 649, 0, 0, 0, 0, 0, 1365, 0, - 0, 0, 0, 2838, 628, 0, 0, 0, 0, 0, - 648, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 649, 649, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 648, 0, 648, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 2021, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 2927, 186, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1646, 0, 2936, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 191, - 125, 0, 147, 0, 649, 0, 0, 0, 0, 0, - 0, 0, 0, 168, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 191, 0, - 0, 649, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 191, 0, 0, 158, 649, 0, 0, 2021, 191, - 146, 191, 0, 191, 191, 0, 1069, 0, 1075, 0, - 0, 1077, 0, 0, 0, 0, 0, 0, 649, 165, - 0, 0, 166, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1650, 1651, 157, 156, 185, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1295, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 649, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 649, 0, 0, 0, 0, 0, 649, 0, - 3078, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 3102, 151, 1652, 154, 0, 1649, 0, - 152, 153, 0, 0, 0, 649, 0, 169, 0, 0, - 649, 0, 0, 0, 649, 649, 175, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 3122, 0, - 3123, 0, 191, 3124, 0, 0, 3127, 3128, 0, 191, - 0, 0, 0, 0, 0, 3132, 0, 0, 191, 191, - 0, 0, 191, 0, 191, 3134, 0, 0, 0, 0, - 0, 191, 0, 0, 0, 0, 0, 0, 191, 770, - 0, 0, 0, 0, 0, 0, 0, 0, 3151, 0, - 0, 3152, 0, 3153, 3154, 0, 3155, 0, 3156, 0, - 0, 0, 0, 3157, 191, 0, 0, 0, 0, 0, - 0, 0, 0, 649, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 3182, 0, - 160, 0, 0, 0, 0, 0, 0, 0, 0, 3190, - 0, 0, 3192, 0, 647, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 3196, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 3274, 0, 0, 0, 0, 0, 1553, 1365, - 0, 2021, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 945, 1570, 952, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 155, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 648, 1282, 648, 648, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 648, 0, 0, 148, - 0, 0, 149, 0, 0, 0, 0, 0, 0, 1709, - 0, 0, 3359, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1364, 0, 0, 0, 0, - 0, 0, 161, 0, 0, 0, 0, 0, 0, 173, - 0, 0, 0, 0, 1754, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 191, 0, 0, 0, 0, 0, 0, - 0, 191, 0, 0, 0, 0, 1788, 0, 0, 0, - 181, 0, 649, 1792, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 649, 1803, 1804, 1805, 1806, 1807, 1808, - 1809, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 191, 0, 0, - 0, 0, 191, 162, 167, 164, 170, 171, 172, 174, - 176, 177, 178, 179, 0, 0, 0, 0, 0, 180, - 182, 183, 184, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 3399, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1364, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 3413, 0, 0, 3414, 0, 3415, 0, 0, - 649, 0, 0, 0, 0, 0, 191, 0, 0, 0, - 0, 0, 0, 191, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 649, 0, 0, - 0, 0, 0, 0, 649, 0, 0, 0, 0, 0, - 0, 648, 648, 649, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1365, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 191, 191, 191, 191, 191, 648, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 3495, 648, 0, 0, 0, 191, 191, 1839, 0, - 0, 0, 1623, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1632, 0, 0, 0, 0, 0, 0, 191, - 0, 3509, 0, 3510, 0, 3511, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 648, 0, 1658, 0, 0, - 649, 0, 0, 0, 0, 1667, 0, 0, 1364, 1669, - 0, 0, 1672, 1673, 648, 648, 0, 648, 0, 648, - 648, 0, 648, 648, 648, 648, 648, 648, 0, 0, - 0, 0, 0, 0, 0, 1364, 1704, 1705, 1364, 648, - 1364, 0, 1710, 0, 0, 0, 0, 649, 0, 0, - 0, 3556, 0, 3557, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 648, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1772, 649, 0, - 648, 0, 0, 0, 0, 0, 649, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1131, 0, - 1131, 1131, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 649, 0, 0, - 1294, 0, 0, 648, 0, 0, 0, 0, 0, 0, - 0, 191, 0, 0, 0, 649, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 649, - 0, 0, 0, 1365, 0, 0, 649, 649, 1365, 191, - 191, 191, 191, 191, 0, 0, 0, 0, 0, 0, - 0, 191, 0, 0, 0, 0, 0, 191, 0, 191, - 0, 0, 191, 191, 191, 0, 2084, 0, 0, 0, - 0, 0, 0, 0, 2088, 0, 2091, 0, 0, 1839, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 91, 0, 0, 831, 0, 0, - 0, 819, 832, 833, 834, 835, 820, 0, 191, 821, - 822, 0, 823, 0, 0, 0, 0, 0, 0, 0, - 0, 649, 0, 0, 1365, 0, 828, 836, 837, 649, - 0, 0, 0, 0, 191, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 191, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 191, 648, 648, - 191, 0, 0, 2872, 2873, 0, 0, 0, 0, 0, - 0, 648, 0, 0, 0, 838, 839, 840, 841, 842, - 843, 844, 845, 846, 847, 848, 849, 850, 851, 852, - 853, 854, 855, 856, 857, 858, 859, 860, 861, 862, - 863, 864, 865, 866, 867, 868, 869, 870, 871, 872, - 873, 874, 875, 876, 877, 878, 879, 0, 0, 0, - 0, 0, 0, 0, 0, 1503, 1504, 0, 648, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1364, 0, - 0, 649, 0, 0, 0, 0, 0, 1895, 0, 648, - 0, 0, 0, 0, 0, 1364, 0, 0, 2874, 0, - 0, 1547, 0, 0, 0, 0, 0, 1839, 0, 0, - 191, 0, 0, 0, 2248, 0, 1565, 0, 0, 0, - 648, 648, 0, 2265, 2266, 0, 0, 2270, 0, 0, - 0, 0, 0, 0, 0, 0, 2273, 0, 0, 0, - 0, 0, 0, 2276, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 945, - 0, 0, 0, 0, 0, 0, 191, 0, 0, 2279, - 0, 0, 0, 648, 0, 2875, 2876, 0, 1675, 1675, - 0, 1675, 0, 1675, 1675, 0, 1684, 1675, 1675, 1675, - 1675, 1675, 0, 0, 0, 191, 0, 0, 0, 0, - 0, 0, 0, 945, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 191, 0, 0, 191, 191, - 191, 648, 0, 0, 0, 0, 0, 0, 649, 649, - 0, 0, 0, 0, 0, 0, 0, 0, 1752, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 648, 0, - 0, 0, 0, 0, 1776, 0, 0, 771, 0, 0, - 0, 0, 648, 0, 0, 648, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 649, 649, 649, 649, 0, - 0, 0, 0, 0, 0, 648, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1131, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 189, 0, 0, 596, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 596, 0, 0, 0, 0, 0, 0, - 0, 0, 648, 0, 0, 0, 0, 0, 903, 2181, - 2182, 2183, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 922, 922, 0, 0, 648, - 0, 0, 0, 0, 596, 648, 1667, 0, 0, 1667, - 0, 1667, 191, 0, 0, 0, 0, 2213, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1365, 0, 0, 0, 0, 649, 0, 649, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 648, 0, 0, 0, 0, 648, 0, 0, - 0, 648, 648, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 2498, 0, 0, - 0, 0, 1131, 1131, 0, 0, 0, 0, 649, 0, - 0, 0, 0, 0, 0, 1827, 0, 0, 0, 0, - 0, 191, 0, 0, 649, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 649, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 2547, 1881, 0, 0, 0, 0, 0, 0, 0, - 648, 0, 0, 0, 0, 0, 0, 649, 0, 0, - 0, 649, 649, 1897, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 649, 0, 0, 0, 1930, 1931, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 2596, 2597, 2598, - 2599, 2600, 0, 0, 0, 0, 1364, 0, 648, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1839, 2610, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1131, 0, 0, - 0, 0, 0, 0, 2618, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 649, 0, 0, 0, 0, 0, 0, 3516, - 0, 0, 0, 0, 0, 0, 0, 0, 191, 1592, - 0, 0, 0, 0, 0, 2058, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 649, 191, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 2070, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1547, 0, 0, 1131, - 1592, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 649, 0, 945, - 0, 0, 0, 0, 0, 0, 0, 1365, 0, 649, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 648, - 0, 0, 0, 649, 649, 0, 0, 0, 0, 0, - 648, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 649, 952, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 191, - 649, 0, 0, 1580, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 945, 0, 2511, 0, 0, 0, 952, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 596, 0, 596, 0, 0, 596, 0, 0, 0, 0, - 0, 0, 0, 649, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1580, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 649, 945, 648, 0, 0, - 0, 1881, 0, 0, 0, 1881, 1881, 0, 0, 0, - 0, 0, 649, 2828, 649, 0, 0, 1593, 0, 596, - 0, 0, 0, 0, 648, 0, 0, 0, 0, 0, - 0, 648, 0, 0, 0, 1667, 1667, 0, 0, 2866, - 648, 0, 0, 0, 0, 0, 0, 1366, 0, 0, - 0, 0, 0, 2878, 0, 0, 1364, 2584, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1593, 0, - 0, 0, 2895, 0, 0, 2898, 0, 0, 0, 1606, - 1609, 1610, 1611, 1612, 1613, 1614, 0, 1615, 1616, 1618, - 1619, 1617, 1620, 1621, 1594, 1595, 1596, 1597, 1578, 1579, - 1607, 0, 1581, 0, 1582, 1583, 1584, 1585, 1586, 1587, - 1588, 1589, 1590, 0, 2282, 1591, 1598, 1599, 1600, 1601, - 0, 1602, 1603, 1604, 1605, 0, 0, 0, 0, 0, - 1606, 1609, 1610, 1611, 1612, 1613, 1614, 648, 1615, 1616, - 1618, 1619, 1617, 1620, 1621, 1594, 1595, 1596, 1597, 1578, - 1579, 1607, 0, 1581, 0, 1582, 1583, 1584, 1585, 1586, - 1587, 1588, 1589, 1590, 0, 0, 1591, 1598, 1599, 1600, - 1601, 0, 1602, 1603, 1604, 1605, 0, 0, 0, 0, - 0, 0, 1131, 0, 648, 2995, 0, 0, 0, 0, - 0, 0, 1366, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 648, 0, 0, 0, 0, - 0, 0, 0, 648, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 596, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 648, 1608, 0, 0, 0, 903, - 3056, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 648, 0, 0, 0, 0, 0, 0, 0, - 3071, 0, 596, 3072, 3073, 3074, 648, 0, 0, 0, - 1364, 0, 0, 648, 648, 1364, 0, 0, 0, 0, - 596, 0, 0, 0, 0, 0, 1608, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 2446, 0, 0, 0, 0, 0, 0, - 1366, 0, 0, 0, 2461, 0, 2823, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 717, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 717, + 0, 2892, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1366, 648, 0, - 1366, 1364, 1366, 596, 0, 0, 648, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 2336, 0, + 717, 0, 0, 0, 0, 2917, 0, 0, 0, 0, + 0, 0, 0, 0, 2922, 0, 0, 0, 717, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1726, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 596, 0, - 0, 831, 0, 2013, 0, 0, 832, 0, 0, 0, - 0, 0, 0, 0, 1778, 0, 1886, 2902, 0, 0, + 0, 0, 717, 0, 0, 0, 1497, 0, 0, 717, + 717, 1497, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 596, 2543, 0, 0, 0, 0, 0, 596, 0, 0, - 0, 0, 0, 0, 0, 0, 1801, 1802, 596, 596, - 596, 596, 596, 596, 596, 0, 0, 0, 2070, 0, - 0, 0, 0, 0, 0, 2568, 0, 0, 0, 0, - 0, 0, 0, 0, 2573, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 648, 838, - 839, 840, 841, 842, 843, 844, 845, 846, 847, 848, - 849, 850, 851, 852, 853, 854, 855, 856, 857, 858, - 859, 860, 861, 862, 863, 864, 865, 866, 867, 868, - 869, 870, 871, 872, 873, 874, 875, 876, 877, 878, - 879, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 664, 0, 0, 0, 0, 0, 0, 0, 1917, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1881, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 3211, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 2077, 0, 0, 0, 0, + 0, 0, 0, 0, 717, 0, 0, 1497, 0, 0, + 0, 0, 717, 0, 664, 0, 0, 0, 0, 664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 3046, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3216, 0, 0, 0, + 0, 0, 0, 2077, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1881, 0, + 0, 0, 3254, 3292, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3268, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 648, 648, 0, 0, 0, - 0, 0, 596, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 91, 0, 0, 831, 0, 2693, - 0, 819, 832, 833, 834, 835, 820, 1131, 0, 821, - 822, 0, 823, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 648, 648, 648, 648, 828, 836, 837, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1675, 0, - 1366, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 922, 922, 0, 0, 2730, 1366, 0, 0, + 0, 0, 0, 664, 0, 3285, 0, 0, 3288, 0, + 2902, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1131, 0, 0, 2872, 2873, 0, 0, 2757, 1675, 0, - 0, 0, 3397, 0, 0, 838, 839, 840, 841, 842, - 843, 844, 845, 846, 847, 848, 849, 850, 851, 852, - 853, 854, 855, 856, 857, 858, 859, 860, 861, 862, - 863, 864, 865, 866, 867, 868, 869, 870, 871, 872, - 873, 874, 875, 876, 877, 878, 879, 0, 0, 0, - 0, 0, 0, 0, 922, 1778, 922, 922, 922, 922, - 922, 0, 0, 0, 0, 0, 0, 0, 1364, 0, - 0, 0, 0, 648, 0, 648, 0, 0, 0, 831, - 0, 0, 945, 0, 832, 0, 0, 0, 2874, 0, - 2070, 0, 0, 0, 1886, 0, 0, 0, 1726, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 922, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 903, 0, 0, - 0, 0, 0, 0, 0, 648, 0, 0, 0, 0, - 596, 0, 0, 0, 0, 0, 0, 1778, 596, 0, - 596, 648, 596, 2107, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 648, 2875, 2876, 838, 839, 840, - 841, 842, 843, 844, 845, 846, 847, 848, 849, 850, - 851, 852, 853, 854, 855, 856, 857, 858, 859, 860, - 861, 862, 863, 864, 865, 866, 867, 868, 869, 870, - 871, 872, 873, 874, 875, 876, 877, 878, 879, 0, - 0, 0, 2966, 0, 648, 0, 0, 0, 648, 648, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1499, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 3075, 664, + 664, 664, 664, 664, 0, 0, 0, 717, 0, 0, + 1262, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 664, 664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 785, 648, 0, 789, - 0, 786, 787, 0, 0, 0, 788, 0, 0, 0, + 0, 1866, 0, 0, 0, 0, 0, 0, 664, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 3118, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1262, 0, 1051, 0, 0, 0, 0, + 3145, 1866, 0, 0, 0, 0, 0, 3440, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3489, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 648, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 717, 717, 0, 0, 0, 1075, 0, 0, 0, 0, + 0, 0, 3499, 2336, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 596, 0, 0, 0, 0, 0, 0, 596, 2070, - 2070, 0, 0, 648, 0, 0, 0, 596, 596, 0, - 0, 596, 0, 2272, 0, 0, 0, 0, 0, 0, - 596, 0, 0, 0, 0, 0, 0, 596, 0, 0, + 0, 0, 3514, 0, 0, 3515, 3516, 3517, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 717, + 717, 717, 717, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3110, 3111, 3112, 3113, - 0, 0, 0, 596, 648, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1364, 0, 648, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1051, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 648, 648, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 648, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 648, 1366, 0, - 1778, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 648, 0, 0, 0, 0, 0, 0, 3186, 0, 3188, + 0, 0, 0, 1499, 0, 0, 0, 0, 1499, 664, + 664, 664, 664, 664, 0, 0, 0, 0, 3405, 0, + 0, 3161, 0, 0, 0, 0, 0, 1917, 0, 664, + 0, 0, 664, 3169, 1969, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 648, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 648, - 0, 648, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 2070, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 3297, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1131, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 664, 0, + 1497, 0, 0, 0, 0, 717, 0, 717, 0, 0, + 0, 0, 0, 0, 1499, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 664, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 596, 0, 0, 0, 0, 0, 0, 0, - 1726, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 664, 0, 0, + 664, 0, 0, 0, 0, 717, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 3347, 0, - 0, 0, 3347, 3347, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 596, 0, 0, 0, - 0, 596, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 2070, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 717, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 2336, 2336, 0, 717, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3553, 3554, 3555, 3556, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 596, 0, 0, 0, 0, - 0, 0, 2553, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 2070, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 717, 0, 0, + 0, 717, 717, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 2070, 1366, 0, + 717, 0, 0, 0, 0, 0, 0, 0, 0, 664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 596, 596, 596, 596, 596, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 596, 596, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 3424, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 596, 0, - 3428, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 3215, 3217, 3216, 3234, 3235, 3236, 3237, 3238, 3239, 3240, - 716, 0, 922, 0, 1131, 1131, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3471, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 3479, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 3424, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 922, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 2070, 0, 0, 0, + 0, 0, 0, 0, 664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 2966, 0, 3479, 0, 0, 0, 0, + 0, 0, 0, 0, 664, 0, 0, 664, 664, 664, + 0, 0, 0, 0, 0, 0, 3652, 0, 3654, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 717, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 596, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1366, 0, 0, 0, 0, 1366, 596, 596, - 596, 596, 596, 0, 0, 0, 0, 0, 0, 0, - 2773, 0, 0, 0, 0, 0, 1726, 0, 596, 0, - 0, 596, 2781, 1778, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 717, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 2336, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 3221, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 596, 0, 3229, - 3230, 0, 0, 3256, 3255, 3254, 0, 0, 0, 0, - 0, 0, 0, 1366, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 596, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 596, 0, 0, + 0, 0, 3819, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1262, 0, 0, 3942, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 596, 0, 0, 596, - 0, 785, 0, 695, 789, 697, 786, 787, 0, 693, - 696, 788, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 714, 715, 3214, - 3218, 3219, 3220, 3231, 3232, 3233, 3241, 3243, 747, 3242, - 3244, 3245, 3246, 3249, 3250, 3251, 3252, 3247, 3248, 3253, - 3198, 3202, 3199, 3200, 3201, 3213, 3203, 3204, 3205, 3206, - 3207, 3208, 3209, 3210, 3211, 3212, 3257, 3258, 3259, 3260, - 3261, 3262, 3224, 3228, 3227, 3225, 3226, 3222, 3223, 0, + 0, 0, 0, 0, 0, 0, 717, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1497, 0, 717, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 790, 0, 791, 0, 0, 795, 0, 0, 596, - 797, 796, 0, 798, 764, 763, 0, 0, 792, 793, - 0, 794, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 717, 717, 0, 0, 0, 0, 3892, 0, + 0, 0, 3892, 3892, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 717, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 717, + 0, 2336, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1917, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1499, 0, 0, + 0, 717, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 596, 0, 0, 0, 0, + 0, 0, 0, 717, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3263, 3264, 3265, 3266, - 3267, 3268, 3269, 3270, 596, 0, 0, 0, 0, 0, + 0, 0, 717, 0, 717, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 596, 0, 0, 596, 596, 596, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1917, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 2336, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 2336, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 3969, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 3973, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1262, 1262, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 4015, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 4023, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1726, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1366, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 3969, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 2336, 0, 0, 0, 0, 0, + 0, 1917, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 3405, 0, 4023, 0, 0, 0, 0, + 664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1726, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1499, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3995, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1917, 0, 0, 0, 0, + 0, 0, 0, 392, 0, 0, 0, 1397, 1383, 520, + 0, 1325, 1400, 1294, 1313, 1410, 1316, 1319, 1362, 1272, + 1340, 411, 1310, 1265, 1298, 1267, 1305, 1268, 1296, 1327, + 269, 1293, 1385, 1344, 1399, 362, 266, 1274, 1299, 425, + 1315, 203, 1364, 481, 251, 373, 370, 575, 281, 272, + 268, 249, 315, 381, 423, 510, 417, 1406, 366, 1350, + 0, 491, 396, 0, 0, 1969, 1329, 1389, 1338, 1376, + 1324, 1363, 1282, 1349, 1401, 1311, 1359, 1402, 321, 247, + 323, 202, 408, 492, 285, 0, 0, 0, 0, 3997, + 941, 0, 0, 0, 0, 3998, 0, 0, 0, 0, + 237, 0, 0, 244, 0, 0, 0, 347, 356, 355, + 336, 337, 339, 341, 346, 353, 359, 1307, 1356, 1396, + 1308, 1358, 264, 319, 271, 263, 572, 1407, 1388, 1271, + 1337, 1395, 1332, 0, 0, 228, 1398, 1331, 0, 1361, + 0, 1413, 1266, 1352, 0, 1269, 1273, 1409, 1393, 1302, + 274, 0, 0, 0, 0, 0, 0, 0, 1328, 1339, + 1373, 1377, 1322, 0, 0, 0, 0, 0, 0, 0, + 0, 1300, 0, 1348, 0, 0, 0, 1278, 1270, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 383, 0, 0, 0, 1265, 1250, 502, - 0, 1193, 1268, 1162, 1181, 1278, 1184, 1187, 1229, 1141, - 1207, 402, 1178, 1134, 1166, 1136, 1173, 1137, 1164, 1195, - 260, 1161, 1252, 1211, 1267, 353, 257, 1143, 1167, 416, - 1183, 198, 1231, 471, 244, 364, 361, 510, 272, 263, - 259, 242, 306, 372, 414, 492, 408, 1274, 357, 1217, - 0, 481, 387, 0, 0, 0, 1197, 1256, 1205, 1243, - 1192, 1230, 1151, 1216, 1269, 1179, 1226, 1270, 312, 240, - 314, 197, 399, 482, 276, 0, 0, 1726, 0, 3451, - 638, 0, 0, 0, 0, 3452, 0, 0, 0, 0, - 230, 0, 0, 237, 0, 0, 596, 338, 347, 346, - 327, 328, 330, 332, 337, 344, 350, 1175, 1223, 1264, - 1176, 1225, 255, 310, 262, 254, 507, 1275, 1255, 1140, - 1204, 1263, 0, 0, 221, 1266, 1199, 0, 1228, 0, - 1281, 1135, 1219, 0, 1138, 1142, 1277, 1259, 1170, 265, - 0, 0, 0, 0, 0, 0, 0, 1196, 1206, 1240, - 1244, 1190, 0, 0, 0, 0, 1366, 0, 0, 1168, - 0, 1215, 0, 0, 0, 1147, 1139, 0, 0, 0, + 0, 1326, 0, 0, 0, 0, 1281, 0, 1301, 1374, + 0, 1264, 296, 1275, 397, 256, 0, 448, 1381, 1392, + 1323, 616, 1394, 1321, 1320, 1368, 1279, 1387, 1314, 361, + 1277, 328, 197, 224, 0, 1312, 407, 456, 468, 1386, + 1297, 1306, 252, 1304, 466, 421, 594, 232, 283, 453, + 427, 464, 435, 286, 1347, 1366, 465, 368, 577, 445, + 591, 617, 618, 262, 401, 603, 514, 611, 635, 225, + 259, 415, 499, 597, 488, 393, 573, 574, 327, 487, + 294, 201, 365, 623, 223, 474, 367, 241, 230, 579, + 600, 288, 451, 630, 212, 509, 589, 238, 478, 0, + 0, 638, 246, 498, 214, 586, 497, 389, 324, 325, + 213, 0, 452, 267, 292, 0, 0, 257, 410, 581, + 582, 255, 639, 227, 610, 219, 1276, 609, 403, 576, + 587, 390, 379, 218, 585, 388, 378, 332, 351, 352, + 279, 305, 442, 371, 443, 304, 306, 399, 398, 400, + 206, 598, 0, 207, 0, 493, 599, 640, 447, 211, + 233, 234, 236, 1292, 278, 282, 290, 293, 301, 302, + 311, 363, 414, 441, 437, 446, 1382, 571, 592, 604, + 615, 621, 622, 624, 625, 626, 627, 628, 631, 629, + 402, 309, 489, 331, 369, 1371, 1412, 420, 467, 239, + 596, 490, 199, 1286, 1291, 1284, 0, 253, 254, 1353, + 567, 1287, 1285, 1342, 1343, 1288, 1403, 1404, 1405, 1390, + 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, 636, 500, + 506, 501, 502, 503, 504, 505, 0, 507, 1375, 1280, + 0, 1289, 1290, 1384, 583, 584, 659, 380, 480, 593, + 333, 345, 348, 338, 357, 0, 358, 334, 335, 340, + 342, 343, 344, 349, 350, 354, 360, 248, 209, 386, + 394, 570, 310, 215, 216, 217, 516, 517, 518, 519, + 607, 608, 612, 204, 457, 458, 459, 460, 291, 602, + 307, 463, 462, 329, 330, 375, 444, 532, 534, 545, + 549, 551, 553, 559, 562, 533, 535, 546, 550, 552, + 554, 560, 563, 522, 524, 526, 528, 541, 540, 537, + 565, 566, 543, 548, 527, 539, 544, 557, 564, 561, + 521, 525, 529, 538, 556, 555, 536, 547, 558, 542, + 530, 523, 531, 1346, 196, 220, 364, 1408, 449, 287, + 637, 606, 601, 205, 222, 1283, 261, 1295, 1303, 0, + 1309, 1317, 1318, 1330, 1333, 1334, 1335, 1336, 1354, 1355, + 1357, 1365, 1367, 1370, 1372, 1379, 1391, 1411, 198, 200, + 208, 221, 231, 235, 242, 260, 275, 277, 284, 297, + 308, 316, 317, 320, 326, 376, 382, 383, 384, 385, + 404, 405, 406, 409, 412, 413, 416, 418, 419, 422, + 426, 430, 431, 432, 434, 436, 438, 450, 455, 469, + 470, 471, 472, 473, 476, 477, 482, 483, 484, 485, + 486, 494, 495, 508, 578, 580, 595, 613, 619, 475, + 299, 300, 439, 440, 312, 313, 633, 634, 298, 590, + 620, 588, 632, 614, 433, 374, 1345, 1351, 377, 280, + 303, 318, 1360, 605, 496, 226, 461, 289, 250, 1378, + 1380, 210, 245, 229, 258, 273, 276, 322, 387, 395, + 424, 429, 295, 270, 243, 454, 240, 479, 511, 512, + 513, 515, 391, 265, 428, 1341, 1369, 372, 568, 569, + 314, 392, 0, 0, 0, 1397, 1383, 520, 0, 1325, + 1400, 1294, 1313, 1410, 1316, 1319, 1362, 1272, 1340, 411, + 1310, 1265, 1298, 1267, 1305, 1268, 1296, 1327, 269, 1293, + 1385, 1344, 1399, 362, 266, 1274, 1299, 425, 1315, 203, + 1364, 481, 251, 373, 370, 575, 281, 272, 268, 249, + 315, 381, 423, 510, 417, 1406, 366, 1350, 0, 491, + 396, 0, 0, 0, 1329, 1389, 1338, 1376, 1324, 1363, + 1282, 1349, 1401, 1311, 1359, 1402, 321, 247, 323, 202, + 408, 492, 285, 0, 0, 0, 0, 0, 194, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 237, 0, + 0, 244, 0, 0, 0, 347, 356, 355, 336, 337, + 339, 341, 346, 353, 359, 1307, 1356, 1396, 1308, 1358, + 264, 319, 271, 263, 572, 1407, 1388, 1271, 1337, 1395, + 1332, 0, 0, 228, 1398, 1331, 0, 1361, 0, 1413, + 1266, 1352, 0, 1269, 1273, 1409, 1393, 1302, 274, 0, + 0, 0, 0, 0, 0, 0, 1328, 1339, 1373, 1377, + 1322, 0, 0, 0, 0, 0, 0, 3170, 0, 1300, + 0, 1348, 0, 0, 0, 1278, 1270, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1326, + 0, 0, 0, 0, 1281, 0, 1301, 1374, 0, 1264, + 296, 1275, 397, 256, 0, 448, 1381, 1392, 1323, 616, + 1394, 1321, 1320, 1368, 1279, 1387, 1314, 361, 1277, 328, + 197, 224, 0, 1312, 407, 456, 468, 1386, 1297, 1306, + 252, 1304, 466, 421, 594, 232, 283, 453, 427, 464, + 435, 286, 1347, 1366, 465, 368, 577, 445, 591, 617, + 618, 262, 401, 603, 514, 611, 635, 225, 259, 415, + 499, 597, 488, 393, 573, 574, 327, 487, 294, 201, + 365, 623, 223, 474, 367, 241, 230, 579, 600, 288, + 451, 630, 212, 509, 589, 238, 478, 0, 0, 638, + 246, 498, 214, 586, 497, 389, 324, 325, 213, 0, + 452, 267, 292, 0, 0, 257, 410, 581, 582, 255, + 639, 227, 610, 219, 1276, 609, 403, 576, 587, 390, + 379, 218, 585, 388, 378, 332, 351, 352, 279, 305, + 442, 371, 443, 304, 306, 399, 398, 400, 206, 598, + 0, 207, 0, 493, 599, 640, 447, 211, 233, 234, + 236, 1292, 278, 282, 290, 293, 301, 302, 311, 363, + 414, 441, 437, 446, 1382, 571, 592, 604, 615, 621, + 622, 624, 625, 626, 627, 628, 631, 629, 402, 309, + 489, 331, 369, 1371, 1412, 420, 467, 239, 596, 490, + 199, 1286, 1291, 1284, 0, 253, 254, 1353, 567, 1287, + 1285, 1342, 1343, 1288, 1403, 1404, 1405, 1390, 641, 642, + 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, + 653, 654, 655, 656, 657, 658, 636, 500, 506, 501, + 502, 503, 504, 505, 0, 507, 1375, 1280, 0, 1289, + 1290, 1384, 583, 584, 659, 380, 480, 593, 333, 345, + 348, 338, 357, 0, 358, 334, 335, 340, 342, 343, + 344, 349, 350, 354, 360, 248, 209, 386, 394, 570, + 310, 215, 216, 217, 516, 517, 518, 519, 607, 608, + 612, 204, 457, 458, 459, 460, 291, 602, 307, 463, + 462, 329, 330, 375, 444, 532, 534, 545, 549, 551, + 553, 559, 562, 533, 535, 546, 550, 552, 554, 560, + 563, 522, 524, 526, 528, 541, 540, 537, 565, 566, + 543, 548, 527, 539, 544, 557, 564, 561, 521, 525, + 529, 538, 556, 555, 536, 547, 558, 542, 530, 523, + 531, 1346, 196, 220, 364, 1408, 449, 287, 637, 606, + 601, 205, 222, 1283, 261, 1295, 1303, 0, 1309, 1317, + 1318, 1330, 1333, 1334, 1335, 1336, 1354, 1355, 1357, 1365, + 1367, 1370, 1372, 1379, 1391, 1411, 198, 200, 208, 221, + 231, 235, 242, 260, 275, 277, 284, 297, 308, 316, + 317, 320, 326, 376, 382, 383, 384, 385, 404, 405, + 406, 409, 412, 413, 416, 418, 419, 422, 426, 430, + 431, 432, 434, 436, 438, 450, 455, 469, 470, 471, + 472, 473, 476, 477, 482, 483, 484, 485, 486, 494, + 495, 508, 578, 580, 595, 613, 619, 475, 299, 300, + 439, 440, 312, 313, 633, 634, 298, 590, 620, 588, + 632, 614, 433, 374, 1345, 1351, 377, 280, 303, 318, + 1360, 605, 496, 226, 461, 289, 250, 1378, 1380, 210, + 245, 229, 258, 273, 276, 322, 387, 395, 424, 429, + 295, 270, 243, 454, 240, 479, 511, 512, 513, 515, + 391, 265, 428, 1341, 1369, 372, 568, 569, 314, 392, + 0, 0, 0, 1397, 1383, 520, 0, 1325, 1400, 1294, + 1313, 1410, 1316, 1319, 1362, 1272, 1340, 411, 1310, 1265, + 1298, 1267, 1305, 1268, 1296, 1327, 269, 1293, 1385, 1344, + 1399, 362, 266, 1274, 1299, 425, 1315, 203, 1364, 481, + 251, 373, 370, 575, 281, 272, 268, 249, 315, 381, + 423, 510, 417, 1406, 366, 1350, 0, 491, 396, 0, + 0, 0, 1329, 1389, 1338, 1376, 1324, 1363, 1282, 1349, + 1401, 1311, 1359, 1402, 321, 247, 323, 202, 408, 492, + 285, 0, 0, 0, 0, 0, 707, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 237, 0, 0, 244, + 0, 0, 0, 347, 356, 355, 336, 337, 339, 341, + 346, 353, 359, 1307, 1356, 1396, 1308, 1358, 264, 319, + 271, 263, 572, 1407, 1388, 1271, 1337, 1395, 1332, 0, + 0, 228, 1398, 1331, 0, 1361, 0, 1413, 1266, 1352, + 0, 1269, 1273, 1409, 1393, 1302, 274, 0, 0, 0, + 0, 0, 0, 0, 1328, 1339, 1373, 1377, 1322, 0, + 0, 0, 0, 0, 0, 3131, 0, 1300, 0, 1348, + 0, 0, 0, 1278, 1270, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1326, 0, 0, + 0, 0, 1281, 0, 1301, 1374, 0, 1264, 296, 1275, + 397, 256, 0, 448, 1381, 1392, 1323, 616, 1394, 1321, + 1320, 1368, 1279, 1387, 1314, 361, 1277, 328, 197, 224, + 0, 1312, 407, 456, 468, 1386, 1297, 1306, 252, 1304, + 466, 421, 594, 232, 283, 453, 427, 464, 435, 286, + 1347, 1366, 465, 368, 577, 445, 591, 617, 618, 262, + 401, 603, 514, 611, 635, 225, 259, 415, 499, 597, + 488, 393, 573, 574, 327, 487, 294, 201, 365, 623, + 223, 474, 367, 241, 230, 579, 600, 288, 451, 630, + 212, 509, 589, 238, 478, 0, 0, 638, 246, 498, + 214, 586, 497, 389, 324, 325, 213, 0, 452, 267, + 292, 0, 0, 257, 410, 581, 582, 255, 639, 227, + 610, 219, 1276, 609, 403, 576, 587, 390, 379, 218, + 585, 388, 378, 332, 351, 352, 279, 305, 442, 371, + 443, 304, 306, 399, 398, 400, 206, 598, 0, 207, + 0, 493, 599, 640, 447, 211, 233, 234, 236, 1292, + 278, 282, 290, 293, 301, 302, 311, 363, 414, 441, + 437, 446, 1382, 571, 592, 604, 615, 621, 622, 624, + 625, 626, 627, 628, 631, 629, 402, 309, 489, 331, + 369, 1371, 1412, 420, 467, 239, 596, 490, 199, 1286, + 1291, 1284, 0, 253, 254, 1353, 567, 1287, 1285, 1342, + 1343, 1288, 1403, 1404, 1405, 1390, 641, 642, 643, 644, + 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 636, 500, 506, 501, 502, 503, + 504, 505, 0, 507, 1375, 1280, 0, 1289, 1290, 1384, + 583, 584, 659, 380, 480, 593, 333, 345, 348, 338, + 357, 0, 358, 334, 335, 340, 342, 343, 344, 349, + 350, 354, 360, 248, 209, 386, 394, 570, 310, 215, + 216, 217, 516, 517, 518, 519, 607, 608, 612, 204, + 457, 458, 459, 460, 291, 602, 307, 463, 462, 329, + 330, 375, 444, 532, 534, 545, 549, 551, 553, 559, + 562, 533, 535, 546, 550, 552, 554, 560, 563, 522, + 524, 526, 528, 541, 540, 537, 565, 566, 543, 548, + 527, 539, 544, 557, 564, 561, 521, 525, 529, 538, + 556, 555, 536, 547, 558, 542, 530, 523, 531, 1346, + 196, 220, 364, 1408, 449, 287, 637, 606, 601, 205, + 222, 1283, 261, 1295, 1303, 0, 1309, 1317, 1318, 1330, + 1333, 1334, 1335, 1336, 1354, 1355, 1357, 1365, 1367, 1370, + 1372, 1379, 1391, 1411, 198, 200, 208, 221, 231, 235, + 242, 260, 275, 277, 284, 297, 308, 316, 317, 320, + 326, 376, 382, 383, 384, 385, 404, 405, 406, 409, + 412, 413, 416, 418, 419, 422, 426, 430, 431, 432, + 434, 436, 438, 450, 455, 469, 470, 471, 472, 473, + 476, 477, 482, 483, 484, 485, 486, 494, 495, 508, + 578, 580, 595, 613, 619, 475, 299, 300, 439, 440, + 312, 313, 633, 634, 298, 590, 620, 588, 632, 614, + 433, 374, 1345, 1351, 377, 280, 303, 318, 1360, 605, + 496, 226, 461, 289, 250, 1378, 1380, 210, 245, 229, + 258, 273, 276, 322, 387, 395, 424, 429, 295, 270, + 243, 454, 240, 479, 511, 512, 513, 515, 391, 265, + 428, 1341, 1369, 372, 568, 569, 314, 392, 0, 0, + 0, 1397, 1383, 520, 0, 1325, 1400, 1294, 1313, 1410, + 1316, 1319, 1362, 1272, 1340, 411, 1310, 1265, 1298, 1267, + 1305, 1268, 1296, 1327, 269, 1293, 1385, 1344, 1399, 362, + 266, 1274, 1299, 425, 1315, 203, 1364, 481, 251, 373, + 370, 575, 281, 272, 268, 249, 315, 381, 423, 510, + 417, 1406, 366, 1350, 0, 491, 396, 0, 0, 0, + 1329, 1389, 1338, 1376, 1324, 1363, 1282, 1349, 1401, 1311, + 1359, 1402, 321, 247, 323, 202, 408, 492, 285, 0, + 0, 0, 0, 0, 941, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 237, 0, 0, 244, 0, 0, + 0, 347, 356, 355, 336, 337, 339, 341, 346, 353, + 359, 1307, 1356, 1396, 1308, 1358, 264, 319, 271, 263, + 572, 1407, 1388, 1271, 1337, 1395, 1332, 0, 0, 228, + 1398, 1331, 0, 1361, 0, 1413, 1266, 1352, 0, 1269, + 1273, 1409, 1393, 1302, 274, 0, 0, 0, 0, 0, + 0, 0, 1328, 1339, 1373, 1377, 1322, 0, 0, 0, + 0, 0, 0, 2352, 0, 1300, 0, 1348, 0, 0, + 0, 1278, 1270, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1326, 0, 0, 0, 0, + 1281, 0, 1301, 1374, 0, 1264, 296, 1275, 397, 256, + 0, 448, 1381, 1392, 1323, 616, 1394, 1321, 1320, 1368, + 1279, 1387, 1314, 361, 1277, 328, 197, 224, 0, 1312, + 407, 456, 468, 1386, 1297, 1306, 252, 1304, 466, 421, + 594, 232, 283, 453, 427, 464, 435, 286, 1347, 1366, + 465, 368, 577, 445, 591, 617, 618, 262, 401, 603, + 514, 611, 635, 225, 259, 415, 499, 597, 488, 393, + 573, 574, 327, 487, 294, 201, 365, 623, 223, 474, + 367, 241, 230, 579, 600, 288, 451, 630, 212, 509, + 589, 238, 478, 0, 0, 638, 246, 498, 214, 586, + 497, 389, 324, 325, 213, 0, 452, 267, 292, 0, + 0, 257, 410, 581, 582, 255, 639, 227, 610, 219, + 1276, 609, 403, 576, 587, 390, 379, 218, 585, 388, + 378, 332, 351, 352, 279, 305, 442, 371, 443, 304, + 306, 399, 398, 400, 206, 598, 0, 207, 0, 493, + 599, 640, 447, 211, 233, 234, 236, 1292, 278, 282, + 290, 293, 301, 302, 311, 363, 414, 441, 437, 446, + 1382, 571, 592, 604, 615, 621, 622, 624, 625, 626, + 627, 628, 631, 629, 402, 309, 489, 331, 369, 1371, + 1412, 420, 467, 239, 596, 490, 199, 1286, 1291, 1284, + 0, 253, 254, 1353, 567, 1287, 1285, 1342, 1343, 1288, + 1403, 1404, 1405, 1390, 641, 642, 643, 644, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 636, 500, 506, 501, 502, 503, 504, 505, + 0, 507, 1375, 1280, 0, 1289, 1290, 1384, 583, 584, + 659, 380, 480, 593, 333, 345, 348, 338, 357, 0, + 358, 334, 335, 340, 342, 343, 344, 349, 350, 354, + 360, 248, 209, 386, 394, 570, 310, 215, 216, 217, + 516, 517, 518, 519, 607, 608, 612, 204, 457, 458, + 459, 460, 291, 602, 307, 463, 462, 329, 330, 375, + 444, 532, 534, 545, 549, 551, 553, 559, 562, 533, + 535, 546, 550, 552, 554, 560, 563, 522, 524, 526, + 528, 541, 540, 537, 565, 566, 543, 548, 527, 539, + 544, 557, 564, 561, 521, 525, 529, 538, 556, 555, + 536, 547, 558, 542, 530, 523, 531, 1346, 196, 220, + 364, 1408, 449, 287, 637, 606, 601, 205, 222, 1283, + 261, 1295, 1303, 0, 1309, 1317, 1318, 1330, 1333, 1334, + 1335, 1336, 1354, 1355, 1357, 1365, 1367, 1370, 1372, 1379, + 1391, 1411, 198, 200, 208, 221, 231, 235, 242, 260, + 275, 277, 284, 297, 308, 316, 317, 320, 326, 376, + 382, 383, 384, 385, 404, 405, 406, 409, 412, 413, + 416, 418, 419, 422, 426, 430, 431, 432, 434, 436, + 438, 450, 455, 469, 470, 471, 472, 473, 476, 477, + 482, 483, 484, 485, 486, 494, 495, 508, 578, 580, + 595, 613, 619, 475, 299, 300, 439, 440, 312, 313, + 633, 634, 298, 590, 620, 588, 632, 614, 433, 374, + 1345, 1351, 377, 280, 303, 318, 1360, 605, 496, 226, + 461, 289, 250, 1378, 1380, 210, 245, 229, 258, 273, + 276, 322, 387, 395, 424, 429, 295, 270, 243, 454, + 240, 479, 511, 512, 513, 515, 391, 265, 428, 1341, + 1369, 372, 568, 569, 314, 392, 0, 0, 0, 1397, + 1383, 520, 0, 1325, 1400, 1294, 1313, 1410, 1316, 1319, + 1362, 1272, 1340, 411, 1310, 1265, 1298, 1267, 1305, 1268, + 1296, 1327, 269, 1293, 1385, 1344, 1399, 362, 266, 1274, + 1299, 425, 1315, 203, 1364, 481, 251, 373, 370, 575, + 281, 272, 268, 249, 315, 381, 423, 510, 417, 1406, + 366, 1350, 0, 491, 396, 0, 0, 0, 1329, 1389, + 1338, 1376, 1324, 1363, 1282, 1349, 1401, 1311, 1359, 1402, + 321, 247, 323, 202, 408, 492, 285, 0, 95, 0, + 0, 0, 707, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 237, 0, 0, 244, 0, 0, 0, 347, + 356, 355, 336, 337, 339, 341, 346, 353, 359, 1307, + 1356, 1396, 1308, 1358, 264, 319, 271, 263, 572, 1407, + 1388, 1271, 1337, 1395, 1332, 0, 0, 228, 1398, 1331, + 0, 1361, 0, 1413, 1266, 1352, 0, 1269, 1273, 1409, + 1393, 1302, 274, 0, 0, 0, 0, 0, 0, 0, + 1328, 1339, 1373, 1377, 1322, 0, 0, 0, 0, 0, + 0, 0, 0, 1300, 0, 1348, 0, 0, 0, 1278, + 1270, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1326, 0, 0, 0, 0, 1281, 0, + 1301, 1374, 0, 1264, 296, 1275, 397, 256, 0, 448, + 1381, 1392, 1323, 616, 1394, 1321, 1320, 1368, 1279, 1387, + 1314, 361, 1277, 328, 197, 224, 0, 1312, 407, 456, + 468, 1386, 1297, 1306, 252, 1304, 466, 421, 594, 232, + 283, 453, 427, 464, 435, 286, 1347, 1366, 465, 368, + 577, 445, 591, 617, 618, 262, 401, 603, 514, 611, + 635, 225, 259, 415, 499, 597, 488, 393, 573, 574, + 327, 487, 294, 201, 365, 623, 223, 474, 367, 241, + 230, 579, 600, 288, 451, 630, 212, 509, 589, 238, + 478, 0, 0, 638, 246, 498, 214, 586, 497, 389, + 324, 325, 213, 0, 452, 267, 292, 0, 0, 257, + 410, 581, 582, 255, 639, 227, 610, 219, 1276, 609, + 403, 576, 587, 390, 379, 218, 585, 388, 378, 332, + 351, 352, 279, 305, 442, 371, 443, 304, 306, 399, + 398, 400, 206, 598, 0, 207, 0, 493, 599, 640, + 447, 211, 233, 234, 236, 1292, 278, 282, 290, 293, + 301, 302, 311, 363, 414, 441, 437, 446, 1382, 571, + 592, 604, 615, 621, 622, 624, 625, 626, 627, 628, + 631, 629, 402, 309, 489, 331, 369, 1371, 1412, 420, + 467, 239, 596, 490, 199, 1286, 1291, 1284, 0, 253, + 254, 1353, 567, 1287, 1285, 1342, 1343, 1288, 1403, 1404, + 1405, 1390, 641, 642, 643, 644, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, + 636, 500, 506, 501, 502, 503, 504, 505, 0, 507, + 1375, 1280, 0, 1289, 1290, 1384, 583, 584, 659, 380, + 480, 593, 333, 345, 348, 338, 357, 0, 358, 334, + 335, 340, 342, 343, 344, 349, 350, 354, 360, 248, + 209, 386, 394, 570, 310, 215, 216, 217, 516, 517, + 518, 519, 607, 608, 612, 204, 457, 458, 459, 460, + 291, 602, 307, 463, 462, 329, 330, 375, 444, 532, + 534, 545, 549, 551, 553, 559, 562, 533, 535, 546, + 550, 552, 554, 560, 563, 522, 524, 526, 528, 541, + 540, 537, 565, 566, 543, 548, 527, 539, 544, 557, + 564, 561, 521, 525, 529, 538, 556, 555, 536, 547, + 558, 542, 530, 523, 531, 1346, 196, 220, 364, 1408, + 449, 287, 637, 606, 601, 205, 222, 1283, 261, 1295, + 1303, 0, 1309, 1317, 1318, 1330, 1333, 1334, 1335, 1336, + 1354, 1355, 1357, 1365, 1367, 1370, 1372, 1379, 1391, 1411, + 198, 200, 208, 221, 231, 235, 242, 260, 275, 277, + 284, 297, 308, 316, 317, 320, 326, 376, 382, 383, + 384, 385, 404, 405, 406, 409, 412, 413, 416, 418, + 419, 422, 426, 430, 431, 432, 434, 436, 438, 450, + 455, 469, 470, 471, 472, 473, 476, 477, 482, 483, + 484, 485, 486, 494, 495, 508, 578, 580, 595, 613, + 619, 475, 299, 300, 439, 440, 312, 313, 633, 634, + 298, 590, 620, 588, 632, 614, 433, 374, 1345, 1351, + 377, 280, 303, 318, 1360, 605, 496, 226, 461, 289, + 250, 1378, 1380, 210, 245, 229, 258, 273, 276, 322, + 387, 395, 424, 429, 295, 270, 243, 454, 240, 479, + 511, 512, 513, 515, 391, 265, 428, 1341, 1369, 372, + 568, 569, 314, 392, 0, 0, 0, 1397, 1383, 520, + 0, 1325, 1400, 1294, 1313, 1410, 1316, 1319, 1362, 1272, + 1340, 411, 1310, 1265, 1298, 1267, 1305, 1268, 1296, 1327, + 269, 1293, 1385, 1344, 1399, 362, 266, 1274, 1299, 425, + 1315, 203, 1364, 481, 251, 373, 370, 575, 281, 272, + 268, 249, 315, 381, 423, 510, 417, 1406, 366, 1350, + 0, 491, 396, 0, 0, 0, 1329, 1389, 1338, 1376, + 1324, 1363, 1282, 1349, 1401, 1311, 1359, 1402, 321, 247, + 323, 202, 408, 492, 285, 0, 0, 0, 0, 0, + 194, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 237, 0, 0, 244, 0, 0, 0, 347, 356, 355, + 336, 337, 339, 341, 346, 353, 359, 1307, 1356, 1396, + 1308, 1358, 264, 319, 271, 263, 572, 1407, 1388, 1271, + 1337, 1395, 1332, 0, 0, 228, 1398, 1331, 0, 1361, + 0, 1413, 1266, 1352, 0, 1269, 1273, 1409, 1393, 1302, + 274, 0, 0, 0, 0, 0, 0, 0, 1328, 1339, + 1373, 1377, 1322, 0, 0, 0, 0, 0, 0, 0, + 0, 1300, 0, 1348, 0, 0, 0, 1278, 1270, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1326, 0, 0, 0, 0, 1281, 0, 1301, 1374, + 0, 1264, 296, 1275, 397, 256, 0, 448, 1381, 1392, + 1323, 616, 1394, 1321, 1320, 1368, 1279, 1387, 1314, 361, + 1277, 328, 197, 224, 0, 1312, 407, 456, 468, 1386, + 1297, 1306, 252, 1304, 466, 421, 594, 232, 283, 453, + 427, 464, 435, 286, 1347, 1366, 465, 368, 577, 445, + 591, 617, 618, 262, 401, 603, 514, 611, 635, 225, + 259, 415, 499, 597, 488, 393, 573, 574, 327, 487, + 294, 201, 365, 623, 223, 474, 367, 241, 230, 579, + 600, 288, 451, 630, 212, 509, 589, 238, 478, 0, + 0, 638, 246, 498, 214, 586, 497, 389, 324, 325, + 213, 0, 452, 267, 292, 0, 0, 257, 410, 581, + 582, 255, 639, 227, 610, 219, 1276, 609, 403, 576, + 587, 390, 379, 218, 585, 388, 378, 332, 351, 352, + 279, 305, 442, 371, 443, 304, 306, 399, 398, 400, + 206, 598, 0, 207, 0, 493, 599, 640, 447, 211, + 233, 234, 236, 1292, 278, 282, 290, 293, 301, 302, + 311, 363, 414, 441, 437, 446, 1382, 571, 592, 604, + 615, 621, 622, 624, 625, 626, 627, 628, 631, 629, + 402, 309, 489, 331, 369, 1371, 1412, 420, 467, 239, + 596, 490, 199, 1286, 1291, 1284, 0, 253, 254, 1353, + 567, 1287, 1285, 1342, 1343, 1288, 1403, 1404, 1405, 1390, + 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, 636, 500, + 506, 501, 502, 503, 504, 505, 0, 507, 1375, 1280, + 0, 1289, 1290, 1384, 583, 584, 659, 380, 480, 593, + 333, 345, 348, 338, 357, 0, 358, 334, 335, 340, + 342, 343, 344, 349, 350, 354, 360, 248, 209, 386, + 394, 570, 310, 215, 216, 217, 516, 517, 518, 519, + 607, 608, 612, 204, 457, 458, 459, 460, 291, 602, + 307, 463, 462, 329, 330, 375, 444, 532, 534, 545, + 549, 551, 553, 559, 562, 533, 535, 546, 550, 552, + 554, 560, 563, 522, 524, 526, 528, 541, 540, 537, + 565, 566, 543, 548, 527, 539, 544, 557, 564, 561, + 521, 525, 529, 538, 556, 555, 536, 547, 558, 542, + 530, 523, 531, 1346, 196, 220, 364, 1408, 449, 287, + 637, 606, 601, 205, 222, 1283, 261, 1295, 1303, 0, + 1309, 1317, 1318, 1330, 1333, 1334, 1335, 1336, 1354, 1355, + 1357, 1365, 1367, 1370, 1372, 1379, 1391, 1411, 198, 200, + 208, 221, 231, 235, 242, 260, 275, 277, 284, 297, + 308, 316, 317, 320, 326, 376, 382, 383, 384, 385, + 404, 405, 406, 409, 412, 413, 416, 418, 419, 422, + 426, 430, 431, 432, 434, 436, 438, 450, 455, 469, + 470, 471, 472, 473, 476, 477, 482, 483, 484, 485, + 486, 494, 495, 508, 578, 580, 595, 613, 619, 475, + 299, 300, 439, 440, 312, 313, 633, 634, 298, 590, + 620, 588, 632, 614, 433, 374, 1345, 1351, 377, 280, + 303, 318, 1360, 605, 496, 226, 461, 289, 250, 1378, + 1380, 210, 245, 229, 258, 273, 276, 322, 387, 395, + 424, 429, 295, 270, 243, 454, 240, 479, 511, 512, + 513, 515, 391, 265, 428, 1341, 1369, 372, 568, 569, + 314, 392, 0, 0, 0, 1397, 1383, 520, 0, 1325, + 1400, 1294, 1313, 1410, 1316, 1319, 1362, 1272, 1340, 411, + 1310, 1265, 1298, 1267, 1305, 1268, 1296, 1327, 269, 1293, + 1385, 1344, 1399, 362, 266, 1274, 1299, 425, 1315, 203, + 1364, 481, 251, 373, 370, 575, 281, 272, 268, 249, + 315, 381, 423, 510, 417, 1406, 366, 1350, 0, 491, + 396, 0, 0, 0, 1329, 1389, 1338, 1376, 1324, 1363, + 1282, 1349, 1401, 1311, 1359, 1402, 321, 247, 323, 202, + 408, 492, 285, 0, 0, 0, 0, 0, 707, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 237, 0, + 0, 244, 0, 0, 0, 347, 356, 355, 336, 337, + 339, 341, 346, 353, 359, 1307, 1356, 1396, 1308, 1358, + 264, 319, 271, 263, 572, 1407, 1388, 1271, 1337, 1395, + 1332, 0, 0, 228, 1398, 1331, 0, 1361, 0, 1413, + 1266, 1352, 0, 1269, 1273, 1409, 1393, 1302, 274, 0, + 0, 0, 0, 0, 0, 0, 1328, 1339, 1373, 1377, + 1322, 0, 0, 0, 0, 0, 0, 0, 0, 1300, + 0, 1348, 0, 0, 0, 1278, 1270, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1326, + 0, 0, 0, 0, 1281, 0, 1301, 1374, 0, 1264, + 296, 1275, 397, 256, 0, 448, 1381, 1392, 1323, 616, + 1394, 1321, 1320, 1368, 1279, 1387, 1314, 361, 1277, 328, + 197, 224, 0, 1312, 407, 456, 468, 1386, 1297, 1306, + 252, 1304, 466, 421, 594, 232, 283, 453, 427, 464, + 435, 286, 1347, 1366, 465, 368, 577, 445, 591, 617, + 618, 262, 401, 603, 514, 611, 635, 225, 259, 415, + 499, 597, 488, 393, 573, 574, 327, 487, 294, 201, + 365, 623, 223, 474, 367, 241, 230, 579, 600, 288, + 451, 630, 212, 509, 589, 238, 478, 0, 0, 638, + 246, 498, 214, 586, 497, 389, 324, 325, 213, 0, + 452, 267, 292, 0, 0, 257, 410, 581, 582, 255, + 639, 227, 610, 219, 1276, 609, 403, 576, 587, 390, + 379, 218, 585, 388, 378, 332, 351, 352, 279, 305, + 442, 371, 443, 304, 306, 399, 398, 400, 206, 598, + 0, 207, 0, 493, 599, 640, 447, 211, 233, 234, + 236, 1292, 278, 282, 290, 293, 301, 302, 311, 363, + 414, 441, 437, 446, 1382, 571, 592, 604, 615, 621, + 622, 624, 625, 626, 627, 628, 631, 629, 402, 309, + 489, 331, 369, 1371, 1412, 420, 467, 239, 596, 490, + 199, 1286, 1291, 1284, 0, 253, 254, 1353, 567, 1287, + 1285, 1342, 1343, 1288, 1403, 1404, 1405, 1390, 641, 642, + 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, + 653, 654, 655, 656, 657, 658, 636, 500, 506, 501, + 502, 503, 504, 505, 0, 507, 1375, 1280, 0, 1289, + 1290, 1384, 583, 584, 659, 380, 480, 593, 333, 345, + 348, 338, 357, 0, 358, 334, 335, 340, 342, 343, + 344, 349, 350, 354, 360, 248, 209, 386, 394, 570, + 310, 215, 216, 217, 516, 517, 518, 519, 607, 608, + 612, 204, 457, 458, 459, 460, 291, 602, 307, 463, + 462, 329, 330, 375, 444, 532, 534, 545, 549, 551, + 553, 559, 562, 533, 535, 546, 550, 552, 554, 560, + 563, 522, 524, 526, 528, 541, 540, 537, 565, 566, + 543, 548, 527, 539, 544, 557, 564, 561, 521, 525, + 529, 538, 556, 555, 536, 547, 558, 542, 530, 523, + 531, 1346, 196, 220, 364, 1408, 449, 287, 637, 606, + 601, 205, 222, 1283, 261, 1295, 1303, 0, 1309, 1317, + 1318, 1330, 1333, 1334, 1335, 1336, 1354, 1355, 1357, 1365, + 1367, 1370, 1372, 1379, 1391, 1411, 198, 200, 208, 221, + 231, 235, 242, 260, 275, 277, 284, 297, 308, 316, + 317, 320, 326, 376, 382, 383, 384, 385, 404, 405, + 406, 409, 412, 413, 416, 418, 419, 422, 426, 430, + 431, 432, 434, 436, 438, 450, 455, 469, 470, 471, + 472, 473, 476, 477, 482, 483, 484, 485, 486, 494, + 495, 508, 578, 580, 595, 613, 619, 475, 299, 300, + 439, 440, 312, 313, 633, 634, 298, 590, 620, 588, + 632, 614, 433, 374, 1345, 1351, 377, 280, 303, 318, + 1360, 605, 496, 226, 461, 289, 250, 1378, 1380, 210, + 245, 229, 258, 273, 276, 322, 387, 395, 424, 429, + 295, 270, 243, 454, 240, 479, 511, 512, 513, 515, + 391, 265, 428, 1341, 1369, 372, 568, 569, 314, 392, + 0, 0, 0, 1397, 1383, 520, 0, 1325, 1400, 1294, + 1313, 1410, 1316, 1319, 1362, 1272, 1340, 411, 1310, 1265, + 1298, 1267, 1305, 1268, 1296, 1327, 269, 1293, 1385, 1344, + 1399, 362, 266, 1274, 1299, 425, 1315, 203, 1364, 481, + 251, 373, 370, 575, 281, 272, 268, 249, 315, 381, + 423, 510, 417, 1406, 366, 1350, 0, 491, 396, 0, + 0, 0, 1329, 1389, 1338, 1376, 1324, 1363, 1282, 1349, + 1401, 1311, 1359, 1402, 321, 247, 323, 202, 408, 492, + 285, 0, 0, 0, 0, 0, 941, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 237, 0, 0, 244, + 0, 0, 0, 347, 356, 355, 336, 337, 339, 341, + 346, 353, 359, 1307, 1356, 1396, 1308, 1358, 264, 319, + 271, 263, 572, 1407, 1388, 1271, 1337, 1395, 1332, 0, + 0, 228, 1398, 1331, 0, 1361, 0, 1413, 1266, 1352, + 0, 1269, 1273, 1409, 1393, 1302, 274, 0, 0, 0, + 0, 0, 0, 0, 1328, 1339, 1373, 1377, 1322, 0, + 0, 0, 0, 0, 0, 0, 0, 1300, 0, 1348, + 0, 0, 0, 1278, 1270, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1326, 0, 0, + 0, 0, 1281, 0, 1301, 1374, 0, 1264, 296, 1275, + 397, 256, 0, 448, 1381, 1392, 1323, 616, 1394, 1321, + 1320, 1368, 1279, 1387, 1314, 361, 1277, 328, 197, 224, + 0, 1312, 407, 456, 468, 1386, 1297, 1306, 252, 1304, + 466, 421, 594, 232, 283, 453, 427, 464, 435, 286, + 1347, 1366, 465, 368, 577, 445, 591, 617, 618, 262, + 401, 603, 514, 611, 635, 225, 259, 415, 499, 597, + 488, 393, 573, 574, 327, 487, 294, 201, 365, 623, + 223, 474, 367, 241, 230, 579, 600, 288, 451, 630, + 212, 509, 589, 238, 478, 0, 0, 638, 246, 498, + 214, 586, 497, 389, 324, 325, 213, 0, 452, 267, + 292, 0, 0, 257, 410, 581, 582, 255, 639, 227, + 610, 219, 1276, 609, 403, 576, 587, 390, 379, 218, + 585, 388, 378, 332, 351, 352, 279, 305, 442, 371, + 443, 304, 306, 399, 398, 400, 206, 598, 0, 207, + 0, 493, 599, 640, 447, 211, 233, 234, 236, 1292, + 278, 282, 290, 293, 301, 302, 311, 363, 414, 441, + 437, 446, 1382, 571, 592, 604, 615, 621, 622, 624, + 625, 626, 627, 628, 631, 629, 402, 309, 489, 331, + 369, 1371, 1412, 420, 467, 239, 596, 490, 199, 1286, + 1291, 1284, 0, 253, 254, 1353, 567, 1287, 1285, 1342, + 1343, 1288, 1403, 1404, 1405, 1390, 641, 642, 643, 644, + 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 636, 500, 506, 501, 502, 503, + 504, 505, 0, 507, 1375, 1280, 0, 1289, 1290, 1384, + 583, 584, 659, 380, 480, 593, 333, 345, 348, 338, + 357, 0, 358, 334, 335, 340, 342, 343, 344, 349, + 350, 354, 360, 248, 209, 386, 394, 570, 310, 215, + 216, 217, 516, 517, 518, 519, 607, 608, 612, 204, + 457, 458, 459, 460, 291, 602, 307, 463, 462, 329, + 330, 375, 444, 532, 534, 545, 549, 551, 553, 559, + 562, 533, 535, 546, 550, 552, 554, 560, 563, 522, + 524, 526, 528, 541, 540, 537, 565, 566, 543, 548, + 527, 539, 544, 557, 564, 561, 521, 525, 529, 538, + 556, 555, 536, 547, 558, 542, 530, 523, 531, 1346, + 196, 220, 364, 1408, 449, 287, 637, 606, 601, 205, + 222, 1283, 261, 1295, 1303, 0, 1309, 1317, 1318, 1330, + 1333, 1334, 1335, 1336, 1354, 1355, 1357, 1365, 1367, 1370, + 1372, 1379, 1391, 1411, 198, 200, 208, 221, 231, 235, + 242, 260, 275, 277, 284, 297, 308, 316, 317, 320, + 326, 376, 382, 383, 384, 385, 404, 405, 406, 409, + 412, 413, 416, 418, 419, 422, 426, 430, 431, 432, + 434, 436, 438, 450, 455, 469, 470, 471, 472, 473, + 476, 477, 482, 483, 484, 485, 486, 494, 495, 508, + 578, 580, 595, 613, 619, 475, 299, 300, 439, 440, + 312, 313, 633, 634, 298, 590, 620, 588, 632, 614, + 433, 374, 1345, 1351, 377, 280, 303, 318, 1360, 605, + 496, 226, 461, 289, 250, 1378, 1380, 210, 245, 229, + 258, 273, 276, 322, 387, 395, 424, 429, 295, 270, + 243, 454, 240, 479, 511, 512, 513, 515, 391, 265, + 428, 1341, 1369, 372, 568, 569, 314, 392, 0, 0, + 0, 0, 0, 520, 0, 761, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 411, 0, 0, 0, 0, + 749, 0, 0, 0, 269, 754, 0, 0, 0, 362, + 266, 0, 0, 425, 0, 203, 0, 481, 251, 373, + 370, 575, 281, 272, 268, 249, 315, 381, 423, 510, + 417, 760, 366, 0, 0, 491, 396, 0, 0, 0, + 0, 0, 756, 757, 0, 0, 0, 0, 0, 0, + 0, 0, 321, 247, 323, 202, 408, 492, 285, 0, + 95, 0, 0, 957, 941, 733, 907, 945, 958, 959, + 960, 961, 946, 0, 237, 947, 948, 244, 949, 0, + 906, 791, 793, 792, 856, 857, 858, 859, 860, 861, + 862, 789, 954, 962, 963, 0, 264, 319, 271, 263, + 572, 0, 0, 2175, 2176, 2177, 0, 0, 0, 228, + 0, 0, 0, 0, 0, 0, 0, 729, 746, 0, + 759, 0, 0, 0, 274, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 743, 744, 0, 0, 0, 0, 901, 0, 745, + 0, 0, 753, 964, 965, 966, 967, 968, 969, 970, + 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, + 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, + 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, + 1001, 1002, 1003, 1004, 1005, 755, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 296, 0, 397, 256, + 0, 448, 900, 0, 0, 616, 0, 0, 898, 0, + 0, 0, 0, 361, 0, 328, 197, 224, 0, 0, + 407, 456, 468, 0, 0, 0, 951, 0, 466, 421, + 594, 232, 283, 453, 427, 464, 435, 286, 0, 0, + 465, 368, 577, 445, 591, 617, 618, 262, 401, 603, + 514, 611, 635, 225, 259, 415, 499, 597, 488, 393, + 573, 574, 327, 487, 294, 201, 365, 623, 223, 474, + 367, 241, 230, 579, 600, 288, 451, 630, 212, 509, + 589, 238, 478, 0, 0, 638, 246, 498, 214, 586, + 497, 389, 324, 325, 213, 0, 452, 267, 292, 0, + 0, 257, 410, 952, 953, 255, 639, 797, 610, 219, + 0, 609, 403, 576, 587, 390, 379, 218, 585, 388, + 378, 332, 805, 806, 279, 305, 882, 881, 880, 304, + 306, 878, 879, 877, 206, 598, 0, 207, 0, 493, + 599, 640, 447, 211, 233, 234, 236, 0, 278, 282, + 290, 293, 301, 302, 311, 363, 414, 441, 437, 446, + 0, 571, 592, 604, 615, 621, 622, 624, 625, 626, + 627, 628, 631, 629, 402, 309, 489, 331, 369, 0, + 0, 420, 467, 239, 596, 490, 888, 910, 899, 765, + 766, 889, 890, 914, 891, 768, 769, 911, 912, 762, + 763, 767, 913, 915, 641, 642, 643, 644, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 636, 500, 506, 501, 502, 503, 504, 505, + 0, 507, 902, 752, 751, 0, 758, 0, 787, 788, + 790, 794, 795, 796, 807, 854, 855, 863, 865, 866, + 864, 867, 868, 869, 872, 873, 874, 875, 870, 871, + 876, 770, 774, 771, 772, 773, 785, 775, 776, 777, + 778, 779, 780, 781, 782, 783, 784, 786, 925, 926, + 927, 928, 929, 930, 800, 804, 803, 801, 802, 798, + 799, 826, 825, 827, 828, 829, 830, 831, 832, 834, + 833, 835, 836, 837, 838, 839, 840, 808, 809, 812, + 813, 811, 810, 814, 823, 824, 815, 816, 817, 818, + 819, 820, 822, 821, 841, 842, 843, 844, 845, 847, + 846, 850, 851, 849, 848, 853, 852, 750, 196, 220, + 364, 0, 449, 287, 637, 606, 601, 205, 222, 916, + 261, 917, 0, 0, 921, 0, 0, 0, 923, 922, + 0, 924, 886, 885, 0, 0, 918, 919, 0, 920, + 0, 0, 198, 200, 208, 221, 231, 235, 242, 260, + 275, 277, 284, 297, 308, 316, 317, 320, 326, 376, + 382, 383, 384, 385, 404, 405, 406, 409, 412, 413, + 416, 418, 419, 422, 426, 430, 431, 432, 434, 436, + 438, 450, 455, 469, 470, 471, 472, 473, 476, 477, + 482, 483, 484, 485, 486, 494, 495, 508, 578, 580, + 595, 613, 619, 475, 931, 932, 933, 934, 935, 936, + 937, 938, 298, 590, 620, 588, 632, 614, 433, 374, + 0, 0, 377, 280, 303, 318, 0, 605, 496, 226, + 461, 289, 250, 956, 0, 210, 245, 229, 258, 273, + 276, 322, 387, 395, 424, 429, 295, 270, 243, 454, + 240, 479, 511, 512, 513, 515, 391, 265, 428, 392, + 0, 372, 568, 569, 314, 520, 0, 761, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 411, 0, 0, + 0, 0, 749, 0, 0, 0, 269, 754, 0, 0, + 0, 362, 266, 0, 0, 425, 0, 203, 0, 481, + 251, 373, 370, 575, 281, 272, 268, 249, 315, 381, + 423, 510, 417, 760, 366, 0, 0, 491, 396, 0, + 0, 0, 0, 0, 756, 757, 0, 0, 0, 0, + 0, 0, 2381, 0, 321, 247, 323, 202, 408, 492, + 285, 0, 95, 0, 0, 957, 941, 733, 907, 945, + 958, 959, 960, 961, 946, 0, 237, 947, 948, 244, + 949, 0, 906, 791, 793, 792, 856, 857, 858, 859, + 860, 861, 862, 789, 954, 962, 963, 2382, 264, 319, + 271, 263, 572, 0, 0, 0, 0, 0, 0, 0, + 0, 228, 0, 0, 0, 0, 0, 0, 0, 729, + 746, 0, 759, 0, 0, 0, 274, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 743, 744, 0, 0, 0, 0, 901, + 0, 745, 0, 0, 753, 964, 965, 966, 967, 968, + 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, + 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, + 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, + 999, 1000, 1001, 1002, 1003, 1004, 1005, 755, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 296, 0, + 397, 256, 0, 448, 900, 0, 0, 616, 0, 0, + 898, 0, 0, 0, 0, 361, 0, 328, 197, 224, + 0, 0, 407, 456, 468, 0, 0, 0, 951, 0, + 466, 421, 594, 232, 283, 453, 427, 464, 435, 286, + 0, 0, 465, 368, 577, 445, 591, 617, 618, 262, + 401, 603, 514, 611, 635, 225, 259, 415, 499, 597, + 488, 393, 573, 574, 327, 487, 294, 201, 365, 623, + 223, 474, 367, 241, 230, 579, 600, 288, 451, 630, + 212, 509, 589, 238, 478, 0, 0, 638, 246, 498, + 214, 586, 497, 389, 324, 325, 213, 0, 452, 267, + 292, 0, 0, 257, 410, 952, 953, 255, 639, 797, + 610, 219, 0, 609, 403, 576, 587, 390, 379, 218, + 585, 388, 378, 332, 805, 806, 279, 305, 882, 881, + 880, 304, 306, 878, 879, 877, 206, 598, 0, 207, + 0, 493, 599, 640, 447, 211, 233, 234, 236, 0, + 278, 282, 290, 293, 301, 302, 311, 363, 414, 441, + 437, 446, 0, 571, 592, 604, 615, 621, 622, 624, + 625, 626, 627, 628, 631, 629, 402, 309, 489, 331, + 369, 0, 0, 420, 467, 239, 596, 490, 888, 910, + 899, 765, 766, 889, 890, 914, 891, 768, 769, 911, + 912, 762, 763, 767, 913, 915, 641, 642, 643, 644, + 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 636, 500, 506, 501, 502, 503, + 504, 505, 0, 507, 902, 752, 751, 0, 758, 0, + 787, 788, 790, 794, 795, 796, 807, 854, 855, 863, + 865, 866, 864, 867, 868, 869, 872, 873, 874, 875, + 870, 871, 876, 770, 774, 771, 772, 773, 785, 775, + 776, 777, 778, 779, 780, 781, 782, 783, 784, 786, + 925, 926, 927, 928, 929, 930, 800, 804, 803, 801, + 802, 798, 799, 826, 825, 827, 828, 829, 830, 831, + 832, 834, 833, 835, 836, 837, 838, 839, 840, 808, + 809, 812, 813, 811, 810, 814, 823, 824, 815, 816, + 817, 818, 819, 820, 822, 821, 841, 842, 843, 844, + 845, 847, 846, 850, 851, 849, 848, 853, 852, 750, + 196, 220, 364, 0, 449, 287, 637, 606, 601, 205, + 222, 916, 261, 917, 0, 0, 921, 0, 0, 0, + 923, 922, 0, 924, 886, 885, 0, 0, 918, 919, + 0, 920, 0, 0, 198, 200, 208, 221, 231, 235, + 242, 260, 275, 277, 284, 297, 308, 316, 317, 320, + 326, 376, 382, 383, 384, 385, 404, 405, 406, 409, + 412, 413, 416, 418, 419, 422, 426, 430, 431, 432, + 434, 436, 438, 450, 455, 469, 470, 471, 472, 473, + 476, 477, 482, 483, 484, 485, 486, 494, 495, 508, + 578, 580, 595, 613, 619, 475, 931, 932, 933, 934, + 935, 936, 937, 938, 298, 590, 620, 588, 632, 614, + 433, 374, 0, 0, 377, 280, 303, 318, 0, 605, + 496, 226, 461, 289, 250, 956, 0, 210, 245, 229, + 258, 273, 276, 322, 387, 395, 424, 429, 295, 270, + 243, 454, 240, 479, 511, 512, 513, 515, 391, 265, + 428, 0, 392, 372, 568, 569, 314, 86, 520, 0, + 761, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 411, 0, 0, 0, 0, 749, 0, 0, 0, 269, + 754, 0, 0, 0, 362, 266, 0, 0, 425, 0, + 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, + 249, 315, 381, 423, 510, 417, 760, 366, 0, 0, + 491, 396, 0, 0, 0, 0, 0, 756, 757, 0, + 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, + 202, 408, 492, 285, 0, 95, 0, 0, 957, 941, + 733, 907, 945, 958, 959, 960, 961, 946, 0, 237, + 947, 948, 244, 949, 0, 906, 791, 793, 792, 856, + 857, 858, 859, 860, 861, 862, 789, 954, 962, 963, + 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, + 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, + 0, 0, 729, 746, 0, 759, 0, 0, 0, 274, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 743, 744, 0, 0, + 0, 0, 901, 0, 745, 0, 0, 753, 964, 965, + 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, + 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, + 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, + 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, + 755, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 296, 0, 397, 256, 0, 448, 900, 0, 0, + 616, 0, 0, 898, 0, 0, 0, 0, 361, 0, + 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, + 0, 951, 0, 466, 421, 594, 232, 283, 453, 427, + 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, + 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, + 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, + 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, + 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, + 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, + 0, 452, 267, 292, 0, 0, 257, 410, 952, 953, + 255, 639, 797, 610, 219, 0, 609, 403, 576, 587, + 390, 379, 218, 585, 388, 378, 332, 805, 806, 279, + 305, 882, 881, 880, 304, 306, 878, 879, 877, 206, + 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, + 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, + 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, + 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, + 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, + 490, 888, 910, 899, 765, 766, 889, 890, 914, 891, + 768, 769, 911, 912, 762, 763, 767, 913, 915, 641, + 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, + 501, 502, 503, 504, 505, 0, 507, 902, 752, 751, + 0, 758, 0, 787, 788, 790, 794, 795, 796, 807, + 854, 855, 863, 865, 866, 864, 867, 868, 869, 872, + 873, 874, 875, 870, 871, 876, 770, 774, 771, 772, + 773, 785, 775, 776, 777, 778, 779, 780, 781, 782, + 783, 784, 786, 925, 926, 927, 928, 929, 930, 800, + 804, 803, 801, 802, 798, 799, 826, 825, 827, 828, + 829, 830, 831, 832, 834, 833, 835, 836, 837, 838, + 839, 840, 808, 809, 812, 813, 811, 810, 814, 823, + 824, 815, 816, 817, 818, 819, 820, 822, 821, 841, + 842, 843, 844, 845, 847, 846, 850, 851, 849, 848, + 853, 852, 750, 196, 220, 364, 94, 449, 287, 637, + 606, 601, 205, 222, 916, 261, 917, 0, 0, 921, + 0, 0, 0, 923, 922, 0, 924, 886, 885, 0, + 0, 918, 919, 0, 920, 0, 0, 198, 200, 208, + 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, + 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, + 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, + 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, + 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, + 494, 495, 508, 578, 580, 595, 613, 619, 475, 931, + 932, 933, 934, 935, 936, 937, 938, 298, 590, 620, + 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, + 318, 0, 605, 496, 226, 461, 289, 250, 956, 0, + 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, + 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, + 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, + 520, 0, 761, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 411, 0, 0, 0, 0, 749, 0, 0, + 0, 269, 754, 0, 0, 0, 362, 266, 0, 0, + 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, + 272, 268, 249, 315, 381, 423, 510, 417, 760, 366, + 0, 0, 491, 396, 0, 0, 0, 0, 0, 756, + 757, 0, 0, 0, 0, 0, 0, 0, 0, 321, + 247, 323, 202, 408, 492, 285, 0, 95, 0, 0, + 957, 941, 733, 907, 945, 958, 959, 960, 961, 946, + 0, 237, 947, 948, 244, 949, 0, 906, 791, 793, + 792, 856, 857, 858, 859, 860, 861, 862, 789, 954, + 962, 963, 0, 264, 319, 271, 263, 572, 0, 0, + 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, + 0, 0, 0, 0, 729, 746, 0, 759, 0, 0, + 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 743, 744, + 0, 0, 0, 0, 901, 0, 745, 0, 0, 753, + 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, + 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, + 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, + 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, + 1004, 1005, 755, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 296, 0, 397, 256, 0, 448, 900, + 0, 0, 616, 0, 0, 898, 0, 0, 0, 0, + 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, + 0, 0, 0, 951, 0, 466, 421, 594, 232, 283, + 453, 427, 464, 435, 286, 3983, 0, 465, 368, 577, + 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, + 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, + 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, + 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, + 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, + 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, + 952, 953, 255, 639, 797, 610, 219, 0, 609, 403, + 576, 587, 390, 379, 218, 585, 388, 378, 332, 805, + 806, 279, 305, 882, 881, 880, 304, 306, 878, 879, + 877, 206, 598, 0, 207, 0, 493, 599, 640, 447, + 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, + 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, + 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, + 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, + 239, 596, 490, 888, 910, 899, 765, 766, 889, 890, + 914, 891, 768, 769, 911, 912, 762, 763, 767, 913, + 915, 641, 642, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, + 500, 506, 501, 502, 503, 504, 505, 0, 507, 902, + 752, 751, 0, 758, 0, 787, 788, 790, 794, 795, + 796, 807, 854, 855, 863, 865, 866, 864, 867, 868, + 869, 872, 873, 874, 875, 870, 871, 876, 770, 774, + 771, 772, 773, 785, 775, 776, 777, 778, 779, 780, + 781, 782, 783, 784, 786, 925, 926, 927, 928, 929, + 930, 800, 804, 803, 801, 802, 798, 799, 826, 825, + 827, 828, 829, 830, 831, 832, 834, 833, 835, 836, + 837, 838, 839, 840, 808, 809, 812, 813, 811, 810, + 814, 823, 824, 815, 816, 817, 818, 819, 820, 822, + 821, 841, 842, 843, 844, 845, 847, 846, 850, 851, + 849, 848, 853, 852, 750, 196, 220, 364, 0, 449, + 287, 637, 606, 601, 205, 222, 916, 261, 917, 0, + 0, 921, 0, 0, 0, 923, 922, 0, 924, 886, + 885, 0, 0, 918, 919, 0, 920, 0, 0, 198, + 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, + 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, + 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, + 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, + 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, + 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, + 475, 931, 932, 933, 934, 935, 936, 937, 938, 298, + 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, + 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, + 956, 0, 210, 245, 229, 258, 273, 276, 322, 387, + 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, + 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, + 569, 314, 520, 0, 761, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 411, 0, 0, 0, 0, 749, + 0, 0, 0, 269, 754, 0, 0, 0, 362, 266, + 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, + 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, + 760, 366, 0, 0, 491, 396, 0, 0, 0, 0, + 0, 756, 757, 0, 0, 0, 0, 0, 0, 0, + 0, 321, 247, 323, 202, 408, 492, 285, 0, 95, + 0, 1716, 957, 941, 733, 907, 945, 958, 959, 960, + 961, 946, 0, 237, 947, 948, 244, 949, 0, 906, + 791, 793, 792, 856, 857, 858, 859, 860, 861, 862, + 789, 954, 962, 963, 0, 264, 319, 271, 263, 572, + 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, + 0, 0, 0, 0, 0, 0, 729, 746, 0, 759, + 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 743, 744, 0, 0, 0, 0, 901, 0, 745, 0, + 0, 753, 964, 965, 966, 967, 968, 969, 970, 971, + 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, + 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, + 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, + 1002, 1003, 1004, 1005, 755, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, + 448, 900, 0, 0, 616, 0, 0, 898, 0, 0, + 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, + 456, 468, 0, 0, 0, 951, 0, 466, 421, 594, + 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, + 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, + 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, + 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, + 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, + 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, + 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, + 257, 410, 952, 953, 255, 639, 797, 610, 219, 0, + 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, + 332, 805, 806, 279, 305, 882, 881, 880, 304, 306, + 878, 879, 877, 206, 598, 0, 207, 0, 493, 599, + 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, + 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, + 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, + 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, + 420, 467, 239, 596, 490, 888, 910, 899, 765, 766, + 889, 890, 914, 891, 768, 769, 911, 912, 762, 763, + 767, 913, 915, 641, 642, 643, 644, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, + 507, 902, 752, 751, 0, 758, 0, 787, 788, 790, + 794, 795, 796, 807, 854, 855, 863, 865, 866, 864, + 867, 868, 869, 872, 873, 874, 875, 870, 871, 876, + 770, 774, 771, 772, 773, 785, 775, 776, 777, 778, + 779, 780, 781, 782, 783, 784, 786, 925, 926, 927, + 928, 929, 930, 800, 804, 803, 801, 802, 798, 799, + 826, 825, 827, 828, 829, 830, 831, 832, 834, 833, + 835, 836, 837, 838, 839, 840, 808, 809, 812, 813, + 811, 810, 814, 823, 824, 815, 816, 817, 818, 819, + 820, 822, 821, 841, 842, 843, 844, 845, 847, 846, + 850, 851, 849, 848, 853, 852, 750, 196, 220, 364, + 0, 449, 287, 637, 606, 601, 205, 222, 916, 261, + 917, 0, 0, 921, 0, 0, 0, 923, 922, 0, + 924, 886, 885, 0, 0, 918, 919, 0, 920, 0, + 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, + 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, + 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, + 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, + 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, + 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, + 613, 619, 475, 931, 932, 933, 934, 935, 936, 937, + 938, 298, 590, 620, 588, 632, 614, 433, 374, 0, + 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, + 289, 250, 956, 0, 210, 245, 229, 258, 273, 276, + 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, + 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, + 372, 568, 569, 314, 520, 0, 761, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 411, 0, 0, 0, + 0, 749, 0, 0, 0, 269, 754, 0, 0, 0, + 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, + 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, + 510, 417, 760, 366, 0, 0, 491, 396, 0, 0, + 0, 0, 0, 756, 757, 0, 0, 0, 0, 0, + 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, + 0, 95, 0, 0, 957, 941, 733, 907, 945, 958, + 959, 960, 961, 946, 0, 237, 947, 948, 244, 949, + 0, 906, 791, 793, 792, 856, 857, 858, 859, 860, + 861, 862, 789, 954, 962, 963, 0, 264, 319, 271, + 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, + 228, 0, 0, 0, 0, 0, 0, 0, 729, 746, + 0, 759, 0, 0, 0, 274, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 743, 744, 1049, 0, 0, 0, 901, 0, + 745, 0, 0, 753, 964, 965, 966, 967, 968, 969, + 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, + 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, + 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, + 1000, 1001, 1002, 1003, 1004, 1005, 755, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, + 256, 0, 448, 900, 0, 0, 616, 0, 0, 898, + 0, 0, 0, 0, 361, 0, 328, 197, 224, 0, + 0, 407, 456, 468, 0, 0, 0, 951, 0, 466, + 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, + 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, + 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, + 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, + 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, + 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, + 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, + 0, 0, 257, 410, 952, 953, 255, 639, 797, 610, + 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, + 388, 378, 332, 805, 806, 279, 305, 882, 881, 880, + 304, 306, 878, 879, 877, 206, 598, 0, 207, 0, + 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, + 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, + 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, + 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, + 0, 0, 420, 467, 239, 596, 490, 888, 910, 899, + 765, 766, 889, 890, 914, 891, 768, 769, 911, 912, + 762, 763, 767, 913, 915, 641, 642, 643, 644, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, + 505, 0, 507, 902, 752, 751, 0, 758, 0, 787, + 788, 790, 794, 795, 796, 807, 854, 855, 863, 865, + 866, 864, 867, 868, 869, 872, 873, 874, 875, 870, + 871, 876, 770, 774, 771, 772, 773, 785, 775, 776, + 777, 778, 779, 780, 781, 782, 783, 784, 786, 925, + 926, 927, 928, 929, 930, 800, 804, 803, 801, 802, + 798, 799, 826, 825, 827, 828, 829, 830, 831, 832, + 834, 833, 835, 836, 837, 838, 839, 840, 808, 809, + 812, 813, 811, 810, 814, 823, 824, 815, 816, 817, + 818, 819, 820, 822, 821, 841, 842, 843, 844, 845, + 847, 846, 850, 851, 849, 848, 853, 852, 750, 196, + 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, + 916, 261, 917, 0, 0, 921, 0, 0, 0, 923, + 922, 0, 924, 886, 885, 0, 0, 918, 919, 0, + 920, 0, 0, 198, 200, 208, 221, 231, 235, 242, + 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, + 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, + 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, + 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, + 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, + 580, 595, 613, 619, 475, 931, 932, 933, 934, 935, + 936, 937, 938, 298, 590, 620, 588, 632, 614, 433, + 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, + 226, 461, 289, 250, 956, 0, 210, 245, 229, 258, + 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, + 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, + 392, 0, 372, 568, 569, 314, 520, 0, 761, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, + 0, 0, 0, 749, 0, 0, 0, 269, 754, 0, + 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, + 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, + 381, 423, 510, 417, 760, 366, 0, 0, 491, 396, + 0, 0, 0, 0, 0, 756, 757, 0, 0, 0, + 0, 0, 0, 0, 0, 321, 247, 323, 202, 408, + 492, 285, 0, 95, 0, 0, 957, 941, 733, 907, + 945, 958, 959, 960, 961, 946, 0, 237, 947, 948, + 244, 949, 0, 906, 791, 793, 792, 856, 857, 858, + 859, 860, 861, 862, 789, 954, 962, 963, 0, 264, + 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, + 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, + 729, 746, 0, 759, 0, 0, 0, 274, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 743, 744, 0, 0, 0, 0, + 901, 0, 745, 0, 0, 753, 964, 965, 966, 967, + 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, + 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, + 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, + 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 755, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, + 0, 397, 256, 0, 448, 900, 0, 0, 616, 0, + 0, 898, 0, 0, 0, 0, 361, 0, 328, 197, + 224, 0, 0, 407, 456, 468, 0, 0, 0, 951, + 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, + 286, 0, 0, 465, 368, 577, 445, 591, 617, 618, + 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, + 597, 488, 393, 573, 574, 327, 487, 294, 201, 365, + 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, + 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, + 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, + 267, 292, 0, 0, 257, 410, 952, 953, 255, 639, + 797, 610, 219, 0, 609, 403, 576, 587, 390, 379, + 218, 585, 388, 378, 332, 805, 806, 279, 305, 882, + 881, 880, 304, 306, 878, 879, 877, 206, 598, 0, + 207, 0, 493, 599, 640, 447, 211, 233, 234, 236, + 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, + 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, + 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, + 331, 369, 0, 0, 420, 467, 239, 596, 490, 888, + 910, 899, 765, 766, 889, 890, 914, 891, 768, 769, + 911, 912, 762, 763, 767, 913, 915, 641, 642, 643, + 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, + 503, 504, 505, 0, 507, 902, 752, 751, 0, 758, + 0, 787, 788, 790, 794, 795, 796, 807, 854, 855, + 863, 865, 866, 864, 867, 868, 869, 872, 873, 874, + 875, 870, 871, 876, 770, 774, 771, 772, 773, 785, + 775, 776, 777, 778, 779, 780, 781, 782, 783, 784, + 786, 925, 926, 927, 928, 929, 930, 800, 804, 803, + 801, 802, 798, 799, 826, 825, 827, 828, 829, 830, + 831, 832, 834, 833, 835, 836, 837, 838, 839, 840, + 808, 809, 812, 813, 811, 810, 814, 823, 824, 815, + 816, 817, 818, 819, 820, 822, 821, 841, 842, 843, + 844, 845, 847, 846, 850, 851, 849, 848, 853, 852, + 750, 196, 220, 364, 0, 449, 287, 637, 606, 601, + 205, 222, 916, 261, 917, 0, 0, 921, 0, 0, + 0, 923, 922, 0, 924, 886, 885, 0, 0, 918, + 919, 0, 920, 0, 0, 198, 200, 208, 221, 231, + 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, + 320, 326, 376, 382, 383, 384, 385, 404, 405, 406, + 409, 412, 413, 416, 418, 419, 422, 426, 430, 431, + 432, 434, 436, 438, 450, 455, 469, 470, 471, 472, + 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, + 508, 578, 580, 595, 613, 619, 475, 931, 932, 933, + 934, 935, 936, 937, 938, 298, 590, 620, 588, 632, + 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, + 605, 496, 226, 461, 289, 250, 956, 0, 210, 245, + 229, 258, 273, 276, 322, 387, 395, 424, 429, 295, + 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, + 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, + 761, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 411, 0, 0, 0, 0, 749, 0, 0, 0, 269, + 754, 0, 0, 0, 362, 266, 0, 0, 425, 0, + 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, + 249, 315, 381, 423, 510, 417, 760, 366, 0, 0, + 491, 396, 0, 0, 0, 0, 0, 756, 757, 0, + 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, + 202, 408, 492, 285, 0, 95, 0, 0, 957, 941, + 733, 907, 945, 958, 959, 960, 961, 946, 0, 237, + 947, 948, 244, 949, 0, 906, 791, 793, 792, 856, + 857, 858, 859, 860, 861, 862, 789, 954, 962, 963, + 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, + 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, + 0, 0, 729, 746, 0, 759, 0, 0, 0, 274, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 743, 744, 0, 0, + 0, 0, 901, 0, 745, 0, 0, 753, 964, 965, + 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, + 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, + 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, + 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, + 3089, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 296, 0, 397, 256, 0, 448, 900, 0, 0, + 616, 0, 0, 898, 0, 0, 0, 0, 361, 0, + 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, + 0, 951, 0, 466, 421, 594, 232, 283, 453, 427, + 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, + 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, + 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, + 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, + 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, + 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, + 0, 452, 267, 292, 0, 0, 257, 410, 952, 953, + 255, 639, 797, 610, 219, 0, 609, 403, 576, 587, + 390, 379, 218, 585, 388, 378, 332, 805, 806, 279, + 305, 882, 881, 880, 304, 306, 878, 879, 877, 206, + 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, + 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, + 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, + 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, + 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, + 490, 888, 910, 899, 765, 766, 889, 890, 914, 891, + 768, 769, 911, 912, 762, 763, 767, 913, 915, 641, + 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, + 501, 502, 503, 504, 505, 0, 507, 902, 752, 751, + 0, 758, 0, 787, 788, 790, 794, 795, 796, 807, + 854, 855, 863, 865, 866, 864, 867, 868, 869, 872, + 873, 874, 875, 870, 871, 876, 770, 774, 771, 772, + 773, 785, 775, 776, 777, 778, 779, 780, 781, 782, + 783, 784, 786, 925, 926, 927, 928, 929, 930, 800, + 804, 803, 801, 802, 798, 799, 826, 825, 827, 828, + 829, 830, 831, 832, 834, 833, 835, 836, 837, 838, + 839, 840, 808, 809, 812, 813, 811, 810, 814, 823, + 824, 815, 816, 817, 818, 819, 820, 822, 821, 841, + 842, 843, 844, 845, 847, 846, 850, 851, 849, 848, + 853, 852, 750, 196, 220, 364, 0, 449, 287, 637, + 606, 601, 205, 222, 916, 261, 917, 0, 0, 921, + 0, 0, 0, 923, 922, 0, 924, 886, 885, 0, + 0, 918, 919, 0, 920, 0, 0, 198, 200, 208, + 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, + 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, + 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, + 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, + 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, + 494, 495, 508, 578, 580, 595, 613, 619, 475, 931, + 932, 933, 934, 935, 936, 937, 938, 298, 590, 620, + 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, + 318, 0, 605, 496, 226, 461, 289, 250, 956, 0, + 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, + 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, + 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, + 520, 0, 761, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 411, 0, 0, 0, 0, 749, 0, 0, + 0, 269, 754, 0, 0, 0, 362, 266, 0, 0, + 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, + 272, 268, 249, 315, 381, 423, 510, 417, 760, 366, + 0, 0, 491, 396, 0, 0, 0, 0, 0, 756, + 757, 0, 0, 0, 0, 0, 0, 0, 0, 321, + 247, 323, 202, 408, 492, 285, 0, 95, 0, 0, + 957, 941, 733, 907, 945, 958, 959, 960, 961, 946, + 0, 237, 947, 948, 244, 949, 0, 906, 791, 793, + 792, 856, 857, 858, 859, 860, 861, 862, 789, 954, + 962, 963, 0, 264, 319, 271, 263, 572, 0, 0, + 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, + 0, 0, 0, 0, 729, 746, 0, 759, 0, 0, + 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 743, 744, + 0, 0, 0, 0, 901, 0, 745, 0, 0, 753, + 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, + 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, + 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, + 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, + 1004, 1005, 3085, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 296, 0, 397, 256, 0, 448, 900, + 0, 0, 616, 0, 0, 898, 0, 0, 0, 0, + 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, + 0, 0, 0, 951, 0, 466, 421, 594, 232, 283, + 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, + 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, + 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, + 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, + 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, + 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, + 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, + 952, 953, 255, 639, 797, 610, 219, 0, 609, 403, + 576, 587, 390, 379, 218, 585, 388, 378, 332, 805, + 806, 279, 305, 882, 881, 880, 304, 306, 878, 879, + 877, 206, 598, 0, 207, 0, 493, 599, 640, 447, + 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, + 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, + 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, + 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, + 239, 596, 490, 888, 910, 899, 765, 766, 889, 890, + 914, 891, 768, 769, 911, 912, 762, 763, 767, 913, + 915, 641, 642, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, + 500, 506, 501, 502, 503, 504, 505, 0, 507, 902, + 752, 751, 0, 758, 0, 787, 788, 790, 794, 795, + 796, 807, 854, 855, 863, 865, 866, 864, 867, 868, + 869, 872, 873, 874, 875, 870, 871, 876, 770, 774, + 771, 772, 773, 785, 775, 776, 777, 778, 779, 780, + 781, 782, 783, 784, 786, 925, 926, 927, 928, 929, + 930, 800, 804, 803, 801, 802, 798, 799, 826, 825, + 827, 828, 829, 830, 831, 832, 834, 833, 835, 836, + 837, 838, 839, 840, 808, 809, 812, 813, 811, 810, + 814, 823, 824, 815, 816, 817, 818, 819, 820, 822, + 821, 841, 842, 843, 844, 845, 847, 846, 850, 851, + 849, 848, 853, 852, 750, 196, 220, 364, 0, 449, + 287, 637, 606, 601, 205, 222, 916, 261, 917, 0, + 0, 921, 0, 0, 0, 923, 922, 0, 924, 886, + 885, 0, 0, 918, 919, 0, 920, 0, 0, 198, + 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, + 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, + 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, + 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, + 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, + 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, + 475, 931, 932, 933, 934, 935, 936, 937, 938, 298, + 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, + 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, + 956, 0, 210, 245, 229, 258, 273, 276, 322, 387, + 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, + 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, + 569, 314, 520, 0, 761, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 411, 0, 0, 0, 0, 749, + 0, 0, 0, 269, 754, 0, 0, 0, 362, 266, + 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, + 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, + 760, 366, 0, 0, 491, 396, 0, 0, 0, 0, + 0, 756, 757, 0, 0, 0, 0, 0, 0, 0, + 0, 321, 247, 323, 202, 408, 492, 285, 0, 95, + 0, 0, 957, 941, 1070, 907, 945, 958, 959, 960, + 961, 946, 0, 237, 947, 948, 244, 949, 0, 906, + 791, 793, 792, 856, 857, 858, 859, 860, 861, 862, + 789, 954, 962, 963, 0, 264, 319, 271, 263, 572, + 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, + 0, 0, 0, 0, 0, 0, 0, 746, 0, 759, + 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 743, 744, 0, 0, 0, 0, 901, 0, 745, 0, + 0, 753, 964, 965, 966, 967, 968, 969, 970, 971, + 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, + 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, + 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, + 1002, 1003, 1004, 1005, 755, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, + 448, 900, 0, 0, 616, 0, 0, 898, 0, 0, + 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, + 456, 468, 0, 0, 0, 951, 0, 466, 421, 594, + 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, + 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, + 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, + 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, + 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, + 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, + 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, + 257, 410, 952, 953, 255, 639, 797, 610, 219, 0, + 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, + 332, 805, 806, 279, 305, 882, 881, 880, 304, 306, + 878, 879, 877, 206, 598, 0, 207, 0, 493, 599, + 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, + 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, + 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, + 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, + 420, 467, 239, 596, 490, 888, 910, 899, 765, 766, + 889, 890, 914, 891, 768, 769, 911, 912, 762, 763, + 767, 913, 915, 641, 642, 643, 644, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, + 507, 902, 752, 751, 0, 758, 0, 787, 788, 790, + 794, 795, 796, 807, 854, 855, 863, 865, 866, 864, + 867, 868, 869, 872, 873, 874, 875, 870, 871, 876, + 770, 774, 771, 772, 773, 785, 775, 776, 777, 778, + 779, 780, 781, 782, 783, 784, 786, 925, 926, 927, + 928, 929, 930, 800, 804, 803, 801, 802, 798, 799, + 826, 825, 827, 828, 829, 830, 831, 832, 834, 833, + 835, 836, 837, 838, 839, 840, 808, 809, 812, 813, + 811, 810, 814, 823, 824, 815, 816, 817, 818, 819, + 820, 822, 821, 841, 842, 843, 844, 845, 847, 846, + 850, 851, 849, 848, 853, 852, 750, 196, 220, 364, + 0, 449, 287, 637, 606, 601, 205, 222, 916, 261, + 917, 0, 0, 921, 0, 0, 0, 923, 922, 0, + 924, 886, 885, 0, 0, 918, 919, 0, 920, 0, + 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, + 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, + 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, + 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, + 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, + 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, + 613, 619, 475, 931, 932, 933, 934, 935, 936, 937, + 938, 298, 590, 620, 588, 632, 614, 433, 374, 0, + 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, + 289, 250, 956, 0, 210, 245, 229, 258, 273, 276, + 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, + 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, + 372, 568, 569, 314, 520, 0, 761, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 411, 0, 0, 0, + 0, 749, 0, 0, 0, 269, 754, 0, 0, 0, + 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, + 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, + 510, 417, 760, 366, 0, 0, 491, 396, 0, 0, + 0, 0, 0, 756, 757, 0, 0, 0, 0, 0, + 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, + 0, 95, 0, 0, 957, 941, 1070, 907, 945, 958, + 959, 960, 961, 946, 0, 237, 947, 948, 244, 949, + 0, 906, 791, 793, 792, 856, 857, 858, 859, 860, + 861, 862, 789, 954, 962, 963, 0, 264, 319, 271, + 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, + 228, 0, 0, 0, 0, 0, 0, 0, 0, 746, + 0, 759, 0, 0, 0, 274, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 743, 744, 0, 0, 0, 0, 901, 0, + 745, 0, 0, 753, 964, 965, 966, 967, 968, 969, + 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, + 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, + 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, + 1000, 1001, 1002, 1003, 1004, 1005, 2069, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, + 256, 0, 448, 900, 0, 0, 616, 0, 0, 898, + 0, 0, 0, 0, 361, 0, 328, 197, 224, 0, + 0, 407, 456, 468, 0, 0, 0, 951, 0, 466, + 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, + 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, + 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, + 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, + 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, + 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, + 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, + 0, 0, 257, 410, 952, 953, 255, 639, 797, 610, + 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, + 388, 378, 332, 805, 806, 279, 305, 882, 881, 880, + 304, 306, 878, 879, 877, 206, 598, 0, 207, 0, + 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, + 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, + 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, + 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, + 0, 0, 420, 467, 239, 596, 490, 888, 910, 899, + 765, 766, 889, 890, 914, 891, 768, 769, 911, 912, + 762, 763, 767, 913, 915, 641, 642, 643, 644, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, + 505, 0, 507, 902, 752, 751, 0, 758, 0, 787, + 788, 790, 794, 795, 796, 807, 854, 855, 863, 865, + 866, 864, 867, 868, 869, 872, 873, 874, 875, 870, + 871, 876, 770, 774, 771, 772, 773, 785, 775, 776, + 777, 778, 779, 780, 781, 782, 783, 784, 786, 925, + 926, 927, 928, 929, 930, 800, 804, 803, 801, 802, + 798, 799, 826, 825, 827, 828, 829, 830, 831, 832, + 834, 833, 835, 836, 837, 838, 839, 840, 808, 809, + 812, 813, 811, 810, 814, 823, 824, 815, 816, 817, + 818, 819, 820, 822, 821, 841, 842, 843, 844, 845, + 847, 846, 850, 851, 849, 848, 853, 852, 750, 196, + 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, + 916, 261, 917, 0, 0, 921, 0, 0, 0, 923, + 922, 0, 924, 886, 885, 0, 0, 918, 919, 0, + 920, 0, 0, 198, 200, 208, 221, 231, 235, 242, + 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, + 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, + 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, + 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, + 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, + 580, 595, 613, 619, 475, 931, 932, 933, 934, 935, + 936, 937, 938, 298, 590, 620, 588, 632, 614, 433, + 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, + 226, 461, 289, 250, 956, 0, 210, 245, 229, 258, + 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, + 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, + 392, 0, 372, 568, 569, 314, 520, 0, 761, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, + 0, 0, 0, 749, 0, 0, 0, 269, 754, 0, + 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, + 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, + 381, 423, 510, 417, 760, 366, 0, 0, 491, 396, + 0, 0, 0, 0, 0, 756, 757, 0, 0, 0, + 0, 0, 0, 0, 0, 321, 247, 323, 202, 408, + 492, 285, 0, 95, 0, 0, 957, 941, 1070, 907, + 945, 958, 959, 960, 961, 946, 0, 237, 947, 948, + 244, 949, 0, 906, 791, 793, 792, 856, 857, 858, + 859, 860, 861, 862, 789, 954, 962, 963, 0, 264, + 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, + 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, + 0, 746, 0, 759, 0, 0, 0, 274, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 743, 744, 0, 0, 0, 0, + 901, 0, 745, 0, 0, 753, 964, 965, 966, 967, + 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, + 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, + 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, + 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 2067, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, + 0, 397, 256, 0, 448, 900, 0, 0, 616, 0, + 0, 898, 0, 0, 0, 0, 361, 0, 328, 197, + 224, 0, 0, 407, 456, 468, 0, 0, 0, 951, + 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, + 286, 0, 0, 465, 368, 577, 445, 591, 617, 618, + 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, + 597, 488, 393, 573, 574, 327, 487, 294, 201, 365, + 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, + 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, + 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, + 267, 292, 0, 0, 257, 410, 952, 953, 255, 639, + 797, 610, 219, 0, 609, 403, 576, 587, 390, 379, + 218, 585, 388, 378, 332, 805, 806, 279, 305, 882, + 881, 880, 304, 306, 878, 879, 877, 206, 598, 0, + 207, 0, 493, 599, 640, 447, 211, 233, 234, 236, + 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, + 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, + 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, + 331, 369, 0, 0, 420, 467, 239, 596, 490, 888, + 910, 899, 765, 766, 889, 890, 914, 891, 768, 769, + 911, 912, 762, 763, 767, 913, 915, 641, 642, 643, + 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, + 503, 504, 505, 0, 507, 902, 752, 751, 0, 758, + 0, 787, 788, 790, 794, 795, 796, 807, 854, 855, + 863, 865, 866, 864, 867, 868, 869, 872, 873, 874, + 875, 870, 871, 876, 770, 774, 771, 772, 773, 785, + 775, 776, 777, 778, 779, 780, 781, 782, 783, 784, + 786, 925, 926, 927, 928, 929, 930, 800, 804, 803, + 801, 802, 798, 799, 826, 825, 827, 828, 829, 830, + 831, 832, 834, 833, 835, 836, 837, 838, 839, 840, + 808, 809, 812, 813, 811, 810, 814, 823, 824, 815, + 816, 817, 818, 819, 820, 822, 821, 841, 842, 843, + 844, 845, 847, 846, 850, 851, 849, 848, 853, 852, + 750, 196, 220, 364, 0, 449, 287, 637, 606, 601, + 205, 222, 916, 261, 917, 0, 0, 921, 0, 0, + 0, 923, 922, 0, 924, 886, 885, 0, 0, 918, + 919, 0, 920, 0, 0, 198, 200, 208, 221, 231, + 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, + 320, 326, 376, 382, 383, 384, 385, 404, 405, 406, + 409, 412, 413, 416, 418, 419, 422, 426, 430, 431, + 432, 434, 436, 438, 450, 455, 469, 470, 471, 472, + 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, + 508, 578, 580, 595, 613, 619, 475, 931, 932, 933, + 934, 935, 936, 937, 938, 298, 590, 620, 588, 632, + 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, + 605, 496, 226, 461, 289, 250, 956, 0, 210, 245, + 229, 258, 273, 276, 322, 387, 395, 424, 429, 295, + 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, + 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 411, 0, 0, 0, 0, 0, 0, 0, 0, 269, + 0, 0, 0, 0, 362, 266, 0, 0, 425, 0, + 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, + 249, 315, 381, 423, 510, 417, 0, 366, 0, 0, + 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, + 202, 408, 492, 285, 0, 0, 0, 0, 0, 707, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, + 0, 0, 244, 0, 0, 0, 347, 356, 355, 336, + 337, 339, 341, 346, 353, 359, 0, 0, 0, 0, + 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, + 0, 0, 0, 0, 228, 0, 1121, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 296, 0, 397, 256, 0, 448, 0, 0, 1120, + 616, 0, 0, 0, 0, 0, 1117, 1118, 361, 1078, + 328, 197, 224, 1111, 1115, 407, 456, 468, 0, 0, + 0, 252, 0, 466, 421, 594, 232, 283, 453, 427, + 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, + 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, + 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, + 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, + 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, + 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, + 0, 452, 267, 292, 0, 0, 257, 410, 581, 582, + 255, 639, 227, 610, 219, 0, 609, 403, 576, 587, + 390, 379, 218, 585, 388, 378, 332, 351, 352, 279, + 305, 442, 371, 443, 304, 306, 399, 398, 400, 206, + 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, + 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, + 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, + 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, + 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, + 490, 199, 0, 0, 0, 0, 253, 254, 0, 567, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, + 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, + 501, 502, 503, 504, 505, 0, 507, 0, 0, 0, + 0, 0, 0, 583, 584, 659, 380, 480, 593, 333, + 345, 348, 338, 357, 0, 358, 334, 335, 340, 342, + 343, 344, 349, 350, 354, 360, 248, 209, 386, 394, + 570, 310, 215, 216, 217, 516, 517, 518, 519, 607, + 608, 612, 204, 457, 458, 459, 460, 291, 602, 307, + 463, 462, 329, 330, 375, 444, 532, 534, 545, 549, + 551, 553, 559, 562, 533, 535, 546, 550, 552, 554, + 560, 563, 522, 524, 526, 528, 541, 540, 537, 565, + 566, 543, 548, 527, 539, 544, 557, 564, 561, 521, + 525, 529, 538, 556, 555, 536, 547, 558, 542, 530, + 523, 531, 0, 196, 220, 364, 0, 449, 287, 637, + 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, + 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, + 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, + 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, + 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, + 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, + 494, 495, 508, 578, 580, 595, 613, 619, 475, 299, + 300, 439, 440, 312, 313, 633, 634, 298, 590, 620, + 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, + 318, 0, 605, 496, 226, 461, 289, 250, 0, 0, + 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, + 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, + 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, + 520, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 411, 0, 0, 0, 0, 0, 0, 0, + 0, 269, 0, 0, 0, 0, 362, 266, 0, 0, + 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, + 272, 268, 249, 315, 381, 423, 510, 417, 0, 366, + 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 321, + 247, 323, 202, 408, 492, 285, 0, 0, 0, 0, + 1678, 941, 0, 0, 1675, 0, 0, 0, 0, 1673, + 0, 237, 1674, 1672, 244, 1677, 0, 906, 347, 356, + 355, 336, 337, 339, 341, 346, 353, 359, 0, 0, + 0, 0, 0, 264, 319, 271, 263, 572, 0, 0, + 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 296, 0, 397, 256, 0, 448, 0, + 0, 0, 616, 0, 0, 0, 0, 0, 0, 0, + 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, + 0, 0, 0, 252, 0, 466, 421, 594, 232, 283, + 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, + 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, + 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, + 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, + 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, + 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, + 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, + 581, 582, 255, 639, 227, 610, 219, 0, 609, 403, + 576, 587, 390, 379, 218, 585, 388, 378, 332, 351, + 352, 279, 305, 442, 371, 443, 304, 306, 399, 398, + 400, 206, 598, 0, 207, 0, 493, 599, 640, 447, + 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, + 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, + 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, + 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, + 239, 596, 490, 199, 0, 0, 0, 0, 253, 254, + 0, 567, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 641, 642, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, + 500, 506, 501, 502, 503, 504, 505, 0, 507, 0, + 0, 0, 0, 0, 0, 583, 584, 659, 380, 480, + 593, 333, 345, 348, 338, 357, 0, 358, 334, 335, + 340, 342, 343, 344, 349, 350, 354, 360, 248, 209, + 386, 394, 570, 310, 215, 216, 217, 516, 517, 518, + 519, 607, 608, 612, 204, 457, 458, 459, 460, 291, + 602, 307, 463, 462, 329, 330, 375, 444, 532, 534, + 545, 549, 551, 553, 559, 562, 533, 535, 546, 550, + 552, 554, 560, 563, 522, 524, 526, 528, 541, 540, + 537, 565, 566, 543, 548, 527, 539, 544, 557, 564, + 561, 521, 525, 529, 538, 556, 555, 536, 547, 558, + 542, 530, 523, 531, 0, 196, 220, 364, 0, 449, + 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, + 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, + 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, + 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, + 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, + 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, + 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, + 475, 299, 300, 439, 440, 312, 313, 633, 634, 298, + 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, + 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, + 0, 0, 210, 245, 229, 258, 273, 276, 322, 387, + 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, + 512, 513, 515, 391, 265, 428, 0, 392, 372, 568, + 569, 314, 86, 520, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 411, 0, 0, 0, 0, + 0, 0, 0, 0, 269, 0, 0, 0, 0, 362, + 266, 0, 0, 425, 0, 203, 0, 481, 251, 373, + 370, 575, 281, 272, 268, 249, 315, 381, 423, 510, + 417, 0, 366, 0, 0, 491, 396, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 321, 247, 323, 202, 408, 492, 285, 0, + 95, 0, 0, 0, 194, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 237, 0, 0, 244, 0, 0, + 0, 347, 356, 355, 336, 337, 339, 341, 346, 353, + 359, 0, 0, 0, 0, 0, 264, 319, 271, 263, + 572, 0, 0, 0, 0, 0, 0, 0, 0, 228, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 296, 0, 397, 256, + 0, 448, 0, 0, 0, 616, 0, 0, 0, 0, + 0, 0, 0, 361, 0, 328, 197, 224, 0, 0, + 407, 456, 468, 0, 0, 0, 252, 0, 466, 421, + 594, 232, 283, 453, 427, 464, 435, 286, 0, 0, + 465, 368, 577, 445, 591, 617, 618, 262, 401, 603, + 514, 611, 635, 225, 259, 415, 499, 597, 488, 393, + 573, 574, 327, 487, 294, 201, 365, 623, 223, 474, + 367, 241, 230, 579, 600, 288, 451, 630, 212, 509, + 589, 238, 478, 0, 0, 638, 246, 498, 214, 586, + 497, 389, 324, 325, 213, 0, 452, 267, 292, 0, + 0, 257, 410, 581, 582, 255, 639, 227, 610, 219, + 0, 609, 403, 576, 587, 390, 379, 218, 585, 388, + 378, 332, 351, 352, 279, 305, 442, 371, 443, 304, + 306, 399, 398, 400, 206, 598, 0, 207, 0, 493, + 599, 640, 447, 211, 233, 234, 236, 0, 278, 282, + 290, 293, 301, 302, 311, 363, 414, 441, 437, 446, + 0, 571, 592, 604, 615, 621, 622, 624, 625, 626, + 627, 628, 631, 629, 402, 309, 489, 331, 369, 0, + 0, 420, 467, 239, 596, 490, 199, 0, 0, 0, + 0, 253, 254, 0, 567, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 641, 642, 643, 644, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 636, 500, 506, 501, 502, 503, 504, 505, + 0, 507, 0, 0, 0, 0, 0, 0, 583, 584, + 659, 380, 480, 593, 333, 345, 348, 338, 357, 0, + 358, 334, 335, 340, 342, 343, 344, 349, 350, 354, + 360, 248, 209, 386, 394, 570, 310, 215, 216, 217, + 516, 517, 518, 519, 607, 608, 612, 204, 457, 458, + 459, 460, 291, 602, 307, 463, 462, 329, 330, 375, + 444, 532, 534, 545, 549, 551, 553, 559, 562, 533, + 535, 546, 550, 552, 554, 560, 563, 522, 524, 526, + 528, 541, 540, 537, 565, 566, 543, 548, 527, 539, + 544, 557, 564, 561, 521, 525, 529, 538, 556, 555, + 536, 547, 558, 542, 530, 523, 531, 0, 196, 220, + 364, 94, 449, 287, 637, 606, 601, 205, 222, 0, + 261, 0, 0, 0, 0, 0, 0, 2368, 0, 0, + 2367, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 198, 200, 208, 221, 231, 235, 242, 260, + 275, 277, 284, 297, 308, 316, 317, 320, 326, 376, + 382, 383, 384, 385, 404, 405, 406, 409, 412, 413, + 416, 418, 419, 422, 426, 430, 431, 432, 434, 436, + 438, 450, 455, 469, 470, 471, 472, 473, 476, 477, + 482, 483, 484, 485, 486, 494, 495, 508, 578, 580, + 595, 613, 619, 475, 299, 300, 439, 440, 312, 313, + 633, 634, 298, 590, 620, 588, 632, 614, 433, 374, + 0, 0, 377, 280, 303, 318, 0, 605, 496, 226, + 461, 289, 250, 0, 0, 210, 245, 229, 258, 273, + 276, 322, 387, 395, 424, 429, 295, 270, 243, 454, + 240, 479, 511, 512, 513, 515, 391, 265, 428, 1735, + 0, 372, 568, 569, 314, 520, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 411, 0, 0, + 0, 1737, 0, 0, 0, 0, 269, 0, 0, 0, + 0, 362, 266, 0, 0, 425, 0, 203, 0, 481, + 251, 373, 370, 575, 281, 272, 268, 249, 315, 381, + 423, 510, 417, 0, 366, 0, 0, 491, 396, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 321, 247, 323, 202, 408, 492, + 285, 0, 0, 0, 0, 1739, 707, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 237, 0, 0, 244, + 0, 0, 0, 347, 356, 355, 336, 337, 339, 341, + 346, 353, 359, 0, 0, 0, 0, 0, 264, 319, + 271, 263, 572, 0, 0, 0, 0, 0, 0, 0, + 0, 228, 0, 0, 0, 1451, 0, 1452, 1453, 0, + 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 296, 0, + 397, 256, 0, 448, 0, 0, 0, 616, 0, 0, + 0, 0, 0, 0, 0, 361, 0, 328, 197, 224, + 0, 0, 407, 456, 468, 0, 0, 0, 252, 0, + 466, 421, 594, 232, 283, 453, 427, 464, 435, 286, + 0, 0, 465, 368, 577, 445, 591, 617, 618, 262, + 401, 603, 514, 611, 635, 225, 259, 415, 499, 597, + 488, 393, 573, 574, 327, 487, 294, 201, 365, 623, + 223, 474, 367, 241, 230, 579, 600, 288, 451, 630, + 212, 509, 589, 238, 478, 0, 0, 638, 246, 498, + 214, 586, 497, 389, 324, 325, 213, 0, 452, 267, + 292, 0, 0, 257, 410, 581, 582, 255, 639, 227, + 610, 219, 0, 609, 403, 576, 587, 390, 379, 218, + 585, 388, 378, 332, 351, 352, 279, 305, 442, 371, + 443, 304, 306, 399, 398, 400, 206, 598, 0, 207, + 0, 493, 599, 640, 447, 211, 233, 234, 236, 0, + 278, 282, 290, 293, 301, 302, 311, 363, 414, 441, + 437, 446, 0, 571, 592, 604, 615, 621, 622, 624, + 625, 626, 627, 628, 631, 629, 402, 309, 489, 331, + 369, 0, 0, 420, 467, 239, 596, 490, 199, 0, + 0, 0, 0, 253, 254, 0, 567, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 641, 642, 643, 644, + 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 636, 500, 506, 501, 502, 503, + 504, 505, 0, 507, 0, 0, 0, 0, 0, 0, + 583, 584, 659, 380, 480, 593, 333, 345, 348, 338, + 357, 0, 358, 334, 335, 340, 342, 343, 344, 349, + 350, 354, 360, 248, 209, 386, 394, 570, 310, 215, + 216, 217, 516, 517, 518, 519, 607, 608, 612, 204, + 457, 458, 459, 460, 291, 602, 307, 463, 462, 329, + 330, 375, 444, 532, 534, 545, 549, 551, 553, 559, + 562, 533, 535, 546, 550, 552, 554, 560, 563, 522, + 524, 526, 528, 541, 540, 537, 565, 566, 543, 548, + 527, 539, 544, 557, 564, 561, 521, 525, 529, 538, + 556, 555, 536, 547, 558, 542, 530, 523, 531, 0, + 196, 220, 364, 0, 449, 287, 637, 606, 601, 205, + 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 198, 200, 208, 221, 231, 235, + 242, 260, 275, 277, 284, 297, 308, 316, 317, 320, + 326, 376, 382, 383, 384, 385, 404, 405, 406, 409, + 412, 413, 416, 418, 419, 422, 426, 430, 431, 432, + 434, 436, 438, 450, 455, 469, 470, 471, 472, 473, + 476, 477, 482, 483, 484, 485, 486, 494, 495, 508, + 578, 580, 595, 613, 619, 475, 299, 300, 439, 440, + 312, 313, 633, 634, 298, 590, 620, 588, 632, 614, + 433, 374, 0, 0, 377, 280, 303, 318, 0, 605, + 496, 226, 461, 289, 250, 0, 0, 210, 245, 229, + 258, 273, 276, 322, 387, 395, 424, 429, 295, 270, + 243, 454, 240, 479, 511, 512, 513, 515, 391, 265, + 428, 0, 392, 372, 568, 569, 314, 86, 520, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 411, 0, 0, 0, 0, 0, 0, 0, 0, 269, + 0, 0, 0, 0, 362, 266, 0, 0, 425, 0, + 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, + 249, 315, 381, 423, 510, 417, 0, 366, 0, 0, + 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, + 202, 408, 492, 285, 0, 95, 0, 1716, 0, 707, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, + 0, 0, 244, 0, 0, 0, 347, 356, 355, 336, + 337, 339, 341, 346, 353, 359, 0, 0, 0, 0, + 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, + 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 296, 0, 397, 256, 0, 448, 0, 0, 0, + 616, 0, 0, 0, 0, 0, 0, 0, 361, 0, + 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, + 0, 252, 0, 466, 421, 594, 232, 283, 453, 427, + 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, + 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, + 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, + 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, + 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, + 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, + 0, 452, 267, 292, 0, 0, 257, 410, 581, 582, + 255, 639, 227, 610, 219, 0, 609, 403, 576, 587, + 390, 379, 218, 585, 388, 378, 332, 351, 352, 279, + 305, 442, 371, 443, 304, 306, 399, 398, 400, 206, + 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, + 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, + 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, + 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, + 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, + 490, 199, 0, 0, 0, 0, 253, 254, 0, 567, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, + 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, + 501, 502, 503, 504, 505, 0, 507, 0, 0, 0, + 0, 0, 0, 583, 584, 659, 380, 480, 593, 333, + 345, 348, 338, 357, 0, 358, 334, 335, 340, 342, + 343, 344, 349, 350, 354, 360, 248, 209, 386, 394, + 570, 310, 215, 216, 217, 516, 517, 518, 519, 607, + 608, 612, 204, 457, 458, 459, 460, 291, 602, 307, + 463, 462, 329, 330, 375, 444, 532, 534, 545, 549, + 551, 553, 559, 562, 533, 535, 546, 550, 552, 554, + 560, 563, 522, 524, 526, 528, 541, 540, 537, 565, + 566, 543, 548, 527, 539, 544, 557, 564, 561, 521, + 525, 529, 538, 556, 555, 536, 547, 558, 542, 530, + 523, 531, 0, 196, 220, 364, 94, 449, 287, 637, + 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, + 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, + 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, + 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, + 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, + 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, + 494, 495, 508, 578, 580, 595, 613, 619, 475, 299, + 300, 439, 440, 312, 313, 633, 634, 298, 590, 620, + 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, + 318, 0, 605, 496, 226, 461, 289, 250, 0, 0, + 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, + 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, + 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, + 520, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 411, 0, 0, 0, 0, 0, 0, 0, + 0, 269, 0, 0, 0, 0, 362, 266, 0, 0, + 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, + 272, 268, 249, 315, 381, 423, 510, 417, 0, 366, + 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 321, + 247, 323, 202, 408, 492, 285, 0, 95, 0, 0, + 0, 194, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 237, 0, 0, 244, 0, 0, 0, 347, 356, + 355, 336, 337, 339, 341, 346, 353, 359, 0, 0, + 0, 0, 0, 264, 319, 271, 263, 572, 0, 0, + 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 296, 0, 397, 256, 0, 448, 0, + 0, 0, 616, 0, 0, 0, 0, 0, 0, 0, + 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, + 0, 0, 0, 252, 0, 466, 421, 594, 232, 283, + 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, + 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, + 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, + 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, + 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, + 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, + 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, + 581, 582, 255, 639, 227, 610, 219, 0, 609, 403, + 576, 587, 390, 379, 218, 585, 388, 378, 332, 351, + 352, 279, 305, 442, 371, 443, 304, 306, 399, 398, + 400, 206, 598, 0, 207, 0, 493, 599, 640, 447, + 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, + 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, + 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, + 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, + 239, 596, 490, 199, 0, 0, 0, 0, 253, 254, + 0, 567, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 641, 642, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, + 500, 506, 501, 502, 503, 504, 505, 0, 507, 0, + 0, 0, 0, 0, 0, 583, 584, 659, 380, 480, + 593, 333, 345, 348, 338, 357, 0, 358, 334, 335, + 340, 342, 343, 344, 349, 350, 354, 360, 248, 209, + 386, 394, 570, 310, 215, 216, 217, 516, 517, 518, + 519, 607, 608, 612, 204, 457, 458, 459, 460, 291, + 602, 307, 463, 462, 329, 330, 375, 444, 532, 534, + 545, 549, 551, 553, 559, 562, 533, 535, 546, 550, + 552, 554, 560, 563, 522, 524, 526, 528, 541, 540, + 537, 565, 566, 543, 548, 527, 539, 544, 557, 564, + 561, 521, 525, 529, 538, 556, 555, 536, 547, 558, + 542, 530, 523, 531, 0, 196, 220, 364, 0, 449, + 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, + 0, 0, 0, 0, 2368, 0, 0, 2367, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, + 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, + 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, + 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, + 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, + 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, + 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, + 475, 299, 300, 439, 440, 312, 313, 633, 634, 298, + 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, + 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, + 0, 0, 210, 245, 229, 258, 273, 276, 322, 387, + 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, + 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, + 569, 314, 520, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 411, 0, 0, 0, 2318, 0, + 0, 0, 0, 269, 0, 0, 0, 0, 362, 266, + 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, + 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, + 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 321, 247, 323, 202, 408, 492, 285, 0, 0, + 0, 0, 1918, 194, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, + 347, 356, 355, 336, 337, 339, 341, 346, 353, 359, + 0, 0, 0, 0, 0, 264, 319, 271, 263, 572, + 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, + 448, 0, 0, 0, 616, 0, 0, 0, 0, 0, + 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, + 456, 468, 0, 0, 0, 252, 0, 466, 421, 594, + 232, 283, 453, 427, 464, 435, 286, 0, 2316, 465, + 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, + 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, + 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, + 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, + 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, + 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, + 257, 410, 581, 582, 255, 639, 227, 610, 219, 0, + 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, + 332, 351, 352, 279, 305, 442, 371, 443, 304, 306, + 399, 398, 400, 206, 598, 0, 207, 0, 493, 599, + 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, + 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, + 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, + 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, + 420, 467, 239, 596, 490, 199, 0, 0, 0, 0, + 253, 254, 0, 567, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 641, 642, 643, 644, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, + 507, 0, 0, 0, 0, 0, 0, 583, 584, 659, + 380, 480, 593, 333, 345, 348, 338, 357, 0, 358, + 334, 335, 340, 342, 343, 344, 349, 350, 354, 360, + 248, 209, 386, 394, 570, 310, 215, 216, 217, 516, + 517, 518, 519, 607, 608, 612, 204, 457, 458, 459, + 460, 291, 602, 307, 463, 462, 329, 330, 375, 444, + 532, 534, 545, 549, 551, 553, 559, 562, 533, 535, + 546, 550, 552, 554, 560, 563, 522, 524, 526, 528, + 541, 540, 537, 565, 566, 543, 548, 527, 539, 544, + 557, 564, 561, 521, 525, 529, 538, 556, 555, 536, + 547, 558, 542, 530, 523, 531, 0, 196, 220, 364, + 0, 449, 287, 637, 606, 601, 205, 222, 0, 261, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, + 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, + 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, + 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, + 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, + 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, + 613, 619, 475, 299, 300, 439, 440, 312, 313, 633, + 634, 298, 590, 620, 588, 632, 614, 433, 374, 0, + 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, + 289, 250, 0, 0, 210, 245, 229, 258, 273, 276, + 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, + 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, + 372, 568, 569, 314, 520, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 411, 0, 0, 0, + 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, + 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, + 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, + 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, + 0, 0, 0, 0, 0, 707, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, + 0, 0, 347, 356, 355, 336, 337, 339, 341, 346, + 353, 359, 0, 0, 0, 0, 0, 264, 319, 271, + 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, + 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, + 0, 0, 0, 0, 1072, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, + 256, 0, 448, 0, 0, 0, 616, 0, 0, 0, + 0, 0, 0, 0, 361, 1078, 328, 197, 224, 1076, + 0, 407, 456, 468, 0, 0, 0, 252, 0, 466, + 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, + 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, + 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, + 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, + 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, + 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, + 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, + 0, 0, 257, 410, 581, 582, 255, 639, 227, 610, + 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, + 388, 378, 332, 351, 352, 279, 305, 442, 371, 443, + 304, 306, 399, 398, 400, 206, 598, 0, 207, 0, + 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, + 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, + 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, + 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, + 0, 0, 420, 467, 239, 596, 490, 199, 0, 0, + 0, 0, 253, 254, 0, 567, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 641, 642, 643, 644, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, + 505, 0, 507, 0, 0, 0, 0, 0, 0, 583, + 584, 659, 380, 480, 593, 333, 345, 348, 338, 357, + 0, 358, 334, 335, 340, 342, 343, 344, 349, 350, + 354, 360, 248, 209, 386, 394, 570, 310, 215, 216, + 217, 516, 517, 518, 519, 607, 608, 612, 204, 457, + 458, 459, 460, 291, 602, 307, 463, 462, 329, 330, + 375, 444, 532, 534, 545, 549, 551, 553, 559, 562, + 533, 535, 546, 550, 552, 554, 560, 563, 522, 524, + 526, 528, 541, 540, 537, 565, 566, 543, 548, 527, + 539, 544, 557, 564, 561, 521, 525, 529, 538, 556, + 555, 536, 547, 558, 542, 530, 523, 531, 0, 196, + 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, + 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, + 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, + 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, + 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, + 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, + 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, + 580, 595, 613, 619, 475, 299, 300, 439, 440, 312, + 313, 633, 634, 298, 590, 620, 588, 632, 614, 433, + 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, + 226, 461, 289, 250, 0, 0, 210, 245, 229, 258, + 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, + 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, + 392, 0, 372, 568, 569, 314, 520, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, + 0, 0, 2318, 0, 0, 0, 0, 269, 0, 0, + 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, + 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, + 381, 423, 510, 417, 0, 366, 0, 0, 491, 396, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 321, 247, 323, 202, 408, + 492, 285, 0, 0, 0, 0, 1918, 194, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 244, 0, 0, 0, 347, 356, 355, 336, 337, 339, + 341, 346, 353, 359, 0, 0, 0, 0, 0, 264, + 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, + 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, + 0, 397, 256, 0, 448, 0, 0, 0, 616, 0, + 0, 0, 0, 0, 0, 0, 361, 0, 328, 197, + 224, 0, 0, 407, 456, 468, 0, 0, 0, 252, + 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, + 286, 0, 0, 465, 368, 577, 445, 591, 617, 618, + 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, + 597, 488, 393, 573, 574, 327, 487, 294, 201, 365, + 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, + 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, + 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, + 267, 292, 0, 0, 257, 410, 581, 582, 255, 639, + 227, 610, 219, 0, 609, 403, 576, 587, 390, 379, + 218, 585, 388, 378, 332, 351, 352, 279, 305, 442, + 371, 443, 304, 306, 399, 398, 400, 206, 598, 0, + 207, 0, 493, 599, 640, 447, 211, 233, 234, 236, + 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, + 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, + 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, + 331, 369, 0, 0, 420, 467, 239, 596, 490, 199, + 0, 0, 0, 0, 253, 254, 0, 567, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 641, 642, 643, + 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, + 503, 504, 505, 0, 507, 0, 0, 0, 0, 0, + 0, 583, 584, 659, 380, 480, 593, 333, 345, 348, + 338, 357, 0, 358, 334, 335, 340, 342, 343, 344, + 349, 350, 354, 360, 248, 209, 386, 394, 570, 310, + 215, 216, 217, 516, 517, 518, 519, 607, 608, 612, + 204, 457, 458, 459, 460, 291, 602, 307, 463, 462, + 329, 330, 375, 444, 532, 534, 545, 549, 551, 553, + 559, 562, 533, 535, 546, 550, 552, 554, 560, 563, + 522, 524, 526, 528, 541, 540, 537, 565, 566, 543, + 548, 527, 539, 544, 557, 564, 561, 521, 525, 529, + 538, 556, 555, 536, 547, 558, 542, 530, 523, 531, + 0, 196, 220, 364, 0, 449, 287, 637, 606, 601, + 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, + 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, + 320, 326, 376, 382, 383, 384, 385, 404, 405, 406, + 409, 412, 413, 416, 418, 419, 422, 426, 430, 431, + 432, 434, 436, 438, 450, 455, 469, 470, 471, 472, + 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, + 508, 578, 580, 595, 613, 619, 475, 299, 300, 439, + 440, 312, 313, 633, 634, 298, 590, 620, 588, 632, + 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, + 605, 496, 226, 461, 289, 250, 0, 0, 210, 245, + 229, 258, 273, 276, 322, 387, 395, 424, 429, 295, + 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, + 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 411, 0, 0, 0, 0, 0, 0, 0, 0, 269, + 0, 0, 0, 0, 362, 266, 0, 0, 425, 0, + 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, + 249, 315, 381, 423, 510, 417, 0, 366, 0, 0, + 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, + 202, 408, 492, 285, 0, 0, 0, 1716, 0, 707, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, + 0, 0, 244, 0, 0, 0, 347, 356, 355, 336, + 337, 339, 341, 346, 353, 359, 0, 0, 0, 0, + 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, + 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 296, 0, 397, 256, 0, 448, 0, 0, 0, + 616, 0, 0, 0, 3893, 0, 0, 0, 361, 0, + 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, + 0, 252, 0, 466, 421, 594, 232, 283, 453, 427, + 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, + 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, + 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, + 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, + 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, + 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, + 0, 452, 267, 292, 0, 0, 257, 410, 581, 582, + 255, 639, 227, 610, 219, 0, 609, 403, 576, 587, + 390, 379, 218, 585, 388, 378, 332, 351, 352, 279, + 305, 442, 371, 443, 304, 306, 399, 398, 400, 206, + 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, + 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, + 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, + 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, + 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, + 490, 199, 0, 0, 0, 0, 253, 254, 0, 567, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, + 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, + 501, 502, 503, 504, 505, 0, 507, 0, 0, 0, + 0, 0, 0, 583, 584, 659, 380, 480, 593, 333, + 345, 348, 338, 357, 0, 358, 334, 335, 340, 342, + 343, 344, 349, 350, 354, 360, 248, 209, 386, 394, + 570, 310, 215, 216, 217, 516, 517, 518, 519, 607, + 608, 612, 204, 457, 458, 459, 460, 291, 602, 307, + 463, 462, 329, 330, 375, 444, 532, 534, 545, 549, + 551, 553, 559, 562, 533, 535, 546, 550, 552, 554, + 560, 563, 522, 524, 526, 528, 541, 540, 537, 565, + 566, 543, 548, 527, 539, 544, 557, 564, 561, 521, + 525, 529, 538, 556, 555, 536, 547, 558, 542, 530, + 523, 531, 0, 196, 220, 364, 0, 449, 287, 637, + 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, + 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, + 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, + 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, + 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, + 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, + 494, 495, 508, 578, 580, 595, 613, 619, 475, 299, + 300, 439, 440, 312, 313, 633, 634, 298, 590, 620, + 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, + 318, 0, 605, 496, 226, 461, 289, 250, 0, 0, + 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, + 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, + 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, + 520, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 411, 0, 0, 0, 0, 0, 0, 0, + 0, 269, 0, 0, 0, 0, 362, 266, 0, 0, + 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, + 272, 268, 249, 315, 381, 423, 510, 417, 0, 366, + 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 321, + 247, 323, 202, 408, 492, 285, 0, 0, 0, 0, + 2078, 707, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 237, 0, 0, 244, 0, 0, 0, 347, 356, + 355, 336, 337, 339, 341, 346, 353, 359, 0, 0, + 0, 0, 0, 264, 319, 271, 263, 572, 0, 0, + 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 2079, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 296, 0, 397, 256, 0, 448, 0, + 0, 0, 616, 0, 0, 0, 0, 0, 0, 0, + 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, + 0, 0, 0, 252, 0, 466, 421, 594, 232, 283, + 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, + 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, + 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, + 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, + 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, + 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, + 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, + 581, 582, 255, 639, 227, 610, 219, 0, 609, 403, + 576, 587, 390, 379, 218, 585, 388, 378, 332, 351, + 352, 279, 305, 442, 371, 443, 304, 306, 399, 398, + 400, 206, 598, 0, 207, 0, 493, 599, 640, 447, + 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, + 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, + 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, + 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, + 239, 596, 490, 199, 0, 0, 0, 0, 253, 254, + 0, 567, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 641, 642, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, + 500, 506, 501, 502, 503, 504, 505, 0, 507, 0, + 0, 0, 0, 0, 0, 583, 584, 659, 380, 480, + 593, 333, 345, 348, 338, 357, 0, 358, 334, 335, + 340, 342, 343, 344, 349, 350, 354, 360, 248, 209, + 386, 394, 570, 310, 215, 216, 217, 516, 517, 518, + 519, 607, 608, 612, 204, 457, 458, 459, 460, 291, + 602, 307, 463, 462, 329, 330, 375, 444, 532, 534, + 545, 549, 551, 553, 559, 562, 533, 535, 546, 550, + 552, 554, 560, 563, 522, 524, 526, 528, 541, 540, + 537, 565, 566, 543, 548, 527, 539, 544, 557, 564, + 561, 521, 525, 529, 538, 556, 555, 536, 547, 558, + 542, 530, 523, 531, 0, 196, 220, 364, 0, 449, + 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, + 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, + 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, + 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, + 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, + 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, + 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, + 475, 299, 300, 439, 440, 312, 313, 633, 634, 298, + 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, + 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, + 0, 0, 210, 245, 229, 258, 273, 276, 322, 387, + 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, + 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, + 569, 314, 520, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 411, 0, 0, 0, 0, 0, + 0, 0, 0, 269, 0, 0, 0, 0, 362, 266, + 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, + 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, + 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 321, 247, 323, 202, 408, 492, 285, 0, 0, + 0, 0, 2811, 707, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, + 347, 356, 355, 336, 337, 339, 341, 346, 353, 359, + 0, 0, 0, 0, 0, 264, 319, 271, 263, 572, + 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 2812, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, + 448, 0, 0, 0, 616, 0, 0, 0, 0, 0, + 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, + 456, 468, 0, 0, 0, 252, 0, 466, 421, 594, + 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, + 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, + 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, + 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, + 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, + 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, + 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, + 257, 410, 581, 582, 255, 639, 227, 610, 219, 0, + 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, + 332, 351, 352, 279, 305, 442, 371, 443, 304, 306, + 399, 398, 400, 206, 598, 0, 207, 0, 493, 599, + 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, + 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, + 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, + 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, + 420, 467, 239, 596, 490, 199, 0, 0, 0, 0, + 253, 254, 0, 567, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 641, 642, 643, 644, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, + 507, 0, 0, 0, 0, 0, 0, 583, 584, 659, + 380, 480, 593, 333, 345, 348, 338, 357, 0, 358, + 334, 335, 340, 342, 343, 344, 349, 350, 354, 360, + 248, 209, 386, 394, 570, 310, 215, 216, 217, 516, + 517, 518, 519, 607, 608, 612, 204, 457, 458, 459, + 460, 291, 602, 307, 463, 462, 329, 330, 375, 444, + 532, 534, 545, 549, 551, 553, 559, 562, 533, 535, + 546, 550, 552, 554, 560, 563, 522, 524, 526, 528, + 541, 540, 537, 565, 566, 543, 548, 527, 539, 544, + 557, 564, 561, 521, 525, 529, 538, 556, 555, 536, + 547, 558, 542, 530, 523, 531, 0, 196, 220, 364, + 0, 449, 287, 637, 606, 601, 205, 222, 0, 261, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, + 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, + 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, + 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, + 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, + 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, + 613, 619, 475, 299, 300, 439, 440, 312, 313, 633, + 634, 298, 590, 620, 588, 632, 614, 433, 374, 0, + 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, + 289, 250, 0, 0, 210, 245, 229, 258, 273, 276, + 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, + 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, + 372, 568, 569, 314, 520, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 411, 0, 0, 0, + 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, + 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, + 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, + 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, + 0, 0, 0, 0, 0, 707, 0, 0, 0, 0, + 2796, 0, 0, 0, 0, 237, 0, 0, 244, 2797, + 0, 0, 347, 356, 355, 336, 337, 339, 341, 346, + 353, 359, 0, 0, 0, 0, 0, 264, 319, 271, + 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, + 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, + 256, 0, 448, 0, 0, 0, 616, 0, 0, 0, + 0, 0, 0, 0, 361, 0, 328, 197, 224, 0, + 0, 407, 456, 468, 0, 0, 0, 252, 0, 466, + 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, + 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, + 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, + 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, + 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, + 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, + 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, + 0, 0, 257, 410, 581, 582, 255, 639, 227, 610, + 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, + 388, 378, 332, 351, 352, 279, 305, 442, 371, 443, + 304, 306, 399, 398, 400, 206, 598, 0, 207, 0, + 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, + 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, + 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, + 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, + 0, 0, 420, 467, 239, 596, 490, 199, 0, 0, + 0, 0, 253, 254, 0, 567, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 641, 642, 643, 644, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, + 505, 0, 507, 0, 0, 0, 0, 0, 0, 583, + 584, 659, 380, 480, 593, 333, 345, 348, 338, 357, + 0, 358, 334, 335, 340, 342, 343, 344, 349, 350, + 354, 360, 248, 209, 386, 394, 570, 310, 215, 216, + 217, 516, 517, 518, 519, 607, 608, 612, 204, 457, + 458, 459, 460, 291, 602, 307, 463, 462, 329, 330, + 375, 444, 532, 534, 545, 549, 551, 553, 559, 562, + 533, 535, 546, 550, 552, 554, 560, 563, 522, 524, + 526, 528, 541, 540, 537, 565, 566, 543, 548, 527, + 539, 544, 557, 564, 561, 521, 525, 529, 538, 556, + 555, 536, 547, 558, 542, 530, 523, 531, 0, 196, + 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, + 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, + 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, + 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, + 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, + 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, + 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, + 580, 595, 613, 619, 475, 299, 300, 439, 440, 312, + 313, 633, 634, 298, 590, 620, 588, 632, 614, 433, + 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, + 226, 461, 289, 250, 0, 0, 210, 245, 229, 258, + 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, + 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, + 392, 0, 372, 568, 569, 314, 520, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, + 0, 0, 0, 0, 0, 0, 0, 269, 1758, 0, + 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, + 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, + 381, 423, 510, 417, 0, 366, 0, 0, 491, 396, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 321, 247, 323, 202, 408, + 492, 285, 0, 0, 0, 0, 1757, 707, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 244, 0, 0, 0, 347, 356, 355, 336, 337, 339, + 341, 346, 353, 359, 0, 0, 0, 0, 0, 264, + 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, + 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, + 0, 397, 256, 0, 448, 0, 0, 0, 616, 0, + 0, 0, 0, 0, 0, 0, 361, 0, 328, 197, + 224, 0, 0, 407, 456, 468, 0, 0, 0, 252, + 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, + 286, 0, 0, 465, 368, 577, 445, 591, 617, 618, + 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, + 597, 488, 393, 573, 574, 327, 487, 294, 201, 365, + 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, + 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, + 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, + 267, 292, 0, 0, 257, 410, 581, 582, 255, 639, + 227, 610, 219, 0, 609, 403, 576, 587, 390, 379, + 218, 585, 388, 378, 332, 351, 352, 279, 305, 442, + 371, 443, 304, 306, 399, 398, 400, 206, 598, 0, + 207, 0, 493, 599, 640, 447, 211, 233, 234, 236, + 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, + 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, + 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, + 331, 369, 0, 0, 420, 467, 239, 596, 490, 199, + 0, 0, 0, 0, 253, 254, 0, 567, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 641, 642, 643, + 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, + 503, 504, 505, 0, 507, 0, 0, 0, 0, 0, + 0, 583, 584, 659, 380, 480, 593, 333, 345, 348, + 338, 357, 0, 358, 334, 335, 340, 342, 343, 344, + 349, 350, 354, 360, 248, 209, 386, 394, 570, 310, + 215, 216, 217, 516, 517, 518, 519, 607, 608, 612, + 204, 457, 458, 459, 460, 291, 602, 307, 463, 462, + 329, 330, 375, 444, 532, 534, 545, 549, 551, 553, + 559, 562, 533, 535, 546, 550, 552, 554, 560, 563, + 522, 524, 526, 528, 541, 540, 537, 565, 566, 543, + 548, 527, 539, 544, 557, 564, 561, 521, 525, 529, + 538, 556, 555, 536, 547, 558, 542, 530, 523, 531, + 0, 196, 220, 364, 0, 449, 287, 637, 606, 601, + 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, + 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, + 320, 326, 376, 382, 383, 384, 385, 404, 405, 406, + 409, 412, 413, 416, 418, 419, 422, 426, 430, 431, + 432, 434, 436, 438, 450, 455, 469, 470, 471, 472, + 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, + 508, 578, 580, 595, 613, 619, 475, 299, 300, 439, + 440, 312, 313, 633, 634, 298, 590, 620, 588, 632, + 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, + 605, 496, 226, 461, 289, 250, 0, 0, 210, 245, + 229, 258, 273, 276, 322, 387, 395, 424, 429, 295, + 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, + 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 411, 0, 0, 0, 0, 0, 0, 0, 0, 269, + 0, 0, 0, 0, 362, 266, 0, 0, 425, 0, + 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, + 249, 315, 381, 423, 510, 417, 0, 366, 0, 0, + 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, + 202, 408, 492, 285, 0, 0, 0, 0, 0, 709, + 710, 711, 0, 0, 0, 0, 0, 0, 0, 237, + 0, 0, 244, 0, 0, 0, 347, 356, 355, 336, + 337, 339, 341, 346, 353, 359, 0, 0, 0, 0, + 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, + 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 296, 0, 397, 256, 0, 448, 0, 0, 0, + 616, 0, 0, 0, 0, 0, 0, 0, 361, 0, + 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, + 0, 252, 0, 466, 421, 594, 232, 283, 453, 427, + 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, + 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, + 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, + 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, + 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, + 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, + 0, 452, 267, 292, 0, 0, 257, 410, 581, 582, + 255, 639, 227, 610, 219, 0, 609, 403, 576, 587, + 390, 379, 218, 585, 388, 378, 332, 351, 352, 279, + 305, 442, 371, 443, 304, 306, 399, 398, 400, 206, + 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, + 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, + 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, + 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, + 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, + 490, 199, 0, 0, 0, 0, 253, 254, 0, 567, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, + 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, + 501, 502, 503, 504, 505, 0, 507, 0, 0, 0, + 0, 0, 0, 583, 584, 659, 380, 480, 593, 333, + 345, 348, 338, 357, 0, 358, 334, 335, 340, 342, + 343, 344, 349, 350, 354, 360, 248, 209, 386, 394, + 570, 310, 215, 216, 217, 516, 517, 518, 519, 607, + 608, 612, 204, 457, 458, 459, 460, 291, 602, 307, + 463, 462, 329, 330, 375, 444, 532, 534, 545, 549, + 551, 553, 559, 562, 533, 535, 546, 550, 552, 554, + 560, 563, 522, 524, 526, 528, 541, 540, 537, 565, + 566, 543, 548, 527, 539, 544, 557, 564, 561, 521, + 525, 529, 538, 556, 555, 536, 547, 558, 542, 530, + 523, 531, 0, 196, 220, 364, 0, 449, 287, 637, + 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, + 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, + 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, + 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, + 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, + 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, + 494, 495, 508, 578, 580, 595, 613, 619, 475, 299, + 300, 439, 440, 312, 313, 633, 634, 298, 590, 620, + 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, + 318, 0, 605, 496, 226, 461, 289, 250, 0, 0, + 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, + 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, + 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, + 520, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 411, 0, 0, 0, 0, 0, 0, 0, + 0, 269, 0, 0, 0, 0, 362, 266, 0, 0, + 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, + 272, 268, 249, 315, 381, 423, 510, 417, 0, 366, + 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 321, + 247, 323, 202, 408, 492, 285, 0, 0, 0, 0, + 0, 707, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 237, 0, 0, 244, 0, 0, 0, 347, 356, + 355, 336, 337, 339, 341, 346, 353, 359, 0, 0, + 0, 0, 0, 264, 319, 271, 263, 572, 0, 0, + 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 296, 0, 397, 256, 0, 448, 0, + 0, 0, 616, 0, 0, 0, 4016, 0, 0, 0, + 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, + 0, 0, 0, 252, 0, 466, 421, 594, 232, 283, + 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, + 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, + 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, + 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, + 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, + 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, + 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, + 581, 582, 255, 639, 227, 610, 219, 0, 609, 403, + 576, 587, 390, 379, 218, 585, 388, 378, 332, 351, + 352, 279, 305, 442, 371, 443, 304, 306, 399, 398, + 400, 206, 598, 0, 207, 0, 493, 599, 640, 447, + 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, + 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, + 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, + 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, + 239, 596, 490, 199, 0, 0, 0, 0, 253, 254, + 0, 567, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 641, 642, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, + 500, 506, 501, 502, 503, 504, 505, 0, 507, 0, + 0, 0, 0, 0, 0, 583, 584, 659, 380, 480, + 593, 333, 345, 348, 338, 357, 0, 358, 334, 335, + 340, 342, 343, 344, 349, 350, 354, 360, 248, 209, + 386, 394, 570, 310, 215, 216, 217, 516, 517, 518, + 519, 607, 608, 612, 204, 457, 458, 459, 460, 291, + 602, 307, 463, 462, 329, 330, 375, 444, 532, 534, + 545, 549, 551, 553, 559, 562, 533, 535, 546, 550, + 552, 554, 560, 563, 522, 524, 526, 528, 541, 540, + 537, 565, 566, 543, 548, 527, 539, 544, 557, 564, + 561, 521, 525, 529, 538, 556, 555, 536, 547, 558, + 542, 530, 523, 531, 0, 196, 220, 364, 0, 449, + 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, + 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, + 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, + 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, + 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, + 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, + 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, + 475, 299, 300, 439, 440, 312, 313, 633, 634, 298, + 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, + 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, + 0, 0, 210, 245, 229, 258, 273, 276, 322, 387, + 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, + 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, + 569, 314, 520, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 411, 0, 0, 0, 0, 0, + 0, 0, 0, 269, 0, 0, 0, 0, 362, 266, + 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, + 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, + 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 321, 247, 323, 202, 408, 492, 285, 0, 0, + 0, 0, 1918, 194, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, + 347, 356, 355, 336, 337, 339, 341, 346, 353, 359, + 0, 0, 0, 0, 0, 264, 319, 271, 263, 572, + 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, + 448, 0, 0, 0, 616, 0, 0, 0, 0, 0, + 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, + 456, 468, 0, 0, 0, 252, 0, 466, 421, 594, + 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, + 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, + 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, + 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, + 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, + 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, + 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, + 257, 410, 581, 582, 255, 639, 227, 610, 219, 0, + 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, + 332, 351, 352, 279, 305, 442, 371, 443, 304, 306, + 399, 398, 400, 206, 598, 0, 207, 0, 493, 599, + 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, + 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, + 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, + 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, + 420, 467, 239, 596, 490, 199, 0, 0, 0, 0, + 253, 254, 0, 567, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 641, 642, 643, 644, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, + 507, 0, 0, 0, 0, 0, 0, 583, 584, 659, + 380, 480, 593, 333, 345, 348, 338, 357, 0, 358, + 334, 335, 340, 342, 343, 344, 349, 350, 354, 360, + 248, 209, 386, 394, 570, 310, 215, 216, 217, 516, + 517, 518, 519, 607, 608, 612, 204, 457, 458, 459, + 460, 291, 602, 307, 463, 462, 329, 330, 375, 444, + 532, 534, 545, 549, 551, 553, 559, 562, 533, 535, + 546, 550, 552, 554, 560, 563, 522, 524, 526, 528, + 541, 540, 537, 565, 566, 543, 548, 527, 539, 544, + 557, 564, 561, 521, 525, 529, 538, 556, 555, 536, + 547, 558, 542, 530, 523, 531, 0, 196, 220, 364, + 0, 449, 287, 637, 606, 601, 205, 222, 0, 261, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, + 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, + 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, + 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, + 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, + 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, + 613, 619, 475, 299, 300, 439, 440, 312, 313, 633, + 634, 298, 590, 620, 588, 632, 614, 433, 374, 0, + 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, + 289, 250, 0, 0, 210, 245, 229, 258, 273, 276, + 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, + 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, + 372, 568, 569, 314, 520, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 411, 0, 0, 0, + 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, + 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, + 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, + 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, + 0, 0, 0, 0, 0, 707, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, + 0, 0, 347, 356, 355, 336, 337, 339, 341, 346, + 353, 359, 0, 0, 0, 0, 0, 264, 319, 271, + 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, + 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, + 256, 0, 448, 0, 0, 0, 616, 0, 0, 0, + 3893, 0, 0, 0, 361, 0, 328, 197, 224, 0, + 0, 407, 456, 468, 0, 0, 0, 252, 0, 466, + 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, + 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, + 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, + 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, + 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, + 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, + 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, + 0, 0, 257, 410, 581, 582, 255, 639, 227, 610, + 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, + 388, 378, 332, 351, 352, 279, 305, 442, 371, 443, + 304, 306, 399, 398, 400, 206, 598, 0, 207, 0, + 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, + 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, + 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, + 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, + 0, 0, 420, 467, 239, 596, 490, 199, 0, 0, + 0, 0, 253, 254, 0, 567, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 641, 642, 643, 644, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, + 505, 0, 507, 0, 0, 0, 0, 0, 0, 583, + 584, 659, 380, 480, 593, 333, 345, 348, 338, 357, + 0, 358, 334, 335, 340, 342, 343, 344, 349, 350, + 354, 360, 248, 209, 386, 394, 570, 310, 215, 216, + 217, 516, 517, 518, 519, 607, 608, 612, 204, 457, + 458, 459, 460, 291, 602, 307, 463, 462, 329, 330, + 375, 444, 532, 534, 545, 549, 551, 553, 559, 562, + 533, 535, 546, 550, 552, 554, 560, 563, 522, 524, + 526, 528, 541, 540, 537, 565, 566, 543, 548, 527, + 539, 544, 557, 564, 561, 521, 525, 529, 538, 556, + 555, 536, 547, 558, 542, 530, 523, 531, 0, 196, + 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, + 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, + 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, + 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, + 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, + 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, + 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, + 580, 595, 613, 619, 475, 299, 300, 439, 440, 312, + 313, 633, 634, 298, 590, 620, 588, 632, 614, 433, + 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, + 226, 461, 289, 250, 0, 0, 210, 245, 229, 258, + 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, + 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, + 392, 0, 372, 568, 569, 314, 520, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, + 0, 0, 0, 0, 0, 0, 0, 269, 0, 0, + 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, + 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, + 381, 423, 510, 417, 0, 366, 0, 0, 491, 396, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 321, 247, 323, 202, 408, + 492, 285, 0, 95, 0, 0, 0, 707, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 244, 0, 0, 0, 347, 356, 355, 336, 337, 339, + 341, 346, 353, 359, 0, 0, 0, 0, 0, 264, + 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, + 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, + 0, 397, 256, 0, 448, 0, 0, 0, 616, 0, + 0, 0, 0, 0, 0, 0, 361, 0, 328, 197, + 224, 0, 0, 407, 456, 468, 0, 0, 0, 252, + 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, + 286, 0, 0, 465, 368, 577, 445, 591, 617, 618, + 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, + 597, 488, 393, 573, 574, 327, 487, 294, 201, 365, + 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, + 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, + 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, + 267, 292, 0, 0, 257, 410, 581, 582, 255, 639, + 227, 610, 219, 0, 609, 403, 576, 587, 390, 379, + 218, 585, 388, 378, 332, 351, 352, 279, 305, 442, + 371, 443, 304, 306, 399, 398, 400, 206, 598, 0, + 207, 0, 493, 599, 640, 447, 211, 233, 234, 236, + 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, + 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, + 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, + 331, 369, 0, 0, 420, 467, 239, 596, 490, 199, + 0, 0, 0, 0, 253, 254, 0, 567, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 641, 642, 643, + 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, + 503, 504, 505, 0, 507, 0, 0, 0, 0, 0, + 0, 583, 584, 659, 380, 480, 593, 333, 345, 348, + 338, 357, 0, 358, 334, 335, 340, 342, 343, 344, + 349, 350, 354, 360, 248, 209, 386, 394, 570, 310, + 215, 216, 217, 516, 517, 518, 519, 607, 608, 612, + 204, 457, 458, 459, 460, 291, 602, 307, 463, 462, + 329, 330, 375, 444, 532, 534, 545, 549, 551, 553, + 559, 562, 533, 535, 546, 550, 552, 554, 560, 563, + 522, 524, 526, 528, 541, 540, 537, 565, 566, 543, + 548, 527, 539, 544, 557, 564, 561, 521, 525, 529, + 538, 556, 555, 536, 547, 558, 542, 530, 523, 531, + 0, 196, 220, 364, 0, 449, 287, 637, 606, 601, + 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, + 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, + 320, 326, 376, 382, 383, 384, 385, 404, 405, 406, + 409, 412, 413, 416, 418, 419, 422, 426, 430, 431, + 432, 434, 436, 438, 450, 455, 469, 470, 471, 472, + 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, + 508, 578, 580, 595, 613, 619, 475, 299, 300, 439, + 440, 312, 313, 633, 634, 298, 590, 620, 588, 632, + 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, + 605, 496, 226, 461, 289, 250, 0, 0, 210, 245, + 229, 258, 273, 276, 322, 387, 395, 424, 429, 295, + 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, + 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, + 0, 0, 0, 2369, 0, 0, 0, 0, 0, 0, + 411, 0, 0, 0, 0, 0, 0, 0, 0, 269, + 0, 0, 0, 0, 362, 266, 0, 0, 425, 0, + 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, + 249, 315, 381, 423, 510, 417, 0, 366, 0, 0, + 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, + 202, 408, 492, 285, 0, 0, 0, 0, 0, 194, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, + 0, 0, 244, 0, 0, 0, 347, 356, 355, 336, + 337, 339, 341, 346, 353, 359, 0, 0, 0, 0, + 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, + 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 296, 0, 397, 256, 0, 448, 0, 0, 0, + 616, 0, 0, 0, 0, 0, 0, 0, 361, 0, + 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, + 0, 252, 0, 466, 421, 594, 232, 283, 453, 427, + 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, + 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, + 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, + 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, + 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, + 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, + 0, 452, 267, 292, 0, 0, 257, 410, 581, 582, + 255, 639, 227, 610, 219, 0, 609, 403, 576, 587, + 390, 379, 218, 585, 388, 378, 332, 351, 352, 279, + 305, 442, 371, 443, 304, 306, 399, 398, 400, 206, + 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, + 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, + 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, + 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, + 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, + 490, 199, 0, 0, 0, 0, 253, 254, 0, 567, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, + 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, + 501, 502, 503, 504, 505, 0, 507, 0, 0, 0, + 0, 0, 0, 583, 584, 659, 380, 480, 593, 333, + 345, 348, 338, 357, 0, 358, 334, 335, 340, 342, + 343, 344, 349, 350, 354, 360, 248, 209, 386, 394, + 570, 310, 215, 216, 217, 516, 517, 518, 519, 607, + 608, 612, 204, 457, 458, 459, 460, 291, 602, 307, + 463, 462, 329, 330, 375, 444, 532, 534, 545, 549, + 551, 553, 559, 562, 533, 535, 546, 550, 552, 554, + 560, 563, 522, 524, 526, 528, 541, 540, 537, 565, + 566, 543, 548, 527, 539, 544, 557, 564, 561, 521, + 525, 529, 538, 556, 555, 536, 547, 558, 542, 530, + 523, 531, 0, 196, 220, 364, 0, 449, 287, 637, + 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, + 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, + 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, + 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, + 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, + 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, + 494, 495, 508, 578, 580, 595, 613, 619, 475, 299, + 300, 439, 440, 312, 313, 633, 634, 298, 590, 620, + 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, + 318, 0, 605, 496, 226, 461, 289, 250, 0, 0, + 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, + 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, + 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, + 520, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 411, 0, 0, 0, 0, 0, 0, 0, + 0, 269, 0, 0, 0, 0, 362, 266, 0, 0, + 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, + 272, 268, 249, 315, 381, 423, 510, 417, 0, 366, + 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 321, + 247, 323, 202, 408, 492, 285, 0, 0, 0, 0, + 1739, 707, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 237, 0, 0, 244, 0, 0, 0, 347, 356, + 355, 336, 337, 339, 341, 346, 353, 359, 0, 0, + 0, 0, 0, 264, 319, 271, 263, 572, 0, 0, + 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 296, 0, 397, 256, 0, 448, 0, + 0, 0, 616, 0, 0, 0, 0, 0, 0, 0, + 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, + 0, 0, 0, 252, 0, 466, 421, 594, 232, 283, + 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, + 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, + 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, + 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, + 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, + 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, + 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, + 581, 582, 255, 639, 227, 610, 219, 0, 609, 403, + 576, 587, 390, 379, 218, 585, 388, 378, 332, 351, + 352, 279, 305, 442, 371, 443, 304, 306, 399, 398, + 400, 206, 598, 0, 207, 0, 493, 599, 640, 447, + 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, + 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, + 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, + 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, + 239, 596, 490, 199, 0, 0, 0, 0, 253, 254, + 0, 567, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 641, 642, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, + 500, 506, 501, 502, 503, 504, 505, 0, 507, 0, + 0, 0, 0, 0, 0, 583, 584, 659, 380, 480, + 593, 333, 345, 348, 338, 357, 0, 358, 334, 335, + 340, 342, 343, 344, 349, 350, 354, 360, 248, 209, + 386, 394, 570, 310, 215, 216, 217, 516, 517, 518, + 519, 607, 608, 612, 204, 457, 458, 459, 460, 291, + 602, 307, 463, 462, 329, 330, 375, 444, 532, 534, + 545, 549, 551, 553, 559, 562, 533, 535, 546, 550, + 552, 554, 560, 563, 522, 524, 526, 528, 541, 540, + 537, 565, 566, 543, 548, 527, 539, 544, 557, 564, + 561, 521, 525, 529, 538, 556, 555, 536, 547, 558, + 542, 530, 523, 531, 0, 196, 220, 364, 0, 449, + 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, + 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, + 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, + 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, + 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, + 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, + 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, + 475, 299, 300, 439, 440, 312, 313, 633, 634, 298, + 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, + 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, + 0, 0, 210, 245, 229, 258, 273, 276, 322, 387, + 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, + 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, + 569, 314, 520, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 411, 0, 0, 0, 0, 0, + 0, 0, 0, 269, 0, 0, 0, 0, 362, 266, + 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, + 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, + 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 321, 247, 323, 202, 408, 492, 285, 0, 0, + 0, 0, 0, 194, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, + 347, 356, 355, 336, 337, 339, 341, 346, 353, 359, + 0, 0, 0, 0, 0, 264, 319, 271, 263, 572, + 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, + 448, 0, 0, 0, 616, 0, 0, 0, 0, 0, + 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, + 456, 468, 0, 0, 0, 252, 0, 466, 421, 594, + 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, + 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, + 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, + 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, + 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, + 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, + 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, + 257, 410, 581, 582, 255, 639, 227, 610, 219, 0, + 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, + 332, 351, 352, 279, 305, 442, 371, 443, 304, 306, + 399, 398, 400, 206, 598, 0, 207, 0, 493, 599, + 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, + 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, + 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, + 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, + 420, 467, 239, 596, 490, 199, 0, 0, 0, 0, + 253, 254, 0, 567, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 641, 642, 643, 644, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, + 507, 0, 0, 0, 0, 0, 0, 583, 584, 659, + 380, 480, 593, 333, 345, 348, 338, 357, 0, 358, + 334, 335, 340, 342, 343, 344, 349, 350, 354, 360, + 248, 209, 386, 394, 570, 310, 215, 216, 217, 516, + 517, 518, 519, 607, 608, 612, 204, 457, 458, 459, + 460, 291, 602, 307, 463, 462, 329, 330, 375, 444, + 532, 534, 545, 549, 551, 553, 559, 562, 533, 535, + 546, 550, 552, 554, 560, 563, 522, 524, 526, 528, + 541, 540, 537, 565, 566, 543, 548, 527, 539, 544, + 557, 564, 561, 521, 525, 529, 538, 556, 555, 536, + 547, 558, 542, 530, 523, 531, 0, 196, 220, 364, + 2030, 449, 287, 637, 606, 601, 205, 222, 0, 261, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, + 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, + 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, + 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, + 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, + 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, + 613, 619, 475, 299, 300, 439, 440, 312, 313, 633, + 634, 298, 590, 620, 588, 632, 614, 433, 374, 0, + 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, + 289, 250, 0, 0, 210, 245, 229, 258, 273, 276, + 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, + 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, + 372, 568, 569, 314, 520, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 411, 0, 0, 0, + 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, + 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, + 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, + 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, + 0, 0, 0, 0, 2021, 707, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, + 0, 0, 347, 356, 355, 336, 337, 339, 341, 346, + 353, 359, 0, 0, 0, 0, 0, 264, 319, 271, + 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, + 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, + 256, 0, 448, 0, 0, 0, 616, 0, 0, 0, + 0, 0, 0, 0, 361, 0, 328, 197, 224, 0, + 0, 407, 456, 468, 0, 0, 0, 252, 0, 466, + 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, + 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, + 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, + 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, + 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, + 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, + 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, + 0, 0, 257, 410, 581, 582, 255, 639, 227, 610, + 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, + 388, 378, 332, 351, 352, 279, 305, 442, 371, 443, + 304, 306, 399, 398, 400, 206, 598, 0, 207, 0, + 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, + 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, + 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, + 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, + 0, 0, 420, 467, 239, 596, 490, 199, 0, 0, + 0, 0, 253, 254, 0, 567, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 641, 642, 643, 644, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, + 505, 0, 507, 0, 0, 0, 0, 0, 0, 583, + 584, 659, 380, 480, 593, 333, 345, 348, 338, 357, + 0, 358, 334, 335, 340, 342, 343, 344, 349, 350, + 354, 360, 248, 209, 386, 394, 570, 310, 215, 216, + 217, 516, 517, 518, 519, 607, 608, 612, 204, 457, + 458, 459, 460, 291, 602, 307, 463, 462, 329, 330, + 375, 444, 532, 534, 545, 549, 551, 553, 559, 562, + 533, 535, 546, 550, 552, 554, 560, 563, 522, 524, + 526, 528, 541, 540, 537, 565, 566, 543, 548, 527, + 539, 544, 557, 564, 561, 521, 525, 529, 538, 556, + 555, 536, 547, 558, 542, 530, 523, 531, 0, 196, + 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, + 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, + 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, + 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, + 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, + 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, + 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, + 580, 595, 613, 619, 475, 299, 300, 439, 440, 312, + 313, 633, 634, 298, 590, 620, 588, 632, 614, 433, + 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, + 226, 461, 289, 250, 0, 0, 210, 245, 229, 258, + 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, + 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, + 392, 0, 372, 568, 569, 314, 520, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, + 1885, 0, 0, 0, 0, 0, 0, 269, 0, 0, + 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, + 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, + 381, 423, 510, 417, 0, 366, 0, 0, 491, 396, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 321, 247, 323, 202, 408, + 492, 285, 0, 0, 0, 0, 0, 707, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 244, 0, 0, 0, 347, 356, 355, 336, 337, 339, + 341, 346, 353, 359, 0, 0, 0, 0, 0, 264, + 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, + 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, + 0, 397, 256, 0, 448, 0, 0, 0, 616, 0, + 0, 0, 0, 0, 0, 0, 361, 0, 328, 197, + 224, 0, 0, 407, 456, 468, 0, 0, 0, 252, + 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, + 286, 0, 0, 465, 368, 577, 445, 591, 617, 618, + 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, + 597, 488, 393, 573, 574, 327, 487, 294, 201, 365, + 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, + 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, + 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, + 267, 292, 0, 0, 257, 410, 581, 582, 255, 639, + 227, 610, 219, 0, 609, 403, 576, 587, 390, 379, + 218, 585, 388, 378, 332, 351, 352, 279, 305, 442, + 371, 443, 304, 306, 399, 398, 400, 206, 598, 0, + 207, 0, 493, 599, 640, 447, 211, 233, 234, 236, + 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, + 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, + 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, + 331, 369, 0, 0, 420, 467, 239, 596, 490, 199, + 0, 0, 0, 0, 253, 254, 0, 567, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 641, 642, 643, + 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, + 503, 504, 505, 0, 507, 0, 0, 0, 0, 0, + 0, 583, 584, 659, 380, 480, 593, 333, 345, 348, + 338, 357, 0, 358, 334, 335, 340, 342, 343, 344, + 349, 350, 354, 360, 248, 209, 386, 394, 570, 310, + 215, 216, 217, 516, 517, 518, 519, 607, 608, 612, + 204, 457, 458, 459, 460, 291, 602, 307, 463, 462, + 329, 330, 375, 444, 532, 534, 545, 549, 551, 553, + 559, 562, 533, 535, 546, 550, 552, 554, 560, 563, + 522, 524, 526, 528, 541, 540, 537, 565, 566, 543, + 548, 527, 539, 544, 557, 564, 561, 521, 525, 529, + 538, 556, 555, 536, 547, 558, 542, 530, 523, 531, + 0, 196, 220, 364, 0, 449, 287, 637, 606, 601, + 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, + 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, + 320, 326, 376, 382, 383, 384, 385, 404, 405, 406, + 409, 412, 413, 416, 418, 419, 422, 426, 430, 431, + 432, 434, 436, 438, 450, 455, 469, 470, 471, 472, + 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, + 508, 578, 580, 595, 613, 619, 475, 299, 300, 439, + 440, 312, 313, 633, 634, 298, 590, 620, 588, 632, + 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, + 605, 496, 226, 461, 289, 250, 0, 0, 210, 245, + 229, 258, 273, 276, 322, 387, 395, 424, 429, 295, + 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, + 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 411, 0, 1883, 0, 0, 0, 0, 0, 0, 269, + 0, 0, 0, 0, 362, 266, 0, 0, 425, 0, + 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, + 249, 315, 381, 423, 510, 417, 0, 366, 0, 0, + 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, + 202, 408, 492, 285, 0, 0, 0, 0, 0, 707, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, + 0, 0, 244, 0, 0, 0, 347, 356, 355, 336, + 337, 339, 341, 346, 353, 359, 0, 0, 0, 0, + 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, + 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 296, 0, 397, 256, 0, 448, 0, 0, 0, + 616, 0, 0, 0, 0, 0, 0, 0, 361, 0, + 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, + 0, 252, 0, 466, 421, 594, 232, 283, 453, 427, + 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, + 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, + 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, + 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, + 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, + 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, + 0, 452, 267, 292, 0, 0, 257, 410, 581, 582, + 255, 639, 227, 610, 219, 0, 609, 403, 576, 587, + 390, 379, 218, 585, 388, 378, 332, 351, 352, 279, + 305, 442, 371, 443, 304, 306, 399, 398, 400, 206, + 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, + 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, + 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, + 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, + 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, + 490, 199, 0, 0, 0, 0, 253, 254, 0, 567, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, + 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, + 501, 502, 503, 504, 505, 0, 507, 0, 0, 0, + 0, 0, 0, 583, 584, 659, 380, 480, 593, 333, + 345, 348, 338, 357, 0, 358, 334, 335, 340, 342, + 343, 344, 349, 350, 354, 360, 248, 209, 386, 394, + 570, 310, 215, 216, 217, 516, 517, 518, 519, 607, + 608, 612, 204, 457, 458, 459, 460, 291, 602, 307, + 463, 462, 329, 330, 375, 444, 532, 534, 545, 549, + 551, 553, 559, 562, 533, 535, 546, 550, 552, 554, + 560, 563, 522, 524, 526, 528, 541, 540, 537, 565, + 566, 543, 548, 527, 539, 544, 557, 564, 561, 521, + 525, 529, 538, 556, 555, 536, 547, 558, 542, 530, + 523, 531, 0, 196, 220, 364, 0, 449, 287, 637, + 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, + 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, + 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, + 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, + 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, + 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, + 494, 495, 508, 578, 580, 595, 613, 619, 475, 299, + 300, 439, 440, 312, 313, 633, 634, 298, 590, 620, + 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, + 318, 0, 605, 496, 226, 461, 289, 250, 0, 0, + 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, + 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, + 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, + 520, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 411, 0, 1881, 0, 0, 0, 0, 0, + 0, 269, 0, 0, 0, 0, 362, 266, 0, 0, + 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, + 272, 268, 249, 315, 381, 423, 510, 417, 0, 366, + 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 321, + 247, 323, 202, 408, 492, 285, 0, 0, 0, 0, + 0, 707, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 237, 0, 0, 244, 0, 0, 0, 347, 356, + 355, 336, 337, 339, 341, 346, 353, 359, 0, 0, + 0, 0, 0, 264, 319, 271, 263, 572, 0, 0, + 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 296, 0, 397, 256, 0, 448, 0, + 0, 0, 616, 0, 0, 0, 0, 0, 0, 0, + 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, + 0, 0, 0, 252, 0, 466, 421, 594, 232, 283, + 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, + 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, + 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, + 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, + 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, + 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, + 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, + 581, 582, 255, 639, 227, 610, 219, 0, 609, 403, + 576, 587, 390, 379, 218, 585, 388, 378, 332, 351, + 352, 279, 305, 442, 371, 443, 304, 306, 399, 398, + 400, 206, 598, 0, 207, 0, 493, 599, 640, 447, + 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, + 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, + 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, + 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, + 239, 596, 490, 199, 0, 0, 0, 0, 253, 254, + 0, 567, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 641, 642, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, + 500, 506, 501, 502, 503, 504, 505, 0, 507, 0, + 0, 0, 0, 0, 0, 583, 584, 659, 380, 480, + 593, 333, 345, 348, 338, 357, 0, 358, 334, 335, + 340, 342, 343, 344, 349, 350, 354, 360, 248, 209, + 386, 394, 570, 310, 215, 216, 217, 516, 517, 518, + 519, 607, 608, 612, 204, 457, 458, 459, 460, 291, + 602, 307, 463, 462, 329, 330, 375, 444, 532, 534, + 545, 549, 551, 553, 559, 562, 533, 535, 546, 550, + 552, 554, 560, 563, 522, 524, 526, 528, 541, 540, + 537, 565, 566, 543, 548, 527, 539, 544, 557, 564, + 561, 521, 525, 529, 538, 556, 555, 536, 547, 558, + 542, 530, 523, 531, 0, 196, 220, 364, 0, 449, + 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, + 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, + 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, + 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, + 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, + 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, + 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, + 475, 299, 300, 439, 440, 312, 313, 633, 634, 298, + 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, + 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, + 0, 0, 210, 245, 229, 258, 273, 276, 322, 387, + 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, + 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, + 569, 314, 520, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 411, 0, 1879, 0, 0, 0, + 0, 0, 0, 269, 0, 0, 0, 0, 362, 266, + 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, + 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, + 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 321, 247, 323, 202, 408, 492, 285, 0, 0, + 0, 0, 0, 707, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, + 347, 356, 355, 336, 337, 339, 341, 346, 353, 359, + 0, 0, 0, 0, 0, 264, 319, 271, 263, 572, + 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, + 448, 0, 0, 0, 616, 0, 0, 0, 0, 0, + 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, + 456, 468, 0, 0, 0, 252, 0, 466, 421, 594, + 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, + 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, + 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, + 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, + 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, + 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, + 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, + 257, 410, 581, 582, 255, 639, 227, 610, 219, 0, + 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, + 332, 351, 352, 279, 305, 442, 371, 443, 304, 306, + 399, 398, 400, 206, 598, 0, 207, 0, 493, 599, + 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, + 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, + 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, + 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, + 420, 467, 239, 596, 490, 199, 0, 0, 0, 0, + 253, 254, 0, 567, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 641, 642, 643, 644, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, + 507, 0, 0, 0, 0, 0, 0, 583, 584, 659, + 380, 480, 593, 333, 345, 348, 338, 357, 0, 358, + 334, 335, 340, 342, 343, 344, 349, 350, 354, 360, + 248, 209, 386, 394, 570, 310, 215, 216, 217, 516, + 517, 518, 519, 607, 608, 612, 204, 457, 458, 459, + 460, 291, 602, 307, 463, 462, 329, 330, 375, 444, + 532, 534, 545, 549, 551, 553, 559, 562, 533, 535, + 546, 550, 552, 554, 560, 563, 522, 524, 526, 528, + 541, 540, 537, 565, 566, 543, 548, 527, 539, 544, + 557, 564, 561, 521, 525, 529, 538, 556, 555, 536, + 547, 558, 542, 530, 523, 531, 0, 196, 220, 364, + 0, 449, 287, 637, 606, 601, 205, 222, 0, 261, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, + 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, + 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, + 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, + 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, + 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, + 613, 619, 475, 299, 300, 439, 440, 312, 313, 633, + 634, 298, 590, 620, 588, 632, 614, 433, 374, 0, + 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, + 289, 250, 0, 0, 210, 245, 229, 258, 273, 276, + 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, + 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, + 372, 568, 569, 314, 520, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 411, 0, 1877, 0, + 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, + 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, + 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, + 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, + 0, 0, 0, 0, 0, 707, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, + 0, 0, 347, 356, 355, 336, 337, 339, 341, 346, + 353, 359, 0, 0, 0, 0, 0, 264, 319, 271, + 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, + 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, + 256, 0, 448, 0, 0, 0, 616, 0, 0, 0, + 0, 0, 0, 0, 361, 0, 328, 197, 224, 0, + 0, 407, 456, 468, 0, 0, 0, 252, 0, 466, + 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, + 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, + 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, + 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, + 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, + 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, + 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, + 0, 0, 257, 410, 581, 582, 255, 639, 227, 610, + 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, + 388, 378, 332, 351, 352, 279, 305, 442, 371, 443, + 304, 306, 399, 398, 400, 206, 598, 0, 207, 0, + 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, + 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, + 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, + 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, + 0, 0, 420, 467, 239, 596, 490, 199, 0, 0, + 0, 0, 253, 254, 0, 567, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 641, 642, 643, 644, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, + 505, 0, 507, 0, 0, 0, 0, 0, 0, 583, + 584, 659, 380, 480, 593, 333, 345, 348, 338, 357, + 0, 358, 334, 335, 340, 342, 343, 344, 349, 350, + 354, 360, 248, 209, 386, 394, 570, 310, 215, 216, + 217, 516, 517, 518, 519, 607, 608, 612, 204, 457, + 458, 459, 460, 291, 602, 307, 463, 462, 329, 330, + 375, 444, 532, 534, 545, 549, 551, 553, 559, 562, + 533, 535, 546, 550, 552, 554, 560, 563, 522, 524, + 526, 528, 541, 540, 537, 565, 566, 543, 548, 527, + 539, 544, 557, 564, 561, 521, 525, 529, 538, 556, + 555, 536, 547, 558, 542, 530, 523, 531, 0, 196, + 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, + 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, + 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, + 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, + 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, + 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, + 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, + 580, 595, 613, 619, 475, 299, 300, 439, 440, 312, + 313, 633, 634, 298, 590, 620, 588, 632, 614, 433, + 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, + 226, 461, 289, 250, 0, 0, 210, 245, 229, 258, + 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, + 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, + 392, 0, 372, 568, 569, 314, 520, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, + 1873, 0, 0, 0, 0, 0, 0, 269, 0, 0, + 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, + 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, + 381, 423, 510, 417, 0, 366, 0, 0, 491, 396, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 321, 247, 323, 202, 408, + 492, 285, 0, 0, 0, 0, 0, 707, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 244, 0, 0, 0, 347, 356, 355, 336, 337, 339, + 341, 346, 353, 359, 0, 0, 0, 0, 0, 264, + 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, + 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, + 0, 397, 256, 0, 448, 0, 0, 0, 616, 0, + 0, 0, 0, 0, 0, 0, 361, 0, 328, 197, + 224, 0, 0, 407, 456, 468, 0, 0, 0, 252, + 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, + 286, 0, 0, 465, 368, 577, 445, 591, 617, 618, + 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, + 597, 488, 393, 573, 574, 327, 487, 294, 201, 365, + 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, + 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, + 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, + 267, 292, 0, 0, 257, 410, 581, 582, 255, 639, + 227, 610, 219, 0, 609, 403, 576, 587, 390, 379, + 218, 585, 388, 378, 332, 351, 352, 279, 305, 442, + 371, 443, 304, 306, 399, 398, 400, 206, 598, 0, + 207, 0, 493, 599, 640, 447, 211, 233, 234, 236, + 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, + 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, + 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, + 331, 369, 0, 0, 420, 467, 239, 596, 490, 199, + 0, 0, 0, 0, 253, 254, 0, 567, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 641, 642, 643, + 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, + 503, 504, 505, 0, 507, 0, 0, 0, 0, 0, + 0, 583, 584, 659, 380, 480, 593, 333, 345, 348, + 338, 357, 0, 358, 334, 335, 340, 342, 343, 344, + 349, 350, 354, 360, 248, 209, 386, 394, 570, 310, + 215, 216, 217, 516, 517, 518, 519, 607, 608, 612, + 204, 457, 458, 459, 460, 291, 602, 307, 463, 462, + 329, 330, 375, 444, 532, 534, 545, 549, 551, 553, + 559, 562, 533, 535, 546, 550, 552, 554, 560, 563, + 522, 524, 526, 528, 541, 540, 537, 565, 566, 543, + 548, 527, 539, 544, 557, 564, 561, 521, 525, 529, + 538, 556, 555, 536, 547, 558, 542, 530, 523, 531, + 0, 196, 220, 364, 0, 449, 287, 637, 606, 601, + 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, + 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, + 320, 326, 376, 382, 383, 384, 385, 404, 405, 406, + 409, 412, 413, 416, 418, 419, 422, 426, 430, 431, + 432, 434, 436, 438, 450, 455, 469, 470, 471, 472, + 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, + 508, 578, 580, 595, 613, 619, 475, 299, 300, 439, + 440, 312, 313, 633, 634, 298, 590, 620, 588, 632, + 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, + 605, 496, 226, 461, 289, 250, 0, 0, 210, 245, + 229, 258, 273, 276, 322, 387, 395, 424, 429, 295, + 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, + 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 411, 0, 1871, 0, 0, 0, 0, 0, 0, 269, + 0, 0, 0, 0, 362, 266, 0, 0, 425, 0, + 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, + 249, 315, 381, 423, 510, 417, 0, 366, 0, 0, + 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, + 202, 408, 492, 285, 0, 0, 0, 0, 0, 707, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, + 0, 0, 244, 0, 0, 0, 347, 356, 355, 336, + 337, 339, 341, 346, 353, 359, 0, 0, 0, 0, + 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, + 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 296, 0, 397, 256, 0, 448, 0, 0, 0, + 616, 0, 0, 0, 0, 0, 0, 0, 361, 0, + 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, + 0, 252, 0, 466, 421, 594, 232, 283, 453, 427, + 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, + 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, + 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, + 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, + 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, + 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, + 0, 452, 267, 292, 0, 0, 257, 410, 581, 582, + 255, 639, 227, 610, 219, 0, 609, 403, 576, 587, + 390, 379, 218, 585, 388, 378, 332, 351, 352, 279, + 305, 442, 371, 443, 304, 306, 399, 398, 400, 206, + 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, + 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, + 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, + 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, + 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, + 490, 199, 0, 0, 0, 0, 253, 254, 0, 567, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, + 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, + 501, 502, 503, 504, 505, 0, 507, 0, 0, 0, + 0, 0, 0, 583, 584, 659, 380, 480, 593, 333, + 345, 348, 338, 357, 0, 358, 334, 335, 340, 342, + 343, 344, 349, 350, 354, 360, 248, 209, 386, 394, + 570, 310, 215, 216, 217, 516, 517, 518, 519, 607, + 608, 612, 204, 457, 458, 459, 460, 291, 602, 307, + 463, 462, 329, 330, 375, 444, 532, 534, 545, 549, + 551, 553, 559, 562, 533, 535, 546, 550, 552, 554, + 560, 563, 522, 524, 526, 528, 541, 540, 537, 565, + 566, 543, 548, 527, 539, 544, 557, 564, 561, 521, + 525, 529, 538, 556, 555, 536, 547, 558, 542, 530, + 523, 531, 0, 196, 220, 364, 0, 449, 287, 637, + 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, + 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, + 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, + 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, + 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, + 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, + 494, 495, 508, 578, 580, 595, 613, 619, 475, 299, + 300, 439, 440, 312, 313, 633, 634, 298, 590, 620, + 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, + 318, 0, 605, 496, 226, 461, 289, 250, 0, 0, + 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, + 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, + 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, + 520, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 411, 0, 1869, 0, 0, 0, 0, 0, + 0, 269, 0, 0, 0, 0, 362, 266, 0, 0, + 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, + 272, 268, 249, 315, 381, 423, 510, 417, 0, 366, + 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 321, + 247, 323, 202, 408, 492, 285, 0, 0, 0, 0, + 0, 707, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 237, 0, 0, 244, 0, 0, 0, 347, 356, + 355, 336, 337, 339, 341, 346, 353, 359, 0, 0, + 0, 0, 0, 264, 319, 271, 263, 572, 0, 0, + 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 296, 0, 397, 256, 0, 448, 0, + 0, 0, 616, 0, 0, 0, 0, 0, 0, 0, + 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, + 0, 0, 0, 252, 0, 466, 421, 594, 232, 283, + 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, + 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, + 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, + 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, + 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, + 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, + 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, + 581, 582, 255, 639, 227, 610, 219, 0, 609, 403, + 576, 587, 390, 379, 218, 585, 388, 378, 332, 351, + 352, 279, 305, 442, 371, 443, 304, 306, 399, 398, + 400, 206, 598, 0, 207, 0, 493, 599, 640, 447, + 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, + 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, + 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, + 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, + 239, 596, 490, 199, 0, 0, 0, 0, 253, 254, + 0, 567, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 641, 642, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, + 500, 506, 501, 502, 503, 504, 505, 0, 507, 0, + 0, 0, 0, 0, 0, 583, 584, 659, 380, 480, + 593, 333, 345, 348, 338, 357, 0, 358, 334, 335, + 340, 342, 343, 344, 349, 350, 354, 360, 248, 209, + 386, 394, 570, 310, 215, 216, 217, 516, 517, 518, + 519, 607, 608, 612, 204, 457, 458, 459, 460, 291, + 602, 307, 463, 462, 329, 330, 375, 444, 532, 534, + 545, 549, 551, 553, 559, 562, 533, 535, 546, 550, + 552, 554, 560, 563, 522, 524, 526, 528, 541, 540, + 537, 565, 566, 543, 548, 527, 539, 544, 557, 564, + 561, 521, 525, 529, 538, 556, 555, 536, 547, 558, + 542, 530, 523, 531, 0, 196, 220, 364, 0, 449, + 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, + 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, + 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, + 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, + 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, + 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, + 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, + 475, 299, 300, 439, 440, 312, 313, 633, 634, 298, + 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, + 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, + 0, 0, 210, 245, 229, 258, 273, 276, 322, 387, + 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, + 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, + 569, 314, 520, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 411, 0, 0, 0, 0, 0, + 0, 0, 0, 269, 0, 0, 0, 0, 362, 266, + 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, + 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, + 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 321, 247, 323, 202, 408, 492, 285, 0, 1844, + 0, 0, 0, 707, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, + 347, 356, 355, 336, 337, 339, 341, 346, 353, 359, + 0, 0, 0, 0, 0, 264, 319, 271, 263, 572, + 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, + 448, 0, 0, 0, 616, 0, 0, 0, 0, 0, + 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, + 456, 468, 0, 0, 0, 252, 0, 466, 421, 594, + 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, + 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, + 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, + 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, + 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, + 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, + 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, + 257, 410, 581, 582, 255, 639, 227, 610, 219, 0, + 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, + 332, 351, 352, 279, 305, 442, 371, 443, 304, 306, + 399, 398, 400, 206, 598, 0, 207, 0, 493, 599, + 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, + 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, + 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, + 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, + 420, 467, 239, 596, 490, 199, 0, 0, 0, 0, + 253, 254, 0, 567, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 641, 642, 643, 644, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, + 507, 0, 0, 0, 0, 0, 0, 583, 584, 659, + 380, 480, 593, 333, 345, 348, 338, 357, 0, 358, + 334, 335, 340, 342, 343, 344, 349, 350, 354, 360, + 248, 209, 386, 394, 570, 310, 215, 216, 217, 516, + 517, 518, 519, 607, 608, 612, 204, 457, 458, 459, + 460, 291, 602, 307, 463, 462, 329, 330, 375, 444, + 532, 534, 545, 549, 551, 553, 559, 562, 533, 535, + 546, 550, 552, 554, 560, 563, 522, 524, 526, 528, + 541, 540, 537, 565, 566, 543, 548, 527, 539, 544, + 557, 564, 561, 521, 525, 529, 538, 556, 555, 536, + 547, 558, 542, 530, 523, 531, 0, 196, 220, 364, + 0, 449, 287, 637, 606, 601, 205, 222, 0, 261, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, + 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, + 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, + 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, + 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, + 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, + 613, 619, 475, 299, 300, 439, 440, 312, 313, 633, + 634, 298, 590, 620, 588, 632, 614, 433, 374, 0, + 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, + 289, 250, 0, 0, 210, 245, 229, 258, 273, 276, + 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, + 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, + 372, 568, 569, 314, 520, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 411, 0, 0, 0, + 0, 0, 0, 0, 1743, 269, 0, 0, 0, 0, + 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, + 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, + 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, + 0, 0, 0, 0, 0, 194, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, + 0, 0, 347, 356, 355, 336, 337, 339, 341, 346, + 353, 359, 0, 0, 0, 0, 0, 264, 319, 271, + 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, + 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, + 256, 0, 448, 0, 0, 0, 616, 0, 0, 0, + 0, 0, 0, 0, 361, 0, 328, 197, 224, 0, + 0, 407, 456, 468, 0, 0, 0, 252, 0, 466, + 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, + 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, + 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, + 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, + 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, + 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, + 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, + 0, 0, 257, 410, 581, 582, 255, 639, 227, 610, + 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, + 388, 378, 332, 351, 352, 279, 305, 442, 371, 443, + 304, 306, 399, 398, 400, 206, 598, 0, 207, 0, + 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, + 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, + 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, + 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, + 0, 0, 420, 467, 239, 596, 490, 199, 0, 0, + 0, 0, 253, 254, 0, 567, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 641, 642, 643, 644, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, + 505, 0, 507, 0, 0, 0, 0, 0, 0, 583, + 584, 659, 380, 480, 593, 333, 345, 348, 338, 357, + 0, 358, 334, 335, 340, 342, 343, 344, 349, 350, + 354, 360, 248, 209, 386, 394, 570, 310, 215, 216, + 217, 516, 517, 518, 519, 607, 608, 612, 204, 457, + 458, 459, 460, 291, 602, 307, 463, 462, 329, 330, + 375, 444, 532, 534, 545, 549, 551, 553, 559, 562, + 533, 535, 546, 550, 552, 554, 560, 563, 522, 524, + 526, 528, 541, 540, 537, 565, 566, 543, 548, 527, + 539, 544, 557, 564, 561, 521, 525, 529, 538, 556, + 555, 536, 547, 558, 542, 530, 523, 531, 0, 196, + 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, + 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, + 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, + 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, + 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, + 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, + 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, + 580, 595, 613, 619, 475, 299, 300, 439, 440, 312, + 313, 633, 634, 298, 590, 620, 588, 632, 614, 433, + 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, + 226, 461, 289, 250, 0, 0, 210, 245, 229, 258, + 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, + 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, + 392, 0, 372, 568, 569, 314, 520, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, + 0, 0, 0, 0, 0, 0, 0, 269, 0, 0, + 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, + 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, + 381, 423, 510, 417, 0, 366, 0, 0, 491, 396, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 321, 247, 323, 202, 408, + 492, 285, 0, 95, 0, 0, 0, 941, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 244, 0, 0, 0, 347, 356, 355, 336, 337, 339, + 341, 346, 353, 359, 0, 0, 0, 0, 0, 264, + 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, + 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, + 0, 397, 256, 0, 448, 0, 0, 0, 616, 0, + 0, 0, 0, 0, 0, 0, 361, 0, 328, 197, + 224, 0, 0, 407, 456, 468, 0, 0, 0, 252, + 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, + 286, 0, 0, 465, 368, 577, 445, 591, 617, 618, + 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, + 597, 488, 393, 573, 574, 327, 487, 294, 201, 365, + 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, + 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, + 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, + 267, 292, 0, 0, 257, 410, 581, 582, 255, 639, + 227, 610, 219, 0, 609, 403, 576, 587, 390, 379, + 218, 585, 388, 378, 332, 351, 352, 279, 305, 442, + 371, 443, 304, 306, 399, 398, 400, 206, 598, 0, + 207, 0, 493, 599, 640, 447, 211, 233, 234, 236, + 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, + 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, + 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, + 331, 369, 0, 0, 420, 467, 239, 596, 490, 199, + 0, 0, 0, 0, 253, 254, 0, 567, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 641, 642, 643, + 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, + 503, 504, 505, 0, 507, 0, 0, 0, 0, 0, + 0, 583, 584, 659, 380, 480, 593, 333, 345, 348, + 338, 357, 0, 358, 334, 335, 340, 342, 343, 344, + 349, 350, 354, 360, 248, 209, 386, 394, 570, 310, + 215, 216, 217, 516, 517, 518, 519, 607, 608, 612, + 204, 457, 458, 459, 460, 291, 602, 307, 463, 462, + 329, 330, 375, 444, 532, 534, 545, 549, 551, 553, + 559, 562, 533, 535, 546, 550, 552, 554, 560, 563, + 522, 524, 526, 528, 541, 540, 537, 565, 566, 543, + 548, 527, 539, 544, 557, 564, 561, 521, 525, 529, + 538, 556, 555, 536, 547, 558, 542, 530, 523, 531, + 0, 196, 220, 364, 0, 449, 287, 637, 606, 601, + 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, + 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, + 320, 326, 376, 382, 383, 384, 385, 404, 405, 406, + 409, 412, 413, 416, 418, 419, 422, 426, 430, 431, + 432, 434, 436, 438, 450, 455, 469, 470, 471, 472, + 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, + 508, 578, 580, 595, 613, 619, 475, 299, 300, 439, + 440, 312, 313, 633, 634, 298, 590, 620, 588, 632, + 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, + 605, 496, 226, 461, 289, 250, 0, 0, 210, 245, + 229, 258, 273, 276, 322, 387, 395, 424, 429, 295, + 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, + 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 411, 0, 0, 0, 0, 0, 0, 0, 0, 269, + 0, 0, 0, 0, 362, 266, 0, 0, 425, 0, + 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, + 249, 315, 381, 423, 510, 417, 0, 366, 0, 0, + 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, + 202, 408, 492, 285, 0, 0, 0, 0, 0, 194, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, + 0, 0, 244, 0, 0, 0, 347, 356, 355, 336, + 337, 339, 341, 346, 353, 359, 0, 0, 0, 0, + 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, + 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1430, + 0, 296, 0, 397, 256, 0, 448, 0, 0, 0, + 616, 0, 0, 0, 0, 0, 0, 0, 361, 0, + 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, + 0, 252, 0, 466, 421, 594, 232, 283, 453, 427, + 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, + 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, + 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, + 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, + 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, + 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, + 0, 452, 267, 292, 0, 0, 257, 410, 581, 582, + 255, 639, 227, 610, 219, 0, 609, 403, 576, 587, + 390, 379, 218, 585, 388, 378, 332, 351, 352, 279, + 305, 442, 371, 443, 304, 306, 399, 398, 400, 206, + 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, + 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, + 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, + 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, + 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, + 490, 199, 0, 0, 0, 0, 253, 254, 0, 567, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, + 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, + 501, 502, 503, 504, 505, 0, 507, 0, 0, 0, + 0, 0, 0, 583, 584, 659, 380, 480, 593, 333, + 345, 348, 338, 357, 0, 358, 334, 335, 340, 342, + 343, 344, 349, 350, 354, 360, 248, 209, 386, 394, + 570, 310, 215, 216, 217, 516, 517, 518, 519, 607, + 608, 612, 204, 457, 458, 459, 460, 291, 602, 307, + 463, 462, 329, 330, 375, 444, 532, 534, 545, 549, + 551, 553, 559, 562, 533, 535, 546, 550, 552, 554, + 560, 563, 522, 524, 526, 528, 541, 540, 537, 565, + 566, 543, 548, 527, 539, 544, 557, 564, 561, 521, + 525, 529, 538, 556, 555, 536, 547, 558, 542, 530, + 523, 531, 0, 196, 220, 364, 0, 449, 287, 637, + 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, + 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, + 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, + 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, + 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, + 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, + 494, 495, 508, 578, 580, 595, 613, 619, 475, 299, + 300, 439, 440, 312, 313, 633, 634, 1429, 590, 620, + 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, + 318, 0, 605, 496, 226, 461, 289, 250, 0, 0, + 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, + 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, + 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, + 520, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 411, 0, 0, 0, 0, 0, 0, 0, + 0, 269, 0, 0, 0, 0, 362, 266, 0, 0, + 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, + 272, 268, 249, 315, 381, 423, 510, 417, 0, 366, + 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 321, + 247, 323, 202, 408, 492, 285, 0, 0, 0, 0, + 0, 194, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 237, 0, 0, 244, 0, 0, 0, 347, 356, + 355, 336, 337, 339, 341, 346, 353, 359, 0, 0, + 0, 0, 0, 264, 319, 271, 263, 572, 0, 0, + 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 296, 0, 397, 256, 0, 448, 0, + 0, 0, 616, 0, 0, 0, 0, 0, 0, 0, + 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, + 0, 0, 0, 252, 0, 466, 421, 594, 232, 283, + 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, + 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, + 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, + 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, + 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, + 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, + 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, + 581, 582, 255, 639, 227, 610, 219, 0, 609, 403, + 576, 587, 390, 379, 218, 585, 388, 378, 332, 351, + 352, 279, 305, 442, 371, 443, 304, 306, 399, 398, + 400, 206, 598, 0, 207, 0, 493, 599, 640, 447, + 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, + 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, + 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, + 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, + 239, 596, 490, 199, 0, 0, 0, 0, 253, 254, + 0, 567, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 641, 642, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, + 500, 506, 501, 502, 503, 504, 505, 0, 507, 0, + 0, 0, 0, 0, 0, 583, 584, 659, 380, 480, + 593, 333, 345, 348, 338, 357, 0, 358, 334, 335, + 340, 342, 343, 344, 349, 350, 354, 360, 248, 209, + 386, 394, 570, 310, 215, 216, 217, 516, 517, 518, + 519, 607, 608, 612, 204, 457, 458, 459, 460, 291, + 602, 307, 463, 462, 329, 330, 375, 444, 532, 534, + 545, 549, 551, 553, 559, 562, 533, 535, 546, 550, + 552, 554, 560, 563, 522, 524, 526, 528, 541, 540, + 537, 565, 566, 543, 548, 527, 539, 544, 557, 564, + 561, 521, 525, 529, 538, 556, 555, 536, 547, 558, + 542, 530, 523, 531, 0, 196, 220, 364, 0, 449, + 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1030, 0, 0, 0, 198, + 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, + 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, + 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, + 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, + 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, + 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, + 475, 299, 300, 439, 440, 312, 313, 633, 634, 298, + 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, + 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, + 0, 0, 210, 245, 229, 258, 273, 276, 322, 387, + 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, + 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, + 569, 314, 520, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 411, 0, 0, 0, 0, 0, + 0, 0, 0, 269, 0, 0, 0, 0, 362, 266, + 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, + 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, + 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 321, 247, 323, 202, 408, 492, 285, 0, 0, + 0, 0, 0, 194, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, + 347, 356, 355, 336, 337, 339, 341, 346, 353, 359, + 0, 0, 0, 0, 0, 264, 319, 271, 263, 572, + 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, + 448, 0, 662, 0, 616, 0, 0, 0, 0, 0, + 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, + 456, 468, 0, 0, 0, 252, 0, 466, 421, 594, + 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, + 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, + 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, + 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, + 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, + 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, + 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, + 257, 410, 581, 582, 255, 639, 227, 610, 219, 0, + 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, + 332, 351, 352, 279, 305, 442, 371, 443, 304, 306, + 399, 398, 400, 206, 598, 0, 207, 0, 493, 599, + 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, + 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, + 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, + 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, + 420, 467, 239, 596, 490, 199, 0, 0, 0, 0, + 253, 254, 0, 567, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 641, 642, 643, 644, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, + 507, 0, 0, 0, 0, 0, 0, 583, 584, 659, + 380, 480, 593, 333, 345, 348, 338, 357, 0, 358, + 334, 335, 340, 342, 343, 344, 349, 350, 354, 360, + 248, 209, 386, 394, 570, 310, 215, 216, 217, 516, + 517, 518, 519, 607, 608, 612, 204, 457, 458, 459, + 460, 291, 602, 307, 463, 462, 329, 330, 375, 444, + 532, 534, 545, 549, 551, 553, 559, 562, 533, 535, + 546, 550, 552, 554, 560, 563, 522, 524, 526, 528, + 541, 540, 537, 565, 566, 543, 548, 527, 539, 544, + 557, 564, 561, 521, 525, 529, 538, 556, 555, 536, + 547, 558, 542, 530, 523, 531, 0, 196, 220, 364, + 0, 449, 287, 637, 606, 601, 205, 222, 0, 261, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, + 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, + 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, + 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, + 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, + 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, + 613, 619, 475, 299, 300, 439, 440, 312, 313, 633, + 634, 298, 590, 620, 588, 632, 614, 433, 374, 0, + 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, + 289, 250, 0, 0, 210, 245, 229, 258, 273, 276, + 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, + 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, + 372, 568, 569, 314, 520, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 411, 0, 0, 0, + 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, + 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, + 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, + 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, + 0, 0, 0, 0, 0, 707, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, + 0, 0, 347, 356, 355, 336, 337, 339, 341, 346, + 353, 359, 0, 0, 0, 0, 0, 264, 319, 271, + 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, + 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, + 256, 0, 448, 0, 0, 0, 616, 0, 0, 0, + 0, 0, 0, 0, 361, 0, 328, 197, 224, 0, + 0, 407, 456, 468, 0, 0, 0, 252, 0, 466, + 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, + 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, + 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, + 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, + 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, + 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, + 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, + 0, 0, 257, 410, 581, 582, 255, 639, 227, 610, + 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, + 388, 378, 332, 351, 352, 279, 305, 442, 371, 443, + 304, 306, 399, 398, 400, 206, 598, 0, 207, 0, + 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, + 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, + 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, + 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, + 0, 0, 420, 467, 239, 596, 490, 199, 0, 0, + 0, 0, 253, 254, 0, 567, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 641, 642, 643, 644, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, + 505, 0, 507, 0, 0, 0, 0, 0, 0, 583, + 584, 659, 380, 480, 593, 333, 345, 348, 338, 357, + 0, 358, 334, 335, 340, 342, 343, 344, 349, 350, + 354, 360, 248, 209, 386, 394, 570, 310, 215, 216, + 217, 516, 517, 518, 519, 607, 608, 612, 204, 457, + 458, 459, 460, 291, 602, 307, 463, 462, 329, 330, + 375, 444, 532, 534, 545, 549, 551, 553, 559, 562, + 533, 535, 546, 550, 552, 554, 560, 563, 522, 524, + 526, 528, 541, 540, 537, 565, 566, 543, 548, 527, + 539, 544, 557, 564, 561, 521, 525, 529, 538, 556, + 555, 536, 547, 558, 542, 530, 523, 531, 0, 196, + 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, + 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, + 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, + 376, 382, 383, 384, 385, 4024, 405, 406, 409, 412, + 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, + 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, + 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, + 580, 595, 613, 619, 475, 299, 300, 439, 440, 312, + 313, 633, 634, 298, 590, 620, 588, 632, 614, 433, + 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, + 226, 461, 289, 250, 0, 0, 210, 245, 229, 258, + 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, + 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, + 392, 0, 372, 568, 569, 314, 520, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, + 0, 0, 0, 0, 0, 0, 0, 269, 0, 0, + 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, + 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, + 381, 423, 510, 417, 0, 366, 0, 0, 491, 396, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 321, 247, 323, 202, 408, + 492, 285, 0, 0, 0, 0, 0, 707, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 244, 0, 0, 0, 347, 356, 355, 336, 337, 339, + 341, 346, 353, 359, 0, 0, 0, 0, 0, 264, + 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, + 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, + 0, 397, 256, 0, 448, 0, 0, 0, 616, 0, + 0, 0, 0, 0, 0, 0, 361, 0, 328, 197, + 224, 0, 0, 407, 456, 468, 0, 0, 0, 252, + 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, + 286, 0, 0, 465, 368, 577, 445, 591, 617, 618, + 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, + 597, 488, 393, 573, 574, 327, 487, 294, 201, 365, + 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, + 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, + 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, + 267, 292, 0, 0, 257, 410, 581, 582, 255, 639, + 227, 610, 219, 0, 609, 403, 576, 587, 390, 379, + 218, 585, 388, 378, 332, 351, 352, 279, 305, 442, + 371, 443, 304, 306, 399, 398, 400, 206, 598, 0, + 207, 0, 493, 599, 640, 447, 211, 233, 234, 236, + 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, + 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, + 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, + 331, 369, 0, 0, 420, 467, 239, 596, 490, 199, + 0, 0, 0, 0, 253, 254, 0, 567, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 641, 642, 643, + 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, + 503, 504, 505, 0, 507, 0, 0, 0, 0, 0, + 0, 583, 584, 659, 380, 480, 593, 333, 345, 348, + 338, 357, 0, 358, 334, 335, 340, 342, 343, 344, + 349, 350, 354, 360, 248, 209, 386, 394, 570, 310, + 215, 216, 217, 516, 517, 518, 519, 607, 608, 612, + 204, 457, 458, 459, 460, 291, 602, 307, 463, 462, + 329, 330, 375, 444, 532, 534, 545, 549, 551, 553, + 559, 562, 533, 535, 546, 550, 552, 554, 560, 563, + 522, 524, 526, 528, 541, 540, 537, 565, 566, 543, + 548, 527, 539, 544, 557, 564, 561, 521, 525, 529, + 538, 556, 555, 536, 547, 558, 542, 530, 523, 531, + 0, 196, 220, 364, 0, 449, 287, 637, 606, 601, + 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, + 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, + 320, 326, 376, 382, 383, 384, 385, 404, 405, 406, + 409, 412, 413, 416, 418, 419, 422, 426, 430, 431, + 432, 434, 436, 438, 450, 455, 469, 470, 471, 472, + 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, + 508, 578, 580, 595, 613, 619, 475, 299, 300, 439, + 440, 312, 313, 633, 634, 298, 590, 620, 588, 632, + 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, + 605, 496, 226, 461, 289, 250, 0, 0, 210, 245, + 229, 258, 273, 276, 322, 387, 395, 424, 429, 295, + 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, + 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 411, 0, 0, 0, 0, 0, 0, 0, 0, 269, + 0, 0, 0, 0, 362, 266, 0, 0, 425, 0, + 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, + 249, 315, 381, 423, 510, 417, 0, 366, 0, 0, + 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, + 202, 408, 492, 285, 0, 0, 0, 0, 0, 941, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, + 0, 0, 244, 0, 0, 0, 347, 356, 355, 336, + 337, 339, 341, 346, 353, 359, 0, 0, 0, 0, + 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, + 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 296, 0, 397, 256, 0, 448, 0, 0, 0, + 616, 0, 0, 0, 0, 0, 0, 0, 361, 0, + 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, + 0, 252, 0, 466, 421, 594, 232, 283, 453, 427, + 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, + 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, + 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, + 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, + 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, + 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, + 0, 452, 267, 292, 0, 0, 257, 410, 581, 582, + 255, 639, 227, 610, 219, 0, 609, 403, 576, 587, + 390, 379, 218, 585, 388, 378, 332, 351, 352, 279, + 305, 442, 371, 443, 304, 306, 399, 398, 400, 206, + 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, + 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, + 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, + 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, + 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, + 490, 199, 0, 0, 0, 0, 253, 254, 0, 567, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, + 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, + 501, 502, 503, 504, 505, 0, 507, 0, 0, 0, + 0, 0, 0, 583, 584, 659, 380, 480, 593, 333, + 345, 348, 338, 357, 0, 358, 334, 335, 340, 342, + 343, 344, 349, 350, 354, 360, 248, 209, 386, 394, + 570, 310, 215, 216, 217, 516, 517, 518, 519, 607, + 608, 612, 204, 457, 458, 459, 460, 291, 602, 307, + 463, 462, 329, 330, 375, 444, 532, 534, 545, 549, + 551, 553, 559, 562, 533, 535, 546, 550, 552, 554, + 560, 563, 522, 524, 526, 528, 541, 540, 537, 565, + 566, 543, 548, 527, 539, 544, 557, 564, 561, 521, + 525, 529, 538, 556, 555, 536, 547, 558, 542, 530, + 523, 531, 0, 196, 220, 364, 0, 449, 287, 637, + 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, + 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, + 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, + 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, + 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, + 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, + 494, 495, 508, 578, 580, 595, 613, 619, 475, 299, + 300, 439, 440, 312, 313, 633, 634, 298, 590, 620, + 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, + 318, 0, 605, 496, 226, 461, 289, 250, 0, 0, + 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, + 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, + 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, + 520, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 411, 0, 0, 0, 0, 0, 0, 0, + 0, 269, 0, 0, 0, 0, 362, 266, 0, 0, + 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, + 272, 268, 249, 315, 381, 423, 510, 417, 0, 366, + 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 321, + 247, 323, 202, 408, 492, 285, 0, 0, 0, 0, + 0, 194, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 237, 0, 0, 244, 0, 0, 0, 347, 356, + 355, 336, 337, 339, 341, 346, 353, 359, 0, 0, + 0, 0, 0, 264, 319, 271, 263, 572, 0, 0, + 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 296, 0, 397, 256, 0, 448, 0, + 0, 0, 616, 0, 0, 0, 0, 0, 0, 0, + 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, + 0, 0, 0, 252, 0, 466, 421, 594, 232, 283, + 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, + 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, + 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, + 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, + 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, + 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, + 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, + 581, 582, 255, 639, 227, 610, 219, 0, 609, 403, + 576, 587, 390, 379, 218, 585, 388, 378, 332, 351, + 352, 279, 305, 442, 371, 443, 304, 306, 399, 398, + 400, 206, 598, 0, 207, 0, 493, 599, 640, 447, + 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, + 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, + 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, + 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, + 239, 596, 490, 199, 0, 0, 0, 0, 253, 254, + 0, 567, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 641, 642, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, + 500, 506, 501, 502, 503, 504, 505, 0, 507, 0, + 0, 0, 0, 0, 0, 583, 584, 659, 380, 480, + 593, 333, 345, 348, 338, 357, 0, 358, 334, 335, + 340, 342, 343, 344, 349, 350, 354, 360, 248, 209, + 386, 394, 570, 310, 215, 216, 217, 516, 517, 518, + 519, 607, 608, 612, 204, 457, 458, 459, 460, 291, + 602, 307, 463, 462, 329, 330, 375, 444, 532, 534, + 545, 549, 551, 553, 559, 562, 533, 535, 546, 550, + 552, 554, 560, 563, 522, 524, 526, 528, 541, 540, + 537, 565, 566, 543, 548, 527, 539, 544, 557, 564, + 561, 521, 525, 529, 538, 556, 555, 536, 547, 558, + 542, 530, 523, 531, 0, 196, 220, 364, 0, 449, + 287, 637, 606, 601, 205, 222, 0, 261, 3684, 3686, + 3685, 3749, 3750, 3751, 3752, 3753, 3754, 3755, 789, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, + 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, + 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, + 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, + 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, + 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, + 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, + 475, 299, 300, 439, 440, 312, 313, 633, 634, 298, + 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, + 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, + 0, 0, 210, 245, 229, 258, 273, 276, 322, 387, + 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, + 512, 513, 515, 391, 265, 428, 0, 0, 372, 568, + 569, 314, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3690, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 3698, + 3699, 0, 0, 3774, 3773, 3772, 0, 0, 3770, 3771, + 3769, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 3775, 910, 0, 765, 766, 3776, 3777, + 914, 3778, 768, 769, 911, 912, 0, 763, 767, 913, + 915, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 3681, 3682, 3683, 3687, 3688, + 3689, 3700, 3747, 3748, 3756, 3758, 866, 3757, 3759, 3760, + 3761, 3764, 3765, 3766, 3767, 3762, 3763, 3768, 3664, 3668, + 3665, 3666, 3667, 3679, 3669, 3670, 3671, 3672, 3673, 3674, + 3675, 3676, 3677, 3678, 3680, 3779, 3780, 3781, 3782, 3783, + 3784, 3693, 3697, 3696, 3694, 3695, 3691, 3692, 3719, 3718, + 3720, 3721, 3722, 3723, 3724, 3725, 3727, 3726, 3728, 3729, + 3730, 3731, 3732, 3733, 3701, 3702, 3705, 3706, 3704, 3703, + 3707, 3716, 3717, 3708, 3709, 3710, 3711, 3712, 3713, 3715, + 3714, 3734, 3735, 3736, 3737, 3738, 3740, 3739, 3743, 3744, + 3742, 3741, 3746, 3745, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 916, 0, 917, 0, + 0, 921, 0, 0, 0, 923, 922, 0, 924, 886, + 885, 0, 0, 918, 919, 0, 920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1194, - 0, 0, 0, 0, 1150, 0, 1169, 1241, 1726, 1133, - 287, 1144, 388, 247, 0, 438, 1248, 1258, 1191, 549, - 1262, 1189, 1188, 1235, 1148, 1254, 1182, 352, 1146, 319, - 193, 217, 0, 1180, 398, 446, 458, 1253, 1165, 1174, - 245, 1172, 456, 412, 527, 225, 274, 443, 418, 454, - 426, 277, 1214, 1233, 455, 359, 512, 436, 524, 550, - 551, 253, 392, 536, 496, 544, 568, 218, 250, 406, - 489, 530, 478, 384, 508, 509, 318, 477, 285, 196, - 356, 556, 216, 464, 358, 234, 223, 514, 533, 279, - 441, 563, 205, 491, 522, 231, 468, 0, 0, 570, - 239, 488, 207, 519, 487, 380, 315, 316, 206, 0, - 442, 258, 283, 0, 0, 248, 401, 516, 517, 246, - 571, 220, 543, 212, 1145, 542, 394, 511, 520, 381, - 370, 211, 518, 379, 369, 323, 342, 343, 270, 296, - 433, 362, 434, 295, 297, 390, 389, 391, 200, 531, - 0, 201, 0, 483, 532, 572, 226, 227, 229, 1160, - 269, 273, 281, 284, 292, 293, 302, 354, 405, 432, - 428, 437, 1249, 506, 525, 537, 548, 554, 555, 557, - 558, 559, 560, 561, 564, 562, 393, 300, 479, 322, - 360, 1238, 1280, 411, 457, 232, 529, 480, 1155, 1159, - 1153, 1220, 1154, 1209, 1210, 1156, 1271, 1272, 1273, 573, - 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, - 584, 585, 586, 587, 588, 589, 590, 0, 1242, 1149, - 0, 1157, 1158, 1251, 1260, 1261, 591, 371, 470, 526, - 324, 336, 339, 329, 348, 0, 349, 325, 326, 331, - 333, 334, 335, 340, 341, 345, 351, 241, 203, 377, - 385, 505, 301, 208, 209, 210, 498, 499, 500, 501, - 540, 541, 545, 447, 448, 449, 450, 282, 535, 298, - 453, 452, 320, 321, 366, 435, 1213, 192, 213, 355, - 1276, 439, 278, 569, 539, 534, 199, 215, 1152, 252, - 1163, 1171, 0, 1177, 1185, 1186, 1198, 1200, 1201, 1202, - 1203, 1221, 1222, 1224, 1232, 1234, 1237, 1239, 1246, 1257, - 1279, 194, 195, 202, 214, 224, 228, 235, 251, 266, - 268, 275, 288, 299, 307, 308, 311, 317, 367, 373, - 374, 375, 376, 395, 396, 397, 400, 403, 404, 407, - 409, 410, 413, 417, 421, 422, 423, 425, 427, 429, - 440, 445, 459, 460, 461, 462, 463, 466, 467, 472, - 473, 474, 475, 476, 484, 485, 490, 513, 515, 528, - 546, 552, 465, 290, 291, 430, 431, 303, 304, 566, - 567, 289, 523, 553, 521, 565, 547, 424, 365, 1212, - 1218, 368, 271, 294, 309, 1227, 538, 486, 219, 451, - 280, 243, 1245, 1247, 204, 238, 222, 249, 264, 267, - 313, 378, 386, 415, 420, 286, 261, 236, 444, 233, - 469, 493, 494, 495, 497, 382, 256, 419, 1208, 1236, - 363, 503, 504, 305, 383, 0, 0, 0, 1265, 1250, - 502, 0, 1193, 1268, 1162, 1181, 1278, 1184, 1187, 1229, - 1141, 1207, 402, 1178, 1134, 1166, 1136, 1173, 1137, 1164, - 1195, 260, 1161, 1252, 1211, 1267, 353, 257, 1143, 1167, - 416, 1183, 198, 1231, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 1274, 357, - 1217, 0, 481, 387, 0, 0, 0, 1197, 1256, 1205, - 1243, 1192, 1230, 1151, 1216, 1269, 1179, 1226, 1270, 312, - 240, 314, 197, 399, 482, 276, 0, 0, 0, 0, - 0, 190, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 1175, 1223, - 1264, 1176, 1225, 255, 310, 262, 254, 507, 1275, 1255, - 1140, 1204, 1263, 0, 0, 221, 1266, 1199, 0, 1228, - 0, 1281, 1135, 1219, 0, 1138, 1142, 1277, 1259, 1170, - 265, 0, 0, 0, 0, 0, 0, 0, 1196, 1206, - 1240, 1244, 1190, 0, 0, 0, 0, 0, 2782, 0, - 1168, 0, 1215, 0, 0, 0, 1147, 1139, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1194, 0, 0, 0, 0, 1150, 0, 1169, 1241, 0, - 1133, 287, 1144, 388, 247, 0, 438, 1248, 1258, 1191, - 549, 1262, 1189, 1188, 1235, 1148, 1254, 1182, 352, 1146, - 319, 193, 217, 0, 1180, 398, 446, 458, 1253, 1165, - 1174, 245, 1172, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 1214, 1233, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 1145, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 1160, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 1249, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 1238, 1280, 411, 457, 232, 529, 480, 1155, - 1159, 1153, 1220, 1154, 1209, 1210, 1156, 1271, 1272, 1273, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 1242, - 1149, 0, 1157, 1158, 1251, 1260, 1261, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 1213, 192, 213, - 355, 1276, 439, 278, 569, 539, 534, 199, 215, 1152, - 252, 1163, 1171, 0, 1177, 1185, 1186, 1198, 1200, 1201, - 1202, 1203, 1221, 1222, 1224, 1232, 1234, 1237, 1239, 1246, - 1257, 1279, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 1212, 1218, 368, 271, 294, 309, 1227, 538, 486, 219, - 451, 280, 243, 1245, 1247, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 1208, - 1236, 363, 503, 504, 305, 383, 0, 0, 0, 1265, - 1250, 502, 0, 1193, 1268, 1162, 1181, 1278, 1184, 1187, - 1229, 1141, 1207, 402, 1178, 1134, 1166, 1136, 1173, 1137, - 1164, 1195, 260, 1161, 1252, 1211, 1267, 353, 257, 1143, - 1167, 416, 1183, 198, 1231, 471, 244, 364, 361, 510, - 272, 263, 259, 242, 306, 372, 414, 492, 408, 1274, - 357, 1217, 0, 481, 387, 0, 0, 0, 1197, 1256, - 1205, 1243, 1192, 1230, 1151, 1216, 1269, 1179, 1226, 1270, - 312, 240, 314, 197, 399, 482, 276, 0, 0, 0, - 0, 0, 638, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 230, 0, 0, 237, 0, 0, 0, 338, - 347, 346, 327, 328, 330, 332, 337, 344, 350, 1175, - 1223, 1264, 1176, 1225, 255, 310, 262, 254, 507, 1275, - 1255, 1140, 1204, 1263, 0, 0, 221, 1266, 1199, 0, - 1228, 0, 1281, 1135, 1219, 0, 1138, 1142, 1277, 1259, - 1170, 265, 0, 0, 0, 0, 0, 0, 0, 1196, - 1206, 1240, 1244, 1190, 0, 0, 0, 0, 0, 2743, - 0, 1168, 0, 1215, 0, 0, 0, 1147, 1139, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1194, 0, 0, 0, 0, 1150, 0, 1169, 1241, - 0, 1133, 287, 1144, 388, 247, 0, 438, 1248, 1258, - 1191, 549, 1262, 1189, 1188, 1235, 1148, 1254, 1182, 352, - 1146, 319, 193, 217, 0, 1180, 398, 446, 458, 1253, - 1165, 1174, 245, 1172, 456, 412, 527, 225, 274, 443, - 418, 454, 426, 277, 1214, 1233, 455, 359, 512, 436, - 524, 550, 551, 253, 392, 536, 496, 544, 568, 218, - 250, 406, 489, 530, 478, 384, 508, 509, 318, 477, - 285, 196, 356, 556, 216, 464, 358, 234, 223, 514, - 533, 279, 441, 563, 205, 491, 522, 231, 468, 0, - 0, 570, 239, 488, 207, 519, 487, 380, 315, 316, - 206, 0, 442, 258, 283, 0, 0, 248, 401, 516, - 517, 246, 571, 220, 543, 212, 1145, 542, 394, 511, - 520, 381, 370, 211, 518, 379, 369, 323, 342, 343, - 270, 296, 433, 362, 434, 295, 297, 390, 389, 391, - 200, 531, 0, 201, 0, 483, 532, 572, 226, 227, - 229, 1160, 269, 273, 281, 284, 292, 293, 302, 354, - 405, 432, 428, 437, 1249, 506, 525, 537, 548, 554, - 555, 557, 558, 559, 560, 561, 564, 562, 393, 300, - 479, 322, 360, 1238, 1280, 411, 457, 232, 529, 480, - 1155, 1159, 1153, 1220, 1154, 1209, 1210, 1156, 1271, 1272, - 1273, 573, 574, 575, 576, 577, 578, 579, 580, 581, - 582, 583, 584, 585, 586, 587, 588, 589, 590, 0, - 1242, 1149, 0, 1157, 1158, 1251, 1260, 1261, 591, 371, - 470, 526, 324, 336, 339, 329, 348, 0, 349, 325, - 326, 331, 333, 334, 335, 340, 341, 345, 351, 241, - 203, 377, 385, 505, 301, 208, 209, 210, 498, 499, - 500, 501, 540, 541, 545, 447, 448, 449, 450, 282, - 535, 298, 453, 452, 320, 321, 366, 435, 1213, 192, - 213, 355, 1276, 439, 278, 569, 539, 534, 199, 215, - 1152, 252, 1163, 1171, 0, 1177, 1185, 1186, 1198, 1200, - 1201, 1202, 1203, 1221, 1222, 1224, 1232, 1234, 1237, 1239, - 1246, 1257, 1279, 194, 195, 202, 214, 224, 228, 235, - 251, 266, 268, 275, 288, 299, 307, 308, 311, 317, - 367, 373, 374, 375, 376, 395, 396, 397, 400, 403, - 404, 407, 409, 410, 413, 417, 421, 422, 423, 425, - 427, 429, 440, 445, 459, 460, 461, 462, 463, 466, - 467, 472, 473, 474, 475, 476, 484, 485, 490, 513, - 515, 528, 546, 552, 465, 290, 291, 430, 431, 303, - 304, 566, 567, 289, 523, 553, 521, 565, 547, 424, - 365, 1212, 1218, 368, 271, 294, 309, 1227, 538, 486, - 219, 451, 280, 243, 1245, 1247, 204, 238, 222, 249, - 264, 267, 313, 378, 386, 415, 420, 286, 261, 236, - 444, 233, 469, 493, 494, 495, 497, 382, 256, 419, - 1208, 1236, 363, 503, 504, 305, 383, 0, 0, 0, - 1265, 1250, 502, 0, 1193, 1268, 1162, 1181, 1278, 1184, - 1187, 1229, 1141, 1207, 402, 1178, 1134, 1166, 1136, 1173, - 1137, 1164, 1195, 260, 1161, 1252, 1211, 1267, 353, 257, - 1143, 1167, 416, 1183, 198, 1231, 471, 244, 364, 361, - 510, 272, 263, 259, 242, 306, 372, 414, 492, 408, - 1274, 357, 1217, 0, 481, 387, 0, 0, 0, 1197, - 1256, 1205, 1243, 1192, 1230, 1151, 1216, 1269, 1179, 1226, - 1270, 312, 240, 314, 197, 399, 482, 276, 0, 0, - 0, 0, 0, 815, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 230, 0, 0, 237, 0, 0, 0, - 338, 347, 346, 327, 328, 330, 332, 337, 344, 350, - 1175, 1223, 1264, 1176, 1225, 255, 310, 262, 254, 507, - 1275, 1255, 1140, 1204, 1263, 0, 0, 221, 1266, 1199, - 0, 1228, 0, 1281, 1135, 1219, 0, 1138, 1142, 1277, - 1259, 1170, 265, 0, 0, 0, 0, 0, 0, 0, - 1196, 1206, 1240, 1244, 1190, 0, 0, 0, 0, 0, - 2086, 0, 1168, 0, 1215, 0, 0, 0, 1147, 1139, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1194, 0, 0, 0, 0, 1150, 0, 1169, - 1241, 0, 1133, 287, 1144, 388, 247, 0, 438, 1248, - 1258, 1191, 549, 1262, 1189, 1188, 1235, 1148, 1254, 1182, - 352, 1146, 319, 193, 217, 0, 1180, 398, 446, 458, - 1253, 1165, 1174, 245, 1172, 456, 412, 527, 225, 274, - 443, 418, 454, 426, 277, 1214, 1233, 455, 359, 512, - 436, 524, 550, 551, 253, 392, 536, 496, 544, 568, - 218, 250, 406, 489, 530, 478, 384, 508, 509, 318, - 477, 285, 196, 356, 556, 216, 464, 358, 234, 223, - 514, 533, 279, 441, 563, 205, 491, 522, 231, 468, - 0, 0, 570, 239, 488, 207, 519, 487, 380, 315, - 316, 206, 0, 442, 258, 283, 0, 0, 248, 401, - 516, 517, 246, 571, 220, 543, 212, 1145, 542, 394, - 511, 520, 381, 370, 211, 518, 379, 369, 323, 342, - 343, 270, 296, 433, 362, 434, 295, 297, 390, 389, - 391, 200, 531, 0, 201, 0, 483, 532, 572, 226, - 227, 229, 1160, 269, 273, 281, 284, 292, 293, 302, - 354, 405, 432, 428, 437, 1249, 506, 525, 537, 548, - 554, 555, 557, 558, 559, 560, 561, 564, 562, 393, - 300, 479, 322, 360, 1238, 1280, 411, 457, 232, 529, - 480, 1155, 1159, 1153, 1220, 1154, 1209, 1210, 1156, 1271, - 1272, 1273, 573, 574, 575, 576, 577, 578, 579, 580, - 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, - 0, 1242, 1149, 0, 1157, 1158, 1251, 1260, 1261, 591, - 371, 470, 526, 324, 336, 339, 329, 348, 0, 349, - 325, 326, 331, 333, 334, 335, 340, 341, 345, 351, - 241, 203, 377, 385, 505, 301, 208, 209, 210, 498, - 499, 500, 501, 540, 541, 545, 447, 448, 449, 450, - 282, 535, 298, 453, 452, 320, 321, 366, 435, 1213, - 192, 213, 355, 1276, 439, 278, 569, 539, 534, 199, - 215, 1152, 252, 1163, 1171, 0, 1177, 1185, 1186, 1198, - 1200, 1201, 1202, 1203, 1221, 1222, 1224, 1232, 1234, 1237, - 1239, 1246, 1257, 1279, 194, 195, 202, 214, 224, 228, - 235, 251, 266, 268, 275, 288, 299, 307, 308, 311, - 317, 367, 373, 374, 375, 376, 395, 396, 397, 400, - 403, 404, 407, 409, 410, 413, 417, 421, 422, 423, - 425, 427, 429, 440, 445, 459, 460, 461, 462, 463, - 466, 467, 472, 473, 474, 475, 476, 484, 485, 490, - 513, 515, 528, 546, 552, 465, 290, 291, 430, 431, - 303, 304, 566, 567, 289, 523, 553, 521, 565, 547, - 424, 365, 1212, 1218, 368, 271, 294, 309, 1227, 538, - 486, 219, 451, 280, 243, 1245, 1247, 204, 238, 222, - 249, 264, 267, 313, 378, 386, 415, 420, 286, 261, - 236, 444, 233, 469, 493, 494, 495, 497, 382, 256, - 419, 1208, 1236, 363, 503, 504, 305, 383, 0, 0, - 0, 1265, 1250, 502, 0, 1193, 1268, 1162, 1181, 1278, - 1184, 1187, 1229, 1141, 1207, 402, 1178, 1134, 1166, 1136, - 1173, 1137, 1164, 1195, 260, 1161, 1252, 1211, 1267, 353, - 257, 1143, 1167, 416, 1183, 198, 1231, 471, 244, 364, - 361, 510, 272, 263, 259, 242, 306, 372, 414, 492, - 408, 1274, 357, 1217, 0, 481, 387, 0, 0, 0, - 1197, 1256, 1205, 1243, 1192, 1230, 1151, 1216, 1269, 1179, - 1226, 1270, 312, 240, 314, 197, 399, 482, 276, 0, - 91, 0, 0, 0, 638, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 230, 0, 0, 237, 0, 0, - 0, 338, 347, 346, 327, 328, 330, 332, 337, 344, - 350, 1175, 1223, 1264, 1176, 1225, 255, 310, 262, 254, - 507, 1275, 1255, 1140, 1204, 1263, 0, 0, 221, 1266, - 1199, 0, 1228, 0, 1281, 1135, 1219, 0, 1138, 1142, - 1277, 1259, 1170, 265, 0, 0, 0, 0, 0, 0, - 0, 1196, 1206, 1240, 1244, 1190, 0, 0, 0, 0, - 0, 0, 0, 1168, 0, 1215, 0, 0, 0, 1147, - 1139, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1194, 0, 0, 0, 0, 1150, 0, - 1169, 1241, 0, 1133, 287, 1144, 388, 247, 0, 438, - 1248, 1258, 1191, 549, 1262, 1189, 1188, 1235, 1148, 1254, - 1182, 352, 1146, 319, 193, 217, 0, 1180, 398, 446, - 458, 1253, 1165, 1174, 245, 1172, 456, 412, 527, 225, - 274, 443, 418, 454, 426, 277, 1214, 1233, 455, 359, - 512, 436, 524, 550, 551, 253, 392, 536, 496, 544, - 568, 218, 250, 406, 489, 530, 478, 384, 508, 509, - 318, 477, 285, 196, 356, 556, 216, 464, 358, 234, - 223, 514, 533, 279, 441, 563, 205, 491, 522, 231, - 468, 0, 0, 570, 239, 488, 207, 519, 487, 380, - 315, 316, 206, 0, 442, 258, 283, 0, 0, 248, - 401, 516, 517, 246, 571, 220, 543, 212, 1145, 542, - 394, 511, 520, 381, 370, 211, 518, 379, 369, 323, - 342, 343, 270, 296, 433, 362, 434, 295, 297, 390, - 389, 391, 200, 531, 0, 201, 0, 483, 532, 572, - 226, 227, 229, 1160, 269, 273, 281, 284, 292, 293, - 302, 354, 405, 432, 428, 437, 1249, 506, 525, 537, - 548, 554, 555, 557, 558, 559, 560, 561, 564, 562, - 393, 300, 479, 322, 360, 1238, 1280, 411, 457, 232, - 529, 480, 1155, 1159, 1153, 1220, 1154, 1209, 1210, 1156, - 1271, 1272, 1273, 573, 574, 575, 576, 577, 578, 579, - 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, - 590, 0, 1242, 1149, 0, 1157, 1158, 1251, 1260, 1261, - 591, 371, 470, 526, 324, 336, 339, 329, 348, 0, - 349, 325, 326, 331, 333, 334, 335, 340, 341, 345, - 351, 241, 203, 377, 385, 505, 301, 208, 209, 210, - 498, 499, 500, 501, 540, 541, 545, 447, 448, 449, - 450, 282, 535, 298, 453, 452, 320, 321, 366, 435, - 1213, 192, 213, 355, 1276, 439, 278, 569, 539, 534, - 199, 215, 1152, 252, 1163, 1171, 0, 1177, 1185, 1186, - 1198, 1200, 1201, 1202, 1203, 1221, 1222, 1224, 1232, 1234, - 1237, 1239, 1246, 1257, 1279, 194, 195, 202, 214, 224, - 228, 235, 251, 266, 268, 275, 288, 299, 307, 308, - 311, 317, 367, 373, 374, 375, 376, 395, 396, 397, - 400, 403, 404, 407, 409, 410, 413, 417, 421, 422, - 423, 425, 427, 429, 440, 445, 459, 460, 461, 462, - 463, 466, 467, 472, 473, 474, 475, 476, 484, 485, - 490, 513, 515, 528, 546, 552, 465, 290, 291, 430, - 431, 303, 304, 566, 567, 289, 523, 553, 521, 565, - 547, 424, 365, 1212, 1218, 368, 271, 294, 309, 1227, - 538, 486, 219, 451, 280, 243, 1245, 1247, 204, 238, - 222, 249, 264, 267, 313, 378, 386, 415, 420, 286, - 261, 236, 444, 233, 469, 493, 494, 495, 497, 382, - 256, 419, 1208, 1236, 363, 503, 504, 305, 383, 0, - 0, 0, 1265, 1250, 502, 0, 1193, 1268, 1162, 1181, - 1278, 1184, 1187, 1229, 1141, 1207, 402, 1178, 1134, 1166, - 1136, 1173, 1137, 1164, 1195, 260, 1161, 1252, 1211, 1267, - 353, 257, 1143, 1167, 416, 1183, 198, 1231, 471, 244, - 364, 361, 510, 272, 263, 259, 242, 306, 372, 414, - 492, 408, 1274, 357, 1217, 0, 481, 387, 0, 0, - 0, 1197, 1256, 1205, 1243, 1192, 1230, 1151, 1216, 1269, - 1179, 1226, 1270, 312, 240, 314, 197, 399, 482, 276, - 0, 0, 0, 0, 0, 638, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 230, 0, 0, 237, 0, - 0, 0, 338, 347, 346, 327, 328, 330, 332, 337, - 344, 350, 1175, 1223, 1264, 1176, 1225, 255, 310, 262, - 254, 507, 1275, 1255, 1140, 1204, 1263, 0, 0, 221, - 1266, 1199, 0, 1228, 0, 1281, 1135, 1219, 0, 1138, - 1142, 1277, 1259, 1170, 265, 0, 0, 0, 0, 0, - 0, 0, 1196, 1206, 1240, 1244, 1190, 0, 0, 0, - 0, 0, 0, 0, 1168, 0, 1215, 0, 0, 0, - 1147, 1139, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1194, 0, 0, 0, 0, 1150, - 0, 1169, 1241, 0, 1133, 287, 1144, 388, 247, 0, - 438, 1248, 1258, 1191, 549, 1262, 1189, 1188, 1235, 1148, - 1254, 1182, 352, 1146, 319, 193, 217, 0, 1180, 398, - 446, 458, 1253, 1165, 1174, 245, 1172, 456, 412, 527, - 225, 274, 443, 418, 454, 426, 277, 1214, 1233, 455, - 359, 512, 436, 524, 550, 551, 253, 392, 536, 496, - 544, 568, 218, 250, 406, 489, 530, 478, 384, 508, - 509, 318, 477, 285, 196, 356, 556, 216, 464, 358, - 234, 223, 514, 533, 279, 441, 563, 205, 491, 522, - 231, 468, 0, 0, 570, 239, 488, 207, 519, 487, - 380, 315, 316, 206, 0, 442, 258, 283, 0, 0, - 248, 401, 516, 517, 246, 571, 220, 543, 212, 1145, - 542, 394, 511, 520, 381, 370, 211, 518, 379, 369, - 323, 342, 343, 270, 296, 433, 362, 434, 295, 297, - 390, 389, 391, 200, 531, 0, 201, 0, 483, 532, - 572, 226, 227, 229, 1160, 269, 273, 281, 284, 292, - 293, 302, 354, 405, 432, 428, 437, 1249, 506, 525, - 537, 548, 554, 555, 557, 558, 559, 560, 561, 564, - 562, 393, 300, 479, 322, 360, 1238, 1280, 411, 457, - 232, 529, 480, 1155, 1159, 1153, 1220, 1154, 1209, 1210, - 1156, 1271, 1272, 1273, 573, 574, 575, 576, 577, 578, - 579, 580, 581, 582, 583, 584, 585, 586, 587, 588, - 589, 590, 0, 1242, 1149, 0, 1157, 1158, 1251, 1260, - 1261, 591, 371, 470, 526, 324, 336, 339, 329, 348, - 0, 349, 325, 326, 331, 333, 334, 335, 340, 341, - 345, 351, 241, 203, 377, 385, 505, 301, 208, 209, - 210, 498, 499, 500, 501, 540, 541, 545, 447, 448, - 449, 450, 282, 535, 298, 453, 452, 320, 321, 366, - 435, 1213, 192, 213, 355, 1276, 439, 278, 569, 539, - 534, 199, 215, 1152, 252, 1163, 1171, 0, 1177, 1185, - 1186, 1198, 1200, 1201, 1202, 1203, 1221, 1222, 1224, 1232, - 1234, 1237, 1239, 1246, 1257, 1279, 194, 195, 202, 214, - 224, 228, 235, 251, 266, 268, 275, 288, 299, 307, - 308, 311, 317, 367, 373, 374, 375, 376, 395, 396, - 397, 400, 403, 404, 407, 409, 410, 413, 417, 421, - 422, 423, 425, 427, 429, 440, 445, 459, 460, 461, - 462, 463, 466, 467, 472, 473, 474, 475, 476, 484, - 485, 490, 513, 515, 528, 546, 552, 465, 290, 291, - 430, 431, 303, 304, 566, 567, 289, 523, 553, 521, - 565, 547, 424, 365, 1212, 1218, 368, 271, 294, 309, - 1227, 538, 486, 219, 451, 280, 243, 1245, 1247, 204, - 238, 222, 249, 264, 267, 313, 378, 386, 415, 420, - 286, 261, 236, 444, 233, 469, 493, 494, 495, 497, - 382, 256, 419, 1208, 1236, 363, 503, 504, 305, 383, - 0, 0, 0, 1265, 1250, 502, 0, 1193, 1268, 1162, - 1181, 1278, 1184, 1187, 1229, 1141, 1207, 402, 1178, 1134, - 1166, 1136, 1173, 1137, 1164, 1195, 260, 1161, 1252, 1211, - 1267, 353, 257, 1143, 1167, 416, 1183, 198, 1231, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 1274, 357, 1217, 0, 481, 387, 0, - 0, 0, 1197, 1256, 1205, 1243, 1192, 1230, 1151, 1216, - 1269, 1179, 1226, 1270, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 0, 815, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 1175, 1223, 1264, 1176, 1225, 255, 310, - 262, 254, 507, 1275, 1255, 1140, 1204, 1263, 0, 0, - 221, 1266, 1199, 0, 1228, 0, 1281, 1135, 1219, 0, - 1138, 1142, 1277, 1259, 1170, 265, 0, 0, 0, 0, - 0, 0, 0, 1196, 1206, 1240, 1244, 1190, 0, 0, - 0, 0, 0, 0, 0, 1168, 0, 1215, 0, 0, - 0, 1147, 1139, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1194, 0, 0, 0, 0, - 1150, 0, 1169, 1241, 0, 1133, 287, 1144, 388, 247, - 0, 438, 1248, 1258, 1191, 549, 1262, 1189, 1188, 1235, - 1148, 1254, 1182, 352, 1146, 319, 193, 217, 0, 1180, - 398, 446, 458, 1253, 1165, 1174, 245, 1172, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 1214, 1233, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 1145, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 1160, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 1249, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 1238, 1280, 411, - 457, 232, 529, 480, 1155, 1159, 1153, 1220, 1154, 1209, - 1210, 1156, 1271, 1272, 1273, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 1242, 1149, 0, 1157, 1158, 1251, - 1260, 1261, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 1213, 192, 213, 355, 1276, 439, 278, 569, - 539, 534, 199, 215, 1152, 252, 1163, 1171, 0, 1177, - 1185, 1186, 1198, 1200, 1201, 1202, 1203, 1221, 1222, 1224, - 1232, 1234, 1237, 1239, 1246, 1257, 1279, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 1212, 1218, 368, 271, 294, - 309, 1227, 538, 486, 219, 451, 280, 243, 1245, 1247, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 1208, 1236, 363, 503, 504, 305, - 383, 0, 0, 0, 1265, 1250, 502, 0, 1193, 1268, - 1162, 1181, 1278, 1184, 1187, 1229, 1141, 1207, 402, 1178, - 1134, 1166, 1136, 1173, 1137, 1164, 1195, 260, 1161, 1252, - 1211, 1267, 353, 257, 1143, 1167, 416, 1183, 198, 1231, - 471, 244, 364, 361, 510, 272, 263, 259, 242, 306, - 372, 414, 492, 408, 1274, 357, 1217, 0, 481, 387, - 0, 0, 0, 1197, 1256, 1205, 1243, 1192, 1230, 1151, - 1216, 1269, 1179, 1226, 1270, 312, 240, 314, 197, 399, - 482, 276, 0, 0, 0, 0, 0, 190, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 230, 0, 0, - 237, 0, 0, 0, 338, 347, 346, 327, 328, 330, - 332, 337, 344, 350, 1175, 1223, 1264, 1176, 1225, 255, - 310, 262, 254, 507, 1275, 1255, 1140, 1204, 1263, 0, - 0, 221, 1266, 1199, 0, 1228, 0, 1281, 1135, 1219, - 0, 1138, 1142, 1277, 1259, 1170, 265, 0, 0, 0, - 0, 0, 0, 0, 1196, 1206, 1240, 1244, 1190, 0, - 0, 0, 0, 0, 0, 0, 1168, 0, 1215, 0, - 0, 0, 1147, 1139, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1194, 0, 0, 0, - 0, 1150, 0, 1169, 1241, 0, 1133, 287, 1144, 388, - 247, 0, 438, 1248, 1258, 1191, 549, 1262, 1189, 1188, - 1235, 1148, 1254, 1182, 352, 1146, 319, 193, 217, 0, - 1180, 398, 446, 458, 1253, 1165, 1174, 245, 1172, 456, - 412, 527, 225, 274, 443, 418, 454, 426, 277, 1214, - 1233, 455, 359, 512, 436, 524, 550, 551, 253, 392, - 536, 496, 544, 568, 218, 250, 406, 489, 530, 478, - 384, 508, 509, 318, 477, 285, 196, 356, 556, 216, - 464, 358, 234, 223, 514, 533, 279, 441, 563, 205, - 491, 522, 231, 468, 0, 0, 570, 239, 488, 207, - 519, 487, 380, 315, 316, 206, 0, 442, 258, 283, - 0, 0, 248, 401, 516, 517, 246, 571, 220, 543, - 212, 1145, 542, 394, 511, 520, 381, 370, 211, 518, - 379, 369, 323, 342, 343, 270, 296, 433, 362, 434, - 295, 297, 390, 389, 391, 200, 531, 0, 201, 0, - 483, 532, 572, 226, 227, 229, 1160, 269, 273, 281, - 284, 292, 293, 302, 354, 405, 432, 428, 437, 1249, - 506, 525, 537, 548, 554, 555, 557, 558, 559, 560, - 561, 564, 562, 393, 300, 479, 322, 360, 1238, 1280, - 411, 457, 232, 529, 480, 1155, 1159, 1153, 1220, 1154, - 1209, 1210, 1156, 1271, 1272, 1273, 573, 574, 575, 576, - 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, - 587, 588, 589, 590, 0, 1242, 1149, 0, 1157, 1158, - 1251, 1260, 1261, 591, 371, 470, 526, 324, 336, 339, - 329, 348, 0, 349, 325, 326, 331, 333, 334, 335, - 340, 341, 345, 351, 241, 203, 377, 385, 505, 301, - 208, 209, 210, 498, 499, 500, 501, 540, 541, 545, - 447, 448, 449, 450, 282, 535, 298, 453, 452, 320, - 321, 366, 435, 1213, 192, 213, 355, 1276, 439, 278, - 569, 539, 534, 199, 215, 1152, 252, 1163, 1171, 0, - 1177, 1185, 1186, 1198, 1200, 1201, 1202, 1203, 1221, 1222, - 1224, 1232, 1234, 1237, 1239, 1246, 1257, 1279, 194, 195, - 202, 214, 224, 228, 235, 251, 266, 268, 275, 288, - 299, 307, 308, 311, 317, 367, 373, 374, 375, 376, - 395, 396, 397, 400, 403, 404, 407, 409, 410, 413, - 417, 421, 422, 423, 425, 427, 429, 440, 445, 459, - 460, 461, 462, 463, 466, 467, 472, 473, 474, 475, - 476, 484, 485, 490, 513, 515, 528, 546, 552, 465, - 290, 291, 430, 431, 303, 304, 566, 567, 289, 523, - 553, 521, 565, 547, 424, 365, 1212, 1218, 368, 271, - 294, 309, 1227, 538, 486, 219, 451, 280, 243, 1245, - 1247, 204, 238, 222, 249, 264, 267, 313, 378, 386, - 415, 420, 286, 261, 236, 444, 233, 469, 493, 494, - 495, 497, 382, 256, 419, 1208, 1236, 363, 503, 504, - 305, 383, 0, 0, 0, 0, 0, 502, 0, 691, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 402, - 0, 0, 0, 0, 678, 0, 0, 0, 260, 683, - 0, 0, 0, 353, 257, 0, 0, 416, 0, 198, - 0, 471, 244, 364, 361, 510, 272, 263, 259, 242, - 306, 372, 414, 492, 408, 690, 357, 0, 0, 481, - 387, 0, 0, 0, 0, 0, 686, 687, 0, 0, - 0, 0, 0, 0, 0, 0, 312, 240, 314, 197, - 399, 482, 276, 0, 91, 0, 0, 831, 815, 781, - 782, 819, 832, 833, 834, 835, 820, 0, 230, 821, - 822, 237, 823, 0, 780, 718, 720, 719, 737, 738, - 739, 740, 741, 742, 743, 716, 828, 836, 837, 0, - 255, 310, 262, 254, 507, 0, 0, 1962, 1963, 1964, - 0, 0, 221, 0, 0, 0, 0, 0, 0, 0, - 660, 675, 0, 689, 0, 0, 0, 265, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 672, 673, 0, 0, 0, 0, 775, - 0, 674, 0, 0, 682, 838, 839, 840, 841, 842, - 843, 844, 845, 846, 847, 848, 849, 850, 851, 852, - 853, 854, 855, 856, 857, 858, 859, 860, 861, 862, - 863, 864, 865, 866, 867, 868, 869, 870, 871, 872, - 873, 874, 875, 876, 877, 878, 879, 685, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 287, 0, - 388, 247, 0, 438, 774, 0, 0, 549, 0, 0, - 772, 0, 0, 0, 0, 352, 0, 319, 193, 217, - 0, 0, 398, 446, 458, 0, 0, 0, 825, 0, - 456, 412, 527, 225, 274, 443, 418, 454, 426, 277, - 0, 0, 455, 359, 512, 436, 524, 550, 551, 253, - 392, 536, 496, 544, 568, 218, 250, 406, 489, 530, - 478, 384, 508, 509, 318, 477, 285, 196, 356, 556, - 216, 464, 358, 234, 223, 514, 533, 279, 441, 563, - 205, 491, 522, 231, 468, 0, 0, 570, 239, 488, - 207, 519, 487, 380, 315, 316, 206, 0, 442, 258, - 283, 0, 0, 248, 401, 826, 827, 246, 571, 724, - 543, 212, 0, 542, 394, 511, 520, 381, 370, 211, - 518, 379, 369, 323, 732, 733, 270, 296, 760, 759, - 758, 295, 297, 390, 389, 391, 200, 531, 0, 201, - 0, 483, 532, 572, 226, 227, 229, 0, 269, 273, - 281, 284, 292, 293, 302, 354, 405, 432, 428, 437, - 0, 506, 525, 537, 548, 554, 555, 557, 558, 559, - 560, 561, 564, 562, 393, 300, 479, 322, 360, 0, - 0, 411, 457, 232, 529, 480, 785, 773, 695, 789, - 697, 786, 787, 692, 693, 696, 788, 573, 574, 575, - 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, - 586, 587, 588, 589, 590, 0, 776, 681, 680, 0, - 688, 0, 714, 715, 717, 721, 722, 723, 734, 735, - 736, 744, 746, 747, 745, 748, 749, 750, 753, 754, - 755, 756, 751, 752, 757, 698, 702, 699, 700, 701, - 713, 703, 704, 705, 706, 707, 708, 709, 710, 711, - 712, 799, 800, 801, 802, 803, 804, 727, 731, 730, - 728, 729, 725, 726, 679, 192, 213, 355, 0, 439, - 278, 569, 539, 534, 199, 215, 790, 252, 791, 0, - 0, 795, 0, 0, 0, 797, 796, 0, 798, 764, - 763, 0, 0, 792, 793, 0, 794, 0, 0, 194, - 195, 202, 214, 224, 228, 235, 251, 266, 268, 275, - 288, 299, 307, 308, 311, 317, 367, 373, 374, 375, - 376, 395, 396, 397, 400, 403, 404, 407, 409, 410, - 413, 417, 421, 422, 423, 425, 427, 429, 440, 445, - 459, 460, 461, 462, 463, 466, 467, 472, 473, 474, - 475, 476, 484, 485, 490, 513, 515, 528, 546, 552, - 465, 805, 806, 807, 808, 809, 810, 811, 812, 289, - 523, 553, 521, 565, 547, 424, 365, 0, 0, 368, - 271, 294, 309, 0, 538, 486, 219, 451, 280, 243, - 830, 0, 204, 238, 222, 249, 264, 267, 313, 378, - 386, 415, 420, 286, 261, 236, 444, 233, 469, 493, - 494, 495, 497, 382, 256, 419, 383, 0, 363, 503, - 504, 305, 502, 0, 691, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 402, 0, 0, 0, 0, 678, - 0, 0, 0, 260, 683, 0, 0, 0, 353, 257, - 0, 0, 416, 0, 198, 0, 471, 244, 364, 361, - 510, 272, 263, 259, 242, 306, 372, 414, 492, 408, - 690, 357, 0, 0, 481, 387, 0, 0, 0, 0, - 0, 686, 687, 0, 0, 0, 0, 0, 0, 2115, - 0, 312, 240, 314, 197, 399, 482, 276, 0, 91, - 0, 0, 831, 815, 781, 782, 819, 832, 833, 834, - 835, 820, 0, 230, 821, 822, 237, 823, 0, 780, - 718, 720, 719, 737, 738, 739, 740, 741, 742, 743, - 716, 828, 836, 837, 2116, 255, 310, 262, 254, 507, - 0, 0, 0, 0, 0, 0, 0, 221, 0, 0, - 0, 0, 0, 0, 0, 660, 675, 0, 689, 0, - 0, 0, 265, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 672, 673, - 0, 0, 0, 0, 775, 0, 674, 0, 0, 682, - 838, 839, 840, 841, 842, 843, 844, 845, 846, 847, - 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, - 858, 859, 860, 861, 862, 863, 864, 865, 866, 867, - 868, 869, 870, 871, 872, 873, 874, 875, 876, 877, - 878, 879, 685, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 287, 0, 388, 247, 0, 438, 774, - 0, 0, 549, 0, 0, 772, 0, 0, 0, 0, - 352, 0, 319, 193, 217, 0, 0, 398, 446, 458, - 0, 0, 0, 825, 0, 456, 412, 527, 225, 274, - 443, 418, 454, 426, 277, 0, 0, 455, 359, 512, - 436, 524, 550, 551, 253, 392, 536, 496, 544, 568, - 218, 250, 406, 489, 530, 478, 384, 508, 509, 318, - 477, 285, 196, 356, 556, 216, 464, 358, 234, 223, - 514, 533, 279, 441, 563, 205, 491, 522, 231, 468, - 0, 0, 570, 239, 488, 207, 519, 487, 380, 315, - 316, 206, 0, 442, 258, 283, 0, 0, 248, 401, - 826, 827, 246, 571, 724, 543, 212, 0, 542, 394, - 511, 520, 381, 370, 211, 518, 379, 369, 323, 732, - 733, 270, 296, 760, 759, 758, 295, 297, 390, 389, - 391, 200, 531, 0, 201, 0, 483, 532, 572, 226, - 227, 229, 0, 269, 273, 281, 284, 292, 293, 302, - 354, 405, 432, 428, 437, 0, 506, 525, 537, 548, - 554, 555, 557, 558, 559, 560, 561, 564, 562, 393, - 300, 479, 322, 360, 0, 0, 411, 457, 232, 529, - 480, 785, 773, 695, 789, 697, 786, 787, 692, 693, - 696, 788, 573, 574, 575, 576, 577, 578, 579, 580, - 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, - 0, 776, 681, 680, 0, 688, 0, 714, 715, 717, - 721, 722, 723, 734, 735, 736, 744, 746, 747, 745, - 748, 749, 750, 753, 754, 755, 756, 751, 752, 757, - 698, 702, 699, 700, 701, 713, 703, 704, 705, 706, - 707, 708, 709, 710, 711, 712, 799, 800, 801, 802, - 803, 804, 727, 731, 730, 728, 729, 725, 726, 679, - 192, 213, 355, 0, 439, 278, 569, 539, 534, 199, - 215, 790, 252, 791, 0, 0, 795, 0, 0, 0, - 797, 796, 0, 798, 764, 763, 0, 0, 792, 793, - 0, 794, 0, 0, 194, 195, 202, 214, 224, 228, - 235, 251, 266, 268, 275, 288, 299, 307, 308, 311, - 317, 367, 373, 374, 375, 376, 395, 396, 397, 400, - 403, 404, 407, 409, 410, 413, 417, 421, 422, 423, - 425, 427, 429, 440, 445, 459, 460, 461, 462, 463, - 466, 467, 472, 473, 474, 475, 476, 484, 485, 490, - 513, 515, 528, 546, 552, 465, 805, 806, 807, 808, - 809, 810, 811, 812, 289, 523, 553, 521, 565, 547, - 424, 365, 0, 0, 368, 271, 294, 309, 0, 538, - 486, 219, 451, 280, 243, 830, 0, 204, 238, 222, - 249, 264, 267, 313, 378, 386, 415, 420, 286, 261, - 236, 444, 233, 469, 493, 494, 495, 497, 382, 256, - 419, 0, 383, 363, 503, 504, 305, 82, 502, 0, - 691, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 402, 0, 0, 0, 0, 678, 0, 0, 0, 260, - 683, 0, 0, 0, 353, 257, 0, 0, 416, 0, - 198, 0, 471, 244, 364, 361, 510, 272, 263, 259, - 242, 306, 372, 414, 492, 408, 690, 357, 0, 0, - 481, 387, 0, 0, 0, 0, 0, 686, 687, 0, - 0, 0, 0, 0, 0, 0, 0, 312, 240, 314, - 197, 399, 482, 276, 0, 91, 0, 0, 831, 815, - 781, 782, 819, 832, 833, 834, 835, 820, 0, 230, - 821, 822, 237, 823, 0, 780, 718, 720, 719, 737, - 738, 739, 740, 741, 742, 743, 716, 828, 836, 837, - 0, 255, 310, 262, 254, 507, 0, 0, 0, 0, - 0, 0, 0, 221, 0, 0, 0, 0, 0, 0, - 0, 660, 675, 0, 689, 0, 0, 0, 265, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 672, 673, 0, 0, 0, 0, - 775, 0, 674, 0, 0, 682, 838, 839, 840, 841, - 842, 843, 844, 845, 846, 847, 848, 849, 850, 851, - 852, 853, 854, 855, 856, 857, 858, 859, 860, 861, - 862, 863, 864, 865, 866, 867, 868, 869, 870, 871, - 872, 873, 874, 875, 876, 877, 878, 879, 685, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 287, - 0, 388, 247, 0, 438, 774, 0, 0, 549, 0, - 0, 772, 0, 0, 0, 0, 352, 0, 319, 193, - 217, 0, 0, 398, 446, 458, 0, 0, 0, 825, - 0, 456, 412, 527, 225, 274, 443, 418, 454, 426, - 277, 0, 0, 455, 359, 512, 436, 524, 550, 551, - 253, 392, 536, 496, 544, 568, 218, 250, 406, 489, - 530, 478, 384, 508, 509, 318, 477, 285, 196, 356, - 556, 216, 464, 358, 234, 223, 514, 533, 279, 441, - 563, 205, 491, 522, 231, 468, 0, 0, 570, 239, - 488, 207, 519, 487, 380, 315, 316, 206, 0, 442, - 258, 283, 0, 0, 248, 401, 826, 827, 246, 571, - 724, 543, 212, 0, 542, 394, 511, 520, 381, 370, - 211, 518, 379, 369, 323, 732, 733, 270, 296, 760, - 759, 758, 295, 297, 390, 389, 391, 200, 531, 0, - 201, 0, 483, 532, 572, 226, 227, 229, 0, 269, - 273, 281, 284, 292, 293, 302, 354, 405, 432, 428, - 437, 0, 506, 525, 537, 548, 554, 555, 557, 558, - 559, 560, 561, 564, 562, 393, 300, 479, 322, 360, - 0, 0, 411, 457, 232, 529, 480, 785, 773, 695, - 789, 697, 786, 787, 692, 693, 696, 788, 573, 574, - 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, - 585, 586, 587, 588, 589, 590, 0, 776, 681, 680, - 0, 688, 0, 714, 715, 717, 721, 722, 723, 734, - 735, 736, 744, 746, 747, 745, 748, 749, 750, 753, - 754, 755, 756, 751, 752, 757, 698, 702, 699, 700, - 701, 713, 703, 704, 705, 706, 707, 708, 709, 710, - 711, 712, 799, 800, 801, 802, 803, 804, 727, 731, - 730, 728, 729, 725, 726, 679, 192, 213, 355, 90, - 439, 278, 569, 539, 534, 199, 215, 790, 252, 791, - 0, 0, 795, 0, 0, 0, 797, 796, 0, 798, - 764, 763, 0, 0, 792, 793, 0, 794, 0, 0, - 194, 195, 202, 214, 224, 228, 235, 251, 266, 268, - 275, 288, 299, 307, 308, 311, 317, 367, 373, 374, - 375, 376, 395, 396, 397, 400, 403, 404, 407, 409, - 410, 413, 417, 421, 422, 423, 425, 427, 429, 440, - 445, 459, 460, 461, 462, 463, 466, 467, 472, 473, - 474, 475, 476, 484, 485, 490, 513, 515, 528, 546, - 552, 465, 805, 806, 807, 808, 809, 810, 811, 812, - 289, 523, 553, 521, 565, 547, 424, 365, 0, 0, - 368, 271, 294, 309, 0, 538, 486, 219, 451, 280, - 243, 830, 0, 204, 238, 222, 249, 264, 267, 313, - 378, 386, 415, 420, 286, 261, 236, 444, 233, 469, - 493, 494, 495, 497, 382, 256, 419, 383, 0, 363, - 503, 504, 305, 502, 0, 691, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 402, 0, 0, 0, 0, - 678, 0, 0, 0, 260, 683, 0, 0, 0, 353, - 257, 0, 0, 416, 0, 198, 0, 471, 244, 364, - 361, 510, 272, 263, 259, 242, 306, 372, 414, 492, - 408, 690, 357, 0, 0, 481, 387, 0, 0, 0, - 0, 0, 686, 687, 0, 0, 0, 0, 0, 0, - 0, 0, 312, 240, 314, 197, 399, 482, 276, 0, - 91, 0, 0, 831, 815, 781, 782, 819, 832, 833, - 834, 835, 820, 0, 230, 821, 822, 237, 823, 0, - 780, 718, 720, 719, 737, 738, 739, 740, 741, 742, - 743, 716, 828, 836, 837, 0, 255, 310, 262, 254, - 507, 0, 0, 0, 0, 0, 0, 0, 221, 0, - 0, 0, 0, 0, 0, 0, 660, 675, 0, 689, - 0, 0, 0, 265, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 672, - 673, 0, 0, 0, 0, 775, 0, 674, 0, 0, - 682, 838, 839, 840, 841, 842, 843, 844, 845, 846, - 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, - 857, 858, 859, 860, 861, 862, 863, 864, 865, 866, - 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, - 877, 878, 879, 685, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 287, 0, 388, 247, 0, 438, - 774, 0, 0, 549, 0, 0, 772, 0, 0, 0, - 0, 352, 0, 319, 193, 217, 0, 0, 398, 446, - 458, 0, 0, 0, 825, 0, 456, 412, 527, 225, - 274, 443, 418, 454, 426, 277, 3438, 0, 455, 359, - 512, 436, 524, 550, 551, 253, 392, 536, 496, 544, - 568, 218, 250, 406, 489, 530, 478, 384, 508, 509, - 318, 477, 285, 196, 356, 556, 216, 464, 358, 234, - 223, 514, 533, 279, 441, 563, 205, 491, 522, 231, - 468, 0, 0, 570, 239, 488, 207, 519, 487, 380, - 315, 316, 206, 0, 442, 258, 283, 0, 0, 248, - 401, 826, 827, 246, 571, 724, 543, 212, 0, 542, - 394, 511, 520, 381, 370, 211, 518, 379, 369, 323, - 732, 733, 270, 296, 760, 759, 758, 295, 297, 390, - 389, 391, 200, 531, 0, 201, 0, 483, 532, 572, - 226, 227, 229, 0, 269, 273, 281, 284, 292, 293, - 302, 354, 405, 432, 428, 437, 0, 506, 525, 537, - 548, 554, 555, 557, 558, 559, 560, 561, 564, 562, - 393, 300, 479, 322, 360, 0, 0, 411, 457, 232, - 529, 480, 785, 773, 695, 789, 697, 786, 787, 692, - 693, 696, 788, 573, 574, 575, 576, 577, 578, 579, - 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, - 590, 0, 776, 681, 680, 0, 688, 0, 714, 715, - 717, 721, 722, 723, 734, 735, 736, 744, 746, 747, - 745, 748, 749, 750, 753, 754, 755, 756, 751, 752, - 757, 698, 702, 699, 700, 701, 713, 703, 704, 705, - 706, 707, 708, 709, 710, 711, 712, 799, 800, 801, - 802, 803, 804, 727, 731, 730, 728, 729, 725, 726, - 679, 192, 213, 355, 0, 439, 278, 569, 539, 534, - 199, 215, 790, 252, 791, 0, 0, 795, 0, 0, - 0, 797, 796, 0, 798, 764, 763, 0, 0, 792, - 793, 0, 794, 0, 0, 194, 195, 202, 214, 224, - 228, 235, 251, 266, 268, 275, 288, 299, 307, 308, - 311, 317, 367, 373, 374, 375, 376, 395, 396, 397, - 400, 403, 404, 407, 409, 410, 413, 417, 421, 422, - 423, 425, 427, 429, 440, 445, 459, 460, 461, 462, - 463, 466, 467, 472, 473, 474, 475, 476, 484, 485, - 490, 513, 515, 528, 546, 552, 465, 805, 806, 807, - 808, 809, 810, 811, 812, 289, 523, 553, 521, 565, - 547, 424, 365, 0, 0, 368, 271, 294, 309, 0, - 538, 486, 219, 451, 280, 243, 830, 0, 204, 238, - 222, 249, 264, 267, 313, 378, 386, 415, 420, 286, - 261, 236, 444, 233, 469, 493, 494, 495, 497, 382, - 256, 419, 383, 0, 363, 503, 504, 305, 502, 0, - 691, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 402, 0, 0, 0, 0, 678, 0, 0, 0, 260, - 683, 0, 0, 0, 353, 257, 0, 0, 416, 0, - 198, 0, 471, 244, 364, 361, 510, 272, 263, 259, - 242, 306, 372, 414, 492, 408, 690, 357, 0, 0, - 481, 387, 0, 0, 0, 0, 0, 686, 687, 0, - 0, 0, 0, 0, 0, 0, 0, 312, 240, 314, - 197, 399, 482, 276, 0, 91, 0, 1525, 831, 815, - 781, 782, 819, 832, 833, 834, 835, 820, 0, 230, - 821, 822, 237, 823, 0, 780, 718, 720, 719, 737, - 738, 739, 740, 741, 742, 743, 716, 828, 836, 837, - 0, 255, 310, 262, 254, 507, 0, 0, 0, 0, - 0, 0, 0, 221, 0, 0, 0, 0, 0, 0, - 0, 660, 675, 0, 689, 0, 0, 0, 265, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 672, 673, 0, 0, 0, 0, - 775, 0, 674, 0, 0, 682, 838, 839, 840, 841, - 842, 843, 844, 845, 846, 847, 848, 849, 850, 851, - 852, 853, 854, 855, 856, 857, 858, 859, 860, 861, - 862, 863, 864, 865, 866, 867, 868, 869, 870, 871, - 872, 873, 874, 875, 876, 877, 878, 879, 685, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 287, - 0, 388, 247, 0, 438, 774, 0, 0, 549, 0, - 0, 772, 0, 0, 0, 0, 352, 0, 319, 193, - 217, 0, 0, 398, 446, 458, 0, 0, 0, 825, - 0, 456, 412, 527, 225, 274, 443, 418, 454, 426, - 277, 0, 0, 455, 359, 512, 436, 524, 550, 551, - 253, 392, 536, 496, 544, 568, 218, 250, 406, 489, - 530, 478, 384, 508, 509, 318, 477, 285, 196, 356, - 556, 216, 464, 358, 234, 223, 514, 533, 279, 441, - 563, 205, 491, 522, 231, 468, 0, 0, 570, 239, - 488, 207, 519, 487, 380, 315, 316, 206, 0, 442, - 258, 283, 0, 0, 248, 401, 826, 827, 246, 571, - 724, 543, 212, 0, 542, 394, 511, 520, 381, 370, - 211, 518, 379, 369, 323, 732, 733, 270, 296, 760, - 759, 758, 295, 297, 390, 389, 391, 200, 531, 0, - 201, 0, 483, 532, 572, 226, 227, 229, 0, 269, - 273, 281, 284, 292, 293, 302, 354, 405, 432, 428, - 437, 0, 506, 525, 537, 548, 554, 555, 557, 558, - 559, 560, 561, 564, 562, 393, 300, 479, 322, 360, - 0, 0, 411, 457, 232, 529, 480, 785, 773, 695, - 789, 697, 786, 787, 692, 693, 696, 788, 573, 574, - 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, - 585, 586, 587, 588, 589, 590, 0, 776, 681, 680, - 0, 688, 0, 714, 715, 717, 721, 722, 723, 734, - 735, 736, 744, 746, 747, 745, 748, 749, 750, 753, - 754, 755, 756, 751, 752, 757, 698, 702, 699, 700, - 701, 713, 703, 704, 705, 706, 707, 708, 709, 710, - 711, 712, 799, 800, 801, 802, 803, 804, 727, 731, - 730, 728, 729, 725, 726, 679, 192, 213, 355, 0, - 439, 278, 569, 539, 534, 199, 215, 790, 252, 791, - 0, 0, 795, 0, 0, 0, 797, 796, 0, 798, - 764, 763, 0, 0, 792, 793, 0, 794, 0, 0, - 194, 195, 202, 214, 224, 228, 235, 251, 266, 268, - 275, 288, 299, 307, 308, 311, 317, 367, 373, 374, - 375, 376, 395, 396, 397, 400, 403, 404, 407, 409, - 410, 413, 417, 421, 422, 423, 425, 427, 429, 440, - 445, 459, 460, 461, 462, 463, 466, 467, 472, 473, - 474, 475, 476, 484, 485, 490, 513, 515, 528, 546, - 552, 465, 805, 806, 807, 808, 809, 810, 811, 812, - 289, 523, 553, 521, 565, 547, 424, 365, 0, 0, - 368, 271, 294, 309, 0, 538, 486, 219, 451, 280, - 243, 830, 0, 204, 238, 222, 249, 264, 267, 313, - 378, 386, 415, 420, 286, 261, 236, 444, 233, 469, - 493, 494, 495, 497, 382, 256, 419, 383, 0, 363, - 503, 504, 305, 502, 0, 691, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 402, 0, 0, 0, 0, - 678, 0, 0, 0, 260, 683, 0, 0, 0, 353, - 257, 0, 0, 416, 0, 198, 0, 471, 244, 364, - 361, 510, 272, 263, 259, 242, 306, 372, 414, 492, - 408, 690, 357, 0, 0, 481, 387, 0, 0, 0, - 0, 0, 686, 687, 0, 0, 0, 0, 0, 0, - 0, 0, 312, 240, 314, 197, 399, 482, 276, 0, - 91, 0, 0, 831, 815, 781, 782, 819, 832, 833, - 834, 835, 820, 0, 230, 821, 822, 237, 823, 0, - 780, 718, 720, 719, 737, 738, 739, 740, 741, 742, - 743, 716, 828, 836, 837, 0, 255, 310, 262, 254, - 507, 0, 0, 0, 0, 0, 0, 0, 221, 0, - 0, 0, 0, 0, 0, 0, 660, 675, 0, 689, - 0, 0, 0, 265, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 672, - 673, 920, 0, 0, 0, 775, 0, 674, 0, 0, - 682, 838, 839, 840, 841, 842, 843, 844, 845, 846, - 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, - 857, 858, 859, 860, 861, 862, 863, 864, 865, 866, - 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, - 877, 878, 879, 685, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 287, 0, 388, 247, 0, 438, - 774, 0, 0, 549, 0, 0, 772, 0, 0, 0, - 0, 352, 0, 319, 193, 217, 0, 0, 398, 446, - 458, 0, 0, 0, 825, 0, 456, 412, 527, 225, - 274, 443, 418, 454, 426, 277, 0, 0, 455, 359, - 512, 436, 524, 550, 551, 253, 392, 536, 496, 544, - 568, 218, 250, 406, 489, 530, 478, 384, 508, 509, - 318, 477, 285, 196, 356, 556, 216, 464, 358, 234, - 223, 514, 533, 279, 441, 563, 205, 491, 522, 231, - 468, 0, 0, 570, 239, 488, 207, 519, 487, 380, - 315, 316, 206, 0, 442, 258, 283, 0, 0, 248, - 401, 826, 827, 246, 571, 724, 543, 212, 0, 542, - 394, 511, 520, 381, 370, 211, 518, 379, 369, 323, - 732, 733, 270, 296, 760, 759, 758, 295, 297, 390, - 389, 391, 200, 531, 0, 201, 0, 483, 532, 572, - 226, 227, 229, 0, 269, 273, 281, 284, 292, 293, - 302, 354, 405, 432, 428, 437, 0, 506, 525, 537, - 548, 554, 555, 557, 558, 559, 560, 561, 564, 562, - 393, 300, 479, 322, 360, 0, 0, 411, 457, 232, - 529, 480, 785, 773, 695, 789, 697, 786, 787, 692, - 693, 696, 788, 573, 574, 575, 576, 577, 578, 579, - 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, - 590, 0, 776, 681, 680, 0, 688, 0, 714, 715, - 717, 721, 722, 723, 734, 735, 736, 744, 746, 747, - 745, 748, 749, 750, 753, 754, 755, 756, 751, 752, - 757, 698, 702, 699, 700, 701, 713, 703, 704, 705, - 706, 707, 708, 709, 710, 711, 712, 799, 800, 801, - 802, 803, 804, 727, 731, 730, 728, 729, 725, 726, - 679, 192, 213, 355, 0, 439, 278, 569, 539, 534, - 199, 215, 790, 252, 791, 0, 0, 795, 0, 0, - 0, 797, 796, 0, 798, 764, 763, 0, 0, 792, - 793, 0, 794, 0, 0, 194, 195, 202, 214, 224, - 228, 235, 251, 266, 268, 275, 288, 299, 307, 308, - 311, 317, 367, 373, 374, 375, 376, 395, 396, 397, - 400, 403, 404, 407, 409, 410, 413, 417, 421, 422, - 423, 425, 427, 429, 440, 445, 459, 460, 461, 462, - 463, 466, 467, 472, 473, 474, 475, 476, 484, 485, - 490, 513, 515, 528, 546, 552, 465, 805, 806, 807, - 808, 809, 810, 811, 812, 289, 523, 553, 521, 565, - 547, 424, 365, 0, 0, 368, 271, 294, 309, 0, - 538, 486, 219, 451, 280, 243, 830, 0, 204, 238, - 222, 249, 264, 267, 313, 378, 386, 415, 420, 286, - 261, 236, 444, 233, 469, 493, 494, 495, 497, 382, - 256, 419, 383, 0, 363, 503, 504, 305, 502, 0, - 691, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 402, 0, 0, 0, 0, 678, 0, 0, 0, 260, - 683, 0, 0, 0, 353, 257, 0, 0, 416, 0, - 198, 0, 471, 244, 364, 361, 510, 272, 263, 259, - 242, 306, 372, 414, 492, 408, 690, 357, 0, 0, - 481, 387, 0, 0, 0, 0, 0, 686, 687, 0, - 0, 0, 0, 0, 0, 0, 0, 312, 240, 314, - 197, 399, 482, 276, 0, 91, 0, 0, 831, 815, - 781, 782, 819, 832, 833, 834, 835, 820, 0, 230, - 821, 822, 237, 823, 0, 780, 718, 720, 719, 737, - 738, 739, 740, 741, 742, 743, 716, 828, 836, 837, - 0, 255, 310, 262, 254, 507, 0, 0, 0, 0, - 0, 0, 0, 221, 0, 0, 0, 0, 0, 0, - 0, 660, 675, 0, 689, 0, 0, 0, 265, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 672, 673, 0, 0, 0, 0, - 775, 0, 674, 0, 0, 682, 838, 839, 840, 841, - 842, 843, 844, 845, 846, 847, 848, 849, 850, 851, - 852, 853, 854, 855, 856, 857, 858, 859, 860, 861, - 862, 863, 864, 865, 866, 867, 868, 869, 870, 871, - 872, 873, 874, 875, 876, 877, 878, 879, 685, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 287, - 0, 388, 247, 0, 438, 774, 0, 0, 549, 0, - 0, 772, 0, 0, 0, 0, 352, 0, 319, 193, - 217, 0, 0, 398, 446, 458, 0, 0, 0, 825, - 0, 456, 412, 527, 225, 274, 443, 418, 454, 426, - 277, 0, 0, 455, 359, 512, 436, 524, 550, 551, - 253, 392, 536, 496, 544, 568, 218, 250, 406, 489, - 530, 478, 384, 508, 509, 318, 477, 285, 196, 356, - 556, 216, 464, 358, 234, 223, 514, 533, 279, 441, - 563, 205, 491, 522, 231, 468, 0, 0, 570, 239, - 488, 207, 519, 487, 380, 315, 316, 206, 0, 442, - 258, 283, 0, 0, 248, 401, 826, 827, 246, 571, - 724, 543, 212, 0, 542, 394, 511, 520, 381, 370, - 211, 518, 379, 369, 323, 732, 733, 270, 296, 760, - 759, 758, 295, 297, 390, 389, 391, 200, 531, 0, - 201, 0, 483, 532, 572, 226, 227, 229, 0, 269, - 273, 281, 284, 292, 293, 302, 354, 405, 432, 428, - 437, 0, 506, 525, 537, 548, 554, 555, 557, 558, - 559, 560, 561, 564, 562, 393, 300, 479, 322, 360, - 0, 0, 411, 457, 232, 529, 480, 785, 773, 695, - 789, 697, 786, 787, 692, 693, 696, 788, 573, 574, - 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, - 585, 586, 587, 588, 589, 590, 0, 776, 681, 680, - 0, 688, 0, 714, 715, 717, 721, 722, 723, 734, - 735, 736, 744, 746, 747, 745, 748, 749, 750, 753, - 754, 755, 756, 751, 752, 757, 698, 702, 699, 700, - 701, 713, 703, 704, 705, 706, 707, 708, 709, 710, - 711, 712, 799, 800, 801, 802, 803, 804, 727, 731, - 730, 728, 729, 725, 726, 679, 192, 213, 355, 0, - 439, 278, 569, 539, 534, 199, 215, 790, 252, 791, - 0, 0, 795, 0, 0, 0, 797, 796, 0, 798, - 764, 763, 0, 0, 792, 793, 0, 794, 0, 0, - 194, 195, 202, 214, 224, 228, 235, 251, 266, 268, - 275, 288, 299, 307, 308, 311, 317, 367, 373, 374, - 375, 376, 395, 396, 397, 400, 403, 404, 407, 409, - 410, 413, 417, 421, 422, 423, 425, 427, 429, 440, - 445, 459, 460, 461, 462, 463, 466, 467, 472, 473, - 474, 475, 476, 484, 485, 490, 513, 515, 528, 546, - 552, 465, 805, 806, 807, 808, 809, 810, 811, 812, - 289, 523, 553, 521, 565, 547, 424, 365, 0, 0, - 368, 271, 294, 309, 0, 538, 486, 219, 451, 280, - 243, 830, 0, 204, 238, 222, 249, 264, 267, 313, - 378, 386, 415, 420, 286, 261, 236, 444, 233, 469, - 493, 494, 495, 497, 382, 256, 419, 383, 0, 363, - 503, 504, 305, 502, 0, 691, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 402, 0, 0, 0, 0, - 678, 0, 0, 0, 260, 683, 0, 0, 0, 353, - 257, 0, 0, 416, 0, 198, 0, 471, 244, 364, - 361, 510, 272, 263, 259, 242, 306, 372, 414, 492, - 408, 690, 357, 0, 0, 481, 387, 0, 0, 0, - 0, 0, 686, 687, 0, 0, 0, 0, 0, 0, - 0, 0, 312, 240, 314, 197, 399, 482, 276, 0, - 91, 0, 0, 831, 815, 781, 782, 819, 832, 833, - 834, 835, 820, 0, 230, 821, 822, 237, 823, 0, - 780, 718, 720, 719, 737, 738, 739, 740, 741, 742, - 743, 716, 828, 836, 837, 0, 255, 310, 262, 254, - 507, 0, 0, 0, 0, 0, 0, 0, 221, 0, - 0, 0, 0, 0, 0, 0, 0, 675, 0, 689, - 0, 0, 0, 265, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 672, - 673, 0, 0, 0, 0, 775, 0, 674, 0, 0, - 682, 838, 839, 840, 841, 842, 843, 844, 845, 846, - 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, - 857, 858, 859, 860, 861, 862, 863, 864, 865, 866, - 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, - 877, 878, 879, 685, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 287, 0, 388, 247, 0, 438, - 774, 0, 0, 549, 0, 0, 772, 0, 0, 0, - 0, 352, 0, 319, 193, 217, 0, 0, 398, 446, - 458, 0, 0, 0, 825, 0, 456, 412, 527, 225, - 274, 443, 418, 454, 426, 277, 0, 0, 455, 359, - 512, 436, 524, 550, 551, 253, 392, 536, 496, 544, - 568, 218, 250, 406, 489, 530, 478, 384, 508, 509, - 318, 477, 285, 196, 356, 556, 216, 464, 358, 234, - 223, 514, 533, 279, 441, 563, 205, 491, 522, 231, - 468, 0, 0, 570, 239, 488, 207, 519, 487, 380, - 315, 316, 206, 0, 442, 258, 283, 0, 0, 248, - 401, 826, 827, 246, 571, 724, 543, 212, 0, 542, - 394, 511, 520, 381, 370, 211, 518, 379, 369, 323, - 732, 733, 270, 296, 760, 759, 758, 295, 297, 390, - 389, 391, 200, 531, 0, 201, 0, 483, 532, 572, - 226, 227, 229, 0, 269, 273, 281, 284, 292, 293, - 302, 354, 405, 432, 428, 437, 0, 506, 525, 537, - 548, 554, 555, 557, 558, 559, 560, 561, 564, 562, - 393, 300, 479, 322, 360, 0, 0, 411, 457, 232, - 529, 480, 785, 773, 695, 789, 697, 786, 787, 692, - 693, 696, 788, 573, 574, 575, 576, 577, 578, 579, - 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, - 590, 0, 776, 681, 680, 0, 688, 0, 714, 715, - 717, 721, 722, 723, 734, 735, 736, 744, 746, 747, - 745, 748, 749, 750, 753, 754, 755, 756, 751, 752, - 757, 698, 702, 699, 700, 701, 713, 703, 704, 705, - 706, 707, 708, 709, 710, 711, 712, 799, 800, 801, - 802, 803, 804, 727, 731, 730, 728, 729, 725, 726, - 679, 192, 213, 355, 0, 439, 278, 569, 539, 534, - 199, 215, 790, 252, 791, 0, 0, 795, 0, 0, - 0, 797, 796, 0, 798, 764, 763, 0, 0, 792, - 793, 0, 794, 0, 0, 194, 195, 202, 214, 224, - 228, 235, 251, 266, 268, 275, 288, 299, 307, 308, - 311, 317, 367, 373, 374, 375, 376, 395, 396, 397, - 400, 403, 404, 407, 409, 410, 413, 417, 421, 422, - 423, 425, 427, 429, 440, 445, 459, 460, 461, 462, - 463, 466, 467, 472, 473, 474, 475, 476, 484, 485, - 490, 513, 515, 528, 546, 552, 465, 805, 806, 807, - 808, 809, 810, 811, 812, 289, 523, 553, 521, 565, - 547, 424, 365, 0, 0, 368, 271, 294, 309, 0, - 538, 486, 219, 451, 280, 243, 830, 0, 204, 238, - 222, 249, 264, 267, 313, 378, 386, 415, 420, 286, - 261, 236, 444, 233, 469, 493, 494, 495, 497, 382, - 256, 419, 383, 0, 363, 503, 504, 305, 502, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 402, 0, 0, 0, 0, 0, 0, 0, 0, 260, - 0, 0, 0, 0, 353, 257, 0, 0, 416, 0, - 198, 0, 471, 244, 364, 361, 510, 272, 263, 259, - 242, 306, 372, 414, 492, 408, 0, 357, 0, 0, - 481, 387, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 312, 240, 314, - 197, 399, 482, 276, 0, 0, 0, 0, 0, 638, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 230, - 0, 0, 237, 0, 0, 0, 338, 347, 346, 327, - 328, 330, 332, 337, 344, 350, 0, 0, 0, 0, - 0, 255, 310, 262, 254, 507, 0, 0, 0, 0, - 0, 0, 0, 221, 0, 0, 0, 0, 1338, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 265, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1339, 1340, 1341, 1342, 1343, 1344, 1345, 1347, 1346, - 1348, 1349, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 287, - 0, 388, 247, 0, 438, 0, 0, 0, 549, 0, - 0, 0, 0, 0, 0, 0, 352, 0, 319, 193, - 217, 0, 0, 398, 446, 458, 0, 0, 0, 245, - 0, 456, 412, 527, 225, 274, 443, 418, 454, 426, - 277, 0, 0, 455, 359, 512, 436, 524, 550, 551, - 253, 392, 536, 496, 544, 568, 218, 250, 406, 489, - 530, 478, 384, 508, 509, 318, 477, 285, 196, 356, - 556, 216, 464, 358, 234, 223, 514, 533, 279, 441, - 563, 205, 491, 522, 231, 468, 0, 0, 570, 239, - 488, 207, 519, 487, 380, 315, 316, 206, 0, 442, - 258, 283, 0, 0, 248, 401, 516, 517, 246, 571, - 220, 543, 212, 0, 542, 394, 511, 520, 381, 370, - 211, 518, 379, 369, 323, 342, 343, 270, 296, 433, - 362, 434, 295, 297, 390, 389, 391, 200, 531, 0, - 201, 0, 483, 532, 572, 226, 227, 229, 0, 269, - 273, 281, 284, 292, 293, 302, 354, 405, 432, 428, - 437, 0, 506, 525, 537, 548, 554, 555, 557, 558, - 559, 560, 561, 564, 562, 393, 300, 479, 322, 360, - 0, 0, 411, 457, 232, 529, 480, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 573, 574, - 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, - 585, 586, 587, 588, 589, 590, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 591, 371, 470, 526, 324, - 336, 339, 329, 348, 0, 349, 325, 326, 331, 333, - 334, 335, 340, 341, 345, 351, 241, 203, 377, 385, - 505, 301, 208, 209, 210, 498, 499, 500, 501, 540, - 541, 545, 447, 448, 449, 450, 282, 535, 298, 453, - 452, 320, 321, 366, 435, 0, 192, 213, 355, 0, - 439, 278, 569, 539, 534, 199, 215, 0, 252, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 194, 195, 202, 214, 224, 228, 235, 251, 266, 268, - 275, 288, 299, 307, 308, 311, 317, 367, 373, 374, - 375, 376, 395, 396, 397, 400, 403, 404, 407, 409, - 410, 413, 417, 421, 422, 423, 425, 427, 429, 440, - 445, 459, 460, 461, 462, 463, 466, 467, 472, 473, - 474, 475, 476, 484, 485, 490, 513, 515, 528, 546, - 552, 465, 290, 291, 430, 431, 303, 304, 566, 567, - 289, 523, 553, 521, 565, 547, 424, 365, 0, 0, - 368, 271, 294, 309, 0, 538, 486, 219, 451, 280, - 243, 0, 0, 204, 238, 222, 249, 264, 267, 313, - 378, 386, 415, 420, 286, 261, 236, 444, 233, 469, - 493, 494, 495, 497, 382, 256, 419, 383, 0, 363, - 503, 504, 305, 502, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 402, 0, 0, 0, 0, - 0, 0, 0, 0, 260, 0, 0, 0, 0, 353, - 257, 0, 0, 416, 0, 198, 0, 471, 244, 364, - 361, 510, 272, 263, 259, 242, 306, 372, 414, 492, - 408, 0, 357, 0, 0, 481, 387, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 312, 240, 314, 197, 399, 482, 276, 0, - 0, 0, 0, 0, 638, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 230, 0, 0, 237, 0, 0, - 0, 338, 347, 346, 327, 328, 330, 332, 337, 344, - 350, 0, 0, 0, 0, 0, 255, 310, 262, 254, - 507, 0, 0, 0, 0, 0, 0, 0, 221, 0, - 991, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 265, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 287, 0, 388, 247, 0, 438, - 0, 0, 990, 549, 0, 0, 0, 0, 0, 987, - 988, 352, 948, 319, 193, 217, 981, 985, 398, 446, - 458, 0, 0, 0, 245, 0, 456, 412, 527, 225, - 274, 443, 418, 454, 426, 277, 0, 0, 455, 359, - 512, 436, 524, 550, 551, 253, 392, 536, 496, 544, - 568, 218, 250, 406, 489, 530, 478, 384, 508, 509, - 318, 477, 285, 196, 356, 556, 216, 464, 358, 234, - 223, 514, 533, 279, 441, 563, 205, 491, 522, 231, - 468, 0, 0, 570, 239, 488, 207, 519, 487, 380, - 315, 316, 206, 0, 442, 258, 283, 0, 0, 248, - 401, 516, 517, 246, 571, 220, 543, 212, 0, 542, - 394, 511, 520, 381, 370, 211, 518, 379, 369, 323, - 342, 343, 270, 296, 433, 362, 434, 295, 297, 390, - 389, 391, 200, 531, 0, 201, 0, 483, 532, 572, - 226, 227, 229, 0, 269, 273, 281, 284, 292, 293, - 302, 354, 405, 432, 428, 437, 0, 506, 525, 537, - 548, 554, 555, 557, 558, 559, 560, 561, 564, 562, - 393, 300, 479, 322, 360, 0, 0, 411, 457, 232, - 529, 480, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 573, 574, 575, 576, 577, 578, 579, - 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, - 590, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 591, 371, 470, 526, 324, 336, 339, 329, 348, 0, - 349, 325, 326, 331, 333, 334, 335, 340, 341, 345, - 351, 241, 203, 377, 385, 505, 301, 208, 209, 210, - 498, 499, 500, 501, 540, 541, 545, 447, 448, 449, - 450, 282, 535, 298, 453, 452, 320, 321, 366, 435, - 0, 192, 213, 355, 0, 439, 278, 569, 539, 534, - 199, 215, 0, 252, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 194, 195, 202, 214, 224, - 228, 235, 251, 266, 268, 275, 288, 299, 307, 308, - 311, 317, 367, 373, 374, 375, 376, 395, 396, 397, - 400, 403, 404, 407, 409, 410, 413, 417, 421, 422, - 423, 425, 427, 429, 440, 445, 459, 460, 461, 462, - 463, 466, 467, 472, 473, 474, 475, 476, 484, 485, - 490, 513, 515, 528, 546, 552, 465, 290, 291, 430, - 431, 303, 304, 566, 567, 289, 523, 553, 521, 565, - 547, 424, 365, 0, 0, 368, 271, 294, 309, 0, - 538, 486, 219, 451, 280, 243, 0, 0, 204, 238, - 222, 249, 264, 267, 313, 378, 386, 415, 420, 286, - 261, 236, 444, 233, 469, 493, 494, 495, 497, 382, - 256, 419, 383, 0, 363, 503, 504, 305, 502, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 402, 0, 0, 0, 0, 0, 0, 0, 0, 260, - 0, 0, 0, 0, 353, 257, 0, 0, 416, 0, - 198, 0, 471, 244, 364, 361, 510, 272, 263, 259, - 242, 306, 372, 414, 492, 408, 0, 357, 0, 0, - 481, 387, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 312, 240, 314, - 197, 399, 482, 276, 0, 0, 0, 0, 1488, 815, - 0, 0, 1485, 0, 0, 0, 0, 1483, 0, 230, - 1484, 1482, 237, 1487, 0, 780, 338, 347, 346, 327, - 328, 330, 332, 337, 344, 350, 0, 0, 0, 0, - 0, 255, 310, 262, 254, 507, 0, 0, 0, 0, - 0, 0, 0, 221, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 265, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 287, - 0, 388, 247, 0, 438, 0, 0, 0, 549, 0, - 0, 0, 0, 0, 0, 0, 352, 0, 319, 193, - 217, 0, 0, 398, 446, 458, 0, 0, 0, 245, - 0, 456, 412, 527, 225, 274, 443, 418, 454, 426, - 277, 0, 0, 455, 359, 512, 436, 524, 550, 551, - 253, 392, 536, 496, 544, 568, 218, 250, 406, 489, - 530, 478, 384, 508, 509, 318, 477, 285, 196, 356, - 556, 216, 464, 358, 234, 223, 514, 533, 279, 441, - 563, 205, 491, 522, 231, 468, 0, 0, 570, 239, - 488, 207, 519, 487, 380, 315, 316, 206, 0, 442, - 258, 283, 0, 0, 248, 401, 516, 517, 246, 571, - 220, 543, 212, 0, 542, 394, 511, 520, 381, 370, - 211, 518, 379, 369, 323, 342, 343, 270, 296, 433, - 362, 434, 295, 297, 390, 389, 391, 200, 531, 0, - 201, 0, 483, 532, 572, 226, 227, 229, 0, 269, - 273, 281, 284, 292, 293, 302, 354, 405, 432, 428, - 437, 0, 506, 525, 537, 548, 554, 555, 557, 558, - 559, 560, 561, 564, 562, 393, 300, 479, 322, 360, - 0, 0, 411, 457, 232, 529, 480, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 573, 574, - 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, - 585, 586, 587, 588, 589, 590, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 591, 371, 470, 526, 324, - 336, 339, 329, 348, 0, 349, 325, 326, 331, 333, - 334, 335, 340, 341, 345, 351, 241, 203, 377, 385, - 505, 301, 208, 209, 210, 498, 499, 500, 501, 540, - 541, 545, 447, 448, 449, 450, 282, 535, 298, 453, - 452, 320, 321, 366, 435, 0, 192, 213, 355, 0, - 439, 278, 569, 539, 534, 199, 215, 0, 252, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 194, 195, 202, 214, 224, 228, 235, 251, 266, 268, - 275, 288, 299, 307, 308, 311, 317, 367, 373, 374, - 375, 376, 395, 396, 397, 400, 403, 404, 407, 409, - 410, 413, 417, 421, 422, 423, 425, 427, 429, 440, - 445, 459, 460, 461, 462, 463, 466, 467, 472, 473, - 474, 475, 476, 484, 485, 490, 513, 515, 528, 546, - 552, 465, 290, 291, 430, 431, 303, 304, 566, 567, - 289, 523, 553, 521, 565, 547, 424, 365, 0, 0, - 368, 271, 294, 309, 0, 538, 486, 219, 451, 280, - 243, 0, 0, 204, 238, 222, 249, 264, 267, 313, - 378, 386, 415, 420, 286, 261, 236, 444, 233, 469, - 493, 494, 495, 497, 382, 256, 419, 0, 383, 363, - 503, 504, 305, 82, 502, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 402, 0, 0, 0, - 0, 0, 0, 0, 0, 260, 0, 0, 0, 0, - 353, 257, 0, 0, 416, 0, 198, 0, 471, 244, - 364, 361, 510, 272, 263, 259, 242, 306, 372, 414, - 492, 408, 0, 357, 0, 0, 481, 387, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 312, 240, 314, 197, 399, 482, 276, - 0, 91, 0, 0, 0, 190, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 230, 0, 0, 237, 0, - 0, 0, 338, 347, 346, 327, 328, 330, 332, 337, - 344, 350, 0, 0, 0, 0, 0, 255, 310, 262, - 254, 507, 0, 0, 0, 0, 0, 0, 0, 221, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 265, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 287, 0, 388, 247, 0, - 438, 0, 0, 0, 549, 0, 0, 0, 0, 0, - 0, 0, 352, 0, 319, 193, 217, 0, 0, 398, - 446, 458, 0, 0, 0, 245, 0, 456, 412, 527, - 225, 274, 443, 418, 454, 426, 277, 0, 0, 455, - 359, 512, 436, 524, 550, 551, 253, 392, 536, 496, - 544, 568, 218, 250, 406, 489, 530, 478, 384, 508, - 509, 318, 477, 285, 196, 356, 556, 216, 464, 358, - 234, 223, 514, 533, 279, 441, 563, 205, 491, 522, - 231, 468, 0, 0, 570, 239, 488, 207, 519, 487, - 380, 315, 316, 206, 0, 442, 258, 283, 0, 0, - 248, 401, 516, 517, 246, 571, 220, 543, 212, 0, - 542, 394, 511, 520, 381, 370, 211, 518, 379, 369, - 323, 342, 343, 270, 296, 433, 362, 434, 295, 297, - 390, 389, 391, 200, 531, 0, 201, 0, 483, 532, - 572, 226, 227, 229, 0, 269, 273, 281, 284, 292, - 293, 302, 354, 405, 432, 428, 437, 0, 506, 525, - 537, 548, 554, 555, 557, 558, 559, 560, 561, 564, - 562, 393, 300, 479, 322, 360, 0, 0, 411, 457, - 232, 529, 480, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 573, 574, 575, 576, 577, 578, - 579, 580, 581, 582, 583, 584, 585, 586, 587, 588, - 589, 590, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 591, 371, 470, 526, 324, 336, 339, 329, 348, - 0, 349, 325, 326, 331, 333, 334, 335, 340, 341, - 345, 351, 241, 203, 377, 385, 505, 301, 208, 209, - 210, 498, 499, 500, 501, 540, 541, 545, 447, 448, - 449, 450, 282, 535, 298, 453, 452, 320, 321, 366, - 435, 0, 192, 213, 355, 90, 439, 278, 569, 539, - 534, 199, 215, 0, 252, 0, 0, 0, 0, 0, - 0, 2102, 0, 0, 2101, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 194, 195, 202, 214, - 224, 228, 235, 251, 266, 268, 275, 288, 299, 307, - 308, 311, 317, 367, 373, 374, 375, 376, 395, 396, - 397, 400, 403, 404, 407, 409, 410, 413, 417, 421, - 422, 423, 425, 427, 429, 440, 445, 459, 460, 461, - 462, 463, 466, 467, 472, 473, 474, 475, 476, 484, - 485, 490, 513, 515, 528, 546, 552, 465, 290, 291, - 430, 431, 303, 304, 566, 567, 289, 523, 553, 521, - 565, 547, 424, 365, 0, 0, 368, 271, 294, 309, - 0, 538, 486, 219, 451, 280, 243, 0, 0, 204, - 238, 222, 249, 264, 267, 313, 378, 386, 415, 420, - 286, 261, 236, 444, 233, 469, 493, 494, 495, 497, - 382, 256, 419, 1544, 0, 363, 503, 504, 305, 502, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 402, 0, 0, 0, 1546, 0, 0, 0, 0, - 260, 0, 0, 0, 0, 353, 257, 0, 0, 416, - 0, 198, 0, 471, 244, 364, 361, 510, 272, 263, - 259, 242, 306, 372, 414, 492, 408, 0, 357, 0, - 0, 481, 387, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 312, 240, - 314, 197, 399, 482, 276, 0, 0, 0, 0, 1548, - 638, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 230, 0, 0, 237, 0, 0, 0, 338, 347, 346, - 327, 328, 330, 332, 337, 344, 350, 0, 0, 0, - 0, 0, 255, 310, 262, 254, 507, 0, 0, 0, - 0, 0, 0, 0, 221, 0, 0, 0, 1319, 0, - 1320, 1321, 0, 0, 0, 0, 0, 0, 0, 265, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 287, 0, 388, 247, 0, 438, 0, 0, 0, 549, - 0, 0, 0, 0, 0, 0, 0, 352, 0, 319, - 193, 217, 0, 0, 398, 446, 458, 0, 0, 0, - 245, 0, 456, 412, 527, 225, 274, 443, 418, 454, - 426, 277, 0, 0, 455, 359, 512, 436, 524, 550, - 551, 253, 392, 536, 496, 544, 568, 218, 250, 406, - 489, 530, 478, 384, 508, 509, 318, 477, 285, 196, - 356, 556, 216, 464, 358, 234, 223, 514, 533, 279, - 441, 563, 205, 491, 522, 231, 468, 0, 0, 570, - 239, 488, 207, 519, 487, 380, 315, 316, 206, 0, - 442, 258, 283, 0, 0, 248, 401, 516, 517, 246, - 571, 220, 543, 212, 0, 542, 394, 511, 520, 381, - 370, 211, 518, 379, 369, 323, 342, 343, 270, 296, - 433, 362, 434, 295, 297, 390, 389, 391, 200, 531, - 0, 201, 0, 483, 532, 572, 226, 227, 229, 0, - 269, 273, 281, 284, 292, 293, 302, 354, 405, 432, - 428, 437, 0, 506, 525, 537, 548, 554, 555, 557, - 558, 559, 560, 561, 564, 562, 393, 300, 479, 322, - 360, 0, 0, 411, 457, 232, 529, 480, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 573, - 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, - 584, 585, 586, 587, 588, 589, 590, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 591, 371, 470, 526, - 324, 336, 339, 329, 348, 0, 349, 325, 326, 331, - 333, 334, 335, 340, 341, 345, 351, 241, 203, 377, - 385, 505, 301, 208, 209, 210, 498, 499, 500, 501, - 540, 541, 545, 447, 448, 449, 450, 282, 535, 298, - 453, 452, 320, 321, 366, 435, 0, 192, 213, 355, - 0, 439, 278, 569, 539, 534, 199, 215, 0, 252, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 194, 195, 202, 214, 224, 228, 235, 251, 266, - 268, 275, 288, 299, 307, 308, 311, 317, 367, 373, - 374, 375, 376, 395, 396, 397, 400, 403, 404, 407, - 409, 410, 413, 417, 421, 422, 423, 425, 427, 429, - 440, 445, 459, 460, 461, 462, 463, 466, 467, 472, - 473, 474, 475, 476, 484, 485, 490, 513, 515, 528, - 546, 552, 465, 290, 291, 430, 431, 303, 304, 566, - 567, 289, 523, 553, 521, 565, 547, 424, 365, 0, - 0, 368, 271, 294, 309, 0, 538, 486, 219, 451, - 280, 243, 0, 0, 204, 238, 222, 249, 264, 267, - 313, 378, 386, 415, 420, 286, 261, 236, 444, 233, - 469, 493, 494, 495, 497, 382, 256, 419, 0, 383, - 363, 503, 504, 305, 82, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 0, - 0, 0, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 91, 0, 1525, 0, 638, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 90, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 402, 0, 0, 0, 0, 0, 0, 0, - 0, 260, 0, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 91, 0, 0, - 0, 190, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 0, 0, 0, 0, 352, 0, - 319, 193, 217, 0, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 0, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 2102, 0, 0, - 2101, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 0, - 0, 2052, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 1727, 190, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 2050, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 402, 0, 0, 0, 0, 0, 0, 0, - 0, 260, 0, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 0, 0, 0, - 0, 638, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 942, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 0, 0, 0, 0, 352, 948, - 319, 193, 217, 946, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 0, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 0, - 0, 2052, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 1727, 190, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 402, 0, 0, 0, 0, 0, 0, 0, - 0, 260, 0, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 0, 0, 1525, - 0, 638, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 3348, 0, 0, 0, 352, 0, - 319, 193, 217, 0, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 0, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 0, - 0, 0, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 1882, 638, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1883, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 402, 0, 0, 0, 0, 0, 0, 0, - 0, 260, 0, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 0, 0, 0, - 2462, 638, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 2463, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 0, 0, 0, 0, 352, 0, - 319, 193, 217, 0, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 0, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 0, - 0, 0, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 0, 638, 0, 0, 0, - 0, 2447, 0, 0, 0, 0, 230, 0, 0, 237, - 2448, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 402, 0, 0, 0, 0, 0, 0, 0, - 0, 260, 1567, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 0, 0, 0, - 1566, 638, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 0, 0, 0, 0, 352, 0, - 319, 193, 217, 0, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 0, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 0, - 0, 0, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 0, 640, 641, 642, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 402, 0, 0, 0, 0, 0, 0, 0, - 0, 260, 0, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 0, 0, 0, - 0, 638, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 3472, 0, 0, 0, 352, 0, - 319, 193, 217, 0, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 0, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 0, - 0, 0, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 1727, 190, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 402, 0, 0, 0, 0, 0, 0, 0, - 0, 260, 0, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 0, 0, 0, - 0, 638, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 3348, 0, 0, 0, 352, 0, - 319, 193, 217, 0, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 0, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 0, - 0, 0, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 91, 0, 0, 0, 638, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 2103, 0, 0, 0, 0, - 0, 0, 402, 0, 0, 0, 0, 0, 0, 0, - 0, 260, 0, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 0, 0, 0, - 0, 190, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 0, 0, 0, 0, 352, 0, - 319, 193, 217, 0, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 0, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 0, - 0, 0, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 1548, 638, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 402, 0, 0, 0, 0, 0, 0, 0, - 0, 260, 0, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 0, 0, 0, - 0, 190, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 0, 0, 0, 0, 352, 0, - 319, 193, 217, 0, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 1837, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 0, - 0, 0, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 1828, 638, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 402, 0, 1694, 0, 0, 0, 0, 0, - 0, 260, 0, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 0, 0, 0, - 0, 638, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 0, 0, 0, 0, 352, 0, - 319, 193, 217, 0, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 0, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 1692, - 0, 0, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 0, 638, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 402, 0, 1690, 0, 0, 0, 0, 0, - 0, 260, 0, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 0, 0, 0, - 0, 638, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 0, 0, 0, 0, 352, 0, - 319, 193, 217, 0, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 0, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 1688, - 0, 0, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 0, 638, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 402, 0, 1686, 0, 0, 0, 0, 0, - 0, 260, 0, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 0, 0, 0, - 0, 638, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 0, 0, 0, 0, 352, 0, - 319, 193, 217, 0, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 0, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 1682, - 0, 0, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 0, 638, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 402, 0, 1680, 0, 0, 0, 0, 0, - 0, 260, 0, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 0, 0, 0, - 0, 638, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 0, 0, 0, 0, 352, 0, - 319, 193, 217, 0, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 0, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 1678, - 0, 0, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 0, 638, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 402, 0, 0, 0, 0, 0, 0, 0, - 0, 260, 0, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 1653, 0, 0, - 0, 638, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 0, 0, 0, 0, 352, 0, - 319, 193, 217, 0, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 0, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 0, - 0, 0, 0, 0, 0, 1552, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 0, 190, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 402, 0, 0, 0, 0, 0, 0, 0, - 0, 260, 0, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 91, 0, 0, - 0, 815, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 0, 0, 0, 0, 352, 0, - 319, 193, 217, 0, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 0, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 0, - 0, 0, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 0, 190, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1298, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 1297, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 402, 0, 0, 0, 0, 0, 0, 0, - 0, 260, 0, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 0, 0, 0, - 0, 190, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 0, 0, 0, 0, 352, 0, - 319, 193, 217, 0, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 0, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 901, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 0, - 0, 0, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 0, 190, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 594, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 402, 0, 0, 0, 0, 0, 0, 0, - 0, 260, 0, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 0, 0, 0, - 0, 638, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 0, 0, 0, 0, 352, 0, - 319, 193, 217, 0, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 0, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 3480, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 0, - 0, 0, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 0, 638, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 383, 0, 363, 503, 504, 305, - 502, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 402, 0, 0, 0, 0, 0, 0, 0, - 0, 260, 0, 0, 0, 0, 353, 257, 0, 0, - 416, 0, 198, 0, 471, 244, 364, 361, 510, 272, - 263, 259, 242, 306, 372, 414, 492, 408, 0, 357, - 0, 0, 481, 387, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 312, - 240, 314, 197, 399, 482, 276, 0, 0, 0, 0, - 0, 815, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 230, 0, 0, 237, 0, 0, 0, 338, 347, - 346, 327, 328, 330, 332, 337, 344, 350, 0, 0, - 0, 0, 0, 255, 310, 262, 254, 507, 0, 0, - 0, 0, 0, 0, 0, 221, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 287, 0, 388, 247, 0, 438, 0, 0, 0, - 549, 0, 0, 0, 0, 0, 0, 0, 352, 0, - 319, 193, 217, 0, 0, 398, 446, 458, 0, 0, - 0, 245, 0, 456, 412, 527, 225, 274, 443, 418, - 454, 426, 277, 0, 0, 455, 359, 512, 436, 524, - 550, 551, 253, 392, 536, 496, 544, 568, 218, 250, - 406, 489, 530, 478, 384, 508, 509, 318, 477, 285, - 196, 356, 556, 216, 464, 358, 234, 223, 514, 533, - 279, 441, 563, 205, 491, 522, 231, 468, 0, 0, - 570, 239, 488, 207, 519, 487, 380, 315, 316, 206, - 0, 442, 258, 283, 0, 0, 248, 401, 516, 517, - 246, 571, 220, 543, 212, 0, 542, 394, 511, 520, - 381, 370, 211, 518, 379, 369, 323, 342, 343, 270, - 296, 433, 362, 434, 295, 297, 390, 389, 391, 200, - 531, 0, 201, 0, 483, 532, 572, 226, 227, 229, - 0, 269, 273, 281, 284, 292, 293, 302, 354, 405, - 432, 428, 437, 0, 506, 525, 537, 548, 554, 555, - 557, 558, 559, 560, 561, 564, 562, 393, 300, 479, - 322, 360, 0, 0, 411, 457, 232, 529, 480, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, - 583, 584, 585, 586, 587, 588, 589, 590, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 591, 371, 470, - 526, 324, 336, 339, 329, 348, 0, 349, 325, 326, - 331, 333, 334, 335, 340, 341, 345, 351, 241, 203, - 377, 385, 505, 301, 208, 209, 210, 498, 499, 500, - 501, 540, 541, 545, 447, 448, 449, 450, 282, 535, - 298, 453, 452, 320, 321, 366, 435, 0, 192, 213, - 355, 0, 439, 278, 569, 539, 534, 199, 215, 0, - 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 194, 195, 202, 214, 224, 228, 235, 251, - 266, 268, 275, 288, 299, 307, 308, 311, 317, 367, - 373, 374, 375, 376, 395, 396, 397, 400, 403, 404, - 407, 409, 410, 413, 417, 421, 422, 423, 425, 427, - 429, 440, 445, 459, 460, 461, 462, 463, 466, 467, - 472, 473, 474, 475, 476, 484, 485, 490, 513, 515, - 528, 546, 552, 465, 290, 291, 430, 431, 303, 304, - 566, 567, 289, 523, 553, 521, 565, 547, 424, 365, - 0, 0, 368, 271, 294, 309, 0, 538, 486, 219, - 451, 280, 243, 0, 0, 204, 238, 222, 249, 264, - 267, 313, 378, 386, 415, 420, 286, 261, 236, 444, - 233, 469, 493, 494, 495, 497, 382, 256, 419, 383, - 0, 363, 503, 504, 305, 502, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 402, 0, 0, - 0, 0, 0, 0, 0, 0, 260, 0, 0, 0, - 0, 353, 257, 0, 0, 416, 0, 198, 0, 471, - 244, 364, 361, 510, 272, 263, 259, 242, 306, 372, - 414, 492, 408, 0, 357, 0, 0, 481, 387, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 312, 240, 314, 197, 399, 482, - 276, 0, 0, 0, 0, 0, 190, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 230, 0, 0, 237, - 0, 0, 0, 338, 347, 346, 327, 328, 330, 332, - 337, 344, 350, 0, 0, 0, 0, 0, 255, 310, - 262, 254, 507, 0, 0, 0, 0, 0, 0, 0, - 221, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 287, 0, 388, 247, - 0, 438, 0, 0, 0, 549, 0, 0, 0, 0, - 0, 0, 0, 352, 0, 319, 193, 217, 0, 0, - 398, 446, 458, 0, 0, 0, 245, 0, 456, 412, - 527, 225, 274, 443, 418, 454, 426, 277, 0, 0, - 455, 359, 512, 436, 524, 550, 551, 253, 392, 536, - 496, 544, 568, 218, 250, 406, 489, 530, 478, 384, - 508, 509, 318, 477, 285, 196, 356, 556, 216, 464, - 358, 234, 223, 514, 533, 279, 441, 563, 205, 491, - 522, 231, 468, 0, 0, 570, 239, 488, 207, 519, - 487, 380, 315, 316, 206, 0, 442, 258, 283, 0, - 0, 248, 401, 516, 517, 246, 571, 220, 543, 212, - 0, 542, 394, 511, 520, 381, 370, 211, 518, 379, - 369, 323, 342, 343, 270, 296, 433, 362, 434, 295, - 297, 390, 389, 391, 200, 531, 0, 201, 0, 483, - 532, 572, 226, 227, 229, 0, 269, 273, 281, 284, - 292, 293, 302, 354, 405, 432, 428, 437, 0, 506, - 525, 537, 548, 554, 555, 557, 558, 559, 560, 561, - 564, 562, 393, 300, 479, 322, 360, 0, 0, 411, - 457, 232, 529, 480, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 573, 574, 575, 576, 577, - 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, - 588, 589, 590, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 591, 371, 470, 526, 324, 336, 339, 329, - 348, 0, 349, 325, 326, 331, 333, 334, 335, 340, - 341, 345, 351, 241, 203, 377, 385, 505, 301, 208, - 209, 210, 498, 499, 500, 501, 540, 541, 545, 447, - 448, 449, 450, 282, 535, 298, 453, 452, 320, 321, - 366, 435, 0, 192, 213, 355, 0, 439, 278, 569, - 539, 534, 199, 215, 0, 252, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 194, 195, 202, - 214, 224, 228, 235, 251, 266, 268, 275, 288, 299, - 307, 308, 311, 317, 367, 373, 374, 375, 376, 395, - 396, 397, 400, 403, 404, 407, 409, 410, 413, 417, - 421, 422, 423, 425, 427, 429, 440, 445, 459, 460, - 461, 462, 463, 466, 467, 472, 473, 474, 475, 476, - 484, 485, 490, 513, 515, 528, 546, 552, 465, 290, - 291, 430, 431, 303, 304, 566, 567, 289, 523, 553, - 521, 565, 547, 424, 365, 0, 0, 368, 271, 294, - 309, 0, 538, 486, 219, 451, 280, 243, 0, 0, - 204, 238, 222, 249, 264, 267, 313, 378, 386, 415, - 420, 286, 261, 236, 444, 233, 469, 493, 494, 495, - 497, 382, 256, 419, 0, 0, 363, 503, 504, 305, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 3785, 3786, 3787, 3788, 3789, 3790, 3791, 3792, } var yyPact = [...]int{ - -1000, -1000, 3745, -1000, -458, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 5012, -1000, -530, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 2407, 2449, -1000, -1000, -1000, -1000, 2558, -1000, 985, + 2080, -1000, 2350, 4889, -1000, 54050, 469, -1000, 51162, 464, + 836, 238, 35278, -1000, 181, -1000, 178, 52606, 187, -1000, + -1000, -1000, -1000, -438, 20836, 2295, 43, 38, 54050, -1000, + -1000, -1000, -1000, -352, 2517, 2040, -1000, 398, -1000, -1000, + -1000, -1000, -1000, -1000, 50440, -1000, 1107, -1000, -1000, 2361, + 2336, 2569, 888, 2271, -1000, 2467, 2040, -1000, 20836, 2511, + 2431, 20114, 20114, 362, -1000, -1000, 302, -1000, -1000, 30224, + 54050, 38166, 842, -1000, 2350, -1000, -1000, -1000, 219, -1000, + 315, 1964, -1000, 1963, -1000, 857, 897, 337, 467, 465, + 335, 334, 332, 331, 328, 326, 325, 324, 345, -1000, + 932, 932, -287, -288, 1524, 434, 352, 352, 1045, 420, + 2319, 2315, -1000, -1000, 932, 932, 932, 330, 932, 932, + 932, 932, 280, 279, 932, 932, 932, 932, 932, 932, + 932, 932, 932, 932, 932, 932, 932, 932, 932, 932, + 932, 299, 2350, 265, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 2352, - 2357, -1000, -1000, -1000, -1000, 2500, -1000, 918, 1982, -1000, - 2325, 3684, -1000, 46825, 697, -1000, 44205, 691, 197, 29795, - -1000, 201, -1000, 178, 45515, 195, -1000, -1000, -1000, -1000, - -311, 18658, 2243, 65, 64, 46825, -1000, -1000, -1000, -1000, - 2461, 1944, -1000, 395, -1000, -1000, -1000, -1000, -1000, -1000, - 43550, -1000, 1009, -1000, -1000, 2340, 2316, 2523, 839, 2272, - -1000, 2420, 1944, -1000, 18658, 2450, 2395, 18003, 18003, 434, - -1000, -1000, 209, -1000, -1000, 25210, 46825, 32415, 815, -1000, - 2325, -1000, -1000, -1000, 155, -1000, 339, 1866, -1000, 1863, - -1000, 906, 932, 379, 547, 545, 365, 364, 363, 362, - 360, 357, 350, 346, 391, -1000, 856, 856, -120, -126, - 1522, 651, 418, 418, 825, 606, 2288, 2273, -1000, -1000, - 856, 856, 856, 493, 856, 856, 856, 856, 299, 297, - 856, 856, 856, 856, 856, 856, 856, 856, 856, 856, - 856, 856, 856, 856, 856, 856, 856, 744, 2325, 273, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, @@ -6343,63 +7245,68 @@ var yyPact = [...]int{ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 46825, 191, 46825, -1000, 763, 46825, 997, 997, - 77, 997, 997, 997, 997, 188, 791, 63, -1000, 181, - 275, 225, 278, 995, 239, -1000, -1000, 264, 995, 1597, - -1000, 845, 277, 179, -1000, 997, 997, -1000, 12083, 203, - 12083, 12083, -1000, 2319, -1000, -1000, -1000, -1000, -1000, 1267, - -1000, -1000, -1000, -1000, -9, 592, -1000, -1000, -1000, -1000, - 45515, 42895, 304, -1000, -1000, 235, -1000, -1000, 1675, 1101, - 18658, 1138, -1000, 1180, 811, -1000, -1000, -1000, -1000, -1000, - 746, -1000, 19313, 19313, 19313, 19313, -1000, -1000, 1868, 42240, - 1868, 1868, 19313, 1868, -1000, 19313, 1868, 1868, 1868, 18658, - 1868, 1868, 1868, 1868, -1000, 1868, 1868, 1868, 1868, 1868, - 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, - 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, - 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, - 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, - 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, - 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, - 1868, 1868, 1868, 1868, 1868, 1868, -1000, -1000, -1000, -1000, - 1868, 762, 1868, 1868, 1868, 1868, 1868, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 1868, 1868, 1868, 1868, 1868, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1868, - 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, - 1868, 1868, 1868, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 21278, 1409, 1393, 1390, -1000, 16038, - 1868, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 54050, 281, 54050, -1000, 785, 54050, -442, 1063, 1063, 96, + 1063, 1063, 1063, 1063, 175, 923, 25, -1000, 168, 259, + 157, 266, 1032, 190, -1000, -1000, 262, 1032, 1800, -1000, + 917, 257, 136, -1000, 1063, 1063, -1000, 13591, 258, 13591, + 13591, -1000, 2338, -1000, -1000, -1000, -1000, -1000, 1334, -1000, + -1000, -1000, -1000, -59, 418, -1000, -1000, -1000, -1000, 52606, + 49718, 264, -1000, -1000, 347, -1000, -1000, 1728, 1423, 20836, + 1482, 884, -1000, -1000, 1348, 846, -1000, -1000, -1000, -1000, + -1000, 493, -1000, 23002, 23002, 23002, 23002, -1000, -1000, 1980, + 48996, 1980, 1980, 23002, 1980, 23002, 1980, 1980, 1980, 20836, + 1980, 1980, 1980, 1980, -1000, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, -1000, -1000, -1000, -1000, 1980, 778, 1980, 1980, + 1980, 1980, 1980, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 1980, 1980, 1980, 1980, 1980, 1980, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 25890, 1485, 1480, 1477, -1000, 17948, 1980, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 46825, -1000, 1868, 212, 45515, 45515, 356, 2420, 1944, - -1000, 2461, 2432, 395, -1000, 2812, 1352, 1503, 1462, 1944, - 1827, 46825, -1000, 1882, -1000, -1000, -1000, -1000, 2146, 1328, - 1595, -1000, -1000, -1000, -1000, 1941, 18658, -1000, -1000, 2497, - -1000, 22589, 759, 2493, 41585, -1000, 434, 434, 1855, 455, - 40, -1000, -1000, -1000, -1000, 887, 29140, -1000, -1000, -1000, - -1000, 1714, 46825, -1000, -1000, 6677, 1233, -1000, 1974, -1000, - 1704, -1000, 1926, 18658, 1946, 686, 1233, 680, 679, 655, - -1000, -23, -1000, -1000, -1000, -1000, -1000, -1000, 856, 856, - 856, -1000, 389, 2448, 3684, 4838, -1000, -1000, -1000, 40930, - 1970, 1233, -1000, 1969, -1000, 950, 719, 727, 727, 1233, - -1000, -1000, 46170, 1233, 946, 942, 1233, 1233, 45515, 45515, - -1000, 40275, -1000, 39620, 38965, 1240, 45515, 38310, 37655, 37000, - 36345, 35690, -1000, 2202, -1000, 2119, -1000, -1000, -1000, 46170, - 1233, 1233, 46170, 45515, 46170, 46825, 1233, -1000, -1000, 424, - -1000, -1000, 1238, 1237, 1232, 856, 856, 1231, 1581, 1579, - 1572, 856, 856, 1226, 1566, 31105, 1548, 250, 1225, 1223, - 1217, 1285, 1545, 194, 1542, 1265, 1220, 1187, 45515, 1966, - 46825, -1000, 258, 940, 706, 886, 2325, 2233, 1848, 556, - 685, 1233, 422, 422, 45515, -1000, 14066, -1000, -1000, 1539, - 18658, -1000, 1017, 995, 995, -1000, -1000, -1000, -1000, -1000, - -1000, 997, 46825, 1017, -1000, -1000, -1000, 995, 997, 46825, - 997, 997, 997, 997, 995, 995, 995, 997, 46825, 46825, - 46825, 46825, 46825, 46825, 46825, 46825, 46825, 12083, 845, 997, - -316, -1000, 1535, -1000, -1000, 2069, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 54050, -1000, 1980, + 212, 52606, 52606, 304, 1323, -1000, -1000, 2467, 2040, -1000, + 2517, 2484, 398, -1000, 3808, 1701, 1569, 1483, 2040, 1929, + 54050, -1000, 1994, -1000, -1000, -1000, -1000, 2218, 1417, 1783, + -1000, -1000, -1000, -1000, 1683, 20836, -1000, -1000, 2544, -1000, + 27335, 777, 2540, 48274, -1000, 362, 362, 1945, 423, 19, + -1000, -1000, -1000, -1000, 944, 34556, -1000, -1000, -1000, -1000, + -1000, 1869, 54050, -1000, -1000, 4092, 1340, -1000, 2077, -1000, + 1832, -1000, 2020, 20836, 2099, 462, 1340, 438, 437, 435, + -1000, -101, -1000, -1000, -1000, -1000, -1000, -1000, 932, 932, + 932, -1000, 341, 2501, 4889, 5427, -1000, -1000, -1000, 47552, + 2071, 1340, -1000, 2065, -1000, 1029, 519, 852, 852, 1340, + -1000, -1000, 53328, 1340, 1025, 1024, 1340, 1340, 52606, 52606, + -1000, 46830, -1000, 46108, 45386, 1322, 52606, 44664, 43942, 43220, + 42498, 41776, -1000, 2293, -1000, 2236, -1000, -1000, -1000, 53328, + 1340, 1340, 53328, 52606, 53328, 54050, 1340, -1000, -1000, 350, + -1000, -1000, 1321, 1317, 1313, 932, 932, 1312, 1777, 1773, + 1742, 932, 932, 1307, 1739, 36722, 1737, 256, 1296, 1294, + 1291, 1253, 1723, 205, 1715, 1218, 1188, 1278, 52606, 2062, + 54050, -1000, 246, 1010, 953, 943, 2350, 2289, 1940, 417, + 442, 1340, 391, 391, 52606, -1000, 14319, -1000, 215, -1000, + 1713, 20836, -1000, 1038, 1032, 1032, -1000, -1000, -1000, -1000, + -1000, -1000, 1063, 54050, 1038, -1000, -1000, -1000, 1032, 1063, + 54050, 1063, 1063, 1063, 1063, 1032, 1032, 1032, 1063, 54050, + 54050, 54050, 54050, 54050, 54050, 54050, 54050, 54050, 13591, 917, + 1063, -445, -1000, 1698, -1000, -1000, 2207, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, @@ -6412,284 +7319,329 @@ var yyPact = [...]int{ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 12083, 12083, -1000, -1000, -1000, -1000, -1000, 1846, - -1000, 175, 35, 190, -1000, 35035, 425, 884, -1000, 425, - -1000, -1000, -1000, 1843, 34380, -1000, -318, -319, -324, -327, - -1000, -1000, -1000, -331, -333, -1000, -1000, -1000, 18658, 18658, - 18658, 18658, -150, -1000, 1088, 19313, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 187, 1102, 19313, 19313, 19313, 19313, 19313, - 19313, 19313, 19313, 19313, 19313, 19313, 19313, 19313, 19313, 19313, - -1000, -1000, 27175, 7629, 7629, 811, 811, 811, 811, -1000, - -87, 1842, 46170, -1000, -1000, -1000, 758, 18658, 18658, 811, - -1000, 1233, 16038, 19968, 18003, 18003, 18658, 897, 1101, 46170, - 18658, -1000, 1462, -1000, -1000, -1000, 1213, -1000, 1032, 2303, - 2303, 2303, 2303, 18658, 18658, 18658, 18658, 18658, 18658, 18658, - 18658, 18658, 18658, 2303, 45515, 45515, 780, 18658, 18658, 18658, - 18658, 18658, 18658, 14727, 18658, 18658, 19313, 18658, 18658, 18658, - 1462, 18658, 18658, 18658, 18658, 18658, 18658, 18658, 18658, 18658, - 18658, 18658, 18658, 18658, 18658, 18658, 18658, 18658, 18658, 18658, - 18658, 18658, 18658, 18658, 18658, 18658, 18658, 18658, 18658, 18658, - 18658, 1462, 18658, 1207, 18658, 18658, 18003, 13405, 18003, 18003, - 18003, 18003, 18003, -1000, -1000, -1000, -1000, -1000, 18658, 18658, - 18658, 18658, 18658, 18658, 18658, 18658, 1462, 18658, 18658, 18658, - 18658, 18658, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 1387, 1625, 1430, 18658, -1000, 1829, -1000, -130, - 24555, 18658, 1527, 2486, 2007, 45515, -1000, -1000, -1000, 2420, - -1000, 2420, 1387, 2779, 2166, 18003, -1000, -1000, 2779, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1590, -1000, 46825, - 1827, 2392, 45515, 2155, 1520, 657, -1000, 18658, 18658, 1826, - -1000, 1511, 46825, -1000, -150, -1000, 33725, -1000, -1000, 11422, - 46825, 340, 46825, -1000, 23900, 33070, 248, -1000, 40, 1780, - -1000, 22, 27, 15382, 810, -1000, -1000, -1000, 1522, 20623, - 1665, 810, 106, -1000, -1000, -1000, 1926, -1000, 1926, 1926, - 1926, 1926, 657, 657, 657, 657, -1000, -1000, -1000, -1000, - -1000, 1962, 1952, -1000, 1926, 1926, 1926, 1926, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 1951, 1951, 1951, 1949, 1949, - 1927, 1927, 426, -1000, 18658, 375, 32415, 2366, 1186, 1669, - 258, 430, 2003, 1233, 1233, 1233, 430, -1000, 1326, 1314, - 1312, -1000, -447, 1821, -1000, -1000, 2446, -1000, -1000, 943, - 969, 966, 1061, 45515, 227, 333, -1000, 417, -1000, 32415, - 1233, 936, 727, 1233, -1000, 1233, -1000, -1000, -1000, -1000, - -1000, 1233, -1000, -1000, 1815, -1000, 1819, 1001, 964, 975, - 961, 1815, -1000, -1000, -96, 1815, -1000, 1815, -1000, 1815, - -1000, 1815, -1000, 1815, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 927, 232, -224, 45515, 227, 450, -1000, - 443, 27175, -1000, -1000, -1000, 27175, 27175, -1000, -1000, -1000, - -1000, 1468, 1458, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -432, 46825, -1000, 254, 883, 314, 328, 291, - 46825, 229, 2408, 2406, 2401, 2399, 2397, 247, 294, 46825, - 46825, 422, 2092, 46825, 2362, 46825, -1000, -1000, -1000, -1000, - -1000, 1101, 46825, -1000, -1000, 997, 997, -1000, -1000, 46825, - 997, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 997, + -1000, -1000, -1000, -1000, 13591, 13591, -1000, -1000, -1000, -1000, + -1000, 1935, -1000, 176, 8, 185, -1000, 41054, 492, 942, + -1000, 492, -1000, -1000, -1000, 1934, 40332, -1000, -446, -447, + -448, -450, -1000, -1000, -1000, -453, -455, -1000, -1000, -1000, + 20836, 20836, 20836, 20836, -323, -1000, 1281, 23002, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 20836, 254, 940, 23002, 23002, + 23002, 23002, 23002, 23002, 23002, 24446, 23724, 23002, 23002, 23002, + 23002, 23002, 23002, -1000, -1000, 32390, 4996, 4996, 846, 846, + 846, 846, -1000, -210, 1933, 53328, -1000, -1000, -1000, 770, + 20836, 20836, 846, -1000, 1340, 2253, 17948, 20114, 20114, 20836, + 961, 1423, 53328, 20836, -1000, 1483, -1000, -1000, -1000, -1000, + 1119, -1000, -1000, 1006, 2327, 2327, 2327, 2327, 20836, 20836, + 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, 2327, 20836, + 271, 271, 883, 20836, 20836, 20836, 20836, 20836, 20836, 16503, + 20836, 20836, 23002, 20836, 20836, 20836, 1483, 20836, 20836, 20836, + 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, + 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, + 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, + 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, + 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, + 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, + 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, 20836, + 20836, 20836, 20836, 20836, 20836, 20836, 1483, 20836, 1318, 20836, + 20836, 20836, 20836, 20836, 20836, 20114, 15775, 20114, 20114, 20114, + 20114, 20114, -1000, -1000, -1000, -1000, -1000, -1000, 20836, 20836, + 20836, 20836, 20836, 20836, 20836, 20836, 1483, 20836, 20836, 20836, + 20836, 20836, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 1560, 1564, 1522, 20836, -1000, 1932, -1000, -184, + 29502, 20836, 1674, 2538, 2110, 52606, -1000, -1000, -1000, -1000, + 2467, -1000, 2467, 1560, 3790, 2030, 20114, -1000, -1000, 3790, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1751, -1000, + 54050, 1929, 2426, 52606, 2208, 1660, 833, -1000, 20836, 20836, + 1927, -1000, 1251, 54050, -1000, -323, -1000, 39610, -1000, -1000, + 12863, 54050, 317, 54050, -1000, 28780, 38888, 336, -1000, 19, + 1904, -1000, -5, -19, 17225, 838, -1000, -1000, -1000, 1524, + 25168, 1712, 838, 99, -1000, -1000, -1000, 2020, -1000, 2020, + 2020, 2020, 2020, 833, 833, 833, 833, -1000, -1000, -1000, + -1000, -1000, 2060, 2054, -1000, 2020, 2020, 2020, 2020, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 46825, -1000, -1000, -1000, -1000, - -9, 161, -1000, -1000, 45515, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -45, -1000, 287, 42, 382, -1000, - -1000, -1000, -1000, -1000, 2414, -1000, 1101, 926, 914, -1000, - 1868, -1000, -1000, 1082, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 187, 19313, 19313, 19313, 1708, 690, 1673, 1290, 1341, - 852, 852, 970, 970, 819, 819, 819, 819, 819, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1455, -1000, 1868, - 46170, 1694, 13405, 2232, 1805, 1462, 2656, -1000, 1688, -1000, - 1688, 1405, 898, -1000, 18658, 1462, 2646, -1000, -1000, 1462, - 1462, 1462, 18658, -1000, -1000, 18658, 18658, 18658, 18658, 1669, - 1669, 1669, 1669, 1669, 1669, 1669, 1669, 1669, 1669, 18658, - 1809, 1799, 2483, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 2043, 2043, 2043, 2042, + 2042, 2026, 2026, 378, -1000, 20836, 409, 38166, 2360, 1258, + 2201, 246, 393, 2104, 1340, 1340, 1340, 393, -1000, 1506, + 1503, 1471, -1000, -515, 1918, -1000, -1000, 2500, -1000, -1000, + 1027, 1067, 1066, 931, 52606, 220, 306, -1000, 370, -1000, + 38166, 1340, 1023, 852, 1340, -1000, 1340, -1000, -1000, -1000, + -1000, -1000, 1340, -1000, -1000, 1916, -1000, 1898, 1097, 1044, + 1091, 1043, 1916, -1000, -1000, -216, 1916, -1000, 1916, -1000, + 1916, -1000, 1916, -1000, 1916, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 727, 127, -370, 52606, 220, 415, + -1000, 412, 32390, -1000, -1000, -1000, 32390, 32390, -1000, -1000, + -1000, -1000, 1658, 1633, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 2071, 1669, 1669, 1669, 1669, 1669, - 18658, 1382, -1000, -1000, -1000, 1477, 2642, 1108, 2635, 1669, - 1669, -1000, 1669, 2630, 2586, 1462, 1675, 1462, 1798, -1000, - 2460, 1669, 2405, 2393, 2368, 1909, 2358, 2334, 2318, 1669, - 1669, 1669, 1860, 2297, 2292, 2275, 2271, 2261, 2257, 2249, - 2225, 2219, 1669, 1430, 1430, 2212, -158, 1669, 1462, -1000, - -1000, -1000, -1000, -1000, 2205, 1800, 1462, 1781, 1868, 755, - -1000, -1000, 1688, 1462, 1462, 1688, 1688, 2179, 2175, 2150, - 2145, 2131, 2118, 1669, 1669, -1000, 1669, 2113, 2101, 1796, - 1777, 1462, -1000, 1430, 46825, -1000, -307, -1000, 14, 813, - 1868, -1000, 31105, 1462, -1000, 7211, -1000, 1092, -1000, -1000, - -1000, -1000, -1000, 28485, 1852, 2779, -1000, -1000, 1868, 1680, - -1000, -1000, 657, 84, 27830, 808, 808, 118, 1101, 1101, - 18658, -1000, -1000, -1000, -1000, -1000, -1000, 754, 2473, 414, - 1868, -1000, 1717, 2693, -1000, -1000, -1000, 2385, 21934, -1000, - -1000, 1868, 1868, 46825, 1820, 1812, -1000, 753, -1000, 1275, - 1780, 40, 24, -1000, -1000, -1000, -1000, 1101, -1000, 1308, - 341, 305, -1000, 415, -1000, -1000, -1000, -1000, 2256, 98, - -1000, -1000, -1000, 715, 657, -1000, -1000, -1000, -1000, -1000, - -1000, 1453, 1453, -1000, -1000, -1000, -1000, -1000, 1160, -1000, - -1000, -1000, -1000, 1158, -1000, -1000, 1143, -1000, -1000, 2083, - 2102, 375, -1000, -1000, 856, 1448, -1000, -1000, 2264, 856, - 856, 45515, -1000, -1000, 1658, 2366, 254, 46825, 905, 2091, - -1000, 2003, 2003, 2003, 46825, -1000, -1000, -1000, -1000, -1000, - -1000, -441, 135, 416, -1000, -1000, -1000, 3795, 45515, 1671, - -1000, 222, -1000, 1624, -1000, 45515, -1000, 1657, 1935, 1233, - 1233, -1000, -1000, -1000, 45515, 1868, -1000, -1000, -1000, -1000, - 682, 2324, 319, -1000, -1000, -177, -1000, -1000, 227, 222, - 46170, 1233, 810, -1000, -1000, -1000, -1000, -1000, -437, 1651, - 647, 230, 353, 46825, 46825, 46825, 46825, 46825, 738, -1000, - -1000, 53, -1000, -1000, 196, -1000, -1000, -1000, -1000, 196, - -1000, -1000, -1000, -1000, 281, 438, -1000, 46825, 46825, 751, - -1000, -1000, -1000, 995, -1000, -1000, 995, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 2309, - 46825, 39, -395, -1000, -392, 18658, -1000, -1000, -1000, -1000, - 1690, 672, 1673, 19313, 19313, 19313, -1000, -1000, -1000, 913, - 913, 27175, -1000, 18658, 18003, -1000, -1000, 18658, 18658, 892, - -1000, 18658, 1228, -1000, 18658, -1000, -1000, -1000, 1430, 1669, - 1669, 1669, 1669, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 1691, 18658, 18658, 18658, 1462, 312, -1000, - -1000, -1000, -1000, -1000, 2480, -1000, 18658, -1000, 27175, 18658, - 18658, 18658, -1000, -1000, -1000, 18658, 18658, -1000, -1000, 18658, - 18658, -1000, 18658, 18658, 18658, -1000, 18658, 18658, 18658, 18658, - -1000, -1000, -1000, -1000, 18658, 18658, 18658, 18658, 18658, 18658, - 18658, 18658, 18658, 18658, -1000, -1000, -1000, 18658, -1000, 32415, - 111, -158, 1207, 111, 1207, -1000, 18003, 12744, -1000, -1000, - -1000, -1000, -1000, 18658, 18658, 18658, 18658, 18658, 18658, -1000, - -1000, -1000, 18658, 18658, -1000, 18658, -1000, 18658, -1000, -1000, - -1000, -1000, -1000, 813, -1000, 727, 727, 727, 45515, -1000, - -1000, -1000, -1000, 1759, -1000, 2412, -1000, 2188, 2174, 2477, - 2473, -1000, 23900, 2779, -1000, -1000, 45515, -277, -1000, 2227, - 2278, 808, 808, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 10761, 2420, 18658, 2075, 46170, 148, -1000, 23245, 45515, 46170, - 23900, 23900, 23900, 23900, 23900, -1000, 2130, 2123, -1000, 2133, - 2126, 2199, 46825, -1000, 1387, 1647, -1000, 18658, 25865, 1743, - 23900, -1000, -1000, 23900, 46825, 10100, -1000, -1000, 26, 29, - -1000, -1000, -1000, -1000, 1522, -1000, -1000, 939, 2382, 2253, - -1000, -1000, -1000, -1000, -1000, 1645, -1000, 1638, 1745, 1636, - 1631, 232, -1000, 1914, 2302, 856, 856, -1000, 1142, -1000, - 1233, 1442, 1439, -1000, -1000, -1000, 614, -1000, 2356, 46825, - 2073, 2065, 2062, -1000, -446, 1134, 1934, 1696, 18658, 1932, - 2443, 1718, 45515, -1000, -1000, 46170, -1000, 228, -1000, 375, - 45515, -1000, -1000, -1000, 333, 46825, -1000, 5817, -1000, -1000, - -1000, 222, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 46825, - 259, -1000, 1928, 1263, -1000, -1000, 1980, -1000, -1000, -1000, - -1000, 217, 311, 1437, 210, 1435, 210, -1000, 46825, 579, - 2102, 46825, -1000, -1000, -1000, 997, 997, -1000, -1000, 2300, - -1000, 1233, 1669, 19313, 19313, -1000, 811, 479, -132, 1926, - 1926, -1000, 1926, 1927, -1000, 1926, 167, 1926, 157, 1926, - -1000, -1000, 1462, 1462, 1430, -1000, 1633, 1333, -1000, 1101, - 18658, 2089, -1000, -1000, -1000, -1000, -1000, -29, 2039, 2014, - 1669, -1000, 1925, 1913, 18658, 1669, 1462, 1623, 1669, 1669, - 1669, 1669, -1000, 1101, 1430, 2006, 1430, 1669, 1669, 2002, - 352, 1669, 1627, 1627, 1627, 1627, 1627, 1430, 1430, 1430, - 1430, 1669, 45515, -1000, -158, -1000, -1000, -201, -203, -1000, - 1462, -158, 1732, 1462, -1000, 1614, 1610, 1991, 1602, 1669, - 1984, 1669, 1669, 1669, 1553, -1000, 2369, 2369, 2369, 1612, - 1092, 46825, -1000, -1000, -1000, -1000, 2473, 2470, 1719, -1000, - -1000, 84, 467, -1000, 2242, 2278, -1000, 2442, 2221, 2441, - -1000, -1000, -1000, -1000, -1000, 1101, -1000, 2337, 1707, -1000, - 882, 1678, -1000, -1000, 17348, 1619, 2169, 752, 1612, 1726, - 2693, 2000, 2047, 2752, -1000, -1000, -1000, -1000, 2115, -1000, - 2049, -1000, -1000, 1882, -1000, 1957, 340, 23900, 1701, 1701, - -1000, 743, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 988, - 7467, 2516, -1000, 1432, -1000, 1298, 199, 1133, -1000, -1000, - 856, 856, -1000, 935, 928, -1000, 46825, 1902, -1000, 657, - 1411, 657, 1109, -1000, -1000, 1103, -1000, -1000, -1000, -1000, - 2023, 2106, -1000, -1000, -1000, -1000, 46825, -1000, -1000, 46825, - 46825, 46825, 1898, 2438, -1000, 18658, 1897, 878, 1979, 45515, - 45515, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 524, 856, -413, 292, 285, 856, 856, 856, - -453, -1000, -1000, 1589, 1587, -1000, -117, -1000, 18658, -1000, - -1000, -1000, 1234, 1234, 1409, 1393, 1390, -1000, 1882, -1000, - -1000, -1000, 1608, -1000, -1000, -101, 45515, 45515, 45515, 45515, - -1000, -1000, 1016, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 811, 1462, 344, -110, 1462, - -1000, -1000, 657, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 18658, -1000, 18658, -1000, 1101, 18658, 2420, - 1358, 18658, 18658, -1000, 1098, 1077, 1669, -1000, -1000, -1000, - 18658, -1000, -1000, -1000, -1000, -1000, 18658, -1000, -1000, -1000, - 18658, 207, 913, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 1462, 336, -1000, -1000, -1000, -1000, - 2475, -1000, 1462, 18658, -1000, -1000, 18658, -1000, 18658, 18658, - -1000, 18658, -1000, 18658, -1000, -1000, -1000, -1000, 18658, 1868, - 2218, 1868, 1868, 25865, -1000, -1000, 2470, 2456, 2435, 2208, - 2211, 2211, 2242, -1000, 2430, 2427, -1000, 1356, 2425, 1353, - 922, -1000, 46170, 18658, 148, -1000, 420, 45515, 148, 45515, - -1000, 2433, -1000, -1000, 18658, 1894, -1000, 18658, -1000, -1000, - -1000, -1000, 7629, 2473, 1701, -1000, -1000, 820, -1000, 18658, - -1000, -1000, -1000, 8432, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 1334, 1329, -1000, -1000, 1890, 18658, -1000, -1000, - -1000, 1600, 1554, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 1882, -1000, -1000, -1000, -1000, 333, -444, 1971, 45515, - 1067, -1000, 1508, 1718, 327, 148, 1322, 856, 856, 856, - 1066, 1062, 31105, 1506, -1000, 45515, 405, -1000, 333, -1000, - -127, -129, 1669, -1000, -1000, 2381, -1000, -1000, 12744, -1000, - -1000, 1880, 1996, -1000, -1000, -1000, -1000, 2152, -94, -112, - -1000, -1000, 1669, 1669, 1763, 1462, -1000, 1669, 1669, 1493, - 1483, -1000, 1669, 1430, 1516, -1000, 207, 1462, 2038, -1000, - -1000, 7629, -1000, -1000, 2433, 2424, 111, -1000, -1000, 224, - 111, 1101, 1492, 1669, 1480, 1474, 1669, 1669, 26520, -1000, - 2423, 2422, 31760, 31760, 813, 2456, -165, 18658, 18658, 2192, - 1069, -1000, -1000, -1000, -1000, 1320, 1317, -1000, 1310, -1000, - 2514, -1000, 1101, -1000, 148, -1000, 718, 1678, -1000, 2420, - 1101, 45515, 1101, 86, 2433, -1000, 1669, -1000, 1868, 1868, - 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, - 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, - 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, - 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, - 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, - 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, - 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, 1868, - 1868, -1000, -1000, 45515, 1891, -1000, -1000, 2379, 1488, 134, - -1000, 1475, 1718, -1000, -1000, 147, -1000, 18658, -1000, 31105, - 1300, 1295, -1000, -1000, -1000, -1000, -453, -1000, -1000, -1000, - -1000, -1000, -1000, 395, 1710, -1000, 855, 45515, 46825, -1000, - 2148, -1000, -1000, -1000, 18658, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 18658, -1000, 1462, 2034, -1000, -278, -1000, - -410, 18658, -158, -1000, -1000, -158, -1000, 18658, -1000, -1000, - 18658, -1000, 18658, -1000, -1000, 1486, -1000, -1000, -1000, -1000, - -1000, 1486, 1486, -1000, -165, -1000, 1683, -1000, 45515, 1101, - 1675, -1000, 1047, -1000, -1000, -1000, -1000, -1000, 46170, 1678, - 45515, -1000, 1482, 1462, 1868, 2420, -1000, 1472, -1000, 395, - -1000, 1878, 1696, -1000, -1000, -1000, 16693, -1000, -1000, -1000, - -1000, -1000, 180, -100, 12744, 9439, 1446, -1000, -98, 1669, - 1430, -1000, -383, -1000, -1000, -1000, -1000, 272, -1000, -1000, - 1675, -1000, -1000, 1461, 1361, 1350, 30450, -1000, -1000, -1000, - -1000, -165, -1000, -1000, 2378, -1000, -1000, 1327, -1000, -1000, - 25865, 44860, -1000, -83, 271, -100, 18658, 1877, 1462, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 21, -1000, -1000, - -1000, -1000, -1000, 1980, -116, -1000, -1000, -1000, 284, -404, - -199, -207, -1000, -1000, 19313, -1000, 18658, -1000, 18658, -1000, - 18658, -1000, -1000, -1000, 45515, 1868, -1000, 1399, -1000, 6626, - -214, 2018, -1000, -43, -1000, -1000, -1000, 980, 1284, -1000, - -1000, -1000, -1000, -1000, -1000, 1684, 45515, -1000, 421, -1000, - -1000, -101, -115, 911, -1000, -1000, -1000, -1000, -1000, 1325, - 1303, 1669, -1000, 45515, -1000, 44860, -209, 810, 7629, -1000, - 2012, 2009, 2490, -1000, -1000, -1000, -1000, -1000, -1000, -455, - 1305, 240, -1000, -1000, 284, -1000, 18658, -1000, 18658, -1000, - 1462, -1000, -1000, 2350, 86, -1000, 2505, -1000, 2503, 849, - 849, -1000, 1030, -455, -1000, -1000, 1669, 1669, -1000, -226, - -1000, -1000, -1000, -1000, -1000, 411, 1068, -1000, -1000, -1000, - -1000, -1000, 7629, -1000, -1000, -1000, 246, 246, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -499, 54050, -1000, 236, 937, 293, 327, + 288, 54050, 361, 2459, 2457, 2452, 2447, 2409, 247, 278, + 54050, 54050, 391, 2164, 54050, 2383, 54050, -1000, -1000, -1000, + -1000, 1631, 1627, -1000, 1423, 54050, -1000, -1000, 1063, 1063, + -1000, -1000, 54050, 1063, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 1063, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 54050, -1000, + -1000, -1000, -1000, -59, 172, -1000, -1000, 52606, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -109, -1000, 793, + -25, 380, -1000, -1000, -1000, -1000, -1000, 2461, -1000, 1423, + 986, 1004, -1000, 1980, -1000, -1000, 1180, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 254, 23002, 23002, 23002, 1570, + 439, 1371, 1779, 1197, 1276, 1276, 1186, 23002, 1186, 23002, + 853, 853, 853, 853, 853, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 1620, -1000, 1980, 53328, 1822, 15775, 2600, + 1576, 1483, 865, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 3807, 1789, -1000, 1789, 1791, 967, -1000, + 20836, 1483, 3802, -1000, -1000, 1483, 1483, 20836, -1000, -1000, + 20836, 20836, 20836, 20836, 2201, 2201, 2201, 2201, 2201, 2201, + 2201, 2201, 2201, 2201, 20836, 2201, 1915, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 1914, 2535, 1431, 2201, 2201, + 2201, 2201, 2201, 20836, 2170, -1000, -1000, -1000, 1528, 3507, + 1297, 3486, 2201, 2201, -1000, 2201, 3469, 3451, 1483, 1728, + 2642, 2629, 2201, 2201, 2201, 2201, 2201, 2625, 2585, 2201, + 2201, 2543, 2201, 3444, 2201, 2539, 2529, 2524, 2509, 2505, + 2481, 2468, 2463, 2438, 2434, 2402, 2398, 2356, 2323, 2311, + 2292, 2273, 2265, 2201, 2201, 2201, 3435, 2201, 3427, 2201, + 3412, 2201, 2201, 3400, 2261, 2244, 1483, 1913, -1000, 3378, + 2201, 3366, 3342, 3338, 2235, 3334, 3329, 3325, 2201, 2201, + 2201, 2205, 3303, 3281, 3272, 3267, 3260, 3256, 3252, 3247, + 3243, 2201, 1522, 1522, 1522, 1522, 1522, 3027, -326, 2201, + 1483, -1000, -1000, -1000, -1000, -1000, 3021, 2186, 3016, 3008, + 2971, 2965, 1483, 1908, 1980, 747, -1000, -1000, 1789, 1483, + 1483, 1789, 1789, 2959, 2942, 2938, 2934, 2927, 2906, 2201, + 2201, -1000, 2201, 2900, 2871, 2158, 2149, 1483, -1000, 1522, + 54050, -1000, -433, -1000, -47, 910, 1980, -1000, 36722, 1483, + -1000, 4283, -1000, 1181, -1000, -1000, -1000, -1000, -1000, 33834, + 1697, 3790, -1000, -1000, 1980, 1782, -1000, -1000, 833, 66, + 33112, 832, 832, 109, 1423, 1423, 20836, -1000, -1000, -1000, + -1000, -1000, -1000, 739, 2516, 406, 1980, -1000, 1928, 3283, + -1000, -1000, -1000, 2424, 26613, -1000, -1000, 1980, 1980, 54050, + 1941, 1902, -1000, 505, -1000, 1342, 1904, 19, 18, -1000, + -1000, -1000, -1000, 1423, -1000, 1461, 321, 355, -1000, 402, + -1000, -1000, -1000, -1000, 2296, 85, -1000, -1000, -1000, 827, + 833, -1000, -1000, -1000, -1000, -1000, -1000, 1616, 1616, -1000, + -1000, -1000, -1000, -1000, 1256, -1000, -1000, -1000, -1000, 1226, + -1000, -1000, 1225, -1000, -1000, 2867, 2090, 409, -1000, -1000, + 932, 1598, -1000, -1000, 2306, 932, 932, 52606, -1000, -1000, + 1705, 2360, 236, 54050, 966, 2163, -1000, 2104, 2104, 2104, + 54050, -1000, -1000, -1000, -1000, -1000, -1000, -501, 192, 363, + -1000, -1000, -1000, 1354, 52606, 1733, -1000, 222, -1000, 1689, + -1000, 52606, -1000, 1722, 2041, 1340, 1340, -1000, -1000, -1000, + 52606, 1980, -1000, -1000, -1000, -1000, 440, 2344, 356, -1000, + -1000, -348, -1000, -1000, 220, 222, 53328, 1340, 838, -1000, + -1000, -1000, -1000, -1000, -502, 1720, 431, 225, 318, 54050, + 54050, 54050, 54050, 54050, 478, -1000, -1000, 10, -1000, -1000, + 199, -1000, -1000, -1000, -1000, 199, -1000, -1000, -1000, -1000, + 270, 396, -1000, 54050, 54050, 901, -1000, -1000, -1000, -1000, + -1000, 1032, -1000, -1000, 1032, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 2333, 54050, -37, + -470, -1000, -467, 20836, -1000, -1000, -1000, -1000, 1210, 410, + 1371, 23002, 23002, 2253, 2253, 23002, -1000, -1000, -1000, 792, + 792, 32390, -1000, 23002, 20836, 20114, -1000, -1000, 20836, 20836, + 941, -1000, 20836, 1153, -1000, 20836, -1000, -1000, 1522, 2201, + 2201, 2201, 2201, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 1936, -1000, 20836, 20836, 20836, 1483, 310, + -1000, -1000, -1000, -1000, -1000, 2534, -1000, 20836, -1000, 32390, + 20836, 20836, 20836, -1000, -1000, -1000, 20836, 20836, -1000, -1000, + 20836, -1000, 20836, -1000, -1000, -1000, -1000, -1000, -1000, 20836, + -1000, 20836, -1000, -1000, -1000, 20836, -1000, 20836, -1000, -1000, + 20836, -1000, 20836, -1000, 20836, -1000, 20836, -1000, 20836, -1000, + 20836, -1000, 20836, -1000, 20836, -1000, 20836, -1000, 20836, -1000, + 20836, -1000, 20836, -1000, 20836, -1000, 20836, -1000, 20836, -1000, + 20836, -1000, 20836, -1000, 20836, -1000, -1000, -1000, 20836, -1000, + 20836, -1000, 20836, -1000, -1000, 20836, -1000, 20836, -1000, 20836, + -1000, 20836, 20836, -1000, 20836, 20836, 20836, -1000, 20836, 20836, + 20836, 20836, -1000, -1000, -1000, -1000, 20836, 20836, 20836, 20836, + 20836, 20836, 20836, 20836, 20836, 20836, -1000, -1000, -1000, -1000, + -1000, -1000, 20836, -1000, 38166, 13, -326, 1318, 13, 1318, + 22280, 787, 494, 21558, -1000, 20114, 15047, -1000, -1000, -1000, + -1000, -1000, 20836, 20836, 20836, 20836, 20836, 20836, -1000, -1000, + -1000, 20836, 20836, -1000, 20836, -1000, 20836, -1000, -1000, -1000, + -1000, -1000, 910, -1000, 852, 852, 852, 52606, -1000, -1000, + -1000, -1000, 1899, -1000, 2454, -1000, 2241, 2239, 2533, 2516, + -1000, 28780, 3790, -1000, -1000, 52606, -425, -1000, 2283, 2238, + 832, 832, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 12135, + 2467, 20836, 2146, 53328, 206, -1000, 28058, 52606, 53328, 28780, + 28780, 28780, 28780, 28780, -1000, 2196, 2193, -1000, 2194, 2182, + 2379, 54050, -1000, 1560, 1711, -1000, 20836, 30946, 1834, 28780, + -1000, -1000, 28780, 54050, 11407, -1000, -1000, -41, -36, -1000, + -1000, -1000, -1000, 1524, -1000, -1000, 1065, 2421, 2300, -1000, + -1000, -1000, -1000, -1000, 1709, -1000, 1704, 1886, 1681, 1673, + 127, -1000, 2096, 2329, 932, 932, -1000, 1220, -1000, 1340, + 1592, 1590, -1000, -1000, -1000, 422, -1000, 2382, 54050, 2143, + 2140, 2139, -1000, -510, 1217, 2036, 2013, 20836, 2031, 2495, + 1871, 52606, -1000, -1000, 53328, -1000, 295, -1000, 409, 52606, + -1000, -1000, -1000, 306, 54050, -1000, 6781, -1000, -1000, -1000, + 222, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 54050, 241, + -1000, 2027, 1332, -1000, -1000, 2005, -1000, -1000, -1000, -1000, + 191, 286, 1562, 196, 1540, 196, -1000, 54050, 892, 2090, + 54050, -1000, -1000, -1000, 1063, 1063, -1000, -1000, 2324, -1000, + 1340, 2201, 23002, 23002, -1000, 846, -1000, -1000, 521, -305, + 2020, 2020, -1000, 2020, 2026, -1000, 2020, 165, 2020, 147, + 2020, -1000, -1000, 1483, 1483, -1000, 1522, -1000, 2136, 1677, + -1000, 1423, 20836, 2862, -1000, -1000, -1000, -1000, -1000, -107, + 2831, 2815, 2201, -1000, 2012, 2007, 20836, 2201, 1483, 2130, + 2201, 2201, 2201, 2201, 2201, 2201, 2201, 2201, 2201, 2201, + 2201, 2201, 2115, 2072, 2044, 2037, 2033, 2028, 2023, 2014, + 2008, 2003, 1996, 1966, 1958, 1937, 1900, 1896, 2201, 2201, + 1851, 2201, 1835, 1787, -1000, 1423, 1522, 2805, 1522, 2201, + 2201, 2801, 348, 2201, 1671, 1671, 1671, 1671, 1671, 1522, + 1522, 1522, 1522, 2201, 52606, -1000, -326, -1000, -1000, -366, + -371, -1000, 1483, -326, 1880, 23002, 2201, 23002, 23002, 23002, + 2201, 1483, -1000, 1776, 1759, 2756, 1749, 2201, 2674, 2201, + 2201, 2201, 1726, -1000, 2450, 2450, 2450, 1637, 1181, 54050, + -1000, -1000, -1000, -1000, 2516, 2512, 1876, -1000, -1000, 66, + 546, -1000, 2255, 2238, -1000, 2494, 2258, 2492, -1000, -1000, + -1000, -1000, -1000, 1423, -1000, 2354, 1820, -1000, 936, 1793, + -1000, -1000, 19392, 1666, 2230, 500, 1637, 1926, 3283, 2121, + 2134, 3457, -1000, -1000, -1000, -1000, 2185, -1000, 2183, -1000, + -1000, 1994, -1000, 2656, 317, 28780, 1920, 1920, -1000, 497, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1078, 6781, 2566, + -1000, 1517, -1000, 1408, 227, 1216, -1000, -1000, 932, 932, + -1000, 1021, 1020, -1000, 54050, 2006, -1000, 833, 1495, 833, + 1212, -1000, -1000, 1191, -1000, -1000, -1000, -1000, 2112, 2127, + -1000, -1000, -1000, -1000, 54050, -1000, -1000, 54050, 54050, 54050, + 2002, 2491, -1000, 20836, 2001, 933, 2664, 52606, 52606, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 421, 932, -482, 277, 276, 932, 932, 932, -511, -1000, + -1000, 1614, 1596, -1000, -239, -1000, 20836, -1000, -1000, -1000, + -1000, -1000, 1209, 1209, 1485, 1480, 1477, -1000, 1994, -1000, + -1000, -1000, 1678, -1000, -1000, -220, 52606, 52606, 52606, 52606, + -1000, -1000, 1173, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 846, 1483, 379, -223, 1483, + -1000, -1000, 833, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 20836, -1000, 20836, -1000, 1423, 20836, 2467, + 1463, 20836, 20836, -1000, 1185, 1137, 2201, -1000, -1000, -1000, + 20836, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 20836, -1000, 20836, -1000, 20836, -1000, + 20836, -1000, 20836, -1000, 20836, -1000, 20836, -1000, 20836, -1000, + 20836, -1000, 20836, -1000, 20836, -1000, 20836, -1000, 20836, -1000, + 20836, -1000, 20836, -1000, 20836, -1000, -1000, 20836, -1000, -1000, + -1000, 20836, -1000, 20836, -1000, 20836, -1000, -1000, -1000, 20836, + 301, 792, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 1483, 313, -1000, -1000, -1000, -1000, 2522, + -1000, 1483, 20836, 2253, -1000, 2253, 2253, 2253, -1000, -1000, + -1000, 20836, -1000, 20836, 20836, -1000, 20836, -1000, 20836, -1000, + -1000, -1000, -1000, 20836, 1980, 2264, 1980, 1980, 30946, -1000, + -1000, 2512, 2486, 2488, 2252, 2263, 2263, 2255, -1000, 2487, + 2480, -1000, 1445, 2475, 1443, 1002, -1000, 53328, 20836, 206, + -1000, 448, 52606, 206, 52606, -1000, 2482, -1000, -1000, 20836, + 2000, -1000, 20836, -1000, -1000, -1000, -1000, 4996, 2516, 1920, + -1000, -1000, 862, -1000, 20836, -1000, 54530, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 1441, 1439, -1000, -1000, 1995, + 20836, -1000, -1000, -1000, 1610, 1597, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 1994, -1000, -1000, -1000, -1000, 306, + -506, 2650, 52606, 1132, -1000, 1571, 1871, 291, 206, 1436, + 932, 932, 932, 1131, 1118, 36722, 1567, -1000, 52606, 360, + -1000, 306, -1000, -297, -302, 2201, -1000, -1000, 2405, -1000, + -1000, 15047, -1000, -1000, 1991, 2032, -1000, -1000, -1000, -1000, + 2204, -213, -235, -1000, -1000, 2201, 2201, 2049, 1483, -1000, + 2201, 2201, 1568, 1561, -1000, 2201, 2201, 2201, 2201, 2201, + 2201, 2201, 2201, 2201, 2201, 2201, 2201, 2201, 2201, 2201, + 2201, 2201, 2201, 2201, 2201, 1522, 1688, -1000, 301, 1483, + 2133, -1000, -1000, 4996, -1000, -1000, 2482, 2474, 13, -1000, + -1000, 214, 13, 1423, 963, 1483, 1483, 963, 1644, 2201, + 1609, 1502, 2201, 2201, 31668, -1000, 2473, 2469, 37444, 37444, + 910, 2486, -333, 20836, 20836, 2247, 1148, -1000, -1000, -1000, + -1000, 1434, 1401, -1000, 1364, -1000, 2545, -1000, 1423, -1000, + 206, -1000, 488, 1793, -1000, 2467, 1423, 52606, 1423, 65, + 2482, -1000, 2201, -1000, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, 1980, + 1980, 1980, 1980, -1000, -1000, 52606, 2634, -1000, -1000, 2391, + 1558, 167, -1000, 1529, 1871, -1000, -1000, 194, -1000, 20836, + -1000, 36722, 1362, 1350, -1000, -1000, -1000, -1000, -511, -1000, + -1000, -1000, -1000, -1000, -1000, 398, 1837, -1000, 930, 52606, + 54050, -1000, 2150, -1000, -1000, -1000, 20836, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 20836, -1000, 1483, 2125, -1000, -354, + -1000, -487, 20836, -326, -1000, -1000, -326, -1000, -1000, -1000, + -1000, -1000, 20836, -1000, -1000, 20836, -1000, 20836, -1000, -1000, + 1538, -1000, -1000, -1000, -1000, -1000, 1538, 1538, -1000, -333, + -1000, 1806, -1000, 52606, 1423, 1728, -1000, 1135, -1000, -1000, + -1000, -1000, -1000, 53328, 1793, 52606, -1000, 1535, 1483, 1980, + 2467, -1000, 1533, -1000, 398, -1000, 1984, 2013, -1000, -1000, + -1000, 18670, -1000, -1000, -1000, -1000, -1000, 242, -219, 15047, + 10679, 1526, -1000, -217, 2201, 1522, -1000, -459, -1000, -1000, + -1000, -1000, 275, -1000, -1000, 1728, -1000, -1000, 1489, 1407, + 1385, 36000, -1000, -1000, -1000, -1000, -333, -1000, -1000, 2388, + -1000, -1000, 1409, -1000, -1000, 30946, 51884, -1000, -208, 323, + -219, 20836, 1982, 1483, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -34, -1000, -1000, 482, -1000, -1000, -1000, 2005, + -225, -1000, -1000, -1000, 308, -474, -353, -355, 23002, -1000, + 20836, -1000, 20836, -1000, 20836, -1000, -1000, -1000, 52606, 1980, + -1000, 1513, -1000, 3828, -381, 2123, -1000, -129, -1000, -1000, + -1000, 1073, 1257, -1000, -1000, -1000, -1000, -1000, -1000, 2513, + 52606, -1000, 373, -1000, -1000, 14319, -220, -242, 992, -1000, + -1000, -1000, -1000, -1000, 2253, 1255, 1177, 2201, -1000, 52606, + -1000, 51884, -376, 838, 4996, -1000, 2116, 2038, 2521, -1000, + -1000, -1000, -1000, -1000, -1000, -527, 1387, 239, -1000, -1000, + -1000, 308, -356, -1000, 20836, -1000, 20836, -1000, 1483, -1000, + -1000, 2370, 65, -1000, 2541, -1000, 2525, 997, 997, -1000, + 1115, -527, -1000, -1000, -1000, -1000, 2201, 2201, -1000, -384, + -1000, -1000, -1000, -1000, -1000, 333, 1158, -1000, -1000, -1000, + -1000, -1000, 4996, -1000, -1000, -1000, 312, 312, -1000, -1000, } var yyPgo = [...]int{ - 0, 2972, 2971, 30, 6, 45, 44, 2970, 38, 100, - 197, 29, 202, 103, 2969, 181, 2968, 2967, 2966, 2964, - 2963, 2962, 2555, 2553, 2537, 2961, 2960, 2959, 2958, 2957, - 2956, 2955, 2954, 2953, 2951, 185, 176, 196, 2950, 2949, - 2948, 127, 218, 77, 93, 193, 2947, 2946, 82, 2945, - 2944, 2943, 201, 200, 199, 972, 2942, 198, 126, 49, - 2941, 2936, 2935, 2933, 2932, 2931, 2930, 2928, 2927, 2926, - 2925, 2924, 2923, 2920, 2919, 2918, 2917, 280, 2916, 2915, - 24, 2914, 84, 2913, 2912, 2911, 2909, 2908, 14, 2906, - 2905, 18, 47, 2904, 2903, 134, 2902, 2901, 2900, 2899, - 2898, 22, 2897, 27, 2896, 42, 2895, 2894, 135, 2891, - 2890, 2889, 43, 2888, 2887, 2883, 2879, 2875, 2874, 2872, - 150, 2870, 2869, 2868, 194, 203, 2867, 2866, 177, 118, - 113, 2864, 2863, 105, 195, 2862, 131, 2861, 2860, 2859, - 166, 2858, 2194, 2852, 2850, 66, 62, 2848, 87, 2846, - 2842, 13, 110, 63, 12, 8, 9, 2840, 2839, 65, - 75, 2838, 123, 2833, 2832, 107, 71, 2828, 99, 102, - 2827, 2814, 19, 11, 2813, 2, 7, 5, 67, 2812, - 2811, 116, 2810, 2809, 2808, 97, 2807, 2806, 4333, 2801, - 95, 146, 109, 72, 2800, 115, 55, 2799, 2796, 2793, - 2789, 2786, 51, 2780, 2778, 2775, 149, 158, 178, 2773, - 91, 106, 52, 145, 2770, 88, 76, 206, 179, 2768, - 2765, 143, 148, 2758, 2757, 60, 108, 144, 2754, 98, - 140, 129, 89, 96, 142, 2753, 2752, 59, 70, 2751, - 2750, 2748, 2747, 182, 2746, 2745, 69, 2743, 57, 2741, - 180, 2740, 81, 50, 2739, 114, 172, 2738, 73, 2737, - 2736, 64, 125, 68, 35, 2733, 169, 175, 136, 174, - 2732, 2731, 56, 2729, 2721, 2711, 204, 352, 2705, 2703, - 276, 189, 160, 162, 78, 2702, 343, 2700, 2698, 101, - 3368, 5159, 2696, 40, 171, 2695, 2681, 6237, 154, 48, - 31, 2680, 152, 2676, 2673, 2665, 2664, 205, 186, 112, - 173, 58, 2663, 2659, 2656, 20, 2655, 2653, 2651, 2642, - 2638, 2637, 86, 39, 37, 36, 210, 61, 16, 111, - 170, 165, 74, 2636, 2635, 2633, 133, 94, 2631, 168, - 167, 153, 159, 2630, 192, 155, 130, 2629, 80, 34, - 2628, 2626, 2625, 2624, 117, 2615, 2614, 2613, 2611, 163, - 156, 132, 79, 2609, 92, 128, 161, 157, 54, 2608, - 53, 2606, 2605, 33, 191, 28, 2603, 21, 124, 232, - 2598, 4695, 190, 2594, 25, 367, 164, 2592, 2588, 10, - 15, 17, 2586, 2577, 2576, 2571, 147, 2570, 2567, 2566, - 2565, 26, 46, 23, 3, 121, 90, 2562, 2560, 151, - 2554, 2545, 1, 0, 138, 2541, 211, + 0, 3168, 3165, 33, 5, 39, 38, 3160, 3157, 3155, + 177, 3154, 3153, 3152, 3136, 3135, 3134, 2616, 2592, 2584, + 3133, 3132, 3128, 3127, 3126, 3123, 3122, 3121, 3119, 34, + 95, 27, 93, 200, 197, 3117, 176, 167, 188, 3116, + 3112, 3106, 111, 184, 80, 84, 186, 3103, 3100, 69, + 3098, 3097, 3094, 210, 209, 207, 1013, 3093, 199, 112, + 50, 3092, 3089, 3088, 3087, 3083, 3079, 3075, 3073, 3072, + 3071, 3068, 3066, 3065, 3063, 3061, 3058, 3051, 3050, 331, + 3044, 3041, 18, 3039, 75, 3038, 3037, 3036, 3035, 3033, + 7, 3028, 3027, 25, 47, 3023, 3021, 48, 3018, 3006, + 3003, 3001, 3000, 68, 2999, 23, 2996, 43, 2995, 2983, + 123, 2982, 2981, 2980, 44, 2979, 2972, 2965, 11, 162, + 2962, 2961, 139, 2953, 2952, 2951, 168, 192, 2949, 2940, + 206, 108, 103, 2939, 2935, 97, 187, 2923, 120, 2918, + 2916, 2915, 151, 2914, 3188, 2913, 2912, 62, 66, 194, + 2911, 2909, 163, 64, 53, 16, 17, 2906, 2905, 63, + 72, 2902, 117, 2901, 2895, 98, 71, 2892, 100, 94, + 2891, 2887, 22, 6, 2886, 1, 4, 2, 82, 2885, + 2884, 105, 2883, 2881, 2879, 91, 2878, 2876, 6187, 2874, + 83, 129, 99, 74, 2872, 172, 160, 2863, 2862, 2856, + 2855, 2852, 51, 2849, 2848, 2847, 138, 251, 164, 2846, + 144, 337, 54, 145, 2845, 195, 77, 198, 165, 2843, + 2837, 137, 133, 2828, 2826, 57, 166, 189, 2823, 92, + 127, 119, 171, 85, 130, 2822, 2821, 58, 87, 2820, + 2818, 2815, 2811, 175, 2808, 2801, 61, 2800, 56, 2795, + 169, 2791, 136, 70, 2789, 170, 157, 2787, 140, 2786, + 2783, 65, 106, 109, 40, 2781, 156, 161, 124, 155, + 2780, 2779, 55, 2775, 2773, 2766, 196, 273, 2764, 2763, + 325, 178, 142, 148, 89, 2761, 291, 2759, 2758, 15, + 4275, 6463, 2757, 31, 158, 2756, 2755, 6699, 42, 45, + 20, 2754, 205, 2752, 2751, 2749, 2748, 193, 204, 102, + 159, 60, 2746, 2741, 2738, 41, 2737, 2735, 2734, 2733, + 2729, 2726, 73, 36, 35, 32, 208, 59, 19, 96, + 174, 152, 67, 2724, 2721, 2720, 121, 81, 2719, 154, + 153, 126, 191, 2716, 180, 143, 114, 2715, 131, 29, + 2693, 2691, 2690, 2680, 90, 2677, 2676, 2675, 2665, 149, + 147, 118, 78, 2663, 79, 116, 150, 146, 52, 2662, + 46, 2661, 2659, 26, 185, 28, 2658, 12, 101, 110, + 2657, 6142, 182, 2650, 8, 282, 190, 2643, 2642, 9, + 10, 13, 2637, 2636, 2635, 2634, 132, 2625, 2622, 2620, + 2615, 24, 49, 21, 14, 104, 76, 2614, 2609, 141, + 2600, 2599, 2588, 0, 1003, 125, 2583, 201, } -//line sql.y:7994 +//line sql.y:8564 type yySymType struct { union any empty struct{} @@ -6978,8 +7930,8 @@ func (st *yySymType) integerUnion() int { return v } -func (st *yySymType) intervalTypeUnion() IntervalTypes { - v, _ := st.union.(IntervalTypes) +func (st *yySymType) intervalTypeUnion() IntervalType { + v, _ := st.union.(IntervalType) return v } @@ -7013,6 +7965,11 @@ func (st *yySymType) jtOnResponseUnion() *JtOnResponse { return v } +func (st *yySymType) killTypeUnion() KillType { + v, _ := st.union.(KillType) + return v +} + func (st *yySymType) lagLeadExprTypeUnion() LagLeadExprType { v, _ := st.union.(LagLeadExprType) return v @@ -7369,120 +8326,131 @@ func (st *yySymType) withUnion() *With { } var yyR1 = [...]int{ - 0, 410, 411, 411, 7, 7, 7, 7, 7, 7, + 0, 411, 412, 412, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 258, - 381, 382, 382, 256, 256, 33, 72, 35, 35, 34, - 34, 37, 37, 36, 8, 8, 8, 9, 9, 9, - 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, - 11, 11, 11, 11, 13, 13, 13, 13, 13, 20, - 21, 12, 12, 22, 22, 106, 106, 23, 24, 24, - 24, 24, 414, 414, 183, 183, 181, 181, 182, 182, - 261, 261, 25, 265, 265, 267, 267, 267, 267, 257, - 257, 257, 26, 26, 266, 266, 268, 268, 268, 271, - 271, 271, 271, 310, 310, 310, 27, 27, 27, 27, - 27, 126, 126, 384, 384, 383, 377, 377, 376, 376, - 375, 380, 380, 379, 379, 378, 39, 40, 49, 49, - 49, 49, 50, 51, 385, 385, 350, 56, 56, 55, - 55, 55, 55, 55, 55, 57, 57, 53, 53, 52, - 52, 54, 54, 352, 352, 338, 338, 351, 351, 351, - 351, 351, 351, 351, 337, 337, 137, 137, 235, 235, + 7, 258, 381, 382, 382, 256, 256, 28, 74, 36, + 36, 35, 35, 38, 38, 37, 31, 31, 31, 32, + 32, 32, 32, 32, 32, 32, 32, 33, 33, 33, + 33, 33, 29, 29, 29, 29, 30, 30, 30, 30, + 30, 15, 16, 34, 34, 17, 17, 108, 108, 18, + 19, 19, 19, 19, 415, 415, 183, 183, 181, 181, + 182, 182, 261, 261, 20, 265, 265, 267, 267, 267, + 267, 257, 257, 257, 21, 21, 266, 266, 268, 268, + 268, 271, 271, 271, 271, 310, 310, 310, 22, 22, + 22, 22, 22, 128, 128, 384, 384, 383, 377, 377, + 376, 376, 375, 380, 380, 379, 379, 378, 40, 41, + 50, 50, 50, 50, 51, 52, 385, 385, 350, 57, + 57, 56, 56, 56, 56, 56, 56, 58, 58, 54, + 54, 53, 53, 55, 55, 352, 352, 338, 338, 351, + 351, 351, 351, 351, 351, 351, 337, 337, 139, 139, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, - 235, 235, 235, 235, 235, 400, 400, 400, 399, 399, - 236, 236, 236, 236, 236, 236, 236, 236, 147, 147, - 159, 159, 159, 159, 159, 145, 145, 146, 144, 144, - 144, 153, 153, 153, 153, 153, 153, 153, 153, 153, - 153, 153, 153, 153, 153, 153, 153, 153, 404, 404, + 235, 235, 235, 235, 235, 235, 235, 400, 400, 400, + 399, 399, 236, 236, 236, 236, 236, 236, 236, 236, + 148, 148, 159, 159, 159, 159, 159, 159, 146, 146, + 147, 145, 145, 145, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, - 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, - 158, 158, 154, 154, 154, 155, 155, 155, 156, 156, - 401, 401, 401, 401, 315, 315, 315, 315, 318, 318, - 316, 316, 316, 316, 316, 316, 316, 316, 316, 317, - 317, 317, 317, 317, 317, 317, 319, 319, 319, 319, - 319, 320, 320, 320, 320, 320, 320, 320, 320, 320, - 320, 320, 320, 320, 320, 320, 320, 321, 321, 321, - 321, 321, 321, 321, 321, 336, 336, 322, 322, 330, - 330, 331, 331, 332, 332, 332, 333, 333, 333, 334, - 334, 327, 327, 327, 327, 327, 327, 327, 327, 327, - 329, 329, 328, 328, 328, 339, 364, 364, 363, 363, - 361, 361, 361, 361, 361, 361, 361, 361, 348, 348, - 358, 358, 358, 358, 358, 347, 347, 343, 343, 343, - 344, 344, 345, 345, 342, 342, 346, 346, 360, 360, - 359, 359, 340, 340, 341, 341, 366, 402, 402, 402, - 402, 402, 403, 403, 367, 392, 394, 394, 394, 393, - 393, 390, 391, 389, 389, 389, 389, 389, 82, 82, - 82, 284, 284, 285, 285, 356, 356, 355, 355, 355, - 357, 357, 354, 354, 354, 354, 354, 354, 354, 354, + 404, 404, 404, 158, 158, 154, 154, 154, 155, 155, + 155, 156, 156, 401, 401, 401, 401, 315, 315, 315, + 315, 318, 318, 316, 316, 316, 316, 316, 316, 316, + 316, 316, 317, 317, 317, 317, 317, 317, 317, 319, + 319, 319, 319, 319, 320, 320, 320, 320, 320, 320, + 320, 320, 320, 320, 320, 320, 320, 320, 320, 320, + 321, 321, 321, 321, 321, 321, 321, 321, 336, 336, + 322, 322, 330, 330, 331, 331, 332, 332, 332, 333, + 333, 333, 334, 334, 327, 327, 327, 327, 327, 327, + 327, 327, 327, 329, 329, 328, 328, 328, 339, 364, + 364, 363, 363, 361, 361, 361, 361, 361, 361, 361, + 361, 348, 348, 358, 358, 358, 358, 358, 347, 347, + 343, 343, 343, 344, 344, 345, 345, 342, 342, 346, + 346, 360, 360, 359, 359, 340, 340, 341, 341, 366, + 402, 402, 402, 402, 402, 403, 403, 367, 392, 394, + 394, 394, 393, 393, 390, 391, 389, 389, 389, 389, + 389, 84, 84, 84, 284, 284, 285, 285, 356, 356, + 355, 355, 355, 357, 357, 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, - 354, 354, 354, 279, 279, 279, 388, 388, 388, 388, - 388, 388, 387, 387, 387, 353, 353, 353, 386, 386, - 58, 58, 216, 216, 405, 405, 406, 406, 406, 46, - 46, 46, 46, 46, 46, 45, 45, 45, 41, 41, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 47, - 47, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 28, 28, 28, 28, 28, 28, 28, 28, 28, - 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, - 28, 28, 28, 28, 28, 28, 28, 28, 28, 108, - 108, 109, 109, 109, 109, 111, 111, 111, 369, 369, - 59, 59, 3, 3, 171, 173, 174, 174, 172, 172, - 172, 172, 172, 172, 61, 61, 60, 60, 176, 175, - 177, 177, 177, 1, 1, 2, 2, 4, 4, 374, - 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, + 354, 354, 354, 354, 354, 354, 279, 279, 279, 388, + 388, 388, 388, 388, 388, 387, 387, 387, 353, 353, + 353, 353, 386, 386, 59, 59, 216, 216, 405, 405, + 406, 406, 406, 47, 47, 47, 47, 47, 47, 46, + 46, 46, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 48, 48, 43, 43, 43, 43, 43, + 43, 43, 43, 43, 43, 23, 23, 23, 23, 23, + 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, + 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, + 23, 23, 23, 110, 110, 111, 111, 111, 111, 113, + 113, 113, 369, 369, 60, 60, 3, 3, 171, 173, + 174, 174, 172, 172, 172, 172, 172, 172, 62, 62, + 61, 61, 176, 175, 177, 177, 177, 1, 1, 2, + 2, 4, 4, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, - 374, 335, 335, 335, 368, 368, 370, 110, 110, 110, - 110, 110, 110, 110, 110, 110, 110, 114, 113, 113, - 112, 115, 115, 115, 115, 115, 115, 115, 115, 372, - 372, 372, 62, 62, 373, 323, 324, 325, 5, 6, - 349, 371, 122, 122, 29, 38, 38, 30, 30, 30, - 30, 31, 31, 63, 64, 64, 64, 64, 64, 64, - 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, - 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, - 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, - 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, - 64, 64, 64, 64, 64, 64, 278, 278, 287, 287, - 277, 277, 302, 302, 302, 280, 280, 280, 281, 281, - 398, 398, 398, 274, 274, 65, 65, 65, 303, 303, - 303, 303, 67, 67, 407, 407, 408, 408, 409, 409, - 409, 68, 69, 69, 305, 305, 306, 306, 70, 71, - 83, 83, 83, 83, 83, 83, 83, 84, 84, 84, - 84, 107, 107, 107, 15, 15, 15, 15, 79, 79, - 79, 14, 14, 17, 66, 66, 73, 395, 395, 396, - 397, 397, 397, 397, 74, 76, 32, 32, 32, 32, - 32, 32, 132, 132, 120, 120, 120, 120, 120, 120, - 120, 120, 120, 120, 120, 120, 127, 127, 127, 121, - 121, 415, 77, 78, 78, 125, 125, 125, 118, 118, - 118, 124, 124, 124, 16, 16, 18, 260, 260, 19, - 19, 129, 129, 131, 131, 131, 131, 131, 133, 133, - 133, 133, 133, 133, 133, 128, 128, 130, 130, 130, - 130, 295, 295, 295, 294, 294, 165, 165, 167, 166, - 166, 168, 168, 169, 169, 169, 169, 214, 214, 191, - 191, 253, 253, 254, 254, 252, 252, 259, 259, 255, - 255, 255, 255, 262, 262, 170, 170, 170, 170, 178, - 178, 179, 179, 180, 180, 304, 304, 300, 300, 300, - 299, 299, 184, 184, 184, 186, 185, 185, 185, 185, - 187, 187, 189, 189, 188, 188, 190, 195, 195, 194, - 194, 192, 192, 192, 192, 193, 193, 193, 193, 196, - 196, 142, 142, 142, 142, 142, 142, 142, 157, 157, - 157, 157, 160, 160, 160, 160, 160, 160, 160, 160, - 160, 160, 160, 243, 243, 148, 148, 148, 148, 148, - 148, 148, 148, 148, 148, 148, 148, 148, 152, 152, + 374, 374, 374, 374, 374, 335, 335, 335, 368, 368, + 370, 112, 112, 112, 112, 112, 112, 112, 112, 112, + 112, 116, 115, 115, 114, 117, 117, 117, 117, 117, + 117, 117, 117, 372, 372, 372, 63, 63, 373, 323, + 324, 325, 5, 6, 349, 371, 124, 124, 24, 39, + 39, 25, 25, 25, 25, 26, 26, 64, 67, 67, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 278, 278, 287, 287, 277, 277, 302, 302, + 302, 280, 280, 280, 281, 281, 398, 398, 398, 274, + 274, 66, 66, 66, 303, 303, 303, 303, 69, 69, + 407, 407, 408, 408, 409, 409, 409, 70, 71, 71, + 305, 305, 306, 306, 72, 73, 85, 85, 85, 85, + 85, 85, 85, 86, 86, 86, 86, 109, 109, 109, + 10, 10, 10, 10, 81, 81, 81, 9, 9, 11, + 68, 68, 75, 395, 395, 396, 397, 397, 397, 397, + 76, 78, 27, 27, 27, 27, 27, 27, 134, 134, + 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, + 122, 122, 129, 129, 129, 123, 123, 416, 79, 80, + 80, 127, 127, 127, 120, 120, 120, 126, 126, 126, + 12, 12, 13, 260, 260, 14, 14, 131, 131, 133, + 133, 133, 133, 133, 135, 135, 135, 135, 135, 135, + 135, 130, 130, 132, 132, 132, 132, 295, 295, 295, + 294, 294, 165, 165, 167, 166, 166, 168, 168, 169, + 169, 169, 169, 214, 214, 191, 191, 253, 253, 254, + 254, 252, 252, 259, 259, 255, 255, 255, 255, 262, + 262, 170, 170, 170, 170, 178, 178, 179, 179, 180, + 180, 304, 304, 300, 300, 300, 299, 299, 184, 184, + 184, 186, 185, 185, 185, 185, 187, 187, 189, 189, + 188, 188, 190, 195, 195, 194, 194, 192, 192, 192, + 192, 193, 193, 193, 193, 196, 196, 144, 144, 144, + 144, 144, 144, 144, 144, 157, 157, 157, 157, 160, + 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, + 243, 243, 149, 149, 149, 149, 149, 149, 149, 149, + 149, 149, 149, 149, 149, 149, 149, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, - 152, 152, 152, 151, 219, 219, 218, 218, 85, 85, - 85, 86, 86, 87, 87, 87, 87, 87, 88, 88, - 88, 88, 88, 143, 143, 90, 90, 89, 89, 209, - 209, 292, 292, 91, 92, 92, 95, 95, 94, 93, - 93, 99, 99, 96, 96, 98, 98, 97, 100, 100, - 101, 102, 102, 275, 275, 197, 197, 205, 205, 205, - 205, 198, 198, 198, 198, 198, 198, 198, 206, 206, - 206, 213, 207, 207, 203, 203, 201, 201, 201, 201, - 201, 201, 201, 201, 201, 201, 202, 202, 202, 202, + 152, 152, 219, 219, 218, 218, 87, 87, 87, 88, + 88, 89, 89, 89, 89, 89, 90, 90, 90, 90, + 90, 90, 90, 92, 92, 91, 91, 209, 209, 292, + 292, 93, 94, 94, 97, 97, 96, 95, 95, 101, + 101, 98, 98, 100, 100, 99, 102, 102, 103, 104, + 104, 275, 275, 197, 197, 205, 205, 205, 205, 198, + 198, 198, 198, 198, 198, 198, 206, 206, 206, 213, + 207, 207, 203, 203, 201, 201, 201, 201, 201, 201, + 201, 201, 201, 201, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, + 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, @@ -7492,32 +8460,34 @@ var yyR1 = [...]int{ 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 162, 162, 162, 162, 224, - 224, 149, 149, 149, 149, 149, 149, 149, 149, 149, - 149, 149, 149, 149, 149, 149, 150, 150, 163, 163, + 224, 150, 150, 150, 150, 150, 150, 150, 150, 150, + 150, 150, 150, 150, 150, 150, 151, 151, 163, 163, 163, 163, 164, 164, 164, 164, 164, 164, 164, 312, - 312, 117, 117, 117, 117, 117, 117, 117, 117, 117, - 117, 117, 117, 116, 116, 116, 116, 116, 116, 116, - 116, 116, 416, 416, 326, 326, 326, 326, 204, 204, - 204, 204, 204, 123, 123, 123, 123, 123, 309, 309, - 309, 313, 313, 313, 311, 311, 311, 311, 311, 311, - 311, 311, 311, 311, 311, 311, 311, 311, 311, 314, - 314, 222, 222, 119, 119, 220, 220, 221, 223, 223, - 215, 215, 215, 215, 217, 217, 200, 200, 200, 225, - 225, 226, 226, 103, 104, 104, 105, 105, 227, 227, - 229, 228, 228, 230, 231, 231, 231, 232, 232, 233, - 233, 233, 48, 48, 48, 48, 48, 43, 43, 43, - 43, 44, 44, 44, 44, 134, 134, 134, 134, 136, - 136, 135, 135, 80, 80, 81, 81, 81, 140, 140, - 141, 141, 141, 138, 138, 139, 139, 250, 250, 234, - 234, 234, 241, 241, 241, 237, 237, 239, 239, 239, - 240, 240, 240, 238, 247, 247, 249, 249, 248, 248, - 244, 244, 245, 245, 246, 246, 246, 242, 242, 199, - 199, 199, 199, 199, 251, 251, 251, 251, 263, 263, - 210, 210, 212, 212, 211, 211, 161, 264, 264, 272, - 269, 269, 270, 270, 296, 296, 296, 273, 273, 286, - 286, 282, 282, 283, 283, 276, 276, 288, 288, 288, - 75, 208, 208, 365, 365, 362, 291, 291, 293, 293, - 297, 297, 301, 301, 298, 298, 289, 289, 289, 289, + 312, 118, 118, 118, 118, 118, 118, 118, 118, 118, + 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, + 118, 119, 119, 119, 119, 119, 119, 119, 119, 119, + 119, 119, 119, 119, 119, 119, 119, 119, 119, 417, + 417, 326, 326, 326, 204, 204, 204, 204, 204, 125, + 125, 125, 125, 125, 309, 309, 309, 313, 313, 313, + 311, 311, 311, 311, 311, 311, 311, 311, 311, 311, + 311, 311, 311, 311, 311, 314, 314, 222, 222, 121, + 121, 220, 220, 221, 223, 223, 215, 215, 215, 215, + 217, 217, 200, 200, 200, 225, 225, 226, 226, 105, + 106, 106, 107, 107, 227, 227, 229, 228, 228, 230, + 231, 231, 231, 232, 232, 233, 233, 233, 49, 49, + 49, 49, 49, 44, 44, 44, 44, 45, 45, 45, + 45, 136, 136, 136, 136, 138, 138, 137, 137, 82, + 82, 83, 83, 83, 142, 142, 143, 143, 143, 140, + 140, 141, 141, 250, 250, 234, 234, 234, 241, 241, + 241, 237, 237, 239, 239, 239, 240, 240, 240, 238, + 247, 247, 249, 249, 248, 248, 244, 244, 245, 245, + 246, 246, 246, 242, 242, 199, 199, 199, 199, 199, + 251, 251, 251, 251, 263, 263, 210, 210, 212, 212, + 211, 211, 161, 264, 264, 272, 269, 269, 270, 270, + 296, 296, 296, 273, 273, 286, 286, 282, 282, 283, + 283, 276, 276, 288, 288, 288, 77, 208, 208, 365, + 365, 362, 291, 291, 293, 293, 297, 297, 301, 301, + 298, 298, 8, 410, 410, 410, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, @@ -7532,7 +8502,9 @@ var yyR1 = [...]int{ 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 290, 290, 290, 290, 290, + 289, 289, 289, 289, 289, 289, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, @@ -7572,160 +8544,182 @@ var yyR1 = [...]int{ 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 412, 413, 307, 308, 308, - 308, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, + 413, 414, 307, 308, 308, 308, } var yyR2 = [...]int{ 0, 3, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, - 1, 0, 1, 1, 1, 2, 3, 2, 3, 0, - 1, 3, 1, 4, 3, 3, 4, 3, 2, 3, - 4, 3, 4, 2, 7, 1, 3, 3, 3, 3, - 1, 2, 1, 1, 3, 2, 3, 3, 2, 5, - 7, 10, 9, 7, 8, 1, 1, 10, 11, 9, - 8, 8, 1, 1, 1, 3, 1, 3, 1, 3, - 0, 4, 3, 1, 3, 3, 3, 3, 3, 1, - 1, 2, 5, 4, 1, 3, 3, 2, 2, 2, - 2, 2, 1, 1, 1, 1, 2, 2, 6, 12, - 2, 0, 2, 0, 2, 1, 0, 2, 1, 3, - 3, 0, 1, 1, 3, 3, 6, 4, 7, 8, - 8, 8, 6, 3, 1, 1, 5, 0, 1, 1, - 1, 1, 2, 2, 2, 0, 1, 4, 4, 4, - 4, 4, 4, 2, 4, 1, 3, 1, 1, 3, - 4, 3, 3, 3, 5, 10, 0, 2, 0, 2, - 3, 5, 3, 4, 2, 3, 2, 3, 3, 3, - 3, 2, 2, 4, 4, 1, 1, 1, 1, 1, - 0, 2, 2, 3, 3, 2, 2, 2, 1, 1, - 2, 2, 2, 2, 2, 1, 1, 1, 1, 2, - 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 1, 1, 0, 1, 1, 1, 2, 3, 2, + 3, 0, 1, 3, 1, 4, 3, 3, 4, 3, + 2, 3, 4, 3, 4, 2, 7, 1, 3, 3, + 3, 3, 1, 2, 1, 1, 3, 2, 3, 3, + 2, 5, 7, 10, 9, 7, 8, 1, 1, 10, + 11, 9, 8, 8, 1, 1, 1, 3, 1, 3, + 1, 3, 0, 4, 3, 1, 3, 3, 3, 3, + 3, 1, 1, 2, 5, 4, 1, 3, 3, 2, + 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, + 6, 12, 2, 0, 2, 0, 2, 1, 0, 2, + 1, 3, 3, 0, 1, 1, 3, 3, 6, 4, + 7, 8, 8, 8, 6, 3, 1, 1, 5, 0, + 1, 1, 1, 1, 2, 2, 2, 0, 1, 4, + 4, 4, 4, 4, 4, 2, 4, 1, 3, 1, + 1, 3, 4, 3, 3, 3, 5, 10, 0, 2, + 0, 2, 3, 5, 3, 4, 2, 3, 2, 3, + 3, 3, 3, 2, 2, 4, 4, 1, 1, 1, + 1, 1, 0, 2, 2, 3, 3, 2, 2, 2, + 1, 1, 2, 2, 2, 2, 2, 2, 1, 1, + 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, + 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, - 2, 1, 2, 1, 3, 1, 1, 1, 2, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, - 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, - 2, 3, 3, 3, 2, 2, 2, 2, 2, 2, - 1, 1, 1, 1, 1, 5, 5, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 3, 0, 3, 0, - 5, 1, 3, 0, 3, 5, 0, 1, 1, 0, - 1, 0, 3, 3, 2, 2, 2, 1, 2, 2, - 0, 1, 0, 2, 2, 5, 0, 1, 1, 2, - 1, 3, 2, 1, 1, 3, 3, 3, 0, 1, - 4, 3, 3, 4, 2, 0, 2, 1, 1, 1, - 1, 1, 0, 1, 1, 1, 0, 1, 1, 3, - 3, 4, 3, 1, 3, 1, 7, 6, 7, 7, - 8, 8, 0, 1, 5, 2, 1, 1, 1, 0, - 1, 3, 3, 1, 1, 2, 2, 2, 0, 1, - 1, 1, 2, 0, 1, 0, 1, 1, 3, 2, - 1, 2, 3, 3, 3, 4, 4, 3, 3, 3, - 3, 4, 4, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 4, 5, 0, 2, 2, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, - 0, 1, 0, 2, 0, 2, 0, 2, 2, 0, - 1, 5, 1, 3, 7, 1, 3, 3, 1, 2, - 2, 2, 5, 5, 5, 6, 8, 5, 5, 4, - 4, 4, 6, 5, 5, 5, 2, 2, 2, 2, - 3, 3, 3, 4, 3, 3, 1, 3, 5, 1, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, - 2, 3, 4, 4, 2, 11, 3, 6, 8, 6, - 6, 6, 13, 8, 6, 10, 5, 5, 5, 7, - 5, 5, 5, 5, 5, 7, 7, 5, 5, 0, - 6, 5, 6, 4, 5, 0, 8, 9, 0, 3, - 0, 1, 0, 3, 8, 4, 1, 3, 3, 6, - 7, 7, 8, 4, 0, 1, 0, 1, 3, 3, - 1, 1, 2, 1, 1, 0, 2, 0, 2, 5, - 3, 7, 4, 4, 4, 4, 3, 3, 3, 7, + 2, 1, 1, 2, 1, 2, 1, 3, 1, 1, + 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, + 2, 2, 2, 2, 3, 3, 3, 2, 2, 2, + 2, 2, 2, 1, 1, 1, 1, 1, 5, 5, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, + 0, 3, 0, 5, 1, 3, 0, 3, 5, 0, + 1, 1, 0, 1, 0, 3, 3, 2, 2, 2, + 1, 2, 2, 0, 1, 0, 2, 2, 5, 0, + 1, 1, 2, 1, 3, 2, 1, 1, 3, 3, + 3, 0, 1, 4, 3, 3, 4, 2, 0, 2, + 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, + 1, 1, 3, 3, 4, 3, 1, 3, 1, 7, + 6, 7, 7, 8, 8, 0, 1, 5, 2, 1, + 1, 1, 0, 1, 3, 3, 1, 1, 2, 2, + 2, 0, 1, 1, 1, 2, 0, 1, 0, 1, + 1, 3, 2, 1, 2, 3, 3, 3, 4, 4, + 3, 3, 3, 3, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 2, 0, 2, 2, 1, 3, 2, 0, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 3, 1, 3, - 3, 0, 2, 2, 2, 2, 2, 2, 2, 4, - 4, 3, 0, 1, 4, 3, 4, 4, 3, 3, - 3, 2, 1, 3, 3, 3, 5, 7, 7, 6, - 5, 3, 2, 3, 3, 3, 7, 3, 3, 3, - 3, 4, 7, 5, 2, 4, 4, 4, 4, 4, - 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, - 2, 2, 4, 4, 4, 4, 4, 2, 3, 3, - 3, 3, 5, 2, 3, 3, 2, 3, 4, 4, - 4, 3, 4, 4, 5, 3, 0, 1, 0, 1, - 1, 1, 0, 2, 2, 0, 2, 2, 0, 2, - 0, 1, 1, 1, 1, 2, 1, 3, 1, 1, - 1, 1, 1, 3, 0, 1, 1, 3, 3, 2, - 2, 1, 1, 5, 0, 1, 0, 1, 2, 3, - 0, 3, 3, 3, 3, 3, 1, 0, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, - 1, 4, 4, 4, 2, 2, 3, 1, 3, 2, - 1, 2, 1, 2, 2, 4, 3, 3, 6, 4, - 7, 6, 1, 3, 2, 2, 2, 2, 1, 1, - 1, 3, 2, 1, 1, 1, 0, 1, 1, 0, - 3, 0, 2, 0, 2, 1, 2, 2, 0, 1, - 1, 0, 1, 1, 5, 5, 4, 0, 2, 4, - 4, 0, 1, 0, 1, 2, 3, 4, 1, 1, - 1, 1, 1, 1, 1, 1, 3, 1, 2, 3, - 5, 0, 1, 2, 1, 1, 0, 1, 2, 1, - 3, 1, 1, 1, 4, 3, 1, 1, 2, 3, - 7, 0, 3, 0, 1, 1, 3, 1, 3, 1, - 1, 3, 3, 1, 3, 4, 4, 4, 3, 2, - 4, 0, 1, 0, 2, 0, 1, 0, 1, 2, - 1, 1, 1, 2, 2, 1, 2, 3, 2, 3, - 2, 2, 2, 1, 1, 3, 3, 0, 1, 1, - 2, 6, 5, 6, 6, 0, 2, 3, 3, 0, - 2, 3, 3, 3, 2, 3, 1, 6, 3, 4, - 3, 1, 3, 4, 5, 6, 3, 4, 5, 6, - 3, 4, 1, 1, 1, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, - 1, 1, 3, 1, 1, 1, 2, 2, 2, 2, - 1, 1, 2, 7, 7, 6, 6, 2, 2, 1, - 6, 3, 3, 3, 1, 3, 1, 3, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, - 2, 2, 2, 1, 1, 0, 1, 2, 5, 0, - 3, 0, 1, 4, 4, 2, 0, 1, 1, 2, - 2, 1, 1, 2, 2, 0, 1, 1, 1, 1, - 5, 1, 3, 0, 3, 1, 1, 1, 2, 1, - 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 3, 4, 6, 4, 4, 8, 6, - 8, 6, 5, 4, 10, 2, 2, 1, 2, 2, + 3, 3, 3, 3, 4, 5, 0, 2, 2, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, + 1, 1, 0, 1, 0, 1, 0, 2, 0, 2, + 0, 2, 2, 0, 1, 5, 1, 3, 7, 1, + 3, 3, 1, 2, 2, 2, 5, 5, 5, 6, + 8, 5, 5, 4, 4, 4, 6, 5, 5, 5, + 2, 2, 2, 2, 3, 3, 3, 4, 3, 3, + 1, 3, 5, 1, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 2, 2, 3, 4, 4, 2, 11, + 3, 6, 8, 6, 6, 6, 13, 8, 6, 10, + 5, 5, 5, 7, 5, 5, 5, 5, 5, 7, + 7, 5, 5, 0, 6, 5, 6, 4, 5, 0, + 8, 9, 0, 3, 0, 1, 0, 3, 8, 4, + 1, 3, 3, 6, 7, 7, 8, 4, 0, 1, + 0, 1, 3, 3, 1, 1, 2, 1, 1, 0, + 2, 0, 2, 5, 3, 7, 4, 4, 4, 4, + 3, 3, 3, 7, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 2, 0, 2, 2, 1, 3, + 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 3, 1, 3, 3, 0, 2, 2, 2, 2, + 2, 2, 2, 4, 4, 3, 0, 1, 4, 3, + 4, 4, 3, 3, 3, 2, 1, 3, 3, 3, + 5, 7, 7, 6, 5, 3, 2, 3, 5, 5, + 3, 3, 7, 3, 3, 3, 3, 4, 7, 5, + 2, 4, 4, 4, 4, 4, 5, 5, 4, 4, + 4, 4, 4, 4, 4, 4, 2, 2, 4, 4, + 4, 4, 4, 2, 3, 3, 3, 3, 5, 2, + 3, 3, 2, 3, 4, 4, 4, 3, 4, 4, + 5, 3, 0, 1, 0, 1, 1, 1, 0, 2, + 2, 0, 2, 2, 0, 2, 0, 1, 1, 1, + 1, 2, 1, 3, 1, 1, 1, 1, 1, 3, + 0, 1, 1, 3, 3, 2, 2, 1, 1, 5, + 0, 1, 0, 1, 2, 3, 0, 3, 3, 3, + 3, 3, 1, 0, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 0, 1, 1, 4, 4, 4, + 2, 2, 3, 1, 3, 2, 1, 2, 1, 2, + 2, 4, 3, 3, 6, 4, 7, 6, 1, 3, + 2, 2, 2, 2, 1, 1, 1, 3, 2, 1, + 1, 1, 0, 1, 1, 0, 3, 0, 2, 0, + 2, 1, 2, 2, 0, 1, 1, 0, 1, 1, + 5, 5, 4, 0, 2, 4, 4, 0, 1, 0, + 1, 2, 3, 4, 1, 1, 1, 1, 1, 1, + 1, 1, 3, 1, 2, 3, 5, 0, 1, 2, + 1, 1, 0, 1, 2, 1, 3, 1, 1, 1, + 4, 3, 1, 1, 2, 3, 7, 0, 3, 0, + 1, 1, 3, 1, 3, 1, 1, 3, 3, 1, + 3, 4, 4, 4, 3, 2, 4, 0, 1, 0, + 2, 0, 1, 0, 1, 2, 1, 1, 1, 2, + 2, 1, 2, 3, 2, 3, 2, 2, 2, 1, + 1, 3, 3, 0, 1, 1, 2, 6, 5, 6, + 6, 0, 2, 3, 3, 0, 2, 3, 3, 3, + 2, 3, 1, 3, 6, 3, 4, 3, 1, 3, + 4, 5, 6, 3, 4, 5, 6, 3, 4, 1, + 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, + 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, + 1, 3, 1, 1, 1, 2, 2, 2, 2, 1, + 1, 2, 7, 7, 6, 6, 2, 2, 5, 6, + 3, 3, 1, 3, 1, 3, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, + 4, 2, 4, 0, 1, 2, 5, 0, 3, 0, + 1, 4, 4, 2, 0, 1, 1, 2, 2, 1, + 1, 2, 2, 0, 1, 1, 1, 1, 5, 1, + 3, 0, 3, 1, 1, 1, 2, 1, 2, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 3, 4, 6, 4, 4, 8, 6, 8, 6, + 5, 4, 10, 2, 2, 1, 2, 2, 2, 2, 2, 4, 5, 5, 5, 5, 5, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, - 6, 5, 4, 4, 4, 4, 4, 7, 4, 4, - 6, 6, 6, 8, 6, 6, 4, 4, 3, 4, - 6, 6, 4, 4, 4, 6, 8, 6, 4, 6, - 6, 8, 10, 7, 8, 8, 9, 4, 4, 4, - 4, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 4, 4, 4, 6, 4, 6, 5, 9, 6, - 9, 1, 1, 1, 1, 1, 1, 1, 1, 0, + 4, 4, 4, 4, 4, 4, 4, 8, 4, 8, + 8, 6, 5, 4, 4, 4, 4, 4, 7, 4, + 4, 6, 6, 6, 8, 6, 6, 4, 4, 3, + 4, 6, 6, 4, 4, 6, 4, 6, 4, 4, + 4, 4, 4, 4, 6, 4, 6, 4, 4, 4, + 6, 4, 6, 4, 4, 6, 4, 6, 4, 6, + 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, + 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, + 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, + 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, + 4, 6, 8, 4, 6, 8, 4, 4, 4, 6, + 4, 6, 4, 8, 6, 4, 4, 6, 4, 6, + 8, 4, 6, 8, 4, 4, 6, 8, 6, 4, + 6, 6, 8, 10, 7, 8, 8, 9, 4, 4, + 4, 4, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 4, 4, 4, 4, 4, 4, 6, 4, + 6, 5, 9, 6, 9, 8, 6, 8, 8, 8, + 6, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 6, 8, 10, 12, 14, 6, 8, 8, 10, 12, 14, 6, 8, 10, 12, 6, 8, 4, 4, 3, 4, 6, 6, 4, 6, 4, 6, 8, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 0, 2, 0, 2, 3, 3, 4, 4, - 4, 4, 4, 0, 3, 4, 7, 3, 1, 1, - 1, 0, 5, 5, 2, 3, 1, 2, 2, 1, - 2, 1, 2, 2, 1, 2, 2, 1, 1, 0, - 1, 0, 1, 0, 2, 1, 2, 4, 0, 2, - 1, 1, 3, 5, 1, 1, 1, 2, 2, 0, - 3, 0, 2, 2, 1, 3, 0, 1, 0, 1, - 3, 1, 3, 2, 0, 1, 1, 0, 1, 2, - 4, 4, 0, 2, 2, 1, 1, 3, 3, 3, - 3, 3, 3, 3, 3, 0, 3, 3, 3, 0, - 3, 1, 1, 0, 4, 0, 1, 1, 0, 3, - 1, 3, 2, 1, 1, 0, 1, 2, 4, 9, - 3, 5, 0, 3, 3, 0, 1, 0, 2, 2, - 0, 2, 2, 2, 0, 2, 1, 2, 3, 3, - 0, 2, 1, 2, 3, 4, 3, 0, 1, 2, - 1, 5, 4, 4, 1, 3, 3, 5, 0, 5, - 1, 3, 1, 2, 3, 4, 1, 1, 3, 3, - 1, 2, 1, 1, 1, 1, 1, 1, 1, 0, - 1, 0, 2, 0, 3, 0, 1, 0, 1, 1, - 5, 0, 1, 0, 1, 2, 1, 1, 1, 1, - 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, + 2, 0, 2, 3, 4, 4, 4, 4, 4, 0, + 3, 4, 7, 3, 1, 1, 1, 0, 5, 5, + 2, 3, 1, 2, 2, 1, 2, 1, 2, 2, + 1, 2, 2, 1, 1, 0, 1, 0, 1, 0, + 2, 1, 2, 4, 0, 2, 1, 1, 3, 5, + 1, 1, 1, 2, 2, 0, 3, 0, 2, 2, + 1, 3, 0, 1, 0, 1, 3, 1, 3, 2, + 0, 1, 1, 0, 1, 2, 4, 4, 0, 2, + 2, 1, 1, 3, 3, 3, 3, 3, 3, 3, + 3, 0, 3, 3, 3, 0, 3, 1, 1, 0, + 4, 0, 1, 1, 0, 3, 1, 3, 2, 1, + 1, 0, 1, 2, 4, 9, 3, 5, 0, 3, + 3, 0, 1, 0, 2, 2, 0, 2, 2, 2, + 0, 2, 1, 2, 3, 3, 0, 2, 1, 2, + 3, 4, 3, 0, 1, 2, 1, 5, 4, 4, + 1, 3, 3, 5, 0, 5, 1, 3, 1, 2, + 3, 4, 1, 1, 3, 3, 1, 2, 1, 1, + 1, 1, 1, 1, 1, 0, 1, 0, 2, 0, + 3, 0, 1, 0, 1, 1, 5, 0, 1, 0, + 1, 2, 1, 1, 1, 1, 1, 1, 0, 1, + 1, 1, 3, 0, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, @@ -7780,746 +8774,857 @@ var yyR2 = [...]int{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, - 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 0, 0, 1, 1, } var yyChk = [...]int{ - -1000, -410, -77, -415, -7, -11, -20, -21, -22, -23, - -24, -25, -26, -27, -28, -29, -30, -31, -63, -64, - -65, -67, -68, -69, -70, -71, -14, -17, -66, -32, - -33, -72, -73, -74, -75, -76, -16, -18, -19, -9, - -8, -13, 10, 11, -106, -34, 33, -39, -49, 225, - -50, -40, 226, -51, 228, 227, 265, 229, 258, 75, - 313, 314, 316, 317, 318, 319, -107, 617, 263, 264, - 231, 37, 46, 34, 35, 38, 235, 271, 272, 234, - -10, -35, 9, -412, 12, 449, 260, 259, 29, -12, - 511, 87, -78, -411, 665, -250, -234, 23, 34, 30, - -233, -229, -125, -234, 21, 19, 8, -77, -77, -77, - 13, 14, -77, -350, -352, 87, 159, 87, -77, -56, - -55, -53, -52, -54, -57, 32, -46, -47, -374, -45, - -42, 230, 227, 275, 123, 124, 265, 266, 267, 229, - 249, 264, 268, 263, 284, -41, 82, 34, 511, 514, - -357, 226, 232, 233, 228, 450, 126, 125, 76, -354, - 372, 544, 635, -57, 637, 101, 104, 636, 45, 239, - 638, 639, 640, 551, 641, 248, 642, 643, 644, 645, - 651, 592, 652, 653, 654, 127, 8, -77, -301, -297, - 91, -290, 508, 251, 542, 543, 300, 82, 42, 517, - 369, 372, 544, 479, 635, 313, 329, 323, 484, 485, - 486, 352, 344, 509, 545, 518, 303, 252, 288, 629, - 342, 135, 637, 307, 546, 266, 377, 378, 547, 379, - 101, 316, 416, 650, 306, 548, 648, 104, 636, 321, - 80, 478, 52, 632, 45, 261, 340, 234, 336, 638, - 289, 549, 520, 282, 126, 123, 657, 37, 332, 51, - 31, 647, 125, 50, 639, 150, 550, 640, 551, 381, - 359, 623, 49, 382, 267, 552, 85, 272, 513, 310, - 631, 383, 498, 333, 384, 299, 646, 231, 553, 612, - 604, 605, 385, 386, 624, 364, 360, 365, 500, 554, - 408, 483, 387, 608, 609, 664, 53, 555, 556, 625, - 124, 557, 79, 641, 81, 327, 328, 558, 297, 250, - 503, 504, 410, 356, 461, 468, 469, 111, 112, 464, - 113, 470, 114, 471, 472, 473, 462, 115, 108, 463, - 474, 475, 357, 358, 116, 476, 110, 109, 465, 467, - 117, 477, 248, 36, 388, 510, 301, 59, 305, 276, - 411, 47, 362, 661, 46, 619, 505, 559, 622, 355, - 351, 458, 54, 560, 561, 562, 563, 480, 642, 354, - 326, 350, 656, 4, 294, 481, 643, 63, 233, 367, - 366, 368, 283, 407, 347, 564, 565, 566, 255, 83, - 567, 337, 22, 568, 569, 389, 290, 570, 57, 571, - 572, 414, 264, 573, 55, 644, 40, 574, 269, 658, - 645, 575, 576, 577, 618, 578, 271, 579, 391, 580, - 606, 607, 390, 361, 363, 506, 278, 392, 236, 512, - 581, 311, 331, 268, 649, 582, 256, 494, 495, 496, - 497, 630, 502, 501, 270, 275, 263, 415, 257, 583, - 584, 585, 586, 587, 304, 603, 588, 589, 317, 651, - 459, 44, 590, 591, 592, 593, 594, 298, 293, 409, - 418, 62, 84, 374, 595, 596, 628, 325, 322, 291, - 597, 314, 56, 652, 653, 654, 285, 655, 487, 488, - 489, 490, 10, 662, 663, 482, 394, 127, 295, 296, - 48, 348, 277, 598, 308, 599, 338, 339, 353, 324, - 349, 615, 315, 613, 279, 395, 460, 265, 600, 417, - 292, 370, 375, 309, 516, 499, 284, 396, 627, 515, - 491, 492, 346, 343, 286, 493, 601, 617, 397, 240, - 280, 281, 602, 614, 398, 399, 302, 400, 401, 402, - 403, 404, 406, 312, 405, 616, 610, 611, 287, 514, - 320, 341, 376, 430, 431, 432, 433, 434, 435, 436, - 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, - 447, 457, 238, -77, 238, -188, -297, 238, -269, 378, - -287, 380, 393, 388, 398, 386, -278, 389, 391, 278, - -398, 408, 238, 395, 225, 381, 390, 399, 400, 302, - 406, 401, 312, 405, 287, 402, 403, 404, -381, 177, - 640, 655, 135, 345, 385, 383, 409, 619, 91, -303, - 91, 92, 93, -290, 315, -305, 320, -291, -381, -290, - 318, -77, -77, -307, -307, -127, 619, 621, -207, -142, - 143, -157, -160, -148, -152, -201, -202, -203, -204, -158, - -217, -256, 166, 167, 174, 144, -213, -161, 27, 507, - 451, 450, 177, 32, -151, 220, 69, 70, 453, 146, - 58, 12, 426, 427, -159, 421, 428, 423, 478, 480, - 481, 482, 479, 484, 485, 486, 487, 488, 489, 490, - 491, 492, 493, 483, 455, 456, 118, 457, 108, 110, - 109, 458, 459, 460, 342, 505, 506, 500, 503, 504, - 502, 501, 357, 358, 461, 462, 463, 111, 112, 113, - 114, 115, 116, 117, 464, 467, 465, 466, 468, 469, - 470, 475, 476, 471, 472, 473, 474, 477, 363, 362, - 361, -87, -99, 533, 532, -100, -149, -150, -163, -164, - -291, -297, 243, 420, 237, 172, 449, -153, -146, -215, - 107, 92, 93, -8, -211, 419, 424, 425, 429, 422, - 519, 521, 536, 537, 539, 524, 529, 528, 531, 494, - 495, 496, 497, 498, 499, 604, 605, 606, 607, 608, - 609, 610, 611, -381, -290, 91, -155, -154, -197, 94, - 99, 102, 103, 105, -404, 261, 338, 339, 119, -412, - 633, 90, 95, 96, 97, 98, 120, 121, 178, 179, - 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, - 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, - 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, - 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, - 45, 394, 394, -188, -77, -77, -77, -77, -227, -125, - -229, -10, -8, -412, 9, -77, -8, -9, -13, -35, - -37, 538, -36, -297, 100, -234, -250, 13, 162, 43, - 51, -232, -233, -12, -8, -142, 20, 24, 25, -130, - 168, -142, -297, -130, -276, 242, -77, -77, -265, -310, - 315, -267, 409, 619, 408, -257, -270, 91, -256, -269, - 407, -351, 159, -337, -341, -291, 253, -367, 249, -188, - -360, -359, -291, -412, -126, -286, 239, 247, 246, 136, - -385, 139, 295, 420, 237, -52, -53, -54, -269, 176, - 639, -108, 270, 274, 88, 88, -341, -340, -339, -386, - 274, 253, -366, -358, 245, 254, -347, 246, 247, -342, - 239, 137, -386, -342, 244, 254, 249, 253, 274, 274, - 127, 274, 127, 274, 274, 274, 274, 274, 274, 274, - 274, 274, 269, -348, 151, -348, 515, 515, -354, -386, - 249, 239, -386, -386, 245, -288, -342, 241, 26, 241, - 36, 36, -348, -348, -348, -269, 176, -348, -348, -348, - -348, 282, 282, -348, -348, -348, -348, -348, -348, -348, + -1000, -411, -79, -416, -7, -29, -15, -16, -17, -18, + -19, -20, -21, -22, -23, -24, -25, -26, -64, -67, + -65, -66, -69, -70, -71, -72, -73, -9, -11, -68, + -27, -28, -74, -75, -76, -77, -78, -12, -13, -14, + -8, -32, -31, -30, 10, 11, -108, -35, 33, -40, + -50, 227, -51, -41, 228, -52, 230, 229, 267, 231, + 379, 260, 75, 315, 316, 318, 319, 320, 321, -109, + 684, 265, 266, 233, 37, 46, 34, 35, 38, 237, + 273, 274, 236, 133, -33, -36, 9, -413, 12, 469, + 262, 261, 29, -34, 578, 87, -80, -412, 732, -250, + -234, 23, 34, 30, -233, -229, -127, -234, 21, 19, + 8, -79, -79, -79, 13, 14, -79, -350, -352, 87, + 160, 87, -79, -57, -56, -54, -53, -55, -58, 32, + -47, -48, -374, -46, -43, 232, 229, 277, 123, 124, + 267, 268, 269, 231, 251, 266, 270, 265, 286, -42, + 82, 34, 578, 581, -357, 228, 234, 235, 230, 470, + 126, 125, 76, -354, 374, 611, 702, -58, 704, 101, + 104, 703, 45, 241, 705, 706, 707, 618, 708, 250, + 709, 710, 711, 712, 718, 659, 719, 720, 721, 127, + 8, -79, -301, -297, 91, -290, 575, 253, 609, 423, + 610, 302, 82, 42, 514, 584, 371, 374, 611, 499, + 702, 380, 315, 331, 325, 504, 505, 506, 354, 346, + 576, 612, 585, 305, 254, 290, 696, 344, 136, 704, + 309, 613, 268, 381, 382, 614, 383, 101, 318, 420, + 717, 308, 615, 715, 104, 703, 323, 80, 498, 52, + 699, 45, 263, 428, 429, 342, 236, 338, 705, 291, + 616, 587, 284, 126, 123, 724, 37, 334, 51, 31, + 714, 125, 50, 706, 151, 617, 707, 618, 385, 361, + 690, 49, 386, 269, 619, 85, 274, 580, 312, 698, + 387, 519, 335, 388, 301, 713, 233, 620, 679, 671, + 672, 389, 390, 691, 366, 362, 367, 521, 621, 412, + 503, 391, 675, 676, 731, 53, 622, 623, 692, 124, + 624, 79, 708, 81, 329, 330, 625, 299, 252, 524, + 525, 414, 358, 481, 488, 489, 111, 112, 484, 113, + 490, 114, 491, 492, 493, 482, 115, 108, 483, 494, + 495, 359, 360, 116, 496, 110, 109, 485, 487, 117, + 497, 250, 36, 392, 577, 303, 59, 307, 278, 415, + 47, 364, 728, 46, 686, 526, 626, 689, 357, 353, + 478, 54, 627, 628, 629, 630, 500, 709, 356, 328, + 352, 723, 4, 296, 501, 710, 63, 235, 369, 368, + 370, 285, 411, 349, 631, 632, 633, 257, 83, 634, + 339, 22, 635, 636, 393, 292, 637, 57, 638, 639, + 418, 266, 640, 55, 711, 40, 641, 271, 725, 712, + 642, 643, 644, 685, 645, 273, 646, 395, 647, 673, + 674, 394, 363, 365, 527, 280, 396, 379, 238, 579, + 648, 313, 333, 270, 716, 649, 258, 515, 516, 517, + 518, 697, 523, 522, 272, 277, 265, 419, 259, 650, + 651, 652, 653, 654, 306, 670, 655, 656, 319, 718, + 479, 44, 657, 658, 659, 660, 661, 300, 295, 413, + 422, 62, 84, 376, 662, 663, 695, 327, 324, 293, + 460, 462, 463, 464, 465, 466, 461, 468, 664, 316, + 56, 719, 720, 721, 287, 722, 507, 508, 509, 510, + 10, 561, 544, 572, 545, 562, 546, 555, 547, 563, + 571, 573, 528, 536, 529, 537, 567, 550, 564, 556, + 549, 548, 570, 553, 557, 530, 538, 568, 554, 531, + 539, 532, 540, 533, 541, 566, 565, 558, 569, 534, + 542, 560, 535, 543, 559, 551, 552, 431, 729, 730, + 502, 398, 127, 297, 298, 48, 350, 279, 665, 310, + 666, 340, 341, 475, 476, 355, 326, 351, 682, 317, + 680, 281, 399, 480, 267, 667, 421, 294, 372, 377, + 311, 583, 520, 286, 400, 694, 582, 511, 512, 348, + 345, 288, 513, 668, 684, 401, 242, 282, 283, 669, + 681, 402, 403, 304, 404, 405, 406, 407, 408, 410, + 314, 409, 683, 677, 678, 289, 459, 581, 322, 343, + 378, 441, 442, 443, 444, 445, 446, 447, 448, 449, + 450, 451, 452, 453, 454, 455, 456, 457, 458, 477, + 240, -79, 240, -188, -297, 240, 179, -269, 382, -287, + 384, 397, 392, 402, 390, -278, 393, 395, 280, -398, + 412, 240, 399, 227, 385, 394, 403, 404, 304, 410, + 405, 314, 409, 289, 406, 407, 408, -381, 179, 707, + 722, 136, 347, 389, 387, 413, 686, 91, -303, 91, + 92, 93, -290, 317, -305, 322, -291, -381, -290, 320, + -79, -79, -307, -307, -129, 686, 688, -207, -144, 144, + -157, -258, -160, 92, -149, -152, -201, -202, -203, -204, + -158, -217, -256, 168, 169, 176, 145, -213, -161, 27, + 574, 471, 470, 179, 32, 222, 69, 70, 473, 147, + 58, 12, 436, 437, -159, 426, 427, 438, 432, 433, + 498, 500, 501, 502, 499, 504, 505, 506, 507, 508, + 509, 510, 511, 512, 513, 503, 514, 475, 476, 118, + 477, 108, 110, 109, 478, 479, 480, 344, 526, 527, + 521, 524, 525, 523, 522, 359, 360, 481, 544, 545, + 549, 548, 546, 547, 550, 553, 554, 555, 556, 557, + 558, 560, 559, 551, 552, 529, 528, 530, 531, 532, + 533, 534, 535, 537, 536, 538, 539, 540, 541, 542, + 543, 561, 562, 563, 564, 565, 567, 566, 571, 570, + 568, 569, 573, 572, 482, 483, 111, 112, 113, 114, + 115, 116, 117, 484, 487, 485, 486, 488, 489, 490, + 495, 496, 491, 492, 493, 494, 497, 370, 368, 369, + 365, 364, 363, -89, -101, 600, 599, -102, 423, 428, + 429, 431, -150, -151, -163, -164, -291, -297, 245, 425, + 239, 174, 469, -153, -147, -215, 107, 93, -31, -211, + 424, 434, 435, 439, 430, 440, 586, 588, 603, 604, + 606, 591, 596, 595, 598, 515, 516, 517, 518, 519, + 520, 671, 672, 673, 674, 675, 676, 677, 678, -381, + -290, 91, -155, -154, -197, 94, 99, 102, 103, 105, + -404, 263, 340, 341, 119, -413, 700, 90, 95, 96, + 97, 98, 120, 121, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, + 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, + 216, 217, 218, 219, 220, 221, 45, 398, 398, -188, + -79, -79, -79, -79, -410, 703, 579, -227, -127, -229, + -33, -31, -413, 9, -79, -31, -32, -30, -36, -38, + 605, -37, -297, 100, -234, -250, 13, 163, 43, 51, + -232, -233, -34, -31, -144, 20, 24, 25, -132, 170, + -144, -297, -132, -276, 244, -79, -79, -265, -310, 317, + -267, 413, 686, 412, -257, -270, 91, -256, -269, 411, + 92, -351, 160, -337, -341, -291, 255, -367, 251, -188, + -360, -359, -291, -413, -128, -286, 241, 249, 248, 137, + -385, 140, 297, 425, 239, -53, -54, -55, -269, 178, + 706, -110, 272, 276, 88, 88, -341, -340, -339, -386, + 276, 255, -366, -358, 247, 256, -347, 248, 249, -342, + 241, 138, -386, -342, 246, 256, 251, 255, 276, 276, + 127, 276, 127, 276, 276, 276, 276, 276, 276, 276, + 276, 276, 271, -348, 152, -348, 582, 582, -354, -386, + 251, 241, -386, -386, 247, -288, -342, 243, 26, 243, + 36, 36, -348, -348, -348, -269, 178, -348, -348, -348, + -348, 284, 284, -348, -348, -348, -348, -348, -348, -348, -348, -348, -348, -348, -348, -348, -348, -348, -348, -348, - 238, -385, -134, 405, 302, 82, -55, 284, -38, -188, - -286, 239, 240, -385, 271, -188, 221, -188, -280, 159, - 16, -280, -277, 394, 392, 379, 384, -280, -280, -280, - -280, 285, 377, -343, 239, 36, 250, 394, 285, 377, - 285, 286, 285, 286, 387, 397, 285, -302, 15, 162, - 420, 382, 386, 278, 238, 279, 240, 396, 286, -302, - 90, -281, 159, 285, 394, 281, -280, -280, -308, -412, - -293, -291, -289, 230, 24, 142, 26, 28, 145, 177, - 130, 20, 146, 38, 232, 345, 249, 176, 245, 450, - 225, 73, 519, 421, 423, 419, 426, 452, 453, 420, - 380, 32, 14, 521, 29, 259, 25, 39, 170, 227, - 149, 522, 262, 27, 260, 118, 121, 524, 23, 76, - 254, 15, 247, 41, 17, 525, 526, 18, 243, 242, - 162, 239, 71, 12, 220, 30, 158, 67, 527, 137, - 528, 529, 530, 531, 131, 69, 159, 21, 659, 424, - 425, 34, 620, 507, 273, 172, 74, 60, 621, 143, - 422, 532, 533, 119, 534, 122, 77, 626, 139, 19, - 72, 43, 535, 274, 536, 244, 660, 537, 412, 538, - 160, 228, 449, 70, 161, 633, 539, 634, 237, 393, - 9, 454, 33, 258, 246, 129, 68, 540, 238, 148, - 455, 456, 241, 132, 120, 8, 136, 35, 13, 75, - 78, 427, 428, 429, 58, 128, 511, 147, 16, 541, - 413, 141, -381, 622, -308, -308, 33, 92, -407, -408, - -409, 511, 412, 241, -291, -188, -83, 612, 229, -84, - 618, 24, 236, -132, 394, -120, 177, 640, 623, 624, - 625, 622, 391, 630, 628, 626, 285, 627, 88, 139, - 141, 142, 4, -142, 158, -198, 151, 152, 153, 154, - 155, 156, 157, 162, 143, 145, 159, -243, 140, 163, - 164, 165, 166, 167, 168, 169, 171, 170, 172, 173, - 160, 161, 176, 223, 224, -152, -152, -152, -152, -213, - -219, -218, -412, -215, -381, -290, -297, -412, -412, -152, - -275, -412, -412, -148, -412, -412, -412, -222, -142, -412, - -412, -416, -412, -416, -416, -326, -412, -326, -412, -412, - -412, -412, -412, -412, -412, -412, -412, -412, -412, -412, - -412, -412, -412, -412, -412, -412, -412, -412, -412, -412, - -412, -412, -412, -412, -412, -412, -412, -412, -412, -412, - -412, -412, -412, -412, -412, -412, -412, -412, -412, -412, - -412, -412, -412, -412, -412, -412, -412, -412, -412, -412, - -412, -412, -412, -412, -412, -412, -412, -412, -412, -412, - -412, -412, -412, -412, -412, -412, -412, 221, -412, -412, - -412, -412, -412, -326, -326, -326, -326, -326, -412, -412, - -412, -412, -412, -412, -412, -412, -412, -412, -412, -412, - -412, -412, 103, 99, 102, 94, -217, 105, 90, 90, - 90, 90, -8, -9, -207, -412, -307, -395, -396, -191, - -188, -412, 302, -291, -291, 271, -232, -12, -8, -227, - -233, -229, -8, -77, -118, -131, 64, 65, -133, 25, - 39, 68, 66, 24, -413, 89, -413, -250, -413, 88, - -37, -253, 87, 62, 44, 90, 90, 88, 22, -228, - -230, -142, 15, -295, 4, -294, 26, -291, 90, 221, - 15, -189, 30, -188, -276, -276, 88, 91, 315, -266, - -268, 410, 412, 151, -296, -291, 90, 32, 89, 88, - -188, -315, -318, -320, -319, -321, -316, -317, 342, 343, - 177, 346, 348, 349, 350, 351, 352, 353, 354, 355, - 356, 359, 33, 261, 338, 339, 340, 341, 360, 361, - 362, 363, 365, 366, 367, 368, 323, 344, 509, 324, - 325, 326, 327, 328, 329, 331, 332, 335, 333, 334, - 336, 337, -382, -381, 87, 89, 88, -322, 87, -142, - -134, 238, -381, 239, 239, 239, -77, 449, -348, -348, - -348, 269, 20, -45, -42, -374, 19, -41, -42, 230, - 123, 124, 227, 87, -337, 87, -346, -382, -381, 87, - 137, 244, 136, -345, -342, -345, -346, -381, -215, -381, - 137, 137, -381, -381, -262, -291, -262, -262, 24, -262, - 24, -262, 24, 96, -291, -262, 24, -262, 24, -262, - 24, -262, 24, -262, 24, 32, 79, 80, 81, 32, - 83, 84, 85, -215, -381, -381, -215, -337, -215, -188, - -381, -269, 96, 96, 96, -348, -348, 96, 90, 90, - 90, -348, -348, 96, 90, -299, -297, 90, 90, -387, - 255, 299, 301, 96, 96, 96, 96, 32, 90, -388, - 32, 647, 646, 648, 649, 650, 90, 96, 32, 96, - 32, 96, -291, 87, -188, -140, 289, 225, 227, 230, - 77, 90, 305, 306, 303, 308, 309, 151, 45, 88, - 241, 238, -381, -282, 243, -282, -291, -298, -297, -289, - 90, -142, -344, 15, 162, -302, -302, -280, -188, -344, - -302, -280, -188, -280, -280, -280, -280, -302, -302, -302, - -280, -297, -297, -188, -188, -188, -188, -188, -188, -188, - -308, -281, -280, 622, 90, -274, 15, 77, -308, -308, - 88, 321, 413, 414, -306, 318, -79, -291, 90, -15, - -11, -23, -22, -24, 151, -15, 88, 511, -181, -188, - 622, 622, 622, 622, 622, 622, -142, -142, -142, -142, - 534, -205, 119, 143, 120, 121, -160, -206, -211, -213, - 106, 162, 145, 159, -243, -148, -152, -148, -148, -148, - -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, - -309, -291, 90, 177, -156, -155, 105, -404, -156, 508, - 88, -218, 221, -142, -142, -381, -142, -291, -128, -130, - -128, -142, -220, -221, 147, -215, -142, -413, -413, 96, - 105, 168, -124, 25, 39, -124, -124, -124, -124, -142, - -142, -142, -142, -142, -142, -142, -142, -142, -142, -124, - -291, -291, -117, -116, 431, 432, 433, 434, 436, 437, - 438, 441, 442, 446, 447, 430, 448, 435, 440, 443, - 444, 445, 439, 341, -142, -142, -142, -142, -142, -142, - -85, -142, 130, 131, 132, -207, -142, -148, -142, -142, - -142, -413, -142, -142, -142, -208, -207, -380, -379, -378, - -142, -142, -142, -142, -142, -142, -142, -142, -142, -142, - -142, -142, -142, -142, -142, -142, -142, -142, -142, -142, - -142, -142, -142, -207, -207, -142, -413, -142, -162, -146, - 96, -258, 105, 92, -142, -142, -129, -128, -293, -298, - -289, -290, -128, -129, -129, -128, -128, -142, -142, -142, - -142, -142, -142, -142, -142, -413, -142, -142, -142, -142, - -142, -250, -413, -207, 88, -397, 412, 413, 620, -300, - 274, -299, 26, -208, 90, 15, -260, 78, -291, -232, - -232, 64, 65, 60, -128, -133, -413, -36, 26, -252, - -291, 63, 90, -327, -269, 369, 370, 177, -142, -142, - 88, -231, 28, 29, -188, -294, 168, -298, -188, -261, - 274, -188, -166, -168, -169, -170, -191, -214, -412, -171, - -8, 530, 527, 15, -181, -182, -190, -297, -267, -310, - -266, 88, 411, 413, 414, 77, 122, -142, -328, 176, - -356, -355, -354, -337, -339, -340, -341, 89, -328, -333, - 375, 374, -322, -322, -322, -322, -322, -327, -327, -327, - -327, 87, 87, -322, -322, -322, -322, -330, 87, -330, - -330, -331, -330, 87, -331, -332, 87, -332, -367, -142, - -364, -363, -361, -362, 248, 101, 602, 558, 511, 551, - 592, 78, -359, -231, 96, -413, -140, -283, 243, -365, - -362, -381, -381, -381, -283, 91, 90, 91, 90, 91, - 90, -109, -59, -1, 659, 660, 661, 88, 20, -338, - -337, -58, 299, -370, -371, 274, -366, -360, -346, 137, - -345, -346, -346, -381, 88, 30, 127, 127, 127, 127, - 511, 227, 33, -284, 550, 143, 602, 558, -337, -58, - 241, 241, -309, -309, -309, 90, 90, -279, 655, -181, - -136, 291, 151, 280, 280, 238, 238, 293, -188, 304, - 307, 305, 306, 303, 308, 309, 24, 24, 24, 24, - 24, 292, 294, 296, 282, -188, -188, -282, 77, -183, - -188, 27, -297, -188, -280, -280, -188, -280, -280, -188, - -409, 322, -291, 356, 613, 614, 616, 615, -120, 412, - 88, 511, 23, -121, 23, -412, 119, 120, 121, -206, - -148, -152, -148, 142, 262, -412, -215, -413, -293, 26, - 88, 78, -413, 88, 88, -413, -413, 88, 15, -223, - -221, 149, -142, -413, 88, -413, -413, -413, -207, -142, - -142, -142, -142, -413, -413, -413, -413, -413, -413, -413, - -413, -413, -413, -207, 88, 88, 15, -313, 26, -413, - -413, -413, -413, -413, -222, -413, 15, -413, 78, 88, - 162, 88, -413, -413, -413, 88, 88, -413, -413, 88, - 88, -413, 88, 88, 88, -413, 88, 88, 88, 88, - -413, -413, -413, -413, 88, 88, 88, 88, 88, 88, - 88, 88, 88, 88, -413, -413, -413, 88, -92, 535, - -413, -413, 88, -413, 88, -413, -412, 221, -413, -413, - -413, -413, -413, 88, 88, 88, 88, 88, 88, -413, - -413, -413, 88, 88, -413, 88, -413, 88, -413, -396, - 619, 413, -195, -194, -192, 75, 242, 76, -412, -299, - -413, -156, -258, -259, -258, -200, -291, 96, 105, -234, - -165, -167, 15, -133, -213, 89, 88, -327, -238, -244, - -277, -291, 90, 177, -329, 177, -329, 369, 370, -230, - 221, -196, 16, -199, 33, 58, -11, -412, -412, 33, - 88, -184, -186, -185, -187, 67, 71, 73, 68, 69, - 70, 74, -304, 26, -8, -166, -8, -412, -188, -181, - -414, 15, 78, -414, 88, 221, -268, -271, 415, 412, - 418, -381, 90, -108, 88, -354, -341, -235, -137, 41, - -334, 376, -327, 518, -327, -336, 90, -336, 96, 96, - 96, 89, -48, -43, -44, 34, 82, -361, -348, 90, - 40, -348, -348, -291, 89, -231, -136, -188, 143, 77, - -365, -365, -365, -297, -2, 658, 664, 137, 87, 379, - 19, -252, 88, 89, -216, 300, 89, -110, -291, 89, - 87, -346, -346, -291, -412, 238, 32, 32, 602, 558, - 550, -58, -216, -215, -381, -328, 657, 656, 89, 240, - 298, -141, 426, -138, 90, 91, -188, -188, -188, -188, - -188, 230, 227, 402, -405, 310, -405, 283, 241, -181, - -188, 88, -82, 257, 252, -302, -302, 34, -188, 412, - 631, 629, -142, 142, 262, -160, -152, -148, -311, 177, - 342, 261, 340, 336, 356, 347, 374, 338, 375, 333, - 332, 331, -311, -309, -207, -130, -142, -142, 150, -142, - 148, -142, -413, -413, -413, -413, -413, -227, -142, -142, - -142, -413, 177, 342, 15, -142, -309, -142, -142, -142, - -142, -142, -378, -142, -207, -142, -207, -142, -142, -142, - -142, -142, -379, -379, -379, -379, -379, -207, -207, -207, - -207, -142, -412, -291, -95, -94, -93, 585, 242, -92, - -162, -95, -162, -129, -293, -142, -142, -142, -142, -142, - -142, -142, -142, -142, -142, -192, -342, -342, -342, -262, - 88, -273, 23, 15, 58, 58, -165, -196, -166, -133, - -291, -241, 612, -247, 47, -245, -246, 48, -242, 49, - 57, -329, -329, 168, -232, -142, -263, 77, -264, -272, - -215, -210, -212, -211, -412, -251, -413, -291, -262, -264, - -168, -169, -169, -168, -169, 67, 67, 67, 72, 67, - 72, 67, -185, -297, -413, -142, -300, 78, -166, -166, - -190, -297, 168, 412, 416, 417, -354, -403, 119, 143, - 32, 77, 372, 101, -401, 176, 547, 597, 602, 558, - 551, 592, -402, 244, 136, 137, 256, 26, 42, 89, - 88, 89, 88, 89, 89, 88, -285, -284, -44, -43, - -348, -348, 96, -381, 90, 90, 240, 27, -188, 77, - 77, 77, -111, 662, 96, 87, -3, 82, -142, 87, - 20, -337, -215, -372, -323, -373, -324, -325, -5, -6, - -349, -114, 58, 101, -62, 45, 239, 642, 643, 127, - -412, 655, -364, -252, -368, -370, -188, -145, -412, -144, - -146, -153, 166, 167, 261, 338, 339, -216, -188, -135, - 289, 297, 87, -139, 92, -384, 78, 280, 372, 280, - 90, -406, 311, 90, -406, -188, -82, -48, -188, -280, - -280, 34, -381, -413, -160, -152, -123, 162, 511, -314, - 517, -322, -322, -322, -332, -322, 328, -322, 328, -322, - -413, -413, -413, 88, -413, 23, -413, -142, 88, -119, - 454, 88, 88, -413, 87, 87, -142, -413, -413, -413, - 88, -413, -413, -413, -413, -413, 88, -413, -413, -413, - 88, -312, 603, -413, -413, -413, -413, -413, -413, -413, - -413, -413, -413, -413, -91, -292, -291, -92, 567, 567, - -413, -92, -224, 88, -413, -413, 88, -413, 88, 88, - -413, 88, -413, 88, -413, -413, -413, -413, 88, -193, - 23, -193, -193, -413, -258, -188, -196, -225, 17, -238, - 52, 348, -249, -248, 56, 48, -246, 20, 50, 20, - 31, -263, 88, 151, 88, -413, -413, 88, 58, 221, - -413, -196, -179, -178, 77, 78, -180, 77, -178, 67, - 67, -253, 88, -261, -166, -196, -196, 221, 119, -412, - -147, -159, -145, 13, 90, 90, -381, -400, 646, 647, - 32, 96, -348, -348, 137, 137, -188, 87, -327, 90, - -327, 96, 96, 32, 83, 84, 85, 32, 79, 80, - 81, -188, -188, -188, -188, -369, 87, 20, -142, 87, - 151, 89, -252, -252, 276, 162, -348, 640, 282, 282, - -348, -348, -348, -113, -112, 662, 89, -413, 88, -335, - 511, 514, -142, -154, -154, -253, 89, -377, 511, -383, - -291, -291, -291, -291, 96, 98, -413, 509, 74, 512, - -413, -327, -142, -142, -142, -232, 90, -142, -142, 96, - 96, -413, -142, -207, -142, -413, -176, -175, -177, 623, - 119, 32, -311, -413, -209, 274, -98, -97, -96, 15, - -413, -142, -142, -142, -142, -142, -142, -142, -412, 67, - 19, 17, -412, -412, -300, -225, -226, 18, 20, -239, - 54, -237, 53, -237, -248, 20, 20, 90, 20, 90, - 137, -272, -142, -212, 58, -11, -291, -210, -291, -227, - -142, 87, -142, -156, -196, -196, -142, -202, 478, 480, - 481, 482, 479, 484, 485, 486, 487, 488, 489, 490, - 491, 492, 493, 483, 457, 108, 110, 109, 458, 459, - 460, 342, 505, 506, 500, 503, 504, 502, 501, 357, - 358, 461, 462, 463, 111, 112, 113, 114, 115, 116, - 117, 464, 467, 465, 468, 469, 470, 475, 476, 471, - 472, 473, 474, 477, 363, 362, 361, 494, 495, 496, - 497, 498, 499, 604, 605, 606, 607, 608, 609, 610, - 611, 90, 90, 87, -142, 89, 89, -253, -368, -59, - 89, -254, -252, 96, 89, 277, -211, -412, 90, -348, - -348, -348, 96, 96, -299, -413, 88, -291, -402, -370, - 515, 515, -413, 26, -376, -375, -293, 87, 78, 63, - 510, 513, -413, -413, 88, -413, -413, -413, 89, 89, - -413, -413, -413, 88, -413, -175, -177, -413, 77, -156, - -227, 20, -95, 299, 301, -95, -413, 88, -413, -413, - 88, -413, 88, -413, -413, -255, -413, -291, 244, 20, - 20, -255, -255, -195, -226, -105, -104, -103, 541, -142, - -207, -240, 55, 77, 122, 90, 90, 90, 13, -210, - 221, -232, -252, -173, 379, -227, -413, -252, 89, 26, - 89, 664, 137, 89, -211, -122, -412, 273, -299, 90, - 90, -112, -115, -11, 88, 151, -252, -188, 63, -142, - -207, -413, 77, 522, 623, -90, -89, -86, 634, 660, - -207, -92, -92, -142, -142, -142, 88, -413, -413, -413, - -105, 88, -102, -101, -291, 77, 122, -264, -291, 89, - -413, -412, -232, 89, -236, -11, 87, -3, 273, -323, - -373, -324, -325, -5, -6, -349, -80, 511, -375, -353, - -293, 90, 96, 89, 511, -413, -413, -88, 145, 632, - 600, -143, -154, -151, 220, -413, 88, -413, 88, -413, - 88, -291, 244, -103, 88, 26, -300, -174, -172, -291, - 564, -393, -392, 507, -403, -399, 119, 143, 101, -401, - 602, 558, 128, 129, -80, -142, 87, -413, -81, 288, - 619, -384, 512, -88, 633, 578, 553, 578, 553, -142, - -142, -142, -101, -412, -413, 88, 23, -315, -61, 575, - -390, -391, 77, -394, 385, 574, 595, 119, 90, 89, - -252, 249, -377, 513, 142, -413, 88, -413, 88, -413, - -91, -172, 571, -328, -156, -391, 77, -390, 77, 14, - 13, -4, 663, 89, 290, -88, -142, -142, -413, -60, - 27, -173, -389, 257, 252, 255, 33, -389, 96, -4, - -413, -413, 575, 251, 32, 119, -156, -176, -175, -175, + 240, -385, -136, 409, 304, 82, -56, 286, -39, -188, + -286, 241, 242, -385, 273, -188, 223, -188, 689, -280, + 160, 16, -280, -277, 398, 396, 383, 388, -280, -280, + -280, -280, 287, 381, -343, 241, 36, 252, 398, 287, + 381, 287, 288, 287, 288, 391, 401, 287, -302, 15, + 163, 425, 386, 390, 280, 240, 281, 242, 400, 288, + -302, 90, -281, 160, 287, 398, 283, -280, -280, -308, + -413, -293, -291, -289, 232, 24, 143, 26, 28, 146, + 179, 130, 20, 147, 38, 234, 347, 251, 178, 247, + 470, 227, 73, 586, 426, 433, 424, 432, 436, 472, + 473, 425, 384, 32, 14, 588, 29, 261, 25, 39, + 172, 229, 150, 589, 264, 27, 262, 118, 121, 591, + 23, 76, 256, 15, 249, 41, 17, 592, 593, 18, + 245, 244, 163, 241, 71, 12, 222, 30, 159, 67, + 594, 138, 133, 595, 596, 597, 598, 131, 69, 160, + 21, 726, 434, 435, 34, 687, 574, 275, 174, 74, + 60, 688, 144, 430, 599, 600, 119, 601, 122, 77, + 693, 140, 19, 72, 43, 602, 276, 603, 246, 727, + 604, 416, 605, 161, 230, 469, 70, 162, 700, 606, + 701, 239, 397, 9, 474, 33, 260, 248, 129, 68, + 440, 607, 240, 149, 243, 132, 120, 8, 137, 35, + 13, 75, 78, 437, 438, 439, 58, 128, 578, 148, + 16, 608, 417, 142, -381, 689, -308, -308, 33, 92, + -407, -408, -409, 578, 416, 243, -291, -188, -85, 679, + 231, -86, 685, 24, 238, -134, 398, -122, 179, 707, + 690, 691, 692, 689, 395, 697, 695, 693, 287, 694, + 88, 140, 142, 143, 4, -144, 159, -198, 152, 153, + 154, 155, 156, 157, 158, 164, 163, 144, 146, 160, + -243, 141, 165, 166, 167, 168, 169, 170, 171, 173, + 172, 174, 175, 161, 162, 178, 225, 226, -152, -152, + -152, -152, -213, -219, -218, -413, -215, -381, -290, -297, + -413, -413, -152, -275, -413, -149, -413, -413, -413, -413, + -222, -144, -413, -413, -417, -413, -417, -417, -417, -326, + -413, -326, -326, -413, -413, -413, -413, -413, -413, -413, + -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, + -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, + -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, + -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, + -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, + -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, + -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, + -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, + -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, + -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, + -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, + -413, -413, -413, -413, -413, -413, 223, -413, -413, -413, + -413, -413, -326, -326, -326, -326, -326, -326, -413, -413, + -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, + -413, -413, 103, 99, 102, 94, -217, 105, 90, 90, + 90, 90, -31, -32, -207, -413, -307, -395, -396, -191, + -188, -413, 304, -291, -291, 273, 96, -232, -34, -31, + -227, -233, -229, -31, -79, -120, -133, 64, 65, -135, + 25, 39, 68, 66, 24, -414, 89, -414, -250, -414, + 88, -38, -253, 87, 62, 44, 90, 90, 88, 22, + -228, -230, -144, 15, -295, 4, -294, 26, -291, 90, + 223, 15, -189, 30, -188, -276, -276, 88, 91, 317, + -266, -268, 414, 416, 152, -296, -291, 90, 32, 89, + 88, -188, -315, -318, -320, -319, -321, -316, -317, 344, + 345, 179, 348, 350, 351, 352, 353, 354, 355, 356, + 357, 358, 361, 33, 263, 340, 341, 342, 343, 362, + 363, 364, 365, 367, 368, 369, 370, 325, 346, 576, + 326, 327, 328, 329, 330, 331, 333, 334, 337, 335, + 336, 338, 339, -382, -381, 87, 89, 88, -322, 87, + -144, -136, 240, -381, 241, 241, 241, -79, 469, -348, + -348, -348, 271, 20, -46, -43, -374, 19, -42, -43, + 232, 123, 124, 229, 87, -337, 87, -346, -382, -381, + 87, 138, 246, 137, -345, -342, -345, -346, -381, -215, + -381, 138, 138, -381, -381, -262, -291, -262, -262, 24, + -262, 24, -262, 24, 96, -291, -262, 24, -262, 24, + -262, 24, -262, 24, -262, 24, 32, 79, 80, 81, + 32, 83, 84, 85, -215, -381, -381, -215, -337, -215, + -188, -381, -269, 96, 96, 96, -348, -348, 96, 90, + 90, 90, -348, -348, 96, 90, -299, -297, 90, 90, + -387, 257, 301, 303, 96, 96, 96, 96, 32, 90, + -388, 32, 714, 713, 715, 716, 717, 90, 96, 32, + 96, 32, 96, -291, 87, -188, -142, 291, 227, 229, + 232, 77, 90, 307, 308, 305, 310, 311, 152, 45, + 88, 243, 240, -381, -282, 245, -282, -291, -298, -297, + -289, 243, 380, 90, -144, -344, 15, 163, -302, -302, + -280, -188, -344, -302, -280, -188, -280, -280, -280, -280, + -302, -302, -302, -280, -297, -297, -188, -188, -188, -188, + -188, -188, -188, -308, -281, -280, 689, 90, -274, 15, + 77, -308, -308, 88, 323, 417, 418, -306, 320, -81, + -291, 90, -10, -29, -18, -17, -19, 152, -10, 88, + 578, -181, -188, 689, 689, 689, 689, 689, 689, -144, + -144, -144, -144, 601, -205, 119, 144, 120, 121, -160, + -144, -206, -211, -213, 106, 163, 146, 160, -243, -149, + -152, -149, -149, -149, -149, -149, -149, 222, -149, 222, + -149, -149, -149, -149, -149, -149, -309, -291, 90, 179, + -156, -155, 105, -404, -156, 575, 88, -218, 223, -144, + -144, -381, -118, 442, 443, 444, 445, 447, 448, 449, + 452, 453, 457, 458, 441, 459, 446, 451, 454, 455, + 456, 450, 343, -144, -130, -132, -130, -144, -220, -221, + 148, -215, -144, -414, -414, 96, 170, -126, 25, 39, + -126, -126, -126, -126, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -126, -144, -119, 441, 459, 446, + 451, 454, 455, 456, 450, 343, 460, 461, 462, 463, + 464, 465, 466, 467, 468, -119, -118, -144, -144, -144, + -144, -144, -144, -87, -144, 130, 131, 132, -207, -144, + -149, -144, -144, -144, -414, -144, -144, -144, -208, -207, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -380, -379, -378, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -207, -207, -207, -207, -207, -144, -414, -144, + -162, -147, 96, -258, 105, 92, -144, -144, -144, -144, + -144, -144, -131, -130, -293, -298, -289, -290, -130, -131, + -131, -130, -130, -144, -144, -144, -144, -144, -144, -144, + -144, -414, -144, -144, -144, -144, -144, -250, -414, -207, + 88, -397, 416, 417, 687, -300, 276, -299, 26, -208, + 90, 15, -260, 78, -291, -232, -232, 64, 65, 60, + -130, -135, -414, -37, 26, -252, -291, 63, 90, -327, + -269, 371, 372, 179, -144, -144, 88, -231, 28, 29, + -188, -294, 170, -298, -188, -261, 276, -188, -166, -168, + -169, -170, -191, -214, -413, -171, -31, 597, 594, 15, + -181, -182, -190, -297, -267, -310, -266, 88, 415, 417, + 418, 77, 122, -144, -328, 178, -356, -355, -354, -337, + -339, -340, -341, 89, -328, -333, 377, 376, -322, -322, + -322, -322, -322, -327, -327, -327, -327, 87, 87, -322, + -322, -322, -322, -330, 87, -330, -330, -331, -330, 87, + -331, -332, 87, -332, -367, -144, -364, -363, -361, -362, + 250, 101, 669, 625, 578, 618, 659, 78, -359, -231, + 96, -414, -142, -283, 245, -365, -362, -381, -381, -381, + -283, 91, 90, 91, 90, 91, 90, -111, -60, -1, + 726, 727, 728, 88, 20, -338, -337, -59, 301, -370, + -371, 276, -366, -360, -346, 138, -345, -346, -346, -381, + 88, 30, 127, 127, 127, 127, 578, 229, 33, -284, + 617, 144, 669, 625, -337, -59, 243, 243, -309, -309, + -309, 90, 90, -279, 722, -181, -138, 293, 152, 282, + 282, 240, 240, 295, -188, 306, 309, 307, 308, 305, + 310, 311, 24, 24, 24, 24, 24, 294, 296, 298, + 284, -188, -188, -282, 77, -183, -188, 27, -297, 90, + 90, -188, -280, -280, -188, -280, -280, -188, -409, 324, + -291, 358, 680, 681, 683, 682, -122, 416, 88, 578, + 23, -123, 23, -413, 119, 120, 121, -206, -149, -152, + -149, 143, 264, -149, -149, -413, -215, -414, -293, 26, + 88, 78, -414, 168, 88, 88, -414, -414, 88, 15, + -223, -221, 150, -144, -414, 88, -414, -414, -207, -144, + -144, -144, -144, -414, -414, -414, -414, -414, -414, -414, + -414, -414, -414, -207, -414, 88, 88, 15, -313, 26, + -414, -414, -414, -414, -414, -222, -414, 15, -414, 78, + 88, 163, 88, -414, -414, -414, 88, 88, -414, -414, + 88, -414, 88, -414, -414, -414, -414, -414, -414, 88, + -414, 88, -414, -414, -414, 88, -414, 88, -414, -414, + 88, -414, 88, -414, 88, -414, 88, -414, 88, -414, + 88, -414, 88, -414, 88, -414, 88, -414, 88, -414, + 88, -414, 88, -414, 88, -414, 88, -414, 88, -414, + 88, -414, 88, -414, 88, -414, -414, -414, 88, -414, + 88, -414, 88, -414, -414, 88, -414, 88, -414, 88, + -414, 88, 88, -414, 88, 88, 88, -414, 88, 88, + 88, 88, -414, -414, -414, -414, 88, 88, 88, 88, + 88, 88, 88, 88, 88, 88, -414, -414, -414, -414, + -414, -414, 88, -94, 602, -414, -414, 88, -414, 88, + 88, 88, 88, 88, -414, -413, 223, -414, -414, -414, + -414, -414, 88, 88, 88, 88, 88, 88, -414, -414, + -414, 88, 88, -414, 88, -414, 88, -414, -396, 686, + 417, -195, -194, -192, 75, 244, 76, -413, -299, -414, + -156, -258, -259, -258, -200, -291, 96, 105, -234, -165, + -167, 15, -135, -213, 89, 88, -327, -238, -244, -277, + -291, 90, 179, -329, 179, -329, 371, 372, -230, 223, + -196, 16, -199, 33, 58, -29, -413, -413, 33, 88, + -184, -186, -185, -187, 67, 71, 73, 68, 69, 70, + 74, -304, 26, -31, -166, -31, -413, -188, -181, -415, + 15, 78, -415, 88, 223, -268, -271, 419, 416, 422, + -381, 90, -110, 88, -354, -341, -235, -139, 41, -334, + 378, -327, 585, -327, -336, 90, -336, 96, 96, 96, + 89, -49, -44, -45, 34, 82, -361, -348, 90, 40, + -348, -348, -291, 89, -231, -138, -188, 144, 77, -365, + -365, -365, -297, -2, 725, 731, 138, 87, 383, 19, + -252, 88, 89, -216, 302, 89, -112, -291, 89, 87, + -346, -346, -291, -413, 240, 32, 32, 669, 625, 617, + -59, -216, -215, -381, -328, 724, 723, 89, 242, 300, + -143, 436, -140, 90, 91, -188, -188, -188, -188, -188, + 232, 229, 406, -405, 312, -405, 285, 243, -181, -188, + 88, -84, 259, 254, -302, -302, 34, -188, 416, 698, + 696, -144, 143, 264, -160, -152, -118, -118, -149, -311, + 179, 344, 263, 342, 338, 358, 349, 376, 340, 377, + 335, 334, 333, -311, -309, -149, -207, -132, -144, -144, + 151, -144, 149, -144, -414, -414, -414, -414, -414, -227, + -144, -144, -144, -414, 179, 344, 15, -144, -309, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -378, -144, -207, -144, -207, -144, + -144, -144, -144, -144, -379, -379, -379, -379, -379, -207, + -207, -207, -207, -144, -413, -291, -97, -96, -95, 652, + 244, -94, -162, -97, -162, 222, -144, 222, 222, 222, + -144, -131, -293, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -192, -342, -342, -342, -262, 88, -273, + 23, 15, 58, 58, -165, -196, -166, -135, -291, -241, + 679, -247, 47, -245, -246, 48, -242, 49, 57, -329, + -329, 170, -232, -144, -263, 77, -264, -272, -215, -210, + -212, -211, -413, -251, -414, -291, -262, -264, -168, -169, + -169, -168, -169, 67, 67, 67, 72, 67, 72, 67, + -185, -297, -414, -144, -300, 78, -166, -166, -190, -297, + 170, 416, 420, 421, -354, -403, 119, 144, 32, 77, + 374, 101, -401, 178, 614, 664, 669, 625, 618, 659, + -402, 246, 137, 138, 258, 26, 42, 89, 88, 89, + 88, 89, 89, 88, -285, -284, -45, -44, -348, -348, + 96, -381, 90, 90, 242, 27, -188, 77, 77, 77, + -113, 729, 96, 87, -3, 82, -144, 87, 20, -337, + -215, -372, -323, -373, -324, -325, -5, -6, -349, -116, + 58, 101, -63, 45, 241, 709, 710, 127, -413, 722, + -364, -252, -368, -370, -188, -148, -413, -159, -146, -145, + -147, -153, 168, 169, 263, 340, 341, -216, -188, -137, + 291, 299, 87, -141, 92, -384, 78, 282, 374, 282, + 90, -406, 313, 90, -406, -188, -84, -49, -188, -280, + -280, 34, -381, -414, -160, -152, -125, 163, 578, -314, + 584, -322, -322, -322, -332, -322, 330, -322, 330, -322, + -414, -414, -414, 88, -414, 23, -414, -144, 88, -121, + 474, 88, 88, -414, 87, 87, -144, -414, -414, -414, + 88, -414, -414, -414, -414, -414, -414, -414, -414, -414, + -414, -414, -414, -414, 88, -414, 88, -414, 88, -414, + 88, -414, 88, -414, 88, -414, 88, -414, 88, -414, + 88, -414, 88, -414, 88, -414, 88, -414, 88, -414, + 88, -414, 88, -414, 88, -414, -414, 88, -414, -414, + -414, 88, -414, 88, -414, 88, -414, -414, -414, 88, + -312, 670, -414, -414, -414, -414, -414, -414, -414, -414, + -414, -414, -414, -93, -292, -291, -94, 634, 634, -414, + -94, -224, 88, -149, -414, -149, -149, -149, -414, -414, + -414, 88, -414, 88, 88, -414, 88, -414, 88, -414, + -414, -414, -414, 88, -193, 23, -193, -193, -414, -258, + -188, -196, -225, 17, -238, 52, 350, -249, -248, 56, + 48, -246, 20, 50, 20, 31, -263, 88, 152, 88, + -414, -414, 88, 58, 223, -414, -196, -179, -178, 77, + 78, -180, 77, -178, 67, 67, -253, 88, -261, -166, + -196, -196, 223, 119, -413, -148, 13, 90, 90, -381, + -400, 713, 714, 32, 96, -348, -348, 138, 138, -188, + 87, -327, 90, -327, 96, 96, 32, 83, 84, 85, + 32, 79, 80, 81, -188, -188, -188, -188, -369, 87, + 20, -144, 87, 152, 89, -252, -252, 278, 163, -348, + 707, 284, 284, -348, -348, -348, -115, -114, 729, 89, + -414, 88, -335, 578, 581, -144, -154, -154, -253, 89, + -377, 578, -383, -291, -291, -291, -291, 96, 98, -414, + 576, 74, 579, -414, -327, -144, -144, -144, -232, 90, + -144, -144, 96, 96, -414, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, + -144, -144, -144, -144, -144, -207, -144, -414, -176, -175, + -177, 690, 119, 32, -311, -414, -209, 276, -100, -99, + -98, 15, -414, -144, -118, -118, -118, -118, -144, -144, + -144, -144, -144, -144, -413, 67, 19, 17, -413, -413, + -300, -225, -226, 18, 20, -239, 54, -237, 53, -237, + -248, 20, 20, 90, 20, 90, 138, -272, -144, -212, + 58, -29, -291, -210, -291, -227, -144, 87, -144, -156, + -196, -196, -144, -202, 498, 500, 501, 502, 499, 504, + 505, 506, 507, 508, 509, 510, 511, 512, 513, 503, + 514, 475, 476, 477, 108, 110, 109, 478, 479, 480, + 344, 526, 527, 521, 524, 525, 523, 522, 359, 360, + 481, 544, 545, 549, 548, 546, 547, 550, 553, 554, + 555, 556, 557, 558, 560, 559, 551, 552, 529, 528, + 530, 531, 532, 533, 534, 535, 537, 536, 538, 539, + 540, 541, 542, 543, 561, 562, 563, 564, 565, 567, + 566, 571, 570, 568, 569, 573, 572, 482, 483, 111, + 112, 113, 114, 115, 116, 117, 484, 487, 485, 488, + 489, 490, 495, 496, 491, 492, 493, 494, 497, 370, + 368, 369, 365, 364, 363, 423, 428, 429, 431, 515, + 516, 517, 518, 519, 520, 671, 672, 673, 674, 675, + 676, 677, 678, 90, 90, 87, -144, 89, 89, -253, + -368, -60, 89, -254, -252, 96, 89, 279, -211, -413, + 90, -348, -348, -348, 96, 96, -299, -414, 88, -291, + -402, -370, 582, 582, -414, 26, -376, -375, -293, 87, + 78, 63, 577, 580, -414, -414, 88, -414, -414, -414, + 89, 89, -414, -414, -414, -414, -414, -414, -414, -414, + -414, -414, -414, -414, -414, -414, -414, -414, -414, -414, + -414, -414, -414, -414, 88, -414, -175, -177, -414, 77, + -156, -227, 20, -97, 301, 303, -97, -414, -414, -414, + -414, -414, 88, -414, -414, 88, -414, 88, -414, -414, + -255, -414, -291, 246, 20, 20, -255, -255, -195, -226, + -107, -106, -105, 608, -144, -207, -240, 55, 77, 122, + 90, 90, 90, 13, -210, 223, -232, -252, -173, 383, + -227, -414, -252, 89, 26, 89, 731, 138, 89, -211, + -124, -413, 275, -299, 90, 90, -114, -117, -29, 88, + 152, -252, -188, 63, -144, -207, -414, 77, 589, 690, + -92, -91, -88, 701, 727, -207, -94, -94, -144, -144, + -144, 88, -414, -414, -414, -107, 88, -104, -103, -291, + 77, 122, -264, -291, 89, -414, -413, -232, 89, -236, + -29, 87, -3, 275, -323, -373, -324, -325, -5, -6, + -349, -82, 578, -375, -353, -297, -293, 90, 96, 89, + 578, -414, -414, -90, 146, 699, 667, -154, 222, -414, + 88, -414, 88, -414, 88, -291, 246, -105, 88, 26, + -300, -174, -172, -291, 631, -393, -392, 574, -403, -399, + 119, 144, 101, -401, 669, 625, 128, 129, -82, -144, + 87, -414, -83, 290, 686, 223, -384, 579, -90, 700, + 645, 620, 645, 620, -149, -144, -144, -144, -103, -413, + -414, 88, 23, -315, -62, 642, -390, -391, 77, -394, + 389, 641, 662, 119, 90, 89, -252, 251, -298, -377, + 580, 143, -118, -414, 88, -414, 88, -414, -93, -172, + 638, -328, -156, -391, 77, -390, 77, 14, 13, -4, + 730, 89, 292, -90, 645, 620, -144, -144, -414, -61, + 27, -173, -389, 259, 254, 257, 33, -389, 96, -4, + -414, -414, 642, 253, 32, 119, -156, -176, -175, -175, } var yyDef = [...]int{ - 871, -2, -2, 873, 2, 4, 5, 6, 7, 8, + 877, -2, -2, 879, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, - 29, 30, 31, 32, 33, 34, 35, 36, 37, 70, - 72, 73, 871, 871, 871, 0, 871, 0, 0, 871, - -2, -2, 871, 1482, 0, 871, 0, 0, -2, 786, - 792, 0, 801, -2, 0, 0, 871, 871, 2037, 2037, - 866, 0, 0, 0, 0, 0, 871, 871, 871, 871, - 1348, 50, 871, 0, 85, 86, 821, 822, 823, 65, - 0, 2035, 872, 1, 3, 71, 75, 0, 0, 0, - 58, 1357, 0, 78, 0, 0, 875, 0, 0, 1465, - 871, 871, 0, 126, 127, 0, 0, 0, -2, 130, - -2, 159, 160, 161, 0, 166, 599, 522, 574, 520, - 559, -2, 508, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 525, 398, 398, 0, 0, - -2, 508, 508, 508, 1467, 0, 0, 0, 556, 460, - 398, 398, 398, 0, 398, 398, 398, 398, 0, 0, - 398, 398, 398, 398, 398, 398, 398, 398, 398, 398, - 398, 398, 398, 398, 398, 398, 398, 1375, 165, 1483, - 1480, 1481, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, - 1643, 1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, - 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 1661, 1662, - 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1672, - 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, 1681, 1682, - 1683, 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, - 1693, 1694, 1695, 1696, 1697, 1698, 1699, 1700, 1701, 1702, - 1703, 1704, 1705, 1706, 1707, 1708, 1709, 1710, 1711, 1712, - 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1722, - 1723, 1724, 1725, 1726, 1727, 1728, 1729, 1730, 1731, 1732, - 1733, 1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, 1742, - 1743, 1744, 1745, 1746, 1747, 1748, 1749, 1750, 1751, 1752, - 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, - 1763, 1764, 1765, 1766, 1767, 1768, 1769, 1770, 1771, 1772, - 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, - 1783, 1784, 1785, 1786, 1787, 1788, 1789, 1790, 1791, 1792, - 1793, 1794, 1795, 1796, 1797, 1798, 1799, 1800, 1801, 1802, - 1803, 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, 1812, - 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1820, 1821, 1822, - 1823, 1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, - 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1842, - 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1851, 1852, - 1853, 1854, 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, - 1863, 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, 1872, - 1873, 1874, 1875, 1876, 1877, 1878, 1879, 1880, 1881, 1882, - 1883, 1884, 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, - 1893, 1894, 1895, 1896, 1897, 1898, 1899, 1900, 1901, 1902, - 1903, 1904, 1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, - 1913, 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, 1922, - 1923, 1924, 1925, 1926, 1927, 1928, 1929, 1930, 1931, 1932, - 1933, 1934, 1935, 1936, 1937, 1938, 1939, 1940, 1941, 1942, - 1943, 1944, 1945, 1946, 1947, 1948, 1949, 1950, 1951, 1952, - 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1962, - 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, - 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, - 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1992, - 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, - 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, - 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, - 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2030, 2031, 2032, - 2033, 2034, 0, 1459, 0, 712, 974, 0, 775, 775, - 0, 775, 775, 775, 775, 0, 0, 0, 724, 0, - 0, 0, 0, 772, 0, 740, 741, 0, 772, 0, - 747, 778, 0, 0, 753, 775, 775, 756, 2038, 0, - 2038, 2038, 1450, 0, 769, 767, 781, 782, 40, 785, - 788, 789, 790, 791, 794, 0, 805, 808, 1476, 1477, - 0, 810, 817, 834, 835, 0, 867, 868, 45, 1122, - 0, 996, 1001, 1012, 1027, 1028, 1029, 1030, 1031, 1033, - 1034, 1035, 0, 0, 0, 0, 1040, 1041, 0, 0, - 0, 0, 0, 1103, 1049, 0, 0, 0, 0, 1321, - 0, 0, 1282, 1282, 1137, 1282, 1284, 1284, 1684, 1820, - 1828, 1948, 1646, 1651, 1652, 1653, 1941, 1942, 1943, 1944, - 1983, 1984, 1988, 1744, 0, 0, 0, 2034, 1781, 1789, - 1790, 1814, 1913, 1969, 1663, 1809, 1878, 1741, 1763, 1764, - 1895, 1896, 1785, 1786, 1767, 1779, 1782, 1770, 1771, 1773, - 1775, 1780, 1787, 1793, 1772, 1792, 1791, 0, 1768, 1769, - 1774, 1784, 1788, 1776, 1777, 1778, 1783, 1794, 1877, 1805, - 1876, 0, 0, 0, 0, 0, 1221, 1222, 1223, 1224, - 0, 0, 0, 0, 0, 0, 0, 290, 291, 1334, - 1335, 43, 44, 1121, 1446, 1284, 1284, 1284, 1284, 1284, - 1063, 1064, 1065, 1066, 1067, 1091, 1092, 1098, 1099, 1890, - 1891, 1892, 1893, 1725, 1978, 1733, 1734, 1873, 1874, 1746, - 1747, 2009, 2010, -2, -2, -2, 231, 232, 233, 234, - 235, 236, 237, 238, 0, 1688, 1959, 1960, 227, 0, - 0, 295, 296, 292, 293, 294, 1105, 1106, 248, 249, - 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, - 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, - 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, - 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, - 2037, 0, 844, 0, 0, 0, 0, 0, 1357, 0, - 1349, 1348, 63, 0, 871, -2, 0, 0, 0, 0, - 47, 0, 52, 931, 874, 77, 76, 1397, 0, 0, - 0, 59, 1358, 67, 69, 1359, 0, 876, 877, 0, - 907, 911, 0, 0, 0, 1466, 1465, 1465, 102, 0, - 0, 103, 123, 124, 125, 0, 0, 109, 110, 1452, - 1453, 0, 0, 177, 178, 0, 41, 425, 0, 173, - 0, 418, 357, 0, 1375, 0, 0, 0, 0, 0, - 871, 0, 1460, 154, 155, 162, 163, 164, 398, 398, - 398, 571, 0, 0, 165, 165, 529, 530, 531, 0, - 0, -2, 423, 0, 509, 0, 0, 412, 412, 416, - 414, 415, 0, 0, 0, 0, 0, 0, 0, 0, - 548, 0, 549, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 660, 0, 399, 0, 569, 570, 461, 0, - 0, 0, 0, 0, 0, 0, 0, 1468, 1469, 0, - 546, 547, 0, 0, 0, 398, 398, 0, 0, 0, - 0, 398, 398, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 153, 1388, 0, 0, 0, -2, 0, 704, 0, - 0, 0, 1461, 1461, 0, 711, 0, 713, 714, 0, - 0, 715, 0, 772, 772, 770, 771, 717, 718, 719, - 720, 775, 0, 0, 407, 408, 409, 772, 775, 0, - 775, 775, 775, 775, 772, 772, 772, 775, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 2038, 778, 775, - 0, 748, 0, 749, 750, 751, 754, 755, 757, 2039, - 2040, 1478, 1479, 1486, 1487, 1488, 1489, 1490, 1491, 1492, - 1493, 1494, 1495, 1496, 1497, 1498, 1499, 1500, 1501, 1502, - 1503, 1504, 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, - 1513, 1514, 1515, 1516, 1517, 1518, 1519, 1520, 1521, 1522, - 1523, 1524, 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, - 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, 1541, 1542, - 1543, 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551, 1552, - 1553, 1554, 1555, 1556, 1557, 1558, 1559, 1560, 1561, 1562, - 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, 1572, - 1573, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1581, 1582, - 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590, 1591, 1592, - 1593, 1594, 1595, 1596, 1597, 1598, 1599, 1600, 1601, 1602, - 1603, 1604, 1605, 1606, 1607, 1608, 1609, 1610, 1611, 1612, - 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, - 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632, - 1633, 1634, 2038, 2038, 761, 765, 1451, 787, 793, 795, - 796, 0, 0, 806, 809, 828, 49, 1732, 816, 49, - 818, 819, 820, 846, 847, 852, 0, 0, 0, 0, - 858, 859, 860, 0, 0, 863, 864, 865, 0, 0, - 0, 0, 0, 994, 0, 0, 1111, 1112, 1113, 1114, - 1115, 1116, 1117, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1013, 1014, 0, 0, 0, 1036, 1037, 1038, 1039, 1042, - 0, 1054, 0, 1056, 1330, -2, 0, 0, 0, 1047, - 1048, 0, 0, 0, 0, 0, 0, 0, 1322, 0, - 0, 1135, 0, 1136, 1138, 1139, 0, 1140, 881, 881, - 881, 881, 881, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 881, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1471, 141, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 891, 0, 0, 891, - 891, 0, 0, 220, 221, 222, 223, 224, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 239, 240, 241, 242, 243, 244, 297, 245, - 246, 247, 1121, 0, 0, 0, 46, 836, 837, 0, - 957, 1471, 0, 0, 887, 0, 57, 66, 68, 1357, - 61, 1357, 0, 893, 0, 0, -2, -2, 894, 900, - 901, 902, 903, 904, 54, 2036, 55, 0, 74, 0, - 48, 0, 0, 0, 0, 371, 1400, 0, 0, 1350, - 1351, 1354, 0, 908, 1826, 912, 0, 914, 915, 0, - 0, 100, 0, 973, 0, 0, 0, 111, 0, 113, - 114, 0, 0, 0, 382, 1454, 1455, 1456, -2, 405, - 0, 382, 366, 305, 306, 307, 357, 309, 357, 357, - 357, 357, 371, 371, 371, 371, 340, 341, 342, 343, - 344, 0, 0, 326, 357, 357, 357, 357, 347, 348, - 349, 350, 351, 352, 353, 354, 310, 311, 312, 313, - 314, 315, 316, 317, 318, 359, 359, 359, 359, 359, - 363, 363, 0, 42, 0, 386, 0, 1354, 0, 0, - 1388, 1463, 1473, 0, 0, 0, 1463, 132, 0, 0, - 0, 572, 610, 523, 560, 573, 0, 526, 527, -2, - 0, 0, 508, 0, 510, 0, 406, 0, -2, 0, - 416, 0, 412, 416, 413, 416, 404, 417, 550, 551, - 552, 0, 554, 555, 640, 943, 0, 0, 0, 0, - 0, 646, 647, 648, 0, 650, 651, 652, 653, 654, - 655, 656, 657, 658, 659, 561, 562, 563, 564, 565, - 566, 567, 568, 0, 0, 0, 0, 510, 0, 557, - 0, 0, 462, 463, 464, 0, 0, 467, 468, 469, - 470, 0, 0, 473, 474, 475, 960, 961, 476, 477, - 502, 503, 504, 478, 479, 480, 481, 482, 483, 484, - 496, 497, 498, 499, 500, 501, 485, 486, 487, 488, - 489, 490, 493, 0, 147, 1379, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1461, 0, 0, 0, 0, 890, 975, 1484, 1485, - 776, 777, 0, 410, 411, 775, 775, 721, 762, 0, - 775, 725, 763, 726, 728, 727, 729, 742, 743, 775, - 732, 773, 774, 733, 734, 735, 736, 737, 738, 739, - 758, 744, 745, 746, 779, 0, 783, 784, 759, 760, - 0, 0, 799, 800, 0, 807, 831, 829, 830, 832, - 824, 825, 826, 827, 0, 833, 0, 0, 849, 96, - 854, 855, 856, 857, 869, 862, 1123, 991, 992, 993, - 0, 995, 998, 0, 1107, 1109, 1000, 1002, 1118, 1119, - 1120, 0, 0, 0, 0, 0, 1006, 1010, 1015, 1016, - 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, - 1032, 1298, 1299, 1300, 1051, 298, 299, 0, 1052, 0, - 0, 0, 0, 0, 0, 0, 1122, 1053, 0, 905, - 0, 0, 1328, 1325, 0, 0, 0, 1283, 1285, 0, - 0, 0, 0, 882, 883, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1261, 1262, 1263, 1264, 1265, 1266, 1267, - 1268, 1269, 1270, 1271, 1272, 1273, 1274, 1275, 1276, 1277, - 1278, 1279, 1280, 1281, 1301, 0, 0, 0, 0, 0, - 1321, 0, 1058, 1059, 1060, 0, 0, 0, 0, 0, - 0, 1178, 0, 0, 0, 0, 1472, 0, 142, 143, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1225, - 1226, 1227, 1228, 39, 0, 0, 0, 892, 1332, 0, - -2, -2, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1250, 0, 0, 0, 0, - 0, 0, 1444, 0, 0, 839, 840, 842, 0, 977, - 0, 958, 0, 0, 845, 0, 886, 0, 889, 60, - 62, 898, 899, 0, 916, 895, 56, 51, 0, 0, - 935, 1398, 371, 1420, 0, 380, 380, 377, 1360, 1361, - 0, 1353, 1355, 1356, 79, 913, 909, 0, 989, 0, - 0, 972, 0, 919, 921, 922, 923, 955, 0, 926, - 927, 0, 0, 0, 0, 0, 98, 974, 104, 0, - 112, 0, 0, 117, 118, 105, 106, 107, 108, 0, - 599, -2, 457, 179, 181, 182, 183, 174, -2, 369, - 367, 368, 308, 371, 371, 334, 335, 336, 337, 338, - 339, 0, 0, 327, 328, 329, 330, 319, 0, 320, - 321, 322, 361, 0, 323, 324, 0, 325, 424, 0, - 1362, 387, 388, 390, 398, 0, 393, 394, 0, 398, - 398, 0, 419, 420, 0, 1354, 1379, 0, 0, 0, - 1474, 1473, 1473, 1473, 0, 167, 168, 169, 170, 171, - 172, 635, 0, 0, 611, 633, 634, 165, 0, 0, - 175, 512, 511, 0, 667, 0, 422, 0, 0, 416, - 416, 401, 402, 553, 0, 0, 642, 643, 644, 645, - 0, 0, 0, 539, 451, 0, 540, 541, 510, 512, - 0, 0, 382, 465, 466, 471, 472, 491, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 586, - 587, 588, 591, 593, 514, 597, 590, 592, 594, 514, - 598, 1376, 1377, 1378, 0, 0, 705, 0, 0, 448, - 94, 1462, 710, 772, 731, 764, 772, 723, 730, 752, - 797, 798, 803, 811, 812, 813, 814, 815, 853, 0, - 0, 0, 0, 861, 0, 0, 999, 1108, 1110, 1003, - 0, 1007, 1011, 0, 0, 0, 1057, 1055, 1332, 0, - 0, 0, 1104, 0, 0, 1126, 1127, 0, 0, 0, - 1326, 0, 0, 1133, 0, 1286, 1287, 1141, 0, 0, - 0, 0, 0, 1147, 1148, 1149, 1150, 1151, 1152, 1153, - 1154, 1155, 1156, 1348, 0, 0, 0, 0, 0, 1162, - 1163, 1164, 1165, 1166, 0, 1168, 0, 1169, 0, 0, - 0, 0, 1176, 1177, 1179, 0, 0, 1182, 1183, 0, - 0, 1184, 0, 0, 0, 1188, 0, 0, 0, 0, - 1197, 1198, 1199, 1200, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1211, 1212, 1213, 0, 1215, 0, - 1086, 0, 0, 1086, 0, 1124, 891, 0, 1288, 1289, - 1290, 1291, 1292, 0, 0, 0, 0, 0, 0, 1248, - 1249, 1251, 0, 0, 1254, 0, 1256, 0, 1445, 838, - 841, 843, 929, 978, 979, 0, 0, 0, 0, 959, - 1470, 884, 885, 888, 937, 0, 1336, 0, 0, 916, - 989, 917, 0, 896, 53, 932, 0, 1402, 1401, 1414, - 1427, 380, 380, 374, 375, 381, 376, 378, 379, 1352, - 0, 1357, 0, 1438, 0, 0, 1430, 0, 0, 0, - 0, 0, 0, 0, 0, 962, 0, 0, 965, 0, - 0, 0, 0, 956, 927, 0, 928, 0, -2, 0, - 0, 92, 93, 0, 0, 0, 115, 116, 0, 0, - 122, 383, 384, 156, 165, 459, 180, 432, 0, 0, - 304, 370, 331, 332, 333, 0, 355, 0, 0, 0, - 0, 453, 128, 1366, 1365, 398, 398, 389, 0, 392, - 0, 0, 0, 1475, 358, 421, 0, 146, 0, 0, - 0, 0, 0, 152, 605, 0, 0, 612, 0, 0, - 0, 521, 0, 532, 533, 0, 639, -2, 701, 386, - 0, 400, 403, 944, 0, 0, 534, 0, 537, 538, - 452, 512, 543, 544, 558, 545, 494, 495, 492, 0, - 0, 1389, 1390, 1395, 1393, 1394, 133, 579, 581, 580, - 584, 0, 0, 0, 516, 0, 516, 577, 0, 448, - 1362, 0, 709, 449, 450, 775, 775, 848, 97, 0, - 851, 0, 0, 0, 0, 1004, 1008, 1293, 1319, 357, - 357, 1306, 357, 363, 1309, 357, 1311, 357, 1314, 357, - 1317, 1318, 0, 0, 0, 906, 0, 0, 1132, 1329, - 0, 0, 1142, 1143, 1144, 1145, 1146, 1323, 0, 0, - 0, 1161, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 144, 145, 0, 0, 0, 0, 0, 0, - 1259, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1081, 1085, 0, 1087, 1088, 0, 0, 1217, - 0, 0, 1229, 0, 1333, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 980, 985, 985, 985, 0, - 0, 0, 1457, 1458, 1337, 1338, 989, 1339, 918, 897, - 936, 1420, 0, 1413, 0, -2, 1422, 0, 0, 0, - 1428, 372, 373, 910, 80, 990, 83, 0, 1438, 1447, - 0, 1429, 1440, 1442, 0, 0, 0, 1434, 0, 989, - 920, 951, 953, 0, 948, 963, 964, 966, 0, 968, - 0, 970, 971, 931, 925, 0, 100, 0, 989, 989, - 99, 0, 976, 119, 120, 121, 458, 184, 189, 0, - 0, 0, 194, 0, 196, 0, 0, 0, 201, 202, - 398, 398, 433, 0, 301, 303, 0, 0, 187, 371, - 0, 371, 0, 362, 364, 0, 434, 454, 1363, 1364, - 0, 0, 391, 395, 396, 397, 0, 1464, 148, 0, - 0, 0, 608, 0, 636, 0, 0, 0, 0, 0, - 0, 176, 513, 668, 669, 670, 671, 672, 673, 674, - 675, 676, 0, 398, 0, 0, 0, 398, 398, 398, - 0, 693, 385, 0, 0, 664, 661, 535, 0, 225, - 226, 228, 0, 0, 0, 0, 0, 542, 931, 1380, - 1381, 1382, 0, 1392, 1396, 136, 0, 0, 0, 0, - 589, 595, 0, 515, 596, 706, 707, 708, 95, 716, - 722, 850, 870, 997, 1005, 1009, 0, 0, 0, 0, - 1320, 1304, 371, 1307, 1308, 1310, 1312, 1313, 1315, 1316, - 1045, 1046, 1050, 0, 1129, 0, 1131, 1327, 0, 1357, - 0, 0, 0, 1160, 0, 0, 0, 1171, 1170, 1172, - 0, 1174, 1175, 1180, 1181, 1185, 0, 1187, 1189, 1190, - 0, 0, 0, 1201, 1202, 1203, 1204, 1205, 1206, 1207, - 1208, 1209, 1210, 1214, 0, 1079, 1082, 1216, 1089, 1090, - 1095, 1219, 0, 0, 1125, 1231, 0, 1236, 0, 0, - 1242, 0, 1246, 0, 1252, 1253, 1255, 1257, 0, 0, - 0, 0, 0, 957, 938, 64, 1339, 1341, 0, 1407, - 1405, 1405, 1415, 1416, 0, 0, 1423, 0, 0, 0, - 0, 84, 0, 0, 0, 1443, 0, 0, 0, 0, - 101, 1348, 945, 952, 0, 0, 946, 0, 947, 967, - 969, 924, 0, 989, 989, 90, 91, 0, 190, 0, - 192, 218, 219, 0, 195, 197, 198, 199, 205, 206, - 207, 200, 0, 0, 300, 302, 0, 0, 345, 356, - 346, 0, 0, 1367, 1368, 1369, 1370, 1371, 1372, 1373, - 1374, 931, 149, 150, 151, 600, 0, 610, 0, 933, - 0, 603, 0, 524, 0, 0, 0, 398, 398, 398, - 0, 0, 0, 0, 678, 0, 0, 641, 0, 649, - 0, 0, 0, 229, 230, 0, 1391, 578, 0, 134, - 135, 0, 0, 583, 517, 518, 1043, 0, 0, 0, - 1044, 1305, 0, 0, 0, 0, 1324, 0, 0, 0, - 0, 1167, 0, 0, 0, 1193, 0, 0, 0, 630, - 631, 0, 1260, 1084, 1348, 0, 1086, 1096, 1097, 0, - 1086, 1230, 0, 0, 0, 0, 0, 0, 0, 986, - 0, 0, 0, 0, 977, 1341, 1346, 0, 0, 1410, - 0, 1403, 1406, 1404, 1417, 0, 0, 1424, 0, 1426, - 0, 1448, 1449, 1441, 0, 1433, 1436, 1432, 1435, 1357, - 949, 0, 954, 0, 1348, 89, 0, 193, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 203, 204, 0, 0, 360, 365, 0, 0, 0, - 601, 0, 934, 613, 604, 0, 691, 0, 695, 0, - 0, 0, 698, 699, 700, 677, 0, 681, 426, 665, - 662, 663, 536, 0, 137, 138, 0, 0, 0, 1294, - 0, 1297, 1128, 1130, 0, 1157, 1158, 1159, 1302, 1303, - 1173, 1186, 1191, 0, 1194, 0, 0, 1195, 0, 632, - 1075, 0, 0, 1093, 1094, 0, 1232, 0, 1237, 1238, - 0, 1243, 0, 1247, 1258, 0, 982, 939, 940, 987, - 988, 0, 0, 930, 1346, 82, 1347, 1344, 0, 1342, - 1340, 1399, 0, 1408, 1409, 1418, 1419, 1425, 0, 1431, - 0, 87, 0, 0, 0, 1357, 191, 0, 210, 0, - 609, 0, 612, 602, 689, 690, 0, 702, 694, 696, - 697, 679, -2, 1383, 0, 0, 0, 585, 1295, 0, - 0, 1196, 0, 628, 629, 1083, 1076, 0, 1061, 1062, - 1080, 1218, 1220, 0, 0, 0, 0, 981, 983, 984, - 81, 0, 1343, 1101, 0, 1411, 1412, 1439, 1437, 950, - 957, 0, 88, 439, 432, 1383, 0, 0, 0, 682, - 683, 684, 685, 686, 687, 688, 575, 1385, 139, 140, - 505, 506, 507, 133, 0, 1134, 1192, 1077, 0, 0, - 0, 0, 1073, 1074, 0, 1233, 0, 1239, 0, 1244, - 0, 941, 942, 1345, 0, 0, 614, 0, 616, 0, - -2, 427, 440, 0, 185, 211, 212, 0, 0, 215, - 216, 217, 208, 209, 129, 0, 0, 703, 0, 1386, - 1387, 136, 0, 0, 1068, 1069, 1070, 1071, 1072, 0, - 0, 0, 1102, 1081, 615, 0, 0, 382, 0, 625, - 428, 429, 0, 435, 436, 437, 438, 213, 214, 637, - 0, 0, 582, 1296, 0, 1234, 0, 1240, 0, 1245, - 0, 617, 618, 626, 0, 430, 0, 431, 0, 0, - 0, 606, 0, 637, 1384, 1078, 0, 0, 1100, 0, - 627, 623, 441, 443, 444, 0, 0, 442, 638, 607, - 1235, 1241, 0, 445, 446, 447, 619, 620, 621, 622, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 72, 74, 75, 877, 877, 877, 0, 877, 0, + 0, 877, -2, -2, 877, 1608, 0, 877, 0, 0, + 0, -2, 792, 798, 0, 807, -2, 0, 0, 877, + 877, 2232, 2232, 872, 0, 0, 0, 0, 0, 877, + 877, 877, 877, 1613, 1474, 52, 877, 0, 87, 88, + 827, 828, 829, 67, 0, 2230, 878, 1, 3, 73, + 77, 0, 0, 0, 60, 1483, 0, 80, 0, 0, + 881, 0, 0, 1591, 877, 877, 0, 128, 129, 0, + 0, 0, -2, 132, -2, 161, 162, 163, 0, 168, + 603, 526, 578, 524, 563, -2, 512, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 529, + 401, 401, 0, 0, -2, 512, 512, 512, 1593, 0, + 0, 0, 560, 463, 401, 401, 401, 0, 401, 401, + 401, 401, 0, 0, 401, 401, 401, 401, 401, 401, + 401, 401, 401, 401, 401, 401, 401, 401, 401, 401, + 401, 1501, 167, 1609, 1606, 1607, 1766, 1767, 1768, 1769, + 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, 1778, 1779, + 1780, 1781, 1782, 1783, 1784, 1785, 1786, 1787, 1788, 1789, + 1790, 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, 1799, + 1800, 1801, 1802, 1803, 1804, 1805, 1806, 1807, 1808, 1809, + 1810, 1811, 1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, + 1820, 1821, 1822, 1823, 1824, 1825, 1826, 1827, 1828, 1829, + 1830, 1831, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, + 1840, 1841, 1842, 1843, 1844, 1845, 1846, 1847, 1848, 1849, + 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, 1858, 1859, + 1860, 1861, 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, + 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1879, + 1880, 1881, 1882, 1883, 1884, 1885, 1886, 1887, 1888, 1889, + 1890, 1891, 1892, 1893, 1894, 1895, 1896, 1897, 1898, 1899, + 1900, 1901, 1902, 1903, 1904, 1905, 1906, 1907, 1908, 1909, + 1910, 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, 1919, + 1920, 1921, 1922, 1923, 1924, 1925, 1926, 1927, 1928, 1929, + 1930, 1931, 1932, 1933, 1934, 1935, 1936, 1937, 1938, 1939, + 1940, 1941, 1942, 1943, 1944, 1945, 1946, 1947, 1948, 1949, + 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, + 1960, 1961, 1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, + 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, + 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, + 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, + 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, + 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, + 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, + 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2037, 2038, 2039, + 2040, 2041, 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, + 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, + 2060, 2061, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, + 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, + 2080, 2081, 2082, 2083, 2084, 2085, 2086, 2087, 2088, 2089, + 2090, 2091, 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, + 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, + 2110, 2111, 2112, 2113, 2114, 2115, 2116, 2117, 2118, 2119, + 2120, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2129, + 2130, 2131, 2132, 2133, 2134, 2135, 2136, 2137, 2138, 2139, + 2140, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149, + 2150, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2158, 2159, + 2160, 2161, 2162, 2163, 2164, 2165, 2166, 2167, 2168, 2169, + 2170, 2171, 2172, 2173, 2174, 2175, 2176, 2177, 2178, 2179, + 2180, 2181, 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, + 2190, 2191, 2192, 2193, 2194, 2195, 2196, 2197, 2198, 2199, + 2200, 2201, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, + 2210, 2211, 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2219, + 2220, 2221, 2222, 2223, 2224, 2225, 2226, 2227, 2228, 2229, + 0, 1585, 0, 716, 980, 0, 0, 781, 781, 0, + 781, 781, 781, 781, 0, 0, 0, 730, 0, 0, + 0, 0, 778, 0, 746, 747, 0, 778, 0, 753, + 784, 0, 0, 759, 781, 781, 762, 2233, 0, 2233, + 2233, 1576, 0, 775, 773, 787, 788, 42, 791, 794, + 795, 796, 797, 800, 0, 811, 814, 1602, 1603, 0, + 816, 823, 840, 841, 0, 873, 874, 47, 1130, 0, + 1002, 0, 1008, -2, 1019, 1036, 1037, 1038, 1039, 1040, + 1042, 1043, 1044, 0, 0, 0, 0, 1049, 1050, 0, + 0, 0, 0, 0, 1111, 0, 0, 0, 0, 1447, + 0, 0, 1409, 1409, 1145, 1409, 1409, 1411, 1411, 1411, + 1818, 1956, 1964, 2140, 1779, 1785, 1786, 1787, 2086, 2087, + 2088, 2089, 2177, 2178, 2182, 1880, 1774, 2153, 2154, 0, + 2229, 1917, 1925, 1926, 1950, 2050, 2163, 1797, 1945, 2014, + 1877, 1899, 1900, 2032, 2033, 1921, 1922, 1903, 2092, 2094, + 2110, 2111, 2096, 2098, 2107, 2113, 2118, 2097, 2109, 2114, + 2127, 2131, 2134, 2135, 2136, 2104, 2102, 2115, 2119, 2121, + 2123, 2129, 2132, 2105, 2103, 2116, 2120, 2122, 2124, 2130, + 2133, 2091, 2095, 2099, 2108, 2126, 2106, 2125, 2100, 2112, + 2117, 2128, 2101, 2093, 1915, 1918, 1906, 1907, 1909, 1911, + 1916, 1923, 1929, 1908, 1928, 1927, 0, 1904, 1905, 1910, + 1920, 1924, 1912, 1913, 1914, 1919, 1930, 1970, 1969, 1968, + 2013, 1941, 2012, 0, 0, 0, 0, 0, 1769, 1823, + 1824, 2137, 1331, 1332, 1333, 1334, 0, 0, 0, 0, + 0, 0, 0, 293, 294, 1460, 1461, 46, 1129, 1572, + 1411, 1411, 1411, 1411, 1411, 1411, 1071, 1072, 1073, 1074, + 1075, 1099, 1100, 1106, 1107, 2027, 2028, 2029, 2030, 1861, + 2172, 1869, 1870, 2009, 2010, 1882, 1883, 2203, 2204, -2, + -2, -2, 234, 235, 236, 237, 238, 239, 240, 241, + 0, 1822, 2151, 2152, 230, 0, 0, 298, 299, 295, + 296, 297, 1113, 1114, 251, 252, 253, 254, 255, 256, + 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, + 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, + 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, 289, 290, 291, 292, 2232, 0, 850, 0, + 0, 0, 0, 0, 0, 1614, 1615, 1483, 0, 1475, + 1474, 65, 0, 877, -2, 0, 0, 0, 0, 49, + 0, 54, 937, 880, 79, 78, 1523, 0, 0, 0, + 61, 1484, 69, 71, 1485, 0, 882, 883, 0, 913, + 917, 0, 0, 0, 1592, 1591, 1591, 104, 0, 0, + 105, 125, 126, 127, 0, 0, 111, 112, 1578, 1579, + 45, 0, 0, 179, 180, 0, 43, 428, 0, 175, + 0, 421, 360, 0, 1501, 0, 0, 0, 0, 0, + 877, 0, 1586, 156, 157, 164, 165, 166, 401, 401, + 401, 575, 0, 0, 167, 167, 533, 534, 535, 0, + 0, -2, 426, 0, 513, 0, 0, 415, 415, 419, + 417, 418, 0, 0, 0, 0, 0, 0, 0, 0, + 552, 0, 553, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 664, 0, 402, 0, 573, 574, 464, 0, + 0, 0, 0, 0, 0, 0, 0, 1594, 1595, 0, + 550, 551, 0, 0, 0, 401, 401, 0, 0, 0, + 0, 401, 401, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 155, 1514, 0, 0, 0, -2, 0, 708, 0, + 0, 0, 1587, 1587, 0, 715, 0, 717, 0, 720, + 0, 0, 721, 0, 778, 778, 776, 777, 723, 724, + 725, 726, 781, 0, 0, 410, 411, 412, 778, 781, + 0, 781, 781, 781, 781, 778, 778, 778, 781, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 2233, 784, + 781, 0, 754, 0, 755, 756, 757, 760, 761, 763, + 2234, 2235, 1604, 1605, 1616, 1617, 1618, 1619, 1620, 1621, + 1622, 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, + 1632, 1633, 1634, 1635, 1636, 1637, 1638, 1639, 1640, 1641, + 1642, 1643, 1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, + 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 1661, + 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, + 1672, 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, 1681, + 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, + 1692, 1693, 1694, 1695, 1696, 1697, 1698, 1699, 1700, 1701, + 1702, 1703, 1704, 1705, 1706, 1707, 1708, 1709, 1710, 1711, + 1712, 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, + 1722, 1723, 1724, 1725, 1726, 1727, 1728, 1729, 1730, 1731, + 1732, 1733, 1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, + 1742, 1743, 1744, 1745, 1746, 1747, 1748, 1749, 1750, 1751, + 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, + 1762, 1763, 1764, 1765, 2233, 2233, 767, 771, 1577, 793, + 799, 801, 802, 0, 0, 812, 815, 834, 51, 1868, + 822, 51, 824, 825, 826, 852, 853, 858, 0, 0, + 0, 0, 864, 865, 866, 0, 0, 869, 870, 871, + 0, 0, 0, 0, 0, 1000, 0, 0, 1119, 1120, + 1121, 1122, 1123, 1124, 1125, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1020, 1021, 0, 0, 0, 1045, 1046, + 1047, 1048, 1051, 0, 1062, 0, 1064, 1456, -2, 0, + 0, 0, 1056, 1057, 0, 0, 0, 0, 0, 0, + 0, 1448, 0, 0, 1143, 0, 1144, 1146, 1147, 1148, + 0, 1149, 1150, 887, 887, 887, 887, 887, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 887, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1597, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 143, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 897, 0, 0, 897, 897, + 0, 0, 222, 223, 224, 225, 226, 227, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 242, 243, 244, 245, 246, 247, 300, 248, + 249, 250, 1129, 0, 0, 0, 48, 842, 843, 0, + 963, 1597, 0, 0, 893, 0, 1612, 59, 68, 70, + 1483, 63, 1483, 0, 899, 0, 0, -2, -2, 900, + 906, 907, 908, 909, 910, 56, 2231, 57, 0, 76, + 0, 50, 0, 0, 0, 0, 374, 1526, 0, 0, + 1476, 1477, 1480, 0, 914, 1962, 918, 0, 920, 921, + 0, 0, 102, 0, 979, 0, 0, 0, 113, 0, + 115, 116, 0, 0, 0, 385, 1580, 1581, 1582, -2, + 408, 0, 385, 369, 308, 309, 310, 360, 312, 360, + 360, 360, 360, 374, 374, 374, 374, 343, 344, 345, + 346, 347, 0, 0, 329, 360, 360, 360, 360, 350, + 351, 352, 353, 354, 355, 356, 357, 313, 314, 315, + 316, 317, 318, 319, 320, 321, 362, 362, 362, 362, + 362, 366, 366, 0, 44, 0, 389, 0, 1480, 0, + 0, 1514, 1589, 1599, 0, 0, 0, 1589, 134, 0, + 0, 0, 576, 614, 527, 564, 577, 0, 530, 531, + -2, 0, 0, 512, 0, 514, 0, 409, 0, -2, + 0, 419, 0, 415, 419, 416, 419, 407, 420, 554, + 555, 556, 0, 558, 559, 644, 949, 0, 0, 0, + 0, 0, 650, 651, 652, 0, 654, 655, 656, 657, + 658, 659, 660, 661, 662, 663, 565, 566, 567, 568, + 569, 570, 571, 572, 0, 0, 0, 0, 514, 0, + 561, 0, 0, 465, 466, 467, 0, 0, 470, 471, + 472, 473, 0, 0, 476, 477, 478, 966, 967, 479, + 480, 505, 506, 507, 481, 482, 483, 484, 485, 486, + 487, 499, 500, 501, 502, 503, 504, 488, 489, 490, + 491, 492, 493, 496, 0, 149, 1505, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1587, 0, 0, 0, 0, 896, 981, 1610, + 1611, 0, 0, 782, 783, 0, 413, 414, 781, 781, + 727, 768, 0, 781, 731, 769, 732, 734, 733, 735, + 748, 749, 781, 738, 779, 780, 739, 740, 741, 742, + 743, 744, 745, 764, 750, 751, 752, 785, 0, 789, + 790, 765, 766, 0, 0, 805, 806, 0, 813, 837, + 835, 836, 838, 830, 831, 832, 833, 0, 839, 0, + 0, 855, 98, 860, 861, 862, 863, 875, 868, 1131, + 997, 998, 999, 0, 1001, 1005, 0, 1115, 1117, 1007, + 1003, 1009, 1126, 1127, 1128, 0, 0, 0, 0, 0, + 1013, 1017, 1022, 1023, 1024, 1025, 1026, 0, 1027, 0, + 1030, 1031, 1032, 1033, 1034, 1035, 1041, 1424, 1425, 1426, + 1060, 301, 302, 0, 1061, 0, 0, 0, 0, 0, + 0, 0, 0, 1371, 1372, 1373, 1374, 1375, 1376, 1377, + 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, + 1388, 1389, 1390, 1130, 0, 911, 0, 0, 1454, 1451, + 0, 0, 0, 1410, 1412, 0, 0, 0, 888, 889, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1391, 1392, 1393, + 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, + 1404, 1405, 1406, 1407, 1408, 0, 0, 1427, 0, 0, + 0, 0, 0, 1447, 0, 1066, 1067, 1068, 0, 0, + 0, 0, 0, 0, 1189, 0, 0, 0, 0, 1598, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 144, 145, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1335, 1336, 1337, 1338, 41, 0, 0, 0, 0, + 0, 0, 0, 898, 1458, 0, -2, -2, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1360, 0, 0, 0, 0, 0, 0, 1570, 0, + 0, 845, 846, 848, 0, 983, 0, 964, 0, 0, + 851, 0, 892, 0, 895, 62, 64, 904, 905, 0, + 922, 901, 58, 53, 0, 0, 941, 1524, 374, 1546, + 0, 383, 383, 380, 1486, 1487, 0, 1479, 1481, 1482, + 81, 919, 915, 0, 995, 0, 0, 978, 0, 925, + 927, 928, 929, 961, 0, 932, 933, 0, 0, 0, + 0, 0, 100, 980, 106, 0, 114, 0, 0, 119, + 120, 107, 108, 109, 110, 0, 603, -2, 460, 181, + 183, 184, 185, 176, -2, 372, 370, 371, 311, 374, + 374, 337, 338, 339, 340, 341, 342, 0, 0, 330, + 331, 332, 333, 322, 0, 323, 324, 325, 364, 0, + 326, 327, 0, 328, 427, 0, 1488, 390, 391, 393, + 401, 0, 396, 397, 0, 401, 401, 0, 422, 423, + 0, 1480, 1505, 0, 0, 0, 1600, 1599, 1599, 1599, + 0, 169, 170, 171, 172, 173, 174, 639, 0, 0, + 615, 637, 638, 167, 0, 0, 177, 516, 515, 0, + 671, 0, 425, 0, 0, 419, 419, 404, 405, 557, + 0, 0, 646, 647, 648, 649, 0, 0, 0, 543, + 454, 0, 544, 545, 514, 516, 0, 0, 385, 468, + 469, 474, 475, 494, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 590, 591, 592, 595, 597, + 518, 601, 594, 596, 598, 518, 602, 1502, 1503, 1504, + 0, 0, 709, 0, 0, 451, 96, 1588, 714, 718, + 719, 778, 737, 770, 778, 729, 736, 758, 803, 804, + 809, 817, 818, 819, 820, 821, 859, 0, 0, 0, + 0, 867, 0, 0, 1006, 1116, 1118, 1010, 0, 1014, + 1018, 0, 0, 0, 0, 0, 1065, 1063, 1458, 0, + 0, 0, 1112, 0, 0, 0, 1134, 1135, 0, 0, + 0, 1452, 0, 0, 1141, 0, 1413, 1151, 0, 0, + 0, 0, 0, 1157, 1158, 1159, 1160, 1161, 1162, 1163, + 1164, 1165, 1166, 1474, 1168, 0, 0, 0, 0, 0, + 1173, 1174, 1175, 1176, 1177, 0, 1179, 0, 1180, 0, + 0, 0, 0, 1187, 1188, 1190, 0, 0, 1193, 1194, + 0, 1196, 0, 1198, 1199, 1200, 1201, 1202, 1203, 0, + 1205, 0, 1207, 1208, 1209, 0, 1211, 0, 1213, 1214, + 0, 1216, 0, 1218, 0, 1221, 0, 1224, 0, 1227, + 0, 1230, 0, 1233, 0, 1236, 0, 1239, 0, 1242, + 0, 1245, 0, 1248, 0, 1251, 0, 1254, 0, 1257, + 0, 1260, 0, 1263, 0, 1266, 1267, 1268, 0, 1270, + 0, 1272, 0, 1275, 1276, 0, 1278, 0, 1281, 0, + 1284, 0, 0, 1285, 0, 0, 0, 1289, 0, 0, + 0, 0, 1298, 1299, 1300, 1301, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1312, 1313, 1314, 1315, + 1316, 1317, 0, 1319, 0, 1094, 0, 0, 1094, 0, + 0, 0, 0, 0, 1132, 897, 0, 1414, 1415, 1416, + 1417, 1418, 0, 0, 0, 0, 0, 0, 1358, 1359, + 1361, 0, 0, 1364, 0, 1366, 0, 1571, 844, 847, + 849, 935, 984, 985, 0, 0, 0, 0, 965, 1596, + 890, 891, 894, 943, 0, 1462, 0, 0, 922, 995, + 923, 0, 902, 55, 938, 0, 1528, 1527, 1540, 1553, + 383, 383, 377, 378, 384, 379, 381, 382, 1478, 0, + 1483, 0, 1564, 0, 0, 1556, 0, 0, 0, 0, + 0, 0, 0, 0, 968, 0, 0, 971, 0, 0, + 0, 0, 962, 933, 0, 934, 0, -2, 0, 0, + 94, 95, 0, 0, 0, 117, 118, 0, 0, 124, + 386, 387, 158, 167, 462, 182, 435, 0, 0, 307, + 373, 334, 335, 336, 0, 358, 0, 0, 0, 0, + 456, 130, 1492, 1491, 401, 401, 392, 0, 395, 0, + 0, 0, 1601, 361, 424, 0, 148, 0, 0, 0, + 0, 0, 154, 609, 0, 0, 616, 0, 0, 0, + 525, 0, 536, 537, 0, 643, -2, 705, 389, 0, + 403, 406, 950, 0, 0, 538, 0, 541, 542, 455, + 516, 547, 548, 562, 549, 497, 498, 495, 0, 0, + 1515, 1516, 1521, 1519, 1520, 135, 583, 585, 584, 588, + 0, 0, 0, 520, 0, 520, 581, 0, 451, 1488, + 0, 713, 452, 453, 781, 781, 854, 99, 0, 857, + 0, 0, 0, 0, 1011, 1015, 1028, 1029, 1419, 1445, + 360, 360, 1432, 360, 366, 1435, 360, 1437, 360, 1440, + 360, 1443, 1444, 0, 0, 1058, 0, 912, 0, 0, + 1140, 1455, 0, 0, 1152, 1153, 1154, 1155, 1156, 1449, + 0, 0, 0, 1172, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 146, 147, 0, 0, 0, 0, + 0, 0, 1369, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1089, 1093, 0, 1095, 1096, 0, + 0, 1321, 0, 0, 1339, 0, 0, 0, 0, 0, + 0, 0, 1459, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 986, 991, 991, 991, 0, 0, 0, + 1583, 1584, 1463, 1464, 995, 1465, 924, 903, 942, 1546, + 0, 1539, 0, -2, 1548, 0, 0, 0, 1554, 375, + 376, 916, 82, 996, 85, 0, 1564, 1573, 0, 1555, + 1566, 1568, 0, 0, 0, 1560, 0, 995, 926, 957, + 959, 0, 954, 969, 970, 972, 0, 974, 0, 976, + 977, 937, 931, 0, 102, 0, 995, 995, 101, 0, + 982, 121, 122, 123, 461, 186, 191, 0, 0, 0, + 196, 0, 198, 0, 0, 0, 203, 204, 401, 401, + 436, 0, 304, 306, 0, 0, 189, 374, 0, 374, + 0, 365, 367, 0, 437, 457, 1489, 1490, 0, 0, + 394, 398, 399, 400, 0, 1590, 150, 0, 0, 0, + 612, 0, 640, 0, 0, 0, 0, 0, 0, 178, + 517, 672, 673, 674, 675, 676, 677, 678, 679, 680, + 0, 401, 0, 0, 0, 401, 401, 401, 0, 697, + 388, 0, 0, 668, 665, 539, 0, 220, 221, 228, + 229, 231, 0, 0, 0, 0, 0, 546, 937, 1506, + 1507, 1508, 0, 1518, 1522, 138, 0, 0, 0, 0, + 593, 599, 0, 519, 600, 710, 711, 712, 97, 722, + 728, 856, 876, 1004, 1012, 1016, 0, 0, 0, 0, + 1446, 1430, 374, 1433, 1434, 1436, 1438, 1439, 1441, 1442, + 1054, 1055, 1059, 0, 1137, 0, 1139, 1453, 0, 1483, + 0, 0, 0, 1171, 0, 0, 0, 1182, 1181, 1183, + 0, 1185, 1186, 1191, 1192, 1195, 1197, 1204, 1206, 1210, + 1212, 1215, 1217, 1219, 0, 1222, 0, 1225, 0, 1228, + 0, 1231, 0, 1234, 0, 1237, 0, 1240, 0, 1243, + 0, 1246, 0, 1249, 0, 1252, 0, 1255, 0, 1258, + 0, 1261, 0, 1264, 0, 1269, 1271, 0, 1274, 1277, + 1279, 0, 1282, 0, 1286, 0, 1288, 1290, 1291, 0, + 0, 0, 1302, 1303, 1304, 1305, 1306, 1307, 1308, 1309, + 1310, 1311, 1318, 0, 1087, 1090, 1320, 1097, 1098, 1103, + 1323, 0, 0, 0, 1326, 0, 0, 0, 1330, 1133, + 1341, 0, 1346, 0, 0, 1352, 0, 1356, 0, 1362, + 1363, 1365, 1367, 0, 0, 0, 0, 0, 963, 944, + 66, 1465, 1467, 0, 1533, 1531, 1531, 1541, 1542, 0, + 0, 1549, 0, 0, 0, 0, 86, 0, 0, 0, + 1569, 0, 0, 0, 0, 103, 1474, 951, 958, 0, + 0, 952, 0, 953, 973, 975, 930, 0, 995, 995, + 92, 93, 0, 192, 0, 194, 0, 197, 199, 200, + 201, 207, 208, 209, 202, 0, 0, 303, 305, 0, + 0, 348, 359, 349, 0, 0, 1493, 1494, 1495, 1496, + 1497, 1498, 1499, 1500, 937, 151, 152, 153, 604, 0, + 614, 0, 939, 0, 607, 0, 528, 0, 0, 0, + 401, 401, 401, 0, 0, 0, 0, 682, 0, 0, + 645, 0, 653, 0, 0, 0, 232, 233, 0, 1517, + 582, 0, 136, 137, 0, 0, 587, 521, 522, 1052, + 0, 0, 0, 1053, 1431, 0, 0, 0, 0, 1450, + 0, 0, 0, 0, 1178, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1294, 0, 0, + 0, 634, 635, 0, 1370, 1092, 1474, 0, 1094, 1104, + 1105, 0, 1094, 1340, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 992, 0, 0, 0, 0, + 983, 1467, 1472, 0, 0, 1536, 0, 1529, 1532, 1530, + 1543, 0, 0, 1550, 0, 1552, 0, 1574, 1575, 1567, + 0, 1559, 1562, 1558, 1561, 1483, 955, 0, 960, 0, + 1474, 91, 0, 195, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 205, 206, 0, 0, 363, 368, 0, + 0, 0, 605, 0, 940, 617, 608, 0, 695, 0, + 699, 0, 0, 0, 702, 703, 704, 681, 0, 685, + 429, 669, 666, 667, 540, 0, 139, 140, 0, 0, + 0, 1420, 0, 1423, 1136, 1138, 0, 1167, 1169, 1170, + 1428, 1429, 1184, 1220, 1223, 1226, 1229, 1232, 1235, 1238, + 1241, 1244, 1247, 1250, 1253, 1256, 1259, 1262, 1265, 1273, + 1280, 1283, 1287, 1292, 0, 1295, 0, 0, 1296, 0, + 636, 1083, 0, 0, 1101, 1102, 0, 1325, 1327, 1328, + 1329, 1342, 0, 1347, 1348, 0, 1353, 0, 1357, 1368, + 0, 988, 945, 946, 993, 994, 0, 0, 936, 1472, + 84, 1473, 1470, 0, 1468, 1466, 1525, 0, 1534, 1535, + 1544, 1545, 1551, 0, 1557, 0, 89, 0, 0, 0, + 1483, 193, 0, 212, 0, 613, 0, 616, 606, 693, + 694, 0, 706, 698, 700, 701, 683, -2, 1509, 0, + 0, 0, 589, 1421, 0, 0, 1297, 0, 632, 633, + 1091, 1084, 0, 1069, 1070, 1088, 1322, 1324, 0, 0, + 0, 0, 987, 989, 990, 83, 0, 1469, 1109, 0, + 1537, 1538, 1565, 1563, 956, 963, 0, 90, 442, 435, + 1509, 0, 0, 0, 686, 687, 688, 689, 690, 691, + 692, 579, 1511, 141, 142, 0, 509, 510, 511, 135, + 0, 1142, 1293, 1085, 0, 0, 0, 0, 0, 1343, + 0, 1349, 0, 1354, 0, 947, 948, 1471, 0, 0, + 618, 0, 620, 0, -2, 430, 443, 0, 187, 213, + 214, 0, 0, 217, 218, 219, 210, 211, 131, 0, + 0, 707, 0, 1512, 1513, 0, 138, 0, 0, 1076, + 1077, 1078, 1079, 1081, 0, 0, 0, 0, 1110, 1089, + 619, 0, 0, 385, 0, 629, 431, 432, 0, 438, + 439, 440, 441, 215, 216, 641, 0, 0, 508, 586, + 1422, 0, 0, 1344, 0, 1350, 0, 1355, 0, 621, + 622, 630, 0, 433, 0, 434, 0, 0, 0, 610, + 0, 641, 1510, 1086, 1080, 1082, 0, 0, 1108, 0, + 631, 627, 444, 446, 447, 0, 0, 445, 642, 611, + 1345, 1351, 0, 448, 449, 450, 623, 624, 625, 626, } var yyTok1 = [...]int{ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 144, 3, 3, 3, 171, 163, 3, - 87, 89, 168, 166, 88, 167, 221, 169, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 665, - 152, 151, 153, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 145, 3, 3, 3, 173, 165, 3, + 87, 89, 170, 168, 88, 169, 223, 171, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 732, + 153, 152, 154, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 173, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 175, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 140, 3, 174, + 3, 3, 3, 3, 141, 3, 176, } var yyTok2 = [...]int{ @@ -8536,14 +9641,14 @@ var yyTok2 = [...]int{ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, - 135, 136, 137, 138, 139, 141, 142, 143, 145, 146, - 147, 148, 149, 150, 154, 155, 156, 157, 158, 159, - 160, 161, 162, 164, 165, 170, 172, 175, 176, 177, + 135, 136, 137, 138, 139, 140, 142, 143, 144, 146, + 147, 148, 149, 150, 151, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 166, 167, 172, 174, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, - 218, 219, 220, 222, 223, 224, 225, 226, 227, 228, + 218, 219, 220, 221, 222, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, @@ -8630,7 +9735,20 @@ var yyTok3 = [...]int{ 57975, 650, 57976, 651, 57977, 652, 57978, 653, 57979, 654, 57980, 655, 57981, 656, 57982, 657, 57983, 658, 57984, 659, 57985, 660, 57986, 661, 57987, 662, 57988, 663, 57989, 664, - 0, + 57990, 665, 57991, 666, 57992, 667, 57993, 668, 57994, 669, + 57995, 670, 57996, 671, 57997, 672, 57998, 673, 57999, 674, + 58000, 675, 58001, 676, 58002, 677, 58003, 678, 58004, 679, + 58005, 680, 58006, 681, 58007, 682, 58008, 683, 58009, 684, + 58010, 685, 58011, 686, 58012, 687, 58013, 688, 58014, 689, + 58015, 690, 58016, 691, 58017, 692, 58018, 693, 58019, 694, + 58020, 695, 58021, 696, 58022, 697, 58023, 698, 58024, 699, + 58025, 700, 58026, 701, 58027, 702, 58028, 703, 58029, 704, + 58030, 705, 58031, 706, 58032, 707, 58033, 708, 58034, 709, + 58035, 710, 58036, 711, 58037, 712, 58038, 713, 58039, 714, + 58040, 715, 58041, 716, 58042, 717, 58043, 718, 58044, 719, + 58045, 720, 58046, 721, 58047, 722, 58048, 723, 58049, 724, + 58050, 725, 58051, 726, 58052, 727, 58053, 728, 58054, 729, + 58055, 730, 58056, 731, 0, } var yyErrorMessages = [...]struct { @@ -8639,10 +9757,6 @@ var yyErrorMessages = [...]struct { msg string }{} -//line yaccpar:1 - -/* parser for yacc output */ - func yyIaddr(v any) __yyunsafe__.Pointer { type h struct { t __yyunsafe__.Pointer @@ -8651,6 +9765,10 @@ func yyIaddr(v any) __yyunsafe__.Pointer { return (*h)(__yyunsafe__.Pointer(&v)).p } +//line yaccpar:1 + +/* parser for yacc output */ + var ( yyDebug = 0 yyErrorVerbose = false @@ -8980,7 +10098,7 @@ yydefault: case 1: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:602 +//line sql.y:599 { stmt := yyDollar[2].statementUnion() // If the statement is empty and we have comments @@ -8994,199 +10112,199 @@ yydefault: } case 2: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:615 +//line sql.y:612 { } case 3: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:616 +//line sql.y:613 { } case 4: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:620 +//line sql.y:617 { yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 38: + case 40: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:657 +//line sql.y:656 { setParseTree(yylex, nil) } - case 39: + case 41: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Variable -//line sql.y:663 +//line sql.y:662 { yyLOCAL = NewVariableExpression(yyDollar[1].str, SingleAt) } yyVAL.union = yyLOCAL - case 40: + case 42: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:669 +//line sql.y:668 { yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str)) } - case 41: + case 43: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:674 +//line sql.y:673 { yyVAL.identifierCI = NewIdentifierCI("") } - case 42: + case 44: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:678 +//line sql.y:677 { yyVAL.identifierCI = yyDollar[1].identifierCI } - case 43: + case 45: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Variable -//line sql.y:684 +//line sql.y:683 { yyLOCAL = NewVariableExpression(string(yyDollar[1].str), SingleAt) } yyVAL.union = yyLOCAL - case 44: + case 46: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Variable -//line sql.y:688 +//line sql.y:687 { yyLOCAL = NewVariableExpression(string(yyDollar[1].str), DoubleAt) } yyVAL.union = yyLOCAL - case 45: + case 47: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:694 +//line sql.y:693 { yyLOCAL = &OtherAdmin{} } yyVAL.union = yyLOCAL - case 46: + case 48: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:700 +//line sql.y:699 { yyLOCAL = &Load{} } yyVAL.union = yyLOCAL - case 47: + case 49: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *With -//line sql.y:706 +//line sql.y:705 { yyLOCAL = &With{ctes: yyDollar[2].ctesUnion(), Recursive: false} } yyVAL.union = yyLOCAL - case 48: + case 50: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *With -//line sql.y:710 +//line sql.y:709 { yyLOCAL = &With{ctes: yyDollar[3].ctesUnion(), Recursive: true} } yyVAL.union = yyLOCAL - case 49: + case 51: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *With -//line sql.y:715 +//line sql.y:714 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 50: + case 52: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *With -//line sql.y:719 +//line sql.y:718 { yyLOCAL = yyDollar[1].withUnion() } yyVAL.union = yyLOCAL - case 51: + case 53: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:725 +//line sql.y:724 { yySLICE := (*[]*CommonTableExpr)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].cteUnion()) } - case 52: + case 54: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*CommonTableExpr -//line sql.y:729 +//line sql.y:728 { yyLOCAL = []*CommonTableExpr{yyDollar[1].cteUnion()} } yyVAL.union = yyLOCAL - case 53: + case 55: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *CommonTableExpr -//line sql.y:735 +//line sql.y:734 { yyLOCAL = &CommonTableExpr{ID: yyDollar[1].identifierCS, Columns: yyDollar[2].columnsUnion(), Subquery: yyDollar[4].subqueryUnion()} } yyVAL.union = yyLOCAL - case 54: + case 56: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:741 +//line sql.y:740 { yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 55: + case 57: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:745 +//line sql.y:744 { yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 56: + case 58: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:749 +//line sql.y:748 { setLockInSelect(yyDollar[2].selStmtUnion(), yyDollar[3].lockUnion()) yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 57: + case 59: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:772 +//line sql.y:771 { yyDollar[1].selStmtUnion().SetOrderBy(yyDollar[2].orderByUnion()) yyDollar[1].selStmtUnion().SetLimit(yyDollar[3].limitUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 58: + case 60: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:778 +//line sql.y:777 { yyDollar[1].selStmtUnion().SetLimit(yyDollar[2].limitUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 59: + case 61: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:783 +//line sql.y:782 { yyDollar[1].selStmtUnion().SetOrderBy(yyDollar[2].orderByUnion()) yyDollar[1].selStmtUnion().SetLimit(yyDollar[3].limitUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 60: + case 62: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:789 +//line sql.y:788 { yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion()) yyDollar[2].selStmtUnion().SetOrderBy(yyDollar[3].orderByUnion()) @@ -9194,20 +10312,20 @@ yydefault: yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 61: + case 63: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:796 +//line sql.y:795 { yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion()) yyDollar[2].selStmtUnion().SetLimit(yyDollar[3].limitUnion()) yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 62: + case 64: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:802 +//line sql.y:801 { yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion()) yyDollar[2].selStmtUnion().SetOrderBy(yyDollar[3].orderByUnion()) @@ -9215,191 +10333,191 @@ yydefault: yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 63: + case 65: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:809 +//line sql.y:808 { yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion()) } - case 64: + case 66: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:813 +//line sql.y:812 { yyLOCAL = NewSelect(Comments(yyDollar[2].strs), SelectExprs{&Nextval{Expr: yyDollar[5].exprUnion()}}, []string{yyDollar[3].str} /*options*/, nil, TableExprs{&AliasedTableExpr{Expr: yyDollar[7].tableName}}, nil /*where*/, nil /*groupBy*/, nil /*having*/, nil) } yyVAL.union = yyLOCAL - case 65: + case 67: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:819 +//line sql.y:818 { yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 66: + case 68: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:823 +//line sql.y:822 { yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()} } yyVAL.union = yyLOCAL - case 67: + case 69: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:827 +//line sql.y:826 { yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()} } yyVAL.union = yyLOCAL - case 68: + case 70: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:831 +//line sql.y:830 { yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()} } yyVAL.union = yyLOCAL - case 69: + case 71: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:835 +//line sql.y:834 { yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()} } yyVAL.union = yyLOCAL - case 70: + case 72: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:841 +//line sql.y:840 { yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 71: + case 73: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:845 +//line sql.y:844 { setLockInSelect(yyDollar[1].selStmtUnion(), yyDollar[2].lockUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 72: + case 74: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:850 +//line sql.y:849 { yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 73: + case 75: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:854 +//line sql.y:853 { yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 74: + case 76: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:860 +//line sql.y:859 { yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 75: + case 77: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:864 +//line sql.y:863 { yyDollar[1].selStmtUnion().SetInto(yyDollar[2].selectIntoUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 76: + case 78: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:869 +//line sql.y:868 { yyDollar[1].selStmtUnion().SetInto(yyDollar[2].selectIntoUnion()) yyDollar[1].selStmtUnion().SetLock(yyDollar[3].lockUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 77: + case 79: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:875 +//line sql.y:874 { yyDollar[1].selStmtUnion().SetInto(yyDollar[3].selectIntoUnion()) yyDollar[1].selStmtUnion().SetLock(yyDollar[2].lockUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 78: + case 80: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:881 +//line sql.y:880 { yyDollar[1].selStmtUnion().SetInto(yyDollar[2].selectIntoUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 79: + case 81: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:888 +//line sql.y:887 { yyLOCAL = &Stream{Comments: Comments(yyDollar[2].strs).Parsed(), SelectExpr: yyDollar[3].selectExprUnion(), Table: yyDollar[5].tableName} } yyVAL.union = yyLOCAL - case 80: + case 82: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:894 +//line sql.y:893 { yyLOCAL = &VStream{Comments: Comments(yyDollar[2].strs).Parsed(), SelectExpr: yyDollar[3].selectExprUnion(), Table: yyDollar[5].tableName, Where: NewWhere(WhereClause, yyDollar[6].exprUnion()), Limit: yyDollar[7].limitUnion()} } yyVAL.union = yyLOCAL - case 81: + case 83: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:902 +//line sql.y:901 { yyLOCAL = NewSelect(Comments(yyDollar[2].strs), yyDollar[4].selectExprsUnion() /*SelectExprs*/, yyDollar[3].strs /*options*/, yyDollar[5].selectIntoUnion() /*into*/, yyDollar[6].tableExprsUnion() /*from*/, NewWhere(WhereClause, yyDollar[7].exprUnion()), GroupBy(yyDollar[8].exprsUnion()), NewWhere(HavingClause, yyDollar[9].exprUnion()), yyDollar[10].namedWindowsUnion()) } yyVAL.union = yyLOCAL - case 82: + case 84: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:906 +//line sql.y:905 { yyLOCAL = NewSelect(Comments(yyDollar[2].strs), yyDollar[4].selectExprsUnion() /*SelectExprs*/, yyDollar[3].strs /*options*/, nil, yyDollar[5].tableExprsUnion() /*from*/, NewWhere(WhereClause, yyDollar[6].exprUnion()), GroupBy(yyDollar[7].exprsUnion()), NewWhere(HavingClause, yyDollar[8].exprUnion()), yyDollar[9].namedWindowsUnion()) } yyVAL.union = yyLOCAL - case 83: + case 85: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:912 +//line sql.y:911 { // insert_data returns a *Insert pre-filled with Columns & Values ins := yyDollar[6].insUnion() ins.Action = yyDollar[1].insertActionUnion() ins.Comments = Comments(yyDollar[2].strs).Parsed() ins.Ignore = yyDollar[3].ignoreUnion() - ins.Table = yyDollar[4].tableName + ins.Table = getAliasedTableExprFromTableName(yyDollar[4].tableName) ins.Partitions = yyDollar[5].partitionsUnion() ins.OnDup = OnDup(yyDollar[7].updateExprsUnion()) yyLOCAL = ins } yyVAL.union = yyLOCAL - case 84: + case 86: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Statement -//line sql.y:924 +//line sql.y:923 { cols := make(Columns, 0, len(yyDollar[7].updateExprsUnion())) vals := make(ValTuple, 0, len(yyDollar[8].updateExprsUnion())) @@ -9407,332 +10525,332 @@ yydefault: cols = append(cols, updateList.Name.Name) vals = append(vals, updateList.Expr) } - yyLOCAL = &Insert{Action: yyDollar[1].insertActionUnion(), Comments: Comments(yyDollar[2].strs).Parsed(), Ignore: yyDollar[3].ignoreUnion(), Table: yyDollar[4].tableName, Partitions: yyDollar[5].partitionsUnion(), Columns: cols, Rows: Values{vals}, OnDup: OnDup(yyDollar[8].updateExprsUnion())} + yyLOCAL = &Insert{Action: yyDollar[1].insertActionUnion(), Comments: Comments(yyDollar[2].strs).Parsed(), Ignore: yyDollar[3].ignoreUnion(), Table: getAliasedTableExprFromTableName(yyDollar[4].tableName), Partitions: yyDollar[5].partitionsUnion(), Columns: cols, Rows: Values{vals}, OnDup: OnDup(yyDollar[8].updateExprsUnion())} } yyVAL.union = yyLOCAL - case 85: + case 87: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL InsertAction -//line sql.y:936 +//line sql.y:935 { yyLOCAL = InsertAct } yyVAL.union = yyLOCAL - case 86: + case 88: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL InsertAction -//line sql.y:940 +//line sql.y:939 { yyLOCAL = ReplaceAct } yyVAL.union = yyLOCAL - case 87: + case 89: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL Statement -//line sql.y:946 +//line sql.y:945 { yyLOCAL = &Update{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), TableExprs: yyDollar[5].tableExprsUnion(), Exprs: yyDollar[7].updateExprsUnion(), Where: NewWhere(WhereClause, yyDollar[8].exprUnion()), OrderBy: yyDollar[9].orderByUnion(), Limit: yyDollar[10].limitUnion()} } yyVAL.union = yyLOCAL - case 88: + case 90: yyDollar = yyS[yypt-11 : yypt+1] var yyLOCAL Statement -//line sql.y:952 +//line sql.y:951 { yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), TableExprs: TableExprs{&AliasedTableExpr{Expr: yyDollar[6].tableName, As: yyDollar[7].identifierCS}}, Partitions: yyDollar[8].partitionsUnion(), Where: NewWhere(WhereClause, yyDollar[9].exprUnion()), OrderBy: yyDollar[10].orderByUnion(), Limit: yyDollar[11].limitUnion()} } yyVAL.union = yyLOCAL - case 89: + case 91: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL Statement -//line sql.y:956 +//line sql.y:955 { yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), Targets: yyDollar[6].tableNamesUnion(), TableExprs: yyDollar[8].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[9].exprUnion())} } yyVAL.union = yyLOCAL - case 90: + case 92: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Statement -//line sql.y:960 +//line sql.y:959 { yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), Targets: yyDollar[5].tableNamesUnion(), TableExprs: yyDollar[7].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[8].exprUnion())} } yyVAL.union = yyLOCAL - case 91: + case 93: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Statement -//line sql.y:964 +//line sql.y:963 { yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), Targets: yyDollar[5].tableNamesUnion(), TableExprs: yyDollar[7].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[8].exprUnion())} } yyVAL.union = yyLOCAL - case 92: + case 94: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:969 +//line sql.y:968 { } - case 93: + case 95: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:970 +//line sql.y:969 { } - case 94: + case 96: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableNames -//line sql.y:974 +//line sql.y:973 { - yyLOCAL = TableNames{yyDollar[1].tableName.ToViewName()} + yyLOCAL = TableNames{yyDollar[1].tableName} } yyVAL.union = yyLOCAL - case 95: + case 97: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:978 +//line sql.y:977 { yySLICE := (*TableNames)(yyIaddr(yyVAL.union)) - *yySLICE = append(*yySLICE, yyDollar[3].tableName.ToViewName()) + *yySLICE = append(*yySLICE, yyDollar[3].tableName) } - case 96: + case 98: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableNames -//line sql.y:984 +//line sql.y:983 { yyLOCAL = TableNames{yyDollar[1].tableName} } yyVAL.union = yyLOCAL - case 97: + case 99: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:988 +//line sql.y:987 { yySLICE := (*TableNames)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].tableName) } - case 98: + case 100: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableNames -//line sql.y:994 +//line sql.y:993 { yyLOCAL = TableNames{yyDollar[1].tableName} } yyVAL.union = yyLOCAL - case 99: + case 101: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:998 +//line sql.y:997 { yySLICE := (*TableNames)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].tableName) } - case 100: + case 102: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Partitions -//line sql.y:1003 +//line sql.y:1002 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 101: + case 103: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Partitions -//line sql.y:1007 +//line sql.y:1006 { yyLOCAL = yyDollar[3].partitionsUnion() } yyVAL.union = yyLOCAL - case 102: + case 104: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:1013 +//line sql.y:1012 { yyLOCAL = NewSetStatement(Comments(yyDollar[2].strs).Parsed(), yyDollar[3].setExprsUnion()) } yyVAL.union = yyLOCAL - case 103: + case 105: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SetExprs -//line sql.y:1019 +//line sql.y:1018 { yyLOCAL = SetExprs{yyDollar[1].setExprUnion()} } yyVAL.union = yyLOCAL - case 104: + case 106: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1023 +//line sql.y:1022 { yySLICE := (*SetExprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].setExprUnion()) } - case 105: + case 107: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1029 +//line sql.y:1028 { yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: NewStrLiteral("on")} } yyVAL.union = yyLOCAL - case 106: + case 108: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1033 +//line sql.y:1032 { yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: NewStrLiteral("off")} } yyVAL.union = yyLOCAL - case 107: + case 109: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1037 +//line sql.y:1036 { yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 108: + case 110: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1041 +//line sql.y:1040 { yyLOCAL = &SetExpr{Var: NewSetVariable(string(yyDollar[1].str), SessionScope), Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 109: + case 111: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Variable -//line sql.y:1047 +//line sql.y:1046 { yyLOCAL = NewSetVariable(string(yyDollar[1].str), SessionScope) } yyVAL.union = yyLOCAL - case 110: + case 112: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Variable -//line sql.y:1051 +//line sql.y:1050 { yyLOCAL = yyDollar[1].variableUnion() } yyVAL.union = yyLOCAL - case 111: + case 113: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Variable -//line sql.y:1055 +//line sql.y:1054 { yyLOCAL = NewSetVariable(string(yyDollar[2].str), yyDollar[1].scopeUnion()) } yyVAL.union = yyLOCAL - case 112: + case 114: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:1061 +//line sql.y:1060 { yyLOCAL = NewSetStatement(Comments(yyDollar[2].strs).Parsed(), UpdateSetExprsScope(yyDollar[5].setExprsUnion(), yyDollar[3].scopeUnion())) } yyVAL.union = yyLOCAL - case 113: + case 115: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:1065 +//line sql.y:1064 { yyLOCAL = NewSetStatement(Comments(yyDollar[2].strs).Parsed(), yyDollar[4].setExprsUnion()) } yyVAL.union = yyLOCAL - case 114: + case 116: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SetExprs -//line sql.y:1071 +//line sql.y:1070 { yyLOCAL = SetExprs{yyDollar[1].setExprUnion()} } yyVAL.union = yyLOCAL - case 115: + case 117: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1075 +//line sql.y:1074 { yySLICE := (*SetExprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].setExprUnion()) } - case 116: + case 118: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1081 +//line sql.y:1080 { yyLOCAL = &SetExpr{Var: NewSetVariable(TransactionIsolationStr, NextTxScope), Expr: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 117: + case 119: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1085 +//line sql.y:1084 { yyLOCAL = &SetExpr{Var: NewSetVariable(TransactionReadOnlyStr, NextTxScope), Expr: NewStrLiteral("off")} } yyVAL.union = yyLOCAL - case 118: + case 120: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1089 +//line sql.y:1088 { yyLOCAL = &SetExpr{Var: NewSetVariable(TransactionReadOnlyStr, NextTxScope), Expr: NewStrLiteral("on")} } yyVAL.union = yyLOCAL - case 119: + case 121: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1095 +//line sql.y:1094 { yyVAL.str = RepeatableReadStr } - case 120: + case 122: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1099 +//line sql.y:1098 { yyVAL.str = ReadCommittedStr } - case 121: + case 123: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1103 +//line sql.y:1102 { yyVAL.str = ReadUncommittedStr } - case 122: + case 124: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1107 +//line sql.y:1106 { yyVAL.str = SerializableStr } - case 123: + case 125: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Scope -//line sql.y:1113 +//line sql.y:1112 { yyLOCAL = SessionScope } yyVAL.union = yyLOCAL - case 124: + case 126: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Scope -//line sql.y:1117 +//line sql.y:1116 { yyLOCAL = SessionScope } yyVAL.union = yyLOCAL - case 125: + case 127: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Scope -//line sql.y:1121 +//line sql.y:1120 { yyLOCAL = GlobalScope } yyVAL.union = yyLOCAL - case 126: + case 128: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:1127 +//line sql.y:1126 { yyDollar[1].createTableUnion().TableSpec = yyDollar[2].tableSpecUnion() yyDollar[1].createTableUnion().FullyParsed = true yyLOCAL = yyDollar[1].createTableUnion() } yyVAL.union = yyLOCAL - case 127: + case 129: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:1133 +//line sql.y:1132 { // Create table [name] like [name] yyDollar[1].createTableUnion().OptLike = yyDollar[2].optLikeUnion() @@ -9740,10 +10858,10 @@ yydefault: yyLOCAL = yyDollar[1].createTableUnion() } yyVAL.union = yyLOCAL - case 128: + case 130: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:1140 +//line sql.y:1139 { indexDef := yyDollar[1].alterTableUnion().AlterOptions[0].(*AddIndexDefinition).IndexDefinition indexDef.Columns = yyDollar[3].indexColumnsUnion() @@ -9753,413 +10871,413 @@ yydefault: yyLOCAL = yyDollar[1].alterTableUnion() } yyVAL.union = yyLOCAL - case 129: + case 131: yyDollar = yyS[yypt-12 : yypt+1] var yyLOCAL Statement -//line sql.y:1149 +//line sql.y:1148 { - yyLOCAL = &CreateView{ViewName: yyDollar[8].tableName.ToViewName(), Comments: Comments(yyDollar[2].strs).Parsed(), IsReplace: yyDollar[3].booleanUnion(), Algorithm: yyDollar[4].str, Definer: yyDollar[5].definerUnion(), Security: yyDollar[6].str, Columns: yyDollar[9].columnsUnion(), Select: yyDollar[11].selStmtUnion(), CheckOption: yyDollar[12].str} + yyLOCAL = &CreateView{ViewName: yyDollar[8].tableName, Comments: Comments(yyDollar[2].strs).Parsed(), IsReplace: yyDollar[3].booleanUnion(), Algorithm: yyDollar[4].str, Definer: yyDollar[5].definerUnion(), Security: yyDollar[6].str, Columns: yyDollar[9].columnsUnion(), Select: yyDollar[11].selStmtUnion(), CheckOption: yyDollar[12].str} } yyVAL.union = yyLOCAL - case 130: + case 132: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:1153 +//line sql.y:1152 { yyDollar[1].createDatabaseUnion().FullyParsed = true yyDollar[1].createDatabaseUnion().CreateOptions = yyDollar[2].databaseOptionsUnion() yyLOCAL = yyDollar[1].createDatabaseUnion() } yyVAL.union = yyLOCAL - case 131: + case 133: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:1160 +//line sql.y:1159 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 132: + case 134: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:1164 +//line sql.y:1163 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 133: + case 135: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1169 +//line sql.y:1168 { yyVAL.identifierCI = NewIdentifierCI("") } - case 134: + case 136: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1173 +//line sql.y:1172 { yyVAL.identifierCI = yyDollar[2].identifierCI } - case 135: + case 137: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1179 +//line sql.y:1178 { yyVAL.identifierCI = yyDollar[1].identifierCI } - case 136: + case 138: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []VindexParam -//line sql.y:1184 +//line sql.y:1183 { var v []VindexParam yyLOCAL = v } yyVAL.union = yyLOCAL - case 137: + case 139: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL []VindexParam -//line sql.y:1189 +//line sql.y:1188 { yyLOCAL = yyDollar[2].vindexParamsUnion() } yyVAL.union = yyLOCAL - case 138: + case 140: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []VindexParam -//line sql.y:1195 +//line sql.y:1194 { yyLOCAL = make([]VindexParam, 0, 4) yyLOCAL = append(yyLOCAL, yyDollar[1].vindexParam) } yyVAL.union = yyLOCAL - case 139: + case 141: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1200 +//line sql.y:1199 { yySLICE := (*[]VindexParam)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].vindexParam) } - case 140: + case 142: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1206 +//line sql.y:1205 { yyVAL.vindexParam = VindexParam{Key: yyDollar[1].identifierCI, Val: yyDollar[3].str} } - case 141: + case 143: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []*JSONObjectParam -//line sql.y:1211 +//line sql.y:1210 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 142: + case 144: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*JSONObjectParam -//line sql.y:1215 +//line sql.y:1214 { yyLOCAL = yyDollar[1].jsonObjectParamsUnion() } yyVAL.union = yyLOCAL - case 143: + case 145: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*JSONObjectParam -//line sql.y:1221 +//line sql.y:1220 { yyLOCAL = []*JSONObjectParam{yyDollar[1].jsonObjectParam} } yyVAL.union = yyLOCAL - case 144: + case 146: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1225 +//line sql.y:1224 { yySLICE := (*[]*JSONObjectParam)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].jsonObjectParam) } - case 145: + case 147: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1231 +//line sql.y:1230 { yyVAL.jsonObjectParam = &JSONObjectParam{Key: yyDollar[1].exprUnion(), Value: yyDollar[3].exprUnion()} } - case 146: + case 148: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *CreateTable -//line sql.y:1237 +//line sql.y:1236 { yyLOCAL = &CreateTable{Comments: Comments(yyDollar[2].strs).Parsed(), Table: yyDollar[6].tableName, IfNotExists: yyDollar[5].booleanUnion(), Temp: yyDollar[3].booleanUnion()} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 147: + case 149: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *AlterTable -//line sql.y:1244 +//line sql.y:1243 { yyLOCAL = &AlterTable{Comments: Comments(yyDollar[2].strs).Parsed(), Table: yyDollar[4].tableName} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 148: + case 150: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *AlterTable -//line sql.y:1251 +//line sql.y:1250 { yyLOCAL = &AlterTable{Table: yyDollar[7].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[4].identifierCI, Type: string(yyDollar[3].str)}, Options: yyDollar[5].indexOptionsUnion()}}}} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 149: + case 151: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *AlterTable -//line sql.y:1256 +//line sql.y:1255 { yyLOCAL = &AlterTable{Table: yyDollar[8].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[5].identifierCI, Type: string(yyDollar[3].str) + " " + string(yyDollar[4].str), Fulltext: true}, Options: yyDollar[6].indexOptionsUnion()}}}} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 150: + case 152: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *AlterTable -//line sql.y:1261 +//line sql.y:1260 { yyLOCAL = &AlterTable{Table: yyDollar[8].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[5].identifierCI, Type: string(yyDollar[3].str) + " " + string(yyDollar[4].str), Spatial: true}, Options: yyDollar[6].indexOptionsUnion()}}}} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 151: + case 153: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *AlterTable -//line sql.y:1266 +//line sql.y:1265 { yyLOCAL = &AlterTable{Table: yyDollar[8].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[5].identifierCI, Type: string(yyDollar[3].str) + " " + string(yyDollar[4].str), Unique: true}, Options: yyDollar[6].indexOptionsUnion()}}}} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 152: + case 154: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *CreateDatabase -//line sql.y:1273 +//line sql.y:1272 { yyLOCAL = &CreateDatabase{Comments: Comments(yyDollar[4].strs).Parsed(), DBName: yyDollar[6].identifierCS, IfNotExists: yyDollar[5].booleanUnion()} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 153: + case 155: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *AlterDatabase -//line sql.y:1280 +//line sql.y:1279 { yyLOCAL = &AlterDatabase{} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 156: + case 158: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *TableSpec -//line sql.y:1291 +//line sql.y:1290 { yyLOCAL = yyDollar[2].tableSpecUnion() yyLOCAL.Options = yyDollar[4].tableOptionsUnion() yyLOCAL.PartitionOption = yyDollar[5].partitionOptionUnion() } yyVAL.union = yyLOCAL - case 157: + case 159: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []DatabaseOption -//line sql.y:1298 +//line sql.y:1297 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 158: + case 160: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []DatabaseOption -//line sql.y:1302 +//line sql.y:1301 { yyLOCAL = yyDollar[1].databaseOptionsUnion() } yyVAL.union = yyLOCAL - case 159: + case 161: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []DatabaseOption -//line sql.y:1308 +//line sql.y:1307 { yyLOCAL = []DatabaseOption{yyDollar[1].databaseOption} } yyVAL.union = yyLOCAL - case 160: + case 162: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []DatabaseOption -//line sql.y:1312 +//line sql.y:1311 { yyLOCAL = []DatabaseOption{yyDollar[1].databaseOption} } yyVAL.union = yyLOCAL - case 161: + case 163: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []DatabaseOption -//line sql.y:1316 +//line sql.y:1315 { yyLOCAL = []DatabaseOption{yyDollar[1].databaseOption} } yyVAL.union = yyLOCAL - case 162: + case 164: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1320 +//line sql.y:1319 { yySLICE := (*[]DatabaseOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].databaseOption) } - case 163: + case 165: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1324 +//line sql.y:1323 { yySLICE := (*[]DatabaseOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].databaseOption) } - case 164: + case 166: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1328 +//line sql.y:1327 { yySLICE := (*[]DatabaseOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].databaseOption) } - case 165: + case 167: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:1334 +//line sql.y:1333 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 166: + case 168: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:1338 +//line sql.y:1337 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 167: + case 169: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1344 +//line sql.y:1343 { yyVAL.databaseOption = DatabaseOption{Type: CharacterSetType, Value: string(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()} } - case 168: + case 170: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1348 +//line sql.y:1347 { yyVAL.databaseOption = DatabaseOption{Type: CharacterSetType, Value: encodeSQLString(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()} } - case 169: + case 171: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1354 +//line sql.y:1353 { yyVAL.databaseOption = DatabaseOption{Type: CollateType, Value: string(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()} } - case 170: + case 172: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1358 +//line sql.y:1357 { yyVAL.databaseOption = DatabaseOption{Type: CollateType, Value: encodeSQLString(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()} } - case 171: + case 173: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1364 +//line sql.y:1363 { yyVAL.databaseOption = DatabaseOption{Type: EncryptionType, Value: string(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()} } - case 172: + case 174: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1368 +//line sql.y:1367 { yyVAL.databaseOption = DatabaseOption{Type: EncryptionType, Value: encodeSQLString(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()} } - case 173: + case 175: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *OptLike -//line sql.y:1374 +//line sql.y:1373 { yyLOCAL = &OptLike{LikeTable: yyDollar[2].tableName} } yyVAL.union = yyLOCAL - case 174: + case 176: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *OptLike -//line sql.y:1378 +//line sql.y:1377 { yyLOCAL = &OptLike{LikeTable: yyDollar[3].tableName} } yyVAL.union = yyLOCAL - case 175: + case 177: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*ColumnDefinition -//line sql.y:1384 +//line sql.y:1383 { yyLOCAL = []*ColumnDefinition{yyDollar[1].columnDefinitionUnion()} } yyVAL.union = yyLOCAL - case 176: + case 178: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1388 +//line sql.y:1387 { yySLICE := (*[]*ColumnDefinition)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].columnDefinitionUnion()) } - case 177: + case 179: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *TableSpec -//line sql.y:1394 +//line sql.y:1393 { yyLOCAL = &TableSpec{} yyLOCAL.AddColumn(yyDollar[1].columnDefinitionUnion()) } yyVAL.union = yyLOCAL - case 178: + case 180: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *TableSpec -//line sql.y:1399 +//line sql.y:1398 { yyLOCAL = &TableSpec{} yyLOCAL.AddConstraint(yyDollar[1].constraintDefinitionUnion()) } yyVAL.union = yyLOCAL - case 179: + case 181: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1404 +//line sql.y:1403 { yyVAL.tableSpecUnion().AddColumn(yyDollar[3].columnDefinitionUnion()) } - case 180: + case 182: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1408 +//line sql.y:1407 { yyVAL.tableSpecUnion().AddColumn(yyDollar[3].columnDefinitionUnion()) yyVAL.tableSpecUnion().AddConstraint(yyDollar[4].constraintDefinitionUnion()) } - case 181: + case 183: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1413 +//line sql.y:1412 { yyVAL.tableSpecUnion().AddIndex(yyDollar[3].indexDefinitionUnion()) } - case 182: + case 184: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1417 +//line sql.y:1416 { yyVAL.tableSpecUnion().AddConstraint(yyDollar[3].constraintDefinitionUnion()) } - case 183: + case 185: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1421 +//line sql.y:1420 { yyVAL.tableSpecUnion().AddConstraint(yyDollar[3].constraintDefinitionUnion()) } - case 184: + case 186: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *ColumnDefinition -//line sql.y:1432 +//line sql.y:1431 { yyDollar[2].columnType.Options = yyDollar[4].columnTypeOptionsUnion() if yyDollar[2].columnType.Options.Collate == "" { @@ -10169,10 +11287,10 @@ yydefault: yyLOCAL = &ColumnDefinition{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType} } yyVAL.union = yyLOCAL - case 185: + case 187: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL *ColumnDefinition -//line sql.y:1441 +//line sql.y:1440 { yyDollar[2].columnType.Options = yyDollar[9].columnTypeOptionsUnion() yyDollar[2].columnType.Options.As = yyDollar[7].exprUnion() @@ -10181,65 +11299,66 @@ yydefault: yyLOCAL = &ColumnDefinition{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType} } yyVAL.union = yyLOCAL - case 186: + case 188: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1450 +//line sql.y:1449 { yyVAL.str = "" } - case 187: + case 189: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1454 +//line sql.y:1453 { yyVAL.str = "" } - case 188: + case 190: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1463 +//line sql.y:1462 { yyLOCAL = &ColumnTypeOptions{Null: nil, Default: nil, OnUpdate: nil, Autoincrement: false, KeyOpt: ColKeyNone, Comment: nil, As: nil, Invisible: nil, Format: UnspecifiedFormat, EngineAttribute: nil, SecondaryEngineAttribute: nil} } yyVAL.union = yyLOCAL - case 189: + case 191: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1467 +//line sql.y:1466 { val := true yyDollar[1].columnTypeOptionsUnion().Null = &val yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 190: + case 192: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1473 +//line sql.y:1472 { val := false yyDollar[1].columnTypeOptionsUnion().Null = &val yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 191: + case 193: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1479 +//line sql.y:1478 { yyDollar[1].columnTypeOptionsUnion().Default = yyDollar[4].exprUnion() yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 192: + case 194: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1484 +//line sql.y:1483 { yyDollar[1].columnTypeOptionsUnion().Default = yyDollar[3].exprUnion() + yyDollar[1].columnTypeOptionsUnion().DefaultLiteral = true yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 193: + case 195: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *ColumnTypeOptions //line sql.y:1489 @@ -10248,7 +11367,7 @@ yydefault: yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 194: + case 196: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions //line sql.y:1494 @@ -10257,7 +11376,7 @@ yydefault: yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 195: + case 197: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions //line sql.y:1499 @@ -10266,7 +11385,7 @@ yydefault: yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 196: + case 198: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions //line sql.y:1504 @@ -10275,13 +11394,13 @@ yydefault: yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 197: + case 199: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:1509 { yyDollar[1].columnTypeOptionsUnion().Collate = encodeSQLString(yyDollar[3].str) } - case 198: + case 200: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions //line sql.y:1513 @@ -10290,13 +11409,13 @@ yydefault: yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 199: + case 201: yyDollar = yyS[yypt-3 : yypt+1] //line sql.y:1518 { yyDollar[1].columnTypeOptionsUnion().Format = yyDollar[3].columnFormatUnion() } - case 200: + case 202: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions //line sql.y:1522 @@ -10305,7 +11424,7 @@ yydefault: yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 201: + case 203: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions //line sql.y:1527 @@ -10315,7 +11434,7 @@ yydefault: yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 202: + case 204: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions //line sql.y:1533 @@ -10325,19 +11444,19 @@ yydefault: yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 203: + case 205: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:1539 { yyDollar[1].columnTypeOptionsUnion().EngineAttribute = NewStrLiteral(yyDollar[4].str) } - case 204: + case 206: yyDollar = yyS[yypt-4 : yypt+1] //line sql.y:1543 { yyDollar[1].columnTypeOptionsUnion().SecondaryEngineAttribute = NewStrLiteral(yyDollar[4].str) } - case 205: + case 207: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnFormat //line sql.y:1549 @@ -10345,7 +11464,7 @@ yydefault: yyLOCAL = FixedFormat } yyVAL.union = yyLOCAL - case 206: + case 208: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnFormat //line sql.y:1553 @@ -10353,7 +11472,7 @@ yydefault: yyLOCAL = DynamicFormat } yyVAL.union = yyLOCAL - case 207: + case 209: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnFormat //line sql.y:1557 @@ -10361,7 +11480,7 @@ yydefault: yyLOCAL = DefaultFormat } yyVAL.union = yyLOCAL - case 208: + case 210: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnStorage //line sql.y:1563 @@ -10369,7 +11488,7 @@ yydefault: yyLOCAL = VirtualStorage } yyVAL.union = yyLOCAL - case 209: + case 211: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnStorage //line sql.y:1567 @@ -10377,7 +11496,7 @@ yydefault: yyLOCAL = StoredStorage } yyVAL.union = yyLOCAL - case 210: + case 212: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ColumnTypeOptions //line sql.y:1572 @@ -10385,7 +11504,7 @@ yydefault: yyLOCAL = &ColumnTypeOptions{} } yyVAL.union = yyLOCAL - case 211: + case 213: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions //line sql.y:1576 @@ -10394,7 +11513,7 @@ yydefault: yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 212: + case 214: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions //line sql.y:1581 @@ -10404,7 +11523,7 @@ yydefault: yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 213: + case 215: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions //line sql.y:1587 @@ -10414,7 +11533,7 @@ yydefault: yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 214: + case 216: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions //line sql.y:1593 @@ -10423,7 +11542,7 @@ yydefault: yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 215: + case 217: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions //line sql.y:1598 @@ -10432,7 +11551,7 @@ yydefault: yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 216: + case 218: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions //line sql.y:1603 @@ -10442,7 +11561,7 @@ yydefault: yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 217: + case 219: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions //line sql.y:1609 @@ -10452,7 +11571,7 @@ yydefault: yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 218: + case 220: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr //line sql.y:1617 @@ -10460,2429 +11579,2441 @@ yydefault: yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 220: + case 222: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1624 { - yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("current_timestamp"), Fsp: yyDollar[2].exprUnion()} + yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("current_timestamp"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 221: + case 223: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1628 { - yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("localtime"), Fsp: yyDollar[2].exprUnion()} + yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("localtime"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 222: + case 224: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1632 { - yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("localtimestamp"), Fsp: yyDollar[2].exprUnion()} + yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("localtimestamp"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 223: + case 225: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1636 { - yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("utc_timestamp"), Fsp: yyDollar[2].exprUnion()} + yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("utc_timestamp"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 224: + case 226: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1640 { - yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("now"), Fsp: yyDollar[2].exprUnion()} + yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("now"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL case 227: + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Expr +//line sql.y:1644 + { + yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("sysdate"), Fsp: yyDollar[2].integerUnion()} + } + yyVAL.union = yyLOCAL + case 230: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1650 +//line sql.y:1654 { yyLOCAL = &NullVal{} } yyVAL.union = yyLOCAL - case 229: + case 232: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1657 +//line sql.y:1661 { yyLOCAL = yyDollar[2].exprUnion() } yyVAL.union = yyLOCAL - case 230: + case 233: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1661 +//line sql.y:1665 { yyLOCAL = &UnaryExpr{Operator: UMinusOp, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 231: + case 234: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1667 +//line sql.y:1671 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 232: + case 235: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1671 +//line sql.y:1675 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 233: + case 236: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1675 +//line sql.y:1679 { yyLOCAL = yyDollar[1].boolValUnion() } yyVAL.union = yyLOCAL - case 234: + case 237: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1679 +//line sql.y:1683 { yyLOCAL = NewHexLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 235: + case 238: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1683 +//line sql.y:1687 { yyLOCAL = NewHexNumLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 236: + case 239: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1687 +//line sql.y:1691 { yyLOCAL = NewBitLiteral(yyDollar[1].str[2:]) } yyVAL.union = yyLOCAL - case 237: + case 240: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1691 +//line sql.y:1695 { yyLOCAL = NewBitLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 238: + case 241: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1695 +//line sql.y:1699 { - yyLOCAL = NewArgument(yyDollar[1].str[1:]) - bindVariable(yylex, yyDollar[1].str[1:]) + yyLOCAL = parseBindVariable(yylex, yyDollar[1].str[1:]) } yyVAL.union = yyLOCAL - case 239: + case 242: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1700 +//line sql.y:1703 { yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewBitLiteral(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 240: + case 243: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1704 +//line sql.y:1707 { yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewHexNumLiteral(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 241: + case 244: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1708 +//line sql.y:1711 { yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewBitLiteral(yyDollar[2].str[2:])} } yyVAL.union = yyLOCAL - case 242: + case 245: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1712 +//line sql.y:1715 { yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewHexLiteral(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 243: + case 246: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1716 +//line sql.y:1719 { yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 244: + case 247: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1720 +//line sql.y:1723 { - bindVariable(yylex, yyDollar[2].str[1:]) - yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewArgument(yyDollar[2].str[1:])} + arg := parseBindVariable(yylex, yyDollar[2].str[1:]) + yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: arg} } yyVAL.union = yyLOCAL - case 245: + case 248: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1725 +//line sql.y:1728 { yyLOCAL = NewDateLiteral(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 246: + case 249: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1729 +//line sql.y:1732 { yyLOCAL = NewTimeLiteral(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 247: + case 250: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1733 +//line sql.y:1736 { yyLOCAL = NewTimestampLiteral(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 248: + case 251: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1739 +//line sql.y:1742 { yyVAL.str = Armscii8Str } - case 249: + case 252: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1743 +//line sql.y:1746 { yyVAL.str = ASCIIStr } - case 250: + case 253: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1747 +//line sql.y:1750 { yyVAL.str = Big5Str } - case 251: + case 254: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1751 +//line sql.y:1754 { yyVAL.str = UBinaryStr } - case 252: + case 255: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1755 +//line sql.y:1758 { yyVAL.str = Cp1250Str } - case 253: + case 256: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1759 +//line sql.y:1762 { yyVAL.str = Cp1251Str } - case 254: + case 257: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1763 +//line sql.y:1766 { yyVAL.str = Cp1256Str } - case 255: + case 258: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1767 +//line sql.y:1770 { yyVAL.str = Cp1257Str } - case 256: + case 259: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1771 +//line sql.y:1774 { yyVAL.str = Cp850Str } - case 257: + case 260: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1775 +//line sql.y:1778 { yyVAL.str = Cp852Str } - case 258: + case 261: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1779 +//line sql.y:1782 { yyVAL.str = Cp866Str } - case 259: + case 262: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1783 +//line sql.y:1786 { yyVAL.str = Cp932Str } - case 260: + case 263: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1787 +//line sql.y:1790 { yyVAL.str = Dec8Str } - case 261: + case 264: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1791 +//line sql.y:1794 { yyVAL.str = EucjpmsStr } - case 262: + case 265: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1795 +//line sql.y:1798 { yyVAL.str = EuckrStr } - case 263: + case 266: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1799 +//line sql.y:1802 { yyVAL.str = Gb18030Str } - case 264: + case 267: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1803 +//line sql.y:1806 { yyVAL.str = Gb2312Str } - case 265: + case 268: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1807 +//line sql.y:1810 { yyVAL.str = GbkStr } - case 266: + case 269: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1811 +//line sql.y:1814 { yyVAL.str = Geostd8Str } - case 267: + case 270: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1815 +//line sql.y:1818 { yyVAL.str = GreekStr } - case 268: + case 271: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1819 +//line sql.y:1822 { yyVAL.str = HebrewStr } - case 269: + case 272: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1823 +//line sql.y:1826 { yyVAL.str = Hp8Str } - case 270: + case 273: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1827 +//line sql.y:1830 { yyVAL.str = Keybcs2Str } - case 271: + case 274: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1831 +//line sql.y:1834 { yyVAL.str = Koi8rStr } - case 272: + case 275: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1835 +//line sql.y:1838 { yyVAL.str = Koi8uStr } - case 273: + case 276: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1839 +//line sql.y:1842 { yyVAL.str = Latin1Str } - case 274: + case 277: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1843 +//line sql.y:1846 { yyVAL.str = Latin2Str } - case 275: + case 278: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1847 +//line sql.y:1850 { yyVAL.str = Latin5Str } - case 276: + case 279: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1851 +//line sql.y:1854 { yyVAL.str = Latin7Str } - case 277: + case 280: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1855 +//line sql.y:1858 { yyVAL.str = MacceStr } - case 278: + case 281: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1859 +//line sql.y:1862 { yyVAL.str = MacromanStr } - case 279: + case 282: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1863 +//line sql.y:1866 { yyVAL.str = SjisStr } - case 280: + case 283: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1867 +//line sql.y:1870 { yyVAL.str = Swe7Str } - case 281: + case 284: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1871 +//line sql.y:1874 { yyVAL.str = Tis620Str } - case 282: + case 285: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1875 +//line sql.y:1878 { yyVAL.str = Ucs2Str } - case 283: + case 286: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1879 +//line sql.y:1882 { yyVAL.str = UjisStr } - case 284: + case 287: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1883 +//line sql.y:1886 { yyVAL.str = Utf16Str } - case 285: + case 288: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1887 +//line sql.y:1890 { yyVAL.str = Utf16leStr } - case 286: + case 289: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1891 +//line sql.y:1894 { yyVAL.str = Utf32Str } - case 287: + case 290: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1895 +//line sql.y:1898 { - yyVAL.str = Utf8Str + yyVAL.str = Utf8mb3Str } - case 288: + case 291: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1899 +//line sql.y:1902 { yyVAL.str = Utf8mb4Str } - case 289: + case 292: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1903 +//line sql.y:1906 { - yyVAL.str = Utf8Str + yyVAL.str = Utf8mb3Str } - case 292: + case 295: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1913 +//line sql.y:1916 { yyLOCAL = NewIntLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 293: + case 296: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1917 +//line sql.y:1920 { yyLOCAL = NewFloatLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 294: + case 297: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1921 +//line sql.y:1924 { yyLOCAL = NewDecimalLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 295: + case 298: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1927 +//line sql.y:1930 { yyLOCAL = NewStrLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 296: + case 299: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1931 +//line sql.y:1934 { yyLOCAL = &UnaryExpr{Operator: NStringOp, Expr: NewStrLiteral(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 297: + case 300: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1935 +//line sql.y:1938 { yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewStrLiteral(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 298: + case 301: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1941 +//line sql.y:1944 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 299: + case 302: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1945 +//line sql.y:1948 { - yyLOCAL = NewArgument(yyDollar[1].str[1:]) - bindVariable(yylex, yyDollar[1].str[1:]) + yyLOCAL = parseBindVariable(yylex, yyDollar[1].str[1:]) } yyVAL.union = yyLOCAL - case 300: + case 303: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ColumnKeyOption -//line sql.y:1952 +//line sql.y:1954 { yyLOCAL = ColKeyPrimary } yyVAL.union = yyLOCAL - case 301: + case 304: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnKeyOption -//line sql.y:1956 +//line sql.y:1958 { yyLOCAL = ColKeyUnique } yyVAL.union = yyLOCAL - case 302: + case 305: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ColumnKeyOption -//line sql.y:1960 +//line sql.y:1962 { yyLOCAL = ColKeyUniqueKey } yyVAL.union = yyLOCAL - case 303: + case 306: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnKeyOption -//line sql.y:1964 +//line sql.y:1966 { yyLOCAL = ColKey } yyVAL.union = yyLOCAL - case 304: + case 307: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1970 +//line sql.y:1972 { yyVAL.columnType = yyDollar[1].columnType yyVAL.columnType.Unsigned = yyDollar[2].booleanUnion() yyVAL.columnType.Zerofill = yyDollar[3].booleanUnion() } - case 308: + case 311: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1981 +//line sql.y:1983 { yyVAL.columnType = yyDollar[1].columnType yyVAL.columnType.Length = yyDollar[2].literalUnion() } - case 309: + case 312: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1986 +//line sql.y:1988 { yyVAL.columnType = yyDollar[1].columnType } - case 310: + case 313: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1992 +//line sql.y:1994 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 311: + case 314: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1996 +//line sql.y:1998 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 312: + case 315: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2000 +//line sql.y:2002 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 313: + case 316: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2004 +//line sql.y:2006 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 314: + case 317: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2008 +//line sql.y:2010 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 315: + case 318: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2012 +//line sql.y:2014 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 316: + case 319: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2016 +//line sql.y:2018 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 317: + case 320: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2020 +//line sql.y:2022 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 318: + case 321: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2024 +//line sql.y:2026 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 319: + case 322: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2030 +//line sql.y:2032 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } - case 320: + case 323: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2036 +//line sql.y:2038 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } - case 321: + case 324: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2042 +//line sql.y:2044 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } - case 322: + case 325: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2048 +//line sql.y:2050 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } - case 323: + case 326: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2054 +//line sql.y:2056 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } - case 324: + case 327: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2060 +//line sql.y:2062 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } - case 325: + case 328: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2066 +//line sql.y:2068 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } - case 326: + case 329: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2074 +//line sql.y:2076 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 327: + case 330: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2078 +//line sql.y:2080 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } - case 328: + case 331: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2082 +//line sql.y:2084 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } - case 329: + case 332: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2086 +//line sql.y:2088 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } - case 330: + case 333: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2090 +//line sql.y:2092 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } - case 331: + case 334: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2096 +//line sql.y:2098 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].columnCharset} } - case 332: + case 335: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2100 +//line sql.y:2102 { // CHAR BYTE is an alias for binary. See also: // https://dev.mysql.com/doc/refman/8.0/en/string-type-syntax.html yyVAL.columnType = &ColumnType{Type: "binary", Length: yyDollar[2].literalUnion()} } - case 333: + case 336: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2106 +//line sql.y:2108 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].columnCharset} } - case 334: + case 337: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2110 +//line sql.y:2112 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } - case 335: + case 338: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2114 +//line sql.y:2116 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } - case 336: + case 339: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2118 +//line sql.y:2120 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset} } - case 337: + case 340: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2122 +//line sql.y:2124 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset} } - case 338: + case 341: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2126 +//line sql.y:2128 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset} } - case 339: + case 342: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2130 +//line sql.y:2132 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset} } - case 340: + case 343: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2134 +//line sql.y:2136 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 341: + case 344: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2138 +//line sql.y:2140 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 342: + case 345: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2142 +//line sql.y:2144 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 343: + case 346: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2146 +//line sql.y:2148 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 344: + case 347: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2150 +//line sql.y:2152 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 345: + case 348: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2154 +//line sql.y:2156 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].columnCharset} } - case 346: + case 349: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2159 +//line sql.y:2161 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].columnCharset} } - case 347: + case 350: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2165 +//line sql.y:2167 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 348: + case 351: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2169 +//line sql.y:2171 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 349: + case 352: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2173 +//line sql.y:2175 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 350: + case 353: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2177 +//line sql.y:2179 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 351: + case 354: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2181 +//line sql.y:2183 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 352: + case 355: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2185 +//line sql.y:2187 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 353: + case 356: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2189 +//line sql.y:2191 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 354: + case 357: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2193 +//line sql.y:2195 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } - case 355: + case 358: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2199 +//line sql.y:2201 { yyVAL.strs = make([]string, 0, 4) yyVAL.strs = append(yyVAL.strs, encodeSQLString(yyDollar[1].str)) } - case 356: + case 359: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2204 +//line sql.y:2206 { yyVAL.strs = append(yyDollar[1].strs, encodeSQLString(yyDollar[3].str)) } - case 357: + case 360: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *Literal -//line sql.y:2209 +//line sql.y:2211 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 358: + case 361: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *Literal -//line sql.y:2213 +//line sql.y:2215 { yyLOCAL = NewIntLiteral(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 359: + case 362: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2218 +//line sql.y:2220 { yyVAL.LengthScaleOption = LengthScaleOption{} } - case 360: + case 363: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2222 +//line sql.y:2224 { yyVAL.LengthScaleOption = LengthScaleOption{ Length: NewIntLiteral(yyDollar[2].str), Scale: NewIntLiteral(yyDollar[4].str), } } - case 361: + case 364: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2231 +//line sql.y:2233 { yyVAL.LengthScaleOption = yyDollar[1].LengthScaleOption } - case 362: + case 365: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2235 +//line sql.y:2237 { yyVAL.LengthScaleOption = LengthScaleOption{ Length: NewIntLiteral(yyDollar[2].str), } } - case 363: + case 366: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2242 +//line sql.y:2244 { yyVAL.LengthScaleOption = LengthScaleOption{} } - case 364: + case 367: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2246 +//line sql.y:2248 { yyVAL.LengthScaleOption = LengthScaleOption{ Length: NewIntLiteral(yyDollar[2].str), } } - case 365: + case 368: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2252 +//line sql.y:2254 { yyVAL.LengthScaleOption = LengthScaleOption{ Length: NewIntLiteral(yyDollar[2].str), Scale: NewIntLiteral(yyDollar[4].str), } } - case 366: + case 369: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:2260 +//line sql.y:2262 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 367: + case 370: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2264 +//line sql.y:2266 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 368: + case 371: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2268 +//line sql.y:2270 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 369: + case 372: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:2273 +//line sql.y:2275 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 370: + case 373: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2277 +//line sql.y:2279 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 371: + case 374: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2282 +//line sql.y:2284 { yyVAL.columnCharset = ColumnCharset{} } - case 372: + case 375: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2286 +//line sql.y:2288 { yyVAL.columnCharset = ColumnCharset{Name: string(yyDollar[2].identifierCI.String()), Binary: yyDollar[3].booleanUnion()} } - case 373: + case 376: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2290 +//line sql.y:2292 { yyVAL.columnCharset = ColumnCharset{Name: encodeSQLString(yyDollar[2].str), Binary: yyDollar[3].booleanUnion()} } - case 374: + case 377: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2294 +//line sql.y:2296 { yyVAL.columnCharset = ColumnCharset{Name: string(yyDollar[2].str)} } - case 375: + case 378: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2298 +//line sql.y:2300 { // ASCII: Shorthand for CHARACTER SET latin1. yyVAL.columnCharset = ColumnCharset{Name: "latin1", Binary: yyDollar[2].booleanUnion()} } - case 376: + case 379: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2303 +//line sql.y:2305 { // UNICODE: Shorthand for CHARACTER SET ucs2. yyVAL.columnCharset = ColumnCharset{Name: "ucs2", Binary: yyDollar[2].booleanUnion()} } - case 377: + case 380: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2308 +//line sql.y:2310 { // BINARY: Shorthand for default CHARACTER SET but with binary collation yyVAL.columnCharset = ColumnCharset{Name: "", Binary: true} } - case 378: + case 381: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2313 +//line sql.y:2315 { // BINARY ASCII: Shorthand for CHARACTER SET latin1 with binary collation yyVAL.columnCharset = ColumnCharset{Name: "latin1", Binary: true} } - case 379: + case 382: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2318 +//line sql.y:2320 { // BINARY UNICODE: Shorthand for CHARACTER SET ucs2 with binary collation yyVAL.columnCharset = ColumnCharset{Name: "ucs2", Binary: true} } - case 380: + case 383: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:2324 +//line sql.y:2326 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 381: + case 384: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2328 +//line sql.y:2330 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 382: + case 385: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2333 +//line sql.y:2335 { yyVAL.str = "" } - case 383: + case 386: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2337 +//line sql.y:2339 { yyVAL.str = string(yyDollar[2].identifierCI.String()) } - case 384: + case 387: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2341 +//line sql.y:2343 { yyVAL.str = encodeSQLString(yyDollar[2].str) } - case 385: + case 388: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *IndexDefinition -//line sql.y:2347 +//line sql.y:2349 { yyLOCAL = &IndexDefinition{Info: yyDollar[1].indexInfoUnion(), Columns: yyDollar[3].indexColumnsUnion(), Options: yyDollar[5].indexOptionsUnion()} } yyVAL.union = yyLOCAL - case 386: + case 389: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []*IndexOption -//line sql.y:2352 +//line sql.y:2354 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 387: + case 390: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*IndexOption -//line sql.y:2356 +//line sql.y:2358 { yyLOCAL = yyDollar[1].indexOptionsUnion() } yyVAL.union = yyLOCAL - case 388: + case 391: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*IndexOption -//line sql.y:2362 +//line sql.y:2364 { yyLOCAL = []*IndexOption{yyDollar[1].indexOptionUnion()} } yyVAL.union = yyLOCAL - case 389: + case 392: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2366 +//line sql.y:2368 { yySLICE := (*[]*IndexOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].indexOptionUnion()) } - case 390: + case 393: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2372 +//line sql.y:2374 { yyLOCAL = yyDollar[1].indexOptionUnion() } yyVAL.union = yyLOCAL - case 391: + case 394: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2376 +//line sql.y:2378 { // should not be string yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 392: + case 395: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2381 +//line sql.y:2383 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 393: + case 396: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2385 +//line sql.y:2387 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 394: + case 397: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2389 +//line sql.y:2391 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 395: + case 398: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2393 +//line sql.y:2395 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str) + " " + string(yyDollar[2].str), String: yyDollar[3].identifierCI.String()} } yyVAL.union = yyLOCAL - case 396: + case 399: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2397 +//line sql.y:2399 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 397: + case 400: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2401 +//line sql.y:2403 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 398: + case 401: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2407 +//line sql.y:2409 { yyVAL.str = "" } - case 399: + case 402: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2411 +//line sql.y:2413 { yyVAL.str = string(yyDollar[1].str) } - case 400: + case 403: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *IndexInfo -//line sql.y:2417 +//line sql.y:2419 { yyLOCAL = &IndexInfo{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), ConstraintName: NewIdentifierCI(yyDollar[1].str), Name: NewIdentifierCI("PRIMARY"), Primary: true, Unique: true} } yyVAL.union = yyLOCAL - case 401: + case 404: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexInfo -//line sql.y:2421 +//line sql.y:2423 { yyLOCAL = &IndexInfo{Type: string(yyDollar[1].str) + " " + string(yyDollar[2].str), Name: NewIdentifierCI(yyDollar[3].str), Spatial: true, Unique: false} } yyVAL.union = yyLOCAL - case 402: + case 405: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexInfo -//line sql.y:2425 +//line sql.y:2427 { yyLOCAL = &IndexInfo{Type: string(yyDollar[1].str) + " " + string(yyDollar[2].str), Name: NewIdentifierCI(yyDollar[3].str), Fulltext: true, Unique: false} } yyVAL.union = yyLOCAL - case 403: + case 406: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *IndexInfo -//line sql.y:2429 +//line sql.y:2431 { yyLOCAL = &IndexInfo{Type: string(yyDollar[2].str) + " " + string(yyDollar[3].str), ConstraintName: NewIdentifierCI(yyDollar[1].str), Name: NewIdentifierCI(yyDollar[4].str), Unique: true} } yyVAL.union = yyLOCAL - case 404: + case 407: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *IndexInfo -//line sql.y:2433 +//line sql.y:2435 { yyLOCAL = &IndexInfo{Type: string(yyDollar[1].str), Name: NewIdentifierCI(yyDollar[2].str), Unique: false} } yyVAL.union = yyLOCAL - case 405: + case 408: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2438 +//line sql.y:2440 { yyVAL.str = "" } - case 406: + case 409: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2442 +//line sql.y:2444 { yyVAL.str = yyDollar[2].str } - case 407: + case 410: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2448 +//line sql.y:2450 { yyVAL.str = string(yyDollar[1].str) } - case 408: + case 411: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2452 +//line sql.y:2454 { yyVAL.str = string(yyDollar[1].str) } - case 409: + case 412: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2456 +//line sql.y:2458 { yyVAL.str = string(yyDollar[1].str) } - case 410: + case 413: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2462 +//line sql.y:2464 { yyVAL.str = string(yyDollar[1].str) } - case 411: + case 414: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2466 +//line sql.y:2468 { yyVAL.str = string(yyDollar[1].str) } - case 412: + case 415: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2471 +//line sql.y:2473 { yyVAL.str = "key" } - case 413: + case 416: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2475 +//line sql.y:2477 { yyVAL.str = yyDollar[1].str } - case 414: + case 417: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2481 +//line sql.y:2483 { yyVAL.str = string(yyDollar[1].str) } - case 415: + case 418: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2485 +//line sql.y:2487 { yyVAL.str = string(yyDollar[1].str) } - case 416: + case 419: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2490 +//line sql.y:2492 { yyVAL.str = "" } - case 417: + case 420: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2494 +//line sql.y:2496 { yyVAL.str = string(yyDollar[1].identifierCI.String()) } - case 418: + case 421: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*IndexColumn -//line sql.y:2500 +//line sql.y:2502 { yyLOCAL = []*IndexColumn{yyDollar[1].indexColumnUnion()} } yyVAL.union = yyLOCAL - case 419: + case 422: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2504 +//line sql.y:2506 { yySLICE := (*[]*IndexColumn)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].indexColumnUnion()) } - case 420: + case 423: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexColumn -//line sql.y:2510 +//line sql.y:2512 { yyLOCAL = &IndexColumn{Column: yyDollar[1].identifierCI, Length: yyDollar[2].literalUnion(), Direction: yyDollar[3].orderDirectionUnion()} } yyVAL.union = yyLOCAL - case 421: + case 424: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *IndexColumn -//line sql.y:2514 +//line sql.y:2516 { yyLOCAL = &IndexColumn{Expression: yyDollar[2].exprUnion(), Direction: yyDollar[4].orderDirectionUnion()} } yyVAL.union = yyLOCAL - case 422: + case 425: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ConstraintDefinition -//line sql.y:2520 +//line sql.y:2522 { yyLOCAL = &ConstraintDefinition{Name: yyDollar[2].identifierCI, Details: yyDollar[3].constraintInfoUnion()} } yyVAL.union = yyLOCAL - case 423: + case 426: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConstraintDefinition -//line sql.y:2524 +//line sql.y:2526 { yyLOCAL = &ConstraintDefinition{Details: yyDollar[1].constraintInfoUnion()} } yyVAL.union = yyLOCAL - case 424: + case 427: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ConstraintDefinition -//line sql.y:2530 +//line sql.y:2532 { yyLOCAL = &ConstraintDefinition{Name: yyDollar[2].identifierCI, Details: yyDollar[3].constraintInfoUnion()} } yyVAL.union = yyLOCAL - case 425: + case 428: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConstraintDefinition -//line sql.y:2534 +//line sql.y:2536 { yyLOCAL = &ConstraintDefinition{Details: yyDollar[1].constraintInfoUnion()} } yyVAL.union = yyLOCAL - case 426: + case 429: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL ConstraintInfo -//line sql.y:2540 +//line sql.y:2542 { yyLOCAL = &ForeignKeyDefinition{IndexName: NewIdentifierCI(yyDollar[3].str), Source: yyDollar[5].columnsUnion(), ReferenceDefinition: yyDollar[7].referenceDefinitionUnion()} } yyVAL.union = yyLOCAL - case 427: + case 430: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2546 +//line sql.y:2548 { yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion()} } yyVAL.union = yyLOCAL - case 428: + case 431: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2550 +//line sql.y:2552 { yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnDelete: yyDollar[7].referenceActionUnion()} } yyVAL.union = yyLOCAL - case 429: + case 432: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2554 +//line sql.y:2556 { yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnUpdate: yyDollar[7].referenceActionUnion()} } yyVAL.union = yyLOCAL - case 430: + case 433: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2558 +//line sql.y:2560 { yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnDelete: yyDollar[7].referenceActionUnion(), OnUpdate: yyDollar[8].referenceActionUnion()} } yyVAL.union = yyLOCAL - case 431: + case 434: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2562 +//line sql.y:2564 { yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnUpdate: yyDollar[7].referenceActionUnion(), OnDelete: yyDollar[8].referenceActionUnion()} } yyVAL.union = yyLOCAL - case 432: + case 435: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2567 +//line sql.y:2569 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 433: + case 436: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2571 +//line sql.y:2573 { yyLOCAL = yyDollar[1].referenceDefinitionUnion() } yyVAL.union = yyLOCAL - case 434: + case 437: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL ConstraintInfo -//line sql.y:2577 +//line sql.y:2579 { yyLOCAL = &CheckConstraintDefinition{Expr: yyDollar[3].exprUnion(), Enforced: yyDollar[5].booleanUnion()} } yyVAL.union = yyLOCAL - case 435: + case 438: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2583 +//line sql.y:2585 { yyLOCAL = yyDollar[2].matchActionUnion() } yyVAL.union = yyLOCAL - case 436: + case 439: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2589 +//line sql.y:2591 { yyLOCAL = Full } yyVAL.union = yyLOCAL - case 437: + case 440: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2593 +//line sql.y:2595 { yyLOCAL = Partial } yyVAL.union = yyLOCAL - case 438: + case 441: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2597 +//line sql.y:2599 { yyLOCAL = Simple } yyVAL.union = yyLOCAL - case 439: + case 442: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2602 +//line sql.y:2604 { yyLOCAL = DefaultMatch } yyVAL.union = yyLOCAL - case 440: + case 443: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2606 +//line sql.y:2608 { yyLOCAL = yyDollar[1].matchActionUnion() } yyVAL.union = yyLOCAL - case 441: + case 444: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2612 +//line sql.y:2614 { yyLOCAL = yyDollar[3].referenceActionUnion() } yyVAL.union = yyLOCAL - case 442: + case 445: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2618 +//line sql.y:2620 { yyLOCAL = yyDollar[3].referenceActionUnion() } yyVAL.union = yyLOCAL - case 443: + case 446: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2624 +//line sql.y:2626 { yyLOCAL = Restrict } yyVAL.union = yyLOCAL - case 444: + case 447: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2628 +//line sql.y:2630 { yyLOCAL = Cascade } yyVAL.union = yyLOCAL - case 445: + case 448: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2632 +//line sql.y:2634 { yyLOCAL = NoAction } yyVAL.union = yyLOCAL - case 446: + case 449: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2636 +//line sql.y:2638 { yyLOCAL = SetDefault } yyVAL.union = yyLOCAL - case 447: + case 450: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2640 +//line sql.y:2642 { yyLOCAL = SetNull } yyVAL.union = yyLOCAL - case 448: + case 451: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2645 +//line sql.y:2647 { yyVAL.str = "" } - case 449: + case 452: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2649 +//line sql.y:2651 { yyVAL.str = string(yyDollar[1].str) } - case 450: + case 453: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2653 +//line sql.y:2655 { yyVAL.str = string(yyDollar[1].str) } - case 451: + case 454: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2659 +//line sql.y:2661 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 452: + case 455: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:2663 +//line sql.y:2665 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 453: + case 456: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:2668 +//line sql.y:2670 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 454: + case 457: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2672 +//line sql.y:2674 { yyLOCAL = yyDollar[1].booleanUnion() } yyVAL.union = yyLOCAL - case 455: + case 458: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL TableOptions -//line sql.y:2677 +//line sql.y:2679 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 456: + case 459: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableOptions -//line sql.y:2681 +//line sql.y:2683 { yyLOCAL = yyDollar[1].tableOptionsUnion() } yyVAL.union = yyLOCAL - case 457: + case 460: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableOptions -//line sql.y:2687 +//line sql.y:2689 { yyLOCAL = TableOptions{yyDollar[1].tableOptionUnion()} } yyVAL.union = yyLOCAL - case 458: + case 461: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2691 +//line sql.y:2693 { yySLICE := (*TableOptions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].tableOptionUnion()) } - case 459: + case 462: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2695 +//line sql.y:2697 { yySLICE := (*TableOptions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].tableOptionUnion()) } - case 460: + case 463: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableOptions -//line sql.y:2701 +//line sql.y:2703 { yyLOCAL = TableOptions{yyDollar[1].tableOptionUnion()} } yyVAL.union = yyLOCAL - case 461: + case 464: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2705 +//line sql.y:2707 { yySLICE := (*TableOptions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].tableOptionUnion()) } - case 462: + case 465: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2711 +//line sql.y:2713 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 463: + case 466: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2715 +//line sql.y:2717 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 464: + case 467: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2719 +//line sql.y:2721 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 465: + case 468: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2723 +//line sql.y:2725 { yyLOCAL = &TableOption{Name: (string(yyDollar[2].str)), String: yyDollar[4].str, CaseSensitive: true} } yyVAL.union = yyLOCAL - case 466: + case 469: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2727 +//line sql.y:2729 { yyLOCAL = &TableOption{Name: string(yyDollar[2].str), String: yyDollar[4].str, CaseSensitive: true} } yyVAL.union = yyLOCAL - case 467: + case 470: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2731 +//line sql.y:2733 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 468: + case 471: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2735 +//line sql.y:2737 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 469: + case 472: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2739 +//line sql.y:2741 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 470: + case 473: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2743 +//line sql.y:2745 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 471: + case 474: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2747 +//line sql.y:2749 { yyLOCAL = &TableOption{Name: (string(yyDollar[1].str) + " " + string(yyDollar[2].str)), Value: NewStrLiteral(yyDollar[4].str)} } yyVAL.union = yyLOCAL - case 472: + case 475: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2751 +//line sql.y:2753 { yyLOCAL = &TableOption{Name: (string(yyDollar[1].str) + " " + string(yyDollar[2].str)), Value: NewStrLiteral(yyDollar[4].str)} } yyVAL.union = yyLOCAL - case 473: + case 476: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2755 +//line sql.y:2757 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 474: + case 477: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2759 +//line sql.y:2761 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 475: + case 478: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2763 +//line sql.y:2765 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: yyDollar[3].identifierCS.String(), CaseSensitive: true} } yyVAL.union = yyLOCAL - case 476: + case 479: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2767 +//line sql.y:2769 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 477: + case 480: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2771 +//line sql.y:2773 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 478: + case 481: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2775 +//line sql.y:2777 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 479: + case 482: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2779 +//line sql.y:2781 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 480: + case 483: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2783 +//line sql.y:2785 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 481: + case 484: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2787 +//line sql.y:2789 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 482: + case 485: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2791 +//line sql.y:2793 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 483: + case 486: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2795 +//line sql.y:2797 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 484: + case 487: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2799 +//line sql.y:2801 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 485: + case 488: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2803 +//line sql.y:2805 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 486: + case 489: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2807 +//line sql.y:2809 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 487: + case 490: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2811 +//line sql.y:2813 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 488: + case 491: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2815 +//line sql.y:2817 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 489: + case 492: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2819 +//line sql.y:2821 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 490: + case 493: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2823 +//line sql.y:2825 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 491: + case 494: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2827 +//line sql.y:2829 { - yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: (yyDollar[3].identifierCI.String() + yyDollar[4].str)} + yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: (yyDollar[3].identifierCI.String() + yyDollar[4].str), CaseSensitive: true} } yyVAL.union = yyLOCAL - case 492: + case 495: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2831 +//line sql.y:2833 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Tables: yyDollar[4].tableNamesUnion()} } yyVAL.union = yyLOCAL - case 493: + case 496: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2836 +//line sql.y:2838 { yyVAL.str = "" } - case 494: + case 497: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2840 +//line sql.y:2842 { yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str) } - case 495: + case 498: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2844 +//line sql.y:2846 { yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str) } - case 505: + case 508: + yyDollar = yyS[yypt-3 : yypt+1] +//line sql.y:2865 + { + yyVAL.str = String(TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS}) + } + case 509: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2863 +//line sql.y:2869 { yyVAL.str = yyDollar[1].identifierCI.String() } - case 506: + case 510: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2867 +//line sql.y:2873 { yyVAL.str = encodeSQLString(yyDollar[1].str) } - case 507: + case 511: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2871 +//line sql.y:2877 { yyVAL.str = string(yyDollar[1].str) } - case 508: + case 512: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2876 +//line sql.y:2882 { yyVAL.str = "" } - case 510: + case 514: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:2882 +//line sql.y:2888 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 511: + case 515: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2886 +//line sql.y:2892 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 512: + case 516: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ColName -//line sql.y:2891 +//line sql.y:2897 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 513: + case 517: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColName -//line sql.y:2895 +//line sql.y:2901 { yyLOCAL = yyDollar[2].colNameUnion() } yyVAL.union = yyLOCAL - case 514: + case 518: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2900 +//line sql.y:2906 { yyVAL.str = "" } - case 515: + case 519: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2904 +//line sql.y:2910 { yyVAL.str = string(yyDollar[2].str) } - case 516: + case 520: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *Literal -//line sql.y:2909 +//line sql.y:2915 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 517: + case 521: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Literal -//line sql.y:2913 +//line sql.y:2919 { yyLOCAL = NewIntLiteral(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 518: + case 522: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Literal -//line sql.y:2917 +//line sql.y:2923 { yyLOCAL = NewDecimalLiteral(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 519: + case 523: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:2922 +//line sql.y:2928 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 520: + case 524: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:2926 +//line sql.y:2932 { yyLOCAL = yyDollar[1].alterOptionsUnion() } yyVAL.union = yyLOCAL - case 521: + case 525: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2930 +//line sql.y:2936 { yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, &OrderByOption{Cols: yyDollar[5].columnsUnion()}) } - case 522: + case 526: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:2934 +//line sql.y:2940 { yyLOCAL = yyDollar[1].alterOptionsUnion() } yyVAL.union = yyLOCAL - case 523: + case 527: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2938 +//line sql.y:2944 { yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].alterOptionsUnion()...) } - case 524: + case 528: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:2942 +//line sql.y:2948 { yyLOCAL = append(append(yyDollar[1].alterOptionsUnion(), yyDollar[3].alterOptionsUnion()...), &OrderByOption{Cols: yyDollar[7].columnsUnion()}) } yyVAL.union = yyLOCAL - case 525: + case 529: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:2948 +//line sql.y:2954 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()} } yyVAL.union = yyLOCAL - case 526: + case 530: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2952 +//line sql.y:2958 { yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion()) } - case 527: + case 531: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2956 +//line sql.y:2962 { yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion()) } - case 528: + case 532: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2962 +//line sql.y:2968 { yyLOCAL = yyDollar[1].tableOptionsUnion() } yyVAL.union = yyLOCAL - case 529: + case 533: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2966 +//line sql.y:2972 { yyLOCAL = &AddConstraintDefinition{ConstraintDefinition: yyDollar[2].constraintDefinitionUnion()} } yyVAL.union = yyLOCAL - case 530: + case 534: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2970 +//line sql.y:2976 { yyLOCAL = &AddConstraintDefinition{ConstraintDefinition: yyDollar[2].constraintDefinitionUnion()} } yyVAL.union = yyLOCAL - case 531: + case 535: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2974 +//line sql.y:2980 { yyLOCAL = &AddIndexDefinition{IndexDefinition: yyDollar[2].indexDefinitionUnion()} } yyVAL.union = yyLOCAL - case 532: + case 536: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2978 +//line sql.y:2984 { yyLOCAL = &AddColumns{Columns: yyDollar[4].columnDefinitionsUnion()} } yyVAL.union = yyLOCAL - case 533: + case 537: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2982 +//line sql.y:2988 { yyLOCAL = &AddColumns{Columns: []*ColumnDefinition{yyDollar[3].columnDefinitionUnion()}, First: yyDollar[4].booleanUnion(), After: yyDollar[5].colNameUnion()} } yyVAL.union = yyLOCAL - case 534: + case 538: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2986 +//line sql.y:2992 { yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: true} } yyVAL.union = yyLOCAL - case 535: + case 539: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2990 +//line sql.y:2996 { - yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: false, DefaultVal: yyDollar[6].exprUnion()} + yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: false, DefaultVal: yyDollar[6].exprUnion(), DefaultLiteral: true} } yyVAL.union = yyLOCAL - case 536: + case 540: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2994 +//line sql.y:3000 { yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: false, DefaultVal: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 537: + case 541: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2998 +//line sql.y:3004 { val := false yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), Invisible: &val} } yyVAL.union = yyLOCAL - case 538: + case 542: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3003 +//line sql.y:3009 { val := true yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), Invisible: &val} } yyVAL.union = yyLOCAL - case 539: + case 543: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3008 +//line sql.y:3014 { yyLOCAL = &AlterCheck{Name: yyDollar[3].identifierCI, Enforced: yyDollar[4].booleanUnion()} } yyVAL.union = yyLOCAL - case 540: + case 544: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3012 +//line sql.y:3018 { yyLOCAL = &AlterIndex{Name: yyDollar[3].identifierCI, Invisible: false} } yyVAL.union = yyLOCAL - case 541: + case 545: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3016 +//line sql.y:3022 { yyLOCAL = &AlterIndex{Name: yyDollar[3].identifierCI, Invisible: true} } yyVAL.union = yyLOCAL - case 542: + case 546: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3020 +//line sql.y:3026 { yyLOCAL = &ChangeColumn{OldColumn: yyDollar[3].colNameUnion(), NewColDefinition: yyDollar[4].columnDefinitionUnion(), First: yyDollar[5].booleanUnion(), After: yyDollar[6].colNameUnion()} } yyVAL.union = yyLOCAL - case 543: + case 547: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3024 +//line sql.y:3030 { yyLOCAL = &ModifyColumn{NewColDefinition: yyDollar[3].columnDefinitionUnion(), First: yyDollar[4].booleanUnion(), After: yyDollar[5].colNameUnion()} } yyVAL.union = yyLOCAL - case 544: + case 548: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3028 +//line sql.y:3034 { yyLOCAL = &RenameColumn{OldName: yyDollar[3].colNameUnion(), NewName: yyDollar[5].colNameUnion()} } yyVAL.union = yyLOCAL - case 545: + case 549: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3032 +//line sql.y:3038 { yyLOCAL = &AlterCharset{CharacterSet: yyDollar[4].str, Collate: yyDollar[5].str} } yyVAL.union = yyLOCAL - case 546: + case 550: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3036 +//line sql.y:3042 { yyLOCAL = &KeyState{Enable: false} } yyVAL.union = yyLOCAL - case 547: + case 551: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3040 +//line sql.y:3046 { yyLOCAL = &KeyState{Enable: true} } yyVAL.union = yyLOCAL - case 548: + case 552: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3044 +//line sql.y:3050 { yyLOCAL = &TablespaceOperation{Import: false} } yyVAL.union = yyLOCAL - case 549: + case 553: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3048 +//line sql.y:3054 { yyLOCAL = &TablespaceOperation{Import: true} } yyVAL.union = yyLOCAL - case 550: + case 554: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3052 +//line sql.y:3058 { yyLOCAL = &DropColumn{Name: yyDollar[3].colNameUnion()} } yyVAL.union = yyLOCAL - case 551: + case 555: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3056 +//line sql.y:3062 { yyLOCAL = &DropKey{Type: NormalKeyType, Name: yyDollar[3].identifierCI} } yyVAL.union = yyLOCAL - case 552: + case 556: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3060 +//line sql.y:3066 { yyLOCAL = &DropKey{Type: PrimaryKeyType} } yyVAL.union = yyLOCAL - case 553: + case 557: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3064 +//line sql.y:3070 { yyLOCAL = &DropKey{Type: ForeignKeyType, Name: yyDollar[4].identifierCI} } yyVAL.union = yyLOCAL - case 554: + case 558: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3068 +//line sql.y:3074 { yyLOCAL = &DropKey{Type: CheckKeyType, Name: yyDollar[3].identifierCI} } yyVAL.union = yyLOCAL - case 555: + case 559: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3072 +//line sql.y:3078 { yyLOCAL = &DropKey{Type: CheckKeyType, Name: yyDollar[3].identifierCI} } yyVAL.union = yyLOCAL - case 556: + case 560: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3076 +//line sql.y:3082 { yyLOCAL = &Force{} } yyVAL.union = yyLOCAL - case 557: + case 561: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3080 +//line sql.y:3086 { yyLOCAL = &RenameTableName{Table: yyDollar[3].tableName} } yyVAL.union = yyLOCAL - case 558: + case 562: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3084 +//line sql.y:3090 { yyLOCAL = &RenameIndex{OldName: yyDollar[3].identifierCI, NewName: yyDollar[5].identifierCI} } yyVAL.union = yyLOCAL - case 559: + case 563: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:3090 +//line sql.y:3096 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()} } yyVAL.union = yyLOCAL - case 560: + case 564: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3094 +//line sql.y:3100 { yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion()) } - case 561: + case 565: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3100 +//line sql.y:3106 { yyLOCAL = AlgorithmValue(string(yyDollar[3].str)) } yyVAL.union = yyLOCAL - case 562: + case 566: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3104 +//line sql.y:3110 { yyLOCAL = AlgorithmValue(string(yyDollar[3].str)) } yyVAL.union = yyLOCAL - case 563: + case 567: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3108 +//line sql.y:3114 { yyLOCAL = AlgorithmValue(string(yyDollar[3].str)) } yyVAL.union = yyLOCAL - case 564: + case 568: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3112 +//line sql.y:3118 { yyLOCAL = AlgorithmValue(string(yyDollar[3].str)) } yyVAL.union = yyLOCAL - case 565: + case 569: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3116 +//line sql.y:3122 { yyLOCAL = &LockOption{Type: DefaultType} } yyVAL.union = yyLOCAL - case 566: + case 570: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3120 +//line sql.y:3126 { yyLOCAL = &LockOption{Type: NoneType} } yyVAL.union = yyLOCAL - case 567: + case 571: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3124 +//line sql.y:3130 { yyLOCAL = &LockOption{Type: SharedType} } yyVAL.union = yyLOCAL - case 568: + case 572: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3128 +//line sql.y:3134 { yyLOCAL = &LockOption{Type: ExclusiveType} } yyVAL.union = yyLOCAL - case 569: + case 573: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3132 +//line sql.y:3138 { yyLOCAL = &Validation{With: true} } yyVAL.union = yyLOCAL - case 570: + case 574: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3136 +//line sql.y:3142 { yyLOCAL = &Validation{With: false} } yyVAL.union = yyLOCAL - case 571: + case 575: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3142 +//line sql.y:3148 { yyDollar[1].alterTableUnion().FullyParsed = true yyDollar[1].alterTableUnion().AlterOptions = yyDollar[2].alterOptionsUnion() @@ -12890,10 +14021,10 @@ yydefault: yyLOCAL = yyDollar[1].alterTableUnion() } yyVAL.union = yyLOCAL - case 572: + case 576: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:3149 +//line sql.y:3155 { yyDollar[1].alterTableUnion().FullyParsed = true yyDollar[1].alterTableUnion().AlterOptions = yyDollar[2].alterOptionsUnion() @@ -12901,10 +14032,10 @@ yydefault: yyLOCAL = yyDollar[1].alterTableUnion() } yyVAL.union = yyLOCAL - case 573: + case 577: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:3156 +//line sql.y:3162 { yyDollar[1].alterTableUnion().FullyParsed = true yyDollar[1].alterTableUnion().AlterOptions = yyDollar[2].alterOptionsUnion() @@ -12912,28 +14043,28 @@ yydefault: yyLOCAL = yyDollar[1].alterTableUnion() } yyVAL.union = yyLOCAL - case 574: + case 578: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:3163 +//line sql.y:3169 { yyDollar[1].alterTableUnion().FullyParsed = true yyDollar[1].alterTableUnion().PartitionSpec = yyDollar[2].partSpecUnion() yyLOCAL = yyDollar[1].alterTableUnion() } yyVAL.union = yyLOCAL - case 575: + case 579: yyDollar = yyS[yypt-11 : yypt+1] var yyLOCAL Statement -//line sql.y:3169 +//line sql.y:3175 { - yyLOCAL = &AlterView{ViewName: yyDollar[7].tableName.ToViewName(), Comments: Comments(yyDollar[2].strs).Parsed(), Algorithm: yyDollar[3].str, Definer: yyDollar[4].definerUnion(), Security: yyDollar[5].str, Columns: yyDollar[8].columnsUnion(), Select: yyDollar[10].selStmtUnion(), CheckOption: yyDollar[11].str} + yyLOCAL = &AlterView{ViewName: yyDollar[7].tableName, Comments: Comments(yyDollar[2].strs).Parsed(), Algorithm: yyDollar[3].str, Definer: yyDollar[4].definerUnion(), Security: yyDollar[5].str, Columns: yyDollar[8].columnsUnion(), Select: yyDollar[10].selStmtUnion(), CheckOption: yyDollar[11].str} } yyVAL.union = yyLOCAL - case 576: + case 580: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3179 +//line sql.y:3185 { yyDollar[1].alterDatabaseUnion().FullyParsed = true yyDollar[1].alterDatabaseUnion().DBName = yyDollar[2].identifierCS @@ -12941,10 +14072,10 @@ yydefault: yyLOCAL = yyDollar[1].alterDatabaseUnion() } yyVAL.union = yyLOCAL - case 577: + case 581: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3186 +//line sql.y:3192 { yyDollar[1].alterDatabaseUnion().FullyParsed = true yyDollar[1].alterDatabaseUnion().DBName = yyDollar[2].identifierCS @@ -12952,10 +14083,10 @@ yydefault: yyLOCAL = yyDollar[1].alterDatabaseUnion() } yyVAL.union = yyLOCAL - case 578: + case 582: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Statement -//line sql.y:3193 +//line sql.y:3199 { yyLOCAL = &AlterVschema{ Action: CreateVindexDDLAction, @@ -12968,10 +14099,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 579: + case 583: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3205 +//line sql.y:3211 { yyLOCAL = &AlterVschema{ Action: DropVindexDDLAction, @@ -12982,26 +14113,26 @@ yydefault: } } yyVAL.union = yyLOCAL - case 580: + case 584: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3215 +//line sql.y:3221 { yyLOCAL = &AlterVschema{Action: AddVschemaTableDDLAction, Table: yyDollar[6].tableName} } yyVAL.union = yyLOCAL - case 581: + case 585: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3219 +//line sql.y:3225 { yyLOCAL = &AlterVschema{Action: DropVschemaTableDDLAction, Table: yyDollar[6].tableName} } yyVAL.union = yyLOCAL - case 582: + case 586: yyDollar = yyS[yypt-13 : yypt+1] var yyLOCAL Statement -//line sql.y:3223 +//line sql.y:3229 { yyLOCAL = &AlterVschema{ Action: AddColVindexDDLAction, @@ -13015,10 +14146,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 583: + case 587: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Statement -//line sql.y:3236 +//line sql.y:3242 { yyLOCAL = &AlterVschema{ Action: DropColVindexDDLAction, @@ -13029,18 +14160,18 @@ yydefault: } } yyVAL.union = yyLOCAL - case 584: + case 588: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3246 +//line sql.y:3252 { yyLOCAL = &AlterVschema{Action: AddSequenceDDLAction, Table: yyDollar[6].tableName} } yyVAL.union = yyLOCAL - case 585: + case 589: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL Statement -//line sql.y:3250 +//line sql.y:3256 { yyLOCAL = &AlterVschema{ Action: AddAutoIncDDLAction, @@ -13052,10 +14183,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 586: + case 590: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3261 +//line sql.y:3267 { yyLOCAL = &AlterMigration{ Type: RetryMigrationType, @@ -13063,10 +14194,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 587: + case 591: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3268 +//line sql.y:3274 { yyLOCAL = &AlterMigration{ Type: CleanupMigrationType, @@ -13074,10 +14205,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 588: + case 592: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3275 +//line sql.y:3281 { yyLOCAL = &AlterMigration{ Type: LaunchMigrationType, @@ -13085,10 +14216,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 589: + case 593: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3282 +//line sql.y:3288 { yyLOCAL = &AlterMigration{ Type: LaunchMigrationType, @@ -13097,20 +14228,20 @@ yydefault: } } yyVAL.union = yyLOCAL - case 590: + case 594: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3290 +//line sql.y:3296 { yyLOCAL = &AlterMigration{ Type: LaunchAllMigrationType, } } yyVAL.union = yyLOCAL - case 591: + case 595: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3296 +//line sql.y:3302 { yyLOCAL = &AlterMigration{ Type: CompleteMigrationType, @@ -13118,20 +14249,20 @@ yydefault: } } yyVAL.union = yyLOCAL - case 592: + case 596: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3303 +//line sql.y:3309 { yyLOCAL = &AlterMigration{ Type: CompleteAllMigrationType, } } yyVAL.union = yyLOCAL - case 593: + case 597: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3309 +//line sql.y:3315 { yyLOCAL = &AlterMigration{ Type: CancelMigrationType, @@ -13139,20 +14270,20 @@ yydefault: } } yyVAL.union = yyLOCAL - case 594: + case 598: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3316 +//line sql.y:3322 { yyLOCAL = &AlterMigration{ Type: CancelAllMigrationType, } } yyVAL.union = yyLOCAL - case 595: + case 599: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3322 +//line sql.y:3328 { yyLOCAL = &AlterMigration{ Type: ThrottleMigrationType, @@ -13162,10 +14293,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 596: + case 600: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3331 +//line sql.y:3337 { yyLOCAL = &AlterMigration{ Type: ThrottleAllMigrationType, @@ -13174,10 +14305,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 597: + case 601: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3339 +//line sql.y:3345 { yyLOCAL = &AlterMigration{ Type: UnthrottleMigrationType, @@ -13185,28 +14316,28 @@ yydefault: } } yyVAL.union = yyLOCAL - case 598: + case 602: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3346 +//line sql.y:3352 { yyLOCAL = &AlterMigration{ Type: UnthrottleAllMigrationType, } } yyVAL.union = yyLOCAL - case 599: + case 603: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3353 +//line sql.y:3359 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 600: + case 604: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3357 +//line sql.y:3363 { yyDollar[3].partitionOptionUnion().Partitions = yyDollar[4].integerUnion() yyDollar[3].partitionOptionUnion().SubPartition = yyDollar[5].subPartitionUnion() @@ -13214,10 +14345,10 @@ yydefault: yyLOCAL = yyDollar[3].partitionOptionUnion() } yyVAL.union = yyLOCAL - case 601: + case 605: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3366 +//line sql.y:3372 { yyLOCAL = &PartitionOption{ IsLinear: yyDollar[1].booleanUnion(), @@ -13226,10 +14357,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 602: + case 606: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3374 +//line sql.y:3380 { yyLOCAL = &PartitionOption{ IsLinear: yyDollar[1].booleanUnion(), @@ -13239,10 +14370,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 603: + case 607: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3383 +//line sql.y:3389 { yyLOCAL = &PartitionOption{ Type: yyDollar[1].partitionByTypeUnion(), @@ -13250,10 +14381,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 604: + case 608: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3390 +//line sql.y:3396 { yyLOCAL = &PartitionOption{ Type: yyDollar[1].partitionByTypeUnion(), @@ -13261,18 +14392,18 @@ yydefault: } } yyVAL.union = yyLOCAL - case 605: + case 609: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *SubPartition -//line sql.y:3398 +//line sql.y:3404 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 606: + case 610: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *SubPartition -//line sql.y:3402 +//line sql.y:3408 { yyLOCAL = &SubPartition{ IsLinear: yyDollar[3].booleanUnion(), @@ -13282,10 +14413,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 607: + case 611: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL *SubPartition -//line sql.y:3411 +//line sql.y:3417 { yyLOCAL = &SubPartition{ IsLinear: yyDollar[3].booleanUnion(), @@ -13296,682 +14427,682 @@ yydefault: } } yyVAL.union = yyLOCAL - case 608: + case 612: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []*PartitionDefinition -//line sql.y:3422 +//line sql.y:3428 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 609: + case 613: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL []*PartitionDefinition -//line sql.y:3426 +//line sql.y:3432 { yyLOCAL = yyDollar[2].partDefsUnion() } yyVAL.union = yyLOCAL - case 610: + case 614: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:3431 +//line sql.y:3437 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 611: + case 615: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:3435 +//line sql.y:3441 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 612: + case 616: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL int -//line sql.y:3440 +//line sql.y:3446 { yyLOCAL = 0 } yyVAL.union = yyLOCAL - case 613: + case 617: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL int -//line sql.y:3444 +//line sql.y:3450 { yyLOCAL = convertStringToInt(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 614: + case 618: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL TableExpr -//line sql.y:3450 +//line sql.y:3456 { yyLOCAL = &JSONTableExpr{Expr: yyDollar[3].exprUnion(), Filter: yyDollar[5].exprUnion(), Columns: yyDollar[6].jtColumnListUnion(), Alias: yyDollar[8].identifierCS} } yyVAL.union = yyLOCAL - case 615: + case 619: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL []*JtColumnDefinition -//line sql.y:3456 +//line sql.y:3462 { yyLOCAL = yyDollar[3].jtColumnListUnion() } yyVAL.union = yyLOCAL - case 616: + case 620: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*JtColumnDefinition -//line sql.y:3462 +//line sql.y:3468 { yyLOCAL = []*JtColumnDefinition{yyDollar[1].jtColumnDefinitionUnion()} } yyVAL.union = yyLOCAL - case 617: + case 621: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3466 +//line sql.y:3472 { yySLICE := (*[]*JtColumnDefinition)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].jtColumnDefinitionUnion()) } - case 618: + case 622: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3472 +//line sql.y:3478 { yyLOCAL = &JtColumnDefinition{JtOrdinal: &JtOrdinalColDef{Name: yyDollar[1].identifierCI}} } yyVAL.union = yyLOCAL - case 619: + case 623: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3476 +//line sql.y:3482 { yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str} jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion()} yyLOCAL = &JtColumnDefinition{JtPath: jtPath} } yyVAL.union = yyLOCAL - case 620: + case 624: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3482 +//line sql.y:3488 { yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str} jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion()} yyLOCAL = &JtColumnDefinition{JtPath: jtPath} } yyVAL.union = yyLOCAL - case 621: + case 625: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3488 +//line sql.y:3494 { yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str} jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion(), ErrorOnResponse: yyDollar[7].jtOnResponseUnion()} yyLOCAL = &JtColumnDefinition{JtPath: jtPath} } yyVAL.union = yyLOCAL - case 622: + case 626: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3494 +//line sql.y:3500 { yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str} jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion(), ErrorOnResponse: yyDollar[8].jtOnResponseUnion()} yyLOCAL = &JtColumnDefinition{JtPath: jtPath} } yyVAL.union = yyLOCAL - case 623: + case 627: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3500 +//line sql.y:3506 { jtNestedPath := &JtNestedPathColDef{Path: yyDollar[3].exprUnion(), Columns: yyDollar[4].jtColumnListUnion()} yyLOCAL = &JtColumnDefinition{JtNestedPath: jtNestedPath} } yyVAL.union = yyLOCAL - case 624: + case 628: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:3506 +//line sql.y:3512 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 625: + case 629: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:3510 +//line sql.y:3516 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 626: + case 630: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:3514 +//line sql.y:3520 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 627: + case 631: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:3518 +//line sql.y:3524 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 628: + case 632: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *JtOnResponse -//line sql.y:3524 +//line sql.y:3530 { yyLOCAL = yyDollar[1].jtOnResponseUnion() } yyVAL.union = yyLOCAL - case 629: + case 633: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *JtOnResponse -//line sql.y:3530 +//line sql.y:3536 { yyLOCAL = yyDollar[1].jtOnResponseUnion() } yyVAL.union = yyLOCAL - case 630: + case 634: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *JtOnResponse -//line sql.y:3536 +//line sql.y:3542 { yyLOCAL = &JtOnResponse{ResponseType: ErrorJSONType} } yyVAL.union = yyLOCAL - case 631: + case 635: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *JtOnResponse -//line sql.y:3540 +//line sql.y:3546 { yyLOCAL = &JtOnResponse{ResponseType: NullJSONType} } yyVAL.union = yyLOCAL - case 632: + case 636: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *JtOnResponse -//line sql.y:3544 +//line sql.y:3550 { yyLOCAL = &JtOnResponse{ResponseType: DefaultJSONType, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 633: + case 637: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL PartitionByType -//line sql.y:3550 +//line sql.y:3556 { yyLOCAL = RangeType } yyVAL.union = yyLOCAL - case 634: + case 638: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL PartitionByType -//line sql.y:3554 +//line sql.y:3560 { yyLOCAL = ListType } yyVAL.union = yyLOCAL - case 635: + case 639: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL int -//line sql.y:3559 +//line sql.y:3565 { yyLOCAL = -1 } yyVAL.union = yyLOCAL - case 636: + case 640: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL int -//line sql.y:3563 +//line sql.y:3569 { yyLOCAL = convertStringToInt(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 637: + case 641: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL int -//line sql.y:3568 +//line sql.y:3574 { yyLOCAL = -1 } yyVAL.union = yyLOCAL - case 638: + case 642: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL int -//line sql.y:3572 +//line sql.y:3578 { yyLOCAL = convertStringToInt(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 639: + case 643: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3578 +//line sql.y:3584 { yyLOCAL = &PartitionSpec{Action: AddAction, Definitions: []*PartitionDefinition{yyDollar[4].partDefUnion()}} } yyVAL.union = yyLOCAL - case 640: + case 644: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3582 +//line sql.y:3588 { yyLOCAL = &PartitionSpec{Action: DropAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 641: + case 645: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3586 +//line sql.y:3592 { yyLOCAL = &PartitionSpec{Action: ReorganizeAction, Names: yyDollar[3].partitionsUnion(), Definitions: yyDollar[6].partDefsUnion()} } yyVAL.union = yyLOCAL - case 642: + case 646: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3590 +//line sql.y:3596 { yyLOCAL = &PartitionSpec{Action: DiscardAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 643: + case 647: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3594 +//line sql.y:3600 { yyLOCAL = &PartitionSpec{Action: DiscardAction, IsAll: true} } yyVAL.union = yyLOCAL - case 644: + case 648: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3598 +//line sql.y:3604 { yyLOCAL = &PartitionSpec{Action: ImportAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 645: + case 649: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3602 +//line sql.y:3608 { yyLOCAL = &PartitionSpec{Action: ImportAction, IsAll: true} } yyVAL.union = yyLOCAL - case 646: + case 650: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3606 +//line sql.y:3612 { yyLOCAL = &PartitionSpec{Action: TruncateAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 647: + case 651: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3610 +//line sql.y:3616 { yyLOCAL = &PartitionSpec{Action: TruncateAction, IsAll: true} } yyVAL.union = yyLOCAL - case 648: + case 652: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3614 +//line sql.y:3620 { yyLOCAL = &PartitionSpec{Action: CoalesceAction, Number: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 649: + case 653: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3618 +//line sql.y:3624 { yyLOCAL = &PartitionSpec{Action: ExchangeAction, Names: Partitions{yyDollar[3].identifierCI}, TableName: yyDollar[6].tableName, WithoutValidation: yyDollar[7].booleanUnion()} } yyVAL.union = yyLOCAL - case 650: + case 654: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3622 +//line sql.y:3628 { yyLOCAL = &PartitionSpec{Action: AnalyzeAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 651: + case 655: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3626 +//line sql.y:3632 { yyLOCAL = &PartitionSpec{Action: AnalyzeAction, IsAll: true} } yyVAL.union = yyLOCAL - case 652: + case 656: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3630 +//line sql.y:3636 { yyLOCAL = &PartitionSpec{Action: CheckAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 653: + case 657: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3634 +//line sql.y:3640 { yyLOCAL = &PartitionSpec{Action: CheckAction, IsAll: true} } yyVAL.union = yyLOCAL - case 654: + case 658: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3638 +//line sql.y:3644 { yyLOCAL = &PartitionSpec{Action: OptimizeAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 655: + case 659: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3642 +//line sql.y:3648 { yyLOCAL = &PartitionSpec{Action: OptimizeAction, IsAll: true} } yyVAL.union = yyLOCAL - case 656: + case 660: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3646 +//line sql.y:3652 { yyLOCAL = &PartitionSpec{Action: RebuildAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 657: + case 661: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3650 +//line sql.y:3656 { yyLOCAL = &PartitionSpec{Action: RebuildAction, IsAll: true} } yyVAL.union = yyLOCAL - case 658: + case 662: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3654 +//line sql.y:3660 { yyLOCAL = &PartitionSpec{Action: RepairAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 659: + case 663: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3658 +//line sql.y:3664 { yyLOCAL = &PartitionSpec{Action: RepairAction, IsAll: true} } yyVAL.union = yyLOCAL - case 660: + case 664: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3662 +//line sql.y:3668 { yyLOCAL = &PartitionSpec{Action: UpgradeAction} } yyVAL.union = yyLOCAL - case 661: + case 665: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:3667 +//line sql.y:3673 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 662: + case 666: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:3671 +//line sql.y:3677 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 663: + case 667: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:3675 +//line sql.y:3681 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 664: + case 668: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*PartitionDefinition -//line sql.y:3681 +//line sql.y:3687 { yyLOCAL = []*PartitionDefinition{yyDollar[1].partDefUnion()} } yyVAL.union = yyLOCAL - case 665: + case 669: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3685 +//line sql.y:3691 { yySLICE := (*[]*PartitionDefinition)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].partDefUnion()) } - case 666: + case 670: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3691 +//line sql.y:3697 { yyVAL.partDefUnion().Options = yyDollar[2].partitionDefinitionOptionsUnion() } - case 667: + case 671: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3696 +//line sql.y:3702 { yyLOCAL = &PartitionDefinitionOptions{} } yyVAL.union = yyLOCAL - case 668: + case 672: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3700 +//line sql.y:3706 { yyDollar[1].partitionDefinitionOptionsUnion().ValueRange = yyDollar[2].partitionValueRangeUnion() yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 669: + case 673: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3705 +//line sql.y:3711 { yyDollar[1].partitionDefinitionOptionsUnion().Comment = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 670: + case 674: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3710 +//line sql.y:3716 { yyDollar[1].partitionDefinitionOptionsUnion().Engine = yyDollar[2].partitionEngineUnion() yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 671: + case 675: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3715 +//line sql.y:3721 { yyDollar[1].partitionDefinitionOptionsUnion().DataDirectory = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 672: + case 676: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3720 +//line sql.y:3726 { yyDollar[1].partitionDefinitionOptionsUnion().IndexDirectory = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 673: + case 677: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3725 +//line sql.y:3731 { val := yyDollar[2].integerUnion() yyDollar[1].partitionDefinitionOptionsUnion().MaxRows = &val yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 674: + case 678: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3731 +//line sql.y:3737 { val := yyDollar[2].integerUnion() yyDollar[1].partitionDefinitionOptionsUnion().MinRows = &val yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 675: + case 679: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3737 +//line sql.y:3743 { yyDollar[1].partitionDefinitionOptionsUnion().TableSpace = yyDollar[2].str yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 676: + case 680: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3742 +//line sql.y:3748 { yyDollar[1].partitionDefinitionOptionsUnion().SubPartitionDefinitions = yyDollar[2].subPartitionDefinitionsUnion() yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 677: + case 681: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SubPartitionDefinitions -//line sql.y:3748 +//line sql.y:3754 { yyLOCAL = yyDollar[2].subPartitionDefinitionsUnion() } yyVAL.union = yyLOCAL - case 678: + case 682: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SubPartitionDefinitions -//line sql.y:3754 +//line sql.y:3760 { yyLOCAL = SubPartitionDefinitions{yyDollar[1].subPartitionDefinitionUnion()} } yyVAL.union = yyLOCAL - case 679: + case 683: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3758 +//line sql.y:3764 { yySLICE := (*SubPartitionDefinitions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].subPartitionDefinitionUnion()) } - case 680: + case 684: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SubPartitionDefinition -//line sql.y:3764 +//line sql.y:3770 { yyLOCAL = &SubPartitionDefinition{Name: yyDollar[2].identifierCI, Options: yyDollar[3].subPartitionDefinitionOptionsUnion()} } yyVAL.union = yyLOCAL - case 681: + case 685: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3769 +//line sql.y:3775 { yyLOCAL = &SubPartitionDefinitionOptions{} } yyVAL.union = yyLOCAL - case 682: + case 686: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3773 +//line sql.y:3779 { yyDollar[1].subPartitionDefinitionOptionsUnion().Comment = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 683: + case 687: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3778 +//line sql.y:3784 { yyDollar[1].subPartitionDefinitionOptionsUnion().Engine = yyDollar[2].partitionEngineUnion() yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 684: + case 688: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3783 +//line sql.y:3789 { yyDollar[1].subPartitionDefinitionOptionsUnion().DataDirectory = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 685: + case 689: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3788 +//line sql.y:3794 { yyDollar[1].subPartitionDefinitionOptionsUnion().IndexDirectory = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 686: + case 690: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3793 +//line sql.y:3799 { val := yyDollar[2].integerUnion() yyDollar[1].subPartitionDefinitionOptionsUnion().MaxRows = &val yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 687: + case 691: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3799 +//line sql.y:3805 { val := yyDollar[2].integerUnion() yyDollar[1].subPartitionDefinitionOptionsUnion().MinRows = &val yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 688: + case 692: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3805 +//line sql.y:3811 { yyDollar[1].subPartitionDefinitionOptionsUnion().TableSpace = yyDollar[2].str yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 689: + case 693: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionValueRange -//line sql.y:3812 +//line sql.y:3818 { yyLOCAL = &PartitionValueRange{ Type: LessThanType, @@ -13979,10 +15110,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 690: + case 694: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionValueRange -//line sql.y:3819 +//line sql.y:3825 { yyLOCAL = &PartitionValueRange{ Type: LessThanType, @@ -13990,10 +15121,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 691: + case 695: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionValueRange -//line sql.y:3826 +//line sql.y:3832 { yyLOCAL = &PartitionValueRange{ Type: InType, @@ -14001,131 +15132,131 @@ yydefault: } } yyVAL.union = yyLOCAL - case 692: + case 696: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:3834 +//line sql.y:3840 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 693: + case 697: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:3838 +//line sql.y:3844 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 694: + case 698: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionEngine -//line sql.y:3844 +//line sql.y:3850 { yyLOCAL = &PartitionEngine{Storage: yyDollar[1].booleanUnion(), Name: yyDollar[4].identifierCS.String()} } yyVAL.union = yyLOCAL - case 695: + case 699: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *Literal -//line sql.y:3850 +//line sql.y:3856 { yyLOCAL = NewStrLiteral(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 696: + case 700: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *Literal -//line sql.y:3856 +//line sql.y:3862 { yyLOCAL = NewStrLiteral(yyDollar[4].str) } yyVAL.union = yyLOCAL - case 697: + case 701: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *Literal -//line sql.y:3862 +//line sql.y:3868 { yyLOCAL = NewStrLiteral(yyDollar[4].str) } yyVAL.union = yyLOCAL - case 698: + case 702: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL int -//line sql.y:3868 +//line sql.y:3874 { yyLOCAL = convertStringToInt(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 699: + case 703: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL int -//line sql.y:3874 +//line sql.y:3880 { yyLOCAL = convertStringToInt(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 700: + case 704: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3880 +//line sql.y:3886 { yyVAL.str = yyDollar[3].identifierCS.String() } - case 701: + case 705: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinition -//line sql.y:3886 +//line sql.y:3892 { yyLOCAL = &PartitionDefinition{Name: yyDollar[2].identifierCI} } yyVAL.union = yyLOCAL - case 702: + case 706: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3892 +//line sql.y:3898 { yyVAL.str = "" } - case 703: + case 707: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3896 +//line sql.y:3902 { yyVAL.str = "" } - case 704: + case 708: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3902 +//line sql.y:3908 { yyLOCAL = &RenameTable{TablePairs: yyDollar[3].renameTablePairsUnion()} } yyVAL.union = yyLOCAL - case 705: + case 709: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL []*RenameTablePair -//line sql.y:3908 +//line sql.y:3914 { yyLOCAL = []*RenameTablePair{{FromTable: yyDollar[1].tableName, ToTable: yyDollar[3].tableName}} } yyVAL.union = yyLOCAL - case 706: + case 710: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:3912 +//line sql.y:3918 { yySLICE := (*[]*RenameTablePair)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, &RenameTablePair{FromTable: yyDollar[3].tableName, ToTable: yyDollar[5].tableName}) } - case 707: + case 711: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3918 +//line sql.y:3924 { yyLOCAL = &DropTable{FromTables: yyDollar[6].tableNamesUnion(), IfExists: yyDollar[5].booleanUnion(), Comments: Comments(yyDollar[2].strs).Parsed(), Temp: yyDollar[3].booleanUnion()} } yyVAL.union = yyLOCAL - case 708: + case 712: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3922 +//line sql.y:3928 { // Change this to an alter statement if yyDollar[4].identifierCI.Lowered() == "primary" { @@ -14135,1319 +15266,1335 @@ yydefault: } } yyVAL.union = yyLOCAL - case 709: + case 713: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3931 +//line sql.y:3937 { yyLOCAL = &DropView{FromTables: yyDollar[5].tableNamesUnion(), Comments: Comments(yyDollar[2].strs).Parsed(), IfExists: yyDollar[4].booleanUnion()} } yyVAL.union = yyLOCAL - case 710: + case 714: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3935 +//line sql.y:3941 { yyLOCAL = &DropDatabase{Comments: Comments(yyDollar[2].strs).Parsed(), DBName: yyDollar[5].identifierCS, IfExists: yyDollar[4].booleanUnion()} } yyVAL.union = yyLOCAL - case 711: + case 715: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3941 +//line sql.y:3947 { yyLOCAL = &TruncateTable{Table: yyDollar[3].tableName} } yyVAL.union = yyLOCAL - case 712: + case 716: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:3945 +//line sql.y:3951 { yyLOCAL = &TruncateTable{Table: yyDollar[2].tableName} } yyVAL.union = yyLOCAL - case 713: + case 717: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3951 +//line sql.y:3957 { yyLOCAL = &OtherRead{} } yyVAL.union = yyLOCAL - case 714: - yyDollar = yyS[yypt-3 : yypt+1] + case 718: + yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3957 +//line sql.y:3963 { - yyLOCAL = &Show{&ShowBasic{Command: Charset, Filter: yyDollar[3].showFilterUnion()}} + yyLOCAL = &PurgeBinaryLogs{To: string(yyDollar[5].str)} } yyVAL.union = yyLOCAL - case 715: - yyDollar = yyS[yypt-3 : yypt+1] - var yyLOCAL Statement -//line sql.y:3961 + case 719: + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Statement +//line sql.y:3967 + { + yyLOCAL = &PurgeBinaryLogs{Before: string(yyDollar[5].str)} + } + yyVAL.union = yyLOCAL + case 720: + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:3973 + { + yyLOCAL = &Show{&ShowBasic{Command: Charset, Filter: yyDollar[3].showFilterUnion()}} + } + yyVAL.union = yyLOCAL + case 721: + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:3977 { yyLOCAL = &Show{&ShowBasic{Command: Collation, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 716: + case 722: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3965 +//line sql.y:3981 { yyLOCAL = &Show{&ShowBasic{Full: yyDollar[2].booleanUnion(), Command: Column, Tbl: yyDollar[5].tableName, DbName: yyDollar[6].identifierCS, Filter: yyDollar[7].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 717: + case 723: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3969 +//line sql.y:3985 { yyLOCAL = &Show{&ShowBasic{Command: Database, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 718: + case 724: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3973 +//line sql.y:3989 { yyLOCAL = &Show{&ShowBasic{Command: Database, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 719: + case 725: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3977 +//line sql.y:3993 { yyLOCAL = &Show{&ShowBasic{Command: Keyspace, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 720: + case 726: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3981 +//line sql.y:3997 { yyLOCAL = &Show{&ShowBasic{Command: Keyspace, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 721: + case 727: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:3985 +//line sql.y:4001 { yyLOCAL = &Show{&ShowBasic{Command: Function, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 722: + case 728: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3989 +//line sql.y:4005 { yyLOCAL = &Show{&ShowBasic{Command: Index, Tbl: yyDollar[5].tableName, DbName: yyDollar[6].identifierCS, Filter: yyDollar[7].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 723: + case 729: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3993 +//line sql.y:4009 { yyLOCAL = &Show{&ShowBasic{Command: OpenTable, DbName: yyDollar[4].identifierCS, Filter: yyDollar[5].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 724: + case 730: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:3997 +//line sql.y:4013 { yyLOCAL = &Show{&ShowBasic{Command: Privilege}} } yyVAL.union = yyLOCAL - case 725: + case 731: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4001 +//line sql.y:4017 { yyLOCAL = &Show{&ShowBasic{Command: Procedure, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 726: + case 732: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4005 +//line sql.y:4021 { yyLOCAL = &Show{&ShowBasic{Command: StatusSession, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 727: + case 733: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4009 +//line sql.y:4025 { yyLOCAL = &Show{&ShowBasic{Command: StatusGlobal, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 728: + case 734: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4013 +//line sql.y:4029 { yyLOCAL = &Show{&ShowBasic{Command: VariableSession, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 729: + case 735: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4017 +//line sql.y:4033 { yyLOCAL = &Show{&ShowBasic{Command: VariableGlobal, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 730: + case 736: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4021 +//line sql.y:4037 { yyLOCAL = &Show{&ShowBasic{Command: TableStatus, DbName: yyDollar[4].identifierCS, Filter: yyDollar[5].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 731: + case 737: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4025 +//line sql.y:4041 { yyLOCAL = &Show{&ShowBasic{Command: Table, Full: yyDollar[2].booleanUnion(), DbName: yyDollar[4].identifierCS, Filter: yyDollar[5].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 732: + case 738: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4029 +//line sql.y:4045 { yyLOCAL = &Show{&ShowBasic{Command: Trigger, DbName: yyDollar[3].identifierCS, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 733: + case 739: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4033 +//line sql.y:4049 { yyLOCAL = &Show{&ShowCreate{Command: CreateDb, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 734: + case 740: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4037 +//line sql.y:4053 { yyLOCAL = &Show{&ShowCreate{Command: CreateE, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 735: + case 741: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4041 +//line sql.y:4057 { yyLOCAL = &Show{&ShowCreate{Command: CreateF, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 736: + case 742: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4045 +//line sql.y:4061 { yyLOCAL = &Show{&ShowCreate{Command: CreateProc, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 737: + case 743: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4049 +//line sql.y:4065 { yyLOCAL = &Show{&ShowCreate{Command: CreateTbl, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 738: + case 744: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4053 +//line sql.y:4069 { yyLOCAL = &Show{&ShowCreate{Command: CreateTr, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 739: + case 745: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4057 +//line sql.y:4073 { yyLOCAL = &Show{&ShowCreate{Command: CreateV, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 740: + case 746: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4061 +//line sql.y:4077 { yyLOCAL = &Show{&ShowBasic{Command: Engines}} } yyVAL.union = yyLOCAL - case 741: + case 747: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4065 +//line sql.y:4081 { yyLOCAL = &Show{&ShowBasic{Command: Plugins}} } yyVAL.union = yyLOCAL - case 742: + case 748: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4069 +//line sql.y:4085 { yyLOCAL = &Show{&ShowBasic{Command: GtidExecGlobal, DbName: yyDollar[4].identifierCS}} } yyVAL.union = yyLOCAL - case 743: + case 749: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4073 +//line sql.y:4089 { yyLOCAL = &Show{&ShowBasic{Command: VGtidExecGlobal, DbName: yyDollar[4].identifierCS}} } yyVAL.union = yyLOCAL - case 744: + case 750: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4077 +//line sql.y:4093 { yyLOCAL = &Show{&ShowBasic{Command: VitessVariables, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 745: + case 751: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4081 +//line sql.y:4097 { yyLOCAL = &Show{&ShowBasic{Command: VitessMigrations, Filter: yyDollar[4].showFilterUnion(), DbName: yyDollar[3].identifierCS}} } yyVAL.union = yyLOCAL - case 746: + case 752: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4085 +//line sql.y:4101 { yyLOCAL = &ShowMigrationLogs{UUID: string(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 747: + case 753: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4089 +//line sql.y:4105 { yyLOCAL = &ShowThrottledApps{} } yyVAL.union = yyLOCAL - case 748: + case 754: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4093 +//line sql.y:4109 { yyLOCAL = &Show{&ShowBasic{Command: VitessReplicationStatus, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 749: + case 755: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4097 +//line sql.y:4113 { yyLOCAL = &ShowThrottlerStatus{} } yyVAL.union = yyLOCAL - case 750: + case 756: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4101 +//line sql.y:4117 { yyLOCAL = &Show{&ShowBasic{Command: VschemaTables}} } yyVAL.union = yyLOCAL - case 751: + case 757: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4105 +//line sql.y:4121 { yyLOCAL = &Show{&ShowBasic{Command: VschemaVindexes}} } yyVAL.union = yyLOCAL - case 752: + case 758: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4109 +//line sql.y:4125 { yyLOCAL = &Show{&ShowBasic{Command: VschemaVindexes, Tbl: yyDollar[5].tableName}} } yyVAL.union = yyLOCAL - case 753: + case 759: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4113 +//line sql.y:4129 { yyLOCAL = &Show{&ShowBasic{Command: Warnings}} } yyVAL.union = yyLOCAL - case 754: + case 760: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4117 +//line sql.y:4133 { yyLOCAL = &Show{&ShowBasic{Command: VitessShards, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 755: + case 761: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4121 +//line sql.y:4137 { yyLOCAL = &Show{&ShowBasic{Command: VitessTablets, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 756: + case 762: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4125 +//line sql.y:4141 { yyLOCAL = &Show{&ShowBasic{Command: VitessTarget}} } yyVAL.union = yyLOCAL - case 757: + case 763: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4132 +//line sql.y:4148 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].identifierCI.String())}} } yyVAL.union = yyLOCAL - case 758: + case 764: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4136 +//line sql.y:4152 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str)}} } yyVAL.union = yyLOCAL - case 759: + case 765: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4140 +//line sql.y:4156 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + yyDollar[3].identifierCI.String()}} } yyVAL.union = yyLOCAL - case 760: + case 766: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4144 +//line sql.y:4160 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str)}} } yyVAL.union = yyLOCAL - case 761: + case 767: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4148 +//line sql.y:4164 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str)}} } yyVAL.union = yyLOCAL - case 762: + case 768: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4152 +//line sql.y:4168 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str) + " " + String(yyDollar[4].tableName)}} } yyVAL.union = yyLOCAL - case 763: + case 769: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4156 +//line sql.y:4172 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str) + " " + String(yyDollar[4].tableName)}} } yyVAL.union = yyLOCAL - case 764: + case 770: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4160 +//line sql.y:4176 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[3].str)}} } yyVAL.union = yyLOCAL - case 765: + case 771: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4164 +//line sql.y:4180 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str)}} } yyVAL.union = yyLOCAL - case 766: + case 772: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4170 +//line sql.y:4186 { yyVAL.str = "" } - case 767: + case 773: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4174 +//line sql.y:4190 { yyVAL.str = "extended " } - case 768: + case 774: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:4180 +//line sql.y:4196 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 769: + case 775: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:4184 +//line sql.y:4200 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 770: + case 776: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4190 +//line sql.y:4206 { yyVAL.str = string(yyDollar[1].str) } - case 771: + case 777: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4194 +//line sql.y:4210 { yyVAL.str = string(yyDollar[1].str) } - case 772: + case 778: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4200 +//line sql.y:4216 { yyVAL.identifierCS = NewIdentifierCS("") } - case 773: + case 779: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4204 +//line sql.y:4220 { yyVAL.identifierCS = yyDollar[2].identifierCS } - case 774: + case 780: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4208 +//line sql.y:4224 { yyVAL.identifierCS = yyDollar[2].identifierCS } - case 775: + case 781: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ShowFilter -//line sql.y:4214 +//line sql.y:4230 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 776: + case 782: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ShowFilter -//line sql.y:4218 +//line sql.y:4234 { yyLOCAL = &ShowFilter{Like: string(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 777: + case 783: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ShowFilter -//line sql.y:4222 +//line sql.y:4238 { yyLOCAL = &ShowFilter{Filter: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 778: + case 784: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ShowFilter -//line sql.y:4228 +//line sql.y:4244 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 779: + case 785: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ShowFilter -//line sql.y:4232 +//line sql.y:4248 { yyLOCAL = &ShowFilter{Like: string(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 780: + case 786: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4238 +//line sql.y:4254 { yyVAL.empty = struct{}{} } - case 781: + case 787: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4242 +//line sql.y:4258 { yyVAL.empty = struct{}{} } - case 782: + case 788: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4246 +//line sql.y:4262 { yyVAL.empty = struct{}{} } - case 783: + case 789: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4252 +//line sql.y:4268 { yyVAL.str = string(yyDollar[1].str) } - case 784: + case 790: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4256 +//line sql.y:4272 { yyVAL.str = string(yyDollar[1].str) } - case 785: + case 791: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4262 +//line sql.y:4278 { yyLOCAL = &Use{DBName: yyDollar[2].identifierCS} } yyVAL.union = yyLOCAL - case 786: + case 792: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4266 +//line sql.y:4282 { yyLOCAL = &Use{DBName: IdentifierCS{v: ""}} } yyVAL.union = yyLOCAL - case 787: + case 793: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4270 +//line sql.y:4286 { yyLOCAL = &Use{DBName: NewIdentifierCS(yyDollar[2].identifierCS.String() + "@" + string(yyDollar[3].str))} } yyVAL.union = yyLOCAL - case 788: + case 794: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4277 +//line sql.y:4293 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 789: + case 795: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4281 +//line sql.y:4297 { yyVAL.identifierCS = NewIdentifierCS("@" + string(yyDollar[1].str)) } - case 790: + case 796: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4285 +//line sql.y:4301 { yyVAL.identifierCS = NewIdentifierCS("@@" + string(yyDollar[1].str)) } - case 791: + case 797: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4289 +//line sql.y:4305 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 792: + case 798: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4296 +//line sql.y:4312 { yyLOCAL = &Begin{} } yyVAL.union = yyLOCAL - case 793: + case 799: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4300 +//line sql.y:4316 { yyLOCAL = &Begin{TxAccessModes: yyDollar[3].txAccessModesUnion()} } yyVAL.union = yyLOCAL - case 794: + case 800: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []TxAccessMode -//line sql.y:4305 +//line sql.y:4321 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 795: + case 801: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []TxAccessMode -//line sql.y:4309 +//line sql.y:4325 { yyLOCAL = yyDollar[1].txAccessModesUnion() } yyVAL.union = yyLOCAL - case 796: + case 802: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []TxAccessMode -//line sql.y:4315 +//line sql.y:4331 { yyLOCAL = []TxAccessMode{yyDollar[1].txAccessModeUnion()} } yyVAL.union = yyLOCAL - case 797: + case 803: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4319 +//line sql.y:4335 { yySLICE := (*[]TxAccessMode)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].txAccessModeUnion()) } - case 798: + case 804: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL TxAccessMode -//line sql.y:4325 +//line sql.y:4341 { yyLOCAL = WithConsistentSnapshot } yyVAL.union = yyLOCAL - case 799: + case 805: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL TxAccessMode -//line sql.y:4329 +//line sql.y:4345 { yyLOCAL = ReadWrite } yyVAL.union = yyLOCAL - case 800: + case 806: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL TxAccessMode -//line sql.y:4333 +//line sql.y:4349 { yyLOCAL = ReadOnly } yyVAL.union = yyLOCAL - case 801: + case 807: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4340 +//line sql.y:4356 { yyLOCAL = &Commit{} } yyVAL.union = yyLOCAL - case 802: + case 808: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4346 +//line sql.y:4362 { yyLOCAL = &Rollback{} } yyVAL.union = yyLOCAL - case 803: + case 809: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4350 +//line sql.y:4366 { yyLOCAL = &SRollback{Name: yyDollar[5].identifierCI} } yyVAL.union = yyLOCAL - case 804: + case 810: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4355 +//line sql.y:4371 { yyVAL.empty = struct{}{} } - case 805: + case 811: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4357 +//line sql.y:4373 { yyVAL.empty = struct{}{} } - case 806: + case 812: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4360 +//line sql.y:4376 { yyVAL.empty = struct{}{} } - case 807: + case 813: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4362 +//line sql.y:4378 { yyVAL.empty = struct{}{} } - case 808: + case 814: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4366 +//line sql.y:4382 { yyLOCAL = &Savepoint{Name: yyDollar[2].identifierCI} } yyVAL.union = yyLOCAL - case 809: + case 815: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4372 +//line sql.y:4388 { yyLOCAL = &Release{Name: yyDollar[3].identifierCI} } yyVAL.union = yyLOCAL - case 810: + case 816: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4377 +//line sql.y:4393 { yyLOCAL = EmptyType } yyVAL.union = yyLOCAL - case 811: + case 817: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4381 +//line sql.y:4397 { yyLOCAL = JSONType } yyVAL.union = yyLOCAL - case 812: + case 818: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4385 +//line sql.y:4401 { yyLOCAL = TreeType } yyVAL.union = yyLOCAL - case 813: + case 819: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4389 +//line sql.y:4405 { yyLOCAL = VitessType } yyVAL.union = yyLOCAL - case 814: + case 820: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4393 +//line sql.y:4409 { yyLOCAL = VTExplainType } yyVAL.union = yyLOCAL - case 815: + case 821: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4397 +//line sql.y:4413 { yyLOCAL = TraditionalType } yyVAL.union = yyLOCAL - case 816: + case 822: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4401 +//line sql.y:4417 { yyLOCAL = AnalyzeType } yyVAL.union = yyLOCAL - case 817: + case 823: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL VExplainType -//line sql.y:4406 +//line sql.y:4422 { yyLOCAL = PlanVExplainType } yyVAL.union = yyLOCAL - case 818: + case 824: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL VExplainType -//line sql.y:4410 +//line sql.y:4426 { yyLOCAL = PlanVExplainType } yyVAL.union = yyLOCAL - case 819: + case 825: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL VExplainType -//line sql.y:4414 +//line sql.y:4430 { yyLOCAL = AllVExplainType } yyVAL.union = yyLOCAL - case 820: + case 826: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL VExplainType -//line sql.y:4418 +//line sql.y:4434 { yyLOCAL = QueriesVExplainType } yyVAL.union = yyLOCAL - case 821: + case 827: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4424 +//line sql.y:4440 { yyVAL.str = yyDollar[1].str } - case 822: + case 828: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4428 +//line sql.y:4444 { yyVAL.str = yyDollar[1].str } - case 823: + case 829: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4432 +//line sql.y:4448 { yyVAL.str = yyDollar[1].str } - case 824: + case 830: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4438 +//line sql.y:4454 { yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 825: + case 831: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4442 +//line sql.y:4458 { yyLOCAL = yyDollar[1].statementUnion() } yyVAL.union = yyLOCAL - case 826: + case 832: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4446 +//line sql.y:4462 { yyLOCAL = yyDollar[1].statementUnion() } yyVAL.union = yyLOCAL - case 827: + case 833: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4450 +//line sql.y:4466 { yyLOCAL = yyDollar[1].statementUnion() } yyVAL.union = yyLOCAL - case 828: + case 834: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4455 +//line sql.y:4471 { yyVAL.str = "" } - case 829: + case 835: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4459 +//line sql.y:4475 { yyVAL.str = yyDollar[1].identifierCI.val } - case 830: + case 836: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4463 +//line sql.y:4479 { yyVAL.str = encodeSQLString(yyDollar[1].str) } - case 831: + case 837: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4469 +//line sql.y:4485 { yyLOCAL = &ExplainTab{Table: yyDollar[3].tableName, Wild: yyDollar[4].str} } yyVAL.union = yyLOCAL - case 832: + case 838: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4473 +//line sql.y:4489 { yyLOCAL = &ExplainStmt{Type: yyDollar[3].explainTypeUnion(), Statement: yyDollar[4].statementUnion(), Comments: Comments(yyDollar[2].strs).Parsed()} } yyVAL.union = yyLOCAL - case 833: + case 839: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4479 +//line sql.y:4495 { yyLOCAL = &VExplainStmt{Type: yyDollar[3].vexplainTypeUnion(), Statement: yyDollar[4].statementUnion(), Comments: Comments(yyDollar[2].strs).Parsed()} } yyVAL.union = yyLOCAL - case 834: + case 840: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4485 +//line sql.y:4501 { yyLOCAL = &OtherAdmin{} } yyVAL.union = yyLOCAL - case 835: + case 841: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4489 +//line sql.y:4505 { yyLOCAL = &OtherAdmin{} } yyVAL.union = yyLOCAL - case 836: + case 842: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4495 +//line sql.y:4511 { yyLOCAL = &LockTables{Tables: yyDollar[3].tableAndLockTypesUnion()} } yyVAL.union = yyLOCAL - case 837: + case 843: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableAndLockTypes -//line sql.y:4501 +//line sql.y:4517 { yyLOCAL = TableAndLockTypes{yyDollar[1].tableAndLockTypeUnion()} } yyVAL.union = yyLOCAL - case 838: + case 844: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4505 +//line sql.y:4521 { yySLICE := (*TableAndLockTypes)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].tableAndLockTypeUnion()) } - case 839: + case 845: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *TableAndLockType -//line sql.y:4511 +//line sql.y:4527 { yyLOCAL = &TableAndLockType{Table: yyDollar[1].aliasedTableNameUnion(), Lock: yyDollar[2].lockTypeUnion()} } yyVAL.union = yyLOCAL - case 840: + case 846: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL LockType -//line sql.y:4517 +//line sql.y:4533 { yyLOCAL = Read } yyVAL.union = yyLOCAL - case 841: + case 847: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL LockType -//line sql.y:4521 +//line sql.y:4537 { yyLOCAL = ReadLocal } yyVAL.union = yyLOCAL - case 842: + case 848: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL LockType -//line sql.y:4525 +//line sql.y:4541 { yyLOCAL = Write } yyVAL.union = yyLOCAL - case 843: + case 849: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL LockType -//line sql.y:4529 +//line sql.y:4545 { yyLOCAL = LowPriorityWrite } yyVAL.union = yyLOCAL - case 844: + case 850: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4535 +//line sql.y:4551 { yyLOCAL = &UnlockTables{} } yyVAL.union = yyLOCAL - case 845: + case 851: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4541 +//line sql.y:4557 { yyLOCAL = &RevertMigration{Comments: Comments(yyDollar[2].strs).Parsed(), UUID: string(yyDollar[4].str)} } yyVAL.union = yyLOCAL - case 846: + case 852: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4547 +//line sql.y:4563 { yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), FlushOptions: yyDollar[3].strs} } yyVAL.union = yyLOCAL - case 847: + case 853: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4551 +//line sql.y:4567 { yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion()} } yyVAL.union = yyLOCAL - case 848: + case 854: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:4555 +//line sql.y:4571 { yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), WithLock: true} } yyVAL.union = yyLOCAL - case 849: + case 855: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4559 +//line sql.y:4575 { yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion()} } yyVAL.union = yyLOCAL - case 850: + case 856: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:4563 +//line sql.y:4579 { yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion(), WithLock: true} } yyVAL.union = yyLOCAL - case 851: + case 857: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:4567 +//line sql.y:4583 { yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion(), ForExport: true} } yyVAL.union = yyLOCAL - case 852: + case 858: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4573 +//line sql.y:4589 { yyVAL.strs = []string{yyDollar[1].str} } - case 853: + case 859: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4577 +//line sql.y:4593 { yyVAL.strs = append(yyDollar[1].strs, yyDollar[3].str) } - case 854: + case 860: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4583 +//line sql.y:4599 { yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) } - case 855: + case 861: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4587 +//line sql.y:4603 { yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) } - case 856: + case 862: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4591 +//line sql.y:4607 { yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) } - case 857: + case 863: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4595 +//line sql.y:4611 { yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) } - case 858: + case 864: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4599 +//line sql.y:4615 { yyVAL.str = string(yyDollar[1].str) } - case 859: + case 865: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4603 +//line sql.y:4619 { yyVAL.str = string(yyDollar[1].str) } - case 860: + case 866: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4607 +//line sql.y:4623 { yyVAL.str = string(yyDollar[1].str) } - case 861: + case 867: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4611 +//line sql.y:4627 { yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) + yyDollar[3].str } - case 862: + case 868: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4615 +//line sql.y:4631 { yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) } - case 863: + case 869: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4619 +//line sql.y:4635 { yyVAL.str = string(yyDollar[1].str) } - case 864: + case 870: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4623 +//line sql.y:4639 { yyVAL.str = string(yyDollar[1].str) } - case 865: + case 871: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4627 +//line sql.y:4643 { yyVAL.str = string(yyDollar[1].str) } - case 866: + case 872: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:4632 +//line sql.y:4648 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 867: + case 873: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:4636 +//line sql.y:4652 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 868: + case 874: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:4640 +//line sql.y:4656 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 869: + case 875: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4645 +//line sql.y:4661 { yyVAL.str = "" } - case 870: + case 876: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4649 +//line sql.y:4665 { yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str) + " " + yyDollar[3].identifierCI.String() } - case 871: + case 877: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4654 +//line sql.y:4670 { setAllowComments(yylex, true) } - case 872: + case 878: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4658 +//line sql.y:4674 { yyVAL.strs = yyDollar[2].strs setAllowComments(yylex, false) } - case 873: + case 879: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4664 +//line sql.y:4680 { yyVAL.strs = nil } - case 874: + case 880: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4668 +//line sql.y:4684 { yyVAL.strs = append(yyDollar[1].strs, yyDollar[2].str) } - case 875: + case 881: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:4674 +//line sql.y:4690 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 876: + case 882: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:4678 +//line sql.y:4694 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 877: + case 883: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:4682 +//line sql.y:4698 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 878: + case 884: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4687 +//line sql.y:4703 { yyVAL.str = "" } - case 879: + case 885: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4691 +//line sql.y:4707 { yyVAL.str = SQLNoCacheStr } - case 880: + case 886: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4695 +//line sql.y:4711 { yyVAL.str = SQLCacheStr } - case 881: + case 887: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:4700 +//line sql.y:4716 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 882: + case 888: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:4704 +//line sql.y:4720 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 883: + case 889: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:4708 +//line sql.y:4724 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 884: + case 890: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4714 +//line sql.y:4730 { yyLOCAL = &PrepareStmt{Name: yyDollar[3].identifierCI, Comments: Comments(yyDollar[2].strs).Parsed(), Statement: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 885: + case 891: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4718 +//line sql.y:4734 { yyLOCAL = &PrepareStmt{ Name: yyDollar[3].identifierCI, @@ -15456,595 +16603,595 @@ yydefault: } } yyVAL.union = yyLOCAL - case 886: + case 892: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4728 +//line sql.y:4744 { yyLOCAL = &ExecuteStmt{Name: yyDollar[3].identifierCI, Comments: Comments(yyDollar[2].strs).Parsed(), Arguments: yyDollar[4].variablesUnion()} } yyVAL.union = yyLOCAL - case 887: + case 893: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []*Variable -//line sql.y:4733 +//line sql.y:4749 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 888: + case 894: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL []*Variable -//line sql.y:4737 +//line sql.y:4753 { yyLOCAL = yyDollar[2].variablesUnion() } yyVAL.union = yyLOCAL - case 889: + case 895: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4743 +//line sql.y:4759 { - yyLOCAL = &DeallocateStmt{Type: DeallocateType, Comments: Comments(yyDollar[2].strs).Parsed(), Name: yyDollar[4].identifierCI} + yyLOCAL = &DeallocateStmt{Comments: Comments(yyDollar[2].strs).Parsed(), Name: yyDollar[4].identifierCI} } yyVAL.union = yyLOCAL - case 890: + case 896: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4747 +//line sql.y:4763 { - yyLOCAL = &DeallocateStmt{Type: DropType, Comments: Comments(yyDollar[2].strs).Parsed(), Name: yyDollar[4].identifierCI} + yyLOCAL = &DeallocateStmt{Comments: Comments(yyDollar[2].strs).Parsed(), Name: yyDollar[4].identifierCI} } yyVAL.union = yyLOCAL - case 891: + case 897: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL SelectExprs -//line sql.y:4752 +//line sql.y:4768 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 892: + case 898: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectExprs -//line sql.y:4756 +//line sql.y:4772 { yyLOCAL = yyDollar[1].selectExprsUnion() } yyVAL.union = yyLOCAL - case 893: + case 899: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4761 +//line sql.y:4777 { yyVAL.strs = nil } - case 894: + case 900: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4765 +//line sql.y:4781 { yyVAL.strs = []string{yyDollar[1].str} } - case 895: + case 901: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4769 +//line sql.y:4785 { // TODO: This is a hack since I couldn't get it to work in a nicer way. I got 'conflicts: 8 shift/reduce' yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str} } - case 896: + case 902: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4773 +//line sql.y:4789 { yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str, yyDollar[3].str} } - case 897: + case 903: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:4777 +//line sql.y:4793 { yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str, yyDollar[3].str, yyDollar[4].str} } - case 898: + case 904: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4783 +//line sql.y:4799 { yyVAL.str = SQLNoCacheStr } - case 899: + case 905: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4787 +//line sql.y:4803 { yyVAL.str = SQLCacheStr } - case 900: + case 906: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4791 +//line sql.y:4807 { yyVAL.str = DistinctStr } - case 901: + case 907: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4795 +//line sql.y:4811 { yyVAL.str = DistinctStr } - case 902: + case 908: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4799 +//line sql.y:4815 { yyVAL.str = StraightJoinHint } - case 903: + case 909: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4803 +//line sql.y:4819 { yyVAL.str = SQLCalcFoundRowsStr } - case 904: + case 910: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4807 +//line sql.y:4823 { yyVAL.str = AllStr // These are not picked up by NewSelect, and so ALL will be dropped. But this is OK, since it's redundant anyway } - case 905: + case 911: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectExprs -//line sql.y:4813 +//line sql.y:4829 { yyLOCAL = SelectExprs{yyDollar[1].selectExprUnion()} } yyVAL.union = yyLOCAL - case 906: + case 912: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4817 +//line sql.y:4833 { yySLICE := (*SelectExprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].selectExprUnion()) } - case 907: + case 913: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectExpr -//line sql.y:4823 +//line sql.y:4839 { yyLOCAL = &StarExpr{} } yyVAL.union = yyLOCAL - case 908: + case 914: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL SelectExpr -//line sql.y:4827 +//line sql.y:4843 { yyLOCAL = &AliasedExpr{Expr: yyDollar[1].exprUnion(), As: yyDollar[2].identifierCI} } yyVAL.union = yyLOCAL - case 909: + case 915: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectExpr -//line sql.y:4831 +//line sql.y:4847 { yyLOCAL = &StarExpr{TableName: TableName{Name: yyDollar[1].identifierCS}} } yyVAL.union = yyLOCAL - case 910: + case 916: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL SelectExpr -//line sql.y:4835 +//line sql.y:4851 { yyLOCAL = &StarExpr{TableName: TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS}} } yyVAL.union = yyLOCAL - case 911: + case 917: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4840 +//line sql.y:4856 { yyVAL.identifierCI = IdentifierCI{} } - case 912: + case 918: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4844 +//line sql.y:4860 { yyVAL.identifierCI = yyDollar[1].identifierCI } - case 913: + case 919: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4848 +//line sql.y:4864 { yyVAL.identifierCI = yyDollar[2].identifierCI } - case 915: + case 921: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4855 +//line sql.y:4871 { yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str)) } - case 916: + case 922: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL TableExprs -//line sql.y:4860 +//line sql.y:4876 { yyLOCAL = TableExprs{&AliasedTableExpr{Expr: TableName{Name: NewIdentifierCS("dual")}}} } yyVAL.union = yyLOCAL - case 917: + case 923: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableExprs -//line sql.y:4864 +//line sql.y:4880 { yyLOCAL = yyDollar[1].tableExprsUnion() } yyVAL.union = yyLOCAL - case 918: + case 924: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL TableExprs -//line sql.y:4870 +//line sql.y:4886 { yyLOCAL = yyDollar[2].tableExprsUnion() } yyVAL.union = yyLOCAL - case 919: + case 925: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableExprs -//line sql.y:4876 +//line sql.y:4892 { yyLOCAL = TableExprs{yyDollar[1].tableExprUnion()} } yyVAL.union = yyLOCAL - case 920: + case 926: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4880 +//line sql.y:4896 { yySLICE := (*TableExprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].tableExprUnion()) } - case 923: + case 929: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableExpr -//line sql.y:4890 +//line sql.y:4906 { yyLOCAL = yyDollar[1].aliasedTableNameUnion() } yyVAL.union = yyLOCAL - case 924: + case 930: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL TableExpr -//line sql.y:4894 +//line sql.y:4910 { yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].derivedTableUnion(), As: yyDollar[3].identifierCS, Columns: yyDollar[4].columnsUnion()} } yyVAL.union = yyLOCAL - case 925: + case 931: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL TableExpr -//line sql.y:4898 +//line sql.y:4914 { yyLOCAL = &ParenTableExpr{Exprs: yyDollar[2].tableExprsUnion()} } yyVAL.union = yyLOCAL - case 926: + case 932: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableExpr -//line sql.y:4902 +//line sql.y:4918 { yyLOCAL = yyDollar[1].tableExprUnion() } yyVAL.union = yyLOCAL - case 927: + case 933: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *DerivedTable -//line sql.y:4908 +//line sql.y:4924 { yyLOCAL = &DerivedTable{Lateral: false, Select: yyDollar[1].selStmtUnion()} } yyVAL.union = yyLOCAL - case 928: + case 934: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *DerivedTable -//line sql.y:4912 +//line sql.y:4928 { yyLOCAL = &DerivedTable{Lateral: true, Select: yyDollar[2].selStmtUnion()} } yyVAL.union = yyLOCAL - case 929: + case 935: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *AliasedTableExpr -//line sql.y:4918 +//line sql.y:4934 { yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].tableName, As: yyDollar[2].identifierCS, Hints: yyDollar[3].indexHintsUnion()} } yyVAL.union = yyLOCAL - case 930: + case 936: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *AliasedTableExpr -//line sql.y:4922 +//line sql.y:4938 { yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].tableName, Partitions: yyDollar[4].partitionsUnion(), As: yyDollar[6].identifierCS, Hints: yyDollar[7].indexHintsUnion()} } yyVAL.union = yyLOCAL - case 931: + case 937: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Columns -//line sql.y:4927 +//line sql.y:4943 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 932: + case 938: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Columns -//line sql.y:4931 +//line sql.y:4947 { yyLOCAL = yyDollar[2].columnsUnion() } yyVAL.union = yyLOCAL - case 933: + case 939: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Columns -//line sql.y:4936 +//line sql.y:4952 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 934: + case 940: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Columns -//line sql.y:4940 +//line sql.y:4956 { yyLOCAL = yyDollar[1].columnsUnion() } yyVAL.union = yyLOCAL - case 935: + case 941: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Columns -//line sql.y:4946 +//line sql.y:4962 { yyLOCAL = Columns{yyDollar[1].identifierCI} } yyVAL.union = yyLOCAL - case 936: + case 942: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4950 +//line sql.y:4966 { yySLICE := (*Columns)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].identifierCI) } - case 937: + case 943: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*Variable -//line sql.y:4956 +//line sql.y:4972 { yyLOCAL = []*Variable{yyDollar[1].variableUnion()} } yyVAL.union = yyLOCAL - case 938: + case 944: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4960 +//line sql.y:4976 { yySLICE := (*[]*Variable)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].variableUnion()) } - case 939: + case 945: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Columns -//line sql.y:4966 +//line sql.y:4982 { yyLOCAL = Columns{yyDollar[1].identifierCI} } yyVAL.union = yyLOCAL - case 940: + case 946: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Columns -//line sql.y:4970 +//line sql.y:4986 { yyLOCAL = Columns{NewIdentifierCI(string(yyDollar[1].str))} } yyVAL.union = yyLOCAL - case 941: + case 947: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4974 +//line sql.y:4990 { yySLICE := (*Columns)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].identifierCI) } - case 942: + case 948: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4978 +//line sql.y:4994 { yySLICE := (*Columns)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, NewIdentifierCI(string(yyDollar[3].str))) } - case 943: + case 949: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Partitions -//line sql.y:4984 +//line sql.y:5000 { yyLOCAL = Partitions{yyDollar[1].identifierCI} } yyVAL.union = yyLOCAL - case 944: + case 950: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4988 +//line sql.y:5004 { yySLICE := (*Partitions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].identifierCI) } - case 945: + case 951: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL TableExpr -//line sql.y:5001 +//line sql.y:5017 { yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: yyDollar[4].joinCondition} } yyVAL.union = yyLOCAL - case 946: + case 952: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL TableExpr -//line sql.y:5005 +//line sql.y:5021 { yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: yyDollar[4].joinCondition} } yyVAL.union = yyLOCAL - case 947: + case 953: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL TableExpr -//line sql.y:5009 +//line sql.y:5025 { yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: yyDollar[4].joinCondition} } yyVAL.union = yyLOCAL - case 948: + case 954: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL TableExpr -//line sql.y:5013 +//line sql.y:5029 { yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion()} } yyVAL.union = yyLOCAL - case 949: + case 955: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:5019 +//line sql.y:5035 { yyVAL.joinCondition = &JoinCondition{On: yyDollar[2].exprUnion()} } - case 950: + case 956: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:5021 +//line sql.y:5037 { yyVAL.joinCondition = &JoinCondition{Using: yyDollar[3].columnsUnion()} } - case 951: + case 957: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5025 +//line sql.y:5041 { yyVAL.joinCondition = &JoinCondition{} } - case 952: + case 958: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5027 +//line sql.y:5043 { yyVAL.joinCondition = yyDollar[1].joinCondition } - case 953: + case 959: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5031 +//line sql.y:5047 { yyVAL.joinCondition = &JoinCondition{} } - case 954: + case 960: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:5033 +//line sql.y:5049 { yyVAL.joinCondition = &JoinCondition{On: yyDollar[2].exprUnion()} } - case 955: + case 961: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5036 +//line sql.y:5052 { yyVAL.empty = struct{}{} } - case 956: + case 962: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5038 +//line sql.y:5054 { yyVAL.empty = struct{}{} } - case 957: + case 963: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5041 +//line sql.y:5057 { yyVAL.identifierCS = NewIdentifierCS("") } - case 958: + case 964: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5045 +//line sql.y:5061 { yyVAL.identifierCS = yyDollar[1].identifierCS } - case 959: + case 965: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:5049 +//line sql.y:5065 { yyVAL.identifierCS = yyDollar[2].identifierCS } - case 961: + case 967: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5056 +//line sql.y:5072 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 962: + case 968: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL JoinType -//line sql.y:5062 +//line sql.y:5078 { yyLOCAL = NormalJoinType } yyVAL.union = yyLOCAL - case 963: + case 969: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5066 +//line sql.y:5082 { yyLOCAL = NormalJoinType } yyVAL.union = yyLOCAL - case 964: + case 970: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5070 +//line sql.y:5086 { yyLOCAL = NormalJoinType } yyVAL.union = yyLOCAL - case 965: + case 971: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL JoinType -//line sql.y:5076 +//line sql.y:5092 { yyLOCAL = StraightJoinType } yyVAL.union = yyLOCAL - case 966: + case 972: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5082 +//line sql.y:5098 { yyLOCAL = LeftJoinType } yyVAL.union = yyLOCAL - case 967: + case 973: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL JoinType -//line sql.y:5086 +//line sql.y:5102 { yyLOCAL = LeftJoinType } yyVAL.union = yyLOCAL - case 968: + case 974: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5090 +//line sql.y:5106 { yyLOCAL = RightJoinType } yyVAL.union = yyLOCAL - case 969: + case 975: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL JoinType -//line sql.y:5094 +//line sql.y:5110 { yyLOCAL = RightJoinType } yyVAL.union = yyLOCAL - case 970: + case 976: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5100 +//line sql.y:5116 { yyLOCAL = NaturalJoinType } yyVAL.union = yyLOCAL - case 971: + case 977: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5104 +//line sql.y:5120 { if yyDollar[2].joinTypeUnion() == LeftJoinType { yyLOCAL = NaturalLeftJoinType @@ -16053,593 +17200,617 @@ yydefault: } } yyVAL.union = yyLOCAL - case 972: + case 978: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:5114 +//line sql.y:5130 { yyVAL.tableName = yyDollar[2].tableName } - case 973: + case 979: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5118 +//line sql.y:5134 { yyVAL.tableName = yyDollar[1].tableName } - case 974: + case 980: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5124 +//line sql.y:5140 { yyVAL.tableName = TableName{Name: yyDollar[1].identifierCS} } - case 975: + case 981: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5128 +//line sql.y:5144 { yyVAL.tableName = TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS} } - case 976: + case 982: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5134 +//line sql.y:5150 { yyVAL.tableName = TableName{Name: yyDollar[1].identifierCS} } - case 977: + case 983: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL IndexHints -//line sql.y:5139 +//line sql.y:5155 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 978: + case 984: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IndexHints -//line sql.y:5143 +//line sql.y:5159 { yyLOCAL = yyDollar[1].indexHintsUnion() } yyVAL.union = yyLOCAL - case 979: + case 985: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IndexHints -//line sql.y:5149 +//line sql.y:5165 { yyLOCAL = IndexHints{yyDollar[1].indexHintUnion()} } yyVAL.union = yyLOCAL - case 980: + case 986: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:5153 +//line sql.y:5169 { yySLICE := (*IndexHints)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].indexHintUnion()) } - case 981: + case 987: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *IndexHint -//line sql.y:5159 +//line sql.y:5175 { yyLOCAL = &IndexHint{Type: UseOp, ForType: yyDollar[3].indexHintForTypeUnion(), Indexes: yyDollar[5].columnsUnion()} } yyVAL.union = yyLOCAL - case 982: + case 988: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *IndexHint -//line sql.y:5163 +//line sql.y:5179 { yyLOCAL = &IndexHint{Type: UseOp, ForType: yyDollar[3].indexHintForTypeUnion()} } yyVAL.union = yyLOCAL - case 983: + case 989: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *IndexHint -//line sql.y:5167 +//line sql.y:5183 { yyLOCAL = &IndexHint{Type: IgnoreOp, ForType: yyDollar[3].indexHintForTypeUnion(), Indexes: yyDollar[5].columnsUnion()} } yyVAL.union = yyLOCAL - case 984: + case 990: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *IndexHint -//line sql.y:5171 +//line sql.y:5187 { yyLOCAL = &IndexHint{Type: ForceOp, ForType: yyDollar[3].indexHintForTypeUnion(), Indexes: yyDollar[5].columnsUnion()} } yyVAL.union = yyLOCAL - case 985: + case 991: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL IndexHintForType -//line sql.y:5176 +//line sql.y:5192 { yyLOCAL = NoForType } yyVAL.union = yyLOCAL - case 986: + case 992: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL IndexHintForType -//line sql.y:5180 +//line sql.y:5196 { yyLOCAL = JoinForType } yyVAL.union = yyLOCAL - case 987: + case 993: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL IndexHintForType -//line sql.y:5184 +//line sql.y:5200 { yyLOCAL = OrderByForType } yyVAL.union = yyLOCAL - case 988: + case 994: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL IndexHintForType -//line sql.y:5188 +//line sql.y:5204 { yyLOCAL = GroupByForType } yyVAL.union = yyLOCAL - case 989: + case 995: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Expr -//line sql.y:5194 +//line sql.y:5210 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 990: + case 996: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5198 +//line sql.y:5214 { yyLOCAL = yyDollar[2].exprUnion() } yyVAL.union = yyLOCAL - case 991: + case 997: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5205 +//line sql.y:5221 { yyLOCAL = &OrExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 992: + case 998: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5209 +//line sql.y:5225 { yyLOCAL = &XorExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 993: + case 999: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5213 +//line sql.y:5229 { yyLOCAL = &AndExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 994: + case 1000: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5217 +//line sql.y:5233 { yyLOCAL = &NotExpr{Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 995: + case 1001: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5221 +//line sql.y:5237 { yyLOCAL = &IsExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].isExprOperatorUnion()} } yyVAL.union = yyLOCAL - case 996: + case 1002: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5225 +//line sql.y:5241 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 997: - yyDollar = yyS[yypt-6 : yypt+1] - var yyLOCAL Expr -//line sql.y:5229 + case 1003: + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Expr +//line sql.y:5245 + { + yyLOCAL = &AssignmentExpr{Left: yyDollar[1].variableUnion(), Right: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1004: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:5249 { yyLOCAL = &MemberOfExpr{Value: yyDollar[1].exprUnion(), JSONArr: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 998: + case 1005: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5235 +//line sql.y:5255 { yyLOCAL = &IsExpr{Left: yyDollar[1].exprUnion(), Right: IsNullOp} } yyVAL.union = yyLOCAL - case 999: + case 1006: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5239 +//line sql.y:5259 { yyLOCAL = &IsExpr{Left: yyDollar[1].exprUnion(), Right: IsNotNullOp} } yyVAL.union = yyLOCAL - case 1000: + case 1007: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5243 +//line sql.y:5263 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: yyDollar[2].comparisonExprOperatorUnion(), Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1001: + case 1008: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5247 +//line sql.y:5267 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1002: + case 1009: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5253 +//line sql.y:5273 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: InOp, Right: yyDollar[3].colTupleUnion()} } yyVAL.union = yyLOCAL - case 1003: + case 1010: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5257 +//line sql.y:5277 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotInOp, Right: yyDollar[4].colTupleUnion()} } yyVAL.union = yyLOCAL - case 1004: + case 1011: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5261 +//line sql.y:5281 { yyLOCAL = &BetweenExpr{Left: yyDollar[1].exprUnion(), IsBetween: true, From: yyDollar[3].exprUnion(), To: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1005: + case 1012: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5265 +//line sql.y:5285 { yyLOCAL = &BetweenExpr{Left: yyDollar[1].exprUnion(), IsBetween: false, From: yyDollar[4].exprUnion(), To: yyDollar[6].exprUnion()} } yyVAL.union = yyLOCAL - case 1006: + case 1013: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5269 +//line sql.y:5289 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: LikeOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1007: + case 1014: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5273 +//line sql.y:5293 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotLikeOp, Right: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1008: + case 1015: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5277 +//line sql.y:5297 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: LikeOp, Right: yyDollar[3].exprUnion(), Escape: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1009: + case 1016: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5281 +//line sql.y:5301 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotLikeOp, Right: yyDollar[4].exprUnion(), Escape: yyDollar[6].exprUnion()} } yyVAL.union = yyLOCAL - case 1010: + case 1017: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5285 +//line sql.y:5305 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: RegexpOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1011: + case 1018: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5289 +//line sql.y:5309 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotRegexpOp, Right: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1012: + case 1019: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5293 +//line sql.y:5313 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1013: + case 1020: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5299 +//line sql.y:5319 { } - case 1014: + case 1021: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5302 +//line sql.y:5322 { } - case 1015: + case 1022: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5308 +//line sql.y:5328 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitOrOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1016: + case 1023: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5312 +//line sql.y:5332 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitAndOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1017: + case 1024: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5316 +//line sql.y:5336 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ShiftLeftOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1018: + case 1025: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5320 +//line sql.y:5340 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ShiftRightOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1019: + case 1026: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5324 +//line sql.y:5344 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: PlusOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1020: + case 1027: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5328 +//line sql.y:5348 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: MinusOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1021: + case 1028: + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Expr +//line sql.y:5352 + { + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprBinaryAdd, Date: yyDollar[1].exprUnion(), Unit: yyDollar[5].intervalTypeUnion(), Interval: yyDollar[4].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1029: + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Expr +//line sql.y:5356 + { + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprBinarySub, Date: yyDollar[1].exprUnion(), Unit: yyDollar[5].intervalTypeUnion(), Interval: yyDollar[4].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1030: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5332 +//line sql.y:5360 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: MultOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1022: + case 1031: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5336 +//line sql.y:5364 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: DivOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1023: + case 1032: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5340 +//line sql.y:5368 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ModOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1024: + case 1033: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5344 +//line sql.y:5372 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: IntDivOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1025: + case 1034: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5348 +//line sql.y:5376 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ModOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1026: + case 1035: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5352 +//line sql.y:5380 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitXorOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1027: + case 1036: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5356 +//line sql.y:5384 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1028: + case 1037: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5362 +//line sql.y:5390 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1029: + case 1038: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5366 +//line sql.y:5394 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1030: + case 1039: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5370 +//line sql.y:5398 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1031: + case 1040: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5374 +//line sql.y:5402 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1032: + case 1041: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5378 +//line sql.y:5406 { yyLOCAL = &CollateExpr{Expr: yyDollar[1].exprUnion(), Collation: yyDollar[3].str} } yyVAL.union = yyLOCAL - case 1033: + case 1042: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5382 +//line sql.y:5410 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1034: + case 1043: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5386 +//line sql.y:5414 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1035: + case 1044: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5390 +//line sql.y:5418 { yyLOCAL = yyDollar[1].variableUnion() } yyVAL.union = yyLOCAL - case 1036: + case 1045: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5394 +//line sql.y:5422 { yyLOCAL = yyDollar[2].exprUnion() // TODO: do we really want to ignore unary '+' before any kind of literals? } yyVAL.union = yyLOCAL - case 1037: + case 1046: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5398 +//line sql.y:5426 { yyLOCAL = &UnaryExpr{Operator: UMinusOp, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1038: + case 1047: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5402 +//line sql.y:5430 { yyLOCAL = &UnaryExpr{Operator: TildaOp, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1039: + case 1048: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5406 +//line sql.y:5434 { yyLOCAL = &UnaryExpr{Operator: BangOp, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1040: + case 1049: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5410 +//line sql.y:5438 { yyLOCAL = yyDollar[1].subqueryUnion() } yyVAL.union = yyLOCAL - case 1041: + case 1050: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5414 +//line sql.y:5442 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1042: + case 1051: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5418 +//line sql.y:5446 { yyLOCAL = &ExistsExpr{Subquery: yyDollar[2].subqueryUnion()} } yyVAL.union = yyLOCAL - case 1043: + case 1052: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Expr -//line sql.y:5422 +//line sql.y:5450 { yyLOCAL = &MatchExpr{Columns: yyDollar[2].colNamesUnion(), Expr: yyDollar[5].exprUnion(), Option: yyDollar[6].matchExprOptionUnion()} } yyVAL.union = yyLOCAL - case 1044: + case 1053: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Expr -//line sql.y:5426 +//line sql.y:5454 { yyLOCAL = &CastExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].convertTypeUnion(), Array: yyDollar[6].booleanUnion()} } yyVAL.union = yyLOCAL - case 1045: + case 1054: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5430 +//line sql.y:5458 { yyLOCAL = &ConvertExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].convertTypeUnion()} } yyVAL.union = yyLOCAL - case 1046: + case 1055: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5434 +//line sql.y:5462 { yyLOCAL = &ConvertUsingExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].str} } yyVAL.union = yyLOCAL - case 1047: + case 1056: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5438 +//line sql.y:5466 { // From: https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#operator_binary // To convert a string expression to a binary string, these constructs are equivalent: @@ -16648,2236 +17819,3169 @@ yydefault: yyLOCAL = &ConvertExpr{Expr: yyDollar[2].exprUnion(), Type: &ConvertType{Type: yyDollar[1].str}} } yyVAL.union = yyLOCAL - case 1048: + case 1057: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5446 +//line sql.y:5474 { yyLOCAL = &Default{ColName: yyDollar[2].str} } yyVAL.union = yyLOCAL - case 1049: - yyDollar = yyS[yypt-1 : yypt+1] + case 1058: + yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5450 +//line sql.y:5478 { - // INTERVAL can trigger a shift / reduce conflict. We want - // to shift here for the interval rule. In case we do have - // the additional expression_list below, we'd pick that path - // and thus properly parse it as a function when needed. - yyLOCAL = yyDollar[1].exprUnion() + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprBinaryAddLeft, Date: yyDollar[5].exprUnion(), Unit: yyDollar[3].intervalTypeUnion(), Interval: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1050: + case 1059: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5458 +//line sql.y:5482 { yyLOCAL = &IntervalFuncExpr{Expr: yyDollar[3].exprUnion(), Exprs: yyDollar[5].exprsUnion()} } yyVAL.union = yyLOCAL - case 1051: + case 1060: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5462 +//line sql.y:5486 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: JSONExtractOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1052: + case 1061: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5466 +//line sql.y:5490 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: JSONUnquoteExtractOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1053: - yyDollar = yyS[yypt-3 : yypt+1] - var yyLOCAL Expr -//line sql.y:5472 - { - yyLOCAL = &IntervalExpr{Expr: yyDollar[2].exprUnion(), Unit: yyDollar[3].identifierCI.String()} - } - yyVAL.union = yyLOCAL - case 1054: + case 1062: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*ColName -//line sql.y:5478 +//line sql.y:5496 { yyLOCAL = yyDollar[1].colNamesUnion() } yyVAL.union = yyLOCAL - case 1055: + case 1063: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL []*ColName -//line sql.y:5482 +//line sql.y:5500 { yyLOCAL = yyDollar[2].colNamesUnion() } yyVAL.union = yyLOCAL - case 1056: + case 1064: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*ColName -//line sql.y:5488 +//line sql.y:5506 { yyLOCAL = []*ColName{yyDollar[1].colNameUnion()} } yyVAL.union = yyLOCAL - case 1057: + case 1065: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5492 +//line sql.y:5510 { yySLICE := (*[]*ColName)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].colNameUnion()) } - case 1058: + case 1066: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TrimType -//line sql.y:5498 +//line sql.y:5516 { yyLOCAL = BothTrimType } yyVAL.union = yyLOCAL - case 1059: + case 1067: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TrimType -//line sql.y:5502 +//line sql.y:5520 { yyLOCAL = LeadingTrimType } yyVAL.union = yyLOCAL - case 1060: + case 1068: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TrimType -//line sql.y:5506 +//line sql.y:5524 { yyLOCAL = TrailingTrimType } yyVAL.union = yyLOCAL - case 1061: + case 1069: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL FrameUnitType -//line sql.y:5512 +//line sql.y:5530 { yyLOCAL = FrameRowsType } yyVAL.union = yyLOCAL - case 1062: + case 1070: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL FrameUnitType -//line sql.y:5516 +//line sql.y:5534 { yyLOCAL = FrameRangeType } yyVAL.union = yyLOCAL - case 1063: + case 1071: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ArgumentLessWindowExprType -//line sql.y:5523 +//line sql.y:5541 { yyLOCAL = CumeDistExprType } yyVAL.union = yyLOCAL - case 1064: + case 1072: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ArgumentLessWindowExprType -//line sql.y:5527 +//line sql.y:5545 { yyLOCAL = DenseRankExprType } yyVAL.union = yyLOCAL - case 1065: + case 1073: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ArgumentLessWindowExprType -//line sql.y:5531 +//line sql.y:5549 { yyLOCAL = PercentRankExprType } yyVAL.union = yyLOCAL - case 1066: + case 1074: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ArgumentLessWindowExprType -//line sql.y:5535 +//line sql.y:5553 { yyLOCAL = RankExprType } yyVAL.union = yyLOCAL - case 1067: + case 1075: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ArgumentLessWindowExprType -//line sql.y:5539 +//line sql.y:5557 { yyLOCAL = RowNumberExprType } yyVAL.union = yyLOCAL - case 1068: + case 1076: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5545 +//line sql.y:5563 { yyLOCAL = &FramePoint{Type: CurrentRowType} } yyVAL.union = yyLOCAL - case 1069: + case 1077: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5549 +//line sql.y:5567 { yyLOCAL = &FramePoint{Type: UnboundedPrecedingType} } yyVAL.union = yyLOCAL - case 1070: + case 1078: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5553 +//line sql.y:5571 { yyLOCAL = &FramePoint{Type: UnboundedFollowingType} } yyVAL.union = yyLOCAL - case 1071: + case 1079: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5557 +//line sql.y:5575 { yyLOCAL = &FramePoint{Type: ExprPrecedingType, Expr: yyDollar[1].exprUnion()} } yyVAL.union = yyLOCAL - case 1072: - yyDollar = yyS[yypt-2 : yypt+1] + case 1080: + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5561 +//line sql.y:5579 { - yyLOCAL = &FramePoint{Type: ExprFollowingType, Expr: yyDollar[1].exprUnion()} + yyLOCAL = &FramePoint{Type: ExprPrecedingType, Expr: yyDollar[2].exprUnion(), Unit: yyDollar[3].intervalTypeUnion()} } yyVAL.union = yyLOCAL - case 1073: - yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL Expr -//line sql.y:5567 + case 1081: + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL *FramePoint +//line sql.y:5583 { - yyLOCAL = yyDollar[1].exprUnion() + yyLOCAL = &FramePoint{Type: ExprFollowingType, Expr: yyDollar[1].exprUnion()} } yyVAL.union = yyLOCAL - case 1074: - yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL Expr -//line sql.y:5571 + case 1082: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL *FramePoint +//line sql.y:5587 { - yyLOCAL = yyDollar[1].exprUnion() + yyLOCAL = &FramePoint{Type: ExprFollowingType, Expr: yyDollar[2].exprUnion(), Unit: yyDollar[3].intervalTypeUnion()} } yyVAL.union = yyLOCAL - case 1075: + case 1083: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *FrameClause -//line sql.y:5576 +//line sql.y:5592 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1076: + case 1084: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *FrameClause -//line sql.y:5580 +//line sql.y:5596 { yyLOCAL = yyDollar[1].frameClauseUnion() } yyVAL.union = yyLOCAL - case 1077: + case 1085: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *FrameClause -//line sql.y:5586 +//line sql.y:5602 { yyLOCAL = &FrameClause{Unit: yyDollar[1].frameUnitTypeUnion(), Start: yyDollar[2].framePointUnion()} } yyVAL.union = yyLOCAL - case 1078: + case 1086: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *FrameClause -//line sql.y:5590 +//line sql.y:5606 { yyLOCAL = &FrameClause{Unit: yyDollar[1].frameUnitTypeUnion(), Start: yyDollar[3].framePointUnion(), End: yyDollar[5].framePointUnion()} } yyVAL.union = yyLOCAL - case 1079: + case 1087: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Exprs -//line sql.y:5595 +//line sql.y:5611 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1080: + case 1088: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Exprs -//line sql.y:5599 +//line sql.y:5615 { yyLOCAL = yyDollar[3].exprsUnion() } yyVAL.union = yyLOCAL - case 1081: + case 1089: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5604 +//line sql.y:5620 { } - case 1082: + case 1090: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5607 +//line sql.y:5623 { yyVAL.identifierCI = yyDollar[1].identifierCI } - case 1083: + case 1091: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *WindowSpecification -//line sql.y:5613 +//line sql.y:5629 { yyLOCAL = &WindowSpecification{Name: yyDollar[1].identifierCI, PartitionClause: yyDollar[2].exprsUnion(), OrderClause: yyDollar[3].orderByUnion(), FrameClause: yyDollar[4].frameClauseUnion()} } yyVAL.union = yyLOCAL - case 1084: + case 1092: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *OverClause -//line sql.y:5619 +//line sql.y:5635 { yyLOCAL = &OverClause{WindowSpec: yyDollar[3].windowSpecificationUnion()} } yyVAL.union = yyLOCAL - case 1085: + case 1093: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *OverClause -//line sql.y:5623 +//line sql.y:5639 { yyLOCAL = &OverClause{WindowName: yyDollar[2].identifierCI} } yyVAL.union = yyLOCAL - case 1086: + case 1094: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *NullTreatmentClause -//line sql.y:5628 +//line sql.y:5644 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1088: + case 1096: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *NullTreatmentClause -//line sql.y:5635 +//line sql.y:5651 { yyLOCAL = &NullTreatmentClause{yyDollar[1].nullTreatmentTypeUnion()} } yyVAL.union = yyLOCAL - case 1089: + case 1097: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL NullTreatmentType -//line sql.y:5641 +//line sql.y:5657 { yyLOCAL = RespectNullsType } yyVAL.union = yyLOCAL - case 1090: + case 1098: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL NullTreatmentType -//line sql.y:5645 +//line sql.y:5661 { yyLOCAL = IgnoreNullsType } yyVAL.union = yyLOCAL - case 1091: + case 1099: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL FirstOrLastValueExprType -//line sql.y:5651 +//line sql.y:5667 { yyLOCAL = FirstValueExprType } yyVAL.union = yyLOCAL - case 1092: + case 1100: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL FirstOrLastValueExprType -//line sql.y:5655 +//line sql.y:5671 { yyLOCAL = LastValueExprType } yyVAL.union = yyLOCAL - case 1093: + case 1101: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL FromFirstLastType -//line sql.y:5661 +//line sql.y:5677 { yyLOCAL = FromFirstType } yyVAL.union = yyLOCAL - case 1094: + case 1102: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL FromFirstLastType -//line sql.y:5665 +//line sql.y:5681 { yyLOCAL = FromLastType } yyVAL.union = yyLOCAL - case 1095: + case 1103: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *FromFirstLastClause -//line sql.y:5670 +//line sql.y:5686 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1097: + case 1105: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *FromFirstLastClause -//line sql.y:5677 +//line sql.y:5693 { yyLOCAL = &FromFirstLastClause{yyDollar[1].fromFirstLastTypeUnion()} } yyVAL.union = yyLOCAL - case 1098: + case 1106: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL LagLeadExprType -//line sql.y:5683 +//line sql.y:5699 { yyLOCAL = LagExprType } yyVAL.union = yyLOCAL - case 1099: + case 1107: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL LagLeadExprType -//line sql.y:5687 +//line sql.y:5703 { yyLOCAL = LeadExprType } yyVAL.union = yyLOCAL - case 1100: + case 1108: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *WindowDefinition -//line sql.y:5693 +//line sql.y:5709 { yyLOCAL = &WindowDefinition{Name: yyDollar[1].identifierCI, WindowSpec: yyDollar[4].windowSpecificationUnion()} } yyVAL.union = yyLOCAL - case 1101: + case 1109: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL WindowDefinitions -//line sql.y:5699 +//line sql.y:5715 { yyLOCAL = WindowDefinitions{yyDollar[1].windowDefinitionUnion()} } yyVAL.union = yyLOCAL - case 1102: + case 1110: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5703 +//line sql.y:5719 { yySLICE := (*WindowDefinitions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].windowDefinitionUnion()) } - case 1103: + case 1111: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5709 +//line sql.y:5725 { yyVAL.str = "" } - case 1104: + case 1112: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5713 +//line sql.y:5729 { yyVAL.str = string(yyDollar[2].identifierCI.String()) } - case 1105: + case 1113: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL BoolVal -//line sql.y:5719 +//line sql.y:5735 { yyLOCAL = BoolVal(true) } yyVAL.union = yyLOCAL - case 1106: + case 1114: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL BoolVal -//line sql.y:5723 +//line sql.y:5739 { yyLOCAL = BoolVal(false) } yyVAL.union = yyLOCAL - case 1107: + case 1115: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IsExprOperator -//line sql.y:5730 +//line sql.y:5746 { yyLOCAL = IsTrueOp } yyVAL.union = yyLOCAL - case 1108: + case 1116: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL IsExprOperator -//line sql.y:5734 +//line sql.y:5750 { yyLOCAL = IsNotTrueOp } yyVAL.union = yyLOCAL - case 1109: + case 1117: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IsExprOperator -//line sql.y:5738 +//line sql.y:5754 { yyLOCAL = IsFalseOp } yyVAL.union = yyLOCAL - case 1110: + case 1118: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL IsExprOperator -//line sql.y:5742 +//line sql.y:5758 { yyLOCAL = IsNotFalseOp } yyVAL.union = yyLOCAL - case 1111: + case 1119: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5748 +//line sql.y:5764 { yyLOCAL = EqualOp } yyVAL.union = yyLOCAL - case 1112: + case 1120: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5752 +//line sql.y:5768 { yyLOCAL = LessThanOp } yyVAL.union = yyLOCAL - case 1113: + case 1121: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5756 +//line sql.y:5772 { yyLOCAL = GreaterThanOp } yyVAL.union = yyLOCAL - case 1114: + case 1122: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5760 +//line sql.y:5776 { yyLOCAL = LessEqualOp } yyVAL.union = yyLOCAL - case 1115: + case 1123: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5764 +//line sql.y:5780 { yyLOCAL = GreaterEqualOp } yyVAL.union = yyLOCAL - case 1116: + case 1124: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5768 +//line sql.y:5784 { yyLOCAL = NotEqualOp } yyVAL.union = yyLOCAL - case 1117: + case 1125: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5772 +//line sql.y:5788 { yyLOCAL = NullSafeEqualOp } yyVAL.union = yyLOCAL - case 1118: + case 1126: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColTuple -//line sql.y:5778 +//line sql.y:5794 { yyLOCAL = yyDollar[1].valTupleUnion() } yyVAL.union = yyLOCAL - case 1119: + case 1127: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColTuple -//line sql.y:5782 +//line sql.y:5798 { yyLOCAL = yyDollar[1].subqueryUnion() } yyVAL.union = yyLOCAL - case 1120: + case 1128: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColTuple -//line sql.y:5786 +//line sql.y:5802 { yyLOCAL = ListArg(yyDollar[1].str[2:]) - bindVariable(yylex, yyDollar[1].str[2:]) + markBindVariable(yylex, yyDollar[1].str[2:]) } yyVAL.union = yyLOCAL - case 1121: + case 1129: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Subquery -//line sql.y:5793 +//line sql.y:5809 { yyLOCAL = &Subquery{yyDollar[1].selStmtUnion()} } yyVAL.union = yyLOCAL - case 1122: + case 1130: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Exprs -//line sql.y:5799 +//line sql.y:5815 { yyLOCAL = Exprs{yyDollar[1].exprUnion()} } yyVAL.union = yyLOCAL - case 1123: + case 1131: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5803 +//line sql.y:5819 { yySLICE := (*Exprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].exprUnion()) } - case 1124: + case 1132: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5813 +//line sql.y:5829 { yyLOCAL = &FuncExpr{Name: yyDollar[1].identifierCI, Exprs: yyDollar[3].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1125: + case 1133: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5817 +//line sql.y:5833 { yyLOCAL = &FuncExpr{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCI, Exprs: yyDollar[5].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1126: + case 1134: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5827 +//line sql.y:5843 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("left"), Exprs: yyDollar[3].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1127: + case 1135: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5831 +//line sql.y:5847 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("right"), Exprs: yyDollar[3].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1128: + case 1136: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:5835 +//line sql.y:5851 { yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion(), To: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1129: + case 1137: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5839 +//line sql.y:5855 { yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1130: + case 1138: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:5843 +//line sql.y:5859 { yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion(), To: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1131: + case 1139: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5847 +//line sql.y:5863 { yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1132: + case 1140: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5851 +//line sql.y:5867 { yyLOCAL = &CaseExpr{Expr: yyDollar[2].exprUnion(), Whens: yyDollar[3].whensUnion(), Else: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1133: + case 1141: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5855 +//line sql.y:5871 { yyLOCAL = &ValuesFuncExpr{Name: yyDollar[3].colNameUnion()} } yyVAL.union = yyLOCAL - case 1134: + case 1142: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL Expr -//line sql.y:5859 +//line sql.y:5875 { yyLOCAL = &InsertExpr{Str: yyDollar[3].exprUnion(), Pos: yyDollar[5].exprUnion(), Len: yyDollar[7].exprUnion(), NewStr: yyDollar[9].exprUnion()} } yyVAL.union = yyLOCAL - case 1135: + case 1143: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5863 +//line sql.y:5879 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1136: + case 1144: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5874 +//line sql.y:5890 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("utc_date")} } yyVAL.union = yyLOCAL - case 1137: + case 1145: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5878 +//line sql.y:5894 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1138: + case 1146: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5884 +//line sql.y:5900 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("current_date")} } yyVAL.union = yyLOCAL - case 1139: + case 1147: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5888 +//line sql.y:5904 { - yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("utc_time"), Fsp: yyDollar[2].exprUnion()} + yyLOCAL = &FuncExpr{Name: NewIdentifierCI("curdate")} } yyVAL.union = yyLOCAL - case 1140: + case 1148: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5893 +//line sql.y:5908 { - yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("current_time"), Fsp: yyDollar[2].exprUnion()} + yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("utc_time"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 1141: + case 1149: + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Expr +//line sql.y:5913 + { + yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("curtime"), Fsp: yyDollar[2].integerUnion()} + } + yyVAL.union = yyLOCAL + case 1150: + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Expr +//line sql.y:5918 + { + yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("current_time"), Fsp: yyDollar[2].integerUnion()} + } + yyVAL.union = yyLOCAL + case 1151: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5897 +//line sql.y:5922 { yyLOCAL = &CountStar{} } yyVAL.union = yyLOCAL - case 1142: + case 1152: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5901 +//line sql.y:5926 { yyLOCAL = &Count{Distinct: yyDollar[3].booleanUnion(), Args: yyDollar[4].exprsUnion()} } yyVAL.union = yyLOCAL - case 1143: + case 1153: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5905 +//line sql.y:5930 { yyLOCAL = &Max{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1144: + case 1154: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5909 +//line sql.y:5934 { yyLOCAL = &Min{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1145: + case 1155: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5913 +//line sql.y:5938 { yyLOCAL = &Sum{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1146: + case 1156: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5917 +//line sql.y:5942 { yyLOCAL = &Avg{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1147: + case 1157: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5921 +//line sql.y:5946 { yyLOCAL = &BitAnd{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1148: + case 1158: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5925 +//line sql.y:5950 { yyLOCAL = &BitOr{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1149: + case 1159: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5929 +//line sql.y:5954 { yyLOCAL = &BitXor{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1150: + case 1160: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5933 +//line sql.y:5958 { yyLOCAL = &Std{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1151: + case 1161: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5937 +//line sql.y:5962 { yyLOCAL = &StdDev{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1152: + case 1162: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5941 +//line sql.y:5966 { yyLOCAL = &StdPop{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1153: + case 1163: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5945 +//line sql.y:5970 { yyLOCAL = &StdSamp{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1154: + case 1164: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5949 +//line sql.y:5974 { yyLOCAL = &VarPop{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1155: + case 1165: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5953 +//line sql.y:5978 { yyLOCAL = &VarSamp{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1156: + case 1166: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5957 +//line sql.y:5982 { yyLOCAL = &Variance{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1157: + case 1167: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:5961 +//line sql.y:5986 { yyLOCAL = &GroupConcatExpr{Distinct: yyDollar[3].booleanUnion(), Exprs: yyDollar[4].exprsUnion(), OrderBy: yyDollar[5].orderByUnion(), Separator: yyDollar[6].str, Limit: yyDollar[7].limitUnion()} } yyVAL.union = yyLOCAL - case 1158: + case 1168: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:5990 + { + yyLOCAL = &AnyValue{Arg: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1169: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:5965 +//line sql.y:5994 { - yyLOCAL = &TimestampFuncExpr{Name: string("timestampadd"), Unit: yyDollar[3].identifierCI.String(), Expr1: yyDollar[5].exprUnion(), Expr2: yyDollar[7].exprUnion()} + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprTimestampadd, Date: yyDollar[7].exprUnion(), Interval: yyDollar[5].exprUnion(), Unit: yyDollar[3].intervalTypeUnion()} } yyVAL.union = yyLOCAL - case 1159: + case 1170: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:5969 +//line sql.y:5998 { - yyLOCAL = &TimestampFuncExpr{Name: string("timestampdiff"), Unit: yyDollar[3].identifierCI.String(), Expr1: yyDollar[5].exprUnion(), Expr2: yyDollar[7].exprUnion()} + yyLOCAL = &TimestampDiffExpr{Unit: yyDollar[3].intervalTypeUnion(), Expr1: yyDollar[5].exprUnion(), Expr2: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1160: + case 1171: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5973 +//line sql.y:6002 { - yyLOCAL = &ExtractFuncExpr{IntervalTypes: yyDollar[3].intervalTypeUnion(), Expr: yyDollar[5].exprUnion()} + yyLOCAL = &ExtractFuncExpr{IntervalType: yyDollar[3].intervalTypeUnion(), Expr: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1161: + case 1172: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5977 +//line sql.y:6006 { yyLOCAL = &WeightStringFuncExpr{Expr: yyDollar[3].exprUnion(), As: yyDollar[4].convertTypeUnion()} } yyVAL.union = yyLOCAL - case 1162: + case 1173: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5981 +//line sql.y:6010 { yyLOCAL = &JSONPrettyExpr{JSONVal: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1163: + case 1174: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5985 +//line sql.y:6014 { yyLOCAL = &JSONStorageFreeExpr{JSONVal: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1164: + case 1175: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5989 +//line sql.y:6018 { yyLOCAL = &JSONStorageSizeExpr{JSONVal: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1165: + case 1176: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5993 +//line sql.y:6022 { - yyLOCAL = &TrimFuncExpr{TrimFuncType: LTrimType, StringArg: yyDollar[3].exprUnion()} + yyLOCAL = &TrimFuncExpr{TrimFuncType: LTrimType, Type: LeadingTrimType, StringArg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1166: + case 1177: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5997 +//line sql.y:6026 { - yyLOCAL = &TrimFuncExpr{TrimFuncType: RTrimType, StringArg: yyDollar[3].exprUnion()} + yyLOCAL = &TrimFuncExpr{TrimFuncType: RTrimType, Type: TrailingTrimType, StringArg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1167: + case 1178: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Expr -//line sql.y:6001 +//line sql.y:6030 { yyLOCAL = &TrimFuncExpr{Type: yyDollar[3].trimTypeUnion(), TrimArg: yyDollar[4].exprUnion(), StringArg: yyDollar[6].exprUnion()} } yyVAL.union = yyLOCAL - case 1168: + case 1179: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6005 +//line sql.y:6034 { yyLOCAL = &TrimFuncExpr{StringArg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1169: + case 1180: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6009 +//line sql.y:6038 { yyLOCAL = &CharExpr{Exprs: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1170: + case 1181: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6013 +//line sql.y:6042 { yyLOCAL = &CharExpr{Exprs: yyDollar[3].exprsUnion(), Charset: yyDollar[5].str} } yyVAL.union = yyLOCAL - case 1171: + case 1182: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6017 +//line sql.y:6046 { yyLOCAL = &TrimFuncExpr{TrimArg: yyDollar[3].exprUnion(), StringArg: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1172: + case 1183: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6021 +//line sql.y:6050 { yyLOCAL = &LocateExpr{SubStr: yyDollar[3].exprUnion(), Str: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1173: + case 1184: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6025 +//line sql.y:6054 { yyLOCAL = &LocateExpr{SubStr: yyDollar[3].exprUnion(), Str: yyDollar[5].exprUnion(), Pos: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1174: + case 1185: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6029 +//line sql.y:6058 { yyLOCAL = &LocateExpr{SubStr: yyDollar[3].exprUnion(), Str: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1175: + case 1186: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6033 +//line sql.y:6062 { yyLOCAL = &LockingFunc{Type: GetLock, Name: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1176: + case 1187: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6037 +//line sql.y:6066 { yyLOCAL = &LockingFunc{Type: IsFreeLock, Name: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1177: + case 1188: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6041 +//line sql.y:6070 { yyLOCAL = &LockingFunc{Type: IsUsedLock, Name: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1178: + case 1189: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:6045 +//line sql.y:6074 { yyLOCAL = &LockingFunc{Type: ReleaseAllLocks} } yyVAL.union = yyLOCAL - case 1179: + case 1190: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6049 +//line sql.y:6078 { yyLOCAL = &LockingFunc{Type: ReleaseLock, Name: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1180: + case 1191: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6053 +//line sql.y:6082 { yyLOCAL = &JSONSchemaValidFuncExpr{Schema: yyDollar[3].exprUnion(), Document: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1181: + case 1192: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6057 +//line sql.y:6086 { yyLOCAL = &JSONSchemaValidationReportFuncExpr{Schema: yyDollar[3].exprUnion(), Document: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1182: + case 1193: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6061 +//line sql.y:6090 { yyLOCAL = &JSONArrayExpr{Params: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1183: - yyDollar = yyS[yypt-4 : yypt+1] - var yyLOCAL Expr -//line sql.y:6065 - { - yyLOCAL = &JSONObjectExpr{Params: yyDollar[3].jsonObjectParamsUnion()} - } - yyVAL.union = yyLOCAL - case 1184: - yyDollar = yyS[yypt-4 : yypt+1] - var yyLOCAL Expr -//line sql.y:6069 - { - yyLOCAL = &JSONQuoteExpr{StringArg: yyDollar[3].exprUnion()} - } - yyVAL.union = yyLOCAL - case 1185: - yyDollar = yyS[yypt-6 : yypt+1] - var yyLOCAL Expr -//line sql.y:6073 - { - yyLOCAL = &JSONContainsExpr{Target: yyDollar[3].exprUnion(), Candidate: yyDollar[5].exprsUnion()[0], PathList: yyDollar[5].exprsUnion()[1:]} - } - yyVAL.union = yyLOCAL - case 1186: - yyDollar = yyS[yypt-8 : yypt+1] - var yyLOCAL Expr -//line sql.y:6077 - { - yyLOCAL = &JSONContainsPathExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), PathList: yyDollar[7].exprsUnion()} - } - yyVAL.union = yyLOCAL - case 1187: - yyDollar = yyS[yypt-6 : yypt+1] - var yyLOCAL Expr -//line sql.y:6081 - { - yyLOCAL = &JSONExtractExpr{JSONDoc: yyDollar[3].exprUnion(), PathList: yyDollar[5].exprsUnion()} - } - yyVAL.union = yyLOCAL - case 1188: - yyDollar = yyS[yypt-4 : yypt+1] - var yyLOCAL Expr -//line sql.y:6085 - { - yyLOCAL = &JSONKeysExpr{JSONDoc: yyDollar[3].exprUnion()} - } - yyVAL.union = yyLOCAL - case 1189: - yyDollar = yyS[yypt-6 : yypt+1] - var yyLOCAL Expr -//line sql.y:6089 - { - yyLOCAL = &JSONKeysExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion()} - } - yyVAL.union = yyLOCAL - case 1190: - yyDollar = yyS[yypt-6 : yypt+1] - var yyLOCAL Expr -//line sql.y:6093 - { - yyLOCAL = &JSONOverlapsExpr{JSONDoc1: yyDollar[3].exprUnion(), JSONDoc2: yyDollar[5].exprUnion()} - } - yyVAL.union = yyLOCAL - case 1191: - yyDollar = yyS[yypt-8 : yypt+1] - var yyLOCAL Expr -//line sql.y:6097 - { - yyLOCAL = &JSONSearchExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), SearchStr: yyDollar[7].exprUnion()} - } - yyVAL.union = yyLOCAL - case 1192: - yyDollar = yyS[yypt-10 : yypt+1] - var yyLOCAL Expr -//line sql.y:6101 - { - yyLOCAL = &JSONSearchExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), SearchStr: yyDollar[7].exprUnion(), EscapeChar: yyDollar[9].exprsUnion()[0], PathList: yyDollar[9].exprsUnion()[1:]} - } - yyVAL.union = yyLOCAL - case 1193: - yyDollar = yyS[yypt-7 : yypt+1] - var yyLOCAL Expr -//line sql.y:6105 - { - yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion()} - } - yyVAL.union = yyLOCAL case 1194: - yyDollar = yyS[yypt-8 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6109 +//line sql.y:6094 { - yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion()} + yyLOCAL = &GeomFormatExpr{FormatType: BinaryFormat, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1195: - yyDollar = yyS[yypt-8 : yypt+1] + yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6113 +//line sql.y:6098 { - yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), ErrorOnResponse: yyDollar[7].jtOnResponseUnion()} + yyLOCAL = &GeomFormatExpr{FormatType: BinaryFormat, Geom: yyDollar[3].exprUnion(), AxisOrderOpt: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1196: - yyDollar = yyS[yypt-9 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6117 +//line sql.y:6102 { - yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion(), ErrorOnResponse: yyDollar[8].jtOnResponseUnion()} + yyLOCAL = &GeomFormatExpr{FormatType: TextFormat, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1197: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6121 +//line sql.y:6106 { - yyLOCAL = &JSONAttributesExpr{Type: DepthAttributeType, JSONDoc: yyDollar[3].exprUnion()} + yyLOCAL = &GeomFormatExpr{FormatType: TextFormat, Geom: yyDollar[3].exprUnion(), AxisOrderOpt: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1198: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6125 +//line sql.y:6110 { - yyLOCAL = &JSONAttributesExpr{Type: ValidAttributeType, JSONDoc: yyDollar[3].exprUnion()} + yyLOCAL = &GeomPropertyFuncExpr{Property: IsEmpty, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1199: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6129 +//line sql.y:6114 { - yyLOCAL = &JSONAttributesExpr{Type: TypeAttributeType, JSONDoc: yyDollar[3].exprUnion()} + yyLOCAL = &GeomPropertyFuncExpr{Property: IsSimple, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1200: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6133 +//line sql.y:6118 { - yyLOCAL = &JSONAttributesExpr{Type: LengthAttributeType, JSONDoc: yyDollar[3].exprUnion()} + yyLOCAL = &GeomPropertyFuncExpr{Property: Dimension, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1201: - yyDollar = yyS[yypt-6 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6137 +//line sql.y:6122 { - yyLOCAL = &JSONAttributesExpr{Type: LengthAttributeType, JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion()} + yyLOCAL = &GeomPropertyFuncExpr{Property: Envelope, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1202: - yyDollar = yyS[yypt-6 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6141 +//line sql.y:6126 { - yyLOCAL = &JSONValueModifierExpr{Type: JSONArrayAppendType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} + yyLOCAL = &GeomPropertyFuncExpr{Property: GeometryType, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1203: - yyDollar = yyS[yypt-6 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6145 +//line sql.y:6130 { - yyLOCAL = &JSONValueModifierExpr{Type: JSONArrayInsertType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} + yyLOCAL = &PointPropertyFuncExpr{Property: Latitude, Point: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1204: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6149 +//line sql.y:6134 { - yyLOCAL = &JSONValueModifierExpr{Type: JSONInsertType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} + yyLOCAL = &PointPropertyFuncExpr{Property: Latitude, Point: yyDollar[3].exprUnion(), ValueToSet: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1205: - yyDollar = yyS[yypt-6 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6153 +//line sql.y:6138 { - yyLOCAL = &JSONValueModifierExpr{Type: JSONReplaceType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} + yyLOCAL = &PointPropertyFuncExpr{Property: Longitude, Point: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1206: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6157 +//line sql.y:6142 { - yyLOCAL = &JSONValueModifierExpr{Type: JSONSetType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} + yyLOCAL = &PointPropertyFuncExpr{Property: Longitude, Point: yyDollar[3].exprUnion(), ValueToSet: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1207: - yyDollar = yyS[yypt-6 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6161 +//line sql.y:6146 { - yyLOCAL = &JSONValueMergeExpr{Type: JSONMergeType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: yyDollar[5].exprsUnion()} + yyLOCAL = &LinestrPropertyFuncExpr{Property: EndPoint, Linestring: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1208: - yyDollar = yyS[yypt-6 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6165 +//line sql.y:6150 { - yyLOCAL = &JSONValueMergeExpr{Type: JSONMergePatchType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: yyDollar[5].exprsUnion()} + yyLOCAL = &LinestrPropertyFuncExpr{Property: IsClosed, Linestring: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1209: - yyDollar = yyS[yypt-6 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6169 +//line sql.y:6154 { - yyLOCAL = &JSONValueMergeExpr{Type: JSONMergePreserveType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: yyDollar[5].exprsUnion()} + yyLOCAL = &LinestrPropertyFuncExpr{Property: Length, Linestring: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1210: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6173 +//line sql.y:6158 { - yyLOCAL = &JSONRemoveExpr{JSONDoc: yyDollar[3].exprUnion(), PathList: yyDollar[5].exprsUnion()} + yyLOCAL = &LinestrPropertyFuncExpr{Property: Length, Linestring: yyDollar[3].exprUnion(), PropertyDefArg: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1211: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6177 +//line sql.y:6162 { - yyLOCAL = &JSONUnquoteExpr{JSONValue: yyDollar[3].exprUnion()} + yyLOCAL = &LinestrPropertyFuncExpr{Property: NumPoints, Linestring: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1212: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6181 +//line sql.y:6166 { - yyLOCAL = &PolygonExpr{LinestringParams: yyDollar[3].exprsUnion()} + yyLOCAL = &LinestrPropertyFuncExpr{Property: PointN, Linestring: yyDollar[3].exprUnion(), PropertyDefArg: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1213: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6185 +//line sql.y:6170 { - yyLOCAL = &LineStringExpr{PointParams: yyDollar[3].exprsUnion()} + yyLOCAL = &LinestrPropertyFuncExpr{Property: StartPoint, Linestring: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1214: - yyDollar = yyS[yypt-6 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6189 +//line sql.y:6174 { - yyLOCAL = &PointExpr{XCordinate: yyDollar[3].exprUnion(), YCordinate: yyDollar[5].exprUnion()} + yyLOCAL = &PointPropertyFuncExpr{Property: XCordinate, Point: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1215: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6193 +//line sql.y:6178 { - yyLOCAL = &ArgumentLessWindowExpr{Type: yyDollar[1].argumentLessWindowExprTypeUnion(), OverClause: yyDollar[4].overClauseUnion()} + yyLOCAL = &PointPropertyFuncExpr{Property: XCordinate, Point: yyDollar[3].exprUnion(), ValueToSet: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1216: - yyDollar = yyS[yypt-6 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6197 +//line sql.y:6182 { - yyLOCAL = &FirstOrLastValueExpr{Type: yyDollar[1].firstOrLastValueExprTypeUnion(), Expr: yyDollar[3].exprUnion(), NullTreatmentClause: yyDollar[5].nullTreatmentClauseUnion(), OverClause: yyDollar[6].overClauseUnion()} + yyLOCAL = &PointPropertyFuncExpr{Property: YCordinate, Point: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1217: - yyDollar = yyS[yypt-5 : yypt+1] + yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6201 +//line sql.y:6186 { - yyLOCAL = &NtileExpr{N: yyDollar[3].exprUnion(), OverClause: yyDollar[5].overClauseUnion()} + yyLOCAL = &PointPropertyFuncExpr{Property: YCordinate, Point: yyDollar[3].exprUnion(), ValueToSet: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1218: - yyDollar = yyS[yypt-9 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6205 +//line sql.y:6190 { - yyLOCAL = &NTHValueExpr{Expr: yyDollar[3].exprUnion(), N: yyDollar[5].exprUnion(), FromFirstLastClause: yyDollar[7].fromFirstLastClauseUnion(), NullTreatmentClause: yyDollar[8].nullTreatmentClauseUnion(), OverClause: yyDollar[9].overClauseUnion()} + yyLOCAL = &GeomFromTextExpr{Type: GeometryFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1219: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6209 +//line sql.y:6194 { - yyLOCAL = &LagLeadExpr{Type: yyDollar[1].lagLeadExprTypeUnion(), Expr: yyDollar[3].exprUnion(), NullTreatmentClause: yyDollar[5].nullTreatmentClauseUnion(), OverClause: yyDollar[6].overClauseUnion()} + yyLOCAL = &GeomFromTextExpr{Type: GeometryFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1220: - yyDollar = yyS[yypt-9 : yypt+1] + yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6213 +//line sql.y:6198 { - yyLOCAL = &LagLeadExpr{Type: yyDollar[1].lagLeadExprTypeUnion(), Expr: yyDollar[3].exprUnion(), N: yyDollar[5].exprUnion(), Default: yyDollar[6].exprUnion(), NullTreatmentClause: yyDollar[8].nullTreatmentClauseUnion(), OverClause: yyDollar[9].overClauseUnion()} + yyLOCAL = &GeomFromTextExpr{Type: GeometryFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1221: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6202 + { + yyLOCAL = &GeomFromTextExpr{Type: GeometryCollectionFromText, WktText: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1222: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6206 + { + yyLOCAL = &GeomFromTextExpr{Type: GeometryCollectionFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1223: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6210 + { + yyLOCAL = &GeomFromTextExpr{Type: GeometryCollectionFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1224: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6214 + { + yyLOCAL = &GeomFromTextExpr{Type: LineStringFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1225: - yyDollar = yyS[yypt-1 : yypt+1] + yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6223 +//line sql.y:6218 { - yyLOCAL = yyDollar[1].exprUnion() + yyLOCAL = &GeomFromTextExpr{Type: LineStringFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1226: - yyDollar = yyS[yypt-1 : yypt+1] + yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6227 +//line sql.y:6222 { - yyLOCAL = NewIntLiteral(yyDollar[1].str) + yyLOCAL = &GeomFromTextExpr{Type: LineStringFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL case 1227: - yyDollar = yyS[yypt-1 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6231 +//line sql.y:6226 { - yyLOCAL = yyDollar[1].variableUnion() + yyLOCAL = &GeomFromTextExpr{Type: MultiLinestringFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1228: - yyDollar = yyS[yypt-1 : yypt+1] + yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6235 +//line sql.y:6230 { - yyLOCAL = NewArgument(yyDollar[1].str[1:]) - bindVariable(yylex, yyDollar[1].str[1:]) + yyLOCAL = &GeomFromTextExpr{Type: MultiLinestringFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1229: - yyDollar = yyS[yypt-0 : yypt+1] + yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6241 +//line sql.y:6234 { - yyLOCAL = nil + yyLOCAL = &GeomFromTextExpr{Type: MultiLinestringFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL case 1230: - yyDollar = yyS[yypt-2 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6245 +//line sql.y:6238 { - yyLOCAL = yyDollar[2].exprUnion() + yyLOCAL = &GeomFromTextExpr{Type: MultiPointFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1231: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6251 +//line sql.y:6242 { - yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()} + yyLOCAL = &GeomFromTextExpr{Type: MultiPointFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1232: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6255 +//line sql.y:6246 { - yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion()} + yyLOCAL = &GeomFromTextExpr{Type: MultiPointFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL case 1233: - yyDollar = yyS[yypt-10 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6259 +//line sql.y:6250 { - yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion()} + yyLOCAL = &GeomFromTextExpr{Type: MultiPolygonFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1234: - yyDollar = yyS[yypt-12 : yypt+1] + yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6263 +//line sql.y:6254 { - yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion(), ReturnOption: yyDollar[11].exprUnion()} + yyLOCAL = &GeomFromTextExpr{Type: MultiPolygonFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1235: - yyDollar = yyS[yypt-14 : yypt+1] + yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6267 +//line sql.y:6258 { - // Match type is kept expression as TRIM( ' m ') is accepted - yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion(), ReturnOption: yyDollar[11].exprUnion(), MatchType: yyDollar[13].exprUnion()} + yyLOCAL = &GeomFromTextExpr{Type: MultiPolygonFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL case 1236: - yyDollar = yyS[yypt-6 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6272 +//line sql.y:6262 { - yyLOCAL = &RegexpLikeExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()} + yyLOCAL = &GeomFromTextExpr{Type: PointFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1237: - yyDollar = yyS[yypt-8 : yypt+1] + yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6276 +//line sql.y:6266 { - yyLOCAL = &RegexpLikeExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), MatchType: yyDollar[7].exprUnion()} + yyLOCAL = &GeomFromTextExpr{Type: PointFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1238: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6280 +//line sql.y:6270 { - yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion()} + yyLOCAL = &GeomFromTextExpr{Type: PointFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL case 1239: - yyDollar = yyS[yypt-10 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6284 +//line sql.y:6274 { - yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion()} + yyLOCAL = &GeomFromTextExpr{Type: PolygonFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1240: - yyDollar = yyS[yypt-12 : yypt+1] + yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6288 +//line sql.y:6278 { - yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion(), Occurrence: yyDollar[11].exprUnion()} + yyLOCAL = &GeomFromTextExpr{Type: PolygonFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1241: - yyDollar = yyS[yypt-14 : yypt+1] + yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6292 +//line sql.y:6282 { - // Match type is kept expression as TRIM( ' m ') is accepted - yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion(), Occurrence: yyDollar[11].exprUnion(), MatchType: yyDollar[13].exprUnion()} + yyLOCAL = &GeomFromTextExpr{Type: PolygonFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL case 1242: - yyDollar = yyS[yypt-6 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6297 +//line sql.y:6286 { - yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()} + yyLOCAL = &GeomFromWKBExpr{Type: GeometryFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1243: - yyDollar = yyS[yypt-8 : yypt+1] + yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6301 +//line sql.y:6290 { - yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion()} + yyLOCAL = &GeomFromWKBExpr{Type: GeometryFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1244: - yyDollar = yyS[yypt-10 : yypt+1] + yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6305 +//line sql.y:6294 { - yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion()} + yyLOCAL = &GeomFromWKBExpr{Type: GeometryFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL case 1245: - yyDollar = yyS[yypt-12 : yypt+1] + yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6309 +//line sql.y:6298 { - // Match type is kept expression as TRIM( ' m ') is accepted - yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion(), MatchType: yyDollar[11].exprUnion()} + yyLOCAL = &GeomFromWKBExpr{Type: GeometryCollectionFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL case 1246: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6316 +//line sql.y:6302 { - yyLOCAL = &ExtractValueExpr{Fragment: yyDollar[3].exprUnion(), XPathExpr: yyDollar[5].exprUnion()} + yyLOCAL = &GeomFromWKBExpr{Type: GeometryCollectionFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1247: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6320 +//line sql.y:6306 { - yyLOCAL = &UpdateXMLExpr{Target: yyDollar[3].exprUnion(), XPathExpr: yyDollar[5].exprUnion(), NewXML: yyDollar[7].exprUnion()} + yyLOCAL = &GeomFromWKBExpr{Type: GeometryCollectionFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL case 1248: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr +//line sql.y:6310 + { + yyLOCAL = &GeomFromWKBExpr{Type: LineStringFromWKB, WkbBlob: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1249: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6314 + { + yyLOCAL = &GeomFromWKBExpr{Type: LineStringFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1250: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6318 + { + yyLOCAL = &GeomFromWKBExpr{Type: LineStringFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1251: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6322 + { + yyLOCAL = &GeomFromWKBExpr{Type: MultiLinestringFromWKB, WkbBlob: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1252: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr //line sql.y:6326 + { + yyLOCAL = &GeomFromWKBExpr{Type: MultiLinestringFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1253: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6330 + { + yyLOCAL = &GeomFromWKBExpr{Type: MultiLinestringFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1254: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6334 + { + yyLOCAL = &GeomFromWKBExpr{Type: MultiPointFromWKB, WkbBlob: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1255: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6338 + { + yyLOCAL = &GeomFromWKBExpr{Type: MultiPointFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1256: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6342 + { + yyLOCAL = &GeomFromWKBExpr{Type: MultiPointFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1257: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6346 + { + yyLOCAL = &GeomFromWKBExpr{Type: MultiPolygonFromWKB, WkbBlob: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1258: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6350 + { + yyLOCAL = &GeomFromWKBExpr{Type: MultiPolygonFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1259: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6354 + { + yyLOCAL = &GeomFromWKBExpr{Type: MultiPolygonFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1260: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6358 + { + yyLOCAL = &GeomFromWKBExpr{Type: PointFromWKB, WkbBlob: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1261: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6362 + { + yyLOCAL = &GeomFromWKBExpr{Type: PointFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1262: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6366 + { + yyLOCAL = &GeomFromWKBExpr{Type: PointFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1263: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6370 + { + yyLOCAL = &GeomFromWKBExpr{Type: PolygonFromWKB, WkbBlob: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1264: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6374 + { + yyLOCAL = &GeomFromWKBExpr{Type: PolygonFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1265: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6378 + { + yyLOCAL = &GeomFromWKBExpr{Type: PolygonFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1266: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6382 + { + yyLOCAL = &PolygonPropertyFuncExpr{Property: Area, Polygon: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1267: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6386 + { + yyLOCAL = &PolygonPropertyFuncExpr{Property: Centroid, Polygon: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1268: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6390 + { + yyLOCAL = &PolygonPropertyFuncExpr{Property: ExteriorRing, Polygon: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1269: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6394 + { + yyLOCAL = &PolygonPropertyFuncExpr{Property: InteriorRingN, Polygon: yyDollar[3].exprUnion(), PropertyDefArg: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1270: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6398 + { + yyLOCAL = &PolygonPropertyFuncExpr{Property: NumInteriorRings, Polygon: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1271: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6402 + { + yyLOCAL = &GeomCollPropertyFuncExpr{Property: GeometryN, GeomColl: yyDollar[3].exprUnion(), PropertyDefArg: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1272: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6406 + { + yyLOCAL = &GeomCollPropertyFuncExpr{Property: NumGeometries, GeomColl: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1273: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6410 + { + yyLOCAL = &GeoHashFromLatLongExpr{Longitude: yyDollar[3].exprUnion(), Latitude: yyDollar[5].exprUnion(), MaxLength: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1274: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6414 + { + yyLOCAL = &GeoHashFromPointExpr{Point: yyDollar[3].exprUnion(), MaxLength: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1275: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6418 + { + yyLOCAL = &GeomFromGeoHashExpr{GeomType: LatitudeFromHash, GeoHash: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1276: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6422 + { + yyLOCAL = &GeomFromGeoHashExpr{GeomType: LongitudeFromHash, GeoHash: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1277: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6426 + { + yyLOCAL = &GeomFromGeoHashExpr{GeomType: PointFromHash, GeoHash: yyDollar[3].exprUnion(), SridOpt: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1278: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6430 + { + yyLOCAL = &GeomFromGeoJSONExpr{GeoJSON: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1279: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6434 + { + yyLOCAL = &GeomFromGeoJSONExpr{GeoJSON: yyDollar[3].exprUnion(), HigherDimHandlerOpt: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1280: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6438 + { + yyLOCAL = &GeomFromGeoJSONExpr{GeoJSON: yyDollar[3].exprUnion(), HigherDimHandlerOpt: yyDollar[5].exprUnion(), Srid: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1281: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6442 + { + yyLOCAL = &GeoJSONFromGeomExpr{Geom: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1282: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6446 + { + yyLOCAL = &GeoJSONFromGeomExpr{Geom: yyDollar[3].exprUnion(), MaxDecimalDigits: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1283: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6450 + { + yyLOCAL = &GeoJSONFromGeomExpr{Geom: yyDollar[3].exprUnion(), MaxDecimalDigits: yyDollar[5].exprUnion(), Bitmask: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1284: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6454 + { + yyLOCAL = &JSONObjectExpr{Params: yyDollar[3].jsonObjectParamsUnion()} + } + yyVAL.union = yyLOCAL + case 1285: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6458 + { + yyLOCAL = &JSONQuoteExpr{StringArg: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1286: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6462 + { + yyLOCAL = &JSONContainsExpr{Target: yyDollar[3].exprUnion(), Candidate: yyDollar[5].exprsUnion()[0], PathList: yyDollar[5].exprsUnion()[1:]} + } + yyVAL.union = yyLOCAL + case 1287: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6466 + { + yyLOCAL = &JSONContainsPathExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), PathList: yyDollar[7].exprsUnion()} + } + yyVAL.union = yyLOCAL + case 1288: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6470 + { + yyLOCAL = &JSONExtractExpr{JSONDoc: yyDollar[3].exprUnion(), PathList: yyDollar[5].exprsUnion()} + } + yyVAL.union = yyLOCAL + case 1289: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6474 + { + yyLOCAL = &JSONKeysExpr{JSONDoc: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1290: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6478 + { + yyLOCAL = &JSONKeysExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1291: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6482 + { + yyLOCAL = &JSONOverlapsExpr{JSONDoc1: yyDollar[3].exprUnion(), JSONDoc2: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1292: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6486 + { + yyLOCAL = &JSONSearchExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), SearchStr: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1293: + yyDollar = yyS[yypt-10 : yypt+1] + var yyLOCAL Expr +//line sql.y:6490 + { + yyLOCAL = &JSONSearchExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), SearchStr: yyDollar[7].exprUnion(), EscapeChar: yyDollar[9].exprsUnion()[0], PathList: yyDollar[9].exprsUnion()[1:]} + } + yyVAL.union = yyLOCAL + case 1294: + yyDollar = yyS[yypt-7 : yypt+1] + var yyLOCAL Expr +//line sql.y:6494 + { + yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion()} + } + yyVAL.union = yyLOCAL + case 1295: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6498 + { + yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion()} + } + yyVAL.union = yyLOCAL + case 1296: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6502 + { + yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), ErrorOnResponse: yyDollar[7].jtOnResponseUnion()} + } + yyVAL.union = yyLOCAL + case 1297: + yyDollar = yyS[yypt-9 : yypt+1] + var yyLOCAL Expr +//line sql.y:6506 + { + yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion(), ErrorOnResponse: yyDollar[8].jtOnResponseUnion()} + } + yyVAL.union = yyLOCAL + case 1298: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6510 + { + yyLOCAL = &JSONAttributesExpr{Type: DepthAttributeType, JSONDoc: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1299: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6514 + { + yyLOCAL = &JSONAttributesExpr{Type: ValidAttributeType, JSONDoc: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1300: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6518 + { + yyLOCAL = &JSONAttributesExpr{Type: TypeAttributeType, JSONDoc: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1301: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6522 + { + yyLOCAL = &JSONAttributesExpr{Type: LengthAttributeType, JSONDoc: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1302: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6526 + { + yyLOCAL = &JSONAttributesExpr{Type: LengthAttributeType, JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1303: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6530 + { + yyLOCAL = &JSONValueModifierExpr{Type: JSONArrayAppendType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} + } + yyVAL.union = yyLOCAL + case 1304: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6534 + { + yyLOCAL = &JSONValueModifierExpr{Type: JSONArrayInsertType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} + } + yyVAL.union = yyLOCAL + case 1305: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6538 + { + yyLOCAL = &JSONValueModifierExpr{Type: JSONInsertType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} + } + yyVAL.union = yyLOCAL + case 1306: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6542 + { + yyLOCAL = &JSONValueModifierExpr{Type: JSONReplaceType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} + } + yyVAL.union = yyLOCAL + case 1307: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6546 + { + yyLOCAL = &JSONValueModifierExpr{Type: JSONSetType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} + } + yyVAL.union = yyLOCAL + case 1308: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6550 + { + yyLOCAL = &JSONValueMergeExpr{Type: JSONMergeType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: yyDollar[5].exprsUnion()} + } + yyVAL.union = yyLOCAL + case 1309: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6554 + { + yyLOCAL = &JSONValueMergeExpr{Type: JSONMergePatchType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: yyDollar[5].exprsUnion()} + } + yyVAL.union = yyLOCAL + case 1310: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6558 + { + yyLOCAL = &JSONValueMergeExpr{Type: JSONMergePreserveType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: yyDollar[5].exprsUnion()} + } + yyVAL.union = yyLOCAL + case 1311: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6562 + { + yyLOCAL = &JSONRemoveExpr{JSONDoc: yyDollar[3].exprUnion(), PathList: yyDollar[5].exprsUnion()} + } + yyVAL.union = yyLOCAL + case 1312: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6566 + { + yyLOCAL = &JSONUnquoteExpr{JSONValue: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1313: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6570 + { + yyLOCAL = &MultiPolygonExpr{PolygonParams: yyDollar[3].exprsUnion()} + } + yyVAL.union = yyLOCAL + case 1314: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6574 + { + yyLOCAL = &MultiPointExpr{PointParams: yyDollar[3].exprsUnion()} + } + yyVAL.union = yyLOCAL + case 1315: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6578 + { + yyLOCAL = &MultiLinestringExpr{LinestringParams: yyDollar[3].exprsUnion()} + } + yyVAL.union = yyLOCAL + case 1316: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6582 + { + yyLOCAL = &PolygonExpr{LinestringParams: yyDollar[3].exprsUnion()} + } + yyVAL.union = yyLOCAL + case 1317: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6586 + { + yyLOCAL = &LineStringExpr{PointParams: yyDollar[3].exprsUnion()} + } + yyVAL.union = yyLOCAL + case 1318: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6590 + { + yyLOCAL = &PointExpr{XCordinate: yyDollar[3].exprUnion(), YCordinate: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1319: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6594 + { + yyLOCAL = &ArgumentLessWindowExpr{Type: yyDollar[1].argumentLessWindowExprTypeUnion(), OverClause: yyDollar[4].overClauseUnion()} + } + yyVAL.union = yyLOCAL + case 1320: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6598 + { + yyLOCAL = &FirstOrLastValueExpr{Type: yyDollar[1].firstOrLastValueExprTypeUnion(), Expr: yyDollar[3].exprUnion(), NullTreatmentClause: yyDollar[5].nullTreatmentClauseUnion(), OverClause: yyDollar[6].overClauseUnion()} + } + yyVAL.union = yyLOCAL + case 1321: + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Expr +//line sql.y:6602 + { + yyLOCAL = &NtileExpr{N: yyDollar[3].exprUnion(), OverClause: yyDollar[5].overClauseUnion()} + } + yyVAL.union = yyLOCAL + case 1322: + yyDollar = yyS[yypt-9 : yypt+1] + var yyLOCAL Expr +//line sql.y:6606 + { + yyLOCAL = &NTHValueExpr{Expr: yyDollar[3].exprUnion(), N: yyDollar[5].exprUnion(), FromFirstLastClause: yyDollar[7].fromFirstLastClauseUnion(), NullTreatmentClause: yyDollar[8].nullTreatmentClauseUnion(), OverClause: yyDollar[9].overClauseUnion()} + } + yyVAL.union = yyLOCAL + case 1323: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6610 + { + yyLOCAL = &LagLeadExpr{Type: yyDollar[1].lagLeadExprTypeUnion(), Expr: yyDollar[3].exprUnion(), NullTreatmentClause: yyDollar[5].nullTreatmentClauseUnion(), OverClause: yyDollar[6].overClauseUnion()} + } + yyVAL.union = yyLOCAL + case 1324: + yyDollar = yyS[yypt-9 : yypt+1] + var yyLOCAL Expr +//line sql.y:6614 + { + yyLOCAL = &LagLeadExpr{Type: yyDollar[1].lagLeadExprTypeUnion(), Expr: yyDollar[3].exprUnion(), N: yyDollar[5].exprUnion(), Default: yyDollar[6].exprUnion(), NullTreatmentClause: yyDollar[8].nullTreatmentClauseUnion(), OverClause: yyDollar[9].overClauseUnion()} + } + yyVAL.union = yyLOCAL + case 1325: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6618 + { + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprAdddate, Date: yyDollar[3].exprUnion(), Interval: yyDollar[6].exprUnion(), Unit: yyDollar[7].intervalTypeUnion()} + } + yyVAL.union = yyLOCAL + case 1326: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6622 + { + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprAdddate, Date: yyDollar[3].exprUnion(), Interval: yyDollar[5].exprUnion(), Unit: IntervalNone} + } + yyVAL.union = yyLOCAL + case 1327: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6626 + { + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprDateAdd, Date: yyDollar[3].exprUnion(), Interval: yyDollar[6].exprUnion(), Unit: yyDollar[7].intervalTypeUnion()} + } + yyVAL.union = yyLOCAL + case 1328: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6630 + { + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprDateSub, Date: yyDollar[3].exprUnion(), Interval: yyDollar[6].exprUnion(), Unit: yyDollar[7].intervalTypeUnion()} + } + yyVAL.union = yyLOCAL + case 1329: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6634 + { + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprSubdate, Date: yyDollar[3].exprUnion(), Interval: yyDollar[6].exprUnion(), Unit: yyDollar[7].intervalTypeUnion()} + } + yyVAL.union = yyLOCAL + case 1330: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6638 + { + yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprSubdate, Date: yyDollar[3].exprUnion(), Interval: yyDollar[5].exprUnion(), Unit: IntervalNone} + } + yyVAL.union = yyLOCAL + case 1335: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL Expr +//line sql.y:6648 + { + yyLOCAL = yyDollar[1].exprUnion() + } + yyVAL.union = yyLOCAL + case 1336: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL Expr +//line sql.y:6652 + { + yyLOCAL = NewIntLiteral(yyDollar[1].str) + } + yyVAL.union = yyLOCAL + case 1337: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL Expr +//line sql.y:6656 + { + yyLOCAL = yyDollar[1].variableUnion() + } + yyVAL.union = yyLOCAL + case 1338: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL Expr +//line sql.y:6660 + { + yyLOCAL = parseBindVariable(yylex, yyDollar[1].str[1:]) + } + yyVAL.union = yyLOCAL + case 1339: + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL Expr +//line sql.y:6665 + { + yyLOCAL = nil + } + yyVAL.union = yyLOCAL + case 1340: + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Expr +//line sql.y:6669 + { + yyLOCAL = yyDollar[2].exprUnion() + } + yyVAL.union = yyLOCAL + case 1341: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6675 + { + yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1342: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6679 + { + yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1343: + yyDollar = yyS[yypt-10 : yypt+1] + var yyLOCAL Expr +//line sql.y:6683 + { + yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1344: + yyDollar = yyS[yypt-12 : yypt+1] + var yyLOCAL Expr +//line sql.y:6687 + { + yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion(), ReturnOption: yyDollar[11].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1345: + yyDollar = yyS[yypt-14 : yypt+1] + var yyLOCAL Expr +//line sql.y:6691 + { + // Match type is kept expression as TRIM( ' m ') is accepted + yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion(), ReturnOption: yyDollar[11].exprUnion(), MatchType: yyDollar[13].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1346: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6696 + { + yyLOCAL = &RegexpLikeExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1347: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6700 + { + yyLOCAL = &RegexpLikeExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), MatchType: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1348: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6704 + { + yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1349: + yyDollar = yyS[yypt-10 : yypt+1] + var yyLOCAL Expr +//line sql.y:6708 + { + yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1350: + yyDollar = yyS[yypt-12 : yypt+1] + var yyLOCAL Expr +//line sql.y:6712 + { + yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion(), Occurrence: yyDollar[11].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1351: + yyDollar = yyS[yypt-14 : yypt+1] + var yyLOCAL Expr +//line sql.y:6716 + { + // Match type is kept expression as TRIM( ' m ') is accepted + yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion(), Occurrence: yyDollar[11].exprUnion(), MatchType: yyDollar[13].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1352: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6721 + { + yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1353: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6725 + { + yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1354: + yyDollar = yyS[yypt-10 : yypt+1] + var yyLOCAL Expr +//line sql.y:6729 + { + yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1355: + yyDollar = yyS[yypt-12 : yypt+1] + var yyLOCAL Expr +//line sql.y:6733 + { + // Match type is kept expression as TRIM( ' m ') is accepted + yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion(), MatchType: yyDollar[11].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1356: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6740 + { + yyLOCAL = &ExtractValueExpr{Fragment: yyDollar[3].exprUnion(), XPathExpr: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1357: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6744 + { + yyLOCAL = &UpdateXMLExpr{Target: yyDollar[3].exprUnion(), XPathExpr: yyDollar[5].exprUnion(), NewXML: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1358: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6750 { yyLOCAL = &PerformanceSchemaFuncExpr{Type: FormatBytesType, Argument: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1249: + case 1359: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6330 +//line sql.y:6754 { yyLOCAL = &PerformanceSchemaFuncExpr{Type: FormatPicoTimeType, Argument: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1250: - yyDollar = yyS[yypt-3 : yypt+1] - var yyLOCAL Expr -//line sql.y:6334 + case 1360: + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Expr +//line sql.y:6758 + { + yyLOCAL = &PerformanceSchemaFuncExpr{Type: PsCurrentThreadIDType} + } + yyVAL.union = yyLOCAL + case 1361: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6762 + { + yyLOCAL = &PerformanceSchemaFuncExpr{Type: PsThreadIDType, Argument: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1362: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6768 + { + yyLOCAL = >IDFuncExpr{Type: GTIDSubsetType, Set1: yyDollar[3].exprUnion(), Set2: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1363: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6772 + { + yyLOCAL = >IDFuncExpr{Type: GTIDSubtractType, Set1: yyDollar[3].exprUnion(), Set2: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1364: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6776 + { + yyLOCAL = >IDFuncExpr{Type: WaitForExecutedGTIDSetType, Set1: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1365: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6780 + { + yyLOCAL = >IDFuncExpr{Type: WaitForExecutedGTIDSetType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1366: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Expr +//line sql.y:6784 + { + yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1367: + yyDollar = yyS[yypt-6 : yypt+1] + var yyLOCAL Expr +//line sql.y:6788 + { + yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1368: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:6792 + { + yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion(), Channel: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1369: + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL *ConvertType +//line sql.y:6797 + { + yyLOCAL = nil + } + yyVAL.union = yyLOCAL + case 1370: + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL *ConvertType +//line sql.y:6801 + { + yyLOCAL = yyDollar[2].convertTypeUnion() + } + yyVAL.union = yyLOCAL + case 1371: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6807 + { + yyLOCAL = IntervalDayHour + } + yyVAL.union = yyLOCAL + case 1372: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6811 + { + yyLOCAL = IntervalDayMicrosecond + } + yyVAL.union = yyLOCAL + case 1373: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6815 + { + yyLOCAL = IntervalDayMinute + } + yyVAL.union = yyLOCAL + case 1374: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6819 + { + yyLOCAL = IntervalDaySecond + } + yyVAL.union = yyLOCAL + case 1375: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6823 + { + yyLOCAL = IntervalHourMicrosecond + } + yyVAL.union = yyLOCAL + case 1376: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6827 + { + yyLOCAL = IntervalHourMinute + } + yyVAL.union = yyLOCAL + case 1377: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6831 { - yyLOCAL = &PerformanceSchemaFuncExpr{Type: PsCurrentThreadIDType} + yyLOCAL = IntervalHourSecond } yyVAL.union = yyLOCAL - case 1251: - yyDollar = yyS[yypt-4 : yypt+1] - var yyLOCAL Expr -//line sql.y:6338 + case 1378: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6835 { - yyLOCAL = &PerformanceSchemaFuncExpr{Type: PsThreadIDType, Argument: yyDollar[3].exprUnion()} + yyLOCAL = IntervalMinuteMicrosecond } yyVAL.union = yyLOCAL - case 1252: - yyDollar = yyS[yypt-6 : yypt+1] - var yyLOCAL Expr -//line sql.y:6344 + case 1379: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6839 { - yyLOCAL = >IDFuncExpr{Type: GTIDSubsetType, Set1: yyDollar[3].exprUnion(), Set2: yyDollar[5].exprUnion()} + yyLOCAL = IntervalMinuteSecond } yyVAL.union = yyLOCAL - case 1253: - yyDollar = yyS[yypt-6 : yypt+1] - var yyLOCAL Expr -//line sql.y:6348 + case 1380: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6843 { - yyLOCAL = >IDFuncExpr{Type: GTIDSubtractType, Set1: yyDollar[3].exprUnion(), Set2: yyDollar[5].exprUnion()} + yyLOCAL = IntervalSecondMicrosecond } yyVAL.union = yyLOCAL - case 1254: - yyDollar = yyS[yypt-4 : yypt+1] - var yyLOCAL Expr -//line sql.y:6352 + case 1381: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6847 { - yyLOCAL = >IDFuncExpr{Type: WaitForExecutedGTIDSetType, Set1: yyDollar[3].exprUnion()} + yyLOCAL = IntervalYearMonth } yyVAL.union = yyLOCAL - case 1255: - yyDollar = yyS[yypt-6 : yypt+1] - var yyLOCAL Expr -//line sql.y:6356 + case 1382: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6851 { - yyLOCAL = >IDFuncExpr{Type: WaitForExecutedGTIDSetType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion()} + yyLOCAL = IntervalDay } yyVAL.union = yyLOCAL - case 1256: - yyDollar = yyS[yypt-4 : yypt+1] - var yyLOCAL Expr -//line sql.y:6360 + case 1383: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6855 { - yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion()} + yyLOCAL = IntervalWeek } yyVAL.union = yyLOCAL - case 1257: - yyDollar = yyS[yypt-6 : yypt+1] - var yyLOCAL Expr -//line sql.y:6364 + case 1384: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6859 { - yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion()} + yyLOCAL = IntervalHour } yyVAL.union = yyLOCAL - case 1258: - yyDollar = yyS[yypt-8 : yypt+1] - var yyLOCAL Expr -//line sql.y:6368 + case 1385: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6863 { - yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion(), Channel: yyDollar[7].exprUnion()} + yyLOCAL = IntervalMinute } yyVAL.union = yyLOCAL - case 1259: - yyDollar = yyS[yypt-0 : yypt+1] - var yyLOCAL *ConvertType -//line sql.y:6373 + case 1386: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6867 { - yyLOCAL = nil + yyLOCAL = IntervalMonth } yyVAL.union = yyLOCAL - case 1260: - yyDollar = yyS[yypt-2 : yypt+1] - var yyLOCAL *ConvertType -//line sql.y:6377 + case 1387: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL IntervalType +//line sql.y:6871 { - yyLOCAL = yyDollar[2].convertTypeUnion() + yyLOCAL = IntervalQuarter } yyVAL.union = yyLOCAL - case 1261: + case 1388: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:6383 + var yyLOCAL IntervalType +//line sql.y:6875 { + yyLOCAL = IntervalSecond } - case 1262: + yyVAL.union = yyLOCAL + case 1389: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6385 + var yyLOCAL IntervalType +//line sql.y:6879 { - yyLOCAL = IntervalDayHour + yyLOCAL = IntervalMicrosecond } yyVAL.union = yyLOCAL - case 1263: + case 1390: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6389 + var yyLOCAL IntervalType +//line sql.y:6883 { - yyLOCAL = IntervalDayMicrosecond + yyLOCAL = IntervalYear } yyVAL.union = yyLOCAL - case 1264: + case 1391: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6393 + var yyLOCAL IntervalType +//line sql.y:6889 { - yyLOCAL = IntervalDayMinute + yyLOCAL = IntervalDay } yyVAL.union = yyLOCAL - case 1265: + case 1392: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6397 + var yyLOCAL IntervalType +//line sql.y:6893 { - yyLOCAL = IntervalDaySecond + yyLOCAL = IntervalWeek } yyVAL.union = yyLOCAL - case 1266: + case 1393: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6401 + var yyLOCAL IntervalType +//line sql.y:6897 { - yyLOCAL = IntervalHourMicrosecond + yyLOCAL = IntervalHour } yyVAL.union = yyLOCAL - case 1267: + case 1394: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6405 + var yyLOCAL IntervalType +//line sql.y:6901 { - yyLOCAL = IntervalHourMinute + yyLOCAL = IntervalMinute } yyVAL.union = yyLOCAL - case 1268: + case 1395: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6409 + var yyLOCAL IntervalType +//line sql.y:6905 { - yyLOCAL = IntervalHourSecond + yyLOCAL = IntervalMonth } yyVAL.union = yyLOCAL - case 1269: + case 1396: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6413 + var yyLOCAL IntervalType +//line sql.y:6909 { - yyLOCAL = IntervalMinuteMicrosecond + yyLOCAL = IntervalQuarter } yyVAL.union = yyLOCAL - case 1270: + case 1397: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6417 + var yyLOCAL IntervalType +//line sql.y:6913 { - yyLOCAL = IntervalMinuteSecond + yyLOCAL = IntervalSecond } yyVAL.union = yyLOCAL - case 1271: + case 1398: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6421 + var yyLOCAL IntervalType +//line sql.y:6917 { - yyLOCAL = IntervalSecondMicrosecond + yyLOCAL = IntervalMicrosecond } yyVAL.union = yyLOCAL - case 1272: + case 1399: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6425 + var yyLOCAL IntervalType +//line sql.y:6921 { - yyLOCAL = IntervalYearMonth + yyLOCAL = IntervalYear } yyVAL.union = yyLOCAL - case 1273: + case 1400: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6431 + var yyLOCAL IntervalType +//line sql.y:6925 { yyLOCAL = IntervalDay } yyVAL.union = yyLOCAL - case 1274: + case 1401: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6435 + var yyLOCAL IntervalType +//line sql.y:6929 { yyLOCAL = IntervalWeek } yyVAL.union = yyLOCAL - case 1275: + case 1402: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6439 + var yyLOCAL IntervalType +//line sql.y:6933 { yyLOCAL = IntervalHour } yyVAL.union = yyLOCAL - case 1276: + case 1403: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6443 + var yyLOCAL IntervalType +//line sql.y:6937 { yyLOCAL = IntervalMinute } yyVAL.union = yyLOCAL - case 1277: + case 1404: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6447 + var yyLOCAL IntervalType +//line sql.y:6941 { yyLOCAL = IntervalMonth } yyVAL.union = yyLOCAL - case 1278: + case 1405: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6451 + var yyLOCAL IntervalType +//line sql.y:6945 { yyLOCAL = IntervalQuarter } yyVAL.union = yyLOCAL - case 1279: + case 1406: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6455 + var yyLOCAL IntervalType +//line sql.y:6949 { yyLOCAL = IntervalSecond } yyVAL.union = yyLOCAL - case 1280: + case 1407: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6459 + var yyLOCAL IntervalType +//line sql.y:6953 { yyLOCAL = IntervalMicrosecond } yyVAL.union = yyLOCAL - case 1281: + case 1408: yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL IntervalTypes -//line sql.y:6463 + var yyLOCAL IntervalType +//line sql.y:6957 { yyLOCAL = IntervalYear } yyVAL.union = yyLOCAL - case 1284: + case 1411: yyDollar = yyS[yypt-0 : yypt+1] - var yyLOCAL Expr -//line sql.y:6473 + var yyLOCAL int +//line sql.y:6967 { - yyLOCAL = nil + yyLOCAL = 0 } yyVAL.union = yyLOCAL - case 1285: + case 1412: yyDollar = yyS[yypt-2 : yypt+1] - var yyLOCAL Expr -//line sql.y:6477 - { - yyLOCAL = nil - } - yyVAL.union = yyLOCAL - case 1286: - yyDollar = yyS[yypt-3 : yypt+1] - var yyLOCAL Expr -//line sql.y:6481 + var yyLOCAL int +//line sql.y:6971 { - yyLOCAL = NewIntLiteral(yyDollar[2].str) + yyLOCAL = 0 } yyVAL.union = yyLOCAL - case 1287: + case 1413: yyDollar = yyS[yypt-3 : yypt+1] - var yyLOCAL Expr -//line sql.y:6485 + var yyLOCAL int +//line sql.y:6975 { - yyLOCAL = NewArgument(yyDollar[2].str[1:]) - bindVariable(yylex, yyDollar[2].str[1:]) + yyLOCAL = convertStringToInt(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 1288: + case 1414: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6496 +//line sql.y:6985 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("if"), Exprs: yyDollar[3].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1289: + case 1415: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6500 +//line sql.y:6989 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("database"), Exprs: yyDollar[3].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1290: + case 1416: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6504 +//line sql.y:6993 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("schema"), Exprs: yyDollar[3].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1291: + case 1417: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6508 +//line sql.y:6997 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("mod"), Exprs: yyDollar[3].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1292: + case 1418: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6512 +//line sql.y:7001 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("replace"), Exprs: yyDollar[3].selectExprsUnion()} } yyVAL.union = yyLOCAL - case 1293: + case 1419: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL MatchExprOption -//line sql.y:6518 +//line sql.y:7007 { yyLOCAL = NoOption } yyVAL.union = yyLOCAL - case 1294: + case 1420: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL MatchExprOption -//line sql.y:6522 +//line sql.y:7011 { yyLOCAL = BooleanModeOpt } yyVAL.union = yyLOCAL - case 1295: + case 1421: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL MatchExprOption -//line sql.y:6526 +//line sql.y:7015 { yyLOCAL = NaturalLanguageModeOpt } yyVAL.union = yyLOCAL - case 1296: + case 1422: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL MatchExprOption -//line sql.y:6530 +//line sql.y:7019 { yyLOCAL = NaturalLanguageModeWithQueryExpansionOpt } yyVAL.union = yyLOCAL - case 1297: + case 1423: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL MatchExprOption -//line sql.y:6534 +//line sql.y:7023 { yyLOCAL = QueryExpansionOpt } yyVAL.union = yyLOCAL - case 1298: + case 1424: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:6540 +//line sql.y:7029 { yyVAL.str = string(yyDollar[1].identifierCI.String()) } - case 1299: + case 1425: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:6544 +//line sql.y:7033 { yyVAL.str = string(yyDollar[1].str) } - case 1300: + case 1426: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:6548 +//line sql.y:7037 { yyVAL.str = string(yyDollar[1].str) } - case 1301: + case 1427: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6554 +//line sql.y:7043 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1302: + case 1428: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6558 +//line sql.y:7047 { yyLOCAL = &ConvertType{Type: string(yyDollar[2].str), Length: NewIntLiteral(yyDollar[4].str)} } yyVAL.union = yyLOCAL - case 1303: + case 1429: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6562 +//line sql.y:7051 { yyLOCAL = &ConvertType{Type: string(yyDollar[2].str), Length: NewIntLiteral(yyDollar[4].str)} } yyVAL.union = yyLOCAL - case 1304: + case 1430: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6568 +//line sql.y:7057 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } yyVAL.union = yyLOCAL - case 1305: + case 1431: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6572 +//line sql.y:7061 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].columnCharset} } yyVAL.union = yyLOCAL - case 1306: + case 1432: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6576 +//line sql.y:7065 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1307: + case 1433: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6580 +//line sql.y:7069 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } yyVAL.union = yyLOCAL - case 1308: + case 1434: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6584 +//line sql.y:7073 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} yyLOCAL.Length = yyDollar[2].LengthScaleOption.Length yyLOCAL.Scale = yyDollar[2].LengthScaleOption.Scale } yyVAL.union = yyLOCAL - case 1309: + case 1435: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6590 +//line sql.y:7079 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1310: + case 1436: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6594 +//line sql.y:7083 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } yyVAL.union = yyLOCAL - case 1311: + case 1437: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6598 +//line sql.y:7087 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1312: + case 1438: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6602 +//line sql.y:7091 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1313: + case 1439: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6606 +//line sql.y:7095 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } yyVAL.union = yyLOCAL - case 1314: + case 1440: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6610 +//line sql.y:7099 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1315: + case 1441: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6614 +//line sql.y:7103 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1316: + case 1442: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6618 +//line sql.y:7107 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} } yyVAL.union = yyLOCAL - case 1317: + case 1443: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6622 +//line sql.y:7111 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1318: + case 1444: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6626 +//line sql.y:7115 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1319: + case 1445: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:6632 +//line sql.y:7121 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 1320: + case 1446: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:6636 +//line sql.y:7125 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 1321: + case 1447: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Expr -//line sql.y:6641 +//line sql.y:7130 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1322: + case 1448: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:6645 +//line sql.y:7134 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1323: + case 1449: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:6650 +//line sql.y:7139 { yyVAL.str = string("") } - case 1324: + case 1450: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:6654 +//line sql.y:7143 { yyVAL.str = encodeSQLString(yyDollar[2].str) } - case 1325: + case 1451: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*When -//line sql.y:6660 +//line sql.y:7149 { yyLOCAL = []*When{yyDollar[1].whenUnion()} } yyVAL.union = yyLOCAL - case 1326: + case 1452: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:6664 +//line sql.y:7153 { yySLICE := (*[]*When)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].whenUnion()) } - case 1327: + case 1453: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *When -//line sql.y:6670 +//line sql.y:7159 { yyLOCAL = &When{Cond: yyDollar[2].exprUnion(), Val: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1328: + case 1454: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Expr -//line sql.y:6675 +//line sql.y:7164 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1329: + case 1455: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:6679 +//line sql.y:7168 { yyLOCAL = yyDollar[2].exprUnion() } yyVAL.union = yyLOCAL - case 1330: + case 1456: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ColName -//line sql.y:6685 +//line sql.y:7174 { yyLOCAL = &ColName{Name: yyDollar[1].identifierCI} } yyVAL.union = yyLOCAL - case 1331: + case 1457: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ColName -//line sql.y:6689 +//line sql.y:7178 { yyLOCAL = &ColName{Name: NewIdentifierCI(string(yyDollar[1].str))} } yyVAL.union = yyLOCAL - case 1332: + case 1458: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColName -//line sql.y:6693 +//line sql.y:7182 { yyLOCAL = &ColName{Qualifier: TableName{Name: yyDollar[1].identifierCS}, Name: yyDollar[3].identifierCI} } yyVAL.union = yyLOCAL - case 1333: + case 1459: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *ColName -//line sql.y:6697 +//line sql.y:7186 { yyLOCAL = &ColName{Qualifier: TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS}, Name: yyDollar[5].identifierCI} } yyVAL.union = yyLOCAL - case 1334: + case 1460: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:6703 +//line sql.y:7192 { yyLOCAL = yyDollar[1].colNameUnion() } yyVAL.union = yyLOCAL - case 1335: + case 1461: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:6707 +//line sql.y:7196 { yyLOCAL = &Offset{V: convertStringToInt(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1336: + case 1462: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:6713 +//line sql.y:7202 { // TODO(sougou): Deprecate this construct. if yyDollar[1].identifierCI.Lowered() != "value" { @@ -18887,427 +20991,426 @@ yydefault: yyLOCAL = NewIntLiteral("1") } yyVAL.union = yyLOCAL - case 1337: + case 1463: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:6722 +//line sql.y:7211 { yyLOCAL = NewIntLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 1338: + case 1464: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:6726 +//line sql.y:7215 { - yyLOCAL = NewArgument(yyDollar[1].str[1:]) - bindVariable(yylex, yyDollar[1].str[1:]) + yyLOCAL = parseBindVariable(yylex, yyDollar[1].str[1:]) } yyVAL.union = yyLOCAL - case 1339: + case 1465: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Exprs -//line sql.y:6732 +//line sql.y:7220 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1340: + case 1466: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Exprs -//line sql.y:6736 +//line sql.y:7224 { yyLOCAL = yyDollar[3].exprsUnion() } yyVAL.union = yyLOCAL - case 1341: + case 1467: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Expr -//line sql.y:6741 +//line sql.y:7229 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1342: + case 1468: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:6745 +//line sql.y:7233 { yyLOCAL = yyDollar[2].exprUnion() } yyVAL.union = yyLOCAL - case 1343: + case 1469: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *NamedWindow -//line sql.y:6751 +//line sql.y:7239 { yyLOCAL = &NamedWindow{yyDollar[2].windowDefinitionsUnion()} } yyVAL.union = yyLOCAL - case 1344: + case 1470: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL NamedWindows -//line sql.y:6757 +//line sql.y:7245 { yyLOCAL = NamedWindows{yyDollar[1].namedWindowUnion()} } yyVAL.union = yyLOCAL - case 1345: + case 1471: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:6761 +//line sql.y:7249 { yySLICE := (*NamedWindows)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].namedWindowUnion()) } - case 1346: + case 1472: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL NamedWindows -//line sql.y:6766 +//line sql.y:7254 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1347: + case 1473: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL NamedWindows -//line sql.y:6770 +//line sql.y:7258 { yyLOCAL = yyDollar[1].namedWindowsUnion() } yyVAL.union = yyLOCAL - case 1348: + case 1474: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL OrderBy -//line sql.y:6775 +//line sql.y:7263 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1349: + case 1475: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL OrderBy -//line sql.y:6779 +//line sql.y:7267 { yyLOCAL = yyDollar[1].orderByUnion() } yyVAL.union = yyLOCAL - case 1350: + case 1476: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL OrderBy -//line sql.y:6785 +//line sql.y:7273 { yyLOCAL = yyDollar[3].orderByUnion() } yyVAL.union = yyLOCAL - case 1351: + case 1477: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL OrderBy -//line sql.y:6791 +//line sql.y:7279 { yyLOCAL = OrderBy{yyDollar[1].orderUnion()} } yyVAL.union = yyLOCAL - case 1352: + case 1478: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:6795 +//line sql.y:7283 { yySLICE := (*OrderBy)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].orderUnion()) } - case 1353: + case 1479: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Order -//line sql.y:6801 +//line sql.y:7289 { yyLOCAL = &Order{Expr: yyDollar[1].exprUnion(), Direction: yyDollar[2].orderDirectionUnion()} } yyVAL.union = yyLOCAL - case 1354: + case 1480: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL OrderDirection -//line sql.y:6806 +//line sql.y:7294 { yyLOCAL = AscOrder } yyVAL.union = yyLOCAL - case 1355: + case 1481: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL OrderDirection -//line sql.y:6810 +//line sql.y:7298 { yyLOCAL = AscOrder } yyVAL.union = yyLOCAL - case 1356: + case 1482: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL OrderDirection -//line sql.y:6814 +//line sql.y:7302 { yyLOCAL = DescOrder } yyVAL.union = yyLOCAL - case 1357: + case 1483: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *Limit -//line sql.y:6819 +//line sql.y:7307 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1358: + case 1484: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Limit -//line sql.y:6823 +//line sql.y:7311 { yyLOCAL = yyDollar[1].limitUnion() } yyVAL.union = yyLOCAL - case 1359: + case 1485: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Limit -//line sql.y:6829 +//line sql.y:7317 { yyLOCAL = &Limit{Rowcount: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1360: + case 1486: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *Limit -//line sql.y:6833 +//line sql.y:7321 { yyLOCAL = &Limit{Offset: yyDollar[2].exprUnion(), Rowcount: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1361: + case 1487: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *Limit -//line sql.y:6837 +//line sql.y:7325 { yyLOCAL = &Limit{Offset: yyDollar[4].exprUnion(), Rowcount: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1362: + case 1488: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:6842 +//line sql.y:7330 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1363: + case 1489: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:6846 +//line sql.y:7334 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion(), yyDollar[2].alterOptionUnion()} } yyVAL.union = yyLOCAL - case 1364: + case 1490: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:6850 +//line sql.y:7338 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion(), yyDollar[2].alterOptionUnion()} } yyVAL.union = yyLOCAL - case 1365: + case 1491: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:6854 +//line sql.y:7342 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()} } yyVAL.union = yyLOCAL - case 1366: + case 1492: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:6858 +//line sql.y:7346 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()} } yyVAL.union = yyLOCAL - case 1367: + case 1493: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:6865 +//line sql.y:7353 { yyLOCAL = &LockOption{Type: DefaultType} } yyVAL.union = yyLOCAL - case 1368: + case 1494: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:6869 +//line sql.y:7357 { yyLOCAL = &LockOption{Type: NoneType} } yyVAL.union = yyLOCAL - case 1369: + case 1495: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:6873 +//line sql.y:7361 { yyLOCAL = &LockOption{Type: SharedType} } yyVAL.union = yyLOCAL - case 1370: + case 1496: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:6877 +//line sql.y:7365 { yyLOCAL = &LockOption{Type: ExclusiveType} } yyVAL.union = yyLOCAL - case 1371: + case 1497: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:6883 +//line sql.y:7371 { yyLOCAL = AlgorithmValue(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 1372: + case 1498: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:6887 +//line sql.y:7375 { yyLOCAL = AlgorithmValue(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 1373: + case 1499: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:6891 +//line sql.y:7379 { yyLOCAL = AlgorithmValue(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 1374: + case 1500: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:6895 +//line sql.y:7383 { yyLOCAL = AlgorithmValue(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 1375: + case 1501: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:6900 +//line sql.y:7388 { yyVAL.str = "" } - case 1376: + case 1502: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:6904 +//line sql.y:7392 { yyVAL.str = string(yyDollar[3].str) } - case 1377: + case 1503: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:6908 +//line sql.y:7396 { yyVAL.str = string(yyDollar[3].str) } - case 1378: + case 1504: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:6912 +//line sql.y:7400 { yyVAL.str = string(yyDollar[3].str) } - case 1379: + case 1505: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:6917 +//line sql.y:7405 { yyVAL.str = "" } - case 1380: + case 1506: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:6921 +//line sql.y:7409 { yyVAL.str = yyDollar[3].str } - case 1381: + case 1507: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:6927 +//line sql.y:7415 { yyVAL.str = string(yyDollar[1].str) } - case 1382: + case 1508: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:6931 +//line sql.y:7419 { yyVAL.str = string(yyDollar[1].str) } - case 1383: + case 1509: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:6936 +//line sql.y:7424 { yyVAL.str = "" } - case 1384: + case 1510: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:6940 +//line sql.y:7428 { yyVAL.str = yyDollar[2].str } - case 1385: + case 1511: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:6945 +//line sql.y:7433 { yyVAL.str = "cascaded" } - case 1386: + case 1512: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:6949 +//line sql.y:7437 { yyVAL.str = string(yyDollar[1].str) } - case 1387: + case 1513: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:6953 +//line sql.y:7441 { yyVAL.str = string(yyDollar[1].str) } - case 1388: + case 1514: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *Definer -//line sql.y:6958 +//line sql.y:7446 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1389: + case 1515: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *Definer -//line sql.y:6962 +//line sql.y:7450 { yyLOCAL = yyDollar[3].definerUnion() } yyVAL.union = yyLOCAL - case 1390: + case 1516: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Definer -//line sql.y:6968 +//line sql.y:7456 { yyLOCAL = &Definer{ Name: string(yyDollar[1].str), } } yyVAL.union = yyLOCAL - case 1391: + case 1517: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *Definer -//line sql.y:6974 +//line sql.y:7462 { yyLOCAL = &Definer{ Name: string(yyDollar[1].str), } } yyVAL.union = yyLOCAL - case 1392: + case 1518: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Definer -//line sql.y:6980 +//line sql.y:7468 { yyLOCAL = &Definer{ Name: yyDollar[1].str, @@ -19315,369 +21418,369 @@ yydefault: } } yyVAL.union = yyLOCAL - case 1393: + case 1519: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:6989 +//line sql.y:7477 { yyVAL.str = encodeSQLString(yyDollar[1].str) } - case 1394: + case 1520: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:6993 +//line sql.y:7481 { yyVAL.str = formatIdentifier(yyDollar[1].str) } - case 1395: + case 1521: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:6998 +//line sql.y:7486 { yyVAL.str = "" } - case 1396: + case 1522: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7002 +//line sql.y:7490 { yyVAL.str = formatAddress(yyDollar[1].str) } - case 1397: + case 1523: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Lock -//line sql.y:7008 +//line sql.y:7496 { yyLOCAL = ForUpdateLock } yyVAL.union = yyLOCAL - case 1398: + case 1524: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Lock -//line sql.y:7012 +//line sql.y:7500 { yyLOCAL = ShareModeLock } yyVAL.union = yyLOCAL - case 1399: + case 1525: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL *SelectInto -//line sql.y:7018 +//line sql.y:7506 { yyLOCAL = &SelectInto{Type: IntoOutfileS3, FileName: encodeSQLString(yyDollar[4].str), Charset: yyDollar[5].columnCharset, FormatOption: yyDollar[6].str, ExportOption: yyDollar[7].str, Manifest: yyDollar[8].str, Overwrite: yyDollar[9].str} } yyVAL.union = yyLOCAL - case 1400: + case 1526: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SelectInto -//line sql.y:7022 +//line sql.y:7510 { yyLOCAL = &SelectInto{Type: IntoDumpfile, FileName: encodeSQLString(yyDollar[3].str), Charset: ColumnCharset{}, FormatOption: "", ExportOption: "", Manifest: "", Overwrite: ""} } yyVAL.union = yyLOCAL - case 1401: + case 1527: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *SelectInto -//line sql.y:7026 +//line sql.y:7514 { yyLOCAL = &SelectInto{Type: IntoOutfile, FileName: encodeSQLString(yyDollar[3].str), Charset: yyDollar[4].columnCharset, FormatOption: "", ExportOption: yyDollar[5].str, Manifest: "", Overwrite: ""} } yyVAL.union = yyLOCAL - case 1402: + case 1528: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7031 +//line sql.y:7519 { yyVAL.str = "" } - case 1403: + case 1529: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7035 +//line sql.y:7523 { yyVAL.str = " format csv" + yyDollar[3].str } - case 1404: + case 1530: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7039 +//line sql.y:7527 { yyVAL.str = " format text" + yyDollar[3].str } - case 1405: + case 1531: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7044 +//line sql.y:7532 { yyVAL.str = "" } - case 1406: + case 1532: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7048 +//line sql.y:7536 { yyVAL.str = " header" } - case 1407: + case 1533: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7053 +//line sql.y:7541 { yyVAL.str = "" } - case 1408: + case 1534: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7057 +//line sql.y:7545 { yyVAL.str = " manifest on" } - case 1409: + case 1535: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7061 +//line sql.y:7549 { yyVAL.str = " manifest off" } - case 1410: + case 1536: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7066 +//line sql.y:7554 { yyVAL.str = "" } - case 1411: + case 1537: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7070 +//line sql.y:7558 { yyVAL.str = " overwrite on" } - case 1412: + case 1538: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7074 +//line sql.y:7562 { yyVAL.str = " overwrite off" } - case 1413: + case 1539: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7080 +//line sql.y:7568 { yyVAL.str = yyDollar[1].str + yyDollar[2].str } - case 1414: + case 1540: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7085 +//line sql.y:7573 { yyVAL.str = "" } - case 1415: + case 1541: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7089 +//line sql.y:7577 { yyVAL.str = " lines" + yyDollar[2].str } - case 1416: + case 1542: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7095 +//line sql.y:7583 { yyVAL.str = yyDollar[1].str } - case 1417: + case 1543: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7099 +//line sql.y:7587 { yyVAL.str = yyDollar[1].str + yyDollar[2].str } - case 1418: + case 1544: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7105 +//line sql.y:7593 { yyVAL.str = " starting by " + encodeSQLString(yyDollar[3].str) } - case 1419: + case 1545: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7109 +//line sql.y:7597 { yyVAL.str = " terminated by " + encodeSQLString(yyDollar[3].str) } - case 1420: + case 1546: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7114 +//line sql.y:7602 { yyVAL.str = "" } - case 1421: + case 1547: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7118 +//line sql.y:7606 { yyVAL.str = " " + yyDollar[1].str + yyDollar[2].str } - case 1422: + case 1548: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7124 +//line sql.y:7612 { yyVAL.str = yyDollar[1].str } - case 1423: + case 1549: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7128 +//line sql.y:7616 { yyVAL.str = yyDollar[1].str + yyDollar[2].str } - case 1424: + case 1550: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7134 +//line sql.y:7622 { yyVAL.str = " terminated by " + encodeSQLString(yyDollar[3].str) } - case 1425: + case 1551: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:7138 +//line sql.y:7626 { yyVAL.str = yyDollar[1].str + " enclosed by " + encodeSQLString(yyDollar[4].str) } - case 1426: + case 1552: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7142 +//line sql.y:7630 { yyVAL.str = " escaped by " + encodeSQLString(yyDollar[3].str) } - case 1427: + case 1553: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7147 +//line sql.y:7635 { yyVAL.str = "" } - case 1428: + case 1554: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7151 +//line sql.y:7639 { yyVAL.str = " optionally" } - case 1429: + case 1555: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Insert -//line sql.y:7164 +//line sql.y:7652 { yyLOCAL = &Insert{Rows: yyDollar[2].valuesUnion()} } yyVAL.union = yyLOCAL - case 1430: + case 1556: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Insert -//line sql.y:7168 +//line sql.y:7656 { yyLOCAL = &Insert{Rows: yyDollar[1].selStmtUnion()} } yyVAL.union = yyLOCAL - case 1431: + case 1557: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *Insert -//line sql.y:7172 +//line sql.y:7660 { yyLOCAL = &Insert{Columns: yyDollar[2].columnsUnion(), Rows: yyDollar[5].valuesUnion()} } yyVAL.union = yyLOCAL - case 1432: + case 1558: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *Insert -//line sql.y:7176 +//line sql.y:7664 { yyLOCAL = &Insert{Columns: []IdentifierCI{}, Rows: yyDollar[4].valuesUnion()} } yyVAL.union = yyLOCAL - case 1433: + case 1559: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *Insert -//line sql.y:7180 +//line sql.y:7668 { yyLOCAL = &Insert{Columns: yyDollar[2].columnsUnion(), Rows: yyDollar[4].selStmtUnion()} } yyVAL.union = yyLOCAL - case 1434: + case 1560: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Columns -//line sql.y:7186 +//line sql.y:7674 { yyLOCAL = Columns{yyDollar[1].identifierCI} } yyVAL.union = yyLOCAL - case 1435: + case 1561: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Columns -//line sql.y:7190 +//line sql.y:7678 { yyLOCAL = Columns{yyDollar[3].identifierCI} } yyVAL.union = yyLOCAL - case 1436: + case 1562: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7194 +//line sql.y:7682 { yySLICE := (*Columns)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].identifierCI) } - case 1437: + case 1563: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:7198 +//line sql.y:7686 { yySLICE := (*Columns)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[5].identifierCI) } - case 1438: + case 1564: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL UpdateExprs -//line sql.y:7203 +//line sql.y:7691 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1439: + case 1565: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL UpdateExprs -//line sql.y:7207 +//line sql.y:7695 { yyLOCAL = yyDollar[5].updateExprsUnion() } yyVAL.union = yyLOCAL - case 1440: + case 1566: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Values -//line sql.y:7213 +//line sql.y:7701 { yyLOCAL = Values{yyDollar[1].valTupleUnion()} } yyVAL.union = yyLOCAL - case 1441: + case 1567: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7217 +//line sql.y:7705 { yySLICE := (*Values)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].valTupleUnion()) } - case 1442: + case 1568: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ValTuple -//line sql.y:7223 +//line sql.y:7711 { yyLOCAL = yyDollar[1].valTupleUnion() } yyVAL.union = yyLOCAL - case 1443: + case 1569: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ValTuple -//line sql.y:7227 +//line sql.y:7715 { yyLOCAL = ValTuple{} } yyVAL.union = yyLOCAL - case 1444: + case 1570: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ValTuple -//line sql.y:7233 +//line sql.y:7721 { yyLOCAL = ValTuple(yyDollar[2].exprsUnion()) } yyVAL.union = yyLOCAL - case 1445: + case 1571: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL ValTuple -//line sql.y:7237 +//line sql.y:7725 { yyLOCAL = ValTuple(yyDollar[3].exprsUnion()) } yyVAL.union = yyLOCAL - case 1446: + case 1572: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7242 +//line sql.y:7730 { if len(yyDollar[1].valTupleUnion()) == 1 { yyLOCAL = yyDollar[1].valTupleUnion()[0] @@ -19686,273 +21789,300 @@ yydefault: } } yyVAL.union = yyLOCAL - case 1447: + case 1573: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL UpdateExprs -//line sql.y:7252 +//line sql.y:7740 { yyLOCAL = UpdateExprs{yyDollar[1].updateExprUnion()} } yyVAL.union = yyLOCAL - case 1448: + case 1574: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7256 +//line sql.y:7744 { yySLICE := (*UpdateExprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].updateExprUnion()) } - case 1449: + case 1575: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *UpdateExpr -//line sql.y:7262 +//line sql.y:7750 { yyLOCAL = &UpdateExpr{Name: yyDollar[1].colNameUnion(), Expr: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1451: + case 1577: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7269 +//line sql.y:7757 { yyVAL.str = "charset" } - case 1454: + case 1580: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7279 +//line sql.y:7767 { yyLOCAL = NewStrLiteral(yyDollar[1].identifierCI.String()) } yyVAL.union = yyLOCAL - case 1455: + case 1581: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7283 +//line sql.y:7771 { yyLOCAL = NewStrLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 1456: + case 1582: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7287 +//line sql.y:7775 { yyLOCAL = &Default{} } yyVAL.union = yyLOCAL - case 1459: + case 1585: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:7296 +//line sql.y:7784 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 1460: + case 1586: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:7298 +//line sql.y:7786 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 1461: + case 1587: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:7301 +//line sql.y:7789 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 1462: + case 1588: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:7303 +//line sql.y:7791 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 1463: + case 1589: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:7306 +//line sql.y:7794 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 1464: + case 1590: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL bool -//line sql.y:7308 +//line sql.y:7796 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 1465: + case 1591: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Ignore -//line sql.y:7311 +//line sql.y:7799 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 1466: + case 1592: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Ignore -//line sql.y:7313 +//line sql.y:7801 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 1467: + case 1593: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7316 +//line sql.y:7804 { yyVAL.empty = struct{}{} } - case 1468: + case 1594: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7318 +//line sql.y:7806 { yyVAL.empty = struct{}{} } - case 1469: + case 1595: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7320 +//line sql.y:7808 { yyVAL.empty = struct{}{} } - case 1470: + case 1596: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:7324 +//line sql.y:7812 { yyLOCAL = &CallProc{Name: yyDollar[2].tableName, Params: yyDollar[4].exprsUnion()} } yyVAL.union = yyLOCAL - case 1471: + case 1597: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Exprs -//line sql.y:7329 +//line sql.y:7817 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1472: + case 1598: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Exprs -//line sql.y:7333 +//line sql.y:7821 { yyLOCAL = yyDollar[1].exprsUnion() } yyVAL.union = yyLOCAL - case 1473: + case 1599: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []*IndexOption -//line sql.y:7338 +//line sql.y:7826 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1474: + case 1600: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*IndexOption -//line sql.y:7340 +//line sql.y:7828 { yyLOCAL = []*IndexOption{yyDollar[1].indexOptionUnion()} } yyVAL.union = yyLOCAL - case 1475: + case 1601: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:7344 +//line sql.y:7832 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), String: string(yyDollar[2].identifierCI.String())} } yyVAL.union = yyLOCAL - case 1476: + case 1602: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7350 +//line sql.y:7838 { yyVAL.identifierCI = yyDollar[1].identifierCI } - case 1477: + case 1603: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7354 +//line sql.y:7842 { yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str)) } - case 1479: + case 1605: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7361 +//line sql.y:7849 { yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str)) } - case 1480: + case 1606: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7367 +//line sql.y:7855 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 1481: + case 1607: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7371 +//line sql.y:7859 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 1482: + case 1608: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7377 +//line sql.y:7865 { yyVAL.identifierCS = NewIdentifierCS("") } - case 1483: + case 1609: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7381 +//line sql.y:7869 { yyVAL.identifierCS = yyDollar[1].identifierCS } - case 1485: + case 1611: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7388 +//line sql.y:7876 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 2035: + case 1612: + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line sql.y:7882 + { + yyLOCAL = &Kill{Type: yyDollar[2].killTypeUnion(), ProcesslistID: convertStringToUInt64(yyDollar[3].str)} + } + yyVAL.union = yyLOCAL + case 1613: + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL KillType +//line sql.y:7888 + { + yyLOCAL = ConnectionType + } + yyVAL.union = yyLOCAL + case 1614: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7964 + var yyLOCAL KillType +//line sql.y:7892 + { + yyLOCAL = ConnectionType + } + yyVAL.union = yyLOCAL + case 1615: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL KillType +//line sql.y:7896 + { + yyLOCAL = QueryType + } + yyVAL.union = yyLOCAL + case 2230: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:8539 { - if incNesting(yylex) { - yylex.Error("max nesting level reached") - return 1 - } } - case 2036: + case 2231: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7973 +//line sql.y:8544 { - decNesting(yylex) } - case 2037: + case 2232: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7978 +//line sql.y:8548 { skipToEnd(yylex) } - case 2038: + case 2233: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7983 +//line sql.y:8553 { skipToEnd(yylex) } - case 2039: + case 2234: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7987 +//line sql.y:8557 { skipToEnd(yylex) } - case 2040: + case 2235: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7991 +//line sql.y:8561 { skipToEnd(yylex) } diff --git a/go/vt/sqlparser/sql.y b/go/vt/sqlparser/sql.y index bfa42993fa1..7c7c2b1a85b 100644 --- a/go/vt/sqlparser/sql.y +++ b/go/vt/sqlparser/sql.y @@ -28,18 +28,6 @@ func setDDL(yylex yyLexer, node Statement) { yylex.(*Tokenizer).partialDDL = node } -func incNesting(yylex yyLexer) bool { - yylex.(*Tokenizer).nesting++ - if yylex.(*Tokenizer).nesting == 200 { - return true - } - return false -} - -func decNesting(yylex yyLexer) { - yylex.(*Tokenizer).nesting-- -} - // skipToEnd forces the lexer to end prematurely. Not all SQL statements // are supported by the Parser, thus calling skipToEnd will make the lexer // return EOF early. @@ -47,7 +35,7 @@ func skipToEnd(yylex yyLexer) { yylex.(*Tokenizer).SkipToEnd = true } -func bindVariable(yylex yyLexer, bvar string) { +func markBindVariable(yylex yyLexer, bvar string) { yylex.(*Tokenizer).BindVars[bvar] = struct{}{} } @@ -185,11 +173,12 @@ func bindVariable(yylex yyLexer, bvar string) { orderDirection OrderDirection explainType ExplainType vexplainType VExplainType - intervalType IntervalTypes + intervalType IntervalType lockType LockType referenceDefinition *ReferenceDefinition txAccessModes []TxAccessMode txAccessMode TxAccessMode + killType KillType columnStorage ColumnStorage columnFormat ColumnFormat @@ -259,6 +248,7 @@ func bindVariable(yylex yyLexer, bvar string) { %token DISCARD IMPORT ENABLE DISABLE TABLESPACE %token VIRTUAL STORED %token BOTH LEADING TRAILING +%token KILL %left EMPTY_FROM_CLAUSE %right INTO @@ -278,7 +268,7 @@ func bindVariable(yylex yyLexer, bvar string) { %left AND %right NOT '!' %left BETWEEN CASE WHEN THEN ELSE END -%left '=' '<' '>' LE GE NE NULL_SAFE_EQUAL IS LIKE REGEXP RLIKE IN +%left '=' '<' '>' LE GE NE NULL_SAFE_EQUAL IS LIKE REGEXP RLIKE IN ASSIGNMENT_OPT %left '&' %left SHIFT_LEFT SHIFT_RIGHT %left '+' '-' @@ -334,6 +324,9 @@ func bindVariable(yylex yyLexer, bvar string) { // Type Modifiers %token NULLX AUTO_INCREMENT APPROXNUM SIGNED UNSIGNED ZEROFILL +// PURGE tokens +%token PURGE BEFORE + // SHOW tokens %token CODE COLLATION COLUMNS DATABASES ENGINES EVENT EXTENDED FIELDS FULL FUNCTION GTID_EXECUTED %token KEYSPACES OPEN PLUGINS PRIVILEGES PROCESSLIST SCHEMAS TABLES TRIGGERS USER @@ -343,10 +336,11 @@ func bindVariable(yylex yyLexer, bvar string) { %token NAMES GLOBAL SESSION ISOLATION LEVEL READ WRITE ONLY REPEATABLE COMMITTED UNCOMMITTED SERIALIZABLE // Functions -%token CURRENT_TIMESTAMP DATABASE CURRENT_DATE NOW -%token CURRENT_TIME LOCALTIME LOCALTIMESTAMP CURRENT_USER -%token UTC_DATE UTC_TIME UTC_TIMESTAMP +%token ADDDATE CURRENT_TIMESTAMP DATABASE CURRENT_DATE CURDATE DATE_ADD DATE_SUB NOW SUBDATE +%token CURTIME CURRENT_TIME LOCALTIME LOCALTIMESTAMP CURRENT_USER +%token UTC_DATE UTC_TIME UTC_TIMESTAMP SYSDATE %token DAY DAY_HOUR DAY_MICROSECOND DAY_MINUTE DAY_SECOND HOUR HOUR_MICROSECOND HOUR_MINUTE HOUR_SECOND MICROSECOND MINUTE MINUTE_MICROSECOND MINUTE_SECOND MONTH QUARTER SECOND SECOND_MICROSECOND YEAR_MONTH WEEK +%token SQL_TSI_DAY SQL_TSI_WEEK SQL_TSI_HOUR SQL_TSI_MINUTE SQL_TSI_MONTH SQL_TSI_QUARTER SQL_TSI_SECOND SQL_TSI_MICROSECOND SQL_TSI_YEAR %token REPLACE %token CONVERT CAST %token SUBSTR SUBSTRING @@ -357,11 +351,15 @@ func bindVariable(yylex yyLexer, bvar string) { %token JSON_ARRAY JSON_OBJECT JSON_QUOTE %token JSON_DEPTH JSON_TYPE JSON_LENGTH JSON_VALID %token JSON_ARRAY_APPEND JSON_ARRAY_INSERT JSON_INSERT JSON_MERGE JSON_MERGE_PATCH JSON_MERGE_PRESERVE JSON_REMOVE JSON_REPLACE JSON_SET JSON_UNQUOTE -%token COUNT AVG MAX MIN SUM GROUP_CONCAT BIT_AND BIT_OR BIT_XOR STD STDDEV STDDEV_POP STDDEV_SAMP VAR_POP VAR_SAMP VARIANCE +%token COUNT AVG MAX MIN SUM GROUP_CONCAT BIT_AND BIT_OR BIT_XOR STD STDDEV STDDEV_POP STDDEV_SAMP VAR_POP VAR_SAMP VARIANCE ANY_VALUE %token REGEXP_INSTR REGEXP_LIKE REGEXP_REPLACE REGEXP_SUBSTR %token ExtractValue UpdateXML %token GET_LOCK RELEASE_LOCK RELEASE_ALL_LOCKS IS_FREE_LOCK IS_USED_LOCK %token LOCATE POSITION +%token ST_GeometryCollectionFromText ST_GeometryFromText ST_LineStringFromText ST_MultiLineStringFromText ST_MultiPointFromText ST_MultiPolygonFromText ST_PointFromText ST_PolygonFromText +%token ST_GeometryCollectionFromWKB ST_GeometryFromWKB ST_LineStringFromWKB ST_MultiLineStringFromWKB ST_MultiPointFromWKB ST_MultiPolygonFromWKB ST_PointFromWKB ST_PolygonFromWKB +%token ST_AsBinary ST_AsText ST_Dimension ST_Envelope ST_IsSimple ST_IsEmpty ST_GeometryType ST_X ST_Y ST_Latitude ST_Longitude ST_EndPoint ST_IsClosed ST_Length ST_NumPoints ST_StartPoint ST_PointN +%token ST_Area ST_Centroid ST_ExteriorRing ST_InteriorRingN ST_NumInteriorRings ST_NumGeometries ST_GeometryN ST_LongFromGeoHash ST_PointFromGeoHash ST_LatFromGeoHash ST_GeoHash ST_AsGeoJSON ST_GeomFromGeoJSON // Match %token MATCH AGAINST BOOLEAN LANGUAGE WITH QUERY EXPANSION WITHOUT VALIDATION @@ -402,14 +400,12 @@ func bindVariable(yylex yyLexer, bvar string) { %type range_or_list %type partitions_opt algorithm_opt subpartitions_opt partition_max_rows partition_min_rows -%type command -%type query_expression_parens query_expression query_expression_body select_statement query_primary select_stmt_with_into -%type explain_statement explainable_statement -%type prepare_statement -%type vexplain_statement -%type execute_statement deallocate_statement +%type command kill_statement +%type explain_statement explainable_statement vexplain_statement +%type prepare_statement execute_statement deallocate_statement %type stream_statement vstream_statement insert_statement update_statement delete_statement set_statement set_transaction_statement %type create_statement alter_statement rename_statement drop_statement truncate_statement flush_statement do_statement +%type select_statement select_stmt_with_into query_expression_parens query_expression query_expression_body query_primary %type with_clause_opt with_clause %type common_table_expr %type with_list @@ -424,7 +420,7 @@ func bindVariable(yylex yyLexer, bvar string) { %type collate character_set encryption %type create_options create_options_opt %type default_optional first_opt linear_opt jt_exists_opt jt_path_opt partition_storage_opt -%type analyze_statement show_statement use_statement other_statement +%type analyze_statement show_statement use_statement purge_statement other_statement %type begin_statement commit_statement rollback_statement savepoint_statement release_statement load_statement %type lock_statement unlock_statement call_statement %type revert_statement @@ -457,7 +453,7 @@ func bindVariable(yylex yyLexer, bvar string) { %type subpartition_definition %type subpartition_definition_list subpartition_definition_list_with_brackets %type subpartition_definition_attribute_list_opt -%type interval_time_stamp interval +%type interval timestampadd_interval %type cache_opt separator_opt flush_option for_channel_opt maxvalue %type match_option %type distinct_opt union_op replace_opt local_opt @@ -467,8 +463,8 @@ func bindVariable(yylex yyLexer, bvar string) { %type select_option algorithm_view security_view security_view_opt %type generated_always_opt user_username address_opt %type definer_opt user -%type expression frame_expression signed_literal signed_literal_or_null null_as_literal now_or_signed_literal signed_literal bit_expr regular_expressions xml_expressions -%type interval_value simple_expr literal NUM_literal text_literal text_literal_or_arg bool_pri literal_or_null now predicate tuple_expression null_int_variable_arg performance_schema_function_expressions gtid_function_expressions +%type expression signed_literal signed_literal_or_null null_as_literal now_or_signed_literal signed_literal bit_expr regular_expressions xml_expressions +%type simple_expr literal NUM_literal text_literal text_literal_or_arg bool_pri literal_or_null now predicate tuple_expression null_int_variable_arg performance_schema_function_expressions gtid_function_expressions %type from_opt table_references from_clause %type table_reference table_factor join_table json_table_function %type jt_column @@ -542,7 +538,7 @@ func bindVariable(yylex yyLexer, bvar string) { %type column_type %type int_type decimal_type numeric_type time_type char_type spatial_type %type length_opt partition_comment partition_data_directory partition_index_directory -%type func_datetime_precision +%type func_datetime_precision %type charset_opt %type collate_opt %type binary_opt @@ -593,6 +589,7 @@ func bindVariable(yylex yyLexer, bvar string) { %type ratio_opt %type tx_chacteristics_opt tx_chars %type tx_char +%type kill_type_opt %start any_command %% @@ -633,6 +630,7 @@ command: | drop_statement | truncate_statement | analyze_statement +| purge_statement | show_statement | use_statement | begin_statement @@ -653,6 +651,7 @@ command: | prepare_statement | execute_statement | deallocate_statement +| kill_statement | /*empty*/ { setParseTree(yylex, nil) @@ -915,7 +914,7 @@ insert_statement: ins.Action = $1 ins.Comments = Comments($2).Parsed() ins.Ignore = $3 - ins.Table = $4 + ins.Table = getAliasedTableExprFromTableName($4) ins.Partitions = $5 ins.OnDup = OnDup($7) $$ = ins @@ -928,7 +927,7 @@ insert_statement: cols = append(cols, updateList.Name.Name) vals = append(vals, updateList.Expr) } - $$ = &Insert{Action: $1, Comments: Comments($2).Parsed(), Ignore: $3, Table: $4, Partitions: $5, Columns: cols, Rows: Values{vals}, OnDup: OnDup($8)} + $$ = &Insert{Action: $1, Comments: Comments($2).Parsed(), Ignore: $3, Table: getAliasedTableExprFromTableName($4), Partitions: $5, Columns: cols, Rows: Values{vals}, OnDup: OnDup($8)} } insert_or_replace: @@ -972,11 +971,11 @@ from_or_using: view_name_list: table_name { - $$ = TableNames{$1.ToViewName()} + $$ = TableNames{$1} } | view_name_list ',' table_name { - $$ = append($$, $3.ToViewName()) + $$ = append($$, $3) } table_name_list: @@ -1147,7 +1146,7 @@ create_statement: } | CREATE comment_opt replace_opt algorithm_view definer_opt security_view_opt VIEW table_name column_list_opt AS select_statement check_option_opt { - $$ = &CreateView{ViewName: $8.ToViewName(), Comments: Comments($2).Parsed(), IsReplace:$3, Algorithm:$4, Definer: $5 ,Security:$6, Columns:$9, Select: $11, CheckOption: $12 } + $$ = &CreateView{ViewName: $8, Comments: Comments($2).Parsed(), IsReplace:$3, Algorithm:$4, Definer: $5 ,Security:$6, Columns:$9, Select: $11, CheckOption: $12 } } | create_database_prefix create_options_opt { @@ -1483,6 +1482,7 @@ column_attribute_list_opt: | column_attribute_list_opt DEFAULT now_or_signed_literal { $1.Default = $3 + $1.DefaultLiteral = true $$ = $1 } | column_attribute_list_opt ON UPDATE function_call_nonkeyword @@ -1640,6 +1640,10 @@ CURRENT_TIMESTAMP func_datetime_precision { $$ = &CurTimeFuncExpr{Name:NewIdentifierCI("now"), Fsp: $2} } +| SYSDATE func_datetime_precision + { + $$ = &CurTimeFuncExpr{Name:NewIdentifierCI("sysdate"), Fsp: $2} + } signed_literal_or_null: signed_literal @@ -1693,8 +1697,7 @@ text_literal } | VALUE_ARG { - $$ = NewArgument($1[1:]) - bindVariable(yylex, $1[1:]) + $$ = parseBindVariable(yylex, $1[1:]) } | underscore_charsets BIT_LITERAL %prec UNARY { @@ -1718,8 +1721,8 @@ text_literal } | underscore_charsets VALUE_ARG %prec UNARY { - bindVariable(yylex, $2[1:]) - $$ = &IntroducerExpr{CharacterSet: $1, Expr: NewArgument($2[1:])} + arg := parseBindVariable(yylex, $2[1:]) + $$ = &IntroducerExpr{CharacterSet: $1, Expr: arg} } | DATE STRING { @@ -1893,7 +1896,7 @@ underscore_charsets: } | UNDERSCORE_UTF8 { - $$ = Utf8Str + $$ = Utf8mb3Str } | UNDERSCORE_UTF8MB4 { @@ -1901,7 +1904,7 @@ underscore_charsets: } | UNDERSCORE_UTF8MB3 { - $$ = Utf8Str + $$ = Utf8mb3Str } literal_or_null: @@ -1943,8 +1946,7 @@ text_literal_or_arg: } | VALUE_ARG { - $$ = NewArgument($1[1:]) - bindVariable(yylex, $1[1:]) + $$ = parseBindVariable(yylex, $1[1:]) } keys: @@ -2825,7 +2827,7 @@ table_option: } | TABLESPACE equal_opt sql_id storage_opt { - $$ = &TableOption{Name:string($1), String: ($3.String() + $4)} + $$ = &TableOption{Name:string($1), String: ($3.String() + $4), CaseSensitive: true} } | UNION equal_opt '(' table_name_list ')' { @@ -2859,7 +2861,11 @@ insert_method_options: | LAST table_opt_value: - reserved_sql_id + table_id '.' reserved_table_id + { + $$ = String(TableName{Qualifier: $1, Name: $3}) + } +| reserved_sql_id { $$ = $1.String() } @@ -2986,9 +2992,9 @@ alter_option: { $$ = &AlterColumn{Column: $3, DropDefault:true} } -| ALTER column_opt column_name SET DEFAULT signed_literal_or_null +| ALTER column_opt column_name SET DEFAULT now_or_signed_literal { - $$ = &AlterColumn{Column: $3, DropDefault:false, DefaultVal:$6} + $$ = &AlterColumn{Column: $3, DropDefault:false, DefaultVal:$6, DefaultLiteral: true} } | ALTER column_opt column_name SET DEFAULT openb expression closeb { @@ -3167,7 +3173,7 @@ alter_statement: } | ALTER comment_opt algorithm_view definer_opt security_view_opt VIEW table_name column_list_opt AS select_statement check_option_opt { - $$ = &AlterView{ViewName: $7.ToViewName(), Comments: Comments($2).Parsed(), Algorithm:$3, Definer: $4 ,Security:$5, Columns:$8, Select: $10, CheckOption: $11 } + $$ = &AlterView{ViewName: $7, Comments: Comments($2).Parsed(), Algorithm:$3, Definer: $4 ,Security:$5, Columns:$8, Select: $10, CheckOption: $11 } } // The syntax here causes a shift / reduce issue, because ENCRYPTION is a non reserved keyword // and the database identifier is optional. When no identifier is given, the current database @@ -3952,6 +3958,16 @@ analyze_statement: $$ = &OtherRead{} } +purge_statement: + PURGE BINARY LOGS TO STRING + { + $$ = &PurgeBinaryLogs{To: string($5)} + } +| PURGE BINARY LOGS BEFORE STRING + { + $$ = &PurgeBinaryLogs{Before: string($5)} + } + show_statement: SHOW charset_or_character_set like_or_where_opt { @@ -4741,11 +4757,11 @@ execute_statement_list_opt: // execute db.foo(@apa) using @foo, @bar deallocate_statement: DEALLOCATE comment_opt PREPARE sql_id { - $$ = &DeallocateStmt{Type:DeallocateType, Comments: Comments($2).Parsed(), Name:$4} + $$ = &DeallocateStmt{Comments: Comments($2).Parsed(), Name:$4} } | DROP comment_opt PREPARE sql_id { - $$ = &DeallocateStmt{Type: DropType, Comments: Comments($2).Parsed(), Name: $4} + $$ = &DeallocateStmt{Comments: Comments($2).Parsed(), Name: $4} } select_expression_list_opt: @@ -5225,6 +5241,10 @@ expression: { $$ = $1 } +| user_defined_variable ASSIGNMENT_OPT expression %prec ASSIGNMENT_OPT + { + $$ = &AssignmentExpr{Left: $1, Right: $3} + } | expression MEMBER OF openb expression closeb { $$ = &MemberOfExpr{Value: $1, JSONArr:$5 } @@ -5328,6 +5348,14 @@ bit_expr '|' bit_expr %prec '|' { $$ = &BinaryExpr{Left: $1, Operator: MinusOp, Right: $3} } +| bit_expr '+' INTERVAL bit_expr interval %prec '+' + { + $$ = &IntervalDateExpr{Syntax: IntervalDateExprBinaryAdd, Date: $1, Unit: $5, Interval: $4} + } +| bit_expr '-' INTERVAL bit_expr interval %prec '-' + { + $$ = &IntervalDateExpr{Syntax: IntervalDateExprBinarySub, Date: $1, Unit: $5, Interval: $4} + } | bit_expr '*' bit_expr %prec '*' { $$ = &BinaryExpr{Left: $1, Operator: MultOp, Right: $3} @@ -5446,18 +5474,14 @@ function_call_keyword { $$ = &Default{ColName: $2} } -| interval_value +| INTERVAL bit_expr interval '+' bit_expr %prec INTERVAL { - // INTERVAL can trigger a shift / reduce conflict. We want - // to shift here for the interval rule. In case we do have - // the additional expression_list below, we'd pick that path - // and thus properly parse it as a function when needed. - $$ = $1 + $$ = &IntervalDateExpr{Syntax: IntervalDateExprBinaryAddLeft, Date: $5, Unit: $3, Interval: $2} } | INTERVAL openb expression ',' expression_list closeb -{ - $$ = &IntervalFuncExpr{Expr: $3, Exprs: $5} -} + { + $$ = &IntervalFuncExpr{Expr: $3, Exprs: $5} + } | column_name_or_offset JSON_EXTRACT_OP text_literal_or_arg { $$ = &BinaryExpr{Left: $1, Operator: JSONExtractOp, Right: $3} @@ -5467,12 +5491,6 @@ function_call_keyword $$ = &BinaryExpr{Left: $1, Operator: JSONUnquoteExtractOp, Right: $3} } -interval_value: - INTERVAL bit_expr sql_id - { - $$ = &IntervalExpr{Expr: $2, Unit: $3.String()} - } - column_names_opt_paren: column_names { @@ -5553,23 +5571,21 @@ frame_point: { $$ = &FramePoint{Type:UnboundedFollowingType} } -| frame_expression PRECEDING +| NUM_literal PRECEDING { $$ = &FramePoint{Type:ExprPrecedingType, Expr:$1} } -| frame_expression FOLLOWING +| INTERVAL bit_expr interval PRECEDING { - $$ = &FramePoint{Type:ExprFollowingType, Expr:$1} + $$ = &FramePoint{Type:ExprPrecedingType, Expr:$2, Unit: $3} } - -frame_expression: - NUM_literal +| NUM_literal FOLLOWING { - $$ = $1 + $$ = &FramePoint{Type:ExprFollowingType, Expr:$1} } -| interval_value +| INTERVAL bit_expr interval FOLLOWING { - $$ = $1 + $$ = &FramePoint{Type:ExprFollowingType, Expr:$2, Unit:$3} } frame_clause_opt: @@ -5785,7 +5801,7 @@ col_tuple: | LIST_ARG { $$ = ListArg($1[2:]) - bindVariable(yylex, $1[2:]) + markBindVariable(yylex, $1[2:]) } subquery: @@ -5884,11 +5900,20 @@ UTC_DATE func_paren_opt { $$ = &FuncExpr{Name:NewIdentifierCI("current_date")} } +| CURDATE func_paren_opt + { + $$ = &FuncExpr{Name:NewIdentifierCI("curdate")} + } | UTC_TIME func_datetime_precision { $$ = &CurTimeFuncExpr{Name:NewIdentifierCI("utc_time"), Fsp: $2} } // curtime +| CURTIME func_datetime_precision + { + $$ = &CurTimeFuncExpr{Name:NewIdentifierCI("curtime"), Fsp: $2} + } + // curtime | CURRENT_TIME func_datetime_precision { $$ = &CurTimeFuncExpr{Name:NewIdentifierCI("current_time"), Fsp: $2} @@ -5961,17 +5986,21 @@ UTC_DATE func_paren_opt { $$ = &GroupConcatExpr{Distinct: $3, Exprs: $4, OrderBy: $5, Separator: $6, Limit: $7} } -| TIMESTAMPADD openb sql_id ',' expression ',' expression closeb +| ANY_VALUE openb expression closeb { - $$ = &TimestampFuncExpr{Name:string("timestampadd"), Unit:$3.String(), Expr1:$5, Expr2:$7} + $$ = &AnyValue{Arg:$3} } -| TIMESTAMPDIFF openb sql_id ',' expression ',' expression closeb +| TIMESTAMPADD openb timestampadd_interval ',' expression ',' expression closeb { - $$ = &TimestampFuncExpr{Name:string("timestampdiff"), Unit:$3.String(), Expr1:$5, Expr2:$7} + $$ = &IntervalDateExpr{Syntax: IntervalDateExprTimestampadd, Date: $7, Interval: $5, Unit: $3} + } +| TIMESTAMPDIFF openb timestampadd_interval ',' expression ',' expression closeb + { + $$ = &TimestampDiffExpr{Unit:$3, Expr1:$5, Expr2:$7} } | EXTRACT openb interval FROM expression closeb { - $$ = &ExtractFuncExpr{IntervalTypes: $3, Expr: $5} + $$ = &ExtractFuncExpr{IntervalType: $3, Expr: $5} } | WEIGHT_STRING openb expression convert_type_weight_string closeb { @@ -5991,11 +6020,11 @@ UTC_DATE func_paren_opt } | LTRIM openb expression closeb { - $$ = &TrimFuncExpr{TrimFuncType:LTrimType, StringArg: $3} + $$ = &TrimFuncExpr{TrimFuncType:LTrimType, Type: LeadingTrimType, StringArg: $3} } | RTRIM openb expression closeb { - $$ = &TrimFuncExpr{TrimFuncType:RTrimType, StringArg: $3} + $$ = &TrimFuncExpr{TrimFuncType:RTrimType, Type: TrailingTrimType, StringArg: $3} } | TRIM openb trim_type expression_opt FROM expression closeb { @@ -6061,6 +6090,366 @@ UTC_DATE func_paren_opt { $$ = &JSONArrayExpr{ Params:$3 } } +| ST_AsBinary openb expression closeb + { + $$ = &GeomFormatExpr{ FormatType: BinaryFormat, Geom: $3} + } +| ST_AsBinary openb expression ',' expression closeb + { + $$ = &GeomFormatExpr{ FormatType: BinaryFormat, Geom: $3, AxisOrderOpt: $5 } + } +| ST_AsText openb expression closeb + { + $$ = &GeomFormatExpr{ FormatType: TextFormat, Geom: $3} + } +| ST_AsText openb expression ',' expression closeb + { + $$ = &GeomFormatExpr{ FormatType: TextFormat, Geom: $3, AxisOrderOpt: $5 } + } +| ST_IsEmpty openb expression closeb + { + $$ = &GeomPropertyFuncExpr{ Property: IsEmpty, Geom: $3} + } +| ST_IsSimple openb expression closeb + { + $$ = &GeomPropertyFuncExpr{ Property: IsSimple, Geom: $3} + } +| ST_Dimension openb expression closeb + { + $$ = &GeomPropertyFuncExpr{ Property: Dimension, Geom: $3} + } +| ST_Envelope openb expression closeb + { + $$ = &GeomPropertyFuncExpr{ Property: Envelope, Geom: $3} + } +| ST_GeometryType openb expression closeb + { + $$ = &GeomPropertyFuncExpr{ Property: GeometryType, Geom: $3} + } +| ST_Latitude openb expression closeb + { + $$ = &PointPropertyFuncExpr{ Property: Latitude, Point: $3} + } +| ST_Latitude openb expression ',' expression closeb + { + $$ = &PointPropertyFuncExpr{ Property: Latitude, Point: $3, ValueToSet: $5} + } +| ST_Longitude openb expression closeb + { + $$ = &PointPropertyFuncExpr{ Property: Longitude, Point: $3} + } +| ST_Longitude openb expression ',' expression closeb + { + $$ = &PointPropertyFuncExpr{ Property: Longitude, Point: $3, ValueToSet: $5} + } +| ST_EndPoint openb expression closeb + { + $$ = &LinestrPropertyFuncExpr{ Property: EndPoint, Linestring: $3} + } +| ST_IsClosed openb expression closeb + { + $$ = &LinestrPropertyFuncExpr{ Property: IsClosed, Linestring: $3} + } +| ST_Length openb expression closeb + { + $$ = &LinestrPropertyFuncExpr{ Property: Length, Linestring: $3} + } +| ST_Length openb expression ',' expression closeb + { + $$ = &LinestrPropertyFuncExpr{ Property: Length, Linestring: $3, PropertyDefArg: $5} + } +| ST_NumPoints openb expression closeb + { + $$ = &LinestrPropertyFuncExpr{ Property: NumPoints, Linestring: $3} + } +| ST_PointN openb expression ',' expression closeb + { + $$ = &LinestrPropertyFuncExpr{ Property: PointN, Linestring: $3, PropertyDefArg: $5} + } +| ST_StartPoint openb expression closeb + { + $$ = &LinestrPropertyFuncExpr{ Property: StartPoint, Linestring: $3} + } +| ST_X openb expression closeb + { + $$ = &PointPropertyFuncExpr{ Property: XCordinate, Point: $3} + } +| ST_X openb expression ',' expression closeb + { + $$ = &PointPropertyFuncExpr{ Property: XCordinate, Point: $3, ValueToSet: $5} + } +| ST_Y openb expression closeb + { + $$ = &PointPropertyFuncExpr{ Property: YCordinate, Point: $3} + } +| ST_Y openb expression ',' expression closeb + { + $$ = &PointPropertyFuncExpr{ Property: YCordinate, Point: $3, ValueToSet: $5} + } +| ST_GeometryFromText openb expression closeb + { + $$ = &GeomFromTextExpr{ Type: GeometryFromText, WktText: $3 } + } +| ST_GeometryFromText openb expression ',' expression closeb + { + $$ = &GeomFromTextExpr{ Type: GeometryFromText, WktText: $3, Srid: $5 } + } +| ST_GeometryFromText openb expression ',' expression ',' expression closeb + { + $$ = &GeomFromTextExpr{ Type: GeometryFromText, WktText: $3, Srid: $5, AxisOrderOpt: $7 } + } +| ST_GeometryCollectionFromText openb expression closeb + { + $$ = &GeomFromTextExpr{ Type: GeometryCollectionFromText, WktText: $3 } + } +| ST_GeometryCollectionFromText openb expression ',' expression closeb + { + $$ = &GeomFromTextExpr{ Type: GeometryCollectionFromText, WktText: $3, Srid: $5 } + } +| ST_GeometryCollectionFromText openb expression ',' expression ',' expression closeb + { + $$ = &GeomFromTextExpr{ Type: GeometryCollectionFromText, WktText: $3, Srid: $5, AxisOrderOpt: $7 } + } +| ST_LineStringFromText openb expression closeb + { + $$ = &GeomFromTextExpr{ Type: LineStringFromText, WktText: $3 } + } +| ST_LineStringFromText openb expression ',' expression closeb + { + $$ = &GeomFromTextExpr{ Type: LineStringFromText, WktText: $3, Srid: $5 } + } +| ST_LineStringFromText openb expression ',' expression ',' expression closeb + { + $$ = &GeomFromTextExpr{ Type: LineStringFromText, WktText: $3, Srid: $5, AxisOrderOpt: $7 } + } +| ST_MultiLineStringFromText openb expression closeb + { + $$ = &GeomFromTextExpr{ Type: MultiLinestringFromText, WktText: $3 } + } +| ST_MultiLineStringFromText openb expression ',' expression closeb + { + $$ = &GeomFromTextExpr{ Type: MultiLinestringFromText, WktText: $3, Srid: $5 } + } +| ST_MultiLineStringFromText openb expression ',' expression ',' expression closeb + { + $$ = &GeomFromTextExpr{ Type: MultiLinestringFromText, WktText: $3, Srid: $5, AxisOrderOpt: $7 } + } +| ST_MultiPointFromText openb expression closeb + { + $$ = &GeomFromTextExpr{ Type: MultiPointFromText, WktText: $3 } + } +| ST_MultiPointFromText openb expression ',' expression closeb + { + $$ = &GeomFromTextExpr{ Type: MultiPointFromText, WktText: $3, Srid: $5 } + } +| ST_MultiPointFromText openb expression ',' expression ',' expression closeb + { + $$ = &GeomFromTextExpr{ Type: MultiPointFromText, WktText: $3, Srid: $5, AxisOrderOpt: $7 } + } +| ST_MultiPolygonFromText openb expression closeb + { + $$ = &GeomFromTextExpr{ Type: MultiPolygonFromText, WktText: $3 } + } +| ST_MultiPolygonFromText openb expression ',' expression closeb + { + $$ = &GeomFromTextExpr{ Type: MultiPolygonFromText, WktText: $3, Srid: $5 } + } +| ST_MultiPolygonFromText openb expression ',' expression ',' expression closeb + { + $$ = &GeomFromTextExpr{ Type: MultiPolygonFromText, WktText: $3, Srid: $5, AxisOrderOpt: $7 } + } +| ST_PointFromText openb expression closeb + { + $$ = &GeomFromTextExpr{ Type: PointFromText, WktText: $3 } + } +| ST_PointFromText openb expression ',' expression closeb + { + $$ = &GeomFromTextExpr{ Type: PointFromText, WktText: $3, Srid: $5 } + } +| ST_PointFromText openb expression ',' expression ',' expression closeb + { + $$ = &GeomFromTextExpr{ Type: PointFromText, WktText: $3, Srid: $5, AxisOrderOpt: $7 } + } +| ST_PolygonFromText openb expression closeb + { + $$ = &GeomFromTextExpr{ Type: PolygonFromText, WktText: $3 } + } +| ST_PolygonFromText openb expression ',' expression closeb + { + $$ = &GeomFromTextExpr{ Type: PolygonFromText, WktText: $3, Srid: $5 } + } +| ST_PolygonFromText openb expression ',' expression ',' expression closeb + { + $$ = &GeomFromTextExpr{ Type: PolygonFromText, WktText: $3, Srid: $5, AxisOrderOpt: $7 } + } +| ST_GeometryFromWKB openb expression closeb + { + $$ = &GeomFromWKBExpr{ Type: GeometryFromWKB, WkbBlob: $3 } + } +| ST_GeometryFromWKB openb expression ',' expression closeb + { + $$ = &GeomFromWKBExpr{ Type: GeometryFromWKB, WkbBlob: $3, Srid: $5 } + } +| ST_GeometryFromWKB openb expression ',' expression ',' expression closeb + { + $$ = &GeomFromWKBExpr{ Type: GeometryFromWKB, WkbBlob: $3, Srid: $5, AxisOrderOpt: $7 } + } +| ST_GeometryCollectionFromWKB openb expression closeb + { + $$ = &GeomFromWKBExpr{ Type: GeometryCollectionFromWKB, WkbBlob: $3 } + } +| ST_GeometryCollectionFromWKB openb expression ',' expression closeb + { + $$ = &GeomFromWKBExpr{ Type: GeometryCollectionFromWKB, WkbBlob: $3, Srid: $5 } + } +| ST_GeometryCollectionFromWKB openb expression ',' expression ',' expression closeb + { + $$ = &GeomFromWKBExpr{ Type: GeometryCollectionFromWKB, WkbBlob: $3, Srid: $5, AxisOrderOpt: $7 } + } +| ST_LineStringFromWKB openb expression closeb + { + $$ = &GeomFromWKBExpr{ Type: LineStringFromWKB, WkbBlob: $3 } + } +| ST_LineStringFromWKB openb expression ',' expression closeb + { + $$ = &GeomFromWKBExpr{ Type: LineStringFromWKB, WkbBlob: $3, Srid: $5 } + } +| ST_LineStringFromWKB openb expression ',' expression ',' expression closeb + { + $$ = &GeomFromWKBExpr{ Type: LineStringFromWKB, WkbBlob: $3, Srid: $5, AxisOrderOpt: $7 } + } +| ST_MultiLineStringFromWKB openb expression closeb + { + $$ = &GeomFromWKBExpr{ Type: MultiLinestringFromWKB, WkbBlob: $3 } + } +| ST_MultiLineStringFromWKB openb expression ',' expression closeb + { + $$ = &GeomFromWKBExpr{ Type: MultiLinestringFromWKB, WkbBlob: $3, Srid: $5 } + } +| ST_MultiLineStringFromWKB openb expression ',' expression ',' expression closeb + { + $$ = &GeomFromWKBExpr{ Type: MultiLinestringFromWKB, WkbBlob: $3, Srid: $5, AxisOrderOpt: $7 } + } +| ST_MultiPointFromWKB openb expression closeb + { + $$ = &GeomFromWKBExpr{ Type: MultiPointFromWKB, WkbBlob: $3 } + } +| ST_MultiPointFromWKB openb expression ',' expression closeb + { + $$ = &GeomFromWKBExpr{ Type: MultiPointFromWKB, WkbBlob: $3, Srid: $5 } + } +| ST_MultiPointFromWKB openb expression ',' expression ',' expression closeb + { + $$ = &GeomFromWKBExpr{ Type: MultiPointFromWKB, WkbBlob: $3, Srid: $5, AxisOrderOpt: $7 } + } +| ST_MultiPolygonFromWKB openb expression closeb + { + $$ = &GeomFromWKBExpr{ Type: MultiPolygonFromWKB, WkbBlob: $3 } + } +| ST_MultiPolygonFromWKB openb expression ',' expression closeb + { + $$ = &GeomFromWKBExpr{ Type: MultiPolygonFromWKB, WkbBlob: $3, Srid: $5 } + } +| ST_MultiPolygonFromWKB openb expression ',' expression ',' expression closeb + { + $$ = &GeomFromWKBExpr{ Type: MultiPolygonFromWKB, WkbBlob: $3, Srid: $5, AxisOrderOpt: $7 } + } +| ST_PointFromWKB openb expression closeb + { + $$ = &GeomFromWKBExpr{ Type: PointFromWKB, WkbBlob: $3 } + } +| ST_PointFromWKB openb expression ',' expression closeb + { + $$ = &GeomFromWKBExpr{ Type: PointFromWKB, WkbBlob: $3, Srid: $5 } + } +| ST_PointFromWKB openb expression ',' expression ',' expression closeb + { + $$ = &GeomFromWKBExpr{ Type: PointFromWKB, WkbBlob: $3, Srid: $5, AxisOrderOpt: $7 } + } +| ST_PolygonFromWKB openb expression closeb + { + $$ = &GeomFromWKBExpr{ Type: PolygonFromWKB, WkbBlob: $3 } + } +| ST_PolygonFromWKB openb expression ',' expression closeb + { + $$ = &GeomFromWKBExpr{ Type: PolygonFromWKB, WkbBlob: $3, Srid: $5 } + } +| ST_PolygonFromWKB openb expression ',' expression ',' expression closeb + { + $$ = &GeomFromWKBExpr{ Type: PolygonFromWKB, WkbBlob: $3, Srid: $5, AxisOrderOpt: $7 } + } +| ST_Area openb expression closeb + { + $$ = &PolygonPropertyFuncExpr{ Property: Area, Polygon: $3 } + } +| ST_Centroid openb expression closeb + { + $$ = &PolygonPropertyFuncExpr{ Property: Centroid, Polygon: $3 } + } +| ST_ExteriorRing openb expression closeb + { + $$ = &PolygonPropertyFuncExpr{ Property: ExteriorRing, Polygon: $3 } + } +| ST_InteriorRingN openb expression ',' expression closeb + { + $$ = &PolygonPropertyFuncExpr{ Property: InteriorRingN, Polygon: $3, PropertyDefArg: $5 } + } +| ST_NumInteriorRings openb expression closeb + { + $$ = &PolygonPropertyFuncExpr{ Property: NumInteriorRings, Polygon: $3 } + } +| ST_GeometryN openb expression ',' expression closeb + { + $$ = &GeomCollPropertyFuncExpr{ Property: GeometryN, GeomColl: $3, PropertyDefArg: $5 } + } +| ST_NumGeometries openb expression closeb + { + $$ = &GeomCollPropertyFuncExpr{ Property: NumGeometries, GeomColl: $3 } + } +| ST_GeoHash openb expression ',' expression ',' expression closeb + { + $$ = &GeoHashFromLatLongExpr{ Longitude: $3, Latitude: $5, MaxLength: $7 } + } +| ST_GeoHash openb expression ',' expression closeb + { + $$ = &GeoHashFromPointExpr{ Point: $3, MaxLength: $5 } + } +| ST_LatFromGeoHash openb expression closeb + { + $$ = &GeomFromGeoHashExpr{ GeomType: LatitudeFromHash, GeoHash: $3 } + } +| ST_LongFromGeoHash openb expression closeb + { + $$ = &GeomFromGeoHashExpr{ GeomType: LongitudeFromHash, GeoHash: $3 } + } +| ST_PointFromGeoHash openb expression ',' expression closeb + { + $$ = &GeomFromGeoHashExpr{ GeomType: PointFromHash, GeoHash: $3, SridOpt: $5 } + } +| ST_GeomFromGeoJSON openb expression closeb + { + $$ = &GeomFromGeoJSONExpr{ GeoJSON: $3 } + } +| ST_GeomFromGeoJSON openb expression ',' expression closeb + { + $$ = &GeomFromGeoJSONExpr{ GeoJSON: $3, HigherDimHandlerOpt: $5 } + } +| ST_GeomFromGeoJSON openb expression ',' expression ',' expression closeb + { + $$ = &GeomFromGeoJSONExpr{ GeoJSON: $3, HigherDimHandlerOpt: $5 , Srid: $7 } + } +| ST_AsGeoJSON openb expression closeb + { + $$ = &GeoJSONFromGeomExpr{ Geom: $3 } + } +| ST_AsGeoJSON openb expression ',' expression closeb + { + $$ = &GeoJSONFromGeomExpr{ Geom: $3, MaxDecimalDigits: $5 } + } +| ST_AsGeoJSON openb expression ',' expression ',' expression closeb + { + $$ = &GeoJSONFromGeomExpr{ Geom: $3, MaxDecimalDigits: $5 , Bitmask: $7 } + } | JSON_OBJECT openb json_object_param_opt closeb { $$ = &JSONObjectExpr{ Params:$3 } @@ -6177,6 +6566,18 @@ UTC_DATE func_paren_opt { $$ = &JSONUnquoteExpr{JSONValue:$3} } +| MULTIPOLYGON openb expression_list closeb + { + $$ = &MultiPolygonExpr{ PolygonParams:$3 } + } +| MULTIPOINT openb expression_list closeb + { + $$ = &MultiPointExpr{ PointParams:$3 } + } +| MULTILINESTRING openb expression_list closeb + { + $$ = &MultiLinestringExpr{ LinestringParams:$3 } + } | POLYGON openb expression_list closeb { $$ = &PolygonExpr{ LinestringParams:$3 } @@ -6213,6 +6614,30 @@ UTC_DATE func_paren_opt { $$ = &LagLeadExpr{ Type:$1 , Expr: $3, N: $5, Default: $6, NullTreatmentClause:$8, OverClause: $9} } +| ADDDATE openb expression ',' INTERVAL bit_expr interval closeb + { + $$ = &IntervalDateExpr{Syntax: IntervalDateExprAdddate, Date: $3, Interval: $6, Unit: $7} + } +| ADDDATE openb expression ',' expression closeb + { + $$ = &IntervalDateExpr{Syntax: IntervalDateExprAdddate, Date: $3, Interval: $5, Unit: IntervalNone} + } +| DATE_ADD openb expression ',' INTERVAL bit_expr interval closeb + { + $$ = &IntervalDateExpr{Syntax: IntervalDateExprDateAdd, Date: $3, Interval: $6, Unit: $7} + } +| DATE_SUB openb expression ',' INTERVAL bit_expr interval closeb + { + $$ = &IntervalDateExpr{Syntax: IntervalDateExprDateSub, Date: $3, Interval: $6, Unit: $7} + } +| SUBDATE openb expression ',' INTERVAL bit_expr interval closeb + { + $$ = &IntervalDateExpr{Syntax: IntervalDateExprSubdate, Date: $3, Interval: $6, Unit: $7} + } +| SUBDATE openb expression ',' expression closeb + { + $$ = &IntervalDateExpr{Syntax: IntervalDateExprSubdate, Date: $3, Interval: $5, Unit: IntervalNone} + } | regular_expressions | xml_expressions | performance_schema_function_expressions @@ -6233,8 +6658,7 @@ null_int_variable_arg: } | VALUE_ARG { - $$ = NewArgument($1[1:]) - bindVariable(yylex, $1[1:]) + $$ = parseBindVariable(yylex, $1[1:]) } default_with_comma_opt: @@ -6379,9 +6803,7 @@ returning_type_opt: } interval: - interval_time_stamp - {} -| DAY_HOUR + DAY_HOUR { $$=IntervalDayHour } @@ -6425,9 +6847,7 @@ interval: { $$=IntervalYearMonth } - -interval_time_stamp: - DAY +| DAY { $$=IntervalDay } @@ -6464,6 +6884,80 @@ interval_time_stamp: $$=IntervalYear } +timestampadd_interval: + DAY + { + $$=IntervalDay + } +| WEEK + { + $$=IntervalWeek + } +| HOUR + { + $$=IntervalHour + } +| MINUTE + { + $$=IntervalMinute + } +| MONTH + { + $$=IntervalMonth + } +| QUARTER + { + $$=IntervalQuarter + } +| SECOND + { + $$=IntervalSecond + } +| MICROSECOND + { + $$=IntervalMicrosecond + } +| YEAR + { + $$=IntervalYear + } +| SQL_TSI_DAY + { + $$=IntervalDay + } +| SQL_TSI_WEEK + { + $$=IntervalWeek + } +| SQL_TSI_HOUR + { + $$=IntervalHour + } +| SQL_TSI_MINUTE + { + $$=IntervalMinute + } +| SQL_TSI_MONTH + { + $$=IntervalMonth + } +| SQL_TSI_QUARTER + { + $$=IntervalQuarter + } +| SQL_TSI_SECOND + { + $$=IntervalSecond + } +| SQL_TSI_MICROSECOND + { + $$=IntervalMicrosecond + } +| SQL_TSI_YEAR + { + $$=IntervalYear + } + func_paren_opt: /* empty */ | openb closeb @@ -6471,20 +6965,15 @@ func_paren_opt: func_datetime_precision: /* empty */ { - $$ = nil + $$ = 0 } | openb closeb { - $$ = nil + $$ = 0 } | openb INTEGRAL closeb { - $$ = NewIntLiteral($2) - } -| openb VALUE_ARG closeb - { - $$ = NewArgument($2[1:]) - bindVariable(yylex, $2[1:]) + $$ = convertStringToInt($2) } /* @@ -6724,8 +7213,7 @@ num_val: } | VALUE_ARG VALUES { - $$ = NewArgument($1[1:]) - bindVariable(yylex, $1[1:]) + $$ = parseBindVariable(yylex, $1[1:]) } group_by_opt: @@ -7388,6 +7876,28 @@ reserved_table_id: { $$ = NewIdentifierCS(string($1)) } + +kill_statement: + KILL kill_type_opt INTEGRAL + { + $$ = &Kill{Type: $2, ProcesslistID: convertStringToUInt64($3)} + } + +kill_type_opt: + /* empty */ + { + $$ = ConnectionType + } +| CONNECTION + { + $$ = ConnectionType + } +| QUERY + { + $$ = QueryType + } + + /* These are not all necessarily reserved in MySQL, but some are. @@ -7421,6 +7931,7 @@ reserved_keyword: | CURRENT_DATE | CURRENT_TIME | CURRENT_TIMESTAMP +| CURTIME | CURRENT_USER | SUBSTR | SUBSTRING @@ -7465,6 +7976,7 @@ reserved_keyword: | JOIN | JSON_TABLE | KEY +| KILL | LAG | LAST_VALUE | LATERAL @@ -7522,11 +8034,10 @@ reserved_keyword: | SPATIAL | STORED | STRAIGHT_JOIN +| SYSDATE | SYSTEM | TABLE | THEN -| TIMESTAMPADD -| TIMESTAMPDIFF | TO | TRAILING | TRUE @@ -7559,16 +8070,19 @@ non_reserved_keyword: AGAINST | ACTION | ACTIVE +| ADDDATE %prec FUNCTION_CALL_NON_KEYWORD | ADMIN | AFTER | ALGORITHM | ALWAYS +| ANY_VALUE %prec FUNCTION_CALL_NON_KEYWORD | ARRAY | ASCII | AUTO_INCREMENT | AUTOEXTEND_SIZE | AVG %prec FUNCTION_CALL_NON_KEYWORD | AVG_ROW_LENGTH +| BEFORE | BEGIN | BIGINT | BIT @@ -7610,6 +8124,8 @@ non_reserved_keyword: | CURRENT | DATA | DATE %prec STRING_TYPE_PREFIX_NON_KEYWORD +| DATE_ADD %prec FUNCTION_CALL_NON_KEYWORD +| DATE_SUB %prec FUNCTION_CALL_NON_KEYWORD | DATETIME | DEALLOCATE | DECIMAL_TYPE @@ -7753,9 +8269,9 @@ non_reserved_keyword: | MIN_ROWS | MODE | MODIFY -| MULTILINESTRING -| MULTIPOINT -| MULTIPOLYGON +| MULTILINESTRING %prec FUNCTION_CALL_NON_KEYWORD +| MULTIPOINT %prec FUNCTION_CALL_NON_KEYWORD +| MULTIPOLYGON %prec FUNCTION_CALL_NON_KEYWORD | NAME | NAMES | NCHAR @@ -7802,6 +8318,7 @@ non_reserved_keyword: | POSITION %prec FUNCTION_CALL_NON_KEYWORD | PROCEDURE | PROCESSLIST +| PURGE | QUERIES | QUERY | RANDOM @@ -7854,6 +8371,14 @@ non_reserved_keyword: | SMALLINT | SNAPSHOT | SQL +| SQL_TSI_DAY +| SQL_TSI_HOUR +| SQL_TSI_MINUTE +| SQL_TSI_MONTH +| SQL_TSI_QUARTER +| SQL_TSI_SECOND +| SQL_TSI_WEEK +| SQL_TSI_YEAR | SRID | START | STARTING @@ -7867,6 +8392,53 @@ non_reserved_keyword: | STDDEV_POP %prec FUNCTION_CALL_NON_KEYWORD | STDDEV_SAMP %prec FUNCTION_CALL_NON_KEYWORD | STREAM +| ST_Area %prec FUNCTION_CALL_NON_KEYWORD +| ST_AsBinary %prec FUNCTION_CALL_NON_KEYWORD +| ST_AsGeoJSON %prec FUNCTION_CALL_NON_KEYWORD +| ST_AsText %prec FUNCTION_CALL_NON_KEYWORD +| ST_Centroid %prec FUNCTION_CALL_NON_KEYWORD +| ST_Dimension %prec FUNCTION_CALL_NON_KEYWORD +| ST_EndPoint %prec FUNCTION_CALL_NON_KEYWORD +| ST_Envelope %prec FUNCTION_CALL_NON_KEYWORD +| ST_ExteriorRing %prec FUNCTION_CALL_NON_KEYWORD +| ST_GeoHash %prec FUNCTION_CALL_NON_KEYWORD +| ST_GeomFromGeoJSON %prec FUNCTION_CALL_NON_KEYWORD +| ST_GeometryCollectionFromText %prec FUNCTION_CALL_NON_KEYWORD +| ST_GeometryCollectionFromWKB %prec FUNCTION_CALL_NON_KEYWORD +| ST_GeometryFromText %prec FUNCTION_CALL_NON_KEYWORD +| ST_GeometryFromWKB %prec FUNCTION_CALL_NON_KEYWORD +| ST_GeometryN %prec FUNCTION_CALL_NON_KEYWORD +| ST_GeometryType %prec FUNCTION_CALL_NON_KEYWORD +| ST_InteriorRingN %prec FUNCTION_CALL_NON_KEYWORD +| ST_IsClosed %prec FUNCTION_CALL_NON_KEYWORD +| ST_IsEmpty %prec FUNCTION_CALL_NON_KEYWORD +| ST_IsSimple %prec FUNCTION_CALL_NON_KEYWORD +| ST_LatFromGeoHash %prec FUNCTION_CALL_NON_KEYWORD +| ST_Latitude %prec FUNCTION_CALL_NON_KEYWORD +| ST_Length %prec FUNCTION_CALL_NON_KEYWORD +| ST_LineStringFromText %prec FUNCTION_CALL_NON_KEYWORD +| ST_LineStringFromWKB %prec FUNCTION_CALL_NON_KEYWORD +| ST_LongFromGeoHash %prec FUNCTION_CALL_NON_KEYWORD +| ST_Longitude %prec FUNCTION_CALL_NON_KEYWORD +| ST_MultiLineStringFromText %prec FUNCTION_CALL_NON_KEYWORD +| ST_MultiLineStringFromWKB %prec FUNCTION_CALL_NON_KEYWORD +| ST_MultiPointFromText %prec FUNCTION_CALL_NON_KEYWORD +| ST_MultiPointFromWKB %prec FUNCTION_CALL_NON_KEYWORD +| ST_MultiPolygonFromText %prec FUNCTION_CALL_NON_KEYWORD +| ST_MultiPolygonFromWKB %prec FUNCTION_CALL_NON_KEYWORD +| ST_NumGeometries %prec FUNCTION_CALL_NON_KEYWORD +| ST_NumInteriorRings %prec FUNCTION_CALL_NON_KEYWORD +| ST_NumPoints %prec FUNCTION_CALL_NON_KEYWORD +| ST_PointFromGeoHash %prec FUNCTION_CALL_NON_KEYWORD +| ST_PointFromText %prec FUNCTION_CALL_NON_KEYWORD +| ST_PointFromWKB %prec FUNCTION_CALL_NON_KEYWORD +| ST_PointN %prec FUNCTION_CALL_NON_KEYWORD +| ST_PolygonFromText %prec FUNCTION_CALL_NON_KEYWORD +| ST_PolygonFromWKB %prec FUNCTION_CALL_NON_KEYWORD +| ST_StartPoint %prec FUNCTION_CALL_NON_KEYWORD +| ST_X %prec FUNCTION_CALL_NON_KEYWORD +| ST_Y %prec FUNCTION_CALL_NON_KEYWORD +| SUBDATE %prec FUNCTION_CALL_NON_KEYWORD | SUBPARTITION | SUBPARTITIONS | SUM %prec FUNCTION_CALL_NON_KEYWORD @@ -7882,6 +8454,8 @@ non_reserved_keyword: | TIES | TIME %prec STRING_TYPE_PREFIX_NON_KEYWORD | TIMESTAMP %prec STRING_TYPE_PREFIX_NON_KEYWORD +| TIMESTAMPADD %prec FUNCTION_CALL_NON_KEYWORD +| TIMESTAMPDIFF %prec FUNCTION_CALL_NON_KEYWORD | TINYBLOB | TINYINT | TINYTEXT @@ -7933,6 +8507,7 @@ non_reserved_keyword: | WAIT_FOR_EXECUTED_GTID_SET %prec FUNCTION_CALL_NON_KEYWORD | WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS %prec FUNCTION_CALL_NON_KEYWORD | WARNINGS +| WEEK %prec FUNCTION_CALL_NON_KEYWORD | WITHOUT | WORK | YEAR @@ -7962,16 +8537,11 @@ non_reserved_keyword: openb: '(' { - if incNesting(yylex) { - yylex.Error("max nesting level reached") - return 1 - } } closeb: ')' { - decNesting(yylex) } skip_to_end: diff --git a/go/vt/sqlparser/testdata/select_cases.txt b/go/vt/sqlparser/testdata/select_cases.txt index 81340685d18..1112593cd13 100644 --- a/go/vt/sqlparser/testdata/select_cases.txt +++ b/go/vt/sqlparser/testdata/select_cases.txt @@ -8,7 +8,7 @@ INPUT select concat(a, if(b>10, _utf8 0xC3A6, _utf8 0xC3AF)) from t1; END OUTPUT -select concat(a, if(b > 10, _utf8 0xC3A6, _utf8 0xC3AF)) from t1 +select concat(a, if(b > 10, _utf8mb3 0xC3A6, _utf8mb3 0xC3AF)) from t1 END INPUT select a as 'x', t1.*, b as 'x' from t1; @@ -158,7 +158,7 @@ INPUT select date_sub("0069-01-01 00:00:01",INTERVAL 2 SECOND); END OUTPUT -select date_sub('0069-01-01 00:00:01', interval 2 SECOND) from dual +select date_sub('0069-01-01 00:00:01', interval 2 second) from dual END INPUT select count(distinct a) from t1 group by b; @@ -253,8 +253,8 @@ END INPUT select @topic3_id:= 10103; END -ERROR -syntax error at position 19 near ':' +OUTPUT +select @topic3_id := 10103 from dual END INPUT select t1.*,t2.*,t3.a from t1 left join t2 on (t3.a=t2.a) left join t1 as t3 on (t1.a=t3.a); @@ -278,7 +278,7 @@ INPUT select mbrwithin(ST_GeomFromText("point(2 4)"), ST_GeomFromText("point(2 4)")); END OUTPUT -select mbrwithin(ST_GeomFromText('point(2 4)'), ST_GeomFromText('point(2 4)')) from dual +select mbrwithin(st_geometryfromtext('point(2 4)'), st_geometryfromtext('point(2 4)')) from dual END INPUT select concat(@a, table_name), @a, table_name from information_schema.tables where table_schema = 'test' order by table_name; @@ -374,7 +374,7 @@ INPUT select timestampdiff(SQL_TSI_SECOND, '2001-02-01 12:59:59', '2001-05-01 12:58:58') as a; END OUTPUT -select timestampdiff(SQL_TSI_SECOND, '2001-02-01 12:59:59', '2001-05-01 12:58:58') as a from dual +select timestampdiff(second, '2001-02-01 12:59:59', '2001-05-01 12:58:58') as a from dual END INPUT select concat(f1, 2) a from t1 union select 'x' a from t1; @@ -404,7 +404,7 @@ INPUT select locate(_utf8 0xD091, _utf8 0xD0B0D0B1D0B2 collate utf8_bin); END OUTPUT -select locate(_utf8 0xD091, _utf8 0xD0B0D0B1D0B2 collate utf8_bin) from dual +select locate(_utf8mb3 0xD091, _utf8mb3 0xD0B0D0B1D0B2 collate utf8_bin) from dual END INPUT select hex('a'), hex('a '); @@ -440,7 +440,7 @@ INPUT select ST_Crosses(ST_GeomFromText('MULTIPOINT(1 0,15 0,10 10)'),ST_GeomFromText('MULTILINESTRING((15 0,20 0,20 20),(10 10,20 20,15 0))')) as result; END OUTPUT -select ST_Crosses(ST_GeomFromText('MULTIPOINT(1 0,15 0,10 10)'), ST_GeomFromText('MULTILINESTRING((15 0,20 0,20 20),(10 10,20 20,15 0))')) as result from dual +select ST_Crosses(st_geometryfromtext('MULTIPOINT(1 0,15 0,10 10)'), st_geometryfromtext('MULTILINESTRING((15 0,20 0,20 20),(10 10,20 20,15 0))')) as result from dual END INPUT select std(s1/s2) from bug22555 where i=1 group by i; @@ -518,7 +518,7 @@ INPUT select date_add("1997-12-31",INTERVAL "10.09" SECOND_MICROSECOND) as a; END OUTPUT -select date_add('1997-12-31', interval '10.09' SECOND_MICROSECOND) as a from dual +select date_add('1997-12-31', interval '10.09' second_microsecond) as a from dual END INPUT select substring('hello', 18446744073709551616, 1); @@ -740,7 +740,7 @@ INPUT select date_add(date,INTERVAL "1 1" YEAR_MONTH) from t1; END OUTPUT -select date_add(`date`, interval '1 1' YEAR_MONTH) from t1 +select date_add(`date`, interval '1 1' year_month) from t1 END INPUT select a1, a2, b, min(c), max(c) from t1 group by a1,a2,b; @@ -794,13 +794,13 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL "1:1" MINUTE_SECOND); END OUTPUT -select date_add('1997-12-31 23:59:59', interval '1:1' MINUTE_SECOND) from dual +select date_add('1997-12-31 23:59:59', interval '1:1' minute_second) from dual END INPUT select st_contains(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_contains(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_contains(st_union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select space(-18446744073709551617); @@ -860,7 +860,7 @@ INPUT select ST_astext(ST_intersection(ST_GeomFromText('polygon((0 0, 1 0, 0 1, 0 0))'), ST_GeomFromText('polygon((0 0, 1 1, 0 2, 0 0))'))); END OUTPUT -select ST_astext(ST_intersection(ST_GeomFromText('polygon((0 0, 1 0, 0 1, 0 0))'), ST_GeomFromText('polygon((0 0, 1 1, 0 2, 0 0))'))) from dual +select st_astext(ST_intersection(st_geometryfromtext('polygon((0 0, 1 0, 0 1, 0 0))'), st_geometryfromtext('polygon((0 0, 1 1, 0 2, 0 0))'))) from dual END INPUT select * from t1 left join t2 on m_id = id where match(d, e, f) against ('+aword +bword' in boolean mode); @@ -896,7 +896,7 @@ INPUT select date_add(date,INTERVAL "1" WEEK) from t1; END OUTPUT -select date_add(`date`, interval '1' WEEK) from t1 +select date_add(`date`, interval '1' week) from t1 END INPUT select timestampdiff(month,'2004-09-11','2004-09-11'); @@ -938,7 +938,7 @@ INPUT select ST_astext(fn3()); END OUTPUT -select ST_astext(fn3()) from dual +select st_astext(fn3()) from dual END INPUT select 1 | -1, 1 ^ -1, 1 & -1; @@ -1004,7 +1004,7 @@ INPUT select soundex(_utf8 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB); END OUTPUT -select soundex(_utf8 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB) from dual +select soundex(_utf8mb3 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB) from dual END INPUT select t1.a, (case t1.a when 0 then 0 else t1.b end) d from t1 join t2 on t1.a=t2.c where b=11120436154190595086 order by d; @@ -1016,7 +1016,7 @@ INPUT select date_sub("90-01-01 00:00:01",INTERVAL 2 SECOND); END OUTPUT -select date_sub('90-01-01 00:00:01', interval 2 SECOND) from dual +select date_sub('90-01-01 00:00:01', interval 2 second) from dual END INPUT select grp,group_concat(distinct c order by c) from t1 group by grp; @@ -1135,8 +1135,8 @@ END INPUT select c, substring_index(lcase(c), @q:=',', -1) as res from t1; END -ERROR -syntax error at position 40 near ':' +OUTPUT +select c, substring_index(lcase(c), @q := ',', -1) as res from t1 END INPUT select concat(a, if(b>10, _utf8mb4'æ', _utf8mb4'ß')) from t1; @@ -1178,7 +1178,7 @@ INPUT select ST_astext(st_difference(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))); END OUTPUT -select ST_astext(st_difference(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))) from dual +select st_astext(st_difference(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)')))) from dual END INPUT select a as like_l from t1 where a like 'l%'; @@ -1201,8 +1201,8 @@ END INPUT select @keyword3_id:= 10203; END -ERROR -syntax error at position 21 near ':' +OUTPUT +select @keyword3_id := 10203 from dual END INPUT select * from t3 where x = 1 and y < 5 order by y desc; @@ -1238,13 +1238,13 @@ INPUT select st_crosses(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_crosses(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_crosses(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_union(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select ST_astext(st_intersection(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))); END OUTPUT -select ST_astext(st_intersection(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))) from dual +select st_astext(st_intersection(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)')))) from dual END INPUT select distinct t1.a from t1,t2 order by t2.a; @@ -1322,7 +1322,7 @@ INPUT select DATE_ADD(20071108, INTERVAL 1 DAY); END OUTPUT -select DATE_ADD(20071108, interval 1 DAY) from dual +select date_add(20071108, interval 1 day) from dual END INPUT select distinct concat(c1, repeat('xx', 250)) as cc from t2 order by 1; @@ -1352,7 +1352,7 @@ INPUT select (ST_aswkb(cast(st_union(multipoint( point(8,6), point(1,-17679), point(-9,-9)), linestring(point(91,12), point(-77,49), point(53,-81)))as char(18)))) in ('1','2'); END OUTPUT -select ST_aswkb(cast(st_union(multipoint(point(8, 6), point(1, -17679), point(-9, -9)), linestring(point(91, 12), point(-77, 49), point(53, -81))) as char(18))) in ('1', '2') from dual +select st_asbinary(cast(st_union(multipoint(point(8, 6), point(1, -17679), point(-9, -9)), linestring(point(91, 12), point(-77, 49), point(53, -81))) as char(18))) in ('1', '2') from dual END INPUT select 12%0 as 'NULL'; @@ -1448,7 +1448,7 @@ INPUT select ST_astext(ST_MPointFromWKB(ST_AsWKB(MultiPoint(Point('0', '-0'),Point('-0', '0'), Point('0', '0'))))) as result; END OUTPUT -select ST_astext(ST_MPointFromWKB(ST_AsWKB(MultiPoint(point('0', '-0'), point('-0', '0'), point('0', '0'))))) as result from dual +select st_astext(st_multipointfromwkb(st_asbinary(multipoint(point('0', '-0'), point('-0', '0'), point('0', '0'))))) as result from dual END INPUT select table_name, index_type from information_schema.statistics where table_schema = 'test' and table_name = 'tm' order by table_name; @@ -1490,7 +1490,7 @@ INPUT select ST_asbinary(g) from t1; END OUTPUT -select ST_asbinary(g) from t1 +select st_asbinary(g) from t1 END INPUT select * from t1 join t2 using(`t1_id`) where match (t1.name, t2.name) against('xxfoo' in boolean mode); @@ -1526,13 +1526,13 @@ INPUT select timestampdiff(YEAR, '2002-05-01', '2001-01-01') as a; END OUTPUT -select timestampdiff(YEAR, '2002-05-01', '2001-01-01') as a from dual +select timestampdiff(year, '2002-05-01', '2001-01-01') as a from dual END INPUT select ST_AsText(Polygon(LineString(Point(0, 0), Point(1, 0), Point(1,1), Point(0, 1), Point(0, 0)))); END OUTPUT -select ST_AsText(polygon(linestring(point(0, 0), point(1, 0), point(1, 1), point(0, 1), point(0, 0)))) from dual +select st_astext(polygon(linestring(point(0, 0), point(1, 0), point(1, 1), point(0, 1), point(0, 0)))) from dual END INPUT select get_lock('ee_16407_5', 60); @@ -1622,7 +1622,7 @@ INPUT select st_touches(ST_GeometryFromText('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))')); END OUTPUT -select st_touches(ST_GeometryFromText('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))')) from dual +select st_touches(st_geometryfromtext('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), st_geometryfromtext('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))')) from dual END INPUT select 'c' like '_' as want0; @@ -1676,7 +1676,7 @@ INPUT select hex(soundex(_utf8 0xD091D092D093)); END OUTPUT -select hex(soundex(_utf8 0xD091D092D093)) from dual +select hex(soundex(_utf8mb3 0xD091D092D093)) from dual END INPUT select * from t1 where btn like "ff%"; @@ -1778,7 +1778,7 @@ INPUT select mbrcontains(ST_GeomFromText("polygon((2 2, 10 2, 10 10, 2 10, 2 2))"), ST_GeomFromText("point(2 4)")); END OUTPUT -select mbrcontains(ST_GeomFromText('polygon((2 2, 10 2, 10 10, 2 10, 2 2))'), ST_GeomFromText('point(2 4)')) from dual +select mbrcontains(st_geometryfromtext('polygon((2 2, 10 2, 10 10, 2 10, 2 2))'), st_geometryfromtext('point(2 4)')) from dual END INPUT select t1.*,t2.* from { oj t2 left outer join t1 on (t1.a=t2.a) }; @@ -2000,7 +2000,7 @@ INPUT select date_add(date,INTERVAL "1:1" HOUR_MINUTE) from t1; END OUTPUT -select date_add(`date`, interval '1:1' HOUR_MINUTE) from t1 +select date_add(`date`, interval '1:1' hour_minute) from t1 END INPUT select timediff(cast('1 12:00:00' as time), '12:00:00'); @@ -2011,8 +2011,8 @@ END INPUT select hex(a), hex(@a:=convert(a using utf8mb4)), hex(convert(@a using utf16)) from t1; END -ERROR -syntax error at position 23 near ':' +OUTPUT +select hex(a), hex(@a := convert(a using utf8mb4)), hex(convert(@a using utf16)) from t1 END INPUT select event_name from information_schema.events where event_name = 'e1' and sql_mode = @full_mode; @@ -2036,7 +2036,7 @@ INPUT select mbrwithin(ST_GeomFromText("point(2 4)"), ST_GeomFromText("polygon((2 2, 10 2, 10 10, 2 10, 2 2))")); END OUTPUT -select mbrwithin(ST_GeomFromText('point(2 4)'), ST_GeomFromText('polygon((2 2, 10 2, 10 10, 2 10, 2 2))')) from dual +select mbrwithin(st_geometryfromtext('point(2 4)'), st_geometryfromtext('polygon((2 2, 10 2, 10 10, 2 10, 2 2))')) from dual END INPUT select locate('HE','hello' collate utf8_bin); @@ -2060,7 +2060,7 @@ INPUT select ST_Disjoint(ST_GeomFromText('POLYGON((0 0, 0 5, 5 5, 5 0, 0 0))'), ST_GeomFromText('POLYGON((10 10, 10 15, 15 15, 15 10, 10 10))')); END OUTPUT -select ST_Disjoint(ST_GeomFromText('POLYGON((0 0, 0 5, 5 5, 5 0, 0 0))'), ST_GeomFromText('POLYGON((10 10, 10 15, 15 15, 15 10, 10 10))')) from dual +select ST_Disjoint(st_geometryfromtext('POLYGON((0 0, 0 5, 5 5, 5 0, 0 0))'), st_geometryfromtext('POLYGON((10 10, 10 15, 15 15, 15 10, 10 10))')) from dual END INPUT select insert('hello', -4294967295, 1, 'hi'); @@ -2180,7 +2180,7 @@ INPUT select mbrwithin(ST_GeomFromText("linestring(1 0, 2 0)"), ST_GeomFromText("linestring(0 0, 3 0)")); END OUTPUT -select mbrwithin(ST_GeomFromText('linestring(1 0, 2 0)'), ST_GeomFromText('linestring(0 0, 3 0)')) from dual +select mbrwithin(st_geometryfromtext('linestring(1 0, 2 0)'), st_geometryfromtext('linestring(0 0, 3 0)')) from dual END INPUT select locate(_utf8mb4 0xD0B1, _utf8mb4 0xD0B0D091D0B2 collate utf8mb4_bin); @@ -2198,7 +2198,7 @@ INPUT select date_sub("1998-01-01 00:00:00",INTERVAL 1 DAY); END OUTPUT -select date_sub('1998-01-01 00:00:00', interval 1 DAY) from dual +select date_sub('1998-01-01 00:00:00', interval 1 day) from dual END INPUT select "he"; @@ -2258,7 +2258,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL 1 YEAR); END OUTPUT -select date_add('1997-12-31 23:59:59', interval 1 YEAR) from dual +select date_add('1997-12-31 23:59:59', interval 1 year) from dual END INPUT select monthname("1972-03-04"),monthname("1972-03-04")+0; @@ -2270,7 +2270,7 @@ INPUT select object_id, ST_geometrytype(geo), ST_ISSIMPLE(GEO), ST_ASTEXT(ST_centroid(geo)) from t1 where object_id=85984; END OUTPUT -select object_id, ST_geometrytype(geo), ST_ISSIMPLE(GEO), ST_ASTEXT(ST_centroid(geo)) from t1 where object_id = 85984 +select object_id, st_geometrytype(geo), st_issimple(GEO), st_astext(st_centroid(geo)) from t1 where object_id = 85984 END INPUT select @@global.optimizer_switch; @@ -2288,7 +2288,7 @@ INPUT select date_sub("1998-01-01 00:00:00.000001",INTERVAL "1:1.000002" MINUTE_MICROSECOND); END OUTPUT -select date_sub('1998-01-01 00:00:00.000001', interval '1:1.000002' MINUTE_MICROSECOND) from dual +select date_sub('1998-01-01 00:00:00.000001', interval '1:1.000002' minute_microsecond) from dual END INPUT select locate('lo','hello',2); @@ -2300,7 +2300,7 @@ INPUT select date_add("1997-12-31 23:59:59.000002",INTERVAL "10000.999999" SECOND_MICROSECOND); END OUTPUT -select date_add('1997-12-31 23:59:59.000002', interval '10000.999999' SECOND_MICROSECOND) from dual +select date_add('1997-12-31 23:59:59.000002', interval '10000.999999' second_microsecond) from dual END INPUT select get_lock("mysqltest_lock", 100); @@ -2360,7 +2360,7 @@ INPUT select date_sub("1998-01-01 00:00:00",INTERVAL "1 1" YEAR_MONTH); END OUTPUT -select date_sub('1998-01-01 00:00:00', interval '1 1' YEAR_MONTH) from dual +select date_sub('1998-01-01 00:00:00', interval '1 1' year_month) from dual END INPUT select min(b) from t1; @@ -2378,7 +2378,7 @@ INPUT select ST_astext(geom), ST_area(geom),ST_area(ST_buffer(geom,2)) from t1; END OUTPUT -select ST_astext(geom), ST_area(geom), ST_area(ST_buffer(geom, 2)) from t1 +select st_astext(geom), st_area(geom), st_area(ST_buffer(geom, 2)) from t1 END INPUT select * from t1,t2 right join t3 on (t2.i=t3.i) order by t1.i,t2.i,t3.i; @@ -2402,7 +2402,7 @@ INPUT select ST_astext(st_symdifference(ST_GeomFromText('polygon((0 0, 1 0, 0 1, 0 0))'), ST_GeomFromText('polygon((0 0, 0 1, 1 1, 1 0, 0 0))'))) as result; END OUTPUT -select ST_astext(st_symdifference(ST_GeomFromText('polygon((0 0, 1 0, 0 1, 0 0))'), ST_GeomFromText('polygon((0 0, 0 1, 1 1, 1 0, 0 0))'))) as result from dual +select st_astext(st_symdifference(st_geometryfromtext('polygon((0 0, 1 0, 0 1, 0 0))'), st_geometryfromtext('polygon((0 0, 0 1, 1 1, 1 0, 0 0))'))) as result from dual END INPUT select TRIGGER_NAME from information_schema.triggers where trigger_schema='test'; @@ -2450,7 +2450,7 @@ INPUT select length(uuid()), charset(uuid()), length(unhex(replace(uuid(),_utf8'-',_utf8''))); END OUTPUT -select length(uuid()), charset(uuid()), length(unhex(replace(uuid(), _utf8 '-', _utf8 ''))) from dual +select length(uuid()), charset(uuid()), length(unhex(replace(uuid(), _utf8mb3 '-', _utf8mb3 ''))) from dual END INPUT select substring('hello', 4294967296, 4294967296); @@ -2570,7 +2570,7 @@ INPUT select ST_astext(st_difference(ST_GeomFromText('multipoint(2 2, 3 3)'), ST_GeomFromText('multipoint(0 0, 4 4)'))); END OUTPUT -select ST_astext(st_difference(ST_GeomFromText('multipoint(2 2, 3 3)'), ST_GeomFromText('multipoint(0 0, 4 4)'))) from dual +select st_astext(st_difference(st_geometryfromtext('multipoint(2 2, 3 3)'), st_geometryfromtext('multipoint(0 0, 4 4)'))) from dual END INPUT select a1,a2,b, max(c) from t1 where (c > 'b1') or (c <= 'g1') group by a1,a2,b; @@ -2588,7 +2588,7 @@ INPUT select _utf8 0xD0B0D0B1D0B2 like concat(_utf8'%',_utf8 0xD0B1,_utf8 '%'); END OUTPUT -select _utf8 0xD0B0D0B1D0B2 like concat(_utf8 '%', _utf8 0xD0B1, _utf8 '%') from dual +select _utf8mb3 0xD0B0D0B1D0B2 like concat(_utf8mb3 '%', _utf8mb3 0xD0B1, _utf8mb3 '%') from dual END INPUT select * from t1 where MATCH(a,b) AGAINST ("indexes"); @@ -2612,7 +2612,7 @@ INPUT select timestampdiff(SQL_TSI_HOUR, '2001-02-01', '2001-05-01') as a; END OUTPUT -select timestampdiff(SQL_TSI_HOUR, '2001-02-01', '2001-05-01') as a from dual +select timestampdiff(hour, '2001-02-01', '2001-05-01') as a from dual END INPUT select max(t2.a1) from t2 left outer join t1 on t2.a2=10 where t2.a2=10; @@ -2624,7 +2624,7 @@ INPUT select timestampadd(SQL_TSI_SECOND, 1, date) from t1; END OUTPUT -select timestampadd(SQL_TSI_SECOND, 1, `date`) from t1 +select timestampadd(second, 1, `date`) from t1 END INPUT select * from (select 1 as a) b left join (select 2 as a) c using(a); @@ -2666,7 +2666,7 @@ INPUT select concat(a, if(b>10, _utf8'æ', _utf8'ß')) from t1; END OUTPUT -select concat(a, if(b > 10, _utf8 'æ', _utf8 'ß')) from t1 +select concat(a, if(b > 10, _utf8mb3 'æ', _utf8mb3 'ß')) from t1 END INPUT select hex(group_concat(a separator ',')) from t1; @@ -2702,7 +2702,7 @@ INPUT select hex(_utf8 X'616263FF'); END OUTPUT -select hex(_utf8 X'616263FF') from dual +select hex(_utf8mb3 X'616263FF') from dual END INPUT select t2.count, t1.name from t2 inner join t1 using (color); @@ -2714,7 +2714,7 @@ INPUT select st_touches(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_touches(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_touches(st_union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select a, t1.* as 'with_alias' from t1; @@ -2725,8 +2725,8 @@ END INPUT select @x:=group_concat(x) from t1 group by y; END -ERROR -syntax error at position 11 near ':' +OUTPUT +select @x := group_concat(x) from t1 group by y END INPUT select cast('-10a' as signed integer); @@ -2810,7 +2810,7 @@ INPUT select date_sub("1998-01-01 00:00:00",INTERVAL 1 SECOND); END OUTPUT -select date_sub('1998-01-01 00:00:00', interval 1 SECOND) from dual +select date_sub('1998-01-01 00:00:00', interval 1 second) from dual END INPUT select * from t6 order by a,b; @@ -2840,7 +2840,7 @@ INPUT select ST_astext(ST_centroid(ST_PolyFromWKB(ST_AsWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(1, 0), Point(0, 0))))))); END OUTPUT -select ST_astext(ST_centroid(ST_PolyFromWKB(ST_AsWKB(polygon(linestring(point(0, 0), point(30, 0), point(30, 30), point(1, 0), point(0, 0))))))) from dual +select st_astext(st_centroid(st_polygonfromwkb(st_asbinary(polygon(linestring(point(0, 0), point(30, 0), point(30, 30), point(1, 0), point(0, 0))))))) from dual END INPUT select null sounds like 'null'; @@ -2852,7 +2852,7 @@ INPUT select ST_Contains(ST_GeomFromText('POLYGON((0 0,5 0,5 5,0 5,0 0))'),ST_GeomFromText('LINESTRING(1 2,5 5)')) as result; END OUTPUT -select ST_Contains(ST_GeomFromText('POLYGON((0 0,5 0,5 5,0 5,0 0))'), ST_GeomFromText('LINESTRING(1 2,5 5)')) as result from dual +select ST_Contains(st_geometryfromtext('POLYGON((0 0,5 0,5 5,0 5,0 0))'), st_geometryfromtext('LINESTRING(1 2,5 5)')) as result from dual END INPUT select * from t1 where a like '%PESA%'; @@ -2882,7 +2882,7 @@ INPUT select ST_astext(st_symdifference(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))); END OUTPUT -select ST_astext(st_symdifference(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))) from dual +select st_astext(st_symdifference(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)')))) from dual END INPUT select @@session.transaction_isolation; @@ -2900,7 +2900,7 @@ INPUT select timestampdiff(QUARTER, '2002-05-01', '2001-01-01') as a; END OUTPUT -select timestampdiff(QUARTER, '2002-05-01', '2001-01-01') as a from dual +select timestampdiff(quarter, '2002-05-01', '2001-01-01') as a from dual END INPUT select * from information_schema.TABLE_CONSTRAINTS where TABLE_NAME= "vo"; @@ -2966,7 +2966,7 @@ INPUT select str_to_date('10:00 PM', '%h:%i %p') + INTERVAL 10 MINUTE; END OUTPUT -select str_to_date('10:00 PM', '%h:%i %p') + interval 10 MINUTE from dual +select str_to_date('10:00 PM', '%h:%i %p') + interval 10 minute from dual END INPUT select 497, TMP.ID, NULL from (select 497 as ID, MAX(t3.DATA) as DATA from t1 join t2 on (t1.ObjectID = t2.ID) join t3 on (t1.ObjectID = t3.ID) group by t2.ParID order by DATA DESC) as TMP; @@ -3044,7 +3044,7 @@ INPUT select st_disjoint(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_disjoint(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_disjoint(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_union(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select _latin1 0xFF regexp _latin1 '[[:lower:]]' COLLATE latin1_bin; @@ -3098,7 +3098,7 @@ INPUT select locate(_utf8 0xD091, _utf8 0xD0B0D0B1D0B2); END OUTPUT -select locate(_utf8 0xD091, _utf8 0xD0B0D0B1D0B2) from dual +select locate(_utf8mb3 0xD091, _utf8mb3 0xD0B0D0B1D0B2) from dual END INPUT select group_concat(distinct a, c order by a desc, c desc) from t1; @@ -3122,7 +3122,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL 1 SECOND); END OUTPUT -select date_add('1997-12-31 23:59:59', interval 1 SECOND) from dual +select date_add('1997-12-31 23:59:59', interval 1 second) from dual END INPUT select t1.id, count(t2.id) from t1,t2 where t2.id = t1.id group by t1.id; @@ -3248,7 +3248,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL -100000 MINUTE); END OUTPUT -select date_add('1997-12-31 23:59:59', interval (-100000) MINUTE) from dual +select date_add('1997-12-31 23:59:59', interval -100000 minute) from dual END INPUT select insert('hello', 18446744073709551616, 1, 'hi'); @@ -3308,7 +3308,7 @@ INPUT select i from t1 where a=repeat(_utf8 0xD0B1,200); END OUTPUT -select i from t1 where a = repeat(_utf8 0xD0B1, 200) +select i from t1 where a = repeat(_utf8mb3 0xD0B1, 200) END INPUT select @@read_rnd_buffer_size; @@ -3392,7 +3392,7 @@ INPUT select date_sub("1998-01-01 00:00:00",INTERVAL "1:1" HOUR_MINUTE); END OUTPUT -select date_sub('1998-01-01 00:00:00', interval '1:1' HOUR_MINUTE) from dual +select date_sub('1998-01-01 00:00:00', interval '1:1' hour_minute) from dual END INPUT select (select f from (select max(t1.a) as f) as dt) as g from t1; @@ -3422,7 +3422,7 @@ INPUT select t1.*,t2.* from t1 left join t2 on (t1.b=t2.b) where charset(t2.a) = _utf8'binary' order by t1.a,t2.a; END OUTPUT -select t1.*, t2.* from t1 left join t2 on t1.b = t2.b where charset(t2.a) = _utf8 'binary' order by t1.a asc, t2.a asc +select t1.*, t2.* from t1 left join t2 on t1.b = t2.b where charset(t2.a) = _utf8mb3 'binary' order by t1.a asc, t2.a asc END INPUT select 1 from (select 1) as a; @@ -3608,13 +3608,13 @@ INPUT select st_touches(ST_GeomFromText('polygon((0 0, 2 2, 0 4, 0 0))'), ST_GeomFromText('point(1 1)')); END OUTPUT -select st_touches(ST_GeomFromText('polygon((0 0, 2 2, 0 4, 0 0))'), ST_GeomFromText('point(1 1)')) from dual +select st_touches(st_geometryfromtext('polygon((0 0, 2 2, 0 4, 0 0))'), st_geometryfromtext('point(1 1)')) from dual END INPUT select date_add("1997-12-31 23:59:59",INTERVAL 1 MINUTE); END OUTPUT -select date_add('1997-12-31 23:59:59', interval 1 MINUTE) from dual +select date_add('1997-12-31 23:59:59', interval 1 minute) from dual END INPUT select hex(_utf32 X'103344'); @@ -3662,7 +3662,7 @@ INPUT select ST_astext(st_intersection(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_difference(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))); END OUTPUT -select ST_astext(st_intersection(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_difference(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))) from dual +select st_astext(st_intersection(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_difference(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)')))) from dual END INPUT select 'a' union select concat('a', -0.0); @@ -3692,7 +3692,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL "1 1" YEAR_MONTH); END OUTPUT -select date_add('1997-12-31 23:59:59', interval '1 1' YEAR_MONTH) from dual +select date_add('1997-12-31 23:59:59', interval '1 1' year_month) from dual END INPUT select distinct pk from v1; @@ -3704,7 +3704,7 @@ INPUT select ST_DISTANCE(ST_GeomFromText('polygon((0 0, 1 2, 2 1, 0 0))'), ST_GeomFromText('linestring(0 1, 1 0)')); END OUTPUT -select ST_DISTANCE(ST_GeomFromText('polygon((0 0, 1 2, 2 1, 0 0))'), ST_GeomFromText('linestring(0 1, 1 0)')) from dual +select ST_DISTANCE(st_geometryfromtext('polygon((0 0, 1 2, 2 1, 0 0))'), st_geometryfromtext('linestring(0 1, 1 0)')) from dual END INPUT select mod(12.0, NULL) as 'NULL'; @@ -3728,7 +3728,7 @@ INPUT select date_add("1997-12-31 23:59:59.000002",INTERVAL "10000 99:99:99.999999" DAY_MICROSECOND); END OUTPUT -select date_add('1997-12-31 23:59:59.000002', interval '10000 99:99:99.999999' DAY_MICROSECOND) from dual +select date_add('1997-12-31 23:59:59.000002', interval '10000 99:99:99.999999' day_microsecond) from dual END INPUT select * from t5 where a < 3; @@ -3746,7 +3746,7 @@ INPUT select ST_area(ST_PolygonFromText('POLYGON((10 10,20 10,20 20,10 20, 10 10))')); END OUTPUT -select ST_area(ST_PolygonFromText('POLYGON((10 10,20 10,20 20,10 20, 10 10))')) from dual +select st_area(st_polygonfromtext('POLYGON((10 10,20 10,20 20,10 20, 10 10))')) from dual END INPUT select min(a3) from t1 where a2 = 2 and a3 >= 'SEA' and a3 = 'MIN'; @@ -3799,8 +3799,8 @@ END INPUT select t2.isbn,city,@bar:=t1.libname,count(distinct t1.libname) as a from t3 left join t1 on t3.libname=t1.libname left join t2 on t3.isbn=t2.isbn group by city having count(distinct t1.libname) > 1; END -ERROR -syntax error at position 26 near ':' +OUTPUT +select t2.isbn, city, @bar := t1.libname, count(distinct t1.libname) as a from t3 left join t1 on t3.libname = t1.libname left join t2 on t3.isbn = t2.isbn group by city having count(distinct t1.libname) > 1 END INPUT select format('f','')<=replace(1,1,mid(0xd9,2,1)); @@ -3950,7 +3950,7 @@ INPUT select ST_astext(st_symdifference(ST_GeomFromText('multipoint(2 2, 3 3)'), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))); END OUTPUT -select ST_astext(st_symdifference(ST_GeomFromText('multipoint(2 2, 3 3)'), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))) from dual +select st_astext(st_symdifference(st_geometryfromtext('multipoint(2 2, 3 3)'), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)')))) from dual END INPUT select 2; @@ -3986,7 +3986,7 @@ INPUT select ST_astext(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(1, 0), Point(0, 0)))); END OUTPUT -select ST_astext(polygon(linestring(point(0, 0), point(30, 0), point(30, 30), point(1, 0), point(0, 0)))) from dual +select st_astext(polygon(linestring(point(0, 0), point(30, 0), point(30, 30), point(1, 0), point(0, 0)))) from dual END INPUT select host,user from mysql.user where User='myuser'; @@ -4033,8 +4033,8 @@ END INPUT select @topic1_id:= 10101; END -ERROR -syntax error at position 19 near ':' +OUTPUT +select @topic1_id := 10101 from dual END INPUT select locate(_ujis 0xa2a1,_ujis 0xa1a2a1a3 collate ujis_bin); @@ -4118,7 +4118,7 @@ INPUT select date_add("1997-12-31 23:59:59.000002",INTERVAL "10000:99.999999" MINUTE_MICROSECOND); END OUTPUT -select date_add('1997-12-31 23:59:59.000002', interval '10000:99.999999' MINUTE_MICROSECOND) from dual +select date_add('1997-12-31 23:59:59.000002', interval '10000:99.999999' minute_microsecond) from dual END INPUT select insert(_ucs2 0x006100620063,1,2,_ucs2 0x006400650066); @@ -4160,13 +4160,13 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL "1:1" HOUR_MINUTE); END OUTPUT -select date_add('1997-12-31 23:59:59', interval '1:1' HOUR_MINUTE) from dual +select date_add('1997-12-31 23:59:59', interval '1:1' hour_minute) from dual END INPUT select ST_astext(ST_buffer(ST_geometryfromtext('point(1 1)'), 1)); END OUTPUT -select ST_astext(ST_buffer(ST_geometryfromtext('point(1 1)'), 1)) from dual +select st_astext(ST_buffer(st_geometryfromtext('point(1 1)'), 1)) from dual END INPUT select concat(a,if(b<10,_ucs2 0x00C0,_ucs2 0x0062)) from t1; @@ -4243,8 +4243,8 @@ END INPUT select @topic2_id:= 10102; END -ERROR -syntax error at position 19 near ':' +OUTPUT +select @topic2_id := 10102 from dual END INPUT select group_concat(c1 order by binary c1 separator '') from t1 group by c1 collate utf32_hungarian_ci; @@ -4262,7 +4262,7 @@ INPUT select date_add(date,INTERVAL "1:1" DAY_HOUR) from t1; END OUTPUT -select date_add(`date`, interval '1:1' DAY_HOUR) from t1 +select date_add(`date`, interval '1:1' day_hour) from t1 END INPUT select * from v1a join v1b on t1.b = t2.b; @@ -4274,7 +4274,7 @@ INPUT select date_sub("1998-01-01 00:00:00",INTERVAL "1 1:1:1" DAY_SECOND); END OUTPUT -select date_sub('1998-01-01 00:00:00', interval '1 1:1:1' DAY_SECOND) from dual +select date_sub('1998-01-01 00:00:00', interval '1 1:1:1' day_second) from dual END INPUT select cast(rtrim(ltrim(' 20.06 ')) as decimal(19,2)); @@ -4316,7 +4316,7 @@ INPUT select ST_astext(st_intersection(ST_GeomFromText('multipoint(2 2, 3 3)'), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))); END OUTPUT -select ST_astext(st_intersection(ST_GeomFromText('multipoint(2 2, 3 3)'), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))) from dual +select st_astext(st_intersection(st_geometryfromtext('multipoint(2 2, 3 3)'), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)')))) from dual END INPUT select st_astext(st_intersection( st_geomfromtext('polygon((0 0, 1 0, 1 1, 0 1, 0 0))'), st_geomfromtext('polygon((2 0, 3 0, 3 1, 2 1, 2 0))'))) select st_astext(st_intersection( st_geomfromtext('polygon((0 0, 2 0, 2 1, 0 1, 0 0))'), st_geomfromtext('polygon((1 0, 3 0, 3 1, 1 1, 1 0))'))) select st_astext(st_intersection( st_geomfromtext('polygon((0 0, 1 0, 1 1, 0 1, 0 0))'), st_geomfromtext('polygon((1 0, 2 0, 2 1, 1 1, 1 0))'))) select st_astext(st_intersection( st_geomfromtext('polygon((0 0, 10 0, 10 3, 0 3, 0 0))'), st_geomfromtext('polygon((0 5, 1 3, 2 5, 0 5))'))) select st_astext(st_intersection( st_geomfromtext('polygon((0 0, 10 0, 10 3, 0 3, 0 0))'), st_geomfromtext('polygon((0 5, 1 3, 2 5, 3 3, 4 3, 4 5, 0 5))'))) select st_astext(st_intersection( st_geomfromtext('polygon((0 0, 10 0, 10 3, 0 3, 0 0))'), st_geomfromtext('polygon((0 5, 1 3, 2 5, 3 2, 6 2, 6 5, 0 5))'))) select st_astext(st_intersection( st_geomfromtext('polygon((0 0, 10 0, 10 3, 0 3, 0 0))'), st_geomfromtext('polygon((0 5, 1 3, 4 3, 4 2, 6 2, 6 5, 0 5))'))) select st_astext(st_intersection( st_geomfromtext('polygon((0 0, 10 0, 10 3, 0 3, 0 0))'), st_geomfromtext('polygon((0 5, 1 3, 4 3, 4 0, 6 0, 6 3, 5 3, 5 5, 0 5))'))) select st_astext(st_intersection( st_geomfromtext('polygon((0 0, 10 0, 10 3, 0 3, 0 0))'), st_geomfromtext('polygon((0 5, 1 3, 2 5, 3 3, 4 3, 4 2, 6 2, 6 5, 0 5))'))) select st_astext(st_intersection( st_geomfromtext('polygon((0 0, 10 0, 10 3, 0 3, 0 0))'), st_geomfromtext('polygon((0 5, 1 3, 2 5, 3 3, 4 3, 4 0, 10 0, 10 3, 6 3, 6 5, 0 5))'))) SELECT ST_AsText(ST_GeomFromText("POINT(10 11) POINT(11 12)")) as result; @@ -4388,7 +4388,7 @@ INPUT select date_add(datetime, INTERVAL 1 SECOND) from t1; END OUTPUT -select date_add(`datetime`, interval 1 SECOND) from t1 +select date_add(`datetime`, interval 1 second) from t1 END INPUT select max(a) from t1; @@ -4544,13 +4544,13 @@ INPUT select timestampadd(MINUTE, 1, date) from t1; END OUTPUT -select timestampadd(MINUTE, 1, `date`) from t1 +select timestampadd(minute, 1, `date`) from t1 END INPUT select ST_astext(ST_Intersection(ST_GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), ST_GeomFromText('LINESTRING(-10 -10, 200 200)'))); END OUTPUT -select ST_astext(ST_Intersection(ST_GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), ST_GeomFromText('LINESTRING(-10 -10, 200 200)'))) from dual +select st_astext(ST_Intersection(st_geometryfromtext('POLYGON((0 0, 50 45, 40 50, 0 0))'), st_geometryfromtext('LINESTRING(-10 -10, 200 200)'))) from dual END INPUT select TABLE_SCHEMA,TABLE_NAME FROM information_schema.TABLES where TABLE_SCHEMA ='mysqltest_LC2'; @@ -4700,7 +4700,7 @@ INPUT select ST_AsText(a) from t2; END OUTPUT -select ST_AsText(a) from t2 +select st_astext(a) from t2 END INPUT select count(distinct f) from t1; @@ -4808,7 +4808,7 @@ INPUT select 1, ST_Intersects(ST_GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), ST_GeomFromText('POLYGON((50 5, 55 10, 0 45, 50 5))')); END OUTPUT -select 1, ST_Intersects(ST_GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), ST_GeomFromText('POLYGON((50 5, 55 10, 0 45, 50 5))')) from dual +select 1, ST_Intersects(st_geometryfromtext('POLYGON((0 0, 50 45, 40 50, 0 0))'), st_geometryfromtext('POLYGON((50 5, 55 10, 0 45, 50 5))')) from dual END INPUT select substring_index('aaaaaaaaa1','aaa',2); @@ -4910,7 +4910,7 @@ INPUT select date_add(date,INTERVAL 1 DAY) from t1; END OUTPUT -select date_add(`date`, interval 1 DAY) from t1 +select date_add(`date`, interval 1 day) from t1 END INPUT select * from t1 where word between binary 0xDF and binary 0xDF; @@ -4952,7 +4952,7 @@ INPUT select st_distance(ST_GeomFromText('geometrycollection(geometrycollection(),polygon((0 0,0 10,10 10,10 0,0 0)))'),ST_GeomFromText('point(100 100)')); END OUTPUT -select st_distance(ST_GeomFromText('geometrycollection(geometrycollection(),polygon((0 0,0 10,10 10,10 0,0 0)))'), ST_GeomFromText('point(100 100)')) from dual +select st_distance(st_geometryfromtext('geometrycollection(geometrycollection(),polygon((0 0,0 10,10 10,10 0,0 0)))'), st_geometryfromtext('point(100 100)')) from dual END INPUT select group_concat(c1 order by c1) from t1 group by c1 collate utf8_polish_ci; @@ -5042,7 +5042,7 @@ INPUT select st_crosses(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_crosses(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_crosses(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select * from v1c; @@ -5462,7 +5462,7 @@ INPUT select timestampdiff(MONTH, '2000-03-28', '2000-02-29') as a; END OUTPUT -select timestampdiff(MONTH, '2000-03-28', '2000-02-29') as a from dual +select timestampdiff(month, '2000-03-28', '2000-02-29') as a from dual END INPUT select concat(_latin1'a',_latin2'b',_latin5'c' collate latin5_turkish_ci); @@ -5498,7 +5498,7 @@ INPUT select st_intersects(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_intersects(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_intersects(st_union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select * from t1 where id=000000000001; @@ -5612,7 +5612,7 @@ INPUT select st_crosses(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_crosses(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_crosses(st_union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select SUBSTRING_INDEX(_latin1'abcdabcdabcd',_latin2'd',2); @@ -5726,7 +5726,7 @@ INPUT select 1, ST_Within(ST_GeomFromText('POLYGON((1 1,20 10,10 30, 1 1))'), ST_GeomFromText('POLYGON((0 0,30 5,10 40, 0 0))')); END OUTPUT -select 1, ST_Within(ST_GeomFromText('POLYGON((1 1,20 10,10 30, 1 1))'), ST_GeomFromText('POLYGON((0 0,30 5,10 40, 0 0))')) from dual +select 1, ST_Within(st_geometryfromtext('POLYGON((1 1,20 10,10 30, 1 1))'), st_geometryfromtext('POLYGON((0 0,30 5,10 40, 0 0))')) from dual END INPUT select hex(substr(_utf16 0x00e400e50068,-2)); @@ -5738,7 +5738,7 @@ INPUT select st_touches(ST_GeomFromText('polygon((0 0, 2 2, 0 4, 0 0))'), ST_GeomFromText('point(1 2)')); END OUTPUT -select st_touches(ST_GeomFromText('polygon((0 0, 2 2, 0 4, 0 0))'), ST_GeomFromText('point(1 2)')) from dual +select st_touches(st_geometryfromtext('polygon((0 0, 2 2, 0 4, 0 0))'), st_geometryfromtext('point(1 2)')) from dual END INPUT select a1,a2,b, max(c) from t1 where (a1 = 'b' or a1 = 'd' or a1 = 'a' or a1 = 'c') and (a2 > 'a') group by a1,a2,b; @@ -5786,7 +5786,7 @@ INPUT select date_sub("0169-01-01 00:00:01",INTERVAL 2 SECOND); END OUTPUT -select date_sub('0169-01-01 00:00:01', interval 2 SECOND) from dual +select date_sub('0169-01-01 00:00:01', interval 2 second) from dual END INPUT select timediff("1997-12-31 23:59:59.000001","1997-12-30 01:01:01.000002"); @@ -5834,7 +5834,7 @@ INPUT select ST_astext(st_union(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))); END OUTPUT -select ST_astext(st_union(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))) from dual +select st_astext(st_union(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)')))) from dual END INPUT select column_type from information_schema.columns where table_schema="information_schema" and table_name="COLUMNS" and (column_name="character_set_name" or column_name="collation_name"); @@ -5846,7 +5846,7 @@ INPUT select concat(a, if(b>10, _utf8'x', _utf8'y')) from t1; END OUTPUT -select concat(a, if(b > 10, _utf8 'x', _utf8 'y')) from t1 +select concat(a, if(b > 10, _utf8mb3 'x', _utf8mb3 'y')) from t1 END INPUT select /lib32/ /libx32/ user, host, db, info from information_schema.processlist where state = 'User lock' and info = 'select get_lock('ee_16407_2', 60)'; @@ -5954,7 +5954,7 @@ INPUT select INTERVAL 1 DAY + "1997-12-31"; END OUTPUT -select interval 1 DAY + '1997-12-31' from dual +select interval 1 day + '1997-12-31' from dual END INPUT select * from t1 where t1 like "a_%"; @@ -6104,7 +6104,7 @@ INPUT select ST_astext(g) from t1 where ST_Contains(ST_GeomFromText('POLYGON((5 1, 7 1, 7 7, 5 7, 3 3, 5 3, 5 1))'), g); END OUTPUT -select ST_astext(g) from t1 where ST_Contains(ST_GeomFromText('POLYGON((5 1, 7 1, 7 7, 5 7, 3 3, 5 3, 5 1))'), g) +select st_astext(g) from t1 where ST_Contains(st_geometryfromtext('POLYGON((5 1, 7 1, 7 7, 5 7, 3 3, 5 3, 5 1))'), g) END INPUT select timestamp("2001-12-01", "25:01:01"); @@ -6164,7 +6164,7 @@ INPUT select ST_astext(ST_Intersection(ST_geometryfromtext('point(1 1)'), ST_geometryfromtext('polygon((0 0, 2 0, 1 2, 0 0))'))); END OUTPUT -select ST_astext(ST_Intersection(ST_geometryfromtext('point(1 1)'), ST_geometryfromtext('polygon((0 0, 2 0, 1 2, 0 0))'))) from dual +select st_astext(ST_Intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('polygon((0 0, 2 0, 1 2, 0 0))'))) from dual END INPUT select (select d from t2 where d > a) as 'x', t1.* from t1; @@ -6296,19 +6296,19 @@ INPUT select date_add("1997-12-31",INTERVAL 1 DAY); END OUTPUT -select date_add('1997-12-31', interval 1 DAY) from dual +select date_add('1997-12-31', interval 1 day) from dual END INPUT select mbrwithin(ST_GeomFromText("linestring(1 0, 2 0)"), ST_GeomFromText("polygon((0 0, 3 0, 3 3, 0 3, 0 0))")); END OUTPUT -select mbrwithin(ST_GeomFromText('linestring(1 0, 2 0)'), ST_GeomFromText('polygon((0 0, 3 0, 3 3, 0 3, 0 0))')) from dual +select mbrwithin(st_geometryfromtext('linestring(1 0, 2 0)'), st_geometryfromtext('polygon((0 0, 3 0, 3 3, 0 3, 0 0))')) from dual END INPUT select ST_astext(ST_convexhull(ST_PolyFromWKB(ST_AsWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(1, 0), Point(0, 0))))))); END OUTPUT -select ST_astext(ST_convexhull(ST_PolyFromWKB(ST_AsWKB(polygon(linestring(point(0, 0), point(30, 0), point(30, 30), point(1, 0), point(0, 0))))))) from dual +select st_astext(ST_convexhull(st_polygonfromwkb(st_asbinary(polygon(linestring(point(0, 0), point(30, 0), point(30, 30), point(1, 0), point(0, 0))))))) from dual END INPUT select * from t1 where i between 2 and 4 and v in ('def','3r4f','abc'); @@ -6338,7 +6338,7 @@ INPUT select ST_Length(ST_MLineFromWKB(0x0000000005000000020000000002000000035FB317E5EF3AB327E3A4B378469B67320000000000000000C0240000000000003FF05FD8ADAB9F560000000000000000000000000200000003000000000000000000000000000000000000000000000000BFF08B439581062540240000000000004341C37937E08000)) as length; END OUTPUT -select ST_Length(ST_MLineFromWKB(0x0000000005000000020000000002000000035FB317E5EF3AB327E3A4B378469B67320000000000000000C0240000000000003FF05FD8ADAB9F560000000000000000000000000200000003000000000000000000000000000000000000000000000000BFF08B439581062540240000000000004341C37937E08000)) as length from dual +select st_length(st_multilinestringfromwkb(0x0000000005000000020000000002000000035FB317E5EF3AB327E3A4B378469B67320000000000000000C0240000000000003FF05FD8ADAB9F560000000000000000000000000200000003000000000000000000000000000000000000000000000000BFF08B439581062540240000000000004341C37937E08000)) as length from dual END INPUT select host,db,user,select_priv,insert_priv from mysql.db where db="mysqltest1"; @@ -6368,7 +6368,7 @@ INPUT select date_sub("1998-01-01 00:00:00",INTERVAL "1:1" DAY_HOUR); END OUTPUT -select date_sub('1998-01-01 00:00:00', interval '1:1' DAY_HOUR) from dual +select date_sub('1998-01-01 00:00:00', interval '1:1' day_hour) from dual END INPUT select bin(convert(-9223372036854775808 using ucs2)); @@ -6380,7 +6380,7 @@ INPUT select date_add(date,INTERVAL "1:1:1" HOUR_SECOND) from t1; END OUTPUT -select date_add(`date`, interval '1:1:1' HOUR_SECOND) from t1 +select date_add(`date`, interval '1:1:1' hour_second) from t1 END INPUT select * from t1 where a <> _latin1 'B' collate latin1_bin; @@ -6458,7 +6458,7 @@ INPUT select right(_utf8 0xD0B0D0B2D0B2,1); END OUTPUT -select right(_utf8 0xD0B0D0B2D0B2, 1) from dual +select right(_utf8mb3 0xD0B0D0B2D0B2, 1) from dual END INPUT select 5 div 2; @@ -6476,7 +6476,7 @@ INPUT select timestampdiff(MONTH, '1991-03-28', '2000-02-29') as a; END OUTPUT -select timestampdiff(MONTH, '1991-03-28', '2000-02-29') as a from dual +select timestampdiff(month, '1991-03-28', '2000-02-29') as a from dual END INPUT select date_format(f1, "%m") as d1, date_format(f1, "%M") as d2 from t1 order by date_format(f1, "%M"); @@ -6632,7 +6632,7 @@ INPUT select timestampdiff(MONTH, '2001-02-01', '2001-05-01') as a; END OUTPUT -select timestampdiff(MONTH, '2001-02-01', '2001-05-01') as a from dual +select timestampdiff(month, '2001-02-01', '2001-05-01') as a from dual END INPUT select inet_aton("255.255.255.255.255"),inet_aton("255.255.1.255"),inet_aton("0.1.255"); @@ -6668,7 +6668,7 @@ INPUT select ST_NUMPOINTS(ST_EXTERIORRING(@buff)) from t1; END OUTPUT -select ST_NUMPOINTS(ST_EXTERIORRING(@buff)) from t1 +select st_numpoints(st_exteriorring(@buff)) from t1 END INPUT select fld1,fld3 FROM t2 where fld1 like "25050_"; @@ -6722,7 +6722,7 @@ INPUT select date_sub("1998-01-01 00:00:00",INTERVAL 1 HOUR); END OUTPUT -select date_sub('1998-01-01 00:00:00', interval 1 HOUR) from dual +select date_sub('1998-01-01 00:00:00', interval 1 hour) from dual END INPUT select 0, st_overlaps(t.geom, p.geom) from tbl_polygon t, tbl_polygon p where t.id = 'POLY1' and p.id = 'POLY2'; @@ -6860,7 +6860,7 @@ INPUT select st_contains(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_contains(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_contains(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select table_name, index_type from information_schema.statistics where table_schema = 'test' and table_name like 't%' and index_name = 'l' order by table_name; @@ -6932,13 +6932,13 @@ INPUT select visitor_id,max(ts) as mts from t1 group by visitor_id having DATE_ADD(mts,INTERVAL 3 MONTH) < NOW(); END OUTPUT -select visitor_id, max(ts) as mts from t1 group by visitor_id having DATE_ADD(mts, interval 3 MONTH) < now() +select visitor_id, max(ts) as mts from t1 group by visitor_id having date_add(mts, interval 3 month) < now() END INPUT select @category3_id:= 10003; END -ERROR -syntax error at position 22 near ':' +OUTPUT +select @category3_id := 10003 from dual END INPUT select c as c_a from t1 where c='a'; @@ -7076,7 +7076,7 @@ INPUT select date_add("0199-12-31 23:59:59",INTERVAL 2 SECOND); END OUTPUT -select date_add('0199-12-31 23:59:59', interval 2 SECOND) from dual +select date_add('0199-12-31 23:59:59', interval 2 second) from dual END INPUT select a as foo, sum(b) as bar from t1 group by a having foo<10; @@ -7088,7 +7088,7 @@ INPUT select date_sub("0200-01-01 00:00:01",INTERVAL 2 SECOND); END OUTPUT -select date_sub('0200-01-01 00:00:01', interval 2 SECOND) from dual +select date_sub('0200-01-01 00:00:01', interval 2 second) from dual END INPUT select a,b,c from t3 force index (a) where a=1 order by a desc, b desc, c desc; @@ -7274,7 +7274,7 @@ INPUT select date_add(date,INTERVAL 1 MINUTE) from t1; END OUTPUT -select date_add(`date`, interval 1 MINUTE) from t1 +select date_add(`date`, interval 1 minute) from t1 END INPUT select a as foo, sum(b) as bar from t1 group by a having bar>10 order by foo+10; @@ -7316,7 +7316,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL 1 MONTH); END OUTPUT -select date_add('1997-12-31 23:59:59', interval 1 MONTH) from dual +select date_add('1997-12-31 23:59:59', interval 1 month) from dual END INPUT select table_name, index_type from information_schema.statistics where table_schema = 'test' and table_name like 't%' and index_name = 'w' order by table_name; @@ -7412,7 +7412,7 @@ INPUT select st_astext(st_union(ST_GeometryFromText('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))'))); END OUTPUT -select st_astext(st_union(ST_GeometryFromText('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))'))) from dual +select st_astext(st_union(st_geometryfromtext('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), st_geometryfromtext('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))'))) from dual END INPUT select COLUMN_NAME,COLUMN_TYPE, CHARACTER_MAXIMUM_LENGTH, CHARACTER_OCTET_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE from information_schema.columns where table_name= 't1'; @@ -7430,7 +7430,7 @@ INPUT select ST_AsText(f2),ST_AsText(f3) from t1; END OUTPUT -select ST_AsText(f2), ST_AsText(f3) from t1 +select st_astext(f2), st_astext(f3) from t1 END INPUT select collation(group_concat(a,b)) from t1; @@ -7496,7 +7496,7 @@ INPUT select mbrwithin(ST_GeomFromText("linestring(1 1, 2 1)"), ST_GeomFromText("polygon((0 0, 3 0, 3 3, 0 3, 0 0))")); END OUTPUT -select mbrwithin(ST_GeomFromText('linestring(1 1, 2 1)'), ST_GeomFromText('polygon((0 0, 3 0, 3 3, 0 3, 0 0))')) from dual +select mbrwithin(st_geometryfromtext('linestring(1 1, 2 1)'), st_geometryfromtext('polygon((0 0, 3 0, 3 3, 0 3, 0 0))')) from dual END INPUT select ST_GeomFromText('linestring(7 6, 15 4)') into @l; @@ -7520,7 +7520,7 @@ INPUT select st_equals(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_equals(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_equals(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select table_schema, table_name, column_name from information_schema.columns where table_schema not in ('performance_schema', 'sys', 'mysql') and data_type = 'longtext' order by table_name, column_name; @@ -7670,7 +7670,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL 1 HOUR); END OUTPUT -select date_add('1997-12-31 23:59:59', interval 1 HOUR) from dual +select date_add('1997-12-31 23:59:59', interval 1 hour) from dual END INPUT select 0x903f645a8c507dd79178 like '%-128%'; @@ -7682,7 +7682,7 @@ INPUT select date_sub("1998-01-01 00:00:00",INTERVAL "1 1:1" DAY_MINUTE); END OUTPUT -select date_sub('1998-01-01 00:00:00', interval '1 1:1' DAY_MINUTE) from dual +select date_sub('1998-01-01 00:00:00', interval '1 1:1' day_minute) from dual END INPUT select t1.a,t2.b from t1,t2 where t1.a=t2.a group by t1.a,t2.b ORDER BY NULL; @@ -7706,7 +7706,7 @@ INPUT select st_astext(st_symdifference(ST_GeometryFromText('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))'))); END OUTPUT -select st_astext(st_symdifference(ST_GeometryFromText('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))'))) from dual +select st_astext(st_symdifference(st_geometryfromtext('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), st_geometryfromtext('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))'))) from dual END INPUT select mysqltest1.f1(); @@ -7724,7 +7724,7 @@ INPUT select st_disjoint(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_disjoint(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_disjoint(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select substring('hello', 1, -18446744073709551617); @@ -7741,14 +7741,14 @@ END INPUT select t2.isbn,city,concat(@bar:=t1.libname),count(distinct t1.libname) as a from t3 left join t1 on t3.libname=t1.libname left join t2 on t3.isbn=t2.isbn group by city having count(distinct t1.libname) > 1; END -ERROR -syntax error at position 33 near ':' +OUTPUT +select t2.isbn, city, concat(@bar := t1.libname), count(distinct t1.libname) as a from t3 left join t1 on t3.libname = t1.libname left join t2 on t3.isbn = t2.isbn group by city having count(distinct t1.libname) > 1 END INPUT select ST_DISTANCE(ST_GeomFromText('polygon((0 0, 3 6, 6 3, 0 0),(2 2, 3 4, 4 3, 2 2))'), ST_GeomFromText('point(3 3)')); END OUTPUT -select ST_DISTANCE(ST_GeomFromText('polygon((0 0, 3 6, 6 3, 0 0),(2 2, 3 4, 4 3, 2 2))'), ST_GeomFromText('point(3 3)')) from dual +select ST_DISTANCE(st_geometryfromtext('polygon((0 0, 3 6, 6 3, 0 0),(2 2, 3 4, 4 3, 2 2))'), st_geometryfromtext('point(3 3)')) from dual END INPUT select hex(min(binary a)),count(*) from t1 group by a; @@ -7790,7 +7790,7 @@ INPUT select user() like _utf8"%@%"; END OUTPUT -select user() like _utf8 '%@%' from dual +select user() like _utf8mb3 '%@%' from dual END INPUT select st_distance(linestring(point(26,87),point(13,95)), geometrycollection(point(4.297374e+307,8.433875e+307), point(1e308, 1e308))) as dist; @@ -7849,8 +7849,8 @@ END INPUT select @category1_id:= 10001; END -ERROR -syntax error at position 22 near ':' +OUTPUT +select @category1_id := 10001 from dual END INPUT select hex(char(0x0102 using utf32)); @@ -7874,7 +7874,7 @@ INPUT select ST_astext(ST_Intersection(ST_GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), ST_GeomFromText('LINESTRING(-10 -10, 200 200, 199 201, -11 -9)'))); END OUTPUT -select ST_astext(ST_Intersection(ST_GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), ST_GeomFromText('LINESTRING(-10 -10, 200 200, 199 201, -11 -9)'))) from dual +select st_astext(ST_Intersection(st_geometryfromtext('POLYGON((0 0, 50 45, 40 50, 0 0))'), st_geometryfromtext('LINESTRING(-10 -10, 200 200, 199 201, -11 -9)'))) from dual END INPUT select cast('18446744073709551616' as signed); @@ -7934,7 +7934,7 @@ INPUT select date_add(date,INTERVAL "1:1" MINUTE_SECOND) from t1; END OUTPUT -select date_add(`date`, interval '1:1' MINUTE_SECOND) from t1 +select date_add(`date`, interval '1:1' minute_second) from t1 END INPUT select extract(MINUTE_SECOND FROM "10:11:12"); @@ -8066,7 +8066,7 @@ INPUT select date_sub("1998-01-01 00:00:00.000001",INTERVAL "000002" MICROSECOND); END OUTPUT -select date_sub('1998-01-01 00:00:00.000001', interval '000002' MICROSECOND) from dual +select date_sub('1998-01-01 00:00:00.000001', interval '000002' microsecond) from dual END INPUT select concat(a1,a2),b,min(c),max(c) from t1 where a1 < 'd' group by a1,a2,b; @@ -8168,7 +8168,7 @@ INPUT select ST_Astext(ST_Envelope(ST_MPointFromWKB(ST_AsWKB(MultiPoint(Point('0', '0'),Point('-0', '0'), Point('0', '-0')))))) as result; END OUTPUT -select ST_Astext(ST_Envelope(ST_MPointFromWKB(ST_AsWKB(MultiPoint(point('0', '0'), point('-0', '0'), point('0', '-0')))))) as result from dual +select st_astext(st_envelope(st_multipointfromwkb(st_asbinary(multipoint(point('0', '0'), point('-0', '0'), point('0', '-0')))))) as result from dual END INPUT select constraint_name from information_schema.table_constraints where table_schema='test' order by constraint_name; @@ -8281,8 +8281,8 @@ END INPUT select hex(@utf82:= CONVERT(@ujis2 USING utf8)); END -ERROR -syntax error at position 19 near ':' +OUTPUT +select hex(@utf82 := convert(@ujis2 using utf8)) from dual END INPUT select * from t5 order by a,b; @@ -8300,7 +8300,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL 100000 HOUR); END OUTPUT -select date_add('1997-12-31 23:59:59', interval 100000 HOUR) from dual +select date_add('1997-12-31 23:59:59', interval 100000 hour) from dual END INPUT select std(s1/s2) from bug22555 where i=2; @@ -8348,7 +8348,7 @@ INPUT select date_sub("1998-01-01 00:00:00.000001",INTERVAL "1.000002" SECOND_MICROSECOND); END OUTPUT -select date_sub('1998-01-01 00:00:00.000001', interval '1.000002' SECOND_MICROSECOND) from dual +select date_sub('1998-01-01 00:00:00.000001', interval '1.000002' second_microsecond) from dual END INPUT select if(0, 18446744073709551610, 18446744073709551610); @@ -8360,19 +8360,19 @@ INPUT select st_astext(st_difference(ST_GeometryFromText('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))'))); END OUTPUT -select st_astext(st_difference(ST_GeometryFromText('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))'))) from dual +select st_astext(st_difference(st_geometryfromtext('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), st_geometryfromtext('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))'))) from dual END INPUT select date_add("1997-12-31 23:59:59",INTERVAL "1:1" DAY_HOUR); END OUTPUT -select date_add('1997-12-31 23:59:59', interval '1:1' DAY_HOUR) from dual +select date_add('1997-12-31 23:59:59', interval '1:1' day_hour) from dual END INPUT select ST_astext(ST_Union(ST_geometryfromtext('point(1 1)'), ST_geometryfromtext('polygon((0 0, 2 0, 1 2, 0 0))'))); END OUTPUT -select ST_astext(ST_Union(ST_geometryfromtext('point(1 1)'), ST_geometryfromtext('polygon((0 0, 2 0, 1 2, 0 0))'))) from dual +select st_astext(ST_Union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('polygon((0 0, 2 0, 1 2, 0 0))'))) from dual END INPUT select hex(convert(_big5 0xC84041 using ucs2)); @@ -8462,7 +8462,7 @@ INPUT select locate(_utf8 0xD0B1, _utf8 0xD0B0D091D0B2); END OUTPUT -select locate(_utf8 0xD0B1, _utf8 0xD0B0D091D0B2) from dual +select locate(_utf8mb3 0xD0B1, _utf8mb3 0xD0B0D091D0B2) from dual END INPUT select 18446744073709551615, 18446744073709551615 DIV 1, 18446744073709551615 DIV 2; @@ -8690,7 +8690,7 @@ INPUT select mbrcovers(ST_GeomFromText("polygon((2 2, 10 2, 10 10, 2 10, 2 2))"), ST_GeomFromText("point(2 4)")); END OUTPUT -select mbrcovers(ST_GeomFromText('polygon((2 2, 10 2, 10 10, 2 10, 2 2))'), ST_GeomFromText('point(2 4)')) from dual +select mbrcovers(st_geometryfromtext('polygon((2 2, 10 2, 10 10, 2 10, 2 2))'), st_geometryfromtext('point(2 4)')) from dual END INPUT select uncompress(b) from t1; @@ -8720,13 +8720,13 @@ INPUT select mbrwithin(ST_GeomFromText("point(2 4)"), ST_GeomFromText("linestring(2 0, 2 6)")); END OUTPUT -select mbrwithin(ST_GeomFromText('point(2 4)'), ST_GeomFromText('linestring(2 0, 2 6)')) from dual +select mbrwithin(st_geometryfromtext('point(2 4)'), st_geometryfromtext('linestring(2 0, 2 6)')) from dual END INPUT select st_astext(st_makeenvelope(st_geomfromtext('point(0 0)'), st_geomfromtext('point(-22 -11)'))); END OUTPUT -select st_astext(st_makeenvelope(st_geomfromtext('point(0 0)'), st_geomfromtext('point(-22 -11)'))) from dual +select st_astext(st_makeenvelope(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(-22 -11)'))) from dual END INPUT select _latin1'B' collate latin1_general_ci between _latin1'a' collate latin1_bin and _latin1'b'; @@ -8876,7 +8876,7 @@ INPUT select st_astext(st_symdifference(ST_GeometryFromText('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))'))); END OUTPUT -select st_astext(st_symdifference(ST_GeometryFromText('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))'))) from dual +select st_astext(st_symdifference(st_geometryfromtext('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), st_geometryfromtext('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))'))) from dual END INPUT select distinct t1.a from t1,t3 where t1.a=t3.a; @@ -9044,7 +9044,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL "1 1:1" DAY_MINUTE); END OUTPUT -select date_add('1997-12-31 23:59:59', interval '1 1:1' DAY_MINUTE) from dual +select date_add('1997-12-31 23:59:59', interval '1 1:1' day_minute) from dual END INPUT select quote(''"test'); @@ -9074,7 +9074,7 @@ INPUT select st_intersects(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_intersects(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_intersects(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_union(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select t1.* as 'with_alias', (select a from t2 where d > a) as 'x' from t1; @@ -9182,7 +9182,7 @@ INPUT select locate(_utf8 0xD0B1, _utf8 0xD0B0D0B1D0B2); END OUTPUT -select locate(_utf8 0xD0B1, _utf8 0xD0B0D0B1D0B2) from dual +select locate(_utf8mb3 0xD0B1, _utf8mb3 0xD0B0D0B1D0B2) from dual END INPUT select * from t1 where b like 'foob%'; @@ -9211,8 +9211,8 @@ END INPUT select @stamp1:=f2 from t1; END -ERROR -syntax error at position 16 near ':' +OUTPUT +select @stamp1 := f2 from t1 END INPUT select t1.*,t2.* from mysqltest_2.t1,mysqltest_2.t2; @@ -9230,7 +9230,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL "-10000:1" HOUR_MINUTE); END OUTPUT -select date_add('1997-12-31 23:59:59', interval '-10000:1' HOUR_MINUTE) from dual +select date_add('1997-12-31 23:59:59', interval '-10000:1' hour_minute) from dual END INPUT select mod(12.0, 0) as 'NULL'; @@ -9320,7 +9320,7 @@ INPUT select ST_AsText(a) from (select f2 as a from t1 union select f3 from t1) t; END OUTPUT -select ST_AsText(a) from (select f2 as a from t1 union select f3 from t1) as t +select st_astext(a) from (select f2 as a from t1 union select f3 from t1) as t END INPUT select host,db,user,table_name from mysql.tables_priv where user = 'mysqltest_1' order by host,db,user,table_name; @@ -9422,7 +9422,7 @@ INPUT select ST_AsText(ST_GeometryFromWKB(ST_AsWKB(GeometryCollection(POINT(0, 0), MULTIPOINT(point(0, 0), point(1, 1)), LINESTRING(point(0, 0),point(10, 10)), MULTILINESTRING(LINESTRING(point(1, 2), point(1, 3))), POLYGON(LineString(Point(10, 20), Point(1, 1), Point(2, 2), Point(1, 1), Point(10, 20))), MULTIPOLYGON(Polygon(LineString(Point(0, 0), Point(1, 0), Point(1, 1), Point(0, 0)))))))) as Result; END OUTPUT -select ST_AsText(ST_GeometryFromWKB(ST_AsWKB(GeometryCollection(point(0, 0), MULTIPOINT(point(0, 0), point(1, 1)), linestring(point(0, 0), point(10, 10)), MULTILINESTRING(linestring(point(1, 2), point(1, 3))), polygon(linestring(point(10, 20), point(1, 1), point(2, 2), point(1, 1), point(10, 20))), MULTIPOLYGON(polygon(linestring(point(0, 0), point(1, 0), point(1, 1), point(0, 0)))))))) as Result from dual +select st_astext(st_geometryfromwkb(st_asbinary(GeometryCollection(point(0, 0), multipoint(point(0, 0), point(1, 1)), linestring(point(0, 0), point(10, 10)), multilinestring(linestring(point(1, 2), point(1, 3))), polygon(linestring(point(10, 20), point(1, 1), point(2, 2), point(1, 1), point(10, 20))), multipolygon(polygon(linestring(point(0, 0), point(1, 0), point(1, 1), point(0, 0)))))))) as Result from dual END INPUT select s1*0 as s1 from t1 group by s1 having s1 <> 0; @@ -9440,7 +9440,7 @@ INPUT select st_distance_sphere(st_geomfromtext('point(-120 45)'), st_geomfromtext('point(30.24 68.37)')); END OUTPUT -select st_distance_sphere(st_geomfromtext('point(-120 45)'), st_geomfromtext('point(30.24 68.37)')) from dual +select st_distance_sphere(st_geometryfromtext('point(-120 45)'), st_geometryfromtext('point(30.24 68.37)')) from dual END INPUT select group_concat(distinct s1 order by s2) from t1; @@ -9494,7 +9494,7 @@ INPUT select st_difference((convert(st_polygonfromwkb(linestring(point(1,1))) using gb18030)), st_geomcollfromwkb(point(1,1))); END OUTPUT -select st_difference(convert(st_polygonfromwkb(linestring(point(1, 1))) using gb18030), st_geomcollfromwkb(point(1, 1))) from dual +select st_difference(convert(st_polygonfromwkb(linestring(point(1, 1))) using gb18030), st_geometrycollectionfromwkb(point(1, 1))) from dual END INPUT select convert(_koi8r'�' using utf8mb4) < convert(_koi8r'�' using utf8mb4); @@ -9512,7 +9512,7 @@ INPUT select ST_DISTANCE(ST_GeomFromText('polygon((0 0, 3 6, 6 3, 0 0))'), ST_GeomFromText('polygon((2 2, 3 4, 4 3, 2 2))')); END OUTPUT -select ST_DISTANCE(ST_GeomFromText('polygon((0 0, 3 6, 6 3, 0 0))'), ST_GeomFromText('polygon((2 2, 3 4, 4 3, 2 2))')) from dual +select ST_DISTANCE(st_geometryfromtext('polygon((0 0, 3 6, 6 3, 0 0))'), st_geometryfromtext('polygon((2 2, 3 4, 4 3, 2 2))')) from dual END INPUT select (with recursive dt as (select t1.a as a union select a+1 from dt where a<10) select concat(count(*), ' - ', avg(dt.a)) from dt ) as subq from t1; @@ -9554,13 +9554,13 @@ INPUT select mbrwithin(ST_GeomFromText("point(2 4)"), ST_GeomFromText("linestring(2 0, 2 4)")); END OUTPUT -select mbrwithin(ST_GeomFromText('point(2 4)'), ST_GeomFromText('linestring(2 0, 2 4)')) from dual +select mbrwithin(st_geometryfromtext('point(2 4)'), st_geometryfromtext('linestring(2 0, 2 4)')) from dual END INPUT select ST_astext(st_symdifference(ST_GeomFromText('multipoint(2 2, 3 3)'), ST_GeomFromText('multipoint(0 0, 4 4)'))); END OUTPUT -select ST_astext(st_symdifference(ST_GeomFromText('multipoint(2 2, 3 3)'), ST_GeomFromText('multipoint(0 0, 4 4)'))) from dual +select st_astext(st_symdifference(st_geometryfromtext('multipoint(2 2, 3 3)'), st_geometryfromtext('multipoint(0 0, 4 4)'))) from dual END INPUT select count(b), sum(b), avg(b), std(b), min(b), max(b), bit_and(b), bit_or(b) from t1; @@ -9704,7 +9704,7 @@ INPUT select ST_astext(st_difference(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_difference(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))); END OUTPUT -select ST_astext(st_difference(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_difference(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))) from dual +select st_astext(st_difference(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_difference(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)')))) from dual END INPUT select * from information_schema.user_privileges where grantee like "'mysqltest_8'%"; @@ -9944,7 +9944,7 @@ INPUT select 0, ST_Within(ST_GeomFromText('LINESTRING(15 15, 50 50, 60 60)'), ST_GeomFromText('POLYGON((10 10,30 20,20 40, 10 10))')); END OUTPUT -select 0, ST_Within(ST_GeomFromText('LINESTRING(15 15, 50 50, 60 60)'), ST_GeomFromText('POLYGON((10 10,30 20,20 40, 10 10))')) from dual +select 0, ST_Within(st_geometryfromtext('LINESTRING(15 15, 50 50, 60 60)'), st_geometryfromtext('POLYGON((10 10,30 20,20 40, 10 10))')) from dual END INPUT select a from t1 as t1 left join t1 as t2 using (a) left join t1 as t3 using (a) left join t1 as t4 using (a) left join t1 as t5 using (a) left join t1 as t6 using (a) left join t1 as t7 using (a) left join t1 as t8 using (a) left join t1 as t9 using (a) left join t1 as t10 using (a) left join t1 as t11 using (a) left join t1 as t12 using (a) left join t1 as t13 using (a) left join t1 as t14 using (a) left join t1 as t15 using (a) left join t1 as t16 using (a) left join t1 as t17 using (a) left join t1 as t18 using (a) left join t1 as t19 using (a) left join t1 as t20 using (a) left join t1 as t21 using (a) left join t1 as t22 using (a) left join t1 as t23 using (a) left join t1 as t24 using (a) left join t1 as t25 using (a) left join t1 as t26 using (a) left join t1 as t27 using (a) left join t1 as t28 using (a) left join t1 as t29 using (a) left join t1 as t30 using (a) left join t1 as t31 using (a) left join t1 as t32 using (a) left join t1 as t33 using (a) left join t1 as t34 using (a) left join t1 as t35 using (a) left join t1 as t36 using (a) left join t1 as t37 using (a) left join t1 as t38 using (a) left join t1 as t39 using (a) left join t1 as t40 using (a) left join t1 as t41 using (a) left join t1 as t42 using (a) left join t1 as t43 using (a) left join t1 as t44 using (a) left join t1 as t45 using (a) left join t1 as t46 using (a) left join t1 as t47 using (a) left join t1 as t48 using (a) left join t1 as t49 using (a) left join t1 as t50 using (a) left join t1 as t51 using (a) left join t1 as t52 using (a) left join t1 as t53 using (a) left join t1 as t54 using (a) left join t1 as t55 using (a) left join t1 as t56 using (a) left join t1 as t57 using (a) left join t1 as t58 using (a) left join t1 as t59 using (a) left join t1 as t60 using (a) left join t1 as t61 using (a) left join t1 as t62 using (a) left join t1 as t63 using (a) left join t1 as t64 using (a) left join t1 as t65 using (a); @@ -9962,7 +9962,7 @@ INPUT select DATE_ADD('20071108181000', INTERVAL 1 DAY); END OUTPUT -select DATE_ADD('20071108181000', interval 1 DAY) from dual +select date_add('20071108181000', interval 1 day) from dual END INPUT select sha('abc'); @@ -9974,7 +9974,7 @@ INPUT select st_overlaps(ST_GeometryFromText('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))')); END OUTPUT -select st_overlaps(ST_GeometryFromText('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))')) from dual +select st_overlaps(st_geometryfromtext('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), st_geometryfromtext('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))')) from dual END INPUT select 1, min(1) from t1i where 1=99; @@ -10100,7 +10100,7 @@ INPUT select 1, ST_Intersects(ST_GeomFromText('LINESTRING(15 15, 50 50)'), ST_GeomFromText('LINESTRING(50 15, 15 50)')); END OUTPUT -select 1, ST_Intersects(ST_GeomFromText('LINESTRING(15 15, 50 50)'), ST_GeomFromText('LINESTRING(50 15, 15 50)')) from dual +select 1, ST_Intersects(st_geometryfromtext('LINESTRING(15 15, 50 50)'), st_geometryfromtext('LINESTRING(50 15, 15 50)')) from dual END INPUT select UNIQUE_CONSTRAINT_NAME from information_schema.referential_constraints where constraint_schema = schema(); @@ -10196,7 +10196,7 @@ INPUT select ST_Touches(ST_GeomFromText('LINESTRING(15 5,15 25)'),ST_GeomFromText('LINESTRING(15 5,15 25)')) as result; END OUTPUT -select ST_Touches(ST_GeomFromText('LINESTRING(15 5,15 25)'), ST_GeomFromText('LINESTRING(15 5,15 25)')) as result from dual +select ST_Touches(st_geometryfromtext('LINESTRING(15 5,15 25)'), st_geometryfromtext('LINESTRING(15 5,15 25)')) as result from dual END INPUT select distinct a from t1 order by rand(10); @@ -10244,7 +10244,7 @@ INPUT select st_touches(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_touches(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_touches(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select column_name as 'Field',column_type as 'Type',is_nullable as 'Null',column_key as 'Key',column_default as 'Default',extra as 'Extra' from information_schema.columns where table_schema='mysqltest_db1' and table_name='t_column_priv_only'; @@ -10340,7 +10340,7 @@ INPUT select ST_astext(ST_geomfromwkb(ST_AsWKB(st_intersection(linestring(point(-59,82),point(32,29)), point(2,-5))))) as result; END OUTPUT -select ST_astext(ST_geomfromwkb(ST_AsWKB(st_intersection(linestring(point(-59, 82), point(32, 29)), point(2, -5))))) as result from dual +select st_astext(st_geometryfromwkb(st_asbinary(st_intersection(linestring(point(-59, 82), point(32, 29)), point(2, -5))))) as result from dual END INPUT select event_name from performance_schema.events_stages_history_long where thread_id = @con1_thread_id and event_name like '%Opening %tables' or event_name like '%Locking system tables' or event_name like '%System lock'; @@ -10430,7 +10430,7 @@ INPUT select ST_astext(ST_Intersection(ST_GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), ST_GeomFromText('POINT(20 20)'))); END OUTPUT -select ST_astext(ST_Intersection(ST_GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), ST_GeomFromText('POINT(20 20)'))) from dual +select st_astext(ST_Intersection(st_geometryfromtext('POLYGON((0 0, 50 45, 40 50, 0 0))'), st_geometryfromtext('POINT(20 20)'))) from dual END INPUT select group_concat(bar order by concat(bar,bar)) from t1; @@ -10544,7 +10544,7 @@ INPUT select mbrtouches(ST_GeomFromText("point(2 4)"), ST_GeomFromText("linestring(2 0, 2 6)")); END OUTPUT -select mbrtouches(ST_GeomFromText('point(2 4)'), ST_GeomFromText('linestring(2 0, 2 6)')) from dual +select mbrtouches(st_geometryfromtext('point(2 4)'), st_geometryfromtext('linestring(2 0, 2 6)')) from dual END INPUT select substring_index('aaaaaaaaa1','aa',-1); @@ -10814,7 +10814,7 @@ INPUT select 1, @empty_geom = st_geomfromtext('geometrycollection()') as equal; END OUTPUT -select 1, @empty_geom = st_geomfromtext('geometrycollection()') as equal from dual +select 1, @empty_geom = st_geometryfromtext('geometrycollection()') as equal from dual END INPUT select hex(convert(char(2557 using latin1) using utf8)); @@ -10862,7 +10862,7 @@ INPUT select date_add(time,INTERVAL 1 SECOND) from t1; END OUTPUT -select date_add(`time`, interval 1 SECOND) from t1 +select date_add(`time`, interval 1 second) from t1 END INPUT select * from v3; @@ -11102,7 +11102,7 @@ INPUT select mbrwithin(ST_GeomFromText("linestring(0 1, 3 1)"), ST_GeomFromText("polygon((0 0, 3 0, 3 3, 0 3, 0 0))")); END OUTPUT -select mbrwithin(ST_GeomFromText('linestring(0 1, 3 1)'), ST_GeomFromText('polygon((0 0, 3 0, 3 3, 0 3, 0 0))')) from dual +select mbrwithin(st_geometryfromtext('linestring(0 1, 3 1)'), st_geometryfromtext('polygon((0 0, 3 0, 3 3, 0 3, 0 0))')) from dual END INPUT select a, count(a) from t1 group by a with rollup; @@ -11186,7 +11186,7 @@ INPUT select (_utf8 X'616263FF'); END OUTPUT -select _utf8 X'616263FF' from dual +select _utf8mb3 X'616263FF' from dual END INPUT select f1, group_concat(f1+1) from t1 group by f1 with rollup; @@ -11204,7 +11204,7 @@ INPUT select st_overlaps(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_overlaps(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_overlaps(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select * from (t4 natural join t5) natural join t1 where t4.y > 7; @@ -11342,7 +11342,7 @@ INPUT select ST_astext(ST_Intersection(ST_GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), ST_GeomFromText('POLYGON((50 5, 55 10, 0 45, 50 5))'))); END OUTPUT -select ST_astext(ST_Intersection(ST_GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), ST_GeomFromText('POLYGON((50 5, 55 10, 0 45, 50 5))'))) from dual +select st_astext(ST_Intersection(st_geometryfromtext('POLYGON((0 0, 50 45, 40 50, 0 0))'), st_geometryfromtext('POLYGON((50 5, 55 10, 0 45, 50 5))'))) from dual END INPUT select strcmp(concat(utc_date(),' ',utc_time()),utc_timestamp())=0; @@ -11570,7 +11570,7 @@ INPUT select ST_astext(g) from t1 where ST_Contains(ST_GeomFromText('POLYGON((5 1, 7 1, 7 7, 5 7, 3 3, 5 3, 5 1), (5.01 3.01, 6 5, 9 5, 8 3, 5.01 3.01))'), g); END OUTPUT -select ST_astext(g) from t1 where ST_Contains(ST_GeomFromText('POLYGON((5 1, 7 1, 7 7, 5 7, 3 3, 5 3, 5 1), (5.01 3.01, 6 5, 9 5, 8 3, 5.01 3.01))'), g) +select st_astext(g) from t1 where ST_Contains(st_geometryfromtext('POLYGON((5 1, 7 1, 7 7, 5 7, 3 3, 5 3, 5 1), (5.01 3.01, 6 5, 9 5, 8 3, 5.01 3.01))'), g) END INPUT select week(19981231,0) as '0', week(19981231,1) as '1', week(19981231,2) as '2', week(19981231,3) as '3', week(19981231,4) as '4', week(19981231,5) as '5', week(19981231,6) as '6', week(19981231,7) as '7'; @@ -11594,7 +11594,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL "1:1:1" HOUR_SECOND); END OUTPUT -select date_add('1997-12-31 23:59:59', interval '1:1:1' HOUR_SECOND) from dual +select date_add('1997-12-31 23:59:59', interval '1:1:1' hour_second) from dual END INPUT select * from v2a; @@ -11654,13 +11654,13 @@ INPUT select 1, ST_Intersects(ST_GeomFromText('POLYGON((0 0,20 0,20 20,0 20,0 0))'), ST_GeomFromText('POLYGON((10 10,30 10,30 30,10 30,10 10))')); END OUTPUT -select 1, ST_Intersects(ST_GeomFromText('POLYGON((0 0,20 0,20 20,0 20,0 0))'), ST_GeomFromText('POLYGON((10 10,30 10,30 30,10 30,10 10))')) from dual +select 1, ST_Intersects(st_geometryfromtext('POLYGON((0 0,20 0,20 20,0 20,0 0))'), st_geometryfromtext('POLYGON((10 10,30 10,30 30,10 30,10 10))')) from dual END INPUT select st_astext(st_union(ST_GeometryFromText('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))'))); END OUTPUT -select st_astext(st_union(ST_GeometryFromText('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))'))) from dual +select st_astext(st_union(st_geometryfromtext('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), st_geometryfromtext('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))'))) from dual END INPUT select _latin1'a' regexp _latin1'A' collate latin1_general_ci; @@ -11714,7 +11714,7 @@ INPUT select timestampdiff(SQL_TSI_WEEK, '2001-02-01', '2001-05-01') as a; END OUTPUT -select timestampdiff(SQL_TSI_WEEK, '2001-02-01', '2001-05-01') as a from dual +select timestampdiff(week, '2001-02-01', '2001-05-01') as a from dual END INPUT select a, MAX(b), INTERVAL (MAX(b), 1,3,10,30,39,40,50,60,100,1000) from t1 group by a; @@ -11786,7 +11786,7 @@ INPUT select ST_DISTANCE(ST_GeomFromText('linestring(0 0, 3 6, 6 3, 0 0)'), ST_GeomFromText('polygon((2 2, 3 4, 4 3, 2 2))')); END OUTPUT -select ST_DISTANCE(ST_GeomFromText('linestring(0 0, 3 6, 6 3, 0 0)'), ST_GeomFromText('polygon((2 2, 3 4, 4 3, 2 2))')) from dual +select ST_DISTANCE(st_geometryfromtext('linestring(0 0, 3 6, 6 3, 0 0)'), st_geometryfromtext('polygon((2 2, 3 4, 4 3, 2 2))')) from dual END INPUT select distinct t1_outer.a from t1 t1_outer order by (select max(t1_outer.b+t1_inner.b) from t1 t1_inner); @@ -11930,7 +11930,7 @@ INPUT select date_add("1997-12-31 23:59:59.000002",INTERVAL "999999" MICROSECOND); END OUTPUT -select date_add('1997-12-31 23:59:59.000002', interval '999999' MICROSECOND) from dual +select date_add('1997-12-31 23:59:59.000002', interval '999999' microsecond) from dual END INPUT select inet_aton("122.226."); @@ -12050,7 +12050,7 @@ INPUT select ST_astext(ST_MPointFromWKB(ST_AsWKB(MultiPoint(Point('0', '0'),Point('-0', '0'), Point('0', '-0'))))) as result; END OUTPUT -select ST_astext(ST_MPointFromWKB(ST_AsWKB(MultiPoint(point('0', '0'), point('-0', '0'), point('0', '-0'))))) as result from dual +select st_astext(st_multipointfromwkb(st_asbinary(multipoint(point('0', '0'), point('-0', '0'), point('0', '-0'))))) as result from dual END INPUT select IF(0,"ERROR","this"),IF(1,"is","ERROR"),IF(NULL,"ERROR","a"),IF(1,2,3)|0,IF(1,2.0,3.0)+0; @@ -12068,25 +12068,25 @@ INPUT select ST_Crosses(ST_GeomFromText('MULTIPOINT(1 0,15 0,10 10)'),ST_GeomFromText('MULTILINESTRING((15 0,20 0,20 20,15 0))')) as result; END OUTPUT -select ST_Crosses(ST_GeomFromText('MULTIPOINT(1 0,15 0,10 10)'), ST_GeomFromText('MULTILINESTRING((15 0,20 0,20 20,15 0))')) as result from dual +select ST_Crosses(st_geometryfromtext('MULTIPOINT(1 0,15 0,10 10)'), st_geometryfromtext('MULTILINESTRING((15 0,20 0,20 20,15 0))')) as result from dual END INPUT select ST_AsText(a) from t1 where MBRContains(ST_GeomFromText('Polygon((0 0, 0 2, 2 2, 2 0, 0 0))'), a) or MBRContains(ST_GeomFromText('Polygon((2 2, 2 5, 5 5, 5 2, 2 2))'), a); END OUTPUT -select ST_AsText(a) from t1 where MBRContains(ST_GeomFromText('Polygon((0 0, 0 2, 2 2, 2 0, 0 0))'), a) or MBRContains(ST_GeomFromText('Polygon((2 2, 2 5, 5 5, 5 2, 2 2))'), a) +select st_astext(a) from t1 where MBRContains(st_geometryfromtext('Polygon((0 0, 0 2, 2 2, 2 0, 0 0))'), a) or MBRContains(st_geometryfromtext('Polygon((2 2, 2 5, 5 5, 5 2, 2 2))'), a) END INPUT select date_add("1997-12-31 23:59:59",INTERVAL "10000:1" DAY_HOUR); END OUTPUT -select date_add('1997-12-31 23:59:59', interval '10000:1' DAY_HOUR) from dual +select date_add('1997-12-31 23:59:59', interval '10000:1' day_hour) from dual END INPUT select st_contains(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_contains(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_contains(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_union(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select hex(soundex(_ucs2 0x041004110412)); @@ -12176,7 +12176,7 @@ INPUT select date_add("2001-01-01 23:59:59",INTERVAL -2000 YEAR); END OUTPUT -select date_add('2001-01-01 23:59:59', interval (-2000) YEAR) from dual +select date_add('2001-01-01 23:59:59', interval -2000 year) from dual END INPUT select avg(2) from t1; @@ -12194,7 +12194,7 @@ INPUT select st_disjoint(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_disjoint(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_disjoint(st_union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select cast(-5 as unsigned) -1, cast(-5 as unsigned) + 1; @@ -12290,7 +12290,7 @@ INPUT select date_sub("1998-01-01 00:00:00",INTERVAL "1:1:1" HOUR_SECOND); END OUTPUT -select date_sub('1998-01-01 00:00:00', interval '1:1:1' HOUR_SECOND) from dual +select date_sub('1998-01-01 00:00:00', interval '1:1:1' hour_second) from dual END INPUT select round(std(s1/s2), 17) from bug22555; @@ -12368,7 +12368,7 @@ INPUT select concat(a, if(b>10, _utf8 0x78, _utf8 0x79)) from t1; END OUTPUT -select concat(a, if(b > 10, _utf8 0x78, _utf8 0x79)) from t1 +select concat(a, if(b > 10, _utf8mb3 0x78, _utf8mb3 0x79)) from t1 END INPUT select cast(19999999999999999999 as signed); @@ -12476,7 +12476,7 @@ INPUT select st_touches(ST_GeomFromText('polygon((0 0, 2 2, 0 4, 0 0))'), ST_GeomFromText('polygon((1 1, 1 0, 2 0, 1 1))')); END OUTPUT -select st_touches(ST_GeomFromText('polygon((0 0, 2 2, 0 4, 0 0))'), ST_GeomFromText('polygon((1 1, 1 0, 2 0, 1 1))')) from dual +select st_touches(st_geometryfromtext('polygon((0 0, 2 2, 0 4, 0 0))'), st_geometryfromtext('polygon((1 1, 1 0, 2 0, 1 1))')) from dual END INPUT select t1.* as 'with_alias', (select a from t2 where d > a) from t1; @@ -12494,7 +12494,7 @@ INPUT select mbrcoveredby(ST_GeomFromText("point(2 4)"), ST_GeomFromText("polygon((2 2, 10 2, 10 10, 2 10, 2 2))")); END OUTPUT -select mbrcoveredby(ST_GeomFromText('point(2 4)'), ST_GeomFromText('polygon((2 2, 10 2, 10 10, 2 10, 2 2))')) from dual +select mbrcoveredby(st_geometryfromtext('point(2 4)'), st_geometryfromtext('polygon((2 2, 10 2, 10 10, 2 10, 2 2))')) from dual END INPUT select date,format,str_to_date(date, format) as str_to_date from t1; @@ -12506,7 +12506,7 @@ INPUT select ST_astext(st_symdifference(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_difference(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))); END OUTPUT -select ST_astext(st_symdifference(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_difference(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))) from dual +select st_astext(st_symdifference(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_difference(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)')))) from dual END INPUT select user() like _latin1"%@%"; @@ -12590,7 +12590,7 @@ INPUT select ST_DISTANCE(ST_GeomFromText('polygon((0 0, 1 2, 2 1, 0 0))'), ST_GeomFromText('polygon((2 2, 3 4, 4 3, 2 2))')); END OUTPUT -select ST_DISTANCE(ST_GeomFromText('polygon((0 0, 1 2, 2 1, 0 0))'), ST_GeomFromText('polygon((2 2, 3 4, 4 3, 2 2))')) from dual +select ST_DISTANCE(st_geometryfromtext('polygon((0 0, 1 2, 2 1, 0 0))'), st_geometryfromtext('polygon((2 2, 3 4, 4 3, 2 2))')) from dual END INPUT select p from t1; @@ -12626,7 +12626,7 @@ INPUT select length(_utf8 0xD0B1), bit_length(_utf8 0xD0B1), char_length(_utf8 0xD0B1); END OUTPUT -select length(_utf8 0xD0B1), bit_length(_utf8 0xD0B1), char_length(_utf8 0xD0B1) from dual +select length(_utf8mb3 0xD0B1), bit_length(_utf8mb3 0xD0B1), char_length(_utf8mb3 0xD0B1) from dual END INPUT select @@keycache1.key_buffer_size; @@ -12794,7 +12794,7 @@ INPUT select version()>=_utf8"3.23.29"; END OUTPUT -select version() >= _utf8 '3.23.29' from dual +select version() >= _utf8mb3 '3.23.29' from dual END INPUT select table_name, column_name, privileges from information_schema.columns where table_schema = 'mysqltest' and table_name = 'v1' order by table_name, column_name; @@ -12812,7 +12812,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL -100000 DAY); END OUTPUT -select date_add('1997-12-31 23:59:59', interval (-100000) DAY) from dual +select date_add('1997-12-31 23:59:59', interval -100000 day) from dual END INPUT select dummy1,count(distinct id) from t1 group by dummy1; @@ -13142,7 +13142,7 @@ INPUT select ST_geomfromtext(col9,col89) as a from t1; END OUTPUT -select ST_geomfromtext(col9, col89) as a from t1 +select st_geometryfromtext(col9, col89) as a from t1 END INPUT select * from t4 order by b,a limit 3; @@ -13196,7 +13196,7 @@ INPUT select mbrtouches(ST_GeomFromText("point(2 4)"), ST_GeomFromText("polygon((2 2, 6 2, 6 6, 2 6, 2 2))")); END OUTPUT -select mbrtouches(ST_GeomFromText('point(2 4)'), ST_GeomFromText('polygon((2 2, 6 2, 6 6, 2 6, 2 2))')) from dual +select mbrtouches(st_geometryfromtext('point(2 4)'), st_geometryfromtext('polygon((2 2, 6 2, 6 6, 2 6, 2 2))')) from dual END INPUT select * from t1, t2 where t1.start between t2.ctime1 and t2.ctime2; @@ -13490,7 +13490,7 @@ INPUT select date_sub("1998-01-02",INTERVAL 31 DAY); END OUTPUT -select date_sub('1998-01-02', interval 31 DAY) from dual +select date_sub('1998-01-02', interval 31 day) from dual END INPUT select * from db where user = 'mysqltest_1'; @@ -13514,7 +13514,7 @@ INPUT select date_add("1997-12-31 23:59:59.000002",INTERVAL "10000:99:99.999999" HOUR_MICROSECOND); END OUTPUT -select date_add('1997-12-31 23:59:59.000002', interval '10000:99:99.999999' HOUR_MICROSECOND) from dual +select date_add('1997-12-31 23:59:59.000002', interval '10000:99:99.999999' hour_microsecond) from dual END INPUT select @@event_scheduler; @@ -13532,13 +13532,13 @@ INPUT select st_touches(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')); END OUTPUT -select st_touches(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')) from dual +select st_touches(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)')) from dual END INPUT select left(_utf8 0xD0B0D0B1D0B2,1); END OUTPUT -select left(_utf8 0xD0B0D0B1D0B2, 1) from dual +select left(_utf8mb3 0xD0B0D0B1D0B2, 1) from dual END INPUT select * from information_schema.SCHEMA_PRIVILEGES where grantee like '%mysqltest_1%'; @@ -13580,7 +13580,7 @@ INPUT select 0, ST_Intersects(ST_GeomFromText('POLYGON((0 0,20 10,10 30, 0 0))'), ST_GeomFromText('POLYGON((10 40, 40 50, 20 70, 10 40))')); END OUTPUT -select 0, ST_Intersects(ST_GeomFromText('POLYGON((0 0,20 10,10 30, 0 0))'), ST_GeomFromText('POLYGON((10 40, 40 50, 20 70, 10 40))')) from dual +select 0, ST_Intersects(st_geometryfromtext('POLYGON((0 0,20 10,10 30, 0 0))'), st_geometryfromtext('POLYGON((10 40, 40 50, 20 70, 10 40))')) from dual END INPUT select a, group_concat(distinct b order by b) from t1 group by a with rollup; @@ -13640,7 +13640,7 @@ INPUT select ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),MULTIPOINT(0 14,-9 -11),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20)),MULTIPOINT(16 1,-9 -17,-16 6,-17 3),POINT(-18 13))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(POINT(7 0),MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17),MULTIPOINT(-9 -5,5 15,12 -11,12 11))'))) as result; END OUTPUT -select ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),MULTIPOINT(0 14,-9 -11),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20)),MULTIPOINT(16 1,-9 -17,-16 6,-17 3),POINT(-18 13))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(POINT(7 0),MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17),MULTIPOINT(-9 -5,5 15,12 -11,12 11))'))) as result from dual +select st_astext(ST_UNION(st_geometryfromtext('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),MULTIPOINT(0 14,-9 -11),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20)),MULTIPOINT(16 1,-9 -17,-16 6,-17 3),POINT(-18 13))'), st_geometryfromtext('GEOMETRYCOLLECTION(POINT(7 0),MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17),MULTIPOINT(-9 -5,5 15,12 -11,12 11))'))) as result from dual END INPUT select 1 from t1 group by b; @@ -13748,13 +13748,13 @@ INPUT select ST_Crosses(ST_GeomFromText('MULTIPOINT(1 0,15 0,10 10)'),ST_GeomFromText('LINESTRING(15 0,20 0,10 10,20 20)')) as result; END OUTPUT -select ST_Crosses(ST_GeomFromText('MULTIPOINT(1 0,15 0,10 10)'), ST_GeomFromText('LINESTRING(15 0,20 0,10 10,20 20)')) as result from dual +select ST_Crosses(st_geometryfromtext('MULTIPOINT(1 0,15 0,10 10)'), st_geometryfromtext('LINESTRING(15 0,20 0,10 10,20 20)')) as result from dual END INPUT select date_sub("0000-00-00 00:00:00",INTERVAL 1 SECOND); END OUTPUT -select date_sub('0000-00-00 00:00:00', interval 1 SECOND) from dual +select date_sub('0000-00-00 00:00:00', interval 1 second) from dual END INPUT select table_name, index_type from information_schema.statistics where table_schema = 'test' and table_name = 'tm' and index_name = 'v' order by table_name; @@ -13783,8 +13783,8 @@ END INPUT select hex(a) a, hex(@u:=convert(a using utf8)) b, hex(convert(@u using big5)) c from t1 order by a; END -ERROR -syntax error at position 25 near ':' +OUTPUT +select hex(a) as a, hex(@u := convert(a using utf8)) as b, hex(convert(@u using big5)) as c from t1 order by a asc END INPUT select t,count(t) from t1 group by t order by t limit 10; @@ -13814,13 +13814,13 @@ INPUT select date_sub("1998-01-01 00:00:00.000001",INTERVAL "1 1:1:1.000002" DAY_MICROSECOND); END OUTPUT -select date_sub('1998-01-01 00:00:00.000001', interval '1 1:1:1.000002' DAY_MICROSECOND) from dual +select date_sub('1998-01-01 00:00:00.000001', interval '1 1:1:1.000002' day_microsecond) from dual END INPUT select date_add(date,INTERVAL "1 1:1" DAY_MINUTE) from t1; END OUTPUT -select date_add(`date`, interval '1 1:1' DAY_MINUTE) from t1 +select date_add(`date`, interval '1 1:1' day_minute) from t1 END INPUT select hex(char(0xFF using utf8)); @@ -13832,7 +13832,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL -100000 YEAR); END OUTPUT -select date_add('1997-12-31 23:59:59', interval (-100000) YEAR) from dual +select date_add('1997-12-31 23:59:59', interval -100000 year) from dual END INPUT select substring_index('the king of the the hill',' ',-2); @@ -14078,7 +14078,7 @@ INPUT select mbrtouches(ST_GeomFromText("point (2 4)"), ST_GeomFromText("point (2 4)")); END OUTPUT -select mbrtouches(ST_GeomFromText('point (2 4)'), ST_GeomFromText('point (2 4)')) from dual +select mbrtouches(st_geometryfromtext('point (2 4)'), st_geometryfromtext('point (2 4)')) from dual END INPUT select distinct t1.a1,t2.a1 from t1,t2; @@ -14240,7 +14240,7 @@ INPUT select date_add("1997-12-31",INTERVAL 1 SECOND); END OUTPUT -select date_add('1997-12-31', interval 1 SECOND) from dual +select date_add('1997-12-31', interval 1 second) from dual END INPUT select 'a' = 'a ', 'a' < 'a ', 'a' > 'a '; @@ -14282,7 +14282,7 @@ INPUT select date_sub("0050-01-01 00:00:01",INTERVAL 2 SECOND); END OUTPUT -select date_sub('0050-01-01 00:00:01', interval 2 SECOND) from dual +select date_sub('0050-01-01 00:00:01', interval 2 second) from dual END INPUT select t1.f1,count(distinct t2.f2),count(distinct 1,NULL) from t1 left join t2 on t1.f1=t2.f1 group by t1.f1; @@ -14366,7 +14366,7 @@ INPUT select ST_Touches(ST_GeomFromText('POLYGON((0 0,5 0,5 5,0 5,0 0),(1 1,3 1,3 3,1 3,1 1))'),ST_GeomFromText('LINESTRING(3 3,10 10)')) as result; END OUTPUT -select ST_Touches(ST_GeomFromText('POLYGON((0 0,5 0,5 5,0 5,0 0),(1 1,3 1,3 3,1 3,1 1))'), ST_GeomFromText('LINESTRING(3 3,10 10)')) as result from dual +select ST_Touches(st_geometryfromtext('POLYGON((0 0,5 0,5 5,0 5,0 0),(1 1,3 1,3 3,1 3,1 1))'), st_geometryfromtext('LINESTRING(3 3,10 10)')) as result from dual END INPUT select strcmp(_koi8r'a', _koi8r'A' COLLATE koi8r_general_ci); @@ -14426,7 +14426,7 @@ INPUT select i from t1 where a=repeat(_utf8 'a',200); END OUTPUT -select i from t1 where a = repeat(_utf8 'a', 200) +select i from t1 where a = repeat(_utf8mb3 'a', 200) END INPUT select time_format('100:00:00', '%H %k %h %I %l'); @@ -14570,7 +14570,7 @@ INPUT select ST_AsText(ST_PolygonFromText('POLYGON((10 10,20 10,20 20,10 20, 10 10))')); END OUTPUT -select ST_AsText(ST_PolygonFromText('POLYGON((10 10,20 10,20 20,10 20, 10 10))')) from dual +select st_astext(st_polygonfromtext('POLYGON((10 10,20 10,20 20,10 20, 10 10))')) from dual END INPUT select 10.0+'10'; @@ -14672,7 +14672,7 @@ INPUT select ST_contains(ST_GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)), ((6 6, 6 11, 11 11, 11 6, 6 6)))'), ST_GeomFromText('POINT(5 10)')); END OUTPUT -select ST_contains(ST_GeomFromText('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)), ((6 6, 6 11, 11 11, 11 6, 6 6)))'), ST_GeomFromText('POINT(5 10)')) from dual +select ST_contains(st_geometryfromtext('MULTIPOLYGON(((0 0, 0 5, 5 5, 5 0, 0 0)), ((6 6, 6 11, 11 11, 11 6, 6 6)))'), st_geometryfromtext('POINT(5 10)')) from dual END INPUT select group_concat(c1 order by binary c1 separator '') from t1 group by c1 collate utf16_unicode_520_ci; @@ -14684,7 +14684,7 @@ INPUT select collation(charset(_utf8'a')), collation(collation(_utf8'a')); END OUTPUT -select collation(charset(_utf8 'a')), collation(collation(_utf8 'a')) from dual +select collation(charset(_utf8mb3 'a')), collation(collation(_utf8mb3 'a')) from dual END INPUT select last_day('2000-02-05') as f1, last_day('2002-12-31') as f2, last_day('2003-03-32') as f3, last_day('2003-04-01') as f4, last_day('2001-01-01 01:01:01') as f5, last_day(NULL), last_day('2001-02-12'); @@ -14743,8 +14743,8 @@ END INPUT select hex(@utf83:= CONVERT(@ujis3 USING utf8)); END -ERROR -syntax error at position 19 near ':' +OUTPUT +select hex(@utf83 := convert(@ujis3 using utf8)) from dual END INPUT select * from t1 where MATCH a,b AGAINST('"space model' IN BOOLEAN MODE); @@ -15062,7 +15062,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL "1 1:1:1" DAY_SECOND); END OUTPUT -select date_add('1997-12-31 23:59:59', interval '1 1:1:1' DAY_SECOND) from dual +select date_add('1997-12-31 23:59:59', interval '1 1:1:1' day_second) from dual END INPUT select NULL=NULL,NULL<>NULL,IFNULL(NULL,1.1)+0,IFNULL(NULL,1) | 0; @@ -15092,7 +15092,7 @@ INPUT select date_add(NULL,INTERVAL 100000 SECOND); END OUTPUT -select date_add(null, interval 100000 SECOND) from dual +select date_add(null, interval 100000 second) from dual END INPUT select * from information_schema.tables where table_schema = NULL; @@ -15356,7 +15356,7 @@ INPUT select ST_astext(ST_Intersection(ST_GeomFromText('LINESTRING(0 0, 50 45, 40 50, 0 0)'), ST_GeomFromText('LINESTRING(50 5, 55 10, 0 45, 50 5)'))); END OUTPUT -select ST_astext(ST_Intersection(ST_GeomFromText('LINESTRING(0 0, 50 45, 40 50, 0 0)'), ST_GeomFromText('LINESTRING(50 5, 55 10, 0 45, 50 5)'))) from dual +select st_astext(ST_Intersection(st_geometryfromtext('LINESTRING(0 0, 50 45, 40 50, 0 0)'), st_geometryfromtext('LINESTRING(50 5, 55 10, 0 45, 50 5)'))) from dual END INPUT select load_file("/proc/self/fd/0"); @@ -15380,7 +15380,7 @@ INPUT select st_astext(st_intersection(ST_GeometryFromText('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))'))); END OUTPUT -select st_astext(st_intersection(ST_GeometryFromText('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))'))) from dual +select st_astext(st_intersection(st_geometryfromtext('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), st_geometryfromtext('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))'))) from dual END INPUT select Fld1, max(Fld2) from t1 group by Fld1 having std(Fld2) is not null; @@ -15397,8 +15397,8 @@ END INPUT select t1.time+0,t1.date+0,t1.timestamp+0,concat(date," ",time), t1.quarter+t1.week, t1.year+timestampadd, timestampdiff from t1; END -ERROR -syntax error at position 107 +OUTPUT +select t1.`time` + 0, t1.`date` + 0, t1.`timestamp` + 0, concat(`date`, ' ', `time`), t1.`quarter` + t1.`week`, t1.`year` + `timestampadd`, `timestampdiff` from t1 END INPUT select substring_index('aaaaaaaaa1','aa',1); @@ -15434,13 +15434,13 @@ INPUT select 1, ST_Intersects(ST_GeomFromText('LINESTRING(15 15, 50 50)'), ST_GeomFromText('LINESTRING(16 16, 51 51)')); END OUTPUT -select 1, ST_Intersects(ST_GeomFromText('LINESTRING(15 15, 50 50)'), ST_GeomFromText('LINESTRING(16 16, 51 51)')) from dual +select 1, ST_Intersects(st_geometryfromtext('LINESTRING(15 15, 50 50)'), st_geometryfromtext('LINESTRING(16 16, 51 51)')) from dual END INPUT select ST_astext(st_difference(ST_GeomFromText('multipoint(2 2, 3 3)'), ST_GeomFromText('multipoint(2 2, 3 3)'))); END OUTPUT -select ST_astext(st_difference(ST_GeomFromText('multipoint(2 2, 3 3)'), ST_GeomFromText('multipoint(2 2, 3 3)'))) from dual +select st_astext(st_difference(st_geometryfromtext('multipoint(2 2, 3 3)'), st_geometryfromtext('multipoint(2 2, 3 3)'))) from dual END INPUT select bit_length('; @@ -15458,7 +15458,7 @@ INPUT select st_astext(st_difference(ST_GeometryFromText('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))'))); END OUTPUT -select st_astext(st_difference(ST_GeometryFromText('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))'))) from dual +select st_astext(st_difference(st_geometryfromtext('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), st_geometryfromtext('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))'))) from dual END INPUT select coercibility(col1), collation(col1)from v2; @@ -15506,7 +15506,7 @@ INPUT select st_touches(ST_GeomFromText('point(1 1)'), ST_GeomFromText('point(1 1)')); END OUTPUT -select st_touches(ST_GeomFromText('point(1 1)'), ST_GeomFromText('point(1 1)')) from dual +select st_touches(st_geometryfromtext('point(1 1)'), st_geometryfromtext('point(1 1)')) from dual END INPUT select SUBSTRING_INDEX(_latin1'abcdabcdabcd' COLLATE latin1_bin,_latin1'd',2); @@ -15530,7 +15530,7 @@ INPUT select st_within(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_within(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_within(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_union(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select timediff("1997-01-01 23:59:59.000001","1995-12-31 23:59:59.000002"); @@ -15554,7 +15554,7 @@ INPUT select ST_astext(ST_symdifference(ST_GeomFromText('polygon((0 0, 1 0, 0 1, 0 0))'), ST_GeomFromText('polygon((0 0, 1 0, 0 0,0 1, 0 0))'))) as result; END OUTPUT -select ST_astext(ST_symdifference(ST_GeomFromText('polygon((0 0, 1 0, 0 1, 0 0))'), ST_GeomFromText('polygon((0 0, 1 0, 0 0,0 1, 0 0))'))) as result from dual +select st_astext(ST_symdifference(st_geometryfromtext('polygon((0 0, 1 0, 0 1, 0 0))'), st_geometryfromtext('polygon((0 0, 1 0, 0 0,0 1, 0 0))'))) as result from dual END INPUT select extract(HOUR_SECOND FROM "10:11:12"); @@ -15662,7 +15662,7 @@ INPUT select st_crosses(ST_GeometryFromText('geometrycollection(multipoint(0 0, 1 0, 1 1, 0 1, 0 0))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))')); END OUTPUT -select st_crosses(ST_GeometryFromText('geometrycollection(multipoint(0 0, 1 0, 1 1, 0 1, 0 0))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))')) from dual +select st_crosses(st_geometryfromtext('geometrycollection(multipoint(0 0, 1 0, 1 1, 0 1, 0 0))'), st_geometryfromtext('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))')) from dual END INPUT select a1,a2,min(b),c from t2 where (a2 = 'a') and (c = 'a111') group by a1; @@ -15680,7 +15680,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL NULL MINUTE_SECOND); END OUTPUT -select date_add('1997-12-31 23:59:59', interval null MINUTE_SECOND) from dual +select date_add('1997-12-31 23:59:59', interval null minute_second) from dual END INPUT select timestampdiff(month,'2005-09-11','2004-09-11'); @@ -15740,7 +15740,7 @@ INPUT select st_distance(ST_GeomFromText('geometrycollection(geometrycollection(),polygon((0 0,0 10,10 10,10 0,0 0)))'),ST_GeomFromText('linestring(0 0,10 10)')); END OUTPUT -select st_distance(ST_GeomFromText('geometrycollection(geometrycollection(),polygon((0 0,0 10,10 10,10 0,0 0)))'), ST_GeomFromText('linestring(0 0,10 10)')) from dual +select st_distance(st_geometryfromtext('geometrycollection(geometrycollection(),polygon((0 0,0 10,10 10,10 0,0 0)))'), st_geometryfromtext('linestring(0 0,10 10)')) from dual END INPUT select day("1997-12-31 23:59:59.000001"); @@ -15794,13 +15794,13 @@ INPUT select date_sub("1998-01-01 00:00:00.000001",INTERVAL "1:1:1.000002" HOUR_MICROSECOND); END OUTPUT -select date_sub('1998-01-01 00:00:00.000001', interval '1:1:1.000002' HOUR_MICROSECOND) from dual +select date_sub('1998-01-01 00:00:00.000001', interval '1:1:1.000002' hour_microsecond) from dual END INPUT select hex(soundex(_utf8 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB)); END OUTPUT -select hex(soundex(_utf8 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB)) from dual +select hex(soundex(_utf8mb3 0xE99885E8A788E99A8FE697B6E69BB4E696B0E79A84E696B0E997BB)) from dual END INPUT select a2 from t3 join (t1 join t2 using (a1)) on b=c1 join t4 using (c2); @@ -15842,7 +15842,7 @@ INPUT select 1, ST_Within(ST_GeomFromText('LINESTRING(15 15, 16 16)'), ST_GeomFromText('POLYGON((10 10,30 20,20 40, 10 10))')); END OUTPUT -select 1, ST_Within(ST_GeomFromText('LINESTRING(15 15, 16 16)'), ST_GeomFromText('POLYGON((10 10,30 20,20 40, 10 10))')) from dual +select 1, ST_Within(st_geometryfromtext('LINESTRING(15 15, 16 16)'), st_geometryfromtext('POLYGON((10 10,30 20,20 40, 10 10))')) from dual END INPUT select count(distinct n) from t1; @@ -15932,7 +15932,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL NULL SECOND); END OUTPUT -select date_add('1997-12-31 23:59:59', interval null SECOND) from dual +select date_add('1997-12-31 23:59:59', interval null second) from dual END INPUT select 1 from t1 group by 2; @@ -16058,7 +16058,7 @@ INPUT select ST_astext(st_union( st_intersection( multipoint(point(-1,-1)), point(1,-1) ), st_difference( multipoint(point(-1,1)), point(-1,-1) ))); END OUTPUT -select ST_astext(st_union(st_intersection(multipoint(point(-1, -1)), point(1, -1)), st_difference(multipoint(point(-1, 1)), point(-1, -1)))) from dual +select st_astext(st_union(st_intersection(multipoint(point(-1, -1)), point(1, -1)), st_difference(multipoint(point(-1, 1)), point(-1, -1)))) from dual END INPUT select * from t1, lateral (with qn as (select t1.a) select (select max(a) from qn)) as dt; @@ -16094,7 +16094,7 @@ INPUT select visitor_id,max(ts) as mts from t1 group by visitor_id having mts < DATE_SUB(NOW(),INTERVAL 3 MONTH); END OUTPUT -select visitor_id, max(ts) as mts from t1 group by visitor_id having mts < DATE_SUB(now(), interval 3 MONTH) +select visitor_id, max(ts) as mts from t1 group by visitor_id having mts < date_sub(now(), interval 3 month) END INPUT select f1, f2, if(f1, 40.0, 5.00) from t1 group by f1 order by f2; @@ -16357,8 +16357,8 @@ END INPUT select @category2_id:= 10002; END -ERROR -syntax error at position 22 near ':' +OUTPUT +select @category2_id := 10002 from dual END INPUT select CONVERT("2004-01-22 21:45:33",DATE); @@ -16519,8 +16519,8 @@ END INPUT select @test_compress_string:='string for test compress function aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa '; END -ERROR -syntax error at position 30 near ':' +OUTPUT +select @test_compress_string := 'string for test compress function aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ' from dual END INPUT select length(quote(concat(char(0),"test"))); @@ -16604,7 +16604,7 @@ INPUT select ST_astext(st_union(ST_GeomFromText('multipoint(2 2, 3 3)'), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))); END OUTPUT -select ST_astext(st_union(ST_GeomFromText('multipoint(2 2, 3 3)'), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))) from dual +select st_astext(st_union(st_geometryfromtext('multipoint(2 2, 3 3)'), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)')))) from dual END INPUT select * from t3_trans; @@ -16622,7 +16622,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL "10000 99:99:99" DAY_SECOND); END OUTPUT -select date_add('1997-12-31 23:59:59', interval '10000 99:99:99' DAY_SECOND) from dual +select date_add('1997-12-31 23:59:59', interval '10000 99:99:99' day_second) from dual END INPUT select host,db,user from mysql.db where user like 'mysqltest_%' order by host,db,user; @@ -16646,7 +16646,7 @@ INPUT select st_touches(ST_GeomFromText('polygon((0 0, 2 2, 0 4, 0 0))'), ST_GeomFromText('point(1 0)')); END OUTPUT -select st_touches(ST_GeomFromText('polygon((0 0, 2 2, 0 4, 0 0))'), ST_GeomFromText('point(1 0)')) from dual +select st_touches(st_geometryfromtext('polygon((0 0, 2 2, 0 4, 0 0))'), st_geometryfromtext('point(1 0)')) from dual END INPUT select distinct s1 from t1 order by s2,s1; @@ -16802,7 +16802,7 @@ INPUT select locate(_utf8 0xD0B1, _utf8 0xD0B0D091D0B2 collate utf8_bin); END OUTPUT -select locate(_utf8 0xD0B1, _utf8 0xD0B0D091D0B2 collate utf8_bin) from dual +select locate(_utf8mb3 0xD0B1, _utf8mb3 0xD0B0D091D0B2 collate utf8_bin) from dual END INPUT select insert('hello', 1, -4294967295, 'hi'); @@ -16934,7 +16934,7 @@ INPUT select DATE_ADD(20071108181000, INTERVAL 1 DAY); END OUTPUT -select DATE_ADD(20071108181000, interval 1 DAY) from dual +select date_add(20071108181000, interval 1 day) from dual END INPUT select a as gci2 from t1 where a like 'あいうえおかきくけこさしすせそ'; @@ -16957,8 +16957,8 @@ END INPUT select hex(@utf81:= CONVERT(@ujis1 USING utf8)); END -ERROR -syntax error at position 19 near ':' +OUTPUT +select hex(@utf81 := convert(@ujis1 using utf8)) from dual END INPUT select strcmp(_koi8r'a', _koi8r'A' COLLATE koi8r_bin); @@ -17018,7 +17018,7 @@ INPUT select st_overlaps(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_overlaps(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_overlaps(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_union(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select _latin1'B' between _latin1'a' collate latin1_bin and _latin1'c'; @@ -17030,7 +17030,7 @@ INPUT select st_overlaps(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_overlaps(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_overlaps(st_union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select locate('HE','hello' collate utf8mb4_bin); @@ -17072,13 +17072,13 @@ INPUT select date_sub("1998-01-01 00:00:00",INTERVAL 1 MONTH); END OUTPUT -select date_sub('1998-01-01 00:00:00', interval 1 MONTH) from dual +select date_sub('1998-01-01 00:00:00', interval 1 month) from dual END INPUT select 1, ST_Intersects(ST_GeomFromText('POLYGON((0 0,20 10,10 30, 0 0))'), ST_GeomFromText('POLYGON((10 10,30 20,20 40, 10 10))')); END OUTPUT -select 1, ST_Intersects(ST_GeomFromText('POLYGON((0 0,20 10,10 30, 0 0))'), ST_GeomFromText('POLYGON((10 10,30 20,20 40, 10 10))')) from dual +select 1, ST_Intersects(st_geometryfromtext('POLYGON((0 0,20 10,10 30, 0 0))'), st_geometryfromtext('POLYGON((10 10,30 20,20 40, 10 10))')) from dual END INPUT select concat("-",a,"-",b,"-") from t1 where a="hello"; @@ -17407,8 +17407,8 @@ END INPUT select @a:=FROM_UNIXTIME(1); END -ERROR -syntax error at position 11 near ':' +OUTPUT +select @a := FROM_UNIXTIME(1) from dual END INPUT select sleep(2); @@ -17432,7 +17432,7 @@ INPUT select date_add(datetime, INTERVAL 1 YEAR) from t1; END OUTPUT -select date_add(`datetime`, interval 1 YEAR) from t1 +select date_add(`datetime`, interval 1 year) from t1 END INPUT select group_concat(t1.b,t2.c) from t1 left join t2 using(a) group by t1.a; @@ -17600,7 +17600,7 @@ INPUT select date_sub("0199-01-01 00:00:01",INTERVAL 2 SECOND); END OUTPUT -select date_sub('0199-01-01 00:00:01', interval 2 SECOND) from dual +select date_sub('0199-01-01 00:00:01', interval 2 second) from dual END INPUT select concat('|', text1, '|') from t1 where text1='teststring' or text1 > 'teststring '; @@ -17744,7 +17744,7 @@ INPUT select date_sub("1998-01-01 00:00:00",INTERVAL "1:1" MINUTE_SECOND); END OUTPUT -select date_sub('1998-01-01 00:00:00', interval '1:1' MINUTE_SECOND) from dual +select date_sub('1998-01-01 00:00:00', interval '1:1' minute_second) from dual END INPUT select count(distinct s,t) from t1; @@ -17810,7 +17810,7 @@ INPUT select date_sub("50-01-01 00:00:01",INTERVAL 2 SECOND); END OUTPUT -select date_sub('50-01-01 00:00:01', interval 2 SECOND) from dual +select date_sub('50-01-01 00:00:01', interval 2 second) from dual END INPUT select * from `information_schema`.`REFERENTIAL_CONSTRAINTS` where `TABLE_NAME` = NULL; @@ -17875,8 +17875,8 @@ END INPUT select @keyword1_id:= 10201; END -ERROR -syntax error at position 21 near ':' +OUTPUT +select @keyword1_id := 10201 from dual END INPUT select min(a) is null from t1; @@ -17887,8 +17887,8 @@ END INPUT select @stamp2:=f2 from t1; END -ERROR -syntax error at position 16 near ':' +OUTPUT +select @stamp2 := f2 from t1 END INPUT select last_day('2005-00-00'); @@ -17906,7 +17906,7 @@ INPUT select export_set(3, _latin1'foo', _utf8'bar', ',', 4); END OUTPUT -select export_set(3, _latin1 'foo', _utf8 'bar', ',', 4) from dual +select export_set(3, _latin1 'foo', _utf8mb3 'bar', ',', 4) from dual END INPUT select a,hex(a) from t1; @@ -17954,7 +17954,7 @@ INPUT select ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),GEOMETRYCOLLECTION(MULTIPOINT(0 14,-9 -11),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20))),MULTIPOINT(16 1,-9 -17,-16 6,-17 3),POINT(-18 13))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(POINT(7 0),MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17),MULTIPOINT(-9 -5,5 15,12 -11,12 11))'))) as result; END OUTPUT -select ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),GEOMETRYCOLLECTION(MULTIPOINT(0 14,-9 -11),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20))),MULTIPOINT(16 1,-9 -17,-16 6,-17 3),POINT(-18 13))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(POINT(7 0),MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17),MULTIPOINT(-9 -5,5 15,12 -11,12 11))'))) as result from dual +select st_astext(ST_UNION(st_geometryfromtext('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),GEOMETRYCOLLECTION(MULTIPOINT(0 14,-9 -11),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20))),MULTIPOINT(16 1,-9 -17,-16 6,-17 3),POINT(-18 13))'), st_geometryfromtext('GEOMETRYCOLLECTION(POINT(7 0),MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17),MULTIPOINT(-9 -5,5 15,12 -11,12 11))'))) as result from dual END INPUT select count(distinct vs) from t1; @@ -17966,7 +17966,7 @@ INPUT select timestampdiff(SQL_TSI_DAY, '2001-02-01', '2001-05-01') as a; END OUTPUT -select timestampdiff(SQL_TSI_DAY, '2001-02-01', '2001-05-01') as a from dual +select timestampdiff(day, '2001-02-01', '2001-05-01') as a from dual END INPUT select elt(1,c1,'�'),elt(1,'�',c1) from t1; @@ -18104,7 +18104,7 @@ INPUT select st_touches(ST_GeomFromText('polygon((0 0, 2 2, 0 4, 0 0))'), ST_GeomFromText('polygon((1 1.2, 1 0, 2 0, 1 1.2))')); END OUTPUT -select st_touches(ST_GeomFromText('polygon((0 0, 2 2, 0 4, 0 0))'), ST_GeomFromText('polygon((1 1.2, 1 0, 2 0, 1 1.2))')) from dual +select st_touches(st_geometryfromtext('polygon((0 0, 2 2, 0 4, 0 0))'), st_geometryfromtext('polygon((1 1.2, 1 0, 2 0, 1 1.2))')) from dual END INPUT select ifnull(NULL, _utf8mb4'string'); @@ -18116,7 +18116,7 @@ INPUT select database() = _utf8"test"; END OUTPUT -select database() = _utf8 'test' from dual +select database() = _utf8mb3 'test' from dual END INPUT select collation(char(123)), collation(char(123 using binary)); @@ -18187,8 +18187,8 @@ END INPUT select @keyword2_id:= 10202; END -ERROR -syntax error at position 21 near ':' +OUTPUT +select @keyword2_id := 10202 from dual END INPUT select week(20001231), week(20001231,6); @@ -18200,7 +18200,7 @@ INPUT select mbrtouches(ST_GeomFromText("linestring(1 0, 2 0)"), ST_GeomFromText("polygon((0 0, 3 0, 3 3, 0 3, 0 0))")); END OUTPUT -select mbrtouches(ST_GeomFromText('linestring(1 0, 2 0)'), ST_GeomFromText('polygon((0 0, 3 0, 3 3, 0 3, 0 0))')) from dual +select mbrtouches(st_geometryfromtext('linestring(1 0, 2 0)'), st_geometryfromtext('polygon((0 0, 3 0, 3 3, 0 3, 0 0))')) from dual END INPUT select count(*) from information_schema.events where event_schema = database() and event_name = 'event_35981' and on_completion = 'NOT PRESERVE'; @@ -18332,7 +18332,7 @@ INPUT select date_add(last_day("1997-12-1"), INTERVAL 1 DAY); END OUTPUT -select date_add(last_day('1997-12-1'), interval 1 DAY) from dual +select date_add(last_day('1997-12-1'), interval 1 day) from dual END INPUT select concat(':',trim(BOTH 'ab' FROM 'ababmyabab'),':',trim(BOTH '*' FROM '***sql'),':'); @@ -18344,7 +18344,7 @@ INPUT select date_add(date,INTERVAL 1 YEAR) from t1; END OUTPUT -select date_add(`date`, interval 1 YEAR) from t1 +select date_add(`date`, interval 1 year) from t1 END INPUT select v,count(t) from t1 group by v order by v limit 10; @@ -18386,7 +18386,7 @@ INPUT select mbrtouches(ST_GeomFromText("point(2 4)"), ST_GeomFromText("linestring(2 0, 2 4)")); END OUTPUT -select mbrtouches(ST_GeomFromText('point(2 4)'), ST_GeomFromText('linestring(2 0, 2 4)')) from dual +select mbrtouches(st_geometryfromtext('point(2 4)'), st_geometryfromtext('linestring(2 0, 2 4)')) from dual END INPUT select distinct a1,a2,b,c from t2 where (a2 >= 'b') and (b = 'a') and (c = 'i121') group by a1,a2,b; @@ -18548,7 +18548,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL "10000:99:99" HOUR_SECOND); END OUTPUT -select date_add('1997-12-31 23:59:59', interval '10000:99:99' HOUR_SECOND) from dual +select date_add('1997-12-31 23:59:59', interval '10000:99:99' hour_second) from dual END INPUT select count(*) from t1 where t='a '; @@ -18679,8 +18679,8 @@ END INPUT select char_length(left(@a:='тест',5)), length(@a), @a; END -ERROR -syntax error at position 28 near ':' +OUTPUT +select char_length(left(@a := 'тест', 5)), length(@a), @a from dual END INPUT select t1.*,t2.* from t1 left join t2 on (t1.b=t2.b) where coercibility(t2.a) = 5 order by t1.a,t2.a; @@ -18698,7 +18698,7 @@ INPUT select (ST_asWKT(ST_geomfromwkb((0x000000000140240000000000004024000000000000)))); END OUTPUT -select ST_asWKT(ST_geomfromwkb(0x000000000140240000000000004024000000000000)) from dual +select st_astext(st_geometryfromwkb(0x000000000140240000000000004024000000000000)) from dual END INPUT select collation(export_set(255,_latin2'y',_latin2'n',_latin2' ')), coercibility(export_set(255,_latin2'y',_latin2'n',_latin2' ')); @@ -18722,7 +18722,7 @@ INPUT select ST_astext(g) from t1 where ST_Intersects(ST_GeomFromText('POLYGON((5 1, 7 1, 7 7, 5 7, 3 3, 5 3, 5 1))'), g); END OUTPUT -select ST_astext(g) from t1 where ST_Intersects(ST_GeomFromText('POLYGON((5 1, 7 1, 7 7, 5 7, 3 3, 5 3, 5 1))'), g) +select st_astext(g) from t1 where ST_Intersects(st_geometryfromtext('POLYGON((5 1, 7 1, 7 7, 5 7, 3 3, 5 3, 5 1))'), g) END INPUT select t1.f1,t.* from t1, t1 t group by 1; @@ -18746,7 +18746,7 @@ INPUT select charset(charset(_utf8'a')), charset(collation(_utf8'a')); END OUTPUT -select charset(charset(_utf8 'a')), charset(collation(_utf8 'a')) from dual +select charset(charset(_utf8mb3 'a')), charset(collation(_utf8mb3 'a')) from dual END INPUT select * from `information_schema`.`key_column_usage` where `TABLE_NAME` = NULL; @@ -18764,7 +18764,7 @@ INPUT select timestampadd(WEEK, 1, date) from t1; END OUTPUT -select timestampadd(WEEK, 1, `date`) from t1 +select timestampadd(week, 1, `date`) from t1 END INPUT select log2(-1); @@ -18818,7 +18818,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL "10000:1" MINUTE_SECOND); END OUTPUT -select date_add('1997-12-31 23:59:59', interval '10000:1' MINUTE_SECOND) from dual +select date_add('1997-12-31 23:59:59', interval '10000:1' minute_second) from dual END INPUT select unix_timestamp('2038-02-10 01:00:00'); @@ -18914,7 +18914,7 @@ INPUT select 0, ST_Within(ST_GeomFromText('POLYGON((0 0,20 10,10 30, 0 0))'), ST_GeomFromText('POLYGON((10 10,30 20,20 40, 10 10))')); END OUTPUT -select 0, ST_Within(ST_GeomFromText('POLYGON((0 0,20 10,10 30, 0 0))'), ST_GeomFromText('POLYGON((10 10,30 20,20 40, 10 10))')) from dual +select 0, ST_Within(st_geometryfromtext('POLYGON((0 0,20 10,10 30, 0 0))'), st_geometryfromtext('POLYGON((10 10,30 20,20 40, 10 10))')) from dual END INPUT select c cz from t1 where c='z'; @@ -18944,7 +18944,7 @@ INPUT select i from t1 where b=repeat(_utf8 'b',310); END OUTPUT -select i from t1 where b = repeat(_utf8 'b', 310) +select i from t1 where b = repeat(_utf8mb3 'b', 310) END INPUT select * from t1 where not(not(a)); @@ -18962,13 +18962,13 @@ INPUT select ifnull(NULL, _utf8'string'); END OUTPUT -select ifnull(null, _utf8 'string') from dual +select ifnull(null, _utf8mb3 'string') from dual END INPUT select hex(_utf8 B'001111111111'); END OUTPUT -select hex(_utf8 B'001111111111') from dual +select hex(_utf8mb3 B'001111111111') from dual END INPUT select right('hello', -18446744073709551615); @@ -19016,7 +19016,7 @@ INPUT select "1997-12-31 23:59:59" + INTERVAL 1 SECOND; END OUTPUT -select '1997-12-31 23:59:59' + interval 1 SECOND from dual +select '1997-12-31 23:59:59' + interval 1 second from dual END INPUT select * from t2 where b="world"; @@ -19159,8 +19159,8 @@ END INPUT select @topic4_id:= 10104; END -ERROR -syntax error at position 19 near ':' +OUTPUT +select @topic4_id := 10104 from dual END INPUT select 10E+0+'a'; @@ -19220,7 +19220,7 @@ INPUT select ST_astext(st_symdifference(ST_GeomFromText('multipoint(2 2, 3 3)'), ST_GeomFromText('multipoint(2 2, 3 3)'))); END OUTPUT -select ST_astext(st_symdifference(ST_GeomFromText('multipoint(2 2, 3 3)'), ST_GeomFromText('multipoint(2 2, 3 3)'))) from dual +select st_astext(st_symdifference(st_geometryfromtext('multipoint(2 2, 3 3)'), st_geometryfromtext('multipoint(2 2, 3 3)'))) from dual END INPUT select t2.fld3 FROM t2 where fld3 >= 'honeysuckle' and fld3 <= 'honoring' order by fld3; @@ -19232,7 +19232,7 @@ INPUT select date_add(date,INTERVAL "1 1:1:1" DAY_SECOND) from t1; END OUTPUT -select date_add(`date`, interval '1 1:1:1' DAY_SECOND) from t1 +select date_add(`date`, interval '1 1:1:1' day_second) from t1 END INPUT select column_name from information_schema.columns where table_name='t1' order by column_name; @@ -19352,7 +19352,7 @@ INPUT select LAST_DAY('2007-12-06 08:59:19.05') - INTERVAL 1 SECOND; END OUTPUT -select LAST_DAY('2007-12-06 08:59:19.05') - interval 1 SECOND from dual +select LAST_DAY('2007-12-06 08:59:19.05') - interval 1 second from dual END INPUT select t1.*,t2.*,t3.a from t1 left join t2 on (t3.a=t2.a) left join t1 as t3 on (t2.a=t3.a); @@ -19376,7 +19376,7 @@ INPUT select ST_Crosses(ST_GeomFromText('MULTIPOINT(0 0,3 3)'),ST_GeomFromText('LINESTRING(1 1,10 10)')) as result; END OUTPUT -select ST_Crosses(ST_GeomFromText('MULTIPOINT(0 0,3 3)'), ST_GeomFromText('LINESTRING(1 1,10 10)')) as result from dual +select ST_Crosses(st_geometryfromtext('MULTIPOINT(0 0,3 3)'), st_geometryfromtext('LINESTRING(1 1,10 10)')) as result from dual END INPUT select 7; @@ -19430,7 +19430,7 @@ INPUT select t1.*,t2.* from t1 left join t2 on (t1.b=t2.b) where collation(t2.a) = _utf8'binary' order by t1.a,t2.a; END OUTPUT -select t1.*, t2.* from t1 left join t2 on t1.b = t2.b where collation(t2.a) = _utf8 'binary' order by t1.a asc, t2.a asc +select t1.*, t2.* from t1 left join t2 on t1.b = t2.b where collation(t2.a) = _utf8mb3 'binary' order by t1.a asc, t2.a asc END INPUT select * from t1 where i = 2; @@ -19472,7 +19472,7 @@ INPUT select ST_Intersects(ST_GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), ST_GeomFromText('POLYGON((50 5, 55 10, 0 45, 50 5))')); END OUTPUT -select ST_Intersects(ST_GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), ST_GeomFromText('POLYGON((50 5, 55 10, 0 45, 50 5))')) from dual +select ST_Intersects(st_geometryfromtext('POLYGON((0 0, 50 45, 40 50, 0 0))'), st_geometryfromtext('POLYGON((50 5, 55 10, 0 45, 50 5))')) from dual END INPUT select crc32("123"); @@ -19484,7 +19484,7 @@ INPUT select date_sub("1998-01-01 00:00:00",INTERVAL 1 YEAR); END OUTPUT -select date_sub('1998-01-01 00:00:00', interval 1 YEAR) from dual +select date_sub('1998-01-01 00:00:00', interval 1 year) from dual END INPUT select substr(z.a,-1), z.a from t1 as y join t1 as z on y.a=z.a order by 1; @@ -19496,7 +19496,7 @@ INPUT select timestampdiff(SQL_TSI_DAY, '1986-02-01', '1986-03-01') as a1, timestampdiff(SQL_TSI_DAY, '1900-02-01', '1900-03-01') as a2, timestampdiff(SQL_TSI_DAY, '1996-02-01', '1996-03-01') as a3, timestampdiff(SQL_TSI_DAY, '2000-02-01', '2000-03-01') as a4; END OUTPUT -select timestampdiff(SQL_TSI_DAY, '1986-02-01', '1986-03-01') as a1, timestampdiff(SQL_TSI_DAY, '1900-02-01', '1900-03-01') as a2, timestampdiff(SQL_TSI_DAY, '1996-02-01', '1996-03-01') as a3, timestampdiff(SQL_TSI_DAY, '2000-02-01', '2000-03-01') as a4 from dual +select timestampdiff(day, '1986-02-01', '1986-03-01') as a1, timestampdiff(day, '1900-02-01', '1900-03-01') as a2, timestampdiff(day, '1996-02-01', '1996-03-01') as a3, timestampdiff(day, '2000-02-01', '2000-03-01') as a4 from dual END INPUT select * from information_schema.COLLATION_CHARACTER_SET_APPLICABILITY where COLLATION_NAME like 'latin1%' ORDER BY COLLATION_NAME; @@ -19520,7 +19520,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL " -10000 99:99" DAY_MINUTE); END OUTPUT -select date_add('1997-12-31 23:59:59', interval ' -10000 99:99' DAY_MINUTE) from dual +select date_add('1997-12-31 23:59:59', interval ' -10000 99:99' day_minute) from dual END INPUT select insert('hello', 1, 18446744073709551617, 'hi'); @@ -19598,7 +19598,7 @@ INPUT select ST_astext(ST_symdifference(ST_GeomFromText('polygon((0 0, 1 0, 0 1, 0 0))'), ST_GeomFromText('polygon((0 0, 1 1, 0 2, 0 0))'))); END OUTPUT -select ST_astext(ST_symdifference(ST_GeomFromText('polygon((0 0, 1 0, 0 1, 0 0))'), ST_GeomFromText('polygon((0 0, 1 1, 0 2, 0 0))'))) from dual +select st_astext(ST_symdifference(st_geometryfromtext('polygon((0 0, 1 0, 0 1, 0 0))'), st_geometryfromtext('polygon((0 0, 1 1, 0 2, 0 0))'))) from dual END INPUT select ifnull(group_concat(concat(t1.id, ':', t1.name)), 'shortname') as 'without distinct: how it should be' from t1; @@ -19646,7 +19646,7 @@ INPUT select greatest(1,_utf16'.',_utf8''); END OUTPUT -select greatest(1, _utf16 '.', _utf8 '') from dual +select greatest(1, _utf16 '.', _utf8mb3 '') from dual END INPUT select round(1e1,308), truncate(1e1, 308); @@ -19670,7 +19670,7 @@ INPUT select ST_AsText(ST_GeometryFromWKB(ST_AsWKB(GeometryCollection(POINT(0, 0), MULTIPOINT(point(0, 0), point(1, 1)), LINESTRING(point(0, 0),point(10, 10)), MULTILINESTRING(LINESTRING(point(1, 2), point(1, 3))), POLYGON(LineString(Point(10, 20), Point(1, 1), Point(2, 2), Point(10, 20))), MULTIPOLYGON(Polygon(LineString(Point(0, 0), Point(1, 0), Point(1, 1), Point(0, 0)))))))) as Result; END OUTPUT -select ST_AsText(ST_GeometryFromWKB(ST_AsWKB(GeometryCollection(point(0, 0), MULTIPOINT(point(0, 0), point(1, 1)), linestring(point(0, 0), point(10, 10)), MULTILINESTRING(linestring(point(1, 2), point(1, 3))), polygon(linestring(point(10, 20), point(1, 1), point(2, 2), point(10, 20))), MULTIPOLYGON(polygon(linestring(point(0, 0), point(1, 0), point(1, 1), point(0, 0)))))))) as Result from dual +select st_astext(st_geometryfromwkb(st_asbinary(GeometryCollection(point(0, 0), multipoint(point(0, 0), point(1, 1)), linestring(point(0, 0), point(10, 10)), multilinestring(linestring(point(1, 2), point(1, 3))), polygon(linestring(point(10, 20), point(1, 1), point(2, 2), point(10, 20))), multipolygon(polygon(linestring(point(0, 0), point(1, 0), point(1, 1), point(0, 0)))))))) as Result from dual END INPUT select bar from t1 having group_concat(bar)=''; @@ -19700,7 +19700,7 @@ INPUT select "1998-01-01 00:00:00" - INTERVAL 1 SECOND; END OUTPUT -select '1998-01-01 00:00:00' - interval 1 SECOND from dual +select '1998-01-01 00:00:00' - interval 1 second from dual END INPUT select sql_big_result distinct t1.a from t1,t2 order by t2.a; @@ -19814,7 +19814,7 @@ INPUT select ST_Overlaps(ST_GeomFromText('POLYGON((0 0, 0 5, 5 5, 5 0, 0 0))'), ST_GeomFromText('POLYGON((10 10, 10 4, 4 4, 4 10, 10 10))')); END OUTPUT -select ST_Overlaps(ST_GeomFromText('POLYGON((0 0, 0 5, 5 5, 5 0, 0 0))'), ST_GeomFromText('POLYGON((10 10, 10 4, 4 4, 4 10, 10 10))')) from dual +select ST_Overlaps(st_geometryfromtext('POLYGON((0 0, 0 5, 5 5, 5 0, 0 0))'), st_geometryfromtext('POLYGON((10 10, 10 4, 4 4, 4 10, 10 10))')) from dual END INPUT select spid,count(*) from t1 where spid between 1 and 2 group by spid order by spid desc; @@ -20078,7 +20078,7 @@ INPUT select ST_astext(ST_Intersection(ST_GeomFromText('LINESTRING(0 0, 50 45, 40 50)'), ST_GeomFromText('LINESTRING(50 5, 55 10, 0 45)'))); END OUTPUT -select ST_astext(ST_Intersection(ST_GeomFromText('LINESTRING(0 0, 50 45, 40 50)'), ST_GeomFromText('LINESTRING(50 5, 55 10, 0 45)'))) from dual +select st_astext(ST_Intersection(st_geometryfromtext('LINESTRING(0 0, 50 45, 40 50)'), st_geometryfromtext('LINESTRING(50 5, 55 10, 0 45)'))) from dual END INPUT select distinct ifnull(group_concat(concat(t1.id, ':', t1.name)), 'shortname') as 'with distinct: cutoff at length of shortname' from t1; @@ -20216,7 +20216,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL 1 DAY); END OUTPUT -select date_add('1997-12-31 23:59:59', interval 1 DAY) from dual +select date_add('1997-12-31 23:59:59', interval 1 day) from dual END INPUT select host,db,user,table_name,column_name from mysql.columns_priv where user = 'mysqltest_1' order by host,db,user,table_name,column_name; @@ -20348,7 +20348,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL "-100 1" YEAR_MONTH); END OUTPUT -select date_add('1997-12-31 23:59:59', interval '-100 1' YEAR_MONTH) from dual +select date_add('1997-12-31 23:59:59', interval '-100 1' year_month) from dual END INPUT select hex(weight_string('a' as char(0))); @@ -20378,13 +20378,13 @@ INPUT select ST_Astext(ST_Envelope(ST_MPointFromWKB(ST_AsWKB(MultiPoint(Point('0', '-0'),Point('-0', '0'), Point('0', '0')))))) as result; END OUTPUT -select ST_Astext(ST_Envelope(ST_MPointFromWKB(ST_AsWKB(MultiPoint(point('0', '-0'), point('-0', '0'), point('0', '0')))))) as result from dual +select st_astext(st_envelope(st_multipointfromwkb(st_asbinary(multipoint(point('0', '-0'), point('-0', '0'), point('0', '0')))))) as result from dual END INPUT select date_add("9999-12-31 23:59:59",INTERVAL 1 SECOND); END OUTPUT -select date_add('9999-12-31 23:59:59', interval 1 SECOND) from dual +select date_add('9999-12-31 23:59:59', interval 1 second) from dual END INPUT select distinct a1,a2,b from t1 where (a2 >= 'b') and (b = 'a') group by a1,a2,b; @@ -20522,7 +20522,7 @@ INPUT select ST_astext(g) from t1 where ST_Within(g, ST_GeomFromText('POLYGON((5 1, 7 1, 7 7, 5 7, 3 3, 5 3, 5 1))')); END OUTPUT -select ST_astext(g) from t1 where ST_Within(g, ST_GeomFromText('POLYGON((5 1, 7 1, 7 7, 5 7, 3 3, 5 3, 5 1))')) +select st_astext(g) from t1 where ST_Within(g, st_geometryfromtext('POLYGON((5 1, 7 1, 7 7, 5 7, 3 3, 5 3, 5 1))')) END INPUT select table_name, is_updatable from information_schema.views where table_schema != 'sys' order by table_name; @@ -20552,7 +20552,7 @@ INPUT select soundex(_utf8 0xD091D092D093); END OUTPUT -select soundex(_utf8 0xD091D092D093) from dual +select soundex(_utf8mb3 0xD091D092D093) from dual END INPUT select sum(a) from t1 where a > 10; @@ -20654,7 +20654,7 @@ INPUT select st_intersects(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_intersects(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_intersects(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select count(*), min(7), max(7) from t1m, t2i; @@ -20774,7 +20774,7 @@ INPUT select st_astext(st_makeenvelope(st_geomfromtext('point(0 0)'), st_geomfromtext('point(2 2)'))); END OUTPUT -select st_astext(st_makeenvelope(st_geomfromtext('point(0 0)'), st_geomfromtext('point(2 2)'))) from dual +select st_astext(st_makeenvelope(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(2 2)'))) from dual END INPUT select a1,a2,b,min(c),max(c) from t1 where (a2 >= 'b') and (b = 'a') and (c > 'b111') group by a1,a2,b; @@ -20840,7 +20840,7 @@ INPUT select repeat(_utf8'+',3) as h union select NULL; END OUTPUT -select repeat(_utf8 '+', 3) as h from dual union select null from dual +select repeat(_utf8mb3 '+', 3) as h from dual union select null from dual END INPUT select fld1,fld3 FROM t2 where fld1 like "25050%"; @@ -20870,7 +20870,7 @@ INPUT select date_sub("0200-01-01 00:00:01",INTERVAL 1 SECOND); END OUTPUT -select date_sub('0200-01-01 00:00:01', interval 1 SECOND) from dual +select date_sub('0200-01-01 00:00:01', interval 1 second) from dual END INPUT select repeat('monty',5),concat('*',space(5),'*'); @@ -20948,7 +20948,7 @@ INPUT select ST_Overlaps(ST_GeomFromText('POLYGON((0 0, 0 5, 5 5, 5 0, 0 0))'), ST_GeomFromText('POLYGON((1 1, 1 4, 4 4, 4 1, 1 1))')); END OUTPUT -select ST_Overlaps(ST_GeomFromText('POLYGON((0 0, 0 5, 5 5, 5 0, 0 0))'), ST_GeomFromText('POLYGON((1 1, 1 4, 4 4, 4 1, 1 1))')) from dual +select ST_Overlaps(st_geometryfromtext('POLYGON((0 0, 0 5, 5 5, 5 0, 0 0))'), st_geometryfromtext('POLYGON((1 1, 1 4, 4 4, 4 1, 1 1))')) from dual END INPUT select length(@test_compress_string); @@ -20960,13 +20960,13 @@ INPUT select st_astext(st_intersection(ST_GeometryFromText('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))'))); END OUTPUT -select st_astext(st_intersection(ST_GeometryFromText('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))'))) from dual +select st_astext(st_intersection(st_geometryfromtext('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), st_geometryfromtext('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))'))) from dual END INPUT select st_equals(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_equals(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_equals(st_union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select distinct *,if(1,'',f1) from t1; @@ -20978,7 +20978,7 @@ INPUT select hex(_utf8 0x616263FF); END OUTPUT -select hex(_utf8 0x616263FF) from dual +select hex(_utf8mb3 0x616263FF) from dual END INPUT select avg(a) as x from t1 having x=2; @@ -21032,7 +21032,7 @@ INPUT select date_add(date,INTERVAL 1 MONTH) from t1; END OUTPUT -select date_add(`date`, interval 1 MONTH) from t1 +select date_add(`date`, interval 1 month) from t1 END INPUT select substring('hello', -18446744073709551615, 1); @@ -21068,7 +21068,7 @@ INPUT select st_touches(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_touches(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_touches(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_union(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select i, count(*), variance(s1/s2) from bug22555 group by i order by i; @@ -21122,7 +21122,7 @@ INPUT select DATE_ADD('20071108', INTERVAL 1 DAY); END OUTPUT -select DATE_ADD('20071108', interval 1 DAY) from dual +select date_add('20071108', interval 1 day) from dual END INPUT select locate(_ujis 0xa1a3,_ujis 0xa1a2a1a3); @@ -21146,7 +21146,7 @@ INPUT select ST_NUMPOINTS(ST_EXTERIORRING(ST_buffer(geom,2))) from t1; END OUTPUT -select ST_NUMPOINTS(ST_EXTERIORRING(ST_buffer(geom, 2))) from t1 +select st_numpoints(st_exteriorring(ST_buffer(geom, 2))) from t1 END INPUT select 0=0,1>0,1>=1,1<0,1<=0,1!=0,strcmp("abc","abcd"),strcmp("b","a"),strcmp("a","a"); @@ -21212,7 +21212,7 @@ INPUT select ST_astext(st_difference(ST_GeomFromText('polygon((0 0, 1 0, 0 1, 0 0))'), ST_GeomFromText('polygon((0 0, 0 1, 1 1, 1 0, 0 0))'))) as result; END OUTPUT -select ST_astext(st_difference(ST_GeomFromText('polygon((0 0, 1 0, 0 1, 0 0))'), ST_GeomFromText('polygon((0 0, 0 1, 1 1, 1 0, 0 0))'))) as result from dual +select st_astext(st_difference(st_geometryfromtext('polygon((0 0, 1 0, 0 1, 0 0))'), st_geometryfromtext('polygon((0 0, 0 1, 1 1, 1 0, 0 0))'))) as result from dual END INPUT select distinct a1,a2,b from t2 where (a2 >= 'b') and (b = 'a') group by a1,a2,b; @@ -21302,7 +21302,7 @@ INPUT select date_add("1997-12-31",INTERVAL "1 1" YEAR_MONTH); END OUTPUT -select date_add('1997-12-31', interval '1 1' YEAR_MONTH) from dual +select date_add('1997-12-31', interval '1 1' year_month) from dual END INPUT select timestampdiff(month,'1999-09-11','2001-10-10'); @@ -21428,7 +21428,7 @@ INPUT select ST_Disjoint(ST_GeomFromText('POLYGON((0 0, 0 5, 5 5, 5 0, 0 0))'), ST_GeomFromText('POLYGON((10 10, 10 4, 4 4, 4 10, 10 10))')); END OUTPUT -select ST_Disjoint(ST_GeomFromText('POLYGON((0 0, 0 5, 5 5, 5 0, 0 0))'), ST_GeomFromText('POLYGON((10 10, 10 4, 4 4, 4 10, 10 10))')) from dual +select ST_Disjoint(st_geometryfromtext('POLYGON((0 0, 0 5, 5 5, 5 0, 0 0))'), st_geometryfromtext('POLYGON((10 10, 10 4, 4 4, 4 10, 10 10))')) from dual END INPUT select insert(_ucs2 0x006100620063,10,2,_ucs2 0x006400650066); @@ -21560,7 +21560,7 @@ INPUT select st_within(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_within(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_within(st_union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select collation(group_concat(a)) from t1; @@ -21632,7 +21632,7 @@ INPUT select st_equals(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_equals(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_equals(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_union(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select cast(_latin1'ab' AS char) as c1, cast(_latin1'a ' AS char) as c2, cast(_latin1'abc' AS char(2)) as c3, cast(_latin1'a ' AS char(2)) as c4, hex(cast(_latin1'a' AS char(2))) as c5; @@ -21650,7 +21650,7 @@ INPUT select (ST_asWKT(ST_geomfromwkb((0x010100000000000000000024400000000000002440)))); END OUTPUT -select ST_asWKT(ST_geomfromwkb(0x010100000000000000000024400000000000002440)) from dual +select st_astext(st_geometryfromwkb(0x010100000000000000000024400000000000002440)) from dual END INPUT select right('hello', 18446744073709551617); @@ -21842,7 +21842,7 @@ INPUT select mbrtouches(ST_GeomFromText("linestring(3 2, 4 2)"), ST_GeomFromText("polygon((0 0, 3 0, 3 3, 0 3, 0 0))")); END OUTPUT -select mbrtouches(ST_GeomFromText('linestring(3 2, 4 2)'), ST_GeomFromText('polygon((0 0, 3 0, 3 3, 0 3, 0 0))')) from dual +select mbrtouches(st_geometryfromtext('linestring(3 2, 4 2)'), st_geometryfromtext('polygon((0 0, 3 0, 3 3, 0 3, 0 0))')) from dual END INPUT select round(1.1e1, 4294967295), truncate(1.1e1, 4294967295); @@ -21992,7 +21992,7 @@ INPUT select ST_astext(ST_UNION(ST_GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), ST_GeomFromText('LINESTRING(-10 -10, 200 200, 199 201, -11 -9)'))); END OUTPUT -select ST_astext(ST_UNION(ST_GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), ST_GeomFromText('LINESTRING(-10 -10, 200 200, 199 201, -11 -9)'))) from dual +select st_astext(ST_UNION(st_geometryfromtext('POLYGON((0 0, 50 45, 40 50, 0 0))'), st_geometryfromtext('LINESTRING(-10 -10, 200 200, 199 201, -11 -9)'))) from dual END INPUT select substring_index('.tcx.se','.',-2),substring_index('.tcx.se','.tcx',-1); @@ -22172,7 +22172,7 @@ INPUT select ST_astext(ST_PolyFromWKB(ST_AsWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(1, 0), Point(0, 0)))))); END OUTPUT -select ST_astext(ST_PolyFromWKB(ST_AsWKB(polygon(linestring(point(0, 0), point(30, 0), point(30, 30), point(1, 0), point(0, 0)))))) from dual +select st_astext(st_polygonfromwkb(st_asbinary(polygon(linestring(point(0, 0), point(30, 0), point(30, 30), point(1, 0), point(0, 0)))))) from dual END INPUT select count(*) from `load`; @@ -22190,7 +22190,7 @@ INPUT select date_add(date,INTERVAL "1" QUARTER) from t1; END OUTPUT -select date_add(`date`, interval '1' QUARTER) from t1 +select date_add(`date`, interval '1' quarter) from t1 END INPUT select concat('<', user(), '>'), concat('<', current_user(), '>'), database(); @@ -22207,8 +22207,8 @@ END INPUT select @topic5_id:= 10105; END -ERROR -syntax error at position 19 near ':' +OUTPUT +select @topic5_id := 10105 from dual END INPUT select max(b) from t1 where a = 2; @@ -22310,7 +22310,7 @@ INPUT select 1, ST_Intersects(ST_GeomFromText('POLYGON((0 0,20 10,10 30, 0 0))'), ST_GeomFromText('POINT(10 10)')); END OUTPUT -select 1, ST_Intersects(ST_GeomFromText('POLYGON((0 0,20 10,10 30, 0 0))'), ST_GeomFromText('POINT(10 10)')) from dual +select 1, ST_Intersects(st_geometryfromtext('POLYGON((0 0,20 10,10 30, 0 0))'), st_geometryfromtext('POINT(10 10)')) from dual END INPUT select week("0000-00-00"),week(d),week(dt),week(t),week(c) from t1; @@ -22322,7 +22322,7 @@ INPUT select ST_astext(st_difference(ST_GeomFromText('multipoint(2 2, 3 3)'), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))); END OUTPUT -select ST_astext(st_difference(ST_GeomFromText('multipoint(2 2, 3 3)'), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))) from dual +select st_astext(st_difference(st_geometryfromtext('multipoint(2 2, 3 3)'), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)')))) from dual END INPUT select * from v1b; @@ -22352,7 +22352,7 @@ INPUT select date_sub("1998-01-01 00:00:00",INTERVAL 1 MINUTE); END OUTPUT -select date_sub('1998-01-01 00:00:00', interval 1 MINUTE) from dual +select date_sub('1998-01-01 00:00:00', interval 1 minute) from dual END INPUT select length(repeat("a",65500)),length(concat(repeat("a",32000),repeat("a",32000))),length(replace("aaaaa","a",concat(repeat("a",10000)))),length(insert(repeat("a",40000),1,30000,repeat("b",50000))); @@ -22364,7 +22364,7 @@ INPUT select date_add(date,INTERVAL 1 HOUR) from t1; END OUTPUT -select date_add(`date`, interval 1 HOUR) from t1 +select date_add(`date`, interval 1 hour) from t1 END INPUT select hex(weight_string(ch)) w, name from t1 order by concat(ch); @@ -22436,7 +22436,7 @@ INPUT select st_within(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_within(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_within(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select distinct a,c from t1 group by b,c,a having a > 2 order by a desc; @@ -22484,13 +22484,13 @@ INPUT select ST_Intersects(ST_GeomFromText('LINESTRING(15 10,10 0)'),ST_GeomFromText('POINT(15 10)')) as result; END OUTPUT -select ST_Intersects(ST_GeomFromText('LINESTRING(15 10,10 0)'), ST_GeomFromText('POINT(15 10)')) as result from dual +select ST_Intersects(st_geometryfromtext('LINESTRING(15 10,10 0)'), st_geometryfromtext('POINT(15 10)')) as result from dual END INPUT select date_add(date,INTERVAL 1 SECOND) from t1; END OUTPUT -select date_add(`date`, interval 1 SECOND) from t1 +select date_add(`date`, interval 1 second) from t1 END INPUT select * from t1 order by i1; @@ -22646,7 +22646,7 @@ INPUT select ST_geometryfromtext(b) IS NULL, ST_geometryfromwkb(b) IS NULL, ST_astext(b) IS NULL, ST_aswkb(b) IS NULL, ST_geometrytype(b) IS NULL, ST_centroid(b) IS NULL, ST_envelope(b) IS NULL, ST_startpoint(b) IS NULL, ST_endpoint(b) IS NULL, ST_exteriorring(b) IS NULL, ST_pointn(b, 1) IS NULL, ST_geometryn(b, 1) IS NULL, ST_interiorringn(b, 1) IS NULL, multipoint(b) IS NULL, ST_isempty(b) IS NULL, ST_issimple(b) IS NULL, ST_isclosed(b) IS NULL, ST_dimension(b) IS NULL, ST_numgeometries(b) IS NULL, ST_numinteriorrings(b) IS NULL, ST_numpoints(b) IS NULL, ST_area(b) IS NULL, ST_length(b) IS NULL, ST_srid(b) IS NULL, ST_x(b) IS NULL, ST_y(b) IS NULL from t1; END OUTPUT -select ST_geometryfromtext(b) is null, ST_geometryfromwkb(b) is null, ST_astext(b) is null, ST_aswkb(b) is null, ST_geometrytype(b) is null, ST_centroid(b) is null, ST_envelope(b) is null, ST_startpoint(b) is null, ST_endpoint(b) is null, ST_exteriorring(b) is null, ST_pointn(b, 1) is null, ST_geometryn(b, 1) is null, ST_interiorringn(b, 1) is null, multipoint(b) is null, ST_isempty(b) is null, ST_issimple(b) is null, ST_isclosed(b) is null, ST_dimension(b) is null, ST_numgeometries(b) is null, ST_numinteriorrings(b) is null, ST_numpoints(b) is null, ST_area(b) is null, ST_length(b) is null, ST_srid(b) is null, ST_x(b) is null, ST_y(b) is null from t1 +select st_geometryfromtext(b) is null, st_geometryfromwkb(b) is null, st_astext(b) is null, st_asbinary(b) is null, st_geometrytype(b) is null, st_centroid(b) is null, st_envelope(b) is null, st_startpoint(b) is null, st_endpoint(b) is null, st_exteriorring(b) is null, st_pointn(b, 1) is null, st_geometryn(b, 1) is null, st_interiorringN(b, 1) is null, multipoint(b) is null, st_isempty(b) is null, st_issimple(b) is null, st_isclosed(b) is null, st_dimension(b) is null, st_numgeometries(b) is null, st_numinteriorrings(b) is null, st_numpoints(b) is null, st_area(b) is null, st_length(b) is null, ST_srid(b) is null, st_x(b) is null, st_y(b) is null from t1 END INPUT select inet6_ntoa(null),inet6_aton(null); @@ -22658,13 +22658,13 @@ INPUT select timestampdiff(SQL_TSI_MINUTE, '2001-02-01 12:59:59', '2001-05-01 12:58:59') as a; END OUTPUT -select timestampdiff(SQL_TSI_MINUTE, '2001-02-01 12:59:59', '2001-05-01 12:58:59') as a from dual +select timestampdiff(minute, '2001-02-01 12:59:59', '2001-05-01 12:58:59') as a from dual END INPUT select date_add("1997-12-31 23:59:59",INTERVAL 100000 SECOND); END OUTPUT -select date_add('1997-12-31 23:59:59', interval 100000 SECOND) from dual +select date_add('1997-12-31 23:59:59', interval 100000 second) from dual END INPUT select n,d,unix_timestamp(t) from t2; @@ -22760,7 +22760,7 @@ INPUT select ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20)))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17))'))) as result; END OUTPUT -select ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20)))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17))'))) as result from dual +select st_astext(ST_UNION(st_geometryfromtext('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20)))'), st_geometryfromtext('GEOMETRYCOLLECTION(MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17))'))) as result from dual END INPUT select group_concat(c1 order by binary c1 separator '') from t1 group by c1 collate utf32_esperanto_ci; @@ -22778,7 +22778,7 @@ INPUT select object_id, ST_geometrytype(geo), ST_ISSIMPLE(GEO), ST_ASTEXT(ST_centroid(geo)) from t1 where object_id=85998; END OUTPUT -select object_id, ST_geometrytype(geo), ST_ISSIMPLE(GEO), ST_ASTEXT(ST_centroid(geo)) from t1 where object_id = 85998 +select object_id, st_geometrytype(geo), st_issimple(GEO), st_astext(st_centroid(geo)) from t1 where object_id = 85998 END INPUT select hex(cast(9007199254740994 as decimal(30,0))); @@ -22879,8 +22879,8 @@ END INPUT select hex(@utf84:= CONVERT(@ujis4 USING utf8)); END -ERROR -syntax error at position 19 near ':' +OUTPUT +select hex(@utf84 := convert(@ujis4 using utf8)) from dual END INPUT select (select dt.a from (select 1 as a, t2.a as b from t2 having t1.a) dt where dt.b=t1.a) as subq from t1; @@ -22910,7 +22910,7 @@ INPUT select ST_AsText(a) from t1 where MBRContains(ST_GeomFromText('Polygon((0 0, 0 2, 2 2, 2 0, 0 0))'), a) and MBRContains(ST_GeomFromText('Polygon((0 0, 0 7, 7 7, 7 0, 0 0))'), a); END OUTPUT -select ST_AsText(a) from t1 where MBRContains(ST_GeomFromText('Polygon((0 0, 0 2, 2 2, 2 0, 0 0))'), a) and MBRContains(ST_GeomFromText('Polygon((0 0, 0 7, 7 7, 7 0, 0 0))'), a) +select st_astext(a) from t1 where MBRContains(st_geometryfromtext('Polygon((0 0, 0 2, 2 2, 2 0, 0 0))'), a) and MBRContains(st_geometryfromtext('Polygon((0 0, 0 7, 7 7, 7 0, 0 0))'), a) END INPUT select group_concat(c1 order by binary c1 separator '') from t1 group by c1 collate utf16_spanish_ci; @@ -22934,7 +22934,7 @@ INPUT select date_add("1997-12-31 23:59:59",INTERVAL 100000 MONTH); END OUTPUT -select date_add('1997-12-31 23:59:59', interval 100000 MONTH) from dual +select date_add('1997-12-31 23:59:59', interval 100000 month) from dual END INPUT select * from t2 order by name; @@ -23120,5 +23120,5 @@ INPUT select ST_astext(ST_envelope(ST_PolyFromWKB(ST_AsWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(1, 0), Point(0, 0))))))); END OUTPUT -select ST_astext(ST_envelope(ST_PolyFromWKB(ST_AsWKB(polygon(linestring(point(0, 0), point(30, 0), point(30, 30), point(1, 0), point(0, 0))))))) from dual +select st_astext(st_envelope(st_polygonfromwkb(st_asbinary(polygon(linestring(point(0, 0), point(30, 0), point(30, 30), point(1, 0), point(0, 0))))))) from dual END diff --git a/go/vt/sqlparser/testdata/union_cases.txt b/go/vt/sqlparser/testdata/union_cases.txt index 0f74e8a3cda..8e2def0e04e 100644 --- a/go/vt/sqlparser/testdata/union_cases.txt +++ b/go/vt/sqlparser/testdata/union_cases.txt @@ -2,7 +2,7 @@ INPUT SELECT ST_ASTEXT(ST_UNION(ST_ENVELOPE(ST_GEOMFROMTEXT('LINESTRING(5 9,-1 10,-2 -6,2 9,2 0,3 6,-3 3,9 -2,-3 -10,-7 -4,1 4)')), ST_UNION(ST_GEOMFROMTEXT('MULTILINESTRING((6 -8,10 -8,3 0,-6 1,0 8,-1 8,-3 -3,6 -6,0 6,1 -6,-1 7,8 3),(-9 -10,-4 0,0 1,-9 1,6 9,-8 7,-2 -6,2 10,-1 -5,3 -5,-1 -10))'), ST_GEOMFROMTEXT('MULTILINESTRING((8 7,2 6,-6 -8,-2 10,4 1,9 7,5 9,4 1,8 2,-2 10,8 -5))')))) as result; END OUTPUT -select ST_ASTEXT(ST_UNION(ST_ENVELOPE(ST_GEOMFROMTEXT('LINESTRING(5 9,-1 10,-2 -6,2 9,2 0,3 6,-3 3,9 -2,-3 -10,-7 -4,1 4)')), ST_UNION(ST_GEOMFROMTEXT('MULTILINESTRING((6 -8,10 -8,3 0,-6 1,0 8,-1 8,-3 -3,6 -6,0 6,1 -6,-1 7,8 3),(-9 -10,-4 0,0 1,-9 1,6 9,-8 7,-2 -6,2 10,-1 -5,3 -5,-1 -10))'), ST_GEOMFROMTEXT('MULTILINESTRING((8 7,2 6,-6 -8,-2 10,4 1,9 7,5 9,4 1,8 2,-2 10,8 -5))')))) as result from dual +select st_astext(ST_UNION(st_envelope(st_geometryfromtext('LINESTRING(5 9,-1 10,-2 -6,2 9,2 0,3 6,-3 3,9 -2,-3 -10,-7 -4,1 4)')), ST_UNION(st_geometryfromtext('MULTILINESTRING((6 -8,10 -8,3 0,-6 1,0 8,-1 8,-3 -3,6 -6,0 6,1 -6,-1 7,8 3),(-9 -10,-4 0,0 1,-9 1,6 9,-8 7,-2 -6,2 10,-1 -5,3 -5,-1 -10))'), st_geometryfromtext('MULTILINESTRING((8 7,2 6,-6 -8,-2 10,4 1,9 7,5 9,4 1,8 2,-2 10,8 -5))')))) as result from dual END INPUT SELECT i FROM t2 UNION SELECT c FROM t1; @@ -50,25 +50,25 @@ INPUT SELECT ST_AsText(ST_Union(ST_GEOMFROMTEXT( 'GEOMETRYCOLLECTION(POLYGON((0 0, 3 0, 3 3, 0 3, 0 0)), LINESTRING(0 1, 4 1))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result; END OUTPUT -select ST_AsText(ST_Union(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(POLYGON((0 0, 3 0, 3 3, 0 3, 0 0)), LINESTRING(0 1, 4 1))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result from dual +select st_astext(ST_Union(st_geometryfromtext('GEOMETRYCOLLECTION(POLYGON((0 0, 3 0, 3 3, 0 3, 0 0)), LINESTRING(0 1, 4 1))'), st_geometryfromtext('GEOMETRYCOLLECTION()'))) as result from dual END INPUT select ST_astext(st_union( st_intersection( multipoint(point(-1,-1)), point(1,-1) ), st_difference( multipoint(point(-1,1)), point(-1,-1) ))); END OUTPUT -select ST_astext(st_union(st_intersection(multipoint(point(-1, -1)), point(1, -1)), st_difference(multipoint(point(-1, 1)), point(-1, -1)))) from dual +select st_astext(st_union(st_intersection(multipoint(point(-1, -1)), point(1, -1)), st_difference(multipoint(point(-1, 1)), point(-1, -1)))) from dual END INPUT (select time_format(timediff(now(), DATE_SUB(now(),INTERVAL 5 HOUR)),'%H') As H) union (select time_format(timediff(now(), DATE_SUB(now(),INTERVAL 5 HOUR)),'%H') As H); END OUTPUT -select time_format(timediff(now(), DATE_SUB(now(), interval 5 HOUR)), '%H') as H from dual union select time_format(timediff(now(), DATE_SUB(now(), interval 5 HOUR)), '%H') as H from dual +select time_format(timediff(now(), date_sub(now(), interval 5 hour)), '%H') as H from dual union select time_format(timediff(now(), date_sub(now(), interval 5 hour)), '%H') as H from dual END INPUT select ST_astext(st_union(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))); END OUTPUT -select ST_astext(st_union(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))) from dual +select st_astext(st_union(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)')))) from dual END INPUT SELECT sleep(0.5) FROM t17059925 UNION ALL SELECT * FROM t17059925 WHERE a= 10 AND a= 20 UNION ALL SELECT * FROM t2; @@ -98,7 +98,7 @@ INPUT (select time_format(timediff(now(), DATE_SUB(now(),INTERVAL 5 DAY)),'%k') As H) union (select time_format(timediff(now(), DATE_SUB(now(),INTERVAL 5 DAY)),'%k') As H); END OUTPUT -select time_format(timediff(now(), DATE_SUB(now(), interval 5 DAY)), '%k') as H from dual union select time_format(timediff(now(), DATE_SUB(now(), interval 5 DAY)), '%k') as H from dual +select time_format(timediff(now(), date_sub(now(), interval 5 day)), '%k') as H from dual union select time_format(timediff(now(), date_sub(now(), interval 5 day)), '%k') as H from dual END INPUT SELECT * FROM (SELECT t17059925_func1(1)) t WHERE 1= 0 UNION SELECT sleep(0.5); @@ -134,13 +134,13 @@ INPUT select ST_AsText(a) from (select f2 as a from t1 union select f3 from t1) t; END OUTPUT -select ST_AsText(a) from (select f2 as a from t1 union select f3 from t1) as t +select st_astext(a) from (select f2 as a from t1 union select f3 from t1) as t END INPUT select st_overlaps(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_overlaps(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_overlaps(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_union(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT (SELECT * FROM t1 LIMIT 5) UNION ALL (SELECT * FROM t2 LIMIT 4) LIMIT 7; @@ -206,7 +206,7 @@ INPUT (select time_format(timediff(now(), DATE_SUB(now(),INTERVAL 5 HOUR)),'%k') As H) union (select time_format(timediff(now(), DATE_SUB(now(),INTERVAL 5 HOUR)),'%k') As H); END OUTPUT -select time_format(timediff(now(), DATE_SUB(now(), interval 5 HOUR)), '%k') as H from dual union select time_format(timediff(now(), DATE_SUB(now(), interval 5 HOUR)), '%k') as H from dual +select time_format(timediff(now(), date_sub(now(), interval 5 hour)), '%k') as H from dual union select time_format(timediff(now(), date_sub(now(), interval 5 hour)), '%k') as H from dual END INPUT select concat((select x from (select 'a' as x) as t1 ), (select y from (select 'b' as y) as t2 )) from (select 1 union select 2 ) as t3; @@ -224,7 +224,7 @@ INPUT select st_touches(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_touches(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_touches(st_union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT SELECT 1 UNION SELECT 1 INTO @var FOR UPDATE; @@ -236,13 +236,13 @@ INPUT select st_intersects(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_intersects(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_intersects(st_union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select ST_astext(ST_UNION(ST_GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), ST_GeomFromText('LINESTRING(-10 -10, 200 200, 199 201, -11 -9)'))); END OUTPUT -select ST_astext(ST_UNION(ST_GeomFromText('POLYGON((0 0, 50 45, 40 50, 0 0))'), ST_GeomFromText('LINESTRING(-10 -10, 200 200, 199 201, -11 -9)'))) from dual +select st_astext(ST_UNION(st_geometryfromtext('POLYGON((0 0, 50 45, 40 50, 0 0))'), st_geometryfromtext('LINESTRING(-10 -10, 200 200, 199 201, -11 -9)'))) from dual END INPUT SELECT NULL as "my_col1",2 AS "my_col2" UNION SELECT NULL,1; @@ -254,19 +254,19 @@ INPUT select st_contains(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_contains(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_contains(st_union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT SELECT ST_AsText(ST_Union(ST_GEOMFROMTEXT( 'GEOMETRYCOLLECTION(LINESTRING(4 1, 6 1), LINESTRING(0 1, 4 1))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result; END OUTPUT -select ST_AsText(ST_Union(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(LINESTRING(4 1, 6 1), LINESTRING(0 1, 4 1))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result from dual +select st_astext(ST_Union(st_geometryfromtext('GEOMETRYCOLLECTION(LINESTRING(4 1, 6 1), LINESTRING(0 1, 4 1))'), st_geometryfromtext('GEOMETRYCOLLECTION()'))) as result from dual END INPUT SELECT ST_ISVALID(ST_UNION(ST_GEOMFROMTEXT('MULTIPOLYGON(((0 0,0 40,40 40,40 0,0 0),(10 10,30 10,30 30,10 30,10 10)))'), ST_GEOMFROMTEXT('MULTIPOLYGON(((10 10,10 20,20 10,10 10)),((20 10,30 20,30 10,20 10)),((10 20,10 30,20 20,10 20)),((20 20,30 30,30 20,20 20)))'))); END OUTPUT -select ST_ISVALID(ST_UNION(ST_GEOMFROMTEXT('MULTIPOLYGON(((0 0,0 40,40 40,40 0,0 0),(10 10,30 10,30 30,10 30,10 10)))'), ST_GEOMFROMTEXT('MULTIPOLYGON(((10 10,10 20,20 10,10 10)),((20 10,30 20,30 10,20 10)),((10 20,10 30,20 20,10 20)),((20 20,30 30,30 20,20 20)))'))) from dual +select ST_ISVALID(ST_UNION(st_geometryfromtext('MULTIPOLYGON(((0 0,0 40,40 40,40 0,0 0),(10 10,30 10,30 30,10 30,10 10)))'), st_geometryfromtext('MULTIPOLYGON(((10 10,10 20,20 10,10 10)),((20 10,30 20,30 10,20 10)),((10 20,10 30,20 20,10 20)),((20 20,30 30,30 20,20 20)))'))) from dual END INPUT (SELECT * FROM t1 LIMIT 5 OFFSET 4) UNION (SELECT * FROM t2 LIMIT 4 OFFSET 2) ORDER BY a LIMIT 7 OFFSET 1; @@ -302,7 +302,7 @@ INPUT SELECT ST_CONTAINS(ST_UNION(ST_INTERSECTION(ST_GEOMFROMTEXT('POINT(-3 3)'), ST_GEOMFROMTEXT('POLYGON((8 3,-2 9,-10 2,-10 -9,7 -1,4 1,7 6,5 -10,5 3,2 1,-10 0, 8 3))')), ST_CONVEXHULL(ST_GEOMFROMTEXT('MULTIPOINT(8 -8,-7 5)'))), ST_UNION(ST_GEOMFROMTEXT('POINT(4 1)'), ST_GEOMFROMTEXT('MULTIPOINT(-10 -10,5 -2,-6 -7,1 5,-3 0)'))) as result; END OUTPUT -select ST_CONTAINS(ST_UNION(ST_INTERSECTION(ST_GEOMFROMTEXT('POINT(-3 3)'), ST_GEOMFROMTEXT('POLYGON((8 3,-2 9,-10 2,-10 -9,7 -1,4 1,7 6,5 -10,5 3,2 1,-10 0, 8 3))')), ST_CONVEXHULL(ST_GEOMFROMTEXT('MULTIPOINT(8 -8,-7 5)'))), ST_UNION(ST_GEOMFROMTEXT('POINT(4 1)'), ST_GEOMFROMTEXT('MULTIPOINT(-10 -10,5 -2,-6 -7,1 5,-3 0)'))) as result from dual +select ST_CONTAINS(ST_UNION(ST_INTERSECTION(st_geometryfromtext('POINT(-3 3)'), st_geometryfromtext('POLYGON((8 3,-2 9,-10 2,-10 -9,7 -1,4 1,7 6,5 -10,5 3,2 1,-10 0, 8 3))')), ST_CONVEXHULL(st_geometryfromtext('MULTIPOINT(8 -8,-7 5)'))), ST_UNION(st_geometryfromtext('POINT(4 1)'), st_geometryfromtext('MULTIPOINT(-10 -10,5 -2,-6 -7,1 5,-3 0)'))) as result from dual END INPUT select * from (select * from t1 union all select * from t1 limit 2) a; @@ -344,7 +344,7 @@ INPUT SELECT ST_AsText(ST_Union(ST_GEOMFROMTEXT( 'GEOMETRYCOLLECTION(POLYGON((0 0, 3 0, 3 3, 0 3, 0 0)), LINESTRING(3 1, 4 1))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result; END OUTPUT -select ST_AsText(ST_Union(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(POLYGON((0 0, 3 0, 3 3, 0 3, 0 0)), LINESTRING(3 1, 4 1))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result from dual +select st_astext(ST_Union(st_geometryfromtext('GEOMETRYCOLLECTION(POLYGON((0 0, 3 0, 3 3, 0 3, 0 0)), LINESTRING(3 1, 4 1))'), st_geometryfromtext('GEOMETRYCOLLECTION()'))) as result from dual END INPUT select 'a' union select concat('a', -concat('3',4)); @@ -392,13 +392,13 @@ INPUT select st_astext(st_union(ST_GeometryFromText('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))'))); END OUTPUT -select st_astext(st_union(ST_GeometryFromText('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))'))) from dual +select st_astext(st_union(st_geometryfromtext('geometrycollection(polygon((0 0, 2 0, 2 1, 0 1, 0 0)))'), st_geometryfromtext('geometrycollection(polygon((1 0, 3 0, 3 1, 1 1, 1 0)))'))) from dual END INPUT select st_equals(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_equals(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_equals(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_union(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT (SELECT * FROM t1 LIMIT 5) UNION ALL SELECT * FROM t2 ORDER BY a LIMIT 8; @@ -410,7 +410,7 @@ INPUT SELECT ST_ISVALID(ST_UNION(ST_GEOMFROMTEXT('POLYGON((0 6,-11 -6,6 0,0 6),(3 1,5 0,-2 0,3 1))'), ST_GEOMFROMTEXT('POLYGON((5 4,6 0,9 12,-7 -12,5 -19,5 4))'))); END OUTPUT -select ST_ISVALID(ST_UNION(ST_GEOMFROMTEXT('POLYGON((0 6,-11 -6,6 0,0 6),(3 1,5 0,-2 0,3 1))'), ST_GEOMFROMTEXT('POLYGON((5 4,6 0,9 12,-7 -12,5 -19,5 4))'))) from dual +select ST_ISVALID(ST_UNION(st_geometryfromtext('POLYGON((0 6,-11 -6,6 0,0 6),(3 1,5 0,-2 0,3 1))'), st_geometryfromtext('POLYGON((5 4,6 0,9 12,-7 -12,5 -19,5 4))'))) from dual END INPUT (SELECT * FROM t1 ORDER BY a DESC LIMIT 5 OFFSET 4) UNION ALL (SELECT * FROM t2 ORDER BY a DESC LIMIT 4 OFFSET 2) LIMIT 7 OFFSET 1; @@ -428,7 +428,7 @@ INPUT SELECT ST_AsText(ST_Union(ST_GEOMFROMTEXT( 'GEOMETRYCOLLECTION(POLYGON((0 0, 5 0, 5 5, 0 5,0 0)), POLYGON((5 0,10 0, 10 3,5 3,5 0)))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result; END OUTPUT -select ST_AsText(ST_Union(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(POLYGON((0 0, 5 0, 5 5, 0 5,0 0)), POLYGON((5 0,10 0, 10 3,5 3,5 0)))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result from dual +select st_astext(ST_Union(st_geometryfromtext('GEOMETRYCOLLECTION(POLYGON((0 0, 5 0, 5 5, 0 5,0 0)), POLYGON((5 0,10 0, 10 3,5 3,5 0)))'), st_geometryfromtext('GEOMETRYCOLLECTION()'))) as result from dual END INPUT select f1 from t1 union select f1 from t1; @@ -446,7 +446,7 @@ INPUT select st_intersects(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_intersects(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_intersects(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_union(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT (SELECT a FROM t1 ORDER BY COUNT(*)) UNION ALL SELECT a FROM t1; @@ -554,7 +554,7 @@ INPUT select st_disjoint(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_disjoint(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_disjoint(st_union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT SELECT alias2 . `col_int_nokey` AS field1 FROM ( CC AS alias1 INNER JOIN ( ( BB AS alias2 INNER JOIN ( SELECT SQ1_alias1 . * FROM C AS SQ1_alias1 ) AS alias3 ON (alias3 . `col_int_key` = alias2 . `col_int_nokey` ) ) ) ON (alias3 . `col_varchar_nokey` = alias2 . `col_varchar_key` ) ) WHERE ( ( alias2 . `pk` , alias3 . `col_int_nokey` ) IN ( SELECT 4 , 7 UNION SELECT 137, 6 ) ) AND alias1 . `pk` > 149 AND alias1 . `pk` < ( 149 + 7 ) OR alias3 . `col_varchar_key` < 'o'; @@ -572,7 +572,7 @@ INPUT select repeat(_utf8'+',3) as h union select NULL; END OUTPUT -select repeat(_utf8 '+', 3) as h from dual union select null from dual +select repeat(_utf8mb3 '+', 3) as h from dual union select null from dual END INPUT SELECT * FROM t1 UNION SELECT /*+ MAX_EXECUTION_TIME(0) */ * FROM t1; @@ -584,13 +584,13 @@ INPUT SELECT ST_AsText(st_union(ST_GeomFromText('GeometryCollection(GeometryCollection(Point(1 1)), GeometryCollection(linestring(1 1, 2 2)))'), ST_GeomFromText('GeometryCollection(GeometryCollection(Point(1 1)))'))); END OUTPUT -select ST_AsText(st_union(ST_GeomFromText('GeometryCollection(GeometryCollection(Point(1 1)), GeometryCollection(linestring(1 1, 2 2)))'), ST_GeomFromText('GeometryCollection(GeometryCollection(Point(1 1)))'))) from dual +select st_astext(st_union(st_geometryfromtext('GeometryCollection(GeometryCollection(Point(1 1)), GeometryCollection(linestring(1 1, 2 2)))'), st_geometryfromtext('GeometryCollection(GeometryCollection(Point(1 1)))'))) from dual END INPUT select ST_astext(st_union(ST_GeomFromText('multipoint(2 2, 3 3)'), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))); END OUTPUT -select ST_astext(st_union(ST_GeomFromText('multipoint(2 2, 3 3)'), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)')))) from dual +select st_astext(st_union(st_geometryfromtext('multipoint(2 2, 3 3)'), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)')))) from dual END INPUT select group_concat('x') UNION ALL select 1; @@ -626,7 +626,7 @@ INPUT select st_equals(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_equals(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_equals(st_union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT (SELECT * FROM t1 LIMIT 5 OFFSET 4) UNION ALL (SELECT * FROM t2 LIMIT 4 OFFSET 2) ORDER BY a LIMIT 7 OFFSET 1; @@ -644,7 +644,7 @@ INPUT SELECT ST_ISVALID(ST_UNION(ST_GEOMFROMTEXT('POLYGON((0 0,0 40,40 40,40 0,0 0),(10 10,30 10,30 30,10 30,10 10))'), ST_GEOMFROMTEXT('POLYGON((5 15,5 30,30 15,5 15))'))); END OUTPUT -select ST_ISVALID(ST_UNION(ST_GEOMFROMTEXT('POLYGON((0 0,0 40,40 40,40 0,0 0),(10 10,30 10,30 30,10 30,10 10))'), ST_GEOMFROMTEXT('POLYGON((5 15,5 30,30 15,5 15))'))) from dual +select ST_ISVALID(ST_UNION(st_geometryfromtext('POLYGON((0 0,0 40,40 40,40 0,0 0),(10 10,30 10,30 30,10 30,10 10))'), st_geometryfromtext('POLYGON((5 15,5 30,30 15,5 15))'))) from dual END INPUT (SELECT * FROM t1 ORDER BY a DESC LIMIT 5) UNION ALL (SELECT * FROM t2 ORDER BY a DESC LIMIT 4) LIMIT 7; @@ -674,19 +674,19 @@ INPUT select ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),GEOMETRYCOLLECTION(MULTIPOINT(0 14,-9 -11),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20))),MULTIPOINT(16 1,-9 -17,-16 6,-17 3),POINT(-18 13))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(POINT(7 0),MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17),MULTIPOINT(-9 -5,5 15,12 -11,12 11))'))) as result; END OUTPUT -select ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),GEOMETRYCOLLECTION(MULTIPOINT(0 14,-9 -11),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20))),MULTIPOINT(16 1,-9 -17,-16 6,-17 3),POINT(-18 13))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(POINT(7 0),MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17),MULTIPOINT(-9 -5,5 15,12 -11,12 11))'))) as result from dual +select st_astext(ST_UNION(st_geometryfromtext('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),GEOMETRYCOLLECTION(MULTIPOINT(0 14,-9 -11),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20))),MULTIPOINT(16 1,-9 -17,-16 6,-17 3),POINT(-18 13))'), st_geometryfromtext('GEOMETRYCOLLECTION(POINT(7 0),MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17),MULTIPOINT(-9 -5,5 15,12 -11,12 11))'))) as result from dual END INPUT SELECT ST_ISVALID( ST_UNION( ST_GEOMFROMTEXT(' LINESTRING(-9 -17,17 -11) '), ST_GEOMFROMTEXT(' GEOMETRYCOLLECTION( LINESTRING(8 16,-8 -3), POLYGON((2 3,-9 -7,12 -13,2 3)), MULTILINESTRING((-2 2,11 -10),(6 0,-15 0,16 0)) ) ') ) ) AS valid; END OUTPUT -select ST_ISVALID(ST_UNION(ST_GEOMFROMTEXT(' LINESTRING(-9 -17,17 -11) '), ST_GEOMFROMTEXT(' GEOMETRYCOLLECTION( LINESTRING(8 16,-8 -3), POLYGON((2 3,-9 -7,12 -13,2 3)), MULTILINESTRING((-2 2,11 -10),(6 0,-15 0,16 0)) ) '))) as valid from dual +select ST_ISVALID(ST_UNION(st_geometryfromtext(' LINESTRING(-9 -17,17 -11) '), st_geometryfromtext(' GEOMETRYCOLLECTION( LINESTRING(8 16,-8 -3), POLYGON((2 3,-9 -7,12 -13,2 3)), MULTILINESTRING((-2 2,11 -10),(6 0,-15 0,16 0)) ) '))) as valid from dual END INPUT select ST_astext(ST_Union(ST_geometryfromtext('point(1 1)'), ST_geometryfromtext('polygon((0 0, 2 0, 1 2, 0 0))'))); END OUTPUT -select ST_astext(ST_Union(ST_geometryfromtext('point(1 1)'), ST_geometryfromtext('polygon((0 0, 2 0, 1 2, 0 0))'))) from dual +select st_astext(ST_Union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('polygon((0 0, 2 0, 1 2, 0 0))'))) from dual END INPUT SELECT 1 as a FROM (SELECT 1 UNION SELECT a) b; @@ -722,7 +722,7 @@ INPUT select st_touches(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_touches(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_touches(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_union(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT (SELECT * FROM t1 ORDER BY a DESC LIMIT 5 OFFSET 4) UNION ALL (SELECT * FROM t2 ORDER BY a DESC LIMIT 4 OFFSET 2) ORDER BY a LIMIT 7 OFFSET 1; @@ -764,7 +764,7 @@ INPUT select ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20)))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17))'))) as result; END OUTPUT -select ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20)))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17))'))) as result from dual +select st_astext(ST_UNION(st_geometryfromtext('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20)))'), st_geometryfromtext('GEOMETRYCOLLECTION(MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17))'))) as result from dual END INPUT SELECT 1 LOCK IN SHARE MODE UNION SELECT 2; @@ -824,7 +824,7 @@ INPUT SELECT ST_AsText(ST_Union(ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(MULTIPOINT(0 0,100 100), MULTIPOINT(1 1, 2 2)))'), ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result; END OUTPUT -select ST_AsText(ST_Union(ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(MULTIPOINT(0 0,100 100), MULTIPOINT(1 1, 2 2)))'), ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result from dual +select st_astext(ST_Union(st_geometryfromtext('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(MULTIPOINT(0 0,100 100), MULTIPOINT(1 1, 2 2)))'), st_geometryfromtext('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result from dual END INPUT SELECT (a DIV 254576881) FROM t1 UNION ALL SELECT (a DIV 254576881) FROM t1; @@ -848,7 +848,7 @@ INPUT select st_crosses(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_crosses(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_crosses(st_union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT SELECT 1 UNION SELECT 2 FOR UPDATE; @@ -860,7 +860,7 @@ INPUT select st_disjoint(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_disjoint(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_disjoint(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_union(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT (SELECT MAX(f1) FROM t1) UNION (SELECT MAX(f1) FROM t1); @@ -884,7 +884,7 @@ INPUT SELECT ST_ISVALID(ST_UNION(ST_GEOMFROMTEXT('MULTIPOLYGON(((0 0,0 40,40 40,40 0,0 0),(10 10,30 10,30 30,10 30,10 10)))'), ST_GEOMFROMTEXT('MULTIPOLYGON(((15 10,10 15,10 17,15 10)),((15 10,10 20,10 22,15 10)),((15 10,10 25,10 27,15 10)),((25 10,30 17,30 15,25 10)),((25 10,30 22,30 20,25 10)),((25 10,30 27,30 25,25 10)),((18 10,20 30,19 10,18 10)),((21 10,20 30,22 10,21 10)))'))); END OUTPUT -select ST_ISVALID(ST_UNION(ST_GEOMFROMTEXT('MULTIPOLYGON(((0 0,0 40,40 40,40 0,0 0),(10 10,30 10,30 30,10 30,10 10)))'), ST_GEOMFROMTEXT('MULTIPOLYGON(((15 10,10 15,10 17,15 10)),((15 10,10 20,10 22,15 10)),((15 10,10 25,10 27,15 10)),((25 10,30 17,30 15,25 10)),((25 10,30 22,30 20,25 10)),((25 10,30 27,30 25,25 10)),((18 10,20 30,19 10,18 10)),((21 10,20 30,22 10,21 10)))'))) from dual +select ST_ISVALID(ST_UNION(st_geometryfromtext('MULTIPOLYGON(((0 0,0 40,40 40,40 0,0 0),(10 10,30 10,30 30,10 30,10 10)))'), st_geometryfromtext('MULTIPOLYGON(((15 10,10 15,10 17,15 10)),((15 10,10 20,10 22,15 10)),((15 10,10 25,10 27,15 10)),((25 10,30 17,30 15,25 10)),((25 10,30 22,30 20,25 10)),((25 10,30 27,30 25,25 10)),((18 10,20 30,19 10,18 10)),((21 10,20 30,22 10,21 10)))'))) from dual END INPUT SELECT a, SUM(a), SUM(a)+1, CONCAT(SUM(a),'x'), SUM(a)+SUM(a), SUM(a) FROM (SELECT 1 a, 2 b UNION SELECT 2,3 UNION SELECT 5,6 ) d GROUP BY a WITH ROLLUP ORDER BY GROUPING(a),a; @@ -896,13 +896,13 @@ INPUT SELECT ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION())'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(GEOMETRYCOLLECTION())))'))) as geom; END OUTPUT -select ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION())'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(GEOMETRYCOLLECTION())))'))) as geom from dual +select st_astext(ST_UNION(st_geometryfromtext('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION())'), st_geometryfromtext('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(GEOMETRYCOLLECTION())))'))) as geom from dual END INPUT SELECT ST_AsText(ST_Union(ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(linestring(0 0,100 100), MULTIPOINT(1 1, 2 2)))'), ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result; END OUTPUT -select ST_AsText(ST_Union(ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(linestring(0 0,100 100), MULTIPOINT(1 1, 2 2)))'), ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result from dual +select st_astext(ST_Union(st_geometryfromtext('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(linestring(0 0,100 100), MULTIPOINT(1 1, 2 2)))'), st_geometryfromtext('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result from dual END INPUT (SELECT a FROM t1 ORDER BY COUNT(*) LIMIT 1) UNION (SELECT a FROM t1 ORDER BY COUNT(*) LIMIT 1 OFFSET 1); @@ -944,13 +944,13 @@ INPUT select st_contains(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_contains(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_contains(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_union(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT SELECT ST_AsText(ST_Union(ST_GEOMFROMTEXT( 'GEOMETRYCOLLECTION(LINESTRING(3 1, 6 1), LINESTRING(0 1, 4 1))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result; END OUTPUT -select ST_AsText(ST_Union(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(LINESTRING(3 1, 6 1), LINESTRING(0 1, 4 1))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result from dual +select st_astext(ST_Union(st_geometryfromtext('GEOMETRYCOLLECTION(LINESTRING(3 1, 6 1), LINESTRING(0 1, 4 1))'), st_geometryfromtext('GEOMETRYCOLLECTION()'))) as result from dual END INPUT select * from (select * from t1 union all select * from t1) a; @@ -1010,7 +1010,7 @@ INPUT SELECT ST_AsText(ST_Union(shore, boundary)) FROM lakes, named_places WHERE lakes.name = 'Blue Lake' AND named_places.name = 'Goose Island'; END OUTPUT -select ST_AsText(ST_Union(shore, boundary)) from lakes, named_places where lakes.`name` = 'Blue Lake' and named_places.`name` = 'Goose Island' +select st_astext(ST_Union(shore, boundary)) from lakes, named_places where lakes.`name` = 'Blue Lake' and named_places.`name` = 'Goose Island' END INPUT (SELECT max(b), a FROM t1 GROUP BY a) UNION (SELECT max(b), a FROM t1 GROUP BY a); @@ -1022,7 +1022,7 @@ INPUT SELECT ST_AsText(ST_Union(ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(polygon((0 0,10 0, 10 10, 0 10, 0 0)), MULTIPOINT(1 1, 2 2)))'), ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result; END OUTPUT -select ST_AsText(ST_Union(ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(polygon((0 0,10 0, 10 10, 0 10, 0 0)), MULTIPOINT(1 1, 2 2)))'), ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result from dual +select st_astext(ST_Union(st_geometryfromtext('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(polygon((0 0,10 0, 10 10, 0 10, 0 0)), MULTIPOINT(1 1, 2 2)))'), st_geometryfromtext('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result from dual END INPUT (SELECT * FROM t1 ORDER BY a DESC LIMIT 5 OFFSET 4) UNION ALL SELECT * FROM t2 LIMIT 8 OFFSET 1; @@ -1034,7 +1034,7 @@ INPUT SELECT ST_ISVALID(ST_UNION(ST_GEOMFROMTEXT('LINESTRING(12 6,9 4,-9 1,-4 -6,12 -9,-9 -17,17 -11,-16 17,19 -19,0 -16,6 -5,15 3,14 -5,18 13,-9 10,-11 8)'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((-18 2,1 7),(-19 -3,-16 -12),(10 0,3 8,12 19,8 -15)),MULTILINESTRING((8 16,-8 -3),(18 3,8 12),(-19 4,20 14)),POLYGON((2 3,-9 -7,12 -13,2 3)),MULTILINESTRING((16 -7,-2 2,11 -10,-1 8),(6 0,-15 0,16 0,-6 -14)))'))); END OUTPUT -select ST_ISVALID(ST_UNION(ST_GEOMFROMTEXT('LINESTRING(12 6,9 4,-9 1,-4 -6,12 -9,-9 -17,17 -11,-16 17,19 -19,0 -16,6 -5,15 3,14 -5,18 13,-9 10,-11 8)'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((-18 2,1 7),(-19 -3,-16 -12),(10 0,3 8,12 19,8 -15)),MULTILINESTRING((8 16,-8 -3),(18 3,8 12),(-19 4,20 14)),POLYGON((2 3,-9 -7,12 -13,2 3)),MULTILINESTRING((16 -7,-2 2,11 -10,-1 8),(6 0,-15 0,16 0,-6 -14)))'))) from dual +select ST_ISVALID(ST_UNION(st_geometryfromtext('LINESTRING(12 6,9 4,-9 1,-4 -6,12 -9,-9 -17,17 -11,-16 17,19 -19,0 -16,6 -5,15 3,14 -5,18 13,-9 10,-11 8)'), st_geometryfromtext('GEOMETRYCOLLECTION(MULTILINESTRING((-18 2,1 7),(-19 -3,-16 -12),(10 0,3 8,12 19,8 -15)),MULTILINESTRING((8 16,-8 -3),(18 3,8 12),(-19 4,20 14)),POLYGON((2 3,-9 -7,12 -13,2 3)),MULTILINESTRING((16 -7,-2 2,11 -10,-1 8),(6 0,-15 0,16 0,-6 -14)))'))) from dual END INPUT select s1 from t1 where s1 in (select version from information_schema.tables) union select version from information_schema.tables; @@ -1058,7 +1058,7 @@ INPUT SELECT ST_AsText(ST_Union(ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(MULTIPOINT(0 0,100 100), linestring(1 1, 2 2)))'), ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result; END OUTPUT -select ST_AsText(ST_Union(ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(MULTIPOINT(0 0,100 100), linestring(1 1, 2 2)))'), ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result from dual +select st_astext(ST_Union(st_geometryfromtext('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(MULTIPOINT(0 0,100 100), linestring(1 1, 2 2)))'), st_geometryfromtext('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result from dual END INPUT SELECT 'case+union+test' UNION SELECT CASE '1' WHEN '2' THEN 'BUG' ELSE 'nobug' END; @@ -1070,7 +1070,7 @@ INPUT SELECT ST_AsText(ST_Union(ST_GEOMFROMTEXT( 'GEOMETRYCOLLECTION(POLYGON((0 0, 3 0, 3 3, 0 3, 0 0)), LINESTRING(3 0, 3 1, 4 2))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result; END OUTPUT -select ST_AsText(ST_Union(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(POLYGON((0 0, 3 0, 3 3, 0 3, 0 0)), LINESTRING(3 0, 3 1, 4 2))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result from dual +select st_astext(ST_Union(st_geometryfromtext('GEOMETRYCOLLECTION(POLYGON((0 0, 3 0, 3 3, 0 3, 0 0)), LINESTRING(3 0, 3 1, 4 2))'), st_geometryfromtext('GEOMETRYCOLLECTION()'))) as result from dual END INPUT SELECT 1 UNION SELECT 1 FOR UPDATE INTO @var; @@ -1082,7 +1082,7 @@ INPUT SELECT ST_ASTEXT(ST_VALIDATE(ST_UNION(ST_GEOMFROMTEXT('MULTIPOLYGON(((-7 -9,-3 7,0 -10,-6 5,10 10,-3 -4,7 9,2 -9)),((1 -10,-3 10,-2 5)))'), ST_GEOMFROMTEXT('POLYGON((6 10,-7 10,-1 -6,0 5,5 4,1 -9,1 3,-10 -7,-10 8))')))) as result; END OUTPUT -select ST_ASTEXT(ST_VALIDATE(ST_UNION(ST_GEOMFROMTEXT('MULTIPOLYGON(((-7 -9,-3 7,0 -10,-6 5,10 10,-3 -4,7 9,2 -9)),((1 -10,-3 10,-2 5)))'), ST_GEOMFROMTEXT('POLYGON((6 10,-7 10,-1 -6,0 5,5 4,1 -9,1 3,-10 -7,-10 8))')))) as result from dual +select st_astext(ST_VALIDATE(ST_UNION(st_geometryfromtext('MULTIPOLYGON(((-7 -9,-3 7,0 -10,-6 5,10 10,-3 -4,7 9,2 -9)),((1 -10,-3 10,-2 5)))'), st_geometryfromtext('POLYGON((6 10,-7 10,-1 -6,0 5,5 4,1 -9,1 3,-10 -7,-10 8))')))) as result from dual END INPUT SELECT a, SUM(a), SUM(a)+1, CONCAT(SUM(a),'x'), SUM(a)+SUM(a), SUM(a) FROM (SELECT 1 a, 2 b UNION SELECT 2,3 UNION SELECT 5,6 ) d GROUP BY a WITH ROLLUP ORDER BY SUM(a); @@ -1100,7 +1100,7 @@ INPUT select st_within(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_within(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_within(st_union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT (SELECT a FROM t1 ORDER BY COUNT(*) LIMIT 1) UNION ALL (SELECT a FROM t1 ORDER BY COUNT(*) LIMIT 1 OFFSET 1); @@ -1118,7 +1118,7 @@ INPUT SELECT ST_AsText(ST_Union(ST_GeomFromText('MULTIPOINT(0 0,100 100)'), ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result; END OUTPUT -select ST_AsText(ST_Union(ST_GeomFromText('MULTIPOINT(0 0,100 100)'), ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result from dual +select st_astext(ST_Union(st_geometryfromtext('MULTIPOINT(0 0,100 100)'), st_geometryfromtext('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result from dual END INPUT SELECT * FROM t1 UNION ALL SELECT * FROM t2 ORDER BY a LIMIT 5; @@ -1136,7 +1136,7 @@ INPUT SELECT ST_ISVALID(ST_UNION(ST_GEOMFROMTEXT('POLYGON((4 5,12 11,-12 -3,4 5))'), ST_GEOMFROMTEXT('MULTIPOLYGON(((5 4,-14 0,1 0,5 4)),((1 6,13 0,10 12,1 6)))'))); END OUTPUT -select ST_ISVALID(ST_UNION(ST_GEOMFROMTEXT('POLYGON((4 5,12 11,-12 -3,4 5))'), ST_GEOMFROMTEXT('MULTIPOLYGON(((5 4,-14 0,1 0,5 4)),((1 6,13 0,10 12,1 6)))'))) from dual +select ST_ISVALID(ST_UNION(st_geometryfromtext('POLYGON((4 5,12 11,-12 -3,4 5))'), st_geometryfromtext('MULTIPOLYGON(((5 4,-14 0,1 0,5 4)),((1 6,13 0,10 12,1 6)))'))) from dual END INPUT select 1 as a from t1 union all select 1 from dual limit 1; @@ -1154,19 +1154,19 @@ INPUT (select time_format(timediff(now(), DATE_SUB(now(),INTERVAL 5 DAY)),'%H') As H) union (select time_format(timediff(now(), DATE_SUB(now(),INTERVAL 5 DAY)),'%H') As H); END OUTPUT -select time_format(timediff(now(), DATE_SUB(now(), interval 5 DAY)), '%H') as H from dual union select time_format(timediff(now(), DATE_SUB(now(), interval 5 DAY)), '%H') as H from dual +select time_format(timediff(now(), date_sub(now(), interval 5 day)), '%H') as H from dual union select time_format(timediff(now(), date_sub(now(), interval 5 day)), '%H') as H from dual END INPUT select st_overlaps(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_overlaps(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_overlaps(st_union(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_intersection(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT SELECT ST_AsText(ST_Union(ST_GEOMFROMTEXT( 'GEOMETRYCOLLECTION(LINESTRING(3 1, 3 3), LINESTRING(0 1, 4 1))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result; END OUTPUT -select ST_AsText(ST_Union(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(LINESTRING(3 1, 3 3), LINESTRING(0 1, 4 1))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result from dual +select st_astext(ST_Union(st_geometryfromtext('GEOMETRYCOLLECTION(LINESTRING(3 1, 3 3), LINESTRING(0 1, 4 1))'), st_geometryfromtext('GEOMETRYCOLLECTION()'))) as result from dual END INPUT SELECT * FROM t1 UNION ALL SELECT * FROM t2 LIMIT 5; @@ -1196,7 +1196,7 @@ INPUT SELECT ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION())'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(POINT(1 1), GEOMETRYCOLLECTION(GEOMETRYCOLLECTION())))'))) as geom; END OUTPUT -select ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION())'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(POINT(1 1), GEOMETRYCOLLECTION(GEOMETRYCOLLECTION())))'))) as geom from dual +select st_astext(ST_UNION(st_geometryfromtext('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION())'), st_geometryfromtext('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(POINT(1 1), GEOMETRYCOLLECTION(GEOMETRYCOLLECTION())))'))) as geom from dual END INPUT select 'a' union select concat('a', -4.5); @@ -1220,7 +1220,7 @@ INPUT select (ST_aswkb(cast(st_union(multipoint( point(8,6), point(1,-17679), point(-9,-9)), linestring(point(91,12), point(-77,49), point(53,-81)))as char(18)))) in ('1','2'); END OUTPUT -select ST_aswkb(cast(st_union(multipoint(point(8, 6), point(1, -17679), point(-9, -9)), linestring(point(91, 12), point(-77, 49), point(53, -81))) as char(18))) in ('1', '2') from dual +select st_asbinary(cast(st_union(multipoint(point(8, 6), point(1, -17679), point(-9, -9)), linestring(point(91, 12), point(-77, 49), point(53, -81))) as char(18))) in ('1', '2') from dual END INPUT select st_astext(st_union(cast(point(1,1)as char(15)),point(1,1))) as res; @@ -1232,13 +1232,13 @@ INPUT select st_crosses(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_crosses(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_crosses(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_union(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT select ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),MULTIPOINT(0 14,-9 -11),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20)),MULTIPOINT(16 1,-9 -17,-16 6,-17 3),POINT(-18 13))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(POINT(7 0),MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17),MULTIPOINT(-9 -5,5 15,12 -11,12 11))'))) as result; END OUTPUT -select ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),MULTIPOINT(0 14,-9 -11),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20)),MULTIPOINT(16 1,-9 -17,-16 6,-17 3),POINT(-18 13))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(POINT(7 0),MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17),MULTIPOINT(-9 -5,5 15,12 -11,12 11))'))) as result from dual +select st_astext(ST_UNION(st_geometryfromtext('GEOMETRYCOLLECTION(MULTILINESTRING((0 -14,13 -8),(-5 -3,8 7),(-6 18,17 -11,-12 19,19 5),(16 11,9 -5),(17 -5,5 10),(-4 17,6 4),(-12 15,17 13,-18 11,15 10),(7 0,2 -16,-18 13,-6 4),(-17 -6,-6 -7,1 4,-18 0)),MULTIPOINT(0 14,-9 -11),MULTILINESTRING((-11 -2,17 -14),(18 -12,18 -8),(-13 -16,9 16,9 -10,-7 20),(-14 -5,10 -9,4 1,17 -8),(-9 -4,-2 -12,9 -13,-5 4),(15 17,13 20)),MULTIPOINT(16 1,-9 -17,-16 6,-17 3),POINT(-18 13))'), st_geometryfromtext('GEOMETRYCOLLECTION(POINT(7 0),MULTILINESTRING((-13 -18,-16 0),(17 11,-1 11,-18 -19,-4 -18),(-8 -8,-15 -13,3 -18,6 8)),LINESTRING(5 16,0 -9,-6 4,-15 17),MULTIPOINT(-9 -5,5 15,12 -11,12 11))'))) as result from dual END INPUT SELECT * FROM t1 UNION SELECT * FROM t2 LIMIT 5 OFFSET 6; @@ -1268,7 +1268,7 @@ INPUT SELECT ST_AsText(ST_Union(ST_GEOMFROMTEXT( 'GEOMETRYCOLLECTION(POLYGON((5 0,0 10,10 10,5 0)), POLYGON((5 0,0 -10,10 -10,5 0)))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result; END OUTPUT -select ST_AsText(ST_Union(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(POLYGON((5 0,0 10,10 10,5 0)), POLYGON((5 0,0 -10,10 -10,5 0)))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result from dual +select st_astext(ST_Union(st_geometryfromtext('GEOMETRYCOLLECTION(POLYGON((5 0,0 10,10 10,5 0)), POLYGON((5 0,0 -10,10 -10,5 0)))'), st_geometryfromtext('GEOMETRYCOLLECTION()'))) as result from dual END INPUT SELECT a DIV 2 FROM t1 UNION SELECT a DIV 2 FROM t1; @@ -1286,7 +1286,7 @@ INPUT SELECT ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('MULTIPOLYGON(((-7 -9,-3 7,0 -10,-6 5,10 10,-3 -4,7 9,2 -9)),((1 -10,-3 10,-2 5)))'), ST_GEOMFROMTEXT('POLYGON((6 10,-7 10,-1 -6,0 5,5 4,1 -9,1 3,-10 -7,-10 8))'))) as result; END OUTPUT -select ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('MULTIPOLYGON(((-7 -9,-3 7,0 -10,-6 5,10 10,-3 -4,7 9,2 -9)),((1 -10,-3 10,-2 5)))'), ST_GEOMFROMTEXT('POLYGON((6 10,-7 10,-1 -6,0 5,5 4,1 -9,1 3,-10 -7,-10 8))'))) as result from dual +select st_astext(ST_UNION(st_geometryfromtext('MULTIPOLYGON(((-7 -9,-3 7,0 -10,-6 5,10 10,-3 -4,7 9,2 -9)),((1 -10,-3 10,-2 5)))'), st_geometryfromtext('POLYGON((6 10,-7 10,-1 -6,0 5,5 4,1 -9,1 3,-10 -7,-10 8))'))) as result from dual END INPUT (SELECT * FROM t1 LIMIT 5) UNION ALL (SELECT * FROM t2 LIMIT 4) ORDER BY a LIMIT 7; @@ -1340,25 +1340,25 @@ INPUT SELECT ST_AsText(ST_Union(ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(polygon((0 0,10 0, 10 10, 0 10, 0 0)), polygon((0 0, 1 0, 1 1, 0 1, 0 0))))'), ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result; END OUTPUT -select ST_AsText(ST_Union(ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(polygon((0 0,10 0, 10 10, 0 10, 0 0)), polygon((0 0, 1 0, 1 1, 0 1, 0 0))))'), ST_GeomFromText('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result from dual +select st_astext(ST_Union(st_geometryfromtext('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(polygon((0 0,10 0, 10 10, 0 10, 0 0)), polygon((0 0, 1 0, 1 1, 0 1, 0 0))))'), st_geometryfromtext('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(),GEOMETRYCOLLECTION())'))) as result from dual END INPUT select st_astext(st_union(ST_GeometryFromText('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))'))); END OUTPUT -select st_astext(st_union(ST_GeometryFromText('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), ST_GeometryFromText('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))'))) from dual +select st_astext(st_union(st_geometryfromtext('geometrycollection(polygon((0 0, 1 0, 1 1, 0 1, 0 0)))'), st_geometryfromtext('geometrycollection(polygon((1 0, 2 0, 2 1, 1 1, 1 0)))'))) from dual END INPUT select st_within(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); END OUTPUT -select st_within(st_intersection(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_union(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))) from dual +select st_within(st_intersection(st_geometryfromtext('point(1 1)'), st_geometryfromtext('multipoint(2 2, 3 3)')), st_union(st_geometryfromtext('point(0 0)'), st_geometryfromtext('point(1 1)'))) from dual END INPUT SELECT ST_AsText(ST_Union(ST_GEOMFROMTEXT( 'GEOMETRYCOLLECTION(LINESTRING(3 0, 3 3), LINESTRING(0 1, 4 1))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result; END OUTPUT -select ST_AsText(ST_Union(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(LINESTRING(3 0, 3 3), LINESTRING(0 1, 4 1))'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION()'))) as result from dual +select st_astext(ST_Union(st_geometryfromtext('GEOMETRYCOLLECTION(LINESTRING(3 0, 3 3), LINESTRING(0 1, 4 1))'), st_geometryfromtext('GEOMETRYCOLLECTION()'))) as result from dual END INPUT (SELECT * FROM t1 ORDER BY a DESC LIMIT 5 OFFSET 4) UNION SELECT * FROM t2 ORDER BY a LIMIT 8 OFFSET 1; @@ -1388,7 +1388,7 @@ INPUT SELECT ST_ISVALID(ST_UNION(ST_GEOMFROMTEXT('POLYGON((0 0,10 10,20 0,0 0))'), ST_GEOMFROMTEXT('POLYGON((10 5,20 7,10 10,30 10,20 0,20 5,10 5))'))); END OUTPUT -select ST_ISVALID(ST_UNION(ST_GEOMFROMTEXT('POLYGON((0 0,10 10,20 0,0 0))'), ST_GEOMFROMTEXT('POLYGON((10 5,20 7,10 10,30 10,20 0,20 5,10 5))'))) from dual +select ST_ISVALID(ST_UNION(st_geometryfromtext('POLYGON((0 0,10 10,20 0,0 0))'), st_geometryfromtext('POLYGON((10 5,20 7,10 10,30 10,20 0,20 5,10 5))'))) from dual END INPUT select * from t1 union select * from t2 order by 1, 2; @@ -1411,8 +1411,8 @@ END INPUT SELECT @a:= CAST(f1 AS SIGNED) FROM t1 UNION ALL SELECT CAST(f1 AS SIGNED) FROM t1; END -ERROR -syntax error at position 11 near ':' +OUTPUT +select @a := cast(f1 as SIGNED) from t1 union all select cast(f1 as SIGNED) from t1 END INPUT (SELECT a FROM t1 ORDER BY COUNT(*)) UNION (SELECT a FROM t1 ORDER BY COUNT(*)); diff --git a/go/vt/sqlparser/token.go b/go/vt/sqlparser/token.go index 16cda39dd46..2b82e619445 100644 --- a/go/vt/sqlparser/token.go +++ b/go/vt/sqlparser/token.go @@ -41,7 +41,6 @@ type Tokenizer struct { lastToken string posVarIndex int partialDDL Statement - nesting int multi bool specialComment *Tokenizer @@ -172,7 +171,7 @@ func (tkn *Tokenizer) Scan() (int, string) { case isDigit(ch): return tkn.scanNumber() case ch == ':': - return tkn.scanBindVar() + return tkn.scanBindVarOrAssignmentExpression() case ch == ';': if tkn.multi { // In multi mode, ';' is treated as EOF. So, we don't advance. @@ -426,8 +425,8 @@ func (tkn *Tokenizer) scanLiteralIdentifier() (int, string) { } } -// scanBindVar scans a bind variable; assumes a ':' has been scanned right before -func (tkn *Tokenizer) scanBindVar() (int, string) { +// scanBindVarOrAssignmentExpression scans a bind variable or an assignment expression; assumes a ':' has been scanned right before +func (tkn *Tokenizer) scanBindVarOrAssignmentExpression() (int, string) { start := tkn.Pos token := VALUE_ARG @@ -437,6 +436,13 @@ func (tkn *Tokenizer) scanBindVar() (int, string) { tkn.scanMantissa(10) return OFFSET_ARG, tkn.buf[start+1 : tkn.Pos] } + + // If : is followed by a =, then it is an assignment operator + if tkn.cur() == '=' { + tkn.skip(1) + return ASSIGNMENT_OPT, "" + } + // If : is followed by another : it is a list arg. Example ::v1, ::list if tkn.cur() == ':' { token = LIST_ARG @@ -703,7 +709,6 @@ func (tkn *Tokenizer) reset() { tkn.partialDDL = nil tkn.specialComment = nil tkn.posVarIndex = 0 - tkn.nesting = 0 tkn.SkipToEnd = false } diff --git a/go/vt/sqlparser/tracked_buffer.go b/go/vt/sqlparser/tracked_buffer.go index 6d332b870e4..59aa9d8b13f 100644 --- a/go/vt/sqlparser/tracked_buffer.go +++ b/go/vt/sqlparser/tracked_buffer.go @@ -37,10 +37,19 @@ type TrackedBuffer struct { bindLocations []bindLocation nodeFormatter NodeFormatter literal func(string) (int, error) - escape bool fast bool + + escape escapeType } +type escapeType int + +const ( + escapeKeywords escapeType = iota + escapeAllIdentifiers + escapeNoIdentifiers +) + // NewTrackedBuffer creates a new TrackedBuffer. func NewTrackedBuffer(nodeFormatter NodeFormatter) *TrackedBuffer { buf := &TrackedBuffer{ @@ -81,9 +90,17 @@ func (buf *TrackedBuffer) SetUpperCase(enable bool) { // and escaped. By default, identifiers are only escaped if they match the name of a SQL keyword or they // contain characters that must be escaped. // Enabling this option will prevent the optimized fastFormat routines from running. -func (buf *TrackedBuffer) SetEscapeAllIdentifiers(enable bool) { +func (buf *TrackedBuffer) SetEscapeAllIdentifiers() { buf.fast = false - buf.escape = enable + buf.escape = escapeAllIdentifiers +} + +// SetEscapeNoIdentifier sets whether NO identifiers in the serialized SQL query should be quoted and escaped. +// Warning: this can lead to query output that is not valid SQL +// Enabling this option will prevent the optimized fastFormat routines from running. +func (buf *TrackedBuffer) SetEscapeNoIdentifier() { + buf.fast = false + buf.escape = escapeNoIdentifiers } // WriteNode function, initiates the writing of a single SQLNode tree by passing @@ -304,6 +321,19 @@ func String(node SQLNode) string { return buf.String() } +// UnescapedString will return a string where no identifiers have been escaped. +func UnescapedString(node SQLNode) string { + if node == nil { + return "" // do not return '', which is Go syntax. + } + + buf := NewTrackedBuffer(nil) + buf.SetEscapeNoIdentifier() + node.Format(buf) + return buf.String() + +} + // CanonicalString returns a canonical string representation of an SQLNode where all identifiers // are always escaped and all SQL syntax is in uppercase. This matches the canonical output from MySQL. func CanonicalString(node SQLNode) string { @@ -313,7 +343,7 @@ func CanonicalString(node SQLNode) string { buf := NewTrackedBuffer(nil) buf.SetUpperCase(true) - buf.SetEscapeAllIdentifiers(true) + buf.SetEscapeAllIdentifiers() node.Format(buf) return buf.String() } diff --git a/go/vt/sqlparser/tracked_buffer_test.go b/go/vt/sqlparser/tracked_buffer_test.go index af1de7c843e..6924bf11911 100644 --- a/go/vt/sqlparser/tracked_buffer_test.go +++ b/go/vt/sqlparser/tracked_buffer_test.go @@ -104,6 +104,10 @@ func TestCanonicalOutput(t *testing.T) { "create table a (v varchar(32)) engine=InnoDB", "CREATE TABLE `a` (\n\t`v` varchar(32)\n) ENGINE InnoDB", }, + { // tablespace names are case-sensitive: https://dev.mysql.com/doc/refman/en/general-tablespaces.html + "create table a (v varchar(32)) engine=InnoDB tablespace innodb_system", + "CREATE TABLE `a` (\n\t`v` varchar(32)\n) ENGINE InnoDB,\n TABLESPACE innodb_system", + }, { "create table a (id int not null primary key) engine InnoDB, charset utf8mb4, collate utf8mb4_0900_ai_ci partition by range (`id`) (partition `p10` values less than(10) engine InnoDB tablespace foo)", "CREATE TABLE `a` (\n\t`id` int NOT NULL PRIMARY KEY\n) ENGINE InnoDB,\n CHARSET utf8mb4,\n COLLATE utf8mb4_0900_ai_ci\nPARTITION BY RANGE (`id`)\n(PARTITION `p10` VALUES LESS THAN (10) ENGINE InnoDB TABLESPACE foo)", diff --git a/go/vt/sqlparser/truncate_query.go b/go/vt/sqlparser/truncate_query.go index 8a4f317d59e..4bb63730fd2 100644 --- a/go/vt/sqlparser/truncate_query.go +++ b/go/vt/sqlparser/truncate_query.go @@ -30,6 +30,8 @@ var ( truncateErrLen = 0 ) +const TruncationText = "[TRUNCATED]" + func registerQueryTruncationFlags(fs *pflag.FlagSet) { fs.IntVar(&truncateUILen, "sql-max-length-ui", truncateUILen, "truncate queries in debug UIs to the given length (default 512)") fs.IntVar(&truncateErrLen, "sql-max-length-errors", truncateErrLen, "truncate queries in error logs to the given length (default unlimited)") @@ -69,7 +71,7 @@ func truncateQuery(query string, max int) string { return comments.Leading + sql + comments.Trailing } - return comments.Leading + sql[:max-12] + " [TRUNCATED]" + comments.Trailing + return comments.Leading + sql[:max-(len(TruncationText)+1)] + " " + TruncationText + comments.Trailing } // TruncateForUI is used when displaying queries on various Vitess status pages diff --git a/go/vt/sqlparser/utils.go b/go/vt/sqlparser/utils.go index 170def2103b..0f3c66f2ea3 100644 --- a/go/vt/sqlparser/utils.go +++ b/go/vt/sqlparser/utils.go @@ -113,3 +113,50 @@ func NormalizeAlphabetically(query string) (normalized string, err error) { } return String(stmt), nil } + +// ReplaceTableQualifiers takes a statement's table expressions and +// replaces any cases of the provided database name with the +// specified replacement name. +// Note: both database names provided should be unescaped strings. +func ReplaceTableQualifiers(query, olddb, newdb string) (string, error) { + if newdb == olddb { + // Nothing to do here. + return query, nil + } + in, err := Parse(query) + if err != nil { + return "", err + } + + oldQualifier := NewIdentifierCS(olddb) + newQualifier := NewIdentifierCS(newdb) + + modified := false + upd := Rewrite(in, func(cursor *Cursor) bool { + switch node := cursor.Node().(type) { + case TableName: + if !node.Qualifier.IsEmpty() && + node.Qualifier.String() == oldQualifier.String() { + node.Qualifier = newQualifier + cursor.Replace(node) + modified = true + } + case *ShowBasic: // for things like 'show tables from _vt' + if !node.DbName.IsEmpty() && + node.DbName.String() == oldQualifier.String() { + node.DbName = newQualifier + cursor.Replace(node) + modified = true + } + } + return true + }, nil) + // If we didn't modify anything, return the original query. + // This is particularly helpful with unit tests that + // execute a query which slightly differs from the parsed + // version: e.g. 'where id=1' becomes 'where id = 1'. + if modified { + return String(upd), nil + } + return query, nil +} diff --git a/go/vt/sqlparser/utils_test.go b/go/vt/sqlparser/utils_test.go index 362a675076e..63c9b10ba43 100644 --- a/go/vt/sqlparser/utils_test.go +++ b/go/vt/sqlparser/utils_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNormalizeAlphabetically(t *testing.T) { @@ -180,3 +181,97 @@ func TestQueryMatchesTemplates(t *testing.T) { }) } } + +func TestReplaceTableQualifiers(t *testing.T) { + origDB := "_vt" + tests := []struct { + name string + in string + newdb string + out string + wantErr bool + }{ + { + name: "invalid select", + in: "select frog bar person", + out: "", + wantErr: true, + }, + { + name: "simple select", + in: "select * from _vt.foo", + out: "select * from foo", + }, + { + name: "simple select with new db", + in: "select * from _vt.foo", + newdb: "_vt_test", + out: "select * from _vt_test.foo", + }, + { + name: "simple select with new db same", + in: "select * from _vt.foo where id=1", // should be unchanged + newdb: "_vt", + out: "select * from _vt.foo where id=1", + }, + { + name: "simple select with new db needing escaping", + in: "select * from _vt.foo", + newdb: "1_vt-test", + out: "select * from `1_vt-test`.foo", + }, + { + name: "complex select", + in: "select table_name, lastpk from _vt.copy_state where vrepl_id = 1 and id in (select max(id) from _vt.copy_state where vrepl_id = 1 group by vrepl_id, table_name)", + out: "select table_name, lastpk from copy_state where vrepl_id = 1 and id in (select max(id) from copy_state where vrepl_id = 1 group by vrepl_id, table_name)", + }, + { + name: "complex mixed exists select", + in: "select workflow_name, db_name from _vt.vreplication where id = 1 and exists (select v1 from mydb.foo where fname = 'matt') and not exists (select v2 from _vt.newsidecartable where _vt.newsidecartable.id = _vt.vreplication.workflow_name)", + newdb: "_vt_import", + out: "select workflow_name, db_name from _vt_import.vreplication where id = 1 and exists (select v1 from mydb.foo where fname = 'matt') and not exists (select v2 from _vt_import.newsidecartable where _vt_import.newsidecartable.id = _vt_import.vreplication.workflow_name)", + }, + { + name: "derived table select", + in: "select myder.id from (select max(id) as id from _vt.copy_state where vrepl_id = 1 group by vrepl_id, table_name) as myder where id = 1", + newdb: "__vt-metadata", + out: "select myder.id from (select max(id) as id from `__vt-metadata`.copy_state where vrepl_id = 1 group by vrepl_id, table_name) as myder where id = 1", + }, + { + name: "complex select", + in: "select t1.col1, t2.col2 from _vt.t1 as t1 join _vt.t2 as t2 on t1.id = t2.id", + out: "select t1.col1, t2.col2 from t1 as t1 join t2 as t2 on t1.id = t2.id", + }, + { + name: "simple insert", + in: "insert into _vt.foo(id) values (1)", + out: "insert into foo(id) values (1)", + }, + { + name: "simple update", + in: "update _vt.foo set id = 1", + out: "update foo set id = 1", + }, + { + name: "simple delete", + in: "delete from _vt.foo where id = 1", + out: "delete from foo where id = 1", + }, + { + name: "simple set", + in: "set names 'binary'", + out: "set names 'binary'", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ReplaceTableQualifiers(tt.in, origDB, tt.newdb) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + require.Equal(t, tt.out, got, "RemoveTableQualifiers(); in: %s, out: %s", tt.in, got) + }) + } +} diff --git a/go/vt/sqlparser/walker_test.go b/go/vt/sqlparser/walker_test.go index f8bf2b4792a..560ed2ff470 100644 --- a/go/vt/sqlparser/walker_test.go +++ b/go/vt/sqlparser/walker_test.go @@ -18,6 +18,7 @@ package sqlparser import ( "fmt" + "math/rand" "testing" "github.com/stretchr/testify/require" @@ -26,7 +27,7 @@ import ( func BenchmarkWalkLargeExpression(b *testing.B) { for i := 0; i < 10; i++ { b.Run(fmt.Sprintf("%d", i), func(b *testing.B) { - exp := newGenerator(int64(i*100), 5).expression() + exp := NewGenerator(rand.New(rand.NewSource(int64(i*100))), 5).Expression(ExprGeneratorConfig{}) count := 0 for i := 0; i < b.N; i++ { err := Walk(func(node SQLNode) (kontinue bool, err error) { @@ -42,7 +43,7 @@ func BenchmarkWalkLargeExpression(b *testing.B) { func BenchmarkRewriteLargeExpression(b *testing.B) { for i := 1; i < 7; i++ { b.Run(fmt.Sprintf("%d", i), func(b *testing.B) { - exp := newGenerator(int64(i*100), i).expression() + exp := NewGenerator(rand.New(rand.NewSource(int64(i*100))), i).Expression(ExprGeneratorConfig{}) count := 0 for i := 0; i < b.N; i++ { _ = Rewrite(exp, func(_ *Cursor) bool { diff --git a/go/vt/srvtopo/discover_test.go b/go/vt/srvtopo/discover_test.go index c076ba0e7b7..ca4774a1b84 100644 --- a/go/vt/srvtopo/discover_test.go +++ b/go/vt/srvtopo/discover_test.go @@ -48,8 +48,9 @@ func (a TargetArray) Less(i, j int) bool { } func TestFindAllTargets(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") srvTopoCacheRefresh = 0 srvTopoCacheTTL = 0 @@ -58,7 +59,7 @@ func TestFindAllTargets(t *testing.T) { srvTopoCacheTTL = 1 * time.Second }() - rs := NewResilientServer(ts, "TestFindAllKeyspaceShards") + rs := NewResilientServer(ctx, ts, "TestFindAllKeyspaceShards") // No keyspace / shards. ks, err := FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}) diff --git a/go/vt/srvtopo/keyspace_filtering_server_test.go b/go/vt/srvtopo/keyspace_filtering_server_test.go index 83e1a18e062..bcd5681f3e8 100644 --- a/go/vt/srvtopo/keyspace_filtering_server_test.go +++ b/go/vt/srvtopo/keyspace_filtering_server_test.go @@ -48,10 +48,10 @@ var ( } ) -func newFiltering(filter []string) (*topo.Server, *srvtopotest.PassthroughSrvTopoServer, Server) { +func newFiltering(ctx context.Context, filter []string) (*topo.Server, *srvtopotest.PassthroughSrvTopoServer, Server) { testServer := srvtopotest.NewPassthroughSrvTopoServer() - testServer.TopoServer = memorytopo.NewServer(stockCell) + testServer.TopoServer = memorytopo.NewServer(ctx, stockCell) testServer.SrvKeyspaceNames = []string{"foo", "bar", "baz"} testServer.SrvKeyspace = &topodatapb.SrvKeyspace{} testServer.WatchedSrvVSchema = stockVSchema @@ -71,7 +71,9 @@ func TestFilteringServerHandlesNilUnderlying(t *testing.T) { } func TestFilteringServerReturnsUnderlyingServer(t *testing.T) { - _, _, f := newFiltering(nil) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, _, f := newFiltering(ctx, nil) got, gotErr := f.GetTopoServer() if gotErr != nil { t.Errorf("Got error getting topo.Server from FilteringServer") @@ -108,17 +110,23 @@ func doTestGetSrvKeyspaceNames( } func TestFilteringServerGetSrvKeyspameNamesFiltersEverythingOut(t *testing.T) { - _, _, f := newFiltering(nil) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, _, f := newFiltering(ctx, nil) doTestGetSrvKeyspaceNames(t, f, stockCell, []string{}, nil) } func TestFilteringServerGetSrvKeyspaceNamesFiltersKeyspaces(t *testing.T) { - _, _, f := newFiltering(stockFilters) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, _, f := newFiltering(ctx, stockFilters) doTestGetSrvKeyspaceNames(t, f, stockCell, stockFilters, nil) } func TestFilteringServerGetSrvKeyspaceNamesPassesThroughErrors(t *testing.T) { - _, mock, f := newFiltering(stockFilters) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, mock, f := newFiltering(ctx, stockFilters) wantErr := fmt.Errorf("some badcell error") mock.SrvKeyspaceNamesError = wantErr doTestGetSrvKeyspaceNames(t, f, "badcell", stockFilters, wantErr) @@ -140,28 +148,36 @@ func doTestGetSrvKeyspace( } func TestFilteringServerGetSrvKeyspaceReturnsSelectedKeyspaces(t *testing.T) { - _, mock, f := newFiltering(stockFilters) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, mock, f := newFiltering(ctx, stockFilters) mock.SrvKeyspace = stockKeyspaces["bar"] doTestGetSrvKeyspace(t, f, stockCell, "bar", stockKeyspaces["bar"], nil) } func TestFilteringServerGetSrvKeyspaceErrorPassthrough(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() wantErr := fmt.Errorf("some error") - _, mock, f := newFiltering(stockFilters) + _, mock, f := newFiltering(ctx, stockFilters) mock.SrvKeyspace = stockKeyspaces["bar"] mock.SrvKeyspaceError = wantErr doTestGetSrvKeyspace(t, f, "badcell", "bar", stockKeyspaces["bar"], wantErr) } func TestFilteringServerGetSrvKeyspaceFilters(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() wantErr := topo.NewError(topo.NoNode, "foo") - _, mock, f := newFiltering(stockFilters) + _, mock, f := newFiltering(ctx, stockFilters) mock.SrvKeyspaceError = wantErr doTestGetSrvKeyspace(t, f, stockCell, "foo", nil, wantErr) } func TestFilteringServerWatchSrvVSchemaFiltersPassthroughSrvVSchema(t *testing.T) { - _, mock, f := newFiltering(stockFilters) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, mock, f := newFiltering(ctx, stockFilters) allowed := map[string]bool{} for _, ks := range stockFilters { @@ -196,8 +212,11 @@ func TestFilteringServerWatchSrvVSchemaFiltersPassthroughSrvVSchema(t *testing.T } func TestFilteringServerWatchSrvVSchemaHandlesNilSchema(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + wantErr := fmt.Errorf("some err") - _, mock, f := newFiltering(stockFilters) + _, mock, f := newFiltering(ctx, stockFilters) mock.WatchedSrvVSchema = nil mock.WatchedSrvVSchemaError = wantErr diff --git a/go/vt/srvtopo/query.go b/go/vt/srvtopo/query.go index 098f5c77bc1..ec1ed50100a 100644 --- a/go/vt/srvtopo/query.go +++ b/go/vt/srvtopo/query.go @@ -86,7 +86,12 @@ func (q *resilientQuery) getCurrentValue(ctx context.Context, wkey fmt.Stringer, // If it is not time to check again, then return either the cached // value or the cached error but don't ask topo again. - if !shouldRefresh { + // Here we have to be careful with the part where we haven't gotten even the first result. + // In that case, a refresh is already in progress, but the cache is empty! So, we can't use the cache. + // We have to wait for the query's results. + // We know the query has run at least once if the insertionTime is non-zero, or if we have an error. + queryRanAtLeastOnce := !entry.insertionTime.IsZero() || entry.lastError != nil + if !shouldRefresh && queryRanAtLeastOnce { if cacheValid { return entry.value, nil } diff --git a/go/vt/srvtopo/query_test.go b/go/vt/srvtopo/query_test.go new file mode 100644 index 00000000000..2569a2ad420 --- /dev/null +++ b/go/vt/srvtopo/query_test.go @@ -0,0 +1,72 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package srvtopo + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/stats" +) + +// TestResilientQueryGetCurrentValueInitialization tests that the resilient query returns the correct results when it has been +// initialized. +func TestResilientQueryGetCurrentValueInitialization(t *testing.T) { + // Create a basic query, which doesn't do anything other than return the same cell it got as an input. + // The query however needs to simulate being slow, so we have a sleep in there. + query := func(ctx context.Context, entry *queryEntry) (any, error) { + time.Sleep(1 * time.Second) + cell := entry.key.(cellName) + return cell, nil + } + counts := stats.NewCountersWithSingleLabel("TestResilientQueryGetCurrentValue", "Test for resilient query", "type") + + // Create the resilient query + rq := &resilientQuery{ + query: query, + counts: counts, + cacheRefreshInterval: 5 * time.Second, + cacheTTL: 5 * time.Second, + entries: make(map[string]*queryEntry), + } + + // Create a context and a cell. + ctx := context.Background() + cell := cellName("cell-1") + + // Hammer the resilient query with multiple get requests just as it is created. + // We expect all of them to work. + wg := sync.WaitGroup{} + for i := 0; i < 10; i++ { + // To test with both stale and not-stale, we use the modulo of our index. + stale := i%2 == 0 + wg.Add(1) + go func() { + defer wg.Done() + res, err := rq.getCurrentValue(ctx, cell, stale) + // Assert that we don't have any error and the value matches what we want. + assert.NoError(t, err) + assert.EqualValues(t, cell, res) + }() + } + // Wait for the wait group to be empty, otherwise the test is marked a success before any of the go routines finish completion! + wg.Wait() +} diff --git a/go/vt/srvtopo/resilient_server.go b/go/vt/srvtopo/resilient_server.go index cac368dedd9..d1521952ab0 100644 --- a/go/vt/srvtopo/resilient_server.go +++ b/go/vt/srvtopo/resilient_server.go @@ -17,6 +17,7 @@ limitations under the License. package srvtopo import ( + "context" "time" "github.com/spf13/pflag" @@ -78,7 +79,7 @@ type ResilientServer struct { // NewResilientServer creates a new ResilientServer // based on the provided topo.Server. -func NewResilientServer(base *topo.Server, counterPrefix string) *ResilientServer { +func NewResilientServer(ctx context.Context, base *topo.Server, counterPrefix string) *ResilientServer { if srvTopoCacheRefresh > srvTopoCacheTTL { log.Fatalf("srv_topo_cache_refresh must be less than or equal to srv_topo_cache_ttl") } @@ -94,8 +95,8 @@ func NewResilientServer(base *topo.Server, counterPrefix string) *ResilientServe return &ResilientServer{ topoServer: base, counts: counts, - SrvKeyspaceWatcher: NewSrvKeyspaceWatcher(base, counts, srvTopoCacheRefresh, srvTopoCacheTTL), - SrvVSchemaWatcher: NewSrvVSchemaWatcher(base, counts, srvTopoCacheRefresh, srvTopoCacheTTL), + SrvKeyspaceWatcher: NewSrvKeyspaceWatcher(ctx, base, counts, srvTopoCacheRefresh, srvTopoCacheTTL), + SrvVSchemaWatcher: NewSrvVSchemaWatcher(ctx, base, counts, srvTopoCacheRefresh, srvTopoCacheTTL), SrvKeyspaceNamesQuery: NewSrvKeyspaceNamesQuery(base, counts, srvTopoCacheRefresh, srvTopoCacheTTL), } } diff --git a/go/vt/srvtopo/resilient_server_test.go b/go/vt/srvtopo/resilient_server_test.go index 47ff6df81e2..c237d43f300 100644 --- a/go/vt/srvtopo/resilient_server_test.go +++ b/go/vt/srvtopo/resilient_server_test.go @@ -20,20 +20,20 @@ import ( "bytes" "context" "fmt" - "html/template" "reflect" "sync" "sync/atomic" "testing" "time" + "github.com/google/safehtml/template" + "vitess.io/vitess/go/vt/key" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/vt/status" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -43,7 +43,9 @@ import ( // TestGetSrvKeyspace will test we properly return updated SrvKeyspace. func TestGetSrvKeyspace(t *testing.T) { - ts, factory := memorytopo.NewServerAndFactory("test_cell") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, factory := memorytopo.NewServerAndFactory(ctx, "test_cell") srvTopoCacheTTL = 200 * time.Millisecond srvTopoCacheRefresh = 80 * time.Millisecond defer func() { @@ -51,7 +53,7 @@ func TestGetSrvKeyspace(t *testing.T) { srvTopoCacheRefresh = 1 * time.Second }() - rs := NewResilientServer(ts, "TestGetSrvKeyspace") + rs := NewResilientServer(ctx, ts, "TestGetSrvKeyspace") // Ask for a not-yet-created keyspace _, err := rs.GetSrvKeyspace(context.Background(), "test_cell", "test_ks") @@ -106,9 +108,6 @@ func TestGetSrvKeyspace(t *testing.T) { // make sure the HTML template works funcs := map[string]any{} - for k, v := range status.StatusFuncs { - funcs[k] = v - } for k, v := range StatusFuncs { funcs[k] = v } @@ -362,17 +361,18 @@ func TestGetSrvKeyspace(t *testing.T) { // TestSrvKeyspaceCachedError will test we properly re-try to query // the topo server upon failure. func TestSrvKeyspaceCachedError(t *testing.T) { - ts := memorytopo.NewServer("test_cell") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") srvTopoCacheTTL = 100 * time.Millisecond srvTopoCacheRefresh = 40 * time.Millisecond defer func() { srvTopoCacheTTL = 1 * time.Second srvTopoCacheRefresh = 1 * time.Second }() - rs := NewResilientServer(ts, "TestSrvKeyspaceCachedErrors") + rs := NewResilientServer(ctx, ts, "TestSrvKeyspaceCachedErrors") // Ask for an unknown keyspace, should get an error. - ctx := context.Background() _, err := rs.GetSrvKeyspace(ctx, "test_cell", "unknown_ks") if err == nil { t.Fatalf("First GetSrvKeyspace didn't return an error") @@ -385,8 +385,6 @@ func TestSrvKeyspaceCachedError(t *testing.T) { time.Sleep(srvTopoCacheTTL + 10*time.Millisecond) // Ask again with a different context, should get an error and // save that context. - ctx, cancel := context.WithCancel(ctx) - defer cancel() _, err2 := rs.GetSrvKeyspace(ctx, "test_cell", "unknown_ks") if err2 == nil { t.Fatalf("Second GetSrvKeyspace didn't return an error") @@ -399,8 +397,11 @@ func TestSrvKeyspaceCachedError(t *testing.T) { // TestGetSrvKeyspaceCreated will test we properly get the initial // value if the SrvKeyspace already exists. func TestGetSrvKeyspaceCreated(t *testing.T) { - ts := memorytopo.NewServer("test_cell") - rs := NewResilientServer(ts, "TestGetSrvKeyspaceCreated") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") + defer ts.Close() + rs := NewResilientServer(ctx, ts, "TestGetSrvKeyspaceCreated") // Set SrvKeyspace with value. want := &topodatapb.SrvKeyspace{} @@ -431,9 +432,10 @@ func TestGetSrvKeyspaceCreated(t *testing.T) { func TestWatchSrvVSchema(t *testing.T) { srvTopoCacheRefresh = 10 * time.Millisecond - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - rs := NewResilientServer(ts, "TestWatchSrvVSchema") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") + rs := NewResilientServer(ctx, ts, "TestWatchSrvVSchema") // mu protects watchValue and watchErr. mu := sync.Mutex{} @@ -515,14 +517,19 @@ func TestWatchSrvVSchema(t *testing.T) { } func TestGetSrvKeyspaceNames(t *testing.T) { - ts, factory := memorytopo.NewServerAndFactory("test_cell") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, factory := memorytopo.NewServerAndFactory(ctx, "test_cell") + + time.Sleep(1 * time.Second) + srvTopoCacheTTL = 100 * time.Millisecond srvTopoCacheRefresh = 40 * time.Millisecond defer func() { srvTopoCacheTTL = 1 * time.Second srvTopoCacheRefresh = 1 * time.Second }() - rs := NewResilientServer(ts, "TestGetSrvKeyspaceNames") + rs := NewResilientServer(ctx, ts, "TestGetSrvKeyspaceNames") // Set SrvKeyspace with value want := &topodatapb.SrvKeyspace{} @@ -532,7 +539,6 @@ func TestGetSrvKeyspaceNames(t *testing.T) { err = ts.UpdateSrvKeyspace(context.Background(), "test_cell", "test_ks2", want) require.NoError(t, err, "UpdateSrvKeyspace(test_cell, test_ks2, %s) failed", want) - ctx := context.Background() names, err := rs.GetSrvKeyspaceNames(ctx, "test_cell", false) if err != nil { t.Errorf("GetSrvKeyspaceNames unexpected error %v", err) @@ -647,8 +653,8 @@ func TestGetSrvKeyspaceNames(t *testing.T) { time.Sleep(srvTopoCacheTTL) - timeoutCtx, cancel := context.WithTimeout(context.Background(), srvTopoCacheRefresh*2) //nolint - defer cancel() + timeoutCtx, timeoutCancel := context.WithTimeout(context.Background(), srvTopoCacheRefresh*2) //nolint + defer timeoutCancel() _, err = rs.GetSrvKeyspaceNames(timeoutCtx, "test_cell", false) if err != context.DeadlineExceeded { t.Errorf("expected error '%v', got '%v'", context.DeadlineExceeded, err.Error()) @@ -669,7 +675,9 @@ func (w *watched) equals(other *watched) bool { } func TestSrvKeyspaceWatcher(t *testing.T) { - ts, factory := memorytopo.NewServerAndFactory("test_cell") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, factory := memorytopo.NewServerAndFactory(ctx, "test_cell") srvTopoCacheTTL = 100 * time.Millisecond srvTopoCacheRefresh = 40 * time.Millisecond defer func() { @@ -677,7 +685,7 @@ func TestSrvKeyspaceWatcher(t *testing.T) { srvTopoCacheRefresh = 1 * time.Second }() - rs := NewResilientServer(ts, "TestGetSrvKeyspaceWatcher") + rs := NewResilientServer(ctx, ts, "TestGetSrvKeyspaceWatcher") var wmu sync.Mutex var wseen []watched @@ -793,7 +801,9 @@ func TestSrvKeyspaceWatcher(t *testing.T) { } func TestSrvKeyspaceListener(t *testing.T) { - ts, _ := memorytopo.NewServerAndFactory("test_cell") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") srvTopoCacheTTL = 100 * time.Millisecond srvTopoCacheRefresh = 40 * time.Millisecond defer func() { @@ -801,16 +811,16 @@ func TestSrvKeyspaceListener(t *testing.T) { srvTopoCacheRefresh = 1 * time.Second }() - rs := NewResilientServer(ts, "TestGetSrvKeyspaceWatcher") + rs := NewResilientServer(ctx, ts, "TestGetSrvKeyspaceWatcher") - ctx, cancel := context.WithCancel(context.Background()) + cancelCtx, cancelFunc := context.WithCancel(context.Background()) var callbackCount atomic.Int32 // adding listener will perform callback. - rs.WatchSrvKeyspace(context.Background(), "test_cell", "test_ks", func(srvKs *topodatapb.SrvKeyspace, err error) bool { + rs.WatchSrvKeyspace(ctx, "test_cell", "test_ks", func(srvKs *topodatapb.SrvKeyspace, err error) bool { callbackCount.Add(1) select { - case <-ctx.Done(): + case <-cancelCtx.Done(): return false default: return true @@ -819,16 +829,16 @@ func TestSrvKeyspaceListener(t *testing.T) { // First update (callback - 2) want := &topodatapb.SrvKeyspace{} - err := ts.UpdateSrvKeyspace(context.Background(), "test_cell", "test_ks", want) + err := ts.UpdateSrvKeyspace(ctx, "test_cell", "test_ks", want) require.NoError(t, err) // Next callback to remove from listener - cancel() + cancelFunc() // multi updates thereafter for i := 0; i < 5; i++ { want = &topodatapb.SrvKeyspace{} - err = ts.UpdateSrvKeyspace(context.Background(), "test_cell", "test_ks", want) + err = ts.UpdateSrvKeyspace(ctx, "test_cell", "test_ks", want) require.NoError(t, err) time.Sleep(100 * time.Millisecond) } diff --git a/go/vt/srvtopo/resolver.go b/go/vt/srvtopo/resolver.go index 2cb3fed676c..98d77e259ef 100644 --- a/go/vt/srvtopo/resolver.go +++ b/go/vt/srvtopo/resolver.go @@ -17,12 +17,11 @@ limitations under the License. package srvtopo import ( + "context" "sort" "vitess.io/vitess/go/sqltypes" - "context" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/key" @@ -43,6 +42,9 @@ type Gateway interface { // QueryServiceByAlias returns a QueryService QueryServiceByAlias(alias *topodatapb.TabletAlias, target *querypb.Target) (queryservice.QueryService, error) + + // GetServingKeyspaces returns list of serving keyspaces. + GetServingKeyspaces() []string } // A Resolver can resolve keyspace ids and key ranges into ResolvedShard* diff --git a/go/vt/srvtopo/resolver_test.go b/go/vt/srvtopo/resolver_test.go index 49b108fcffb..95e6dbe620c 100644 --- a/go/vt/srvtopo/resolver_test.go +++ b/go/vt/srvtopo/resolver_test.go @@ -17,12 +17,11 @@ limitations under the License. package srvtopo import ( + "context" "testing" "github.com/stretchr/testify/require" - "context" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/logutil" @@ -34,11 +33,10 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) -func initResolver(t *testing.T, name string) *Resolver { - ctx := context.Background() +func initResolver(t *testing.T, ctx context.Context, name string) *Resolver { cell := "cell1" - ts := memorytopo.NewServer(cell) - rs := NewResilientServer(ts, name) + ts := memorytopo.NewServer(ctx, cell) + rs := NewResilientServer(ctx, ts, name) // Create sharded keyspace and shards. if err := ts.CreateKeyspace(ctx, "sks", &topodatapb.Keyspace{}); err != nil { @@ -97,7 +95,9 @@ func initResolver(t *testing.T, name string) *Resolver { } func TestResolveDestinations(t *testing.T) { - resolver := initResolver(t, "TestResolveDestinations") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + resolver := initResolver(t, ctx, "TestResolveDestinations") id1 := &querypb.Value{ Type: sqltypes.VarChar, diff --git a/go/vt/srvtopo/status.go b/go/vt/srvtopo/status.go index 350390c4c6c..b3069be6c38 100644 --- a/go/vt/srvtopo/status.go +++ b/go/vt/srvtopo/status.go @@ -18,10 +18,12 @@ package srvtopo import ( "context" - "html/template" "sort" "time" + "github.com/google/safehtml" + "github.com/google/safehtml/template" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -49,8 +51,8 @@ const TopoTemplate = `
{{range $i, $skn := .SrvKeyspaceNames}} - - + + @@ -70,8 +72,8 @@ const TopoTemplate = ` {{range $i, $sk := .SrvKeyspaces}} - - + + @@ -121,30 +123,35 @@ type SrvKeyspaceCacheStatus struct { LastError error } +var noData = safehtml.HTMLEscaped("No Data") +var partitions = template.Must(template.New("partitions").Parse(` +Partitions:
+{{ range .Partitions }} + {{ .ServedType }}: +{{ range .ShardReferences }} + {{ .Name }} +{{ end }} +
+{{ end }} +{{if .ServedFrom }} +ServedFrom:
+{{ range .ServedFrom }} + {{ .TabletType }}: {{ .Keyspace}}

+{{ end }} +{{ end }} +`)) + // StatusAsHTML returns an HTML version of our status. // It works best if there is data in the cache. -func (st *SrvKeyspaceCacheStatus) StatusAsHTML() template.HTML { +func (st *SrvKeyspaceCacheStatus) StatusAsHTML() safehtml.HTML { if st.Value == nil { - return template.HTML("No Data") - } - - result := "Partitions:
" - for _, keyspacePartition := range st.Value.Partitions { - result += " " + keyspacePartition.ServedType.String() + ":" - for _, shard := range keyspacePartition.ShardReferences { - result += " " + shard.Name - } - result += "
" + return noData } - - if len(st.Value.ServedFrom) > 0 { - result += "ServedFrom:
" - for _, sf := range st.Value.ServedFrom { - result += " " + sf.TabletType.String() + ": " + sf.Keyspace + "
" - } + html, err := partitions.ExecuteToHTML(st.Value) + if err != nil { + panic(err) } - - return template.HTML(result) + return html } // SrvKeyspaceCacheStatusList is used for sorting @@ -183,17 +190,19 @@ func (server *ResilientServer) CacheStatus() *ResilientServerCacheStatus { return result } +var expired = template.MustParseAndExecuteToHTML("Expired") + // Returns the ttl for the cached entry or "Expired" if it is in the past -func ttlTime(expirationTime time.Time) template.HTML { +func ttlTime(expirationTime time.Time) safehtml.HTML { ttl := time.Until(expirationTime).Round(time.Second) if ttl < 0 { - return template.HTML("Expired") + return expired } - return template.HTML(ttl.String()) + return safehtml.HTMLEscaped(ttl.String()) } -func timeSince(t time.Time) template.HTML { - return template.HTML(time.Since(t).Round(time.Second).String()) +func timeSince(t time.Time) safehtml.HTML { + return safehtml.HTMLEscaped(time.Since(t).Round(time.Second).String()) } // StatusFuncs is required for CacheStatus) to work properly. diff --git a/go/vt/srvtopo/watch.go b/go/vt/srvtopo/watch.go index 2d571f4930f..36d8fd428bd 100644 --- a/go/vt/srvtopo/watch.go +++ b/go/vt/srvtopo/watch.go @@ -33,6 +33,7 @@ const ( watchStateIdle watchState = iota watchStateStarting watchStateRunning + watchStateStopped ) type watchEntry struct { @@ -100,7 +101,10 @@ func (entry *watchEntry) addListener(ctx context.Context, callback func(any, err callback(v, err) } -func (entry *watchEntry) ensureWatchingLocked() { +func (entry *watchEntry) ensureWatchingLocked(ctx context.Context) { + if ctx.Err() != nil { + return + } switch entry.watchState { case watchStateRunning, watchStateStarting: case watchStateIdle: @@ -121,7 +125,7 @@ func (entry *watchEntry) currentValueLocked(ctx context.Context) (any, error) { return entry.value, entry.lastError } - entry.ensureWatchingLocked() + entry.ensureWatchingLocked(ctx) cacheValid := entry.value != nil && time.Since(entry.lastValueTime) < entry.rw.cacheTTL if cacheValid { @@ -146,12 +150,12 @@ func (entry *watchEntry) currentValueLocked(ctx context.Context) (any, error) { return nil, entry.lastError } -func (entry *watchEntry) update(value any, err error, init bool) { +func (entry *watchEntry) update(ctx context.Context, value any, err error, init bool) { entry.mutex.Lock() defer entry.mutex.Unlock() if err != nil { - entry.onErrorLocked(err, init) + entry.onErrorLocked(ctx, err, init) } else { entry.onValueLocked(value) } @@ -179,7 +183,7 @@ func (entry *watchEntry) onValueLocked(value any) { entry.lastErrorTime = time.Time{} } -func (entry *watchEntry) onErrorLocked(err error, init bool) { +func (entry *watchEntry) onErrorLocked(ctx context.Context, err error, init bool) { entry.rw.counts.Add(errorCategory, 1) entry.lastErrorTime = time.Now() @@ -195,7 +199,7 @@ func (entry *watchEntry) onErrorLocked(err error, init bool) { // This watcher will able to continue to return the last value till it is not able to connect to the topo server even if the cache TTL is reached. // TTL cache is only checked if the error is a known error i.e topo.Error. _, isTopoErr := err.(topo.Error) - if isTopoErr && time.Since(entry.lastValueTime) > entry.rw.cacheTTL { + if entry.value != nil && isTopoErr && time.Since(entry.lastValueTime) > entry.rw.cacheTTL { log.Errorf("WatchSrvKeyspace clearing cached entry for %v", entry.key) entry.value = nil } @@ -217,12 +221,13 @@ func (entry *watchEntry) onErrorLocked(err error, init bool) { entry.watchState = watchStateIdle // only retry the watch if we haven't been explicitly interrupted + if len(entry.listeners) > 0 && !topo.IsErrType(err, topo.Interrupted) { go func() { time.Sleep(entry.rw.cacheRefreshInterval) entry.mutex.Lock() - entry.ensureWatchingLocked() + entry.ensureWatchingLocked(ctx) entry.mutex.Unlock() }() } diff --git a/go/vt/srvtopo/watch_srvkeyspace.go b/go/vt/srvtopo/watch_srvkeyspace.go index e47e810b615..cefe95c6951 100644 --- a/go/vt/srvtopo/watch_srvkeyspace.go +++ b/go/vt/srvtopo/watch_srvkeyspace.go @@ -37,25 +37,25 @@ func (k *srvKeyspaceKey) String() string { return k.cell + "." + k.keyspace } -func NewSrvKeyspaceWatcher(topoServer *topo.Server, counts *stats.CountersWithSingleLabel, cacheRefresh, cacheTTL time.Duration) *SrvKeyspaceWatcher { +func NewSrvKeyspaceWatcher(ctx context.Context, topoServer *topo.Server, counts *stats.CountersWithSingleLabel, cacheRefresh, cacheTTL time.Duration) *SrvKeyspaceWatcher { watch := func(entry *watchEntry) { key := entry.key.(*srvKeyspaceKey) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + requestCtx, requestCancel := context.WithCancel(context.Background()) + defer requestCancel() - current, changes, err := topoServer.WatchSrvKeyspace(ctx, key.cell, key.keyspace) + current, changes, err := topoServer.WatchSrvKeyspace(requestCtx, key.cell, key.keyspace) if err != nil { - entry.update(nil, err, true) + entry.update(ctx, nil, err, true) return } - entry.update(current.Value, current.Err, true) + entry.update(ctx, current.Value, current.Err, true) if current.Err != nil { return } for c := range changes { - entry.update(c.Value, c.Err, false) + entry.update(ctx, c.Value, c.Err, false) if c.Err != nil { return } diff --git a/go/vt/srvtopo/watch_srvvschema.go b/go/vt/srvtopo/watch_srvvschema.go index 251f5e55644..1b5536e623d 100644 --- a/go/vt/srvtopo/watch_srvvschema.go +++ b/go/vt/srvtopo/watch_srvvschema.go @@ -35,26 +35,25 @@ func (k cellName) String() string { return string(k) } -func NewSrvVSchemaWatcher(topoServer *topo.Server, counts *stats.CountersWithSingleLabel, cacheRefresh, cacheTTL time.Duration) *SrvVSchemaWatcher { +func NewSrvVSchemaWatcher(ctx context.Context, topoServer *topo.Server, counts *stats.CountersWithSingleLabel, cacheRefresh, cacheTTL time.Duration) *SrvVSchemaWatcher { watch := func(entry *watchEntry) { key := entry.key.(cellName) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + requestCtx, requestCancel := context.WithCancel(ctx) + defer requestCancel() - current, changes, err := topoServer.WatchSrvVSchema(ctx, key.String()) + current, changes, err := topoServer.WatchSrvVSchema(requestCtx, key.String()) if err != nil { - entry.update(nil, err, true) + entry.update(ctx, nil, err, true) return } - entry.update(current.Value, current.Err, true) + entry.update(ctx, current.Value, current.Err, true) if current.Err != nil { return } - defer cancel() for c := range changes { - entry.update(c.Value, c.Err, false) + entry.update(ctx, c.Value, c.Err, false) if c.Err != nil { return } diff --git a/go/vt/status/status.go b/go/vt/status/status.go index ad11af5050b..61ea9b8c1bf 100644 --- a/go/vt/status/status.go +++ b/go/vt/status/status.go @@ -19,20 +19,15 @@ limitations under the License. package status import ( - "fmt" - "html/template" - "net/url" - "strings" - "github.com/spf13/pflag" "vitess.io/vitess/go/vt/servenv" ) -var vtctldAddr string - +// TODO(deepthi): This entire file (and package) can be deleted after v17 func registerFlags(fs *pflag.FlagSet) { - fs.StringVar(&vtctldAddr, "vtctld_addr", vtctldAddr, "address of a vtctld instance") + fs.String("vtctld_addr", "", "address of a vtctld instance") + _ = fs.MarkDeprecated("vtctld_addr", "will be removed after v17") } func init() { @@ -40,103 +35,3 @@ func init() { servenv.OnParseFor("vtgate", registerFlags) servenv.OnParseFor("vttablet", registerFlags) } - -// MakeVtctldRedirect returns an absolute vtctld url that will -// redirect to the page for the topology object specified in q. -func MakeVtctldRedirect(text string, q map[string]string) template.HTML { - query := url.Values{} - for k, v := range q { - query.Set(k, v) - } - url := "explorers/redirect" + "?" + query.Encode() - return VtctldLink(text, url) -} - -// VtctldLink returns the HTML to display a link to the fully -// qualified vtctld url whose path is given as parameter. -// If no vtctld_addr flag was passed in, we just return the text with no link. -func VtctldLink(text, urlPath string) template.HTML { - if vtctldAddr == "" { - return template.HTML(text) - } - var fullURL string - if strings.HasSuffix(vtctldAddr, "/") { - fullURL = vtctldAddr + urlPath - } else { - fullURL = vtctldAddr + "/" + urlPath - } - - return template.HTML(fmt.Sprintf(`%v`, fullURL, text)) -} - -// VtctldKeyspace returns the keyspace name, possibly linked to the -// keyspace page in vtctld. -func VtctldKeyspace(keyspace string) template.HTML { - return MakeVtctldRedirect(keyspace, - map[string]string{ - "type": "keyspace", - "keyspace": keyspace, - }) -} - -// VtctldShard returns the shard name, possibly linked to the shard -// page in vtctld. -func VtctldShard(keyspace, shard string) template.HTML { - return MakeVtctldRedirect(shard, map[string]string{ - "type": "shard", - "keyspace": keyspace, - "shard": shard, - }) -} - -// VtctldSrvCell returns the cell name, possibly linked to the -// serving graph page in vtctld for that page. -func VtctldSrvCell(cell string) template.HTML { - return VtctldLink(cell, "serving_graph/"+cell) -} - -// VtctldSrvKeyspace returns the keyspace name, possibly linked to the -// SrvKeyspace page in vtctld. -func VtctldSrvKeyspace(cell, keyspace string) template.HTML { - return MakeVtctldRedirect(keyspace, map[string]string{ - "type": "srv_keyspace", - "cell": cell, - "keyspace": keyspace, - }) -} - -// VtctldReplication returns 'cell/keyspace/shard', possibly linked to the -// ShardReplication page in vtctld. -func VtctldReplication(cell, keyspace, shard string) template.HTML { - return MakeVtctldRedirect(fmt.Sprintf("%v/%v/%v", cell, keyspace, shard), - map[string]string{ - "type": "replication", - "keyspace": keyspace, - "shard": shard, - "cell": cell, - }) -} - -// VtctldTablet returns the tablet alias, possibly linked to the -// Tablet page in vtctld. -func VtctldTablet(aliasName string) template.HTML { - return MakeVtctldRedirect(aliasName, map[string]string{ - "type": "tablet", - "alias": aliasName, - }) -} - -// StatusFuncs returns a FuncMap that contains all of our methods here. -// It is exported so tests can use them. -var StatusFuncs = template.FuncMap{ - "github_com_vitessio_vitess_vtctld_keyspace": VtctldKeyspace, - "github_com_vitessio_vitess_vtctld_shard": VtctldShard, - "github_com_vitessio_vitess_vtctld_srv_cell": VtctldSrvCell, - "github_com_vitessio_vitess_vtctld_srv_keyspace": VtctldSrvKeyspace, - "github_com_vitessio_vitess_vtctld_replication": VtctldReplication, - "github_com_vitessio_vitess_vtctld_tablet": VtctldTablet, -} - -func init() { - servenv.AddStatusFuncs(StatusFuncs) -} diff --git a/go/vt/sysvars/sysvars.go b/go/vt/sysvars/sysvars.go index c4939d5c63e..98da8ff07b7 100644 --- a/go/vt/sysvars/sysvars.go +++ b/go/vt/sysvars/sysvars.go @@ -73,7 +73,10 @@ var ( QueryTimeout = SystemVariable{Name: "query_timeout"} // Online DDL - DDLStrategy = SystemVariable{Name: "ddl_strategy", IdentifierAsString: true} + DDLStrategy = SystemVariable{Name: "ddl_strategy", IdentifierAsString: true} + MigrationContext = SystemVariable{Name: "migration_context", IdentifierAsString: true} + + // Version Version = SystemVariable{Name: "version"} VersionComment = SystemVariable{Name: "version_comment"} @@ -95,6 +98,7 @@ var ( Charset, Names, SessionUUID, + MigrationContext, SessionEnableSystemSettings, ReadAfterWriteGTID, ReadAfterWriteTimeOut, diff --git a/go/vt/tableacl/tableacl.go b/go/vt/tableacl/tableacl.go index 4ee46ae7739..9a6e6eeba4e 100644 --- a/go/vt/tableacl/tableacl.go +++ b/go/vt/tableacl/tableacl.go @@ -26,13 +26,11 @@ import ( "sync" "github.com/tchap/go-patricia/patricia" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/json2" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/tableacl/acl" - tableaclpb "vitess.io/vitess/go/vt/proto/tableacl" + "vitess.io/vitess/go/vt/tableacl/acl" ) // ACLResult embeds an acl.ACL and also tell which table group it belongs to. @@ -188,7 +186,7 @@ func (tacl *tableACL) Set(config *tableaclpb.Config) error { } tacl.Lock() tacl.entries = entries - tacl.config = proto.Clone(config).(*tableaclpb.Config) + tacl.config = config.CloneVT() callback := tacl.callback tacl.Unlock() if callback != nil { @@ -277,7 +275,7 @@ func GetCurrentConfig() *tableaclpb.Config { func (tacl *tableACL) Config() *tableaclpb.Config { tacl.RLock() defer tacl.RUnlock() - return proto.Clone(tacl.config).(*tableaclpb.Config) + return tacl.config.CloneVT() } // Register registers an AclFactory. diff --git a/go/vt/tableacl/testlib/testlib.go b/go/vt/tableacl/testlib/testlib.go index 3c30c43d8dc..bdde9ae800f 100644 --- a/go/vt/tableacl/testlib/testlib.go +++ b/go/vt/tableacl/testlib/testlib.go @@ -21,7 +21,6 @@ import ( "fmt" "math/rand" "testing" - "time" querypb "vitess.io/vitess/go/vt/proto/query" tableaclpb "vitess.io/vitess/go/vt/proto/tableacl" @@ -127,7 +126,3 @@ func checkAccess(config *tableaclpb.Config, tableName string, role tableacl.Role } return nil } - -func init() { - rand.Seed(time.Now().UnixNano()) -} diff --git a/go/vt/throttler/demo/throttler_demo.go b/go/vt/throttler/demo/throttler_demo.go index 3593bc0806d..126b9098236 100644 --- a/go/vt/throttler/demo/throttler_demo.go +++ b/go/vt/throttler/demo/throttler_demo.go @@ -231,13 +231,13 @@ type client struct { healthcheckCh chan *discovery.TabletHealth } -func newClient(primary *primary, replica *replica, ts *topo.Server) *client { +func newClient(ctx context.Context, primary *primary, replica *replica, ts *topo.Server) *client { t, err := throttler.NewThrottler("client", "TPS", 1, throttler.MaxRateModuleDisabled, 5 /* seconds */) if err != nil { log.Fatal(err) } - healthCheck := discovery.NewHealthCheck(context.Background(), 5*time.Second, 1*time.Minute, ts, "cell1", "") + healthCheck := discovery.NewHealthCheck(ctx, 5*time.Second, 1*time.Minute, ts, "cell1", "") c := &client{ primary: primary, healthCheck: healthCheck, @@ -302,15 +302,15 @@ func main() { servenv.ParseFlags(flagSetName) go servenv.RunDefault() - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + servenv.HTTPHandleFunc("/", func(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, "/throttlerz", http.StatusTemporaryRedirect) }) log.Infof("start rate set to: %v", rate) - ts := memorytopo.NewServer("cell1") + ts := memorytopo.NewServer(context.Background(), "cell1") replica := newReplica(lagUpdateInterval, replicaDegrationInterval, replicaDegrationDuration, ts) primary := &primary{replica: replica} - client := newClient(primary, replica, ts) + client := newClient(context.Background(), primary, replica, ts) client.run() time.Sleep(duration) diff --git a/go/vt/throttler/manager_test.go b/go/vt/throttler/manager_test.go index 8c0e6ae4563..e6c3359b242 100644 --- a/go/vt/throttler/manager_test.go +++ b/go/vt/throttler/manager_test.go @@ -24,8 +24,6 @@ import ( "testing" "time" - "google.golang.org/protobuf/proto" - throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata" ) @@ -225,7 +223,7 @@ func TestManager_UpdateConfiguration_ZeroValues(t *testing.T) { defer f.tearDown() // Test the explicit copy of zero values. - zeroValueConfig := proto.Clone(defaultMaxReplicationLagModuleConfig.Configuration).(*throttlerdatapb.Configuration) + zeroValueConfig := defaultMaxReplicationLagModuleConfig.Configuration.CloneVT() zeroValueConfig.IgnoreNSlowestReplicas = 0 names, err := f.m.UpdateConfiguration("t2", zeroValueConfig, true /* copyZeroValues */) if err != nil { diff --git a/go/vt/throttler/max_replication_lag_module.go b/go/vt/throttler/max_replication_lag_module.go index bd4666ec92f..f08c9211205 100644 --- a/go/vt/throttler/max_replication_lag_module.go +++ b/go/vt/throttler/max_replication_lag_module.go @@ -207,7 +207,7 @@ func (m *MaxReplicationLagModule) applyLatestConfig() { func (m *MaxReplicationLagModule) getConfiguration() *throttlerdatapb.Configuration { m.mutableConfigMu.Lock() defer m.mutableConfigMu.Unlock() - return proto.Clone(m.mutableConfig.Configuration).(*throttlerdatapb.Configuration) + return m.mutableConfig.Configuration.CloneVT() } func (m *MaxReplicationLagModule) updateConfiguration(configuration *throttlerdatapb.Configuration, copyZeroValues bool) error { @@ -217,7 +217,7 @@ func (m *MaxReplicationLagModule) updateConfiguration(configuration *throttlerda newConfig := m.mutableConfig if copyZeroValues { - newConfig.Configuration = proto.Clone(configuration).(*throttlerdatapb.Configuration) + newConfig.Configuration = configuration.CloneVT() } else { proto.Merge(newConfig.Configuration, configuration) } @@ -302,6 +302,12 @@ func (m *MaxReplicationLagModule) recalculateRate(lagRecordNow replicationLagRec if lagRecordNow.isZero() { panic("rate recalculation was triggered with a zero replication lag record") } + + // Protect against nil stats + if lagRecordNow.Stats == nil { + return + } + now := lagRecordNow.time lagNow := lagRecordNow.lag() @@ -593,7 +599,7 @@ func (m *MaxReplicationLagModule) decreaseAndGuessRate(r *result, now time.Time, if replicationLagChange == equal { // The replication lag did not change. Keep going at the current rate. - r.Reason = fmt.Sprintf("did not decrease the rate because the lag did not change (assuming a 1s error margin)") // nolint + r.Reason = "did not decrease the rate because the lag did not change (assuming a 1s error margin)" return } diff --git a/go/vt/throttler/max_replication_lag_module_config.go b/go/vt/throttler/max_replication_lag_module_config.go index 775aa4639a4..e61909f57dc 100644 --- a/go/vt/throttler/max_replication_lag_module_config.go +++ b/go/vt/throttler/max_replication_lag_module_config.go @@ -20,8 +20,6 @@ import ( "fmt" "time" - "google.golang.org/protobuf/proto" - throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata" ) @@ -33,9 +31,7 @@ type MaxReplicationLagModuleConfig struct { } func (cfg MaxReplicationLagModuleConfig) Clone() MaxReplicationLagModuleConfig { - return MaxReplicationLagModuleConfig{ - proto.Clone(cfg.Configuration).(*throttlerdatapb.Configuration), - } + return MaxReplicationLagModuleConfig{cfg.Configuration.CloneVT()} } // Most of the values are based on the assumption that vttablet is started diff --git a/go/vt/throttler/max_replication_lag_module_test.go b/go/vt/throttler/max_replication_lag_module_test.go index f0324df192c..6379b067412 100644 --- a/go/vt/throttler/max_replication_lag_module_test.go +++ b/go/vt/throttler/max_replication_lag_module_test.go @@ -22,6 +22,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/discovery" @@ -83,6 +85,12 @@ func (tf *testFixture) process(lagRecord replicationLagRecord) { tf.m.processRecord(lagRecord) } +// recalculateRate does the same thing as MaxReplicationLagModule.recalculateRate() does +// for a new "lagRecord". +func (tf *testFixture) recalculateRate(lagRecord replicationLagRecord) { + tf.m.recalculateRate(lagRecord) +} + func (tf *testFixture) checkState(state state, rate int64, lastRateChange time.Time) error { if got, want := tf.m.currentState, state; got != want { return fmt.Errorf("module in wrong state. got = %v, want = %v", got, want) @@ -96,6 +104,47 @@ func (tf *testFixture) checkState(state state, rate int64, lastRateChange time.T return nil } +func TestNewMaxReplicationLagModule_recalculateRate(t *testing.T) { + testCases := []struct { + name string + lagRecord replicationLagRecord + expectPanic bool + }{ + { + name: "Zero lag", + lagRecord: replicationLagRecord{ + time: time.Time{}, + TabletHealth: discovery.TabletHealth{Stats: nil}, + }, + expectPanic: true, + }, + { + name: "nil lag record stats", + lagRecord: replicationLagRecord{ + time: time.Now(), + TabletHealth: discovery.TabletHealth{Stats: nil}, + }, + expectPanic: false, + }, + } + + for _, aTestCase := range testCases { + theCase := aTestCase + + t.Run(theCase.name, func(t *testing.T) { + t.Parallel() + + fixture, err := newTestFixtureWithMaxReplicationLag(5) + assert.NoError(t, err) + + if theCase.expectPanic { + assert.Panics(t, func() { fixture.recalculateRate(theCase.lagRecord) }) + } + }, + ) + } +} + func TestMaxReplicationLagModule_RateNotZeroWhenDisabled(t *testing.T) { tf, err := newTestFixtureWithMaxReplicationLag(ReplicationLagModuleDisabled) if err != nil { diff --git a/go/vt/throttler/throttler.go b/go/vt/throttler/throttler.go index 03a20013396..83a1c52225e 100644 --- a/go/vt/throttler/throttler.go +++ b/go/vt/throttler/throttler.go @@ -130,19 +130,31 @@ func NewThrottler(name, unit string, threadCount int, maxRate, maxReplicationLag return newThrottler(GlobalManager, name, unit, threadCount, maxRate, maxReplicationLag, time.Now) } +func NewThrottlerFromConfig(name, unit string, threadCount int, maxRateModuleMaxRate int64, maxReplicationLagModuleConfig MaxReplicationLagModuleConfig, nowFunc func() time.Time) (*Throttler, error) { + return newThrottlerFromConfig(GlobalManager, name, unit, threadCount, maxRateModuleMaxRate, maxReplicationLagModuleConfig, nowFunc) +} + func newThrottler(manager *managerImpl, name, unit string, threadCount int, maxRate, maxReplicationLag int64, nowFunc func() time.Time) (*Throttler, error) { - // Verify input parameters. - if maxRate < 0 { - return nil, fmt.Errorf("maxRate must be >= 0: %v", maxRate) + config := NewMaxReplicationLagModuleConfig(maxReplicationLag) + config.MaxReplicationLagSec = maxReplicationLag + + return newThrottlerFromConfig(manager, name, unit, threadCount, maxRate, config, nowFunc) + +} + +func newThrottlerFromConfig(manager *managerImpl, name, unit string, threadCount int, maxRateModuleMaxRate int64, maxReplicationLagModuleConfig MaxReplicationLagModuleConfig, nowFunc func() time.Time) (*Throttler, error) { + err := maxReplicationLagModuleConfig.Verify() + if err != nil { + return nil, fmt.Errorf("invalid max replication lag config: %w", err) } - if maxReplicationLag < 0 { - return nil, fmt.Errorf("maxReplicationLag must be >= 0: %v", maxReplicationLag) + if maxRateModuleMaxRate < 0 { + return nil, fmt.Errorf("maxRate must be >= 0: %v", maxRateModuleMaxRate) } // Enable the configured modules. - maxRateModule := NewMaxRateModule(maxRate) + maxRateModule := NewMaxRateModule(maxRateModuleMaxRate) actualRateHistory := newAggregatedIntervalHistory(1024, 1*time.Second, threadCount) - maxReplicationLagModule, err := NewMaxReplicationLagModule(NewMaxReplicationLagModuleConfig(maxReplicationLag), actualRateHistory, nowFunc) + maxReplicationLagModule, err := NewMaxReplicationLagModule(maxReplicationLagModuleConfig, actualRateHistory, nowFunc) if err != nil { return nil, err } diff --git a/go/vt/throttler/throttlerlogz.go b/go/vt/throttler/throttlerlogz.go index 758fcdce65a..4023dcd7e68 100644 --- a/go/vt/throttler/throttlerlogz.go +++ b/go/vt/throttler/throttlerlogz.go @@ -18,15 +18,16 @@ package throttler import ( "fmt" - "html/template" "io" "net/http" + "slices" "strings" "time" - "golang.org/x/exp/slices" + "github.com/google/safehtml/template" "vitess.io/vitess/go/vt/logz" + "vitess.io/vitess/go/vt/servenv" ) const logHeaderHTML = ` @@ -101,7 +102,7 @@ var ( ) func init() { - http.HandleFunc("/throttlerlogz/", func(w http.ResponseWriter, r *http.Request) { + servenv.HTTPHandleFunc("/throttlerlogz/", func(w http.ResponseWriter, r *http.Request) { throttlerlogzHandler(w, r, GlobalManager) }) } diff --git a/go/vt/throttler/throttlerz.go b/go/vt/throttler/throttlerz.go index c63c2670c33..84431aad62f 100644 --- a/go/vt/throttler/throttlerz.go +++ b/go/vt/throttler/throttlerz.go @@ -17,13 +17,14 @@ limitations under the License. package throttler import ( - "html/template" "net/http" + "slices" "strings" - "golang.org/x/exp/slices" + "github.com/google/safehtml/template" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/servenv" ) const listHTML = ` @@ -50,7 +51,7 @@ var ( ) func init() { - http.HandleFunc("/throttlerz/", func(w http.ResponseWriter, r *http.Request) { + servenv.HTTPHandleFunc("/throttlerz/", func(w http.ResponseWriter, r *http.Request) { throttlerzHandler(w, r, GlobalManager) }) } diff --git a/go/vt/tlstest/tlstest.go b/go/vt/tlstest/tlstest.go index 0529ea4ef09..ae560115e8d 100644 --- a/go/vt/tlstest/tlstest.go +++ b/go/vt/tlstest/tlstest.go @@ -348,8 +348,8 @@ func RevokeCertAndRegenerateCRL(root, parent, name string) { log.Fatal(err) } - revoked := crlList.RevokedCertificates - revoked = append(revoked, pkix.RevokedCertificate{ + revoked := crlList.RevokedCertificateEntries + revoked = append(revoked, x509.RevocationListEntry{ SerialNumber: certificate.SerialNumber, RevocationTime: time.Now(), }) @@ -365,8 +365,8 @@ func RevokeCertAndRegenerateCRL(root, parent, name string) { var crlNumber big.Int newCrl, err := x509.CreateRevocationList(rand.Reader, &x509.RevocationList{ - RevokedCertificates: revoked, - Number: crlNumber.Add(crlList.Number, big.NewInt(1)), + RevokedCertificateEntries: revoked, + Number: crlNumber.Add(crlList.Number, big.NewInt(1)), }, caCert, caKey.(crypto.Signer)) if err != nil { log.Fatal(err) diff --git a/go/vt/tlstest/tlstest_test.go b/go/vt/tlstest/tlstest_test.go index c12e65b8d88..5c79e45b906 100644 --- a/go/vt/tlstest/tlstest_test.go +++ b/go/vt/tlstest/tlstest_test.go @@ -162,7 +162,7 @@ func testClientServer(t *testing.T, combineCerts bool) { // With TLS 1.3, the Dial will succeed and the first Read will fail. clientConn, err := tls.DialWithDialer(dialer, "tcp", addr, badClientConfig) if err != nil { - if !strings.Contains(err.Error(), "bad certificate") { + if !strings.Contains(err.Error(), "certificate required") { t.Errorf("Wrong error returned: %v", err) } return @@ -177,7 +177,8 @@ func testClientServer(t *testing.T, combineCerts bool) { if err == nil { t.Fatalf("Dial or first Read was expected to fail") } - if !strings.Contains(err.Error(), "bad certificate") { + + if !strings.Contains(err.Error(), "certificate required") { t.Errorf("Wrong error returned: %v", err) } } diff --git a/go/vt/topo/cell_info.go b/go/vt/topo/cell_info.go index fd7a4a5249e..4a8112084cb 100644 --- a/go/vt/topo/cell_info.go +++ b/go/vt/topo/cell_info.go @@ -60,6 +60,9 @@ func (ts *Server) GetCellInfoNames(ctx context.Context) ([]string, error) { // GetCellInfo reads a CellInfo from the global Conn. func (ts *Server) GetCellInfo(ctx context.Context, cell string, strongRead bool) (*topodatapb.CellInfo, error) { conn := ts.globalCell + if ctx.Err() != nil { + return nil, ctx.Err() + } if !strongRead { conn = ts.globalReadOnlyCell } diff --git a/go/vt/topo/consultopo/server.go b/go/vt/topo/consultopo/server.go index 3e9192b0e46..a7a5446c274 100644 --- a/go/vt/topo/consultopo/server.go +++ b/go/vt/topo/consultopo/server.go @@ -21,7 +21,6 @@ package consultopo import ( "encoding/json" - "fmt" "os" "strings" "sync" @@ -90,7 +89,7 @@ func getClientCreds() (creds map[string]*ClientAuthCred, err error) { } if err := json.Unmarshal(data, &creds); err != nil { - err = vterrors.Wrapf(err, fmt.Sprintf("Error parsing consul_auth_static_file")) //nolint + err = vterrors.Wrapf(err, "Error parsing consul_auth_static_file") return creds, err } return creds, nil diff --git a/go/vt/topo/consultopo/server_flaky_test.go b/go/vt/topo/consultopo/server_flaky_test.go index 797ad4c955f..a987336dd01 100644 --- a/go/vt/topo/consultopo/server_flaky_test.go +++ b/go/vt/topo/consultopo/server_flaky_test.go @@ -144,7 +144,9 @@ func TestConsulTopo(t *testing.T) { // Run the TopoServerTestSuite tests. testIndex := 0 - test.TopoServerTestSuite(t, func() *topo.Server { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + test.TopoServerTestSuite(t, ctx, func() *topo.Server { // Each test will use its own sub-directories. testRoot := fmt.Sprintf("test-%v", testIndex) testIndex++ @@ -190,7 +192,9 @@ func TestConsulTopoWithChecks(t *testing.T) { // Run the TopoServerTestSuite tests. testIndex := 0 - test.TopoServerTestSuite(t, func() *topo.Server { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + test.TopoServerTestSuite(t, ctx, func() *topo.Server { // Each test will use its own sub-directories. testRoot := fmt.Sprintf("test-%v", testIndex) testIndex++ @@ -247,7 +251,9 @@ func TestConsulTopoWithAuth(t *testing.T) { t.Fatalf("couldn't write temp file: %v", err) } - test.TopoServerTestSuite(t, func() *topo.Server { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + test.TopoServerTestSuite(t, ctx, func() *topo.Server { // Each test will use its own sub-directories. testRoot := fmt.Sprintf("test-%v", testIndex) testIndex++ diff --git a/go/vt/topo/etcd2topo/server_test.go b/go/vt/topo/etcd2topo/server_test.go index 3bf0e29cfd7..732829ee78b 100644 --- a/go/vt/topo/etcd2topo/server_test.go +++ b/go/vt/topo/etcd2topo/server_test.go @@ -245,7 +245,9 @@ func TestEtcd2Topo(t *testing.T) { } // Run the TopoServerTestSuite tests. - test.TopoServerTestSuite(t, func() *topo.Server { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + test.TopoServerTestSuite(t, ctx, func() *topo.Server { return newServer() }, []string{}) diff --git a/go/vt/topo/faketopo/faketopo.go b/go/vt/topo/faketopo/faketopo.go index 9265ba699a3..8601d28f5b6 100644 --- a/go/vt/topo/faketopo/faketopo.go +++ b/go/vt/topo/faketopo/faketopo.go @@ -340,13 +340,13 @@ func (f *FakeConn) Close() { } // NewFakeTopoServer creates a new fake topo server -func NewFakeTopoServer(factory *FakeFactory) *topo.Server { +func NewFakeTopoServer(ctx context.Context, factory *FakeFactory) *topo.Server { ts, err := topo.NewWithFactory(factory, "" /*serverAddress*/, "" /*root*/) if err != nil { log.Exitf("topo.NewWithFactory() failed: %v", err) } for cell := range factory.cells { - if err := ts.CreateCellInfo(context.Background(), cell, &topodatapb.CellInfo{}); err != nil { + if err := ts.CreateCellInfo(ctx, cell, &topodatapb.CellInfo{}); err != nil { log.Exitf("ts.CreateCellInfo(%v) failed: %v", cell, err) } } diff --git a/go/vt/topo/helpers/copy.go b/go/vt/topo/helpers/copy.go index f0ae912243b..27d39179688 100644 --- a/go/vt/topo/helpers/copy.go +++ b/go/vt/topo/helpers/copy.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtgate/vindexes" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -55,6 +56,11 @@ func CopyKeyspaces(ctx context.Context, fromTS, toTS *topo.Server) { vs, err := fromTS.GetVSchema(ctx, keyspace) switch { case err == nil: + _, err = vindexes.BuildKeyspace(vs) + if err != nil { + log.Errorf("BuildKeyspace(%v): %v", keyspace, err) + break + } if err := toTS.SaveVSchema(ctx, keyspace, vs); err != nil { log.Errorf("SaveVSchema(%v): %v", keyspace, err) } @@ -95,7 +101,7 @@ func CopyShards(ctx context.Context, fromTS, toTS *topo.Server) { } } if _, err := toTS.UpdateShardFields(ctx, keyspace, shard, func(toSI *topo.ShardInfo) error { - toSI.Shard = proto.Clone(si.Shard).(*topodatapb.Shard) + toSI.Shard = si.Shard.CloneVT() return nil }); err != nil { log.Fatalf("UpdateShardFields(%v, %v): %v", keyspace, shard, err) diff --git a/go/vt/topo/helpers/copy_test.go b/go/vt/topo/helpers/copy_test.go index 73ecfabf66b..2086a2e6552 100644 --- a/go/vt/topo/helpers/copy_test.go +++ b/go/vt/topo/helpers/copy_test.go @@ -17,12 +17,11 @@ limitations under the License. package helpers import ( + "context" "testing" "github.com/stretchr/testify/require" - "context" - "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -34,8 +33,8 @@ func createSetup(ctx context.Context, t *testing.T) (*topo.Server, *topo.Server) // Create a source and destination TS. They will have // different generations, so we test using the Version for // both works as expected. - fromTS := memorytopo.NewServer("test_cell") - toTS := memorytopo.NewServer("test_cell") + fromTS := memorytopo.NewServer(ctx, "test_cell") + toTS := memorytopo.NewServer(ctx, "test_cell") // create a keyspace and a couple tablets if err := fromTS.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil { diff --git a/go/vt/topo/helpers/tee_topo_test.go b/go/vt/topo/helpers/tee_topo_test.go index 519301eaafa..8a4c5690846 100644 --- a/go/vt/topo/helpers/tee_topo_test.go +++ b/go/vt/topo/helpers/tee_topo_test.go @@ -19,15 +19,17 @@ package helpers import ( "testing" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/test" ) func TestTeeTopo(t *testing.T) { - test.TopoServerTestSuite(t, func() *topo.Server { - s1 := memorytopo.NewServer(test.LocalCellName) - s2 := memorytopo.NewServer(test.LocalCellName) + ctx := utils.LeakCheckContext(t) + test.TopoServerTestSuite(t, ctx, func() *topo.Server { + s1 := memorytopo.NewServer(ctx, test.LocalCellName) + s2 := memorytopo.NewServer(ctx, test.LocalCellName) tee, err := NewTee(s1, s2, false) if err != nil { t.Fatalf("NewTee() failed: %v", err) diff --git a/go/vt/topo/k8stopo/VitessTopoNodes-crd.yaml b/go/vt/topo/k8stopo/VitessTopoNodes-crd.yaml deleted file mode 100644 index 44e89925817..00000000000 --- a/go/vt/topo/k8stopo/VitessTopoNodes-crd.yaml +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: vitesstoponodes.topo.vitess.io -spec: - group: topo.vitess.io - versions: - - name: v1beta1 - served: true - storage: true - additionalPrinterColumns: - - name: Key - type: string - description: The full key path - jsonPath: .data.key - schema: - openAPIV3Schema: - type: object - required: - - data - properties: - data: - type: object - required: - - key - - value - properties: - key: - description: A file-path like key. Must be an absolute path. Must not end with a /. - type: string - pattern: '^\/.+[^\/]$' - value: - description: A base64 encoded value. Must be a base64 encoded string or empty string. - type: string - pattern: "^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$" - ephemeral: - description: Whether or not the node is considered ephemeral. True for lock and election nodes. - type: boolean - scope: Namespaced - names: - plural: vitesstoponodes - singular: vitesstoponode - kind: VitessTopoNode diff --git a/go/vt/topo/k8stopo/apis/topo/v1beta1/doc.go b/go/vt/topo/k8stopo/apis/topo/v1beta1/doc.go deleted file mode 100644 index e2be94ae46e..00000000000 --- a/go/vt/topo/k8stopo/apis/topo/v1beta1/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// +k8s:deepcopy-gen=package -// +k8s:defaulter-gen=TypeMeta -// +groupName=topo.vitess.io - -package v1beta1 diff --git a/go/vt/topo/k8stopo/apis/topo/v1beta1/register.go b/go/vt/topo/k8stopo/apis/topo/v1beta1/register.go deleted file mode 100644 index 49a9ee9a2a5..00000000000 --- a/go/vt/topo/k8stopo/apis/topo/v1beta1/register.go +++ /dev/null @@ -1,38 +0,0 @@ -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var SchemeGroupVersion = schema.GroupVersion{Group: "topo.vitess.io", Version: "v1beta1"} - -var ( - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - localSchemeBuilder.Register(addKnownTypes) -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &VitessTopoNode{}, - &VitessTopoNodeList{}, - ) - - scheme.AddKnownTypes(SchemeGroupVersion, - &metav1.Status{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/go/vt/topo/k8stopo/apis/topo/v1beta1/types.go b/go/vt/topo/k8stopo/apis/topo/v1beta1/types.go deleted file mode 100644 index 48d48001d64..00000000000 --- a/go/vt/topo/k8stopo/apis/topo/v1beta1/types.go +++ /dev/null @@ -1,32 +0,0 @@ -package v1beta1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VitessTopoNode is a container for Vitess topology data -type VitessTopoNode struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - Data VitessTopoNodeData `json:"data"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VitessTopoNodeList is a top-level list type. The client methods for lists are automatically created. -type VitessTopoNodeList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - Items []VitessTopoNode `json:"items"` -} - -// VitessTopoNodeData contains the basic data for the node -type VitessTopoNodeData struct { - Key string `json:"key"` - Value string `json:"value"` - Ephemeral bool `json:"ephemeral"` -} diff --git a/go/vt/topo/k8stopo/apis/topo/v1beta1/zz_generated.deepcopy.go b/go/vt/topo/k8stopo/apis/topo/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index b9fd77141c0..00000000000 --- a/go/vt/topo/k8stopo/apis/topo/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,102 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VitessTopoNode) DeepCopyInto(out *VitessTopoNode) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Data = in.Data - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitessTopoNode. -func (in *VitessTopoNode) DeepCopy() *VitessTopoNode { - if in == nil { - return nil - } - out := new(VitessTopoNode) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VitessTopoNode) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VitessTopoNodeData) DeepCopyInto(out *VitessTopoNodeData) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitessTopoNodeData. -func (in *VitessTopoNodeData) DeepCopy() *VitessTopoNodeData { - if in == nil { - return nil - } - out := new(VitessTopoNodeData) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VitessTopoNodeList) DeepCopyInto(out *VitessTopoNodeList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]VitessTopoNode, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitessTopoNodeList. -func (in *VitessTopoNodeList) DeepCopy() *VitessTopoNodeList { - if in == nil { - return nil - } - out := new(VitessTopoNodeList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VitessTopoNodeList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/clientset.go b/go/vt/topo/k8stopo/client/clientset/versioned/clientset.go deleted file mode 100644 index 83ad7c4c839..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/clientset.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package versioned - -import ( - "fmt" - - discovery "k8s.io/client-go/discovery" - rest "k8s.io/client-go/rest" - flowcontrol "k8s.io/client-go/util/flowcontrol" - - topov1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - TopoV1beta1() topov1beta1.TopoV1beta1Interface -} - -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. -type Clientset struct { - *discovery.DiscoveryClient - topoV1beta1 *topov1beta1.TopoV1beta1Client -} - -// TopoV1beta1 retrieves the TopoV1beta1Client -func (c *Clientset) TopoV1beta1() topov1beta1.TopoV1beta1Interface { - return c.topoV1beta1 -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - if c == nil { - return nil - } - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. -// If config's RateLimiter is not set and QPS and Burst are acceptable, -// NewForConfig will generate a rate-limiter in configShallowCopy. -func NewForConfig(c *rest.Config) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - if configShallowCopy.Burst <= 0 { - return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") - } - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - var cs Clientset - var err error - cs.topoV1beta1, err = topov1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - return &cs, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.topoV1beta1 = topov1beta1.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs -} - -// New creates a new Clientset for the given RESTClient. -func New(c rest.Interface) *Clientset { - var cs Clientset - cs.topoV1beta1 = topov1beta1.New(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &cs -} diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/doc.go b/go/vt/topo/k8stopo/client/clientset/versioned/doc.go deleted file mode 100644 index 499efc0b13e..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. -package versioned diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/fake/clientset_generated.go b/go/vt/topo/k8stopo/client/clientset/versioned/fake/clientset_generated.go deleted file mode 100644 index 532eab2b2a2..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/fake/clientset_generated.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/discovery" - fakediscovery "k8s.io/client-go/discovery/fake" - "k8s.io/client-go/testing" - - clientset "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned" - topov1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1" - faketopov1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake" -) - -// NewSimpleClientset returns a clientset that will respond with the provided objects. -// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement -// for a real clientset and is mostly useful in simple unit tests. -func NewSimpleClientset(objects ...runtime.Object) *Clientset { - o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) - for _, obj := range objects { - if err := o.Add(obj); err != nil { - panic(err) - } - } - - cs := &Clientset{tracker: o} - cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} - cs.AddReactor("*", "*", testing.ObjectReaction(o)) - cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { - gvr := action.GetResource() - ns := action.GetNamespace() - watch, err := o.Watch(gvr, ns) - if err != nil { - return false, nil, err - } - return true, watch, nil - }) - - return cs -} - -// Clientset implements clientset.Interface. Meant to be embedded into a -// struct to get a default implementation. This makes faking out just the method -// you want to test easier. -type Clientset struct { - testing.Fake - discovery *fakediscovery.FakeDiscovery - tracker testing.ObjectTracker -} - -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - return c.discovery -} - -func (c *Clientset) Tracker() testing.ObjectTracker { - return c.tracker -} - -var _ clientset.Interface = &Clientset{} - -// TopoV1beta1 retrieves the TopoV1beta1Client -func (c *Clientset) TopoV1beta1() topov1beta1.TopoV1beta1Interface { - return &faketopov1beta1.FakeTopoV1beta1{Fake: &c.Fake} -} diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/fake/doc.go b/go/vt/topo/k8stopo/client/clientset/versioned/fake/doc.go deleted file mode 100644 index 97b3e5c56db..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated fake clientset. -package fake diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/fake/register.go b/go/vt/topo/k8stopo/client/clientset/versioned/fake/register.go deleted file mode 100644 index fea362af64c..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/fake/register.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - - topov1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -var scheme = runtime.NewScheme() -var codecs = serializer.NewCodecFactory(scheme) -var parameterCodec = runtime.NewParameterCodec(scheme) -var localSchemeBuilder = runtime.SchemeBuilder{ - topov1beta1.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. -var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(scheme)) -} diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/scheme/doc.go b/go/vt/topo/k8stopo/client/clientset/versioned/scheme/doc.go deleted file mode 100644 index 35280ae27c6..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/scheme/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package contains the scheme of the automatically generated clientset. -package scheme diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/scheme/register.go b/go/vt/topo/k8stopo/client/clientset/versioned/scheme/register.go deleted file mode 100644 index 7a9084ecb67..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/scheme/register.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package scheme - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - - topov1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -var Scheme = runtime.NewScheme() -var Codecs = serializer.NewCodecFactory(Scheme) -var ParameterCodec = runtime.NewParameterCodec(Scheme) -var localSchemeBuilder = runtime.SchemeBuilder{ - topov1beta1.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. -var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(Scheme)) -} diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/doc.go b/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/doc.go deleted file mode 100644 index 4911ad316da..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1beta1 diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/doc.go b/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/doc.go deleted file mode 100644 index e8f15d37732..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/fake_topo_client.go b/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/fake_topo_client.go deleted file mode 100644 index 76c43f0d8dd..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/fake_topo_client.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" - - v1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1" -) - -type FakeTopoV1beta1 struct { - *testing.Fake -} - -func (c *FakeTopoV1beta1) VitessTopoNodes(namespace string) v1beta1.VitessTopoNodeInterface { - return &FakeVitessTopoNodes{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeTopoV1beta1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/fake_vitesstoponode.go b/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/fake_vitesstoponode.go deleted file mode 100644 index 45841625eb9..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/fake/fake_vitesstoponode.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" - - v1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -// FakeVitessTopoNodes implements VitessTopoNodeInterface -type FakeVitessTopoNodes struct { - Fake *FakeTopoV1beta1 - ns string -} - -var vitesstoponodesResource = schema.GroupVersionResource{Group: "topo.vitess.io", Version: "v1beta1", Resource: "vitesstoponodes"} - -var vitesstoponodesKind = schema.GroupVersionKind{Group: "topo.vitess.io", Version: "v1beta1", Kind: "VitessTopoNode"} - -// Get takes name of the vitessTopoNode, and returns the corresponding vitessTopoNode object, and an error if there is any. -func (c *FakeVitessTopoNodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VitessTopoNode, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(vitesstoponodesResource, c.ns, name), &v1beta1.VitessTopoNode{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VitessTopoNode), err -} - -// List takes label and field selectors, and returns the list of VitessTopoNodes that match those selectors. -func (c *FakeVitessTopoNodes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VitessTopoNodeList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(vitesstoponodesResource, vitesstoponodesKind, c.ns, opts), &v1beta1.VitessTopoNodeList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1beta1.VitessTopoNodeList{ListMeta: obj.(*v1beta1.VitessTopoNodeList).ListMeta} - for _, item := range obj.(*v1beta1.VitessTopoNodeList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested vitessTopoNodes. -func (c *FakeVitessTopoNodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(vitesstoponodesResource, c.ns, opts)) - -} - -// Create takes the representation of a vitessTopoNode and creates it. Returns the server's representation of the vitessTopoNode, and an error, if there is any. -func (c *FakeVitessTopoNodes) Create(ctx context.Context, vitessTopoNode *v1beta1.VitessTopoNode, opts v1.CreateOptions) (result *v1beta1.VitessTopoNode, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(vitesstoponodesResource, c.ns, vitessTopoNode), &v1beta1.VitessTopoNode{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VitessTopoNode), err -} - -// Update takes the representation of a vitessTopoNode and updates it. Returns the server's representation of the vitessTopoNode, and an error, if there is any. -func (c *FakeVitessTopoNodes) Update(ctx context.Context, vitessTopoNode *v1beta1.VitessTopoNode, opts v1.UpdateOptions) (result *v1beta1.VitessTopoNode, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(vitesstoponodesResource, c.ns, vitessTopoNode), &v1beta1.VitessTopoNode{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VitessTopoNode), err -} - -// Delete takes name of the vitessTopoNode and deletes it. Returns an error if one occurs. -func (c *FakeVitessTopoNodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(vitesstoponodesResource, c.ns, name), &v1beta1.VitessTopoNode{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeVitessTopoNodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(vitesstoponodesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1beta1.VitessTopoNodeList{}) - return err -} - -// Patch applies the patch and returns the patched vitessTopoNode. -func (c *FakeVitessTopoNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VitessTopoNode, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(vitesstoponodesResource, c.ns, name, pt, data, subresources...), &v1beta1.VitessTopoNode{}) - - if obj == nil { - return nil, err - } - return obj.(*v1beta1.VitessTopoNode), err -} diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/generated_expansion.go b/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/generated_expansion.go deleted file mode 100644 index 1219ba9ee0c..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -type VitessTopoNodeExpansion any diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/topo_client.go b/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/topo_client.go deleted file mode 100644 index 55b78f6fd17..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/topo_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - rest "k8s.io/client-go/rest" - - v1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" - "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned/scheme" -) - -type TopoV1beta1Interface interface { - RESTClient() rest.Interface - VitessTopoNodesGetter -} - -// TopoV1beta1Client is used to interact with features provided by the topo.vitess.io group. -type TopoV1beta1Client struct { - restClient rest.Interface -} - -func (c *TopoV1beta1Client) VitessTopoNodes(namespace string) VitessTopoNodeInterface { - return newVitessTopoNodes(c, namespace) -} - -// NewForConfig creates a new TopoV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*TopoV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &TopoV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new TopoV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *TopoV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new TopoV1beta1Client for the given RESTClient. -func New(c rest.Interface) *TopoV1beta1Client { - return &TopoV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *TopoV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/vitesstoponode.go b/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/vitesstoponode.go deleted file mode 100644 index 7b458f7dfee..00000000000 --- a/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1/vitesstoponode.go +++ /dev/null @@ -1,179 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - - v1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" - scheme "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned/scheme" -) - -// VitessTopoNodesGetter has a method to return a VitessTopoNodeInterface. -// A group's client should implement this interface. -type VitessTopoNodesGetter interface { - VitessTopoNodes(namespace string) VitessTopoNodeInterface -} - -// VitessTopoNodeInterface has methods to work with VitessTopoNode resources. -type VitessTopoNodeInterface interface { - Create(ctx context.Context, vitessTopoNode *v1beta1.VitessTopoNode, opts v1.CreateOptions) (*v1beta1.VitessTopoNode, error) - Update(ctx context.Context, vitessTopoNode *v1beta1.VitessTopoNode, opts v1.UpdateOptions) (*v1beta1.VitessTopoNode, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.VitessTopoNode, error) - List(ctx context.Context, opts v1.ListOptions) (*v1beta1.VitessTopoNodeList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VitessTopoNode, err error) - VitessTopoNodeExpansion -} - -// vitessTopoNodes implements VitessTopoNodeInterface -type vitessTopoNodes struct { - client rest.Interface - ns string -} - -// newVitessTopoNodes returns a VitessTopoNodes -func newVitessTopoNodes(c *TopoV1beta1Client, namespace string) *vitessTopoNodes { - return &vitessTopoNodes{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the vitessTopoNode, and returns the corresponding vitessTopoNode object, and an error if there is any. -func (c *vitessTopoNodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VitessTopoNode, err error) { - result = &v1beta1.VitessTopoNode{} - err = c.client.Get(). - Namespace(c.ns). - Resource("vitesstoponodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VitessTopoNodes that match those selectors. -func (c *vitessTopoNodes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VitessTopoNodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.VitessTopoNodeList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("vitesstoponodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested vitessTopoNodes. -func (c *vitessTopoNodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("vitesstoponodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a vitessTopoNode and creates it. Returns the server's representation of the vitessTopoNode, and an error, if there is any. -func (c *vitessTopoNodes) Create(ctx context.Context, vitessTopoNode *v1beta1.VitessTopoNode, opts v1.CreateOptions) (result *v1beta1.VitessTopoNode, err error) { - result = &v1beta1.VitessTopoNode{} - err = c.client.Post(). - Namespace(c.ns). - Resource("vitesstoponodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(vitessTopoNode). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a vitessTopoNode and updates it. Returns the server's representation of the vitessTopoNode, and an error, if there is any. -func (c *vitessTopoNodes) Update(ctx context.Context, vitessTopoNode *v1beta1.VitessTopoNode, opts v1.UpdateOptions) (result *v1beta1.VitessTopoNode, err error) { - result = &v1beta1.VitessTopoNode{} - err = c.client.Put(). - Namespace(c.ns). - Resource("vitesstoponodes"). - Name(vitessTopoNode.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(vitessTopoNode). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the vitessTopoNode and deletes it. Returns an error if one occurs. -func (c *vitessTopoNodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("vitesstoponodes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *vitessTopoNodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("vitesstoponodes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched vitessTopoNode. -func (c *vitessTopoNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VitessTopoNode, err error) { - result = &v1beta1.VitessTopoNode{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("vitesstoponodes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/go/vt/topo/k8stopo/client/informers/externalversions/factory.go b/go/vt/topo/k8stopo/client/informers/externalversions/factory.go deleted file mode 100644 index 5c5886daedd..00000000000 --- a/go/vt/topo/k8stopo/client/informers/externalversions/factory.go +++ /dev/null @@ -1,181 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package externalversions - -import ( - reflect "reflect" - sync "sync" - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" - - versioned "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned" - internalinterfaces "vitess.io/vitess/go/vt/topo/k8stopo/client/informers/externalversions/internalinterfaces" - topo "vitess.io/vitess/go/vt/topo/k8stopo/client/informers/externalversions/topo" -) - -// SharedInformerOption defines the functional option type for SharedInformerFactory. -type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory - -type sharedInformerFactory struct { - client versioned.Interface - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc - lock sync.Mutex - defaultResync time.Duration - customResync map[reflect.Type]time.Duration - - informers map[reflect.Type]cache.SharedIndexInformer - // startedInformers is used for tracking which informers have been started. - // This allows Start() to be called multiple times safely. - startedInformers map[reflect.Type]bool -} - -// WithCustomResyncConfig sets a custom resync period for the specified informer types. -func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - for k, v := range resyncConfig { - factory.customResync[reflect.TypeOf(k)] = v - } - return factory - } -} - -// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. -func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.tweakListOptions = tweakListOptions - return factory - } -} - -// WithNamespace limits the SharedInformerFactory to the specified namespace. -func WithNamespace(namespace string) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.namespace = namespace - return factory - } -} - -// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. -func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync) -} - -// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. -// Listers obtained via this SharedInformerFactory will be subject to the same filters -// as specified here. -// Deprecated: Please use NewSharedInformerFactoryWithOptions instead -func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) -} - -// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. -func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { - factory := &sharedInformerFactory{ - client: client, - namespace: v1.NamespaceAll, - defaultResync: defaultResync, - informers: make(map[reflect.Type]cache.SharedIndexInformer), - startedInformers: make(map[reflect.Type]bool), - customResync: make(map[reflect.Type]time.Duration), - } - - // Apply all options - for _, opt := range options { - factory = opt(factory) - } - - return factory -} - -// Start initializes all requested informers. -func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { - f.lock.Lock() - defer f.lock.Unlock() - - for informerType, informer := range f.informers { - if !f.startedInformers[informerType] { - go informer.Run(stopCh) - f.startedInformers[informerType] = true - } - } -} - -// WaitForCacheSync waits for all started informers' cache were synced. -func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { - informers := func() map[reflect.Type]cache.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informers := map[reflect.Type]cache.SharedIndexInformer{} - for informerType, informer := range f.informers { - if f.startedInformers[informerType] { - informers[informerType] = informer - } - } - return informers - }() - - res := map[reflect.Type]bool{} - for informType, informer := range informers { - res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) - } - return res -} - -// InternalInformerFor returns the SharedIndexInformer for obj using an internal -// client. -func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informerType := reflect.TypeOf(obj) - informer, exists := f.informers[informerType] - if exists { - return informer - } - - resyncPeriod, exists := f.customResync[informerType] - if !exists { - resyncPeriod = f.defaultResync - } - - informer = newFunc(f.client, resyncPeriod) - f.informers[informerType] = informer - - return informer -} - -// SharedInformerFactory provides shared informers for resources in all known -// API group versions. -type SharedInformerFactory interface { - internalinterfaces.SharedInformerFactory - ForResource(resource schema.GroupVersionResource) (GenericInformer, error) - WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool - - Topo() topo.Interface -} - -func (f *sharedInformerFactory) Topo() topo.Interface { - return topo.New(f, f.namespace, f.tweakListOptions) -} diff --git a/go/vt/topo/k8stopo/client/informers/externalversions/generic.go b/go/vt/topo/k8stopo/client/informers/externalversions/generic.go deleted file mode 100644 index 8064882a6bd..00000000000 --- a/go/vt/topo/k8stopo/client/informers/externalversions/generic.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package externalversions - -import ( - "fmt" - - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" - - v1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -// GenericInformer is type of SharedIndexInformer which will locate and delegate to other -// sharedInformers based on type -type GenericInformer interface { - Informer() cache.SharedIndexInformer - Lister() cache.GenericLister -} - -type genericInformer struct { - informer cache.SharedIndexInformer - resource schema.GroupResource -} - -// Informer returns the SharedIndexInformer. -func (f *genericInformer) Informer() cache.SharedIndexInformer { - return f.informer -} - -// Lister returns the GenericLister. -func (f *genericInformer) Lister() cache.GenericLister { - return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) -} - -// ForResource gives generic access to a shared informer of the matching type -// TODO extend this to unknown resources with a client pool -func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { - switch resource { - // Group=topo.vitess.io, Version=v1beta1 - case v1beta1.SchemeGroupVersion.WithResource("vitesstoponodes"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Topo().V1beta1().VitessTopoNodes().Informer()}, nil - - } - - return nil, fmt.Errorf("no informer found for %v", resource) -} diff --git a/go/vt/topo/k8stopo/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/go/vt/topo/k8stopo/client/informers/externalversions/internalinterfaces/factory_interfaces.go deleted file mode 100644 index ca90aa8983f..00000000000 --- a/go/vt/topo/k8stopo/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package internalinterfaces - -import ( - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - cache "k8s.io/client-go/tools/cache" - - versioned "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned" -) - -// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. -type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer - -// SharedInformerFactory a small interface to allow for adding an informer without an import cycle -type SharedInformerFactory interface { - Start(stopCh <-chan struct{}) - InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer -} - -// TweakListOptionsFunc is a function that transforms a v1.ListOptions. -type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/go/vt/topo/k8stopo/client/informers/externalversions/topo/interface.go b/go/vt/topo/k8stopo/client/informers/externalversions/topo/interface.go deleted file mode 100644 index 655cf46d4da..00000000000 --- a/go/vt/topo/k8stopo/client/informers/externalversions/topo/interface.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package topo - -import ( - internalinterfaces "vitess.io/vitess/go/vt/topo/k8stopo/client/informers/externalversions/internalinterfaces" - v1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/client/informers/externalversions/topo/v1beta1" -) - -// Interface provides access to each of this group's versions. -type Interface interface { - // V1beta1 provides access to shared informers for resources in V1beta1. - V1beta1() v1beta1.Interface -} - -type group struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// V1beta1 returns a new v1beta1.Interface. -func (g *group) V1beta1() v1beta1.Interface { - return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) -} diff --git a/go/vt/topo/k8stopo/client/informers/externalversions/topo/v1beta1/interface.go b/go/vt/topo/k8stopo/client/informers/externalversions/topo/v1beta1/interface.go deleted file mode 100644 index f1bc2dc3db6..00000000000 --- a/go/vt/topo/k8stopo/client/informers/externalversions/topo/v1beta1/interface.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1beta1 - -import ( - internalinterfaces "vitess.io/vitess/go/vt/topo/k8stopo/client/informers/externalversions/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // VitessTopoNodes returns a VitessTopoNodeInformer. - VitessTopoNodes() VitessTopoNodeInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// VitessTopoNodes returns a VitessTopoNodeInformer. -func (v *version) VitessTopoNodes() VitessTopoNodeInformer { - return &vitessTopoNodeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} diff --git a/go/vt/topo/k8stopo/client/informers/externalversions/topo/v1beta1/vitesstoponode.go b/go/vt/topo/k8stopo/client/informers/externalversions/topo/v1beta1/vitesstoponode.go deleted file mode 100644 index af6e8b17acc..00000000000 --- a/go/vt/topo/k8stopo/client/informers/externalversions/topo/v1beta1/vitesstoponode.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "context" - time "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" - - topov1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" - versioned "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned" - internalinterfaces "vitess.io/vitess/go/vt/topo/k8stopo/client/informers/externalversions/internalinterfaces" - v1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/client/listers/topo/v1beta1" -) - -// VitessTopoNodeInformer provides access to a shared informer and lister for -// VitessTopoNodes. -type VitessTopoNodeInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1beta1.VitessTopoNodeLister -} - -type vitessTopoNodeInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewVitessTopoNodeInformer constructs a new informer for VitessTopoNode type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewVitessTopoNodeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredVitessTopoNodeInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredVitessTopoNodeInformer constructs a new informer for VitessTopoNode type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredVitessTopoNodeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.TopoV1beta1().VitessTopoNodes(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.TopoV1beta1().VitessTopoNodes(namespace).Watch(context.TODO(), options) - }, - }, - &topov1beta1.VitessTopoNode{}, - resyncPeriod, - indexers, - ) -} - -func (f *vitessTopoNodeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredVitessTopoNodeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *vitessTopoNodeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&topov1beta1.VitessTopoNode{}, f.defaultInformer) -} - -func (f *vitessTopoNodeInformer) Lister() v1beta1.VitessTopoNodeLister { - return v1beta1.NewVitessTopoNodeLister(f.Informer().GetIndexer()) -} diff --git a/go/vt/topo/k8stopo/client/listers/topo/v1beta1/expansion_generated.go b/go/vt/topo/k8stopo/client/listers/topo/v1beta1/expansion_generated.go deleted file mode 100644 index b1602ecced9..00000000000 --- a/go/vt/topo/k8stopo/client/listers/topo/v1beta1/expansion_generated.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -// VitessTopoNodeListerExpansion allows custom methods to be added to -// VitessTopoNodeLister. -type VitessTopoNodeListerExpansion any - -// VitessTopoNodeNamespaceListerExpansion allows custom methods to be added to -// VitessTopoNodeNamespaceLister. -type VitessTopoNodeNamespaceListerExpansion any diff --git a/go/vt/topo/k8stopo/client/listers/topo/v1beta1/vitesstoponode.go b/go/vt/topo/k8stopo/client/listers/topo/v1beta1/vitesstoponode.go deleted file mode 100644 index 4ca6176b146..00000000000 --- a/go/vt/topo/k8stopo/client/listers/topo/v1beta1/vitesstoponode.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - - v1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -// VitessTopoNodeLister helps list VitessTopoNodes. -type VitessTopoNodeLister interface { - // List lists all VitessTopoNodes in the indexer. - List(selector labels.Selector) (ret []*v1beta1.VitessTopoNode, err error) - // VitessTopoNodes returns an object that can list and get VitessTopoNodes. - VitessTopoNodes(namespace string) VitessTopoNodeNamespaceLister - VitessTopoNodeListerExpansion -} - -// vitessTopoNodeLister implements the VitessTopoNodeLister interface. -type vitessTopoNodeLister struct { - indexer cache.Indexer -} - -// NewVitessTopoNodeLister returns a new VitessTopoNodeLister. -func NewVitessTopoNodeLister(indexer cache.Indexer) VitessTopoNodeLister { - return &vitessTopoNodeLister{indexer: indexer} -} - -// List lists all VitessTopoNodes in the indexer. -func (s *vitessTopoNodeLister) List(selector labels.Selector) (ret []*v1beta1.VitessTopoNode, err error) { - err = cache.ListAll(s.indexer, selector, func(m any) { - ret = append(ret, m.(*v1beta1.VitessTopoNode)) - }) - return ret, err -} - -// VitessTopoNodes returns an object that can list and get VitessTopoNodes. -func (s *vitessTopoNodeLister) VitessTopoNodes(namespace string) VitessTopoNodeNamespaceLister { - return vitessTopoNodeNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// VitessTopoNodeNamespaceLister helps list and get VitessTopoNodes. -type VitessTopoNodeNamespaceLister interface { - // List lists all VitessTopoNodes in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1beta1.VitessTopoNode, err error) - // Get retrieves the VitessTopoNode from the indexer for a given namespace and name. - Get(name string) (*v1beta1.VitessTopoNode, error) - VitessTopoNodeNamespaceListerExpansion -} - -// vitessTopoNodeNamespaceLister implements the VitessTopoNodeNamespaceLister -// interface. -type vitessTopoNodeNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all VitessTopoNodes in the indexer for a given namespace. -func (s vitessTopoNodeNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.VitessTopoNode, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m any) { - ret = append(ret, m.(*v1beta1.VitessTopoNode)) - }) - return ret, err -} - -// Get retrieves the VitessTopoNode from the indexer for a given namespace and name. -func (s vitessTopoNodeNamespaceLister) Get(name string) (*v1beta1.VitessTopoNode, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1beta1.Resource("vitesstoponode"), name) - } - return obj.(*v1beta1.VitessTopoNode), nil -} diff --git a/go/vt/topo/k8stopo/config.go b/go/vt/topo/k8stopo/config.go deleted file mode 100644 index 7db781d6497..00000000000 --- a/go/vt/topo/k8stopo/config.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo diff --git a/go/vt/topo/k8stopo/directory.go b/go/vt/topo/k8stopo/directory.go deleted file mode 100644 index abc18cf8bef..00000000000 --- a/go/vt/topo/k8stopo/directory.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "context" - "path/filepath" - "sort" - "strings" - - "vitess.io/vitess/go/vt/topo" - vtv1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -// ListDir is part of the topo.Conn interface. -// It uses an internal cache to find all the objects matching a specific key and returns -// a slice of results sorted alphabetically to emulate the behavior of etcd, zk, consul, etc -func (s *Server) ListDir(ctx context.Context, dirPath string, full bool) ([]topo.DirEntry, error) { - dirPath = filepath.Join(s.root, dirPath) - dirMap := map[string]topo.DirEntry{} - - if children, err := s.memberIndexer.ByIndex("by_parent", dirPath); err == nil { - for _, obj := range children { - vtn := obj.(*vtv1beta1.VitessTopoNode) - - key := vtn.Data.Key - - // skip duplicates - if _, ok := dirMap[key]; ok { - continue - } - - // new empty entry - e := topo.DirEntry{ - Ephemeral: vtn.Data.Ephemeral, - } - - // Clean dirPath from key to get name - key = strings.TrimPrefix(key, dirPath+"/") - - // If the key represents a directory - if strings.Contains(key, "/") { - if full { - e.Type = topo.TypeDirectory - } - - // get first part of path as name - key = strings.Split(filepath.Dir(key), "/")[0] - } else if full { - e.Type = topo.TypeFile - } - - // set name - e.Name = key - - // add to results - dirMap[e.Name] = e - } - } else { - return nil, err - } - - // An empty map means not found - if len(dirMap) == 0 { - return nil, topo.NewError(topo.NoNode, dirPath) - } - - // Get slice of keys - var keys []string - for key := range dirMap { - keys = append(keys, key) - } - - // sort keys - sort.Strings(keys) - - // Get ordered result - var result []topo.DirEntry - for _, k := range keys { - result = append(result, dirMap[k]) - } - - return result, nil -} diff --git a/go/vt/topo/k8stopo/election.go b/go/vt/topo/k8stopo/election.go deleted file mode 100644 index 9c89faf445d..00000000000 --- a/go/vt/topo/k8stopo/election.go +++ /dev/null @@ -1,129 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "context" - "path" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/topo" -) - -const electionsPath = "elections" - -// NewLeaderParticipation is part of the topo.Server interface -func (s *Server) NewLeaderParticipation(name, id string) (topo.LeaderParticipation, error) { - return &kubernetesLeaderParticipation{ - s: s, - name: name, - id: id, - stop: make(chan struct{}), - done: make(chan struct{}), - }, nil -} - -// kubernetesLeaderParticipation implements topo.LeaderParticipation. -// -// We use a directory (in global election path, with the name) with -// ephemeral files in it, that contains the id. The oldest revision -// wins the election. -type kubernetesLeaderParticipation struct { - // s is our parent kubernetes topo Server - s *Server - - // name is the name of this LeaderParticipation - name string - - // id is the process's current id. - id string - - // stop is a channel closed when Stop is called. - stop chan struct{} - - // done is a channel closed when we're done processing the Stop - done chan struct{} -} - -func (mp *kubernetesLeaderParticipation) getElectionPath() string { - return path.Join(mp.s.root, electionsPath, mp.name) -} - -// WaitForLeadership is part of the topo.LeaderParticipation interface. -func (mp *kubernetesLeaderParticipation) WaitForLeadership() (context.Context, error) { - // If Stop was already called, mp.done is closed, so we are interrupted. - select { - case <-mp.done: - return nil, topo.NewError(topo.Interrupted, "Leadership") - default: - } - - electionPath := mp.getElectionPath() - var ld topo.LockDescriptor - - // We use a cancelable context here. If stop is closed, - // we just cancel that context. - lockCtx, lockCancel := context.WithCancel(context.Background()) - go func() { - <-mp.stop - if ld != nil { - if err := ld.Unlock(context.Background()); err != nil { - log.Errorf("failed to unlock electionPath %v: %v", electionPath, err) - } - } - lockCancel() - close(mp.done) - }() - - // Try to get the primaryship, by getting a lock. - var err error - ld, err = mp.s.lock(lockCtx, electionPath, mp.id, true) - if err != nil { - // It can be that we were interrupted. - return nil, err - } - - // We got the lock. Return the lockContext. If Stop() is called, - // it will cancel the lockCtx, and cancel the returned context. - return lockCtx, nil -} - -// Stop is part of the topo.LeaderParticipation interface -func (mp *kubernetesLeaderParticipation) Stop() { - close(mp.stop) - <-mp.done -} - -// GetCurrentLeaderID is part of the topo.LeaderParticipation interface -func (mp *kubernetesLeaderParticipation) GetCurrentLeaderID(ctx context.Context) (string, error) { - id, _, err := mp.s.Get(ctx, mp.getElectionPath()) - if err != nil { - // NoNode means nobody is the primary - if topo.IsErrType(err, topo.NoNode) { - return "", nil - } - return "", err - } - return string(id), nil -} - -// WaitForNewLeader is part of the topo.LeaderParticipation interface -func (mp *kubernetesLeaderParticipation) WaitForNewLeader(context.Context) (<-chan string, error) { - // Kubernetes doesn't seem to provide a primitive that watches a prefix - // or directory, so this likely can never be implemented. - return nil, topo.NewError(topo.NoImplementation, "wait for leader not supported in K8s topo") -} diff --git a/go/vt/topo/k8stopo/error.go b/go/vt/topo/k8stopo/error.go deleted file mode 100644 index 32f44d0beef..00000000000 --- a/go/vt/topo/k8stopo/error.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "context" - - "k8s.io/apimachinery/pkg/api/errors" - - "vitess.io/vitess/go/vt/topo" -) - -// convertError converts errors into a topo error. All errors -// are either application-level errors, or context errors. -func convertError(err error, nodePath string) error { - if err == nil { - return nil - } - - // Check for specific kubernetes errors - if errors.IsAlreadyExists(err) { - return topo.NewError(topo.NodeExists, nodePath) - } - if errors.IsNotFound(err) { - return topo.NewError(topo.NoNode, nodePath) - } - if errors.IsServerTimeout(err) { - return topo.NewError(topo.Timeout, nodePath) - } - - // Convert specific context sentinel values. - switch err { - case context.Canceled: - return topo.NewError(topo.Interrupted, nodePath) - case context.DeadlineExceeded: - return topo.NewError(topo.Timeout, nodePath) - } - - return err -} diff --git a/go/vt/topo/k8stopo/file.go b/go/vt/topo/k8stopo/file.go deleted file mode 100644 index 0a186235cde..00000000000 --- a/go/vt/topo/k8stopo/file.go +++ /dev/null @@ -1,301 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "bytes" - "compress/gzip" - "context" - "encoding/base64" - "fmt" - "hash/fnv" - "io" - "path/filepath" - "strconv" - "strings" - "time" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/util/retry" - - "vitess.io/vitess/go/vt/topo" - vtv1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -// NodeReference contains the data relating to a node -type NodeReference struct { - id string - key string - value string -} - -func packValue(value []byte) ([]byte, error) { - encoded := &bytes.Buffer{} - encoder := base64.NewEncoder(base64.StdEncoding, encoded) - - zw := gzip.NewWriter(encoder) - _, err := zw.Write(value) - if err != nil { - return []byte{}, fmt.Errorf("gzip write error: %s", err) - } - - err = zw.Close() - if err != nil { - return []byte{}, fmt.Errorf("gzip close error: %s", err) - } - - err = encoder.Close() - if err != nil { - return []byte{}, fmt.Errorf("base64 encoder close error: %s", err) - } - - return encoded.Bytes(), nil -} - -func unpackValue(value []byte) ([]byte, error) { - decoder := base64.NewDecoder(base64.StdEncoding, bytes.NewBuffer(value)) - - zr, err := gzip.NewReader(decoder) - if err != nil { - return []byte{}, fmt.Errorf("unable to create new gzip reader: %s", err) - } - - decoded := &bytes.Buffer{} - if _, err := io.Copy(decoded, zr); err != nil { - return []byte{}, fmt.Errorf("error coppying uncompressed data: %s", err) - } - - if err := zr.Close(); err != nil { - return []byte{}, fmt.Errorf("unable to close gzip reader: %s", err) - } - - return decoded.Bytes(), nil -} - -// ToData converts a nodeReference to the data type used in the VitessTopoNode -func (n *NodeReference) ToData() vtv1beta1.VitessTopoNodeData { - return vtv1beta1.VitessTopoNodeData{ - Key: n.key, - Value: string(n.value), - } -} - -func getHash(parent string) string { - hasher := fnv.New64a() - hasher.Write([]byte(parent)) - return strconv.FormatUint(hasher.Sum64(), 10) -} - -func (s *Server) newNodeReference(key string) *NodeReference { - key = filepath.Join(s.root, key) - - node := &NodeReference{ - id: fmt.Sprintf("vt-%s", getHash(key)), - key: key, - } - - return node -} - -func (s *Server) buildFileResource(filePath string, contents []byte) (*vtv1beta1.VitessTopoNode, error) { - node := s.newNodeReference(filePath) - - value, err := packValue(contents) - if err != nil { - return nil, err - } - - // create data - node.value = string(value) - - // Create "file" object - return &vtv1beta1.VitessTopoNode{ - ObjectMeta: metav1.ObjectMeta{ - Name: node.id, - Namespace: s.namespace, - }, - Data: node.ToData(), - }, nil -} - -// Create is part of the topo.Conn interface. -func (s *Server) Create(ctx context.Context, filePath string, contents []byte) (topo.Version, error) { - resource, err := s.buildFileResource(filePath, contents) - if err != nil { - return nil, convertError(err, filePath) - } - - final, err := s.resourceClient.Create(ctx, resource, metav1.CreateOptions{}) - if err != nil { - return nil, convertError(err, filePath) - } - - // Update the internal cache - err = s.memberIndexer.Update(final) - if err != nil { - return nil, convertError(err, filePath) - } - - return KubernetesVersion(final.GetResourceVersion()), nil -} - -// Update is part of the topo.Conn interface. -func (s *Server) Update(ctx context.Context, filePath string, contents []byte, version topo.Version) (topo.Version, error) { - resource, err := s.buildFileResource(filePath, contents) - if err != nil { - return nil, convertError(err, filePath) - } - - var finalVersion KubernetesVersion - - err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { - result, err := s.resourceClient.Get(ctx, resource.Name, metav1.GetOptions{}) - if err != nil && errors.IsNotFound(err) && version == nil { - // Update should create objects when the version is nil and the object is not found - createdVersion, err := s.Create(ctx, filePath, contents) - if err != nil { - return err - } - finalVersion = KubernetesVersion(createdVersion.String()) - return nil - } - - // If a non-nil version is given to update, fail on mismatched version - if version != nil && KubernetesVersion(result.GetResourceVersion()) != version { - return topo.NewError(topo.BadVersion, filePath) - } - - // set new contents - result.Data.Value = resource.Data.Value - - // get result or err - final, err := s.resourceClient.Update(ctx, result, metav1.UpdateOptions{}) - if err != nil { - return convertError(err, filePath) - } - - // Update the internal cache - err = s.memberIndexer.Update(final) - if err != nil { - return convertError(err, filePath) - } - - finalVersion = KubernetesVersion(final.GetResourceVersion()) - - return nil - }) - if err != nil { - return nil, err - } - - return finalVersion, nil -} - -// Get is part of the topo.Conn interface. -func (s *Server) Get(ctx context.Context, filePath string) ([]byte, topo.Version, error) { - node := s.newNodeReference(filePath) - - result, err := s.resourceClient.Get(ctx, node.id, metav1.GetOptions{}) - if err != nil { - return []byte{}, nil, convertError(err, filePath) - } - - out, err := unpackValue([]byte(result.Data.Value)) - if err != nil { - return []byte{}, nil, convertError(err, filePath) - } - - return out, KubernetesVersion(result.GetResourceVersion()), nil -} - -// List is part of the topo.Conn interface. -func (s *Server) List(ctx context.Context, filePathPrefix string) ([]topo.KVInfo, error) { - nodeList, err := s.resourceClient.List(ctx, metav1.ListOptions{}) - - results := []topo.KVInfo{} - if err != nil { - return results, convertError(err, filePathPrefix) - } - nodes := nodeList.Items - if len(nodes) == 0 { - return results, topo.NewError(topo.NoNode, filePathPrefix) - } - rootPrefix := filepath.Join(s.root, filePathPrefix) - for _, node := range nodes { - if strings.HasPrefix(node.Data.Key, rootPrefix) { - out, err := unpackValue([]byte(node.Data.Value)) - if err != nil { - return results, convertError(err, node.Data.Key) - } - results = append(results, topo.KVInfo{ - Key: []byte(node.Data.Key), - Value: out, - Version: KubernetesVersion(node.GetResourceVersion()), - }) - } - } - - return results, nil -} - -// Delete is part of the topo.Conn interface. -func (s *Server) Delete(ctx context.Context, filePath string, version topo.Version) error { - node := s.newNodeReference(filePath) - - // Check version before delete - current, err := s.resourceClient.Get(ctx, node.id, metav1.GetOptions{}) - if err != nil { - return convertError(err, filePath) - } - if version != nil { - if KubernetesVersion(current.GetResourceVersion()) != version { - return topo.NewError(topo.BadVersion, filePath) - } - } - - err = s.resourceClient.Delete(ctx, node.id, metav1.DeleteOptions{}) - if err != nil { - return convertError(err, filePath) - } - - // Wait for one of the following conditions - // 1. Context is cancelled - // 2. The object is no longer in the cache - // 3. The object in the cache has a new uid (was deleted but recreated since we last checked) - for { - select { - case <-ctx.Done(): - return convertError(ctx.Err(), filePath) - case <-time.After(50 * time.Millisecond): - } - - obj, ok, err := s.memberIndexer.Get(current) - if err != nil { // error getting from cache - return convertError(err, filePath) - } - if !ok { // deleted from cache - break - } - cached := obj.(*vtv1beta1.VitessTopoNode) - if cached.GetUID() != current.GetUID() { - break // deleted and recreated - } - } - - return nil -} diff --git a/go/vt/topo/k8stopo/file_test.go b/go/vt/topo/k8stopo/file_test.go deleted file mode 100644 index 036c2c02f2a..00000000000 --- a/go/vt/topo/k8stopo/file_test.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "reflect" - "testing" -) - -func Test_packValue(t *testing.T) { - tests := []struct { - name string - value []byte - want []byte - wantErr bool - }{ - { - // a gzip with an empty payload still has header bytes to identify the stream - "empty", - []byte{}, - []byte{72, 52, 115, 73, 65, 65, 65, 65, 65, 65, 65, 65, 47, 119, 69, 65, 65, 80, 47, 47, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 61}, - false, - }, - { - "valid payload", - []byte("test payload"), - []byte{72, 52, 115, 73, 65, 65, 65, 65, 65, 65, 65, 65, 47, 121, 112, 74, 76, 83, 53, 82, 75, 69, 105, 115, 122, 77, 108, 80, 84, 65, 69, 69, 65, 65, 68, 47, 47, 43, 69, 57, 72, 101, 115, 77, 65, 65, 65, 65}, - false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := packValue(tt.value) - if (err != nil) != tt.wantErr { - t.Errorf("packValue() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("packValue() = %v, want %v", got, tt.want) - } - }) - } -} - -func Test_unpackValue(t *testing.T) { - tests := []struct { - name string - value []byte - want []byte - wantErr bool - }{ - { - // a gzip with an empty payload still has header bytes to identify the stream - "empty", - []byte{72, 52, 115, 73, 65, 65, 65, 65, 65, 65, 65, 65, 47, 119, 69, 65, 65, 80, 47, 47, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 61}, - []byte{}, - false, - }, - { - "valid payload", - []byte{72, 52, 115, 73, 65, 65, 65, 65, 65, 65, 65, 65, 47, 121, 112, 74, 76, 83, 53, 82, 75, 69, 105, 115, 122, 77, 108, 80, 84, 65, 69, 69, 65, 65, 68, 47, 47, 43, 69, 57, 72, 101, 115, 77, 65, 65, 65, 65}, - []byte("test payload"), - false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := unpackValue(tt.value) - if (err != nil) != tt.wantErr { - t.Errorf("unpackValue() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("unpackValue() = %v, want %v", got, tt.want) - } - }) - } -} - -func Test_packUnpackRoundTrip(t *testing.T) { - tests := []struct { - name string - value []byte - wantErr bool - }{ - { - "empty", - []byte{}, - false, - }, - { - "valid payload", - []byte("test payload"), - false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - packed, err := packValue(tt.value) - if (err != nil) != tt.wantErr { - t.Errorf("packValue() error = %v, wantErr %v", err, tt.wantErr) - return - } - - unpacked, err := unpackValue(packed) - if (err != nil) != tt.wantErr { - t.Errorf("packValue() error = %v, wantErr %v", err, tt.wantErr) - return - } - - if !reflect.DeepEqual(unpacked, tt.value) { - t.Errorf("unpacked value != original value original = %v, unpacked %v", tt.value, unpacked) - return - } - }) - } -} diff --git a/go/vt/topo/k8stopo/lock.go b/go/vt/topo/k8stopo/lock.go deleted file mode 100644 index e1321ea76e4..00000000000 --- a/go/vt/topo/k8stopo/lock.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "time" - - "context" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "vitess.io/vitess/go/vt/topo" - vtv1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -// kubernetesLockDescriptor implements topo.LockDescriptor. -type kubernetesLockDescriptor struct { - s *Server - leaseID string - leasePath string -} - -// Lock is part of the topo.Conn interface. -func (s *Server) Lock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) { - return s.lock(ctx, dirPath, contents, false) -} - -// TryLock is part of the topo.Conn interface. Its implementation is same as Lock -func (s *Server) TryLock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) { - return s.Lock(ctx, dirPath, contents) -} - -// lock is used by both Lock() and primary election. -// it blocks until the lock is taken, interrupted, or times out -func (s *Server) lock(ctx context.Context, nodePath, contents string, createMissing bool) (topo.LockDescriptor, error) { - // Satisfy the topo.Conn interface - if !createMissing { - // Per the topo.Conn interface: - // "Returns ErrNoNode if the directory doesn't exist (meaning - // there is no existing file under that directory)." - if _, err := s.ListDir(ctx, nodePath, false); err != nil { - return nil, convertError(err, nodePath) - } - } - - resource, err := s.buildFileResource(nodePath, []byte(contents)) - if err != nil { - return nil, convertError(err, nodePath) - } - - // mark locks as ephemeral - resource.Data.Ephemeral = true - - var final *vtv1beta1.VitessTopoNode - - for { - // Try and and create the resource. The kube api will handle the actual atomic lock creation - final, err = s.resourceClient.Create(ctx, resource, metav1.CreateOptions{}) - if errors.IsAlreadyExists(err) { - select { - case <-time.After(10 * time.Millisecond): - continue // retry - case <-ctx.Done(): - return nil, convertError(ctx.Err(), nodePath) - } - } else if err != nil { - return nil, convertError(err, nodePath) - } - - break - } - - // Update the internal cache - err = s.memberIndexer.Update(final) - if err != nil { - return nil, convertError(err, nodePath) - } - - return &kubernetesLockDescriptor{ - s: s, - leaseID: resource.Name, - leasePath: resource.Data.Key, - }, nil -} - -// Check is part of the topo.LockDescriptor interface. -func (ld *kubernetesLockDescriptor) Check(ctx context.Context) error { - // Get the object and ensure the leaseid - _, err := ld.s.resourceClient.Get(ctx, ld.leaseID, metav1.GetOptions{}) // TODO namespacing - if err != nil { - return convertError(err, ld.leasePath) - - } - - return nil -} - -// Unlock is part of the topo.LockDescriptor interface. -func (ld *kubernetesLockDescriptor) Unlock(ctx context.Context) error { - err := ld.s.resourceClient.Delete(ctx, ld.leaseID, metav1.DeleteOptions{}) // TODO namespacing - if err != nil { - return convertError(err, ld.leasePath) - } - return nil -} diff --git a/go/vt/topo/k8stopo/server.go b/go/vt/topo/k8stopo/server.go deleted file mode 100644 index 2507ae254b1..00000000000 --- a/go/vt/topo/k8stopo/server.go +++ /dev/null @@ -1,241 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package k8stopo implements topo.Server with the Kubernetes API as the backend. - -We expect the following behavior from the kubernetes client library: - - - TODO - -We follow these conventions within this package: - - - TODO -*/ -package k8stopo - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "vitess.io/vitess/go/vt/servenv" - - "github.com/spf13/pflag" - - "k8s.io/apimachinery/pkg/fields" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clientcmd" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/topo" - vtv1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" - vtkube "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned" - vttyped "vitess.io/vitess/go/vt/topo/k8stopo/client/clientset/versioned/typed/topo/v1beta1" -) - -var kubeconfigPath, configContext, configNamespace string - -func init() { - servenv.RegisterFlagsForTopoBinaries(registerK8STopoFlags) -} - -func registerK8STopoFlags(fs *pflag.FlagSet) { - // kubeconfigPath is a string that gives the location of a valid kubeconfig file - fs.StringVar(&kubeconfigPath, "topo_k8s_kubeconfig", kubeconfigPath, "Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod") - - // configContext is a string that can be used to override the default context - fs.StringVar(&configContext, "topo_k8s_context", configContext, "The kubeconfig context to use, overrides the 'current-context' from the config") - - // configNamespace is a string that can be used to override the default namespace for objects - fs.StringVar(&configNamespace, "topo_k8s_namespace", configNamespace, "The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config") -} - -// Factory is the Kubernetes topo.Factory implementation. -type Factory struct{} - -// HasGlobalReadOnlyCell is part of the topo.Factory interface. -func (f Factory) HasGlobalReadOnlyCell(serverAddr, root string) bool { - return false -} - -// Create is part of the topo.Factory interface. -func (f Factory) Create(cell, serverAddr, root string) (topo.Conn, error) { - return NewServer(serverAddr, root) -} - -// Server is the implementation of topo.Server for Kubernetes. -type Server struct { - // kubeClient is the entire kubernetes interface - kubeClient kubernetes.Interface - - // vtKubeClient is the client for vitess api types - vtKubeClient vtkube.Interface - - // resource is a scoped-down kubernetes.Interface used for convenience - resourceClient vttyped.VitessTopoNodeInterface - - // stopChan is used to tell the client-go informers to quit - stopChan chan struct{} - - // memberInformer is the controller that syncronized the cache of data - memberInformer cache.Controller - - // memberIndexer is the cache of tree data - memberIndexer cache.Indexer - - // namespace is the Kubernetes namespace to be used for all resources - namespace string - - // root is the root path for this client. - // used for resource prefixing - root string -} - -// Close implements topo.Server.Close. -func (s *Server) Close() { - close(s.stopChan) -} - -func getKeyParents(key string) []string { - parents := []string{""} - parent := []string{} - for _, segment := range strings.Split(filepath.Dir(key), "/") { - parent = append(parent, segment) - parents = append(parents, strings.Join(parent, "/")) - } - return parents -} - -func indexByParent(obj any) ([]string, error) { - return getKeyParents(obj.(*vtv1beta1.VitessTopoNode).Data.Key), nil -} - -// syncTree starts and syncs the member objects that form the directory "tree" -func (s *Server) syncTree() error { - // Create the informer / indexer - restClient := s.vtKubeClient.TopoV1beta1().RESTClient() - listwatch := cache.NewListWatchFromClient(restClient, "vitesstoponodes", s.namespace, fields.Everything()) - - // set up index funcs - indexers := cache.Indexers{} - indexers["by_parent"] = indexByParent - - s.memberIndexer, s.memberInformer = cache.NewIndexerInformer(listwatch, &vtv1beta1.VitessTopoNode{}, 0, - cache.ResourceEventHandlerFuncs{}, indexers) - - // Start indexer - go s.memberInformer.Run(s.stopChan) - - // Wait for sync - log.Info("Waiting for Kubernetes topo cache sync") - if !cache.WaitForCacheSync(s.stopChan, s.memberInformer.HasSynced) { - return fmt.Errorf("timed out waiting for caches to sync") - } - log.Info("Kubernetes topo cache sync completed") - - return nil -} - -// NewServer returns a new k8stopo.Server. -func NewServer(_, root string) (*Server, error) { - log.Info("Creating new Kubernetes topo server with root: ", root) - - var config *rest.Config - var err error - namespace := "default" //nolint - - if kubeconfigPath == "" { - log.Info("Creating new in-cluster Kubernetes config") - - config, err = rest.InClusterConfig() - if err != nil { - return nil, fmt.Errorf("error getting Kubernetes in-cluster client config: %s", err) - } - - // When running in the cluster, use the namespace file to detect the current namespace - nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") - if err != nil { - return nil, err - } - namespace = string(nsBytes) - } else { - log.Info("Creating new Kubernetes config from kubeconfig", kubeconfigPath) - - configOverrides := &clientcmd.ConfigOverrides{} - - // respect the context flag - if configContext != "" { - configOverrides.CurrentContext = configContext - } - - configLoader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( - &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, - configOverrides, - ) - - config, err = configLoader.ClientConfig() - if err != nil { - return nil, fmt.Errorf("error getting Kubernetes client config: %s", err) - } - - // When given a kubeconfig file, use the namespace from the current context - namespace, _, err = configLoader.Namespace() - if err != nil { - return nil, fmt.Errorf("error getting namespace from Kubernetes client config: %s", err) - } - } - - // respect the namespace flag - if configNamespace != "" { - namespace = configNamespace - } - - // create the kubernetes client - kubeClientset, err := kubernetes.NewForConfig(config) - if err != nil { - return nil, fmt.Errorf("error creating official Kubernetes client: %s", err) - } - - vtKubeClientset, err := vtkube.NewForConfig(config) - if err != nil { - return nil, fmt.Errorf("error creating vitess Kubernetes client: %s", err) - } - - // Create the server - s := &Server{ - namespace: namespace, - kubeClient: kubeClientset, - vtKubeClient: vtKubeClientset, - resourceClient: vtKubeClientset.TopoV1beta1().VitessTopoNodes(namespace), - root: root, - stopChan: make(chan struct{}), - } - - // Sync cache - if err = s.syncTree(); err != nil { - return nil, err - } - - return s, nil -} - -func init() { - topo.RegisterFactory("k8s", Factory{}) -} diff --git a/go/vt/topo/k8stopo/server_flaky_test.go b/go/vt/topo/k8stopo/server_flaky_test.go deleted file mode 100644 index 5a9fce1ca80..00000000000 --- a/go/vt/topo/k8stopo/server_flaky_test.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "context" - "fmt" - "os" - "os/exec" - "path" - "runtime" - "testing" - "time" - - "github.com/stretchr/testify/require" - extensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kubeyaml "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/tools/clientcmd" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/test" -) - -func TestKubernetesTopo(t *testing.T) { - if runtime.GOOS != "linux" { - t.Skip("k3s not supported on non-linux platforms. Skipping k8stopo integration tests") - } - - // Create a data dir for test data - testDataDir := t.TempDir() - - // Gen a temp file name for the config - testConfig, err := os.CreateTemp("", "vt-test-k3s-config") - if err != nil { - t.Fatal(err) - } - testConfigPath := testConfig.Name() - defer os.Remove(testConfigPath) // clean up - - k3sArgs := []string{ - "server", "start", - "--write-kubeconfig=" + testConfigPath, - "--data-dir=" + testDataDir, - "--https-listen-port=6663", - "--disable-agent", "--flannel-backend=none", - "--disable-network-policy", - "--disable-cloud-controller", - "--disable-scheduler", - "--no-deploy=coredns,servicelb,traefik,local-storage,metrics-server", - "--kube-controller-manager-arg=port=10253", - - "--log=/tmp/k3svtlog", - } - - // Start a minimal k3s daemon, and close it after all tests are done. - ctx, killK3s := context.WithCancel(context.Background()) - c := exec.CommandContext(ctx, "k3s", k3sArgs...) - - // Start in the background and kill when tests end - t.Log("Starting k3s") - err = c.Start() - if err != nil { - t.Fatal("Unable to start k3s", err) - } - defer killK3s() - - // Wait for server to be ready - for { - t.Log("Waiting for server to be ready") - time.Sleep(time.Second) - config, err := clientcmd.BuildConfigFromFlags("", testConfigPath) - if err != nil { - continue - } - - // Create the vitesstoponode crd - apiextensionsClientSet, err := apiextensionsclient.NewForConfig(config) - if err != nil { - t.Fatal(err) - } - - crdFile, err := os.Open("./VitessTopoNodes-crd.yaml") - require.NoError(t, err) - defer crdFile.Close() - - crd := &extensionsv1.CustomResourceDefinition{} - - kubeyaml.NewYAMLOrJSONDecoder(crdFile, 2048).Decode(crd) - - _, err = apiextensionsClientSet.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - - break - } - - serverAddr := "default" - - oldKubeConfigPath := kubeconfigPath - kubeconfigPath = testConfigPath - defer func() { - kubeconfigPath = oldKubeConfigPath - }() - - // Run the test suite. - testIndex := 0 - test.TopoServerTestSuite(t, func() *topo.Server { - // Each test will use its own sub-directories. - // The directories will be created when used the first time. - testRoot := fmt.Sprintf("/test-%v", testIndex) - testIndex++ - - globalRoot := path.Join(testRoot, topo.GlobalCell) - cellRoot := path.Join(testRoot, test.LocalCellName) - - ts, err := topo.OpenServer("k8s", serverAddr, globalRoot) - if err != nil { - t.Fatalf("OpenServer() failed: %v", err) - } - if err := ts.CreateCellInfo(context.Background(), test.LocalCellName, &topodatapb.CellInfo{ - ServerAddress: serverAddr, - Root: cellRoot, - }); err != nil { - t.Fatalf("CreateCellInfo() failed: %v", err) - } - - return ts - }, []string{"checkTryLock", "checkShardWithLock"}) -} diff --git a/go/vt/topo/k8stopo/version.go b/go/vt/topo/k8stopo/version.go deleted file mode 100644 index c7e16bfffeb..00000000000 --- a/go/vt/topo/k8stopo/version.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "fmt" - - "vitess.io/vitess/go/vt/topo" -) - -// KubernetesVersion is Kubernetes's idea of a version. -// It implements topo.Version. -type KubernetesVersion string - -// String is part of the topo.Version interface. -func (v KubernetesVersion) String() string { - return string(v) -} - -// VersionFromInt is used by old-style functions to create a proper -// Version: if version is -1, returns nil. Otherwise returns the -// KubernetesVersion object. -func VersionFromInt(version int64) topo.Version { - if version == -1 { - return nil - } - return KubernetesVersion(fmt.Sprint(version)) -} diff --git a/go/vt/topo/k8stopo/watch.go b/go/vt/topo/k8stopo/watch.go deleted file mode 100644 index 4cf1c7bc2c3..00000000000 --- a/go/vt/topo/k8stopo/watch.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8stopo - -import ( - "context" - - "k8s.io/apimachinery/pkg/fields" - "k8s.io/client-go/tools/cache" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/topo" - vtv1beta1 "vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1" -) - -// Watch is part of the topo.Conn interface. -func (s *Server) Watch(ctx context.Context, filePath string) (*topo.WatchData, <-chan *topo.WatchData, error) { - log.Info("Starting Kubernetes topo Watch on ", filePath) - - current := &topo.WatchData{} - - // get current - initialCtx, initialCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) - defer initialCancel() - - contents, ver, err := s.Get(initialCtx, filePath) - if err != nil { - return nil, nil, err - } - current.Contents = contents - current.Version = ver - - // Create the changes channel - changes := make(chan *topo.WatchData, 10) - - // Create a signal channel for non-interrupt shutdowns - gracefulShutdown := make(chan struct{}) - - resource, err := s.buildFileResource(filePath, []byte{}) - if err != nil { - return nil, nil, err - } - - // Create the informer / indexer to watch the single resource - restClient := s.vtKubeClient.TopoV1beta1().RESTClient() - listwatch := cache.NewListWatchFromClient(restClient, "vitesstoponodes", s.namespace, fields.OneTermEqualSelector("metadata.name", resource.Name)) - - // set up index funcs - indexers := cache.Indexers{} - indexers["by_parent"] = indexByParent - - _, memberInformer := cache.NewIndexerInformer(listwatch, &vtv1beta1.VitessTopoNode{}, 0, - cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj any) { - vtn := obj.(*vtv1beta1.VitessTopoNode) - out, err := unpackValue([]byte(vtn.Data.Value)) - if err != nil { - changes <- &topo.WatchData{Err: err} - close(gracefulShutdown) - } else { - changes <- &topo.WatchData{ - Contents: out, - Version: KubernetesVersion(vtn.GetResourceVersion()), - } - } - }, - UpdateFunc: func(oldObj, newObj any) { - vtn := newObj.(*vtv1beta1.VitessTopoNode) - out, err := unpackValue([]byte(vtn.Data.Value)) - if err != nil { - changes <- &topo.WatchData{Err: err} - close(gracefulShutdown) - } else { - changes <- &topo.WatchData{ - Contents: out, - Version: KubernetesVersion(vtn.GetResourceVersion()), - } - } - }, - DeleteFunc: func(obj any) { - vtn := obj.(*vtv1beta1.VitessTopoNode) - changes <- &topo.WatchData{Err: topo.NewError(topo.NoNode, vtn.Name)} - close(gracefulShutdown) - }, - }, indexers) - - // create control chan for informer and start it - informerChan := make(chan struct{}) - go memberInformer.Run(informerChan) - - // Handle interrupts - go closeOnDone(ctx, filePath, informerChan, gracefulShutdown, changes) - - return current, changes, nil -} - -func closeOnDone(ctx context.Context, filePath string, informerChan chan struct{}, gracefulShutdown chan struct{}, changes chan *topo.WatchData) { - select { - case <-ctx.Done(): - if err := ctx.Err(); err != nil && err == context.Canceled { - changes <- &topo.WatchData{Err: topo.NewError(topo.Interrupted, filePath)} - } - case <-gracefulShutdown: - } - close(informerChan) - close(changes) -} - -// WatchRecursive is part of the topo.Conn interface. -func (s *Server) WatchRecursive(_ context.Context, path string) ([]*topo.WatchDataRecursive, <-chan *topo.WatchDataRecursive, error) { - // Kubernetes doesn't seem to provide a primitive that watches a prefix - // or directory, so this likely can never be implemented. - return nil, nil, topo.NewError(topo.NoImplementation, path) -} diff --git a/go/vt/topo/keyspace.go b/go/vt/topo/keyspace.go index 8a3e816001a..feb80c374e5 100755 --- a/go/vt/topo/keyspace.go +++ b/go/vt/topo/keyspace.go @@ -17,10 +17,10 @@ limitations under the License. package topo import ( - "path" - "context" + "path" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/event" @@ -52,6 +52,12 @@ func (ki *KeyspaceInfo) SetKeyspaceName(name string) { ki.keyspace = name } +// ValidateKeyspaceName checks if the provided name is a valid name for a +// keyspace. +func ValidateKeyspaceName(name string) error { + return validateObjectName(name) +} + // GetServedFrom returns a Keyspace_ServedFrom record if it exists. func (ki *KeyspaceInfo) GetServedFrom(tabletType topodatapb.TabletType) *topodatapb.Keyspace_ServedFrom { for _, ksf := range ki.ServedFroms { @@ -159,6 +165,10 @@ func (ki *KeyspaceInfo) ComputeCellServedFrom(cell string) []*topodatapb.SrvKeys // CreateKeyspace wraps the underlying Conn.Create // and dispatches the event. func (ts *Server) CreateKeyspace(ctx context.Context, keyspace string, value *topodatapb.Keyspace) error { + if err := ValidateKeyspaceName(keyspace); err != nil { + return vterrors.Wrapf(err, "CreateKeyspace: %s", err) + } + data, err := value.MarshalVT() if err != nil { return err @@ -179,6 +189,10 @@ func (ts *Server) CreateKeyspace(ctx context.Context, keyspace string, value *to // GetKeyspace reads the given keyspace and returns it func (ts *Server) GetKeyspace(ctx context.Context, keyspace string) (*KeyspaceInfo, error) { + if err := ValidateKeyspaceName(keyspace); err != nil { + return nil, vterrors.Wrapf(err, "GetKeyspace: %s", err) + } + keyspacePath := path.Join(KeyspacesPath, keyspace, KeyspaceFile) data, version, err := ts.globalCell.Get(ctx, keyspacePath) if err != nil { @@ -211,6 +225,17 @@ func (ts *Server) GetKeyspaceDurability(ctx context.Context, keyspace string) (s return "none", nil } +func (ts *Server) GetSidecarDBName(ctx context.Context, keyspace string) (string, error) { + keyspaceInfo, err := ts.GetKeyspace(ctx, keyspace) + if err != nil { + return "", err + } + if keyspaceInfo.SidecarDbName != "" { + return keyspaceInfo.SidecarDbName, nil + } + return sidecar.DefaultName, nil +} + func (ts *Server) GetThrottlerConfig(ctx context.Context, keyspace string) (*topodatapb.ThrottlerConfig, error) { keyspaceInfo, err := ts.GetKeyspace(ctx, keyspace) if err != nil { diff --git a/go/vt/topo/locks.go b/go/vt/topo/locks.go index 036ce983078..8d30d85e891 100644 --- a/go/vt/topo/locks.go +++ b/go/vt/topo/locks.go @@ -332,7 +332,7 @@ func (ts *Server) TryLockShard(ctx context.Context, keyspace, shard, action stri return ts.internalLockShard(ctx, keyspace, shard, action, false) } -// isBlocking is used to indicate whether the call should fail-fast or not. +// internalLockShard is used to indicate whether the call should fail-fast or not. func (ts *Server) internalLockShard(ctx context.Context, keyspace, shard, action string, isBlocking bool) (context.Context, func(*error), error) { i, ok := ctx.Value(locksKey).(*locksInfo) if !ok { diff --git a/go/vt/topo/memorytopo/election.go b/go/vt/topo/memorytopo/election.go index fd9830edb35..868a2c53287 100644 --- a/go/vt/topo/memorytopo/election.go +++ b/go/vt/topo/memorytopo/election.go @@ -153,9 +153,7 @@ func (mp *cLeaderParticipation) WaitForNewLeader(ctx context.Context) (<-chan st } notifications := make(chan string, 8) - watchIndex := nextWatchIndex - nextWatchIndex++ - n.watches[watchIndex] = watch{lock: notifications} + watchIndex := n.addWatch(watch{lock: notifications}) if n.lock != nil { notifications <- n.lockContents diff --git a/go/vt/topo/memorytopo/file.go b/go/vt/topo/memorytopo/file.go index 0abfc56cb80..0007203799f 100644 --- a/go/vt/topo/memorytopo/file.go +++ b/go/vt/topo/memorytopo/file.go @@ -262,7 +262,6 @@ func (c *Conn) Delete(ctx context.Context, filePath string, version topo.Version // Check if it's a directory. if n.isDirectory() { - //lint:ignore ST1005 Delete is a function name return fmt.Errorf("delete(%v, %v) failed: it's a directory", c.cell, filePath) } diff --git a/go/vt/topo/memorytopo/memorytopo.go b/go/vt/topo/memorytopo/memorytopo.go index cdad2ddbcdd..f24b2f6c89e 100644 --- a/go/vt/topo/memorytopo/memorytopo.go +++ b/go/vt/topo/memorytopo/memorytopo.go @@ -25,7 +25,6 @@ import ( "math/rand" "strings" "sync" - "time" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/topo" @@ -49,10 +48,6 @@ const ( UnreachableServerAddr = "unreachable" ) -var ( - nextWatchIndex = 0 -) - // Factory is a memory-based implementation of topo.Factory. It // takes a file-system like approach, with directories at each level // being an actual directory node. This is meant to be closer to @@ -132,18 +127,16 @@ type Conn struct { } // dial returns immediately, unless the Conn points to the sentinel -// UnreachableServerAddr, in which case it will block until the context expires -// and return the context's error. +// UnreachableServerAddr, in which case it will block until the context expires. func (c *Conn) dial(ctx context.Context) error { if c.closed { return ErrConnectionClosed } if c.serverAddr == UnreachableServerAddr { <-ctx.Done() - return ctx.Err() } - return nil + return ctx.Err() } // Close is part of the topo.Conn interface. @@ -206,6 +199,20 @@ func (n *node) propagateRecursiveWatch(ev *topo.WatchDataRecursive) { } } +var ( + nextWatchIndex = 0 + nextWatchIndexMu sync.Mutex +) + +func (n *node) addWatch(w watch) int { + nextWatchIndexMu.Lock() + defer nextWatchIndexMu.Unlock() + watchIndex := nextWatchIndex + nextWatchIndex++ + n.watches[watchIndex] = w + return watchIndex +} + // PropagateWatchError propagates the given error to all watches on this node // and recursively applies to all children func (n *node) PropagateWatchError(err error) { @@ -226,14 +233,13 @@ func (n *node) PropagateWatchError(err error) { // NewServerAndFactory returns a new MemoryTopo and the backing factory for all // the cells. It will create one cell for each parameter passed in. It will log.Exit out // in case of a problem. -func NewServerAndFactory(cells ...string) (*topo.Server, *Factory) { +func NewServerAndFactory(ctx context.Context, cells ...string) (*topo.Server, *Factory) { f := &Factory{ cells: make(map[string]*node), generation: uint64(rand.Int63n(1 << 60)), } f.cells[topo.GlobalCell] = f.newDirectory(topo.GlobalCell, nil) - ctx := context.Background() ts, err := topo.NewWithFactory(f, "" /*serverAddress*/, "" /*root*/) if err != nil { log.Exitf("topo.NewWithFactory() failed: %v", err) @@ -248,8 +254,8 @@ func NewServerAndFactory(cells ...string) (*topo.Server, *Factory) { } // NewServer returns the new server -func NewServer(cells ...string) *topo.Server { - server, _ := NewServerAndFactory(cells...) +func NewServer(ctx context.Context, cells ...string) *topo.Server { + server, _ := NewServerAndFactory(ctx, cells...) return server } @@ -342,7 +348,3 @@ func (f *Factory) recursiveDelete(n *node) { f.recursiveDelete(parent) } } - -func init() { - rand.Seed(time.Now().UnixNano()) -} diff --git a/go/vt/topo/memorytopo/server_test.go b/go/vt/topo/memorytopo/server_test.go index 5bfa41c8a5e..c2d1cf6cfb5 100644 --- a/go/vt/topo/memorytopo/server_test.go +++ b/go/vt/topo/memorytopo/server_test.go @@ -17,6 +17,7 @@ limitations under the License. package memorytopo import ( + "context" "testing" "vitess.io/vitess/go/vt/topo" @@ -25,7 +26,9 @@ import ( func TestMemoryTopo(t *testing.T) { // Run the TopoServerTestSuite tests. - test.TopoServerTestSuite(t, func() *topo.Server { - return NewServer(test.LocalCellName) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + test.TopoServerTestSuite(t, ctx, func() *topo.Server { + return NewServer(ctx, test.LocalCellName) }, []string{"checkTryLock", "checkShardWithLock"}) } diff --git a/go/vt/topo/memorytopo/watch.go b/go/vt/topo/memorytopo/watch.go index 14cb20bc09d..73b2d248434 100644 --- a/go/vt/topo/memorytopo/watch.go +++ b/go/vt/topo/memorytopo/watch.go @@ -50,9 +50,7 @@ func (c *Conn) Watch(ctx context.Context, filePath string) (*topo.WatchData, <-c } notifications := make(chan *topo.WatchData, 100) - watchIndex := nextWatchIndex - nextWatchIndex++ - n.watches[watchIndex] = watch{contents: notifications} + watchIndex := n.addWatch(watch{contents: notifications}) go func() { <-ctx.Done() @@ -105,9 +103,7 @@ func (c *Conn) WatchRecursive(ctx context.Context, dirpath string) ([]*topo.Watc }) notifications := make(chan *topo.WatchDataRecursive, 100) - watchIndex := nextWatchIndex - nextWatchIndex++ - n.watches[watchIndex] = watch{recursive: notifications} + watchIndex := n.addWatch(watch{recursive: notifications}) go func() { defer close(notifications) diff --git a/go/vt/topo/server.go b/go/vt/topo/server.go index 20af5c624a2..1995e8b6ec4 100644 --- a/go/vt/topo/server.go +++ b/go/vt/topo/server.go @@ -174,7 +174,7 @@ var ( } FlagBinaries = []string{"vttablet", "vtctl", "vtctld", "vtcombo", "vtgate", - "vtgr", "vtorc", "vtbackup"} + "vtorc", "vtbackup"} ) func init() { @@ -240,7 +240,7 @@ func OpenServer(implementation, serverAddress, root string) (*Server, error) { // Open returns a Server using the command line parameter flags // for implementation, address and root. It log.Exits out if an error occurs. func Open() *Server { - if topoGlobalServerAddress == "" && topoImplementation != "k8s" { + if topoGlobalServerAddress == "" { log.Exitf("topo_global_server_address must be configured") } if topoGlobalRoot == "" { diff --git a/go/vt/topo/shard.go b/go/vt/topo/shard.go index c2538581926..183ed409bbb 100644 --- a/go/vt/topo/shard.go +++ b/go/vt/topo/shard.go @@ -27,9 +27,8 @@ import ( "sync" "time" - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -120,6 +119,10 @@ func IsShardUsingRangeBasedSharding(shard string) bool { // ValidateShardName takes a shard name and sanitizes it, and also returns // the KeyRange. func ValidateShardName(shard string) (string, *topodatapb.KeyRange, error) { + if err := validateObjectName(shard); err != nil { + return "", nil, err + } + if !IsShardUsingRangeBasedSharding(shard) { return shard, nil, nil } @@ -184,17 +187,25 @@ func (si *ShardInfo) HasPrimary() bool { // GetPrimaryTermStartTime returns the shard's primary term start time as a Time value. func (si *ShardInfo) GetPrimaryTermStartTime() time.Time { - return logutil.ProtoToTime(si.Shard.PrimaryTermStartTime) + return protoutil.TimeFromProto(si.Shard.PrimaryTermStartTime).UTC() } // SetPrimaryTermStartTime sets the shard's primary term start time as a Time value. func (si *ShardInfo) SetPrimaryTermStartTime(t time.Time) { - si.Shard.PrimaryTermStartTime = logutil.TimeToProto(t) + si.Shard.PrimaryTermStartTime = protoutil.TimeToProto(t) } // GetShard is a high level function to read shard data. // It generates trace spans. func (ts *Server) GetShard(ctx context.Context, keyspace, shard string) (*ShardInfo, error) { + if err := ValidateKeyspaceName(keyspace); err != nil { + return nil, err + } + + if _, _, err := ValidateShardName(shard); err != nil { + return nil, err + } + span, ctx := trace.NewSpan(ctx, "TopoServer.GetShard") span.Annotate("keyspace", keyspace) span.Annotate("shard", shard) @@ -279,12 +290,9 @@ func (ts *Server) UpdateShardFields(ctx context.Context, keyspace, shard string, // This will lock the Keyspace, as we may be looking at other shard servedTypes. // Using GetOrCreateShard is probably a better idea for most use cases. func (ts *Server) CreateShard(ctx context.Context, keyspace, shard string) (err error) { - // Lock the keyspace, because we'll be looking at ServedTypes. - ctx, unlock, lockErr := ts.LockKeyspace(ctx, keyspace, "CreateShard") - if lockErr != nil { - return lockErr + if err := ValidateKeyspaceName(keyspace); err != nil { + return err } - defer unlock(&err) // validate parameters _, keyRange, err := ValidateShardName(shard) @@ -292,6 +300,13 @@ func (ts *Server) CreateShard(ctx context.Context, keyspace, shard string) (err return err } + // Lock the keyspace, because we'll be looking at ServedTypes. + ctx, unlock, lockErr := ts.LockKeyspace(ctx, keyspace, "CreateShard") + if lockErr != nil { + return lockErr + } + defer unlock(&err) + value := &topodatapb.Shard{ KeyRange: keyRange, } @@ -304,7 +319,7 @@ func (ts *Server) CreateShard(ctx context.Context, keyspace, shard string) (err return err } for _, si := range sis { - if si.KeyRange == nil || key.KeyRangesIntersect(si.KeyRange, keyRange) { + if si.KeyRange == nil || key.KeyRangeIntersect(si.KeyRange, keyRange) { value.IsPrimaryServing = false break } @@ -339,8 +354,17 @@ func (ts *Server) GetOrCreateShard(ctx context.Context, keyspace, shard string) return } - // create the keyspace, maybe it already exists - if err = ts.CreateKeyspace(ctx, keyspace, &topodatapb.Keyspace{}); err != nil && !IsErrType(err, NodeExists) { + // Create the keyspace, if it does not already exist. + // We store the sidecar database name in the keyspace record. + // If not already set, then it is set to the default (_vt) by + // the first tablet to start in the keyspace and is from + // then on immutable. Any other tablets that try to come up in + // this keyspace will be able to serve queries but will fail to + // fully initialize and perform certain operations (e.g. + // OnlineDDL or VReplication workflows) if they are using a + // different sidecar database name. + ksi := topodatapb.Keyspace{SidecarDbName: sidecar.GetName()} + if err = ts.CreateKeyspace(ctx, keyspace, &ksi); err != nil && !IsErrType(err, NodeExists) { return nil, vterrors.Wrapf(err, "CreateKeyspace(%v) failed", keyspace) } @@ -386,7 +410,7 @@ func (si *ShardInfo) GetTabletControl(tabletType topodatapb.TabletType) *topodat return nil } -// UpdateSourceDeniedTables will add or remove the listed tables +// UpdateDeniedTables will add or remove the listed tables // in the shard record's TabletControl structures. Note we don't // support a lot of the corner cases: // - only support one table list per shard. If we encounter a different @@ -395,7 +419,7 @@ func (si *ShardInfo) GetTabletControl(tabletType topodatapb.TabletType) *topodat // because it's not used in the same context (vertical vs horizontal sharding) // // This function should be called while holding the keyspace lock. -func (si *ShardInfo) UpdateSourceDeniedTables(ctx context.Context, tabletType topodatapb.TabletType, cells []string, remove bool, tables []string) error { +func (si *ShardInfo) UpdateDeniedTables(ctx context.Context, tabletType topodatapb.TabletType, cells []string, remove bool, tables []string) error { if err := CheckKeyspaceLocked(ctx, si.keyspace); err != nil { return err } @@ -607,7 +631,7 @@ func (ts *Server) FindAllTabletAliasesInShardByCell(ctx context.Context, keyspac } for _, a := range resultAsMap { - result = append(result, proto.Clone(a).(*topodatapb.TabletAlias)) + result = append(result, a.CloneVT()) } sort.Sort(topoproto.TabletAliasList(result)) return result, err diff --git a/go/vt/topo/shard_test.go b/go/vt/topo/shard_test.go index 4c0088f00ee..2c0b9082816 100644 --- a/go/vt/topo/shard_test.go +++ b/go/vt/topo/shard_test.go @@ -17,13 +17,14 @@ limitations under the License. package topo import ( + "context" "reflect" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "context" - + "vitess.io/vitess/go/test/utils" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -105,14 +106,14 @@ func lockedKeyspaceContext(keyspace string) context.Context { } func addToDenyList(ctx context.Context, si *ShardInfo, tabletType topodatapb.TabletType, cells, tables []string) error { - if err := si.UpdateSourceDeniedTables(ctx, tabletType, cells, false, tables); err != nil { + if err := si.UpdateDeniedTables(ctx, tabletType, cells, false, tables); err != nil { return err } return nil } func removeFromDenyList(ctx context.Context, si *ShardInfo, tabletType topodatapb.TabletType, cells, tables []string) error { - if err := si.UpdateSourceDeniedTables(ctx, tabletType, cells, true, tables); err != nil { + if err := si.UpdateDeniedTables(ctx, tabletType, cells, true, tables); err != nil { return err } return nil @@ -160,13 +161,13 @@ func TestUpdateSourceDeniedTables(t *testing.T) { // check we enforce the keyspace lock ctx := context.Background() - if err := si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_RDONLY, nil, false, nil); err == nil || err.Error() != "keyspace ks is not locked (no locksInfo)" { + if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, nil, false, nil); err == nil || err.Error() != "keyspace ks is not locked (no locksInfo)" { t.Fatalf("unlocked keyspace produced wrong error: %v", err) } ctx = lockedKeyspaceContext("ks") // add one cell - if err := si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first"}, false, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ + if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first"}, false, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ { TabletType: topodatapb.TabletType_RDONLY, Cells: []string{"first"}, @@ -177,20 +178,20 @@ func TestUpdateSourceDeniedTables(t *testing.T) { } // remove that cell, going back - if err := si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first"}, true, nil); err != nil || len(si.TabletControls) != 0 { + if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first"}, true, nil); err != nil || len(si.TabletControls) != 0 { t.Fatalf("going back should have remove the record: %v", si) } // re-add a cell, then another with different table list to // make sure it fails - if err := si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first"}, false, []string{"t1", "t2"}); err != nil { + if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first"}, false, []string{"t1", "t2"}); err != nil { t.Fatalf("one cell add failed: %v", si) } - if err := si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"second"}, false, []string{"t2", "t3"}); err == nil || err.Error() != "trying to use two different sets of denied tables for shard ks/sh: [t1 t2] and [t2 t3]" { + if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"second"}, false, []string{"t2", "t3"}); err == nil || err.Error() != "trying to use two different sets of denied tables for shard ks/sh: [t1 t2] and [t2 t3]" { t.Fatalf("different table list should fail: %v", err) } // add another cell, see the list grow - if err := si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"second"}, false, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ + if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"second"}, false, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ { TabletType: topodatapb.TabletType_RDONLY, Cells: []string{"first", "second"}, @@ -201,7 +202,7 @@ func TestUpdateSourceDeniedTables(t *testing.T) { } // add all cells, see the list grow to all - if err := si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first", "second", "third"}, false, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ + if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first", "second", "third"}, false, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ { TabletType: topodatapb.TabletType_RDONLY, Cells: []string{"first", "second", "third"}, @@ -212,7 +213,7 @@ func TestUpdateSourceDeniedTables(t *testing.T) { } // remove one cell from the full list - if err := si.UpdateSourceDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"second"}, true, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ + if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"second"}, true, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ { TabletType: topodatapb.TabletType_RDONLY, Cells: []string{"first", "third"}, @@ -222,3 +223,58 @@ func TestUpdateSourceDeniedTables(t *testing.T) { t.Fatalf("one cell removal from all failed: %v", si) } } + +func TestValidateShardName(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + expectedRange *topodatapb.KeyRange + valid bool + }{ + { + name: "0", + valid: true, + }, + { + name: "-80", + expectedRange: &topodatapb.KeyRange{ + Start: nil, + End: []byte{0x80}, + }, + valid: true, + }, + { + name: "40-80", + expectedRange: &topodatapb.KeyRange{ + Start: []byte{0x40}, + End: []byte{0x80}, + }, + valid: true, + }, + { + name: "foo-bar", + valid: false, + }, + { + name: "a/b", + valid: false, + }, + } + + for _, tcase := range cases { + tcase := tcase + t.Run(tcase.name, func(t *testing.T) { + t.Parallel() + + _, kr, err := ValidateShardName(tcase.name) + if !tcase.valid { + assert.Error(t, err, "expected %q to be an invalid shard name", tcase.name) + return + } + + require.NoError(t, err, "expected %q to be a valid shard name, got error: %v", tcase.name, err) + utils.MustMatch(t, tcase.expectedRange, kr) + }) + } +} diff --git a/go/vt/topo/tablet.go b/go/vt/topo/tablet.go index b68a48b0223..d17235f6948 100644 --- a/go/vt/topo/tablet.go +++ b/go/vt/topo/tablet.go @@ -24,18 +24,15 @@ import ( "sync" "time" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/key" - "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/event" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/events" @@ -215,7 +212,7 @@ func (ti *TabletInfo) IsReplicaType() bool { // GetPrimaryTermStartTime returns the tablet's primary term start time as a Time value. func (ti *TabletInfo) GetPrimaryTermStartTime() time.Time { - return logutil.ProtoToTime(ti.Tablet.PrimaryTermStartTime) + return protoutil.TimeFromProto(ti.Tablet.PrimaryTermStartTime).UTC() } // NewTabletInfo returns a TabletInfo basing on tablet with the @@ -586,7 +583,7 @@ func (ts *Server) InitTablet(ctx context.Context, tablet *topodatapb.Tablet, all if tablet.Type == topodatapb.TabletType_PRIMARY { // we update primary_term_start_time even if the primary hasn't changed // because that means a new primary term with the same primary - tablet.PrimaryTermStartTime = logutil.TimeToProto(time.Now()) + tablet.PrimaryTermStartTime = protoutil.TimeToProto(time.Now()) } err = ts.CreateTablet(ctx, tablet) @@ -602,7 +599,7 @@ func (ts *Server) InitTablet(ctx context.Context, tablet *topodatapb.Tablet, all if oldTablet.Keyspace != tablet.Keyspace || oldTablet.Shard != tablet.Shard { return fmt.Errorf("old tablet has shard %v/%v. Cannot override with shard %v/%v. Delete and re-add tablet if you want to change the tablet's keyspace/shard", oldTablet.Keyspace, oldTablet.Shard, tablet.Keyspace, tablet.Shard) } - oldTablet.Tablet = proto.Clone(tablet).(*topodatapb.Tablet) + oldTablet.Tablet = tablet.CloneVT() if err := ts.UpdateTablet(ctx, oldTablet); err != nil { return fmt.Errorf("failed updating tablet %v: %v", topoproto.TabletAliasString(tablet.Alias), err) } diff --git a/go/vt/topo/test/directory.go b/go/vt/topo/test/directory.go index b33404f8643..88001a56d87 100644 --- a/go/vt/topo/test/directory.go +++ b/go/vt/topo/test/directory.go @@ -17,18 +17,15 @@ limitations under the License. package test import ( + "context" "reflect" "testing" - "context" - "vitess.io/vitess/go/vt/topo" ) // checkDirectory tests the directory part of the topo.Conn API. -func checkDirectory(t *testing.T, ts *topo.Server) { - ctx := context.Background() - +func checkDirectory(t *testing.T, ctx context.Context, ts *topo.Server) { // global cell t.Logf("=== checkDirectoryInCell global") conn, err := ts.ConnForCell(ctx, topo.GlobalCell) diff --git a/go/vt/topo/test/election.go b/go/vt/topo/test/election.go index 594e6562eb2..377ea21ae09 100644 --- a/go/vt/topo/test/election.go +++ b/go/vt/topo/test/election.go @@ -46,8 +46,8 @@ func waitForLeaderID(t *testing.T, mp topo.LeaderParticipation, expected string) // checkElection runs the tests on the LeaderParticipation part of the // topo.Conn API. -func checkElection(t *testing.T, ts *topo.Server) { - conn, err := ts.ConnForCell(context.Background(), topo.GlobalCell) +func checkElection(t *testing.T, ctx context.Context, ts *topo.Server) { + conn, err := ts.ConnForCell(ctx, topo.GlobalCell) if err != nil { t.Fatalf("ConnForCell(global) failed: %v", err) } @@ -71,7 +71,7 @@ func checkElection(t *testing.T, ts *topo.Server) { // A lot of implementations use a toplevel directory for their elections. // Make sure it is marked as 'Ephemeral'. - entries, err := conn.ListDir(context.Background(), "/", true /*full*/) + entries, err := conn.ListDir(ctx, "/", true /*full*/) if err != nil { t.Fatalf("ListDir(/) failed: %v", err) } @@ -148,8 +148,8 @@ func checkElection(t *testing.T, ts *topo.Server) { } // checkWaitForNewLeader runs the WaitForLeadership test on the LeaderParticipation -func checkWaitForNewLeader(t *testing.T, ts *topo.Server) { - conn, err := ts.ConnForCell(context.Background(), topo.GlobalCell) +func checkWaitForNewLeader(t *testing.T, ctx context.Context, ts *topo.Server) { + conn, err := ts.ConnForCell(ctx, topo.GlobalCell) if err != nil { t.Fatalf("ConnForCell(global) failed: %v", err) } @@ -195,7 +195,7 @@ func checkWaitForNewLeader(t *testing.T, ts *topo.Server) { t.Fatalf("cannot create mp2: %v", err) } - leaders, err := mp2.WaitForNewLeader(context.Background()) + leaders, err := mp2.WaitForNewLeader(ctx) if topo.IsErrType(err, topo.NoImplementation) { t.Logf("%T does not support WaitForNewLeader()", mp2) return diff --git a/go/vt/topo/test/file.go b/go/vt/topo/test/file.go index 8e5858d17a3..70bb386fd80 100644 --- a/go/vt/topo/test/file.go +++ b/go/vt/topo/test/file.go @@ -26,9 +26,7 @@ import ( ) // checkFile tests the file part of the Conn API. -func checkFile(t *testing.T, ts *topo.Server) { - ctx := context.Background() - +func checkFile(t *testing.T, ctx context.Context, ts *topo.Server) { // global cell t.Logf("=== checkFileInCell global") conn, err := ts.ConnForCell(ctx, topo.GlobalCell) @@ -203,8 +201,7 @@ func checkFileInCell(t *testing.T, conn topo.Conn, hasCells bool) { } // checkList tests the file part of the Conn API. -func checkList(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkList(t *testing.T, ctx context.Context, ts *topo.Server) { // global cell conn, err := ts.ConnForCell(ctx, LocalCellName) if err != nil { diff --git a/go/vt/topo/test/keyspace.go b/go/vt/topo/test/keyspace.go index c5b9af68009..0458e7fd2d7 100644 --- a/go/vt/topo/test/keyspace.go +++ b/go/vt/topo/test/keyspace.go @@ -17,9 +17,8 @@ limitations under the License. package test import ( - "testing" - "context" + "testing" "vitess.io/vitess/go/vt/topo" @@ -27,8 +26,7 @@ import ( ) // checkKeyspace tests the keyspace part of the API -func checkKeyspace(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkKeyspace(t *testing.T, ctx context.Context, ts *topo.Server) { keyspaces, err := ts.GetKeyspaces(ctx) if err != nil { t.Errorf("GetKeyspaces(empty): %v", err) diff --git a/go/vt/topo/test/lock.go b/go/vt/topo/test/lock.go index 69cdeff2a55..dce51ed859d 100644 --- a/go/vt/topo/test/lock.go +++ b/go/vt/topo/test/lock.go @@ -17,12 +17,11 @@ limitations under the License. package test import ( + "context" "path" "testing" "time" - "context" - "vitess.io/vitess/go/vt/topo" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -35,8 +34,7 @@ var timeUntilLockIsTaken = 10 * time.Millisecond // checkLock checks we can lock / unlock as expected. It's using a keyspace // as the lock target. -func checkLock(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkLock(t *testing.T, ctx context.Context, ts *topo.Server) { if err := ts.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace: %v", err) } diff --git a/go/vt/topo/test/replication.go b/go/vt/topo/test/replication.go index e681749f68d..3080cb77145 100644 --- a/go/vt/topo/test/replication.go +++ b/go/vt/topo/test/replication.go @@ -17,20 +17,18 @@ limitations under the License. package test import ( + "context" "testing" "google.golang.org/protobuf/proto" - "context" - "vitess.io/vitess/go/vt/topo" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) // checkShardReplication tests ShardReplication objects -func checkShardReplication(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkShardReplication(t *testing.T, ctx context.Context, ts *topo.Server) { if _, err := ts.GetShardReplication(ctx, LocalCellName, "test_keyspace", "-10"); !topo.IsErrType(err, topo.NoNode) { t.Errorf("GetShardReplication(not there): %v", err) } diff --git a/go/vt/topo/test/serving.go b/go/vt/topo/test/serving.go index dfeac442180..dd00f3da370 100644 --- a/go/vt/topo/test/serving.go +++ b/go/vt/topo/test/serving.go @@ -17,12 +17,11 @@ limitations under the License. package test import ( + "context" "testing" "google.golang.org/protobuf/proto" - "context" - "vitess.io/vitess/go/vt/topo" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -30,9 +29,7 @@ import ( ) // checkSrvKeyspace tests the SrvKeyspace methods (other than watch). -func checkSrvKeyspace(t *testing.T, ts *topo.Server) { - ctx := context.Background() - +func checkSrvKeyspace(t *testing.T, ctx context.Context, ts *topo.Server) { // Test GetSrvKeyspaceNames returns an empty list correctly. if names, err := ts.GetSrvKeyspaceNames(ctx, LocalCellName); err != nil || len(names) != 0 { t.Errorf("GetSrvKeyspace(not there): %v %v", names, err) @@ -91,9 +88,7 @@ func checkSrvKeyspace(t *testing.T, ts *topo.Server) { } // checkSrvVSchema tests the SrvVSchema methods (other than watch). -func checkSrvVSchema(t *testing.T, ts *topo.Server) { - ctx := context.Background() - +func checkSrvVSchema(t *testing.T, ctx context.Context, ts *topo.Server) { // check GetSrvVSchema returns topo.ErrNoNode if no SrvVSchema if _, err := ts.GetSrvVSchema(ctx, LocalCellName); !topo.IsErrType(err, topo.NoNode) { t.Errorf("GetSrvVSchema(not set): %v", err) diff --git a/go/vt/topo/test/shard.go b/go/vt/topo/test/shard.go index d285f382838..b5c92c4a3ec 100644 --- a/go/vt/topo/test/shard.go +++ b/go/vt/topo/test/shard.go @@ -31,8 +31,7 @@ import ( ) // checkShard verifies the Shard operations work correctly -func checkShard(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkShard(t *testing.T, ctx context.Context, ts *topo.Server) { if err := ts.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace: %v", err) } @@ -100,8 +99,7 @@ func checkShard(t *testing.T, ts *topo.Server) { // checkShardWithLock verifies that `TryLockShard` will keep failing with `NodeExists` error if there is // a lock already taken for given shard. Once we unlock that shard, then subsequent call to `TryLockShard` // should succeed. -func checkShardWithLock(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkShardWithLock(t *testing.T, ctx context.Context, ts *topo.Server) { if err := ts.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace: %v", err) } diff --git a/go/vt/topo/test/tablet.go b/go/vt/topo/test/tablet.go index 4a562b1e46a..63afc1abff0 100644 --- a/go/vt/topo/test/tablet.go +++ b/go/vt/topo/test/tablet.go @@ -17,21 +17,18 @@ limitations under the License. package test import ( + "context" "testing" "google.golang.org/protobuf/proto" - "context" - "vitess.io/vitess/go/vt/topo" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) // checkTablet verifies the topo server API is correct for managing tablets. -func checkTablet(t *testing.T, ts *topo.Server) { - ctx := context.Background() - +func checkTablet(t *testing.T, ctx context.Context, ts *topo.Server) { tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: LocalCellName, diff --git a/go/vt/topo/test/testing.go b/go/vt/topo/test/testing.go index e8d014242ad..d189a7f4cf1 100644 --- a/go/vt/topo/test/testing.go +++ b/go/vt/topo/test/testing.go @@ -22,6 +22,7 @@ limitations under the License. package test import ( + "context" "testing" "vitess.io/vitess/go/vt/topo" @@ -40,7 +41,7 @@ func newKeyRange(value string) *topodatapb.KeyRange { return result } -func executeTestSuite(f func(*testing.T, *topo.Server), t *testing.T, ts *topo.Server, ignoreList []string, name string) { +func executeTestSuite(f func(*testing.T, context.Context, *topo.Server), t *testing.T, ctx context.Context, ts *topo.Server, ignoreList []string, name string) { // some test does not apply every where therefore we ignore them for _, n := range ignoreList { if n == name { @@ -48,7 +49,7 @@ func executeTestSuite(f func(*testing.T, *topo.Server), t *testing.T, ts *topo.S return } } - f(t, ts) + f(t, ctx, ts) } // TopoServerTestSuite runs the full topo.Server/Conn test suite. @@ -57,101 +58,101 @@ func executeTestSuite(f func(*testing.T, *topo.Server), t *testing.T, ts *topo.S // Not all tests are applicable for each Topo server, therefore we provide ignoreList in order to // avoid them for given Topo server tests. For example `TryLock` implementation is same as `Lock` for some Topo servers. // Hence, for these Topo servers we ignore executing TryLock Tests. -func TopoServerTestSuite(t *testing.T, factory func() *topo.Server, ignoreList []string) { +func TopoServerTestSuite(t *testing.T, ctx context.Context, factory func() *topo.Server, ignoreList []string) { var ts *topo.Server t.Log("=== checkKeyspace") ts = factory() - executeTestSuite(checkKeyspace, t, ts, ignoreList, "checkKeyspace") + executeTestSuite(checkKeyspace, t, ctx, ts, ignoreList, "checkKeyspace") ts.Close() t.Log("=== checkShard") ts = factory() - executeTestSuite(checkShard, t, ts, ignoreList, "checkShard") + executeTestSuite(checkShard, t, ctx, ts, ignoreList, "checkShard") ts.Close() t.Log("=== checkShardWithLock") ts = factory() - executeTestSuite(checkShardWithLock, t, ts, ignoreList, "checkShardWithLock") + executeTestSuite(checkShardWithLock, t, ctx, ts, ignoreList, "checkShardWithLock") ts.Close() t.Log("=== checkTablet") ts = factory() - executeTestSuite(checkTablet, t, ts, ignoreList, "checkTablet") + executeTestSuite(checkTablet, t, ctx, ts, ignoreList, "checkTablet") ts.Close() t.Log("=== checkShardReplication") ts = factory() - executeTestSuite(checkShardReplication, t, ts, ignoreList, "checkShardReplication") + executeTestSuite(checkShardReplication, t, ctx, ts, ignoreList, "checkShardReplication") ts.Close() t.Log("=== checkSrvKeyspace") ts = factory() - executeTestSuite(checkSrvKeyspace, t, ts, ignoreList, "checkSrvKeyspace") + executeTestSuite(checkSrvKeyspace, t, ctx, ts, ignoreList, "checkSrvKeyspace") ts.Close() t.Log("=== checkSrvVSchema") ts = factory() - executeTestSuite(checkSrvVSchema, t, ts, ignoreList, "checkSrvVSchema") + executeTestSuite(checkSrvVSchema, t, ctx, ts, ignoreList, "checkSrvVSchema") ts.Close() t.Log("=== checkLock") ts = factory() - executeTestSuite(checkLock, t, ts, ignoreList, "checkLock") + executeTestSuite(checkLock, t, ctx, ts, ignoreList, "checkLock") ts.Close() t.Log("=== checkTryLock") ts = factory() - executeTestSuite(checkTryLock, t, ts, ignoreList, "checkTryLock") + executeTestSuite(checkTryLock, t, ctx, ts, ignoreList, "checkTryLock") ts.Close() t.Log("=== checkVSchema") ts = factory() - executeTestSuite(checkVSchema, t, ts, ignoreList, "checkVSchema") + executeTestSuite(checkVSchema, t, ctx, ts, ignoreList, "checkVSchema") ts.Close() t.Log("=== checkRoutingRules") ts = factory() - executeTestSuite(checkRoutingRules, t, ts, ignoreList, "checkRoutingRules") + executeTestSuite(checkRoutingRules, t, ctx, ts, ignoreList, "checkRoutingRules") ts.Close() t.Log("=== checkElection") ts = factory() - executeTestSuite(checkElection, t, ts, ignoreList, "checkElection") + executeTestSuite(checkElection, t, ctx, ts, ignoreList, "checkElection") ts.Close() t.Log("=== checkWaitForNewLeader") ts = factory() - executeTestSuite(checkWaitForNewLeader, t, ts, ignoreList, "checkWaitForNewLeader") + executeTestSuite(checkWaitForNewLeader, t, ctx, ts, ignoreList, "checkWaitForNewLeader") ts.Close() t.Log("=== checkDirectory") ts = factory() - executeTestSuite(checkDirectory, t, ts, ignoreList, "checkDirectory") + executeTestSuite(checkDirectory, t, ctx, ts, ignoreList, "checkDirectory") ts.Close() t.Log("=== checkFile") ts = factory() - executeTestSuite(checkFile, t, ts, ignoreList, "checkFile") + executeTestSuite(checkFile, t, ctx, ts, ignoreList, "checkFile") ts.Close() t.Log("=== checkWatch") ts = factory() - executeTestSuite(checkWatch, t, ts, ignoreList, "checkWatch") + executeTestSuite(checkWatch, t, ctx, ts, ignoreList, "checkWatch") ts.Close() ts = factory() t.Log("=== checkWatchInterrupt") - executeTestSuite(checkWatchInterrupt, t, ts, ignoreList, "checkWatchInterrupt") + executeTestSuite(checkWatchInterrupt, t, ctx, ts, ignoreList, "checkWatchInterrupt") ts.Close() ts = factory() t.Log("=== checkList") - executeTestSuite(checkList, t, ts, ignoreList, "checkList") + executeTestSuite(checkList, t, ctx, ts, ignoreList, "checkList") ts.Close() ts = factory() t.Log("=== checkWatchRecursive") - executeTestSuite(checkWatchRecursive, t, ts, ignoreList, "checkWatchRecursive") + executeTestSuite(checkWatchRecursive, t, ctx, ts, ignoreList, "checkWatchRecursive") ts.Close() } diff --git a/go/vt/topo/test/trylock.go b/go/vt/topo/test/trylock.go index cace3cccc61..4519d1bcaab 100644 --- a/go/vt/topo/test/trylock.go +++ b/go/vt/topo/test/trylock.go @@ -31,8 +31,7 @@ import ( // checkTryLock checks if we can lock / unlock as expected. It's using a keyspace // as the lock target. -func checkTryLock(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkTryLock(t *testing.T, ctx context.Context, ts *topo.Server) { if err := ts.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil { require.Fail(t, "CreateKeyspace fail", err.Error()) } diff --git a/go/vt/topo/test/vschema.go b/go/vt/topo/test/vschema.go index 0c2d58bdba7..5063addaefd 100644 --- a/go/vt/topo/test/vschema.go +++ b/go/vt/topo/test/vschema.go @@ -17,9 +17,8 @@ limitations under the License. package test import ( - "testing" - "context" + "testing" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" @@ -31,8 +30,7 @@ import ( ) // checkVSchema runs the tests on the VSchema part of the API -func checkVSchema(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkVSchema(t *testing.T, ctx context.Context, ts *topo.Server) { if err := ts.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace: %v", err) } @@ -92,9 +90,7 @@ func checkVSchema(t *testing.T, ts *topo.Server) { } // checkRoutingRules runs the tests on the routing rules part of the API -func checkRoutingRules(t *testing.T, ts *topo.Server) { - ctx := context.Background() - +func checkRoutingRules(t *testing.T, ctx context.Context, ts *topo.Server) { if _, err := ts.GetRoutingRules(ctx); err != nil { t.Fatal(err) } diff --git a/go/vt/topo/test/watch.go b/go/vt/topo/test/watch.go index 08dec8cd56f..a4caaaf742d 100644 --- a/go/vt/topo/test/watch.go +++ b/go/vt/topo/test/watch.go @@ -114,8 +114,8 @@ func waitForInitialValueRecursive(t *testing.T, conn topo.Conn, srvKeyspace *top // checkWatch runs the tests on the Watch part of the Conn API. // We use a SrvKeyspace object. -func checkWatch(t *testing.T, ts *topo.Server) { - ctx, cancel := context.WithCancel(context.Background()) +func checkWatch(t *testing.T, ctx context.Context, ts *topo.Server) { + ctx, cancel := context.WithCancel(ctx) defer cancel() conn, err := ts.ConnForCell(ctx, LocalCellName) if err != nil { @@ -227,8 +227,7 @@ func checkWatch(t *testing.T, ts *topo.Server) { } // checkWatchInterrupt tests we can interrupt a watch. -func checkWatchInterrupt(t *testing.T, ts *topo.Server) { - ctx := context.Background() +func checkWatchInterrupt(t *testing.T, ctx context.Context, ts *topo.Server) { conn, err := ts.ConnForCell(ctx, LocalCellName) if err != nil { t.Fatalf("ConnForCell(test) failed: %v", err) @@ -298,8 +297,8 @@ func checkWatchInterrupt(t *testing.T, ts *topo.Server) { } // checkWatchRecursive tests we can setup a recursive watch -func checkWatchRecursive(t *testing.T, ts *topo.Server) { - ctx, cancel := context.WithCancel(context.Background()) +func checkWatchRecursive(t *testing.T, ctx context.Context, ts *topo.Server) { + ctx, cancel := context.WithCancel(ctx) defer cancel() conn, err := ts.ConnForCell(ctx, LocalCellName) if err != nil { diff --git a/go/vt/topo/topoproto/shard.go b/go/vt/topo/topoproto/shard.go index 75c892ce96e..ac062a40899 100644 --- a/go/vt/topo/topoproto/shard.go +++ b/go/vt/topo/topoproto/shard.go @@ -17,13 +17,8 @@ limitations under the License. package topoproto import ( - "encoding/hex" "fmt" - "html/template" "strings" - - "vitess.io/vitess/go/vt/key" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) // KeyspaceShardString returns a "keyspace/shard" string taking @@ -44,23 +39,3 @@ func ParseKeyspaceShard(param string) (string, string, error) { } return keySpaceShard[0], keySpaceShard[1], nil } - -// SourceShardString returns a printable view of a SourceShard. -func SourceShardString(source *topodatapb.Shard_SourceShard) string { - return fmt.Sprintf("SourceShard(%v,%v/%v)", source.Uid, source.Keyspace, source.Shard) -} - -// SourceShardAsHTML returns a HTML version of the object. -func SourceShardAsHTML(source *topodatapb.Shard_SourceShard) template.HTML { - result := fmt.Sprintf("Uid: %v
\nSource: %v/%v
\n", source.Uid, source.Keyspace, source.Shard) - if key.KeyRangeIsPartial(source.KeyRange) { - result += fmt.Sprintf("KeyRange: %v-%v
\n", - hex.EncodeToString(source.KeyRange.Start), - hex.EncodeToString(source.KeyRange.End)) - } - if len(source.Tables) > 0 { - result += fmt.Sprintf("Tables: %v
\n", - strings.Join(source.Tables, " ")) - } - return template.HTML(result) -} diff --git a/go/vt/topo/topoproto/shard_test.go b/go/vt/topo/topoproto/shard_test.go index 3f44549f39d..0c558ef5ad4 100644 --- a/go/vt/topo/topoproto/shard_test.go +++ b/go/vt/topo/topoproto/shard_test.go @@ -18,8 +18,6 @@ package topoproto import ( "testing" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) func TestParseKeyspaceShard(t *testing.T) { @@ -44,23 +42,3 @@ func TestParseKeyspaceShard(t *testing.T) { } } } - -func TestSourceShardAsHTML(t *testing.T) { - s := &topodatapb.Shard_SourceShard{ - Uid: 123, - Keyspace: "source_keyspace", - Shard: "source_shard", - KeyRange: &topodatapb.KeyRange{ - Start: []byte{0x80}, - }, - Tables: []string{"table1", "table2"}, - } - got := string(SourceShardAsHTML(s)) - expected := "Uid: 123
\n" + - "Source: source_keyspace/source_shard
\n" + - "KeyRange: 80-
\n" + - "Tables: table1 table2
\n" - if got != expected { - t.Errorf("got wrong SourceShardAsHTML output, got:\n%vexpected:\n%v", got, expected) - } -} diff --git a/go/vt/topo/topoproto/srvkeyspace.go b/go/vt/topo/topoproto/srvkeyspace.go index 24618233fb2..f5e775ca355 100644 --- a/go/vt/topo/topoproto/srvkeyspace.go +++ b/go/vt/topo/topoproto/srvkeyspace.go @@ -17,9 +17,10 @@ limitations under the License. package topoproto import ( - "bytes" "sort" + "vitess.io/vitess/go/vt/key" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -29,18 +30,12 @@ type ShardReferenceArray []*topodatapb.ShardReference // Len implements sort.Interface func (sra ShardReferenceArray) Len() int { return len(sra) } -// Len implements sort.Interface +// Less implements sort.Interface func (sra ShardReferenceArray) Less(i, j int) bool { - if sra[i].KeyRange == nil || len(sra[i].KeyRange.Start) == 0 { - return true - } - if sra[j].KeyRange == nil || len(sra[j].KeyRange.Start) == 0 { - return false - } - return bytes.Compare(sra[i].KeyRange.Start, sra[j].KeyRange.Start) < 0 + return key.KeyRangeLess(sra[i].KeyRange, sra[j].KeyRange) } -// Len implements sort.Interface +// Swap implements sort.Interface func (sra ShardReferenceArray) Swap(i, j int) { sra[i], sra[j] = sra[j], sra[i] } @@ -51,6 +46,9 @@ func (sra ShardReferenceArray) Sort() { sort.Sort(sra) } // SrvKeyspaceGetPartition returns a Partition for the given tablet type, // or nil if it's not there. func SrvKeyspaceGetPartition(sk *topodatapb.SrvKeyspace, tabletType topodatapb.TabletType) *topodatapb.SrvKeyspace_KeyspacePartition { + if sk == nil { + return nil + } for _, p := range sk.Partitions { if p.ServedType == tabletType { return p diff --git a/go/vt/topo/topoproto/tablet.go b/go/vt/topo/topoproto/tablet.go index 117ccc41af0..63e71807119 100644 --- a/go/vt/topo/topoproto/tablet.go +++ b/go/vt/topo/topoproto/tablet.go @@ -186,6 +186,9 @@ func ParseTabletType(param string) (topodatapb.TabletType, error) { // ParseTabletTypes parses a comma separated list of tablet types and returns a slice with the respective enums. func ParseTabletTypes(param string) ([]topodatapb.TabletType, error) { var tabletTypes []topodatapb.TabletType + if param == "" { + return tabletTypes, nil + } for _, typeStr := range strings.Split(param, ",") { t, err := ParseTabletType(typeStr) if err != nil { @@ -227,6 +230,40 @@ func MakeStringTypeList(types []topodatapb.TabletType) []string { return strs } +// MakeStringTypeUnsortedList returns a list of strings that match the input +// without modifying the order in the list. +func MakeStringTypeUnsortedList(types []topodatapb.TabletType) []string { + strs := make([]string, len(types)) + for i, t := range types { + strs[i] = strings.ToLower(t.String()) + } + return strs +} + +// MakeStringTypeCSV returns the tablet types in CSV format. +func MakeStringTypeCSV(types []topodatapb.TabletType) string { + return strings.Join(MakeStringTypeUnsortedList(types), ",") +} + +// MakeUniqueStringTypeList returns a unique list of strings that match +// the input list -- with duplicate types removed. +// This is needed as some types are aliases for others, like BATCH and +// RDONLY, so e.g. rdonly shows up twice in the list when using +// AllTabletTypes. +func MakeUniqueStringTypeList(types []topodatapb.TabletType) []string { + strs := make([]string, 0, len(types)) + seen := make(map[string]struct{}) + for _, t := range types { + if _, exists := seen[t.String()]; exists { + continue + } + strs = append(strs, strings.ToLower(t.String())) + seen[t.String()] = struct{}{} + } + sort.Strings(strs) + return strs +} + // MysqlAddr returns the host:port of the mysql server. func MysqlAddr(tablet *topodatapb.Tablet) string { return netutil.JoinHostPort(tablet.MysqlHostname, tablet.MysqlPort) diff --git a/go/vt/topo/topotests/cell_info_test.go b/go/vt/topo/topotests/cell_info_test.go index 89ad68043a1..becdbd8d14a 100644 --- a/go/vt/topo/topotests/cell_info_test.go +++ b/go/vt/topo/topotests/cell_info_test.go @@ -36,8 +36,10 @@ import ( func TestCellInfo(t *testing.T) { cell := "cell1" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() // Check GetCellInfo returns what memorytopo created. ci, err := ts.GetCellInfo(ctx, cell, true /*strongRead*/) @@ -135,7 +137,8 @@ func TestCellInfo(t *testing.T) { } func TestExpandCells(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() var cells []string var err error var allCells = "cell1,cell2,cell3" @@ -162,10 +165,12 @@ func TestExpandCells(t *testing.T) { topoCells := strings.Split(cellsIn, ",") var ts *topo.Server if tCase.name == "bad" { - ts = memorytopo.NewServer() + ts = memorytopo.NewServer(ctx) } else { - ts = memorytopo.NewServer(topoCells...) + ts = memorytopo.NewServer(ctx, topoCells...) } + defer ts.Close() + cells, err = ts.ExpandCells(ctx, cellsIn) if tCase.errString != "" { require.Error(t, err) @@ -179,7 +184,7 @@ func TestExpandCells(t *testing.T) { t.Run("aliases", func(t *testing.T) { cells := []string{"cell1", "cell2", "cell3"} - ts := memorytopo.NewServer(cells...) + ts := memorytopo.NewServer(ctx, cells...) err := ts.CreateCellsAlias(ctx, "alias", &topodatapb.CellsAlias{Cells: cells}) require.NoError(t, err) @@ -228,8 +233,10 @@ func TestExpandCells(t *testing.T) { } func TestDeleteCellInfo(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("zone1", "unreachable") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1", "unreachable") + defer ts.Close() err := ts.UpdateCellInfoFields(ctx, "unreachable", func(ci *topodatapb.CellInfo) error { ci.ServerAddress = memorytopo.UnreachableServerAddr @@ -254,11 +261,11 @@ func TestDeleteCellInfo(t *testing.T) { }, } for _, tt := range tests { - func() { - ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) - defer cancel() + t.Run(fmt.Sprintf("force:%t", tt.force), func(t *testing.T) { + requestCtx, requestCancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer requestCancel() - err := ts.DeleteCellInfo(ctx, "unreachable", tt.force) + err := ts.DeleteCellInfo(requestCtx, "unreachable", tt.force) if tt.shouldErr { assert.Error(t, err, "force=%t", tt.force) } else { @@ -272,6 +279,6 @@ func TestDeleteCellInfo(t *testing.T) { } else { assert.True(t, topo.IsErrType(err, topo.NoNode), "expected cell %q to not exist", "unreachable") } - }() + }) } } diff --git a/go/vt/topo/topotests/cells_aliases_test.go b/go/vt/topo/topotests/cells_aliases_test.go index d124dcd8d47..7b8f0ebe3f5 100644 --- a/go/vt/topo/topotests/cells_aliases_test.go +++ b/go/vt/topo/topotests/cells_aliases_test.go @@ -33,8 +33,10 @@ func TestCellsAliases(t *testing.T) { // Create an alias cell := "cell1" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() if err := ts.CreateCellsAlias(ctx, "alias", &topodatapb.CellsAlias{Cells: []string{"cell1", "cell2"}}); err != nil { t.Fatalf("CreateCellsAlias failed: %v", err) diff --git a/go/vt/topo/topotests/keyspace_test.go b/go/vt/topo/topotests/keyspace_test.go new file mode 100644 index 00000000000..96eb9938353 --- /dev/null +++ b/go/vt/topo/topotests/keyspace_test.go @@ -0,0 +1,76 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package topotests + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vterrors" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vtrpc" +) + +func TestCreateKeyspace(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + + t.Run("valid name", func(t *testing.T) { + err := ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) + require.NoError(t, err) + }) + t.Run("invalid name", func(t *testing.T) { + err := ts.CreateKeyspace(ctx, "no/slashes/allowed", &topodatapb.Keyspace{}) + assert.Error(t, err) + assert.Equal(t, vtrpc.Code_INVALID_ARGUMENT, vterrors.Code(err), "%+v", err) + }) +} + +func TestGetKeyspace(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + + t.Run("valid name", func(t *testing.T) { + // First, create the keyspace. + err := ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) + require.NoError(t, err) + + // Now, get it. + ks, err := ts.GetKeyspace(ctx, "ks") + require.NoError(t, err) + assert.NotNil(t, ks) + }) + + t.Run("invalid name", func(t *testing.T) { + // We can't create the keyspace (because we can't create a keyspace + // with an invalid name), so we'll validate the error we get is *not* + // NOT_FOUND. + ks, err := ts.GetKeyspace(ctx, "no/slashes/allowed") + assert.Error(t, err) + assert.Equal(t, vtrpc.Code_INVALID_ARGUMENT, vterrors.Code(err), "%+v", err) + assert.Nil(t, ks) + }) +} diff --git a/go/vt/topo/topotests/replication_test.go b/go/vt/topo/topotests/replication_test.go index 0d6e148fa3d..d45aaf36551 100644 --- a/go/vt/topo/topotests/replication_test.go +++ b/go/vt/topo/topotests/replication_test.go @@ -37,8 +37,10 @@ func TestFixShardReplication(t *testing.T) { cell := "cell1" keyspace := "ks1" shard := "shard1" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() // Create a tablet. alias := &topodatapb.TabletAlias{ diff --git a/go/vt/topo/topotests/shard_watch_test.go b/go/vt/topo/topotests/shard_watch_test.go index a8333251a2d..80b696c106d 100644 --- a/go/vt/topo/topotests/shard_watch_test.go +++ b/go/vt/topo/topotests/shard_watch_test.go @@ -56,8 +56,10 @@ func waitForInitialShard(t *testing.T, ts *topo.Server, keyspace, shard string) func TestWatchShardNoNode(t *testing.T) { keyspace := "ks1" shard := "0" - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") + defer ts.Close() // No Shard -> ErrNoNode _, _, err := ts.WatchShard(ctx, keyspace, shard) @@ -70,8 +72,10 @@ func TestWatchShard(t *testing.T) { cell := "cell1" keyspace := "ks1" shard := "0" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() // Create keyspace if err := ts.CreateKeyspace(ctx, keyspace, &topodatapb.Keyspace{}); err != nil { @@ -205,8 +209,10 @@ func TestWatchShardCancel(t *testing.T) { cell := "cell1" keyspace := "ks1" shard := "0" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() // No Shard -> ErrNoNode _, _, err := ts.WatchShard(ctx, keyspace, shard) diff --git a/go/vt/topo/topotests/srv_keyspace_test.go b/go/vt/topo/topotests/srv_keyspace_test.go index 8eeaf3f07ac..97e44e3a82a 100644 --- a/go/vt/topo/topotests/srv_keyspace_test.go +++ b/go/vt/topo/topotests/srv_keyspace_test.go @@ -62,8 +62,10 @@ func waitForInitialSrvKeyspace(t *testing.T, ts *topo.Server, cell, keyspace str func TestWatchSrvKeyspaceNoNode(t *testing.T) { cell := "cell1" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() // No SrvKeyspace -> ErrNoNode _, _, err := ts.WatchSrvKeyspace(ctx, cell, keyspace) @@ -76,8 +78,10 @@ func TestWatchSrvKeyspace(t *testing.T) { cell := "cell1" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() // Create initial value if err := ts.UpdateSrvKeyspace(ctx, cell, keyspace, &topodatapb.SrvKeyspace{}); err != nil { @@ -175,8 +179,10 @@ func TestWatchSrvKeyspace(t *testing.T) { func TestWatchSrvKeyspaceCancel(t *testing.T) { cell := "cell1" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() // No SrvKeyspace -> ErrNoNode _, _, err := ts.WatchSrvKeyspace(ctx, cell, keyspace) @@ -223,8 +229,10 @@ func TestUpdateSrvKeyspacePartitions(t *testing.T) { cell := "cell1" cell2 := "cell2" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell, cell2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell, cell2) + defer ts.Close() keyRange, err := key.ParseShardingSpec("-") if err != nil || len(keyRange) != 1 { @@ -464,8 +472,10 @@ func TestUpdateUpdateDisableQueryService(t *testing.T) { cell := "cell1" cell2 := "cell2" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell, cell2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell, cell2) + defer ts.Close() leftKeyRange, err := key.ParseShardingSpec("-80") if err != nil || len(leftKeyRange) != 1 { @@ -656,8 +666,10 @@ func TestGetShardServingTypes(t *testing.T) { cell := "cell1" cell2 := "cell2" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell, cell2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell, cell2) + defer ts.Close() leftKeyRange, err := key.ParseShardingSpec("-80") if err != nil || len(leftKeyRange) != 1 { @@ -764,8 +776,10 @@ func TestGetShardServingCells(t *testing.T) { cell := "cell1" cell2 := "cell2" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell, cell2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell, cell2) + defer ts.Close() leftKeyRange, err := key.ParseShardingSpec("-80") if err != nil || len(leftKeyRange) != 1 { @@ -868,8 +882,10 @@ func TestMasterMigrateServedType(t *testing.T) { cell := "cell1" cell2 := "cell2" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell, cell2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell, cell2) + defer ts.Close() initialKeyRange, err := key.ParseShardingSpec("-") if err != nil || len(initialKeyRange) != 1 { @@ -1152,8 +1168,10 @@ func TestValidateSrvKeyspace(t *testing.T) { cell := "cell1" cell2 := "cell2" keyspace := "ks1" - ctx := context.Background() - ts := memorytopo.NewServer(cell, cell2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell, cell2) + defer ts.Close() leftKeyRange, err := key.ParseShardingSpec("-80") if err != nil || len(leftKeyRange) != 1 { diff --git a/go/vt/topo/topotests/srv_vschema_test.go b/go/vt/topo/topotests/srv_vschema_test.go index 73745854ae1..85a2d65c4ec 100644 --- a/go/vt/topo/topotests/srv_vschema_test.go +++ b/go/vt/topo/topotests/srv_vschema_test.go @@ -28,7 +28,6 @@ import ( ) func TestRebuildVSchema(t *testing.T) { - ctx := context.Background() emptySrvVSchema := &vschemapb.SrvVSchema{ RoutingRules: &vschemapb.RoutingRules{}, ShardRoutingRules: &vschemapb.ShardRoutingRules{}, @@ -36,7 +35,10 @@ func TestRebuildVSchema(t *testing.T) { // Set up topology. cells := []string{"cell1", "cell2"} - ts := memorytopo.NewServer(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cells...) + defer ts.Close() // Rebuild with no keyspace / no vschema if err := ts.RebuildSrvVSchema(ctx, cells); err != nil { diff --git a/go/vt/topo/topotests/tablet_test.go b/go/vt/topo/topotests/tablet_test.go index e59b4c6d060..96bcdba1ae5 100644 --- a/go/vt/topo/topotests/tablet_test.go +++ b/go/vt/topo/topotests/tablet_test.go @@ -34,8 +34,10 @@ func TestCreateTablet(t *testing.T) { cell := "cell1" keyspace := "ks1" shard := "shard1" - ctx := context.Background() - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + defer ts.Close() // Create a tablet. alias := &topodatapb.TabletAlias{ diff --git a/go/vt/topo/topotests/wildcards_test.go b/go/vt/topo/topotests/wildcards_test.go index a87992b28de..d373a91b686 100644 --- a/go/vt/topo/topotests/wildcards_test.go +++ b/go/vt/topo/topotests/wildcards_test.go @@ -49,10 +49,12 @@ func (l *topoLayout) initTopo(t *testing.T, ts *topo.Server) { } func validateKeyspaceWildcard(t *testing.T, l *topoLayout, param string, expected []string) { - ts := memorytopo.NewServer() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx) + defer ts.Close() l.initTopo(t, ts) - ctx := context.Background() r, err := ts.ResolveKeyspaceWildcard(ctx, param) if err != nil { if expected != nil { @@ -85,10 +87,12 @@ func TestResolveKeyspaceWildcard(t *testing.T) { } func validateShardWildcard(t *testing.T, l *topoLayout, param string, expected []topo.KeyspaceShard) { - ts := memorytopo.NewServer() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx) + defer ts.Close() l.initTopo(t, ts) - ctx := context.Background() r, err := ts.ResolveShardWildcard(ctx, param) if err != nil { if expected != nil { @@ -181,10 +185,12 @@ func TestResolveShardWildcard(t *testing.T) { } func validateWildcards(t *testing.T, l *topoLayout, param string, expected []string) { - ts := memorytopo.NewServer() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx) + defer ts.Close() l.initTopo(t, ts) - ctx := context.Background() r, err := ts.ResolveWildcards(ctx, topo.GlobalCell, []string{param}) if err != nil { if expected != nil { diff --git a/go/vt/topo/validator.go b/go/vt/topo/validator.go new file mode 100644 index 00000000000..b9ba5f918fa --- /dev/null +++ b/go/vt/topo/validator.go @@ -0,0 +1,34 @@ +package topo + +import ( + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +// ValidateObjectName checks that the name is a valid object name. +// Object names are used for things like keyspace and shard names +// and must match specific constraints. +// They are only allowed to use ASCII letters or digits, - and _. +// No spaces or special characters are allowed. +func validateObjectName(name string) error { + if name == "" { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "empty name") + } + + if len(name) > 64 { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "name %v is too long", name) + } + + for _, c := range name { + switch { + case 'a' <= c && c <= 'z': + case 'A' <= c && c <= 'Z': + case '0' <= c && c <= '9': + case c == '-' || c == '_': + default: + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid character %s in name %v", string(c), name) + } + } + + return nil +} diff --git a/go/vt/topo/validator_test.go b/go/vt/topo/validator_test.go new file mode 100644 index 00000000000..88bad381ace --- /dev/null +++ b/go/vt/topo/validator_test.go @@ -0,0 +1,46 @@ +package topo + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestValidateObjectName(t *testing.T) { + cases := []struct { + name string + err string + }{ + { + name: "valid", + err: "", + }, + { + name: "validdigits1321", + err: "", + }, + { + name: "valid-with-dashes", + err: "", + }, + { + name: "very-long-keyspace-name-that-is-even-too-long-for-mysql-to-handle", + err: "name very-long-keyspace-name-that-is-even-too-long-for-mysql-to-handle is too long", + }, + { + name: "withchars", + err: "invalid character < in name withchars", + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + err := validateObjectName(c.name) + if c.err == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, c.err) + } + }) + } +} diff --git a/go/vt/topo/vschema.go b/go/vt/topo/vschema.go index a2503673deb..0f63a26c2ae 100644 --- a/go/vt/topo/vschema.go +++ b/go/vt/topo/vschema.go @@ -17,27 +17,20 @@ limitations under the License. package topo import ( + "context" "path" "google.golang.org/protobuf/proto" - "context" - "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vterrors" vschemapb "vitess.io/vitess/go/vt/proto/vschema" - "vitess.io/vitess/go/vt/vtgate/vindexes" ) -// SaveVSchema first validates the VSchema, then saves it. +// SaveVSchema saves a Vschema. A valid Vschema should be passed in. It does not verify its correctness. // If the VSchema is empty, just remove it. func (ts *Server) SaveVSchema(ctx context.Context, keyspace string, vschema *vschemapb.Keyspace) error { - err := vindexes.ValidateKeyspace(vschema) - if err != nil { - return err - } - nodePath := path.Join(KeyspacesPath, keyspace, VSchemaFile) data, err := vschema.MarshalVT() if err != nil { diff --git a/go/vt/topo/zk2topo/server_test.go b/go/vt/topo/zk2topo/server_test.go index ebbca9898ce..a35e84f2950 100644 --- a/go/vt/topo/zk2topo/server_test.go +++ b/go/vt/topo/zk2topo/server_test.go @@ -17,12 +17,11 @@ limitations under the License. package zk2topo import ( + "context" "fmt" "path" "testing" - "context" - "vitess.io/vitess/go/testfiles" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/test" @@ -38,7 +37,9 @@ func TestZk2Topo(t *testing.T) { // Run the test suite. testIndex := 0 - test.TopoServerTestSuite(t, func() *topo.Server { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + test.TopoServerTestSuite(t, ctx, func() *topo.Server { // Each test will use its own sub-directories. // The directories will be created when used the first time. testRoot := fmt.Sprintf("/test-%v", testIndex) diff --git a/go/vt/topotools/keyspace.go b/go/vt/topotools/keyspace.go index 82f4b133a50..d8a5740f3ae 100644 --- a/go/vt/topotools/keyspace.go +++ b/go/vt/topotools/keyspace.go @@ -118,22 +118,18 @@ func UpdateShardRecords( if err := ts.UpdateDisableQueryService(ctx, keyspace, shards, servedType, cells, disableQueryService); err != nil { return err } - for i, si := range shards { updatedShard, err := ts.UpdateShardFields(ctx, si.Keyspace(), si.ShardName(), func(si *topo.ShardInfo) error { if clearSourceShards { si.SourceShards = nil } - return nil }) if err != nil { return err } - shards[i] = updatedShard - // For 'to' shards, refresh to make them serve. The 'from' shards will // be refreshed after traffic has migrated. if !isFrom { @@ -142,7 +138,6 @@ func UpdateShardRecords( } } } - return nil } diff --git a/go/vt/topotools/routing_rules.go b/go/vt/topotools/routing_rules.go index 6dfa8b655ca..9eb64c936d7 100644 --- a/go/vt/topotools/routing_rules.go +++ b/go/vt/topotools/routing_rules.go @@ -27,6 +27,19 @@ import ( vschemapb "vitess.io/vitess/go/vt/proto/vschema" ) +//region routing rules + +func GetRoutingRulesMap(rules *vschemapb.RoutingRules) map[string][]string { + if rules == nil { + return nil + } + rulesMap := make(map[string][]string, len(rules.Rules)) + for _, rr := range rules.Rules { + rulesMap[rr.FromTable] = rr.ToTables + } + return rulesMap +} + // GetRoutingRules fetches routing rules from the topology server and returns a // mapping of fromTable=>[]toTables. func GetRoutingRules(ctx context.Context, ts *topo.Server) (map[string][]string, error) { @@ -35,10 +48,7 @@ func GetRoutingRules(ctx context.Context, ts *topo.Server) (map[string][]string, return nil, err } - rules := make(map[string][]string, len(rrs.Rules)) - for _, rr := range rrs.Rules { - rules[rr.FromTable] = rr.ToTables - } + rules := GetRoutingRulesMap(rrs) return rules, nil } @@ -59,6 +69,29 @@ func SaveRoutingRules(ctx context.Context, ts *topo.Server, rules map[string][]s return ts.SaveRoutingRules(ctx, rrs) } +//endregion + +//region shard routing rules + +func GetShardRoutingRuleKey(fromKeyspace, shard string) string { + return fmt.Sprintf("%s.%s", fromKeyspace, shard) +} +func ParseShardRoutingRuleKey(key string) (string, string) { + arr := strings.Split(key, ".") + return arr[0], arr[1] +} + +func GetShardRoutingRulesMap(rules *vschemapb.ShardRoutingRules) map[string]string { + if rules == nil { + return nil + } + rulesMap := make(map[string]string, len(rules.Rules)) + for _, rr := range rules.Rules { + rulesMap[GetShardRoutingRuleKey(rr.FromKeyspace, rr.Shard)] = rr.ToKeyspace + } + return rulesMap +} + // GetShardRoutingRules fetches shard routing rules from the topology server and returns a // mapping of fromKeyspace.Shard=>toKeyspace. func GetShardRoutingRules(ctx context.Context, ts *topo.Server) (map[string]string, error) { @@ -67,10 +100,7 @@ func GetShardRoutingRules(ctx context.Context, ts *topo.Server) (map[string]stri return nil, err } - rules := make(map[string]string, len(rrs.Rules)) - for _, rr := range rrs.Rules { - rules[fmt.Sprintf("%s.%s", rr.FromKeyspace, rr.Shard)] = rr.ToKeyspace - } + rules := GetShardRoutingRulesMap(rrs) return rules, nil } @@ -82,9 +112,7 @@ func SaveShardRoutingRules(ctx context.Context, ts *topo.Server, srr map[string] srs := &vschemapb.ShardRoutingRules{Rules: make([]*vschemapb.ShardRoutingRule, 0, len(srr))} for from, to := range srr { - arr := strings.Split(from, ".") - fromKeyspace := arr[0] - shard := arr[1] + fromKeyspace, shard := ParseShardRoutingRuleKey(from) srs.Rules = append(srs.Rules, &vschemapb.ShardRoutingRule{ FromKeyspace: fromKeyspace, ToKeyspace: to, diff --git a/go/vt/topotools/routing_rules_test.go b/go/vt/topotools/routing_rules_test.go index 6047bb441fe..0b4f265a77b 100644 --- a/go/vt/topotools/routing_rules_test.go +++ b/go/vt/topotools/routing_rules_test.go @@ -28,8 +28,10 @@ import ( ) func TestRoutingRulesRoundTrip(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() rules := map[string][]string{ "t1": {"t2", "t3"}, @@ -46,8 +48,10 @@ func TestRoutingRulesRoundTrip(t *testing.T) { } func TestRoutingRulesErrors(t *testing.T) { - ctx := context.Background() - ts, factory := memorytopo.NewServerAndFactory("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, factory := memorytopo.NewServerAndFactory(ctx, "zone1") + defer ts.Close() factory.SetError(errors.New("topo failure for testing")) t.Run("GetRoutingRules error", func(t *testing.T) { @@ -68,8 +72,10 @@ func TestRoutingRulesErrors(t *testing.T) { } func TestShardRoutingRulesRoundTrip(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() srr := map[string]string{ "ks1.shard1": "ks2", diff --git a/go/vt/topotools/shard_test.go b/go/vt/topotools/shard_test.go index 8904c984715..f2fb5f50340 100644 --- a/go/vt/topotools/shard_test.go +++ b/go/vt/topotools/shard_test.go @@ -17,13 +17,11 @@ limitations under the License. package topotools import ( + "context" "fmt" "math/rand" "sync" "testing" - "time" - - "context" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -31,10 +29,10 @@ import ( // TestCreateShard tests a few cases for topo.CreateShard func TestCreateShard(t *testing.T) { - ctx := context.Background() - - // Set up topology. - ts := memorytopo.NewServer("test_cell") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") + defer ts.Close() keyspace := "test_keyspace" shard := "0" @@ -60,10 +58,10 @@ func TestCreateShard(t *testing.T) { // TODO(sougou): we should eventually disallow multiple shards // for unsharded keyspaces. func TestCreateShardMultiUnsharded(t *testing.T) { - ctx := context.Background() - - // Set up topology. - ts := memorytopo.NewServer("test_cell") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") + defer ts.Close() // create keyspace keyspace := "test_keyspace" @@ -102,16 +100,14 @@ func TestCreateShardMultiUnsharded(t *testing.T) { // for a long time in parallel, making sure the locking and everything // works correctly. func TestGetOrCreateShard(t *testing.T) { - ctx := context.Background() - - // Set up topology. - cell := "test_cell" - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "test_cell") + defer ts.Close() // and do massive parallel GetOrCreateShard keyspace := "test_keyspace" wg := sync.WaitGroup{} - rand.Seed(time.Now().UnixNano()) for i := 0; i < 100; i++ { wg.Add(1) go func(i int) { diff --git a/go/vt/topotools/split.go b/go/vt/topotools/split.go index 00cfd49cdae..ace3dda94a7 100644 --- a/go/vt/topotools/split.go +++ b/go/vt/topotools/split.go @@ -222,7 +222,7 @@ func findOverlappingShards(shardMap map[string]*topo.ShardInfo) ([]*OverlappingS func findIntersectingShard(shardMap map[string]*topo.ShardInfo, sourceArray []*topo.ShardInfo) *topo.ShardInfo { for name, si := range shardMap { for _, sourceShardInfo := range sourceArray { - if si.KeyRange == nil || sourceShardInfo.KeyRange == nil || key.KeyRangesIntersect(si.KeyRange, sourceShardInfo.KeyRange) { + if si.KeyRange == nil || sourceShardInfo.KeyRange == nil || key.KeyRangeIntersect(si.KeyRange, sourceShardInfo.KeyRange) { delete(shardMap, name) return si } @@ -235,7 +235,7 @@ func findIntersectingShard(shardMap map[string]*topo.ShardInfo, sourceArray []*t // in the destination array func intersect(si *topo.ShardInfo, allShards []*topo.ShardInfo) bool { for _, shard := range allShards { - if key.KeyRangesIntersect(si.KeyRange, shard.KeyRange) { + if key.KeyRangeIntersect(si.KeyRange, shard.KeyRange) { return true } } diff --git a/go/vt/topotools/split_test.go b/go/vt/topotools/split_test.go index 0ba13a3524d..003dc767317 100644 --- a/go/vt/topotools/split_test.go +++ b/go/vt/topotools/split_test.go @@ -129,6 +129,10 @@ func TestValidateForReshard(t *testing.T) { sources: []string{"0"}, targets: []string{"-40", "40-"}, out: "", + }, { + sources: []string{"0003-"}, + targets: []string{"000300-000380", "000380-000400", "0004-"}, + out: "", }, { sources: []string{"-40", "40-80", "80-"}, targets: []string{"-40", "40-"}, diff --git a/go/vt/topotools/tablet.go b/go/vt/topotools/tablet.go index ef458ae7cd3..8bbca4b8c03 100644 --- a/go/vt/topotools/tablet.go +++ b/go/vt/topotools/tablet.go @@ -245,3 +245,29 @@ func TabletIdent(tablet *topodatapb.Tablet) string { func TargetIdent(target *querypb.Target) string { return fmt.Sprintf("%s/%s (%s)", target.Keyspace, target.Shard, target.TabletType) } + +// TabletEquality returns true iff two Tablets are identical for testing purposes +func TabletEquality(left, right *topodatapb.Tablet) bool { + if left.Keyspace != right.Keyspace { + return false + } + if left.Shard != right.Shard { + return false + } + if left.Hostname != right.Hostname { + return false + } + if left.Type != right.Type { + return false + } + if left.MysqlHostname != right.MysqlHostname { + return false + } + if left.MysqlPort != right.MysqlPort { + return false + } + if left.PrimaryTermStartTime.String() != right.PrimaryTermStartTime.String() { + return false + } + return topoproto.TabletAliasString(left.Alias) == topoproto.TabletAliasString(right.Alias) +} diff --git a/go/vt/vitessdriver/convert.go b/go/vt/vitessdriver/convert.go index abb25beb000..7ba95db4147 100644 --- a/go/vt/vitessdriver/convert.go +++ b/go/vt/vitessdriver/convert.go @@ -21,10 +21,10 @@ import ( "fmt" "time" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" ) type converter struct { @@ -32,13 +32,27 @@ type converter struct { } func (cv *converter) ToNative(v sqltypes.Value) (any, error) { - switch v.Type() { - case sqltypes.Datetime, sqltypes.Timestamp: - return DatetimeToNative(v, cv.location) - case sqltypes.Date: - return DateToNative(v, cv.location) + var out any + var err error + switch { + case v.Type() == sqltypes.Null: + // no-op + case v.IsSigned(): + return v.ToInt64() + case v.IsUnsigned(): + return v.ToUint64() + case v.IsFloat(): + return v.ToFloat64() + case v.Type() == sqltypes.Datetime, v.Type() == sqltypes.Timestamp: + return datetimeToNative(v, cv.location) + case v.Type() == sqltypes.Date: + return dateToNative(v, cv.location) + case v.IsQuoted() || v.Type() == sqltypes.Bit || v.Type() == sqltypes.Decimal: + out, err = v.ToBytes() + case v.Type() == sqltypes.Expression: + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v cannot be converted to a go type", v) } - return evalengine.ToNative(v) + return out, err } func (cv *converter) BuildBindVariable(v any) (*querypb.BindVariable, error) { @@ -109,12 +123,16 @@ func (cv *converter) bindVarsFromNamedValues(args []driver.NamedValue) (map[stri return bindVars, nil } -func newConverter(cfg *Configuration) (c *converter, err error) { - c = &converter{ - location: time.UTC, +func newConverter(cfg *Configuration) (*converter, error) { + c := &converter{location: time.UTC} + if cfg.DefaultLocation == "" { + return c, nil } - if cfg.DefaultLocation != "" { - c.location, err = time.LoadLocation(cfg.DefaultLocation) + + loc, err := time.LoadLocation(cfg.DefaultLocation) + if err != nil { + return nil, err } - return + c.location = loc + return c, nil } diff --git a/go/vt/vitessdriver/convert_test.go b/go/vt/vitessdriver/convert_test.go index 2e6734e8da0..c1d5f46b247 100644 --- a/go/vt/vitessdriver/convert_test.go +++ b/go/vt/vitessdriver/convert_test.go @@ -33,35 +33,161 @@ func TestToNative(t *testing.T) { convert *converter in sqltypes.Value out any - }{{ - convert: &converter{}, - in: sqltypes.TestValue(sqltypes.Int32, "1"), - out: int64(1), - }, { - convert: &converter{}, - in: sqltypes.TestValue(sqltypes.Timestamp, "2012-02-24 23:19:43"), - out: time.Date(2012, 02, 24, 23, 19, 43, 0, time.UTC), - }, { - convert: &converter{}, - in: sqltypes.TestValue(sqltypes.Time, "23:19:43"), - out: []byte("23:19:43"), // TIME is not handled - }, { - convert: &converter{}, - in: sqltypes.TestValue(sqltypes.Date, "2012-02-24"), - out: time.Date(2012, 02, 24, 0, 0, 0, 0, time.UTC), - }, { - convert: &converter{}, - in: sqltypes.TestValue(sqltypes.Datetime, "2012-02-24 23:19:43"), - out: time.Date(2012, 02, 24, 23, 19, 43, 0, time.UTC), - }, { - convert: convertTimeLocal, - in: sqltypes.TestValue(sqltypes.Datetime, "2012-02-24 23:19:43"), - out: time.Date(2012, 02, 24, 23, 19, 43, 0, time.Local), - }, { - convert: convertTimeLocal, - in: sqltypes.TestValue(sqltypes.Date, "2012-02-24"), - out: time.Date(2012, 02, 24, 0, 0, 0, 0, time.Local), - }} + }{ + { + convert: &converter{}, + in: sqltypes.NULL, + out: nil, + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Int8, "1"), + out: int64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Int16, "1"), + out: int64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Int24, "1"), + out: int64(1), + }, { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Int32, "1"), + out: int64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Int64, "1"), + out: int64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Uint8, "1"), + out: uint64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Uint16, "1"), + out: uint64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Uint24, "1"), + out: uint64(1), + }, { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Uint32, "1"), + out: uint64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Uint64, "1"), + out: uint64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Float32, "1.1"), + out: float64(1.1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Float64, "1.1"), + out: float64(1.1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Timestamp, "2012-02-24 23:19:43"), + out: time.Date(2012, 02, 24, 23, 19, 43, 0, time.UTC), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Time, "23:19:43"), + out: []byte("23:19:43"), // TIME is not handled + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Date, "2012-02-24"), + out: time.Date(2012, 02, 24, 0, 0, 0, 0, time.UTC), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Datetime, "2012-02-24 23:19:43"), + out: time.Date(2012, 02, 24, 23, 19, 43, 0, time.UTC), + }, + { + convert: convertTimeLocal, + in: sqltypes.TestValue(sqltypes.Datetime, "2012-02-24 23:19:43"), + out: time.Date(2012, 02, 24, 23, 19, 43, 0, time.Local), + }, + { + convert: convertTimeLocal, + in: sqltypes.TestValue(sqltypes.Date, "2012-02-24"), + out: time.Date(2012, 02, 24, 0, 0, 0, 0, time.Local), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Year, "1"), + out: uint64(1), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Decimal, "1"), + out: []byte("1"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Text, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Blob, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.VarChar, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.VarBinary, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Char, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Binary, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.VarChar, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Bit, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Enum, "a"), + out: []byte("a"), + }, + { + convert: &converter{}, + in: sqltypes.TestValue(sqltypes.Set, "a"), + out: []byte("a"), + }, + } for _, tcase := range testcases { v, err := tcase.convert.ToNative(tcase.in) diff --git a/go/vt/vitessdriver/driver.go b/go/vt/vitessdriver/driver.go index 638e31523f3..4a965399e9c 100644 --- a/go/vt/vitessdriver/driver.go +++ b/go/vt/vitessdriver/driver.go @@ -41,10 +41,30 @@ var ( // Type-check interfaces. var ( - _ driver.QueryerContext = &conn{} - _ driver.ExecerContext = &conn{} - _ driver.StmtQueryContext = &stmt{} - _ driver.StmtExecContext = &stmt{} + _ interface { + driver.Connector + } = &connector{} + + _ interface { + driver.Driver + driver.DriverContext + } = drv{} + + _ interface { + driver.Conn + driver.ConnBeginTx + driver.ConnPrepareContext + driver.ExecerContext + driver.Pinger + driver.QueryerContext + driver.Tx + } = &conn{} + + _ interface { + driver.Stmt + driver.StmtExecContext + driver.StmtQueryContext + } = &stmt{} ) func init() { @@ -94,8 +114,7 @@ func OpenWithConfiguration(c Configuration) (*sql.DB, error) { return sql.Open(c.DriverName, json) } -type drv struct { -} +type drv struct{} // Open implements the database/sql/driver.Driver interface. // @@ -112,25 +131,65 @@ type drv struct { // // For a description of the available fields, see the Configuration struct. func (d drv) Open(name string) (driver.Conn, error) { - c := &conn{} - err := json.Unmarshal([]byte(name), c) + conn, err := d.OpenConnector(name) if err != nil { return nil, err } - c.setDefaults() + return conn.Connect(context.Background()) +} - if c.convert, err = newConverter(&c.Configuration); err != nil { +// OpenConnector implements the database/sql/driver.DriverContext interface. +// +// See the documentation of Open for details on the format of name. +func (d drv) OpenConnector(name string) (driver.Connector, error) { + var cfg Configuration + if err := json.Unmarshal([]byte(name), &cfg); err != nil { return nil, err } - if err = c.dial(); err != nil { + cfg.setDefaults() + return d.newConnector(cfg) +} + +// A connector holds immutable state for the creation of additional conns via +// the Connect method. +type connector struct { + drv drv + cfg Configuration + convert *converter +} + +func (d drv) newConnector(cfg Configuration) (driver.Connector, error) { + convert, err := newConverter(&cfg) + if err != nil { return nil, err } - return c, nil + return &connector{ + drv: d, + cfg: cfg, + convert: convert, + }, nil } +// Connect implements the database/sql/driver.Connector interface. +func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { + conn := &conn{ + cfg: c.cfg, + convert: c.convert, + } + + if err := conn.dial(ctx); err != nil { + return nil, err + } + + return conn, nil +} + +// Driver implements the database/sql/driver.Connector interface. +func (c *connector) Driver() driver.Driver { return c.drv } + // Configuration holds all Vitess driver settings. // // Fields with documented default values do not have to be set explicitly. @@ -202,32 +261,32 @@ func (c *Configuration) setDefaults() { } type conn struct { - Configuration + cfg Configuration convert *converter conn *vtgateconn.VTGateConn session *vtgateconn.VTGateSession } -func (c *conn) dial() error { +func (c *conn) dial(ctx context.Context) error { var err error - c.conn, err = vtgateconn.DialProtocol(context.Background(), c.Protocol, c.Address) + c.conn, err = vtgateconn.DialProtocol(ctx, c.cfg.Protocol, c.cfg.Address) if err != nil { return err } - if c.Configuration.SessionToken != "" { - sessionFromToken, err := sessionTokenToSession(c.Configuration.SessionToken) + if c.cfg.SessionToken != "" { + sessionFromToken, err := sessionTokenToSession(c.cfg.SessionToken) if err != nil { return err } c.session = c.conn.SessionFromPb(sessionFromToken) } else { - c.session = c.conn.Session(c.Target, nil) + c.session = c.conn.Session(c.cfg.Target, nil) } return nil } func (c *conn) Ping(ctx context.Context) error { - if c.Streaming { + if c.cfg.Streaming { return errors.New("Ping not allowed for streaming connections") } @@ -378,7 +437,7 @@ func sessionTokenToSession(sessionToken string) (*vtgatepb.Session, error) { func (c *conn) Begin() (driver.Tx, error) { // if we're loading from an existing session, we need to avoid starting a new transaction - if c.Configuration.SessionToken != "" { + if c.cfg.SessionToken != "" { return c, nil } @@ -401,7 +460,7 @@ func (c *conn) Commit() error { // if we're loading from an existing session, disallow committing/rolling back the transaction // this isn't a technical limitation, but is enforced to prevent misuse, so that only // the original creator of the transaction can commit/rollback - if c.Configuration.SessionToken != "" { + if c.cfg.SessionToken != "" { return errors.New("calling Commit from a distributed tx is not allowed") } @@ -413,7 +472,7 @@ func (c *conn) Rollback() error { // if we're loading from an existing session, disallow committing/rolling back the transaction // this isn't a technical limitation, but is enforced to prevent misuse, so that only // the original creator of the transaction can commit/rollback - if c.Configuration.SessionToken != "" { + if c.cfg.SessionToken != "" { return errors.New("calling Rollback from a distributed tx is not allowed") } @@ -424,7 +483,7 @@ func (c *conn) Rollback() error { func (c *conn) Exec(query string, args []driver.Value) (driver.Result, error) { ctx := context.TODO() - if c.Streaming { + if c.cfg.Streaming { return nil, errors.New("Exec not allowed for streaming connections") } bindVars, err := c.convert.buildBindVars(args) @@ -440,7 +499,7 @@ func (c *conn) Exec(query string, args []driver.Value) (driver.Result, error) { } func (c *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { - if c.Streaming { + if c.cfg.Streaming { return nil, errors.New("Exec not allowed for streaming connections") } @@ -462,7 +521,7 @@ func (c *conn) Query(query string, args []driver.Value) (driver.Rows, error) { return nil, err } - if c.Streaming { + if c.cfg.Streaming { stream, err := c.session.StreamExecute(ctx, query, bindVars) if err != nil { return nil, err @@ -488,7 +547,7 @@ func (c *conn) QueryContext(ctx context.Context, query string, args []driver.Nam return nil, err } - if c.Streaming { + if c.cfg.Streaming { stream, err := c.session.StreamExecute(ctx, query, bv) if err != nil { return nil, err diff --git a/go/vt/vitessdriver/driver_test.go b/go/vt/vitessdriver/driver_test.go index 5438aa0c75d..bd49a0acd0a 100644 --- a/go/vt/vitessdriver/driver_test.go +++ b/go/vt/vitessdriver/driver_test.go @@ -38,9 +38,7 @@ import ( "vitess.io/vitess/go/vt/vtgate/grpcvtgateservice" ) -var ( - testAddress string -) +var testAddress string // TestMain tests the Vitess Go SQL driver. // @@ -71,7 +69,7 @@ func TestOpen(t *testing.T) { panic(err) } - var testcases = []struct { + testcases := []struct { desc string connStr string conn *conn @@ -80,7 +78,7 @@ func TestOpen(t *testing.T) { desc: "Open()", connStr: fmt.Sprintf(`{"address": "%s", "target": "@replica", "timeout": %d}`, testAddress, int64(30*time.Second)), conn: &conn{ - Configuration: Configuration{ + cfg: Configuration{ Protocol: "grpc", DriverName: "vitess", Target: "@replica", @@ -94,7 +92,7 @@ func TestOpen(t *testing.T) { desc: "Open() (defaults omitted)", connStr: fmt.Sprintf(`{"address": "%s", "timeout": %d}`, testAddress, int64(30*time.Second)), conn: &conn{ - Configuration: Configuration{ + cfg: Configuration{ Protocol: "grpc", DriverName: "vitess", }, @@ -107,7 +105,7 @@ func TestOpen(t *testing.T) { desc: "Open() with keyspace", connStr: fmt.Sprintf(`{"protocol": "grpc", "address": "%s", "target": "ks:0@replica", "timeout": %d}`, testAddress, int64(30*time.Second)), conn: &conn{ - Configuration: Configuration{ + cfg: Configuration{ Protocol: "grpc", DriverName: "vitess", Target: "ks:0@replica", @@ -123,7 +121,7 @@ func TestOpen(t *testing.T) { `{"address": "%s", "timeout": %d, "defaultlocation": "America/Los_Angeles"}`, testAddress, int64(30*time.Second)), conn: &conn{ - Configuration: Configuration{ + cfg: Configuration{ Protocol: "grpc", DriverName: "vitess", DefaultLocation: "America/Los_Angeles", @@ -144,7 +142,7 @@ func TestOpen(t *testing.T) { wantc := tc.conn newc := *(c.(*conn)) - newc.Address = "" + newc.cfg.Address = "" newc.conn = nil newc.session = nil if !reflect.DeepEqual(&newc, wantc) { @@ -255,7 +253,7 @@ func TestExecStreamingNotAllowed(t *testing.T) { } func TestQuery(t *testing.T) { - var testcases = []struct { + testcases := []struct { desc string config Configuration requestName string @@ -357,7 +355,7 @@ func TestQuery(t *testing.T) { } func TestBindVars(t *testing.T) { - var testcases = []struct { + testcases := []struct { desc string in []driver.NamedValue out map[string]*querypb.BindVariable @@ -440,7 +438,7 @@ func TestBindVars(t *testing.T) { } func TestDatetimeQuery(t *testing.T) { - var testcases = []struct { + testcases := []struct { desc string config Configuration requestName string @@ -725,3 +723,141 @@ func TestSessionToken(t *testing.T) { t.Fatal(err) } } + +// TestStreamExec tests that different kinds of query present in `execMap` can run through streaming api +func TestStreamExec(t *testing.T) { + db, err := OpenForStreaming(testAddress, "@rdonly") + require.NoError(t, err) + defer db.Close() + + for k, v := range execMap { + t.Run(k, func(t *testing.T) { + s, err := db.Prepare(k) + require.NoError(t, err) + defer s.Close() + + r, err := s.Query(0) + require.NoError(t, err) + defer r.Close() + + fields, err := r.Columns() + require.NoError(t, err) + require.Equal(t, colList(v.result.Fields), fields) + + for r.Next() { + require.NoError(t, r.Err()) + } + }) + } +} + +func colList(fields []*querypb.Field) []string { + if fields == nil { + return nil + } + cols := make([]string, 0, len(fields)) + for _, field := range fields { + cols = append(cols, field.Name) + } + return cols +} + +func TestConnSeparateSessions(t *testing.T) { + c := Configuration{ + Protocol: "grpc", + Address: testAddress, + Target: "@primary", + } + + db, err := OpenWithConfiguration(c) + if err != nil { + t.Fatal(err) + } + defer db.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Each new connection starts a fresh session pointed at @primary. When the + // USE statement is executed, we simulate a change to that individual + // connection's target string. + // + // No connections are returned to the pool during this test and therefore + // the connection state should not be shared. + var conns []*sql.Conn + for i := 0; i < 3; i++ { + sconn, err := db.Conn(ctx) + if err != nil { + t.Fatal(err) + } + conns = append(conns, sconn) + + targets := []string{targetString(t, sconn)} + + _, err = sconn.ExecContext(ctx, "use @rdonly") + require.NoError(t, err) + + targets = append(targets, targetString(t, sconn)) + + require.Equal(t, []string{"@primary", "@rdonly"}, targets) + } + + for _, c := range conns { + require.NoError(t, c.Close()) + } +} + +func TestConnReuseSessions(t *testing.T) { + c := Configuration{ + Protocol: "grpc", + Address: testAddress, + Target: "@primary", + } + + db, err := OpenWithConfiguration(c) + if err != nil { + t.Fatal(err) + } + defer db.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Pull an individual connection from the pool and execute a USE, resulting + // in changing the target string. We return the connection to the pool + // continuously in this test and verify that we keep pulling the same + // connection with its target string altered. + sconn, err := db.Conn(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = sconn.ExecContext(ctx, "use @rdonly") + require.NoError(t, err) + require.NoError(t, sconn.Close()) + + var targets []string + for i := 0; i < 3; i++ { + sconn, err := db.Conn(ctx) + if err != nil { + t.Fatal(err) + } + + targets = append(targets, targetString(t, sconn)) + require.NoError(t, sconn.Close()) + } + + require.Equal(t, []string{"@rdonly", "@rdonly", "@rdonly"}, targets) +} + +func targetString(t *testing.T, c *sql.Conn) string { + t.Helper() + + var target string + require.NoError(t, c.Raw(func(driverConn any) error { + target = driverConn.(*conn).session.SessionPb().TargetString + return nil + })) + + return target +} diff --git a/go/vt/vitessdriver/fakeserver_test.go b/go/vt/vitessdriver/fakeserver_test.go index eefa2abd285..a74e44e682c 100644 --- a/go/vt/vitessdriver/fakeserver_test.go +++ b/go/vt/vitessdriver/fakeserver_test.go @@ -33,8 +33,7 @@ import ( ) // fakeVTGateService has the server side of this fake -type fakeVTGateService struct { -} +type fakeVTGateService struct{} // queryExecute contains all the fields we use to test Execute type queryExecute struct { @@ -50,7 +49,7 @@ func (q *queryExecute) Equal(q2 *queryExecute) bool { } // Execute is part of the VTGateService interface -func (f *fakeVTGateService) Execute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { +func (f *fakeVTGateService) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { execCase, ok := execMap[sql] if !ok { return session, nil, fmt.Errorf("no match for: %s", sql) @@ -100,10 +99,10 @@ func (f *fakeVTGateService) ExecuteBatch(ctx context.Context, session *vtgatepb. } // StreamExecute is part of the VTGateService interface -func (f *fakeVTGateService) StreamExecute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) error { +func (f *fakeVTGateService) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { execCase, ok := execMap[sql] if !ok { - return fmt.Errorf("no match for: %s", sql) + return session, fmt.Errorf("no match for: %s", sql) } query := &queryExecute{ SQL: sql, @@ -111,25 +110,25 @@ func (f *fakeVTGateService) StreamExecute(ctx context.Context, session *vtgatepb Session: session, } if !query.Equal(execCase.execQuery) { - return fmt.Errorf("request mismatch: got %+v, want %+v", query, execCase.execQuery) + return session, fmt.Errorf("request mismatch: got %+v, want %+v", query, execCase.execQuery) } if execCase.result != nil { result := &sqltypes.Result{ Fields: execCase.result.Fields, } if err := callback(result); err != nil { - return err + return execCase.session, err } for _, row := range execCase.result.Rows { result := &sqltypes.Result{ Rows: [][]sqltypes.Value{row}, } if err := callback(result); err != nil { - return err + return execCase.session, err } } } - return nil + return execCase.session, nil } // Prepare is part of the VTGateService interface @@ -282,6 +281,20 @@ var execMap = map[string]struct { TargetString: "@primary", }, }, + "use @rdonly": { + execQuery: &queryExecute{ + SQL: "use @rdonly", + Session: &vtgatepb.Session{ + TargetString: "@primary", + Autocommit: true, + }, + }, + result: &sqltypes.Result{}, + session: &vtgatepb.Session{ + TargetString: "@rdonly", + SessionUUID: "1111", + }, + }, } var result1 = sqltypes.Result{ diff --git a/go/vt/vitessdriver/time.go b/go/vt/vitessdriver/time.go index dc2d4453c31..70ec2d679ae 100644 --- a/go/vt/vitessdriver/time.go +++ b/go/vt/vitessdriver/time.go @@ -74,8 +74,8 @@ func parseISOTime(tstr string, loc *time.Location, minLen, maxLen int) (t time.T return time.ParseInLocation(isoTimeFormat[:tlen], tstr, loc) } -// DatetimeToNative converts a Datetime Value into a time.Time -func DatetimeToNative(v sqltypes.Value, loc *time.Location) (time.Time, error) { +// datetimeToNative converts a Datetime Value into a time.Time +func datetimeToNative(v sqltypes.Value, loc *time.Location) (time.Time, error) { // Valid format string offsets for a DATETIME // |DATETIME |19+ // |------------------|------| @@ -83,11 +83,11 @@ func DatetimeToNative(v sqltypes.Value, loc *time.Location) (time.Time, error) { return parseISOTime(v.ToString(), loc, 19, isoTimeLength) } -// DateToNative converts a Date Value into a time.Time. +// dateToNative converts a Date Value into a time.Time. // Note that there's no specific type in the Go stdlib to represent // dates without time components, so the returned Time will have // their hours/mins/seconds zeroed out. -func DateToNative(v sqltypes.Value, loc *time.Location) (time.Time, error) { +func dateToNative(v sqltypes.Value, loc *time.Location) (time.Time, error) { // Valid format string offsets for a DATE // |DATE |10 // |---------| diff --git a/go/vt/vitessdriver/time_test.go b/go/vt/vitessdriver/time_test.go index d2924fa343a..949d8f43354 100644 --- a/go/vt/vitessdriver/time_test.go +++ b/go/vt/vitessdriver/time_test.go @@ -113,15 +113,15 @@ func TestDatetimeToNative(t *testing.T) { }} for _, tcase := range tcases { - got, err := DatetimeToNative(tcase.val, tcase.loc) + got, err := datetimeToNative(tcase.val, tcase.loc) if tcase.err && err == nil { - t.Errorf("DatetimeToNative(%v, %#v) succeeded; expected error", tcase.val, tcase.loc) + t.Errorf("datetimeToNative(%v, %#v) succeeded; expected error", tcase.val, tcase.loc) } if !tcase.err && err != nil { - t.Errorf("DatetimeToNative(%v, %#v) failed: %v", tcase.val, tcase.loc, err) + t.Errorf("datetimeToNative(%v, %#v) failed: %v", tcase.val, tcase.loc, err) } if !reflect.DeepEqual(got, tcase.out) { - t.Errorf("DatetimeToNative(%v, %#v): %v, want %v", tcase.val, tcase.loc, got, tcase.out) + t.Errorf("datetimeToNative(%v, %#v): %v, want %v", tcase.val, tcase.loc, got, tcase.out) } } } @@ -161,15 +161,15 @@ func TestDateToNative(t *testing.T) { }} for _, tcase := range tcases { - got, err := DateToNative(tcase.val, tcase.loc) + got, err := dateToNative(tcase.val, tcase.loc) if tcase.err && err == nil { - t.Errorf("DateToNative(%v, %#v) succeeded; expected error", tcase.val, tcase.loc) + t.Errorf("dateToNative(%v, %#v) succeeded; expected error", tcase.val, tcase.loc) } if !tcase.err && err != nil { - t.Errorf("DateToNative(%v, %#v) failed: %v", tcase.val, tcase.loc, err) + t.Errorf("dateToNative(%v, %#v) failed: %v", tcase.val, tcase.loc, err) } if !reflect.DeepEqual(got, tcase.out) { - t.Errorf("DateToNative(%v, %#v): %v, want %v", tcase.val, tcase.loc, got, tcase.out) + t.Errorf("dateToNative(%v, %#v): %v, want %v", tcase.val, tcase.loc, got, tcase.out) } } } diff --git a/go/vt/vtadmin/README.md b/go/vt/vtadmin/README.md index c49f04a8dd4..48db7bcb246 100644 --- a/go/vt/vtadmin/README.md +++ b/go/vt/vtadmin/README.md @@ -58,7 +58,7 @@ npm install # This should be the address you passed to `./vtadmin --addr`. For example, # "http://127.0.0.1:14200". -export REACT_APP_VTADMIN_API_ADDRESS="${vtadmin_api_addr}" -export REACT_APP_ENABLE_EXPERIMENTAL_TABLET_DEBUG_VARS="true" +export VITE_VTADMIN_API_ADDRESS="${vtadmin_api_addr}" +export VITE_ENABLE_EXPERIMENTAL_TABLET_DEBUG_VARS="true" npm run start ``` diff --git a/go/vt/vtadmin/api.go b/go/vt/vtadmin/api.go index 58f37897e3a..92d11ba18ea 100644 --- a/go/vt/vtadmin/api.go +++ b/go/vt/vtadmin/api.go @@ -73,6 +73,10 @@ type API struct { authz *rbac.Authorizer options Options + + // vtexplain is now global again due to stat exporters in the tablet layer + // we're not super concerned because we will be deleting vtexplain Soon(TM). + vtexplainLock sync.Mutex } // Options wraps the configuration options for different components of the @@ -176,17 +180,26 @@ func NewAPI(clusters []*cluster.Cluster, opts Options) *API { } // Middlewares are executed in order of addition. Our ordering (all - // middlewares being optional) is: - // 1. CORS. CORS is a special case and is applied globally, the rest are applied only to the subrouter. - // 2. Compression - // 3. Tracing - // 4. Authentication + // middlewares except the panic handler being optional) is: + // 1. Panic recovery (applied globally) + // 2. CORS. CORS is a special case and is applied globally, the rest are applied only to the subrouter. + // 3. Compression + // 4. Tracing + // 5. Authentication + globalMiddlewares := []mux.MiddlewareFunc{ + vthandlers.PanicRecoveryHandler, + } middlewares := []mux.MiddlewareFunc{} if len(opts.HTTPOpts.CORSOrigins) > 0 { - serv.Router().Use(handlers.CORS( - handlers.AllowCredentials(), handlers.AllowedOrigins(opts.HTTPOpts.CORSOrigins), handlers.AllowedMethods([]string{"GET", "HEAD", "POST", "PUT", "DELETE", "OPTIONS"}))) + corsHandler := handlers.CORS( + handlers.AllowCredentials(), + handlers.AllowedOrigins(opts.HTTPOpts.CORSOrigins), + handlers.AllowedMethods([]string{"GET", "HEAD", "POST", "PUT", "DELETE", "OPTIONS"}), + ) + globalMiddlewares = append(globalMiddlewares, corsHandler) } + serv.Router().Use(globalMiddlewares...) if !opts.HTTPOpts.DisableCompression { middlewares = append(middlewares, handlers.CompressHandler) @@ -365,6 +378,8 @@ func (api *API) Handler() http.Handler { router.HandleFunc("/shard_replication_positions", httpAPI.Adapt(vtadminhttp.GetShardReplicationPositions)).Name("API.GetShardReplicationPositions") router.HandleFunc("/shards/{cluster_id}", httpAPI.Adapt(vtadminhttp.CreateShard)).Name("API.CreateShard").Methods("POST") router.HandleFunc("/shards/{cluster_id}", httpAPI.Adapt(vtadminhttp.DeleteShards)).Name("API.DeleteShards").Methods("DELETE") + router.HandleFunc("/srvkeyspaces", httpAPI.Adapt(vtadminhttp.GetSrvKeyspaces)).Name("API.GetSrvKeyspaces").Methods("GET") + router.HandleFunc("/srvkeyspace/{cluster_id}/{name}", httpAPI.Adapt(vtadminhttp.GetSrvKeyspace)).Name("API.GetSrvKeyspace").Methods("GET") router.HandleFunc("/srvvschema/{cluster_id}/{cell}", httpAPI.Adapt(vtadminhttp.GetSrvVSchema)).Name("API.GetSrvVSchema") router.HandleFunc("/srvvschemas", httpAPI.Adapt(vtadminhttp.GetSrvVSchemas)).Name("API.GetSrvVSchemas") router.HandleFunc("/tablets", httpAPI.Adapt(vtadminhttp.GetTablets)).Name("API.GetTablets") @@ -1047,6 +1062,79 @@ func (api *API) GetShardReplicationPositions(ctx context.Context, req *vtadminpb }, nil } +// GetSrvKeyspace is part of the vtadminpb.VTAdminServer interface. +func (api *API) GetSrvKeyspace(ctx context.Context, req *vtadminpb.GetSrvKeyspaceRequest) (*vtctldatapb.GetSrvKeyspacesResponse, error) { + span, ctx := trace.NewSpan(ctx, "API.GetSrvKeyspace") + defer span.Finish() + + c, err := api.getClusterForRequest(req.ClusterId) + if err != nil { + return nil, err + } + + if !api.authz.IsAuthorized(ctx, c.ID, rbac.SrvKeyspaceResource, rbac.GetAction) { + return nil, nil + } + + return c.Vtctld.GetSrvKeyspaces(ctx, &vtctldatapb.GetSrvKeyspacesRequest{ + Keyspace: req.Keyspace, + Cells: req.Cells, + }) +} + +// GetSrvKeyspaces is part of the vtadminpb.VTAdminServer interface. +func (api *API) GetSrvKeyspaces(ctx context.Context, req *vtadminpb.GetSrvKeyspacesRequest) (*vtadminpb.GetSrvKeyspacesResponse, error) { + span, ctx := trace.NewSpan(ctx, "API.GetSrvKeyspaces") + defer span.Finish() + + clusters, _ := api.getClustersForRequest(req.ClusterIds) + + var ( + sks = make(map[string]*vtctldatapb.GetSrvKeyspacesResponse) + wg sync.WaitGroup + er concurrency.AllErrorRecorder + m sync.Mutex + ) + + for _, c := range clusters { + if !api.authz.IsAuthorized(ctx, c.ID, rbac.SrvKeyspaceResource, rbac.GetAction) { + continue + } + + wg.Add(1) + + go func(c *cluster.Cluster) { + defer wg.Done() + + span, ctx := trace.NewSpan(ctx, "Cluster.GetSrvKeyspaces") + defer span.Finish() + + sk, err := c.GetSrvKeyspaces(ctx, req.Cells) + + if err != nil { + er.RecordError(err) + return + } + + m.Lock() + for key, value := range sk { + sks[key] = value + } + m.Unlock() + }(c) + } + + wg.Wait() + + if er.HasErrors() { + return nil, er.Error() + } + + return &vtadminpb.GetSrvKeyspacesResponse{ + SrvKeyspaces: sks, + }, nil +} + // GetSrvVSchema is part of the vtadminpb.VTAdminServer interface. func (api *API) GetSrvVSchema(ctx context.Context, req *vtadminpb.GetSrvVSchemaRequest) (*vtadminpb.SrvVSchema, error) { span, ctx := trace.NewSpan(ctx, "API.GetSrvVSchema") @@ -1943,6 +2031,16 @@ func (api *API) VTExplain(ctx context.Context, req *vtadminpb.VTExplainRequest) return nil, nil } + lockWaitStart := time.Now() + + api.vtexplainLock.Lock() + defer api.vtexplainLock.Unlock() + + lockWaitTime := time.Since(lockWaitStart) + log.Infof("vtexplain lock wait time: %s", lockWaitTime) + + span.Annotate("vtexplain_lock_wait_time", lockWaitTime.String()) + tablet, err := c.FindTablet(ctx, func(t *vtadminpb.Tablet) bool { return t.Tablet.Keyspace == req.Keyspace && topo.IsInServingGraph(t.Tablet.Type) && t.Tablet.Type != topodatapb.TabletType_PRIMARY && t.State == vtadminpb.Tablet_SERVING }) @@ -2050,7 +2148,7 @@ func (api *API) VTExplain(ctx context.Context, req *vtadminpb.VTExplainRequest) return nil, er.Error() } - vte, err := vtexplain.Init(srvVSchema, schema, shardMap, &vtexplain.Options{ReplicationMode: "ROW"}) + vte, err := vtexplain.Init(ctx, srvVSchema, schema, shardMap, &vtexplain.Options{ReplicationMode: "ROW"}) if err != nil { return nil, fmt.Errorf("error initilaizing vtexplain: %w", err) } diff --git a/go/vt/vtadmin/api_authz_test.go b/go/vt/vtadmin/api_authz_test.go index 45d3e443c6e..eb67757a1c1 100644 --- a/go/vt/vtadmin/api_authz_test.go +++ b/go/vt/vtadmin/api_authz_test.go @@ -2939,16 +2939,16 @@ func TestVTExplain(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) - t.Cleanup(func() { - if err := api.Close(); err != nil { - t.Logf("api did not close cleanly: %s", err.Error()) - } - }) - t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() + api := vtadmin.NewAPI(testClusters(t), opts) + t.Cleanup(func() { + if err := api.Close(); err != nil { + t.Logf("api did not close cleanly: %s", err.Error()) + } + }) + actor := &rbac.Actor{Name: "other"} ctx := context.Background() if actor != nil { @@ -2966,6 +2966,13 @@ func TestVTExplain(t *testing.T) { t.Run("authorized actor", func(t *testing.T) { t.Parallel() + api := vtadmin.NewAPI(testClusters(t), opts) + t.Cleanup(func() { + if err := api.Close(); err != nil { + t.Logf("api did not close cleanly: %s", err.Error()) + } + }) + actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() if actor != nil { diff --git a/go/vt/vtadmin/api_test.go b/go/vt/vtadmin/api_test.go index b707f2036aa..4a68abd6b73 100644 --- a/go/vt/vtadmin/api_test.go +++ b/go/vt/vtadmin/api_test.go @@ -803,8 +803,7 @@ func TestFindSchema(t *testing.T) { if schema != nil { // Clone so our mutation below doesn't trip the race detector. - schema = proto.Clone(schema).(*vtadminpb.Schema) - + schema = schema.CloneVT() for _, td := range schema.TableDefinitions { // Zero these out because they're non-deterministic and also not // relevant to the final result. @@ -1050,19 +1049,19 @@ func TestGetKeyspace(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() topos := make([]*topo.Server, len(tt.clusterShards)) vtctlds := make([]vtctlservicepb.VtctldServer, len(tt.clusterShards)) for i, shards := range tt.clusterShards { - ts := memorytopo.NewServer("cell1") + ts := memorytopo.NewServer(ctx, "cell1") testutil.AddShards(ctx, t, ts, shards...) topos[i] = ts vtctlds[i] = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { @@ -1282,20 +1281,20 @@ func TestGetKeyspaces(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Note that these test cases were written prior to the existence of // WithTestServers, so they are all written with the assumption that // there are exactly 2 clusters. topos := []*topo.Server{ - memorytopo.NewServer("c0_cell1"), - memorytopo.NewServer("c1_cell1"), + memorytopo.NewServer(ctx, "c0_cell1"), + memorytopo.NewServer(ctx, "c1_cell1"), } for cdx, cks := range tt.clusterKeyspaces { @@ -1346,7 +1345,8 @@ func TestGetKeyspaces(t *testing.T) { } func TestGetSchema(t *testing.T) { - t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string @@ -1361,7 +1361,7 @@ func TestGetSchema(t *testing.T) { { name: "success", clusterID: 1, - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ GetSchemaResults: map[string]struct { Schema *tabletmanagerdatapb.SchemaDefinition @@ -1416,7 +1416,7 @@ func TestGetSchema(t *testing.T) { { name: "cluster not found", clusterID: 1, // results in clusterId == "c1" - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: nil, req: &vtadminpb.GetSchemaRequest{ ClusterId: "c2", @@ -1429,7 +1429,7 @@ func TestGetSchema(t *testing.T) { { name: "tablet not found for keyspace", clusterID: 1, - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: []*vtadminpb.Tablet{ { Cluster: &vtadminpb.Cluster{ @@ -1457,7 +1457,7 @@ func TestGetSchema(t *testing.T) { { name: "no serving tablet found for keyspace", clusterID: 1, - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: []*vtadminpb.Tablet{ { Cluster: &vtadminpb.Cluster{ @@ -1485,7 +1485,7 @@ func TestGetSchema(t *testing.T) { { name: "error in GetSchema call", clusterID: 1, - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ GetSchemaResults: map[string]struct { Schema *tabletmanagerdatapb.SchemaDefinition @@ -1535,13 +1535,13 @@ func TestGetSchema(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return grpcvtctldserver.NewVtctldServer(ts) @@ -1570,7 +1570,7 @@ func TestGetSchema(t *testing.T) { if resp != nil { // Clone so our mutation below doesn't trip the race detector. - resp = proto.Clone(resp).(*vtadminpb.Schema) + resp = resp.CloneVT() } assert.NoError(t, err) @@ -1580,8 +1580,6 @@ func TestGetSchema(t *testing.T) { } t.Run("size aggregation", func(t *testing.T) { - t.Parallel() - c1pb := &vtadminpb.Cluster{ Id: "c1", Name: "cluster1", @@ -1730,8 +1728,7 @@ func TestGetSchema(t *testing.T) { if schema != nil { // Clone so our mutation below doesn't trip the race detector. - schema = proto.Clone(schema).(*vtadminpb.Schema) - + schema = schema.CloneVT() for _, td := range schema.TableDefinitions { // Zero these out because they're non-deterministic and also not // relevant to the final result. @@ -2176,8 +2173,6 @@ func TestGetSchemas(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { // Note that these test cases were written prior to the existence of // WithTestServers, so they are all written with the assumption that @@ -2186,10 +2181,12 @@ func TestGetSchemas(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() topos := []*topo.Server{ - memorytopo.NewServer("c0_cell1"), - memorytopo.NewServer("c1_cell1"), + memorytopo.NewServer(ctx, "c0_cell1"), + memorytopo.NewServer(ctx, "c1_cell1"), } tmc := testutil.TabletManagerClient{ @@ -2469,7 +2466,7 @@ func TestGetSchemas(t *testing.T) { api := NewAPI([]*cluster.Cluster{c1, c2}, Options{}) defer api.Close() - resp, err := api.GetSchemas(ctx, &vtadminpb.GetSchemasRequest{ + resp, err := api.GetSchemas(context.Background(), &vtadminpb.GetSchemasRequest{ TableSizeOptions: &vtadminpb.GetSchemaTableSizeOptions{ AggregateSizes: true, }, @@ -2539,8 +2536,7 @@ func TestGetSchemas(t *testing.T) { // Clone schemas so our mutations below don't trip the race detector. schemas := make([]*vtadminpb.Schema, len(resp.Schemas)) for i, schema := range resp.Schemas { - schema := proto.Clone(schema).(*vtadminpb.Schema) - + schema := schema.CloneVT() for _, td := range schema.TableDefinitions { // Zero these out because they're non-deterministic and also not // relevant to the final result. @@ -2559,6 +2555,288 @@ func TestGetSchemas(t *testing.T) { }) } +func TestGetSrvKeyspace(t *testing.T) { + t.Parallel() + + clusterID := "c0" + clusterName := "cluster0" + + tests := []struct { + name string + cells []string + keyspace string + cellSrvKeyspaces map[string]*topodatapb.SrvKeyspace + req *vtadminpb.GetSrvKeyspaceRequest + expected *vtctldatapb.GetSrvKeyspacesResponse + shouldErr bool + }{ + { + name: "success", + cells: []string{"zone0"}, + keyspace: "testkeyspace", + cellSrvKeyspaces: map[string]*topodatapb.SrvKeyspace{ + "zone0": { + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ + { + ServedType: topodatapb.TabletType_REPLICA, + ShardTabletControls: []*topodatapb.ShardTabletControl{ + { + Name: "-", + QueryServiceDisabled: false, + }, + }, + }, + }, + }, + }, + req: &vtadminpb.GetSrvKeyspaceRequest{ + ClusterId: clusterID, + Keyspace: "testkeyspace", + Cells: []string{"zone0"}, + }, + expected: &vtctldatapb.GetSrvKeyspacesResponse{ + SrvKeyspaces: map[string]*topodatapb.SrvKeyspace{ + "zone0": { + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ + { + ServedType: topodatapb.TabletType_REPLICA, + ShardTabletControls: []*topodatapb.ShardTabletControl{ + { + Name: "-", + QueryServiceDisabled: false, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "cluster doesn't exist", + req: &vtadminpb.GetSrvKeyspaceRequest{ + Cells: []string{"doesnt-matter"}, + ClusterId: "doesnt-exist", + Keyspace: "doesnt-matter", + }, + shouldErr: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tmc := testutil.TabletManagerClient{} + + toposerver := memorytopo.NewServer(ctx, tt.cells...) + + vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return grpcvtctldserver.NewVtctldServer(ts) + }) + + testutil.WithTestServer(t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { + for cell, sks := range tt.cellSrvKeyspaces { + err := toposerver.UpdateSrvKeyspace(ctx, cell, tt.keyspace, sks) + require.NoError(t, err) + } + + clusters := []*cluster.Cluster{ + vtadmintestutil.BuildCluster(t, vtadmintestutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: clusterID, + Name: clusterName, + }, + VtctldClient: vtctldClient, + }), + } + + api := NewAPI(clusters, Options{}) + resp, err := api.GetSrvKeyspace(ctx, tt.req) + + if tt.shouldErr { + assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.Truef(t, proto.Equal(tt.expected, resp), "expected %v, got %v", tt.expected, resp) + }) + }) + } +} + +func TestGetSrvKeyspaces(t *testing.T) { + t.Parallel() + + clusterID := "c0" + clusterName := "cluster0" + + tests := []struct { + name string + cells []string + keyspaces []*vtctldatapb.Keyspace + cellSrvKeyspaces map[string]map[string]*topodatapb.SrvKeyspace + req *vtadminpb.GetSrvKeyspacesRequest + expected *vtadminpb.GetSrvKeyspacesResponse + shouldErr bool + }{ + { + name: "success", + cells: []string{"zone0"}, + keyspaces: []*vtctldatapb.Keyspace{ + { + Name: "keyspace0", + Keyspace: &topodatapb.Keyspace{}, + }, + { + Name: "keyspace1", + Keyspace: &topodatapb.Keyspace{}, + }, + }, + cellSrvKeyspaces: map[string]map[string]*topodatapb.SrvKeyspace{ + "keyspace0": { + "zone0": { + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ + { + ServedType: topodatapb.TabletType_REPLICA, + ShardTabletControls: []*topodatapb.ShardTabletControl{ + { + Name: "-", + QueryServiceDisabled: false, + }, + }, + }, + }, + }, + }, + "keyspace1": { + "zone0": { + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ + { + ServedType: topodatapb.TabletType_REPLICA, + ShardTabletControls: []*topodatapb.ShardTabletControl{ + { + Name: "-", + QueryServiceDisabled: false, + }, + }, + }, + }, + }, + }, + }, + req: &vtadminpb.GetSrvKeyspacesRequest{ + ClusterIds: []string{clusterID}, + Cells: []string{"zone0"}, + }, + expected: &vtadminpb.GetSrvKeyspacesResponse{ + SrvKeyspaces: map[string]*vtctldatapb.GetSrvKeyspacesResponse{ + "keyspace0": { + SrvKeyspaces: map[string]*topodatapb.SrvKeyspace{ + "zone0": { + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ + { + ServedType: topodatapb.TabletType_REPLICA, + ShardTabletControls: []*topodatapb.ShardTabletControl{ + { + Name: "-", + QueryServiceDisabled: false, + }, + }, + }, + }, + }, + }, + }, + "keyspace1": { + SrvKeyspaces: map[string]*topodatapb.SrvKeyspace{ + "zone0": { + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ + { + ServedType: topodatapb.TabletType_REPLICA, + ShardTabletControls: []*topodatapb.ShardTabletControl{ + { + Name: "-", + QueryServiceDisabled: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "cluster doesn't exist", + req: &vtadminpb.GetSrvKeyspacesRequest{ + Cells: []string{"doesnt-matter"}, + ClusterIds: []string{"doesnt-exist"}, + }, + expected: &vtadminpb.GetSrvKeyspacesResponse{}, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tmc := testutil.TabletManagerClient{} + + toposerver := memorytopo.NewServer(ctx, tt.cells...) + + for _, ks := range tt.keyspaces { + testutil.AddKeyspace(ctx, t, toposerver, ks) + } + + vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return grpcvtctldserver.NewVtctldServer(ts) + }) + + testutil.WithTestServer(t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { + for keyspace, sks := range tt.cellSrvKeyspaces { + for cell, sk := range sks { + err := toposerver.UpdateSrvKeyspace(ctx, cell, keyspace, sk) + require.NoError(t, err) + } + } + + clusters := []*cluster.Cluster{ + vtadmintestutil.BuildCluster(t, vtadmintestutil.TestClusterConfig{ + Cluster: &vtadminpb.Cluster{ + Id: clusterID, + Name: clusterName, + }, + VtctldClient: vtctldClient, + }), + } + + api := NewAPI(clusters, Options{}) + resp, err := api.GetSrvKeyspaces(ctx, tt.req) + + if tt.shouldErr { + assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.Truef(t, proto.Equal(tt.expected, resp), "expected %v, got %v", tt.expected, resp) + }) + }) + } +} + func TestGetSrvVSchema(t *testing.T) { t.Parallel() @@ -2675,17 +2953,17 @@ func TestGetSrvVSchema(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tmc := testutil.TabletManagerClient{} - toposerver := memorytopo.NewServer(tt.cells...) + toposerver := memorytopo.NewServer(ctx, tt.cells...) vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return grpcvtctldserver.NewVtctldServer(ts) @@ -2969,17 +3247,17 @@ func TestGetSrvVSchemas(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tmc := testutil.TabletManagerClient{} - toposerver := memorytopo.NewServer(tt.cells...) + toposerver := memorytopo.NewServer(ctx, tt.cells...) vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return grpcvtctldserver.NewVtctldServer(ts) @@ -4601,8 +4879,6 @@ func TestGetWorkflows(t *testing.T) { } func TestVTExplain(t *testing.T) { - t.Parallel() - tests := []struct { name string keyspaces []*vtctldatapb.Keyspace @@ -4820,15 +5096,13 @@ func TestVTExplain(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - toposerver := memorytopo.NewServer("c0_cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + toposerver := memorytopo.NewServer(ctx, "c0_cell1") tmc := testutil.TabletManagerClient{ GetSchemaResults: map[string]struct { @@ -4925,6 +5199,7 @@ func TestServeHTTP(t *testing.T) { }, }, }.Cluster(context.Background()) + defer testCluster.Close() tests := []struct { name string diff --git a/go/vt/vtadmin/cluster/cluster.go b/go/vt/vtadmin/cluster/cluster.go index a9246832060..9c8a63ea930 100644 --- a/go/vt/vtadmin/cluster/cluster.go +++ b/go/vt/vtadmin/cluster/cluster.go @@ -30,8 +30,6 @@ import ( "text/template" "time" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/pools" "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sets" @@ -39,7 +37,6 @@ import ( "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtadmin/cache" "vitess.io/vitess/go/vt/vtadmin/cluster/discovery" @@ -231,12 +228,13 @@ func (c *Cluster) Close() error { rec.RecordError(closer.Close()) }(closer) } + wg.Wait() if rec.HasErrors() { return fmt.Errorf("failed to cleanly close cluster (id=%s): %w", c.ID, rec.Error()) } - return nil + return c.schemaCache.Close() } // ToProto returns a value-copy protobuf equivalent of the cluster. @@ -340,7 +338,7 @@ func (c *Cluster) parseTablet(rows *sql.Rows) (*vtadminpb.Tablet, error) { return nil, fmt.Errorf("failed parsing primary_term_start_time %s: %w", mtstStr, err) } - topotablet.PrimaryTermStartTime = logutil.TimeToProto(timeTime) + topotablet.PrimaryTermStartTime = protoutil.TimeToProto(timeTime) } if c.TabletFQDNTmpl != nil { @@ -507,6 +505,7 @@ func (c *Cluster) EmergencyFailoverShard(ctx context.Context, req *vtctldatapb.E span.Annotate("new_primary", topoproto.TabletAliasString(req.NewPrimary)) span.Annotate("ignore_replicas", strings.Join(topoproto.TabletAliasList(req.IgnoreReplicas).ToStringSlice(), ",")) span.Annotate("prevent_cross_cell_promotion", req.PreventCrossCellPromotion) + span.Annotate("wait_for_all_tablets", req.WaitForAllTablets) if d, ok, err := protoutil.DurationFromProto(req.WaitReplicasTimeout); ok && err == nil { span.Annotate("wait_replicas_timeout", d.String()) @@ -1193,6 +1192,50 @@ func (c *Cluster) GetKeyspaces(ctx context.Context) ([]*vtadminpb.Keyspace, erro return keyspaces, nil } +// GetSrvKeyspaces returns all SrvKeyspaces for all keyspaces in a cluster. +func (c *Cluster) GetSrvKeyspaces(ctx context.Context, cells []string) (map[string]*vtctldatapb.GetSrvKeyspacesResponse, error) { + span, ctx := trace.NewSpan(ctx, "Cluster.GetKeyspaces") + AnnotateSpan(c, span) + + defer span.Finish() + keyspaces, err := c.Vtctld.GetKeyspaces(ctx, &vtctldatapb.GetKeyspacesRequest{}) + if err != nil { + return nil, fmt.Errorf("GetKeyspaces(cluster = %s): %w", c.ID, err) + } + + var ( + m sync.Mutex + wg sync.WaitGroup + rec concurrency.AllErrorRecorder + srvKeyspaces = make(map[string]*vtctldatapb.GetSrvKeyspacesResponse, len(keyspaces.Keyspaces)) + ) + + for _, keyspace := range keyspaces.Keyspaces { + wg.Add(1) + + go func(keyspace *vtctldatapb.Keyspace) { + defer wg.Done() + srv_keyspaces, err := c.Vtctld.GetSrvKeyspaces(ctx, &vtctldatapb.GetSrvKeyspacesRequest{Keyspace: keyspace.Name, Cells: cells}) + if err != nil { + rec.RecordError(fmt.Errorf("GetSrvKeyspaces(keyspace = %s): %w", keyspace.Name, err)) + return + } + + m.Lock() + srvKeyspaces[keyspace.Name] = srv_keyspaces + m.Unlock() + }(keyspace) + } + + wg.Wait() + + if rec.HasErrors() { + return nil, rec.Error() + } + + return srvKeyspaces, nil +} + // GetTablets returns all tablets in the cluster. func (c *Cluster) GetTablets(ctx context.Context) ([]*vtadminpb.Tablet, error) { span, ctx := trace.NewSpan(ctx, "Cluster.GetTablets") @@ -1528,8 +1571,7 @@ func (c *Cluster) getSchemaFromTablets(ctx context.Context, keyspace string, tab span, ctx := trace.NewSpan(ctx, "Vtctld.GetSchema") defer span.Finish() - - req := proto.Clone(opts.BaseRequest).(*vtctldatapb.GetSchemaRequest) + req := opts.BaseRequest.CloneVT() req.TableSizesOnly = sizesOnly req.TabletAlias = tablet.Tablet.Alias diff --git a/go/vt/vtadmin/cluster/cluster_test.go b/go/vt/vtadmin/cluster/cluster_test.go index 202937bb95f..53c3b4f71cd 100644 --- a/go/vt/vtadmin/cluster/cluster_test.go +++ b/go/vt/vtadmin/cluster/cluster_test.go @@ -27,9 +27,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sets" "vitess.io/vitess/go/test/utils" @@ -49,9 +48,8 @@ import ( ) func TestCreateKeyspace(t *testing.T) { - t.Parallel() + defer utils.EnsureNoLeaks(t) - ctx := context.Background() tests := []struct { name string cfg testutil.TestClusterConfig @@ -157,11 +155,12 @@ func TestCreateKeyspace(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() cluster := testutil.BuildCluster(t, tt.cfg) + defer cluster.Close() resp, err := cluster.CreateKeyspace(ctx, tt.req) if tt.shouldErr { @@ -176,7 +175,7 @@ func TestCreateKeyspace(t *testing.T) { } func TestCreateShard(t *testing.T) { - t.Parallel() + ctx := utils.LeakCheckContext(t) type test struct { name string @@ -185,11 +184,11 @@ func TestCreateShard(t *testing.T) { shouldErr bool assertion func(t *testing.T, tt *test) } - ctx := context.Background() + tests := []*test{ { name: "ok", - tc: testutil.BuildIntegrationTestCluster(t, &vtadminpb.Cluster{ + tc: testutil.BuildIntegrationTestCluster(t, ctx, &vtadminpb.Cluster{ Id: "local", Name: "local", }, "zone1"), @@ -210,7 +209,7 @@ func TestCreateShard(t *testing.T) { }, { name: "nil request", - tc: testutil.BuildIntegrationTestCluster(t, &vtadminpb.Cluster{ + tc: testutil.BuildIntegrationTestCluster(t, ctx, &vtadminpb.Cluster{ Id: "local", Name: "local", }, "zone1"), @@ -219,7 +218,7 @@ func TestCreateShard(t *testing.T) { }, { name: "no keyspace in request", - tc: testutil.BuildIntegrationTestCluster(t, &vtadminpb.Cluster{ + tc: testutil.BuildIntegrationTestCluster(t, ctx, &vtadminpb.Cluster{ Id: "local", Name: "local", }, "zone1"), @@ -231,7 +230,7 @@ func TestCreateShard(t *testing.T) { }, { name: "no shard name in request", - tc: testutil.BuildIntegrationTestCluster(t, &vtadminpb.Cluster{ + tc: testutil.BuildIntegrationTestCluster(t, ctx, &vtadminpb.Cluster{ Id: "local", Name: "local", }, "zone1"), @@ -243,7 +242,7 @@ func TestCreateShard(t *testing.T) { }, { name: "vtctld.CreateShard fails", - tc: testutil.BuildIntegrationTestCluster(t, &vtadminpb.Cluster{ + tc: testutil.BuildIntegrationTestCluster(t, ctx, &vtadminpb.Cluster{ Id: "local", Name: "local", }, "zone1"), @@ -258,6 +257,7 @@ func TestCreateShard(t *testing.T) { for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { + defer tt.tc.Cluster.Close() _, err := tt.tc.Cluster.CreateShard(ctx, tt.req) if tt.shouldErr { assert.Error(t, err) @@ -276,9 +276,8 @@ func TestCreateShard(t *testing.T) { } func TestDeleteKeyspace(t *testing.T) { - t.Parallel() + ctx := utils.LeakCheckContext(t) - ctx := context.Background() tests := []struct { name string cfg testutil.TestClusterConfig @@ -343,11 +342,9 @@ func TestDeleteKeyspace(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - cluster := testutil.BuildCluster(t, tt.cfg) + defer cluster.Close() resp, err := cluster.DeleteKeyspace(ctx, tt.req) if tt.shouldErr { @@ -363,6 +360,7 @@ func TestDeleteKeyspace(t *testing.T) { func TestDeleteShards(t *testing.T) { t.Parallel() + ctx := utils.LeakCheckContext(t) type test struct { name string @@ -372,16 +370,15 @@ func TestDeleteShards(t *testing.T) { shouldErr bool assertion func(t *testing.T, tt *test) } - ctx := context.Background() + tests := []*test{ { name: "ok", - tc: testutil.BuildIntegrationTestCluster(t, &vtadminpb.Cluster{ + tc: testutil.BuildIntegrationTestCluster(t, ctx, &vtadminpb.Cluster{ Id: "local", Name: "local", }, "zone1"), setup: func(t *testing.T, tt *test) { - ctx := context.Background() shards := []string{"-80", "80-"} for _, shard := range shards { _, err := tt.tc.Cluster.CreateShard(ctx, &vtctldatapb.CreateShardRequest{ @@ -418,7 +415,7 @@ func TestDeleteShards(t *testing.T) { }, { name: "nil request", - tc: testutil.BuildIntegrationTestCluster(t, &vtadminpb.Cluster{ + tc: testutil.BuildIntegrationTestCluster(t, ctx, &vtadminpb.Cluster{ Id: "local", Name: "local", }, "zone1"), @@ -427,7 +424,7 @@ func TestDeleteShards(t *testing.T) { }, { name: "vtctld.DeleteShards fails", - tc: testutil.BuildIntegrationTestCluster(t, &vtadminpb.Cluster{ + tc: testutil.BuildIntegrationTestCluster(t, ctx, &vtadminpb.Cluster{ Id: "local", Name: "local", }, "zone1"), @@ -475,7 +472,6 @@ func TestDeleteShards(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { if tt.setup != nil { func() { @@ -484,6 +480,7 @@ func TestDeleteShards(t *testing.T) { }() } + defer tt.tc.Cluster.Close() _, err := tt.tc.Cluster.DeleteShards(ctx, tt.req) if tt.shouldErr { assert.Error(t, err) @@ -610,6 +607,7 @@ func TestFindTablet(t *testing.T) { }, Tablets: tt.tablets, }) + defer cluster.Close() tablet, err := cluster.FindTablet(ctx, tt.filter) if tt.expectedError != nil { @@ -821,6 +819,7 @@ func TestFindTablets(t *testing.T) { }, Tablets: tt.tablets, }) + defer cluster.Close() tablets, err := cluster.FindTablets(ctx, tt.filter, tt.n) assert.NoError(t, err) @@ -1202,6 +1201,7 @@ func TestFindWorkflows(t *testing.T) { t.Parallel() c := testutil.BuildCluster(t, tt.cfg) + defer c.Close() workflows, err := c.FindWorkflows(ctx, tt.keyspaces, tt.opts) if tt.shouldErr { assert.Error(t, err) @@ -1482,6 +1482,7 @@ func TestGetCellInfos(t *testing.T) { Cluster: cpb, VtctldClient: tt.vtctld, }) + defer c.Close() cellInfos, err := c.GetCellInfos(context.Background(), tt.req) if tt.shouldErr { assert.Error(t, err) @@ -1564,6 +1565,7 @@ func TestGetCellsAliases(t *testing.T) { Cluster: cpb, VtctldClient: tt.vtctld, }) + defer c.Close() cellsAliases, err := c.GetCellsAliases(context.Background()) if tt.shouldErr { assert.Error(t, err) @@ -1716,6 +1718,7 @@ func TestGetSchema(t *testing.T) { Tablets: []*vtadminpb.Tablet{tt.tablet}, DBConfig: testutil.Dbcfg{}, }) + defer c.Close() schema, err := c.GetSchema(ctx, "testkeyspace", cluster.GetSchemaOptions{ BaseRequest: tt.req, @@ -1767,6 +1770,7 @@ func TestGetSchema(t *testing.T) { }, VtctldClient: vtctld, }) + defer c.Close() _, _ = c.GetSchema(ctx, "testkeyspace", cluster.GetSchemaOptions{ BaseRequest: req, @@ -2689,6 +2693,7 @@ func TestGetSchema(t *testing.T) { } c := testutil.BuildCluster(t, tt.cfg) + defer c.Close() schema, err := c.GetSchema(ctx, tt.keyspace, tt.opts) if tt.shouldErr { assert.Error(t, err) @@ -2697,8 +2702,7 @@ func TestGetSchema(t *testing.T) { } // Clone so our mutation below doesn't trip the race detector. - schema = proto.Clone(schema).(*vtadminpb.Schema) - + schema = schema.CloneVT() if schema.TableDefinitions != nil { // For simplicity, we're going to assert only on the state // of the aggregated sizes (in schema.TableSizes), since the @@ -2772,18 +2776,18 @@ func TestGetShardReplicationPositions(t *testing.T) { Response: &vtctldatapb.ShardReplicationPositionsResponse{ ReplicationStatuses: map[string]*replicationdatapb.Status{ "zone1-001": { - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), Position: "MySQL56/08d0dbbb-be29-11eb-9fea-0aafb9701138:1-109848265", }, "zone1-002": { // Note: in reality other fields will be set on replicating hosts as well, but this is sufficient to illustrate in the testing. - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), Position: "MySQL56/08d0dbbb-be29-11eb-9fea-0aafb9701138:1-109848265", }, "zone1-003": { - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), Position: "MySQL56/08d0dbbb-be29-11eb-9fea-0aafb9701138:1-109848265", }, }, @@ -2835,18 +2839,18 @@ func TestGetShardReplicationPositions(t *testing.T) { PositionInfo: &vtctldatapb.ShardReplicationPositionsResponse{ ReplicationStatuses: map[string]*replicationdatapb.Status{ "zone1-001": { - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), Position: "MySQL56/08d0dbbb-be29-11eb-9fea-0aafb9701138:1-109848265", }, "zone1-002": { - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), Position: "MySQL56/08d0dbbb-be29-11eb-9fea-0aafb9701138:1-109848265", }, "zone1-003": { - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), Position: "MySQL56/08d0dbbb-be29-11eb-9fea-0aafb9701138:1-109848265", }, }, @@ -2944,6 +2948,7 @@ func TestGetShardReplicationPositions(t *testing.T) { t.Parallel() c := testutil.BuildCluster(t, tt.cfg) + defer c.Close() resp, err := c.GetShardReplicationPositions(ctx, tt.req) if tt.shouldErr { @@ -3033,6 +3038,7 @@ func TestGetVSchema(t *testing.T) { t.Parallel() cluster := testutil.BuildCluster(t, tt.cfg) + defer cluster.Close() vschema, err := cluster.GetVSchema(ctx, tt.keyspace) if tt.shouldErr { @@ -3191,6 +3197,7 @@ func TestGetWorkflow(t *testing.T) { t.Parallel() c := testutil.BuildCluster(t, tt.cfg) + defer c.Close() workflow, err := c.GetWorkflow(ctx, tt.keyspace, tt.workflow, tt.opts) if tt.shouldErr { assert.Error(t, err) @@ -3357,6 +3364,7 @@ func TestGetWorkflows(t *testing.T) { t.Parallel() c := testutil.BuildCluster(t, tt.cfg) + defer c.Close() workflows, err := c.GetWorkflows(ctx, tt.keyspaces, tt.opts) if tt.shouldErr { assert.Error(t, err) @@ -3434,6 +3442,7 @@ func TestSetWritable(t *testing.T) { t.Parallel() c := testutil.BuildCluster(t, tt.cfg) + defer c.Close() err := c.SetWritable(ctx, tt.req) tt.assertion(t, err, tt.assertionMsgExtra...) }) @@ -3580,6 +3589,7 @@ func TestToggleTabletReplication(t *testing.T) { t.Parallel() c := testutil.BuildCluster(t, tt.cfg) + defer c.Close() err := c.ToggleTabletReplication(ctx, tt.tablet, bool(tt.state)) tt.assertion(t, err, tt.assertionMsgExtra...) }) diff --git a/go/vt/vtadmin/cluster/discovery/discovery_static_file_test.go b/go/vt/vtadmin/cluster/discovery/discovery_static_file_test.go index 8fa049f8540..344ee32863d 100644 --- a/go/vt/vtadmin/cluster/discovery/discovery_static_file_test.go +++ b/go/vt/vtadmin/cluster/discovery/discovery_static_file_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/vt/proto/vtadmin" vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" ) @@ -54,7 +53,7 @@ func TestDiscoverVTGate(t *testing.T) { }] } `), - expected: &vtadmin.VTGate{ + expected: &vtadminpb.VTGate{ Hostname: "127.0.0.1:12345", }, }, @@ -292,7 +291,7 @@ func TestDiscoverVtctld(t *testing.T) { }] } `), - expected: &vtadmin.Vtctld{ + expected: &vtadminpb.Vtctld{ Hostname: "127.0.0.1:12345", }, }, diff --git a/go/vt/vtadmin/cluster/internal/caches/schemacache/cache.go b/go/vt/vtadmin/cluster/internal/caches/schemacache/cache.go index 98801ab3951..ebc3899e82f 100644 --- a/go/vt/vtadmin/cluster/internal/caches/schemacache/cache.go +++ b/go/vt/vtadmin/cluster/internal/caches/schemacache/cache.go @@ -22,8 +22,6 @@ import ( "fmt" "time" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/vtadmin/cache" @@ -178,8 +176,7 @@ func LoadOne(c *schemaCache, key Key, opts LoadOptions) (schema *vtadminpb.Schem } func loadSchema(cachedSchema *vtadminpb.Schema, opts LoadOptions) *vtadminpb.Schema { - schema := proto.Clone(cachedSchema).(*vtadminpb.Schema) - + schema := cachedSchema.CloneVT() if !opts.AggregateSizes { schema.TableSizes = nil } diff --git a/go/vt/vtadmin/cluster/resolver/resolver_test.go b/go/vt/vtadmin/cluster/resolver/resolver_test.go index 720f26f9f6c..fd1dbab5f13 100644 --- a/go/vt/vtadmin/cluster/resolver/resolver_test.go +++ b/go/vt/vtadmin/cluster/resolver/resolver_test.go @@ -78,20 +78,6 @@ func (cc *mockClientConn) assertUpdateWithin(t testing.TB, timeout time.Duration } } -func (cc *mockClientConn) assertErrorReportedWithin(t testing.TB, timeout time.Duration, msgAndArgs ...any) bool { - t.Helper() - - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - select { - case <-ctx.Done(): - return assert.Fail(t, "failed to receive reported error", "did not receive reported error within %v: %s", timeout, ctx.Err()) - case actual := <-cc.errors: - return assert.Error(t, actual, msgAndArgs...) - } -} - func (cc *mockClientConn) UpdateState(state grpcresolver.State) error { select { case <-cc.ctx.Done(): diff --git a/go/vt/vtadmin/grpcserver/server.go b/go/vt/vtadmin/grpcserver/server.go index 87a7bd8eb35..af31cf77bd3 100644 --- a/go/vt/vtadmin/grpcserver/server.go +++ b/go/vt/vtadmin/grpcserver/server.go @@ -17,6 +17,7 @@ limitations under the License. package grpcserver import ( + "context" "fmt" "net" "net/http" @@ -118,6 +119,23 @@ func New(name string, opts Options) *Server { unaryInterceptors = append(unaryInterceptors, otgrpc.UnaryServerInterceptor(otgrpc.WithTracer(tracer))) } + streamInterceptors = append(streamInterceptors, func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + err := handler(srv, ss) + if err != nil { + log.Errorf("%s error: %s", info.FullMethod, err) + } + + return err + }) + unaryInterceptors = append(unaryInterceptors, func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + resp, err = handler(ctx, req) + if err != nil { + log.Errorf("%s error: %s", info.FullMethod, err) + } + + return resp, err + }) + recoveryHandler := grpc_recovery.WithRecoveryHandler(func(p any) (err error) { return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "panic triggered: %v", p) }) diff --git a/go/vt/vtadmin/http/api.go b/go/vt/vtadmin/http/api.go index 4d5d54b3afb..3a74b1e7aaf 100644 --- a/go/vt/vtadmin/http/api.go +++ b/go/vt/vtadmin/http/api.go @@ -20,7 +20,9 @@ import ( "context" "net/http" + "vitess.io/vitess/go/sets" "vitess.io/vitess/go/trace" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vtadmin/cache" "vitess.io/vitess/go/vt/vtadmin/rbac" @@ -84,6 +86,9 @@ func (api *API) Adapt(handler VTAdminHandler) http.HandlerFunc { ctx = cache.NewIncomingRefreshContext(ctx) } + // Transform any ?cluster query params to ?cluster_id. + deprecateQueryParam(r, "cluster_id", "cluster") + handler(ctx, Request{r}, api).Write(w) } } @@ -97,3 +102,21 @@ func (api *API) Options() Options { func (api *API) Server() vtadminpb.VTAdminServer { return api.server } + +func deprecateQueryParam(r *http.Request, newName string, oldName string) { + q := r.URL.Query() + + if q.Has(oldName) { + log.Warningf("query param %s is deprecated in favor of %s. support for %s will be dropped in the next version", oldName, newName, oldName) + + newVals := sets.New(q[newName]...) + for _, oldVal := range q[oldName] { + if !newVals.Has(oldVal) { + q.Add(newName, oldVal) + } + } + + q.Del(oldName) + r.URL.RawQuery = q.Encode() + } +} diff --git a/go/vt/vtadmin/http/api_test.go b/go/vt/vtadmin/http/api_test.go new file mode 100644 index 00000000000..9648eaaaff2 --- /dev/null +++ b/go/vt/vtadmin/http/api_test.go @@ -0,0 +1,89 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package http + +import ( + "net/http" + "net/url" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDeprecateQueryParam(t *testing.T) { + t.Parallel() + + cases := []struct { + in map[string][]string + oldName string + newName string + expected []string + }{ + { + in: map[string][]string{ + "foo": {"1", "2"}, + "old_bar": {"one", "two"}, + }, + oldName: "old_bar", + newName: "bar", + expected: []string{"one", "two"}, + }, + { + in: map[string][]string{ + "foo": {"1", "2"}, + "bar": {"one", "two"}, + }, + oldName: "old_bar", + newName: "bar", + expected: []string{"one", "two"}, + }, + { + in: map[string][]string{ + "foo": {"1", "2"}, + "old_bar": {"one", "three"}, + "bar": {"one", "two"}, + }, + oldName: "old_bar", + newName: "bar", + expected: []string{"one", "two", "three"}, + }, + { + in: map[string][]string{ + "foo": {"1", "2"}, + }, + oldName: "old_bar", + newName: "bar", + expected: nil, + }, + } + + for _, tcase := range cases { + tcase := tcase + t.Run("", func(t *testing.T) { + query := url.Values(tcase.in) + + r := http.Request{ + URL: &url.URL{RawQuery: query.Encode()}, + } + + deprecateQueryParam(&r, tcase.newName, tcase.oldName) + + assert.False(t, r.URL.Query().Has(tcase.oldName), "old query param (%s) should not be in transformed query", tcase.oldName) + assert.Equal(t, tcase.expected, r.URL.Query()[tcase.newName]) + }) + } +} diff --git a/go/vt/vtadmin/http/backups.go b/go/vt/vtadmin/http/backups.go index 14d3d185cc8..8221944b527 100644 --- a/go/vt/vtadmin/http/backups.go +++ b/go/vt/vtadmin/http/backups.go @@ -25,7 +25,7 @@ import ( vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) -// GetBackups implements the http wrapper for /backups[?cluster=[&cluster=]]. +// GetBackups implements the http wrapper for /backups[?cluster_id=[&cluster_id=]]. func GetBackups(ctx context.Context, r Request, api *API) *JSONResponse { query := r.URL.Query() @@ -51,7 +51,7 @@ func GetBackups(ctx context.Context, r Request, api *API) *JSONResponse { } backups, err := api.server.GetBackups(ctx, &vtadminpb.GetBackupsRequest{ - ClusterIds: query["cluster"], + ClusterIds: query["cluster_id"], Keyspaces: query["keyspace"], KeyspaceShards: query["keyspace_shard"], RequestOptions: &vtctldatapb.GetBackupsRequest{ diff --git a/go/vt/vtadmin/http/cells.go b/go/vt/vtadmin/http/cells.go index 7c0ade5ce2d..c0d2dede3c1 100644 --- a/go/vt/vtadmin/http/cells.go +++ b/go/vt/vtadmin/http/cells.go @@ -23,7 +23,7 @@ import ( ) // GetCellInfos implements the http wrapper for the -// /cells[?cluster=[&cluster=...]?cell=[&cell=...]&names_only=(true|false)] route. +// /cells[?cluster=[&cluster_id=...]?cell=[&cell=...]&names_only=(true|false)] route. func GetCellInfos(ctx context.Context, r Request, api *API) *JSONResponse { namesOnly, err := r.ParseQueryParamAsBool("names_only", false) if err != nil { @@ -31,7 +31,7 @@ func GetCellInfos(ctx context.Context, r Request, api *API) *JSONResponse { } cellInfos, err := api.server.GetCellInfos(ctx, &vtadminpb.GetCellInfosRequest{ - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], Cells: r.URL.Query()["cell"], NamesOnly: namesOnly, }) @@ -39,10 +39,10 @@ func GetCellInfos(ctx context.Context, r Request, api *API) *JSONResponse { } // GetCellsAliases implements the http wrapper for the -// /cells_aliases[?cluster=[&cluster=]] route. +// /cells_aliases[?cluster_id=[&cluster_id=]] route. func GetCellsAliases(ctx context.Context, r Request, api *API) *JSONResponse { cellsAliases, err := api.server.GetCellsAliases(ctx, &vtadminpb.GetCellsAliasesRequest{ - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(cellsAliases, err) diff --git a/go/vt/vtadmin/http/experimental/tablets.go b/go/vt/vtadmin/http/experimental/tablets.go index 389eb74c765..e101ccee260 100644 --- a/go/vt/vtadmin/http/experimental/tablets.go +++ b/go/vt/vtadmin/http/experimental/tablets.go @@ -42,7 +42,7 @@ func TabletDebugVarsPassthrough(ctx context.Context, r vtadminhttp.Request, api tablet, err := api.Server().GetTablet(ctx, &vtadminpb.GetTabletRequest{ Alias: alias, - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) if err != nil { diff --git a/go/vt/vtadmin/http/gates.go b/go/vt/vtadmin/http/gates.go index 1cbcc6a749d..3f566618918 100644 --- a/go/vt/vtadmin/http/gates.go +++ b/go/vt/vtadmin/http/gates.go @@ -22,10 +22,10 @@ import ( vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" ) -// GetGates implements the http wrapper for /gates[?cluster=[&cluster=]]. +// GetGates implements the http wrapper for /gates[?cluster_id=[&cluster_id=]]. func GetGates(ctx context.Context, r Request, api *API) *JSONResponse { gates, err := api.server.GetGates(ctx, &vtadminpb.GetGatesRequest{ - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(gates, err) diff --git a/go/vt/vtadmin/http/handlers/panic_recovery.go b/go/vt/vtadmin/http/handlers/panic_recovery.go new file mode 100644 index 00000000000..2f9af188a86 --- /dev/null +++ b/go/vt/vtadmin/http/handlers/panic_recovery.go @@ -0,0 +1,47 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "fmt" + "net/http" + + "github.com/gorilla/mux" + + "vitess.io/vitess/go/vt/log" +) + +// PanicRecoveryHandler is a mux.MiddlewareFunc which recovers from any uncaught +// `panic`s further down the middleware chain. +// +// If it recovers from a panic, it returns a 500 to the caller, and logs the +// route name along with the panicking error. +// name, as set by (mux.*Route).Name(), embeds it in the request context, and invokes +// the next middleware in the chain. +func PanicRecoveryHandler(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + name := mux.CurrentRoute(r).GetName() + defer func() { + if err := recover(); err != nil { + log.Errorf("uncaught panic in %s: %s", name, err) + http.Error(w, fmt.Sprintf("%v", err), http.StatusInternalServerError) + } + }() + + next.ServeHTTP(w, r) + }) +} diff --git a/go/vt/vtadmin/http/handlers/panic_recovery_test.go b/go/vt/vtadmin/http/handlers/panic_recovery_test.go new file mode 100644 index 00000000000..8d32847df23 --- /dev/null +++ b/go/vt/vtadmin/http/handlers/panic_recovery_test.go @@ -0,0 +1,69 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "bytes" + "net/http" + "net/http/httptest" + "testing" + + "github.com/gorilla/mux" + "github.com/stretchr/testify/assert" +) + +func TestPanicRecoveryHandler(t *testing.T) { + m := mux.NewRouter() + m.HandleFunc("/panic", func(w http.ResponseWriter, r *http.Request) { panic("test") }) + m.HandleFunc("/nopanic", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("ok\n")) + }) + + m.Use(PanicRecoveryHandler) + serv := httptest.NewServer(m) + defer serv.Close() + + cases := []struct { + route string + code int + message string + }{ + { + route: "/panic", + code: http.StatusInternalServerError, + message: "test\n", + }, + { + route: "/nopanic", + code: http.StatusOK, + message: "ok\n", + }, + } + + for _, tcase := range cases { + tcase := tcase + t.Run(tcase.route, func(t *testing.T) { + rec := httptest.ResponseRecorder{ + Body: bytes.NewBuffer(nil), + } + + m.ServeHTTP(&rec, httptest.NewRequest("GET", tcase.route, nil)) + assert.Equal(t, tcase.code, rec.Code) + assert.Equal(t, tcase.message, rec.Body.String()) + }) + } +} diff --git a/go/vt/vtadmin/http/keyspaces.go b/go/vt/vtadmin/http/keyspaces.go index dd5bc426347..7bfa8e77b5e 100644 --- a/go/vt/vtadmin/http/keyspaces.go +++ b/go/vt/vtadmin/http/keyspaces.go @@ -78,10 +78,10 @@ func GetKeyspace(ctx context.Context, r Request, api *API) *JSONResponse { return NewJSONResponse(keyspace, err) } -// GetKeyspaces implements the http wrapper for /keyspaces[?cluster=[&cluster=]]. +// GetKeyspaces implements the http wrapper for /keyspaces[?cluster_id=[&cluster_id=]]. func GetKeyspaces(ctx context.Context, r Request, api *API) *JSONResponse { keyspaces, err := api.server.GetKeyspaces(ctx, &vtadminpb.GetKeyspacesRequest{ - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(keyspaces, err) @@ -168,7 +168,7 @@ func ValidateKeyspace(ctx context.Context, r Request, api *API) *JSONResponse { return NewJSONResponse(res, err) } -// ValidateKeyspace validates that all nodes reachable from the specified keyspace are consistent. +// ValidateSchemaKeyspace validates that all nodes reachable from the specified keyspace are consistent. func ValidateSchemaKeyspace(ctx context.Context, r Request, api *API) *JSONResponse { vars := mux.Vars(r.Request) diff --git a/go/vt/vtadmin/http/replication.go b/go/vt/vtadmin/http/replication.go index 25436d493f4..36c37e19b3c 100644 --- a/go/vt/vtadmin/http/replication.go +++ b/go/vt/vtadmin/http/replication.go @@ -24,14 +24,14 @@ import ( // GetShardReplicationPositions implements the http wrapper for /shard_replication_positions. // Query params: -// - cluster: repeated, cluster ID +// - cluster_id: repeated, cluster ID // - keyspace: repeated, keyspace names // - keyspace_shard: repeated, keyspace shard names func GetShardReplicationPositions(ctx context.Context, r Request, api *API) *JSONResponse { query := r.URL.Query() resp, err := api.server.GetShardReplicationPositions(ctx, &vtadminpb.GetShardReplicationPositionsRequest{ - ClusterIds: query["cluster"], + ClusterIds: query["cluster_id"], Keyspaces: query["keyspace"], KeyspaceShards: query["keyspace_shard"], }) diff --git a/go/vt/vtadmin/http/response.go b/go/vt/vtadmin/http/response.go index edc9485675a..899a610e57e 100644 --- a/go/vt/vtadmin/http/response.go +++ b/go/vt/vtadmin/http/response.go @@ -21,6 +21,7 @@ import ( "fmt" "net/http" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vtadmin/errors" ) @@ -44,6 +45,8 @@ type errorBody struct { // 500 unknown. func NewJSONResponse(value any, err error) *JSONResponse { if err != nil { + log.Errorf(err.Error()) + switch e := err.(type) { case errors.TypedError: return typedErrorJSONResponse(e) diff --git a/go/vt/vtadmin/http/schemas.go b/go/vt/vtadmin/http/schemas.go index e8e0e32a8de..4b157720cb7 100644 --- a/go/vt/vtadmin/http/schemas.go +++ b/go/vt/vtadmin/http/schemas.go @@ -27,7 +27,7 @@ import ( ) // FindSchema implements the http wrapper for the -// /schema/{table}[?cluster=[&cluster=]] route. +// /schema/{table}[?cluster_id=[&cluster_id=]] route. func FindSchema(ctx context.Context, r Request, api *API) *JSONResponse { vars := r.Vars() query := r.URL.Query() @@ -39,7 +39,7 @@ func FindSchema(ctx context.Context, r Request, api *API) *JSONResponse { schema, err := api.server.FindSchema(ctx, &vtadminpb.FindSchemaRequest{ Table: vars["table"], - ClusterIds: query["cluster"], + ClusterIds: query["cluster_id"], TableSizeOptions: sizeOpts, }) @@ -66,7 +66,7 @@ func GetSchema(ctx context.Context, r Request, api *API) *JSONResponse { return NewJSONResponse(schema, err) } -// GetSchemas implements the http wrapper for the /schemas[?cluster=[&cluster=] +// GetSchemas implements the http wrapper for the /schemas[?cluster_id=[&cluster_id=] // route. func GetSchemas(ctx context.Context, r Request, api *API) *JSONResponse { sizeOpts, err := getTableSizeOpts(r) @@ -75,7 +75,7 @@ func GetSchemas(ctx context.Context, r Request, api *API) *JSONResponse { } schemas, err := api.server.GetSchemas(ctx, &vtadminpb.GetSchemasRequest{ - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], TableSizeOptions: sizeOpts, }) @@ -134,14 +134,14 @@ func ReloadSchemas(ctx context.Context, r Request, api *API) *JSONResponse { Concurrency: concurrency, IncludePrimary: includePrimary, WaitPosition: q.Get("wait_position"), - ClusterIds: q["cluster"], + ClusterIds: q["cluster_id"], }) return NewJSONResponse(resp, err) } // ReloadTabletSchema implements the http wrapper for /tablets/{tablet}/reload_schema. // -// Note that all query parameters that apply to ReloadSchemas, except for `cluster`, +// Note that all query parameters that apply to ReloadSchemas, except for `cluster_id`, // are ignored. func ReloadTabletSchema(ctx context.Context, r Request, api *API) *JSONResponse { alias, err := r.Vars().GetTabletAlias("tablet") @@ -151,7 +151,7 @@ func ReloadTabletSchema(ctx context.Context, r Request, api *API) *JSONResponse resp, err := api.server.ReloadSchemas(ctx, &vtadminpb.ReloadSchemasRequest{ Tablets: []*topodatapb.TabletAlias{alias}, - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(resp, err) } diff --git a/go/vt/vtadmin/http/shards.go b/go/vt/vtadmin/http/shards.go index b8b83335f99..56d22742be6 100644 --- a/go/vt/vtadmin/http/shards.go +++ b/go/vt/vtadmin/http/shards.go @@ -70,7 +70,7 @@ func DeleteShards(ctx context.Context, r Request, api *API) *JSONResponse { } shardList := r.URL.Query()["keyspace_shard"] - shardList = sets.List(sets.New[string](shardList...)) + shardList = sets.List(sets.New(shardList...)) shards := make([]*vtctldatapb.Shard, len(shardList)) for i, kss := range shardList { ks, shard, err := topoproto.ParseKeyspaceShard(kss) diff --git a/go/vt/vtadmin/http/srvkeyspaces.go b/go/vt/vtadmin/http/srvkeyspaces.go new file mode 100644 index 00000000000..9b1c1dbdf8c --- /dev/null +++ b/go/vt/vtadmin/http/srvkeyspaces.go @@ -0,0 +1,51 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package http + +import ( + "context" + + "github.com/gorilla/mux" + + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" +) + +// GetSrvKeyspaces implements the http wrapper for the /api/srvkeyspaces route. +func GetSrvKeyspaces(ctx context.Context, r Request, api *API) *JSONResponse { + query := r.URL.Query() + + sks, err := api.server.GetSrvKeyspaces(ctx, &vtadminpb.GetSrvKeyspacesRequest{ + Cells: query["cell"], + ClusterIds: query["cluster_id"], + }) + + return NewJSONResponse(sks, err) +} + +// GetSrvKeyspace implements the http wrapper for the /api/srvkeyspaces/{cluster_id}/{name} route. +func GetSrvKeyspace(ctx context.Context, r Request, api *API) *JSONResponse { + query := r.URL.Query() + vars := mux.Vars(r.Request) + + sk, err := api.server.GetSrvKeyspace(ctx, &vtadminpb.GetSrvKeyspaceRequest{ + Cells: query["cell"], + Keyspace: vars["name"], + ClusterId: vars["cluster_id"], + }) + + return NewJSONResponse(sk, err) +} diff --git a/go/vt/vtadmin/http/srvvschemas.go b/go/vt/vtadmin/http/srvvschemas.go index 515c692aba6..1d9b13a46de 100644 --- a/go/vt/vtadmin/http/srvvschemas.go +++ b/go/vt/vtadmin/http/srvvschemas.go @@ -40,7 +40,7 @@ func GetSrvVSchemas(ctx context.Context, r Request, api *API) *JSONResponse { svs, err := api.server.GetSrvVSchemas(ctx, &vtadminpb.GetSrvVSchemasRequest{ Cells: query["cell"], - ClusterIds: query["cluster"], + ClusterIds: query["cluster_id"], }) return NewJSONResponse(svs, err) diff --git a/go/vt/vtadmin/http/tablets.go b/go/vt/vtadmin/http/tablets.go index b812fd1aebb..5e819ce945b 100644 --- a/go/vt/vtadmin/http/tablets.go +++ b/go/vt/vtadmin/http/tablets.go @@ -31,7 +31,7 @@ func GetFullStatus(ctx context.Context, r Request, api *API) *JSONResponse { return NewJSONResponse(nil, err) } status, err := api.server.GetFullStatus(ctx, &vtadminpb.GetFullStatusRequest{ - ClusterId: r.URL.Query()["cluster"][0], + ClusterId: r.URL.Query()["cluster_id"][0], Alias: alias, }) @@ -41,7 +41,7 @@ func GetFullStatus(ctx context.Context, r Request, api *API) *JSONResponse { // GetTablets implements the http wrapper for /tablets[?cluster=[&cluster=]]. func GetTablets(ctx context.Context, r Request, api *API) *JSONResponse { tablets, err := api.server.GetTablets(ctx, &vtadminpb.GetTabletsRequest{ - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(tablets, err) @@ -58,7 +58,7 @@ func GetTablet(ctx context.Context, r Request, api *API) *JSONResponse { tablet, err := api.server.GetTablet(ctx, &vtadminpb.GetTabletRequest{ Alias: alias, - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(tablet, err) @@ -80,7 +80,7 @@ func DeleteTablet(ctx context.Context, r Request, api *API) *JSONResponse { deleted, err := api.server.DeleteTablet(ctx, &vtadminpb.DeleteTabletRequest{ Alias: alias, AllowPrimary: allowPrimary, - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(deleted, err) @@ -97,7 +97,7 @@ func PingTablet(ctx context.Context, r Request, api *API) *JSONResponse { ping, err := api.server.PingTablet(ctx, &vtadminpb.PingTabletRequest{ Alias: alias, - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(ping, err) @@ -114,7 +114,7 @@ func RefreshState(ctx context.Context, r Request, api *API) *JSONResponse { result, err := api.server.RefreshState(ctx, &vtadminpb.RefreshStateRequest{ Alias: alias, - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(result, err) @@ -124,7 +124,7 @@ func RefreshState(ctx context.Context, r Request, api *API) *JSONResponse { // PUT /tablet/{tablet}/refresh_replication_source. // // Query params: -// - cluster: repeatable, list of cluster IDs to restrict to when searching fo +// - cluster_id: repeatable, list of cluster IDs to restrict to when searching fo // a tablet with that alias. // // PUT body is unused; this endpoint takes no additional options. @@ -138,7 +138,7 @@ func RefreshTabletReplicationSource(ctx context.Context, r Request, api *API) *J result, err := api.server.RefreshTabletReplicationSource(ctx, &vtadminpb.RefreshTabletReplicationSourceRequest{ Alias: alias, - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(result, err) @@ -155,7 +155,7 @@ func RunHealthCheck(ctx context.Context, r Request, api *API) *JSONResponse { result, err := api.server.RunHealthCheck(ctx, &vtadminpb.RunHealthCheckRequest{ Alias: alias, - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(result, err) @@ -172,7 +172,7 @@ func SetReadOnly(ctx context.Context, r Request, api *API) *JSONResponse { result, err := api.server.SetReadOnly(ctx, &vtadminpb.SetReadOnlyRequest{ Alias: alias, - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(result, err) @@ -189,7 +189,7 @@ func SetReadWrite(ctx context.Context, r Request, api *API) *JSONResponse { result, err := api.server.SetReadWrite(ctx, &vtadminpb.SetReadWriteRequest{ Alias: alias, - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(result, err) @@ -206,7 +206,7 @@ func StartReplication(ctx context.Context, r Request, api *API) *JSONResponse { result, err := api.server.StartReplication(ctx, &vtadminpb.StartReplicationRequest{ Alias: alias, - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(result, err) @@ -223,7 +223,7 @@ func StopReplication(ctx context.Context, r Request, api *API) *JSONResponse { result, err := api.server.StopReplication(ctx, &vtadminpb.StopReplicationRequest{ Alias: alias, - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(result, err) @@ -233,7 +233,7 @@ func StopReplication(ctx context.Context, r Request, api *API) *JSONResponse { // POST /tablet/{tablet}/tablet_externally_promoted. // // Query params: -// - `cluster`: repeated list of clusterIDs to limit the request to. +// - `cluster_id`: repeated list of clusterIDs to limit the request to. // // POST body is unused; this endpoint takes no additional options. func TabletExternallyPromoted(ctx context.Context, r Request, api *API) *JSONResponse { @@ -246,7 +246,7 @@ func TabletExternallyPromoted(ctx context.Context, r Request, api *API) *JSONRes result, err := api.server.TabletExternallyPromoted(ctx, &vtadminpb.TabletExternallyPromotedRequest{ Alias: alias, - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(result, err) } diff --git a/go/vt/vtadmin/http/vschemas.go b/go/vt/vtadmin/http/vschemas.go index 09d706527c1..b9494df696b 100644 --- a/go/vt/vtadmin/http/vschemas.go +++ b/go/vt/vtadmin/http/vschemas.go @@ -36,10 +36,10 @@ func GetVSchema(ctx context.Context, r Request, api *API) *JSONResponse { } // GetVSchemas implements the http wrapper for the -// /vschemas[?cluster=[&cluster=]] route. +// /vschemas[?cluster_id=[&cluster_id=]] route. func GetVSchemas(ctx context.Context, r Request, api *API) *JSONResponse { vschemas, err := api.server.GetVSchemas(ctx, &vtadminpb.GetVSchemasRequest{ - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(vschemas, err) diff --git a/go/vt/vtadmin/http/vtctlds.go b/go/vt/vtadmin/http/vtctlds.go index 954c73beae5..b27c5d06520 100644 --- a/go/vt/vtadmin/http/vtctlds.go +++ b/go/vt/vtadmin/http/vtctlds.go @@ -22,10 +22,10 @@ import ( vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" ) -// GetVtctlds implements the http wrapper for /vtctlds[?cluster=[&cluster=]]. +// GetVtctlds implements the http wrapper for /vtctlds[?cluster_id=[&cluster_id=]]. func GetVtctlds(ctx context.Context, r Request, api *API) *JSONResponse { vtctlds, err := api.server.GetVtctlds(ctx, &vtadminpb.GetVtctldsRequest{ - ClusterIds: r.URL.Query()["cluster"], + ClusterIds: r.URL.Query()["cluster_id"], }) return NewJSONResponse(vtctlds, err) diff --git a/go/vt/vtadmin/http/vtexplain.go b/go/vt/vtadmin/http/vtexplain.go index 53a70f3b899..b080c004b93 100644 --- a/go/vt/vtadmin/http/vtexplain.go +++ b/go/vt/vtadmin/http/vtexplain.go @@ -22,11 +22,11 @@ import ( vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" ) -// VTExplain implements the http wrapper for /vtexplain?cluster=&keyspace=&sql= +// VTExplain implements the http wrapper for /vtexplain?cluster_id=&keyspace=&sql= func VTExplain(ctx context.Context, r Request, api *API) *JSONResponse { query := r.URL.Query() res, err := api.server.VTExplain(ctx, &vtadminpb.VTExplainRequest{ - Cluster: query.Get("cluster"), + Cluster: query.Get("cluster_id"), Keyspace: query.Get("keyspace"), Sql: query.Get("sql"), }) diff --git a/go/vt/vtadmin/http/workflows.go b/go/vt/vtadmin/http/workflows.go index 2a6fed4766e..80c6dff775b 100644 --- a/go/vt/vtadmin/http/workflows.go +++ b/go/vt/vtadmin/http/workflows.go @@ -48,7 +48,7 @@ func GetWorkflow(ctx context.Context, r Request, api *API) *JSONResponse { // method. // // Its route is /workflows, with query params: -// - cluster: repeated, cluster IDs +// - cluster_id: repeated, cluster IDs // - active_only // - keyspace: repeated // - ignore_keyspace: repeated @@ -61,7 +61,7 @@ func GetWorkflows(ctx context.Context, r Request, api *API) *JSONResponse { } workflows, err := api.server.GetWorkflows(ctx, &vtadminpb.GetWorkflowsRequest{ - ClusterIds: query["cluster"], + ClusterIds: query["cluster_id"], Keyspaces: query["keyspace"], IgnoreKeyspaces: query["ignore_keyspace"], ActiveOnly: activeOnly, diff --git a/go/vt/vtadmin/rbac/rbac.go b/go/vt/vtadmin/rbac/rbac.go index 7b5b0e8c8e8..12d23b3ac20 100644 --- a/go/vt/vtadmin/rbac/rbac.go +++ b/go/vt/vtadmin/rbac/rbac.go @@ -105,8 +105,12 @@ const ( /* vschema resources */ - SrvVSchemaResource Resource = "SrvVSchema" - VSchemaResource Resource = "VSchema" + VSchemaResource Resource = "VSchema" + + /* serving graph resources */ + + SrvKeyspaceResource Resource = "SrvKeyspace" + SrvVSchemaResource Resource = "SrvVSchema" /* misc resources */ diff --git a/go/vt/vtadmin/testutil/authztestgen/config.json b/go/vt/vtadmin/testutil/authztestgen/config.json index ac89d7f5557..01b0da66465 100644 --- a/go/vt/vtadmin/testutil/authztestgen/config.json +++ b/go/vt/vtadmin/testutil/authztestgen/config.json @@ -1736,6 +1736,7 @@ } ], "request": "&vtadminpb.VTExplainRequest{\nCluster: \"test\",\nKeyspace: \"test\",\nSql: \"select id from t1;\",}", + "serialize_cases": true, "cases": [ { "name": "unauthorized actor", diff --git a/go/vt/vtadmin/testutil/cluster.go b/go/vt/vtadmin/testutil/cluster.go index 2f825093e1f..9141d6b0c22 100644 --- a/go/vt/vtadmin/testutil/cluster.go +++ b/go/vt/vtadmin/testutil/cluster.go @@ -164,10 +164,10 @@ type IntegrationTestCluster struct { // // (TODO|@ajm188): Unify this with the BuildCluster API. Also this does not // support any cluster methods that involve vtgate/vitessdriver queries. -func BuildIntegrationTestCluster(t testing.TB, c *vtadminpb.Cluster, cells ...string) *IntegrationTestCluster { +func BuildIntegrationTestCluster(t testing.TB, ctx context.Context, c *vtadminpb.Cluster, cells ...string) *IntegrationTestCluster { t.Helper() - ts, factory := memorytopo.NewServerAndFactory(cells...) + ts, factory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := grpcvtctldtestutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return grpcvtctldserver.NewVtctldServer(ts) }) diff --git a/go/vt/vtadmin/vtctldclient/fakevtctldclient/vtctldclient.go b/go/vt/vtadmin/vtctldclient/fakevtctldclient/vtctldclient.go index cf0d7b3f72b..e2701a1f594 100644 --- a/go/vt/vtadmin/vtctldclient/fakevtctldclient/vtctldclient.go +++ b/go/vt/vtadmin/vtctldclient/fakevtctldclient/vtctldclient.go @@ -139,6 +139,10 @@ type VtctldClient struct { Response *vtctldatapb.ValidateVersionKeyspaceResponse Error error } + WorkflowUpdateResults map[string]struct { + Response *vtctldatapb.WorkflowUpdateResponse + Error error + } } // Compile-time type assertion to make sure we haven't overriden a method @@ -671,3 +675,16 @@ func (fake *VtctldClient) ValidateVersionKeyspace(ctx context.Context, req *vtct return nil, fmt.Errorf("%w: no result set for %s", assert.AnError, key) } + +// WorkflowUpdate is part of the vtctldclient.VtctldClient interface. +func (fake *VtctldClient) WorkflowUpdate(ctx context.Context, req *vtctldatapb.WorkflowUpdateRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowUpdateResponse, error) { + if fake.WorkflowUpdateResults == nil { + return nil, fmt.Errorf("%w: WorkflowUpdateResults not set on fake vtctldclient", assert.AnError) + } + + if result, ok := fake.WorkflowUpdateResults[req.Keyspace]; ok { + return result.Response, result.Error + } + + return nil, fmt.Errorf("%w: no result set for keyspace %s", assert.AnError, req.Keyspace) +} diff --git a/go/vt/vtadmin/vtsql/config.go b/go/vt/vtadmin/vtsql/config.go index bf3eee89fab..52b162236fd 100644 --- a/go/vt/vtadmin/vtsql/config.go +++ b/go/vt/vtadmin/vtsql/config.go @@ -97,13 +97,20 @@ func (c *Config) Parse(args []string) error { "a Username and Password. Templates are given the context of the vtsql.Config, and primarily "+ "interoplate the cluster name and ID variables.") effectiveUser := fs.String("effective-user", "", "username to send queries on behalf of") - + credentialsUsername := fs.String("credentials-username", "", + "A string specifying the Username to use for authenticating with vtgate. "+ + "Used with credentials-password in place of credentials-path-tmpl, in cases where providing a static file cannot be done.") + credentialsPassword := fs.String("credentials-password", "", + "A string specifying a Password to use for authenticating with vtgate. "+ + "Used with credentials-username in place of credentials-path-tmpl, in cases where providing a static file cannot be done.") if err := fs.Parse(args); err != nil { return err } - var creds *grpcclient.StaticAuthClientCreds + var username, password string + // First load credentials from credentials-path-tmpl, if provided + var tmplStrCreds *grpcclient.StaticAuthClientCreds if *credentialsTmplStr != "" { _creds, path, err := credentials.LoadFromTemplate(*credentialsTmplStr, c) if err != nil { @@ -111,20 +118,34 @@ func (c *Config) Parse(args []string) error { } c.CredentialsPath = path - creds = _creds + tmplStrCreds = _creds + } + if tmplStrCreds != nil { + username = tmplStrCreds.Username + password = tmplStrCreds.Password } - if creds != nil { - // If we did not receive an effective user, but loaded credentials, then the - // immediate user is the effective user. - if *effectiveUser == "" { - *effectiveUser = creds.Username - } + // If credentials-username and credentials-password are provided, use those credentials instead + if *credentialsUsername != "" { + username = *credentialsUsername + } + if *credentialsPassword != "" { + password = *credentialsPassword + } - c.Credentials = &StaticAuthCredentials{ - EffectiveUser: *effectiveUser, - StaticAuthClientCreds: creds, - } + // If we did not receive an effective user, but loaded user credentials, then the + // immediate user is the effective user. + if *effectiveUser == "" { + *effectiveUser = username + } + + // Set credentials to values potentially supplied by credentials-password and credentials-username + c.Credentials = &StaticAuthCredentials{ + EffectiveUser: *effectiveUser, + StaticAuthClientCreds: &grpcclient.StaticAuthClientCreds{ + Username: username, + Password: password, + }, } return nil diff --git a/go/vt/vtadmin/vtsql/config_test.go b/go/vt/vtadmin/vtsql/config_test.go index 2fe2cea5d22..f23874c5039 100644 --- a/go/vt/vtadmin/vtsql/config_test.go +++ b/go/vt/vtadmin/vtsql/config_test.go @@ -98,6 +98,97 @@ func TestConfigParse(t *testing.T) { assert.Equal(t, expectedCreds, cfg.Credentials) }) + t.Run("uses vtsql-credentials-password", func(t *testing.T) { + t.Parallel() + + f, err := os.CreateTemp("", "vtsql-config-test-testcluster-*") // testcluster is going to appear in the template + require.NoError(t, err) + + _, err = f.Write([]byte(`{ + "Username": "vtadmin", + "Password": "hunter2" +}`)) + require.NoError(t, err) + + path := f.Name() + defer os.Remove(path) + f.Close() + + dir := filepath.Dir(path) + baseParts := strings.Split(filepath.Base(path), "-") + tmplParts := append(baseParts[:3], "{{ .Cluster.Name }}", baseParts[4]) + + cfg := &Config{ + Cluster: &vtadminpb.Cluster{ + Name: "testcluster", + }, + } + + credsTmplStr := filepath.Join(dir, strings.Join(tmplParts, "-")) + + args := []string{ + "--discovery-tags=a:1,b:2", + "--effective-user=vt_appdebug", + "--discovery-tags=c:3", + "--credentials-password=my_password", + fmt.Sprintf("--credentials-path-tmpl=%s", credsTmplStr), + } + + expectedCreds := &StaticAuthCredentials{ + EffectiveUser: "vt_appdebug", + StaticAuthClientCreds: &grpcclient.StaticAuthClientCreds{ + Username: "vtadmin", + Password: "my_password", + }, + } + expectedTags := []string{ + "a:1", + "b:2", + "c:3", + } + + err = cfg.Parse(args) + assert.NoError(t, err) + assert.Equal(t, expectedTags, cfg.ResolverOptions.DiscoveryTags) + assert.Equal(t, expectedCreds, cfg.Credentials) + }) + + t.Run("it uses vtsql credentials passed as flags", func(t *testing.T) { + t.Parallel() + + cfg := &Config{ + Cluster: &vtadminpb.Cluster{ + Name: "testcluster", + }, + } + + args := []string{ + "--discovery-tags=a:1,b:2", + "--effective-user=vt_appdebug", + "--discovery-tags=c:3", + "--credentials-username=vtadmin", + "--credentials-password=my_password", + } + + expectedCreds := &StaticAuthCredentials{ + EffectiveUser: "vt_appdebug", + StaticAuthClientCreds: &grpcclient.StaticAuthClientCreds{ + Username: "vtadmin", + Password: "my_password", + }, + } + expectedTags := []string{ + "a:1", + "b:2", + "c:3", + } + + err = cfg.Parse(args) + assert.NoError(t, err) + assert.Equal(t, expectedTags, cfg.ResolverOptions.DiscoveryTags) + assert.Equal(t, expectedCreds, cfg.Credentials) + }) + t.Run("", func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtcombo/tablet_map.go b/go/vt/vtcombo/tablet_map.go index 1a666d3b855..77b7f267a30 100644 --- a/go/vt/vtcombo/tablet_map.go +++ b/go/vt/vtcombo/tablet_map.go @@ -89,7 +89,7 @@ func CreateTablet( } log.Infof("Creating %v tablet %v for %v/%v", tabletType, topoproto.TabletAliasString(alias), keyspace, shard) - controller := tabletserver.NewServer(topoproto.TabletAliasString(alias), ts, alias) + controller := tabletserver.NewServer(ctx, topoproto.TabletAliasString(alias), ts, alias) initTabletType := tabletType if tabletType == topodatapb.TabletType_PRIMARY { initTabletType = topodatapb.TabletType_REPLICA @@ -394,6 +394,10 @@ func CreateKs( return 0, fmt.Errorf("cannot load vschema file %v for keyspace %v: %v", f, keyspace, err) } + _, err = vindexes.BuildKeyspace(formal) + if err != nil { + return 0, fmt.Errorf("BuildKeyspace(%v) failed: %v", keyspace, err) + } if err := ts.SaveVSchema(ctx, keyspace, formal); err != nil { return 0, fmt.Errorf("SaveVSchema(%v) failed: %v", keyspace, err) } @@ -703,6 +707,16 @@ func (itc *internalTabletConn) VStreamRows( return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) } +// VStreamTables is part of the QueryService interface. +func (itc *internalTabletConn) VStreamTables( + ctx context.Context, + request *binlogdatapb.VStreamTablesRequest, + send func(*binlogdatapb.VStreamTablesResponse) error, +) error { + err := itc.tablet.qsc.QueryService().VStreamTables(ctx, request, send) + return tabletconn.ErrorFromGRPC(vterrors.ToGRPC(err)) +} + // VStreamResults is part of the QueryService interface. func (itc *internalTabletConn) VStreamResults( ctx context.Context, @@ -861,6 +875,22 @@ func (itmc *internalTabletManagerClient) WaitForPosition(context.Context, *topod return fmt.Errorf("not implemented in vtcombo") } +// +// VReplication related methods +// + +func (itmc *internalTabletManagerClient) CreateVReplicationWorkflow(context.Context, *topodatapb.Tablet, *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) { + return nil, fmt.Errorf("not implemented in vtcombo") +} + +func (itmc *internalTabletManagerClient) DeleteVReplicationWorkflow(context.Context, *topodatapb.Tablet, *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (*tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, error) { + return nil, fmt.Errorf("not implemented in vtcombo") +} + +func (itmc *internalTabletManagerClient) ReadVReplicationWorkflow(context.Context, *topodatapb.Tablet, *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { + return nil, fmt.Errorf("not implemented in vtcombo") +} + func (itmc *internalTabletManagerClient) VReplicationExec(context.Context, *topodatapb.Tablet, string) (*querypb.QueryResult, error) { return nil, fmt.Errorf("not implemented in vtcombo") } @@ -869,6 +899,10 @@ func (itmc *internalTabletManagerClient) VReplicationWaitForPos(context.Context, return fmt.Errorf("not implemented in vtcombo") } +func (itmc *internalTabletManagerClient) UpdateVReplicationWorkflow(context.Context, *topodatapb.Tablet, *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, error) { + return nil, fmt.Errorf("not implemented in vtcombo") +} + func (itmc *internalTabletManagerClient) ResetReplication(context.Context, *topodatapb.Tablet) error { return fmt.Errorf("not implemented in vtcombo") } @@ -909,6 +943,10 @@ func (itmc *internalTabletManagerClient) RestoreFromBackup(context.Context, *top return nil, fmt.Errorf("not implemented in vtcombo") } +func (itmc *internalTabletManagerClient) CheckThrottler(context.Context, *topodatapb.Tablet, *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) { + return nil, fmt.Errorf("not implemented in vtcombo") +} + func (itmc *internalTabletManagerClient) Close() { } @@ -955,3 +993,6 @@ func (itmc *internalTabletManagerClient) ResetReplicationParameters(context.Cont func (itmc *internalTabletManagerClient) ReplicaWasRestarted(context.Context, *topodatapb.Tablet, *topodatapb.TabletAlias) error { return fmt.Errorf("not implemented in vtcombo") } +func (itmc *internalTabletManagerClient) ResetSequences(ctx context.Context, tablet *topodatapb.Tablet, tables []string) error { + return fmt.Errorf("not implemented in vtcombo") +} diff --git a/go/vt/vtctl/backup.go b/go/vt/vtctl/backup.go index 8087580125c..c2f90ec4b14 100644 --- a/go/vt/vtctl/backup.go +++ b/go/vt/vtctl/backup.go @@ -73,6 +73,7 @@ func commandBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.F concurrency := subFlags.Int("concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously") allowPrimary := subFlags.Bool("allow_primary", false, "Allows backups to be taken on primary. Warning!! If you are using the builtin backup engine, this will shutdown your primary mysql for as long as it takes to create a backup.") incrementalFromPos := subFlags.String("incremental_from_pos", "", "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position") + upgradeSafe := subFlags.Bool("upgrade-safe", false, "Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.") if err := subFlags.Parse(args); err != nil { return err @@ -91,6 +92,7 @@ func commandBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.F Concurrency: uint64(*concurrency), AllowPrimary: *allowPrimary, IncrementalFromPos: *incrementalFromPos, + UpgradeSafe: *upgradeSafe, }, &backupEventStreamLogger{logger: wr.Logger(), ctx: ctx}) } @@ -112,6 +114,8 @@ func (b *backupEventStreamLogger) Send(resp *vtctldatapb.BackupResponse) error { func commandBackupShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { concurrency := subFlags.Int("concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously") allowPrimary := subFlags.Bool("allow_primary", false, "Whether to use primary tablet for backup. Warning!! If you are using the builtin backup engine, this will shutdown your primary mysql for as long as it takes to create a backup.") + incrementalFromPos := subFlags.String("incremental_from_pos", "", "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position") + upgradeSafe := subFlags.Bool("upgrade-safe", false, "Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.") if err := subFlags.Parse(args); err != nil { return err @@ -126,10 +130,12 @@ func commandBackupShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *pf } return wr.VtctldServer().BackupShard(&vtctldatapb.BackupShardRequest{ - Keyspace: keyspace, - Shard: shard, - Concurrency: uint64(*concurrency), - AllowPrimary: *allowPrimary, + Keyspace: keyspace, + Shard: shard, + Concurrency: uint64(*concurrency), + AllowPrimary: *allowPrimary, + IncrementalFromPos: *incrementalFromPos, + UpgradeSafe: *upgradeSafe, }, &backupEventStreamLogger{logger: wr.Logger(), ctx: ctx}) } @@ -202,6 +208,7 @@ func (b *backupRestoreEventStreamLogger) Send(resp *vtctldatapb.RestoreFromBacku func commandRestoreFromBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { backupTimestampStr := subFlags.String("backup_timestamp", "", "Use the backup taken at or before this timestamp rather than using the latest backup.") restoreToPos := subFlags.String("restore_to_pos", "", "Run a point in time recovery that ends with the given position. This will attempt to use one full backup followed by zero or more incremental backups") + restoreToTimestampStr := subFlags.String("restore_to_timestamp", "", "Run a point in time recovery that restores up to, and excluding, given timestamp in RFC3339 format (`2006-01-02T15:04:05Z07:00`). This will attempt to use one full backup followed by zero or more incremental backups") dryRun := subFlags.Bool("dry_run", false, "Only validate restore steps, do not actually restore data") if err := subFlags.Parse(args); err != nil { return err @@ -227,10 +234,18 @@ func commandRestoreFromBackup(ctx context.Context, wr *wrangler.Wrangler, subFla return err } + var restoreToTimestamp time.Time + if *restoreToTimestampStr != "" { + restoreToTimestamp, err = mysqlctl.ParseRFC3339(*restoreToTimestampStr) + if err != nil { + return vterrors.Wrapf(err, "parsing --restore_to_timestamp args") + } + } req := &vtctldatapb.RestoreFromBackupRequest{ - TabletAlias: tabletAlias, - RestoreToPos: *restoreToPos, - DryRun: *dryRun, + TabletAlias: tabletAlias, + RestoreToPos: *restoreToPos, + RestoreToTimestamp: protoutil.TimeToProto(restoreToTimestamp), + DryRun: *dryRun, } if !backupTime.IsZero() { diff --git a/go/vt/vtctl/endtoend/get_schema_test.go b/go/vt/vtctl/endtoend/get_schema_test.go index fe25080c7ae..2373fb6e3a5 100644 --- a/go/vt/vtctl/endtoend/get_schema_test.go +++ b/go/vt/vtctl/endtoend/get_schema_test.go @@ -26,9 +26,10 @@ import ( ) func TestGetSchema(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - topo := memorytopo.NewServer("zone1", "zone2", "zone3") + topo := memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ diff --git a/go/vt/vtctl/endtoend/onlineddl_show_test.go b/go/vt/vtctl/endtoend/onlineddl_show_test.go index 0b982bc7545..fe795af752d 100644 --- a/go/vt/vtctl/endtoend/onlineddl_show_test.go +++ b/go/vt/vtctl/endtoend/onlineddl_show_test.go @@ -94,9 +94,11 @@ func TestShowOnlineDDL_Cancel(t *testing.T) { func onlineDDLTest(t *testing.T, args []string, expectedQuery string) { t.Helper() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - fakeTopo := memorytopo.NewServer("zone1", "zone2", "zone3") + fakeTopo := memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") + defer fakeTopo.Close() tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ diff --git a/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient.go b/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient.go index a11db6a4952..14147316508 100644 --- a/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient.go +++ b/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient.go @@ -23,6 +23,7 @@ import ( "sync" "time" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/logutil" logutilpb "vitess.io/vitess/go/vt/proto/logutil" @@ -111,7 +112,7 @@ type streamResultAdapter struct { func (s *streamResultAdapter) Recv() (*logutilpb.Event, error) { if s.index < len(s.lines) { result := &logutilpb.Event{ - Time: logutil.TimeToProto(time.Now()), + Time: protoutil.TimeToProto(time.Now()), Level: logutilpb.Level_CONSOLE, File: "fakevtctlclient", Line: -1, diff --git a/go/vt/vtctl/grpcvtctlclient/client_test.go b/go/vt/vtctl/grpcvtctlclient/client_test.go index a50a79ecdce..50e1968533e 100644 --- a/go/vt/vtctl/grpcvtctlclient/client_test.go +++ b/go/vt/vtctl/grpcvtctlclient/client_test.go @@ -17,6 +17,7 @@ limitations under the License. package grpcvtctlclient import ( + "context" "fmt" "io" "net" @@ -38,7 +39,9 @@ import ( // the test here creates a fake server implementation, a fake client // implementation, and runs the test suite against the setup. func TestVtctlServer(t *testing.T) { - ts := vtctlclienttest.CreateTopoServer(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := vtctlclienttest.CreateTopoServer(t, ctx) // Listen on a random port listener, err := net.Listen("tcp", "127.0.0.1:0") @@ -65,7 +68,9 @@ func TestVtctlServer(t *testing.T) { // the test here creates a fake server implementation, a fake client with auth // implementation, and runs the test suite against the setup. func TestVtctlAuthClient(t *testing.T) { - ts := vtctlclienttest.CreateTopoServer(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := vtctlclienttest.CreateTopoServer(t, ctx) // Listen on a random port listener, err := net.Listen("tcp", "127.0.0.1:0") diff --git a/go/vt/vtctl/grpcvtctldclient/client_gen.go b/go/vt/vtctl/grpcvtctldclient/client_gen.go index 6267d650c7a..54f27e0a142 100644 --- a/go/vt/vtctl/grpcvtctldclient/client_gen.go +++ b/go/vt/vtctl/grpcvtctldclient/client_gen.go @@ -101,6 +101,15 @@ func (client *gRPCVtctldClient) BackupShard(ctx context.Context, in *vtctldatapb return client.c.BackupShard(ctx, in, opts...) } +// CancelSchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) CancelSchemaMigration(ctx context.Context, in *vtctldatapb.CancelSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.CancelSchemaMigrationResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.CancelSchemaMigration(ctx, in, opts...) +} + // ChangeTabletType is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) ChangeTabletType(ctx context.Context, in *vtctldatapb.ChangeTabletTypeRequest, opts ...grpc.CallOption) (*vtctldatapb.ChangeTabletTypeResponse, error) { if client.c == nil { @@ -110,6 +119,24 @@ func (client *gRPCVtctldClient) ChangeTabletType(ctx context.Context, in *vtctld return client.c.ChangeTabletType(ctx, in, opts...) } +// CleanupSchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) CleanupSchemaMigration(ctx context.Context, in *vtctldatapb.CleanupSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.CleanupSchemaMigrationResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.CleanupSchemaMigration(ctx, in, opts...) +} + +// CompleteSchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) CompleteSchemaMigration(ctx context.Context, in *vtctldatapb.CompleteSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.CompleteSchemaMigrationResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.CompleteSchemaMigration(ctx, in, opts...) +} + // CreateKeyspace is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) CreateKeyspace(ctx context.Context, in *vtctldatapb.CreateKeyspaceRequest, opts ...grpc.CallOption) (*vtctldatapb.CreateKeyspaceResponse, error) { if client.c == nil { @@ -317,6 +344,15 @@ func (client *gRPCVtctldClient) GetSchema(ctx context.Context, in *vtctldatapb.G return client.c.GetSchema(ctx, in, opts...) } +// GetSchemaMigrations is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) GetSchemaMigrations(ctx context.Context, in *vtctldatapb.GetSchemaMigrationsRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSchemaMigrationsResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.GetSchemaMigrations(ctx, in, opts...) +} + // GetShard is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) GetShard(ctx context.Context, in *vtctldatapb.GetShardRequest, opts ...grpc.CallOption) (*vtctldatapb.GetShardResponse, error) { if client.c == nil { @@ -434,6 +470,33 @@ func (client *gRPCVtctldClient) InitShardPrimary(ctx context.Context, in *vtctld return client.c.InitShardPrimary(ctx, in, opts...) } +// LaunchSchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) LaunchSchemaMigration(ctx context.Context, in *vtctldatapb.LaunchSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.LaunchSchemaMigrationResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.LaunchSchemaMigration(ctx, in, opts...) +} + +// MoveTablesComplete is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) MoveTablesComplete(ctx context.Context, in *vtctldatapb.MoveTablesCompleteRequest, opts ...grpc.CallOption) (*vtctldatapb.MoveTablesCompleteResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.MoveTablesComplete(ctx, in, opts...) +} + +// MoveTablesCreate is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) MoveTablesCreate(ctx context.Context, in *vtctldatapb.MoveTablesCreateRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowStatusResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.MoveTablesCreate(ctx, in, opts...) +} + // PingTablet is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) PingTablet(ctx context.Context, in *vtctldatapb.PingTabletRequest, opts ...grpc.CallOption) (*vtctldatapb.PingTabletResponse, error) { if client.c == nil { @@ -551,6 +614,15 @@ func (client *gRPCVtctldClient) ReparentTablet(ctx context.Context, in *vtctldat return client.c.ReparentTablet(ctx, in, opts...) } +// ReshardCreate is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) ReshardCreate(ctx context.Context, in *vtctldatapb.ReshardCreateRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowStatusResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.ReshardCreate(ctx, in, opts...) +} + // RestoreFromBackup is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) RestoreFromBackup(ctx context.Context, in *vtctldatapb.RestoreFromBackupRequest, opts ...grpc.CallOption) (vtctlservicepb.Vtctld_RestoreFromBackupClient, error) { if client.c == nil { @@ -560,6 +632,15 @@ func (client *gRPCVtctldClient) RestoreFromBackup(ctx context.Context, in *vtctl return client.c.RestoreFromBackup(ctx, in, opts...) } +// RetrySchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) RetrySchemaMigration(ctx context.Context, in *vtctldatapb.RetrySchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.RetrySchemaMigrationResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.RetrySchemaMigration(ctx, in, opts...) +} + // RunHealthCheck is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) RunHealthCheck(ctx context.Context, in *vtctldatapb.RunHealthCheckRequest, opts ...grpc.CallOption) (*vtctldatapb.RunHealthCheckResponse, error) { if client.c == nil { @@ -784,3 +865,39 @@ func (client *gRPCVtctldClient) ValidateVersionShard(ctx context.Context, in *vt return client.c.ValidateVersionShard(ctx, in, opts...) } + +// WorkflowDelete is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) WorkflowDelete(ctx context.Context, in *vtctldatapb.WorkflowDeleteRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowDeleteResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.WorkflowDelete(ctx, in, opts...) +} + +// WorkflowStatus is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) WorkflowStatus(ctx context.Context, in *vtctldatapb.WorkflowStatusRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowStatusResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.WorkflowStatus(ctx, in, opts...) +} + +// WorkflowSwitchTraffic is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) WorkflowSwitchTraffic(ctx context.Context, in *vtctldatapb.WorkflowSwitchTrafficRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowSwitchTrafficResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.WorkflowSwitchTraffic(ctx, in, opts...) +} + +// WorkflowUpdate is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) WorkflowUpdate(ctx context.Context, in *vtctldatapb.WorkflowUpdateRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowUpdateResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.WorkflowUpdate(ctx, in, opts...) +} diff --git a/go/vt/vtctl/grpcvtctldclient/client_test.go b/go/vt/vtctl/grpcvtctldclient/client_test.go index 1b42f8e5270..93c95ffa607 100644 --- a/go/vt/vtctl/grpcvtctldclient/client_test.go +++ b/go/vt/vtctl/grpcvtctldclient/client_test.go @@ -36,8 +36,10 @@ import ( ) func TestFindAllShardsInKeyspace(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") + defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return grpcvtctldserver.NewVtctldServer(ts) }) @@ -80,9 +82,11 @@ func TestFindAllShardsInKeyspace(t *testing.T) { } func TestGetKeyspace(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ts := memorytopo.NewServer("cell1") + ts := memorytopo.NewServer(ctx, "cell1") + defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return grpcvtctldserver.NewVtctldServer(ts) }) @@ -107,9 +111,11 @@ func TestGetKeyspace(t *testing.T) { } func TestGetKeyspaces(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ts := memorytopo.NewServer("cell1") + ts := memorytopo.NewServer(ctx, "cell1") + defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return grpcvtctldserver.NewVtctldServer(ts) }) diff --git a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go index c1e5899aac6..f5f7847b499 100644 --- a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go +++ b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go @@ -21,6 +21,7 @@ import ( "fmt" "testing" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/mysqlctl" "github.com/stretchr/testify/assert" @@ -41,11 +42,14 @@ import ( ) func TestInitShardPrimary(t *testing.T) { - ts := memorytopo.NewServer("cell1") + ctx := utils.LeakCheckContext(t) + ts := memorytopo.NewServer(ctx, "cell1") tmc := tmclient.NewTabletManagerClient() + defer tmc.Close() wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmc) primaryDb := fakesqldb.New(t) + defer primaryDb.Close() primaryDb.AddQuery("create database if not exists `vt_test_keyspace`", &sqltypes.Result{InsertID: 0, RowsAffected: 0}) tablet1 := testlib.NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, primaryDb) @@ -61,13 +65,11 @@ func TestInitShardPrimary(t *testing.T) { tablet2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These come from tablet startup "STOP SLAVE", - "RESET SLAVE ALL", "FAKE SET MASTER", "START SLAVE", // These come from InitShardPrimary "FAKE RESET ALL REPLICATION", "FAKE SET SLAVE POSITION", - "RESET SLAVE ALL", "FAKE SET MASTER", "START SLAVE", } @@ -75,12 +77,10 @@ func TestInitShardPrimary(t *testing.T) { tablet3.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", - "RESET SLAVE ALL", "FAKE SET MASTER", "START SLAVE", "FAKE RESET ALL REPLICATION", "FAKE SET SLAVE POSITION", - "RESET SLAVE ALL", "FAKE SET MASTER", "START SLAVE", } @@ -105,11 +105,14 @@ func TestInitShardPrimary(t *testing.T) { } func TestInitShardPrimaryNoFormerPrimary(t *testing.T) { - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") tmc := tmclient.NewTabletManagerClient() wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmc) primaryDb := fakesqldb.New(t) + defer primaryDb.Close() primaryDb.AddQuery("create database if not exists `vt_test_keyspace`", &sqltypes.Result{InsertID: 0, RowsAffected: 0}) tablet1 := testlib.NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_REPLICA, primaryDb) @@ -125,7 +128,6 @@ func TestInitShardPrimaryNoFormerPrimary(t *testing.T) { tablet2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE RESET ALL REPLICATION", "FAKE SET SLAVE POSITION", - "RESET SLAVE ALL", "FAKE SET MASTER", "START SLAVE", } @@ -134,7 +136,6 @@ func TestInitShardPrimaryNoFormerPrimary(t *testing.T) { tablet3.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE RESET ALL REPLICATION", "FAKE SET SLAVE POSITION", - "RESET SLAVE ALL", "FAKE SET MASTER", "START SLAVE", } diff --git a/go/vt/vtctl/grpcvtctldserver/query.go b/go/vt/vtctl/grpcvtctldserver/query.go new file mode 100644 index 00000000000..2fac399bd4f --- /dev/null +++ b/go/vt/vtctl/grpcvtctldserver/query.go @@ -0,0 +1,243 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package grpcvtctldserver + +import ( + "fmt" + "strings" + "time" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/schematools" + + querypb "vitess.io/vitess/go/vt/proto/query" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + "vitess.io/vitess/go/vt/proto/vttime" +) + +const ( + alterSingleSchemaMigrationSql = `alter vitess_migration %a ` + alterAllSchemaMigrationSql = `alter vitess_migration %s all` + selectSchemaMigrationsSql = `select + * + from _vt.schema_migrations where %s %s %s` + AllMigrationsIndicator = "all" +) + +func alterSchemaMigrationQuery(command, uuid string) (string, error) { + if strings.ToLower(uuid) == AllMigrationsIndicator { + return fmt.Sprintf(alterAllSchemaMigrationSql, command), nil + } + return sqlparser.ParseAndBind(alterSingleSchemaMigrationSql+command, sqltypes.StringBindVariable(uuid)) +} + +func selectSchemaMigrationsQuery(condition, order, skipLimit string) string { + return fmt.Sprintf(selectSchemaMigrationsSql, condition, order, skipLimit) +} + +// rowToSchemaMigration converts a single row into a SchemaMigration protobuf. +func rowToSchemaMigration(row sqltypes.RowNamedValues) (sm *vtctldatapb.SchemaMigration, err error) { + sm = new(vtctldatapb.SchemaMigration) + sm.Uuid = row.AsString("migration_uuid", "") + sm.Keyspace = row.AsString("keyspace", "") + sm.Shard = row.AsString("shard", "") + sm.Schema = row.AsString("mysql_schema", "") + sm.Table = row.AsString("mysql_table", "") + sm.MigrationStatement = row.AsString("migration_statement", "") + + sm.Strategy, err = schematools.ParseSchemaMigrationStrategy(row.AsString("strategy", "")) + if err != nil { + return nil, err + } + + sm.Options = row.AsString("options", "") + + sm.AddedAt, err = valueToVTTime(row.AsString("added_timestamp", "")) + if err != nil { + return nil, err + } + + sm.RequestedAt, err = valueToVTTime(row.AsString("requested_timestamp", "")) + if err != nil { + return nil, err + } + + sm.ReadyAt, err = valueToVTTime(row.AsString("ready_timestamp", "")) + if err != nil { + return nil, err + } + + sm.StartedAt, err = valueToVTTime(row.AsString("started_timestamp", "")) + if err != nil { + return nil, err + } + + sm.LivenessTimestamp, err = valueToVTTime(row.AsString("liveness_timestamp", "")) + if err != nil { + return nil, err + } + + sm.CompletedAt, err = valueToVTTime(row.AsString("completed_timestamp", "")) + if err != nil { + return nil, err + } + + sm.CleanedUpAt, err = valueToVTTime(row.AsString("cleanup_timestamp", "")) + if err != nil { + return nil, err + } + + sm.Status, err = schematools.ParseSchemaMigrationStatus(row.AsString("migration_status", "unknown")) + if err != nil { + return nil, err + } + + sm.LogPath = row.AsString("log_path", "") + sm.Artifacts = row.AsString("artifacts", "") + sm.Retries = row.AsUint64("retries", 0) + + if alias := row.AsString("tablet", ""); alias != "" { + sm.Tablet, err = topoproto.ParseTabletAlias(alias) + if err != nil { + return nil, err + } + } + + sm.TabletFailure = row.AsBool("tablet_failure", false) + sm.Progress = float32(row.AsFloat64("progress", 0)) + sm.MigrationContext = row.AsString("migration_context", "") + sm.DdlAction = row.AsString("ddl_action", "") + sm.Message = row.AsString("message", "") + sm.EtaSeconds = row.AsInt64("eta_seconds", -1) + sm.RowsCopied = row.AsUint64("rows_copied", 0) + sm.TableRows = row.AsInt64("table_rows", 0) + sm.AddedUniqueKeys = uint32(row.AsUint64("added_unique_keys", 0)) + sm.RemovedUniqueKeys = uint32(row.AsUint64("removed_unique_keys", 0)) + sm.LogFile = row.AsString("log_file", "") + + sm.ArtifactRetention, err = valueToVTDuration(row.AsString("retain_artifacts_seconds", ""), "s") + if err != nil { + return nil, err + } + + sm.PostponeCompletion = row.AsBool("postpone_completion", false) + sm.RemovedUniqueKeyNames = row.AsString("removed_unique_key_names", "") + sm.DroppedNoDefaultColumnNames = row.AsString("dropped_no_default_column_names", "") + sm.ExpandedColumnNames = row.AsString("expanded_column_names", "") + sm.RevertibleNotes = row.AsString("revertible_notes", "") + sm.AllowConcurrent = row.AsBool("allow_concurrent", false) + sm.RevertedUuid = row.AsString("reverted_uuid", "") + sm.IsView = row.AsBool("is_view", false) + sm.ReadyToComplete = row.AsBool("ready_to_complete", false) + sm.VitessLivenessIndicator = row.AsInt64("vitess_liveness_indicator", 0) + sm.UserThrottleRatio = float32(row.AsFloat64("user_throttle_ratio", 0)) + sm.SpecialPlan = row.AsString("special_plan", "") + + sm.LastThrottledAt, err = valueToVTTime(row.AsString("last_throttled_timestamp", "")) + if err != nil { + return nil, err + } + + sm.ComponentThrottled = row.AsString("component_throttled", "") + + sm.CancelledAt, err = valueToVTTime(row.AsString("cancelled_at", "")) + if err != nil { + return nil, err + } + + sm.PostponeLaunch = row.AsBool("postpone_launch", false) + sm.Stage = row.AsString("stage", "") + sm.CutoverAttempts = uint32(row.AsUint64("cutover_attempts", 0)) + sm.IsImmediateOperation = row.AsBool("is_immediate_operation", false) + + sm.ReviewedAt, err = valueToVTTime(row.AsString("reviewed_timestamp", "")) + if err != nil { + return nil, err + } + + sm.ReadyToCompleteAt, err = valueToVTTime(row.AsString("ready_to_complete_timestamp", "")) + if err != nil { + return nil, err + } + + return sm, nil +} + +// valueToVTTime converts a SQL timestamp string into a vttime Time type, first +// parsing the raw string value into a Go Time type in the local timezone. This +// is a correct conversion only if the vtctld is set to the same timezone as the +// vttablet that stored the value. +func valueToVTTime(s string) (*vttime.Time, error) { + if s == "" { + return nil, nil + } + + gotime, err := time.ParseInLocation(sqltypes.TimestampFormat, s, time.Local) + if err != nil { + return nil, err + } + + return protoutil.TimeToProto(gotime), nil +} + +// valueToVTDuration converts a SQL string into a vttime Duration type. It takes +// a defaultUnit in the event the value is a bare numeral (e.g. 124 vs 124s). +func valueToVTDuration(s string, defaultUnit string) (*vttime.Duration, error) { + if s == "" { + return nil, nil + } + + switch s[len(s)-1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + s += defaultUnit + } + + godur, err := time.ParseDuration(s) + if err != nil { + return nil, err + } + + return protoutil.DurationToProto(godur), nil +} + +// queryResultForTabletResults aggregates given results into a combined result set +func queryResultForTabletResults(results map[string]*sqltypes.Result) *sqltypes.Result { + var qr = &sqltypes.Result{} + defaultFields := []*querypb.Field{{ + Name: "Tablet", + Type: sqltypes.VarBinary, + Charset: collations.CollationBinaryID, + Flags: uint32(querypb.MySqlFlag_BINARY_FLAG), + }} + var row2 []sqltypes.Value + for tabletAlias, result := range results { + if qr.Fields == nil { + qr.Fields = append(qr.Fields, defaultFields...) + qr.Fields = append(qr.Fields, result.Fields...) + } + for _, row := range result.Rows { + row2 = nil + row2 = append(row2, sqltypes.NewVarBinary(tabletAlias)) + row2 = append(row2, row...) + qr.Rows = append(qr.Rows, row2) + } + } + return qr +} diff --git a/go/vt/vtctl/grpcvtctldserver/query_test.go b/go/vt/vtctl/grpcvtctldserver/query_test.go new file mode 100644 index 00000000000..6073d3bc395 --- /dev/null +++ b/go/vt/vtctl/grpcvtctldserver/query_test.go @@ -0,0 +1,236 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package grpcvtctldserver + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/vtctl/schematools" + + "vitess.io/vitess/go/test/utils" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + "vitess.io/vitess/go/vt/proto/vttime" +) + +var now = time.Now() + +func TestRowToSchemaMigration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + row sqltypes.RowNamedValues + expected *vtctldatapb.SchemaMigration + shouldErr bool + }{ + { + row: sqltypes.RowNamedValues(map[string]sqltypes.Value{ + "migration_uuid": sqltypes.NewVarChar("abc"), + "keyspace": sqltypes.NewVarChar("testks"), + "shard": sqltypes.NewVarChar("shard"), + "mysql_schema": sqltypes.NewVarChar("_vt"), + "mysql_table": sqltypes.NewVarChar("t1"), + "migration_statement": sqltypes.NewVarChar("alter table t1 rename foo to bar"), + "strategy": sqltypes.NewVarChar(schematools.SchemaMigrationStrategyName(vtctldatapb.SchemaMigration_ONLINE)), + "requested_timestamp": sqltypes.NewTimestamp(mysqlTimestamp(now)), + "eta_seconds": sqltypes.NewInt64(10), + }), + expected: &vtctldatapb.SchemaMigration{ + Uuid: "abc", + Keyspace: "testks", + Shard: "shard", + Schema: "_vt", + Table: "t1", + MigrationStatement: "alter table t1 rename foo to bar", + Strategy: vtctldatapb.SchemaMigration_ONLINE, + RequestedAt: protoutil.TimeToProto(now.Truncate(time.Second)), + EtaSeconds: 10, + }, + }, + { + name: "eta_seconds defaults to -1", + row: sqltypes.RowNamedValues(map[string]sqltypes.Value{}), + expected: &vtctldatapb.SchemaMigration{ + Strategy: vtctldatapb.SchemaMigration_DIRECT, + EtaSeconds: -1, + }, + }, + { + name: "bad data", + row: sqltypes.RowNamedValues(map[string]sqltypes.Value{ + "tablet": sqltypes.NewVarChar("not-an-alias"), + }), + shouldErr: true, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + out, err := rowToSchemaMigration(test.row) + if test.shouldErr { + assert.Error(t, err) + return + } + + require.NoError(t, err) + utils.MustMatch(t, test.expected, out) + }) + } +} + +func mysqlTimestamp(t time.Time) string { + return t.Local().Format(sqltypes.TimestampFormat) +} + +func TestValueToVTTime(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + value string + expected *vttime.Time + shouldErr bool + }{ + { + value: mysqlTimestamp(now), + expected: protoutil.TimeToProto(now.Truncate(time.Second)), + }, + { + name: "empty string", + value: "", + expected: nil, + }, + { + name: "parse error", + value: "2006/01/02", + shouldErr: true, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + out, err := valueToVTTime(test.value) + if test.shouldErr { + assert.Error(t, err, "expected parse error") + return + } + + require.NoError(t, err) + utils.MustMatch(t, test.expected, out, "failed to convert %s into vttime", test.value) + }) + } +} + +func TestValueToVTDuration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + value string + defaultUnit string + expected *vttime.Duration + shouldErr bool + }{ + { + value: "12s", + expected: protoutil.DurationToProto(12 * time.Second), + }, + { + value: "1h10m", + expected: protoutil.DurationToProto(time.Hour + 10*time.Minute), + }, + { + name: "no unit in value", + value: "120", + defaultUnit: "s", + expected: protoutil.DurationToProto(120 * time.Second), + }, + { + name: "empty", + expected: nil, + }, + { + name: "bad input", + value: "abcd", + shouldErr: true, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + out, err := valueToVTDuration(test.value, test.defaultUnit) + if test.shouldErr { + assert.Error(t, err, "expected parse error") + return + } + + require.NoError(t, err) + utils.MustMatch(t, test.expected, out, "failed to convert %s into vttime duration", test.value) + }) + } +} + +func TestAlterSchemaMigrationQuery(t *testing.T) { + uuid := "4e5dcf80_354b_11eb_82cd_f875a4d24e90" + + tcases := []struct { + command string + uuid string + expect string + }{ + { + command: "cleanup", + uuid: uuid, + expect: "alter vitess_migration '4e5dcf80_354b_11eb_82cd_f875a4d24e90' cleanup", + }, + { + command: "cancel", + uuid: uuid, + expect: "alter vitess_migration '4e5dcf80_354b_11eb_82cd_f875a4d24e90' cancel", + }, + { + command: "cancel", + uuid: "all", + expect: "alter vitess_migration cancel all", + }, + { + command: "cancel", + uuid: "ALL", + expect: "alter vitess_migration cancel all", + }, + } + for _, tcase := range tcases { + testName := fmt.Sprintf("%s %s", tcase.command, tcase.uuid) + t.Run(testName, func(t *testing.T) { + query, err := alterSchemaMigrationQuery(tcase.command, tcase.uuid) + assert.NoError(t, err) + assert.Equal(t, tcase.expect, query) + }) + } +} diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go index a9bbb4760f5..18c4e1567c0 100644 --- a/go/vt/vtctl/grpcvtctldserver/server.go +++ b/go/vt/vtctl/grpcvtctldserver/server.go @@ -31,13 +31,13 @@ import ( "golang.org/x/sync/semaphore" "google.golang.org/grpc" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/event" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sets" "vitess.io/vitess/go/sqlescape" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/concurrency" @@ -60,6 +60,7 @@ import ( "vitess.io/vitess/go/vt/vtctl/schematools" "vitess.io/vitess/go/vt/vtctl/workflow" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tmclient" logutilpb "vitess.io/vitess/go/vt/proto/logutil" @@ -71,11 +72,14 @@ import ( vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice" - "vitess.io/vitess/go/vt/proto/vtrpc" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) const ( initShardPrimaryOperation = "InitShardPrimary" + + // DefaultWaitReplicasTimeout is the default value for waitReplicasTimeout, which is used when calling method ApplySchema. + DefaultWaitReplicasTimeout = 10 * time.Second ) // VtctldServer implements the Vtctld RPC service protocol. @@ -121,7 +125,7 @@ func (s *VtctldServer) AddCellInfo(ctx context.Context, req *vtctldatapb.AddCell defer panicHandler(&err) if req.CellInfo.Root == "" { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "CellInfo.Root must be non-empty") + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "CellInfo.Root must be non-empty") return nil, err } @@ -225,7 +229,7 @@ func (s *VtctldServer) ApplySchema(ctx context.Context, req *vtctldatapb.ApplySc span.Annotate("ddl_strategy", req.DdlStrategy) if len(req.Sql) == 0 { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "Sql must be a non-empty array") + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "Sql must be a non-empty array") return nil, err } @@ -263,10 +267,7 @@ func (s *VtctldServer) ApplySchema(ctx context.Context, req *vtctldatapb.ApplySc logstream = append(logstream, e) }) - executor := schemamanager.NewTabletExecutor(migrationContext, s.ts, s.tmc, logger, waitReplicasTimeout) - if req.AllowLongUnavailability { - executor.AllowBigSchemaChange() - } + executor := schemamanager.NewTabletExecutor(migrationContext, s.ts, s.tmc, logger, waitReplicasTimeout, req.BatchSize) if err = executor.SetDDLStrategy(req.DdlStrategy); err != nil { err = vterrors.Wrapf(err, "invalid DdlStrategy: %s", req.DdlStrategy) @@ -287,12 +288,19 @@ func (s *VtctldServer) ApplySchema(ctx context.Context, req *vtctldatapb.ApplySc ) if err != nil { - return &vtctldatapb.ApplySchemaResponse{}, err + return nil, err + } + + resp = &vtctldatapb.ApplySchemaResponse{ + UuidList: execResult.UUIDs, + RowsAffectedByShard: make(map[string]uint64, len(execResult.SuccessShards)), + } + + for _, shard := range execResult.SuccessShards { + resp.RowsAffectedByShard[shard.Shard] = shard.Result.RowsAffected } - return &vtctldatapb.ApplySchemaResponse{ - UuidList: execResult.UUIDs, - }, err + return resp, err } // ApplyVSchema is part of the vtctlservicepb.VtctldServer interface. @@ -318,7 +326,7 @@ func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyV } if (req.Sql != "" && req.VSchema != nil) || (req.Sql == "" && req.VSchema == nil) { - err = vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "must pass exactly one of req.VSchema and req.Sql") + err = vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "must pass exactly one of req.VSchema and req.Sql") return nil, err } @@ -335,7 +343,7 @@ func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyV } ddl, ok := stmt.(*sqlparser.AlterVschema) if !ok { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "error parsing VSchema DDL statement `%s`", req.Sql) + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "error parsing VSchema DDL statement `%s`", req.Sql) return nil, err } @@ -359,6 +367,12 @@ func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyV return &vtctldatapb.ApplyVSchemaResponse{VSchema: vs}, nil } + _, err = vindexes.BuildKeyspace(vs) + if err != nil { + err = vterrors.Wrapf(err, "BuildKeyspace(%s)", req.Keyspace) + return nil, err + } + if err = s.ts.SaveVSchema(ctx, req.Keyspace, vs); err != nil { err = vterrors.Wrapf(err, "SaveVSchema(%s, %v)", req.Keyspace, req.VSchema) return nil, err @@ -413,6 +427,7 @@ func (s *VtctldServer) BackupShard(req *vtctldatapb.BackupShardRequest, stream v span.Annotate("shard", req.Shard) span.Annotate("allow_primary", req.AllowPrimary) span.Annotate("concurrency", req.Concurrency) + span.Annotate("incremental_from_pos", req.IncrementalFromPos) tablets, stats, err := reparentutil.ShardReplicationStatuses(ctx, s.ts, s.tmc, req.Keyspace, req.Shard) if err != nil { @@ -449,13 +464,13 @@ func (s *VtctldServer) BackupShard(req *vtctldatapb.BackupShardRequest, stream v } if backupTablet == nil { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no tablet available for backup") + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no tablet available for backup") return err } span.Annotate("tablet_alias", topoproto.TabletAliasString(backupTablet.Alias)) - r := &vtctldatapb.BackupRequest{Concurrency: req.Concurrency, AllowPrimary: req.AllowPrimary} + r := &vtctldatapb.BackupRequest{Concurrency: req.Concurrency, AllowPrimary: req.AllowPrimary, UpgradeSafe: req.UpgradeSafe, IncrementalFromPos: req.IncrementalFromPos} err = s.backupTablet(ctx, backupTablet, r, stream) return err } @@ -467,6 +482,7 @@ func (s *VtctldServer) backupTablet(ctx context.Context, tablet *topodatapb.Tabl Concurrency: int64(req.Concurrency), AllowPrimary: req.AllowPrimary, IncrementalFromPos: req.IncrementalFromPos, + UpgradeSafe: req.UpgradeSafe, } logStream, err := s.tmc.Backup(ctx, tablet, r) if err != nil { @@ -489,26 +505,44 @@ func (s *VtctldServer) backupTablet(ctx context.Context, tablet *topodatapb.Tabl logger.Errorf("failed to send stream response %+v: %v", resp, err) } case io.EOF: - // Do not do anything for primary tablets and when active reparenting is disabled - if mysqlctl.DisableActiveReparents || tablet.Type == topodatapb.TabletType_PRIMARY { - return nil - } - - // Otherwise we find the correct primary tablet and set the replication source, - // since the primary could have changed while we executed the backup which can - // also affect whether we want to send semi sync acks or not. - tabletInfo, err := s.ts.GetTablet(ctx, tablet.Alias) - if err != nil { - return err - } - - return reparentutil.SetReplicationSource(ctx, s.ts, s.tmc, tabletInfo.Tablet) + return nil default: return err } } } +// CancelSchemaMigration is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) CancelSchemaMigration(ctx context.Context, req *vtctldatapb.CancelSchemaMigrationRequest) (resp *vtctldatapb.CancelSchemaMigrationResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.CancelSchemaMigration") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("uuid", req.Uuid) + + query, err := alterSchemaMigrationQuery("cancel", req.Uuid) + if err != nil { + return nil, err + } + + log.Info("Calling ApplySchema to cancel migration") + qr, err := s.ApplySchema(ctx, &vtctldatapb.ApplySchemaRequest{ + Keyspace: req.Keyspace, + Sql: []string{query}, + WaitReplicasTimeout: protoutil.DurationToProto(DefaultWaitReplicasTimeout), + }) + if err != nil { + return nil, err + } + + resp = &vtctldatapb.CancelSchemaMigrationResponse{ + RowsAffectedByShard: qr.RowsAffectedByShard, + } + return resp, nil +} + // ChangeTabletType is part of the vtctlservicepb.VtctldServer interface. func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.ChangeTabletTypeRequest) (resp *vtctldatapb.ChangeTabletTypeResponse, err error) { span, ctx := trace.NewSpan(ctx, "VtctldServer.ChangeTabletType") @@ -536,7 +570,7 @@ func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.Ch } if req.DryRun { - afterTablet := proto.Clone(tablet.Tablet).(*topodatapb.Tablet) + afterTablet := tablet.Tablet.CloneVT() afterTablet.Type = req.DbType return &vtctldatapb.ChangeTabletTypeResponse{ @@ -562,7 +596,7 @@ func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.Ch } if !shard.HasPrimary() { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no primary tablet for shard %v/%v", tablet.Keyspace, tablet.Shard) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no primary tablet for shard %v/%v", tablet.Keyspace, tablet.Shard) return nil, err } @@ -573,18 +607,18 @@ func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.Ch } if shardPrimary.Type != topodatapb.TabletType_PRIMARY { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "TopologyServer has incosistent state for shard primary %v", topoproto.TabletAliasString(shard.PrimaryAlias)) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "TopologyServer has incosistent state for shard primary %v", topoproto.TabletAliasString(shard.PrimaryAlias)) return nil, err } if shardPrimary.Keyspace != tablet.Keyspace || shardPrimary.Shard != tablet.Shard { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary %v and potential replica %v not in same keypace shard (%v/%v)", topoproto.TabletAliasString(shard.PrimaryAlias), req.TabletAlias, tablet.Keyspace, tablet.Shard) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "primary %v and potential replica %v not in same keypace shard (%v/%v)", topoproto.TabletAliasString(shard.PrimaryAlias), req.TabletAlias, tablet.Keyspace, tablet.Shard) return nil, err } // We should clone the tablet and change its type to the expected type before checking the durability rules // Since we want to check the durability rules for the desired state and not before we make that change - expectedTablet := proto.Clone(tablet.Tablet).(*topodatapb.Tablet) + expectedTablet := tablet.Tablet.CloneVT() expectedTablet.Type = req.DbType err = s.tmc.ChangeType(ctx, tablet.Tablet, req.DbType, reparentutil.IsReplicaSemiSync(durability, shardPrimary.Tablet, expectedTablet)) if err != nil { @@ -607,6 +641,68 @@ func (s *VtctldServer) ChangeTabletType(ctx context.Context, req *vtctldatapb.Ch }, nil } +// CleanupSchemaMigration is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) CleanupSchemaMigration(ctx context.Context, req *vtctldatapb.CleanupSchemaMigrationRequest) (resp *vtctldatapb.CleanupSchemaMigrationResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.CleanupSchemaMigration") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("uuid", req.Uuid) + + query, err := alterSchemaMigrationQuery("cleanup", req.Uuid) + if err != nil { + return nil, err + } + + log.Info("Calling ApplySchema to cleanup migration") + qr, err := s.ApplySchema(ctx, &vtctldatapb.ApplySchemaRequest{ + Keyspace: req.Keyspace, + Sql: []string{query}, + WaitReplicasTimeout: protoutil.DurationToProto(DefaultWaitReplicasTimeout), + }) + if err != nil { + return nil, err + } + + resp = &vtctldatapb.CleanupSchemaMigrationResponse{ + RowsAffectedByShard: qr.RowsAffectedByShard, + } + return resp, nil +} + +// CompleteSchemaMigration is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) CompleteSchemaMigration(ctx context.Context, req *vtctldatapb.CompleteSchemaMigrationRequest) (resp *vtctldatapb.CompleteSchemaMigrationResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.CompleteSchemaMigration") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("uuid", req.Uuid) + + query, err := alterSchemaMigrationQuery("complete", req.Uuid) + if err != nil { + return nil, err + } + + log.Info("Calling ApplySchema to complete migration") + qr, err := s.ApplySchema(ctx, &vtctldatapb.ApplySchemaRequest{ + Keyspace: req.Keyspace, + Sql: []string{query}, + WaitReplicasTimeout: protoutil.DurationToProto(DefaultWaitReplicasTimeout), + }) + if err != nil { + return nil, err + } + + resp = &vtctldatapb.CompleteSchemaMigrationResponse{ + RowsAffectedByShard: qr.RowsAffectedByShard, + } + return resp, nil +} + // CreateKeyspace is part of the vtctlservicepb.VtctldServer interface. func (s *VtctldServer) CreateKeyspace(ctx context.Context, req *vtctldatapb.CreateKeyspaceRequest) (resp *vtctldatapb.CreateKeyspaceResponse, err error) { span, ctx := trace.NewSpan(ctx, "VtctldServer.CreateKeyspace") @@ -634,7 +730,7 @@ func (s *VtctldServer) CreateKeyspace(ctx context.Context, req *vtctldatapb.Crea } span.Annotate("base_keyspace", req.BaseKeyspace) - span.Annotate("snapshot_time", req.SnapshotTime) // TODO: get a proper string repr + span.Annotate("snapshot_time", protoutil.TimeFromProto(req.SnapshotTime).String()) default: return nil, fmt.Errorf("unknown keyspace type %v", req.Type) } @@ -645,6 +741,7 @@ func (s *VtctldServer) CreateKeyspace(ctx context.Context, req *vtctldatapb.Crea BaseKeyspace: req.BaseKeyspace, SnapshotTime: req.SnapshotTime, DurabilityPolicy: req.DurabilityPolicy, + SidecarDbName: req.SidecarDbName, } err = s.ts.CreateKeyspace(ctx, req.Name, ki) @@ -857,7 +954,7 @@ func (s *VtctldServer) DeleteKeyspace(ctx context.Context, req *vtctldatapb.Dele if len(shards) > 0 { if !req.Recursive { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "keyspace %v still has %d shards; use Recursive=true or remove them manually", req.Keyspace, len(shards)) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "keyspace %v still has %d shards; use Recursive=true or remove them manually", req.Keyspace, len(shards)) return nil, err } @@ -929,7 +1026,7 @@ func (s *VtctldServer) DeleteSrvVSchema(ctx context.Context, req *vtctldatapb.De defer panicHandler(&err) if req.Cell == "" { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell must be non-empty") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "cell must be non-empty") return nil, err } @@ -988,6 +1085,7 @@ func (s *VtctldServer) EmergencyReparentShard(ctx context.Context, req *vtctldat span.Annotate("wait_replicas_timeout_sec", waitReplicasTimeout.Seconds()) span.Annotate("prevent_cross_cell_promotion", req.PreventCrossCellPromotion) + span.Annotate("wait_for_all_tablets", req.WaitForAllTablets) m := sync.RWMutex{} logstream := []*logutilpb.Event{} @@ -1003,8 +1101,9 @@ func (s *VtctldServer) EmergencyReparentShard(ctx context.Context, req *vtctldat req.Shard, reparentutil.EmergencyReparentOptions{ NewPrimaryAlias: req.NewPrimary, - IgnoreReplicas: sets.New[string](ignoreReplicaAliases...), + IgnoreReplicas: sets.New(ignoreReplicaAliases...), WaitReplicasTimeout: waitReplicasTimeout, + WaitAllTablets: req.WaitForAllTablets, PreventCrossCellPromotion: req.PreventCrossCellPromotion, }, ) @@ -1099,14 +1198,14 @@ func (s *VtctldServer) ExecuteHook(ctx context.Context, req *vtctldatapb.Execute span.Annotate("tablet_alias", topoproto.TabletAliasString(req.TabletAlias)) if req.TabletHookRequest == nil { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "TabletHookRequest cannot be nil") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "TabletHookRequest cannot be nil") return nil, err } span.Annotate("hook_name", req.TabletHookRequest.Name) if strings.Contains(req.TabletHookRequest.Name, "/") { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "hook name cannot contain a '/'; was %v", req.TabletHookRequest.Name) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "hook name cannot contain a '/'; was %v", req.TabletHookRequest.Name) return nil, err } @@ -1244,7 +1343,7 @@ func (s *VtctldServer) GetCellInfo(ctx context.Context, req *vtctldatapb.GetCell defer panicHandler(&err) if req.Cell == "" { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell field is required") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "cell field is required") return nil, err } @@ -1360,7 +1459,7 @@ func (s *VtctldServer) GetPermissions(ctx context.Context, req *vtctldatapb.GetP span.Annotate("tablet_alias", topoproto.TabletAliasString(req.TabletAlias)) ti, err := s.ts.GetTablet(ctx, req.TabletAlias) if err != nil { - err = vterrors.Errorf(vtrpc.Code_NOT_FOUND, "Failed to get tablet %v: %v", req.TabletAlias, err) + err = vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "Failed to get tablet %v: %v", req.TabletAlias, err) return nil, err } @@ -1457,6 +1556,124 @@ func (s *VtctldServer) GetSchema(ctx context.Context, req *vtctldatapb.GetSchema }, nil } +func (s *VtctldServer) GetSchemaMigrations(ctx context.Context, req *vtctldatapb.GetSchemaMigrationsRequest) (resp *vtctldatapb.GetSchemaMigrationsResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.GetShard") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + + var condition string + switch { + case req.Uuid != "": + span.Annotate("uuid", req.Uuid) + if !schema.IsOnlineDDLUUID(req.Uuid) { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%s is not a valid UUID", req.Uuid) + } + + condition, err = sqlparser.ParseAndBind("migration_uuid=%a", sqltypes.StringBindVariable(req.Uuid)) + case req.MigrationContext != "": + span.Annotate("migration_context", req.MigrationContext) + condition, err = sqlparser.ParseAndBind("migration_context=%a", sqltypes.StringBindVariable(req.MigrationContext)) + case req.Status != vtctldatapb.SchemaMigration_UNKNOWN: + span.Annotate("migration_status", schematools.SchemaMigrationStatusName(req.Status)) + condition, err = sqlparser.ParseAndBind("migration_status=%a", sqltypes.StringBindVariable(schematools.SchemaMigrationStatusName(req.Status))) + case req.Recent != nil: + var d time.Duration + d, _, err = protoutil.DurationFromProto(req.Recent) + if err != nil { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "error parsing duration: %s", err) + } + + span.Annotate("recent", d.String()) + condition = fmt.Sprintf("requested_timestamp > now() - interval %0.f second", d.Seconds()) + default: + condition = "migration_uuid like '%'" + } + + if err != nil { + return nil, fmt.Errorf("Error generating OnlineDDL query: %+v", err) + } + + order := " order by `id` " + switch req.Order { + case vtctldatapb.QueryOrdering_DESCENDING: + order += "DESC" + default: + order += "ASC" + } + + var skipLimit string + if req.Limit > 0 { + skipLimit = fmt.Sprintf("LIMIT %v,%v", req.Skip, req.Limit) + span.Annotate("skip_limit", skipLimit) + } + + query := selectSchemaMigrationsQuery(condition, order, skipLimit) + + tabletsResp, err := s.GetTablets(ctx, &vtctldatapb.GetTabletsRequest{ + Cells: nil, + Strict: false, + Keyspace: req.Keyspace, + TabletType: topodatapb.TabletType_PRIMARY, + }) + if err != nil { + return nil, err + } + + var ( + m sync.Mutex + wg sync.WaitGroup + rec concurrency.AllErrorRecorder + results = map[string]*sqltypes.Result{} + ) + for _, tablet := range tabletsResp.Tablets { + + wg.Add(1) + go func(tablet *topodatapb.Tablet) { + defer wg.Done() + + alias := topoproto.TabletAliasString(tablet.Alias) + fetchResp, err := s.ExecuteFetchAsDBA(ctx, &vtctldatapb.ExecuteFetchAsDBARequest{ + TabletAlias: tablet.Alias, + Query: query, + MaxRows: 10_000, + }) + if err != nil { + rec.RecordError(err) + return + } + + m.Lock() + defer m.Unlock() + + results[alias] = sqltypes.Proto3ToResult(fetchResp.Result) + }(tablet) + } + + wg.Wait() + if rec.HasErrors() { + return nil, rec.Error() + } + + // combine results. This loses sorting if there's more then 1 tablet + combinedResults := queryResultForTabletResults(results) + + resp = new(vtctldatapb.GetSchemaMigrationsResponse) + for _, row := range combinedResults.Named().Rows { + var m *vtctldatapb.SchemaMigration + m, err = rowToSchemaMigration(row) + if err != nil { + return nil, err + } + + resp.Migrations = append(resp.Migrations, m) + } + + return resp, err +} + // GetShard is part of the vtctlservicepb.VtctldServer interface. func (s *VtctldServer) GetShard(ctx context.Context, req *vtctldatapb.GetShardRequest) (resp *vtctldatapb.GetShardResponse, err error) { span, ctx := trace.NewSpan(ctx, "VtctldServer.GetShard") @@ -1581,6 +1798,9 @@ func (s *VtctldServer) UpdateThrottlerConfig(ctx context.Context, req *vtctldata if throttlerConfig == nil { throttlerConfig = &topodatapb.ThrottlerConfig{} } + if throttlerConfig.ThrottledApps == nil { + throttlerConfig.ThrottledApps = make(map[string]*topodatapb.ThrottledAppRule) + } if req.CustomQuerySet { // custom query provided throttlerConfig.CustomQuery = req.CustomQuery @@ -1603,6 +1823,9 @@ func (s *VtctldServer) UpdateThrottlerConfig(ctx context.Context, req *vtctldata if req.CheckAsCheckShard { throttlerConfig.CheckAsCheckSelf = false } + if req.ThrottledApp != nil && req.ThrottledApp.Name != "" { + throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp + } return throttlerConfig } @@ -1664,8 +1887,8 @@ func (s *VtctldServer) GetSrvVSchemas(ctx context.Context, req *vtctldatapb.GetS // Omit any cell names in the request that don't map to existing cells if len(req.Cells) > 0 { - s1 := sets.New[string](allCells...) - s2 := sets.New[string](req.Cells...) + s1 := sets.New(allCells...) + s2 := sets.New(req.Cells...) cells = sets.List(s1.Intersection(s2)) } @@ -1985,12 +2208,12 @@ func (s *VtctldServer) InitShardPrimary(ctx context.Context, req *vtctldatapb.In defer panicHandler(&err) if req.Keyspace == "" { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "keyspace field is required") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "keyspace field is required") return nil, err } if req.Shard == "" { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "shard field is required") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "shard field is required") return nil, err } @@ -2089,7 +2312,7 @@ func (s *VtctldServer) InitShardPrimaryLocked( if !ok { return fmt.Errorf("primary-elect tablet %v is not in the shard", topoproto.TabletAliasString(req.PrimaryElectTabletAlias)) } - ev.NewPrimary = proto.Clone(primaryElectTabletInfo.Tablet).(*topodatapb.Tablet) + ev.NewPrimary = primaryElectTabletInfo.Tablet.CloneVT() // Check the primary is the only primary is the shard, or -force was used. _, primaryTabletMap := topotools.SortedTabletMap(tabletMap) @@ -2255,6 +2478,71 @@ func (s *VtctldServer) InitShardPrimaryLocked( return nil } +// LaunchSchemaMigration is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) LaunchSchemaMigration(ctx context.Context, req *vtctldatapb.LaunchSchemaMigrationRequest) (resp *vtctldatapb.LaunchSchemaMigrationResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.LaunchSchemaMigration") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("uuid", req.Uuid) + + query, err := alterSchemaMigrationQuery("launch", req.Uuid) + if err != nil { + return nil, err + } + + log.Info("Calling ApplySchema to launch migration") + qr, err := s.ApplySchema(ctx, &vtctldatapb.ApplySchemaRequest{ + Keyspace: req.Keyspace, + Sql: []string{query}, + WaitReplicasTimeout: protoutil.DurationToProto(DefaultWaitReplicasTimeout), + }) + if err != nil { + return nil, err + } + + resp = &vtctldatapb.LaunchSchemaMigrationResponse{ + RowsAffectedByShard: qr.RowsAffectedByShard, + } + return resp, nil +} + +// MoveTablesCreate is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) MoveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTablesCreateRequest) (resp *vtctldatapb.WorkflowStatusResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.MoveTablesCreate") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.TargetKeyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("cells", req.Cells) + span.Annotate("tablet_types", req.TabletTypes) + span.Annotate("on_ddl", req.OnDdl) + + resp, err = s.ws.MoveTablesCreate(ctx, req) + return resp, err +} + +// MoveTablesComplete is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) MoveTablesComplete(ctx context.Context, req *vtctldatapb.MoveTablesCompleteRequest) (resp *vtctldatapb.MoveTablesCompleteResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.MoveTablesComplete") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.TargetKeyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("keep_data", req.KeepData) + span.Annotate("keep_routing_rules", req.KeepRoutingRules) + span.Annotate("dry_run", req.DryRun) + + resp, err = s.ws.MoveTablesComplete(ctx, req) + return resp, err +} + // PingTablet is part of the vtctlservicepb.VtctldServer interface. func (s *VtctldServer) PingTablet(ctx context.Context, req *vtctldatapb.PingTabletRequest) (resp *vtctldatapb.PingTabletResponse, err error) { span, ctx := trace.NewSpan(ctx, "VtctldServer.PingTablet") @@ -2387,7 +2675,7 @@ func (s *VtctldServer) RefreshState(ctx context.Context, req *vtctldatapb.Refres defer panicHandler(&err) if req.TabletAlias == nil { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "RefreshState requires a tablet alias") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "RefreshState requires a tablet alias") return nil, err } @@ -2396,7 +2684,7 @@ func (s *VtctldServer) RefreshState(ctx context.Context, req *vtctldatapb.Refres tablet, err := s.ts.GetTablet(ctx, req.TabletAlias) if err != nil { - err = fmt.Errorf("Failed to get tablet %s: %w", topoproto.TabletAliasString(req.TabletAlias), err) + err = fmt.Errorf("failed to get tablet %s: %w", topoproto.TabletAliasString(req.TabletAlias), err) return nil, err } @@ -2415,12 +2703,12 @@ func (s *VtctldServer) RefreshStateByShard(ctx context.Context, req *vtctldatapb defer panicHandler(&err) if req.Keyspace == "" { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "RefreshStateByShard requires a keyspace") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "RefreshStateByShard requires a keyspace") return nil, err } if req.Shard == "" { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "RefreshStateByShard requires a shard") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "RefreshStateByShard requires a shard") return nil, err } @@ -2429,7 +2717,7 @@ func (s *VtctldServer) RefreshStateByShard(ctx context.Context, req *vtctldatapb si, err := s.ts.GetShard(ctx, req.Keyspace, req.Shard) if err != nil { - err = fmt.Errorf("Failed to get shard %s/%s/: %w", req.Keyspace, req.Shard, err) + err = fmt.Errorf("failed to get shard %s/%s/: %w", req.Keyspace, req.Shard, err) return nil, err } @@ -2464,7 +2752,7 @@ func (s *VtctldServer) ReloadSchema(ctx context.Context, req *vtctldatapb.Reload ti, err := s.ts.GetTablet(ctx, req.TabletAlias) if err != nil { - err = vterrors.Errorf(vtrpc.Code_NOT_FOUND, "GetTablet(%v) failed: %v", req.TabletAlias, err) + err = vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "GetTablet(%v) failed: %v", req.TabletAlias, err) return nil, err } @@ -2526,7 +2814,7 @@ func (s *VtctldServer) ReloadSchemaKeyspace(ctx context.Context, req *vtctldatap shards, err := s.ts.GetShardNames(ctx, req.Keyspace) if err != nil { - err = vterrors.Errorf(vtrpc.Code_INTERNAL, "GetShardNames(%v) failed: %v", req.Keyspace, err) + err = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "GetShardNames(%v) failed: %v", req.Keyspace, err) return nil, err } @@ -2651,7 +2939,7 @@ func (s *VtctldServer) ReparentTablet(ctx context.Context, req *vtctldatapb.Repa defer panicHandler(&err) if req.Tablet == nil { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "tablet alias must not be nil") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "tablet alias must not be nil") return nil, err } @@ -2668,7 +2956,7 @@ func (s *VtctldServer) ReparentTablet(ctx context.Context, req *vtctldatapb.Repa } if !shard.HasPrimary() { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no primary tablet for shard %v/%v", tablet.Keyspace, tablet.Shard) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no primary tablet for shard %v/%v", tablet.Keyspace, tablet.Shard) return nil, err } @@ -2679,17 +2967,17 @@ func (s *VtctldServer) ReparentTablet(ctx context.Context, req *vtctldatapb.Repa } if shardPrimary.Type != topodatapb.TabletType_PRIMARY { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "TopologyServer has incosistent state for shard primary %v", topoproto.TabletAliasString(shard.PrimaryAlias)) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "TopologyServer has incosistent state for shard primary %v", topoproto.TabletAliasString(shard.PrimaryAlias)) return nil, err } if shardPrimary.Keyspace != tablet.Keyspace || shardPrimary.Shard != tablet.Shard { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary %v and potential replica %v not in same keypace shard (%v/%v)", topoproto.TabletAliasString(shard.PrimaryAlias), topoproto.TabletAliasString(req.Tablet), tablet.Keyspace, tablet.Shard) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "primary %v and potential replica %v not in same keypace shard (%v/%v)", topoproto.TabletAliasString(shard.PrimaryAlias), topoproto.TabletAliasString(req.Tablet), tablet.Keyspace, tablet.Shard) return nil, err } if topoproto.TabletAliasEqual(req.Tablet, shardPrimary.Alias) { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot ReparentTablet current shard primary (%v) onto itself", topoproto.TabletAliasString(req.Tablet)) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot ReparentTablet current shard primary (%v) onto itself", topoproto.TabletAliasString(req.Tablet)) return nil, err } @@ -2714,6 +3002,24 @@ func (s *VtctldServer) ReparentTablet(ctx context.Context, req *vtctldatapb.Repa }, nil } +// ReshardCreate is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) ReshardCreate(ctx context.Context, req *vtctldatapb.ReshardCreateRequest) (resp *vtctldatapb.WorkflowStatusResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.ReshardCreate") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("cells", req.Cells) + span.Annotate("source_shards", req.SourceShards) + span.Annotate("target_shards", req.TargetShards) + span.Annotate("tablet_types", req.TabletTypes) + span.Annotate("on_ddl", req.OnDdl) + + resp, err = s.ws.ReshardCreate(ctx, req) + return resp, err +} func (s *VtctldServer) RestoreFromBackup(req *vtctldatapb.RestoreFromBackupRequest, stream vtctlservicepb.Vtctld_RestoreFromBackupServer) (err error) { span, ctx := trace.NewSpan(stream.Context(), "VtctldServer.RestoreFromBackup") defer span.Finish() @@ -2735,9 +3041,10 @@ func (s *VtctldServer) RestoreFromBackup(req *vtctldatapb.RestoreFromBackupReque span.Annotate("shard", ti.Shard) r := &tabletmanagerdatapb.RestoreFromBackupRequest{ - BackupTime: req.BackupTime, - RestoreToPos: req.RestoreToPos, - DryRun: req.DryRun, + BackupTime: req.BackupTime, + RestoreToPos: req.RestoreToPos, + RestoreToTimestamp: req.RestoreToTimestamp, + DryRun: req.DryRun, } logStream, err := s.tmc.RestoreFromBackup(ctx, ti.Tablet, r) if err != nil { @@ -2766,7 +3073,7 @@ func (s *VtctldServer) RestoreFromBackup(req *vtctldatapb.RestoreFromBackupReque if mysqlctl.DisableActiveReparents { return nil } - if req.RestoreToPos != "" && !req.DryRun { + if (req.RestoreToPos != "" || !protoutil.TimeFromProto(req.RestoreToTimestamp).UTC().IsZero()) && !req.DryRun { // point in time recovery. Do not restore replication return nil } @@ -2790,6 +3097,37 @@ func (s *VtctldServer) RestoreFromBackup(req *vtctldatapb.RestoreFromBackupReque } } +// RetrySchemaMigration is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) RetrySchemaMigration(ctx context.Context, req *vtctldatapb.RetrySchemaMigrationRequest) (resp *vtctldatapb.RetrySchemaMigrationResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.RetrySchemaMigration") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("uuid", req.Uuid) + + query, err := alterSchemaMigrationQuery("retry", req.Uuid) + if err != nil { + return nil, err + } + + log.Info("Calling ApplySchema to retry migration") + qr, err := s.ApplySchema(ctx, &vtctldatapb.ApplySchemaRequest{ + Keyspace: req.Keyspace, + Sql: []string{query}, + WaitReplicasTimeout: protoutil.DurationToProto(DefaultWaitReplicasTimeout), + }) + if err != nil { + return nil, err + } + + resp = &vtctldatapb.RetrySchemaMigrationResponse{ + RowsAffectedByShard: qr.RowsAffectedByShard, + } + return resp, nil +} + // RunHealthCheck is part of the vtctlservicepb.VtctldServer interface. func (s *VtctldServer) RunHealthCheck(ctx context.Context, req *vtctldatapb.RunHealthCheckRequest) (resp *vtctldatapb.RunHealthCheckResponse, err error) { span, ctx := trace.NewSpan(ctx, "VtctldServer.RunHealthCheck") @@ -2837,7 +3175,7 @@ func (s *VtctldServer) SetKeyspaceDurabilityPolicy(ctx context.Context, req *vtc policyValid := reparentutil.CheckDurabilityPolicyExists(req.DurabilityPolicy) if !policyValid { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "durability policy <%v> is not a valid policy. Please register it as a policy first", req.DurabilityPolicy) + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "durability policy <%v> is not a valid policy. Please register it as a policy first", req.DurabilityPolicy) return nil, err } @@ -2948,7 +3286,7 @@ func (s *VtctldServer) SetShardTabletControl(ctx context.Context, req *vtctldata defer unlock(&err) si, err := s.ts.UpdateShardFields(ctx, req.Keyspace, req.Shard, func(si *topo.ShardInfo) error { - return si.UpdateSourceDeniedTables(ctx, req.TabletType, req.Cells, req.Remove, req.DeniedTables) + return si.UpdateDeniedTables(ctx, req.TabletType, req.Cells, req.Remove, req.DeniedTables) }) switch { @@ -2984,7 +3322,7 @@ func (s *VtctldServer) SetWritable(ctx context.Context, req *vtctldatapb.SetWrit defer panicHandler(&err) if req.TabletAlias == nil { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "SetWritable.TabletAlias is required") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "SetWritable.TabletAlias is required") return nil, err } @@ -3353,7 +3691,7 @@ func (s *VtctldServer) StartReplication(ctx context.Context, req *vtctldatapb.St defer panicHandler(&err) if req.TabletAlias == nil { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "StartReplication.TabletAlias is required") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "StartReplication.TabletAlias is required") return nil, err } @@ -3372,7 +3710,7 @@ func (s *VtctldServer) StartReplication(ctx context.Context, req *vtctldatapb.St } if !shard.HasPrimary() { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no primary tablet for shard %v/%v", tablet.Keyspace, tablet.Shard) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no primary tablet for shard %v/%v", tablet.Keyspace, tablet.Shard) return nil, err } @@ -3383,12 +3721,12 @@ func (s *VtctldServer) StartReplication(ctx context.Context, req *vtctldatapb.St } if shardPrimary.Type != topodatapb.TabletType_PRIMARY { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "TopologyServer has incosistent state for shard primary %v", topoproto.TabletAliasString(shard.PrimaryAlias)) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "TopologyServer has incosistent state for shard primary %v", topoproto.TabletAliasString(shard.PrimaryAlias)) return nil, err } if shardPrimary.Keyspace != tablet.Keyspace || shardPrimary.Shard != tablet.Shard { - err = vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary %v and replica %v not in same keypace shard (%v/%v)", topoproto.TabletAliasString(shard.PrimaryAlias), topoproto.TabletAliasString(tablet.Alias), tablet.Keyspace, tablet.Shard) + err = vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "primary %v and replica %v not in same keypace shard (%v/%v)", topoproto.TabletAliasString(shard.PrimaryAlias), topoproto.TabletAliasString(tablet.Alias), tablet.Keyspace, tablet.Shard) return nil, err } @@ -3418,7 +3756,7 @@ func (s *VtctldServer) StopReplication(ctx context.Context, req *vtctldatapb.Sto defer panicHandler(&err) if req.TabletAlias == nil { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "StopReplication.TabletAlias is required") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "StopReplication.TabletAlias is required") return nil, err } @@ -3447,7 +3785,7 @@ func (s *VtctldServer) TabletExternallyReparented(ctx context.Context, req *vtct defer panicHandler(&err) if req.Tablet == nil { - err = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "TabletExternallyReparentedRequest.Tablet must not be nil") + err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "TabletExternallyReparentedRequest.Tablet must not be nil") return nil, err } @@ -3481,7 +3819,7 @@ func (s *VtctldServer) TabletExternallyReparented(ctx context.Context, req *vtct log.Infof("TabletExternallyReparented: executing tablet type change %v -> PRIMARY on %v", tablet.Type, topoproto.TabletAliasString(req.Tablet)) ev := &events.Reparent{ ShardInfo: *shard, - NewPrimary: proto.Clone(tablet.Tablet).(*topodatapb.Tablet), + NewPrimary: tablet.Tablet.CloneVT(), OldPrimary: &topodatapb.Tablet{ Alias: shard.PrimaryAlias, Type: topodatapb.TabletType_PRIMARY, @@ -3533,9 +3871,7 @@ func (s *VtctldServer) UpdateCellInfo(ctx context.Context, req *vtctldatapb.Upda var updatedCi *topodatapb.CellInfo err = s.ts.UpdateCellInfoFields(ctx, req.Name, func(ci *topodatapb.CellInfo) error { - defer func() { - updatedCi = proto.Clone(ci).(*topodatapb.CellInfo) - }() + defer func() { updatedCi = ci.CloneVT() }() changed := false @@ -3581,9 +3917,7 @@ func (s *VtctldServer) UpdateCellsAlias(ctx context.Context, req *vtctldatapb.Up var updatedCa *topodatapb.CellsAlias err = s.ts.UpdateCellsAlias(ctx, req.Name, func(ca *topodatapb.CellsAlias) error { - defer func() { - updatedCa = proto.Clone(ca).(*topodatapb.CellsAlias) - }() + defer func() { updatedCa = ca.CloneVT() }() ca.Cells = req.CellsAlias.Cells return nil @@ -4254,7 +4588,9 @@ func (s *VtctldServer) ValidateVersionShard(ctx context.Context, req *vtctldatap } wg.Add(1) - go s.diffVersion(ctx, primaryVersion.Version, shard.PrimaryAlias, alias, &wg, &er) + go func(alias *topodatapb.TabletAlias) { + s.diffVersion(ctx, primaryVersion.Version, shard.PrimaryAlias, alias, &wg, &er) + }(alias) } wg.Wait() @@ -4352,6 +4688,68 @@ func (s *VtctldServer) ValidateVSchema(ctx context.Context, req *vtctldatapb.Val return resp, err } +// WorkflowDelete is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) WorkflowDelete(ctx context.Context, req *vtctldatapb.WorkflowDeleteRequest) (resp *vtctldatapb.WorkflowDeleteResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.WorkflowDelete") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("workflow", req.Workflow) + + resp, err = s.ws.WorkflowDelete(ctx, req) + return resp, err +} + +// WorkflowStatus is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) WorkflowStatus(ctx context.Context, req *vtctldatapb.WorkflowStatusRequest) (resp *vtctldatapb.WorkflowStatusResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.WorkflowStatus") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("workflow", req.Workflow) + + resp, err = s.ws.WorkflowStatus(ctx, req) + return resp, err +} + +// WorkflowSwitchTraffic is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) WorkflowSwitchTraffic(ctx context.Context, req *vtctldatapb.WorkflowSwitchTrafficRequest) (resp *vtctldatapb.WorkflowSwitchTrafficResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.WorkflowSwitchTraffic") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("tablet-types", req.TabletTypes) + span.Annotate("direction", req.Direction) + span.Annotate("enable-reverse-replication", req.EnableReverseReplication) + + resp, err = s.ws.WorkflowSwitchTraffic(ctx, req) + return resp, err +} + +// WorkflowUpdate is part of the vtctlservicepb.VtctldServer interface. +func (s *VtctldServer) WorkflowUpdate(ctx context.Context, req *vtctldatapb.WorkflowUpdateRequest) (resp *vtctldatapb.WorkflowUpdateResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.WorkflowUpdate") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("workflow", req.TabletRequest.Workflow) + span.Annotate("cells", req.TabletRequest.Cells) + span.Annotate("tablet_types", req.TabletRequest.TabletTypes) + span.Annotate("on_ddl", req.TabletRequest.OnDdl) + + resp, err = s.ws.WorkflowUpdate(ctx, req) + return resp, err +} + // StartServer registers a VtctldServer for RPCs on the given gRPC server. func StartServer(s *grpc.Server, ts *topo.Server) { vtctlservicepb.RegisterVtctldServer(s, NewVtctldServer(ts)) @@ -4362,7 +4760,7 @@ func (s *VtctldServer) getTopologyCell(ctx context.Context, cellPath string) (*v // extract cell and relative path parts := strings.Split(cellPath, "/") if parts[0] != "" || len(parts) < 2 { - err := vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid path: %s", cellPath) + err := vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid path: %s", cellPath) return nil, err } cell := parts[1] @@ -4371,7 +4769,7 @@ func (s *VtctldServer) getTopologyCell(ctx context.Context, cellPath string) (*v conn, err := s.ts.ConnForCell(ctx, cell) if err != nil { - err := vterrors.Errorf(vtrpc.Code_UNAVAILABLE, "error fetching connection to cell %s: %v", cell, err) + err := vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "error fetching connection to cell %s: %v", cell, err) return nil, err } @@ -4380,7 +4778,7 @@ func (s *VtctldServer) getTopologyCell(ctx context.Context, cellPath string) (*v if dataErr == nil { result, err := topo.DecodeContent(relativePath, data, false) if err != nil { - err := vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "error decoding file content for cell %s: %v", cellPath, err) + err := vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "error decoding file content for cell %s: %v", cellPath, err) return nil, err } topoCell.Data = result @@ -4392,7 +4790,7 @@ func (s *VtctldServer) getTopologyCell(ctx context.Context, cellPath string) (*v children, childrenErr := conn.ListDir(ctx, relativePath, false /*full*/) if childrenErr != nil && dataErr != nil { - err := vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cell %s with path %s has no file contents and no children: %v", cell, cellPath, err) + err := vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cell %s with path %s has no file contents and no children: %v", cell, cellPath, err) return nil, err } diff --git a/go/vt/vtctl/grpcvtctldserver/server_slow_test.go b/go/vt/vtctl/grpcvtctldserver/server_slow_test.go index 858ac271a70..3100855e370 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_slow_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_slow_test.go @@ -24,6 +24,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/topo" @@ -43,7 +45,6 @@ func TestEmergencyReparentShardSlow(t *testing.T) { tests := []struct { name string - ts *topo.Server tmc tmclient.TabletManagerClient tablets []*topodatapb.Tablet @@ -62,7 +63,6 @@ func TestEmergencyReparentShardSlow(t *testing.T) { // concurrently, so the total time is only around 30 seconds, but // that's still a long time for a unit test! name: "nil WaitReplicasTimeout and request takes 29 seconds is ok", - ts: memorytopo.NewServer("zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -140,7 +140,7 @@ func TestEmergencyReparentShardSlow(t *testing.T) { }, "zone1-0000000200": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", @@ -180,7 +180,6 @@ func TestEmergencyReparentShardSlow(t *testing.T) { }, { name: "nil WaitReplicasTimeout and request takes 31 seconds is error", - ts: memorytopo.NewServer("zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -258,7 +257,7 @@ func TestEmergencyReparentShardSlow(t *testing.T) { }, "zone1-0000000200": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", @@ -290,8 +289,6 @@ func TestEmergencyReparentShardSlow(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt @@ -302,13 +299,17 @@ func TestEmergencyReparentShardSlow(t *testing.T) { t.Skip("tt.EmergencyReparentShardRequest = nil implies test not ready to run") } - testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, ForceSetShardPrimary: true, SkipShardCreation: false, }, tt.tablets...) - vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) resp, err := vtctld.EmergencyReparentShard(ctx, tt.req) @@ -343,7 +344,6 @@ func TestPlannedReparentShardSlow(t *testing.T) { tests := []struct { name string - ts *topo.Server tmc tmclient.TabletManagerClient tablets []*topodatapb.Tablet @@ -357,7 +357,6 @@ func TestPlannedReparentShardSlow(t *testing.T) { // nil WaitReplicasTimeout in the request results in a default 30 // second WaitReplicasTimeout. name: "nil WaitReplicasTimeout and request takes 29 seconds is ok", - ts: memorytopo.NewServer("zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -402,6 +401,21 @@ func TestPlannedReparentShardSlow(t *testing.T) { Error: nil, }, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000101": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, PrimaryPositionResults: map[string]struct { Position string Error error @@ -460,7 +474,6 @@ func TestPlannedReparentShardSlow(t *testing.T) { }, { name: "nil WaitReplicasTimeout and request takes 31 seconds is error", - ts: memorytopo.NewServer("zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -505,6 +518,21 @@ func TestPlannedReparentShardSlow(t *testing.T) { Error: nil, }, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000101": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, PrimaryPositionResults: map[string]struct { Position string Error error @@ -563,21 +591,23 @@ func TestPlannedReparentShardSlow(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, ForceSetShardPrimary: true, SkipShardCreation: false, }, tt.tablets...) - vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) resp, err := vtctld.PlannedReparentShard(ctx, tt.req) @@ -610,8 +640,10 @@ func TestPlannedReparentShardSlow(t *testing.T) { func TestSleepTablet(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -704,10 +736,7 @@ func TestSleepTablet(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) diff --git a/go/vt/vtctl/grpcvtctldserver/server_test.go b/go/vt/vtctl/grpcvtctldserver/server_test.go index a482ad80e02..9999cbdc5bd 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_test.go @@ -23,6 +23,7 @@ import ( "io" "os" "sort" + "strings" "testing" "time" @@ -30,20 +31,20 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" hk "vitess.io/vitess/go/vt/hook" - "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" "vitess.io/vitess/go/vt/vtctl/localvtctldclient" + "vitess.io/vitess/go/vt/vtctl/schematools" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/vttablet/tmclienttest" @@ -94,7 +95,8 @@ func TestPanicHandler(t *testing.T) { func TestAddCellInfo(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string ts *topo.Server @@ -102,7 +104,7 @@ func TestAddCellInfo(t *testing.T) { shouldErr bool }{ { - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), req: &vtctldatapb.AddCellInfoRequest{ Name: "zone2", CellInfo: &topodatapb.CellInfo{ @@ -113,7 +115,7 @@ func TestAddCellInfo(t *testing.T) { }, { name: "cell already exists", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), req: &vtctldatapb.AddCellInfoRequest{ Name: "zone1", CellInfo: &topodatapb.CellInfo{ @@ -125,7 +127,7 @@ func TestAddCellInfo(t *testing.T) { }, { name: "no cell root", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), req: &vtctldatapb.AddCellInfoRequest{ Name: "zone2", CellInfo: &topodatapb.CellInfo{ @@ -137,10 +139,7 @@ func TestAddCellInfo(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -161,7 +160,8 @@ func TestAddCellInfo(t *testing.T) { func TestAddCellsAlias(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string ts *topo.Server @@ -170,7 +170,7 @@ func TestAddCellsAlias(t *testing.T) { shouldErr bool }{ { - ts: memorytopo.NewServer("zone1", "zone2", "zone3"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2", "zone3"), req: &vtctldatapb.AddCellsAliasRequest{ Name: "zone", Cells: []string{"zone1", "zone2", "zone3"}, @@ -178,7 +178,7 @@ func TestAddCellsAlias(t *testing.T) { }, { name: "alias exists", - ts: memorytopo.NewServer("zone1", "zone2", "zone3"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2", "zone3"), setup: func(ts *topo.Server) error { return ts.CreateCellsAlias(ctx, "zone", &topodatapb.CellsAlias{ Cells: []string{"zone1", "zone2"}, @@ -192,7 +192,7 @@ func TestAddCellsAlias(t *testing.T) { }, { name: "alias overlaps", - ts: memorytopo.NewServer("zone1", "zone2", "zone3"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2", "zone3"), setup: func(ts *topo.Server) error { return ts.CreateCellsAlias(context.Background(), "zone_a", &topodatapb.CellsAlias{ Cells: []string{"zone1", "zone3"}, @@ -207,10 +207,7 @@ func TestAddCellsAlias(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - if tt.setup != nil { err := tt.setup(tt.ts) require.NoError(t, err, "test setup failed") @@ -236,7 +233,8 @@ func TestAddCellsAlias(t *testing.T) { func TestApplyRoutingRules(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string cells []string @@ -321,11 +319,8 @@ func TestApplyRoutingRules(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - ts, factory := memorytopo.NewServerAndFactory(tt.cells...) + ts, factory := memorytopo.NewServerAndFactory(ctx, tt.cells...) if tt.topoDown { factory.SetError(errors.New("topo down for testing")) } @@ -421,12 +416,10 @@ func TestApplyVSchema(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -508,7 +501,8 @@ func TestApplyVSchema(t *testing.T) { } func TestBackup(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string ts *topo.Server @@ -520,7 +514,7 @@ func TestBackup(t *testing.T) { }{ { name: "ok", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -558,7 +552,7 @@ func TestBackup(t *testing.T) { }, { name: "cannot backup primary", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -593,7 +587,7 @@ func TestBackup(t *testing.T) { }, { name: "allow-primary", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -629,7 +623,7 @@ func TestBackup(t *testing.T) { }, { name: "no tablet", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -664,7 +658,7 @@ func TestBackup(t *testing.T) { }, { name: "midstream error", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -702,7 +696,6 @@ func TestBackup(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { if tt.tablet != nil { testutil.AddTablet(ctx, t, tt.ts, tt.tablet, nil) @@ -740,7 +733,8 @@ func TestBackup(t *testing.T) { } func TestBackupShard(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string ts *topo.Server @@ -752,7 +746,7 @@ func TestBackupShard(t *testing.T) { }{ { name: "ok", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -817,7 +811,7 @@ func TestBackupShard(t *testing.T) { }, { name: "cannot backup primary", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -860,7 +854,7 @@ func TestBackupShard(t *testing.T) { }, { name: "allow-primary", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -919,9 +913,75 @@ func TestBackupShard(t *testing.T) { assert.Equal(t, 3, len(responses), "expected 3 messages from backupclient stream") }, }, + { + name: "incremental-from-pos", + ts: memorytopo.NewServer(ctx, "zone1"), + tmc: &testutil.TabletManagerClient{ + Backups: map[string]struct { + Events []*logutilpb.Event + EventInterval time.Duration + EventJitter time.Duration + ErrorAfter time.Duration + }{ + "zone1-0000000100": { + Events: []*logutilpb.Event{{}, {}, {}}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000200": { + Position: "some-position", + }, + }, + ReplicationStatusResults: map[string]struct { + Position *replicationdatapb.Status + Error error + }{ + "zone1-0000000100": { + Position: &replicationdatapb.Status{ + ReplicationLagSeconds: 0, + }, + }, + }, + SetReplicationSourceResults: map[string]error{ + "zone1-0000000100": nil, + }, + }, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "ks", + Shard: "-", + Type: topodatapb.TabletType_REPLICA, + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Keyspace: "ks", + Shard: "-", + Type: topodatapb.TabletType_PRIMARY, + }, + }, + req: &vtctldatapb.BackupShardRequest{ + Keyspace: "ks", + Shard: "-", + IncrementalFromPos: "auto", + }, + assertion: func(t *testing.T, responses []*vtctldatapb.BackupResponse, err error) { + assert.ErrorIs(t, err, io.EOF, "expected Recv loop to end with io.EOF") + assert.Equal(t, 3, len(responses), "expected 3 messages from backupclient stream") + }, + }, { name: "no available tablet", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -974,7 +1034,6 @@ func TestBackupShard(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ @@ -1013,6 +1072,210 @@ func TestBackupShard(t *testing.T) { } } +func TestCancelSchemaMigration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + tablets []*topodatapb.Tablet + tmc *testutil.TabletManagerClient + req *vtctldatapb.CancelSchemaMigrationRequest + expected *vtctldatapb.CancelSchemaMigrationResponse + shouldErr bool + }{ + { + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{ + RowsAffected: 1, + }, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.CancelSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + expected: &vtctldatapb.CancelSchemaMigrationResponse{ + RowsAffectedByShard: map[string]uint64{ + "-80": 1, + "80-": 0, + }, + }, + }, + { + name: "no shard primary", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{}, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.CancelSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + { + name: "executeQuery failure", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Error: assert.AnError, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.CancelSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + // execute query failure + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ + AlsoSetShardPrimary: true, + }, test.tablets...) + + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + resp, err := vtctld.CancelSchemaMigration(ctx, test.req) + if test.shouldErr { + assert.Error(t, err) + return + } + + require.NoError(t, err) + utils.MustMatch(t, test.expected, resp) + }) + } +} + func TestChangeTabletType(t *testing.T) { t.Parallel() @@ -1183,129 +1446,537 @@ func TestChangeTabletType(t *testing.T) { Type: topodatapb.TabletType_PRIMARY, }, }, - req: &vtctldatapb.ChangeTabletTypeRequest{ - TabletAlias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 100, - }, - DbType: topodatapb.TabletType_PRIMARY, + req: &vtctldatapb.ChangeTabletTypeRequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + DbType: topodatapb.TabletType_PRIMARY, + }, + expected: nil, + shouldErr: true, + }, + { + name: "primary demotions not allowed", + cells: []string{"zone1"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_PRIMARY, + }, + }, + req: &vtctldatapb.ChangeTabletTypeRequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + DbType: topodatapb.TabletType_REPLICA, + }, + expected: nil, + shouldErr: true, + }, + } + + for _, tt := range tests { + tt := tt + + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, tt.cells...) + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ + TopoServer: ts, + }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) + + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ + AlsoSetShardPrimary: true, + }, tt.tablets...) + + resp, err := vtctld.ChangeTabletType(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + utils.MustMatch(t, tt.expected, resp) + + // If we are testing a dry-run, then the tablet in the actual + // topo should match the BeforeTablet in the response. Otherwise, + // the tablet in the actual topo should match the AfterTablet in + // the response. + expectedRealType := resp.AfterTablet.Type + msg := "ChangeTabletType did not cause topo update" + if tt.req.DryRun { + expectedRealType = resp.BeforeTablet.Type + msg = "dryrun type change resulted in real type change" + } + + tablet, err := ts.GetTablet(ctx, tt.req.TabletAlias) + assert.NoError(t, err, + "could not load tablet %s from topo after type change %v -> %v [dryrun=%t]", + topoproto.TabletAliasString(tt.req.TabletAlias), + resp.BeforeTablet.Type, + resp.AfterTablet.Type, + resp.WasDryRun, + ) + utils.MustMatch(t, expectedRealType, tablet.Type, msg) + }) + } + + t.Run("tabletmanager failure", func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ + TopoServer: nil, + }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) + + testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_REPLICA, + }, nil) + testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_PRIMARY, + }, &testutil.AddTabletOptions{ + AlsoSetShardPrimary: true, + }) + + _, err := vtctld.ChangeTabletType(ctx, &vtctldatapb.ChangeTabletTypeRequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + DbType: topodatapb.TabletType_RDONLY, + }) + assert.Error(t, err) + }) +} + +func TestCleanupSchemaMigration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + tablets []*topodatapb.Tablet + tmc *testutil.TabletManagerClient + req *vtctldatapb.CleanupSchemaMigrationRequest + expected *vtctldatapb.CleanupSchemaMigrationResponse + shouldErr bool + }{ + { + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{ + RowsAffected: 1, + }, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.CleanupSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + expected: &vtctldatapb.CleanupSchemaMigrationResponse{ + RowsAffectedByShard: map[string]uint64{ + "-80": 1, + "80-": 0, + }, + }, + }, + { + name: "no shard primary", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{}, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.CleanupSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + { + name: "executeQuery failure", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Error: assert.AnError, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.CleanupSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + // execute query failure + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ + AlsoSetShardPrimary: true, + }, test.tablets...) + + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + resp, err := vtctld.CleanupSchemaMigration(ctx, test.req) + if test.shouldErr { + assert.Error(t, err) + return + } + + require.NoError(t, err) + utils.MustMatch(t, test.expected, resp) + }) + } +} + +func TestCompleteSchemaMigration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + tablets []*topodatapb.Tablet + tmc *testutil.TabletManagerClient + req *vtctldatapb.CompleteSchemaMigrationRequest + expected *vtctldatapb.CompleteSchemaMigrationResponse + shouldErr bool + }{ + { + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{ + RowsAffected: 1, + }, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.CompleteSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + expected: &vtctldatapb.CompleteSchemaMigrationResponse{ + RowsAffectedByShard: map[string]uint64{ + "-80": 1, + "80-": 0, + }, + }, + }, + { + name: "no shard primary", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{}, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.CompleteSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", }, - expected: nil, shouldErr: true, }, { - name: "primary demotions not allowed", - cells: []string{"zone1"}, + name: "executeQuery failure", tablets: []*topodatapb.Tablet{ { + Keyspace: "ks", + Shard: "-80", Alias: &topodatapb.TabletAlias{ Cell: "zone1", Uid: 100, }, + Type: topodatapb.TabletType_PRIMARY, + }, + { Keyspace: "ks", - Shard: "0", - Type: topodatapb.TabletType_PRIMARY, + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, }, }, - req: &vtctldatapb.ChangeTabletTypeRequest{ - TabletAlias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 100, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Error: assert.AnError, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, }, - DbType: topodatapb.TabletType_REPLICA, }, - expected: nil, + req: &vtctldatapb.CompleteSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, shouldErr: true, }, + // execute query failure } - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer(tt.cells...) - vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ - TopoServer: ts, - }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) + ctx, Complete := context.WithCancel(context.Background()) + defer Complete() + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, - }, tt.tablets...) + }, test.tablets...) - resp, err := vtctld.ChangeTabletType(ctx, tt.req) - if tt.shouldErr { + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + resp, err := vtctld.CompleteSchemaMigration(ctx, test.req) + if test.shouldErr { assert.Error(t, err) return } - assert.NoError(t, err) - utils.MustMatch(t, tt.expected, resp) - - // If we are testing a dry-run, then the tablet in the actual - // topo should match the BeforeTablet in the response. Otherwise, - // the tablet in the actual topo should match the AfterTablet in - // the response. - expectedRealType := resp.AfterTablet.Type - msg := "ChangeTabletType did not cause topo update" - if tt.req.DryRun { - expectedRealType = resp.BeforeTablet.Type - msg = "dryrun type change resulted in real type change" - } - - tablet, err := ts.GetTablet(ctx, tt.req.TabletAlias) - assert.NoError(t, err, - "could not load tablet %s from topo after type change %v -> %v [dryrun=%t]", - topoproto.TabletAliasString(tt.req.TabletAlias), - resp.BeforeTablet.Type, - resp.AfterTablet.Type, - resp.WasDryRun, - ) - utils.MustMatch(t, expectedRealType, tablet.Type, msg) + require.NoError(t, err) + utils.MustMatch(t, test.expected, resp) }) } - - t.Run("tabletmanager failure", func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - ts := memorytopo.NewServer("zone1") - vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ - TopoServer: nil, - }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) - - testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 100, - }, - Keyspace: "ks", - Shard: "0", - Type: topodatapb.TabletType_REPLICA, - }, nil) - testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 101, - }, - Keyspace: "ks", - Shard: "0", - Type: topodatapb.TabletType_PRIMARY, - }, &testutil.AddTabletOptions{ - AlsoSetShardPrimary: true, - }) - - _, err := vtctld.ChangeTabletType(ctx, &vtctldatapb.ChangeTabletTypeRequest{ - TabletAlias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 100, - }, - DbType: topodatapb.TabletType_RDONLY, - }) - assert.Error(t, err) - }) } func TestCreateKeyspace(t *testing.T) { @@ -1545,8 +2216,9 @@ func TestCreateKeyspace(t *testing.T) { t.Skip("test not yet implemented") } - ctx := context.Background() - ts := memorytopo.NewServer(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -1822,8 +2494,9 @@ func TestCreateShard(t *testing.T) { t.Skip("focusing on other tests") } - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -1853,7 +2526,8 @@ func TestCreateShard(t *testing.T) { func TestDeleteCellInfo(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string ts *topo.Server @@ -1861,14 +2535,14 @@ func TestDeleteCellInfo(t *testing.T) { shouldErr bool }{ { - ts: memorytopo.NewServer("zone1", "zone2"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2"), req: &vtctldatapb.DeleteCellInfoRequest{ Name: "zone2", }, }, { name: "cell does not exist", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), req: &vtctldatapb.DeleteCellInfoRequest{ Name: "zone2", }, @@ -1877,10 +2551,7 @@ func TestDeleteCellInfo(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -1900,7 +2571,8 @@ func TestDeleteCellInfo(t *testing.T) { func TestDeleteCellsAlias(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string ts *topo.Server @@ -1909,7 +2581,7 @@ func TestDeleteCellsAlias(t *testing.T) { shouldErr bool }{ { - ts: memorytopo.NewServer("zone1", "zone2"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2"), setup: func(ts *topo.Server) error { return ts.CreateCellsAlias(ctx, "zone", &topodatapb.CellsAlias{ Cells: []string{"zone1", "zone2"}, @@ -1921,7 +2593,7 @@ func TestDeleteCellsAlias(t *testing.T) { }, { name: "alias does not exist", - ts: memorytopo.NewServer("zone1", "zone2"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2"), setup: func(ts *topo.Server) error { return ts.CreateCellsAlias(ctx, "zone_a", &topodatapb.CellsAlias{ Cells: []string{"zone1", "zone2"}, @@ -1935,10 +2607,7 @@ func TestDeleteCellsAlias(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - if tt.setup != nil { err := tt.setup(tt.ts) require.NoError(t, err, "test setup failed") @@ -1968,7 +2637,7 @@ func TestDeleteKeyspace(t *testing.T) { keyspaces []*vtctldatapb.Keyspace shards []*vtctldatapb.Shard srvKeyspaces map[string]map[string]*topodatapb.SrvKeyspace - before func(t *testing.T, ts *topo.Server, tt testcase) func() + before func(t *testing.T, ctx context.Context, ts *topo.Server, tt testcase) (context.Context, func()) topoErr error req *vtctldatapb.DeleteKeyspaceRequest expected *vtctldatapb.DeleteKeyspaceResponse @@ -2115,10 +2784,10 @@ func TestDeleteKeyspace(t *testing.T) { shards: nil, srvKeyspaces: nil, topoErr: nil, - before: func(t *testing.T, ts *topo.Server, tt testcase) func() { - _, unlock, err := ts.LockKeyspace(context.Background(), tt.req.Keyspace, "test.DeleteKeyspace") + before: func(t *testing.T, ctx context.Context, ts *topo.Server, tt testcase) (context.Context, func()) { + lctx, unlock, err := ts.LockKeyspace(ctx, tt.req.Keyspace, "test.DeleteKeyspace") require.NoError(t, err, "failed to lock keyspace %s before test", tt.req.Keyspace) - return func() { + return lctx, func() { unlock(&err) if !topo.IsErrType(err, topo.NoNode) { assert.NoError(t, err, "error while unlocking keyspace %s after test", tt.req.Keyspace) @@ -2146,10 +2815,10 @@ func TestDeleteKeyspace(t *testing.T) { shards: nil, srvKeyspaces: nil, topoErr: nil, - before: func(t *testing.T, ts *topo.Server, tt testcase) func() { - _, unlock, err := ts.LockKeyspace(context.Background(), tt.req.Keyspace, "test.DeleteKeyspace") + before: func(t *testing.T, ctx context.Context, ts *topo.Server, tt testcase) (context.Context, func()) { + lctx, unlock, err := ts.LockKeyspace(ctx, tt.req.Keyspace, "test.DeleteKeyspace") require.NoError(t, err, "failed to lock keyspace %s before test", tt.req.Keyspace) - return func() { + return lctx, func() { unlock(&err) if !topo.IsErrType(err, topo.NoNode) { assert.NoError(t, err, "error while unlocking keyspace %s after test", tt.req.Keyspace) @@ -2167,18 +2836,15 @@ func TestDeleteKeyspace(t *testing.T) { }, } - for _, tt := range tests { - tt := tt + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - t.Parallel() - cells := []string{"zone1", "zone2", "zone3"} - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) - defer cancel() - - ts, topofactory := memorytopo.NewServerAndFactory(cells...) + ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) + defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -2219,12 +2885,15 @@ func TestDeleteKeyspace(t *testing.T) { }() if tt.before != nil { - if after := tt.before(t, ts, tt); after != nil { + var after func() + if ctx, after = tt.before(t, ctx, ts, tt); after != nil { defer after() } } - resp, err := vtctld.DeleteKeyspace(ctx, tt.req) + requestCtx, requestCancel := context.WithTimeout(ctx, time.Millisecond*50) + defer requestCancel() + resp, err := vtctld.DeleteKeyspace(requestCtx, tt.req) if tt.shouldErr { assert.Error(t, err) @@ -2247,7 +2916,7 @@ func TestDeleteShards(t *testing.T) { replicationGraphs []*topo.ShardReplicationInfo srvKeyspaces map[string]map[string]*topodatapb.SrvKeyspace topoErr error - before func(t *testing.T, ts *topo.Server, tt testcase) func() + before func(t *testing.T, ctx context.Context, ts *topo.Server, tt testcase) (context.Context, func()) req *vtctldatapb.DeleteShardsRequest expected *vtctldatapb.DeleteShardsResponse expectedRemainingShards []*vtctldatapb.Shard @@ -2614,11 +3283,11 @@ func TestDeleteShards(t *testing.T) { }, tablets: nil, topoErr: nil, - before: func(t *testing.T, ts *topo.Server, tt testcase) func() { + before: func(t *testing.T, ctx context.Context, ts *topo.Server, tt testcase) (context.Context, func()) { shard := tt.req.Shards[0] - _, unlock, err := ts.LockShard(context.Background(), shard.Keyspace, shard.Name, "test.DeleteShard") + lctx, unlock, err := ts.LockShard(ctx, shard.Keyspace, shard.Name, "test.DeleteShard") require.NoError(t, err, "failed to lock shard %s/%s before test", shard.Keyspace, shard.Name) - return func() { + return lctx, func() { unlock(&err) if !topo.IsErrType(err, topo.NoNode) { assert.NoError(t, err, "error while unlocking shard %s/%s after test", shard.Keyspace, shard.Name) @@ -2652,11 +3321,11 @@ func TestDeleteShards(t *testing.T) { }, tablets: nil, topoErr: nil, - before: func(t *testing.T, ts *topo.Server, tt testcase) func() { + before: func(t *testing.T, ctx context.Context, ts *topo.Server, tt testcase) (context.Context, func()) { shard := tt.req.Shards[0] - _, unlock, err := ts.LockShard(context.Background(), shard.Keyspace, shard.Name, "test.DeleteShard") + lctx, unlock, err := ts.LockShard(ctx, shard.Keyspace, shard.Name, "test.DeleteShard") require.NoError(t, err, "failed to lock shard %s/%s before test", shard.Keyspace, shard.Name) - return func() { + return lctx, func() { unlock(&err) if !topo.IsErrType(err, topo.NoNode) { assert.NoError(t, err, "error while unlocking shard %s/%s after test", shard.Keyspace, shard.Name) @@ -2689,7 +3358,7 @@ func TestDeleteShards(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) defer cancel() - ts, topofactory := memorytopo.NewServerAndFactory(cells...) + ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -2707,7 +3376,7 @@ func TestDeleteShards(t *testing.T) { defer func() { topofactory.SetError(nil) - actualShards := []*vtctldatapb.Shard{} + var actualShards []*vtctldatapb.Shard keyspaces, err := ts.GetKeyspaces(ctx) require.NoError(t, err, "cannot get keyspace names to check remaining shards") @@ -2729,7 +3398,8 @@ func TestDeleteShards(t *testing.T) { } if tt.before != nil { - if after := tt.before(t, ts, tt); after != nil { + var after func() + if ctx, after = tt.before(t, ctx, ts, tt); after != nil { defer after() } } @@ -2750,7 +3420,6 @@ func TestDeleteShards(t *testing.T) { func TestDeleteSrvKeyspace(t *testing.T) { t.Parallel() - ctx := context.Background() tests := []struct { name string vschemas map[string]*vschemapb.SrvVSchema @@ -2825,7 +3494,9 @@ func TestDeleteSrvKeyspace(t *testing.T) { finalVSchemas[cell] = vschema } - ts := memorytopo.NewServer(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cells...) for cell, vschema := range tt.vschemas { err := ts.UpdateSrvVSchema(ctx, cell, vschema) require.NoError(t, err, "failed to update SrvVSchema in cell = %v, vschema = %+v", cell, vschema) @@ -3289,8 +3960,9 @@ func TestDeleteTablets(t *testing.T) { t.Skip("focusing on other tests") } - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -3354,6 +4026,9 @@ func TestDeleteTablets(t *testing.T) { func TestEmergencyReparentShard(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tests := []struct { name string ts *topo.Server @@ -3367,7 +4042,7 @@ func TestEmergencyReparentShard(t *testing.T) { }{ { name: "successful reparent", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -3442,7 +4117,7 @@ func TestEmergencyReparentShard(t *testing.T) { }, "zone1-0000000200": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", @@ -3487,7 +4162,7 @@ func TestEmergencyReparentShard(t *testing.T) { // the simplest way to trigger a failure is to attempt an ERS on a // shard that does not exist. name: "failed reparent", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: nil, req: &vtctldatapb.EmergencyReparentShardRequest{ @@ -3509,14 +4184,8 @@ func TestEmergencyReparentShard(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, ForceSetShardPrimary: true, @@ -3660,8 +4329,9 @@ func TestExecuteFetchAsApp(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddTablet(ctx, t, ts, tt.tablet, nil) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { @@ -3786,8 +4456,9 @@ func TestExecuteFetchAsDBA(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddTablet(ctx, t, ts, tt.tablet, nil) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { @@ -3808,6 +4479,9 @@ func TestExecuteFetchAsDBA(t *testing.T) { func TestExecuteHook(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tests := []struct { name string ts *topo.Server @@ -3818,7 +4492,7 @@ func TestExecuteHook(t *testing.T) { }{ { name: "ok", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ ExecuteHookResults: map[string]struct { Response *hk.HookResult @@ -3847,7 +4521,7 @@ func TestExecuteHook(t *testing.T) { }, { name: "nil hook request", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ ExecuteHookResults: map[string]struct { Response *hk.HookResult @@ -3877,7 +4551,7 @@ func TestExecuteHook(t *testing.T) { }, { name: "hook with slash", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ ExecuteHookResults: map[string]struct { Response *hk.HookResult @@ -3909,7 +4583,7 @@ func TestExecuteHook(t *testing.T) { }, { name: "no such tablet", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ ExecuteHookResults: map[string]struct { Response *hk.HookResult @@ -3939,7 +4613,7 @@ func TestExecuteHook(t *testing.T) { }, { name: "tablet hook failure", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ ExecuteHookResults: map[string]struct { Response *hk.HookResult @@ -3969,12 +4643,8 @@ func TestExecuteHook(t *testing.T) { }, } - ctx := context.Background() for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - testutil.AddTablets(ctx, t, tt.ts, nil, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) @@ -3994,8 +4664,9 @@ func TestExecuteHook(t *testing.T) { func TestFindAllShardsInKeyspace(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4035,8 +4706,9 @@ func TestFindAllShardsInKeyspace(t *testing.T) { } func TestGetBackups(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4142,8 +4814,9 @@ func TestGetBackups(t *testing.T) { func TestGetKeyspace(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4167,8 +4840,9 @@ func TestGetKeyspace(t *testing.T) { func TestGetCellInfoNames(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2", "cell3") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2", "cell3") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4177,7 +4851,7 @@ func TestGetCellInfoNames(t *testing.T) { assert.NoError(t, err) assert.ElementsMatch(t, []string{"cell1", "cell2", "cell3"}, resp.Names) - ts = memorytopo.NewServer() + ts = memorytopo.NewServer(ctx) vtctld = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4186,7 +4860,7 @@ func TestGetCellInfoNames(t *testing.T) { assert.NoError(t, err) assert.Empty(t, resp.Names) - ts, topofactory := memorytopo.NewServerAndFactory("cell1") + ts, topofactory := memorytopo.NewServerAndFactory(ctx, "cell1") vtctld = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4199,8 +4873,9 @@ func TestGetCellInfoNames(t *testing.T) { func TestGetCellInfo(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4209,7 +4884,7 @@ func TestGetCellInfo(t *testing.T) { ServerAddress: "example.com", Root: "vitess", } - input := proto.Clone(expected).(*topodatapb.CellInfo) + input := expected.CloneVT() require.NoError(t, ts.CreateCellInfo(ctx, "cell1", input)) resp, err := vtctld.GetCellInfo(ctx, &vtctldatapb.GetCellInfoRequest{Cell: "cell1"}) @@ -4226,8 +4901,9 @@ func TestGetCellInfo(t *testing.T) { func TestGetCellsAliases(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("c11", "c12", "c13", "c21", "c22") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "c11", "c12", "c13", "c21", "c22") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4240,7 +4916,7 @@ func TestGetCellsAliases(t *testing.T) { } for i, alias := range []*topodatapb.CellsAlias{alias1, alias2} { - input := proto.Clone(alias).(*topodatapb.CellsAlias) + input := alias.CloneVT() name := fmt.Sprintf("a%d", i+1) require.NoError(t, ts.CreateCellsAlias(ctx, name, input), "cannot create cells alias %d (idx = %d) = %+v", i+1, i, input) } @@ -4254,7 +4930,7 @@ func TestGetCellsAliases(t *testing.T) { assert.NoError(t, err) utils.MustMatch(t, expected, resp.Aliases) - ts, topofactory := memorytopo.NewServerAndFactory() + ts, topofactory := memorytopo.NewServerAndFactory(ctx) vtctld = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4328,8 +5004,9 @@ func TestGetFullStatus(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer(tt.cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, tt.cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ TopoServer: ts, FullStatusResult: &replicationdatapb.FullStatus{ @@ -4356,8 +5033,9 @@ func TestGetFullStatus(t *testing.T) { func TestGetKeyspaces(t *testing.T) { t.Parallel() - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4397,7 +5075,6 @@ func TestGetKeyspaces(t *testing.T) { func TestGetPermissions(t *testing.T) { t.Parallel() - ctx := context.Background() var testGetPermissionsReply = &tabletmanagerdatapb.Permissions{ UserPermissions: []*tabletmanagerdatapb.UserPermission{ { @@ -4521,7 +5198,10 @@ func TestGetPermissions(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { @@ -4544,7 +5224,6 @@ func TestGetPermissions(t *testing.T) { func TestGetRoutingRules(t *testing.T) { t.Parallel() - ctx := context.Background() tests := []struct { name string topoDown bool @@ -4588,7 +5267,10 @@ func TestGetRoutingRules(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts, factory := memorytopo.NewServerAndFactory() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts, factory := memorytopo.NewServerAndFactory(ctx) if tt.rrIn != nil { err := ts.SaveRoutingRules(ctx, tt.rrIn) require.NoError(t, err, "could not save routing rules: %+v", tt.rrIn) @@ -4614,8 +5296,9 @@ func TestGetRoutingRules(t *testing.T) { } func TestGetSchema(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") tmc := testutil.TabletManagerClient{ GetSchemaResults: map[string]struct { Schema *tabletmanagerdatapb.SchemaDefinition @@ -4768,40 +5451,241 @@ func TestGetSchema(t *testing.T) { }, shouldErr: false, }, - // error cases + // error cases + { + name: "no tablet", + req: &vtctldatapb.GetSchemaRequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "notfound", + Uid: 100, + }, + }, + expected: nil, + shouldErr: true, + }, + { + name: "no schema", + req: &vtctldatapb.GetSchemaRequest{ + TabletAlias: otherAlias, + }, + expected: nil, + shouldErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + setupSchema() + + resp, err := vtctld.GetSchema(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + utils.MustMatch(t, tt.expected, resp) + }) + } +} + +func TestGetSchemaMigrations(t *testing.T) { + t.Parallel() + + convertNamedRowsToProto3Result := func(rows []sqltypes.RowNamedValues) *querypb.QueryResult { + var ( + result sqltypes.Result + fieldNames, fieldTypes []string + ) + for i, row := range rows { + var unnamedRow sqltypes.Row + if i == 0 { + // Add to fields if this is the first row + for name, value := range row { + fieldNames = append(fieldNames, name) + fieldTypes = append(fieldTypes, strings.ToLower(querypb.Type_name[int32(value.Type())])) + } + } + + for _, name := range fieldNames { + value, ok := row[name] + if !ok { + value = sqltypes.NULL + } + + unnamedRow = append(unnamedRow, value) + } + + result.Rows = append(result.Rows, unnamedRow) + } + + result.Fields = sqltypes.MakeTestFields(strings.Join(fieldNames, "|"), strings.Join(fieldTypes, "|")) + return sqltypes.ResultToProto3(&result) + } + + tests := []struct { + name string + tablets []*topodatapb.Tablet + rowsByTablet map[string][]sqltypes.RowNamedValues + failTopo bool + req *vtctldatapb.GetSchemaMigrationsRequest + expected *vtctldatapb.GetSchemaMigrationsResponse + shouldErr bool + }{ + { + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "ks", + Shard: "-", + Type: topodatapb.TabletType_PRIMARY, + }, + }, + rowsByTablet: map[string][]sqltypes.RowNamedValues{ + "zone1-0000000100": { + map[string]sqltypes.Value{ + "migration_uuid": sqltypes.NewVarChar("uuid1"), + "keyspace": sqltypes.NewVarChar("ks"), + "shard": sqltypes.NewVarChar("-"), + "strategy": sqltypes.NewVarChar(schematools.SchemaMigrationStrategyName(vtctldatapb.SchemaMigration_ONLINE)), + }, + }, + }, + req: &vtctldatapb.GetSchemaMigrationsRequest{ + Keyspace: "ks", + }, + expected: &vtctldatapb.GetSchemaMigrationsResponse{ + Migrations: []*vtctldatapb.SchemaMigration{ + { + Uuid: "uuid1", + Keyspace: "ks", + Shard: "-", + Strategy: vtctldatapb.SchemaMigration_ONLINE, + EtaSeconds: -1, + }, + }, + }, + }, + { + name: "bad uuid input", + req: &vtctldatapb.GetSchemaMigrationsRequest{ + Uuid: "not-a-uuid", + }, + shouldErr: true, + }, + { + name: "gettablets failure", + failTopo: true, + req: &vtctldatapb.GetSchemaMigrationsRequest{ + Keyspace: "notfound", + }, + shouldErr: true, + }, { - name: "no tablet", - req: &vtctldatapb.GetSchemaRequest{ - TabletAlias: &topodatapb.TabletAlias{ - Cell: "notfound", - Uid: 100, + name: "execute fetch failure", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "ks", + Shard: "-", + Type: topodatapb.TabletType_PRIMARY, }, }, - expected: nil, + rowsByTablet: map[string][]sqltypes.RowNamedValues{ + "zone1-0000000100": nil, + }, + req: &vtctldatapb.GetSchemaMigrationsRequest{ + Keyspace: "ks", + }, shouldErr: true, }, { - name: "no schema", - req: &vtctldatapb.GetSchemaRequest{ - TabletAlias: otherAlias, + name: "bad row data", + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Keyspace: "ks", + Shard: "-", + Type: topodatapb.TabletType_PRIMARY, + }, + }, + rowsByTablet: map[string][]sqltypes.RowNamedValues{ + "zone1-0000000100": { + {"requested_timestamp": sqltypes.NewVarChar("invalid timestamp")}, + }, + }, + req: &vtctldatapb.GetSchemaMigrationsRequest{ + Keyspace: "ks", }, - expected: nil, shouldErr: true, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - setupSchema() + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() - resp, err := vtctld.GetSchema(ctx, tt.req) - if tt.shouldErr { + tmc := &testutil.TabletManagerClient{ + ExecuteFetchAsDbaResults: make(map[string]struct { + Response *querypb.QueryResult + Error error + }, len(test.rowsByTablet)), + } + + if test.rowsByTablet == nil { + test.rowsByTablet = map[string][]sqltypes.RowNamedValues{} + } + for alias, rows := range test.rowsByTablet { + switch rows { + case nil: + tmc.ExecuteFetchAsDbaResults[alias] = struct { + Response *querypb.QueryResult + Error error + }{ + Error: assert.AnError, + } + default: + tmc.ExecuteFetchAsDbaResults[alias] = struct { + Response *querypb.QueryResult + Error error + }{ + Response: convertNamedRowsToProto3Result(rows), + } + } + } + + cells := []string{"zone1", "zone2", "zone3"} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, factory := memorytopo.NewServerAndFactory(ctx, cells...) + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{AlsoSetShardPrimary: true}, test.tablets...) + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + if test.failTopo { + factory.SetError(assert.AnError) + } + + resp, err := vtctld.GetSchemaMigrations(ctx, test.req) + if test.shouldErr { assert.Error(t, err) return } - assert.NoError(t, err) - utils.MustMatch(t, tt.expected, resp) + require.NoError(t, err) + utils.MustMatch(t, test.expected, resp) }) } } @@ -4874,8 +5758,9 @@ func TestGetShard(t *testing.T) { cells := []string{"zone1", "zone2", "zone3"} - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -4900,7 +5785,6 @@ func TestGetShard(t *testing.T) { func TestGetSrvKeyspaceNames(t *testing.T) { t.Parallel() - ctx := context.Background() tests := []struct { name string srvKeyspacesByCell map[string]map[string]*topodatapb.SrvKeyspace @@ -4999,7 +5883,9 @@ func TestGetSrvKeyspaceNames(t *testing.T) { cells = append(cells, cell) } - ts, factory := memorytopo.NewServerAndFactory(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, factory := memorytopo.NewServerAndFactory(ctx, cells...) for cell, srvKeyspaces := range tt.srvKeyspacesByCell { for ks, srvks := range srvKeyspaces { @@ -5153,8 +6039,6 @@ func TestGetSrvKeyspaces(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt @@ -5165,7 +6049,10 @@ func TestGetSrvKeyspaces(t *testing.T) { t.SkipNow() } - ts, topofactory := memorytopo.NewServerAndFactory(tt.cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts, topofactory := memorytopo.NewServerAndFactory(ctx, tt.cells...) testutil.AddSrvKeyspaces(t, ts, tt.srvKeyspaces...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { @@ -5192,8 +6079,9 @@ func TestGetSrvKeyspaces(t *testing.T) { func TestGetSrvVSchema(t *testing.T) { t.Parallel() - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory("zone1", "zone2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1", "zone2") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -5402,8 +6290,9 @@ func TestGetSrvVSchemas(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory("zone1", "zone2", "zone3") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1", "zone2", "zone3") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -5461,8 +6350,9 @@ func TestGetSrvVSchemas(t *testing.T) { func TestGetTablet(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -5638,7 +6528,7 @@ func TestGetTablets(t *testing.T) { Keyspace: "ks2", Shard: "-", Type: topodatapb.TabletType_PRIMARY, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), }, { Alias: &topodatapb.TabletAlias{ @@ -5649,7 +6539,7 @@ func TestGetTablets(t *testing.T) { Shard: "-", Hostname: "stale.primary", Type: topodatapb.TabletType_PRIMARY, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), }, }, req: &vtctldatapb.GetTabletsRequest{ @@ -5665,7 +6555,7 @@ func TestGetTablets(t *testing.T) { Keyspace: "ks2", Shard: "-", Type: topodatapb.TabletType_PRIMARY, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), }, { Alias: &topodatapb.TabletAlias{ @@ -5676,7 +6566,7 @@ func TestGetTablets(t *testing.T) { Shard: "-", Hostname: "stale.primary", Type: topodatapb.TabletType_UNKNOWN, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), }, }, shouldErr: false, @@ -5694,7 +6584,7 @@ func TestGetTablets(t *testing.T) { Shard: "-", Hostname: "slightly less stale", Type: topodatapb.TabletType_PRIMARY, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), }, { Alias: &topodatapb.TabletAlias{ @@ -5705,7 +6595,7 @@ func TestGetTablets(t *testing.T) { Keyspace: "ks1", Shard: "-", Type: topodatapb.TabletType_PRIMARY, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), }, { Alias: &topodatapb.TabletAlias{ @@ -5716,7 +6606,7 @@ func TestGetTablets(t *testing.T) { Keyspace: "ks1", Shard: "-", Type: topodatapb.TabletType_PRIMARY, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 16, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 16, 4, 5, 0, time.UTC)), }, }, req: &vtctldatapb.GetTabletsRequest{}, @@ -5730,7 +6620,7 @@ func TestGetTablets(t *testing.T) { Shard: "-", Hostname: "slightly less stale", Type: topodatapb.TabletType_UNKNOWN, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)), }, { Alias: &topodatapb.TabletAlias{ @@ -5741,7 +6631,7 @@ func TestGetTablets(t *testing.T) { Keyspace: "ks1", Shard: "-", Type: topodatapb.TabletType_UNKNOWN, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 14, 4, 5, 0, time.UTC)), }, { Alias: &topodatapb.TabletAlias{ @@ -5752,7 +6642,7 @@ func TestGetTablets(t *testing.T) { Keyspace: "ks1", Shard: "-", Type: topodatapb.TabletType_PRIMARY, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(2006, time.January, 2, 16, 4, 5, 0, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(2006, time.January, 2, 16, 4, 5, 0, time.UTC)), }, }, shouldErr: false, @@ -5940,8 +6830,9 @@ func TestGetTablets(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer(tt.cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, tt.cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -5963,8 +6854,9 @@ func TestGetTablets(t *testing.T) { func TestGetTopologyPath(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2", "cell3") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2", "cell3") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -6051,15 +6943,14 @@ func TestGetTopologyPath(t *testing.T) { func TestGetVSchema(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) t.Run("found", func(t *testing.T) { - t.Parallel() - err := ts.SaveVSchema(ctx, "testkeyspace", &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ @@ -6098,11 +6989,216 @@ func TestGetVSchema(t *testing.T) { }) } +func TestLaunchSchemaMigration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + tablets []*topodatapb.Tablet + tmc *testutil.TabletManagerClient + req *vtctldatapb.LaunchSchemaMigrationRequest + expected *vtctldatapb.LaunchSchemaMigrationResponse + shouldErr bool + }{ + { + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{ + RowsAffected: 1, + }, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.LaunchSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + expected: &vtctldatapb.LaunchSchemaMigrationResponse{ + RowsAffectedByShard: map[string]uint64{ + "-80": 1, + "80-": 0, + }, + }, + }, + { + name: "no shard primary", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{}, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.LaunchSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + { + name: "executeQuery failure", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Error: assert.AnError, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.LaunchSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + // execute query failure + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + + ctx, Launch := context.WithCancel(context.Background()) + defer Launch() + ts := memorytopo.NewServer(ctx, "zone1") + + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ + AlsoSetShardPrimary: true, + }, test.tablets...) + + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + resp, err := vtctld.LaunchSchemaMigration(ctx, test.req) + if test.shouldErr { + assert.Error(t, err) + return + } + + require.NoError(t, err) + utils.MustMatch(t, test.expected, resp) + }) + } +} + func TestPingTablet(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -6167,10 +7263,7 @@ func TestPingTablet(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -6191,6 +7284,9 @@ func TestPingTablet(t *testing.T) { func TestPlannedReparentShard(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tests := []struct { name string ts *topo.Server @@ -6204,7 +7300,7 @@ func TestPlannedReparentShard(t *testing.T) { }{ { name: "successful reparent", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -6249,6 +7345,21 @@ func TestPlannedReparentShard(t *testing.T) { Error: nil, }, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000101": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, PrimaryPositionResults: map[string]struct { Position string Error error @@ -6309,7 +7420,7 @@ func TestPlannedReparentShard(t *testing.T) { // the simplest way to trigger a failure is to attempt an PRS on a // shard that does not exist. name: "failed reparent", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: nil, req: &vtctldatapb.PlannedReparentShardRequest{ Keyspace: "testkeyspace", @@ -6330,14 +7441,8 @@ func TestPlannedReparentShard(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, ForceSetShardPrimary: true, @@ -6380,8 +7485,10 @@ func TestRebuildKeyspaceGraph(t *testing.T) { t.Run("ok", func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddKeyspace(ctx, t, ts, &vtctldatapb.Keyspace{ Name: "testkeyspace", }) @@ -6398,7 +7505,10 @@ func TestRebuildKeyspaceGraph(t *testing.T) { t.Run("no such keyspace", func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -6412,8 +7522,10 @@ func TestRebuildKeyspaceGraph(t *testing.T) { t.Run("topo unavailable", func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts, factory := memorytopo.NewServerAndFactory("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts, factory := memorytopo.NewServerAndFactory(ctx, "zone1") testutil.AddKeyspace(ctx, t, ts, &vtctldatapb.Keyspace{ Name: "testkeyspace", }) @@ -6431,8 +7543,10 @@ func TestRebuildKeyspaceGraph(t *testing.T) { t.Run("lock error", func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddKeyspace(ctx, t, ts, &vtctldatapb.Keyspace{ Name: "testkeyspace", }) @@ -6440,15 +7554,13 @@ func TestRebuildKeyspaceGraph(t *testing.T) { return NewVtctldServer(ts) }) - _, unlock, lerr := ts.LockKeyspace(context.Background(), "testkeyspace", "test lock") + lctx, unlock, lerr := ts.LockKeyspace(context.Background(), "testkeyspace", "test lock") require.NoError(t, lerr, "could not lock keyspace for testing") defer unlock(&lerr) defer func() { require.NoError(t, lerr, "could not unlock testkeyspace after test") }() - ctx, cancel := context.WithTimeout(ctx, time.Millisecond*50) - defer cancel() - _, err := vtctld.RebuildKeyspaceGraph(ctx, &vtctldatapb.RebuildKeyspaceGraphRequest{ + _, err := vtctld.RebuildKeyspaceGraph(lctx, &vtctldatapb.RebuildKeyspaceGraphRequest{ Keyspace: "testkeyspace", }) assert.Error(t, err) @@ -6458,7 +7570,6 @@ func TestRebuildKeyspaceGraph(t *testing.T) { func TestRebuildVSchemaGraph(t *testing.T) { t.Parallel() - ctx := context.Background() req := &vtctldatapb.RebuildVSchemaGraphRequest{} tests := []struct { name string @@ -6480,7 +7591,10 @@ func TestRebuildVSchemaGraph(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts, factory := memorytopo.NewServerAndFactory("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts, factory := memorytopo.NewServerAndFactory(ctx, "zone1") if tt.topoDown { factory.SetError(errors.New("topo down for testing")) } @@ -6502,7 +7616,9 @@ func TestRebuildVSchemaGraph(t *testing.T) { func TestRefreshState(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tests := []struct { name string ts *topo.Server @@ -6513,7 +7629,7 @@ func TestRefreshState(t *testing.T) { }{ { name: "success", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablet: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -6530,13 +7646,13 @@ func TestRefreshState(t *testing.T) { }, { name: "tablet alias nil", - ts: memorytopo.NewServer(), + ts: memorytopo.NewServer(ctx), req: &vtctldatapb.RefreshStateRequest{}, shouldErr: true, }, { name: "tablet not found", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablet: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -6554,7 +7670,7 @@ func TestRefreshState(t *testing.T) { }, { name: "RefreshState failed", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablet: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -6573,11 +7689,7 @@ func TestRefreshState(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - var tmc testutil.TabletManagerClient if tt.tablet != nil { testutil.AddTablet(ctx, t, tt.ts, tt.tablet, nil) @@ -6603,7 +7715,8 @@ func TestRefreshState(t *testing.T) { func TestRefreshStateByShard(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string ts *topo.Server @@ -6615,7 +7728,7 @@ func TestRefreshStateByShard(t *testing.T) { }{ { name: "success", - ts: memorytopo.NewServer("zone1", "zone2"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2"), tablets: []*topodatapb.Tablet{ { Hostname: "zone1-100", @@ -6648,7 +7761,7 @@ func TestRefreshStateByShard(t *testing.T) { }, { name: "cell filtering", - ts: memorytopo.NewServer("zone1", "zone2"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2"), tablets: []*topodatapb.Tablet{ { Hostname: "zone1-100", @@ -6686,7 +7799,7 @@ func TestRefreshStateByShard(t *testing.T) { }, { name: "partial result", - ts: memorytopo.NewServer("zone1", "zone2"), + ts: memorytopo.NewServer(ctx, "zone1", "zone2"), tablets: []*topodatapb.Tablet{ { Hostname: "zone1-100", @@ -6723,13 +7836,13 @@ func TestRefreshStateByShard(t *testing.T) { }, { name: "missing keyspace argument", - ts: memorytopo.NewServer(), + ts: memorytopo.NewServer(ctx), req: &vtctldatapb.RefreshStateByShardRequest{}, shouldErr: true, }, { name: "missing shard argument", - ts: memorytopo.NewServer(), + ts: memorytopo.NewServer(ctx), req: &vtctldatapb.RefreshStateByShardRequest{ Keyspace: "ks", }, @@ -6737,7 +7850,7 @@ func TestRefreshStateByShard(t *testing.T) { }, { name: "shard not found", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -6758,11 +7871,7 @@ func TestRefreshStateByShard(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - require.Equal(t, len(tt.tablets), len(tt.refreshStateErrors), "Invalid test case: must have one refreshStateError for each tablet") tmc := &testutil.TabletManagerClient{ @@ -6869,11 +7978,13 @@ func TestReloadSchema(t *testing.T) { }, } - ctx := context.Background() for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { @@ -6961,13 +8072,15 @@ func TestReloadSchemaKeyspace(t *testing.T) { }, } - ctx := context.Background() for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer("zone1", "zone2", "zone3") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, }, tt.tablets...) @@ -7117,13 +8230,15 @@ func TestReloadSchemaShard(t *testing.T) { }, } - ctx := context.Background() for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer("zone1", "zone2", "zone3") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, }, tt.tablets...) @@ -7144,8 +8259,9 @@ func TestReloadSchemaShard(t *testing.T) { } func TestRemoveBackup(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -7334,8 +8450,9 @@ func TestRemoveKeyspaceCell(t *testing.T) { cells := []string{"zone1", "zone2", "zone3"} - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -7622,8 +8739,9 @@ func TestRemoveShardCell(t *testing.T) { cells := []string{"zone1", "zone2", "zone3"} - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -8231,8 +9349,9 @@ func TestReparentTablet(t *testing.T) { cells := []string{"zone1", "zone2", "zone3"} - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -8259,7 +9378,9 @@ func TestReparentTablet(t *testing.T) { } func TestRestoreFromBackup(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tests := []struct { name string ts *topo.Server @@ -8271,7 +9392,7 @@ func TestRestoreFromBackup(t *testing.T) { }{ { name: "ok", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ RestoreFromBackupResults: map[string]struct { Events []*logutilpb.Event @@ -8320,7 +9441,7 @@ func TestRestoreFromBackup(t *testing.T) { }, { name: "no such tablet", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ Backups: map[string]struct { Events []*logutilpb.Event @@ -8358,7 +9479,6 @@ func TestRestoreFromBackup(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ @@ -8397,10 +9517,211 @@ func TestRestoreFromBackup(t *testing.T) { } } +func TestRetrySchemaMigration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + tablets []*topodatapb.Tablet + tmc *testutil.TabletManagerClient + req *vtctldatapb.RetrySchemaMigrationRequest + expected *vtctldatapb.RetrySchemaMigrationResponse + shouldErr bool + }{ + { + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{ + RowsAffected: 1, + }, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.RetrySchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + expected: &vtctldatapb.RetrySchemaMigrationResponse{ + RowsAffectedByShard: map[string]uint64{ + "-80": 1, + "80-": 0, + }, + }, + }, + { + name: "no shard primary", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{}, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.RetrySchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + { + name: "executeQuery failure", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Error: assert.AnError, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.RetrySchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + // execute query failure + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ + AlsoSetShardPrimary: true, + }, test.tablets...) + + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts) + }) + + resp, err := vtctld.RetrySchemaMigration(ctx, test.req) + if test.shouldErr { + assert.Error(t, err) + return + } + + require.NoError(t, err) + utils.MustMatch(t, test.expected, resp) + }) + } +} + func TestRunHealthCheck(t *testing.T) { t.Parallel() - ctx := context.Background() tests := []struct { name string tablets []*topodatapb.Tablet @@ -8483,7 +9804,10 @@ func TestRunHealthCheck(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { @@ -8555,14 +9879,15 @@ func TestSetKeyspaceDurabilityPolicy(t *testing.T) { }, } - ctx := context.Background() - for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddKeyspaces(ctx, t, ts, tt.keyspaces...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { @@ -8583,11 +9908,14 @@ func TestSetKeyspaceDurabilityPolicy(t *testing.T) { func TestSetShardIsPrimaryServing(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + type testcase struct { name string ctx context.Context ts *topo.Server - setup func(*testing.T, *testcase) + setup func(*testing.T, *testcase) context.Context teardown func(*testing.T, *testcase) req *vtctldatapb.SetShardIsPrimaryServingRequest expected *vtctldatapb.SetShardIsPrimaryServingResponse @@ -8597,14 +9925,15 @@ func TestSetShardIsPrimaryServing(t *testing.T) { tests := []*testcase{ { name: "ok", - setup: func(t *testing.T, tt *testcase) { - tt.ctx = context.Background() - tt.ts = memorytopo.NewServer("zone1") + setup: func(t *testing.T, tt *testcase) context.Context { + tt.ctx = ctx + tt.ts = memorytopo.NewServer(ctx, "zone1") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", Name: "-", Shard: &topodatapb.Shard{}, }) + return tt.ctx }, req: &vtctldatapb.SetShardIsPrimaryServingRequest{ Keyspace: "testkeyspace", @@ -8619,17 +9948,17 @@ func TestSetShardIsPrimaryServing(t *testing.T) { }, { name: "lock error", - setup: func(t *testing.T, tt *testcase) { + setup: func(t *testing.T, tt *testcase) context.Context { var cancel func() - tt.ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50) - tt.ts = memorytopo.NewServer("zone1") + tt.ctx, cancel = context.WithTimeout(ctx, time.Millisecond*50) + tt.ts = memorytopo.NewServer(ctx, "zone1") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", Name: "-", Shard: &topodatapb.Shard{}, }) - _, unlock, err := tt.ts.LockKeyspace(tt.ctx, "testkeyspace", "test lock") + lctx, unlock, err := tt.ts.LockKeyspace(tt.ctx, "testkeyspace", "test lock") require.NoError(t, err) tt.teardown = func(t *testing.T, tt *testcase) { var err error @@ -8637,6 +9966,7 @@ func TestSetShardIsPrimaryServing(t *testing.T) { assert.NoError(t, err) cancel() } + return lctx }, req: &vtctldatapb.SetShardIsPrimaryServingRequest{ Keyspace: "testkeyspace", @@ -8649,13 +9979,9 @@ func TestSetShardIsPrimaryServing(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - if tt.setup != nil { - tt.setup(t, tt) + tt.ctx = tt.setup(t, tt) } if tt.teardown != nil { defer tt.teardown(t, tt) @@ -8679,6 +10005,9 @@ func TestSetShardIsPrimaryServing(t *testing.T) { func TestSetShardTabletControl(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + type testcase struct { name string ctx context.Context @@ -8694,8 +10023,8 @@ func TestSetShardTabletControl(t *testing.T) { { name: "ok", setup: func(t *testing.T, tt *testcase) { - tt.ctx = context.Background() - tt.ts = memorytopo.NewServer("zone1", "zone2", "zone3") + tt.ctx = ctx + tt.ts = memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", @@ -8743,8 +10072,8 @@ func TestSetShardTabletControl(t *testing.T) { { name: "remove tabletcontrols", setup: func(t *testing.T, tt *testcase) { - tt.ctx = context.Background() - tt.ts = memorytopo.NewServer("zone1", "zone2", "zone3") + tt.ctx = ctx + tt.ts = memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", @@ -8778,8 +10107,8 @@ func TestSetShardTabletControl(t *testing.T) { { name: "disable queryservice", setup: func(t *testing.T, tt *testcase) { - tt.ctx = context.Background() - tt.ts = memorytopo.NewServer("zone1", "zone2", "zone3") + tt.ctx = ctx + tt.ts = memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", @@ -8873,8 +10202,8 @@ func TestSetShardTabletControl(t *testing.T) { name: "keyspace lock error", setup: func(t *testing.T, tt *testcase) { var cancel func() - tt.ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond*50) - tt.ts = memorytopo.NewServer("zone1") + tt.ctx, cancel = context.WithTimeout(ctx, time.Millisecond*50) + tt.ts = memorytopo.NewServer(ctx, "zone1") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", Name: "-", @@ -8900,10 +10229,7 @@ func TestSetShardTabletControl(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - if tt.setup != nil { tt.setup(t, tt) } @@ -9103,13 +10429,15 @@ func TestSetWritable(t *testing.T) { }, } - ctx := context.Background() for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer(tt.cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, tt.cells...) defer ts.Close() testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) @@ -9131,8 +10459,9 @@ func TestSetWritable(t *testing.T) { func TestShardReplicationAdd(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -9185,6 +10514,9 @@ func TestShardReplicationAdd(t *testing.T) { func TestShardReplicationPositions(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tests := []struct { name string ts *topo.Server @@ -9197,7 +10529,7 @@ func TestShardReplicationPositions(t *testing.T) { }{ { name: "success", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -9276,7 +10608,7 @@ func TestShardReplicationPositions(t *testing.T) { }, { name: "timeouts are nonfatal", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -9358,7 +10690,7 @@ func TestShardReplicationPositions(t *testing.T) { }, { name: "other rpc errors are fatal", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -9408,7 +10740,7 @@ func TestShardReplicationPositions(t *testing.T) { }, { name: "nonexistent shard", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), req: &vtctldatapb.ShardReplicationPositionsRequest{ Keyspace: "testkeyspace", Shard: "-", @@ -9419,13 +10751,7 @@ func TestShardReplicationPositions(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, SkipShardCreation: false, @@ -9435,14 +10761,14 @@ func TestShardReplicationPositions(t *testing.T) { return NewVtctldServer(ts) }) + requestCtx := ctx if tt.ctxTimeout > 0 { - _ctx, cancel := context.WithTimeout(ctx, tt.ctxTimeout) - defer cancel() - - ctx = _ctx + var requestCancel func() + requestCtx, requestCancel = context.WithTimeout(ctx, tt.ctxTimeout) + defer requestCancel() } - resp, err := vtctld.ShardReplicationPositions(ctx, tt.req) + resp, err := vtctld.ShardReplicationPositions(requestCtx, tt.req) if tt.shouldErr { assert.Error(t, err) @@ -9458,8 +10784,10 @@ func TestShardReplicationPositions(t *testing.T) { func TestShardReplicationRemove(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -9617,8 +10945,9 @@ func TestSourceShardAdd(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -9751,8 +11080,9 @@ func TestSourceShardDelete(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) @@ -9931,13 +11261,15 @@ func TestStartReplication(t *testing.T) { }, } - ctx := context.Background() for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer(tt.cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, tt.cells...) defer ts.Close() testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ @@ -10068,13 +11400,15 @@ func TestStopReplication(t *testing.T) { }, } - ctx := context.Background() for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer(tt.cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, tt.cells...) defer ts.Close() testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) @@ -10459,8 +11793,9 @@ func TestTabletExternallyReparented(t *testing.T) { cells := []string{"zone1", "zone2", "zone3"} - ctx := context.Background() - ts, topofactory := memorytopo.NewServerAndFactory(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) tmc := testutil.TabletManagerClient{ TopoServer: ts, } @@ -10513,7 +11848,6 @@ func TestTabletExternallyReparented(t *testing.T) { func TestUpdateCellInfo(t *testing.T) { t.Parallel() - ctx := context.Background() tests := []struct { name string cells map[string]*topodatapb.CellInfo @@ -10637,7 +11971,10 @@ func TestUpdateCellInfo(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts, factory := memorytopo.NewServerAndFactory() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts, factory := memorytopo.NewServerAndFactory(ctx) for name, cell := range tt.cells { err := ts.CreateCellInfo(ctx, name, cell) require.NoError(t, err, "failed to create cell %s: %+v for test", name, cell) @@ -10665,7 +12002,6 @@ func TestUpdateCellInfo(t *testing.T) { func TestUpdateCellsAlias(t *testing.T) { t.Parallel() - ctx := context.Background() tests := []struct { name string cells []string @@ -10776,7 +12112,10 @@ func TestUpdateCellsAlias(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer(tt.cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, tt.cells...) for name, cells := range tt.aliases { for _, cell := range cells { // We use UpdateCellInfoFields rather than CreateCellInfo @@ -10813,8 +12152,9 @@ func TestUpdateCellsAlias(t *testing.T) { func TestValidate(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1", "zone2", "zone3") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") tablets := []*topodatapb.Tablet{ { Keyspace: "ks1", @@ -10928,8 +12268,9 @@ func TestValidate(t *testing.T) { } func TestValidateSchemaKeyspace(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("zone1", "zone2", "zone3") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1", "zone2", "zone3") tmc := testutil.TabletManagerClient{ GetSchemaResults: map[string]struct { Schema *tabletmanagerdatapb.SchemaDefinition @@ -11155,8 +12496,9 @@ func TestValidateSchemaKeyspace(t *testing.T) { } func TestValidateVersionKeyspace(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("zone1", "zone2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1", "zone2") tmc := testutil.TabletManagerClient{ GetSchemaResults: map[string]struct { Schema *tabletmanagerdatapb.SchemaDefinition @@ -11274,8 +12616,9 @@ func TestValidateVersionKeyspace(t *testing.T) { func TestValidateVersionShard(t *testing.T) { t.Parallel() - ctx := context.Background() - ts := memorytopo.NewServer("zone1", "zone2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1", "zone2") tmc := testutil.TabletManagerClient{ GetSchemaResults: map[string]struct { Schema *tabletmanagerdatapb.SchemaDefinition @@ -11394,11 +12737,12 @@ func TestValidateShard(t *testing.T) { shouldErr bool } - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []*testcase{ { name: "ok", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: nil, setup: func(t *testing.T, tt *testcase) { tablets := []*topodatapb.Tablet{ @@ -11435,7 +12779,7 @@ func TestValidateShard(t *testing.T) { }, { name: "no shard", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: nil, setup: func(t *testing.T, tt *testcase) { tablets := []*topodatapb.Tablet{ @@ -11476,7 +12820,7 @@ func TestValidateShard(t *testing.T) { }, { name: "no primary in shard", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: nil, setup: func(t *testing.T, tt *testcase) { tablets := []*topodatapb.Tablet{ @@ -11503,7 +12847,7 @@ func TestValidateShard(t *testing.T) { }, { name: "two primaries in shard", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: nil, setup: func(t *testing.T, tt *testcase) { tablets := []*topodatapb.Tablet{ @@ -11546,7 +12890,7 @@ func TestValidateShard(t *testing.T) { }, { name: "ping_tablets/ok", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ GetReplicasResults: map[string]struct { Replicas []string @@ -11615,7 +12959,7 @@ func TestValidateShard(t *testing.T) { }, { name: "ping_tablets/GetReplicas failed", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ GetReplicasResults: map[string]struct { Replicas []string @@ -11686,7 +13030,7 @@ func TestValidateShard(t *testing.T) { }, { name: "ping_tablets/no replicas", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ GetReplicasResults: map[string]struct { Replicas []string @@ -11757,7 +13101,7 @@ func TestValidateShard(t *testing.T) { }, { name: "ping_tablets/orphaned replica", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ GetReplicasResults: map[string]struct { Replicas []string @@ -11831,7 +13175,7 @@ func TestValidateShard(t *testing.T) { }, { name: "ping_tablets/Ping failed", - ts: memorytopo.NewServer("zone1"), + ts: memorytopo.NewServer(ctx, "zone1"), tmc: &testutil.TabletManagerClient{ GetReplicasResults: map[string]struct { Replicas []string @@ -11903,10 +13247,7 @@ func TestValidateShard(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { - t.Parallel() - if tt.setup != nil { tt.setup(t, tt) } diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/proto_compare.go b/go/vt/vtctl/grpcvtctldserver/testutil/proto_compare.go index 45692e70114..20ad0f692b0 100644 --- a/go/vt/vtctl/grpcvtctldserver/testutil/proto_compare.go +++ b/go/vt/vtctl/grpcvtctldserver/testutil/proto_compare.go @@ -23,10 +23,8 @@ import ( "testing" "github.com/stretchr/testify/assert" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/test/utils" - logutilpb "vitess.io/vitess/go/vt/proto/logutil" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" @@ -37,11 +35,9 @@ import ( // respective Events field in the comparison. func AssertEmergencyReparentShardResponsesEqual(t *testing.T, expected *vtctldatapb.EmergencyReparentShardResponse, actual *vtctldatapb.EmergencyReparentShardResponse, msgAndArgs ...any) { t.Helper() - - expected = proto.Clone(expected).(*vtctldatapb.EmergencyReparentShardResponse) + expected = expected.CloneVT() expected.Events = nil - - actual = proto.Clone(actual).(*vtctldatapb.EmergencyReparentShardResponse) + actual = actual.CloneVT() actual.Events = nil utils.MustMatch(t, expected, actual) @@ -104,11 +100,9 @@ func clearEvents(events []*logutilpb.Event, f func(*logutilpb.Event) *logutilpb. // respective Events field in the comparison. func AssertPlannedReparentShardResponsesEqual(t *testing.T, expected *vtctldatapb.PlannedReparentShardResponse, actual *vtctldatapb.PlannedReparentShardResponse) { t.Helper() - - expected = proto.Clone(expected).(*vtctldatapb.PlannedReparentShardResponse) + expected = expected.CloneVT() expected.Events = nil - - actual = proto.Clone(actual).(*vtctldatapb.PlannedReparentShardResponse) + actual = actual.CloneVT() actual.Events = nil utils.MustMatch(t, expected, actual) diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go index 020eed4bd81..ba7c8477d22 100644 --- a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go +++ b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go @@ -214,6 +214,13 @@ type TabletManagerClient struct { Response *hk.HookResult Error error } + // keyed by tablet alias. + ExecuteQueryDelays map[string]time.Duration + // keyed by tablet alias. + ExecuteQueryResults map[string]struct { + Response *querypb.QueryResult + Error error + } // FullStatus result FullStatusResult *replicationdatapb.FullStatus // keyed by tablet alias. @@ -281,6 +288,11 @@ type TabletManagerClient struct { Position *replicationdatapb.Status Error error } + PrimaryStatusDelays map[string]time.Duration + PrimaryStatusResults map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + } RestoreFromBackupResults map[string]struct { Events []*logutilpb.Event EventInterval time.Duration @@ -343,6 +355,10 @@ type TabletManagerClient struct { // WaitForPosition(tablet *topodatapb.Tablet, position string) error, so we // key by tablet alias and then by position. WaitForPositionResults map[string]map[string]error + // tablet alias => duration + CheckThrottlerDelays map[string]time.Duration + // keyed by tablet alias + CheckThrottlerResults map[string]*tabletmanagerdatapb.CheckThrottlerResponse } type backupStreamAdapter struct { @@ -559,6 +575,30 @@ func (fake *TabletManagerClient) ExecuteHook(ctx context.Context, tablet *topoda return nil, fmt.Errorf("%w: no ExecuteHook result set for tablet %s", assert.AnError, key) } +// ExecuteQuery is part of the tmclient.TabletManagerClient interface. +func (fake *TabletManagerClient) ExecuteQuery(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ExecuteQueryRequest) (*querypb.QueryResult, error) { + if fake.ExecuteQueryResults == nil { + return nil, fmt.Errorf("%w: no ExecuteQuery results on fake TabletManagerClient", assert.AnError) + } + + key := topoproto.TabletAliasString(tablet.Alias) + if fake.ExecuteQueryDelays != nil { + if delay, ok := fake.ExecuteQueryDelays[key]; ok { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + if result, ok := fake.ExecuteQueryResults[key]; ok { + return result.Response, result.Error + } + + return nil, fmt.Errorf("%w: no ExecuteQuery result set for tablet %s", assert.AnError, key) +} + // FullStatus is part of the tmclient.TabletManagerClient interface. func (fake *TabletManagerClient) FullStatus(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.FullStatus, error) { if fake.FullStatusResult != nil { @@ -572,7 +612,7 @@ func (fake *TabletManagerClient) FullStatus(ctx context.Context, tablet *topodat return nil, fmt.Errorf("no output set for FullStatus") } -// GetPermission is part of the tmclient.TabletManagerClient interface. +// GetPermissions is part of the tmclient.TabletManagerClient interface. func (fake *TabletManagerClient) GetPermissions(ctx context.Context, tablet *topodatapb.Tablet) (*tabletmanagerdatapb.Permissions, error) { if fake.GetPermissionsResults == nil { return nil, assert.AnError @@ -870,6 +910,32 @@ func (fake *TabletManagerClient) ReplicationStatus(ctx context.Context, tablet * return nil, assert.AnError } +// PrimaryStatus is part of the tmclient.TabletManagerClient interface. +func (fake *TabletManagerClient) PrimaryStatus(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.PrimaryStatus, error) { + if fake.PrimaryStatusResults == nil { + return nil, assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + if fake.PrimaryStatusDelays != nil { + if delay, ok := fake.PrimaryStatusDelays[key]; ok { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + + if result, ok := fake.PrimaryStatusResults[key]; ok { + return result.Status, result.Error + } + + return nil, assert.AnError +} + type backupRestoreStreamAdapter struct { *grpcshim.BidiStream ch chan *logutilpb.Event @@ -1326,3 +1392,33 @@ func (fake *TabletManagerClient) VReplicationExec(ctx context.Context, tablet *t return nil, assert.AnError } + +// CheckThrottler is part of the tmclient.TabletManagerCLient interface. +func (fake *TabletManagerClient) CheckThrottler(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) { + if fake.CheckThrottlerResults == nil { + return nil, assert.AnError + } + + if tablet.Alias == nil { + return nil, assert.AnError + } + + key := topoproto.TabletAliasString(tablet.Alias) + + if fake.CheckThrottlerDelays != nil { + if delay, ok := fake.CheckThrottlerDelays[key]; ok { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + + if resultsForTablet, ok := fake.CheckThrottlerResults[key]; ok { + return resultsForTablet, nil + } + + return nil, assert.AnError +} diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/util.go b/go/vt/vtctl/grpcvtctldserver/testutil/util.go index 2b18d0bce68..97638e9c41e 100644 --- a/go/vt/vtctl/grpcvtctldserver/testutil/util.go +++ b/go/vt/vtctl/grpcvtctldserver/testutil/util.go @@ -24,8 +24,6 @@ import ( "fmt" "testing" - "google.golang.org/protobuf/proto" - "github.com/stretchr/testify/require" "golang.org/x/net/nettest" "google.golang.org/grpc" @@ -106,7 +104,7 @@ func WithTestServers( // could not be added. It shallow copies the proto struct to prevent XXX_ fields // from changing in the marshalling. func AddKeyspace(ctx context.Context, t *testing.T, ts *topo.Server, ks *vtctldatapb.Keyspace) { - err := ts.CreateKeyspace(ctx, ks.Name, proto.Clone(ks.Keyspace).(*topodatapb.Keyspace)) + err := ts.CreateKeyspace(ctx, ks.Name, ks.Keyspace.CloneVT()) require.NoError(t, err) } @@ -149,7 +147,8 @@ type AddTabletOptions struct { // shard to serving. If that shard record already has a serving primary, then // AddTablet will fail the test. func AddTablet(ctx context.Context, t *testing.T, ts *topo.Server, tablet *topodatapb.Tablet, opts *AddTabletOptions) { - tablet = proto.Clone(tablet).(*topodatapb.Tablet) + t.Helper() + tablet = tablet.CloneVT() if opts == nil { opts = &AddTabletOptions{} } @@ -200,6 +199,7 @@ func AddTablet(ctx context.Context, t *testing.T, ts *topo.Server, tablet *topod // AddTablets adds a list of tablets to the topology. See AddTablet for more // details. func AddTablets(ctx context.Context, t *testing.T, ts *topo.Server, opts *AddTabletOptions, tablets ...*topodatapb.Tablet) { + t.Helper() for _, tablet := range tablets { AddTablet(ctx, t, ts, tablet, opts) } diff --git a/go/vt/vtctl/localvtctldclient/client_gen.go b/go/vt/vtctl/localvtctldclient/client_gen.go index 21d743de148..e0031b321cd 100644 --- a/go/vt/vtctl/localvtctldclient/client_gen.go +++ b/go/vt/vtctl/localvtctldclient/client_gen.go @@ -161,11 +161,26 @@ func (client *localVtctldClient) BackupShard(ctx context.Context, in *vtctldatap return stream, nil } +// CancelSchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) CancelSchemaMigration(ctx context.Context, in *vtctldatapb.CancelSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.CancelSchemaMigrationResponse, error) { + return client.s.CancelSchemaMigration(ctx, in) +} + // ChangeTabletType is part of the vtctlservicepb.VtctldClient interface. func (client *localVtctldClient) ChangeTabletType(ctx context.Context, in *vtctldatapb.ChangeTabletTypeRequest, opts ...grpc.CallOption) (*vtctldatapb.ChangeTabletTypeResponse, error) { return client.s.ChangeTabletType(ctx, in) } +// CleanupSchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) CleanupSchemaMigration(ctx context.Context, in *vtctldatapb.CleanupSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.CleanupSchemaMigrationResponse, error) { + return client.s.CleanupSchemaMigration(ctx, in) +} + +// CompleteSchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) CompleteSchemaMigration(ctx context.Context, in *vtctldatapb.CompleteSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.CompleteSchemaMigrationResponse, error) { + return client.s.CompleteSchemaMigration(ctx, in) +} + // CreateKeyspace is part of the vtctlservicepb.VtctldClient interface. func (client *localVtctldClient) CreateKeyspace(ctx context.Context, in *vtctldatapb.CreateKeyspaceRequest, opts ...grpc.CallOption) (*vtctldatapb.CreateKeyspaceResponse, error) { return client.s.CreateKeyspace(ctx, in) @@ -281,6 +296,11 @@ func (client *localVtctldClient) GetSchema(ctx context.Context, in *vtctldatapb. return client.s.GetSchema(ctx, in) } +// GetSchemaMigrations is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) GetSchemaMigrations(ctx context.Context, in *vtctldatapb.GetSchemaMigrationsRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSchemaMigrationsResponse, error) { + return client.s.GetSchemaMigrations(ctx, in) +} + // GetShard is part of the vtctlservicepb.VtctldClient interface. func (client *localVtctldClient) GetShard(ctx context.Context, in *vtctldatapb.GetShardRequest, opts ...grpc.CallOption) (*vtctldatapb.GetShardResponse, error) { return client.s.GetShard(ctx, in) @@ -346,6 +366,21 @@ func (client *localVtctldClient) InitShardPrimary(ctx context.Context, in *vtctl return client.s.InitShardPrimary(ctx, in) } +// LaunchSchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) LaunchSchemaMigration(ctx context.Context, in *vtctldatapb.LaunchSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.LaunchSchemaMigrationResponse, error) { + return client.s.LaunchSchemaMigration(ctx, in) +} + +// MoveTablesComplete is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) MoveTablesComplete(ctx context.Context, in *vtctldatapb.MoveTablesCompleteRequest, opts ...grpc.CallOption) (*vtctldatapb.MoveTablesCompleteResponse, error) { + return client.s.MoveTablesComplete(ctx, in) +} + +// MoveTablesCreate is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) MoveTablesCreate(ctx context.Context, in *vtctldatapb.MoveTablesCreateRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowStatusResponse, error) { + return client.s.MoveTablesCreate(ctx, in) +} + // PingTablet is part of the vtctlservicepb.VtctldClient interface. func (client *localVtctldClient) PingTablet(ctx context.Context, in *vtctldatapb.PingTabletRequest, opts ...grpc.CallOption) (*vtctldatapb.PingTabletResponse, error) { return client.s.PingTablet(ctx, in) @@ -411,6 +446,11 @@ func (client *localVtctldClient) ReparentTablet(ctx context.Context, in *vtctlda return client.s.ReparentTablet(ctx, in) } +// ReshardCreate is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) ReshardCreate(ctx context.Context, in *vtctldatapb.ReshardCreateRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowStatusResponse, error) { + return client.s.ReshardCreate(ctx, in) +} + type restoreFromBackupStreamAdapter struct { *grpcshim.BidiStream ch chan *vtctldatapb.RestoreFromBackupResponse @@ -462,6 +502,11 @@ func (client *localVtctldClient) RestoreFromBackup(ctx context.Context, in *vtct return stream, nil } +// RetrySchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) RetrySchemaMigration(ctx context.Context, in *vtctldatapb.RetrySchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.RetrySchemaMigrationResponse, error) { + return client.s.RetrySchemaMigration(ctx, in) +} + // RunHealthCheck is part of the vtctlservicepb.VtctldClient interface. func (client *localVtctldClient) RunHealthCheck(ctx context.Context, in *vtctldatapb.RunHealthCheckRequest, opts ...grpc.CallOption) (*vtctldatapb.RunHealthCheckResponse, error) { return client.s.RunHealthCheck(ctx, in) @@ -586,3 +631,23 @@ func (client *localVtctldClient) ValidateVersionKeyspace(ctx context.Context, in func (client *localVtctldClient) ValidateVersionShard(ctx context.Context, in *vtctldatapb.ValidateVersionShardRequest, opts ...grpc.CallOption) (*vtctldatapb.ValidateVersionShardResponse, error) { return client.s.ValidateVersionShard(ctx, in) } + +// WorkflowDelete is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) WorkflowDelete(ctx context.Context, in *vtctldatapb.WorkflowDeleteRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowDeleteResponse, error) { + return client.s.WorkflowDelete(ctx, in) +} + +// WorkflowStatus is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) WorkflowStatus(ctx context.Context, in *vtctldatapb.WorkflowStatusRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowStatusResponse, error) { + return client.s.WorkflowStatus(ctx, in) +} + +// WorkflowSwitchTraffic is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) WorkflowSwitchTraffic(ctx context.Context, in *vtctldatapb.WorkflowSwitchTrafficRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowSwitchTrafficResponse, error) { + return client.s.WorkflowSwitchTraffic(ctx, in) +} + +// WorkflowUpdate is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) WorkflowUpdate(ctx context.Context, in *vtctldatapb.WorkflowUpdateRequest, opts ...grpc.CallOption) (*vtctldatapb.WorkflowUpdateResponse, error) { + return client.s.WorkflowUpdate(ctx, in) +} diff --git a/go/vt/vtctl/plugin_kubernetestopo.go b/go/vt/vtctl/plugin_kubernetestopo.go deleted file mode 100644 index 271633fc2bc..00000000000 --- a/go/vt/vtctl/plugin_kubernetestopo.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vtctl - -import ( - // Imports k8stopo to register the kubernetes implementation of - // TopoServer. - _ "vitess.io/vitess/go/vt/topo/k8stopo" -) diff --git a/go/vt/vtctl/reparent.go b/go/vt/vtctl/reparent.go index 43844eb9388..7ed0f6582b9 100644 --- a/go/vt/vtctl/reparent.go +++ b/go/vt/vtctl/reparent.go @@ -162,6 +162,7 @@ func commandEmergencyReparentShard(ctx context.Context, wr *wrangler.Wrangler, s newPrimary := subFlags.String("new_primary", "", "optional alias of a tablet that should be the new primary. If not specified, Vitess will select the best candidate") preventCrossCellPromotion := subFlags.Bool("prevent_cross_cell_promotion", false, "only promotes a new primary from the same cell as the previous primary") ignoreReplicasList := subFlags.String("ignore_replicas", "", "comma-separated list of replica tablet aliases to ignore during emergency reparent") + waitForAllTablets := subFlags.Bool("wait_for_all_tablets", false, "should ERS wait for all the tablets to respond. Useful when all the tablets are reachable") if err := subFlags.Parse(args); err != nil { return err @@ -189,7 +190,7 @@ func commandEmergencyReparentShard(ctx context.Context, wr *wrangler.Wrangler, s } } unreachableReplicas := topoproto.ParseTabletSet(*ignoreReplicasList) - return wr.EmergencyReparentShard(ctx, keyspace, shard, tabletAlias, *waitReplicasTimeout, unreachableReplicas, *preventCrossCellPromotion) + return wr.EmergencyReparentShard(ctx, keyspace, shard, tabletAlias, *waitReplicasTimeout, unreachableReplicas, *preventCrossCellPromotion, *waitForAllTablets) } func commandTabletExternallyReparented(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { diff --git a/go/vt/vtctl/reparentutil/durability.go b/go/vt/vtctl/reparentutil/durability.go index 735965c3afa..e68485a395c 100644 --- a/go/vt/vtctl/reparentutil/durability.go +++ b/go/vt/vtctl/reparentutil/durability.go @@ -43,10 +43,24 @@ func init() { return &durabilityNone{} }) RegisterDurability("semi_sync", func() Durabler { - return &durabilitySemiSync{} + return &durabilitySemiSync{ + rdonlySemiSync: false, + } }) RegisterDurability("cross_cell", func() Durabler { - return &durabilityCrossCell{} + return &durabilityCrossCell{ + rdonlySemiSync: false, + } + }) + RegisterDurability("semi_sync_with_rdonly_ack", func() Durabler { + return &durabilitySemiSync{ + rdonlySemiSync: true, + } + }) + RegisterDurability("cross_cell_with_rdonly_ack", func() Durabler { + return &durabilityCrossCell{ + rdonlySemiSync: true, + } }) RegisterDurability("test", func() Durabler { return &durabilityTest{} @@ -141,7 +155,9 @@ func (d *durabilityNone) isReplicaSemiSync(primary, replica *topodatapb.Tablet) // durabilitySemiSync has 1 semi-sync setup. It only allows Primary and Replica type servers to acknowledge semi sync // It returns NeutralPromoteRule for Primary and Replica tablet types, MustNotPromoteRule for everything else -type durabilitySemiSync struct{} +type durabilitySemiSync struct { + rdonlySemiSync bool +} // promotionRule implements the Durabler interface func (d *durabilitySemiSync) promotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule { @@ -162,6 +178,8 @@ func (d *durabilitySemiSync) isReplicaSemiSync(primary, replica *topodatapb.Tabl switch replica.Type { case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: return true + case topodatapb.TabletType_RDONLY: + return d.rdonlySemiSync } return false } @@ -171,7 +189,9 @@ func (d *durabilitySemiSync) isReplicaSemiSync(primary, replica *topodatapb.Tabl // durabilityCrossCell has 1 semi-sync setup. It only allows Primary and Replica type servers from a different cell to acknowledge semi sync. // This means that a transaction must be in two cells for it to be acknowledged // It returns NeutralPromoteRule for Primary and Replica tablet types, MustNotPromoteRule for everything else -type durabilityCrossCell struct{} +type durabilityCrossCell struct { + rdonlySemiSync bool +} // promotionRule implements the Durabler interface func (d *durabilityCrossCell) promotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule { @@ -192,6 +212,8 @@ func (d *durabilityCrossCell) isReplicaSemiSync(primary, replica *topodatapb.Tab switch replica.Type { case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: return primary.Alias.Cell != replica.Alias.Cell + case topodatapb.TabletType_RDONLY: + return d.rdonlySemiSync && primary.Alias.Cell != replica.Alias.Cell } return false } diff --git a/go/vt/vtctl/reparentutil/durability_test.go b/go/vt/vtctl/reparentutil/durability_test.go index 857718174c5..f1429b29621 100644 --- a/go/vt/vtctl/reparentutil/durability_test.go +++ b/go/vt/vtctl/reparentutil/durability_test.go @@ -73,146 +73,204 @@ func TestDurabilityNone(t *testing.T) { } func TestDurabilitySemiSync(t *testing.T) { - durability, err := GetDurabilityPolicy("semi_sync") - require.NoError(t, err) - - promoteRule := PromotionRule(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, + testcases := []struct { + durabilityPolicy string + rdonlySemiSync bool + }{ + { + durabilityPolicy: "semi_sync", + rdonlySemiSync: false, + }, { + durabilityPolicy: "semi_sync_with_rdonly_ack", + rdonlySemiSync: true, }, - Type: topodatapb.TabletType_PRIMARY, - }) - assert.Equal(t, promotionrule.Neutral, promoteRule) + } - promoteRule = PromotionRule(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, - }, - Type: topodatapb.TabletType_REPLICA, - }) - assert.Equal(t, promotionrule.Neutral, promoteRule) + for _, tt := range testcases { + t.Run(tt.durabilityPolicy, func(t *testing.T) { + durability, err := GetDurabilityPolicy(tt.durabilityPolicy) + require.NoError(t, err) - promoteRule = PromotionRule(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, - }, - Type: topodatapb.TabletType_RDONLY, - }) - assert.Equal(t, promotionrule.MustNot, promoteRule) + promoteRule := PromotionRule(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }) + assert.Equal(t, promotionrule.Neutral, promoteRule) - promoteRule = PromotionRule(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, - }, - Type: topodatapb.TabletType_SPARE, - }) - assert.Equal(t, promotionrule.MustNot, promoteRule) - assert.Equal(t, 1, SemiSyncAckers(durability, nil)) - assert.Equal(t, true, IsReplicaSemiSync(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 101, - }, - Type: topodatapb.TabletType_PRIMARY, - }, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, - }, - Type: topodatapb.TabletType_REPLICA, - })) - assert.Equal(t, false, IsReplicaSemiSync(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 101, - }, - Type: topodatapb.TabletType_PRIMARY, - }, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, - }, - Type: topodatapb.TabletType_EXPERIMENTAL, - })) + promoteRule = PromotionRule(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + }) + assert.Equal(t, promotionrule.Neutral, promoteRule) + + promoteRule = PromotionRule(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_RDONLY, + }) + assert.Equal(t, promotionrule.MustNot, promoteRule) + + promoteRule = PromotionRule(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_SPARE, + }) + assert.Equal(t, promotionrule.MustNot, promoteRule) + assert.Equal(t, 1, SemiSyncAckers(durability, nil)) + assert.Equal(t, true, IsReplicaSemiSync(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 101, + }, + Type: topodatapb.TabletType_PRIMARY, + }, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + })) + assert.Equal(t, false, IsReplicaSemiSync(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 101, + }, + Type: topodatapb.TabletType_PRIMARY, + }, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_EXPERIMENTAL, + })) + assert.Equal(t, tt.rdonlySemiSync, IsReplicaSemiSync(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 101, + }, + Type: topodatapb.TabletType_PRIMARY, + }, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_RDONLY, + })) + }) + } } func TestDurabilityCrossCell(t *testing.T) { - durability, err := GetDurabilityPolicy("cross_cell") - require.NoError(t, err) - - promoteRule := PromotionRule(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, + testcases := []struct { + durabilityPolicy string + rdonlySemiSync bool + }{ + { + durabilityPolicy: "cross_cell", + rdonlySemiSync: false, + }, { + durabilityPolicy: "cross_cell_with_rdonly_ack", + rdonlySemiSync: true, }, - Type: topodatapb.TabletType_PRIMARY, - }) - assert.Equal(t, promotionrule.Neutral, promoteRule) + } - promoteRule = PromotionRule(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, - }, - Type: topodatapb.TabletType_REPLICA, - }) - assert.Equal(t, promotionrule.Neutral, promoteRule) + for _, tt := range testcases { + t.Run(tt.durabilityPolicy, func(t *testing.T) { + durability, err := GetDurabilityPolicy(tt.durabilityPolicy) + require.NoError(t, err) - promoteRule = PromotionRule(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, - }, - Type: topodatapb.TabletType_RDONLY, - }) - assert.Equal(t, promotionrule.MustNot, promoteRule) + promoteRule := PromotionRule(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }) + assert.Equal(t, promotionrule.Neutral, promoteRule) - promoteRule = PromotionRule(durability, &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - Uid: 100, - }, - Type: topodatapb.TabletType_SPARE, - }) - assert.Equal(t, promotionrule.MustNot, promoteRule) - assert.Equal(t, 1, SemiSyncAckers(durability, nil)) - assert.Equal(t, false, IsReplicaSemiSync(durability, &topodatapb.Tablet{ - Type: topodatapb.TabletType_PRIMARY, - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - }, - }, &topodatapb.Tablet{ - Type: topodatapb.TabletType_REPLICA, - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - }, - })) - assert.Equal(t, true, IsReplicaSemiSync(durability, &topodatapb.Tablet{ - Type: topodatapb.TabletType_PRIMARY, - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - }, - }, &topodatapb.Tablet{ - Type: topodatapb.TabletType_REPLICA, - Alias: &topodatapb.TabletAlias{ - Cell: "cell2", - }, - })) - assert.Equal(t, false, IsReplicaSemiSync(durability, &topodatapb.Tablet{ - Type: topodatapb.TabletType_PRIMARY, - Alias: &topodatapb.TabletAlias{ - Cell: "cell1", - }, - }, &topodatapb.Tablet{ - Type: topodatapb.TabletType_EXPERIMENTAL, - Alias: &topodatapb.TabletAlias{ - Cell: "cell2", - }, - })) + promoteRule = PromotionRule(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_REPLICA, + }) + assert.Equal(t, promotionrule.Neutral, promoteRule) + + promoteRule = PromotionRule(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_RDONLY, + }) + assert.Equal(t, promotionrule.MustNot, promoteRule) + + promoteRule = PromotionRule(durability, &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Type: topodatapb.TabletType_SPARE, + }) + assert.Equal(t, promotionrule.MustNot, promoteRule) + assert.Equal(t, 1, SemiSyncAckers(durability, nil)) + assert.Equal(t, false, IsReplicaSemiSync(durability, &topodatapb.Tablet{ + Type: topodatapb.TabletType_PRIMARY, + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + }, + }, &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + }, + })) + assert.Equal(t, true, IsReplicaSemiSync(durability, &topodatapb.Tablet{ + Type: topodatapb.TabletType_PRIMARY, + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + }, + }, &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "cell2", + }, + })) + assert.Equal(t, false, IsReplicaSemiSync(durability, &topodatapb.Tablet{ + Type: topodatapb.TabletType_PRIMARY, + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + }, + }, &topodatapb.Tablet{ + Type: topodatapb.TabletType_EXPERIMENTAL, + Alias: &topodatapb.TabletAlias{ + Cell: "cell2", + }, + })) + assert.Equal(t, tt.rdonlySemiSync, IsReplicaSemiSync(durability, &topodatapb.Tablet{ + Type: topodatapb.TabletType_PRIMARY, + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + }, + }, &topodatapb.Tablet{ + Type: topodatapb.TabletType_RDONLY, + Alias: &topodatapb.TabletAlias{ + Cell: "cell2", + }, + })) + }) + } } func TestError(t *testing.T) { diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter.go b/go/vt/vtctl/reparentutil/emergency_reparenter.go index 536d77bdaad..13705e8fa59 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter.go @@ -22,10 +22,9 @@ import ( "sync" "time" - "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/event" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sets" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/concurrency" @@ -54,8 +53,11 @@ type EmergencyReparenter struct { // EmergencyReparentShard operations. Options are passed by value, so it is safe // for callers to mutate and reuse options structs for multiple calls. type EmergencyReparentOptions struct { - NewPrimaryAlias *topodatapb.TabletAlias - IgnoreReplicas sets.Set[string] + NewPrimaryAlias *topodatapb.TabletAlias + IgnoreReplicas sets.Set[string] + // WaitAllTablets is used to specify whether ERS should wait for all the tablets to return and not proceed + // further after n-1 tablets have returned. + WaitAllTablets bool WaitReplicasTimeout time.Duration PreventCrossCellPromotion bool @@ -97,10 +99,10 @@ func NewEmergencyReparenter(ts *topo.Server, tmc tmclient.TabletManagerClient, l // keyspace and shard. func (erp *EmergencyReparenter) ReparentShard(ctx context.Context, keyspace string, shard string, opts EmergencyReparentOptions) (*events.Reparent, error) { var err error + opts.lockAction = erp.getLockAction(opts.NewPrimaryAlias) // First step is to lock the shard for the given operation, if not already locked if err = topo.CheckShardLocked(ctx, keyspace, shard); err != nil { var unlock func(*error) - opts.lockAction = erp.getLockAction(opts.NewPrimaryAlias) ctx, unlock, err = erp.ts.LockShard(ctx, keyspace, shard, opts.lockAction) if err != nil { return nil, err @@ -147,7 +149,7 @@ func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *eve shardInfo *topo.ShardInfo prevPrimary *topodatapb.Tablet tabletMap map[string]*topo.TabletInfo - validCandidates map[string]mysql.Position + validCandidates map[string]replication.Position intermediateSource *topodatapb.Tablet validCandidateTablets []*topodatapb.Tablet validReplacementCandidates []*topodatapb.Tablet @@ -191,7 +193,7 @@ func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *eve } // Stop replication on all the tablets and build their status map - stoppedReplicationSnapshot, err = stopReplicationAndBuildStatusMaps(ctx, erp.tmc, ev, tabletMap, topo.RemoteOperationTimeout, opts.IgnoreReplicas, opts.NewPrimaryAlias, opts.durability, erp.logger) + stoppedReplicationSnapshot, err = stopReplicationAndBuildStatusMaps(ctx, erp.tmc, ev, tabletMap, topo.RemoteOperationTimeout, opts.IgnoreReplicas, opts.NewPrimaryAlias, opts.durability, opts.WaitAllTablets, erp.logger) if err != nil { return vterrors.Wrapf(err, "failed to stop replication and build status maps: %v", err) } @@ -298,14 +300,13 @@ func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *eve if err != nil { return err } - - ev.NewPrimary = proto.Clone(newPrimary).(*topodatapb.Tablet) + ev.NewPrimary = newPrimary.CloneVT() return err } func (erp *EmergencyReparenter) waitForAllRelayLogsToApply( ctx context.Context, - validCandidates map[string]mysql.Position, + validCandidates map[string]replication.Position, tabletMap map[string]*topo.TabletInfo, statusMap map[string]*replicationdatapb.StopReplicationStatus, waitReplicasTimeout time.Duration, @@ -371,7 +372,7 @@ func (erp *EmergencyReparenter) waitForAllRelayLogsToApply( // findMostAdvanced finds the intermediate source for ERS. We always choose the most advanced one from our valid candidates list. Further ties are broken by looking at the promotion rules. func (erp *EmergencyReparenter) findMostAdvanced( - validCandidates map[string]mysql.Position, + validCandidates map[string]replication.Position, tabletMap map[string]*topo.TabletInfo, opts EmergencyReparentOptions, ) (*topodatapb.Tablet, []*topodatapb.Tablet, error) { diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go index fad4dfeb15b..a4cf95700d5 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sets" "vitess.io/vitess/go/vt/logutil" @@ -116,7 +118,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { emergencyReparentOps EmergencyReparentOptions tmc *testutil.TabletManagerClient // setup - ts *topo.Server + cells []string keyspace string shard string unlockTopo bool @@ -161,7 +163,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -170,7 +172,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -179,7 +181,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -240,7 +242,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: false, }, { @@ -278,7 +280,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -287,7 +289,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -296,7 +298,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -380,7 +382,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: false, }, { @@ -423,7 +425,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -432,7 +434,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -441,7 +443,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -501,7 +503,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: false, }, { @@ -552,7 +554,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -561,7 +563,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -620,7 +622,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: false, }, { @@ -632,7 +634,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { shards: nil, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "node doesn't exist: keyspaces/testkeyspace/shards/-/Shard", }, @@ -691,7 +693,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "failed to stop replication and build status maps", }, @@ -705,13 +707,13 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Error error }{ "zone1-0000000100": { - StopStatus: &replicationdatapb.StopReplicationStatus{Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}}, + StopStatus: &replicationdatapb.StopReplicationStatus{Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}}, }, "zone1-0000000101": { - StopStatus: &replicationdatapb.StopReplicationStatus{Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}}, + StopStatus: &replicationdatapb.StopReplicationStatus{Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}}, }, "zone1-0000000102": { - StopStatus: &replicationdatapb.StopReplicationStatus{Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}}, + StopStatus: &replicationdatapb.StopReplicationStatus{Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}}, }, }, }, @@ -750,7 +752,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "lost topology lock, aborting", }, @@ -765,7 +767,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -774,7 +776,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -783,7 +785,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{}, }, }, @@ -824,7 +826,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "encountered tablet zone1-0000000102 with no relay log position", }, @@ -842,7 +844,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { shouldErr: true, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, errShouldContain: "no valid candidates for emergency reparent", }, { @@ -859,7 +861,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -868,7 +870,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -877,7 +879,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -937,7 +939,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { shouldErr: true, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, errShouldContain: "could not apply all relay logs within the provided waitReplicasTimeout", }, { @@ -954,7 +956,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -963,7 +965,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -972,7 +974,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1026,7 +1028,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "primary elect zone1-0000000200 has errant GTIDs", }, @@ -1039,7 +1041,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000102": nil, @@ -1076,7 +1078,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1085,7 +1087,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-20", @@ -1094,7 +1096,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-20", @@ -1189,7 +1191,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1198,7 +1200,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1207,7 +1209,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1273,7 +1275,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "failed to be upgraded to primary", }, @@ -1312,7 +1314,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1321,7 +1323,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1330,7 +1332,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -1388,7 +1390,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "no valid candidates for emergency reparent", }, @@ -1432,7 +1434,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1441,7 +1443,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1450,7 +1452,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -1508,7 +1510,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "proposed primary zone1-0000000102 has a must not promotion rule", }, @@ -1547,7 +1549,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1556,7 +1558,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1565,7 +1567,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -1635,7 +1637,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1", "zone2"), + cells: []string{"zone1", "zone2"}, shouldErr: true, errShouldContain: "no valid candidates for emergency reparent", }, @@ -1680,7 +1682,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1689,7 +1691,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1698,7 +1700,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -1768,7 +1770,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1", "zone2"), + cells: []string{"zone1", "zone2"}, shouldErr: true, errShouldContain: "proposed primary zone1-0000000102 is is a different cell as the previous primary", }, @@ -1812,7 +1814,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1821,7 +1823,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -1830,7 +1832,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -1891,7 +1893,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), + cells: []string{"zone1"}, shouldErr: true, errShouldContain: "proposed primary zone1-0000000102 will not be able to make forward progress on being promoted", }, @@ -1902,7 +1904,9 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := logutil.NewMemoryLogger() ev := &events.Reparent{} @@ -1913,12 +1917,14 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { tt.tablets[i] = tablet } - testutil.AddShards(ctx, t, tt.ts, tt.shards...) - testutil.AddTablets(ctx, t, tt.ts, nil, tt.tablets...) - reparenttestutil.SetKeyspaceDurability(ctx, t, tt.ts, tt.keyspace, tt.durability) + ts := memorytopo.NewServer(ctx, tt.cells...) + defer ts.Close() + testutil.AddShards(ctx, t, ts, tt.shards...) + testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) + reparenttestutil.SetKeyspaceDurability(ctx, t, ts, tt.keyspace, tt.durability) if !tt.unlockTopo { - lctx, unlock, lerr := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + lctx, unlock, lerr := ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") require.NoError(t, lerr, "could not lock %s/%s for testing", tt.keyspace, tt.shard) defer func() { @@ -1929,7 +1935,7 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { ctx = lctx // make the reparentShardLocked call use the lock ctx } - erp := NewEmergencyReparenter(tt.ts, tt.tmc, logger) + erp := NewEmergencyReparenter(ts, tt.tmc, logger) err := erp.reparentShardLocked(ctx, ev, tt.keyspace, tt.shard, tt.emergencyReparentOps) if tt.shouldErr { @@ -1952,7 +1958,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { tmc *testutil.TabletManagerClient unlockTopo bool newPrimaryTabletAlias string - ts *topo.Server keyspace string shard string tablets []*topodatapb.Tablet @@ -2032,20 +2037,19 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { // forceStart = false Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), }, }, "zone1-0000000102": { // forceStart = true Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), }, }, }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: false, }, { @@ -2091,7 +2095,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: true, errShouldContain: "primary position error", }, @@ -2141,7 +2144,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: true, errShouldContain: "failed to PopulateReparentJournal on primary", }, @@ -2205,7 +2207,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: true, errShouldContain: " replica(s) failed", }, @@ -2274,7 +2275,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { shouldErr: true, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), errShouldContain: "context deadline exceeded", }, { @@ -2336,7 +2336,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: false, }, { @@ -2410,20 +2409,19 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { // forceStart = false Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), }, }, "zone1-0000000102": { // forceStart = true Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), }, }, }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: false, }, } @@ -2435,7 +2433,8 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() logger := logutil.NewMemoryLogger() ev := &events.Reparent{ShardInfo: topo.ShardInfo{ Shard: &topodatapb.Shard{ @@ -2449,7 +2448,10 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { ev.ShardInfo.PrimaryAlias = nil } - testutil.AddShards(ctx, t, tt.ts, &vtctldatapb.Shard{ + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + + testutil.AddShards(ctx, t, ts, &vtctldatapb.Shard{ Keyspace: tt.keyspace, Name: tt.shard, }) @@ -2460,7 +2462,7 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { lerr error ) - ctx, unlock, lerr = tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + ctx, unlock, lerr = ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") require.NoError(t, lerr, "could not lock %s/%s for test", tt.keyspace, tt.shard) defer func() { @@ -2472,7 +2474,7 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { tt.emergencyReparentOps.durability = durability - erp := NewEmergencyReparenter(tt.ts, tt.tmc, logger) + erp := NewEmergencyReparenter(ts, tt.tmc, logger) err := erp.promoteNewPrimary(ctx, ev, tabletInfo.Tablet, tt.emergencyReparentOps, tt.tabletMap, tt.statusMap) if tt.shouldErr { assert.Error(t, err) @@ -2494,7 +2496,7 @@ func TestEmergencyReparenter_waitForAllRelayLogsToApply(t *testing.T) { tests := []struct { name string tmc *testutil.TabletManagerClient - candidates map[string]mysql.Position + candidates map[string]replication.Position tabletMap map[string]*topo.TabletInfo statusMap map[string]*replicationdatapb.StopReplicationStatus shouldErr bool @@ -2511,7 +2513,7 @@ func TestEmergencyReparenter_waitForAllRelayLogsToApply(t *testing.T) { }, }, }, - candidates: map[string]mysql.Position{ + candidates: map[string]replication.Position{ "zone1-0000000100": {}, "zone1-0000000101": {}, }, @@ -2559,7 +2561,7 @@ func TestEmergencyReparenter_waitForAllRelayLogsToApply(t *testing.T) { }, }, }, - candidates: map[string]mysql.Position{ + candidates: map[string]replication.Position{ "zone1-0000000100": {}, "zone1-0000000101": {}, }, @@ -2610,7 +2612,7 @@ func TestEmergencyReparenter_waitForAllRelayLogsToApply(t *testing.T) { }, }, }, - candidates: map[string]mysql.Position{ + candidates: map[string]replication.Position{ "zone1-0000000100": {}, "zone1-0000000101": {}, "zone1-0000000102": {}, @@ -2675,7 +2677,7 @@ func TestEmergencyReparenter_waitForAllRelayLogsToApply(t *testing.T) { }, }, }, - candidates: map[string]mysql.Position{ + candidates: map[string]replication.Position{ "zone1-0000000100": {}, "zone1-0000000101": {}, }, @@ -2768,7 +2770,7 @@ func TestEmergencyReparenterCounters(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -2777,7 +2779,7 @@ func TestEmergencyReparenterCounters(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", @@ -2786,7 +2788,7 @@ func TestEmergencyReparenterCounters(t *testing.T) { }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562", RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-26", @@ -2844,11 +2846,12 @@ func TestEmergencyReparenterCounters(t *testing.T) { } keyspace := "testkeyspace" shard := "-" - ts := memorytopo.NewServer("zone1") - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() logger := logutil.NewMemoryLogger() + ts := memorytopo.NewServer(ctx, "zone1") testutil.AddShards(ctx, t, ts, shards...) testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, @@ -2883,40 +2886,40 @@ func TestEmergencyReparenterCounters(t *testing.T) { } func TestEmergencyReparenter_findMostAdvanced(t *testing.T) { - sid1 := mysql.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - mysqlGTID1 := mysql.Mysql56GTID{ + sid1 := replication.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + mysqlGTID1 := replication.Mysql56GTID{ Server: sid1, Sequence: 9, } - mysqlGTID2 := mysql.Mysql56GTID{ + mysqlGTID2 := replication.Mysql56GTID{ Server: sid1, Sequence: 10, } - mysqlGTID3 := mysql.Mysql56GTID{ + mysqlGTID3 := replication.Mysql56GTID{ Server: sid1, Sequence: 11, } - positionMostAdvanced := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionMostAdvanced := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID1) positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID2) positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID3) - positionIntermediate1 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate1 := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionIntermediate1.GTIDSet = positionIntermediate1.GTIDSet.AddGTID(mysqlGTID1) - positionIntermediate2 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate2 := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID1) positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID2) - positionOnly2 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionOnly2 := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionOnly2.GTIDSet = positionOnly2.GTIDSet.AddGTID(mysqlGTID2) - positionEmpty := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionEmpty := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} tests := []struct { name string - validCandidates map[string]mysql.Position + validCandidates map[string]replication.Position tabletMap map[string]*topo.TabletInfo emergencyReparentOps EmergencyReparentOptions result *topodatapb.Tablet @@ -2924,7 +2927,7 @@ func TestEmergencyReparenter_findMostAdvanced(t *testing.T) { }{ { name: "choose most advanced", - validCandidates: map[string]mysql.Position{ + validCandidates: map[string]replication.Position{ "zone1-0000000100": positionMostAdvanced, "zone1-0000000101": positionIntermediate1, "zone1-0000000102": positionIntermediate2, @@ -2972,7 +2975,7 @@ func TestEmergencyReparenter_findMostAdvanced(t *testing.T) { }, }, { name: "choose most advanced with the best promotion rule", - validCandidates: map[string]mysql.Position{ + validCandidates: map[string]replication.Position{ "zone1-0000000100": positionMostAdvanced, "zone1-0000000101": positionIntermediate1, "zone1-0000000102": positionMostAdvanced, @@ -3026,7 +3029,7 @@ func TestEmergencyReparenter_findMostAdvanced(t *testing.T) { Cell: "zone1", Uid: 102, }}, - validCandidates: map[string]mysql.Position{ + validCandidates: map[string]replication.Position{ "zone1-0000000100": positionMostAdvanced, "zone1-0000000101": positionIntermediate1, "zone1-0000000102": positionMostAdvanced, @@ -3080,7 +3083,7 @@ func TestEmergencyReparenter_findMostAdvanced(t *testing.T) { Cell: "zone1", Uid: 102, }}, - validCandidates: map[string]mysql.Position{ + validCandidates: map[string]replication.Position{ "zone1-0000000100": positionOnly2, "zone1-0000000101": positionIntermediate1, "zone1-0000000102": positionEmpty, @@ -3154,7 +3157,6 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { tmc *testutil.TabletManagerClient unlockTopo bool newPrimaryTabletAlias string - ts *topo.Server keyspace string shard string tablets []*topodatapb.Tablet @@ -3225,20 +3227,19 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { // forceStart = false Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), }, }, "zone1-0000000102": { // forceStart = true Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), }, }, }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: false, }, { @@ -3276,7 +3277,6 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: true, errShouldContain: "primary position error", }, @@ -3318,7 +3318,6 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: true, errShouldContain: "failed to PopulateReparentJournal on primary", }, @@ -3374,7 +3373,6 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: true, errShouldContain: " replica(s) failed", }, @@ -3434,7 +3432,6 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { shouldErr: true, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), errShouldContain: "context deadline exceeded", }, { @@ -3487,7 +3484,6 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: false, }, { name: "single replica failing to SetReplicationSource does not fail the promotion", @@ -3530,7 +3526,6 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: false, }, } @@ -3542,11 +3537,15 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() logger := logutil.NewMemoryLogger() ev := &events.Reparent{} - testutil.AddShards(ctx, t, tt.ts, &vtctldatapb.Shard{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + + testutil.AddShards(ctx, t, ts, &vtctldatapb.Shard{ Keyspace: tt.keyspace, Name: tt.shard, }) @@ -3557,7 +3556,7 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { lerr error ) - ctx, unlock, lerr = tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + ctx, unlock, lerr = ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") require.NoError(t, lerr, "could not lock %s/%s for test", tt.keyspace, tt.shard) defer func() { @@ -3569,7 +3568,7 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { tt.emergencyReparentOps.durability = durability - erp := NewEmergencyReparenter(tt.ts, tt.tmc, logger) + erp := NewEmergencyReparenter(ts, tt.tmc, logger) _, err := erp.reparentReplicas(ctx, ev, tabletInfo.Tablet, tt.tabletMap, tt.statusMap, tt.emergencyReparentOps, false /* waitForAllReplicas */, true /* populateReparentJournal */) if tt.shouldErr { assert.Error(t, err) @@ -3591,7 +3590,6 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { tmc *testutil.TabletManagerClient unlockTopo bool newSourceTabletAlias string - ts *topo.Server keyspace string shard string tablets []*topodatapb.Tablet @@ -3664,20 +3662,19 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { // forceStart = false Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), }, }, "zone1-0000000102": { // forceStart = true Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), }, }, }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: false, result: []*topodatapb.Tablet{ { @@ -3795,7 +3792,6 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: true, errShouldContain: " replica(s) failed", }, @@ -3849,7 +3845,6 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: false, validCandidateTablets: []*topodatapb.Tablet{ { @@ -3938,20 +3933,19 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { statusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { // forceStart = false Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), }, }, "zone1-0000000102": { // forceStart = true Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), }, }, }, keyspace: "testkeyspace", shard: "-", - ts: memorytopo.NewServer("zone1"), shouldErr: false, result: []*topodatapb.Tablet{ { @@ -3991,11 +3985,15 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() logger := logutil.NewMemoryLogger() ev := &events.Reparent{} - testutil.AddShards(ctx, t, tt.ts, &vtctldatapb.Shard{ + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + + testutil.AddShards(ctx, t, ts, &vtctldatapb.Shard{ Keyspace: tt.keyspace, Name: tt.shard, }) @@ -4006,7 +4004,7 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { lerr error ) - ctx, unlock, lerr = tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + ctx, unlock, lerr = ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") require.NoError(t, lerr, "could not lock %s/%s for test", tt.keyspace, tt.shard) defer func() { @@ -4018,7 +4016,7 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { tt.emergencyReparentOps.durability = durability - erp := NewEmergencyReparenter(tt.ts, tt.tmc, logger) + erp := NewEmergencyReparenter(ts, tt.tmc, logger) res, err := erp.promoteIntermediateSource(ctx, ev, tabletInfo.Tablet, tt.tabletMap, tt.statusMap, tt.validCandidateTablets, tt.emergencyReparentOps) if tt.shouldErr { assert.Error(t, err) @@ -4289,17 +4287,19 @@ func TestParentContextCancelled(t *testing.T) { statusMap := map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), }, }, } keyspace := "testkeyspace" shard := "-" - ts := memorytopo.NewServer("zone1") ctx, cancel := context.WithCancel(context.Background()) defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + logger := logutil.NewMemoryLogger() ev := &events.Reparent{} diff --git a/go/vt/vtctl/reparentutil/planned_reparenter.go b/go/vt/vtctl/reparentutil/planned_reparenter.go index fc0e1c80a06..c178b64bf1d 100644 --- a/go/vt/vtctl/reparentutil/planned_reparenter.go +++ b/go/vt/vtctl/reparentutil/planned_reparenter.go @@ -22,21 +22,20 @@ import ( "sync" "time" - "google.golang.org/protobuf/proto" + "golang.org/x/sync/errgroup" "vitess.io/vitess/go/event" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/logutil" + logutilpb "vitess.io/vitess/go/vt/proto/logutil" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools/events" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tmclient" - - logutilpb "vitess.io/vitess/go/vt/proto/logutil" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/proto/vtrpc" ) // PlannedReparenter performs PlannedReparentShard operations. @@ -198,9 +197,7 @@ func (pr *PlannedReparenter) preflightChecks( if !canEstablishForTablet(opts.durability, newPrimaryTabletInfo.Tablet, tabletsReachable) { return true, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary-elect tablet %v won't be able to make forward progress on promotion", primaryElectAliasStr) } - - ev.NewPrimary = proto.Clone(newPrimaryTabletInfo.Tablet).(*topodatapb.Tablet) - + ev.NewPrimary = newPrimaryTabletInfo.Tablet.CloneVT() return false, nil } @@ -213,9 +210,9 @@ func (pr *PlannedReparenter) performGracefulPromotion( primaryElect *topodatapb.Tablet, tabletMap map[string]*topo.TabletInfo, opts PlannedReparentOptions, -) (string, error) { +) error { primaryElectAliasStr := topoproto.TabletAliasString(primaryElect.Alias) - ev.OldPrimary = proto.Clone(currentPrimary.Tablet).(*topodatapb.Tablet) + ev.OldPrimary = currentPrimary.Tablet.CloneVT() // Before demoting the old primary, we're going to ensure that replication // is working from the old primary to the primary-elect. If replication is @@ -231,7 +228,7 @@ func (pr *PlannedReparenter) performGracefulPromotion( snapshotPos, err := pr.tmc.PrimaryPosition(snapshotCtx, currentPrimary.Tablet) if err != nil { - return "", vterrors.Wrapf(err, "cannot get replication position on current primary %v; current primary must be healthy to perform PlannedReparent", currentPrimary.AliasString()) + return vterrors.Wrapf(err, "cannot get replication position on current primary %v; current primary must be healthy to perform PlannedReparent", currentPrimary.AliasString()) } // Next, we wait for the primary-elect to catch up to that snapshot point. @@ -246,12 +243,12 @@ func (pr *PlannedReparenter) performGracefulPromotion( defer setSourceCancel() if err := pr.tmc.SetReplicationSource(setSourceCtx, primaryElect, currentPrimary.Alias, 0, snapshotPos, true, IsReplicaSemiSync(opts.durability, currentPrimary.Tablet, primaryElect)); err != nil { - return "", vterrors.Wrapf(err, "replication on primary-elect %v did not catch up in time; replication must be healthy to perform PlannedReparent", primaryElectAliasStr) + return vterrors.Wrapf(err, "replication on primary-elect %v did not catch up in time; replication must be healthy to perform PlannedReparent", primaryElectAliasStr) } // Verify we still have the topology lock before doing the demotion. if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { - return "", vterrors.Wrap(err, "lost topology lock; aborting") + return vterrors.Wrap(err, "lost topology lock; aborting") } // Next up, demote the current primary and get its replication position. @@ -265,7 +262,7 @@ func (pr *PlannedReparenter) performGracefulPromotion( primaryStatus, err := pr.tmc.DemotePrimary(demoteCtx, currentPrimary.Tablet) if err != nil { - return "", vterrors.Wrapf(err, "failed to DemotePrimary on current primary %v: %v", currentPrimary.AliasString(), err) + return vterrors.Wrapf(err, "failed to DemotePrimary on current primary %v: %v", currentPrimary.AliasString(), err) } // Wait for the primary-elect to catch up to the position we demoted the @@ -298,26 +295,10 @@ func (pr *PlannedReparenter) performGracefulPromotion( finalWaitErr = vterrors.Wrapf(finalWaitErr, "encountered error while performing UndoDemotePrimary(%v): %v", currentPrimary.AliasString(), undoErr) } - return "", finalWaitErr + return finalWaitErr } - // Primary-elect is caught up to the current primary. We can do the - // promotion now. - promoteCtx, promoteCancel := context.WithTimeout(ctx, opts.WaitReplicasTimeout) - defer promoteCancel() - - rp, err := pr.tmc.PromoteReplica(promoteCtx, primaryElect, SemiSyncAckers(opts.durability, primaryElect) > 0) - if err != nil { - return "", vterrors.Wrapf(err, "primary-elect tablet %v failed to be promoted to primary; please try again", primaryElectAliasStr) - } - - if ctx.Err() == context.DeadlineExceeded { - // PromoteReplica succeeded, but we ran out of time. PRS needs to be - // re-run to complete fully. - return "", vterrors.Errorf(vtrpc.Code_DEADLINE_EXCEEDED, "PLannedReparent timed out after successfully promoting primary-elect %v; please re-run to fix up the replicas", primaryElectAliasStr) - } - - return rp, nil + return nil } func (pr *PlannedReparenter) performInitialPromotion( @@ -383,7 +364,7 @@ func (pr *PlannedReparenter) performPotentialPromotion( primaryElect *topodatapb.Tablet, tabletMap map[string]*topo.TabletInfo, opts PlannedReparentOptions, -) (string, error) { +) error { primaryElectAliasStr := topoproto.TabletAliasString(primaryElect.Alias) pr.logger.Infof("no clear winner found for current primary term; checking if it's safe to recover by electing %v", primaryElectAliasStr) @@ -391,7 +372,7 @@ func (pr *PlannedReparenter) performPotentialPromotion( type tabletPos struct { alias string tablet *topodatapb.Tablet - pos mysql.Position + pos replication.Position } positions := make(chan tabletPos, len(tabletMap)) @@ -438,7 +419,7 @@ func (pr *PlannedReparenter) performPotentialPromotion( return } - pos, err := mysql.DecodePosition(primaryStatus.Position) + pos, err := replication.DecodePosition(primaryStatus.Position) if err != nil { rec.RecordError(vterrors.Wrapf(err, "cannot decode replication position (%v) for demoted tablet %v", primaryStatus.Position, alias)) @@ -457,7 +438,7 @@ func (pr *PlannedReparenter) performPotentialPromotion( close(positions) if rec.HasErrors() { - return "", vterrors.Wrap(rec.Error(), "failed to demote all tablets") + return vterrors.Wrap(rec.Error(), "failed to demote all tablets") } // Construct a mapping of alias to tablet position. @@ -478,7 +459,7 @@ func (pr *PlannedReparenter) performPotentialPromotion( // if the candidate primary is behind that tablet. tp, ok := tabletPosMap[primaryElectAliasStr] if !ok { - return "", vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary-elect tablet %v not found in tablet map", primaryElectAliasStr) + return vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "primary-elect tablet %v not found in tablet map", primaryElectAliasStr) } primaryElectPos := tp.pos @@ -487,7 +468,7 @@ func (pr *PlannedReparenter) performPotentialPromotion( // The primary-elect pos has to be at least as advanced as every tablet // in the shard. if !primaryElectPos.AtLeast(tp.pos) { - return "", vterrors.Errorf( + return vterrors.Errorf( vtrpc.Code_FAILED_PRECONDITION, "tablet %v (position: %v) contains transactions not found in primary-elect %v (position: %v)", tp.alias, tp.pos, primaryElectAliasStr, primaryElectPos, @@ -497,19 +478,9 @@ func (pr *PlannedReparenter) performPotentialPromotion( // Check that we still have the topology lock. if err := topo.CheckShardLocked(ctx, keyspace, shard); err != nil { - return "", vterrors.Wrap(err, "lost topology lock; aborting") - } - - // Promote the candidate primary to type:PRIMARY. - promoteCtx, promoteCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) - defer promoteCancel() - - rp, err := pr.tmc.PromoteReplica(promoteCtx, primaryElect, SemiSyncAckers(opts.durability, primaryElect) > 0) - if err != nil { - return "", vterrors.Wrapf(err, "failed to promote %v to primary", primaryElectAliasStr) + return vterrors.Wrap(err, "lost topology lock; aborting") } - - return rp, nil + return nil } func (pr *PlannedReparenter) reparentShardLocked( @@ -544,6 +515,11 @@ func (pr *PlannedReparenter) reparentShardLocked( return err } + err = pr.verifyAllTabletsReachable(ctx, tabletMap) + if err != nil { + return err + } + // Check invariants that PlannedReparentShard depends on. if isNoop, err := pr.preflightChecks(ctx, ev, keyspace, shard, tabletMap, &opts); err != nil { return err @@ -553,6 +529,11 @@ func (pr *PlannedReparenter) reparentShardLocked( currentPrimary := FindCurrentPrimary(tabletMap, pr.logger) reparentJournalPos := "" + // promoteReplicaRequired is a boolean that is used to store whether we need to call + // `PromoteReplica` when we reparent the tablets. This is required to be done when we are doing + // a potential or a graceful promotion. + // InitialPromotion calls `InitPrimary` and for partial promotion, the tablet is already a primary. + promoteReplicaRequired := false // needsRefresh is used to keep track of whether we need to refresh the state // of the new primary tablet. The only case that we need to reload the state // is when we are initializing the new primary. The reason is that the first @@ -593,15 +574,17 @@ func (pr *PlannedReparenter) reparentShardLocked( // inserted in the new primary's journal, so we can use it below to check // that all the replicas have attached to new primary successfully. switch { - case currentPrimary == nil && ev.ShardInfo.PrimaryAlias == nil: + case currentPrimary == nil && ev.ShardInfo.PrimaryTermStartTime == nil: // Case (1): no primary has been elected ever. Initialize // the primary-elect tablet reparentJournalPos, err = pr.performInitialPromotion(ctx, ev.NewPrimary, opts) needsRefresh = true - case currentPrimary == nil && ev.ShardInfo.PrimaryAlias != nil: + case currentPrimary == nil && ev.ShardInfo.PrimaryTermStartTime != nil: // Case (2): no clear current primary. Try to find a safe promotion // candidate, and promote to it. - reparentJournalPos, err = pr.performPotentialPromotion(ctx, keyspace, shard, ev.NewPrimary, tabletMap, opts) + err = pr.performPotentialPromotion(ctx, keyspace, shard, ev.NewPrimary, tabletMap, opts) + // We need to call `PromoteReplica` when we reparent the tablets. + promoteReplicaRequired = true case topoproto.TabletAliasEqual(currentPrimary.Alias, opts.NewPrimaryAlias): // Case (3): desired new primary is the current primary. Attempt to fix // up replicas to recover from a previous partial promotion. @@ -609,7 +592,9 @@ func (pr *PlannedReparenter) reparentShardLocked( default: // Case (4): desired primary and current primary differ. Do a graceful // demotion-then-promotion. - reparentJournalPos, err = pr.performGracefulPromotion(ctx, ev, keyspace, shard, currentPrimary, ev.NewPrimary, tabletMap, opts) + err = pr.performGracefulPromotion(ctx, ev, keyspace, shard, currentPrimary, ev.NewPrimary, tabletMap, opts) + // We need to call `PromoteReplica` when we reparent the tablets. + promoteReplicaRequired = true } if err != nil { @@ -620,7 +605,7 @@ func (pr *PlannedReparenter) reparentShardLocked( return vterrors.Wrap(err, "lost topology lock, aborting") } - if err := pr.reparentTablets(ctx, ev, reparentJournalPos, tabletMap, opts); err != nil { + if err := pr.reparentTablets(ctx, ev, reparentJournalPos, promoteReplicaRequired, tabletMap, opts); err != nil { return err } @@ -637,6 +622,7 @@ func (pr *PlannedReparenter) reparentTablets( ctx context.Context, ev *events.Reparent, reparentJournalPosition string, + promoteReplicaRequired bool, tabletMap map[string]*topo.TabletInfo, opts PlannedReparentOptions, ) error { @@ -645,7 +631,7 @@ func (pr *PlannedReparenter) reparentTablets( replCtx, replCancel := context.WithTimeout(ctx, opts.WaitReplicasTimeout) defer replCancel() - // Go thorugh all the tablets. + // Go through all the tablets. // - New primary: populate the reparent journal. // - Everybody else: reparent to the new primary; wait for the reparent // journal row. @@ -660,7 +646,7 @@ func (pr *PlannedReparenter) reparentTablets( // Point all replicas at the new primary and check that they receive the // reparent journal entry, proving that they are replicating from the new - // primary. We do this concurrently with adding the journal entry (after + // primary. We do this concurrently with adding the journal entry (after // this loop), because if semi-sync is enabled, the update to the journal // table will block until at least one replica is successfully attached to // the new primary. @@ -688,6 +674,20 @@ func (pr *PlannedReparenter) reparentTablets( }(alias, tabletInfo.Tablet) } + // If `PromoteReplica` call is required, we should call it and use the position that it returns. + if promoteReplicaRequired { + // Promote the candidate primary to type:PRIMARY. + primaryPosition, err := pr.tmc.PromoteReplica(replCtx, ev.NewPrimary, SemiSyncAckers(opts.durability, ev.NewPrimary) > 0) + if err != nil { + pr.logger.Warningf("primary %v failed to PromoteReplica; cancelling replica reparent attempts", primaryElectAliasStr) + replCancel() + replicasWg.Wait() + + return vterrors.Wrapf(err, "failed PromoteReplica(primary=%v, ts=%v): %v", primaryElectAliasStr, reparentJournalTimestamp, err) + } + reparentJournalPosition = primaryPosition + } + // Add a reparent journal entry on the new primary. If semi-sync is enabled, // this blocks until at least one replica is reparented (above) and // successfully replicating from the new primary. @@ -715,3 +715,20 @@ func (pr *PlannedReparenter) reparentTablets( return nil } + +// verifyAllTabletsReachable verifies that all the tablets are reachable when running PRS. +func (pr *PlannedReparenter) verifyAllTabletsReachable(ctx context.Context, tabletMap map[string]*topo.TabletInfo) error { + // Create a cancellable context for the entire set of RPCs to verify reachability. + verifyCtx, verifyCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) + defer verifyCancel() + + errorGroup, groupCtx := errgroup.WithContext(verifyCtx) + for _, info := range tabletMap { + tablet := info.Tablet + errorGroup.Go(func() error { + _, err := pr.tmc.PrimaryStatus(groupCtx, tablet) + return err + }) + } + return errorGroup.Wait() +} diff --git a/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go b/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go index 5c79caeadb7..270bf97f87e 100644 --- a/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go +++ b/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go @@ -18,6 +18,7 @@ package reparentutil import ( "context" + "errors" "fmt" "strings" "testing" @@ -77,7 +78,6 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { tests := []struct { name string - ts *topo.Server tmc tmclient.TabletManagerClient tablets []*topodatapb.Tablet lockShardBeforeTest bool @@ -91,7 +91,6 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { }{ { name: "success", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PrimaryPositionResults: map[string]struct { Position string @@ -111,6 +110,18 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { SetReadWriteResults: map[string]error{ "zone1-0000000100": nil, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, }, tablets: []*topodatapb.Tablet{ { @@ -165,7 +176,6 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { }, { name: "success - new primary not provided", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ ReplicationStatusResults: map[string]struct { Position *replicationdatapb.Status @@ -221,6 +231,18 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000200": nil, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, }, tablets: []*topodatapb.Tablet{ { @@ -279,7 +301,6 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { }, { name: "already locked shard", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PrimaryPositionResults: map[string]struct { Position string @@ -299,6 +320,18 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { SetReadWriteResults: map[string]error{ "zone1-0000000100": nil, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, }, tablets: []*topodatapb.Tablet{ { @@ -358,8 +391,17 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { // fail the preflight checks. Other functions are unit-tested // thoroughly to cover all the cases. name: "reparent fails", - ts: memorytopo.NewServer("zone1"), - tmc: nil, + tmc: &testutil.TabletManagerClient{ + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, + }, tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -399,7 +441,6 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { }, } - ctx := context.Background() logger := logutil.NewMemoryLogger() for _, tt := range tests { @@ -408,15 +449,18 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := ctx + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() - testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, SkipShardCreation: false, }, tt.tablets...) if tt.lockShardBeforeTest { - lctx, unlock, err := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "locking for test") + lctx, unlock, err := ts.LockShard(ctx, tt.keyspace, tt.shard, "locking for test") require.NoError(t, err, "could not lock %s/%s for test case", tt.keyspace, tt.shard) defer func() { @@ -427,7 +471,7 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { ctx = lctx } - pr := NewPlannedReparenter(tt.ts, tt.tmc, logger) + pr := NewPlannedReparenter(ts, tt.tmc, logger) ev, err := pr.ReparentShard(ctx, tt.keyspace, tt.shard, tt.opts) if tt.shouldErr { assert.Error(t, err) @@ -515,7 +559,6 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { tests := []struct { name string - ts *topo.Server tmc tmclient.TabletManagerClient tablets []*topodatapb.Tablet @@ -906,7 +949,6 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { }, } - ctx := context.Background() logger := logutil.NewMemoryLogger() for _, tt := range tests { @@ -915,6 +957,11 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + defer func() { if tt.expectedEvent != nil { AssertReparentEventsEqualWithMessage(t, tt.expectedEvent, tt.ev, "expected preflightChecks to mutate the passed-in event") @@ -925,7 +972,7 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { } }() - pr := NewPlannedReparenter(tt.ts, tt.tmc, logger) + pr := NewPlannedReparenter(ts, tt.tmc, logger) if tt.opts.durability == nil { durability, err := GetDurabilityPolicy("none") require.NoError(t, err) @@ -950,7 +997,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { tests := []struct { name string - ts *topo.Server tmc tmclient.TabletManagerClient unlockTopo bool ctxTimeout time.Duration @@ -963,17 +1009,15 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { tabletMap map[string]*topo.TabletInfo opts PlannedReparentOptions - expectedPos string expectedEvent *events.Reparent shouldErr bool // Optional function to run some additional post-test assertions. Will // be run in the main test body before the common assertions are run, // regardless of the value of tt.shouldErr for that test case. - extraAssertions func(t *testing.T, pos string, err error) + extraAssertions func(t *testing.T, err error) }{ { name: "successful promotion", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -998,15 +1042,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", }, }, - PromoteReplicaResults: map[string]struct { - Result string - Error error - }{ - "zone1-0000000200": { - Result: "successful reparent journal position", - Error: nil, - }, - }, SetReplicationSourceResults: map[string]error{ "zone1-0000000200": nil, }, @@ -1033,14 +1068,12 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { Uid: 200, }, }, - tabletMap: map[string]*topo.TabletInfo{}, - opts: PlannedReparentOptions{}, - expectedPos: "successful reparent journal position", - shouldErr: false, + tabletMap: map[string]*topo.TabletInfo{}, + opts: PlannedReparentOptions{}, + shouldErr: false, }, { name: "cannot get snapshot of current primary", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PrimaryPositionResults: map[string]struct { Position string @@ -1074,7 +1107,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "primary-elect fails to catch up to current primary snapshot position", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PrimaryPositionResults: map[string]struct { Position string @@ -1111,7 +1143,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "primary-elect times out catching up to current primary snapshot position", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PrimaryPositionResults: map[string]struct { Position string @@ -1153,7 +1184,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "lost topology lock", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PrimaryPositionResults: map[string]struct { Position string @@ -1191,7 +1221,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "failed to demote current primary", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -1236,7 +1265,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "primary-elect fails to catch up to current primary demotion position", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -1293,7 +1321,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "primary-elect times out catching up to current primary demotion position", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -1355,7 +1382,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "demotion succeeds but parent context times out", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -1376,20 +1402,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", }, }, - PromoteReplicaResults: map[string]struct { - Result string - Error error - }{ - // This being present means that if we don't encounter a - // a case where either WaitForPosition errors, or the parent - // context times out, then we will fail the test, since it - // will cause the overall function under test to return no - // error. - "zone1-0000000200": { - Result: "success!", - Error: nil, - }, - }, SetReplicationSourceResults: map[string]error{ "zone1-0000000200": nil, }, @@ -1426,7 +1438,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { }, { name: "rollback fails", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -1483,13 +1494,12 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { tabletMap: map[string]*topo.TabletInfo{}, opts: PlannedReparentOptions{}, shouldErr: true, - extraAssertions: func(t *testing.T, pos string, err error) { + extraAssertions: func(t *testing.T, err error) { assert.Contains(t, err.Error(), "UndoDemotePrimary", "expected error to include information about failed demotion rollback") }, }, { name: "rollback succeeds", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -1546,147 +1556,12 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { tabletMap: map[string]*topo.TabletInfo{}, opts: PlannedReparentOptions{}, shouldErr: true, - extraAssertions: func(t *testing.T, pos string, err error) { + extraAssertions: func(t *testing.T, err error) { assert.NotContains(t, err.Error(), "UndoDemotePrimary", "expected error to not include information about failed demotion rollback") }, }, - { - name: "primary-elect fails to promote", - ts: memorytopo.NewServer("zone1"), - tmc: &testutil.TabletManagerClient{ - DemotePrimaryResults: map[string]struct { - Status *replicationdatapb.PrimaryStatus - Error error - }{ - "zone1-0000000100": { - Status: &replicationdatapb.PrimaryStatus{ - // value of Position doesn't strictly matter for - // this test case, as long as it matches the inner - // key of the WaitForPositionResults map for the - // primary-elect. - Position: "position1", - }, - Error: nil, - }, - }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000100": { - Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", - }, - }, - PromoteReplicaResults: map[string]struct { - Result string - Error error - }{ - "zone1-0000000200": { - Error: assert.AnError, - }, - }, - SetReplicationSourceResults: map[string]error{ - "zone1-0000000200": nil, - }, - WaitForPositionResults: map[string]map[string]error{ - "zone1-0000000200": { - "position1": nil, - }, - }, - }, - ev: &events.Reparent{}, - keyspace: "testkeyspace", - shard: "-", - currentPrimary: &topo.TabletInfo{ - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 100, - }, - }, - }, - primaryElect: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 200, - }, - }, - tabletMap: map[string]*topo.TabletInfo{}, - opts: PlannedReparentOptions{}, - shouldErr: true, - }, - { - name: "promotion succeeds but parent context times out", - ts: memorytopo.NewServer("zone1"), - tmc: &testutil.TabletManagerClient{ - DemotePrimaryResults: map[string]struct { - Status *replicationdatapb.PrimaryStatus - Error error - }{ - "zone1-0000000100": { - Status: &replicationdatapb.PrimaryStatus{ - // value of Position doesn't strictly matter for - // this test case, as long as it matches the inner - // key of the WaitForPositionResults map for the - // primary-elect. - Position: "position1", - }, - Error: nil, - }, - }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000100": { - Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", - }, - }, - PromoteReplicaPostDelays: map[string]time.Duration{ - "zone1-0000000200": time.Millisecond * 100, // 10x the parent context timeout - }, - PromoteReplicaResults: map[string]struct { - Result string - Error error - }{ - "zone1-0000000200": { - Error: nil, - }, - }, - SetReplicationSourceResults: map[string]error{ - "zone1-0000000200": nil, - }, - WaitForPositionResults: map[string]map[string]error{ - "zone1-0000000200": { - "position1": nil, - }, - }, - }, - ctxTimeout: time.Millisecond * 10, - ev: &events.Reparent{}, - keyspace: "testkeyspace", - shard: "-", - currentPrimary: &topo.TabletInfo{ - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 100, - }, - }, - }, - primaryElect: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 200, - }, - }, - tabletMap: map[string]*topo.TabletInfo{}, - opts: PlannedReparentOptions{}, - shouldErr: true, - }, } - ctx := context.Background() logger := logutil.NewMemoryLogger() for _, tt := range tests { @@ -1695,15 +1570,18 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := ctx + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() - testutil.AddShards(ctx, t, tt.ts, &vtctldatapb.Shard{ + testutil.AddShards(ctx, t, ts, &vtctldatapb.Shard{ Keyspace: tt.keyspace, Name: tt.shard, }) if !tt.unlockTopo { - lctx, unlock, err := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + lctx, unlock, err := ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") require.NoError(t, err, "could not lock %s/%s for testing", tt.keyspace, tt.shard) defer func() { @@ -1714,7 +1592,7 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { ctx = lctx } - pr := NewPlannedReparenter(tt.ts, tt.tmc, logger) + pr := NewPlannedReparenter(ts, tt.tmc, logger) if tt.ctxTimeout > 0 { _ctx, cancel := context.WithTimeout(ctx, tt.ctxTimeout) @@ -1727,7 +1605,7 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { require.NoError(t, err) tt.opts.durability = durability - pos, err := pr.performGracefulPromotion( + err = pr.performGracefulPromotion( ctx, tt.ev, tt.keyspace, @@ -1739,7 +1617,7 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { ) if tt.extraAssertions != nil { - tt.extraAssertions(t, pos, err) + tt.extraAssertions(t, err) } if tt.shouldErr { @@ -1749,7 +1627,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { } assert.NoError(t, err) - assert.Equal(t, tt.expectedPos, pos) }) } } @@ -1759,7 +1636,6 @@ func TestPlannedReparenter_performInitialPromotion(t *testing.T) { tests := []struct { name string - ts *topo.Server tmc tmclient.TabletManagerClient ctxTimeout time.Duration @@ -1773,7 +1649,6 @@ func TestPlannedReparenter_performInitialPromotion(t *testing.T) { }{ { name: "successful promotion", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ InitPrimaryResults: map[string]struct { Result string @@ -1799,7 +1674,6 @@ func TestPlannedReparenter_performInitialPromotion(t *testing.T) { }, { name: "primary-elect fails to promote", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ InitPrimaryResults: map[string]struct { Result string @@ -1823,7 +1697,6 @@ func TestPlannedReparenter_performInitialPromotion(t *testing.T) { }, { name: "promotion succeeds but parent context times out", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ InitPrimaryPostDelays: map[string]time.Duration{ "zone1-0000000200": time.Millisecond * 100, // 10x the parent context timeout @@ -1851,7 +1724,6 @@ func TestPlannedReparenter_performInitialPromotion(t *testing.T) { }, } - ctx := context.Background() logger := logutil.NewMemoryLogger() for _, tt := range tests { @@ -1860,14 +1732,17 @@ func TestPlannedReparenter_performInitialPromotion(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := ctx + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() - testutil.AddShards(ctx, t, tt.ts, &vtctldatapb.Shard{ + testutil.AddShards(ctx, t, ts, &vtctldatapb.Shard{ Keyspace: tt.keyspace, Name: tt.shard, }) - pr := NewPlannedReparenter(tt.ts, tt.tmc, logger) + pr := NewPlannedReparenter(ts, tt.tmc, logger) if tt.ctxTimeout > 0 { _ctx, cancel := context.WithTimeout(ctx, tt.ctxTimeout) @@ -2056,7 +1931,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { tests := []struct { name string - ts *topo.Server tmc tmclient.TabletManagerClient timeout time.Duration unlockTopo bool @@ -2066,12 +1940,10 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { primaryElect *topodatapb.Tablet tabletMap map[string]*topo.TabletInfo - expectedPos string - shouldErr bool + shouldErr bool }{ { name: "success", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -2096,15 +1968,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { Error: nil, }, }, - PromoteReplicaResults: map[string]struct { - Result string - Error error - }{ - "zone1-0000000100": { - Result: "reparent journal position", - Error: nil, - }, - }, }, unlockTopo: false, keyspace: "testkeyspace", @@ -2141,12 +2004,10 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { }, }, }, - expectedPos: "reparent journal position", - shouldErr: false, + shouldErr: false, }, { name: "failed to DemotePrimary on a tablet", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -2181,7 +2042,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { }, { name: "timed out during DemotePrimary on a tablet", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryDelays: map[string]time.Duration{ "zone1-0000000100": time.Millisecond * 50, @@ -2222,7 +2082,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { }, { name: "failed to DecodePosition on a tablet's demote position", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -2259,7 +2118,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { }, { name: "primary-elect not in tablet map", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{}, unlockTopo: false, keyspace: "testkeyspace", @@ -2275,7 +2133,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { }, { name: "primary-elect not most at most advanced position", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -2340,7 +2197,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { }, { name: "lost topology lock", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -2403,161 +2259,8 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { }, shouldErr: true, }, - { - name: "failed to promote primary-elect", - ts: memorytopo.NewServer("zone1"), - tmc: &testutil.TabletManagerClient{ - DemotePrimaryResults: map[string]struct { - Status *replicationdatapb.PrimaryStatus - Error error - }{ - "zone1-0000000100": { - Status: &replicationdatapb.PrimaryStatus{ - Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", - }, - Error: nil, - }, - "zone1-0000000101": { - Status: &replicationdatapb.PrimaryStatus{ - Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", - }, - Error: nil, - }, - "zone1-0000000102": { - Status: &replicationdatapb.PrimaryStatus{ - Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", - }, - Error: nil, - }, - }, - PromoteReplicaResults: map[string]struct { - Result string - Error error - }{ - "zone1-0000000100": { - Result: "", - Error: assert.AnError, - }, - }, - }, - unlockTopo: false, - keyspace: "testkeyspace", - shard: "-", - primaryElect: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 100, - }, - }, - tabletMap: map[string]*topo.TabletInfo{ - "zone1-0000000100": { - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 100, - }, - }, - }, - "zone1-0000000101": { - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 101, - }, - }, - }, - "zone1-0000000102": { - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 102, - }, - }, - }, - }, - shouldErr: true, - }, - { - name: "timed out while promoting primary-elect", - ts: memorytopo.NewServer("zone1"), - tmc: &testutil.TabletManagerClient{ - DemotePrimaryResults: map[string]struct { - Status *replicationdatapb.PrimaryStatus - Error error - }{ - "zone1-0000000100": { - Status: &replicationdatapb.PrimaryStatus{ - Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", - }, - Error: nil, - }, - "zone1-0000000101": { - Status: &replicationdatapb.PrimaryStatus{ - Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-10", - }, - Error: nil, - }, - "zone1-0000000102": { - Status: &replicationdatapb.PrimaryStatus{ - Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", - }, - Error: nil, - }, - }, - PromoteReplicaDelays: map[string]time.Duration{ - "zone1-0000000100": time.Millisecond * 100, - }, - PromoteReplicaResults: map[string]struct { - Result string - Error error - }{ - "zone1-0000000100": { - Result: "reparent journal position", - Error: nil, - }, - }, - }, - timeout: time.Millisecond * 50, - unlockTopo: false, - keyspace: "testkeyspace", - shard: "-", - primaryElect: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 100, - }, - }, - tabletMap: map[string]*topo.TabletInfo{ - "zone1-0000000100": { - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 100, - }, - }, - }, - "zone1-0000000101": { - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 101, - }, - }, - }, - "zone1-0000000102": { - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Cell: "zone1", - Uid: 102, - }, - }, - }, - }, - shouldErr: true, - }, } - ctx := context.Background() logger := logutil.NewMemoryLogger() for _, tt := range tests { @@ -2566,16 +2269,20 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := ctx + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + pr := NewPlannedReparenter(nil, tt.tmc, logger) - testutil.AddShards(ctx, t, tt.ts, &vtctldatapb.Shard{ + testutil.AddShards(ctx, t, ts, &vtctldatapb.Shard{ Keyspace: tt.keyspace, Name: tt.shard, }) if !tt.unlockTopo { - lctx, unlock, err := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") + lctx, unlock, err := ts.LockShard(ctx, tt.keyspace, tt.shard, "test lock") require.NoError(t, err, "could not lock %s/%s for testing", tt.keyspace, tt.shard) defer func() { @@ -2595,7 +2302,7 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { durability, err := GetDurabilityPolicy("none") require.NoError(t, err) - rp, err := pr.performPotentialPromotion(ctx, tt.keyspace, tt.shard, tt.primaryElect, tt.tabletMap, PlannedReparentOptions{durability: durability}) + err = pr.performPotentialPromotion(ctx, tt.keyspace, tt.shard, tt.primaryElect, tt.tabletMap, PlannedReparentOptions{durability: durability}) if tt.shouldErr { assert.Error(t, err) @@ -2603,7 +2310,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { } assert.NoError(t, err) - assert.Equal(t, tt.expectedPos, rp) }) } } @@ -2613,7 +2319,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { tests := []struct { name string - ts *topo.Server tmc tmclient.TabletManagerClient tablets []*topodatapb.Tablet unlockTopo bool @@ -2628,7 +2333,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }{ { name: "success: current primary cannot be determined", // "Case (1)" - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -2647,6 +2351,18 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { Error: nil, }, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, PopulateReparentJournalResults: map[string]error{ "zone1-0000000200": nil, // zone1-200 gets promoted }, @@ -2708,7 +2424,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "success: current primary is desired primary", // "Case (2)" - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PrimaryPositionResults: map[string]struct { Position string @@ -2728,6 +2443,18 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { SetReadWriteResults: map[string]error{ "zone1-0000000100": nil, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, }, tablets: []*topodatapb.Tablet{ { @@ -2783,7 +2510,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "success: graceful promotion", // "Case (3)" - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ DemotePrimaryResults: map[string]struct { Status *replicationdatapb.PrimaryStatus @@ -2797,6 +2523,18 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { Error: nil, }, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, PrimaryPositionResults: map[string]struct { Position string Error error @@ -2867,7 +2605,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "shard not found", - ts: memorytopo.NewServer("zone1"), tmc: nil, tablets: nil, unlockTopo: true, @@ -2882,7 +2619,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "shard initialization", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000200": nil, @@ -2899,6 +2635,18 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { SetReplicationSourceResults: map[string]error{ "zone1-0000000100": nil, // called during reparentTablets to make this tablet a replica of newPrimary }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, }, tablets: []*topodatapb.Tablet{ // Shard has no current primary in the beginning. @@ -2951,7 +2699,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "shard initialization with no new primary provided", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000200": nil, @@ -2970,15 +2717,29 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { Error error }{ "zone1-0000000200": { - Error: mysql.ErrNotReplica, + Position: &replicationdatapb.Status{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-2", + }, }, "zone1-0000000100": { - Error: fmt.Errorf("not providing replication status, so that 200 wins"), + Error: mysql.ErrNotReplica, }, }, SetReplicationSourceResults: map[string]error{ "zone1-0000000100": nil, // called during reparentTablets to make this tablet a replica of newPrimary }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, }, tablets: []*topodatapb.Tablet{ // Shard has no current primary in the beginning. @@ -3025,8 +2786,20 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "preflight checks determine PRS is no-op", - ts: memorytopo.NewServer("zone1"), - tmc: nil, + tmc: &testutil.TabletManagerClient{ + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, + }, tablets: []*topodatapb.Tablet{ { Alias: &topodatapb.TabletAlias{ @@ -3073,11 +2846,22 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "promotion step fails", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ SetReadWriteResults: map[string]error{ "zone1-0000000100": assert.AnError, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, }, tablets: []*topodatapb.Tablet{ { @@ -3133,8 +2917,19 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "lost topology lock", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, PrimaryPositionResults: map[string]struct { Position string Error error @@ -3204,7 +2999,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, { name: "failed to reparent tablets", - ts: memorytopo.NewServer("zone1"), tmc: &testutil.TabletManagerClient{ PrimaryPositionResults: map[string]struct { Position string @@ -3215,6 +3009,18 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { Error: nil, }, }, + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": assert.AnError, }, @@ -3276,7 +3082,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }, } - ctx := context.Background() logger := logutil.NewMemoryLogger() for _, tt := range tests { @@ -3285,16 +3090,19 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ctx := ctx + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() - testutil.AddTablets(ctx, t, tt.ts, &testutil.AddTabletOptions{ + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, ForceSetShardPrimary: true, // Some of our test cases count on having multiple primaries, so let the last one "win". SkipShardCreation: false, }, tt.tablets...) if !tt.unlockTopo { - lctx, unlock, err := tt.ts.LockShard(ctx, tt.keyspace, tt.shard, "locking for testing") + lctx, unlock, err := ts.LockShard(ctx, tt.keyspace, tt.shard, "locking for testing") require.NoError(t, err, "could not lock %s/%s for testing", tt.keyspace, tt.shard) defer func() { @@ -3311,7 +3119,7 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { }() } - pr := NewPlannedReparenter(tt.ts, tt.tmc, logger) + pr := NewPlannedReparenter(ts, tt.tmc, logger) err := pr.reparentShardLocked(ctx, tt.ev, tt.keyspace, tt.shard, tt.opts) if tt.shouldErr { @@ -3335,10 +3143,12 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { durability string ev *events.Reparent reparentJournalPosition string + promoteReplicaRequired bool tabletMap map[string]*topo.TabletInfo opts PlannedReparentOptions shouldErr bool + wantErr string }{ { name: "success - durability = none", @@ -3473,6 +3283,158 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { }, }, shouldErr: false, + }, { + name: "success - promote replica required", + durability: "semi_sync", + promoteReplicaRequired: true, + tmc: &testutil.TabletManagerClient{ + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000100": { + Result: "successful reparent journal position", + Error: nil, + }, + }, + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + SetReplicationSourceResults: map[string]error{ + "zone1-0000000200": nil, + "zone1-0000000201": nil, + "zone1-0000000202": nil, + }, + SetReplicationSourceSemiSync: map[string]bool{ + "zone1-0000000200": true, + "zone1-0000000201": true, + "zone1-0000000202": false, + }, + }, + ev: &events.Reparent{ + NewPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "zone1-0000000200": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000201": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 201, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000202": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 202, + }, + Type: topodatapb.TabletType_RDONLY, + }, + }, + }, + shouldErr: false, + }, { + name: "Promote replica failed", + durability: "semi_sync", + promoteReplicaRequired: true, + tmc: &testutil.TabletManagerClient{ + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000100": { + Error: errors.New("failed promote replica"), + }, + }, + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + SetReplicationSourceResults: map[string]error{ + "zone1-0000000200": nil, + "zone1-0000000201": nil, + "zone1-0000000202": nil, + }, + SetReplicationSourceSemiSync: map[string]bool{ + "zone1-0000000200": true, + "zone1-0000000201": true, + "zone1-0000000202": false, + }, + }, + ev: &events.Reparent{ + NewPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "zone1-0000000200": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000201": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 201, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000202": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 202, + }, + Type: topodatapb.TabletType_RDONLY, + }, + }, + }, + shouldErr: true, + wantErr: "failed PromoteReplica(primary=zone1-0000000100,", }, { name: "SetReplicationSource failed on replica", @@ -3534,6 +3496,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { }, }, shouldErr: true, + wantErr: "retry failed replicas: tablet zone1-0000000201 failed to SetReplicationSource(zone1-0000000100): assert.AnError general error for testing", }, { name: "SetReplicationSource timed out on replica", @@ -3601,6 +3564,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { WaitReplicasTimeout: time.Millisecond * 10, }, shouldErr: true, + wantErr: "retry failed replicas: tablet zone1-0000000201 failed to SetReplicationSource(zone1-0000000100): context deadline exceeded", }, { name: "PopulateReparentJournal failed out on new primary", @@ -3662,6 +3626,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { }, }, shouldErr: true, + wantErr: "failed PopulateReparentJournal(primary=zone1-0000000100", }, { name: "PopulateReparentJournal timed out on new primary", @@ -3729,6 +3694,7 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { WaitReplicasTimeout: time.Millisecond * 10, }, shouldErr: true, + wantErr: "failed PopulateReparentJournal(primary=zone1-0000000100", }, } @@ -3749,10 +3715,12 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { durability, err := GetDurabilityPolicy(durabilityPolicy) require.NoError(t, err) tt.opts.durability = durability - err = pr.reparentTablets(ctx, tt.ev, tt.reparentJournalPosition, tt.tabletMap, tt.opts) + err = pr.reparentTablets(ctx, tt.ev, tt.reparentJournalPosition, tt.promoteReplicaRequired, tt.tabletMap, tt.opts) if tt.shouldErr { assert.Error(t, err) - + if tt.wantErr != "" { + require.ErrorContains(t, err, tt.wantErr) + } return } @@ -3801,3 +3769,189 @@ func AssertReparentEventsEqual(t *testing.T, expected *events.Reparent, actual * AssertReparentEventsEqualWithMessage(t, expected, actual, "") } + +// TestPlannedReparenter_verifyAllTabletsReachable tests the functionality of verifyAllTabletsReachable. +func TestPlannedReparenter_verifyAllTabletsReachable(t *testing.T) { + tests := []struct { + name string + tmc tmclient.TabletManagerClient + tabletMap map[string]*topo.TabletInfo + remoteOpTime time.Duration + wantErr string + }{ + { + name: "Success", + tmc: &testutil.TabletManagerClient{ + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000201": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "zone1-0000000200": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000201": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 201, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + }, { + name: "Failure", + tmc: &testutil.TabletManagerClient{ + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Error: fmt.Errorf("primary status failed"), + }, + "zone1-0000000201": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "zone1-0000000200": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000201": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 201, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + wantErr: "primary status failed", + }, { + name: "Timeout", + tmc: &testutil.TabletManagerClient{ + PrimaryStatusDelays: map[string]time.Duration{ + "zone1-0000000100": 20 * time.Second, + }, + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000201": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, + }, + remoteOpTime: 100 * time.Millisecond, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "zone1-0000000200": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "zone1-0000000201": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 201, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + wantErr: "context deadline exceeded", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + + pr := &PlannedReparenter{ + ts: ts, + tmc: tt.tmc, + } + if tt.remoteOpTime != 0 { + oldTime := topo.RemoteOperationTimeout + topo.RemoteOperationTimeout = tt.remoteOpTime + defer func() { + topo.RemoteOperationTimeout = oldTime + }() + } + err := pr.verifyAllTabletsReachable(context.Background(), tt.tabletMap) + if tt.wantErr == "" { + require.NoError(t, err) + return + } + require.ErrorContains(t, err, tt.wantErr) + }) + } +} diff --git a/go/vt/vtctl/reparentutil/reparent_sorter.go b/go/vt/vtctl/reparentutil/reparent_sorter.go index 77547827d37..e4461b78064 100644 --- a/go/vt/vtctl/reparentutil/reparent_sorter.go +++ b/go/vt/vtctl/reparentutil/reparent_sorter.go @@ -19,7 +19,7 @@ package reparentutil import ( "sort" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/vterrors" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -30,12 +30,12 @@ import ( // candidate for intermediate promotion in emergency reparent shard, and the new primary in planned reparent shard type reparentSorter struct { tablets []*topodatapb.Tablet - positions []mysql.Position + positions []replication.Position durability Durabler } // newReparentSorter creates a new reparentSorter -func newReparentSorter(tablets []*topodatapb.Tablet, positions []mysql.Position, durability Durabler) *reparentSorter { +func newReparentSorter(tablets []*topodatapb.Tablet, positions []replication.Position, durability Durabler) *reparentSorter { return &reparentSorter{ tablets: tablets, positions: positions, @@ -84,7 +84,7 @@ func (rs *reparentSorter) Less(i, j int) bool { // sortTabletsForReparent sorts the tablets, given their positions for emergency reparent shard and planned reparent shard. // Tablets are sorted first by their replication positions, with ties broken by the promotion rules. -func sortTabletsForReparent(tablets []*topodatapb.Tablet, positions []mysql.Position, durability Durabler) error { +func sortTabletsForReparent(tablets []*topodatapb.Tablet, positions []replication.Position, durability Durabler) error { // throw an error internal error in case of unequal number of tablets and positions // fail-safe code prevents panic in sorting in case the lengths are unequal if len(tablets) != len(positions) { diff --git a/go/vt/vtctl/reparentutil/reparent_sorter_test.go b/go/vt/vtctl/reparentutil/reparent_sorter_test.go index 469d9ac2c88..c21c95ad22b 100644 --- a/go/vt/vtctl/reparentutil/reparent_sorter_test.go +++ b/go/vt/vtctl/reparentutil/reparent_sorter_test.go @@ -21,14 +21,15 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) // TestReparentSorter tests that the sorting for tablets works correctly func TestReparentSorter(t *testing.T) { - sid1 := mysql.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - sid2 := mysql.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16} + sid1 := replication.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + sid2 := replication.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16} cell1 := "cell1" cell2 := "cell2" tabletReplica1_100 := &topodatapb.Tablet{ @@ -60,64 +61,64 @@ func TestReparentSorter(t *testing.T) { Type: topodatapb.TabletType_RDONLY, } - mysqlGTID1 := mysql.Mysql56GTID{ + mysqlGTID1 := replication.Mysql56GTID{ Server: sid1, Sequence: 9, } - mysqlGTID2 := mysql.Mysql56GTID{ + mysqlGTID2 := replication.Mysql56GTID{ Server: sid2, Sequence: 10, } - mysqlGTID3 := mysql.Mysql56GTID{ + mysqlGTID3 := replication.Mysql56GTID{ Server: sid1, Sequence: 11, } - positionMostAdvanced := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionMostAdvanced := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID1) positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID2) positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID3) - positionEmpty := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionEmpty := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} - positionIntermediate1 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate1 := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionIntermediate1.GTIDSet = positionIntermediate1.GTIDSet.AddGTID(mysqlGTID1) - positionIntermediate2 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate2 := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID1) positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID2) testcases := []struct { name string tablets []*topodatapb.Tablet - positions []mysql.Position + positions []replication.Position containsErr string sortedTablets []*topodatapb.Tablet }{ { name: "all advanced, sort via promotion rules", tablets: []*topodatapb.Tablet{nil, tabletReplica1_100, tabletRdonly1_102}, - positions: []mysql.Position{positionMostAdvanced, positionMostAdvanced, positionMostAdvanced}, + positions: []replication.Position{positionMostAdvanced, positionMostAdvanced, positionMostAdvanced}, sortedTablets: []*topodatapb.Tablet{tabletReplica1_100, tabletRdonly1_102, nil}, }, { name: "ordering by position", tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100, tabletReplica1_100, tabletRdonly1_102}, - positions: []mysql.Position{positionEmpty, positionIntermediate1, positionIntermediate2, positionMostAdvanced}, + positions: []replication.Position{positionEmpty, positionIntermediate1, positionIntermediate2, positionMostAdvanced}, sortedTablets: []*topodatapb.Tablet{tabletRdonly1_102, tabletReplica1_100, tabletReplica2_100, tabletReplica1_101}, }, { name: "tablets and positions count error", tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100}, - positions: []mysql.Position{positionEmpty, positionIntermediate1, positionMostAdvanced}, + positions: []replication.Position{positionEmpty, positionIntermediate1, positionMostAdvanced}, containsErr: "unequal number of tablets and positions", }, { name: "promotion rule check", tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletRdonly1_102}, - positions: []mysql.Position{positionMostAdvanced, positionMostAdvanced}, + positions: []replication.Position{positionMostAdvanced, positionMostAdvanced}, sortedTablets: []*topodatapb.Tablet{tabletReplica1_101, tabletRdonly1_102}, }, { name: "mixed", tablets: []*topodatapb.Tablet{tabletReplica1_101, tabletReplica2_100, tabletReplica1_100, tabletRdonly1_102}, - positions: []mysql.Position{positionEmpty, positionIntermediate1, positionMostAdvanced, positionIntermediate1}, + positions: []replication.Position{positionEmpty, positionIntermediate1, positionMostAdvanced, positionIntermediate1}, sortedTablets: []*topodatapb.Tablet{tabletReplica1_100, tabletReplica2_100, tabletRdonly1_102, tabletReplica1_101}, }, } diff --git a/go/vt/vtctl/reparentutil/replication.go b/go/vt/vtctl/reparentutil/replication.go index ddc83ad43f4..9b33a5b0536 100644 --- a/go/vt/vtctl/reparentutil/replication.go +++ b/go/vt/vtctl/reparentutil/replication.go @@ -22,7 +22,8 @@ import ( "time" "vitess.io/vitess/go/event" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sets" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" @@ -44,13 +45,13 @@ import ( func FindValidEmergencyReparentCandidates( statusMap map[string]*replicationdatapb.StopReplicationStatus, primaryStatusMap map[string]*replicationdatapb.PrimaryStatus, -) (map[string]mysql.Position, error) { - replicationStatusMap := make(map[string]*mysql.ReplicationStatus, len(statusMap)) - positionMap := make(map[string]mysql.Position) +) (map[string]replication.Position, error) { + replicationStatusMap := make(map[string]*replication.ReplicationStatus, len(statusMap)) + positionMap := make(map[string]replication.Position) // Build out replication status list from proto types. for alias, statuspb := range statusMap { - status := mysql.ProtoToReplicationStatus(statuspb.After) + status := replication.ProtoToReplicationStatus(statuspb.After) replicationStatusMap[alias] = &status } @@ -63,7 +64,7 @@ func FindValidEmergencyReparentCandidates( ) for alias, status := range replicationStatusMap { - if _, ok := status.RelayLogPosition.GTIDSet.(mysql.Mysql56GTIDSet); ok { + if _, ok := status.RelayLogPosition.GTIDSet.(replication.Mysql56GTIDSet); ok { isGTIDBased = true } else { isNonGTIDBased = true @@ -98,14 +99,14 @@ func FindValidEmergencyReparentCandidates( // This condition should really never happen, since we did the same cast // in the earlier loop, but let's be doubly sure. - relayLogGTIDSet, ok := status.RelayLogPosition.GTIDSet.(mysql.Mysql56GTIDSet) + relayLogGTIDSet, ok := status.RelayLogPosition.GTIDSet.(replication.Mysql56GTIDSet) if !ok { return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "we got a filled-in relay log position, but it's not of type Mysql56GTIDSet, even though we've determined we need to use GTID based assesment") } // We need to remove this alias's status from the list, otherwise the // GTID diff will always be empty. - statusList := make([]*mysql.ReplicationStatus, 0, len(replicationStatusMap)-1) + statusList := make([]*replication.ReplicationStatus, 0, len(replicationStatusMap)-1) for a, s := range replicationStatusMap { if a != alias { @@ -126,12 +127,12 @@ func FindValidEmergencyReparentCandidates( continue } - pos := mysql.Position{GTIDSet: relayLogGTIDSet} + pos := replication.Position{GTIDSet: relayLogGTIDSet} positionMap[alias] = pos } for alias, primaryStatus := range primaryStatusMap { - executedPosition, err := mysql.DecodePosition(primaryStatus.Position) + executedPosition, err := replication.DecodePosition(primaryStatus.Position) if err != nil { return nil, vterrors.Wrapf(err, "could not decode a primary status executed position for tablet %v: %v", alias, err) } @@ -150,9 +151,9 @@ func ReplicaWasRunning(stopStatus *replicationdatapb.StopReplicationStatus) (boo return false, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "could not determine Before state of StopReplicationStatus %v", stopStatus) } - replStatus := mysql.ProtoToReplicationStatus(stopStatus.Before) - return (replStatus.IOState == mysql.ReplicationStateRunning) || - (replStatus.SQLState == mysql.ReplicationStateRunning), nil + replStatus := replication.ProtoToReplicationStatus(stopStatus.Before) + return (replStatus.IOState == replication.ReplicationStateRunning) || + (replStatus.SQLState == replication.ReplicationStateRunning), nil } // SQLThreadWasRunning returns true if a StopReplicationStatus indicates that the @@ -163,8 +164,8 @@ func SQLThreadWasRunning(stopStatus *replicationdatapb.StopReplicationStatus) (b return false, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "could not determine Before state of StopReplicationStatus %v", stopStatus) } - replStatus := mysql.ProtoToReplicationStatus(stopStatus.Before) - return replStatus.SQLState == mysql.ReplicationStateRunning, nil + replStatus := replication.ProtoToReplicationStatus(stopStatus.Before) + return replStatus.SQLState == replication.ReplicationStateRunning, nil } // SetReplicationSource is used to set the replication source on the specified @@ -217,6 +218,7 @@ func stopReplicationAndBuildStatusMaps( ignoredTablets sets.Set[string], tabletToWaitFor *topodatapb.TabletAlias, durability Durabler, + waitForAllTablets bool, logger logutil.Logger, ) (*replicationSnapshot, error) { event.DispatchUpdate(ev, "stop replication on all replicas") @@ -248,8 +250,8 @@ func stopReplicationAndBuildStatusMaps( stopReplicationStatus, err := tmc.StopReplicationAndGetStatus(groupCtx, tabletInfo.Tablet, replicationdatapb.StopReplicationMode_IOTHREADONLY) if err != nil { - sqlErr, isSQLErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) - if isSQLErr && sqlErr != nil && sqlErr.Number() == mysql.ERNotReplica { + sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) + if isSQLErr && sqlErr != nil && sqlErr.Number() == sqlerror.ERNotReplica { var primaryStatus *replicationdatapb.PrimaryStatus primaryStatus, err = tmc.DemotePrimary(groupCtx, tabletInfo.Tablet) @@ -291,6 +293,12 @@ func stopReplicationAndBuildStatusMaps( } } + // For the tablets that we want to get a response from necessarily, we + // get them to set the MustWaitFor boolean as part of the concurrency.Error message + // that we send to the waitGroup below. + // + // numErrorsToWaitFor corresponds to how many such tablets there are. This is the number + // of special messages with MustWaitFor set that the call errgroup.Wait will wait for. tabletAliasToWaitFor := "" numErrorsToWaitFor := 0 if tabletToWaitFor != nil { @@ -300,6 +308,10 @@ func stopReplicationAndBuildStatusMaps( allTablets = append(allTablets, tabletInfo.Tablet) if !ignoredTablets.Has(alias) { mustWaitFor := tabletAliasToWaitFor == alias + // If this is a tablet that we must wait for + // we increment numErrorsToWaitFor and pass in this to the + // fillStatus function to indicate we must send this with the boolean + // MustWaitFor specified. if mustWaitFor { numErrorsToWaitFor++ } @@ -307,9 +319,18 @@ func stopReplicationAndBuildStatusMaps( } } + numGoRoutines := len(tabletMap) - ignoredTablets.Len() + // In general we want to wait for n-1 tablets to respond, since we know the primary tablet is down. + requiredSuccesses := numGoRoutines - 1 + if waitForAllTablets { + // In the special case, where we are explicitly told to wait for all the tablets to return, + // we set the required success to all the go-routines. + requiredSuccesses = numGoRoutines + } + errgroup := concurrency.ErrorGroup{ - NumGoroutines: len(tabletMap) - ignoredTablets.Len(), - NumRequiredSuccesses: len(tabletMap) - ignoredTablets.Len() - 1, + NumGoroutines: numGoRoutines, + NumRequiredSuccesses: requiredSuccesses, NumAllowedErrors: len(tabletMap), // We set the number of allowed errors to a very high value, because we don't want to exit early // even in case of multiple failures. We rely on the revoke function below to determine if we have more failures than we can tolerate NumErrorsToWaitFor: numErrorsToWaitFor, diff --git a/go/vt/vtctl/reparentutil/replication_test.go b/go/vt/vtctl/reparentutil/replication_test.go index 5c7adc42ec4..ed7bd152e9c 100644 --- a/go/vt/vtctl/reparentutil/replication_test.go +++ b/go/vt/vtctl/reparentutil/replication_test.go @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + _flag "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sets" @@ -299,6 +301,8 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { stopReplicasTimeout time.Duration ignoredTablets sets.Set[string] tabletToWaitFor *topodatapb.TabletAlias + timeSpent time.Duration + waitForAllTablets bool expectedStatusMap map[string]*replicationdatapb.StopReplicationStatus expectedPrimaryStatusMap map[string]*replicationdatapb.PrimaryStatus expectedTabletsReachable []*topodatapb.Tablet @@ -314,13 +318,13 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -349,11 +353,11 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000100": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -372,6 +376,159 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, }}, shouldErr: false, + }, { + name: "success with wait for all tablets", + durability: "none", + tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ + stopReplicationAndGetStatusResults: map[string]*struct { + StopStatus *replicationdatapb.StopReplicationStatus + Err error + }{ + "zone1-0000000100": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, + After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, + }, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, + After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, + }, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + ignoredTablets: sets.New[string](), + expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000100": { + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, + After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, + }, + "zone1-0000000101": { + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, + After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, + }, + }, + expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{}, + expectedTabletsReachable: []*topodatapb.Tablet{{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, { + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }}, + waitForAllTablets: true, + shouldErr: false, + }, { + name: "timing check with wait for all tablets", + durability: "none", + tmc: &stopReplicationAndBuildStatusMapsTestTMClient{ + stopReplicationAndGetStatusResults: map[string]*struct { + StopStatus *replicationdatapb.StopReplicationStatus + Err error + }{ + "zone1-0000000100": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, + After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, + }, + }, + "zone1-0000000101": { + StopStatus: &replicationdatapb.StopReplicationStatus{ + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, + After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, + }, + }, + }, + stopReplicationAndGetStatusDelays: map[string]time.Duration{ + // We want `zone1-0000000102` to take a lot of time to respond. + // Simulating a tablet being unreachable. + "zone1-0000000102": time.Hour, + }, + }, + stopReplicasTimeout: 1 * time.Second, + timeSpent: 900 * time.Millisecond, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + }, + }, + }, + ignoredTablets: sets.New[string](), + expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000100": { + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, + After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, + }, + "zone1-0000000101": { + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, + After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, + }, + }, + expectedPrimaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{}, + expectedTabletsReachable: []*topodatapb.Tablet{{ + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, { + Type: topodatapb.TabletType_REPLICA, + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }}, + waitForAllTablets: true, + shouldErr: false, }, { name: "success - 2 rdonly failures", @@ -383,13 +540,13 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -442,11 +599,11 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000100": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -476,13 +633,13 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -535,11 +692,11 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000100": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -569,13 +726,13 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -604,7 +761,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string]("zone1-0000000100"), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -643,7 +800,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -672,7 +829,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -717,7 +874,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -746,7 +903,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -826,13 +983,13 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -862,7 +1019,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -889,7 +1046,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -918,7 +1075,7 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, @@ -1031,19 +1188,19 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { }{ "zone1-0000000100": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, }, "zone1-0000000101": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, }, "zone1-0000000102": { StopStatus: &replicationdatapb.StopReplicationStatus{ - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429102:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429102:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429102:1-9"}, }, }, @@ -1085,15 +1242,15 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { ignoredTablets: sets.New[string](), expectedStatusMap: map[string]*replicationdatapb.StopReplicationStatus{ "zone1-0000000100": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429100:1-9"}, }, "zone1-0000000101": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429101:1-9"}, }, "zone1-0000000102": { - Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429102:1-5", IoState: int32(mysql.ReplicationStateRunning), SqlState: int32(mysql.ReplicationStateRunning)}, + Before: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429102:1-5", IoState: int32(replication.ReplicationStateRunning), SqlState: int32(replication.ReplicationStateRunning)}, After: &replicationdatapb.Status{Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429102:1-9"}, }, }, @@ -1128,7 +1285,12 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { t.Run(tt.name, func(t *testing.T) { durability, err := GetDurabilityPolicy(tt.durability) require.NoError(t, err) - res, err := stopReplicationAndBuildStatusMaps(ctx, tt.tmc, &events.Reparent{}, tt.tabletMap, tt.stopReplicasTimeout, tt.ignoredTablets, tt.tabletToWaitFor, durability, logger) + startTime := time.Now() + res, err := stopReplicationAndBuildStatusMaps(ctx, tt.tmc, &events.Reparent{}, tt.tabletMap, tt.stopReplicasTimeout, tt.ignoredTablets, tt.tabletToWaitFor, durability, tt.waitForAllTablets, logger) + totalTimeSpent := time.Since(startTime) + if tt.timeSpent != 0 { + assert.Greater(t, totalTimeSpent, tt.timeSpent) + } if tt.shouldErr { assert.Error(t, err) return @@ -1158,8 +1320,8 @@ func TestReplicaWasRunning(t *testing.T) { name: "io thread running", in: &replicationdatapb.StopReplicationStatus{ Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateStopped), }, }, expected: true, @@ -1169,8 +1331,8 @@ func TestReplicaWasRunning(t *testing.T) { name: "sql thread running", in: &replicationdatapb.StopReplicationStatus{ Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateRunning), }, }, expected: true, @@ -1180,8 +1342,8 @@ func TestReplicaWasRunning(t *testing.T) { name: "io and sql threads running", in: &replicationdatapb.StopReplicationStatus{ Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), }, }, expected: true, @@ -1191,8 +1353,8 @@ func TestReplicaWasRunning(t *testing.T) { name: "no replication threads running", in: &replicationdatapb.StopReplicationStatus{ Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), }, }, expected: false, @@ -1246,8 +1408,8 @@ func TestSQLThreadWasRunning(t *testing.T) { name: "io thread running", in: &replicationdatapb.StopReplicationStatus{ Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateStopped), }, }, expected: false, @@ -1257,8 +1419,8 @@ func TestSQLThreadWasRunning(t *testing.T) { name: "sql thread running", in: &replicationdatapb.StopReplicationStatus{ Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateRunning), }, }, expected: true, @@ -1268,8 +1430,8 @@ func TestSQLThreadWasRunning(t *testing.T) { name: "io and sql threads running", in: &replicationdatapb.StopReplicationStatus{ Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateRunning), - SqlState: int32(mysql.ReplicationStateRunning), + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), }, }, expected: true, @@ -1279,8 +1441,8 @@ func TestSQLThreadWasRunning(t *testing.T) { name: "no replication threads running", in: &replicationdatapb.StopReplicationStatus{ Before: &replicationdatapb.Status{ - IoState: int32(mysql.ReplicationStateStopped), - SqlState: int32(mysql.ReplicationStateStopped), + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), }, }, expected: false, diff --git a/go/vt/vtctl/reparentutil/util.go b/go/vt/vtctl/reparentutil/util.go index f4cebc3dd7d..c3499c7a1a4 100644 --- a/go/vt/vtctl/reparentutil/util.go +++ b/go/vt/vtctl/reparentutil/util.go @@ -22,7 +22,10 @@ import ( "sync" "time" - "vitess.io/vitess/go/mysql" + "golang.org/x/sync/errgroup" + + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" @@ -66,12 +69,12 @@ func ChooseNewPrimary( } var ( - wg sync.WaitGroup // mutex to secure the next two fields from concurrent access mu sync.Mutex // tablets that are possible candidates to be the new primary and their positions - validTablets []*topodatapb.Tablet - tabletPositions []mysql.Position + validTablets []*topodatapb.Tablet + tabletPositions []replication.Position + errorGroup, groupCtx = errgroup.WithContext(ctx) ) for _, tablet := range tabletMap { @@ -84,22 +87,24 @@ func ChooseNewPrimary( continue } - wg.Add(1) - - go func(tablet *topodatapb.Tablet) { - defer wg.Done() + tb := tablet.Tablet + errorGroup.Go(func() error { // find and store the positions for the tablet - pos, err := findPositionForTablet(ctx, tablet, logger, tmc, waitReplicasTimeout) + pos, err := findPositionForTablet(groupCtx, tb, logger, tmc, waitReplicasTimeout) mu.Lock() defer mu.Unlock() if err == nil { - validTablets = append(validTablets, tablet) + validTablets = append(validTablets, tb) tabletPositions = append(tabletPositions, pos) } - }(tablet.Tablet) + return err + }) } - wg.Wait() + err := errorGroup.Wait() + if err != nil { + return nil, err + } // return nothing if there are no valid tablets available if len(validTablets) == 0 { @@ -107,7 +112,7 @@ func ChooseNewPrimary( } // sort the tablets for finding the best primary - err := sortTabletsForReparent(validTablets, tabletPositions, durability) + err = sortTabletsForReparent(validTablets, tabletPositions, durability) if err != nil { return nil, err } @@ -117,7 +122,7 @@ func ChooseNewPrimary( // findPositionForTablet processes the replication position for a single tablet and // returns it. It is safe to call from multiple goroutines. -func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logger logutil.Logger, tmc tmclient.TabletManagerClient, waitTimeout time.Duration) (mysql.Position, error) { +func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logger logutil.Logger, tmc tmclient.TabletManagerClient, waitTimeout time.Duration) (replication.Position, error) { logger.Infof("getting replication position from %v", topoproto.TabletAliasString(tablet.Alias)) ctx, cancel := context.WithTimeout(ctx, waitTimeout) @@ -125,13 +130,13 @@ func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logge status, err := tmc.ReplicationStatus(ctx, tablet) if err != nil { - sqlErr, isSQLErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) - if isSQLErr && sqlErr != nil && sqlErr.Number() == mysql.ERNotReplica { + sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) + if isSQLErr && sqlErr != nil && sqlErr.Number() == sqlerror.ERNotReplica { logger.Warningf("no replication statue from %v, using empty gtid set", topoproto.TabletAliasString(tablet.Alias)) - return mysql.Position{}, nil + return replication.Position{}, nil } logger.Warningf("failed to get replication status from %v, ignoring tablet: %v", topoproto.TabletAliasString(tablet.Alias), err) - return mysql.Position{}, err + return replication.Position{}, err } // Use the relay log position if available, otherwise use the executed GTID set (binary log position). @@ -139,10 +144,10 @@ func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logge if status.RelayLogPosition != "" { positionString = status.RelayLogPosition } - pos, err := mysql.DecodePosition(positionString) + pos, err := replication.DecodePosition(positionString) if err != nil { logger.Warningf("cannot decode replica position %v for tablet %v, ignoring tablet: %v", positionString, topoproto.TabletAliasString(tablet.Alias), err) - return mysql.Position{}, err + return replication.Position{}, err } return pos, nil @@ -247,9 +252,9 @@ func ShardReplicationStatuses(ctx context.Context, ts *topo.Server, tmc tmclient } // getValidCandidatesAndPositionsAsList converts the valid candidates from a map to a list of tablets, making it easier to sort -func getValidCandidatesAndPositionsAsList(validCandidates map[string]mysql.Position, tabletMap map[string]*topo.TabletInfo) ([]*topodatapb.Tablet, []mysql.Position, error) { +func getValidCandidatesAndPositionsAsList(validCandidates map[string]replication.Position, tabletMap map[string]*topo.TabletInfo) ([]*topodatapb.Tablet, []replication.Position, error) { var validTablets []*topodatapb.Tablet - var tabletPositions []mysql.Position + var tabletPositions []replication.Position for tabletAlias, position := range validCandidates { tablet, isFound := tabletMap[tabletAlias] if !isFound { @@ -262,8 +267,8 @@ func getValidCandidatesAndPositionsAsList(validCandidates map[string]mysql.Posit } // restrictValidCandidates is used to restrict some candidates from being considered eligible for becoming the intermediate source or the final promotion candidate -func restrictValidCandidates(validCandidates map[string]mysql.Position, tabletMap map[string]*topo.TabletInfo) (map[string]mysql.Position, error) { - restrictedValidCandidates := make(map[string]mysql.Position) +func restrictValidCandidates(validCandidates map[string]replication.Position, tabletMap map[string]*topo.TabletInfo) (map[string]replication.Position, error) { + restrictedValidCandidates := make(map[string]replication.Position) for candidate, position := range validCandidates { candidateInfo, ok := tabletMap[candidate] if !ok { diff --git a/go/vt/vtctl/reparentutil/util_test.go b/go/vt/vtctl/reparentutil/util_test.go index 29f7bb4ab7d..a9e6274d490 100644 --- a/go/vt/vtctl/reparentutil/util_test.go +++ b/go/vt/vtctl/reparentutil/util_test.go @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/logutil" @@ -559,7 +561,7 @@ func TestFindPositionForTablet(t *testing.T) { return } require.NoError(t, err) - posString := mysql.EncodePosition(pos) + posString := replication.EncodePosition(pos) require.Equal(t, test.expectedPosition, posString) }) } @@ -736,41 +738,41 @@ func TestFindCurrentPrimary(t *testing.T) { } func TestGetValidCandidatesAndPositionsAsList(t *testing.T) { - sid1 := mysql.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - mysqlGTID1 := mysql.Mysql56GTID{ + sid1 := replication.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + mysqlGTID1 := replication.Mysql56GTID{ Server: sid1, Sequence: 9, } - mysqlGTID2 := mysql.Mysql56GTID{ + mysqlGTID2 := replication.Mysql56GTID{ Server: sid1, Sequence: 10, } - mysqlGTID3 := mysql.Mysql56GTID{ + mysqlGTID3 := replication.Mysql56GTID{ Server: sid1, Sequence: 11, } - positionMostAdvanced := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionMostAdvanced := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID1) positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID2) positionMostAdvanced.GTIDSet = positionMostAdvanced.GTIDSet.AddGTID(mysqlGTID3) - positionIntermediate1 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate1 := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionIntermediate1.GTIDSet = positionIntermediate1.GTIDSet.AddGTID(mysqlGTID1) - positionIntermediate2 := mysql.Position{GTIDSet: mysql.Mysql56GTIDSet{}} + positionIntermediate2 := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID1) positionIntermediate2.GTIDSet = positionIntermediate2.GTIDSet.AddGTID(mysqlGTID2) tests := []struct { name string - validCandidates map[string]mysql.Position + validCandidates map[string]replication.Position tabletMap map[string]*topo.TabletInfo tabletRes []*topodatapb.Tablet }{ { name: "test conversion", - validCandidates: map[string]mysql.Position{ + validCandidates: map[string]replication.Position{ "zone1-0000000100": positionMostAdvanced, "zone1-0000000101": positionIntermediate1, "zone1-0000000102": positionIntermediate2, @@ -968,13 +970,13 @@ func TestWaitForCatchUp(t *testing.T) { func TestRestrictValidCandidates(t *testing.T) { tests := []struct { name string - validCandidates map[string]mysql.Position + validCandidates map[string]replication.Position tabletMap map[string]*topo.TabletInfo - result map[string]mysql.Position + result map[string]replication.Position }{ { name: "remove invalid tablets", - validCandidates: map[string]mysql.Position{ + validCandidates: map[string]replication.Position{ "zone1-0000000100": {}, "zone1-0000000101": {}, "zone1-0000000102": {}, @@ -1038,7 +1040,7 @@ func TestRestrictValidCandidates(t *testing.T) { }, }, }, - result: map[string]mysql.Position{ + result: map[string]replication.Position{ "zone1-0000000100": {}, "zone1-0000000101": {}, "zone1-0000000104": {}, diff --git a/go/vt/vtctl/schematools/marshal.go b/go/vt/vtctl/schematools/marshal.go new file mode 100644 index 00000000000..0ebf3e65346 --- /dev/null +++ b/go/vt/vtctl/schematools/marshal.go @@ -0,0 +1,158 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schematools + +import ( + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/topo/topoproto" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + "vitess.io/vitess/go/vt/proto/vttime" +) + +type tSchemaMigration struct { + *vtctldatapb.SchemaMigration + // Renamed fields + MigrationUuid string + MysqlSchema string + MysqlTable string + AddedTimestamp *vttime.Time + RequestedTimestamp *vttime.Time + ReadyTimestamp *vttime.Time + StartedTimestamp *vttime.Time + CompletedTimestamp *vttime.Time + CleanupTimestamp *vttime.Time + ArtifactRetentionSeconds int64 + LastThrottledTimestamp *vttime.Time + CancelledTimestamp *vttime.Time + ReviewedTimestamp *vttime.Time + ReadyToCompleteTimestamp *vttime.Time + + // Re-typed fields. These must have distinct names or the first-pass + // marshalling will not produce fields/rows for these. + Status_ string `sqltypes:"$$status"` + Tablet_ string `sqltypes:"$$tablet"` + Strategy_ string `sqltypes:"$$strategy"` +} + +func replaceSchemaMigrationFields(result *sqltypes.Result) *sqltypes.Result { + // NOTE: this depends entirely on (1) the ordering of the fields in the + // embedded protobuf message and (2) that MarshalResult walks fields in the + // order they are defined (via reflect.VisibleFields). + // + // That half is stable, as it is part of the VisibleFields API, but if we + // were to remove or reorder fields in the SchemaMigration proto without + // updating this function, this could break. + return sqltypes.ReplaceFields(result, map[string]string{ + "uuid": "migration_uuid", + "schema": "mysql_schema", + "table": "mysql_table", + "added_at": "added_timestamp", + "requested_at": "requested_timestamp", + "ready_at": "ready_timestamp", + "started_at": "started_timestamp", + "completed_at": "completed_timestamp", + "cleaned_up_at": "cleanup_timestamp", + "artifact_retention": "artifact_retention_seconds", + "last_throttled_at": "last_throttled_timestamp", + "cancelled_at": "cancelled_timestamp", + "reviewed_at": "reviewed_timestamp", + "ready_to_complete_at": "ready_to_complete_timestamp", + "$$status": "status", + "$$tablet": "tablet", + "$$strategy": "strategy", + }) +} + +type MarshallableSchemaMigration vtctldatapb.SchemaMigration + +func (t *MarshallableSchemaMigration) MarshalResult() (*sqltypes.Result, error) { + artifactRetention, _, err := protoutil.DurationFromProto(t.ArtifactRetention) + if err != nil { + return nil, err + } + + tmp := tSchemaMigration{ + SchemaMigration: (*vtctldatapb.SchemaMigration)(t), + MigrationUuid: t.Uuid, + MysqlSchema: t.Schema, + MysqlTable: t.Table, + AddedTimestamp: t.AddedAt, + RequestedTimestamp: t.RequestedAt, + ReadyTimestamp: t.ReadyAt, + StartedTimestamp: t.StartedAt, + CompletedTimestamp: t.CompletedAt, + CleanupTimestamp: t.CleanedUpAt, + ArtifactRetentionSeconds: int64(artifactRetention.Seconds()), + LastThrottledTimestamp: t.LastThrottledAt, + CancelledTimestamp: t.CancelledAt, + ReviewedTimestamp: t.ReviewedAt, + ReadyToCompleteTimestamp: t.ReadyToCompleteAt, + Status_: SchemaMigrationStatusName(t.Status), + Tablet_: topoproto.TabletAliasString(t.Tablet), + Strategy_: SchemaMigrationStrategyName(t.Strategy), + } + + res, err := sqltypes.MarshalResult(&tmp) + if err != nil { + return nil, err + } + + return replaceSchemaMigrationFields(res), nil +} + +type MarshallableSchemaMigrations []*vtctldatapb.SchemaMigration + +func (ts MarshallableSchemaMigrations) MarshalResult() (*sqltypes.Result, error) { + s := make([]*tSchemaMigration, len(ts)) + for i, t := range ts { + artifactRetention, _, err := protoutil.DurationFromProto(t.ArtifactRetention) + if err != nil { + return nil, err + } + + tmp := &tSchemaMigration{ + SchemaMigration: (*vtctldatapb.SchemaMigration)(t), + MigrationUuid: t.Uuid, + MysqlSchema: t.Schema, + MysqlTable: t.Table, + AddedTimestamp: t.AddedAt, + RequestedTimestamp: t.RequestedAt, + ReadyTimestamp: t.ReadyAt, + StartedTimestamp: t.StartedAt, + CompletedTimestamp: t.CompletedAt, + CleanupTimestamp: t.CleanedUpAt, + ArtifactRetentionSeconds: int64(artifactRetention.Seconds()), + LastThrottledTimestamp: t.LastThrottledAt, + CancelledTimestamp: t.CancelledAt, + ReviewedTimestamp: t.ReviewedAt, + ReadyToCompleteTimestamp: t.ReadyToCompleteAt, + Status_: SchemaMigrationStatusName(t.Status), + Tablet_: topoproto.TabletAliasString(t.Tablet), + Strategy_: SchemaMigrationStrategyName(t.Strategy), + } + s[i] = tmp + } + + res, err := sqltypes.MarshalResult(s) + if err != nil { + return nil, err + } + + return replaceSchemaMigrationFields(res), nil +} diff --git a/go/vt/vtctl/schematools/marshal_test.go b/go/vt/vtctl/schematools/marshal_test.go new file mode 100644 index 00000000000..6a574af5974 --- /dev/null +++ b/go/vt/vtctl/schematools/marshal_test.go @@ -0,0 +1,68 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schematools + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/sqltypes" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +func TestMarshalResult(t *testing.T) { + t.Parallel() + + now := time.Now() + + sm := &vtctldatapb.SchemaMigration{ + Uuid: "abc", + RequestedAt: protoutil.TimeToProto(now), + Tablet: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Status: vtctldatapb.SchemaMigration_RUNNING, + Table: "t1", + } + + r, err := sqltypes.MarshalResult((*MarshallableSchemaMigration)(sm)) + require.NoError(t, err) + row := r.Named().Rows[0] + + assert.Equal(t, "abc", row.AsString("migration_uuid", "")) + assert.Equal(t, now.Format(sqltypes.TimestampFormat), row.AsString("requested_timestamp", "")) + assert.Equal(t, "zone1-0000000101", row.AsString("tablet", "")) + assert.Equal(t, "running", row.AsString("status", "")) + assert.Equal(t, "t1", row.AsString("mysql_table", "")) + + r, err = sqltypes.MarshalResult(MarshallableSchemaMigrations([]*vtctldatapb.SchemaMigration{sm})) + require.NoError(t, err) + row = r.Named().Rows[0] + + assert.Equal(t, "abc", row.AsString("migration_uuid", "")) + assert.Equal(t, now.Format(sqltypes.TimestampFormat), row.AsString("requested_timestamp", "")) + assert.Equal(t, "zone1-0000000101", row.AsString("tablet", "")) + assert.Equal(t, "running", row.AsString("status", "")) + assert.Equal(t, "t1", row.AsString("mysql_table", "")) +} diff --git a/go/vt/vtctl/schematools/reload_test.go b/go/vt/vtctl/schematools/reload_test.go index 6fbc7f152be..4f00e300d13 100644 --- a/go/vt/vtctl/schematools/reload_test.go +++ b/go/vt/vtctl/schematools/reload_test.go @@ -86,7 +86,6 @@ func (tmc *reloadSchemaTMC) ReloadSchema(ctx context.Context, tablet *topodatapb func TestReloadShard(t *testing.T) { t.Parallel() - ctx := context.Background() tests := []struct { name string @@ -330,7 +329,10 @@ func TestReloadShard(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - ts := memorytopo.NewServer(tt.cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, tt.cells...) + defer ts.Close() testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, }, tt.tablets...) diff --git a/go/vt/vtctl/schematools/schematools.go b/go/vt/vtctl/schematools/schematools.go index 4b8543a394d..059b7ca3db8 100644 --- a/go/vt/vtctl/schematools/schematools.go +++ b/go/vt/vtctl/schematools/schematools.go @@ -18,6 +18,8 @@ package schematools import ( "context" + "fmt" + "strings" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vterrors" @@ -25,7 +27,8 @@ import ( tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/proto/vtrpc" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) // GetSchema makes an RPC to get the schema from a remote tablet, after @@ -33,7 +36,7 @@ import ( func GetSchema(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, alias *topodatapb.TabletAlias, request *tabletmanagerdatapb.GetSchemaRequest) (*tabletmanagerdatapb.SchemaDefinition, error) { ti, err := ts.GetTablet(ctx, alias) if err != nil { - return nil, vterrors.Errorf(vtrpc.Code_NOT_FOUND, "GetTablet(%v) failed: %v", alias, err) + return nil, vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "GetTablet(%v) failed: %v", alias, err) } sd, err := tmc.GetSchema(ctx, ti.Tablet, request) @@ -43,3 +46,61 @@ func GetSchema(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerC return sd, nil } + +// ParseSchemaMigrationStrategy parses the given strategy into the underlying enum type. +func ParseSchemaMigrationStrategy(name string) (vtctldatapb.SchemaMigration_Strategy, error) { + if name == "" { + // backward compatiblity and to handle unspecified values + return vtctldatapb.SchemaMigration_DIRECT, nil + + } + + upperName := strings.ToUpper(name) + switch upperName { + case "GH-OST", "PT-OSC": + // more compatibility since the protobuf message names don't + // have the dash. + upperName = strings.ReplaceAll(upperName, "-", "") + default: + } + + strategy, ok := vtctldatapb.SchemaMigration_Strategy_value[upperName] + if !ok { + return 0, fmt.Errorf("unknown schema migration strategy: '%v'", name) + } + + return vtctldatapb.SchemaMigration_Strategy(strategy), nil + +} + +// ParseSchemaMigrationStatus parses the given status into the underlying enum type. +func ParseSchemaMigrationStatus(name string) (vtctldatapb.SchemaMigration_Status, error) { + key := strings.ToUpper(name) + + val, ok := vtctldatapb.SchemaMigration_Status_value[key] + if !ok { + return 0, fmt.Errorf("unknown enum name for SchemaMigration_Status: %s", name) + } + + return vtctldatapb.SchemaMigration_Status(val), nil +} + +// SchemaMigrationStrategyName returns the text-based form of the strategy. +func SchemaMigrationStrategyName(strategy vtctldatapb.SchemaMigration_Strategy) string { + name, ok := vtctldatapb.SchemaMigration_Strategy_name[int32(strategy)] + if !ok { + return "unknown" + } + + switch strategy { + case vtctldatapb.SchemaMigration_GHOST, vtctldatapb.SchemaMigration_PTOSC: + name = strings.Join([]string{name[:2], name[2:]}, "-") + } + + return strings.ToLower(name) +} + +// SchemaMigrationStatusName returns the text-based form of the status. +func SchemaMigrationStatusName(status vtctldatapb.SchemaMigration_Status) string { + return strings.ToLower(vtctldatapb.SchemaMigration_Status_name[int32(status)]) +} diff --git a/go/vt/vtctl/schematools/schematools_test.go b/go/vt/vtctl/schematools/schematools_test.go new file mode 100644 index 00000000000..94909ab52b1 --- /dev/null +++ b/go/vt/vtctl/schematools/schematools_test.go @@ -0,0 +1,69 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schematools + +import ( + "testing" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + + "github.com/stretchr/testify/assert" +) + +func TestSchemaMigrationStrategyName(t *testing.T) { + t.Parallel() + + tests := []struct { + in vtctldatapb.SchemaMigration_Strategy + out string + }{ + { + in: vtctldatapb.SchemaMigration_ONLINE, + out: "vitess", + }, + { + in: vtctldatapb.SchemaMigration_VITESS, + out: "vitess", + }, + { + in: vtctldatapb.SchemaMigration_GHOST, + out: "gh-ost", + }, + { + in: vtctldatapb.SchemaMigration_PTOSC, + out: "pt-osc", + }, + { + in: vtctldatapb.SchemaMigration_DIRECT, + out: "direct", + }, + { + in: vtctldatapb.SchemaMigration_Strategy(-1), + out: "unknown", + }, + } + + for _, test := range tests { + test := test + t.Run(test.out, func(t *testing.T) { + t.Parallel() + + out := SchemaMigrationStrategyName(test.in) + assert.Equal(t, test.out, out) + }) + } +} diff --git a/go/vt/vtctl/testdata/unknown-params-logged-dry-run-vschema.json b/go/vt/vtctl/testdata/unknown-params-logged-dry-run-vschema.json new file mode 100644 index 00000000000..aefcfb13ae7 --- /dev/null +++ b/go/vt/vtctl/testdata/unknown-params-logged-dry-run-vschema.json @@ -0,0 +1,18 @@ +{ + "sharded": true, + "vindexes": { + "hash_vdx" : { + "type": "hash", + "params": { + "foo": "bar", + "hello": "world" + } + }, + "binary_vdx": { + "type": "binary", + "params": { + "hello": "world" + } + } + } +} diff --git a/go/vt/vtctl/testdata/unknown-params-logged-vschema.json b/go/vt/vtctl/testdata/unknown-params-logged-vschema.json new file mode 100644 index 00000000000..d3abc1c0e03 --- /dev/null +++ b/go/vt/vtctl/testdata/unknown-params-logged-vschema.json @@ -0,0 +1,18 @@ +{ + "sharded": true, + "vindexes": { + "binary_vdx": { + "type": "binary", + "params": { + "hello": "world" + } + }, + "hash_vdx": { + "type": "hash", + "params": { + "foo": "bar", + "hello": "world" + } + } + } +} diff --git a/go/vt/vtctl/vdiff2.go b/go/vt/vtctl/vdiff2.go index c51fb15a95e..7cd1c7e00ca 100644 --- a/go/vt/vtctl/vdiff2.go +++ b/go/vt/vtctl/vdiff2.go @@ -67,6 +67,7 @@ func commandVDiff2(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.F verbose := subFlags.Bool("verbose", false, "Show verbose vdiff output in summaries") wait := subFlags.Bool("wait", false, "When creating or resuming a vdiff, wait for it to finish before exiting") waitUpdateInterval := subFlags.Duration("wait-update-interval", time.Duration(1*time.Minute), "When waiting on a vdiff to finish, check and display the current status this often") + updateTableStats := subFlags.Bool("update-table-stats", false, "Update the table statistics, using ANALYZE TABLE, on each table involved in the VDiff during initialization. This will ensure that progress estimates are as accurate as possible -- but it does involve locks and can potentially impact query processing on the target keyspace.") if err := subFlags.Parse(args); err != nil { return err @@ -117,6 +118,7 @@ func commandVDiff2(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.F SamplePct: *samplePct, TimeoutSeconds: int64(timeout.Seconds()), MaxExtraRowsToCompare: *maxExtraRowsToCompare, + UpdateTableStats: *updateTableStats, }, ReportOptions: &tabletmanagerdatapb.VDiffReportOptions{ OnlyPks: *onlyPks, @@ -163,9 +165,6 @@ func commandVDiff2(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.F return fmt.Errorf("invalid action '%s'; %s", action, usage) } - type ErrorResponse struct { - Error string - } output, err := wr.VDiff2(ctx, keyspace, workflowName, action, actionArg, vdiffUUID.String(), options) if err != nil { log.Errorf("vdiff2 returning with error: %v", err) diff --git a/go/vt/vtctl/vdiff2_test.go b/go/vt/vtctl/vdiff2_test.go index 368f21eb93b..1348cd06448 100644 --- a/go/vt/vtctl/vdiff2_test.go +++ b/go/vt/vtctl/vdiff2_test.go @@ -35,7 +35,9 @@ var ( ) func TestVDiff2Unsharded(t *testing.T) { - env := newTestVDiffEnv(t, []string{"0"}, []string{"0"}, "", nil) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestVDiffEnv(t, ctx, []string{"0"}, []string{"0"}, "", nil) defer env.close() UUID := uuid.New().String() @@ -275,7 +277,9 @@ func TestVDiff2Unsharded(t *testing.T) { } func TestVDiff2Sharded(t *testing.T) { - env := newTestVDiffEnv(t, []string{"-40", "40-"}, []string{"-80", "80-"}, "", map[string]string{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestVDiffEnv(t, ctx, []string{"-40", "40-"}, []string{"-80", "80-"}, "", map[string]string{ "-80": "MySQL56/0e45e704-7cb9-11ed-a1eb-0242ac120002:1-890", "80-": "MySQL56/1497ddb0-7cb9-11ed-a1eb-0242ac120002:1-891", }) @@ -473,7 +477,7 @@ func TestBuildProgressReport(t *testing.T) { t.Run(tt.name, func(t *testing.T) { buildProgressReport(tt.args.summary, tt.args.rowsToCompare) // We always check the percentage - require.Equal(t, tt.want.Percentage, tt.args.summary.Progress.Percentage) + require.Equal(t, int(tt.want.Percentage), int(tt.args.summary.Progress.Percentage)) // We only check the ETA if there is one if tt.want.ETA != "" { diff --git a/go/vt/vtctl/vdiff_env_test.go b/go/vt/vtctl/vdiff_env_test.go index 5fb854284ae..955d2673d20 100644 --- a/go/vt/vtctl/vdiff_env_test.go +++ b/go/vt/vtctl/vdiff_env_test.go @@ -69,11 +69,11 @@ type testVDiffEnv struct { //---------------------------------------------- // testVDiffEnv -func newTestVDiffEnv(t testing.TB, sourceShards, targetShards []string, query string, positions map[string]string) *testVDiffEnv { +func newTestVDiffEnv(t testing.TB, ctx context.Context, sourceShards, targetShards []string, query string, positions map[string]string) *testVDiffEnv { env := &testVDiffEnv{ workflow: "vdiffTest", tablets: make(map[int]*testVDiffTablet), - topoServ: memorytopo.NewServer("cell"), + topoServ: memorytopo.NewServer(ctx, "cell"), cell: "cell", tabletType: topodatapb.TabletType_REPLICA, tmc: newTestVDiffTMClient(), diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index bf76279d73c..e9583e7c5b0 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -97,12 +97,16 @@ import ( "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/cmd/vtctldclient/cli" "vitess.io/vitess/go/flagutil" "vitess.io/vitess/go/json2" "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/textutil" + "vitess.io/vitess/go/vt/discovery" hk "vitess.io/vitess/go/vt/hook" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" @@ -120,14 +124,19 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" "vitess.io/vitess/go/vt/vtctl/workflow" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" "vitess.io/vitess/go/vt/wrangler" ) // ErrUnknownCommand is returned for an unknown command. var ErrUnknownCommand = errors.New("unknown command") +const errWorkflowUpdateWithoutChanges = "no updates were provided; use --cells, --tablet-types, or --on-ddl to specify new values" + type command struct { name string method func(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error @@ -387,7 +396,7 @@ var commands = []commandGroup{ { name: "CreateKeyspace", method: commandCreateKeyspace, - params: "[--sharding_column_name=name] [--sharding_column_type=type] [--served_from=tablettype1:ks1,tablettype2:ks2,...] [--force] [--keyspace_type=type] [--base_keyspace=base_keyspace] [--snapshot_time=time] [--durability-policy=policy_name] ", + params: "[--served_from=tablettype1:ks1,tablettype2:ks2,...] [--force] [--keyspace_type=type] [--base_keyspace=base_keyspace] [--snapshot_time=time] [--durability-policy=policy_name] [--sidecar-db-name=db_name] ", help: "Creates the specified keyspace. keyspace_type can be NORMAL or SNAPSHOT. For a SNAPSHOT keyspace you must specify the name of a base_keyspace, and a snapshot_time in UTC, in RFC3339 time format, e.g. 2006-01-02T15:04:05+00:00", }, { @@ -435,7 +444,7 @@ var commands = []commandGroup{ { name: "MoveTables", method: commandMoveTables, - params: "[--source=] [--tables=] [--cells=] [--tablet_types=] [--all] [--exclude=] [--auto_start] [--stop_after_copy] [--defer-secondary-keys] [--on-ddl=] [--source_shards=] 'action must be one of the following: Create, Complete, Cancel, SwitchTraffic, ReverseTrafffic, Show, or Progress' ", + params: "[--source=] [--tables=] [--cells=] [--tablet_types=] [--all] [--exclude=] [--auto_start] [--stop_after_copy] [--defer-secondary-keys] [--on-ddl=] [--source_shards=] [--source_time_zone=] [--initialize-target-sequences] [--no-routing-rules] 'action must be one of the following: Create, Complete, Cancel, SwitchTraffic, ReverseTrafffic, Show, or Progress' ", help: `Move table(s) to another keyspace, table_specs is a list of tables or the tables section of the vschema for the target keyspace. Example: '{"t1":{"column_vindexes": [{"column": "id1", "name": "hash"}]}, "t2":{"column_vindexes": [{"column": "id2", "name": "hash"}]}}'. In the case of an unsharded target keyspace the vschema for each table may be empty. Example: '{"t1":{}, "t2":{}}'.`, }, { @@ -582,8 +591,8 @@ var commands = []commandGroup{ { name: "ApplySchema", method: commandApplySchema, - params: "[--allow_long_unavailability] [--wait_replicas_timeout=10s] [--ddl_strategy=] [--uuid_list=] [--migration_context=] {--sql= || --sql-file=} ", - help: "Applies the schema change to the specified keyspace on every primary, running in parallel on all shards. The changes are then propagated to replicas via replication. If --allow_long_unavailability is set, schema changes affecting a large number of rows (and possibly incurring a longer period of unavailability) will not be rejected. -ddl_strategy is used to instruct migrations via vreplication, gh-ost or pt-osc with optional parameters. -migration_context allows the user to specify a custom request context for online DDL migrations.", + params: "[--wait_replicas_timeout=10s] [--ddl_strategy=] [--uuid_list=] [--migration_context=] {--sql= || --sql-file=} [--batch-size=] ", + help: "Applies the schema change to the specified keyspace on every primary, running in parallel on all shards. The changes are then propagated to replicas via replication. -ddl_strategy is used to instruct migrations via vreplication, gh-ost or pt-osc with optional parameters. -migration_context allows the user to specify a custom request context for online DDL migrations.", }, { name: "CopySchemaShard", @@ -604,8 +613,19 @@ var commands = []commandGroup{ " \nvtctl OnlineDDL test_keyspace show running" + " \nvtctl OnlineDDL test_keyspace show complete" + " \nvtctl OnlineDDL test_keyspace show failed" + + " \nvtctl OnlineDDL test_keyspace cleanup 82fa54ac_e83e_11ea_96b7_f875a4d24e90" + " \nvtctl OnlineDDL test_keyspace retry 82fa54ac_e83e_11ea_96b7_f875a4d24e90" + - " \nvtctl OnlineDDL test_keyspace cancel 82fa54ac_e83e_11ea_96b7_f875a4d24e90", + " \nvtctl OnlineDDL test_keyspace cancel 82fa54ac_e83e_11ea_96b7_f875a4d24e90" + + " \nvtctl OnlineDDL test_keyspace cancel-all" + + " \nvtctl OnlineDDL test_keyspace launch 82fa54ac_e83e_11ea_96b7_f875a4d24e90" + + " \nvtctl OnlineDDL test_keyspace launch-all" + + " \nvtctl OnlineDDL test_keyspace complete 82fa54ac_e83e_11ea_96b7_f875a4d24e90" + + " \nvtctl OnlineDDL test_keyspace complete-all" + + " \nvtctl OnlineDDL test_keyspace throttle 82fa54ac_e83e_11ea_96b7_f875a4d24e90" + + " \nvtctl OnlineDDL test_keyspace throttle-all" + + " \nvtctl OnlineDDL test_keyspace unthrottle 82fa54ac_e83e_11ea_96b7_f875a4d24e90" + + " \nvtctl OnlineDDL test_keyspace unthrottle-all" + + "", }, { name: "ValidateVersionShard", @@ -686,7 +706,7 @@ var commands = []commandGroup{ { name: "UpdateThrottlerConfig", method: commandUpdateThrottlerConfig, - params: "[--enable|--disable] [--threshold=] [--custom-query=] [--check-as-check-self|--check-as-check-shard] ", + params: "[--enable|--disable] [--threshold=] [--custom-query=] [--check-as-check-self|--check-as-check-shard] [--throttle-app|unthrottle-app=] [--throttle-app-ratio=] [--throttle-app-duration=] [--throttle-app-exempt] ", help: "Update the table throttler configuration for all cells and tablets of a given keyspace", }, { @@ -718,8 +738,8 @@ var commands = []commandGroup{ { name: "Workflow", method: commandWorkflow, - params: " --dry-run", - help: "Start/Stop/Delete/Show/ListAll/Tags Workflow on all target tablets in workflow. Example: Workflow merchant.morders Start", + params: "[--dry-run] [--cells] [--tablet-types] [.] start/stop/update/delete/show/listall/tags []", + help: "Start/Stop/Update/Delete/Show/ListAll/Tags Workflow on all target tablets in workflow. Example: Workflow merchant.morders Start", }, }, }, @@ -780,7 +800,7 @@ func fmtTabletAwkable(ti *topo.TabletInfo) string { mtst := "" // special case for old primary that hasn't updated topo yet if ti.PrimaryTermStartTime != nil && ti.PrimaryTermStartTime.Seconds > 0 { - mtst = logutil.ProtoToTime(ti.PrimaryTermStartTime).Format(time.RFC3339) + mtst = protoutil.TimeFromProto(ti.PrimaryTermStartTime).UTC().Format(time.RFC3339) } return fmt.Sprintf("%v %v %v %v %v %v %v %v", topoproto.TabletAliasString(ti.Alias), keyspace, shard, topoproto.TabletTypeLString(ti.Type), ti.Addr(), ti.MysqlAddr(), fmtMapAwkable(ti.Tags), mtst) } @@ -1800,6 +1820,7 @@ func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags baseKeyspace := subFlags.String("base_keyspace", "", "Specifies the base keyspace for a snapshot keyspace") timestampStr := subFlags.String("snapshot_time", "", "Specifies the snapshot time for this keyspace") durabilityPolicy := subFlags.String("durability-policy", "none", "Type of durability to enforce for this keyspace. Default is none. Possible values include 'semi_sync' and others as dictated by registered plugins.") + sidecarDBName := subFlags.String("sidecar-db-name", sidecar.DefaultName, "(Experimental) Name of the Vitess sidecar database that tablets in this keyspace will use for internal metadata.") if err := subFlags.Parse(args); err != nil { return err } @@ -1840,13 +1861,14 @@ func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags if timeTime.After(time.Now()) { return vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "snapshot_time can not be more than current time") } - snapshotTime = logutil.TimeToProto(timeTime) + snapshotTime = protoutil.TimeToProto(timeTime) } ki := &topodatapb.Keyspace{ KeyspaceType: ktype, BaseKeyspace: *baseKeyspace, SnapshotTime: snapshotTime, DurabilityPolicy: *durabilityPolicy, + SidecarDbName: *sidecarDBName, } if len(servedFrom) > 0 { for name, value := range servedFrom { @@ -2022,11 +2044,11 @@ func commandValidateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlag } func commandReshard(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { - return commandVRWorkflow(ctx, wr, subFlags, args, wrangler.ReshardWorkflow) + return commandVReplicationWorkflow(ctx, wr, subFlags, args, wrangler.ReshardWorkflow) } func commandMoveTables(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { - return commandVRWorkflow(ctx, wr, subFlags, args, wrangler.MoveTablesWorkflow) + return commandVReplicationWorkflow(ctx, wr, subFlags, args, wrangler.MoveTablesWorkflow) } // VReplicationWorkflowAction defines subcommands passed to vtctl for movetables or reshard @@ -2044,7 +2066,7 @@ const ( ) func commandMigrate(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { - return commandVRWorkflow(ctx, wr, subFlags, args, wrangler.MigrateWorkflow) + return commandVReplicationWorkflow(ctx, wr, subFlags, args, wrangler.MigrateWorkflow) } // getSourceKeyspace expects a keyspace of the form "externalClusterName.keyspaceName" and returns the components @@ -2056,9 +2078,9 @@ func getSourceKeyspace(clusterKeyspace string) (clusterName string, sourceKeyspa return splits[0], splits[1], nil } -// commandVRWorkflow is the common entry point for MoveTables/Reshard/Migrate workflows +// commandVReplicationWorkflow is the common entry point for MoveTables/Reshard/Migrate workflows // FIXME: this function needs a refactor. Also validations for params should to be done per workflow type -func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string, +func commandVReplicationWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string, workflowType wrangler.VReplicationWorkflowType) error { const defaultWaitTime = time.Duration(30 * time.Second) @@ -2067,7 +2089,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl const defaultMaxReplicationLagAllowed = defaultWaitTime cells := subFlags.String("cells", "", "Cell(s) or CellAlias(es) (comma-separated) to replicate from.") - tabletTypes := subFlags.String("tablet_types", "in_order:REPLICA,PRIMARY", "Source tablet types to replicate from (e.g. PRIMARY, REPLICA, RDONLY). Defaults to --vreplication_tablet_type parameter value for the tablet, which has the default value of in_order:REPLICA,PRIMARY. Note: SwitchTraffic overrides this default and uses in_order:RDONLY,REPLICA,PRIMARY to switch all traffic by default.") + tabletTypesStr := subFlags.String("tablet_types", "in_order:REPLICA,PRIMARY", "Source tablet types to replicate from (e.g. PRIMARY, REPLICA, RDONLY). Defaults to --vreplication_tablet_type parameter value for the tablet, which has the default value of in_order:REPLICA,PRIMARY. Note: SwitchTraffic overrides this default and uses in_order:RDONLY,REPLICA,PRIMARY to switch all traffic by default.") dryRun := subFlags.Bool("dry_run", false, "Does a dry run of SwitchTraffic and only reports the actions to be taken. --dry_run is only supported for SwitchTraffic, ReverseTraffic and Complete.") timeout := subFlags.Duration("timeout", defaultWaitTime, "Specifies the maximum time to wait, in seconds, for vreplication to catch up on primary migrations. The migration will be cancelled on a timeout. --timeout is only supported for SwitchTraffic and ReverseTraffic.") reverseReplication := subFlags.Bool("reverse_replication", true, "Also reverse the replication (default true). --reverse_replication is only supported for SwitchTraffic.") @@ -2077,6 +2099,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl stopAfterCopy := subFlags.Bool("stop_after_copy", false, "Streams will be stopped once the copy phase is completed") dropForeignKeys := subFlags.Bool("drop_foreign_keys", false, "If true, tables in the target keyspace will be created without foreign keys.") maxReplicationLagAllowed := subFlags.Duration("max_replication_lag_allowed", defaultMaxReplicationLagAllowed, "Allow traffic to be switched only if vreplication lag is below this (in seconds)") + atomicCopy := subFlags.Bool("atomic-copy", false, "(EXPERIMENTAL) Use this if your source keyspace has tables which use foreign key constraints. All tables from the source will be moved.") onDDL := "IGNORE" subFlags.StringVar(&onDDL, "on-ddl", onDDL, "What to do when DDL is encountered in the VReplication stream. Possible values are IGNORE, STOP, EXEC, and EXEC_IGNORE.") @@ -2086,6 +2109,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl allTables := subFlags.Bool("all", false, "MoveTables only. Move all tables from the source keyspace. Either table_specs or --all needs to be specified.") excludes := subFlags.String("exclude", "", "MoveTables only. Tables to exclude (comma-separated) if --all is specified") sourceKeyspace := subFlags.String("source", "", "MoveTables only. Source keyspace") + initializeTargetSequences := subFlags.Bool("initialize-target-sequences", false, "MoveTables only. When moving tables from an unsharded keyspace to a sharded keyspace, initialize any sequences that are being used on the target when switching writes.") // if sourceTimeZone is specified, the target needs to have time zones loaded // note we make an opinionated decision to not allow specifying a different target time zone than UTC. @@ -2093,6 +2117,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl // MoveTables-only params renameTables := subFlags.Bool("rename_tables", false, "MoveTables only. Rename tables instead of dropping them. --rename_tables is only supported for Complete.") + noRoutingRules := subFlags.Bool("no-routing-rules", false, "(Advanced) MoveTables Create only. Do not create routing rules while creating the workflow. See the reference documentation for limitations if you use this flag.") // MoveTables and Reshard params sourceShards := subFlags.String("source_shards", "", "Source shards") @@ -2135,6 +2160,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl DryRun: *dryRun, AutoStart: *autoStart, StopAfterCopy: *stopAfterCopy, + AtomicCopy: *atomicCopy, } printDetails := func() error { @@ -2144,11 +2170,18 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl return err } s += fmt.Sprintf("The following vreplication streams exist for workflow %s.%s:\n\n", target, workflowName) - for ksShard := range res.ShardStatuses { + + // Sort the results for consistent and intuitive output. + ksShardKeys := make([]string, 0, len(res.ShardStatuses)) + for ksShardKey := range res.ShardStatuses { + ksShardKeys = append(ksShardKeys, ksShardKey) + } + sort.Strings(ksShardKeys) + for _, ksShard := range ksShardKeys { statuses := res.ShardStatuses[ksShard].PrimaryReplicationStatuses for _, st := range statuses { msg := "" - if st.State == "Error" { + if st.State == binlogdatapb.VReplicationWorkflowState_Error.String() { msg += fmt.Sprintf(": %s.", st.Message) } else if st.Pos == "" { msg += ". VStream has not started." @@ -2219,6 +2252,24 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl wr.Logger().Errorf("keyspace %s not found", *sourceKeyspace) return err } + + if *atomicCopy { + var errors []string + if !*allTables { + errors = append(errors, "atomic copy requires --all.") + } + if *tables != "" { + errors = append(errors, "atomic copy does not support specifying tables.") + } + if *excludes != "" { + errors = append(errors, "atomic copy does not support specifying excludes.") + } + if len(errors) > 0 { + errors = append(errors, "Found options incompatible with atomic copy:") + return fmt.Errorf(strings.Join(errors, " ")) + } + } + if !*allTables && *tables == "" { return fmt.Errorf("no tables specified to move") } @@ -2230,6 +2281,7 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl vrwp.ExternalCluster = externalClusterName vrwp.SourceTimeZone = *sourceTimeZone vrwp.DropForeignKeys = *dropForeignKeys + vrwp.NoRoutingRules = *noRoutingRules if *sourceShards != "" { vrwp.SourceShards = strings.Split(*sourceShards, ",") } @@ -2247,11 +2299,11 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl vrwp.OnDDL = onDDL vrwp.DeferSecondaryKeys = *deferNonPKeys vrwp.Cells = *cells - vrwp.TabletTypes = *tabletTypes + vrwp.TabletTypes = *tabletTypesStr case vReplicationWorkflowActionSwitchTraffic, vReplicationWorkflowActionReverseTraffic: vrwp.Cells = *cells if subFlags.Changed("tablet_types") { - vrwp.TabletTypes = *tabletTypes + vrwp.TabletTypes = *tabletTypesStr } else { // When no tablet types are specified we are supposed to switch all traffic so // we override the normal default for tablet_types. @@ -2260,8 +2312,10 @@ func commandVRWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfl vrwp.Timeout = *timeout vrwp.EnableReverseReplication = *reverseReplication vrwp.MaxAllowedTransactionLagSeconds = int64(math.Ceil(maxReplicationLagAllowed.Seconds())) + vrwp.InitializeTargetSequences = *initializeTargetSequences case vReplicationWorkflowActionCancel: vrwp.KeepData = *keepData + vrwp.KeepRoutingRules = *keepRoutingRules case vReplicationWorkflowActionComplete: switch workflowType { case wrangler.MoveTablesWorkflow: @@ -2461,7 +2515,7 @@ func commandExternalizeVindex(ctx context.Context, wr *wrangler.Wrangler, subFla func commandMaterialize(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { cells := subFlags.String("cells", "", "Source cells to replicate from.") - tabletTypes := subFlags.String("tablet_types", "", "Source tablet types to replicate from.") + tabletTypesStr := subFlags.String("tablet_types", "", "Source tablet types to replicate from.") if err := subFlags.Parse(args); err != nil { return err } @@ -2473,7 +2527,16 @@ func commandMaterialize(ctx context.Context, wr *wrangler.Wrangler, subFlags *pf return err } ms.Cell = *cells - ms.TabletTypes = *tabletTypes + tabletTypes, inorder, err := discovery.ParseTabletTypesAndOrder(*tabletTypesStr) + if err != nil { + return err + } + tsp := tabletmanagerdatapb.TabletSelectionPreference_ANY + if inorder { + tsp = tabletmanagerdatapb.TabletSelectionPreference_INORDER + } + ms.TabletTypes = topoproto.MakeStringTypeCSV(tabletTypes) + ms.TabletSelectionPreference = tsp return wr.Materialize(ctx, ms) } @@ -2494,7 +2557,7 @@ func commandVDiff(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.Fl sourceCell := subFlags.String("source_cell", "", "The source cell to compare from; default is any available cell") targetCell := subFlags.String("target_cell", "", "The target cell to compare with; default is any available cell") - tabletTypes := subFlags.String("tablet_types", "in_order:RDONLY,REPLICA,PRIMARY", "Tablet types for source and target") + tabletTypesStr := subFlags.String("tablet_types", "in_order:RDONLY,REPLICA,PRIMARY", "Tablet types for source and target") filteredReplicationWaitTime := subFlags.Duration("filtered_replication_wait_time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for filtered replication to catch up on primary migrations. The migration will be cancelled on a timeout.") maxRows := subFlags.Int64("limit", math.MaxInt64, "Max rows to stop comparing after") debugQuery := subFlags.Bool("debug_query", false, "Adds a mysql query to the report that can be used for further debugging") @@ -2524,7 +2587,7 @@ func commandVDiff(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.Fl } }() - _, err = wr.VDiff(ctx, keyspace, workflow, *sourceCell, *targetCell, *tabletTypes, *filteredReplicationWaitTime, *format, + _, err = wr.VDiff(ctx, keyspace, workflow, *sourceCell, *targetCell, *tabletTypesStr, *filteredReplicationWaitTime, *format, *maxRows, *tables, *debugQuery, *onlyPks, *maxExtraRowsToCompare) if err != nil { log.Errorf("vdiff returning with error: %v", err) @@ -2837,15 +2900,16 @@ func commandValidateSchemaKeyspace(ctx context.Context, wr *wrangler.Wrangler, s } func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { - allowLongUnavailability := subFlags.Bool("allow_long_unavailability", false, "Allow large schema changes which incur a longer unavailability of the database.") + subFlags.MarkDeprecated("allow_long_unavailability", "") sql := subFlags.String("sql", "", "A list of semicolon-delimited SQL commands") sqlFile := subFlags.String("sql-file", "", "Identifies the file that contains the SQL commands") ddlStrategy := subFlags.String("ddl_strategy", string(schema.DDLStrategyDirect), "Online DDL strategy, compatible with @@ddl_strategy session variable (examples: 'gh-ost', 'pt-osc', 'gh-ost --max-load=Threads_running=100'") uuidList := subFlags.String("uuid_list", "", "Optional: comma delimited explicit UUIDs for migration. If given, must match number of DDL changes") migrationContext := subFlags.String("migration_context", "", "For Online DDL, optionally supply a custom unique string used as context for the migration(s) in this command. By default a unique context is auto-generated by Vitess") requestContext := subFlags.String("request_context", "", "synonym for --migration_context") - waitReplicasTimeout := subFlags.Duration("wait_replicas_timeout", wrangler.DefaultWaitReplicasTimeout, "The amount of time to wait for replicas to receive the schema change via replication.") + waitReplicasTimeout := subFlags.Duration("wait_replicas_timeout", grpcvtctldserver.DefaultWaitReplicasTimeout, "The amount of time to wait for replicas to receive the schema change via replication.") skipPreflight := subFlags.Bool("skip_preflight", false, "Deprecated. Always assumed to be 'true'") + batchSize := subFlags.Int64("batch_size", 0, "How many queries to batch together") callerID := subFlags.String("caller_id", "", "This is the effective caller ID used for the operation and should map to an ACL name which grants this identity the necessary permissions to perform the operation (this is only necessary when strict table ACLs are used)") if err := subFlags.Parse(args); err != nil { @@ -2884,15 +2948,15 @@ func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *pf log.Info("Calling ApplySchema on VtctldServer") resp, err := wr.VtctldServer().ApplySchema(ctx, &vtctldatapb.ApplySchemaRequest{ - Keyspace: keyspace, - AllowLongUnavailability: *allowLongUnavailability, - DdlStrategy: *ddlStrategy, - Sql: parts, - SkipPreflight: true, - UuidList: textutil.SplitDelimitedList(*uuidList), - MigrationContext: *migrationContext, - WaitReplicasTimeout: protoutil.DurationToProto(*waitReplicasTimeout), - CallerId: cID, + Keyspace: keyspace, + DdlStrategy: *ddlStrategy, + Sql: parts, + SkipPreflight: true, + UuidList: textutil.SplitDelimitedList(*uuidList), + MigrationContext: *migrationContext, + WaitReplicasTimeout: protoutil.DurationToProto(*waitReplicasTimeout), + CallerId: cID, + BatchSize: *batchSize, }) if err != nil { @@ -2907,6 +2971,34 @@ func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *pf return nil } +func generateOnlineDDLQuery(command string, arg string, allSupported bool) (string, error) { + // Accept inputs like so: + // "launch", "all" + // "launch", + // "launch-all", + if tokens := strings.Split(command, "-"); len(tokens) == 2 && tokens[1] == "all" { + // command is e.g. "launch-all" + if arg != "" { + return "", fmt.Errorf("UUID not allowed in '%s' command", command) + } + // transform "launch-all" into "launch", "all" + command = tokens[0] + arg = "all" + } + switch arg { + case "": + return "", fmt.Errorf("UUID|all required") + case "all": + if !allSupported { + return "", fmt.Errorf("'all' not supported for '%s' command", command) + } + return fmt.Sprintf(`alter vitess_migration %s all`, command), nil + default: + query := `alter vitess_migration %a ` + command + return sqlparser.ParseAndBind(query, sqltypes.StringBindVariable(arg)) + } +} + func commandOnlineDDL(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { json := subFlags.Bool("json", false, "Output JSON instead of human-readable table") orderBy := subFlags.String("order", "ascending", "Sort the results by `id` property of the Schema migration (default is ascending. Allowed values are `ascending` or `descending`.") @@ -2930,7 +3022,7 @@ func commandOnlineDDL(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfla applySchemaQuery := "" executeFetchQuery := "" - var bindErr error + var err error switch command { case "show": condition := "" @@ -2946,12 +3038,12 @@ func commandOnlineDDL(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfla string(schema.OnlineDDLStatusRunning), string(schema.OnlineDDLStatusComplete), string(schema.OnlineDDLStatusFailed): - condition, bindErr = sqlparser.ParseAndBind("migration_status=%a", sqltypes.StringBindVariable(arg)) + condition, err = sqlparser.ParseAndBind("migration_status=%a", sqltypes.StringBindVariable(arg)) default: if schema.IsOnlineDDLUUID(arg) { - condition, bindErr = sqlparser.ParseAndBind("migration_uuid=%a", sqltypes.StringBindVariable(arg)) + condition, err = sqlparser.ParseAndBind("migration_uuid=%a", sqltypes.StringBindVariable(arg)) } else { - condition, bindErr = sqlparser.ParseAndBind("migration_context=%a", sqltypes.StringBindVariable(arg)) + condition, err = sqlparser.ParseAndBind("migration_context=%a", sqltypes.StringBindVariable(arg)) } } order := " order by `id` " @@ -2970,31 +3062,29 @@ func commandOnlineDDL(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfla executeFetchQuery = fmt.Sprintf(`select * from _vt.schema_migrations where %s %s %s`, condition, order, skipLimit) - case "retry": - if arg == "" { - return fmt.Errorf("UUID required") - } - applySchemaQuery, bindErr = sqlparser.ParseAndBind(`alter vitess_migration %a retry`, sqltypes.StringBindVariable(arg)) - case "complete": - if arg == "" { - return fmt.Errorf("UUID required") - } - applySchemaQuery, bindErr = sqlparser.ParseAndBind(`alter vitess_migration %a complete`, sqltypes.StringBindVariable(arg)) - case "cancel": - if arg == "" { - return fmt.Errorf("UUID required") - } - applySchemaQuery, bindErr = sqlparser.ParseAndBind(`alter vitess_migration %a cancel`, sqltypes.StringBindVariable(arg)) - case "cancel-all": - if arg != "" { - return fmt.Errorf("UUID not allowed in %s", command) - } - applySchemaQuery = `alter vitess_migration cancel all` + case + "retry", + "cleanup": + // Do not support 'ALL' argument + applySchemaQuery, err = generateOnlineDDLQuery(command, arg, false) + case + "launch", + "launch-all", + "complete", + "complete-all", + "cancel", + "cancel-all", + "throttle", + "throttle-all", + "unthrottle", + "unthrottle-all": + // Support 'ALL' argument + applySchemaQuery, err = generateOnlineDDLQuery(command, arg, true) default: return fmt.Errorf("Unknown OnlineDDL command: %s", command) } - if bindErr != nil { - return fmt.Errorf("Error generating OnlineDDL query: %+v", bindErr) + if err != nil { + return fmt.Errorf("Error generating OnlineDDL query: %+v", err) } if applySchemaQuery != "" { @@ -3004,7 +3094,7 @@ func commandOnlineDDL(ctx context.Context, wr *wrangler.Wrangler, subFlags *pfla Keyspace: keyspace, Sql: []string{applySchemaQuery}, SkipPreflight: true, - WaitReplicasTimeout: protoutil.DurationToProto(wrangler.DefaultWaitReplicasTimeout), + WaitReplicasTimeout: protoutil.DurationToProto(grpcvtctldserver.DefaultWaitReplicasTimeout), }) if err != nil { return err @@ -3049,7 +3139,7 @@ func commandCopySchemaShard(ctx context.Context, wr *wrangler.Wrangler, subFlags includeViews := subFlags.Bool("include-views", true, "Includes views in the output") skipVerify := subFlags.Bool("skip-verify", false, "Skip verification of source and target schema after copy") // for backwards compatibility - waitReplicasTimeout := subFlags.Duration("wait_replicas_timeout", wrangler.DefaultWaitReplicasTimeout, "The amount of time to wait for replicas to receive the schema change via replication.") + waitReplicasTimeout := subFlags.Duration("wait_replicas_timeout", grpcvtctldserver.DefaultWaitReplicasTimeout, "The amount of time to wait for replicas to receive the schema change via replication.") if err := subFlags.Parse(args); err != nil { return err } @@ -3322,6 +3412,27 @@ func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *p wr.Logger().Printf("New VSchema object:\n%s\nIf this is not what you expected, check the input data (as JSON parsing will skip unexpected fields).\n", b) } + // Validate the VSchema. + ksVs, err := vindexes.BuildKeyspace(vs) + if err != nil { + return err + } + + // Log unknown Vindex params as warnings. + var vdxNames []string + for name := range ksVs.Vindexes { + vdxNames = append(vdxNames, name) + } + sort.Strings(vdxNames) + for _, name := range vdxNames { + vdx := ksVs.Vindexes[name] + if val, ok := vdx.(vindexes.ParamValidating); ok { + for _, param := range val.UnknownParams() { + wr.Logger().Warningf("Unknown param in vindex %s: %s", name, param) + } + } + } + if *dryRun { wr.Logger().Printf("Dry run: Skipping update of VSchema\n") return nil @@ -3334,6 +3445,10 @@ func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *p return err } + if _, err := vindexes.BuildKeyspace(vs); err != nil { + return err + } + if err := wr.TopoServer().SaveVSchema(ctx, keyspace, vs); err != nil { return err } @@ -3483,7 +3598,11 @@ func commandUpdateThrottlerConfig(ctx context.Context, wr *wrangler.Wrangler, su customQuery := subFlags.String("custom-query", "", "custom throttler check query") checkAsCheckSelf := subFlags.Bool("check-as-check-self", false, "/throttler/check requests behave as is /throttler/check-self was called") checkAsCheckShard := subFlags.Bool("check-as-check-shard", false, "use standard behavior for /throttler/check requests") - + unthrottledApp := subFlags.String("unthrottle-app", "", "an app name to unthrottle") + throttledApp := subFlags.String("throttle-app", "", "an app name to throttle") + throttledAppRatio := subFlags.Float64("throttle-app-ratio", throttle.DefaultThrottleRatio, "ratio to throttle app (app specififed in --throttled-app)") + throttledAppDuration := subFlags.Duration("throttle-app-duration", throttle.DefaultAppThrottleDuration, "duration after which throttled app rule expires (app specified in --throttled-app)") + throttledAppExempt := subFlags.Bool("throttle-app-exempt", false, "exempt this app from being at all throttled. WARNING: use with extreme care, as this is likely to push metrics beyond the throttler's threshold, and starve other apps (app specified in --throttled-app)") if err := subFlags.Parse(args); err != nil { return err } @@ -3498,44 +3617,46 @@ func commandUpdateThrottlerConfig(ctx context.Context, wr *wrangler.Wrangler, su return fmt.Errorf("--check-as-check-self and --check-as-check-shard are mutually exclusive") } + if *throttledApp != "" && *unthrottledApp != "" { + return fmt.Errorf("--throttle-app and --unthrottle-app are mutually exclusive") + } + if subFlags.Changed("throttle-app-ratio") && *throttledApp == "" { + return fmt.Errorf("--throttle-app-ratio requires --throttle-app") + } + if subFlags.Changed("throttle-app-duration") && *throttledApp == "" { + return fmt.Errorf("--throttle-app-duration requires --throttle-app") + } + if subFlags.Changed("throttle-app-exempt") && *throttledApp == "" { + return fmt.Errorf("--throttle-app-exempt requires --throttle-app") + } + keyspace := subFlags.Arg(0) - update := func(throttlerConfig *topodatapb.ThrottlerConfig) *topodatapb.ThrottlerConfig { - if throttlerConfig == nil { - throttlerConfig = &topodatapb.ThrottlerConfig{} - } - if customQuerySet { - // custom query provided - throttlerConfig.CustomQuery = *customQuery - throttlerConfig.Threshold = *threshold // allowed to be zero/negative because who knows what kind of custom query this is - } else { - // no custom query, throttler works by querying replication lag. We only allow positive values - if *threshold > 0 { - throttlerConfig.Threshold = *threshold - } + req := &vtctldatapb.UpdateThrottlerConfigRequest{ + Keyspace: keyspace, + Enable: *enable, + Disable: *disable, + CustomQuery: *customQuery, + CustomQuerySet: customQuerySet, + Threshold: *threshold, + CheckAsCheckSelf: *checkAsCheckSelf, + CheckAsCheckShard: *checkAsCheckShard, + } + if *throttledApp != "" { + req.ThrottledApp = &topodatapb.ThrottledAppRule{ + Name: *throttledApp, + Ratio: *throttledAppRatio, + Exempt: *throttledAppExempt, + ExpiresAt: protoutil.TimeToProto(time.Now().Add(*throttledAppDuration)), } - if *enable { - throttlerConfig.Enabled = true + } else if *unthrottledApp != "" { + req.ThrottledApp = &topodatapb.ThrottledAppRule{ + Name: *unthrottledApp, + Ratio: 0, + ExpiresAt: protoutil.TimeToProto(time.Now()), } - if *disable { - throttlerConfig.Enabled = false - } - if *checkAsCheckSelf { - throttlerConfig.CheckAsCheckSelf = true - } - if *checkAsCheckShard { - throttlerConfig.CheckAsCheckSelf = false - } - return throttlerConfig - } - - ctx, unlock, lockErr := wr.TopoServer().LockKeyspace(ctx, keyspace, "UpdateThrottlerConfig") - if lockErr != nil { - return lockErr } - defer unlock(&err) - - _, err = wr.TopoServer().UpdateSrvKeyspaceThrottlerConfig(ctx, keyspace, []string{}, update) + _, err = wr.VtctldServer().UpdateThrottlerConfig(ctx, req) return err } @@ -3606,19 +3727,19 @@ func commandHelp(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.Fla } func commandWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { - dryRun := subFlags.Bool("dry_run", false, "Does a dry run of Workflow and only reports the final query and list of tablets on which the operation will be applied") + usage := "usage: Workflow [--dry-run] [--cells] [--tablet-types] [.] start/stop/update/delete/show/listall/tags []" + dryRun := subFlags.Bool("dry-run", false, "Does a dry run of the Workflow action and reports the query and list of tablets on which the operation will be applied") + cells := subFlags.StringSlice("cells", []string{}, "New Cell(s) or CellAlias(es) (comma-separated) to replicate from. (Update only)") + tabletTypesStrs := subFlags.StringSlice("tablet-types", []string{}, "New source tablet types to replicate from (e.g. PRIMARY, REPLICA, RDONLY). (Update only)") + onDDL := subFlags.String("on-ddl", "", "New instruction on what to do when DDL is encountered in the VReplication stream. Possible values are IGNORE, STOP, EXEC, and EXEC_IGNORE. (Update only)") if err := subFlags.Parse(args); err != nil { return err } if subFlags.NArg() < 2 { - return fmt.Errorf("usage: Workflow --dry-run keyspace[.workflow] start/stop/delete/show/listall/tags []") + return fmt.Errorf(usage) } keyspace := subFlags.Arg(0) action := strings.ToLower(subFlags.Arg(1)) - // Note: List is deprecated and replaced by show. - if action == "list" { - action = "show" - } var workflow string var err error if action != "listall" { @@ -3647,13 +3768,68 @@ func commandWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag } } else { if subFlags.NArg() != 2 { - return fmt.Errorf("usage: Workflow --dry-run keyspace[.workflow] start/stop/delete/show/listall") + return fmt.Errorf(usage) + } + var rpcReq any = nil + if action == "update" { + changes := false + // We need to implicitly distinguish between an empty value (which is valid) + // and no value having been provided. We will use NULL for this purpose. + if subFlags.Lookup("cells").Changed { // Validate the provided value(s) + changes = true + for i, cell := range *cells { // Which only means trimming whitespace + (*cells)[i] = strings.TrimSpace(cell) + } + } else { + cells = &textutil.SimulatedNullStringSlice + } + tabletTypes := make([]topodatapb.TabletType, len(*tabletTypesStrs)) + inorder := false + if subFlags.Lookup("tablet-types").Changed { // Validate the provided value(s) + changes = true + if len(*tabletTypesStrs) > 0 && strings.HasPrefix((*tabletTypesStrs)[0], discovery.InOrderHint) { + (*tabletTypesStrs)[0] = strings.TrimPrefix((*tabletTypesStrs)[0], discovery.InOrderHint) + inorder = true + } + for i, tabletType := range *tabletTypesStrs { + tabletTypes[i], err = topoproto.ParseTabletType(tabletType) + if err != nil { + return err + } + } + } else { + tabletTypes = []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)} + } + onddl := int32(textutil.SimulatedNullInt) // To signify no value has been provided + if subFlags.Lookup("on-ddl").Changed { // Validate the provided value + changes = true + ival, valid := binlogdatapb.OnDDLAction_value[strings.ToUpper(*onDDL)] + if !valid { + return fmt.Errorf("invalid on-ddl action: %s", *onDDL) + } + onddl = ival + } + if !changes { + return fmt.Errorf(errWorkflowUpdateWithoutChanges) + } + tsp := tabletmanagerdatapb.TabletSelectionPreference_UNKNOWN + if inorder { + tsp = tabletmanagerdatapb.TabletSelectionPreference_INORDER + } + rpcReq = &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ + Workflow: workflow, + Cells: *cells, + TabletTypes: tabletTypes, + TabletSelectionPreference: tsp, + OnDdl: binlogdatapb.OnDDLAction(onddl), + } } - results, err = wr.WorkflowAction(ctx, workflow, keyspace, action, *dryRun) + results, err = wr.WorkflowAction(ctx, workflow, keyspace, action, *dryRun, rpcReq) // Only update currently uses the new RPC path if err != nil { return err } - if action == "show" || action == "listall" { + if action == "show" || action == "listall" || (action == "update" && *dryRun) { + // No final results left to print. return nil } } @@ -3920,8 +4096,10 @@ func PrintAllCommands(logger logutil.Logger) { func queryResultForTabletResults(results map[string]*sqltypes.Result) *sqltypes.Result { var qr = &sqltypes.Result{} defaultFields := []*querypb.Field{{ - Name: "Tablet", - Type: sqltypes.VarBinary, + Name: "Tablet", + Type: sqltypes.VarBinary, + Charset: collations.CollationBinaryID, + Flags: uint32(querypb.MySqlFlag_BINARY_FLAG), }} var row2 []sqltypes.Value for tabletAlias, result := range results { diff --git a/go/vt/vtctl/vtctl_env_test.go b/go/vt/vtctl/vtctl_env_test.go index 570088b9d13..e502fbdf86a 100644 --- a/go/vt/vtctl/vtctl_env_test.go +++ b/go/vt/vtctl/vtctl_env_test.go @@ -68,12 +68,12 @@ func init() { //---------------------------------------------- // testVTCtlEnv -func newTestVTCtlEnv() *testVTCtlEnv { +func newTestVTCtlEnv(ctx context.Context) *testVTCtlEnv { tabletconntest.SetProtocol("go.vt.vtctl.vtctl_env_test", "VTCtlTest") cellName := "cell1" env := &testVTCtlEnv{ tablets: make(map[int]*testVTCtlTablet), - topoServ: memorytopo.NewServer(cellName), + topoServ: memorytopo.NewServer(ctx, cellName), cell: cellName, tabletType: topodatapb.TabletType_REPLICA, tmc: newTestVTCtlTMClient(), diff --git a/go/vt/vtctl/vtctl_test.go b/go/vt/vtctl/vtctl_test.go index ab2d7786a4b..eb6a5f5941f 100644 --- a/go/vt/vtctl/vtctl_test.go +++ b/go/vt/vtctl/vtctl_test.go @@ -18,12 +18,15 @@ package vtctl import ( "context" + _ "embed" "fmt" + "regexp" "strings" "testing" "time" "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -32,8 +35,112 @@ import ( "vitess.io/vitess/go/vt/wrangler" ) +var ( + //go:embed testdata/unknown-params-logged-vschema.json + unknownParamsLoggedVSchema string + + //go:embed testdata/unknown-params-logged-dry-run-vschema.json + unknownParamsLoggedDryRunVSchema string +) + +// TestApplyVSchema tests the the MoveTables client command +// via the commandVRApplyVSchema() cmd handler. +func TestApplyVSchema(t *testing.T) { + shard := "0" + ks := "ks" + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestVTCtlEnv(ctx) + defer env.close() + _ = env.addTablet(100, ks, shard, &topodatapb.KeyRange{}, topodatapb.TabletType_PRIMARY) + + tests := []struct { + name string + args []string + expectResults func() + want string + }{ + { + name: "EmptyVSchema", + args: []string{"--vschema", "{}", ks}, + want: "New VSchema object:\n{}\nIf this is not what you expected, check the input data (as JSON parsing will skip unexpected fields).\n\n", + }, + { + name: "UnknownParamsLogged", + args: []string{"--vschema", unknownParamsLoggedVSchema, ks}, + want: `/New VSchema object: +{ + "sharded": true, + "vindexes": { + "binary_vdx": { + "type": "binary", + "params": { + "hello": "world" + } + }, + "hash_vdx": { + "type": "hash", + "params": { + "foo": "bar", + "hello": "world" + } + } + } +} +If this is not what you expected, check the input data \(as JSON parsing will skip unexpected fields\)\. + +.*W.* .* vtctl.go:.* Unknown param in vindex binary_vdx: hello +W.* .* vtctl.go:.* Unknown param in vindex hash_vdx: foo +W.* .* vtctl.go:.* Unknown param in vindex hash_vdx: hello`, + }, + { + name: "UnknownParamsLoggedWithDryRun", + args: []string{"--vschema", unknownParamsLoggedDryRunVSchema, "--dry-run", ks}, + want: `/New VSchema object: +{ + "sharded": true, + "vindexes": { + "binary_vdx": { + "type": "binary", + "params": { + "hello": "world" + } + }, + "hash_vdx": { + "type": "hash", + "params": { + "foo": "bar", + "hello": "world" + } + } + } +} +If this is not what you expected, check the input data \(as JSON parsing will skip unexpected fields\)\. + +.*W.* .* vtctl.go:.* Unknown param in vindex binary_vdx: hello +W.* .* vtctl.go:.* Unknown param in vindex hash_vdx: foo +W.* .* vtctl.go:.* Unknown param in vindex hash_vdx: hello +Dry run: Skipping update of VSchema`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + subFlags := pflag.NewFlagSet("test", pflag.ContinueOnError) + err := commandApplyVSchema(ctx, env.wr, subFlags, tt.args) + require.NoError(t, err) + if strings.HasPrefix(tt.want, "/") { + require.Regexp(t, regexp.MustCompile(tt.want[1:]), env.cmdlog.String()) + } else { + require.Equal(t, tt.want, env.cmdlog.String()) + } + env.cmdlog.Clear() + env.tmc.clearResults() + }) + } +} + // TestMoveTables tests the the MoveTables client command -// via the commandVRWorkflow() cmd handler. +// via the commandVReplicationWorkflow() cmd handler. // This currently only tests the Progress action (which is // a parent of the Show action) but it can be used to test // other actions as well. @@ -46,8 +153,9 @@ func TestMoveTables(t *testing.T) { wf := "testwf" ksWf := fmt.Sprintf("%s.%s", targetKs, wf) minTableSize := 16384 // a single 16KiB InnoDB page - ctx := context.Background() - env := newTestVTCtlEnv() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestVTCtlEnv(ctx) defer env.close() source := env.addTablet(100, sourceKs, shard, &topodatapb.KeyRange{}, topodatapb.TabletType_PRIMARY) target := env.addTablet(200, targetKs, shard, &topodatapb.KeyRange{}, topodatapb.TabletType_PRIMARY) @@ -243,7 +351,7 @@ func TestMoveTables(t *testing.T) { subFlags := pflag.NewFlagSet("test", pflag.ContinueOnError) expectGlobalResults() tt.expectResults() - err := commandVRWorkflow(ctx, env.wr, subFlags, tt.args, tt.workflowType) + err := commandVReplicationWorkflow(ctx, env.wr, subFlags, tt.args, tt.workflowType) require.NoError(t, err) if strings.HasPrefix(tt.want, "/") { require.Regexp(t, tt.want[1:], env.cmdlog.String()) @@ -255,3 +363,109 @@ func TestMoveTables(t *testing.T) { }) } } + +func TestGenerateOnlineDDLQuery(t *testing.T) { + tcases := []struct { + cmd string + arg string + allSupported bool + expectError bool + expectQuery string + }{ + { + "launch", + "all", + true, + false, + "alter vitess_migration launch all", + }, + { + "launch-all", + "", + true, + false, + "alter vitess_migration launch all", + }, + { + "launch", + "718169cc_1fea_11ee_82b1_0a43f95f28a3", + true, + false, + "alter vitess_migration '718169cc_1fea_11ee_82b1_0a43f95f28a3' launch", + }, + { + "cancel", + "718169cc_1fea_11ee_82b1_0a43f95f28a3", + true, + false, + "alter vitess_migration '718169cc_1fea_11ee_82b1_0a43f95f28a3' cancel", + }, + { + "unthrottle", + "718169cc_1fea_11ee_82b1_0a43f95f28a3", + true, + false, + "alter vitess_migration '718169cc_1fea_11ee_82b1_0a43f95f28a3' unthrottle", + }, + { + "unthrottle", + "", + true, + true, + "", + }, + { + "unthrottle-all", + "all", + true, + true, + "", + }, + { + "unthrottle-all", + "718169cc_1fea_11ee_82b1_0a43f95f28a3", + true, + true, + "", + }, + { + "retry", + "718169cc_1fea_11ee_82b1_0a43f95f28a3", + false, + false, + "alter vitess_migration '718169cc_1fea_11ee_82b1_0a43f95f28a3' retry", + }, + { + "retry-all", + "718169cc_1fea_11ee_82b1_0a43f95f28a3", + false, + true, + "", + }, + { + "retry-all", + "", + false, + true, + "", + }, + { + "retry", + "all", + false, + true, + "", + }, + } + for _, tcase := range tcases { + t.Run(fmt.Sprintf("%s %s", tcase.cmd, tcase.arg), func(t *testing.T) { + query, err := generateOnlineDDLQuery(tcase.cmd, tcase.arg, tcase.allSupported) + if tcase.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tcase.expectQuery, query) + } + }) + } +} diff --git a/go/vt/vtctl/vtctlclienttest/client.go b/go/vt/vtctl/vtctlclienttest/client.go index df192997cbb..8e77bed8f8a 100644 --- a/go/vt/vtctl/vtctlclienttest/client.go +++ b/go/vt/vtctl/vtctlclienttest/client.go @@ -25,13 +25,13 @@ package vtctlclienttest // zookeeper) won't be drawn into production binaries as well. import ( + "context" "io" "strings" "testing" "time" - "context" - + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -51,8 +51,8 @@ func init() { } // CreateTopoServer returns the test topo server properly configured -func CreateTopoServer(t *testing.T) *topo.Server { - return memorytopo.NewServer("cell1") +func CreateTopoServer(t *testing.T, ctx context.Context) *topo.Server { + return memorytopo.NewServer(ctx, "cell1") } // TestSuite runs the test suite on the given topo server and client @@ -67,7 +67,7 @@ func TestSuite(t *testing.T, ts *topo.Server, client vtctlclient.VtctlClient) { PortMap: map[string]int32{ "vt": 3333, }, - PrimaryTermStartTime: logutil.TimeToProto(time.Date(1970, 1, 1, 1, 1, 1, 1, time.UTC)), + PrimaryTermStartTime: protoutil.TimeToProto(time.Date(1970, 1, 1, 1, 1, 1, 1, time.UTC)), Tags: map[string]string{"tag": "value"}, Keyspace: "test_keyspace", Type: topodatapb.TabletType_PRIMARY, diff --git a/go/vt/vtctl/workflow/log_recorder.go b/go/vt/vtctl/workflow/log_recorder.go new file mode 100644 index 00000000000..c35ef562354 --- /dev/null +++ b/go/vt/vtctl/workflow/log_recorder.go @@ -0,0 +1,58 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "fmt" + "sort" +) + +// LogRecorder is used to collect logs for a specific purpose. +// Not thread-safe since it is expected to be generated in repeatable sequence +type LogRecorder struct { + logs []string +} + +// NewLogRecorder creates a new instance of LogRecorder +func NewLogRecorder() *LogRecorder { + lr := LogRecorder{} + return &lr +} + +// Log records a new log message +func (lr *LogRecorder) Log(log string) { + lr.logs = append(lr.logs, log) +} + +// Logf records a new log message with interpolation parameters using fmt.Sprintf. +func (lr *LogRecorder) Logf(log string, args ...any) { + lr.logs = append(lr.logs, fmt.Sprintf(log, args...)) +} + +// LogSlice sorts a given slice using natural sort, so that the result is predictable. +// Useful when logging arrays or maps where order of objects can vary +func (lr *LogRecorder) LogSlice(logs []string) { + sort.Strings(logs) + for _, log := range logs { + lr.Log(log) + } +} + +// GetLogs returns all recorded logs in sequence +func (lr *LogRecorder) GetLogs() []string { + return lr.logs +} diff --git a/go/cmd/vtctld/plugin_kubernetestopo.go b/go/vt/vtctl/workflow/log_recorder_test.go similarity index 57% rename from go/cmd/vtctld/plugin_kubernetestopo.go rename to go/vt/vtctl/workflow/log_recorder_test.go index 97612df6ed7..b58d1d42a79 100644 --- a/go/cmd/vtctld/plugin_kubernetestopo.go +++ b/go/vt/vtctl/workflow/log_recorder_test.go @@ -14,10 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main - -// Imports and register the 'kubernetes' topo.Server. +package workflow import ( - _ "vitess.io/vitess/go/vt/topo/k8stopo" + "testing" + "time" + + "github.com/magiconair/properties/assert" ) + +func TestLogRecorder(t *testing.T) { + lr := NewLogRecorder() + now := time.August + lr.Log("log 1") + lr.Log("log 2") + lr.Logf("log 3 with params: %s, %v, %d", "param1", now, 3) + lr.LogSlice([]string{"log 4", "log 5"}) + want := []string{"log 1", "log 2", "log 3 with params: param1, August, 3", "log 4", "log 5"} + assert.Equal(t, lr.GetLogs(), want) +} diff --git a/go/vt/vtctl/workflow/materializer.go b/go/vt/vtctl/workflow/materializer.go new file mode 100644 index 00000000000..1aa8137612d --- /dev/null +++ b/go/vt/vtctl/workflow/materializer.go @@ -0,0 +1,741 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "context" + "fmt" + "strings" + "sync" + "text/template" + "time" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/mysqlctl/tmutils" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtctl/schematools" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +const ( + createDDLAsCopy = "copy" + createDDLAsCopyDropConstraint = "copy:drop_constraint" + createDDLAsCopyDropForeignKeys = "copy:drop_foreign_keys" +) + +type materializer struct { + ctx context.Context + ts *topo.Server + sourceTs *topo.Server + tmc tmclient.TabletManagerClient + + ms *vtctldatapb.MaterializeSettings + targetVSchema *vindexes.KeyspaceSchema + sourceShards []*topo.ShardInfo + targetShards []*topo.ShardInfo + isPartial bool + primaryVindexesDiffer bool +} + +func (mz *materializer) getWorkflowSubType() (binlogdatapb.VReplicationWorkflowSubType, error) { + switch { + case mz.isPartial && mz.ms.AtomicCopy: + return binlogdatapb.VReplicationWorkflowSubType_None, + fmt.Errorf("both atomic copy and partial mode cannot be specified for the same workflow") + case mz.isPartial: + return binlogdatapb.VReplicationWorkflowSubType_Partial, nil + case mz.ms.AtomicCopy: + return binlogdatapb.VReplicationWorkflowSubType_AtomicCopy, nil + default: + return binlogdatapb.VReplicationWorkflowSubType_None, nil + } +} + +func (mz *materializer) prepareMaterializerStreams(req *vtctldatapb.MoveTablesCreateRequest) error { + if err := validateNewWorkflow(mz.ctx, mz.ts, mz.tmc, mz.ms.TargetKeyspace, mz.ms.Workflow); err != nil { + return err + } + err := mz.buildMaterializer() + if err != nil { + return err + } + if err := mz.deploySchema(); err != nil { + return err + } + + var workflowSubType binlogdatapb.VReplicationWorkflowSubType + workflowSubType, err = mz.getWorkflowSubType() + if err != nil { + return err + } + + return mz.forAllTargets(func(target *topo.ShardInfo) error { + targetPrimary, err := mz.ts.GetTablet(mz.ctx, target.PrimaryAlias) + if err != nil { + return vterrors.Wrapf(err, "GetTablet(%v) failed", target.PrimaryAlias) + } + + sourceShards := mz.filterSourceShards(target) + blses, err := mz.generateBinlogSources(mz.ctx, target, sourceShards) + if err != nil { + return err + } + _, err = mz.tmc.CreateVReplicationWorkflow(mz.ctx, targetPrimary.Tablet, &tabletmanagerdatapb.CreateVReplicationWorkflowRequest{ + Workflow: req.Workflow, + BinlogSource: blses, + Cells: req.Cells, + TabletTypes: req.TabletTypes, + TabletSelectionPreference: req.TabletSelectionPreference, + WorkflowType: binlogdatapb.VReplicationWorkflowType_MoveTables, + WorkflowSubType: workflowSubType, + DeferSecondaryKeys: req.DeferSecondaryKeys, + AutoStart: req.AutoStart, + StopAfterCopy: req.StopAfterCopy, + }) + return err + }) +} + +func (mz *materializer) createMaterializerStreams() error { + if err := validateNewWorkflow(mz.ctx, mz.ts, mz.tmc, mz.ms.TargetKeyspace, mz.ms.Workflow); err != nil { + return err + } + err := mz.buildMaterializer() + if err != nil { + return err + } + if mz.isPartial { + if err := createDefaultShardRoutingRules(mz.ctx, mz.ms, mz.ts); err != nil { + return err + } + } + if err := mz.deploySchema(); err != nil { + return err + } + insertMap := make(map[string]string, len(mz.targetShards)) + for _, targetShard := range mz.targetShards { + sourceShards := mz.filterSourceShards(targetShard) + inserts, err := mz.generateInserts(mz.ctx, sourceShards) + if err != nil { + return err + } + insertMap[key.KeyRangeString(targetShard.KeyRange)] = inserts + } + if err := mz.createStreams(mz.ctx, insertMap); err != nil { + return err + } + return nil +} + +func (mz *materializer) generateInserts(ctx context.Context, sourceShards []*topo.ShardInfo) (string, error) { + ig := vreplication.NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Stopped, "{{.dbname}}") + + for _, sourceShard := range sourceShards { + bls := &binlogdatapb.BinlogSource{ + Keyspace: mz.ms.SourceKeyspace, + Shard: sourceShard.ShardName(), + Filter: &binlogdatapb.Filter{}, + StopAfterCopy: mz.ms.StopAfterCopy, + ExternalCluster: mz.ms.ExternalCluster, + SourceTimeZone: mz.ms.SourceTimeZone, + TargetTimeZone: mz.ms.TargetTimeZone, + OnDdl: binlogdatapb.OnDDLAction(binlogdatapb.OnDDLAction_value[mz.ms.OnDdl]), + } + for _, ts := range mz.ms.TableSettings { + rule := &binlogdatapb.Rule{ + Match: ts.TargetTable, + } + + if ts.SourceExpression == "" { + bls.Filter.Rules = append(bls.Filter.Rules, rule) + continue + } + + // Validate non-empty query. + stmt, err := sqlparser.Parse(ts.SourceExpression) + if err != nil { + return "", err + } + sel, ok := stmt.(*sqlparser.Select) + if !ok { + return "", fmt.Errorf("unrecognized statement: %s", ts.SourceExpression) + } + filter := ts.SourceExpression + if mz.targetVSchema.Keyspace.Sharded && mz.targetVSchema.Tables[ts.TargetTable].Type != vindexes.TypeReference { + cv, err := vindexes.FindBestColVindex(mz.targetVSchema.Tables[ts.TargetTable]) + if err != nil { + return "", err + } + mappedCols := make([]*sqlparser.ColName, 0, len(cv.Columns)) + for _, col := range cv.Columns { + colName, err := matchColInSelect(col, sel) + if err != nil { + return "", err + } + mappedCols = append(mappedCols, colName) + } + subExprs := make(sqlparser.SelectExprs, 0, len(mappedCols)+2) + for _, mappedCol := range mappedCols { + subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: mappedCol}) + } + vindexName := fmt.Sprintf("%s.%s", mz.ms.TargetKeyspace, cv.Name) + subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(vindexName)}) + subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral("{{.keyrange}}")}) + inKeyRange := &sqlparser.FuncExpr{ + Name: sqlparser.NewIdentifierCI("in_keyrange"), + Exprs: subExprs, + } + if sel.Where != nil { + sel.Where = &sqlparser.Where{ + Type: sqlparser.WhereClause, + Expr: &sqlparser.AndExpr{ + Left: inKeyRange, + Right: sel.Where.Expr, + }, + } + } else { + sel.Where = &sqlparser.Where{ + Type: sqlparser.WhereClause, + Expr: inKeyRange, + } + } + + filter = sqlparser.String(sel) + } + + rule.Filter = filter + + bls.Filter.Rules = append(bls.Filter.Rules, rule) + } + workflowSubType := binlogdatapb.VReplicationWorkflowSubType_None + if mz.isPartial { + workflowSubType = binlogdatapb.VReplicationWorkflowSubType_Partial + } + var workflowType binlogdatapb.VReplicationWorkflowType + switch mz.ms.MaterializationIntent { + case vtctldatapb.MaterializationIntent_CUSTOM: + workflowType = binlogdatapb.VReplicationWorkflowType_Materialize + case vtctldatapb.MaterializationIntent_MOVETABLES: + workflowType = binlogdatapb.VReplicationWorkflowType_MoveTables + case vtctldatapb.MaterializationIntent_CREATELOOKUPINDEX: + workflowType = binlogdatapb.VReplicationWorkflowType_CreateLookupIndex + } + ig.AddRow(mz.ms.Workflow, bls, "", mz.ms.Cell, mz.ms.TabletTypes, + workflowType, + workflowSubType, mz.ms.DeferSecondaryKeys) + } + return ig.String(), nil +} + +func (mz *materializer) generateBinlogSources(ctx context.Context, targetShard *topo.ShardInfo, sourceShards []*topo.ShardInfo) ([]*binlogdatapb.BinlogSource, error) { + blses := make([]*binlogdatapb.BinlogSource, 0, len(mz.sourceShards)) + for _, sourceShard := range sourceShards { + bls := &binlogdatapb.BinlogSource{ + Keyspace: mz.ms.SourceKeyspace, + Shard: sourceShard.ShardName(), + Filter: &binlogdatapb.Filter{}, + StopAfterCopy: mz.ms.StopAfterCopy, + ExternalCluster: mz.ms.ExternalCluster, + SourceTimeZone: mz.ms.SourceTimeZone, + TargetTimeZone: mz.ms.TargetTimeZone, + OnDdl: binlogdatapb.OnDDLAction(binlogdatapb.OnDDLAction_value[mz.ms.OnDdl]), + } + for _, ts := range mz.ms.TableSettings { + rule := &binlogdatapb.Rule{ + Match: ts.TargetTable, + } + + if ts.SourceExpression == "" { + bls.Filter.Rules = append(bls.Filter.Rules, rule) + continue + } + + // Validate non-empty query. + stmt, err := sqlparser.Parse(ts.SourceExpression) + if err != nil { + return nil, err + } + sel, ok := stmt.(*sqlparser.Select) + if !ok { + return nil, fmt.Errorf("unrecognized statement: %s", ts.SourceExpression) + } + filter := ts.SourceExpression + if mz.targetVSchema.Keyspace.Sharded && mz.targetVSchema.Tables[ts.TargetTable].Type != vindexes.TypeReference { + cv, err := vindexes.FindBestColVindex(mz.targetVSchema.Tables[ts.TargetTable]) + if err != nil { + return nil, err + } + mappedCols := make([]*sqlparser.ColName, 0, len(cv.Columns)) + for _, col := range cv.Columns { + colName, err := matchColInSelect(col, sel) + if err != nil { + return nil, err + } + mappedCols = append(mappedCols, colName) + } + subExprs := make(sqlparser.SelectExprs, 0, len(mappedCols)+2) + for _, mappedCol := range mappedCols { + subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: mappedCol}) + } + vindexName := fmt.Sprintf("%s.%s", mz.ms.TargetKeyspace, cv.Name) + subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(vindexName)}) + subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(key.KeyRangeString(targetShard.KeyRange))}) + inKeyRange := &sqlparser.FuncExpr{ + Name: sqlparser.NewIdentifierCI("in_keyrange"), + Exprs: subExprs, + } + if sel.Where != nil { + sel.Where = &sqlparser.Where{ + Type: sqlparser.WhereClause, + Expr: &sqlparser.AndExpr{ + Left: inKeyRange, + Right: sel.Where.Expr, + }, + } + } else { + sel.Where = &sqlparser.Where{ + Type: sqlparser.WhereClause, + Expr: inKeyRange, + } + } + + filter = sqlparser.String(sel) + } + + rule.Filter = filter + bls.Filter.Rules = append(bls.Filter.Rules, rule) + } + blses = append(blses, bls) + } + return blses, nil +} + +func (mz *materializer) deploySchema() error { + var sourceDDLs map[string]string + var mu sync.Mutex + + return forAllShards(mz.targetShards, func(target *topo.ShardInfo) error { + allTables := []string{"/.*/"} + + hasTargetTable := map[string]bool{} + req := &tabletmanagerdatapb.GetSchemaRequest{Tables: allTables} + targetSchema, err := schematools.GetSchema(mz.ctx, mz.ts, mz.tmc, target.PrimaryAlias, req) + if err != nil { + return err + } + + for _, td := range targetSchema.TableDefinitions { + hasTargetTable[td.Name] = true + } + + targetTablet, err := mz.ts.GetTablet(mz.ctx, target.PrimaryAlias) + if err != nil { + return err + } + + var applyDDLs []string + for _, ts := range mz.ms.TableSettings { + if hasTargetTable[ts.TargetTable] { + // Table already exists. + continue + } + if ts.CreateDdl == "" { + return fmt.Errorf("target table %v does not exist and there is no create ddl defined", ts.TargetTable) + } + + var err error + mu.Lock() + if len(sourceDDLs) == 0 { + // Only get DDLs for tables once and lazily: if we need to copy the schema from source + // to target then we copy schemas from primaries on the source keyspace; we have found + // use cases where the user just has a replica (no primary) in the source keyspace. + sourceDDLs, err = getSourceTableDDLs(mz.ctx, mz.sourceTs, mz.tmc, mz.sourceShards) + } + mu.Unlock() + if err != nil { + log.Errorf("Error getting DDLs of source tables: %s", err.Error()) + return err + } + + createDDL := ts.CreateDdl + if createDDL == createDDLAsCopy || createDDL == createDDLAsCopyDropConstraint || createDDL == createDDLAsCopyDropForeignKeys { + if ts.SourceExpression != "" { + // Check for table if non-empty SourceExpression. + sourceTableName, err := sqlparser.TableFromStatement(ts.SourceExpression) + if err != nil { + return err + } + if sourceTableName.Name.String() != ts.TargetTable { + return fmt.Errorf("source and target table names must match for copying schema: %v vs %v", sqlparser.String(sourceTableName), ts.TargetTable) + + } + } + + ddl, ok := sourceDDLs[ts.TargetTable] + if !ok { + return fmt.Errorf("source table %v does not exist", ts.TargetTable) + } + + if createDDL == createDDLAsCopyDropConstraint { + strippedDDL, err := stripTableConstraints(ddl) + if err != nil { + return err + } + + ddl = strippedDDL + } + + if createDDL == createDDLAsCopyDropForeignKeys { + strippedDDL, err := stripTableForeignKeys(ddl) + if err != nil { + return err + } + + ddl = strippedDDL + } + createDDL = ddl + } + + applyDDLs = append(applyDDLs, createDDL) + } + + if len(applyDDLs) > 0 { + sql := strings.Join(applyDDLs, ";\n") + + _, err = mz.tmc.ApplySchema(mz.ctx, targetTablet.Tablet, &tmutils.SchemaChange{ + SQL: sql, + Force: false, + AllowReplication: true, + SQLMode: vreplication.SQLMode, + }) + if err != nil { + return err + } + } + + return nil + }) +} + +func (mz *materializer) buildMaterializer() error { + ctx := mz.ctx + ms := mz.ms + vschema, err := mz.ts.GetVSchema(ctx, ms.TargetKeyspace) + if err != nil { + return err + } + targetVSchema, err := vindexes.BuildKeyspaceSchema(vschema, ms.TargetKeyspace) + if err != nil { + return err + } + if targetVSchema.Keyspace.Sharded { + for _, ts := range ms.TableSettings { + if targetVSchema.Tables[ts.TargetTable] == nil { + return fmt.Errorf("table %s not found in vschema for keyspace %s", ts.TargetTable, ms.TargetKeyspace) + } + } + } + isPartial := false + sourceShards, err := mz.sourceTs.GetServingShards(ctx, ms.SourceKeyspace) + if err != nil { + return err + } + if len(ms.SourceShards) > 0 { + isPartial = true + var sourceShards2 []*topo.ShardInfo + for _, shard := range sourceShards { + for _, shard2 := range ms.SourceShards { + if shard.ShardName() == shard2 { + sourceShards2 = append(sourceShards2, shard) + break + } + } + } + sourceShards = sourceShards2 + } + if len(sourceShards) == 0 { + return fmt.Errorf("no source shards specified for workflow %s ", ms.Workflow) + } + + targetShards, err := mz.ts.GetServingShards(ctx, ms.TargetKeyspace) + if err != nil { + return err + } + if len(ms.SourceShards) > 0 { + var targetShards2 []*topo.ShardInfo + for _, shard := range targetShards { + for _, shard2 := range ms.SourceShards { + if shard.ShardName() == shard2 { + targetShards2 = append(targetShards2, shard) + break + } + } + } + targetShards = targetShards2 + } + if len(targetShards) == 0 { + return fmt.Errorf("no target shards specified for workflow %s ", ms.Workflow) + } + + sourceTs := mz.ts + if ms.ExternalCluster != "" { // when the source is an external mysql cluster mounted using the Mount command + externalTopo, err := mz.ts.OpenExternalVitessClusterServer(ctx, ms.ExternalCluster) + if err != nil { + return fmt.Errorf("failed to open external topo: %v", err) + } + sourceTs = externalTopo + } + differentPVs := false + sourceVSchema, err := sourceTs.GetVSchema(ctx, ms.SourceKeyspace) + if err != nil { + return fmt.Errorf("failed to get source keyspace vschema: %v", err) + } + differentPVs = primaryVindexesDiffer(ms, sourceVSchema, vschema) + + mz.targetVSchema = targetVSchema + mz.sourceShards = sourceShards + mz.targetShards = targetShards + mz.isPartial = isPartial + mz.primaryVindexesDiffer = differentPVs + return nil +} + +func (mz *materializer) createStreams(ctx context.Context, insertsMap map[string]string) error { + return forAllShards(mz.targetShards, func(target *topo.ShardInfo) error { + keyRange := key.KeyRangeString(target.KeyRange) + inserts := insertsMap[keyRange] + targetPrimary, err := mz.ts.GetTablet(ctx, target.PrimaryAlias) + if err != nil { + return vterrors.Wrapf(err, "GetTablet(%v) failed", target.PrimaryAlias) + } + buf := &strings.Builder{} + t := template.Must(template.New("").Parse(inserts)) + input := map[string]string{ + "keyrange": keyRange, + "dbname": targetPrimary.DbName(), + } + if err := t.Execute(buf, input); err != nil { + return err + } + if _, err := mz.tmc.VReplicationExec(ctx, targetPrimary.Tablet, buf.String()); err != nil { + return err + } + return nil + }) +} + +func (mz *materializer) startStreams(ctx context.Context) error { + return forAllShards(mz.targetShards, func(target *topo.ShardInfo) error { + targetPrimary, err := mz.ts.GetTablet(ctx, target.PrimaryAlias) + if err != nil { + return vterrors.Wrapf(err, "GetTablet(%v) failed", target.PrimaryAlias) + } + query := fmt.Sprintf("update _vt.vreplication set state='Running' where db_name=%s and workflow=%s", encodeString(targetPrimary.DbName()), encodeString(mz.ms.Workflow)) + if _, err := mz.tmc.VReplicationExec(ctx, targetPrimary.Tablet, query); err != nil { + return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", targetPrimary.Tablet, query) + } + return nil + }) +} + +func Materialize(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, ms *vtctldatapb.MaterializeSettings) error { + mz := &materializer{ + ctx: ctx, + ts: ts, + sourceTs: ts, + tmc: tmc, + ms: ms, + } + + err := mz.createMaterializerStreams() + if err != nil { + return err + } + return mz.startStreams(ctx) +} + +func (mz *materializer) forAllTargets(f func(*topo.ShardInfo) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, target := range mz.targetShards { + wg.Add(1) + go func(target *topo.ShardInfo) { + defer wg.Done() + + if err := f(target); err != nil { + allErrors.RecordError(err) + } + }(target) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + +// checkTZConversion is a light-weight consistency check to validate that, if a source time zone is specified to MoveTables, +// that the current primary has the time zone loaded in order to run the convert_tz() function used by VReplication to do the +// datetime conversions. We only check the current primaries on each shard and note here that it is possible a new primary +// gets elected: in this case user will either see errors during vreplication or vdiff will report mismatches. +func (mz *materializer) checkTZConversion(ctx context.Context, tz string) error { + err := mz.forAllTargets(func(target *topo.ShardInfo) error { + targetPrimary, err := mz.ts.GetTablet(ctx, target.PrimaryAlias) + if err != nil { + return vterrors.Wrapf(err, "GetTablet(%v) failed", target.PrimaryAlias) + } + testDateTime := "2006-01-02 15:04:05" + query := fmt.Sprintf("select convert_tz(%s, %s, 'UTC')", encodeString(testDateTime), encodeString(tz)) + qrproto, err := mz.tmc.ExecuteFetchAsApp(ctx, targetPrimary.Tablet, false, &tabletmanagerdatapb.ExecuteFetchAsAppRequest{ + Query: []byte(query), + MaxRows: 1, + }) + if err != nil { + return vterrors.Wrapf(err, "ExecuteFetchAsApp(%v, %s)", targetPrimary.Tablet, query) + } + qr := sqltypes.Proto3ToResult(qrproto) + if gotDate, err := time.Parse(testDateTime, qr.Rows[0][0].ToString()); err != nil { + return fmt.Errorf("unable to perform time_zone conversions from %s to UTC — value from DB was: %+v and the result of the attempt was: %s. Either the specified source time zone is invalid or the time zone tables have not been loaded on the %s tablet", + tz, qr.Rows, gotDate, targetPrimary.Alias) + } + return nil + }) + return err +} + +// filterSourceShards filters out source shards that do not overlap with the +// provided target shard. This is an optimization to avoid copying unnecessary +// data between the shards. This optimization is only applied for MoveTables +// when the source and target shard have the same primary vindexes. +func (mz *materializer) filterSourceShards(targetShard *topo.ShardInfo) []*topo.ShardInfo { + if mz.primaryVindexesDiffer || mz.ms.MaterializationIntent != vtctldatapb.MaterializationIntent_MOVETABLES { + // Use all source shards. + return mz.sourceShards + } + // Use intersecting source shards. + var filteredSourceShards []*topo.ShardInfo + for _, sourceShard := range mz.sourceShards { + if !key.KeyRangeIntersect(sourceShard.KeyRange, targetShard.KeyRange) { + continue + } + filteredSourceShards = append(filteredSourceShards, sourceShard) + } + return filteredSourceShards +} + +// primaryVindexesDiffer returns true if, for any tables defined in the provided +// materialize settings, the source and target vschema definitions for those +// tables have different primary vindexes. +// +// The result of this function is used to determine whether to apply a source +// shard selection optimization in MoveTables. +func primaryVindexesDiffer(ms *vtctldatapb.MaterializeSettings, source, target *vschemapb.Keyspace) bool { + // Unless both keyspaces are sharded, treat the answer to the question as + // trivially false. + if source.Sharded != target.Sharded { + return false + } + + // For source and target keyspaces that are sharded, we can optimize source + // shard selection if source and target tables' primary vindexes are equal. + // + // To determine this, iterate over all target tables, looking for primary + // vindexes that differ from the corresponding source table. + for _, ts := range ms.TableSettings { + sColumnVindexes := []*vschemapb.ColumnVindex{} + tColumnVindexes := []*vschemapb.ColumnVindex{} + if tt, ok := source.Tables[ts.TargetTable]; ok { + sColumnVindexes = tt.ColumnVindexes + } + if tt, ok := target.Tables[ts.TargetTable]; ok { + tColumnVindexes = tt.ColumnVindexes + } + + // If source does not have a primary vindex, but the target does, then + // the primary vindexes differ. + if len(sColumnVindexes) == 0 && len(tColumnVindexes) > 0 { + return true + } + // If source has a primary vindex, but the target does not, then the + // primary vindexes differ. + if len(sColumnVindexes) > 0 && len(tColumnVindexes) == 0 { + return true + } + // If neither source nor target have any vindexes, treat the answer to + // the question as trivially false. + if len(sColumnVindexes) == 0 && len(tColumnVindexes) == 0 { + return true + } + + sPrimaryVindex := sColumnVindexes[0] + tPrimaryVindex := tColumnVindexes[0] + + // Compare source and target primary vindex columns. + var sColumns, tColumns []string + if sPrimaryVindex.Column != "" { + sColumns = []string{sPrimaryVindex.Column} + } else { + sColumns = sPrimaryVindex.Columns + } + if tPrimaryVindex.Column != "" { + tColumns = []string{tPrimaryVindex.Column} + } else { + tColumns = tPrimaryVindex.Columns + } + if len(sColumns) != len(tColumns) { + return true + } + for i := 0; i < len(sColumns); i++ { + if !strings.EqualFold(sColumns[i], tColumns[i]) { + return true + } + } + + // Get source and target vindex definitions. + spv := source.Vindexes[sColumnVindexes[0].Name] + tpv := target.Vindexes[tColumnVindexes[0].Name] + // If the source has vindex definition, but target does not, then the + // target vschema is invalid. Assume the primary vindexes differ. + if spv != nil && tpv == nil { + return true + } + // If the target has vindex definition, but source does not, then the + // source vschema is invalid. Assume the primary vindexes differ. + if spv == nil && tpv != nil { + return true + } + // If both target and source are missing vindex definitions, then both + // are equally invalid. + if spv == nil && tpv == nil { + continue + } + // Compare source and target vindex type. + if !strings.EqualFold(spv.Type, tpv.Type) { + return true + } + } + return false +} diff --git a/go/vt/vtctl/workflow/materializer_env_test.go b/go/vt/vtctl/workflow/materializer_env_test.go new file mode 100644 index 00000000000..73646d7da80 --- /dev/null +++ b/go/vt/vtctl/workflow/materializer_env_test.go @@ -0,0 +1,308 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "context" + "fmt" + "os" + "regexp" + "strconv" + "strings" + "sync" + "testing" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/mysqlctl/tmutils" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + _flag "vitess.io/vitess/go/internal/flag" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +type queryResult struct { + query string + result *querypb.QueryResult +} + +type testMaterializerEnv struct { + ws *Server + ms *vtctldatapb.MaterializeSettings + sources []string + targets []string + tablets map[int]*topodatapb.Tablet + // Importing the tabletmanager package causes a circular dependency. :-( + //tms map[int]*tabletmanager.TabletManager + topoServ *topo.Server + cell string + tmc *testMaterializerTMClient +} + +//---------------------------------------------- +// testMaterializerEnv + +func TestMain(m *testing.M) { + _flag.ParseFlagsForTest() + os.Exit(m.Run()) +} + +func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.MaterializeSettings, sources, targets []string) *testMaterializerEnv { + t.Helper() + env := &testMaterializerEnv{ + ms: ms, + sources: sources, + targets: targets, + tablets: make(map[int]*topodatapb.Tablet), + topoServ: memorytopo.NewServer(ctx, "cell"), + cell: "cell", + tmc: newTestMaterializerTMClient(), + } + env.ws = NewServer(env.topoServ, env.tmc) + tabletID := 100 + for _, shard := range sources { + _ = env.addTablet(tabletID, env.ms.SourceKeyspace, shard, topodatapb.TabletType_PRIMARY) + tabletID += 10 + } + if ms.SourceKeyspace != ms.TargetKeyspace { + tabletID = 200 + for _, shard := range targets { + _ = env.addTablet(tabletID, env.ms.TargetKeyspace, shard, topodatapb.TabletType_PRIMARY) + tabletID += 10 + } + } + + for _, ts := range ms.TableSettings { + tableName := ts.TargetTable + table, err := sqlparser.TableFromStatement(ts.SourceExpression) + if err == nil { + tableName = table.Name.String() + } + env.tmc.schema[ms.SourceKeyspace+"."+tableName] = &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{ + Name: tableName, + Schema: fmt.Sprintf("%s_schema", tableName), + }}, + } + env.tmc.schema[ms.TargetKeyspace+"."+ts.TargetTable] = &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{ + Name: ts.TargetTable, + Schema: fmt.Sprintf("%s_schema", ts.TargetTable), + }}, + } + } + if ms.Workflow != "" { + env.expectValidation() + } + return env +} + +func (env *testMaterializerEnv) expectValidation() { + for _, tablet := range env.tablets { + tabletID := int(tablet.Alias.Uid) + if tabletID < 200 { + continue + } + // wr.validateNewWorkflow + env.tmc.expectVRQuery(tabletID, fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s' and workflow='%s'", env.ms.TargetKeyspace, env.ms.Workflow), &sqltypes.Result{}) + } +} + +func (env *testMaterializerEnv) close() { + for _, t := range env.tablets { + env.deleteTablet(t) + } +} + +func (env *testMaterializerEnv) addTablet(id int, keyspace, shard string, tabletType topodatapb.TabletType) *topodatapb.Tablet { + tablet := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: env.cell, + Uid: uint32(id), + }, + Keyspace: keyspace, + Shard: shard, + KeyRange: &topodatapb.KeyRange{}, + Type: tabletType, + PortMap: map[string]int32{ + "test": int32(id), + }, + } + env.tablets[id] = tablet + if err := env.ws.ts.InitTablet(context.Background(), tablet, false /* allowPrimaryOverride */, true /* createShardAndKeyspace */, false /* allowUpdate */); err != nil { + panic(err) + } + if tabletType == topodatapb.TabletType_PRIMARY { + _, err := env.ws.ts.UpdateShardFields(context.Background(), keyspace, shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = tablet.Alias + return nil + }) + if err != nil { + panic(err) + } + } + return tablet +} + +func (env *testMaterializerEnv) deleteTablet(tablet *topodatapb.Tablet) { + _ = env.topoServ.DeleteTablet(context.Background(), tablet.Alias) + delete(env.tablets, int(tablet.Alias.Uid)) +} + +//---------------------------------------------- +// testMaterializerTMClient + +type testMaterializerTMClient struct { + tmclient.TabletManagerClient + schema map[string]*tabletmanagerdatapb.SchemaDefinition + + mu sync.Mutex + vrQueries map[int][]*queryResult + getSchemaCounts map[string]int + muSchemaCount sync.Mutex +} + +func newTestMaterializerTMClient() *testMaterializerTMClient { + return &testMaterializerTMClient{ + schema: make(map[string]*tabletmanagerdatapb.SchemaDefinition), + vrQueries: make(map[int][]*queryResult), + getSchemaCounts: make(map[string]int), + } +} + +func (tmc *testMaterializerTMClient) schemaRequested(uid uint32) { + tmc.muSchemaCount.Lock() + defer tmc.muSchemaCount.Unlock() + key := strconv.Itoa(int(uid)) + n, ok := tmc.getSchemaCounts[key] + if !ok { + tmc.getSchemaCounts[key] = 1 + } else { + tmc.getSchemaCounts[key] = n + 1 + } +} + +func (tmc *testMaterializerTMClient) CreateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) { + res := sqltypes.MakeTestResult(sqltypes.MakeTestFields("rowsaffected", "int64"), "1") + return &tabletmanagerdatapb.CreateVReplicationWorkflowResponse{Result: sqltypes.ResultToProto3(res)}, nil +} + +func (tmc *testMaterializerTMClient) ReadVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { + return &tabletmanagerdatapb.ReadVReplicationWorkflowResponse{ + Workflow: "workflow", + Streams: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{ + { + Id: 1, + Bls: &binlogdatapb.BinlogSource{ + Keyspace: "sourceks", + Shard: "0", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + { + Match: ".*", + }, + }, + }, + }, + }, + }, + }, nil +} + +func (tmc *testMaterializerTMClient) GetSchema(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.GetSchemaRequest) (*tabletmanagerdatapb.SchemaDefinition, error) { + tmc.schemaRequested(tablet.Alias.Uid) + schemaDefn := &tabletmanagerdatapb.SchemaDefinition{} + for _, table := range request.Tables { + if table == "/.*/" { + // Special case of all tables in keyspace. + for key, tableDefn := range tmc.schema { + if strings.HasPrefix(key, tablet.Keyspace+".") { + schemaDefn.TableDefinitions = append(schemaDefn.TableDefinitions, tableDefn.TableDefinitions...) + } + } + break + } + + key := tablet.Keyspace + "." + table + tableDefn := tmc.schema[key] + if tableDefn == nil { + continue + } + schemaDefn.TableDefinitions = append(schemaDefn.TableDefinitions, tableDefn.TableDefinitions...) + } + return schemaDefn, nil +} + +func (tmc *testMaterializerTMClient) expectVRQuery(tabletID int, query string, result *sqltypes.Result) { + tmc.mu.Lock() + defer tmc.mu.Unlock() + + tmc.vrQueries[tabletID] = append(tmc.vrQueries[tabletID], &queryResult{ + query: query, + result: sqltypes.ResultToProto3(result), + }) +} + +func (tmc *testMaterializerTMClient) VReplicationExec(ctx context.Context, tablet *topodatapb.Tablet, query string) (*querypb.QueryResult, error) { + tmc.mu.Lock() + defer tmc.mu.Unlock() + + qrs := tmc.vrQueries[int(tablet.Alias.Uid)] + if len(qrs) == 0 { + return nil, fmt.Errorf("tablet %v does not expect any more queries: %s", tablet, query) + } + matched := false + if qrs[0].query[0] == '/' { + matched = regexp.MustCompile(qrs[0].query[1:]).MatchString(query) + } else { + matched = query == qrs[0].query + } + if !matched { + return nil, fmt.Errorf("tablet %v:\nunexpected query\n%s\nwant:\n%s", tablet, query, qrs[0].query) + } + tmc.vrQueries[int(tablet.Alias.Uid)] = qrs[1:] + return qrs[0].result, nil +} + +func (tmc *testMaterializerTMClient) ExecuteFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, req *tabletmanagerdatapb.ExecuteFetchAsDbaRequest) (*querypb.QueryResult, error) { + // Reuse VReplicationExec + return tmc.VReplicationExec(ctx, tablet, string(req.Query)) +} + +// Note: ONLY breaks up change.SQL into individual statements and executes it. Does NOT fully implement ApplySchema. +func (tmc *testMaterializerTMClient) ApplySchema(ctx context.Context, tablet *topodatapb.Tablet, change *tmutils.SchemaChange) (*tabletmanagerdatapb.SchemaChangeResult, error) { + stmts := strings.Split(change.SQL, ";") + + for _, stmt := range stmts { + _, err := tmc.ExecuteFetchAsDba(ctx, tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ + Query: []byte(stmt), + MaxRows: 0, + ReloadSchema: true, + }) + if err != nil { + return nil, err + } + } + + return nil, nil +} diff --git a/go/vt/vtctl/workflow/materializer_test.go b/go/vt/vtctl/workflow/materializer_test.go new file mode 100644 index 00000000000..6a7b191dd0a --- /dev/null +++ b/go/vt/vtctl/workflow/materializer_test.go @@ -0,0 +1,577 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtgate/vindexes" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +const getWorkflowQuery = "select id from _vt.vreplication where db_name='vt_targetks' and workflow='workflow'" +const mzUpdateQuery = "update _vt.vreplication set state='Running' where db_name='vt_targetks' and workflow='workflow'" +const mzSelectFrozenQuery = "select 1 from _vt.vreplication where db_name='vt_targetks' and message='FROZEN' and workflow_sub_type != 1" +const mzCheckJournal = "/select val from _vt.resharding_journal where id=" +const mzGetWorkflowStatusQuery = "select id, workflow, source, pos, stop_pos, max_replication_lag, state, db_name, time_updated, transaction_timestamp, message, tags, workflow_type, workflow_sub_type from _vt.vreplication where workflow = 'workflow' and db_name = 'vt_targetks'" +const mzGetCopyState = "select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = 1" +const mzGetLatestCopyState = "select table_name, lastpk from _vt.copy_state where vrepl_id = 1 and id in (select max(id) from _vt.copy_state where vrepl_id = 1 group by vrepl_id, table_name)" + +var defaultOnDDL = binlogdatapb.OnDDLAction_IGNORE.String() +var binlogSource = &binlogdatapb.BinlogSource{ + Keyspace: "sourceks", + Shard: "0", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1", + }}, + }, +} +var getWorkflowRes = sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys", + "int64|blob|varchar|varchar|varchar|int64|int64|int64", + ), + fmt.Sprintf("1|%s||zone1|replica|1|0|1", binlogSource), +) +var getWorkflowStatusRes = sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|workflow|source|pos|stop_pos|max_replication_log|state|db_name|time_updated|transaction_timestamp|message|tags|workflow_type|workflow_sub_type", + "int64|varchar|blob|varchar|varchar|int64|varchar|varchar|int64|int64|varchar|varchar|int64|int64", + ), + fmt.Sprintf("1|wf1|%s|MySQL56/9d10e6ec-07a0-11ee-ae73-8e53f4cf3083:1-97|NULL|0|running|vt_ks|1686577659|0|||1|0", binlogSource), +) + +func TestStripForeignKeys(t *testing.T) { + tcs := []struct { + desc string + ddl string + + hasErr bool + newDDL string + }{ + { + desc: "has FK constraints", + ddl: "CREATE TABLE `table1` (\n" + + "`id` int(11) NOT NULL AUTO_INCREMENT,\n" + + "`foreign_id` int(11) CHECK (foreign_id>10),\n" + + "PRIMARY KEY (`id`),\n" + + "KEY `fk_table1_ref_foreign_id` (`foreign_id`),\n" + + "CONSTRAINT `fk_table1_ref_foreign_id` FOREIGN KEY (`foreign_id`) REFERENCES `foreign` (`id`)\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=latin1;", + + newDDL: "create table table1 (\n" + + "\tid int(11) not null auto_increment,\n" + + "\tforeign_id int(11),\n" + + "\tPRIMARY KEY (id),\n" + + "\tKEY fk_table1_ref_foreign_id (foreign_id),\n" + + "\tcheck (foreign_id > 10)\n" + + ") ENGINE InnoDB,\n" + + " CHARSET latin1", + + hasErr: false, + }, + { + desc: "no FK constraints", + ddl: "CREATE TABLE `table1` (\n" + + "`id` int(11) NOT NULL AUTO_INCREMENT,\n" + + "`foreign_id` int(11) NOT NULL CHECK (foreign_id>10),\n" + + "`user_id` int(11) NOT NULL,\n" + + "PRIMARY KEY (`id`),\n" + + "KEY `fk_table1_ref_foreign_id` (`foreign_id`),\n" + + "KEY `fk_table1_ref_user_id` (`user_id`)\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=latin1;", + + newDDL: "create table table1 (\n" + + "\tid int(11) not null auto_increment,\n" + + "\tforeign_id int(11) not null,\n" + + "\tuser_id int(11) not null,\n" + + "\tPRIMARY KEY (id),\n" + + "\tKEY fk_table1_ref_foreign_id (foreign_id),\n" + + "\tKEY fk_table1_ref_user_id (user_id),\n" + + "\tcheck (foreign_id > 10)\n" + + ") ENGINE InnoDB,\n" + + " CHARSET latin1", + }, + } + + for _, tc := range tcs { + newDDL, err := stripTableForeignKeys(tc.ddl) + if tc.hasErr != (err != nil) { + t.Fatalf("hasErr does not match: err: %v, tc: %+v", err, tc) + } + + if newDDL != tc.newDDL { + utils.MustMatch(t, tc.newDDL, newDDL, fmt.Sprintf("newDDL does not match. tc: %+v", tc)) + } + } +} + +func TestStripConstraints(t *testing.T) { + tcs := []struct { + desc string + ddl string + + hasErr bool + newDDL string + }{ + { + desc: "constraints", + ddl: "CREATE TABLE `table1` (\n" + + "`id` int(11) NOT NULL AUTO_INCREMENT,\n" + + "`foreign_id` int(11) NOT NULL,\n" + + "`user_id` int(11) NOT NULL,\n" + + "PRIMARY KEY (`id`),\n" + + "KEY `fk_table1_ref_foreign_id` (`foreign_id`),\n" + + "KEY `fk_table1_ref_user_id` (`user_id`),\n" + + "CONSTRAINT `fk_table1_ref_foreign_id` FOREIGN KEY (`foreign_id`) REFERENCES `foreign` (`id`),\n" + + "CONSTRAINT `fk_table1_ref_user_id` FOREIGN KEY (`user_id`) REFERENCES `core_user` (`id`)\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=latin1;", + + newDDL: "create table table1 (\n" + + "\tid int(11) not null auto_increment,\n" + + "\tforeign_id int(11) not null,\n" + + "\tuser_id int(11) not null,\n" + + "\tPRIMARY KEY (id),\n" + + "\tKEY fk_table1_ref_foreign_id (foreign_id),\n" + + "\tKEY fk_table1_ref_user_id (user_id)\n" + + ") ENGINE InnoDB,\n" + + " CHARSET latin1", + + hasErr: false, + }, + { + desc: "no constraints", + ddl: "CREATE TABLE `table1` (\n" + + "`id` int(11) NOT NULL AUTO_INCREMENT,\n" + + "`foreign_id` int(11) NOT NULL,\n" + + "`user_id` int(11) NOT NULL,\n" + + "PRIMARY KEY (`id`),\n" + + "KEY `fk_table1_ref_foreign_id` (`foreign_id`),\n" + + "KEY `fk_table1_ref_user_id` (`user_id`)\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=latin1;", + + newDDL: "create table table1 (\n" + + "\tid int(11) not null auto_increment,\n" + + "\tforeign_id int(11) not null,\n" + + "\tuser_id int(11) not null,\n" + + "\tPRIMARY KEY (id),\n" + + "\tKEY fk_table1_ref_foreign_id (foreign_id),\n" + + "\tKEY fk_table1_ref_user_id (user_id)\n" + + ") ENGINE InnoDB,\n" + + " CHARSET latin1", + }, + { + desc: "bad ddl has error", + ddl: "bad ddl", + + hasErr: true, + }, + } + + for _, tc := range tcs { + newDDL, err := stripTableConstraints(tc.ddl) + if tc.hasErr != (err != nil) { + t.Fatalf("hasErr does not match: err: %v, tc: %+v", err, tc) + } + + if newDDL != tc.newDDL { + utils.MustMatch(t, tc.newDDL, newDDL, fmt.Sprintf("newDDL does not match. tc: %+v", tc)) + } + } +} + +func TestAddTablesToVSchema(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + srcks := "source" + ws := &Server{ + ts: ts, + } + tests := []struct { + name string + sourceVSchema *vschemapb.Keyspace + inTargetVSchema *vschemapb.Keyspace + tables []string + copyVSchema bool + wantTargetVSchema *vschemapb.Keyspace + }{ + { + name: "no target vschema; copy source vschema", + sourceVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, + }, + }, + }, + inTargetVSchema: &vschemapb.Keyspace{}, + tables: []string{"t1", "t2", "t3", "t4"}, + copyVSchema: true, + wantTargetVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, + }, + "t4": {}, + }, + }, + }, + { + name: "no target vschema; copy source vschema; sharded source", + sourceVSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + Pinned: "123456", + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, + ColumnVindexes: []*vschemapb.ColumnVindex{ // Should be stripped on target + { + Column: "c1", + Name: "hash", + }, + }, + }, + "t4": { + ColumnVindexes: []*vschemapb.ColumnVindex{ // Should be stripped on target + { + Column: "c1", + Name: "hash", + }, + }, + }, + }, + }, + inTargetVSchema: &vschemapb.Keyspace{}, + tables: []string{"t1", "t2", "t3", "t4"}, + copyVSchema: true, + wantTargetVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + Pinned: "123456", + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, + }, + "t4": {}, + }, + }, + }, + { + name: "target vschema; copy source vschema", + sourceVSchema: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, + }, + "t4": { + ColumnVindexes: []*vschemapb.ColumnVindex{ // Should be stripped on target + { + Column: "c1", + Name: "hash", + }, + }, + }, + }, + }, + inTargetVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": {}, + "t3": {}, + "t4": {}, + }, + }, + tables: []string{"t1", "t2", "t3", "t4"}, + copyVSchema: true, + wantTargetVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": {}, + "t3": {}, + "t4": {}, + }, + }, + }, + { + name: "no target vschema; do not copy source vschema", + sourceVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": { + Type: vindexes.TypeReference, + }, + "t2": { + Type: vindexes.TypeSequence, + }, + "t3": { + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "t2", + }, + }, + }, + }, + inTargetVSchema: &vschemapb.Keyspace{}, + tables: []string{"t1", "t2"}, + copyVSchema: false, + wantTargetVSchema: &vschemapb.Keyspace{ + Tables: map[string]*vschemapb.Table{ + "t1": {}, + "t2": {}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := ts.SaveVSchema(ctx, srcks, tt.sourceVSchema) + require.NoError(t, err) + err = ws.addTablesToVSchema(ctx, srcks, tt.inTargetVSchema, tt.tables, tt.copyVSchema) + require.NoError(t, err) + require.Equal(t, tt.wantTargetVSchema, tt.inTargetVSchema) + }) + } +} + +func TestMigrateVSchema(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + Cell: "cell", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + }}, + } + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + + env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, getWorkflowQuery, getWorkflowRes) + env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzGetCopyState, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzGetWorkflowStatusQuery, getWorkflowStatusRes) + env.tmc.expectVRQuery(200, mzGetLatestCopyState, &sqltypes.Result{}) + + _, err := env.ws.MoveTablesCreate(ctx, &vtctldatapb.MoveTablesCreateRequest{ + Workflow: ms.Workflow, + Cells: []string{ms.Cell}, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}, + SourceKeyspace: ms.SourceKeyspace, + TargetKeyspace: ms.TargetKeyspace, + IncludeTables: []string{"t1"}, + AutoStart: true, + OnDdl: defaultOnDDL, + }) + require.NoError(t, err) + vschema, err := env.ws.ts.GetSrvVSchema(ctx, env.cell) + require.NoError(t, err) + got := fmt.Sprintf("%v", vschema) + want := []string{`keyspaces:{key:"sourceks" value:{}}`, + `keyspaces:{key:"sourceks" value:{}} keyspaces:{key:"targetks" value:{tables:{key:"t1" value:{}}}}`, + `rules:{from_table:"t1" to_tables:"sourceks.t1"}`, + `rules:{from_table:"targetks.t1" to_tables:"sourceks.t1"}`, + } + for _, wantstr := range want { + require.Contains(t, got, wantstr) + } +} + +// TestMoveTablesDDLFlag tests that we save the on-ddl flag value in the workflow. +// Note: +// - TestPlayerDDL tests that the vplayer correctly implements the ddl behavior +// - We have a manual e2e test for the full behavior: TestVReplicationDDLHandling +func TestMoveTablesDDLFlag(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + }}, + } + + for onDDLAction := range binlogdatapb.OnDDLAction_value { + t.Run(fmt.Sprintf("OnDDL Flag:%v", onDDLAction), func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + // This is the default and go does not marshal defaults + // for prototext fields so we use the default insert stmt. + //insert = fmt.Sprintf(`/insert into .vreplication\(.*on_ddl:%s.*`, onDDLAction) + //env.tmc.expectVRQuery(100, "/.*", &sqltypes.Result{}) + + // TODO: we cannot test the actual query generated w/o having a + // TabletManager. Importing the tabletmanager package, however, causes + // a circular dependency. + // The TabletManager portion is tested in rpc_vreplication_test.go. + env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, getWorkflowQuery, getWorkflowRes) + env.tmc.expectVRQuery(200, mzGetCopyState, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzGetWorkflowStatusQuery, getWorkflowStatusRes) + env.tmc.expectVRQuery(200, mzGetLatestCopyState, &sqltypes.Result{}) + + targetShard, err := env.topoServ.GetShardNames(ctx, ms.TargetKeyspace) + require.NoError(t, err) + sourceShard, err := env.topoServ.GetShardNames(ctx, ms.SourceKeyspace) + require.NoError(t, err) + want := fmt.Sprintf("shard_streams:{key:\"%s/%s\" value:{streams:{id:1 tablet:{cell:\"%s\" uid:200} source_shard:\"%s/%s\" position:\"MySQL56/9d10e6ec-07a0-11ee-ae73-8e53f4cf3083:1-97\" status:\"running\" info:\"VStream Lag: 0s\"}}}", + ms.TargetKeyspace, targetShard[0], env.cell, ms.SourceKeyspace, sourceShard[0]) + + res, err := env.ws.MoveTablesCreate(ctx, &vtctldatapb.MoveTablesCreateRequest{ + Workflow: ms.Workflow, + SourceKeyspace: ms.SourceKeyspace, + TargetKeyspace: ms.TargetKeyspace, + IncludeTables: []string{"t1"}, + OnDdl: onDDLAction, + }) + require.NoError(t, err) + require.Equal(t, want, fmt.Sprintf("%+v", res)) + }) + } +} + +// TestMoveTablesNoRoutingRules confirms that MoveTables does not create routing rules if --no-routing-rules is specified. +func TestMoveTablesNoRoutingRules(t *testing.T) { + ms := &vtctldatapb.MaterializeSettings{ + Workflow: "workflow", + SourceKeyspace: "sourceks", + TargetKeyspace: "targetks", + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + }}, + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) + defer env.close() + // This is the default and go does not marshal defaults + // for prototext fields so we use the default insert stmt. + //insert = fmt.Sprintf(`/insert into .vreplication\(.*on_ddl:%s.*`, onDDLAction) + //env.tmc.expectVRQuery(100, "/.*", &sqltypes.Result{}) + + // TODO: we cannot test the actual query generated w/o having a + // TabletManager. Importing the tabletmanager package, however, causes + // a circular dependency. + // The TabletManager portion is tested in rpc_vreplication_test.go. + env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, getWorkflowQuery, getWorkflowRes) + env.tmc.expectVRQuery(200, mzGetCopyState, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzGetWorkflowStatusQuery, getWorkflowStatusRes) + env.tmc.expectVRQuery(200, mzGetLatestCopyState, &sqltypes.Result{}) + + targetShard, err := env.topoServ.GetShardNames(ctx, ms.TargetKeyspace) + require.NoError(t, err) + sourceShard, err := env.topoServ.GetShardNames(ctx, ms.SourceKeyspace) + require.NoError(t, err) + want := fmt.Sprintf("shard_streams:{key:\"%s/%s\" value:{streams:{id:1 tablet:{cell:\"%s\" uid:200} source_shard:\"%s/%s\" position:\"MySQL56/9d10e6ec-07a0-11ee-ae73-8e53f4cf3083:1-97\" status:\"running\" info:\"VStream Lag: 0s\"}}}", + ms.TargetKeyspace, targetShard[0], env.cell, ms.SourceKeyspace, sourceShard[0]) + + res, err := env.ws.MoveTablesCreate(ctx, &vtctldatapb.MoveTablesCreateRequest{ + Workflow: ms.Workflow, + SourceKeyspace: ms.SourceKeyspace, + TargetKeyspace: ms.TargetKeyspace, + IncludeTables: []string{"t1"}, + NoRoutingRules: true, + }) + require.NoError(t, err) + require.Equal(t, want, fmt.Sprintf("%+v", res)) + rr, err := env.ws.ts.GetRoutingRules(ctx) + require.NoError(t, err) + require.Zerof(t, len(rr.Rules), "routing rules should be empty, found %+v", rr.Rules) +} diff --git a/go/vt/vtctl/workflow/resharder.go b/go/vt/vtctl/workflow/resharder.go new file mode 100644 index 00000000000..161b1c4567d --- /dev/null +++ b/go/vt/vtctl/workflow/resharder.go @@ -0,0 +1,348 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +*/ + +package workflow + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "google.golang.org/protobuf/encoding/prototext" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" +) + +type resharder struct { + s *Server + keyspace string + workflow string + sourceShards []*topo.ShardInfo + sourcePrimaries map[string]*topo.TabletInfo + targetShards []*topo.ShardInfo + targetPrimaries map[string]*topo.TabletInfo + vschema *vschemapb.Keyspace + refStreams map[string]*refStream + // This can be single cell name or cell alias but it can + // also be a comma-separated list of cells. + cell string + tabletTypes string + stopAfterCopy bool + onDDL string + deferSecondaryKeys bool +} + +type refStream struct { + workflow string + bls *binlogdatapb.BinlogSource + cell string + tabletTypes string +} + +func (s *Server) buildResharder(ctx context.Context, keyspace, workflow string, sources, targets []string, cell, tabletTypes string) (*resharder, error) { + ts := s.ts + rs := &resharder{ + s: s, + keyspace: keyspace, + workflow: workflow, + sourcePrimaries: make(map[string]*topo.TabletInfo), + targetPrimaries: make(map[string]*topo.TabletInfo), + cell: cell, + tabletTypes: tabletTypes, + } + for _, shard := range sources { + si, err := ts.GetShard(ctx, keyspace, shard) + if err != nil { + return nil, vterrors.Wrapf(err, "GetShard(%s) failed", shard) + } + if !si.IsPrimaryServing { + return nil, fmt.Errorf("source shard %v is not in serving state", shard) + } + rs.sourceShards = append(rs.sourceShards, si) + primary, err := ts.GetTablet(ctx, si.PrimaryAlias) + if err != nil { + return nil, vterrors.Wrapf(err, "GetTablet(%s) failed", si.PrimaryAlias) + } + rs.sourcePrimaries[si.ShardName()] = primary + } + for _, shard := range targets { + si, err := ts.GetShard(ctx, keyspace, shard) + if err != nil { + return nil, vterrors.Wrapf(err, "GetShard(%s) failed", shard) + } + if si.IsPrimaryServing { + return nil, fmt.Errorf("target shard %v is in serving state", shard) + } + rs.targetShards = append(rs.targetShards, si) + primary, err := ts.GetTablet(ctx, si.PrimaryAlias) + if err != nil { + return nil, vterrors.Wrapf(err, "GetTablet(%s) failed", si.PrimaryAlias) + } + rs.targetPrimaries[si.ShardName()] = primary + } + if err := topotools.ValidateForReshard(rs.sourceShards, rs.targetShards); err != nil { + return nil, vterrors.Wrap(err, "ValidateForReshard") + } + if err := rs.validateTargets(ctx); err != nil { + return nil, vterrors.Wrap(err, "validateTargets") + } + + vschema, err := ts.GetVSchema(ctx, keyspace) + if err != nil { + return nil, vterrors.Wrap(err, "GetVSchema") + } + rs.vschema = vschema + + if err := rs.readRefStreams(ctx); err != nil { + return nil, vterrors.Wrap(err, "readRefStreams") + } + return rs, nil +} + +func (rs *resharder) validateTargets(ctx context.Context) error { + err := rs.forAll(rs.targetShards, func(target *topo.ShardInfo) error { + targetPrimary := rs.targetPrimaries[target.ShardName()] + query := fmt.Sprintf("select 1 from _vt.vreplication where db_name=%s", encodeString(targetPrimary.DbName())) + p3qr, err := rs.s.tmc.VReplicationExec(ctx, targetPrimary.Tablet, query) + if err != nil { + return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", targetPrimary.Tablet, query) + } + if len(p3qr.Rows) != 0 { + return errors.New("some streams already exist in the target shards, please clean them up and retry the command") + } + return nil + }) + return err +} + +func (rs *resharder) readRefStreams(ctx context.Context) error { + var mu sync.Mutex + err := rs.forAll(rs.sourceShards, func(source *topo.ShardInfo) error { + sourcePrimary := rs.sourcePrimaries[source.ShardName()] + + query := fmt.Sprintf("select workflow, source, cell, tablet_types from _vt.vreplication where db_name=%s and message != 'FROZEN'", encodeString(sourcePrimary.DbName())) + p3qr, err := rs.s.tmc.VReplicationExec(ctx, sourcePrimary.Tablet, query) + if err != nil { + return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", sourcePrimary.Tablet, query) + } + qr := sqltypes.Proto3ToResult(p3qr) + + mu.Lock() + defer mu.Unlock() + + mustCreate := false + var ref map[string]bool + if rs.refStreams == nil { + rs.refStreams = make(map[string]*refStream) + mustCreate = true + } else { + // Copy the ref streams for comparison. + ref = make(map[string]bool, len(rs.refStreams)) + for k := range rs.refStreams { + ref[k] = true + } + } + for _, row := range qr.Rows { + + workflow := row[0].ToString() + if workflow == "" { + return fmt.Errorf("VReplication streams must have named workflows for migration: shard: %s:%s", source.Keyspace(), source.ShardName()) + } + var bls binlogdatapb.BinlogSource + rowBytes, err := row[1].ToBytes() + if err != nil { + return err + } + if err := prototext.Unmarshal(rowBytes, &bls); err != nil { + return vterrors.Wrapf(err, "prototext.Unmarshal: %v", row) + } + isReference, err := rs.blsIsReference(&bls) + if err != nil { + return vterrors.Wrap(err, "blsIsReference") + } + if !isReference { + continue + } + refKey := fmt.Sprintf("%s:%s:%s", workflow, bls.Keyspace, bls.Shard) + if mustCreate { + rs.refStreams[refKey] = &refStream{ + workflow: workflow, + bls: &bls, + cell: row[2].ToString(), + tabletTypes: row[3].ToString(), + } + } else { + if !ref[refKey] { + return fmt.Errorf("streams are mismatched across source shards for workflow: %s", workflow) + } + delete(ref, refKey) + } + } + if len(ref) != 0 { + return fmt.Errorf("streams are mismatched across source shards: %v", ref) + } + return nil + }) + return err +} + +// blsIsReference is partially copied from streamMigrater.templatize. +// It reuses the constants from that function also. +func (rs *resharder) blsIsReference(bls *binlogdatapb.BinlogSource) (bool, error) { + streamType := StreamTypeUnknown + for _, rule := range bls.Filter.Rules { + typ, err := rs.identifyRuleType(rule) + if err != nil { + return false, err + } + + switch typ { + case StreamTypeSharded: + if streamType == StreamTypeReference { + return false, fmt.Errorf("cannot reshard streams with a mix of reference and sharded tables: %v", bls) + } + streamType = StreamTypeSharded + case StreamTypeReference: + if streamType == StreamTypeSharded { + return false, fmt.Errorf("cannot reshard streams with a mix of reference and sharded tables: %v", bls) + } + streamType = StreamTypeReference + } + } + return streamType == StreamTypeReference, nil +} + +func (rs *resharder) identifyRuleType(rule *binlogdatapb.Rule) (StreamType, error) { + vtable, ok := rs.vschema.Tables[rule.Match] + if !ok && !schema.IsInternalOperationTableName(rule.Match) { + return 0, fmt.Errorf("table %v not found in vschema", rule.Match) + } + if vtable != nil && vtable.Type == vindexes.TypeReference { + return StreamTypeReference, nil + } + // In this case, 'sharded' means that it's not a reference + // table. We don't care about any other subtleties. + return StreamTypeSharded, nil +} + +func (rs *resharder) copySchema(ctx context.Context) error { + oneSource := rs.sourceShards[0].PrimaryAlias + err := rs.forAll(rs.targetShards, func(target *topo.ShardInfo) error { + return rs.s.CopySchemaShard(ctx, oneSource, []string{"/.*"}, nil, false, rs.keyspace, target.ShardName(), 1*time.Second, false) + }) + return err +} + +func (rs *resharder) createStreams(ctx context.Context) error { + var excludeRules []*binlogdatapb.Rule + for tableName, table := range rs.vschema.Tables { + if table.Type == vindexes.TypeReference { + excludeRules = append(excludeRules, &binlogdatapb.Rule{ + Match: tableName, + Filter: "exclude", + }) + } + } + + err := rs.forAll(rs.targetShards, func(target *topo.ShardInfo) error { + targetPrimary := rs.targetPrimaries[target.ShardName()] + + ig := vreplication.NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Stopped, targetPrimary.DbName()) + + // copy excludeRules to prevent data race. + copyExcludeRules := append([]*binlogdatapb.Rule(nil), excludeRules...) + for _, source := range rs.sourceShards { + if !key.KeyRangeIntersect(target.KeyRange, source.KeyRange) { + continue + } + filter := &binlogdatapb.Filter{ + Rules: append(copyExcludeRules, &binlogdatapb.Rule{ + Match: "/.*", + Filter: key.KeyRangeString(target.KeyRange), + }), + } + bls := &binlogdatapb.BinlogSource{ + Keyspace: rs.keyspace, + Shard: source.ShardName(), + Filter: filter, + StopAfterCopy: rs.stopAfterCopy, + OnDdl: binlogdatapb.OnDDLAction(binlogdatapb.OnDDLAction_value[rs.onDDL]), + } + ig.AddRow(rs.workflow, bls, "", rs.cell, rs.tabletTypes, + binlogdatapb.VReplicationWorkflowType_Reshard, + binlogdatapb.VReplicationWorkflowSubType_None, + rs.deferSecondaryKeys) + } + + for _, rstream := range rs.refStreams { + ig.AddRow(rstream.workflow, rstream.bls, "", rstream.cell, rstream.tabletTypes, + // TODO: fix based on original stream. + binlogdatapb.VReplicationWorkflowType_Reshard, + binlogdatapb.VReplicationWorkflowSubType_None, + rs.deferSecondaryKeys) + } + query := ig.String() + if _, err := rs.s.tmc.VReplicationExec(ctx, targetPrimary.Tablet, query); err != nil { + return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", targetPrimary.Tablet, query) + } + return nil + }) + + return err +} + +func (rs *resharder) startStreams(ctx context.Context) error { + err := rs.forAll(rs.targetShards, func(target *topo.ShardInfo) error { + targetPrimary := rs.targetPrimaries[target.ShardName()] + query := fmt.Sprintf("update _vt.vreplication set state='Running' where db_name=%s", encodeString(targetPrimary.DbName())) + if _, err := rs.s.tmc.VReplicationExec(ctx, targetPrimary.Tablet, query); err != nil { + return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", targetPrimary.Tablet, query) + } + return nil + }) + return err +} + +func (rs *resharder) forAll(shards []*topo.ShardInfo, f func(*topo.ShardInfo) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, shard := range shards { + wg.Add(1) + go func(shard *topo.ShardInfo) { + defer wg.Done() + + if err := f(shard); err != nil { + allErrors.RecordError(err) + } + }(shard) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} diff --git a/go/vt/vtctl/workflow/server.go b/go/vt/vtctl/workflow/server.go index f1088157fb1..f42a2dda59c 100644 --- a/go/vt/vtctl/workflow/server.go +++ b/go/vt/vtctl/workflow/server.go @@ -17,31 +17,92 @@ limitations under the License. package workflow import ( + "bytes" "context" "errors" "fmt" + "reflect" "sort" "strings" "sync" + "text/template" "time" + "golang.org/x/sync/semaphore" "google.golang.org/protobuf/encoding/prototext" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sets" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/mysqlctl/tmutils" + "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtctl/schematools" "vitess.io/vitess/go/vt/vtctl/workflow/vexec" - "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" "vitess.io/vitess/go/vt/vttablet/tmclient" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" - "vitess.io/vitess/go/vt/proto/vttime" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + vttimepb "vitess.io/vitess/go/vt/proto/vttime" +) + +// TableCopyProgress stores the row counts and disk sizes of the source and target tables +type TableCopyProgress struct { + TargetRowCount, TargetTableSize int64 + SourceRowCount, SourceTableSize int64 +} + +// CopyProgress stores the TableCopyProgress for all tables still being copied +type CopyProgress map[string]*TableCopyProgress + +// sequenceMetadata contains all of the relevant metadata for a sequence that +// is being used by a table involved in a vreplication workflow. +type sequenceMetadata struct { + // The name of the sequence table. + backingTableName string + // The keyspace where the backing table lives. + backingTableKeyspace string + // The dbName in use by the keyspace where the backing table lives. + backingTableDBName string + // The name of the table using the sequence. + usingTableName string + // The dbName in use by the keyspace where the using table lives. + usingTableDBName string + // The using table definition. + usingTableDefinition *vschemapb.Table +} + +const ( + cannotSwitchError = "workflow has errors" + cannotSwitchCopyIncomplete = "copy is still in progress" + cannotSwitchHighLag = "replication lag %ds is higher than allowed lag %ds" + cannotSwitchFailedTabletRefresh = "could not refresh all of the tablets involved in the operation:\n%s" + cannotSwitchFrozen = "workflow is frozen" + + // Number of LOCK TABLES cycles to perform on the sources during SwitchWrites. + lockTablesCycles = 2 + // Time to wait between LOCK TABLES cycles on the sources during SwitchWrites. + lockTablesCycleDelay = time.Duration(100 * time.Millisecond) + + // Default duration used for lag, timeout, etc. + defaultDuration = 30 * time.Second ) var ( @@ -55,19 +116,18 @@ var ( // ErrMultipleTargetKeyspaces occurs when a workflow somehow has multiple // target keyspaces across different shard primaries. This should be // impossible. - ErrMultipleTargetKeyspaces = errors.New("multiple target keyspaces for a single workflow") + ErrMultipleTargetKeyspaces = errors.New("multiple target keyspaces for a single workflow") + ErrWorkflowNotFullySwitched = errors.New("cannot complete workflow because you have not yet switched all read and write traffic") + ErrWorkflowPartiallySwitched = errors.New("cannot cancel workflow because you have already switched some or all read and write traffic") ) // Server provides an API to work with Vitess workflows, like vreplication // workflows (MoveTables, Reshard, etc) and schema migration workflows. -// -// NB: This is in alpha, and you probably don't want to depend on it (yet!). -// Currently, it provides only a read-only API to vreplication workflows. Write -// actions on vreplication workflows, and schema migration workflows entirely, -// are not yet supported, but planned. type Server struct { ts *topo.Server tmc tmclient.TabletManagerClient + // Limt the number of concurrent background goroutines if needed. + sem *semaphore.Weighted } // NewServer returns a new server instance with the given topo.Server and @@ -262,6 +322,21 @@ func (s *Server) GetCellsWithTableReadsSwitched( return cellsSwitched, cellsNotSwitched, nil } +func (s *Server) GetWorkflow(ctx context.Context, keyspace, workflow string) (*vtctldatapb.Workflow, error) { + res, err := s.GetWorkflows(ctx, &vtctldatapb.GetWorkflowsRequest{ + Keyspace: keyspace, + Workflow: workflow, + }) + if err != nil { + return nil, err + } + if len(res.Workflows) != 1 { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected number of workflows returned for %s.%s; expected 1, got %d", + keyspace, workflow, len(res.Workflows)) + } + return res.Workflows[0], nil +} + // GetWorkflows returns a list of all workflows that exist in a given keyspace, // with some additional filtering depending on the request parameters (for // example, ActiveOnly=true restricts the search to only workflows that are @@ -277,8 +352,15 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows span.Annotate("active_only", req.ActiveOnly) where := "" + predicates := []string{} if req.ActiveOnly { - where = "WHERE state <> 'Stopped'" + predicates = append(predicates, "state <> 'Stopped'") + } + if req.Workflow != "" { + predicates = append(predicates, fmt.Sprintf("workflow = '%s'", req.Workflow)) + } + if len(predicates) > 0 { + where = fmt.Sprintf("WHERE %s", strings.Join(predicates, " AND ")) } query := fmt.Sprintf(` @@ -334,7 +416,7 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows span.Annotate("workflow", workflow.Name) span.Annotate("tablet_alias", tablet.AliasString()) - id, err := evalengine.ToInt64(row["id"]) + id, err := row["id"].ToCastInt64() if err != nil { return err } @@ -353,12 +435,12 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows state := row["state"].ToString() dbName := row["db_name"].ToString() - timeUpdatedSeconds, err := evalengine.ToInt64(row["time_updated"]) + timeUpdatedSeconds, err := row["time_updated"].ToCastInt64() if err != nil { return err } - transactionTimeSeconds, err := evalengine.ToInt64(row["transaction_timestamp"]) + transactionTimeSeconds, err := row["transaction_timestamp"].ToCastInt64() if err != nil { return err } @@ -381,17 +463,16 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows StopPosition: stopPos, State: state, DbName: dbName, - TransactionTimestamp: &vttime.Time{ + TransactionTimestamp: &vttimepb.Time{ Seconds: transactionTimeSeconds, }, - TimeUpdated: &vttime.Time{ + TimeUpdated: &vttimepb.Time{ Seconds: timeUpdatedSeconds, }, Message: message, Tags: tagArray, } - workflow.WorkflowType = binlogdatapb.VReplicationWorkflowType_name[workflowType] - workflow.WorkflowSubType = binlogdatapb.VReplicationWorkflowSubType_name[workflowSubType] + stream.CopyStates, err = s.getWorkflowCopyStates(ctx, tablet, id) if err != nil { return err @@ -399,15 +480,6 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows span.Annotate("num_copy_states", len(stream.CopyStates)) - switch { - case strings.Contains(strings.ToLower(stream.Message), "error"): - stream.State = "Error" - case stream.State == "Running" && len(stream.CopyStates) > 0: - stream.State = "Copying" - case stream.State == "Running" && int64(time.Now().Second())-timeUpdatedSeconds > 10: - stream.State = "Lagging" - } - // At this point, we're going to start modifying the maps defined // outside this function, as well as fields on the passed-in Workflow // pointer. Since we're running concurrently, take the lock. @@ -417,6 +489,18 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows m.Lock() defer m.Unlock() + workflow.WorkflowType = binlogdatapb.VReplicationWorkflowType_name[workflowType] + workflow.WorkflowSubType = binlogdatapb.VReplicationWorkflowSubType_name[workflowSubType] + + switch { + case strings.Contains(strings.ToLower(stream.Message), "error"): + stream.State = binlogdatapb.VReplicationWorkflowState_Error.String() + case stream.State == binlogdatapb.VReplicationWorkflowState_Running.String() && len(stream.CopyStates) > 0: + stream.State = binlogdatapb.VReplicationWorkflowState_Copying.String() + case stream.State == binlogdatapb.VReplicationWorkflowState_Running.String() && int64(time.Now().Second())-timeUpdatedSeconds > 10: + stream.State = binlogdatapb.VReplicationWorkflowState_Lagging.String() + } + shardStreamKey := fmt.Sprintf("%s/%s", tablet.Shard, tablet.AliasString()) shardStream, ok := workflow.ShardStreams[shardStreamKey] if !ok { @@ -577,13 +661,13 @@ ORDER BY } for _, row := range qr.Rows { - id, err := evalengine.ToInt64(row[0]) + id, err := row[0].ToCastInt64() if err != nil { markErrors(err) continue } - streamID, err := evalengine.ToInt64(row[1]) + streamID, err := row[1].ToCastInt64() if err != nil { markErrors(err) continue @@ -605,7 +689,7 @@ ORDER BY continue } - count, err := evalengine.ToInt64(row[7]) + count, err := row[7].ToCastInt64() if err != nil { markErrors(err) continue @@ -616,10 +700,10 @@ ORDER BY StreamId: streamID, Type: typ, State: state, - CreatedAt: &vttime.Time{ + CreatedAt: &vttimepb.Time{ Seconds: createdAt.Unix(), }, - UpdatedAt: &vttime.Time{ + UpdatedAt: &vttimepb.Time{ Seconds: updatedAt.Unix(), }, Message: message, @@ -718,6 +802,120 @@ ORDER BY }, nil } +func (s *Server) getWorkflowState(ctx context.Context, targetKeyspace, workflowName string) (*trafficSwitcher, *State, error) { + ts, err := s.buildTrafficSwitcher(ctx, targetKeyspace, workflowName) + + if err != nil { + log.Errorf("buildTrafficSwitcher failed: %v", err) + return nil, nil, err + } + + state := &State{ + Workflow: workflowName, + SourceKeyspace: ts.SourceKeyspaceName(), + TargetKeyspace: targetKeyspace, + IsPartialMigration: ts.isPartialMigration, + } + + var ( + reverse bool + sourceKeyspace string + ) + + // We reverse writes by using the source_keyspace.workflowname_reverse workflow + // spec, so we need to use the source of the reverse workflow, which is the + // target of the workflow initiated by the user for checking routing rules. + // Similarly we use a target shard of the reverse workflow as the original + // source to check if writes have been switched. + if strings.HasSuffix(workflowName, "_reverse") { + reverse = true + // Flip the source and target keyspaces. + sourceKeyspace = state.TargetKeyspace + targetKeyspace = state.SourceKeyspace + workflowName = ReverseWorkflowName(workflowName) + } else { + sourceKeyspace = state.SourceKeyspace + } + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + state.WorkflowType = TypeMoveTables + + // We assume a consistent state, so only choose routing rule for one table. + if len(ts.Tables()) == 0 { + return nil, nil, fmt.Errorf("no tables in workflow %s.%s", targetKeyspace, workflowName) + + } + table := ts.Tables()[0] + + if ts.isPartialMigration { // shard level traffic switching is all or nothing + shardRoutingRules, err := s.ts.GetShardRoutingRules(ctx) + if err != nil { + return nil, nil, err + } + + rules := shardRoutingRules.Rules + for _, rule := range rules { + switch rule.ToKeyspace { + case sourceKeyspace: + state.ShardsNotYetSwitched = append(state.ShardsNotYetSwitched, rule.Shard) + case targetKeyspace: + state.ShardsAlreadySwitched = append(state.ShardsAlreadySwitched, rule.Shard) + default: + // Not a relevant rule. + } + } + } else { + state.RdonlyCellsSwitched, state.RdonlyCellsNotSwitched, err = s.GetCellsWithTableReadsSwitched(ctx, targetKeyspace, table, topodatapb.TabletType_RDONLY) + if err != nil { + return nil, nil, err + } + + state.ReplicaCellsSwitched, state.ReplicaCellsNotSwitched, err = s.GetCellsWithTableReadsSwitched(ctx, targetKeyspace, table, topodatapb.TabletType_REPLICA) + if err != nil { + return nil, nil, err + } + globalRules, err := topotools.GetRoutingRules(ctx, ts.TopoServer()) + if err != nil { + return nil, nil, err + } + for _, table := range ts.Tables() { + rr := globalRules[table] + // If a rule exists for the table and points to the target keyspace, then + // writes have been switched. + if len(rr) > 0 && rr[0] == fmt.Sprintf("%s.%s", targetKeyspace, table) { + state.WritesSwitched = true + break + } + } + } + } else { + state.WorkflowType = TypeReshard + + // We assume a consistent state, so only choose one shard. + var shard *topo.ShardInfo + if reverse { + shard = ts.TargetShards()[0] + } else { + shard = ts.SourceShards()[0] + } + + state.RdonlyCellsSwitched, state.RdonlyCellsNotSwitched, err = s.GetCellsWithShardReadsSwitched(ctx, targetKeyspace, shard, topodatapb.TabletType_RDONLY) + if err != nil { + return nil, nil, err + } + + state.ReplicaCellsSwitched, state.ReplicaCellsNotSwitched, err = s.GetCellsWithShardReadsSwitched(ctx, targetKeyspace, shard, topodatapb.TabletType_REPLICA) + if err != nil { + return nil, nil, err + } + + if !shard.IsPrimaryServing { + state.WritesSwitched = true + } + } + + return ts, state, nil +} + func (s *Server) getWorkflowCopyStates(ctx context.Context, tablet *topo.TabletInfo, id int64) ([]*vtctldatapb.Workflow_Stream_CopyState, error) { span, ctx := trace.NewSpan(ctx, "workflow.Server.getWorkflowCopyStates") defer span.Finish() @@ -749,3 +947,1925 @@ func (s *Server) getWorkflowCopyStates(ctx context.Context, tablet *topo.TabletI return copyStates, nil } + +// MoveTablesCreate is part of the vtctlservicepb.VtctldServer interface. +// It passes the embedded TabletRequest object to the given keyspace's +// target primary tablets that will be executing the workflow. +func (s *Server) MoveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTablesCreateRequest) (res *vtctldatapb.WorkflowStatusResponse, err error) { + span, ctx := trace.NewSpan(ctx, "workflow.Server.MoveTablesCreate") + defer span.Finish() + + span.Annotate("keyspace", req.TargetKeyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("cells", req.Cells) + span.Annotate("tablet_types", req.TabletTypes) + span.Annotate("on_ddl", req.OnDdl) + + sourceKeyspace := req.SourceKeyspace + targetKeyspace := req.TargetKeyspace + //FIXME validate tableSpecs, allTables, excludeTables + var ( + tables = req.IncludeTables + externalTopo *topo.Server + sourceTopo = s.ts + ) + + // When the source is an external cluster mounted using the Mount command. + if req.ExternalClusterName != "" { + externalTopo, err = s.ts.OpenExternalVitessClusterServer(ctx, req.ExternalClusterName) + if err != nil { + return nil, err + } + sourceTopo = externalTopo + log.Infof("Successfully opened external topo: %+v", externalTopo) + } + + var vschema *vschemapb.Keyspace + var origVSchema *vschemapb.Keyspace // If we need to rollback a failed create + vschema, err = s.ts.GetVSchema(ctx, targetKeyspace) + if err != nil { + return nil, err + } + if vschema == nil { + return nil, fmt.Errorf("no vschema found for target keyspace %s", targetKeyspace) + } + ksTables, err := getTablesInKeyspace(ctx, sourceTopo, s.tmc, sourceKeyspace) + if err != nil { + return nil, err + } + if len(tables) > 0 { + err = s.validateSourceTablesExist(ctx, sourceKeyspace, ksTables, tables) + if err != nil { + return nil, err + } + } else { + if req.AllTables { + tables = ksTables + } else { + return nil, fmt.Errorf("no tables to move") + } + } + if len(req.ExcludeTables) > 0 { + err = s.validateSourceTablesExist(ctx, sourceKeyspace, ksTables, req.ExcludeTables) + if err != nil { + return nil, err + } + } + var tables2 []string + for _, t := range tables { + if shouldInclude(t, req.ExcludeTables) { + tables2 = append(tables2, t) + } + } + tables = tables2 + if len(tables) == 0 { + return nil, fmt.Errorf("no tables to move") + } + log.Infof("Found tables to move: %s", strings.Join(tables, ",")) + + if !vschema.Sharded { + // Save the original in case we need to restore it for a late failure + // in the defer(). + origVSchema = vschema.CloneVT() + if err := s.addTablesToVSchema(ctx, sourceKeyspace, vschema, tables, externalTopo == nil); err != nil { + return nil, err + } + } + + ms := &vtctldatapb.MaterializeSettings{ + Workflow: req.Workflow, + MaterializationIntent: vtctldatapb.MaterializationIntent_MOVETABLES, + SourceKeyspace: sourceKeyspace, + TargetKeyspace: targetKeyspace, + Cell: strings.Join(req.Cells, ","), + TabletTypes: topoproto.MakeStringTypeCSV(req.TabletTypes), + TabletSelectionPreference: req.TabletSelectionPreference, + StopAfterCopy: req.StopAfterCopy, + ExternalCluster: req.ExternalClusterName, + SourceShards: req.SourceShards, + OnDdl: req.OnDdl, + DeferSecondaryKeys: req.DeferSecondaryKeys, + AtomicCopy: req.AtomicCopy, + } + if req.SourceTimeZone != "" { + ms.SourceTimeZone = req.SourceTimeZone + ms.TargetTimeZone = "UTC" + } + createDDLMode := createDDLAsCopy + if req.DropForeignKeys { + createDDLMode = createDDLAsCopyDropForeignKeys + } + + for _, table := range tables { + buf := sqlparser.NewTrackedBuffer(nil) + buf.Myprintf("select * from %v", sqlparser.NewIdentifierCS(table)) + ms.TableSettings = append(ms.TableSettings, &vtctldatapb.TableMaterializeSettings{ + TargetTable: table, + SourceExpression: buf.String(), + CreateDdl: createDDLMode, + }) + } + mz := &materializer{ + ctx: ctx, + ts: s.ts, + sourceTs: sourceTopo, + tmc: s.tmc, + ms: ms, + } + err = mz.prepareMaterializerStreams(req) + if err != nil { + return nil, err + } + + // If we get an error after this point, where the vreplication streams/records + // have been created, then we clean up the workflow's artifacts. + defer func() { + if err != nil { + ts, cerr := s.buildTrafficSwitcher(ctx, ms.TargetKeyspace, ms.Workflow) + if cerr != nil { + err = vterrors.Wrapf(err, "failed to cleanup workflow artifacts: %v", cerr) + } + if cerr := s.dropArtifacts(ctx, false, &switcher{s: s, ts: ts}); cerr != nil { + err = vterrors.Wrapf(err, "failed to cleanup workflow artifacts: %v", cerr) + } + if origVSchema == nil { // There's no previous version to restore + return + } + if cerr := s.ts.SaveVSchema(ctx, targetKeyspace, origVSchema); cerr != nil { + err = vterrors.Wrapf(err, "failed to restore original target vschema: %v", cerr) + } + } + }() + + // Now that the streams have been successfully created, let's put the associated + // routing rules in place. + if externalTopo == nil { + if req.NoRoutingRules { + log.Warningf("Found --no-routing-rules flag, not creating routing rules for workflow %s.%s", targetKeyspace, req.Workflow) + } else { + // Save routing rules before vschema. If we save vschema first, and routing + // rules fails to save, we may generate duplicate table errors. + if mz.isPartial { + if err := createDefaultShardRoutingRules(mz.ctx, mz.ms, mz.ts); err != nil { + return nil, err + } + } + + rules, err := topotools.GetRoutingRules(ctx, s.ts) + if err != nil { + return nil, err + } + for _, table := range tables { + toSource := []string{sourceKeyspace + "." + table} + rules[table] = toSource + rules[table+"@replica"] = toSource + rules[table+"@rdonly"] = toSource + rules[targetKeyspace+"."+table] = toSource + rules[targetKeyspace+"."+table+"@replica"] = toSource + rules[targetKeyspace+"."+table+"@rdonly"] = toSource + rules[targetKeyspace+"."+table] = toSource + rules[sourceKeyspace+"."+table+"@replica"] = toSource + rules[sourceKeyspace+"."+table+"@rdonly"] = toSource + } + if err := topotools.SaveRoutingRules(ctx, s.ts, rules); err != nil { + return nil, err + } + } + if vschema != nil { + // We added to the vschema. + if err := s.ts.SaveVSchema(ctx, targetKeyspace, vschema); err != nil { + return nil, err + } + } + + } + if err := s.ts.RebuildSrvVSchema(ctx, nil); err != nil { + return nil, err + } + + if ms.SourceTimeZone != "" { + if err := mz.checkTZConversion(ctx, ms.SourceTimeZone); err != nil { + return nil, err + } + } + + tabletShards, err := s.collectTargetStreams(ctx, mz) + if err != nil { + return nil, err + } + + migrationID, err := getMigrationID(targetKeyspace, tabletShards) + if err != nil { + return nil, err + } + + if mz.ms.ExternalCluster == "" { + exists, tablets, err := s.checkIfPreviousJournalExists(ctx, mz, migrationID) + if err != nil { + return nil, err + } + if exists { + log.Errorf("Found a previous journal entry for %d", migrationID) + msg := fmt.Sprintf("found an entry from a previous run for migration id %d in _vt.resharding_journal on tablets %s, ", + migrationID, strings.Join(tablets, ",")) + msg += fmt.Sprintf("please review and delete it before proceeding and then start the workflow using: MoveTables --workflow %s --target-keyspace %s start", + req.Workflow, req.TargetKeyspace) + return nil, fmt.Errorf(msg) + } + } + + if req.AutoStart { + if err := mz.startStreams(ctx); err != nil { + return nil, err + } + } + + return s.WorkflowStatus(ctx, &vtctldatapb.WorkflowStatusRequest{ + Keyspace: targetKeyspace, + Workflow: req.Workflow, + }) +} + +// MoveTablesComplete is part of the vtctlservicepb.VtctldServer interface. +// It cleans up a successful MoveTables workflow and its related artifacts. +func (s *Server) MoveTablesComplete(ctx context.Context, req *vtctldatapb.MoveTablesCompleteRequest) (*vtctldatapb.MoveTablesCompleteResponse, error) { + span, ctx := trace.NewSpan(ctx, "workflow.Server.MoveTablesComplete") + defer span.Finish() + + ts, state, err := s.getWorkflowState(ctx, req.TargetKeyspace, req.Workflow) + if err != nil { + return nil, err + } + + var summary string + if req.DryRun { + summary = fmt.Sprintf("Complete dry run results for workflow %s.%s at %v", req.TargetKeyspace, req.Workflow, time.Now().UTC().Format(time.RFC822)) + } else { + summary = fmt.Sprintf("Successfully completed the %s workflow in the %s keyspace", req.Workflow, req.TargetKeyspace) + } + var dryRunResults *[]string + + if state.WorkflowType == TypeMigrate { + dryRunResults, err = s.finalizeMigrateWorkflow(ctx, req.TargetKeyspace, req.Workflow, strings.Join(ts.tables, ","), + false, req.KeepData, req.KeepRoutingRules, req.DryRun) + if err != nil { + return nil, vterrors.Wrapf(err, "failed to finalize the %s workflow in the %s keyspace", + req.Workflow, req.TargetKeyspace) + } + resp := &vtctldatapb.MoveTablesCompleteResponse{ + Summary: summary, + } + if dryRunResults != nil { + resp.DryRunResults = *dryRunResults + } + return resp, nil + } + + if !state.WritesSwitched || len(state.ReplicaCellsNotSwitched) > 0 || len(state.RdonlyCellsNotSwitched) > 0 { + return nil, ErrWorkflowNotFullySwitched + } + var renameTable TableRemovalType + if req.RenameTables { + renameTable = RenameTable + } else { + renameTable = DropTable + } + if dryRunResults, err = s.dropSources(ctx, ts, renameTable, req.KeepData, req.KeepRoutingRules, false, req.DryRun); err != nil { + return nil, err + } + + resp := &vtctldatapb.MoveTablesCompleteResponse{ + Summary: summary, + } + if dryRunResults != nil { + resp.DryRunResults = *dryRunResults + } + + return resp, nil +} + +// ReshardCreate is part of the vtctlservicepb.VtctldServer interface. +func (s *Server) ReshardCreate(ctx context.Context, req *vtctldatapb.ReshardCreateRequest) (*vtctldatapb.WorkflowStatusResponse, error) { + span, ctx := trace.NewSpan(ctx, "workflow.Server.ReshardCreate") + defer span.Finish() + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("workflow", req.Workflow) + span.Annotate("source_shards", req.SourceShards) + span.Annotate("target_shards", req.TargetShards) + span.Annotate("cells", req.Cells) + span.Annotate("tablet_types", req.TabletTypes) + span.Annotate("on_ddl", req.OnDdl) + + keyspace := req.Keyspace + cells := req.Cells + // TODO: validate workflow does not exist. + + if err := s.ts.ValidateSrvKeyspace(ctx, keyspace, strings.Join(cells, ",")); err != nil { + err2 := vterrors.Wrapf(err, "SrvKeyspace for keyspace %s is corrupt for cell(s) %s", keyspace, cells) + log.Errorf("%w", err2) + return nil, err + } + rs, err := s.buildResharder(ctx, keyspace, req.Workflow, req.SourceShards, req.TargetShards, strings.Join(cells, ","), "") + if err != nil { + return nil, vterrors.Wrap(err, "buildResharder") + } + rs.onDDL = req.OnDdl + rs.stopAfterCopy = req.StopAfterCopy + rs.deferSecondaryKeys = req.DeferSecondaryKeys + if !req.SkipSchemaCopy { + if err := rs.copySchema(ctx); err != nil { + return nil, vterrors.Wrap(err, "copySchema") + } + } + if err := rs.createStreams(ctx); err != nil { + return nil, vterrors.Wrap(err, "createStreams") + } + + if req.AutoStart { + if err := rs.startStreams(ctx); err != nil { + return nil, vterrors.Wrap(err, "startStreams") + } + } else { + log.Warningf("Streams will not be started since --auto-start is set to false") + } + return nil, nil +} + +// WorkflowDelete is part of the vtctlservicepb.VtctldServer interface. +// It passes on the request to the target primary tablets that are +// participating in the given workflow. +func (s *Server) WorkflowDelete(ctx context.Context, req *vtctldatapb.WorkflowDeleteRequest) (*vtctldatapb.WorkflowDeleteResponse, error) { + span, ctx := trace.NewSpan(ctx, "workflow.Server.WorkflowDelete") + defer span.Finish() + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("workflow", req.Workflow) + + // Cleanup related data and artifacts. + if _, err := s.DropTargets(ctx, req.Keyspace, req.Workflow, req.KeepData, req.KeepRoutingRules, false); err != nil { + if topo.IsErrType(err, topo.NoNode) { + return nil, vterrors.Wrapf(err, "%s keyspace does not exist", req.Keyspace) + } + return nil, err + } + + deleteReq := &tabletmanagerdatapb.DeleteVReplicationWorkflowRequest{ + Workflow: req.Workflow, + } + vx := vexec.NewVExec(req.Keyspace, req.Workflow, s.ts, s.tmc) + callback := func(ctx context.Context, tablet *topo.TabletInfo) (*querypb.QueryResult, error) { + res, err := s.tmc.DeleteVReplicationWorkflow(ctx, tablet.Tablet, deleteReq) + if err != nil { + return nil, err + } + // Best effort cleanup and optimization of related data. + s.deleteWorkflowVDiffData(ctx, tablet.Tablet, req.Workflow) + s.optimizeCopyStateTable(tablet.Tablet) + return res.Result, err + } + res, err := vx.CallbackContext(ctx, callback) + if err != nil { + return nil, err + } + + if len(res) == 0 { + return nil, fmt.Errorf("the %s workflow does not exist in the %s keyspace", req.Workflow, req.Keyspace) + } + + response := &vtctldatapb.WorkflowDeleteResponse{} + response.Summary = fmt.Sprintf("Successfully cancelled the %s workflow in the %s keyspace", req.Workflow, req.Keyspace) + details := make([]*vtctldatapb.WorkflowDeleteResponse_TabletInfo, 0, len(res)) + for tinfo, tres := range res { + result := &vtctldatapb.WorkflowDeleteResponse_TabletInfo{ + Tablet: tinfo.Alias, + Deleted: tres.RowsAffected > 0, // Can be more than one with shard merges + } + details = append(details, result) + } + response.Details = details + return response, nil +} + +func (s *Server) WorkflowStatus(ctx context.Context, req *vtctldatapb.WorkflowStatusRequest) (*vtctldatapb.WorkflowStatusResponse, error) { + ts, state, err := s.getWorkflowState(ctx, req.Keyspace, req.Workflow) + if err != nil { + return nil, err + } + copyProgress, err := s.GetCopyProgress(ctx, ts, state) + if err != nil { + return nil, err + } + resp := &vtctldatapb.WorkflowStatusResponse{} + if copyProgress != nil { + resp.TableCopyState = make(map[string]*vtctldatapb.WorkflowStatusResponse_TableCopyState, len(*copyProgress)) + // We sort the tables for intuitive and consistent output. + var tables []string + for table := range *copyProgress { + tables = append(tables, table) + } + sort.Strings(tables) + var progress TableCopyProgress + for _, table := range tables { + var rowCountPct, tableSizePct float32 + resp.TableCopyState[table] = &vtctldatapb.WorkflowStatusResponse_TableCopyState{} + progress = *(*copyProgress)[table] + if progress.SourceRowCount > 0 { + rowCountPct = float32(100.0 * float64(progress.TargetRowCount) / float64(progress.SourceRowCount)) + } + if progress.SourceTableSize > 0 { + tableSizePct = float32(100.0 * float64(progress.TargetTableSize) / float64(progress.SourceTableSize)) + } + resp.TableCopyState[table].RowsCopied = progress.TargetRowCount + resp.TableCopyState[table].RowsTotal = progress.SourceRowCount + resp.TableCopyState[table].RowsPercentage = rowCountPct + resp.TableCopyState[table].BytesCopied = progress.TargetTableSize + resp.TableCopyState[table].BytesTotal = progress.SourceTableSize + resp.TableCopyState[table].BytesPercentage = tableSizePct + } + } + + workflow, err := s.GetWorkflow(ctx, req.Keyspace, req.Workflow) + if err != nil { + return nil, err + } + + // The stream key is target keyspace/tablet alias, e.g. 0/test-0000000100. + // We sort the keys for intuitive and consistent output. + streamKeys := make([]string, 0, len(workflow.ShardStreams)) + for streamKey := range workflow.ShardStreams { + streamKeys = append(streamKeys, streamKey) + } + sort.Strings(streamKeys) + resp.ShardStreams = make(map[string]*vtctldatapb.WorkflowStatusResponse_ShardStreams, len(streamKeys)) + for _, streamKey := range streamKeys { + streams := workflow.ShardStreams[streamKey].GetStreams() + keyParts := strings.Split(streamKey, "/") + if len(keyParts) != 2 { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected stream key format in: %s ; expect /", + streamKey) + } + // We want to use target keyspace/shard as the map key for the + // response, e.g. customer/-80. + ksShard := fmt.Sprintf("%s/%s", req.Keyspace, keyParts[0]) + resp.ShardStreams[ksShard] = &vtctldatapb.WorkflowStatusResponse_ShardStreams{} + resp.ShardStreams[ksShard].Streams = make([]*vtctldatapb.WorkflowStatusResponse_ShardStreamState, len(streams)) + for i, st := range streams { + info := []string{} + ts := &vtctldatapb.WorkflowStatusResponse_ShardStreamState{} + if st.State == binlogdatapb.VReplicationWorkflowState_Error.String() { + info = append(info, st.Message) + } else if st.Position == "" { + info = append(info, "VStream has not started") + } else { + now := time.Now().Nanosecond() + updateLag := int64(now) - st.TimeUpdated.Seconds + if updateLag > 0*1e9 { + info = append(info, "VStream may not be running") + } + txLag := int64(now) - st.TransactionTimestamp.Seconds + info = append(info, fmt.Sprintf("VStream Lag: %ds", txLag/1e9)) + if st.TransactionTimestamp.Seconds > 0 { // if no events occur after copy phase, TransactionTimeStamp can be 0 + info = append(info, fmt.Sprintf("; Tx time: %s.", time.Unix(st.TransactionTimestamp.Seconds, 0).Format(time.ANSIC))) + } + } + ts.Id = int32(st.Id) + ts.Tablet = st.Tablet + ts.SourceShard = fmt.Sprintf("%s/%s", st.BinlogSource.Keyspace, st.BinlogSource.Shard) + ts.Position = st.Position + ts.Status = st.State + ts.Info = strings.Join(info, "; ") + resp.ShardStreams[ksShard].Streams[i] = ts + } + } + + return resp, nil +} + +// GetCopyProgress returns the progress of all tables being copied in the +// workflow. +func (s *Server) GetCopyProgress(ctx context.Context, ts *trafficSwitcher, state *State) (*CopyProgress, error) { + getTablesQuery := "select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = %d" + getRowCountQuery := "select table_name, table_rows, data_length from information_schema.tables where table_schema = %s and table_name in (%s)" + tables := make(map[string]bool) + const MaxRows = 1000 + sourcePrimaries := make(map[*topodatapb.TabletAlias]bool) + for _, target := range ts.targets { + for id, bls := range target.Sources { + query := fmt.Sprintf(getTablesQuery, id) + p3qr, err := s.tmc.ExecuteFetchAsDba(ctx, target.GetPrimary().Tablet, true, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ + Query: []byte(query), + MaxRows: MaxRows, + }) + if err != nil { + return nil, err + } + if len(p3qr.Rows) < 1 { + continue + } + qr := sqltypes.Proto3ToResult(p3qr) + for i := 0; i < len(p3qr.Rows); i++ { + tables[qr.Rows[i][0].ToString()] = true + } + sourcesi, err := s.ts.GetShard(ctx, bls.Keyspace, bls.Shard) + if err != nil { + return nil, err + } + found := false + for existingSource := range sourcePrimaries { + if existingSource.Uid == sourcesi.PrimaryAlias.Uid { + found = true + } + } + if !found { + sourcePrimaries[sourcesi.PrimaryAlias] = true + } + } + } + if len(tables) == 0 { + return nil, nil + } + var tableList []string + targetRowCounts := make(map[string]int64) + sourceRowCounts := make(map[string]int64) + targetTableSizes := make(map[string]int64) + sourceTableSizes := make(map[string]int64) + + for table := range tables { + tableList = append(tableList, encodeString(table)) + targetRowCounts[table] = 0 + sourceRowCounts[table] = 0 + targetTableSizes[table] = 0 + sourceTableSizes[table] = 0 + } + + var getTableMetrics = func(tablet *topodatapb.Tablet, query string, rowCounts *map[string]int64, tableSizes *map[string]int64) error { + p3qr, err := s.tmc.ExecuteFetchAsDba(ctx, tablet, true, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ + Query: []byte(query), + MaxRows: uint64(len(tables)), + }) + if err != nil { + return err + } + qr := sqltypes.Proto3ToResult(p3qr) + for i := 0; i < len(qr.Rows); i++ { + table := qr.Rows[i][0].ToString() + rowCount, err := qr.Rows[i][1].ToCastInt64() + if err != nil { + return err + } + tableSize, err := qr.Rows[i][2].ToCastInt64() + if err != nil { + return err + } + (*rowCounts)[table] += rowCount + (*tableSizes)[table] += tableSize + } + return nil + } + sourceDbName := "" + for _, tsSource := range ts.sources { + sourceDbName = tsSource.GetPrimary().DbName() + break + } + if sourceDbName == "" { + return nil, fmt.Errorf("no sources found for workflow %s.%s", state.TargetKeyspace, state.Workflow) + } + targetDbName := "" + for _, tsTarget := range ts.targets { + targetDbName = tsTarget.GetPrimary().DbName() + break + } + if sourceDbName == "" || targetDbName == "" { + return nil, fmt.Errorf("workflow %s.%s is incorrectly configured", state.TargetKeyspace, state.Workflow) + } + sort.Strings(tableList) // sort list for repeatability for mocking in tests + tablesStr := strings.Join(tableList, ",") + query := fmt.Sprintf(getRowCountQuery, encodeString(targetDbName), tablesStr) + for _, target := range ts.targets { + tablet := target.GetPrimary().Tablet + if err := getTableMetrics(tablet, query, &targetRowCounts, &targetTableSizes); err != nil { + return nil, err + } + } + + query = fmt.Sprintf(getRowCountQuery, encodeString(sourceDbName), tablesStr) + for source := range sourcePrimaries { + ti, err := s.ts.GetTablet(ctx, source) + tablet := ti.Tablet + if err != nil { + return nil, err + } + if err := getTableMetrics(tablet, query, &sourceRowCounts, &sourceTableSizes); err != nil { + return nil, err + } + } + + copyProgress := CopyProgress{} + for table, rowCount := range targetRowCounts { + copyProgress[table] = &TableCopyProgress{ + TargetRowCount: rowCount, + TargetTableSize: targetTableSizes[table], + SourceRowCount: sourceRowCounts[table], + SourceTableSize: sourceTableSizes[table], + } + } + return ©Progress, nil +} + +// WorkflowUpdate is part of the vtctlservicepb.VtctldServer interface. +// It passes the embedded TabletRequest object to the given keyspace's +// target primary tablets that are participating in the given workflow. +func (s *Server) WorkflowUpdate(ctx context.Context, req *vtctldatapb.WorkflowUpdateRequest) (*vtctldatapb.WorkflowUpdateResponse, error) { + span, ctx := trace.NewSpan(ctx, "workflow.Server.WorkflowUpdate") + defer span.Finish() + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("workflow", req.TabletRequest.Workflow) + span.Annotate("cells", req.TabletRequest.Cells) + span.Annotate("tablet_types", req.TabletRequest.TabletTypes) + span.Annotate("on_ddl", req.TabletRequest.OnDdl) + + vx := vexec.NewVExec(req.Keyspace, req.TabletRequest.Workflow, s.ts, s.tmc) + callback := func(ctx context.Context, tablet *topo.TabletInfo) (*querypb.QueryResult, error) { + res, err := s.tmc.UpdateVReplicationWorkflow(ctx, tablet.Tablet, req.TabletRequest) + if err != nil { + return nil, err + } + return res.Result, err + } + res, err := vx.CallbackContext(ctx, callback) + if err != nil { + if topo.IsErrType(err, topo.NoNode) { + return nil, vterrors.Wrapf(err, "%s keyspace does not exist", req.Keyspace) + } + return nil, err + } + + if len(res) == 0 { + return nil, fmt.Errorf("the %s workflow does not exist in the %s keyspace", req.TabletRequest.Workflow, req.Keyspace) + } + + response := &vtctldatapb.WorkflowUpdateResponse{} + response.Summary = fmt.Sprintf("Successfully updated the %s workflow on (%d) target primary tablets in the %s keyspace", req.TabletRequest.Workflow, len(res), req.Keyspace) + details := make([]*vtctldatapb.WorkflowUpdateResponse_TabletInfo, 0, len(res)) + for tinfo, tres := range res { + result := &vtctldatapb.WorkflowUpdateResponse_TabletInfo{ + Tablet: tinfo.Alias, + Changed: tres.RowsAffected > 0, // Can be more than one with shard merges + } + details = append(details, result) + } + response.Details = details + return response, nil +} + +// validateSourceTablesExist validates that tables provided are present +// in the source keyspace. +func (s *Server) validateSourceTablesExist(ctx context.Context, sourceKeyspace string, ksTables, tables []string) error { + var missingTables []string + for _, table := range tables { + if schema.IsInternalOperationTableName(table) { + continue + } + found := false + + for _, ksTable := range ksTables { + if table == ksTable { + found = true + break + } + } + if !found { + missingTables = append(missingTables, table) + } + } + if len(missingTables) > 0 { + return fmt.Errorf("table(s) not found in source keyspace %s: %s", sourceKeyspace, strings.Join(missingTables, ",")) + } + return nil +} + +// addTablesToVSchema adds tables to an (unsharded) vschema if they are not already defined. +// If copyVSchema is true then we copy over the vschema table definitions from the source, +// otherwise we create empty ones. +// For a migrate workflow we do not copy the vschema since the source keyspace is just a +// proxy to import data into Vitess. +func (s *Server) addTablesToVSchema(ctx context.Context, sourceKeyspace string, targetVSchema *vschemapb.Keyspace, tables []string, copyVSchema bool) error { + if targetVSchema.Tables == nil { + targetVSchema.Tables = make(map[string]*vschemapb.Table) + } + if copyVSchema { + srcVSchema, err := s.ts.GetVSchema(ctx, sourceKeyspace) + if err != nil { + return vterrors.Wrapf(err, "failed to get vschema for source keyspace %s", sourceKeyspace) + } + for _, table := range tables { + srcTable, sok := srcVSchema.Tables[table] + if _, tok := targetVSchema.Tables[table]; sok && !tok { + targetVSchema.Tables[table] = srcTable + // If going from sharded to unsharded, then we need to remove the + // column vindexes as they are not valid for unsharded tables. + if srcVSchema.Sharded { + targetVSchema.Tables[table].ColumnVindexes = nil + } + } + } + } + // Ensure that each table at least has an empty definition on the target. + for _, table := range tables { + if _, tok := targetVSchema.Tables[table]; !tok { + targetVSchema.Tables[table] = &vschemapb.Table{} + } + } + return nil +} + +func (s *Server) collectTargetStreams(ctx context.Context, mz *materializer) ([]string, error) { + var shardTablets []string + var mu sync.Mutex + err := mz.forAllTargets(func(target *topo.ShardInfo) error { + var qrproto *querypb.QueryResult + var id int64 + var err error + targetPrimary, err := s.ts.GetTablet(ctx, target.PrimaryAlias) + if err != nil { + return vterrors.Wrapf(err, "GetTablet(%v) failed", target.PrimaryAlias) + } + query := fmt.Sprintf("select id from _vt.vreplication where db_name=%s and workflow=%s", encodeString(targetPrimary.DbName()), encodeString(mz.ms.Workflow)) + if qrproto, err = s.tmc.VReplicationExec(ctx, targetPrimary.Tablet, query); err != nil { + return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", targetPrimary.Tablet, query) + } + qr := sqltypes.Proto3ToResult(qrproto) + for i := 0; i < len(qr.Rows); i++ { + id, err = qr.Rows[i][0].ToCastInt64() + if err != nil { + return err + } + mu.Lock() + shardTablets = append(shardTablets, fmt.Sprintf("%s:%d", target.ShardName(), id)) + mu.Unlock() + } + return nil + }) + if err != nil { + return nil, err + } + return shardTablets, nil +} + +func (s *Server) checkIfPreviousJournalExists(ctx context.Context, mz *materializer, migrationID int64) (bool, []string, error) { + forAllSources := func(f func(*topo.ShardInfo) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, sourceShard := range mz.sourceShards { + wg.Add(1) + go func(sourceShard *topo.ShardInfo) { + defer wg.Done() + + if err := f(sourceShard); err != nil { + allErrors.RecordError(err) + } + }(sourceShard) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) + } + + var ( + mu sync.Mutex + exists bool + tablets []string + ) + + err := forAllSources(func(si *topo.ShardInfo) error { + tablet, err := s.ts.GetTablet(ctx, si.PrimaryAlias) + if err != nil { + return err + } + if tablet == nil { + return nil + } + _, exists, err = s.CheckReshardingJournalExistsOnTablet(ctx, tablet.Tablet, migrationID) + if err != nil { + return err + } + if exists { + mu.Lock() + defer mu.Unlock() + tablets = append(tablets, tablet.AliasString()) + } + return nil + }) + return exists, tablets, err +} + +// deleteWorkflowVDiffData cleans up any potential VDiff related data associated +// with the workflow on the given tablet. +func (s *Server) deleteWorkflowVDiffData(ctx context.Context, tablet *topodatapb.Tablet, workflow string) { + sqlDeleteVDiffs := `delete from vd, vdt, vdl using _vt.vdiff as vd inner join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) + inner join _vt.vdiff_log as vdl on (vd.id = vdl.vdiff_id) + where vd.keyspace = %s and vd.workflow = %s` + query := fmt.Sprintf(sqlDeleteVDiffs, encodeString(tablet.Keyspace), encodeString(workflow)) + rows := -1 + if _, err := s.tmc.ExecuteFetchAsAllPrivs(ctx, tablet, &tabletmanagerdatapb.ExecuteFetchAsAllPrivsRequest{ + Query: []byte(query), + MaxRows: uint64(rows), + }); err != nil { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Num != sqlerror.ERNoSuchTable { // the tables may not exist if no vdiffs have been run + log.Errorf("Error deleting vdiff data for %s.%s workflow: %v", tablet.Keyspace, workflow, err) + } + } +} + +// optimizeCopyStateTable rebuilds the copy_state table to ensure the on-disk +// structures are minimal and optimized and resets the auto-inc value for +// subsequent inserts. +// This helps to ensure that the size, storage, and performance related factors +// for the table remain optimal over time and that we don't ever exhaust the +// available auto-inc values for the table. +// Note: it's not critical that this executes successfully any given time, it's +// only important that we try to do this periodically so that things stay in an +// optimal state over long periods of time. For this reason, the work is done +// asynchronously in the background on the given tablet and any failures are +// logged as warnings. Because it's done in the background we use the AllPrivs +// account to be sure that we don't execute the writes if READ_ONLY is set on +// the MySQL instance. +func (s *Server) optimizeCopyStateTable(tablet *topodatapb.Tablet) { + if s.sem != nil { + if !s.sem.TryAcquire(1) { + log.Warningf("Deferring work to optimize the copy_state table on %q due to hitting the maximum concurrent background job limit.", + tablet.Alias.String()) + return + } + } + go func() { + defer func() { + if s.sem != nil { + s.sem.Release(1) + } + }() + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) + defer cancel() + sqlOptimizeTable := "optimize table _vt.copy_state" + if _, err := s.tmc.ExecuteFetchAsAllPrivs(ctx, tablet, &tabletmanagerdatapb.ExecuteFetchAsAllPrivsRequest{ + Query: []byte(sqlOptimizeTable), + MaxRows: uint64(100), // always produces 1+rows with notes and status + }); err != nil { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Num == sqlerror.ERNoSuchTable { // the table may not exist + return + } + log.Warningf("Failed to optimize the copy_state table on %q: %v", tablet.Alias.String(), err) + } + // This will automatically set the value to 1 or the current max value in the + // table, whichever is greater. + sqlResetAutoInc := "alter table _vt.copy_state auto_increment = 1" + if _, err := s.tmc.ExecuteFetchAsAllPrivs(ctx, tablet, &tabletmanagerdatapb.ExecuteFetchAsAllPrivsRequest{ + Query: []byte(sqlResetAutoInc), + MaxRows: uint64(0), + }); err != nil { + log.Warningf("Failed to reset the auto_increment value for the copy_state table on %q: %v", + tablet.Alias.String(), err) + } + }() +} + +// DropTargets cleans up target tables, shards and denied tables if a MoveTables/Reshard +// is cancelled. +func (s *Server) DropTargets(ctx context.Context, targetKeyspace, workflow string, keepData, keepRoutingRules, dryRun bool) (*[]string, error) { + ts, state, err := s.getWorkflowState(ctx, targetKeyspace, workflow) + if err != nil { + log.Errorf("Failed to get VReplication workflow state for %s.%s: %v", targetKeyspace, workflow, err) + return nil, err + } + + // Return an error if the workflow traffic is partially switched. + if state.WritesSwitched || len(state.ReplicaCellsSwitched) > 0 || len(state.RdonlyCellsSwitched) > 0 { + return nil, ErrWorkflowPartiallySwitched + } + + if state.WorkflowType == TypeMigrate { + _, err := s.finalizeMigrateWorkflow(ctx, targetKeyspace, workflow, "", true, keepData, keepRoutingRules, dryRun) + return nil, err + } + + ts.keepRoutingRules = keepRoutingRules + var sw iswitcher + if dryRun { + sw = &switcherDryRun{ts: ts, drLog: NewLogRecorder()} + } else { + sw = &switcher{s: s, ts: ts} + } + var tctx context.Context + tctx, sourceUnlock, lockErr := sw.lockKeyspace(ctx, ts.SourceKeyspaceName(), "DropTargets") + if lockErr != nil { + ts.Logger().Errorf("Source LockKeyspace failed: %v", lockErr) + return nil, lockErr + } + defer sourceUnlock(&err) + ctx = tctx + + if ts.TargetKeyspaceName() != ts.SourceKeyspaceName() { + tctx, targetUnlock, lockErr := sw.lockKeyspace(ctx, ts.TargetKeyspaceName(), "DropTargets") + if lockErr != nil { + ts.Logger().Errorf("Target LockKeyspace failed: %v", lockErr) + return nil, lockErr + } + defer targetUnlock(&err) + ctx = tctx + } + if !keepData { + switch ts.MigrationType() { + case binlogdatapb.MigrationType_TABLES: + if err := sw.removeTargetTables(ctx); err != nil { + return nil, err + } + if err := sw.dropSourceDeniedTables(ctx); err != nil { + return nil, err + } + if err := sw.dropTargetDeniedTables(ctx); err != nil { + return nil, err + } + case binlogdatapb.MigrationType_SHARDS: + if err := sw.dropTargetShards(ctx); err != nil { + return nil, err + } + } + } + if err := s.dropRelatedArtifacts(ctx, keepRoutingRules, sw); err != nil { + return nil, err + } + if err := ts.TopoServer().RebuildSrvVSchema(ctx, nil); err != nil { + return nil, err + } + return sw.logs(), nil +} + +func (s *Server) buildTrafficSwitcher(ctx context.Context, targetKeyspace, workflowName string) (*trafficSwitcher, error) { + tgtInfo, err := BuildTargets(ctx, s.ts, s.tmc, targetKeyspace, workflowName) + if err != nil { + log.Infof("Error building targets: %s", err) + return nil, err + } + targets, frozen, optCells, optTabletTypes := tgtInfo.Targets, tgtInfo.Frozen, tgtInfo.OptCells, tgtInfo.OptTabletTypes + + ts := &trafficSwitcher{ + ws: s, + logger: logutil.NewConsoleLogger(), + workflow: workflowName, + reverseWorkflow: ReverseWorkflowName(workflowName), + id: HashStreams(targetKeyspace, targets), + targets: targets, + sources: make(map[string]*MigrationSource), + targetKeyspace: targetKeyspace, + frozen: frozen, + optCells: optCells, + optTabletTypes: optTabletTypes, + workflowType: tgtInfo.WorkflowType, + workflowSubType: tgtInfo.WorkflowSubType, + } + log.Infof("Migration ID for workflow %s: %d", workflowName, ts.id) + sourceTopo := s.ts + + // Build the sources. + for _, target := range targets { + for _, bls := range target.Sources { + if ts.sourceKeyspace == "" { + ts.sourceKeyspace = bls.Keyspace + ts.sourceTimeZone = bls.SourceTimeZone + ts.targetTimeZone = bls.TargetTimeZone + ts.externalCluster = bls.ExternalCluster + if ts.externalCluster != "" { + externalTopo, err := s.ts.OpenExternalVitessClusterServer(ctx, ts.externalCluster) + if err != nil { + return nil, err + } + sourceTopo = externalTopo + ts.externalTopo = externalTopo + } + } else if ts.sourceKeyspace != bls.Keyspace { + return nil, fmt.Errorf("source keyspaces are mismatched across streams: %v vs %v", ts.sourceKeyspace, bls.Keyspace) + } + + if ts.tables == nil { + for _, rule := range bls.Filter.Rules { + ts.tables = append(ts.tables, rule.Match) + } + sort.Strings(ts.tables) + } else { + var tables []string + for _, rule := range bls.Filter.Rules { + tables = append(tables, rule.Match) + } + sort.Strings(tables) + if !reflect.DeepEqual(ts.tables, tables) { + return nil, fmt.Errorf("table lists are mismatched across streams: %v vs %v", ts.tables, tables) + } + } + + if _, ok := ts.sources[bls.Shard]; ok { + continue + } + sourcesi, err := sourceTopo.GetShard(ctx, bls.Keyspace, bls.Shard) + if err != nil { + return nil, err + } + if sourcesi.PrimaryAlias == nil { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "source shard %s/%s currently has no primary tablet", + bls.Keyspace, bls.Shard) + } + sourcePrimary, err := sourceTopo.GetTablet(ctx, sourcesi.PrimaryAlias) + if err != nil { + return nil, err + } + ts.sources[bls.Shard] = NewMigrationSource(sourcesi, sourcePrimary) + } + } + if ts.sourceKeyspace != ts.targetKeyspace || ts.externalCluster != "" { + ts.migrationType = binlogdatapb.MigrationType_TABLES + } else { + // TODO(sougou): for shard migration, validate that source and target combined + // keyranges match. + ts.migrationType = binlogdatapb.MigrationType_SHARDS + for sourceShard := range ts.sources { + if _, ok := ts.targets[sourceShard]; ok { + // If shards are overlapping, then this is a table migration. + ts.migrationType = binlogdatapb.MigrationType_TABLES + break + } + } + } + vs, err := sourceTopo.GetVSchema(ctx, ts.sourceKeyspace) + if err != nil { + return nil, err + } + ts.sourceKSSchema, err = vindexes.BuildKeyspaceSchema(vs, ts.sourceKeyspace) + if err != nil { + return nil, err + } + + sourceShards, targetShards := ts.getSourceAndTargetShardsNames() + + ts.isPartialMigration, err = ts.isPartialMoveTables(sourceShards, targetShards) + if err != nil { + return nil, err + } + if ts.isPartialMigration { + log.Infof("Migration is partial, for shards %+v", sourceShards) + } + return ts, nil +} + +func (s *Server) dropRelatedArtifacts(ctx context.Context, keepRoutingRules bool, sw iswitcher) error { + if err := sw.dropSourceReverseVReplicationStreams(ctx); err != nil { + return err + } + if !keepRoutingRules { + if err := sw.deleteRoutingRules(ctx); err != nil { + return err + } + if err := sw.deleteShardRoutingRules(ctx); err != nil { + return err + } + } + + return nil +} + +// dropSources cleans up source tables, shards and denied tables after a +// MoveTables/Reshard is completed. +func (s *Server) dropSources(ctx context.Context, ts *trafficSwitcher, removalType TableRemovalType, keepData, keepRoutingRules, force, dryRun bool) (*[]string, error) { + var ( + sw iswitcher + err error + ) + if dryRun { + sw = &switcherDryRun{ts: ts, drLog: NewLogRecorder()} + } else { + sw = &switcher{ts: ts, s: s} + } + var tctx context.Context + tctx, sourceUnlock, lockErr := sw.lockKeyspace(ctx, ts.SourceKeyspaceName(), "DropSources") + if lockErr != nil { + ts.Logger().Errorf("Source LockKeyspace failed: %v", lockErr) + return nil, lockErr + } + defer sourceUnlock(&err) + ctx = tctx + if ts.TargetKeyspaceName() != ts.SourceKeyspaceName() { + tctx, targetUnlock, lockErr := sw.lockKeyspace(ctx, ts.TargetKeyspaceName(), "DropSources") + if lockErr != nil { + ts.Logger().Errorf("Target LockKeyspace failed: %v", lockErr) + return nil, lockErr + } + defer targetUnlock(&err) + ctx = tctx + } + if !force { + if err := sw.validateWorkflowHasCompleted(ctx); err != nil { + ts.Logger().Errorf("Workflow has not completed, cannot DropSources: %v", err) + return nil, err + } + } + if !keepData { + switch ts.MigrationType() { + case binlogdatapb.MigrationType_TABLES: + log.Infof("Deleting tables") + if err := sw.removeSourceTables(ctx, removalType); err != nil { + return nil, err + } + if err := sw.dropSourceDeniedTables(ctx); err != nil { + return nil, err + } + if err := sw.dropTargetDeniedTables(ctx); err != nil { + return nil, err + } + + case binlogdatapb.MigrationType_SHARDS: + log.Infof("Removing shards") + if err := sw.dropSourceShards(ctx); err != nil { + return nil, err + } + } + } + if err := s.dropArtifacts(ctx, keepRoutingRules, sw); err != nil { + return nil, err + } + if err := ts.TopoServer().RebuildSrvVSchema(ctx, nil); err != nil { + return nil, err + } + + return sw.logs(), nil +} + +func (s *Server) dropArtifacts(ctx context.Context, keepRoutingRules bool, sw iswitcher) error { + if err := sw.dropSourceReverseVReplicationStreams(ctx); err != nil { + return err + } + if err := sw.dropTargetVReplicationStreams(ctx); err != nil { + return err + } + if !keepRoutingRules { + if err := sw.deleteRoutingRules(ctx); err != nil { + return err + } + if err := sw.deleteShardRoutingRules(ctx); err != nil { + return err + } + } + + return nil +} + +// DeleteShard will do all the necessary changes in the topology server +// to entirely remove a shard. +func (s *Server) DeleteShard(ctx context.Context, keyspace, shard string, recursive, evenIfServing bool) error { + // Read the Shard object. If it's not there, try to clean up + // the topology anyway. + shardInfo, err := s.ts.GetShard(ctx, keyspace, shard) + if err != nil { + if topo.IsErrType(err, topo.NoNode) { + log.Infof("Shard %v/%v doesn't seem to exist, cleaning up any potential leftover", keyspace, shard) + return s.ts.DeleteShard(ctx, keyspace, shard) + } + return err + } + + servingCells, err := s.ts.GetShardServingCells(ctx, shardInfo) + if err != nil { + return err + } + // Check the Serving map for the shard, we don't want to + // remove a serving shard if not absolutely sure. + if !evenIfServing && len(servingCells) > 0 { + return fmt.Errorf("shard %v/%v is still serving, cannot delete it, use the even-if-serving flag if needed", keyspace, shard) + } + + cells, err := s.ts.GetCellInfoNames(ctx) + if err != nil { + return err + } + + // Go through all the cells. + for _, cell := range cells { + var aliases []*topodatapb.TabletAlias + + // Get the ShardReplication object for that cell. Try + // to find all tablets that may belong to our shard. + sri, err := s.ts.GetShardReplication(ctx, cell, keyspace, shard) + switch { + case topo.IsErrType(err, topo.NoNode): + // No ShardReplication object. It means the + // topo is inconsistent. Let's read all the + // tablets for that cell, and if we find any + // in our keyspace / shard, either abort or + // try to delete them. + aliases, err = s.ts.GetTabletAliasesByCell(ctx, cell) + if err != nil { + return fmt.Errorf("GetTabletsByCell(%v) failed: %v", cell, err) + } + case err == nil: + // We found a ShardReplication object. We + // trust it to have all tablet records. + aliases = make([]*topodatapb.TabletAlias, len(sri.Nodes)) + for i, n := range sri.Nodes { + aliases[i] = n.TabletAlias + } + default: + return fmt.Errorf("GetShardReplication(%v, %v, %v) failed: %v", cell, keyspace, shard, err) + } + + // Get the corresponding Tablet records. Note + // GetTabletMap ignores ErrNoNode, and it's good for + // our purpose, it means a tablet was deleted but is + // still referenced. + tabletMap, err := s.ts.GetTabletMap(ctx, aliases) + if err != nil { + return fmt.Errorf("GetTabletMap() failed: %v", err) + } + + // Remove the tablets that don't belong to our + // keyspace/shard from the map. + for a, ti := range tabletMap { + if ti.Keyspace != keyspace || ti.Shard != shard { + delete(tabletMap, a) + } + } + + // Now see if we need to DeleteTablet, and if we can, do it. + if len(tabletMap) > 0 { + if !recursive { + return fmt.Errorf("shard %v/%v still has %v tablets in cell %v; use --recursive or remove them manually", keyspace, shard, len(tabletMap), cell) + } + + log.Infof("Deleting all tablets in shard %v/%v cell %v", keyspace, shard, cell) + for tabletAlias, tabletInfo := range tabletMap { + // We don't care about scrapping or updating the replication graph, + // because we're about to delete the entire replication graph. + log.Infof("Deleting tablet %v", tabletAlias) + if err := s.ts.DeleteTablet(ctx, tabletInfo.Alias); err != nil && !topo.IsErrType(err, topo.NoNode) { + // We don't want to continue if a DeleteTablet fails for + // any good reason (other than missing tablet, in which + // case it's just a topology server inconsistency we can + // ignore). If we continue and delete the replication + // graph, the tablet record will be orphaned, since + // we'll no longer know it belongs to this shard. + // + // If the problem is temporary, or resolved externally, re-running + // DeleteShard will skip over tablets that were already deleted. + return fmt.Errorf("can't delete tablet %v: %v", tabletAlias, err) + } + } + } + } + + // Try to remove the replication graph and serving graph in each cell, + // regardless of its existence. + for _, cell := range cells { + if err := s.ts.DeleteShardReplication(ctx, cell, keyspace, shard); err != nil && !topo.IsErrType(err, topo.NoNode) { + log.Warningf("Cannot delete ShardReplication in cell %v for %v/%v: %v", cell, keyspace, shard, err) + } + } + + return s.ts.DeleteShard(ctx, keyspace, shard) +} + +// updateShardRecords updates the shard records based on 'from' or 'to' direction. +func (s *Server) updateShardRecords(ctx context.Context, keyspace string, shards []*topo.ShardInfo, cells []string, + servedType topodatapb.TabletType, isFrom bool, clearSourceShards bool, logger logutil.Logger) (err error) { + return topotools.UpdateShardRecords(ctx, s.ts, s.tmc, keyspace, shards, cells, servedType, isFrom, clearSourceShards, logger) +} + +// refreshPrimaryTablets will just RPC-ping all the primary tablets with RefreshState +func (s *Server) refreshPrimaryTablets(ctx context.Context, shards []*topo.ShardInfo) error { + wg := sync.WaitGroup{} + rec := concurrency.AllErrorRecorder{} + for _, si := range shards { + wg.Add(1) + go func(si *topo.ShardInfo) { + defer wg.Done() + ti, err := s.ts.GetTablet(ctx, si.PrimaryAlias) + if err != nil { + rec.RecordError(err) + return + } + + if err := s.tmc.RefreshState(ctx, ti.Tablet); err != nil { + rec.RecordError(err) + } else { + log.Infof("%v responded", topoproto.TabletAliasString(si.PrimaryAlias)) + } + }(si) + } + wg.Wait() + return rec.Error() +} + +// finalizeMigrateWorkflow deletes the streams for the Migrate workflow. +// We only cleanup the target for external sources. +func (s *Server) finalizeMigrateWorkflow(ctx context.Context, targetKeyspace, workflow, tableSpecs string, cancel, keepData, keepRoutingRules, dryRun bool) (*[]string, error) { + ts, err := s.buildTrafficSwitcher(ctx, targetKeyspace, workflow) + if err != nil { + ts.Logger().Errorf("buildTrafficSwitcher failed: %v", err) + return nil, err + } + var sw iswitcher + if dryRun { + sw = &switcherDryRun{ts: ts, drLog: NewLogRecorder()} + } else { + sw = &switcher{s: s, ts: ts} + } + var tctx context.Context + tctx, targetUnlock, lockErr := sw.lockKeyspace(ctx, ts.TargetKeyspaceName(), "completeMigrateWorkflow") + if lockErr != nil { + ts.Logger().Errorf("Target LockKeyspace failed: %v", lockErr) + return nil, lockErr + } + defer targetUnlock(&err) + ctx = tctx + if err := sw.dropTargetVReplicationStreams(ctx); err != nil { + return nil, err + } + if !cancel { + if err := sw.addParticipatingTablesToKeyspace(ctx, targetKeyspace, tableSpecs); err != nil { + return nil, err + } + if err := ts.TopoServer().RebuildSrvVSchema(ctx, nil); err != nil { + return nil, err + } + } + log.Infof("cancel is %t, keepData %t", cancel, keepData) + if cancel && !keepData { + if err := sw.removeTargetTables(ctx); err != nil { + return nil, err + } + } + return sw.logs(), nil +} + +// WorkflowSwitchTraffic switches traffic in the direction passed for specified tablet types. +func (s *Server) WorkflowSwitchTraffic(ctx context.Context, req *vtctldatapb.WorkflowSwitchTrafficRequest) (*vtctldatapb.WorkflowSwitchTrafficResponse, error) { + var ( + dryRunResults []string + rdDryRunResults, wrDryRunResults *[]string + hasReplica, hasRdonly, hasPrimary bool + ) + timeout, set, err := protoutil.DurationFromProto(req.Timeout) + if err != nil { + err = vterrors.Wrapf(err, "unable to parse Timeout into a valid duration") + return nil, err + } + if !set { + timeout = defaultDuration + } + ts, startState, err := s.getWorkflowState(ctx, req.Keyspace, req.Workflow) + if err != nil { + return nil, err + } + + if startState.WorkflowType == TypeMigrate { + return nil, fmt.Errorf("invalid action for Migrate workflow: SwitchTraffic") + } + + maxReplicationLagAllowed, set, err := protoutil.DurationFromProto(req.MaxReplicationLagAllowed) + if err != nil { + err = vterrors.Wrapf(err, "unable to parse MaxReplicationLagAllowed into a valid duration") + return nil, err + } + if !set { + maxReplicationLagAllowed = defaultDuration + } + direction := TrafficSwitchDirection(req.Direction) + if direction == DirectionBackward { + ts, startState, err = s.getWorkflowState(ctx, startState.SourceKeyspace, ts.reverseWorkflow) + if err != nil { + return nil, err + } + } + reason, err := s.canSwitch(ctx, ts, startState, direction, int64(maxReplicationLagAllowed.Seconds())) + if err != nil { + return nil, err + } + if reason != "" { + return nil, fmt.Errorf("cannot switch traffic for workflow %s at this time: %s", startState.Workflow, reason) + } + hasReplica, hasRdonly, hasPrimary, err = parseTabletTypes(req.TabletTypes) + if err != nil { + return nil, err + } + if hasReplica || hasRdonly { + if rdDryRunResults, err = s.switchReads(ctx, req, ts, startState, timeout, false, direction); err != nil { + return nil, err + } + log.Infof("Switch Reads done for workflow %s.%s", req.Keyspace, req.Workflow) + } + if rdDryRunResults != nil { + dryRunResults = append(dryRunResults, *rdDryRunResults...) + } + if hasPrimary { + if _, wrDryRunResults, err = s.switchWrites(ctx, req, ts, timeout, false); err != nil { + return nil, err + } + log.Infof("Switch Writes done for workflow %s.%s", req.Keyspace, req.Workflow) + } + + if wrDryRunResults != nil { + dryRunResults = append(dryRunResults, *wrDryRunResults...) + } + if req.DryRun && len(dryRunResults) == 0 { + dryRunResults = append(dryRunResults, "No changes required") + } + cmd := "SwitchTraffic" + if direction == DirectionBackward { + cmd = "ReverseTraffic" + } + log.Infof("%s done for workflow %s.%s", cmd, req.Keyspace, req.Workflow) + resp := &vtctldatapb.WorkflowSwitchTrafficResponse{} + if req.DryRun { + resp.Summary = fmt.Sprintf("%s dry run results for workflow %s.%s at %v", cmd, req.Keyspace, req.Workflow, time.Now().UTC().Format(time.RFC822)) + resp.DryRunResults = dryRunResults + } else { + log.Infof("SwitchTraffic done for workflow %s.%s", req.Keyspace, req.Workflow) + resp.Summary = fmt.Sprintf("%s was successful for workflow %s.%s", cmd, req.Keyspace, req.Workflow) + // Reload the state after the SwitchTraffic operation + // and return that as a string. + keyspace := req.Keyspace + workflow := req.Workflow + if direction == DirectionBackward { + keyspace = startState.SourceKeyspace + workflow = ts.reverseWorkflow + } + resp.StartState = startState.String() + log.Infof("Before reloading workflow state after switching traffic: %+v\n", resp.StartState) + _, currentState, err := s.getWorkflowState(ctx, keyspace, workflow) + if err != nil { + resp.CurrentState = fmt.Sprintf("Error reloading workflow state after switching traffic: %v", err) + } else { + resp.CurrentState = currentState.String() + } + } + return resp, nil +} + +// switchReads is a generic way of switching read traffic for a workflow. +func (s *Server) switchReads(ctx context.Context, req *vtctldatapb.WorkflowSwitchTrafficRequest, ts *trafficSwitcher, state *State, timeout time.Duration, cancel bool, direction TrafficSwitchDirection) (*[]string, error) { + roTypesToSwitchStr := topoproto.MakeStringTypeCSV(req.TabletTypes) + var switchReplica, switchRdonly bool + for _, roType := range req.TabletTypes { + switch roType { + case topodatapb.TabletType_REPLICA: + switchReplica = true + case topodatapb.TabletType_RDONLY: + switchRdonly = true + } + } + + // Consistently handle errors by logging and returning them. + handleError := func(message string, err error) (*[]string, error) { + werr := vterrors.Errorf(vtrpcpb.Code_INTERNAL, fmt.Sprintf("%s: %v", message, err)) + ts.Logger().Error(werr) + return nil, werr + } + + log.Infof("Switching reads: %s.%s tablet types: %s, cells: %s, workflow state: %s", ts.targetKeyspace, ts.workflow, roTypesToSwitchStr, ts.optCells, state.String()) + if !switchReplica && !switchRdonly { + return handleError("invalid tablet types", fmt.Errorf("tablet types must be REPLICA or RDONLY: %s", roTypesToSwitchStr)) + } + if !ts.isPartialMigration { // shard level traffic switching is all or nothing + if direction == DirectionBackward && switchReplica && len(state.ReplicaCellsSwitched) == 0 { + return handleError("invalid request", fmt.Errorf("requesting reversal of read traffic for REPLICAs but REPLICA reads have not been switched")) + } + if direction == DirectionBackward && switchRdonly && len(state.RdonlyCellsSwitched) == 0 { + return handleError("invalid request", fmt.Errorf("requesting reversal of SwitchReads for RDONLYs but RDONLY reads have not been switched")) + } + } + var cells = req.Cells + // If no cells were provided in the command then use the value from the workflow. + if len(cells) == 0 && ts.optCells != "" { + cells = strings.Split(strings.TrimSpace(ts.optCells), ",") + } + + // If there are no rdonly tablets in the cells ask to switch rdonly tablets as well so that routing rules + // are updated for rdonly as well. Otherwise vitess will not know that the workflow has completed and will + // incorrectly report that not all reads have been switched. User currently is forced to switch non-existent + // rdonly tablets. + if switchReplica && !switchRdonly { + var err error + rdonlyTabletsExist, err := topotools.DoCellsHaveRdonlyTablets(ctx, s.ts, cells) + if err != nil { + return nil, err + } + if !rdonlyTabletsExist { + req.TabletTypes = append(req.TabletTypes, topodatapb.TabletType_RDONLY) + } + } + + // If journals exist notify user and fail. + journalsExist, _, err := ts.checkJournals(ctx) + if err != nil { + return handleError(fmt.Sprintf("failed to read journal in the %s keyspace", ts.SourceKeyspaceName()), err) + } + if journalsExist { + log.Infof("Found a previous journal entry for %d", ts.id) + } + var sw iswitcher + if req.DryRun { + sw = &switcherDryRun{ts: ts, drLog: NewLogRecorder()} + } else { + sw = &switcher{ts: ts, s: s} + } + + if err := ts.validate(ctx); err != nil { + return handleError("workflow validation failed", err) + } + + // For reads, locking the source keyspace is sufficient. + ctx, unlock, lockErr := sw.lockKeyspace(ctx, ts.SourceKeyspaceName(), "SwitchReads") + if lockErr != nil { + return handleError(fmt.Sprintf("failed to lock the %s keyspace", ts.SourceKeyspaceName()), lockErr) + } + defer unlock(&err) + + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + if ts.isPartialMigration { + ts.Logger().Infof("Partial migration, skipping switchTableReads as traffic is all or nothing per shard and overridden for reads AND writes in the ShardRoutingRule created when switching writes.") + } else if err := sw.switchTableReads(ctx, cells, req.TabletTypes, direction); err != nil { + return handleError("failed to switch read traffic for the tables", err) + } + return sw.logs(), nil + } + ts.Logger().Infof("About to switchShardReads: %+v, %+s, %+v", cells, roTypesToSwitchStr, direction) + if err := sw.switchShardReads(ctx, cells, req.TabletTypes, direction); err != nil { + return handleError("failed to switch read traffic for the shards", err) + } + + ts.Logger().Infof("switchShardReads Completed: %+v, %+s, %+v", cells, roTypesToSwitchStr, direction) + if err := s.ts.ValidateSrvKeyspace(ctx, ts.targetKeyspace, strings.Join(cells, ",")); err != nil { + err2 := vterrors.Wrapf(err, "after switching shard reads, found SrvKeyspace for %s is corrupt in cell %s", + ts.targetKeyspace, strings.Join(cells, ",")) + return handleError("failed to validate SrvKeyspace record", err2) + } + return sw.logs(), nil +} + +// switchWrites is a generic way of migrating write traffic for a workflow. +func (s *Server) switchWrites(ctx context.Context, req *vtctldatapb.WorkflowSwitchTrafficRequest, ts *trafficSwitcher, timeout time.Duration, + cancel bool) (journalID int64, dryRunResults *[]string, err error) { + + var sw iswitcher + if req.DryRun { + sw = &switcherDryRun{ts: ts, drLog: NewLogRecorder()} + } else { + sw = &switcher{ts: ts, s: s} + } + + // Consistently handle errors by logging and returning them. + handleError := func(message string, err error) (int64, *[]string, error) { + werr := vterrors.Errorf(vtrpcpb.Code_INTERNAL, fmt.Sprintf("%s: %v", message, err)) + ts.Logger().Error(werr) + return 0, nil, werr + } + + if ts.frozen { + ts.Logger().Warningf("Writes have already been switched for workflow %s, nothing to do here", ts.WorkflowName()) + return 0, sw.logs(), nil + } + + if err := ts.validate(ctx); err != nil { + return handleError("workflow validation failed", err) + } + + if req.EnableReverseReplication { + if err := areTabletsAvailableToStreamFrom(ctx, req, ts, ts.TargetKeyspaceName(), ts.TargetShards()); err != nil { + return handleError(fmt.Sprintf("no tablets were available to stream from in the %s keyspace", ts.SourceKeyspaceName()), err) + } + } + + // Need to lock both source and target keyspaces. + tctx, sourceUnlock, lockErr := sw.lockKeyspace(ctx, ts.SourceKeyspaceName(), "SwitchWrites") + if lockErr != nil { + return handleError(fmt.Sprintf("failed to lock the %s keyspace", ts.SourceKeyspaceName()), lockErr) + } + ctx = tctx + defer sourceUnlock(&err) + if ts.TargetKeyspaceName() != ts.SourceKeyspaceName() { + tctx, targetUnlock, lockErr := sw.lockKeyspace(ctx, ts.TargetKeyspaceName(), "SwitchWrites") + if lockErr != nil { + return handleError(fmt.Sprintf("failed to lock the %s keyspace", ts.TargetKeyspaceName()), lockErr) + } + ctx = tctx + defer targetUnlock(&err) + } + + // Find out if the target is using any sequence tables for auto_increment + // value generation. If so, then we'll need to ensure that they are + // initialized properly before allowing new writes on the target. + sequenceMetadata := make(map[string]*sequenceMetadata) + // For sharded to sharded migrations the sequence must already be setup. + // For reshards the sequence usage is not changed. + if req.InitializeTargetSequences && ts.workflowType == binlogdatapb.VReplicationWorkflowType_MoveTables && + ts.SourceKeyspaceSchema() != nil && ts.SourceKeyspaceSchema().Keyspace != nil && + !ts.SourceKeyspaceSchema().Keyspace.Sharded { + sequenceMetadata, err = ts.getTargetSequenceMetadata(ctx) + if err != nil { + return handleError(fmt.Sprintf("failed to get the sequence information in the %s keyspace", ts.TargetKeyspaceName()), err) + } + } + + // If no journals exist, sourceWorkflows will be initialized by sm.MigrateStreams. + journalsExist, sourceWorkflows, err := ts.checkJournals(ctx) + if err != nil { + return handleError(fmt.Sprintf("failed to read journal in the %s keyspace", ts.SourceKeyspaceName()), err) + } + if !journalsExist { + ts.Logger().Infof("No previous journals were found. Proceeding normally.") + sm, err := BuildStreamMigrator(ctx, ts, cancel) + if err != nil { + return handleError("failed to migrate the workflow streams", err) + } + if cancel { + sw.cancelMigration(ctx, sm) + return 0, sw.logs(), nil + } + + ts.Logger().Infof("Stopping streams") + sourceWorkflows, err = sw.stopStreams(ctx, sm) + if err != nil { + for key, streams := range sm.Streams() { + for _, stream := range streams { + ts.Logger().Errorf("stream in stopStreams: key %s shard %s stream %+v", key, stream.BinlogSource.Shard, stream.BinlogSource) + } + } + sw.cancelMigration(ctx, sm) + return handleError("failed to stop the workflow streams", err) + } + + ts.Logger().Infof("Stopping source writes") + if err := sw.stopSourceWrites(ctx); err != nil { + sw.cancelMigration(ctx, sm) + return handleError(fmt.Sprintf("failed to stop writes in the %s keyspace", ts.SourceKeyspaceName()), err) + } + + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + ts.Logger().Infof("Executing LOCK TABLES on source tables %d times", lockTablesCycles) + // Doing this twice with a pause in-between to catch any writes that may have raced in between + // the tablet's deny list check and the first mysqld side table lock. + for cnt := 1; cnt <= lockTablesCycles; cnt++ { + if err := ts.executeLockTablesOnSource(ctx); err != nil { + sw.cancelMigration(ctx, sm) + return handleError(fmt.Sprintf("failed to execute LOCK TABLES (attempt %d of %d) on sources", cnt, lockTablesCycles), err) + } + // No need to UNLOCK the tables as the connection was closed once the locks were acquired + // and thus the locks released. + time.Sleep(lockTablesCycleDelay) + } + } + + ts.Logger().Infof("Waiting for streams to catchup") + if err := sw.waitForCatchup(ctx, timeout); err != nil { + sw.cancelMigration(ctx, sm) + return handleError("failed to sync up replication between the source and target", err) + } + + ts.Logger().Infof("Migrating streams") + if err := sw.migrateStreams(ctx, sm); err != nil { + sw.cancelMigration(ctx, sm) + return handleError("failed to migrate the workflow streams", err) + } + + ts.Logger().Infof("Resetting sequences") + if err := sw.resetSequences(ctx); err != nil { + sw.cancelMigration(ctx, sm) + return handleError("failed to reset the sequences", err) + } + + ts.Logger().Infof("Creating reverse streams") + if err := sw.createReverseVReplication(ctx); err != nil { + sw.cancelMigration(ctx, sm) + return handleError("failed to create the reverse vreplication streams", err) + } + } else { + if cancel { + return handleError("invalid cancel", fmt.Errorf("traffic switching has reached the point of no return, cannot cancel")) + } + ts.Logger().Infof("Journals were found. Completing the left over steps.") + // Need to gather positions in case all journals were not created. + if err := ts.gatherPositions(ctx); err != nil { + return handleError("failed to gather replication positions", err) + } + } + + // This is the point of no return. Once a journal is created, + // traffic can be redirected to target shards. + if err := sw.createJournals(ctx, sourceWorkflows); err != nil { + return handleError("failed to create the journal", err) + } + // Initialize any target sequences, if there are any, before allowing new writes. + if req.InitializeTargetSequences && len(sequenceMetadata) > 0 { + // Writes are blocked so we can safely initialize the sequence tables but + // we also want to use a shorter timeout than the parent context. + // We use up at most half of the overall timeout. + initSeqCtx, cancel := context.WithTimeout(ctx, timeout/2) + defer cancel() + if err := sw.initializeTargetSequences(initSeqCtx, sequenceMetadata); err != nil { + return handleError(fmt.Sprintf("failed to initialize the sequences used in the %s keyspace", ts.TargetKeyspaceName()), err) + } + } + if err := sw.allowTargetWrites(ctx); err != nil { + return handleError(fmt.Sprintf("failed to allow writes in the %s keyspace", ts.TargetKeyspaceName()), err) + } + if err := sw.changeRouting(ctx); err != nil { + return handleError("failed to update the routing rules", err) + } + if err := sw.streamMigraterfinalize(ctx, ts, sourceWorkflows); err != nil { + return handleError("failed to finalize the traffic switch", err) + } + if req.EnableReverseReplication { + if err := sw.startReverseVReplication(ctx); err != nil { + return handleError("failed to start the reverse workflow", err) + } + } + + if err := sw.freezeTargetVReplication(ctx); err != nil { + return handleError(fmt.Sprintf("failed to freeze the workflow in the %s keyspace", ts.TargetKeyspaceName()), err) + } + + return ts.id, sw.logs(), nil +} + +func (s *Server) canSwitch(ctx context.Context, ts *trafficSwitcher, state *State, direction TrafficSwitchDirection, maxAllowedReplLagSecs int64) (reason string, err error) { + if direction == DirectionForward && state.WritesSwitched || + direction == DirectionBackward && !state.WritesSwitched { + log.Infof("writes already switched no need to check lag") + return "", nil + } + wf, err := s.GetWorkflow(ctx, state.TargetKeyspace, state.Workflow) + if err != nil { + return "", err + } + for _, stream := range wf.ShardStreams { + for _, st := range stream.GetStreams() { + if st.Message == Frozen { + return cannotSwitchFrozen, nil + } + // If no new events have been replicated after the copy phase then it will be 0. + if vreplLag := time.Now().Unix() - st.TimeUpdated.Seconds; vreplLag > maxAllowedReplLagSecs { + return fmt.Sprintf(cannotSwitchHighLag, vreplLag, maxAllowedReplLagSecs), nil + } + switch st.State { + case binlogdatapb.VReplicationWorkflowState_Copying.String(): + return cannotSwitchCopyIncomplete, nil + case binlogdatapb.VReplicationWorkflowState_Error.String(): + return cannotSwitchError, nil + } + } + } + + // Ensure that the tablets on both sides are in good shape as we make this same call in the + // process and an error will cause us to backout. + refreshErrors := strings.Builder{} + var m sync.Mutex + var wg sync.WaitGroup + rtbsCtx, cancel := context.WithTimeout(ctx, shardTabletRefreshTimeout) + defer cancel() + refreshTablets := func(shards []*topo.ShardInfo, stype string) { + defer wg.Done() + for _, si := range shards { + if partial, partialDetails, err := topotools.RefreshTabletsByShard(rtbsCtx, s.ts, s.tmc, si, nil, ts.Logger()); err != nil || partial { + m.Lock() + refreshErrors.WriteString(fmt.Sprintf("failed to successfully refresh all tablets in the %s/%s %s shard (%v):\n %v\n", + si.Keyspace(), si.ShardName(), stype, err, partialDetails)) + m.Unlock() + } + } + } + wg.Add(1) + go refreshTablets(ts.SourceShards(), "source") + wg.Add(1) + go refreshTablets(ts.TargetShards(), "target") + wg.Wait() + if refreshErrors.Len() > 0 { + return fmt.Sprintf(cannotSwitchFailedTabletRefresh, refreshErrors.String()), nil + } + return "", nil +} + +// VReplicationExec executes a query remotely using the DBA pool. +func (s *Server) VReplicationExec(ctx context.Context, tabletAlias *topodatapb.TabletAlias, query string) (*querypb.QueryResult, error) { + ti, err := s.ts.GetTablet(ctx, tabletAlias) + if err != nil { + return nil, err + } + return s.tmc.VReplicationExec(ctx, ti.Tablet, query) +} + +// CopySchemaShard copies the schema from a source tablet to the +// specified shard. The schema is applied directly on the primary of +// the destination shard, and is propagated to the replicas through +// binlogs. +func (s *Server) CopySchemaShard(ctx context.Context, sourceTabletAlias *topodatapb.TabletAlias, tables, excludeTables []string, includeViews bool, destKeyspace, destShard string, waitReplicasTimeout time.Duration, skipVerify bool) error { + destShardInfo, err := s.ts.GetShard(ctx, destKeyspace, destShard) + if err != nil { + return fmt.Errorf("GetShard(%v, %v) failed: %v", destKeyspace, destShard, err) + } + + if destShardInfo.PrimaryAlias == nil { + return fmt.Errorf("no primary in shard record %v/%v. Consider running 'vtctl InitShardPrimary' in case of a new shard or reparenting the shard to fix the topology data", destKeyspace, destShard) + } + + diffs, err := schematools.CompareSchemas(ctx, s.ts, s.tmc, sourceTabletAlias, destShardInfo.PrimaryAlias, tables, excludeTables, includeViews) + if err != nil { + return fmt.Errorf("CopySchemaShard failed because schemas could not be compared initially: %v", err) + } + if diffs == nil { + // Return early because dest has already the same schema as source. + return nil + } + + req := &tabletmanagerdatapb.GetSchemaRequest{Tables: tables, ExcludeTables: excludeTables, IncludeViews: includeViews} + sourceSd, err := schematools.GetSchema(ctx, s.ts, s.tmc, sourceTabletAlias, req) + if err != nil { + return fmt.Errorf("GetSchema(%v, %v, %v, %v) failed: %v", sourceTabletAlias, tables, excludeTables, includeViews, err) + } + + createSQLstmts := tmutils.SchemaDefinitionToSQLStrings(sourceSd) + + destTabletInfo, err := s.ts.GetTablet(ctx, destShardInfo.PrimaryAlias) + if err != nil { + return fmt.Errorf("GetTablet(%v) failed: %v", destShardInfo.PrimaryAlias, err) + } + for _, createSQL := range createSQLstmts { + err = s.applySQLShard(ctx, destTabletInfo, createSQL) + if err != nil { + return fmt.Errorf("creating a table failed."+ + " Most likely some tables already exist on the destination and differ from the source."+ + " Please remove all to be copied tables from the destination manually and run this command again."+ + " Full error: %v", err) + } + } + + // Remember the replication position after all the above were applied. + destPrimaryPos, err := s.tmc.PrimaryPosition(ctx, destTabletInfo.Tablet) + if err != nil { + return fmt.Errorf("CopySchemaShard: can't get replication position after schema applied: %v", err) + } + + // Although the copy was successful, we have to verify it to catch the case + // where the database already existed on the destination, but with different + // options e.g. a different character set. + // In that case, MySQL would have skipped our CREATE DATABASE IF NOT EXISTS + // statement. + if !skipVerify { + diffs, err = schematools.CompareSchemas(ctx, s.ts, s.tmc, sourceTabletAlias, destShardInfo.PrimaryAlias, tables, excludeTables, includeViews) + if err != nil { + return fmt.Errorf("CopySchemaShard failed because schemas could not be compared finally: %v", err) + } + if diffs != nil { + return fmt.Errorf("CopySchemaShard was not successful because the schemas between the two tablets %v and %v differ: %v", sourceTabletAlias, destShardInfo.PrimaryAlias, diffs) + } + } + + // Notify Replicas to reload schema. This is best-effort. + reloadCtx, cancel := context.WithTimeout(ctx, waitReplicasTimeout) + defer cancel() + _, ok := schematools.ReloadShard(reloadCtx, s.ts, s.tmc, logutil.NewMemoryLogger(), destKeyspace, destShard, destPrimaryPos, nil, true) + if !ok { + log.Error(fmt.Errorf("CopySchemaShard: failed to reload schema on all replicas")) + } + + return err +} + +// applySQLShard applies a given SQL change on a given tablet alias. It allows executing arbitrary +// SQL statements, but doesn't return any results, so it's only useful for SQL statements +// that would be run for their effects (e.g., CREATE). +// It works by applying the SQL statement on the shard's primary tablet with replication turned on. +// Thus it should be used only for changes that can be applied on a live instance without causing issues; +// it shouldn't be used for anything that will require a pivot. +// The SQL statement string is expected to have {{.DatabaseName}} in place of the actual db name. +func (s *Server) applySQLShard(ctx context.Context, tabletInfo *topo.TabletInfo, change string) error { + filledChange, err := fillStringTemplate(change, map[string]string{"DatabaseName": tabletInfo.DbName()}) + if err != nil { + return fmt.Errorf("fillStringTemplate failed: %v", err) + } + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + // Need to make sure that replication is enabled since we're only applying the statement on primaries + _, err = s.tmc.ApplySchema(ctx, tabletInfo.Tablet, &tmutils.SchemaChange{ + SQL: filledChange, + Force: false, + AllowReplication: true, + SQLMode: vreplication.SQLMode, + }) + return err +} + +// fillStringTemplate returns the string template filled +func fillStringTemplate(tmpl string, vars any) (string, error) { + myTemplate := template.Must(template.New("").Parse(tmpl)) + data := new(bytes.Buffer) + if err := myTemplate.Execute(data, vars); err != nil { + return "", err + } + return data.String(), nil +} diff --git a/go/vt/vtctl/workflow/state.go b/go/vt/vtctl/workflow/state.go index 613f82d0b43..927f5a9db56 100644 --- a/go/vt/vtctl/workflow/state.go +++ b/go/vt/vtctl/workflow/state.go @@ -16,15 +16,45 @@ limitations under the License. package workflow -// Type is the type of a workflow. +import ( + "fmt" + "strings" +) + +// VReplicationWorkflowType specifies whether workflow is +// MoveTables or Reshard and maps directly to what is stored +// in the backend database. +type VReplicationWorkflowType int + +// VReplicationWorkflowType enums. +const ( + MoveTablesWorkflow = VReplicationWorkflowType(iota) + ReshardWorkflow + MigrateWorkflow +) + +// Type is the type of a workflow as a string and maps directly +// to what is provided and presented to the user. type Type string -// Workflow types. +// Workflow string types. const ( - TypeReshard Type = "Reshard" TypeMoveTables Type = "MoveTables" + TypeReshard Type = "Reshard" + TypeMigrate Type = "Migrate" ) +var TypeStrMap = map[VReplicationWorkflowType]Type{ + MoveTablesWorkflow: TypeMoveTables, + ReshardWorkflow: TypeReshard, + MigrateWorkflow: TypeMigrate, +} +var TypeIntMap = map[Type]VReplicationWorkflowType{ + TypeMoveTables: MoveTablesWorkflow, + TypeReshard: ReshardWorkflow, + TypeMigrate: MigrateWorkflow, +} + // State represents the state of a workflow. type State struct { Workflow string @@ -45,3 +75,52 @@ type State struct { ShardsAlreadySwitched []string ShardsNotYetSwitched []string } + +func (s *State) String() string { + var stateInfo []string + if !s.IsPartialMigration { // shard level traffic switching is all or nothing + if len(s.RdonlyCellsNotSwitched) == 0 && len(s.ReplicaCellsNotSwitched) == 0 && len(s.ReplicaCellsSwitched) > 0 { + stateInfo = append(stateInfo, "All Reads Switched") + } else if len(s.RdonlyCellsSwitched) == 0 && len(s.ReplicaCellsSwitched) == 0 { + stateInfo = append(stateInfo, "Reads Not Switched") + } else { + stateInfo = append(stateInfo, "Reads partially switched") + if len(s.ReplicaCellsNotSwitched) == 0 { + stateInfo = append(stateInfo, "All Replica Reads Switched") + } else if len(s.ReplicaCellsSwitched) == 0 { + stateInfo = append(stateInfo, "Replica not switched") + } else { + stateInfo = append(stateInfo, "Replica switched in cells: "+strings.Join(s.ReplicaCellsSwitched, ",")) + } + if len(s.RdonlyCellsNotSwitched) == 0 { + stateInfo = append(stateInfo, "All Rdonly Reads Switched") + } else if len(s.RdonlyCellsSwitched) == 0 { + stateInfo = append(stateInfo, "Rdonly not switched") + } else { + stateInfo = append(stateInfo, "Rdonly switched in cells: "+strings.Join(s.RdonlyCellsSwitched, ",")) + } + } + } + if s.WritesSwitched { + stateInfo = append(stateInfo, "Writes Switched") + } else if s.IsPartialMigration { + // For partial migrations, the traffic switching is all or nothing + // at the shard level, so reads are effectively switched on the + // shard when writes are switched. + if len(s.ShardsAlreadySwitched) > 0 && len(s.ShardsNotYetSwitched) > 0 { + stateInfo = append(stateInfo, fmt.Sprintf("Reads partially switched, for shards: %s", strings.Join(s.ShardsAlreadySwitched, ","))) + stateInfo = append(stateInfo, fmt.Sprintf("Writes partially switched, for shards: %s", strings.Join(s.ShardsAlreadySwitched, ","))) + } else { + if len(s.ShardsAlreadySwitched) == 0 { + stateInfo = append(stateInfo, "Reads Not Switched") + stateInfo = append(stateInfo, "Writes Not Switched") + } else { + stateInfo = append(stateInfo, "All Reads Switched") + stateInfo = append(stateInfo, "All Writes Switched") + } + } + } else { + stateInfo = append(stateInfo, "Writes Not Switched") + } + return strings.Join(stateInfo, ". ") +} diff --git a/go/vt/vtctl/workflow/stream_migrator.go b/go/vt/vtctl/workflow/stream_migrator.go index b29fb51794d..75d509614b7 100644 --- a/go/vt/vtctl/workflow/stream_migrator.go +++ b/go/vt/vtctl/workflow/stream_migrator.go @@ -25,9 +25,9 @@ import ( "google.golang.org/protobuf/encoding/prototext" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/logutil" @@ -262,7 +262,7 @@ func (sm *StreamMigrator) readTabletStreams(ctx context.Context, ti *topo.Tablet continue } - pos, err := mysql.DecodePosition(row["pos"].ToString()) + pos, err := replication.DecodePosition(row["pos"].ToString()) if err != nil { return nil, err } @@ -426,8 +426,8 @@ func (sm *StreamMigrator) stopSourceStreams(ctx context.Context) error { return nil } -func (sm *StreamMigrator) syncSourceStreams(ctx context.Context) (map[string]mysql.Position, error) { - stopPositions := make(map[string]mysql.Position) +func (sm *StreamMigrator) syncSourceStreams(ctx context.Context) (map[string]replication.Position, error) { + stopPositions := make(map[string]replication.Position) for _, tabletStreams := range sm.streams { for _, vrs := range tabletStreams { @@ -455,7 +455,7 @@ func (sm *StreamMigrator) syncSourceStreams(ctx context.Context) (map[string]mys } wg.Add(1) - go func(vrs *VReplicationStream, shard string, pos mysql.Position) { + go func(vrs *VReplicationStream, shard string, pos replication.Position) { defer wg.Done() sm.ts.Logger().Infof("syncSourceStreams beginning of go func %s %s %+v %d", shard, vrs.BinlogSource.Shard, pos, vrs.ID) @@ -471,14 +471,14 @@ func (sm *StreamMigrator) syncSourceStreams(ctx context.Context) (map[string]mys return } - query := fmt.Sprintf("update _vt.vreplication set state='Running', stop_pos='%s', message='synchronizing for cutover' where id=%d", mysql.EncodePosition(pos), vrs.ID) + query := fmt.Sprintf("update _vt.vreplication set state='Running', stop_pos='%s', message='synchronizing for cutover' where id=%d", replication.EncodePosition(pos), vrs.ID) if _, err := sm.ts.TabletManagerClient().VReplicationExec(ctx, primary.Tablet, query); err != nil { allErrors.RecordError(err) return } sm.ts.Logger().Infof("Waiting for keyspace:shard: %v:%v, position %v", sm.ts.SourceKeyspaceName(), shard, pos) - if err := sm.ts.TabletManagerClient().VReplicationWaitForPos(ctx, primary.Tablet, vrs.ID, mysql.EncodePosition(pos)); err != nil { + if err := sm.ts.TabletManagerClient().VReplicationWaitForPos(ctx, primary.Tablet, vrs.ID, replication.EncodePosition(pos)); err != nil { allErrors.RecordError(err) return } @@ -493,7 +493,7 @@ func (sm *StreamMigrator) syncSourceStreams(ctx context.Context) (map[string]mys return stopPositions, allErrors.AggrError(vterrors.Aggregate) } -func (sm *StreamMigrator) verifyStreamPositions(ctx context.Context, stopPositions map[string]mysql.Position) ([]string, error) { +func (sm *StreamMigrator) verifyStreamPositions(ctx context.Context, stopPositions map[string]replication.Position) ([]string, error) { var ( mu sync.Mutex stoppedStreams = make(map[string][]*VReplicationStream) @@ -538,7 +538,7 @@ func (sm *StreamMigrator) verifyStreamPositions(ctx context.Context, stopPositio for _, vrs := range tabletStreams { key := fmt.Sprintf("%s:%s", vrs.BinlogSource.Keyspace, vrs.BinlogSource.Shard) if pos := stopPositions[key]; !vrs.Position.Equal(pos) { - allErrors.RecordError(fmt.Errorf("%s: stream %d position: %s does not match %s", key, vrs.ID, mysql.EncodePosition(vrs.Position), mysql.EncodePosition(pos))) + allErrors.RecordError(fmt.Errorf("%s: stream %d position: %s does not match %s", key, vrs.ID, replication.EncodePosition(vrs.Position), replication.EncodePosition(pos))) } } } @@ -564,7 +564,7 @@ func (sm *StreamMigrator) createTargetStreams(ctx context.Context, tmpl []*VRepl } return sm.ts.ForAllTargets(func(target *MigrationTarget) error { - ig := vreplication.NewInsertGenerator(binlogplayer.BlpStopped, target.GetPrimary().DbName()) + ig := vreplication.NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Stopped, target.GetPrimary().DbName()) tabletStreams := VReplicationStreams(tmpl).Copy().ToSlice() for _, vrs := range tabletStreams { @@ -579,7 +579,7 @@ func (sm *StreamMigrator) createTargetStreams(ctx context.Context, tmpl []*VRepl rule.Filter = buf.String() } - ig.AddRow(vrs.Workflow, vrs.BinlogSource, mysql.EncodePosition(vrs.Position), "", "", + ig.AddRow(vrs.Workflow, vrs.BinlogSource, replication.EncodePosition(vrs.Position), "", "", vrs.WorkflowType, vrs.WorkflowSubType, vrs.DeferSecondaryKeys) } @@ -659,7 +659,7 @@ func (sm *StreamMigrator) templatizeRule(ctx context.Context, rule *binlogdatapb switch { case rule.Filter == "": return StreamTypeUnknown, fmt.Errorf("rule %v does not have a select expression in vreplication", rule) - case key.IsKeyRange(rule.Filter): + case key.IsValidKeyRange(rule.Filter): rule.Filter = "{{.}}" return StreamTypeSharded, nil case rule.Filter == vreplication.ExcludeStr: diff --git a/go/vt/vtctl/workflow/stream_migrator_test.go b/go/vt/vtctl/workflow/stream_migrator_test.go index 903e873a130..04f787eb4d4 100644 --- a/go/vt/vtctl/workflow/stream_migrator_test.go +++ b/go/vt/vtctl/workflow/stream_migrator_test.go @@ -24,7 +24,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" @@ -282,20 +281,20 @@ func TestTemplatize(t *testing.T) { }} vs := &vschemapb.Keyspace{ Sharded: true, - Vindexes: map[string]*vschema.Vindex{ + Vindexes: map[string]*vschemapb.Vindex{ "thash": { Type: "hash", }, }, - Tables: map[string]*vschema.Table{ + Tables: map[string]*vschemapb.Table{ "t1": { - ColumnVindexes: []*vschema.ColumnVindex{{ + ColumnVindexes: []*vschemapb.ColumnVindex{{ Columns: []string{"c1"}, Name: "thash", }}, }, "t2": { - ColumnVindexes: []*vschema.ColumnVindex{{ + ColumnVindexes: []*vschemapb.ColumnVindex{{ Columns: []string{"c1"}, Name: "thash", }}, diff --git a/go/vt/vtctl/workflow/switcher.go b/go/vt/vtctl/workflow/switcher.go new file mode 100644 index 00000000000..0cbdce164dc --- /dev/null +++ b/go/vt/vtctl/workflow/switcher.go @@ -0,0 +1,151 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "context" + "time" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +var _ iswitcher = (*switcher)(nil) + +type switcher struct { + s *Server + ts *trafficSwitcher +} + +func (r *switcher) addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error { + return r.ts.addParticipatingTablesToKeyspace(ctx, keyspace, tableSpecs) +} + +func (r *switcher) deleteRoutingRules(ctx context.Context) error { + return r.ts.deleteRoutingRules(ctx) +} + +func (r *switcher) deleteShardRoutingRules(ctx context.Context) error { + return r.ts.deleteShardRoutingRules(ctx) +} + +func (r *switcher) dropSourceDeniedTables(ctx context.Context) error { + return r.ts.dropSourceDeniedTables(ctx) +} + +func (r *switcher) dropTargetDeniedTables(ctx context.Context) error { + return r.ts.dropTargetDeniedTables(ctx) +} + +func (r *switcher) validateWorkflowHasCompleted(ctx context.Context) error { + return r.ts.validateWorkflowHasCompleted(ctx) +} + +func (r *switcher) removeSourceTables(ctx context.Context, removalType TableRemovalType) error { + return r.ts.removeSourceTables(ctx, removalType) +} + +func (r *switcher) dropSourceShards(ctx context.Context) error { + return r.ts.dropSourceShards(ctx) +} + +func (r *switcher) switchShardReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction TrafficSwitchDirection) error { + return r.ts.switchShardReads(ctx, cells, servedTypes, direction) +} + +func (r *switcher) switchTableReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction TrafficSwitchDirection) error { + return r.ts.switchTableReads(ctx, cells, servedTypes, direction) +} + +func (r *switcher) startReverseVReplication(ctx context.Context) error { + return r.ts.startReverseVReplication(ctx) +} + +func (r *switcher) createJournals(ctx context.Context, sourceWorkflows []string) error { + return r.ts.createJournals(ctx, sourceWorkflows) +} + +func (r *switcher) allowTargetWrites(ctx context.Context) error { + return r.ts.allowTargetWrites(ctx) +} + +func (r *switcher) changeRouting(ctx context.Context) error { + return r.ts.changeRouting(ctx) +} + +func (r *switcher) streamMigraterfinalize(ctx context.Context, ts *trafficSwitcher, workflows []string) error { + return StreamMigratorFinalize(ctx, ts, workflows) +} + +func (r *switcher) createReverseVReplication(ctx context.Context) error { + return r.ts.createReverseVReplication(ctx) +} + +func (r *switcher) migrateStreams(ctx context.Context, sm *StreamMigrator) error { + return sm.MigrateStreams(ctx) +} + +func (r *switcher) waitForCatchup(ctx context.Context, filteredReplicationWaitTime time.Duration) error { + return r.ts.waitForCatchup(ctx, filteredReplicationWaitTime) +} + +func (r *switcher) stopSourceWrites(ctx context.Context) error { + return r.ts.stopSourceWrites(ctx) +} + +func (r *switcher) stopStreams(ctx context.Context, sm *StreamMigrator) ([]string, error) { + return sm.StopStreams(ctx) +} + +func (r *switcher) cancelMigration(ctx context.Context, sm *StreamMigrator) { + r.ts.cancelMigration(ctx, sm) +} + +func (r *switcher) lockKeyspace(ctx context.Context, keyspace, action string) (context.Context, func(*error), error) { + return r.s.ts.LockKeyspace(ctx, keyspace, action) +} + +func (r *switcher) freezeTargetVReplication(ctx context.Context) error { + return r.ts.freezeTargetVReplication(ctx) +} + +func (r *switcher) dropTargetVReplicationStreams(ctx context.Context) error { + return r.ts.dropTargetVReplicationStreams(ctx) +} + +func (r *switcher) dropSourceReverseVReplicationStreams(ctx context.Context) error { + return r.ts.dropSourceReverseVReplicationStreams(ctx) +} + +func (r *switcher) removeTargetTables(ctx context.Context) error { + return r.ts.removeTargetTables(ctx) +} + +func (r *switcher) dropTargetShards(ctx context.Context) error { + return r.ts.dropTargetShards(ctx) +} + +func (r *switcher) logs() *[]string { + return nil +} + +func (r *switcher) resetSequences(ctx context.Context) error { + return r.ts.resetSequences(ctx) +} + +func (r *switcher) initializeTargetSequences(ctx context.Context, sequencesByBackingTable map[string]*sequenceMetadata) error { + return r.ts.initializeTargetSequences(ctx, sequencesByBackingTable) +} diff --git a/go/vt/vtctl/workflow/switcher_dry_run.go b/go/vt/vtctl/workflow/switcher_dry_run.go new file mode 100644 index 00000000000..1c8a05e00c2 --- /dev/null +++ b/go/vt/vtctl/workflow/switcher_dry_run.go @@ -0,0 +1,388 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "context" + "fmt" + "slices" + "sort" + "strings" + "time" + + "vitess.io/vitess/go/maps2" + "vitess.io/vitess/go/mysql/replication" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +var _ iswitcher = (*switcherDryRun)(nil) + +type switcherDryRun struct { + drLog *LogRecorder + ts *trafficSwitcher +} + +func (dr *switcherDryRun) addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error { + dr.drLog.Log("All source tables will be added to the target keyspace vschema") + return nil +} + +func (dr *switcherDryRun) deleteRoutingRules(ctx context.Context) error { + dr.drLog.Log("Routing rules for participating tables will be deleted") + return nil +} + +func (dr *switcherDryRun) deleteShardRoutingRules(ctx context.Context) error { + if dr.ts.isPartialMigration { + dr.drLog.Log("Shard routing rules for participating shards will be deleted") + } + return nil +} + +func (dr *switcherDryRun) switchShardReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction TrafficSwitchDirection) error { + sourceShards := make([]string, 0) + targetShards := make([]string, 0) + for _, source := range dr.ts.Sources() { + sourceShards = append(sourceShards, source.GetShard().ShardName()) + } + for _, target := range dr.ts.Targets() { + targetShards = append(targetShards, target.GetShard().ShardName()) + } + sort.Strings(sourceShards) + sort.Strings(targetShards) + if direction == DirectionForward { + dr.drLog.Logf("Switch reads from keyspace %s to keyspace %s for shards [%s] to shards [%s]", + dr.ts.SourceKeyspaceName(), dr.ts.TargetKeyspaceName(), strings.Join(sourceShards, ","), strings.Join(targetShards, ",")) + } else { + dr.drLog.Logf("Switch reads from keyspace %s to keyspace %s for shards [%s] to shards [%s]", + dr.ts.TargetKeyspaceName(), dr.ts.SourceKeyspaceName(), strings.Join(targetShards, ","), strings.Join(sourceShards, ",")) + } + return nil +} + +func (dr *switcherDryRun) switchTableReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction TrafficSwitchDirection) error { + ks := dr.ts.TargetKeyspaceName() + if direction == DirectionBackward { + ks = dr.ts.SourceKeyspaceName() + } + var tabletTypes []string + for _, servedType := range servedTypes { + tabletTypes = append(tabletTypes, servedType.String()) + } + tables := strings.Join(dr.ts.Tables(), ",") + dr.drLog.Logf("Switch reads for tables [%s] to keyspace %s for tablet types [%s]", tables, ks, strings.Join(tabletTypes, ",")) + dr.drLog.Logf("Routing rules for tables [%s] will be updated", tables) + return nil +} + +func (dr *switcherDryRun) createJournals(ctx context.Context, sourceWorkflows []string) error { + dr.drLog.Log("Create journal entries on source databases") + if len(sourceWorkflows) > 0 { + dr.drLog.Logf("Source workflows found: [%s]", strings.Join(sourceWorkflows, ",")) + } + return nil +} + +func (dr *switcherDryRun) allowTargetWrites(ctx context.Context) error { + dr.drLog.Logf("Enable writes on keyspace %s for tables [%s]", dr.ts.TargetKeyspaceName(), strings.Join(dr.ts.Tables(), ",")) + return nil +} + +func (dr *switcherDryRun) changeRouting(ctx context.Context) error { + dr.drLog.Logf("Switch routing from keyspace %s to keyspace %s", dr.ts.SourceKeyspaceName(), dr.ts.TargetKeyspaceName()) + var deleteLogs, addLogs []string + if dr.ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + tables := strings.Join(dr.ts.Tables(), ",") + dr.drLog.Logf("Routing rules for tables [%s] will be updated", tables) + return nil + } + deleteLogs = nil + addLogs = nil + for _, source := range dr.ts.Sources() { + deleteLogs = append(deleteLogs, fmt.Sprintf("shard:%s;tablet:%d", source.GetShard().ShardName(), source.GetShard().PrimaryAlias.Uid)) + } + for _, target := range dr.ts.Targets() { + addLogs = append(addLogs, fmt.Sprintf("shard:%s;tablet:%d", target.GetShard().ShardName(), target.GetShard().PrimaryAlias.Uid)) + } + if len(deleteLogs) > 0 { + dr.drLog.Logf("IsPrimaryServing will be set to false for: [%s]", strings.Join(deleteLogs, ",")) + dr.drLog.Logf("IsPrimaryServing will be set to true for: [%s]", strings.Join(addLogs, ",")) + } + return nil +} + +func (dr *switcherDryRun) streamMigraterfinalize(ctx context.Context, ts *trafficSwitcher, workflows []string) error { + logs := make([]string, 0) + for _, t := range ts.Targets() { + logs = append(logs, fmt.Sprintf("tablet:%d", t.GetPrimary().Alias.Uid)) + } + dr.drLog.Logf("Switch writes completed, freeze and delete vreplication streams on: [%s]", strings.Join(logs, ",")) + return nil +} + +func (dr *switcherDryRun) startReverseVReplication(ctx context.Context) error { + logs := make([]string, 0) + for _, t := range dr.ts.Sources() { + logs = append(logs, fmt.Sprintf("tablet:%d", t.GetPrimary().Alias.Uid)) + } + dr.drLog.Logf("Start reverse vreplication streams on: [%s]", strings.Join(logs, ",")) + return nil +} + +func (dr *switcherDryRun) createReverseVReplication(ctx context.Context) error { + dr.drLog.Logf("Create reverse vreplication workflow %s", dr.ts.ReverseWorkflowName()) + return nil +} + +func (dr *switcherDryRun) migrateStreams(ctx context.Context, sm *StreamMigrator) error { + templates := sm.Templates() + + if len(templates) == 0 { + return nil + } + logs := make([]string, 0) + + dr.drLog.Logf("Migrate streams to %s:", dr.ts.TargetKeyspaceName()) + for key, streams := range sm.Streams() { + for _, stream := range streams { + logs = append(logs, fmt.Sprintf("shard:%s;id:%d;workflow:%s;position:%s;binlogsource:%v", key, stream.ID, stream.Workflow, replication.EncodePosition(stream.Position), stream.BinlogSource)) + } + } + if len(logs) > 0 { + dr.drLog.Logf("Migrate source streams: [%s]", strings.Join(logs, ",")) + logs = nil + } + for _, target := range dr.ts.Targets() { + tabletStreams := templates + for _, vrs := range tabletStreams { + logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;tablet:%d;workflow:%s;id:%d,position:%v;binlogsource:%s", + vrs.BinlogSource.Keyspace, vrs.BinlogSource.Shard, target.GetPrimary().Alias.Uid, vrs.Workflow, vrs.ID, replication.EncodePosition(vrs.Position), vrs.BinlogSource)) + } + } + if len(logs) > 0 { + dr.drLog.Logf("Create target streams (as stopped): [%s]", strings.Join(logs, ",")) + } + return nil +} + +func (dr *switcherDryRun) waitForCatchup(ctx context.Context, filteredReplicationWaitTime time.Duration) error { + dr.drLog.Logf("Wait for vreplication on stopped streams to catchup for up to %v", filteredReplicationWaitTime) + return nil +} + +func (dr *switcherDryRun) stopSourceWrites(ctx context.Context) error { + logs := make([]string, 0) + for _, source := range dr.ts.Sources() { + position, _ := dr.ts.TabletManagerClient().PrimaryPosition(ctx, source.GetPrimary().Tablet) + logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;position:%s", dr.ts.SourceKeyspaceName(), source.GetShard().ShardName(), position)) + } + if len(logs) > 0 { + dr.drLog.Logf("Stop writes on keyspace %s for tables [%s]: [%s]", dr.ts.SourceKeyspaceName(), + strings.Join(dr.ts.Tables(), ","), strings.Join(logs, ",")) + } + return nil +} + +func (dr *switcherDryRun) stopStreams(ctx context.Context, sm *StreamMigrator) ([]string, error) { + logs := make([]string, 0) + for _, streams := range sm.Streams() { + for _, stream := range streams { + logs = append(logs, fmt.Sprintf("id:%d;keyspace:%s;shard:%s;rules:%s;position:%v", + stream.ID, stream.BinlogSource.Keyspace, stream.BinlogSource.Shard, stream.BinlogSource.Filter, stream.Position)) + } + } + if len(logs) > 0 { + dr.drLog.Logf("Stop streams on keyspace %s: [%s]", dr.ts.SourceKeyspaceName(), strings.Join(logs, ",")) + } + return nil, nil +} + +func (dr *switcherDryRun) cancelMigration(ctx context.Context, sm *StreamMigrator) { + dr.drLog.Log("Cancel stream migrations as requested") +} + +func (dr *switcherDryRun) lockKeyspace(ctx context.Context, keyspace, _ string) (context.Context, func(*error), error) { + dr.drLog.Logf("Lock keyspace %s", keyspace) + return ctx, func(e *error) { + dr.drLog.Logf("Unlock keyspace %s", keyspace) + }, nil +} + +func (dr *switcherDryRun) removeSourceTables(ctx context.Context, removalType TableRemovalType) error { + logs := make([]string, 0) + for _, source := range dr.ts.Sources() { + for _, tableName := range dr.ts.Tables() { + logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;dbname:%s;tablet:%d;table:%s", + source.GetPrimary().Keyspace, source.GetPrimary().Shard, source.GetPrimary().DbName(), source.GetPrimary().Alias.Uid, tableName)) + } + } + action := "Dropping" + if removalType == RenameTable { + action = "Renaming" + } + if len(logs) > 0 { + dr.drLog.Logf("%s these tables from the database and removing them from the vschema for keyspace %s: [%s]", + action, dr.ts.SourceKeyspaceName(), strings.Join(logs, ",")) + } + return nil +} + +func (dr *switcherDryRun) dropSourceShards(ctx context.Context) error { + logs := make([]string, 0) + tabletsList := make(map[string][]string) + for _, si := range dr.ts.SourceShards() { + tabletAliases, err := dr.ts.TopoServer().FindAllTabletAliasesInShard(ctx, si.Keyspace(), si.ShardName()) + if err != nil { + return err + } + tabletsList[si.ShardName()] = make([]string, 0) + for _, t := range tabletAliases { + tabletsList[si.ShardName()] = append(tabletsList[si.ShardName()], fmt.Sprintf("%d", t.Uid)) + } + sort.Strings(tabletsList[si.ShardName()]) + logs = append(logs, fmt.Sprintf("cell:%s;keyspace:%s;shards:[%s]", + si.Shard.PrimaryAlias.Cell, si.Keyspace(), si.ShardName()), strings.Join(tabletsList[si.ShardName()], ",")) + } + if len(logs) > 0 { + dr.drLog.Logf("Delete shards (and all related tablets): [%s]", strings.Join(logs, ",")) + } + + return nil +} + +func (dr *switcherDryRun) validateWorkflowHasCompleted(ctx context.Context) error { + return doValidateWorkflowHasCompleted(ctx, dr.ts) +} + +func (dr *switcherDryRun) dropTargetVReplicationStreams(ctx context.Context) error { + logs := make([]string, 0) + for _, t := range dr.ts.Targets() { + logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;workflow:%s;dbname:%s;tablet:%d", + t.GetShard().Keyspace(), t.GetShard().ShardName(), dr.ts.WorkflowName(), t.GetPrimary().DbName(), t.GetPrimary().Alias.Uid)) + } + dr.drLog.Logf("Delete vreplication streams on targets: [%s]", strings.Join(logs, ",")) + return nil +} + +func (dr *switcherDryRun) dropSourceReverseVReplicationStreams(ctx context.Context) error { + logs := make([]string, 0) + for _, t := range dr.ts.Sources() { + logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;workflow:%s;dbname:%s;tablet:%d", + t.GetShard().Keyspace(), t.GetShard().ShardName(), ReverseWorkflowName(dr.ts.WorkflowName()), t.GetPrimary().DbName(), t.GetPrimary().Alias.Uid)) + } + dr.drLog.Logf("Delete reverse vreplication streams on sources: [%s]", strings.Join(logs, ",")) + return nil +} + +func (dr *switcherDryRun) freezeTargetVReplication(ctx context.Context) error { + logs := make([]string, 0) + for _, target := range dr.ts.Targets() { + logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;tablet:%d;workflow:%s;dbname:%s", + target.GetPrimary().Keyspace, target.GetPrimary().Shard, target.GetPrimary().Alias.Uid, dr.ts.WorkflowName(), target.GetPrimary().DbName())) + } + if len(logs) > 0 { + dr.drLog.Logf("Mark vreplication streams frozen on: [%s]", strings.Join(logs, ",")) + } + return nil +} + +func (dr *switcherDryRun) dropSourceDeniedTables(ctx context.Context) error { + logs := make([]string, 0) + for _, si := range dr.ts.SourceShards() { + logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;tablet:%d", si.Keyspace(), si.ShardName(), si.PrimaryAlias.Uid)) + } + if len(logs) > 0 { + dr.drLog.Logf("Denied tables records on [%s] will be removed from: [%s]", strings.Join(dr.ts.Tables(), ","), strings.Join(logs, ",")) + } + return nil +} + +func (dr *switcherDryRun) dropTargetDeniedTables(ctx context.Context) error { + logs := make([]string, 0) + for _, si := range dr.ts.TargetShards() { + logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;tablet:%d", si.Keyspace(), si.ShardName(), si.PrimaryAlias.Uid)) + } + if len(logs) > 0 { + dr.drLog.Logf("Denied tables records on [%s] will be removed from: [%s]", strings.Join(dr.ts.Tables(), ","), strings.Join(logs, ",")) + } + return nil +} + +func (dr *switcherDryRun) logs() *[]string { + return &dr.drLog.logs +} + +func (dr *switcherDryRun) removeTargetTables(ctx context.Context) error { + logs := make([]string, 0) + for _, target := range dr.ts.Targets() { + for _, tableName := range dr.ts.Tables() { + logs = append(logs, fmt.Sprintf("keyspace:%s;shard:%s;dbname:%s;tablet:%d;table:%s", + target.GetPrimary().Keyspace, target.GetPrimary().Shard, target.GetPrimary().DbName(), target.GetPrimary().Alias.Uid, tableName)) + } + } + if len(logs) > 0 { + dr.drLog.Logf("Dropping these tables from the database and removing from the vschema for keyspace %s: [%s]", + dr.ts.TargetKeyspaceName(), strings.Join(logs, ",")) + } + return nil +} + +func (dr *switcherDryRun) dropTargetShards(ctx context.Context) error { + logs := make([]string, 0) + tabletsList := make(map[string][]string) + for _, si := range dr.ts.TargetShards() { + tabletAliases, err := dr.ts.TopoServer().FindAllTabletAliasesInShard(ctx, si.Keyspace(), si.ShardName()) + if err != nil { + return err + } + tabletsList[si.ShardName()] = make([]string, 0) + for _, t := range tabletAliases { + tabletsList[si.ShardName()] = append(tabletsList[si.ShardName()], fmt.Sprintf("%d", t.Uid)) + } + sort.Strings(tabletsList[si.ShardName()]) + logs = append(logs, fmt.Sprintf("cell:%s;keyspace:%s;shards:[%s]", + si.Shard.PrimaryAlias.Cell, si.Keyspace(), si.ShardName()), strings.Join(tabletsList[si.ShardName()], ",")) + } + if len(logs) > 0 { + dr.drLog.Logf("Delete shards (and all related tablets): [%s]", strings.Join(logs, ",")) + } + + return nil +} + +func (dr *switcherDryRun) resetSequences(ctx context.Context) error { + var err error + mustReset := false + if mustReset, err = dr.ts.mustResetSequences(ctx); err != nil { + return err + } + if !mustReset { + return nil + } + dr.drLog.Log("The sequence caches will be reset on the source since sequence tables are being moved") + return nil +} + +func (dr *switcherDryRun) initializeTargetSequences(ctx context.Context, sequencesByBackingTable map[string]*sequenceMetadata) error { + sortedBackingTableNames := maps2.Keys(sequencesByBackingTable) + slices.Sort(sortedBackingTableNames) + dr.drLog.Log(fmt.Sprintf("The following sequence backing tables used by tables being moved will be initialized: %s", + strings.Join(sortedBackingTableNames, ","))) + return nil +} diff --git a/go/vt/vtctl/workflow/switcher_interface.go b/go/vt/vtctl/workflow/switcher_interface.go new file mode 100644 index 00000000000..8d0f9e847be --- /dev/null +++ b/go/vt/vtctl/workflow/switcher_interface.go @@ -0,0 +1,57 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "context" + "time" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +type iswitcher interface { + lockKeyspace(ctx context.Context, keyspace, action string) (context.Context, func(*error), error) + cancelMigration(ctx context.Context, sm *StreamMigrator) + stopStreams(ctx context.Context, sm *StreamMigrator) ([]string, error) + stopSourceWrites(ctx context.Context) error + waitForCatchup(ctx context.Context, filteredReplicationWaitTime time.Duration) error + migrateStreams(ctx context.Context, sm *StreamMigrator) error + createReverseVReplication(ctx context.Context) error + createJournals(ctx context.Context, sourceWorkflows []string) error + allowTargetWrites(ctx context.Context) error + changeRouting(ctx context.Context) error + streamMigraterfinalize(ctx context.Context, ts *trafficSwitcher, workflows []string) error + startReverseVReplication(ctx context.Context) error + switchTableReads(ctx context.Context, cells []string, servedType []topodatapb.TabletType, direction TrafficSwitchDirection) error + switchShardReads(ctx context.Context, cells []string, servedType []topodatapb.TabletType, direction TrafficSwitchDirection) error + validateWorkflowHasCompleted(ctx context.Context) error + removeSourceTables(ctx context.Context, removalType TableRemovalType) error + dropSourceShards(ctx context.Context) error + dropSourceDeniedTables(ctx context.Context) error + dropTargetDeniedTables(ctx context.Context) error + freezeTargetVReplication(ctx context.Context) error + dropSourceReverseVReplicationStreams(ctx context.Context) error + dropTargetVReplicationStreams(ctx context.Context) error + removeTargetTables(ctx context.Context) error + dropTargetShards(ctx context.Context) error + deleteRoutingRules(ctx context.Context) error + deleteShardRoutingRules(ctx context.Context) error + addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error + resetSequences(ctx context.Context) error + initializeTargetSequences(ctx context.Context, sequencesByBackingTable map[string]*sequenceMetadata) error + logs() *[]string +} diff --git a/go/vt/vtctl/workflow/traffic_switcher.go b/go/vt/vtctl/workflow/traffic_switcher.go index 53a6e0ede9d..d4fe77130ae 100644 --- a/go/vt/vtctl/workflow/traffic_switcher.go +++ b/go/vt/vtctl/workflow/traffic_switcher.go @@ -17,43 +17,73 @@ limitations under the License. package workflow import ( - "bytes" "context" "errors" "fmt" - "hash/fnv" - "math" "sort" "strings" + "sync" + "time" - "google.golang.org/protobuf/encoding/prototext" + "golang.org/x/sync/errgroup" - "vitess.io/vitess/go/sets" + "vitess.io/vitess/go/json2" + "vitess.io/vitess/go/maps2" + "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" "vitess.io/vitess/go/vt/vttablet/tmclient" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) const ( // Frozen is the message value of frozen vreplication streams. Frozen = "FROZEN" + // Running is the state value of a vreplication stream in the + // replicating state. + Running = "RUNNING" + + // How long to wait when refreshing the state of each tablet in a shard. Note that these + // are refreshed in parallel, non-topo errors are ignored (in the error handling) and we + // may only do a partial refresh. Because in some cases it's unsafe to switch the traffic + // if some tablets do not refresh, we may need to look for partial results and produce + // an error (with the provided details of WHY) if we see them. + // Side note: the default lock/lease TTL in etcd is 60s so the default tablet refresh + // timeout of 60s can cause us to lose our keyspace lock before completing the + // operation too. + shardTabletRefreshTimeout = time.Duration(30 * time.Second) + + // Use pt-osc's naming convention, this format also ensures vstreamer ignores such tables. + renameTableTemplate = "_%.59s_old" // limit table name to 64 characters + + sqlDeleteWorkflow = "delete from _vt.vreplication where db_name = %s and workflow = %s" + sqlGetMaxSequenceVal = "select max(%a) as maxval from %a.%a" + sqlInitSequenceTable = "insert into %a.%a (id, next_id, cache) values (0, %d, 1000) on duplicate key update next_id = if(next_id < %d, %d, next_id)" ) -var ( - // ErrNoStreams occurs when no target streams are found for a workflow in a - // target keyspace. - ErrNoStreams = errors.New("no streams found") -) +// accessType specifies the type of access for a shard (allow/disallow writes). +type accessType int -// TrafficSwitchDirection specifies the switching direction. -type TrafficSwitchDirection int +const ( + allowWrites = accessType(iota) + disallowWrites +) // The following constants define the switching direction. const ( @@ -61,20 +91,29 @@ const ( DirectionBackward ) -// TableRemovalType specifies the way the a table will be removed during a -// DropSource for a MoveTables workflow. -type TableRemovalType int - // The following consts define if DropSource will drop or rename the table. const ( DropTable = TableRemovalType(iota) RenameTable ) -var tableRemovalTypeStrs = [...]string{ - "DROP TABLE", - "RENAME TABLE", -} +// TrafficSwitchDirection specifies the switching direction. +type TrafficSwitchDirection int + +// TableRemovalType specifies the way the a table will be removed during a +// DropSource for a MoveTables workflow. +type TableRemovalType int + +var ( + // ErrNoStreams occurs when no target streams are found for a workflow in a + // target keyspace. + ErrNoStreams = errors.New("no streams found") + + tableRemovalTypeStrs = []string{ + "DROP TABLE", + "RENAME TABLE", + } +) // String returns a string representation of a TableRemovalType func (trt TableRemovalType) String() string { @@ -85,13 +124,13 @@ func (trt TableRemovalType) String() string { return tableRemovalTypeStrs[trt] } -// ITrafficSwitcher is a temporary hack to allow us to move streamMigrater out -// of package wrangler without also needing to move trafficSwitcher in the same -// changeset. +// ITrafficSwitcher is a hack to allow us to maintain the legacy wrangler +// package for vtctl/vtctlclient while migrating most of the TrafficSwitcher +// related code to the workflow package for vtctldclient usage. // -// After moving TrafficSwitcher to this package, this type should be removed, -// and StreamMigrator should be updated to contain a field of type -// *TrafficSwitcher instead of ITrafficSwitcher. +// After moving TrafficSwitcher to this package and removing the implementation +// in wrangler, this type should be removed, and StreamMigrator should be updated +// to contain a field of type *TrafficSwitcher instead of ITrafficSwitcher. type ITrafficSwitcher interface { /* Functions that expose types and behavior contained in *wrangler.Wrangler */ @@ -165,6 +204,96 @@ func (source *MigrationSource) GetPrimary() *topo.TabletInfo { return source.primary } +// trafficSwitcher contains the metadata for switching read and write traffic +// for vreplication streams. +type trafficSwitcher struct { + ws *Server + logger logutil.Logger + + migrationType binlogdatapb.MigrationType + isPartialMigration bool + workflow string + + // if frozen is true, the rest of the fields are not set. + frozen bool + reverseWorkflow string + id int64 + sources map[string]*MigrationSource + targets map[string]*MigrationTarget + sourceKeyspace string + targetKeyspace string + tables []string + keepRoutingRules bool + sourceKSSchema *vindexes.KeyspaceSchema + optCells string // cells option passed to MoveTables/Reshard Create + optTabletTypes string // tabletTypes option passed to MoveTables/Reshard Create + externalCluster string + externalTopo *topo.Server + sourceTimeZone string + targetTimeZone string + workflowType binlogdatapb.VReplicationWorkflowType + workflowSubType binlogdatapb.VReplicationWorkflowSubType +} + +func (ts *trafficSwitcher) TopoServer() *topo.Server { return ts.ws.ts } +func (ts *trafficSwitcher) TabletManagerClient() tmclient.TabletManagerClient { return ts.ws.tmc } +func (ts *trafficSwitcher) Logger() logutil.Logger { + if ts.logger == nil { + ts.logger = logutil.NewConsoleLogger() + } + return ts.logger +} +func (ts *trafficSwitcher) VReplicationExec(ctx context.Context, alias *topodatapb.TabletAlias, query string) (*querypb.QueryResult, error) { + return ts.ws.VReplicationExec(ctx, alias, query) +} +func (ts *trafficSwitcher) ExternalTopo() *topo.Server { return ts.externalTopo } +func (ts *trafficSwitcher) MigrationType() binlogdatapb.MigrationType { return ts.migrationType } +func (ts *trafficSwitcher) IsPartialMigration() bool { return ts.isPartialMigration } +func (ts *trafficSwitcher) ReverseWorkflowName() string { return ts.reverseWorkflow } +func (ts *trafficSwitcher) SourceKeyspaceName() string { return ts.sourceKSSchema.Keyspace.Name } +func (ts *trafficSwitcher) SourceKeyspaceSchema() *vindexes.KeyspaceSchema { return ts.sourceKSSchema } +func (ts *trafficSwitcher) Sources() map[string]*MigrationSource { return ts.sources } +func (ts *trafficSwitcher) Tables() []string { return ts.tables } +func (ts *trafficSwitcher) TargetKeyspaceName() string { return ts.targetKeyspace } +func (ts *trafficSwitcher) Targets() map[string]*MigrationTarget { return ts.targets } +func (ts *trafficSwitcher) WorkflowName() string { return ts.workflow } +func (ts *trafficSwitcher) SourceTimeZone() string { return ts.sourceTimeZone } +func (ts *trafficSwitcher) TargetTimeZone() string { return ts.targetTimeZone } + +func (ts *trafficSwitcher) ForAllSources(f func(source *MigrationSource) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, source := range ts.sources { + wg.Add(1) + go func(source *MigrationSource) { + defer wg.Done() + + if err := f(source); err != nil { + allErrors.RecordError(err) + } + }(source) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + +func (ts *trafficSwitcher) ForAllTargets(f func(source *MigrationTarget) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, target := range ts.targets { + wg.Add(1) + go func(target *MigrationTarget) { + defer wg.Done() + + if err := f(target); err != nil { + allErrors.RecordError(err) + } + }(target) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + // MigrationTarget contains the metadata for each migration target. type MigrationTarget struct { si *topo.ShardInfo @@ -184,204 +313,1237 @@ func (target *MigrationTarget) GetPrimary() *topo.TabletInfo { return target.primary } -// BuildTargets collects MigrationTargets and other metadata (see TargetInfo) -// from a workflow in the target keyspace. -// -// It returns ErrNoStreams if there are no targets found for the workflow. -func BuildTargets(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, targetKeyspace string, workflow string) (*TargetInfo, error) { - targetShards, err := ts.GetShardNames(ctx, targetKeyspace) +func (ts *trafficSwitcher) SourceShards() []*topo.ShardInfo { + shards := make([]*topo.ShardInfo, 0, len(ts.Sources())) + for _, source := range ts.Sources() { + shards = append(shards, source.GetShard()) + } + return shards +} + +func (ts *trafficSwitcher) TargetShards() []*topo.ShardInfo { + shards := make([]*topo.ShardInfo, 0, len(ts.Targets())) + for _, target := range ts.Targets() { + shards = append(shards, target.GetShard()) + } + return shards +} + +func (ts *trafficSwitcher) getSourceAndTargetShardsNames() ([]string, []string) { + var sourceShards, targetShards []string + for _, si := range ts.SourceShards() { + sourceShards = append(sourceShards, si.ShardName()) + } + for _, si := range ts.TargetShards() { + targetShards = append(targetShards, si.ShardName()) + } + return sourceShards, targetShards +} + +// isPartialMoveTables returns true if whe workflow is MoveTables, has the same +// number of shards, is not covering the entire shard range, and has one-to-one +// shards in source and target. +func (ts *trafficSwitcher) isPartialMoveTables(sourceShards, targetShards []string) (bool, error) { + if ts.MigrationType() != binlogdatapb.MigrationType_TABLES { + return false, nil + } + + skr, tkr, err := getSourceAndTargetKeyRanges(sourceShards, targetShards) if err != nil { - return nil, err + return false, err + } + + if key.KeyRangeIsComplete(skr) || key.KeyRangeIsComplete(tkr) || len(sourceShards) != len(targetShards) { + return false, nil } - var ( - frozen bool - optCells string - optTabletTypes string - targets = make(map[string]*MigrationTarget, len(targetShards)) - workflowType binlogdatapb.VReplicationWorkflowType - workflowSubType binlogdatapb.VReplicationWorkflowSubType - ) - - // We check all shards in the target keyspace. Not all of them may have a - // stream. For example, if we're splitting -80 to [-40,40-80], only those - // two target shards will have vreplication streams, and the other shards in - // the target keyspace will not. - for _, targetShard := range targetShards { - si, err := ts.GetShard(ctx, targetKeyspace, targetShard) + return key.KeyRangeEqual(skr, tkr), nil +} + +// addParticipatingTablesToKeyspace updates the vschema with the new tables that +// were created as part of the Migrate flow. It is called when the Migrate flow +// is Completed. +func (ts *trafficSwitcher) addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error { + vschema, err := ts.TopoServer().GetVSchema(ctx, keyspace) + if err != nil { + return err + } + if vschema == nil { + return fmt.Errorf("no vschema found for keyspace %s", keyspace) + } + if vschema.Tables == nil { + vschema.Tables = make(map[string]*vschemapb.Table) + } + if strings.HasPrefix(tableSpecs, "{") { // user defined the vschema snippet, typically for a sharded target + wrap := fmt.Sprintf(`{"tables": %s}`, tableSpecs) + ks := &vschemapb.Keyspace{} + if err := json2.Unmarshal([]byte(wrap), ks); err != nil { + return err + } if err != nil { - return nil, err + return err + } + for table, vtab := range ks.Tables { + vschema.Tables[table] = vtab + } + } else { + if vschema.Sharded { + return fmt.Errorf("no sharded vschema was provided, so you will need to update the vschema of the target manually for the moved tables") + } + for _, table := range ts.tables { + vschema.Tables[table] = &vschemapb.Table{} + } + } + return ts.TopoServer().SaveVSchema(ctx, keyspace, vschema) +} + +func (ts *trafficSwitcher) deleteRoutingRules(ctx context.Context) error { + rules, err := topotools.GetRoutingRules(ctx, ts.TopoServer()) + if err != nil { + return err + } + for _, table := range ts.Tables() { + delete(rules, table) + delete(rules, table+"@replica") + delete(rules, table+"@rdonly") + delete(rules, ts.TargetKeyspaceName()+"."+table) + delete(rules, ts.TargetKeyspaceName()+"."+table+"@replica") + delete(rules, ts.TargetKeyspaceName()+"."+table+"@rdonly") + delete(rules, ts.SourceKeyspaceName()+"."+table) + delete(rules, ts.SourceKeyspaceName()+"."+table+"@replica") + delete(rules, ts.SourceKeyspaceName()+"."+table+"@rdonly") + } + if err := topotools.SaveRoutingRules(ctx, ts.TopoServer(), rules); err != nil { + return err + } + return nil +} + +func (ts *trafficSwitcher) deleteShardRoutingRules(ctx context.Context) error { + if !ts.isPartialMigration { + return nil + } + srr, err := topotools.GetShardRoutingRules(ctx, ts.TopoServer()) + if err != nil { + return err + } + for _, si := range ts.TargetShards() { + delete(srr, fmt.Sprintf("%s.%s", ts.targetKeyspace, si.ShardName())) + } + if err := topotools.SaveShardRoutingRules(ctx, ts.TopoServer(), srr); err != nil { + return err + } + return nil +} + +func (ts *trafficSwitcher) dropSourceDeniedTables(ctx context.Context) error { + return ts.ForAllSources(func(source *MigrationSource) error { + if _, err := ts.TopoServer().UpdateShardFields(ctx, ts.SourceKeyspaceName(), source.GetShard().ShardName(), func(si *topo.ShardInfo) error { + return si.UpdateDeniedTables(ctx, topodatapb.TabletType_PRIMARY, nil, true, ts.Tables()) + }); err != nil { + return err } + rtbsCtx, cancel := context.WithTimeout(ctx, shardTabletRefreshTimeout) + defer cancel() + _, _, err := topotools.RefreshTabletsByShard(rtbsCtx, ts.TopoServer(), ts.TabletManagerClient(), source.GetShard(), nil, ts.Logger()) + return err + }) +} - if si.PrimaryAlias == nil { - // This can happen if bad inputs are given. - return nil, fmt.Errorf("shard %v/%v doesn't have a primary set", targetKeyspace, targetShard) +func (ts *trafficSwitcher) dropTargetDeniedTables(ctx context.Context) error { + return ts.ForAllTargets(func(target *MigrationTarget) error { + if _, err := ts.TopoServer().UpdateShardFields(ctx, ts.TargetKeyspaceName(), target.GetShard().ShardName(), func(si *topo.ShardInfo) error { + return si.UpdateDeniedTables(ctx, topodatapb.TabletType_PRIMARY, nil, true, ts.Tables()) + }); err != nil { + return err } + rtbsCtx, cancel := context.WithTimeout(ctx, shardTabletRefreshTimeout) + defer cancel() + _, _, err := topotools.RefreshTabletsByShard(rtbsCtx, ts.TopoServer(), ts.TabletManagerClient(), target.GetShard(), nil, ts.Logger()) + return err + }) +} + +func (ts *trafficSwitcher) validateWorkflowHasCompleted(ctx context.Context) error { + return doValidateWorkflowHasCompleted(ctx, ts) +} + +func (ts *trafficSwitcher) dropParticipatingTablesFromKeyspace(ctx context.Context, keyspace string) error { + vschema, err := ts.TopoServer().GetVSchema(ctx, keyspace) + if err != nil { + return err + } + // VReplication does NOT create the vschema entries in SHARDED + // TARGET keyspaces -- as we cannot know the proper vindex + // definitions to use -- and we should not delete them either + // (on workflow Cancel) as the user must create them separately + // and they contain information about the vindex definitions, etc. + if vschema.Sharded && keyspace == ts.TargetKeyspaceName() { + return nil + } + for _, tableName := range ts.Tables() { + delete(vschema.Tables, tableName) + } + return ts.TopoServer().SaveVSchema(ctx, keyspace, vschema) +} + +func (ts *trafficSwitcher) removeSourceTables(ctx context.Context, removalType TableRemovalType) error { + err := ts.ForAllSources(func(source *MigrationSource) error { + for _, tableName := range ts.Tables() { + query := fmt.Sprintf("drop table %s.%s", + sqlescape.EscapeID(sqlescape.UnescapeID(source.GetPrimary().DbName())), + sqlescape.EscapeID(sqlescape.UnescapeID(tableName))) + if removalType == DropTable { + ts.Logger().Infof("%s: Dropping table %s.%s\n", + source.GetPrimary().String(), source.GetPrimary().DbName(), tableName) + } else { + renameName := getRenameFileName(tableName) + ts.Logger().Infof("%s: Renaming table %s.%s to %s.%s\n", + source.GetPrimary().String(), source.GetPrimary().DbName(), tableName, source.GetPrimary().DbName(), renameName) + query = fmt.Sprintf("rename table %s.%s TO %s.%s", + sqlescape.EscapeID(sqlescape.UnescapeID(source.GetPrimary().DbName())), + sqlescape.EscapeID(sqlescape.UnescapeID(tableName)), + sqlescape.EscapeID(sqlescape.UnescapeID(source.GetPrimary().DbName())), + sqlescape.EscapeID(sqlescape.UnescapeID(renameName))) + } + _, err := ts.ws.tmc.ExecuteFetchAsDba(ctx, source.GetPrimary().Tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ + Query: []byte(query), + MaxRows: 1, + ReloadSchema: true, + }) + if err != nil { + ts.Logger().Errorf("%s: Error removing table %s: %v", source.GetPrimary().String(), tableName, err) + return err + } + ts.Logger().Infof("%s: Removed table %s.%s\n", source.GetPrimary().String(), source.GetPrimary().DbName(), tableName) - primary, err := ts.GetTablet(ctx, si.PrimaryAlias) - if err != nil { - return nil, err } + return nil + }) + if err != nil { + return err + } + + return ts.dropParticipatingTablesFromKeyspace(ctx, ts.SourceKeyspaceName()) +} - // NB: changing the whitespace of this query breaks tests for now. - // (TODO:@ajm188) extend FakeDBClient to be less whitespace-sensitive on - // expected queries. - query := fmt.Sprintf("select id, source, message, cell, tablet_types, workflow_type, workflow_sub_type, defer_secondary_keys from _vt.vreplication where workflow=%s and db_name=%s", encodeString(workflow), encodeString(primary.DbName())) - p3qr, err := tmc.VReplicationExec(ctx, primary.Tablet, query) +// FIXME: even after dropSourceShards there are still entries in the topo, need to research and fix +func (ts *trafficSwitcher) dropSourceShards(ctx context.Context) error { + return ts.ForAllSources(func(source *MigrationSource) error { + ts.Logger().Infof("Deleting shard %s.%s\n", source.GetShard().Keyspace(), source.GetShard().ShardName()) + err := ts.ws.DeleteShard(ctx, source.GetShard().Keyspace(), source.GetShard().ShardName(), true, false) if err != nil { - return nil, err + ts.Logger().Errorf("Error deleting shard %s: %v", source.GetShard().ShardName(), err) + return err } + ts.Logger().Infof("Deleted shard %s.%s\n", source.GetShard().Keyspace(), source.GetShard().ShardName()) + return nil + }) +} - if len(p3qr.Rows) < 1 { - continue +func (ts *trafficSwitcher) switchShardReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction TrafficSwitchDirection) error { + var fromShards, toShards []*topo.ShardInfo + if direction == DirectionForward { + fromShards, toShards = ts.SourceShards(), ts.TargetShards() + } else { + fromShards, toShards = ts.TargetShards(), ts.SourceShards() + } + if err := ts.TopoServer().ValidateSrvKeyspace(ctx, ts.TargetKeyspaceName(), strings.Join(cells, ",")); err != nil { + err2 := vterrors.Wrapf(err, "Before switching shard reads, found SrvKeyspace for %s is corrupt in cell %s", + ts.TargetKeyspaceName(), strings.Join(cells, ",")) + log.Errorf("%w", err2) + return err2 + } + for _, servedType := range servedTypes { + if err := ts.ws.updateShardRecords(ctx, ts.SourceKeyspaceName(), fromShards, cells, servedType, true /* isFrom */, false /* clearSourceShards */, ts.logger); err != nil { + return err } - - target := &MigrationTarget{ - si: si, - primary: primary, - Sources: make(map[int32]*binlogdatapb.BinlogSource), + if err := ts.ws.updateShardRecords(ctx, ts.SourceKeyspaceName(), toShards, cells, servedType, false, false, ts.logger); err != nil { + return err } + err := ts.TopoServer().MigrateServedType(ctx, ts.SourceKeyspaceName(), toShards, fromShards, servedType, cells) + if err != nil { + return err + } + } + if err := ts.TopoServer().ValidateSrvKeyspace(ctx, ts.TargetKeyspaceName(), strings.Join(cells, ",")); err != nil { + err2 := vterrors.Wrapf(err, "after switching shard reads, found SrvKeyspace for %s is corrupt in cell %s", + ts.TargetKeyspaceName(), strings.Join(cells, ",")) + log.Errorf("%w", err2) + return err2 + } + return nil +} - qr := sqltypes.Proto3ToResult(p3qr) - for _, row := range qr.Named().Rows { - id, err := row["id"].ToInt32() - if err != nil { - return nil, err +func (ts *trafficSwitcher) switchTableReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction TrafficSwitchDirection) error { + log.Infof("switchTableReads: servedTypes: %+v, direction %t", servedTypes, direction) + rules, err := topotools.GetRoutingRules(ctx, ts.TopoServer()) + if err != nil { + return err + } + // We assume that the following rules were setup when the targets were created: + // table -> sourceKeyspace.table + // targetKeyspace.table -> sourceKeyspace.table + // For forward migration, we add tablet type specific rules to redirect traffic to the target. + // For backward, we redirect to source. + for _, servedType := range servedTypes { + tt := strings.ToLower(servedType.String()) + for _, table := range ts.Tables() { + if direction == DirectionForward { + log.Infof("Route direction forward") + } else { + log.Infof("Route direction backwards") } + toTarget := []string{ts.TargetKeyspaceName() + "." + table} + rules[table+"@"+tt] = toTarget + rules[ts.TargetKeyspaceName()+"."+table+"@"+tt] = toTarget + rules[ts.SourceKeyspaceName()+"."+table+"@"+tt] = toTarget + } + } + if err := topotools.SaveRoutingRules(ctx, ts.TopoServer(), rules); err != nil { + return err + } + return ts.TopoServer().RebuildSrvVSchema(ctx, cells) +} - var bls binlogdatapb.BinlogSource - rowBytes, err := row["source"].ToBytes() - if err != nil { - return nil, err - } - if err := prototext.Unmarshal(rowBytes, &bls); err != nil { - return nil, err - } +func (ts *trafficSwitcher) startReverseVReplication(ctx context.Context) error { + return ts.ForAllSources(func(source *MigrationSource) error { + query := fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s", encodeString(source.GetPrimary().DbName())) + _, err := ts.VReplicationExec(ctx, source.GetPrimary().Alias, query) + return err + }) +} - if row["message"].ToString() == Frozen { - frozen = true +func (ts *trafficSwitcher) createJournals(ctx context.Context, sourceWorkflows []string) error { + log.Infof("In createJournals for source workflows %+v", sourceWorkflows) + return ts.ForAllSources(func(source *MigrationSource) error { + if source.Journaled { + return nil + } + participants := make([]*binlogdatapb.KeyspaceShard, 0) + participantMap := make(map[string]bool) + journal := &binlogdatapb.Journal{ + Id: ts.id, + MigrationType: ts.MigrationType(), + Tables: ts.Tables(), + LocalPosition: source.Position, + Participants: participants, + SourceWorkflows: sourceWorkflows, + } + for targetShard, target := range ts.Targets() { + for _, tsource := range target.Sources { + participantMap[tsource.Shard] = true } + journal.ShardGtids = append(journal.ShardGtids, &binlogdatapb.ShardGtid{ + Keyspace: ts.TargetKeyspaceName(), + Shard: targetShard, + Gtid: target.Position, + }) + } + shards := make([]string, 0) + for shard := range participantMap { + shards = append(shards, shard) + } + sort.Sort(vreplication.ShardSorter(shards)) + for _, shard := range shards { + journal.Participants = append(journal.Participants, &binlogdatapb.KeyspaceShard{ + Keyspace: source.GetShard().Keyspace(), + Shard: shard, + }) + + } + log.Infof("Creating journal %v", journal) + ts.Logger().Infof("Creating journal: %v", journal) + statement := fmt.Sprintf("insert into _vt.resharding_journal "+ + "(id, db_name, val) "+ + "values (%v, %v, %v)", + ts.id, encodeString(source.GetPrimary().DbName()), encodeString(journal.String())) + if _, err := ts.TabletManagerClient().VReplicationExec(ctx, source.GetPrimary().Tablet, statement); err != nil { + return err + } + return nil + }) +} + +func (ts *trafficSwitcher) changeShardsAccess(ctx context.Context, keyspace string, shards []*topo.ShardInfo, access accessType) error { + if err := ts.TopoServer().UpdateDisableQueryService(ctx, keyspace, shards, topodatapb.TabletType_PRIMARY, nil, access == disallowWrites /* disable */); err != nil { + return err + } + return ts.ws.refreshPrimaryTablets(ctx, shards) +} + +func (ts *trafficSwitcher) allowTargetWrites(ctx context.Context) error { + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + return ts.allowTableTargetWrites(ctx) + } + return ts.changeShardsAccess(ctx, ts.TargetKeyspaceName(), ts.TargetShards(), allowWrites) +} + +func (ts *trafficSwitcher) allowTableTargetWrites(ctx context.Context) error { + return ts.ForAllTargets(func(target *MigrationTarget) error { + if _, err := ts.TopoServer().UpdateShardFields(ctx, ts.TargetKeyspaceName(), target.GetShard().ShardName(), func(si *topo.ShardInfo) error { + return si.UpdateDeniedTables(ctx, topodatapb.TabletType_PRIMARY, nil, true, ts.Tables()) + }); err != nil { + return err + } + rtbsCtx, cancel := context.WithTimeout(ctx, shardTabletRefreshTimeout) + defer cancel() + _, _, err := topotools.RefreshTabletsByShard(rtbsCtx, ts.TopoServer(), ts.TabletManagerClient(), target.GetShard(), nil, ts.Logger()) + return err + }) +} + +func (ts *trafficSwitcher) changeRouting(ctx context.Context) error { + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + return ts.changeWriteRoute(ctx) + } + return ts.changeShardRouting(ctx) +} + +func (ts *trafficSwitcher) changeWriteRoute(ctx context.Context) error { + if ts.isPartialMigration { + srr, err := topotools.GetShardRoutingRules(ctx, ts.TopoServer()) + if err != nil { + return err + } + for _, si := range ts.SourceShards() { + delete(srr, fmt.Sprintf("%s.%s", ts.TargetKeyspaceName(), si.ShardName())) + ts.Logger().Infof("Deleted shard routing: %v:%v", ts.TargetKeyspaceName(), si.ShardName()) + srr[fmt.Sprintf("%s.%s", ts.SourceKeyspaceName(), si.ShardName())] = ts.TargetKeyspaceName() + ts.Logger().Infof("Added shard routing: %v:%v", ts.SourceKeyspaceName(), si.ShardName()) + } + if err := topotools.SaveShardRoutingRules(ctx, ts.TopoServer(), srr); err != nil { + return err + } + } else { + rules, err := topotools.GetRoutingRules(ctx, ts.TopoServer()) + if err != nil { + return err + } + for _, table := range ts.Tables() { + targetKsTable := fmt.Sprintf("%s.%s", ts.TargetKeyspaceName(), table) + sourceKsTable := fmt.Sprintf("%s.%s", ts.SourceKeyspaceName(), table) + delete(rules, targetKsTable) + ts.Logger().Infof("Deleted routing: %s", targetKsTable) + rules[table] = []string{targetKsTable} + rules[sourceKsTable] = []string{targetKsTable} + ts.Logger().Infof("Added routing: %v %v", table, sourceKsTable) + } + if err := topotools.SaveRoutingRules(ctx, ts.TopoServer(), rules); err != nil { + return err + } + } + + return ts.TopoServer().RebuildSrvVSchema(ctx, nil) +} - target.Sources[id] = &bls - optCells = row["cell"].ToString() - optTabletTypes = row["tablet_types"].ToString() +func (ts *trafficSwitcher) changeShardRouting(ctx context.Context) error { + if err := ts.TopoServer().ValidateSrvKeyspace(ctx, ts.TargetKeyspaceName(), ""); err != nil { + err2 := vterrors.Wrapf(err, "Before changing shard routes, found SrvKeyspace for %s is corrupt", ts.TargetKeyspaceName()) + log.Errorf("%w", err2) + return err2 + } + err := ts.ForAllSources(func(source *MigrationSource) error { + _, err := ts.TopoServer().UpdateShardFields(ctx, ts.SourceKeyspaceName(), source.GetShard().ShardName(), func(si *topo.ShardInfo) error { + si.IsPrimaryServing = false + return nil + }) + return err + }) + if err != nil { + return err + } + err = ts.ForAllTargets(func(target *MigrationTarget) error { + _, err := ts.TopoServer().UpdateShardFields(ctx, ts.TargetKeyspaceName(), target.GetShard().ShardName(), func(si *topo.ShardInfo) error { + si.IsPrimaryServing = true + return nil + }) + return err + }) + if err != nil { + return err + } + err = ts.TopoServer().MigrateServedType(ctx, ts.TargetKeyspaceName(), ts.TargetShards(), ts.SourceShards(), topodatapb.TabletType_PRIMARY, nil) + if err != nil { + return err + } + if err := ts.TopoServer().ValidateSrvKeyspace(ctx, ts.TargetKeyspaceName(), ""); err != nil { + err2 := vterrors.Wrapf(err, "after changing shard routes, found SrvKeyspace for %s is corrupt", ts.TargetKeyspaceName()) + log.Errorf("%w", err2) + return err2 + } + return nil +} - workflowType = getVReplicationWorkflowType(row) - workflowSubType = getVReplicationWorkflowSubType(row) +func (ts *trafficSwitcher) getReverseVReplicationUpdateQuery(targetCell string, sourceCell string, dbname string) string { + // we try to be clever to understand what user intends: + // if target's cell is present in cells but not source's cell we replace it + // with the source's cell. + if ts.optCells != "" && targetCell != sourceCell && strings.Contains(ts.optCells+",", targetCell+",") && + !strings.Contains(ts.optCells+",", sourceCell+",") { + ts.optCells = strings.Replace(ts.optCells, targetCell, sourceCell, 1) + } + if ts.optCells != "" || ts.optTabletTypes != "" { + query := fmt.Sprintf("update _vt.vreplication set cell = '%s', tablet_types = '%s' where workflow = '%s' and db_name = '%s'", + ts.optCells, ts.optTabletTypes, ts.ReverseWorkflowName(), dbname) + return query + } + return "" +} + +func (ts *trafficSwitcher) deleteReverseVReplication(ctx context.Context) error { + return ts.ForAllSources(func(source *MigrationSource) error { + query := fmt.Sprintf(sqlDeleteWorkflow, encodeString(source.GetPrimary().DbName()), encodeString(ts.reverseWorkflow)) + if _, err := ts.TabletManagerClient().VReplicationExec(ctx, source.GetPrimary().Tablet, query); err != nil { + return err } + ts.ws.deleteWorkflowVDiffData(ctx, source.GetPrimary().Tablet, ts.reverseWorkflow) + ts.ws.optimizeCopyStateTable(source.GetPrimary().Tablet) + return nil + }) +} - targets[targetShard] = target +func (ts *trafficSwitcher) ForAllUIDs(f func(target *MigrationTarget, uid int32) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, target := range ts.Targets() { + for uid := range target.Sources { + wg.Add(1) + go func(target *MigrationTarget, uid int32) { + defer wg.Done() + + if err := f(target, uid); err != nil { + allErrors.RecordError(err) + } + }(target, uid) + } } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} - if len(targets) == 0 { - return nil, fmt.Errorf("%w in keyspace %s for %s", ErrNoStreams, targetKeyspace, workflow) +func (ts *trafficSwitcher) createReverseVReplication(ctx context.Context) error { + if err := ts.deleteReverseVReplication(ctx); err != nil { + return err } + err := ts.ForAllUIDs(func(target *MigrationTarget, uid int32) error { + bls := target.Sources[uid] + source := ts.Sources()[bls.Shard] + reverseBls := &binlogdatapb.BinlogSource{ + Keyspace: ts.TargetKeyspaceName(), + Shard: target.GetShard().ShardName(), + TabletType: bls.TabletType, + Filter: &binlogdatapb.Filter{}, + OnDdl: bls.OnDdl, + SourceTimeZone: bls.TargetTimeZone, + TargetTimeZone: bls.SourceTimeZone, + } - return &TargetInfo{ - Targets: targets, - Frozen: frozen, - OptCells: optCells, - OptTabletTypes: optTabletTypes, - WorkflowType: workflowType, - WorkflowSubType: workflowSubType, - }, nil + for _, rule := range bls.Filter.Rules { + if rule.Filter == "exclude" { + reverseBls.Filter.Rules = append(reverseBls.Filter.Rules, rule) + continue + } + var filter string + if strings.HasPrefix(rule.Match, "/") { + if ts.SourceKeyspaceSchema().Keyspace.Sharded { + filter = key.KeyRangeString(source.GetShard().KeyRange) + } + } else { + var inKeyrange string + if ts.SourceKeyspaceSchema().Keyspace.Sharded { + vtable, ok := ts.SourceKeyspaceSchema().Tables[rule.Match] + if !ok { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "table %s not found in vschema", rule.Match) + } + // We currently assume the primary vindex is the best way to filter rows + // for the table, which may not always be true. + // TODO: handle more of these edge cases explicitly, e.g. sequence tables. + switch vtable.Type { + case vindexes.TypeReference: + // For reference tables there are no vindexes and thus no filter to apply. + default: + // For non-reference tables we return an error if there's no primary + // vindex as it's not clear what to do. + if len(vtable.ColumnVindexes) > 0 && len(vtable.ColumnVindexes[0].Columns) > 0 { + inKeyrange = fmt.Sprintf(" where in_keyrange(%s, '%s.%s', '%s')", sqlparser.String(vtable.ColumnVindexes[0].Columns[0]), + ts.SourceKeyspaceName(), vtable.ColumnVindexes[0].Name, key.KeyRangeString(source.GetShard().KeyRange)) + } else { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "no primary vindex found for the %s table in the %s keyspace", + vtable.Name.String(), ts.SourceKeyspaceName()) + } + } + } + filter = fmt.Sprintf("select * from %s%s", sqlescape.EscapeID(rule.Match), inKeyrange) + } + reverseBls.Filter.Rules = append(reverseBls.Filter.Rules, &binlogdatapb.Rule{ + Match: rule.Match, + Filter: filter, + }) + } + log.Infof("Creating reverse workflow vreplication stream on tablet %s: workflow %s, startPos %s", + source.GetPrimary().Alias, ts.ReverseWorkflowName(), target.Position) + _, err := ts.VReplicationExec(ctx, source.GetPrimary().Alias, + binlogplayer.CreateVReplicationState(ts.ReverseWorkflowName(), reverseBls, target.Position, + binlogdatapb.VReplicationWorkflowState_Stopped, source.GetPrimary().DbName(), ts.workflowType, ts.workflowSubType)) + if err != nil { + return err + } + + // if user has defined the cell/tablet_types parameters in the forward workflow, update the reverse workflow as well + updateQuery := ts.getReverseVReplicationUpdateQuery(target.GetPrimary().Alias.Cell, source.GetPrimary().Alias.Cell, source.GetPrimary().DbName()) + if updateQuery != "" { + log.Infof("Updating vreplication stream entry on %s with: %s", source.GetPrimary().Alias, updateQuery) + _, err = ts.VReplicationExec(ctx, source.GetPrimary().Alias, updateQuery) + return err + } + return nil + }) + return err } -func getVReplicationWorkflowType(row sqltypes.RowNamedValues) binlogdatapb.VReplicationWorkflowType { - i, _ := row["workflow_type"].ToInt32() - return binlogdatapb.VReplicationWorkflowType(i) +func (ts *trafficSwitcher) waitForCatchup(ctx context.Context, filteredReplicationWaitTime time.Duration) error { + ctx, cancel := context.WithTimeout(ctx, filteredReplicationWaitTime) + defer cancel() + // Source writes have been stopped, wait for all streams on targets to catch up. + if err := ts.ForAllUIDs(func(target *MigrationTarget, uid int32) error { + ts.Logger().Infof("Before Catchup: uid: %d, target primary %s, target position %s, shard %s", uid, + target.GetPrimary().AliasString(), target.Position, target.GetShard().String()) + bls := target.Sources[uid] + source := ts.Sources()[bls.Shard] + ts.Logger().Infof("Before Catchup: waiting for keyspace:shard: %v:%v to reach source position %v, uid %d", + ts.TargetKeyspaceName(), target.GetShard().ShardName(), source.Position, uid) + if err := ts.TabletManagerClient().VReplicationWaitForPos(ctx, target.GetPrimary().Tablet, uid, source.Position); err != nil { + return err + } + log.Infof("After catchup: target keyspace:shard: %v:%v, source position %v, uid %d", + ts.TargetKeyspaceName(), target.GetShard().ShardName(), source.Position, uid) + ts.Logger().Infof("After catchup: position for keyspace:shard: %v:%v reached, uid %d", + ts.TargetKeyspaceName(), target.GetShard().ShardName(), uid) + if _, err := ts.TabletManagerClient().VReplicationExec(ctx, target.GetPrimary().Tablet, binlogplayer.StopVReplication(uid, "stopped for cutover")); err != nil { + log.Infof("Error marking stopped for cutover on %s, uid %d", target.GetPrimary().AliasString(), uid) + return err + } + return nil + }); err != nil { + return err + } + // all targets have caught up, record their positions for setting up reverse workflows + return ts.ForAllTargets(func(target *MigrationTarget) error { + var err error + target.Position, err = ts.TabletManagerClient().PrimaryPosition(ctx, target.GetPrimary().Tablet) + ts.Logger().Infof("After catchup, position for target primary %s, %v", target.GetPrimary().AliasString(), target.Position) + return err + }) } -func getVReplicationWorkflowSubType(row sqltypes.RowNamedValues) binlogdatapb.VReplicationWorkflowSubType { - i, _ := row["workflow_sub_type"].ToInt32() - return binlogdatapb.VReplicationWorkflowSubType(i) +func (ts *trafficSwitcher) stopSourceWrites(ctx context.Context) error { + var err error + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + err = ts.changeTableSourceWrites(ctx, disallowWrites) + } else { + err = ts.changeShardsAccess(ctx, ts.SourceKeyspaceName(), ts.SourceShards(), disallowWrites) + } + if err != nil { + log.Warningf("Error: %s", err) + return err + } + return ts.ForAllSources(func(source *MigrationSource) error { + var err error + source.Position, err = ts.TabletManagerClient().PrimaryPosition(ctx, source.GetPrimary().Tablet) + log.Infof("Stopped Source Writes. Position for source %v:%v: %v", + ts.SourceKeyspaceName(), source.GetShard().ShardName(), source.Position) + if err != nil { + log.Warningf("Error: %s", err) + } + return err + }) } -// CompareShards compares the list of shards in a workflow with the shards in -// that keyspace according to the topo. It returns an error if they do not match. -// -// This function is used to validate MoveTables workflows. -// -// (TODO|@ajm188): This function is temporarily-exported until *wrangler.trafficSwitcher -// has been fully moved over to this package. Once that refactor is finished, -// this function should be unexported. Consequently, YOU SHOULD NOT DEPEND ON -// THIS FUNCTION EXTERNALLY. -func CompareShards(ctx context.Context, keyspace string, shards []*topo.ShardInfo, ts *topo.Server) error { - shardSet := sets.New[string]() - for _, si := range shards { - shardSet.Insert(si.ShardName()) +func (ts *trafficSwitcher) changeTableSourceWrites(ctx context.Context, access accessType) error { + err := ts.ForAllSources(func(source *MigrationSource) error { + if _, err := ts.TopoServer().UpdateShardFields(ctx, ts.SourceKeyspaceName(), source.GetShard().ShardName(), func(si *topo.ShardInfo) error { + return si.UpdateDeniedTables(ctx, topodatapb.TabletType_PRIMARY, nil, access == allowWrites /* remove */, ts.Tables()) + }); err != nil { + return err + } + rtbsCtx, cancel := context.WithTimeout(ctx, shardTabletRefreshTimeout) + defer cancel() + isPartial, partialDetails, err := topotools.RefreshTabletsByShard(rtbsCtx, ts.TopoServer(), ts.TabletManagerClient(), source.GetShard(), nil, ts.Logger()) + if isPartial { + err = fmt.Errorf("failed to successfully refresh all tablets in the %s/%s source shard (%v):\n %v", + source.GetShard().Keyspace(), source.GetShard().ShardName(), err, partialDetails) + } + return err + }) + if err != nil { + log.Warningf("Error in changeTableSourceWrites: %s", err) + return err } + // Note that the denied tables, which are being updated in this method, are not part of the SrvVSchema in the topo. + // However, we are using the notification of a SrvVSchema change in VTGate to recompute the state of a + // MoveTables workflow (which also looks up denied tables from the topo). So we need to trigger a SrvVSchema change here. + return ts.TopoServer().RebuildSrvVSchema(ctx, nil) +} - topoShards, err := ts.GetShardNames(ctx, keyspace) +func (ts *trafficSwitcher) cancelMigration(ctx context.Context, sm *StreamMigrator) { + var err error + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + err = ts.changeTableSourceWrites(ctx, allowWrites) + } else { + err = ts.changeShardsAccess(ctx, ts.SourceKeyspaceName(), ts.SourceShards(), allowWrites) + } if err != nil { + ts.Logger().Errorf("Cancel migration failed:", err) + } + + sm.CancelMigration(ctx) + + err = ts.ForAllTargets(func(target *MigrationTarget) error { + query := fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s and workflow=%s", + encodeString(target.GetPrimary().DbName()), encodeString(ts.WorkflowName())) + _, err := ts.TabletManagerClient().VReplicationExec(ctx, target.GetPrimary().Tablet, query) return err + }) + if err != nil { + ts.Logger().Errorf("Cancel migration failed: could not restart vreplication: %v", err) } - topoShardSet := sets.New[string](topoShards...) - if !shardSet.Equal(topoShardSet) { - wfExtra := shardSet.Difference(topoShardSet) - topoExtra := topoShardSet.Difference(shardSet) + err = ts.deleteReverseVReplication(ctx) + if err != nil { + ts.Logger().Errorf("Cancel migration failed: could not delete revers vreplication entries: %v", err) + } +} - var rec concurrency.AllErrorRecorder - if wfExtra.Len() > 0 { - wfExtraSorted := sets.List(wfExtra) - rec.RecordError(fmt.Errorf("switch command shards not in topo: %v", wfExtraSorted)) +func (ts *trafficSwitcher) freezeTargetVReplication(ctx context.Context) error { + // Mark target streams as frozen before deleting. If SwitchWrites gets + // re-invoked after a freeze, it will skip all the previous steps + err := ts.ForAllTargets(func(target *MigrationTarget) error { + ts.Logger().Infof("Marking target streams frozen for workflow %s db_name %s", ts.WorkflowName(), target.GetPrimary().DbName()) + query := fmt.Sprintf("update _vt.vreplication set message = '%s' where db_name=%s and workflow=%s", Frozen, + encodeString(target.GetPrimary().DbName()), encodeString(ts.WorkflowName())) + _, err := ts.TabletManagerClient().VReplicationExec(ctx, target.GetPrimary().Tablet, query) + return err + }) + if err != nil { + return err + } + return nil +} + +func (ts *trafficSwitcher) dropTargetVReplicationStreams(ctx context.Context) error { + return ts.ForAllTargets(func(target *MigrationTarget) error { + ts.Logger().Infof("Deleting target streams and related data for workflow %s db_name %s", ts.WorkflowName(), target.GetPrimary().DbName()) + query := fmt.Sprintf(sqlDeleteWorkflow, encodeString(target.GetPrimary().DbName()), encodeString(ts.WorkflowName())) + if _, err := ts.TabletManagerClient().VReplicationExec(ctx, target.GetPrimary().Tablet, query); err != nil { + return err } + ts.ws.deleteWorkflowVDiffData(ctx, target.GetPrimary().Tablet, ts.WorkflowName()) + ts.ws.optimizeCopyStateTable(target.GetPrimary().Tablet) + return nil + }) +} - if topoExtra.Len() > 0 { - topoExtraSorted := sets.List(topoExtra) - rec.RecordError(fmt.Errorf("topo shards not in switch command: %v", topoExtraSorted)) +func (ts *trafficSwitcher) dropSourceReverseVReplicationStreams(ctx context.Context) error { + return ts.ForAllSources(func(source *MigrationSource) error { + ts.Logger().Infof("Deleting reverse streams and related data for workflow %s db_name %s", ts.WorkflowName(), source.GetPrimary().DbName()) + query := fmt.Sprintf(sqlDeleteWorkflow, encodeString(source.GetPrimary().DbName()), encodeString(ReverseWorkflowName(ts.WorkflowName()))) + if _, err := ts.TabletManagerClient().VReplicationExec(ctx, source.GetPrimary().Tablet, query); err != nil { + return err } + ts.ws.deleteWorkflowVDiffData(ctx, source.GetPrimary().Tablet, ReverseWorkflowName(ts.WorkflowName())) + ts.ws.optimizeCopyStateTable(source.GetPrimary().Tablet) + return nil + }) +} + +func (ts *trafficSwitcher) removeTargetTables(ctx context.Context) error { + log.Flush() + err := ts.ForAllTargets(func(target *MigrationTarget) error { + log.Infof("ForAllTargets: %+v", target) + for _, tableName := range ts.Tables() { + query := fmt.Sprintf("drop table %s.%s", + sqlescape.EscapeID(sqlescape.UnescapeID(target.GetPrimary().DbName())), + sqlescape.EscapeID(sqlescape.UnescapeID(tableName))) + ts.Logger().Infof("%s: Dropping table %s.%s\n", + target.GetPrimary().String(), target.GetPrimary().DbName(), tableName) + res, err := ts.ws.tmc.ExecuteFetchAsDba(ctx, target.GetPrimary().Tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ + Query: []byte(query), + MaxRows: 1, + ReloadSchema: true, + }) + log.Infof("Removed target table with result: %+v", res) + log.Flush() + if err != nil { + ts.Logger().Errorf("%s: Error removing table %s: %v", + target.GetPrimary().String(), tableName, err) + return err + } + ts.Logger().Infof("%s: Removed table %s.%s\n", + target.GetPrimary().String(), target.GetPrimary().DbName(), tableName) - return fmt.Errorf("mismatched shards for keyspace %s: %s", keyspace, strings.Join(rec.ErrorStrings(), "; ")) + } + return nil + }) + if err != nil { + return err } + return ts.dropParticipatingTablesFromKeyspace(ctx, ts.TargetKeyspaceName()) +} + +func (ts *trafficSwitcher) dropTargetShards(ctx context.Context) error { + return ts.ForAllTargets(func(target *MigrationTarget) error { + ts.Logger().Infof("Deleting shard %s.%s\n", target.GetShard().Keyspace(), target.GetShard().ShardName()) + err := ts.ws.DeleteShard(ctx, target.GetShard().Keyspace(), target.GetShard().ShardName(), true, false) + if err != nil { + ts.Logger().Errorf("Error deleting shard %s: %v", target.GetShard().ShardName(), err) + return err + } + ts.Logger().Infof("Deleted shard %s.%s\n", target.GetShard().Keyspace(), target.GetShard().ShardName()) + return nil + }) +} + +func (ts *trafficSwitcher) validate(ctx context.Context) error { + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + if ts.isPartialMigration { + return nil + } + sourceTopo := ts.ws.ts + if ts.externalTopo != nil { + sourceTopo = ts.externalTopo + } + + // All shards must be present. + if err := CompareShards(ctx, ts.SourceKeyspaceName(), ts.SourceShards(), sourceTopo); err != nil { + return err + } + if err := CompareShards(ctx, ts.TargetKeyspaceName(), ts.TargetShards(), ts.ws.ts); err != nil { + return err + } + // Wildcard table names not allowed. + for _, table := range ts.tables { + if strings.HasPrefix(table, "/") { + return fmt.Errorf("cannot migrate streams with wild card table names: %v", table) + } + } + } return nil } -// HashStreams produces a stable hash based on the target keyspace and migration -// targets. -func HashStreams(targetKeyspace string, targets map[string]*MigrationTarget) int64 { - var expanded []string - for shard, target := range targets { - for uid := range target.Sources { - expanded = append(expanded, fmt.Sprintf("%s:%d", shard, uid)) +// checkJournals returns true if at least one journal has been created. +// If so, it also returns the list of sourceWorkflows that need to be switched. +func (ts *trafficSwitcher) checkJournals(ctx context.Context) (journalsExist bool, sourceWorkflows []string, err error) { + var mu sync.Mutex + + err = ts.ForAllSources(func(source *MigrationSource) error { + mu.Lock() + defer mu.Unlock() + journal, exists, err := ts.ws.CheckReshardingJournalExistsOnTablet(ctx, source.GetPrimary().Tablet, ts.id) + if err != nil { + return err } + if exists { + if journal.Id != 0 { + sourceWorkflows = journal.SourceWorkflows + } + source.Journaled = true + journalsExist = true + } + return nil + }) + return journalsExist, sourceWorkflows, err +} + +// executeLockTablesOnSource executes a LOCK TABLES tb1 READ, tbl2 READ,... statement on each +// source shard's primary tablet using a non-pooled connection as the DBA user. The connection +// is closed when the LOCK TABLES statement returns, so we immediately release the LOCKs. +func (ts *trafficSwitcher) executeLockTablesOnSource(ctx context.Context) error { + ts.Logger().Infof("Locking (and then immediately unlocking) the following tables on source keyspace %v: %v", ts.SourceKeyspaceName(), ts.Tables()) + if len(ts.Tables()) == 0 { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "no tables found in the source keyspace %v associated with the %s workflow", ts.SourceKeyspaceName(), ts.WorkflowName()) } - sort.Strings(expanded) + sb := strings.Builder{} + sb.WriteString("LOCK TABLES ") + for _, tableName := range ts.Tables() { + sb.WriteString(fmt.Sprintf("%s READ,", sqlescape.EscapeID(tableName))) + } + // trim extra trailing comma + lockStmt := sb.String()[:sb.Len()-1] - hasher := fnv.New64() - hasher.Write([]byte(targetKeyspace)) + return ts.ForAllSources(func(source *MigrationSource) error { + primary := source.GetPrimary() + if primary == nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "no primary found for source shard %s", source.GetShard()) + } + tablet := primary.Tablet + _, err := ts.ws.tmc.ExecuteFetchAsDba(ctx, tablet, true, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ + Query: []byte(lockStmt), + MaxRows: uint64(1), + DisableBinlogs: false, + ReloadSchema: true, + }) + if err != nil { + ts.Logger().Errorf("Error executing %s on source tablet %v: %v", lockStmt, tablet, err) + return err + } + return err + }) +} - for _, s := range expanded { - hasher.Write([]byte(s)) +func (ts *trafficSwitcher) gatherPositions(ctx context.Context) error { + err := ts.ForAllSources(func(source *MigrationSource) error { + var err error + source.Position, err = ts.ws.tmc.PrimaryPosition(ctx, source.GetPrimary().Tablet) + ts.Logger().Infof("Position for source %v:%v: %v", ts.SourceKeyspaceName(), source.GetShard().ShardName(), source.Position) + return err + }) + if err != nil { + return err } + return ts.ForAllTargets(func(target *MigrationTarget) error { + var err error + target.Position, err = ts.ws.tmc.PrimaryPosition(ctx, target.GetPrimary().Tablet) + ts.Logger().Infof("Position for target %v:%v: %v", ts.TargetKeyspaceName(), target.GetShard().ShardName(), target.Position) + return err + }) +} - // Convert to int64 after dropping the highest bit. - return int64(hasher.Sum64() & math.MaxInt64) +func (ts *trafficSwitcher) isSequenceParticipating(ctx context.Context) (bool, error) { + vschema, err := ts.TopoServer().GetVSchema(ctx, ts.targetKeyspace) + if err != nil { + return false, err + } + if vschema == nil || len(vschema.Tables) == 0 { + return false, nil + } + sequenceFound := false + for _, table := range ts.Tables() { + vs, ok := vschema.Tables[table] + if !ok || vs == nil { + continue + } + if vs.Type == vindexes.TypeSequence { + sequenceFound = true + break + } + } + return sequenceFound, nil } -const reverseSuffix = "_reverse" +// getTargetSequenceMetadata returns a map of sequence metadata keyed by the +// backing sequence table name. If the target keyspace has no tables +// defined that use sequences for auto_increment generation then a nil +// map will be returned. +func (ts *trafficSwitcher) getTargetSequenceMetadata(ctx context.Context) (map[string]*sequenceMetadata, error) { + vschema, err := ts.TopoServer().GetVSchema(ctx, ts.targetKeyspace) + if err != nil { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get vschema for target keyspace %s: %v", + ts.targetKeyspace, err) + } + if vschema == nil || len(vschema.Tables) == 0 { // Nothing to do + return nil, nil + } + + sequencesByBackingTable, backingTablesFound, err := ts.findSequenceUsageInKeyspace(vschema) + if err != nil { + return nil, err + } + // If all of the sequence tables were defined using qualified table + // names then we don't need to search for them in other keyspaces. + if len(sequencesByBackingTable) == 0 || backingTablesFound { + return sequencesByBackingTable, nil + } -// ReverseWorkflowName returns the "reversed" name of a workflow. For a -// "forward" workflow, this is the workflow name with "_reversed" appended, and -// for a "reversed" workflow, this is the workflow name with the "_reversed" -// suffix removed. -func ReverseWorkflowName(workflow string) string { - if strings.HasSuffix(workflow, reverseSuffix) { - return workflow[:len(workflow)-len(reverseSuffix)] + if err := ctx.Err(); err != nil { + return nil, err } - return workflow + reverseSuffix + // Now we need to locate the backing sequence table(s) which will + // be in another unsharded keyspace. + smMu := sync.Mutex{} + tableCount := len(sequencesByBackingTable) + tablesFound := 0 // Used to short circuit the search + // Define the function used to search each keyspace. + searchKeyspace := func(sctx context.Context, done chan struct{}, keyspace string) error { + kvs, kerr := ts.TopoServer().GetVSchema(sctx, keyspace) + if kerr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get vschema for keyspace %s: %v", + keyspace, kerr) + } + if kvs == nil || kvs.Sharded || len(kvs.Tables) == 0 { + return nil + } + for tableName, tableDef := range kvs.Tables { + select { + case <-sctx.Done(): + return sctx.Err() + case <-done: // We've found everything we need in other goroutines + return nil + default: + } + if complete := func() bool { + smMu.Lock() // Prevent concurrent access to the map + defer smMu.Unlock() + sm := sequencesByBackingTable[tableName] + if tableDef != nil && tableDef.Type == vindexes.TypeSequence && + sm != nil && tableName == sm.backingTableName { + tablesFound++ // This is also protected by the mutex + sm.backingTableKeyspace = keyspace + // Set the default keyspace name. We will later check to + // see if the tablet we send requests to is using a dbname + // override and use that if it is. + sm.backingTableDBName = "vt_" + keyspace + if tablesFound == tableCount { // Short circuit the search + select { + case <-done: // It's already been closed + return true + default: + close(done) // Mark the search as completed + return true + } + } + } + return false + }(); complete { + return nil + } + } + return nil + } + keyspaces, err := ts.TopoServer().GetKeyspaces(ctx) + if err != nil { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get keyspaces: %v", err) + } + searchGroup, gctx := errgroup.WithContext(ctx) + searchCompleted := make(chan struct{}) + for _, keyspace := range keyspaces { + keyspace := keyspace // https://golang.org/doc/faq#closures_and_goroutines + searchGroup.Go(func() error { + return searchKeyspace(gctx, searchCompleted, keyspace) + }) + } + if err := searchGroup.Wait(); err != nil { + return nil, err + } + + if tablesFound != tableCount { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to locate all of the backing sequence tables being used; sequence table metadata: %+v", + sequencesByBackingTable) + } + return sequencesByBackingTable, nil +} + +// findSequenceUsageInKeyspace searches the keyspace's vschema for usage +// of sequences. It returns a map of sequence metadata keyed by the backing +// sequence table name -- if any usage is found -- along with a boolean to +// indicate if all of the backing sequence tables were defined using +// qualified table names (so we know where they all live) along with an +// error if any is seen. +func (ts *trafficSwitcher) findSequenceUsageInKeyspace(vschema *vschemapb.Keyspace) (map[string]*sequenceMetadata, bool, error) { + allFullyQualified := true + targets := maps2.Values(ts.Targets()) + if len(targets) == 0 || targets[0].GetPrimary() == nil { // This should never happen + return nil, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "no primary tablet found for target keyspace %s", ts.targetKeyspace) + } + targetDBName := targets[0].GetPrimary().DbName() + sequencesByBackingTable := make(map[string]*sequenceMetadata) + + for _, table := range ts.Tables() { + vs, ok := vschema.Tables[table] + if !ok || vs == nil || vs.AutoIncrement == nil || vs.AutoIncrement.Sequence == "" { + continue + } + sm := &sequenceMetadata{ + backingTableName: vs.AutoIncrement.Sequence, + usingTableName: table, + usingTableDefinition: vs, + usingTableDBName: targetDBName, + } + // If the sequence table is fully qualified in the vschema then + // we don't need to find it later. + if strings.Contains(vs.AutoIncrement.Sequence, ".") { + keyspace, tableName, found := strings.Cut(vs.AutoIncrement.Sequence, ".") + if !found { + return nil, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid sequence table name %s defined in the %s keyspace", + vs.AutoIncrement.Sequence, ts.targetKeyspace) + } + sm.backingTableName = tableName + sm.backingTableKeyspace = keyspace + // Set the default keyspace name. We will later check to + // see if the tablet we send requests to is using a dbname + // override and use that if it is. + sm.backingTableDBName = "vt_" + keyspace + } else { + allFullyQualified = false + } + sequencesByBackingTable[sm.backingTableName] = sm + } + + return sequencesByBackingTable, allFullyQualified, nil } -// Straight copy-paste of encodeString from wrangler/keyspace.go. I want to make -// this public, but it doesn't belong in package workflow. Maybe package sqltypes, -// or maybe package sqlescape? -func encodeString(in string) string { - buf := bytes.NewBuffer(nil) - sqltypes.NewVarChar(in).EncodeSQL(buf) - return buf.String() +// initializeTargetSequences initializes the backing sequence tables +// using a map keyed by the backing sequence table name. +// +// The backing tables must have already been created. This function will +// then ensure that the next value is set to a value greater than any +// currently stored in the using table on the target keyspace. If the +// backing table is updated to a new higher value then it will also tell +// the primary tablet serving the sequence to refresh/reset its cache to +// be sure that it does not provide a value that is less than the current max. +func (ts *trafficSwitcher) initializeTargetSequences(ctx context.Context, sequencesByBackingTable map[string]*sequenceMetadata) error { + initSequenceTable := func(ictx context.Context, sequenceTableName string, sequenceMetadata *sequenceMetadata) error { + // Now we need to run this query on the target shards in order + // to get the max value and set the next id for the sequence to + // a higher value. + shardResults := make([]int64, 0, len(ts.TargetShards())) + srMu := sync.Mutex{} + ierr := ts.ForAllTargets(func(target *MigrationTarget) error { + primary := target.GetPrimary() + if primary == nil || primary.GetAlias() == nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "no primary tablet found for target shard %s/%s", + ts.targetKeyspace, target.GetShard().ShardName()) + } + query := sqlparser.BuildParsedQuery(sqlGetMaxSequenceVal, + sqlescape.EscapeID(sequenceMetadata.usingTableDefinition.AutoIncrement.Column), + sqlescape.EscapeID(sequenceMetadata.usingTableDBName), + sqlescape.EscapeID(sequenceMetadata.usingTableName), + ) + qr, terr := ts.ws.tmc.ExecuteFetchAsApp(ictx, primary.Tablet, true, &tabletmanagerdatapb.ExecuteFetchAsAppRequest{ + Query: []byte(query.Query), + MaxRows: 1, + }) + if terr != nil || len(qr.Rows) != 1 { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the max used sequence value for target table %s.%s in order to initialize the backing sequence table: %v", + ts.targetKeyspace, sequenceMetadata.usingTableName, terr) + } + maxID, terr := sqltypes.Proto3ToResult(qr).Rows[0][0].ToInt64() + if terr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the max used sequence value for target table %s.%s in order to initialize the backing sequence table: %v", + ts.targetKeyspace, sequenceMetadata.usingTableName, terr) + } + srMu.Lock() + defer srMu.Unlock() + shardResults = append(shardResults, maxID) + return nil + }) + if ierr != nil { + return ierr + } + select { + case <-ictx.Done(): + return ictx.Err() + default: + } + // Sort the values to find the max value across all shards. + sort.Slice(shardResults, func(i, j int) bool { + return shardResults[i] < shardResults[j] + }) + nextVal := shardResults[len(shardResults)-1] + 1 + // Now we need to update the sequence table, if needed, in order to + // ensure that that the next value it provides is > the current max. + sequenceShard, ierr := ts.TopoServer().GetOnlyShard(ictx, sequenceMetadata.backingTableKeyspace) + if ierr != nil || sequenceShard == nil || sequenceShard.PrimaryAlias == nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the primary tablet for keyspace %s: %v", + sequenceMetadata.backingTableKeyspace, ierr) + } + sequenceTablet, ierr := ts.TopoServer().GetTablet(ictx, sequenceShard.PrimaryAlias) + if ierr != nil || sequenceTablet == nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the primary tablet for keyspace %s: %v", + sequenceMetadata.backingTableKeyspace, ierr) + } + select { + case <-ictx.Done(): + return ictx.Err() + default: + } + if sequenceTablet.DbNameOverride != "" { + sequenceMetadata.backingTableDBName = sequenceTablet.DbNameOverride + } + query := sqlparser.BuildParsedQuery(sqlInitSequenceTable, + sqlescape.EscapeID(sequenceMetadata.backingTableDBName), + sqlescape.EscapeID(sequenceMetadata.backingTableName), + nextVal, + nextVal, + nextVal, + ) + // Now execute this on the primary tablet of the unsharded keyspace + // housing the backing table. + primaryTablet, ierr := ts.TopoServer().GetTablet(ictx, sequenceShard.PrimaryAlias) + if ierr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the primary tablet for %s.%s using alias %s: %v", + sequenceShard.Keyspace(), sequenceShard.ShardName(), sequenceShard.PrimaryAlias, ierr) + } + qr, ierr := ts.ws.tmc.ExecuteFetchAsApp(ictx, primaryTablet.Tablet, true, &tabletmanagerdatapb.ExecuteFetchAsAppRequest{ + Query: []byte(query.Query), + MaxRows: 1, + }) + if ierr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to initialize the backing sequence table %s.%s: %v", + sequenceMetadata.backingTableDBName, sequenceMetadata.backingTableName, ierr) + } + // If we actually updated the backing sequence table, then we need + // to tell the primary tablet managing the sequence to refresh/reset + // its cache for the table. + if qr.RowsAffected == 0 { + return nil + } + select { + case <-ictx.Done(): + return ictx.Err() + default: + } + ts.Logger().Infof("Resetting sequence cache for backing table %s on shard %s/%s using tablet %s", + sequenceMetadata.backingTableName, sequenceShard.Keyspace(), sequenceShard.ShardName(), sequenceShard.PrimaryAlias) + ti, ierr := ts.TopoServer().GetTablet(ictx, sequenceShard.PrimaryAlias) + if ierr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get primary tablet for keyspace %s: %v", + sequenceMetadata.backingTableKeyspace, ierr) + } + ierr = ts.TabletManagerClient().ResetSequences(ictx, ti.Tablet, []string{sequenceMetadata.backingTableName}) + if ierr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to reset the sequence cache for backing table %s on shard %s/%s using tablet %s: %v", + sequenceMetadata.backingTableName, sequenceShard.Keyspace(), sequenceShard.ShardName(), sequenceShard.PrimaryAlias, ierr) + } + return nil + } + + initGroup, gctx := errgroup.WithContext(ctx) + for sequenceTableName, sequenceMetadata := range sequencesByBackingTable { + sequenceTableName, sequenceMetadata := sequenceTableName, sequenceMetadata // https://golang.org/doc/faq#closures_and_goroutines + initGroup.Go(func() error { + return initSequenceTable(gctx, sequenceTableName, sequenceMetadata) + }) + } + return initGroup.Wait() +} + +func (ts *trafficSwitcher) mustResetSequences(ctx context.Context) (bool, error) { + switch ts.workflowType { + case binlogdatapb.VReplicationWorkflowType_Migrate, + binlogdatapb.VReplicationWorkflowType_MoveTables: + return ts.isSequenceParticipating(ctx) + default: + return false, nil + } +} + +func (ts *trafficSwitcher) resetSequences(ctx context.Context) error { + var err error + mustReset := false + if mustReset, err = ts.mustResetSequences(ctx); err != nil { + return err + } + if !mustReset { + return nil + } + return ts.ForAllSources(func(source *MigrationSource) error { + ts.Logger().Infof("Resetting sequences for source shard %s.%s on tablet %s", + source.GetShard().Keyspace(), source.GetShard().ShardName(), source.GetPrimary().String()) + return ts.TabletManagerClient().ResetSequences(ctx, source.GetPrimary().Tablet, ts.Tables()) + }) } diff --git a/go/vt/vtctl/workflow/traffic_switcher_test.go b/go/vt/vtctl/workflow/traffic_switcher_test.go index 447e47d7490..c416baa18f9 100644 --- a/go/vt/vtctl/workflow/traffic_switcher_test.go +++ b/go/vt/vtctl/workflow/traffic_switcher_test.go @@ -25,7 +25,7 @@ import ( ) type testTrafficSwitcher struct { - ITrafficSwitcher + trafficSwitcher sourceKeyspaceSchema *vindexes.KeyspaceSchema } diff --git a/go/vt/vtctl/workflow/utils.go b/go/vt/vtctl/workflow/utils.go new file mode 100644 index 00000000000..bb8416414f8 --- /dev/null +++ b/go/vt/vtctl/workflow/utils.go @@ -0,0 +1,768 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workflow + +import ( + "bytes" + "context" + "fmt" + "hash/fnv" + "math" + "sort" + "strings" + "sync" + + "google.golang.org/protobuf/encoding/prototext" + + "vitess.io/vitess/go/sets" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/tmclient" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" +) + +const reverseSuffix = "_reverse" + +func getTablesInKeyspace(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, keyspace string) ([]string, error) { + shards, err := ts.GetServingShards(ctx, keyspace) + if err != nil { + return nil, err + } + if len(shards) == 0 { + return nil, fmt.Errorf("keyspace %s has no shards", keyspace) + } + primary := shards[0].PrimaryAlias + if primary == nil { + return nil, fmt.Errorf("shard does not have a primary: %v", shards[0].ShardName()) + } + allTables := []string{"/.*/"} + + ti, err := ts.GetTablet(ctx, primary) + if err != nil { + return nil, err + } + req := &tabletmanagerdatapb.GetSchemaRequest{Tables: allTables} + schema, err := tmc.GetSchema(ctx, ti.Tablet, req) + if err != nil { + return nil, err + } + log.Infof("got table schemas: %+v from source primary %v.", schema, primary) + + var sourceTables []string + for _, td := range schema.TableDefinitions { + sourceTables = append(sourceTables, td.Name) + } + return sourceTables, nil +} + +// validateNewWorkflow ensures that the specified workflow doesn't already exist +// in the keyspace. +func validateNewWorkflow(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, keyspace, workflow string) error { + allshards, err := ts.FindAllShardsInKeyspace(ctx, keyspace) + if err != nil { + return err + } + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, si := range allshards { + if si.PrimaryAlias == nil { + allErrors.RecordError(fmt.Errorf("shard has no primary: %v", si.ShardName())) + continue + } + wg.Add(1) + go func(si *topo.ShardInfo) { + defer wg.Done() + + primary, err := ts.GetTablet(ctx, si.PrimaryAlias) + if err != nil { + allErrors.RecordError(vterrors.Wrap(err, "validateWorkflowName.GetTablet")) + return + } + validations := []struct { + query string + msg string + }{{ + fmt.Sprintf("select 1 from _vt.vreplication where db_name=%s and workflow=%s", encodeString(primary.DbName()), encodeString(workflow)), + fmt.Sprintf("workflow %s already exists in keyspace %s on tablet %d", workflow, keyspace, primary.Alias.Uid), + }, { + fmt.Sprintf("select 1 from _vt.vreplication where db_name=%s and message='FROZEN' and workflow_sub_type != %d", encodeString(primary.DbName()), binlogdatapb.VReplicationWorkflowSubType_Partial), + fmt.Sprintf("found previous frozen workflow on tablet %d, please review and delete it first before creating a new workflow", + primary.Alias.Uid), + }} + for _, validation := range validations { + p3qr, err := tmc.VReplicationExec(ctx, primary.Tablet, validation.query) + if err != nil { + allErrors.RecordError(vterrors.Wrap(err, "validateWorkflowName.VReplicationExec")) + return + } + if p3qr != nil && len(p3qr.Rows) != 0 { + allErrors.RecordError(vterrors.Wrap(fmt.Errorf(validation.msg), "validateWorkflowName.VReplicationExec")) + return + } + } + }(si) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + +// createDefaultShardRoutingRules creates a reverse routing rule for +// each shard in a new partial keyspace migration workflow that does +// not already have an existing routing rule in place. +func createDefaultShardRoutingRules(ctx context.Context, ms *vtctldatapb.MaterializeSettings, ts *topo.Server) error { + srr, err := topotools.GetShardRoutingRules(ctx, ts) + if err != nil { + return err + } + allShards, err := ts.GetServingShards(ctx, ms.SourceKeyspace) + if err != nil { + return err + } + changed := false + for _, si := range allShards { + fromSource := fmt.Sprintf("%s.%s", ms.SourceKeyspace, si.ShardName()) + fromTarget := fmt.Sprintf("%s.%s", ms.TargetKeyspace, si.ShardName()) + if srr[fromSource] == "" && srr[fromTarget] == "" { + srr[fromTarget] = ms.SourceKeyspace + changed = true + log.Infof("Added default shard routing rule from %q to %q", fromTarget, fromSource) + } + } + if changed { + if err := topotools.SaveShardRoutingRules(ctx, ts, srr); err != nil { + return err + } + if err := ts.RebuildSrvVSchema(ctx, nil); err != nil { + return err + } + } + return nil +} + +func stripTableConstraints(ddl string) (string, error) { + ast, err := sqlparser.ParseStrictDDL(ddl) + if err != nil { + return "", err + } + + stripConstraints := func(cursor *sqlparser.Cursor) bool { + switch node := cursor.Node().(type) { + case sqlparser.DDLStatement: + if node.GetTableSpec() != nil { + node.GetTableSpec().Constraints = nil + } + } + return true + } + + noConstraintAST := sqlparser.Rewrite(ast, stripConstraints, nil) + newDDL := sqlparser.String(noConstraintAST) + + return newDDL, nil +} + +func stripTableForeignKeys(ddl string) (string, error) { + ast, err := sqlparser.ParseStrictDDL(ddl) + if err != nil { + return "", err + } + + stripFKConstraints := func(cursor *sqlparser.Cursor) bool { + switch node := cursor.Node().(type) { + case sqlparser.DDLStatement: + if node.GetTableSpec() != nil { + var noFKConstraints []*sqlparser.ConstraintDefinition + for _, constraint := range node.GetTableSpec().Constraints { + if constraint.Details != nil { + if _, ok := constraint.Details.(*sqlparser.ForeignKeyDefinition); !ok { + noFKConstraints = append(noFKConstraints, constraint) + } + } + } + node.GetTableSpec().Constraints = noFKConstraints + } + } + return true + } + + noFKConstraintAST := sqlparser.Rewrite(ast, stripFKConstraints, nil) + newDDL := sqlparser.String(noFKConstraintAST) + return newDDL, nil +} + +func getSourceTableDDLs(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, shards []*topo.ShardInfo) (map[string]string, error) { + sourceDDLs := make(map[string]string) + allTables := []string{"/.*/"} + + sourcePrimary := shards[0].PrimaryAlias + if sourcePrimary == nil { + return nil, fmt.Errorf("shard must have a primary for copying schema: %v", shards[0].ShardName()) + } + + ti, err := ts.GetTablet(ctx, sourcePrimary) + if err != nil { + return nil, err + } + req := &tabletmanagerdatapb.GetSchemaRequest{Tables: allTables} + sourceSchema, err := tmc.GetSchema(ctx, ti.Tablet, req) + if err != nil { + return nil, err + } + + for _, td := range sourceSchema.TableDefinitions { + sourceDDLs[td.Name] = td.Schema + } + return sourceDDLs, nil +} + +func forAllShards(shards []*topo.ShardInfo, f func(*topo.ShardInfo) error) error { + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, target := range shards { + wg.Add(1) + go func(target *topo.ShardInfo) { + defer wg.Done() + + if err := f(target); err != nil { + allErrors.RecordError(err) + } + }(target) + } + wg.Wait() + return allErrors.AggrError(vterrors.Aggregate) +} + +func matchColInSelect(col sqlparser.IdentifierCI, sel *sqlparser.Select) (*sqlparser.ColName, error) { + for _, selExpr := range sel.SelectExprs { + switch selExpr := selExpr.(type) { + case *sqlparser.StarExpr: + return &sqlparser.ColName{Name: col}, nil + case *sqlparser.AliasedExpr: + match := selExpr.As + if match.IsEmpty() { + if colExpr, ok := selExpr.Expr.(*sqlparser.ColName); ok { + match = colExpr.Name + } else { + // Cannot match against a complex expression. + continue + } + } + if match.Equal(col) { + colExpr, ok := selExpr.Expr.(*sqlparser.ColName) + if !ok { + return nil, fmt.Errorf("vindex column cannot be a complex expression: %v", sqlparser.String(selExpr)) + } + return colExpr, nil + } + default: + return nil, fmt.Errorf("unsupported select expression: %v", sqlparser.String(selExpr)) + } + } + return nil, fmt.Errorf("could not find vindex column %v", sqlparser.String(col)) +} + +func shouldInclude(table string, excludes []string) bool { + // We filter out internal tables elsewhere when processing SchemaDefinition + // structures built from the GetSchema database related API calls. In this + // case, however, the table list comes from the user via the -tables flag + // so we need to filter out internal table names here in case a user has + // explicitly specified some. + // This could happen if there's some automated tooling that creates the list of + // tables to explicitly specify. + // But given that this should never be done in practice, we ignore the request. + if schema.IsInternalOperationTableName(table) { + return false + } + for _, t := range excludes { + if t == table { + return false + } + } + return true +} + +// getMigrationID produces a reproducible hash based on the input parameters. +func getMigrationID(targetKeyspace string, shardTablets []string) (int64, error) { + sort.Strings(shardTablets) + hasher := fnv.New64() + hasher.Write([]byte(targetKeyspace)) + for _, str := range shardTablets { + hasher.Write([]byte(str)) + } + // Convert to int64 after dropping the highest bit. + return int64(hasher.Sum64() & math.MaxInt64), nil +} + +// BuildTargets collects MigrationTargets and other metadata (see TargetInfo) +// from a workflow in the target keyspace. +// +// It returns ErrNoStreams if there are no targets found for the workflow. +func BuildTargets(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, targetKeyspace string, workflow string) (*TargetInfo, error) { + targetShards, err := ts.GetShardNames(ctx, targetKeyspace) + if err != nil { + return nil, err + } + + var ( + frozen bool + optCells string + optTabletTypes string + targets = make(map[string]*MigrationTarget, len(targetShards)) + workflowType binlogdatapb.VReplicationWorkflowType + workflowSubType binlogdatapb.VReplicationWorkflowSubType + ) + + // We check all shards in the target keyspace. Not all of them may have a + // stream. For example, if we're splitting -80 to [-40,40-80], only those + // two target shards will have vreplication streams, and the other shards in + // the target keyspace will not. + for _, targetShard := range targetShards { + si, err := ts.GetShard(ctx, targetKeyspace, targetShard) + if err != nil { + return nil, err + } + + if si.PrimaryAlias == nil { + // This can happen if bad inputs are given. + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "shard %v/%v doesn't have a primary set", targetKeyspace, targetShard) + } + + primary, err := ts.GetTablet(ctx, si.PrimaryAlias) + if err != nil { + return nil, err + } + + wf, err := tmc.ReadVReplicationWorkflow(ctx, primary.Tablet, &tabletmanagerdatapb.ReadVReplicationWorkflowRequest{ + Workflow: workflow, + }) + if err != nil { + return nil, err + } + + if len(wf.Streams) < 1 { + continue + } + + target := &MigrationTarget{ + si: si, + primary: primary, + Sources: make(map[int32]*binlogdatapb.BinlogSource), + } + + optCells = wf.Cells + optTabletTypes = topoproto.MakeStringTypeCSV(wf.TabletTypes) + workflowType = wf.WorkflowType + workflowSubType = wf.WorkflowSubType + + for _, stream := range wf.Streams { + if stream.Message == Frozen { + frozen = true + } + target.Sources[stream.Id] = stream.Bls + } + + targets[targetShard] = target + } + + if len(targets) == 0 { + return nil, fmt.Errorf("%w in keyspace %s for %s", ErrNoStreams, targetKeyspace, workflow) + } + + return &TargetInfo{ + Targets: targets, + Frozen: frozen, + OptCells: optCells, + OptTabletTypes: optTabletTypes, + WorkflowType: workflowType, + WorkflowSubType: workflowSubType, + }, nil +} + +func getSourceAndTargetKeyRanges(sourceShards, targetShards []string) (*topodatapb.KeyRange, *topodatapb.KeyRange, error) { + if len(sourceShards) == 0 || len(targetShards) == 0 { + return nil, nil, fmt.Errorf("either source or target shards are missing") + } + + getKeyRange := func(shard string) (*topodatapb.KeyRange, error) { + krs, err := key.ParseShardingSpec(shard) + if err != nil { + return nil, err + } + return krs[0], nil + } + + // Happily string sorting of shards also sorts them in the ascending order of key + // ranges in vitess. + sort.Strings(sourceShards) + sort.Strings(targetShards) + getFullKeyRange := func(shards []string) (*topodatapb.KeyRange, error) { + // Expect sorted shards. + kr1, err := getKeyRange(sourceShards[0]) + if err != nil { + return nil, err + } + kr2, err := getKeyRange(sourceShards[len(sourceShards)-1]) + if err != nil { + return nil, err + } + return &topodatapb.KeyRange{ + Start: kr1.Start, + End: kr2.End, + }, nil + } + + skr, err := getFullKeyRange(sourceShards) + if err != nil { + return nil, nil, err + } + tkr, err := getFullKeyRange(targetShards) + if err != nil { + return nil, nil, err + } + + return skr, tkr, nil +} + +// CompareShards compares the list of shards in a workflow with the shards in +// that keyspace according to the topo. It returns an error if they do not match. +// +// This function is used to validate MoveTables workflows. +// +// (TODO|@ajm188): This function is temporarily-exported until *wrangler.trafficSwitcher +// has been fully moved over to this package. Once that refactor is finished, +// this function should be unexported. Consequently, YOU SHOULD NOT DEPEND ON +// THIS FUNCTION EXTERNALLY. +func CompareShards(ctx context.Context, keyspace string, shards []*topo.ShardInfo, ts *topo.Server) error { + shardSet := sets.New[string]() + for _, si := range shards { + shardSet.Insert(si.ShardName()) + } + + topoShards, err := ts.GetShardNames(ctx, keyspace) + if err != nil { + return err + } + + topoShardSet := sets.New[string](topoShards...) + if !shardSet.Equal(topoShardSet) { + wfExtra := shardSet.Difference(topoShardSet) + topoExtra := topoShardSet.Difference(shardSet) + + var rec concurrency.AllErrorRecorder + if wfExtra.Len() > 0 { + wfExtraSorted := sets.List(wfExtra) + rec.RecordError(fmt.Errorf("switch command shards not in topo: %v", wfExtraSorted)) + } + + if topoExtra.Len() > 0 { + topoExtraSorted := sets.List(topoExtra) + rec.RecordError(fmt.Errorf("topo shards not in switch command: %v", topoExtraSorted)) + } + + return fmt.Errorf("mismatched shards for keyspace %s: %s", keyspace, strings.Join(rec.ErrorStrings(), "; ")) + } + + return nil +} + +// HashStreams produces a stable hash based on the target keyspace and migration +// targets. +func HashStreams(targetKeyspace string, targets map[string]*MigrationTarget) int64 { + var expanded []string + for shard, target := range targets { + for uid := range target.Sources { + expanded = append(expanded, fmt.Sprintf("%s:%d", shard, uid)) + } + } + + sort.Strings(expanded) + + hasher := fnv.New64() + hasher.Write([]byte(targetKeyspace)) + + for _, s := range expanded { + hasher.Write([]byte(s)) + } + + // Convert to int64 after dropping the highest bit. + return int64(hasher.Sum64() & math.MaxInt64) +} + +func doValidateWorkflowHasCompleted(ctx context.Context, ts *trafficSwitcher) error { + wg := sync.WaitGroup{} + rec := concurrency.AllErrorRecorder{} + if ts.MigrationType() == binlogdatapb.MigrationType_SHARDS { + _ = ts.ForAllSources(func(source *MigrationSource) error { + wg.Add(1) + if source.GetShard().IsPrimaryServing { + rec.RecordError(fmt.Errorf(fmt.Sprintf("Shard %s is still serving", source.GetShard().ShardName()))) + } + wg.Done() + return nil + }) + } else { + _ = ts.ForAllTargets(func(target *MigrationTarget) error { + wg.Add(1) + query := fmt.Sprintf("select 1 from _vt.vreplication where db_name='%s' and workflow='%s' and message!='FROZEN'", target.GetPrimary().DbName(), ts.WorkflowName()) + rs, _ := ts.VReplicationExec(ctx, target.GetPrimary().Alias, query) + if len(rs.Rows) > 0 { + rec.RecordError(fmt.Errorf("vreplication streams are not frozen on tablet %d", target.GetPrimary().Alias.Uid)) + } + wg.Done() + return nil + }) + } + wg.Wait() + + if !ts.keepRoutingRules { + // Check if table is routable. + if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + rules, err := topotools.GetRoutingRules(ctx, ts.TopoServer()) + if err != nil { + rec.RecordError(fmt.Errorf("could not get RoutingRules")) + } + for fromTable, toTables := range rules { + for _, toTable := range toTables { + for _, table := range ts.Tables() { + if toTable == fmt.Sprintf("%s.%s", ts.SourceKeyspaceName(), table) { + rec.RecordError(fmt.Errorf("routing still exists from keyspace %s table %s to %s", ts.SourceKeyspaceName(), table, fromTable)) + } + } + } + } + } + } + if rec.HasErrors() { + return fmt.Errorf("%s", strings.Join(rec.ErrorStrings(), "\n")) + } + return nil + +} + +// ReverseWorkflowName returns the "reversed" name of a workflow. For a +// "forward" workflow, this is the workflow name with "_reverse" appended, and +// for a "reversed" workflow, this is the workflow name with the "_reverse" +// suffix removed. +func ReverseWorkflowName(workflow string) string { + if strings.HasSuffix(workflow, reverseSuffix) { + return workflow[:len(workflow)-len(reverseSuffix)] + } + + return workflow + reverseSuffix +} + +// Straight copy-paste of encodeString from wrangler/keyspace.go. I want to make +// this public, but it doesn't belong in package workflow. Maybe package sqltypes, +// or maybe package sqlescape? +func encodeString(in string) string { + buf := bytes.NewBuffer(nil) + sqltypes.NewVarChar(in).EncodeSQL(buf) + return buf.String() +} + +func getRenameFileName(tableName string) string { + return fmt.Sprintf(renameTableTemplate, tableName) +} + +func parseTabletTypes(tabletTypes []topodatapb.TabletType) (hasReplica, hasRdonly, hasPrimary bool, err error) { + for _, tabletType := range tabletTypes { + switch { + case tabletType == topodatapb.TabletType_REPLICA: + hasReplica = true + case tabletType == topodatapb.TabletType_RDONLY: + hasRdonly = true + case tabletType == topodatapb.TabletType_PRIMARY: + hasPrimary = true + default: + return false, false, false, fmt.Errorf("invalid tablet type passed %s", tabletType) + } + } + return hasReplica, hasRdonly, hasPrimary, nil +} + +func areTabletsAvailableToStreamFrom(ctx context.Context, req *vtctldatapb.WorkflowSwitchTrafficRequest, ts *trafficSwitcher, keyspace string, shards []*topo.ShardInfo) error { + // We use the value from the workflow for the TabletPicker. + tabletTypesStr := ts.optTabletTypes + cells := req.Cells + // If no cells were provided in the command then use the value from the workflow. + if len(cells) == 0 && ts.optCells != "" { + cells = strings.Split(strings.TrimSpace(ts.optCells), ",") + } + + var wg sync.WaitGroup + allErrors := &concurrency.AllErrorRecorder{} + for _, shard := range shards { + wg.Add(1) + go func(cells []string, keyspace string, shard *topo.ShardInfo) { + defer wg.Done() + if cells == nil { + cells = append(cells, shard.PrimaryAlias.Cell) + } + tp, err := discovery.NewTabletPicker(ctx, ts.ws.ts, cells, shard.PrimaryAlias.Cell, keyspace, shard.ShardName(), tabletTypesStr, discovery.TabletPickerOptions{}) + if err != nil { + allErrors.RecordError(err) + return + } + tablets := tp.GetMatchingTablets(ctx) + if len(tablets) == 0 { + allErrors.RecordError(fmt.Errorf("no tablet found to source data in keyspace %s, shard %s", keyspace, shard.ShardName())) + return + } + }(cells, keyspace, shard) + } + + wg.Wait() + if allErrors.HasErrors() { + log.Errorf("%s", allErrors.Error()) + return allErrors.Error() + } + return nil +} + +// LegacyBuildTargets collects MigrationTargets and other metadata (see TargetInfo) +// from a workflow in the target keyspace. It uses VReplicationExec to get the workflow +// details rather than the new TabletManager ReadVReplicationWorkflow RPC. This is +// being used to slowly transition all of the older code, including unit tests, over to +// the new RPC and limit the impact of the new implementation to vtctldclient. You can see +// how the unit tests were being migrated here: https://gist.github.com/mattlord/738c12befe951f8d09304ff7fdc47c46 +// +// New callers should instead use the new BuildTargets function. +// +// It returns ErrNoStreams if there are no targets found for the workflow. +func LegacyBuildTargets(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, targetKeyspace string, workflow string) (*TargetInfo, error) { + targetShards, err := ts.GetShardNames(ctx, targetKeyspace) + if err != nil { + return nil, err + } + + var ( + frozen bool + optCells string + optTabletTypes string + targets = make(map[string]*MigrationTarget, len(targetShards)) + workflowType binlogdatapb.VReplicationWorkflowType + workflowSubType binlogdatapb.VReplicationWorkflowSubType + ) + + getVReplicationWorkflowType := func(row sqltypes.RowNamedValues) binlogdatapb.VReplicationWorkflowType { + i, _ := row["workflow_type"].ToInt32() + return binlogdatapb.VReplicationWorkflowType(i) + } + + getVReplicationWorkflowSubType := func(row sqltypes.RowNamedValues) binlogdatapb.VReplicationWorkflowSubType { + i, _ := row["workflow_sub_type"].ToInt32() + return binlogdatapb.VReplicationWorkflowSubType(i) + } + + // We check all shards in the target keyspace. Not all of them may have a + // stream. For example, if we're splitting -80 to [-40,40-80], only those + // two target shards will have vreplication streams, and the other shards in + // the target keyspace will not. + for _, targetShard := range targetShards { + si, err := ts.GetShard(ctx, targetKeyspace, targetShard) + if err != nil { + return nil, err + } + + if si.PrimaryAlias == nil { + // This can happen if bad inputs are given. + return nil, fmt.Errorf("shard %v/%v doesn't have a primary set", targetKeyspace, targetShard) + } + + primary, err := ts.GetTablet(ctx, si.PrimaryAlias) + if err != nil { + return nil, err + } + + // NB: changing the whitespace of this query breaks tests for now. + // (TODO:@ajm188) extend FakeDBClient to be less whitespace-sensitive on + // expected queries. + query := fmt.Sprintf("select id, source, message, cell, tablet_types, workflow_type, workflow_sub_type, defer_secondary_keys from _vt.vreplication where workflow=%s and db_name=%s", encodeString(workflow), encodeString(primary.DbName())) + p3qr, err := tmc.VReplicationExec(ctx, primary.Tablet, query) + if err != nil { + return nil, err + } + + if len(p3qr.Rows) < 1 { + continue + } + + target := &MigrationTarget{ + si: si, + primary: primary, + Sources: make(map[int32]*binlogdatapb.BinlogSource), + } + + qr := sqltypes.Proto3ToResult(p3qr) + for _, row := range qr.Named().Rows { + id, err := row["id"].ToInt32() + if err != nil { + return nil, err + } + + var bls binlogdatapb.BinlogSource + rowBytes, err := row["source"].ToBytes() + if err != nil { + return nil, err + } + if err := prototext.Unmarshal(rowBytes, &bls); err != nil { + return nil, err + } + + if row["message"].ToString() == Frozen { + frozen = true + } + + target.Sources[id] = &bls + optCells = row["cell"].ToString() + optTabletTypes = row["tablet_types"].ToString() + + workflowType = getVReplicationWorkflowType(row) + workflowSubType = getVReplicationWorkflowSubType(row) + + } + + targets[targetShard] = target + } + + if len(targets) == 0 { + return nil, fmt.Errorf("%w in keyspace %s for %s", ErrNoStreams, targetKeyspace, workflow) + } + + return &TargetInfo{ + Targets: targets, + Frozen: frozen, + OptCells: optCells, + OptTabletTypes: optTabletTypes, + WorkflowType: workflowType, + WorkflowSubType: workflowSubType, + }, nil +} diff --git a/go/vt/vtctl/workflow/vexec/vexec.go b/go/vt/vtctl/workflow/vexec/vexec.go index 053c5d55665..477b81a1a03 100644 --- a/go/vt/vtctl/workflow/vexec/vexec.go +++ b/go/vt/vtctl/workflow/vexec/vexec.go @@ -20,12 +20,15 @@ import ( "context" "errors" "fmt" + "sync" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tmclient" querypb "vitess.io/vitess/go/vt/proto/query" @@ -134,7 +137,7 @@ func (vx *VExec) QueryContext(ctx context.Context, query string) (map[*topo.Tabl return nil, err } - planner, err := vx.GetPlanner(ctx, table) + planner, err := vx.getPlanner(ctx, table) if err != nil { return nil, err } @@ -147,6 +150,55 @@ func (vx *VExec) QueryContext(ctx context.Context, query string) (map[*topo.Tabl return qp.ExecuteScatter(ctx, vx.primaries...) } +// CallbackContext executes the given callback, returning a mapping of tablet +// to querypb.QueryResult. +// +// On first use, QueryContext will also cause the VExec instance to discover +// target tablets from the topo; that target list will be reused for all future +// callbacks executed by this instance. +func (vx *VExec) CallbackContext(ctx context.Context, callback func(context.Context, *topo.TabletInfo) (*querypb.QueryResult, error)) (map[*topo.TabletInfo]*querypb.QueryResult, error) { + if vx.primaries == nil { + if err := vx.initialize(ctx); err != nil { + return nil, err + } + } + return vx.execCallback(ctx, callback) +} + +// execCallback runs the provided callback function on backend shard primaries. +// It collects query results from all shards and returns an aggregate (UNION +// ALL -like) result. +// Note: any nil results from the callback are ignored. +func (vx *VExec) execCallback(ctx context.Context, callback func(context.Context, *topo.TabletInfo) (*querypb.QueryResult, error)) (map[*topo.TabletInfo]*querypb.QueryResult, error) { + var ( + wg sync.WaitGroup + mu sync.Mutex + + allErrors = &concurrency.AllErrorRecorder{} + results = make(map[*topo.TabletInfo]*querypb.QueryResult) + ) + for _, primary := range vx.primaries { + wg.Add(1) + go func(ctx context.Context, primary *topo.TabletInfo) { + defer wg.Done() + qr, err := callback(ctx, primary) + if err != nil { + allErrors.RecordError(err) + } else { + if qr == nil { + log.Infof("Callback returned nil result for tablet %s-%s", primary.Alias.Cell, primary.Alias.Uid) + return // no result + } + mu.Lock() + defer mu.Unlock() + results[primary] = qr + } + }(ctx, primary) + } + wg.Wait() + return results, allErrors.AggrError(vterrors.Aggregate) +} + func (vx *VExec) initialize(ctx context.Context) error { vx.primaries = nil @@ -194,13 +246,13 @@ func (vx *VExec) initialize(ctx context.Context) error { return nil } -// GetPlanner returns an appropriate implementation of a QueryPlanner, depending +// getPlanner returns an appropriate implementation of a QueryPlanner, depending // on the table being queried. // -// On first use, GetPlanner will also cause the VExec instance to discover +// On first use, getPlanner will also cause the VExec instance to discover // target tablets from the topo; that target list will be reused for all future // queries made by this instance. -func (vx *VExec) GetPlanner(ctx context.Context, table string) (QueryPlanner, error) { // TODO: private? +func (vx *VExec) getPlanner(ctx context.Context, table string) (QueryPlanner, error) { if vx.primaries == nil { if err := vx.initialize(ctx); err != nil { return nil, fmt.Errorf("error while initializing target list: %w", err) @@ -224,7 +276,7 @@ func (vx *VExec) GetPlanner(ctx context.Context, table string) (QueryPlanner, er tabletStreamIDMap[aliasStr] = make([]int64, len(qr.Rows)) for i, row := range qr.Rows { - id, err := evalengine.ToInt64(row[0]) + id, err := row[0].ToCastInt64() if err != nil { return nil, err } diff --git a/go/vt/vtctl/workflow/vreplication_stream.go b/go/vt/vtctl/workflow/vreplication_stream.go index 7d3c2b94145..980d686bae9 100644 --- a/go/vt/vtctl/workflow/vreplication_stream.go +++ b/go/vt/vtctl/workflow/vreplication_stream.go @@ -21,9 +21,7 @@ import ( "sort" "strings" - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) @@ -33,7 +31,7 @@ type VReplicationStream struct { ID int32 Workflow string BinlogSource *binlogdatapb.BinlogSource - Position mysql.Position + Position replication.Position WorkflowType binlogdatapb.VReplicationWorkflowType WorkflowSubType binlogdatapb.VReplicationWorkflowSubType DeferSecondaryKeys bool @@ -89,7 +87,7 @@ func (streams VReplicationStreams) Copy() VReplicationStreams { out[i] = &VReplicationStream{ ID: vrs.ID, Workflow: vrs.Workflow, - BinlogSource: proto.Clone(vrs.BinlogSource).(*binlogdatapb.BinlogSource), + BinlogSource: vrs.BinlogSource.CloneVT(), Position: vrs.Position, } } diff --git a/go/vt/vtctld/api.go b/go/vt/vtctld/api.go index 07eda0a1470..75bc344611c 100644 --- a/go/vt/vtctld/api.go +++ b/go/vt/vtctld/api.go @@ -122,7 +122,7 @@ func httpErrorf(w http.ResponseWriter, r *http.Request, format string, args ...a } func handleAPI(apiPath string, handlerFunc func(w http.ResponseWriter, r *http.Request) error) { - http.HandleFunc(apiPrefix+apiPath, func(w http.ResponseWriter, r *http.Request) { + servenv.HTTPHandleFunc(apiPrefix+apiPath, func(w http.ResponseWriter, r *http.Request) { defer func() { if x := recover(); x != nil { httpErrorf(w, r, "uncaught panic: %v", x) @@ -534,7 +534,7 @@ func initAPI(ctx context.Context, ts *topo.Server, actions *ActionRepository) { } requestContext := fmt.Sprintf("vtctld/api:%s", apiCallUUID) - executor := schemamanager.NewTabletExecutor(requestContext, wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), time.Duration(req.ReplicaTimeoutSeconds)*time.Second) + executor := schemamanager.NewTabletExecutor(requestContext, wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), time.Duration(req.ReplicaTimeoutSeconds)*time.Second, 0) if err := executor.SetDDLStrategy(req.DDLStrategy); err != nil { return fmt.Errorf("error setting DDL strategy: %v", err) } diff --git a/go/vt/vtctld/api_test.go b/go/vt/vtctld/api_test.go index 4437a6069f8..6443d89a56b 100644 --- a/go/vt/vtctld/api_test.go +++ b/go/vt/vtctld/api_test.go @@ -22,12 +22,12 @@ import ( "encoding/json" "io" "net/http" - "net/http/httptest" "strings" "testing" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/servenv/testutils" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/wrangler" @@ -42,15 +42,22 @@ func compactJSON(in []byte) string { } func TestAPI(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() cells := []string{"cell1", "cell2"} - ts := memorytopo.NewServer(cells...) + ts := memorytopo.NewServer(ctx, cells...) + defer ts.Close() actionRepo := NewActionRepository(ts) - server := httptest.NewServer(nil) + server := testutils.HTTPTestServer() defer server.Close() + ks1 := &topodatapb.Keyspace{ + DurabilityPolicy: "semi_sync", + SidecarDbName: "_vt_sidecar_ks1", + } + // Populate topo. Remove ServedTypes from shards to avoid ordering issues. - ts.CreateKeyspace(ctx, "ks1", &topodatapb.Keyspace{DurabilityPolicy: "semi_sync"}) + ts.CreateKeyspace(ctx, "ks1", ks1) ts.CreateShard(ctx, "ks1", "-80") ts.CreateShard(ctx, "ks1", "80-") @@ -165,7 +172,7 @@ func TestAPI(t *testing.T) { statusCode int }{ // Create snapshot keyspace with durability policy specified - {"POST", "vtctl/", `["CreateKeyspace", "--keyspace_type=SNAPSHOT", "--base_keyspace=ks1", "--snapshot_time=2006-01-02T15:04:05+00:00", "--durability-policy=semi_sync", "ks3"]`, `{ + {"POST", "vtctl/", `["CreateKeyspace", "--keyspace_type=SNAPSHOT", "--base_keyspace=ks1", "--snapshot_time=2006-01-02T15:04:05+00:00", "--durability-policy=semi_sync", "--sidecar-db-name=_vt_sidecar_ks3", "ks3"]`, `{ "Error": "durability-policy cannot be specified while creating a snapshot keyspace"`, http.StatusOK}, // Create snapshot keyspace using API {"POST", "vtctl/", `["CreateKeyspace", "--keyspace_type=SNAPSHOT", "--base_keyspace=ks1", "--snapshot_time=2006-01-02T15:04:05+00:00", "ks3"]`, `{ @@ -235,7 +242,8 @@ func TestAPI(t *testing.T) { "base_keyspace":"", "snapshot_time":null, "durability_policy":"semi_sync", - "throttler_config": null + "throttler_config": null, + "sidecar_db_name":"_vt_sidecar_ks1" }`, http.StatusOK}, {"GET", "keyspaces/nonexistent", "", "404 page not found", http.StatusNotFound}, {"POST", "keyspaces/ks1?action=TestKeyspaceAction", "", `{ @@ -316,11 +324,11 @@ func TestAPI(t *testing.T) { // vtctl RunCommand {"POST", "vtctl/", `["GetKeyspace","ks1"]`, `{ "Error": "", - "Output": "{\n \"served_froms\": [],\n \"keyspace_type\": 0,\n \"base_keyspace\": \"\",\n \"snapshot_time\": null,\n \"durability_policy\": \"semi_sync\",\n \"throttler_config\": null\n}\n\n" + "Output": "{\n \"served_froms\": [],\n \"keyspace_type\": 0,\n \"base_keyspace\": \"\",\n \"snapshot_time\": null,\n \"durability_policy\": \"semi_sync\",\n \"throttler_config\": null,\n \"sidecar_db_name\": \"_vt_sidecar_ks1\"\n}\n\n" }`, http.StatusOK}, {"POST", "vtctl/", `["GetKeyspace","ks3"]`, `{ "Error": "", - "Output": "{\n \"served_froms\": [],\n \"keyspace_type\": 1,\n \"base_keyspace\": \"ks1\",\n \"snapshot_time\": {\n \"seconds\": \"1136214245\",\n \"nanoseconds\": 0\n },\n \"durability_policy\": \"none\",\n \"throttler_config\": null\n}\n\n" + "Output": "{\n \"served_froms\": [],\n \"keyspace_type\": 1,\n \"base_keyspace\": \"ks1\",\n \"snapshot_time\": {\n \"seconds\": \"1136214245\",\n \"nanoseconds\": 0\n },\n \"durability_policy\": \"none\",\n \"throttler_config\": null,\n \"sidecar_db_name\": \"_vt\"\n}\n\n" }`, http.StatusOK}, {"POST", "vtctl/", `["GetVSchema","ks3"]`, `{ "Error": "", diff --git a/go/vt/vtctld/debug_health.go b/go/vt/vtctld/debug_health.go index c7daba3c478..ca4e1d8aae9 100644 --- a/go/vt/vtctld/debug_health.go +++ b/go/vt/vtctld/debug_health.go @@ -24,12 +24,13 @@ import ( "context" "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" ) // RegisterDebugHealthHandler register a debug health http endpoint for a vtcld server func RegisterDebugHealthHandler(ts *topo.Server) { - http.HandleFunc("/debug/health", func(w http.ResponseWriter, r *http.Request) { + servenv.HTTPHandleFunc("/debug/health", func(w http.ResponseWriter, r *http.Request) { if err := acl.CheckAccessHTTP(r, acl.MONITORING); err != nil { acl.SendError(w, err) return diff --git a/go/vt/vtctld/explorer_test.go b/go/vt/vtctld/explorer_test.go index 62eb7c01642..95ce6c6c3d9 100644 --- a/go/vt/vtctld/explorer_test.go +++ b/go/vt/vtctld/explorer_test.go @@ -17,12 +17,11 @@ limitations under the License. package vtctld import ( + "context" "path" "reflect" "testing" - "context" - "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -35,7 +34,10 @@ func TestHandlePathRoot(t *testing.T) { cells := []string{"cell1", "cell2", "cell3"} want := []string{topo.GlobalCell, "cell1", "cell2", "cell3"} - ts := memorytopo.NewServer(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cells...) + defer ts.Close() ex := newBackendExplorer(ts) result := ex.HandlePath(input, nil) if got := result.Children; !reflect.DeepEqual(got, want) { @@ -52,8 +54,10 @@ func TestHandlePathKeyspace(t *testing.T) { KeyspaceType: topodatapb.KeyspaceType_SNAPSHOT, } - ctx := context.Background() - ts := memorytopo.NewServer(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cells...) + defer ts.Close() if err := ts.CreateKeyspace(ctx, "test_keyspace", keyspace); err != nil { t.Fatalf("CreateKeyspace error: %v", err) } @@ -101,8 +105,11 @@ func TestHandlePathShard(t *testing.T) { keyspace := &topodatapb.Keyspace{} want := "is_primary_serving:true" - ctx := context.Background() - ts := memorytopo.NewServer(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cells...) + defer ts.Close() + if err := ts.CreateKeyspace(ctx, "test_keyspace", keyspace); err != nil { t.Fatalf("CreateKeyspace error: %v", err) } @@ -140,8 +147,11 @@ func TestHandlePathTablet(t *testing.T) { } want := "alias:{cell:\"cell1\" uid:123} hostname:\"example.com\" port_map:{key:\"vt\" value:4321}" - ctx := context.Background() - ts := memorytopo.NewServer(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cells...) + defer ts.Close() + if err := ts.CreateTablet(ctx, tablet); err != nil { t.Fatalf("CreateTablet error: %v", err) } @@ -164,7 +174,11 @@ func TestHandleBadPath(t *testing.T) { cells := []string{"cell1", "cell2", "cell3"} want := "Invalid cell: node doesn't exist: cells/foo/CellInfo" - ts := memorytopo.NewServer(cells...) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cells...) + defer ts.Close() + ex := newBackendExplorer(ts) result := ex.HandlePath(input, nil) if got := result.Error; !reflect.DeepEqual(got, want) { diff --git a/go/vt/vtctld/tablet_data_test.go b/go/vt/vtctld/tablet_data_test.go index 34428738e90..d40c6647ef3 100644 --- a/go/vt/vtctld/tablet_data_test.go +++ b/go/vt/vtctld/tablet_data_test.go @@ -17,13 +17,12 @@ limitations under the License. package vtctld import ( + "context" "io" "sync" "testing" "time" - "context" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/logutil" @@ -93,7 +92,7 @@ func (s *streamHealthTabletServer) streamHealthUnregister(id int) error { // BroadcastHealth will broadcast the current health to all listeners func (s *streamHealthTabletServer) BroadcastHealth() { shr := &querypb.StreamHealthResponse{ - TabletExternallyReparentedTimestamp: 42, + PrimaryTermStartTimestamp: 42, RealtimeStats: &querypb.RealtimeStats{ HealthError: "testHealthError", ReplicationLagSeconds: 72, @@ -109,7 +108,10 @@ func (s *streamHealthTabletServer) BroadcastHealth() { } func TestTabletData(t *testing.T) { - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") + defer ts.Close() wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) if err := ts.CreateKeyspace(context.Background(), "ks", &topodatapb.Keyspace{}); err != nil { @@ -138,9 +140,9 @@ func TestTabletData(t *testing.T) { }() // Start streaming and wait for the first result. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - result, err := thc.Get(ctx, tablet1.Tablet.Alias) - cancel() + requestCtx, requestCancel := context.WithTimeout(context.Background(), 5*time.Second) + defer requestCancel() + result, err := thc.Get(requestCtx, tablet1.Tablet.Alias) close(stop) if err != nil { diff --git a/go/vt/vtctld/vtctld.go b/go/vt/vtctld/vtctld.go index b9ecef23162..a265b013075 100644 --- a/go/vt/vtctld/vtctld.go +++ b/go/vt/vtctld/vtctld.go @@ -46,6 +46,7 @@ func init() { func registerVtctldFlags(fs *pflag.FlagSet) { fs.StringVar(&durabilityPolicy, "durability_policy", durabilityPolicy, "type of durability to enforce. Default is none. Other values are dictated by registered plugins") + fs.MarkDeprecated("durability_policy", "Set the correct durability policy in the keyspace information instead.") fs.BoolVar(&sanitizeLogMessages, "vtctld_sanitize_log_messages", sanitizeLogMessages, "When true, vtctld sanitizes logging.") } diff --git a/go/vt/vterrors/code.go b/go/vt/vterrors/code.go index 9b4351d8e7b..b2f83b898b8 100644 --- a/go/vt/vterrors/code.go +++ b/go/vt/vterrors/code.go @@ -41,11 +41,13 @@ var ( VT03016 = errorWithoutState("VT03016", vtrpcpb.Code_INVALID_ARGUMENT, "unknown vindex column: '%s'", "The given column is unknown in the vindex table.") VT03017 = errorWithState("VT03017", vtrpcpb.Code_INVALID_ARGUMENT, SyntaxError, "where clause can only be of the type 'pos > '", "This vstream where clause can only be a greater than filter.") VT03018 = errorWithoutState("VT03018", vtrpcpb.Code_INVALID_ARGUMENT, "NEXT used on a non-sequence table", "You cannot use the NEXT syntax on a table that is not a sequence table.") - VT03019 = errorWithoutState("VT03019", vtrpcpb.Code_INVALID_ARGUMENT, "symbol %s not found", "The given symbol was not found or is not available.") - VT03020 = errorWithoutState("VT03020", vtrpcpb.Code_INVALID_ARGUMENT, "symbol %s not found in subquery", "The given symbol was not found in the subquery.") - VT03021 = errorWithoutState("VT03021", vtrpcpb.Code_INVALID_ARGUMENT, "ambiguous symbol reference: %v", "The given symbol is ambiguous. You can use a table qualifier to make it unambiguous.") + VT03019 = errorWithoutState("VT03019", vtrpcpb.Code_INVALID_ARGUMENT, "column %s not found", "The given column was not found or is not available.") + VT03020 = errorWithoutState("VT03020", vtrpcpb.Code_INVALID_ARGUMENT, "column %s not found in subquery", "The given column was not found in the subquery.") + VT03021 = errorWithoutState("VT03021", vtrpcpb.Code_INVALID_ARGUMENT, "ambiguous column reference: %v", "The given column is ambiguous. You can use a table qualifier to make it unambiguous.") VT03022 = errorWithoutState("VT03022", vtrpcpb.Code_INVALID_ARGUMENT, "column %v not found in %v", "The given column cannot be found.") VT03023 = errorWithoutState("VT03023", vtrpcpb.Code_INVALID_ARGUMENT, "INSERT not supported when targeting a key range: %s", "When targeting a range of shards, Vitess does not know which shard to send the INSERT to.") + VT03024 = errorWithoutState("VT03024", vtrpcpb.Code_INVALID_ARGUMENT, "'%s' user defined variable does not exists", "The query cannot be prepared using the user defined variable as it does not exists for this session.") + VT03025 = errorWithState("VT03025", vtrpcpb.Code_INVALID_ARGUMENT, WrongArguments, "Incorrect arguments to %s", "The execute statement have wrong number of arguments") VT05001 = errorWithState("VT05001", vtrpcpb.Code_NOT_FOUND, DbDropExists, "cannot drop database '%s'; database does not exists", "The given database does not exist; Vitess cannot drop it.") VT05002 = errorWithState("VT05002", vtrpcpb.Code_NOT_FOUND, BadDb, "cannot alter database '%s'; unknown database", "The given database does not exist; Vitess cannot alter it.") @@ -57,6 +59,8 @@ var ( VT06001 = errorWithState("VT06001", vtrpcpb.Code_ALREADY_EXISTS, DbCreateExists, "cannot create database '%s'; database exists", "The given database name already exists.") + VT07001 = errorWithState("VT07001", vtrpcpb.Code_PERMISSION_DENIED, KillDeniedError, "%s", "Kill statement is not allowed. More in docs about how to enable it and its limitations.") + VT09001 = errorWithState("VT09001", vtrpcpb.Code_FAILED_PRECONDITION, RequiresPrimaryKey, PrimaryVindexNotSet, "the table does not have a primary vindex, the operation is impossible.") VT09002 = errorWithState("VT09002", vtrpcpb.Code_FAILED_PRECONDITION, InnodbReadOnly, "%s statement with a replica target", "This type of DML statement is not allowed on a replica target.") VT09003 = errorWithoutState("VT09003", vtrpcpb.Code_FAILED_PRECONDITION, "INSERT query does not have primary vindex column '%v' in the column list", "A vindex column is mandatory for the insert, please provide one.") @@ -67,10 +71,17 @@ var ( VT09008 = errorWithoutState("VT09008", vtrpcpb.Code_FAILED_PRECONDITION, "vexplain queries/all will actually run queries", "vexplain queries/all will actually run queries. `/*vt+ EXECUTE_DML_QUERIES */` must be set to run DML queries in vtexplain. Example: `vexplain /*vt+ EXECUTE_DML_QUERIES */ queries delete from t1`") VT09009 = errorWithoutState("VT09009", vtrpcpb.Code_FAILED_PRECONDITION, "stream is supported only for primary tablet type, current type: %v", "Stream is only supported for primary tablets, please use a stream on those tablets.") VT09010 = errorWithoutState("VT09010", vtrpcpb.Code_FAILED_PRECONDITION, "SHOW VITESS_THROTTLER STATUS works only on primary tablet", "SHOW VITESS_THROTTLER STATUS works only on primary tablet.") + VT09011 = errorWithState("VT09011", vtrpcpb.Code_FAILED_PRECONDITION, UnknownStmtHandler, "Unknown prepared statement handler (%s) given to %s", "The prepared statement is not available") + VT09012 = errorWithoutState("VT09012", vtrpcpb.Code_FAILED_PRECONDITION, "%s statement with %s tablet not allowed", "This type of statement is not allowed on the given tablet.") + VT09013 = errorWithoutState("VT09013", vtrpcpb.Code_FAILED_PRECONDITION, "semi-sync plugins are not loaded", "Durability policy wants Vitess to use semi-sync, but the MySQL instances don't have the semi-sync plugin loaded.") + VT09014 = errorWithoutState("VT09014", vtrpcpb.Code_FAILED_PRECONDITION, "vindex cannot be modified", "The vindex cannot be used as table in DML statement") + VT09015 = errorWithoutState("VT09015", vtrpcpb.Code_FAILED_PRECONDITION, "schema tracking required", "This query cannot be planned without more information on the SQL schema. Please turn on schema tracking or add authoritative columns information to your VSchema.") + VT09016 = errorWithState("VT09016", vtrpcpb.Code_FAILED_PRECONDITION, RowIsReferenced2, "Cannot delete or update a parent row: a foreign key constraint fails", "SET DEFAULT is not supported by InnoDB") VT10001 = errorWithoutState("VT10001", vtrpcpb.Code_ABORTED, "foreign key constraints are not allowed", "Foreign key constraints are not allowed, see https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/.") VT12001 = errorWithoutState("VT12001", vtrpcpb.Code_UNIMPLEMENTED, "unsupported: %s", "This statement is unsupported by Vitess. Please rewrite your query to use supported syntax.") + VT12002 = errorWithoutState("VT12002", vtrpcpb.Code_UNIMPLEMENTED, "unsupported: cross-shard foreign keys", "Vitess does not support cross shard foreign keys.") // VT13001 General Error VT13001 = errorWithoutState("VT13001", vtrpcpb.Code_INTERNAL, "[BUG] %s", "This error should not happen and is a bug. Please file an issue on GitHub: https://github.com/vitessio/vitess/issues/new/choose.") @@ -80,6 +91,7 @@ var ( VT14002 = errorWithoutState("VT14002", vtrpcpb.Code_UNAVAILABLE, "no available connection", "No available connection.") VT14003 = errorWithoutState("VT14003", vtrpcpb.Code_UNAVAILABLE, "no connection for tablet %v", "No connection for the given tablet.") VT14004 = errorWithoutState("VT14004", vtrpcpb.Code_UNAVAILABLE, "cannot find keyspace for: %s", "The specified keyspace could not be found.") + VT14005 = errorWithoutState("VT14005", vtrpcpb.Code_UNAVAILABLE, "cannot lookup sidecar database for keyspace: %s", "Failed to read sidecar database identifier.") Errors = []func(args ...any) *VitessError{ VT03001, @@ -105,6 +117,8 @@ var ( VT03021, VT03022, VT03023, + VT03024, + VT03025, VT05001, VT05002, VT05003, @@ -113,6 +127,7 @@ var ( VT05006, VT05007, VT06001, + VT07001, VT09001, VT09002, VT09003, @@ -123,14 +138,22 @@ var ( VT09008, VT09009, VT09010, + VT09011, + VT09012, + VT09013, + VT09014, + VT09015, + VT09016, VT10001, VT12001, + VT12002, VT13001, VT13002, VT14001, VT14002, VT14003, VT14004, + VT14005, } ) diff --git a/go/vt/vterrors/errors_test.go b/go/vt/vterrors/errors_test.go index c115fb41686..8c039e5874f 100644 --- a/go/vt/vterrors/errors_test.go +++ b/go/vt/vterrors/errors_test.go @@ -257,8 +257,8 @@ func TestStackFormat(t *testing.T) { assertContains(t, got, "middle", false) assertContains(t, got, "outer", false) - logErrStacks = true - defer func() { logErrStacks = false }() + setLogErrStacks(true) + defer func() { setLogErrStacks(false) }() got = fmt.Sprintf("%v", err) assertContains(t, got, "innerMost", true) assertContains(t, got, "middle", true) @@ -340,9 +340,9 @@ func TestWrapping(t *testing.T) { err3 := Wrapf(err2, "baz") errorWithoutStack := fmt.Sprintf("%v", err3) - logErrStacks = true + setLogErrStacks(true) errorWithStack := fmt.Sprintf("%v", err3) - logErrStacks = false + setLogErrStacks(false) assertEquals(t, err3.Error(), "baz: bar: foo") assertContains(t, errorWithoutStack, "foo", true) diff --git a/go/vt/vterrors/state.go b/go/vt/vterrors/state.go index ae5a4970d2b..5e3dcf22dfb 100644 --- a/go/vt/vterrors/state.go +++ b/go/vt/vterrors/state.go @@ -46,6 +46,7 @@ const ( DupFieldName WrongValueCountOnRow WrongValue + WrongArguments // failed precondition NoDB @@ -54,6 +55,9 @@ const ( CantDoThisInTransaction RequiresPrimaryKey OperandColumns + RowIsReferenced2 + NoReferencedRow2 + UnknownStmtHandler // not found BadDb @@ -79,10 +83,39 @@ const ( // permission denied AccessDeniedError + KillDeniedError // server not available ServerNotAvailable + // unknown timezone + UnknownTimeZone + + // regexp errors + RegexpStringNotTerminated + RegexpBufferOverflow + RegexpIllegalArgument + RegexpIndexOutOfBounds + RegexpInternal + RegexpRuleSyntax + RegexpBadEscapeSequence + RegexpUnimplemented + RegexpMismatchParen + RegexpBadInterval + RegexpMaxLtMin + RegexpInvalidBackRef + RegexpLookBehindLimit + RegexpMissingCloseBracket + RegexpInvalidRange + RegexpStackOverflow + RegexpTimeOut + RegexpPatternTooBig + RegexpInvalidCaptureGroup + RegexpInvalidFlag + + CharacterSetMismatch + WrongParametersToNativeFct + // No state should be added below NumOfStates NumOfStates ) diff --git a/go/vt/vterrors/vterrors.go b/go/vt/vterrors/vterrors.go index 5c5d59cf3ea..6a322837de9 100644 --- a/go/vt/vterrors/vterrors.go +++ b/go/vt/vterrors/vterrors.go @@ -91,19 +91,35 @@ import ( "errors" "fmt" "io" + "sync" "github.com/spf13/pflag" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) -// logErrStacks controls whether or not printing errors includes the +// logErrStacks controls whether printing errors includes the // embedded stack trace in the output. var logErrStacks bool +var muLogErrStacks sync.Mutex + +func getLogErrStacks() bool { + muLogErrStacks.Lock() + defer muLogErrStacks.Unlock() + return logErrStacks +} + +func setLogErrStacks(val bool) { + muLogErrStacks.Lock() + defer muLogErrStacks.Unlock() + logErrStacks = val +} // RegisterFlags registers the command-line options that control vterror // behavior on the provided FlagSet. func RegisterFlags(fs *pflag.FlagSet) { + muLogErrStacks.Lock() + defer muLogErrStacks.Unlock() fs.BoolVar(&logErrStacks, "log_err_stacks", false, "log stack traces for errors") } @@ -161,7 +177,7 @@ func (f *fundamental) Format(s fmt.State, verb rune) { case 'v': panicIfError(io.WriteString(s, "Code: "+f.code.String()+"\n")) panicIfError(io.WriteString(s, f.msg+"\n")) - if logErrStacks { + if getLogErrStacks() { f.stack.Format(s, verb) } return @@ -278,7 +294,7 @@ func (w *wrapping) Format(s fmt.State, verb rune) { if rune('v') == verb { panicIfError(fmt.Fprintf(s, "%v\n", w.Cause())) panicIfError(io.WriteString(s, w.msg)) - if logErrStacks { + if getLogErrStacks() { w.stack.Format(s, verb) } return @@ -361,5 +377,19 @@ func Print(err error) string { return fmt.Sprintf("%v: %v\n", Code(err), err.Error()) } +// TruncateError truncates error messages that are longer than the +// specified length. +func TruncateError(oldErr error, max int) error { + if oldErr == nil || max <= 0 || len(oldErr.Error()) <= max { + return oldErr + } + + if max <= 12 { + return New(Code(oldErr), "[TRUNCATED]") + } + + return New(Code(oldErr), oldErr.Error()[:max-12]+" [TRUNCATED]") +} + func (f *fundamental) ErrorState() State { return f.state } func (f *fundamental) ErrorCode() vtrpcpb.Code { return f.code } diff --git a/go/vt/vterrors/vterrorsgen/main.go b/go/vt/vterrors/vterrorsgen/main.go index f705813af8c..2aafee509e6 100644 --- a/go/vt/vterrors/vterrorsgen/main.go +++ b/go/vt/vterrors/vterrorsgen/main.go @@ -22,7 +22,7 @@ import ( "strings" "text/template" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/vterrors" ) @@ -44,8 +44,8 @@ const ( func main() { t := template.New("template") t.Funcs(map[string]any{ - "ConvertStateToMySQLErrorCode": mysql.ConvertStateToMySQLErrorCode, - "ConvertStateToMySQLState": mysql.ConvertStateToMySQLState, + "ConvertStateToMySQLErrorCode": sqlerror.ConvertStateToMySQLErrorCode, + "ConvertStateToMySQLState": sqlerror.ConvertStateToMySQLState, "FormatError": func(err error) string { s := err.Error() return strings.TrimSpace(strings.Join(strings.Split(s, ":")[1:], ":")) diff --git a/go/vt/vtexplain/testdata/multi-output/comments-output.txt b/go/vt/vtexplain/testdata/multi-output/comments-output.txt index db6c63b250a..030163fb111 100644 --- a/go/vt/vtexplain/testdata/multi-output/comments-output.txt +++ b/go/vt/vtexplain/testdata/multi-output/comments-output.txt @@ -17,25 +17,25 @@ select /* ; */ 1 from user ---------------------------------------------------------------------- select 1 from user where x=';' -1 ks_sharded/-40: select 1 from `user` where x = ';' limit 10001 -1 ks_sharded/40-80: select 1 from `user` where x = ';' limit 10001 -1 ks_sharded/80-c0: select 1 from `user` where x = ';' limit 10001 -1 ks_sharded/c0-: select 1 from `user` where x = ';' limit 10001 +1 ks_sharded/-40: select 1 from `user` where x = ';' limit 10001 /* VARCHAR */ +1 ks_sharded/40-80: select 1 from `user` where x = ';' limit 10001 /* VARCHAR */ +1 ks_sharded/80-c0: select 1 from `user` where x = ';' limit 10001 /* VARCHAR */ +1 ks_sharded/c0-: select 1 from `user` where x = ';' limit 10001 /* VARCHAR */ ---------------------------------------------------------------------- select 1 from user where x='/* hello */' -1 ks_sharded/-40: select 1 from `user` where x = '/* hello */' limit 10001 -1 ks_sharded/40-80: select 1 from `user` where x = '/* hello */' limit 10001 -1 ks_sharded/80-c0: select 1 from `user` where x = '/* hello */' limit 10001 -1 ks_sharded/c0-: select 1 from `user` where x = '/* hello */' limit 10001 +1 ks_sharded/-40: select 1 from `user` where x = '/* hello */' limit 10001 /* VARCHAR */ +1 ks_sharded/40-80: select 1 from `user` where x = '/* hello */' limit 10001 /* VARCHAR */ +1 ks_sharded/80-c0: select 1 from `user` where x = '/* hello */' limit 10001 /* VARCHAR */ +1 ks_sharded/c0-: select 1 from `user` where x = '/* hello */' limit 10001 /* VARCHAR */ ---------------------------------------------------------------------- select 1 from user where x='/* ; */' -1 ks_sharded/-40: select 1 from `user` where x = '/* ; */' limit 10001 -1 ks_sharded/40-80: select 1 from `user` where x = '/* ; */' limit 10001 -1 ks_sharded/80-c0: select 1 from `user` where x = '/* ; */' limit 10001 -1 ks_sharded/c0-: select 1 from `user` where x = '/* ; */' limit 10001 +1 ks_sharded/-40: select 1 from `user` where x = '/* ; */' limit 10001 /* VARCHAR */ +1 ks_sharded/40-80: select 1 from `user` where x = '/* ; */' limit 10001 /* VARCHAR */ +1 ks_sharded/80-c0: select 1 from `user` where x = '/* ; */' limit 10001 /* VARCHAR */ +1 ks_sharded/c0-: select 1 from `user` where x = '/* ; */' limit 10001 /* VARCHAR */ ---------------------------------------------------------------------- diff --git a/go/vt/vtexplain/testdata/multi-output/deletesharded-output.txt b/go/vt/vtexplain/testdata/multi-output/deletesharded-output.txt index e6edbcdb0d1..d88fddbfd4f 100644 --- a/go/vt/vtexplain/testdata/multi-output/deletesharded-output.txt +++ b/go/vt/vtexplain/testdata/multi-output/deletesharded-output.txt @@ -2,14 +2,14 @@ delete from music_extra where id=1 1 ks_sharded/-40: begin -1 ks_sharded/-40: delete from music_extra where id = 1 limit 10001 +1 ks_sharded/-40: delete from music_extra where id = 1 limit 10001 /* INT64 */ 1 ks_sharded/-40: commit ---------------------------------------------------------------------- delete from music_extra where id=1 and extra='abc' 1 ks_sharded/-40: begin -1 ks_sharded/-40: delete from music_extra where id = 1 and extra = 'abc' limit 10001 +1 ks_sharded/-40: delete from music_extra where id = 1 and extra = 'abc' limit 10001 /* VARCHAR */ 1 ks_sharded/-40: commit ---------------------------------------------------------------------- @@ -19,7 +19,7 @@ delete from user where id=1 1 ks_sharded/-40: select id, `name` from `user` where id = 1 limit 10001 for update 2 ks_sharded/40-80: begin 2 ks_sharded/40-80: delete from name_user_map where `name` = 'name_val_2' and user_id = 1 limit 10001 -3 ks_sharded/-40: delete from `user` where id = 1 limit 10001 +3 ks_sharded/-40: delete from `user` where id = 1 limit 10001 /* INT64 */ 4 ks_sharded/-40: commit 5 ks_sharded/40-80: commit @@ -32,7 +32,7 @@ delete from user where name='billy' 2 ks_sharded/-40: select id, `name` from `user` where `name` = 'billy' limit 10001 for update 3 ks_sharded/40-80: begin 3 ks_sharded/40-80: delete from name_user_map where `name` = 'name_val_2' and user_id = 1 limit 10001 -4 ks_sharded/-40: delete from `user` where `name` = 'billy' limit 10001 +4 ks_sharded/-40: delete from `user` where `name` = 'billy' limit 10001 /* VARCHAR */ 5 ks_sharded/c0-: commit 6 ks_sharded/-40: commit 7 ks_sharded/40-80: commit @@ -41,24 +41,24 @@ delete from user where name='billy' delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from music_extra where extra='abc' 1 ks_sharded/-40: begin -1 ks_sharded/-40: delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from music_extra where extra = 'abc' limit 10001 +1 ks_sharded/-40: delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from music_extra where extra = 'abc' limit 10001 /* VARCHAR */ 1 ks_sharded/-40: commit 1 ks_sharded/40-80: begin -1 ks_sharded/40-80: delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from music_extra where extra = 'abc' limit 10001 +1 ks_sharded/40-80: delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from music_extra where extra = 'abc' limit 10001 /* VARCHAR */ 1 ks_sharded/40-80: commit 1 ks_sharded/80-c0: begin -1 ks_sharded/80-c0: delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from music_extra where extra = 'abc' limit 10001 +1 ks_sharded/80-c0: delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from music_extra where extra = 'abc' limit 10001 /* VARCHAR */ 1 ks_sharded/80-c0: commit 1 ks_sharded/c0-: begin -1 ks_sharded/c0-: delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from music_extra where extra = 'abc' limit 10001 +1 ks_sharded/c0-: delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from music_extra where extra = 'abc' limit 10001 /* VARCHAR */ 1 ks_sharded/c0-: commit ---------------------------------------------------------------------- delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from `ks_sharded[-]`.music_extra where extra='abc' LIMIT 10 -1 ks_sharded/-40: delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from music_extra where extra = 'abc' limit 10 -1 ks_sharded/40-80: delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from music_extra where extra = 'abc' limit 10 -1 ks_sharded/80-c0: delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from music_extra where extra = 'abc' limit 10 -1 ks_sharded/c0-: delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from music_extra where extra = 'abc' limit 10 +1 ks_sharded/-40: delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from music_extra where extra = 'abc' limit 10 /* INT64 */ +1 ks_sharded/40-80: delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from music_extra where extra = 'abc' limit 10 /* INT64 */ +1 ks_sharded/80-c0: delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from music_extra where extra = 'abc' limit 10 /* INT64 */ +1 ks_sharded/c0-: delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from music_extra where extra = 'abc' limit 10 /* INT64 */ ---------------------------------------------------------------------- diff --git a/go/vt/vtexplain/testdata/multi-output/gen4-output.txt b/go/vt/vtexplain/testdata/multi-output/gen4-output.txt index 826f51e12ae..e3e58379aa5 100644 --- a/go/vt/vtexplain/testdata/multi-output/gen4-output.txt +++ b/go/vt/vtexplain/testdata/multi-output/gen4-output.txt @@ -1,12 +1,12 @@ ---------------------------------------------------------------------- select * from user_region where regionId = 4611686018427387904 and userId = 100 /* exact shard */ -1 ks_sharded/40-80: select * from user_region where regionId = 4611686018427387904 and userId = 100 limit 10001 /* exact shard */ +1 ks_sharded/40-80: select * from user_region where regionId = 4611686018427387904 and userId = 100 limit 10001 /* INT64 */ /* exact shard */ ---------------------------------------------------------------------- select * from user_region where regionId = 4611686018427387904 /* subshard */ -1 ks_sharded/40-80: select * from user_region where regionId = 4611686018427387904 limit 10001 /* subshard */ +1 ks_sharded/40-80: select * from user_region where regionId = 4611686018427387904 limit 10001 /* INT64 */ /* subshard */ ---------------------------------------------------------------------- select * from user_region where regionId in (4611686018427387903, 4611686018427387904) /* subshard */ @@ -17,9 +17,9 @@ select * from user_region where regionId in (4611686018427387903, 46116860184273 ---------------------------------------------------------------------- select * from user_region where userId = 100 /* scatter, needs prefix columns for subshard routing */ -1 ks_sharded/-40: select * from user_region where userId = 100 limit 10001 /* scatter, needs prefix columns for subshard routing */ -1 ks_sharded/40-80: select * from user_region where userId = 100 limit 10001 /* scatter, needs prefix columns for subshard routing */ -1 ks_sharded/80-c0: select * from user_region where userId = 100 limit 10001 /* scatter, needs prefix columns for subshard routing */ -1 ks_sharded/c0-: select * from user_region where userId = 100 limit 10001 /* scatter, needs prefix columns for subshard routing */ +1 ks_sharded/-40: select * from user_region where userId = 100 limit 10001 /* INT64 */ /* scatter, needs prefix columns for subshard routing */ +1 ks_sharded/40-80: select * from user_region where userId = 100 limit 10001 /* INT64 */ /* scatter, needs prefix columns for subshard routing */ +1 ks_sharded/80-c0: select * from user_region where userId = 100 limit 10001 /* INT64 */ /* scatter, needs prefix columns for subshard routing */ +1 ks_sharded/c0-: select * from user_region where userId = 100 limit 10001 /* INT64 */ /* scatter, needs prefix columns for subshard routing */ ---------------------------------------------------------------------- diff --git a/go/vt/vtexplain/testdata/multi-output/insertsharded-output.txt b/go/vt/vtexplain/testdata/multi-output/insertsharded-output.txt index 95e4823abce..9227ddd6797 100644 --- a/go/vt/vtexplain/testdata/multi-output/insertsharded-output.txt +++ b/go/vt/vtexplain/testdata/multi-output/insertsharded-output.txt @@ -47,7 +47,7 @@ insert into user (id, name, nickname) values(2, 'bob', 'bobby') on duplicate key 1 ks_sharded/c0-: insert ignore into name_user_map(`name`, user_id) values ('bob', 2) 2 ks_sharded/c0-: select `name` from name_user_map where `name` = 'bob' and user_id = 2 limit 10001 3 ks_sharded/-40: begin -3 ks_sharded/-40: insert into `user`(id, `name`, nickname) values (2, 'bob', 'bobby') on duplicate key update nickname = 'bobby' +3 ks_sharded/-40: insert into `user`(id, `name`, nickname) values (2, 'bob', 'bobby') on duplicate key update nickname = 'bobby' /* VARCHAR */ 4 ks_sharded/c0-: commit 5 ks_sharded/-40: commit @@ -82,10 +82,10 @@ insert into member (lkp, more_id, id) values ("a", 1, 1), ("b", 1, 3), ("c", 1, 4 ks_sharded/40-80: select lkp from lkp_idx where lkp = 'c' and id = 1 limit 10001 5 ks_sharded/-40: begin 5 ks_sharded/-40: savepoint x1 -5 ks_sharded/-40: insert into `member`(lkp, more_id, id) values ('a', 1, 1), ('c', 1, 1) on duplicate key update more_id = 2 +5 ks_sharded/-40: insert into `member`(lkp, more_id, id) values ('a', 1, 1), ('c', 1, 1) on duplicate key update more_id = 2 /* INT64 */ 5 ks_sharded/40-80: begin 5 ks_sharded/40-80: savepoint x1 -5 ks_sharded/40-80: insert into `member`(lkp, more_id, id) values ('b', 1, 3) on duplicate key update more_id = 2 +5 ks_sharded/40-80: insert into `member`(lkp, more_id, id) values ('b', 1, 3) on duplicate key update more_id = 2 /* INT64 */ ---------------------------------------------------------------------- commit @@ -93,4 +93,4 @@ commit 6 ks_sharded/-40: commit 7 ks_sharded/40-80: commit ----------------------------------------------------------------------- \ No newline at end of file +---------------------------------------------------------------------- diff --git a/go/vt/vtexplain/testdata/multi-output/selectsharded-output.txt b/go/vt/vtexplain/testdata/multi-output/selectsharded-output.txt index b7d0c359002..7ae20ca1a7f 100644 --- a/go/vt/vtexplain/testdata/multi-output/selectsharded-output.txt +++ b/go/vt/vtexplain/testdata/multi-output/selectsharded-output.txt @@ -9,52 +9,52 @@ select * from user /* scatter */ ---------------------------------------------------------------------- select * from user where id = 1 /* equal unique */ -1 ks_sharded/-40: select * from `user` where id = 1 limit 10001 /* equal unique */ +1 ks_sharded/-40: select * from `user` where id = 1 limit 10001 /* INT64 */ /* equal unique */ ---------------------------------------------------------------------- select * from user where id > 100 /* scatter range */ -1 ks_sharded/-40: select * from `user` where id > 100 limit 10001 /* scatter range */ -1 ks_sharded/40-80: select * from `user` where id > 100 limit 10001 /* scatter range */ -1 ks_sharded/80-c0: select * from `user` where id > 100 limit 10001 /* scatter range */ -1 ks_sharded/c0-: select * from `user` where id > 100 limit 10001 /* scatter range */ +1 ks_sharded/-40: select * from `user` where id > 100 limit 10001 /* INT64 */ /* scatter range */ +1 ks_sharded/40-80: select * from `user` where id > 100 limit 10001 /* INT64 */ /* scatter range */ +1 ks_sharded/80-c0: select * from `user` where id > 100 limit 10001 /* INT64 */ /* scatter range */ +1 ks_sharded/c0-: select * from `user` where id > 100 limit 10001 /* INT64 */ /* scatter range */ ---------------------------------------------------------------------- select * from user where name = 'bob' /* vindex lookup */ 1 ks_sharded/c0-: select `name`, user_id from name_user_map where `name` in ('bob') limit 10001 /* vindex lookup */ -2 ks_sharded/-40: select * from `user` where `name` = 'bob' limit 10001 /* vindex lookup */ +2 ks_sharded/-40: select * from `user` where `name` = 'bob' limit 10001 /* VARCHAR */ /* vindex lookup */ ---------------------------------------------------------------------- select * from user where name = 'bob' or nickname = 'bob' /* vindex lookup */ -1 ks_sharded/-40: select * from `user` where `name` = 'bob' or nickname = 'bob' limit 10001 /* vindex lookup */ -1 ks_sharded/40-80: select * from `user` where `name` = 'bob' or nickname = 'bob' limit 10001 /* vindex lookup */ -1 ks_sharded/80-c0: select * from `user` where `name` = 'bob' or nickname = 'bob' limit 10001 /* vindex lookup */ -1 ks_sharded/c0-: select * from `user` where `name` = 'bob' or nickname = 'bob' limit 10001 /* vindex lookup */ +1 ks_sharded/-40: select * from `user` where `name` = 'bob' or nickname = 'bob' limit 10001 /* VARCHAR */ /* vindex lookup */ +1 ks_sharded/40-80: select * from `user` where `name` = 'bob' or nickname = 'bob' limit 10001 /* VARCHAR */ /* vindex lookup */ +1 ks_sharded/80-c0: select * from `user` where `name` = 'bob' or nickname = 'bob' limit 10001 /* VARCHAR */ /* vindex lookup */ +1 ks_sharded/c0-: select * from `user` where `name` = 'bob' or nickname = 'bob' limit 10001 /* VARCHAR */ /* vindex lookup */ ---------------------------------------------------------------------- select u.id, u.name, u.nickname, n.info from user u join name_info n on u.name = n.name /* join on varchar */ -1 ks_sharded/-40: select u.`name`, u.id, u.nickname from `user` as u limit 10001 /* join on varchar */ -1 ks_sharded/40-80: select u.`name`, u.id, u.nickname from `user` as u limit 10001 /* join on varchar */ -1 ks_sharded/80-c0: select u.`name`, u.id, u.nickname from `user` as u limit 10001 /* join on varchar */ -1 ks_sharded/c0-: select u.`name`, u.id, u.nickname from `user` as u limit 10001 /* join on varchar */ -2 ks_sharded/80-c0: select n.info from name_info as n where n.`name` = 'name_val_1' limit 10001 /* join on varchar */ -3 ks_sharded/80-c0: select n.info from name_info as n where n.`name` = 'name_val_1' limit 10001 /* join on varchar */ -4 ks_sharded/80-c0: select n.info from name_info as n where n.`name` = 'name_val_1' limit 10001 /* join on varchar */ -5 ks_sharded/80-c0: select n.info from name_info as n where n.`name` = 'name_val_1' limit 10001 /* join on varchar */ +1 ks_sharded/-40: select u.id, u.`name`, u.nickname from `user` as u limit 10001 /* join on varchar */ +1 ks_sharded/40-80: select u.id, u.`name`, u.nickname from `user` as u limit 10001 /* join on varchar */ +1 ks_sharded/80-c0: select u.id, u.`name`, u.nickname from `user` as u limit 10001 /* join on varchar */ +1 ks_sharded/c0-: select u.id, u.`name`, u.nickname from `user` as u limit 10001 /* join on varchar */ +2 ks_sharded/40-80: select n.info from name_info as n where n.`name` = 'name_val_2' limit 10001 /* join on varchar */ +3 ks_sharded/40-80: select n.info from name_info as n where n.`name` = 'name_val_2' limit 10001 /* join on varchar */ +4 ks_sharded/40-80: select n.info from name_info as n where n.`name` = 'name_val_2' limit 10001 /* join on varchar */ +5 ks_sharded/40-80: select n.info from name_info as n where n.`name` = 'name_val_2' limit 10001 /* join on varchar */ ---------------------------------------------------------------------- select m.id, m.song, e.extra from music m join music_extra e on m.id = e.id where m.user_id = 100 /* join on int */ -1 ks_sharded/80-c0: select m.id, m.song from music as m where m.user_id = 100 limit 10001 /* join on int */ +1 ks_sharded/80-c0: select m.id, m.song from music as m where m.user_id = 100 limit 10001 /* INT64 */ /* join on int */ 2 ks_sharded/-40: select e.extra from music_extra as e where e.id = 1 limit 10001 /* join on int */ ---------------------------------------------------------------------- select count(*) from user where id = 1 /* point aggregate */ -1 ks_sharded/-40: select count(*) from `user` where id = 1 limit 10001 /* point aggregate */ +1 ks_sharded/-40: select count(*) from `user` where id = 1 limit 10001 /* INT64 */ /* point aggregate */ ---------------------------------------------------------------------- select count(*) from user where name in ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j') /* scatter aggregate */ @@ -137,68 +137,68 @@ select name from user where exists (select id from t1) /* non-correlated subquer ---------------------------------------------------------------------- select * from name_info order by info /* select * and order by varchar column */ -1 ks_sharded/-40: select `name`, info, weight_string(info) from name_info order by info asc limit 10001 /* select * and order by varchar column */ -1 ks_sharded/40-80: select `name`, info, weight_string(info) from name_info order by info asc limit 10001 /* select * and order by varchar column */ -1 ks_sharded/80-c0: select `name`, info, weight_string(info) from name_info order by info asc limit 10001 /* select * and order by varchar column */ -1 ks_sharded/c0-: select `name`, info, weight_string(info) from name_info order by info asc limit 10001 /* select * and order by varchar column */ +1 ks_sharded/-40: select `name`, info from name_info order by info asc limit 10001 /* select * and order by varchar column */ +1 ks_sharded/40-80: select `name`, info from name_info order by info asc limit 10001 /* select * and order by varchar column */ +1 ks_sharded/80-c0: select `name`, info from name_info order by info asc limit 10001 /* select * and order by varchar column */ +1 ks_sharded/c0-: select `name`, info from name_info order by info asc limit 10001 /* select * and order by varchar column */ ---------------------------------------------------------------------- select distinct(name) from user where id = 1 /* select distinct */ -1 ks_sharded/-40: select distinct `name` from `user` where id = 1 limit 10001 /* select distinct */ +1 ks_sharded/-40: select distinct `name` from `user` where id = 1 limit 10001 /* INT64 */ /* select distinct */ ---------------------------------------------------------------------- select distinct name from user where id = 1 /* select distinct */ -1 ks_sharded/-40: select distinct `name` from `user` where id = 1 limit 10001 /* select distinct */ +1 ks_sharded/-40: select distinct `name` from `user` where id = 1 limit 10001 /* INT64 */ /* select distinct */ ---------------------------------------------------------------------- select id, substring(name, 1, -1) from user where id = 123 /* select substring */ -1 ks_sharded/-40: select id, substr(`name`, 1, -1) from `user` where id = 123 limit 10001 /* select substring */ +1 ks_sharded/-40: select id, substr(`name`, 1, -1) from `user` where id = 123 limit 10001 /* INT64 */ /* select substring */ ---------------------------------------------------------------------- select id, substring_index(name, '123456', -1) from user where id = 123 /* select substring_index */ -1 ks_sharded/-40: select id, substring_index(`name`, '123456', -1) from `user` where id = 123 limit 10001 /* select substring_index */ +1 ks_sharded/-40: select id, substring_index(`name`, '123456', -1) from `user` where id = 123 limit 10001 /* INT64 */ /* select substring_index */ ---------------------------------------------------------------------- select id, case when name = 'alice' then 'ALICE' when name = 'bob' then 'BOB' end as name from user where id = 1 /* select case */ -1 ks_sharded/-40: select id, case when `name` = 'alice' then 'ALICE' when `name` = 'bob' then 'BOB' end as `name` from `user` where id = 1 limit 10001 /* select case */ +1 ks_sharded/-40: select id, case when `name` = 'alice' then 'ALICE' when `name` = 'bob' then 'BOB' end as `name` from `user` where id = 1 limit 10001 /* INT64 */ /* select case */ ---------------------------------------------------------------------- select id, case when name = 'alice' then 'ALICE' when name = 'bob' then 'BOB' else 'OTHER' end as name from user where id = 1 /* select case */ -1 ks_sharded/-40: select id, case when `name` = 'alice' then 'ALICE' when `name` = 'bob' then 'BOB' else 'OTHER' end as `name` from `user` where id = 1 limit 10001 /* select case */ +1 ks_sharded/-40: select id, case when `name` = 'alice' then 'ALICE' when `name` = 'bob' then 'BOB' else 'OTHER' end as `name` from `user` where id = 1 limit 10001 /* INT64 */ /* select case */ ---------------------------------------------------------------------- select id, case when substr(name, 1, 5) = 'alice' then 'ALICE' when name = 'bob' then 'BOB' else 'OTHER' end as name from user where id = 1 /* select case */ -1 ks_sharded/-40: select id, case when substr(`name`, 1, 5) = 'alice' then 'ALICE' when `name` = 'bob' then 'BOB' else 'OTHER' end as `name` from `user` where id = 1 limit 10001 /* select case */ +1 ks_sharded/-40: select id, case when substr(`name`, 1, 5) = 'alice' then 'ALICE' when `name` = 'bob' then 'BOB' else 'OTHER' end as `name` from `user` where id = 1 limit 10001 /* INT64 */ /* select case */ ---------------------------------------------------------------------- select id, 'abc' as test from user where id = 1 union all select id, 'def' as test from user where id = 1 union all select id, 'ghi' as test from user where id = 1 /* union all */ -1 ks_sharded/-40: select id, 'abc' as test from `user` where id = 1 union all select id, 'def' as test from `user` where id = 1 union all select id, 'ghi' as test from `user` where id = 1 limit 10001 /* union all */ +1 ks_sharded/-40: select id, 'abc' as test from `user` where id = 1 union all select id, 'def' as test from `user` where id = 1 union all select id, 'ghi' as test from `user` where id = 1 limit 10001 /* INT64 */ /* union all */ ---------------------------------------------------------------------- select id from user where not id in (select col from music where music.user_id = 42) and id in (select col from music where music.user_id = 411) -1 ks_sharded/40-80: select col from music where music.user_id = 411 limit 10001 -2 ks_sharded/40-80: select col from music where music.user_id = 42 limit 10001 +1 ks_sharded/40-80: select col from music where music.user_id = 411 limit 10001 /* INT64 */ +2 ks_sharded/40-80: select col from music where music.user_id = 42 limit 10001 /* INT64 */ ---------------------------------------------------------------------- SELECT user.id, user.name, name_info.info FROM user INNER JOIN music ON (user.id = music.user_id) LEFT OUTER JOIN name_info ON (user.name = name_info.name) -1 ks_sharded/-40: select `user`.`name`, `user`.id from `user`, music where `user`.id = music.user_id limit 10001 -1 ks_sharded/40-80: select `user`.`name`, `user`.id from `user`, music where `user`.id = music.user_id limit 10001 -1 ks_sharded/80-c0: select `user`.`name`, `user`.id from `user`, music where `user`.id = music.user_id limit 10001 -1 ks_sharded/c0-: select `user`.`name`, `user`.id from `user`, music where `user`.id = music.user_id limit 10001 -2 ks_sharded/80-c0: select name_info.info from name_info where name_info.`name` = 'name_val_1' limit 10001 -3 ks_sharded/80-c0: select name_info.info from name_info where name_info.`name` = 'name_val_1' limit 10001 -4 ks_sharded/80-c0: select name_info.info from name_info where name_info.`name` = 'name_val_1' limit 10001 -5 ks_sharded/80-c0: select name_info.info from name_info where name_info.`name` = 'name_val_1' limit 10001 +1 ks_sharded/-40: select `user`.id, `user`.`name` from `user`, music where `user`.id = music.user_id limit 10001 +1 ks_sharded/40-80: select `user`.id, `user`.`name` from `user`, music where `user`.id = music.user_id limit 10001 +1 ks_sharded/80-c0: select `user`.id, `user`.`name` from `user`, music where `user`.id = music.user_id limit 10001 +1 ks_sharded/c0-: select `user`.id, `user`.`name` from `user`, music where `user`.id = music.user_id limit 10001 +2 ks_sharded/40-80: select name_info.info from name_info where name_info.`name` = 'name_val_2' limit 10001 +3 ks_sharded/40-80: select name_info.info from name_info where name_info.`name` = 'name_val_2' limit 10001 +4 ks_sharded/40-80: select name_info.info from name_info where name_info.`name` = 'name_val_2' limit 10001 +5 ks_sharded/40-80: select name_info.info from name_info where name_info.`name` = 'name_val_2' limit 10001 ---------------------------------------------------------------------- SELECT id FROM orders WHERE id IN (1, "1", 1) diff --git a/go/vt/vtexplain/testdata/multi-output/unsharded-output.txt b/go/vt/vtexplain/testdata/multi-output/unsharded-output.txt index 0adc5661077..aab1ab0234f 100644 --- a/go/vt/vtexplain/testdata/multi-output/unsharded-output.txt +++ b/go/vt/vtexplain/testdata/multi-output/unsharded-output.txt @@ -17,27 +17,27 @@ insert into t1 (id,intval,floatval) values (1,2,3.14) update t1 set intval = 10 1 ks_unsharded/-: begin -1 ks_unsharded/-: update t1 set intval = 10 limit 10001 +1 ks_unsharded/-: update t1 set intval = 10 limit 10001 /* INT64 */ 1 ks_unsharded/-: commit ---------------------------------------------------------------------- update t1 set floatval = 9.99 1 ks_unsharded/-: begin -1 ks_unsharded/-: update t1 set floatval = 9.99 limit 10001 +1 ks_unsharded/-: update t1 set floatval = 9.99 limit 10001 /* DECIMAL */ 1 ks_unsharded/-: commit ---------------------------------------------------------------------- delete from t1 where id = 100 1 ks_unsharded/-: begin -1 ks_unsharded/-: delete from t1 where id = 100 limit 10001 +1 ks_unsharded/-: delete from t1 where id = 100 limit 10001 /* INT64 */ 1 ks_unsharded/-: commit ---------------------------------------------------------------------- insert into t1 (id,intval,floatval) values (1,2,3.14) on duplicate key update intval=3, floatval=3.14 -1 ks_unsharded/-: insert into t1(id, intval, floatval) values (1, 2, 3.14) on duplicate key update intval = 3, floatval = 3.14 +1 ks_unsharded/-: insert into t1(id, intval, floatval) values (1, 2, 3.14) on duplicate key update intval = 3, floatval = 3.14 /* DECIMAL */ ---------------------------------------------------------------------- select ID from t1 diff --git a/go/vt/vtexplain/testdata/multi-output/updatesharded-output.txt b/go/vt/vtexplain/testdata/multi-output/updatesharded-output.txt index 41723325eba..b5e055bd856 100644 --- a/go/vt/vtexplain/testdata/multi-output/updatesharded-output.txt +++ b/go/vt/vtexplain/testdata/multi-output/updatesharded-output.txt @@ -2,7 +2,7 @@ update user set nickname='alice' where id=1 1 ks_sharded/-40: begin -1 ks_sharded/-40: update `user` set nickname = 'alice' where id = 1 limit 10001 +1 ks_sharded/-40: update `user` set nickname = 'alice' where id = 1 limit 10001 /* INT64 */ 1 ks_sharded/-40: commit ---------------------------------------------------------------------- @@ -11,7 +11,7 @@ update user set nickname='alice' where name='alice' 1 ks_sharded/40-80: begin 1 ks_sharded/40-80: select `name`, user_id from name_user_map where `name` in ('alice') limit 10001 for update 2 ks_sharded/-40: begin -2 ks_sharded/-40: update `user` set nickname = 'alice' where `name` = 'alice' limit 10001 +2 ks_sharded/-40: update `user` set nickname = 'alice' where `name` = 'alice' limit 10001 /* VARCHAR */ 3 ks_sharded/40-80: commit 4 ks_sharded/-40: commit @@ -19,7 +19,7 @@ update user set nickname='alice' where name='alice' update user set pet='fido' where id=1 1 ks_sharded/-40: begin -1 ks_sharded/-40: update `user` set pet = 'fido' where id = 1 limit 10001 +1 ks_sharded/-40: update `user` set pet = 'fido' where id = 1 limit 10001 /* INT64 */ 1 ks_sharded/-40: commit ---------------------------------------------------------------------- @@ -31,7 +31,7 @@ update user set name='alicia' where id=1 2 ks_sharded/40-80: delete from name_user_map where `name` = 'name_val_2' and user_id = 1 limit 10001 3 ks_sharded/c0-: begin 3 ks_sharded/c0-: insert into name_user_map(`name`, user_id) values ('alicia', 1) -4 ks_sharded/-40: update `user` set `name` = 'alicia' where id = 1 limit 10001 +4 ks_sharded/-40: update `user` set `name` = 'alicia' where id = 1 limit 10001 /* INT64 */ 5 ks_sharded/-40: commit 6 ks_sharded/40-80: commit 7 ks_sharded/c0-: commit @@ -46,7 +46,7 @@ update user set name='alicia' where name='alice' 3 ks_sharded/40-80: delete from name_user_map where `name` = 'name_val_2' and user_id = 1 limit 10001 4 ks_sharded/c0-: begin 4 ks_sharded/c0-: insert into name_user_map(`name`, user_id) values ('alicia', 1) -5 ks_sharded/-40: update `user` set `name` = 'alicia' where `name` = 'alice' limit 10001 +5 ks_sharded/-40: update `user` set `name` = 'alicia' where `name` = 'alice' limit 10001 /* VARCHAR */ 6 ks_sharded/40-80: commit 7 ks_sharded/-40: commit 8 ks_sharded/c0-: commit @@ -55,16 +55,16 @@ update user set name='alicia' where name='alice' update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ name_info set info='apa' where name != 'hog' 1 ks_sharded/-40: begin -1 ks_sharded/-40: update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ name_info set info = 'apa' where `name` != 'hog' limit 10001 +1 ks_sharded/-40: update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ name_info set info = 'apa' where `name` != 'hog' limit 10001 /* VARCHAR */ 1 ks_sharded/-40: commit 1 ks_sharded/40-80: begin -1 ks_sharded/40-80: update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ name_info set info = 'apa' where `name` != 'hog' limit 10001 +1 ks_sharded/40-80: update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ name_info set info = 'apa' where `name` != 'hog' limit 10001 /* VARCHAR */ 1 ks_sharded/40-80: commit 1 ks_sharded/80-c0: begin -1 ks_sharded/80-c0: update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ name_info set info = 'apa' where `name` != 'hog' limit 10001 +1 ks_sharded/80-c0: update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ name_info set info = 'apa' where `name` != 'hog' limit 10001 /* VARCHAR */ 1 ks_sharded/80-c0: commit 1 ks_sharded/c0-: begin -1 ks_sharded/c0-: update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ name_info set info = 'apa' where `name` != 'hog' limit 10001 +1 ks_sharded/c0-: update /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ name_info set info = 'apa' where `name` != 'hog' limit 10001 /* VARCHAR */ 1 ks_sharded/c0-: commit ---------------------------------------------------------------------- @@ -73,7 +73,7 @@ update user set pet='rover' where name='alice' 1 ks_sharded/40-80: begin 1 ks_sharded/40-80: select `name`, user_id from name_user_map where `name` in ('alice') limit 10001 for update 2 ks_sharded/-40: begin -2 ks_sharded/-40: update `user` set pet = 'rover' where `name` = 'alice' limit 10001 +2 ks_sharded/-40: update `user` set pet = 'rover' where `name` = 'alice' limit 10001 /* VARCHAR */ 3 ks_sharded/40-80: commit 4 ks_sharded/-40: commit @@ -85,12 +85,12 @@ begin update user set nickname='alice' where id=1 1 ks_sharded/-40: begin -1 ks_sharded/-40: update `user` set nickname = 'alice' where id = 1 limit 10001 +1 ks_sharded/-40: update `user` set nickname = 'alice' where id = 1 limit 10001 /* INT64 */ ---------------------------------------------------------------------- update user set nickname='bob' where id=1 -2 ks_sharded/-40: update `user` set nickname = 'bob' where id = 1 limit 10001 +2 ks_sharded/-40: update `user` set nickname = 'bob' where id = 1 limit 10001 /* INT64 */ ---------------------------------------------------------------------- commit @@ -105,13 +105,13 @@ begin update user set nickname='alice' where id=1 1 ks_sharded/-40: begin -1 ks_sharded/-40: update `user` set nickname = 'alice' where id = 1 limit 10001 +1 ks_sharded/-40: update `user` set nickname = 'alice' where id = 1 limit 10001 /* INT64 */ ---------------------------------------------------------------------- update user set nickname='bob' where id=3 2 ks_sharded/40-80: begin -2 ks_sharded/40-80: update `user` set nickname = 'bob' where id = 3 limit 10001 +2 ks_sharded/40-80: update `user` set nickname = 'bob' where id = 3 limit 10001 /* INT64 */ ---------------------------------------------------------------------- commit diff --git a/go/vt/vtexplain/vtexplain.go b/go/vt/vtexplain/vtexplain.go index 74810dc618f..55e76606e08 100644 --- a/go/vt/vtexplain/vtexplain.go +++ b/go/vt/vtexplain/vtexplain.go @@ -21,6 +21,7 @@ package vtexplain import ( "bytes" + "context" "fmt" "sort" "strings" @@ -180,7 +181,7 @@ type TabletActions struct { } // Init sets up the fake execution environment -func Init(vSchemaStr, sqlSchema, ksShardMapStr string, opts *Options) (*VTExplain, error) { +func Init(ctx context.Context, vSchemaStr, sqlSchema, ksShardMapStr string, opts *Options) (*VTExplain, error) { // Verify options if opts.ReplicationMode != "ROW" && opts.ReplicationMode != "STATEMENT" { return nil, fmt.Errorf("invalid replication mode \"%s\"", opts.ReplicationMode) @@ -200,7 +201,7 @@ func Init(vSchemaStr, sqlSchema, ksShardMapStr string, opts *Options) (*VTExplai Autocommit: true, }} vte.setGlobalTabletEnv(tabletEnv) - err = vte.initVtgateExecutor(vSchemaStr, ksShardMapStr, opts) + err = vte.initVtgateExecutor(ctx, vSchemaStr, ksShardMapStr, opts) if err != nil { return nil, fmt.Errorf("initVtgateExecutor: %v", err.Error()) } @@ -210,10 +211,15 @@ func Init(vSchemaStr, sqlSchema, ksShardMapStr string, opts *Options) (*VTExplai // Stop and cleans up fake execution environment func (vte *VTExplain) Stop() { + if vte.vtgateExecutor != nil { + vte.vtgateExecutor.Close() + } + // Cleanup all created fake dbs. if vte.explainTopo != nil { for _, conn := range vte.explainTopo.TabletConns { conn.tsv.StopService() + conn.tsv.Close(context.Background()) } for _, conn := range vte.explainTopo.TabletConns { conn.db.Close() diff --git a/go/vt/vtexplain/vtexplain_test.go b/go/vt/vtexplain/vtexplain_test.go index 8145c59b44d..54f1efbc522 100644 --- a/go/vt/vtexplain/vtexplain_test.go +++ b/go/vt/vtexplain/vtexplain_test.go @@ -17,6 +17,7 @@ limitations under the License. package vtexplain import ( + "context" "encoding/json" "fmt" "os" @@ -24,15 +25,15 @@ import ( "strings" "testing" - "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv/tabletenvtest" - "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/key" - querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv/tabletenvtest" ) func defaultTestOpts() *Options { @@ -48,7 +49,7 @@ type testopts struct { shardmap map[string]map[string]*topo.ShardInfo } -func initTest(mode string, opts *Options, topts *testopts, t *testing.T) *VTExplain { +func initTest(ctx context.Context, mode string, opts *Options, topts *testopts, t *testing.T) *VTExplain { schema, err := os.ReadFile("testdata/test-schema.sql") require.NoError(t, err) @@ -64,7 +65,7 @@ func initTest(mode string, opts *Options, topts *testopts, t *testing.T) *VTExpl } opts.ExecutionMode = mode - vte, err := Init(string(vSchema), string(schema), shardmap, opts) + vte, err := Init(ctx, string(vSchema), string(schema), shardmap, opts) require.NoError(t, err, "vtexplain Init error\n%s", string(schema)) return vte } @@ -85,7 +86,10 @@ func testExplain(testcase string, opts *Options, t *testing.T) { func runTestCase(testcase, mode string, opts *Options, topts *testopts, t *testing.T) { t.Run(testcase, func(t *testing.T) { - vte := initTest(mode, opts, topts, t) + ctx := utils.LeakCheckContext(t) + + vte := initTest(ctx, mode, opts, topts, t) + defer vte.Stop() sqlFile := fmt.Sprintf("testdata/%s-queries.sql", testcase) sql, err := os.ReadFile(sqlFile) @@ -141,28 +145,6 @@ func TestExplain(t *testing.T) { } tests := []test{ {"unsharded", defaultTestOpts()}, - {"selectsharded", defaultTestOpts()}, - {"insertsharded", defaultTestOpts()}, - {"updatesharded", defaultTestOpts()}, - {"deletesharded", defaultTestOpts()}, - {"comments", defaultTestOpts()}, - {"options", &Options{ - ReplicationMode: "STATEMENT", - NumShards: 4, - Normalize: false, - }}, - {"target", &Options{ - ReplicationMode: "ROW", - NumShards: 4, - Normalize: false, - Target: "ks_sharded/40-80", - }}, - {"gen4", &Options{ - ReplicationMode: "ROW", - NumShards: 4, - Normalize: true, - PlannerVersion: querypb.ExecuteOptions_Gen4, - }}, } for _, tst := range tests { @@ -171,7 +153,10 @@ func TestExplain(t *testing.T) { } func TestErrors(t *testing.T) { - vte := initTest(ModeMulti, defaultTestOpts(), &testopts{}, t) + ctx := utils.LeakCheckContext(t) + + vte := initTest(ctx, ModeMulti, defaultTestOpts(), &testopts{}, t) + defer vte.Stop() tests := []struct { SQL string @@ -208,7 +193,10 @@ func TestErrors(t *testing.T) { } func TestJSONOutput(t *testing.T) { - vte := initTest(ModeMulti, defaultTestOpts(), &testopts{}, t) + ctx := utils.LeakCheckContext(t) + + vte := initTest(ctx, ModeMulti, defaultTestOpts(), &testopts{}, t) + defer vte.Stop() sql := "select 1 from user where id = 1" explains, err := vte.Run(sql) require.NoError(t, err, "vtexplain error") @@ -261,7 +249,7 @@ func TestJSONOutput(t *testing.T) { "ks_sharded/-40": { "MysqlQueries": [ { - "SQL": "select 1 from ` + "`user`" + ` where id = 1 limit 10001", + "SQL": "select 1 from ` + "`user`" + ` where id = 1 limit 10001 /* INT64 */", "Time": 1 } ], @@ -271,7 +259,7 @@ func TestJSONOutput(t *testing.T) { "#maxLimit": "10001", "vtg1": "1" }, - "SQL": "select :vtg1 from ` + "`user`" + ` where id = :vtg1", + "SQL": "select :vtg1 /* INT64 */ from ` + "`user`" + ` where id = :vtg1 /* INT64 */", "Time": 1 } ] @@ -344,6 +332,9 @@ func TestUsingKeyspaceShardMap(t *testing.T) { } func TestInit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vschema := `{ "ks1": { "sharded": true, @@ -353,7 +344,7 @@ func TestInit(t *testing.T) { } }` schema := "create table table_missing_primary_vindex (id int primary key)" - _, err := Init(vschema, schema, "", defaultTestOpts()) + _, err := Init(ctx, vschema, schema, "", defaultTestOpts()) require.Error(t, err) require.Contains(t, err.Error(), "missing primary col vindex") } diff --git a/go/vt/vtexplain/vtexplain_vtgate.go b/go/vt/vtexplain/vtexplain_vtgate.go index 676e9757266..bbeb99e0e36 100644 --- a/go/vt/vtexplain/vtexplain_vtgate.go +++ b/go/vt/vtexplain/vtexplain_vtgate.go @@ -25,10 +25,10 @@ import ( "sort" "strings" + "vitess.io/vitess/go/cache/theine" "vitess.io/vitess/go/vt/vtgate/logstats" "vitess.io/vitess/go/vt/vtgate/vindexes" - "vitess.io/vitess/go/cache" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -50,14 +50,14 @@ import ( vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" ) -func (vte *VTExplain) initVtgateExecutor(vSchemaStr, ksShardMapStr string, opts *Options) error { +func (vte *VTExplain) initVtgateExecutor(ctx context.Context, vSchemaStr, ksShardMapStr string, opts *Options) error { vte.explainTopo = &ExplainTopo{NumShards: opts.NumShards} - vte.explainTopo.TopoServer = memorytopo.NewServer(vtexplainCell) + vte.explainTopo.TopoServer = memorytopo.NewServer(ctx, vtexplainCell) vte.healthCheck = discovery.NewFakeHealthCheck(nil) - resolver := vte.newFakeResolver(opts, vte.explainTopo, vtexplainCell) + resolver := vte.newFakeResolver(ctx, opts, vte.explainTopo, vtexplainCell) - err := vte.buildTopology(opts, vSchemaStr, ksShardMapStr, opts.NumShards) + err := vte.buildTopology(ctx, opts, vSchemaStr, ksShardMapStr, opts.NumShards) if err != nil { return err } @@ -73,18 +73,17 @@ func (vte *VTExplain) initVtgateExecutor(vSchemaStr, ksShardMapStr string, opts streamSize := 10 var schemaTracker vtgate.SchemaInfo // no schema tracker for these tests - vte.vtgateExecutor = vtgate.NewExecutor(context.Background(), vte.explainTopo, vtexplainCell, resolver, opts.Normalize, false, streamSize, cache.DefaultConfig, schemaTracker, false, opts.PlannerVersion) - queryLogBufferSize := 10 - vtgate.SetQueryLogger(streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize)) + plans := theine.NewStore[vtgate.PlanCacheKey, *engine.Plan](4*1024*1024, false) + vte.vtgateExecutor = vtgate.NewExecutor(ctx, vte.explainTopo, vtexplainCell, resolver, opts.Normalize, false, streamSize, plans, schemaTracker, false, opts.PlannerVersion) + vte.vtgateExecutor.SetQueryLogger(streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize)) return nil } -func (vte *VTExplain) newFakeResolver(opts *Options, serv srvtopo.Server, cell string) *vtgate.Resolver { - ctx := context.Background() +func (vte *VTExplain) newFakeResolver(ctx context.Context, opts *Options, serv srvtopo.Server, cell string) *vtgate.Resolver { gw := vtgate.NewTabletGateway(ctx, vte.healthCheck, serv, cell) - _ = gw.WaitForTablets([]topodatapb.TabletType{topodatapb.TabletType_REPLICA}) + _ = gw.WaitForTablets(ctx, []topodatapb.TabletType{topodatapb.TabletType_REPLICA}) txMode := vtgatepb.TransactionMode_MULTI if opts.ExecutionMode == ModeTwoPC { @@ -96,7 +95,7 @@ func (vte *VTExplain) newFakeResolver(opts *Options, serv srvtopo.Server, cell s return vtgate.NewResolver(srvResolver, serv, cell, sc) } -func (vte *VTExplain) buildTopology(opts *Options, vschemaStr string, ksShardMapStr string, numShardsPerKeyspace int) error { +func (vte *VTExplain) buildTopology(ctx context.Context, opts *Options, vschemaStr string, ksShardMapStr string, numShardsPerKeyspace int) error { vte.explainTopo.Lock.Lock() defer vte.explainTopo.Lock.Unlock() @@ -144,7 +143,7 @@ func (vte *VTExplain) buildTopology(opts *Options, vschemaStr string, ksShardMap log.Infof("registering test tablet %s for keyspace %s shard %s", hostname, ks, shard.Name) tablet := vte.healthCheck.AddFakeTablet(vtexplainCell, hostname, 1, ks, shard.Name, topodatapb.TabletType_PRIMARY, true, 1, nil, func(t *topodatapb.Tablet) queryservice.QueryService { - return vte.newTablet(opts, t) + return vte.newTablet(ctx, opts, t) }) vte.explainTopo.TabletConns[hostname] = tablet.(*explainTablet) vte.explainTopo.KeyspaceShards[ks][shard.Name] = shard @@ -209,29 +208,27 @@ func (vte *VTExplain) vtgateExecute(sql string) ([]*engine.Plan, map[string]*Tab // This will ensure that the commit/rollback order is predictable. vte.sortShardSession() - // use the plan cache to get the set of plans used for this query, then - // clear afterwards for the next run - planCache := vte.vtgateExecutor.Plans() - - _, err := vte.vtgateExecutor.Execute(context.Background(), "VtexplainExecute", vtgate.NewSafeSession(vte.vtgateSession), sql, nil) + _, err := vte.vtgateExecutor.Execute(context.Background(), nil, "VtexplainExecute", vtgate.NewSafeSession(vte.vtgateSession), sql, nil) if err != nil { for _, tc := range vte.explainTopo.TabletConns { tc.tabletQueries = nil tc.mysqlQueries = nil } - planCache.Clear() - + vte.vtgateExecutor.ClearPlans() return nil, nil, vterrors.Wrapf(err, "vtexplain execute error in '%s'", sql) } var plans []*engine.Plan - planCache.ForEach(func(value any) bool { - plan := value.(*engine.Plan) + + // use the plan cache to get the set of plans used for this query, then + // clear afterwards for the next run + vte.vtgateExecutor.ForEachPlan(func(plan *engine.Plan) bool { plan.ExecTime = 0 plans = append(plans, plan) return true }) - planCache.Clear() + + vte.vtgateExecutor.ClearPlans() tabletActions := make(map[string]*TabletActions) for shard, tc := range vte.explainTopo.TabletConns { diff --git a/go/vt/vtexplain/vtexplain_vttablet.go b/go/vt/vtexplain/vtexplain_vttablet.go index a1bef6547fc..f902eca8b07 100644 --- a/go/vt/vtexplain/vtexplain_vttablet.go +++ b/go/vt/vtexplain/vtexplain_vttablet.go @@ -102,7 +102,7 @@ type explainTablet struct { var _ queryservice.QueryService = (*explainTablet)(nil) -func (vte *VTExplain) newTablet(opts *Options, t *topodatapb.Tablet) *explainTablet { +func (vte *VTExplain) newTablet(ctx context.Context, opts *Options, t *topodatapb.Tablet) *explainTablet { db := fakesqldb.New(nil) sidecardb.AddSchemaInitQueries(db, true) @@ -117,7 +117,7 @@ func (vte *VTExplain) newTablet(opts *Options, t *topodatapb.Tablet) *explainTab config.EnableTableGC = false // XXX much of this is cloned from the tabletserver tests - tsv := tabletserver.NewTabletServer(topoproto.TabletAliasString(t.Alias), config, memorytopo.NewServer(""), t.Alias) + tsv := tabletserver.NewTabletServer(ctx, topoproto.TabletAliasString(t.Alias), config, memorytopo.NewServer(ctx, ""), t.Alias) tablet := explainTablet{db: db, tsv: tsv, vte: vte} db.Handler = &tablet @@ -285,7 +285,9 @@ func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options) (*tablet schemaQueries := map[string]*sqltypes.Result{ "select unix_timestamp()": { Fields: []*querypb.Field{{ - Type: sqltypes.Uint64, + Type: sqltypes.Int64, + Charset: collations.CollationBinaryID, + Flags: uint32(querypb.MySqlFlag_BINARY_FLAG | querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG), }}, Rows: [][]sqltypes.Value{ {sqltypes.NewInt32(1427325875)}, @@ -293,95 +295,111 @@ func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options) (*tablet }, "select @@global.sql_mode": { Fields: []*querypb.Field{{ - Type: sqltypes.VarChar, + Type: sqltypes.VarChar, + Charset: uint32(collations.SystemCollation.Collation), }}, Rows: [][]sqltypes.Value{ - {sqltypes.NewVarBinary("STRICT_TRANS_TABLES")}, + {sqltypes.NewVarChar("STRICT_TRANS_TABLES")}, }, }, "select @@session.sql_mode as sql_mode": { Fields: []*querypb.Field{{ - Name: "sql_mode", - Type: sqltypes.VarChar, + Name: "sql_mode", + Type: sqltypes.VarChar, + Charset: uint32(collations.SystemCollation.Collation), }}, Rows: [][]sqltypes.Value{ - {sqltypes.NewVarBinary("STRICT_TRANS_TABLES")}, + {sqltypes.NewVarChar("STRICT_TRANS_TABLES")}, }, }, "select @@autocommit": { Fields: []*querypb.Field{{ - Type: sqltypes.Uint64, + Type: sqltypes.Int64, + Charset: collations.CollationBinaryID, + Flags: uint32(querypb.MySqlFlag_BINARY_FLAG | querypb.MySqlFlag_NUM_FLAG), }}, Rows: [][]sqltypes.Value{ - {sqltypes.NewVarBinary("1")}, + {sqltypes.NewInt64(1)}, }, }, "select @@sql_auto_is_null": { Fields: []*querypb.Field{{ - Type: sqltypes.Uint64, + Type: sqltypes.Int64, + Charset: collations.CollationBinaryID, + Flags: uint32(querypb.MySqlFlag_BINARY_FLAG | querypb.MySqlFlag_NUM_FLAG), }}, Rows: [][]sqltypes.Value{ - {sqltypes.NewVarBinary("0")}, + {sqltypes.NewInt64(0)}, }, }, "set @@session.sql_log_bin = 0": { Fields: []*querypb.Field{{ - Type: sqltypes.Uint64, + Type: sqltypes.Uint64, + Charset: collations.CollationBinaryID, }}, Rows: [][]sqltypes.Value{}, }, "create database if not exists `_vt`": { Fields: []*querypb.Field{{ - Type: sqltypes.Uint64, + Type: sqltypes.Uint64, + Charset: collations.CollationBinaryID, }}, Rows: [][]sqltypes.Value{}, }, "drop table if exists `_vt`.redo_log_transaction": { Fields: []*querypb.Field{{ - Type: sqltypes.Uint64, + Type: sqltypes.Uint64, + Charset: collations.CollationBinaryID, }}, Rows: [][]sqltypes.Value{}, }, "drop table if exists `_vt`.redo_log_statement": { Fields: []*querypb.Field{{ - Type: sqltypes.Uint64, + Type: sqltypes.Uint64, + Charset: collations.CollationBinaryID, }}, Rows: [][]sqltypes.Value{}, }, "drop table if exists `_vt`.transaction": { Fields: []*querypb.Field{{ - Type: sqltypes.Uint64, + Type: sqltypes.Uint64, + Charset: collations.CollationBinaryID, }}, Rows: [][]sqltypes.Value{}, }, "drop table if exists `_vt`.participant": { Fields: []*querypb.Field{{ - Type: sqltypes.Uint64, + Type: sqltypes.Uint64, + Charset: collations.CollationBinaryID, }}, Rows: [][]sqltypes.Value{}, }, "create table if not exists `_vt`.redo_state(\n dtid varbinary(512),\n state bigint,\n time_created bigint,\n primary key(dtid)\n\t) engine=InnoDB": { Fields: []*querypb.Field{{ - Type: sqltypes.Uint64, + Type: sqltypes.Uint64, + Charset: collations.CollationBinaryID, }}, Rows: [][]sqltypes.Value{}, }, "create table if not exists `_vt`.redo_statement(\n dtid varbinary(512),\n id bigint,\n statement mediumblob,\n primary key(dtid, id)\n\t) engine=InnoDB": { Fields: []*querypb.Field{{ - Type: sqltypes.Uint64, + Type: sqltypes.Uint64, + Charset: collations.CollationBinaryID, }}, Rows: [][]sqltypes.Value{}, }, "create table if not exists `_vt`.dt_state(\n dtid varbinary(512),\n state bigint,\n time_created bigint,\n primary key(dtid)\n\t) engine=InnoDB": { Fields: []*querypb.Field{{ - Type: sqltypes.Uint64, + Type: sqltypes.Uint64, + Charset: collations.CollationBinaryID, }}, Rows: [][]sqltypes.Value{}, }, "create table if not exists `_vt`.dt_participant(\n dtid varbinary(512),\n\tid bigint,\n\tkeyspace varchar(256),\n\tshard varchar(256),\n primary key(dtid, id)\n\t) engine=InnoDB": { Fields: []*querypb.Field{{ - Type: sqltypes.Uint64, + Type: sqltypes.Uint64, + Charset: collations.CollationBinaryID, }}, Rows: [][]sqltypes.Value{}, }, @@ -459,15 +477,17 @@ func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options) (*tablet var colTypes []*querypb.Field var colValues [][]sqltypes.Value colType := &querypb.Field{ - Name: "column_type", - Type: sqltypes.VarChar, + Name: "column_type", + Type: sqltypes.VarChar, + Charset: uint32(collations.Default()), } colTypes = append(colTypes, colType) for _, col := range ddl.GetTableSpec().Columns { colName := strings.ToLower(col.Name.String()) rowType := &querypb.Field{ - Name: colName, - Type: col.Type.SQLType(), + Name: colName, + Type: col.Type.SQLType(), + Charset: uint32(collations.SystemCollation.Collation), } rowTypes = append(rowTypes, rowType) tEnv.tableColumns[table][colName] = col.Type.SQLType() @@ -496,6 +516,30 @@ func (t *explainTablet) HandleQuery(c *mysql.Conn, query string, callback func(* t.mu.Lock() defer t.mu.Unlock() + // If query is part of rejected list then return error right away. + if err := t.db.GetRejectedQueryResult(query); err != nil { + return err + } + + // If query is expected to have a specific result then return the result. + if result := t.db.GetQueryResult(query); result != nil { + if f := result.BeforeFunc; f != nil { + f() + } + return callback(result.Result) + } + + // return result if query is part of defined pattern. + if userCallback, expResult, ok, err := t.db.GetQueryPatternResult(query); ok { + if userCallback != nil { + userCallback(query) + } + if err != nil { + return err + } + return callback(expResult.Result) + } + if !strings.Contains(query, "1 != 1") { t.mysqlQueries = append(t.mysqlQueries, &MysqlQuery{ Time: t.currentTime, @@ -506,13 +550,10 @@ func (t *explainTablet) HandleQuery(c *mysql.Conn, query string, callback func(* // return the pre-computed results for any schema introspection queries tEnv := t.vte.getGlobalTabletEnv() result := tEnv.getResult(query) - emptyResult := &sqltypes.Result{} - if sidecardb.MatchesInitQuery(query) { - return callback(emptyResult) - } if result != nil { return callback(result) } + switch sqlparser.Preview(query) { case sqlparser.StmtSelect: var err error @@ -605,9 +646,12 @@ func (t *explainTablet) handleSelect(query string) (*sqltypes.Result, error) { rows := make([][]sqltypes.Value, 0, rowCount) for i, col := range colNames { colType := colTypes[i] + cs := collations.DefaultCollationForType(colType) fields[i] = &querypb.Field{ - Name: col, - Type: colType, + Name: col, + Type: colType, + Charset: uint32(cs), + Flags: mysql.FlagsForColumn(colType, cs), } } @@ -676,13 +720,13 @@ func (t *explainTablet) analyzeWhere(selStmt *sqlparser.Select, tableColumnMap m if !ok { continue } - value, err := evalengine.LiteralToValue(lit) + value, err := sqlparser.LiteralToValue(lit) if err != nil { return "", nil, 0, nil, err } // Cast the value in the tuple to the expected value of the column - castedValue, err := evalengine.Cast(value, colType) + castedValue, err := sqltypes.Cast(value, colType) if err != nil { return "", nil, 0, nil, err } diff --git a/go/vt/vtexplain/vtexplain_vttablet_test.go b/go/vt/vtexplain/vtexplain_vttablet_test.go index 8fd28d07adf..614ad186224 100644 --- a/go/vt/vtexplain/vtexplain_vttablet_test.go +++ b/go/vt/vtexplain/vtexplain_vttablet_test.go @@ -17,6 +17,7 @@ limitations under the License. package vtexplain import ( + "context" "encoding/json" "testing" @@ -67,7 +68,9 @@ create table t2 ( NumShards: 2, } - vte, err := Init(testVSchema, testSchema, "", opts) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vte, err := Init(ctx, testVSchema, testSchema, "", opts) require.NoError(t, err) defer vte.Stop() @@ -119,12 +122,16 @@ create table test_partitioned ( if err != nil { t.Fatalf("parseSchema: %v", err) } - vte := initTest(ModeMulti, defaultTestOpts(), &testopts{}, t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + vte := initTest(ctx, ModeMulti, defaultTestOpts(), &testopts{}, t) + defer vte.Stop() tabletEnv, _ := newTabletEnvironment(ddls, defaultTestOpts()) vte.setGlobalTabletEnv(tabletEnv) - tablet := vte.newTablet(defaultTestOpts(), &topodatapb.Tablet{ + tablet := vte.newTablet(ctx, defaultTestOpts(), &topodatapb.Tablet{ Keyspace: "test_keyspace", Shard: "-80", Alias: &topodatapb.TabletAlias{}, @@ -135,7 +142,7 @@ create table test_partitioned ( t1 := tables["t1"] require.NotNil(t, t1, "table t1 wasn't parsed properly") - wantCols := `[{"name":"id","type":778},{"name":"val","type":6165}]` + wantCols := `[{"name":"id","type":778,"charset":33,"flags":32800},{"name":"val","type":6165,"charset":33}]` got, _ := json.Marshal(t1.Fields) assert.Equal(t, wantCols, string(got)) @@ -143,14 +150,14 @@ create table test_partitioned ( t.Errorf("expected HasPrimary && t1.PKColumns == [0] got %v", t1.PKColumns) } pkCol := t1.GetPKColumn(0) - if pkCol == nil || pkCol.String() != `name:"id" type:UINT64` { + if pkCol == nil || pkCol.String() != `name:"id" type:UINT64 charset:33 flags:32800` { t.Errorf("expected pkCol[0] == id, got %v", pkCol) } t2 := tables["t2"] require.NotNil(t, t2, "table t2 wasn't parsed properly") - wantCols = `[{"name":"val","type":6163}]` + wantCols = `[{"name":"val","type":6163,"charset":33}]` got, _ = json.Marshal(t2.Fields) assert.Equal(t, wantCols, string(got)) diff --git a/go/vt/vtgate/api.go b/go/vt/vtgate/api.go index d4a6fb240d7..d4d7d143b21 100644 --- a/go/vt/vtgate/api.go +++ b/go/vt/vtgate/api.go @@ -24,6 +24,7 @@ import ( "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/servenv" ) // This file implements a REST-style API for the vtgate web interface. @@ -41,7 +42,7 @@ func httpErrorf(w http.ResponseWriter, r *http.Request, format string, args ...a } func handleAPI(apiPath string, handlerFunc func(w http.ResponseWriter, r *http.Request) error) { - http.HandleFunc(apiPrefix+apiPath, func(w http.ResponseWriter, r *http.Request) { + servenv.HTTPHandleFunc(apiPrefix+apiPath, func(w http.ResponseWriter, r *http.Request) { defer func() { if x := recover(); x != nil { httpErrorf(w, r, "uncaught panic: %v", x) diff --git a/go/vt/vtgate/autocommit_test.go b/go/vt/vtgate/autocommit_test.go index 94f0ef9b4d3..0d55bbf2875 100644 --- a/go/vt/vtgate/autocommit_test.go +++ b/go/vt/vtgate/autocommit_test.go @@ -35,7 +35,7 @@ import ( // TestAutocommitUpdateSharded: instant-commit. func TestAutocommitUpdateSharded(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "update user set a=2 where id = 1") require.NoError(t, err) @@ -52,7 +52,7 @@ func TestAutocommitUpdateSharded(t *testing.T) { // TestAutocommitUpdateLookup: transaction: select before update. func TestAutocommitUpdateLookup(t *testing.T) { - executor, sbc1, _, sbclookup := createExecutorEnv() + executor, sbc1, _, sbclookup, _ := createExecutorEnv(t) sbclookup.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("b|a", "int64|varbinary"), "2|1", @@ -81,7 +81,7 @@ func TestAutocommitUpdateLookup(t *testing.T) { // TestAutocommitUpdateVindexChange: transaction: select & update before final update. func TestAutocommitUpdateVindexChange(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, _ := createExecutorEnv(t) sbc.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("id|name|lastname|name_lastname_keyspace_id_map", "int64|int32|varchar|int64"), "1|1|foo|0", @@ -120,7 +120,7 @@ func TestAutocommitUpdateVindexChange(t *testing.T) { // TestAutocommitDeleteSharded: instant-commit. func TestAutocommitDeleteSharded(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "delete from user_extra where user_id = 1") require.NoError(t, err) @@ -137,7 +137,7 @@ func TestAutocommitDeleteSharded(t *testing.T) { // TestAutocommitDeleteLookup: transaction: select before update. func TestAutocommitDeleteLookup(t *testing.T) { - executor, sbc1, _, sbclookup := createExecutorEnv() + executor, sbc1, _, sbclookup, _ := createExecutorEnv(t) sbc1.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("id|name|lastname", "int64|int32|varchar"), "1|1|foo", @@ -179,7 +179,7 @@ func TestAutocommitDeleteLookup(t *testing.T) { // TestAutocommitDeleteIn: instant-commit. func TestAutocommitDeleteIn(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "delete from user_extra where user_id in (1, 2)") require.NoError(t, err) @@ -196,7 +196,7 @@ func TestAutocommitDeleteIn(t *testing.T) { // TestAutocommitDeleteMultiShard: instant-commit. func TestAutocommitDeleteMultiShard(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "delete from user_extra where user_id = user_id + 1") require.NoError(t, err) @@ -216,7 +216,7 @@ func TestAutocommitDeleteMultiShard(t *testing.T) { // TestAutocommitDeleteMultiShardAutoCommit: instant-commit. func TestAutocommitDeleteMultiShardAutoCommit(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "delete /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ from user_extra where user_id = user_id + 1") require.NoError(t, err) @@ -236,7 +236,7 @@ func TestAutocommitDeleteMultiShardAutoCommit(t *testing.T) { // TestAutocommitInsertSharded: instant-commit. func TestAutocommitInsertSharded(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "insert into user_extra(user_id, v) values (1, 2)") require.NoError(t, err) @@ -255,7 +255,7 @@ func TestAutocommitInsertSharded(t *testing.T) { // TestAutocommitInsertLookup: transaction: select before update. func TestAutocommitInsertLookup(t *testing.T) { - executor, sbc1, _, sbclookup := createExecutorEnv() + executor, sbc1, _, sbclookup, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "insert into user(id, v, name) values (1, 2, 'myname')") require.NoError(t, err) @@ -282,51 +282,55 @@ func TestAutocommitInsertLookup(t *testing.T) { // TestAutocommitInsertShardAutoCommit: instant-commit. func TestAutocommitInsertMultishardAutoCommit(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - - _, err := autocommitExec(executor, "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (1, 2), (3, 4)") - require.NoError(t, err) - - assertQueries(t, sbc1, []*querypb.BoundQuery{{ - Sql: "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (:_user_id_0, 2)", - BindVariables: map[string]*querypb.BindVariable{ - "_user_id_0": sqltypes.Int64BindVariable(1), - "_user_id_1": sqltypes.Int64BindVariable(3), - }, - }}) - testCommitCount(t, "sbc1", sbc1, 0) - - assertQueries(t, sbc2, []*querypb.BoundQuery{{ - Sql: "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (:_user_id_1, 4)", - BindVariables: map[string]*querypb.BindVariable{ - "_user_id_0": sqltypes.Int64BindVariable(1), - "_user_id_1": sqltypes.Int64BindVariable(3), - }, - }}) - testCommitCount(t, "sbc2", sbc2, 0) - - executor, sbc1, sbc2, _ = createExecutorEnv() - // Make the first shard fail - the second completes anyway - sbc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 - _, err = autocommitExec(executor, "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (1, 2), (3, 4)") - require.Error(t, err) - require.Contains(t, err.Error(), "INVALID_ARGUMENT", "expected invalid argument error") - - testCommitCount(t, "sbc1", sbc1, 0) - - assertQueries(t, sbc2, []*querypb.BoundQuery{{ - Sql: "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (:_user_id_1, 4)", - BindVariables: map[string]*querypb.BindVariable{ - "_user_id_0": sqltypes.Int64BindVariable(1), - "_user_id_1": sqltypes.Int64BindVariable(3), - }, - }}) - testCommitCount(t, "sbc2", sbc2, 0) + t.Run("1", func(t *testing.T) { + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) + + _, err := autocommitExec(executor, "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (1, 2), (3, 4)") + require.NoError(t, err) + + assertQueries(t, sbc1, []*querypb.BoundQuery{{ + Sql: "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (:_user_id_0, 2)", + BindVariables: map[string]*querypb.BindVariable{ + "_user_id_0": sqltypes.Int64BindVariable(1), + "_user_id_1": sqltypes.Int64BindVariable(3), + }, + }}) + testCommitCount(t, "sbc1", sbc1, 0) + + assertQueries(t, sbc2, []*querypb.BoundQuery{{ + Sql: "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (:_user_id_1, 4)", + BindVariables: map[string]*querypb.BindVariable{ + "_user_id_0": sqltypes.Int64BindVariable(1), + "_user_id_1": sqltypes.Int64BindVariable(3), + }, + }}) + testCommitCount(t, "sbc2", sbc2, 0) + }) + t.Run("2", func(t *testing.T) { + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) + + // Make the first shard fail - the second completes anyway + sbc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 + _, err := autocommitExec(executor, "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (1, 2), (3, 4)") + require.Error(t, err) + require.Contains(t, err.Error(), "INVALID_ARGUMENT", "expected invalid argument error") + + testCommitCount(t, "sbc1", sbc1, 0) + + assertQueries(t, sbc2, []*querypb.BoundQuery{{ + Sql: "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into user_extra(user_id, v) values (:_user_id_1, 4)", + BindVariables: map[string]*querypb.BindVariable{ + "_user_id_0": sqltypes.Int64BindVariable(1), + "_user_id_1": sqltypes.Int64BindVariable(3), + }, + }}) + testCommitCount(t, "sbc2", sbc2, 0) + }) } func TestAutocommitInsertMultishard(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "insert into user_extra(user_id, v) values (1, 2), (3, 4)") require.NoError(t, err) @@ -352,13 +356,13 @@ func TestAutocommitInsertMultishard(t *testing.T) { // TestAutocommitInsertAutoinc: instant-commit: sequence fetch is not transactional. func TestAutocommitInsertAutoinc(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, _ := createExecutorEnv(t) _, err := autocommitExec(executor, "insert into main1(id, name) values (null, 'myname')") require.NoError(t, err) assertQueries(t, sbclookup, []*querypb.BoundQuery{{ - Sql: "select next :n values from user_seq", + Sql: "select next :n /* INT64 */ values from user_seq", BindVariables: map[string]*querypb.BindVariable{"n": sqltypes.Int64BindVariable(1)}, }, { Sql: "insert into main1(id, `name`) values (:__seq0, 'myname')", @@ -371,7 +375,7 @@ func TestAutocommitInsertAutoinc(t *testing.T) { // TestAutocommitTransactionStarted: no instant-commit. func TestAutocommitTransactionStarted(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) session := &vtgatepb.Session{ TargetString: "@primary", @@ -382,7 +386,7 @@ func TestAutocommitTransactionStarted(t *testing.T) { // single shard query - no savepoint needed sql := "update `user` set a = 2 where id = 1" - _, err := executor.Execute(context.Background(), "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) require.Len(t, sbc1.Queries, 1) require.Equal(t, sql, sbc1.Queries[0].Sql) @@ -393,7 +397,7 @@ func TestAutocommitTransactionStarted(t *testing.T) { // multi shard query - savepoint needed sql = "update `user` set a = 2 where id in (1, 4)" - _, err = executor.Execute(context.Background(), "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) require.Len(t, sbc1.Queries, 2) require.Contains(t, sbc1.Queries[0].Sql, "savepoint") @@ -403,7 +407,7 @@ func TestAutocommitTransactionStarted(t *testing.T) { // TestAutocommitDirectTarget: instant-commit. func TestAutocommitDirectTarget(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, _ := createExecutorEnv(t) session := &vtgatepb.Session{ TargetString: "TestUnsharded/0@primary", @@ -412,7 +416,7 @@ func TestAutocommitDirectTarget(t *testing.T) { } sql := "insert into `simple`(val) values ('val')" - _, err := executor.Execute(context.Background(), "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) assertQueries(t, sbclookup, []*querypb.BoundQuery{{ @@ -424,7 +428,7 @@ func TestAutocommitDirectTarget(t *testing.T) { // TestAutocommitDirectRangeTarget: no instant-commit. func TestAutocommitDirectRangeTarget(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) session := &vtgatepb.Session{ TargetString: "TestExecutor[-]@primary", @@ -433,7 +437,7 @@ func TestAutocommitDirectRangeTarget(t *testing.T) { } sql := "delete from sharded_user_msgs limit 1000" - _, err := executor.Execute(context.Background(), "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) assertQueries(t, sbc1, []*querypb.BoundQuery{{ @@ -450,5 +454,5 @@ func autocommitExec(executor *Executor, sql string) (*sqltypes.Result, error) { TransactionMode: vtgatepb.TransactionMode_MULTI, } - return executor.Execute(context.Background(), "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) + return executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) } diff --git a/go/vt/vtgate/bench_test.go b/go/vt/vtgate/bench_test.go index d33f8a9abe1..5c64c7e3473 100644 --- a/go/vt/vtgate/bench_test.go +++ b/go/vt/vtgate/bench_test.go @@ -21,9 +21,6 @@ import ( "fmt" "testing" - "context" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" ) @@ -61,16 +58,12 @@ func init() { } func BenchmarkWithNormalizer(b *testing.B) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - _ = hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - saved := rpcVTGate.executor.normalize - rpcVTGate.executor.normalize = true - defer func() { rpcVTGate.executor.normalize = saved }() + vtgateInst, _, ctx := createVtgateEnv(b) for i := 0; i < b.N; i++ { - _, _, err := rpcVTGate.Execute( - context.Background(), + _, _, err := vtgateInst.Execute( + ctx, + nil, &vtgatepb.Session{ TargetString: "@primary", Options: executeOptions, @@ -85,16 +78,14 @@ func BenchmarkWithNormalizer(b *testing.B) { } func BenchmarkWithoutNormalizer(b *testing.B) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - _ = hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - saved := rpcVTGate.executor.normalize - rpcVTGate.executor.normalize = false - defer func() { rpcVTGate.executor.normalize = saved }() + vtgateInst, _, ctx := createVtgateEnv(b) + + vtgateInst.executor.normalize = false for i := 0; i < b.N; i++ { - _, _, err := rpcVTGate.Execute( - context.Background(), + _, _, err := vtgateInst.Execute( + ctx, + nil, &vtgatepb.Session{ TargetString: "@primary", Options: executeOptions, diff --git a/go/vt/vtgate/buffer/buffer.go b/go/vt/vtgate/buffer/buffer.go index f9618b6e0c7..622bb03b082 100644 --- a/go/vt/vtgate/buffer/buffer.go +++ b/go/vt/vtgate/buffer/buffer.go @@ -28,14 +28,13 @@ package buffer import ( "context" - "fmt" + "strings" "sync" "golang.org/x/sync/semaphore" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" @@ -67,12 +66,61 @@ const ( // currently retried. type RetryDoneFunc context.CancelFunc +const ( + ClusterEventReshardingInProgress = "current keyspace is being resharded" + ClusterEventReparentInProgress = "primary is not serving, there may be a reparent operation in progress" + ClusterEventMoveTables = "disallowed due to rule" +) + +var ClusterEvents []string + +func init() { + ClusterEvents = []string{ + ClusterEventReshardingInProgress, + ClusterEventReparentInProgress, + ClusterEventMoveTables, + } +} + // CausedByFailover returns true if "err" was supposedly caused by a failover. // To simplify things, we've merged the detection for different MySQL flavors // in one function. Supported flavors: MariaDB, MySQL func CausedByFailover(err error) bool { log.V(2).Infof("Checking error (type: %T) if it is caused by a failover. err: %v", err, err) - return vterrors.Code(err) == vtrpcpb.Code_CLUSTER_EVENT + reason, isFailover := isFailoverError(err) + if isFailover { + log.Infof("CausedByFailover signalling failover for reason: %s", reason) + } + return isFailover +} + +// for debugging purposes +func getReason(err error) string { + for _, ce := range ClusterEvents { + if strings.Contains(err.Error(), ce) { + return ce + } + } + return "" +} + +// isFailoverError looks at the error returned by the sql query execution to check if there is a cluster event +// (caused by resharding or reparenting) or a denied tables error seen during switch writes in MoveTables +func isFailoverError(err error) (string, bool) { + var reason string + var isFailover bool + switch vterrors.Code(err) { + case vtrpcpb.Code_CLUSTER_EVENT: + isFailover = true + case vtrpcpb.Code_FAILED_PRECONDITION: + if strings.Contains(err.Error(), ClusterEventMoveTables) { + isFailover = true + } + } + if isFailover { + reason = getReason(err) + } + return reason, isFailover } // Buffer is used to track ongoing PRIMARY tablet failovers and buffer @@ -140,36 +188,14 @@ func (b *Buffer) WaitForFailoverEnd(ctx context.Context, keyspace, shard string, requestsSkipped.Add([]string{keyspace, shard, skippedDisabled}, 1) return nil, nil } - return sb.waitForFailoverEnd(ctx, keyspace, shard, err) } -// ProcessPrimaryHealth notifies the buffer to record a new primary -// and end any failover buffering that may be in progress -func (b *Buffer) ProcessPrimaryHealth(th *discovery.TabletHealth) { - if th.Target.TabletType != topodatapb.TabletType_PRIMARY { - panic(fmt.Sprintf("BUG: non-PRIMARY TabletHealth object must not be forwarded: %#v", th)) - } - timestamp := th.PrimaryTermStartTime - if timestamp == 0 { - // Primarys where TabletExternallyReparented was never called will return 0. - // Ignore them. - return - } - - sb := b.getOrCreateBuffer(th.Target.Keyspace, th.Target.Shard) - if sb == nil { - // Buffer is shut down. Ignore all calls. - return - } - sb.recordExternallyReparentedTimestamp(timestamp, th.Tablet.Alias) -} - func (b *Buffer) HandleKeyspaceEvent(ksevent *discovery.KeyspaceEvent) { for _, shard := range ksevent.Shards { sb := b.getOrCreateBuffer(shard.Target.Keyspace, shard.Target.Shard) if sb != nil { - sb.recordKeyspaceEvent(shard.Tablet, shard.Serving) + sb.recordKeyspaceEvent(shard.Tablet, shard.Serving, ksevent) } } } diff --git a/go/vt/vtgate/buffer/buffer_helper_test.go b/go/vt/vtgate/buffer/buffer_helper_test.go index a6b7605d4da..2deb460fc39 100644 --- a/go/vt/vtgate/buffer/buffer_helper_test.go +++ b/go/vt/vtgate/buffer/buffer_helper_test.go @@ -20,17 +20,6 @@ type failover func(buf *Buffer, tablet *topodatapb.Tablet, keyspace, shard strin func testAllImplementations(t *testing.T, runTest func(t *testing.T, fail failover)) { t.Helper() - t.Run("HealthCheck", func(t *testing.T) { - t.Helper() - runTest(t, func(buf *Buffer, tablet *topodatapb.Tablet, keyspace, shard string, now time.Time) { - buf.ProcessPrimaryHealth(&discovery.TabletHealth{ - Tablet: tablet, - Target: &query.Target{Keyspace: keyspace, Shard: shard, TabletType: topodatapb.TabletType_PRIMARY}, - PrimaryTermStartTime: now.Unix(), - }) - }) - }) - t.Run("KeyspaceEvent", func(t *testing.T) { t.Helper() runTest(t, func(buf *Buffer, tablet *topodatapb.Tablet, keyspace, shard string, now time.Time) { diff --git a/go/vt/vtgate/buffer/flags.go b/go/vt/vtgate/buffer/flags.go index 742a5d5d412..a17cc09ccc3 100644 --- a/go/vt/vtgate/buffer/flags.go +++ b/go/vt/vtgate/buffer/flags.go @@ -70,9 +70,6 @@ func verifyFlags() error { if bufferSize < 1 { return fmt.Errorf("--buffer_size must be >= 1 (specified value: %d)", bufferSize) } - if bufferMinTimeBetweenFailovers < bufferMaxFailoverDuration*time.Duration(2) { - return fmt.Errorf("--buffer_min_time_between_failovers should be at least twice the length of --buffer_max_failover_duration: %v vs. %v", bufferMinTimeBetweenFailovers, bufferMaxFailoverDuration) - } if bufferDrainConcurrency < 1 { return fmt.Errorf("--buffer_drain_concurrency must be >= 1 (specified value: %d)", bufferDrainConcurrency) @@ -165,6 +162,16 @@ func NewDefaultConfig() *Config { } } +// EnableBuffering is used in tests where we require the keyspace event watcher to be created +func EnableBuffering() { + bufferEnabled = true +} + +// DisableBuffering is the counterpart of EnableBuffering +func DisableBuffering() { + bufferEnabled = false +} + func NewConfigFromFlags() *Config { if err := verifyFlags(); err != nil { log.Fatalf("Invalid buffer configuration: %v", err) diff --git a/go/vt/vtgate/buffer/shard_buffer.go b/go/vt/vtgate/buffer/shard_buffer.go index 1b829cb3ddd..ae33aabb399 100644 --- a/go/vt/vtgate/buffer/shard_buffer.go +++ b/go/vt/vtgate/buffer/shard_buffer.go @@ -23,6 +23,8 @@ import ( "sync" "time" + "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/vtgate/errorsanitizer" "vitess.io/vitess/go/vt/log" @@ -93,14 +95,6 @@ type shardBuffer struct { state bufferState // queue is the list of buffered requests (ordered by arrival). queue []*entry - // externallyReparented is the maximum value of all seen - // "StreamHealthResponse.TabletexternallyReparentedTimestamp" values across - // all PRIMARY tablets of this shard. - // In practice, it is a) the last time the shard was reparented or b) the last - // time the TabletExternallyReparented RPC was called on the tablet to confirm - // that the tablet is the current PRIMARY. - // We assume the value is a Unix timestamp in seconds. - externallyReparented int64 // lastStart is the last time we saw the start of a failover. lastStart time.Time // lastEnd is the last time we saw the end of a failover. @@ -476,11 +470,12 @@ func (sb *shardBuffer) remove(toRemove *entry) { // Entry was already removed. Keep the queue as it is. } -func (sb *shardBuffer) recordKeyspaceEvent(alias *topodatapb.TabletAlias, stillServing bool) { +func (sb *shardBuffer) recordKeyspaceEvent(alias *topodatapb.TabletAlias, stillServing bool, keyspaceEvent *discovery.KeyspaceEvent) { sb.mu.Lock() defer sb.mu.Unlock() - log.Infof("disruption in shard %s/%s resolved (serving: %v)", sb.keyspace, sb.shard, stillServing) + log.Infof("disruption in shard %s/%s resolved (serving: %v), movetable state %#v", + sb.keyspace, sb.shard, stillServing, keyspaceEvent.MoveTablesState) if !topoproto.TabletAliasEqual(alias, sb.currentPrimary) { if sb.currentPrimary != nil { @@ -488,42 +483,26 @@ func (sb *shardBuffer) recordKeyspaceEvent(alias *topodatapb.TabletAlias, stillS } sb.currentPrimary = alias } - if stillServing { - sb.stopBufferingLocked(stopFailoverEndDetected, "a primary promotion has been detected") - } else { - sb.stopBufferingLocked(stopShardMissing, "the keyspace has been resharded") - } -} - -func (sb *shardBuffer) recordExternallyReparentedTimestamp(timestamp int64, alias *topodatapb.TabletAlias) { - // Fast path (read lock): Check if new timestamp is higher. - sb.mu.RLock() - if timestamp <= sb.externallyReparented { - // Do nothing. Equal values are reported if the primary has not changed. - // Smaller values can be reported during the failover by the old primary - // after the new primary already took over. - sb.mu.RUnlock() - return - } - sb.mu.RUnlock() + var reason stopReason + var msg string - // New timestamp is higher. Stop buffering if running. - sb.mu.Lock() - defer sb.mu.Unlock() - - // Re-check value after acquiring write lock. - if timestamp <= sb.externallyReparented { - return + // heuristically determine the reason why vtgate is currently buffering + moveTablesSwitched := false + if keyspaceEvent.MoveTablesState.State == discovery.MoveTablesSwitched { + moveTablesSwitched = true } - - sb.externallyReparented = timestamp - if !topoproto.TabletAliasEqual(alias, sb.currentPrimary) { - if sb.currentPrimary != nil { - sb.lastReparent = sb.timeNow() - } - sb.currentPrimary = alias + switch { + case moveTablesSwitched: + reason = stopMoveTablesSwitchingTraffic + msg = stopMoveTablesSwitchingTrafficMessage + case stillServing: + reason = stopFailoverEndDetected + msg = stopFailoverEndDetectedMessage + default: + reason = stopShardMissing + msg = stopShardMissingMessage } - sb.stopBufferingLocked(stopFailoverEndDetected, "failover end detected") + sb.stopBufferingLocked(reason, msg) } func (sb *shardBuffer) stopBufferingDueToMaxDuration() { @@ -569,7 +548,8 @@ func (sb *shardBuffer) stopBufferingLocked(reason stopReason, details string) { if sb.mode == bufferModeDryRun { msg = "Dry-run: Would have stopped buffering" } - log.Infof("%v for shard: %s after: %.1f seconds due to: %v. Draining %d buffered requests now.", msg, topoproto.KeyspaceShardString(sb.keyspace, sb.shard), d.Seconds(), details, len(q)) + log.Infof("%v for shard: %s after: %.1f seconds due to: %v. Draining %d buffered requests now.", + msg, topoproto.KeyspaceShardString(sb.keyspace, sb.shard), d.Seconds(), details, len(q)) var clientEntryError error if reason == stopShardMissing { diff --git a/go/vt/vtgate/buffer/variables.go b/go/vt/vtgate/buffer/variables.go index b4b036b0775..af99cb52220 100644 --- a/go/vt/vtgate/buffer/variables.go +++ b/go/vt/vtgate/buffer/variables.go @@ -112,6 +112,11 @@ const ( stopFailoverEndDetected stopReason = "NewPrimarySeen" stopMaxFailoverDurationExceeded stopReason = "MaxDurationExceeded" stopShutdown stopReason = "Shutdown" + stopMoveTablesSwitchingTraffic stopReason = "MoveTablesSwitchedTraffic" + + stopMoveTablesSwitchingTrafficMessage = "MoveTables has switched writes" + stopFailoverEndDetectedMessage = "a primary promotion has been detected" + stopShardMissingMessage = "the keyspace has been resharded" ) // evictedReason is used in "requestsEvicted" as "Reason" label. diff --git a/go/vt/vtgate/endtoend/last_insert_id_test.go b/go/vt/vtgate/endtoend/last_insert_id_test.go index 9a467c089b8..e3fbcdaa2dd 100644 --- a/go/vt/vtgate/endtoend/last_insert_id_test.go +++ b/go/vt/vtgate/endtoend/last_insert_id_test.go @@ -21,15 +21,17 @@ import ( "fmt" "testing" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/utils" ) func TestLastInsertId(t *testing.T) { + require.NoError(t, + utils.WaitForAuthoritative(t, "ks", "t1_last_insert_id", cluster.VTProcess().ReadVSchema)) + ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -37,7 +39,7 @@ func TestLastInsertId(t *testing.T) { // figure out the last inserted id before we run change anything qr := exec(t, conn, "select max(id) from t1_last_insert_id") - oldLastID, err := evalengine.ToUint64(qr.Rows[0][0]) + oldLastID, err := qr.Rows[0][0].ToCastUint64() require.NoError(t, err) exec(t, conn, "insert into t1_last_insert_id(id1) values(42)") @@ -53,6 +55,9 @@ func TestLastInsertId(t *testing.T) { } func TestLastInsertIdWithRollback(t *testing.T) { + require.NoError(t, + utils.WaitForAuthoritative(t, "ks", "t1_last_insert_id", cluster.VTProcess().ReadVSchema)) + ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) @@ -60,7 +65,7 @@ func TestLastInsertIdWithRollback(t *testing.T) { // figure out the last inserted id before we run our tests qr := exec(t, conn, "select max(id) from t1_last_insert_id") - oldLastID, err := evalengine.ToUint64(qr.Rows[0][0]) + oldLastID, err := qr.Rows[0][0].ToCastUint64() require.NoError(t, err) // add row inside explicit transaction diff --git a/go/vt/vtgate/endtoend/lookup_test.go b/go/vt/vtgate/endtoend/lookup_test.go index 01fc3aee32d..d69bec8c0c6 100644 --- a/go/vt/vtgate/endtoend/lookup_test.go +++ b/go/vt/vtgate/endtoend/lookup_test.go @@ -23,6 +23,8 @@ import ( "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/mysql/sqlerror" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" @@ -61,8 +63,8 @@ func TestConsistentLookup(t *testing.T) { _, err = conn.ExecuteFetch("insert into t1(id1, id2) values(1, 4)", 1000, false) exec(t, conn, "rollback") require.Error(t, err) - mysqlErr := err.(*mysql.SQLError) - assert.Equal(t, mysql.ERDupEntry, mysqlErr.Num) + mysqlErr := err.(*sqlerror.SQLError) + assert.Equal(t, sqlerror.ERDupEntry, mysqlErr.Num) assert.Equal(t, "23000", mysqlErr.State) // Simple delete. diff --git a/go/vt/vtgate/endtoend/main_test.go b/go/vt/vtgate/endtoend/main_test.go index 48872965cb9..08aae25420e 100644 --- a/go/vt/vtgate/endtoend/main_test.go +++ b/go/vt/vtgate/endtoend/main_test.go @@ -18,6 +18,7 @@ package endtoend import ( "context" + _ "embed" "fmt" "os" "testing" @@ -25,10 +26,9 @@ import ( _flag "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/vttest" - vschemapb "vitess.io/vitess/go/vt/proto/vschema" vttestpb "vitess.io/vitess/go/vt/proto/vttest" + "vitess.io/vitess/go/vt/vttest" ) var ( @@ -37,76 +37,8 @@ var ( mysqlParams mysql.ConnParams grpcAddress string - schema = ` -create table t1( - id1 bigint, - id2 bigint, - primary key(id1) -) Engine=InnoDB; - -create table t1_copy_basic( - id1 bigint, - id2 bigint, - primary key(id1) -) Engine=InnoDB; - -create table t1_copy_resume( - id1 bigint, - id2 bigint, - primary key(id1) -) Engine=InnoDB; - -create table t1_id2_idx( - id2 bigint, - keyspace_id varbinary(10), - primary key(id2) -) Engine=InnoDB; - -create table vstream_test( - id bigint, - val bigint, - primary key(id) -) Engine=InnoDB; - -create table aggr_test( - id bigint, - val1 varchar(16), - val2 bigint, - primary key(id) -) Engine=InnoDB; - -create table t2( - id3 bigint, - id4 bigint, - primary key(id3) -) Engine=InnoDB; - -create table t2_id4_idx( - id bigint not null auto_increment, - id4 bigint, - id3 bigint, - primary key(id), - key idx_id4(id4) -) Engine=InnoDB; - -create table t1_last_insert_id( - id bigint not null auto_increment, - id1 bigint, - primary key(id) -) Engine=InnoDB; - -create table t1_row_count( - id bigint not null, - id1 bigint, - primary key(id) -) Engine=InnoDB; - -create table t1_sharded( - id1 bigint, - id2 bigint, - primary key(id1) -) Engine=InnoDB; -` + //go:embed schema.sql + Schema string vschema = &vschemapb.Keyspace{ Sharded: true, @@ -150,6 +82,12 @@ create table t1_sharded( Name: "hash", }}, }, + "t1_copy_all": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "id1", + Name: "hash", + }}, + }, "t1_copy_resume": { ColumnVindexes: []*vschemapb.ColumnVindex{{ Column: "id1", @@ -217,6 +155,31 @@ create table t1_sharded( }, }, } + + schema2 = ` +create table t1_copy_all_ks2( + id1 bigint, + id2 bigint, + primary key(id1) +) Engine=InnoDB; +` + + vschema2 = &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1_copy_all_ks2": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "id1", + Name: "hash", + }}, + }, + }, + } ) func TestMain(m *testing.M) { @@ -225,21 +188,36 @@ func TestMain(m *testing.M) { exitCode := func() int { var cfg vttest.Config cfg.Topology = &vttestpb.VTTestTopology{ - Keyspaces: []*vttestpb.Keyspace{{ - Name: "ks", - Shards: []*vttestpb.Shard{{ - Name: "-80", - }, { - Name: "80-", - }}, - }}, + Keyspaces: []*vttestpb.Keyspace{ + { + Name: "ks", + Shards: []*vttestpb.Shard{{ + Name: "-80", + }, { + Name: "80-", + }}, + }, + { + Name: "ks2", + Shards: []*vttestpb.Shard{{ + Name: "-80", + }, { + Name: "80-", + }}, + }, + }, } - if err := cfg.InitSchemas("ks", schema, vschema); err != nil { + if err := cfg.InitSchemas("ks", Schema, vschema); err != nil { fmt.Fprintf(os.Stderr, "%v\n", err) os.RemoveAll(cfg.SchemaDir) return 1 } defer os.RemoveAll(cfg.SchemaDir) + if err := cfg.InitSchemas("ks2", schema2, vschema2); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.RemoveAll(cfg.SchemaDir) + return 1 + } cluster = &vttest.LocalCluster{ Config: cfg, diff --git a/go/vt/vtgate/endtoend/misc_test.go b/go/vt/vtgate/endtoend/misc_test.go index 138b68d0aa3..aeeb1c122db 100644 --- a/go/vt/vtgate/endtoend/misc_test.go +++ b/go/vt/vtgate/endtoend/misc_test.go @@ -19,6 +19,7 @@ package endtoend import ( "context" "fmt" + osExec "os/exec" "testing" "github.com/stretchr/testify/assert" @@ -55,6 +56,16 @@ func TestCreateAndDropDatabase(t *testing.T) { require.NoError(t, err) defer conn.Close() + // cleanup the keyspace from the topology. + defer func() { + // the corresponding database needs to be created in advance. + // a subsequent DeleteKeyspace command returns the error of 'node doesn't exist' without it. + _ = exec(t, conn, "create database testitest") + + _, err := osExec.Command("vtctldclient", "--server", grpcAddress, "DeleteKeyspace", "--recursive", "--force", "testitest").CombinedOutput() + require.NoError(t, err) + }() + // run it 3 times. for count := 0; count < 3; count++ { t.Run(fmt.Sprintf("exec:%d", count), func(t *testing.T) { diff --git a/go/vt/vtgate/endtoend/row_count_test.go b/go/vt/vtgate/endtoend/row_count_test.go index 9ac200b33fa..5a29f6177a9 100644 --- a/go/vt/vtgate/endtoend/row_count_test.go +++ b/go/vt/vtgate/endtoend/row_count_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/utils" ) func TestRowCount(t *testing.T) { @@ -31,6 +32,7 @@ func TestRowCount(t *testing.T) { conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) defer conn.Close() + utils.Exec(t, conn, "use ks") type tc struct { query string expected int diff --git a/go/vt/vtgate/endtoend/schema.sql b/go/vt/vtgate/endtoend/schema.sql new file mode 100644 index 00000000000..5fb1f52224f --- /dev/null +++ b/go/vt/vtgate/endtoend/schema.sql @@ -0,0 +1,74 @@ +create table t1( + id1 bigint, + id2 bigint, + primary key(id1) +) Engine=InnoDB; + +create table t1_copy_basic( + id1 bigint, + id2 bigint, + primary key(id1) +) Engine=InnoDB; + +create table t1_copy_all( + id1 bigint, + id2 bigint, + primary key(id1) +) Engine=InnoDB; + +create table t1_copy_resume( + id1 bigint, + id2 bigint, + primary key(id1) +) Engine=InnoDB; + +create table t1_id2_idx( + id2 bigint, + keyspace_id varbinary(10), + primary key(id2) +) Engine=InnoDB; + +create table vstream_test( + id bigint, + val bigint, + primary key(id) +) Engine=InnoDB; + +create table aggr_test( + id bigint, + val1 varchar(16), + val2 bigint, + primary key(id) +) Engine=InnoDB; + +create table t2( + id3 bigint, + id4 bigint, + primary key(id3) +) Engine=InnoDB; + +create table t2_id4_idx( + id bigint not null auto_increment, + id4 bigint, + id3 bigint, + primary key(id), + key idx_id4(id4) +) Engine=InnoDB; + +create table t1_last_insert_id( + id bigint not null auto_increment, + id1 bigint, + primary key(id) +) Engine=InnoDB; + +create table t1_row_count( + id bigint not null, + id1 bigint, + primary key(id) +) Engine=InnoDB; + +create table t1_sharded( + id1 bigint, + id2 bigint, + primary key(id1) +) Engine=InnoDB; diff --git a/go/vt/vtgate/endtoend/vstream_test.go b/go/vt/vtgate/endtoend/vstream_test.go index 832799366b1..42dd6e3d2a3 100644 --- a/go/vt/vtgate/endtoend/vstream_test.go +++ b/go/vt/vtgate/endtoend/vstream_test.go @@ -20,23 +20,24 @@ import ( "context" "fmt" "io" + "regexp" "sort" "sync" "testing" - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vtgate/vtgateconn" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - "vitess.io/vitess/go/vt/proto/query" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/vtgate/vtgateconn" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" ) func initialize(ctx context.Context, t *testing.T) (*vtgateconn.VTGateConn, *mysql.Conn, *mysql.Conn, func()) { @@ -149,11 +150,12 @@ func TestVStream(t *testing.T) { Keyspace: "ks", Shard: "-80", RowChanges: []*binlogdatapb.RowChange{{ - After: &query.Row{ + After: &querypb.Row{ Lengths: []int64{1, 1}, Values: []byte("11"), }, }}, + Flags: 1, // foreign_key_checks are enabled by default. } gotRows := events[2].RowEvent if !proto.Equal(gotRows, wantRows) { @@ -175,7 +177,7 @@ func TestVStreamCopyBasic(t *testing.T) { } lastPK := sqltypes.Result{ - Fields: []*query.Field{{Name: "id1", Type: query.Type_INT32}}, + Fields: []*querypb.Field{{Name: "id1", Type: querypb.Type_INT32}}, Rows: [][]sqltypes.Value{{sqltypes.NewInt32(4)}}, } qr := sqltypes.ResultToProto3(&lastPK) @@ -234,12 +236,7 @@ func TestVStreamCopyBasic(t *testing.T) { printEvents(evs) // for debugging ci failures if len(evs) == numExpectedEvents { - // The arrival order of COPY_COMPLETED events with keyspace/shard is not constant. - // On the other hand, the last event should always be a fully COPY_COMPLETED event. - // That's why the sort.Slice doesn't have to handle the last element in completedEvs. - sort.Slice(completedEvs[:len(completedEvs)-1], func(i, j int) bool { - return completedEvs[i].GetShard() < completedEvs[j].GetShard() - }) + sortCopyCompletedEvents(completedEvs) for i, ev := range completedEvs { require.Regexp(t, expectedCompletedEvents[i], ev.String()) } @@ -258,6 +255,139 @@ func TestVStreamCopyBasic(t *testing.T) { } } +// TestVStreamCopyUnspecifiedShardGtid tests the case where the keyspace contains wildcards and/or the shard is not specified in the request. +// Verify that the Vstream API resolves the unspecified ShardGtid input to a list of all the matching keyspaces and all the shards in the topology. +// - If the keyspace contains wildcards and the shard is not specified, the copy operation should be performed on all shards of all matching keyspaces. +// - If the keyspace is specified and the shard is not specified, the copy operation should be performed on all shards of the specified keyspace. +func TestVStreamCopyUnspecifiedShardGtid(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + conn, err := mysql.Connect(ctx, &vtParams) + if err != nil { + require.NoError(t, err) + } + defer conn.Close() + + _, err = conn.ExecuteFetch("insert into t1_copy_all(id1,id2) values(1,1), (2,2), (3,3), (4,4), (5,5), (6,6), (7,7), (8,8)", 1, false) + if err != nil { + require.NoError(t, err) + } + + _, err = conn.ExecuteFetch("insert into t1_copy_all_ks2(id1,id2) values(10,10), (20,20)", 1, false) + if err != nil { + require.NoError(t, err) + } + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/t1_copy_all.*/", + }}, + } + flags := &vtgatepb.VStreamFlags{} + + // We have 2 shards in each keyspace. We assume the rows are + // evenly split across each shard. For each INSERT statement, which + // is a transaction and gets a global transaction identifier or GTID, we + // have 1 each of the following events: + // begin, field, position, lastpk, commit (5) + // For each row created in the INSERT statement -- 8 on ks1 and + // 2 on ks2 -- we have 1 row event between the begin and commit. + // When we have copied all rows for a table in the shard, the shard + // also gets events marking the transition from the copy phase to + // the streaming phase for that table with 1 each of the following: + // begin, vgtid, commit (3) + // As the copy phase completes for all tables on the shard, the shard + // gets 1 copy phase completed event. + // Lastly the stream has 1 final event to mark the final end to all + // copy phase operations in the vstream. + expectedKs1EventNum := 2 /* num shards */ * (9 /* begin/field/vgtid:pos/4 rowevents avg/vgitd: lastpk/commit) */ + 3 /* begin/vgtid/commit for completed table */ + 1 /* copy operation completed */) + expectedKs2EventNum := 2 /* num shards */ * (6 /* begin/field/vgtid:pos/1 rowevents avg/vgitd: lastpk/commit) */ + 3 /* begin/vgtid/commit for completed table */ + 1 /* copy operation completed */) + expectedFullyCopyCompletedNum := 1 + + cases := []struct { + name string + shardGtid *binlogdatapb.ShardGtid + expectedEventNum int + expectedCompletedEvents []string + }{ + { + name: "copy from all keyspaces", + shardGtid: &binlogdatapb.ShardGtid{ + Keyspace: "/.*", + }, + expectedEventNum: expectedKs1EventNum + expectedKs2EventNum + expectedFullyCopyCompletedNum, + expectedCompletedEvents: []string{ + `type:COPY_COMPLETED keyspace:"ks" shard:"-80"`, + `type:COPY_COMPLETED keyspace:"ks" shard:"80-"`, + `type:COPY_COMPLETED keyspace:"ks2" shard:"-80"`, + `type:COPY_COMPLETED keyspace:"ks2" shard:"80-"`, + `type:COPY_COMPLETED`, + }, + }, + { + name: "copy from all shards in one keyspace", + shardGtid: &binlogdatapb.ShardGtid{ + Keyspace: "ks", + }, + expectedEventNum: expectedKs1EventNum + expectedFullyCopyCompletedNum, + expectedCompletedEvents: []string{ + `type:COPY_COMPLETED keyspace:"ks" shard:"-80"`, + `type:COPY_COMPLETED keyspace:"ks" shard:"80-"`, + `type:COPY_COMPLETED`, + }, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + gconn, conn, mconn, closeConnections := initialize(ctx, t) + defer closeConnections() + + var vgtid = &binlogdatapb.VGtid{} + vgtid.ShardGtids = []*binlogdatapb.ShardGtid{c.shardGtid} + reader, err := gconn.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, filter, flags) + _, _ = conn, mconn + if err != nil { + require.NoError(t, err) + } + require.NotNil(t, reader) + var evs []*binlogdatapb.VEvent + var completedEvs []*binlogdatapb.VEvent + for { + e, err := reader.Recv() + switch err { + case nil: + evs = append(evs, e...) + + for _, ev := range e { + if ev.Type == binlogdatapb.VEventType_COPY_COMPLETED { + completedEvs = append(completedEvs, ev) + } + } + + if len(evs) == c.expectedEventNum { + sortCopyCompletedEvents(completedEvs) + for i, ev := range completedEvs { + require.Equal(t, c.expectedCompletedEvents[i], ev.String()) + } + t.Logf("TestVStreamCopyUnspecifiedShardGtid was successful") + return + } else if c.expectedEventNum < len(evs) { + printEvents(evs) // for debugging ci failures + require.FailNow(t, fmt.Sprintf("len(events)=%d are not expected\n", len(evs))) + } + case io.EOF: + log.Infof("stream ended\n") + cancel() + default: + log.Errorf("Returned err %v", err) + require.FailNow(t, "remote error: %v\n", err) + } + } + }) + } +} + func TestVStreamCopyResume(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -275,7 +405,7 @@ func TestVStreamCopyResume(t *testing.T) { // lastPK is id1=4, meaning we should only copy rows for id1 IN(5,6,7,8,9) lastPK := sqltypes.Result{ - Fields: []*query.Field{{Name: "id1", Type: query.Type_INT64}}, + Fields: []*querypb.Field{{Name: "id1", Type: querypb.Type_INT64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_BINARY_FLAG)}}, Rows: [][]sqltypes.Value{{sqltypes.NewInt64(4)}}, } tableLastPK := []*binlogdatapb.TableLastPK{{ @@ -337,13 +467,18 @@ func TestVStreamCopyResume(t *testing.T) { `type:ROW row_event:{table_name:"ks.t1_copy_resume" row_changes:{after:{lengths:1 lengths:2 values:"990"}} keyspace:"ks" shard:"-80"} keyspace:"ks" shard:"-80"`, `type:ROW timestamp:[0-9]+ row_event:{table_name:"ks.t1_copy_resume" row_changes:{before:{lengths:1 lengths:1 values:"99"} after:{lengths:1 lengths:2 values:"990"}} keyspace:"ks" shard:"-80"} current_time:[0-9]+ keyspace:"ks" shard:"-80"`, } + redash80 := regexp.MustCompile(`(?i)type:VGTID vgtid:{shard_gtids:{keyspace:"ks" shard:"-80" gtid:".+" table_p_ks:{table_name:"t1_copy_resume" lastpk:{fields:{name:"id1" type:INT64 charset:63 flags:[0-9]+} rows:{lengths:1 values:"[0-9]"}}}} shard_gtids:{keyspace:"ks" shard:"80-" gtid:".+"}} keyspace:"ks" shard:"(-80|80-)"`) + re80dash := regexp.MustCompile(`(?i)type:VGTID vgtid:{shard_gtids:{keyspace:"ks" shard:"-80" gtid:".+"} shard_gtids:{keyspace:"ks" shard:"80-" gtid:".+" table_p_ks:{table_name:"t1_copy_resume" lastpk:{fields:{name:"id1" type:INT64 charset:63 flags:[0-9]+} rows:{lengths:1 values:"[0-9]"}}}}} keyspace:"ks" shard:"(-80|80-)"`) + both := regexp.MustCompile(`(?i)type:VGTID vgtid:{shard_gtids:{keyspace:"ks" shard:"-80" gtid:".+" table_p_ks:{table_name:"t1_copy_resume" lastpk:{fields:{name:"id1" type:INT64 charset:63 flags:[0-9]+} rows:{lengths:1 values:"[0-9]"}}}} shard_gtids:{keyspace:"ks" shard:"80-" gtid:".+" table_p_ks:{table_name:"t1_copy_resume" lastpk:{fields:{name:"id1" type:INT64 charset:63 flags:[0-9]+} rows:{lengths:1 values:"[0-9]"}}}}} keyspace:"ks" shard:"(-80|80-)"`) var evs []*binlogdatapb.VEvent + for { e, err := reader.Recv() switch err { case nil: for _, ev := range e { if ev.Type == binlogdatapb.VEventType_ROW { + ev.RowEvent.Flags = 0 // null Flags, so we don't have to define flags in every wanted row event. evs = append(evs, ev) if ev.Timestamp == 0 { rowCopyEvents++ @@ -352,6 +487,19 @@ func TestVStreamCopyResume(t *testing.T) { } printEvents(evs) // for debugging ci failures } + if ev.Type == binlogdatapb.VEventType_VGTID { + // Validate that the vgtid event the client receives from the vstream copy + // has a complete TableLastPK proto message. + // Also, to ensure that the client can resume properly, make sure that + // the Fields value is present in the sqltypes.Result field and not missing. + // It's not guaranteed that BOTH shards have streamed a row yet as the order + // of events in the stream is non-determinstic. So we check to be sure that + // at least one shard has copied rows and thus has a full TableLastPK proto + // message. + eventStr := ev.String() + require.True(t, redash80.MatchString(eventStr) || re80dash.MatchString(eventStr) || both.MatchString(eventStr), + "VGTID event does not have a complete TableLastPK proto message for either shard; event: %s", eventStr) + } } if expectedCatchupEvents == replCatchupEvents && expectedRowCopyEvents == rowCopyEvents { sort.Sort(VEventSorter(evs)) @@ -469,9 +617,9 @@ func TestVStreamSharded(t *testing.T) { received bool } expectedEvents := []*expectedEvent{ - {`type:FIELD field_event:{table_name:"ks.t1_sharded" fields:{name:"id1" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_-80" org_name:"id1" column_length:20 charset:63 flags:53251} fields:{name:"id2" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_-80" org_name:"id2" column_length:20 charset:63 flags:32768} keyspace:"ks" shard:"-80"}`, false}, + {`type:FIELD field_event:{table_name:"ks.t1_sharded" fields:{name:"id1" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_-80" org_name:"id1" column_length:20 charset:63 flags:53251 column_type:"bigint(20)"} fields:{name:"id2" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_-80" org_name:"id2" column_length:20 charset:63 flags:32768 column_type:"bigint(20)"} keyspace:"ks" shard:"-80"}`, false}, {`type:ROW row_event:{table_name:"ks.t1_sharded" row_changes:{after:{lengths:1 lengths:1 values:"11"}} keyspace:"ks" shard:"-80"}`, false}, - {`type:FIELD field_event:{table_name:"ks.t1_sharded" fields:{name:"id1" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_80-" org_name:"id1" column_length:20 charset:63 flags:53251} fields:{name:"id2" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_80-" org_name:"id2" column_length:20 charset:63 flags:32768} keyspace:"ks" shard:"80-"}`, false}, + {`type:FIELD field_event:{table_name:"ks.t1_sharded" fields:{name:"id1" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_80-" org_name:"id1" column_length:20 charset:63 flags:53251 column_type:"bigint(20)"} fields:{name:"id2" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_80-" org_name:"id2" column_length:20 charset:63 flags:32768 column_type:"bigint(20)"} keyspace:"ks" shard:"80-"}`, false}, {`type:ROW row_event:{table_name:"ks.t1_sharded" row_changes:{after:{lengths:1 lengths:1 values:"44"}} keyspace:"ks" shard:"80-"}`, false}, } for { @@ -496,7 +644,7 @@ func TestVStreamSharded(t *testing.T) { for _, ev := range evs { s := fmt.Sprintf("%v", ev) for _, expectedEv := range expectedEvents { - if expectedEv.ev == s { + if removeAnyDeprecatedDisplayWidths(expectedEv.ev) == removeAnyDeprecatedDisplayWidths(s) { expectedEv.received = true break } @@ -520,6 +668,136 @@ func TestVStreamSharded(t *testing.T) { } +// TestVStreamCopyTransactions tests that we are properly wrapping +// ROW events in the stream with BEGIN and COMMIT events. +func TestVStreamCopyTransactions(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + keyspace := "ks" + shards := []string{"-80", "80-"} + table := "t1_copy_basic" + beginEventSeen, commitEventSeen := false, false + numResultInTrx := 0 + vgtid := &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{ + { + Keyspace: keyspace, + Shard: shards[0], + Gtid: "", // Start a vstream copy + }, + { + Keyspace: keyspace, + Shard: shards[1], + Gtid: "", // Start a vstream copy + }, + }, + } + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: table, + Filter: fmt.Sprintf("select * from %s", table), + }}, + } + + gconn, conn, _, closeConnections := initialize(ctx, t) + defer closeConnections() + + // Clear any existing data. + q := fmt.Sprintf("delete from %s", table) + _, err := conn.ExecuteFetch(q, -1, false) + require.NoError(t, err, "error clearing data: %v", err) + + // Generate some test data. Enough to cross the default + // vstream_packet_size threshold. + for i := 1; i <= 100000; i++ { + values := fmt.Sprintf("(%d, %d)", i, i) + q := fmt.Sprintf("insert into %s (id1, id2) values %s", table, values) + _, err := conn.ExecuteFetch(q, 1, false) + require.NoError(t, err, "error inserting data: %v", err) + } + + // Start a vstream. + reader, err := gconn.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, filter, nil) + require.NoError(t, err, "error starting vstream: %v", err) + +recvLoop: + for { + vevents, err := reader.Recv() + numResultInTrx++ + eventCount := len(vevents) + t.Logf("------------------ Received %d events in response #%d for the transaction ------------------\n", + eventCount, numResultInTrx) + switch err { + case nil: + for _, event := range vevents { + switch event.Type { + case binlogdatapb.VEventType_BEGIN: + require.False(t, beginEventSeen, "received a second BEGIN event within the transaction: numResultInTrx=%d\n", + numResultInTrx) + beginEventSeen = true + t.Logf("Found BEGIN event, beginEventSeen=%t, commitEventSeen=%t, eventType=%v, numResultInTrx=%d\n", + beginEventSeen, commitEventSeen, event.Type, numResultInTrx) + require.False(t, commitEventSeen, "received a BEGIN event when expecting a COMMIT event: numResultInTrx=%d\n", + numResultInTrx) + case binlogdatapb.VEventType_VGTID: + t.Logf("Found VGTID event, beginEventSeen=%t, commitEventSeen=%t, eventType=%v, numResultInTrx=%d, event=%+v\n", + beginEventSeen, commitEventSeen, event.Type, numResultInTrx, event) + case binlogdatapb.VEventType_FIELD: + t.Logf("Found FIELD event, beginEventSeen=%t, commitEventSeen=%t, eventType=%v, numResultInTrx=%d, event=%+v\n", + beginEventSeen, commitEventSeen, event.Type, numResultInTrx, event) + case binlogdatapb.VEventType_ROW: + // Uncomment if you need to do more debugging. + // t.Logf("Found ROW event, beginEventSeen=%t, commitEventSeen=%t, eventType=%v, numResultInTrx=%d, event=%+v\n", + // beginEventSeen, commitEventSeen, event.Type, numResultInTrx, event) + case binlogdatapb.VEventType_COMMIT: + commitEventSeen = true + t.Logf("Found COMMIT event, beginEventSeen=%t, commitEventSeen=%t, eventType=%v, numResultInTrx=%d, event=%+v\n", + beginEventSeen, commitEventSeen, event.Type, numResultInTrx, event) + require.True(t, beginEventSeen, "received COMMIT event before receiving BEGIN event: numResultInTrx=%d\n", + numResultInTrx) + case binlogdatapb.VEventType_COPY_COMPLETED: + t.Logf("Finished vstream copy\n") + t.Logf("-------------------------------------------------------------------\n\n") + cancel() + break recvLoop + default: + t.Logf("Found extraneous event: %+v\n", event) + } + if beginEventSeen && commitEventSeen { + t.Logf("Received both BEGIN and COMMIT, so resetting transactional state\n") + beginEventSeen = false + commitEventSeen = false + numResultInTrx = 0 + } + } + case io.EOF: + t.Logf("vstream ended\n") + t.Logf("-------------------------------------------------------------------\n\n") + cancel() + return + default: + require.FailNowf(t, "unexpected error", "encountered error in vstream: %v", err) + return + } + } + // The last response, when the vstream copy completes, does not + // typically contain ROW events. + if beginEventSeen || commitEventSeen { + require.True(t, (beginEventSeen && commitEventSeen), "did not receive both BEGIN and COMMIT events in the final ROW event set") + } +} + +func removeAnyDeprecatedDisplayWidths(orig string) string { + var adjusted string + baseIntType := "int" + intRE := regexp.MustCompile(`(?i)int\(([0-9]*)?\)`) + adjusted = intRE.ReplaceAllString(orig, baseIntType) + baseYearType := "year" + yearRE := regexp.MustCompile(`(?i)year\(([0-9]*)?\)`) + adjusted = yearRE.ReplaceAllString(adjusted, baseYearType) + return adjusted +} + var printMu sync.Mutex func printEvents(evs []*binlogdatapb.VEvent) { @@ -563,3 +841,19 @@ func (v VEventSorter) Less(i, j int) bool { } return valI < valJ } + +// The arrival order of COPY_COMPLETED events with keyspace/shard is not constant. +// On the other hand, the last event should always be a fully COPY_COMPLETED event. +// That's why the sort.Slice doesn't have to handle the last element in completedEvs. +func sortCopyCompletedEvents(completedEvs []*binlogdatapb.VEvent) { + sortVEventByKeyspaceAndShard(completedEvs[:len(completedEvs)-1]) +} + +func sortVEventByKeyspaceAndShard(evs []*binlogdatapb.VEvent) { + sort.Slice(evs, func(i, j int) bool { + if evs[i].Keyspace == evs[j].Keyspace { + return evs[i].Shard < evs[j].Shard + } + return evs[i].Keyspace < evs[j].Keyspace + }) +} diff --git a/go/vt/vtgate/engine/aggregations.go b/go/vt/vtgate/engine/aggregations.go new file mode 100644 index 00000000000..8037dda37a9 --- /dev/null +++ b/go/vt/vtgate/engine/aggregations.go @@ -0,0 +1,446 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "fmt" + "strconv" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/sqltypes" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + . "vitess.io/vitess/go/vt/vtgate/engine/opcode" + "vitess.io/vitess/go/vt/vtgate/evalengine" +) + +// AggregateParams specify the parameters for each aggregation. +// It contains the opcode and input column number. +type AggregateParams struct { + Opcode AggregateOpcode + Col int + + // These are used only for distinct opcodes. + KeyCol int + WCol int + Type sqltypes.Type + CollationID collations.ID + + Alias string `json:",omitempty"` + Expr sqlparser.Expr + Original *sqlparser.AliasedExpr + + // This is based on the function passed in the select expression and + // not what we use to aggregate at the engine primitive level. + OrigOpcode AggregateOpcode +} + +func NewAggregateParam(opcode AggregateOpcode, col int, alias string) *AggregateParams { + out := &AggregateParams{ + Opcode: opcode, + Col: col, + Alias: alias, + WCol: -1, + Type: sqltypes.Unknown, + } + if opcode.NeedsComparableValues() { + out.KeyCol = col + } + return out +} + +func (ap *AggregateParams) WAssigned() bool { + return ap.WCol >= 0 +} + +func (ap *AggregateParams) String() string { + keyCol := strconv.Itoa(ap.Col) + if ap.WAssigned() { + keyCol = fmt.Sprintf("%s|%d", keyCol, ap.WCol) + } + if sqltypes.IsText(ap.Type) && ap.CollationID != collations.Unknown { + keyCol += " COLLATE " + collations.Local().LookupName(ap.CollationID) + } + dispOrigOp := "" + if ap.OrigOpcode != AggregateUnassigned && ap.OrigOpcode != ap.Opcode { + dispOrigOp = "_" + ap.OrigOpcode.String() + } + if ap.Alias != "" { + return fmt.Sprintf("%s%s(%s) AS %s", ap.Opcode.String(), dispOrigOp, keyCol, ap.Alias) + } + return fmt.Sprintf("%s%s(%s)", ap.Opcode.String(), dispOrigOp, keyCol) +} + +func (ap *AggregateParams) typ(inputType querypb.Type) querypb.Type { + if ap.OrigOpcode != AggregateUnassigned { + return ap.OrigOpcode.Type(inputType) + } + return ap.Opcode.Type(inputType) +} + +type aggregator interface { + add(row []sqltypes.Value) error + finish() sqltypes.Value + reset() +} + +type aggregatorDistinct struct { + column int + last sqltypes.Value + coll collations.ID +} + +func (a *aggregatorDistinct) shouldReturn(row []sqltypes.Value) (bool, error) { + if a.column >= 0 { + if !a.last.IsNull() { + cmp, err := evalengine.NullsafeCompare(a.last, row[a.column], a.coll) + if err != nil { + return true, err + } + if cmp == 0 { + return true, nil + } + } + a.last = row[a.column] + } + return false, nil +} + +func (a *aggregatorDistinct) reset() { + a.last = sqltypes.NULL +} + +type aggregatorCount struct { + from int + n int64 + distinct aggregatorDistinct +} + +func (a *aggregatorCount) add(row []sqltypes.Value) error { + if row[a.from].IsNull() { + return nil + } + if ret, err := a.distinct.shouldReturn(row); ret { + return err + } + a.n++ + return nil +} + +func (a *aggregatorCount) finish() sqltypes.Value { + return sqltypes.NewInt64(a.n) +} + +func (a *aggregatorCount) reset() { + a.n = 0 + a.distinct.reset() +} + +type aggregatorCountStar struct { + n int64 +} + +func (a *aggregatorCountStar) add(_ []sqltypes.Value) error { + a.n++ + return nil +} + +func (a *aggregatorCountStar) finish() sqltypes.Value { + return sqltypes.NewInt64(a.n) +} + +func (a *aggregatorCountStar) reset() { + a.n = 0 +} + +type aggregatorMinMax struct { + from int + minmax evalengine.MinMax +} + +type aggregatorMin struct { + aggregatorMinMax +} + +func (a *aggregatorMin) add(row []sqltypes.Value) (err error) { + return a.minmax.Min(row[a.from]) +} + +type aggregatorMax struct { + aggregatorMinMax +} + +func (a *aggregatorMax) add(row []sqltypes.Value) (err error) { + return a.minmax.Max(row[a.from]) +} + +func (a *aggregatorMinMax) finish() sqltypes.Value { + return a.minmax.Result() +} + +func (a *aggregatorMinMax) reset() { + a.minmax.Reset() +} + +type aggregatorSum struct { + from int + sum evalengine.Sum + distinct aggregatorDistinct +} + +func (a *aggregatorSum) add(row []sqltypes.Value) error { + if row[a.from].IsNull() { + return nil + } + if ret, err := a.distinct.shouldReturn(row); ret { + return err + } + return a.sum.Add(row[a.from]) +} + +func (a *aggregatorSum) finish() sqltypes.Value { + return a.sum.Result() +} + +func (a *aggregatorSum) reset() { + a.sum.Reset() + a.distinct.reset() +} + +type aggregatorScalar struct { + from int + current sqltypes.Value + init bool +} + +func (a *aggregatorScalar) add(row []sqltypes.Value) error { + if !a.init { + a.current = row[a.from] + a.init = true + } + return nil +} + +func (a *aggregatorScalar) finish() sqltypes.Value { + return a.current +} + +func (a *aggregatorScalar) reset() { + a.current = sqltypes.NULL + a.init = false +} + +type aggregatorGroupConcat struct { + from int + type_ sqltypes.Type + + concat []byte + n int +} + +func (a *aggregatorGroupConcat) add(row []sqltypes.Value) error { + if row[a.from].IsNull() { + return nil + } + if a.n > 0 { + a.concat = append(a.concat, ',') + } + a.concat = append(a.concat, row[a.from].Raw()...) + a.n++ + return nil +} + +func (a *aggregatorGroupConcat) finish() sqltypes.Value { + if a.n == 0 { + return sqltypes.NULL + } + return sqltypes.MakeTrusted(a.type_, a.concat) +} + +func (a *aggregatorGroupConcat) reset() { + a.n = 0 + a.concat = nil // not safe to reuse this byte slice as it's returned as MakeTrusted +} + +type aggregatorGtid struct { + from int + shards []*binlogdatapb.ShardGtid +} + +func (a *aggregatorGtid) add(row []sqltypes.Value) error { + a.shards = append(a.shards, &binlogdatapb.ShardGtid{ + Keyspace: row[a.from-1].ToString(), + Shard: row[a.from+1].ToString(), + Gtid: row[a.from].ToString(), + }) + return nil +} + +func (a *aggregatorGtid) finish() sqltypes.Value { + gtid := binlogdatapb.VGtid{ShardGtids: a.shards} + return sqltypes.NewVarChar(gtid.String()) +} + +func (a *aggregatorGtid) reset() { + a.shards = a.shards[:0] // safe to reuse because only the serialized form of a.shards is returned +} + +type aggregationState []aggregator + +func (a aggregationState) add(row []sqltypes.Value) error { + for _, st := range a { + if err := st.add(row); err != nil { + return err + } + } + return nil +} + +func (a aggregationState) finish() (row []sqltypes.Value) { + row = make([]sqltypes.Value, 0, len(a)) + for _, st := range a { + row = append(row, st.finish()) + } + return +} + +func (a aggregationState) reset() { + for _, st := range a { + st.reset() + } +} + +func isComparable(typ sqltypes.Type) bool { + if typ == sqltypes.Null || sqltypes.IsNumber(typ) || sqltypes.IsBinary(typ) { + return true + } + switch typ { + case sqltypes.Timestamp, + sqltypes.Date, + sqltypes.Time, + sqltypes.Datetime, + sqltypes.Enum, + sqltypes.Set, + sqltypes.TypeJSON, + sqltypes.Bit: + return true + } + return false +} + +func newAggregation(fields []*querypb.Field, aggregates []*AggregateParams) (aggregationState, []*querypb.Field, error) { + fields = slice.Map(fields, func(from *querypb.Field) *querypb.Field { return from.CloneVT() }) + + agstate := make([]aggregator, len(fields)) + for _, aggr := range aggregates { + sourceType := fields[aggr.Col].Type + targetType := aggr.typ(sourceType) + + var ag aggregator + var distinct = -1 + + if aggr.Opcode.IsDistinct() { + distinct = aggr.KeyCol + if aggr.WAssigned() && !isComparable(sourceType) { + distinct = aggr.WCol + } + } + + if aggr.Opcode == AggregateMin || aggr.Opcode == AggregateMax { + if aggr.WAssigned() && !isComparable(sourceType) { + return nil, nil, vterrors.VT12001("min/max on types that are not comparable is not supported") + } + } + + switch aggr.Opcode { + case AggregateCountStar: + ag = &aggregatorCountStar{} + + case AggregateCount, AggregateCountDistinct: + ag = &aggregatorCount{ + from: aggr.Col, + distinct: aggregatorDistinct{ + column: distinct, + coll: aggr.CollationID, + }, + } + + case AggregateSum, AggregateSumDistinct: + var sum evalengine.Sum + switch aggr.OrigOpcode { + case AggregateCount, AggregateCountStar, AggregateCountDistinct: + sum = evalengine.NewSumOfCounts() + default: + sum = evalengine.NewAggregationSum(sourceType) + } + + ag = &aggregatorSum{ + from: aggr.Col, + sum: sum, + distinct: aggregatorDistinct{ + column: distinct, + coll: aggr.CollationID, + }, + } + + case AggregateMin: + ag = &aggregatorMin{ + aggregatorMinMax{ + from: aggr.Col, + minmax: evalengine.NewAggregationMinMax(sourceType, aggr.CollationID), + }, + } + + case AggregateMax: + ag = &aggregatorMax{ + aggregatorMinMax{ + from: aggr.Col, + minmax: evalengine.NewAggregationMinMax(sourceType, aggr.CollationID), + }, + } + + case AggregateGtid: + ag = &aggregatorGtid{from: aggr.Col} + + case AggregateAnyValue: + ag = &aggregatorScalar{from: aggr.Col} + + case AggregateGroupConcat: + ag = &aggregatorGroupConcat{from: aggr.Col, type_: targetType} + + default: + panic("BUG: unexpected Aggregation opcode") + } + + agstate[aggr.Col] = ag + fields[aggr.Col].Type = targetType + if aggr.Alias != "" { + fields[aggr.Col].Name = aggr.Alias + } + } + + for i, a := range agstate { + if a == nil { + agstate[i] = &aggregatorScalar{from: i} + } + } + + return agstate, fields, nil +} diff --git a/go/vt/vtgate/engine/aggregations_test.go b/go/vt/vtgate/engine/aggregations_test.go new file mode 100644 index 00000000000..55ec59f73e1 --- /dev/null +++ b/go/vt/vtgate/engine/aggregations_test.go @@ -0,0 +1,181 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "fmt" + "math/rand" + "strings" + "testing" + + "github.com/google/uuid" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + . "vitess.io/vitess/go/vt/vtgate/engine/opcode" +) + +func makeTestResults(fields []*querypb.Field, gen []sqltypes.RandomGenerator, N int) []*sqltypes.Result { + result := &sqltypes.Result{Fields: fields} + + for i := 0; i < N; i++ { + row := make([]sqltypes.Value, 0, len(fields)) + for _, f := range gen { + row = append(row, f()) + } + result.Rows = append(result.Rows, row) + } + + return []*sqltypes.Result{result} +} + +func benchmarkName(fields []*querypb.Field) string { + var buf strings.Builder + for i, f := range fields { + if i > 0 { + buf.WriteByte('_') + } + fmt.Fprintf(&buf, "%s(%s)", f.Name, f.Type.String()) + } + return buf.String() +} + +func BenchmarkScalarAggregate(b *testing.B) { + var rand_i64 = sqltypes.RandomGenerators[sqltypes.Int64] + var rand_i64small = func() sqltypes.Value { + return sqltypes.NewInt64(rand.Int63n(1024)) + } + var rand_f64 = sqltypes.RandomGenerators[sqltypes.Float64] + var rand_dec = sqltypes.RandomGenerators[sqltypes.Decimal] + var rand_bin = sqltypes.RandomGenerators[sqltypes.VarBinary] + + var cases = []struct { + fields []*querypb.Field + gen []sqltypes.RandomGenerator + params []*AggregateParams + }{ + { + fields: sqltypes.MakeTestFields("count", "int64"), + gen: []sqltypes.RandomGenerator{rand_i64}, + params: []*AggregateParams{ + {Opcode: AggregateCount, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("sum_small", "int64"), + gen: []sqltypes.RandomGenerator{rand_i64small}, + params: []*AggregateParams{ + {Opcode: AggregateSum, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("sum", "int64"), + gen: []sqltypes.RandomGenerator{rand_i64}, + params: []*AggregateParams{ + {Opcode: AggregateSum, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("sum", "float64"), + gen: []sqltypes.RandomGenerator{rand_f64}, + params: []*AggregateParams{ + {Opcode: AggregateSum, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("sum", "decimal"), + gen: []sqltypes.RandomGenerator{rand_dec}, + params: []*AggregateParams{ + {Opcode: AggregateSum, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("min", "int64"), + gen: []sqltypes.RandomGenerator{rand_i64}, + params: []*AggregateParams{ + {Opcode: AggregateMin, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("min", "float64"), + gen: []sqltypes.RandomGenerator{rand_f64}, + params: []*AggregateParams{ + {Opcode: AggregateMin, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("min", "decimal"), + gen: []sqltypes.RandomGenerator{rand_dec}, + params: []*AggregateParams{ + {Opcode: AggregateMin, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("min", "varbinary"), + gen: []sqltypes.RandomGenerator{rand_bin}, + params: []*AggregateParams{ + {Opcode: AggregateMin, Col: 0}, + }, + }, + { + fields: sqltypes.MakeTestFields("keyspace|gtid|shard", "varchar|varchar|varchar"), + gen: []sqltypes.RandomGenerator{ + func() sqltypes.Value { + return sqltypes.NewVarChar("keyspace") + }, + func() sqltypes.Value { + return sqltypes.NewVarChar(uuid.New().String()) + }, + func() sqltypes.Value { + return sqltypes.NewVarChar(fmt.Sprintf("%x-%x", rand.Intn(256), rand.Intn(256))) + }, + }, + params: []*AggregateParams{ + {Opcode: AggregateGtid, Col: 1}, + }, + }, + } + + for _, tc := range cases { + b.Run(benchmarkName(tc.fields), func(b *testing.B) { + results := makeTestResults(tc.fields, tc.gen, 10000) + + fp := &fakePrimitive{ + allResultsInOneCall: true, + results: results, + } + oa := &ScalarAggregate{ + Aggregates: tc.params, + Input: fp, + } + + b.Run("TryExecute", func(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + fp.rewind() + _, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, true) + if err != nil { + panic(err) + } + } + }) + }) + } +} diff --git a/go/vt/vtgate/engine/cached_size.go b/go/vt/vtgate/engine/cached_size.go index bf370f4720d..dcaefd270ed 100644 --- a/go/vt/vtgate/engine/cached_size.go +++ b/go/vt/vtgate/engine/cached_size.go @@ -145,7 +145,7 @@ func (cached *DML) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(112) + size += int64(128) } // field Query string size += hack.RuntimeAllocSize(int64(len(cached.Query))) @@ -153,10 +153,17 @@ func (cached *DML) CachedSize(alloc bool) int64 { if cc, ok := cached.KsidVindex.(cachedObject); ok { size += cc.CachedSize(true) } - // field Table []*vitess.io/vitess/go/vt/vtgate/vindexes.Table + // field TableNames []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.TableNames)) * int64(16)) + for _, elem := range cached.TableNames { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } + // field Vindexes []*vitess.io/vitess/go/vt/vtgate/vindexes.ColumnVindex { - size += hack.RuntimeAllocSize(int64(cap(cached.Table)) * int64(8)) - for _, elem := range cached.Table { + size += hack.RuntimeAllocSize(int64(cap(cached.Vindexes)) * int64(8)) + for _, elem := range cached.Vindexes { size += elem.CachedSize(true) } } @@ -192,23 +199,30 @@ func (cached *Distinct) CachedSize(alloc bool) int64 { } // field CheckCols []vitess.io/vitess/go/vt/vtgate/engine.CheckCol { - size += hack.RuntimeAllocSize(int64(cap(cached.CheckCols)) * int64(18)) + size += hack.RuntimeAllocSize(int64(cap(cached.CheckCols)) * int64(22)) for _, elem := range cached.CheckCols { size += elem.CachedSize(false) } } return size } -func (cached *DistinctV3) CachedSize(alloc bool) int64 { +func (cached *ExecStmt) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } - // field Source vitess.io/vitess/go/vt/vtgate/engine.Primitive - if cc, ok := cached.Source.(cachedObject); ok { + // field Params []*vitess.io/vitess/go/vt/sqlparser.Variable + { + size += hack.RuntimeAllocSize(int64(cap(cached.Params)) * int64(8)) + for _, elem := range cached.Params { + size += elem.CachedSize(true) + } + } + // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Input.(cachedObject); ok { size += cc.CachedSize(true) } return size @@ -219,7 +233,7 @@ func (cached *Filter) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(48) + size += int64(64) } // field Predicate vitess.io/vitess/go/vt/vtgate/evalengine.Expr if cc, ok := cached.Predicate.(cachedObject); ok { @@ -235,20 +249,68 @@ func (cached *Filter) CachedSize(alloc bool) int64 { } return size } -func (cached *Gen4CompareV3) CachedSize(alloc bool) int64 { +func (cached *FkCascade) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } size := int64(0) if alloc { - size += int64(48) + size += int64(64) + } + // field Selection vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Selection.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Children []*vitess.io/vitess/go/vt/vtgate/engine.FkChild + { + size += hack.RuntimeAllocSize(int64(cap(cached.Children)) * int64(8)) + for _, elem := range cached.Children { + size += elem.CachedSize(true) + } + } + // field Parent vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Parent.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} +func (cached *FkChild) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(64) + } + // field BVName string + size += hack.RuntimeAllocSize(int64(len(cached.BVName))) + // field Cols []int + { + size += hack.RuntimeAllocSize(int64(cap(cached.Cols)) * int64(8)) } - // field V3 vitess.io/vitess/go/vt/vtgate/engine.Primitive - if cc, ok := cached.V3.(cachedObject); ok { + // field Exec vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Exec.(cachedObject); ok { size += cc.CachedSize(true) } - // field Gen4 vitess.io/vitess/go/vt/vtgate/engine.Primitive - if cc, ok := cached.Gen4.(cachedObject); ok { + return size +} +func (cached *FkVerify) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Verify []*vitess.io/vitess/go/vt/vtgate/engine.Verify + { + size += hack.RuntimeAllocSize(int64(cap(cached.Verify)) * int64(8)) + for _, elem := range cached.Verify { + size += elem.CachedSize(true) + } + } + // field Exec vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Exec.(cachedObject); ok { size += cc.CachedSize(true) } return size @@ -317,7 +379,7 @@ func (cached *Insert) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(224) + size += int64(240) } // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace size += cached.Keyspace.CachedSize(true) @@ -349,8 +411,8 @@ func (cached *Insert) CachedSize(alloc bool) int64 { size += elem.CachedSize(true) } } - // field Table *vitess.io/vitess/go/vt/vtgate/vindexes.Table - size += cached.Table.CachedSize(true) + // field TableName string + size += hack.RuntimeAllocSize(int64(len(cached.TableName))) // field Generate *vitess.io/vitess/go/vt/vtgate/engine.Generate size += cached.Generate.CachedSize(true) // field Prefix string @@ -512,7 +574,7 @@ func (cached *MemorySort) CachedSize(alloc bool) int64 { } // field OrderBy []vitess.io/vitess/go/vt/vtgate/engine.OrderByParams { - size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(36)) + size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(38)) } // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive if cc, ok := cached.Input.(cachedObject); ok { @@ -539,7 +601,7 @@ func (cached *MergeSort) CachedSize(alloc bool) int64 { } // field OrderBy []vitess.io/vitess/go/vt/vtgate/engine.OrderByParams { - size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(36)) + size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(38)) } return size } @@ -567,15 +629,13 @@ func (cached *OnlineDDL) CachedSize(alloc bool) int64 { } return size } - -//go:nocheckptr func (cached *OrderedAggregate) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } size := int64(0) if alloc { - size += int64(96) + size += int64(80) } // field Aggregates []*vitess.io/vitess/go/vt/vtgate/engine.AggregateParams { @@ -591,17 +651,6 @@ func (cached *OrderedAggregate) CachedSize(alloc bool) int64 { size += elem.CachedSize(true) } } - // field Collations map[int]vitess.io/vitess/go/mysql/collations.ID - if cached.Collations != nil { - size += int64(48) - hmap := reflect.ValueOf(cached.Collations) - numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) - numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) - size += hack.RuntimeAllocSize(int64(numOldBuckets * 96)) - if len(cached.Collations) > 0 || numBuckets > 1 { - size += hack.RuntimeAllocSize(int64(numBuckets * 96)) - } - } // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive if cc, ok := cached.Input.(cachedObject); ok { size += cc.CachedSize(true) @@ -767,7 +816,7 @@ func (cached *Route) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(len(cached.FieldQuery))) // field OrderBy []vitess.io/vitess/go/vt/vtgate/engine.OrderByParams { - size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(36)) + size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(38)) } // field RoutingParameters *vitess.io/vitess/go/vt/vtgate/engine.RoutingParameters size += cached.RoutingParameters.CachedSize(true) @@ -843,7 +892,7 @@ func (cached *Rows) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(cap(cached.rows)) * int64(24)) for _, elem := range cached.rows { { - size += hack.RuntimeAllocSize(int64(cap(elem)) * int64(32)) + size += hack.RuntimeAllocSize(int64(cap(elem)) * int64(56)) for _, elem := range elem { size += elem.CachedSize(false) } @@ -877,15 +926,13 @@ func (cached *SQLCalcFoundRows) CachedSize(alloc bool) int64 { } return size } - -//go:nocheckptr func (cached *ScalarAggregate) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } size := int64(0) if alloc { - size += int64(64) + size += int64(48) } // field Aggregates []*vitess.io/vitess/go/vt/vtgate/engine.AggregateParams { @@ -894,17 +941,6 @@ func (cached *ScalarAggregate) CachedSize(alloc bool) int64 { size += elem.CachedSize(true) } } - // field Collations map[int]vitess.io/vitess/go/mysql/collations.ID - if cached.Collations != nil { - size += int64(48) - hmap := reflect.ValueOf(cached.Collations) - numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) - numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) - size += hack.RuntimeAllocSize(int64(numOldBuckets * 96)) - if len(cached.Collations) > 0 || numBuckets > 1 { - size += hack.RuntimeAllocSize(int64(numBuckets * 96)) - } - } // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive if cc, ok := cached.Input.(cachedObject); ok { size += cc.CachedSize(true) @@ -1102,6 +1138,20 @@ func (cached *SysVarSetAware) CachedSize(alloc bool) int64 { } return size } +func (cached *ThrottleApp) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace + size += cached.Keyspace.CachedSize(true) + // field ThrottledAppRule *vitess.io/vitess/go/vt/proto/topodata.ThrottledAppRule + size += cached.ThrottledAppRule.CachedSize(true) + return size +} //go:nocheckptr func (cached *Update) CachedSize(alloc bool) int64 { @@ -1193,6 +1243,22 @@ func (cached *VStream) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(len(cached.Position))) return size } +func (cached *Verify) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Exec vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Exec.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Typ string + size += hack.RuntimeAllocSize(int64(len(cached.Typ))) + return size +} func (cached *VindexFunc) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) diff --git a/go/vt/vtgate/engine/compare_utils.go b/go/vt/vtgate/engine/compare_utils.go deleted file mode 100644 index c854d6723d3..00000000000 --- a/go/vt/vtgate/engine/compare_utils.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package engine - -import ( - "encoding/json" - - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/log" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" -) - -func printMismatch(leftResult, rightResult *sqltypes.Result, leftPrimitive, rightPrimitive Primitive, leftName, rightName string) { - log.Errorf("Results of %s and %s are not equal. Displaying diff.", rightName, leftName) - - // get right plan and print it - rightplan := &Plan{ - Instructions: rightPrimitive, - } - rightJSON, _ := json.MarshalIndent(rightplan, "", " ") - log.Errorf("%s's plan:\n%s", rightName, string(rightJSON)) - - // get left's plan and print it - leftplan := &Plan{ - Instructions: leftPrimitive, - } - leftJSON, _ := json.MarshalIndent(leftplan, "", " ") - log.Errorf("%s's plan:\n%s", leftName, string(leftJSON)) - - log.Errorf("%s's results:\n", rightName) - log.Errorf("\t[rows affected: %d]\n", rightResult.RowsAffected) - for _, row := range rightResult.Rows { - log.Errorf("\t%s", row) - } - log.Errorf("%s's results:\n", leftName) - log.Errorf("\t[rows affected: %d]\n", leftResult.RowsAffected) - for _, row := range leftResult.Rows { - log.Errorf("\t%s", row) - } - log.Error("End of diff.") -} - -// CompareErrors compares the two errors, and if they don't match, produces an error -func CompareErrors(leftErr, rightErr error, leftName, rightName string) error { - if leftErr != nil && rightErr != nil { - if leftErr.Error() == rightErr.Error() { - return rightErr - } - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "%s and %s failed with different errors: %s: [%s], %s: [%s]", leftName, rightName, leftErr.Error(), rightErr.Error(), leftName, rightName) - } - if leftErr == nil && rightErr != nil { - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "%s failed while %s did not: %s", rightName, rightErr.Error(), leftName) - } - if leftErr != nil && rightErr == nil { - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "%s failed while %s did not: %s", leftName, leftErr.Error(), rightName) - } - return nil -} diff --git a/go/vt/vtgate/engine/concatenate.go b/go/vt/vtgate/engine/concatenate.go index 7858ccfc938..904a44ccb85 100644 --- a/go/vt/vtgate/engine/concatenate.go +++ b/go/vt/vtgate/engine/concatenate.go @@ -82,8 +82,8 @@ func formatTwoOptionsNicely(a, b string) string { return a + "_" + b } -// ErrWrongNumberOfColumnsInSelect is an error -var ErrWrongNumberOfColumnsInSelect = vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.WrongNumberOfColumnsInSelect, "The used SELECT statements have a different number of columns") +// errWrongNumberOfColumnsInSelect is an error +var errWrongNumberOfColumnsInSelect = vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.WrongNumberOfColumnsInSelect, "The used SELECT statements have a different number of columns") // TryExecute performs a non-streaming exec. func (c *Concatenate) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { @@ -106,7 +106,7 @@ func (c *Concatenate) TryExecute(ctx context.Context, vcursor VCursor, bindVars if len(rows) > 0 && len(r.Rows) > 0 && len(rows[0]) != len(r.Rows[0]) { - return nil, ErrWrongNumberOfColumnsInSelect + return nil, errWrongNumberOfColumnsInSelect } rows = append(rows, r.Rows...) @@ -340,8 +340,8 @@ func (c *Concatenate) NeedsTransaction() bool { } // Inputs returns the input primitives for this -func (c *Concatenate) Inputs() []Primitive { - return c.Sources +func (c *Concatenate) Inputs() ([]Primitive, []map[string]any) { + return c.Sources, nil } func (c *Concatenate) description() PrimitiveDescription { @@ -350,7 +350,7 @@ func (c *Concatenate) description() PrimitiveDescription { func (c *Concatenate) compareFields(fields1 []*querypb.Field, fields2 []*querypb.Field) error { if len(fields1) != len(fields2) { - return ErrWrongNumberOfColumnsInSelect + return errWrongNumberOfColumnsInSelect } for i, field1 := range fields1 { if _, found := c.NoNeedToTypeCheck[i]; found { diff --git a/go/vt/vtgate/engine/delete.go b/go/vt/vtgate/engine/delete.go index 1db717450f8..e931d665b44 100644 --- a/go/vt/vtgate/engine/delete.go +++ b/go/vt/vtgate/engine/delete.go @@ -108,11 +108,7 @@ func (del *Delete) deleteVindexEntries(ctx context.Context, vcursor VCursor, bin return err } colnum := del.KsidLength - vindexTable, err := del.GetSingleTable() - if err != nil { - return err - } - for _, colVindex := range vindexTable.Owned { + for _, colVindex := range del.Vindexes { // Fetch the column values. colnum must keep incrementing. fromIds := make([]sqltypes.Value, 0, len(colVindex.Columns)) for range colVindex.Columns { diff --git a/go/vt/vtgate/engine/delete_test.go b/go/vt/vtgate/engine/delete_test.go index 900c33e0757..7312b4bd010 100644 --- a/go/vt/vtgate/engine/delete_test.go +++ b/go/vt/vtgate/engine/delete_test.go @@ -65,7 +65,7 @@ func TestDeleteUnsharded(t *testing.T) { } func TestDeleteEqual(t *testing.T) { - vindex, _ := vindexes.NewHash("", nil) + vindex, _ := vindexes.CreateVindex("hash", "", nil) del := &Delete{ DML: &DML{ RoutingParameters: &RoutingParameters{ @@ -90,14 +90,14 @@ func TestDeleteEqual(t *testing.T) { }) // Failure case - expr := evalengine.NewBindVar("aa", collations.TypedCollation{}) + expr := evalengine.NewBindVar("aa", sqltypes.Unknown, collations.Unknown) del.Values = []evalengine.Expr{expr} _, err = del.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) require.EqualError(t, err, "query arguments missing for aa") } func TestDeleteEqualMultiCol(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) del := &Delete{ DML: &DML{ RoutingParameters: &RoutingParameters{ @@ -122,14 +122,14 @@ func TestDeleteEqualMultiCol(t *testing.T) { }) // Failure case - expr := evalengine.NewBindVar("aa", collations.TypedCollation{}) + expr := evalengine.NewBindVar("aa", sqltypes.Unknown, collations.Unknown) del.Values = []evalengine.Expr{expr} _, err = del.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) require.EqualError(t, err, "query arguments missing for aa") } func TestDeleteEqualNoRoute(t *testing.T) { - vindex, _ := vindexes.NewLookupUnique("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup_unique", "", map[string]string{ "table": "lkp", "from": "from", "to": "toc", @@ -161,7 +161,7 @@ func TestDeleteEqualNoRoute(t *testing.T) { func TestDeleteEqualNoScatter(t *testing.T) { t.Skip("planner does not produces this plan anymore") - vindex, _ := vindexes.NewLookupUnique("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup_unique", "", map[string]string{ "table": "lkp", "from": "from", "to": "toc", @@ -197,10 +197,9 @@ func TestDeleteOwnedVindex(t *testing.T) { Vindex: ks.Vindexes["hash"], Values: []evalengine.Expr{evalengine.NewLiteralInt(1)}, }, - Query: "dummy_delete", - Table: []*vindexes.Table{ - ks.Tables["t1"], - }, + Query: "dummy_delete", + TableNames: []string{ks.Tables["t1"].Name.String()}, + Vindexes: ks.Tables["t1"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: ks.Vindexes["hash"], KsidLength: 1, @@ -285,10 +284,9 @@ func TestDeleteOwnedVindexMultiCol(t *testing.T) { Vindex: ks.Vindexes["rg_vdx"], Values: []evalengine.Expr{evalengine.NewLiteralInt(1), evalengine.NewLiteralInt(2)}, }, - Query: "dummy_delete", - Table: []*vindexes.Table{ - ks.Tables["rg_tbl"], - }, + Query: "dummy_delete", + TableNames: []string{ks.Tables["rg_tbl"].Name.String()}, + Vindexes: ks.Tables["rg_tbl"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: ks.Vindexes["rg_vdx"], KsidLength: 2, @@ -368,10 +366,9 @@ func TestDeleteSharded(t *testing.T) { Opcode: Scatter, Keyspace: ks.Keyspace, }, - Query: "dummy_delete", - Table: []*vindexes.Table{ - ks.Tables["t2"], - }, + Query: "dummy_delete", + TableNames: []string{ks.Tables["t2"].Name.String()}, + Vindexes: ks.Tables["t2"].Owned, }, } @@ -397,10 +394,9 @@ func TestDeleteShardedStreaming(t *testing.T) { Opcode: Scatter, Keyspace: ks.Keyspace, }, - Query: "dummy_delete", - Table: []*vindexes.Table{ - ks.Tables["t2"], - }, + Query: "dummy_delete", + TableNames: []string{ks.Tables["t2"].Name.String()}, + Vindexes: ks.Tables["t2"].Owned, }, } @@ -423,10 +419,9 @@ func TestDeleteScatterOwnedVindex(t *testing.T) { Opcode: Scatter, Keyspace: ks.Keyspace, }, - Query: "dummy_delete", - Table: []*vindexes.Table{ - ks.Tables["t1"], - }, + Query: "dummy_delete", + TableNames: []string{ks.Tables["t1"].Name.String()}, + Vindexes: ks.Tables["t1"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: ks.Vindexes["hash"], KsidLength: 1, @@ -515,10 +510,9 @@ func TestDeleteInChangedVindexMultiCol(t *testing.T) { evalengine.NewLiteralInt(3), }, }, - Query: "dummy_update", - Table: []*vindexes.Table{ - ks.Tables["rg_tbl"], - }, + Query: "dummy_update", + TableNames: []string{ks.Tables["rg_tbl"].Name.String()}, + Vindexes: ks.Tables["rg_tbl"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: ks.Vindexes["rg_vdx"], KsidLength: 2, @@ -556,7 +550,7 @@ func TestDeleteInChangedVindexMultiCol(t *testing.T) { } func TestDeleteEqualSubshard(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) del := &Delete{ DML: &DML{ RoutingParameters: &RoutingParameters{ diff --git a/go/vt/vtgate/engine/distinct.go b/go/vt/vtgate/engine/distinct.go index bc3634d630b..8608aec0d98 100644 --- a/go/vt/vtgate/engine/distinct.go +++ b/go/vt/vtgate/engine/distinct.go @@ -35,11 +35,12 @@ type ( Distinct struct { Source Primitive CheckCols []CheckCol - Truncate bool + Truncate int } CheckCol struct { Col int WsCol *int + Type sqltypes.Type Collation collations.ID } probeTable struct { @@ -189,8 +190,8 @@ func (d *Distinct) TryExecute(ctx context.Context, vcursor VCursor, bindVars map result.Rows = append(result.Rows, row) } } - if d.Truncate { - return result.Truncate(len(d.CheckCols)), nil + if d.Truncate > 0 { + return result.Truncate(d.Truncate), nil } return result, err } @@ -245,8 +246,8 @@ func (d *Distinct) NeedsTransaction() bool { } // Inputs implements the Primitive interface -func (d *Distinct) Inputs() []Primitive { - return []Primitive{d.Source} +func (d *Distinct) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{d.Source}, nil } func (d *Distinct) description() PrimitiveDescription { @@ -260,8 +261,8 @@ func (d *Distinct) description() PrimitiveDescription { other["Collations"] = colls } - if d.Truncate { - other["ResultColumns"] = len(d.CheckCols) + if d.Truncate > 0 { + other["ResultColumns"] = d.Truncate } return PrimitiveDescription{ Other: other, @@ -274,15 +275,15 @@ func (cc CheckCol) SwitchToWeightString() CheckCol { return CheckCol{ Col: *cc.WsCol, WsCol: nil, + Type: sqltypes.VarBinary, Collation: collations.CollationBinaryID, } } func (cc CheckCol) String() string { - coll := cc.Collation.Get() var collation string - if coll != nil { - collation = ": " + coll.Name() + if sqltypes.IsText(cc.Type) && cc.Collation != collations.Unknown { + collation = ": " + collations.Local().LookupName(cc.Collation) } var column string diff --git a/go/vt/vtgate/engine/distinctV3.go b/go/vt/vtgate/engine/distinctV3.go deleted file mode 100644 index 0506331d9c6..00000000000 --- a/go/vt/vtgate/engine/distinctV3.go +++ /dev/null @@ -1,180 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package engine - -import ( - "context" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/vtgate/evalengine" -) - -// DistinctV3 Primitive is used to uniqueify results -// It does not always work, and should be removed once the V3 planner has been removed -var _ Primitive = (*DistinctV3)(nil) - -// Distinct Primitive is used to uniqueify results -type DistinctV3 struct { - Source Primitive -} - -type row = []sqltypes.Value - -type probeTableV3 struct { - m map[evalengine.HashCode][]row -} - -func (pt *probeTableV3) exists(inputRow row) (bool, error) { - // calculate hashcode from all column values in the input row - code := evalengine.HashCode(17) - for _, value := range inputRow { - hashcode, err := evalengine.NullsafeHashcode(value, collations.Unknown, value.Type()) - if err != nil { - return false, err - } - code = code*31 + hashcode - } - - existingRows, found := pt.m[code] - if !found { - // nothing with this hash code found, we can be sure it's a not seen row - pt.m[code] = []row{inputRow} - return false, nil - } - - // we found something in the map - still need to check all individual values - // so we don't just fall for a hash collision - for _, existingRow := range existingRows { - exists, err := equalV3(existingRow, inputRow) - if err != nil { - return false, err - } - if exists { - return true, nil - } - } - - pt.m[code] = append(existingRows, inputRow) - - return false, nil -} - -func equalV3(a, b []sqltypes.Value) (bool, error) { - for i, aVal := range a { - cmp, err := evalengine.NullsafeCompare(aVal, b[i], collations.Unknown) - if err != nil { - return false, err - } - if cmp != 0 { - return false, nil - } - } - return true, nil -} - -func newProbeTableV3() *probeTableV3 { - return &probeTableV3{m: map[evalengine.HashCode][]row{}} -} - -// TryExecute implements the Primitive interface -func (d *DistinctV3) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { - input, err := vcursor.ExecutePrimitive(ctx, d.Source, bindVars, wantfields) - if err != nil { - return nil, err - } - - result := &sqltypes.Result{ - Fields: input.Fields, - InsertID: input.InsertID, - } - - pt := newProbeTableV3() - - for _, row := range input.Rows { - exists, err := pt.exists(row) - if err != nil { - return nil, err - } - if !exists { - result.Rows = append(result.Rows, row) - } - } - - return result, err -} - -// TryStreamExecute implements the Primitive interface -func (d *DistinctV3) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - pt := newProbeTableV3() - - err := vcursor.StreamExecutePrimitive(ctx, d.Source, bindVars, wantfields, func(input *sqltypes.Result) error { - result := &sqltypes.Result{ - Fields: input.Fields, - InsertID: input.InsertID, - } - for _, row := range input.Rows { - exists, err := pt.exists(row) - if err != nil { - return err - } - if !exists { - result.Rows = append(result.Rows, row) - } - } - return callback(result) - }) - - return err -} - -// RouteType implements the Primitive interface -func (d *DistinctV3) RouteType() string { - return d.Source.RouteType() -} - -// GetKeyspaceName implements the Primitive interface -func (d *DistinctV3) GetKeyspaceName() string { - return d.Source.GetKeyspaceName() -} - -// GetTableName implements the Primitive interface -func (d *DistinctV3) GetTableName() string { - return d.Source.GetTableName() -} - -// GetFields implements the Primitive interface -func (d *DistinctV3) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - return d.Source.GetFields(ctx, vcursor, bindVars) -} - -// NeedsTransaction implements the Primitive interface -func (d *DistinctV3) NeedsTransaction() bool { - return d.Source.NeedsTransaction() -} - -// Inputs implements the Primitive interface -func (d *DistinctV3) Inputs() []Primitive { - return []Primitive{d.Source} -} - -func (d *DistinctV3) description() PrimitiveDescription { - return PrimitiveDescription{ - OperatorType: "Distinct", - } -} diff --git a/go/vt/vtgate/engine/distinct_test.go b/go/vt/vtgate/engine/distinct_test.go index 5e39d2c4425..e120c60bd3e 100644 --- a/go/vt/vtgate/engine/distinct_test.go +++ b/go/vt/vtgate/engine/distinct_test.go @@ -65,12 +65,12 @@ func TestDistinct(t *testing.T) { expectedError: "text type with an unknown/unsupported collation cannot be hashed", }, { testName: "varchar columns with collations", - collations: []collations.ID{collations.ID(0x21)}, + collations: []collations.ID{collations.CollationUtf8mb4ID}, inputs: r("myid", "varchar", "monkey", "horse", "Horse", "Monkey", "horses", "MONKEY"), expectedResult: r("myid", "varchar", "monkey", "horse", "horses"), }, { testName: "mixed columns", - collations: []collations.ID{collations.ID(0x21), collations.Unknown}, + collations: []collations.ID{collations.CollationUtf8mb4ID, collations.Unknown}, inputs: r("myid|id", "varchar|int64", "monkey|1", "horse|1", "Horse|1", "Monkey|1", "horses|1", "MONKEY|2"), expectedResult: r("myid|id", "varchar|int64", "monkey|1", "horse|1", "horses|1", "MONKEY|2"), }} @@ -88,6 +88,7 @@ func TestDistinct(t *testing.T) { } checkCols = append(checkCols, CheckCol{ Col: i, + Type: tc.inputs.Fields[i].Type, Collation: collID, }) } @@ -96,7 +97,6 @@ func TestDistinct(t *testing.T) { distinct := &Distinct{ Source: &fakePrimitive{results: []*sqltypes.Result{tc.inputs}}, CheckCols: checkCols, - Truncate: false, } qr, err := distinct.TryExecute(context.Background(), &noopVCursor{}, nil, true) @@ -134,6 +134,7 @@ func TestWeightStringFallBack(t *testing.T) { checkCols := []CheckCol{{ Col: 0, WsCol: &offsetOne, + Type: sqltypes.Unknown, Collation: collations.Unknown, }} input := r("myid|weightstring(myid)", @@ -145,7 +146,7 @@ func TestWeightStringFallBack(t *testing.T) { distinct := &Distinct{ Source: &fakePrimitive{results: []*sqltypes.Result{input}}, CheckCols: checkCols, - Truncate: true, + Truncate: 1, } qr, err := distinct.TryExecute(context.Background(), &noopVCursor{}, nil, true) @@ -159,6 +160,7 @@ func TestWeightStringFallBack(t *testing.T) { utils.MustMatch(t, []CheckCol{{ Col: 0, WsCol: &offsetOne, + Type: sqltypes.Unknown, Collation: collations.Unknown, }}, distinct.CheckCols, "checkCols should not be updated") } diff --git a/go/vt/vtgate/engine/dml.go b/go/vt/vtgate/engine/dml.go index 5201fe9f81e..51177f41e08 100644 --- a/go/vt/vtgate/engine/dml.go +++ b/go/vt/vtgate/engine/dml.go @@ -45,8 +45,11 @@ type DML struct { // KsidLength is number of columns that represents KsidVindex KsidLength int - // Table specifies the table for the update. - Table []*vindexes.Table + // TableNames are the name of the tables involved in the query. + TableNames []string + + // Vindexes are the column vindexes modified by this DML. + Vindexes []*vindexes.ColumnVindex // OwnedVindexQuery is used for updating changes in lookup vindexes. OwnedVindexQuery string @@ -103,29 +106,16 @@ func (dml *DML) GetKeyspaceName() string { // GetTableName specifies the table that this primitive routes to. func (dml *DML) GetTableName() string { - if dml.Table != nil { - tableNameMap := map[string]any{} - for _, table := range dml.Table { - tableNameMap[table.Name.String()] = nil - } - - var tableNames []string - for name := range tableNameMap { + sort.Strings(dml.TableNames) + var tableNames []string + var previousTbl string + for _, name := range dml.TableNames { + if name != previousTbl { tableNames = append(tableNames, name) + previousTbl = name } - sort.Strings(tableNames) - - return strings.Join(tableNames, ", ") - } - return "" -} - -// GetSingleTable returns single table used in dml. -func (dml *DML) GetSingleTable() (*vindexes.Table, error) { - if len(dml.Table) > 1 { - return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported dml on complex table expression") } - return dml.Table[0], nil + return strings.Join(tableNames, ", ") } func allowOnlyPrimary(rss ...*srvtopo.ResolvedShard) error { diff --git a/go/vt/vtgate/engine/exec_prepared_statement.go b/go/vt/vtgate/engine/exec_prepared_statement.go new file mode 100644 index 00000000000..c9a23d89e12 --- /dev/null +++ b/go/vt/vtgate/engine/exec_prepared_statement.go @@ -0,0 +1,93 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "strconv" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" +) + +var _ Primitive = (*ExecStmt)(nil) + +type ExecStmt struct { + Params []*sqlparser.Variable + Input Primitive + + noTxNeeded +} + +func (e *ExecStmt) RouteType() string { + return "EXECUTE" +} + +func (e *ExecStmt) GetKeyspaceName() string { + return e.Input.GetKeyspaceName() +} + +func (e *ExecStmt) GetTableName() string { + return e.Input.GetTableName() +} + +func (e *ExecStmt) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + return nil, vterrors.VT12001("prepare command on execute statement") +} + +func (e *ExecStmt) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { + bindVars = e.prepareBindVars(vcursor, bindVars) + return vcursor.ExecutePrimitive(ctx, e.Input, bindVars, wantfields) +} + +func (e *ExecStmt) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + bindVars = e.prepareBindVars(vcursor, bindVars) + return vcursor.StreamExecutePrimitive(ctx, e.Input, bindVars, wantfields, callback) +} + +func (e *ExecStmt) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{e.Input}, nil +} + +func (e *ExecStmt) description() PrimitiveDescription { + var params []string + for _, p := range e.Params { + params = append(params, p.Name.Lowered()) + } + return PrimitiveDescription{ + OperatorType: e.RouteType(), + Other: map[string]any{ + "Parameters": params, + }, + } +} + +func (e *ExecStmt) prepareBindVars(vcursor VCursor, bindVars map[string]*querypb.BindVariable) map[string]*querypb.BindVariable { + count := 1 + for _, p := range e.Params { + bvName := "v" + strconv.Itoa(count) + bv := vcursor.Session().GetUDV(p.Name.Lowered()) + if bv == nil { + bv = sqltypes.NullBindVariable + } + bindVars[bvName] = bv + count++ + } + return bindVars +} diff --git a/go/vt/vtgate/engine/fake_primitive_test.go b/go/vt/vtgate/engine/fake_primitive_test.go index 1a168dc3dc4..dcec32f1ffd 100644 --- a/go/vt/vtgate/engine/fake_primitive_test.go +++ b/go/vt/vtgate/engine/fake_primitive_test.go @@ -43,8 +43,8 @@ type fakePrimitive struct { allResultsInOneCall bool } -func (f *fakePrimitive) Inputs() []Primitive { - return []Primitive{} +func (f *fakePrimitive) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{}, nil } var _ Primitive = (*fakePrimitive)(nil) diff --git a/go/vt/vtgate/engine/fake_vcursor_test.go b/go/vt/vtgate/engine/fake_vcursor_test.go index 464502f5099..139223d4d09 100644 --- a/go/vt/vtgate/engine/fake_vcursor_test.go +++ b/go/vt/vtgate/engine/fake_vcursor_test.go @@ -25,6 +25,7 @@ import ( "strings" "sync" "testing" + "time" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" @@ -50,12 +51,17 @@ var _ SessionActions = (*noopVCursor)(nil) type noopVCursor struct { } +func (t *noopVCursor) GetUDV(key string) *querypb.BindVariable { + // TODO implement me + panic("implement me") +} + func (t *noopVCursor) InTransaction() bool { return false } func (t *noopVCursor) SetCommitOrder(co vtgatepb.CommitOrder) { - //TODO implement me + // TODO implement me panic("implement me") } @@ -87,6 +93,10 @@ func (t *noopVCursor) SetExec(ctx context.Context, name string, value string) er panic("implement me") } +func (t *noopVCursor) ThrottleApp(ctx context.Context, throttleAppRule *topodatapb.ThrottledAppRule) error { + panic("implement me") +} + func (t *noopVCursor) ShowExec(ctx context.Context, command sqlparser.ShowCommandType, filter *sqlparser.ShowFilter) (*sqltypes.Result, error) { panic("implement me") } @@ -98,7 +108,11 @@ func (t *noopVCursor) SetContextWithValue(key, value interface{}) func() { // ConnCollation implements VCursor func (t *noopVCursor) ConnCollation() collations.ID { - return collations.CollationUtf8mb4ID + return collations.Default() +} + +func (t *noopVCursor) TimeZone() *time.Location { + return nil } func (t *noopVCursor) ExecutePrimitive(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { @@ -145,6 +159,14 @@ func (t *noopVCursor) GetDDLStrategy() string { panic("implement me") } +func (t *noopVCursor) SetMigrationContext(migrationContext string) { + panic("implement me") +} + +func (t *noopVCursor) GetMigrationContext() string { + panic("implement me") +} + func (t *noopVCursor) GetSessionUUID() string { panic("implement me") } @@ -255,10 +277,18 @@ func (t *noopVCursor) SetWorkload(querypb.ExecuteOptions_Workload) { panic("implement me") } +func (t *noopVCursor) SetWorkloadName(string) { + panic("implement me") +} + func (t *noopVCursor) SetPlannerVersion(querypb.ExecuteOptions_PlannerVersion) { panic("implement me") } +func (t *noopVCursor) SetPriority(string) { + panic("implement me") +} + func (t *noopVCursor) SetConsolidator(querypb.ExecuteOptions_Consolidator) { panic("implement me") } @@ -359,6 +389,11 @@ type loggingVCursor struct { shardSession []*srvtopo.ResolvedShard } +func (f *loggingVCursor) GetUDV(key string) *querypb.BindVariable { + // TODO implement me + panic("implement me") +} + type tableRoutes struct { tbl *vindexes.Table } @@ -682,10 +717,18 @@ func (f *loggingVCursor) SetWorkload(querypb.ExecuteOptions_Workload) { panic("implement me") } +func (f *loggingVCursor) SetWorkloadName(string) { + panic("implement me") +} + func (f *loggingVCursor) SetPlannerVersion(querypb.ExecuteOptions_PlannerVersion) { panic("implement me") } +func (f *loggingVCursor) SetPriority(string) { + panic("implement me") +} + func (f *loggingVCursor) FindRoutedTable(tbl sqlparser.TableName) (*vindexes.Table, error) { f.log = append(f.log, fmt.Sprintf("FindTable(%s)", sqlparser.String(tbl))) return f.tableRoutes.tbl, nil diff --git a/go/vt/vtgate/engine/filter.go b/go/vt/vtgate/engine/filter.go index f36467a7526..c0a54f2b6ac 100644 --- a/go/vt/vtgate/engine/filter.go +++ b/go/vt/vtgate/engine/filter.go @@ -19,12 +19,10 @@ package engine import ( "context" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" - querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/evalengine" ) var _ Primitive = (*Filter)(nil) @@ -35,6 +33,8 @@ type Filter struct { ASTPredicate sqlparser.Expr Input Primitive + Truncate int + noTxNeeded } @@ -59,40 +59,35 @@ func (f *Filter) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[s if err != nil { return nil, err } - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) var rows [][]sqltypes.Value - env.Fields = result.Fields for _, row := range result.Rows { env.Row = row evalResult, err := env.Evaluate(f.Predicate) if err != nil { return nil, err } - intEvalResult, err := evalResult.Value().ToInt64() - if err != nil { - return nil, err - } - if intEvalResult == 1 { + + if evalResult.ToBoolean() { rows = append(rows, row) } } result.Rows = rows - return result, nil + return result.Truncate(f.Truncate), nil } // TryStreamExecute satisfies the Primitive interface. func (f *Filter) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) filter := func(results *sqltypes.Result) error { var rows [][]sqltypes.Value - env.Fields = results.Fields for _, row := range results.Rows { env.Row = row evalResult, err := env.Evaluate(f.Predicate) if err != nil { return err } - intEvalResult, err := evalResult.Value().ToInt64() + intEvalResult, err := evalResult.Value(vcursor.ConnCollation()).ToInt64() if err != nil { return err } @@ -101,7 +96,7 @@ func (f *Filter) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars } } results.Rows = rows - return callback(results) + return callback(results.Truncate(f.Truncate)) } return vcursor.StreamExecutePrimitive(ctx, f.Input, bindVars, wantfields, filter) @@ -113,13 +108,14 @@ func (f *Filter) GetFields(ctx context.Context, vcursor VCursor, bindVars map[st } // Inputs returns the input to limit -func (f *Filter) Inputs() []Primitive { - return []Primitive{f.Input} +func (f *Filter) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{f.Input}, nil } func (f *Filter) description() PrimitiveDescription { other := map[string]any{ - "Predicate": sqlparser.String(f.ASTPredicate), + "Predicate": sqlparser.String(f.ASTPredicate), + "ResultColumns": f.Truncate, } return PrimitiveDescription{ diff --git a/go/vt/vtgate/engine/filter_test.go b/go/vt/vtgate/engine/filter_test.go index b6d8730a869..9a8335e4d7e 100644 --- a/go/vt/vtgate/engine/filter_test.go +++ b/go/vt/vtgate/engine/filter_test.go @@ -23,74 +23,56 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/evalengine" ) -type dummyTranslator struct{} - -func (d dummyTranslator) ColumnLookup(col *sqlparser.ColName) (int, error) { - switch col.Name.String() { - case "left": - return 0, nil - case "right": - return 1, nil - default: - panic("unexpected column name") - } -} - -func (d dummyTranslator) CollationForExpr(_ sqlparser.Expr) collations.ID { - return collationEnv.LookupByName("utf8mb4_bin").ID() -} - -func (d dummyTranslator) DefaultCollation() collations.ID { - return collationEnv.LookupByName("utf8mb4_bin").ID() -} - func TestFilterPass(t *testing.T) { + utf8mb4Bin := collationEnv.LookupByName("utf8mb4_bin") predicate := &sqlparser.ComparisonExpr{ Operator: sqlparser.GreaterThanOp, Left: sqlparser.NewColName("left"), Right: sqlparser.NewColName("right"), } - pred, err := evalengine.Translate(predicate, &dummyTranslator{}) - require.NoError(t, err) - tcases := []struct { name string res *sqltypes.Result expRes string }{{ name: "int32", - res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("a|b", "int32|int32"), "0|1", "1|0", "2|3"), + res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("left|right", "int32|int32"), "0|1", "1|0", "2|3"), expRes: `[[INT32(1) INT32(0)]]`, }, { name: "uint16", - res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("a|b", "uint16|uint16"), "0|1", "1|0", "2|3"), + res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("left|right", "uint16|uint16"), "0|1", "1|0", "2|3"), expRes: `[[UINT16(1) UINT16(0)]]`, }, { name: "uint64_int64", - res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("a|b", "uint64|int64"), "0|1", "1|0", "2|3"), + res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("left|right", "uint64|int64"), "0|1", "1|0", "2|3"), expRes: `[[UINT64(1) INT64(0)]]`, }, { name: "int32_uint32", - res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("a|b", "int32|uint32"), "0|1", "1|0", "2|3"), + res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("left|right", "int32|uint32"), "0|1", "1|0", "2|3"), expRes: `[[INT32(1) UINT32(0)]]`, }, { name: "uint16_int8", - res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("a|b", "uint16|int8"), "0|1", "1|0", "2|3"), + res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("left|right", "uint16|int8"), "0|1", "1|0", "2|3"), expRes: `[[UINT16(1) INT8(0)]]`, }, { name: "uint64_int32", - res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("a|b", "uint64|int32"), "0|1", "1|0", "2|3"), + res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("left|right", "uint64|int32"), "0|1", "1|0", "2|3"), expRes: `[[UINT64(1) INT32(0)]]`, }} for _, tc := range tcases { t.Run(tc.name, func(t *testing.T) { + pred, err := evalengine.Translate(predicate, &evalengine.Config{ + Collation: utf8mb4Bin, + ResolveColumn: evalengine.FieldResolver(tc.res.Fields).Column, + }) + require.NoError(t, err) + filter := &Filter{ Predicate: pred, Input: &fakePrimitive{results: []*sqltypes.Result{tc.res}}, diff --git a/go/vt/vtgate/engine/fk_cascade.go b/go/vt/vtgate/engine/fk_cascade.go new file mode 100644 index 00000000000..e7d14d0aa31 --- /dev/null +++ b/go/vt/vtgate/engine/fk_cascade.go @@ -0,0 +1,197 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "fmt" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +// FkChild contains the Child Primitive to be executed collecting the values from the Selection Primitive using the column indexes. +// BVName is used to pass the value as bind variable to the Child Primitive. +type FkChild struct { + BVName string + Cols []int // indexes + Exec Primitive +} + +// FkCascade is a primitive that implements foreign key cascading using Selection as values required to execute the FkChild Primitives. +// On success, it executes the Parent Primitive. +type FkCascade struct { + // Selection is the Primitive that is used to find the rows that are going to be modified in the child tables. + Selection Primitive + // Children is a list of child foreign key Primitives that are executed using rows from the Selection Primitive. + Children []*FkChild + // Parent is the Primitive that is executed after the children are modified. + Parent Primitive + + txNeeded +} + +// RouteType implements the Primitive interface. +func (fkc *FkCascade) RouteType() string { + return "FkCascade" +} + +// GetKeyspaceName implements the Primitive interface. +func (fkc *FkCascade) GetKeyspaceName() string { + return fkc.Parent.GetKeyspaceName() +} + +// GetTableName implements the Primitive interface. +func (fkc *FkCascade) GetTableName() string { + return fkc.Parent.GetTableName() +} + +// GetFields implements the Primitive interface. +func (fkc *FkCascade) GetFields(_ context.Context, _ VCursor, _ map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] GetFields should not be called") +} + +// TryExecute implements the Primitive interface. +func (fkc *FkCascade) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { + // Execute the Selection primitive to find the rows that are going to modified. + // This will be used to find the rows that need modification on the children. + selectionRes, err := vcursor.ExecutePrimitive(ctx, fkc.Selection, bindVars, wantfields) + if err != nil { + return nil, err + } + + // If no rows are to be modified, there is nothing to do. + if len(selectionRes.Rows) == 0 { + return &sqltypes.Result{}, nil + } + + for _, child := range fkc.Children { + // We create a bindVariable for each Child + // that stores the tuple of columns involved in the fk constraint. + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + } + for _, row := range selectionRes.Rows { + // Create a tuple from each Row. + tuple := &querypb.Value{ + Type: querypb.Type_TUPLE, + } + for _, colIdx := range child.Cols { + tuple.Values = append(tuple.Values, + sqltypes.ValueToProto(row[colIdx])) + } + bv.Values = append(bv.Values, tuple) + } + // Execute the child primitive, and bail out incase of failure. + // Since this Primitive is always executed in a transaction, the changes should + // be rolled back incase of an error. + bindVars[child.BVName] = bv + _, err = vcursor.ExecutePrimitive(ctx, child.Exec, bindVars, wantfields) + if err != nil { + return nil, err + } + delete(bindVars, child.BVName) + } + + // All the children are modified successfully, we can now execute the Parent Primitive. + return vcursor.ExecutePrimitive(ctx, fkc.Parent, bindVars, wantfields) +} + +// TryStreamExecute implements the Primitive interface. +func (fkc *FkCascade) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + // We create a bindVariable for each Child + // that stores the tuple of columns involved in the fk constraint. + var bindVariables []*querypb.BindVariable + for range fkc.Children { + bindVariables = append(bindVariables, &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + }) + } + + // Execute the Selection primitive to find the rows that are going to modified. + // This will be used to find the rows that need modification on the children. + err := vcursor.StreamExecutePrimitive(ctx, fkc.Selection, bindVars, wantfields, func(result *sqltypes.Result) error { + if len(result.Rows) == 0 { + return nil + } + for idx, child := range fkc.Children { + for _, row := range result.Rows { + // Create a tuple from each Row. + tuple := &querypb.Value{ + Type: querypb.Type_TUPLE, + } + for _, colIdx := range child.Cols { + tuple.Values = append(tuple.Values, + sqltypes.ValueToProto(row[colIdx])) + } + bindVariables[idx].Values = append(bindVariables[idx].Values, tuple) + } + } + return nil + }) + if err != nil { + return err + } + + // Execute the child primitive, and bail out incase of failure. + // Since this Primitive is always executed in a transaction, the changes should + // be rolled back incase of an error. + for idx, child := range fkc.Children { + bindVars[child.BVName] = bindVariables[idx] + err = vcursor.StreamExecutePrimitive(ctx, child.Exec, bindVars, wantfields, func(result *sqltypes.Result) error { + return nil + }) + if err != nil { + return err + } + delete(bindVars, child.BVName) + } + + // All the children are modified successfully, we can now execute the Parent Primitive. + return vcursor.StreamExecutePrimitive(ctx, fkc.Parent, bindVars, wantfields, callback) +} + +// Inputs implements the Primitive interface. +func (fkc *FkCascade) Inputs() ([]Primitive, []map[string]any) { + var inputs []Primitive + var inputsMap []map[string]any + inputs = append(inputs, fkc.Selection) + inputsMap = append(inputsMap, map[string]any{ + inputName: "Selection", + }) + for idx, child := range fkc.Children { + inputsMap = append(inputsMap, map[string]any{ + inputName: fmt.Sprintf("CascadeChild-%d", idx+1), + "BvName": child.BVName, + "Cols": child.Cols, + }) + inputs = append(inputs, child.Exec) + } + inputs = append(inputs, fkc.Parent) + inputsMap = append(inputsMap, map[string]any{ + inputName: "Parent", + }) + return inputs, inputsMap +} + +func (fkc *FkCascade) description() PrimitiveDescription { + return PrimitiveDescription{OperatorType: fkc.RouteType()} +} + +var _ Primitive = (*FkCascade)(nil) diff --git a/go/vt/vtgate/engine/fk_cascade_test.go b/go/vt/vtgate/engine/fk_cascade_test.go new file mode 100644 index 00000000000..6c89feebf95 --- /dev/null +++ b/go/vt/vtgate/engine/fk_cascade_test.go @@ -0,0 +1,150 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +// TestDeleteCascade tests that FkCascade executes the child and parent primitives for a delete cascade. +func TestDeleteCascade(t *testing.T) { + fakeRes := sqltypes.MakeTestResult(sqltypes.MakeTestFields("cola|colb", "int64|varchar"), "1|a", "2|b") + + inputP := &Route{ + Query: "select cola, colb from parent where foo = 48", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + } + childP := &Delete{ + DML: &DML{ + Query: "delete from child where (ca, cb) in ::__vals", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + }, + } + parentP := &Delete{ + DML: &DML{ + Query: "delete from parent where foo = 48", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + }, + } + fkc := &FkCascade{ + Selection: inputP, + Children: []*FkChild{{BVName: "__vals", Cols: []int{0, 1}, Exec: childP}}, + Parent: parentP, + } + + vc := newDMLTestVCursor("0") + vc.results = []*sqltypes.Result{fakeRes} + _, err := fkc.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: select cola, colb from parent where foo = 48 {} false false`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: delete from child where (ca, cb) in ::__vals {__vals: type:TUPLE values:{type:TUPLE values:{type:INT64 value:"1"} values:{type:VARCHAR value:"a"}} values:{type:TUPLE values:{type:INT64 value:"2"} values:{type:VARCHAR value:"b"}}} true true`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: delete from parent where foo = 48 {} true true`, + }) + + vc.Rewind() + err = fkc.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true, func(result *sqltypes.Result) error { return nil }) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `StreamExecuteMulti select cola, colb from parent where foo = 48 ks.0: {} `, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: delete from child where (ca, cb) in ::__vals {__vals: type:TUPLE values:{type:TUPLE values:{type:INT64 value:"1"} values:{type:VARCHAR value:"a"}} values:{type:TUPLE values:{type:INT64 value:"2"} values:{type:VARCHAR value:"b"}}} true true`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: delete from parent where foo = 48 {} true true`, + }) +} + +// TestUpdateCascade tests that FkCascade executes the child and parent primitives for an update cascade. +func TestUpdateCascade(t *testing.T) { + fakeRes := sqltypes.MakeTestResult(sqltypes.MakeTestFields("cola|colb", "int64|varchar"), "1|a", "2|b") + + inputP := &Route{ + Query: "select cola, colb from parent where foo = 48", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + } + childP := &Update{ + DML: &DML{ + Query: "update child set ca = :vtg1 where (ca, cb) in ::__vals", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + }, + } + parentP := &Update{ + DML: &DML{ + Query: "update parent set cola = 1 where foo = 48", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + }, + } + fkc := &FkCascade{ + Selection: inputP, + Children: []*FkChild{{BVName: "__vals", Cols: []int{0, 1}, Exec: childP}}, + Parent: parentP, + } + + vc := newDMLTestVCursor("0") + vc.results = []*sqltypes.Result{fakeRes} + _, err := fkc.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: select cola, colb from parent where foo = 48 {} false false`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update child set ca = :vtg1 where (ca, cb) in ::__vals {__vals: type:TUPLE values:{type:TUPLE values:{type:INT64 value:"1"} values:{type:VARCHAR value:"a"}} values:{type:TUPLE values:{type:INT64 value:"2"} values:{type:VARCHAR value:"b"}}} true true`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update parent set cola = 1 where foo = 48 {} true true`, + }) + + vc.Rewind() + err = fkc.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true, func(result *sqltypes.Result) error { return nil }) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `StreamExecuteMulti select cola, colb from parent where foo = 48 ks.0: {} `, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update child set ca = :vtg1 where (ca, cb) in ::__vals {__vals: type:TUPLE values:{type:TUPLE values:{type:INT64 value:"1"} values:{type:VARCHAR value:"a"}} values:{type:TUPLE values:{type:INT64 value:"2"} values:{type:VARCHAR value:"b"}}} true true`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update parent set cola = 1 where foo = 48 {} true true`, + }) +} diff --git a/go/vt/vtgate/engine/fk_verify.go b/go/vt/vtgate/engine/fk_verify.go new file mode 100644 index 00000000000..350aeec59e0 --- /dev/null +++ b/go/vt/vtgate/engine/fk_verify.go @@ -0,0 +1,129 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "fmt" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +// Verify contains the verification primitve and its type i.e. parent or child +type Verify struct { + Exec Primitive + Typ string +} + +// FkVerify is a primitive that verifies that the foreign key constraints in parent tables are satisfied. +// It does this by executing a select distinct query on the parent table with the values that are being inserted/updated. +type FkVerify struct { + Verify []*Verify + Exec Primitive + + txNeeded +} + +// constants for verification type. +const ( + ParentVerify = "VerifyParent" + ChildVerify = "VerifyChild" +) + +// RouteType implements the Primitive interface +func (f *FkVerify) RouteType() string { + return "FKVerify" +} + +// GetKeyspaceName implements the Primitive interface +func (f *FkVerify) GetKeyspaceName() string { + return f.Exec.GetKeyspaceName() +} + +// GetTableName implements the Primitive interface +func (f *FkVerify) GetTableName() string { + return f.Exec.GetTableName() +} + +// GetFields implements the Primitive interface +func (f *FkVerify) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] GetFields should not be called") +} + +// TryExecute implements the Primitive interface +func (f *FkVerify) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { + for _, v := range f.Verify { + qr, err := vcursor.ExecutePrimitive(ctx, v.Exec, bindVars, wantfields) + if err != nil { + return nil, err + } + if len(qr.Rows) > 0 { + return nil, getError(v.Typ) + } + } + return vcursor.ExecutePrimitive(ctx, f.Exec, bindVars, wantfields) +} + +// TryStreamExecute implements the Primitive interface +func (f *FkVerify) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + for _, v := range f.Verify { + err := vcursor.StreamExecutePrimitive(ctx, v.Exec, bindVars, wantfields, func(qr *sqltypes.Result) error { + if len(qr.Rows) > 0 { + return getError(v.Typ) + } + return nil + }) + if err != nil { + return err + } + } + return vcursor.StreamExecutePrimitive(ctx, f.Exec, bindVars, wantfields, callback) +} + +// Inputs implements the Primitive interface +func (f *FkVerify) Inputs() ([]Primitive, []map[string]any) { + var inputs []Primitive + var inputsMap []map[string]any + for idx, v := range f.Verify { + inputsMap = append(inputsMap, map[string]any{ + inputName: fmt.Sprintf("%s-%d", v.Typ, idx+1), + }) + inputs = append(inputs, v.Exec) + } + inputs = append(inputs, f.Exec) + inputsMap = append(inputsMap, map[string]any{ + inputName: "PostVerify", + }) + return inputs, inputsMap + +} + +func (f *FkVerify) description() PrimitiveDescription { + return PrimitiveDescription{OperatorType: f.RouteType()} +} + +var _ Primitive = (*FkVerify)(nil) + +func getError(typ string) error { + if typ == ParentVerify { + return vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.NoReferencedRow2, "Cannot add or update a child row: a foreign key constraint fails") + } + return vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.RowIsReferenced2, "Cannot delete or update a parent row: a foreign key constraint fails") +} diff --git a/go/vt/vtgate/engine/fk_verify_test.go b/go/vt/vtgate/engine/fk_verify_test.go new file mode 100644 index 00000000000..5635a32bc2c --- /dev/null +++ b/go/vt/vtgate/engine/fk_verify_test.go @@ -0,0 +1,125 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +func TestFKVerifyUpdate(t *testing.T) { + verifyP := &Route{ + Query: "select 1 from child c left join parent p on p.cola = 1 and p.colb = 'a' where p.cola is null and p.colb is null", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + } + verifyC := &Route{ + Query: "select 1 from grandchild g join child c on g.cola = c.cola and g.colb = c.colb where c.foo = 48", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + } + childP := &Update{ + DML: &DML{ + Query: "update child set cola = 1, colb = 'a' where foo = 48", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + }, + } + fkc := &FkVerify{ + Verify: []*Verify{{Exec: verifyP, Typ: ParentVerify}}, + Exec: childP, + } + + t.Run("foreign key verification success", func(t *testing.T) { + fakeRes := sqltypes.MakeTestResult(sqltypes.MakeTestFields("1", "int64")) + vc := newDMLTestVCursor("0") + vc.results = []*sqltypes.Result{fakeRes} + _, err := fkc.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: select 1 from child c left join parent p on p.cola = 1 and p.colb = 'a' where p.cola is null and p.colb is null {} false false`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update child set cola = 1, colb = 'a' where foo = 48 {} true true`, + }) + + vc.Rewind() + err = fkc.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true, func(result *sqltypes.Result) error { return nil }) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `StreamExecuteMulti select 1 from child c left join parent p on p.cola = 1 and p.colb = 'a' where p.cola is null and p.colb is null ks.0: {} `, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update child set cola = 1, colb = 'a' where foo = 48 {} true true`, + }) + }) + + t.Run("parent foreign key verification failure", func(t *testing.T) { + // No results from select, should cause the foreign key verification to fail. + fakeRes := sqltypes.MakeTestResult(sqltypes.MakeTestFields("1", "int64"), "1", "1", "1") + vc := newDMLTestVCursor("0") + vc.results = []*sqltypes.Result{fakeRes} + _, err := fkc.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true) + require.ErrorContains(t, err, "Cannot add or update a child row: a foreign key constraint fails") + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: select 1 from child c left join parent p on p.cola = 1 and p.colb = 'a' where p.cola is null and p.colb is null {} false false`, + }) + + vc.Rewind() + err = fkc.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true, func(result *sqltypes.Result) error { return nil }) + require.ErrorContains(t, err, "Cannot add or update a child row: a foreign key constraint fails") + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `StreamExecuteMulti select 1 from child c left join parent p on p.cola = 1 and p.colb = 'a' where p.cola is null and p.colb is null ks.0: {} `, + }) + }) + + fkc.Verify[0] = &Verify{Exec: verifyC, Typ: ChildVerify} + t.Run("child foreign key verification failure", func(t *testing.T) { + // No results from select, should cause the foreign key verification to fail. + fakeRes := sqltypes.MakeTestResult(sqltypes.MakeTestFields("1", "int64"), "1", "1", "1") + vc := newDMLTestVCursor("0") + vc.results = []*sqltypes.Result{fakeRes} + _, err := fkc.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true) + require.ErrorContains(t, err, "Cannot delete or update a parent row: a foreign key constraint fails") + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: select 1 from grandchild g join child c on g.cola = c.cola and g.colb = c.colb where c.foo = 48 {} false false`, + }) + + vc.Rewind() + err = fkc.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true, func(result *sqltypes.Result) error { return nil }) + require.ErrorContains(t, err, "Cannot delete or update a parent row: a foreign key constraint fails") + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `StreamExecuteMulti select 1 from grandchild g join child c on g.cola = c.cola and g.colb = c.colb where c.foo = 48 ks.0: {} `, + }) + }) +} diff --git a/go/vt/vtgate/engine/gen4_compare_v3.go b/go/vt/vtgate/engine/gen4_compare_v3.go deleted file mode 100644 index a913c442a2c..00000000000 --- a/go/vt/vtgate/engine/gen4_compare_v3.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package engine - -import ( - "context" - "sync" - - "vitess.io/vitess/go/sqltypes" - querypb "vitess.io/vitess/go/vt/proto/query" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" -) - -// Gen4CompareV3 is a Primitive used to compare V3 and Gen4's plans. -type Gen4CompareV3 struct { - V3, Gen4 Primitive - HasOrderBy bool -} - -var _ Primitive = (*Gen4CompareV3)(nil) -var _ Gen4Comparer = (*Gen4CompareV3)(nil) - -// GetGen4Primitive implements the Gen4Comparer interface -func (gc *Gen4CompareV3) GetGen4Primitive() Primitive { - return gc.Gen4 -} - -// RouteType implements the Primitive interface -func (gc *Gen4CompareV3) RouteType() string { - return gc.Gen4.RouteType() -} - -// GetKeyspaceName implements the Primitive interface -func (gc *Gen4CompareV3) GetKeyspaceName() string { - return gc.Gen4.GetKeyspaceName() -} - -// GetTableName implements the Primitive interface -func (gc *Gen4CompareV3) GetTableName() string { - return gc.Gen4.GetTableName() -} - -// GetFields implements the Primitive interface -func (gc *Gen4CompareV3) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - return gc.Gen4.GetFields(ctx, vcursor, bindVars) -} - -// NeedsTransaction implements the Primitive interface -func (gc *Gen4CompareV3) NeedsTransaction() bool { - return gc.Gen4.NeedsTransaction() -} - -// TryExecute implements the Primitive interface -func (gc *Gen4CompareV3) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { - var v3Err, gen4Err error - v3Result, gen4Result := &sqltypes.Result{}, &sqltypes.Result{} - if gc.Gen4 != nil { - gen4Result, gen4Err = gc.Gen4.TryExecute(ctx, vcursor, bindVars, wantfields) - } - if gc.V3 != nil { - v3Result, v3Err = gc.V3.TryExecute(ctx, vcursor, bindVars, wantfields) - } - - if err := CompareErrors(v3Err, gen4Err, "v3", "Gen4"); err != nil { - return nil, err - } - - if err := gc.compareResults(v3Result, gen4Result); err != nil { - return nil, err - } - return gen4Result, nil -} - -// TryStreamExecute implements the Primitive interface -func (gc *Gen4CompareV3) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - var mu sync.Mutex - var v3Err, gen4Err error - v3Result, gen4Result := &sqltypes.Result{}, &sqltypes.Result{} - - if gc.Gen4 != nil { - gen4Err = gc.Gen4.TryStreamExecute(ctx, vcursor, bindVars, wantfields, func(result *sqltypes.Result) error { - mu.Lock() - defer mu.Unlock() - gen4Result.AppendResult(result) - return nil - }) - } - if gc.V3 != nil { - v3Err = gc.V3.TryStreamExecute(ctx, vcursor, bindVars, wantfields, func(result *sqltypes.Result) error { - mu.Lock() - defer mu.Unlock() - v3Result.AppendResult(result) - return nil - }) - } - - if err := CompareErrors(v3Err, gen4Err, "v3", "Gen4"); err != nil { - return err - } - - if err := gc.compareResults(v3Result, gen4Result); err != nil { - return err - } - return callback(gen4Result) -} - -func (gc *Gen4CompareV3) compareResults(v3Result *sqltypes.Result, gen4Result *sqltypes.Result) error { - var match bool - if gc.HasOrderBy { - match = sqltypes.ResultsEqual([]sqltypes.Result{*v3Result}, []sqltypes.Result{*gen4Result}) - } else { - match = sqltypes.ResultsEqualUnordered([]sqltypes.Result{*v3Result}, []sqltypes.Result{*gen4Result}) - } - if !match { - printMismatch(v3Result, gen4Result, gc.V3, gc.Gen4, "V3", "Gen4") - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "results did not match, see VTGate's logs for more information") - } - return nil -} - -// Inputs implements the Primitive interface -func (gc *Gen4CompareV3) Inputs() []Primitive { - return []Primitive{gc.Gen4, gc.V3} -} - -// description implements the Primitive interface -func (gc *Gen4CompareV3) description() PrimitiveDescription { - return PrimitiveDescription{OperatorType: "Gen4CompareV3"} -} diff --git a/go/vt/vtgate/engine/hash_join.go b/go/vt/vtgate/engine/hash_join.go index 1fb889c8fd4..a38fc21bf97 100644 --- a/go/vt/vtgate/engine/hash_join.go +++ b/go/vt/vtgate/engine/hash_join.go @@ -98,7 +98,7 @@ func (hj *HashJoin) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma for _, currentLHSRow := range lftRows { lhsVal := currentLHSRow[hj.LHSKey] // hash codes can give false positives, so we need to check with a real comparison as well - cmp, err := evalengine.NullsafeCompare(joinVal, lhsVal, collations.Unknown) + cmp, err := evalengine.NullsafeCompare(joinVal, lhsVal, hj.Collation) if err != nil { return nil, err } @@ -234,8 +234,8 @@ func (hj *HashJoin) NeedsTransaction() bool { } // Inputs implements the Primitive interface -func (hj *HashJoin) Inputs() []Primitive { - return []Primitive{hj.Left, hj.Right} +func (hj *HashJoin) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{hj.Left, hj.Right}, nil } // description implements the Primitive interface @@ -246,9 +246,9 @@ func (hj *HashJoin) description() PrimitiveDescription { "Predicate": sqlparser.String(hj.ASTPred), "ComparisonType": hj.ComparisonType.String(), } - coll := hj.Collation.Get() - if coll != nil { - other["Collation"] = coll.Name() + coll := hj.Collation + if coll != collations.Unknown { + other["Collation"] = collations.Local().LookupName(coll) } return PrimitiveDescription{ OperatorType: "Join", diff --git a/go/vt/vtgate/engine/insert.go b/go/vt/vtgate/engine/insert.go index ec816cba742..394ccb8ecce 100644 --- a/go/vt/vtgate/engine/insert.go +++ b/go/vt/vtgate/engine/insert.go @@ -25,20 +25,16 @@ import ( "sync" "time" - "vitess.io/vitess/go/vt/sqlparser" - - "vitess.io/vitess/go/vt/vtgate/evalengine" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/vindexes" - - querypb "vitess.io/vitess/go/vt/proto/query" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) var _ Primitive = (*Insert)(nil) @@ -71,8 +67,8 @@ type ( // ColVindexes are the vindexes that will use the VindexValues ColVindexes []*vindexes.ColumnVindex - // Table specifies the table for the insert. - Table *vindexes.Table + // TableName is the name of the table on which row will be inserted. + TableName string // Generate is only set for inserts where a sequence must be generated. Generate *Generate @@ -112,11 +108,11 @@ type ( ksID = []byte ) -func (ins *Insert) Inputs() []Primitive { +func (ins *Insert) Inputs() ([]Primitive, []map[string]any) { if ins.Input == nil { - return nil + return nil, nil } - return []Primitive{ins.Input} + return []Primitive{ins.Input}, nil } // NewQueryInsert creates an Insert with a query string. @@ -128,15 +124,6 @@ func NewQueryInsert(opcode InsertOpcode, keyspace *vindexes.Keyspace, query stri } } -// NewSimpleInsert creates an Insert for a Table. -func NewSimpleInsert(opcode InsertOpcode, table *vindexes.Table, keyspace *vindexes.Keyspace) *Insert { - return &Insert{ - Opcode: opcode, - Table: table, - Keyspace: keyspace, - } -} - // NewInsert creates a new Insert. func NewInsert( opcode InsertOpcode, @@ -148,16 +135,25 @@ func NewInsert( mid []string, suffix string, ) *Insert { - return &Insert{ + ins := &Insert{ Opcode: opcode, Ignore: ignore, Keyspace: keyspace, VindexValues: vindexValues, - Table: table, Prefix: prefix, Mid: mid, Suffix: suffix, } + if table != nil { + ins.TableName = table.Name.String() + for _, colVindex := range table.ColumnVindexes { + if colVindex.IsPartialVindex() { + continue + } + ins.ColVindexes = append(ins.ColVindexes, colVindex) + } + } + return ins } // Generate represents the instruction to generate @@ -221,10 +217,7 @@ func (ins *Insert) GetKeyspaceName() string { // GetTableName specifies the table that this primitive routes to. func (ins *Insert) GetTableName() string { - if ins.Table != nil { - return ins.Table.Name.String() - } - return "" + return ins.TableName } // TryExecute performs a non-streaming exec. @@ -397,10 +390,6 @@ func (ins *Insert) getInsertSelectQueries( rows []sqltypes.Row, ) ([]*srvtopo.ResolvedShard, []*querypb.BoundQuery, error) { colVindexes := ins.ColVindexes - if colVindexes == nil { - colVindexes = ins.Table.ColumnVindexes - } - if len(colVindexes) != len(ins.VindexValueOffset) { return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vindex value offsets and vindex info do not match") } @@ -513,7 +502,7 @@ func shouldGenerate(v sqltypes.Value) bool { // Unless the NO_AUTO_VALUE_ON_ZERO sql mode is active in mysql, it also // treats 0 as a value that should generate a new sequence. - n, err := evalengine.ToUint64(v) + n, err := v.ToCastUint64() if err == nil && n == 0 { return true } @@ -535,7 +524,7 @@ func (ins *Insert) processGenerateFromValues( // Scan input values to compute the number of values to generate, and // keep track of where they should be filled. - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) resolved, err := env.Evaluate(ins.Generate.Values) if err != nil { return 0, err @@ -564,7 +553,7 @@ func (ins *Insert) processGenerateFromValues( } // If no rows are returned, it's an internal error, and the code // must panic, which will be caught and reported. - insertID, err = evalengine.ToInt64(qr.Rows[0][0]) + insertID, err = qr.Rows[0][0].ToCastInt64() if err != nil { return 0, err } @@ -626,7 +615,7 @@ func (ins *Insert) processGenerateFromRows( } // If no rows are returned, it's an internal error, and the code // must panic, which will be caught and reported. - insertID, err = evalengine.ToInt64(qr.Rows[0][0]) + insertID, err = qr.Rows[0][0].ToCastInt64() if err != nil { return 0, err } @@ -667,11 +656,8 @@ func (ins *Insert) getInsertShardedRoute( // require inputs in that format. vindexRowsValues := make([][]sqltypes.Row, len(ins.VindexValues)) rowCount := 0 - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) colVindexes := ins.ColVindexes - if colVindexes == nil { - colVindexes = ins.Table.ColumnVindexes - } for vIdx, vColValues := range ins.VindexValues { if len(vColValues) != len(colVindexes[vIdx].Columns) { return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] supplied vindex column values don't match vschema: %v %v", vColValues, colVindexes[vIdx].Columns) @@ -683,7 +669,7 @@ func (ins *Insert) getInsertShardedRoute( if err != nil { return nil, nil, err } - rowsResolvedValues = append(rowsResolvedValues, result.Value()) + rowsResolvedValues = append(rowsResolvedValues, result.Value(vcursor.ConnCollation())) } // This is the first iteration: allocate for transpose. if colIdx == 0 { @@ -710,7 +696,7 @@ func (ins *Insert) getInsertShardedRoute( // results in an error. For 'ignore' type inserts, the keyspace // id is returned as nil, which is used later to drop the corresponding rows. if len(vindexRowsValues) == 0 || len(colVindexes) == 0 { - return nil, nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.RequiresPrimaryKey, vterrors.PrimaryVindexNotSet, ins.Table.Name) + return nil, nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.RequiresPrimaryKey, vterrors.PrimaryVindexNotSet, ins.TableName) } keyspaceIDs, err := ins.processPrimary(ctx, vcursor, vindexRowsValues[0], colVindexes[0]) if err != nil { @@ -953,6 +939,8 @@ func (ins *Insert) description() PrimitiveDescription { "TableName": ins.GetTableName(), "MultiShardAutocommit": ins.MultiShardAutocommit, "QueryTimeout": ins.QueryTimeout, + "InsertIgnore": ins.Ignore, + "InputAsNonStreaming": ins.ForceNonStreaming, } if len(ins.VindexValues) > 0 { @@ -976,8 +964,12 @@ func (ins *Insert) description() PrimitiveDescription { other["VindexValues"] = valuesOffsets } - if ins.Generate != nil && ins.Generate.Values == nil { - other["AutoIncrement"] = fmt.Sprintf("%s:%d", ins.Generate.Keyspace.Name, ins.Generate.Offset) + if ins.Generate != nil { + if ins.Generate.Values == nil { + other["AutoIncrement"] = fmt.Sprintf("%s:Offset(%d)", ins.Generate.Query, ins.Generate.Offset) + } else { + other["AutoIncrement"] = fmt.Sprintf("%s:Values::%s", ins.Generate.Query, evalengine.FormatExpr(ins.Generate.Values)) + } } if len(ins.VindexValueOffset) > 0 { @@ -992,8 +984,11 @@ func (ins *Insert) description() PrimitiveDescription { } other["VindexOffsetFromSelect"] = valuesOffsets } - if ins.Ignore { - other["InsertIgnore"] = true + if len(ins.Mid) > 0 { + shardQuery := fmt.Sprintf("%s%s%s", ins.Prefix, strings.Join(ins.Mid, ", "), ins.Suffix) + if shardQuery != ins.Query { + other["ShardedQuery"] = shardQuery + } } return PrimitiveDescription{ OperatorType: "Insert", diff --git a/go/vt/vtgate/engine/insert_test.go b/go/vt/vtgate/engine/insert_test.go index 72dec39045d..b651efe2b03 100644 --- a/go/vt/vtgate/engine/insert_test.go +++ b/go/vt/vtgate/engine/insert_test.go @@ -712,12 +712,6 @@ func TestInsertShardedGeo(t *testing.T) { []string{" mid1", " mid2"}, " suffix", ) - for _, colVindex := range ks.Tables["t1"].ColumnVindexes { - if colVindex.IsPartialVindex() { - continue - } - ins.ColVindexes = append(ins.ColVindexes, colVindex) - } vc := newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20"} @@ -1511,7 +1505,6 @@ func TestInsertSelectSimple(t *testing.T) { Opcode: InsertSelect, Keyspace: ks.Keyspace, Query: "dummy_insert", - Table: ks.Tables["t1"], VindexValueOffset: [][]int{{1}}, Input: &Route{ Query: "dummy_select", @@ -1602,7 +1595,6 @@ func TestInsertSelectOwned(t *testing.T) { Opcode: InsertSelect, Keyspace: ks.Keyspace, Query: "dummy_insert", - Table: ks.Tables["t1"], VindexValueOffset: [][]int{ {1}, // The primary vindex has a single column as sharding key {0}}, // the onecol vindex uses the 'name' column @@ -1699,19 +1691,23 @@ func TestInsertSelectGenerate(t *testing.T) { vs := vindexes.BuildVSchema(invschema) ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSelect, - Keyspace: ks.Keyspace, - Query: "dummy_insert", - Table: ks.Tables["t1"], - VindexValueOffset: [][]int{ - {1}}, // The primary vindex has a single column as sharding key - Input: &Route{ - Query: "dummy_select", - FieldQuery: "dummy_field_query", - RoutingParameters: &RoutingParameters{ - Opcode: Scatter, - Keyspace: ks.Keyspace}}} + ins := NewInsert( + InsertSelect, + false, + ks.Keyspace, + nil, + ks.Tables["t1"], + "prefix ", + nil, + " suffix") + ins.Query = "dummy_insert" + ins.VindexValueOffset = [][]int{{1}} // The primary vindex has a single column as sharding key + ins.Input = &Route{ + Query: "dummy_select", + FieldQuery: "dummy_field_query", + RoutingParameters: &RoutingParameters{ + Opcode: Scatter, + Keyspace: ks.Keyspace}} ins.Generate = &Generate{ Keyspace: &vindexes.Keyspace{ @@ -1721,8 +1717,6 @@ func TestInsertSelectGenerate(t *testing.T) { Query: "dummy_generate", Offset: 1, } - ins.Prefix = "prefix " - ins.Suffix = " suffix" vc := newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20", "20-"} @@ -1795,7 +1789,6 @@ func TestStreamingInsertSelectGenerate(t *testing.T) { Opcode: InsertSelect, Keyspace: ks.Keyspace, Query: "dummy_insert", - Table: ks.Tables["t1"], VindexValueOffset: [][]int{ {1}}, // The primary vindex has a single column as sharding key Input: &Route{ @@ -1804,6 +1797,7 @@ func TestStreamingInsertSelectGenerate(t *testing.T) { RoutingParameters: &RoutingParameters{ Opcode: Scatter, Keyspace: ks.Keyspace}}} + ins.ColVindexes = ks.Tables["t1"].ColumnVindexes ins.Generate = &Generate{ Keyspace: &vindexes.Keyspace{ @@ -1891,7 +1885,6 @@ func TestInsertSelectGenerateNotProvided(t *testing.T) { Opcode: InsertSelect, Keyspace: ks.Keyspace, Query: "dummy_insert", - Table: ks.Tables["t1"], VindexValueOffset: [][]int{ {1}}, // The primary vindex has a single column as sharding key Input: &Route{ @@ -1901,6 +1894,7 @@ func TestInsertSelectGenerateNotProvided(t *testing.T) { Opcode: Scatter, Keyspace: ks.Keyspace}}} + ins.ColVindexes = ks.Tables["t1"].ColumnVindexes ins.Generate = &Generate{ Keyspace: &vindexes.Keyspace{ Name: "ks2", @@ -1979,7 +1973,6 @@ func TestStreamingInsertSelectGenerateNotProvided(t *testing.T) { Opcode: InsertSelect, Keyspace: ks.Keyspace, Query: "dummy_insert", - Table: ks.Tables["t1"], VindexValueOffset: [][]int{ {1}}, // The primary vindex has a single column as sharding key Input: &Route{ @@ -1989,6 +1982,7 @@ func TestStreamingInsertSelectGenerateNotProvided(t *testing.T) { Opcode: Scatter, Keyspace: ks.Keyspace}}} + ins.ColVindexes = ks.Tables["t1"].ColumnVindexes ins.Generate = &Generate{ Keyspace: &vindexes.Keyspace{ Name: "ks2", @@ -2077,7 +2071,6 @@ func TestInsertSelectUnowned(t *testing.T) { Opcode: InsertSelect, Keyspace: ks.Keyspace, Query: "dummy_insert", - Table: ks.Tables["t2"], VindexValueOffset: [][]int{ {0}}, // the onecol vindex as unowned lookup sharding column Input: &Route{ @@ -2199,7 +2192,6 @@ func TestInsertSelectShardingCases(t *testing.T) { Opcode: InsertSelect, Keyspace: sks1.Keyspace, Query: "dummy_insert", - Table: sks1.Tables["s1"], Prefix: "prefix ", Suffix: " suffix", ColVindexes: sks1.Tables["s1"].ColumnVindexes, @@ -2278,7 +2270,6 @@ func TestInsertSelectShardingCases(t *testing.T) { Opcode: InsertUnsharded, Keyspace: uks1.Keyspace, Query: "dummy_insert", - Table: uks1.Tables["s1"], Prefix: "prefix ", Suffix: " suffix", Input: sRoute, diff --git a/go/vt/vtgate/engine/join.go b/go/vt/vtgate/engine/join.go index c67a0951b35..1c3adc1f5c9 100644 --- a/go/vt/vtgate/engine/join.go +++ b/go/vt/vtgate/engine/join.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "strings" + "sync/atomic" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" @@ -58,8 +59,8 @@ func (jn *Join) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[st } result := &sqltypes.Result{} if len(lresult.Rows) == 0 && wantfields { - for k := range jn.Vars { - joinVars[k] = sqltypes.NullBindVariable + for k, col := range jn.Vars { + joinVars[k] = bindvarForType(lresult.Fields[col].Type) } rresult, err := jn.Right.GetFields(ctx, vcursor, combineVars(bindVars, joinVars)) if err != nil { @@ -93,36 +94,57 @@ func (jn *Join) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[st return result, nil } +func bindvarForType(t querypb.Type) *querypb.BindVariable { + bv := &querypb.BindVariable{ + Type: t, + Value: nil, + } + switch t { + case querypb.Type_INT8, querypb.Type_UINT8, querypb.Type_INT16, querypb.Type_UINT16, + querypb.Type_INT32, querypb.Type_UINT32, querypb.Type_INT64, querypb.Type_UINT64: + bv.Value = []byte("0") + case querypb.Type_FLOAT32, querypb.Type_FLOAT64: + bv.Value = []byte("0e0") + case querypb.Type_DECIMAL: + bv.Value = []byte("0.0") + default: + return sqltypes.NullBindVariable + } + return bv +} + // TryStreamExecute performs a streaming exec. func (jn *Join) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - joinVars := make(map[string]*querypb.BindVariable) - err := vcursor.StreamExecutePrimitive(ctx, jn.Left, bindVars, wantfields, func(lresult *sqltypes.Result) error { + var fieldNeeded atomic.Bool + fieldNeeded.Store(wantfields) + err := vcursor.StreamExecutePrimitive(ctx, jn.Left, bindVars, fieldNeeded.Load(), func(lresult *sqltypes.Result) error { + joinVars := make(map[string]*querypb.BindVariable) for _, lrow := range lresult.Rows { for k, col := range jn.Vars { joinVars[k] = sqltypes.ValueBindVariable(lrow[col]) } - rowSent := false - err := vcursor.StreamExecutePrimitive(ctx, jn.Right, combineVars(bindVars, joinVars), wantfields, func(rresult *sqltypes.Result) error { + var rowSent atomic.Bool + err := vcursor.StreamExecutePrimitive(ctx, jn.Right, combineVars(bindVars, joinVars), fieldNeeded.Load(), func(rresult *sqltypes.Result) error { result := &sqltypes.Result{} - if wantfields { + if fieldNeeded.Load() { // This code is currently unreachable because the first result // will always be just the field info, which will cause the outer // wantfields code path to be executed. But this may change in the future. - wantfields = false + fieldNeeded.Store(false) result.Fields = joinFields(lresult.Fields, rresult.Fields, jn.Cols) } for _, rrow := range rresult.Rows { result.Rows = append(result.Rows, joinRows(lrow, rrow, jn.Cols)) } if len(rresult.Rows) != 0 { - rowSent = true + rowSent.Store(true) } return callback(result) }) if err != nil { return err } - if jn.Opcode == LeftJoin && !rowSent { + if jn.Opcode == LeftJoin && !rowSent.Load() { result := &sqltypes.Result{} result.Rows = [][]sqltypes.Value{joinRows( lrow, @@ -132,8 +154,8 @@ func (jn *Join) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars return callback(result) } } - if wantfields { - wantfields = false + if fieldNeeded.Load() { + fieldNeeded.Store(false) for k := range jn.Vars { joinVars[k] = sqltypes.NullBindVariable } @@ -170,8 +192,8 @@ func (jn *Join) GetFields(ctx context.Context, vcursor VCursor, bindVars map[str } // Inputs returns the input primitives for this join -func (jn *Join) Inputs() []Primitive { - return []Primitive{jn.Left, jn.Right} +func (jn *Join) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{jn.Left, jn.Right}, nil } func joinFields(lfields, rfields []*querypb.Field, cols []int) []*querypb.Field { diff --git a/go/vt/vtgate/engine/join_test.go b/go/vt/vtgate/engine/join_test.go index 50ccb35ac7c..2df507f9512 100644 --- a/go/vt/vtgate/engine/join_test.go +++ b/go/vt/vtgate/engine/join_test.go @@ -237,9 +237,7 @@ func TestJoinExecuteNoResult(t *testing.T) { }, } r, err := jn.TryExecute(context.Background(), &noopVCursor{}, map[string]*querypb.BindVariable{}, true) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) leftPrim.ExpectLog(t, []string{ `Execute true`, }) diff --git a/go/vt/vtgate/engine/limit.go b/go/vt/vtgate/engine/limit.go index f0de3fe6202..6a66bd56f82 100644 --- a/go/vt/vtgate/engine/limit.go +++ b/go/vt/vtgate/engine/limit.go @@ -55,7 +55,7 @@ func (l *Limit) GetTableName() string { // TryExecute satisfies the Primitive interface. func (l *Limit) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { - count, offset, err := l.getCountAndOffset(vcursor, bindVars) + count, offset, err := l.getCountAndOffset(ctx, vcursor, bindVars) if err != nil { return nil, err } @@ -85,7 +85,7 @@ func (l *Limit) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[st // TryStreamExecute satisfies the Primitive interface. func (l *Limit) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - count, offset, err := l.getCountAndOffset(vcursor, bindVars) + count, offset, err := l.getCountAndOffset(ctx, vcursor, bindVars) if err != nil { return err } @@ -154,8 +154,8 @@ func (l *Limit) GetFields(ctx context.Context, vcursor VCursor, bindVars map[str } // Inputs returns the input to limit -func (l *Limit) Inputs() []Primitive { - return []Primitive{l.Input} +func (l *Limit) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{l.Input}, nil } // NeedsTransaction implements the Primitive interface. @@ -163,20 +163,20 @@ func (l *Limit) NeedsTransaction() bool { return l.Input.NeedsTransaction() } -func (l *Limit) getCountAndOffset(vcursor VCursor, bindVars map[string]*querypb.BindVariable) (count int, offset int, err error) { - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) - count, err = getIntFrom(env, l.Count) +func (l *Limit) getCountAndOffset(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (count int, offset int, err error) { + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) + count, err = getIntFrom(env, vcursor, l.Count) if err != nil { return } - offset, err = getIntFrom(env, l.Offset) + offset, err = getIntFrom(env, vcursor, l.Offset) if err != nil { return } return } -func getIntFrom(env *evalengine.ExpressionEnv, expr evalengine.Expr) (int, error) { +func getIntFrom(env *evalengine.ExpressionEnv, vcursor VCursor, expr evalengine.Expr) (int, error) { if expr == nil { return 0, nil } @@ -184,7 +184,7 @@ func getIntFrom(env *evalengine.ExpressionEnv, expr evalengine.Expr) (int, error if err != nil { return 0, err } - value := evalResult.Value() + value := evalResult.Value(vcursor.ConnCollation()) if value.IsNull() { return 0, nil } diff --git a/go/vt/vtgate/engine/limit_test.go b/go/vt/vtgate/engine/limit_test.go index dcdc43880a0..ba15306685a 100644 --- a/go/vt/vtgate/engine/limit_test.go +++ b/go/vt/vtgate/engine/limit_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/assert" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/vtgate/evalengine" "github.com/stretchr/testify/require" @@ -129,7 +130,7 @@ func TestLimitExecute(t *testing.T) { results: []*sqltypes.Result{inputResult}, } l = &Limit{ - Count: evalengine.NewBindVar("l", collations.TypedCollation{}), + Count: evalengine.NewBindVar("l", sqltypes.Int64, collations.CollationBinaryID), Input: fp, } @@ -342,8 +343,8 @@ func TestLimitOffsetExecute(t *testing.T) { } l = &Limit{ - Count: evalengine.NewBindVar("l", collations.TypedCollation{}), - Offset: evalengine.NewBindVar("o", collations.TypedCollation{}), + Count: evalengine.NewBindVar("l", sqltypes.Int64, collations.CollationBinaryID), + Offset: evalengine.NewBindVar("o", sqltypes.Int64, collations.CollationBinaryID), Input: fp, } result, err = l.TryExecute(context.Background(), &noopVCursor{}, map[string]*querypb.BindVariable{"l": sqltypes.Int64BindVariable(1), "o": sqltypes.Int64BindVariable(1)}, false) @@ -395,7 +396,7 @@ func TestLimitStreamExecute(t *testing.T) { // Test with bind vars. fp.rewind() - l.Count = evalengine.NewBindVar("l", collations.TypedCollation{}) + l.Count = evalengine.NewBindVar("l", sqltypes.Int64, collations.CollationBinaryID) results = nil err = l.TryStreamExecute(context.Background(), &noopVCursor{}, map[string]*querypb.BindVariable{"l": sqltypes.Int64BindVariable(2)}, true, func(qr *sqltypes.Result) error { results = append(results, qr) @@ -539,17 +540,17 @@ func TestLimitInputFail(t *testing.T) { func TestLimitInvalidCount(t *testing.T) { l := &Limit{ - Count: evalengine.NewBindVar("l", collations.TypedCollation{}), + Count: evalengine.NewBindVar("l", sqltypes.Int64, collations.CollationBinaryID), } - _, _, err := l.getCountAndOffset(&noopVCursor{}, nil) + _, _, err := l.getCountAndOffset(context.Background(), &noopVCursor{}, nil) assert.EqualError(t, err, "query arguments missing for l") l.Count = evalengine.NewLiteralFloat(1.2) - _, _, err = l.getCountAndOffset(&noopVCursor{}, nil) + _, _, err = l.getCountAndOffset(context.Background(), &noopVCursor{}, nil) assert.EqualError(t, err, "Cannot convert value to desired type") l.Count = evalengine.NewLiteralUint(18446744073709551615) - _, _, err = l.getCountAndOffset(&noopVCursor{}, nil) + _, _, err = l.getCountAndOffset(context.Background(), &noopVCursor{}, nil) assert.EqualError(t, err, "requested limit is out of range: 18446744073709551615") // When going through the API, it should return the same error. diff --git a/go/vt/vtgate/engine/lock.go b/go/vt/vtgate/engine/lock.go index bf3eac73194..c1701f6c166 100644 --- a/go/vt/vtgate/engine/lock.go +++ b/go/vt/vtgate/engine/lock.go @@ -87,7 +87,7 @@ func (l *Lock) execLock(ctx context.Context, vcursor VCursor, bindVars map[strin return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "lock query can be routed to single shard only: %v", rss) } - env := &evalengine.ExpressionEnv{BindVars: bindVars} + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) var fields []*querypb.Field var rrow sqltypes.Row for _, lf := range l.LockFunctions { @@ -97,7 +97,7 @@ func (l *Lock) execLock(ctx context.Context, vcursor VCursor, bindVars map[strin if err != nil { return nil, err } - lName = er.Value().ToString() + lName = er.Value(vcursor.ConnCollation()).ToString() } qr, err := lf.execLock(ctx, vcursor, bindVars, rss[0]) if err != nil { diff --git a/go/vt/vtgate/engine/memory_sort.go b/go/vt/vtgate/engine/memory_sort.go index de1b78c9a86..b1770225211 100644 --- a/go/vt/vtgate/engine/memory_sort.go +++ b/go/vt/vtgate/engine/memory_sort.go @@ -68,7 +68,7 @@ func (ms *MemorySort) SetTruncateColumnCount(count int) { // TryExecute satisfies the Primitive interface. func (ms *MemorySort) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { - count, err := ms.fetchCount(vcursor, bindVars) + count, err := ms.fetchCount(ctx, vcursor, bindVars) if err != nil { return nil, err } @@ -94,7 +94,7 @@ func (ms *MemorySort) TryExecute(ctx context.Context, vcursor VCursor, bindVars // TryStreamExecute satisfies the Primitive interface. func (ms *MemorySort) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - count, err := ms.fetchCount(vcursor, bindVars) + count, err := ms.fetchCount(ctx, vcursor, bindVars) if err != nil { return err } @@ -150,8 +150,8 @@ func (ms *MemorySort) GetFields(ctx context.Context, vcursor VCursor, bindVars m } // Inputs returns the input to memory sort -func (ms *MemorySort) Inputs() []Primitive { - return []Primitive{ms.Input} +func (ms *MemorySort) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{ms.Input}, nil } // NeedsTransaction implements the Primitive interface @@ -159,22 +159,23 @@ func (ms *MemorySort) NeedsTransaction() bool { return ms.Input.NeedsTransaction() } -func (ms *MemorySort) fetchCount(vcursor VCursor, bindVars map[string]*querypb.BindVariable) (int, error) { +func (ms *MemorySort) fetchCount(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (int, error) { if ms.UpperLimit == nil { return math.MaxInt64, nil } - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) resolved, err := env.Evaluate(ms.UpperLimit) if err != nil { return 0, err } - if !resolved.Value().IsIntegral() { + value := resolved.Value(vcursor.ConnCollation()) + if !value.IsIntegral() { return 0, sqltypes.ErrIncompatibleTypeCast } - count, err := strconv.Atoi(resolved.Value().RawStr()) + count, err := strconv.Atoi(value.RawStr()) if err != nil || count < 0 { - return 0, fmt.Errorf("requested limit is out of range: %v", resolved.Value().RawStr()) + return 0, fmt.Errorf("requested limit is out of range: %v", value.RawStr()) } return count, nil } diff --git a/go/vt/vtgate/engine/memory_sort_test.go b/go/vt/vtgate/engine/memory_sort_test.go index 93b76876783..3b53ef11250 100644 --- a/go/vt/vtgate/engine/memory_sort_test.go +++ b/go/vt/vtgate/engine/memory_sort_test.go @@ -20,16 +20,14 @@ import ( "context" "testing" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vtgate/evalengine" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vtgate/evalengine" ) func init() { @@ -77,7 +75,7 @@ func TestMemorySortExecute(t *testing.T) { utils.MustMatch(t, wantResult, result) fp.rewind() - ms.UpperLimit = evalengine.NewBindVar("__upper_limit", collations.TypedCollation{}) + ms.UpperLimit = evalengine.NewBindVar("__upper_limit", sqltypes.Int64, collations.CollationBinaryID) bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} result, err = ms.TryExecute(context.Background(), &noopVCursor{}, bv, false) @@ -138,7 +136,7 @@ func TestMemorySortStreamExecuteWeightString(t *testing.T) { t.Run("Limit test", func(t *testing.T) { fp.rewind() - ms.UpperLimit = evalengine.NewBindVar("__upper_limit", collations.TypedCollation{}) + ms.UpperLimit = evalengine.NewBindVar("__upper_limit", sqltypes.Int64, collations.CollationBinaryID) bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} results = nil @@ -196,7 +194,7 @@ func TestMemorySortExecuteWeightString(t *testing.T) { utils.MustMatch(t, wantResult, result) fp.rewind() - ms.UpperLimit = evalengine.NewBindVar("__upper_limit", collations.TypedCollation{}) + ms.UpperLimit = evalengine.NewBindVar("__upper_limit", sqltypes.Int64, collations.CollationBinaryID) bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} result, err = ms.TryExecute(context.Background(), &noopVCursor{}, bv, false) @@ -231,6 +229,7 @@ func TestMemorySortStreamExecuteCollation(t *testing.T) { ms := &MemorySort{ OrderBy: []OrderByParams{{ Col: 0, + Type: sqltypes.VarChar, CollationID: collationID, }}, Input: fp, @@ -279,7 +278,7 @@ func TestMemorySortStreamExecuteCollation(t *testing.T) { t.Run("Limit test", func(t *testing.T) { fp.rewind() - ms.UpperLimit = evalengine.NewBindVar("__upper_limit", collations.TypedCollation{}) + ms.UpperLimit = evalengine.NewBindVar("__upper_limit", sqltypes.Int64, collations.CollationBinaryID) bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} results = nil @@ -319,6 +318,7 @@ func TestMemorySortExecuteCollation(t *testing.T) { ms := &MemorySort{ OrderBy: []OrderByParams{{ Col: 0, + Type: sqltypes.VarChar, CollationID: collationID, }}, Input: fp, @@ -338,7 +338,7 @@ func TestMemorySortExecuteCollation(t *testing.T) { utils.MustMatch(t, wantResult, result) fp.rewind() - ms.UpperLimit = evalengine.NewBindVar("__upper_limit", collations.TypedCollation{}) + ms.UpperLimit = evalengine.NewBindVar("__upper_limit", sqltypes.Int64, collations.CollationBinaryID) bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} result, err = ms.TryExecute(context.Background(), &noopVCursor{}, bv, false) @@ -395,7 +395,7 @@ func TestMemorySortStreamExecute(t *testing.T) { utils.MustMatch(t, wantResults, results) fp.rewind() - ms.UpperLimit = evalengine.NewBindVar("__upper_limit", collations.TypedCollation{}) + ms.UpperLimit = evalengine.NewBindVar("__upper_limit", sqltypes.Int64, collations.CollationBinaryID) bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} results = nil @@ -554,7 +554,7 @@ func TestMemorySortMultiColumn(t *testing.T) { utils.MustMatch(t, wantResult, result) fp.rewind() - ms.UpperLimit = evalengine.NewBindVar("__upper_limit", collations.TypedCollation{}) + ms.UpperLimit = evalengine.NewBindVar("__upper_limit", sqltypes.Int64, collations.CollationBinaryID) bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} result, err = ms.TryExecute(context.Background(), &noopVCursor{}, bv, false) diff --git a/go/vt/vtgate/engine/merge_sort.go b/go/vt/vtgate/engine/merge_sort.go index 1ff4ca7e736..6c694ae9e37 100644 --- a/go/vt/vtgate/engine/merge_sort.go +++ b/go/vt/vtgate/engine/merge_sort.go @@ -21,7 +21,7 @@ import ( "context" "io" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" @@ -167,7 +167,7 @@ func (ms *MergeSort) TryStreamExecute(ctx context.Context, vcursor VCursor, bind if err != nil && ms.ScatterErrorsAsWarnings && len(errs) < len(handles) { // we got errors, but not all shards failed, so we can hide the error and just warn instead partialSuccessScatterQueries.Add(1) - sErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) + sErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) vcursor.Session().RecordWarning(&querypb.QueryWarning{Code: uint32(sErr.Num), Message: err.Error()}) return nil } diff --git a/go/vt/vtgate/engine/merge_sort_test.go b/go/vt/vtgate/engine/merge_sort_test.go index c72f31c0993..e8823e9e6d5 100644 --- a/go/vt/vtgate/engine/merge_sort_test.go +++ b/go/vt/vtgate/engine/merge_sort_test.go @@ -180,6 +180,7 @@ func TestMergeSortCollation(t *testing.T) { collationID, _ := collations.Local().LookupID("utf8mb4_hu_0900_ai_ci") orderBy := []OrderByParams{{ Col: 0, + Type: sqltypes.VarChar, CollationID: collationID, }} @@ -369,7 +370,7 @@ func TestMergeSortDataFailures(t *testing.T) { }} err := testMergeSort(shardResults, orderBy, func(qr *sqltypes.Result) error { return nil }) - want := `strconv.ParseInt: parsing "2.1": invalid syntax` + want := `unparsed tail left after parsing int64 from "2.1": ".1"` require.EqualError(t, err, want) // Create a new VCursor because the previous MergeSort will still @@ -385,7 +386,7 @@ func TestMergeSortDataFailures(t *testing.T) { ), }} err = testMergeSort(shardResults, orderBy, func(qr *sqltypes.Result) error { return nil }) - want = `strconv.ParseInt: parsing "1.1": invalid syntax` + want = `unparsed tail left after parsing int64 from "1.1": ".1"` require.EqualError(t, err, want) } diff --git a/go/vt/vtgate/engine/online_ddl.go b/go/vt/vtgate/engine/online_ddl.go index 67290103285..c972fee66e9 100644 --- a/go/vt/vtgate/engine/online_ddl.go +++ b/go/vt/vtgate/engine/online_ddl.go @@ -20,6 +20,7 @@ import ( "context" "fmt" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" querypb "vitess.io/vitess/go/vt/proto/query" @@ -76,14 +77,20 @@ func (v *OnlineDDL) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma result = &sqltypes.Result{ Fields: []*querypb.Field{ { - Name: "uuid", - Type: sqltypes.VarChar, + Name: "uuid", + Type: sqltypes.VarChar, + Charset: uint32(collations.Default()), }, }, Rows: [][]sqltypes.Value{}, } + migrationContext := vcursor.Session().GetMigrationContext() + if migrationContext == "" { + // default to @@session_uuid + migrationContext = fmt.Sprintf("vtgate:%s", vcursor.Session().GetSessionUUID()) + } onlineDDLs, err := schema.NewOnlineDDLs(v.GetKeyspaceName(), v.SQL, v.DDL, - v.DDLStrategySetting, fmt.Sprintf("vtgate:%s", vcursor.Session().GetSessionUUID()), "", + v.DDLStrategySetting, migrationContext, "", ) if err != nil { return result, err diff --git a/go/vt/vtgate/engine/opcode/constants.go b/go/vt/vtgate/engine/opcode/constants.go new file mode 100644 index 00000000000..b8df30ff01b --- /dev/null +++ b/go/vt/vtgate/engine/opcode/constants.go @@ -0,0 +1,172 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package opcode + +import ( + "fmt" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +// PulloutOpcode is a number representing the opcode +// for the PulloutSubquery primitive. +type PulloutOpcode int + +// This is the list of PulloutOpcode values. +const ( + PulloutValue = PulloutOpcode(iota) + PulloutIn + PulloutNotIn + PulloutExists +) + +var pulloutName = map[PulloutOpcode]string{ + PulloutValue: "PulloutValue", + PulloutIn: "PulloutIn", + PulloutNotIn: "PulloutNotIn", + PulloutExists: "PulloutExists", +} + +func (code PulloutOpcode) String() string { + return pulloutName[code] +} + +// MarshalJSON serializes the PulloutOpcode as a JSON string. +// It's used for testing and diagnostics. +func (code PulloutOpcode) MarshalJSON() ([]byte, error) { + return ([]byte)(fmt.Sprintf("\"%s\"", code.String())), nil +} + +// AggregateOpcode is the aggregation Opcode. +type AggregateOpcode int + +// These constants list the possible aggregate opcodes. +const ( + AggregateUnassigned = AggregateOpcode(iota) + AggregateCount + AggregateSum + AggregateMin + AggregateMax + AggregateCountDistinct + AggregateSumDistinct + AggregateGtid + AggregateAnyValue + AggregateCountStar + AggregateGroupConcat + _NumOfOpCodes // This line must be last of the opcodes! +) + +var ( + // OpcodeType keeps track of the known output types for different aggregate functions + OpcodeType = map[AggregateOpcode]querypb.Type{ + AggregateCountDistinct: sqltypes.Int64, + AggregateCount: sqltypes.Int64, + AggregateCountStar: sqltypes.Int64, + AggregateSumDistinct: sqltypes.Decimal, + AggregateSum: sqltypes.Decimal, + AggregateGtid: sqltypes.VarChar, + } +) + +// SupportedAggregates maps the list of supported aggregate +// functions to their opcodes. +var SupportedAggregates = map[string]AggregateOpcode{ + "count": AggregateCount, + "sum": AggregateSum, + "min": AggregateMin, + "max": AggregateMax, + // These functions don't exist in mysql, but are used + // to display the plan. + "count_distinct": AggregateCountDistinct, + "sum_distinct": AggregateSumDistinct, + "vgtid": AggregateGtid, + "count_star": AggregateCountStar, + "any_value": AggregateAnyValue, + "group_concat": AggregateGroupConcat, +} + +var AggregateName = map[AggregateOpcode]string{ + AggregateCount: "count", + AggregateSum: "sum", + AggregateMin: "min", + AggregateMax: "max", + AggregateCountDistinct: "count_distinct", + AggregateSumDistinct: "sum_distinct", + AggregateGtid: "vgtid", + AggregateCountStar: "count_star", + AggregateGroupConcat: "group_concat", + AggregateAnyValue: "any_value", +} + +func (code AggregateOpcode) String() string { + name := AggregateName[code] + if name == "" { + name = "ERROR" + } + return name +} + +// MarshalJSON serializes the AggregateOpcode as a JSON string. +// It's used for testing and diagnostics. +func (code AggregateOpcode) MarshalJSON() ([]byte, error) { + return ([]byte)(fmt.Sprintf("\"%s\"", code.String())), nil +} + +// Type returns the opcode return sql type, and a bool telling is we are sure about this type or not +func (code AggregateOpcode) Type(typ querypb.Type) querypb.Type { + switch code { + case AggregateUnassigned: + return sqltypes.Null + case AggregateGroupConcat: + if sqltypes.IsBinary(typ) { + return sqltypes.Blob + } + return sqltypes.Text + case AggregateMax, AggregateMin, AggregateAnyValue: + return typ + case AggregateSumDistinct, AggregateSum: + if sqltypes.IsIntegral(typ) || sqltypes.IsDecimal(typ) { + return sqltypes.Decimal + } + return sqltypes.Float64 + case AggregateCount, AggregateCountStar, AggregateCountDistinct: + return sqltypes.Int64 + case AggregateGtid: + return sqltypes.VarChar + default: + panic(code.String()) // we have a unit test checking we never reach here + } +} + +func (code AggregateOpcode) NeedsComparableValues() bool { + switch code { + case AggregateCountDistinct, AggregateSumDistinct, AggregateMin, AggregateMax: + return true + default: + return false + } +} + +func (code AggregateOpcode) IsDistinct() bool { + switch code { + case AggregateCountDistinct, AggregateSumDistinct: + return true + default: + return false + } +} diff --git a/tools/coverage-go/vttablet_test.go b/go/vt/vtgate/engine/opcode/constants_test.go similarity index 64% rename from tools/coverage-go/vttablet_test.go rename to go/vt/vtgate/engine/opcode/constants_test.go index 2fc3525ad11..50cfc49a71c 100644 --- a/tools/coverage-go/vttablet_test.go +++ b/go/vt/vtgate/engine/opcode/constants_test.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. */ -package main +package opcode import ( "testing" + + "vitess.io/vitess/go/sqltypes" ) -func TestVttablet(t *testing.T) { - main() +func TestCheckAllAggrOpCodes(t *testing.T) { + // This test is just checking that we never reach the panic when using Type() on valid opcodes + for i := AggregateOpcode(0); i < _NumOfOpCodes; i++ { + i.Type(sqltypes.Null) + } } diff --git a/go/vt/vtgate/engine/ordered_aggregate.go b/go/vt/vtgate/engine/ordered_aggregate.go index e5d3057a127..acb958199d0 100644 --- a/go/vt/vtgate/engine/ordered_aggregate.go +++ b/go/vt/vtgate/engine/ordered_aggregate.go @@ -22,14 +22,17 @@ import ( "strconv" "vitess.io/vitess/go/mysql/collations" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/evalengine" +) - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" +var ( + // Some predefined values + countZero = sqltypes.MakeTrusted(sqltypes.Int64, []byte("0")) + countOne = sqltypes.MakeTrusted(sqltypes.Int64, []byte("1")) + sumZero = sqltypes.MakeTrusted(sqltypes.Decimal, []byte("0")) ) var _ Primitive = (*OrderedAggregate)(nil) @@ -40,15 +43,10 @@ var _ Primitive = (*OrderedAggregate)(nil) // is that the underlying primitive is a scatter select with pre-sorted // rows. type OrderedAggregate struct { - // PreProcess is true if one of the aggregates needs preprocessing. - PreProcess bool `json:",omitempty"` - // Aggregates specifies the aggregation parameters for each // aggregation function: function opcode and input column number. Aggregates []*AggregateParams - AggrOnEngine bool - // GroupByKeys specifies the input values that must be used for // the aggregation key. GroupByKeys []*GroupByParams @@ -58,10 +56,6 @@ type OrderedAggregate struct { // from the result received. If 0, no truncation happens. TruncateColumnCount int `json:",omitempty"` - // Collations stores the collation ID per column offset. - // It is used for grouping keys and distinct aggregate functions - Collations map[int]collations.ID - // Input is the primitive that will feed into this Primitive. Input Primitive } @@ -72,6 +66,7 @@ type GroupByParams struct { WeightStringCol int Expr sqlparser.Expr FromGroupBy bool + Type sqltypes.Type CollationID collations.ID } @@ -84,125 +79,13 @@ func (gbp GroupByParams) String() string { out = fmt.Sprintf("(%d|%d)", gbp.KeyCol, gbp.WeightStringCol) } - if gbp.CollationID != collations.Unknown { - collation := gbp.CollationID.Get() - out += " COLLATE " + collation.Name() + if sqltypes.IsText(gbp.Type) && gbp.CollationID != collations.Unknown { + out += " COLLATE " + collations.Local().LookupName(gbp.CollationID) } return out } -// AggregateParams specify the parameters for each aggregation. -// It contains the opcode and input column number. -type AggregateParams struct { - Opcode AggregateOpcode - Col int - - // These are used only for distinct opcodes. - KeyCol int - WCol int - WAssigned bool - CollationID collations.ID - - Alias string `json:",omitempty"` - Expr sqlparser.Expr - Original *sqlparser.AliasedExpr - - // This is based on the function passed in the select expression and - // not what we use to aggregate at the engine primitive level. - OrigOpcode AggregateOpcode -} - -func (ap *AggregateParams) isDistinct() bool { - return ap.Opcode == AggregateCountDistinct || ap.Opcode == AggregateSumDistinct -} - -func (ap *AggregateParams) preProcess() bool { - return ap.Opcode == AggregateCountDistinct || ap.Opcode == AggregateSumDistinct || ap.Opcode == AggregateGtid || ap.Opcode == AggregateCount -} - -func (ap *AggregateParams) String() string { - keyCol := strconv.Itoa(ap.Col) - if ap.WAssigned { - keyCol = fmt.Sprintf("%s|%d", keyCol, ap.WCol) - } - if ap.CollationID != collations.Unknown { - keyCol += " COLLATE " + ap.CollationID.Get().Name() - } - dispOrigOp := "" - if ap.OrigOpcode != AggregateUnassigned && ap.OrigOpcode != ap.Opcode { - dispOrigOp = "_" + ap.OrigOpcode.String() - } - if ap.Alias != "" { - return fmt.Sprintf("%s%s(%s) AS %s", ap.Opcode.String(), dispOrigOp, keyCol, ap.Alias) - } - return fmt.Sprintf("%s%s(%s)", ap.Opcode.String(), dispOrigOp, keyCol) -} - -// AggregateOpcode is the aggregation Opcode. -type AggregateOpcode int - -// These constants list the possible aggregate opcodes. -const ( - AggregateUnassigned = AggregateOpcode(iota) - AggregateCount - AggregateSum - AggregateMin - AggregateMax - AggregateCountDistinct - AggregateSumDistinct - AggregateGtid - AggregateRandom - AggregateCountStar -) - -var ( - // OpcodeType keeps track of the known output types for different aggregate functions - OpcodeType = map[AggregateOpcode]querypb.Type{ - AggregateCountDistinct: sqltypes.Int64, - AggregateCount: sqltypes.Int64, - AggregateCountStar: sqltypes.Int64, - AggregateSumDistinct: sqltypes.Decimal, - AggregateSum: sqltypes.Decimal, - AggregateGtid: sqltypes.VarChar, - } - // Some predefined values - countZero = sqltypes.MakeTrusted(sqltypes.Int64, []byte("0")) - countOne = sqltypes.MakeTrusted(sqltypes.Int64, []byte("1")) - sumZero = sqltypes.MakeTrusted(sqltypes.Decimal, []byte("0")) -) - -// SupportedAggregates maps the list of supported aggregate -// functions to their opcodes. -var SupportedAggregates = map[string]AggregateOpcode{ - "count": AggregateCount, - "sum": AggregateSum, - "min": AggregateMin, - "max": AggregateMax, - // These functions don't exist in mysql, but are used - // to display the plan. - "count_distinct": AggregateCountDistinct, - "sum_distinct": AggregateSumDistinct, - "vgtid": AggregateGtid, - "count_star": AggregateCountStar, - "random": AggregateRandom, -} - -func (code AggregateOpcode) String() string { - for k, v := range SupportedAggregates { - if v == code { - return k - } - } - return "ERROR" -} - -// MarshalJSON serializes the AggregateOpcode as a JSON string. -// It's used for testing and diagnostics. -func (code AggregateOpcode) MarshalJSON() ([]byte, error) { - return ([]byte)(fmt.Sprintf("\"%s\"", code.String())), nil -} - // RouteType returns a description of the query routing type used by the primitive func (oa *OrderedAggregate) RouteType() string { return oa.Input.RouteType() @@ -224,211 +107,141 @@ func (oa *OrderedAggregate) SetTruncateColumnCount(count int) { } // TryExecute is a Primitive function. -func (oa *OrderedAggregate) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { - qr, err := oa.execute(ctx, vcursor, bindVars, wantfields) +func (oa *OrderedAggregate) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, _ bool) (*sqltypes.Result, error) { + qr, err := oa.execute(ctx, vcursor, bindVars) if err != nil { return nil, err } return qr.Truncate(oa.TruncateColumnCount), nil } -func (oa *OrderedAggregate) execute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { - result, err := vcursor.ExecutePrimitive(ctx, oa.Input, bindVars, wantfields) +func (oa *OrderedAggregate) execute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + result, err := vcursor.ExecutePrimitive( + ctx, + oa.Input, + bindVars, + true, /*wantFields - we need the input fields types to correctly calculate the output types*/ + ) + if err != nil { + return nil, err + } + agg, fields, err := newAggregation(result.Fields, oa.Aggregates) if err != nil { return nil, err } + out := &sqltypes.Result{ - Fields: convertFields(result.Fields, oa.PreProcess, oa.Aggregates, oa.AggrOnEngine), + Fields: fields, Rows: make([][]sqltypes.Value, 0, len(result.Rows)), } - // This code is similar to the one in StreamExecute. - var current []sqltypes.Value - var curDistincts []sqltypes.Value + + var currentKey []sqltypes.Value for _, row := range result.Rows { - if current == nil { - current, curDistincts = convertRow(row, oa.PreProcess, oa.Aggregates, oa.AggrOnEngine) - continue - } - equal, err := oa.keysEqual(current, row, oa.Collations) + var nextGroup bool + + currentKey, nextGroup, err = oa.nextGroupBy(currentKey, row) if err != nil { return nil, err } - if equal { - current, curDistincts, err = merge(result.Fields, current, row, curDistincts, oa.Collations, oa.Aggregates) - if err != nil { - return nil, err - } - continue + if nextGroup { + out.Rows = append(out.Rows, agg.finish()) + agg.reset() } - out.Rows = append(out.Rows, current) - current, curDistincts = convertRow(row, oa.PreProcess, oa.Aggregates, oa.AggrOnEngine) - } - if current != nil { - final, err := convertFinal(current, oa.Aggregates) - if err != nil { + if err := agg.add(row); err != nil { return nil, err } - out.Rows = append(out.Rows, final) } + + if currentKey != nil { + out.Rows = append(out.Rows, agg.finish()) + } + return out, nil } // TryStreamExecute is a Primitive function. -func (oa *OrderedAggregate) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - var current []sqltypes.Value - var curDistincts []sqltypes.Value - var fields []*querypb.Field - +func (oa *OrderedAggregate) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, _ bool, callback func(*sqltypes.Result) error) error { cb := func(qr *sqltypes.Result) error { return callback(qr.Truncate(oa.TruncateColumnCount)) } - err := vcursor.StreamExecutePrimitive(ctx, oa.Input, bindVars, wantfields, func(qr *sqltypes.Result) error { - if len(qr.Fields) != 0 { - fields = convertFields(qr.Fields, oa.PreProcess, oa.Aggregates, oa.AggrOnEngine) - if err := cb(&sqltypes.Result{Fields: fields}); err != nil { + var agg aggregationState + var fields []*querypb.Field + var currentKey []sqltypes.Value + + visitor := func(qr *sqltypes.Result) error { + var err error + + if agg == nil && len(qr.Fields) != 0 { + agg, fields, err = newAggregation(qr.Fields, oa.Aggregates) + if err != nil { + return err + } + if err = cb(&sqltypes.Result{Fields: fields}); err != nil { return err } } + // This code is similar to the one in Execute. for _, row := range qr.Rows { - if current == nil { - current, curDistincts = convertRow(row, oa.PreProcess, oa.Aggregates, oa.AggrOnEngine) - continue - } + var nextGroup bool - equal, err := oa.keysEqual(current, row, oa.Collations) + currentKey, nextGroup, err = oa.nextGroupBy(currentKey, row) if err != nil { return err } - if equal { - current, curDistincts, err = merge(fields, current, row, curDistincts, oa.Collations, oa.Aggregates) - if err != nil { + if nextGroup { + // this is a new grouping. let's yield the old one, and start a new + if err := cb(&sqltypes.Result{Rows: [][]sqltypes.Value{agg.finish()}}); err != nil { return err } - continue + + agg.reset() } - if err := cb(&sqltypes.Result{Rows: [][]sqltypes.Value{current}}); err != nil { + + if err := agg.add(row); err != nil { return err } - current, curDistincts = convertRow(row, oa.PreProcess, oa.Aggregates, oa.AggrOnEngine) } return nil - }) + } + + /* we need the input fields types to correctly calculate the output types */ + err := vcursor.StreamExecutePrimitive(ctx, oa.Input, bindVars, true, visitor) if err != nil { return err } - if current != nil { - if err := cb(&sqltypes.Result{Rows: [][]sqltypes.Value{current}}); err != nil { + if currentKey != nil { + if err := cb(&sqltypes.Result{Rows: [][]sqltypes.Value{agg.finish()}}); err != nil { return err } } return nil } -func convertFields(fields []*querypb.Field, preProcess bool, aggrs []*AggregateParams, aggrOnEngine bool) []*querypb.Field { - if !preProcess { - return fields - } - for _, aggr := range aggrs { - if !aggr.preProcess() && !aggrOnEngine { - continue - } - fields[aggr.Col] = &querypb.Field{ - Name: aggr.Alias, - Type: OpcodeType[aggr.Opcode], - } - if aggr.isDistinct() { - aggr.KeyCol = aggr.Col - } - } - return fields -} - -func convertRow(row []sqltypes.Value, preProcess bool, aggregates []*AggregateParams, aggrOnEngine bool) (newRow []sqltypes.Value, curDistincts []sqltypes.Value) { - if !preProcess { - return row, nil - } - newRow = append(newRow, row...) - curDistincts = make([]sqltypes.Value, len(aggregates)) - for index, aggr := range aggregates { - switch aggr.Opcode { - case AggregateCountStar: - newRow[aggr.Col] = countOne - case AggregateCount: - val := countOne - if row[aggr.Col].IsNull() { - val = countZero - } - newRow[aggr.Col] = val - case AggregateCountDistinct: - curDistincts[index] = findComparableCurrentDistinct(row, aggr) - // Type is int64. Ok to call MakeTrusted. - if row[aggr.KeyCol].IsNull() { - newRow[aggr.Col] = countZero - } else { - newRow[aggr.Col] = countOne - } - case AggregateSum: - if !aggrOnEngine { - break - } - if row[aggr.Col].IsNull() { - break - } - var err error - newRow[aggr.Col], err = evalengine.Cast(row[aggr.Col], OpcodeType[aggr.Opcode]) - if err != nil { - newRow[aggr.Col] = sumZero - } - case AggregateSumDistinct: - curDistincts[index] = findComparableCurrentDistinct(row, aggr) - var err error - newRow[aggr.Col], err = evalengine.Cast(row[aggr.Col], OpcodeType[aggr.Opcode]) - if err != nil { - newRow[aggr.Col] = sumZero - } - case AggregateGtid: - vgtid := &binlogdatapb.VGtid{} - vgtid.ShardGtids = append(vgtid.ShardGtids, &binlogdatapb.ShardGtid{ - Keyspace: row[aggr.Col-1].ToString(), - Shard: row[aggr.Col+1].ToString(), - Gtid: row[aggr.Col].ToString(), - }) - data, _ := vgtid.MarshalVT() - val, _ := sqltypes.NewValue(sqltypes.VarBinary, data) - newRow[aggr.Col] = val - } - } - return newRow, curDistincts -} - -func findComparableCurrentDistinct(row []sqltypes.Value, aggr *AggregateParams) sqltypes.Value { - curDistinct := row[aggr.KeyCol] - if aggr.WAssigned && !curDistinct.IsComparable() { - aggr.KeyCol = aggr.WCol - curDistinct = row[aggr.KeyCol] - } - return curDistinct -} - // GetFields is a Primitive function. func (oa *OrderedAggregate) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { qr, err := oa.Input.GetFields(ctx, vcursor, bindVars) if err != nil { return nil, err } - qr = &sqltypes.Result{Fields: convertFields(qr.Fields, oa.PreProcess, oa.Aggregates, oa.AggrOnEngine)} + + _, fields, err := newAggregation(qr.Fields, oa.Aggregates) + if err != nil { + return nil, err + } + + qr = &sqltypes.Result{Fields: fields} return qr.Truncate(oa.TruncateColumnCount), nil } // Inputs returns the Primitive input for this aggregation -func (oa *OrderedAggregate) Inputs() []Primitive { - return []Primitive{oa.Input} +func (oa *OrderedAggregate) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{oa.Input}, nil } // NeedsTransaction implements the Primitive interface @@ -436,107 +249,31 @@ func (oa *OrderedAggregate) NeedsTransaction() bool { return oa.Input.NeedsTransaction() } -func (oa *OrderedAggregate) keysEqual(row1, row2 []sqltypes.Value, colls map[int]collations.ID) (bool, error) { - for _, key := range oa.GroupByKeys { - cmp, err := evalengine.NullsafeCompare(row1[key.KeyCol], row2[key.KeyCol], colls[key.KeyCol]) +func (oa *OrderedAggregate) nextGroupBy(currentKey, nextRow []sqltypes.Value) (nextKey []sqltypes.Value, nextGroup bool, err error) { + if currentKey == nil { + return nextRow, false, nil + } + + for _, gb := range oa.GroupByKeys { + cmp, err := evalengine.NullsafeCompare(currentKey[gb.KeyCol], nextRow[gb.KeyCol], gb.CollationID) if err != nil { _, isComparisonErr := err.(evalengine.UnsupportedComparisonError) _, isCollationErr := err.(evalengine.UnsupportedCollationError) - if !isComparisonErr && !isCollationErr || key.WeightStringCol == -1 { - return false, err + if !isComparisonErr && !isCollationErr || gb.WeightStringCol == -1 { + return nil, false, err } - key.KeyCol = key.WeightStringCol - cmp, err = evalengine.NullsafeCompare(row1[key.WeightStringCol], row2[key.WeightStringCol], colls[key.KeyCol]) + gb.KeyCol = gb.WeightStringCol + cmp, err = evalengine.NullsafeCompare(currentKey[gb.WeightStringCol], nextRow[gb.WeightStringCol], gb.CollationID) if err != nil { - return false, err + return nil, false, err } } if cmp != 0 { - return false, nil - } - } - return true, nil -} - -func merge( - fields []*querypb.Field, - row1, row2 []sqltypes.Value, - curDistincts []sqltypes.Value, - colls map[int]collations.ID, - aggregates []*AggregateParams, -) ([]sqltypes.Value, []sqltypes.Value, error) { - result := sqltypes.CopyRow(row1) - for index, aggr := range aggregates { - if aggr.isDistinct() { - if row2[aggr.KeyCol].IsNull() { - continue - } - cmp, err := evalengine.NullsafeCompare(curDistincts[index], row2[aggr.KeyCol], colls[aggr.KeyCol]) - if err != nil { - return nil, nil, err - } - if cmp == 0 { - continue - } - curDistincts[index] = findComparableCurrentDistinct(row2, aggr) - } - var err error - switch aggr.Opcode { - case AggregateCountStar: - value := row1[aggr.Col] - result[aggr.Col], err = evalengine.NullSafeAdd(value, countOne, fields[aggr.Col].Type) - case AggregateCount: - val := countOne - if row2[aggr.Col].IsNull() { - val = countZero - } - result[aggr.Col], err = evalengine.NullSafeAdd(row1[aggr.Col], val, fields[aggr.Col].Type) - case AggregateSum: - value := row1[aggr.Col] - v2 := row2[aggr.Col] - if value.IsNull() && v2.IsNull() { - result[aggr.Col] = sqltypes.NULL - break - } - result[aggr.Col], err = evalengine.NullSafeAdd(value, v2, fields[aggr.Col].Type) - case AggregateMin: - result[aggr.Col], err = evalengine.Min(row1[aggr.Col], row2[aggr.Col], colls[aggr.Col]) - case AggregateMax: - result[aggr.Col], err = evalengine.Max(row1[aggr.Col], row2[aggr.Col], colls[aggr.Col]) - case AggregateCountDistinct: - result[aggr.Col], err = evalengine.NullSafeAdd(row1[aggr.Col], countOne, OpcodeType[aggr.Opcode]) - case AggregateSumDistinct: - result[aggr.Col], err = evalengine.NullSafeAdd(row1[aggr.Col], row2[aggr.Col], OpcodeType[aggr.Opcode]) - case AggregateGtid: - vgtid := &binlogdatapb.VGtid{} - rowBytes, err := row1[aggr.Col].ToBytes() - if err != nil { - return nil, nil, err - } - err = vgtid.UnmarshalVT(rowBytes) - if err != nil { - return nil, nil, err - } - vgtid.ShardGtids = append(vgtid.ShardGtids, &binlogdatapb.ShardGtid{ - Keyspace: row2[aggr.Col-1].ToString(), - Shard: row2[aggr.Col+1].ToString(), - Gtid: row2[aggr.Col].ToString(), - }) - data, _ := vgtid.MarshalVT() - val, _ := sqltypes.NewValue(sqltypes.VarBinary, data) - result[aggr.Col] = val - case AggregateRandom: - // we just grab the first value per grouping. no need to do anything more complicated here - default: - return nil, nil, fmt.Errorf("BUG: Unexpected opcode: %v", aggr.Opcode) - } - if err != nil { - return nil, nil, err + return nextRow, true, nil } } - return result, curDistincts, nil + return currentKey, false, nil } - func aggregateParamsToString(in any) string { return in.(*AggregateParams).String() } @@ -561,23 +298,3 @@ func (oa *OrderedAggregate) description() PrimitiveDescription { Other: other, } } - -func convertFinal(current []sqltypes.Value, aggregates []*AggregateParams) ([]sqltypes.Value, error) { - result := sqltypes.CopyRow(current) - for _, aggr := range aggregates { - switch aggr.Opcode { - case AggregateGtid: - vgtid := &binlogdatapb.VGtid{} - currentBytes, err := current[aggr.Col].ToBytes() - if err != nil { - return nil, err - } - err = vgtid.UnmarshalVT(currentBytes) - if err != nil { - return nil, err - } - result[aggr.Col] = sqltypes.NewVarChar(vgtid.String()) - } - } - return result, nil -} diff --git a/go/vt/vtgate/engine/ordered_aggregate_test.go b/go/vt/vtgate/engine/ordered_aggregate_test.go index a24ba88fd5e..8aa0bf3c3b4 100644 --- a/go/vt/vtgate/engine/ordered_aggregate_test.go +++ b/go/vt/vtgate/engine/ordered_aggregate_test.go @@ -28,10 +28,10 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/servenv" + . "vitess.io/vitess/go/vt/vtgate/engine/opcode" ) var collationEnv *collations.Environment @@ -44,7 +44,6 @@ func init() { } func TestOrderedAggregateExecute(t *testing.T) { - assert := assert.New(t) fields := sqltypes.MakeTestFields( "col|count(*)", "varbinary|decimal", @@ -61,16 +60,13 @@ func TestOrderedAggregateExecute(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } result, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) - assert.NoError(err) + assert.NoError(t, err) wantResult := sqltypes.MakeTestResult( fields, @@ -78,16 +74,15 @@ func TestOrderedAggregateExecute(t *testing.T) { "b|2", "c|7", ) - assert.Equal(wantResult, result) + utils.MustMatch(t, wantResult, result) } func TestOrderedAggregateExecuteTruncate(t *testing.T) { - assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col|count(*)|weight_string(col)", - "varchar|decimal|varbinary", + "varchar|int64|varbinary", ), "a|1|A", "A|1|A", @@ -97,33 +92,59 @@ func TestOrderedAggregateExecuteTruncate(t *testing.T) { )}, } + aggr := NewAggregateParam(AggregateSum, 1, "") + aggr.OrigOpcode = AggregateCountStar + oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{aggr}, GroupByKeys: []*GroupByParams{{KeyCol: 2}}, TruncateColumnCount: 2, Input: fp, } result, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) - assert.NoError(err) + assert.NoError(t, err) wantResult := sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col|count(*)", - "varchar|decimal", + "varchar|int64", ), "a|2", "b|2", "C|7", ) - assert.Equal(wantResult, result) + utils.MustMatch(t, wantResult, result) +} + +func TestMinMaxFailsCorrectly(t *testing.T) { + fp := &fakePrimitive{ + results: []*sqltypes.Result{sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "col|weight_string(col)", + "varchar|varbinary", + ), + "a|A", + "A|A", + "b|B", + "C|C", + "c|C", + )}, + } + + aggr := NewAggregateParam(AggregateMax, 0, "") + aggr.WCol = 1 + oa := &ScalarAggregate{ + Aggregates: []*AggregateParams{aggr}, + TruncateColumnCount: 1, + Input: fp, + } + + _, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) + assert.ErrorContains(t, err, "min/max on types that are not comparable is not supported") } func TestOrderedAggregateStreamExecute(t *testing.T) { - assert := assert.New(t) fields := sqltypes.MakeTestFields( "col|count(*)", "varbinary|decimal", @@ -140,10 +161,7 @@ func TestOrderedAggregateStreamExecute(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -153,7 +171,7 @@ func TestOrderedAggregateStreamExecute(t *testing.T) { results = append(results, qr) return nil }) - assert.NoError(err) + assert.NoError(t, err) wantResults := sqltypes.MakeTestStreamingResults( fields, @@ -163,11 +181,10 @@ func TestOrderedAggregateStreamExecute(t *testing.T) { "---", "c|7", ) - assert.Equal(wantResults, results) + utils.MustMatch(t, wantResults, results) } func TestOrderedAggregateStreamExecuteTruncate(t *testing.T) { - assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -183,10 +200,7 @@ func TestOrderedAggregateStreamExecuteTruncate(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, GroupByKeys: []*GroupByParams{{KeyCol: 2}}, TruncateColumnCount: 2, Input: fp, @@ -197,7 +211,7 @@ func TestOrderedAggregateStreamExecuteTruncate(t *testing.T) { results = append(results, qr) return nil }) - assert.NoError(err) + assert.NoError(t, err) wantResults := sqltypes.MakeTestStreamingResults( sqltypes.MakeTestFields( @@ -210,11 +224,10 @@ func TestOrderedAggregateStreamExecuteTruncate(t *testing.T) { "---", "C|7", ) - assert.Equal(wantResults, results) + utils.MustMatch(t, wantResults, results) } func TestOrderedAggregateGetFields(t *testing.T) { - assert := assert.New(t) input := sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col|count(*)", @@ -226,34 +239,8 @@ func TestOrderedAggregateGetFields(t *testing.T) { oa := &OrderedAggregate{Input: fp} got, err := oa.GetFields(context.Background(), nil, nil) - assert.NoError(err) - assert.Equal(got, input) -} - -func TestOrderedAggregateGetFieldsTruncate(t *testing.T) { - assert := assert.New(t) - result := sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "col|count(*)|weight_string(col)", - "varchar|decimal|varbinary", - ), - ) - fp := &fakePrimitive{results: []*sqltypes.Result{result}} - - oa := &OrderedAggregate{ - TruncateColumnCount: 2, - Input: fp, - } - - got, err := oa.GetFields(context.Background(), nil, nil) - assert.NoError(err) - wantResult := sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "col|count(*)", - "varchar|decimal", - ), - ) - assert.Equal(wantResult, got) + assert.NoError(t, err) + assert.Equal(t, got, input) } func TestOrderedAggregateInputFail(t *testing.T) { @@ -278,7 +265,6 @@ func TestOrderedAggregateInputFail(t *testing.T) { } func TestOrderedAggregateExecuteCountDistinct(t *testing.T) { - assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -317,23 +303,17 @@ func TestOrderedAggregateExecuteCountDistinct(t *testing.T) { )}, } + aggr1 := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct col2)") + aggr2 := NewAggregateParam(AggregateSum, 2, "") + aggr2.OrigOpcode = AggregateCountStar oa := &OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateCountDistinct, - Col: 1, - Alias: "count(distinct col2)", - }, { - // Also add a count(*) - Opcode: AggregateSum, - Col: 2, - }}, + Aggregates: []*AggregateParams{aggr1, aggr2}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } result, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) - assert.NoError(err) + assert.NoError(t, err) wantResult := sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -350,11 +330,10 @@ func TestOrderedAggregateExecuteCountDistinct(t *testing.T) { "h|3|4", "i|2|2", ) - assert.Equal(wantResult, result) + utils.MustMatch(t, wantResult, result) } func TestOrderedAggregateStreamCountDistinct(t *testing.T) { - assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -393,17 +372,13 @@ func TestOrderedAggregateStreamCountDistinct(t *testing.T) { )}, } + aggr2 := NewAggregateParam(AggregateSum, 2, "") + aggr2.OrigOpcode = AggregateCountDistinct + oa := &OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateCountDistinct, - Col: 1, - Alias: "count(distinct col2)", - }, { - // Also add a count(*) - Opcode: AggregateSum, - Col: 2, - }}, + Aggregates: []*AggregateParams{ + NewAggregateParam(AggregateCountDistinct, 1, "count(distinct col2)"), + aggr2}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -413,7 +388,7 @@ func TestOrderedAggregateStreamCountDistinct(t *testing.T) { results = append(results, qr) return nil }) - assert.NoError(err) + assert.NoError(t, err) wantResults := sqltypes.MakeTestStreamingResults( sqltypes.MakeTestFields( @@ -438,11 +413,10 @@ func TestOrderedAggregateStreamCountDistinct(t *testing.T) { "-----", "i|2|2", ) - assert.Equal(wantResults, results) + utils.MustMatch(t, wantResults, results) } func TestOrderedAggregateSumDistinctGood(t *testing.T) { - assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -483,22 +457,16 @@ func TestOrderedAggregateSumDistinctGood(t *testing.T) { } oa := &OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateSumDistinct, - Col: 1, - Alias: "sum(distinct col2)", - }, { - // Also add a count(*) - Opcode: AggregateSum, - Col: 2, - }}, + Aggregates: []*AggregateParams{ + NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct col2)"), + NewAggregateParam(AggregateSum, 2, ""), + }, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } result, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) - assert.NoError(err) + assert.NoError(t, err) wantResult := sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -517,7 +485,7 @@ func TestOrderedAggregateSumDistinctGood(t *testing.T) { ) want := fmt.Sprintf("%v", wantResult.Rows) got := fmt.Sprintf("%v", result.Rows) - assert.Equal(want, got) + assert.Equal(t, want, got) } func TestOrderedAggregateSumDistinctTolerateError(t *testing.T) { @@ -534,12 +502,7 @@ func TestOrderedAggregateSumDistinctTolerateError(t *testing.T) { } oa := &OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateSumDistinct, - Col: 1, - Alias: "sum(distinct col2)", - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct col2)")}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -550,7 +513,7 @@ func TestOrderedAggregateSumDistinctTolerateError(t *testing.T) { wantResult := sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col1|sum(distinct col2)", - "varbinary|decimal", + "varbinary|float64", ), "a|1", ) @@ -571,10 +534,7 @@ func TestOrderedAggregateKeysFail(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -604,10 +564,7 @@ func TestOrderedAggregateMergeFail(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -641,43 +598,6 @@ func TestOrderedAggregateMergeFail(t *testing.T) { require.NoError(t, err) } -func TestMerge(t *testing.T) { - assert := assert.New(t) - oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }, { - Opcode: AggregateSum, - Col: 2, - }, { - Opcode: AggregateMin, - Col: 3, - }, { - Opcode: AggregateMax, - Col: 4, - }}, - } - fields := sqltypes.MakeTestFields( - "a|b|c|d|e", - "int64|int64|decimal|in32|varbinary", - ) - r := sqltypes.MakeTestResult(fields, - "1|2|3.2|3|ab", - "1|3|2.8|2|bc", - ) - - merged, _, err := merge(fields, r.Rows[0], r.Rows[1], nil, nil, oa.Aggregates) - assert.NoError(err) - want := sqltypes.MakeTestResult(fields, "1|5|6.0|2|bc").Rows[0] - assert.Equal(want, merged) - - // swap and retry - merged, _, err = merge(fields, r.Rows[1], r.Rows[0], nil, nil, oa.Aggregates) - assert.NoError(err) - assert.Equal(want, merged) -} - func TestOrderedAggregateExecuteGtid(t *testing.T) { vgtid := binlogdatapb.VGtid{} vgtid.ShardGtids = append(vgtid.ShardGtids, &binlogdatapb.ShardGtid{ @@ -705,12 +625,7 @@ func TestOrderedAggregateExecuteGtid(t *testing.T) { } oa := &OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateGtid, - Col: 1, - Alias: "vgtid", - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateGtid, 1, "vgtid")}, TruncateColumnCount: 2, Input: fp, } @@ -725,7 +640,7 @@ func TestOrderedAggregateExecuteGtid(t *testing.T) { ), `ks|shard_gtids:{keyspace:"ks" shard:"-40" gtid:"a"} shard_gtids:{keyspace:"ks" shard:"40-80" gtid:"b"} shard_gtids:{keyspace:"ks" shard:"80-c0" gtid:"c"} shard_gtids:{keyspace:"ks" shard:"c0-" gtid:"d"}`, ) - assert.Equal(t, wantResult, result) + utils.MustMatch(t, wantResult, result) } func TestCountDistinctOnVarchar(t *testing.T) { @@ -743,15 +658,10 @@ func TestCountDistinctOnVarchar(t *testing.T) { )}, } + aggr := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct c2)") + aggr.WCol = 2 oa := &OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateCountDistinct, - Col: 1, - WCol: 2, - WAssigned: true, - Alias: "count(distinct c2)", - }}, + Aggregates: []*AggregateParams{aggr}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, TruncateColumnCount: 2, @@ -768,7 +678,7 @@ func TestCountDistinctOnVarchar(t *testing.T) { qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) require.NoError(t, err) - assert.Equal(t, want, qr) + utils.MustMatch(t, want, qr) fp.rewind() results := &sqltypes.Result{} @@ -780,7 +690,7 @@ func TestCountDistinctOnVarchar(t *testing.T) { return nil }) require.NoError(t, err) - assert.Equal(t, want, results) + utils.MustMatch(t, want, results) } func TestCountDistinctOnVarcharWithNulls(t *testing.T) { @@ -808,15 +718,10 @@ func TestCountDistinctOnVarcharWithNulls(t *testing.T) { )}, } + aggr := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct c2)") + aggr.WCol = 2 oa := &OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateCountDistinct, - Col: 1, - WCol: 2, - WAssigned: true, - Alias: "count(distinct c2)", - }}, + Aggregates: []*AggregateParams{aggr}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, TruncateColumnCount: 2, @@ -835,7 +740,7 @@ func TestCountDistinctOnVarcharWithNulls(t *testing.T) { qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) require.NoError(t, err) - assert.Equal(t, want, qr) + utils.MustMatch(t, want, qr) fp.rewind() results := &sqltypes.Result{} @@ -847,7 +752,7 @@ func TestCountDistinctOnVarcharWithNulls(t *testing.T) { return nil }) require.NoError(t, err) - assert.Equal(t, want, results) + utils.MustMatch(t, want, results) } func TestSumDistinctOnVarcharWithNulls(t *testing.T) { @@ -875,15 +780,10 @@ func TestSumDistinctOnVarcharWithNulls(t *testing.T) { )}, } + aggr := NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct c2)") + aggr.WCol = 2 oa := &OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateSumDistinct, - Col: 1, - WCol: 2, - WAssigned: true, - Alias: "sum(distinct c2)", - }}, + Aggregates: []*AggregateParams{aggr}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, TruncateColumnCount: 2, @@ -892,7 +792,7 @@ func TestSumDistinctOnVarcharWithNulls(t *testing.T) { want := sqltypes.MakeTestResult( sqltypes.MakeTestFields( "c1|sum(distinct c2)", - "int64|decimal", + "int64|float64", ), `null|0`, `10|0`, @@ -902,7 +802,7 @@ func TestSumDistinctOnVarcharWithNulls(t *testing.T) { qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) require.NoError(t, err) - assert.Equal(t, want, qr) + utils.MustMatch(t, want, qr) fp.rewind() results := &sqltypes.Result{} @@ -914,7 +814,7 @@ func TestSumDistinctOnVarcharWithNulls(t *testing.T) { return nil }) require.NoError(t, err) - assert.Equal(t, want, results) + utils.MustMatch(t, want, results) } func TestMultiDistinct(t *testing.T) { @@ -945,16 +845,10 @@ func TestMultiDistinct(t *testing.T) { } oa := &OrderedAggregate{ - PreProcess: true, - Aggregates: []*AggregateParams{{ - Opcode: AggregateCountDistinct, - Col: 1, - Alias: "count(distinct c2)", - }, { - Opcode: AggregateSumDistinct, - Col: 2, - Alias: "sum(distinct c3)", - }}, + Aggregates: []*AggregateParams{ + NewAggregateParam(AggregateCountDistinct, 1, "count(distinct c2)"), + NewAggregateParam(AggregateSumDistinct, 2, "sum(distinct c3)"), + }, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -973,7 +867,7 @@ func TestMultiDistinct(t *testing.T) { qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) require.NoError(t, err) - assert.Equal(t, want, qr) + utils.MustMatch(t, want, qr) fp.rewind() results := &sqltypes.Result{} @@ -985,11 +879,10 @@ func TestMultiDistinct(t *testing.T) { return nil }) require.NoError(t, err) - assert.Equal(t, want, results) + utils.MustMatch(t, want, results) } func TestOrderedAggregateCollate(t *testing.T) { - assert := assert.New(t) fields := sqltypes.MakeTestFields( "col|count(*)", "varchar|decimal", @@ -1011,17 +904,13 @@ func TestOrderedAggregateCollate(t *testing.T) { collationID, _ := collationEnv.LookupID("utf8mb4_0900_ai_ci") oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, GroupByKeys: []*GroupByParams{{KeyCol: 0, CollationID: collationID}}, Input: fp, - Collations: map[int]collations.ID{0: collationID}, } result, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) - assert.NoError(err) + assert.NoError(t, err) wantResult := sqltypes.MakeTestResult( fields, @@ -1030,11 +919,10 @@ func TestOrderedAggregateCollate(t *testing.T) { "c|7", "ß|13", ) - assert.Equal(wantResult, result) + utils.MustMatch(t, wantResult, result) } func TestOrderedAggregateCollateAS(t *testing.T) { - assert := assert.New(t) fields := sqltypes.MakeTestFields( "col|count(*)", "varchar|decimal", @@ -1054,17 +942,13 @@ func TestOrderedAggregateCollateAS(t *testing.T) { collationID, _ := collationEnv.LookupID("utf8mb4_0900_as_ci") oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, GroupByKeys: []*GroupByParams{{KeyCol: 0, CollationID: collationID}}, - Collations: map[int]collations.ID{0: collationID}, Input: fp, } result, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) - assert.NoError(err) + assert.NoError(t, err) wantResult := sqltypes.MakeTestResult( fields, @@ -1074,11 +958,10 @@ func TestOrderedAggregateCollateAS(t *testing.T) { "c|7", "Ç|4", ) - assert.Equal(wantResult, result) + utils.MustMatch(t, wantResult, result) } func TestOrderedAggregateCollateKS(t *testing.T) { - assert := assert.New(t) fields := sqltypes.MakeTestFields( "col|count(*)", "varchar|decimal", @@ -1099,17 +982,13 @@ func TestOrderedAggregateCollateKS(t *testing.T) { collationID, _ := collationEnv.LookupID("utf8mb4_ja_0900_as_cs_ks") oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{{ - Opcode: AggregateSum, - Col: 1, - }}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, GroupByKeys: []*GroupByParams{{KeyCol: 0, CollationID: collationID}}, - Collations: map[int]collations.ID{0: collationID}, Input: fp, } result, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) - assert.NoError(err) + assert.NoError(t, err) wantResult := sqltypes.MakeTestResult( fields, @@ -1121,5 +1000,172 @@ func TestOrderedAggregateCollateKS(t *testing.T) { "\xE3\x83\x8F\xE3\x81\xAF|2", "\xE3\x83\x8F\xE3\x83\x8F|1", ) - assert.Equal(wantResult, result) + utils.MustMatch(t, wantResult, result) +} + +// TestGroupConcatWithAggrOnEngine tests group_concat with full aggregation on engine. +func TestGroupConcatWithAggrOnEngine(t *testing.T) { + fields := sqltypes.MakeTestFields( + "c1|c2", + "int64|varchar", + ) + + varbinaryFields := sqltypes.MakeTestFields( + "c1|c2", + "int64|varbinary", + ) + + textOutFields := sqltypes.MakeTestFields( + "c1|group_concat(c2)", + "int64|text", + ) + + var tcases = []struct { + name string + inputResult *sqltypes.Result + expResult *sqltypes.Result + }{{ + name: "multiple grouping keys", + inputResult: sqltypes.MakeTestResult(fields, + "10|a", "10|a", "10|b", + "20|b", + "30|null", + "40|null", "40|c", + "50|d", "50|null", "50|a", "50|", "50|"), + expResult: sqltypes.MakeTestResult(textOutFields, + `10|a,a,b`, + `20|b`, + `30|null`, + `40|c`, + `50|d,a,,`), + }, { + name: "empty result", + inputResult: sqltypes.MakeTestResult(fields), + expResult: sqltypes.MakeTestResult(textOutFields), + }, { + name: "null value for concat", + inputResult: sqltypes.MakeTestResult(fields, + "42|null", "42|null", "42|null"), + expResult: sqltypes.MakeTestResult(textOutFields, + `42|null`), + }, { + name: "concat on varbinary column", + inputResult: sqltypes.MakeTestResult(varbinaryFields, + "42|a", "42|b", "42|c"), + expResult: sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "c1|group_concat(c2)", + "int64|blob", + ), + `42|a,b,c`), + }} + + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + fp := &fakePrimitive{results: []*sqltypes.Result{tcase.inputResult}} + oa := &OrderedAggregate{ + Aggregates: []*AggregateParams{NewAggregateParam(AggregateGroupConcat, 1, "group_concat(c2)")}, + GroupByKeys: []*GroupByParams{{KeyCol: 0}}, + Input: fp, + } + qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) + require.NoError(t, err) + if len(qr.Rows) == 0 { + qr.Rows = nil // just to make the expectation. + // empty slice or nil both are valid and will not cause any issue. + } + utils.MustMatch(t, tcase.expResult, qr) + + fp.rewind() + results := &sqltypes.Result{} + err = oa.TryStreamExecute(context.Background(), &noopVCursor{}, nil, true, func(qr *sqltypes.Result) error { + if qr.Fields != nil { + results.Fields = qr.Fields + } + results.Rows = append(results.Rows, qr.Rows...) + return nil + }) + require.NoError(t, err) + utils.MustMatch(t, tcase.expResult, results) + }) + } +} + +// TestGroupConcat tests group_concat with partial aggregation on engine. +func TestGroupConcat(t *testing.T) { + fields := sqltypes.MakeTestFields( + "c1|group_concat(c2)", + "int64|text", + ) + + varbinaryFields := sqltypes.MakeTestFields( + "c1|group_concat(c2)", + "int64|blob", + ) + + var tcases = []struct { + name string + inputResult *sqltypes.Result + expResult *sqltypes.Result + }{{ + name: "multiple grouping keys", + inputResult: sqltypes.MakeTestResult(fields, + "10|a", "10|a", "10|b", + "20|b", + "30|null", + "40|null", "40|c", + "50|d", "50|null", "50|a", "50|", "50|"), + expResult: sqltypes.MakeTestResult(fields, + `10|a,a,b`, + `20|b`, + `30|null`, + `40|c`, + `50|d,a,,`), + }, { + name: "empty result", + inputResult: sqltypes.MakeTestResult(fields), + expResult: sqltypes.MakeTestResult(fields), + }, { + name: "null value for concat", + inputResult: sqltypes.MakeTestResult(fields, + "42|null", "42|null", "42|null"), + expResult: sqltypes.MakeTestResult(fields, + `42|null`), + }, { + name: "concat on varbinary column", + inputResult: sqltypes.MakeTestResult(varbinaryFields, + "42|a", "42|b", "42|c"), + expResult: sqltypes.MakeTestResult(varbinaryFields, + `42|a,b,c`), + }} + + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + fp := &fakePrimitive{results: []*sqltypes.Result{tcase.inputResult}} + oa := &OrderedAggregate{ + Aggregates: []*AggregateParams{NewAggregateParam(AggregateGroupConcat, 1, "")}, + GroupByKeys: []*GroupByParams{{KeyCol: 0}}, + Input: fp, + } + qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) + require.NoError(t, err) + if len(qr.Rows) == 0 { + qr.Rows = nil // just to make the expectation. + // empty slice or nil both are valid and will not cause any issue. + } + assert.Equal(t, tcase.expResult, qr) + + fp.rewind() + results := &sqltypes.Result{} + err = oa.TryStreamExecute(context.Background(), &noopVCursor{}, nil, true, func(qr *sqltypes.Result) error { + if qr.Fields != nil { + results.Fields = qr.Fields + } + results.Rows = append(results.Rows, qr.Rows...) + return nil + }) + require.NoError(t, err) + assert.Equal(t, tcase.expResult, results) + }) + } } diff --git a/go/vt/vtgate/engine/plan_description.go b/go/vt/vtgate/engine/plan_description.go index 0e7929bbe0c..72220fda460 100644 --- a/go/vt/vtgate/engine/plan_description.go +++ b/go/vt/vtgate/engine/plan_description.go @@ -28,6 +28,8 @@ import ( "vitess.io/vitess/go/vt/vtgate/vindexes" ) +const inputName = "InputName" + // PrimitiveDescription is used to create a serializable representation of the Primitive tree // Using this structure, all primitives can share json marshalling code, which gives us an uniform output type PrimitiveDescription struct { @@ -41,7 +43,9 @@ type PrimitiveDescription struct { // this is only used in conjunction with TargetDestination TargetTabletType topodatapb.TabletType Other map[string]any - Inputs []PrimitiveDescription + + InputName string + Inputs []PrimitiveDescription } // MarshalJSON serializes the PlanDescription into a JSON representation. @@ -51,16 +55,24 @@ func (pd PrimitiveDescription) MarshalJSON() ([]byte, error) { buf := &bytes.Buffer{} buf.WriteString("{") - if err := marshalAdd("", buf, "OperatorType", pd.OperatorType); err != nil { + prepend := "" + if pd.InputName != "" { + if err := marshalAdd(prepend, buf, "InputName", pd.InputName); err != nil { + return nil, err + } + prepend = "," + } + if err := marshalAdd(prepend, buf, "OperatorType", pd.OperatorType); err != nil { return nil, err } + prepend = "," if pd.Variant != "" { - if err := marshalAdd(",", buf, "Variant", pd.Variant); err != nil { + if err := marshalAdd(prepend, buf, "Variant", pd.Variant); err != nil { return nil, err } } if pd.Keyspace != nil { - if err := marshalAdd(",", buf, "Keyspace", pd.Keyspace); err != nil { + if err := marshalAdd(prepend, buf, "Keyspace", pd.Keyspace); err != nil { return nil, err } } @@ -68,12 +80,12 @@ func (pd PrimitiveDescription) MarshalJSON() ([]byte, error) { s := pd.TargetDestination.String() dest := s[11:] // TODO: All these start with Destination. We should fix that instead if trimming it out here - if err := marshalAdd(",", buf, "TargetDestination", dest); err != nil { + if err := marshalAdd(prepend, buf, "TargetDestination", dest); err != nil { return nil, err } } if pd.TargetTabletType != topodatapb.TabletType_UNKNOWN { - if err := marshalAdd(",", buf, "TargetTabletType", pd.TargetTabletType.String()); err != nil { + if err := marshalAdd(prepend, buf, "TargetTabletType", pd.TargetTabletType.String()); err != nil { return nil, err } } @@ -83,7 +95,7 @@ func (pd PrimitiveDescription) MarshalJSON() ([]byte, error) { } if len(pd.Inputs) > 0 { - if err := marshalAdd(",", buf, "Inputs", pd.Inputs); err != nil { + if err := marshalAdd(prepend, buf, "Inputs", pd.Inputs); err != nil { return nil, err } } @@ -144,7 +156,7 @@ func GraphViz(p Primitive) (*graphviz.Graph, error) { func addMap(input map[string]any, buf *bytes.Buffer) error { var mk []string for k, v := range input { - if v == "" || v == nil || v == 0 { + if v == "" || v == nil || v == 0 || v == false { continue } mk = append(mk, k) @@ -172,11 +184,25 @@ func marshalAdd(prepend string, buf *bytes.Buffer, name string, obj any) error { func PrimitiveToPlanDescription(in Primitive) PrimitiveDescription { this := in.description() - for _, input := range in.Inputs() { - this.Inputs = append(this.Inputs, PrimitiveToPlanDescription(input)) + inputs, infos := in.Inputs() + for idx, input := range inputs { + pd := PrimitiveToPlanDescription(input) + if infos != nil { + for k, v := range infos[idx] { + if k == inputName { + pd.InputName = v.(string) + continue + } + if pd.Other == nil { + pd.Other = map[string]any{} + } + pd.Other[k] = v + } + } + this.Inputs = append(this.Inputs, pd) } - if len(in.Inputs()) == 0 { + if len(inputs) == 0 { this.Inputs = []PrimitiveDescription{} } diff --git a/go/vt/vtgate/engine/plan_description_test.go b/go/vt/vtgate/engine/plan_description_test.go index 0d985b9b606..b986cea59cf 100644 --- a/go/vt/vtgate/engine/plan_description_test.go +++ b/go/vt/vtgate/engine/plan_description_test.go @@ -50,7 +50,7 @@ func TestCreateRoutePlanDescription(t *testing.T) { } func createRoute() *Route { - hash, _ := vindexes.NewHash("vindex name", nil) + hash, _ := vindexes.CreateVindex("hash", "vindex name", nil) return &Route{ RoutingParameters: &RoutingParameters{ Opcode: Scatter, diff --git a/go/vt/vtgate/engine/primitive.go b/go/vt/vtgate/engine/primitive.go index 36d0719796b..b5d67c9d994 100644 --- a/go/vt/vtgate/engine/primitive.go +++ b/go/vt/vtgate/engine/primitive.go @@ -18,6 +18,7 @@ package engine import ( "context" + "time" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" @@ -28,6 +29,7 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" ) @@ -53,7 +55,6 @@ type ( // if the max memory rows override directive is set to true ExceedsMaxMemoryRows(numRows int) bool - // V3 functions. Execute(ctx context.Context, method string, query string, bindVars map[string]*querypb.BindVariable, rollbackOnError bool, co vtgatepb.CommitOrder) (*sqltypes.Result, error) AutocommitApproval() bool @@ -86,6 +87,7 @@ type ( Session() SessionActions ConnCollation() collations.ID + TimeZone() *time.Location ExecuteLock(ctx context.Context, rs *srvtopo.ResolvedShard, query *querypb.BoundQuery, lockFuncType sqlparser.LockingFuncType) (*sqltypes.Result, error) @@ -109,6 +111,8 @@ type ( ShowExec(ctx context.Context, command sqlparser.ShowCommandType, filter *sqlparser.ShowFilter) (*sqltypes.Result, error) // SetExec takes in k,v pair and use executor to set them in topo metadata. SetExec(ctx context.Context, name string, value string) error + // ThrottleApp sets a ThrottlerappRule in topo + ThrottleApp(ctx context.Context, throttleAppRule *topodatapb.ThrottledAppRule) error // CanUseSetVar returns true if system_settings can use SET_VAR hint. CanUseSetVar() bool @@ -125,6 +129,7 @@ type ( SetTarget(target string) error SetUDV(key string, value any) error + GetUDV(key string) *querypb.BindVariable SetSysVar(name string, expr string) @@ -145,10 +150,14 @@ type ( SetWorkload(querypb.ExecuteOptions_Workload) SetPlannerVersion(querypb.ExecuteOptions_PlannerVersion) SetConsolidator(querypb.ExecuteOptions_Consolidator) + SetWorkloadName(string) + SetPriority(string) SetFoundRows(uint64) SetDDLStrategy(string) GetDDLStrategy() string + SetMigrationContext(string) + GetMigrationContext() string GetSessionUUID() string @@ -213,8 +222,9 @@ type ( TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error - // Inputs is a slice containing the inputs to this Primitive - Inputs() []Primitive + // Inputs is a slice containing the inputs to this Primitive. + // The returned map has additional information about the inputs, that is used in the description. + Inputs() ([]Primitive, []map[string]any) // description is the description, sans the inputs, of this Primitive. // to get the plan description with all children, use PrimitiveToPlanDescription() @@ -229,12 +239,6 @@ type ( // txNeeded is a default implementation for Primitives that need transaction handling txNeeded struct{} - - // Gen4Comparer interfaces all Primitive used to compare Gen4 with other planners (V3, MySQL, ...). - Gen4Comparer interface { - Primitive - GetGen4Primitive() Primitive - } ) // Find will return the first Primitive that matches the evaluate function. If no match is found, nil will be returned @@ -242,7 +246,8 @@ func Find(isMatch Match, start Primitive) Primitive { if isMatch(start) { return start } - for _, input := range start.Inputs() { + inputs, _ := start.Inputs() + for _, input := range inputs { result := Find(isMatch, input) if result != nil { return result @@ -257,8 +262,8 @@ func Exists(m Match, p Primitive) bool { } // Inputs implements no inputs -func (noInputs) Inputs() []Primitive { - return nil +func (noInputs) Inputs() ([]Primitive, []map[string]any) { + return nil, nil } func (noTxNeeded) NeedsTransaction() bool { diff --git a/go/vt/vtgate/engine/projection.go b/go/vt/vtgate/engine/projection.go index 0c0875d19d9..ad1be62ea53 100644 --- a/go/vt/vtgate/engine/projection.go +++ b/go/vt/vtgate/engine/projection.go @@ -20,6 +20,8 @@ import ( "context" "sync" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/vtgate/evalengine" @@ -57,23 +59,22 @@ func (p *Projection) TryExecute(ctx context.Context, vcursor VCursor, bindVars m return nil, err } - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) - env.Fields = result.Fields + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) var resultRows []sqltypes.Row for _, row := range result.Rows { resultRow := make(sqltypes.Row, 0, len(p.Exprs)) env.Row = row for _, exp := range p.Exprs { - result, err := env.Evaluate(exp) + c, err := env.Evaluate(exp) if err != nil { return nil, err } - resultRow = append(resultRow, result.Value()) + resultRow = append(resultRow, c.Value(vcursor.ConnCollation())) } resultRows = append(resultRows, resultRow) } if wantfields { - err := p.addFields(env, result) + result.Fields, err = p.evalFields(env, result.Fields, vcursor) if err != nil { return nil, err } @@ -84,21 +85,18 @@ func (p *Projection) TryExecute(ctx context.Context, vcursor VCursor, bindVars m // TryStreamExecute implements the Primitive interface func (p *Projection) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) var once sync.Once var fields []*querypb.Field return vcursor.StreamExecutePrimitive(ctx, p.Input, bindVars, wantfields, func(qr *sqltypes.Result) error { var err error if wantfields { once.Do(func() { - env.Fields = qr.Fields - fieldRes := &sqltypes.Result{} - err = p.addFields(env, fieldRes) + fields, err = p.evalFields(env, qr.Fields, vcursor) if err != nil { return } - fields = fieldRes.Fields - err = callback(fieldRes) + err = callback(&sqltypes.Result{Fields: fields}) if err != nil { return } @@ -117,7 +115,7 @@ func (p *Projection) TryStreamExecute(ctx context.Context, vcursor VCursor, bind if err != nil { return err } - resultRow = append(resultRow, c.Value()) + resultRow = append(resultRow, c.Value(vcursor.ConnCollation())) } resultRows = append(resultRows, resultRow) } @@ -132,32 +130,43 @@ func (p *Projection) GetFields(ctx context.Context, vcursor VCursor, bindVars ma if err != nil { return nil, err } - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) - err = p.addFields(env, qr) + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) + qr.Fields, err = p.evalFields(env, qr.Fields, vcursor) if err != nil { return nil, err } return qr, nil } -func (p *Projection) addFields(env *evalengine.ExpressionEnv, qr *sqltypes.Result) error { - qr.Fields = nil +func (p *Projection) evalFields(env *evalengine.ExpressionEnv, infields []*querypb.Field, vcursor VCursor) ([]*querypb.Field, error) { + var fields []*querypb.Field for i, col := range p.Cols { - q, err := env.TypeOf(p.Exprs[i]) + q, f, err := env.TypeOf(p.Exprs[i], infields) if err != nil { - return err + return nil, err + } + var cs collations.ID = collations.CollationBinaryID + if sqltypes.IsText(q) { + cs = vcursor.ConnCollation() + } + + fl := mysql.FlagsForColumn(q, cs) + if !sqltypes.IsNull(q) && !f.Nullable() { + fl |= uint32(querypb.MySqlFlag_NOT_NULL_FLAG) } - qr.Fields = append(qr.Fields, &querypb.Field{ - Name: col, - Type: q, + fields = append(fields, &querypb.Field{ + Name: col, + Type: q, + Charset: uint32(cs), + Flags: fl, }) } - return nil + return fields, nil } // Inputs implements the Primitive interface -func (p *Projection) Inputs() []Primitive { - return []Primitive{p.Input} +func (p *Projection) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{p.Input}, nil } // description implements the Primitive interface diff --git a/go/vt/vtgate/engine/projection_test.go b/go/vt/vtgate/engine/projection_test.go index 701ee8e2aaf..37d1730e2e1 100644 --- a/go/vt/vtgate/engine/projection_test.go +++ b/go/vt/vtgate/engine/projection_test.go @@ -24,6 +24,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" @@ -106,7 +108,7 @@ func TestEmptyInput(t *testing.T) { } func TestHexAndBinaryArgument(t *testing.T) { - hexExpr, err := evalengine.Translate(sqlparser.Argument("vtg1"), nil) + hexExpr, err := evalengine.Translate(sqlparser.NewArgument("vtg1"), nil) require.NoError(t, err) proj := &Projection{ Cols: []string{"hex"}, @@ -120,3 +122,50 @@ func TestHexAndBinaryArgument(t *testing.T) { require.NoError(t, err) assert.Equal(t, `[[VARBINARY("\t")]]`, fmt.Sprintf("%v", qr.Rows)) } + +func TestFields(t *testing.T) { + var testCases = []struct { + name string + bindVar *querypb.BindVariable + typ querypb.Type + collation collations.ID + }{ + { + name: `integer`, + bindVar: sqltypes.Int64BindVariable(10), + typ: querypb.Type_INT64, + collation: collations.CollationBinaryID, + }, + { + name: `string`, + bindVar: sqltypes.StringBindVariable("test"), + typ: querypb.Type_VARCHAR, + collation: collations.Default(), + }, + { + name: `binary`, + bindVar: sqltypes.BytesBindVariable([]byte("test")), + typ: querypb.Type_VARBINARY, + collation: collations.CollationBinaryID, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + bindExpr, err := evalengine.Translate(sqlparser.NewArgument("vtg1"), nil) + require.NoError(t, err) + proj := &Projection{ + Cols: []string{"col"}, + Exprs: []evalengine.Expr{bindExpr}, + Input: &SingleRow{}, + noTxNeeded: noTxNeeded{}, + } + qr, err := proj.TryExecute(context.Background(), &noopVCursor{}, map[string]*querypb.BindVariable{ + "vtg1": testCase.bindVar, + }, true) + require.NoError(t, err) + assert.Equal(t, testCase.typ, qr.Fields[0].Type) + assert.Equal(t, testCase.collation, collations.ID(qr.Fields[0].Charset)) + }) + } +} diff --git a/go/vt/vtgate/engine/pullout_subquery.go b/go/vt/vtgate/engine/pullout_subquery.go index d3c614b4dd6..096cbf707f7 100644 --- a/go/vt/vtgate/engine/pullout_subquery.go +++ b/go/vt/vtgate/engine/pullout_subquery.go @@ -18,10 +18,10 @@ package engine import ( "context" - "fmt" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/vterrors" + . "vitess.io/vitess/go/vt/vtgate/engine/opcode" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -43,8 +43,12 @@ type PulloutSubquery struct { } // Inputs returns the input primitives for this join -func (ps *PulloutSubquery) Inputs() []Primitive { - return []Primitive{ps.Subquery, ps.Underlying} +func (ps *PulloutSubquery) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{ps.Subquery, ps.Underlying}, []map[string]any{{ + inputName: "SubQuery", + }, { + inputName: "Outer", + }} } // RouteType returns a description of the query routing type used by the primitive @@ -189,32 +193,3 @@ func (ps *PulloutSubquery) description() PrimitiveDescription { Other: other, } } - -// PulloutOpcode is a number representing the opcode -// for the PulloutSubquery primitive. -type PulloutOpcode int - -// This is the list of PulloutOpcode values. -const ( - PulloutValue = PulloutOpcode(iota) - PulloutIn - PulloutNotIn - PulloutExists -) - -var pulloutName = map[PulloutOpcode]string{ - PulloutValue: "PulloutValue", - PulloutIn: "PulloutIn", - PulloutNotIn: "PulloutNotIn", - PulloutExists: "PulloutExists", -} - -func (code PulloutOpcode) String() string { - return pulloutName[code] -} - -// MarshalJSON serializes the PulloutOpcode as a JSON string. -// It's used for testing and diagnostics. -func (code PulloutOpcode) MarshalJSON() ([]byte, error) { - return ([]byte)(fmt.Sprintf("\"%s\"", code.String())), nil -} diff --git a/go/vt/vtgate/engine/pullout_subquery_test.go b/go/vt/vtgate/engine/pullout_subquery_test.go index d2f57383e99..9b6e7c490f0 100644 --- a/go/vt/vtgate/engine/pullout_subquery_test.go +++ b/go/vt/vtgate/engine/pullout_subquery_test.go @@ -24,8 +24,8 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" - querypb "vitess.io/vitess/go/vt/proto/query" + . "vitess.io/vitess/go/vt/vtgate/engine/opcode" ) func TestPulloutSubqueryValueGood(t *testing.T) { diff --git a/go/vt/vtgate/engine/rename_fields.go b/go/vt/vtgate/engine/rename_fields.go index 3eb1917abdd..e1dc7cbbb43 100644 --- a/go/vt/vtgate/engine/rename_fields.go +++ b/go/vt/vtgate/engine/rename_fields.go @@ -110,8 +110,8 @@ func (r *RenameFields) GetFields(ctx context.Context, vcursor VCursor, bindVars } // Inputs implements the primitive interface -func (r *RenameFields) Inputs() []Primitive { - return []Primitive{r.Input} +func (r *RenameFields) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{r.Input}, nil } // description implements the primitive interface diff --git a/go/vt/vtgate/engine/replace_variables.go b/go/vt/vtgate/engine/replace_variables.go index 5667e9bae10..66375266427 100644 --- a/go/vt/vtgate/engine/replace_variables.go +++ b/go/vt/vtgate/engine/replace_variables.go @@ -77,8 +77,8 @@ func (r *ReplaceVariables) GetFields(ctx context.Context, vcursor VCursor, bindV } // Inputs implements the Primitive interface -func (r *ReplaceVariables) Inputs() []Primitive { - return []Primitive{r.Input} +func (r *ReplaceVariables) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{r.Input}, nil } // description implements the Primitive interface diff --git a/go/vt/vtgate/engine/route.go b/go/vt/vtgate/engine/route.go index 419ea52726e..80c4f181830 100644 --- a/go/vt/vtgate/engine/route.go +++ b/go/vt/vtgate/engine/route.go @@ -25,9 +25,9 @@ import ( "time" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/key" @@ -89,16 +89,6 @@ type Route struct { noTxNeeded } -// NewSimpleRoute creates a Route with the bare minimum of parameters. -func NewSimpleRoute(opcode Opcode, keyspace *vindexes.Keyspace) *Route { - return &Route{ - RoutingParameters: &RoutingParameters{ - Opcode: opcode, - Keyspace: keyspace, - }, - } -} - // NewRoute creates a Route. func NewRoute(opcode Opcode, keyspace *vindexes.Keyspace, query, fieldQuery string) *Route { return &Route{ @@ -120,8 +110,8 @@ type OrderByParams struct { WeightStringCol int Desc bool StarColFixedIndex int - // v3 specific boolean. Used to also add weight strings originating from GroupBys to the Group by clause - FromGroupBy bool + // Type for knowing if the collation is relevant + Type querypb.Type // Collation ID for comparison using collation CollationID collations.ID } @@ -140,9 +130,9 @@ func (obp OrderByParams) String() string { } else { val += " ASC" } - if obp.CollationID != collations.Unknown { - collation := obp.CollationID.Get() - val += " COLLATE " + collation.Name() + + if sqltypes.IsText(obp.Type) && obp.CollationID != collations.Unknown { + val += " COLLATE " + collations.Local().LookupName(obp.CollationID) } return val } @@ -259,7 +249,7 @@ func (route *Route) executeShards( partialSuccessScatterQueries.Add(1) for _, err := range errs { - serr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) + serr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) vcursor.Session().RecordWarning(&querypb.QueryWarning{Code: uint32(serr.Num), Message: err.Error()}) } } @@ -348,7 +338,7 @@ func (route *Route) streamExecuteShards( } partialSuccessScatterQueries.Add(1) for _, err := range errs { - sErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) + sErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) vcursor.Session().RecordWarning(&querypb.QueryWarning{Code: uint32(sErr.Num), Message: err.Error()}) } } @@ -389,15 +379,30 @@ func (route *Route) mergeSort( // GetFields fetches the field info. func (route *Route) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - rss, _, err := vcursor.ResolveDestinations(ctx, route.Keyspace.Name, nil, []key.Destination{key.DestinationAnyShard{}}) - if err != nil { - return nil, err + var rs *srvtopo.ResolvedShard + + // Use an existing shard session + sss := vcursor.Session().ShardSession() + for _, ss := range sss { + if ss.Target.Keyspace == route.Keyspace.Name { + rs = ss + break + } } - if len(rss) != 1 { - // This code is unreachable. It's just a sanity check. - return nil, fmt.Errorf("no shards for keyspace: %s", route.Keyspace.Name) + + // If not find, then pick any shard. + if rs == nil { + rss, _, err := vcursor.ResolveDestinations(ctx, route.Keyspace.Name, nil, []key.Destination{key.DestinationAnyShard{}}) + if err != nil { + return nil, err + } + if len(rss) != 1 { + // This code is unreachable. It's just a sanity check. + return nil, fmt.Errorf("no shards for keyspace: %s", route.Keyspace.Name) + } + rs = rss[0] } - qr, err := execShard(ctx, route, vcursor, route.FieldQuery, bindVars, rss[0], false /* rollbackOnError */, false /* canAutocommit */) + qr, err := execShard(ctx, route, vcursor, route.FieldQuery, bindVars, rs, false /* rollbackOnError */, false /* canAutocommit */) if err != nil { return nil, err } diff --git a/go/vt/vtgate/engine/route_test.go b/go/vt/vtgate/engine/route_test.go index ef0b494e999..13fb0be656b 100644 --- a/go/vt/vtgate/engine/route_test.go +++ b/go/vt/vtgate/engine/route_test.go @@ -24,21 +24,17 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/sqlparser" - + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/sqltypes" - querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -94,7 +90,7 @@ func TestInformationSchemaWithTableAndSchemaWithRoutedTables(t *testing.T) { stringListToExprList := func(in []string) []evalengine.Expr { var schema []evalengine.Expr for _, s := range in { - schema = append(schema, evalengine.NewLiteralString([]byte(s), collations.TypedCollation{})) + schema = append(schema, evalengine.NewLiteralString([]byte(s), collations.SystemCollation)) } return schema } @@ -109,7 +105,7 @@ func TestInformationSchemaWithTableAndSchemaWithRoutedTables(t *testing.T) { tests := []testCase{{ testName: "both schema and table predicates - routed table", tableSchema: []string{"schema"}, - tableName: map[string]evalengine.Expr{"table_name": evalengine.NewLiteralString([]byte("table"), collations.TypedCollation{})}, + tableName: map[string]evalengine.Expr{"table_name": evalengine.NewLiteralString([]byte("table"), collations.SystemCollation)}, routed: true, expectedLog: []string{ "FindTable(`schema`.`table`)", @@ -118,7 +114,7 @@ func TestInformationSchemaWithTableAndSchemaWithRoutedTables(t *testing.T) { }, { testName: "both schema and table predicates - not routed", tableSchema: []string{"schema"}, - tableName: map[string]evalengine.Expr{"table_name": evalengine.NewLiteralString([]byte("table"), collations.TypedCollation{})}, + tableName: map[string]evalengine.Expr{"table_name": evalengine.NewLiteralString([]byte("table"), collations.SystemCollation)}, routed: false, expectedLog: []string{ "FindTable(`schema`.`table`)", @@ -127,7 +123,7 @@ func TestInformationSchemaWithTableAndSchemaWithRoutedTables(t *testing.T) { }, { testName: "multiple schema and table predicates", tableSchema: []string{"schema", "schema", "schema"}, - tableName: map[string]evalengine.Expr{"t1": evalengine.NewLiteralString([]byte("table"), collations.TypedCollation{}), "t2": evalengine.NewLiteralString([]byte("table"), collations.TypedCollation{}), "t3": evalengine.NewLiteralString([]byte("table"), collations.TypedCollation{})}, + tableName: map[string]evalengine.Expr{"t1": evalengine.NewLiteralString([]byte("table"), collations.SystemCollation), "t2": evalengine.NewLiteralString([]byte("table"), collations.SystemCollation), "t3": evalengine.NewLiteralString([]byte("table"), collations.SystemCollation)}, routed: false, expectedLog: []string{ "FindTable(`schema`.`table`)", @@ -137,7 +133,7 @@ func TestInformationSchemaWithTableAndSchemaWithRoutedTables(t *testing.T) { "ExecuteMultiShard schema.1: dummy_select {__replacevtschemaname: type:INT64 value:\"1\" t1: type:VARCHAR value:\"table\" t2: type:VARCHAR value:\"table\" t3: type:VARCHAR value:\"table\"} false false"}, }, { testName: "table name predicate - routed table", - tableName: map[string]evalengine.Expr{"table_name": evalengine.NewLiteralString([]byte("tableName"), collations.TypedCollation{})}, + tableName: map[string]evalengine.Expr{"table_name": evalengine.NewLiteralString([]byte("tableName"), collations.SystemCollation)}, routed: true, expectedLog: []string{ "FindTable(tableName)", @@ -145,7 +141,7 @@ func TestInformationSchemaWithTableAndSchemaWithRoutedTables(t *testing.T) { "ExecuteMultiShard routedKeyspace.1: dummy_select {table_name: type:VARCHAR value:\"routedTable\"} false false"}, }, { testName: "table name predicate - not routed", - tableName: map[string]evalengine.Expr{"table_name": evalengine.NewLiteralString([]byte("tableName"), collations.TypedCollation{})}, + tableName: map[string]evalengine.Expr{"table_name": evalengine.NewLiteralString([]byte("tableName"), collations.SystemCollation)}, routed: false, expectedLog: []string{ "FindTable(tableName)", @@ -236,7 +232,7 @@ func TestSelectScatter(t *testing.T) { } func TestSelectEqualUnique(t *testing.T) { - vindex, _ := vindexes.NewHash("", nil) + vindex, _ := vindexes.CreateVindex("hash", "", nil) sel := NewRoute( EqualUnique, &vindexes.Keyspace{ @@ -274,7 +270,7 @@ func TestSelectEqualUnique(t *testing.T) { } func TestSelectNone(t *testing.T) { - vindex, _ := vindexes.NewHash("", nil) + vindex, _ := vindexes.CreateVindex("hash", "", nil) sel := NewRoute( None, &vindexes.Keyspace{ @@ -325,7 +321,7 @@ func TestSelectNone(t *testing.T) { } func TestSelectEqualUniqueScatter(t *testing.T) { - vindex, _ := vindexes.NewLookupUnique("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup_unique", "", map[string]string{ "table": "lkp", "from": "from", "to": "toc", @@ -368,7 +364,7 @@ func TestSelectEqualUniqueScatter(t *testing.T) { } func TestSelectEqual(t *testing.T) { - vindex, _ := vindexes.NewLookup("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup", "", map[string]string{ "table": "lkp", "from": "from", "to": "toc", @@ -421,7 +417,7 @@ func TestSelectEqual(t *testing.T) { } func TestSelectEqualNoRoute(t *testing.T) { - vindex, _ := vindexes.NewLookupUnique("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup_unique", "", map[string]string{ "table": "lkp", "from": "from", "to": "toc", @@ -485,7 +481,7 @@ func TestSelectEqualNoRoute(t *testing.T) { } func TestINUnique(t *testing.T) { - vindex, _ := vindexes.NewHash("", nil) + vindex, _ := vindexes.CreateVindex("hash", "", nil) sel := NewRoute( IN, &vindexes.Keyspace{ @@ -530,7 +526,7 @@ func TestINUnique(t *testing.T) { } func TestINNonUnique(t *testing.T) { - vindex, _ := vindexes.NewLookup("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup", "", map[string]string{ "table": "lkp", "from": "from", "to": "toc", @@ -597,7 +593,7 @@ func TestINNonUnique(t *testing.T) { } func TestMultiEqual(t *testing.T) { - vindex, _ := vindexes.NewHash("", nil) + vindex, _ := vindexes.CreateVindex("hash", "", nil) sel := NewRoute( MultiEqual, &vindexes.Keyspace{ @@ -640,7 +636,7 @@ func TestMultiEqual(t *testing.T) { } func TestSelectLike(t *testing.T) { - subshard, _ := vindexes.NewCFC("cfc", map[string]string{"hash": "md5", "offsets": "[1,2]"}) + subshard, _ := vindexes.CreateVindex("cfc", "cfc", map[string]string{"hash": "md5", "offsets": "[1,2]"}) vindex := subshard.(*vindexes.CFC).PrefixVindex() vc := &loggingVCursor{ // we have shards '-0c80', '0c80-0d', '0d-40', '40-80', '80-' @@ -660,7 +656,7 @@ func TestSelectLike(t *testing.T) { sel.Vindex = vindex sel.Values = []evalengine.Expr{ - evalengine.NewLiteralString([]byte("a%"), collations.TypedCollation{}), + evalengine.NewLiteralString([]byte("a%"), collations.SystemCollation), } // md5("a") = 0cc175b9c0f1b6a831c399e269772661 // keyspace id prefix for "a" is 0x0c @@ -690,7 +686,7 @@ func TestSelectLike(t *testing.T) { vc.Rewind() sel.Values = []evalengine.Expr{ - evalengine.NewLiteralString([]byte("ab%"), collations.TypedCollation{}), + evalengine.NewLiteralString([]byte("ab%"), collations.SystemCollation), } // md5("b") = 92eb5ffee6ae2fec3ad71c777531578f // keyspace id prefix for "ab" is 0x0c92 @@ -816,7 +812,7 @@ func TestSelectReference(t *testing.T) { } func TestRouteGetFields(t *testing.T) { - vindex, _ := vindexes.NewLookupUnique("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup_unique", "", map[string]string{ "table": "lkp", "from": "from", "to": "toc", @@ -1081,6 +1077,7 @@ func TestRouteSortCollation(t *testing.T) { sel.OrderBy = []OrderByParams{{ Col: 0, + Type: sqltypes.VarChar, CollationID: collationID, }} @@ -1147,6 +1144,7 @@ func TestRouteSortCollation(t *testing.T) { t.Run("Error when Unknown Collation", func(t *testing.T) { sel.OrderBy = []OrderByParams{{ Col: 0, + Type: sqltypes.Unknown, CollationID: collations.Unknown, }} @@ -1439,7 +1437,7 @@ func TestExecFail(t *testing.T) { expectResult(t, "sel.Execute", result, defaultSelectResult) vc.Rewind() - vc.resultErr = mysql.NewSQLError(mysql.ERQueryInterrupted, "", "query timeout -20") + vc.resultErr = sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, "", "query timeout -20") // test when there is order by column sel.OrderBy = []OrderByParams{{ WeightStringCol: -1, @@ -1447,12 +1445,12 @@ func TestExecFail(t *testing.T) { }} _, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) require.NoError(t, err, "unexpected ScatterErrorsAsWarnings error %v", err) - vc.ExpectWarnings(t, []*querypb.QueryWarning{{Code: uint32(mysql.ERQueryInterrupted), Message: "query timeout -20 (errno 1317) (sqlstate HY000)"}}) + vc.ExpectWarnings(t, []*querypb.QueryWarning{{Code: uint32(sqlerror.ERQueryInterrupted), Message: "query timeout -20 (errno 1317) (sqlstate HY000)"}}) }) } func TestSelectEqualUniqueMultiColumnVindex(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) sel := NewRoute( EqualUnique, &vindexes.Keyspace{ @@ -1491,7 +1489,7 @@ func TestSelectEqualUniqueMultiColumnVindex(t *testing.T) { } func TestSelectEqualMultiColumnVindex(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) vc := &loggingVCursor{ shards: []string{"-20", "20-"}, shardForKsid: []string{"-20", "20-"}, @@ -1528,7 +1526,7 @@ func TestSelectEqualMultiColumnVindex(t *testing.T) { } func TestINMultiColumnVindex(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) sel := NewRoute( IN, &vindexes.Keyspace{ @@ -1574,7 +1572,7 @@ func TestINMultiColumnVindex(t *testing.T) { } func TestINMixedMultiColumnComparision(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) sel := NewRoute( IN, &vindexes.Keyspace{ @@ -1617,7 +1615,7 @@ func TestINMixedMultiColumnComparision(t *testing.T) { } func TestMultiEqualMultiCol(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) sel := NewRoute( MultiEqual, &vindexes.Keyspace{Name: "ks", Sharded: true}, diff --git a/go/vt/vtgate/engine/routing.go b/go/vt/vtgate/engine/routing.go index e4ec06aeaba..a4f6dabde20 100644 --- a/go/vt/vtgate/engine/routing.go +++ b/go/vt/vtgate/engine/routing.go @@ -190,14 +190,14 @@ func (rp *RoutingParameters) routeInfoSchemaQuery(ctx context.Context, vcursor V return defaultRoute() } - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) var specifiedKS string for _, tableSchema := range rp.SysTableTableSchema { result, err := env.Evaluate(tableSchema) if err != nil { return nil, err } - ks := result.Value().ToString() + ks := result.Value(vcursor.ConnCollation()).ToString() if specifiedKS == "" { specifiedKS = ks } @@ -215,7 +215,7 @@ func (rp *RoutingParameters) routeInfoSchemaQuery(ctx context.Context, vcursor V if err != nil { return nil, err } - tabName := val.Value().ToString() + tabName := val.Value(vcursor.ConnCollation()).ToString() tableNames[tblBvName] = tabName bindVars[tblBvName] = sqltypes.StringBindVariable(tabName) } @@ -333,12 +333,12 @@ func (rp *RoutingParameters) byDestination(ctx context.Context, vcursor VCursor, } func (rp *RoutingParameters) equal(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) ([]*srvtopo.ResolvedShard, []map[string]*querypb.BindVariable, error) { - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) value, err := env.Evaluate(rp.Values[0]) if err != nil { return nil, nil, err } - rss, _, err := resolveShards(ctx, vcursor, rp.Vindex.(vindexes.SingleColumn), rp.Keyspace, []sqltypes.Value{value.Value()}) + rss, _, err := resolveShards(ctx, vcursor, rp.Vindex.(vindexes.SingleColumn), rp.Keyspace, []sqltypes.Value{value.Value(vcursor.ConnCollation())}) if err != nil { return nil, nil, err } @@ -350,14 +350,14 @@ func (rp *RoutingParameters) equal(ctx context.Context, vcursor VCursor, bindVar } func (rp *RoutingParameters) equalMultiCol(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) ([]*srvtopo.ResolvedShard, []map[string]*querypb.BindVariable, error) { - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) var rowValue []sqltypes.Value for _, rvalue := range rp.Values { v, err := env.Evaluate(rvalue) if err != nil { return nil, nil, err } - rowValue = append(rowValue, v.Value()) + rowValue = append(rowValue, v.Value(vcursor.ConnCollation())) } rss, _, err := resolveShardsMultiCol(ctx, vcursor, rp.Vindex.(vindexes.MultiColumn), rp.Keyspace, [][]sqltypes.Value{rowValue}, false /* shardIdsNeeded */) @@ -372,7 +372,7 @@ func (rp *RoutingParameters) equalMultiCol(ctx context.Context, vcursor VCursor, } func (rp *RoutingParameters) in(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) ([]*srvtopo.ResolvedShard, []map[string]*querypb.BindVariable, error) { - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) value, err := env.Evaluate(rp.Values[0]) if err != nil { return nil, nil, err @@ -385,7 +385,7 @@ func (rp *RoutingParameters) in(ctx context.Context, vcursor VCursor, bindVars m } func (rp *RoutingParameters) inMultiCol(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) ([]*srvtopo.ResolvedShard, []map[string]*querypb.BindVariable, error) { - rowColValues, isSingleVal, err := generateRowColValues(vcursor, bindVars, rp.Values) + rowColValues, isSingleVal, err := generateRowColValues(ctx, vcursor, bindVars, rp.Values) if err != nil { return nil, nil, err } @@ -398,7 +398,7 @@ func (rp *RoutingParameters) inMultiCol(ctx context.Context, vcursor VCursor, bi } func (rp *RoutingParameters) multiEqual(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) ([]*srvtopo.ResolvedShard, []map[string]*querypb.BindVariable, error) { - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) value, err := env.Evaluate(rp.Values[0]) if err != nil { return nil, nil, err @@ -416,7 +416,7 @@ func (rp *RoutingParameters) multiEqual(ctx context.Context, vcursor VCursor, bi func (rp *RoutingParameters) multiEqualMultiCol(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) ([]*srvtopo.ResolvedShard, []map[string]*querypb.BindVariable, error) { var multiColValues [][]sqltypes.Value - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) for _, rvalue := range rp.Values { v, err := env.Evaluate(rvalue) if err != nil { @@ -559,12 +559,12 @@ func shardVarsMultiCol(bv map[string]*querypb.BindVariable, mapVals [][][]*query return shardVars } -func generateRowColValues(vcursor VCursor, bindVars map[string]*querypb.BindVariable, values []evalengine.Expr) ([][]sqltypes.Value, map[int]any, error) { +func generateRowColValues(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, values []evalengine.Expr) ([][]sqltypes.Value, map[int]any, error) { // gather values from all the column in the vindex var multiColValues [][]sqltypes.Value var lv []sqltypes.Value isSingleVal := map[int]any{} - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) for colIdx, rvalue := range values { result, err := env.Evaluate(rvalue) if err != nil { @@ -577,7 +577,7 @@ func generateRowColValues(vcursor VCursor, bindVars map[string]*querypb.BindVari return nil, nil, err } isSingleVal[colIdx] = nil - lv = []sqltypes.Value{v.Value()} + lv = []sqltypes.Value{v.Value(vcursor.ConnCollation())} } multiColValues = append(multiColValues, lv) } diff --git a/go/vt/vtgate/engine/scalar_aggregation.go b/go/vt/vtgate/engine/scalar_aggregation.go index a1a76091689..6190e2e5fd6 100644 --- a/go/vt/vtgate/engine/scalar_aggregation.go +++ b/go/vt/vtgate/engine/scalar_aggregation.go @@ -20,22 +20,14 @@ import ( "context" "sync" - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" ) var _ Primitive = (*ScalarAggregate)(nil) // ScalarAggregate is a primitive used to do aggregations without grouping keys type ScalarAggregate struct { - // PreProcess is true if one of the aggregates needs preprocessing. - PreProcess bool `json:",omitempty"` - - AggrOnEngine bool - // Aggregates specifies the aggregation parameters for each // aggregation function: function opcode and input column number. Aggregates []*AggregateParams @@ -45,10 +37,6 @@ type ScalarAggregate struct { // from the result received. If 0, no truncation happens. TruncateColumnCount int `json:",omitempty"` - // Collations stores the collation ID per column offset. - // It is used for grouping keys and distinct aggregate functions - Collations map[int]collations.ID - // Input is the primitive that will feed into this Primitive. Input Primitive } @@ -75,7 +63,13 @@ func (sa *ScalarAggregate) GetFields(ctx context.Context, vcursor VCursor, bindV if err != nil { return nil, err } - qr = &sqltypes.Result{Fields: convertFields(qr.Fields, sa.PreProcess, sa.Aggregates, sa.AggrOnEngine)} + + _, fields, err := newAggregation(qr.Fields, sa.Aggregates) + if err != nil { + return nil, err + } + + qr = &sqltypes.Result{Fields: fields} return qr.Truncate(sa.TruncateColumnCount), nil } @@ -90,38 +84,22 @@ func (sa *ScalarAggregate) TryExecute(ctx context.Context, vcursor VCursor, bind if err != nil { return nil, err } - out := &sqltypes.Result{ - Fields: convertFields(result.Fields, sa.PreProcess, sa.Aggregates, sa.AggrOnEngine), + + agg, fields, err := newAggregation(result.Fields, sa.Aggregates) + if err != nil { + return nil, err } - var resultRow []sqltypes.Value - var curDistincts []sqltypes.Value for _, row := range result.Rows { - if resultRow == nil { - resultRow, curDistincts = convertRow(row, sa.PreProcess, sa.Aggregates, sa.AggrOnEngine) - continue - } - resultRow, curDistincts, err = merge(result.Fields, resultRow, row, curDistincts, sa.Collations, sa.Aggregates) - if err != nil { + if err := agg.add(row); err != nil { return nil, err } } - if resultRow == nil { - // When doing aggregation without grouping keys, we need to produce a single row containing zero-value for the - // different aggregation functions - resultRow, err = sa.createEmptyRow() - if err != nil { - return nil, err - } - } else { - resultRow, err = convertFinal(resultRow, sa.Aggregates) - if err != nil { - return nil, err - } + out := &sqltypes.Result{ + Fields: fields, + Rows: [][]sqltypes.Value{agg.finish()}, } - - out.Rows = [][]sqltypes.Value{resultRow} return out.Truncate(sa.TruncateColumnCount), nil } @@ -130,11 +108,11 @@ func (sa *ScalarAggregate) TryStreamExecute(ctx context.Context, vcursor VCursor cb := func(qr *sqltypes.Result) error { return callback(qr.Truncate(sa.TruncateColumnCount)) } - var current []sqltypes.Value - var curDistincts []sqltypes.Value - var fields []*querypb.Field - fieldsSent := false + var mu sync.Mutex + var agg aggregationState + var fields []*querypb.Field + var fieldsSent bool err := vcursor.StreamExecutePrimitive(ctx, sa.Input, bindVars, wantfields, func(result *sqltypes.Result) error { // as the underlying primitive call is not sync @@ -142,23 +120,23 @@ func (sa *ScalarAggregate) TryStreamExecute(ctx context.Context, vcursor VCursor // for correct aggregation. mu.Lock() defer mu.Unlock() - if len(result.Fields) != 0 && !fieldsSent { - fields = convertFields(result.Fields, sa.PreProcess, sa.Aggregates, sa.AggrOnEngine) + + if agg == nil { + var err error + agg, fields, err = newAggregation(result.Fields, sa.Aggregates) + if err != nil { + return err + } + } + if !fieldsSent { if err := cb(&sqltypes.Result{Fields: fields}); err != nil { return err } fieldsSent = true } - // this code is very similar to the TryExecute method for _, row := range result.Rows { - if current == nil { - current, curDistincts = convertRow(row, sa.PreProcess, sa.Aggregates, sa.AggrOnEngine) - continue - } - var err error - current, curDistincts, err = merge(fields, current, row, curDistincts, sa.Collations, sa.Aggregates) - if err != nil { + if err := agg.add(row); err != nil { return err } } @@ -168,61 +146,12 @@ func (sa *ScalarAggregate) TryStreamExecute(ctx context.Context, vcursor VCursor return err } - if current == nil { - // When doing aggregation without grouping keys, we need to produce a single row containing zero-value for the - // different aggregation functions - current, err = sa.createEmptyRow() - if err != nil { - return err - } - } else { - current, err = convertFinal(current, sa.Aggregates) - if err != nil { - return err - } - } - - return cb(&sqltypes.Result{Rows: [][]sqltypes.Value{current}}) -} - -// creates the empty row for the case when we are missing grouping keys and have empty input table -func (sa *ScalarAggregate) createEmptyRow() ([]sqltypes.Value, error) { - out := make([]sqltypes.Value, len(sa.Aggregates)) - for i, aggr := range sa.Aggregates { - op := aggr.Opcode - if aggr.OrigOpcode != AggregateUnassigned { - op = aggr.OrigOpcode - } - value, err := createEmptyValueFor(op) - if err != nil { - return nil, err - } - out[i] = value - } - return out, nil -} - -func createEmptyValueFor(opcode AggregateOpcode) (sqltypes.Value, error) { - switch opcode { - case - AggregateCountDistinct, - AggregateCount, - AggregateCountStar: - return countZero, nil - case - AggregateSumDistinct, - AggregateSum, - AggregateMin, - AggregateMax: - return sqltypes.NULL, nil - - } - return sqltypes.NULL, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "unknown aggregation %v", opcode) + return cb(&sqltypes.Result{Rows: [][]sqltypes.Value{agg.finish()}}) } // Inputs implements the Primitive interface -func (sa *ScalarAggregate) Inputs() []Primitive { - return []Primitive{sa.Input} +func (sa *ScalarAggregate) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{sa.Input}, nil } // description implements the Primitive interface diff --git a/go/vt/vtgate/engine/scalar_aggregation_test.go b/go/vt/vtgate/engine/scalar_aggregation_test.go index 15e72639f3d..3329fc72d39 100644 --- a/go/vt/vtgate/engine/scalar_aggregation_test.go +++ b/go/vt/vtgate/engine/scalar_aggregation_test.go @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" + . "vitess.io/vitess/go/vt/vtgate/engine/opcode" ) func TestEmptyRows(outer *testing.T) { @@ -48,7 +50,7 @@ func TestEmptyRows(outer *testing.T) { }, { opcode: AggregateSum, expectedVal: "null", - expectedTyp: "int64", + expectedTyp: "decimal", }, { opcode: AggregateSum, expectedVal: "0", @@ -66,7 +68,6 @@ func TestEmptyRows(outer *testing.T) { for _, test := range testCases { outer.Run(test.opcode.String(), func(t *testing.T) { - assert := assert.New(t) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -78,7 +79,6 @@ func TestEmptyRows(outer *testing.T) { } oa := &ScalarAggregate{ - PreProcess: true, Aggregates: []*AggregateParams{{ Opcode: test.opcode, Col: 0, @@ -89,7 +89,7 @@ func TestEmptyRows(outer *testing.T) { } result, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) - assert.NoError(err) + assert.NoError(t, err) wantResult := sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -98,13 +98,12 @@ func TestEmptyRows(outer *testing.T) { ), test.expectedVal, ) - assert.Equal(wantResult, result) + utils.MustMatch(t, wantResult, result) }) } } func TestScalarAggregateStreamExecute(t *testing.T) { - assert := assert.New(t) fields := sqltypes.MakeTestFields( "col|weight_string(col)", "uint64|varbinary", @@ -126,7 +125,6 @@ func TestScalarAggregateStreamExecute(t *testing.T) { }}, Input: fp, TruncateColumnCount: 1, - PreProcess: true, } var results []*sqltypes.Result @@ -134,17 +132,16 @@ func TestScalarAggregateStreamExecute(t *testing.T) { results = append(results, qr) return nil }) - assert.NoError(err) + assert.NoError(t, err) // one for the fields, and one for the actual aggregation result require.EqualValues(t, 2, len(results), "number of results") got := fmt.Sprintf("%v", results[1].Rows) - assert.Equal("[[UINT64(4)]]", got) + assert.Equal(t, "[[DECIMAL(4)]]", got) } // TestScalarAggregateExecuteTruncate checks if truncate works func TestScalarAggregateExecuteTruncate(t *testing.T) { - assert := assert.New(t) fields := sqltypes.MakeTestFields( "col|weight_string(col)", "uint64|varbinary", @@ -165,10 +162,255 @@ func TestScalarAggregateExecuteTruncate(t *testing.T) { }}, Input: fp, TruncateColumnCount: 1, - PreProcess: true, } qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, true) - assert.NoError(err) - assert.Equal("[[UINT64(4)]]", fmt.Sprintf("%v", qr.Rows)) + assert.NoError(t, err) + assert.Equal(t, "[[DECIMAL(4)]]", fmt.Sprintf("%v", qr.Rows)) +} + +// TestScalarGroupConcatWithAggrOnEngine tests group_concat with full aggregation on engine. +func TestScalarGroupConcatWithAggrOnEngine(t *testing.T) { + fields := sqltypes.MakeTestFields( + "c2", + "varchar", + ) + + varbinaryFields := sqltypes.MakeTestFields( + "c2", + "varbinary", + ) + + textOutFields := sqltypes.MakeTestFields( + "group_concat(c2)", + "text", + ) + + var tcases = []struct { + name string + inputResult *sqltypes.Result + expResult *sqltypes.Result + }{{ + name: "ending with null", + inputResult: sqltypes.MakeTestResult(fields, + "a", "a", "b", "null", "null"), + expResult: sqltypes.MakeTestResult(textOutFields, + `a,a,b`), + }, { + name: "empty result", + inputResult: sqltypes.MakeTestResult(fields), + expResult: sqltypes.MakeTestResult(textOutFields, + `null`), + }, { + name: "only null value for concat", + inputResult: sqltypes.MakeTestResult(fields, + "null", "null", "null"), + expResult: sqltypes.MakeTestResult(textOutFields, + `null`), + }, { + name: "empty string value for concat", + inputResult: sqltypes.MakeTestResult(fields, + "", "", ""), + expResult: sqltypes.MakeTestResult(textOutFields, + `,,`), + }, { + name: "varbinary column", + inputResult: sqltypes.MakeTestResult(varbinaryFields, + "foo", "null", "bar"), + expResult: sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "group_concat(c2)", + "blob", + ), + `foo,bar`), + }} + + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + fp := &fakePrimitive{results: []*sqltypes.Result{tcase.inputResult}} + oa := &ScalarAggregate{ + Aggregates: []*AggregateParams{{ + Opcode: AggregateGroupConcat, + Col: 0, + Alias: "group_concat(c2)", + }}, + Input: fp, + } + qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) + require.NoError(t, err) + utils.MustMatch(t, tcase.expResult, qr) + + fp.rewind() + results := &sqltypes.Result{} + err = oa.TryStreamExecute(context.Background(), &noopVCursor{}, nil, true, func(qr *sqltypes.Result) error { + if qr.Fields != nil { + results.Fields = qr.Fields + } + results.Rows = append(results.Rows, qr.Rows...) + return nil + }) + require.NoError(t, err) + utils.MustMatch(t, tcase.expResult, results) + }) + } +} + +// TestScalarDistinctAggr tests distinct aggregation on engine. +func TestScalarDistinctAggrOnEngine(t *testing.T) { + fields := sqltypes.MakeTestFields( + "value|value", + "int64|int64", + ) + + fp := &fakePrimitive{results: []*sqltypes.Result{sqltypes.MakeTestResult( + fields, + "100|100", + "200|200", + "200|200", + "400|400", + "400|400", + "600|600", + )}} + + oa := &ScalarAggregate{ + Aggregates: []*AggregateParams{ + NewAggregateParam(AggregateCountDistinct, 0, "count(distinct value)"), + NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct value)"), + }, + Input: fp, + } + qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) + require.NoError(t, err) + require.Equal(t, `[[INT64(4) DECIMAL(1300)]]`, fmt.Sprintf("%v", qr.Rows)) + + fp.rewind() + results := &sqltypes.Result{} + err = oa.TryStreamExecute(context.Background(), &noopVCursor{}, nil, true, func(qr *sqltypes.Result) error { + if qr.Fields != nil { + results.Fields = qr.Fields + } + results.Rows = append(results.Rows, qr.Rows...) + return nil + }) + require.NoError(t, err) + require.Equal(t, `[[INT64(4) DECIMAL(1300)]]`, fmt.Sprintf("%v", results.Rows)) +} + +func TestScalarDistinctPushedDown(t *testing.T) { + fields := sqltypes.MakeTestFields( + "count(distinct value)|sum(distinct value)", + "int64|decimal", + ) + + fp := &fakePrimitive{results: []*sqltypes.Result{sqltypes.MakeTestResult( + fields, + "2|200", + "6|400", + "3|700", + "1|10", + "7|30", + "8|90", + )}} + + countAggr := NewAggregateParam(AggregateSum, 0, "count(distinct value)") + countAggr.OrigOpcode = AggregateCountDistinct + sumAggr := NewAggregateParam(AggregateSum, 1, "sum(distinct value)") + sumAggr.OrigOpcode = AggregateSumDistinct + oa := &ScalarAggregate{ + Aggregates: []*AggregateParams{ + countAggr, + sumAggr, + }, + Input: fp, + } + qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) + require.NoError(t, err) + require.Equal(t, `[[INT64(27) DECIMAL(1430)]]`, fmt.Sprintf("%v", qr.Rows)) + + fp.rewind() + results := &sqltypes.Result{} + err = oa.TryStreamExecute(context.Background(), &noopVCursor{}, nil, true, func(qr *sqltypes.Result) error { + if qr.Fields != nil { + results.Fields = qr.Fields + } + results.Rows = append(results.Rows, qr.Rows...) + return nil + }) + require.NoError(t, err) + require.Equal(t, `[[INT64(27) DECIMAL(1430)]]`, fmt.Sprintf("%v", results.Rows)) +} + +// TestScalarGroupConcat tests group_concat with partial aggregation on engine. +func TestScalarGroupConcat(t *testing.T) { + fields := sqltypes.MakeTestFields( + "group_concat(c2)", + "text", + ) + + varbinaryFields := sqltypes.MakeTestFields( + "group_concat(c2)", + "blob", + ) + + var tcases = []struct { + name string + inputResult *sqltypes.Result + expResult *sqltypes.Result + }{{ + name: "ending with null", + inputResult: sqltypes.MakeTestResult(fields, + "a", "a", "b", "null", "null"), + expResult: sqltypes.MakeTestResult(fields, + `a,a,b`), + }, { + name: "empty result", + inputResult: sqltypes.MakeTestResult(fields), + expResult: sqltypes.MakeTestResult(fields, + `null`), + }, { + name: "only null value for concat", + inputResult: sqltypes.MakeTestResult(fields, + "null", "null", "null"), + expResult: sqltypes.MakeTestResult(fields, + `null`), + }, { + name: "empty string value for concat", + inputResult: sqltypes.MakeTestResult(fields, + "", "", ""), + expResult: sqltypes.MakeTestResult(fields, + `,,`), + }, { + name: "varbinary column", + inputResult: sqltypes.MakeTestResult(varbinaryFields, + "foo", "null", "bar"), + expResult: sqltypes.MakeTestResult(varbinaryFields, + `foo,bar`), + }} + + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + fp := &fakePrimitive{results: []*sqltypes.Result{tcase.inputResult}} + oa := &ScalarAggregate{ + Aggregates: []*AggregateParams{{ + Opcode: AggregateGroupConcat, + Col: 0, + }}, + Input: fp, + } + qr, err := oa.TryExecute(context.Background(), &noopVCursor{}, nil, false) + require.NoError(t, err) + assert.Equal(t, tcase.expResult, qr) + + fp.rewind() + results := &sqltypes.Result{} + err = oa.TryStreamExecute(context.Background(), &noopVCursor{}, nil, true, func(qr *sqltypes.Result) error { + if qr.Fields != nil { + results.Fields = qr.Fields + } + results.Rows = append(results.Rows, qr.Rows...) + return nil + }) + require.NoError(t, err) + assert.Equal(t, tcase.expResult, results) + }) + } } diff --git a/go/vt/vtgate/engine/semi_join.go b/go/vt/vtgate/engine/semi_join.go index 2b08fe0f26e..25eeb7f9293 100644 --- a/go/vt/vtgate/engine/semi_join.go +++ b/go/vt/vtgate/engine/semi_join.go @@ -102,8 +102,12 @@ func (jn *SemiJoin) GetFields(ctx context.Context, vcursor VCursor, bindVars map } // Inputs returns the input primitives for this SemiJoin -func (jn *SemiJoin) Inputs() []Primitive { - return []Primitive{jn.Left, jn.Right} +func (jn *SemiJoin) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{jn.Left, jn.Right}, []map[string]any{{ + inputName: "Outer", + }, { + inputName: "SubQuery", + }} } // RouteType returns a description of the query routing type used by the primitive diff --git a/go/vt/vtgate/engine/set.go b/go/vt/vtgate/engine/set.go index 7b253d5f034..768581a7504 100644 --- a/go/vt/vtgate/engine/set.go +++ b/go/vt/vtgate/engine/set.go @@ -127,9 +127,8 @@ func (s *Set) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[stri if len(input.Rows) != 1 { return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "should get a single row") } - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) env.Row = input.Rows[0] - env.Fields = input.Fields for _, setOp := range s.Ops { err := setOp.Execute(ctx, vcursor, env) if err != nil { @@ -154,8 +153,8 @@ func (s *Set) GetFields(context.Context, VCursor, map[string]*querypb.BindVariab } // Inputs implements the Primitive interface -func (s *Set) Inputs() []Primitive { - return []Primitive{s.Input} +func (s *Set) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{s.Input}, nil } func (s *Set) description() PrimitiveDescription { @@ -195,7 +194,7 @@ func (u *UserDefinedVariable) Execute(ctx context.Context, vcursor VCursor, env if err != nil { return err } - return vcursor.Session().SetUDV(u.Name, value.Value()) + return vcursor.Session().SetUDV(u.Name, value.Value(vcursor.ConnCollation())) } var _ SetOp = (*SysVarIgnore)(nil) @@ -460,13 +459,13 @@ func (svss *SysVarSetAware) Execute(ctx context.Context, vcursor VCursor, env *e noop := func(context.Context, bool) error { return nil } err = svss.setBoolSysVar(ctx, env, noop) case sysvars.SQLSelectLimit.Name: - intValue, err := svss.evalAsInt64(env) + intValue, err := svss.evalAsInt64(env, vcursor) if err != nil { return err } vcursor.Session().SetSQLSelectLimit(intValue) // nolint:errcheck case sysvars.TransactionMode.Name: - str, err := svss.evalAsString(env) + str, err := svss.evalAsString(env, vcursor) if err != nil { return err } @@ -476,7 +475,7 @@ func (svss *SysVarSetAware) Execute(ctx context.Context, vcursor VCursor, env *e } vcursor.Session().SetTransactionMode(vtgatepb.TransactionMode(out)) case sysvars.Workload.Name: - str, err := svss.evalAsString(env) + str, err := svss.evalAsString(env, vcursor) if err != nil { return err } @@ -486,7 +485,7 @@ func (svss *SysVarSetAware) Execute(ctx context.Context, vcursor VCursor, env *e } vcursor.Session().SetWorkload(querypb.ExecuteOptions_Workload(out)) case sysvars.DDLStrategy.Name: - str, err := svss.evalAsString(env) + str, err := svss.evalAsString(env, vcursor) if err != nil { return err } @@ -494,8 +493,17 @@ func (svss *SysVarSetAware) Execute(ctx context.Context, vcursor VCursor, env *e return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValueForVar, "invalid DDL strategy: %s", str) } vcursor.Session().SetDDLStrategy(str) + case sysvars.MigrationContext.Name: + str, err := svss.evalAsString(env, vcursor) + if err != nil { + return err + } + if err := schema.ValidateMigrationContext(str); err != nil { + return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValueForVar, "invalid migration_context: %s", str) + } + vcursor.Session().SetMigrationContext(str) case sysvars.QueryTimeout.Name: - queryTimeout, err := svss.evalAsInt64(env) + queryTimeout, err := svss.evalAsInt64(env, vcursor) if err != nil { return err } @@ -503,7 +511,7 @@ func (svss *SysVarSetAware) Execute(ctx context.Context, vcursor VCursor, env *e case sysvars.SessionEnableSystemSettings.Name: err = svss.setBoolSysVar(ctx, env, vcursor.Session().SetSessionEnableSystemSettings) case sysvars.Charset.Name, sysvars.Names.Name: - str, err := svss.evalAsString(env) + str, err := svss.evalAsString(env, vcursor) if err != nil { return err } @@ -515,19 +523,19 @@ func (svss *SysVarSetAware) Execute(ctx context.Context, vcursor VCursor, env *e return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "charset/name %v is not supported", str) } case sysvars.ReadAfterWriteGTID.Name: - str, err := svss.evalAsString(env) + str, err := svss.evalAsString(env, vcursor) if err != nil { return err } vcursor.Session().SetReadAfterWriteGTID(str) case sysvars.ReadAfterWriteTimeOut.Name: - val, err := svss.evalAsFloat(env) + val, err := svss.evalAsFloat(env, vcursor) if err != nil { return err } vcursor.Session().SetReadAfterWriteTimeout(val) case sysvars.SessionTrackGTIDs.Name: - str, err := svss.evalAsString(env) + str, err := svss.evalAsString(env, vcursor) if err != nil { return err } @@ -546,15 +554,15 @@ func (svss *SysVarSetAware) Execute(ctx context.Context, vcursor VCursor, env *e return err } -func (svss *SysVarSetAware) evalAsInt64(env *evalengine.ExpressionEnv) (int64, error) { +func (svss *SysVarSetAware) evalAsInt64(env *evalengine.ExpressionEnv, vcursor VCursor) (int64, error) { value, err := env.Evaluate(svss.Expr) if err != nil { return 0, err } - v := value.Value() + v := value.Value(vcursor.ConnCollation()) if !v.IsIntegral() { - return 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongTypeForVar, "incorrect argument type to variable '%s': %s", svss.Name, value.Value().Type().String()) + return 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongTypeForVar, "incorrect argument type to variable '%s': %s", svss.Name, v.Type().String()) } intValue, err := v.ToInt64() if err != nil { @@ -563,28 +571,28 @@ func (svss *SysVarSetAware) evalAsInt64(env *evalengine.ExpressionEnv) (int64, e return intValue, nil } -func (svss *SysVarSetAware) evalAsFloat(env *evalengine.ExpressionEnv) (float64, error) { +func (svss *SysVarSetAware) evalAsFloat(env *evalengine.ExpressionEnv, vcursor VCursor) (float64, error) { value, err := env.Evaluate(svss.Expr) if err != nil { return 0, err } - v := value.Value() + v := value.Value(vcursor.ConnCollation()) floatValue, err := v.ToFloat64() if err != nil { - return 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongTypeForVar, "incorrect argument type to variable '%s': %s", svss.Name, value.Value().Type().String()) + return 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongTypeForVar, "incorrect argument type to variable '%s': %s", svss.Name, v.Type().String()) } return floatValue, nil } -func (svss *SysVarSetAware) evalAsString(env *evalengine.ExpressionEnv) (string, error) { +func (svss *SysVarSetAware) evalAsString(env *evalengine.ExpressionEnv, vcursor VCursor) (string, error) { value, err := env.Evaluate(svss.Expr) if err != nil { return "", err } - v := value.Value() + v := value.Value(vcursor.ConnCollation()) if !v.IsText() && !v.IsBinary() { - return "", vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongTypeForVar, "incorrect argument type to variable '%s': %s", svss.Name, value.Value().Type().String()) + return "", vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongTypeForVar, "incorrect argument type to variable '%s': %s", svss.Name, v.Type().String()) } return v.ToString(), nil diff --git a/go/vt/vtgate/engine/set_test.go b/go/vt/vtgate/engine/set_test.go index d66b7406187..62ffa42b8d6 100644 --- a/go/vt/vtgate/engine/set_test.go +++ b/go/vt/vtgate/engine/set_test.go @@ -22,12 +22,12 @@ import ( "fmt" "testing" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/sqltypes" @@ -108,7 +108,7 @@ func TestSetTable(t *testing.T) { setOps: []SetOp{ &UserDefinedVariable{ Name: "x", - Expr: evalengine.NewColumn(0, collations.TypedCollation{}), + Expr: evalengine.NewColumn(0, sqltypes.Unknown, collations.Unknown), }, }, qr: []*sqltypes.Result{sqltypes.MakeTestResult( @@ -116,12 +116,12 @@ func TestSetTable(t *testing.T) { "col0", "datetime", ), - "2020-10-28", + "2020-10-28 00:00:00", )}, expectedQueryLog: []string{ `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `ExecuteMultiShard ks.-20: select now() from dual {} false false`, - `UDV set with (x,DATETIME("2020-10-28"))`, + `UDV set with (x,DATETIME("2020-10-28 00:00:00"))`, }, input: &Send{ Keyspace: ks, diff --git a/go/vt/vtgate/engine/simple_projection.go b/go/vt/vtgate/engine/simple_projection.go index 774fabb4d4a..1a4f4ce92c4 100644 --- a/go/vt/vtgate/engine/simple_projection.go +++ b/go/vt/vtgate/engine/simple_projection.go @@ -79,8 +79,8 @@ func (sc *SimpleProjection) GetFields(ctx context.Context, vcursor VCursor, bind } // Inputs returns the input to this primitive -func (sc *SimpleProjection) Inputs() []Primitive { - return []Primitive{sc.Input} +func (sc *SimpleProjection) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{sc.Input}, nil } // buildResult builds a new result by pulling the necessary columns from diff --git a/go/vt/vtgate/engine/sql_calc_found_rows.go b/go/vt/vtgate/engine/sql_calc_found_rows.go index 9553023069c..2472bfd1d14 100644 --- a/go/vt/vtgate/engine/sql_calc_found_rows.go +++ b/go/vt/vtgate/engine/sql_calc_found_rows.go @@ -23,7 +23,6 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine" ) var _ Primitive = (*SQLCalcFoundRows)(nil) @@ -62,7 +61,7 @@ func (s SQLCalcFoundRows) TryExecute(ctx context.Context, vcursor VCursor, bindV if len(countQr.Rows) != 1 || len(countQr.Rows[0]) != 1 { return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "count query is not a scalar") } - fr, err := evalengine.ToUint64(countQr.Rows[0][0]) + fr, err := countQr.Rows[0][0].ToCastUint64() if err != nil { return nil, err } @@ -87,7 +86,7 @@ func (s SQLCalcFoundRows) TryStreamExecute(ctx context.Context, vcursor VCursor, if len(countQr.Rows) != 1 || len(countQr.Rows[0]) != 1 { return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "count query is not a scalar") } - toUint64, err := evalengine.ToUint64(countQr.Rows[0][0]) + toUint64, err := countQr.Rows[0][0].ToCastUint64() if err != nil { return err } @@ -115,8 +114,8 @@ func (s SQLCalcFoundRows) NeedsTransaction() bool { } // Inputs implements the Primitive interface -func (s SQLCalcFoundRows) Inputs() []Primitive { - return []Primitive{s.LimitPrimitive, s.CountPrimitive} +func (s SQLCalcFoundRows) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{s.LimitPrimitive, s.CountPrimitive}, nil } func (s SQLCalcFoundRows) description() PrimitiveDescription { diff --git a/go/vt/vtgate/engine/throttle_app.go b/go/vt/vtgate/engine/throttle_app.go new file mode 100644 index 00000000000..db485e6bec3 --- /dev/null +++ b/go/vt/vtgate/engine/throttle_app.go @@ -0,0 +1,89 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +var _ Primitive = (*ThrottleApp)(nil) + +// ThrottleApp represents the instructions to perform an online schema change via vtctld +type ThrottleApp struct { + Keyspace *vindexes.Keyspace + ThrottledAppRule *topodatapb.ThrottledAppRule + + noTxNeeded + + noInputs +} + +func (v *ThrottleApp) description() PrimitiveDescription { + return PrimitiveDescription{ + OperatorType: "ThrottleApp", + Keyspace: v.Keyspace, + Other: map[string]any{ + "appName": v.ThrottledAppRule.Name, + "expireAt": v.ThrottledAppRule.ExpiresAt, + "ratio": v.ThrottledAppRule.Ratio, + }, + } +} + +// RouteType implements the Primitive interface +func (v *ThrottleApp) RouteType() string { + return "ThrottleApp" +} + +// GetKeyspaceName implements the Primitive interface +func (v *ThrottleApp) GetKeyspaceName() string { + return v.Keyspace.Name +} + +// GetTableName implements the Primitive interface +func (v *ThrottleApp) GetTableName() string { + return "" +} + +// TryExecute implements the Primitive interface +func (v *ThrottleApp) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (result *sqltypes.Result, err error) { + if err := vcursor.ThrottleApp(ctx, v.ThrottledAppRule); err != nil { + return nil, err + } + return &sqltypes.Result{}, nil +} + +// TryStreamExecute implements the Primitive interface +func (v *ThrottleApp) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + results, err := v.TryExecute(ctx, vcursor, bindVars, wantfields) + if err != nil { + return err + } + return callback(results) +} + +// GetFields implements the Primitive interface +func (v *ThrottleApp) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] GetFields is not reachable") +} diff --git a/go/vt/vtgate/engine/update.go b/go/vt/vtgate/engine/update.go index 8a26fa87629..093d0a73b80 100644 --- a/go/vt/vtgate/engine/update.go +++ b/go/vt/vtgate/engine/update.go @@ -120,7 +120,7 @@ func (upd *Update) updateVindexEntries(ctx context.Context, vcursor VCursor, bin for colNum, field := range subQueryResult.Fields { fieldColNumMap[field.Name] = colNum } - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) for _, row := range subQueryResult.Rows { ksid, err := resolveKeyspaceID(ctx, vcursor, upd.KsidVindex, row[0:upd.KsidLength]) @@ -128,11 +128,7 @@ func (upd *Update) updateVindexEntries(ctx context.Context, vcursor VCursor, bin return err } - vindexTable, err := upd.GetSingleTable() - if err != nil { - return err - } - for _, colVindex := range vindexTable.ColumnVindexes { + for _, colVindex := range upd.Vindexes { // Skip this vindex if no rows are being changed updColValues, ok := upd.ChangedVindexValues[colVindex.Name] if !ok { @@ -141,7 +137,7 @@ func (upd *Update) updateVindexEntries(ctx context.Context, vcursor VCursor, bin offset := updColValues.Offset if !row[offset].IsNull() { - val, err := evalengine.ToInt64(row[offset]) + val, err := row[offset].ToCastInt64() if err != nil { return err } @@ -161,7 +157,7 @@ func (upd *Update) updateVindexEntries(ctx context.Context, vcursor VCursor, bin if err != nil { return err } - vindexColumnKeys = append(vindexColumnKeys, resolvedVal.Value()) + vindexColumnKeys = append(vindexColumnKeys, resolvedVal.Value(vcursor.ConnCollation())) } else { // Set the column value to original as this column in vindex is not updated. vindexColumnKeys = append(vindexColumnKeys, origColValue) diff --git a/go/vt/vtgate/engine/update_test.go b/go/vt/vtgate/engine/update_test.go index e75f8dec3f6..026b23aa20d 100644 --- a/go/vt/vtgate/engine/update_test.go +++ b/go/vt/vtgate/engine/update_test.go @@ -69,7 +69,7 @@ func TestUpdateUnsharded(t *testing.T) { } func TestUpdateEqual(t *testing.T) { - vindex, _ := vindexes.NewHash("", nil) + vindex, _ := vindexes.CreateVindex("hash", "", nil) upd := &Update{ DML: &DML{ RoutingParameters: &RoutingParameters{ @@ -94,13 +94,13 @@ func TestUpdateEqual(t *testing.T) { }) // Failure case - upd.Values = []evalengine.Expr{evalengine.NewBindVar("aa", collations.TypedCollation{})} + upd.Values = []evalengine.Expr{evalengine.NewBindVar("aa", sqltypes.Unknown, collations.Unknown)} _, err = upd.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) require.EqualError(t, err, `query arguments missing for aa`) } func TestUpdateEqualMultiCol(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) upd := &Update{ DML: &DML{ RoutingParameters: &RoutingParameters{ @@ -126,7 +126,7 @@ func TestUpdateEqualMultiCol(t *testing.T) { } func TestUpdateScatter(t *testing.T) { - vindex, _ := vindexes.NewHash("", nil) + vindex, _ := vindexes.CreateVindex("hash", "", nil) upd := &Update{ DML: &DML{ RoutingParameters: &RoutingParameters{ @@ -179,7 +179,7 @@ func TestUpdateScatter(t *testing.T) { } func TestUpdateEqualNoRoute(t *testing.T) { - vindex, _ := vindexes.NewLookupUnique("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup_unique", "", map[string]string{ "table": "lkp", "from": "from", "to": "toc", @@ -211,7 +211,7 @@ func TestUpdateEqualNoRoute(t *testing.T) { func TestUpdateEqualNoScatter(t *testing.T) { t.Skip("planner does not produces this plan anymore") - vindex, _ := vindexes.NewLookupUnique("", map[string]string{ + vindex, _ := vindexes.CreateVindex("lookup_unique", "", map[string]string{ "table": "lkp", "from": "from", "to": "toc", @@ -247,10 +247,9 @@ func TestUpdateEqualChangedVindex(t *testing.T) { Vindex: ks.Vindexes["hash"], Values: []evalengine.Expr{evalengine.NewLiteralInt(1)}, }, - Query: "dummy_update", - Table: []*vindexes.Table{ - ks.Tables["t1"], - }, + Query: "dummy_update", + TableNames: []string{ks.Tables["t1"].Name.String()}, + Vindexes: ks.Tables["t1"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: ks.Vindexes["hash"], KsidLength: 1, @@ -392,10 +391,9 @@ func TestUpdateEqualMultiColChangedVindex(t *testing.T) { Vindex: ks.Vindexes["rg_vdx"], Values: []evalengine.Expr{evalengine.NewLiteralInt(1), evalengine.NewLiteralInt(2)}, }, - Query: "dummy_update", - Table: []*vindexes.Table{ - ks.Tables["rg_tbl"], - }, + Query: "dummy_update", + TableNames: []string{ks.Tables["rg_tbl"].Name.String()}, + Vindexes: ks.Tables["rg_tbl"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: ks.Vindexes["rg_vdx"], KsidLength: 2, @@ -513,10 +511,9 @@ func TestUpdateScatterChangedVindex(t *testing.T) { Opcode: Scatter, Keyspace: ks.Keyspace, }, - Query: "dummy_update", - Table: []*vindexes.Table{ - ks.Tables["t1"], - }, + Query: "dummy_update", + TableNames: []string{ks.Tables["t1"].Name.String()}, + Vindexes: ks.Tables["t1"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: ks.Vindexes["hash"], KsidLength: 1, @@ -709,10 +706,9 @@ func TestUpdateInChangedVindex(t *testing.T) { evalengine.NewLiteralInt(2), }}, }, - Query: "dummy_update", - Table: []*vindexes.Table{ - ks.Tables["t1"], - }, + Query: "dummy_update", + TableNames: []string{ks.Tables["t1"].Name.String()}, + Vindexes: ks.Tables["t1"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: ks.Vindexes["hash"], KsidLength: 1, @@ -840,10 +836,9 @@ func TestUpdateInChangedVindexMultiCol(t *testing.T) { evalengine.NewLiteralInt(3), }, }, - Query: "dummy_update", - Table: []*vindexes.Table{ - ks.Tables["rg_tbl"], - }, + Query: "dummy_update", + TableNames: []string{ks.Tables["rg_tbl"].Name.String()}, + Vindexes: ks.Tables["rg_tbl"].Owned, OwnedVindexQuery: "dummy_subquery", KsidVindex: ks.Vindexes["rg_vdx"], KsidLength: 2, @@ -891,7 +886,7 @@ func TestUpdateInChangedVindexMultiCol(t *testing.T) { } func TestUpdateEqualSubshard(t *testing.T) { - vindex, _ := vindexes.NewRegionExperimental("", map[string]string{"region_bytes": "1"}) + vindex, _ := vindexes.CreateVindex("region_experimental", "", map[string]string{"region_bytes": "1"}) upd := &Update{ DML: &DML{ RoutingParameters: &RoutingParameters{ diff --git a/go/vt/vtgate/engine/vexplain.go b/go/vt/vtgate/engine/vexplain.go index da7b6100221..ad540f96c9c 100644 --- a/go/vt/vtgate/engine/vexplain.go +++ b/go/vt/vtgate/engine/vexplain.go @@ -170,11 +170,22 @@ func primitiveToPlanDescriptionWithSQLResults(in Primitive, res map[Primitive]st this.Other["mysql_explain_json"] = json.RawMessage(v) } - for _, input := range in.Inputs() { - this.Inputs = append(this.Inputs, primitiveToPlanDescriptionWithSQLResults(input, res)) + inputs, infos := in.Inputs() + for idx, input := range inputs { + pd := primitiveToPlanDescriptionWithSQLResults(input, res) + if infos != nil { + for k, v := range infos[idx] { + if k == inputName { + pd.InputName = v.(string) + continue + } + pd.Other[k] = v + } + } + this.Inputs = append(this.Inputs, pd) } - if len(in.Inputs()) == 0 { + if len(inputs) == 0 { this.Inputs = []PrimitiveDescription{} } @@ -206,8 +217,8 @@ func convertToVExplainQueriesResult(logs []ExecuteEntry) *sqltypes.Result { } // Inputs implements the Primitive interface -func (v *VExplain) Inputs() []Primitive { - return []Primitive{v.Input} +func (v *VExplain) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{v.Input}, nil } func (v *VExplain) description() PrimitiveDescription { diff --git a/go/vt/vtgate/engine/vindex_func.go b/go/vt/vtgate/engine/vindex_func.go index 7e1802077d1..918bc9240ad 100644 --- a/go/vt/vtgate/engine/vindex_func.go +++ b/go/vt/vtgate/engine/vindex_func.go @@ -111,16 +111,17 @@ func (vf *VindexFunc) GetFields(ctx context.Context, vcursor VCursor, bindVars m } func (vf *VindexFunc) mapVindex(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) k, err := env.Evaluate(vf.Value) if err != nil { return nil, err } var values []sqltypes.Value - if k.Value().Type() == querypb.Type_TUPLE { + value := k.Value(vcursor.ConnCollation()) + if value.Type() == querypb.Type_TUPLE { values = k.TupleValues() } else { - values = append(values, k.Value()) + values = append(values, value) } result := &sqltypes.Result{ Fields: vf.Fields, @@ -135,7 +136,7 @@ func (vf *VindexFunc) mapVindex(ctx context.Context, vcursor VCursor, bindVars m len(values), len(destinations)) } for i, value := range values { - vkey, err := evalengine.Cast(value, sqltypes.VarBinary) + vkey, err := sqltypes.Cast(value, sqltypes.VarBinary) if err != nil { return nil, err } diff --git a/go/vt/vtgate/engine/vindex_lookup.go b/go/vt/vtgate/engine/vindex_lookup.go index 816507ae086..576cad14287 100644 --- a/go/vt/vtgate/engine/vindex_lookup.go +++ b/go/vt/vtgate/engine/vindex_lookup.go @@ -81,7 +81,7 @@ func (vr *VindexLookup) NeedsTransaction() bool { // TryExecute implements the Primitive interface func (vr *VindexLookup) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { - ids, err := vr.generateIds(vcursor, bindVars) + ids, err := vr.generateIds(ctx, vcursor, bindVars) if err != nil { return nil, err } @@ -117,7 +117,7 @@ func (vr *VindexLookup) mapVindexToDestination(ids []sqltypes.Value, results []* // TryStreamExecute implements the Primitive interface func (vr *VindexLookup) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - ids, err := vr.generateIds(vcursor, bindVars) + ids, err := vr.generateIds(ctx, vcursor, bindVars) if err != nil { return err } @@ -136,12 +136,12 @@ func (vr *VindexLookup) TryStreamExecute(ctx context.Context, vcursor VCursor, b } // Inputs implements the Primitive interface -func (vr *VindexLookup) Inputs() []Primitive { +func (vr *VindexLookup) Inputs() ([]Primitive, []map[string]any) { if vr.Lookup != nil { - return []Primitive{vr.Lookup, vr.SendTo} + return []Primitive{vr.Lookup, vr.SendTo}, nil } - return []Primitive{vr.SendTo} + return []Primitive{vr.SendTo}, nil } // description implements the Primitive interface @@ -246,15 +246,15 @@ func (vr *VindexLookup) executeBatch(ctx context.Context, vcursor VCursor, ids [ return results, nil } -func (vr *VindexLookup) generateIds(vcursor VCursor, bindVars map[string]*querypb.BindVariable) ([]sqltypes.Value, error) { - env := evalengine.EnvWithBindVars(bindVars, vcursor.ConnCollation()) +func (vr *VindexLookup) generateIds(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) ([]sqltypes.Value, error) { + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) value, err := env.Evaluate(vr.Values[0]) if err != nil { return nil, err } switch vr.Opcode { case Equal, EqualUnique: - return []sqltypes.Value{value.Value()}, nil + return []sqltypes.Value{value.Value(vcursor.ConnCollation())}, nil case IN: return value.TupleValues(), nil } diff --git a/go/vt/vtgate/evalengine/api_aggregation.go b/go/vt/vtgate/evalengine/api_aggregation.go new file mode 100644 index 00000000000..c0d490ced22 --- /dev/null +++ b/go/vt/vtgate/evalengine/api_aggregation.go @@ -0,0 +1,497 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "strconv" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/fastparse" + "vitess.io/vitess/go/mysql/format" + "vitess.io/vitess/go/sqltypes" +) + +// Sum implements a SUM() aggregation +type Sum interface { + Add(value sqltypes.Value) error + Result() sqltypes.Value + Reset() +} + +// MinMax implements a MIN() or MAX() aggregation +type MinMax interface { + Min(value sqltypes.Value) error + Max(value sqltypes.Value) error + Result() sqltypes.Value + Reset() +} + +// aggregationSumCount implements a sum of count values. +// This is a Vitess-specific optimization that allows our planner to push down +// some expensive cross-shard operations by summing counts from different result sets. +// The result of this operator is always an INT64 (like for the COUNT() operator); +// if no values were provided to the operator, the result will be 0 (not NULL). +// If the sum of counts overflows, an error will be returned (instead of transparently +// calculating the larger sum using decimals). +type aggregationSumCount struct { + n int64 +} + +func (s *aggregationSumCount) Add(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToInt64() + if err != nil { + return err + } + + result := s.n + n + if (result > s.n) != (n > 0) { + return dataOutOfRangeError(s.n, n, "BIGINT", "+") + } + + s.n = result + return nil +} + +func (s *aggregationSumCount) Result() sqltypes.Value { + return sqltypes.NewInt64(s.n) +} + +func (s *aggregationSumCount) Reset() { + s.n = 0 +} + +// aggregationInt implements SUM, MIN and MAX aggregation for Signed types, +// including INT64, INT32, INT24, INT16 and INT8. +// +// For SUM, the result of the operator is always a DECIMAL (matching MySQL's behavior), +// unless no values have been aggregated, in which case the result is NULL. +// For performance reasons, although the output of a SUM is a DECIMAL, the computations +// are performed using 64-bit arithmetic as long as they don't overflow. +// +// For MIN and MAX aggregations, the result of the operator is the same type as the values that +// have been aggregated. +type aggregationInt struct { + current int64 + dec decimal.Decimal + t sqltypes.Type + init bool +} + +func (s *aggregationInt) Add(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToInt64() + if err != nil { + return err + } + + s.init = true + + if s.dec.IsInitialized() { + s.dec = s.dec.Add(decimal.NewFromInt(n)) + return nil + } + + result := s.current + n + if (result > s.current) != (n > 0) { + s.dec = decimal.NewFromInt(s.current).Add(decimal.NewFromInt(n)) + } else { + s.current = result + } + + return nil +} + +func (s *aggregationInt) Min(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToInt64() + if err != nil { + return err + } + if !s.init || n < s.current { + s.current = n + } + s.init = true + return nil +} + +func (s *aggregationInt) Max(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToInt64() + if err != nil { + return err + } + if !s.init || n > s.current { + s.current = n + } + s.init = true + return nil +} + +func (s *aggregationInt) Result() sqltypes.Value { + if !s.init { + return sqltypes.NULL + } + + var b []byte + if s.dec.IsInitialized() { + b = s.dec.FormatMySQL(0) + } else { + b = strconv.AppendInt(nil, s.current, 10) + } + return sqltypes.MakeTrusted(s.t, b) +} + +func (s *aggregationInt) Reset() { + s.current = 0 + s.dec = decimal.Decimal{} + s.init = false +} + +// aggregationUint implements SUM, MIN and MAX aggregation for Unsigned types, +// including UINT64, UINT32, UINT24, UINT16 and UINT8. +// +// For SUM, the result of the operator is always a DECIMAL (matching MySQL's behavior), +// unless no values have been aggregated, in which case the result is NULL. +// For performance reasons, although the output of a SUM is a DECIMAL, the computations +// are performed using 64-bit arithmetic as long as they don't overflow. +// +// For MIN and MAX aggregations, the result of the operator is the same type as the values that +// have been aggregated. +type aggregationUint struct { + current uint64 + dec decimal.Decimal + t sqltypes.Type + init bool +} + +func (s *aggregationUint) Add(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToUint64() + if err != nil { + return err + } + + s.init = true + + if s.dec.IsInitialized() { + s.dec = s.dec.Add(decimal.NewFromUint(n)) + return nil + } + + result := s.current + n + if false { + s.dec = decimal.NewFromUint(s.current).Add(decimal.NewFromUint(n)) + } else { + s.current = result + } + + return nil +} + +func (s *aggregationUint) Min(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToUint64() + if err != nil { + return err + } + if !s.init || n < s.current { + s.current = n + } + s.init = true + return nil +} + +func (s *aggregationUint) Max(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToUint64() + if err != nil { + return err + } + if !s.init || n > s.current { + s.current = n + } + s.init = true + return nil +} + +func (s *aggregationUint) Result() sqltypes.Value { + if !s.init { + return sqltypes.NULL + } + + var b []byte + if s.dec.IsInitialized() { + b = s.dec.FormatMySQL(0) + } else { + b = strconv.AppendUint(nil, s.current, 10) + } + return sqltypes.MakeTrusted(s.t, b) +} + +func (s *aggregationUint) Reset() { + s.current = 0 + s.dec = decimal.Decimal{} + s.init = false +} + +// aggregationFloat implements SUM, MIN and MAX aggregations for FLOAT32 and FLOAT64 types. +// For SUM aggregations, the result is always a FLOAT64, unless no values have been aggregated, +// in which case the result is NULL. +// For MIN and MAX aggregations, the result is the same type as the aggregated values. +type aggregationFloat struct { + current float64 + t sqltypes.Type + init bool +} + +func (s *aggregationFloat) Add(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + f, err := value.ToFloat64() + if err != nil { + return err + } + s.current += f + s.init = true + return nil +} + +func (s *aggregationFloat) Min(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToFloat64() + if err != nil { + return err + } + if !s.init || n < s.current { + s.current = n + } + s.init = true + return nil +} + +func (s *aggregationFloat) Max(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + n, err := value.ToFloat64() + if err != nil { + return err + } + if !s.init || n > s.current { + s.current = n + } + s.init = true + return nil +} + +func (s *aggregationFloat) Result() sqltypes.Value { + if !s.init { + return sqltypes.NULL + } + return sqltypes.MakeTrusted(s.t, format.FormatFloat(s.current)) +} + +func (s *aggregationFloat) Reset() { + s.current = 0 + s.init = false +} + +// aggregationSumAny implements SUM aggregation for non-numeric values. +// Matching MySQL's behavior, all the values are best-effort parsed as FLOAT64 +// before being aggregated. +type aggregationSumAny struct { + aggregationFloat +} + +func (s *aggregationSumAny) Add(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + f, _ := fastparse.ParseFloat64(value.RawStr()) + s.current += f + s.init = true + return nil +} + +func (s *aggregationSumAny) Result() sqltypes.Value { + if !s.init { + return sqltypes.NULL + } + return sqltypes.NewFloat64(s.current) +} + +// aggregationDecimal implements SUM, MIN and MAX aggregations for the DECIMAL type. +// The return of all aggregations is always DECIMAL, except when no values have been +// aggregated, where the return is NULL. +type aggregationDecimal struct { + dec decimal.Decimal + prec int32 +} + +func (s *aggregationDecimal) Add(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + dec, err := decimal.NewFromMySQL(value.Raw()) + if err != nil { + return err + } + if !s.dec.IsInitialized() { + s.dec = dec + s.prec = -dec.Exponent() + } else { + s.dec = s.dec.Add(dec) + s.prec = max(s.prec, -dec.Exponent()) + } + return nil +} + +func (s *aggregationDecimal) Min(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + dec, err := decimal.NewFromMySQL(value.Raw()) + if err != nil { + return err + } + if !s.dec.IsInitialized() || dec.Cmp(s.dec) < 0 { + s.dec = dec + } + return nil +} + +func (s *aggregationDecimal) Max(value sqltypes.Value) error { + if value.IsNull() { + return nil + } + dec, err := decimal.NewFromMySQL(value.Raw()) + if err != nil { + return err + } + if !s.dec.IsInitialized() || dec.Cmp(s.dec) > 0 { + s.dec = dec + } + return nil +} + +func (s *aggregationDecimal) Result() sqltypes.Value { + if !s.dec.IsInitialized() { + return sqltypes.NULL + } + return sqltypes.MakeTrusted(sqltypes.Decimal, s.dec.FormatMySQL(s.prec)) +} + +func (s *aggregationDecimal) Reset() { + s.dec = decimal.Decimal{} + s.prec = 0 +} + +func NewSumOfCounts() Sum { + return &aggregationSumCount{} +} + +func NewAggregationSum(type_ sqltypes.Type) Sum { + switch { + case sqltypes.IsSigned(type_): + return &aggregationInt{t: sqltypes.Decimal} + case sqltypes.IsUnsigned(type_): + return &aggregationUint{t: sqltypes.Decimal} + case sqltypes.IsFloat(type_): + return &aggregationFloat{t: sqltypes.Float64} + case sqltypes.IsDecimal(type_): + return &aggregationDecimal{} + default: + return &aggregationSumAny{} + } +} + +// aggregationMinMax implements MIN and MAX aggregations for all data types +// that cannot be more efficiently handled by one of the numeric aggregators. +// The aggregation is performed using the slow NullSafeComparison path of the +// evaluation engine. +type aggregationMinMax struct { + current sqltypes.Value + collation collations.ID +} + +func (a *aggregationMinMax) minmax(value sqltypes.Value, max bool) (err error) { + if value.IsNull() { + return nil + } + if a.current.IsNull() { + a.current = value + return nil + } + n, err := compare(a.current, value, a.collation) + if err != nil { + return err + } + if (n < 0) == max { + a.current = value + } + return nil +} + +func (a *aggregationMinMax) Min(value sqltypes.Value) (err error) { + return a.minmax(value, false) +} + +func (a *aggregationMinMax) Max(value sqltypes.Value) error { + return a.minmax(value, true) +} + +func (a *aggregationMinMax) Result() sqltypes.Value { + return a.current +} + +func (a *aggregationMinMax) Reset() { + a.current = sqltypes.NULL +} + +func NewAggregationMinMax(type_ sqltypes.Type, collation collations.ID) MinMax { + switch { + case sqltypes.IsSigned(type_): + return &aggregationInt{t: type_} + case sqltypes.IsUnsigned(type_): + return &aggregationUint{t: type_} + case sqltypes.IsFloat(type_): + return &aggregationFloat{t: type_} + case sqltypes.IsDecimal(type_): + return &aggregationDecimal{} + default: + return &aggregationMinMax{collation: collation} + } +} diff --git a/go/vt/vtgate/evalengine/api_aggregation_test.go b/go/vt/vtgate/evalengine/api_aggregation_test.go new file mode 100644 index 00000000000..aab49541e71 --- /dev/null +++ b/go/vt/vtgate/evalengine/api_aggregation_test.go @@ -0,0 +1,166 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +func TestMinMax(t *testing.T) { + tcases := []struct { + type_ sqltypes.Type + coll collations.ID + values []sqltypes.Value + min, max sqltypes.Value + err error + }{ + { + type_: sqltypes.Int64, + values: []sqltypes.Value{}, + min: sqltypes.NULL, + max: sqltypes.NULL, + }, + { + type_: sqltypes.Int64, + values: []sqltypes.Value{NULL, NULL}, + min: sqltypes.NULL, + max: sqltypes.NULL, + }, + { + type_: sqltypes.Int64, + values: []sqltypes.Value{NULL, NewInt64(1)}, + min: NewInt64(1), + max: NewInt64(1), + }, + { + type_: sqltypes.Int64, + values: []sqltypes.Value{NewInt64(1), NewInt64(2)}, + min: NewInt64(1), + max: NewInt64(2), + }, + { + type_: sqltypes.VarChar, + values: []sqltypes.Value{TestValue(sqltypes.VarChar, "aa"), TestValue(sqltypes.VarChar, "bb")}, + err: vterrors.New(vtrpcpb.Code_UNKNOWN, "cannot compare strings, collation is unknown or unsupported (collation ID: 0)"), + }, + { + type_: sqltypes.VarBinary, + values: []sqltypes.Value{sqltypes.NewVarBinary("a"), sqltypes.NewVarBinary("b")}, + min: sqltypes.NewVarBinary("a"), + max: sqltypes.NewVarBinary("b"), + }, + { + // accent insensitive + type_: sqltypes.VarChar, + coll: getCollationID("utf8mb4_0900_as_ci"), + values: []sqltypes.Value{ + sqltypes.NewVarChar("ǍḄÇ"), + sqltypes.NewVarChar("ÁḆĈ"), + }, + min: sqltypes.NewVarChar("ÁḆĈ"), + max: sqltypes.NewVarChar("ǍḄÇ"), + }, + { + // kana sensitive + type_: sqltypes.VarChar, + coll: getCollationID("utf8mb4_ja_0900_as_cs_ks"), + values: []sqltypes.Value{ + sqltypes.NewVarChar("\xE3\x81\xAB\xE3\x81\xBB\xE3\x82\x93\xE3\x81\x94"), + sqltypes.NewVarChar("\xE3\x83\x8B\xE3\x83\x9B\xE3\x83\xB3\xE3\x82\xB4"), + }, + min: sqltypes.NewVarChar("\xE3\x81\xAB\xE3\x81\xBB\xE3\x82\x93\xE3\x81\x94"), + max: sqltypes.NewVarChar("\xE3\x83\x8B\xE3\x83\x9B\xE3\x83\xB3\xE3\x82\xB4"), + }, + { + // non breaking space + type_: sqltypes.VarChar, + coll: getCollationID("utf8mb4_0900_as_cs"), + values: []sqltypes.Value{ + sqltypes.NewVarChar("abc "), + sqltypes.NewVarChar("abc\u00a0"), + }, + min: sqltypes.NewVarChar("abc "), + max: sqltypes.NewVarChar("abc\u00a0"), + }, + { + type_: sqltypes.VarChar, + coll: getCollationID("utf8mb4_hu_0900_ai_ci"), + // "cs" counts as a separate letter, where c < cs < d + values: []sqltypes.Value{ + sqltypes.NewVarChar("c"), + sqltypes.NewVarChar("cs"), + }, + min: sqltypes.NewVarChar("c"), + max: sqltypes.NewVarChar("cs"), + }, + { + type_: sqltypes.VarChar, + coll: getCollationID("utf8mb4_hu_0900_ai_ci"), + // "cs" counts as a separate letter, where c < cs < d + values: []sqltypes.Value{ + sqltypes.NewVarChar("cukor"), + sqltypes.NewVarChar("csak"), + }, + min: sqltypes.NewVarChar("cukor"), + max: sqltypes.NewVarChar("csak"), + }, + } + for i, tcase := range tcases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + t.Run("Min", func(t *testing.T) { + agg := NewAggregationMinMax(tcase.type_, tcase.coll) + + for _, v := range tcase.values { + err := agg.Min(v) + if err != nil { + if tcase.err != nil { + return + } + require.NoError(t, err) + } + } + + utils.MustMatch(t, agg.Result(), tcase.min) + }) + + t.Run("Max", func(t *testing.T) { + agg := NewAggregationMinMax(tcase.type_, tcase.coll) + + for _, v := range tcase.values { + err := agg.Max(v) + if err != nil { + if tcase.err != nil { + return + } + require.NoError(t, err) + } + } + + utils.MustMatch(t, agg.Result(), tcase.max) + }) + }) + } +} diff --git a/go/vt/vtgate/evalengine/api_arithmetic.go b/go/vt/vtgate/evalengine/api_arithmetic.go index 88b4426b820..4da7e3450a2 100644 --- a/go/vt/vtgate/evalengine/api_arithmetic.go +++ b/go/vt/vtgate/evalengine/api_arithmetic.go @@ -17,9 +17,6 @@ limitations under the License. package evalengine import ( - "bytes" - "strconv" - "vitess.io/vitess/go/sqltypes" ) @@ -27,32 +24,6 @@ import ( // a Value, used for arithmetic operations. var zeroBytes = []byte("0") -// FormatFloat formats a float64 as a byte string in a similar way to what MySQL does -func FormatFloat(typ sqltypes.Type, f float64) []byte { - return AppendFloat(nil, typ, f) -} - -func AppendFloat(buf []byte, typ sqltypes.Type, f float64) []byte { - format := byte('g') - if typ == sqltypes.Decimal { - format = 'f' - } - - // the float printer in MySQL does not add a positive sign before - // the exponent for positive exponents, but the Golang printer does - // do that, and there's no way to customize it, so we must strip the - // redundant positive sign manually - // e.g. 1.234E+56789 -> 1.234E56789 - fstr := strconv.AppendFloat(buf, f, format, -1, 64) - if idx := bytes.IndexByte(fstr, 'e'); idx >= 0 { - if fstr[idx+1] == '+' { - fstr = append(fstr[:idx+1], fstr[idx+2:]...) - } - } - - return fstr -} - // Add adds two values together // if v1 or v2 is null, then it returns null func Add(v1, v2 sqltypes.Value) (sqltypes.Value, error) { diff --git a/go/vt/vtgate/evalengine/api_arithmetic_test.go b/go/vt/vtgate/evalengine/api_arithmetic_test.go index 9a941d5242e..40373423aa5 100644 --- a/go/vt/vtgate/evalengine/api_arithmetic_test.go +++ b/go/vt/vtgate/evalengine/api_arithmetic_test.go @@ -24,7 +24,6 @@ import ( "strconv" "testing" - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/vthash" @@ -117,12 +116,12 @@ func TestArithmetics(t *testing.T) { // testing for error for parsing float value to uint64 v1: TestValue(sqltypes.Uint64, "1.2"), v2: NewInt64(2), - err: "strconv.ParseUint: parsing \"1.2\": invalid syntax", + err: "unparsed tail left after parsing uint64 from \"1.2\": \".2\"", }, { // testing for error for parsing float value to uint64 v1: NewUint64(2), v2: TestValue(sqltypes.Uint64, "1.2"), - err: "strconv.ParseUint: parsing \"1.2\": invalid syntax", + err: "unparsed tail left after parsing uint64 from \"1.2\": \".2\"", }, { // uint64 - uint64 v1: NewUint64(8), @@ -253,11 +252,11 @@ func TestArithmetics(t *testing.T) { }, { v1: TestValue(sqltypes.Int64, "1.2"), v2: NewInt64(2), - err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", }, { v1: NewInt64(2), v2: TestValue(sqltypes.Int64, "1.2"), - err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", }, { // testing for uint64 overflow with max uint64 + int value v1: NewUint64(maxUint64), @@ -320,12 +319,12 @@ func TestArithmetics(t *testing.T) { // testing for error in types v1: TestValue(sqltypes.Int64, "1.2"), v2: NewInt64(2), - err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", }, { // testing for error in types v1: NewInt64(2), v2: TestValue(sqltypes.Int64, "1.2"), - err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", }, { // testing for uint/int v1: NewUint64(4), @@ -350,7 +349,7 @@ func TestArithmetics(t *testing.T) { // testing for overflow of float64 v1: NewFloat64(math.MaxFloat64), v2: NewFloat64(0.5), - err: dataOutOfRangeError(math.MaxFloat64, 0.5, "BIGINT", "/").Error(), + err: dataOutOfRangeError(math.MaxFloat64, 0.5, "DOUBLE", "/").Error(), }}, }, { operator: "*", @@ -384,12 +383,12 @@ func TestArithmetics(t *testing.T) { // testing for error in types v1: TestValue(sqltypes.Int64, "1.2"), v2: NewInt64(2), - err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", }, { // testing for error in types v1: NewInt64(2), v2: TestValue(sqltypes.Int64, "1.2"), - err: "strconv.ParseInt: parsing \"1.2\": invalid syntax", + err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", }, { // testing for uint*int v1: NewUint64(4), @@ -479,12 +478,12 @@ func TestNullSafeAdd(t *testing.T) { // Make sure underlying error is returned for LHS. v1: TestValue(sqltypes.Int64, "1.2"), v2: NewInt64(2), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt: parsing \"1.2\": invalid syntax"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unparsed tail left after parsing int64 from \"1.2\": \".2\""), }, { // Make sure underlying error is returned for RHS. v1: NewInt64(2), v2: TestValue(sqltypes.Int64, "1.2"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt: parsing \"1.2\": invalid syntax"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unparsed tail left after parsing int64 from \"1.2\": \".2\""), }, { // Make sure underlying error is returned while adding. v1: NewInt64(-1), @@ -515,313 +514,6 @@ func TestNullSafeAdd(t *testing.T) { } } -func TestCast(t *testing.T) { - tcases := []struct { - typ sqltypes.Type - v sqltypes.Value - out sqltypes.Value - err error - }{{ - typ: sqltypes.VarChar, - v: NULL, - out: NULL, - }, { - typ: sqltypes.VarChar, - v: TestValue(sqltypes.VarChar, "exact types"), - out: TestValue(sqltypes.VarChar, "exact types"), - }, { - typ: sqltypes.Int64, - v: TestValue(sqltypes.Int32, "32"), - out: TestValue(sqltypes.Int64, "32"), - }, { - typ: sqltypes.Int24, - v: TestValue(sqltypes.Uint64, "64"), - out: TestValue(sqltypes.Int24, "64"), - }, { - typ: sqltypes.Int24, - v: TestValue(sqltypes.VarChar, "bad int"), - err: vterrors.New(vtrpcpb.Code_UNKNOWN, `strconv.ParseInt: parsing "bad int": invalid syntax`), - }, { - typ: sqltypes.Uint64, - v: TestValue(sqltypes.Uint32, "32"), - out: TestValue(sqltypes.Uint64, "32"), - }, { - typ: sqltypes.Uint24, - v: TestValue(sqltypes.Int64, "64"), - out: TestValue(sqltypes.Uint24, "64"), - }, { - typ: sqltypes.Uint24, - v: TestValue(sqltypes.Int64, "-1"), - err: vterrors.New(vtrpcpb.Code_UNKNOWN, `strconv.ParseUint: parsing "-1": invalid syntax`), - }, { - typ: sqltypes.Float64, - v: TestValue(sqltypes.Int64, "64"), - out: TestValue(sqltypes.Float64, "64"), - }, { - typ: sqltypes.Float32, - v: TestValue(sqltypes.Float64, "64"), - out: TestValue(sqltypes.Float32, "64"), - }, { - typ: sqltypes.Float32, - v: TestValue(sqltypes.Decimal, "1.24"), - out: TestValue(sqltypes.Float32, "1.24"), - }, { - typ: sqltypes.Float64, - v: TestValue(sqltypes.VarChar, "1.25"), - out: TestValue(sqltypes.Float64, "1.25"), - }, { - typ: sqltypes.Float64, - v: TestValue(sqltypes.VarChar, "bad float"), - err: vterrors.New(vtrpcpb.Code_UNKNOWN, `strconv.ParseFloat: parsing "bad float": invalid syntax`), - }, { - typ: sqltypes.VarChar, - v: TestValue(sqltypes.Int64, "64"), - out: TestValue(sqltypes.VarChar, "64"), - }, { - typ: sqltypes.VarBinary, - v: TestValue(sqltypes.Float64, "64"), - out: TestValue(sqltypes.VarBinary, "64"), - }, { - typ: sqltypes.VarBinary, - v: TestValue(sqltypes.Decimal, "1.24"), - out: TestValue(sqltypes.VarBinary, "1.24"), - }, { - typ: sqltypes.VarBinary, - v: TestValue(sqltypes.VarChar, "1.25"), - out: TestValue(sqltypes.VarBinary, "1.25"), - }, { - typ: sqltypes.VarChar, - v: TestValue(sqltypes.VarBinary, "valid string"), - out: TestValue(sqltypes.VarChar, "valid string"), - }, { - typ: sqltypes.VarChar, - v: TestValue(sqltypes.Expression, "bad string"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "expression cannot be converted to bytes"), - }} - for _, tcase := range tcases { - got, err := Cast(tcase.v, tcase.typ) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("Cast(%v) error: %v, want %v", tcase.v, vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } - - if !reflect.DeepEqual(got, tcase.out) { - t.Errorf("Cast(%v): %v, want %v", tcase.v, got, tcase.out) - } - } -} - -func TestToUint64(t *testing.T) { - tcases := []struct { - v sqltypes.Value - out uint64 - err error - }{{ - v: TestValue(sqltypes.VarChar, "abcd"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse value: 'abcd'"), - }, { - v: NewInt64(-1), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "negative number cannot be converted to unsigned: -1"), - }, { - v: NewInt64(1), - out: 1, - }, { - v: NewUint64(1), - out: 1, - }} - for _, tcase := range tcases { - got, err := ToUint64(tcase.v) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("ToUint64(%v) error: %v, want %v", tcase.v, vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } - - if got != tcase.out { - t.Errorf("ToUint64(%v): %v, want %v", tcase.v, got, tcase.out) - } - } -} - -func TestToInt64(t *testing.T) { - tcases := []struct { - v sqltypes.Value - out int64 - err error - }{{ - v: TestValue(sqltypes.VarChar, "abcd"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse value: 'abcd'"), - }, { - v: NewUint64(18446744073709551615), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unsigned number overflows int64 value: 18446744073709551615"), - }, { - v: NewInt64(1), - out: 1, - }, { - v: NewUint64(1), - out: 1, - }} - for _, tcase := range tcases { - got, err := ToInt64(tcase.v) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("ToInt64(%v) error: %v, want %v", tcase.v, vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } - - if got != tcase.out { - t.Errorf("ToInt64(%v): %v, want %v", tcase.v, got, tcase.out) - } - } -} - -func TestToFloat64(t *testing.T) { - tcases := []struct { - v sqltypes.Value - out float64 - err error - }{{ - v: TestValue(sqltypes.VarChar, "abcd"), - out: 0, - }, { - v: TestValue(sqltypes.VarChar, "1.2"), - out: 1.2, - }, { - v: NewInt64(1), - out: 1, - }, { - v: NewUint64(1), - out: 1, - }, { - v: NewFloat64(1.2), - out: 1.2, - }, { - v: TestValue(sqltypes.Int64, "1.2"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt: parsing \"1.2\": invalid syntax"), - }} - for _, tcase := range tcases { - t.Run(tcase.v.String(), func(t *testing.T) { - got, err := ToFloat64(tcase.v) - if tcase.err != nil { - require.EqualError(t, err, tcase.err.Error()) - } else { - require.Equal(t, tcase.out, got) - } - }) - } -} - -func TestToNative(t *testing.T) { - testcases := []struct { - in sqltypes.Value - out any - }{{ - in: NULL, - out: nil, - }, { - in: TestValue(sqltypes.Int8, "1"), - out: int64(1), - }, { - in: TestValue(sqltypes.Int16, "1"), - out: int64(1), - }, { - in: TestValue(sqltypes.Int24, "1"), - out: int64(1), - }, { - in: TestValue(sqltypes.Int32, "1"), - out: int64(1), - }, { - in: TestValue(sqltypes.Int64, "1"), - out: int64(1), - }, { - in: TestValue(sqltypes.Uint8, "1"), - out: uint64(1), - }, { - in: TestValue(sqltypes.Uint16, "1"), - out: uint64(1), - }, { - in: TestValue(sqltypes.Uint24, "1"), - out: uint64(1), - }, { - in: TestValue(sqltypes.Uint32, "1"), - out: uint64(1), - }, { - in: TestValue(sqltypes.Uint64, "1"), - out: uint64(1), - }, { - in: TestValue(sqltypes.Float32, "1"), - out: float64(1), - }, { - in: TestValue(sqltypes.Float64, "1"), - out: float64(1), - }, { - in: TestValue(sqltypes.Timestamp, "2012-02-24 23:19:43"), - out: []byte("2012-02-24 23:19:43"), - }, { - in: TestValue(sqltypes.Date, "2012-02-24"), - out: []byte("2012-02-24"), - }, { - in: TestValue(sqltypes.Time, "23:19:43"), - out: []byte("23:19:43"), - }, { - in: TestValue(sqltypes.Datetime, "2012-02-24 23:19:43"), - out: []byte("2012-02-24 23:19:43"), - }, { - in: TestValue(sqltypes.Year, "1"), - out: uint64(1), - }, { - in: TestValue(sqltypes.Decimal, "1"), - out: []byte("1"), - }, { - in: TestValue(sqltypes.Text, "a"), - out: []byte("a"), - }, { - in: TestValue(sqltypes.Blob, "a"), - out: []byte("a"), - }, { - in: TestValue(sqltypes.VarChar, "a"), - out: []byte("a"), - }, { - in: TestValue(sqltypes.VarBinary, "a"), - out: []byte("a"), - }, { - in: TestValue(sqltypes.Char, "a"), - out: []byte("a"), - }, { - in: TestValue(sqltypes.Binary, "a"), - out: []byte("a"), - }, { - in: TestValue(sqltypes.Bit, "1"), - out: []byte("1"), - }, { - in: TestValue(sqltypes.Enum, "a"), - out: []byte("a"), - }, { - in: TestValue(sqltypes.Set, "a"), - out: []byte("a"), - }} - for _, tcase := range testcases { - v, err := ToNative(tcase.in) - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(v, tcase.out) { - t.Errorf("%v.ToNative = %#v, want %#v", tcase.in, v, tcase.out) - } - } - - // Test Expression failure. - _, err := ToNative(TestValue(sqltypes.Expression, "aa")) - want := vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "EXPRESSION(aa) cannot be converted to a go type") - if !vterrors.Equals(err, want) { - t.Errorf("ToNative(EXPRESSION): %v, want %v", vterrors.Print(err), vterrors.Print(want)) - } -} - func TestNewIntegralNumeric(t *testing.T) { tcases := []struct { v sqltypes.Value @@ -847,11 +539,11 @@ func TestNewIntegralNumeric(t *testing.T) { }, { // Only valid Int64 allowed if type is Int64. v: TestValue(sqltypes.Int64, "1.2"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt: parsing \"1.2\": invalid syntax"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unparsed tail left after parsing int64 from \"1.2\": \".2\""), }, { // Only valid Uint64 allowed if type is Uint64. v: TestValue(sqltypes.Uint64, "1.2"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseUint: parsing \"1.2\": invalid syntax"), + err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unparsed tail left after parsing uint64 from \"1.2\": \".2\""), }, { v: TestValue(sqltypes.VarChar, "abcd"), err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse value: 'abcd'"), @@ -1112,220 +804,6 @@ func TestCompareNumeric(t *testing.T) { } } -func TestMin(t *testing.T) { - tcases := []struct { - v1, v2 sqltypes.Value - min sqltypes.Value - err error - }{{ - v1: NULL, - v2: NULL, - min: NULL, - }, { - v1: NewInt64(1), - v2: NULL, - min: NewInt64(1), - }, { - v1: NULL, - v2: NewInt64(1), - min: NewInt64(1), - }, { - v1: NewInt64(1), - v2: NewInt64(2), - min: NewInt64(1), - }, { - v1: NewInt64(2), - v2: NewInt64(1), - min: NewInt64(1), - }, { - v1: NewInt64(1), - v2: NewInt64(1), - min: NewInt64(1), - }, { - v1: TestValue(sqltypes.VarChar, "aa"), - v2: TestValue(sqltypes.VarChar, "aa"), - err: vterrors.New(vtrpcpb.Code_UNKNOWN, "cannot compare strings, collation is unknown or unsupported (collation ID: 0)"), - }} - for _, tcase := range tcases { - v, err := Min(tcase.v1, tcase.v2, collations.Unknown) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("Min error: %v, want %v", vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } - - if !reflect.DeepEqual(v, tcase.min) { - t.Errorf("Min(%v, %v): %v, want %v", tcase.v1, tcase.v2, v, tcase.min) - } - } -} - -func TestMinCollate(t *testing.T) { - tcases := []struct { - v1, v2 string - collation collations.ID - out string - err error - }{ - { - // accent insensitive - v1: "ǍḄÇ", - v2: "ÁḆĈ", - out: "ǍḄÇ", - collation: getCollationID("utf8mb4_0900_as_ci"), - }, - { - // kana sensitive - v1: "\xE3\x81\xAB\xE3\x81\xBB\xE3\x82\x93\xE3\x81\x94", - v2: "\xE3\x83\x8B\xE3\x83\x9B\xE3\x83\xB3\xE3\x82\xB4", - out: "\xE3\x83\x8B\xE3\x83\x9B\xE3\x83\xB3\xE3\x82\xB4", - collation: getCollationID("utf8mb4_ja_0900_as_cs_ks"), - }, - { - // non breaking space - v1: "abc ", - v2: "abc\u00a0", - out: "abc\u00a0", - collation: getCollationID("utf8mb4_0900_as_cs"), - }, - { - // "cs" counts as a separate letter, where c < cs < d - v1: "c", - v2: "cs", - out: "cs", - collation: getCollationID("utf8mb4_hu_0900_ai_ci"), - }, - { - // "cs" counts as a separate letter, where c < cs < d - v1: "cukor", - v2: "csak", - out: "csak", - collation: getCollationID("utf8mb4_hu_0900_ai_ci"), - }, - } - for _, tcase := range tcases { - got, err := Min(TestValue(sqltypes.VarChar, tcase.v1), TestValue(sqltypes.VarChar, tcase.v2), tcase.collation) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("NullsafeCompare(%v, %v) error: %v, want %v", tcase.v1, tcase.v2, vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } - - if got.ToString() == tcase.out { - t.Errorf("NullsafeCompare(%v, %v): %v, want %v", tcase.v1, tcase.v2, got, tcase.out) - } - } -} - -func TestMax(t *testing.T) { - tcases := []struct { - v1, v2 sqltypes.Value - max sqltypes.Value - err error - }{{ - v1: NULL, - v2: NULL, - max: NULL, - }, { - v1: NewInt64(1), - v2: NULL, - max: NewInt64(1), - }, { - v1: NULL, - v2: NewInt64(1), - max: NewInt64(1), - }, { - v1: NewInt64(1), - v2: NewInt64(2), - max: NewInt64(2), - }, { - v1: NewInt64(2), - v2: NewInt64(1), - max: NewInt64(2), - }, { - v1: NewInt64(1), - v2: NewInt64(1), - max: NewInt64(1), - }, { - v1: TestValue(sqltypes.VarChar, "aa"), - v2: TestValue(sqltypes.VarChar, "aa"), - err: vterrors.New(vtrpcpb.Code_UNKNOWN, "cannot compare strings, collation is unknown or unsupported (collation ID: 0)"), - }} - for _, tcase := range tcases { - v, err := Max(tcase.v1, tcase.v2, collations.Unknown) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("Max error: %v, want %v", vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } - - if !reflect.DeepEqual(v, tcase.max) { - t.Errorf("Max(%v, %v): %v, want %v", tcase.v1, tcase.v2, v, tcase.max) - } - } -} - -func TestMaxCollate(t *testing.T) { - tcases := []struct { - v1, v2 string - collation collations.ID - out string - err error - }{ - { - // accent insensitive - v1: "ǍḄÇ", - v2: "ÁḆĈ", - out: "ǍḄÇ", - collation: getCollationID("utf8mb4_0900_as_ci"), - }, - { - // kana sensitive - v1: "\xE3\x81\xAB\xE3\x81\xBB\xE3\x82\x93\xE3\x81\x94", - v2: "\xE3\x83\x8B\xE3\x83\x9B\xE3\x83\xB3\xE3\x82\xB4", - out: "\xE3\x83\x8B\xE3\x83\x9B\xE3\x83\xB3\xE3\x82\xB4", - collation: getCollationID("utf8mb4_ja_0900_as_cs_ks"), - }, - { - // non breaking space - v1: "abc ", - v2: "abc\u00a0", - out: "abc\u00a0", - collation: getCollationID("utf8mb4_0900_as_cs"), - }, - { - // "cs" counts as a separate letter, where c < cs < d - v1: "c", - v2: "cs", - out: "cs", - collation: getCollationID("utf8mb4_hu_0900_ai_ci"), - }, - { - // "cs" counts as a separate letter, where c < cs < d - v1: "cukor", - v2: "csak", - out: "csak", - collation: getCollationID("utf8mb4_hu_0900_ai_ci"), - }, - } - for _, tcase := range tcases { - got, err := Max(TestValue(sqltypes.VarChar, tcase.v1), TestValue(sqltypes.VarChar, tcase.v2), tcase.collation) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("NullsafeCompare(%v, %v) error: %v, want %v", tcase.v1, tcase.v2, vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } - - if got.ToString() != tcase.out { - t.Errorf("NullsafeCompare(%v, %v): %v, want %v", tcase.v1, tcase.v2, got, tcase.out) - } - } -} - func printValue(v sqltypes.Value) string { vBytes, _ := v.ToBytes() return fmt.Sprintf("%v:%q", v.Type(), vBytes) @@ -1360,8 +838,8 @@ func BenchmarkAddNoNative(b *testing.B) { v1 := sqltypes.MakeTrusted(sqltypes.Int64, []byte("1")) v2 := sqltypes.MakeTrusted(sqltypes.Int64, []byte("12")) for i := 0; i < b.N; i++ { - iv1, _ := ToInt64(v1) - iv2, _ := ToInt64(v2) + iv1, _ := v1.ToInt64() + iv2, _ := v2.ToInt64() v1 = sqltypes.MakeTrusted(sqltypes.Int64, strconv.AppendInt(nil, iv1+iv2, 10)) } } @@ -1398,36 +876,3 @@ func BenchmarkAddGo(b *testing.B) { v1 += v2 } } - -func TestParseStringToFloat(t *testing.T) { - tcs := []struct { - str string - val float64 - }{ - {str: ""}, - {str: " "}, - {str: "1", val: 1}, - {str: "1.10", val: 1.10}, - {str: " 6.87", val: 6.87}, - {str: "93.66 ", val: 93.66}, - {str: "\t 42.10 \n ", val: 42.10}, - {str: "1.10aa", val: 1.10}, - {str: ".", val: 0.00}, - {str: ".99", val: 0.99}, - {str: "..99", val: 0}, - {str: "1.", val: 1}, - {str: "0.1.99", val: 0.1}, - {str: "0.", val: 0}, - {str: "8794354", val: 8794354}, - {str: " 10 ", val: 10}, - {str: "2266951196291479516", val: 2266951196291479516}, - {str: "abcd123", val: 0}, - } - - for _, tc := range tcs { - t.Run(tc.str, func(t *testing.T) { - got := parseStringToFloat(tc.str) - require.EqualValues(t, tc.val, got) - }) - } -} diff --git a/go/vt/vtgate/evalengine/api_compare.go b/go/vt/vtgate/evalengine/api_compare.go index 29d3ace89c3..3c9e632e819 100644 --- a/go/vt/vtgate/evalengine/api_compare.go +++ b/go/vt/vtgate/evalengine/api_compare.go @@ -21,6 +21,7 @@ import ( "fmt" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -50,55 +51,102 @@ func (err UnsupportedCollationError) Error() string { // UnsupportedCollationHashError is returned when we try to get the hash value and are missing the collation to use var UnsupportedCollationHashError = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "text type with an unknown/unsupported collation cannot be hashed") -// Min returns the minimum of v1 and v2. If one of the -// values is NULL, it returns the other value. If both -// are NULL, it returns NULL. -func Min(v1, v2 sqltypes.Value, collation collations.ID) (sqltypes.Value, error) { - return minmax(v1, v2, true, collation) -} - -// Max returns the maximum of v1 and v2. If one of the -// values is NULL, it returns the other value. If both -// are NULL, it returns NULL. -func Max(v1, v2 sqltypes.Value, collation collations.ID) (sqltypes.Value, error) { - return minmax(v1, v2, false, collation) -} - -func minmax(v1, v2 sqltypes.Value, min bool, collation collations.ID) (sqltypes.Value, error) { - if v1.IsNull() { - return v2, nil - } - if v2.IsNull() { - return v1, nil +func compare(v1, v2 sqltypes.Value, collationID collations.ID) (int, error) { + // We have a fast path here for the case where both values are + // the same type, and it's one of the basic types we can compare + // directly. This is a common case for equality checks. + if v1.Type() == v2.Type() { + switch { + case sqltypes.IsSigned(v1.Type()): + i1, err := v1.ToInt64() + if err != nil { + return 0, err + } + i2, err := v2.ToInt64() + if err != nil { + return 0, err + } + switch { + case i1 < i2: + return -1, nil + case i1 > i2: + return 1, nil + default: + return 0, nil + } + case sqltypes.IsUnsigned(v1.Type()): + u1, err := v1.ToUint64() + if err != nil { + return 0, err + } + u2, err := v2.ToUint64() + if err != nil { + return 0, err + } + switch { + case u1 < u2: + return -1, nil + case u1 > u2: + return 1, nil + default: + return 0, nil + } + case sqltypes.IsBinary(v1.Type()), v1.Type() == sqltypes.Date, + v1.Type() == sqltypes.Datetime, v1.Type() == sqltypes.Timestamp: + // We can't optimize for Time here, since Time is not sortable + // based on the raw bytes. This is because of cases like + // '24:00:00' and '101:00:00' which are both valid times and + // order wrong based on the raw bytes. + return bytes.Compare(v1.Raw(), v2.Raw()), nil + case sqltypes.IsText(v1.Type()): + if collationID == collations.CollationBinaryID { + return bytes.Compare(v1.Raw(), v2.Raw()), nil + } + coll := colldata.Lookup(collationID) + if coll == nil { + return 0, UnsupportedCollationError{ID: collationID} + } + result := coll.Collate(v1.Raw(), v2.Raw(), false) + switch { + case result < 0: + return -1, nil + case result > 0: + return 1, nil + default: + return 0, nil + } + } } - n, err := NullsafeCompare(v1, v2, collation) + v1eval, err := valueToEval(v1, collations.TypedCollation{ + Collation: collationID, + Coercibility: collations.CoerceImplicit, + Repertoire: collations.RepertoireUnicode, + }) if err != nil { - return sqltypes.NULL, err + return 0, err } - // XNOR construct. See tests. - v1isSmaller := n < 0 - if min == v1isSmaller { - return v1, nil + v2eval, err := valueToEval(v2, collations.TypedCollation{ + Collation: collationID, + Coercibility: collations.CoerceImplicit, + Repertoire: collations.RepertoireUnicode, + }) + if err != nil { + return 0, err } - return v2, nil -} -// isByteComparable returns true if the type is binary or date/time. -func isByteComparable(typ sqltypes.Type, collationID collations.ID) bool { - if sqltypes.IsBinary(typ) { - return true + out, err := evalCompare(v1eval, v2eval) + if err != nil { + return 0, err } - if sqltypes.IsText(typ) { - return collationID == collations.CollationBinaryID + if out == 0 { + return 0, nil } - switch typ { - case sqltypes.Timestamp, sqltypes.Date, sqltypes.Time, sqltypes.Datetime, sqltypes.Enum, sqltypes.Set, sqltypes.TypeJSON, sqltypes.Bit: - return true - default: - return false + if out > 0 { + return 1, nil } + return -1, nil } // NullsafeCompare returns 0 if v1==v2, -1 if v1v2. @@ -120,53 +168,5 @@ func NullsafeCompare(v1, v2 sqltypes.Value, collationID collations.ID) (int, err if v2.IsNull() { return 1, nil } - - if isByteComparable(v1.Type(), collationID) && isByteComparable(v2.Type(), collationID) { - return bytes.Compare(v1.Raw(), v2.Raw()), nil - } - - typ, err := CoerceTo(v1.Type(), v2.Type()) // TODO systay we should add a method where this decision is done at plantime - if err != nil { - return 0, err - } - - switch { - case sqltypes.IsText(typ): - collation := collationID.Get() - if collation == nil { - return 0, UnsupportedCollationError{ID: collationID} - } - - v1Bytes, err := v1.ToBytes() - if err != nil { - return 0, err - } - v2Bytes, err := v2.ToBytes() - if err != nil { - return 0, err - } - - switch result := collation.Collate(v1Bytes, v2Bytes, false); { - case result < 0: - return -1, nil - case result > 0: - return 1, nil - default: - return 0, nil - } - - case sqltypes.IsNumber(typ): - v1cast, err := valueToEvalCast(v1, typ) - if err != nil { - return 0, err - } - v2cast, err := valueToEvalCast(v2, typ) - if err != nil { - return 0, err - } - return compareNumeric(v1cast, v2cast) - - default: - return 0, UnsupportedComparisonError{Type1: v1.Type(), Type2: v2.Type()} - } + return compare(v1, v2, collationID) } diff --git a/go/vt/vtgate/evalengine/api_compare_test.go b/go/vt/vtgate/evalengine/api_compare_test.go index b031db178c3..bd87363b7e8 100644 --- a/go/vt/vtgate/evalengine/api_compare_test.go +++ b/go/vt/vtgate/evalengine/api_compare_test.go @@ -17,6 +17,7 @@ limitations under the License. package evalengine import ( + "context" "fmt" "strings" "testing" @@ -60,12 +61,11 @@ func init() { func defaultCollation() collations.TypedCollation { return collations.TypedCollation{ - Collation: collationEnv.LookupByName("utf8mb4_bin").ID(), + Collation: collationEnv.LookupByName("utf8mb4_bin"), Coercibility: collations.CoerceImplicit, Repertoire: collations.RepertoireASCII, } } - func (tc testCase) run(t *testing.T) { if tc.bv == nil { tc.bv = map[string]*querypb.BindVariable{} @@ -74,12 +74,15 @@ func (tc testCase) run(t *testing.T) { for i, value := range tc.row { fields[i] = &querypb.Field{Type: value.Type()} } - env := &ExpressionEnv{ - BindVars: tc.bv, - Row: tc.row, - Fields: fields, + env := NewExpressionEnv(context.Background(), tc.bv, nil) + env.Row = tc.row + ast := &astCompiler{ + cfg: &Config{ + Collation: collations.CollationUtf8mb4ID, + Optimization: OptimizationLevelSimplify, + }, } - cmp, err := (&astCompiler{}).translateComparisonExpr2(tc.op, tc.v1, tc.v2) + cmp, err := ast.translateComparisonExpr2(tc.op, tc.v1, tc.v2) if err != nil { t.Fatalf("failed to convert: %v", err) } @@ -88,9 +91,9 @@ func (tc testCase) run(t *testing.T) { if tc.err == "" { require.NoError(t, err) if tc.out != nil && *tc.out { - require.EqualValues(t, uint64(1), evalToNumeric(got.v).toUint64().u) + require.EqualValues(t, uint64(1), evalToInt64(got.v).toUint64().u) } else if tc.out != nil && !*tc.out { - require.EqualValues(t, uint64(0), evalToNumeric(got.v).toUint64().u) + require.EqualValues(t, uint64(0), evalToInt64(got.v).toUint64().u) } else { require.EqualValues(t, nil, got.v) } @@ -104,7 +107,7 @@ func TestCompareIntegers(t *testing.T) { tests := []testCase{ { name: "integers are equal (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(0, defaultCollation()), + v1: NewColumn(0, sqltypes.Int64, collations.CollationBinaryID), v2: NewColumn(0, sqltypes.Int64, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewInt64(18)}, }, @@ -125,25 +128,25 @@ func TestCompareIntegers(t *testing.T) { }, { name: "integers are not equal (3)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Int64, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Int64, collations.CollationBinaryID), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewInt64(18), sqltypes.NewInt64(98)}, }, { name: "unsigned integers are equal", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(0, defaultCollation()), + v1: NewColumn(0, sqltypes.Uint64, collations.CollationBinaryID), v2: NewColumn(0, sqltypes.Uint64, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewUint64(18)}, }, { name: "unsigned integer and integer are equal", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Uint64, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Int64, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewUint64(18), sqltypes.NewInt64(18)}, }, { name: "unsigned integer and integer are not equal", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Uint64, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Int64, collations.CollationBinaryID), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewUint64(18), sqltypes.NewInt64(42)}, }, @@ -201,7 +204,7 @@ func TestCompareFloats(t *testing.T) { tests := []testCase{ { name: "floats are equal (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(0, defaultCollation()), + v1: NewColumn(0, sqltypes.Float64, collations.CollationBinaryID), v2: NewColumn(0, sqltypes.Float64, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewFloat64(18)}, }, @@ -222,7 +225,7 @@ func TestCompareFloats(t *testing.T) { }, { name: "floats are not equal (3)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Float64, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Float64, collations.CollationBinaryID), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewFloat64(16516.84), sqltypes.NewFloat64(219541.01)}, }, @@ -280,37 +283,37 @@ func TestCompareDecimals(t *testing.T) { tests := []testCase{ { name: "decimals are equal", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(0, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("12.9019")}, }, { name: "decimals are not equal", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Decimal, collations.CollationBinaryID), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("12.9019"), sqltypes.NewDecimal("489.156849")}, }, { name: "decimal is greater than decimal", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Decimal, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDecimal("192.129"), sqltypes.NewDecimal("192.128")}, }, { name: "decimal is not greater than decimal", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Decimal, collations.CollationBinaryID), out: &F, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDecimal("192.128"), sqltypes.NewDecimal("192.129")}, }, { name: "decimal is less than decimal", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Decimal, collations.CollationBinaryID), out: &T, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewDecimal("192.128"), sqltypes.NewDecimal("192.129")}, }, { name: "decimal is not less than decimal", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Decimal, collations.CollationBinaryID), out: &F, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewDecimal("192.129"), sqltypes.NewDecimal("192.128")}, }, @@ -328,151 +331,151 @@ func TestCompareNumerics(t *testing.T) { tests := []testCase{ { name: "decimal and float are equal", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Float64, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Decimal, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewFloat64(189.6), sqltypes.NewDecimal("189.6")}, }, { name: "decimal and float with negative values are equal", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Float64, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Decimal, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewFloat64(-98.1839), sqltypes.NewDecimal("-98.1839")}, }, { name: "decimal and float with negative values are not equal (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Float64, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Decimal, collations.CollationBinaryID), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewFloat64(-98.9381), sqltypes.NewDecimal("-98.1839")}, }, { name: "decimal and float with negative values are not equal (2)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Float64, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Decimal, collations.CollationBinaryID), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewFloat64(-98.9381), sqltypes.NewDecimal("-98.1839")}, }, { name: "decimal and integer are equal (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Int64, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Decimal, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewInt64(8979), sqltypes.NewDecimal("8979")}, }, { name: "decimal and integer are equal (2)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Int64, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("8979.0000"), sqltypes.NewInt64(8979)}, }, { name: "decimal and unsigned integer are equal (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Uint64, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Decimal, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewUint64(901), sqltypes.NewDecimal("901")}, }, { name: "decimal and unsigned integer are equal (2)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Uint64, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("901.00"), sqltypes.NewUint64(901)}, }, { name: "decimal and unsigned integer are not equal (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Uint64, collations.CollationBinaryID), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("192.129"), sqltypes.NewUint64(192)}, }, { name: "decimal and unsigned integer are not equal (2)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Uint64, collations.CollationBinaryID), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("192.129"), sqltypes.NewUint64(192)}, }, { name: "decimal is greater than integer", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Int64, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDecimal("1.01"), sqltypes.NewInt64(1)}, }, { name: "decimal is greater-equal to integer", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Int64, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("1.00"), sqltypes.NewInt64(1)}, }, { name: "decimal is less than integer", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Int64, collations.CollationBinaryID), out: &T, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewDecimal(".99"), sqltypes.NewInt64(1)}, }, { name: "decimal is less-equal to integer", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Int64, collations.CollationBinaryID), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("1.00"), sqltypes.NewInt64(1)}, }, { name: "decimal is greater than float", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Float64, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDecimal("849.896"), sqltypes.NewFloat64(86.568)}, }, { name: "decimal is not greater than float", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Float64, collations.CollationBinaryID), out: &F, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDecimal("15.23"), sqltypes.NewFloat64(8689.5)}, }, { name: "decimal is greater-equal to float (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Float64, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("65"), sqltypes.NewFloat64(65)}, }, { name: "decimal is greater-equal to float (2)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Float64, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("65"), sqltypes.NewFloat64(60)}, }, { name: "decimal is less than float", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Float64, collations.CollationBinaryID), out: &T, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewDecimal("0.998"), sqltypes.NewFloat64(0.999)}, }, { name: "decimal is less-equal to float", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Decimal, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Float64, collations.CollationBinaryID), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("1.000101"), sqltypes.NewFloat64(1.00101)}, }, { name: "different int types are equal for 8 bit", - v1: NewColumn(0, defaultCollation()), v2: NewLiteralInt(0), + v1: NewColumn(0, sqltypes.Int8, collations.CollationBinaryID), v2: NewLiteralInt(0), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewInt8(0)}, }, { name: "different int types are equal for 32 bit", - v1: NewColumn(0, defaultCollation()), v2: NewLiteralInt(0), + v1: NewColumn(0, sqltypes.Int32, collations.CollationBinaryID), v2: NewLiteralInt(0), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewInt32(0)}, }, { name: "different int types are equal for float32 bit", - v1: NewColumn(0, defaultCollation()), v2: NewLiteralFloat(1.0), + v1: NewColumn(0, sqltypes.Float32, collations.CollationBinaryID), v2: NewLiteralFloat(1.0), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.MakeTrusted(sqltypes.Float32, []byte("1.0"))}, }, { name: "different unsigned int types are equal for 8 bit", - v1: NewColumn(0, defaultCollation()), v2: NewLiteralInt(0), + v1: NewColumn(0, sqltypes.Uint8, collations.CollationBinaryID), v2: NewLiteralInt(0), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.MakeTrusted(sqltypes.Uint8, []byte("0"))}, }, { name: "different unsigned int types are equal for 32 bit", - v1: NewColumn(0, defaultCollation()), v2: NewLiteralInt(0), + v1: NewColumn(0, sqltypes.Uint32, collations.CollationBinaryID), v2: NewLiteralInt(0), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewUint32(0)}, }, @@ -490,73 +493,73 @@ func TestCompareDatetime(t *testing.T) { tests := []testCase{ { name: "datetimes are equal", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(0, defaultCollation()), + v1: NewColumn(0, sqltypes.Datetime, collations.CollationBinaryID), v2: NewColumn(0, sqltypes.Datetime, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-22 12:00:00")}, }, { name: "datetimes are not equal (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Datetime, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Datetime, collations.CollationBinaryID), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-22 12:00:00"), sqltypes.NewDatetime("2020-10-22 12:00:00")}, }, { name: "datetimes are not equal (2)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Datetime, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Datetime, collations.CollationBinaryID), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-22 12:00:00"), sqltypes.NewDatetime("2021-10-22 10:23:56")}, }, { name: "datetimes are not equal (3)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Datetime, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Datetime, collations.CollationBinaryID), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-01 00:00:00"), sqltypes.NewDatetime("2021-02-01 00:00:00")}, }, { name: "datetime is greater than datetime", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Datetime, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Datetime, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-30 10:42:50"), sqltypes.NewDatetime("2021-10-01 13:10:02")}, }, { name: "datetime is not greater than datetime", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Datetime, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Datetime, collations.CollationBinaryID), out: &F, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-01 13:10:02"), sqltypes.NewDatetime("2021-10-30 10:42:50")}, }, { name: "datetime is less than datetime", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Datetime, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Datetime, collations.CollationBinaryID), out: &T, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-01 13:10:02"), sqltypes.NewDatetime("2021-10-30 10:42:50")}, }, { name: "datetime is not less than datetime", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Datetime, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Datetime, collations.CollationBinaryID), out: &F, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-30 10:42:50"), sqltypes.NewDatetime("2021-10-01 13:10:02")}, }, { name: "datetime is greater-equal to datetime (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Datetime, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Datetime, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-30 10:42:50"), sqltypes.NewDatetime("2021-10-30 10:42:50")}, }, { name: "datetime is greater-equal to datetime (2)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Datetime, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Datetime, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-30 10:42:50"), sqltypes.NewDatetime("2021-10-01 13:10:02")}, }, { name: "datetime is less-equal to datetime (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Datetime, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Datetime, collations.CollationBinaryID), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-30 10:42:50"), sqltypes.NewDatetime("2021-10-30 10:42:50")}, }, { name: "datetime is less-equal to datetime (2)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Datetime, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Datetime, collations.CollationBinaryID), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-01 13:10:02"), sqltypes.NewDatetime("2021-10-30 10:42:50")}, }, @@ -574,73 +577,73 @@ func TestCompareTimestamp(t *testing.T) { tests := []testCase{ { name: "timestamps are equal", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(0, defaultCollation()), + v1: NewColumn(0, sqltypes.Timestamp, collations.CollationBinaryID), v2: NewColumn(0, sqltypes.Timestamp, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-22 12:00:00")}, }, { name: "timestamps are not equal (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Timestamp, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Timestamp, collations.CollationBinaryID), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-22 12:00:00"), sqltypes.NewTimestamp("2020-10-22 12:00:00")}, }, { name: "timestamps are not equal (2)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Timestamp, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Timestamp, collations.CollationBinaryID), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-22 12:00:00"), sqltypes.NewTimestamp("2021-10-22 10:23:56")}, }, { name: "timestamps are not equal (3)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Timestamp, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Timestamp, collations.CollationBinaryID), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-01 00:00:00"), sqltypes.NewTimestamp("2021-02-01 00:00:00")}, }, { name: "timestamp is greater than timestamp", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Timestamp, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Timestamp, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-30 10:42:50"), sqltypes.NewTimestamp("2021-10-01 13:10:02")}, }, { name: "timestamp is not greater than timestamp", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Timestamp, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Timestamp, collations.CollationBinaryID), out: &F, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-01 13:10:02"), sqltypes.NewTimestamp("2021-10-30 10:42:50")}, }, { name: "timestamp is less than timestamp", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Timestamp, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Timestamp, collations.CollationBinaryID), out: &T, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-01 13:10:02"), sqltypes.NewTimestamp("2021-10-30 10:42:50")}, }, { name: "timestamp is not less than timestamp", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Timestamp, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Timestamp, collations.CollationBinaryID), out: &F, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-30 10:42:50"), sqltypes.NewTimestamp("2021-10-01 13:10:02")}, }, { name: "timestamp is greater-equal to timestamp (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Timestamp, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Timestamp, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-30 10:42:50"), sqltypes.NewTimestamp("2021-10-30 10:42:50")}, }, { name: "timestamp is greater-equal to timestamp (2)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Timestamp, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Timestamp, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-30 10:42:50"), sqltypes.NewTimestamp("2021-10-01 13:10:02")}, }, { name: "timestamp is less-equal to timestamp (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Timestamp, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Timestamp, collations.CollationBinaryID), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-30 10:42:50"), sqltypes.NewTimestamp("2021-10-30 10:42:50")}, }, { name: "timestamp is less-equal to timestamp (2)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Timestamp, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Timestamp, collations.CollationBinaryID), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-01 13:10:02"), sqltypes.NewTimestamp("2021-10-30 10:42:50")}, }, @@ -658,67 +661,67 @@ func TestCompareDate(t *testing.T) { tests := []testCase{ { name: "dates are equal", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(0, defaultCollation()), + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-22")}, }, { name: "dates are not equal (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Date, collations.CollationBinaryID), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-22"), sqltypes.NewDate("2020-10-21")}, }, { name: "dates are not equal (2)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Date, collations.CollationBinaryID), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-01"), sqltypes.NewDate("2021-02-01")}, }, { name: "date is greater than date", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Date, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-30"), sqltypes.NewDate("2021-10-01")}, }, { name: "date is not greater than date", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Date, collations.CollationBinaryID), out: &F, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-01"), sqltypes.NewDate("2021-10-30")}, }, { name: "date is less than date", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Date, collations.CollationBinaryID), out: &T, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-01"), sqltypes.NewDate("2021-10-30")}, }, { name: "date is not less than date", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Date, collations.CollationBinaryID), out: &F, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-30"), sqltypes.NewDate("2021-10-01")}, }, { name: "date is greater-equal to date (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Date, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-30"), sqltypes.NewDate("2021-10-30")}, }, { name: "date is greater-equal to date (2)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Date, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-30"), sqltypes.NewDate("2021-10-01")}, }, { name: "date is less-equal to date (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Date, collations.CollationBinaryID), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-30"), sqltypes.NewDate("2021-10-30")}, }, { name: "date is less-equal to date (2)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Date, collations.CollationBinaryID), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-01"), sqltypes.NewDate("2021-10-30")}, }, @@ -736,67 +739,79 @@ func TestCompareTime(t *testing.T) { tests := []testCase{ { name: "times are equal", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(0, defaultCollation()), + v1: NewColumn(0, sqltypes.Time, collations.CollationBinaryID), v2: NewColumn(0, sqltypes.Time, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewTime("12:00:00")}, }, { name: "times are not equal (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Time, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewTime("12:00:00"), sqltypes.NewTime("10:23:56")}, }, { name: "times are not equal (2)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Time, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewTime("00:00:00"), sqltypes.NewTime("10:15:00")}, }, { name: "time is greater than time", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Time, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewTime("18:14:35"), sqltypes.NewTime("13:01:38")}, }, { name: "time is not greater than time", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Time, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), out: &F, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewTime("02:46:02"), sqltypes.NewTime("10:42:50")}, }, + { + name: "time is greater than time", + v1: NewColumn(0, sqltypes.Time, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), + out: &T, op: sqlparser.GreaterThanOp, + row: []sqltypes.Value{sqltypes.NewTime("101:14:35"), sqltypes.NewTime("13:01:38")}, + }, + { + name: "time is not greater than time", + v1: NewColumn(0, sqltypes.Time, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), + out: &F, op: sqlparser.GreaterThanOp, + row: []sqltypes.Value{sqltypes.NewTime("24:46:02"), sqltypes.NewTime("101:42:50")}, + }, { name: "time is less than time", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Time, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), out: &T, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewTime("04:30:00"), sqltypes.NewTime("09:23:48")}, }, { name: "time is not less than time", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Time, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), out: &F, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewTime("15:21:00"), sqltypes.NewTime("10:00:00")}, }, { name: "time is greater-equal to time (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Time, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewTime("10:42:50"), sqltypes.NewTime("10:42:50")}, }, { name: "time is greater-equal to time (2)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Time, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewTime("19:42:50"), sqltypes.NewTime("13:10:02")}, }, { name: "time is less-equal to time (1)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Time, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewTime("10:42:50"), sqltypes.NewTime("10:42:50")}, }, { name: "time is less-equal to time (2)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Time, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewTime("10:10:02"), sqltypes.NewTime("10:42:50")}, }, @@ -814,13 +829,13 @@ func TestCompareDates(t *testing.T) { tests := []testCase{ { name: "date equal datetime", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Datetime, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-22"), sqltypes.NewDatetime("2021-10-22 00:00:00")}, }, { name: "date equal datetime through bind variables", - v1: NewBindVar("k1", defaultCollation()), v2: NewBindVar("k2", defaultCollation()), + v1: NewBindVar("k1", sqltypes.Date, collations.CollationBinaryID), v2: NewBindVar("k2", sqltypes.Datetime, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, bv: map[string]*querypb.BindVariable{ "k1": {Type: sqltypes.Date, Value: []byte("2021-10-22")}, @@ -829,7 +844,7 @@ func TestCompareDates(t *testing.T) { }, { name: "date not equal datetime through bind variables", - v1: NewBindVar("k1", defaultCollation()), v2: NewBindVar("k2", defaultCollation()), + v1: NewBindVar("k1", sqltypes.Date, collations.CollationBinaryID), v2: NewBindVar("k2", sqltypes.Datetime, collations.CollationBinaryID), out: &T, op: sqlparser.NotEqualOp, bv: map[string]*querypb.BindVariable{ "k1": {Type: sqltypes.Date, Value: []byte("2021-02-20")}, @@ -838,73 +853,73 @@ func TestCompareDates(t *testing.T) { }, { name: "date not equal datetime", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Datetime, collations.CollationBinaryID), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-22"), sqltypes.NewDatetime("2021-10-20 00:06:00")}, }, { name: "date equal timestamp", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Timestamp, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-22"), sqltypes.NewTimestamp("2021-10-22 00:00:00")}, }, { name: "date not equal timestamp", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Timestamp, collations.CollationBinaryID), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-22"), sqltypes.NewTimestamp("2021-10-22 16:00:00")}, }, { name: "date equal time", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), - out: &T, op: sqlparser.EqualOp, + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), + out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDate(time.Now().Format("2006-01-02")), sqltypes.NewTime("00:00:00")}, }, { name: "date not equal time", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewDate(time.Now().Format("2006-01-02")), sqltypes.NewTime("12:00:00")}, }, { name: "string equal datetime", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.VarChar, collations.CollationUtf8mb4ID), v2: NewColumn(1, sqltypes.Datetime, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("2021-10-22"), sqltypes.NewDatetime("2021-10-22 00:00:00")}, }, { name: "string equal timestamp", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.VarChar, collations.CollationUtf8mb4ID), v2: NewColumn(1, sqltypes.Timestamp, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("2021-10-22 00:00:00"), sqltypes.NewTimestamp("2021-10-22 00:00:00")}, }, { name: "string not equal timestamp", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.VarChar, collations.CollationUtf8mb4ID), v2: NewColumn(1, sqltypes.Timestamp, collations.CollationBinaryID), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("2021-10-22 06:00:30"), sqltypes.NewTimestamp("2021-10-20 15:02:10")}, }, { name: "string equal time", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.VarChar, collations.CollationUtf8mb4ID), v2: NewColumn(1, sqltypes.Time, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("00:05:12"), sqltypes.NewTime("00:05:12")}, }, { name: "string equal date", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.VarChar, collations.CollationUtf8mb4ID), v2: NewColumn(1, sqltypes.Date, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("2021-02-22"), sqltypes.NewDate("2021-02-22")}, }, { name: "string not equal date (1, date on the RHS)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.VarChar, collations.CollationUtf8mb4ID), v2: NewColumn(1, sqltypes.Date, collations.CollationBinaryID), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("2021-02-20"), sqltypes.NewDate("2021-03-30")}, }, { name: "string not equal date (2, date on the LHS)", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.Date, collations.CollationBinaryID), v2: NewColumn(1, sqltypes.VarChar, collations.CollationUtf8mb4ID), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-03-30"), sqltypes.NewVarChar("2021-02-20")}, }, @@ -922,13 +937,13 @@ func TestCompareStrings(t *testing.T) { tests := []testCase{ { name: "string equal string", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.VarChar, collations.Default()), v2: NewColumn(1, sqltypes.VarChar, collations.Default()), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("toto"), sqltypes.NewVarChar("toto")}, }, { name: "string equal number", - v1: NewColumn(0, defaultCollation()), v2: NewColumn(1, defaultCollation()), + v1: NewColumn(0, sqltypes.VarChar, collations.Default()), v2: NewColumn(1, sqltypes.Int64, collations.CollationBinaryID), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("1"), sqltypes.NewInt64(1)}, }, @@ -1092,104 +1107,55 @@ func TestNullComparisons(t *testing.T) { } func TestNullsafeCompare(t *testing.T) { - collation := collationEnv.LookupByName("utf8mb4_general_ci").ID() + collation := collationEnv.LookupByName("utf8mb4_general_ci") tcases := []struct { v1, v2 sqltypes.Value out int err error - }{{ - // All nulls. - v1: NULL, - v2: NULL, - out: 0, - }, { - // LHS null. - v1: NULL, - v2: NewInt64(1), - out: -1, - }, { - // RHS null. - v1: NewInt64(1), - v2: NULL, - out: 1, - }, { - // LHS Text - v1: TestValue(sqltypes.VarChar, "abcd"), - v2: TestValue(sqltypes.VarChar, "abcd"), - out: 0, - }, { - // Make sure underlying error is returned for LHS. - v1: TestValue(sqltypes.Int64, "1.2"), - v2: NewInt64(2), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt: parsing \"1.2\": invalid syntax"), - }, { - // Make sure underlying error is returned for RHS. - v1: NewInt64(2), - v2: TestValue(sqltypes.Int64, "1.2"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "strconv.ParseInt: parsing \"1.2\": invalid syntax"), - }, { - // Numeric equal. - v1: NewInt64(1), - v2: NewUint64(1), - out: 0, - }, { - // Numeric unequal. - v1: NewInt64(1), - v2: NewUint64(2), - out: -1, - }, { - // Non-numeric equal - v1: TestValue(sqltypes.VarBinary, "abcd"), - v2: TestValue(sqltypes.Binary, "abcd"), - out: 0, - }, { - // Non-numeric unequal - v1: TestValue(sqltypes.VarBinary, "abcd"), - v2: TestValue(sqltypes.Binary, "bcde"), - out: -1, - }, { - // Date/Time types - v1: TestValue(sqltypes.Datetime, "1000-01-01 00:00:00"), - v2: TestValue(sqltypes.Binary, "1000-01-01 00:00:00"), - out: 0, - }, { - // Date/Time types - v1: TestValue(sqltypes.Datetime, "2000-01-01 00:00:00"), - v2: TestValue(sqltypes.Binary, "1000-01-01 00:00:00"), - out: 1, - }, { - // Date/Time types - v1: TestValue(sqltypes.Datetime, "1000-01-01 00:00:00"), - v2: TestValue(sqltypes.Binary, "2000-01-01 00:00:00"), - out: -1, - }, { - // Date/Time types - v1: TestValue(sqltypes.Bit, "101"), - v2: TestValue(sqltypes.Bit, "101"), - out: 0, - }, { - // Date/Time types - v1: TestValue(sqltypes.Bit, "1"), - v2: TestValue(sqltypes.Bit, "0"), - out: 1, - }, { - // Date/Time types - v1: TestValue(sqltypes.Bit, "0"), - v2: TestValue(sqltypes.Bit, "1"), - out: -1, - }} + }{ + { + v1: NULL, + v2: NULL, + out: 0, + }, + { + v1: NULL, + v2: NewInt64(1), + out: -1, + }, + { + v1: NewInt64(1), + v2: NULL, + out: 1, + }, + { + v1: TestValue(sqltypes.VarChar, "abcd"), + v2: TestValue(sqltypes.VarChar, "abcd"), + out: 0, + }, + { + v1: TestValue(sqltypes.Float64, "0.0"), + v2: TestValue(sqltypes.VarChar, " 6736380880502626304.000000 aa"), + out: -1, + }, + { + v1: TestValue(sqltypes.Enum, "foo"), + v2: TestValue(sqltypes.Enum, "bar"), + out: 1, + }, + } for _, tcase := range tcases { - got, err := NullsafeCompare(tcase.v1, tcase.v2, collation) - if tcase.err != nil { - require.EqualError(t, err, tcase.err.Error()) - } - if tcase.err != nil { - continue - } - - if got != tcase.out { - t.Errorf("NullsafeCompare(%v, %v): %v, want %v", printValue(tcase.v1), printValue(tcase.v2), got, tcase.out) - } + t.Run(fmt.Sprintf("%v/%v", tcase.v1, tcase.v2), func(t *testing.T) { + got, err := NullsafeCompare(tcase.v1, tcase.v2, collation) + if tcase.err != nil { + require.EqualError(t, err, tcase.err.Error()) + return + } + require.NoError(t, err) + if got != tcase.out { + t.Errorf("NullsafeCompare(%v, %v): %v, want %v", printValue(tcase.v1), printValue(tcase.v2), got, tcase.out) + } + }) } } @@ -1234,7 +1200,7 @@ func TestNullsafeCompareCollate(t *testing.T) { collation: getCollationID("utf8mb4_ja_0900_as_cs_ks"), }, { - // non breaking space + // non-breaking space v1: "abc ", v2: "abc\u00a0", out: -1, @@ -1268,17 +1234,24 @@ func TestNullsafeCompareCollate(t *testing.T) { }, } for _, tcase := range tcases { - got, err := NullsafeCompare(TestValue(sqltypes.VarChar, tcase.v1), TestValue(sqltypes.VarChar, tcase.v2), tcase.collation) - if !vterrors.Equals(err, tcase.err) { - t.Errorf("NullsafeCompare(%v, %v) error: %v, want %v", tcase.v1, tcase.v2, vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err != nil { - continue - } + t.Run(fmt.Sprintf("%v/%v", tcase.v1, tcase.v2), func(t *testing.T) { + got, err := NullsafeCompare(TestValue(sqltypes.VarChar, tcase.v1), TestValue(sqltypes.VarChar, tcase.v2), tcase.collation) + if tcase.err == nil { + require.NoError(t, err) + } else { + require.Error(t, err) + } + if !vterrors.Equals(err, tcase.err) { + t.Errorf("NullsafeCompare(%v, %v) error: %v, want %v", tcase.v1, tcase.v2, vterrors.Print(err), vterrors.Print(tcase.err)) + } + if tcase.err != nil { + return + } - if got != tcase.out { - t.Errorf("NullsafeCompare(%v, %v): %v, want %v", tcase.v1, tcase.v2, got, tcase.out) - } + if got != tcase.out { + t.Errorf("NullsafeCompare(%v, %v): %v, want %v", tcase.v1, tcase.v2, got, tcase.out) + } + }) } } diff --git a/go/vt/vtgate/evalengine/api_hash.go b/go/vt/vtgate/evalengine/api_hash.go index 2acb995b836..209f766840d 100644 --- a/go/vt/vtgate/evalengine/api_hash.go +++ b/go/vt/vtgate/evalengine/api_hash.go @@ -18,13 +18,14 @@ package evalengine import ( "math" - "strconv" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/fastparse" "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine/internal/decimal" "vitess.io/vitess/go/vt/vthash" ) @@ -34,7 +35,7 @@ type HashCode = uint64 // NullsafeHashcode returns an int64 hashcode that is guaranteed to be the same // for two values that are considered equal by `NullsafeCompare`. func NullsafeHashcode(v sqltypes.Value, collation collations.ID, coerceType sqltypes.Type) (HashCode, error) { - e, err := valueToEvalCast(v, coerceType) + e, err := valueToEvalCast(v, coerceType, collation) if err != nil { return 0, err } @@ -45,7 +46,7 @@ func NullsafeHashcode(v sqltypes.Value, collation collations.ID, coerceType sqlt h := vthash.New() switch e := e.(type) { case *evalBytes: - if !collation.Valid() { + if collation == collations.Unknown { return 0, UnsupportedCollationHashError } e.col.Collation = collation @@ -93,10 +94,10 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat f = float64(uval) case v.IsFloat() || v.IsDecimal(): f, err = v.ToFloat64() - case v.IsQuoted(): - f = parseStringToFloat(v.RawStr()) + case v.IsText(), v.IsBinary(): + f, _ = fastparse.ParseFloat64(v.RawStr()) default: - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected type %v", v.Type()) + return nullsafeHashcode128Default(hash, v, collation, coerceTo) } if err != nil { return err @@ -107,10 +108,12 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat case sqltypes.IsSigned(coerceTo): var i int64 var err error + var neg bool switch { case v.IsSigned(): i, err = v.ToInt64() + neg = i < 0 case v.IsUnsigned(): var uval uint64 uval, err = v.ToUint64() @@ -122,22 +125,24 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat return ErrHashCoercionIsNotExact } i = int64(fval) - case v.IsQuoted(): - i, err = strconv.ParseInt(v.RawStr(), 10, 64) + neg = i < 0 + case v.IsText(), v.IsBinary(): + i, err = fastparse.ParseInt64(v.RawStr(), 10) if err != nil { - fval := parseStringToFloat(v.RawStr()) + fval, _ := fastparse.ParseFloat64(v.RawStr()) if fval != math.Trunc(fval) { return ErrHashCoercionIsNotExact } i, err = int64(fval), nil } + neg = i < 0 default: - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected type %v", v.Type()) + return nullsafeHashcode128Default(hash, v, collation, coerceTo) } if err != nil { return err } - if i < 0 { + if neg { hash.Write16(hashPrefixIntegralNegative) } else { hash.Write16(hashPrefixIntegralPositive) @@ -147,11 +152,12 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat case sqltypes.IsUnsigned(coerceTo): var u uint64 var err error - + var neg bool switch { case v.IsSigned(): var ival int64 ival, err = v.ToInt64() + neg = ival < 0 u = uint64(ival) case v.IsUnsigned(): u, err = v.ToUint64() @@ -161,31 +167,37 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat if fval != math.Trunc(fval) || fval < 0 { return ErrHashCoercionIsNotExact } + neg = fval < 0 u = uint64(fval) - case v.IsQuoted(): - u, err = strconv.ParseUint(v.RawStr(), 10, 64) + case v.IsText(), v.IsBinary(): + u, err = fastparse.ParseUint64(v.RawStr(), 10) if err != nil { - fval := parseStringToFloat(v.RawStr()) + fval, _ := fastparse.ParseFloat64(v.RawStr()) if fval != math.Trunc(fval) || fval < 0 { return ErrHashCoercionIsNotExact } + neg = fval < 0 u, err = uint64(fval), nil } default: - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected type %v", v.Type()) + return nullsafeHashcode128Default(hash, v, collation, coerceTo) } if err != nil { return err } - hash.Write16(hashPrefixIntegralPositive) + if neg { + hash.Write16(hashPrefixIntegralNegative) + } else { + hash.Write16(hashPrefixIntegralPositive) + } hash.Write64(u) case sqltypes.IsBinary(coerceTo): hash.Write16(hashPrefixBytes) - collations.Binary.Hash(hash, v.Raw(), 0) + colldata.Lookup(collations.CollationBinaryID).Hash(hash, v.Raw(), 0) case sqltypes.IsText(coerceTo): - coll := collation.Get() + coll := colldata.Lookup(collation) if coll == nil { panic("cannot hash unsupported collation") } @@ -208,16 +220,33 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat } dec = decimal.NewFromFloat(fval) case v.IsText() || v.IsBinary(): - fval := parseStringToFloat(v.RawStr()) + fval, _ := fastparse.ParseFloat64(v.RawStr()) dec = decimal.NewFromFloat(fval) default: - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "coercion should not try to coerce this value to a decimal: %v", v) + return nullsafeHashcode128Default(hash, v, collation, coerceTo) } hash.Write16(hashPrefixDecimal) dec.Hash(hash) - default: - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected type %v", v.Type()) + return nullsafeHashcode128Default(hash, v, collation, coerceTo) } return nil } + +func nullsafeHashcode128Default(hash *vthash.Hasher, v sqltypes.Value, collation collations.ID, coerceTo sqltypes.Type) error { + // Slow path to handle all other types. This uses the generic + // logic for value casting to ensure we match MySQL here. + e, err := valueToEvalCast(v, coerceTo, collation) + if err != nil { + return err + } + switch e := e.(type) { + case nil: + hash.Write16(hashPrefixNil) + return nil + case hashable: + e.Hash(hash) + return nil + } + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected type %v", coerceTo) +} diff --git a/go/vt/vtgate/evalengine/api_hash_test.go b/go/vt/vtgate/evalengine/api_hash_test.go index 7af73645666..832a1ed3b88 100644 --- a/go/vt/vtgate/evalengine/api_hash_test.go +++ b/go/vt/vtgate/evalengine/api_hash_test.go @@ -18,32 +18,70 @@ package evalengine import ( "fmt" - "math/rand" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vthash" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" ) +func TestHashCodes(t *testing.T) { + var cases = []struct { + static, dynamic sqltypes.Value + equal bool + err error + }{ + {sqltypes.NewFloat64(-1), sqltypes.NewVarChar("-1"), true, nil}, + {sqltypes.NewDecimal("-1"), sqltypes.NewVarChar("-1"), true, nil}, + {sqltypes.NewDate("2000-01-01"), sqltypes.NewInt64(20000101), true, nil}, + {sqltypes.NewDatetime("2000-01-01 11:22:33"), sqltypes.NewInt64(20000101112233), true, nil}, + {sqltypes.NewTime("11:22:33"), sqltypes.NewInt64(112233), true, nil}, + {sqltypes.NewInt64(20000101), sqltypes.NewDate("2000-01-01"), true, nil}, + {sqltypes.NewInt64(20000101112233), sqltypes.NewDatetime("2000-01-01 11:22:33"), true, nil}, + {sqltypes.NewInt64(112233), sqltypes.NewTime("11:22:33"), true, nil}, + {sqltypes.MakeTrusted(sqltypes.TypeJSON, []byte(`{"1": "foo", "2": "bar"}`)), sqltypes.MakeTrusted(sqltypes.TypeJSON, []byte(`{"2": "bar", "1": "foo"}`)), true, nil}, + {sqltypes.MakeTrusted(sqltypes.TypeJSON, []byte(`{"1": "foo", "2": "bar"}`)), sqltypes.NewVarChar(`{"2": "bar", "1": "foo"}`), false, nil}, + {sqltypes.NewVarChar(`{"2": "bar", "1": "foo"}`), sqltypes.MakeTrusted(sqltypes.TypeJSON, []byte(`{"1": "foo", "2": "bar"}`)), false, nil}, + } + + for _, tc := range cases { + t.Run(fmt.Sprintf("%v %s %v", tc.static, equality(tc.equal).Operator(), tc.dynamic), func(t *testing.T) { + cmp, err := NullsafeCompare(tc.static, tc.dynamic, collations.CollationUtf8mb4ID) + require.NoError(t, err) + require.Equalf(t, tc.equal, cmp == 0, "got %v %s %v (expected %s)", tc.static, equality(cmp == 0).Operator(), tc.dynamic, equality(tc.equal)) + + h1, err := NullsafeHashcode(tc.static, collations.CollationUtf8mb4ID, tc.static.Type()) + require.NoError(t, err) + + h2, err := NullsafeHashcode(tc.dynamic, collations.CollationUtf8mb4ID, tc.static.Type()) + require.ErrorIs(t, err, tc.err) + + assert.Equalf(t, tc.equal, h1 == h2, "HASH(%v) %s HASH(%v) (expected %s)", tc.static, equality(h1 == h2).Operator(), tc.dynamic, equality(tc.equal)) + }) + } +} + // The following test tries to produce lots of different values and compares them both using hash code and compare, // to make sure that these two methods agree on what values are equal func TestHashCodesRandom(t *testing.T) { tested := 0 equal := 0 - collation := collations.Local().LookupByName("utf8mb4_general_ci").ID() + collation := collations.Local().LookupByName("utf8mb4_general_ci") endTime := time.Now().Add(1 * time.Second) for time.Now().Before(endTime) { tested++ - v1, v2 := randomValues() + v1, v2 := sqltypes.TestRandomValues() cmp, err := NullsafeCompare(v1, v2, collation) require.NoErrorf(t, err, "%s compared with %s", v1.String(), v2.String()) - typ, err := CoerceTo(v1.Type(), v2.Type()) + typ, err := coerceTo(v1.Type(), v2.Type()) require.NoError(t, err) hash1, err := NullsafeHashcode(v1, collation, typ) @@ -80,32 +118,43 @@ func TestHashCodes128(t *testing.T) { equal bool err error }{ - {sqltypes.NewInt64(-1), sqltypes.NewUint64(^uint64(0)), true, nil}, - {sqltypes.NewUint64(^uint64(0)), sqltypes.NewInt64(-1), true, nil}, + {sqltypes.NewInt64(-1), sqltypes.NewUint64(^uint64(0)), false, nil}, + {sqltypes.NewUint64(^uint64(0)), sqltypes.NewInt64(-1), false, nil}, {sqltypes.NewInt64(-1), sqltypes.NewVarChar("-1"), true, nil}, {sqltypes.NewVarChar("-1"), sqltypes.NewInt64(-1), true, nil}, {sqltypes.NewInt64(23), sqltypes.NewFloat64(23.0), true, nil}, {sqltypes.NewInt64(23), sqltypes.NewFloat64(23.1), false, ErrHashCoercionIsNotExact}, {sqltypes.NewUint64(^uint64(0)), sqltypes.NewFloat64(-1.0), false, ErrHashCoercionIsNotExact}, {sqltypes.NewUint64(42), sqltypes.NewFloat64(42.0), true, nil}, + {sqltypes.NewDate("2000-01-01"), sqltypes.NewInt64(20000101), true, nil}, + {sqltypes.NewDatetime("2000-01-01 11:22:33"), sqltypes.NewInt64(20000101112233), true, nil}, + {sqltypes.NewTime("11:22:33"), sqltypes.NewInt64(112233), true, nil}, + {sqltypes.NewInt64(20000101), sqltypes.NewDate("2000-01-01"), true, nil}, + {sqltypes.NewInt64(20000101112233), sqltypes.NewDatetime("2000-01-01 11:22:33"), true, nil}, + {sqltypes.NewInt64(112233), sqltypes.NewTime("11:22:33"), true, nil}, + {sqltypes.MakeTrusted(sqltypes.TypeJSON, []byte(`{"1": "foo", "2": "bar"}`)), sqltypes.MakeTrusted(sqltypes.TypeJSON, []byte(`{"2": "bar", "1": "foo"}`)), true, nil}, + {sqltypes.MakeTrusted(sqltypes.TypeJSON, []byte(`{"1": "foo", "2": "bar"}`)), sqltypes.NewVarChar(`{"2": "bar", "1": "foo"}`), false, nil}, + {sqltypes.NewVarChar(`{"2": "bar", "1": "foo"}`), sqltypes.MakeTrusted(sqltypes.TypeJSON, []byte(`{"1": "foo", "2": "bar"}`)), false, nil}, } for _, tc := range cases { - cmp, err := NullsafeCompare(tc.static, tc.dynamic, collations.CollationUtf8mb4ID) - require.NoError(t, err) - require.Equalf(t, tc.equal, cmp == 0, "got %v %s %v (expected %s)", tc.static, equality(cmp == 0).Operator(), tc.dynamic, equality(tc.equal)) - - hasher1 := vthash.New() - err = NullsafeHashcode128(&hasher1, tc.static, collations.CollationUtf8mb4ID, tc.static.Type()) - require.NoError(t, err) - - hasher2 := vthash.New() - err = NullsafeHashcode128(&hasher2, tc.dynamic, collations.CollationUtf8mb4ID, tc.static.Type()) - require.ErrorIs(t, err, tc.err) - - h1 := hasher1.Sum128() - h2 := hasher2.Sum128() - assert.Equalf(t, tc.equal, h1 == h2, "HASH(%v) %s HASH(%v) (expected %s)", tc.static, equality(h1 == h2).Operator(), tc.dynamic, equality(tc.equal)) + t.Run(fmt.Sprintf("%v %s %v", tc.static, equality(tc.equal).Operator(), tc.dynamic), func(t *testing.T) { + cmp, err := NullsafeCompare(tc.static, tc.dynamic, collations.CollationUtf8mb4ID) + require.NoError(t, err) + require.Equalf(t, tc.equal, cmp == 0, "got %v %s %v (expected %s)", tc.static, equality(cmp == 0).Operator(), tc.dynamic, equality(tc.equal)) + + hasher1 := vthash.New() + err = NullsafeHashcode128(&hasher1, tc.static, collations.CollationUtf8mb4ID, tc.static.Type()) + require.NoError(t, err) + + hasher2 := vthash.New() + err = NullsafeHashcode128(&hasher2, tc.dynamic, collations.CollationUtf8mb4ID, tc.static.Type()) + require.ErrorIs(t, err, tc.err) + + h1 := hasher1.Sum128() + h2 := hasher2.Sum128() + assert.Equalf(t, tc.equal, h1 == h2, "HASH(%v) %s HASH(%v) (expected %s)", tc.static, equality(h1 == h2).Operator(), tc.dynamic, equality(tc.equal)) + }) } } @@ -114,14 +163,14 @@ func TestHashCodes128(t *testing.T) { func TestHashCodesRandom128(t *testing.T) { tested := 0 equal := 0 - collation := collations.Local().LookupByName("utf8mb4_general_ci").ID() + collation := collations.Local().LookupByName("utf8mb4_general_ci") endTime := time.Now().Add(1 * time.Second) for time.Now().Before(endTime) { tested++ - v1, v2 := randomValues() + v1, v2 := sqltypes.TestRandomValues() cmp, err := NullsafeCompare(v1, v2, collation) require.NoErrorf(t, err, "%s compared with %s", v1.String(), v2.String()) - typ, err := CoerceTo(v1.Type(), v2.Type()) + typ, err := coerceTo(v1.Type(), v2.Type()) require.NoError(t, err) hasher1 := vthash.New() @@ -140,59 +189,47 @@ func TestHashCodesRandom128(t *testing.T) { t.Logf("tested %d values, with %d equalities found\n", tested, equal) } -func randomValues() (sqltypes.Value, sqltypes.Value) { - if rand.Int()%2 == 0 { - // create a single value, and turn it into two different types - v := rand.Int() - return randomNumericType(v), randomNumericType(v) +// coerceTo takes two input types, and decides how they should be coerced before compared +func coerceTo(v1, v2 sqltypes.Type) (sqltypes.Type, error) { + if v1 == v2 { + return v1, nil + } + if sqltypes.IsNull(v1) || sqltypes.IsNull(v2) { + return sqltypes.Null, nil + } + if (sqltypes.IsText(v1) || sqltypes.IsBinary(v1)) && (sqltypes.IsText(v2) || sqltypes.IsBinary(v2)) { + return sqltypes.VarChar, nil + } + if sqltypes.IsDateOrTime(v1) { + return v1, nil + } + if sqltypes.IsDateOrTime(v2) { + return v2, nil } - // just produce two arbitrary random values and compare - return randomValue(), randomValue() -} - -func randomNumericType(i int) sqltypes.Value { - r := rand.Intn(len(numericTypes)) - return numericTypes[r](i) - -} - -var numericTypes = []func(int) sqltypes.Value{ - func(i int) sqltypes.Value { return sqltypes.NULL }, - func(i int) sqltypes.Value { return sqltypes.NewInt8(int8(i)) }, - func(i int) sqltypes.Value { return sqltypes.NewInt32(int32(i)) }, - func(i int) sqltypes.Value { return sqltypes.NewInt64(int64(i)) }, - func(i int) sqltypes.Value { return sqltypes.NewUint64(uint64(i)) }, - func(i int) sqltypes.Value { return sqltypes.NewUint32(uint32(i)) }, - func(i int) sqltypes.Value { return sqltypes.NewFloat64(float64(i)) }, - func(i int) sqltypes.Value { return sqltypes.NewDecimal(fmt.Sprintf("%d", i)) }, - func(i int) sqltypes.Value { return sqltypes.NewVarChar(fmt.Sprintf("%d", i)) }, - func(i int) sqltypes.Value { return sqltypes.NewVarChar(fmt.Sprintf(" %f aa", float64(i))) }, -} - -var randomGenerators = []func() sqltypes.Value{ - randomNull, - randomInt8, - randomInt32, - randomInt64, - randomUint64, - randomUint32, - randomVarChar, - randomComplexVarChar, -} - -func randomValue() sqltypes.Value { - r := rand.Intn(len(randomGenerators)) - return randomGenerators[r]() -} - -func randomNull() sqltypes.Value { return sqltypes.NULL } -func randomInt8() sqltypes.Value { return sqltypes.NewInt8(int8(rand.Intn(255))) } -func randomInt32() sqltypes.Value { return sqltypes.NewInt32(rand.Int31()) } -func randomInt64() sqltypes.Value { return sqltypes.NewInt64(rand.Int63()) } -func randomUint32() sqltypes.Value { return sqltypes.NewUint32(rand.Uint32()) } -func randomUint64() sqltypes.Value { return sqltypes.NewUint64(rand.Uint64()) } -func randomVarChar() sqltypes.Value { return sqltypes.NewVarChar(fmt.Sprintf("%d", rand.Int63())) } -func randomComplexVarChar() sqltypes.Value { - return sqltypes.NewVarChar(fmt.Sprintf(" \t %f apa", float64(rand.Intn(1000))*1.10)) + if sqltypes.IsNumber(v1) || sqltypes.IsNumber(v2) { + switch { + case sqltypes.IsText(v1) || sqltypes.IsBinary(v1) || sqltypes.IsText(v2) || sqltypes.IsBinary(v2): + return sqltypes.Float64, nil + case sqltypes.IsFloat(v2) || v2 == sqltypes.Decimal || sqltypes.IsFloat(v1) || v1 == sqltypes.Decimal: + return sqltypes.Float64, nil + case sqltypes.IsSigned(v1): + switch { + case sqltypes.IsUnsigned(v2): + return sqltypes.Uint64, nil + case sqltypes.IsSigned(v2): + return sqltypes.Int64, nil + default: + return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "types does not support hashcode yet: %v vs %v", v1, v2) + } + case sqltypes.IsUnsigned(v1): + switch { + case sqltypes.IsSigned(v2) || sqltypes.IsUnsigned(v2): + return sqltypes.Uint64, nil + default: + return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "types does not support hashcode yet: %v vs %v", v1, v2) + } + } + } + return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "types does not support hashcode yet: %v vs %v", v1, v2) } diff --git a/go/vt/vtgate/evalengine/api_literal.go b/go/vt/vtgate/evalengine/api_literal.go index 77756a21849..1b2ba6e2da2 100644 --- a/go/vt/vtgate/evalengine/api_literal.go +++ b/go/vt/vtgate/evalengine/api_literal.go @@ -17,15 +17,17 @@ limitations under the License. package evalengine import ( - "encoding/hex" + "errors" "math" - "strconv" + "math/big" "unicode/utf8" + "vitess.io/vitess/go/hack" "vitess.io/vitess/go/mysql/collations" - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/evalengine/internal/decimal" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/fastparse" + "vitess.io/vitess/go/mysql/hex" + "vitess.io/vitess/go/sqltypes" ) // NullExpr is just what you are lead to believe @@ -38,9 +40,9 @@ func NewLiteralIntegralFromBytes(val []byte) (*Literal, error) { panic("NewLiteralIntegralFromBytes: negative value") } - uval, err := strconv.ParseUint(string(val), 10, 64) + uval, err := fastparse.ParseUint64(hack.String(val), 10) if err != nil { - if numError, ok := err.(*strconv.NumError); ok && numError.Err == strconv.ErrRange { + if errors.Is(err, fastparse.ErrOverflow) { return NewLiteralDecimalFromBytes(val) } return nil, err @@ -72,7 +74,7 @@ func NewLiteralFloat(val float64) *Literal { // NewLiteralFloatFromBytes returns a float literal expression from a slice of bytes func NewLiteralFloatFromBytes(val []byte) (*Literal, error) { - fval, err := strconv.ParseFloat(string(val), 64) + fval, err := fastparse.ParseFloat64(hack.String(val)) if err != nil { return nil, err } @@ -101,36 +103,36 @@ func NewLiteralString(val []byte, collation collations.TypedCollation) *Literal // NewLiteralDateFromBytes returns a literal expression. func NewLiteralDateFromBytes(val []byte) (*Literal, error) { - _, err := sqlparser.ParseDate(string(val)) + t, err := parseDate(val) if err != nil { return nil, err } - return &Literal{newEvalRaw(querypb.Type_DATE, val, collationNumeric)}, nil + return &Literal{t}, nil } // NewLiteralTimeFromBytes returns a literal expression. // it validates the time by parsing it and checking the error. func NewLiteralTimeFromBytes(val []byte) (*Literal, error) { - _, err := sqlparser.ParseTime(string(val)) + t, err := parseTime(val) if err != nil { return nil, err } - return &Literal{newEvalRaw(querypb.Type_TIME, val, collationNumeric)}, nil + return &Literal{t}, nil } // NewLiteralDatetimeFromBytes returns a literal expression. // it validates the datetime by parsing it and checking the error. func NewLiteralDatetimeFromBytes(val []byte) (*Literal, error) { - _, err := sqlparser.ParseDateTime(string(val)) + t, err := parseDateTime(val) if err != nil { return nil, err } - return &Literal{newEvalRaw(querypb.Type_DATETIME, val, collationNumeric)}, nil + return &Literal{t}, nil } func parseHexLiteral(val []byte) ([]byte, error) { - raw := make([]byte, hex.DecodedLen(len(val))) - if _, err := hex.Decode(raw, val); err != nil { + raw := make([]byte, hex.DecodedLen(val)) + if err := hex.DecodeBytes(raw, val); err != nil { return nil, err } return raw, nil @@ -154,6 +156,15 @@ func parseHexNumber(val []byte) ([]byte, error) { return parseHexLiteral(val[1:]) } +func parseBitLiteral(val []byte) ([]byte, error) { + var i big.Int + _, ok := i.SetString(string(val), 2) + if !ok { + panic("malformed bit literal from parser") + } + return i.Bytes(), nil +} + func NewLiteralBinary(val []byte) *Literal { return &Literal{newEvalBinary(val)} } @@ -174,29 +185,38 @@ func NewLiteralBinaryFromHexNum(val []byte) (*Literal, error) { return &Literal{newEvalBytesHex(raw)}, nil } +func NewLiteralBinaryFromBit(val []byte) (*Literal, error) { + raw, err := parseBitLiteral(val) + if err != nil { + return nil, err + } + return &Literal{newEvalBytesBit(raw)}, nil +} + // NewBindVar returns a bind variable -func NewBindVar(key string, collation collations.TypedCollation) Expr { +func NewBindVar(key string, typ sqltypes.Type, col collations.ID) *BindVariable { return &BindVariable{ - Key: key, - col: collation, - coerce: -1, + Key: key, + Type: typ, + Collation: defaultCoercionCollation(col), } } // NewBindVarTuple returns a bind variable containing a tuple -func NewBindVarTuple(key string) Expr { +func NewBindVarTuple(key string, col collations.ID) *BindVariable { return &BindVariable{ - Key: key, - tuple: true, - coerce: -1, + Key: key, + Type: sqltypes.Tuple, + Collation: defaultCoercionCollation(col), } } // NewColumn returns a column expression -func NewColumn(offset int, collation collations.TypedCollation) Expr { +func NewColumn(offset int, typ sqltypes.Type, col collations.ID) *Column { return &Column{ - Offset: offset, - coll: collation, + Offset: offset, + Type: typ, + Collation: defaultCoercionCollation(col), } } diff --git a/go/vt/vtgate/evalengine/api_types.go b/go/vt/vtgate/evalengine/api_types.go deleted file mode 100644 index 817689d04c8..00000000000 --- a/go/vt/vtgate/evalengine/api_types.go +++ /dev/null @@ -1,202 +0,0 @@ -/* -Copyright 2023 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package evalengine - -import ( - "fmt" - "strconv" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" -) - -// CoerceTo takes two input types, and decides how they should be coerced before compared -func CoerceTo(v1, v2 sqltypes.Type) (sqltypes.Type, error) { - if v1 == v2 { - return v1, nil - } - if sqltypes.IsNull(v1) || sqltypes.IsNull(v2) { - return sqltypes.Null, nil - } - if (sqltypes.IsText(v1) || sqltypes.IsBinary(v1)) && (sqltypes.IsText(v2) || sqltypes.IsBinary(v2)) { - return sqltypes.VarChar, nil - } - if sqltypes.IsNumber(v1) || sqltypes.IsNumber(v2) { - switch { - case sqltypes.IsText(v1) || sqltypes.IsBinary(v1) || sqltypes.IsText(v2) || sqltypes.IsBinary(v2): - return sqltypes.Float64, nil - case sqltypes.IsFloat(v2) || v2 == sqltypes.Decimal || sqltypes.IsFloat(v1) || v1 == sqltypes.Decimal: - return sqltypes.Float64, nil - case sqltypes.IsSigned(v1): - switch { - case sqltypes.IsUnsigned(v2): - return sqltypes.Uint64, nil - case sqltypes.IsSigned(v2): - return sqltypes.Int64, nil - default: - return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "types does not support hashcode yet: %v vs %v", v1, v2) - } - case sqltypes.IsUnsigned(v1): - switch { - case sqltypes.IsSigned(v2) || sqltypes.IsUnsigned(v2): - return sqltypes.Uint64, nil - default: - return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "types does not support hashcode yet: %v vs %v", v1, v2) - } - } - } - return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "types does not support hashcode yet: %v vs %v", v1, v2) -} - -// Cast converts a Value to the target type. -func Cast(v sqltypes.Value, typ sqltypes.Type) (sqltypes.Value, error) { - if v.Type() == typ || v.IsNull() { - return v, nil - } - vBytes, err := v.ToBytes() - if err != nil { - return v, err - } - if sqltypes.IsSigned(typ) && v.IsSigned() { - return sqltypes.MakeTrusted(typ, vBytes), nil - } - if sqltypes.IsUnsigned(typ) && v.IsUnsigned() { - return sqltypes.MakeTrusted(typ, vBytes), nil - } - if (sqltypes.IsFloat(typ) || typ == sqltypes.Decimal) && (v.IsIntegral() || v.IsFloat() || v.Type() == sqltypes.Decimal) { - return sqltypes.MakeTrusted(typ, vBytes), nil - } - if sqltypes.IsQuoted(typ) && (v.IsIntegral() || v.IsFloat() || v.Type() == sqltypes.Decimal || v.IsQuoted()) { - return sqltypes.MakeTrusted(typ, vBytes), nil - } - - // Explicitly disallow Expression. - if v.Type() == sqltypes.Expression { - return sqltypes.NULL, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v cannot be cast to %v", v, typ) - } - - // If the above fast-paths were not possible, - // go through full validation. - return sqltypes.NewValue(typ, vBytes) -} - -// ToUint64 converts Value to uint64. -func ToUint64(v sqltypes.Value) (uint64, error) { - num, err := valueToEvalNumeric(v) - if err != nil { - return 0, err - } - switch num := num.(type) { - case *evalInt64: - if num.i < 0 { - return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "negative number cannot be converted to unsigned: %d", num.i) - } - return uint64(num.i), nil - case *evalUint64: - return num.u, nil - default: - return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected return from numeric evaluation (%T)", num) - } -} - -// ToInt64 converts Value to int64. -func ToInt64(v sqltypes.Value) (int64, error) { - num, err := valueToEvalNumeric(v) - if err != nil { - return 0, err - } - switch num := num.(type) { - case *evalInt64: - return num.i, nil - case *evalUint64: - ival := int64(num.u) - if ival < 0 { - return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsigned number overflows int64 value: %d", num.u) - } - return ival, nil - default: - return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected return from numeric evaluation (%T)", num) - } -} - -// ToFloat64 converts Value to float64. -func ToFloat64(v sqltypes.Value) (float64, error) { - num, err := valueToEval(v, collationNumeric) - if err != nil { - return 0, err - } - f, _ := evalToNumeric(num).toFloat() - return f.f, nil -} - -func LiteralToValue(literal *sqlparser.Literal) (sqltypes.Value, error) { - lit, err := (&astCompiler{}).translateLiteral(literal) - if err != nil { - return sqltypes.Value{}, err - } - return evalToSQLValue(lit.inner), nil -} - -// ToNative converts Value to a native go type. -// Decimal is returned as []byte. -func ToNative(v sqltypes.Value) (any, error) { - var out any - var err error - switch { - case v.Type() == sqltypes.Null: - // no-op - case v.IsSigned(): - return ToInt64(v) - case v.IsUnsigned(): - return ToUint64(v) - case v.IsFloat(): - return ToFloat64(v) - case v.IsQuoted() || v.Type() == sqltypes.Bit || v.Type() == sqltypes.Decimal: - out, err = v.ToBytes() - case v.Type() == sqltypes.Expression: - err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v cannot be converted to a go type", v) - } - return out, err -} - -func NormalizeValue(v sqltypes.Value, coll collations.ID) string { - typ := v.Type() - if typ == sqltypes.Null { - return "NULL" - } - if typ == sqltypes.VarChar && coll == collations.CollationBinaryID { - return fmt.Sprintf("VARBINARY(%q)", v.Raw()) - } - if v.IsQuoted() || typ == sqltypes.Bit { - return fmt.Sprintf("%v(%q)", typ, v.Raw()) - } - if typ == sqltypes.Float32 || typ == sqltypes.Float64 { - var bitsize = 64 - if typ == sqltypes.Float32 { - bitsize = 32 - } - f, err := strconv.ParseFloat(v.RawStr(), bitsize) - if err != nil { - panic(err) - } - return fmt.Sprintf("%v(%s)", typ, FormatFloat(typ, f)) - } - return fmt.Sprintf("%v(%s)", typ, v.Raw()) -} diff --git a/go/vt/vtgate/evalengine/arena.go b/go/vt/vtgate/evalengine/arena.go new file mode 100644 index 00000000000..590dc3b02c7 --- /dev/null +++ b/go/vt/vtgate/evalengine/arena.go @@ -0,0 +1,155 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/datetime" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/sqltypes" +) + +// Arena is an arena memory allocator for eval types. +// It allocates the types from reusable slices to prevent heap allocations. +// After each evaluation execution, (*Arena).reset() should be called to reset the arena. +type Arena struct { + aInt64 []evalInt64 + aUint64 []evalUint64 + aFloat64 []evalFloat + aDecimal []evalDecimal + aBytes []evalBytes +} + +func (a *Arena) reset() { + a.aInt64 = a.aInt64[:0] + a.aUint64 = a.aUint64[:0] + a.aFloat64 = a.aFloat64[:0] + a.aDecimal = a.aDecimal[:0] + a.aBytes = a.aBytes[:0] +} + +func (a *Arena) newEvalDecimalWithPrec(dec decimal.Decimal, prec int32) *evalDecimal { + if cap(a.aDecimal) > len(a.aDecimal) { + a.aDecimal = a.aDecimal[:len(a.aDecimal)+1] + } else { + a.aDecimal = append(a.aDecimal, evalDecimal{}) + } + val := &a.aDecimal[len(a.aDecimal)-1] + val.dec = dec + val.length = prec + return val +} + +func (a *Arena) newEvalDecimal(dec decimal.Decimal, m, d int32) *evalDecimal { + if m == 0 && d == 0 { + return a.newEvalDecimalWithPrec(dec, -dec.Exponent()) + } + return a.newEvalDecimalWithPrec(dec.Clamp(m-d, d), d) +} + +func (a *Arena) newEvalBool(b bool) *evalInt64 { + if b { + return a.newEvalInt64(1) + } + return a.newEvalInt64(0) +} + +func (a *Arena) newEvalInt64(i int64) *evalInt64 { + if cap(a.aInt64) > len(a.aInt64) { + a.aInt64 = a.aInt64[:len(a.aInt64)+1] + } else { + a.aInt64 = append(a.aInt64, evalInt64{}) + } + val := &a.aInt64[len(a.aInt64)-1] + val.i = i + return val +} + +func (a *Arena) newEvalUint64(u uint64) *evalUint64 { + if cap(a.aUint64) > len(a.aUint64) { + a.aUint64 = a.aUint64[:len(a.aUint64)+1] + } else { + a.aUint64 = append(a.aUint64, evalUint64{}) + } + val := &a.aUint64[len(a.aUint64)-1] + val.u = u + val.hexLiteral = false + return val +} + +func (a *Arena) newEvalFloat(f float64) *evalFloat { + if cap(a.aFloat64) > len(a.aFloat64) { + a.aFloat64 = a.aFloat64[:len(a.aFloat64)+1] + } else { + a.aFloat64 = append(a.aFloat64, evalFloat{}) + } + val := &a.aFloat64[len(a.aFloat64)-1] + val.f = f + return val +} + +func (a *Arena) newEvalBytesEmpty() *evalBytes { + if cap(a.aBytes) > len(a.aBytes) { + a.aBytes = a.aBytes[:len(a.aBytes)+1] + } else { + a.aBytes = append(a.aBytes, evalBytes{}) + } + return &a.aBytes[len(a.aBytes)-1] +} + +func (a *Arena) newEvalBinary(raw []byte) *evalBytes { + b := a.newEvalBytesEmpty() + b.tt = int16(sqltypes.VarBinary) + b.col = collationBinary + b.bytes = raw + return b +} + +func (a *Arena) newEvalText(raw []byte, tc collations.TypedCollation) *evalBytes { + b := a.newEvalBytesEmpty() + b.tt = int16(sqltypes.VarChar) + b.col = tc + b.bytes = raw + return b +} + +func (a *Arena) newEvalRaw(raw []byte, tt sqltypes.Type, tc collations.TypedCollation) *evalBytes { + b := a.newEvalBytesEmpty() + b.tt = int16(tt) + b.col = tc + b.bytes = raw + return b +} + +func (a *Arena) newEvalTime(time datetime.Time, l int) *evalTemporal { + // TODO: reuse evalTemporal + return &evalTemporal{t: sqltypes.Time, dt: datetime.DateTime{Time: time.Round(l)}, prec: uint8(l)} +} + +func (a *Arena) newEvalDateTime(dt datetime.DateTime, l int) *evalTemporal { + // TODO: reuse evalTemporal + return &evalTemporal{t: sqltypes.Datetime, dt: dt.Round(l), prec: uint8(l)} +} + +func (a *Arena) newEvalDate(date datetime.Date) *evalTemporal { + // TODO: reuse evalTemporal + return &evalTemporal{t: sqltypes.Date, dt: datetime.DateTime{Date: date}} +} + +func (a *Arena) newTemporal(t sqltypes.Type, dt datetime.DateTime, prec uint8) *evalTemporal { + return &evalTemporal{t: t, dt: dt, prec: prec} +} diff --git a/go/vt/vtgate/evalengine/arithmetic.go b/go/vt/vtgate/evalengine/arithmetic.go index 47a2880a746..d6ac81b7a58 100644 --- a/go/vt/vtgate/evalengine/arithmetic.go +++ b/go/vt/vtgate/evalengine/arithmetic.go @@ -17,20 +17,22 @@ limitations under the License. package evalengine import ( - "strings" + "math" - "golang.org/x/exp/constraints" - - "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/mysql/decimal" "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) -func dataOutOfRangeError[N1, N2 constraints.Integer | constraints.Float](v1 N1, v2 N2, typ, sign string) error { +func dataOutOfRangeError[N1, N2 int | int64 | uint64 | float64](v1 N1, v2 N2, typ, sign string) error { return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "%s value is out of range in '(%v %s %v)'", typ, v1, sign, v2) } +func dataOutOfRangeErrorDecimal(v1 decimal.Decimal, v2 decimal.Decimal, typ, sign string) error { + return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "%s value is out of range in '(%v %s %v)'", typ, v1.String(), sign, v2.String()) +} + func addNumericWithError(left, right eval) (eval, error) { v1, v2 := makeNumericAndPrioritize(left, right) switch v1 := v1.(type) { @@ -52,8 +54,8 @@ func addNumericWithError(left, right eval) (eval, error) { } func subtractNumericWithError(left, right eval) (eval, error) { - v1 := evalToNumeric(left) - v2 := evalToNumeric(right) + v1 := evalToNumeric(left, true) + v2 := evalToNumeric(right, true) switch v1 := v1.(type) { case *evalInt64: switch v2 := v2.(type) { @@ -111,8 +113,8 @@ func multiplyNumericWithError(left, right eval) (eval, error) { } func divideNumericWithError(left, right eval, precise bool) (eval, error) { - v1 := evalToNumeric(left) - v2 := evalToNumeric(right) + v1 := evalToNumeric(left, true) + v2 := evalToNumeric(right, true) if v1, ok := v1.(*evalFloat); ok { return mathDiv_fx(v1.f, v2) } @@ -126,11 +128,129 @@ func divideNumericWithError(left, right eval, precise bool) (eval, error) { return mathDiv_xx(v1, v2, divPrecisionIncrement) } +func integerDivideConvert(arg eval) evalNumeric { + if dec, ok := arg.(evalNumeric); ok { + return dec + } + + if b1, ok := arg.(*evalBytes); ok && b1.isHexLiteral { + hex, ok := b1.toNumericHex() + if !ok { + return newEvalDecimal(decimal.Zero, 0, 0) + } + return hex + } + return evalToDecimal(arg, 0, 0) +} + +func integerDivideNumericWithError(left, right eval) (eval, error) { + v1 := integerDivideConvert(left) + v2 := integerDivideConvert(right) + + switch v1 := v1.(type) { + case *evalInt64: + switch v2 := v2.(type) { + case *evalInt64: + return mathIntDiv_ii(v1, v2) + case *evalUint64: + return mathIntDiv_iu(v1, v2) + case *evalFloat: + return mathIntDiv_di(v1.toDecimal(0, 0), v2.toDecimal(0, 0)) + case *evalDecimal: + return mathIntDiv_di(v1.toDecimal(0, 0), v2) + } + case *evalUint64: + switch v2 := v2.(type) { + case *evalInt64: + return mathIntDiv_ui(v1, v2) + case *evalUint64: + return mathIntDiv_uu(v1, v2) + case *evalFloat: + return mathIntDiv_du(v1.toDecimal(0, 0), v2.toDecimal(0, 0)) + case *evalDecimal: + return mathIntDiv_du(v1.toDecimal(0, 0), v2) + } + case *evalFloat: + switch v2 := v2.(type) { + case *evalUint64: + return mathIntDiv_du(v1.toDecimal(0, 0), v2.toDecimal(0, 0)) + default: + return mathIntDiv_di(v1.toDecimal(0, 0), v2.toDecimal(0, 0)) + } + case *evalDecimal: + switch v2 := v2.(type) { + case *evalUint64: + return mathIntDiv_du(v1, v2.toDecimal(0, 0)) + default: + return mathIntDiv_di(v1, v2.toDecimal(0, 0)) + } + } + + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid arithmetic between: %s %s", evalToSQLValue(v1), evalToSQLValue(v2)) +} + +func modNumericWithError(left, right eval, precise bool) (eval, error) { + v1 := evalToNumeric(left, true) + v2 := evalToNumeric(right, true) + + switch v1 := v1.(type) { + case *evalInt64: + switch v2 := v2.(type) { + case *evalInt64: + return mathMod_ii(v1, v2) + case *evalUint64: + return mathMod_iu(v1, v2) + case *evalFloat: + v1f, ok := v1.toFloat() + if !ok { + return nil, errDecimalOutOfRange + } + return mathMod_ff(v1f, v2) + case *evalDecimal: + return mathMod_dd(v1.toDecimal(0, 0), v2) + } + case *evalUint64: + switch v2 := v2.(type) { + case *evalInt64: + return mathMod_ui(v1, v2) + case *evalUint64: + return mathMod_uu(v1, v2) + case *evalFloat: + v1f, ok := v1.toFloat() + if !ok { + return nil, errDecimalOutOfRange + } + return mathMod_ff(v1f, v2) + case *evalDecimal: + return mathMod_dd(v1.toDecimal(0, 0), v2) + } + case *evalDecimal: + switch v2 := v2.(type) { + case *evalFloat: + v1f, ok := v1.toFloat() + if !ok { + return nil, errDecimalOutOfRange + } + return mathMod_ff(v1f, v2) + default: + return mathMod_dd(v1, v2.toDecimal(0, 0)) + } + case *evalFloat: + v2f, ok := v2.toFloat() + if !ok { + return nil, errDecimalOutOfRange + } + return mathMod_ff(v1, v2f) + } + + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid arithmetic between: %s %s", evalToSQLValue(v1), evalToSQLValue(v2)) +} + // makeNumericAndPrioritize reorders the input parameters // to be Float64, Decimal, Uint64, Int64. func makeNumericAndPrioritize(left, right eval) (evalNumeric, evalNumeric) { - i1 := evalToNumeric(left) - i2 := evalToNumeric(right) + i1 := evalToNumeric(left, true) + i2 := evalToNumeric(right, true) switch i1.SQLType() { case sqltypes.Int64: if i2.SQLType() == sqltypes.Uint64 || i2.SQLType() == sqltypes.Float64 || i2.SQLType() == sqltypes.Decimal { @@ -161,28 +281,68 @@ func mathAdd_ii0(v1, v2 int64) (int64, error) { return result, nil } -func mathSub_ii(v1, v2 int64) (*evalInt64, error) { - result, err := mathSub_ii0(v1, v2) - return newEvalInt64(result), err +func mathAdd_ui(v1 uint64, v2 int64) (*evalUint64, error) { + result, err := mathAdd_ui0(v1, v2) + return newEvalUint64(result), err } -func mathSub_ii0(v1, v2 int64) (int64, error) { - result := v1 - v2 - if (result < v1) != (v2 > 0) { - return 0, dataOutOfRangeError(v1, v2, "BIGINT", "-") +func mathAdd_ui0(v1 uint64, v2 int64) (uint64, error) { + result := v1 + uint64(v2) + if v2 < 0 && v1 < uint64(-v2) || v2 > 0 && (result < v1 || result < uint64(v2)) { + return 0, dataOutOfRangeError(v1, v2, "BIGINT UNSIGNED", "+") } return result, nil } -func mathMul_ii(v1, v2 int64) (*evalInt64, error) { - result, err := mathMul_ii0(v1, v2) +func mathAdd_uu(v1, v2 uint64) (*evalUint64, error) { + result, err := mathAdd_uu0(v1, v2) + return newEvalUint64(result), err +} + +func mathAdd_uu0(v1, v2 uint64) (uint64, error) { + result := v1 + v2 + if result < v1 || result < v2 { + return 0, dataOutOfRangeError(v1, v2, "BIGINT UNSIGNED", "+") + } + return result, nil +} + +var errDecimalOutOfRange = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "DECIMAL value is out of range") + +func mathAdd_fx(v1 float64, v2 evalNumeric) (*evalFloat, error) { + v2f, ok := v2.toFloat() + if !ok { + return nil, errDecimalOutOfRange + } + return mathAdd_ff(v1, v2f.f), nil +} + +func mathAdd_ff(v1, v2 float64) *evalFloat { + return newEvalFloat(v1 + v2) +} + +func mathAdd_dx(v1 *evalDecimal, v2 evalNumeric) *evalDecimal { + return mathAdd_dd(v1, v2.toDecimal(0, 0)) +} + +func mathAdd_dd(v1, v2 *evalDecimal) *evalDecimal { + return newEvalDecimalWithPrec(v1.dec.Add(v2.dec), max(v1.length, v2.length)) +} + +func mathAdd_dd0(v1, v2 *evalDecimal) { + v1.dec = v1.dec.Add(v2.dec) + v1.length = max(v1.length, v2.length) +} + +func mathSub_ii(v1, v2 int64) (*evalInt64, error) { + result, err := mathSub_ii0(v1, v2) return newEvalInt64(result), err } -func mathMul_ii0(v1, v2 int64) (int64, error) { - result := v1 * v2 - if v1 != 0 && result/v1 != v2 { - return 0, dataOutOfRangeError(v1, v2, "BIGINT", "*") +func mathSub_ii0(v1, v2 int64) (int64, error) { + result := v1 - v2 + if (result < v1) != (v2 > 0) { + return 0, dataOutOfRangeError(v1, v2, "BIGINT", "-") } return result, nil } @@ -193,80 +353,104 @@ func mathSub_iu(v1 int64, v2 uint64) (*evalUint64, error) { } func mathSub_iu0(v1 int64, v2 uint64) (uint64, error) { - if v1 < 0 || v1 < int64(v2) { + if v1 < 0 { return 0, dataOutOfRangeError(v1, v2, "BIGINT UNSIGNED", "-") } return mathSub_uu0(uint64(v1), v2) } -func mathAdd_ui(v1 uint64, v2 int64) (*evalUint64, error) { - result, err := mathAdd_ui0(v1, v2) - return newEvalUint64(result), err -} - -func mathAdd_ui0(v1 uint64, v2 int64) (uint64, error) { - result := v1 + uint64(v2) - if v2 < 0 && v1 < uint64(-v2) || v2 > 0 && (result < v1 || result < uint64(v2)) { - return 0, dataOutOfRangeError(v1, v2, "BIGINT UNSIGNED", "+") - } - return result, nil -} - func mathSub_ui(v1 uint64, v2 int64) (*evalUint64, error) { result, err := mathSub_ui0(v1, v2) return newEvalUint64(result), err } func mathSub_ui0(v1 uint64, v2 int64) (uint64, error) { - if int64(v1) < v2 && v2 > 0 { + if v2 > 0 && v1 < uint64(v2) { return 0, dataOutOfRangeError(v1, v2, "BIGINT UNSIGNED", "-") } // uint - (- int) = uint + int if v2 < 0 { - return mathAdd_ui0(v1, -v2) + return mathAdd_uu0(v1, uint64(-v2)) } return mathSub_uu0(v1, uint64(v2)) } -func mathMul_ui(v1 uint64, v2 int64) (*evalUint64, error) { - result, err := mathMul_ui0(v1, v2) +func mathSub_uu(v1, v2 uint64) (*evalUint64, error) { + result, err := mathSub_uu0(v1, v2) return newEvalUint64(result), err } -func mathMul_ui0(v1 uint64, v2 int64) (uint64, error) { - if v1 == 0 || v2 == 0 { - return 0, nil +func mathSub_uu0(v1, v2 uint64) (uint64, error) { + result := v1 - v2 + if v2 > v1 { + return 0, dataOutOfRangeError(v1, v2, "BIGINT UNSIGNED", "-") } - if v2 < 0 || int64(v1) < 0 { - return 0, dataOutOfRangeError(v1, v2, "BIGINT UNSIGNED", "*") + return result, nil +} + +func mathSub_fx(v1 float64, v2 evalNumeric) (*evalFloat, error) { + v2f, ok := v2.toFloat() + if !ok { + return nil, errDecimalOutOfRange } - return mathMul_uu0(v1, uint64(v2)) + return mathSub_ff(v1, v2f.f), nil } -func mathAdd_uu(v1, v2 uint64) (*evalUint64, error) { - result, err := mathAdd_uu0(v1, v2) - return newEvalUint64(result), err +func mathSub_xf(v1 evalNumeric, v2 float64) (*evalFloat, error) { + v1f, ok := v1.toFloat() + if !ok { + return nil, errDecimalOutOfRange + } + return mathSub_ff(v1f.f, v2), nil } -func mathAdd_uu0(v1, v2 uint64) (uint64, error) { - result := v1 + v2 - if result < v1 || result < v2 { - return 0, dataOutOfRangeError(v1, v2, "BIGINT UNSIGNED", "+") +func mathSub_ff(v1, v2 float64) *evalFloat { + return newEvalFloat(v1 - v2) +} + +func mathSub_dx(v1 *evalDecimal, v2 evalNumeric) *evalDecimal { + return mathSub_dd(v1, v2.toDecimal(0, 0)) +} + +func mathSub_xd(v1 evalNumeric, v2 *evalDecimal) *evalDecimal { + return mathSub_dd(v1.toDecimal(0, 0), v2) +} + +func mathSub_dd(v1, v2 *evalDecimal) *evalDecimal { + return newEvalDecimalWithPrec(v1.dec.Sub(v2.dec), max(v1.length, v2.length)) +} + +func mathSub_dd0(v1, v2 *evalDecimal) { + v1.dec = v1.dec.Sub(v2.dec) + v1.length = max(v1.length, v2.length) +} + +func mathMul_ii(v1, v2 int64) (*evalInt64, error) { + result, err := mathMul_ii0(v1, v2) + return newEvalInt64(result), err +} + +func mathMul_ii0(v1, v2 int64) (int64, error) { + result := v1 * v2 + if v1 != 0 && result/v1 != v2 { + return 0, dataOutOfRangeError(v1, v2, "BIGINT", "*") } return result, nil } -func mathSub_uu(v1, v2 uint64) (*evalUint64, error) { - result, err := mathSub_uu0(v1, v2) +func mathMul_ui(v1 uint64, v2 int64) (*evalUint64, error) { + result, err := mathMul_ui0(v1, v2) return newEvalUint64(result), err } -func mathSub_uu0(v1, v2 uint64) (uint64, error) { - result := v1 - v2 - if v2 > v1 { - return 0, dataOutOfRangeError(v1, v2, "BIGINT UNSIGNED", "-") +func mathMul_ui0(v1 uint64, v2 int64) (uint64, error) { + if v1 == 0 || v2 == 0 { + return 0, nil } - return result, nil + if v2 < 0 { + return 0, dataOutOfRangeError(v1, v2, "BIGINT UNSIGNED", "*") + } + return mathMul_uu0(v1, uint64(v2)) } func mathMul_uu(v1, v2 uint64) (*evalUint64, error) { @@ -285,28 +469,6 @@ func mathMul_uu0(v1, v2 uint64) (uint64, error) { return result, nil } -var errDecimalOutOfRange = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "DECIMAL value is out of range") - -func mathAdd_fx(v1 float64, v2 evalNumeric) (*evalFloat, error) { - v2f, ok := v2.toFloat() - if !ok { - return nil, errDecimalOutOfRange - } - return mathAdd_ff(v1, v2f.f), nil -} - -func mathAdd_ff(v1, v2 float64) *evalFloat { - return newEvalFloat(v1 + v2) -} - -func mathSub_fx(v1 float64, v2 evalNumeric) (*evalFloat, error) { - v2f, ok := v2.toFloat() - if !ok { - return nil, errDecimalOutOfRange - } - return mathSub_ff(v1, v2f.f), nil -} - func mathMul_fx(v1 float64, v2 evalNumeric) (eval, error) { v2f, ok := v2.toFloat() if !ok { @@ -319,43 +481,6 @@ func mathMul_ff(v1, v2 float64) *evalFloat { return newEvalFloat(v1 * v2) } -func maxprec(a, b int32) int32 { - if a > b { - return a - } - return b -} - -func mathAdd_dx(v1 *evalDecimal, v2 evalNumeric) *evalDecimal { - return mathAdd_dd(v1, v2.toDecimal(0, 0)) -} - -func mathAdd_dd(v1, v2 *evalDecimal) *evalDecimal { - return newEvalDecimalWithPrec(v1.dec.Add(v2.dec), maxprec(v1.length, v2.length)) -} - -func mathAdd_dd0(v1, v2 *evalDecimal) { - v1.dec = v1.dec.Add(v2.dec) - v1.length = maxprec(v1.length, v2.length) -} - -func mathSub_dx(v1 *evalDecimal, v2 evalNumeric) *evalDecimal { - return mathSub_dd(v1, v2.toDecimal(0, 0)) -} - -func mathSub_xd(v1 evalNumeric, v2 *evalDecimal) *evalDecimal { - return mathSub_dd(v1.toDecimal(0, 0), v2) -} - -func mathSub_dd(v1, v2 *evalDecimal) *evalDecimal { - return newEvalDecimalWithPrec(v1.dec.Sub(v2.dec), maxprec(v1.length, v2.length)) -} - -func mathSub_dd0(v1, v2 *evalDecimal) { - v1.dec = v1.dec.Sub(v2.dec) - v1.length = maxprec(v1.length, v2.length) -} - func mathMul_dx(v1 *evalDecimal, v2 evalNumeric) *evalDecimal { return mathMul_dd(v1, v2.toDecimal(0, 0)) } @@ -405,36 +530,179 @@ func mathDiv_ff(v1, v2 float64) (eval, error) { func mathDiv_ff0(v1, v2 float64) (float64, error) { result := v1 / v2 - divisorLessThanOne := v2 < 1 - resultMismatch := v2*result != v1 - if divisorLessThanOne && resultMismatch { - return 0, dataOutOfRangeError(v1, v2, "BIGINT", "/") + if math.IsInf(result, 1) || math.IsInf(result, -1) { + return 0, dataOutOfRangeError(v1, v2, "DOUBLE", "/") } return result, nil } -func mathSub_xf(v1 evalNumeric, v2 float64) (*evalFloat, error) { - v1f, ok := v1.toFloat() +func mathIntDiv_ii(v1, v2 *evalInt64) (eval, error) { + if v2.i == 0 { + return nil, nil + } + result := v1.i / v2.i + return newEvalInt64(result), nil +} + +func mathIntDiv_iu(v1 *evalInt64, v2 *evalUint64) (eval, error) { + if v2.u == 0 { + return nil, nil + } + result, err := mathIntDiv_iu0(v1.i, v2.u) + return newEvalUint64(result), err +} + +func mathIntDiv_iu0(v1 int64, v2 uint64) (uint64, error) { + if v1 < 0 { + if v2 >= math.MaxInt64 { + // We know here that v2 is always so large the result + // must be 0. + return 0, nil + } + result := v1 / int64(v2) + if result < 0 { + return 0, dataOutOfRangeError(v1, v2, "BIGINT UNSIGNED", "DIV") + } + return uint64(result), nil + + } + return uint64(v1) / v2, nil +} + +func mathIntDiv_ui(v1 *evalUint64, v2 *evalInt64) (eval, error) { + if v2.i == 0 { + return nil, nil + } + result, err := mathIntDiv_ui0(v1.u, v2.i) + return newEvalUint64(result), err +} + +func mathIntDiv_ui0(v1 uint64, v2 int64) (uint64, error) { + if v2 < 0 { + if v1 >= math.MaxInt64 { + // We know that v1 is always large here and with v2, the result + // must be at least -1 so we can't store this in the available range. + return 0, dataOutOfRangeError(v1, v2, "BIGINT UNSIGNED", "DIV") + } + // Safe to cast since we know it fits in int64 when we get here. + result := int64(v1) / v2 + if result < 0 { + return 0, dataOutOfRangeError(v1, v2, "BIGINT UNSIGNED", "DIV") + } + return uint64(result), nil + } + return v1 / uint64(v2), nil +} + +func mathIntDiv_uu(v1, v2 *evalUint64) (eval, error) { + if v2.u == 0 { + return nil, nil + } + return newEvalUint64(v1.u / v2.u), nil +} + +func mathIntDiv_di(v1, v2 *evalDecimal) (eval, error) { + if v2.dec.IsZero() { + return nil, nil + } + result, err := mathIntDiv_di0(v1, v2) + return newEvalInt64(result), err +} + +func mathIntDiv_di0(v1, v2 *evalDecimal) (int64, error) { + div, _ := v1.dec.QuoRem(v2.dec, 0) + result, ok := div.Int64() if !ok { - return nil, errDecimalOutOfRange + return 0, dataOutOfRangeErrorDecimal(v1.dec, v2.dec, "BIGINT", "DIV") } - return mathSub_ff(v1f.f, v2), nil + return result, nil } -func mathSub_ff(v1, v2 float64) *evalFloat { - return newEvalFloat(v1 - v2) +func mathIntDiv_du(v1, v2 *evalDecimal) (eval, error) { + if v2.dec.IsZero() { + return nil, nil + } + result, err := mathIntDiv_du0(v1, v2) + return newEvalUint64(result), err +} + +func mathIntDiv_du0(v1, v2 *evalDecimal) (uint64, error) { + div, _ := v1.dec.QuoRem(v2.dec, 0) + result, ok := div.Uint64() + if !ok { + return 0, dataOutOfRangeErrorDecimal(v1.dec, v2.dec, "BIGINT UNSIGNED", "DIV") + } + return result, nil +} + +func mathMod_ii(v1, v2 *evalInt64) (eval, error) { + if v2.i == 0 { + return nil, nil + } + return newEvalInt64(v1.i % v2.i), nil +} + +func mathMod_iu(v1 *evalInt64, v2 *evalUint64) (eval, error) { + if v2.u == 0 { + return nil, nil + } + return newEvalInt64(mathMod_iu0(v1.i, v2.u)), nil +} + +func mathMod_iu0(v1 int64, v2 uint64) int64 { + if v1 == math.MinInt64 && v2 == math.MaxInt64+1 { + return 0 + } + if v2 > math.MaxInt64 { + return v1 + } + return v1 % int64(v2) } -func parseStringToFloat(str string) float64 { - str = strings.TrimSpace(str) +func mathMod_ui(v1 *evalUint64, v2 *evalInt64) (eval, error) { + if v2.i == 0 { + return nil, nil + } + result, err := mathMod_ui0(v1.u, v2.i) + return newEvalUint64(result), err +} + +func mathMod_ui0(v1 uint64, v2 int64) (uint64, error) { + if v2 < 0 { + return v1 % uint64(-v2), nil + } + return v1 % uint64(v2), nil +} + +func mathMod_uu(v1, v2 *evalUint64) (eval, error) { + if v2.u == 0 { + return nil, nil + } + return newEvalUint64(v1.u % v2.u), nil +} + +func mathMod_ff(v1, v2 *evalFloat) (eval, error) { + if v2.f == 0.0 { + return nil, nil + } + return newEvalFloat(math.Mod(v1.f, v2.f)), nil +} + +func mathMod_dd(v1, v2 *evalDecimal) (eval, error) { + if v2.dec.IsZero() { + return nil, nil + } + + dec, prec := mathMod_dd0(v1, v2) + return newEvalDecimalWithPrec(dec, prec), nil +} - // We only care to parse as many of the initial float characters of the - // string as possible. This functionality is implemented in the `strconv` package - // of the standard library, but not exposed, so we hook into it. - val, _, err := hack.ParseFloatPrefix(str, 64) - if err != nil { - return 0.0 +func mathMod_dd0(v1, v2 *evalDecimal) (decimal.Decimal, int32) { + length := v1.length + if v2.length > length { + length = v2.length } - return val + _, rem := v1.dec.QuoRem(v2.dec, 0) + return rem, length } diff --git a/go/vt/vtgate/evalengine/cached_size.go b/go/vt/vtgate/evalengine/cached_size.go index 1e0740cbb50..69c39249fb9 100644 --- a/go/vt/vtgate/evalengine/cached_size.go +++ b/go/vt/vtgate/evalengine/cached_size.go @@ -17,13 +17,7 @@ limitations under the License. package evalengine -import ( - "math" - "reflect" - "unsafe" - - hack "vitess.io/vitess/go/hack" -) +import hack "vitess.io/vitess/go/hack" type cachedObject interface { CachedSize(alloc bool) int64 @@ -69,7 +63,7 @@ func (cached *BindVariable) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(32) + size += int64(24) } // field Key string size += hack.RuntimeAllocSize(int64(len(cached.Key))) @@ -183,6 +177,24 @@ func (cached *ComparisonExpr) CachedSize(alloc bool) int64 { } return size } +func (cached *CompiledExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(64) + } + // field code []vitess.io/vitess/go/vt/vtgate/evalengine.frame + { + size += hack.RuntimeAllocSize(int64(cap(cached.code)) * int64(8)) + } + // field original vitess.io/vitess/go/vt/vtgate/evalengine.Expr + if cc, ok := cached.original.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} func (cached *ConvertExpr) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -209,8 +221,6 @@ func (cached *ConvertUsingExpr) CachedSize(alloc bool) int64 { size += cached.UnaryExpr.CachedSize(false) return size } - -//go:nocheckptr func (cached *InExpr) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -221,17 +231,30 @@ func (cached *InExpr) CachedSize(alloc bool) int64 { } // field BinaryExpr vitess.io/vitess/go/vt/vtgate/evalengine.BinaryExpr size += cached.BinaryExpr.CachedSize(false) - // field Hashed map[[16]byte]int - if cached.Hashed != nil { - size += int64(48) - hmap := reflect.ValueOf(cached.Hashed) - numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) - numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) - size += hack.RuntimeAllocSize(int64(numOldBuckets * 208)) - if len(cached.Hashed) > 0 || numBuckets > 1 { - size += hack.RuntimeAllocSize(int64(numBuckets * 208)) - } + return size +} +func (cached *IntervalExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *IntroducerExpr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) } + // field UnaryExpr vitess.io/vitess/go/vt/vtgate/evalengine.UnaryExpr + size += cached.UnaryExpr.CachedSize(false) return size } func (cached *IsExpr) CachedSize(alloc bool) int64 { @@ -256,7 +279,7 @@ func (cached *LikeExpr) CachedSize(alloc bool) int64 { } // field BinaryExpr vitess.io/vitess/go/vt/vtgate/evalengine.BinaryExpr size += cached.BinaryExpr.CachedSize(false) - // field Match vitess.io/vitess/go/mysql/collations.WildcardPattern + // field Match vitess.io/vitess/go/mysql/collations/colldata.WildcardPattern if cc, ok := cached.Match.(cachedObject); ok { size += cc.CachedSize(true) } @@ -358,6 +381,78 @@ func (cached *builtinASCII) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached *builtinAbs) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinAcos) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinAsin) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinAtan) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinAtan2) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinBinToUUID) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} func (cached *builtinBitCount) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -442,7 +537,7 @@ func (cached *builtinCollation) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } -func (cached *builtinFromBase64) CachedSize(alloc bool) int64 { +func (cached *builtinConcat) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } @@ -454,7 +549,7 @@ func (cached *builtinFromBase64) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } -func (cached *builtinHex) CachedSize(alloc bool) int64 { +func (cached *builtinConcatWs) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } @@ -466,7 +561,7 @@ func (cached *builtinHex) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } -func (cached *builtinJSONArray) CachedSize(alloc bool) int64 { +func (cached *builtinConv) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } @@ -478,7 +573,7 @@ func (cached *builtinJSONArray) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } -func (cached *builtinJSONContainsPath) CachedSize(alloc bool) int64 { +func (cached *builtinConvertTz) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } @@ -490,7 +585,7 @@ func (cached *builtinJSONContainsPath) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } -func (cached *builtinJSONDepth) CachedSize(alloc bool) int64 { +func (cached *builtinCos) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } @@ -502,7 +597,7 @@ func (cached *builtinJSONDepth) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } -func (cached *builtinJSONExtract) CachedSize(alloc bool) int64 { +func (cached *builtinCot) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } @@ -514,7 +609,7 @@ func (cached *builtinJSONExtract) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } -func (cached *builtinJSONKeys) CachedSize(alloc bool) int64 { +func (cached *builtinCrc32) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } @@ -526,7 +621,7 @@ func (cached *builtinJSONKeys) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } -func (cached *builtinJSONLength) CachedSize(alloc bool) int64 { +func (cached *builtinCurdate) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } @@ -538,7 +633,7 @@ func (cached *builtinJSONLength) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } -func (cached *builtinJSONObject) CachedSize(alloc bool) int64 { +func (cached *builtinDatabase) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } @@ -550,7 +645,7 @@ func (cached *builtinJSONObject) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } -func (cached *builtinJSONUnquote) CachedSize(alloc bool) int64 { +func (cached *builtinDate) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } @@ -562,7 +657,7 @@ func (cached *builtinJSONUnquote) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } -func (cached *builtinLength) CachedSize(alloc bool) int64 { +func (cached *builtinDateFormat) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } @@ -574,7 +669,7 @@ func (cached *builtinLength) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } -func (cached *builtinMultiComparison) CachedSize(alloc bool) int64 { +func (cached *builtinDateMath) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } @@ -586,7 +681,7 @@ func (cached *builtinMultiComparison) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } -func (cached *builtinRepeat) CachedSize(alloc bool) int64 { +func (cached *builtinDayOfMonth) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } @@ -598,7 +693,7 @@ func (cached *builtinRepeat) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } -func (cached *builtinToBase64) CachedSize(alloc bool) int64 { +func (cached *builtinDayOfWeek) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } @@ -610,7 +705,7 @@ func (cached *builtinToBase64) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } -func (cached *builtinWeightString) CachedSize(alloc bool) int64 { +func (cached *builtinDayOfYear) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } @@ -618,57 +713,1003 @@ func (cached *builtinWeightString) CachedSize(alloc bool) int64 { if alloc { size += int64(48) } - // field String vitess.io/vitess/go/vt/vtgate/evalengine.Expr - if cc, ok := cached.String.(cachedObject); ok { - size += cc.CachedSize(true) + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinDegrees) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) } - // field Cast string - size += hack.RuntimeAllocSize(int64(len(cached.Cast))) + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) return size } -func (cached *evalBytes) CachedSize(alloc bool) int64 { +func (cached *builtinExp) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } size := int64(0) if alloc { - size += int64(32) + size += int64(48) } - // field bytes []byte - { - size += hack.RuntimeAllocSize(int64(cap(cached.bytes))) + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinFloor) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) return size } -func (cached *evalDecimal) CachedSize(alloc bool) int64 { +func (cached *builtinFromBase64) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } - // field dec vitess.io/vitess/go/vt/vtgate/evalengine/internal/decimal.Decimal - size += cached.dec.CachedSize(false) + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) return size } -func (cached *evalFloat) CachedSize(alloc bool) int64 { +func (cached *builtinFromUnixtime) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } size := int64(0) if alloc { - size += int64(8) + size += int64(48) } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) return size } -func (cached *evalInt64) CachedSize(alloc bool) int64 { +func (cached *builtinHex) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } size := int64(0) if alloc { - size += int64(8) + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinHour) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinInet6Aton) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinInet6Ntoa) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinInetAton) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinInetNtoa) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinIsIPV4) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinIsIPV4Compat) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinIsIPV4Mapped) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinIsIPV6) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinIsUUID) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinJSONArray) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinJSONContainsPath) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinJSONDepth) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinJSONExtract) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinJSONKeys) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinJSONLength) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinJSONObject) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinJSONUnquote) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinLeftRight) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinLength) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinLn) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinLog) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinLog10) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinLog2) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinMD5) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinMakedate) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinMaketime) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinMicrosecond) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinMinute) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinMonth) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinMonthName) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinMultiComparison) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinNow) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinOrd) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinPad) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinPi) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinPow) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinQuarter) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinRadians) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinRandomBytes) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinRegexpInstr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinRegexpLike) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinRegexpReplace) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinRegexpSubstr) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinRepeat) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinRound) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinSHA1) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinSHA2) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinSecond) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinSign) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinSin) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinSqrt) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinStrcmp) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinSysdate) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinTan) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinTime) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinToBase64) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinTrim) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinTruncate) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinUUID) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinUUIDToBin) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinUnhex) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinUnixTimestamp) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinUser) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinUtcDate) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinVersion) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinWeek) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinWeekDay) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinWeekOfYear) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinWeightString) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field Expr vitess.io/vitess/go/vt/vtgate/evalengine.Expr + if cc, ok := cached.Expr.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Cast string + size += hack.RuntimeAllocSize(int64(len(cached.Cast))) + return size +} +func (cached *builtinYear) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinYearWeek) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *evalBytes) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field bytes []byte + { + size += hack.RuntimeAllocSize(int64(cap(cached.bytes))) + } + return size +} +func (cached *evalDecimal) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(16) + } + // field dec vitess.io/vitess/go/mysql/decimal.Decimal + size += cached.dec.CachedSize(false) + return size +} +func (cached *evalFloat) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + return size +} +func (cached *evalInt64) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(8) + } + return size +} +func (cached *evalTemporal) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) } return size } diff --git a/go/vt/vtgate/evalengine/collation.go b/go/vt/vtgate/evalengine/collation.go new file mode 100644 index 00000000000..9d53a9d8ea9 --- /dev/null +++ b/go/vt/vtgate/evalengine/collation.go @@ -0,0 +1,27 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import "vitess.io/vitess/go/mysql/collations" + +func defaultCoercionCollation(id collations.ID) collations.TypedCollation { + return collations.TypedCollation{ + Collation: id, + Coercibility: collations.CoerceCoercible, + Repertoire: collations.RepertoireUnicode, + } +} diff --git a/go/vt/vtgate/evalengine/compare.go b/go/vt/vtgate/evalengine/compare.go index 4f638727413..aa452c61729 100644 --- a/go/vt/vtgate/evalengine/compare.go +++ b/go/vt/vtgate/evalengine/compare.go @@ -17,13 +17,13 @@ limitations under the License. package evalengine import ( - "time" + "bytes" - "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/json" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine/internal/decimal" ) func compareNumeric(left, right eval) (int, error) { @@ -117,86 +117,144 @@ func compareNumeric(left, right eval) (int, error) { return 1, nil } -// matchExprWithAnyDateFormat formats the given expr (usually a string) to a date using the first format -// that does not return an error. -func matchExprWithAnyDateFormat(e eval) (t time.Time, err error) { - expr := e.(*evalBytes) - t, err = sqlparser.ParseDate(expr.string()) - if err == nil { - return +func compareDates(l, r *evalTemporal) int { + return l.dt.Compare(r.dt) +} + +func compareDateAndString(l, r eval) int { + if tt, ok := l.(*evalTemporal); ok { + return tt.dt.Compare(r.(*evalBytes).toDateBestEffort()) + } + if tt, ok := r.(*evalTemporal); ok { + return l.(*evalBytes).toDateBestEffort().Compare(tt.dt) + } + panic("unreachable") +} + +// More on string collations coercibility on MySQL documentation: +// - https://dev.mysql.com/doc/refman/8.0/en/charset-collation-coercibility.html +func compareStrings(l, r eval) (int, error) { + l, r, col, err := mergeAndCoerceCollations(l, r) + if err != nil { + return 0, err } - t, err = sqlparser.ParseDateTime(expr.string()) - if err == nil { - return + collation := colldata.Lookup(col.Collation) + if collation == nil { + return 0, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "cannot compare strings, collation is unknown or unsupported (collation ID: %d)", col.Collation) } - t, err = sqlparser.ParseTime(expr.string()) - return + return collation.Collate(l.ToRawBytes(), r.ToRawBytes(), false), nil } -// Date comparison based on: -// - https://dev.mysql.com/doc/refman/8.0/en/type-conversion.html -// - https://dev.mysql.com/doc/refman/8.0/en/date-and-time-type-conversion.html -func compareDates(l, r eval) (int, error) { - lTime, err := l.(*evalBytes).parseDate() +func compareJSON(l, r eval) (int, error) { + lj, err := argToJSON(l) if err != nil { return 0, err } - rTime, err := r.(*evalBytes).parseDate() + + rj, err := argToJSON(r) if err != nil { return 0, err } - return compareGoTimes(lTime, rTime) + + return compareJSONValue(lj, rj) } -func compareDateAndString(l, r eval) (int, error) { - lb := l.(*evalBytes) - rb := r.(*evalBytes) +// compareJSONValue compares two JSON values. +// See https://dev.mysql.com/doc/refman/8.0/en/json.html#json-comparison for all the rules. +func compareJSONValue(lj, rj *json.Value) (int, error) { + cmp := int(lj.Type()) - int(rj.Type()) + if cmp != 0 { + return cmp, nil + } - var lTime, rTime time.Time - var err error - switch { - case sqltypes.IsDate(lb.SQLType()): - lTime, err = lb.parseDate() - if err != nil { - return 0, err + switch lj.Type() { + case json.TypeNull: + return 0, nil + case json.TypeNumber: + ld, ok := lj.Decimal() + if !ok { + return 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "DECIMAL value is out of range") } - rTime, err = matchExprWithAnyDateFormat(r) - if err != nil { - return 0, err + rd, ok := rj.Decimal() + if !ok { + return 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "DECIMAL value is out of range") } - case typeIsTextual(lb.SQLType()): - lTime, err = matchExprWithAnyDateFormat(l) - if err != nil { - return 0, err + return ld.Cmp(rd), nil + case json.TypeString: + return colldata.Lookup(collationJSON.Collation).Collate(lj.ToRawBytes(), rj.ToRawBytes(), false), nil + case json.TypeBlob, json.TypeBit, json.TypeOpaque: + return bytes.Compare(lj.ToUnencodedBytes(), rj.ToUnencodedBytes()), nil + case json.TypeBoolean: + if lj == rj { + return 0, nil } - rTime, err = rb.parseDate() - if err != nil { - return 0, err + if lj == json.ValueFalse { + return -1, nil } - } - return compareGoTimes(lTime, rTime) -} - -func compareGoTimes(lTime, rTime time.Time) (int, error) { - if lTime.Before(rTime) { - return -1, nil - } - if lTime.After(rTime) { return 1, nil - } - return 0, nil -} + case json.TypeDate: + ld, _ := lj.Date() + rd, _ := rj.Date() + return ld.Compare(rd), nil + case json.TypeDateTime: + ld, _ := lj.DateTime() + rd, _ := rj.DateTime() + return ld.Compare(rd), nil + case json.TypeTime: + ld, _ := lj.Time() + rd, _ := rj.Time() + return ld.Compare(rd), nil + case json.TypeArray: + la, _ := lj.Array() + ra, _ := rj.Array() + until := len(la) + if len(la) > len(ra) { + until = len(ra) + } + for i := 0; i < until; i++ { + cmp, err := compareJSONValue(la[i], ra[i]) + if err != nil { + return 0, err + } + if cmp != 0 { + return cmp, nil + } + } + return len(la) - len(ra), nil + case json.TypeObject: + // These rules are not documented but this is the so far + // best effort reverse engineered implementation based on + // what MySQL returns in our tests. + lo, _ := lj.Object() + ro, _ := rj.Object() -// More on string collations coercibility on MySQL documentation: -// - https://dev.mysql.com/doc/refman/8.0/en/charset-collation-coercibility.html -func compareStrings(l, r eval) (int, error) { - l, r, col, err := mergeCollations(l, r) - if err != nil { - return 0, err - } - collation := col.Get() - if collation == nil { - panic("unknown collation after coercion") + if lo.Len() != ro.Len() { + return lo.Len() - ro.Len(), nil + } + + rks := ro.Keys() + lks := lo.Keys() + + for i := 0; i < len(lks); i++ { + if lks[i] < rks[i] { + return -1, nil + } + if lks[i] > rks[i] { + return 1, nil + } + } + + for i := 0; i < len(lks); i++ { + cmp, err := compareJSONValue(lo.Get(lks[i]), ro.Get(rks[i])) + if err != nil { + return 0, err + } + if cmp != 0 { + return cmp, nil + } + } + return 0, nil } - return collation.Collate(l.(*evalBytes).bytes, r.(*evalBytes).bytes, false), nil + + return cmp, nil } diff --git a/go/vt/vtgate/evalengine/compiler.go b/go/vt/vtgate/evalengine/compiler.go new file mode 100644 index 00000000000..23f7a9f10aa --- /dev/null +++ b/go/vt/vtgate/evalengine/compiler.go @@ -0,0 +1,516 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql/json" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +type frame func(env *ExpressionEnv) int + +type compiler struct { + cfg *Config + asm assembler +} + +type CompilerLog interface { + Instruction(ins string, args ...any) + Stack(old, new int) +} + +type compiledCoercion struct { + col colldata.Collation + left colldata.Coercion + right colldata.Coercion +} + +type ctype struct { + Type sqltypes.Type + Flag typeFlag + Col collations.TypedCollation +} + +func (ct ctype) nullable() bool { + return ct.Flag&flagNullable != 0 +} + +func (ct ctype) isTextual() bool { + return sqltypes.IsText(ct.Type) || sqltypes.IsBinary(ct.Type) +} + +func (ct ctype) isHexOrBitLiteral() bool { + return ct.Flag&flagBit != 0 || ct.Flag&flagHex != 0 +} + +func (c *compiler) unsupported(expr Expr) error { + return vterrors.Errorf(vtrpc.Code_UNIMPLEMENTED, "unsupported compilation for expression '%s'", FormatExpr(expr)) +} + +func (c *compiler) compile(expr Expr) (ctype, error) { + ct, err := expr.compile(c) + if err != nil { + return ctype{}, err + } + if c.asm.stack.cur != 1 { + return ctype{}, vterrors.Errorf(vtrpc.Code_INTERNAL, "bad compilation: stack pointer at %d after compilation", c.asm.stack.cur) + } + return ct, nil +} + +func (c *compiler) compileToNumeric(ct ctype, offset int, fallback sqltypes.Type, preciseDatetime bool) ctype { + if sqltypes.IsNumber(ct.Type) { + return ct + } + if ct.Type == sqltypes.VarBinary && (ct.Flag&flagHex) != 0 { + c.asm.Convert_hex(offset) + return ctype{sqltypes.Uint64, ct.Flag, collationNumeric} + } + + if sqltypes.IsDateOrTime(ct.Type) { + if preciseDatetime { + c.asm.Convert_Ti(offset) + return ctype{sqltypes.Int64, ct.Flag, collationNumeric} + } + c.asm.Convert_Tf(offset) + return ctype{sqltypes.Float64, ct.Flag, collationNumeric} + } + + switch fallback { + case sqltypes.Int64: + c.asm.Convert_xi(offset) + return ctype{sqltypes.Int64, ct.Flag, collationNumeric} + case sqltypes.Uint64: + c.asm.Convert_xu(offset) + return ctype{sqltypes.Uint64, ct.Flag, collationNumeric} + case sqltypes.Decimal: + c.asm.Convert_xd(offset, 0, 0) + return ctype{sqltypes.Decimal, ct.Flag, collationNumeric} + } + c.asm.Convert_xf(offset) + return ctype{sqltypes.Float64, ct.Flag, collationNumeric} +} + +func (c *compiler) compileToInt64(ct ctype, offset int) ctype { + switch ct.Type { + case sqltypes.Int64: + return ct + case sqltypes.Uint64: + c.asm.Convert_ui(offset) + // TODO: specialization + default: + c.asm.Convert_xi(offset) + } + return ctype{sqltypes.Int64, ct.Flag, collationNumeric} +} + +func (c *compiler) compileToUint64(ct ctype, offset int) ctype { + switch ct.Type { + case sqltypes.Uint64: + return ct + case sqltypes.Int64: + c.asm.Convert_iu(offset) + // TODO: specialization + default: + c.asm.Convert_xu(offset) + } + return ctype{sqltypes.Uint64, ct.Flag, collationNumeric} +} + +func (c *compiler) compileToBitwiseUint64(ct ctype, offset int) ctype { + switch ct.Type { + case sqltypes.Uint64: + return ct + case sqltypes.Int64: + c.asm.Convert_iu(offset) + case sqltypes.Decimal: + c.asm.Convert_dbit(offset) + // TODO: specialization + default: + c.asm.Convert_xu(offset) + } + return ctype{sqltypes.Uint64, ct.Flag, collationNumeric} +} + +func (c *compiler) compileToFloat(ct ctype, offset int) ctype { + if sqltypes.IsFloat(ct.Type) { + return ct + } + switch ct.Type { + case sqltypes.Int64: + c.asm.Convert_if(offset) + case sqltypes.Uint64: + // only emit u->f conversion if this is not a hex value; hex values + // will already be converted + c.asm.Convert_uf(offset) + default: + c.asm.Convert_xf(offset) + } + return ctype{sqltypes.Float64, ct.Flag, collationNumeric} +} + +func (c *compiler) compileToDecimal(ct ctype, offset int) ctype { + if sqltypes.IsDecimal(ct.Type) { + return ct + } + switch ct.Type { + case sqltypes.Int64: + c.asm.Convert_id(offset) + case sqltypes.Uint64: + c.asm.Convert_ud(offset) + default: + c.asm.Convert_xd(offset, 0, 0) + } + return ctype{sqltypes.Decimal, ct.Flag, collationNumeric} +} + +func (c *compiler) compileToDate(doct ctype, offset int) ctype { + switch doct.Type { + case sqltypes.Date: + return doct + default: + c.asm.Convert_xD(offset) + } + return ctype{Type: sqltypes.Date, Col: collationBinary, Flag: flagNullable} +} + +func (c *compiler) compileToDateTime(doct ctype, offset, prec int) ctype { + switch doct.Type { + case sqltypes.Datetime: + c.asm.Convert_tp(offset, prec) + return doct + default: + c.asm.Convert_xDT(offset, prec) + } + return ctype{Type: sqltypes.Datetime, Col: collationBinary, Flag: flagNullable} +} + +func (c *compiler) compileToTime(doct ctype, offset, prec int) ctype { + switch doct.Type { + case sqltypes.Time: + c.asm.Convert_tp(offset, prec) + return doct + default: + c.asm.Convert_xT(offset, prec) + } + return ctype{Type: sqltypes.Time, Col: collationBinary, Flag: flagNullable} +} + +func (c *compiler) compileNullCheck1(ct ctype) *jump { + if ct.nullable() { + j := c.asm.jumpFrom() + c.asm.NullCheck1(j) + return j + } + return nil +} + +func (c *compiler) compileNullCheck1r(ct ctype) *jump { + if ct.nullable() { + j := c.asm.jumpFrom() + c.asm.NullCheck1r(j) + return j + } + return nil +} + +func (c *compiler) compileNullCheck2(lt, rt ctype) *jump { + if lt.nullable() || rt.nullable() { + j := c.asm.jumpFrom() + c.asm.NullCheck2(j) + return j + } + return nil +} + +func (c *compiler) compileNullCheck3(arg1, arg2, arg3 ctype) *jump { + if arg1.nullable() || arg2.nullable() || arg3.nullable() { + j := c.asm.jumpFrom() + c.asm.NullCheck3(j) + return j + } + return nil +} + +func (c *compiler) compileNullCheckArg(ct ctype, offset int) *jump { + if ct.nullable() { + j := c.asm.jumpFrom() + c.asm.NullCheckArg(j, offset) + return j + } + return nil +} + +func (c *compiler) compileNullCheckOffset(ct ctype, offset int) *jump { + if ct.nullable() { + j := c.asm.jumpFrom() + c.asm.NullCheckOffset(j, offset) + return j + } + return nil +} + +func (c *compiler) compileNumericPriority(lt, rt ctype) (ctype, ctype, bool) { + switch lt.Type { + case sqltypes.Int64: + if rt.Type == sqltypes.Uint64 || rt.Type == sqltypes.Float64 || rt.Type == sqltypes.Decimal { + return rt, lt, true + } + case sqltypes.Uint64: + if rt.Type == sqltypes.Float64 || rt.Type == sqltypes.Decimal { + return rt, lt, true + } + case sqltypes.Decimal: + if rt.Type == sqltypes.Float64 { + return rt, lt, true + } + } + return lt, rt, false +} + +func (c *compiler) compareNumericTypes(lt ctype, rt ctype) (swapped bool) { + switch lt.Type { + case sqltypes.Int64: + switch rt.Type { + case sqltypes.Int64: + c.asm.CmpNum_ii() + case sqltypes.Uint64: + c.asm.CmpNum_iu(2, 1) + case sqltypes.Float64: + c.asm.CmpNum_if(2, 1) + case sqltypes.Decimal: + c.asm.CmpNum_id(2, 1) + } + case sqltypes.Uint64: + switch rt.Type { + case sqltypes.Int64: + c.asm.CmpNum_iu(1, 2) + swapped = true + case sqltypes.Uint64: + c.asm.CmpNum_uu() + case sqltypes.Float64: + c.asm.CmpNum_uf(2, 1) + case sqltypes.Decimal: + c.asm.CmpNum_ud(2, 1) + } + case sqltypes.Float64: + switch rt.Type { + case sqltypes.Int64: + c.asm.CmpNum_if(1, 2) + swapped = true + case sqltypes.Uint64: + c.asm.CmpNum_uf(1, 2) + swapped = true + case sqltypes.Float64: + c.asm.CmpNum_ff() + case sqltypes.Decimal: + c.asm.CmpNum_fd(2, 1) + } + + case sqltypes.Decimal: + switch rt.Type { + case sqltypes.Int64: + c.asm.CmpNum_id(1, 2) + swapped = true + case sqltypes.Uint64: + c.asm.CmpNum_ud(1, 2) + swapped = true + case sqltypes.Float64: + c.asm.CmpNum_fd(1, 2) + swapped = true + case sqltypes.Decimal: + c.asm.CmpNum_dd() + } + } + return +} + +func (c *compiler) compareAsStrings(lt ctype, rt ctype) error { + merged, coerceLeft, coerceRight, err := mergeCollations(lt.Col, rt.Col, lt.Type, rt.Type) + if err != nil { + return err + } + if coerceLeft == nil && coerceRight == nil { + c.asm.CmpString_collate(colldata.Lookup(merged.Collation)) + } else { + if coerceLeft == nil { + coerceLeft = func(dst, in []byte) ([]byte, error) { return in, nil } + } + if coerceRight == nil { + coerceRight = func(dst, in []byte) ([]byte, error) { return in, nil } + } + c.asm.CmpString_coerce(&compiledCoercion{ + col: colldata.Lookup(merged.Collation), + left: coerceLeft, + right: coerceRight, + }) + } + return nil +} + +func isEncodingJSONSafe(col collations.ID) bool { + switch colldata.Lookup(col).Charset().(type) { + case charset.Charset_utf8mb4, charset.Charset_utf8mb3, charset.Charset_binary: + return true + default: + return false + } +} + +func (c *compiler) compileParseJSON(fn string, doct ctype, offset int) (ctype, error) { + switch doct.Type { + case sqltypes.TypeJSON: + case sqltypes.VarChar, sqltypes.VarBinary: + c.asm.Parse_j(offset) + default: + return ctype{}, errJSONType(fn) + } + return ctype{Type: sqltypes.TypeJSON, Flag: doct.Flag, Col: collationJSON}, nil +} + +func (c *compiler) compileToJSON(doct ctype, offset int) (ctype, error) { + switch doct.Type { + case sqltypes.TypeJSON: + return doct, nil + case sqltypes.Float64: + c.asm.Convert_fj(offset) + case sqltypes.Int64: + c.asm.Convert_ij(offset, doct.Flag&flagIsBoolean != 0) + case sqltypes.Uint64: + c.asm.Convert_uj(offset) + case sqltypes.Decimal: + c.asm.Convert_dj(offset) + case sqltypes.VarChar: + c.asm.Convert_cj(offset) + case sqltypes.VarBinary: + c.asm.Convert_bj(offset) + case sqltypes.Null: + c.asm.Convert_Nj(offset) + case sqltypes.Date, sqltypes.Datetime, sqltypes.Timestamp, sqltypes.Time: + c.asm.Convert_Tj(offset) + default: + return ctype{}, vterrors.Errorf(vtrpc.Code_UNIMPLEMENTED, "Unsupported type conversion: %s AS JSON", doct.Type) + } + return ctype{Type: sqltypes.TypeJSON, Col: collationJSON}, nil +} + +func (c *compiler) compileArgToJSON(doct ctype, offset int) (ctype, error) { + switch doct.Type { + case sqltypes.TypeJSON: + return doct, nil + case sqltypes.Float64: + c.asm.Convert_fj(offset) + case sqltypes.Int64: + c.asm.Convert_ij(offset, doct.Flag&flagIsBoolean != 0) + case sqltypes.Uint64: + c.asm.Convert_uj(offset) + case sqltypes.Decimal: + c.asm.Convert_dj(offset) + case sqltypes.VarChar: + c.asm.ConvertArg_cj(offset) + case sqltypes.VarBinary: + c.asm.Convert_bj(offset) + case sqltypes.Null: + c.asm.Convert_Nj(offset) + case sqltypes.Date, sqltypes.Datetime, sqltypes.Timestamp, sqltypes.Time: + c.asm.Convert_Tj(offset) + default: + return ctype{}, vterrors.Errorf(vtrpc.Code_UNIMPLEMENTED, "Unsupported type conversion: %s AS JSON", doct.Type) + } + return ctype{Type: sqltypes.TypeJSON, Col: collationJSON}, nil +} + +func (c *compiler) compileToJSONKey(key ctype) error { + if key.Type == sqltypes.Null { + return errJSONKeyIsNil + } + if key.Type == sqltypes.VarChar && isEncodingJSONSafe(key.Col.Collation) { + return nil + } + if key.Type == sqltypes.VarBinary { + return nil + } + c.asm.Convert_xc(1, sqltypes.VarChar, c.cfg.Collation, 0, false) + return nil +} + +func (c *compiler) jsonExtractPath(expr Expr) (*json.Path, error) { + path, ok := expr.(*Literal) + if !ok { + return nil, errJSONPath + } + pathBytes, ok := path.inner.(*evalBytes) + if !ok { + return nil, errJSONPath + } + var parser json.PathParser + return parser.ParseBytes(pathBytes.bytes) +} + +func (c *compiler) jsonExtractOneOrAll(fname string, expr Expr) (jsonMatch, error) { + lit, ok := expr.(*Literal) + if !ok { + return jsonMatchInvalid, errOneOrAll(fname) + } + b, ok := lit.inner.(*evalBytes) + if !ok { + return jsonMatchInvalid, errOneOrAll(fname) + } + return intoOneOrAll(fname, b.string()) +} + +func (c *compiler) compareAsJSON(lt ctype, rt ctype) error { + _, err := c.compileArgToJSON(lt, 2) + if err != nil { + return err + } + + _, err = c.compileArgToJSON(rt, 1) + if err != nil { + return err + } + c.asm.CmpJSON() + + return nil +} + +func (c *compiler) compileCheckTrue(when ctype, offset int) error { + switch when.Type { + case sqltypes.Int64: + c.asm.Convert_iB(offset) + case sqltypes.Uint64: + c.asm.Convert_uB(offset) + case sqltypes.Float64: + c.asm.Convert_fB(offset) + case sqltypes.Decimal: + c.asm.Convert_dB(offset) + case sqltypes.VarChar, sqltypes.VarBinary: + c.asm.Convert_bB(offset) + case sqltypes.Timestamp, sqltypes.Datetime, sqltypes.Time, sqltypes.Date: + c.asm.Convert_TB(offset) + case sqltypes.Null: + c.asm.SetBool(offset, false) + default: + return vterrors.Errorf(vtrpc.Code_UNIMPLEMENTED, "unsupported Truth check: %s", when.Type) + } + return nil +} diff --git a/go/vt/vtgate/evalengine/compiler_asm.go b/go/vt/vtgate/evalengine/compiler_asm.go new file mode 100644 index 00000000000..317423696db --- /dev/null +++ b/go/vt/vtgate/evalengine/compiler_asm.go @@ -0,0 +1,4736 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "bytes" + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/binary" + gohex "encoding/hex" + "errors" + "hash/crc32" + "math" + "math/bits" + "net/netip" + "strconv" + "time" + + "github.com/google/uuid" + + "vitess.io/vitess/go/mysql/collations/colldata" + + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/datetime" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/fastparse" + "vitess.io/vitess/go/mysql/hex" + "vitess.io/vitess/go/mysql/icuregex" + "vitess.io/vitess/go/mysql/json" + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vthash" +) + +type jump struct { + from, to int +} + +func (j *jump) offset() int { + return j.to - j.from +} + +type assembler struct { + ins []frame + log CompilerLog + stack struct { + cur int + max int + } +} + +func (asm *assembler) jumpFrom() *jump { + return &jump{from: len(asm.ins)} +} + +func (asm *assembler) jumpDestination(jumps ...*jump) { + for _, j := range jumps { + if j != nil { + j.to = len(asm.ins) + } + } +} + +func (asm *assembler) adjustStack(offset int) { + asm.stack.cur += offset + if asm.stack.cur < 0 { + panic("negative stack position") + } + if asm.stack.cur > asm.stack.max { + asm.stack.max = asm.stack.cur + } + if asm.log != nil { + asm.log.Stack(asm.stack.cur-offset, asm.stack.cur) + } +} + +func (asm *assembler) emit(f frame, instruction string, args ...any) { + if asm.log != nil { + asm.log.Instruction(instruction, args...) + } + asm.ins = append(asm.ins, f) +} + +func (asm *assembler) Add_dd() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalDecimal) + r := env.vm.stack[env.vm.sp-1].(*evalDecimal) + mathAdd_dd0(l, r) + env.vm.sp-- + return 1 + }, "ADD DECIMAL(SP-2), DECIMAL(SP-1)") +} + +func (asm *assembler) Add_ff() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalFloat) + r := env.vm.stack[env.vm.sp-1].(*evalFloat) + l.f += r.f + env.vm.sp-- + return 1 + }, "ADD FLOAT64(SP-2), FLOAT64(SP-1)") +} + +func (asm *assembler) Add_ii() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalInt64) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + l.i, env.vm.err = mathAdd_ii0(l.i, r.i) + env.vm.sp-- + return 1 + }, "ADD INT64(SP-2), INT64(SP-1)") +} + +func (asm *assembler) Add_ui(swap bool) { + asm.adjustStack(-1) + + if swap { + asm.emit(func(env *ExpressionEnv) int { + var u uint64 + l := env.vm.stack[env.vm.sp-1].(*evalUint64) + r := env.vm.stack[env.vm.sp-2].(*evalInt64) + u, env.vm.err = mathAdd_ui0(l.u, r.i) + env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalUint64(u) + env.vm.sp-- + return 1 + }, "ADD UINT64(SP-1), INT64(SP-2)") + } else { + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + l.u, env.vm.err = mathAdd_ui0(l.u, r.i) + env.vm.sp-- + return 1 + }, "ADD UINT64(SP-2), INT64(SP-1)") + } +} + +func (asm *assembler) Add_uu() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalUint64) + l.u, env.vm.err = mathAdd_uu0(l.u, r.u) + env.vm.sp-- + return 1 + }, "ADD UINT64(SP-2), UINT64(SP-1)") +} + +func (asm *assembler) BitCount_b() { + asm.emit(func(env *ExpressionEnv) int { + a := env.vm.stack[env.vm.sp-1].(*evalBytes) + count := 0 + for _, b := range a.bytes { + count += bits.OnesCount8(b) + } + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(count)) + return 1 + }, "BIT_COUNT BINARY(SP-1)") +} + +func (asm *assembler) BitCount_u() { + asm.emit(func(env *ExpressionEnv) int { + a := env.vm.stack[env.vm.sp-1].(*evalUint64) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(bits.OnesCount64(a.u))) + return 1 + }, "BIT_COUNT UINT64(SP-1)") +} + +func (asm *assembler) BitOp_and_bb() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalBytes) + r := env.vm.stack[env.vm.sp-1].(*evalBytes) + if len(l.bytes) != len(r.bytes) { + env.vm.err = errBitwiseOperandsLength + return 0 + } + for i := range l.bytes { + l.bytes[i] = l.bytes[i] & r.bytes[i] + } + env.vm.sp-- + return 1 + }, "AND BINARY(SP-2), BINARY(SP-1)") +} + +func (asm *assembler) BitOp_or_bb() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalBytes) + r := env.vm.stack[env.vm.sp-1].(*evalBytes) + if len(l.bytes) != len(r.bytes) { + env.vm.err = errBitwiseOperandsLength + return 0 + } + for i := range l.bytes { + l.bytes[i] = l.bytes[i] | r.bytes[i] + } + env.vm.sp-- + return 1 + }, "OR BINARY(SP-2), BINARY(SP-1)") +} + +func (asm *assembler) BitOp_xor_bb() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalBytes) + r := env.vm.stack[env.vm.sp-1].(*evalBytes) + if len(l.bytes) != len(r.bytes) { + env.vm.err = errBitwiseOperandsLength + return 0 + } + for i := range l.bytes { + l.bytes[i] = l.bytes[i] ^ r.bytes[i] + } + env.vm.sp-- + return 1 + }, "XOR BINARY(SP-2), BINARY(SP-1)") +} + +func (asm *assembler) BitOp_and_uu() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalUint64) + l.u = l.u & r.u + env.vm.sp-- + return 1 + }, "AND UINT64(SP-2), UINT64(SP-1)") +} + +func (asm *assembler) BitOp_or_uu() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalUint64) + l.u = l.u | r.u + env.vm.sp-- + return 1 + }, "OR UINT64(SP-2), UINT64(SP-1)") +} + +func (asm *assembler) BitOp_xor_uu() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalUint64) + l.u = l.u ^ r.u + env.vm.sp-- + return 1 + }, "XOR UINT64(SP-2), UINT64(SP-1)") +} + +func (asm *assembler) BitShiftLeft_bu() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalBytes) + r := env.vm.stack[env.vm.sp-1].(*evalUint64) + + var ( + bits = int(r.u & 7) + bytes = int(r.u >> 3) + length = len(l.bytes) + out = make([]byte, length) + ) + + for i := 0; i < length; i++ { + pos := i + bytes + 1 + switch { + case pos < length: + out[i] = l.bytes[pos] >> (8 - bits) + fallthrough + case pos == length: + out[i] |= l.bytes[pos-1] << bits + } + } + l.bytes = out + + env.vm.sp-- + return 1 + }, "BIT_SHL BINARY(SP-2), UINT64(SP-1)") +} + +func (asm *assembler) BitShiftLeft_uu() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalUint64) + l.u = l.u << r.u + + env.vm.sp-- + return 1 + }, "BIT_SHL UINT64(SP-2), UINT64(SP-1)") +} + +func (asm *assembler) BitShiftRight_bu() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalBytes) + r := env.vm.stack[env.vm.sp-1].(*evalUint64) + + var ( + bits = int(r.u & 7) + bytes = int(r.u >> 3) + length = len(l.bytes) + out = make([]byte, length) + ) + + for i := length - 1; i >= 0; i-- { + switch { + case i > bytes: + out[i] = l.bytes[i-bytes-1] << (8 - bits) + fallthrough + case i == bytes: + out[i] |= l.bytes[i-bytes] >> bits + } + } + l.bytes = out + + env.vm.sp-- + return 1 + }, "BIT_SHR BINARY(SP-2), UINT64(SP-1)") +} + +func (asm *assembler) BitShiftRight_uu() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalUint64) + l.u = l.u >> r.u + + env.vm.sp-- + return 1 + }, "BIT_SHR UINT64(SP-2), UINT64(SP-1)") +} + +func (asm *assembler) BitwiseNot_b() { + asm.emit(func(env *ExpressionEnv) int { + a := env.vm.stack[env.vm.sp-1].(*evalBytes) + for i := range a.bytes { + a.bytes[i] = ^a.bytes[i] + } + return 1 + }, "BIT_NOT BINARY(SP-1)") +} + +func (asm *assembler) BitwiseNot_u() { + asm.emit(func(env *ExpressionEnv) int { + a := env.vm.stack[env.vm.sp-1].(*evalUint64) + a.u = ^a.u + return 1 + }, "BIT_NOT UINT64(SP-1)") +} + +func (asm *assembler) Cmp_eq() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalBool(env.vm.flags.cmp == 0) + env.vm.sp++ + return 1 + }, "CMPFLAG EQ") +} + +func (asm *assembler) Cmp_eq_n() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + if env.vm.flags.null { + env.vm.stack[env.vm.sp] = nil + } else { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalBool(env.vm.flags.cmp == 0) + } + env.vm.sp++ + return 1 + }, "CMPFLAG EQ [NULL]") +} + +func (asm *assembler) Cmp_ge() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalBool(env.vm.flags.cmp >= 0) + env.vm.sp++ + return 1 + }, "CMPFLAG GE") +} + +func (asm *assembler) Cmp_ge_n() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + if env.vm.flags.null { + env.vm.stack[env.vm.sp] = nil + } else { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalBool(env.vm.flags.cmp >= 0) + } + env.vm.sp++ + return 1 + }, "CMPFLAG GE [NULL]") +} + +func (asm *assembler) Cmp_gt() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalBool(env.vm.flags.cmp > 0) + env.vm.sp++ + return 1 + }, "CMPFLAG GT") +} + +func (asm *assembler) Cmp_gt_n() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + if env.vm.flags.null { + env.vm.stack[env.vm.sp] = nil + } else { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalBool(env.vm.flags.cmp > 0) + } + env.vm.sp++ + return 1 + }, "CMPFLAG GT [NULL]") +} + +func (asm *assembler) Cmp_le() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalBool(env.vm.flags.cmp <= 0) + env.vm.sp++ + return 1 + }, "CMPFLAG LE") +} + +func (asm *assembler) Cmp_le_n() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + if env.vm.flags.null { + env.vm.stack[env.vm.sp] = nil + } else { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalBool(env.vm.flags.cmp <= 0) + } + env.vm.sp++ + return 1 + }, "CMPFLAG LE [NULL]") +} + +func (asm *assembler) Cmp_lt() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalBool(env.vm.flags.cmp < 0) + env.vm.sp++ + return 1 + }, "CMPFLAG LT") +} + +func (asm *assembler) Cmp_lt_n() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + if env.vm.flags.null { + env.vm.stack[env.vm.sp] = nil + } else { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalBool(env.vm.flags.cmp < 0) + } + env.vm.sp++ + return 1 + }, "CMPFLAG LT [NULL]") +} +func (asm *assembler) Cmp_ne() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalBool(env.vm.flags.cmp != 0) + env.vm.sp++ + return 1 + }, "CMPFLAG NE") +} + +func (asm *assembler) Cmp_ne_n() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + if env.vm.flags.null { + env.vm.stack[env.vm.sp] = nil + } else { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalBool(env.vm.flags.cmp != 0) + } + env.vm.sp++ + return 1 + }, "CMPFLAG NE [NULL]") +} + +func (asm *assembler) CmpCase(cases int, hasElse bool, tt sqltypes.Type, cc collations.TypedCollation) { + elseOffset := 0 + if hasElse { + elseOffset = 1 + } + + stackDepth := 2*cases + elseOffset + asm.adjustStack(-(stackDepth - 1)) + + asm.emit(func(env *ExpressionEnv) int { + end := env.vm.sp - elseOffset + for sp := env.vm.sp - stackDepth; sp < end; sp += 2 { + if env.vm.stack[sp].(*evalInt64).i != 0 { + env.vm.stack[env.vm.sp-stackDepth], env.vm.err = evalCoerce(env.vm.stack[sp+1], tt, cc.Collation) + goto done + } + } + if elseOffset != 0 { + env.vm.stack[env.vm.sp-stackDepth], env.vm.err = evalCoerce(env.vm.stack[env.vm.sp-1], tt, cc.Collation) + } else { + env.vm.stack[env.vm.sp-stackDepth] = nil + } + done: + env.vm.sp -= stackDepth - 1 + return 1 + }, "CASE [%d cases, else = %v]", cases, hasElse) +} + +func (asm *assembler) CmpNum_dd() { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalDecimal) + r := env.vm.stack[env.vm.sp-1].(*evalDecimal) + env.vm.sp -= 2 + env.vm.flags.cmp = l.dec.Cmp(r.dec) + return 1 + }, "CMP DECIMAL(SP-2), DECIMAL(SP-1)") +} + +func (asm *assembler) CmpNum_fd(left, right int) { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-left].(*evalFloat) + r := env.vm.stack[env.vm.sp-right].(*evalDecimal) + env.vm.sp -= 2 + fval, ok := r.dec.Float64() + if !ok { + env.vm.err = errDecimalOutOfRange + } + env.vm.flags.cmp = cmpnum(l.f, fval) + return 1 + }, "CMP FLOAT64(SP-%d), DECIMAL(SP-%d)", left, right) +} + +func (asm *assembler) CmpNum_ff() { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalFloat) + r := env.vm.stack[env.vm.sp-1].(*evalFloat) + env.vm.sp -= 2 + env.vm.flags.cmp = cmpnum(l.f, r.f) + return 1 + }, "CMP FLOAT64(SP-2), FLOAT64(SP-1)") +} + +func (asm *assembler) CmpNum_id(left, right int) { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-left].(*evalInt64) + r := env.vm.stack[env.vm.sp-right].(*evalDecimal) + env.vm.sp -= 2 + env.vm.flags.cmp = decimal.NewFromInt(l.i).Cmp(r.dec) + return 1 + }, "CMP INT64(SP-%d), DECIMAL(SP-%d)", left, right) +} + +func (asm *assembler) CmpNum_if(left, right int) { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-left].(*evalInt64) + r := env.vm.stack[env.vm.sp-right].(*evalFloat) + env.vm.sp -= 2 + env.vm.flags.cmp = cmpnum(float64(l.i), r.f) + return 1 + }, "CMP INT64(SP-%d), FLOAT64(SP-%d)", left, right) +} + +func (asm *assembler) CmpNum_ii() { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalInt64) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + env.vm.sp -= 2 + env.vm.flags.cmp = cmpnum(l.i, r.i) + return 1 + }, "CMP INT64(SP-2), INT64(SP-1)") +} + +func (asm *assembler) CmpNum_iu(left, right int) { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-left].(*evalInt64) + r := env.vm.stack[env.vm.sp-right].(*evalUint64) + env.vm.sp -= 2 + if l.i < 0 { + env.vm.flags.cmp = -1 + } else { + env.vm.flags.cmp = cmpnum(uint64(l.i), r.u) + } + return 1 + }, "CMP INT64(SP-%d), UINT64(SP-%d)", left, right) +} + +func (asm *assembler) CmpNum_ud(left, right int) { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-left].(*evalUint64) + r := env.vm.stack[env.vm.sp-right].(*evalDecimal) + env.vm.sp -= 2 + env.vm.flags.cmp = decimal.NewFromUint(l.u).Cmp(r.dec) + return 1 + }, "CMP UINT64(SP-%d), DECIMAL(SP-%d)", left, right) +} + +func (asm *assembler) CmpNum_uf(left, right int) { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-left].(*evalUint64) + r := env.vm.stack[env.vm.sp-right].(*evalFloat) + env.vm.sp -= 2 + env.vm.flags.cmp = cmpnum(float64(l.u), r.f) + return 1 + }, "CMP UINT64(SP-%d), FLOAT64(SP-%d)", left, right) +} + +func (asm *assembler) CmpNum_uu() { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalUint64) + env.vm.sp -= 2 + env.vm.flags.cmp = cmpnum(l.u, r.u) + return 1 + }, "CMP UINT64(SP-2), UINT64(SP-1)") +} + +func (asm *assembler) CmpString_coerce(coercion *compiledCoercion) { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalBytes) + r := env.vm.stack[env.vm.sp-1].(*evalBytes) + env.vm.sp -= 2 + + var bl, br []byte + bl, env.vm.err = coercion.left(nil, l.bytes) + if env.vm.err != nil { + return 0 + } + br, env.vm.err = coercion.right(nil, r.bytes) + if env.vm.err != nil { + return 0 + } + env.vm.flags.cmp = coercion.col.Collate(bl, br, false) + return 1 + }, "CMP VARCHAR(SP-2), VARCHAR(SP-1) COERCE AND COLLATE '%s'", coercion.col.Name()) +} + +func (asm *assembler) CmpString_collate(collation colldata.Collation) { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2] + r := env.vm.stack[env.vm.sp-1] + env.vm.sp -= 2 + env.vm.flags.cmp = collation.Collate(l.ToRawBytes(), r.ToRawBytes(), false) + return 1 + }, "CMP VARCHAR(SP-2), VARCHAR(SP-1) COLLATE '%s'", collation.Name()) +} + +func (asm *assembler) CmpJSON() { + asm.adjustStack(-2) + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalJSON) + r := env.vm.stack[env.vm.sp-1].(*evalJSON) + env.vm.sp -= 2 + env.vm.flags.cmp, env.vm.err = compareJSONValue(l, r) + return 1 + }, "CMP JSON(SP-2), JSON(SP-1)") +} + +func (asm *assembler) CmpTuple(fullEquality bool) { + asm.adjustStack(-2) + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalTuple) + r := env.vm.stack[env.vm.sp-1].(*evalTuple) + env.vm.sp -= 2 + env.vm.flags.cmp, env.vm.flags.null, env.vm.err = evalCompareMany(l.t, r.t, fullEquality) + return 1 + }, "CMP TUPLE(SP-2), TUPLE(SP-1)") +} + +func (asm *assembler) CmpTupleNullsafe() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalTuple) + r := env.vm.stack[env.vm.sp-1].(*evalTuple) + + var equals int + equals, env.vm.err = evalCompareTuplesNullSafe(l.t, r.t) + + env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalBool(equals == 0) + env.vm.sp -= 1 + return 1 + }, "CMP NULLSAFE TUPLE(SP-2), TUPLE(SP-1)") +} + +func (asm *assembler) CmpDateString() { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2] + r := env.vm.stack[env.vm.sp-1] + env.vm.sp -= 2 + env.vm.flags.cmp = compareDateAndString(l, r) + return 1 + }, "CMP DATE|STRING(SP-2), DATE|STRING(SP-1)") +} + +func (asm *assembler) CmpDates() { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalTemporal) + r := env.vm.stack[env.vm.sp-1].(*evalTemporal) + env.vm.sp -= 2 + env.vm.flags.cmp = compareDates(l, r) + return 1 + }, "CMP DATE(SP-2), DATE(SP-1)") +} + +func (asm *assembler) Collate(col collations.ID) { + asm.emit(func(env *ExpressionEnv) int { + a := env.vm.stack[env.vm.sp-1].(*evalBytes) + a.tt = int16(sqltypes.VarChar) + a.col.Collation = col + return 1 + }, "COLLATE VARCHAR(SP-1), %d", col) +} + +func (asm *assembler) Convert_bB(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset] + var f float64 + if arg != nil { + f, _ = fastparse.ParseFloat64(arg.(*evalBytes).string()) + } + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(f != 0.0) + return 1 + }, "CONV VARBINARY(SP-%d), BOOL", offset) +} + +func (asm *assembler) Convert_TB(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset] + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(arg != nil && !arg.(*evalTemporal).isZero()) + return 1 + }, "CONV SQLTYPES(SP-%d), BOOL", offset) +} + +func (asm *assembler) Convert_jB(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalJSON) + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(arg.ToBoolean()) + return 1 + }, "CONV JSON(SP-%d), BOOL", offset) +} + +func (asm *assembler) Convert_bj(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalBytes) + env.vm.stack[env.vm.sp-offset] = evalConvert_bj(arg) + return 1 + }, "CONV VARBINARY(SP-%d), JSON", offset) +} + +func (asm *assembler) ConvertArg_cj(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalBytes) + env.vm.stack[env.vm.sp-offset], env.vm.err = evalConvertArg_cj(arg) + return 1 + }, "CONVA VARCHAR(SP-%d), JSON", offset) +} + +func (asm *assembler) Convert_cj(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalBytes) + env.vm.stack[env.vm.sp-offset], env.vm.err = evalConvert_cj(arg) + return 1 + }, "CONV VARCHAR(SP-%d), JSON", offset) +} + +func (asm *assembler) Convert_Tj(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalTemporal) + env.vm.stack[env.vm.sp-offset] = arg.toJSON() + return 1 + }, "CONV SQLTIME(SP-%d), JSON", offset) +} + +func (asm *assembler) Convert_dB(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset] + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(arg != nil && !arg.(*evalDecimal).dec.IsZero()) + return 1 + }, "CONV DECIMAL(SP-%d), BOOL", offset) +} + +// Convert_dbit is a special instruction emission for converting +// a bigdecimal in preparation for a bitwise operation. In that case +// we need to convert the bigdecimal to an int64 and then cast to +// uint64 to ensure we match the behavior of MySQL. +func (asm *assembler) Convert_dbit(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := evalToInt64(env.vm.stack[env.vm.sp-offset]) + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalUint64(uint64(arg.i)) + return 1 + }, "CONV DECIMAL_BITWISE(SP-%d), UINT64", offset) +} + +func (asm *assembler) Convert_fB(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset] + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(arg != nil && arg.(*evalFloat).f != 0.0) + return 1 + }, "CONV FLOAT64(SP-%d), BOOL", offset) +} + +func (asm *assembler) Convert_fj(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalFloat) + env.vm.stack[env.vm.sp-offset] = evalConvert_fj(arg) + return 1 + }, "CONV FLOAT64(SP-%d), JSON") +} + +func (asm *assembler) Convert_hex(offset int) { + asm.emit(func(env *ExpressionEnv) int { + var ok bool + env.vm.stack[env.vm.sp-offset], ok = env.vm.stack[env.vm.sp-offset].(*evalBytes).toNumericHex() + if !ok { + env.vm.err = errDeoptimize + } + return 1 + }, "CONV VARBINARY(SP-%d), HEX", offset) +} + +func (asm *assembler) Convert_Ti(offset int) { + asm.emit(func(env *ExpressionEnv) int { + v := env.vm.stack[env.vm.sp-offset].(*evalTemporal) + if v.prec != 0 { + env.vm.err = errDeoptimize + return 1 + } + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalInt64(v.toInt64()) + return 1 + }, "CONV SQLTIME(SP-%d), INT64", offset) +} + +func (asm *assembler) Convert_Tf(offset int) { + asm.emit(func(env *ExpressionEnv) int { + v := env.vm.stack[env.vm.sp-offset].(*evalTemporal) + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalFloat(v.toFloat()) + return 1 + }, "CONV SQLTIME(SP-%d), FLOAT64", offset) +} + +func (asm *assembler) Convert_iB(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset] + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(arg != nil && arg.(*evalInt64).i != 0) + return 1 + }, "CONV INT64(SP-%d), BOOL", offset) +} + +func (asm *assembler) Convert_id(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalInt64) + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalDecimalWithPrec(decimal.NewFromInt(arg.i), 0) + return 1 + }, "CONV INT64(SP-%d), FLOAT64", offset) +} + +func (asm *assembler) Convert_if(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalInt64) + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalFloat(arg.toFloat0()) + return 1 + }, "CONV INT64(SP-%d), FLOAT64", offset) +} + +func (asm *assembler) Convert_iu(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalInt64) + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalUint64(uint64(arg.i)) + return 1 + }, "CONV INT64(SP-%d), UINT64", offset) +} + +func (asm *assembler) Clamp_u(offset int, val uint64) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalUint64) + if arg.u > val { + arg.u = val + } + return 1 + }, "CLAMP UINT64(SP-%d), UINT64", offset) +} + +func (asm *assembler) Convert_ij(offset int, isBool bool) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalInt64) + switch { + case isBool && arg.i == 0: + env.vm.stack[env.vm.sp-offset] = json.ValueFalse + case isBool && arg.i == 1: + env.vm.stack[env.vm.sp-offset] = json.ValueTrue + default: + env.vm.stack[env.vm.sp-offset] = json.NewNumber(string(arg.ToRawBytes()), json.NumberTypeSigned) + } + return 1 + }, "CONV INT64(SP-%d), JSON") +} + +func (asm *assembler) Convert_uj(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalUint64) + env.vm.stack[env.vm.sp-offset] = json.NewNumber(string(arg.ToRawBytes()), json.NumberTypeUnsigned) + return 1 + }, "CONV UINT64(SP-%d), JSON") +} + +func (asm *assembler) Convert_dj(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalDecimal) + env.vm.stack[env.vm.sp-offset] = json.NewNumber(string(arg.ToRawBytes()), json.NumberTypeDecimal) + return 1 + }, "CONV DECIMAL(SP-%d), JSON") +} + +func (asm *assembler) Convert_Nj(offset int) { + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp-offset] = json.ValueNull + return 1 + }, "CONV NULL(SP-%d), JSON") +} + +func (asm *assembler) Convert_uB(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset] + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(arg != nil && arg.(*evalUint64).u != 0) + return 1 + }, "CONV UINT64(SP-%d), BOOL", offset) +} + +func (asm *assembler) Convert_ud(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalUint64) + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalDecimalWithPrec(decimal.NewFromUint(arg.u), 0) + return 1 + }, "CONV UINT64(SP-%d), FLOAT64)", offset) +} + +func (asm *assembler) Convert_uf(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalUint64) + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalFloat(arg.toFloat0()) + return 1 + }, "CONV UINT64(SP-%d), FLOAT64)", offset) +} + +func (asm *assembler) Convert_ui(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalUint64) + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalInt64(int64(arg.u)) + return 1 + }, "CONV UINT64(SP-%d), INT64", offset) +} + +func (asm *assembler) Convert_xb(offset int, t sqltypes.Type, length int, hasLength bool) { + if hasLength { + asm.emit(func(env *ExpressionEnv) int { + arg := evalToBinary(env.vm.stack[env.vm.sp-offset]) + arg.truncateInPlace(length) + arg.tt = int16(t) + env.vm.stack[env.vm.sp-offset] = arg + return 1 + }, "CONV (SP-%d), VARBINARY[%d]", offset, length) + } else { + asm.emit(func(env *ExpressionEnv) int { + arg := evalToBinary(env.vm.stack[env.vm.sp-offset]) + arg.tt = int16(t) + env.vm.stack[env.vm.sp-offset] = arg + return 1 + }, "CONV (SP-%d), VARBINARY", offset) + } +} + +func (asm *assembler) Convert_xc(offset int, t sqltypes.Type, collation collations.ID, length int, hasLength bool) { + if hasLength { + asm.emit(func(env *ExpressionEnv) int { + arg, err := evalToVarchar(env.vm.stack[env.vm.sp-offset], collation, true) + if err != nil { + env.vm.stack[env.vm.sp-offset] = nil + } else { + arg.truncateInPlace(length) + arg.tt = int16(t) + env.vm.stack[env.vm.sp-offset] = arg + } + return 1 + }, "CONV (SP-%d), VARCHAR[%d]", offset, length) + } else { + asm.emit(func(env *ExpressionEnv) int { + arg, err := evalToVarchar(env.vm.stack[env.vm.sp-offset], collation, true) + if err != nil { + env.vm.stack[env.vm.sp-offset] = nil + } else { + arg.tt = int16(t) + env.vm.stack[env.vm.sp-offset] = arg + } + return 1 + }, "CONV (SP-%d), VARCHAR", offset) + } +} + +func (asm *assembler) Convert_xce(offset int, t sqltypes.Type, collation collations.ID) { + asm.emit(func(env *ExpressionEnv) int { + arg, err := evalToVarchar(env.vm.stack[env.vm.sp-offset], collation, true) + if err != nil { + env.vm.stack[env.vm.sp-offset] = nil + env.vm.err = err + } else { + arg.tt = int16(t) + env.vm.stack[env.vm.sp-offset] = arg + } + return 1 + }, "CONVE (SP-%d), VARCHAR", offset) +} + +func (asm *assembler) Convert_xd(offset int, m, d int32) { + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp-offset] = evalToDecimal(env.vm.stack[env.vm.sp-offset], m, d) + return 1 + }, "CONV (SP-%d), DECIMAL", offset) +} + +func (asm *assembler) Convert_xf(offset int) { + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp-offset], _ = evalToFloat(env.vm.stack[env.vm.sp-offset]) + return 1 + }, "CONV (SP-%d), FLOAT64", offset) +} + +func (asm *assembler) Convert_xi(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := evalToInt64(env.vm.stack[env.vm.sp-offset]) + env.vm.stack[env.vm.sp-offset] = arg + return 1 + }, "CONV (SP-%d), INT64", offset) +} + +func (asm *assembler) Convert_xu(offset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := evalToInt64(env.vm.stack[env.vm.sp-offset]) + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalUint64(uint64(arg.i)) + return 1 + }, "CONV (SP-%d), UINT64", offset) +} + +func (asm *assembler) Convert_xD(offset int) { + asm.emit(func(env *ExpressionEnv) int { + // Need to explicitly check here or we otherwise + // store a nil wrapper in an interface vs. a direct + // nil. + d := evalToDate(env.vm.stack[env.vm.sp-offset]) + if d == nil { + env.vm.stack[env.vm.sp-offset] = nil + } else { + env.vm.stack[env.vm.sp-offset] = d + } + return 1 + }, "CONV (SP-%d), DATE", offset) +} + +func (asm *assembler) Convert_xD_nz(offset int) { + asm.emit(func(env *ExpressionEnv) int { + // Need to explicitly check here or we otherwise + // store a nil wrapper in an interface vs. a direct + // nil. + d := evalToDate(env.vm.stack[env.vm.sp-offset]) + if d == nil || d.isZero() { + env.vm.stack[env.vm.sp-offset] = nil + } else { + env.vm.stack[env.vm.sp-offset] = d + } + return 1 + }, "CONV (SP-%d), DATE(NOZERO)", offset) +} + +func (asm *assembler) Convert_xDT(offset, prec int) { + asm.emit(func(env *ExpressionEnv) int { + // Need to explicitly check here or we otherwise + // store a nil wrapper in an interface vs. a direct + // nil. + dt := evalToDateTime(env.vm.stack[env.vm.sp-offset], prec) + if dt == nil { + env.vm.stack[env.vm.sp-offset] = nil + } else { + env.vm.stack[env.vm.sp-offset] = dt + } + return 1 + }, "CONV (SP-%d), DATETIME", offset) +} + +func (asm *assembler) Convert_xDT_nz(offset, prec int) { + asm.emit(func(env *ExpressionEnv) int { + // Need to explicitly check here or we otherwise + // store a nil wrapper in an interface vs. a direct + // nil. + dt := evalToDateTime(env.vm.stack[env.vm.sp-offset], prec) + if dt == nil || dt.isZero() { + env.vm.stack[env.vm.sp-offset] = nil + } else { + env.vm.stack[env.vm.sp-offset] = dt + } + return 1 + }, "CONV (SP-%d), DATETIME(NOZERO)", offset) +} + +func (asm *assembler) Convert_xT(offset, prec int) { + asm.emit(func(env *ExpressionEnv) int { + t := evalToTime(env.vm.stack[env.vm.sp-offset], prec) + if t == nil { + env.vm.stack[env.vm.sp-offset] = nil + } else { + env.vm.stack[env.vm.sp-offset] = t + } + return 1 + }, "CONV (SP-%d), TIME", offset) +} + +func (asm *assembler) Convert_tp(offset, prec int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalTemporal) + arg.dt = arg.dt.Round(prec) + arg.prec = uint8(prec) + return 1 + }, "CONV (SP-%d), PRECISION", offset) +} + +func (asm *assembler) Div_dd() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalDecimal) + r := env.vm.stack[env.vm.sp-1].(*evalDecimal) + if r.dec.IsZero() { + env.vm.stack[env.vm.sp-2] = nil + } else { + mathDiv_dd0(l, r, divPrecisionIncrement) + } + env.vm.sp-- + return 1 + }, "DIV DECIMAL(SP-2), DECIMAL(SP-1)") +} + +func (asm *assembler) Div_ff() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalFloat) + r := env.vm.stack[env.vm.sp-1].(*evalFloat) + if r.f == 0.0 { + env.vm.stack[env.vm.sp-2] = nil + } else { + l.f, env.vm.err = mathDiv_ff0(l.f, r.f) + } + env.vm.sp-- + return 1 + }, "DIV FLOAT64(SP-2), FLOAT64(SP-1)") +} + +func (asm *assembler) IntDiv_ii() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalInt64) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + if r.i == 0 { + env.vm.stack[env.vm.sp-2] = nil + } else { + l.i = l.i / r.i + } + env.vm.sp-- + return 1 + }, "INTDIV INT64(SP-2), INT64(SP-1)") +} + +func (asm *assembler) IntDiv_iu() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalInt64) + r := env.vm.stack[env.vm.sp-1].(*evalUint64) + if r.u == 0 { + env.vm.stack[env.vm.sp-2] = nil + } else { + r.u, env.vm.err = mathIntDiv_iu0(l.i, r.u) + env.vm.stack[env.vm.sp-2] = r + } + env.vm.sp-- + return 1 + }, "INTDIV INT64(SP-2), UINT64(SP-1)") +} + +func (asm *assembler) IntDiv_ui() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + if r.i == 0 { + env.vm.stack[env.vm.sp-2] = nil + } else { + l.u, env.vm.err = mathIntDiv_ui0(l.u, r.i) + } + env.vm.sp-- + return 1 + }, "INTDIV UINT64(SP-2), INT64(SP-1)") +} + +func (asm *assembler) IntDiv_uu() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalUint64) + if r.u == 0 { + env.vm.stack[env.vm.sp-2] = nil + } else { + l.u = l.u / r.u + } + env.vm.sp-- + return 1 + }, "INTDIV UINT64(SP-2), UINT64(SP-1)") +} + +func (asm *assembler) IntDiv_di() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalDecimal) + r := env.vm.stack[env.vm.sp-1].(*evalDecimal) + if r.dec.IsZero() { + env.vm.stack[env.vm.sp-2] = nil + } else { + var res int64 + res, env.vm.err = mathIntDiv_di0(l, r) + env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalInt64(res) + } + env.vm.sp-- + return 1 + }, "INTDIV DECIMAL(SP-2), DECIMAL(SP-1)") +} + +func (asm *assembler) IntDiv_du() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalDecimal) + r := env.vm.stack[env.vm.sp-1].(*evalDecimal) + if r.dec.IsZero() { + env.vm.stack[env.vm.sp-2] = nil + } else { + var res uint64 + res, env.vm.err = mathIntDiv_du0(l, r) + env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalUint64(res) + } + env.vm.sp-- + return 1 + }, "UINTDIV DECIMAL(SP-2), DECIMAL(SP-1)") +} + +func (asm *assembler) Mod_ii() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalInt64) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + if r.i == 0 { + env.vm.stack[env.vm.sp-2] = nil + } else { + l.i = l.i % r.i + } + env.vm.sp-- + return 1 + }, "MOD INT64(SP-2), INT64(SP-1)") +} + +func (asm *assembler) Mod_iu() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalInt64) + r := env.vm.stack[env.vm.sp-1].(*evalUint64) + if r.u == 0 { + env.vm.stack[env.vm.sp-2] = nil + } else { + l.i = mathMod_iu0(l.i, r.u) + } + env.vm.sp-- + return 1 + }, "MOD INT64(SP-2), UINT64(SP-1)") +} + +func (asm *assembler) Mod_ui() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + if r.i == 0 { + env.vm.stack[env.vm.sp-2] = nil + } else { + l.u, env.vm.err = mathMod_ui0(l.u, r.i) + } + env.vm.sp-- + return 1 + }, "MOD UINT64(SP-2), INT64(SP-1)") +} + +func (asm *assembler) Mod_uu() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalUint64) + if r.u == 0 { + env.vm.stack[env.vm.sp-2] = nil + } else { + l.u = l.u % r.u + } + env.vm.sp-- + return 1 + }, "MOD UINT64(SP-2), UINT64(SP-1)") +} + +func (asm *assembler) Mod_ff() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalFloat) + r := env.vm.stack[env.vm.sp-1].(*evalFloat) + if r.f == 0.0 { + env.vm.stack[env.vm.sp-2] = nil + } else { + l.f = math.Mod(l.f, r.f) + } + env.vm.sp-- + return 1 + }, "MOD FLOAT64(SP-2), FLOAT64(SP-1)") +} + +func (asm *assembler) Mod_dd() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalDecimal) + r := env.vm.stack[env.vm.sp-1].(*evalDecimal) + if r.dec.IsZero() { + env.vm.stack[env.vm.sp-2] = nil + } else { + l.dec, l.length = mathMod_dd0(l, r) + } + env.vm.sp-- + return 1 + }, "MOD DECIMAL(SP-2), DECIMAL(SP-1)") +} + +func (asm *assembler) Fn_ASCII() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + if len(arg.bytes) == 0 { + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(0) + } else { + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(arg.bytes[0])) + } + return 1 + }, "FN ASCII VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_ORD(col collations.ID) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(charOrd(arg.bytes, col)) + return 1 + }, "FN ORD VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_CEIL_d() { + asm.emit(func(env *ExpressionEnv) int { + d := env.vm.stack[env.vm.sp-1].(*evalDecimal) + c := d.dec.Ceil() + i, valid := c.Int64() + if valid { + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(i) + } else { + env.vm.err = errDeoptimize + } + return 1 + }, "FN CEIL DECIMAL(SP-1)") +} + +func (asm *assembler) Fn_CEIL_f() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + f.f = math.Ceil(f.f) + return 1 + }, "FN CEIL FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_FLOOR_d() { + asm.emit(func(env *ExpressionEnv) int { + d := env.vm.stack[env.vm.sp-1].(*evalDecimal) + c := d.dec.Floor() + i, valid := c.Int64() + if valid { + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(i) + } else { + env.vm.err = errDeoptimize + } + return 1 + }, "FN FLOOR DECIMAL(SP-1)") +} + +func (asm *assembler) Fn_FLOOR_f() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + f.f = math.Floor(f.f) + return 1 + }, "FN FLOOR FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_ABS_i() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalInt64) + if f.i >= 0 { + return 1 + } + if f.i == math.MinInt64 { + env.vm.err = vterrors.NewErrorf(vtrpc.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "BIGINT value is out of range") + return 1 + } + f.i = -f.i + return 1 + }, "FN ABS INT64(SP-1)") +} + +func (asm *assembler) Fn_ABS_d() { + asm.emit(func(env *ExpressionEnv) int { + d := env.vm.stack[env.vm.sp-1].(*evalDecimal) + d.dec = d.dec.Abs() + return 1 + }, "FN ABS DECIMAL(SP-1)") +} + +func (asm *assembler) Fn_ABS_f() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + if f.f >= 0 { + return 1 + } + f.f = -f.f + return 1 + }, "FN ABS FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_PI() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalFloat(math.Pi) + env.vm.sp++ + return 1 + }, "FN PI") +} + +func (asm *assembler) Fn_ACOS() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + if f.f < -1 || f.f > 1 { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + f.f = math.Acos(f.f) + return 1 + }, "FN ACOS FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_ASIN() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + if f.f < -1 || f.f > 1 { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + f.f = math.Asin(f.f) + return 1 + }, "FN ASIN FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_ATAN() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + f.f = math.Atan(f.f) + return 1 + }, "FN ATAN FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_ATAN2() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + f1 := env.vm.stack[env.vm.sp-2].(*evalFloat) + f2 := env.vm.stack[env.vm.sp-1].(*evalFloat) + f1.f = math.Atan2(f1.f, f2.f) + env.vm.sp-- + return 1 + }, "FN ATAN2 FLOAT64(SP-2) FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_COS() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + f.f = math.Cos(f.f) + return 1 + }, "FN COS FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_COT() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + f.f = 1.0 / math.Tan(f.f) + return 1 + }, "FN COT FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_SIN() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + f.f = math.Sin(f.f) + return 1 + }, "FN SIN FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_TAN() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + f.f = math.Tan(f.f) + return 1 + }, "FN TAN FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_DEGREES() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + f.f = f.f * (180 / math.Pi) + return 1 + }, "FN DEGREES FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_RADIANS() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + f.f = f.f * (math.Pi / 180) + return 1 + }, "FN RADIANS FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_EXP() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + f.f = math.Exp(f.f) + if !isFinite(f.f) { + env.vm.stack[env.vm.sp-1] = nil + } + return 1 + }, "FN EXP FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_LN() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + var ok bool + f.f, ok = math_log(f.f) + if !ok { + env.vm.stack[env.vm.sp-1] = nil + } + return 1 + }, "FN LN FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_LOG() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + var ok bool + f1 := env.vm.stack[env.vm.sp-2].(*evalFloat) + f2 := env.vm.stack[env.vm.sp-1].(*evalFloat) + f1.f, ok = math_logN(f1.f, f2.f) + if !ok { + env.vm.stack[env.vm.sp-2] = nil + } + env.vm.sp-- + return 1 + }, "FN LOG FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_LOG10() { + asm.emit(func(env *ExpressionEnv) int { + var ok bool + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + f.f, ok = math_log10(f.f) + if !ok { + env.vm.stack[env.vm.sp-1] = nil + } + return 1 + }, "FN LOG10 FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_LOG2() { + asm.emit(func(env *ExpressionEnv) int { + var ok bool + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + f.f, ok = math_log2(f.f) + if !ok { + env.vm.stack[env.vm.sp-1] = nil + } + return 1 + }, "FN LOG2 FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_POW() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + f1 := env.vm.stack[env.vm.sp-2].(*evalFloat) + f2 := env.vm.stack[env.vm.sp-1].(*evalFloat) + + f1.f = math.Pow(f1.f, f2.f) + if !isFinite(f1.f) { + env.vm.stack[env.vm.sp-2] = nil + } + env.vm.sp-- + return 1 + }, "FN POW FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_SIGN_i() { + asm.emit(func(env *ExpressionEnv) int { + i := env.vm.stack[env.vm.sp-1].(*evalInt64) + if i.i < 0 { + i.i = -1 + } else if i.i > 0 { + i.i = 1 + } else { + i.i = 0 + } + return 1 + }, "FN SIGN INT64(SP-1)") +} + +func (asm *assembler) Fn_SIGN_u() { + asm.emit(func(env *ExpressionEnv) int { + u := env.vm.stack[env.vm.sp-1].(*evalUint64) + a := int64(0) + if u.u > 0 { + a = 1 + } + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(a) + return 1 + }, "FN SIGN UINT64(SP-1)") +} + +func (asm *assembler) Fn_SIGN_f() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + a := int64(0) + if f.f < 0 { + a = -1 + } else if f.f > 0 { + a = 1 + } + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(a) + return 1 + }, "FN SIGN FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_SIGN_d() { + asm.emit(func(env *ExpressionEnv) int { + d := env.vm.stack[env.vm.sp-1].(*evalDecimal) + a := int64(d.dec.Sign()) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(a) + return 1 + }, "FN SIGN FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_SQRT() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + f.f = math.Sqrt(f.f) + if !isFinite(f.f) { + env.vm.stack[env.vm.sp-1] = nil + } + return 1 + }, "FN SQRT FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_ROUND1_f() { + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-1].(*evalFloat) + f.f = math.Round(f.f) + return 1 + }, "FN ROUND FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_ROUND1_d() { + asm.emit(func(env *ExpressionEnv) int { + d := env.vm.stack[env.vm.sp-1].(*evalDecimal) + d.dec = d.dec.Round(0) + d.length = 0 + return 1 + }, "FN ROUND DECIMAL(SP-1)") +} + +func (asm *assembler) Fn_ROUND2_i() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + i := env.vm.stack[env.vm.sp-2].(*evalInt64) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + + i.i = roundSigned(i.i, r.i) + env.vm.sp-- + return 1 + }, "FN ROUND INT64(SP-2) INT64(SP-1)") +} + +func (asm *assembler) Fn_ROUND2_u() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + u := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + + u.u = roundUnsigned(u.u, r.i) + env.vm.sp-- + return 1 + }, "FN ROUND INT64(SP-2) INT64(SP-1)") +} + +func (asm *assembler) Fn_ROUND2_f() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-2].(*evalFloat) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + if r.i == 0 { + f.f = math.Round(f.f) + env.vm.sp-- + return 1 + } + + r.i = clampRounding(r.i) + factor := math.Pow(10, float64(r.i)) + if factor == 0.0 { + f.f = 0.0 + env.vm.sp-- + return 1 + } + f.f = math.Round(f.f*factor) / factor + env.vm.sp-- + return 1 + }, "FN ROUND FLOAT64(SP-2) INT64(SP-1)") +} + +func (asm *assembler) Fn_ROUND2_d() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + d := env.vm.stack[env.vm.sp-2].(*evalDecimal) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + + if d.dec.IsZero() { + env.vm.sp-- + return 1 + } + + if r.i == 0 { + d.dec = d.dec.Round(0) + d.length = 0 + env.vm.sp-- + return 1 + } + + r.i = clampRounding(r.i) + digit := int32(r.i) + if digit < 0 { + digit = 0 + } + if digit > d.length { + digit = d.length + } + rounded := d.dec.Round(int32(r.i)) + if rounded.IsZero() { + d.dec = decimal.Zero + d.length = 0 + env.vm.sp-- + return 1 + } + env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalDecimalWithPrec(rounded, digit) + env.vm.sp-- + return 1 + }, "FN ROUND DECIMAL(SP-2) INT64(SP-1)") +} + +func (asm *assembler) Fn_TRUNCATE_i() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + i := env.vm.stack[env.vm.sp-2].(*evalInt64) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + + i.i = truncateSigned(i.i, r.i) + env.vm.sp-- + return 1 + }, "FN TRUNCATE INT64(SP-2) INT64(SP-1)") +} + +func (asm *assembler) Fn_TRUNCATE_u() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + u := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + + u.u = truncateUnsigned(u.u, r.i) + env.vm.sp-- + return 1 + }, "FN TRUNCATE INT64(SP-2) INT64(SP-1)") +} + +func (asm *assembler) Fn_TRUNCATE_f() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + f := env.vm.stack[env.vm.sp-2].(*evalFloat) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + if r.i == 0 { + f.f = math.Trunc(f.f) + env.vm.sp-- + return 1 + } + + r.i = clampRounding(r.i) + factor := math.Pow(10, float64(r.i)) + if factor == 0.0 { + f.f = 0.0 + env.vm.sp-- + return 1 + } + f.f = math.Trunc(f.f*factor) / factor + env.vm.sp-- + return 1 + }, "FN TRUNCATE FLOAT64(SP-2) INT64(SP-1)") +} + +func (asm *assembler) Fn_TRUNCATE_d() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + d := env.vm.stack[env.vm.sp-2].(*evalDecimal) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + + if d.dec.IsZero() { + env.vm.sp-- + return 1 + } + + if r.i == 0 { + d.dec = d.dec.Truncate(0) + d.length = 0 + env.vm.sp-- + return 1 + } + + r.i = clampRounding(r.i) + digit := int32(r.i) + if digit < 0 { + digit = 0 + } + if digit > d.length { + digit = d.length + } + rounded := d.dec.Truncate(int32(r.i)) + if rounded.IsZero() { + d.dec = decimal.Zero + d.length = 0 + env.vm.sp-- + return 1 + } + env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalDecimalWithPrec(rounded, digit) + env.vm.sp-- + return 1 + }, "FN TRUNCATE DECIMAL(SP-2) INT64(SP-1)") +} + +func (asm *assembler) Fn_CRC32() { + asm.emit(func(env *ExpressionEnv) int { + b := env.vm.stack[env.vm.sp-1].(*evalBytes) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalUint64(uint64(crc32.ChecksumIEEE(b.bytes))) + return 1 + }, "FN CRC32 BINARY(SP-1)") +} + +func (asm *assembler) Fn_CONV_hu(offset int, baseOffset int) { + asm.emit(func(env *ExpressionEnv) int { + base := env.vm.stack[env.vm.sp-baseOffset].(*evalInt64) + + // Even though the base is not used at all with a hex string literal, + // we still need to check the base range to make sure it is valid. + if base.i < -36 || (base.i > -2 && base.i < 2) || base.i > 36 { + env.vm.stack[env.vm.sp-offset] = nil + return 1 + } + + env.vm.stack[env.vm.sp-offset], _ = env.vm.stack[env.vm.sp-offset].(*evalBytes).toNumericHex() + return 1 + }, "FN CONV VARBINARY(SP-%d), HEX", offset) +} + +func (asm *assembler) Fn_CONV_bu(offset int, baseOffset int) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-offset].(*evalBytes) + base := env.vm.stack[env.vm.sp-baseOffset].(*evalInt64) + + if base.i < -36 || (base.i > -2 && base.i < 2) || base.i > 36 { + env.vm.stack[env.vm.sp-offset] = nil + return 1 + } + if base.i < 0 { + base.i = -base.i + } + + var u uint64 + i, err := fastparse.ParseInt64(arg.string(), int(base.i)) + u = uint64(i) + if errors.Is(err, fastparse.ErrOverflow) { + u, _ = fastparse.ParseUint64(arg.string(), int(base.i)) + } + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalUint64(u) + return 1 + }, "FN CONV VARBINARY(SP-%d), INT64(SP-%d)", offset, baseOffset) +} + +func (asm *assembler) Fn_CONV_uc(t sqltypes.Type, col collations.TypedCollation) { + asm.adjustStack(-2) + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-3] == nil { + env.vm.sp -= 2 + return 1 + } + u := env.vm.stack[env.vm.sp-3].(*evalUint64).u + base := env.vm.stack[env.vm.sp-1].(*evalInt64) + + if base.i < -36 || (base.i > -2 && base.i < 2) || base.i > 36 { + env.vm.stack[env.vm.sp-3] = nil + env.vm.sp -= 2 + return 1 + } + + var out []byte + if base.i < 0 { + out = strconv.AppendInt(out, int64(u), -int(base.i)) + } else { + out = strconv.AppendUint(out, u, int(base.i)) + } + + res := env.vm.arena.newEvalBytesEmpty() + res.tt = int16(t) + res.bytes = upcaseASCII(out) + res.col = col + + env.vm.stack[env.vm.sp-3] = res + env.vm.sp -= 2 + return 1 + }, "FN CONV VARCHAR(SP-3) INT64(SP-2) INT64(SP-1)") +} + +func (asm *assembler) Fn_COLLATION(col collations.TypedCollation) { + asm.emit(func(env *ExpressionEnv) int { + v := evalCollation(env.vm.stack[env.vm.sp-1]) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalText([]byte(collations.Local().LookupName(v.Collation)), col) + return 1 + }, "FN COLLATION (SP-1)") +} + +func (asm *assembler) Fn_FROM_BASE64(t sqltypes.Type) { + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-1].(*evalBytes) + + decoded, err := mysqlBase64Decode(str.bytes) + if err != nil { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + str.tt = int16(t) + str.bytes = decoded + return 1 + }, "FN FROM_BASE64 VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_HEX_c(t sqltypes.Type, col collations.TypedCollation) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + encoded := env.vm.arena.newEvalText(hex.EncodeBytes(arg.bytes), col) + encoded.tt = int16(t) + env.vm.stack[env.vm.sp-1] = encoded + return 1 + }, "FN HEX VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_HEX_d(col collations.TypedCollation) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(evalNumeric) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalText(hex.EncodeUint(uint64(arg.toInt64().i)), col) + return 1 + }, "FN HEX NUMERIC(SP-1)") +} + +func (asm *assembler) Fn_UNHEX_i(tt sqltypes.Type) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalInt64) + if arg.toInt64().i < 0 { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalRaw(hex.DecodeUint(uint64(arg.toInt64().i)), tt, collationBinary) + return 1 + }, "FN UNHEX INT64(SP-1)") +} + +func (asm *assembler) Fn_UNHEX_u(tt sqltypes.Type) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalUint64) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalRaw(hex.DecodeUint(uint64(arg.u)), tt, collationBinary) + return 1 + }, "FN UNHEX UINT64(SP-1)") +} + +func (asm *assembler) Fn_UNHEX_f(tt sqltypes.Type) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalFloat) + f := arg.f + if f != float64(int64(f)) { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalRaw(hex.DecodeUint(uint64(arg.f)), tt, collationBinary) + return 1 + }, "FN UNHEX FLOAT64(SP-1)") +} + +func (asm *assembler) Fn_UNHEX_b(tt sqltypes.Type) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + decoded := make([]byte, hex.DecodedLen(arg.bytes)) + + err := hex.DecodeBytes(decoded, arg.bytes) + if err != nil { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalRaw(decoded, tt, collationBinary) + return 1 + }, "FN UNHEX VARBINARY(SP-1)") +} + +func (asm *assembler) Fn_UNHEX_j(tt sqltypes.Type) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalJSON) + decoded, ok := hexDecodeJSON(arg) + if !ok { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalRaw(decoded, tt, collationBinary) + return 1 + }, "FN UNHEX VARBINARY(SP-1)") +} + +func (asm *assembler) Fn_JSON_ARRAY(args int) { + asm.adjustStack(-(args - 1)) + asm.emit(func(env *ExpressionEnv) int { + ary := make([]*json.Value, 0, args) + for sp := env.vm.sp - args; sp < env.vm.sp; sp++ { + ary = append(ary, env.vm.stack[sp].(*json.Value)) + } + env.vm.stack[env.vm.sp-args] = json.NewArray(ary) + env.vm.sp -= args - 1 + return 1 + }, "FN JSON_ARRAY (SP-%d)...(SP-1)", args) +} + +func (asm *assembler) Fn_JSON_CONTAINS_PATH(match jsonMatch, paths []*json.Path) { + switch match { + case jsonMatchOne: + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalJSON) + matched := false + for _, p := range paths { + p.Match(arg, true, func(*json.Value) { matched = true }) + if matched { + break + } + } + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBool(matched) + return 1 + }, "FN JSON_CONTAINS_PATH, SP-1, 'one', [static]") + case jsonMatchAll: + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalJSON) + matched := true + for _, p := range paths { + matched = false + p.Match(arg, true, func(*json.Value) { matched = true }) + if !matched { + break + } + } + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBool(matched) + return 1 + }, "FN JSON_CONTAINS_PATH, SP-1, 'all', [static]") + } +} + +func (asm *assembler) Fn_JSON_EXTRACT0(jp []*json.Path) { + multi := len(jp) > 1 || slice.Any(jp, func(path *json.Path) bool { return path.ContainsWildcards() }) + + if multi { + asm.emit(func(env *ExpressionEnv) int { + matches := make([]*json.Value, 0, 4) + arg := env.vm.stack[env.vm.sp-1].(*evalJSON) + for _, jp := range jp { + jp.Match(arg, true, func(value *json.Value) { + matches = append(matches, value) + }) + } + if len(matches) == 0 { + env.vm.stack[env.vm.sp-1] = nil + } else { + env.vm.stack[env.vm.sp-1] = json.NewArray(matches) + } + return 1 + }, "FN JSON_EXTRACT, SP-1, [static]") + } else { + asm.emit(func(env *ExpressionEnv) int { + var match *json.Value + arg := env.vm.stack[env.vm.sp-1].(*evalJSON) + jp[0].Match(arg, true, func(value *json.Value) { + match = value + }) + if match == nil { + env.vm.stack[env.vm.sp-1] = nil + } else { + env.vm.stack[env.vm.sp-1] = match + } + return 1 + }, "FN JSON_EXTRACT, SP-1, [static]") + } +} + +func (asm *assembler) Fn_JSON_KEYS(jp *json.Path) { + if jp == nil { + asm.emit(func(env *ExpressionEnv) int { + doc := env.vm.stack[env.vm.sp-1] + if doc == nil { + return 1 + } + j := doc.(*evalJSON) + if obj, ok := j.Object(); ok { + var keys []*json.Value + obj.Visit(func(key string, _ *json.Value) { + keys = append(keys, json.NewString(key)) + }) + env.vm.stack[env.vm.sp-1] = json.NewArray(keys) + } else { + env.vm.stack[env.vm.sp-1] = nil + } + return 1 + }, "FN JSON_KEYS (SP-1)") + } else { + asm.emit(func(env *ExpressionEnv) int { + doc := env.vm.stack[env.vm.sp-1] + if doc == nil { + return 1 + } + var obj *json.Object + jp.Match(doc.(*evalJSON), false, func(value *json.Value) { + obj, _ = value.Object() + }) + if obj != nil { + var keys []*json.Value + obj.Visit(func(key string, _ *json.Value) { + keys = append(keys, json.NewString(key)) + }) + env.vm.stack[env.vm.sp-1] = json.NewArray(keys) + } else { + env.vm.stack[env.vm.sp-1] = nil + } + return 1 + }, "FN JSON_KEYS (SP-1), %q", jp.String()) + } +} + +func (asm *assembler) Fn_JSON_OBJECT(args int) { + asm.adjustStack(-(args - 1)) + asm.emit(func(env *ExpressionEnv) int { + var obj json.Object + for sp := env.vm.sp - args; sp < env.vm.sp; sp += 2 { + key := env.vm.stack[sp] + val := env.vm.stack[sp+1] + + if key == nil { + env.vm.err = errJSONKeyIsNil + return 0 + } + + obj.Set(key.(*evalBytes).string(), val.(*evalJSON), json.Set) + } + env.vm.stack[env.vm.sp-args] = json.NewObject(obj) + env.vm.sp -= args - 1 + return 1 + }, "FN JSON_ARRAY (SP-%d)...(SP-1)", args) +} + +func (asm *assembler) Fn_JSON_UNQUOTE() { + asm.emit(func(env *ExpressionEnv) int { + j := env.vm.stack[env.vm.sp-1].(*evalJSON) + b := env.vm.arena.newEvalBytesEmpty() + b.tt = int16(sqltypes.Blob) + b.col = collationJSON + if jbytes, ok := j.StringBytes(); ok { + b.bytes = jbytes + } else { + b.bytes = j.MarshalTo(nil) + } + env.vm.stack[env.vm.sp-1] = b + return 1 + }, "FN JSON_UNQUOTE (SP-1)") +} + +func (asm *assembler) Fn_CHAR_LENGTH() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + + if sqltypes.IsBinary(arg.SQLType()) { + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(len(arg.bytes))) + } else { + coll := colldata.Lookup(arg.col.Collation) + count := charset.Length(coll.Charset(), arg.bytes) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(count)) + } + return 1 + }, "FN CHAR_LENGTH VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_LENGTH() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(len(arg.bytes))) + return 1 + }, "FN LENGTH VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_BIT_LENGTH() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(len(arg.bytes) * 8)) + return 1 + }, "FN BIT_LENGTH VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_LUCASE(upcase bool) { + if upcase { + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-1].(*evalBytes) + + coll := colldata.Lookup(str.col.Collation) + csa, ok := coll.(colldata.CaseAwareCollation) + if !ok { + env.vm.err = vterrors.Errorf(vtrpc.Code_UNIMPLEMENTED, "not implemented") + } else { + str.bytes = csa.ToUpper(nil, str.bytes) + } + str.tt = int16(sqltypes.VarChar) + return 1 + }, "FN UPPER VARCHAR(SP-1)") + } else { + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-1].(*evalBytes) + + coll := colldata.Lookup(str.col.Collation) + csa, ok := coll.(colldata.CaseAwareCollation) + if !ok { + env.vm.err = vterrors.Errorf(vtrpc.Code_UNIMPLEMENTED, "not implemented") + } else { + str.bytes = csa.ToLower(nil, str.bytes) + } + str.tt = int16(sqltypes.VarChar) + return 1 + }, "FN LOWER VARCHAR(SP-1)") + } +} + +func (asm *assembler) Fn_MULTICMP_b(args int, lessThan bool) { + asm.adjustStack(-(args - 1)) + + asm.emit(func(env *ExpressionEnv) int { + x := env.vm.stack[env.vm.sp-args].ToRawBytes() + for sp := env.vm.sp - args + 1; sp < env.vm.sp; sp++ { + y := env.vm.stack[sp].ToRawBytes() + if lessThan == (bytes.Compare(y, x) < 0) { + x = y + } + } + env.vm.stack[env.vm.sp-args] = env.vm.arena.newEvalBinary(x) + env.vm.sp -= args - 1 + return 1 + }, "FN MULTICMP VARBINARY(SP-%d)...VARBINARY(SP-1)", args) +} + +func (asm *assembler) Fn_MULTICMP_c(args int, lessThan bool, tc collations.TypedCollation) { + col := colldata.Lookup(tc.Collation) + + asm.adjustStack(-(args - 1)) + asm.emit(func(env *ExpressionEnv) int { + x := env.vm.stack[env.vm.sp-args].ToRawBytes() + for sp := env.vm.sp - args + 1; sp < env.vm.sp; sp++ { + y := env.vm.stack[sp].ToRawBytes() + if lessThan == (col.Collate(y, x, false) < 0) { + x = y + } + } + env.vm.stack[env.vm.sp-args] = env.vm.arena.newEvalText(x, tc) + env.vm.sp -= args - 1 + return 1 + }, "FN MULTICMP VARCHAR(SP-%d)...VARCHAR(SP-1)", args) +} + +func (asm *assembler) Fn_MULTICMP_d(args int, lessThan bool) { + asm.adjustStack(-(args - 1)) + + asm.emit(func(env *ExpressionEnv) int { + x := env.vm.stack[env.vm.sp-args].(*evalDecimal) + xprec := x.length + + for sp := env.vm.sp - args + 1; sp < env.vm.sp; sp++ { + y := env.vm.stack[sp].(*evalDecimal) + if lessThan == (y.dec.Cmp(x.dec) < 0) { + x = y + } + if y.length > xprec { + xprec = y.length + } + } + env.vm.stack[env.vm.sp-args] = env.vm.arena.newEvalDecimalWithPrec(x.dec, xprec) + env.vm.sp -= args - 1 + return 1 + }, "FN MULTICMP DECIMAL(SP-%d)...DECIMAL(SP-1)", args) +} + +func (asm *assembler) Fn_MULTICMP_f(args int, lessThan bool) { + asm.adjustStack(-(args - 1)) + + asm.emit(func(env *ExpressionEnv) int { + x := env.vm.stack[env.vm.sp-args].(*evalFloat) + for sp := env.vm.sp - args + 1; sp < env.vm.sp; sp++ { + y := env.vm.stack[sp].(*evalFloat) + if lessThan == (y.f < x.f) { + x = y + } + } + env.vm.stack[env.vm.sp-args] = x + env.vm.sp -= args - 1 + return 1 + }, "FN MULTICMP FLOAT64(SP-%d)...FLOAT64(SP-1)", args) +} + +func (asm *assembler) Fn_MULTICMP_i(args int, lessThan bool) { + asm.adjustStack(-(args - 1)) + + asm.emit(func(env *ExpressionEnv) int { + x := env.vm.stack[env.vm.sp-args].(*evalInt64) + for sp := env.vm.sp - args + 1; sp < env.vm.sp; sp++ { + y := env.vm.stack[sp].(*evalInt64) + if lessThan == (y.i < x.i) { + x = y + } + } + env.vm.stack[env.vm.sp-args] = x + env.vm.sp -= args - 1 + return 1 + }, "FN MULTICMP INT64(SP-%d)...INT64(SP-1)", args) +} + +func (asm *assembler) Fn_MULTICMP_u(args int, lessThan bool) { + asm.adjustStack(-(args - 1)) + + asm.emit(func(env *ExpressionEnv) int { + x := env.vm.stack[env.vm.sp-args].(*evalUint64) + for sp := env.vm.sp - args + 1; sp < env.vm.sp; sp++ { + y := env.vm.stack[sp].(*evalUint64) + if lessThan == (y.u < x.u) { + x = y + } + } + env.vm.stack[env.vm.sp-args] = x + env.vm.sp -= args - 1 + return 1 + }, "FN MULTICMP UINT64(SP-%d)...UINT64(SP-1)", args) +} + +func (asm *assembler) Fn_REPEAT() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-2].(*evalBytes) + repeat := env.vm.stack[env.vm.sp-1].(*evalInt64) + + if repeat.i < 0 { + repeat.i = 0 + } + + if !validMaxLength(int64(len(str.bytes)), repeat.i) { + env.vm.stack[env.vm.sp-2] = nil + env.vm.sp-- + return 1 + } + + str.tt = int16(sqltypes.VarChar) + str.bytes = bytes.Repeat(str.bytes, int(repeat.i)) + env.vm.sp-- + return 1 + }, "FN REPEAT VARCHAR(SP-2) INT64(SP-1)") +} + +func (asm *assembler) Fn_LEFT(col collations.TypedCollation) { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-2].(*evalBytes) + length := env.vm.stack[env.vm.sp-1].(*evalInt64) + + if length.i <= 0 { + str.tt = int16(sqltypes.VarChar) + str.bytes = nil + str.col = col + env.vm.sp-- + return 1 + } + + cs := colldata.Lookup(col.Collation).Charset() + strLen := charset.Length(cs, str.bytes) + + str.tt = int16(sqltypes.VarChar) + str.col = col + if strLen <= int(length.i) { + env.vm.sp-- + return 1 + } + + str.bytes = charset.Slice(cs, str.bytes, 0, int(length.i)) + env.vm.sp-- + return 1 + }, "FN LEFT VARCHAR(SP-2) INT64(SP-1)") +} + +func (asm *assembler) Fn_RIGHT(col collations.TypedCollation) { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-2].(*evalBytes) + length := env.vm.stack[env.vm.sp-1].(*evalInt64) + + if length.i <= 0 { + str.tt = int16(sqltypes.VarChar) + str.bytes = nil + str.col = col + env.vm.sp-- + return 1 + } + + cs := colldata.Lookup(col.Collation).Charset() + strLen := charset.Length(cs, str.bytes) + + str.tt = int16(sqltypes.VarChar) + str.col = col + + if strLen <= int(length.i) { + env.vm.sp-- + return 1 + } + + str.bytes = charset.Slice(cs, str.bytes, strLen-int(length.i), strLen) + env.vm.sp-- + return 1 + }, "FN RIGHT VARCHAR(SP-2) INT64(SP-1)") +} + +func (asm *assembler) Fn_LPAD(col collations.TypedCollation) { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-3].(*evalBytes) + length := env.vm.stack[env.vm.sp-2].(*evalInt64) + pad := env.vm.stack[env.vm.sp-1].(*evalBytes) + + if length.i < 0 { + env.vm.stack[env.vm.sp-3] = nil + env.vm.sp -= 2 + return 1 + } + + if !validMaxLength(int64(len(pad.bytes)), length.i) { + env.vm.stack[env.vm.sp-3] = nil + env.vm.sp -= 2 + return 1 + } + + cs := colldata.Lookup(col.Collation).Charset() + strLen := charset.Length(cs, str.bytes) + l := int(length.i) + + str.tt = int16(sqltypes.VarChar) + str.col = col + + if strLen >= int(length.i) { + str.bytes = charset.Slice(cs, str.bytes, 0, l) + env.vm.sp -= 2 + return 1 + } + + runeLen := charset.Length(cs, pad.bytes) + if runeLen == 0 { + str.bytes = nil + env.vm.sp -= 2 + return 1 + } + + repeat := (l - strLen) / runeLen + remainder := (l - strLen) % runeLen + + res := bytes.Repeat(pad.bytes, repeat) + if remainder > 0 { + res = append(res, charset.Slice(cs, pad.bytes, 0, remainder)...) + } + str.bytes = append(res, str.bytes...) + + env.vm.sp -= 2 + return 1 + }, "FN LPAD VARCHAR(SP-3) INT64(SP-2) VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_RPAD(col collations.TypedCollation) { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-3].(*evalBytes) + length := env.vm.stack[env.vm.sp-2].(*evalInt64) + pad := env.vm.stack[env.vm.sp-1].(*evalBytes) + + if length.i < 0 { + env.vm.stack[env.vm.sp-3] = nil + env.vm.sp -= 2 + return 1 + } + + if !validMaxLength(int64(len(pad.bytes)), length.i) { + env.vm.stack[env.vm.sp-3] = nil + env.vm.sp -= 2 + return 1 + } + + cs := colldata.Lookup(col.Collation).Charset() + strLen := charset.Length(cs, str.bytes) + l := int(length.i) + + str.tt = int16(sqltypes.VarChar) + str.col = col + + if strLen >= int(length.i) { + str.bytes = charset.Slice(cs, str.bytes, 0, int(length.i)) + env.vm.sp -= 2 + return 1 + } + + runeLen := charset.Length(cs, pad.bytes) + if runeLen == 0 { + str.bytes = nil + env.vm.sp -= 2 + return 1 + } + + repeat := (l - strLen) / runeLen + remainder := (l - strLen) % runeLen + + str.bytes = append(str.bytes, bytes.Repeat(pad.bytes, repeat)...) + if remainder > 0 { + str.bytes = append(str.bytes, charset.Slice(cs, pad.bytes, 0, remainder)...) + } + + env.vm.sp -= 2 + return 1 + }, "FN RPAD VARCHAR(SP-3) INT64(SP-2) VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_LTRIM1(col collations.TypedCollation) { + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-1].(*evalBytes) + str.tt = int16(sqltypes.VarChar) + str.bytes = bytes.TrimLeft(str.bytes, " ") + str.col = col + return 1 + }, "FN LTRIM VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_RTRIM1(col collations.TypedCollation) { + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-1].(*evalBytes) + str.tt = int16(sqltypes.VarChar) + str.bytes = bytes.TrimRight(str.bytes, " ") + str.col = col + return 1 + }, "FN RTRIM VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_TRIM1(col collations.TypedCollation) { + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-1].(*evalBytes) + str.tt = int16(sqltypes.VarChar) + str.bytes = bytes.Trim(str.bytes, " ") + str.col = col + return 1 + }, "FN TRIM VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_LTRIM2(col collations.TypedCollation) { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-2].(*evalBytes) + pat := env.vm.stack[env.vm.sp-1].(*evalBytes) + str.tt = int16(sqltypes.VarChar) + str.bytes = bytes.TrimPrefix(str.bytes, pat.bytes) + str.col = col + env.vm.sp-- + return 1 + }, "FN LTRIM VARCHAR(SP-2) VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_RTRIM2(col collations.TypedCollation) { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-2].(*evalBytes) + pat := env.vm.stack[env.vm.sp-1].(*evalBytes) + str.tt = int16(sqltypes.VarChar) + str.bytes = bytes.TrimSuffix(str.bytes, pat.bytes) + str.col = col + env.vm.sp-- + return 1 + }, "FN RTRIM VARCHAR(SP-2) VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_TRIM2(col collations.TypedCollation) { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-2].(*evalBytes) + pat := env.vm.stack[env.vm.sp-1].(*evalBytes) + str.tt = int16(sqltypes.VarChar) + str.bytes = bytes.TrimPrefix(bytes.TrimSuffix(str.bytes, pat.bytes), pat.bytes) + str.col = col + env.vm.sp-- + return 1 + }, "FN TRIM VARCHAR(SP-2) VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_TO_BASE64(t sqltypes.Type, col collations.TypedCollation) { + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-1].(*evalBytes) + encoded := mysqlBase64Encode(str.bytes) + str.tt = int16(t) + str.col = col + str.bytes = encoded + return 1 + }, "FN TO_BASE64 VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_WEIGHT_STRING(typ sqltypes.Type, length int) { + asm.emit(func(env *ExpressionEnv) int { + input := env.vm.stack[env.vm.sp-1] + w, _, err := evalWeightString(nil, input, length, 0) + if err != nil { + env.vm.err = err + return 1 + } + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalRaw(w, typ, collationBinary) + return 1 + }, "FN WEIGHT_STRING (SP-1)") +} + +func (asm *assembler) In_table(not bool, table map[vthash.Hash]struct{}) { + if not { + asm.emit(func(env *ExpressionEnv) int { + lhs := env.vm.stack[env.vm.sp-1] + if lhs != nil { + env.vm.hash.Reset() + lhs.(hashable).Hash(&env.vm.hash) + _, in := table[env.vm.hash.Sum128()] + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBool(!in) + } + return 1 + }, "NOT IN (SP-1), [static table]") + } else { + asm.emit(func(env *ExpressionEnv) int { + lhs := env.vm.stack[env.vm.sp-1] + if lhs != nil { + env.vm.hash.Reset() + lhs.(hashable).Hash(&env.vm.hash) + _, in := table[env.vm.hash.Sum128()] + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBool(in) + } + return 1 + }, "IN (SP-1), [static table]") + } +} + +func (asm *assembler) In_slow(not bool) { + asm.adjustStack(-1) + + if not { + asm.emit(func(env *ExpressionEnv) int { + lhs := env.vm.stack[env.vm.sp-2] + rhs := env.vm.stack[env.vm.sp-1].(*evalTuple) + + var in boolean + in, env.vm.err = evalInExpr(lhs, rhs) + + env.vm.stack[env.vm.sp-2] = in.not().eval() + env.vm.sp -= 1 + return 1 + }, "NOT IN (SP-2), TUPLE(SP-1)") + } else { + asm.emit(func(env *ExpressionEnv) int { + lhs := env.vm.stack[env.vm.sp-2] + rhs := env.vm.stack[env.vm.sp-1].(*evalTuple) + + var in boolean + in, env.vm.err = evalInExpr(lhs, rhs) + + env.vm.stack[env.vm.sp-2] = in.eval() + env.vm.sp -= 1 + return 1 + }, "IN (SP-2), TUPLE(SP-1)") + } +} + +func (asm *assembler) Is(check func(eval) bool) { + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBool(check(env.vm.stack[env.vm.sp-1])) + return 1 + }, "IS (SP-1), [static]") +} + +func (asm *assembler) Not_i() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalInt64) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBool(arg.i == 0) + return 1 + }, "NOT INT64(SP-1)") +} + +func (asm *assembler) Not_u() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalUint64) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBool(arg.u == 0) + return 1 + }, "NOT UINT64(SP-1)") +} + +func (asm *assembler) Not_f() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalFloat) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBool(arg.f == 0.0) + return 1 + }, "NOT FLOAT64(SP-1)") +} + +func (asm *assembler) Not_d() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalDecimal) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBool(arg.dec.IsZero()) + return 1 + }, "NOT DECIMAL(SP-1)") +} + +func (asm *assembler) LogicalLeft(opname string) *jump { + switch opname { + case "AND": + j := asm.jumpFrom() + asm.emit(func(env *ExpressionEnv) int { + left, ok := env.vm.stack[env.vm.sp-1].(*evalInt64) + if ok && left.i == 0 { + return j.offset() + } + return 1 + }, "AND CHECK INT64(SP-1)") + return j + case "OR": + j := asm.jumpFrom() + asm.emit(func(env *ExpressionEnv) int { + left, ok := env.vm.stack[env.vm.sp-1].(*evalInt64) + if ok && left.i != 0 { + left.i = 1 + return j.offset() + } + return 1 + }, "OR CHECK INT64(SP-1)") + return j + case "XOR": + j := asm.jumpFrom() + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return j.offset() + } + return 1 + }, "XOR CHECK INT64(SP-1)") + return j + } + return nil +} + +func (asm *assembler) LogicalRight(opname string) { + asm.adjustStack(-1) + switch opname { + case "AND": + asm.emit(func(env *ExpressionEnv) int { + left, lok := env.vm.stack[env.vm.sp-2].(*evalInt64) + right, rok := env.vm.stack[env.vm.sp-1].(*evalInt64) + + isLeft := lok && left.i != 0 + isRight := rok && right.i != 0 + + if isLeft && isRight { + left.i = 1 + } else if rok && !isRight { + env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalBool(false) + } else { + env.vm.stack[env.vm.sp-2] = nil + } + env.vm.sp-- + return 1 + }, "AND INT64(SP-2), INT64(SP-1)") + case "OR": + asm.emit(func(env *ExpressionEnv) int { + left, lok := env.vm.stack[env.vm.sp-2].(*evalInt64) + right, rok := env.vm.stack[env.vm.sp-1].(*evalInt64) + + isLeft := lok && left.i != 0 + isRight := rok && right.i != 0 + + switch { + case !lok: + if isRight { + env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalBool(true) + } + case !rok: + env.vm.stack[env.vm.sp-2] = nil + default: + if isLeft || isRight { + left.i = 1 + } else { + left.i = 0 + } + } + env.vm.sp-- + return 1 + }, "OR INT64(SP-2), INT64(SP-1)") + case "XOR": + asm.emit(func(env *ExpressionEnv) int { + left := env.vm.stack[env.vm.sp-2].(*evalInt64) + right, rok := env.vm.stack[env.vm.sp-1].(*evalInt64) + + isLeft := left.i != 0 + isRight := rok && right.i != 0 + + switch { + case !rok: + env.vm.stack[env.vm.sp-2] = nil + default: + if isLeft != isRight { + left.i = 1 + } else { + left.i = 0 + } + } + env.vm.sp-- + return 1 + }, "XOR INT64(SP-2), INT64(SP-1)") + } +} + +func (asm *assembler) Like_coerce(expr *LikeExpr, coercion *compiledCoercion) { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalBytes) + r := env.vm.stack[env.vm.sp-1].(*evalBytes) + env.vm.sp-- + + var bl, br []byte + bl, env.vm.err = coercion.left(nil, l.bytes) + if env.vm.err != nil { + return 0 + } + br, env.vm.err = coercion.right(nil, r.bytes) + if env.vm.err != nil { + return 0 + } + + match := expr.matchWildcard(bl, br, coercion.col.ID()) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBool(match) + return 1 + }, "LIKE VARCHAR(SP-2), VARCHAR(SP-1) COERCE AND COLLATE '%s'", coercion.col.Name()) +} + +func (asm *assembler) Like_collate(expr *LikeExpr, collation colldata.Collation) { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalBytes) + r := env.vm.stack[env.vm.sp-1].(*evalBytes) + env.vm.sp-- + + match := expr.matchWildcard(l.bytes, r.bytes, collation.ID()) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBool(match) + return 1 + }, "LIKE VARCHAR(SP-2), VARCHAR(SP-1) COLLATE '%s'", collation.Name()) +} + +func (asm *assembler) Strcmp(collation collations.TypedCollation) { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalBytes) + r := env.vm.stack[env.vm.sp-1].(*evalBytes) + env.vm.sp-- + + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(strcmpCollate(l.bytes, r.bytes, collation.Collation)) + return 1 + }, "STRCMP VARCHAR(SP-2), VARCHAR(SP-1)") +} + +func (asm *assembler) Mul_dd() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalDecimal) + r := env.vm.stack[env.vm.sp-1].(*evalDecimal) + mathMul_dd0(l, r) + env.vm.sp-- + return 1 + }, "MUL DECIMAL(SP-2), DECIMAL(SP-1)") +} + +func (asm *assembler) Mul_ff() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalFloat) + r := env.vm.stack[env.vm.sp-1].(*evalFloat) + l.f *= r.f + env.vm.sp-- + return 1 + }, "MUL FLOAT64(SP-2), FLOAT64(SP-1)") +} + +func (asm *assembler) Mul_ii() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalInt64) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + l.i, env.vm.err = mathMul_ii0(l.i, r.i) + env.vm.sp-- + return 1 + }, "MUL INT64(SP-2), INT64(SP-1)") +} + +func (asm *assembler) Mul_ui(swap bool) { + asm.adjustStack(-1) + + if swap { + asm.emit(func(env *ExpressionEnv) int { + var u uint64 + l := env.vm.stack[env.vm.sp-1].(*evalUint64) + r := env.vm.stack[env.vm.sp-2].(*evalInt64) + u, env.vm.err = mathMul_ui0(l.u, r.i) + env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalUint64(u) + env.vm.sp-- + return 1 + }, "MUL UINT64(SP-1), INT64(SP-2)") + } else { + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + l.u, env.vm.err = mathMul_ui0(l.u, r.i) + env.vm.sp-- + return 1 + }, "MUL UINT64(SP-2), INT64(SP-1)") + } +} + +func (asm *assembler) Mul_uu() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalUint64) + l.u, env.vm.err = mathMul_uu0(l.u, r.u) + env.vm.sp-- + return 1 + }, "MUL UINT64(SP-2), UINT64(SP-1)") +} + +func (asm *assembler) Neg_d() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalDecimal) + arg.dec = arg.dec.Neg() + return 1 + }, "NEG DECIMAL(SP-1)") +} + +func (asm *assembler) Neg_f() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalFloat) + arg.f = -arg.f + return 1 + }, "NEG FLOAT64(SP-1)") +} + +func (asm *assembler) Neg_hex() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalUint64) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalFloat(-float64(arg.u)) + return 1 + }, "NEG HEX(SP-1)") +} + +func (asm *assembler) Neg_i() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalInt64) + if arg.i == math.MinInt64 { + env.vm.err = errDeoptimize + } else { + arg.i = -arg.i + } + return 1 + }, "NEG INT64(SP-1)") +} + +func (asm *assembler) Neg_u() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalUint64) + if arg.u > math.MaxInt64+1 { + env.vm.err = errDeoptimize + } else { + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(-int64(arg.u)) + } + return 1 + }, "NEG UINT64(SP-1)") +} + +func (asm *assembler) NullCheck1(j *jump) { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return j.offset() + } + return 1 + }, "NULLCHECK SP-1") +} + +func (asm *assembler) NullCheck1r(j *jump) { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + env.vm.stack[env.vm.sp-2] = nil + env.vm.sp-- + return j.offset() + } + return 1 + }, "NULLCHECK SP-1 [rhs]") +} + +func (asm *assembler) NullCheck2(j *jump) { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-2] == nil || env.vm.stack[env.vm.sp-1] == nil { + env.vm.stack[env.vm.sp-2] = nil + env.vm.sp-- + return j.offset() + } + return 1 + }, "NULLCHECK SP-1, SP-2") +} + +func (asm *assembler) NullCheck3(j *jump) { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-3] == nil || env.vm.stack[env.vm.sp-2] == nil || env.vm.stack[env.vm.sp-1] == nil { + env.vm.stack[env.vm.sp-3] = nil + env.vm.sp -= 2 + return j.offset() + } + return 1 + }, "NULLCHECK SP-1, SP-2, SP-3") +} + +func (asm *assembler) NullCheckArg(j *jump, offset int) { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + env.vm.stack[env.vm.sp-offset-1] = nil + env.vm.sp -= offset + return j.offset() + } + return 1 + }, "NULLCHECK SP-1 [argument %d]", offset) +} + +func (asm *assembler) NullCheckOffset(j *jump, offset int) { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-offset] == nil { + return j.offset() + } + return 1 + }, "NULLCHECK SP-1 [offset %d]", offset) +} + +func (asm *assembler) Cmp_nullsafe(j *jump) { + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2] + r := env.vm.stack[env.vm.sp-1] + if l == nil || r == nil { + if l == r { + env.vm.flags.cmp = 0 + } else { + env.vm.flags.cmp = 1 + } + env.vm.sp -= 2 + return j.offset() + } + return 1 + }, "NULLCMP SP-1, SP-2") +} + +func (asm *assembler) PackTuple(tlen int) { + asm.adjustStack(-(tlen - 1)) + asm.emit(func(env *ExpressionEnv) int { + tuple := make([]eval, tlen) + copy(tuple, env.vm.stack[env.vm.sp-tlen:]) + env.vm.stack[env.vm.sp-tlen] = &evalTuple{tuple} + env.vm.sp -= tlen - 1 + return 1 + }, "TUPLE (SP-%d)...(SP-1)", tlen) +} + +func (asm *assembler) Parse_j(offset int) { + asm.emit(func(env *ExpressionEnv) int { + var p json.Parser + arg := env.vm.stack[env.vm.sp-offset].(*evalBytes) + env.vm.stack[env.vm.sp-offset], env.vm.err = p.ParseBytes(arg.bytes) + return 1 + }, "PARSE_JSON VARCHAR(SP-%d)", offset) +} + +func (asm *assembler) SetBool(offset int, b bool) { + if b { + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(true) + return 1 + }, "SET (SP-%d), BOOL(true)", offset) + } else { + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(false) + return 1 + }, "SET (SP-%d), BOOL(false)", offset) + } +} + +func (asm *assembler) SetNull(offset int) { + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp-offset] = nil + return 1 + }, "SET (SP-%d), NULL", offset) +} + +func (asm *assembler) Sub_dd() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalDecimal) + r := env.vm.stack[env.vm.sp-1].(*evalDecimal) + mathSub_dd0(l, r) + env.vm.sp-- + return 1 + }, "SUB DECIMAL(SP-2), DECIMAL(SP-1)") +} + +func (asm *assembler) Sub_ff() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalFloat) + r := env.vm.stack[env.vm.sp-1].(*evalFloat) + l.f -= r.f + env.vm.sp-- + return 1 + }, "SUB FLOAT64(SP-2), FLOAT64(SP-1)") +} + +func (asm *assembler) Sub_ii() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalInt64) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + l.i, env.vm.err = mathSub_ii0(l.i, r.i) + env.vm.sp-- + return 1 + }, "SUB INT64(SP-2), INT64(SP-1)") +} + +func (asm *assembler) Sub_iu() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalInt64) + r := env.vm.stack[env.vm.sp-1].(*evalUint64) + r.u, env.vm.err = mathSub_iu0(l.i, r.u) + env.vm.stack[env.vm.sp-2] = r + env.vm.sp-- + return 1 + }, "SUB INT64(SP-2), UINT64(SP-1)") +} + +func (asm *assembler) Sub_ui() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalInt64) + l.u, env.vm.err = mathSub_ui0(l.u, r.i) + env.vm.sp-- + return 1 + }, "SUB UINT64(SP-2), INT64(SP-1)") +} + +func (asm *assembler) Sub_uu() { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + l := env.vm.stack[env.vm.sp-2].(*evalUint64) + r := env.vm.stack[env.vm.sp-1].(*evalUint64) + l.u, env.vm.err = mathSub_uu0(l.u, r.u) + env.vm.sp-- + return 1 + }, "SUB UINT64(SP-2), UINT64(SP-1)") +} + +func cmpnum[N interface{ int64 | uint64 | float64 }](a, b N) int { + switch { + case a == b: + return 0 + case a < b: + return -1 + default: + return 1 + } +} + +func (asm *assembler) Fn_Now(t querypb.Type, format *datetime.Strftime, prec uint8, utc bool) { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + val := env.vm.arena.newEvalBytesEmpty() + val.tt = int16(t) + val.bytes = format.Format(env.time(utc), prec) + val.col = collationBinary + env.vm.stack[env.vm.sp] = val + env.vm.sp++ + return 1 + }, "FN NOW") +} + +func (asm *assembler) Fn_Sysdate(prec uint8) { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + val := env.vm.arena.newEvalBytesEmpty() + val.tt = int16(sqltypes.Datetime) + now := SystemTime() + if tz := env.currentTimezone(); tz != nil { + now = now.In(tz) + } + val.bytes = datetime.NewDateTimeFromStd(now).Format(prec) + val.col = collationBinary + env.vm.stack[env.vm.sp] = val + env.vm.sp++ + return 1 + }, "FN SYSDATE") +} + +func (asm *assembler) Fn_Curdate() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + val := env.vm.arena.newEvalBytesEmpty() + val.tt = int16(sqltypes.Date) + val.bytes = datetime.Date_YYYY_MM_DD.Format(env.time(false), 0) + val.col = collationBinary + env.vm.stack[env.vm.sp] = val + env.vm.sp++ + return 1 + }, "FN CURDATE") +} + +func (asm *assembler) Fn_UtcDate() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + val := env.vm.arena.newEvalBytesEmpty() + val.tt = int16(sqltypes.Date) + val.bytes = datetime.Date_YYYY_MM_DD.Format(env.time(true), 0) + val.col = collationBinary + env.vm.stack[env.vm.sp] = val + env.vm.sp++ + return 1 + }, "FN UTC_DATE") +} + +func (asm *assembler) Fn_User() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalText([]byte(env.currentUser()), collationUtf8mb3) + env.vm.sp++ + return 1 + }, "FN USER") +} + +func (asm *assembler) Fn_Database() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + db := env.currentDatabase() + if db == "" { + env.vm.stack[env.vm.sp] = nil + } else { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalText([]byte(db), collationUtf8mb3) + } + env.vm.sp++ + return 1 + }, "FN DATABASE") +} + +func (asm *assembler) Fn_Version() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalText([]byte(servenv.MySQLServerVersion()), collationUtf8mb3) + env.vm.sp++ + return 1 + }, "FN VERSION") +} + +func (asm *assembler) Fn_MD5(col collations.TypedCollation) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + + sum := md5.Sum(arg.bytes) + buf := make([]byte, gohex.EncodedLen(len(sum))) + gohex.Encode(buf, sum[:]) + + arg.tt = int16(sqltypes.VarChar) + arg.bytes = buf + arg.col = col + return 1 + }, "FN MD5 VARBINARY(SP-1)") +} + +func (asm *assembler) Fn_SHA1(col collations.TypedCollation) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + + sum := sha1.Sum(arg.bytes) + buf := make([]byte, gohex.EncodedLen(len(sum))) + gohex.Encode(buf, sum[:]) + + arg.tt = int16(sqltypes.VarChar) + arg.bytes = buf + arg.col = col + return 1 + }, "FN SHA1 VARBINARY(SP-1)") +} + +func (asm *assembler) Fn_SHA2(col collations.TypedCollation) { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-2].(*evalBytes) + bits := env.vm.stack[env.vm.sp-1].(*evalInt64) + + var sum []byte + switch bits.i { + case 224: + s := sha256.Sum224(arg.bytes) + sum = s[:] + case 0, 256: + s := sha256.Sum256(arg.bytes) + sum = s[:] + case 384: + s := sha512.Sum384(arg.bytes) + sum = s[:] + case 512: + s := sha512.Sum512(arg.bytes) + sum = s[:] + default: + env.vm.stack[env.vm.sp-2] = nil + env.vm.sp-- + return 1 + } + buf := make([]byte, gohex.EncodedLen(len(sum))) + gohex.Encode(buf, sum[:]) + + arg.tt = int16(sqltypes.VarChar) + arg.bytes = buf + arg.col = col + env.vm.sp-- + return 1 + }, "FN SHA2 VARBINARY(SP-2), INT64(SP-1)") +} + +func (asm *assembler) Fn_RandomBytes() { + asm.emit(func(env *ExpressionEnv) int { + size := env.vm.stack[env.vm.sp-1].(*evalInt64) + if size.i < 1 || size.i > 1024 { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + buf := make([]byte, size.i) + _, env.vm.err = rand.Read(buf) + if env.vm.err != nil { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBinary(buf) + return 1 + }, "FN RANDOM_BYTES INT64(SP-1)") +} + +func (asm *assembler) Fn_DATE_FORMAT(col collations.TypedCollation) { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-2] == nil { + env.vm.sp-- + return 1 + } + l := env.vm.stack[env.vm.sp-2].(*evalTemporal) + r := env.vm.stack[env.vm.sp-1].(*evalBytes) + + var d []byte + d, env.vm.err = datetime.Format(r.string(), l.dt, l.prec) + env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalText(d, col) + env.vm.sp-- + return 1 + }, "FN DATE_FORMAT DATETIME(SP-2), VARBINARY(SP-1)") +} + +func (asm *assembler) Fn_CONVERT_TZ() { + asm.adjustStack(-2) + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-3] == nil { + env.vm.sp -= 2 + return 1 + } + + n := env.vm.stack[env.vm.sp-3].(*evalTemporal) + f := env.vm.stack[env.vm.sp-2].(*evalBytes) + t := env.vm.stack[env.vm.sp-1].(*evalBytes) + + fromTz, err := datetime.ParseTimeZone(f.string()) + if err != nil { + env.vm.stack[env.vm.sp-3] = nil + env.vm.sp -= 2 + return 1 + } + + toTz, err := datetime.ParseTimeZone(t.string()) + if err != nil { + env.vm.stack[env.vm.sp-3] = nil + env.vm.sp -= 2 + return 1 + } + + dt, ok := convertTz(n.dt, fromTz, toTz) + if !ok { + env.vm.stack[env.vm.sp-3] = nil + env.vm.sp -= 2 + return 1 + } + env.vm.stack[env.vm.sp-3] = env.vm.arena.newEvalDateTime(dt, int(n.prec)) + env.vm.sp -= 2 + return 1 + }, "FN CONVERT_TZ DATETIME(SP-3), VARBINARY(SP-2), VARBINARY(SP-1)") +} + +func (asm *assembler) Fn_DAYOFMONTH() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(arg.dt.Date.Day())) + return 1 + }, "FN DAYOFMONTH DATE(SP-1)") +} + +func (asm *assembler) Fn_DAYOFWEEK() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(arg.dt.Date.Weekday() + 1)) + return 1 + }, "FN DAYOFWEEK DATE(SP-1)") +} + +func (asm *assembler) Fn_DAYOFYEAR() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(arg.dt.Date.Yearday())) + return 1 + }, "FN DAYOFYEAR DATE(SP-1)") +} + +func (asm *assembler) Fn_FROM_UNIXTIME_i() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalInt64) + if arg.i < 0 || arg.i > maxUnixtime { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + t := time.Unix(arg.i, 0) + if tz := env.currentTimezone(); tz != nil { + t = t.In(tz) + } + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalDateTime(datetime.NewDateTimeFromStd(t), 0) + return 1 + }, "FN FROM_UNIXTIME INT64(SP-1)") +} + +func (asm *assembler) Fn_FROM_UNIXTIME_u() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalUint64) + if arg.u > maxUnixtime { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + t := time.Unix(int64(arg.u), 0) + if tz := env.currentTimezone(); tz != nil { + t = t.In(tz) + } + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalDateTime(datetime.NewDateTimeFromStd(t), 0) + return 1 + }, "FN FROM_UNIXTIME UINT64(SP-1)") +} + +func (asm *assembler) Fn_FROM_UNIXTIME_d() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalDecimal) + if arg.dec.Sign() < 0 { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + + sd, fd := arg.dec.QuoRem(decimal.New(1, 0), 0) + sec, _ := sd.Int64() + if sec > maxUnixtime { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + frac, _ := fd.Mul(decimal.New(1, 9)).Int64() + t := time.Unix(sec, frac) + if tz := env.currentTimezone(); tz != nil { + t = t.In(tz) + } + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalDateTime(datetime.NewDateTimeFromStd(t), int(arg.length)) + return 1 + }, "FN FROM_UNIXTIME DECIMAL(SP-1)") +} + +func (asm *assembler) Fn_FROM_UNIXTIME_f() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalFloat) + if arg.f < 0 || arg.f > maxUnixtime { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + sec, frac := math.Modf(arg.f) + t := time.Unix(int64(sec), int64(frac*1e9)) + if tz := env.currentTimezone(); tz != nil { + t = t.In(tz) + } + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalDateTime(datetime.NewDateTimeFromStd(t), 6) + return 1 + }, "FN FROM_UNIXTIME FLOAT(SP-1)") +} + +func (asm *assembler) Fn_HOUR() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(arg.dt.Time.Hour())) + return 1 + }, "FN HOUR TIME(SP-1)") +} + +func (asm *assembler) Fn_MAKEDATE() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + y := env.vm.stack[env.vm.sp-1].(*evalInt64) + yd := env.vm.stack[env.vm.sp-2].(*evalInt64) + + t := yearDayToTime(y.i, yd.i) + if t.IsZero() { + env.vm.stack[env.vm.sp-2] = nil + } else { + env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalDate(datetime.NewDateTimeFromStd(t).Date) + } + env.vm.sp-- + return 1 + }, "FN MAKEDATE INT64(SP-2) INT64(SP-1)") +} + +func (asm *assembler) Fn_MAKETIME_i() { + asm.adjustStack(-2) + asm.emit(func(env *ExpressionEnv) int { + h := env.vm.stack[env.vm.sp-3].(*evalInt64) + m := env.vm.stack[env.vm.sp-2].(*evalInt64) + s := env.vm.stack[env.vm.sp-1].(*evalInt64) + + i, ok := makeTime_i(h.i, m.i, s.i) + if !ok { + env.vm.stack[env.vm.sp-3] = nil + env.vm.sp -= 2 + return 1 + } + t, ok := datetime.ParseTimeInt64(i) + if !ok { + env.vm.stack[env.vm.sp-3] = nil + env.vm.sp -= 2 + return 1 + } + + env.vm.stack[env.vm.sp-3] = env.vm.arena.newEvalTime(t, 0) + env.vm.sp -= 2 + return 1 + }, "FN MAKETIME INT64(SP-3) INT64(SP-2) INT64(SP-1)") +} + +func (asm *assembler) Fn_MAKETIME_d() { + asm.adjustStack(-2) + asm.emit(func(env *ExpressionEnv) int { + h := env.vm.stack[env.vm.sp-3].(*evalInt64) + m := env.vm.stack[env.vm.sp-2].(*evalInt64) + s := env.vm.stack[env.vm.sp-1].(*evalDecimal) + + d, ok := makeTime_d(h.i, m.i, s.dec) + if !ok { + env.vm.stack[env.vm.sp-3] = nil + env.vm.sp -= 2 + return 1 + } + t, l, ok := datetime.ParseTimeDecimal(d, s.length, -1) + if !ok { + env.vm.stack[env.vm.sp-3] = nil + env.vm.sp -= 2 + return 1 + } + + env.vm.stack[env.vm.sp-3] = env.vm.arena.newEvalTime(t, l) + env.vm.sp -= 2 + return 1 + }, "FN MAKETIME INT64(SP-3) INT64(SP-2) DECIMAL(SP-1)") +} + +func (asm *assembler) Fn_MAKETIME_f() { + asm.adjustStack(-2) + asm.emit(func(env *ExpressionEnv) int { + h := env.vm.stack[env.vm.sp-3].(*evalInt64) + m := env.vm.stack[env.vm.sp-2].(*evalInt64) + s := env.vm.stack[env.vm.sp-1].(*evalFloat) + + f, ok := makeTime_f(h.i, m.i, s.f) + if !ok { + env.vm.stack[env.vm.sp-3] = nil + env.vm.sp -= 2 + return 1 + } + t, l, ok := datetime.ParseTimeFloat(f, -1) + if !ok { + env.vm.stack[env.vm.sp-3] = nil + env.vm.sp -= 2 + return 1 + } + + env.vm.stack[env.vm.sp-3] = env.vm.arena.newEvalTime(t, l) + env.vm.sp -= 2 + return 1 + }, "FN MAKETIME INT64(SP-3) INT64(SP-2) FLOAT(SP-1)") +} + +func (asm *assembler) Fn_MICROSECOND() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(arg.dt.Time.Nanosecond() / 1000)) + return 1 + }, "FN MICROSECOND TIME(SP-1)") +} + +func (asm *assembler) Fn_MINUTE() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(arg.dt.Time.Minute())) + return 1 + }, "FN MINUTE TIME(SP-1)") +} + +func (asm *assembler) Fn_MONTH() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(arg.dt.Date.Month())) + return 1 + }, "FN MONTH DATE(SP-1)") +} + +func (asm *assembler) Fn_MONTHNAME(col collations.TypedCollation) { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + m := arg.dt.Date.Month() + if m < 1 || m > 12 { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + + mb := hack.StringBytes(time.Month(arg.dt.Date.Month()).String()) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalText(mb, col) + return 1 + }, "FN MONTHNAME DATE(SP-1)") +} + +func (asm *assembler) Fn_QUARTER() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(arg.dt.Date.Quarter())) + return 1 + }, "FN QUARTER DATE(SP-1)") +} + +func (asm *assembler) Fn_SECOND() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(arg.dt.Time.Second())) + return 1 + }, "FN SECOND TIME(SP-1)") +} + +func (asm *assembler) Fn_UNIX_TIMESTAMP0() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalInt64(env.now.Unix()) + env.vm.sp++ + return 1 + }, "FN UNIX_TIMESTAMP") +} + +func (asm *assembler) Fn_UNIX_TIMESTAMP1() { + asm.emit(func(env *ExpressionEnv) int { + res := dateTimeUnixTimestamp(env, env.vm.stack[env.vm.sp-1]) + if _, ok := res.(*evalInt64); !ok { + env.vm.err = errDeoptimize + } + env.vm.stack[env.vm.sp-1] = res + return 1 + }, "FN UNIX_TIMESTAMP (SP-1)") +} + +func (asm *assembler) Fn_WEEK0() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + week := arg.dt.Date.Week(0) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(week)) + return 1 + }, "FN WEEK0 DATE(SP-1)") +} + +func (asm *assembler) Fn_WEEK() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-2] == nil { + env.vm.sp-- + return 1 + } + arg := env.vm.stack[env.vm.sp-2].(*evalTemporal) + mode := env.vm.stack[env.vm.sp-1].(*evalInt64) + week := arg.dt.Date.Week(int(mode.i)) + env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalInt64(int64(week)) + env.vm.sp-- + return 1 + }, "FN WEEK DATE(SP-1)") +} + +func (asm *assembler) Fn_WEEKDAY() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(arg.dt.Date.Weekday()+6) % 7) + return 1 + }, "FN WEEKDAY DATE(SP-1)") +} + +func (asm *assembler) Fn_WEEKOFYEAR() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + _, week := arg.dt.Date.ISOWeek() + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(week)) + return 1 + }, "FN WEEKOFYEAR DATE(SP-1)") +} + +func (asm *assembler) Fn_YEAR() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(arg.dt.Date.Year())) + return 1 + }, "FN YEAR DATE(SP-1)") +} + +func (asm *assembler) Fn_YEARWEEK0() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + yw := arg.dt.Date.YearWeek(0) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(yw)) + return 1 + }, "FN YEARWEEK0 DATE(SP-1)") +} + +func (asm *assembler) Fn_YEARWEEK() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-2] == nil { + env.vm.sp-- + return 1 + } + arg := env.vm.stack[env.vm.sp-2].(*evalTemporal) + mode := env.vm.stack[env.vm.sp-1].(*evalInt64) + yw := arg.dt.Date.YearWeek(int(mode.i)) + env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalInt64(int64(yw)) + env.vm.sp-- + return 1 + }, "FN YEARWEEK DATE(SP-1)") +} + +func (asm *assembler) Interval_i(l int) { + asm.adjustStack(-l) + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-l] == nil { + env.vm.stack[env.vm.sp-l] = env.vm.arena.newEvalInt64(-1) + env.vm.sp -= l + return 1 + } + + env.vm.sp -= l + return 1 + }, "INTERVAL INT64(SP-1)...INT64(SP-%d)", l) +} + +func (asm *assembler) Interval(l int) { + asm.adjustStack(-l) + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-l-1] == nil { + env.vm.stack[env.vm.sp-l-1] = env.vm.arena.newEvalInt64(-1) + env.vm.sp -= l + return 1 + } + + args := env.vm.stack[env.vm.sp-l-1:] + idx, err := findInterval(args) + if err != nil { + env.vm.err = err + } else { + env.vm.stack[env.vm.sp-l-1] = env.vm.arena.newEvalInt64(idx) + } + env.vm.sp -= l + return 1 + + }, "INTERVAL NUMERIC(SP-1)...NUMERIC(SP-%d)", l) +} + +func (asm *assembler) Fn_INET_ATON() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + ip, err := netip.ParseAddr(arg.string()) + if err != nil || !ip.Is4() { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalUint64(uint64(binary.BigEndian.Uint32(ip.AsSlice()))) + return 1 + }, "FN INET_ATON VARBINARY(SP-1)") +} + +func (asm *assembler) Fn_INET_NTOA(col collations.TypedCollation) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalUint64) + if arg.u > math.MaxUint32 { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + + b := binary.BigEndian.AppendUint32(nil, uint32(arg.u)) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalText(hack.StringBytes(netip.AddrFrom4([4]byte(b)).String()), col) + return 1 + }, "FN INET_NTOA VARBINARY(SP-1)") +} + +func (asm *assembler) Fn_INET6_ATON() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + ip, err := netip.ParseAddr(arg.string()) + if err != nil { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBinary(ip.AsSlice()) + return 1 + }, "INET6_ATON VARBINARY(SP-1)") +} + +func (asm *assembler) Fn_INET6_NTOA(col collations.TypedCollation) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + + ip, ok := netip.AddrFromSlice(arg.bytes) + if !ok { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + + if ip, ok := printIPv6AsIPv4(ip); ok { + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalText(hack.StringBytes("::"+ip.String()), col) + } else { + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalText(hack.StringBytes(ip.String()), col) + } + return 1 + }, "FN INET6_NTOA VARBINARY(SP-1)") +} + +func (asm *assembler) Fn_IS_IPV4() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + + ip, err := netip.ParseAddr(arg.string()) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBool(err == nil && ip.Is4()) + return 1 + }, "FN IS_IPV4 VARBINARY(SP-1)") +} + +func (asm *assembler) Fn_IS_IPV4_COMPAT() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + ip, ok := netip.AddrFromSlice(arg.bytes) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBool(ok && isIPv4Compat(ip)) + return 1 + }, "FN IS_IPV4_COMPAT VARBINARY(SP-1)") +} + +func (asm *assembler) Fn_IS_IPV4_MAPPED() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + ip, ok := netip.AddrFromSlice(arg.bytes) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBool(ok && ip.Is4In6()) + return 1 + }, "FN IS_IPV4_MAPPED VARBINARY(SP-1)") +} + +func (asm *assembler) Fn_IS_IPV6() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + + ip, err := netip.ParseAddr(arg.string()) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBool(err == nil && ip.Is6()) + return 1 + }, "FN IS_IPV6 VARBINARY(SP-1)") +} + +func (asm *assembler) Fn_CONCAT(tt querypb.Type, tc collations.TypedCollation, args int) { + asm.adjustStack(-args + 1) + asm.emit(func(env *ExpressionEnv) int { + var buf []byte + for i := 0; i < args; i++ { + arg := env.vm.stack[env.vm.sp-args+i].(*evalBytes) + buf = append(buf, arg.bytes...) + } + + ret := env.vm.stack[env.vm.sp-args].(*evalBytes) + ret.bytes = buf + ret.tt = int16(tt) + ret.col = tc + env.vm.sp -= args - 1 + return 1 + }, "FN CONCAT VARCHAR(SP-1)...VARCHAR(SP-N)") +} + +func (asm *assembler) Fn_CONCAT_WS(tt querypb.Type, tc collations.TypedCollation, args int) { + asm.adjustStack(-args) + asm.emit(func(env *ExpressionEnv) int { + var buf []byte + sep := env.vm.stack[env.vm.sp-args-1].(*evalBytes).bytes + + first := true + for i := 0; i < args; i++ { + if env.vm.stack[env.vm.sp-args+i] == nil { + continue + } + if !first { + buf = append(buf, sep...) + } + first = false + arg := env.vm.stack[env.vm.sp-args+i].(*evalBytes) + buf = append(buf, arg.bytes...) + } + + ret := env.vm.stack[env.vm.sp-args-1].(*evalBytes) + ret.bytes = buf + ret.tt = int16(tt) + ret.col = tc + env.vm.sp -= args + return 1 + }, "FN CONCAT_WS VARCHAR(SP-1) VARCHAR(SP-2)...VARCHAR(SP-N)") +} + +func (asm *assembler) Fn_BIN_TO_UUID0(col collations.TypedCollation) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + + parsed, err := uuid.FromBytes(arg.bytes) + if err != nil { + env.vm.stack[env.vm.sp-1] = nil + env.vm.err = errIncorrectUUID(arg.bytes, "bin_to_uuid") + return 1 + } + arg.bytes = hack.StringBytes(parsed.String()) + arg.tt = int16(sqltypes.VarChar) + arg.col = col + return 1 + }, "FN BIN_TO_UUID VARBINARY(SP-1)") +} + +func (asm *assembler) Fn_BIN_TO_UUID1(col collations.TypedCollation) { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-2].(*evalBytes) + b := arg.bytes + + if env.vm.stack[env.vm.sp-1] != nil && + env.vm.stack[env.vm.sp-1].(*evalInt64).i != 0 { + b = swapUUIDFrom(b) + } + + parsed, err := uuid.FromBytes(b) + if err != nil { + env.vm.stack[env.vm.sp-2] = nil + env.vm.err = errIncorrectUUID(arg.bytes, "bin_to_uuid") + env.vm.sp-- + return 1 + } + arg.bytes = hack.StringBytes(parsed.String()) + arg.tt = int16(sqltypes.VarChar) + arg.col = col + env.vm.sp-- + return 1 + }, "FN BIN_TO_UUID VARBINARY(SP-2) INT64(SP-1)") +} + +func (asm *assembler) Fn_IS_UUID() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + + _, err := uuid.ParseBytes(arg.bytes) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalBool(err == nil) + return 1 + }, "FN IS_UUID VARBINARY(SP-1)") +} + +func (asm *assembler) Fn_UUID() { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + v, err := uuid.NewUUID() + if err != nil { + env.vm.err = err + env.vm.sp++ + return 1 + } + m, err := v.MarshalText() + if err != nil { + env.vm.err = err + env.vm.sp++ + return 1 + } + + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalText(m, collationUtf8mb3) + env.vm.sp++ + return 1 + }, "FN UUID") +} + +func (asm *assembler) Fn_UUID_TO_BIN0() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + + parsed, err := uuid.ParseBytes(arg.bytes) + if err != nil { + env.vm.stack[env.vm.sp-1] = nil + env.vm.err = errIncorrectUUID(arg.bytes, "uuid_to_bin") + return 1 + } + arg.bytes = parsed[:] + arg.tt = int16(sqltypes.VarBinary) + arg.col = collationBinary + return 1 + }, "FN UUID_TO_BIN VARBINARY(SP-1)") +} + +func (asm *assembler) Fn_UUID_TO_BIN1() { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-2].(*evalBytes) + parsed, err := uuid.ParseBytes(arg.bytes) + if err != nil { + env.vm.stack[env.vm.sp-2] = nil + env.vm.err = errIncorrectUUID(arg.bytes, "uuid_to_bin") + env.vm.sp-- + return 1 + } + b := parsed[:] + if env.vm.stack[env.vm.sp-1] != nil && + env.vm.stack[env.vm.sp-1].(*evalInt64).i != 0 { + b = swapUUIDTo(b) + } + arg.bytes = b + arg.tt = int16(sqltypes.VarBinary) + arg.col = collationBinary + env.vm.sp-- + return 1 + }, "FN UUID_TO_BIN VARBINARY(SP-2) INT64(SP-1)") +} + +func (asm *assembler) Fn_DATEADD_D(unit datetime.IntervalType, sub bool) { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + interval := evalToInterval(env.vm.stack[env.vm.sp-1], unit, sub) + if interval == nil { + env.vm.stack[env.vm.sp-2] = nil + env.vm.sp-- + return 1 + } + + tmp := env.vm.stack[env.vm.sp-2].(*evalTemporal) + env.vm.stack[env.vm.sp-2] = tmp.addInterval(interval, collations.TypedCollation{}) + env.vm.sp-- + return 1 + }, "FN DATEADD TEMPORAL(SP-2), INTERVAL(SP-1)") +} + +func (asm *assembler) Fn_DATEADD_s(unit datetime.IntervalType, sub bool, col collations.TypedCollation) { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + var interval *datetime.Interval + var tmp *evalTemporal + + interval = evalToInterval(env.vm.stack[env.vm.sp-1], unit, sub) + if interval == nil { + goto baddate + } + + tmp = evalToTemporal(env.vm.stack[env.vm.sp-2]) + if tmp == nil { + goto baddate + } + + env.vm.stack[env.vm.sp-2] = tmp.addInterval(interval, col) + env.vm.sp-- + return 1 + + baddate: + env.vm.stack[env.vm.sp-2] = nil + env.vm.sp-- + return 1 + }, "FN DATEADD TEMPORAL(SP-2), INTERVAL(SP-1)") + +} + +func (asm *assembler) Fn_REGEXP_LIKE(m *icuregex.Matcher, negate bool, c charset.Charset, offset int) { + asm.adjustStack(-offset) + asm.emit(func(env *ExpressionEnv) int { + input := env.vm.stack[env.vm.sp-offset-1].(*evalBytes) + m.Reset(charset.Expand(nil, input.bytes, c)) + + ok, err := m.Find() + if err != nil { + env.vm.err = err + env.vm.sp -= offset + return 1 + } + if negate { + ok = !ok + } + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalBool(ok) + env.vm.sp -= offset + return 1 + }, "FN REGEXP_LIKE VARCHAR(SP-2), VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_REGEXP_LIKE_slow(negate bool, c colldata.Charset, flags icuregex.RegexpFlag, offset int) { + asm.adjustStack(-offset) + asm.emit(func(env *ExpressionEnv) int { + var err error + input := env.vm.stack[env.vm.sp-offset-1].(*evalBytes) + pattern := env.vm.stack[env.vm.sp-offset].(*evalBytes) + + if offset > 1 { + fe := env.vm.stack[env.vm.sp-offset+1] + flags, err = regexpFlags(fe, flags, "regexp_like") + if err != nil { + env.vm.err = err + env.vm.sp -= offset + return 1 + } + } + + p, err := compileRegex(pattern, c, flags) + if err != nil { + env.vm.err = err + env.vm.sp -= offset + return 1 + } + + m := icuregex.NewMatcher(p) + m.Reset(charset.Expand(nil, input.bytes, c)) + + ok, err := m.Find() + if err != nil { + env.vm.err = err + env.vm.sp-- + return 1 + } + if negate { + ok = !ok + } + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalBool(ok) + env.vm.sp -= offset + return 1 + }, "FN REGEXP_LIKE_SLOW VARCHAR(SP-2), VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_REGEXP_INSTR(m *icuregex.Matcher, c charset.Charset, offset int) { + asm.adjustStack(-offset) + asm.emit(func(env *ExpressionEnv) int { + input := env.vm.stack[env.vm.sp-offset-1].(*evalBytes) + runes := charset.Expand(nil, input.bytes, c) + + if len(runes) == 0 { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalInt64(0) + env.vm.sp -= offset + return 1 + } + + pos := int64(1) + if offset > 1 { + pos, env.vm.err = positionInstr(env.vm.stack[env.vm.sp-offset+1].(*evalInt64), int64(len(runes))) + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + } + + occ := int64(1) + if offset > 2 { + occ = occurrence(env.vm.stack[env.vm.sp-offset+2].(*evalInt64), occ) + } + + returnOpt := int64(0) + if offset > 3 { + returnOpt, env.vm.err = returnOption(env.vm.stack[env.vm.sp-offset+3].(*evalInt64), "regexp_instr") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + } + + m.Reset(runes[pos-1:]) + + found := false + for i := int64(0); i < occ; i++ { + found, env.vm.err = m.Find() + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + if !found { + break + } + } + if !found { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalInt64(0) + } else if returnOpt == 0 { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalInt64(int64(m.Start()) + pos) + } else { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalInt64(int64(m.End()) + pos) + } + env.vm.sp -= offset + return 1 + }, "FN REGEXP_INSTR VARCHAR(SP-2), VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_REGEXP_INSTR_slow(c colldata.Charset, flags icuregex.RegexpFlag, offset int) { + asm.adjustStack(-offset) + asm.emit(func(env *ExpressionEnv) int { + input := env.vm.stack[env.vm.sp-offset-1].(*evalBytes) + pattern := env.vm.stack[env.vm.sp-offset].(*evalBytes) + + if offset > 4 { + fe := env.vm.stack[env.vm.sp-offset+4] + flags, env.vm.err = regexpFlags(fe, flags, "regexp_instr") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + } + + p, err := compileRegex(pattern, c, flags) + if err != nil { + env.vm.err = err + env.vm.sp -= offset + return 1 + } + + runes := charset.Expand(nil, input.bytes, c) + if len(runes) == 0 { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalInt64(0) + env.vm.sp -= offset + return 1 + } + + pos := int64(1) + if offset > 1 { + pos, env.vm.err = positionInstr(env.vm.stack[env.vm.sp-offset+1].(*evalInt64), int64(len(runes))) + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + } + + occ := int64(1) + if offset > 2 { + occ = occurrence(env.vm.stack[env.vm.sp-offset+2].(*evalInt64), occ) + } + + returnOpt := int64(0) + if offset > 3 { + returnOpt, env.vm.err = returnOption(env.vm.stack[env.vm.sp-offset+3].(*evalInt64), "regexp_instr") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + } + + m := icuregex.NewMatcher(p) + m.Reset(runes[pos-1:]) + + found := false + for i := int64(0); i < occ; i++ { + found, env.vm.err = m.Find() + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + if !found { + break + } + } + if !found { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalInt64(0) + } else if returnOpt == 0 { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalInt64(int64(m.Start()) + pos) + } else { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalInt64(int64(m.End()) + pos) + } + env.vm.sp -= offset + return 1 + }, "FN REGEXP_INSTR_SLOW VARCHAR(SP-2), VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_REGEXP_SUBSTR(m *icuregex.Matcher, merged collations.TypedCollation, offset int) { + asm.adjustStack(-offset) + asm.emit(func(env *ExpressionEnv) int { + input := env.vm.stack[env.vm.sp-offset-1].(*evalBytes) + c := colldata.Lookup(merged.Collation).Charset() + runes := charset.Expand(nil, input.bytes, c) + + pos := int64(1) + if offset > 1 { + limit := int64(len(runes)) + pos, env.vm.err = position(env.vm.stack[env.vm.sp-offset+1].(*evalInt64), limit, "regexp_substr") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + if pos-1 == limit { + env.vm.stack[env.vm.sp-offset-1] = nil + env.vm.sp -= offset + return 1 + } + } + + occ := int64(1) + if offset > 2 { + occ = occurrence(env.vm.stack[env.vm.sp-offset+2].(*evalInt64), occ) + } + + m.Reset(runes[pos-1:]) + + found := false + for i := int64(0); i < occ; i++ { + found, env.vm.err = m.Find() + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + if !found { + break + } + } + + if !found { + env.vm.stack[env.vm.sp-offset-1] = nil + } else { + out := runes[int64(m.Start())+pos-1 : int64(m.End())+pos-1] + b := charset.Collapse(nil, out, c) + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalText(b, resultCollation(merged)) + } + env.vm.sp -= offset + return 1 + }, "FN REGEXP_SUBSTR VARCHAR(SP-2), VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_REGEXP_SUBSTR_slow(merged collations.TypedCollation, flags icuregex.RegexpFlag, offset int) { + asm.adjustStack(-offset) + asm.emit(func(env *ExpressionEnv) int { + input := env.vm.stack[env.vm.sp-offset-1].(*evalBytes) + pattern := env.vm.stack[env.vm.sp-offset].(*evalBytes) + c := colldata.Lookup(merged.Collation).Charset() + runes := charset.Expand(nil, input.bytes, c) + + pos := int64(1) + if offset > 1 { + limit := int64(len(runes)) + pos, env.vm.err = position(env.vm.stack[env.vm.sp-offset+1].(*evalInt64), limit, "regexp_substr") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + if pos-1 == limit { + env.vm.stack[env.vm.sp-offset-1] = nil + env.vm.sp -= offset + return 1 + } + } + + occ := int64(1) + if offset > 2 { + occ = occurrence(env.vm.stack[env.vm.sp-offset+2].(*evalInt64), occ) + } + + if offset > 3 { + fe := env.vm.stack[env.vm.sp-offset+3] + flags, env.vm.err = regexpFlags(fe, flags, "regexp_substr") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + } + + p, err := compileRegex(pattern, c, flags) + if err != nil { + env.vm.err = err + env.vm.sp -= offset + return 1 + } + + m := icuregex.NewMatcher(p) + m.Reset(runes[pos-1:]) + + found := false + for i := int64(0); i < occ; i++ { + found, env.vm.err = m.Find() + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + if !found { + break + } + } + + if !found { + env.vm.stack[env.vm.sp-offset-1] = nil + } else { + out := runes[int64(m.Start())+pos-1 : int64(m.End())+pos-1] + b := charset.Collapse(nil, out, c) + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalText(b, resultCollation(merged)) + } + env.vm.sp -= offset + return 1 + }, "FN REGEXP_SUBSTR_SLOW VARCHAR(SP-2), VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_REGEXP_REPLACE(m *icuregex.Matcher, merged collations.TypedCollation, offset int) { + asm.adjustStack(-offset) + asm.emit(func(env *ExpressionEnv) int { + input := env.vm.stack[env.vm.sp-offset-1].(*evalBytes) + repl := env.vm.stack[env.vm.sp-offset+1].(*evalBytes) + + c := colldata.Lookup(merged.Collation).Charset() + inputRunes := charset.Expand(nil, input.bytes, c) + replRunes := charset.Expand(nil, repl.bytes, c) + + pos := int64(1) + if offset > 2 { + limit := int64(len(inputRunes)) + pos, env.vm.err = position(env.vm.stack[env.vm.sp-offset+2].(*evalInt64), limit, "regexp_replace") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + if pos-1 == limit { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalRaw(input.bytes, sqltypes.Text, resultCollation(merged)) + env.vm.sp -= offset + return 1 + } + } + + occ := int64(0) + if offset > 3 { + occ = occurrence(env.vm.stack[env.vm.sp-offset+3].(*evalInt64), occ) + } + + m.Reset(inputRunes[pos-1:]) + + cs := colldata.Lookup(merged.Collation).Charset() + b, replaced, err := regexpReplace(m, inputRunes, replRunes, pos, occ, cs) + if err != nil { + env.vm.err = err + env.vm.sp -= offset + return 1 + } + if !replaced { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalRaw(input.bytes, sqltypes.Text, resultCollation(merged)) + } else { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalRaw(b, sqltypes.Text, resultCollation(merged)) + } + env.vm.sp -= offset + return 1 + }, "FN REGEXP_REPLACE VARCHAR(SP-2), VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_REGEXP_REPLACE_slow(merged collations.TypedCollation, flags icuregex.RegexpFlag, offset int) { + asm.adjustStack(-offset) + asm.emit(func(env *ExpressionEnv) int { + input := env.vm.stack[env.vm.sp-offset-1].(*evalBytes) + pattern := env.vm.stack[env.vm.sp-offset].(*evalBytes) + repl := env.vm.stack[env.vm.sp-offset+1].(*evalBytes) + + c := colldata.Lookup(merged.Collation).Charset() + inputRunes := charset.Expand(nil, input.bytes, c) + replRunes := charset.Expand(nil, repl.bytes, c) + + pos := int64(1) + if offset > 2 { + limit := int64(len(inputRunes)) + pos, env.vm.err = position(env.vm.stack[env.vm.sp-offset+2].(*evalInt64), limit, "regexp_replace") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + if pos-1 == limit { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalRaw(input.bytes, sqltypes.Text, resultCollation(merged)) + env.vm.sp -= offset + return 1 + } + } + + occ := int64(0) + if offset > 3 { + occ = occurrence(env.vm.stack[env.vm.sp-offset+3].(*evalInt64), 0) + } + + if offset > 4 { + fe := env.vm.stack[env.vm.sp-offset+4] + flags, env.vm.err = regexpFlags(fe, flags, "regexp_replace") + if env.vm.err != nil { + env.vm.sp -= offset + return 1 + } + } + + p, err := compileRegex(pattern, c, flags) + if err != nil { + env.vm.err = err + env.vm.sp -= offset + return 1 + } + + m := icuregex.NewMatcher(p) + m.Reset(inputRunes[pos-1:]) + + b, replaced, err := regexpReplace(m, inputRunes, replRunes, pos, occ, colldata.Lookup(merged.Collation).Charset()) + if err != nil { + env.vm.err = err + env.vm.sp -= offset + return 1 + } + if !replaced { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalRaw(input.bytes, sqltypes.Text, resultCollation(merged)) + } else { + env.vm.stack[env.vm.sp-offset-1] = env.vm.arena.newEvalRaw(b, sqltypes.Text, resultCollation(merged)) + } + env.vm.sp -= offset + return 1 + }, "FN REGEXP_REPLACE_SLOW VARCHAR(SP-2), VARCHAR(SP-1)") +} + +func (asm *assembler) Introduce(offset int, t sqltypes.Type, col collations.TypedCollation) { + asm.emit(func(env *ExpressionEnv) int { + arg := evalToBinary(env.vm.stack[env.vm.sp-offset]) + arg.tt = int16(t) + arg.col = col + env.vm.stack[env.vm.sp-offset] = arg + return 1 + }, "INTRODUCE (SP-1)") +} diff --git a/go/vt/vtgate/evalengine/compiler_asm_push.go b/go/vt/vtgate/evalengine/compiler_asm_push.go new file mode 100644 index 00000000000..17537973215 --- /dev/null +++ b/go/vt/vtgate/evalengine/compiler_asm_push.go @@ -0,0 +1,341 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/fastparse" + "vitess.io/vitess/go/mysql/json" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +func push_i(env *ExpressionEnv, raw []byte) int { + var ival int64 + ival, env.vm.err = fastparse.ParseInt64(hack.String(raw), 10) + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalInt64(ival) + env.vm.sp++ + return 1 +} + +func (asm *assembler) PushColumn_i(offset int) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + return push_i(env, env.Row[offset].Raw()) + }, "PUSH INT64(:%d)", offset) +} + +func (asm *assembler) PushBVar_i(key string) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + var bvar *querypb.BindVariable + bvar, env.vm.err = env.lookupBindVar(key) + if env.vm.err != nil { + return 0 + } + return push_i(env, bvar.Value) + }, "PUSH INT64(:%q)", key) +} + +func push_bin(env *ExpressionEnv, raw []byte) int { + env.vm.stack[env.vm.sp] = newEvalBinary(raw) + env.vm.sp++ + return 1 +} + +func (asm *assembler) PushColumn_bin(offset int) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + return push_bin(env, env.Row[offset].Raw()) + }, "PUSH VARBINARY(:%d)", offset) +} + +func (asm *assembler) PushBVar_bin(key string) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + var bvar *querypb.BindVariable + bvar, env.vm.err = env.lookupBindVar(key) + if env.vm.err != nil { + return 0 + } + return push_bin(env, bvar.Value) + }, "PUSH VARBINARY(:%q)", key) +} + +func push_d(env *ExpressionEnv, raw []byte) int { + var dec decimal.Decimal + dec, env.vm.err = decimal.NewFromMySQL(raw) + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalDecimal(dec, 0, 0) + env.vm.sp++ + return 1 +} + +func (asm *assembler) PushColumn_d(offset int) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + return push_d(env, env.Row[offset].Raw()) + }, "PUSH DECIMAL(:%d)", offset) +} + +func (asm *assembler) PushBVar_d(key string) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + var bvar *querypb.BindVariable + bvar, env.vm.err = env.lookupBindVar(key) + if env.vm.err != nil { + return 0 + } + return push_d(env, bvar.Value) + }, "PUSH DECIMAL(:%q)", key) +} + +func push_f(env *ExpressionEnv, raw []byte) int { + var fval float64 + fval, env.vm.err = fastparse.ParseFloat64(hack.String(raw)) + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalFloat(fval) + env.vm.sp++ + return 1 +} + +func (asm *assembler) PushColumn_f(offset int) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + return push_f(env, env.Row[offset].Raw()) + }, "PUSH FLOAT64(:%d)", offset) +} + +func (asm *assembler) PushBVar_f(key string) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + var bvar *querypb.BindVariable + bvar, env.vm.err = env.lookupBindVar(key) + if env.vm.err != nil { + return 0 + } + return push_f(env, bvar.Value) + }, "PUSH FLOAT64(:%q)", key) +} + +func push_hexnum(env *ExpressionEnv, raw []byte) int { + raw, env.vm.err = parseHexNumber(raw) + env.vm.stack[env.vm.sp] = newEvalBytesHex(raw) + env.vm.sp++ + return 1 +} + +func (asm *assembler) PushColumn_hexnum(offset int) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + return push_hexnum(env, env.Row[offset].Raw()) + }, "PUSH HEXNUM(:%d)", offset) +} + +func (asm *assembler) PushBVar_hexnum(key string) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + var bvar *querypb.BindVariable + bvar, env.vm.err = env.lookupBindVar(key) + if env.vm.err != nil { + return 0 + } + return push_hexnum(env, bvar.Value) + }, "PUSH HEXNUM(:%q)", key) +} + +func push_hexval(env *ExpressionEnv, raw []byte) int { + raw, env.vm.err = parseHexLiteral(raw[2 : len(raw)-1]) + env.vm.stack[env.vm.sp] = newEvalBytesHex(raw) + env.vm.sp++ + return 1 +} + +func (asm *assembler) PushColumn_hexval(offset int) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + return push_hexval(env, env.Row[offset].Raw()) + }, "PUSH HEXVAL(:%d)", offset) +} + +func (asm *assembler) PushBVar_hexval(key string) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + var bvar *querypb.BindVariable + bvar, env.vm.err = env.lookupBindVar(key) + if env.vm.err != nil { + return 0 + } + return push_hexval(env, bvar.Value) + }, "PUSH HEXVAL(:%q)", key) +} + +func push_json(env *ExpressionEnv, raw []byte) int { + var parser json.Parser + env.vm.stack[env.vm.sp], env.vm.err = parser.ParseBytes(raw) + env.vm.sp++ + return 1 +} + +func (asm *assembler) PushColumn_json(offset int) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + return push_json(env, env.Row[offset].Raw()) + }, "PUSH JSON(:%d)", offset) +} + +func (asm *assembler) PushBVar_json(key string) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + var bvar *querypb.BindVariable + bvar, env.vm.err = env.lookupBindVar(key) + if env.vm.err != nil { + return 0 + } + return push_json(env, bvar.Value) + }, "PUSH JSON(:%q)", key) +} + +func push_text(env *ExpressionEnv, raw []byte, col collations.TypedCollation) int { + env.vm.stack[env.vm.sp] = newEvalText(raw, col) + env.vm.sp++ + return 1 +} + +func (asm *assembler) PushColumn_text(offset int, col collations.TypedCollation) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + return push_text(env, env.Row[offset].Raw(), col) + }, "PUSH VARCHAR(:%d) COLLATE %d", offset, col.Collation) +} + +func (asm *assembler) PushBVar_text(key string, col collations.TypedCollation) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + var bvar *querypb.BindVariable + bvar, env.vm.err = env.lookupBindVar(key) + if env.vm.err != nil { + return 0 + } + return push_text(env, bvar.Value, col) + }, "PUSH VARCHAR(:%q)", key) +} + +func push_u(env *ExpressionEnv, raw []byte) int { + var uval uint64 + uval, env.vm.err = fastparse.ParseUint64(hack.String(raw), 10) + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalUint64(uval) + env.vm.sp++ + return 1 +} + +func (asm *assembler) PushColumn_u(offset int) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + return push_u(env, env.Row[offset].Raw()) + }, "PUSH UINT64(:%d)", offset) +} + +func (asm *assembler) PushBVar_u(key string) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + var bvar *querypb.BindVariable + bvar, env.vm.err = env.lookupBindVar(key) + if env.vm.err != nil { + return 0 + } + return push_u(env, bvar.Value) + }, "PUSH UINT64(:%q)", key) +} + +func (asm *assembler) PushLiteral(lit eval) error { + asm.adjustStack(1) + + switch lit := lit.(type) { + case *evalInt64: + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalInt64(lit.i) + env.vm.sp++ + return 1 + }, "PUSH INT64(%s)", lit.ToRawBytes()) + case *evalUint64: + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalUint64(lit.u) + env.vm.sp++ + return 1 + }, "PUSH UINT64(%s)", lit.ToRawBytes()) + case *evalFloat: + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalFloat(lit.f) + env.vm.sp++ + return 1 + }, "PUSH FLOAT64(%s)", lit.ToRawBytes()) + case *evalDecimal: + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalDecimalWithPrec(lit.dec, lit.length) + env.vm.sp++ + return 1 + }, "PUSH DECIMAL(%s)", lit.ToRawBytes()) + case *evalBytes: + asm.emit(func(env *ExpressionEnv) int { + b := env.vm.arena.newEvalBytesEmpty() + *b = *lit + env.vm.stack[env.vm.sp] = b + env.vm.sp++ + return 1 + }, "PUSH VARCHAR(%q)", lit.ToRawBytes()) + case *evalTemporal: + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newTemporal(lit.t, lit.dt, lit.prec) + env.vm.sp++ + return 1 + }, "PUSH TIME|DATETIME|DATE(%q)", lit.ToRawBytes()) + default: + return vterrors.Errorf(vtrpc.Code_UNIMPLEMENTED, "unsupported literal kind '%T'", lit) + } + + return nil +} + +func (asm *assembler) PushNull() { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp] = nil + env.vm.sp++ + return 1 + }, "PUSH NULL") +} diff --git a/go/vt/vtgate/evalengine/compiler_fn.go b/go/vt/vtgate/evalengine/compiler_fn.go new file mode 100644 index 00000000000..b17fee7fedc --- /dev/null +++ b/go/vt/vtgate/evalengine/compiler_fn.go @@ -0,0 +1,86 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "vitess.io/vitess/go/sqltypes" +) + +func (c *compiler) compileFn_rounding(arg0 Expr, asm_ins_f, asm_ins_d func()) (ctype, error) { + arg, err := arg0.compile(c) + if err != nil { + return ctype{}, err + } + + if arg.Type == sqltypes.Int64 || arg.Type == sqltypes.Uint64 { + // No-op for integers. + return arg, nil + } + + skip := c.compileNullCheck1(arg) + + convt := ctype{Type: arg.Type, Col: collationNumeric, Flag: arg.Flag} + switch arg.Type { + case sqltypes.Float64: + asm_ins_f() + case sqltypes.Decimal: + // We assume here the most common case here is that + // the decimal fits into an integer. + convt.Type = sqltypes.Int64 + asm_ins_d() + default: + convt.Type = sqltypes.Float64 + c.asm.Convert_xf(1) + asm_ins_f() + } + + c.asm.jumpDestination(skip) + return convt, nil +} + +func (c *compiler) compileFn_math1(arg0 Expr, asm_ins func(), nullable typeFlag) (ctype, error) { + arg, err := arg0.compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + c.compileToFloat(arg, 1) + asm_ins() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Float64, Col: collationNumeric, Flag: arg.Flag | nullable}, nil +} + +func (c *compiler) compileFn_length(arg Expr, asm_ins func()) (ctype, error) { + str, err := arg.compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(str) + + switch { + case str.isTextual(): + default: + c.asm.Convert_xc(1, sqltypes.VarChar, c.cfg.Collation, 0, false) + } + + asm_ins() + c.asm.jumpDestination(skip) + + return ctype{Type: sqltypes.Int64, Col: collationNumeric}, nil +} diff --git a/go/vt/vtgate/evalengine/compiler_test.go b/go/vt/vtgate/evalengine/compiler_test.go new file mode 100644 index 00000000000..e1e905a6efa --- /dev/null +++ b/go/vt/vtgate/evalengine/compiler_test.go @@ -0,0 +1,589 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine_test + +import ( + "fmt" + "strconv" + "strings" + "testing" + "time" + + "github.com/olekukonko/tablewriter" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/evalengine/testcases" +) + +func makeFields(values []sqltypes.Value) (fields []*querypb.Field) { + for i, v := range values { + field := &querypb.Field{ + Name: fmt.Sprintf("column%d", i), + Type: v.Type(), + } + if sqltypes.IsText(field.Type) { + field.Charset = uint32(collations.CollationUtf8mb4ID) + } else { + field.Charset = uint32(collations.CollationBinaryID) + } + fields = append(fields, field) + } + return +} + +type Tracker struct { + buf strings.Builder + tbl *tablewriter.Table + supported, total int +} + +func NewTracker() *Tracker { + track := &Tracker{} + track.tbl = tablewriter.NewWriter(&track.buf) + return track +} + +func (s *Tracker) Add(name string, supported, total int) { + s.tbl.Append([]string{ + name, + strconv.Itoa(supported), + strconv.Itoa(total), + fmt.Sprintf("%.02f%%", 100*float64(supported)/float64(total)), + }) + s.supported += supported + s.total += total +} + +func (s *Tracker) String() string { + s.tbl.SetBorder(false) + s.tbl.SetColumnAlignment([]int{ + tablewriter.ALIGN_LEFT, + tablewriter.ALIGN_RIGHT, + tablewriter.ALIGN_RIGHT, + tablewriter.ALIGN_RIGHT, + }) + s.tbl.SetFooterAlignment(tablewriter.ALIGN_RIGHT) + s.tbl.SetFooter([]string{ + "", + strconv.Itoa(s.supported), + strconv.Itoa(s.total), + fmt.Sprintf("%.02f%%", 100*float64(s.supported)/float64(s.total)), + }) + s.tbl.Render() + return s.buf.String() +} + +func TestCompilerReference(t *testing.T) { + now := time.Now() + evalengine.SystemTime = func() time.Time { return now } + defer func() { evalengine.SystemTime = time.Now }() + + track := NewTracker() + + for _, tc := range testcases.Cases { + t.Run(tc.Name(), func(t *testing.T) { + var supported, total int + env := evalengine.EmptyExpressionEnv() + + tc.Run(func(query string, row []sqltypes.Value) { + env.Row = row + + stmt, err := sqlparser.ParseExpr(query) + if err != nil { + // no need to test un-parseable queries + return + } + + fields := evalengine.FieldResolver(tc.Schema) + cfg := &evalengine.Config{ + ResolveColumn: fields.Column, + ResolveType: fields.Type, + Collation: collations.CollationUtf8mb4ID, + Optimization: evalengine.OptimizationLevelCompilerDebug, + } + + converted, err := evalengine.Translate(stmt, cfg) + if err != nil { + return + } + + expected, evalErr := env.Evaluate(evalengine.Deoptimize(converted)) + total++ + + if cfg.CompilerErr != nil { + switch { + case vterrors.Code(cfg.CompilerErr) == vtrpcpb.Code_UNIMPLEMENTED: + t.Logf("unsupported: %s", query) + case evalErr == nil: + t.Errorf("failed compilation:\nSQL: %s\nError: %s", query, cfg.CompilerErr) + case evalErr.Error() != cfg.CompilerErr.Error(): + t.Errorf("error mismatch:\nSQL: %s\nError eval: %s\nError comp: %s", query, evalErr, cfg.CompilerErr) + default: + supported++ + } + return + } + + res, vmErr := func() (res evalengine.EvalResult, err error) { + res, err = env.EvaluateVM(converted.(*evalengine.CompiledExpr)) + return + }() + + if vmErr != nil { + switch { + case evalErr == nil: + t.Errorf("failed evaluation from compiler:\nSQL: %s\nError: %s", query, vmErr) + case evalErr.Error() != vmErr.Error(): + t.Errorf("error mismatch:\nSQL: %s\nError eval: %s\nError comp: %s", query, evalErr, vmErr) + default: + supported++ + } + return + } + + eval := expected.String() + comp := res.String() + + if eval != comp { + t.Errorf("bad evaluation from compiler:\nSQL: %s\nEval: %s\nComp: %s", query, eval, comp) + return + } + + supported++ + }) + + track.Add(tc.Name(), supported, total) + }) + } + + t.Logf("\n%s", track.String()) +} + +func TestCompilerSingle(t *testing.T) { + var testCases = []struct { + expression string + values []sqltypes.Value + result string + }{ + { + expression: "1 + column0", + values: []sqltypes.Value{sqltypes.NewInt64(1)}, + result: "INT64(2)", + }, + { + expression: "1 + column0", + values: []sqltypes.Value{sqltypes.NewFloat64(1)}, + result: "FLOAT64(2)", + }, + { + expression: "1.0e0 - column0", + values: []sqltypes.Value{sqltypes.NewFloat64(1)}, + result: "FLOAT64(0)", + }, + { + expression: "128 - column0", + values: []sqltypes.Value{sqltypes.NewFloat64(1)}, + result: "FLOAT64(127)", + }, + { + expression: "(128 - column0) * 3", + values: []sqltypes.Value{sqltypes.NewFloat64(1)}, + result: "FLOAT64(381)", + }, + { + expression: "1.0e0 < column0", + values: []sqltypes.Value{sqltypes.NewFloat64(2)}, + result: "INT64(1)", + }, + { + expression: "1.0e0 < column0", + values: []sqltypes.Value{sqltypes.NewFloat64(-1)}, + result: "INT64(0)", + }, + { + expression: `'foo' = 'FOO' collate utf8mb4_0900_as_cs`, + result: "INT64(0)", + }, + { + expression: `'foo' < 'bar'`, + result: "INT64(0)", + }, + { + expression: `case when false then 0 else 18446744073709551615 end`, + result: `DECIMAL(18446744073709551615)`, + }, + { + expression: `case when true then _binary "foobar" else 'foo' collate utf8mb4_0900_as_cs end`, + result: `VARCHAR("foobar")`, + }, + { + expression: `- 18446744073709551615`, + result: `DECIMAL(-18446744073709551615)`, + }, + { + expression: `CAST(CAST(true AS JSON) AS BINARY)`, + result: `BLOB("true")`, + }, + { + expression: `JSON_ARRAY(true, 1.0)`, + result: `JSON("[true, 1.0]")`, + }, + { + expression: `cast(true as json) + 0`, + result: `FLOAT64(1)`, + }, + { + expression: `CAST(CAST(0 AS JSON) AS CHAR(16))`, + result: `VARCHAR("0")`, + }, + { + expression: `1 OR cast('invalid' as json)`, + result: `INT64(1)`, + }, + { + expression: `NULL AND 1`, + result: `NULL`, + }, + { + expression: `CONV(-1.5e0, 1.5e0, 1.5e0)`, + result: `VARCHAR("1111111111111111111111111111111111111111111111111111111111111111")`, + }, + { + expression: `CONV(9223372036854775810.4, 13, 7)`, + result: `VARCHAR("45012021522523134134601")`, + }, + { + expression: `CONV(-9223372036854775809, 13e0, 13e0)`, + result: `VARCHAR("0")`, + }, + { + expression: `0 + time '10:04:58'`, + result: `INT64(100458)`, + }, + { + expression: `0 + time '101:34:58'`, + result: `INT64(1013458)`, + }, + { + expression: `time '10:04:58' < '101:34:58'`, + result: `INT64(1)`, + }, + { + expression: `1.7 / 173458`, + result: `DECIMAL(0.00001)`, + }, + { + expression: `cast(time '5 12:34:58' as json)`, + result: `JSON("\"04:34:58.000000\"")`, + }, + { + expression: `CAST(20000229235959.999950 AS DATETIME(4))`, + result: `DATETIME("2000-03-01 00:00:00.0000")`, + }, + { + expression: `CAST(1.5678 AS TIME(2))`, + result: `TIME("00:00:01.57")`, + }, + { + expression: `CAST(235959.995 AS TIME(2))`, + result: `TIME("24:00:00.00")`, + }, + { + expression: `CAST(-235959.995 AS TIME(2))`, + result: `TIME("-24:00:00.00")`, + }, + { + expression: `WEEK('2000-01-02', 6)`, + result: `INT64(1)`, + }, + { + expression: `WEEK(date '2000-01-01', 4)`, + result: `INT64(0)`, + }, + { + // This is the day of DST change in Europe/Amsterdam when + // the year started on a Wednesday. Regression test for + // using 24 hour time diffing instead of days. + expression: `WEEK(date '2014-10-26', 6)`, + result: `INT64(44)`, + }, + { + expression: `MAKEDATE(cast('invalid' as json), NULL)`, + result: `NULL`, + }, + { + expression: `MAKETIME(NULL, '', cast('invalid' as json))`, + result: `NULL`, + }, + { + expression: `1 = ' 1 '`, + result: `INT64(1)`, + }, + { + expression: `CAST(' 0 ' AS TIME)`, + result: `TIME("00:00:00")`, + }, + { + expression: `CAST('0' AS TIME)`, + result: `TIME("00:00:00")`, + }, + { + expression: `timestamp '2000-01-01 10:34:58.978654' DIV '\t1 foo\t'`, + result: `INT64(20000101103458)`, + }, + { + expression: `UNHEX('f')`, + result: `VARBINARY("\x0f")`, + }, + { + expression: `STRCMP(1234, '12_4')`, + result: `INT64(-1)`, + }, + { + expression: `INTERVAL(0, 0, 0, 0)`, + result: `INT64(3)`, + }, + { + expression: `INTERVAL(0, 0, 1, 0)`, + result: `INT64(1)`, + }, + { + expression: `INTERVAL(0, 1, 0, 0)`, + result: `INT64(0)`, + }, + { + expression: `INTERVAL(0, -1, 0, 0)`, + result: `INT64(3)`, + }, + { + expression: `INTERVAL(0, 1, 1, 1)`, + result: `INT64(0)`, + }, + { + expression: `INTERVAL(0, -1, -1, -1)`, + result: `INT64(3)`, + }, + { + expression: `INTERVAL(0, 0, 0, 1)`, + result: `INT64(2)`, + }, + { + expression: `INTERVAL(0, 0, 0, -1)`, + result: `INT64(3)`, + }, + { + expression: `INTERVAL(0, NULL, 0, 0)`, + result: `INT64(3)`, + }, + { + expression: `INTERVAL(NULL, 0, 0, 0)`, + result: `INT64(-1)`, + }, + { + expression: `INTERVAL(0, 0, 0, NULL)`, + result: `INT64(3)`, + }, + { + expression: `INTERVAL(0, 0, 0, NULL, 1, 1)`, + result: `INT64(3)`, + }, + { + expression: `INTERVAL(0, 0, 2, NULL, 1, 1)`, + result: `INT64(1)`, + }, + { + expression: `INTERVAL(0, 2, -1, NULL, -1, 1)`, + result: `INT64(0)`, + }, + { + expression: `INTERVAL(0, 2, NULL, NULL, -1, 1)`, + result: `INT64(0)`, + }, + { + expression: `INTERVAL(0, NULL, NULL, NULL, -1, 1)`, + result: `INT64(4)`, + }, + { + expression: `INTERVAL(0, 0, 0, -1, NULL, 1)`, + result: `INT64(4)`, + }, + { + expression: `INTERVAL(0, 0, 0, -1, NULL, NULL, 1)`, + result: `INT64(5)`, + }, + { + expression: `REGEXP_REPLACE(1234, 12, 6, 1)`, + result: `TEXT("634")`, + }, + { + expression: `_latin1 0xFF`, + result: `VARCHAR("ÿ")`, + }, + { + expression: `TRIM(_latin1 0xA078A0 FROM _utf8mb4 0xC2A078C2A0)`, + result: `VARCHAR("")`, + }, + { + expression: `CONCAT_WS("😊😂🤢", date '2000-01-01', _latin1 0xFF)`, + result: `VARCHAR("2000-01-01😊😂🤢ÿ")`, + }, + { + expression: `concat('test', _latin1 0xff)`, + result: `VARCHAR("testÿ")`, + }, + { + expression: `WEIGHT_STRING('foobar' as char(3))`, + result: `VARBINARY("\x1c\xe5\x1d\xdd\x1d\xdd")`, + }, + } + + for _, tc := range testCases { + t.Run(tc.expression, func(t *testing.T) { + expr, err := sqlparser.ParseExpr(tc.expression) + if err != nil { + t.Fatal(err) + } + + fields := evalengine.FieldResolver(makeFields(tc.values)) + cfg := &evalengine.Config{ + ResolveColumn: fields.Column, + ResolveType: fields.Type, + Collation: collations.CollationUtf8mb4ID, + Optimization: evalengine.OptimizationLevelCompilerDebug, + } + + converted, err := evalengine.Translate(expr, cfg) + if err != nil { + t.Fatal(err) + } + + env := evalengine.EmptyExpressionEnv() + env.Row = tc.values + + expected, err := env.Evaluate(evalengine.Deoptimize(converted)) + if err != nil { + t.Fatal(err) + } + if expected.String() != tc.result { + t.Fatalf("bad evaluation from eval engine: got %s, want %s", expected.String(), tc.result) + } + + if cfg.CompilerErr != nil { + t.Fatalf("bad compilation: %v", cfg.CompilerErr) + } + + // re-run the same evaluation multiple times to ensure results are always consistent + for i := 0; i < 8; i++ { + res, err := env.EvaluateVM(converted.(*evalengine.CompiledExpr)) + if err != nil { + t.Fatal(err) + } + + if res.String() != tc.result { + t.Errorf("bad evaluation from compiler: got %s, want %s (iteration %d)", res, tc.result, i) + } + } + }) + } +} + +func TestBindVarLiteral(t *testing.T) { + var testCases = []struct { + expression string + bindType func(expr sqlparser.Expr) + bindVar *querypb.BindVariable + result string + }{ + { + expression: `_latin1 :vtg1 /* HEXNUM */`, + bindType: func(expr sqlparser.Expr) { + expr.(*sqlparser.IntroducerExpr).Expr.(*sqlparser.Argument).Type = sqltypes.HexNum + }, + bindVar: sqltypes.HexNumBindVariable([]byte("0xFF")), + result: `VARCHAR("ÿ")`, + }, + { + expression: `cast(:vtg1 /* HEXVAL */ as char character set latin1)`, + bindType: func(expr sqlparser.Expr) { + expr.(*sqlparser.CastExpr).Expr.(*sqlparser.Argument).Type = sqltypes.HexVal + }, + bindVar: sqltypes.HexValBindVariable([]byte("0'FF'")), + result: `VARCHAR("ÿ")`, + }, + } + + for _, tc := range testCases { + t.Run(tc.expression, func(t *testing.T) { + expr, err := sqlparser.ParseExpr(tc.expression) + if err != nil { + t.Fatal(err) + } + + tc.bindType(expr) + + fields := evalengine.FieldResolver(makeFields(nil)) + cfg := &evalengine.Config{ + ResolveColumn: fields.Column, + ResolveType: fields.Type, + Collation: collations.CollationUtf8mb4ID, + Optimization: evalengine.OptimizationLevelCompilerDebug, + } + + converted, err := evalengine.Translate(expr, cfg) + if err != nil { + t.Fatal(err) + } + + result := `VARCHAR("ÿ")` + + env := evalengine.EmptyExpressionEnv() + env.BindVars = map[string]*querypb.BindVariable{ + "vtg1": tc.bindVar, + } + + expected, err := env.Evaluate(evalengine.Deoptimize(converted)) + if err != nil { + t.Fatal(err) + } + if expected.String() != result { + t.Fatalf("bad evaluation from eval engine: got %s, want %s", expected.String(), result) + } + + if cfg.CompilerErr != nil { + t.Fatalf("bad compilation: %v", cfg.CompilerErr) + } + + // re-run the same evaluation multiple times to ensure results are always consistent + for i := 0; i < 8; i++ { + res, err := env.EvaluateVM(converted.(*evalengine.CompiledExpr)) + if err != nil { + t.Fatal(err) + } + + if res.String() != result { + t.Errorf("bad evaluation from compiler: got %s, want %s (iteration %d)", res, result, i) + } + } + }) + } +} diff --git a/go/vt/vtgate/evalengine/eval.go b/go/vt/vtgate/evalengine/eval.go index f5f42c30625..fbc3cbca57d 100644 --- a/go/vt/vtgate/evalengine/eval.go +++ b/go/vt/vtgate/evalengine/eval.go @@ -17,15 +17,18 @@ limitations under the License. package evalengine import ( - "fmt" "strconv" + "unicode/utf8" + "vitess.io/vitess/go/hack" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/fastparse" + "vitess.io/vitess/go/mysql/format" + "vitess.io/vitess/go/mysql/json" "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine/internal/decimal" - "vitess.io/vitess/go/vt/vtgate/evalengine/internal/json" "vitess.io/vitess/go/vt/vthash" ) @@ -36,6 +39,8 @@ const ( flagNull typeFlag = 1 << 0 // flagNullable marks that this value CAN be null flagNullable typeFlag = 1 << 1 + // flagIsBoolean marks that this value should be interpreted as boolean + flagIsBoolean typeFlag = 1 << 2 // flagIntegerUdf marks that this value is math.MinInt64, and will underflow if negated flagIntegerUdf typeFlag = 1 << 5 @@ -52,10 +57,18 @@ const ( // flagExplicitCollation marks that this value has an explicit collation flagExplicitCollation typeFlag = 1 << 10 + // flagAmbiguousType marks that the type of this value depends on the value at runtime + // and cannot be computed accurately + flagAmbiguousType typeFlag = 1 << 11 + // flagIntegerRange are the flags that mark overflow/underflow in integers flagIntegerRange = flagIntegerOvf | flagIntegerCap | flagIntegerUdf ) +func (f typeFlag) Nullable() bool { + return f&flagNullable != 0 || f&flagNull != 0 +} + type eval interface { ToRawBytes() []byte SQLType() sqltypes.Type @@ -93,14 +106,25 @@ func evalToSQLValueWithType(e eval, resultType sqltypes.Type) sqltypes.Value { case *evalFloat: return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, uint64(e.f), 10)) } - case sqltypes.IsFloat(resultType) || resultType == sqltypes.Decimal: + case sqltypes.IsFloat(resultType): + switch e := e.(type) { + case *evalInt64: + return sqltypes.MakeTrusted(resultType, strconv.AppendInt(nil, e.i, 10)) + case *evalUint64: + return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, e.u, 10)) + case *evalFloat: + return sqltypes.MakeTrusted(resultType, format.FormatFloat(e.f)) + case *evalDecimal: + return sqltypes.MakeTrusted(resultType, e.dec.FormatMySQL(e.length)) + } + case sqltypes.IsDecimal(resultType): switch e := e.(type) { case *evalInt64: return sqltypes.MakeTrusted(resultType, strconv.AppendInt(nil, e.i, 10)) case *evalUint64: return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, e.u, 10)) case *evalFloat: - return sqltypes.MakeTrusted(resultType, FormatFloat(resultType, e.f)) + return sqltypes.MakeTrusted(resultType, hack.StringBytes(strconv.FormatFloat(e.f, 'f', -1, 64))) case *evalDecimal: return sqltypes.MakeTrusted(resultType, e.dec.FormatMySQL(e.length)) } @@ -124,7 +148,20 @@ func evalIsTruthy(e eval) boolean { case *evalDecimal: return makeboolean(!e.dec.IsZero()) case *evalBytes: - return makeboolean(parseStringToFloat(e.string()) != 0.0) + if e.isHexLiteral { + hex, ok := e.toNumericHex() + if !ok { + // overflow + return makeboolean(true) + } + return makeboolean(hex.u != 0) + } + f, _ := fastparse.ParseFloat64(e.string()) + return makeboolean(f != 0.0) + case *evalJSON: + return makeboolean(e.ToBoolean()) + case *evalTemporal: + return makeboolean(!e.isZero()) default: panic("unhandled case: evalIsTruthy") } @@ -153,22 +190,26 @@ func evalCoerce(e eval, typ sqltypes.Type, col collations.ID) (eval, error) { case sqltypes.Char, sqltypes.VarChar: panic("unreacheable") case sqltypes.Decimal: - return evalToNumeric(e).toDecimal(0, 0), nil + return evalToDecimal(e, 0, 0), nil case sqltypes.Float32, sqltypes.Float64: - f, _ := evalToNumeric(e).toFloat() + f, _ := evalToFloat(e) return f, nil case sqltypes.Int8, sqltypes.Int16, sqltypes.Int32, sqltypes.Int64: - return evalToNumeric(e).toInt64(), nil + return evalToInt64(e), nil case sqltypes.Uint8, sqltypes.Uint16, sqltypes.Uint32, sqltypes.Uint64: - return evalToNumeric(e).toUint64(), nil - case sqltypes.Date, sqltypes.Datetime, sqltypes.Year, sqltypes.TypeJSON, sqltypes.Time, sqltypes.Bit: - return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Unsupported type conversion: %s", typ.String()) + return evalToInt64(e).toUint64(), nil + case sqltypes.Date: + return evalToDate(e), nil + case sqltypes.Datetime, sqltypes.Timestamp: + return evalToDateTime(e, -1), nil + case sqltypes.Time: + return evalToTime(e, -1), nil default: - panic(fmt.Sprintf("BUG: emitted unknown type: %s", typ)) + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Unsupported type conversion: %s", typ.String()) } } -func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type) (eval, error) { +func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type, collation collations.ID) (eval, error) { switch { case typ == sqltypes.Null: return nil, nil @@ -185,9 +226,15 @@ func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type) (eval, error) { fval, err := v.ToFloat64() return newEvalFloat(fval), err case v.IsText() || v.IsBinary(): - return newEvalFloat(parseStringToFloat(v.RawStr())), nil + fval, _ := fastparse.ParseFloat64(v.RawStr()) + return newEvalFloat(fval), nil default: - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "coercion should not try to coerce this value to a float: %v", v) + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err + } + f, _ := evalToFloat(e) + return f, nil } case sqltypes.IsDecimal(typ): @@ -206,10 +253,14 @@ func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type) (eval, error) { } dec = decimal.NewFromFloat(fval) case v.IsText() || v.IsBinary(): - fval := parseStringToFloat(v.RawStr()) + fval, _ := fastparse.ParseFloat64(v.RawStr()) dec = decimal.NewFromFloat(fval) default: - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "coercion should not try to coerce this value to a decimal: %v", v) + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err + } + return evalToDecimal(e, 0, 0), nil } return &evalDecimal{dec: dec, length: -dec.Exponent()}, nil @@ -221,8 +272,15 @@ func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type) (eval, error) { case v.IsUnsigned(): uval, err := v.ToUint64() return newEvalInt64(int64(uval)), err + case v.IsText() || v.IsBinary(): + i, err := fastparse.ParseInt64(v.RawStr(), 10) + return newEvalInt64(i), err default: - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "coercion should not try to coerce this value to a signed int: %v", v) + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err + } + return evalToInt64(e), nil } case sqltypes.IsUnsigned(typ): @@ -233,18 +291,71 @@ func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type) (eval, error) { case v.IsUnsigned(): uval, err := v.ToUint64() return newEvalUint64(uval), err + case v.IsText() || v.IsBinary(): + u, err := fastparse.ParseUint64(v.RawStr(), 10) + return newEvalUint64(u), err default: - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "coercion should not try to coerce this value to a unsigned int: %v", v) + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err + } + i := evalToInt64(e) + return newEvalUint64(uint64(i.i)), nil } case sqltypes.IsText(typ) || sqltypes.IsBinary(typ): switch { case v.IsText() || v.IsBinary(): - // TODO: collation - return newEvalRaw(v.Type(), v.Raw(), collationBinary), nil + return newEvalRaw(v.Type(), v.Raw(), defaultCoercionCollation(collation)), nil + case sqltypes.IsText(typ): + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err + } + return evalToVarchar(e, collation, true) default: - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "coercion should not try to coerce this value to a text: %v", v) + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err + } + return evalToBinary(e), nil + } + + case typ == sqltypes.TypeJSON: + return json.NewFromSQL(v) + case typ == sqltypes.Date: + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err + } + // Separate return here to avoid nil wrapped in interface type + d := evalToDate(e) + if d == nil { + return nil, nil + } + return d, nil + case typ == sqltypes.Datetime || typ == sqltypes.Timestamp: + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err } + // Separate return here to avoid nil wrapped in interface type + dt := evalToDateTime(e, -1) + if dt == nil { + return nil, nil + } + return dt, nil + case typ == sqltypes.Time: + e, err := valueToEval(v, defaultCoercionCollation(collation)) + if err != nil { + return nil, err + } + // Separate return here to avoid nil wrapped in interface type + t := evalToTime(e, -1) + if t == nil { + return nil, nil + } + return t, nil } return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "coercion should not try to coerce this value: %v", v) } @@ -311,15 +422,56 @@ func valueToEval(value sqltypes.Value, collation collations.TypedCollation) (eva } case sqltypes.IsBinary(tt): return newEvalBinary(value.Raw()), nil - case sqltypes.IsDate(tt): - return newEvalRaw(value.Type(), value.Raw(), collationNumeric), nil + case tt == sqltypes.Date: + return parseDate(value.Raw()) + case tt == sqltypes.Datetime || tt == sqltypes.Timestamp: + return parseDateTime(value.Raw()) + case tt == sqltypes.Time: + return parseTime(value.Raw()) case sqltypes.IsNull(tt): return nil, nil case tt == sqltypes.TypeJSON: var p json.Parser j, err := p.ParseBytes(value.Raw()) return j, wrap(err) + case fallbackBinary(tt): + return newEvalRaw(tt, value.Raw(), collation), nil default: return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "Type is not supported: %q %s", value, value.Type()) } } + +const hexchars = "0123456789ABCDEF" + +func sanitizeErrorValue(s []byte) []byte { + var buf []byte + for width := 0; len(s) > 0; s = s[width:] { + r := rune(s[0]) + width = 1 + if r >= utf8.RuneSelf { + r, width = utf8.DecodeLastRune(s) + } + if width == 1 && r == utf8.RuneError { + buf = append(buf, `\x`...) + buf = append(buf, hexchars[s[0]>>4]) + buf = append(buf, hexchars[s[0]&0xF]) + continue + } + + if strconv.IsPrint(r) { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + b := [utf8.UTFMax]byte{} + n := utf8.EncodeRune(b[:], r) + buf = append(buf, b[:n]...) + } + continue + } + + buf = append(buf, `\x`...) + buf = append(buf, hexchars[s[0]>>4]) + buf = append(buf, hexchars[s[0]&0xF]) + } + return buf +} diff --git a/go/vt/vtgate/evalengine/eval_bytes.go b/go/vt/vtgate/evalengine/eval_bytes.go index 8b000c6beae..455394e31e4 100644 --- a/go/vt/vtgate/evalengine/eval_bytes.go +++ b/go/vt/vtgate/evalengine/eval_bytes.go @@ -18,15 +18,13 @@ package evalengine import ( "encoding/binary" - "time" "vitess.io/vitess/go/hack" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql/datetime" "vitess.io/vitess/go/sqltypes" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vthash" ) @@ -49,6 +47,13 @@ func newEvalBytesHex(raw []byte) eval { return &evalBytes{tt: int16(sqltypes.VarBinary), isHexLiteral: true, col: collationBinary, bytes: raw} } +// newEvalBytesBit creates a new evalBytes for a bit literal. +// Turns out that a bit literal is not actually typed with +// sqltypes.Bit, but with sqltypes.VarBinary. +func newEvalBytesBit(raw []byte) eval { + return &evalBytes{tt: int16(sqltypes.VarBinary), isBitLiteral: true, col: collationBinary, bytes: raw} +} + func newEvalBinary(raw []byte) *evalBytes { return newEvalRaw(sqltypes.VarBinary, raw, collationBinary) } @@ -78,8 +83,8 @@ func evalToVarchar(e eval, col collations.ID, convert bool) (*evalBytes, error) typedcol.Collation = col if col != collations.CollationBinaryID { - fromCollation := b.col.Collation.Get() - toCollation := col.Get() + fromCollation := colldata.Lookup(b.col.Collation) + toCollation := colldata.Lookup(col) var err error bytes, err = charset.Convert(nil, toCollation.Charset(), bytes, fromCollation.Charset()) @@ -100,25 +105,18 @@ func evalToVarchar(e eval, col collations.ID, convert bool) (*evalBytes, error) func (e *evalBytes) Hash(h *vthash.Hasher) { switch tt := e.SQLType(); { - case sqltypes.IsDate(tt): - t, err := e.parseDate() - if err != nil { - panic("parseDate() in evalBytes should never fail") - } - h.Write16(hashPrefixDate) - h.Write64(uint64(t.UnixNano())) case tt == sqltypes.VarBinary: h.Write16(hashPrefixBytes) _, _ = h.Write(e.bytes) default: h.Write16(hashPrefixBytes) - col := e.col.Collation.Get() + col := colldata.Lookup(e.col.Collation) col.Hash(h, e.bytes, 0) } } func (e *evalBytes) isBinary() bool { - return e.SQLType() == sqltypes.VarBinary + return e.SQLType() == sqltypes.VarBinary || e.SQLType() == sqltypes.Binary || e.SQLType() == sqltypes.Blob } func (e *evalBytes) isHexOrBitLiteral() bool { @@ -156,31 +154,32 @@ func (e *evalBytes) truncateInPlace(size int) { e.bytes = e.bytes[:size] } case sqltypes.IsText(tt): - collation := e.col.Collation.Get() + collation := colldata.Lookup(e.col.Collation) e.bytes = charset.Slice(collation.Charset(), e.bytes, 0, size) default: panic("called EvalResult.truncate on non-quoted") } } -func (e *evalBytes) parseDate() (t time.Time, err error) { - switch e.SQLType() { - case sqltypes.Date: - t, err = sqlparser.ParseDate(e.string()) - case sqltypes.Timestamp, sqltypes.Datetime: - t, err = sqlparser.ParseDateTime(e.string()) - case sqltypes.Time: - t, err = sqlparser.ParseTime(e.string()) - default: - err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "type %v is not date-like", e.SQLType()) +func (e *evalBytes) toDateBestEffort() datetime.DateTime { + if t, _, _ := datetime.ParseDateTime(e.string(), -1); !t.IsZero() { + return t } - return + if t, _ := datetime.ParseDate(e.string()); !t.IsZero() { + return datetime.DateTime{Date: t} + } + return datetime.DateTime{} } func (e *evalBytes) toNumericHex() (*evalUint64, bool) { raw := e.bytes - if len(raw) > 8 { - return nil, false // overflow + if l := len(raw); l > 8 { + for _, b := range raw[:l-8] { + if b != 0 { + return nil, false // overflow + } + } + raw = raw[l-8:] } var number [8]byte diff --git a/go/vt/vtgate/evalengine/eval_json.go b/go/vt/vtgate/evalengine/eval_json.go index 8275f241c70..8b19a27f92b 100644 --- a/go/vt/vtgate/evalengine/eval_json.go +++ b/go/vt/vtgate/evalengine/eval_json.go @@ -18,15 +18,16 @@ package evalengine import ( "bytes" - "encoding/base64" "errors" "fmt" + "vitess.io/vitess/go/hack" "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql/json" "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine/internal/json" ) type errJSONType string @@ -65,12 +66,10 @@ func intoJSONPath(e eval) (*json.Path, error) { } func evalConvert_bj(e *evalBytes) *evalJSON { - const prefix = "base64:type15:" - - dst := make([]byte, len(prefix)+mysqlBase64.EncodedLen(len(e.bytes))) - copy(dst, prefix) - base64.StdEncoding.Encode(dst[len(prefix):], e.bytes) - return json.NewString(dst) + if e.tt == int16(sqltypes.Bit) { + return json.NewBit(e.string()) + } + return json.NewBlob(e.string()) } func evalConvert_fj(e *evalFloat) *evalJSON { @@ -78,7 +77,7 @@ func evalConvert_fj(e *evalFloat) *evalJSON { if bytes.IndexByte(f, '.') < 0 { f = append(f, '.', '0') } - return json.NewNumber(f) + return json.NewNumber(hack.String(f), json.NumberTypeFloat) } func evalConvert_nj(e evalNumeric) *evalJSON { @@ -88,15 +87,32 @@ func evalConvert_nj(e evalNumeric) *evalJSON { if e == evalBoolFalse { return json.ValueFalse } - return json.NewNumber(e.ToRawBytes()) + switch e := e.(type) { + case *evalInt64: + return json.NewNumber(hack.String(e.ToRawBytes()), json.NumberTypeSigned) + case *evalUint64: + return json.NewNumber(hack.String(e.ToRawBytes()), json.NumberTypeUnsigned) + case *evalDecimal: + return json.NewNumber(hack.String(e.ToRawBytes()), json.NumberTypeDecimal) + } + panic("unreachable") } func evalConvert_cj(e *evalBytes) (*evalJSON, error) { - jsonText, err := charset.Convert(nil, charset.Charset_utf8mb4{}, e.bytes, e.col.Collation.Get().Charset()) + jsonText, err := charset.Convert(nil, charset.Charset_utf8mb4{}, e.bytes, colldata.Lookup(e.col.Collation).Charset()) + if err != nil { + return nil, err + } + var p json.Parser + return p.ParseBytes(jsonText) +} + +func evalConvertArg_cj(e *evalBytes) (*evalJSON, error) { + jsonText, err := charset.Convert(nil, charset.Charset_utf8mb4{}, e.bytes, colldata.Lookup(e.col.Collation).Charset()) if err != nil { return nil, err } - return json.NewString(jsonText), nil + return json.NewString(string(jsonText)), nil } func evalToJSON(e eval) (*evalJSON, error) { @@ -114,6 +130,30 @@ func evalToJSON(e eval) (*evalJSON, error) { return evalConvert_bj(e), nil } return evalConvert_cj(e) + case *evalTemporal: + return e.toJSON(), nil + default: + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Unsupported type conversion: %s AS JSON", e.SQLType()) + } +} + +func argToJSON(e eval) (*evalJSON, error) { + switch e := e.(type) { + case nil: + return json.ValueNull, nil + case *evalJSON: + return e, nil + case *evalFloat: + return evalConvert_fj(e), nil + case evalNumeric: + return evalConvert_nj(e), nil + case *evalBytes: + if sqltypes.IsBinary(e.SQLType()) { + return evalConvert_bj(e), nil + } + return evalConvertArg_cj(e) + case *evalTemporal: + return e.toJSON(), nil default: return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Unsupported type conversion: %s AS JSON", e.SQLType()) } diff --git a/go/vt/vtgate/evalengine/eval_numeric.go b/go/vt/vtgate/evalengine/eval_numeric.go index 4d4dc3ef133..7ff4d230ff5 100644 --- a/go/vt/vtgate/evalengine/eval_numeric.go +++ b/go/vt/vtgate/evalengine/eval_numeric.go @@ -17,12 +17,15 @@ limitations under the License. package evalengine import ( + "fmt" "math" "strconv" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/fastparse" + "vitess.io/vitess/go/mysql/format" + "vitess.io/vitess/go/mysql/json" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/vtgate/evalengine/internal/decimal" - "vitess.io/vitess/go/vt/vtgate/evalengine/internal/json" "vitess.io/vitess/go/vt/vthash" ) @@ -94,7 +97,7 @@ func newEvalBool(b bool) *evalInt64 { return evalBoolFalse } -func evalToNumeric(e eval) evalNumeric { +func evalToNumeric(e eval, preciseDatetime bool) evalNumeric { switch e := e.(type) { case evalNumeric: return e @@ -107,23 +110,192 @@ func evalToNumeric(e eval) evalNumeric { } return hex } - return &evalFloat{f: parseStringToFloat(e.string())} + f, _ := fastparse.ParseFloat64(e.string()) + return &evalFloat{f: f} case *evalJSON: switch e.Type() { - case json.TypeTrue: - return newEvalBool(true) - case json.TypeFalse: - return newEvalBool(false) - case json.TypeNumber, json.TypeString: - return &evalFloat{f: parseStringToFloat(e.Raw())} + case json.TypeBoolean: + if e == json.ValueTrue { + return &evalFloat{f: 1.0} + } + return &evalFloat{f: 0.0} + case json.TypeNumber: + f, _ := e.Float64() + return &evalFloat{f: f} + case json.TypeString: + f, _ := fastparse.ParseFloat64(e.Raw()) + return &evalFloat{f: f} default: return &evalFloat{f: 0} } + case *evalTemporal: + if preciseDatetime { + if e.prec == 0 { + return newEvalInt64(e.toInt64()) + } + return newEvalDecimalWithPrec(e.toDecimal(), int32(e.prec)) + } + return &evalFloat{f: e.toFloat()} default: panic("unsupported") } } +func evalToFloat(e eval) (*evalFloat, bool) { + switch e := e.(type) { + case *evalFloat: + return e, true + case evalNumeric: + return e.toFloat() + case *evalBytes: + if e.isHexLiteral { + hex, ok := e.toNumericHex() + if !ok { + // overflow + return newEvalFloat(0), false + } + f, ok := hex.toFloat() + if !ok { + return newEvalFloat(0), false + } + return f, true + } + val, err := fastparse.ParseFloat64(e.string()) + return &evalFloat{f: val}, err == nil + case *evalJSON: + switch e.Type() { + case json.TypeBoolean: + if e == json.ValueTrue { + return &evalFloat{f: 1.0}, true + } + return &evalFloat{f: 0.0}, true + case json.TypeNumber: + f, ok := e.Float64() + return &evalFloat{f: f}, ok + case json.TypeString: + val, err := fastparse.ParseFloat64(e.Raw()) + return &evalFloat{f: val}, err == nil + default: + return &evalFloat{f: 0}, true + } + case *evalTemporal: + return &evalFloat{f: e.toFloat()}, true + default: + panic(fmt.Sprintf("unsupported type %T", e)) + } +} + +func evalToDecimal(e eval, m, d int32) *evalDecimal { + switch e := e.(type) { + case evalNumeric: + return e.toDecimal(m, d) + case *evalBytes: + if e.isHexLiteral { + hex, ok := e.toNumericHex() + if !ok { + // overflow + return newEvalDecimal(decimal.Zero, m, d) + } + return hex.toDecimal(m, d) + } + dec, _ := decimal.NewFromString(e.string()) + return newEvalDecimal(dec, m, d) + case *evalJSON: + switch e.Type() { + case json.TypeBoolean: + if e == json.ValueTrue { + return newEvalDecimal(decimal.NewFromInt(1), m, d) + } + return newEvalDecimal(decimal.Zero, m, d) + case json.TypeNumber: + switch e.NumberType() { + case json.NumberTypeSigned: + i, _ := e.Int64() + return newEvalDecimal(decimal.NewFromInt(i), m, d) + case json.NumberTypeUnsigned: + // If the value fits in an unsigned integer, convert to that + // and then cast it to a signed integer and then turn it into a decimal. + // SELECT CAST(CAST(18446744073709551615 AS JSON) AS DECIMAL) -> -1 + u, _ := e.Uint64() + return newEvalDecimal(decimal.NewFromInt(int64(u)), m, d) + case json.NumberTypeDecimal: + dec, _ := e.Decimal() + return newEvalDecimal(dec, m, d) + case json.NumberTypeFloat: + f, _ := e.Float64() + dec := decimal.NewFromFloat(f) + return newEvalDecimal(dec, m, d) + default: + panic("unreachable") + } + case json.TypeString: + dec, _ := decimal.NewFromString(e.Raw()) + return newEvalDecimal(dec, m, d) + default: + return newEvalDecimal(decimal.Zero, m, d) + } + case *evalTemporal: + return newEvalDecimal(e.toDecimal(), m, d) + default: + panic("unsupported") + } +} + +func evalToInt64(e eval) *evalInt64 { + switch e := e.(type) { + case *evalInt64: + return e + case evalNumeric: + return e.toInt64() + case *evalBytes: + if e.isHexLiteral { + hex, ok := e.toNumericHex() + if !ok { + // overflow + return newEvalInt64(0) + } + return hex.toInt64() + } + i, _ := fastparse.ParseInt64(e.string(), 10) + return newEvalInt64(i) + case *evalJSON: + switch e.Type() { + case json.TypeBoolean: + if e == json.ValueTrue { + return newEvalInt64(1) + } + return newEvalInt64(0) + case json.TypeNumber: + switch e.NumberType() { + case json.NumberTypeSigned: + i, _ := e.Int64() + return newEvalInt64(i) + case json.NumberTypeUnsigned: + u, _ := e.Uint64() + // OMG, MySQL is really terrible at this. + return newEvalInt64(int64(u)) + case json.NumberTypeDecimal: + d, _ := e.Decimal() + return newEvalInt64(decimalToInt64(d)) + case json.NumberTypeFloat: + f, _ := e.Float64() + return newEvalInt64(floatToInt64(f)) + default: + panic("unsupported") + } + case json.TypeString: + i, _ := fastparse.ParseInt64(e.Raw(), 10) + return newEvalInt64(i) + default: + return newEvalInt64(0) + } + case *evalTemporal: + return newEvalInt64(e.toInt64()) + default: + panic(fmt.Sprintf("unsupported type: %T", e)) + } +} + func (e *evalInt64) Hash(h *vthash.Hasher) { if e.i < 0 { h.Write16(hashPrefixIntegralNegative) @@ -221,23 +393,26 @@ func (e *evalFloat) SQLType() sqltypes.Type { } func (e *evalFloat) ToRawBytes() []byte { - return FormatFloat(sqltypes.Float64, e.f) + return format.FormatFloat(e.f) } func (e *evalFloat) negate() evalNumeric { return newEvalFloat(-e.f) } -func (e *evalFloat) toInt64() *evalInt64 { +func floatToInt64(f float64) int64 { // the int64(f) conversion is always well-defined, but for float values larger than // MaxInt64, it returns a negative value. Check for underflow: if the sign of // our integral is negative but our float is not, clamp to MaxInt64 like MySQL does. - f := math.Round(e.f) - i := int64(f) + i := int64(math.Round(f)) if i < 0 && !math.Signbit(f) { i = math.MaxInt64 } - return newEvalInt64(i) + return i +} + +func (e *evalFloat) toInt64() *evalInt64 { + return newEvalInt64(floatToInt64(e.f)) } func (e *evalFloat) toFloat() (*evalFloat, bool) { @@ -304,16 +479,20 @@ func (e *evalDecimal) negate() evalNumeric { return newEvalDecimalWithPrec(e.dec.Neg(), e.length) } -func (e *evalDecimal) toInt64() *evalInt64 { - dec := e.dec.Round(0) +func decimalToInt64(dec decimal.Decimal) int64 { + dec = dec.Round(0) i, valid := dec.Int64() if !valid { if dec.Sign() < 0 { - return newEvalInt64(math.MinInt64) + return math.MinInt64 } - return newEvalInt64(math.MaxInt64) + return math.MaxInt64 } - return newEvalInt64(i) + return i +} + +func (e *evalDecimal) toInt64() *evalInt64 { + return newEvalInt64(decimalToInt64(e.dec)) } func (e *evalDecimal) toFloat0() (float64, bool) { diff --git a/go/vt/vtgate/evalengine/eval_result.go b/go/vt/vtgate/evalengine/eval_result.go index 3f678ae3a7d..19a6ea59220 100644 --- a/go/vt/vtgate/evalengine/eval_result.go +++ b/go/vt/vtgate/evalengine/eval_result.go @@ -20,6 +20,8 @@ import ( "strings" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -29,9 +31,24 @@ type EvalResult struct { v eval } -// Value allows for retrieval of the value we expose for public consumption -func (er EvalResult) Value() sqltypes.Value { - return evalToSQLValue(er.v) +// Value allows for retrieval of the value we expose for public consumption. +// It will be converted to the passed in collation which is the connection +// collation and what the client expects the result to be in. +func (er EvalResult) Value(id collations.ID) sqltypes.Value { + str, ok := er.v.(*evalBytes) + if !ok || str.isBinary() || str.col.Collation == collations.Unknown || str.col.Collation == id { + return evalToSQLValue(er.v) + } + + dst, err := charset.Convert(nil, colldata.Lookup(id).Charset(), str.bytes, colldata.Lookup(str.col.Collation).Charset()) + if err != nil { + // If we can't convert, we just return what we have, but it's going + // to be invalidly encoded. Should normally never happen as only utf8mb4 + // is really supported for the connection character set anyway and all + // other charsets can be converted to utf8mb4. + return sqltypes.MakeTrusted(str.SQLType(), str.bytes) + } + return sqltypes.MakeTrusted(str.SQLType(), dst) } func (er EvalResult) Collation() collations.ID { @@ -39,7 +56,7 @@ func (er EvalResult) Collation() collations.ID { } func (er EvalResult) String() string { - return er.Value().String() + return er.Value(collations.Default()).String() } // TupleValues allows for retrieval of the value we expose for public consumption @@ -64,6 +81,10 @@ func (er EvalResult) MustBoolean() bool { return b } +func (er EvalResult) ToBoolean() bool { + return evalIsTruthy(er.v) == boolTrue +} + // ToBooleanStrict is used when the casting to a boolean has to be minimally forgiving, // such as when assigning to a system variable that is expected to be a boolean func (er EvalResult) ToBooleanStrict() (bool, error) { diff --git a/go/vt/vtgate/evalengine/eval_temporal.go b/go/vt/vtgate/evalengine/eval_temporal.go new file mode 100644 index 00000000000..13acc5bd290 --- /dev/null +++ b/go/vt/vtgate/evalengine/eval_temporal.go @@ -0,0 +1,401 @@ +package evalengine + +import ( + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/datetime" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/json" + "vitess.io/vitess/go/sqltypes" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vthash" +) + +type evalTemporal struct { + t sqltypes.Type + prec uint8 + dt datetime.DateTime +} + +func (e *evalTemporal) Hash(h *vthash.Hasher) { + h.Write16(hashPrefixDate) + e.dt.Hash(h) +} + +func (e *evalTemporal) ToRawBytes() []byte { + switch e.t { + case sqltypes.Date: + return e.dt.Date.Format() + case sqltypes.Datetime: + return e.dt.Format(e.prec) + case sqltypes.Time: + return e.dt.Time.Format(e.prec) + default: + panic("unreachable") + } +} + +func (e *evalTemporal) SQLType() sqltypes.Type { + return e.t +} + +func (e *evalTemporal) toInt64() int64 { + switch e.SQLType() { + case sqltypes.Date: + return e.dt.Date.FormatInt64() + case sqltypes.Datetime: + return e.dt.FormatInt64() + case sqltypes.Time: + return e.dt.Time.FormatInt64() + default: + panic("unreachable") + } +} + +func (e *evalTemporal) toFloat() float64 { + switch e.SQLType() { + case sqltypes.Date: + return float64(e.dt.Date.FormatInt64()) + case sqltypes.Datetime: + return e.dt.FormatFloat64() + case sqltypes.Time: + return e.dt.Time.FormatFloat64() + default: + panic("unreachable") + } +} + +func (e *evalTemporal) toDecimal() decimal.Decimal { + switch e.SQLType() { + case sqltypes.Date: + return decimal.NewFromInt(e.dt.Date.FormatInt64()) + case sqltypes.Datetime: + return e.dt.FormatDecimal() + case sqltypes.Time: + return e.dt.Time.FormatDecimal() + default: + panic("unreachable") + } +} + +func (e *evalTemporal) toJSON() *evalJSON { + switch e.SQLType() { + case sqltypes.Date: + return json.NewDate(hack.String(e.dt.Date.Format())) + case sqltypes.Datetime: + return json.NewDateTime(hack.String(e.dt.Format(datetime.DefaultPrecision))) + case sqltypes.Time: + return json.NewTime(hack.String(e.dt.Time.Format(datetime.DefaultPrecision))) + default: + panic("unreachable") + } +} + +func (e *evalTemporal) toDateTime(l int) *evalTemporal { + switch e.SQLType() { + case sqltypes.Datetime, sqltypes.Date: + return &evalTemporal{t: sqltypes.Datetime, dt: e.dt.Round(l), prec: uint8(l)} + case sqltypes.Time: + return &evalTemporal{t: sqltypes.Datetime, dt: e.dt.Time.Round(l).ToDateTime(), prec: uint8(l)} + default: + panic("unreachable") + } +} + +func (e *evalTemporal) toTime(l int) *evalTemporal { + switch e.SQLType() { + case sqltypes.Datetime: + dt := datetime.DateTime{Time: e.dt.Time.Round(l)} + return &evalTemporal{t: sqltypes.Time, dt: dt, prec: uint8(l)} + case sqltypes.Date: + // Zero-time + return &evalTemporal{t: sqltypes.Time, prec: uint8(l)} + case sqltypes.Time: + return &evalTemporal{t: sqltypes.Time, dt: e.dt.Round(l), prec: uint8(l)} + default: + panic("unreachable") + } +} + +func (e *evalTemporal) toDate() *evalTemporal { + switch e.SQLType() { + case sqltypes.Datetime: + dt := datetime.DateTime{Date: e.dt.Date} + return &evalTemporal{t: sqltypes.Date, dt: dt} + case sqltypes.Date: + return e + case sqltypes.Time: + dt := e.dt.Time.ToDateTime() + dt.Time = datetime.Time{} + return &evalTemporal{t: sqltypes.Date, dt: dt} + default: + panic("unreachable") + } +} + +func (e *evalTemporal) isZero() bool { + return e.dt.IsZero() +} + +func (e *evalTemporal) addInterval(interval *datetime.Interval, strcoll collations.TypedCollation) eval { + var tmp *evalTemporal + var ok bool + + switch tt := e.SQLType(); { + case tt == sqltypes.Date && !interval.Unit().HasTimeParts(): + tmp = &evalTemporal{t: e.t} + tmp.dt.Date, ok = e.dt.Date.AddInterval(interval) + case tt == sqltypes.Time && !interval.Unit().HasDateParts(): + tmp = &evalTemporal{t: e.t} + tmp.dt.Time, tmp.prec, ok = e.dt.Time.AddInterval(interval, strcoll.Valid()) + case tt == sqltypes.Datetime || tt == sqltypes.Timestamp || (tt == sqltypes.Date && interval.Unit().HasTimeParts()) || (tt == sqltypes.Time && interval.Unit().HasDateParts()): + tmp = e.toDateTime(int(e.prec)) + tmp.dt, tmp.prec, ok = e.dt.AddInterval(interval, strcoll.Valid()) + } + if !ok { + return nil + } + if strcoll.Valid() { + return newEvalRaw(sqltypes.Char, tmp.ToRawBytes(), strcoll) + } + return tmp +} + +func newEvalDateTime(dt datetime.DateTime, l int) *evalTemporal { + return &evalTemporal{t: sqltypes.Datetime, dt: dt.Round(l), prec: uint8(l)} +} + +func newEvalDate(d datetime.Date) *evalTemporal { + return &evalTemporal{t: sqltypes.Date, dt: datetime.DateTime{Date: d}} +} + +func newEvalTime(time datetime.Time, l int) *evalTemporal { + return &evalTemporal{t: sqltypes.Time, dt: datetime.DateTime{Time: time.Round(l)}, prec: uint8(l)} +} + +func errIncorrectTemporal(date string, in []byte) error { + return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "Incorrect %s value: '%s'", date, sanitizeErrorValue(in)) +} + +func parseDate(s []byte) (*evalTemporal, error) { + t, ok := datetime.ParseDate(hack.String(s)) + if !ok { + return nil, errIncorrectTemporal("DATE", s) + } + return newEvalDate(t), nil +} + +func parseDateTime(s []byte) (*evalTemporal, error) { + t, l, ok := datetime.ParseDateTime(hack.String(s), -1) + if !ok { + return nil, errIncorrectTemporal("DATETIME", s) + } + return newEvalDateTime(t, l), nil +} + +func parseTime(s []byte) (*evalTemporal, error) { + t, l, ok := datetime.ParseTime(hack.String(s), -1) + if !ok { + return nil, errIncorrectTemporal("TIME", s) + } + return newEvalTime(t, l), nil +} + +func precision(req, got int) int { + if req == -1 { + return got + } + return req +} + +func evalToTemporal(e eval) *evalTemporal { + switch e := e.(type) { + case *evalTemporal: + return e + case *evalBytes: + if t, l, ok := datetime.ParseDateTime(e.string(), -1); ok { + return newEvalDateTime(t, l) + } + if d, ok := datetime.ParseDate(e.string()); ok { + return newEvalDate(d) + } + if t, l, ok := datetime.ParseTime(e.string(), -1); ok { + return newEvalTime(t, l) + } + case *evalInt64: + if t, ok := datetime.ParseDateTimeInt64(e.i); ok { + return newEvalDateTime(t, 0) + } + if d, ok := datetime.ParseDateInt64(e.i); ok { + return newEvalDate(d) + } + if t, ok := datetime.ParseTimeInt64(e.i); ok { + return newEvalTime(t, 0) + } + case *evalUint64: + if t, ok := datetime.ParseDateTimeInt64(int64(e.u)); ok { + return newEvalDateTime(t, 0) + } + if d, ok := datetime.ParseDateInt64(int64(e.u)); ok { + return newEvalDate(d) + } + if t, ok := datetime.ParseTimeInt64(int64(e.u)); ok { + return newEvalTime(t, 0) + } + case *evalFloat: + if t, l, ok := datetime.ParseDateTimeFloat(e.f, -1); ok { + return newEvalDateTime(t, l) + } + if d, ok := datetime.ParseDateFloat(e.f); ok { + return newEvalDate(d) + } + if t, l, ok := datetime.ParseTimeFloat(e.f, -1); ok { + return newEvalTime(t, l) + } + case *evalDecimal: + if t, l, ok := datetime.ParseDateTimeDecimal(e.dec, e.length, -1); ok { + return newEvalDateTime(t, l) + } + if d, ok := datetime.ParseDateDecimal(e.dec); ok { + return newEvalDate(d) + } + if d, l, ok := datetime.ParseTimeDecimal(e.dec, e.length, -1); ok { + return newEvalTime(d, l) + } + case *evalJSON: + if dt, ok := e.DateTime(); ok { + if dt.Date.IsZero() { + return newEvalTime(dt.Time, datetime.DefaultPrecision) + } + if dt.Time.IsZero() { + return newEvalDate(dt.Date) + } + return newEvalDateTime(dt, datetime.DefaultPrecision) + } + } + return nil +} + +func evalToTime(e eval, l int) *evalTemporal { + switch e := e.(type) { + case *evalTemporal: + return e.toTime(precision(l, int(e.prec))) + case *evalBytes: + if dt, l, _ := datetime.ParseDateTime(e.string(), l); !dt.IsZero() { + return newEvalTime(dt.Time, l) + } + if t, l, ok := datetime.ParseTime(e.string(), l); ok || !t.IsZero() { + return newEvalTime(t, l) + } + case *evalInt64: + if t, ok := datetime.ParseTimeInt64(e.i); ok { + return newEvalTime(t, precision(l, 0)) + } + if dt, ok := datetime.ParseDateTimeInt64(e.i); ok { + return newEvalTime(dt.Time, precision(l, 0)) + } + case *evalUint64: + if t, ok := datetime.ParseTimeInt64(int64(e.u)); ok { + return newEvalTime(t, precision(l, 0)) + } + if dt, ok := datetime.ParseDateTimeInt64(int64(e.u)); ok { + return newEvalTime(dt.Time, precision(l, 0)) + } + case *evalFloat: + if t, l, ok := datetime.ParseTimeFloat(e.f, l); ok { + return newEvalTime(t, l) + } + if dt, l, ok := datetime.ParseDateTimeFloat(e.f, l); ok { + return newEvalTime(dt.Time, l) + } + case *evalDecimal: + if t, l, ok := datetime.ParseTimeDecimal(e.dec, e.length, l); ok { + return newEvalTime(t, l) + } + if dt, l, ok := datetime.ParseDateTimeDecimal(e.dec, e.length, l); ok { + return newEvalTime(dt.Time, l) + } + case *evalJSON: + if t, ok := e.Time(); ok { + return newEvalTime(t.RoundForJSON(), precision(l, datetime.DefaultPrecision)) + } + } + return nil +} + +func evalToDateTime(e eval, l int) *evalTemporal { + switch e := e.(type) { + case *evalTemporal: + return e.toDateTime(precision(l, int(e.prec))) + case *evalBytes: + if t, l, _ := datetime.ParseDateTime(e.string(), l); !t.IsZero() { + return newEvalDateTime(t, l) + } + if d, _ := datetime.ParseDate(e.string()); !d.IsZero() { + return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0)) + } + case *evalInt64: + if t, ok := datetime.ParseDateTimeInt64(e.i); ok { + return newEvalDateTime(t, precision(l, 0)) + } + if d, ok := datetime.ParseDateInt64(e.i); ok { + return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0)) + } + case *evalUint64: + if t, ok := datetime.ParseDateTimeInt64(int64(e.u)); ok { + return newEvalDateTime(t, precision(l, 0)) + } + if d, ok := datetime.ParseDateInt64(int64(e.u)); ok { + return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0)) + } + case *evalFloat: + if t, l, ok := datetime.ParseDateTimeFloat(e.f, l); ok { + return newEvalDateTime(t, l) + } + if d, ok := datetime.ParseDateFloat(e.f); ok { + return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0)) + } + case *evalDecimal: + if t, l, ok := datetime.ParseDateTimeDecimal(e.dec, e.length, l); ok { + return newEvalDateTime(t, l) + } + if d, ok := datetime.ParseDateDecimal(e.dec); ok { + return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0)) + } + case *evalJSON: + if dt, ok := e.DateTime(); ok { + return newEvalDateTime(dt, precision(l, datetime.DefaultPrecision)) + } + } + return nil +} + +func evalToDate(e eval) *evalTemporal { + switch e := e.(type) { + case *evalTemporal: + return e.toDate() + case *evalBytes: + if t, _ := datetime.ParseDate(e.string()); !t.IsZero() { + return newEvalDate(t) + } + if dt, _, _ := datetime.ParseDateTime(e.string(), -1); !dt.IsZero() { + return newEvalDate(dt.Date) + } + case evalNumeric: + if t, ok := datetime.ParseDateInt64(e.toInt64().i); ok { + return newEvalDate(t) + } + if dt, ok := datetime.ParseDateTimeInt64(e.toInt64().i); ok { + return newEvalDate(dt.Date) + } + case *evalJSON: + if d, ok := e.Date(); ok { + return newEvalDate(d) + } + } + return nil +} + +var _ eval = (*evalTemporal)(nil) +var _ hashable = (*evalTemporal)(nil) diff --git a/go/vt/vtgate/evalengine/expr.go b/go/vt/vtgate/evalengine/expr.go index 348234e5cf5..dfa8491391e 100644 --- a/go/vt/vtgate/evalengine/expr.go +++ b/go/vt/vtgate/evalengine/expr.go @@ -18,16 +18,18 @@ package evalengine import ( "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" ) type ( // Expr is the interface that all evaluating expressions must implement Expr interface { eval(env *ExpressionEnv) (eval, error) - typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) + typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) format(buf *formatter, depth int) constant() bool simplify(env *ExpressionEnv) error + compile(c *compiler) (ctype, error) } UnaryExpr struct { diff --git a/go/vt/vtgate/evalengine/expr_arithmetic.go b/go/vt/vtgate/evalengine/expr_arithmetic.go index a7c8e12ed4d..50326c9eb3c 100644 --- a/go/vt/vtgate/evalengine/expr_arithmetic.go +++ b/go/vt/vtgate/evalengine/expr_arithmetic.go @@ -18,6 +18,7 @@ package evalengine import ( "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" ) type ( @@ -32,13 +33,16 @@ type ( opArith interface { eval(left, right eval) (eval, error) + compile(c *compiler, left, right Expr) (ctype, error) String() string } - opArithAdd struct{} - opArithSub struct{} - opArithMul struct{} - opArithDiv struct{} + opArithAdd struct{} + opArithSub struct{} + opArithMul struct{} + opArithDiv struct{} + opArithIntDiv struct{} + opArithMod struct{} ) var _ Expr = (*ArithmeticExpr)(nil) @@ -47,40 +51,64 @@ var _ opArith = (*opArithAdd)(nil) var _ opArith = (*opArithSub)(nil) var _ opArith = (*opArithMul)(nil) var _ opArith = (*opArithDiv)(nil) +var _ opArith = (*opArithIntDiv)(nil) +var _ opArith = (*opArithMod)(nil) func (b *ArithmeticExpr) eval(env *ExpressionEnv) (eval, error) { - left, right, err := b.arguments(env) - if left == nil || right == nil || err != nil { + left, err := b.Left.eval(env) + if left == nil || err != nil { + return nil, err + } + + right, err := b.Right.eval(env) + if right == nil || err != nil { return nil, err } return b.Op.eval(left, right) } -func makeNumericalType(t sqltypes.Type, f typeFlag) sqltypes.Type { +func makeNumericalType(t sqltypes.Type, f typeFlag) (sqltypes.Type, typeFlag) { if sqltypes.IsNumber(t) { - return t + return t, f } if t == sqltypes.VarBinary && (f&flagHex) != 0 { - return sqltypes.Uint64 + return sqltypes.Uint64, f } - return sqltypes.Float64 + if sqltypes.IsDateOrTime(t) { + return sqltypes.Int64, f | flagAmbiguousType + } + return sqltypes.Float64, f } // typeof implements the Expr interface -func (b *ArithmeticExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - t1, f1 := b.Left.typeof(env) - t2, f2 := b.Right.typeof(env) - flags := f1 | f2 +func (b *ArithmeticExpr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + t1, f1 := b.Left.typeof(env, fields) + t2, f2 := b.Right.typeof(env, fields) + + t1, f1 = makeNumericalType(t1, f1) + t2, f2 = makeNumericalType(t2, f2) - t1 = makeNumericalType(t1, f1) - t2 = makeNumericalType(t2, f2) + flags := f1 | f2 switch b.Op.(type) { case *opArithDiv: if t1 == sqltypes.Float64 || t2 == sqltypes.Float64 { - return sqltypes.Float64, flags + return sqltypes.Float64, flags | flagNullable + } + return sqltypes.Decimal, flags | flagNullable + case *opArithIntDiv: + if t1 == sqltypes.Uint64 || t2 == sqltypes.Uint64 { + return sqltypes.Uint64, flags | flagNullable } - return sqltypes.Decimal, flags + return sqltypes.Int64, flags | flagNullable + case *opArithMod: + if t1 == sqltypes.Float64 || t2 == sqltypes.Float64 { + return sqltypes.Float64, flags | flagNullable + } + if t1 == sqltypes.Decimal || t2 == sqltypes.Decimal { + return sqltypes.Decimal, flags | flagNullable + } + return t1, flags | flagNullable } switch t1 { @@ -102,26 +130,414 @@ func (b *ArithmeticExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { return t1, flags } +func (b *ArithmeticExpr) compile(c *compiler) (ctype, error) { + return b.Op.compile(c, b.Left, b.Right) +} + func (op *opArithAdd) eval(left, right eval) (eval, error) { return addNumericWithError(left, right) } func (op *opArithAdd) String() string { return "+" } +func (op *opArithAdd) compile(c *compiler, left, right Expr) (ctype, error) { + lt, err := left.compile(c) + if err != nil { + return ctype{}, err + } + skip1 := c.compileNullCheck1(lt) + + rt, err := right.compile(c) + if err != nil { + return ctype{}, err + } + + swap := false + skip2 := c.compileNullCheck1r(rt) + + lt = c.compileToNumeric(lt, 2, sqltypes.Float64, true) + rt = c.compileToNumeric(rt, 1, sqltypes.Float64, true) + lt, rt, swap = c.compileNumericPriority(lt, rt) + + var sumtype sqltypes.Type + + switch lt.Type { + case sqltypes.Int64: + c.asm.Add_ii() + sumtype = sqltypes.Int64 + case sqltypes.Uint64: + switch rt.Type { + case sqltypes.Int64: + c.asm.Add_ui(swap) + case sqltypes.Uint64: + c.asm.Add_uu() + } + sumtype = sqltypes.Uint64 + case sqltypes.Decimal: + if swap { + c.compileToDecimal(rt, 2) + } else { + c.compileToDecimal(rt, 1) + } + c.asm.Add_dd() + sumtype = sqltypes.Decimal + case sqltypes.Float64: + if swap { + c.compileToFloat(rt, 2) + } else { + c.compileToFloat(rt, 1) + } + c.asm.Add_ff() + sumtype = sqltypes.Float64 + } + + c.asm.jumpDestination(skip1, skip2) + return ctype{Type: sumtype, Col: collationNumeric}, nil +} + func (op *opArithSub) eval(left, right eval) (eval, error) { return subtractNumericWithError(left, right) } func (op *opArithSub) String() string { return "-" } +func (op *opArithSub) compile(c *compiler, left, right Expr) (ctype, error) { + lt, err := left.compile(c) + if err != nil { + return ctype{}, err + } + skip1 := c.compileNullCheck1(lt) + + rt, err := right.compile(c) + if err != nil { + return ctype{}, err + } + + skip2 := c.compileNullCheck1r(rt) + lt = c.compileToNumeric(lt, 2, sqltypes.Float64, true) + rt = c.compileToNumeric(rt, 1, sqltypes.Float64, true) + + var subtype sqltypes.Type + + switch lt.Type { + case sqltypes.Int64: + switch rt.Type { + case sqltypes.Int64: + c.asm.Sub_ii() + subtype = sqltypes.Int64 + case sqltypes.Uint64: + c.asm.Sub_iu() + subtype = sqltypes.Uint64 + case sqltypes.Float64: + c.compileToFloat(lt, 2) + c.asm.Sub_ff() + subtype = sqltypes.Float64 + case sqltypes.Decimal: + c.compileToDecimal(lt, 2) + c.asm.Sub_dd() + subtype = sqltypes.Decimal + } + case sqltypes.Uint64: + switch rt.Type { + case sqltypes.Int64: + c.asm.Sub_ui() + subtype = sqltypes.Uint64 + case sqltypes.Uint64: + c.asm.Sub_uu() + subtype = sqltypes.Uint64 + case sqltypes.Float64: + c.compileToFloat(lt, 2) + c.asm.Sub_ff() + subtype = sqltypes.Float64 + case sqltypes.Decimal: + c.compileToDecimal(lt, 2) + c.asm.Sub_dd() + subtype = sqltypes.Decimal + } + case sqltypes.Float64: + c.compileToFloat(rt, 1) + c.asm.Sub_ff() + subtype = sqltypes.Float64 + case sqltypes.Decimal: + switch rt.Type { + case sqltypes.Float64: + c.compileToFloat(lt, 2) + c.asm.Sub_ff() + subtype = sqltypes.Float64 + default: + c.compileToDecimal(rt, 1) + c.asm.Sub_dd() + subtype = sqltypes.Decimal + } + } + + if subtype == 0 { + panic("did not compile?") + } + + c.asm.jumpDestination(skip1, skip2) + return ctype{Type: subtype, Col: collationNumeric}, nil +} + func (op *opArithMul) eval(left, right eval) (eval, error) { return multiplyNumericWithError(left, right) } + func (op *opArithMul) String() string { return "*" } +func (op *opArithMul) compile(c *compiler, left, right Expr) (ctype, error) { + lt, err := left.compile(c) + if err != nil { + return ctype{}, err + } + skip1 := c.compileNullCheck1(lt) + + rt, err := right.compile(c) + if err != nil { + return ctype{}, err + } + + swap := false + skip2 := c.compileNullCheck1r(rt) + lt = c.compileToNumeric(lt, 2, sqltypes.Float64, true) + rt = c.compileToNumeric(rt, 1, sqltypes.Float64, true) + lt, rt, swap = c.compileNumericPriority(lt, rt) + + var multype sqltypes.Type + + switch lt.Type { + case sqltypes.Int64: + c.asm.Mul_ii() + multype = sqltypes.Int64 + case sqltypes.Uint64: + switch rt.Type { + case sqltypes.Int64: + c.asm.Mul_ui(swap) + case sqltypes.Uint64: + c.asm.Mul_uu() + } + multype = sqltypes.Uint64 + case sqltypes.Float64: + if swap { + c.compileToFloat(rt, 2) + } else { + c.compileToFloat(rt, 1) + } + c.asm.Mul_ff() + multype = sqltypes.Float64 + case sqltypes.Decimal: + if swap { + c.compileToDecimal(rt, 2) + } else { + c.compileToDecimal(rt, 1) + } + c.asm.Mul_dd() + multype = sqltypes.Decimal + } + + c.asm.jumpDestination(skip1, skip2) + return ctype{Type: multype, Col: collationNumeric}, nil +} + func (op *opArithDiv) eval(left, right eval) (eval, error) { return divideNumericWithError(left, right, true) } + func (op *opArithDiv) String() string { return "/" } +func (op *opArithDiv) compile(c *compiler, left, right Expr) (ctype, error) { + lt, err := left.compile(c) + if err != nil { + return ctype{}, err + } + skip1 := c.compileNullCheck1(lt) + + rt, err := right.compile(c) + if err != nil { + return ctype{}, err + } + skip2 := c.compileNullCheck1r(rt) + + lt = c.compileToNumeric(lt, 2, sqltypes.Float64, true) + rt = c.compileToNumeric(rt, 1, sqltypes.Float64, true) + + ct := ctype{Col: collationNumeric, Flag: flagNullable} + if lt.Type == sqltypes.Float64 || rt.Type == sqltypes.Float64 { + ct.Type = sqltypes.Float64 + c.compileToFloat(lt, 2) + c.compileToFloat(rt, 1) + c.asm.Div_ff() + } else { + ct.Type = sqltypes.Decimal + c.compileToDecimal(lt, 2) + c.compileToDecimal(rt, 1) + c.asm.Div_dd() + } + c.asm.jumpDestination(skip1, skip2) + return ct, nil +} + +func (op *opArithIntDiv) eval(left, right eval) (eval, error) { + return integerDivideNumericWithError(left, right) +} + +func (op *opArithIntDiv) String() string { return "DIV" } + +func (op *opArithIntDiv) compile(c *compiler, left, right Expr) (ctype, error) { + lt, err := left.compile(c) + if err != nil { + return ctype{}, err + } + skip1 := c.compileNullCheck1(lt) + + rt, err := right.compile(c) + if err != nil { + return ctype{}, err + } + + skip2 := c.compileNullCheck1r(rt) + lt = c.compileToNumeric(lt, 2, sqltypes.Decimal, true) + rt = c.compileToNumeric(rt, 1, sqltypes.Decimal, true) + + ct := ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagNullable} + switch lt.Type { + case sqltypes.Int64: + switch rt.Type { + case sqltypes.Int64: + c.asm.IntDiv_ii() + case sqltypes.Uint64: + ct.Type = sqltypes.Uint64 + c.asm.IntDiv_iu() + case sqltypes.Float64: + c.asm.Convert_xd(2, 0, 0) + c.asm.Convert_xd(1, 0, 0) + c.asm.IntDiv_di() + case sqltypes.Decimal: + c.asm.Convert_xd(2, 0, 0) + c.asm.IntDiv_di() + } + case sqltypes.Uint64: + switch rt.Type { + case sqltypes.Int64: + c.asm.IntDiv_ui() + case sqltypes.Uint64: + ct.Type = sqltypes.Uint64 + c.asm.IntDiv_uu() + case sqltypes.Float64: + c.asm.Convert_xd(2, 0, 0) + c.asm.Convert_xd(1, 0, 0) + c.asm.IntDiv_du() + case sqltypes.Decimal: + c.asm.Convert_xd(2, 0, 0) + c.asm.IntDiv_du() + } + case sqltypes.Float64: + switch rt.Type { + case sqltypes.Decimal: + c.asm.Convert_xd(2, 0, 0) + c.asm.IntDiv_di() + case sqltypes.Uint64: + ct.Type = sqltypes.Uint64 + c.asm.Convert_xd(2, 0, 0) + c.asm.Convert_xd(1, 0, 0) + c.asm.IntDiv_du() + default: + c.asm.Convert_xd(2, 0, 0) + c.asm.Convert_xd(1, 0, 0) + c.asm.IntDiv_di() + } + case sqltypes.Decimal: + switch rt.Type { + case sqltypes.Decimal: + c.asm.IntDiv_di() + case sqltypes.Uint64: + ct.Type = sqltypes.Uint64 + c.asm.Convert_xd(1, 0, 0) + c.asm.IntDiv_du() + default: + c.asm.Convert_xd(1, 0, 0) + c.asm.IntDiv_di() + } + } + c.asm.jumpDestination(skip1, skip2) + return ct, nil +} + +func (op *opArithMod) eval(left, right eval) (eval, error) { + return modNumericWithError(left, right, true) +} + +func (op *opArithMod) String() string { return "DIV" } + +func (op *opArithMod) compile(c *compiler, left, right Expr) (ctype, error) { + lt, err := left.compile(c) + if err != nil { + return ctype{}, err + } + skip1 := c.compileNullCheck1(lt) + + rt, err := right.compile(c) + if err != nil { + return ctype{}, err + } + + skip2 := c.compileNullCheck1r(rt) + lt = c.compileToNumeric(lt, 2, sqltypes.Float64, true) + rt = c.compileToNumeric(rt, 1, sqltypes.Float64, true) + + ct := ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagNullable} + switch lt.Type { + case sqltypes.Int64: + ct.Type = sqltypes.Int64 + switch rt.Type { + case sqltypes.Int64: + c.asm.Mod_ii() + case sqltypes.Uint64: + c.asm.Mod_iu() + case sqltypes.Float64: + ct.Type = sqltypes.Float64 + c.asm.Convert_xf(2) + c.asm.Mod_ff() + case sqltypes.Decimal: + ct.Type = sqltypes.Decimal + c.asm.Convert_xd(2, 0, 0) + c.asm.Mod_dd() + } + case sqltypes.Uint64: + ct.Type = sqltypes.Uint64 + switch rt.Type { + case sqltypes.Int64: + c.asm.Mod_ui() + case sqltypes.Uint64: + c.asm.Mod_uu() + case sqltypes.Float64: + ct.Type = sqltypes.Float64 + c.asm.Convert_xf(2) + c.asm.Mod_ff() + case sqltypes.Decimal: + ct.Type = sqltypes.Decimal + c.asm.Convert_xd(2, 0, 0) + c.asm.Mod_dd() + } + case sqltypes.Decimal: + ct.Type = sqltypes.Decimal + switch rt.Type { + case sqltypes.Float64: + ct.Type = sqltypes.Float64 + c.asm.Convert_xf(2) + c.asm.Mod_ff() + default: + c.asm.Convert_xd(1, 0, 0) + c.asm.Mod_dd() + } + case sqltypes.Float64: + ct.Type = sqltypes.Float64 + c.asm.Convert_xf(1) + c.asm.Mod_ff() + } + + c.asm.jumpDestination(skip1, skip2) + return ct, nil +} + func (n *NegateExpr) eval(env *ExpressionEnv) (eval, error) { e, err := n.Inner.eval(env) if err != nil { @@ -130,11 +546,11 @@ func (n *NegateExpr) eval(env *ExpressionEnv) (eval, error) { if e == nil { return nil, nil } - return evalToNumeric(e).negate(), nil + return evalToNumeric(e, false).negate(), nil } -func (n *NegateExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - tt, f := n.Inner.typeof(env) +func (n *NegateExpr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + tt, f := n.Inner.typeof(env, fields) switch tt { case sqltypes.Uint8, sqltypes.Uint16, sqltypes.Uint32, sqltypes.Uint64: if f&flagIntegerOvf != 0 { @@ -154,3 +570,39 @@ func (n *NegateExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { } return sqltypes.Float64, f } + +func (expr *NegateExpr) compile(c *compiler) (ctype, error) { + arg, err := expr.Inner.compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + arg = c.compileToNumeric(arg, 1, sqltypes.Float64, false) + var neg sqltypes.Type + + switch arg.Type { + case sqltypes.Int64: + neg = sqltypes.Int64 + c.asm.Neg_i() + case sqltypes.Uint64: + if arg.Flag&flagHex != 0 { + neg = sqltypes.Float64 + c.asm.Neg_hex() + } else { + neg = sqltypes.Int64 + c.asm.Neg_u() + } + case sqltypes.Float64: + neg = sqltypes.Float64 + c.asm.Neg_f() + case sqltypes.Decimal: + neg = sqltypes.Decimal + c.asm.Neg_d() + default: + panic("unexpected Numeric type") + } + + c.asm.jumpDestination(skip) + return ctype{Type: neg, Col: collationNumeric}, nil +} diff --git a/go/vt/vtgate/evalengine/expr_bit.go b/go/vt/vtgate/evalengine/expr_bit.go index 1e68438b45f..9da632eed30 100644 --- a/go/vt/vtgate/evalengine/expr_bit.go +++ b/go/vt/vtgate/evalengine/expr_bit.go @@ -18,6 +18,7 @@ package evalengine import ( "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -75,18 +76,38 @@ func (b *BitwiseNotExpr) eval(env *ExpressionEnv) (eval, error) { return newEvalBinary(out), nil } - eu := evalToNumeric(e).toInt64() + eu := evalToInt64(e) return newEvalUint64(^uint64(eu.i)), nil } -func (b *BitwiseNotExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - tt, f := b.Inner.typeof(env) +func (b *BitwiseNotExpr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + tt, f := b.Inner.typeof(env, fields) if tt == sqltypes.VarBinary && f&(flagHex|flagBit) == 0 { return sqltypes.VarBinary, f } return sqltypes.Uint64, f } +func (expr *BitwiseNotExpr) compile(c *compiler) (ctype, error) { + ct, err := expr.Inner.compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(ct) + + if ct.Type == sqltypes.VarBinary && !ct.isHexOrBitLiteral() { + c.asm.BitwiseNot_b() + c.asm.jumpDestination(skip) + return ct, nil + } + + ct = c.compileToBitwiseUint64(ct, 1) + c.asm.BitwiseNot_u() + c.asm.jumpDestination(skip) + return ct, nil +} + func (o opBitShr) BitwiseOp() string { return ">>" } func (o opBitShr) numeric(num, shift uint64) uint64 { return num >> shift } @@ -173,8 +194,13 @@ func (o opBitAnd) BitwiseOp() string { return "&" } var errBitwiseOperandsLength = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Binary operands of bitwise operators must be of equal length") func (bit *BitwiseExpr) eval(env *ExpressionEnv) (eval, error) { - l, r, err := bit.arguments(env) - if l == nil || r == nil || err != nil { + l, err := bit.Left.eval(env) + if l == nil || err != nil { + return nil, err + } + + r, err := bit.Right.eval(env) + if r == nil || err != nil { return nil, err } @@ -201,8 +227,8 @@ func (bit *BitwiseExpr) eval(env *ExpressionEnv) (eval, error) { } } - lu := evalToNumeric(l).toInt64() - ru := evalToNumeric(r).toInt64() + lu := evalToInt64(l) + ru := evalToInt64(r) return newEvalUint64(op.numeric(uint64(lu.i), uint64(ru.i))), nil case opBitShift: @@ -213,11 +239,11 @@ func (bit *BitwiseExpr) eval(env *ExpressionEnv) (eval, error) { unsigned 64-bit integer as necessary. */ if l, ok := l.(*evalBytes); ok && l.isBinary() && !l.isHexOrBitLiteral() { - ru := evalToNumeric(r).toInt64() + ru := evalToInt64(r) return newEvalBinary(op.binary(l.bytes, uint64(ru.i))), nil } - lu := evalToNumeric(l).toInt64() - ru := evalToNumeric(r).toInt64() + lu := evalToInt64(l) + ru := evalToInt64(r) return newEvalUint64(op.numeric(uint64(lu.i), uint64(ru.i))), nil default: @@ -225,9 +251,9 @@ func (bit *BitwiseExpr) eval(env *ExpressionEnv) (eval, error) { } } -func (bit *BitwiseExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - t1, f1 := bit.Left.typeof(env) - t2, f2 := bit.Right.typeof(env) +func (bit *BitwiseExpr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + t1, f1 := bit.Left.typeof(env, fields) + t2, f2 := bit.Right.typeof(env, fields) switch bit.Op.(type) { case opBitBinary: @@ -244,6 +270,93 @@ func (bit *BitwiseExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { return sqltypes.Uint64, f1 | f2 } +func (expr *BitwiseExpr) compileBinary(c *compiler, asm_ins_bb, asm_ins_uu func()) (ctype, error) { + lt, err := expr.Left.compile(c) + if err != nil { + return ctype{}, err + } + + skip1 := c.compileNullCheck1(lt) + + rt, err := expr.Right.compile(c) + if err != nil { + return ctype{}, err + } + + skip2 := c.compileNullCheck1r(rt) + + if lt.Type == sqltypes.VarBinary && rt.Type == sqltypes.VarBinary { + if !lt.isHexOrBitLiteral() || !rt.isHexOrBitLiteral() { + asm_ins_bb() + c.asm.jumpDestination(skip1, skip2) + return ctype{Type: sqltypes.VarBinary, Col: collationBinary}, nil + } + } + + lt = c.compileToBitwiseUint64(lt, 2) + rt = c.compileToBitwiseUint64(rt, 1) + + asm_ins_uu() + c.asm.jumpDestination(skip1, skip2) + return ctype{Type: sqltypes.Uint64, Col: collationNumeric}, nil +} + +func (expr *BitwiseExpr) compileShift(c *compiler, i int) (ctype, error) { + lt, err := expr.Left.compile(c) + if err != nil { + return ctype{}, err + } + + skip1 := c.compileNullCheck1(lt) + + rt, err := expr.Right.compile(c) + if err != nil { + return ctype{}, err + } + + skip2 := c.compileNullCheck1r(rt) + + if lt.Type == sqltypes.VarBinary && !lt.isHexOrBitLiteral() { + _ = c.compileToUint64(rt, 1) + if i < 0 { + c.asm.BitShiftLeft_bu() + } else { + c.asm.BitShiftRight_bu() + } + c.asm.jumpDestination(skip1, skip2) + return ctype{Type: sqltypes.VarBinary, Col: collationBinary}, nil + } + + _ = c.compileToBitwiseUint64(lt, 2) + _ = c.compileToUint64(rt, 1) + + if i < 0 { + c.asm.BitShiftLeft_uu() + } else { + c.asm.BitShiftRight_uu() + } + + c.asm.jumpDestination(skip1, skip2) + return ctype{Type: sqltypes.Uint64, Col: collationNumeric}, nil +} + +func (expr *BitwiseExpr) compile(c *compiler) (ctype, error) { + switch expr.Op.(type) { + case *opBitAnd: + return expr.compileBinary(c, c.asm.BitOp_and_bb, c.asm.BitOp_and_uu) + case *opBitOr: + return expr.compileBinary(c, c.asm.BitOp_or_bb, c.asm.BitOp_or_uu) + case *opBitXor: + return expr.compileBinary(c, c.asm.BitOp_xor_bb, c.asm.BitOp_xor_uu) + case *opBitShl: + return expr.compileShift(c, -1) + case *opBitShr: + return expr.compileShift(c, 1) + default: + panic("unexpected arithmetic operator") + } +} + var _ opBitBinary = (*opBitAnd)(nil) var _ opBitBinary = (*opBitOr)(nil) var _ opBitBinary = (*opBitXor)(nil) diff --git a/go/vt/vtgate/evalengine/expr_bvar.go b/go/vt/vtgate/evalengine/expr_bvar.go index 01c60dfc53f..9172f8abc3c 100644 --- a/go/vt/vtgate/evalengine/expr_bvar.go +++ b/go/vt/vtgate/evalengine/expr_bvar.go @@ -26,39 +26,38 @@ import ( type ( BindVariable struct { - Key string - col collations.TypedCollation - coerce sqltypes.Type - tuple bool + Key string + Type sqltypes.Type + Collation collations.TypedCollation } ) var _ Expr = (*BindVariable)(nil) -func (bv *BindVariable) bvar(env *ExpressionEnv) (*querypb.BindVariable, error) { - val, ok := env.BindVars[bv.Key] +func (env *ExpressionEnv) lookupBindVar(key string) (*querypb.BindVariable, error) { + val, ok := env.BindVars[key] if !ok { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "query arguments missing for %s", bv.Key) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "query arguments missing for %s", key) } return val, nil } // eval implements the Expr interface func (bv *BindVariable) eval(env *ExpressionEnv) (eval, error) { - bvar, err := bv.bvar(env) + bvar, err := env.lookupBindVar(bv.Key) if err != nil { return nil, err } switch bvar.Type { case sqltypes.Tuple: - if !bv.tuple { + if bv.Type != sqltypes.Tuple { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "query argument '%s' cannot be a tuple", bv.Key) } tuple := make([]eval, 0, len(bvar.Values)) for _, value := range bvar.Values { - e, err := valueToEval(sqltypes.MakeTrusted(value.Type, value.Value), collations.TypedCollation{}) + e, err := valueToEval(sqltypes.MakeTrusted(value.Type, value.Value), defaultCoercionCollation(collations.DefaultCollationForType(value.Type))) if err != nil { return nil, err } @@ -67,32 +66,75 @@ func (bv *BindVariable) eval(env *ExpressionEnv) (eval, error) { return &evalTuple{t: tuple}, nil default: - typ := bvar.Type - if bv.coerce >= 0 { - typ = bv.coerce + if bv.Type == sqltypes.Tuple { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "query argument '%s' must be a tuple (is %s)", bv.Key, bvar.Type) } - if bv.tuple { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "query argument '%s' must be a tuple (is %s)", bv.Key, typ) + typ := bvar.Type + if bv.typed() { + typ = bv.Type } - return valueToEval(sqltypes.MakeTrusted(typ, bvar.Value), bv.col) + return valueToEval(sqltypes.MakeTrusted(typ, bvar.Value), defaultCoercionCollation(collations.DefaultCollationForType(typ))) } } // typeof implements the Expr interface -func (bv *BindVariable) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - bvar, err := bv.bvar(env) - if err != nil { - return sqltypes.Null, flagNull | flagNullable +func (bv *BindVariable) typeof(env *ExpressionEnv, _ []*querypb.Field) (sqltypes.Type, typeFlag) { + var tt sqltypes.Type + if bv.typed() { + tt = bv.Type + } else { + if bvar, err := env.lookupBindVar(bv.Key); err == nil { + tt = bvar.Type + } } - switch bvar.Type { + switch tt { case sqltypes.Null: return sqltypes.Null, flagNull | flagNullable case sqltypes.HexNum, sqltypes.HexVal: return sqltypes.VarBinary, flagHex default: - if bv.coerce >= 0 { - return bv.coerce, 0 + return tt, 0 + } +} + +func (bvar *BindVariable) compile(c *compiler) (ctype, error) { + if !bvar.typed() { + return ctype{}, c.unsupported(bvar) + } + + switch tt := bvar.Type; { + case sqltypes.IsSigned(tt): + c.asm.PushBVar_i(bvar.Key) + case sqltypes.IsUnsigned(tt): + c.asm.PushBVar_u(bvar.Key) + case sqltypes.IsFloat(tt): + c.asm.PushBVar_f(bvar.Key) + case sqltypes.IsDecimal(tt): + c.asm.PushBVar_d(bvar.Key) + case sqltypes.IsText(tt): + if tt == sqltypes.HexNum { + c.asm.PushBVar_hexnum(bvar.Key) + } else if tt == sqltypes.HexVal { + c.asm.PushBVar_hexval(bvar.Key) + } else { + c.asm.PushBVar_text(bvar.Key, bvar.Collation) } - return bvar.Type, 0 + case sqltypes.IsBinary(tt): + c.asm.PushBVar_bin(bvar.Key) + case sqltypes.IsNull(tt): + c.asm.PushNull() + case tt == sqltypes.TypeJSON: + c.asm.PushBVar_json(bvar.Key) + default: + return ctype{}, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Type is not supported: %s", tt) } + + return ctype{ + Type: bvar.Type, + Col: bvar.Collation, + }, nil +} + +func (bvar *BindVariable) typed() bool { + return bvar.Type >= 0 } diff --git a/go/vt/vtgate/evalengine/expr_call.go b/go/vt/vtgate/evalengine/expr_call.go index 5f9c7930fbe..805b3e5b841 100644 --- a/go/vt/vtgate/evalengine/expr_call.go +++ b/go/vt/vtgate/evalengine/expr_call.go @@ -56,3 +56,16 @@ func (c *CallExpr) arg2(env *ExpressionEnv) (left eval, right eval, err error) { right, err = c.Arguments[1].eval(env) return } + +func (c *CallExpr) arg3(env *ExpressionEnv) (arg1 eval, arg2 eval, arg3 eval, err error) { + arg1, err = c.Arguments[0].eval(env) + if err != nil { + return + } + arg2, err = c.Arguments[1].eval(env) + if err != nil { + return + } + arg3, err = c.Arguments[2].eval(env) + return +} diff --git a/go/vt/vtgate/evalengine/expr_collate.go b/go/vt/vtgate/evalengine/expr_collate.go index 55095844afe..9828a1d8722 100644 --- a/go/vt/vtgate/evalengine/expr_collate.go +++ b/go/vt/vtgate/evalengine/expr_collate.go @@ -17,13 +17,11 @@ limitations under the License. package evalengine import ( - "strconv" - "strings" - "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" ) @@ -47,15 +45,32 @@ var collationBinary = collations.TypedCollation{ var collationJSON = collations.TypedCollation{ Collation: 46, // utf8mb4_bin + Coercibility: collations.CoerceImplicit, + Repertoire: collations.RepertoireUnicode, +} + +var collationUtf8mb3 = collations.TypedCollation{ + Collation: collations.CollationUtf8mb3ID, Coercibility: collations.CoerceCoercible, Repertoire: collations.RepertoireUnicode, } +var collationRegexpFallback = collations.TypedCollation{ + Collation: collations.CollationLatin1Swedish, + Coercibility: collations.CoerceCoercible, + Repertoire: collations.RepertoireASCII, +} + type ( CollateExpr struct { UnaryExpr TypedCollation collations.TypedCollation } + + IntroducerExpr struct { + UnaryExpr + TypedCollation collations.TypedCollation + } ) var _ Expr = (*CollateExpr)(nil) @@ -78,50 +93,43 @@ func (c *CollateExpr) eval(env *ExpressionEnv) (eval, error) { } } -func (c *CollateExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - t, f := c.Inner.typeof(env) +func (c *CollateExpr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + t, f := c.Inner.typeof(env, fields) return t, f | flagExplicitCollation } -type LookupDefaultCollation collations.ID - -func (d LookupDefaultCollation) ColumnLookup(_ *sqlparser.ColName) (int, error) { - return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "column access not supported here") -} - -func (d LookupDefaultCollation) CollationForExpr(_ sqlparser.Expr) collations.ID { - return collations.Unknown -} - -func (d LookupDefaultCollation) DefaultCollation() collations.ID { - return collations.ID(d) -} +func (expr *CollateExpr) compile(c *compiler) (ctype, error) { + ct, err := expr.Inner.compile(c) + if err != nil { + return ctype{}, err + } -type LookupIntegrationTest struct { - Collation collations.ID -} + skip := c.compileNullCheck1(ct) -func (*LookupIntegrationTest) ColumnLookup(name *sqlparser.ColName) (int, error) { - n := name.CompliantName() - if strings.HasPrefix(n, "column") { - return strconv.Atoi(n[len("column"):]) + switch ct.Type { + case sqltypes.VarChar: + if err := collations.Local().EnsureCollate(ct.Col.Collation, expr.TypedCollation.Collation); err != nil { + return ctype{}, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, err.Error()) + } + fallthrough + case sqltypes.VarBinary: + c.asm.Collate(expr.TypedCollation.Collation) + default: + return ctype{}, c.unsupported(expr) } - return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unknown column: %q", n) -} -func (tl *LookupIntegrationTest) CollationForExpr(_ sqlparser.Expr) collations.ID { - return tl.Collation -} + c.asm.jumpDestination(skip) -func (tl *LookupIntegrationTest) DefaultCollation() collations.ID { - return tl.Collation + ct.Col = expr.TypedCollation + ct.Flag |= flagExplicitCollation | flagNullable + return ct, nil } func evalCollation(e eval) collations.TypedCollation { switch e := e.(type) { case nil: return collationNull - case evalNumeric: + case evalNumeric, *evalTemporal: return collationNumeric case *evalJSON: return collationJSON @@ -132,50 +140,58 @@ func evalCollation(e eval) collations.TypedCollation { } } -func mergeCollations(left, right eval) (eval, eval, collations.ID, error) { - lc := evalCollation(left) - rc := evalCollation(right) - if lc.Collation == rc.Collation { - return left, right, lc.Collation, nil +func mergeCollations(c1, c2 collations.TypedCollation, t1, t2 sqltypes.Type) (collations.TypedCollation, colldata.Coercion, colldata.Coercion, error) { + if c1.Collation == c2.Collation { + return c1, nil, nil, nil } - lt := typeIsTextual(left.SQLType()) - rt := typeIsTextual(right.SQLType()) + lt := sqltypes.IsText(t1) || sqltypes.IsBinary(t1) + rt := sqltypes.IsText(t2) || sqltypes.IsBinary(t2) if !lt || !rt { if lt { - return left, right, lc.Collation, nil + return c1, nil, nil, nil } if rt { - return left, right, rc.Collation, nil + return c2, nil, nil, nil } - return left, right, collations.CollationBinaryID, nil + return collationBinary, nil, nil, nil } env := collations.Local() - mc, coerceLeft, coerceRight, err := env.MergeCollations(lc, rc, collations.CoercionOptions{ + return colldata.Merge(env, c1, c2, colldata.CoercionOptions{ ConvertToSuperset: true, ConvertWithCoercion: true, }) +} + +func mergeAndCoerceCollations(left, right eval) (eval, eval, collations.TypedCollation, error) { + lt := left.SQLType() + rt := right.SQLType() + + mc, coerceLeft, coerceRight, err := mergeCollations(evalCollation(left), evalCollation(right), lt, rt) if err != nil { - return nil, nil, 0, err + return nil, nil, collations.TypedCollation{}, err + } + if coerceLeft == nil && coerceRight == nil { + return left, right, mc, nil } - left1 := newEvalRaw(left.SQLType(), left.(*evalBytes).bytes, mc) - right1 := newEvalRaw(right.SQLType(), right.(*evalBytes).bytes, mc) + left1 := newEvalRaw(lt, left.(*evalBytes).bytes, mc) + right1 := newEvalRaw(rt, right.(*evalBytes).bytes, mc) if coerceLeft != nil { left1.bytes, err = coerceLeft(nil, left1.bytes) if err != nil { - return nil, nil, 0, err + return nil, nil, collations.TypedCollation{}, err } } if coerceRight != nil { right1.bytes, err = coerceRight(nil, right1.bytes) if err != nil { - return nil, nil, 0, err + return nil, nil, collations.TypedCollation{}, err } } - return left1, right1, mc.Collation, nil + return left1, right1, mc, nil } type collationAggregation struct { @@ -187,7 +203,7 @@ func (ca *collationAggregation) add(env *collations.Environment, tc collations.T ca.cur = tc } else { var err error - ca.cur, _, _, err = env.MergeCollations(ca.cur, tc, collations.CoercionOptions{ConvertToSuperset: true, ConvertWithCoercion: true}) + ca.cur, _, _, err = colldata.Merge(env, ca.cur, tc, colldata.CoercionOptions{ConvertToSuperset: true, ConvertWithCoercion: true}) if err != nil { return err } @@ -198,3 +214,40 @@ func (ca *collationAggregation) add(env *collations.Environment, tc collations.T func (ca *collationAggregation) result() collations.TypedCollation { return ca.cur } + +var _ Expr = (*IntroducerExpr)(nil) + +func (expr *IntroducerExpr) eval(env *ExpressionEnv) (eval, error) { + e, err := expr.Inner.eval(env) + if err != nil { + return nil, err + } + if expr.TypedCollation.Collation == collations.CollationBinaryID { + return evalToBinary(e), nil + } + return evalToVarchar(e, expr.TypedCollation.Collation, false) +} + +func (expr *IntroducerExpr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + if expr.TypedCollation.Collation == collations.CollationBinaryID { + return sqltypes.VarBinary, flagExplicitCollation + } + return sqltypes.VarChar, flagExplicitCollation +} + +func (expr *IntroducerExpr) compile(c *compiler) (ctype, error) { + _, err := expr.Inner.compile(c) + if err != nil { + return ctype{}, err + } + + var ct ctype + ct.Type = sqltypes.VarChar + if expr.TypedCollation.Collation == collations.CollationBinaryID { + ct.Type = sqltypes.VarBinary + } + c.asm.Introduce(1, ct.Type, expr.TypedCollation) + ct.Col = expr.TypedCollation + ct.Flag = flagExplicitCollation + return ct, nil +} diff --git a/go/vt/vtgate/evalengine/expr_column.go b/go/vt/vtgate/evalengine/expr_column.go index d8d220d33aa..bf3129bbb0a 100644 --- a/go/vt/vtgate/evalengine/expr_column.go +++ b/go/vt/vtgate/evalengine/expr_column.go @@ -19,12 +19,16 @@ package evalengine import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" ) type ( Column struct { - Offset int - coll collations.TypedCollation + Offset int + Type sqltypes.Type + Collation collations.TypedCollation } ) @@ -32,22 +36,72 @@ var _ Expr = (*Column)(nil) // eval implements the Expr interface func (c *Column) eval(env *ExpressionEnv) (eval, error) { - return valueToEval(env.Row[c.Offset], c.coll) + return valueToEval(env.Row[c.Offset], c.Collation) } -func (c *Column) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - // we'll try to do the best possible with the information we have +func (c *Column) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + // if we have an active row in the expression Env, use that as an authoritative source if c.Offset < len(env.Row) { value := env.Row[c.Offset] - if value.IsNull() { - return sqltypes.Null, flagNull | flagNullable + if !value.IsNull() { + // if we have a NULL value, we'll instead use the field information + return value.Type(), 0 } - return value.Type(), typeFlag(0) + } + if c.Offset < len(fields) { + return fields[c.Offset].Type, flagNullable // we probably got here because the value was NULL, + // so let's assume we are on a nullable field + } + if c.typed() { + return c.Type, flagNullable + } + return sqltypes.Unknown, flagAmbiguousType +} + +func (column *Column) compile(c *compiler) (ctype, error) { + if !column.typed() { + return ctype{}, c.unsupported(column) } - if c.Offset < len(env.Fields) { - return env.Fields[c.Offset].Type, flagNullable + col := column.Collation + if col.Collation != collations.CollationBinaryID { + col.Repertoire = collations.RepertoireUnicode } - panic("Column missing both data and field") + switch tt := column.Type; { + case sqltypes.IsSigned(tt): + c.asm.PushColumn_i(column.Offset) + case sqltypes.IsUnsigned(tt): + c.asm.PushColumn_u(column.Offset) + case sqltypes.IsFloat(tt): + c.asm.PushColumn_f(column.Offset) + case sqltypes.IsDecimal(tt): + c.asm.PushColumn_d(column.Offset) + case sqltypes.IsText(tt): + if tt == sqltypes.HexNum { + c.asm.PushColumn_hexnum(column.Offset) + } else if tt == sqltypes.HexVal { + c.asm.PushColumn_hexval(column.Offset) + } else { + c.asm.PushColumn_text(column.Offset, col) + } + case sqltypes.IsBinary(tt): + c.asm.PushColumn_bin(column.Offset) + case sqltypes.IsNull(tt): + c.asm.PushNull() + case tt == sqltypes.TypeJSON: + c.asm.PushColumn_json(column.Offset) + default: + return ctype{}, vterrors.Errorf(vtrpc.Code_UNIMPLEMENTED, "Type is not supported: %s", tt) + } + + return ctype{ + Type: column.Type, + Flag: flagNullable, + Col: col, + }, nil +} + +func (column *Column) typed() bool { + return column.Type >= 0 } diff --git a/go/vt/vtgate/evalengine/expr_column_test.go b/go/vt/vtgate/evalengine/expr_column_test.go new file mode 100644 index 00000000000..fbe45d6027c --- /dev/null +++ b/go/vt/vtgate/evalengine/expr_column_test.go @@ -0,0 +1,81 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "testing" + "time" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +func TestTypeOf(t *testing.T) { + env := &ExpressionEnv{ + BindVars: make(map[string]*querypb.BindVariable), + now: time.Now(), + } + + field1 := &querypb.Field{ + Name: "field1", + Type: querypb.Type_INT64, + Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG), + } + field2 := &querypb.Field{ + Name: "field2", + Type: querypb.Type_VARCHAR, + Flags: 0, + } + fields := []*querypb.Field{field1, field2} + + c := &Column{ + Type: sqltypes.Unknown, + } + env.Row = sqltypes.Row{sqltypes.NewInt64(10)} + + t.Run("Check when row value is not null", func(t *testing.T) { + typ, flag := c.typeof(env, fields) + if typ != sqltypes.Int64 || flag != typeFlag(0) { + t.Errorf("typeof() failed, expected sqltypes.Int64 and typeFlag 0, got %v and %v", typ, flag) + } + }) + + t.Run("Check when row value is null", func(t *testing.T) { + env.Row = sqltypes.Row{ + sqltypes.NULL, + } + typ, flag := c.typeof(env, fields) + if typ != querypb.Type_INT64 || flag != flagNullable { + t.Errorf("typeof() failed, expected querypb.Type_INT64 and flagNullable, got %v and %v", typ, flag) + } + }) + + t.Run("Check when offset is out of bounds", func(t *testing.T) { + c.Offset = 10 + typ, flag := c.typeof(env, fields) + if typ != sqltypes.Unknown || flag != flagAmbiguousType { + t.Errorf("typeof() failed, expected -1 and flagAmbiguousType, got %v and %v", typ, flag) + } + }) + t.Run("Check when typed is true", func(t *testing.T) { + c.Type = querypb.Type_FLOAT32 + typ, flag := c.typeof(env, fields) + if typ != querypb.Type_FLOAT32 || flag != flagNullable { + t.Errorf("typeof() failed, expected querypb.Type_FLOAT32 and flagNullable, got %v and %v", typ, flag) + } + }) +} diff --git a/go/vt/vtgate/evalengine/expr_compare.go b/go/vt/vtgate/evalengine/expr_compare.go index a401ddd940d..e7490370a1b 100644 --- a/go/vt/vtgate/evalengine/expr_compare.go +++ b/go/vt/vtgate/evalengine/expr_compare.go @@ -17,8 +17,12 @@ limitations under the License. package evalengine import ( + "bytes" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vthash" @@ -38,14 +42,13 @@ type ( LikeExpr struct { BinaryExpr Negate bool - Match collations.WildcardPattern + Match colldata.WildcardPattern MatchCollation collations.ID } InExpr struct { BinaryExpr Negate bool - Hashed map[vthash.Hash]int } ComparisonOp interface { @@ -108,11 +111,11 @@ func (compareGE) compare(left, right eval) (boolean, error) { func (compareNullSafeEQ) String() string { return "<=>" } func (compareNullSafeEQ) compare(left, right eval) (boolean, error) { cmp, err := evalCompareNullSafe(left, right) - return makeboolean(cmp), err + return makeboolean(cmp == 0), err } func typeIsTextual(tt sqltypes.Type) bool { - return sqltypes.IsText(tt) || sqltypes.IsBinary(tt) + return sqltypes.IsText(tt) || sqltypes.IsBinary(tt) || tt == sqltypes.Time } func compareAsStrings(l, r sqltypes.Type) bool { @@ -138,7 +141,7 @@ func compareAsDecimal(ltype, rtype sqltypes.Type) bool { } func compareAsDates(l, r sqltypes.Type) bool { - return sqltypes.IsDate(l) && sqltypes.IsDate(r) + return sqltypes.IsDateOrTime(l) && sqltypes.IsDateOrTime(r) } func compareAsDateAndString(l, r sqltypes.Type) bool { @@ -146,7 +149,7 @@ func compareAsDateAndString(l, r sqltypes.Type) bool { } func compareAsDateAndNumeric(ltype, rtype sqltypes.Type) bool { - return sqltypes.IsDate(ltype) && sqltypes.IsNumber(rtype) || sqltypes.IsNumber(ltype) && sqltypes.IsDate(rtype) + return sqltypes.IsDateOrTime(ltype) && sqltypes.IsNumber(rtype) || sqltypes.IsNumber(ltype) && sqltypes.IsDateOrTime(rtype) } func compareAsTuples(left, right eval) (*evalTuple, *evalTuple, bool) { @@ -158,15 +161,25 @@ func compareAsTuples(left, right eval) (*evalTuple, *evalTuple, bool) { return nil, nil, false } -func evalCompareNullSafe(lVal, rVal eval) (bool, error) { - if lVal == nil || rVal == nil { - return lVal == rVal, nil +func compareAsJSON(l, r sqltypes.Type) bool { + return l == sqltypes.TypeJSON || r == sqltypes.TypeJSON +} + +func evalCompareNullSafe(lVal, rVal eval) (int, error) { + if lVal == nil { + if rVal == nil { + return 0, nil + } + return -1, nil + } + if rVal == nil { + return 1, nil } if left, right, ok := compareAsTuples(lVal, rVal); ok { return evalCompareTuplesNullSafe(left.t, right.t) } n, err := evalCompare(lVal, rVal) - return n == 0, err + return n, err } func evalCompareMany(left, right []eval, fulleq bool) (int, bool, error) { @@ -209,23 +222,28 @@ func evalCompare(left, right eval) (comp int, err error) { rt := right.SQLType() switch { + case compareAsDates(lt, rt): + return compareDates(left.(*evalTemporal), right.(*evalTemporal)), nil case compareAsStrings(lt, rt): return compareStrings(left, right) case compareAsSameNumericType(lt, rt) || compareAsDecimal(lt, rt): return compareNumeric(left, right) - case compareAsDates(lt, rt): - return compareDates(left, right) case compareAsDateAndString(lt, rt): - return compareDateAndString(left, right) + return compareDateAndString(left, right), nil case compareAsDateAndNumeric(lt, rt): - // TODO: support comparison between a date and a numeric value - // queries like the ones below should be supported: - // - select 1 where 20210101 = cast("2021-01-01" as date) - // - select 1 where 2021210101 = cast("2021-01-01" as date) - // - select 1 where 104200 = cast("10:42:00" as time) - return 0, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "cannot compare a date with a numeric value") + if sqltypes.IsDateOrTime(lt) { + left = evalToNumeric(left, false) + } + if sqltypes.IsDateOrTime(rt) { + right = evalToNumeric(right, false) + } + return compareNumeric(left, right) + case compareAsJSON(lt, rt): + return compareJSON(left, right) case lt == sqltypes.Tuple || rt == sqltypes.Tuple: return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: evalCompare: tuple comparison should be handled early") + case lt == rt && fallbackBinary(lt): + return bytes.Compare(left.ToRawBytes(), right.ToRawBytes()), nil default: // Quoting MySQL Docs: // @@ -234,34 +252,57 @@ func evalCompare(left, right eval) (comp int, err error) { // comparison of floating-point numbers." // // https://dev.mysql.com/doc/refman/8.0/en/type-conversion.html - lf, _ := evalToNumeric(left).toFloat() - rf, _ := evalToNumeric(right).toFloat() + lf, _ := evalToFloat(left) + rf, _ := evalToFloat(right) return compareNumeric(lf, rf) } } -func evalCompareTuplesNullSafe(left, right []eval) (bool, error) { +// fallbackBinary compares two values of the same type using the fallback binary comparison. +// This is for types we don't yet properly support otherwise but do end up being used +// for comparisons, for example when using vdiff. +// TODO: Clean this up as we add more properly supported types and comparisons. +func fallbackBinary(t sqltypes.Type) bool { + switch t { + case sqltypes.Bit, sqltypes.Enum, sqltypes.Set, sqltypes.Geometry: + return true + } + return false +} + +func evalCompareTuplesNullSafe(left, right []eval) (int, error) { if len(left) != len(right) { panic("did not typecheck cardinality") } for idx, lResult := range left { res, err := evalCompareNullSafe(lResult, right[idx]) if err != nil { - return false, err + return 0, err } - if !res { - return false, nil + if res != 0 { + return res, nil } } - return true, nil + return 0, nil } // eval implements the Expr interface func (c *ComparisonExpr) eval(env *ExpressionEnv) (eval, error) { - left, right, err := c.arguments(env) + left, err := c.Left.eval(env) if err != nil { return nil, err } + if _, ok := c.Op.(compareNullSafeEQ); !ok && left == nil { + return nil, nil + } + right, err := c.Right.eval(env) + if err != nil { + return nil, err + } + + if _, ok := c.Op.(compareNullSafeEQ); !ok && right == nil { + return nil, nil + } cmp, err := c.Op.compare(left, right) if err != nil { return nil, err @@ -270,82 +311,266 @@ func (c *ComparisonExpr) eval(env *ExpressionEnv) (eval, error) { } // typeof implements the Expr interface -func (c *ComparisonExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f1 := c.Left.typeof(env) - _, f2 := c.Right.typeof(env) +func (c *ComparisonExpr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := c.Left.typeof(env, fields) + _, f2 := c.Right.typeof(env, fields) return sqltypes.Int64, f1 | f2 } -// eval implements the ComparisonOp interface -func (i *InExpr) eval(env *ExpressionEnv) (eval, error) { - left, right, err := i.arguments(env) +func (expr *ComparisonExpr) compileAsTuple(c *compiler) (ctype, error) { + switch expr.Op.(type) { + case compareNullSafeEQ: + c.asm.CmpTupleNullsafe() + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagIsBoolean}, nil + case compareEQ: + c.asm.CmpTuple(true) + c.asm.Cmp_eq_n() + case compareNE: + c.asm.CmpTuple(true) + c.asm.Cmp_ne_n() + case compareLT: + c.asm.CmpTuple(false) + c.asm.Cmp_lt_n() + case compareLE: + c.asm.CmpTuple(false) + c.asm.Cmp_le_n() + case compareGT: + c.asm.CmpTuple(false) + c.asm.Cmp_gt_n() + case compareGE: + c.asm.CmpTuple(false) + c.asm.Cmp_ge_n() + default: + panic("invalid comparison operator") + } + return ctype{Type: sqltypes.Int64, Flag: flagNullable | flagIsBoolean, Col: collationNumeric}, nil +} + +func (expr *ComparisonExpr) compile(c *compiler) (ctype, error) { + lt, err := expr.Left.compile(c) if err != nil { - return nil, err + return ctype{}, err } - rtuple, ok := right.(*evalTuple) - if !ok { - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "rhs of an In operation should be a tuple") + + var skip1 *jump + switch expr.Op.(type) { + case compareNullSafeEQ: + default: + skip1 = c.compileNullCheck1(lt) } - if left == nil { - return nil, nil + + rt, err := expr.Right.compile(c) + if err != nil { + return ctype{}, err + } + + if lt.Type == sqltypes.Tuple || rt.Type == sqltypes.Tuple { + if lt.Type != rt.Type { + return ctype{}, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "did not typecheck tuples during comparison") + } + return expr.compileAsTuple(c) + } + + swapped := false + var skip2 *jump + + switch expr.Op.(type) { + case compareNullSafeEQ: + skip2 = c.asm.jumpFrom() + c.asm.Cmp_nullsafe(skip2) + default: + skip2 = c.compileNullCheck1r(rt) + } + + switch { + case compareAsDates(lt.Type, rt.Type): + c.asm.CmpDates() + case compareAsStrings(lt.Type, rt.Type): + if err := c.compareAsStrings(lt, rt); err != nil { + return ctype{}, err + } + case compareAsSameNumericType(lt.Type, rt.Type) || compareAsDecimal(lt.Type, rt.Type): + swapped = c.compareNumericTypes(lt, rt) + case compareAsDateAndString(lt.Type, rt.Type): + c.asm.CmpDateString() + case compareAsDateAndNumeric(lt.Type, rt.Type): + if sqltypes.IsDateOrTime(lt.Type) { + c.asm.Convert_Ti(2) + lt.Type = sqltypes.Int64 + } + if sqltypes.IsDateOrTime(rt.Type) { + c.asm.Convert_Ti(1) + rt.Type = sqltypes.Int64 + } + swapped = c.compareNumericTypes(lt, rt) + case compareAsJSON(lt.Type, rt.Type): + if err := c.compareAsJSON(lt, rt); err != nil { + return ctype{}, err + } + + default: + lt = c.compileToFloat(lt, 2) + rt = c.compileToFloat(rt, 1) + c.asm.CmpNum_ff() + } + + cmptype := ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagIsBoolean} + + switch expr.Op.(type) { + case compareEQ: + c.asm.Cmp_eq() + case compareNE: + c.asm.Cmp_ne() + case compareLT: + if swapped { + c.asm.Cmp_gt() + } else { + c.asm.Cmp_lt() + } + case compareLE: + if swapped { + c.asm.Cmp_ge() + } else { + c.asm.Cmp_le() + } + case compareGT: + if swapped { + c.asm.Cmp_lt() + } else { + c.asm.Cmp_gt() + } + case compareGE: + if swapped { + c.asm.Cmp_le() + } else { + c.asm.Cmp_ge() + } + case compareNullSafeEQ: + c.asm.jumpDestination(skip2) + c.asm.Cmp_eq() + return cmptype, nil + + default: + panic("unexpected comparison operator") + } + + c.asm.jumpDestination(skip1, skip2) + return cmptype, nil +} + +func evalInExpr(lhs eval, rhs *evalTuple) (boolean, error) { + if lhs == nil { + return boolNULL, nil } var foundNull, found bool - var hasher = vthash.New() - if i.Hashed != nil { - if left, ok := left.(hashable); ok { - left.Hash(&hasher) - - hash := hasher.Sum128() - hasher.Reset() - - if idx, ok := i.Hashed[hash]; ok { - var numeric int - numeric, foundNull, err = evalCompareAll(left, rtuple.t[idx], true) - if err != nil { - return nil, err - } - found = numeric == 0 - } + for _, rtuple := range rhs.t { + numeric, isNull, err := evalCompareAll(lhs, rtuple, true) + if err != nil { + return boolNULL, err } - } else { - for _, rtuple := range rtuple.t { - numeric, isNull, err := evalCompareAll(left, rtuple, true) - if err != nil { - return nil, err - } - if isNull { - foundNull = true - continue - } - if numeric == 0 { - found = true - break - } + if isNull { + foundNull = true + continue + } + if numeric == 0 { + found = true + break } } switch { case found: - return newEvalBool(!i.Negate), nil + return boolTrue, nil case foundNull: - return nil, nil + return boolNULL, nil default: - return newEvalBool(i.Negate), nil + return boolFalse, nil } } -func (i *InExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f1 := i.Left.typeof(env) - _, f2 := i.Right.typeof(env) +// eval implements the ComparisonOp interface +func (i *InExpr) eval(env *ExpressionEnv) (eval, error) { + left, right, err := i.arguments(env) + if err != nil { + return nil, err + } + rtuple, ok := right.(*evalTuple) + if !ok { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "rhs of an In operation should be a tuple") + } + in, err := evalInExpr(left, rtuple) + if err != nil { + return nil, err + } + if i.Negate { + in = in.not() + } + return in.eval(), nil +} + +func (i *InExpr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := i.Left.typeof(env, fields) + _, f2 := i.Right.typeof(env, fields) return sqltypes.Int64, f1 | f2 } +func (i *InExpr) compileTable(lhs ctype, rhs TupleExpr) map[vthash.Hash]struct{} { + var ( + table = make(map[vthash.Hash]struct{}) + hasher = vthash.New() + ) + + for _, expr := range rhs { + lit, ok := expr.(*Literal) + if !ok { + return nil + } + inner, ok := lit.inner.(hashable) + if !ok { + return nil + } + + thisColl := evalCollation(lit.inner).Collation + thisTyp := lit.inner.SQLType() + + if thisTyp != lhs.Type || thisColl != lhs.Col.Collation { + return nil + } + + inner.Hash(&hasher) + table[hasher.Sum128()] = struct{}{} + hasher.Reset() + } + + return table +} + +func (expr *InExpr) compile(c *compiler) (ctype, error) { + lhs, err := expr.Left.compile(c) + if err != nil { + return ctype{}, nil + } + + rhs := expr.Right.(TupleExpr) + + if table := expr.compileTable(lhs, rhs); table != nil { + c.asm.In_table(expr.Negate, table) + } else { + _, err := rhs.compile(c) + if err != nil { + return ctype{}, err + } + c.asm.In_slow(expr.Negate) + } + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagIsBoolean}, nil +} + func (l *LikeExpr) matchWildcard(left, right []byte, coll collations.ID) bool { if l.Match != nil && l.MatchCollation == coll { return l.Match.Match(left) } - fullColl := coll.Get() + fullColl := colldata.Lookup(coll) wc := fullColl.Wildcard(right, 0, 0, 0) return wc.Match(left) } @@ -356,8 +581,8 @@ func (l *LikeExpr) eval(env *ExpressionEnv) (eval, error) { return nil, err } - var col collations.ID - left, right, col, err = mergeCollations(left, right) + var col collations.TypedCollation + left, right, col, err = mergeAndCoerceCollations(left, right) if err != nil { return nil, err } @@ -365,11 +590,11 @@ func (l *LikeExpr) eval(env *ExpressionEnv) (eval, error) { var matched bool switch { case typeIsTextual(left.SQLType()) && typeIsTextual(right.SQLType()): - matched = l.matchWildcard(left.(*evalBytes).bytes, right.(*evalBytes).bytes, col) + matched = l.matchWildcard(left.(*evalBytes).bytes, right.(*evalBytes).bytes, col.Collation) case typeIsTextual(right.SQLType()): - matched = l.matchWildcard(left.ToRawBytes(), right.(*evalBytes).bytes, col) + matched = l.matchWildcard(left.ToRawBytes(), right.(*evalBytes).bytes, col.Collation) case typeIsTextual(left.SQLType()): - matched = l.matchWildcard(left.(*evalBytes).bytes, right.ToRawBytes(), col) + matched = l.matchWildcard(left.(*evalBytes).bytes, right.ToRawBytes(), col.Collation) default: matched = l.matchWildcard(left.ToRawBytes(), right.ToRawBytes(), collations.CollationBinaryID) } @@ -377,8 +602,76 @@ func (l *LikeExpr) eval(env *ExpressionEnv) (eval, error) { } // typeof implements the ComparisonOp interface -func (l *LikeExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f1 := l.Left.typeof(env) - _, f2 := l.Right.typeof(env) +func (l *LikeExpr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := l.Left.typeof(env, fields) + _, f2 := l.Right.typeof(env, fields) return sqltypes.Int64, f1 | f2 } + +func (expr *LikeExpr) compile(c *compiler) (ctype, error) { + lt, err := expr.Left.compile(c) + if err != nil { + return ctype{}, err + } + + rt, err := expr.Right.compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck2(lt, rt) + + if !lt.isTextual() { + c.asm.Convert_xc(2, sqltypes.VarChar, c.cfg.Collation, 0, false) + lt.Col = collations.TypedCollation{ + Collation: c.cfg.Collation, + Coercibility: collations.CoerceCoercible, + Repertoire: collations.RepertoireASCII, + } + } + + if !rt.isTextual() { + c.asm.Convert_xc(1, sqltypes.VarChar, c.cfg.Collation, 0, false) + rt.Col = collations.TypedCollation{ + Collation: c.cfg.Collation, + Coercibility: collations.CoerceCoercible, + Repertoire: collations.RepertoireASCII, + } + } + + var merged collations.TypedCollation + var coerceLeft colldata.Coercion + var coerceRight colldata.Coercion + var env = collations.Local() + + if lt.Col.Collation != rt.Col.Collation { + merged, coerceLeft, coerceRight, err = colldata.Merge(env, lt.Col, rt.Col, colldata.CoercionOptions{ + ConvertToSuperset: true, + ConvertWithCoercion: true, + }) + } else { + merged = lt.Col + } + if err != nil { + return ctype{}, err + } + + if coerceLeft == nil && coerceRight == nil { + c.asm.Like_collate(expr, colldata.Lookup(merged.Collation)) + } else { + if coerceLeft == nil { + coerceLeft = func(dst, in []byte) ([]byte, error) { return in, nil } + } + if coerceRight == nil { + coerceRight = func(dst, in []byte) ([]byte, error) { return in, nil } + } + c.asm.Like_coerce(expr, &compiledCoercion{ + col: colldata.Lookup(merged.Collation), + left: coerceLeft, + right: coerceRight, + }) + } + + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagIsBoolean | flagNullable}, nil +} diff --git a/go/vt/vtgate/evalengine/expr_convert.go b/go/vt/vtgate/evalengine/expr_convert.go index 8c06a68a595..6531cdd6fae 100644 --- a/go/vt/vtgate/evalengine/expr_convert.go +++ b/go/vt/vtgate/evalengine/expr_convert.go @@ -18,7 +18,9 @@ package evalengine import ( "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -100,9 +102,9 @@ func (c *ConvertExpr) eval(env *ExpressionEnv) (eval, error) { return t, nil case "DECIMAL": m, d := c.decimalPrecision() - return evalToNumeric(e).toDecimal(m, d), nil + return evalToDecimal(e, m, d), nil case "DOUBLE", "REAL": - f, _ := evalToNumeric(e).toFloat() + f, _ := evalToFloat(e) return f, nil case "FLOAT": if c.HasLength { @@ -113,20 +115,43 @@ func (c *ConvertExpr) eval(env *ExpressionEnv) (eval, error) { } return nil, c.returnUnsupportedError() case "SIGNED", "SIGNED INTEGER": - return evalToNumeric(e).toInt64(), nil + return evalToInt64(e), nil case "UNSIGNED", "UNSIGNED INTEGER": - return evalToNumeric(e).toUint64(), nil + return evalToInt64(e).toUint64(), nil case "JSON": return evalToJSON(e) - case "DATE", "DATETIME", "YEAR", "TIME": + case "DATETIME": + switch p := c.Length; { + case p > 6: + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Too-big precision %d specified for 'CONVERT'. Maximum is 6.", p) + } + if dt := evalToDateTime(e, c.Length); dt != nil { + return dt, nil + } + return nil, nil + case "DATE": + if d := evalToDate(e); d != nil { + return d, nil + } + return nil, nil + case "TIME": + switch p := c.Length; { + case p > 6: + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Too-big precision %d specified for 'CONVERT'. Maximum is 6.", p) + } + if t := evalToTime(e, c.Length); t != nil { + return t, nil + } + return nil, nil + case "YEAR": return nil, c.returnUnsupportedError() default: panic("BUG: sqlparser emitted unknown type") } } -func (c *ConvertExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - tt, f := c.Inner.typeof(env) +func (c *ConvertExpr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + tt, f := c.Inner.typeof(env, fields) switch c.Type { case "BINARY": @@ -145,8 +170,14 @@ func (c *ConvertExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { return sqltypes.Uint64, f case "JSON": return sqltypes.TypeJSON, f - case "DATE", "DATETIME", "YEAR", "TIME": - return sqltypes.Null, f + case "DATE": + return sqltypes.Date, f + case "DATETIME": + return sqltypes.Datetime, f + case "TIME": + return sqltypes.Time, f + case "YEAR": + return sqltypes.Year, f default: panic("BUG: sqlparser emitted unknown type") } @@ -165,7 +196,7 @@ func (c *ConvertExpr) convertToBinaryType(tt sqltypes.Type) sqltypes.Type { func (c *ConvertExpr) convertToCharType(tt sqltypes.Type) sqltypes.Type { if c.HasLength { - col := c.Collation.Get() + col := colldata.Lookup(c.Collation) length := c.Length * col.Charset().MaxWidth() if length > 64*1024 { return sqltypes.Text @@ -176,6 +207,77 @@ func (c *ConvertExpr) convertToCharType(tt sqltypes.Type) sqltypes.Type { return sqltypes.VarChar } +func (conv *ConvertExpr) compile(c *compiler) (ctype, error) { + arg, err := conv.Inner.compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + var convt ctype + + switch conv.Type { + case "BINARY": + convt = ctype{Type: conv.convertToBinaryType(arg.Type), Col: collationBinary} + c.asm.Convert_xb(1, convt.Type, conv.Length, conv.HasLength) + + case "CHAR", "NCHAR": + convt = ctype{ + Type: conv.convertToCharType(arg.Type), + Col: collations.TypedCollation{Collation: conv.Collation}, + } + c.asm.Convert_xc(1, convt.Type, convt.Col.Collation, conv.Length, conv.HasLength) + + case "DECIMAL": + convt = ctype{Type: sqltypes.Decimal, Col: collationNumeric} + m, d := conv.decimalPrecision() + c.asm.Convert_xd(1, m, d) + + case "DOUBLE", "REAL": + convt = c.compileToFloat(arg, 1) + + case "FLOAT": + return ctype{}, c.unsupported(conv) + + case "SIGNED", "SIGNED INTEGER": + convt = c.compileToInt64(arg, 1) + + case "UNSIGNED", "UNSIGNED INTEGER": + convt = c.compileToUint64(arg, 1) + + case "JSON": + // TODO: what does NULL map to? + convt, err = c.compileToJSON(arg, 1) + if err != nil { + return ctype{}, err + } + + case "DATE": + convt = c.compileToDate(arg, 1) + + case "DATETIME": + switch p := conv.Length; { + case p > 6: + return ctype{}, c.unsupported(conv) + } + convt = c.compileToDateTime(arg, 1, conv.Length) + + case "TIME": + switch p := conv.Length; { + case p > 6: + return ctype{}, c.unsupported(conv) + } + convt = c.compileToTime(arg, 1, conv.Length) + + default: + return ctype{}, c.unsupported(conv) + } + + c.asm.jumpDestination(skip) + convt.Flag = arg.Flag | flagNullable + return convt, nil +} + func (c *ConvertUsingExpr) eval(env *ExpressionEnv) (eval, error) { e, err := c.Inner.eval(env) if err != nil { @@ -192,7 +294,25 @@ func (c *ConvertUsingExpr) eval(env *ExpressionEnv) (eval, error) { return e, nil } -func (c *ConvertUsingExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f := c.Inner.typeof(env) +func (c *ConvertUsingExpr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := c.Inner.typeof(env, fields) return sqltypes.VarChar, f | flagNullable } + +func (conv *ConvertUsingExpr) compile(c *compiler) (ctype, error) { + ct, err := conv.Inner.compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(ct) + c.asm.Convert_xc(1, sqltypes.VarChar, conv.Collation, 0, false) + c.asm.jumpDestination(skip) + + col := collations.TypedCollation{ + Collation: conv.Collation, + Coercibility: collations.CoerceCoercible, + Repertoire: collations.RepertoireASCII, + } + return ctype{Type: sqltypes.VarChar, Flag: flagNullable, Col: col}, nil +} diff --git a/go/vt/vtgate/evalengine/expr_env.go b/go/vt/vtgate/evalengine/expr_env.go index bcefe10f81b..e67e25e70a6 100644 --- a/go/vt/vtgate/evalengine/expr_env.go +++ b/go/vt/vtgate/evalengine/expr_env.go @@ -17,54 +17,105 @@ limitations under the License. package evalengine import ( - "vitess.io/vitess/go/mysql/collations" + "context" + "errors" + "strings" + "time" + + "vitess.io/vitess/go/mysql/datetime" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/callerid" querypb "vitess.io/vitess/go/vt/proto/query" ) +type VCursor interface { + TimeZone() *time.Location + GetKeyspace() string +} + type ( // ExpressionEnv contains the environment that the expression // evaluates in, such as the current row and bindvars ExpressionEnv struct { - BindVars map[string]*querypb.BindVariable - DefaultCollation collations.ID + vm vmstate - // Row and Fields should line up - Row []sqltypes.Value - Fields []*querypb.Field + BindVars map[string]*querypb.BindVariable + Row []sqltypes.Value + + // internal state + now time.Time + vc VCursor + user *querypb.VTGateCallerID } ) +func (env *ExpressionEnv) time(utc bool) datetime.DateTime { + if utc { + return datetime.NewDateTimeFromStd(env.now.UTC()) + } + return datetime.NewDateTimeFromStd(env.now) +} + +func (env *ExpressionEnv) currentUser() string { + if env.user == nil { + return "vt_app@localhost" + } + user := env.user.GetUsername() + if !strings.Contains(user, "@") { + user = user + "@localhost" + } + return user +} + +func (env *ExpressionEnv) currentDatabase() string { + if env.vc == nil { + return "" + } + return env.vc.GetKeyspace() +} + +func (env *ExpressionEnv) currentTimezone() *time.Location { + if env.vc == nil { + return nil + } + return env.vc.TimeZone() +} + func (env *ExpressionEnv) Evaluate(expr Expr) (EvalResult, error) { - if env == nil { - panic("ExpressionEnv == nil") + if p, ok := expr.(*CompiledExpr); ok { + return env.EvaluateVM(p) } e, err := expr.eval(env) return EvalResult{e}, err } -func (env *ExpressionEnv) TypeOf(expr Expr) (ty sqltypes.Type, err error) { - ty, _ = expr.typeof(env) - return -} +var ErrAmbiguousType = errors.New("the type of this expression cannot be statically computed") -func (env *ExpressionEnv) collation() collations.TypedCollation { - return collations.TypedCollation{ - Collation: env.DefaultCollation, - Coercibility: collations.CoerceCoercible, - Repertoire: collations.RepertoireASCII, +func (env *ExpressionEnv) TypeOf(expr Expr, fields []*querypb.Field) (sqltypes.Type, typeFlag, error) { + ty, f := expr.typeof(env, fields) + if f&flagAmbiguousType != 0 { + return ty, f, ErrAmbiguousType } + return ty, f, nil } // EmptyExpressionEnv returns a new ExpressionEnv with no bind vars or row func EmptyExpressionEnv() *ExpressionEnv { - return EnvWithBindVars(map[string]*querypb.BindVariable{}, collations.Unknown) + return NewExpressionEnv(context.Background(), nil, nil) } -// EnvWithBindVars returns an expression environment with no current row, but with bindvars -func EnvWithBindVars(bindVars map[string]*querypb.BindVariable, coll collations.ID) *ExpressionEnv { - if coll == collations.Unknown { - coll = collations.Default() +// NewExpressionEnv returns an expression environment with no current row, but with bindvars +func NewExpressionEnv(ctx context.Context, bindVars map[string]*querypb.BindVariable, vc VCursor) *ExpressionEnv { + env := &ExpressionEnv{BindVars: bindVars, vc: vc} + env.user = callerid.ImmediateCallerIDFromContext(ctx) + + // The current time for this ExpressionEnv is set only once, during creation. + // This is to ensure that all expressions in the same ExpressionEnv evaluate NOW() + // and similar SQL functions to the same value. + env.now = time.Now() + + if tz := env.currentTimezone(); tz != nil { + env.now = env.now.In(tz) } - return &ExpressionEnv{BindVars: bindVars, DefaultCollation: coll} + return env } diff --git a/go/vt/vtgate/evalengine/expr_literal.go b/go/vt/vtgate/evalengine/expr_literal.go index 7a6867704c6..f44e333c976 100644 --- a/go/vt/vtgate/evalengine/expr_literal.go +++ b/go/vt/vtgate/evalengine/expr_literal.go @@ -20,6 +20,7 @@ import ( "math" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" ) type ( @@ -36,7 +37,7 @@ func (l *Literal) eval(_ *ExpressionEnv) (eval, error) { } // typeof implements the Expr interface -func (l *Literal) typeof(*ExpressionEnv) (sqltypes.Type, typeFlag) { +func (l *Literal) typeof(*ExpressionEnv, []*querypb.Field) (sqltypes.Type, typeFlag) { var f typeFlag switch e := l.inner.(type) { case nil: @@ -52,6 +53,9 @@ func (l *Literal) typeof(*ExpressionEnv) (sqltypes.Type, typeFlag) { if e.i == math.MinInt64 { f |= flagIntegerUdf } + if e == evalBoolTrue || e == evalBoolFalse { + f |= flagIsBoolean + } case *evalUint64: if e.hexLiteral { f |= flagHex @@ -65,3 +69,15 @@ func (l *Literal) typeof(*ExpressionEnv) (sqltypes.Type, typeFlag) { } return l.inner.SQLType(), f } + +func (l *Literal) compile(c *compiler) (ctype, error) { + if l.inner == nil { + c.asm.PushNull() + } else if err := c.asm.PushLiteral(l.inner); err != nil { + return ctype{}, err + } + + t, f := l.typeof(nil, nil) + return ctype{t, f, evalCollation(l.inner)}, nil + +} diff --git a/go/vt/vtgate/evalengine/expr_logical.go b/go/vt/vtgate/evalengine/expr_logical.go index 79ed6b97d90..189b68e4136 100644 --- a/go/vt/vtgate/evalengine/expr_logical.go +++ b/go/vt/vtgate/evalengine/expr_logical.go @@ -19,13 +19,14 @@ package evalengine import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" ) type ( LogicalExpr struct { BinaryExpr - op func(left, right boolean) boolean + op func(left, right Expr, env *ExpressionEnv) (boolean, error) opname string } @@ -35,6 +36,10 @@ type ( boolean int8 + IntervalExpr struct { + CallExpr + } + // IsExpr represents the IS expression in MySQL. // boolean_primary IS [NOT] {TRUE | FALSE | NULL} IsExpr struct { @@ -54,6 +59,7 @@ type ( } ) +var _ Expr = (*IntervalExpr)(nil) var _ Expr = (*IsExpr)(nil) var _ Expr = (*LogicalExpr)(nil) var _ Expr = (*NotExpr)(nil) @@ -100,55 +106,100 @@ func (left boolean) not() boolean { } } -func (left boolean) and(right boolean) boolean { +func opAnd(le, re Expr, env *ExpressionEnv) (boolean, error) { // Logical AND. // Evaluates to 1 if all operands are nonzero and not NULL, to 0 if one or more operands are 0, otherwise NULL is returned. + l, err := le.eval(env) + if err != nil { + return boolNULL, err + } + + left := evalIsTruthy(l) + if left == boolFalse { + return boolFalse, nil + } + + r, err := re.eval(env) + if err != nil { + return boolNULL, err + } + right := evalIsTruthy(r) + switch { case left == boolTrue && right == boolTrue: - return boolTrue - case left == boolFalse || right == boolFalse: - return boolFalse + return boolTrue, nil + case right == boolFalse: + return boolFalse, nil default: - return boolNULL + return boolNULL, nil } } -func (left boolean) or(right boolean) boolean { +func opOr(le, re Expr, env *ExpressionEnv) (boolean, error) { // Logical OR. When both operands are non-NULL, the result is 1 if any operand is nonzero, and 0 otherwise. // With a NULL operand, the result is 1 if the other operand is nonzero, and NULL otherwise. // If both operands are NULL, the result is NULL. + l, err := le.eval(env) + if err != nil { + return boolNULL, err + } + + left := evalIsTruthy(l) + if left == boolTrue { + return boolTrue, nil + } + + r, err := re.eval(env) + if err != nil { + return boolNULL, err + } + right := evalIsTruthy(r) + switch { case left == boolNULL: if right == boolTrue { - return boolTrue + return boolTrue, nil } - return boolNULL + return boolNULL, nil case right == boolNULL: - if left == boolTrue { - return boolTrue - } - return boolNULL + return boolNULL, nil default: - if left == boolTrue || right == boolTrue { - return boolTrue + if right == boolTrue { + return boolTrue, nil } - return boolFalse + return boolFalse, nil } } -func (left boolean) xor(right boolean) boolean { +func opXor(le, re Expr, env *ExpressionEnv) (boolean, error) { // Logical XOR. Returns NULL if either operand is NULL. // For non-NULL operands, evaluates to 1 if an odd number of operands is nonzero, otherwise 0 is returned. + l, err := le.eval(env) + if err != nil { + return boolNULL, err + } + + left := evalIsTruthy(l) + if left == boolNULL { + return boolNULL, nil + } + + r, err := re.eval(env) + if err != nil { + return boolNULL, err + } + right := evalIsTruthy(r) + switch { case left == boolNULL || right == boolNULL: - return boolNULL + return boolNULL, nil default: if left != right { - return boolTrue + return boolTrue, nil } - return boolFalse + return boolFalse, nil } } @@ -160,23 +211,244 @@ func (n *NotExpr) eval(env *ExpressionEnv) (eval, error) { return evalIsTruthy(e).not().eval(), nil } -func (n *NotExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, flags := n.Inner.typeof(env) - return sqltypes.Uint64, flags +func (n *NotExpr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, flags := n.Inner.typeof(env, fields) + return sqltypes.Int64, flags | flagIsBoolean +} + +func (expr *NotExpr) compile(c *compiler) (ctype, error) { + arg, err := expr.Inner.compile(c) + if err != nil { + return ctype{}, nil + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Null: + // No-op. + case sqltypes.Int64: + c.asm.Not_i() + case sqltypes.Uint64: + c.asm.Not_u() + case sqltypes.Float64: + c.asm.Not_f() + case sqltypes.Decimal: + c.asm.Not_d() + case sqltypes.VarChar, sqltypes.VarBinary: + if arg.isHexOrBitLiteral() { + c.asm.Convert_xu(1) + c.asm.Not_u() + } else { + c.asm.Convert_bB(1) + c.asm.Not_i() + } + case sqltypes.TypeJSON: + c.asm.Convert_jB(1) + c.asm.Not_i() + case sqltypes.Time, sqltypes.Datetime, sqltypes.Date, sqltypes.Timestamp: + c.asm.Convert_TB(1) + c.asm.Not_i() + default: + c.asm.Convert_bB(1) + c.asm.Not_i() + } + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Flag: flagNullable | flagIsBoolean, Col: collationNumeric}, nil } func (l *LogicalExpr) eval(env *ExpressionEnv) (eval, error) { - left, right, err := l.arguments(env) + res, err := l.op(l.Left, l.Right, env) + return res.eval(), err +} + +func (l *LogicalExpr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := l.Left.typeof(env, fields) + _, f2 := l.Right.typeof(env, fields) + return sqltypes.Int64, f1 | f2 | flagIsBoolean +} + +func (expr *LogicalExpr) compile(c *compiler) (ctype, error) { + lt, err := expr.Left.compile(c) + if err != nil { + return ctype{}, err + } + + switch lt.Type { + case sqltypes.Null, sqltypes.Int64: + // No-op. + case sqltypes.Uint64: + c.asm.Convert_uB(1) + case sqltypes.Float64: + c.asm.Convert_fB(1) + case sqltypes.Decimal: + c.asm.Convert_dB(1) + case sqltypes.VarChar, sqltypes.VarBinary: + if lt.isHexOrBitLiteral() { + c.asm.Convert_xu(1) + c.asm.Convert_uB(1) + } else { + c.asm.Convert_bB(1) + } + case sqltypes.TypeJSON: + c.asm.Convert_jB(1) + case sqltypes.Time, sqltypes.Datetime, sqltypes.Date, sqltypes.Timestamp: + c.asm.Convert_TB(1) + default: + c.asm.Convert_bB(1) + } + + jump := c.asm.LogicalLeft(expr.opname) + + rt, err := expr.Right.compile(c) + if err != nil { + return ctype{}, err + } + + switch rt.Type { + case sqltypes.Null, sqltypes.Int64: + // No-op. + case sqltypes.Uint64: + c.asm.Convert_uB(1) + case sqltypes.Float64: + c.asm.Convert_fB(1) + case sqltypes.Decimal: + c.asm.Convert_dB(1) + case sqltypes.VarChar, sqltypes.VarBinary: + if rt.isHexOrBitLiteral() { + c.asm.Convert_xu(1) + c.asm.Convert_uB(1) + } else { + c.asm.Convert_bB(1) + } + case sqltypes.TypeJSON: + c.asm.Convert_jB(1) + case sqltypes.Time, sqltypes.Datetime, sqltypes.Date, sqltypes.Timestamp: + c.asm.Convert_TB(1) + default: + c.asm.Convert_bB(1) + } + + c.asm.LogicalRight(expr.opname) + c.asm.jumpDestination(jump) + return ctype{Type: sqltypes.Int64, Flag: flagNullable | flagIsBoolean, Col: collationNumeric}, nil +} + +func intervalCompare(n, val eval) (int, bool, error) { + if val == nil { + return 1, true, nil + } + + val = evalToNumeric(val, false) + cmp, err := compareNumeric(n, val) + return cmp, false, err +} + +func findInterval(args []eval) (int64, error) { + n := args[0] + start := int64(1) + end := int64(len(args) - 1) + for { + if start > end { + return end, nil + } + + val := args[start] + cmp, _, err := intervalCompare(n, val) + if err != nil { + return 0, err + } + + if cmp < 0 { + return start - 1, nil + } + + pos := start + (end-start)/2 + + val = args[pos] + cmp, null, err := intervalCompare(n, val) + if err != nil { + return 0, err + } + + prevPos := pos + for null { + prevPos-- + if prevPos < start { + break + } + prevVal := args[prevPos] + cmp, null, err = intervalCompare(n, prevVal) + if err != nil { + return 0, err + } + } + + if cmp < 0 { + end = pos - 1 + } else { + start = pos + 1 + } + } +} + +func (i *IntervalExpr) eval(env *ExpressionEnv) (eval, error) { + args := make([]eval, 0, len(i.Arguments)) + for _, arg := range i.Arguments { + val, err := arg.eval(env) + if err != nil { + return nil, err + } + args = append(args, val) + } + + if args[0] == nil { + return newEvalInt64(-1), nil + } + + args[0] = evalToNumeric(args[0], false) + + idx, err := findInterval(args) if err != nil { return nil, err } - return l.op(evalIsTruthy(left), evalIsTruthy(right)).eval(), nil + return newEvalInt64(idx), err } -func (l *LogicalExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f1 := l.Left.typeof(env) - _, f2 := l.Right.typeof(env) - return sqltypes.Uint64, f1 | f2 +func (i *IntervalExpr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, 0 +} + +func (i *IntervalExpr) compile(c *compiler) (ctype, error) { + n, err := i.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + switch n.Type { + case sqltypes.Int64, sqltypes.Uint64, sqltypes.Float64, sqltypes.Decimal: + default: + s := c.compileNullCheck1(n) + c.asm.Convert_xf(1) + c.asm.jumpDestination(s) + } + + for j := 1; j < len(i.Arguments); j++ { + argType, err := i.Arguments[j].compile(c) + if err != nil { + return ctype{}, err + } + switch argType.Type { + case sqltypes.Int64, sqltypes.Uint64, sqltypes.Float64, sqltypes.Decimal: + default: + s := c.compileNullCheck1(argType) + c.asm.Convert_xf(1) + c.asm.jumpDestination(s) + } + } + + c.asm.Interval(len(i.Arguments) - 1) + return ctype{Type: sqltypes.Int64, Col: collationNumeric}, nil } func (i *IsExpr) eval(env *ExpressionEnv) (eval, error) { @@ -187,10 +459,19 @@ func (i *IsExpr) eval(env *ExpressionEnv) (eval, error) { return newEvalBool(i.Check(e)), nil } -func (i *IsExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { +func (i *IsExpr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { return sqltypes.Int64, 0 } +func (is *IsExpr) compile(c *compiler) (ctype, error) { + _, err := is.Inner.compile(c) + if err != nil { + return ctype{}, err + } + c.asm.Is(is.Check) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagIsBoolean}, nil +} + func (c *CaseExpr) eval(env *ExpressionEnv) (eval, error) { var ca collationAggregation var local = collations.Local() @@ -238,21 +519,21 @@ func (c *CaseExpr) eval(env *ExpressionEnv) (eval, error) { if !matched { return nil, nil } - t, _ := c.typeof(env) + t, _ := c.typeof(env, nil) return evalCoerce(result, t, ca.result().Collation) } -func (c *CaseExpr) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { +func (c *CaseExpr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { var ta typeAggregation var resultFlag typeFlag for _, whenthen := range c.cases { - t, f := whenthen.then.typeof(env) + t, f := whenthen.then.typeof(env, fields) ta.add(t, f) resultFlag = resultFlag | f } if c.Else != nil { - t, f := c.Else.typeof(env) + t, f := c.Else.typeof(env, fields) ta.add(t, f) resultFlag = f } @@ -310,4 +591,47 @@ func (c *CaseExpr) simplify(env *ExpressionEnv) error { return err } +func (cs *CaseExpr) compile(c *compiler) (ctype, error) { + var ca collationAggregation + var ta typeAggregation + var local = collations.Local() + + for _, wt := range cs.cases { + when, err := wt.when.compile(c) + if err != nil { + return ctype{}, err + } + + if err := c.compileCheckTrue(when, 1); err != nil { + return ctype{}, err + } + + then, err := wt.then.compile(c) + if err != nil { + return ctype{}, err + } + + ta.add(then.Type, then.Flag) + if err := ca.add(local, then.Col); err != nil { + return ctype{}, err + } + } + + if cs.Else != nil { + els, err := cs.Else.compile(c) + if err != nil { + return ctype{}, err + } + + ta.add(els.Type, els.Flag) + if err := ca.add(local, els.Col); err != nil { + return ctype{}, err + } + } + + ct := ctype{Type: ta.result(), Col: ca.result()} + c.asm.CmpCase(len(cs.cases), cs.Else != nil, ct.Type, ct.Col) + return ct, nil +} + var _ Expr = (*CaseExpr)(nil) diff --git a/go/vt/vtgate/evalengine/expr_tuple.go b/go/vt/vtgate/evalengine/expr_tuple.go index 5cfbe845c26..79f8edbbc09 100644 --- a/go/vt/vtgate/evalengine/expr_tuple.go +++ b/go/vt/vtgate/evalengine/expr_tuple.go @@ -16,7 +16,10 @@ limitations under the License. package evalengine -import "vitess.io/vitess/go/sqltypes" +import ( + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" +) type ( TupleExpr []Expr @@ -37,6 +40,17 @@ func (t TupleExpr) eval(env *ExpressionEnv) (eval, error) { } // typeof implements the Expr interface -func (t TupleExpr) typeof(*ExpressionEnv) (sqltypes.Type, typeFlag) { +func (t TupleExpr) typeof(*ExpressionEnv, []*querypb.Field) (sqltypes.Type, typeFlag) { return sqltypes.Tuple, flagNullable } + +func (tuple TupleExpr) compile(c *compiler) (ctype, error) { + for _, arg := range tuple { + _, err := arg.compile(c) + if err != nil { + return ctype{}, err + } + } + c.asm.PackTuple(len(tuple)) + return ctype{Type: sqltypes.Tuple, Col: collationBinary}, nil +} diff --git a/go/vt/vtgate/evalengine/fn_base64.go b/go/vt/vtgate/evalengine/fn_base64.go index 7cdc2bafe98..0e27052641f 100644 --- a/go/vt/vtgate/evalengine/fn_base64.go +++ b/go/vt/vtgate/evalengine/fn_base64.go @@ -17,14 +17,18 @@ limitations under the License. package evalengine import ( + "bytes" "encoding/base64" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" ) type ( builtinToBase64 struct { CallExpr + collate collations.ID } builtinFromBase64 struct { @@ -35,7 +39,36 @@ type ( var _ Expr = (*builtinToBase64)(nil) var _ Expr = (*builtinFromBase64)(nil) -var mysqlBase64 = base64.StdEncoding +// MySQL wraps every 76 characters with a newline. That maps +// to a 57 byte input. So we encode here in blocks of 57 bytes +// with then each a newline. +var mysqlBase64OutLineLength = 76 +var mysqlBase64InLineLength = (mysqlBase64OutLineLength / 4) * 3 + +func mysqlBase64Encode(in []byte) []byte { + newlines := len(in) / mysqlBase64InLineLength + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(in))+newlines) + out := encoded + for len(in) > mysqlBase64InLineLength { + base64.StdEncoding.Encode(out, in[:mysqlBase64InLineLength]) + in = in[mysqlBase64InLineLength:] + out[mysqlBase64OutLineLength] = '\n' + out = out[mysqlBase64OutLineLength+1:] + } + base64.StdEncoding.Encode(out, in) + return encoded +} + +func mysqlBase64Decode(in []byte) ([]byte, error) { + in = bytes.Trim(in, " \t\r\n") + decoded := make([]byte, len(in)/4*3) + + n, err := base64.StdEncoding.Decode(decoded, in) + if err != nil { + return nil, err + } + return decoded[:n], nil +} func (call *builtinToBase64) eval(env *ExpressionEnv) (eval, error) { arg, err := call.arg1(env) @@ -47,23 +80,48 @@ func (call *builtinToBase64) eval(env *ExpressionEnv) (eval, error) { } b := evalToBinary(arg) - encoded := make([]byte, mysqlBase64.EncodedLen(len(b.bytes))) - mysqlBase64.Encode(encoded, b.bytes) + encoded := mysqlBase64Encode(b.bytes) if arg.SQLType() == sqltypes.Blob || arg.SQLType() == sqltypes.TypeJSON { - return newEvalRaw(sqltypes.Text, encoded, env.collation()), nil + return newEvalRaw(sqltypes.Text, encoded, defaultCoercionCollation(call.collate)), nil } - return newEvalText(encoded, env.collation()), nil + return newEvalText(encoded, defaultCoercionCollation(call.collate)), nil } -func (call *builtinToBase64) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - tt, f := call.Arguments[0].typeof(env) +func (call *builtinToBase64) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + tt, f := call.Arguments[0].typeof(env, fields) if tt == sqltypes.Blob || tt == sqltypes.TypeJSON { return sqltypes.Text, f } return sqltypes.VarChar, f } +func (call *builtinToBase64) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(str) + + t := sqltypes.VarChar + if str.Type == sqltypes.Blob || str.Type == sqltypes.TypeJSON { + t = sqltypes.Text + } + + switch { + case str.isTextual(): + default: + c.asm.Convert_xb(1, t, 0, false) + } + + col := defaultCoercionCollation(c.cfg.Collation) + c.asm.Fn_TO_BASE64(t, col) + c.asm.jumpDestination(skip) + + return ctype{Type: t, Col: col}, nil +} + func (call *builtinFromBase64) eval(env *ExpressionEnv) (eval, error) { arg, err := call.arg1(env) if err != nil { @@ -74,14 +132,45 @@ func (call *builtinFromBase64) eval(env *ExpressionEnv) (eval, error) { } b := evalToBinary(arg) - decoded := make([]byte, mysqlBase64.DecodedLen(len(b.bytes))) - if n, err := mysqlBase64.Decode(decoded, b.bytes); err == nil { - return newEvalBinary(decoded[:n]), nil + decoded, err := mysqlBase64Decode(b.bytes) + if err != nil { + return nil, nil + } + if arg.SQLType() == sqltypes.Text || arg.SQLType() == sqltypes.TypeJSON { + return newEvalRaw(sqltypes.Blob, decoded, collationBinary), nil } - return nil, nil + return newEvalBinary(decoded), nil } -func (call *builtinFromBase64) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f := call.Arguments[0].typeof(env) +func (call *builtinFromBase64) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + tt, f := call.Arguments[0].typeof(env, fields) + if tt == sqltypes.Text || tt == sqltypes.TypeJSON { + return sqltypes.Blob, f | flagNullable + } return sqltypes.VarBinary, f | flagNullable } + +func (call *builtinFromBase64) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(str) + + t := sqltypes.VarBinary + if str.Type == sqltypes.Blob || str.Type == sqltypes.TypeJSON { + t = sqltypes.Blob + } + + switch { + case str.isTextual(): + default: + c.asm.Convert_xb(1, t, 0, false) + } + + c.asm.Fn_FROM_BASE64(t) + c.asm.jumpDestination(skip) + + return ctype{Type: t, Col: collationBinary}, nil +} diff --git a/go/vt/vtgate/evalengine/fn_bit.go b/go/vt/vtgate/evalengine/fn_bit.go index c6d7e9a5995..5a89ff41276 100644 --- a/go/vt/vtgate/evalengine/fn_bit.go +++ b/go/vt/vtgate/evalengine/fn_bit.go @@ -20,6 +20,7 @@ import ( "math/bits" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" ) type builtinBitCount struct { @@ -44,14 +45,34 @@ func (call *builtinBitCount) eval(env *ExpressionEnv) (eval, error) { count += bits.OnesCount8(b) } } else { - u := evalToNumeric(arg).toInt64() + u := evalToInt64(arg) count = bits.OnesCount64(uint64(u.i)) } return newEvalInt64(int64(count)), nil } -func (call *builtinBitCount) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f := call.Arguments[0].typeof(env) +func (call *builtinBitCount) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) // The MySQL docs are actually wrong and this returns an int64, not a uint64. return sqltypes.Int64, f } + +func (expr *builtinBitCount) compile(c *compiler) (ctype, error) { + ct, err := expr.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(ct) + + if ct.Type == sqltypes.VarBinary && !ct.isHexOrBitLiteral() { + c.asm.BitCount_b() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationBinary}, nil + } + + _ = c.compileToBitwiseUint64(ct, 1) + c.asm.BitCount_u() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationBinary}, nil +} diff --git a/go/vt/vtgate/evalengine/fn_compare.go b/go/vt/vtgate/evalengine/fn_compare.go index 450f7a654e5..ee4f61cb596 100644 --- a/go/vt/vtgate/evalengine/fn_compare.go +++ b/go/vt/vtgate/evalengine/fn_compare.go @@ -21,7 +21,11 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" ) type ( @@ -53,15 +57,19 @@ func (b *builtinCoalesce) eval(env *ExpressionEnv) (eval, error) { return nil, nil } -func (b *builtinCoalesce) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { +func (b *builtinCoalesce) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { var ta typeAggregation for _, arg := range b.Arguments { - tt, f := arg.typeof(env) + tt, f := arg.typeof(env, fields) ta.add(tt, f) } return ta.result(), flagNullable } +func (b *builtinCoalesce) compile(c *compiler) (ctype, error) { + return ctype{}, c.unsupported(b) +} + func getMultiComparisonFunc(args []eval) multiComparisonFunc { var ( integersI int @@ -157,13 +165,13 @@ func compareAllInteger_i(args []eval, cmp int) (eval, error) { } func compareAllFloat(args []eval, cmp int) (eval, error) { - candidateF, ok := evalToNumeric(args[0]).toFloat() + candidateF, ok := evalToFloat(args[0]) if !ok { return nil, errDecimalOutOfRange } for _, arg := range args[1:] { - thisF, ok := evalToNumeric(arg).toFloat() + thisF, ok := evalToFloat(arg) if !ok { return nil, errDecimalOutOfRange } @@ -182,11 +190,11 @@ func evalDecimalPrecision(e eval) int32 { } func compareAllDecimal(args []eval, cmp int) (eval, error) { - decExtreme := evalToNumeric(args[0]).toDecimal(0, 0).dec + decExtreme := evalToDecimal(args[0], 0, 0).dec precExtreme := evalDecimalPrecision(args[0]) for _, arg := range args[1:] { - d := evalToNumeric(arg).toDecimal(0, 0).dec + d := evalToDecimal(arg, 0, 0).dec if (cmp < 0) == (d.Cmp(decExtreme) < 0) { decExtreme = d } @@ -208,11 +216,11 @@ func compareAllText(args []eval, cmp int) (eval, error) { if err := ca.add(env, col); err != nil { return nil, err } - charsets = append(charsets, col.Collation.Get().Charset()) + charsets = append(charsets, colldata.Lookup(col.Collation).Charset()) } tc := ca.result() - col := tc.Collation.Get() + col := colldata.Lookup(tc.Collation) cs := col.Charset() b1, err := charset.Convert(nil, cs, args[0].ToRawBytes(), charsets[0]) @@ -254,7 +262,7 @@ func (call *builtinMultiComparison) eval(env *ExpressionEnv) (eval, error) { return getMultiComparisonFunc(args)(args, call.cmp) } -func (call *builtinMultiComparison) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { +func (call *builtinMultiComparison) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { var ( integersI int integersU int @@ -266,7 +274,7 @@ func (call *builtinMultiComparison) typeof(env *ExpressionEnv) (sqltypes.Type, t ) for _, expr := range call.Arguments { - tt, f := expr.typeof(env) + tt, f := expr.typeof(env, fields) flags |= f switch tt { @@ -315,6 +323,107 @@ func (call *builtinMultiComparison) typeof(env *ExpressionEnv) (sqltypes.Type, t panic("unexpected argument type") } +func (call *builtinMultiComparison) compile_c(c *compiler, args []ctype) (ctype, error) { + env := collations.Local() + + var ca collationAggregation + for _, arg := range args { + if err := ca.add(env, arg.Col); err != nil { + return ctype{}, err + } + } + + tc := ca.result() + c.asm.Fn_MULTICMP_c(len(args), call.cmp < 0, tc) + return ctype{Type: sqltypes.VarChar, Col: tc}, nil +} + +func (call *builtinMultiComparison) compile_d(c *compiler, args []ctype) (ctype, error) { + for i, tt := range args { + c.compileToDecimal(tt, len(args)-i) + } + c.asm.Fn_MULTICMP_d(len(args), call.cmp < 0) + return ctype{Type: sqltypes.Decimal, Col: collationNumeric}, nil +} + +func (call *builtinMultiComparison) compile(c *compiler) (ctype, error) { + var ( + signed int + unsigned int + floats int + decimals int + text int + binary int + args []ctype + ) + + /* + If any argument is NULL, the result is NULL. No comparison is needed. + If all arguments are integer-valued, they are compared as integers. + If at least one argument is double precision, they are compared as double-precision values. Otherwise, if at least one argument is a DECIMAL value, they are compared as DECIMAL values. + If the arguments comprise a mix of numbers and strings, they are compared as strings. + If any argument is a nonbinary (character) string, the arguments are compared as nonbinary strings. + In all other cases, the arguments are compared as binary strings. + */ + + for _, expr := range call.Arguments { + tt, err := expr.compile(c) + if err != nil { + return ctype{}, err + } + + args = append(args, tt) + + switch tt.Type { + case sqltypes.Int64: + signed++ + case sqltypes.Uint64: + unsigned++ + case sqltypes.Float64: + floats++ + case sqltypes.Decimal: + decimals++ + case sqltypes.Text, sqltypes.VarChar: + text++ + case sqltypes.Blob, sqltypes.Binary, sqltypes.VarBinary: + binary++ + default: + return ctype{}, c.unsupported(call) + } + } + + if signed+unsigned == len(args) { + if signed == len(args) { + c.asm.Fn_MULTICMP_i(len(args), call.cmp < 0) + return ctype{Type: sqltypes.Int64, Col: collationNumeric}, nil + } + if unsigned == len(args) { + c.asm.Fn_MULTICMP_u(len(args), call.cmp < 0) + return ctype{Type: sqltypes.Uint64, Col: collationNumeric}, nil + } + return call.compile_d(c, args) + } + if binary > 0 || text > 0 { + if text > 0 { + return call.compile_c(c, args) + } + c.asm.Fn_MULTICMP_b(len(args), call.cmp < 0) + return ctype{Type: sqltypes.VarBinary, Col: collationBinary}, nil + } else { + if floats > 0 { + for i, tt := range args { + c.compileToFloat(tt, len(args)-i) + } + c.asm.Fn_MULTICMP_f(len(args), call.cmp < 0) + return ctype{Type: sqltypes.Float64, Col: collationNumeric}, nil + } + if decimals > 0 { + return call.compile_d(c, args) + } + } + return ctype{}, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected argument for GREATEST/LEAST") +} + type typeAggregation struct { double uint16 decimal uint16 diff --git a/go/vt/vtgate/evalengine/fn_crypto.go b/go/vt/vtgate/evalengine/fn_crypto.go new file mode 100644 index 00000000000..8b3eeac2f99 --- /dev/null +++ b/go/vt/vtgate/evalengine/fn_crypto.go @@ -0,0 +1,269 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/hex" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +type builtinMD5 struct { + CallExpr + collate collations.ID +} + +var _ Expr = (*builtinMD5)(nil) + +func (call *builtinMD5) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + b := evalToBinary(arg) + sum := md5.Sum(b.bytes) + buf := make([]byte, hex.EncodedLen(len(sum))) + hex.Encode(buf, sum[:]) + return newEvalText(buf, defaultCoercionCollation(call.collate)), nil +} + +func (call *builtinMD5) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, t := call.Arguments[0].typeof(env, fields) + return sqltypes.VarChar, t +} + +func (call *builtinMD5) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(str) + + switch { + case str.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.Binary, 0, false) + } + + col := defaultCoercionCollation(c.cfg.Collation) + c.asm.Fn_MD5(col) + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.VarChar, Col: col, Flag: str.Flag}, nil +} + +type builtinSHA1 struct { + CallExpr + collate collations.ID +} + +var _ Expr = (*builtinSHA1)(nil) + +func (call *builtinSHA1) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + b := evalToBinary(arg) + sum := sha1.Sum(b.bytes) + buf := make([]byte, hex.EncodedLen(len(sum))) + hex.Encode(buf, sum[:]) + return newEvalText(buf, defaultCoercionCollation(call.collate)), nil +} + +func (call *builtinSHA1) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, t := call.Arguments[0].typeof(env, fields) + return sqltypes.VarChar, t +} + +func (call *builtinSHA1) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(str) + + switch { + case str.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.Binary, 0, false) + } + col := defaultCoercionCollation(c.cfg.Collation) + c.asm.Fn_SHA1(col) + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.VarChar, Col: col, Flag: str.Flag}, nil +} + +type builtinSHA2 struct { + CallExpr + collate collations.ID +} + +var _ Expr = (*builtinSHA2)(nil) + +func (call *builtinSHA2) eval(env *ExpressionEnv) (eval, error) { + arg1, arg2, err := call.arg2(env) + if err != nil { + return nil, err + } + + if arg1 == nil || arg2 == nil { + return nil, nil + } + + b := evalToBinary(arg1) + bits := evalToInt64(arg2) + + var sum []byte + switch bits.i { + case 224: + s := sha256.Sum224(b.bytes) + sum = s[:] + case 0, 256: + s := sha256.Sum256(b.bytes) + sum = s[:] + case 384: + s := sha512.Sum384(b.bytes) + sum = s[:] + case 512: + s := sha512.Sum512(b.bytes) + sum = s[:] + default: + return nil, nil + } + + buf := make([]byte, hex.EncodedLen(len(sum))) + hex.Encode(buf, sum[:]) + return newEvalText(buf, defaultCoercionCollation(call.collate)), nil +} + +func (call *builtinSHA2) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, t := call.Arguments[0].typeof(env, fields) + return sqltypes.VarChar, t +} + +func (call *builtinSHA2) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip1 := c.compileNullCheck1(str) + + bits, err := call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + skip2 := c.compileNullCheck1r(bits) + + switch { + case str.isTextual(): + default: + c.asm.Convert_xb(2, sqltypes.Binary, 0, false) + } + + switch bits.Type { + case sqltypes.Int64: + // No-op, already correct type + case sqltypes.Uint64: + c.asm.Convert_ui(1) + default: + c.asm.Convert_xi(1) + } + + col := defaultCoercionCollation(c.cfg.Collation) + c.asm.Fn_SHA2(col) + c.asm.jumpDestination(skip1, skip2) + return ctype{Type: sqltypes.VarChar, Col: col, Flag: str.Flag | flagNullable}, nil +} + +type builtinRandomBytes struct { + CallExpr +} + +var _ Expr = (*builtinRandomBytes)(nil) + +func (call *builtinRandomBytes) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + + if arg == nil { + return nil, nil + } + + l := evalToInt64(arg) + if l.i < 1 || l.i > 1024 { + return nil, nil + } + + buf := make([]byte, l.i) + _, err = rand.Read(buf) + if err != nil { + return nil, err + } + return newEvalBinary(buf), nil +} + +func (call *builtinRandomBytes) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, t := call.Arguments[0].typeof(env, fields) + return sqltypes.VarBinary, t +} + +func (call *builtinRandomBytes) constant() bool { + return false +} + +func (call *builtinRandomBytes) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Int64: + // No-op, already correct type + case sqltypes.Uint64: + c.asm.Convert_ui(1) + default: + c.asm.Convert_xi(1) + } + + c.asm.Fn_RandomBytes() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.VarBinary, Col: collationBinary, Flag: arg.Flag | flagNullable}, nil +} diff --git a/go/vt/vtgate/evalengine/fn_hex.go b/go/vt/vtgate/evalengine/fn_hex.go index ce91f0234e1..0045bfd6688 100644 --- a/go/vt/vtgate/evalengine/fn_hex.go +++ b/go/vt/vtgate/evalengine/fn_hex.go @@ -17,13 +17,16 @@ limitations under the License. package evalengine import ( - "math/bits" - + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/hex" + "vitess.io/vitess/go/mysql/json" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" ) type builtinHex struct { CallExpr + collate collations.ID } var _ Expr = (*builtinHex)(nil) @@ -40,54 +43,176 @@ func (call *builtinHex) eval(env *ExpressionEnv) (eval, error) { var encoded []byte switch arg := arg.(type) { case *evalBytes: - encoded = hexEncodeBytes(arg.bytes) + encoded = hex.EncodeBytes(arg.bytes) case evalNumeric: - encoded = hexEncodeUint(uint64(arg.toInt64().i)) + encoded = hex.EncodeUint(uint64(arg.toInt64().i)) default: - encoded = hexEncodeBytes(arg.ToRawBytes()) + encoded = hex.EncodeBytes(arg.ToRawBytes()) } if arg.SQLType() == sqltypes.Blob || arg.SQLType() == sqltypes.TypeJSON { - return newEvalRaw(sqltypes.Text, encoded, env.collation()), nil + return newEvalRaw(sqltypes.Text, encoded, defaultCoercionCollation(call.collate)), nil } - return newEvalText(encoded, env.collation()), nil + return newEvalText(encoded, defaultCoercionCollation(call.collate)), nil } -func (call *builtinHex) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - tt, f := call.Arguments[0].typeof(env) +func (call *builtinHex) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + tt, f := call.Arguments[0].typeof(env, fields) if tt == sqltypes.Blob || tt == sqltypes.TypeJSON { return sqltypes.Text, f } return sqltypes.VarChar, f } -const hextable = "0123456789ABCDEF" +func (call *builtinHex) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(str) + col := defaultCoercionCollation(c.cfg.Collation) + t := sqltypes.VarChar + if str.Type == sqltypes.Blob || str.Type == sqltypes.TypeJSON { + t = sqltypes.Text + } + + switch { + case sqltypes.IsNumber(str.Type): + c.asm.Fn_HEX_d(col) + case str.isTextual(): + c.asm.Fn_HEX_c(t, col) + default: + c.asm.Convert_xc(1, t, c.cfg.Collation, 0, false) + c.asm.Fn_HEX_c(t, col) + } + + c.asm.jumpDestination(skip) + + return ctype{Type: t, Col: col}, nil +} + +type builtinUnhex struct { + CallExpr +} + +var _ Expr = (*builtinUnhex)(nil) + +func hexDecodeJSON(j *evalJSON) ([]byte, bool) { + switch j.Type() { + case json.TypeNumber: + u, ok := j.Uint64() + if ok { + return hex.DecodeUint(u), true + } else { + return nil, false + } + default: + b := j.ToRawBytes() + decoded := make([]byte, hex.DecodedLen(b)) + err := hex.DecodeBytes(decoded, b) + if err != nil { + return nil, false + } + return decoded, true + } +} + +func (call *builtinUnhex) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + var decoded []byte + switch arg := arg.(type) { + case *evalBytes: + decoded = make([]byte, hex.DecodedLen(arg.bytes)) + err := hex.DecodeBytes(decoded, arg.bytes) + if err != nil { + return nil, nil + } + case *evalInt64: + if arg.i < 0 { + return nil, nil + } + decoded = hex.DecodeUint(uint64(arg.i)) + case *evalUint64: + decoded = hex.DecodeUint(arg.u) + case *evalDecimal: + b := arg.ToRawBytes() + decoded = make([]byte, hex.DecodedLen(b)) + err := hex.DecodeBytes(decoded, b) + if err != nil { + return nil, nil + } + case *evalFloat: + f := arg.f + if f != float64(int64(f)) { + return nil, nil + } + decoded = hex.DecodeUint(uint64(arg.f)) + case *evalJSON: + var ok bool + decoded, ok = hexDecodeJSON(arg) + if !ok { + return nil, nil + } + default: + b := evalToBinary(arg) + decoded = make([]byte, hex.DecodedLen(b.bytes)) + err := hex.DecodeBytes(decoded, b.bytes) + if err != nil { + return nil, nil + } + } + + switch arg.SQLType() { + case sqltypes.Text, sqltypes.Blob, sqltypes.TypeJSON: + return newEvalRaw(sqltypes.Blob, decoded, collationBinary), nil + } + return newEvalBinary(decoded), nil +} -func hexEncodeBytes(src []byte) []byte { - j := 0 - dst := make([]byte, len(src)*2) - for _, v := range src { - dst[j] = hextable[v>>4] - dst[j+1] = hextable[v&0x0f] - j += 2 +func (call *builtinUnhex) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + tt, f := call.Arguments[0].typeof(env, fields) + if tt == sqltypes.Text || tt == sqltypes.Blob || tt == sqltypes.TypeJSON { + return sqltypes.Blob, f } - return dst + return sqltypes.VarBinary, f | flagNullable } -func hexEncodeUint(u uint64) []byte { - var a [16 + 1]byte - i := len(a) - shift := uint(bits.TrailingZeros(uint(16))) & 7 - b := uint64(16) - m := uint(16) - 1 // == 1<= b { - i-- - a[i] = hextable[uint(u)&m] - u >>= shift + switch { + case sqltypes.IsSigned(str.Type): + c.asm.Fn_UNHEX_i(t) + case sqltypes.IsUnsigned(str.Type): + c.asm.Fn_UNHEX_u(t) + case sqltypes.IsFloat(str.Type): + c.asm.Fn_UNHEX_f(t) + case str.isTextual(): + c.asm.Fn_UNHEX_b(t) + case str.Type == sqltypes.TypeJSON: + c.asm.Fn_UNHEX_j(t) + default: + c.asm.Convert_xb(1, t, 0, false) + c.asm.Fn_UNHEX_b(t) } - // u < base - i-- - a[i] = hextable[uint(u)] - return a[i:] + c.asm.jumpDestination(skip) + + return ctype{Type: t, Col: collationBinary, Flag: flagNullable}, nil } diff --git a/go/vt/vtgate/evalengine/fn_info.go b/go/vt/vtgate/evalengine/fn_info.go new file mode 100644 index 00000000000..0489619c3dc --- /dev/null +++ b/go/vt/vtgate/evalengine/fn_info.go @@ -0,0 +1,92 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/servenv" +) + +type builtinUser struct { + CallExpr +} + +var _ Expr = (*builtinUser)(nil) + +func (call *builtinUser) eval(env *ExpressionEnv) (eval, error) { + return newEvalText([]byte(env.currentUser()), collationUtf8mb3), nil +} + +func (call *builtinUser) typeof(_ *ExpressionEnv, _ []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.VarChar, 0 +} + +func (*builtinUser) compile(c *compiler) (ctype, error) { + c.asm.Fn_User() + return ctype{Type: sqltypes.VarChar, Col: collationUtf8mb3}, nil +} + +func (call *builtinUser) constant() bool { + return false +} + +type builtinVersion struct { + CallExpr +} + +var _ Expr = (*builtinVersion)(nil) + +func (call *builtinVersion) eval(env *ExpressionEnv) (eval, error) { + return newEvalText([]byte(servenv.MySQLServerVersion()), collationUtf8mb3), nil +} + +func (call *builtinVersion) typeof(_ *ExpressionEnv, _ []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.VarChar, 0 +} + +func (*builtinVersion) compile(c *compiler) (ctype, error) { + c.asm.Fn_Version() + return ctype{Type: sqltypes.Datetime, Col: collationUtf8mb3}, nil +} + +type builtinDatabase struct { + CallExpr +} + +var _ Expr = (*builtinDatabase)(nil) + +func (call *builtinDatabase) eval(env *ExpressionEnv) (eval, error) { + db := env.currentDatabase() + if db == "" { + return nil, nil + } + return newEvalText([]byte(db), collationUtf8mb3), nil +} + +func (call *builtinDatabase) typeof(_ *ExpressionEnv, _ []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.VarChar, 0 +} + +func (*builtinDatabase) compile(c *compiler) (ctype, error) { + c.asm.Fn_Database() + return ctype{Type: sqltypes.Datetime, Col: collationUtf8mb3}, nil +} + +func (call *builtinDatabase) constant() bool { + return false +} diff --git a/go/vt/vtgate/evalengine/fn_json.go b/go/vt/vtgate/evalengine/fn_json.go index 2107e4adaeb..7c7c6a67f8d 100644 --- a/go/vt/vtgate/evalengine/fn_json.go +++ b/go/vt/vtgate/evalengine/fn_json.go @@ -18,10 +18,12 @@ package evalengine import ( "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/json" + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine/internal/json" ) type ( @@ -118,11 +120,40 @@ func builtin_JSON_EXTRACT(doc *json.Value, paths []eval) (eval, error) { return json.NewArray(matches), nil } -func (call *builtinJSONExtract) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f := call.Arguments[0].typeof(env) +func (call *builtinJSONExtract) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) return sqltypes.TypeJSON, f } +func (call *builtinJSONExtract) compile(c *compiler) (ctype, error) { + doct, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + if slice.All(call.Arguments[1:], func(expr Expr) bool { return expr.constant() }) { + paths := make([]*json.Path, 0, len(call.Arguments[1:])) + + for _, arg := range call.Arguments[1:] { + jp, err := c.jsonExtractPath(arg) + if err != nil { + return ctype{}, err + } + paths = append(paths, jp) + } + + jt, err := c.compileParseJSON("JSON_EXTRACT", doct, 1) + if err != nil { + return ctype{}, err + } + + c.asm.Fn_JSON_EXTRACT0(paths) + return jt, nil + } + + return ctype{}, c.unsupported(call) +} + func (call *builtinJSONUnquote) eval(env *ExpressionEnv) (eval, error) { arg, err := call.arg1(env) if err != nil { @@ -141,22 +172,39 @@ func (call *builtinJSONUnquote) eval(env *ExpressionEnv) (eval, error) { return newEvalRaw(sqltypes.Blob, j.MarshalTo(nil), collationJSON), nil } -func (call *builtinJSONUnquote) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f := call.Arguments[0].typeof(env) +func (call *builtinJSONUnquote) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) return sqltypes.Blob, f } -func (call *builtinJSONObject) eval(env *ExpressionEnv) (eval, error) { - j := json.NewObject() - obj, _ := j.Object() +func (call *builtinJSONUnquote) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + _, err = c.compileParseJSON("JSON_UNQUOTE", arg, 1) + if err != nil { + return ctype{}, err + } + + c.asm.Fn_JSON_UNQUOTE() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Blob, Flag: flagNullable, Col: collationJSON}, nil +} +var errJSONKeyIsNil = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "JSON documents may not contain NULL member names.") + +func (call *builtinJSONObject) eval(env *ExpressionEnv) (eval, error) { + var obj json.Object for i := 0; i < len(call.Arguments); i += 2 { key, err := call.Arguments[i].eval(env) if err != nil { return nil, err } if key == nil { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "JSON documents may not contain NULL member names.") + return nil, errJSONKeyIsNil } key1, err := evalToVarchar(key, collations.CollationUtf8mb4ID, true) if err != nil { @@ -167,20 +215,42 @@ func (call *builtinJSONObject) eval(env *ExpressionEnv) (eval, error) { if err != nil { return nil, err } - val1, err := evalToJSON(val) + val1, err := argToJSON(val) if err != nil { return nil, err } obj.Set(key1.string(), val1, json.Set) } - return j, nil + return json.NewObject(obj), nil } -func (call *builtinJSONObject) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { +func (call *builtinJSONObject) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { return sqltypes.TypeJSON, 0 } +func (call *builtinJSONObject) compile(c *compiler) (ctype, error) { + for i := 0; i < len(call.Arguments); i += 2 { + key, err := call.Arguments[i].compile(c) + if err != nil { + return ctype{}, err + } + if err := c.compileToJSONKey(key); err != nil { + return ctype{}, err + } + val, err := call.Arguments[i+1].compile(c) + if err != nil { + return ctype{}, err + } + _, err = c.compileArgToJSON(val, 1) + if err != nil { + return ctype{}, err + } + } + c.asm.Fn_JSON_OBJECT(len(call.Arguments)) + return ctype{Type: sqltypes.TypeJSON, Col: collationJSON}, nil +} + func (call *builtinJSONArray) eval(env *ExpressionEnv) (eval, error) { ary := make([]*json.Value, 0, len(call.Arguments)) for _, arg := range call.Arguments { @@ -188,7 +258,7 @@ func (call *builtinJSONArray) eval(env *ExpressionEnv) (eval, error) { if err != nil { return nil, err } - arg1, err := evalToJSON(arg) + arg1, err := argToJSON(arg) if err != nil { return nil, err } @@ -197,10 +267,26 @@ func (call *builtinJSONArray) eval(env *ExpressionEnv) (eval, error) { return json.NewArray(ary), nil } -func (call *builtinJSONArray) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { +func (call *builtinJSONArray) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { return sqltypes.TypeJSON, 0 } +func (call *builtinJSONArray) compile(c *compiler) (ctype, error) { + for _, arg := range call.Arguments { + tt, err := arg.compile(c) + if err != nil { + return ctype{}, err + } + + _, err = c.compileArgToJSON(tt, 1) + if err != nil { + return ctype{}, err + } + } + c.asm.Fn_JSON_ARRAY(len(call.Arguments)) + return ctype{Type: sqltypes.TypeJSON, Col: collationJSON}, nil +} + func (call *builtinJSONDepth) eval(env *ExpressionEnv) (eval, error) { arg, err := call.arg1(env) if err != nil { @@ -216,11 +302,15 @@ func (call *builtinJSONDepth) eval(env *ExpressionEnv) (eval, error) { return newEvalInt64(int64(j.Depth())), nil } -func (call *builtinJSONDepth) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f := call.Arguments[0].typeof(env) +func (call *builtinJSONDepth) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) return sqltypes.Int64, f } +func (call *builtinJSONDepth) compile(c *compiler) (ctype, error) { + return ctype{}, c.unsupported(call) +} + func (call *builtinJSONLength) eval(env *ExpressionEnv) (eval, error) { arg, err := call.Arguments[0].eval(env) if err != nil { @@ -259,11 +349,15 @@ func (call *builtinJSONLength) eval(env *ExpressionEnv) (eval, error) { return newEvalInt64(int64(length)), nil } -func (call *builtinJSONLength) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f := call.Arguments[0].typeof(env) +func (call *builtinJSONLength) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) return sqltypes.Int64, f } +func (call *builtinJSONLength) compile(c *compiler) (ctype, error) { + return ctype{}, c.unsupported(call) +} + func (call *builtinJSONContainsPath) eval(env *ExpressionEnv) (eval, error) { args, err := call.args(env) if err != nil { @@ -302,6 +396,44 @@ func (call *builtinJSONContainsPath) eval(env *ExpressionEnv) (eval, error) { return newEvalBool(match == jsonMatchAll), nil } +func (call *builtinJSONContainsPath) compile(c *compiler) (ctype, error) { + doct, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + if !call.Arguments[1].constant() { + return ctype{}, c.unsupported(call) + } + + if !slice.All(call.Arguments[2:], func(expr Expr) bool { return expr.constant() }) { + return ctype{}, c.unsupported(call) + } + + match, err := c.jsonExtractOneOrAll("JSON_CONTAINS_PATH", call.Arguments[1]) + if err != nil { + return ctype{}, err + } + + paths := make([]*json.Path, 0, len(call.Arguments[2:])) + + for _, arg := range call.Arguments[2:] { + jp, err := c.jsonExtractPath(arg) + if err != nil { + return ctype{}, err + } + paths = append(paths, jp) + } + + _, err = c.compileParseJSON("JSON_CONTAINS_PATH", doct, 1) + if err != nil { + return ctype{}, err + } + + c.asm.Fn_JSON_CONTAINS_PATH(match, paths) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagIsBoolean}, nil +} + type jsonMatch int8 const ( @@ -336,8 +468,8 @@ func errOneOrAll(fname string) error { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "The oneOrAll argument to %s may take these values: 'one' or 'all'.", fname) } -func (call *builtinJSONContainsPath) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f := call.Arguments[0].typeof(env) +func (call *builtinJSONContainsPath) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) return sqltypes.Int64, f } @@ -382,13 +514,39 @@ func (call *builtinJSONKeys) eval(env *ExpressionEnv) (eval, error) { } var keys []*json.Value - obj.Visit(func(key []byte, _ *json.Value) { + obj.Visit(func(key string, _ *json.Value) { keys = append(keys, json.NewString(key)) }) return json.NewArray(keys), nil } -func (call *builtinJSONKeys) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f := call.Arguments[0].typeof(env) +func (call *builtinJSONKeys) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) return sqltypes.TypeJSON, f | flagNullable } + +func (call *builtinJSONKeys) compile(c *compiler) (ctype, error) { + doc, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + _, err = c.compileParseJSON("JSON_KEYS", doc, 1) + if err != nil { + return ctype{}, err + } + + var jp *json.Path + if len(call.Arguments) == 2 { + jp, err = c.jsonExtractPath(call.Arguments[1]) + if err != nil { + return ctype{}, err + } + if jp.ContainsWildcards() { + return ctype{}, errInvalidPathForTransform + } + } + + c.asm.Fn_JSON_KEYS(jp) + return ctype{Type: sqltypes.TypeJSON, Flag: flagNullable, Col: collationJSON}, nil +} diff --git a/go/vt/vtgate/evalengine/fn_misc.go b/go/vt/vtgate/evalengine/fn_misc.go new file mode 100644 index 00000000000..96522a2314f --- /dev/null +++ b/go/vt/vtgate/evalengine/fn_misc.go @@ -0,0 +1,661 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "encoding/binary" + "math" + "net/netip" + + "github.com/google/uuid" + + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +type ( + builtinInetAton struct { + CallExpr + } + + builtinInetNtoa struct { + CallExpr + collate collations.ID + } + + builtinInet6Aton struct { + CallExpr + } + + builtinInet6Ntoa struct { + CallExpr + collate collations.ID + } + + builtinIsIPV4 struct { + CallExpr + } + + builtinIsIPV4Compat struct { + CallExpr + } + + builtinIsIPV4Mapped struct { + CallExpr + } + + builtinIsIPV6 struct { + CallExpr + } + + builtinBinToUUID struct { + CallExpr + collate collations.ID + } + + builtinIsUUID struct { + CallExpr + } + + builtinUUID struct { + CallExpr + } + + builtinUUIDToBin struct { + CallExpr + } +) + +var _ Expr = (*builtinInetAton)(nil) +var _ Expr = (*builtinInetNtoa)(nil) +var _ Expr = (*builtinInet6Aton)(nil) +var _ Expr = (*builtinInet6Ntoa)(nil) +var _ Expr = (*builtinIsIPV4)(nil) +var _ Expr = (*builtinIsIPV4Compat)(nil) +var _ Expr = (*builtinIsIPV4Mapped)(nil) +var _ Expr = (*builtinIsIPV6)(nil) +var _ Expr = (*builtinBinToUUID)(nil) +var _ Expr = (*builtinIsUUID)(nil) +var _ Expr = (*builtinUUID)(nil) +var _ Expr = (*builtinUUIDToBin)(nil) + +func (call *builtinInetAton) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if arg == nil || err != nil { + return nil, err + } + rawIp := evalToBinary(arg) + ip, err := netip.ParseAddr(rawIp.string()) + if err != nil || !ip.Is4() { + return nil, nil + } + return newEvalUint64(uint64(binary.BigEndian.Uint32(ip.AsSlice()))), nil +} + +func (call *builtinInetAton) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Uint64, flagNullable +} + +func (call *builtinInetAton) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(str) + + switch { + case str.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + + c.asm.Fn_INET_ATON() + c.asm.jumpDestination(skip) + + return ctype{Type: sqltypes.Uint64, Flag: flagNullable, Col: collationNumeric}, nil +} + +func (call *builtinInetNtoa) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if arg == nil || err != nil { + return nil, err + } + rawIp := uint64(evalToInt64(arg).i) + + if rawIp > math.MaxUint32 { + return nil, nil + } + + b := binary.BigEndian.AppendUint32(nil, uint32(rawIp)) + return newEvalText(hack.StringBytes(netip.AddrFrom4([4]byte(b)).String()), defaultCoercionCollation(call.collate)), nil +} + +func (call *builtinInetNtoa) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, t := call.Arguments[0].typeof(env, fields) + return sqltypes.VarChar, t | flagNullable +} + +func (call *builtinInetNtoa) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + c.compileToUint64(arg, 1) + col := defaultCoercionCollation(call.collate) + c.asm.Fn_INET_NTOA(col) + c.asm.jumpDestination(skip) + + return ctype{Type: sqltypes.VarChar, Flag: flagNullable, Col: defaultCoercionCollation(call.collate)}, nil +} + +func (call *builtinInet6Aton) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if arg == nil || err != nil { + return nil, err + } + rawIp := evalToBinary(arg) + ip, err := netip.ParseAddr(rawIp.string()) + if err != nil { + return nil, nil + } + b := ip.AsSlice() + return newEvalBinary(b), nil +} + +func (call *builtinInet6Aton) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.VarBinary, flagNullable +} + +func (call *builtinInet6Aton) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(str) + + switch { + case str.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + + c.asm.Fn_INET6_ATON() + c.asm.jumpDestination(skip) + + return ctype{Type: sqltypes.VarBinary, Flag: flagNullable, Col: collationBinary}, nil +} + +func printIPv6AsIPv4(addr netip.Addr) (netip.Addr, bool) { + b := addr.AsSlice() + if len(b) != 16 { + return addr, false + } + for i := 0; i < 12; i++ { + if b[i] != 0 { + return addr, false + } + } + if b[12] == 0 && b[13] == 0 { + return addr, false + } + return netip.AddrFrom4(([4]byte)(b[12:])), true +} + +func isIPv4Compat(addr netip.Addr) bool { + b := addr.AsSlice() + if len(b) != 16 { + return false + } + for i := 0; i < 12; i++ { + if b[i] != 0 { + return false + } + } + if b[12] == 0 && b[13] == 0 && b[14] == 0 && b[15] < 2 { + return false + } + return true +} + +func (call *builtinInet6Ntoa) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if arg == nil || err != nil { + return nil, err + } + b, ok := arg.(*evalBytes) + if !ok || !b.isBinary() { + return nil, nil + } + + ip, ok := netip.AddrFromSlice(b.bytes) + if !ok { + return nil, nil + } + + if ip, ok := printIPv6AsIPv4(ip); ok { + return newEvalText(hack.StringBytes("::"+ip.String()), defaultCoercionCollation(call.collate)), nil + } + + return newEvalText(hack.StringBytes(ip.String()), defaultCoercionCollation(call.collate)), nil +} + +func (call *builtinInet6Ntoa) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.VarChar, flagNullable +} + +func (call *builtinInet6Ntoa) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.VarBinary, sqltypes.Blob, sqltypes.Binary: + col := defaultCoercionCollation(call.collate) + c.asm.Fn_INET6_NTOA(col) + default: + c.asm.SetNull(1) + } + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.VarChar, Flag: flagNullable, Col: defaultCoercionCollation(call.collate)}, nil +} + +func (call *builtinIsIPV4) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if arg == nil || err != nil { + return nil, err + } + rawIp := evalToBinary(arg) + ip, err := netip.ParseAddr(rawIp.string()) + if err != nil { + return newEvalBool(false), nil + } + return newEvalBool(ip.Is4()), nil +} + +func (call *builtinIsIPV4) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, t := call.Arguments[0].typeof(env, fields) + return sqltypes.Int64, t +} + +func (call *builtinIsIPV4) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + skip := c.compileNullCheck1(arg) + + switch { + case arg.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + + c.asm.Fn_IS_IPV4() + c.asm.jumpDestination(skip) + + return ctype{Type: sqltypes.Int64, Flag: arg.Flag | flagIsBoolean, Col: collationNumeric}, nil +} + +func (call *builtinIsIPV4Compat) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if arg == nil || err != nil { + return nil, err + } + b, ok := arg.(*evalBytes) + if !ok || !b.isBinary() { + return newEvalBool(false), nil + } + + ip, ok := netip.AddrFromSlice(b.bytes) + return newEvalBool(ok && isIPv4Compat(ip)), nil +} + +func (call *builtinIsIPV4Compat) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, flagIsBoolean +} + +func (call *builtinIsIPV4Compat) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.VarBinary, sqltypes.Blob, sqltypes.Binary: + c.asm.Fn_IS_IPV4_COMPAT() + default: + c.asm.SetBool(1, false) + } + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Flag: arg.Flag | flagIsBoolean, Col: collationNumeric}, nil +} + +func (call *builtinIsIPV4Mapped) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if arg == nil || err != nil { + return nil, err + } + b, ok := arg.(*evalBytes) + if !ok || !b.isBinary() { + return newEvalBool(false), nil + } + + ip, ok := netip.AddrFromSlice(b.bytes) + return newEvalBool(ok && ip.Is4In6()), nil +} + +func (call *builtinIsIPV4Mapped) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, flagIsBoolean +} + +func (call *builtinIsIPV4Mapped) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + switch arg.Type { + case sqltypes.VarBinary, sqltypes.Blob, sqltypes.Binary: + c.asm.Fn_IS_IPV4_MAPPED() + default: + c.asm.SetBool(1, false) + } + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Flag: arg.Flag | flagIsBoolean, Col: collationNumeric}, nil +} + +func (call *builtinIsIPV6) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if arg == nil || err != nil { + return nil, err + } + rawIp := evalToBinary(arg) + ip, err := netip.ParseAddr(rawIp.string()) + if err != nil { + return newEvalBool(false), nil + } + return newEvalBool(ip.Is6()), nil +} + +func (call *builtinIsIPV6) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, flagIsBoolean +} + +func (call *builtinIsIPV6) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + skip := c.compileNullCheck1(arg) + + switch { + case arg.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + + c.asm.Fn_IS_IPV6() + c.asm.jumpDestination(skip) + + return ctype{Type: sqltypes.Int64, Flag: arg.Flag | flagIsBoolean, Col: collationNumeric}, nil +} + +func errIncorrectUUID(in []byte, f string) error { + return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "Incorrect string value: '%s' for function %s", sanitizeErrorValue(in), f) +} + +func swapUUIDFrom(in []byte) []byte { + if len(in) != 16 { + return in + } + out := make([]byte, 0, 16) + out = append(out, in[4:8]...) + out = append(out, in[2:4]...) + out = append(out, in[0:2]...) + out = append(out, in[8:]...) + return out +} + +func swapUUIDTo(in []byte) []byte { + if len(in) != 16 { + return in + } + + out := make([]byte, 0, 16) + out = append(out, in[6:8]...) + out = append(out, in[4:6]...) + out = append(out, in[0:4]...) + out = append(out, in[8:]...) + return out +} + +func (call *builtinBinToUUID) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if arg == nil || err != nil { + return nil, err + } + + raw := evalToBinary(arg).bytes + + if len(call.Arguments) > 1 { + swap, err := call.Arguments[1].eval(env) + if err != nil { + return nil, err + } + + if swap != nil && evalToInt64(swap).i != 0 { + raw = swapUUIDFrom(raw) + } + } + + parsed, err := uuid.FromBytes(raw) + if err != nil { + return nil, errIncorrectUUID(raw, "bin_to_uuid") + } + return newEvalText(hack.StringBytes(parsed.String()), defaultCoercionCollation(call.collate)), nil +} + +func (call *builtinBinToUUID) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.VarChar, f +} + +func (call *builtinBinToUUID) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + skip := c.compileNullCheck1(arg) + + switch { + case arg.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + + col := defaultCoercionCollation(call.collate) + ct := ctype{Type: sqltypes.VarChar, Flag: arg.Flag, Col: col} + + if len(call.Arguments) == 1 { + c.asm.Fn_BIN_TO_UUID0(col) + c.asm.jumpDestination(skip) + return ct, nil + } + + swap, err := call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + sj := c.compileNullCheck1(swap) + switch swap.Type { + case sqltypes.Int64: + case sqltypes.Uint64: + c.asm.Convert_ui(1) + default: + c.asm.Convert_xi(1) + } + c.asm.jumpDestination(sj) + c.asm.Fn_BIN_TO_UUID1(col) + + c.asm.jumpDestination(skip) + return ct, nil +} + +func (call *builtinIsUUID) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if arg == nil || err != nil { + return nil, err + } + + raw := evalToBinary(arg).bytes + _, err = uuid.ParseBytes(raw) + return newEvalBool(err == nil), nil +} + +func (call *builtinIsUUID) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Int64, f | flagIsBoolean +} + +func (call *builtinIsUUID) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + skip := c.compileNullCheck1(arg) + + switch { + case arg.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + c.asm.Fn_IS_UUID() + + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Flag: arg.Flag | flagIsBoolean, Col: collationNumeric}, nil +} + +func (call *builtinUUID) eval(env *ExpressionEnv) (eval, error) { + v, err := uuid.NewUUID() + if err != nil { + return nil, err + } + m, err := v.MarshalText() + if err != nil { + return nil, err + } + + return newEvalText(m, collationUtf8mb3), nil +} + +func (call *builtinUUID) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.VarChar, 0 +} + +func (call *builtinUUID) compile(c *compiler) (ctype, error) { + c.asm.Fn_UUID() + return ctype{Type: sqltypes.VarChar, Flag: 0, Col: collationUtf8mb3}, nil +} + +func (call *builtinUUIDToBin) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if arg == nil || err != nil { + return nil, err + } + + raw := evalToBinary(arg).bytes + + parsed, err := uuid.ParseBytes(raw) + if err != nil { + return nil, errIncorrectUUID(raw, "uuid_to_bin") + } + + out := parsed[:] + if len(call.Arguments) > 1 { + swap, err := call.Arguments[1].eval(env) + if err != nil { + return nil, err + } + + if swap != nil && evalToInt64(swap).i != 0 { + out = swapUUIDTo(out) + } + } + + return newEvalBinary(out), nil +} + +func (call *builtinUUIDToBin) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.VarBinary, f +} + +func (call *builtinUUIDToBin) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + skip := c.compileNullCheck1(arg) + + switch { + case arg.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + + ct := ctype{Type: sqltypes.VarBinary, Flag: arg.Flag, Col: collationBinary} + + if len(call.Arguments) == 1 { + c.asm.Fn_UUID_TO_BIN0() + c.asm.jumpDestination(skip) + return ct, nil + } + + swap, err := call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + sj := c.compileNullCheck1(swap) + switch swap.Type { + case sqltypes.Int64: + case sqltypes.Uint64: + c.asm.Convert_ui(1) + default: + c.asm.Convert_xi(1) + } + c.asm.jumpDestination(sj) + c.asm.Fn_UUID_TO_BIN1() + + c.asm.jumpDestination(skip) + return ct, nil +} diff --git a/go/vt/vtgate/evalengine/fn_numeric.go b/go/vt/vtgate/evalengine/fn_numeric.go index 3c157f9bc89..fe8eeffb2c4 100644 --- a/go/vt/vtgate/evalengine/fn_numeric.go +++ b/go/vt/vtgate/evalengine/fn_numeric.go @@ -17,9 +17,18 @@ limitations under the License. package evalengine import ( + "errors" + "hash/crc32" "math" + "strconv" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/fastparse" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" ) type builtinCeil struct { @@ -49,18 +58,1500 @@ func (call *builtinCeil) eval(env *ExpressionEnv) (eval, error) { } return newEvalDecimalWithPrec(dec, 0), nil default: - f, _ := evalToNumeric(num).toFloat() + f, _ := evalToFloat(num) return newEvalFloat(math.Ceil(f.f)), nil } } -func (call *builtinCeil) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - t, f := call.Arguments[0].typeof(env) - if sqltypes.IsIntegral(t) { +func (call *builtinCeil) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + t, f := call.Arguments[0].typeof(env, fields) + if sqltypes.IsSigned(t) { return sqltypes.Int64, f + } else if sqltypes.IsUnsigned(t) { + return sqltypes.Uint64, f + } else if sqltypes.Decimal == t { + return sqltypes.Int64, f | flagAmbiguousType + } else { + return sqltypes.Float64, f + } +} + +func (call *builtinCeil) compile(c *compiler) (ctype, error) { + return c.compileFn_rounding(call.Arguments[0], c.asm.Fn_CEIL_f, c.asm.Fn_CEIL_d) +} + +type builtinFloor struct { + CallExpr +} + +var _ Expr = (*builtinFloor)(nil) + +func (call *builtinFloor) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + switch num := arg.(type) { + case *evalInt64, *evalUint64: + return num, nil + case *evalDecimal: + dec := num.dec + dec = dec.Floor() + intnum, isfit := dec.Int64() + if isfit { + return newEvalInt64(intnum), nil + } + return newEvalDecimalWithPrec(dec, 0), nil + default: + f, _ := evalToFloat(num) + return newEvalFloat(math.Floor(f.f)), nil + } +} + +func (call *builtinFloor) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + t, f := call.Arguments[0].typeof(env, fields) + if sqltypes.IsSigned(t) { + return sqltypes.Int64, f + } else if sqltypes.IsUnsigned(t) { + return sqltypes.Uint64, f + } else if sqltypes.Decimal == t { + return sqltypes.Int64, f | flagAmbiguousType + } else { + return sqltypes.Float64, f + } +} + +func (call *builtinFloor) compile(c *compiler) (ctype, error) { + return c.compileFn_rounding(call.Arguments[0], c.asm.Fn_FLOOR_f, c.asm.Fn_FLOOR_d) +} + +type builtinAbs struct { + CallExpr +} + +var _ Expr = (*builtinAbs)(nil) + +func (call *builtinAbs) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + switch num := arg.(type) { + case *evalUint64: + return num, nil + case *evalInt64: + if num.i < 0 { + if num.i == math.MinInt64 { + return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "BIGINT value is out of range") + } + return newEvalInt64(-num.i), nil + } + return num, nil + case *evalDecimal: + return newEvalDecimalWithPrec(num.dec.Abs(), num.length), nil + default: + f, _ := evalToFloat(num) + return newEvalFloat(math.Abs(f.f)), nil + } +} + +func (call *builtinAbs) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + t, f := call.Arguments[0].typeof(env, fields) + if sqltypes.IsNumber(t) { + return t, f + } else { + return sqltypes.Float64, f + } +} + +func (expr *builtinAbs) compile(c *compiler) (ctype, error) { + arg, err := expr.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + if arg.Type == sqltypes.Uint64 { + // No-op if it's unsigned since that's already positive. + return arg, nil + } + + skip := c.compileNullCheck1(arg) + + convt := ctype{Type: arg.Type, Col: collationNumeric, Flag: arg.Flag} + switch arg.Type { + case sqltypes.Int64: + c.asm.Fn_ABS_i() + case sqltypes.Float64: + c.asm.Fn_ABS_f() + case sqltypes.Decimal: + // We assume here the most common case here is that + // the decimal fits into an integer. + c.asm.Fn_ABS_d() + default: + convt.Type = sqltypes.Float64 + c.asm.Convert_xf(1) + c.asm.Fn_ABS_f() + } + + c.asm.jumpDestination(skip) + return convt, nil +} + +type builtinPi struct { + CallExpr +} + +var _ Expr = (*builtinPi)(nil) + +func (call *builtinPi) eval(env *ExpressionEnv) (eval, error) { + return newEvalFloat(math.Pi), nil +} + +func (call *builtinPi) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Float64, 0 +} + +func (*builtinPi) compile(c *compiler) (ctype, error) { + c.asm.Fn_PI() + return ctype{Type: sqltypes.Float64, Col: collationNumeric}, nil +} + +func isFinite(f float64) bool { + const mask = 0x7FF + const shift = 64 - 11 - 1 + x := math.Float64bits(f) + return uint32(x>>shift)&mask != mask +} + +type builtinAcos struct { + CallExpr +} + +var _ Expr = (*builtinAcos)(nil) + +func (call *builtinAcos) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + f, _ := evalToFloat(arg) + if f.f < -1 || f.f > 1 { + return nil, nil + } + return newEvalFloat(math.Acos(f.f)), nil +} + +func (call *builtinAcos) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Float64, f | flagNullable +} + +func (call *builtinAcos) compile(c *compiler) (ctype, error) { + return c.compileFn_math1(call.Arguments[0], c.asm.Fn_ACOS, flagNullable) +} + +type builtinAsin struct { + CallExpr +} + +var _ Expr = (*builtinAsin)(nil) + +func (call *builtinAsin) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + f, _ := evalToFloat(arg) + if f.f < -1 || f.f > 1 { + return nil, nil + } + return newEvalFloat(math.Asin(f.f)), nil +} + +func (call *builtinAsin) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Float64, f | flagNullable +} + +func (call *builtinAsin) compile(c *compiler) (ctype, error) { + return c.compileFn_math1(call.Arguments[0], c.asm.Fn_ASIN, flagNullable) +} + +type builtinAtan struct { + CallExpr +} + +var _ Expr = (*builtinAtan)(nil) + +func (call *builtinAtan) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + f, _ := evalToFloat(arg) + return newEvalFloat(math.Atan(f.f)), nil +} + +func (call *builtinAtan) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Float64, f +} + +func (call *builtinAtan) compile(c *compiler) (ctype, error) { + return c.compileFn_math1(call.Arguments[0], c.asm.Fn_ATAN, 0) +} + +type builtinAtan2 struct { + CallExpr +} + +var _ Expr = (*builtinAtan2)(nil) + +func (call *builtinAtan2) eval(env *ExpressionEnv) (eval, error) { + arg1, arg2, err := call.arg2(env) + if err != nil { + return nil, err + } + if arg1 == nil || arg2 == nil { + return nil, nil + } + + f1, _ := evalToFloat(arg1) + f2, _ := evalToFloat(arg2) + return newEvalFloat(math.Atan2(f1.f, f2.f)), nil +} + +func (expr *builtinAtan2) compile(c *compiler) (ctype, error) { + arg1, err := expr.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + arg2, err := expr.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck2(arg1, arg2) + c.compileToFloat(arg1, 2) + c.compileToFloat(arg2, 1) + c.asm.Fn_ATAN2() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Float64, Col: collationNumeric, Flag: arg1.Flag | arg2.Flag}, nil +} + +func (call *builtinAtan2) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Float64, f +} + +type builtinCos struct { + CallExpr +} + +var _ Expr = (*builtinCos)(nil) + +func (call *builtinCos) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + f, _ := evalToFloat(arg) + return newEvalFloat(math.Cos(f.f)), nil +} + +func (call *builtinCos) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Float64, f +} + +func (call *builtinCos) compile(c *compiler) (ctype, error) { + return c.compileFn_math1(call.Arguments[0], c.asm.Fn_COS, 0) +} + +type builtinCot struct { + CallExpr +} + +var _ Expr = (*builtinCot)(nil) + +func (call *builtinCot) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + f, _ := evalToFloat(arg) + return newEvalFloat(1.0 / math.Tan(f.f)), nil +} + +func (call *builtinCot) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Float64, f +} + +func (call *builtinCot) compile(c *compiler) (ctype, error) { + return c.compileFn_math1(call.Arguments[0], c.asm.Fn_COT, 0) +} + +type builtinSin struct { + CallExpr +} + +var _ Expr = (*builtinSin)(nil) + +func (call *builtinSin) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + f, _ := evalToFloat(arg) + return newEvalFloat(math.Sin(f.f)), nil +} + +func (call *builtinSin) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Float64, f +} + +func (call *builtinSin) compile(c *compiler) (ctype, error) { + return c.compileFn_math1(call.Arguments[0], c.asm.Fn_SIN, 0) +} + +type builtinTan struct { + CallExpr +} + +var _ Expr = (*builtinTan)(nil) + +func (call *builtinTan) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + f, _ := evalToFloat(arg) + return newEvalFloat(math.Tan(f.f)), nil +} + +func (call *builtinTan) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Float64, f +} + +func (call *builtinTan) compile(c *compiler) (ctype, error) { + return c.compileFn_math1(call.Arguments[0], c.asm.Fn_TAN, 0) +} + +type builtinDegrees struct { + CallExpr +} + +var _ Expr = (*builtinDegrees)(nil) + +func (call *builtinDegrees) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + f, _ := evalToFloat(arg) + return newEvalFloat(f.f * (180 / math.Pi)), nil +} + +func (call *builtinDegrees) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Float64, f +} + +func (call *builtinDegrees) compile(c *compiler) (ctype, error) { + return c.compileFn_math1(call.Arguments[0], c.asm.Fn_DEGREES, 0) +} + +type builtinRadians struct { + CallExpr +} + +var _ Expr = (*builtinRadians)(nil) + +func (call *builtinRadians) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + f, _ := evalToFloat(arg) + return newEvalFloat(f.f * (math.Pi / 180)), nil +} + +func (call *builtinRadians) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Float64, f +} + +func (call *builtinRadians) compile(c *compiler) (ctype, error) { + return c.compileFn_math1(call.Arguments[0], c.asm.Fn_RADIANS, 0) +} + +type builtinExp struct { + CallExpr +} + +var _ Expr = (*builtinExp)(nil) + +func (call *builtinExp) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + f, _ := evalToFloat(arg) + a := math.Exp(f.f) + if !isFinite(a) { + return nil, nil + } + return newEvalFloat(a), nil +} + +func (call *builtinExp) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Float64, f | flagNullable +} + +func (call *builtinExp) compile(c *compiler) (ctype, error) { + return c.compileFn_math1(call.Arguments[0], c.asm.Fn_EXP, flagNullable) +} + +type builtinLn struct { + CallExpr +} + +var _ Expr = (*builtinLn)(nil) + +func (call *builtinLn) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + f, _ := evalToFloat(arg) + a, ok := math_log(f.f) + if !ok { + return nil, nil + } + return newEvalFloat(a), nil +} + +func (call *builtinLn) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Float64, f | flagNullable +} + +func (call *builtinLn) compile(c *compiler) (ctype, error) { + return c.compileFn_math1(call.Arguments[0], c.asm.Fn_LN, flagNullable) +} + +type builtinLog struct { + CallExpr +} + +var _ Expr = (*builtinLog)(nil) + +func (call *builtinLog) eval(env *ExpressionEnv) (eval, error) { + arg1, arg2, err := call.arg2(env) + if err != nil { + return nil, err + } + if arg1 == nil || arg2 == nil { + return nil, nil + } + + f1, _ := evalToFloat(arg1) + f2, _ := evalToFloat(arg2) + + a, ok := math_logN(f1.f, f2.f) + if !ok { + return nil, nil + } + return newEvalFloat(a), nil +} + +func (call *builtinLog) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Float64, f | flagNullable +} + +func (expr *builtinLog) compile(c *compiler) (ctype, error) { + arg1, err := expr.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + arg2, err := expr.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck2(arg1, arg2) + c.compileToFloat(arg1, 2) + c.compileToFloat(arg2, 1) + c.asm.Fn_LOG() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Float64, Col: collationNumeric, Flag: arg1.Flag | arg2.Flag}, nil +} + +type builtinLog10 struct { + CallExpr +} + +var _ Expr = (*builtinLog10)(nil) + +func (call *builtinLog10) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + f, _ := evalToFloat(arg) + a, ok := math_log10(f.f) + if !ok { + return nil, nil + } + + return newEvalFloat(a), nil +} + +func (call *builtinLog10) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Float64, f | flagNullable +} + +func (call *builtinLog10) compile(c *compiler) (ctype, error) { + return c.compileFn_math1(call.Arguments[0], c.asm.Fn_LOG10, flagNullable) +} + +type builtinLog2 struct { + CallExpr +} + +var _ Expr = (*builtinLog2)(nil) + +func (call *builtinLog2) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + f, _ := evalToFloat(arg) + a, ok := math_log2(f.f) + if !ok { + return nil, nil + } + return newEvalFloat(a), nil +} + +func (call *builtinLog2) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Float64, f | flagNullable +} + +func (call *builtinLog2) compile(c *compiler) (ctype, error) { + return c.compileFn_math1(call.Arguments[0], c.asm.Fn_LOG2, flagNullable) +} + +type builtinPow struct { + CallExpr +} + +var _ Expr = (*builtinPow)(nil) + +func (call *builtinPow) eval(env *ExpressionEnv) (eval, error) { + arg1, arg2, err := call.arg2(env) + if err != nil { + return nil, err + } + if arg1 == nil || arg2 == nil { + return nil, nil + } + + f1, _ := evalToFloat(arg1) + f2, _ := evalToFloat(arg2) + + a := math.Pow(f1.f, f2.f) + if !isFinite(a) { + return nil, nil + } + + return newEvalFloat(a), nil +} + +func (call *builtinPow) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Float64, f | flagNullable +} + +func (expr *builtinPow) compile(c *compiler) (ctype, error) { + arg1, err := expr.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + arg2, err := expr.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck2(arg1, arg2) + c.compileToFloat(arg1, 2) + c.compileToFloat(arg2, 1) + c.asm.Fn_POW() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Float64, Col: collationNumeric, Flag: arg1.Flag | arg2.Flag | flagNullable}, nil +} + +type builtinSign struct { + CallExpr +} + +var _ Expr = (*builtinSign)(nil) + +func (call *builtinSign) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + switch arg := arg.(type) { + case *evalInt64: + if arg.i < 0 { + return newEvalInt64(-1), nil + } else if arg.i > 0 { + return newEvalInt64(1), nil + } else { + return newEvalInt64(0), nil + } + case *evalUint64: + if arg.u > 0 { + return newEvalInt64(1), nil + } else { + return newEvalInt64(0), nil + } + case *evalDecimal: + return newEvalInt64(int64(arg.dec.Sign())), nil + case *evalFloat: + if arg.f < 0 { + return newEvalInt64(-1), nil + } else if arg.f > 0 { + return newEvalInt64(1), nil + } else { + return newEvalInt64(0), nil + } + default: + f, _ := evalToFloat(arg) + if f.f < 0 { + return newEvalInt64(-1), nil + } else if f.f > 0 { + return newEvalInt64(1), nil + } else { + return newEvalInt64(0), nil + } + } +} + +func (call *builtinSign) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, t := call.Arguments[0].typeof(env, fields) + return sqltypes.Int64, t +} + +func (expr *builtinSign) compile(c *compiler) (ctype, error) { + arg, err := expr.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Int64: + c.asm.Fn_SIGN_i() + case sqltypes.Uint64: + c.asm.Fn_SIGN_u() + case sqltypes.Float64: + c.asm.Fn_SIGN_f() + case sqltypes.Decimal: + // We assume here the most common case here is that + // the decimal fits into an integer. + c.asm.Fn_SIGN_d() + default: + c.asm.Convert_xf(1) + c.asm.Fn_SIGN_f() + } + + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag}, nil +} + +type builtinSqrt struct { + CallExpr +} + +var _ Expr = (*builtinSqrt)(nil) + +func (call *builtinSqrt) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + f, _ := evalToFloat(arg) + a := math.Sqrt(f.f) + if !isFinite(a) { + return nil, nil + } + + return newEvalFloat(a), nil +} + +func (call *builtinSqrt) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, t := call.Arguments[0].typeof(env, fields) + return sqltypes.Float64, t | flagNullable +} + +func (call *builtinSqrt) compile(c *compiler) (ctype, error) { + return c.compileFn_math1(call.Arguments[0], c.asm.Fn_SQRT, flagNullable) +} + +// Math helpers extracted from `math` package + +func math_log(x float64) (float64, bool) { + const ( + Ln2Hi = 6.93147180369123816490e-01 /* 3fe62e42 fee00000 */ + Ln2Lo = 1.90821492927058770002e-10 /* 3dea39ef 35793c76 */ + L1 = 6.666666666666735130e-01 /* 3FE55555 55555593 */ + L2 = 3.999999999940941908e-01 /* 3FD99999 9997FA04 */ + L3 = 2.857142874366239149e-01 /* 3FD24924 94229359 */ + L4 = 2.222219843214978396e-01 /* 3FCC71C5 1D8E78AF */ + L5 = 1.818357216161805012e-01 /* 3FC74664 96CB03DE */ + L6 = 1.531383769920937332e-01 /* 3FC39A09 D078C69F */ + L7 = 1.479819860511658591e-01 /* 3FC2F112 DF3E5244 */ + ) + + // special cases + switch { + case math.IsNaN(x) || math.IsInf(x, 1): + return 0, false + case x < 0: + return 0, false + case x == 0: + return 0, false + } + + // reduce + f1, ki := math.Frexp(x) + if f1 < math.Sqrt2/2 { + f1 *= 2 + ki-- + } + f := f1 - 1 + k := float64(ki) + + // compute + s := f / (2 + f) + s2 := s * s + s4 := s2 * s2 + t1 := s2 * (L1 + s4*(L3+s4*(L5+s4*L7))) + t2 := s4 * (L2 + s4*(L4+s4*L6)) + R := t1 + t2 + hfsq := 0.5 * f * f + return k*Ln2Hi - ((hfsq - (s*(hfsq+R) + k*Ln2Lo)) - f), true +} + +func math_logN(f1, f2 float64) (float64, bool) { + a1, _ := math_log(f1) + if a1 == 0 { + return 0, false + } + a2, ok := math_log(f2) + if !ok { + return 0, false + } + return a2 / a1, true +} + +func math_log10(f float64) (float64, bool) { + if a, ok := math_log(f); ok { + return a * (1 / math.Ln10), true + } + return 0, false +} + +func math_log2(f float64) (float64, bool) { + frac, exp := math.Frexp(f) + // Make sure exact powers of two give an exact answer. + // Don't depend on Log(0.5)*(1/Ln2)+exp being exactly exp-1. + if frac == 0.5 { + return float64(exp - 1), true + } + if a, ok := math_log(frac); ok { + return a*(1/math.Ln2) + float64(exp), true + } + return 0, false +} + +type builtinRound struct { + CallExpr +} + +var _ Expr = (*builtinRound)(nil) + +func clampRounding(round int64) int64 { + // Use some reasonable lower limit to avoid too slow + // iteration for very large numbers. We need to be able + // to at least truncate math.MaxFloat64 to 0 for the largest + // possible values. + if round < -decimal.ExponentLimit { + round = -decimal.ExponentLimit + } else if round > 30 { + round = 30 + } + return round +} + +func roundSigned(v int64, round int64) int64 { + if round >= 0 { + return v + } + round = clampRounding(round) + + if v == 0 { + return 0 + } + for i := round; i < -1 && v != 0; i++ { + v /= 10 + } + + if v == 0 { + return 0 + } + if v%10 <= -5 { + v -= 10 + } else if v%10 >= 5 { + v += 10 + } + + v /= 10 + for i := round; i < 0; i++ { + v *= 10 + } + return v +} + +func roundUnsigned(v uint64, round int64) uint64 { + if round >= 0 { + return v + } + round = clampRounding(round) + + if v == 0 { + return 0 + } + for i := round; i < -1 && v != 0; i++ { + v /= 10 + } + + if v == 0 { + return 0 + } + + if v%10 >= 5 { + v += 10 + } + + v /= 10 + for i := round; i < 0; i++ { + v *= 10 + } + return v +} + +func (call *builtinRound) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + round := int64(0) + if len(call.Arguments) > 1 { + d, err := call.Arguments[1].eval(env) + if err != nil { + return nil, err + } + if d == nil { + return nil, nil + } + + switch d := d.(type) { + case *evalUint64: + round = int64(d.u) + if d.u > math.MaxInt64 { + round = math.MaxInt64 + } + default: + round = evalToInt64(d).i + } + } + + switch arg := arg.(type) { + case *evalInt64: + return newEvalInt64(roundSigned(arg.i, round)), nil + case *evalUint64: + return newEvalUint64(roundUnsigned(arg.u, round)), nil + case *evalDecimal: + if arg.dec.IsZero() { + return arg, nil + } + + if round == 0 { + return newEvalDecimalWithPrec(arg.dec.Round(0), 0), nil + } + + round = clampRounding(round) + digit := int32(round) + if digit < 0 { + digit = 0 + } + if digit > arg.length { + digit = arg.length + } + rounded := arg.dec.Round(int32(round)) + if rounded.IsZero() { + return newEvalDecimalWithPrec(decimal.Zero, 0), nil + } + return newEvalDecimalWithPrec(rounded, digit), nil + case *evalFloat: + if arg.f == 0.0 { + return arg, nil + } + if round == 0 { + return newEvalFloat(math.Round(arg.f)), nil + } + + round = clampRounding(round) + f := math.Pow(10, float64(round)) + if f == 0 { + return newEvalFloat(0), nil + } + return newEvalFloat(math.Round(arg.f*f) / f), nil + default: + v, _ := evalToFloat(arg) + if v.f == 0.0 { + return v, nil + } + + if round == 0 { + return newEvalFloat(math.Round(v.f)), nil + } + + round = clampRounding(round) + f := math.Pow(10, float64(round)) + if f == 0 { + return newEvalFloat(0), nil + } + return newEvalFloat(math.Round(v.f*f) / f), nil + } +} + +func (call *builtinRound) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + t, f := call.Arguments[0].typeof(env, fields) + if sqltypes.IsSigned(t) { + return sqltypes.Int64, f + } else if sqltypes.IsUnsigned(t) { + return sqltypes.Uint64, f + } else if sqltypes.Decimal == t { + return sqltypes.Decimal, f + } else { + return sqltypes.Float64, f + } +} + +func (expr *builtinRound) compile(c *compiler) (ctype, error) { + arg, err := expr.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip1 := c.compileNullCheck1(arg) + var skip2 *jump + + if len(expr.Arguments) == 1 { + switch arg.Type { + case sqltypes.Int64: + // No-op, already rounded + case sqltypes.Uint64: + // No-op, already rounded + case sqltypes.Float64: + c.asm.Fn_ROUND1_f() + case sqltypes.Decimal: + // We assume here the most common case here is that + // the decimal fits into an integer. + c.asm.Fn_ROUND1_d() + default: + c.asm.Convert_xf(1) + c.asm.Fn_ROUND1_f() + } + } else { + round, err := expr.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + skip2 = c.compileNullCheck1r(round) + + switch round.Type { + case sqltypes.Int64: + // No-op, already correct type + case sqltypes.Uint64: + c.asm.Clamp_u(1, math.MaxInt64) + c.asm.Convert_ui(1) + default: + c.asm.Convert_xi(1) + } + + switch arg.Type { + case sqltypes.Int64: + c.asm.Fn_ROUND2_i() + case sqltypes.Uint64: + c.asm.Fn_ROUND2_u() + case sqltypes.Float64: + c.asm.Fn_ROUND2_f() + case sqltypes.Decimal: + // We assume here the most common case here is that + // the decimal fits into an integer. + c.asm.Fn_ROUND2_d() + default: + c.asm.Convert_xf(2) + c.asm.Fn_ROUND2_f() + } + } + + c.asm.jumpDestination(skip1, skip2) + return arg, nil +} + +type builtinTruncate struct { + CallExpr +} + +var _ Expr = (*builtinRound)(nil) + +func truncateSigned(v int64, round int64) int64 { + if round >= 0 { + return v + } + if v == 0 { + return 0 + } + round = clampRounding(round) + for i := round; i < 0 && v != 0; i++ { + v /= 10 + } + + if v == 0 { + return 0 + } + + for i := round; i < 0; i++ { + v *= 10 + } + return v +} + +func truncateUnsigned(v uint64, round int64) uint64 { + if round >= 0 { + return v + } + if v == 0 { + return 0 + } + round = clampRounding(round) + for i := round; i < 0 && v != 0; i++ { + v /= 10 + } + + if v == 0 { + return 0 + } + + for i := round; i < 0; i++ { + v *= 10 + } + return v +} + +func (call *builtinTruncate) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + round := int64(0) + if len(call.Arguments) > 1 { + d, err := call.Arguments[1].eval(env) + if err != nil { + return nil, err + } + if d == nil { + return nil, nil + } + + switch d := d.(type) { + case *evalUint64: + round = int64(d.u) + if d.u > math.MaxInt64 { + round = math.MaxInt64 + } + default: + round = evalToInt64(d).i + } + } + + switch arg := arg.(type) { + case *evalInt64: + return newEvalInt64(truncateSigned(arg.i, round)), nil + case *evalUint64: + return newEvalUint64(truncateUnsigned(arg.u, round)), nil + case *evalDecimal: + if arg.dec.IsZero() { + return arg, nil + } + round = clampRounding(round) + digit := int32(round) + if digit < 0 { + digit = 0 + } + if digit > arg.length { + digit = arg.length + } + + truncated := arg.dec.Truncate(int32(round)) + if truncated.IsZero() { + return newEvalDecimalWithPrec(decimal.Zero, 0), nil + } + return newEvalDecimalWithPrec(truncated, digit), nil + case *evalFloat: + if arg.f == 0.0 { + return arg, nil + } + if round == 0 { + return newEvalFloat(math.Trunc(arg.f)), nil + } + + round = clampRounding(round) + f := math.Pow(10, float64(round)) + if f == 0 { + return newEvalFloat(0), nil + } + return newEvalFloat(math.Trunc(arg.f*f) / f), nil + default: + v, _ := evalToFloat(arg) + if v.f == 0.0 { + return v, nil + } + if round == 0 { + return newEvalFloat(math.Trunc(v.f)), nil + } + + round = clampRounding(round) + f := math.Pow(10, float64(round)) + if f == 0 { + return newEvalFloat(0), nil + } + return newEvalFloat(math.Trunc(v.f*f) / f), nil + } +} + +func (call *builtinTruncate) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + t, f := call.Arguments[0].typeof(env, fields) + if sqltypes.IsSigned(t) { + return sqltypes.Int64, f + } else if sqltypes.IsUnsigned(t) { + return sqltypes.Uint64, f } else if sqltypes.Decimal == t { return sqltypes.Decimal, f } else { return sqltypes.Float64, f } } + +func (expr *builtinTruncate) compile(c *compiler) (ctype, error) { + arg, err := expr.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip1 := c.compileNullCheck1(arg) + + round, err := expr.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + skip2 := c.compileNullCheck1r(round) + + switch round.Type { + case sqltypes.Int64: + // No-op, already correct type + case sqltypes.Uint64: + c.asm.Clamp_u(1, math.MaxInt64) + c.asm.Convert_ui(1) + default: + c.asm.Convert_xi(1) + } + + switch arg.Type { + case sqltypes.Int64: + c.asm.Fn_TRUNCATE_i() + case sqltypes.Uint64: + c.asm.Fn_TRUNCATE_u() + case sqltypes.Float64: + c.asm.Fn_TRUNCATE_f() + case sqltypes.Decimal: + // We assume here the most common case here is that + // the decimal fits into an integer. + c.asm.Fn_TRUNCATE_d() + default: + c.asm.Convert_xf(2) + c.asm.Fn_TRUNCATE_f() + } + + c.asm.jumpDestination(skip1, skip2) + return arg, nil +} + +type builtinCrc32 struct { + CallExpr +} + +var _ Expr = (*builtinCrc32)(nil) + +func (call *builtinCrc32) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + b := evalToBinary(arg) + hash := crc32.ChecksumIEEE(b.bytes) + return newEvalUint64(uint64(hash)), nil +} + +func (call *builtinCrc32) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Uint64, f +} + +func (expr *builtinCrc32) compile(c *compiler) (ctype, error) { + arg, err := expr.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch { + case arg.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.Binary, 0, false) + } + + c.asm.Fn_CRC32() + c.asm.jumpDestination(skip) + return arg, nil +} + +type builtinConv struct { + CallExpr + collate collations.ID +} + +var _ Expr = (*builtinConv)(nil) + +func upcaseASCII(b []byte) []byte { + for i, c := range b { + if c >= 'a' && c <= 'z' { + b[i] = c - 32 + } + } + return b +} + +func (call *builtinConv) eval(env *ExpressionEnv) (eval, error) { + n, err := call.Arguments[0].eval(env) + if err != nil { + return nil, err + } + from, err := call.Arguments[1].eval(env) + if err != nil { + return nil, err + } + to, err := call.Arguments[2].eval(env) + if err != nil { + return nil, err + } + + if n == nil || from == nil || to == nil { + return nil, nil + } + + fromBase := evalToInt64(from).i + toBase := evalToInt64(to).i + + if fromBase < -36 || (fromBase > -2 && fromBase < 2) || fromBase > 36 { + return nil, nil + } + if fromBase < 0 { + fromBase = -fromBase + } + + if toBase < -36 || (toBase > -2 && toBase < 2) || toBase > 36 { + return nil, nil + } + + var u uint64 + if b, ok := n.(*evalBytes); ok && b.isHexOrBitLiteral() { + nh, _ := b.toNumericHex() + u = nh.u + } else { + nStr := evalToBinary(n) + i, err := fastparse.ParseInt64(nStr.string(), int(fromBase)) + u = uint64(i) + if errors.Is(err, fastparse.ErrOverflow) { + u, _ = fastparse.ParseUint64(nStr.string(), int(fromBase)) + } + } + + var out []byte + if toBase < 0 { + out = strconv.AppendInt(out, int64(u), -int(toBase)) + } else { + out = strconv.AppendUint(out, u, int(toBase)) + } + return newEvalText(upcaseASCII(out), defaultCoercionCollation(call.collate)), nil +} + +func (call *builtinConv) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.VarChar, f | flagNullable +} + +func (expr *builtinConv) compile(c *compiler) (ctype, error) { + n, err := expr.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + from, err := expr.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + to, err := expr.Arguments[2].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck3(n, from, to) + + _ = c.compileToInt64(from, 2) + _ = c.compileToInt64(to, 1) + + t := sqltypes.VarChar + if n.Type == sqltypes.Blob || n.Type == sqltypes.TypeJSON { + t = sqltypes.Text + } + + switch { + case n.isTextual(): + default: + c.asm.Convert_xb(3, t, 0, false) + } + + if n.isHexOrBitLiteral() { + c.asm.Fn_CONV_hu(3, 2) + } else { + c.asm.Fn_CONV_bu(3, 2) + } + + col := defaultCoercionCollation(n.Col.Collation) + c.asm.Fn_CONV_uc(t, col) + c.asm.jumpDestination(skip) + + return ctype{Type: t, Col: col, Flag: flagNullable}, nil +} diff --git a/go/vt/vtgate/evalengine/fn_regexp.go b/go/vt/vtgate/evalengine/fn_regexp.go new file mode 100644 index 00000000000..2ba5b97573f --- /dev/null +++ b/go/vt/vtgate/evalengine/fn_regexp.go @@ -0,0 +1,1064 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "errors" + "strings" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql/icuregex" + icuerrors "vitess.io/vitess/go/mysql/icuregex/errors" + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +func regexpFlags(m eval, flags icuregex.RegexpFlag, f string) (icuregex.RegexpFlag, error) { + switch m := m.(type) { + case *evalBytes: + for _, b := range m.bytes { + switch b { + case 'c': + flags &= ^icuregex.CaseInsensitive + case 'i': + flags |= icuregex.CaseInsensitive + case 'm': + flags |= icuregex.Multiline + case 'n': + flags |= icuregex.DotAll + case 'u': + flags |= icuregex.UnixLines + default: + return flags, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongArguments, "Incorrect arguments to %s.", f) + } + } + default: + return flags, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongArguments, "Incorrect arguments to %s.", f) + } + + return flags, nil +} + +func occurrence(e *evalInt64, min int64) int64 { + if e.i < min { + return min + } + return e.i +} + +func returnOption(val *evalInt64, f string) (int64, error) { + switch val.i { + case 0, 1: + // Valid return options. + return val.i, nil + } + return 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongArguments, "Incorrect arguments to %s: return_option must be 1 or 0.", f) +} + +func positionInstr(val *evalInt64, limit int64) (int64, error) { + pos := val.i + if pos < 1 || pos > limit { + return 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpIndexOutOfBounds, "Index out of bounds in regular expression search.") + } + return pos, nil +} + +func position(val *evalInt64, limit int64, f string) (int64, error) { + pos := val.i + if pos < 1 { + return 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongParametersToNativeFct, "Incorrect parameters in the call to native function '%s'", f) + } + if pos-1 > limit { + return 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpIndexOutOfBounds, "Index out of bounds in regular expression search.") + } + return pos, nil +} + +func evalRegexpCollation(input, pat eval, f string) (eval, eval, collations.TypedCollation, icuregex.RegexpFlag, error) { + var typedCol collations.TypedCollation + var err error + + if inputBytes, ok := input.(*evalBytes); ok { + if patBytes, ok := pat.(*evalBytes); ok { + inputCol := inputBytes.col.Collation + patCol := patBytes.col.Collation + if (inputCol == collations.CollationBinaryID && patCol != collations.CollationBinaryID) || + (inputCol != collations.CollationBinaryID && patCol == collations.CollationBinaryID) { + env := collations.Local() + inputColName := env.LookupName(inputCol) + patColName := env.LookupName(patCol) + return nil, nil, typedCol, 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.CharacterSetMismatch, "Character set '%s' cannot be used in conjunction with '%s' in call to %s.", inputColName, patColName, f) + } + } + } + + input, pat, typedCol, err = mergeAndCoerceCollations(input, pat) + if err != nil { + return nil, nil, collations.TypedCollation{}, 0, err + } + + var flags icuregex.RegexpFlag + var collation = collations.Local().LookupName(typedCol.Collation) + if strings.Contains(collation, "_ci") { + flags |= icuregex.CaseInsensitive + } + + return input, pat, typedCol, flags, nil +} + +func compileRegexpCollation(input, pat ctype, f string) (collations.TypedCollation, icuregex.RegexpFlag, error) { + var merged collations.TypedCollation + var err error + + env := collations.Local() + if input.isTextual() && pat.isTextual() { + inputCol := input.Col.Collation + patCol := pat.Col.Collation + if (inputCol == collations.CollationBinaryID && patCol != collations.CollationBinaryID) || + (inputCol != collations.CollationBinaryID && patCol == collations.CollationBinaryID) { + inputColName := env.LookupName(inputCol) + patColName := env.LookupName(patCol) + return input.Col, 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.CharacterSetMismatch, "Character set '%s' cannot be used in conjunction with '%s' in call to %s.", inputColName, patColName, f) + } + } + + if input.Col.Collation != pat.Col.Collation { + merged, _, _, err = mergeCollations(input.Col, pat.Col, input.Type, pat.Type) + } else { + merged = input.Col + } + if err != nil { + return input.Col, 0, err + } + + var flags icuregex.RegexpFlag + if strings.Contains(env.LookupName(merged.Collation), "_ci") { + flags |= icuregex.CaseInsensitive + } + return merged, flags, nil +} + +func compileRegex(pat eval, c colldata.Charset, flags icuregex.RegexpFlag) (*icuregex.Pattern, error) { + patRunes := charset.Expand(nil, pat.ToRawBytes(), c) + + if len(patRunes) == 0 { + return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpIllegalArgument, "Illegal argument to a regular expression.") + } + + regexp, err := icuregex.Compile(patRunes, flags) + if err == nil { + return regexp, nil + } + + var compileErr *icuregex.CompileError + if errors.Is(err, icuerrors.ErrUnsupported) { + err = vterrors.NewErrorf(vtrpcpb.Code_UNIMPLEMENTED, vterrors.RegexpUnimplemented, err.Error()) + } else if errors.Is(err, icuerrors.ErrIllegalArgument) { + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpIllegalArgument, err.Error()) + } else if errors.As(err, &compileErr) { + switch compileErr.Code { + case icuregex.InternalError: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpInternal, compileErr.Error()) + case icuregex.RuleSyntax: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpRuleSyntax, compileErr.Error()) + case icuregex.BadEscapeSequence: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpBadEscapeSequence, compileErr.Error()) + case icuregex.PropertySyntax: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpRuleSyntax, compileErr.Error()) + case icuregex.Unimplemented: + err = vterrors.NewErrorf(vtrpcpb.Code_UNIMPLEMENTED, vterrors.RegexpUnimplemented, compileErr.Error()) + case icuregex.MismatchedParen: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpMismatchParen, compileErr.Error()) + case icuregex.BadInterval: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpBadInterval, compileErr.Error()) + case icuregex.MaxLtMin: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpMaxLtMin, compileErr.Error()) + case icuregex.InvalidBackRef: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpInvalidBackRef, compileErr.Error()) + case icuregex.InvalidFlag: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpInvalidFlag, compileErr.Error()) + case icuregex.LookBehindLimit: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpLookBehindLimit, compileErr.Error()) + case icuregex.MissingCloseBracket: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpMissingCloseBracket, compileErr.Error()) + case icuregex.InvalidRange: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpInvalidRange, compileErr.Error()) + case icuregex.PatternTooBig: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpPatternTooBig, compileErr.Error()) + case icuregex.InvalidCaptureGroupName: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpInvalidCaptureGroup, compileErr.Error()) + default: + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.RegexpInternal, compileErr.Error()) + } + } + + return nil, err +} + +func compileConstantRegex(c *compiler, args TupleExpr, pat, mt int, cs collations.TypedCollation, flags icuregex.RegexpFlag, f string) (*icuregex.Pattern, error) { + pattern := args[pat] + if !pattern.constant() { + return nil, c.unsupported(pattern) + } + var err error + staticEnv := EmptyExpressionEnv() + pattern, err = simplifyExpr(staticEnv, pattern) + if err != nil { + return nil, err + } + + if len(args) > mt { + fl := args[mt] + if !fl.constant() { + return nil, c.unsupported(fl) + } + fl, err = simplifyExpr(staticEnv, fl) + if err != nil { + return nil, err + } + flags, err = regexpFlags(fl.(*Literal).inner, flags, f) + if err != nil { + return nil, err + } + } + + if pattern.(*Literal).inner == nil { + return nil, c.unsupported(pattern) + } + + innerPat, err := evalToVarchar(pattern.(*Literal).inner, cs.Collation, true) + if err != nil { + return nil, err + } + + return compileRegex(innerPat, colldata.Lookup(cs.Collation).Charset(), flags) +} + +// resultCollation returns the collation to use for the result of a regexp. +// This falls back to latin1_swedish if the input collation is binary. This +// seems to be a side effect of how MySQL also works. Probably due to how it +// is using ICU and converting there. +func resultCollation(in collations.TypedCollation) collations.TypedCollation { + if in.Collation == collationBinary.Collation { + return collationRegexpFallback + } + return in +} + +type builtinRegexpLike struct { + CallExpr + Negate bool +} + +func (r *builtinRegexpLike) eval(env *ExpressionEnv) (eval, error) { + input, err := r.Arguments[0].eval(env) + if err != nil || input == nil { + return nil, err + } + + pat, err := r.Arguments[1].eval(env) + if err != nil || pat == nil { + return nil, err + } + + input, pat, typedCol, flags, err := evalRegexpCollation(input, pat, "regexp_like") + if err != nil { + return nil, err + } + collation := colldata.Lookup(typedCol.Collation) + + if len(r.Arguments) > 2 { + m, err := r.Arguments[2].eval(env) + if err != nil || m == nil { + return nil, err + } + flags, err = regexpFlags(m, flags, "regexp_like") + if err != nil { + return nil, err + } + } + + regexp, err := compileRegex(pat, collation.Charset(), flags) + if err != nil { + return nil, err + } + + inputRunes := charset.Expand(nil, input.ToRawBytes(), collation.Charset()) + m := icuregex.NewMatcher(regexp) + m.Reset(inputRunes) + + ok, err := m.Find() + if err != nil { + return nil, err + } + if r.Negate { + ok = !ok + } + return newEvalBool(ok), nil +} + +func (r *builtinRegexpLike) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := r.Arguments[0].typeof(env, fields) + _, f2 := r.Arguments[1].typeof(env, fields) + var f3 typeFlag + if len(r.Arguments) > 2 { + _, f3 = r.Arguments[2].typeof(env, fields) + } + return sqltypes.Int64, f1 | f2 | f3 | flagIsBoolean +} + +func (r *builtinRegexpLike) compileSlow(c *compiler, input, pat, fl ctype, merged collations.TypedCollation, flags icuregex.RegexpFlag, skips ...*jump) (ctype, error) { + if !pat.isTextual() || pat.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments)-1, sqltypes.VarChar, merged.Collation) + } + + c.asm.Fn_REGEXP_LIKE_slow(r.Negate, colldata.Lookup(merged.Collation).Charset(), flags, len(r.Arguments)-1) + c.asm.jumpDestination(skips...) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: input.Flag | pat.Flag | fl.Flag | flagIsBoolean}, nil +} + +func (r *builtinRegexpLike) compile(c *compiler) (ctype, error) { + input, err := r.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + var skips []*jump + skips = append(skips, c.compileNullCheckArg(input, 0)) + + pat, err := r.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(pat, 1)) + + var f ctype + + if len(r.Arguments) > 2 { + f, err = r.Arguments[2].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(f, 2)) + } + + merged, flags, err := compileRegexpCollation(input, pat, "regexp_like") + if err != nil { + return ctype{}, err + } + + if !input.isTextual() || input.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments), sqltypes.VarChar, merged.Collation) + } + + // We optimize for the case where the pattern is a constant. If not, + // we fall back to the slow path. + p, err := compileConstantRegex(c, r.Arguments, 1, 2, merged, flags, "regexp_like") + if err != nil { + return r.compileSlow(c, input, pat, f, merged, flags, skips...) + } + + c.asm.Fn_REGEXP_LIKE(icuregex.NewMatcher(p), r.Negate, colldata.Lookup(merged.Collation).Charset(), len(r.Arguments)-1) + c.asm.jumpDestination(skips...) + + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: input.Flag | pat.Flag | f.Flag | flagIsBoolean}, nil +} + +var _ Expr = (*builtinRegexpLike)(nil) + +type builtinRegexpInstr struct { + CallExpr +} + +func (r *builtinRegexpInstr) eval(env *ExpressionEnv) (eval, error) { + input, err := r.Arguments[0].eval(env) + if err != nil || input == nil { + return nil, err + } + + pat, err := r.Arguments[1].eval(env) + if err != nil || pat == nil { + return nil, err + } + + input, pat, typedCol, flags, err := evalRegexpCollation(input, pat, "regexp_instr") + if err != nil { + return nil, err + } + + var posExpr eval + if len(r.Arguments) > 2 { + posExpr, err = r.Arguments[2].eval(env) + if err != nil || posExpr == nil { + return nil, err + } + } + + var occExpr eval + if len(r.Arguments) > 3 { + occExpr, err = r.Arguments[3].eval(env) + if err != nil || occExpr == nil { + return nil, err + } + } + + var retExpr eval + if len(r.Arguments) > 4 { + retExpr, err = r.Arguments[4].eval(env) + if err != nil || retExpr == nil { + return nil, err + } + } + + var mtExpr eval + if len(r.Arguments) > 5 { + mtExpr, err = r.Arguments[5].eval(env) + if err != nil || mtExpr == nil { + return nil, err + } + } + + collation := colldata.Lookup(typedCol.Collation) + + pos := int64(1) + occ := int64(1) + returnOpt := int64(0) + + if mtExpr != nil { + flags, err = regexpFlags(mtExpr, flags, "regexp_instr") + if err != nil { + return nil, err + } + } + + regexp, err := compileRegex(pat, collation.Charset(), flags) + if err != nil { + return nil, err + } + + inputRunes := charset.Expand(nil, input.ToRawBytes(), collation.Charset()) + if len(inputRunes) == 0 { + return newEvalInt64(0), nil + } + + if posExpr != nil { + pos, err = positionInstr(evalToInt64(posExpr), int64(len(inputRunes))) + if err != nil { + return nil, err + } + } + + if occExpr != nil { + occ = occurrence(evalToInt64(occExpr), occ) + } + + if retExpr != nil { + returnOpt, err = returnOption(evalToInt64(retExpr), "regexp_instr") + if err != nil { + return nil, err + } + } + + m := icuregex.NewMatcher(regexp) + m.Reset(inputRunes[pos-1:]) + + found := false + for i := int64(0); i < occ; i++ { + found, err = m.Find() + if err != nil { + return nil, err + } + if !found { + break + } + } + if !found { + return newEvalInt64(0), nil + } + if returnOpt == 0 { + return newEvalInt64(int64(m.Start()) + pos), nil + } + return newEvalInt64(int64(m.End()) + pos), nil +} + +func (r *builtinRegexpInstr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := r.Arguments[0].typeof(env, fields) + _, f2 := r.Arguments[1].typeof(env, fields) + var f3, f4, f5, f6 typeFlag + if len(r.Arguments) > 2 { + _, f3 = r.Arguments[2].typeof(env, fields) + } + if len(r.Arguments) > 3 { + _, f4 = r.Arguments[3].typeof(env, fields) + } + if len(r.Arguments) > 4 { + _, f5 = r.Arguments[4].typeof(env, fields) + } + if len(r.Arguments) > 5 { + _, f6 = r.Arguments[5].typeof(env, fields) + } + return sqltypes.Int64, f1 | f2 | f3 | f4 | f5 | f6 +} + +func (r *builtinRegexpInstr) compileSlow(c *compiler, input, pat, pos, occ, returnOption, matchType ctype, merged collations.TypedCollation, flags icuregex.RegexpFlag, skips ...*jump) (ctype, error) { + if !pat.isTextual() || pat.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments)-1, sqltypes.VarChar, merged.Collation) + } + + c.asm.Fn_REGEXP_INSTR_slow(colldata.Lookup(merged.Collation).Charset(), flags, len(r.Arguments)-1) + c.asm.jumpDestination(skips...) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: input.Flag | pat.Flag | pos.Flag | occ.Flag | returnOption.Flag | matchType.Flag}, nil +} + +func (r *builtinRegexpInstr) compile(c *compiler) (ctype, error) { + input, err := r.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + var skips []*jump + skips = append(skips, c.compileNullCheckArg(input, 0)) + + pat, err := r.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(pat, 1)) + + var pos ctype + if len(r.Arguments) > 2 { + pos, err = r.Arguments[2].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(pos, 2)) + _ = c.compileToInt64(pos, 1) + } + + var occ ctype + if len(r.Arguments) > 3 { + occ, err = r.Arguments[3].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(occ, 3)) + _ = c.compileToInt64(occ, 1) + } + + var returnOpt ctype + if len(r.Arguments) > 4 { + returnOpt, err = r.Arguments[4].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(returnOpt, 4)) + _ = c.compileToInt64(returnOpt, 1) + } + + var matchType ctype + if len(r.Arguments) > 5 { + matchType, err = r.Arguments[5].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(matchType, 5)) + switch { + case matchType.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + } + + merged, flags, err := compileRegexpCollation(input, pat, "regexp_instr") + if err != nil { + return ctype{}, err + } + + if !input.isTextual() || input.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments), sqltypes.VarChar, merged.Collation) + } + + // We optimize for the case where the pattern is a constant. If not, + // we fall back to the slow path. + p, err := compileConstantRegex(c, r.Arguments, 1, 5, merged, flags, "regexp_instr") + if err != nil { + return r.compileSlow(c, input, pat, pos, occ, returnOpt, matchType, merged, flags, skips...) + } + + c.asm.Fn_REGEXP_INSTR(icuregex.NewMatcher(p), colldata.Lookup(merged.Collation).Charset(), len(r.Arguments)-1) + c.asm.jumpDestination(skips...) + + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: input.Flag | pat.Flag | flagIsBoolean}, nil +} + +var _ Expr = (*builtinRegexpInstr)(nil) + +type builtinRegexpSubstr struct { + CallExpr +} + +func (r *builtinRegexpSubstr) eval(env *ExpressionEnv) (eval, error) { + input, err := r.Arguments[0].eval(env) + if err != nil || input == nil { + return nil, err + } + + pat, err := r.Arguments[1].eval(env) + if err != nil || pat == nil { + return nil, err + } + + input, pat, typedCol, flags, err := evalRegexpCollation(input, pat, "regexp_substr") + if err != nil { + return nil, err + } + + var posExpr eval + // For some reason this gets checked before NULL checks of the other values + if len(r.Arguments) > 2 { + posExpr, err = r.Arguments[2].eval(env) + if err != nil || posExpr == nil { + return nil, err + } + } + + var occExpr eval + if len(r.Arguments) > 3 { + occExpr, err = r.Arguments[3].eval(env) + if err != nil || occExpr == nil { + return nil, err + } + } + + var mtExpr eval + if len(r.Arguments) > 4 { + mtExpr, err = r.Arguments[4].eval(env) + if err != nil || mtExpr == nil { + return nil, err + } + } + + collation := colldata.Lookup(typedCol.Collation) + pos := int64(1) + occ := int64(1) + inputRunes := charset.Expand(nil, input.ToRawBytes(), collation.Charset()) + + if posExpr != nil { + pos, err = position(evalToInt64(posExpr), int64(len(inputRunes)), "regexp_substr") + if err != nil { + return nil, err + } + + } + + if occExpr != nil { + occ = occurrence(evalToInt64(occExpr), occ) + } + + if mtExpr != nil { + flags, err = regexpFlags(mtExpr, flags, "regexp_substr") + if err != nil { + return nil, err + } + } + + regexp, err := compileRegex(pat, collation.Charset(), flags) + if err != nil { + return nil, err + } + + m := icuregex.NewMatcher(regexp) + m.Reset(inputRunes[pos-1:]) + + found := false + for i := int64(0); i < occ; i++ { + found, err = m.Find() + if err != nil { + return nil, err + } + if !found { + break + } + } + if !found { + return nil, nil + } + out := inputRunes[int64(m.Start())+pos-1 : int64(m.End())+pos-1] + b := charset.Collapse(nil, out, collation.Charset()) + return newEvalText(b, resultCollation(typedCol)), nil +} + +func (r *builtinRegexpSubstr) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := r.Arguments[0].typeof(env, fields) + _, f2 := r.Arguments[1].typeof(env, fields) + var f3, f4, f5 typeFlag + if len(r.Arguments) > 2 { + _, f3 = r.Arguments[2].typeof(env, fields) + } + if len(r.Arguments) > 3 { + _, f4 = r.Arguments[3].typeof(env, fields) + } + if len(r.Arguments) > 4 { + _, f5 = r.Arguments[4].typeof(env, fields) + } + return sqltypes.VarChar, f1 | f2 | f3 | f4 | f5 +} + +func (r *builtinRegexpSubstr) compileSlow(c *compiler, input, pat, pos, occ, matchType ctype, merged collations.TypedCollation, flags icuregex.RegexpFlag, skips ...*jump) (ctype, error) { + if !pat.isTextual() || pat.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments)-1, sqltypes.VarChar, merged.Collation) + } + + c.asm.Fn_REGEXP_SUBSTR_slow(merged, flags, len(r.Arguments)-1) + c.asm.jumpDestination(skips...) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: input.Flag | pat.Flag | pos.Flag | occ.Flag | matchType.Flag}, nil +} + +func (r *builtinRegexpSubstr) compile(c *compiler) (ctype, error) { + input, err := r.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + var skips []*jump + skips = append(skips, c.compileNullCheckArg(input, 0)) + + pat, err := r.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(pat, 1)) + + var pos ctype + if len(r.Arguments) > 2 { + pos, err = r.Arguments[2].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(pos, 2)) + _ = c.compileToInt64(pos, 1) + } + + var occ ctype + if len(r.Arguments) > 3 { + occ, err = r.Arguments[3].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(occ, 3)) + _ = c.compileToInt64(occ, 1) + } + + var matchType ctype + if len(r.Arguments) > 4 { + matchType, err = r.Arguments[4].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(matchType, 4)) + switch { + case matchType.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + } + + merged, flags, err := compileRegexpCollation(input, pat, "regexp_substr") + if err != nil { + return ctype{}, err + } + + if !input.isTextual() || input.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments), sqltypes.VarChar, merged.Collation) + } + + // We optimize for the case where the pattern is a constant. If not, + // we fall back to the slow path. + p, err := compileConstantRegex(c, r.Arguments, 1, 4, merged, flags, "regexp_substr") + if err != nil { + return r.compileSlow(c, input, pat, pos, occ, matchType, merged, flags, skips...) + } + + c.asm.Fn_REGEXP_SUBSTR(icuregex.NewMatcher(p), merged, len(r.Arguments)-1) + c.asm.jumpDestination(skips...) + + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: input.Flag | pat.Flag | pos.Flag | occ.Flag | matchType.Flag}, nil +} + +var _ Expr = (*builtinRegexpSubstr)(nil) + +type builtinRegexpReplace struct { + CallExpr +} + +func regexpReplace(m *icuregex.Matcher, inputRunes, replRunes []rune, pos, occ int64, c colldata.Charset) ([]byte, bool, error) { + var err error + found := false + if occ > 0 { + for i := int64(0); i < occ; i++ { + found, err = m.Find() + if err != nil { + return nil, false, err + } + if !found { + break + } + } + if !found { + return nil, false, nil + } + + out := append(inputRunes[:int64(m.Start())+pos-1], replRunes...) + out = append(out, inputRunes[int64(m.End())+pos-1:]...) + return charset.Collapse(nil, out, c), true, nil + } + + found, err = m.Find() + if err != nil { + return nil, false, err + } + + if !found { + return nil, false, nil + } + + start := int64(m.Start()) + pos - 1 + out := append(inputRunes[:start], replRunes...) + end := int64(m.End()) + pos - 1 + for { + found, err = m.Find() + if err != nil { + return nil, false, err + } + if !found { + break + } + nextStart := int64(m.Start()) + pos - 1 + out = append(out, inputRunes[end:nextStart]...) + out = append(out, replRunes...) + end = int64(m.End()) + pos - 1 + } + + out = append(out, inputRunes[end:]...) + return charset.Collapse(nil, out, c), true, nil +} + +func (r *builtinRegexpReplace) eval(env *ExpressionEnv) (eval, error) { + input, err := r.Arguments[0].eval(env) + if err != nil || input == nil { + return nil, err + } + + pat, err := r.Arguments[1].eval(env) + if err != nil || pat == nil { + return nil, err + } + + replArg, err := r.Arguments[2].eval(env) + if err != nil || replArg == nil { + return nil, err + } + + input, pat, typedCol, flags, err := evalRegexpCollation(input, pat, "regexp_replace") + if err != nil { + return nil, err + } + + var posExpr eval + // For some reason this gets checked before NULL checks of the other values + if len(r.Arguments) > 3 { + posExpr, err = r.Arguments[3].eval(env) + if err != nil || posExpr == nil { + return nil, err + } + } + + var occExpr eval + if len(r.Arguments) > 4 { + occExpr, err = r.Arguments[4].eval(env) + if err != nil || occExpr == nil { + return nil, err + } + } + + var mtExpr eval + if len(r.Arguments) > 5 { + mtExpr, err = r.Arguments[5].eval(env) + if err != nil || mtExpr == nil { + return nil, err + } + } + + collation := colldata.Lookup(typedCol.Collation) + + repl, ok := replArg.(*evalBytes) + if !ok { + repl, err = evalToVarchar(replArg, typedCol.Collation, true) + if err != nil { + return nil, err + } + } + pos := int64(1) + occ := int64(0) + inputRunes := charset.Expand(nil, input.ToRawBytes(), collation.Charset()) + replRunes := charset.Expand(nil, repl.ToRawBytes(), colldata.Lookup(repl.col.Collation).Charset()) + + if posExpr != nil { + pos, err = position(evalToInt64(posExpr), int64(len(inputRunes)), "regexp_replace") + if err != nil { + return nil, err + } + } + + if occExpr != nil { + occ = occurrence(evalToInt64(occExpr), occ) + } + + if mtExpr != nil { + flags, err = regexpFlags(mtExpr, flags, "regexp_replace") + if err != nil { + return nil, err + } + } + + regexp, err := compileRegex(pat, collation.Charset(), flags) + if err != nil { + return nil, err + } + + m := icuregex.NewMatcher(regexp) + m.Reset(inputRunes[pos-1:]) + + bytes, replaced, err := regexpReplace(m, inputRunes, replRunes, pos, occ, collation.Charset()) + if err != nil { + return nil, err + } + if !replaced { + return newEvalRaw(sqltypes.Text, input.ToRawBytes(), resultCollation(typedCol)), nil + } + return newEvalRaw(sqltypes.Text, bytes, resultCollation(typedCol)), nil +} + +func (r *builtinRegexpReplace) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := r.Arguments[0].typeof(env, fields) + _, f2 := r.Arguments[1].typeof(env, fields) + _, f3 := r.Arguments[2].typeof(env, fields) + var f4, f5, f6 typeFlag + if len(r.Arguments) > 3 { + _, f4 = r.Arguments[3].typeof(env, fields) + } + if len(r.Arguments) > 4 { + _, f5 = r.Arguments[4].typeof(env, fields) + } + if len(r.Arguments) > 5 { + _, f6 = r.Arguments[5].typeof(env, fields) + } + return sqltypes.Text, f1 | f2 | f3 | f4 | f5 | f6 +} + +func (r *builtinRegexpReplace) compileSlow(c *compiler, input, pat, repl, pos, occ, matchType ctype, merged collations.TypedCollation, flags icuregex.RegexpFlag, skips ...*jump) (ctype, error) { + if !pat.isTextual() || pat.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments)-1, sqltypes.VarChar, merged.Collation) + } + + c.asm.Fn_REGEXP_REPLACE_slow(merged, flags, len(r.Arguments)-1) + c.asm.jumpDestination(skips...) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: input.Flag | pat.Flag | repl.Flag | pos.Flag | occ.Flag | matchType.Flag}, nil +} + +func (r *builtinRegexpReplace) compile(c *compiler) (ctype, error) { + input, err := r.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + var skips []*jump + skips = append(skips, c.compileNullCheckArg(input, 0)) + + pat, err := r.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(pat, 1)) + + repl, err := r.Arguments[2].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(repl, 2)) + + var pos ctype + if len(r.Arguments) > 3 { + pos, err = r.Arguments[3].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(pos, 3)) + _ = c.compileToInt64(pos, 1) + } + + var occ ctype + if len(r.Arguments) > 4 { + occ, err = r.Arguments[4].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(occ, 4)) + _ = c.compileToInt64(occ, 1) + } + + var matchType ctype + if len(r.Arguments) > 5 { + matchType, err = r.Arguments[5].compile(c) + if err != nil { + return ctype{}, err + } + skips = append(skips, c.compileNullCheckArg(matchType, 5)) + switch { + case matchType.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + } + + merged, flags, err := compileRegexpCollation(input, pat, "regexp_replace") + if err != nil { + return ctype{}, err + } + + if !input.isTextual() || input.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments), sqltypes.VarChar, merged.Collation) + } + + if !repl.isTextual() || repl.Col.Collation != merged.Collation { + c.asm.Convert_xce(len(r.Arguments)-2, sqltypes.VarChar, merged.Collation) + } + + // We optimize for the case where the pattern is a constant. If not, + // we fall back to the slow path. + p, err := compileConstantRegex(c, r.Arguments, 1, 5, merged, flags, "regexp_replace") + if err != nil { + return r.compileSlow(c, input, pat, repl, pos, occ, matchType, merged, flags, skips...) + } + + c.asm.Fn_REGEXP_REPLACE(icuregex.NewMatcher(p), merged, len(r.Arguments)-1) + c.asm.jumpDestination(skips...) + + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: input.Flag | pat.Flag | repl.Flag | pos.Flag | occ.Flag | matchType.Flag}, nil +} + +var _ Expr = (*builtinRegexpReplace)(nil) diff --git a/go/vt/vtgate/evalengine/fn_string.go b/go/vt/vtgate/evalengine/fn_string.go index ba9e1c8c640..b34618b00d2 100644 --- a/go/vt/vtgate/evalengine/fn_string.go +++ b/go/vt/vtgate/evalengine/fn_string.go @@ -21,15 +21,19 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" ) type ( builtinChangeCase struct { CallExpr - upcase bool + upcase bool + collate collations.ID } builtinCharLength struct { @@ -44,6 +48,11 @@ type ( CallExpr } + builtinOrd struct { + CallExpr + collate collations.ID + } + builtinBitLength struct { CallExpr } @@ -53,20 +62,47 @@ type ( } builtinWeightString struct { - String Expr + Expr Expr Cast string Len int HasLen bool } + + builtinLeftRight struct { + CallExpr + collate collations.ID + left bool + } + + builtinPad struct { + CallExpr + collate collations.ID + left bool + } + + builtinStrcmp struct { + CallExpr + collate collations.ID + } + + builtinTrim struct { + CallExpr + collate collations.ID + trim sqlparser.TrimType + } ) var _ Expr = (*builtinChangeCase)(nil) var _ Expr = (*builtinCharLength)(nil) var _ Expr = (*builtinLength)(nil) var _ Expr = (*builtinASCII)(nil) +var _ Expr = (*builtinOrd)(nil) var _ Expr = (*builtinBitLength)(nil) var _ Expr = (*builtinCollation)(nil) var _ Expr = (*builtinWeightString)(nil) +var _ Expr = (*builtinLeftRight)(nil) +var _ Expr = (*builtinPad)(nil) +var _ Expr = (*builtinTrim)(nil) func (call *builtinChangeCase) eval(env *ExpressionEnv) (eval, error) { arg, err := call.arg1(env) @@ -79,11 +115,11 @@ func (call *builtinChangeCase) eval(env *ExpressionEnv) (eval, error) { return nil, nil case evalNumeric: - return evalToVarchar(e, env.DefaultCollation, false) + return evalToVarchar(e, call.collate, false) case *evalBytes: - coll := e.col.Collation.Get() - csa, ok := coll.(collations.CaseAwareCollation) + coll := colldata.Lookup(e.col.Collation) + csa, ok := coll.(colldata.CaseAwareCollation) if !ok { return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "not implemented") } @@ -100,11 +136,31 @@ func (call *builtinChangeCase) eval(env *ExpressionEnv) (eval, error) { } } -func (call *builtinChangeCase) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f := call.Arguments[0].typeof(env) +func (call *builtinChangeCase) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) return sqltypes.VarChar, f } +func (call *builtinChangeCase) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(str) + + switch { + case str.isTextual(): + default: + c.asm.Convert_xc(1, sqltypes.VarChar, c.cfg.Collation, 0, false) + } + + c.asm.Fn_LUCASE(call.upcase) + c.asm.jumpDestination(skip) + + return str, nil +} + func (call *builtinCharLength) eval(env *ExpressionEnv) (eval, error) { arg, err := call.arg1(env) if err != nil { @@ -117,7 +173,7 @@ func (call *builtinCharLength) eval(env *ExpressionEnv) (eval, error) { if sqltypes.IsBinary(e.SQLType()) { return newEvalInt64(int64(len(e.bytes))), nil } - coll := e.col.Collation.Get() + coll := colldata.Lookup(e.col.Collation) count := charset.Length(coll.Charset(), e.bytes) return newEvalInt64(int64(count)), nil default: @@ -125,11 +181,15 @@ func (call *builtinCharLength) eval(env *ExpressionEnv) (eval, error) { } } -func (call *builtinCharLength) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f := call.Arguments[0].typeof(env) +func (call *builtinCharLength) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) return sqltypes.Int64, f } +func (call *builtinCharLength) compile(c *compiler) (ctype, error) { + return c.compileFn_length(call.Arguments[0], c.asm.Fn_CHAR_LENGTH) +} + func (call *builtinLength) eval(env *ExpressionEnv) (eval, error) { arg, err := call.arg1(env) if err != nil { @@ -141,11 +201,15 @@ func (call *builtinLength) eval(env *ExpressionEnv) (eval, error) { return newEvalInt64(int64(len(arg.ToRawBytes()))), nil } -func (call *builtinLength) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f := call.Arguments[0].typeof(env) +func (call *builtinLength) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) return sqltypes.Int64, f } +func (call *builtinLength) compile(c *compiler) (ctype, error) { + return c.compileFn_length(call.Arguments[0], c.asm.Fn_LENGTH) +} + func (call *builtinBitLength) eval(env *ExpressionEnv) (eval, error) { arg, err := call.arg1(env) if err != nil { @@ -157,11 +221,15 @@ func (call *builtinBitLength) eval(env *ExpressionEnv) (eval, error) { return newEvalInt64(int64(len(arg.ToRawBytes())) * 8), nil } -func (call *builtinBitLength) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f := call.Arguments[0].typeof(env) +func (call *builtinBitLength) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) return sqltypes.Int64, f } +func (call *builtinBitLength) compile(c *compiler) (ctype, error) { + return c.compileFn_length(call.Arguments[0], c.asm.Fn_BIT_LENGTH) +} + func (call *builtinASCII) eval(env *ExpressionEnv) (eval, error) { arg, err := call.arg1(env) if err != nil { @@ -181,11 +249,91 @@ func (call *builtinASCII) eval(env *ExpressionEnv) (eval, error) { return newEvalInt64(int64(b.bytes[0])), nil } -func (call *builtinASCII) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f := call.Arguments[0].typeof(env) +func (call *builtinASCII) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Int64, f +} + +func (call *builtinASCII) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(str) + + switch { + case str.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + + c.asm.Fn_ASCII() + c.asm.jumpDestination(skip) + + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: str.Flag}, nil +} + +func charOrd(b []byte, coll collations.ID) int64 { + if len(b) == 0 { + return 0 + } + cs := colldata.Lookup(coll).Charset() + _, l := cs.DecodeRune(b) + var r int64 + for i := 0; i < l; i++ { + r = (r << 8) | int64(b[i]) + } + return r +} + +func (call *builtinOrd) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + c, ok := arg.(*evalBytes) + if !ok { + c, err = evalToVarchar(arg, call.collate, false) + if err != nil { + return nil, err + } + } + + return newEvalInt64(charOrd(c.bytes, c.col.Collation)), nil +} + +func (call *builtinOrd) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) return sqltypes.Int64, f } +func (call *builtinOrd) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(str) + + col := call.collate + switch { + case str.isTextual(): + col = str.Col.Collation + default: + c.asm.Convert_xc(1, sqltypes.VarChar, call.collate, 0, false) + } + + c.asm.Fn_ORD(col) + c.asm.jumpDestination(skip) + + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: str.Flag}, nil +} + // maxRepeatLength is the maximum number of times a string can be repeated. // This is based on how MySQL behaves here. The maximum value in MySQL is // actually based on `max_allowed_packet`. The value here is the maximum @@ -202,6 +350,7 @@ const maxRepeatLength = 1073741824 type builtinRepeat struct { CallExpr + collate collations.ID } func (call *builtinRepeat) eval(env *ExpressionEnv) (eval, error) { @@ -215,24 +364,24 @@ func (call *builtinRepeat) eval(env *ExpressionEnv) (eval, error) { text, ok := arg1.(*evalBytes) if !ok { - text, err = evalToVarchar(arg1, env.DefaultCollation, true) + text, err = evalToVarchar(arg1, call.collate, true) if err != nil { return nil, err } } - repeat := evalToNumeric(arg2).toInt64().i + repeat := evalToInt64(arg2).i if repeat < 0 { repeat = 0 } - if !checkMaxLength(int64(len(text.bytes)), repeat) { + if !validMaxLength(int64(len(text.bytes)), repeat) { return nil, nil } return newEvalText(bytes.Repeat(text.bytes, int(repeat)), text.col), nil } -func checkMaxLength(len, repeat int64) bool { +func validMaxLength(len, repeat int64) bool { if repeat <= 0 { return true } @@ -243,76 +392,914 @@ func checkMaxLength(len, repeat int64) bool { return len*repeat <= maxRepeatLength } -func (call *builtinRepeat) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f1 := call.Arguments[0].typeof(env) +func (call *builtinRepeat) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := call.Arguments[0].typeof(env, fields) // typecheck the right-hand argument but ignore its flags - call.Arguments[1].typeof(env) + call.Arguments[1].typeof(env, fields) return sqltypes.VarChar, f1 } +func (expr *builtinRepeat) compile(c *compiler) (ctype, error) { + str, err := expr.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + repeat, err := expr.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck2(str, repeat) + + switch { + case str.isTextual(): + default: + c.asm.Convert_xc(2, sqltypes.VarChar, c.cfg.Collation, 0, false) + } + _ = c.compileToInt64(repeat, 1) + + c.asm.Fn_REPEAT() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.VarChar, Col: str.Col, Flag: flagNullable}, nil +} + func (c *builtinCollation) eval(env *ExpressionEnv) (eval, error) { arg, err := c.arg1(env) if err != nil { return nil, err } - col := evalCollation(arg).Collation.Get() + col := evalCollation(arg) - // the collation of a `COLLATION` expr is hardcoded to `utf8_general_ci`, + // the collation of a `COLLATION` expr is hardcoded to `utf8mb3_general_ci`, // not to the default collation of our connection. this is probably a bug in MySQL, but we match it - return newEvalText([]byte(col.Name()), collations.TypedCollation{ - Collation: collations.CollationUtf8ID, - Coercibility: collations.CoerceImplicit, - Repertoire: collations.RepertoireASCII, - }), nil + return newEvalText([]byte(collations.Local().LookupName(col.Collation)), collationUtf8mb3), nil } -func (*builtinCollation) typeof(_ *ExpressionEnv) (sqltypes.Type, typeFlag) { +func (*builtinCollation) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { return sqltypes.VarChar, 0 } +func (expr *builtinCollation) compile(c *compiler) (ctype, error) { + _, err := expr.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.asm.jumpFrom() + + c.asm.Fn_COLLATION(collationUtf8mb3) + c.asm.jumpDestination(skip) + + return ctype{Type: sqltypes.VarChar, Col: collationUtf8mb3}, nil +} + func (c *builtinWeightString) callable() []Expr { - return []Expr{c.String} + return []Expr{c.Expr} } -func (c *builtinWeightString) typeof(env *ExpressionEnv) (sqltypes.Type, typeFlag) { - _, f := c.String.typeof(env) +func (c *builtinWeightString) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + tt, f := c.Expr.typeof(env, fields) + switch tt { + case sqltypes.Blob, sqltypes.Text, sqltypes.TypeJSON: + return sqltypes.Blob, f + } return sqltypes.VarBinary, f } func (c *builtinWeightString) eval(env *ExpressionEnv) (eval, error) { - var ( - tc collations.TypedCollation - text []byte - weights []byte - length = c.Len - ) + var weights []byte - str, err := c.String.eval(env) + input, err := c.Expr.eval(env) if err != nil { return nil, err } - switch str := str.(type) { - case *evalInt64, *evalUint64: - // when calling WEIGHT_STRING with an integral value, MySQL returns the - // internal sort key that would be used in an InnoDB table... we do not - // support that - return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "%s: %s", ErrEvaluatedExprNotSupported, FormatExpr(c)) + typ := sqltypes.VarBinary + + if c.Cast == "binary" { + switch input.SQLType() { + case sqltypes.Blob, sqltypes.Text, sqltypes.TypeJSON: + typ = sqltypes.Blob + } + + weights, _, err = evalWeightString(weights, evalToBinary(input), c.Len, 0) + if err != nil { + return nil, err + } + return newEvalRaw(typ, weights, collationBinary), nil + } + + switch val := input.(type) { + case *evalInt64, *evalUint64, *evalTemporal: + weights, _, err = evalWeightString(weights, val, 0, 0) + case *evalJSON: + // JSON doesn't actually use a sortable weight string for this function, but + // returns the weight string directly for the string based representation. This + // means that ordering etc. is not correct for JSON values, but that's how MySQL + // works here for this function. We still have the internal weight string logic + // that can order these correctly. + out, err := evalToVarchar(val, collationJSON.Collation, false) + if err != nil { + return nil, err + } + weights, _, err = evalWeightString(weights, out, 0, 0) + if err != nil { + return nil, err + } + typ = sqltypes.Blob case *evalBytes: - text = str.bytes - tc = str.col + switch val.SQLType() { + case sqltypes.Blob, sqltypes.Text: + typ = sqltypes.Blob + } + if val.isBinary() { + weights, _, err = evalWeightString(weights, val, 0, 0) + } else { + var strLen int + if c.Cast == "char" { + strLen = c.Len + } + weights, _, err = evalWeightString(weights, val, strLen, 0) + } default: return nil, nil } - if c.Cast == "binary" { - tc = collationBinary - weights = make([]byte, 0, c.Len) - length = collations.PadToMax + if err != nil { + return nil, err + } + + return newEvalRaw(typ, weights, collationBinary), nil +} + +func (call *builtinWeightString) compile(c *compiler) (ctype, error) { + str, err := call.Expr.compile(c) + if err != nil { + return ctype{}, err + } + + var flag typeFlag + if str.Flag&flagNullable != 0 { + flag = flag | flagNullable + } + + typ := sqltypes.VarBinary + skip := c.compileNullCheck1(str) + if call.Cast == "binary" { + if !sqltypes.IsBinary(str.Type) { + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + switch str.Type { + case sqltypes.Blob, sqltypes.Text, sqltypes.TypeJSON: + typ = sqltypes.Blob + } + + c.asm.Fn_WEIGHT_STRING(typ, call.Len) + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.VarBinary, Flag: flagNullable | flagNull, Col: collationBinary}, nil + } + + switch str.Type { + case sqltypes.Int64, sqltypes.Uint64, sqltypes.Date, sqltypes.Datetime, sqltypes.Timestamp, sqltypes.Time, sqltypes.VarBinary, sqltypes.Binary, sqltypes.Blob: + if str.Type == sqltypes.Blob { + typ = sqltypes.Blob + } + c.asm.Fn_WEIGHT_STRING(typ, 0) + case sqltypes.TypeJSON: + typ = sqltypes.Blob + c.asm.Convert_xce(1, sqltypes.VarChar, collationJSON.Collation) + c.asm.Fn_WEIGHT_STRING(typ, 0) + case sqltypes.VarChar, sqltypes.Char, sqltypes.Text: + if str.Type == sqltypes.Text { + typ = sqltypes.Blob + } + var strLen int + if call.Cast == "char" { + strLen = call.Len + } + c.asm.Fn_WEIGHT_STRING(typ, strLen) + + default: + c.asm.SetNull(1) + flag = flag | flagNull | flagNullable + } + + c.asm.jumpDestination(skip) + return ctype{Type: typ, Flag: flag, Col: collationBinary}, nil +} + +func (call builtinLeftRight) eval(env *ExpressionEnv) (eval, error) { + str, l, err := call.arg2(env) + if err != nil { + return nil, err + } + if str == nil || l == nil { + return nil, nil + } + + text, ok := str.(*evalBytes) + if !ok { + text, err = evalToVarchar(str, call.collate, true) + if err != nil { + return nil, err + } + } + + length := evalToInt64(l).i + if length <= 0 { + return newEvalText(nil, text.col), nil + } + + // LEFT / RIGHT operates on characters, not bytes + cs := colldata.Lookup(text.col.Collation).Charset() + strLen := charset.Length(cs, text.bytes) + + if strLen <= int(length) { + return newEvalText(text.bytes, text.col), nil + } + + var res []byte + if call.left { + res = charset.Slice(cs, text.bytes, 0, int(length)) + } else { + res = charset.Slice(cs, text.bytes, strLen-int(length), strLen) + } + return newEvalText(res, text.col), nil +} + +func (call builtinLeftRight) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := call.Arguments[0].typeof(env, fields) + return sqltypes.VarChar, f1 +} + +func (call builtinLeftRight) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + l, err := call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck2(str, l) + + col := defaultCoercionCollation(c.cfg.Collation) + switch { + case str.isTextual(): + col = str.Col + default: + c.asm.Convert_xc(2, sqltypes.VarChar, col.Collation, 0, false) + } + _ = c.compileToInt64(l, 1) + + if call.left { + c.asm.Fn_LEFT(col) + } else { + c.asm.Fn_RIGHT(col) + } + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.VarChar, Col: col, Flag: flagNullable}, nil +} + +func (call builtinPad) eval(env *ExpressionEnv) (eval, error) { + str, l, p, err := call.arg3(env) + if err != nil { + return nil, err + } + + if str == nil || l == nil || p == nil { + return nil, nil + } + + text, ok := str.(*evalBytes) + if !ok { + text, err = evalToVarchar(str, call.collate, true) + if err != nil { + return nil, err + } + } + + cs := colldata.Lookup(text.col.Collation).Charset() + pad, ok := p.(*evalBytes) + if !ok || colldata.Lookup(pad.col.Collation).Charset() != cs { + pad, err = evalToVarchar(p, text.col.Collation, true) + if err != nil { + return nil, err + } + } + + length := evalToInt64(l).i + if length < 0 { + return nil, nil + } + + if !validMaxLength(int64(len(pad.bytes)), length) { + return nil, nil + } + + // LPAD / RPAD operates on characters, not bytes + strLen := charset.Length(cs, text.bytes) + + if strLen >= int(length) { + // If the existing string is longer than the requested padding, + // MySQL truncates the string to the requested padding length. + return newEvalText(charset.Slice(cs, text.bytes, 0, int(length)), text.col), nil + } + + runeLen := charset.Length(cs, pad.bytes) + if runeLen == 0 { + return newEvalText(nil, text.col), nil + } + + repeat := (int(length) - strLen) / runeLen + remainder := (int(length) - strLen) % runeLen + + var res []byte + if !call.left { + res = text.bytes + } + + res = append(res, bytes.Repeat(pad.bytes, repeat)...) + if remainder > 0 { + res = append(res, charset.Slice(cs, pad.bytes, 0, remainder)...) + } + + if call.left { + res = append(res, text.bytes...) + } + + return newEvalText(res, text.col), nil +} + +func (call builtinPad) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := call.Arguments[0].typeof(env, fields) + return sqltypes.VarChar, f1 +} + +func (call builtinPad) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err } - collation := tc.Collation.Get() - weights = collation.WeightString(weights, text, length) - return newEvalBinary(weights), nil + l, err := call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + pad, err := call.Arguments[2].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck3(str, l, pad) + + col := defaultCoercionCollation(c.cfg.Collation) + switch { + case str.isTextual(): + col = str.Col + default: + c.asm.Convert_xce(3, sqltypes.VarChar, col.Collation) + } + _ = c.compileToInt64(l, 2) + + switch { + case pad.isTextual(): + fromCharset := colldata.Lookup(pad.Col.Collation).Charset() + toCharset := colldata.Lookup(col.Collation).Charset() + if fromCharset != toCharset && !toCharset.IsSuperset(fromCharset) { + c.asm.Convert_xce(1, sqltypes.VarChar, col.Collation) + } + default: + c.asm.Convert_xce(1, sqltypes.VarChar, col.Collation) + } + + if call.left { + c.asm.Fn_LPAD(col) + } else { + c.asm.Fn_RPAD(col) + } + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.VarChar, Col: col}, nil +} + +func strcmpCollate(left, right []byte, col collations.ID) int64 { + cmp := colldata.Lookup(col).Collate(left, right, false) + switch { + case cmp == 0: + return 0 + case cmp > 0: + return 1 + default: + return -1 + } +} + +func (l *builtinStrcmp) eval(env *ExpressionEnv) (eval, error) { + left, err := l.Arguments[0].eval(env) + if left == nil || err != nil { + return nil, err + } + + right, err := l.Arguments[1].eval(env) + if right == nil || err != nil { + return nil, err + } + + if _, ok := left.(evalNumeric); ok { + return newEvalInt64(strcmpCollate(left.ToRawBytes(), right.ToRawBytes(), collationNumeric.Collation)), nil + } + if _, ok := right.(evalNumeric); ok { + return newEvalInt64(strcmpCollate(left.ToRawBytes(), right.ToRawBytes(), collationNumeric.Collation)), nil + } + + col1 := evalCollation(left) + col2 := evalCollation(right) + + mcol, _, _, err := colldata.Merge(collations.Local(), col1, col2, colldata.CoercionOptions{ + ConvertToSuperset: true, + ConvertWithCoercion: true, + }) + if err != nil { + return nil, err + } + + left, err = evalToVarchar(left, mcol.Collation, true) + if err != nil { + return nil, err + } + + right, err = evalToVarchar(right, mcol.Collation, true) + if err != nil { + return nil, err + } + + return newEvalInt64(strcmpCollate(left.ToRawBytes(), right.ToRawBytes(), mcol.Collation)), nil +} + +// typeof implements the ComparisonOp interface +func (l *builtinStrcmp) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := l.Arguments[0].typeof(env, fields) + _, f2 := l.Arguments[1].typeof(env, fields) + return sqltypes.Int64, f1 | f2 +} + +func (expr *builtinStrcmp) compile(c *compiler) (ctype, error) { + lt, err := expr.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip1 := c.compileNullCheck1(lt) + + rt, err := expr.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + skip2 := c.compileNullCheck1r(rt) + var mcol collations.TypedCollation + + if sqltypes.IsNumber(lt.Type) || sqltypes.IsNumber(rt.Type) { + mcol = collationNumeric + } else { + mcol, _, _, err = colldata.Merge(collations.Local(), lt.Col, rt.Col, colldata.CoercionOptions{ + ConvertToSuperset: true, + ConvertWithCoercion: true, + }) + if err != nil { + return ctype{}, err + } + } + + if !lt.isTextual() || lt.Col.Collation != mcol.Collation { + c.asm.Convert_xce(2, sqltypes.VarChar, mcol.Collation) + } + + if !rt.isTextual() || rt.Col.Collation != mcol.Collation { + c.asm.Convert_xce(1, sqltypes.VarChar, mcol.Collation) + } + + c.asm.Strcmp(mcol) + c.asm.jumpDestination(skip1, skip2) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagNullable}, nil +} + +func (call builtinTrim) eval(env *ExpressionEnv) (eval, error) { + str, err := call.arg1(env) + if err != nil { + return nil, err + } + + if str == nil { + return nil, nil + } + + text, ok := str.(*evalBytes) + if !ok { + text, err = evalToVarchar(str, call.collate, true) + if err != nil { + return nil, err + } + } + + if len(call.Arguments) == 1 { + switch call.trim { + case sqlparser.LeadingTrimType: + return newEvalText(bytes.TrimLeft(text.bytes, " "), text.col), nil + case sqlparser.TrailingTrimType: + return newEvalText(bytes.TrimRight(text.bytes, " "), text.col), nil + default: + return newEvalText(bytes.Trim(text.bytes, " "), text.col), nil + } + } + + p, err := call.Arguments[1].eval(env) + if err != nil { + return nil, err + } + if p == nil { + return nil, nil + } + + pat, ok := p.(*evalBytes) + if !ok || colldata.Lookup(pat.col.Collation).Charset() != colldata.Lookup(text.col.Collation).Charset() { + pat, err = evalToVarchar(p, text.col.Collation, true) + if err != nil { + return nil, err + } + } + + switch call.trim { + case sqlparser.LeadingTrimType: + return newEvalText(bytes.TrimPrefix(text.bytes, pat.bytes), text.col), nil + case sqlparser.TrailingTrimType: + return newEvalText(bytes.TrimSuffix(text.bytes, pat.bytes), text.col), nil + default: + return newEvalText(bytes.TrimPrefix(bytes.TrimSuffix(text.bytes, pat.bytes), pat.bytes), text.col), nil + } +} + +func (call builtinTrim) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := call.Arguments[0].typeof(env, fields) + return sqltypes.VarChar, f1 +} + +func (call builtinTrim) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip1 := c.compileNullCheck1(str) + + col := defaultCoercionCollation(c.cfg.Collation) + switch { + case str.isTextual(): + col = str.Col + default: + c.asm.Convert_xc(1, sqltypes.VarChar, col.Collation, 0, false) + } + + if len(call.Arguments) == 1 { + switch call.trim { + case sqlparser.LeadingTrimType: + c.asm.Fn_LTRIM1(col) + case sqlparser.TrailingTrimType: + c.asm.Fn_RTRIM1(col) + default: + c.asm.Fn_TRIM1(col) + } + c.asm.jumpDestination(skip1) + return ctype{Type: sqltypes.VarChar, Col: col}, nil + } + + pat, err := call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + skip2 := c.compileNullCheck1r(pat) + + switch { + case pat.isTextual(): + fromCharset := colldata.Lookup(pat.Col.Collation).Charset() + toCharset := colldata.Lookup(col.Collation).Charset() + if fromCharset != toCharset && !toCharset.IsSuperset(fromCharset) { + c.asm.Convert_xce(1, sqltypes.VarChar, col.Collation) + } + default: + c.asm.Convert_xce(1, sqltypes.VarChar, col.Collation) + } + + switch call.trim { + case sqlparser.LeadingTrimType: + c.asm.Fn_LTRIM2(col) + case sqlparser.TrailingTrimType: + c.asm.Fn_RTRIM2(col) + default: + c.asm.Fn_TRIM2(col) + } + + c.asm.jumpDestination(skip1, skip2) + return ctype{Type: sqltypes.VarChar, Col: col}, nil +} + +type builtinConcat struct { + CallExpr + collate collations.ID +} + +func concatSQLType(arg sqltypes.Type, tt sqltypes.Type) sqltypes.Type { + if arg == sqltypes.TypeJSON { + return sqltypes.Blob + } + + if sqltypes.IsBinary(tt) { + return tt + } + + if sqltypes.IsBinary(arg) { + return sqltypes.VarBinary + } + + return sqltypes.VarChar +} + +func concatConvert(buf []byte, str *evalBytes, tc collations.TypedCollation) ([]byte, error) { + if tc.Collation == collations.CollationBinaryID { + return append(buf, str.bytes...), nil + } + fromCharset := colldata.Lookup(str.col.Collation).Charset() + toCharset := colldata.Lookup(tc.Collation).Charset() + if fromCharset != toCharset { + return charset.Convert(buf, toCharset, str.bytes, fromCharset) + } + return append(buf, str.bytes...), nil +} + +func (call *builtinConcat) eval(env *ExpressionEnv) (eval, error) { + local := collations.Local() + var ca collationAggregation + tt := sqltypes.VarChar + + args := make([]eval, 0, len(call.Arguments)) + for _, arg := range call.Arguments { + a, err := arg.eval(env) + if a == nil || err != nil { + return nil, err + } + args = append(args, a) + tt = concatSQLType(a.SQLType(), tt) + + err = ca.add(local, evalCollation(a)) + if err != nil { + return nil, err + } + } + + tc := ca.result() + // If we only had numbers, we instead fall back to the default + // collation instead of using the numeric collation. + if tc.Coercibility == collations.CoerceNumeric { + tc = defaultCoercionCollation(call.collate) + } + + var buf []byte + for _, arg := range args { + switch a := arg.(type) { + case *evalBytes: + var err error + buf, err = concatConvert(buf, a, tc) + if err != nil { + return nil, err + } + default: + c, err := evalToVarchar(a, tc.Collation, true) + if err != nil { + return nil, err + } + buf = append(buf, c.bytes...) + } + } + + return newEvalRaw(tt, buf, tc), nil +} + +func (call *builtinConcat) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + var f typeFlag + tt := sqltypes.VarChar + for _, arg := range call.Arguments { + argf, af := arg.typeof(env, fields) + tt = concatSQLType(argf, tt) + f |= af + } + return tt, f +} + +func (call *builtinConcat) compile(c *compiler) (ctype, error) { + local := collations.Local() + var ca collationAggregation + tt := sqltypes.VarChar + var f typeFlag + + args := make([]ctype, 0, len(call.Arguments)) + skips := make([]*jump, 0, len(call.Arguments)) + for i, arg := range call.Arguments { + a, err := arg.compile(c) + if err != nil { + return ctype{}, err + } + f |= a.Flag + skips = append(skips, c.compileNullCheckArg(a, i)) + args = append(args, a) + tt = concatSQLType(a.Type, tt) + + err = ca.add(local, a.Col) + if err != nil { + return ctype{}, err + } + } + + tc := ca.result() + // If we only had numbers, we instead fall back to the default + // collation instead of using the numeric collation. + if tc.Coercibility == collations.CoerceNumeric { + tc = defaultCoercionCollation(call.collate) + } + + for i, arg := range args { + switch arg.Type { + case sqltypes.VarBinary, sqltypes.Binary, sqltypes.Blob: + if tc.Collation != collations.CollationBinaryID { + c.asm.Convert_xce(len(args)-i, arg.Type, tc.Collation) + } + case sqltypes.VarChar, sqltypes.Char, sqltypes.Text: + fromCharset := colldata.Lookup(arg.Col.Collation).Charset() + toCharset := colldata.Lookup(tc.Collation).Charset() + if fromCharset != toCharset && !toCharset.IsSuperset(fromCharset) { + c.asm.Convert_xce(len(args)-i, arg.Type, tc.Collation) + } + default: + c.asm.Convert_xce(len(args)-i, arg.Type, tc.Collation) + } + } + + c.asm.Fn_CONCAT(tt, tc, len(args)) + c.asm.jumpDestination(skips...) + + return ctype{Type: tt, Flag: f, Col: tc}, nil +} + +type builtinConcatWs struct { + CallExpr + collate collations.ID +} + +func (call *builtinConcatWs) eval(env *ExpressionEnv) (eval, error) { + local := collations.Local() + var ca collationAggregation + tt := sqltypes.VarChar + + args := make([]eval, 0, len(call.Arguments)) + for i, arg := range call.Arguments { + a, err := arg.eval(env) + if err != nil { + return nil, err + } + if a == nil { + if i == 0 { + return nil, nil + } + // Unlike CONCAT, CONCAT_WS skips nil arguments. + continue + } + args = append(args, a) + tt = concatSQLType(a.SQLType(), tt) + + err = ca.add(local, evalCollation(a)) + if err != nil { + return nil, err + } + } + + tc := ca.result() + // If we only had numbers, we instead fall back to the default + // collation instead of using the numeric collation. + if tc.Coercibility == collations.CoerceNumeric { + tc = defaultCoercionCollation(call.collate) + } + + var sep []byte + var buf []byte + for i, arg := range args { + if i > 1 { + buf = append(buf, sep...) + } + switch a := arg.(type) { + case *evalBytes: + var err error + if i == 0 { + sep, err = concatConvert(nil, a, tc) + if err != nil { + return nil, err + } + continue + } + buf, err = concatConvert(buf, a, tc) + if err != nil { + return nil, err + } + default: + c, err := evalToVarchar(a, tc.Collation, true) + if err != nil { + return nil, err + } + if i == 0 { + sep = c.bytes + continue + } + buf = append(buf, c.bytes...) + } + } + + return newEvalRaw(tt, buf, tc), nil +} + +func (call *builtinConcatWs) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + tt := sqltypes.VarChar + sep, f := call.Arguments[0].typeof(env, fields) + tt = concatSQLType(sep, tt) + for _, arg := range call.Arguments[1:] { + argf, _ := arg.typeof(env, fields) + tt = concatSQLType(argf, tt) + } + return tt, f +} + +func (call *builtinConcatWs) compile(c *compiler) (ctype, error) { + local := collations.Local() + var ca collationAggregation + tt := sqltypes.VarChar + + var skip *jump + args := make([]ctype, 0, len(call.Arguments)-1) + for i, arg := range call.Arguments { + a, err := arg.compile(c) + if err != nil { + return ctype{}, err + } + tt = concatSQLType(a.Type, tt) + + err = ca.add(local, a.Col) + if err != nil { + return ctype{}, err + } + + args = append(args, a) + + if i == 0 { + skip = c.compileNullCheck1(a) + continue + } + } + + tc := ca.result() + // If we only had numbers, we instead fall back to the default + // collation instead of using the numeric collation. + if tc.Coercibility == collations.CoerceNumeric { + tc = defaultCoercionCollation(call.collate) + } + + for i, arg := range args { + offset := len(args) - i + var skip *jump + if i != 0 { + skip = c.compileNullCheckOffset(arg, offset) + } + switch arg.Type { + case sqltypes.VarBinary, sqltypes.Binary, sqltypes.Blob: + if tc.Collation != collations.CollationBinaryID { + c.asm.Convert_xce(offset, arg.Type, tc.Collation) + } + case sqltypes.VarChar, sqltypes.Char, sqltypes.Text: + fromCharset := colldata.Lookup(arg.Col.Collation).Charset() + toCharset := colldata.Lookup(tc.Collation).Charset() + if fromCharset != toCharset && !toCharset.IsSuperset(fromCharset) { + c.asm.Convert_xce(offset, arg.Type, tc.Collation) + } + default: + c.asm.Convert_xce(offset, arg.Type, tc.Collation) + } + c.asm.jumpDestination(skip) + } + + c.asm.Fn_CONCAT_WS(tt, tc, len(args)-1) + c.asm.jumpDestination(skip) + + return ctype{Type: tt, Flag: args[0].Flag, Col: tc}, nil } diff --git a/go/vt/vtgate/evalengine/fn_time.go b/go/vt/vtgate/evalengine/fn_time.go new file mode 100644 index 00000000000..99e0f27f755 --- /dev/null +++ b/go/vt/vtgate/evalengine/fn_time.go @@ -0,0 +1,1786 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "math" + "time" + + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/datetime" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +var SystemTime = time.Now + +type ( + builtinNow struct { + CallExpr + utc bool + onlyTime bool + prec uint8 + } + + builtinSysdate struct { + CallExpr + prec uint8 + } + + builtinCurdate struct { + CallExpr + } + + builtinUtcDate struct { + CallExpr + } + + builtinDateFormat struct { + CallExpr + collate collations.ID + } + + builtinDate struct { + CallExpr + } + + builtinDayOfMonth struct { + CallExpr + } + + builtinDayOfWeek struct { + CallExpr + } + + builtinDayOfYear struct { + CallExpr + } + + builtinFromUnixtime struct { + CallExpr + collate collations.ID + } + + builtinHour struct { + CallExpr + } + + builtinMakedate struct { + CallExpr + } + + builtinMaketime struct { + CallExpr + } + + builtinMicrosecond struct { + CallExpr + } + + builtinMinute struct { + CallExpr + } + + builtinMonth struct { + CallExpr + } + + builtinMonthName struct { + CallExpr + collate collations.ID + } + + builtinQuarter struct { + CallExpr + } + + builtinSecond struct { + CallExpr + } + + builtinTime struct { + CallExpr + } + + builtinUnixTimestamp struct { + CallExpr + } + + builtinWeek struct { + CallExpr + } + + builtinWeekDay struct { + CallExpr + } + + builtinWeekOfYear struct { + CallExpr + } + + builtinYear struct { + CallExpr + } + + builtinYearWeek struct { + CallExpr + } + + builtinDateMath struct { + CallExpr + sub bool + unit datetime.IntervalType + collate collations.ID + } +) + +var _ Expr = (*builtinNow)(nil) +var _ Expr = (*builtinSysdate)(nil) +var _ Expr = (*builtinCurdate)(nil) +var _ Expr = (*builtinUtcDate)(nil) +var _ Expr = (*builtinDateFormat)(nil) +var _ Expr = (*builtinDate)(nil) +var _ Expr = (*builtinDayOfMonth)(nil) +var _ Expr = (*builtinDayOfWeek)(nil) +var _ Expr = (*builtinDayOfYear)(nil) +var _ Expr = (*builtinHour)(nil) +var _ Expr = (*builtinFromUnixtime)(nil) +var _ Expr = (*builtinMakedate)(nil) +var _ Expr = (*builtinMaketime)(nil) +var _ Expr = (*builtinMicrosecond)(nil) +var _ Expr = (*builtinMinute)(nil) +var _ Expr = (*builtinMonth)(nil) +var _ Expr = (*builtinMonthName)(nil) +var _ Expr = (*builtinQuarter)(nil) +var _ Expr = (*builtinSecond)(nil) +var _ Expr = (*builtinTime)(nil) +var _ Expr = (*builtinUnixTimestamp)(nil) +var _ Expr = (*builtinWeek)(nil) +var _ Expr = (*builtinWeekDay)(nil) +var _ Expr = (*builtinWeekOfYear)(nil) +var _ Expr = (*builtinYear)(nil) +var _ Expr = (*builtinYearWeek)(nil) + +func (call *builtinNow) eval(env *ExpressionEnv) (eval, error) { + now := env.time(call.utc) + if call.onlyTime { + buf := datetime.Time_hh_mm_ss.Format(now, call.prec) + return newEvalRaw(sqltypes.Time, buf, collationBinary), nil + } else { + buf := datetime.DateTime_YYYY_MM_DD_hh_mm_ss.Format(now, call.prec) + return newEvalRaw(sqltypes.Datetime, buf, collationBinary), nil + } +} + +func (call *builtinNow) typeof(_ *ExpressionEnv, _ []*querypb.Field) (sqltypes.Type, typeFlag) { + if call.onlyTime { + return sqltypes.Time, 0 + } + return sqltypes.Datetime, 0 +} + +func (call *builtinNow) compile(c *compiler) (ctype, error) { + var format *datetime.Strftime + var t sqltypes.Type + + if call.onlyTime { + format = datetime.Time_hh_mm_ss + t = sqltypes.Time + } else { + format = datetime.DateTime_YYYY_MM_DD_hh_mm_ss + t = sqltypes.Datetime + } + c.asm.Fn_Now(t, format, call.prec, call.utc) + return ctype{Type: t, Col: collationBinary}, nil +} + +func (call *builtinNow) constant() bool { + return false +} + +func (call *builtinSysdate) eval(env *ExpressionEnv) (eval, error) { + now := SystemTime() + if tz := env.currentTimezone(); tz != nil { + now = now.In(tz) + } + return newEvalRaw(sqltypes.Datetime, datetime.NewDateTimeFromStd(now).Format(call.prec), collationBinary), nil +} + +func (call *builtinSysdate) typeof(_ *ExpressionEnv, _ []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Datetime, 0 +} + +func (call *builtinSysdate) compile(c *compiler) (ctype, error) { + c.asm.Fn_Sysdate(call.prec) + return ctype{Type: sqltypes.Datetime, Col: collationBinary}, nil +} + +func (call *builtinSysdate) constant() bool { + return false +} + +func (call *builtinCurdate) eval(env *ExpressionEnv) (eval, error) { + now := env.time(false) + return newEvalRaw(sqltypes.Date, datetime.Date_YYYY_MM_DD.Format(now, 0), collationBinary), nil +} + +func (call *builtinCurdate) typeof(_ *ExpressionEnv, _ []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Date, 0 +} + +func (*builtinCurdate) compile(c *compiler) (ctype, error) { + c.asm.Fn_Curdate() + return ctype{Type: sqltypes.Date, Col: collationBinary}, nil +} + +func (call *builtinCurdate) constant() bool { + return false +} + +func (call *builtinUtcDate) eval(env *ExpressionEnv) (eval, error) { + now := env.time(true) + return newEvalRaw(sqltypes.Date, datetime.Date_YYYY_MM_DD.Format(now, 0), collationBinary), nil +} + +func (call *builtinUtcDate) typeof(_ *ExpressionEnv, _ []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Date, 0 +} + +func (*builtinUtcDate) compile(c *compiler) (ctype, error) { + c.asm.Fn_UtcDate() + return ctype{Type: sqltypes.Date, Col: collationBinary}, nil +} + +func (call *builtinUtcDate) constant() bool { + return false +} + +func (b *builtinDateFormat) eval(env *ExpressionEnv) (eval, error) { + date, format, err := b.arg2(env) + if err != nil { + return nil, err + } + if date == nil || format == nil { + return nil, nil + } + var t *evalTemporal + switch e := date.(type) { + case *evalTemporal: + t = e.toDateTime(datetime.DefaultPrecision) + default: + t = evalToDateTime(date, datetime.DefaultPrecision) + if t == nil || t.isZero() { + return nil, nil + } + } + + f := evalToBinary(format) + d, err := datetime.Format(f.string(), t.dt, t.prec) + if err != nil { + return nil, err + } + return newEvalText(d, defaultCoercionCollation(b.collate)), nil +} + +func (b *builtinDateFormat) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.VarChar, flagNullable +} + +func (call *builtinDateFormat) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip1 := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Datetime, sqltypes.Date: + default: + c.asm.Convert_xDT_nz(1, datetime.DefaultPrecision) + } + + format, err := call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + skip2 := c.compileNullCheck1r(format) + + switch format.Type { + case sqltypes.VarChar, sqltypes.VarBinary: + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + + col := defaultCoercionCollation(c.cfg.Collation) + c.asm.Fn_DATE_FORMAT(col) + c.asm.jumpDestination(skip1, skip2) + return ctype{Type: sqltypes.VarChar, Col: col, Flag: arg.Flag | flagNullable}, nil +} + +type builtinConvertTz struct { + CallExpr +} + +var _ Expr = (*builtinConvertTz)(nil) + +func convertTz(dt datetime.DateTime, from, to *time.Location) (datetime.DateTime, bool) { + buf := datetime.DateTime_YYYY_MM_DD_hh_mm_ss.Format(dt, datetime.DefaultPrecision) + ts, err := time.ParseInLocation(time.DateTime, hack.String(buf), from) + if err != nil { + return datetime.DateTime{}, false + } + return datetime.NewDateTimeFromStd(ts.In(to)), true +} + +func (call *builtinConvertTz) eval(env *ExpressionEnv) (eval, error) { + n, err := call.Arguments[0].eval(env) + if err != nil { + return nil, err + } + from, err := call.Arguments[1].eval(env) + if err != nil { + return nil, err + } + to, err := call.Arguments[2].eval(env) + if err != nil { + return nil, err + } + + if n == nil || from == nil || to == nil { + return nil, nil + } + + f := evalToBinary(from) + t := evalToBinary(to) + + fromTz, err := datetime.ParseTimeZone(f.string()) + if err != nil { + return nil, nil + } + + toTz, err := datetime.ParseTimeZone(t.string()) + if err != nil { + return nil, nil + } + + dt := evalToDateTime(n, -1) + if dt == nil || dt.isZero() { + return nil, nil + } + + out, ok := convertTz(dt.dt, fromTz, toTz) + if !ok { + return nil, nil + } + return newEvalDateTime(out, int(dt.prec)), nil +} + +func (call *builtinConvertTz) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := call.Arguments[0].typeof(env, fields) + return sqltypes.Datetime, f | flagNullable +} + +func (call *builtinConvertTz) compile(c *compiler) (ctype, error) { + n, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + from, err := call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + to, err := call.Arguments[2].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck3(n, from, to) + + switch { + case from.isTextual(): + default: + c.asm.Convert_xb(2, sqltypes.VarBinary, 0, false) + } + + switch { + case to.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + + switch n.Type { + case sqltypes.Datetime, sqltypes.Date: + default: + c.asm.Convert_xDT_nz(3, -1) + } + + c.asm.Fn_CONVERT_TZ() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Datetime, Col: collationBinary, Flag: n.Flag | flagNullable}, nil +} + +func (b *builtinDate) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + d := evalToDate(date) + if d == nil { + return nil, nil + } + return d, nil +} + +func (b *builtinDate) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Date, flagNullable +} + +func (call *builtinDate) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date: + default: + c.asm.Convert_xD(1) + } + + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Date, Col: collationBinary, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinDayOfMonth) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + d := evalToDate(date) + if d == nil { + return nil, nil + } + return newEvalInt64(int64(d.dt.Date.Day())), nil +} + +func (b *builtinDayOfMonth) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, flagNullable +} + +func (call *builtinDayOfMonth) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime: + default: + c.asm.Convert_xD(1) + } + c.asm.Fn_DAYOFMONTH() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinDayOfWeek) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + d := evalToDate(date) + if d == nil || d.isZero() { + return nil, nil + } + return newEvalInt64(int64(d.dt.Date.ToStdTime(time.Local).Weekday() + 1)), nil +} + +func (b *builtinDayOfWeek) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, flagNullable +} + +func (call *builtinDayOfWeek) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime: + default: + c.asm.Convert_xD_nz(1) + } + c.asm.Fn_DAYOFWEEK() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinDayOfYear) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + d := evalToDate(date) + if d == nil || d.isZero() { + return nil, nil + } + return newEvalInt64(int64(d.dt.Date.ToStdTime(time.Local).YearDay())), nil +} + +func (b *builtinDayOfYear) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, flagNullable +} + +func (call *builtinDayOfYear) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime: + default: + c.asm.Convert_xD_nz(1) + } + c.asm.Fn_DAYOFYEAR() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil +} + +const maxUnixtime = 32536771200 + +func (b *builtinFromUnixtime) eval(env *ExpressionEnv) (eval, error) { + ts, err := b.arg1(env) + if err != nil { + return nil, err + } + + if ts == nil { + return nil, nil + } + + prec := 0 + var sec, frac int64 + + switch ts := ts.(type) { + case *evalInt64: + sec = ts.i + case *evalUint64: + sec = int64(ts.u) + case *evalFloat: + sf, ff := math.Modf(ts.f) + sec = int64(sf) + frac = int64(ff * 1e9) + prec = 6 + case *evalDecimal: + sd, fd := ts.dec.QuoRem(decimal.New(1, 0), 0) + sec, _ = sd.Int64() + frac, _ = fd.Mul(decimal.New(1, 9)).Int64() + prec = int(ts.length) + case *evalTemporal: + if ts.prec == 0 { + sec = ts.toInt64() + } else { + dec := ts.toDecimal() + sd, fd := dec.QuoRem(decimal.New(1, 0), 0) + sec, _ = sd.Int64() + frac, _ = fd.Mul(decimal.New(1, 9)).Int64() + prec = int(ts.prec) + } + case *evalBytes: + if ts.isHexOrBitLiteral() { + u, _ := ts.toNumericHex() + sec = int64(u.u) + } else { + f, _ := evalToFloat(ts) + sf, ff := math.Modf(f.f) + sec = int64(sf) + frac = int64(ff * 1e9) + prec = 6 + } + default: + f, _ := evalToFloat(ts) + sf, ff := math.Modf(f.f) + sec = int64(sf) + frac = int64(ff * 1e9) + prec = 6 + } + + if sec < 0 || sec >= maxUnixtime { + return nil, nil + } + + t := time.Unix(sec, frac) + if tz := env.currentTimezone(); tz != nil { + t = t.In(tz) + } + + dt := newEvalDateTime(datetime.NewDateTimeFromStd(t), prec) + + if len(b.Arguments) == 1 { + return dt, nil + } + + format, err := b.Arguments[1].eval(env) + if err != nil { + return nil, err + } + f := evalToBinary(format) + d, err := datetime.Format(f.string(), dt.dt, dt.prec) + if err != nil { + return nil, err + } + return newEvalText(d, defaultCoercionCollation(b.collate)), nil +} + +func (b *builtinFromUnixtime) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f := b.Arguments[0].typeof(env, fields) + if len(b.Arguments) == 1 { + return sqltypes.Datetime, f | flagNullable + } + return sqltypes.VarChar, f | flagNullable +} + +func (call *builtinFromUnixtime) compile(c *compiler) (ctype, error) { + arg, err := c.compile(call.Arguments[0]) + if err != nil { + return ctype{}, err + } + skip1 := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Int64: + c.asm.Fn_FROM_UNIXTIME_i() + case sqltypes.Uint64: + c.asm.Fn_FROM_UNIXTIME_u() + case sqltypes.Float64: + c.asm.Fn_FROM_UNIXTIME_f() + case sqltypes.Decimal: + c.asm.Fn_FROM_UNIXTIME_d() + case sqltypes.Datetime, sqltypes.Date, sqltypes.Time: + c.asm.Convert_Ti(1) + c.asm.Fn_FROM_UNIXTIME_i() + case sqltypes.VarChar, sqltypes.VarBinary: + if arg.isHexOrBitLiteral() { + c.asm.Convert_xu(1) + c.asm.Fn_FROM_UNIXTIME_u() + } else { + c.asm.Convert_xf(1) + c.asm.Fn_FROM_UNIXTIME_f() + } + default: + c.asm.Convert_xf(1) + c.asm.Fn_FROM_UNIXTIME_f() + } + + if len(call.Arguments) == 1 { + c.asm.jumpDestination(skip1) + return ctype{Type: sqltypes.Datetime, Col: collationBinary, Flag: arg.Flag | flagNullable}, nil + } + + format, err := call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + skip2 := c.compileNullCheck1r(format) + + switch format.Type { + case sqltypes.VarChar, sqltypes.VarBinary: + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + } + + col := defaultCoercionCollation(c.cfg.Collation) + c.asm.Fn_DATE_FORMAT(col) + c.asm.jumpDestination(skip1, skip2) + return ctype{Type: sqltypes.VarChar, Col: col, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinHour) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + d := evalToTime(date, -1) + if d == nil { + return nil, nil + } + return newEvalInt64(int64(d.dt.Time.Hour())), nil +} + +func (b *builtinHour) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, flagNullable +} + +func (call *builtinHour) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime, sqltypes.Time: + default: + c.asm.Convert_xT(1, -1) + } + c.asm.Fn_HOUR() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil +} + +func yearDayToTime(y, yd int64) time.Time { + if y >= 0 && y < 100 { + if y < 70 { + y += 2000 + } else { + y += 1900 + } + } + + if y < 0 || y > 9999 || yd < 1 || yd > math.MaxInt32 { + return time.Time{} + } + t := time.Date(int(y), time.January, 1, 0, 0, 0, 0, time.Local).AddDate(0, 0, int(yd-1)) + if t.Year() > 9999 { + return time.Time{} + } + return t +} + +func (b *builtinMakedate) eval(env *ExpressionEnv) (eval, error) { + // For some reason, MySQL first evaluates the year day argument. + yearDay, err := b.Arguments[1].eval(env) + if err != nil { + return nil, err + } + if yearDay == nil { + return nil, nil + } + + year, err := b.Arguments[0].eval(env) + if err != nil { + return nil, err + } + if year == nil { + return nil, nil + } + + y := evalToInt64(year).i + yd := evalToInt64(yearDay).i + + t := yearDayToTime(y, yd) + if t.IsZero() { + return nil, nil + } + return newEvalDate(datetime.NewDateTimeFromStd(t).Date), nil +} + +func (b *builtinMakedate) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := b.Arguments[0].typeof(env, fields) + _, f2 := b.Arguments[1].typeof(env, fields) + return sqltypes.Date, f1 | f2 | flagNullable +} + +func (call *builtinMakedate) compile(c *compiler) (ctype, error) { + // Similar here, we have to evaluate these in reverse order as well. + yearDay, err := call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + skip1 := c.compileNullCheck1(yearDay) + + year, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip2 := c.compileNullCheck1r(year) + + switch yearDay.Type { + case sqltypes.Int64: + default: + c.asm.Convert_xi(2) + } + + switch year.Type { + case sqltypes.Int64: + default: + c.asm.Convert_xi(1) + } + + c.asm.Fn_MAKEDATE() + c.asm.jumpDestination(skip1, skip2) + return ctype{Type: sqltypes.Date, Col: collationBinary, Flag: year.Flag | yearDay.Flag | flagNullable}, nil +} + +func clampHourMinute(h, m int64) (int64, int64, bool, bool) { + var clamped bool + if h > 838 || h < -838 { + clamped = true + if h > 0 { + h = 838 + } else { + h = -838 + } + m = 59 + } + neg := h < 0 + if neg { + h = -h + } + return h, m, neg, clamped +} + +func makeTime_i(h, m, s int64) (int64, bool) { + if m < 0 || m > 59 || s < 0 || s > 59 { + return 0, false + } + + h, m, neg, clamped := clampHourMinute(h, m) + if clamped { + s = 59 + } + + v := h*10000 + m*100 + s + if neg { + v = -v + } + return v, true +} + +func makeTime_d(h, m int64, s decimal.Decimal) (decimal.Decimal, bool) { + if m < 0 || m > 59 || s.Sign() < 0 || s.Cmp(decimal.NewFromInt(60)) >= 0 { + return decimal.Zero, false + } + + h, m, neg, clamped := clampHourMinute(h, m) + if clamped { + s = decimal.NewFromInt(59) + } + + dec := decimal.NewFromInt(h*10000 + m*100).Add(s) + if neg { + dec = dec.Neg() + } + return dec, true +} + +func makeTime_f(h, m int64, s float64) (float64, bool) { + if m < 0 || m > 59 || s < 0.0 || s >= 60.0 { + return 0, false + } + + h, m, neg, clamped := clampHourMinute(h, m) + if clamped { + s = 59.0 + } + + v := float64(h*10000+m*100) + s + if neg { + v = -v + } + return v, true +} + +func (b *builtinMaketime) eval(env *ExpressionEnv) (eval, error) { + hour, err := b.Arguments[0].eval(env) + if err != nil { + return nil, err + } + if hour == nil { + return nil, nil + } + min, err := b.Arguments[1].eval(env) + if err != nil { + return nil, err + } + if min == nil { + return nil, nil + } + sec, err := b.Arguments[2].eval(env) + if err != nil { + return nil, err + } + if sec == nil { + return nil, nil + } + + var h int64 + switch hour := hour.(type) { + case *evalInt64: + h = hour.i + case *evalUint64: + if hour.u > math.MaxInt64 { + h = math.MaxInt64 + } else { + h = int64(hour.u) + } + case *evalBytes: + if hour.isHexOrBitLiteral() { + hex, ok := hour.toNumericHex() + if ok { + if hex.u > math.MaxInt64 { + h = math.MaxInt64 + } else { + h = int64(hex.u) + } + } + } else { + h = evalToInt64(hour).i + } + default: + h = evalToInt64(hour).i + } + + m := evalToInt64(min).i + s := evalToNumeric(sec, false) + + var ok bool + var t datetime.Time + var l int + switch s := s.(type) { + case *evalInt64: + var v int64 + v, ok = makeTime_i(h, m, s.i) + if !ok { + return nil, nil + } + t, ok = datetime.ParseTimeInt64(v) + case *evalUint64: + var v int64 + v, ok = makeTime_i(h, m, int64(s.u)) + if !ok { + return nil, nil + } + t, ok = datetime.ParseTimeInt64(v) + case *evalDecimal: + var v decimal.Decimal + v, ok = makeTime_d(h, m, s.dec) + if !ok { + return nil, nil + } + t, l, ok = datetime.ParseTimeDecimal(v, s.length, -1) + case *evalFloat: + var v float64 + v, ok = makeTime_f(h, m, s.f) + if !ok { + return nil, nil + } + t, l, ok = datetime.ParseTimeFloat(v, -1) + } + if !ok { + return nil, nil + } + + return newEvalTime(t, l), nil +} + +func (b *builtinMaketime) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + _, f1 := b.Arguments[0].typeof(env, fields) + _, f2 := b.Arguments[1].typeof(env, fields) + _, f3 := b.Arguments[2].typeof(env, fields) + return sqltypes.Time, f1 | f2 | f3 | flagNullable +} + +func (call *builtinMaketime) compile(c *compiler) (ctype, error) { + hour, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + skip1 := c.compileNullCheck1(hour) + + min, err := call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + skip2 := c.compileNullCheck1r(min) + + sec, err := call.Arguments[2].compile(c) + if err != nil { + return ctype{}, err + } + skip3 := c.compileNullCheck3(hour, min, sec) + + switch hour.Type { + case sqltypes.Int64: + case sqltypes.Uint64: + c.asm.Clamp_u(3, math.MaxInt64) + c.asm.Convert_xi(3) + case sqltypes.VarChar, sqltypes.VarBinary: + if hour.isHexOrBitLiteral() { + c.asm.Convert_xu(3) + c.asm.Clamp_u(3, math.MaxInt64) + c.asm.Convert_xi(3) + } else { + c.asm.Convert_xi(3) + } + default: + c.asm.Convert_xi(3) + } + + switch min.Type { + case sqltypes.Int64: + default: + c.asm.Convert_xi(2) + } + + switch sec.Type { + case sqltypes.Int64: + c.asm.Fn_MAKETIME_i() + case sqltypes.Uint64: + c.asm.Convert_ui(1) + c.asm.Fn_MAKETIME_i() + case sqltypes.Decimal: + c.asm.Fn_MAKETIME_d() + case sqltypes.Float64: + c.asm.Fn_MAKETIME_f() + case sqltypes.VarChar, sqltypes.VarBinary: + if sec.isHexOrBitLiteral() { + c.asm.Convert_xi(1) + c.asm.Fn_MAKETIME_i() + } else { + c.asm.Convert_xf(1) + c.asm.Fn_MAKETIME_f() + } + default: + c.asm.Convert_xf(1) + c.asm.Fn_MAKETIME_f() + } + + c.asm.jumpDestination(skip1, skip2, skip3) + return ctype{Type: sqltypes.Time, Col: collationBinary, Flag: hour.Flag | min.Flag | sec.Flag | flagNullable}, nil +} + +func (b *builtinMicrosecond) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + d := evalToTime(date, -1) + if d == nil { + return nil, nil + } + return newEvalInt64(int64(d.dt.Time.Nanosecond() / 1000)), nil +} + +func (b *builtinMicrosecond) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, flagNullable +} + +func (call *builtinMicrosecond) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime, sqltypes.Time: + default: + c.asm.Convert_xT(1, -1) + } + c.asm.Fn_MICROSECOND() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinMinute) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + d := evalToTime(date, -1) + if d == nil { + return nil, nil + } + return newEvalInt64(int64(d.dt.Time.Minute())), nil +} + +func (b *builtinMinute) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, flagNullable +} + +func (call *builtinMinute) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime, sqltypes.Time: + default: + c.asm.Convert_xT(1, -1) + } + c.asm.Fn_MINUTE() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinMonth) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + d := evalToDate(date) + if d == nil { + return nil, nil + } + return newEvalInt64(int64(d.dt.Date.Month())), nil +} + +func (b *builtinMonth) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, flagNullable +} + +func (call *builtinMonth) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime: + default: + c.asm.Convert_xD(1) + } + c.asm.Fn_MONTH() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinMonthName) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + d := evalToDate(date) + if d == nil { + return nil, nil + } + m := d.dt.Date.Month() + if m < 1 || m > 12 { + return nil, nil + } + + return newEvalText(hack.StringBytes(time.Month(d.dt.Date.Month()).String()), defaultCoercionCollation(b.collate)), nil +} + +func (b *builtinMonthName) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.VarChar, flagNullable +} + +func (call *builtinMonthName) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime: + default: + c.asm.Convert_xD(1) + } + col := defaultCoercionCollation(call.collate) + c.asm.Fn_MONTHNAME(col) + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.VarChar, Col: col, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinQuarter) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + d := evalToDate(date) + if d == nil { + return nil, nil + } + return newEvalInt64(int64(d.dt.Date.Quarter())), nil +} + +func (b *builtinQuarter) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, flagNullable +} + +func (call *builtinQuarter) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime: + default: + c.asm.Convert_xD(1) + } + c.asm.Fn_QUARTER() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinSecond) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + d := evalToTime(date, -1) + if d == nil { + return nil, nil + } + return newEvalInt64(int64(d.dt.Time.Second())), nil +} + +func (b *builtinSecond) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, flagNullable +} + +func (call *builtinSecond) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime, sqltypes.Time: + default: + c.asm.Convert_xT(1, -1) + } + c.asm.Fn_SECOND() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinTime) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + d := evalToTime(date, -1) + if d == nil { + return nil, nil + } + return d, nil +} + +func (b *builtinTime) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Time, flagNullable +} + +func (call *builtinTime) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Time: + default: + c.asm.Convert_xT(1, -1) + } + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Time, Col: collationBinary, Flag: arg.Flag | flagNullable}, nil +} + +func dateTimeUnixTimestamp(env *ExpressionEnv, date eval) evalNumeric { + var dt *evalTemporal + switch e := date.(type) { + case *evalTemporal: + dt = e.toDateTime(int(e.prec)) + default: + dt = evalToDateTime(date, -1) + if dt == nil || dt.isZero() { + var prec int32 + switch d := date.(type) { + case *evalInt64, *evalUint64: + return newEvalInt64(0) + case *evalDecimal: + prec = d.length + case *evalBytes: + if d.isHexLiteral { + return newEvalInt64(0) + } + prec = 6 + default: + prec = 6 + } + return newEvalDecimalWithPrec(decimal.Zero, prec) + } + } + + tz := env.currentTimezone() + if tz == nil { + tz = time.Local + } + + ts := dt.dt.ToStdTime(tz) + if dt.prec == 0 { + return newEvalInt64(ts.Unix()) + } + dec := decimal.New(ts.Unix(), 0) + dec = dec.Add(decimal.New(int64(dt.dt.Time.Nanosecond()), -9)) + return newEvalDecimalWithPrec(dec, int32(dt.prec)) +} + +func (b *builtinUnixTimestamp) eval(env *ExpressionEnv) (eval, error) { + if len(b.Arguments) == 0 { + return newEvalInt64(env.now.Unix()), nil + } + + date, err := b.arg1(env) + if err != nil { + return nil, err + } + + if date == nil { + return nil, nil + } + + return dateTimeUnixTimestamp(env, date), nil +} + +func (b *builtinUnixTimestamp) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + if len(b.Arguments) == 0 { + return sqltypes.Int64, 0 + } + _, f := b.Arguments[0].typeof(env, fields) + return sqltypes.Int64, f | flagAmbiguousType +} + +func (call *builtinUnixTimestamp) constant() bool { + if len(call.Arguments) == 0 { + return false + } + return call.Arguments[0].constant() +} + +func (call *builtinUnixTimestamp) compile(c *compiler) (ctype, error) { + if len(call.Arguments) == 0 { + c.asm.Fn_UNIX_TIMESTAMP0() + return ctype{Type: sqltypes.Int64, Col: collationNumeric}, nil + } + + arg, err := c.compile(call.Arguments[0]) + if err != nil { + return ctype{}, err + } + skip := c.compileNullCheck1(arg) + + c.asm.Fn_UNIX_TIMESTAMP1() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationBinary, Flag: arg.Flag | flagAmbiguousType}, nil +} + +func (b *builtinWeek) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + + d := evalToDate(date) + if d == nil || d.isZero() { + return nil, nil + } + + mode := int64(0) + if len(b.Arguments) == 2 { + m, err := b.Arguments[1].eval(env) + if err != nil { + return nil, err + } + if m == nil { + return nil, nil + } + mode = evalToInt64(m).i + } + + week := d.dt.Date.Week(int(mode)) + return newEvalInt64(int64(week)), nil +} + +func (b *builtinWeek) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, flagNullable +} + +func (call *builtinWeek) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip1 := c.compileNullCheck1(arg) + var skip2 *jump + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime: + default: + c.asm.Convert_xD_nz(1) + } + + if len(call.Arguments) == 1 { + c.asm.Fn_WEEK0() + } else { + mode, err := call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + skip2 = c.compileNullCheck1r(mode) + c.asm.Convert_xi(1) + c.asm.Fn_WEEK() + } + + c.asm.jumpDestination(skip1, skip2) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinWeekDay) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + d := evalToDate(date) + if d == nil || d.isZero() { + return nil, nil + } + return newEvalInt64(int64(d.dt.Date.Weekday()+6) % 7), nil +} + +func (b *builtinWeekDay) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, flagNullable +} + +func (call *builtinWeekDay) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime: + default: + c.asm.Convert_xD_nz(1) + } + + c.asm.Fn_WEEKDAY() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinWeekOfYear) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + d := evalToDate(date) + if d == nil || d.isZero() { + return nil, nil + } + + _, week := d.dt.Date.ISOWeek() + return newEvalInt64(int64(week)), nil +} + +func (b *builtinWeekOfYear) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, flagNullable +} + +func (call *builtinWeekOfYear) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime: + default: + c.asm.Convert_xD_nz(1) + } + + c.asm.Fn_WEEKOFYEAR() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinYear) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + d := evalToDate(date) + if d == nil { + return nil, nil + } + + return newEvalInt64(int64(d.dt.Date.Year())), nil +} + +func (b *builtinYear) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, flagNullable +} + +func (call *builtinYear) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime: + default: + c.asm.Convert_xD(1) + } + + c.asm.Fn_YEAR() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinYearWeek) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + + d := evalToDate(date) + if d == nil || d.isZero() { + return nil, nil + } + + mode := int64(0) + if len(b.Arguments) == 2 { + m, err := b.Arguments[1].eval(env) + if err != nil { + return nil, err + } + if m == nil { + return nil, nil + } + mode = evalToInt64(m).i + } + + week := d.dt.Date.YearWeek(int(mode)) + return newEvalInt64(int64(week)), nil +} + +func (b *builtinYearWeek) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + return sqltypes.Int64, flagNullable +} + +func (call *builtinYearWeek) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip1 := c.compileNullCheck1(arg) + var skip2 *jump + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime: + default: + c.asm.Convert_xD_nz(1) + } + + if len(call.Arguments) == 1 { + c.asm.Fn_YEARWEEK0() + } else { + mode, err := call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + skip2 = c.compileNullCheck1r(mode) + c.asm.Convert_xi(1) + c.asm.Fn_YEARWEEK() + } + + c.asm.jumpDestination(skip1, skip2) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil +} + +func evalToInterval(itv eval, unit datetime.IntervalType, negate bool) *datetime.Interval { + switch itv := itv.(type) { + case *evalBytes: + return datetime.ParseInterval(itv.string(), unit, negate) + case *evalFloat: + return datetime.ParseIntervalFloat(itv.f, unit, negate) + case *evalDecimal: + return datetime.ParseIntervalDecimal(itv.dec, itv.length, unit, negate) + default: + return datetime.ParseIntervalInt64(evalToNumeric(itv, false).toInt64().i, unit, negate) + } +} + +func (call *builtinDateMath) eval(env *ExpressionEnv) (eval, error) { + date, err := call.Arguments[0].eval(env) + if err != nil || date == nil { + return date, err + } + + itv, err := call.Arguments[1].eval(env) + if err != nil || itv == nil { + return itv, err + } + + interval := evalToInterval(itv, call.unit, call.sub) + if interval == nil { + return nil, nil + } + + if tmp, ok := date.(*evalTemporal); ok { + return tmp.addInterval(interval, collations.TypedCollation{}), nil + } + + if tmp := evalToTemporal(date); tmp != nil { + return tmp.addInterval(interval, defaultCoercionCollation(call.collate)), nil + } + + return nil, nil +} + +func (call *builtinDateMath) typeof(env *ExpressionEnv, fields []*querypb.Field) (sqltypes.Type, typeFlag) { + tt, f := call.Arguments[0].typeof(env, fields) + + switch { + case tt == sqltypes.Date && !call.unit.HasTimeParts(): + return sqltypes.Date, f | flagNullable + case tt == sqltypes.Time && !call.unit.HasDateParts(): + return sqltypes.Time, f | flagNullable + case tt == sqltypes.Datetime || tt == sqltypes.Timestamp || (tt == sqltypes.Date && call.unit.HasTimeParts()) || (tt == sqltypes.Time && call.unit.HasDateParts()): + return sqltypes.Datetime, f | flagNullable + default: + return sqltypes.Char, f | flagNullable + } +} + +func (call *builtinDateMath) compile(c *compiler) (ctype, error) { + date, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + // TODO: constant propagation + _, err = call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + var ret ctype + ret.Flag = date.Flag | flagNullable + ret.Col = collationBinary + + switch { + case date.Type == sqltypes.Date && !call.unit.HasTimeParts(): + ret.Type = sqltypes.Date + c.asm.Fn_DATEADD_D(call.unit, call.sub) + case date.Type == sqltypes.Time && !call.unit.HasDateParts(): + ret.Type = sqltypes.Time + c.asm.Fn_DATEADD_D(call.unit, call.sub) + case date.Type == sqltypes.Datetime || date.Type == sqltypes.Timestamp || (date.Type == sqltypes.Date && call.unit.HasTimeParts()) || (date.Type == sqltypes.Time && call.unit.HasDateParts()): + ret.Type = sqltypes.Datetime + c.asm.Fn_DATEADD_D(call.unit, call.sub) + default: + ret.Type = sqltypes.VarChar + ret.Col = defaultCoercionCollation(c.cfg.Collation) + c.asm.Fn_DATEADD_s(call.unit, call.sub, ret.Col) + } + return ret, nil +} diff --git a/go/vt/vtgate/evalengine/format.go b/go/vt/vtgate/evalengine/format.go index c2e4619faac..446d3e0f28f 100644 --- a/go/vt/vtgate/evalengine/format.go +++ b/go/vt/vtgate/evalengine/format.go @@ -21,6 +21,7 @@ import ( "strings" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" ) @@ -69,7 +70,7 @@ func (l *Literal) format(w *formatter, depth int) { func (bv *BindVariable) format(w *formatter, depth int) { w.WriteByte(':') - if bv.tuple { + if bv.Type == sqltypes.Tuple { w.WriteByte(':') } w.WriteString(bv.Key) @@ -116,9 +117,14 @@ func (t TupleExpr) format(w *formatter, depth int) { func (c *CollateExpr) format(w *formatter, depth int) { c.Inner.format(w, depth) - coll := c.TypedCollation.Collation.Get() w.WriteString(" COLLATE ") - w.WriteString(coll.Name()) + w.WriteString(collations.Local().LookupName(c.TypedCollation.Collation)) +} + +func (i *IntroducerExpr) format(w *formatter, depth int) { + w.WriteString("_") + w.WriteString(collations.Local().LookupName(i.TypedCollation.Collation)) + i.Inner.format(w, depth) } func (n *NotExpr) format(w *formatter, depth int) { @@ -162,7 +168,7 @@ func (c *CallExpr) format(w *formatter, depth int) { func (c *builtinWeightString) format(w *formatter, depth int) { w.WriteString("WEIGHT_STRING(") - c.String.format(w, depth) + c.Expr.format(w, depth) if c.Cast != "" { fmt.Fprintf(w, " AS %s(%d)", strings.ToUpper(c.Cast), c.Len) @@ -198,7 +204,7 @@ func (c *ConvertExpr) format(buf *formatter, depth int) { } if c.Collation != collations.Unknown { buf.WriteString(" CHARACTER SET ") - buf.WriteString(c.Collation.Get().Name()) + buf.WriteString(collations.Local().LookupName(c.Collation)) } buf.WriteByte(')') } @@ -207,6 +213,6 @@ func (c *ConvertUsingExpr) format(buf *formatter, depth int) { buf.WriteString("CONVERT(") c.Inner.format(buf, depth) buf.WriteString(" USING ") - buf.WriteString(c.Collation.Get().Name()) + buf.WriteString(collations.Local().LookupName(c.Collation)) buf.WriteByte(')') } diff --git a/go/vt/vtgate/evalengine/integration/comparison_test.go b/go/vt/vtgate/evalengine/integration/comparison_test.go index 03bf5598ad2..bde8435f688 100644 --- a/go/vt/vtgate/evalengine/integration/comparison_test.go +++ b/go/vt/vtgate/evalengine/integration/comparison_test.go @@ -1,3 +1,5 @@ +//go:build !race + /* Copyright 2021 The Vitess Authors. @@ -17,7 +19,10 @@ limitations under the License. package integration import ( + "context" "fmt" + "os/exec" + "strconv" "strings" "testing" "time" @@ -26,7 +31,12 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/format" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/callerid" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/evalengine/testcases" @@ -35,7 +45,7 @@ import ( var ( collationEnv *collations.Environment - debugPrintAll bool + debugGolden = false debugNormalize = true debugSimplify = time.Now().UnixNano()&1 != 0 debugCheckTypes = true @@ -43,22 +53,37 @@ var ( ) func registerFlags(fs *pflag.FlagSet) { - fs.BoolVar(&debugPrintAll, "print-all", debugPrintAll, "print all matching tests") + fs.BoolVar(&debugGolden, "golden", debugGolden, "print golden test files") fs.BoolVar(&debugNormalize, "normalize", debugNormalize, "normalize comparisons against MySQL values") fs.BoolVar(&debugSimplify, "simplify", debugSimplify, "simplify expressions before evaluating them") fs.BoolVar(&debugCheckTypes, "check-types", debugCheckTypes, "check the TypeOf operator for all queries") fs.BoolVar(&debugCheckCollations, "check-collations", debugCheckCollations, "check the returned collations for all queries") } -func init() { - // We require MySQL 8.0 collations for the comparisons in the tests - mySQLVersion := "8.0.0" - servenv.SetMySQLServerVersionForTest(mySQLVersion) - collationEnv = collations.NewEnvironment(mySQLVersion) - servenv.OnParse(registerFlags) +// normalizeValue returns a normalized form of this value that matches the output +// of the evaluation engine. This is used to mask quirks in the way MySQL sends SQL +// values over the wire, to allow comparing our implementation against MySQL's in +// integration tests. +func normalizeValue(v sqltypes.Value, coll collations.ID) sqltypes.Value { + typ := v.Type() + if typ == sqltypes.VarChar && coll == collations.CollationBinaryID { + return sqltypes.NewVarBinary(string(v.Raw())) + } + if typ == sqltypes.Float32 || typ == sqltypes.Float64 { + var bitsize = 64 + if typ == sqltypes.Float32 { + bitsize = 32 + } + f, err := strconv.ParseFloat(v.RawStr(), bitsize) + if err != nil { + panic(err) + } + return sqltypes.MakeTrusted(typ, format.FormatFloat(f)) + } + return v } -func compareRemoteExprEnv(t *testing.T, env *evalengine.ExpressionEnv, conn *mysql.Conn, expr string) { +func compareRemoteExprEnv(t *testing.T, env *evalengine.ExpressionEnv, conn *mysql.Conn, expr string, fields []*querypb.Field, cmp *testcases.Comparison) { t.Helper() localQuery := "SELECT " + expr @@ -66,14 +91,14 @@ func compareRemoteExprEnv(t *testing.T, env *evalengine.ExpressionEnv, conn *mys if debugCheckCollations { remoteQuery = fmt.Sprintf("SELECT %s, COLLATION(%s)", expr, expr) } - if len(env.Fields) > 0 { + if len(fields) > 0 { if _, err := conn.ExecuteFetch(`DROP TEMPORARY TABLE IF EXISTS vteval_test`, -1, false); err != nil { t.Fatalf("failed to drop temporary table: %v", err) } var schema strings.Builder schema.WriteString(`CREATE TEMPORARY TABLE vteval_test(autopk int primary key auto_increment, `) - for i, field := range env.Fields { + for i, field := range fields { if i > 0 { schema.WriteString(", ") } @@ -88,7 +113,7 @@ func compareRemoteExprEnv(t *testing.T, env *evalengine.ExpressionEnv, conn *mys if len(env.Row) > 0 { var rowsql strings.Builder rowsql.WriteString(`INSERT INTO vteval_test(`) - for i, field := range env.Fields { + for i, field := range fields { if i > 0 { rowsql.WriteString(", ") } @@ -111,14 +136,17 @@ func compareRemoteExprEnv(t *testing.T, env *evalengine.ExpressionEnv, conn *mys remoteQuery = remoteQuery + " FROM vteval_test" } + if cmp == nil { + cmp = &testcases.Comparison{} + } - local, localType, localErr := evaluateLocalEvalengine(env, localQuery) + local, localType, localErr := evaluateLocalEvalengine(env, localQuery, fields) remote, remoteErr := conn.ExecuteFetch(remoteQuery, 1, true) - var localVal, remoteVal string + var localVal, remoteVal sqltypes.Value var localCollation, remoteCollation collations.ID if localErr == nil { - v := local.Value() + v := local.Value(collations.Default()) if debugCheckCollations { if v.IsNull() { localCollation = collations.CollationBinaryID @@ -127,11 +155,11 @@ func compareRemoteExprEnv(t *testing.T, env *evalengine.ExpressionEnv, conn *mys } } if debugNormalize { - localVal = evalengine.NormalizeValue(v, local.Collation()) + localVal = normalizeValue(v, local.Collation()) } else { - localVal = v.String() + localVal = v } - if debugCheckTypes { + if debugCheckTypes && localType != -1 { tt := v.Type() if tt != sqltypes.Null && tt != localType { t.Errorf("evaluation type mismatch: eval=%v vs typeof=%v\nlocal: %s\nquery: %s (SIMPLIFY=%v)", @@ -141,38 +169,112 @@ func compareRemoteExprEnv(t *testing.T, env *evalengine.ExpressionEnv, conn *mys } if remoteErr == nil { if debugNormalize { - remoteVal = evalengine.NormalizeValue(remote.Rows[0][0], collations.ID(remote.Fields[0].Charset)) + remoteVal = normalizeValue(remote.Rows[0][0], collations.ID(remote.Fields[0].Charset)) + cmp.Decimals = remote.Fields[0].Decimals } else { - remoteVal = remote.Rows[0][0].String() + remoteVal = remote.Rows[0][0] } if debugCheckCollations { if remote.Rows[0][0].IsNull() { // TODO: passthrough proper collations for nullable fields remoteCollation = collations.CollationBinaryID } else { - remoteCollation = collationEnv.LookupByName(remote.Rows[0][1].ToString()).ID() + remoteCollation = collationEnv.LookupByName(remote.Rows[0][1].ToString()) } } } - if diff := compareResult(localErr, remoteErr, localVal, remoteVal, localCollation, remoteCollation); diff != "" { - t.Errorf("%s\nquery: %s (SIMPLIFY=%v)\nrow: %v", diff, localQuery, debugSimplify, env.Row) - } else if debugPrintAll { - t.Logf("local=%s mysql=%s\nquery: %s\nrow: %v", localVal, remoteVal, localQuery, env.Row) + localResult := Result{ + Error: localErr, + Value: localVal, + Collation: localCollation, + } + remoteResult := Result{ + Error: remoteErr, + Value: remoteVal, + Collation: remoteCollation, + } + + if debugGolden { + g := GoldenTest{Query: localQuery} + if remoteErr != nil { + g.Error = remoteErr.Error() + } else { + g.Value = remoteVal.String() + } + seenGoldenTests = append(seenGoldenTests, g) + return + } + + if err := compareResult(localResult, remoteResult, cmp); err != nil { + t.Errorf("%s\nquery: %s (SIMPLIFY=%v)\nrow: %v", err, localQuery, debugSimplify, env.Row) + } +} + +var seenGoldenTests []GoldenTest + +type vcursor struct { +} + +func (vc *vcursor) GetKeyspace() string { + return "vttest" +} + +func (vc *vcursor) TimeZone() *time.Location { + return time.Local +} + +func initTimezoneData(t *testing.T, conn *mysql.Conn) { + // We load the timezone information into MySQL. The evalengine assumes + // our backend MySQL is configured with the timezone information as well + // for functions like CONVERT_TZ. + out, err := exec.Command("mysql_tzinfo_to_sql", "/usr/share/zoneinfo").Output() + if err != nil { + t.Fatalf("failed to retrieve timezone info: %v", err) + } + + _, more, err := conn.ExecuteFetchMulti(fmt.Sprintf("USE mysql; %s\n", string(out)), -1, false) + if err != nil { + t.Fatalf("failed to insert timezone info: %v", err) + } + for more { + _, more, _, err = conn.ReadQueryResult(-1, false) + if err != nil { + t.Fatalf("failed to insert timezone info: %v", err) + } + } + _, err = conn.ExecuteFetch(fmt.Sprintf("USE %s", connParams.DbName), -1, false) + if err != nil { + t.Fatalf("failed to switch back to database: %v", err) } } func TestMySQL(t *testing.T) { + defer utils.EnsureNoLeaks(t) var conn = mysqlconn(t) defer conn.Close() + // We require MySQL 8.0 collations for the comparisons in the tests + + servenv.SetMySQLServerVersionForTest(conn.ServerVersion) + collationEnv = collations.NewEnvironment(conn.ServerVersion) + servenv.OnParse(registerFlags) + initTimezoneData(t, conn) + for _, tc := range testcases.Cases { - t.Run(fmt.Sprintf("%T", tc), func(t *testing.T) { - env := tc.Environment() - tc.Test(func(query string, row []sqltypes.Value) { + t.Run(tc.Name(), func(t *testing.T) { + ctx := callerid.NewContext(context.Background(), &vtrpc.CallerID{Principal: "testuser"}, &querypb.VTGateCallerID{ + Username: "vt_dba", + }) + env := evalengine.NewExpressionEnv(ctx, nil, &vcursor{}) + tc.Run(func(query string, row []sqltypes.Value) { env.Row = row - compareRemoteExprEnv(t, env, conn, query) + compareRemoteExprEnv(t, env, conn, query, tc.Schema, tc.Compare) }) }) } + + if debugGolden { + writeGolden(t, seenGoldenTests) + } } diff --git a/go/vt/vtgate/evalengine/integration/fuzz_test.go b/go/vt/vtgate/evalengine/integration/fuzz_test.go index 55b5cc3c4c0..ebfaa486b19 100644 --- a/go/vt/vtgate/evalengine/integration/fuzz_test.go +++ b/go/vt/vtgate/evalengine/integration/fuzz_test.go @@ -1,3 +1,5 @@ +//go:build !race + /* Copyright 2021 The Vitess Authors. @@ -17,7 +19,9 @@ limitations under the License. package integration import ( + "context" "encoding/json" + "errors" "fmt" "math/rand" "os" @@ -30,8 +34,10 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/evalengine/testcases" "vitess.io/vitess/go/vt/vtgate/simplifier" ) @@ -91,6 +97,12 @@ var ( regexp.MustCompile(`Cannot convert string '(.*?)' from \w+ to \w+`), regexp.MustCompile(`Invalid JSON text in argument (\d+) to function (\w+): (.*?)`), regexp.MustCompile(`Illegal mix of collations`), + regexp.MustCompile(`Incorrect (DATE|DATETIME) value`), + regexp.MustCompile(`Syntax error in regular expression`), + regexp.MustCompile(`The regular expression contains an unclosed bracket expression`), + regexp.MustCompile(`Illegal argument to a regular expression`), + regexp.MustCompile(`Incorrect arguments to regexp_substr`), + regexp.MustCompile(`Incorrect arguments to regexp_replace`), } ) @@ -120,7 +132,7 @@ func errorsMatch(remote, local error) bool { return false } -func evaluateLocalEvalengine(env *evalengine.ExpressionEnv, query string) (evalengine.EvalResult, sqltypes.Type, error) { +func evaluateLocalEvalengine(env *evalengine.ExpressionEnv, query string, fields []*querypb.Field) (evalengine.EvalResult, sqltypes.Type, error) { stmt, err := sqlparser.Parse(query) if err != nil { return evalengine.EvalResult{}, 0, err @@ -133,8 +145,15 @@ func evaluateLocalEvalengine(env *evalengine.ExpressionEnv, query string) (evale err = fmt.Errorf("PANIC during translate: %v", r) } }() - lookup := &evalengine.LookupIntegrationTest{Collation: collations.CollationUtf8mb4ID} - expr, err = evalengine.TranslateEx(astExpr, lookup, debugSimplify) + cfg := &evalengine.Config{ + ResolveColumn: evalengine.FieldResolver(fields).Column, + Collation: collations.CollationUtf8mb4ID, + Optimization: evalengine.OptimizationLevelNone, + } + if debugSimplify { + cfg.Optimization = evalengine.OptimizationLevelSimplify + } + expr, err = evalengine.Translate(astExpr, cfg) return }() @@ -150,7 +169,11 @@ func evaluateLocalEvalengine(env *evalengine.ExpressionEnv, query string) (evale }() eval, err = env.Evaluate(local) if err == nil && debugCheckTypes { - tt, err = env.TypeOf(local) + tt, _, err = env.TypeOf(local, fields) + if errors.Is(err, evalengine.ErrAmbiguousType) { + tt = -1 + err = nil + } } return }() @@ -159,6 +182,12 @@ func evaluateLocalEvalengine(env *evalengine.ExpressionEnv, query string) (evale const syntaxErr = `You have an error in your SQL syntax; (errno 1064) (sqlstate 42000) during query: SQL` const localSyntaxErr = `You have an error in your SQL syntax;` +type GoldenTest struct { + Query string + Value string `json:",omitempty"` + Error string `json:",omitempty"` +} + func TestGenerateFuzzCases(t *testing.T) { if fuzzMaxFailures <= 0 { t.Skipf("skipping fuzz test generation") @@ -182,8 +211,8 @@ func TestGenerateFuzzCases(t *testing.T) { compareWithMySQL := func(expr sqlparser.Expr) *mismatch { query := "SELECT " + sqlparser.String(expr) - env := evalengine.EnvWithBindVars(nil, 255) - eval, _, localErr := evaluateLocalEvalengine(env, query) + env := evalengine.NewExpressionEnv(context.Background(), nil, nil) + eval, _, localErr := evaluateLocalEvalengine(env, query, nil) remote, remoteErr := conn.ExecuteFetch(query, 1, false) if localErr != nil && strings.Contains(localErr.Error(), "syntax error at position") { @@ -200,10 +229,10 @@ func TestGenerateFuzzCases(t *testing.T) { remoteErr: remoteErr, } if localErr == nil { - res.localVal = eval.Value().String() + res.localVal = eval.Value(collations.Default()) } if remoteErr == nil { - res.remoteVal = remote.Rows[0][0].String() + res.remoteVal = remote.Rows[0][0] } if res.Error() != "" { return &res @@ -236,12 +265,7 @@ func TestGenerateFuzzCases(t *testing.T) { return } - type evaltest struct { - Query string - Value string `json:",omitempty"` - Error string `json:",omitempty"` - } - var golden []evaltest + var golden []GoldenTest for _, fail := range failures { failErr := fail.Error() @@ -258,18 +282,22 @@ func TestGenerateFuzzCases(t *testing.T) { query := "SELECT " + sqlparser.String(simplified) if fail.remoteErr != nil { - golden = append(golden, evaltest{ + golden = append(golden, GoldenTest{ Query: query, Error: fail.remoteErr.Error(), }) } else { - golden = append(golden, evaltest{ + golden = append(golden, GoldenTest{ Query: query, - Value: fail.remoteVal, + Value: fail.remoteVal.String(), }) } } + writeGolden(t, golden) +} + +func writeGolden(t *testing.T, golden []GoldenTest) { out, err := os.Create(fmt.Sprintf("testdata/mysql_golden_%d.json", time.Now().Unix())) if err != nil { t.Fatal(err) @@ -285,51 +313,71 @@ func TestGenerateFuzzCases(t *testing.T) { type mismatch struct { expr sqlparser.Expr localErr, remoteErr error - localVal, remoteVal string + localVal, remoteVal sqltypes.Value } -func compareResult(localErr, remoteErr error, localVal, remoteVal string, localCollation, remoteCollation collations.ID) string { - if localErr != nil { - if remoteErr == nil { - return fmt.Sprintf("%v; mysql response: %s", localErr, remoteVal) +type Result struct { + Error error + Value sqltypes.Value + Collation collations.ID +} + +func compareResult(local, remote Result, cmp *testcases.Comparison) error { + if local.Error != nil { + if remote.Error == nil { + return fmt.Errorf("%w: mysql response: %s", local.Error, remote.Value) } - if !errorsMatch(remoteErr, localErr) { - return fmt.Sprintf("mismatch in errors: eval=%s; mysql response: %s", localErr.Error(), remoteErr.Error()) + if !errorsMatch(remote.Error, local.Error) { + return fmt.Errorf("mismatch in errors: eval=%w; mysql response: %w", local.Error, remote.Error) } - return "" + return nil } - if remoteErr != nil { + if remote.Error != nil { for _, ke := range knownErrors { - if ke.MatchString(remoteErr.Error()) { - return "" + if ke.MatchString(remote.Error.Error()) { + return nil } } - return fmt.Sprintf("%v; mysql failed with: %s", localVal, remoteErr.Error()) + return fmt.Errorf("%v; mysql failed with: %w", local.Value, remote.Error) } var localCollationName string var remoteCollationName string - if coll := localCollation.Get(); coll != nil { - localCollationName = coll.Name() + env := collations.Local() + if coll := local.Collation; coll != collations.Unknown { + localCollationName = env.LookupName(coll) } - if coll := remoteCollation.Get(); coll != nil { - remoteCollationName = coll.Name() + if coll := remote.Collation; coll != collations.Unknown { + remoteCollationName = env.LookupName(coll) } - if localVal != remoteVal { - return fmt.Sprintf("different results: %s; mysql response: %s (local collation: %s; mysql collation: %s)", - localVal, remoteVal, localCollationName, remoteCollationName) + equals, err := cmp.Equals(local.Value, remote.Value) + if err != nil { + return err } - if localCollation != remoteCollation { - return fmt.Sprintf("different collations: %s; mysql response: %s (local result: %s; mysql result: %s)", - localCollationName, remoteCollationName, localVal, remoteVal, + if !equals { + return fmt.Errorf("different results: %s; mysql response: %s (local collation: %s; mysql collation: %s)", + local.Value.String(), remote.Value.String(), localCollationName, remoteCollationName) + } + if local.Collation != remote.Collation { + return fmt.Errorf("different collations: %s; mysql response: %s (local result: %s; mysql result: %s)", + localCollationName, remoteCollationName, local.Value.String(), remote.Value.String(), ) } - - return "" + return nil } func (cr *mismatch) Error() string { - return compareResult(cr.localErr, cr.remoteErr, cr.localVal, cr.remoteVal, collations.Unknown, collations.Unknown) + return compareResult( + Result{ + Error: cr.localErr, + Value: cr.localVal, + }, + Result{ + Error: cr.remoteErr, + Value: cr.remoteVal, + }, + &testcases.Comparison{}, + ).Error() } diff --git a/go/vt/vtgate/evalengine/integration/testdata/mysql_golden_1686149004.json b/go/vt/vtgate/evalengine/integration/testdata/mysql_golden_1686149004.json new file mode 100644 index 00000000000..1c12d2d8825 --- /dev/null +++ b/go/vt/vtgate/evalengine/integration/testdata/mysql_golden_1686149004.json @@ -0,0 +1,36046 @@ +[ + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01',INTERVAL 1 DAY)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01',INTERVAL 1 YEAR)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 SECOND)", + "Value": "DATETIME(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2018-12-31 23:59:59', INTERVAL 1 DAY)", + "Value": "DATETIME(\"2019-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2100-12-31 23:59:59', INTERVAL '1:1' MINUTE_SECOND)", + "Value": "DATETIME(\"2101-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' DAY_SECOND)", + "Value": "DATETIME(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'1900-01-01 00:00:00', INTERVAL '-1 10' DAY_HOUR)", + "Value": "DATETIME(\"1899-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'1998-01-02', INTERVAL 31 DAY)", + "Value": "DATE(\"1997-12-02\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'1992-12-31 23:59:59.000002', INTERVAL '1.999999' SECOND_MICROSECOND)", + "Value": "DATETIME(\"1993-01-01 00:00:01.000001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2024-03-30', INTERVAL 1 MONTH)", + "Value": "DATE(\"2024-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2024-03-31', INTERVAL 1 MONTH)", + "Value": "DATE(\"2024-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 day)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 day)", + "Value": "DATE(\"2018-03-31\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 day)", + "Value": "DATE(\"2018-05-31\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 day)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 day)", + "Value": "DATE(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 day)", + "Value": "DATE(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 day)", + "Value": "DATE(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 day)", + "Value": "DATE(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 day)", + "Value": "DATE(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 day)", + "Value": "DATE(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 day)", + "Value": "DATE(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 day)", + "Value": "DATE(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' day)", + "Value": "DATE(\"2018-05-07\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' day)", + "Value": "DATE(\"2018-04-25\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 day)", + "Value": "DATE(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 day)", + "Value": "DATE(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 day)", + "Value": "DATE(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 day)", + "Value": "DATE(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "DATE(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "DATE(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "DATE(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "DATE(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' day)", + "Value": "DATE(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' day)", + "Value": "DATE(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 week)", + "Value": "DATE(\"2018-12-04\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 week)", + "Value": "DATE(\"2017-09-26\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 week)", + "Value": "DATE(\"2018-11-27\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 week)", + "Value": "DATE(\"2017-10-03\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 week)", + "Value": "DATE(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 week)", + "Value": "DATE(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 week)", + "Value": "DATE(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 week)", + "Value": "DATE(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 week)", + "Value": "DATE(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 week)", + "Value": "DATE(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 week)", + "Value": "DATE(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 week)", + "Value": "DATE(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' week)", + "Value": "DATE(\"2018-06-12\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' week)", + "Value": "DATE(\"2018-03-20\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 week)", + "Value": "DATE(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 week)", + "Value": "DATE(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 week)", + "Value": "DATE(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 week)", + "Value": "DATE(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "DATE(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "DATE(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "DATE(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "DATE(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' week)", + "Value": "DATE(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' week)", + "Value": "DATE(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 month)", + "Value": "DATE(\"2020-12-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 month)", + "Value": "DATE(\"2015-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 month)", + "Value": "DATE(\"2020-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 month)", + "Value": "DATE(\"2015-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' month)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' month)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 year)", + "Value": "DATE(\"2049-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 year)", + "Value": "DATE(\"1987-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 year)", + "Value": "DATE(\"2048-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 year)", + "Value": "DATE(\"1988-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 year)", + "Value": "DATE(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 year)", + "Value": "DATE(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 year)", + "Value": "DATE(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 year)", + "Value": "DATE(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 year)", + "Value": "DATE(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 year)", + "Value": "DATE(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 year)", + "Value": "DATE(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 year)", + "Value": "DATE(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' year)", + "Value": "DATE(\"2024-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' year)", + "Value": "DATE(\"2012-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 year)", + "Value": "DATE(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 year)", + "Value": "DATE(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 year)", + "Value": "DATE(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 year)", + "Value": "DATE(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "DATE(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "DATE(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "DATE(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "DATE(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' year)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' year)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 day_hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 day_hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' day_hour)", + "Value": "DATETIME(\"2018-05-02 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' day_hour)", + "Value": "DATETIME(\"2018-04-29 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' day_hour)", + "Value": "DATETIME(\"2018-04-29 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' day_hour)", + "Value": "DATETIME(\"2018-05-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' day_hour)", + "Value": "DATETIME(\"2018-05-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' day_hour)", + "Value": "DATETIME(\"2018-04-29 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 day_hour)", + "Value": "DATETIME(\"2018-05-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 day_hour)", + "Value": "DATETIME(\"2018-04-29 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 day_hour)", + "Value": "DATETIME(\"2018-05-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 day_hour)", + "Value": "DATETIME(\"2018-04-29 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' day_hour)", + "Value": "DATETIME(\"2132-05-30 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' day_hour)", + "Value": "DATETIME(\"1904-04-01 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 day_hour)", + "Value": "DATETIME(\"2018-06-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 day_hour)", + "Value": "DATETIME(\"2018-03-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' day_hour)", + "Value": "DATETIME(\"2018-06-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' day_hour)", + "Value": "DATETIME(\"2018-03-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' day_hour)", + "Value": "DATETIME(\"2018-04-29 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' day_hour)", + "Value": "DATETIME(\"2018-05-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' day_hour)", + "Value": "DATETIME(\"2018-05-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' day_hour)", + "Value": "DATETIME(\"2018-04-29 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 day_hour)", + "Value": "DATETIME(\"2018-05-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 day_hour)", + "Value": "DATETIME(\"2018-04-29 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 day_hour)", + "Value": "DATETIME(\"2018-11-26 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 day_hour)", + "Value": "DATETIME(\"2017-10-03 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 day_hour)", + "Value": "DATETIME(\"2018-11-26 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 day_hour)", + "Value": "DATETIME(\"2017-10-03 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' day_hour)", + "Value": "DATETIME(\"2018-05-07 04:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' day_hour)", + "Value": "DATETIME(\"2018-04-24 20:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 day_hour)", + "Value": "DATETIME(\"2018-05-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 day_hour)", + "Value": "DATETIME(\"2018-04-29 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 day_hour)", + "Value": "DATETIME(\"2018-05-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 day_hour)", + "Value": "DATETIME(\"2018-04-29 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "DATETIME(\"2018-05-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "DATETIME(\"2018-04-29 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "DATETIME(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 day_hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 day_hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' day_hour)", + "Value": "DATETIME(\"2018-05-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' day_hour)", + "Value": "DATETIME(\"2018-04-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' day_hour)", + "Value": "DATETIME(\"2018-05-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' day_hour)", + "Value": "DATETIME(\"2018-04-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' day_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' day_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 day_minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 day_minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' day_minute)", + "Value": "DATETIME(\"2018-05-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' day_minute)", + "Value": "DATETIME(\"2018-04-30 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' day_minute)", + "Value": "DATETIME(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' day_minute)", + "Value": "DATETIME(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' day_minute)", + "Value": "DATETIME(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' day_minute)", + "Value": "DATETIME(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 day_minute)", + "Value": "DATETIME(\"2018-05-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 day_minute)", + "Value": "DATETIME(\"2018-04-30 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 day_minute)", + "Value": "DATETIME(\"2018-05-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 day_minute)", + "Value": "DATETIME(\"2018-04-30 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' day_minute)", + "Value": "DATETIME(\"2020-03-25 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' day_minute)", + "Value": "DATETIME(\"2016-06-05 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 day_minute)", + "Value": "DATETIME(\"2018-05-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 day_minute)", + "Value": "DATETIME(\"2018-04-30 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' day_minute)", + "Value": "DATETIME(\"2018-05-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' day_minute)", + "Value": "DATETIME(\"2018-04-30 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' day_minute)", + "Value": "DATETIME(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' day_minute)", + "Value": "DATETIME(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' day_minute)", + "Value": "DATETIME(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' day_minute)", + "Value": "DATETIME(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 day_minute)", + "Value": "DATETIME(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 day_minute)", + "Value": "DATETIME(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 day_minute)", + "Value": "DATETIME(\"2018-05-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 day_minute)", + "Value": "DATETIME(\"2018-04-27 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 day_minute)", + "Value": "DATETIME(\"2018-05-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 day_minute)", + "Value": "DATETIME(\"2018-04-27 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' day_minute)", + "Value": "DATETIME(\"2018-05-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' day_minute)", + "Value": "DATETIME(\"2018-04-30 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 day_minute)", + "Value": "DATETIME(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 day_minute)", + "Value": "DATETIME(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 day_minute)", + "Value": "DATETIME(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 day_minute)", + "Value": "DATETIME(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "DATETIME(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "DATETIME(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 day_minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 day_minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' day_minute)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' day_minute)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' day_minute)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' day_minute)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 day_second)", + "Value": "DATETIME(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 day_second)", + "Value": "DATETIME(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' day_second)", + "Value": "DATETIME(\"2018-05-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' day_second)", + "Value": "DATETIME(\"2018-04-29 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 day_second)", + "Value": "DATETIME(\"2018-05-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 day_second)", + "Value": "DATETIME(\"2018-04-30 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 day_second)", + "Value": "DATETIME(\"2018-05-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 day_second)", + "Value": "DATETIME(\"2018-04-30 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' day_second)", + "Value": "DATETIME(\"2018-05-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' day_second)", + "Value": "DATETIME(\"2018-04-19 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 day_second)", + "Value": "DATETIME(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 day_second)", + "Value": "DATETIME(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' day_second)", + "Value": "DATETIME(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' day_second)", + "Value": "DATETIME(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' day_second)", + "Value": "DATETIME(\"2018-05-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' day_second)", + "Value": "DATETIME(\"2018-04-29 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' day_second)", + "Value": "DATETIME(\"2018-05-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' day_second)", + "Value": "DATETIME(\"2018-04-29 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 day_second)", + "Value": "DATETIME(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 day_second)", + "Value": "DATETIME(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 day_second)", + "Value": "DATETIME(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 day_second)", + "Value": "DATETIME(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' day_second)", + "Value": "DATETIME(\"2018-05-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' day_second)", + "Value": "DATETIME(\"2018-04-30 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "DATETIME(\"2018-05-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "DATETIME(\"2018-04-30 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 day_second)", + "Value": "DATETIME(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 day_second)", + "Value": "DATETIME(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' day_second)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' day_second)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' day_second)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 hour)", + "Value": "DATETIME(\"2018-05-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 hour)", + "Value": "DATETIME(\"2018-04-29 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 hour)", + "Value": "DATETIME(\"2018-05-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 hour)", + "Value": "DATETIME(\"2018-04-29 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 hour)", + "Value": "DATETIME(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 hour)", + "Value": "DATETIME(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 hour)", + "Value": "DATETIME(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 hour)", + "Value": "DATETIME(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' hour)", + "Value": "DATETIME(\"2018-05-01 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' hour)", + "Value": "DATETIME(\"2018-04-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 hour)", + "Value": "DATETIME(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 hour)", + "Value": "DATETIME(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "DATETIME(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "DATETIME(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "DATETIME(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' hour)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' hour)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 hour_minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 hour_minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 hour_minute)", + "Value": "DATETIME(\"2018-05-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 hour_minute)", + "Value": "DATETIME(\"2018-04-30 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 hour_minute)", + "Value": "DATETIME(\"2018-05-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 hour_minute)", + "Value": "DATETIME(\"2018-04-30 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' hour_minute)", + "Value": "DATETIME(\"2020-03-25 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' hour_minute)", + "Value": "DATETIME(\"2016-06-05 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 hour_minute)", + "Value": "DATETIME(\"2018-05-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 hour_minute)", + "Value": "DATETIME(\"2018-04-30 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' hour_minute)", + "Value": "DATETIME(\"2018-05-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' hour_minute)", + "Value": "DATETIME(\"2018-04-30 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 hour_minute)", + "Value": "DATETIME(\"2018-05-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 hour_minute)", + "Value": "DATETIME(\"2018-04-27 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 hour_minute)", + "Value": "DATETIME(\"2018-05-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 hour_minute)", + "Value": "DATETIME(\"2018-04-27 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' hour_minute)", + "Value": "DATETIME(\"2018-05-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' hour_minute)", + "Value": "DATETIME(\"2018-04-30 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "DATETIME(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 hour_minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 hour_minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' hour_minute)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' hour_minute)", + "Value": "DATETIME(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' hour_minute)", + "Value": "DATETIME(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 hour_second)", + "Value": "DATETIME(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 hour_second)", + "Value": "DATETIME(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 hour_second)", + "Value": "DATETIME(\"2018-05-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 hour_second)", + "Value": "DATETIME(\"2018-04-30 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 hour_second)", + "Value": "DATETIME(\"2018-05-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 hour_second)", + "Value": "DATETIME(\"2018-04-30 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' hour_second)", + "Value": "DATETIME(\"2018-05-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' hour_second)", + "Value": "DATETIME(\"2018-04-19 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 hour_second)", + "Value": "DATETIME(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 hour_second)", + "Value": "DATETIME(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 hour_second)", + "Value": "DATETIME(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 hour_second)", + "Value": "DATETIME(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 hour_second)", + "Value": "DATETIME(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 hour_second)", + "Value": "DATETIME(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "DATETIME(\"2018-05-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "DATETIME(\"2018-04-30 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 hour_second)", + "Value": "DATETIME(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 hour_second)", + "Value": "DATETIME(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' hour_second)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' hour_second)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000031\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999969\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000030\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999970\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000006\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999994\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 minute)", + "Value": "DATETIME(\"2018-05-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 minute)", + "Value": "DATETIME(\"2018-04-30 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 minute)", + "Value": "DATETIME(\"2018-05-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 minute)", + "Value": "DATETIME(\"2018-04-30 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' minute)", + "Value": "DATETIME(\"2018-05-01 00:06:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' minute)", + "Value": "DATETIME(\"2018-04-30 23:54:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "DATETIME(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "DATETIME(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' minute)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' minute)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 minute_second)", + "Value": "DATETIME(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 minute_second)", + "Value": "DATETIME(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 minute_second)", + "Value": "DATETIME(\"2018-05-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 minute_second)", + "Value": "DATETIME(\"2018-04-30 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 minute_second)", + "Value": "DATETIME(\"2018-05-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 minute_second)", + "Value": "DATETIME(\"2018-04-30 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' minute_second)", + "Value": "DATETIME(\"2018-05-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' minute_second)", + "Value": "DATETIME(\"2018-04-19 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 minute_second)", + "Value": "DATETIME(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 minute_second)", + "Value": "DATETIME(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' minute_second)", + "Value": "DATETIME(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' minute_second)", + "Value": "DATETIME(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 minute_second)", + "Value": "DATETIME(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 minute_second)", + "Value": "DATETIME(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 minute_second)", + "Value": "DATETIME(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 minute_second)", + "Value": "DATETIME(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' minute_second)", + "Value": "DATETIME(\"2018-05-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' minute_second)", + "Value": "DATETIME(\"2018-04-30 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "DATETIME(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "DATETIME(\"2018-05-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "DATETIME(\"2018-04-30 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 minute_second)", + "Value": "DATETIME(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 minute_second)", + "Value": "DATETIME(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' minute_second)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' minute_second)", + "Value": "DATETIME(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' minute_second)", + "Value": "DATETIME(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 quarter)", + "Value": "DATE(\"2026-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 quarter)", + "Value": "DATE(\"2010-08-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 quarter)", + "Value": "DATE(\"2025-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 quarter)", + "Value": "DATE(\"2010-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 quarter)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 quarter)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 quarter)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 quarter)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 quarter)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 quarter)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 quarter)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 quarter)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' quarter)", + "Value": "DATE(\"2019-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' quarter)", + "Value": "DATE(\"2016-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 quarter)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 quarter)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 quarter)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 quarter)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "DATE(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "DATE(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' quarter)", + "Value": "DATE(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' quarter)", + "Value": "DATE(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 second)", + "Value": "DATETIME(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 second)", + "Value": "DATETIME(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 second)", + "Value": "DATETIME(\"2018-05-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 second)", + "Value": "DATETIME(\"2018-04-30 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 second)", + "Value": "DATETIME(\"2018-05-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 second)", + "Value": "DATETIME(\"2018-04-30 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.5\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.5\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.5000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.5000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.5000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.5000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' second)", + "Value": "DATETIME(\"2018-05-01 00:00:06.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' second)", + "Value": "DATETIME(\"2018-04-30 23:59:54.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.5\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "DATETIME(\"2018-04-30 23:59:58.5\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "DATETIME(\"2018-05-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "DATETIME(\"2018-04-30 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' second)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' second)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' second_microsecond)", + "Value": "DATETIME(\"2018-05-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' second_microsecond)", + "Value": "DATETIME(\"2018-04-30 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1 year_month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1 year_month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1' year_month)", + "Value": "DATE(\"2019-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1' year_month)", + "Value": "DATE(\"2017-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1 10' year_month)", + "Value": "DATE(\"2016-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1 10' year_month)", + "Value": "DATE(\"2020-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1 10' year_month)", + "Value": "DATE(\"2020-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1 10' year_month)", + "Value": "DATE(\"2016-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 31 year_month)", + "Value": "DATE(\"2020-12-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 31 year_month)", + "Value": "DATE(\"2015-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 30 year_month)", + "Value": "DATE(\"2020-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 30 year_month)", + "Value": "DATE(\"2015-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.999 year_month)", + "Value": "DATE(\"2102-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.999 year_month)", + "Value": "DATE(\"1934-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.999' year_month)", + "Value": "DATE(\"2102-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.999' year_month)", + "Value": "DATE(\"1934-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '-1:10' year_month)", + "Value": "DATE(\"2016-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '-1:10' year_month)", + "Value": "DATE(\"2020-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1:10' year_month)", + "Value": "DATE(\"2020-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1:10' year_month)", + "Value": "DATE(\"2016-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5 year_month)", + "Value": "DATE(\"2019-10-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5 year_month)", + "Value": "DATE(\"2016-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000 year_month)", + "Value": "DATE(\"2436-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000 year_month)", + "Value": "DATE(\"1600-09-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 6/4 year_month)", + "Value": "DATE(\"2436-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 6/4 year_month)", + "Value": "DATE(\"1600-09-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '6/4' year_month)", + "Value": "DATE(\"2024-09-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '6/4' year_month)", + "Value": "DATE(\"2012-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5e0 year_month)", + "Value": "DATE(\"2019-10-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5e0 year_month)", + "Value": "DATE(\"2016-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1.5000e0 year_month)", + "Value": "DATE(\"2019-10-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1.5000e0 year_month)", + "Value": "DATE(\"2016-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "DATE(\"2019-10-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "DATE(\"2016-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "DATE(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "DATE(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL 1e0 year_month)", + "Value": "DATE(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL 1e0 year_month)", + "Value": "DATE(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0' year_month)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0' year_month)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(DATE'2018-05-01', INTERVAL '1.0foobar' year_month)", + "Value": "DATE(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(DATE'2018-05-01', INTERVAL '1.0foobar' year_month)", + "Value": "DATE(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day)", + "Value": "DATETIME(\"2021-01-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day)", + "Value": "DATETIME(\"2020-12-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day)", + "Value": "DATETIME(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day)", + "Value": "DATETIME(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day)", + "Value": "DATETIME(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day)", + "Value": "DATETIME(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day)", + "Value": "DATETIME(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day)", + "Value": "DATETIME(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day)", + "Value": "DATETIME(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day)", + "Value": "DATETIME(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day)", + "Value": "DATETIME(\"2021-01-06 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day)", + "Value": "DATETIME(\"2020-12-25 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day)", + "Value": "DATETIME(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day)", + "Value": "DATETIME(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day)", + "Value": "DATETIME(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day)", + "Value": "DATETIME(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "DATETIME(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "DATETIME(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "DATETIME(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "DATETIME(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 week)", + "Value": "DATETIME(\"2021-08-05 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 week)", + "Value": "DATETIME(\"2020-05-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 week)", + "Value": "DATETIME(\"2021-07-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 week)", + "Value": "DATETIME(\"2020-06-04 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 week)", + "Value": "DATETIME(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 week)", + "Value": "DATETIME(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 week)", + "Value": "DATETIME(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 week)", + "Value": "DATETIME(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 week)", + "Value": "DATETIME(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 week)", + "Value": "DATETIME(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 week)", + "Value": "DATETIME(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 week)", + "Value": "DATETIME(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' week)", + "Value": "DATETIME(\"2021-02-11 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' week)", + "Value": "DATETIME(\"2020-11-19 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 week)", + "Value": "DATETIME(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 week)", + "Value": "DATETIME(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 week)", + "Value": "DATETIME(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 week)", + "Value": "DATETIME(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "DATETIME(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "DATETIME(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "DATETIME(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "DATETIME(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' week)", + "Value": "DATETIME(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' week)", + "Value": "DATETIME(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 month)", + "Value": "DATETIME(\"2023-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 month)", + "Value": "DATETIME(\"2018-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 month)", + "Value": "DATETIME(\"2023-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 month)", + "Value": "DATETIME(\"2018-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' month)", + "Value": "DATETIME(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' month)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 year)", + "Value": "DATETIME(\"2051-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 year)", + "Value": "DATETIME(\"1989-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 year)", + "Value": "DATETIME(\"2050-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 year)", + "Value": "DATETIME(\"1990-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 year)", + "Value": "DATETIME(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 year)", + "Value": "DATETIME(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 year)", + "Value": "DATETIME(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 year)", + "Value": "DATETIME(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 year)", + "Value": "DATETIME(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 year)", + "Value": "DATETIME(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 year)", + "Value": "DATETIME(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 year)", + "Value": "DATETIME(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' year)", + "Value": "DATETIME(\"2026-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' year)", + "Value": "DATETIME(\"2014-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 year)", + "Value": "DATETIME(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 year)", + "Value": "DATETIME(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 year)", + "Value": "DATETIME(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 year)", + "Value": "DATETIME(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "DATETIME(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "DATETIME(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "DATETIME(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "DATETIME(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' year)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' year)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day_hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day_hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day_hour)", + "Value": "DATETIME(\"2021-01-02 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day_hour)", + "Value": "DATETIME(\"2020-12-30 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day_hour)", + "Value": "DATETIME(\"2020-12-30 13:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day_hour)", + "Value": "DATETIME(\"2021-01-02 09:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day_hour)", + "Value": "DATETIME(\"2021-01-02 09:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day_hour)", + "Value": "DATETIME(\"2020-12-30 13:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day_hour)", + "Value": "DATETIME(\"2021-01-02 06:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day_hour)", + "Value": "DATETIME(\"2020-12-30 16:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day_hour)", + "Value": "DATETIME(\"2021-01-02 05:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day_hour)", + "Value": "DATETIME(\"2020-12-30 17:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day_hour)", + "Value": "DATETIME(\"2135-01-31 14:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day_hour)", + "Value": "DATETIME(\"1906-12-03 08:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day_hour)", + "Value": "DATETIME(\"2021-02-12 14:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day_hour)", + "Value": "DATETIME(\"2020-11-19 08:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day_hour)", + "Value": "DATETIME(\"2021-02-12 14:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day_hour)", + "Value": "DATETIME(\"2020-11-19 08:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day_hour)", + "Value": "DATETIME(\"2020-12-30 13:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day_hour)", + "Value": "DATETIME(\"2021-01-02 09:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day_hour)", + "Value": "DATETIME(\"2021-01-02 09:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day_hour)", + "Value": "DATETIME(\"2020-12-30 13:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day_hour)", + "Value": "DATETIME(\"2021-01-02 04:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day_hour)", + "Value": "DATETIME(\"2020-12-30 18:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day_hour)", + "Value": "DATETIME(\"2021-07-29 07:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day_hour)", + "Value": "DATETIME(\"2020-06-05 15:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day_hour)", + "Value": "DATETIME(\"2021-07-29 07:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day_hour)", + "Value": "DATETIME(\"2020-06-05 15:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day_hour)", + "Value": "DATETIME(\"2021-01-07 03:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day_hour)", + "Value": "DATETIME(\"2020-12-25 19:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day_hour)", + "Value": "DATETIME(\"2021-01-02 04:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day_hour)", + "Value": "DATETIME(\"2020-12-30 18:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day_hour)", + "Value": "DATETIME(\"2021-01-02 04:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day_hour)", + "Value": "DATETIME(\"2020-12-30 18:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "DATETIME(\"2021-01-02 04:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "DATETIME(\"2020-12-30 18:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day_hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day_hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day_hour)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day_hour)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day_hour)", + "Value": "DATETIME(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day_hour)", + "Value": "DATETIME(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.310000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.690000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.300000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.700000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:05.400000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:52.600000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.200000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.800000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day_minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day_minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day_minute)", + "Value": "DATETIME(\"2021-01-01 01:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day_minute)", + "Value": "DATETIME(\"2020-12-31 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day_minute)", + "Value": "DATETIME(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day_minute)", + "Value": "DATETIME(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day_minute)", + "Value": "DATETIME(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day_minute)", + "Value": "DATETIME(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day_minute)", + "Value": "DATETIME(\"2021-01-01 00:30:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day_minute)", + "Value": "DATETIME(\"2020-12-31 23:28:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day_minute)", + "Value": "DATETIME(\"2021-01-01 00:29:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day_minute)", + "Value": "DATETIME(\"2020-12-31 23:29:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day_minute)", + "Value": "DATETIME(\"2022-11-26 11:38:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day_minute)", + "Value": "DATETIME(\"2019-02-06 12:20:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day_minute)", + "Value": "DATETIME(\"2021-01-01 17:38:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day_minute)", + "Value": "DATETIME(\"2020-12-31 06:20:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day_minute)", + "Value": "DATETIME(\"2021-01-01 17:38:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day_minute)", + "Value": "DATETIME(\"2020-12-31 06:20:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day_minute)", + "Value": "DATETIME(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day_minute)", + "Value": "DATETIME(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day_minute)", + "Value": "DATETIME(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day_minute)", + "Value": "DATETIME(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day_minute)", + "Value": "DATETIME(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day_minute)", + "Value": "DATETIME(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day_minute)", + "Value": "DATETIME(\"2021-01-04 12:19:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day_minute)", + "Value": "DATETIME(\"2020-12-28 11:39:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day_minute)", + "Value": "DATETIME(\"2021-01-04 12:19:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day_minute)", + "Value": "DATETIME(\"2020-12-28 11:39:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day_minute)", + "Value": "DATETIME(\"2021-01-01 06:03:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day_minute)", + "Value": "DATETIME(\"2020-12-31 17:55:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day_minute)", + "Value": "DATETIME(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day_minute)", + "Value": "DATETIME(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day_minute)", + "Value": "DATETIME(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day_minute)", + "Value": "DATETIME(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "DATETIME(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "DATETIME(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day_minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day_minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day_minute)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day_minute)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day_minute)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day_minute)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day_second)", + "Value": "DATETIME(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 day_second)", + "Value": "DATETIME(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_second)", + "Value": "DATETIME(\"2021-01-02 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_second)", + "Value": "DATETIME(\"2020-12-30 22:58:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day_second)", + "Value": "DATETIME(\"2021-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 day_second)", + "Value": "DATETIME(\"2020-12-31 23:59:28\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day_second)", + "Value": "DATETIME(\"2021-01-01 00:00:29\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 day_second)", + "Value": "DATETIME(\"2020-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day_second)", + "Value": "DATETIME(\"2021-01-12 13:47:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' day_second)", + "Value": "DATETIME(\"2020-12-20 10:12:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day_second)", + "Value": "DATETIME(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 day_second)", + "Value": "DATETIME(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day_second)", + "Value": "DATETIME(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' day_second)", + "Value": "DATETIME(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_second)", + "Value": "DATETIME(\"2021-01-02 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_second)", + "Value": "DATETIME(\"2020-12-30 22:58:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_second)", + "Value": "DATETIME(\"2021-01-02 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_second)", + "Value": "DATETIME(\"2020-12-30 22:58:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day_second)", + "Value": "DATETIME(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 day_second)", + "Value": "DATETIME(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day_second)", + "Value": "DATETIME(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 day_second)", + "Value": "DATETIME(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day_second)", + "Value": "DATETIME(\"2021-01-01 00:06:03\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' day_second)", + "Value": "DATETIME(\"2020-12-31 23:53:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "DATETIME(\"2021-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "DATETIME(\"2020-12-31 23:59:57\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day_second)", + "Value": "DATETIME(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 day_second)", + "Value": "DATETIME(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day_second)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day_second)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' day_second)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 hour)", + "Value": "DATETIME(\"2021-01-02 06:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 hour)", + "Value": "DATETIME(\"2020-12-30 16:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 hour)", + "Value": "DATETIME(\"2021-01-02 05:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 hour)", + "Value": "DATETIME(\"2020-12-30 17:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' hour)", + "Value": "DATETIME(\"2021-01-01 05:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' hour)", + "Value": "DATETIME(\"2020-12-31 17:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "DATETIME(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "DATETIME(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' hour)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' hour)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.310000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.690000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.300000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.700000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:05.400000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:52.600000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.200000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.800000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 hour_minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 hour_minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 hour_minute)", + "Value": "DATETIME(\"2021-01-01 00:30:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 hour_minute)", + "Value": "DATETIME(\"2020-12-31 23:28:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 hour_minute)", + "Value": "DATETIME(\"2021-01-01 00:29:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 hour_minute)", + "Value": "DATETIME(\"2020-12-31 23:29:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' hour_minute)", + "Value": "DATETIME(\"2022-11-26 11:38:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' hour_minute)", + "Value": "DATETIME(\"2019-02-06 12:20:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 hour_minute)", + "Value": "DATETIME(\"2021-01-01 17:38:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 hour_minute)", + "Value": "DATETIME(\"2020-12-31 06:20:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' hour_minute)", + "Value": "DATETIME(\"2021-01-01 17:38:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' hour_minute)", + "Value": "DATETIME(\"2020-12-31 06:20:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 hour_minute)", + "Value": "DATETIME(\"2021-01-04 12:19:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 hour_minute)", + "Value": "DATETIME(\"2020-12-28 11:39:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 hour_minute)", + "Value": "DATETIME(\"2021-01-04 12:19:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 hour_minute)", + "Value": "DATETIME(\"2020-12-28 11:39:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' hour_minute)", + "Value": "DATETIME(\"2021-01-01 06:03:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' hour_minute)", + "Value": "DATETIME(\"2020-12-31 17:55:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "DATETIME(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 hour_minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 hour_minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' hour_minute)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_minute)", + "Value": "DATETIME(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_minute)", + "Value": "DATETIME(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 hour_second)", + "Value": "DATETIME(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 hour_second)", + "Value": "DATETIME(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 hour_second)", + "Value": "DATETIME(\"2021-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 hour_second)", + "Value": "DATETIME(\"2020-12-31 23:59:28\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 hour_second)", + "Value": "DATETIME(\"2021-01-01 00:00:29\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 hour_second)", + "Value": "DATETIME(\"2020-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' hour_second)", + "Value": "DATETIME(\"2021-01-12 13:47:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' hour_second)", + "Value": "DATETIME(\"2020-12-20 10:12:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 hour_second)", + "Value": "DATETIME(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 hour_second)", + "Value": "DATETIME(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 hour_second)", + "Value": "DATETIME(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 hour_second)", + "Value": "DATETIME(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 hour_second)", + "Value": "DATETIME(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 hour_second)", + "Value": "DATETIME(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:06:03\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:53:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "DATETIME(\"2021-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "DATETIME(\"2020-12-31 23:59:57\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 hour_second)", + "Value": "DATETIME(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 hour_second)", + "Value": "DATETIME(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_second)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_second)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000031\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999969\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000030\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999970\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000006\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999994\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 minute)", + "Value": "DATETIME(\"2021-01-01 00:30:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 minute)", + "Value": "DATETIME(\"2020-12-31 23:28:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 minute)", + "Value": "DATETIME(\"2021-01-01 00:29:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 minute)", + "Value": "DATETIME(\"2020-12-31 23:29:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' minute)", + "Value": "DATETIME(\"2021-01-01 00:05:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' minute)", + "Value": "DATETIME(\"2020-12-31 23:53:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "DATETIME(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "DATETIME(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' minute)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' minute)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.310000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.690000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.300000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.700000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:05.400000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:52.600000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.200000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.800000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 minute_second)", + "Value": "DATETIME(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 minute_second)", + "Value": "DATETIME(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' minute_second)", + "Value": "DATETIME(\"2021-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' minute_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' minute_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 minute_second)", + "Value": "DATETIME(\"2021-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 minute_second)", + "Value": "DATETIME(\"2020-12-31 23:59:28\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 minute_second)", + "Value": "DATETIME(\"2021-01-01 00:00:29\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 minute_second)", + "Value": "DATETIME(\"2020-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' minute_second)", + "Value": "DATETIME(\"2021-01-12 13:47:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' minute_second)", + "Value": "DATETIME(\"2020-12-20 10:12:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 minute_second)", + "Value": "DATETIME(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 minute_second)", + "Value": "DATETIME(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' minute_second)", + "Value": "DATETIME(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' minute_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' minute_second)", + "Value": "DATETIME(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 minute_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 minute_second)", + "Value": "DATETIME(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 minute_second)", + "Value": "DATETIME(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 minute_second)", + "Value": "DATETIME(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 minute_second)", + "Value": "DATETIME(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' minute_second)", + "Value": "DATETIME(\"2021-01-01 00:06:03\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:53:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 minute_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 minute_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "DATETIME(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "DATETIME(\"2021-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "DATETIME(\"2020-12-31 23:59:57\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 minute_second)", + "Value": "DATETIME(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 minute_second)", + "Value": "DATETIME(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' minute_second)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' minute_second)", + "Value": "DATETIME(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' minute_second)", + "Value": "DATETIME(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 quarter)", + "Value": "DATETIME(\"2028-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 quarter)", + "Value": "DATETIME(\"2013-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 quarter)", + "Value": "DATETIME(\"2028-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 quarter)", + "Value": "DATETIME(\"2013-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 quarter)", + "Value": "DATETIME(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 quarter)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 quarter)", + "Value": "DATETIME(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 quarter)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 quarter)", + "Value": "DATETIME(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 quarter)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 quarter)", + "Value": "DATETIME(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 quarter)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' quarter)", + "Value": "DATETIME(\"2022-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' quarter)", + "Value": "DATETIME(\"2019-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 quarter)", + "Value": "DATETIME(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 quarter)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 quarter)", + "Value": "DATETIME(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 quarter)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "DATETIME(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "DATETIME(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "DATETIME(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' quarter)", + "Value": "DATETIME(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' quarter)", + "Value": "DATETIME(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 second)", + "Value": "DATETIME(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 second)", + "Value": "DATETIME(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 second)", + "Value": "DATETIME(\"2021-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 second)", + "Value": "DATETIME(\"2020-12-31 23:59:28\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 second)", + "Value": "DATETIME(\"2021-01-01 00:00:29\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 second)", + "Value": "DATETIME(\"2020-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.5\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.5\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.5000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.5000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.5000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.5000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' second)", + "Value": "DATETIME(\"2021-01-01 00:00:05.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' second)", + "Value": "DATETIME(\"2020-12-31 23:59:53.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.5\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "DATETIME(\"2020-12-31 23:59:57.5\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "DATETIME(\"2021-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "DATETIME(\"2020-12-31 23:59:57\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' second)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' second)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.310000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.690000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.300000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.700000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:05.400000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:52.600000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.200000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.800000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' second_microsecond)", + "Value": "DATETIME(\"2021-01-01 00:00:00.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' second_microsecond)", + "Value": "DATETIME(\"2020-12-31 23:59:58.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 year_month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 year_month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' year_month)", + "Value": "DATETIME(\"2022-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1' year_month)", + "Value": "DATETIME(\"2019-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' year_month)", + "Value": "DATETIME(\"2019-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1 10' year_month)", + "Value": "DATETIME(\"2022-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' year_month)", + "Value": "DATETIME(\"2022-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1 10' year_month)", + "Value": "DATETIME(\"2019-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 year_month)", + "Value": "DATETIME(\"2023-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 31 year_month)", + "Value": "DATETIME(\"2018-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 year_month)", + "Value": "DATETIME(\"2023-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 30 year_month)", + "Value": "DATETIME(\"2018-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 year_month)", + "Value": "DATETIME(\"2105-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.999 year_month)", + "Value": "DATETIME(\"1936-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' year_month)", + "Value": "DATETIME(\"2105-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.999' year_month)", + "Value": "DATETIME(\"1936-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' year_month)", + "Value": "DATETIME(\"2019-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '-1:10' year_month)", + "Value": "DATETIME(\"2022-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' year_month)", + "Value": "DATETIME(\"2022-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1:10' year_month)", + "Value": "DATETIME(\"2019-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 year_month)", + "Value": "DATETIME(\"2022-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5 year_month)", + "Value": "DATETIME(\"2019-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 year_month)", + "Value": "DATETIME(\"2438-08-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000 year_month)", + "Value": "DATETIME(\"1603-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 year_month)", + "Value": "DATETIME(\"2438-08-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 6/4 year_month)", + "Value": "DATETIME(\"1603-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' year_month)", + "Value": "DATETIME(\"2027-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '6/4' year_month)", + "Value": "DATETIME(\"2014-08-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 year_month)", + "Value": "DATETIME(\"2022-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5e0 year_month)", + "Value": "DATETIME(\"2019-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 year_month)", + "Value": "DATETIME(\"2022-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1.5000e0 year_month)", + "Value": "DATETIME(\"2019-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "DATETIME(\"2022-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "DATETIME(\"2019-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "DATETIME(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "DATETIME(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 year_month)", + "Value": "DATETIME(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1e0 year_month)", + "Value": "DATETIME(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' year_month)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0' year_month)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' year_month)", + "Value": "DATETIME(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2020-12-31 23:59:59', INTERVAL '1.0foobar' year_month)", + "Value": "DATETIME(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day)", + "Value": "DATETIME(\"2025-01-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day)", + "Value": "DATETIME(\"2024-12-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day)", + "Value": "DATETIME(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day)", + "Value": "DATETIME(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day)", + "Value": "DATETIME(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day)", + "Value": "DATETIME(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day)", + "Value": "DATETIME(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day)", + "Value": "DATETIME(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day)", + "Value": "DATETIME(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day)", + "Value": "DATETIME(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day)", + "Value": "DATETIME(\"2025-01-07 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day)", + "Value": "DATETIME(\"2024-12-26 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day)", + "Value": "DATETIME(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day)", + "Value": "DATETIME(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day)", + "Value": "DATETIME(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day)", + "Value": "DATETIME(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "DATETIME(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "DATETIME(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "DATETIME(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "DATETIME(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 week)", + "Value": "DATETIME(\"2025-08-06 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 week)", + "Value": "DATETIME(\"2024-05-29 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 week)", + "Value": "DATETIME(\"2025-07-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 week)", + "Value": "DATETIME(\"2024-06-05 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 week)", + "Value": "DATETIME(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 week)", + "Value": "DATETIME(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 week)", + "Value": "DATETIME(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 week)", + "Value": "DATETIME(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 week)", + "Value": "DATETIME(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 week)", + "Value": "DATETIME(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 week)", + "Value": "DATETIME(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 week)", + "Value": "DATETIME(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' week)", + "Value": "DATETIME(\"2025-02-12 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' week)", + "Value": "DATETIME(\"2024-11-20 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 week)", + "Value": "DATETIME(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 week)", + "Value": "DATETIME(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 week)", + "Value": "DATETIME(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 week)", + "Value": "DATETIME(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "DATETIME(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "DATETIME(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "DATETIME(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "DATETIME(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' week)", + "Value": "DATETIME(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' week)", + "Value": "DATETIME(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 month)", + "Value": "DATETIME(\"2027-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 month)", + "Value": "DATETIME(\"2022-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 month)", + "Value": "DATETIME(\"2027-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 month)", + "Value": "DATETIME(\"2022-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' month)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' month)", + "Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 year)", + "Value": "DATETIME(\"2056-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 year)", + "Value": "DATETIME(\"1994-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 year)", + "Value": "DATETIME(\"2055-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 year)", + "Value": "DATETIME(\"1995-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 year)", + "Value": "DATETIME(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 year)", + "Value": "DATETIME(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 year)", + "Value": "DATETIME(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 year)", + "Value": "DATETIME(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 year)", + "Value": "DATETIME(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 year)", + "Value": "DATETIME(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 year)", + "Value": "DATETIME(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 year)", + "Value": "DATETIME(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' year)", + "Value": "DATETIME(\"2031-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' year)", + "Value": "DATETIME(\"2019-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 year)", + "Value": "DATETIME(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 year)", + "Value": "DATETIME(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 year)", + "Value": "DATETIME(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 year)", + "Value": "DATETIME(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "DATETIME(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "DATETIME(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "DATETIME(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "DATETIME(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' year)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' year)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day_hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day_hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day_hour)", + "Value": "DATETIME(\"2025-01-02 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day_hour)", + "Value": "DATETIME(\"2024-12-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day_hour)", + "Value": "DATETIME(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day_hour)", + "Value": "DATETIME(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day_hour)", + "Value": "DATETIME(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day_hour)", + "Value": "DATETIME(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day_hour)", + "Value": "DATETIME(\"2025-01-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day_hour)", + "Value": "DATETIME(\"2024-12-30 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day_hour)", + "Value": "DATETIME(\"2025-01-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day_hour)", + "Value": "DATETIME(\"2024-12-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day_hour)", + "Value": "DATETIME(\"2139-01-31 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day_hour)", + "Value": "DATETIME(\"1910-12-03 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day_hour)", + "Value": "DATETIME(\"2025-02-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day_hour)", + "Value": "DATETIME(\"2024-11-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day_hour)", + "Value": "DATETIME(\"2025-02-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day_hour)", + "Value": "DATETIME(\"2024-11-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day_hour)", + "Value": "DATETIME(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day_hour)", + "Value": "DATETIME(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day_hour)", + "Value": "DATETIME(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day_hour)", + "Value": "DATETIME(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day_hour)", + "Value": "DATETIME(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day_hour)", + "Value": "DATETIME(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day_hour)", + "Value": "DATETIME(\"2025-07-29 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day_hour)", + "Value": "DATETIME(\"2024-06-05 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day_hour)", + "Value": "DATETIME(\"2025-07-29 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day_hour)", + "Value": "DATETIME(\"2024-06-05 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day_hour)", + "Value": "DATETIME(\"2025-01-07 04:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day_hour)", + "Value": "DATETIME(\"2024-12-25 20:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day_hour)", + "Value": "DATETIME(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day_hour)", + "Value": "DATETIME(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day_hour)", + "Value": "DATETIME(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day_hour)", + "Value": "DATETIME(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "DATETIME(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "DATETIME(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day_hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day_hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day_hour)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day_hour)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day_hour)", + "Value": "DATETIME(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day_hour)", + "Value": "DATETIME(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day_minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day_minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day_minute)", + "Value": "DATETIME(\"2025-01-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day_minute)", + "Value": "DATETIME(\"2024-12-31 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day_minute)", + "Value": "DATETIME(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day_minute)", + "Value": "DATETIME(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day_minute)", + "Value": "DATETIME(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day_minute)", + "Value": "DATETIME(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day_minute)", + "Value": "DATETIME(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day_minute)", + "Value": "DATETIME(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day_minute)", + "Value": "DATETIME(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day_minute)", + "Value": "DATETIME(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day_minute)", + "Value": "DATETIME(\"2026-11-26 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day_minute)", + "Value": "DATETIME(\"2023-02-06 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day_minute)", + "Value": "DATETIME(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day_minute)", + "Value": "DATETIME(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day_minute)", + "Value": "DATETIME(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day_minute)", + "Value": "DATETIME(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day_minute)", + "Value": "DATETIME(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day_minute)", + "Value": "DATETIME(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day_minute)", + "Value": "DATETIME(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day_minute)", + "Value": "DATETIME(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day_minute)", + "Value": "DATETIME(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day_minute)", + "Value": "DATETIME(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day_minute)", + "Value": "DATETIME(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day_minute)", + "Value": "DATETIME(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day_minute)", + "Value": "DATETIME(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day_minute)", + "Value": "DATETIME(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day_minute)", + "Value": "DATETIME(\"2025-01-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day_minute)", + "Value": "DATETIME(\"2024-12-31 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day_minute)", + "Value": "DATETIME(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day_minute)", + "Value": "DATETIME(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day_minute)", + "Value": "DATETIME(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day_minute)", + "Value": "DATETIME(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "DATETIME(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "DATETIME(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day_minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day_minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day_minute)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day_minute)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day_minute)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day_minute)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day_second)", + "Value": "DATETIME(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 day_second)", + "Value": "DATETIME(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_second)", + "Value": "DATETIME(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_second)", + "Value": "DATETIME(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day_second)", + "Value": "DATETIME(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 day_second)", + "Value": "DATETIME(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day_second)", + "Value": "DATETIME(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 day_second)", + "Value": "DATETIME(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day_second)", + "Value": "DATETIME(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' day_second)", + "Value": "DATETIME(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day_second)", + "Value": "DATETIME(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 day_second)", + "Value": "DATETIME(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day_second)", + "Value": "DATETIME(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' day_second)", + "Value": "DATETIME(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_second)", + "Value": "DATETIME(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_second)", + "Value": "DATETIME(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_second)", + "Value": "DATETIME(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_second)", + "Value": "DATETIME(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day_second)", + "Value": "DATETIME(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 day_second)", + "Value": "DATETIME(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day_second)", + "Value": "DATETIME(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 day_second)", + "Value": "DATETIME(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day_second)", + "Value": "DATETIME(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' day_second)", + "Value": "DATETIME(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "DATETIME(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "DATETIME(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day_second)", + "Value": "DATETIME(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 day_second)", + "Value": "DATETIME(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' day_second)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day_second)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' day_second)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 hour)", + "Value": "DATETIME(\"2025-01-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 hour)", + "Value": "DATETIME(\"2024-12-30 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 hour)", + "Value": "DATETIME(\"2025-01-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 hour)", + "Value": "DATETIME(\"2024-12-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' hour)", + "Value": "DATETIME(\"2025-01-01 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' hour)", + "Value": "DATETIME(\"2024-12-31 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "DATETIME(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "DATETIME(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' hour)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' hour)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 hour_minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 hour_minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 hour_minute)", + "Value": "DATETIME(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 hour_minute)", + "Value": "DATETIME(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 hour_minute)", + "Value": "DATETIME(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 hour_minute)", + "Value": "DATETIME(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' hour_minute)", + "Value": "DATETIME(\"2026-11-26 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' hour_minute)", + "Value": "DATETIME(\"2023-02-06 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 hour_minute)", + "Value": "DATETIME(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 hour_minute)", + "Value": "DATETIME(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' hour_minute)", + "Value": "DATETIME(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' hour_minute)", + "Value": "DATETIME(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 hour_minute)", + "Value": "DATETIME(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 hour_minute)", + "Value": "DATETIME(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 hour_minute)", + "Value": "DATETIME(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 hour_minute)", + "Value": "DATETIME(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' hour_minute)", + "Value": "DATETIME(\"2025-01-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' hour_minute)", + "Value": "DATETIME(\"2024-12-31 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "DATETIME(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 hour_minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 hour_minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' hour_minute)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_minute)", + "Value": "DATETIME(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_minute)", + "Value": "DATETIME(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 hour_second)", + "Value": "DATETIME(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 hour_second)", + "Value": "DATETIME(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 hour_second)", + "Value": "DATETIME(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 hour_second)", + "Value": "DATETIME(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 hour_second)", + "Value": "DATETIME(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 hour_second)", + "Value": "DATETIME(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' hour_second)", + "Value": "DATETIME(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' hour_second)", + "Value": "DATETIME(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 hour_second)", + "Value": "DATETIME(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 hour_second)", + "Value": "DATETIME(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 hour_second)", + "Value": "DATETIME(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 hour_second)", + "Value": "DATETIME(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 hour_second)", + "Value": "DATETIME(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 hour_second)", + "Value": "DATETIME(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "DATETIME(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "DATETIME(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 hour_second)", + "Value": "DATETIME(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 hour_second)", + "Value": "DATETIME(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_second)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_second)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000031\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999969\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000030\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999970\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000006\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999994\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 minute)", + "Value": "DATETIME(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 minute)", + "Value": "DATETIME(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 minute)", + "Value": "DATETIME(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 minute)", + "Value": "DATETIME(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' minute)", + "Value": "DATETIME(\"2025-01-01 00:06:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' minute)", + "Value": "DATETIME(\"2024-12-31 23:54:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "DATETIME(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "DATETIME(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' minute)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' minute)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 minute_second)", + "Value": "DATETIME(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 minute_second)", + "Value": "DATETIME(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 minute_second)", + "Value": "DATETIME(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 minute_second)", + "Value": "DATETIME(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 minute_second)", + "Value": "DATETIME(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 minute_second)", + "Value": "DATETIME(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' minute_second)", + "Value": "DATETIME(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' minute_second)", + "Value": "DATETIME(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 minute_second)", + "Value": "DATETIME(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 minute_second)", + "Value": "DATETIME(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' minute_second)", + "Value": "DATETIME(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 minute_second)", + "Value": "DATETIME(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 minute_second)", + "Value": "DATETIME(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 minute_second)", + "Value": "DATETIME(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 minute_second)", + "Value": "DATETIME(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' minute_second)", + "Value": "DATETIME(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "DATETIME(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "DATETIME(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "DATETIME(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 minute_second)", + "Value": "DATETIME(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 minute_second)", + "Value": "DATETIME(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' minute_second)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' minute_second)", + "Value": "DATETIME(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' minute_second)", + "Value": "DATETIME(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 quarter)", + "Value": "DATETIME(\"2032-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 quarter)", + "Value": "DATETIME(\"2017-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 quarter)", + "Value": "DATETIME(\"2032-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 quarter)", + "Value": "DATETIME(\"2017-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 quarter)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 quarter)", + "Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 quarter)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 quarter)", + "Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 quarter)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 quarter)", + "Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 quarter)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 quarter)", + "Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' quarter)", + "Value": "DATETIME(\"2026-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' quarter)", + "Value": "DATETIME(\"2023-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 quarter)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 quarter)", + "Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 quarter)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 quarter)", + "Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "DATETIME(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "DATETIME(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' quarter)", + "Value": "DATETIME(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' quarter)", + "Value": "DATETIME(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 second)", + "Value": "DATETIME(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 second)", + "Value": "DATETIME(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 second)", + "Value": "DATETIME(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 second)", + "Value": "DATETIME(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 second)", + "Value": "DATETIME(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 second)", + "Value": "DATETIME(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.5\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.5\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.5000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.5000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.5000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.5000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' second)", + "Value": "DATETIME(\"2025-01-01 00:00:06.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' second)", + "Value": "DATETIME(\"2024-12-31 23:59:54.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.5\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "DATETIME(\"2024-12-31 23:59:58.5\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "DATETIME(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "DATETIME(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' second)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' second)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' second_microsecond)", + "Value": "DATETIME(\"2025-01-01 00:00:01.000000\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' second_microsecond)", + "Value": "DATETIME(\"2024-12-31 23:59:59.000000\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 year_month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1 year_month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' year_month)", + "Value": "DATETIME(\"2026-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1' year_month)", + "Value": "DATETIME(\"2023-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' year_month)", + "Value": "DATETIME(\"2023-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1 10' year_month)", + "Value": "DATETIME(\"2026-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' year_month)", + "Value": "DATETIME(\"2026-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 10' year_month)", + "Value": "DATETIME(\"2023-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 year_month)", + "Value": "DATETIME(\"2027-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 31 year_month)", + "Value": "DATETIME(\"2022-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 year_month)", + "Value": "DATETIME(\"2027-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 30 year_month)", + "Value": "DATETIME(\"2022-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 year_month)", + "Value": "DATETIME(\"2109-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.999 year_month)", + "Value": "DATETIME(\"1940-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' year_month)", + "Value": "DATETIME(\"2109-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.999' year_month)", + "Value": "DATETIME(\"1940-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' year_month)", + "Value": "DATETIME(\"2023-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '-1:10' year_month)", + "Value": "DATETIME(\"2026-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' year_month)", + "Value": "DATETIME(\"2026-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1:10' year_month)", + "Value": "DATETIME(\"2023-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 year_month)", + "Value": "DATETIME(\"2026-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5 year_month)", + "Value": "DATETIME(\"2023-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 year_month)", + "Value": "DATETIME(\"2442-09-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000 year_month)", + "Value": "DATETIME(\"1607-05-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 year_month)", + "Value": "DATETIME(\"2442-09-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 6/4 year_month)", + "Value": "DATETIME(\"1607-05-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' year_month)", + "Value": "DATETIME(\"2031-05-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '6/4' year_month)", + "Value": "DATETIME(\"2018-09-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 year_month)", + "Value": "DATETIME(\"2026-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5e0 year_month)", + "Value": "DATETIME(\"2023-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 year_month)", + "Value": "DATETIME(\"2026-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1.5000e0 year_month)", + "Value": "DATETIME(\"2023-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "DATETIME(\"2026-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "DATETIME(\"2023-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "DATETIME(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "DATETIME(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 year_month)", + "Value": "DATETIME(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL 1e0 year_month)", + "Value": "DATETIME(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' year_month)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0' year_month)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' year_month)", + "Value": "DATETIME(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1.0foobar' year_month)", + "Value": "DATETIME(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 day)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 day)", + "Value": "CHAR(\"2018-03-31\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 day)", + "Value": "CHAR(\"2018-05-31\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 day)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 day)", + "Value": "CHAR(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 day)", + "Value": "CHAR(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 day)", + "Value": "CHAR(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 day)", + "Value": "CHAR(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 day)", + "Value": "CHAR(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 day)", + "Value": "CHAR(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 day)", + "Value": "CHAR(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 day)", + "Value": "CHAR(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' day)", + "Value": "CHAR(\"2018-05-07\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' day)", + "Value": "CHAR(\"2018-04-25\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2018-05-03\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2018-04-29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2018-05-02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2018-04-30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 week)", + "Value": "CHAR(\"2018-12-04\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 week)", + "Value": "CHAR(\"2017-09-26\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 week)", + "Value": "CHAR(\"2018-11-27\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 week)", + "Value": "CHAR(\"2017-10-03\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 week)", + "Value": "CHAR(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 week)", + "Value": "CHAR(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 week)", + "Value": "CHAR(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 week)", + "Value": "CHAR(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 week)", + "Value": "CHAR(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 week)", + "Value": "CHAR(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 week)", + "Value": "CHAR(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 week)", + "Value": "CHAR(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' week)", + "Value": "CHAR(\"2018-06-12\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' week)", + "Value": "CHAR(\"2018-03-20\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2018-05-15\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2018-04-17\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2018-05-08\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2018-04-24\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 month)", + "Value": "CHAR(\"2020-12-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 month)", + "Value": "CHAR(\"2015-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 month)", + "Value": "CHAR(\"2020-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 month)", + "Value": "CHAR(\"2015-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 month)", + "Value": "CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 month)", + "Value": "CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 month)", + "Value": "CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 month)", + "Value": "CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' month)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' month)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 year)", + "Value": "CHAR(\"2049-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 year)", + "Value": "CHAR(\"1987-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 year)", + "Value": "CHAR(\"2048-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 year)", + "Value": "CHAR(\"1988-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 year)", + "Value": "CHAR(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 year)", + "Value": "CHAR(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 year)", + "Value": "CHAR(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 year)", + "Value": "CHAR(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 year)", + "Value": "CHAR(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 year)", + "Value": "CHAR(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 year)", + "Value": "CHAR(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 year)", + "Value": "CHAR(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' year)", + "Value": "CHAR(\"2024-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' year)", + "Value": "CHAR(\"2012-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2020-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2016-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 day_hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 day_hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2018-05-02 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2018-04-29 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2018-04-29 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2018-05-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2018-05-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2018-04-29 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 day_hour)", + "Value": "CHAR(\"2018-05-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 day_hour)", + "Value": "CHAR(\"2018-04-29 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 day_hour)", + "Value": "CHAR(\"2018-05-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 day_hour)", + "Value": "CHAR(\"2018-04-29 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"2132-05-30 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"1904-04-01 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2018-06-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2018-03-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2018-06-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2018-03-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2018-04-29 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2018-05-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2018-05-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2018-04-29 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2018-05-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2018-04-29 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2018-11-26 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2017-10-03 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2018-11-26 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2017-10-03 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2018-05-07 04:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2018-04-24 20:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2018-05-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2018-04-29 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2018-05-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2018-04-29 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2018-05-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2018-04-29 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2018-05-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2018-04-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2018-05-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2018-04-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 day_minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 day_minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2018-05-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2018-04-30 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 day_minute)", + "Value": "CHAR(\"2018-05-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 day_minute)", + "Value": "CHAR(\"2018-04-30 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 day_minute)", + "Value": "CHAR(\"2018-05-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 day_minute)", + "Value": "CHAR(\"2018-04-30 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2020-03-25 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2016-06-05 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2018-05-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2018-04-30 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2018-05-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2018-04-30 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2018-05-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2018-04-27 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2018-05-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2018-04-27 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2018-05-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2018-04-30 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 day_second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 day_second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2018-05-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2018-04-30 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2018-05-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2018-04-29 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 day_second)", + "Value": "CHAR(\"2018-05-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 day_second)", + "Value": "CHAR(\"2018-04-30 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 day_second)", + "Value": "CHAR(\"2018-05-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 day_second)", + "Value": "CHAR(\"2018-04-30 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2018-05-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2018-04-19 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2018-05-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2018-04-29 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2018-05-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2018-04-29 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2018-05-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2018-04-30 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2018-05-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2018-04-30 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 hour)", + "Value": "CHAR(\"2018-05-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 hour)", + "Value": "CHAR(\"2018-04-29 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 hour)", + "Value": "CHAR(\"2018-05-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 hour)", + "Value": "CHAR(\"2018-04-29 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 hour)", + "Value": "CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 hour)", + "Value": "CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 hour)", + "Value": "CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' hour)", + "Value": "CHAR(\"2018-05-01 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' hour)", + "Value": "CHAR(\"2018-04-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2018-05-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2018-04-30 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2018-05-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2018-04-30 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2018-05-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2018-04-30 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2018-05-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2018-04-30 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2020-03-25 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2016-06-05 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2018-05-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2018-04-30 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2018-05-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2018-04-30 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2018-05-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2018-04-30 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2018-05-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2018-04-27 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2018-05-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2018-04-27 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2018-05-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2018-04-30 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2018-05-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2018-04-30 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2018-05-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2018-04-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 hour_second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 hour_second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2018-04-30 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 hour_second)", + "Value": "CHAR(\"2018-05-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 hour_second)", + "Value": "CHAR(\"2018-04-30 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 hour_second)", + "Value": "CHAR(\"2018-05-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 hour_second)", + "Value": "CHAR(\"2018-04-30 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2018-05-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2018-04-19 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2018-05-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2018-04-30 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2018-05-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2018-04-30 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000031\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999969\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000030\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999970\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000006\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999994\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 minute)", + "Value": "CHAR(\"2018-05-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 minute)", + "Value": "CHAR(\"2018-04-30 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 minute)", + "Value": "CHAR(\"2018-05-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 minute)", + "Value": "CHAR(\"2018-04-30 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' minute)", + "Value": "CHAR(\"2018-05-01 00:06:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' minute)", + "Value": "CHAR(\"2018-04-30 23:54:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2018-05-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2018-04-30 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 minute_second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 minute_second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 minute_second)", + "Value": "CHAR(\"2018-05-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 minute_second)", + "Value": "CHAR(\"2018-04-30 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 minute_second)", + "Value": "CHAR(\"2018-05-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 minute_second)", + "Value": "CHAR(\"2018-04-30 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2018-05-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2018-04-19 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2018-05-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2018-04-30 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2018-05-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2018-04-30 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2018-05-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2018-04-30 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2018-04-30 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2018-05-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2018-04-30 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2018-05-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2018-04-30 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 quarter)", + "Value": "CHAR(\"2026-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 quarter)", + "Value": "CHAR(\"2010-08-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 quarter)", + "Value": "CHAR(\"2025-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 quarter)", + "Value": "CHAR(\"2010-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2019-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2016-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2018-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2017-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2018-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2018-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 second)", + "Value": "CHAR(\"2018-05-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 second)", + "Value": "CHAR(\"2018-04-30 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 second)", + "Value": "CHAR(\"2018-05-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 second)", + "Value": "CHAR(\"2018-04-30 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' second)", + "Value": "CHAR(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' second)", + "Value": "CHAR(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 second)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 second)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' second)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' second)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 second)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 second)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 second)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 second)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 second)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 second)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' second)", + "Value": "CHAR(\"2018-05-01 00:00:06\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' second)", + "Value": "CHAR(\"2018-04-30 23:59:54\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2018-05-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2018-04-30 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2018-05-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2018-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1 year_month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1 year_month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2019-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2017-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2016-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2020-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2020-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2016-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 31 year_month)", + "Value": "CHAR(\"2020-12-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 31 year_month)", + "Value": "CHAR(\"2015-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 30 year_month)", + "Value": "CHAR(\"2020-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 30 year_month)", + "Value": "CHAR(\"2015-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.999 year_month)", + "Value": "CHAR(\"2102-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.999 year_month)", + "Value": "CHAR(\"1934-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.999' year_month)", + "Value": "CHAR(\"2102-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.999' year_month)", + "Value": "CHAR(\"1934-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2016-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2020-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2020-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2016-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2019-10-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2016-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"2436-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"1600-09-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 6/4 year_month)", + "Value": "CHAR(\"2436-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 6/4 year_month)", + "Value": "CHAR(\"1600-09-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2024-09-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2012-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2019-10-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2016-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2019-10-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2016-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2019-10-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2016-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2018-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2018-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2018-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2018-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2018-05-01', INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2019-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('2018-05-01', INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2017-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 day)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 day)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 day)", + "Value": "CHAR(\"2021-01-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 day)", + "Value": "CHAR(\"2020-12-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 day)", + "Value": "CHAR(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 day)", + "Value": "CHAR(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 day)", + "Value": "CHAR(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 day)", + "Value": "CHAR(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 day)", + "Value": "CHAR(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 day)", + "Value": "CHAR(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 day)", + "Value": "CHAR(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 day)", + "Value": "CHAR(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' day)", + "Value": "CHAR(\"2021-01-06 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' day)", + "Value": "CHAR(\"2020-12-25 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2021-01-02 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2020-12-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 week)", + "Value": "CHAR(\"2021-08-05 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 week)", + "Value": "CHAR(\"2020-05-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 week)", + "Value": "CHAR(\"2021-07-29 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 week)", + "Value": "CHAR(\"2020-06-04 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 week)", + "Value": "CHAR(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 week)", + "Value": "CHAR(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 week)", + "Value": "CHAR(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 week)", + "Value": "CHAR(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 week)", + "Value": "CHAR(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 week)", + "Value": "CHAR(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 week)", + "Value": "CHAR(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 week)", + "Value": "CHAR(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' week)", + "Value": "CHAR(\"2021-02-11 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' week)", + "Value": "CHAR(\"2020-11-19 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2021-01-14 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2020-12-17 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2021-01-07 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2020-12-24 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 month)", + "Value": "CHAR(\"2023-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 month)", + "Value": "CHAR(\"2018-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 month)", + "Value": "CHAR(\"2023-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 month)", + "Value": "CHAR(\"2018-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' month)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' month)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 year)", + "Value": "CHAR(\"2051-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 year)", + "Value": "CHAR(\"1989-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 year)", + "Value": "CHAR(\"2050-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 year)", + "Value": "CHAR(\"1990-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 year)", + "Value": "CHAR(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 year)", + "Value": "CHAR(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 year)", + "Value": "CHAR(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 year)", + "Value": "CHAR(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 year)", + "Value": "CHAR(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 year)", + "Value": "CHAR(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 year)", + "Value": "CHAR(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 year)", + "Value": "CHAR(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' year)", + "Value": "CHAR(\"2026-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' year)", + "Value": "CHAR(\"2014-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2022-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2018-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 day_hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 day_hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2021-01-02 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2020-12-30 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2020-12-30 13:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2021-01-02 09:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2021-01-02 09:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2020-12-30 13:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 day_hour)", + "Value": "CHAR(\"2021-01-02 06:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 day_hour)", + "Value": "CHAR(\"2020-12-30 16:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 day_hour)", + "Value": "CHAR(\"2021-01-02 05:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 day_hour)", + "Value": "CHAR(\"2020-12-30 17:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"2135-01-31 14:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"1906-12-03 08:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2021-02-12 14:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2020-11-19 08:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2021-02-12 14:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2020-11-19 08:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2020-12-30 13:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2021-01-02 09:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2021-01-02 09:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2020-12-30 13:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2021-01-02 04:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2020-12-30 18:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2021-07-29 07:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2020-06-05 15:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2021-07-29 07:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2020-06-05 15:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2021-01-07 03:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2020-12-25 19:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2021-01-02 04:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2020-12-30 18:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2021-01-02 04:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2020-12-30 18:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2021-01-02 04:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2020-12-30 18:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2021-01-01 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2020-12-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:05.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:52.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 day_minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 day_minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2021-01-01 01:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2020-12-31 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 day_minute)", + "Value": "CHAR(\"2021-01-01 00:30:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 day_minute)", + "Value": "CHAR(\"2020-12-31 23:28:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 day_minute)", + "Value": "CHAR(\"2021-01-01 00:29:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 day_minute)", + "Value": "CHAR(\"2020-12-31 23:29:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2022-11-26 11:38:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2019-02-06 12:20:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2021-01-01 17:38:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2020-12-31 06:20:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2021-01-01 17:38:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2020-12-31 06:20:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2021-01-04 12:19:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2020-12-28 11:39:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2021-01-04 12:19:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2020-12-28 11:39:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2021-01-01 06:03:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2020-12-31 17:55:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 day_second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 day_second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2021-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2020-12-31 23:58:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2021-01-02 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2020-12-30 22:58:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 day_second)", + "Value": "CHAR(\"2021-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 day_second)", + "Value": "CHAR(\"2020-12-31 23:59:28\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 day_second)", + "Value": "CHAR(\"2021-01-01 00:00:29\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 day_second)", + "Value": "CHAR(\"2020-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2021-01-12 13:47:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2020-12-20 10:12:20\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2021-01-02 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2020-12-30 22:58:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2021-01-02 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2020-12-30 22:58:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2021-01-01 00:06:03\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2020-12-31 23:53:55\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2021-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2020-12-31 23:59:57\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 hour)", + "Value": "CHAR(\"2021-01-02 06:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 hour)", + "Value": "CHAR(\"2020-12-30 16:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 hour)", + "Value": "CHAR(\"2021-01-02 05:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 hour)", + "Value": "CHAR(\"2020-12-30 17:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 hour)", + "Value": "CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 hour)", + "Value": "CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 hour)", + "Value": "CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' hour)", + "Value": "CHAR(\"2021-01-01 05:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' hour)", + "Value": "CHAR(\"2020-12-31 17:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2021-01-01 01:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2020-12-31 21:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 01:01:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 22:58:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:05.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:52.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2021-01-01 01:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2020-12-31 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2021-01-01 00:30:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2020-12-31 23:28:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2021-01-01 00:29:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2020-12-31 23:29:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2022-11-26 11:38:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2019-02-06 12:20:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2021-01-01 17:38:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2020-12-31 06:20:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2021-01-01 17:38:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2020-12-31 06:20:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2021-01-01 01:09:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2020-12-31 22:49:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2021-01-04 12:19:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2020-12-28 11:39:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2021-01-04 12:19:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2020-12-28 11:39:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2021-01-01 06:03:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2020-12-31 17:55:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2021-01-01 01:04:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2020-12-31 22:54:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2021-01-01 00:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2020-12-31 22:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 hour_second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 hour_second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2021-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 hour_second)", + "Value": "CHAR(\"2021-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 hour_second)", + "Value": "CHAR(\"2020-12-31 23:59:28\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 hour_second)", + "Value": "CHAR(\"2021-01-01 00:00:29\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 hour_second)", + "Value": "CHAR(\"2020-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2021-01-12 13:47:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2020-12-20 10:12:20\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2021-01-01 00:06:03\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2020-12-31 23:53:55\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2021-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2020-12-31 23:59:57\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000031\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999969\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000030\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999970\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000006\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999994\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 minute)", + "Value": "CHAR(\"2021-01-01 00:30:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 minute)", + "Value": "CHAR(\"2020-12-31 23:28:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 minute)", + "Value": "CHAR(\"2021-01-01 00:29:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 minute)", + "Value": "CHAR(\"2020-12-31 23:29:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' minute)", + "Value": "CHAR(\"2021-01-01 00:05:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' minute)", + "Value": "CHAR(\"2020-12-31 23:53:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2021-01-01 00:01:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2020-12-31 23:57:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:05.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:52.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 minute_second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 minute_second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 minute_second)", + "Value": "CHAR(\"2021-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 minute_second)", + "Value": "CHAR(\"2020-12-31 23:59:28\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 minute_second)", + "Value": "CHAR(\"2021-01-01 00:00:29\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 minute_second)", + "Value": "CHAR(\"2020-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2021-01-12 13:47:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2020-12-20 10:12:20\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2021-01-01 00:17:38\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2020-12-31 23:42:20\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:09\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:49\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2021-01-01 01:24:19\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2020-12-31 22:35:39\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2021-01-01 00:06:03\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2020-12-31 23:53:55\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2021-01-01 00:01:04\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:54\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2021-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2020-12-31 23:59:57\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2021-01-01 00:00:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2020-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 quarter)", + "Value": "CHAR(\"2028-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 quarter)", + "Value": "CHAR(\"2013-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 quarter)", + "Value": "CHAR(\"2028-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 quarter)", + "Value": "CHAR(\"2013-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2022-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2019-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2021-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2020-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2021-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2020-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 second)", + "Value": "CHAR(\"2021-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 second)", + "Value": "CHAR(\"2020-12-31 23:59:28\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 second)", + "Value": "CHAR(\"2021-01-01 00:00:29\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 second)", + "Value": "CHAR(\"2020-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' second)", + "Value": "CHAR(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' second)", + "Value": "CHAR(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 second)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 second)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' second)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' second)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 second)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 second)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 second)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 second)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 second)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 second)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' second)", + "Value": "CHAR(\"2021-01-01 00:00:05\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' second)", + "Value": "CHAR(\"2020-12-31 23:59:53\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2021-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2020-12-31 23:59:57\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:05.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:52.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:57.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:59.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2021-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2020-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1 year_month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1 year_month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2022-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2019-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2019-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2022-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2022-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2019-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 31 year_month)", + "Value": "CHAR(\"2023-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 31 year_month)", + "Value": "CHAR(\"2018-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 30 year_month)", + "Value": "CHAR(\"2023-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 30 year_month)", + "Value": "CHAR(\"2018-06-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.999 year_month)", + "Value": "CHAR(\"2105-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.999 year_month)", + "Value": "CHAR(\"1936-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.999' year_month)", + "Value": "CHAR(\"2105-03-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.999' year_month)", + "Value": "CHAR(\"1936-09-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2019-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2022-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2022-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2019-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2022-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2019-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"2438-08-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"1603-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 6/4 year_month)", + "Value": "CHAR(\"2438-08-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 6/4 year_month)", + "Value": "CHAR(\"1603-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2027-04-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2014-08-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2022-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2019-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2022-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2019-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2022-05-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2019-07-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2021-02-28 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2020-10-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2021-01-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2020-11-30 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2020-12-31 23:59:59', INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2021-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2020-12-31 23:59:59', INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2019-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 day)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 day)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 day)", + "Value": "CHAR(\"2025-01-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 day)", + "Value": "CHAR(\"2024-12-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 day)", + "Value": "CHAR(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 day)", + "Value": "CHAR(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 day)", + "Value": "CHAR(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 day)", + "Value": "CHAR(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 day)", + "Value": "CHAR(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 day)", + "Value": "CHAR(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 day)", + "Value": "CHAR(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 day)", + "Value": "CHAR(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' day)", + "Value": "CHAR(\"2025-01-07 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' day)", + "Value": "CHAR(\"2024-12-26 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2025-01-03 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2024-12-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 week)", + "Value": "CHAR(\"2025-08-06 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 week)", + "Value": "CHAR(\"2024-05-29 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 week)", + "Value": "CHAR(\"2025-07-30 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 week)", + "Value": "CHAR(\"2024-06-05 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 week)", + "Value": "CHAR(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 week)", + "Value": "CHAR(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 week)", + "Value": "CHAR(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 week)", + "Value": "CHAR(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 week)", + "Value": "CHAR(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 week)", + "Value": "CHAR(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 week)", + "Value": "CHAR(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 week)", + "Value": "CHAR(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' week)", + "Value": "CHAR(\"2025-02-12 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' week)", + "Value": "CHAR(\"2024-11-20 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2025-01-15 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2024-12-18 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2025-01-08 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2024-12-25 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 month)", + "Value": "CHAR(\"2027-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 month)", + "Value": "CHAR(\"2022-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 month)", + "Value": "CHAR(\"2027-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 month)", + "Value": "CHAR(\"2022-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 month)", + "Value": "CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 month)", + "Value": "CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 month)", + "Value": "CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 month)", + "Value": "CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' month)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' month)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 year)", + "Value": "CHAR(\"2056-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 year)", + "Value": "CHAR(\"1994-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 year)", + "Value": "CHAR(\"2055-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 year)", + "Value": "CHAR(\"1995-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 year)", + "Value": "CHAR(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 year)", + "Value": "CHAR(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 year)", + "Value": "CHAR(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 year)", + "Value": "CHAR(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 year)", + "Value": "CHAR(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 year)", + "Value": "CHAR(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 year)", + "Value": "CHAR(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 year)", + "Value": "CHAR(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' year)", + "Value": "CHAR(\"2031-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' year)", + "Value": "CHAR(\"2019-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2027-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2023-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 day_hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 day_hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2025-01-02 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2024-12-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 day_hour)", + "Value": "CHAR(\"2025-01-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 day_hour)", + "Value": "CHAR(\"2024-12-30 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 day_hour)", + "Value": "CHAR(\"2025-01-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 day_hour)", + "Value": "CHAR(\"2024-12-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"2139-01-31 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"1910-12-03 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2025-02-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2024-11-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2025-02-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2024-11-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2025-07-29 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2024-06-05 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2025-07-29 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2024-06-05 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2025-01-07 04:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2024-12-25 20:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 day_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 day_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2025-01-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2024-12-31 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 day_minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 day_minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 day_minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 day_minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2026-11-26 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2023-02-06 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2025-01-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2024-12-31 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 hour)", + "Value": "CHAR(\"2025-01-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 hour)", + "Value": "CHAR(\"2024-12-30 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 hour)", + "Value": "CHAR(\"2025-01-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 hour)", + "Value": "CHAR(\"2024-12-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' hour)", + "Value": "CHAR(\"2025-01-01 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' hour)", + "Value": "CHAR(\"2024-12-31 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2026-11-26 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2023-02-06 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2025-01-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2024-12-31 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000031\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999969\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000030\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999970\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000006\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999994\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' minute)", + "Value": "CHAR(\"2025-01-01 00:06:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' minute)", + "Value": "CHAR(\"2024-12-31 23:54:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 quarter)", + "Value": "CHAR(\"2032-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 quarter)", + "Value": "CHAR(\"2017-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 quarter)", + "Value": "CHAR(\"2032-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 quarter)", + "Value": "CHAR(\"2017-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2026-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2023-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2025-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2024-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2025-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2024-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' second)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' second)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' second)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' second)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' second)", + "Value": "CHAR(\"2025-01-01 00:00:06\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' second)", + "Value": "CHAR(\"2024-12-31 23:59:54\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1 year_month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1 year_month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2026-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2023-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2023-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2026-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2026-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2023-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 31 year_month)", + "Value": "CHAR(\"2027-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 31 year_month)", + "Value": "CHAR(\"2022-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 30 year_month)", + "Value": "CHAR(\"2027-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 30 year_month)", + "Value": "CHAR(\"2022-07-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.999 year_month)", + "Value": "CHAR(\"2109-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.999 year_month)", + "Value": "CHAR(\"1940-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.999' year_month)", + "Value": "CHAR(\"2109-04-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.999' year_month)", + "Value": "CHAR(\"1940-10-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2023-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2026-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2026-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2023-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2026-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2023-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"2442-09-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"1607-05-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 6/4 year_month)", + "Value": "CHAR(\"2442-09-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 6/4 year_month)", + "Value": "CHAR(\"1607-05-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2031-05-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2018-09-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2026-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2023-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2026-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2023-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2026-06-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2023-08-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2025-03-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2024-11-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2025-02-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2024-12-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('2025-01-01 00:00:00', INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2026-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('2025-01-01 00:00:00', INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2024-01-01 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 day)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 day)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 day)", + "Value": "CHAR(\"2025-01-31\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 day)", + "Value": "CHAR(\"2024-12-02\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' day)", + "Value": "CHAR(\"2025-01-07\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' day)", + "Value": "CHAR(\"2024-12-26\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 week)", + "Value": "CHAR(\"2025-08-06\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 week)", + "Value": "CHAR(\"2024-05-29\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 week)", + "Value": "CHAR(\"2025-07-30\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 week)", + "Value": "CHAR(\"2024-06-05\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' week)", + "Value": "CHAR(\"2025-02-12\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' week)", + "Value": "CHAR(\"2024-11-20\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 month)", + "Value": "CHAR(\"2027-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 month)", + "Value": "CHAR(\"2022-06-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 month)", + "Value": "CHAR(\"2027-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 month)", + "Value": "CHAR(\"2022-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' month)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' month)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 year)", + "Value": "CHAR(\"2056-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 year)", + "Value": "CHAR(\"1994-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 year)", + "Value": "CHAR(\"2055-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 year)", + "Value": "CHAR(\"1995-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' year)", + "Value": "CHAR(\"2031-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' year)", + "Value": "CHAR(\"2019-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 day_hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 day_hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2025-01-02 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2024-12-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 day_hour)", + "Value": "CHAR(\"2025-01-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 day_hour)", + "Value": "CHAR(\"2024-12-30 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 day_hour)", + "Value": "CHAR(\"2025-01-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 day_hour)", + "Value": "CHAR(\"2024-12-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"2139-01-31 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"1910-12-03 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2025-02-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2024-11-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2025-02-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2024-11-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2025-07-29 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2024-06-05 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2025-07-29 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2024-06-05 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2025-01-07 04:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2024-12-25 20:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 day_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 day_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2025-01-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2024-12-31 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 day_minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 day_minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 day_minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 day_minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2026-11-26 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2023-02-06 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2025-01-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2024-12-31 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 hour)", + "Value": "CHAR(\"2025-01-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 hour)", + "Value": "CHAR(\"2024-12-30 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 hour)", + "Value": "CHAR(\"2025-01-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 hour)", + "Value": "CHAR(\"2024-12-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' hour)", + "Value": "CHAR(\"2025-01-01 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' hour)", + "Value": "CHAR(\"2024-12-31 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2026-11-26 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2023-02-06 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2025-01-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2024-12-31 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000031\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999969\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000030\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999970\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000006\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999994\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' minute)", + "Value": "CHAR(\"2025-01-01 00:06:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' minute)", + "Value": "CHAR(\"2024-12-31 23:54:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 quarter)", + "Value": "CHAR(\"2032-10-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 quarter)", + "Value": "CHAR(\"2017-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 quarter)", + "Value": "CHAR(\"2032-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 quarter)", + "Value": "CHAR(\"2017-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2026-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2023-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' second)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' second)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' second)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' second)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' second)", + "Value": "CHAR(\"2025-01-01 00:00:06\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' second)", + "Value": "CHAR(\"2024-12-31 23:59:54\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1 year_month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1 year_month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2026-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2023-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2023-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2026-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2026-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2023-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 31 year_month)", + "Value": "CHAR(\"2027-08-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 31 year_month)", + "Value": "CHAR(\"2022-06-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 30 year_month)", + "Value": "CHAR(\"2027-07-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 30 year_month)", + "Value": "CHAR(\"2022-07-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.999 year_month)", + "Value": "CHAR(\"2109-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.999 year_month)", + "Value": "CHAR(\"1940-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.999' year_month)", + "Value": "CHAR(\"2109-04-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.999' year_month)", + "Value": "CHAR(\"1940-10-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2023-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2026-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2026-11-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2023-03-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2026-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2023-08-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"2442-09-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"1607-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 6/4 year_month)", + "Value": "CHAR(\"2442-09-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 6/4 year_month)", + "Value": "CHAR(\"1607-05-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2031-05-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2018-09-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2026-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2023-08-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2026-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2023-08-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2026-06-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2023-08-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD(20250101, INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB(20250101, INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' day)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' week)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' year)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' day_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' day_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' quarter)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1 10' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1 10' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1 10' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1 10' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 31 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 31 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 30 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 30 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.999 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.999 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '-1:10' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '-1:10' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1:10' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1:10' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 6/4 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 6/4 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '6/4' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '6/4' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5e0 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5e0 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1.5000e0 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1.5000e0 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL 1e0 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL 1e0 year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('pokemon trainers', INTERVAL '1.0foobar' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('pokemon trainers', INTERVAL '1.0foobar' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 day)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 day)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 day)", + "Value": "CHAR(\"2025-01-31\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 day)", + "Value": "CHAR(\"2024-12-02\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' day)", + "Value": "CHAR(\"2025-01-07\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' day)", + "Value": "CHAR(\"2024-12-26\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2025-01-03\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day)", + "Value": "CHAR(\"2024-12-30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2025-01-02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' day)", + "Value": "CHAR(\"2024-12-31\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 week)", + "Value": "CHAR(\"2025-08-06\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 week)", + "Value": "CHAR(\"2024-05-29\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 week)", + "Value": "CHAR(\"2025-07-30\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 week)", + "Value": "CHAR(\"2024-06-05\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' week)", + "Value": "CHAR(\"2025-02-12\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' week)", + "Value": "CHAR(\"2024-11-20\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2025-01-15\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) week)", + "Value": "CHAR(\"2024-12-18\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2025-01-08\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' week)", + "Value": "CHAR(\"2024-12-25\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 month)", + "Value": "CHAR(\"2027-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 month)", + "Value": "CHAR(\"2022-06-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 month)", + "Value": "CHAR(\"2027-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 month)", + "Value": "CHAR(\"2022-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' month)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' month)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 year)", + "Value": "CHAR(\"2056-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 year)", + "Value": "CHAR(\"1994-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 year)", + "Value": "CHAR(\"2055-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 year)", + "Value": "CHAR(\"1995-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' year)", + "Value": "CHAR(\"2031-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' year)", + "Value": "CHAR(\"2019-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2027-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year)", + "Value": "CHAR(\"2023-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' year)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 day_hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 day_hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2025-01-02 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' day_hour)", + "Value": "CHAR(\"2024-12-30 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 day_hour)", + "Value": "CHAR(\"2025-01-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 day_hour)", + "Value": "CHAR(\"2024-12-30 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 day_hour)", + "Value": "CHAR(\"2025-01-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 day_hour)", + "Value": "CHAR(\"2024-12-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"2139-01-31 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' day_hour)", + "Value": "CHAR(\"1910-12-03 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2025-02-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 day_hour)", + "Value": "CHAR(\"2024-11-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2025-02-12 15:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' day_hour)", + "Value": "CHAR(\"2024-11-19 09:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' day_hour)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2025-01-02 10:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' day_hour)", + "Value": "CHAR(\"2024-12-30 14:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2025-07-29 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 day_hour)", + "Value": "CHAR(\"2024-06-05 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2025-07-29 08:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 day_hour)", + "Value": "CHAR(\"2024-06-05 16:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2025-01-07 04:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' day_hour)", + "Value": "CHAR(\"2024-12-25 20:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2025-01-02 05:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_hour)", + "Value": "CHAR(\"2024-12-30 19:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 day_hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' day_hour)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2025-01-02 00:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' day_hour)", + "Value": "CHAR(\"2024-12-31 00:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' day_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' day_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 day_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 day_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2025-01-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' day_minute)", + "Value": "CHAR(\"2024-12-31 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 day_minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 day_minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 day_minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 day_minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2026-11-26 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' day_minute)", + "Value": "CHAR(\"2023-02-06 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 day_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' day_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' day_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' day_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 day_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 day_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2025-01-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' day_minute)", + "Value": "CHAR(\"2024-12-31 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 day_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' day_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' day_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' day_second)", + "Value": "CHAR(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 day_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' day_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2025-01-02 01:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' day_second)", + "Value": "CHAR(\"2024-12-30 22:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' day_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 day_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 day_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' day_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) day_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) day_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 day_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' day_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' day_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 hour)", + "Value": "CHAR(\"2025-01-02 07:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 hour)", + "Value": "CHAR(\"2024-12-30 17:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 hour)", + "Value": "CHAR(\"2025-01-02 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 hour)", + "Value": "CHAR(\"2024-12-30 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' hour)", + "Value": "CHAR(\"2025-01-01 06:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' hour)", + "Value": "CHAR(\"2024-12-31 18:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2025-01-01 02:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour)", + "Value": "CHAR(\"2024-12-31 22:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' hour)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 01:01:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 22:58:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' hour_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2026-11-26 11:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' hour_minute)", + "Value": "CHAR(\"2023-02-06 12:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 hour_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2025-01-01 17:39:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' hour_minute)", + "Value": "CHAR(\"2024-12-31 06:21:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' hour_minute)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:10:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' hour_minute)", + "Value": "CHAR(\"2024-12-31 22:50:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 hour_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2025-01-04 12:20:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 hour_minute)", + "Value": "CHAR(\"2024-12-28 11:40:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2025-01-01 06:04:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' hour_minute)", + "Value": "CHAR(\"2024-12-31 17:56:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2025-01-01 01:05:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_minute)", + "Value": "CHAR(\"2024-12-31 22:55:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 hour_minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' hour_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2025-01-01 01:00:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' hour_minute)", + "Value": "CHAR(\"2024-12-31 23:00:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' hour_second)", + "Value": "CHAR(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 hour_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' hour_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' hour_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 hour_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 hour_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' hour_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) hour_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' hour_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000031\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999969\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000030\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999970\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000006\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999994\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000002\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999998\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.000001\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.999999\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 minute)", + "Value": "CHAR(\"2025-01-01 00:31:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 minute)", + "Value": "CHAR(\"2024-12-31 23:29:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 minute)", + "Value": "CHAR(\"2025-01-01 00:30:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 minute)", + "Value": "CHAR(\"2024-12-31 23:30:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' minute)", + "Value": "CHAR(\"2025-01-01 00:06:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' minute)", + "Value": "CHAR(\"2024-12-31 23:54:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2025-01-01 00:02:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute)", + "Value": "CHAR(\"2024-12-31 23:58:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' minute)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' minute_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' minute_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2025-01-12 13:47:39\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' minute_second)", + "Value": "CHAR(\"2024-12-20 10:12:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 minute_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2025-01-01 00:17:39\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' minute_second)", + "Value": "CHAR(\"2024-12-31 23:42:21\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' minute_second)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:10\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:50\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 minute_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2025-01-01 01:24:20\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 minute_second)", + "Value": "CHAR(\"2024-12-31 22:35:40\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2025-01-01 00:06:04\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' minute_second)", + "Value": "CHAR(\"2024-12-31 23:53:56\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:05\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) minute_second)", + "Value": "CHAR(\"2024-12-31 23:58:55\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2025-01-01 00:01:00\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' minute_second)", + "Value": "CHAR(\"2024-12-31 23:59:00\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 quarter)", + "Value": "CHAR(\"2032-10-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 quarter)", + "Value": "CHAR(\"2017-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 quarter)", + "Value": "CHAR(\"2032-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 quarter)", + "Value": "CHAR(\"2017-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2026-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' quarter)", + "Value": "CHAR(\"2023-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2025-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) quarter)", + "Value": "CHAR(\"2024-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2025-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' quarter)", + "Value": "CHAR(\"2024-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 second)", + "Value": "CHAR(\"2025-01-01 00:00:31\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 second)", + "Value": "CHAR(\"2024-12-31 23:59:29\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 second)", + "Value": "CHAR(\"2025-01-01 00:00:30\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 second)", + "Value": "CHAR(\"2024-12-31 23:59:30\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' second)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' second)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' second)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' second)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' second)", + "Value": "CHAR(\"2025-01-01 00:00:06\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' second)", + "Value": "CHAR(\"2024-12-31 23:59:54\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2025-01-01 00:00:02\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second)", + "Value": "CHAR(\"2024-12-31 23:59:58\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' second)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.310000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.690000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.300000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.700000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999999\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.000001\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.999000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.001000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' second_microsecond)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:06.400000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:53.600000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01.500000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:58.500000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.200000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.800000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:00.100000\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59.900000\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2025-01-01 00:00:01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' second_microsecond)", + "Value": "CHAR(\"2024-12-31 23:59:59\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1 year_month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1 year_month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2026-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1' year_month)", + "Value": "CHAR(\"2023-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2023-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1 10' year_month)", + "Value": "CHAR(\"2026-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2026-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1 10' year_month)", + "Value": "CHAR(\"2023-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 31 year_month)", + "Value": "CHAR(\"2027-08-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 31 year_month)", + "Value": "CHAR(\"2022-06-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 30 year_month)", + "Value": "CHAR(\"2027-07-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 30 year_month)", + "Value": "CHAR(\"2022-07-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999999' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.999 year_month)", + "Value": "CHAR(\"2109-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.999 year_month)", + "Value": "CHAR(\"1940-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.999' year_month)", + "Value": "CHAR(\"2109-04-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.999' year_month)", + "Value": "CHAR(\"1940-10-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1:1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:1 1:1' year_month)", + "Value": "NULL" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2023-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '-1:10' year_month)", + "Value": "CHAR(\"2026-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2026-11-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1:10' year_month)", + "Value": "CHAR(\"2023-03-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2026-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5 year_month)", + "Value": "CHAR(\"2023-08-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"2442-09-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000 year_month)", + "Value": "CHAR(\"1607-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 6/4 year_month)", + "Value": "CHAR(\"2442-09-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 6/4 year_month)", + "Value": "CHAR(\"1607-05-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2031-05-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '6/4' year_month)", + "Value": "CHAR(\"2018-09-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2026-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5e0 year_month)", + "Value": "CHAR(\"2023-08-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2026-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1.5000e0 year_month)", + "Value": "CHAR(\"2023-08-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2026-06-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,1)) year_month)", + "Value": "CHAR(\"2023-08-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2025-03-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL CAST(6/4 AS DECIMAL(3,0)) year_month)", + "Value": "CHAR(\"2024-11-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2025-02-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL 1e0 year_month)", + "Value": "CHAR(\"2024-12-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0' year_month)", + "Value": "CHAR(\"2024-01-01\")" + }, + { + "Query": "SELECT DATE_ADD('20250101', INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2026-01-01\")" + }, + { + "Query": "SELECT DATE_SUB('20250101', INTERVAL '1.0foobar' year_month)", + "Value": "CHAR(\"2024-01-01\")" + } +] diff --git a/go/vt/vtgate/evalengine/internal/json/helpers.go b/go/vt/vtgate/evalengine/internal/json/helpers.go deleted file mode 100644 index baeb282832d..00000000000 --- a/go/vt/vtgate/evalengine/internal/json/helpers.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2023 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package json - -import ( - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/vthash" -) - -const hashPrefixJSON = 0xCCBB - -func (v *Value) Hash(h *vthash.Hasher) { - h.Write16(hashPrefixJSON) - _, _ = h.Write(v.ToRawBytes()) -} - -func (v *Value) ToRawBytes() []byte { - return v.MarshalTo(nil) -} - -func (v *Value) SQLType() sqltypes.Type { - return sqltypes.TypeJSON -} - -func NewArray(vals []*Value) *Value { - return &Value{ - a: vals, - t: TypeArray, - } -} - -func NewObject() *Value { - return &Value{ - o: Object{}, - t: TypeObject, - } -} - -func NewNumber(num []byte) *Value { - return &Value{ - s: string(num), - t: TypeNumber, - } -} - -func NewString(raw []byte) *Value { - return &Value{ - s: string(raw), - t: TypeString, - } -} - -func (v *Value) Depth() int { - max := func(a, b int) int { - if a > b { - return a - } - return b - } - - var depth int - switch v.t { - case TypeObject: - for _, kv := range v.o.kvs { - depth = max(kv.v.Depth(), depth) - } - case TypeArray: - for _, a := range v.a { - depth = max(a.Depth(), depth) - } - } - return depth + 1 -} - -func (v *Value) Len() int { - switch v.t { - case TypeArray: - return len(v.a) - case TypeObject: - return v.o.Len() - default: - return 1 - } -} diff --git a/go/vt/vtgate/evalengine/mysql_test.go b/go/vt/vtgate/evalengine/mysql_test.go index 9fece5a9db5..d63962f78c2 100644 --- a/go/vt/vtgate/evalengine/mysql_test.go +++ b/go/vt/vtgate/evalengine/mysql_test.go @@ -17,11 +17,12 @@ limitations under the License. package evalengine import ( + "context" "encoding/json" "errors" - "fmt" "os" "path/filepath" + "slices" "strings" "testing" @@ -49,11 +50,19 @@ var errKnownBadQuery = errors.New("this query is known to give bad results in My func convert(t *testing.T, query string, simplify bool) (Expr, error) { stmt, err := sqlparser.Parse(query) if err != nil { - t.Fatal(err) + t.Fatalf("failed to parse '%s': %v", query, err) + } + + cfg := &Config{ + Collation: collations.CollationUtf8mb4ID, + Optimization: OptimizationLevelNone, + } + if simplify { + cfg.Optimization = OptimizationLevelSimplify } astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr - converted, err := TranslateEx(astExpr, &LookupIntegrationTest{collations.CollationUtf8mb4ID}, simplify) + converted, err := Translate(astExpr, cfg) if err == nil { if knownBadQuery(converted) { return nil, errKnownBadQuery @@ -68,11 +77,17 @@ func testSingle(t *testing.T, query string) (EvalResult, error) { if err != nil { return EvalResult{}, err } - return EnvWithBindVars(nil, collations.CollationUtf8mb4ID).Evaluate(converted) + return NewExpressionEnv(context.Background(), nil, nil).Evaluate(converted) } func TestMySQLGolden(t *testing.T) { + const Target = 0 + + var testcount int + golden, _ := filepath.Glob("integration/testdata/*.json") + slices.Sort(golden) + for _, gld := range golden { t.Run(filepath.Base(gld), func(t *testing.T) { var testcases []struct { @@ -93,7 +108,11 @@ func TestMySQLGolden(t *testing.T) { var ok int for _, tc := range testcases { - debug := fmt.Sprintf("\n// Debug\neval, err := testSingle(t, `%s`)\nt.Logf(\"eval=%%s err=%%v\", eval.Value(), err) // want value=%q\n", tc.Query, tc.Value) + testcount++ + if Target != 0 && Target != testcount { + continue + } + eval, err := testSingle(t, tc.Query) if err == errKnownBadQuery { ok++ @@ -101,20 +120,20 @@ func TestMySQLGolden(t *testing.T) { } if err != nil { if tc.Error == "" { - t.Errorf("query: %s\nmysql val: %s\nvitess err: %s\n%s", tc.Query, tc.Value, err.Error(), debug) + t.Errorf("query %d: %s\nmysql val: %s\nvitess err: %s", testcount, tc.Query, tc.Value, err.Error()) } else if !strings.HasPrefix(tc.Error, err.Error()) { - t.Errorf("query: %s\nmysql err: %s\nvitess err: %s\n%s", tc.Query, tc.Error, err.Error(), debug) + t.Errorf("query %d: %s\nmysql err: %s\nvitess err: %s", testcount, tc.Query, tc.Error, err.Error()) } else { ok++ } continue } if tc.Error != "" { - t.Errorf("query: %s\nmysql err: %s\nvitess val: %s\n%s", tc.Query, tc.Error, eval.Value(), debug) + t.Errorf("query %d: %s\nmysql err: %s\nvitess val: %s", testcount, tc.Query, tc.Error, eval.Value(collations.Default())) continue } - if eval.Value().String() != tc.Value { - t.Errorf("query: %s\nmysql val: %s\nvitess val: %s\n%s", tc.Query, tc.Value, eval.Value(), debug) + if eval.String() != tc.Value { + t.Errorf("query %d: %s\nmysql val: %s\nvitess val: %s", testcount, tc.Query, tc.Value, eval.Value(collations.Default())) continue } ok++ @@ -127,6 +146,6 @@ func TestMySQLGolden(t *testing.T) { func TestDebug1(t *testing.T) { // Debug - eval, err := testSingle(t, `SELECT 12.0 * 0`) - t.Logf("eval=%s err=%v coll=%s", eval.String(), err, eval.Collation().Get().Name()) + eval, err := testSingle(t, `SELECT _latin1 0xFF regexp _latin1 '[[:lower:]]' COLLATE latin1_bin`) + t.Logf("eval=%s err=%v coll=%s", eval.String(), err, collations.Local().LookupName(eval.Collation())) } diff --git a/go/vt/vtgate/evalengine/perf_test.go b/go/vt/vtgate/evalengine/perf_test.go new file mode 100644 index 00000000000..c2e6e43aec5 --- /dev/null +++ b/go/vt/vtgate/evalengine/perf_test.go @@ -0,0 +1,70 @@ +package evalengine_test + +import ( + "testing" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/evalengine" +) + +func BenchmarkCompilerExpressions(b *testing.B) { + var testCases = []struct { + name string + expression string + values []sqltypes.Value + }{ + {"complex_arith", "((23 + column0) * 4.0e0) = ((column1 / 3.33e0) * 100)", []sqltypes.Value{sqltypes.NewInt64(666), sqltypes.NewUint64(420)}}, + {"comparison_i64", "column0 = 12", []sqltypes.Value{sqltypes.NewInt64(666)}}, + {"comparison_u64", "column0 = 12", []sqltypes.Value{sqltypes.NewUint64(666)}}, + {"comparison_dec", "column0 = 12", []sqltypes.Value{sqltypes.NewDecimal("420")}}, + {"comparison_f", "column0 = 12", []sqltypes.Value{sqltypes.NewFloat64(420.0)}}, + } + + for _, tc := range testCases { + expr, err := sqlparser.ParseExpr(tc.expression) + if err != nil { + b.Fatal(err) + } + + fields := evalengine.FieldResolver(makeFields(tc.values)) + cfg := &evalengine.Config{ + ResolveColumn: fields.Column, + ResolveType: fields.Type, + Collation: collations.CollationUtf8mb4ID, + Optimization: evalengine.OptimizationLevelCompile, + } + + translated, err := evalengine.Translate(expr, cfg) + if err != nil { + b.Fatal(err) + } + + b.Run(tc.name+"/eval=ast", func(b *testing.B) { + decompiled := evalengine.Deoptimize(translated) + + b.ResetTimer() + b.ReportAllocs() + + var env evalengine.ExpressionEnv + env.Row = tc.values + for n := 0; n < b.N; n++ { + _, _ = env.Evaluate(decompiled) + } + }) + + b.Run(tc.name+"/eval=vm", func(b *testing.B) { + compiled := translated.(*evalengine.CompiledExpr) + + b.ResetTimer() + b.ReportAllocs() + + var env evalengine.ExpressionEnv + env.Row = tc.values + for n := 0; n < b.N; n++ { + _, _ = env.EvaluateVM(compiled) + } + }) + } +} diff --git a/go/vt/vtgate/evalengine/testcases/cases.go b/go/vt/vtgate/evalengine/testcases/cases.go index 7b112b991a2..cd52631c00c 100644 --- a/go/vt/vtgate/evalengine/testcases/cases.go +++ b/go/vt/vtgate/evalengine/testcases/cases.go @@ -18,107 +18,146 @@ package testcases import ( "encoding/base64" + "encoding/hex" "fmt" "math" "strconv" "strings" - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/vtgate/evalengine" ) -type TestCase interface { - Test(yield Iterator) - Environment() *evalengine.ExpressionEnv -} - -type Iterator func(query string, row []sqltypes.Value) - -type defaultEnv struct{} - -func (defaultEnv) Environment() *evalengine.ExpressionEnv { - return evalengine.EnvWithBindVars(nil, collations.CollationUtf8mb4ID) -} - -type JSONPathOperations struct{ defaultEnv } -type JSONArray struct{ defaultEnv } -type JSONObject struct{ defaultEnv } -type CharsetConversionOperators struct{ defaultEnv } -type CaseExprWithPredicate struct{ defaultEnv } -type Ceil struct{ defaultEnv } -type CaseExprWithValue struct{ defaultEnv } -type Base64 struct{ defaultEnv } -type Conversion struct{ defaultEnv } -type LargeDecimals struct{ defaultEnv } -type LargeIntegers struct{ defaultEnv } -type DecimalClamping struct{ defaultEnv } -type BitwiseOperatorsUnary struct{ defaultEnv } -type BitwiseOperators struct{ defaultEnv } -type WeightString struct{ defaultEnv } -type FloatFormatting struct{ defaultEnv } -type UnderscoreAndPercentage struct{ defaultEnv } -type Types struct{ defaultEnv } -type HexArithmetic struct{ defaultEnv } -type NumericTypes struct{ defaultEnv } -type NegateArithmetic struct{ defaultEnv } -type CollationOperations struct{ defaultEnv } -type LikeComparison struct{ defaultEnv } -type MultiComparisons struct{ defaultEnv } -type IsStatement struct{ defaultEnv } -type TupleComparisons struct{ defaultEnv } -type Comparisons struct{ defaultEnv } -type JSONExtract struct{} -type FnLower struct{ defaultEnv } -type FnUpper struct{ defaultEnv } -type FnCharLength struct{ defaultEnv } -type FnLength struct{ defaultEnv } -type FnBitLength struct{ defaultEnv } -type FnAscii struct{ defaultEnv } -type FnRepeat struct{ defaultEnv } -type FnHex struct{ defaultEnv } - var Cases = []TestCase{ - JSONExtract{}, - JSONPathOperations{}, - JSONArray{}, - JSONObject{}, - CharsetConversionOperators{}, - CaseExprWithPredicate{}, - Ceil{}, - CaseExprWithValue{}, - Base64{}, - Conversion{}, - LargeDecimals{}, - LargeIntegers{}, - DecimalClamping{}, - BitwiseOperatorsUnary{}, - BitwiseOperators{}, - WeightString{}, - FloatFormatting{}, - UnderscoreAndPercentage{}, - Types{}, - HexArithmetic{}, - NumericTypes{}, - NegateArithmetic{}, - CollationOperations{}, - LikeComparison{}, - MultiComparisons{}, - IsStatement{}, - TupleComparisons{}, - Comparisons{}, - FnLower{}, - FnUpper{}, - FnCharLength{}, - FnLength{}, - FnBitLength{}, - FnAscii{}, - FnRepeat{}, - FnHex{}, -} - -func (JSONPathOperations) Test(yield Iterator) { + {Run: JSONExtract, Schema: JSONExtract_Schema}, + {Run: JSONPathOperations}, + {Run: JSONArray}, + {Run: JSONObject}, + {Run: CharsetConversionOperators}, + {Run: CaseExprWithPredicate}, + {Run: CaseExprWithValue}, + {Run: If}, + {Run: Base64}, + {Run: Conversion}, + {Run: LargeDecimals}, + {Run: LargeIntegers}, + {Run: DecimalClamping}, + {Run: BitwiseOperatorsUnary}, + {Run: BitwiseOperators}, + {Run: WeightString}, + {Run: FloatFormatting}, + {Run: UnderscoreAndPercentage}, + {Run: Types}, + {Run: Arithmetic}, + {Run: HexArithmetic}, + {Run: NumericTypes}, + {Run: NegateArithmetic}, + {Run: CollationOperations}, + {Run: LikeComparison}, + {Run: StrcmpComparison}, + {Run: MultiComparisons}, + {Run: IntervalStatement}, + {Run: IsStatement}, + {Run: NotStatement}, + {Run: LogicalStatement}, + {Run: TupleComparisons}, + {Run: Comparisons}, + {Run: InStatement}, + {Run: FnLower}, + {Run: FnUpper}, + {Run: FnCharLength}, + {Run: FnLength}, + {Run: FnBitLength}, + {Run: FnAscii}, + {Run: FnOrd}, + {Run: FnRepeat}, + {Run: FnLeft}, + {Run: FnLpad}, + {Run: FnRight}, + {Run: FnRpad}, + {Run: FnLTrim}, + {Run: FnRTrim}, + {Run: FnTrim}, + {Run: FnConcat}, + {Run: FnConcatWs}, + {Run: FnHex}, + {Run: FnUnhex}, + {Run: FnCeil}, + {Run: FnFloor}, + {Run: FnAbs}, + {Run: FnPi}, + {Run: FnAcos}, + {Run: FnAsin}, + {Run: FnAtan}, + {Run: FnAtan2}, + {Run: FnCos}, + {Run: FnCot}, + {Run: FnSin}, + {Run: FnTan}, + {Run: FnDegrees}, + {Run: FnRadians}, + {Run: FnNow, Compare: &Comparison{LooseTime: true}}, + {Run: FnInfo}, + {Run: FnExp}, + {Run: FnLn}, + {Run: FnLog}, + {Run: FnLog10}, + {Run: FnMod}, + {Run: FnLog2}, + {Run: FnPow}, + {Run: FnSign}, + {Run: FnSqrt}, + {Run: FnRound}, + {Run: FnTruncate}, + {Run: FnCrc32}, + {Run: FnConv}, + {Run: FnMD5}, + {Run: FnSHA1}, + {Run: FnSHA2}, + {Run: FnRandomBytes}, + {Run: FnDateFormat}, + {Run: FnConvertTz}, + {Run: FnDate}, + {Run: FnDayOfMonth}, + {Run: FnDayOfWeek}, + {Run: FnDayOfYear}, + {Run: FnFromUnixtime}, + {Run: FnHour}, + {Run: FnMakedate}, + {Run: FnMaketime}, + {Run: FnMicroSecond}, + {Run: FnMinute}, + {Run: FnMonth}, + {Run: FnMonthName}, + {Run: FnQuarter}, + {Run: FnSecond}, + {Run: FnTime}, + {Run: FnUnixTimestamp}, + {Run: FnWeek}, + {Run: FnWeekDay}, + {Run: FnWeekOfYear}, + {Run: FnYear}, + {Run: FnYearWeek}, + {Run: FnInetAton}, + {Run: FnInetNtoa}, + {Run: FnInet6Aton}, + {Run: FnInet6Ntoa}, + {Run: FnIsIPv4}, + {Run: FnIsIPv4Compat}, + {Run: FnIsIPv4Mapped}, + {Run: FnIsIPv6}, + {Run: FnBinToUUID}, + {Run: FnIsUUID}, + {Run: FnUUID}, + {Run: FnUUIDToBin}, + {Run: DateMath}, + {Run: RegexpLike}, + {Run: RegexpInstr}, + {Run: RegexpSubstr}, + {Run: RegexpReplace}, +} + +func JSONPathOperations(yield Query) { for _, obj := range inputJSONObjects { yield(fmt.Sprintf("JSON_KEYS('%s')", obj), nil) @@ -137,7 +176,7 @@ func (JSONPathOperations) Test(yield Iterator) { } } -func (JSONArray) Test(yield Iterator) { +func JSONArray(yield Query) { for _, a := range inputJSONPrimitives { yield(fmt.Sprintf("JSON_ARRAY(%s)", a), nil) for _, b := range inputJSONPrimitives { @@ -147,7 +186,7 @@ func (JSONArray) Test(yield Iterator) { yield("JSON_ARRAY()", nil) } -func (JSONObject) Test(yield Iterator) { +func JSONObject(yield Query) { for _, a := range inputJSONPrimitives { for _, b := range inputJSONPrimitives { yield(fmt.Sprintf("JSON_OBJECT(%s, %s)", a, b), nil) @@ -156,7 +195,7 @@ func (JSONObject) Test(yield Iterator) { yield("JSON_OBJECT()", nil) } -func (CharsetConversionOperators) Test(yield Iterator) { +func CharsetConversionOperators(yield Query) { var introducers = []string{ "", "_latin1", "_utf8mb4", "_utf8", "_binary", } @@ -176,7 +215,7 @@ func (CharsetConversionOperators) Test(yield Iterator) { } } -func (CaseExprWithPredicate) Test(yield Iterator) { +func CaseExprWithPredicate(yield Query) { var predicates = []string{ "true", "false", @@ -206,7 +245,7 @@ func (CaseExprWithPredicate) Test(yield Iterator) { }) } -func (Ceil) Test(yield Iterator) { +func FnCeil(yield Query) { var ceilInputs = []string{ "0", "1", @@ -224,49 +263,466 @@ func (Ceil) Test(yield Iterator) { yield(fmt.Sprintf("CEIL(%s)", num), nil) yield(fmt.Sprintf("CEILING(%s)", num), nil) } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("CEIL(%s)", num), nil) + yield(fmt.Sprintf("CEILING(%s)", num), nil) + } +} + +func FnFloor(yield Query) { + var floorInputs = []string{ + "0", + "1", + "-1", + "'1.5'", + "NULL", + "'ABC'", + "1.5e0", + "-1.5e0", + "9223372036854775810.4", + "-9223372036854775810.4", + } + + for _, num := range floorInputs { + yield(fmt.Sprintf("FLOOR(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("FLOOR(%s)", num), nil) + } +} + +func FnAbs(yield Query) { + var absInputs = []string{ + "0", + "1", + "-1", + "'1.5'", + "NULL", + "'ABC'", + "1.5e0", + "-1.5e0", + "9223372036854775810.4", + "-9223372036854775810.4", + } + + for _, num := range absInputs { + yield(fmt.Sprintf("ABS(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("ABS(%s)", num), nil) + } +} + +func FnPi(yield Query) { + yield("PI()+0.000000000000000000", nil) +} + +func FnAcos(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("ACOS(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("ACOS(%s)", num), nil) + } +} + +func FnAsin(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("ASIN(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("ASIN(%s)", num), nil) + } +} + +func FnAtan(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("ATAN(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("ATAN(%s)", num), nil) + } +} + +func FnAtan2(yield Query) { + for _, num1 := range radianInputs { + for _, num2 := range radianInputs { + yield(fmt.Sprintf("ATAN(%s, %s)", num1, num2), nil) + yield(fmt.Sprintf("ATAN2(%s, %s)", num1, num2), nil) + } + } + + for _, num1 := range inputBitwise { + for _, num2 := range inputBitwise { + yield(fmt.Sprintf("ATAN(%s, %s)", num1, num2), nil) + yield(fmt.Sprintf("ATAN2(%s, %s)", num1, num2), nil) + } + } +} + +func FnCos(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("COS(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("COS(%s)", num), nil) + } +} + +func FnCot(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("COT(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("COT(%s)", num), nil) + } +} + +func FnSin(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("SIN(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("SIN(%s)", num), nil) + } +} + +func FnTan(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("TAN(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("TAN(%s)", num), nil) + } +} + +func FnDegrees(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("DEGREES(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("DEGREES(%s)", num), nil) + } +} + +func FnRadians(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("RADIANS(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("RADIANS(%s)", num), nil) + } +} + +func FnExp(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("EXP(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("EXP(%s)", num), nil) + } +} + +func FnLn(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("LN(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("LN(%s)", num), nil) + } +} + +func FnLog(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("LOG(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("LOG(%s)", num), nil) + } + + for _, num1 := range radianInputs { + for _, num2 := range radianInputs { + yield(fmt.Sprintf("LOG(%s, %s)", num1, num2), nil) + } + for _, num2 := range inputBitwise { + yield(fmt.Sprintf("LOG(%s, %s)", num1, num2), nil) + } + } + + for _, num1 := range inputBitwise { + for _, num2 := range radianInputs { + yield(fmt.Sprintf("LOG(%s, %s)", num1, num2), nil) + } + for _, num2 := range inputBitwise { + yield(fmt.Sprintf("LOG(%s, %s)", num1, num2), nil) + } + } +} + +func FnLog10(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("LOG10(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("LOG10(%s)", num), nil) + } +} + +func FnMod(yield Query) { + for _, num1 := range radianInputs { + for _, num2 := range radianInputs { + yield(fmt.Sprintf("MOD(%s, %s)", num1, num2), nil) + } + for _, num2 := range inputBitwise { + yield(fmt.Sprintf("MOD(%s, %s)", num1, num2), nil) + } + } + + for _, num1 := range inputBitwise { + for _, num2 := range radianInputs { + yield(fmt.Sprintf("MOD(%s, %s)", num1, num2), nil) + } + for _, num2 := range inputBitwise { + yield(fmt.Sprintf("MOD(%s, %s)", num1, num2), nil) + } + } +} + +func FnLog2(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("LOG2(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("LOG2(%s)", num), nil) + } +} + +func FnPow(yield Query) { + for _, num1 := range radianInputs { + for _, num2 := range radianInputs { + yield(fmt.Sprintf("POW(%s, %s)", num1, num2), nil) + yield(fmt.Sprintf("POWER(%s, %s)", num1, num2), nil) + } + for _, num2 := range inputBitwise { + yield(fmt.Sprintf("POW(%s, %s)", num1, num2), nil) + yield(fmt.Sprintf("POWER(%s, %s)", num1, num2), nil) + } + } + + for _, num1 := range inputBitwise { + for _, num2 := range radianInputs { + yield(fmt.Sprintf("POW(%s, %s)", num1, num2), nil) + yield(fmt.Sprintf("POWER(%s, %s)", num1, num2), nil) + } + for _, num2 := range inputBitwise { + yield(fmt.Sprintf("POW(%s, %s)", num1, num2), nil) + yield(fmt.Sprintf("POWER(%s, %s)", num1, num2), nil) + } + } +} + +func FnSign(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("SIGN(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("SIGN(%s)", num), nil) + } +} + +func FnSqrt(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("SQRT(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("SQRT(%s)", num), nil) + } +} + +func FnRound(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("ROUND(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("ROUND(%s)", num), nil) + } + + for _, num1 := range radianInputs { + for _, num2 := range radianInputs { + yield(fmt.Sprintf("ROUND(%s, %s)", num1, num2), nil) + } + for _, num2 := range inputBitwise { + yield(fmt.Sprintf("ROUND(%s, %s)", num1, num2), nil) + } + } + + for _, num1 := range inputBitwise { + for _, num2 := range radianInputs { + yield(fmt.Sprintf("ROUND(%s, %s)", num1, num2), nil) + } + for _, num2 := range inputBitwise { + yield(fmt.Sprintf("ROUND(%s, %s)", num1, num2), nil) + } + } +} + +func FnTruncate(yield Query) { + for _, num1 := range radianInputs { + for _, num2 := range radianInputs { + yield(fmt.Sprintf("TRUNCATE(%s, %s)", num1, num2), nil) + } + for _, num2 := range inputBitwise { + yield(fmt.Sprintf("TRUNCATE(%s, %s)", num1, num2), nil) + } + } + + for _, num1 := range inputBitwise { + for _, num2 := range radianInputs { + yield(fmt.Sprintf("TRUNCATE(%s, %s)", num1, num2), nil) + } + for _, num2 := range inputBitwise { + yield(fmt.Sprintf("TRUNCATE(%s, %s)", num1, num2), nil) + } + } +} + +func FnCrc32(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("CRC32(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("CRC32(%s)", num), nil) + } + + for _, num := range inputConversions { + yield(fmt.Sprintf("CRC32(%s)", num), nil) + } +} + +func FnConv(yield Query) { + for _, num1 := range radianInputs { + for _, num2 := range radianInputs { + for _, num3 := range radianInputs { + yield(fmt.Sprintf("CONV(%s, %s, %s)", num1, num2, num3), nil) + } + for _, num3 := range inputBitwise { + yield(fmt.Sprintf("CONV(%s, %s, %s)", num1, num2, num3), nil) + } + } + } + + for _, num1 := range radianInputs { + for _, num2 := range inputBitwise { + for _, num3 := range radianInputs { + yield(fmt.Sprintf("CONV(%s, %s, %s)", num1, num2, num3), nil) + } + for _, num3 := range inputBitwise { + yield(fmt.Sprintf("CONV(%s, %s, %s)", num1, num2, num3), nil) + } + } + } + + for _, num1 := range inputBitwise { + for _, num2 := range inputBitwise { + for _, num3 := range radianInputs { + yield(fmt.Sprintf("CONV(%s, %s, %s)", num1, num2, num3), nil) + } + for _, num3 := range inputBitwise { + yield(fmt.Sprintf("CONV(%s, %s, %s)", num1, num2, num3), nil) + } + } + } +} + +func FnMD5(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("MD5(%s)", num), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("MD5(%s)", num), nil) + } + + for _, num := range inputConversions { + yield(fmt.Sprintf("MD5(%s)", num), nil) + } } -// HACK: for CASE comparisons, the expression is supposed to decompose like this: -// -// CASE a WHEN b THEN bb WHEN c THEN cc ELSE d -// => CASE WHEN a = b THEN bb WHEN a == c THEN cc ELSE d -// -// See: https://dev.mysql.com/doc/refman/5.7/en/flow-control-functions.html#operator_case -// However, MySQL does not seem to be using the real `=` operator for some of these comparisons -// namely, numerical comparisons are coerced into an unsigned form when they shouldn't. -// Example: -// -// SELECT -1 = 18446744073709551615 -// => 0 -// SELECT -1 WHEN 18446744073709551615 THEN 1 ELSE 0 END -// => 1 -// -// This does not happen for other types, which all follow the behavior of the `=` operator, -// so we're going to assume this is a bug for now. -func comparisonSkip(a, b string) bool { - if a == "-1" && b == "18446744073709551615" { - return true +func FnSHA1(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("SHA1(%s)", num), nil) + yield(fmt.Sprintf("SHA(%s)", num), nil) } - if b == "-1" && a == "18446744073709551615" { - return true + + for _, num := range inputBitwise { + yield(fmt.Sprintf("SHA1(%s)", num), nil) + yield(fmt.Sprintf("SHA(%s)", num), nil) + } + + for _, num := range inputConversions { + yield(fmt.Sprintf("SHA1(%s)", num), nil) + yield(fmt.Sprintf("SHA(%s)", num), nil) + } +} + +func FnSHA2(yield Query) { + bitLengths := []string{"0", "224", "256", "384", "512", "1", "0.1", "256.1e0", "1-1", "128+128"} + for _, bits := range bitLengths { + for _, num := range radianInputs { + yield(fmt.Sprintf("SHA2(%s, %s)", num, bits), nil) + } + + for _, num := range inputBitwise { + yield(fmt.Sprintf("SHA2(%s, %s)", num, bits), nil) + } + + for _, num := range inputConversions { + yield(fmt.Sprintf("SHA2(%s, %s)", num, bits), nil) + } } - if a == "9223372036854775808" && b == "-9223372036854775808" { - return true +} + +func FnRandomBytes(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("LENGTH(RANDOM_BYTES(%s))", num), nil) + yield(fmt.Sprintf("COLLATION(RANDOM_BYTES(%s))", num), nil) } - if a == "-9223372036854775808" && b == "9223372036854775808" { - return true + + for _, num := range inputBitwise { + yield(fmt.Sprintf("LENGTH(RANDOM_BYTES(%s))", num), nil) + yield(fmt.Sprintf("COLLATION(RANDOM_BYTES(%s))", num), nil) } - return false } -func (CaseExprWithValue) Test(yield Iterator) { +func CaseExprWithValue(yield Query) { var elements []string elements = append(elements, inputBitwise...) elements = append(elements, inputComparisonElement...) for _, cmpbase := range elements { for _, val1 := range elements { - if comparisonSkip(cmpbase, val1) { + if !(bugs{}).CanCompare(cmpbase, val1) { continue } yield(fmt.Sprintf("case %s when %s then 1 else 0 end", cmpbase, val1), nil) @@ -274,11 +730,27 @@ func (CaseExprWithValue) Test(yield Iterator) { } } -func (Base64) Test(yield Iterator) { +func If(yield Query) { + var elements []string + elements = append(elements, inputBitwise...) + elements = append(elements, inputComparisonElement...) + + for _, cmpbase := range elements { + for _, val1 := range elements { + for _, val2 := range elements { + yield(fmt.Sprintf("if(%s, %s, %s)", cmpbase, val1, val2), nil) + } + } + } +} + +func Base64(yield Query) { var inputs = []string{ `'bGlnaHQgdw=='`, `'bGlnaHQgd28='`, `'bGlnaHQgd29y'`, + // MySQL trims whitespace + `' \t\r\n bGlnaHQgd28= \n \t '`, } inputs = append(inputs, inputConversions...) @@ -292,17 +764,9 @@ func (Base64) Test(yield Iterator) { } } -func (Conversion) Test(yield Iterator) { - var right = []string{ - "BINARY", "BINARY(1)", "BINARY(0)", "BINARY(16)", "BINARY(-1)", - "CHAR", "CHAR(1)", "CHAR(0)", "CHAR(16)", "CHAR(-1)", - "NCHAR", "NCHAR(1)", "NCHAR(0)", "NCHAR(16)", "NCHAR(-1)", - "DECIMAL", "DECIMAL(0, 4)", "DECIMAL(12, 0)", "DECIMAL(12, 4)", - "DOUBLE", "REAL", - "SIGNED", "UNSIGNED", "SIGNED INTEGER", "UNSIGNED INTEGER", "JSON", - } +func Conversion(yield Query) { for _, lhs := range inputConversions { - for _, rhs := range right { + for _, rhs := range inputConversionTypes { yield(fmt.Sprintf("CAST(%s AS %s)", lhs, rhs), nil) yield(fmt.Sprintf("CONVERT(%s, %s)", lhs, rhs), nil) yield(fmt.Sprintf("CAST(CAST(%s AS JSON) AS %s)", lhs, rhs), nil) @@ -310,7 +774,7 @@ func (Conversion) Test(yield Iterator) { } } -func (LargeDecimals) Test(yield Iterator) { +func LargeDecimals(yield Query) { var largepi = inputPi + inputPi for pos := 0; pos < len(largepi); pos++ { @@ -319,7 +783,7 @@ func (LargeDecimals) Test(yield Iterator) { } } -func (LargeIntegers) Test(yield Iterator) { +func LargeIntegers(yield Query) { var largepi = inputPi + inputPi for pos := 1; pos < len(largepi); pos++ { @@ -328,7 +792,7 @@ func (LargeIntegers) Test(yield Iterator) { } } -func (DecimalClamping) Test(yield Iterator) { +func DecimalClamping(yield Query) { for pos := 0; pos < len(inputPi); pos++ { for m := 0; m < min(len(inputPi), 67); m += 2 { for d := 0; d <= min(m, 33); d += 2 { @@ -338,7 +802,7 @@ func (DecimalClamping) Test(yield Iterator) { } } -func (BitwiseOperatorsUnary) Test(yield Iterator) { +func BitwiseOperatorsUnary(yield Query) { for _, op := range []string{"~", "BIT_COUNT"} { for _, rhs := range inputBitwise { yield(fmt.Sprintf("%s(%s)", op, rhs), nil) @@ -346,23 +810,66 @@ func (BitwiseOperatorsUnary) Test(yield Iterator) { } } -func (BitwiseOperators) Test(yield Iterator) { +func BitwiseOperators(yield Query) { for _, op := range []string{"&", "|", "^", "<<", ">>"} { for _, lhs := range inputBitwise { for _, rhs := range inputBitwise { yield(fmt.Sprintf("%s %s %s", lhs, op, rhs), nil) } } + + for _, lhs := range inputConversions { + for _, rhs := range inputConversions { + yield(fmt.Sprintf("%s %s %s", lhs, op, rhs), nil) + } + } } } -func (WeightString) Test(yield Iterator) { +func WeightString(yield Query) { var inputs = []string{ `'foobar'`, `_latin1 'foobar'`, - `'foobar' as char(12)`, `'foobar' as binary(12)`, + `'foobar' as char(12)`, `'foobar' as char(3)`, `'foobar' as binary(12)`, `'foobar' as binary(3)`, + `'foobar' collate utf8mb4_bin as char(12)`, `'foobar' collate utf8mb4_bin as char(3)`, + `'foobar' collate binary as char(12)`, `'foobar' collate binary as char(3)`, `_latin1 'foobar' as char(12)`, `_latin1 'foobar' as binary(12)`, + `_binary 'foobar' as char(12)`, `_binary 'foobar' as binary(12)`, + `1`, `-1`, `9223372036854775807`, `18446744073709551615`, `-9223372036854775808`, + `1 as char(1)`, `-1 as char(1)`, `9223372036854775807 as char(1)`, `18446744073709551615 as char(1)`, `-9223372036854775808 as char(1)`, + `1 as char(32)`, `-1 as char(32)`, `9223372036854775807 as char(32)`, `18446744073709551615 as char(32)`, `-9223372036854775808 as char(32)`, + `1 as binary(1)`, `-1 as binary(1)`, `9223372036854775807 as binary(1)`, `18446744073709551615 as binary(1)`, `-9223372036854775808 as binary(1)`, + `1 as binary(32)`, `-1 as binary(32)`, `9223372036854775807 as binary(32)`, `18446744073709551615 as binary(32)`, `-9223372036854775808 as binary(32)`, `1234.0`, `12340e0`, `0x1234`, `0x1234 as char(12)`, `0x1234 as char(2)`, + `date'2000-01-01'`, `date'2000-01-01' as char(12)`, `date'2000-01-01' as char(2)`, `date'2000-01-01' as binary(12)`, `date'2000-01-01' as binary(2)`, + `timestamp'2000-01-01 11:22:33'`, `timestamp'2000-01-01 11:22:33' as char(12)`, `timestamp'2000-01-01 11:22:33' as char(2)`, `timestamp'2000-01-01 11:22:33' as binary(12)`, `timestamp'2000-01-01 11:22:33' as binary(2)`, + `timestamp'2000-01-01 11:22:33.123456'`, `timestamp'2000-01-01 11:22:33.123456' as char(12)`, `timestamp'2000-01-01 11:22:33.123456' as char(2)`, `timestamp'2000-01-01 11:22:33.123456' as binary(12)`, `timestamp'2000-01-01 11:22:33.123456' as binary(2)`, + `time'-11:22:33'`, `time'-11:22:33' as char(12)`, `time'-11:22:33' as char(2)`, `time'-11:22:33' as binary(12)`, `time'-11:22:33' as binary(2)`, + `time'11:22:33'`, `time'11:22:33' as char(12)`, `time'11:22:33' as char(2)`, `time'11:22:33' as binary(12)`, `time'11:22:33' as binary(2)`, + `time'101:22:33'`, `time'101:22:33' as char(12)`, `time'101:22:33' as char(2)`, `time'101:22:33' as binary(12)`, `time'101:22:33' as binary(2)`, + "cast(0 as json)", "cast(1 as json)", + "cast(true as json)", "cast(false as json)", + "cast('{}' as json)", "cast('[]' as json)", + "cast('null' as json)", "cast('true' as json)", "cast('false' as json)", + "cast('1' as json)", "cast('2' as json)", "cast('1.1' as json)", "cast('-1.1' as json)", + "cast('9223372036854775807' as json)", "cast('18446744073709551615' as json)", + // JSON strings + "cast('\"foo\"' as json)", "cast('\"bar\"' as json)", "cast('invalid' as json)", + // JSON binary values + "cast(_binary' \"foo\"' as json)", "cast(_binary '\"bar\"' as json)", + "cast(0xFF666F6F626172FF as json)", "cast(0x666F6F626172FF as json)", + "cast(0b01 as json)", "cast(0b001 as json)", + // JSON arrays + "cast('[\"a\"]' as json)", "cast('[\"ab\"]' as json)", + "cast('[\"ab\", \"cd\", \"ef\"]' as json)", "cast('[\"ab\", \"ef\"]' as json)", + // JSON objects + "cast('{\"a\": 1, \"b\": 2}' as json)", "cast('{\"b\": 2, \"a\": 1}' as json)", + "cast('{\"c\": 1, \"b\": 2}' as json)", "cast('{\"b\": 2, \"c\": 1}' as json)", + "cast(' \"b\": 2}' as json)", "cast('\"a\": 1' as json)", + // JSON date, datetime & time + "cast(date '2000-01-01' as json)", "cast(date '2000-01-02' as json)", + "cast(timestamp '2000-01-01 12:34:58' as json)", + "cast(time '12:34:56' as json)", "cast(time '12:34:58' as json)", "cast(time '5 12:34:58' as json)", } for _, i := range inputs { @@ -370,7 +877,7 @@ func (WeightString) Test(yield Iterator) { } } -func (FloatFormatting) Test(yield Iterator) { +func FloatFormatting(yield Query) { var floats = []string{ `18446744073709551615`, `9223372036854775807`, @@ -398,7 +905,7 @@ func (FloatFormatting) Test(yield Iterator) { } } -func (UnderscoreAndPercentage) Test(yield Iterator) { +func UnderscoreAndPercentage(yield Query) { var queries = []string{ `'pokemon' LIKE 'poke%'`, `'pokemon' LIKE 'poke\%'`, @@ -422,7 +929,7 @@ func (UnderscoreAndPercentage) Test(yield Iterator) { } } -func (Types) Test(yield Iterator) { +func Types(yield Query) { var queries = []string{ "1 > 3", "3 > 1", @@ -453,7 +960,25 @@ func (Types) Test(yield Iterator) { } } -func (HexArithmetic) Test(yield Iterator) { +func Arithmetic(yield Query) { + operators := []string{"+", "-", "*", "/", "DIV", "%", "MOD"} + + for _, op := range operators { + for _, lhs := range inputConversions { + for _, rhs := range inputConversions { + yield(fmt.Sprintf("%s %s %s", lhs, op, rhs), nil) + } + } + + for _, lhs := range inputBitwise { + for _, rhs := range inputBitwise { + yield(fmt.Sprintf("%s %s %s", lhs, op, rhs), nil) + } + } + } +} + +func HexArithmetic(yield Query) { var cases = []string{ `0`, `1`, `1.0`, `0.0`, `1.0e0`, `0.0e0`, `X'00'`, `X'1234'`, `X'ff'`, @@ -470,7 +995,7 @@ func (HexArithmetic) Test(yield Iterator) { } } -func (NumericTypes) Test(yield Iterator) { +func NumericTypes(yield Query) { var numbers = []string{ `1234`, `-1234`, `18446744073709551614`, @@ -497,7 +1022,7 @@ func (NumericTypes) Test(yield Iterator) { } } -func (NegateArithmetic) Test(yield Iterator) { +func NegateArithmetic(yield Query) { var cases = []string{ `0`, `1`, `1.0`, `0.0`, `1.0e0`, `0.0e0`, `X'00'`, `X'1234'`, `X'ff'`, @@ -513,10 +1038,15 @@ func (NegateArithmetic) Test(yield Iterator) { yield(fmt.Sprintf("- %s", rhs), nil) yield(fmt.Sprintf("-%s", rhs), nil) } -} -func (CollationOperations) Test(yield Iterator) { - var cases = []string{ + for _, rhs := range inputConversions { + yield(fmt.Sprintf("- %s", rhs), nil) + yield(fmt.Sprintf("-%s", rhs), nil) + } +} + +func CollationOperations(yield Query) { + var cases = []string{ "COLLATION('foobar')", "COLLATION(_latin1 'foobar')", "COLLATION(_utf8mb4 'foobar' COLLATE utf8mb4_general_ci)", @@ -529,7 +1059,7 @@ func (CollationOperations) Test(yield Iterator) { } } -func (LikeComparison) Test(yield Iterator) { +func LikeComparison(yield Query) { var left = []string{ `'foobar'`, `'FOOBAR'`, `'1234'`, `1234`, @@ -552,7 +1082,40 @@ func (LikeComparison) Test(yield Iterator) { } } -func (MultiComparisons) Test(yield Iterator) { +func StrcmpComparison(yield Query) { + inputs := append([]string{ + `'foobar'`, `'FOOBAR'`, + `'1234'`, `1234`, + `_utf8mb4 'foobar' COLLATE utf8mb4_0900_as_cs`, + `_utf8mb4 'FOOBAR' COLLATE utf8mb4_0900_as_cs`, + `_utf8mb4 'foobar' COLLATE utf8mb4_0900_as_ci`, + `_utf8mb4 'FOOBAR' COLLATE utf8mb4_0900_as_ci`, + `'foo%'`, `'FOO%'`, `'foo_ar'`, `'FOO_AR'`, + `'12%'`, `'12_4'`, `'12x4'`, `'12$4'`, + `_utf8mb4 '12_4' COLLATE utf8mb4_0900_as_cs`, + `_utf8mb4 '12_4' COLLATE utf8mb4_0900_ai_ci`, + `_utf8mb4 '12x4' COLLATE utf8mb4_0900_as_cs`, + `_utf8mb4 '12x4' COLLATE utf8mb4_0900_ai_ci`, + `_utf8mb4 '12$4' COLLATE utf8mb4_0900_as_cs`, + `_utf8mb4 '12$4' COLLATE utf8mb4_0900_ai_ci`, + `_utf8mb4 'foo%' COLLATE utf8mb4_0900_as_cs`, + `_utf8mb4 'FOO%' COLLATE utf8mb4_0900_as_cs`, + `_utf8mb4 'foo_ar' COLLATE utf8mb4_0900_as_cs`, + `_utf8mb4 'FOO_AR' COLLATE utf8mb4_0900_as_cs`, + `_utf8mb4 'foo%' COLLATE utf8mb4_0900_as_ci`, + `_utf8mb4 'FOO%' COLLATE utf8mb4_0900_as_ci`, + `_utf8mb4 'foo_ar' COLLATE utf8mb4_0900_as_ci`, + `_utf8mb4 'FOO_AR' COLLATE utf8mb4_0900_as_ci`, + }, inputConversions...) + + for _, lhs := range inputs { + for _, rhs := range inputs { + yield(fmt.Sprintf("STRCMP(%s, %s)", lhs, rhs), nil) + } + } +} + +func MultiComparisons(yield Query) { var numbers = []string{ `0`, `-1`, `1`, `0.0`, `1.0`, `-1.0`, `1.0E0`, `-1.0E0`, `0.0E0`, strconv.FormatUint(math.MaxUint64, 10), @@ -596,7 +1159,29 @@ func (MultiComparisons) Test(yield Iterator) { } } -func (IsStatement) Test(yield Iterator) { +func IntervalStatement(yield Query) { + inputs := []string{ + "-1", "0", "1", "2", "3", "0xFF", "1.1", "1.9", "1.1e0", "1.9e0", + strconv.FormatUint(math.MaxUint64, 10), + strconv.FormatUint(math.MaxInt64, 10), + strconv.FormatUint(math.MaxInt64+1, 10), + strconv.FormatInt(math.MinInt64, 10), + "18446744073709551616", + "-9223372036854775809", + `"foobar"`, "NULL", "cast('invalid' as json)", + } + for _, base := range inputs { + for _, arg1 := range inputs { + for _, arg2 := range inputs { + for _, arg3 := range inputs { + yield(fmt.Sprintf("INTERVAL(%s, %s, %s, %s)", base, arg1, arg2, arg3), nil) + } + } + } + } +} + +func IsStatement(yield Query) { var left = []string{ "NULL", "TRUE", "FALSE", `1`, `0`, `1.0`, `0.0`, `-1`, `666`, @@ -619,7 +1204,27 @@ func (IsStatement) Test(yield Iterator) { } } -func (TupleComparisons) Test(yield Iterator) { +func NotStatement(yield Query) { + var ops = []string{"NOT", "!"} + for _, op := range ops { + for _, i := range inputConversions { + yield(fmt.Sprintf("%s %s", op, i), nil) + } + } +} + +func LogicalStatement(yield Query) { + var ops = []string{"AND", "&&", "OR", "||", "XOR"} + for _, op := range ops { + for _, l := range inputConversions { + for _, r := range inputConversions { + yield(fmt.Sprintf("%s %s %s", l, op, r), nil) + } + } + } +} + +func TupleComparisons(yield Query) { var elems = []string{"NULL", "-1", "0", "1"} var operators = []string{"=", "!=", "<=>", "<", "<=", ">", ">="} @@ -637,18 +1242,24 @@ func (TupleComparisons) Test(yield Iterator) { } } -func (Comparisons) Test(yield Iterator) { +func Comparisons(yield Query) { var operators = []string{"=", "!=", "<=>", "<", "<=", ">", ">="} for _, op := range operators { - for i := 0; i < len(inputComparisonElement); i++ { - for j := 0; j < len(inputComparisonElement); j++ { - yield(fmt.Sprintf("%s %s %s", inputComparisonElement[i], op, inputComparisonElement[j]), nil) + for _, l := range inputComparisonElement { + for _, r := range inputComparisonElement { + yield(fmt.Sprintf("%s %s %s", l, op, r), nil) + } + } + + for _, l := range inputConversions { + for _, r := range inputConversions { + yield(fmt.Sprintf("%s %s %s", l, op, r), nil) } } } } -func (JSONExtract) Test(yield Iterator) { +func JSONExtract(yield Query) { var cases = []struct { Operator string Path string @@ -688,69 +1299,194 @@ func (JSONExtract) Test(yield Iterator) { } } -func (JSONExtract) Environment() *evalengine.ExpressionEnv { - env := new(evalengine.ExpressionEnv) - env.DefaultCollation = collations.CollationUtf8mb4ID - env.Fields = []*querypb.Field{ - { - Name: "column0", - Type: sqltypes.TypeJSON, - ColumnType: "JSON", - }, - } - return env +var JSONExtract_Schema = []*querypb.Field{ + { + Name: "column0", + Type: sqltypes.TypeJSON, + ColumnType: "JSON", + }, } -func (FnLower) Test(yield Iterator) { +func FnLower(yield Query) { for _, str := range inputStrings { yield(fmt.Sprintf("LOWER(%s)", str), nil) yield(fmt.Sprintf("LCASE(%s)", str), nil) } } -func (FnUpper) Test(yield Iterator) { +func FnUpper(yield Query) { for _, str := range inputStrings { yield(fmt.Sprintf("UPPER(%s)", str), nil) yield(fmt.Sprintf("UCASE(%s)", str), nil) } } -func (FnCharLength) Test(yield Iterator) { +func FnCharLength(yield Query) { for _, str := range inputStrings { yield(fmt.Sprintf("CHAR_LENGTH(%s)", str), nil) yield(fmt.Sprintf("CHARACTER_LENGTH(%s)", str), nil) } } -func (FnLength) Test(yield Iterator) { +func FnLength(yield Query) { for _, str := range inputStrings { yield(fmt.Sprintf("LENGTH(%s)", str), nil) yield(fmt.Sprintf("OCTET_LENGTH(%s)", str), nil) } } -func (FnBitLength) Test(yield Iterator) { +func FnBitLength(yield Query) { for _, str := range inputStrings { yield(fmt.Sprintf("BIT_LENGTH(%s)", str), nil) } } -func (FnAscii) Test(yield Iterator) { +func FnAscii(yield Query) { for _, str := range inputStrings { yield(fmt.Sprintf("ASCII(%s)", str), nil) } } -func (FnRepeat) Test(yield Iterator) { - counts := []string{"-1", "1.2", "3", "1073741825"} +func FnOrd(yield Query) { + for _, str := range inputStrings { + yield(fmt.Sprintf("ORD(%s)", str), nil) + } +} + +func FnRepeat(yield Query) { + counts := []string{"-1", "1.9", "3", "1073741825", "'1.9'"} + for _, str := range inputStrings { + for _, cnt := range counts { + yield(fmt.Sprintf("REPEAT(%s, %s)", str, cnt), nil) + } + } +} + +func FnLeft(yield Query) { + counts := []string{"-1", "1.9", "3", "10", "'1.9'"} + for _, str := range inputStrings { + for _, cnt := range counts { + yield(fmt.Sprintf("LEFT(%s, %s)", str, cnt), nil) + } + } +} + +func FnLpad(yield Query) { + counts := []string{"-1", "1.9", "3", "10", "'1.9'"} + for _, str := range inputStrings { + for _, cnt := range counts { + for _, pad := range inputStrings { + yield(fmt.Sprintf("LPAD(%s, %s, %s)", str, cnt, pad), nil) + } + } + } +} + +func FnRight(yield Query) { + counts := []string{"-1", "1.9", "3", "10", "'1.9'"} for _, str := range inputStrings { for _, cnt := range counts { - yield(fmt.Sprintf("repeat(%s, %s)", str, cnt), nil) + yield(fmt.Sprintf("RIGHT(%s, %s)", str, cnt), nil) + } + } +} + +func FnRpad(yield Query) { + counts := []string{"-1", "1.9", "3", "10", "'1.9'"} + for _, str := range inputStrings { + for _, cnt := range counts { + for _, pad := range inputStrings { + yield(fmt.Sprintf("RPAD(%s, %s, %s)", str, cnt, pad), nil) + } + } + } +} + +func FnLTrim(yield Query) { + for _, str := range inputTrimStrings { + yield(fmt.Sprintf("LTRIM(%s)", str), nil) + } +} + +func FnRTrim(yield Query) { + for _, str := range inputTrimStrings { + yield(fmt.Sprintf("RTRIM(%s)", str), nil) + } +} + +func FnTrim(yield Query) { + for _, str := range inputTrimStrings { + yield(fmt.Sprintf("TRIM(%s)", str), nil) + } + + modes := []string{"LEADING", "TRAILING", "BOTH"} + for _, str := range inputTrimStrings { + for _, mode := range modes { + yield(fmt.Sprintf("TRIM(%s FROM %s)", mode, str), nil) + } + } + + for _, str := range inputTrimStrings { + for _, pat := range inputTrimStrings { + yield(fmt.Sprintf("TRIM(%s FROM %s)", pat, str), nil) + for _, mode := range modes { + yield(fmt.Sprintf("TRIM(%s %s FROM %s)", mode, pat, str), nil) + } } } } -func (FnHex) Test(yield Iterator) { +func FnConcat(yield Query) { + for _, str := range inputStrings { + yield(fmt.Sprintf("CONCAT(%s)", str), nil) + } + + for _, str1 := range inputConversions { + for _, str2 := range inputConversions { + yield(fmt.Sprintf("CONCAT(%s, %s)", str1, str2), nil) + } + } + + for _, str1 := range inputStrings { + for _, str2 := range inputStrings { + for _, str3 := range inputStrings { + yield(fmt.Sprintf("CONCAT(%s, %s, %s)", str1, str2, str3), nil) + } + } + } +} + +func FnConcatWs(yield Query) { + for _, str := range inputStrings { + yield(fmt.Sprintf("CONCAT_WS(%s, NULL)", str), nil) + } + + for _, str1 := range inputConversions { + for _, str2 := range inputStrings { + for _, str3 := range inputStrings { + yield(fmt.Sprintf("CONCAT_WS(%s, %s, %s)", str1, str2, str3), nil) + } + } + } + + for _, str1 := range inputStrings { + for _, str2 := range inputConversions { + for _, str3 := range inputStrings { + yield(fmt.Sprintf("CONCAT_WS(%s, %s, %s)", str1, str2, str3), nil) + } + } + } + + for _, str1 := range inputStrings { + for _, str2 := range inputStrings { + for _, str3 := range inputConversions { + yield(fmt.Sprintf("CONCAT_WS(%s, %s, %s)", str1, str2, str3), nil) + } + } + } +} + +func FnHex(yield Query) { for _, str := range inputStrings { yield(fmt.Sprintf("hex(%s)", str), nil) } @@ -763,3 +1499,727 @@ func (FnHex) Test(yield Iterator) { yield(fmt.Sprintf("hex(%s)", str), nil) } } + +func FnUnhex(yield Query) { + var inputs = []string{ + `'f'`, + `'fe'`, + `'fea'`, + `'666F6F626172'`, + // MySQL trims whitespace + `' \t\r\n 4f \n \t '`, + } + + inputs = append(inputs, inputConversions...) + for _, input := range inputConversions { + inputs = append(inputs, "'"+hex.EncodeToString([]byte(input))+"'") + } + + for _, lhs := range inputs { + yield(fmt.Sprintf("UNHEX(%s)", lhs), nil) + } +} + +func InStatement(yield Query) { + roots := append([]string(nil), inputBitwise...) + roots = append(roots, inputComparisonElement...) + + genSubsets(roots, 3, func(inputs []string) { + if !(bugs{}).CanCompare(inputs...) { + return + } + yield(fmt.Sprintf("%s IN (%s, %s)", inputs[0], inputs[1], inputs[2]), nil) + yield(fmt.Sprintf("%s IN (%s, %s)", inputs[2], inputs[1], inputs[0]), nil) + yield(fmt.Sprintf("%s IN (%s, %s)", inputs[1], inputs[0], inputs[2]), nil) + yield(fmt.Sprintf("%s IN (%s, %s, %s)", inputs[0], inputs[1], inputs[2], inputs[0]), nil) + + yield(fmt.Sprintf("%s NOT IN (%s, %s)", inputs[0], inputs[1], inputs[2]), nil) + yield(fmt.Sprintf("%s NOT IN (%s, %s)", inputs[2], inputs[1], inputs[0]), nil) + yield(fmt.Sprintf("%s NOT IN (%s, %s)", inputs[1], inputs[0], inputs[2]), nil) + yield(fmt.Sprintf("%s NOT IN (%s, %s, %s)", inputs[0], inputs[1], inputs[2], inputs[0]), nil) + }) +} + +func FnNow(yield Query) { + fns := []string{ + "NOW()", "CURRENT_TIMESTAMP()", "CURRENT_TIMESTAMP", + "NOW(1)", "CURRENT_TIMESTAMP(1)", + "LOCALTIME()", "LOCALTIME", "LOCALTIMESTAMP()", "LOCALTIMESTAMP", + "LOCALTIME(1)", "LOCALTIMESTAMP(1)", + "UTC_TIMESTAMP()", "UTC_TIMESTAMP", + "UTC_TIMESTAMP(1)", + "CURDATE()", "CURRENT_DATE()", "CURRENT_DATE", + "UTC_TIME()", "UTC_TIME", + "UTC_DATE()", "UTC_DATE", + "UTC_TIME(1)", + "CURTIME()", "CURRENT_TIME()", "CURRENT_TIME", + "CURTIME(1)", "CURRENT_TIME(1)", + "SYSDATE()", "SYSDATE(1)", + "NOW(1)", "NOW(2)", "NOW(3)", "NOW(4)", "NOW(5)", + "SYSDATE(1)", "SYSDATE(2)", "SYSDATE(3)", "SYSDATE(4)", "SYSDATE(5)", + } + for _, fn := range fns { + yield(fn, nil) + } +} + +func FnInfo(yield Query) { + fns := []string{ + "USER()", "CURRENT_USER()", "CURRENT_USER", + "SESSION_USER()", "SYSTEM_USER()", + "DATABASE()", "SCHEMA()", + "VERSION()", + } + for _, fn := range fns { + yield(fn, nil) + } +} + +func FnDateFormat(yield Query) { + var buf strings.Builder + for _, f := range dateFormats { + buf.WriteByte('%') + buf.WriteByte(f.c) + buf.WriteByte(' ') + } + format := buf.String() + + for _, d := range inputConversions { + yield(fmt.Sprintf("DATE_FORMAT(%s, %q)", d, format), nil) + } +} + +func FnConvertTz(yield Query) { + timezoneInputs := []string{ + "UTC", + "GMT", + "America/New_York", + "America/Los_Angeles", + "Europe/London", + "Europe/Amsterdam", + "+00:00", + "-00:00", + "+01:00", + "-01:00", + "+02:00", + "-02:00", + "+14:00", + "-13:00", + "bogus", + } + for _, num1 := range inputConversions { + for _, tzFrom := range timezoneInputs { + for _, tzTo := range timezoneInputs { + q := fmt.Sprintf("CONVERT_TZ(%s, '%s', '%s')", num1, tzFrom, tzTo) + yield(q, nil) + } + } + } +} + +func FnDate(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("DATE(%s)", d), nil) + } +} + +func FnDayOfMonth(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("DAYOFMONTH(%s)", d), nil) + yield(fmt.Sprintf("DAY(%s)", d), nil) + } +} + +func FnDayOfWeek(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("DAYOFWEEK(%s)", d), nil) + } +} + +func FnDayOfYear(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("DAYOFYEAR(%s)", d), nil) + } +} + +func FnFromUnixtime(yield Query) { + var buf strings.Builder + for _, f := range dateFormats { + buf.WriteByte('%') + buf.WriteByte(f.c) + buf.WriteByte(' ') + } + format := buf.String() + + for _, d := range inputConversions { + yield(fmt.Sprintf("FROM_UNIXTIME(%s)", d), nil) + yield(fmt.Sprintf("FROM_UNIXTIME(%s, %q)", d, format), nil) + } +} + +func FnHour(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("HOUR(%s)", d), nil) + } +} + +func FnMakedate(yield Query) { + for _, y := range inputConversions { + for _, d := range inputConversions { + yield(fmt.Sprintf("MAKEDATE(%s, %s)", y, d), nil) + } + } +} + +func FnMaketime(yield Query) { + // Don't use inputConversions for minutes as those are simplest + // and otherwise we explode in test runtime. + minutes := []string{ + "''", "0", "'3'", "59", "60", "0xFF666F6F626172FF", "18446744073709551615", + } + for _, h := range inputConversions { + for _, m := range minutes { + for _, s := range inputConversions { + yield(fmt.Sprintf("MAKETIME(%s, %s, %s)", h, m, s), nil) + } + } + } +} + +func FnMicroSecond(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("MICROSECOND(%s)", d), nil) + } +} + +func FnMinute(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("MINUTE(%s)", d), nil) + } +} + +func FnMonth(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("MONTH(%s)", d), nil) + } +} + +func FnMonthName(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("MONTHNAME(%s)", d), nil) + } +} + +func FnQuarter(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("QUARTER(%s)", d), nil) + } +} + +func FnSecond(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("SECOND(%s)", d), nil) + } +} + +func FnTime(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("TIME(%s)", d), nil) + } +} + +func FnUnixTimestamp(yield Query) { + yield("UNIX_TIMESTAMP()", nil) + + for _, d := range inputConversions { + yield(fmt.Sprintf("UNIX_TIMESTAMP(%s)", d), nil) + yield(fmt.Sprintf("UNIX_TIMESTAMP(%s) + 1", d), nil) + } +} + +func FnWeek(yield Query) { + for i := 0; i < 16; i++ { + for _, d := range inputConversions { + yield(fmt.Sprintf("WEEK(%s, %d)", d, i), nil) + } + } + for _, d := range inputConversions { + yield(fmt.Sprintf("WEEK(%s)", d), nil) + } +} + +func FnWeekDay(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("WEEKDAY(%s)", d), nil) + } +} + +func FnWeekOfYear(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("WEEKOFYEAR(%s)", d), nil) + } +} + +func FnYear(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("YEAR(%s)", d), nil) + } +} + +func FnYearWeek(yield Query) { + for i := 0; i < 4; i++ { + for _, d := range inputConversions { + yield(fmt.Sprintf("YEARWEEK(%s, %d)", d, i), nil) + } + } + for _, d := range inputConversions { + yield(fmt.Sprintf("YEARWEEK(%s)", d), nil) + } +} + +func FnInetAton(yield Query) { + for _, d := range ipInputs { + yield(fmt.Sprintf("INET_ATON(%s)", d), nil) + } +} + +func FnInetNtoa(yield Query) { + for _, d := range ipInputs { + yield(fmt.Sprintf("INET_NTOA(%s)", d), nil) + yield(fmt.Sprintf("INET_NTOA(INET_ATON(%s))", d), nil) + } +} + +func FnInet6Aton(yield Query) { + for _, d := range ipInputs { + yield(fmt.Sprintf("INET6_ATON(%s)", d), nil) + } +} + +func FnInet6Ntoa(yield Query) { + for _, d := range ipInputs { + yield(fmt.Sprintf("INET6_NTOA(%s)", d), nil) + yield(fmt.Sprintf("INET6_NTOA(INET6_ATON(%s))", d), nil) + } +} + +func FnIsIPv4(yield Query) { + for _, d := range ipInputs { + yield(fmt.Sprintf("IS_IPV4(%s)", d), nil) + } +} + +func FnIsIPv4Compat(yield Query) { + for _, d := range ipInputs { + yield(fmt.Sprintf("IS_IPV4_COMPAT(%s)", d), nil) + yield(fmt.Sprintf("IS_IPV4_COMPAT(INET6_ATON(%s))", d), nil) + } +} + +func FnIsIPv4Mapped(yield Query) { + for _, d := range ipInputs { + yield(fmt.Sprintf("IS_IPV4_MAPPED(%s)", d), nil) + yield(fmt.Sprintf("IS_IPV4_MAPPED(INET6_ATON(%s))", d), nil) + } +} + +func FnIsIPv6(yield Query) { + for _, d := range ipInputs { + yield(fmt.Sprintf("IS_IPV6(%s)", d), nil) + } +} + +func FnBinToUUID(yield Query) { + args := []string{ + "NULL", + "-1", + "0", + "1", + "2", + "''", + "'-1'", + "'0'", + "'1'", + "'2'", + } + for _, d := range uuidInputs { + yield(fmt.Sprintf("BIN_TO_UUID(%s)", d), nil) + } + + for _, d := range uuidInputs { + for _, a := range args { + yield(fmt.Sprintf("BIN_TO_UUID(%s, %s)", d, a), nil) + } + } +} + +func FnIsUUID(yield Query) { + for _, d := range uuidInputs { + yield(fmt.Sprintf("IS_UUID(%s)", d), nil) + } +} + +func FnUUID(yield Query) { + yield("LENGTH(UUID())", nil) + yield("COLLATION(UUID())", nil) + yield("IS_UUID(UUID())", nil) + yield("LENGTH(UUID_TO_BIN(UUID())", nil) +} + +func FnUUIDToBin(yield Query) { + args := []string{ + "NULL", + "-1", + "0", + "1", + "2", + "''", + "'-1'", + "'0'", + "'1'", + "'2'", + } + for _, d := range uuidInputs { + yield(fmt.Sprintf("UUID_TO_BIN(%s)", d), nil) + } + + for _, d := range uuidInputs { + for _, a := range args { + yield(fmt.Sprintf("UUID_TO_BIN(%s, %s)", d, a), nil) + } + } +} + +func DateMath(yield Query) { + dates := []string{ + `DATE'2018-05-01'`, + `TIMESTAMP'2020-12-31 23:59:59'`, + `TIMESTAMP'2025-01-01 00:00:00'`, + `'2018-05-01'`, + `'2020-12-31 23:59:59'`, + `'2025-01-01 00:00:00'`, + `20250101`, + `'pokemon trainers'`, + `'20250101'`, + } + intervalValues := []string{ + `1`, `'1:1'`, `'1 1:1:1'`, `'-1 10'`, `'1 10'`, `31`, `30`, `'1.999999'`, `1.999`, `'1.999'`, + `'1:1:1:1'`, `'1:1 1:1'`, `'-1:10'`, `'1:10'`, `1.5`, `1.5000`, `6/4`, `'6/4'`, `1.5e0`, `1.5000e0`, + `CAST(6/4 AS DECIMAL(3,1))`, `CAST(6/4 AS DECIMAL(3,0))`, `1e0`, `'1.0'`, `'1.0foobar'`, + } + mysqlDocSamples := []string{ + `DATE_ADD(DATE'2018-05-01',INTERVAL 1 DAY)`, + `DATE_SUB(DATE'2018-05-01',INTERVAL 1 YEAR)`, + `DATE_ADD(TIMESTAMP'2020-12-31 23:59:59', INTERVAL 1 SECOND)`, + `DATE_ADD(TIMESTAMP'2018-12-31 23:59:59', INTERVAL 1 DAY)`, + `DATE_ADD(TIMESTAMP'2100-12-31 23:59:59', INTERVAL '1:1' MINUTE_SECOND)`, + `DATE_SUB(TIMESTAMP'2025-01-01 00:00:00', INTERVAL '1 1:1:1' DAY_SECOND)`, + `DATE_ADD(TIMESTAMP'1900-01-01 00:00:00', INTERVAL '-1 10' DAY_HOUR)`, + `DATE_SUB(DATE'1998-01-02', INTERVAL 31 DAY)`, + `DATE_ADD(TIMESTAMP'1992-12-31 23:59:59.000002', INTERVAL '1.999999' SECOND_MICROSECOND)`, + `DATE_ADD(DATE'2024-03-30', INTERVAL 1 MONTH)`, + `DATE_ADD(DATE'2024-03-31', INTERVAL 1 MONTH)`, + `TIMESTAMPADD(MINUTE, 1, '2003-01-02')`, + `TIMESTAMPADD(WEEK,1,'2003-01-02')`, + `TIMESTAMPADD(MONTH, 1, DATE '2024-03-30')`, + `TIMESTAMPADD(MONTH, 1, DATE '2024-03-31')`, + } + + for _, q := range mysqlDocSamples { + yield(q, nil) + } + + for _, d := range dates { + for _, i := range inputIntervals { + for _, v := range intervalValues { + yield(fmt.Sprintf("DATE_ADD(%s, INTERVAL %s %s)", d, v, i), nil) + yield(fmt.Sprintf("DATE_SUB(%s, INTERVAL %s %s)", d, v, i), nil) + yield(fmt.Sprintf("TIMESTAMPADD(%v, %s, %s)", i, v, d), nil) + } + } + } +} + +func RegexpLike(yield Query) { + mysqlDocSamples := []string{ + `'Michael!' REGEXP '.*'`, + `'Michael!' RLIKE '.*'`, + `'Michael!' NOT REGEXP '.*'`, + `'Michael!' NOT RLIKE '.*'`, + `'new*\n*line' REGEXP 'new\\*.\\*line'`, + `'a' REGEXP '^[a-d]'`, + `REGEXP_LIKE('CamelCase', 'CAMELCASE')`, + `REGEXP_LIKE('CamelCase', 'CAMELCASE' COLLATE utf8mb4_0900_as_cs)`, + `REGEXP_LIKE('abc', 'ABC'`, + `REGEXP_LIKE('abc', 'ABC', 'c')`, + `REGEXP_LIKE(1234, 12)`, + `REGEXP_LIKE(1234, 12, 'c')`, + `' ' REGEXP '[[:blank:]]'`, + `'\t' REGEXP '[[:blank:]]'`, + `' ' REGEXP '[[:space:]]'`, + `'\t' REGEXP '[[:space:]]'`, + `_latin1 0xFF regexp _latin1 '[[:lower:]]' COLLATE latin1_bin`, + `_koi8r 0xFF regexp _koi8r '[[:lower:]]' COLLATE koi8r_bin`, + `_latin1 0xFF regexp _latin1 '[[:upper:]]' COLLATE latin1_bin`, + `_koi8r 0xFF regexp _koi8r '[[:upper:]]' COLLATE koi8r_bin`, + `_latin1 0xF7 regexp _latin1 '[[:alpha:]]'`, + `_koi8r 0xF7 regexp _koi8r '[[:alpha:]]'`, + `_latin1'a' regexp _latin1'A' collate latin1_general_ci`, + `_latin1'a' regexp _latin1'A' collate latin1_bin`, + + `_latin1 'ÿ' regexp _utf8mb4 'ÿ'`, + `_utf8mb4 'ÿ' regexp _latin1 'ÿ'`, + `convert('ÿ' as char character set latin1) regexp _utf8mb4 'ÿ'`, + `_utf8mb4 'ÿ' regexp convert('ÿ' as char character set latin1)`, + + `'a' regexp '\\p{alphabetic}'`, + `'a' regexp '\\P{alphabetic}'`, + `'👌🏾regexp '\\p{Emoji}\\p{Emoji_modifier}'`, + `'a' regexp '\\p{Lowercase_letter}'`, + `'a' regexp '\\p{Uppercase_letter}'`, + `'A' regexp '\\p{Lowercase_letter}'`, + `'A' regexp '\\p{Uppercase_letter}'`, + `'a' collate utf8mb4_0900_as_cs regexp '\\p{Lowercase_letter}'`, + `'A' collate utf8mb4_0900_as_cs regexp '\\p{Lowercase_letter}'`, + `'a' collate utf8mb4_0900_as_cs regexp '\\p{Uppercase_letter}'`, + `'A' collate utf8mb4_0900_as_cs regexp '\\p{Uppercase_letter}'`, + `0xff REGEXP 0xff`, + `0xff REGEXP 0xfe`, + `cast(time '12:34:58' as json) REGEXP 0xff`, + } + + for _, q := range mysqlDocSamples { + yield(q, nil) + } + + for _, i := range regexInputs { + for _, p := range regexInputs { + yield(fmt.Sprintf("%s REGEXP %s", i, p), nil) + yield(fmt.Sprintf("%s NOT REGEXP %s", i, p), nil) + for _, m := range regexMatchStrings { + yield(fmt.Sprintf("REGEXP_LIKE(%s, %s, %s)", i, p, m), nil) + } + } + } +} + +func RegexpInstr(yield Query) { + mysqlDocSamples := []string{ + `REGEXP_INSTR('Michael!', '.*')`, + `REGEXP_INSTR('new*\n*line', 'new\\*.\\*line')`, + `REGEXP_INSTR('a', '^[a-d]')`, + `REGEXP_INSTR('CamelCase', 'CAMELCASE')`, + `REGEXP_INSTR('CamelCase', 'CAMELCASE' COLLATE utf8mb4_0900_as_cs)`, + `REGEXP_INSTR('abc', 'ABC'`, + `REGEXP_INSTR('abc', 'ABC', 'c')`, + `REGEXP_INSTR('0', '0', 1, 0)`, + `REGEXP_INSTR(' ', '[[:blank:]]')`, + `REGEXP_INSTR('\t', '[[:blank:]]')`, + `REGEXP_INSTR(' ', '[[:space:]]')`, + `REGEXP_INSTR('\t', '[[:space:]]')`, + `REGEXP_INSTR(_latin1 0xFF, _latin1 '[[:lower:]]' COLLATE latin1_bin)`, + `REGEXP_INSTR(_koi8r 0xFF, _koi8r '[[:lower:]]' COLLATE koi8r_bin)`, + `REGEXP_INSTR(_latin1 0xFF, _latin1 '[[:upper:]]' COLLATE latin1_bin)`, + `REGEXP_INSTR(_koi8r 0xFF, _koi8r '[[:upper:]]' COLLATE koi8r_bin)`, + `REGEXP_INSTR(_latin1 0xF7, _latin1 '[[:alpha:]]')`, + `REGEXP_INSTR(_koi8r 0xF7, _koi8r '[[:alpha:]]')`, + `REGEXP_INSTR(_latin1'a', _latin1'A' collate latin1_general_ci)`, + `REGEXP_INSTR(_latin1'a', _latin1'A' collate latin1_bin)`, + `REGEXP_INSTR('a', '\\p{alphabetic}')`, + `REGEXP_INSTR('a', '\\P{alphabetic}')`, + `REGEXP_INSTR('👌🏾, '\\p{Emoji}\\p{Emoji_modifier}')`, + `REGEXP_INSTR('a', '\\p{Lowercase_letter}')`, + `REGEXP_INSTR('a', '\\p{Uppercase_letter}')`, + `REGEXP_INSTR('A', '\\p{Lowercase_letter}')`, + `REGEXP_INSTR('A', '\\p{Uppercase_letter}')`, + `REGEXP_INSTR('a', collate utf8mb4_0900_as_cs regexp '\\p{Lowercase_letter}')`, + `REGEXP_INSTR('A', collate utf8mb4_0900_as_cs regexp '\\p{Lowercase_letter}')`, + `REGEXP_INSTR('a', collate utf8mb4_0900_as_cs regexp '\\p{Uppercase_letter}')`, + `REGEXP_INSTR('A', collate utf8mb4_0900_as_cs regexp '\\p{Uppercase_letter}')`, + `REGEXP_INSTR('dog cat dog', 'dog')`, + `REGEXP_INSTR('dog cat dog', 'dog', 2)`, + `REGEXP_INSTR('dog cat dog', 'dog', 1, 1)`, + `REGEXP_INSTR('dog cat dog', 'dog', 1, 1, 0)`, + `REGEXP_INSTR('dog cat dog', 'dog', 1, 1, 1)`, + `REGEXP_INSTR('dog cat dog', 'DOG', 1, 1, 1, 'i')`, + `REGEXP_INSTR('dog cat dog', 'DOG', 1, 1, 1, 'c')`, + `REGEXP_INSTR('dog cat dog', 'dog', 1, 2)`, + `REGEXP_INSTR('dog cat dog', 'dog', 1, 2, 0)`, + `REGEXP_INSTR('dog cat dog', 'dog', 1, 2, 1)`, + `REGEXP_INSTR('dog cat dog', 'DOG', 1, 2, 1, 'i')`, + `REGEXP_INSTR('dog cat dog', 'DOG', 1, 2, 1, 'c')`, + `REGEXP_INSTR('aa aaa aaaa', 'a{2}')`, + `REGEXP_INSTR('aa aaa aaaa', 'a{4}')`, + `REGEXP_INSTR(1234, 12)`, + `REGEXP_INSTR(1234, 12, 1)`, + `REGEXP_INSTR(1234, 12, 100)`, + `REGEXP_INSTR(1234, 12, 1, 1)`, + `REGEXP_INSTR(1234, 12, 1, 1, 1)`, + `REGEXP_INSTR(1234, 12, 1, 1, 1, 'c')`, + `REGEXP_INSTR('', ' ', 1000)`, + `REGEXP_INSTR(' ', ' ', 1000)`, + `REGEXP_INSTR(NULL, 'DOG', 1, 2, 1, 'c')`, + `REGEXP_INSTR('dog cat dog', NULL, 1, 2, 1, 'c')`, + `REGEXP_INSTR('dog cat dog', 'DOG', NULL, 2, 1, 'c')`, + `REGEXP_INSTR('dog cat dog', 'DOG', 1, NULL, 1, 'c')`, + `REGEXP_INSTR('dog cat dog', 'DOG', 1, 2, NULL, 'c')`, + `REGEXP_INSTR('dog cat dog', 'DOG', 1, 2, 1, NULL)`, + + `REGEXP_INSTR('dog cat dog', NULL, 1, 2, 1, 'c')`, + `REGEXP_INSTR('dog cat dog', _latin1 'DOG', NULL, 2, 1, 'c')`, + `REGEXP_INSTR('dog cat dog', _latin1 'DOG', 1, NULL, 1, 'c')`, + `REGEXP_INSTR('dog cat dog', _latin1 'DOG', 1, 2, NULL, 'c')`, + `REGEXP_INSTR('dog cat dog', _latin1 'DOG', 1, 2, 1, NULL)`, + } + + for _, q := range mysqlDocSamples { + yield(q, nil) + } +} + +func RegexpSubstr(yield Query) { + mysqlDocSamples := []string{ + `REGEXP_SUBSTR('Michael!', '.*')`, + `REGEXP_SUBSTR('new*\n*line', 'new\\*.\\*line')`, + `REGEXP_SUBSTR('a', '^[a-d]')`, + `REGEXP_SUBSTR('CamelCase', 'CAMELCASE')`, + `REGEXP_SUBSTR('CamelCase', 'CAMELCASE' COLLATE utf8mb4_0900_as_cs)`, + `REGEXP_SUBSTR('abc', 'ABC'`, + `REGEXP_SUBSTR(' ', '[[:blank:]]')`, + `REGEXP_SUBSTR('\t', '[[:blank:]]')`, + `REGEXP_SUBSTR(' ', '[[:space:]]')`, + `REGEXP_SUBSTR('\t', '[[:space:]]')`, + `REGEXP_SUBSTR(_latin1'a', _latin1'A' collate latin1_general_ci)`, + `REGEXP_SUBSTR(_latin1'a', _latin1'A' collate latin1_bin)`, + `REGEXP_SUBSTR('a', '\\p{alphabetic}')`, + `REGEXP_SUBSTR('a', '\\P{alphabetic}')`, + `REGEXP_SUBSTR('👌🏾, '\\p{Emoji}\\p{Emoji_modifier}')`, + `REGEXP_SUBSTR('a', '\\p{Lowercase_letter}')`, + `REGEXP_SUBSTR('a', '\\p{Uppercase_letter}')`, + `REGEXP_SUBSTR('A', '\\p{Lowercase_letter}')`, + `REGEXP_SUBSTR('A', '\\p{Uppercase_letter}')`, + `REGEXP_SUBSTR('a', collate utf8mb4_0900_as_cs regexp '\\p{Lowercase_letter}')`, + `REGEXP_SUBSTR('A', collate utf8mb4_0900_as_cs regexp '\\p{Lowercase_letter}')`, + `REGEXP_SUBSTR('a', collate utf8mb4_0900_as_cs regexp '\\p{Uppercase_letter}')`, + `REGEXP_SUBSTR('A', collate utf8mb4_0900_as_cs regexp '\\p{Uppercase_letter}')`, + `REGEXP_SUBSTR('dog cat dog', 'dog')`, + `REGEXP_SUBSTR('dog cat dog', 'dog', 2)`, + `REGEXP_SUBSTR('dog cat dog', 'dog', 1, 1)`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', 1, 1, 'i')`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', 1, 1, 'c')`, + `REGEXP_SUBSTR('dog cat dog', 'dog', 1, 2)`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', 1, 2, 'i')`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', 1, 2, 'c')`, + `REGEXP_SUBSTR('aa aaa aaaa', 'a{2}')`, + `REGEXP_SUBSTR('aa aaa aaaa', 'a{4}')`, + `REGEXP_SUBSTR(1234, 12)`, + `REGEXP_SUBSTR(1234, 12, 1)`, + `REGEXP_SUBSTR(1234, 12, 100)`, + `REGEXP_SUBSTR(1234, 12, 1, 1)`, + `REGEXP_SUBSTR(1234, 12, 1, 1, 'c')`, + + `REGEXP_SUBSTR(NULL, 'DOG', 1, 1, 'i')`, + `REGEXP_SUBSTR('dog cat dog', NULL, 1, 1, 'i')`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', NULL, 1, 'i')`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', 1, NULL, 'i')`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', 1, 1, NULL)`, + + `REGEXP_SUBSTR(NULL, '[', 1, 1, 'i')`, + `REGEXP_SUBSTR('dog cat dog', '[', NULL, 1, 'i')`, + `REGEXP_SUBSTR('dog cat dog', '[', 1, NULL, 'i')`, + `REGEXP_SUBSTR('dog cat dog', '[', 1, 1, NULL)`, + + `REGEXP_SUBSTR('dog cat dog', 'DOG', 0, 1, 'i')`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', -1, 1, 'i')`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', 100, 1, 'i')`, + `REGEXP_SUBSTR('dog cat dog', 'DOG', 1, 1, 0)`, + + `REGEXP_SUBSTR(' ', ' ', 1)`, + `REGEXP_SUBSTR(' ', ' ', 2)`, + `REGEXP_SUBSTR(' ', ' ', 3)`, + } + + for _, q := range mysqlDocSamples { + yield(q, nil) + } +} + +func RegexpReplace(yield Query) { + mysqlDocSamples := []string{ + `REGEXP_REPLACE('a b c', 'b', 'X')`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 1, 0)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 1, 1)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 1, 2)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 1, 3)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 2, 0)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 2, 1)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 2, 2)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 2, 3)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 3, 0)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 3, 1)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 3, 2)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 3, 3)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 4, 0)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 4, 1)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 4, 2)`, + `REGEXP_REPLACE('abc def ghi', '[a-z]+', 'X', 4, 3)`, + `REGEXP_REPLACE('a', '\\p{Lowercase_letter}', 'X')`, + `REGEXP_REPLACE('a', '\\p{Uppercase_letter}', 'X')`, + `REGEXP_REPLACE('A', '\\p{Lowercase_letter}', 'X')`, + `REGEXP_REPLACE('A', '\\p{Uppercase_letter}', 'X')`, + `REGEXP_REPLACE(1234, 12, 6)`, + `REGEXP_REPLACE(1234, 12, 6, 1)`, + `REGEXP_REPLACE(1234, 12, 6, 100)`, + `REGEXP_REPLACE(1234, 12, 6, 1, 1)`, + `REGEXP_REPLACE(1234, 12, 6, 1, 1, 'c')`, + + `REGEXP_REPLACE(NULL, 'DOG', 'bar', 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', NULL, 'bar', 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', 'DOG', NULL, 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', 'DOG', 'bar', 1, NULL, 'i')`, + `REGEXP_REPLACE('dog cat dog', 'DOG', 'bar', 1, 1, NULL)`, + `REGEXP_REPLACE('dog cat dog', 'DOG', 'bar', '1', '1', 0)`, + + `REGEXP_REPLACE(NULL, _latin1'DOG', 'bar', 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'DOG', NULL, 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'DOG', 'bar', 1, NULL, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'DOG', 'bar', 1, 1, NULL)`, + `REGEXP_REPLACE('dog cat dog', _latin1'DOG', 'bar', '1', '1', 0)`, + + `REGEXP_REPLACE(NULL, '[', 'bar', 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', '[', NULL, 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', '[', 'bar', 1, NULL, 'i')`, + `REGEXP_REPLACE('dog cat dog', '[', 'bar', 1, 1, NULL)`, + + `REGEXP_REPLACE(NULL, _latin1'[', 'bar', 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'[', NULL, 1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'[', 'bar', 1, NULL, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'[', 'bar', 1, 1, NULL)`, + + `REGEXP_REPLACE('dog cat dog', 'DOG', 'bar', 0, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', 'DOG', 'bar', -1, 1, 'i')`, + `REGEXP_REPLACE('', 'DOG', 'bar', -1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', 'DOG', 'bar', 100, 1, 'i')`, + `REGEXP_REPLACE('', 'DOG', 'bar', 100, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', 'DOG', 'bar', 1, 1, 0)`, + + `REGEXP_REPLACE('dog cat dog', _latin1'DOG', 'bar', 0, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'DOG', 'bar', -1, 1, 'i')`, + `REGEXP_REPLACE('', _latin1'DOG', 'bar', -1, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'DOG', 'bar', 100, 1, 'i')`, + `REGEXP_REPLACE('', _latin1'DOG', 'bar', 100, 1, 'i')`, + `REGEXP_REPLACE('dog cat dog', _latin1'DOG', 'bar', 1, 1, 0)`, + + `REGEXP_REPLACE(' ', ' ', 'x', 1)`, + `REGEXP_REPLACE(' ', ' ', 'x', 2)`, + `REGEXP_REPLACE(' ', ' ', 'x', 3)`, + + `REGEXP_REPLACE(' ', _latin1' ', 'x', 1)`, + `REGEXP_REPLACE(' ', _latin1' ', 'x', 2)`, + `REGEXP_REPLACE(' ', _latin1' ', 'x', 3)`, + } + + for _, q := range mysqlDocSamples { + yield(q, nil) + } +} diff --git a/go/vt/vtgate/evalengine/testcases/helpers.go b/go/vt/vtgate/evalengine/testcases/helpers.go index 6402549576f..f7cf5b22dd8 100644 --- a/go/vt/vtgate/evalengine/testcases/helpers.go +++ b/go/vt/vtgate/evalengine/testcases/helpers.go @@ -16,7 +16,32 @@ limitations under the License. package testcases -import "vitess.io/vitess/go/sqltypes" +import ( + "fmt" + "math" + "reflect" + "runtime" + "strings" + "time" + + "vitess.io/vitess/go/mysql/datetime" + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +type Query func(query string, row []sqltypes.Value) +type Runner func(yield Query) +type TestCase struct { + Run Runner + Schema []*querypb.Field + Compare *Comparison +} + +func (tc TestCase) Name() string { + ptr := reflect.ValueOf(tc.Run).Pointer() + name := runtime.FuncForPC(ptr).Name() + return name[strings.LastIndexByte(name, '.')+1:] +} func perm(a []string, f func([]string)) { perm1(a, f, 0) @@ -53,13 +78,6 @@ func genSubsets(args []string, subsetLen int, yield func([]string)) { genSubsets1(args, subset, 0, 0, yield) } -func min(a, b int) int { - if a < b { - return a - } - return b -} - func mustJSON(j string) sqltypes.Value { v, err := sqltypes.NewJSON(j) if err != nil { @@ -67,3 +85,118 @@ func mustJSON(j string) sqltypes.Value { } return v } + +type bugs struct{} + +// CanCompare skips comparisons in CASE and IN expressions that behave in unexpected +// ways. The following is an example of expressions giving un-intuitive results (i.e. +// results that do not match the behavior of the `=` operator, which is supposed to apply, +// pair-wise, to the comparisons in a CASE or IN statement): +// +// SELECT -1 IN (0xFF, 18446744073709551615) => 1 +// SELECT -1 IN (0, 18446744073709551615) => 0 +// SELECT -1 IN (0.0, 18446744073709551615) => 1 +// +// SELECT 'FOO' IN ('foo', 0x00) => 0 +// SELECT 'FOO' IN ('foo', 0) => 1 +// SELECT 'FOO' IN ('foo', 0x00, CAST('bar' as char)) => 1 +// SELECT 'FOO' IN ('foo', 0x00, 'bar') => 0 +// +// SELECT 9223372036854775808 IN (0.0e0, -9223372036854775808) => 1 +// SELECT 9223372036854775808 IN (0, -9223372036854775808) => 0 +// SELECT 9223372036854775808 IN (0.0, -9223372036854775808) => 1 +// +// Generally speaking, it's counter-intuitive that adding more (unrelated) types to the +// right-hand of the IN operator would change the result of the operation itself. It seems +// like there's logic that changes the way the elements are compared with a type aggregation +// but this is not documented anywhere. +func (bugs) CanCompare(elems ...string) bool { + var invalid = map[string]string{ + "18446744073709551615": "-1", + `9223372036854775808`: `-9223372036854775808`, + } + + for i, e := range elems { + if strings.HasPrefix(e, "_binary ") || + strings.HasPrefix(e, "0x") || + strings.HasPrefix(e, "X'") || + strings.HasSuffix(e, "collate utf8mb4_0900_as_cs") { + return false + } + if other, ok := invalid[e]; ok { + for j := 0; j < len(elems); j++ { + if i != j && elems[j] == other { + return false + } + } + } + } + return true +} + +type Comparison struct { + Decimals uint32 + LooseTime bool +} + +func (cmp *Comparison) closeDatetime(a, b time.Time, diff time.Duration) bool { + d := a.Sub(b) + if d < 0 { + d = -d + } + return d <= diff +} + +func (cmp *Comparison) closeFloat(a, b float64) bool { + const tolerance = 1e-14 + + if cmp.Decimals > 0 { + ratio := math.Pow(10, float64(cmp.Decimals)) + a = math.Round(a*ratio) / ratio + b = math.Round(b*ratio) / ratio + } + if a == b { + return true + } + if b == 0 { + return math.Abs(a) < tolerance + } + return math.Abs((a-b)/b) < tolerance +} + +func (cmp *Comparison) Equals(local, remote sqltypes.Value) (bool, error) { + switch { + case local.IsFloat() && remote.IsFloat(): + localFloat, err := local.ToFloat64() + if err != nil { + return false, fmt.Errorf("error converting local value to float: %w", err) + } + remoteFloat, err := remote.ToFloat64() + if err != nil { + return false, fmt.Errorf("error converting remote value to float: %w", err) + } + return cmp.closeFloat(localFloat, remoteFloat), nil + case cmp.LooseTime && local.IsDateTime() && remote.IsDateTime(): + localDatetime, _, ok := datetime.ParseDateTime(local.ToString(), -1) + if !ok { + return false, fmt.Errorf("error converting local value '%s' to datetime", local) + } + remoteDatetime, _, ok := datetime.ParseDateTime(remote.ToString(), -1) + if !ok { + return false, fmt.Errorf("error converting remote value '%s' to datetime", remote) + } + return cmp.closeDatetime(localDatetime.ToStdTime(time.Local), remoteDatetime.ToStdTime(time.Local), 1*time.Second), nil + case cmp.LooseTime && local.IsTime() && remote.IsTime(): + localTime, _, ok := datetime.ParseTime(local.ToString(), -1) + if !ok { + return false, fmt.Errorf("error converting local value '%s' to time", local) + } + remoteTime, _, ok := datetime.ParseTime(remote.ToString(), -1) + if !ok { + return false, fmt.Errorf("error converting remote value '%s' to time", remote) + } + return cmp.closeDatetime(localTime.ToStdTime(time.Local), remoteTime.ToStdTime(time.Local), 1*time.Second), nil + default: + return local.String() == remote.String(), nil + } +} diff --git a/go/vt/vtgate/evalengine/testcases/inputs.go b/go/vt/vtgate/evalengine/testcases/inputs.go index 067106df468..245318529c3 100644 --- a/go/vt/vtgate/evalengine/testcases/inputs.go +++ b/go/vt/vtgate/evalengine/testcases/inputs.go @@ -19,6 +19,8 @@ package testcases import ( "math" "strconv" + + "vitess.io/vitess/go/mysql/format" ) var inputJSONObjects = []string{ @@ -44,7 +46,7 @@ var inputJSONPrimitives = []string{ } var inputBitwise = []string{ - "0", "1", "0xFF", "255", "1.0", "1.1", "-1", "-255", "7", "9", "13", "1.5", "-1.5", + "0", "1", "0xFF", "255", "1.0", "1.1", "-1", "-255", "7", "9", "13", "1.5", "-1.5", "'1.5'", "'-1.5'", "0.0e0", "1.0e0", "255.0", "1.5e0", "-1.5e0", "1.1e0", "-1e0", "-255e0", "7e0", "9e0", "13e0", strconv.FormatUint(math.MaxUint64, 10), strconv.FormatUint(math.MaxInt64, 10), @@ -57,7 +59,25 @@ var inputBitwise = []string{ "64", "'64'", "_binary '64'", "X'40'", "_binary X'40'", } -var inputComparisonElement = []string{"NULL", "-1", "0", "1", +var radianInputs = []string{ + "0", + "1", + "-1", + "'1.5'", + "NULL", + "'ABC'", + "1.5e0", + "-1.5e0", + "9223372036854775810.4", + "-9223372036854775810.4", + string(format.FormatFloat(math.Pi)), + string(format.FormatFloat(math.MaxFloat64)), + string(format.FormatFloat(math.SmallestNonzeroFloat32)), + string(format.FormatFloat(math.SmallestNonzeroFloat64)), +} + +var inputComparisonElement = []string{ + "NULL", "-1", "0", "1", `'foo'`, `'bar'`, `'FOO'`, `'BAR'`, `'foo' collate utf8mb4_0900_as_cs`, `'FOO' collate utf8mb4_0900_as_cs`, @@ -66,16 +86,80 @@ var inputComparisonElement = []string{"NULL", "-1", "0", "1", } var inputConversions = []string{ - "0", "1", "255", + "0", "1", "255", "' 0 '", "' 1 '", "' 255 '", `'\t1foo\t'`, "' 255 foo'", "0.0e0", "1.0e0", "1.5e0", "-1.5e0", "1.1e0", "-1.1e0", "-1.7e0", - "0.0", "0.000", "1.5", "-1.5", "1.1", "1.7", "-1.1", "-1.7", + "0.0", "0.000", "1.5", "-1.5", "1.1", "1.7", "-1.1", "-1.7", "'1.5'", "'-1.5'", `'foobar'`, `_utf8 'foobar'`, `''`, `_binary 'foobar'`, `0x0`, `0x1`, `0xff`, `X'00'`, `X'01'`, `X'ff'`, "NULL", "true", "false", "0xFF666F6F626172FF", "0x666F6F626172FF", "0xFF666F6F626172", + "9223372036854775807", "-9223372036854775808", "18446744073709551615", "18446744073709540000e0", "-18446744073709540000e0", "JSON_OBJECT()", "JSON_ARRAY()", + "time '10:04:58'", "time '31:34:58'", "time '32:34:58'", "time '101:34:58'", "time '5 10:34:58'", "date '2000-01-01'", + "timestamp '2000-01-01 10:34:58'", "timestamp '2000-01-01 10:34:58.123456'", "timestamp '2000-01-01 10:34:58.978654'", + "20000101103458", "20000101103458.1234", "20000101103458.123456", "20000101", "103458", "103458.123456", + "'20000101103458'", "'20000101103458.1234'", "'20000101103458.123456'", "'20000101'", "'103458'", "'103458.123456'", + "'20000101103458foo'", "'20000101103458.1234foo'", "'20000101103458.123456foo'", "'20000101foo'", "'103458foo'", "'103458.123456foo'", + "time '-10:04:58'", "time '-31:34:58'", "time '-32:34:58'", + "time '-101:34:58'", "time '-5 10:34:58'", + "'10:04:58'", "'101:34:58'", "'5 10:34:58'", "'2000-01-01'", "'2000-01-01 12:34:58'", + "cast(0 as json)", "cast(1 as json)", + "cast(true as json)", "cast(false as json)", + "cast('{}' as json)", "cast('[]' as json)", + "cast('null' as json)", "cast('true' as json)", "cast('false' as json)", + // JSON numbers + "cast(1 as json)", "cast(2 as json)", "cast(1.1 as json)", "cast(-1.1 as json)", + "cast(9223372036854775807 as json)", "cast(18446744073709551615 as json)", + "cast('1' as json)", "cast('2' as json)", "cast('1.1' as json)", "cast('-1.1' as json)", + "cast('9223372036854775807' as json)", "cast('18446744073709551615' as json)", + // JSON strings + "cast('\"foo\"' as json)", "cast('\"bar\"' as json)", "cast('invalid' as json)", + // JSON binary values + "cast(_binary' \"foo\"' as json)", "cast(_binary '\"bar\"' as json)", + "cast(0xFF666F6F626172FF as json)", "cast(0x666F6F626172FF as json)", + "cast(0b01 as json)", "cast(0b001 as json)", + // JSON arrays + "cast('[\"a\"]' as json)", "cast('[\"ab\"]' as json)", + "cast('[\"ab\", \"cd\", \"ef\"]' as json)", "cast('[\"ab\", \"ef\"]' as json)", + // JSON objects + "cast('{\"a\": 1, \"b\": 2}' as json)", "cast('{\"b\": 2, \"a\": 1}' as json)", + "cast('{\"c\": 1, \"b\": 2}' as json)", "cast('{\"b\": 2, \"c\": 1}' as json)", + "cast(' \"b\": 2}' as json)", "cast('\"a\": 1' as json)", + // JSON date, datetime & time + "cast(date '2000-01-01' as json)", "cast(date '2000-01-02' as json)", + "cast(timestamp '2000-01-01 12:34:58' as json)", + "cast(time '12:34:56' as json)", "cast(time '12:34:58' as json)", "cast(time '5 12:34:58' as json)", +} + +var regexInputs = []string{ + "0", "1", "' 0 '", `'\t1foo\t'`, + `'foobar'`, `_utf8 'foobar'`, `''`, `_binary 'foobar'`, + `0x0`, `0x1`, `0xff`, + "NULL", "true", "false", + "0xFF666F6F626172FF", + "time '10:04:58'", "date '2000-01-01'", + "timestamp '2000-01-01 10:34:58'", + "cast(0 as json)", "cast(1 as json)", + "cast(true as json)", "cast(false as json)", + // JSON numbers + "cast(2 as json)", "cast(1.1 as json)", "cast(-1.1 as json)", + // JSON strings + "cast('\"foo\"' as json)", + // JSON binary values + "cast(_binary' \"foo\"' as json)", + "cast(0xFF666F6F626172FF as json)", + "cast(0b01 as json)", + // JSON arrays + "cast('[\"a\"]' as json)", + // JSON objects + "cast('{\"a\": 1, \"b\": 2}' as json)", +} + +var regexMatchStrings = []string{ + "NULL", + "'c'", "'i'", "'m'", "'n'", "'u'", "'cimnu'", "'cimnuunmic'", } const inputPi = "314159265358979323846264338327950288419716939937510582097494459" @@ -100,9 +184,9 @@ var inputStrings = []string{ "-9223372036854775808", "999999999999999999999999", "-999999999999999999999999", - "_latin1 X'ÂÄÌå'", "_binary 'Müller' ", "_utf8mb4 'abcABCÅå'", + "_latin1 0xFF", // TODO: support other multibyte encodings // "_dec8 'ÒòÅå'", // "_utf8mb3 'abcABCÅå'", @@ -110,3 +194,145 @@ var inputStrings = []string{ // "_utf32 'AabcÅå'", // "_ucs2 'AabcÅå'", } + +var inputConversionTypes = []string{ + "BINARY", "BINARY(1)", "BINARY(0)", "BINARY(16)", "BINARY(-1)", + "CHAR", "CHAR(1)", "CHAR(0)", "CHAR(16)", "CHAR(-1)", + "NCHAR", "NCHAR(1)", "NCHAR(0)", "NCHAR(16)", "NCHAR(-1)", + "DECIMAL", "DECIMAL(0, 4)", "DECIMAL(12, 0)", "DECIMAL(12, 4)", "DECIMAL(60)", "DECIMAL(60, 6)", + "DOUBLE", "REAL", + "SIGNED", "UNSIGNED", "SIGNED INTEGER", "UNSIGNED INTEGER", "JSON", + "DATE", "DATETIME", "TIME", "DATETIME(4)", "TIME(4)", "DATETIME(6)", "TIME(6)", +} + +var dateFormats = []struct { + c byte + expr string +}{ + {'a', "LEFT(DAYNAME(d),3)"}, + {'b', "LEFT(MONTHNAME(d),3)"}, + {'c', "MONTH(d)"}, + {'D', ""}, + {'d', "LPAD(DAYOFMONTH(d),0,2)"}, + {'e', "DAYOFMONTH(d)"}, + {'f', "LPAD(MICROSECOND(t),6,0)"}, + {'H', "LPAD(HOUR(t),2,0)"}, + {'h', ""}, + {'I', ""}, + {'i', "LPAD(MINUTE(t),2,0)"}, + {'j', ""}, + {'k', "HOUR(t)"}, + {'l', ""}, + {'M', "MONTHNAME(d)"}, + {'m', "LPAD(MONTH(d),2,0)"}, + {'p', ""}, + {'r', ""}, + {'S', "LPAD(SECOND(t),2,0)"}, + {'s', "LPAD(SECOND(t),2,0)"}, + {'T', ""}, + {'U', "LPAD(WEEK(d,0),2,0)"}, + {'u', "LPAD(WEEK(d,1),2,0)"}, + {'V', "RIGHT(YEARWEEK(d,2),2)"}, + {'v', "RIGHT(YEARWEEK(d,3),2)"}, + {'W', "DAYNAME(d)"}, + {'w', "DAYOFWEEK(d)-1"}, + {'X', "LEFT(YEARWEEK(d,2),4)"}, + {'x', "LEFT(YEARWEEK(d,3),4)"}, + {'Y', "YEAR(d)"}, + {'y', "RIGHT(YEAR(d),2)"}, + {'%', ""}, +} + +var inputTrimStrings = []string{ + "\" Å å\" ", + "NULL", + "\"\"", + "\"a\"", + "\"abc\"", + "'abca'", + "1", + "-1", + "0123", + "0xAACC", + "3.1415926", + "\" 中文测试\"", + "\"日本語テスト \"", + "\"한국어 시험\"", + "\" 😊😂🤢\r\t \"", + "'123'", + "9223372036854775807", + "-9223372036854775808", + "999999999999999999999999", + "-999999999999999999999999", + "_binary 'Müller\r\n' ", + "_utf8mb4 '\nabcABCÅå '", + // utf8mb4 version of the non-breaking space + "_utf8mb4 0xC2A078C2A0", + // latin1 version of the non-breaking space + "_latin1 0xA078A0", +} + +var ipInputs = []string{ + "NULL", + "'10.0.5.9'", + "'10.0.5.256'", + "'fdfe::5a55:caff:fefa:9089'", + "'::ffff:10.0.5.9'", + "'::10.0.5.9'", + "'198.51.100.1'", + "'::c0a8:0001'", + "'::c0a8:1'", + "'::ffff:198.51.100.1'", + "'::ffff:c0a8:0001'", + "'::ffff:c0a8:1'", + "'::'", + "'::1'", + "'::ff'", + "'::ffff'", + "'::1:ffff'", + "'127.0.0.1'", + "'::ffff:'", + "'foobar'", + "167773449", + strconv.FormatInt(math.MinInt32, 10), + strconv.FormatInt(math.MinInt32-1, 10), + strconv.FormatUint(math.MaxInt32, 10), + strconv.FormatUint(math.MaxInt32+1, 10), + strconv.FormatUint(math.MaxUint32, 10), + strconv.FormatUint(math.MaxUint32+1, 10), + "0x0000000000000000000000000A000509", +} + +var uuidInputs = []string{ + "NULL", + "'foobar'", + "''", + "'09db81f6-f266-11ed-a6f9-20fc8fd6830e'", + "'09db81f6f26611eda6f920fc8fd6830e'", + "'{09db81f6-f266-11ed-a6f9-20fc8fd6830e}'", + "0x0000000000000000000000000A000509", + "0x09DB81F6F26611EDA6F920FC8FD6830E", + "0x11EDF26609DB81F6A6F920FC8FD6830E", +} + +var inputIntervals = []string{"day", + "week", + "month", + "year", + "day_hour", + "day_microsecond", + "day_minute", + "day_second", + "hour", + "hour_microsecond", + "hour_minute", + "hour_second", + "microsecond", + "minute", + "minute_microsecond", + "minute_second", + "quarter", + "second", + "second_microsecond", + "year_month", +} diff --git a/go/vt/vtgate/evalengine/translate.go b/go/vt/vtgate/evalengine/translate.go index 562dc920c21..3af97a183e3 100644 --- a/go/vt/vtgate/evalengine/translate.go +++ b/go/vt/vtgate/evalengine/translate.go @@ -22,19 +22,12 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" ) -type ( - TranslationLookup interface { - ColumnLookup(col *sqlparser.ColName) (int, error) - CollationForExpr(expr sqlparser.Expr) collations.ID - DefaultCollation() collations.ID - } -) - var ErrTranslateExprNotSupported = "expr cannot be translated, not supported" var ErrEvaluatedExprNotSupported = "expr cannot be evaluated, not supported" @@ -60,7 +53,6 @@ func (ast *astCompiler) translateComparisonExpr2(op sqlparser.ComparisonExprOper return &InExpr{ BinaryExpr: binaryExpr, Negate: op == sqlparser.NotInOp, - Hashed: nil, }, nil } @@ -83,6 +75,14 @@ func (ast *astCompiler) translateComparisonExpr2(op sqlparser.ComparisonExprOper return &LikeExpr{BinaryExpr: binaryExpr}, nil case sqlparser.NotLikeOp: return &LikeExpr{BinaryExpr: binaryExpr, Negate: true}, nil + case sqlparser.RegexpOp, sqlparser.NotRegexpOp: + return &builtinRegexpLike{ + CallExpr: CallExpr{ + Arguments: []Expr{left, right}, + Method: "REGEXP_LIKE", + }, + Negate: op == sqlparser.NotRegexpOp, + }, nil default: return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, op.ToString()) } @@ -107,14 +107,14 @@ func (ast *astCompiler) translateLogicalExpr(opname string, left, right sqlparse return nil, err } - var logic func(l, r boolean) boolean + var logic func(l, r Expr, env *ExpressionEnv) (boolean, error) switch opname { case "AND": - logic = func(l, r boolean) boolean { return l.and(r) } + logic = func(l, r Expr, env *ExpressionEnv) (boolean, error) { return opAnd(l, r, env) } case "OR": - logic = func(l, r boolean) boolean { return l.or(r) } + logic = func(l, r Expr, env *ExpressionEnv) (boolean, error) { return opOr(l, r, env) } case "XOR": - logic = func(l, r boolean) boolean { return l.xor(r) } + logic = func(l, r Expr, env *ExpressionEnv) (boolean, error) { return opXor(l, r, env) } default: panic("unexpected logical operator") } @@ -129,6 +129,31 @@ func (ast *astCompiler) translateLogicalExpr(opname string, left, right sqlparse }, nil } +func (ast *astCompiler) translateIntervalExpr(needle sqlparser.Expr, haystack []sqlparser.Expr) (Expr, error) { + exprs := make([]Expr, 0, len(haystack)+1) + + expr, err := ast.translateExpr(needle) + if err != nil { + return nil, err + } + + exprs = append(exprs, expr) + for _, e := range haystack { + expr, err := ast.translateExpr(e) + if err != nil { + return nil, err + } + exprs = append(exprs, expr) + } + + return &IntervalExpr{ + CallExpr{ + Arguments: exprs, + Method: "INTERVAL", + }, + }, nil +} + func (ast *astCompiler) translateIsExpr(left sqlparser.Expr, op sqlparser.IsExprOperator) (Expr, error) { expr, err := ast.translateExpr(left) if err != nil { @@ -158,35 +183,58 @@ func (ast *astCompiler) translateIsExpr(left sqlparser.Expr, op sqlparser.IsExpr }, nil } -func (ast *astCompiler) getCollation(expr sqlparser.Expr) collations.TypedCollation { - collation := collations.TypedCollation{ - Coercibility: collations.CoerceCoercible, - Repertoire: collations.RepertoireUnicode, +func (ast *astCompiler) translateBindVar(arg *sqlparser.Argument) (Expr, error) { + bvar := NewBindVar(arg.Name, arg.Type, ast.cfg.Collation) + + if !bvar.typed() { + ast.untyped++ } - if ast.lookup != nil { - collation.Collation = ast.lookup.CollationForExpr(expr) - if collation.Collation == collations.Unknown { - collation.Collation = ast.lookup.DefaultCollation() - } - } else { - collation.Collation = collations.Default() + return bvar, nil +} + +func (ast *astCompiler) translateColOffset(col *sqlparser.Offset) (Expr, error) { + var typ sqltypes.Type = sqltypes.Unknown + var coll collations.ID + if ast.cfg.ResolveType != nil { + typ, coll, _ = ast.cfg.ResolveType(col.Original) } - return collation + if coll == collations.Unknown { + coll = ast.cfg.Collation + } + + column := NewColumn(col.V, typ, coll) + if !column.typed() { + ast.untyped++ + } + return column, nil } func (ast *astCompiler) translateColName(colname *sqlparser.ColName) (Expr, error) { - if ast.lookup == nil { - return nil, vterrors.Wrap(translateExprNotSupported(colname), "cannot lookup column") + if ast.cfg.ResolveColumn == nil { + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "cannot lookup column '%s' (column access not supported here)", sqlparser.String(colname)) } - idx, err := ast.lookup.ColumnLookup(colname) + idx, err := ast.cfg.ResolveColumn(colname) if err != nil { return nil, err } - collation := ast.getCollation(colname) - return NewColumn(idx, collation), nil + var typ sqltypes.Type = sqltypes.Unknown + var coll collations.ID + if ast.cfg.ResolveType != nil { + typ, coll, _ = ast.cfg.ResolveType(colname) + } + if coll == collations.Unknown { + coll = ast.cfg.Collation + } + + column := NewColumn(idx, typ, coll) + + if !column.typed() { + ast.untyped++ + } + return column, nil } -func (ast *astCompiler) translateLiteral(lit *sqlparser.Literal) (*Literal, error) { +func translateLiteral(lit *sqlparser.Literal, collation collations.ID) (*Literal, error) { switch lit.Type { case sqlparser.IntVal: return NewLiteralIntegralFromBytes(lit.Bytes()) @@ -195,12 +243,13 @@ func (ast *astCompiler) translateLiteral(lit *sqlparser.Literal) (*Literal, erro case sqlparser.DecimalVal: return NewLiteralDecimalFromBytes(lit.Bytes()) case sqlparser.StrVal: - collation := ast.getCollation(lit) - return NewLiteralString(lit.Bytes(), collation), nil + return NewLiteralString(lit.Bytes(), defaultCoercionCollation(collation)), nil case sqlparser.HexNum: return NewLiteralBinaryFromHexNum(lit.Bytes()) case sqlparser.HexVal: return NewLiteralBinaryFromHex(lit.Bytes()) + case sqlparser.BitVal: + return NewLiteralBinaryFromBit(lit.Bytes()) case sqlparser.DateVal: return NewLiteralDateFromBytes(lit.Bytes()) case sqlparser.TimeVal: @@ -235,6 +284,10 @@ func (ast *astCompiler) translateBinaryExpr(binary *sqlparser.BinaryExpr) (Expr, return &ArithmeticExpr{BinaryExpr: binaryExpr, Op: &opArithMul{}}, nil case sqlparser.DivOp: return &ArithmeticExpr{BinaryExpr: binaryExpr, Op: &opArithDiv{}}, nil + case sqlparser.IntDivOp: + return &ArithmeticExpr{BinaryExpr: binaryExpr, Op: &opArithIntDiv{}}, nil + case sqlparser.ModOp: + return &ArithmeticExpr{BinaryExpr: binaryExpr, Op: &opArithMod{}}, nil case sqlparser.BitAndOp: return &BitwiseExpr{BinaryExpr: binaryExpr, Op: &opBitAnd{}}, nil case sqlparser.BitOrOp: @@ -272,13 +325,13 @@ func (ast *astCompiler) translateCollateExpr(collate *sqlparser.CollateExpr) (Ex return nil, err } coll := collations.Local().LookupByName(collate.Collation) - if coll == nil { + if coll == collations.Unknown { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Unknown collation: '%s'", collate.Collation) } return &CollateExpr{ UnaryExpr: UnaryExpr{expr}, TypedCollation: collations.TypedCollation{ - Collation: coll.ID(), + Collation: coll, Coercibility: collations.CoerceExplicit, Repertoire: collations.RepertoireUnicode, }, @@ -296,10 +349,10 @@ func (ast *astCompiler) translateIntroducerExpr(introduced *sqlparser.Introducer collation = collations.CollationBinaryID } else { defaultCollation := collations.Local().DefaultCollationForCharset(introduced.CharacterSet[1:]) - if defaultCollation == nil { + if defaultCollation == collations.Unknown { panic(fmt.Sprintf("unknown character set: %s", introduced.CharacterSet)) } - collation = defaultCollation.ID() + collation = defaultCollation } switch lit := expr.(type) { @@ -313,34 +366,34 @@ func (ast *astCompiler) translateIntroducerExpr(introduced *sqlparser.Introducer return nil, err } } + return expr, nil case *BindVariable: - if lit.tuple { + if lit.Type == sqltypes.Tuple { panic("parser allowed introducer before tuple") } - switch collation { - case collations.CollationBinaryID: - lit.coerce = sqltypes.VarBinary - lit.col = collationBinary - default: - lit.coerce = sqltypes.VarChar - lit.col.Collation = collation - } + return &IntroducerExpr{ + UnaryExpr: UnaryExpr{expr}, + TypedCollation: collations.TypedCollation{ + Collation: collation, + Coercibility: collations.CoerceExplicit, + Repertoire: collations.RepertoireUnicode, + }, + }, nil default: panic("character set introducers are only supported for literals and arguments") } - return expr, nil } func (ast *astCompiler) translateIntegral(lit *sqlparser.Literal) (int, bool, error) { if lit == nil { return 0, false, nil } - literal, err := ast.translateLiteral(lit) + literal, err := translateLiteral(lit, ast.cfg.Collation) if err != nil { return 0, false, err } - return int(evalToNumeric(literal.inner).toUint64().u), true, nil + return int(evalToInt64(literal.inner).toUint64().u), true, nil } func (ast *astCompiler) translateUnaryExpr(unary *sqlparser.UnaryExpr) (Expr, error) { @@ -357,7 +410,7 @@ func (ast *astCompiler) translateUnaryExpr(unary *sqlparser.UnaryExpr) (Expr, er case sqlparser.TildaOp: return &BitwiseNotExpr{UnaryExpr: UnaryExpr{expr}}, nil case sqlparser.NStringOp: - return &ConvertExpr{UnaryExpr: UnaryExpr{expr}, Type: "NCHAR", Collation: collations.CollationUtf8ID}, nil + return &ConvertExpr{UnaryExpr: UnaryExpr{expr}, Type: "NCHAR", Collation: collations.CollationUtf8mb3ID}, nil default: return nil, translateExprNotSupported(unary) } @@ -443,22 +496,17 @@ func (ast *astCompiler) translateExpr(e sqlparser.Expr) (Expr, error) { case sqlparser.BoolVal: return NewLiteralBool(bool(node)), nil case *sqlparser.ColName: - ast.entities.columns++ return ast.translateColName(node) case *sqlparser.Offset: - ast.entities.columns++ - return NewColumn(node.V, ast.getCollation(node)), nil + return ast.translateColOffset(node) case *sqlparser.ComparisonExpr: return ast.translateComparisonExpr(node.Operator, node.Left, node.Right) - case sqlparser.Argument: - ast.entities.bvars++ - collation := ast.getCollation(e) - return NewBindVar(string(node), collation), nil + case *sqlparser.Argument: + return ast.translateBindVar(node) case sqlparser.ListArg: - ast.entities.bvars++ - return NewBindVarTuple(string(node)), nil + return NewBindVarTuple(string(node), ast.cfg.Collation), nil case *sqlparser.Literal: - return ast.translateLiteral(node) + return translateLiteral(node, ast.cfg.Collation) case *sqlparser.AndExpr: return ast.translateLogicalExpr("AND", node.Left, node.Right) case *sqlparser.OrExpr: @@ -477,6 +525,8 @@ func (ast *astCompiler) translateExpr(e sqlparser.Expr) (Expr, error) { return ast.translateCollateExpr(node) case *sqlparser.IntroducerExpr: return ast.translateIntroducerExpr(node) + case *sqlparser.IntervalFuncExpr: + return ast.translateIntervalExpr(node.Expr, node.Exprs) case *sqlparser.IsExpr: return ast.translateIsExpr(node.Left, node.Right) case sqlparser.Callable: @@ -495,15 +545,45 @@ func (ast *astCompiler) translateExpr(e sqlparser.Expr) (Expr, error) { } type astCompiler struct { - lookup TranslationLookup - entities struct { - columns int - bvars int - } + cfg *Config + untyped int } -func TranslateEx(e sqlparser.Expr, lookup TranslationLookup, simplify bool) (Expr, error) { - ast := astCompiler{lookup: lookup} +type ColumnResolver func(name *sqlparser.ColName) (int, error) +type TypeResolver func(expr sqlparser.Expr) (sqltypes.Type, collations.ID, bool) + +type OptimizationLevel int8 + +const ( + OptimizationLevelDefault OptimizationLevel = iota + OptimizationLevelSimplify + OptimizationLevelCompile + OptimizationLevelCompilerDebug + OptimizationLevelMax + OptimizationLevelNone OptimizationLevel = -1 +) + +type Config struct { + ResolveColumn ColumnResolver + ResolveType TypeResolver + + Collation collations.ID + Optimization OptimizationLevel + CompilerErr error +} + +func Translate(e sqlparser.Expr, cfg *Config) (Expr, error) { + if cfg == nil { + cfg = &Config{} + } + if cfg.Collation == collations.Unknown { + cfg.Collation = collations.Default() + } + if cfg.Optimization == OptimizationLevelDefault { + cfg.Optimization = OptimizationLevelSimplify + } + + ast := astCompiler{cfg: cfg} expr, err := ast.translateExpr(e) if err != nil { @@ -514,19 +594,43 @@ func TranslateEx(e sqlparser.Expr, lookup TranslationLookup, simplify bool) (Exp return nil, err } - if simplify { - var staticEnv ExpressionEnv - if lookup != nil { - staticEnv.DefaultCollation = lookup.DefaultCollation() - } else { - staticEnv.DefaultCollation = collations.Default() + if cfg.Optimization >= OptimizationLevelSimplify && cfg.Optimization != OptimizationLevelCompilerDebug { + staticEnv := EmptyExpressionEnv() + expr, err = simplifyExpr(staticEnv, expr) + } + + if cfg.Optimization >= OptimizationLevelCompile && ast.untyped == 0 { + comp := compiler{cfg: cfg} + var ct ctype + if ct, cfg.CompilerErr = comp.compile(expr); cfg.CompilerErr == nil { + expr = &CompiledExpr{code: comp.asm.ins, original: expr, stack: comp.asm.stack.max, typed: ct.Type} } - expr, err = simplifyExpr(&staticEnv, expr) } + return expr, err } -// Translate translates between AST expressions and executable expressions -func Translate(e sqlparser.Expr, lookup TranslationLookup) (Expr, error) { - return TranslateEx(e, lookup, true) +type FieldResolver []*querypb.Field + +func (fields FieldResolver) Column(col *sqlparser.ColName) (int, error) { + name := col.CompliantName() + for i, f := range fields { + if f.Name == name { + return i, nil + } + } + return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unknown column: %q", sqlparser.String(col)) +} + +func (fields FieldResolver) Type(expr sqlparser.Expr) (sqltypes.Type, collations.ID, bool) { + switch expr := expr.(type) { + case *sqlparser.ColName: + name := expr.CompliantName() + for _, f := range fields { + if f.Name == name { + return f.Type, collations.ID(f.Charset), true + } + } + } + return sqltypes.Unknown, collations.Unknown, false } diff --git a/go/vt/vtgate/evalengine/translate_builtin.go b/go/vt/vtgate/evalengine/translate_builtin.go index 4228970ecbd..4a4c3f1d9d2 100644 --- a/go/vt/vtgate/evalengine/translate_builtin.go +++ b/go/vt/vtgate/evalengine/translate_builtin.go @@ -67,6 +67,8 @@ func (ast *astCompiler) translateFuncExpr(fn *sqlparser.FuncExpr) (Expr, error) return builtinIfNullRewrite(args) case "nullif": return builtinNullIfRewrite(args) + case "if": + return builtinIfRewrite(args) case "coalesce": if len(args) == 0 { return nil, argError(method) @@ -96,22 +98,183 @@ func (ast *astCompiler) translateFuncExpr(fn *sqlparser.FuncExpr) (Expr, error) if len(args) != 1 { return nil, argError(method) } - return &builtinHex{CallExpr: call}, nil + return &builtinHex{CallExpr: call, collate: ast.cfg.Collation}, nil + case "unhex": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinUnhex{CallExpr: call}, nil case "ceil", "ceiling": if len(args) != 1 { return nil, argError(method) } return &builtinCeil{CallExpr: call}, nil + case "floor": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinFloor{CallExpr: call}, nil + case "abs": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinAbs{CallExpr: call}, nil + case "pi": + if len(args) != 0 { + return nil, argError(method) + } + return &builtinPi{CallExpr: call}, nil + case "acos": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinAcos{CallExpr: call}, nil + case "asin": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinAsin{CallExpr: call}, nil + case "atan": + switch len(args) { + case 1: + return &builtinAtan{CallExpr: call}, nil + case 2: + return &builtinAtan2{CallExpr: call}, nil + default: + return nil, argError(method) + } + case "atan2": + if len(args) != 2 { + return nil, argError(method) + } + return &builtinAtan2{CallExpr: call}, nil + case "cos": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinCos{CallExpr: call}, nil + case "cot": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinCot{CallExpr: call}, nil + case "sin": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinSin{CallExpr: call}, nil + case "tan": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinTan{CallExpr: call}, nil + case "degrees": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinDegrees{CallExpr: call}, nil + case "radians": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinRadians{CallExpr: call}, nil + case "exp": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinExp{CallExpr: call}, nil + case "ln": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinLn{CallExpr: call}, nil + case "log": + switch len(args) { + case 1: + return &builtinLn{CallExpr: call}, nil + case 2: + return &builtinLog{CallExpr: call}, nil + default: + return nil, argError(method) + } + case "log10": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinLog10{CallExpr: call}, nil + case "mod": + if len(args) != 2 { + return nil, argError(method) + } + return &ArithmeticExpr{ + BinaryExpr: BinaryExpr{ + Left: args[0], + Right: args[1], + }, + Op: &opArithMod{}, + }, nil + case "log2": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinLog2{CallExpr: call}, nil + case "pow", "power": + if len(args) != 2 { + return nil, argError(method) + } + return &builtinPow{CallExpr: call}, nil + case "sign": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinSign{CallExpr: call}, nil + case "sqrt": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinSqrt{CallExpr: call}, nil + case "round": + switch len(args) { + case 1, 2: + return &builtinRound{CallExpr: call}, nil + default: + return nil, argError(method) + } + case "truncate": + if len(args) != 2 { + return nil, argError(method) + } + return &builtinTruncate{CallExpr: call}, nil + case "crc32": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinCrc32{CallExpr: call}, nil + case "conv": + if len(args) != 3 { + return nil, argError(method) + } + return &builtinConv{CallExpr: call, collate: ast.cfg.Collation}, nil + case "left", "right": + if len(args) != 2 { + return nil, argError(method) + } + return &builtinLeftRight{CallExpr: call, collate: ast.cfg.Collation, left: method == "left"}, nil + case "lpad", "rpad": + if len(args) != 3 { + return nil, argError(method) + } + return &builtinPad{CallExpr: call, collate: ast.cfg.Collation, left: method == "lpad"}, nil case "lower", "lcase": if len(args) != 1 { return nil, argError(method) } - return &builtinChangeCase{CallExpr: call, upcase: false}, nil + return &builtinChangeCase{CallExpr: call, upcase: false, collate: ast.cfg.Collation}, nil case "upper", "ucase": if len(args) != 1 { return nil, argError(method) } - return &builtinChangeCase{CallExpr: call, upcase: true}, nil + return &builtinChangeCase{CallExpr: call, upcase: true, collate: ast.cfg.Collation}, nil case "char_length", "character_length": if len(args) != 1 { return nil, argError(method) @@ -132,11 +295,26 @@ func (ast *astCompiler) translateFuncExpr(fn *sqlparser.FuncExpr) (Expr, error) return nil, argError(method) } return &builtinASCII{CallExpr: call}, nil + case "ord": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinOrd{CallExpr: call, collate: ast.cfg.Collation}, nil case "repeat": if len(args) != 2 { return nil, argError(method) } - return &builtinRepeat{CallExpr: call}, nil + return &builtinRepeat{CallExpr: call, collate: ast.cfg.Collation}, nil + case "concat": + if len(args) < 1 { + return nil, argError(method) + } + return &builtinConcat{CallExpr: call, collate: ast.cfg.Collation}, nil + case "concat_ws": + if len(args) < 2 { + return nil, argError(method) + } + return &builtinConcatWs{CallExpr: call, collate: ast.cfg.Collation}, nil case "from_base64": if len(args) != 1 { return nil, argError(method) @@ -146,7 +324,7 @@ func (ast *astCompiler) translateFuncExpr(fn *sqlparser.FuncExpr) (Expr, error) if len(args) != 1 { return nil, argError(method) } - return &builtinToBase64{CallExpr: call}, nil + return &builtinToBase64{CallExpr: call, collate: ast.cfg.Collation}, nil case "json_depth": if len(args) != 1 { return nil, argError(method) @@ -159,6 +337,243 @@ func (ast *astCompiler) translateFuncExpr(fn *sqlparser.FuncExpr) (Expr, error) default: return nil, argError(method) } + case "curdate", "current_date": + if len(args) != 0 { + return nil, argError(method) + } + return &builtinCurdate{CallExpr: call}, nil + case "utc_date": + if len(args) != 0 { + return nil, argError(method) + } + return &builtinUtcDate{CallExpr: call}, nil + case "date_format": + if len(args) != 2 { + return nil, argError(method) + } + return &builtinDateFormat{CallExpr: call, collate: ast.cfg.Collation}, nil + case "date": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinDate{CallExpr: call}, nil + case "dayofmonth", "day": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinDayOfMonth{CallExpr: call}, nil + case "dayofweek": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinDayOfWeek{CallExpr: call}, nil + case "dayofyear": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinDayOfYear{CallExpr: call}, nil + case "from_unixtime": + switch len(args) { + case 1, 2: + return &builtinFromUnixtime{CallExpr: call, collate: ast.cfg.Collation}, nil + default: + return nil, argError(method) + } + case "hour": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinHour{CallExpr: call}, nil + case "makedate": + if len(args) != 2 { + return nil, argError(method) + } + return &builtinMakedate{CallExpr: call}, nil + case "maketime": + if len(args) != 3 { + return nil, argError(method) + } + return &builtinMaketime{CallExpr: call}, nil + case "microsecond": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinMicrosecond{CallExpr: call}, nil + case "minute": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinMinute{CallExpr: call}, nil + case "month": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinMonth{CallExpr: call}, nil + case "monthname": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinMonthName{CallExpr: call, collate: ast.cfg.Collation}, nil + case "quarter": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinQuarter{CallExpr: call}, nil + case "second": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinSecond{CallExpr: call}, nil + case "time": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinTime{CallExpr: call}, nil + case "unix_timestamp": + switch len(args) { + case 0, 1: + return &builtinUnixTimestamp{CallExpr: call}, nil + default: + return nil, argError(method) + } + case "week": + switch len(args) { + case 1, 2: + return &builtinWeek{CallExpr: call}, nil + default: + return nil, argError(method) + } + case "weekday": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinWeekDay{CallExpr: call}, nil + case "weekofyear": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinWeekOfYear{CallExpr: call}, nil + case "year": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinYear{CallExpr: call}, nil + case "yearweek": + switch len(args) { + case 1, 2: + return &builtinYearWeek{CallExpr: call}, nil + default: + return nil, argError(method) + } + case "inet_aton": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinInetAton{CallExpr: call}, nil + case "inet_ntoa": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinInetNtoa{CallExpr: call, collate: ast.cfg.Collation}, nil + case "inet6_aton": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinInet6Aton{CallExpr: call}, nil + case "inet6_ntoa": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinInet6Ntoa{CallExpr: call, collate: ast.cfg.Collation}, nil + case "is_ipv4": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinIsIPV4{CallExpr: call}, nil + case "is_ipv4_compat": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinIsIPV4Compat{CallExpr: call}, nil + case "is_ipv4_mapped": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinIsIPV4Mapped{CallExpr: call}, nil + case "is_ipv6": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinIsIPV6{CallExpr: call}, nil + case "bin_to_uuid": + switch len(args) { + case 1, 2: + return &builtinBinToUUID{CallExpr: call, collate: ast.cfg.Collation}, nil + default: + return nil, argError(method) + } + case "is_uuid": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinIsUUID{CallExpr: call}, nil + case "uuid": + if len(args) != 0 { + return nil, argError(method) + } + return &builtinUUID{CallExpr: call}, nil + case "uuid_to_bin": + switch len(args) { + case 1, 2: + return &builtinUUIDToBin{CallExpr: call}, nil + default: + return nil, argError(method) + } + case "user", "current_user", "session_user", "system_user": + if len(args) != 0 { + return nil, argError(method) + } + return &builtinUser{CallExpr: call}, nil + case "database", "schema": + if len(args) != 0 { + return nil, argError(method) + } + return &builtinDatabase{CallExpr: call}, nil + case "version": + if len(args) != 0 { + return nil, argError(method) + } + return &builtinVersion{CallExpr: call}, nil + case "md5": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinMD5{CallExpr: call, collate: ast.cfg.Collation}, nil + case "random_bytes": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinRandomBytes{CallExpr: call}, nil + case "sha1", "sha": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinSHA1{CallExpr: call, collate: ast.cfg.Collation}, nil + case "sha2": + if len(args) != 2 { + return nil, argError(method) + } + return &builtinSHA2{CallExpr: call, collate: ast.cfg.Collation}, nil + case "convert_tz": + if len(args) != 3 { + return nil, argError(method) + } + return &builtinConvertTz{CallExpr: call}, nil + case "strcmp": + if len(args) != 2 { + return nil, argError(method) + } + return &builtinStrcmp{CallExpr: call, collate: ast.cfg.Collation}, nil default: return nil, translateExprNotSupported(fn) } @@ -179,7 +594,7 @@ func (ast *astCompiler) translateCallable(call sqlparser.Callable) (Expr, error) var ws builtinWeightString var err error - ws.String, err = ast.translateExpr(call.Expr) + ws.Expr, err = ast.translateExpr(call.Expr) if err != nil { return nil, err } @@ -279,6 +694,238 @@ func (ast *astCompiler) translateCallable(call sqlparser.Callable) (Expr, error) Method: "JSON_KEYS", }}, nil + case *sqlparser.CurTimeFuncExpr: + if call.Fsp > 6 { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Too-big precision 12 specified for '%s'. Maximum is 6.", call.Name.String()) + } + + var cexpr = CallExpr{Arguments: nil, Method: call.Name.String()} + var utc, onlyTime bool + switch call.Name.Lowered() { + case "current_time", "curtime": + onlyTime = true + case "utc_time": + onlyTime = true + utc = true + case "utc_timestamp": + utc = true + case "sysdate": + return &builtinSysdate{ + CallExpr: cexpr, + prec: uint8(call.Fsp), + }, nil + } + return &builtinNow{ + CallExpr: cexpr, + utc: utc, + onlyTime: onlyTime, + prec: uint8(call.Fsp), + }, nil + + case *sqlparser.TrimFuncExpr: + var args []Expr + str, err := ast.translateExpr(call.StringArg) + if err != nil { + return nil, err + } + args = append(args, str) + if call.TrimArg != nil { + trim, err := ast.translateExpr(call.TrimArg) + if err != nil { + return nil, err + } + args = append(args, trim) + } + + var cexpr = CallExpr{Arguments: args, Method: call.TrimFuncType.ToString()} + return &builtinTrim{ + CallExpr: cexpr, + collate: ast.cfg.Collation, + trim: call.Type, + }, nil + + case *sqlparser.IntervalDateExpr: + var err error + args := make([]Expr, 2) + + args[0], err = ast.translateExpr(call.Date) + if err != nil { + return nil, err + } + args[1], err = ast.translateExpr(call.Interval) + if err != nil { + return nil, err + } + + cexpr := CallExpr{Arguments: args, Method: call.FnName()} + return &builtinDateMath{ + CallExpr: cexpr, + sub: call.IsSubtraction(), + unit: call.NormalizedUnit(), + collate: ast.cfg.Collation, + }, nil + + case *sqlparser.RegexpLikeExpr: + input, err := ast.translateExpr(call.Expr) + if err != nil { + return nil, err + } + + pattern, err := ast.translateExpr(call.Pattern) + if err != nil { + return nil, err + } + + args := []Expr{input, pattern} + + if call.MatchType != nil { + matchType, err := ast.translateExpr(call.MatchType) + if err != nil { + return nil, err + } + args = append(args, matchType) + } + + return &builtinRegexpLike{ + CallExpr: CallExpr{Arguments: args, Method: "REGEXP_LIKE"}, + Negate: false, + }, nil + + case *sqlparser.RegexpInstrExpr: + input, err := ast.translateExpr(call.Expr) + if err != nil { + return nil, err + } + + pattern, err := ast.translateExpr(call.Pattern) + if err != nil { + return nil, err + } + + args := []Expr{input, pattern} + + if call.Position != nil { + position, err := ast.translateExpr(call.Position) + if err != nil { + return nil, err + } + args = append(args, position) + } + + if call.Occurrence != nil { + occurrence, err := ast.translateExpr(call.Occurrence) + if err != nil { + return nil, err + } + args = append(args, occurrence) + } + + if call.ReturnOption != nil { + returnOption, err := ast.translateExpr(call.ReturnOption) + if err != nil { + return nil, err + } + args = append(args, returnOption) + } + + if call.MatchType != nil { + matchType, err := ast.translateExpr(call.MatchType) + if err != nil { + return nil, err + } + args = append(args, matchType) + } + + return &builtinRegexpInstr{ + CallExpr: CallExpr{Arguments: args, Method: "REGEXP_INSTR"}, + }, nil + + case *sqlparser.RegexpSubstrExpr: + input, err := ast.translateExpr(call.Expr) + if err != nil { + return nil, err + } + + pattern, err := ast.translateExpr(call.Pattern) + if err != nil { + return nil, err + } + + args := []Expr{input, pattern} + + if call.Position != nil { + position, err := ast.translateExpr(call.Position) + if err != nil { + return nil, err + } + args = append(args, position) + } + + if call.Occurrence != nil { + occurrence, err := ast.translateExpr(call.Occurrence) + if err != nil { + return nil, err + } + args = append(args, occurrence) + } + + if call.MatchType != nil { + matchType, err := ast.translateExpr(call.MatchType) + if err != nil { + return nil, err + } + args = append(args, matchType) + } + + return &builtinRegexpSubstr{ + CallExpr: CallExpr{Arguments: args, Method: "REGEXP_SUBSTR"}, + }, nil + + case *sqlparser.RegexpReplaceExpr: + input, err := ast.translateExpr(call.Expr) + if err != nil { + return nil, err + } + + pattern, err := ast.translateExpr(call.Pattern) + if err != nil { + return nil, err + } + + repl, err := ast.translateExpr(call.Repl) + if err != nil { + return nil, err + } + + args := []Expr{input, pattern, repl} + + if call.Position != nil { + position, err := ast.translateExpr(call.Position) + if err != nil { + return nil, err + } + args = append(args, position) + } + + if call.Occurrence != nil { + occurrence, err := ast.translateExpr(call.Occurrence) + if err != nil { + return nil, err + } + args = append(args, occurrence) + } + + if call.MatchType != nil { + matchType, err := ast.translateExpr(call.MatchType) + if err != nil { + return nil, err + } + args = append(args, matchType) + } + + return &builtinRegexpReplace{ + CallExpr: CallExpr{Arguments: args, Method: "REGEXP_REPLACE"}, + }, nil default: return nil, translateExprNotSupported(call) } @@ -324,36 +971,55 @@ func builtinIfNullRewrite(args []Expr) (Expr, error) { if len(args) != 2 { return nil, argError("IFNULL") } - var result CaseExpr - result.cases = append(result.cases, WhenThen{ - when: &IsExpr{ - UnaryExpr: UnaryExpr{args[0]}, - Op: sqlparser.IsNullOp, - Check: func(e eval) bool { - return e == nil + return &CaseExpr{ + cases: []WhenThen{{ + when: &IsExpr{ + UnaryExpr: UnaryExpr{args[0]}, + Op: sqlparser.IsNullOp, + Check: func(e eval) bool { + return e == nil + }, }, - }, - then: args[1], - }) - result.Else = args[0] - return &result, nil + then: args[1], + }}, + Else: args[0], + }, nil } func builtinNullIfRewrite(args []Expr) (Expr, error) { if len(args) != 2 { return nil, argError("NULLIF") } - var result CaseExpr - result.cases = append(result.cases, WhenThen{ - when: &ComparisonExpr{ - BinaryExpr: BinaryExpr{ - Left: args[0], - Right: args[1], + return &CaseExpr{ + cases: []WhenThen{{ + when: &ComparisonExpr{ + BinaryExpr: BinaryExpr{ + Left: args[0], + Right: args[1], + }, + Op: compareEQ{}, }, - Op: compareEQ{}, - }, - then: NullExpr, - }) - result.Else = args[0] - return &result, nil + then: NullExpr, + }}, + Else: args[0], + }, nil +} + +func builtinIfRewrite(args []Expr) (Expr, error) { + if len(args) != 3 { + return nil, argError("IF") + } + return &CaseExpr{ + cases: []WhenThen{{ + when: &IsExpr{ + UnaryExpr: UnaryExpr{args[0]}, + Op: sqlparser.IsTrueOp, + Check: func(e eval) bool { + return evalIsTruthy(e) == boolTrue + }, + }, + then: args[1], + }}, + Else: args[2], + }, nil } diff --git a/go/vt/vtgate/evalengine/translate_card.go b/go/vt/vtgate/evalengine/translate_card.go index 650563daaea..c8bd04d1dcc 100644 --- a/go/vt/vtgate/evalengine/translate_card.go +++ b/go/vt/vtgate/evalengine/translate_card.go @@ -19,6 +19,7 @@ package evalengine import ( "fmt" + "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -30,7 +31,7 @@ func errCardinality(expected int) error { func (ast *astCompiler) cardinality(expr Expr) int { switch expr := expr.(type) { case *BindVariable: - if expr.tuple { + if expr.Type == sqltypes.Tuple { return -1 } return 1 @@ -130,10 +131,14 @@ func (ast *astCompiler) cardExpr(expr Expr) error { return ast.cardUnary(expr.Inner) case *CollateExpr: return ast.cardUnary(expr.Inner) + case *IntroducerExpr: + return ast.cardUnary(expr.Inner) case *IsExpr: return ast.cardUnary(expr.Inner) case *BitwiseNotExpr: return ast.cardUnary(expr.Inner) + case *NotExpr: + return ast.cardUnary(expr.Inner) case *ArithmeticExpr: return ast.cardBinary(expr.Left, expr.Right) case *LogicalExpr: @@ -162,7 +167,7 @@ func (ast *astCompiler) cardExpr(expr Expr) error { } } case *BindVariable: - if !r.tuple { + if r.Type != sqltypes.Tuple { return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "rhs of an In operation should be a tuple") } if left != 1 { diff --git a/go/vt/vtgate/evalengine/translate_convert.go b/go/vt/vtgate/evalengine/translate_convert.go index 847494780d2..5560315f8e2 100644 --- a/go/vt/vtgate/evalengine/translate_convert.go +++ b/go/vt/vtgate/evalengine/translate_convert.go @@ -20,27 +20,24 @@ import ( "strings" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql/decimal" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine/internal/decimal" ) func (ast *astCompiler) binaryCollationForCollation(collation collations.ID) collations.ID { - binary := collation.Get() + binary := colldata.Lookup(collation) if binary == nil { return collations.Unknown } - binaryCollation := collations.Local().BinaryCollationForCharset(binary.Charset().Name()) - if binaryCollation == nil { - return collations.Unknown - } - return binaryCollation.ID() + return collations.Local().BinaryCollationForCharset(binary.Charset().Name()) } func (ast *astCompiler) translateConvertCharset(charset string, binary bool) (collations.ID, error) { if charset == "" { - collation := ast.lookup.DefaultCollation() + collation := ast.cfg.Collation if binary { collation = ast.binaryCollationForCollation(collation) } @@ -50,11 +47,10 @@ func (ast *astCompiler) translateConvertCharset(charset string, binary bool) (co return collation, nil } charset = strings.ToLower(charset) - collation := collations.Local().DefaultCollationForCharset(charset) - if collation == nil { + collationID := collations.Local().DefaultCollationForCharset(charset) + if collationID == collations.Unknown { return collations.Unknown, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Unknown character set: '%s'", charset) } - collationID := collation.ID() if binary { collationID = ast.binaryCollationForCollation(collationID) if collationID == collations.Unknown { @@ -105,13 +101,13 @@ func (ast *astCompiler) translateConvertExpr(expr sqlparser.Expr, convertType *s convert.Scale, sqlparser.String(expr), decimal.MyMaxScale) } case "NCHAR": - convert.Collation = collations.CollationUtf8ID + convert.Collation = collations.CollationUtf8mb3ID case "CHAR": convert.Collation, err = ast.translateConvertCharset(convertType.Charset.Name, convertType.Charset.Binary) if err != nil { return nil, err } - case "BINARY", "DOUBLE", "REAL", "SIGNED", "SIGNED INTEGER", "UNSIGNED", "UNSIGNED INTEGER", "JSON": + case "BINARY", "DOUBLE", "REAL", "SIGNED", "SIGNED INTEGER", "UNSIGNED", "UNSIGNED INTEGER", "JSON", "TIME", "DATETIME", "DATE": // Supported types for conv expression default: // For unsupported types, we should return an error on translation instead of returning an error on runtime. diff --git a/go/vt/vtgate/evalengine/translate_simplify.go b/go/vt/vtgate/evalengine/translate_simplify.go index b76adfe96a0..7f2261b4790 100644 --- a/go/vt/vtgate/evalengine/translate_simplify.go +++ b/go/vt/vtgate/evalengine/translate_simplify.go @@ -16,11 +16,7 @@ limitations under the License. package evalengine -import ( - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/vthash" -) +import "vitess.io/vitess/go/mysql/collations/colldata" func (expr *Literal) constant() bool { return true @@ -84,7 +80,7 @@ func (expr *LikeExpr) simplify(env *ExpressionEnv) error { if lit, ok := expr.Right.(*Literal); ok { if b, ok := lit.inner.(*evalBytes); ok && (b.isVarChar() || b.isBinary()) { expr.MatchCollation = b.col.Collation - coll := expr.MatchCollation.Get() + coll := colldata.Lookup(expr.MatchCollation) expr.Match = coll.Wildcard(b.bytes, 0, 0, 0) } } @@ -96,54 +92,10 @@ func (inexpr *InExpr) simplify(env *ExpressionEnv) error { return err } - tuple, ok := inexpr.Right.(TupleExpr) - if !ok { - return nil - } - - var ( - collation collations.ID - typ sqltypes.Type - optimize = true - ) - - for i, expr := range tuple { - if lit, ok := expr.(*Literal); ok { - thisColl := evalCollation(lit.inner).Collation - thisTyp := lit.inner.SQLType() - if i == 0 { - collation = thisColl - typ = thisTyp - continue - } - if collation == thisColl && typ == thisTyp { - continue - } - } - optimize = false - break + if err := inexpr.Right.simplify(env); err != nil { + return err } - if optimize { - inexpr.Hashed = make(map[vthash.Hash]int) - hasher := vthash.New() - for i, expr := range tuple { - lit := expr.(*Literal) - inner, ok := lit.inner.(hashable) - if !ok { - inexpr.Hashed = nil - break - } - - inner.Hash(&hasher) - hash := hasher.Sum128() - hasher.Reset() - - if _, found := inexpr.Hashed[hash]; !found { - inexpr.Hashed[hash] = i - } - } - } return nil } @@ -173,12 +125,12 @@ func (c *CallExpr) simplify(env *ExpressionEnv) error { } func (c *builtinWeightString) constant() bool { - return c.String.constant() + return c.Expr.constant() } func (c *builtinWeightString) simplify(env *ExpressionEnv) error { var err error - c.String, err = simplifyExpr(env, c.String) + c.Expr, err = simplifyExpr(env, c.Expr) return err } diff --git a/go/vt/vtgate/evalengine/translate_test.go b/go/vt/vtgate/evalengine/translate_test.go index ea8c1488882..d9ce3812abb 100644 --- a/go/vt/vtgate/evalengine/translate_test.go +++ b/go/vt/vtgate/evalengine/translate_test.go @@ -17,9 +17,11 @@ limitations under the License. package evalengine import ( + "context" "strings" "testing" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" @@ -85,8 +87,8 @@ func TestTranslateSimplification(t *testing.T) { {"coalesce(NULL, 2, NULL, 4)", ok("COALESCE(NULL, INT64(2), NULL, INT64(4))"), ok("INT64(2)")}, {"coalesce(NULL, NULL)", ok("COALESCE(NULL, NULL)"), ok("NULL")}, {"coalesce(NULL)", ok("COALESCE(NULL)"), ok("NULL")}, - {"weight_string('foobar')", ok(`WEIGHT_STRING(VARCHAR("foobar"))`), ok(`VARBINARY("\x00F\x00O\x00O\x00B\x00A\x00R")`)}, - {"weight_string('foobar' as char(12))", ok(`WEIGHT_STRING(VARCHAR("foobar") AS CHAR(12))`), ok(`VARBINARY("\x00F\x00O\x00O\x00B\x00A\x00R\x00 \x00 \x00 \x00 \x00 \x00 ")`)}, + {"weight_string('foobar')", ok(`WEIGHT_STRING(VARCHAR("foobar"))`), ok("VARBINARY(\"\\x1c\\xe5\\x1d\\xdd\\x1d\\xdd\\x1c`\\x1cG\\x1e3\")")}, + {"weight_string('foobar' as char(12))", ok(`WEIGHT_STRING(VARCHAR("foobar") AS CHAR(12))`), ok("VARBINARY(\"\\x1c\\xe5\\x1d\\xdd\\x1d\\xdd\\x1c`\\x1cG\\x1e3\")")}, {"case when 1 = 1 then 2 else 3 end", ok("CASE WHEN INT64(1) = INT64(1) THEN INT64(2) ELSE INT64(3)"), ok("INT64(2)")}, {"case when null then 2 when 12 = 4 then 'ohnoes' else 42 end", ok(`CASE WHEN NULL THEN INT64(2) WHEN INT64(12) = INT64(4) THEN VARCHAR("ohnoes") ELSE INT64(42)`), ok(`VARCHAR("42")`)}, {"convert('a', char(2) character set utf8mb4)", ok(`CONVERT(VARCHAR("a"), CHAR(2) CHARACTER SET utf8mb4_0900_ai_ci)`), ok(`VARCHAR("a")`)}, @@ -96,9 +98,9 @@ func TestTranslateSimplification(t *testing.T) { {"date'2022-10-03'", ok(`DATE("2022-10-03")`), ok(`DATE("2022-10-03")`)}, {"time'12:34:45'", ok(`TIME("12:34:45")`), ok(`TIME("12:34:45")`)}, {"timestamp'2022-10-03 12:34:45'", ok(`DATETIME("2022-10-03 12:34:45")`), ok(`DATETIME("2022-10-03 12:34:45")`)}, - {"date'2022'", err(`incorrect DATE value: '2022'`), err(`incorrect DATE value: '2022'`)}, - {"time'2022-10-03'", err(`incorrect TIME value: '2022-10-03'`), err(`incorrect TIME value: '2022-10-03'`)}, - {"timestamp'2022-10-03'", err(`incorrect DATETIME value: '2022-10-03'`), err(`incorrect DATETIME value: '2022-10-03'`)}, + {"date'2022'", err(`Incorrect DATE value: '2022'`), err(`Incorrect DATE value: '2022'`)}, + {"time'2022-10-03'", err(`Incorrect TIME value: '2022-10-03'`), err(`Incorrect TIME value: '2022-10-03'`)}, + {"timestamp'2022-10-03'", err(`Incorrect DATETIME value: '2022-10-03'`), err(`Incorrect DATETIME value: '2022-10-03'`)}, {"ifnull(12, 23)", ok(`CASE WHEN INT64(12) IS NULL THEN INT64(23) ELSE INT64(12)`), ok(`INT64(12)`)}, {"ifnull(null, 23)", ok(`CASE WHEN NULL IS NULL THEN INT64(23) ELSE NULL`), ok(`INT64(23)`)}, {"nullif(1, 1)", ok(`CASE WHEN INT64(1) = INT64(1) THEN NULL ELSE INT64(1)`), ok(`NULL`)}, @@ -106,8 +108,8 @@ func TestTranslateSimplification(t *testing.T) { {"12 between 5 and 20", ok("(INT64(12) >= INT64(5)) AND (INT64(12) <= INT64(20))"), ok(`INT64(1)`)}, {"12 not between 5 and 20", ok("(INT64(12) < INT64(5)) OR (INT64(12) > INT64(20))"), ok(`INT64(0)`)}, {"2 not between 5 and 20", ok("(INT64(2) < INT64(5)) OR (INT64(2) > INT64(20))"), ok(`INT64(1)`)}, - {"column0->\"$.c\"", ok("JSON_EXTRACT([COLUMN 0], VARCHAR(\"$.c\"))"), ok("JSON_EXTRACT([COLUMN 0], VARCHAR(\"$.c\"))")}, - {"column0->>\"$.c\"", ok("JSON_UNQUOTE(JSON_EXTRACT([COLUMN 0], VARCHAR(\"$.c\")))"), ok("JSON_UNQUOTE(JSON_EXTRACT([COLUMN 0], VARCHAR(\"$.c\")))")}, + {"json->\"$.c\"", ok("JSON_EXTRACT([COLUMN 0], VARCHAR(\"$.c\"))"), ok("JSON_EXTRACT([COLUMN 0], VARCHAR(\"$.c\"))")}, + {"json->>\"$.c\"", ok("JSON_UNQUOTE(JSON_EXTRACT([COLUMN 0], VARCHAR(\"$.c\")))"), ok("JSON_UNQUOTE(JSON_EXTRACT([COLUMN 0], VARCHAR(\"$.c\")))")}, } for _, tc := range testCases { @@ -117,8 +119,18 @@ func TestTranslateSimplification(t *testing.T) { t.Fatal(err) } + fields := FieldResolver([]*querypb.Field{ + {Name: "json", Type: sqltypes.TypeJSON, Charset: collations.CollationUtf8mb4ID}, + }) + + cfg := &Config{ + ResolveColumn: fields.Column, + Collation: collations.Default(), + Optimization: OptimizationLevelNone, + } + astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr - converted, err := TranslateEx(astExpr, &LookupIntegrationTest{45}, false) + converted, err := Translate(astExpr, cfg) if err != nil { if tc.converted.err == "" { t.Fatalf("failed to Convert (simplify=false): %v", err) @@ -130,7 +142,8 @@ func TestTranslateSimplification(t *testing.T) { } assert.Equal(t, tc.converted.literal, FormatExpr(converted)) - simplified, err := TranslateEx(astExpr, &LookupIntegrationTest{45}, true) + cfg.Optimization = OptimizationLevelSimplify + simplified, err := Translate(astExpr, cfg) if err != nil { if tc.simplified.err == "" { t.Fatalf("failed to Convert (simplify=true): %v", err) @@ -287,25 +300,24 @@ func TestEvaluate(t *testing.T) { stmt, err := sqlparser.Parse("select " + test.expression) require.NoError(t, err) astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr - sqltypesExpr, err := Translate(astExpr, LookupDefaultCollation(45)) + sqltypesExpr, err := Translate(astExpr, &Config{Collation: collations.Default()}) require.Nil(t, err) require.NotNil(t, sqltypesExpr) - env := EnvWithBindVars( - map[string]*querypb.BindVariable{ - "exp": sqltypes.Int64BindVariable(66), - "string_bind_variable": sqltypes.StringBindVariable("bar"), - "int32_bind_variable": sqltypes.Int32BindVariable(20), - "uint32_bind_variable": sqltypes.Uint32BindVariable(21), - "uint64_bind_variable": sqltypes.Uint64BindVariable(22), - "float_bind_variable": sqltypes.Float64BindVariable(2.2), - }, 0) + env := NewExpressionEnv(context.Background(), map[string]*querypb.BindVariable{ + "exp": sqltypes.Int64BindVariable(66), + "string_bind_variable": sqltypes.StringBindVariable("bar"), + "int32_bind_variable": sqltypes.Int32BindVariable(20), + "uint32_bind_variable": sqltypes.Uint32BindVariable(21), + "uint64_bind_variable": sqltypes.Uint64BindVariable(22), + "float_bind_variable": sqltypes.Float64BindVariable(2.2), + }, nil) // When r, err := env.Evaluate(sqltypesExpr) // Then require.NoError(t, err) - assert.Equal(t, test.expected, r.Value(), "expected %s", test.expected.String()) + assert.Equal(t, test.expected, r.Value(collations.Default()), "expected %s", test.expected.String()) }) } } @@ -333,7 +345,7 @@ func TestEvaluateTuple(t *testing.T) { stmt, err := sqlparser.Parse("select " + test.expression) require.NoError(t, err) astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr - sqltypesExpr, err := Translate(astExpr, LookupDefaultCollation(45)) + sqltypesExpr, err := Translate(astExpr, &Config{Collation: collations.Default()}) require.Nil(t, err) require.NotNil(t, sqltypesExpr) @@ -356,12 +368,6 @@ func TestTranslationFailures(t *testing.T) { expectedErr string }{ { - expression: "cast('2023-01-07 12:34:56' as date)", - expectedErr: "Unsupported type conversion: DATE", - }, { - expression: "cast('2023-01-07 12:34:56' as datetime(5))", - expectedErr: "Unsupported type conversion: DATETIME(5)", - }, { expression: "cast('3.4' as FLOAT)", expectedErr: "Unsupported type conversion: FLOAT", }, { @@ -376,7 +382,7 @@ func TestTranslationFailures(t *testing.T) { stmt, err := sqlparser.Parse("select " + testcase.expression) require.NoError(t, err) astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr - _, err = Translate(astExpr, LookupDefaultCollation(45)) + _, err = Translate(astExpr, &Config{Collation: collations.Default()}) require.EqualError(t, err, testcase.expectedErr) }) } @@ -412,7 +418,7 @@ func TestCardinalityWithBindVariables(t *testing.T) { } astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr - _, err = Translate(astExpr, LookupDefaultCollation(45)) + _, err = Translate(astExpr, &Config{Collation: collations.Default()}) return err }() diff --git a/go/vt/vtgate/evalengine/vm.go b/go/vt/vtgate/evalengine/vm.go new file mode 100644 index 00000000000..f86a7db0b1a --- /dev/null +++ b/go/vt/vtgate/evalengine/vm.go @@ -0,0 +1,111 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "errors" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/vthash" +) + +var errDeoptimize = errors.New("de-optimize") + +type vmstate struct { + stack []eval + sp int + + arena Arena + hash vthash.Hasher + err error + + flags struct { + cmp int + null bool + } +} + +func Deoptimize(expr Expr) Expr { + switch expr := expr.(type) { + case *CompiledExpr: + return expr.original + default: + return expr + } +} + +type CompiledExpr struct { + code []frame + typed sqltypes.Type + stack int + original Expr +} + +func (p *CompiledExpr) eval(env *ExpressionEnv) (eval, error) { + return p.original.eval(env) +} + +func (p *CompiledExpr) typeof(*ExpressionEnv, []*querypb.Field) (sqltypes.Type, typeFlag) { + return p.typed, 0 +} + +func (p *CompiledExpr) format(buf *formatter, depth int) { + p.original.format(buf, depth) +} + +func (p *CompiledExpr) constant() bool { + return p.original.constant() +} + +func (p *CompiledExpr) simplify(env *ExpressionEnv) error { + // No-op + return nil +} + +func (p *CompiledExpr) compile(c *compiler) (ctype, error) { + panic("called compile() on already compiled Expr") +} + +var _ Expr = (*CompiledExpr)(nil) + +func (env *ExpressionEnv) EvaluateVM(p *CompiledExpr) (EvalResult, error) { + env.vm.arena.reset() + env.vm.sp = 0 + env.vm.err = nil + if len(env.vm.stack) < p.stack { + env.vm.stack = make([]eval, p.stack) + } + + code := p.code + ip := 0 + + for ip < len(code) { + ip += code[ip](env) + if env.vm.err != nil { + goto err + } + } + return EvalResult{env.vm.stack[env.vm.sp-1]}, nil + +err: + if env.vm.err == errDeoptimize { + e, err := p.original.eval(env) + return EvalResult{e}, err + } + return EvalResult{}, env.vm.err +} diff --git a/go/vt/vtgate/evalengine/weights.go b/go/vt/vtgate/evalengine/weights.go new file mode 100644 index 00000000000..08ec844f357 --- /dev/null +++ b/go/vt/vtgate/evalengine/weights.go @@ -0,0 +1,178 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "encoding/binary" + "math" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/json" + "vitess.io/vitess/go/sqltypes" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +// WeightString returns the weight string for a value. +// It appends to dst if an existing slice is given, otherwise it +// returns a new one. +// The returned boolean indicates whether the weight string is a +// fixed-width weight string, such as for fixed size integer values. +// Our WeightString implementation supports more types that MySQL +// externally communicates with the `WEIGHT_STRING` function, so that we +// can also use this to order / sort other types like Float and Decimal +// as well. +func WeightString(dst []byte, v sqltypes.Value, coerceTo sqltypes.Type, col collations.ID, length, precision int) ([]byte, bool, error) { + // We optimize here for the case where we already have the desired type. + // Otherwise, we fall back to the general evalengine conversion logic. + if v.Type() != coerceTo { + return fallbackWeightString(dst, v, coerceTo, col, length, precision) + } + + switch { + case sqltypes.IsNull(coerceTo): + return nil, true, nil + + case sqltypes.IsSigned(coerceTo): + i, err := v.ToInt64() + if err != nil { + return dst, false, err + } + raw := uint64(i) + raw = raw ^ (1 << 63) + return binary.BigEndian.AppendUint64(dst, raw), true, nil + + case sqltypes.IsUnsigned(coerceTo): + u, err := v.ToUint64() + if err != nil { + return dst, false, err + } + return binary.BigEndian.AppendUint64(dst, u), true, nil + + case sqltypes.IsFloat(coerceTo): + f, err := v.ToFloat64() + if err != nil { + return dst, false, err + } + + raw := math.Float64bits(f) + if math.Signbit(f) { + raw = ^raw + } else { + raw = raw ^ (1 << 63) + } + return binary.BigEndian.AppendUint64(dst, raw), true, nil + + case sqltypes.IsBinary(coerceTo): + b := v.Raw() + if length != 0 { + if length > cap(b) { + b = append(b, make([]byte, length-len(b))...) + } else { + b = b[:length] + } + } + return append(dst, b...), false, nil + + case sqltypes.IsText(coerceTo): + coll := colldata.Lookup(col) + if coll == nil { + return dst, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "cannot hash unsupported collation") + } + b := v.Raw() + if length != 0 { + b = charset.Slice(coll.Charset(), b, 0, length) + } + return coll.WeightString(dst, b, length), false, nil + + case sqltypes.IsDecimal(coerceTo): + dec, err := decimal.NewFromMySQL(v.Raw()) + if err != nil { + return dst, false, err + } + return dec.WeightString(dst, int32(length), int32(precision)), true, nil + case coerceTo == sqltypes.TypeJSON: + j, err := json.NewFromSQL(v) + if err != nil { + return dst, false, err + } + return j.WeightString(dst), false, nil + default: + return fallbackWeightString(dst, v, coerceTo, col, length, precision) + } +} + +func fallbackWeightString(dst []byte, v sqltypes.Value, coerceTo sqltypes.Type, col collations.ID, length, precision int) ([]byte, bool, error) { + e, err := valueToEvalCast(v, coerceTo, col) + if err != nil { + return dst, false, err + } + return evalWeightString(dst, e, length, precision) +} + +func evalWeightString(dst []byte, e eval, length, precision int) ([]byte, bool, error) { + switch e := e.(type) { + case nil: + return nil, true, nil + case *evalInt64: + raw := uint64(e.i) + raw = raw ^ (1 << 63) + return binary.BigEndian.AppendUint64(dst, raw), true, nil + case *evalUint64: + return binary.BigEndian.AppendUint64(dst, e.u), true, nil + case *evalFloat: + raw := math.Float64bits(e.f) + if math.Signbit(e.f) { + raw = ^raw + } else { + raw = raw ^ (1 << 63) + } + return binary.BigEndian.AppendUint64(dst, raw), true, nil + case *evalDecimal: + return e.dec.WeightString(dst, int32(length), int32(precision)), true, nil + case *evalBytes: + if e.isBinary() { + b := e.bytes + if length != 0 { + if length > cap(b) { + b = append(b, make([]byte, length-len(b))...) + } else { + b = b[:length] + } + } + return append(dst, b...), false, nil + } + coll := colldata.Lookup(e.col.Collation) + if coll == nil { + return dst, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "cannot hash unsupported collation") + } + b := e.bytes + if length != 0 { + b = charset.Slice(coll.Charset(), b, 0, length) + } + return coll.WeightString(dst, b, length), false, nil + case *evalTemporal: + return e.dt.WeightString(dst), true, nil + case *evalJSON: + return e.WeightString(dst), false, nil + } + + return dst, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected type %v", e.SQLType()) +} diff --git a/go/vt/vtgate/evalengine/weights_test.go b/go/vt/vtgate/evalengine/weights_test.go new file mode 100644 index 00000000000..50a1d91f20c --- /dev/null +++ b/go/vt/vtgate/evalengine/weights_test.go @@ -0,0 +1,103 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "fmt" + "slices" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" +) + +func TestWeightStrings(t *testing.T) { + const Length = 1000 + + type item struct { + value sqltypes.Value + weight string + } + + var cases = []struct { + name string + gen func() sqltypes.Value + types []sqltypes.Type + col collations.ID + len int + prec int + }{ + {name: "int64", gen: sqltypes.RandomGenerators[sqltypes.Int64], types: []sqltypes.Type{sqltypes.Int64, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + {name: "uint64", gen: sqltypes.RandomGenerators[sqltypes.Uint64], types: []sqltypes.Type{sqltypes.Uint64, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + {name: "float64", gen: sqltypes.RandomGenerators[sqltypes.Float64], types: []sqltypes.Type{sqltypes.Float64, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + {name: "varchar", gen: sqltypes.RandomGenerators[sqltypes.VarChar], types: []sqltypes.Type{sqltypes.VarChar, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationUtf8mb4ID}, + {name: "varbinary", gen: sqltypes.RandomGenerators[sqltypes.VarBinary], types: []sqltypes.Type{sqltypes.VarBinary, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + {name: "decimal", gen: sqltypes.RandomGenerators[sqltypes.Decimal], types: []sqltypes.Type{sqltypes.Decimal, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID, len: 20, prec: 10}, + {name: "json", gen: sqltypes.RandomGenerators[sqltypes.TypeJSON], types: []sqltypes.Type{sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + {name: "date", gen: sqltypes.RandomGenerators[sqltypes.Date], types: []sqltypes.Type{sqltypes.Date, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + {name: "datetime", gen: sqltypes.RandomGenerators[sqltypes.Datetime], types: []sqltypes.Type{sqltypes.Datetime, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + {name: "timestamp", gen: sqltypes.RandomGenerators[sqltypes.Timestamp], types: []sqltypes.Type{sqltypes.Timestamp, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + {name: "time", gen: sqltypes.RandomGenerators[sqltypes.Time], types: []sqltypes.Type{sqltypes.Time, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + } + + for _, tc := range cases { + for _, typ := range tc.types { + t.Run(fmt.Sprintf("%s/%v", tc.name, typ), func(t *testing.T) { + items := make([]item, 0, Length) + for i := 0; i < Length; i++ { + v := tc.gen() + w, _, err := WeightString(nil, v, typ, tc.col, tc.len, tc.prec) + require.NoError(t, err) + + items = append(items, item{value: v, weight: string(w)}) + } + + slices.SortFunc(items, func(a, b item) int { + if a.weight < b.weight { + return -1 + } else if a.weight > b.weight { + return 1 + } else { + return 0 + } + }) + + for i := 0; i < Length-1; i++ { + a := items[i] + b := items[i+1] + + v1, err := valueToEvalCast(a.value, typ, tc.col) + require.NoError(t, err) + v2, err := valueToEvalCast(b.value, typ, tc.col) + require.NoError(t, err) + + cmp, err := evalCompareNullSafe(v1, v2) + require.NoError(t, err) + + if cmp > 0 { + t.Fatalf("expected %v [pos=%d] to come after %v [pos=%d]\nav = %v\nbv = %v", + a.value, i, b.value, i+1, + []byte(a.weight), []byte(b.weight), + ) + } + } + }) + } + } +} diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go index 6327b72acab..5b94183a950 100644 --- a/go/vt/vtgate/executor.go +++ b/go/vt/vtgate/executor.go @@ -19,10 +19,7 @@ package vtgate import ( "bytes" "context" - "crypto/sha256" - "encoding/hex" "encoding/json" - "errors" "fmt" "io" "net/http" @@ -33,9 +30,11 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/cache/theine" + "vitess.io/vitess/go/streamlog" + "vitess.io/vitess/go/vt/vthash" + "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/cache" - "vitess.io/vitess/go/hack" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" @@ -43,6 +42,11 @@ import ( "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" @@ -56,12 +60,7 @@ import ( "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vtgate/vschemaacl" - - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vtgate/vtgateservice" ) var ( @@ -104,9 +103,11 @@ type Executor struct { mu sync.Mutex vschema *vindexes.VSchema streamSize int - plans cache.Cache vschemaStats *VSchemaStats + plans *PlanCache + epoch atomic.Uint32 + normalize bool warnShardedOnly bool @@ -115,6 +116,9 @@ type Executor struct { // allowScatter will fail planning if set to false and a plan contains any scatter queries allowScatter bool + + // queryLogger is passed in for logging from this vtgate executor. + queryLogger *streamlog.StreamLogger[*logstats.LogStats] } var executorOnce sync.Once @@ -123,6 +127,15 @@ const pathQueryPlans = "/debug/query_plans" const pathScatterStats = "/debug/scatter_stats" const pathVSchema = "/debug/vschema" +type PlanCacheKey = theine.HashKey256 +type PlanCache = theine.Store[PlanCacheKey, *engine.Plan] + +func DefaultPlanCache() *PlanCache { + // when being endtoend tested, disable the doorkeeper to ensure reproducible results + doorkeeper := !servenv.TestingEndtoend + return theine.NewStore[PlanCacheKey, *engine.Plan](queryPlanCacheMemory, doorkeeper) +} + // NewExecutor creates a new Executor. func NewExecutor( ctx context.Context, @@ -131,7 +144,7 @@ func NewExecutor( resolver *Resolver, normalize, warnOnShardedOnly bool, streamSize int, - cacheCfg *cache.Config, + plans *PlanCache, schemaTracker SchemaInfo, noScatter bool, pv plancontext.PlannerVersion, @@ -142,13 +155,13 @@ func NewExecutor( resolver: resolver, scatterConn: resolver.scatterConn, txConn: resolver.scatterConn.txConn, - plans: cache.NewDefaultCacheImpl(cacheCfg), normalize: normalize, warnShardedOnly: warnOnShardedOnly, streamSize: streamSize, schemaTracker: schemaTracker, allowScatter: !noScatter, pv: pv, + plans: plans, } vschemaacl.Init() @@ -166,36 +179,36 @@ func NewExecutor( return int64(e.plans.Len()) }) stats.NewGaugeFunc("QueryPlanCacheSize", "Query plan cache size", func() int64 { - return e.plans.UsedCapacity() + return int64(e.plans.UsedCapacity()) }) stats.NewGaugeFunc("QueryPlanCacheCapacity", "Query plan cache capacity", func() int64 { - return e.plans.MaxCapacity() + return int64(e.plans.MaxCapacity()) }) stats.NewCounterFunc("QueryPlanCacheEvictions", "Query plan cache evictions", func() int64 { - return e.plans.Evictions() + return e.plans.Metrics.Evicted() }) stats.NewCounterFunc("QueryPlanCacheHits", "Query plan cache hits", func() int64 { - return e.plans.Hits() + return e.plans.Metrics.Hits() }) stats.NewCounterFunc("QueryPlanCacheMisses", "Query plan cache misses", func() int64 { - return e.plans.Misses() + return e.plans.Metrics.Hits() }) - http.Handle(pathQueryPlans, e) - http.Handle(pathScatterStats, e) - http.Handle(pathVSchema, e) + servenv.HTTPHandle(pathQueryPlans, e) + servenv.HTTPHandle(pathScatterStats, e) + servenv.HTTPHandle(pathVSchema, e) }) return e } // Execute executes a non-streaming query. -func (e *Executor) Execute(ctx context.Context, method string, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable) (result *sqltypes.Result, err error) { +func (e *Executor) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, method string, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable) (result *sqltypes.Result, err error) { span, ctx := trace.NewSpan(ctx, "executor.Execute") span.Annotate("method", method) trace.AnnotateSQL(span, sqlparser.Preview(sql)) defer span.Finish() logStats := logstats.NewLogStats(ctx, method, sql, safeSession.GetSessionUUID(), bindVars) - stmtType, result, err := e.execute(ctx, safeSession, sql, bindVars, logStats) + stmtType, result, err := e.execute(ctx, mysqlCtx, safeSession, sql, bindVars, logStats) logStats.Error = err if result == nil { saveSessionStats(safeSession, stmtType, 0, 0, 0, err) @@ -208,11 +221,12 @@ func (e *Executor) Execute(ctx context.Context, method string, safeSession *Safe if err != nil { piiSafeSQL = logStats.StmtType } - log.Warningf("%q exceeds warning threshold of max memory rows: %v", piiSafeSQL, warnMemoryRows) + log.Warningf("%q exceeds warning threshold of max memory rows: %v. Actual memory rows: %v", piiSafeSQL, warnMemoryRows, len(result.Rows)) } logStats.SaveEndTime() - QueryLogger.Send(logStats) + e.queryLogger.Send(logStats) + err = vterrors.TruncateError(err, truncateErrorLen) return result, err } @@ -240,6 +254,7 @@ func (s *streaminResultReceiver) storeResultStats(typ sqlparser.StatementType, q // StreamExecute executes a streaming query. func (e *Executor) StreamExecute( ctx context.Context, + mysqlCtx vtgateservice.MySQLConnection, method string, safeSession *SafeSession, sql string, @@ -330,7 +345,7 @@ func (e *Executor) StreamExecute( return err } - err = e.newExecute(ctx, safeSession, sql, bindVars, logStats, resultHandler, srr.storeResultStats) + err = e.newExecute(ctx, mysqlCtx, safeSession, sql, bindVars, logStats, resultHandler, srr.storeResultStats) logStats.Error = err saveSessionStats(safeSession, srr.stmtType, srr.rowsAffected, srr.insertID, srr.rowsReturned, err) @@ -340,12 +355,12 @@ func (e *Executor) StreamExecute( if err != nil { piiSafeSQL = logStats.StmtType } - log.Warningf("%q exceeds warning threshold of max memory rows: %v", piiSafeSQL, warnMemoryRows) + log.Warningf("%q exceeds warning threshold of max memory rows: %v. Actual memory rows: %v", piiSafeSQL, warnMemoryRows, srr.rowsReturned) } logStats.SaveEndTime() - QueryLogger.Send(logStats) - return err + e.queryLogger.Send(logStats) + return vterrors.TruncateError(err, truncateErrorLen) } @@ -377,11 +392,11 @@ func saveSessionStats(safeSession *SafeSession, stmtType sqlparser.StatementType } } -func (e *Executor) execute(ctx context.Context, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) (sqlparser.StatementType, *sqltypes.Result, error) { +func (e *Executor) execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) (sqlparser.StatementType, *sqltypes.Result, error) { var err error var qr *sqltypes.Result var stmtType sqlparser.StatementType - err = e.newExecute(ctx, safeSession, sql, bindVars, logStats, func(ctx context.Context, plan *engine.Plan, vc *vcursorImpl, bindVars map[string]*querypb.BindVariable, time time.Time) error { + err = e.newExecute(ctx, mysqlCtx, safeSession, sql, bindVars, logStats, func(ctx context.Context, plan *engine.Plan, vc *vcursorImpl, bindVars map[string]*querypb.BindVariable, time time.Time) error { stmtType = plan.Type qr, err = e.executePlan(ctx, safeSession, plan, vc, bindVars, logStats, time) return err @@ -395,7 +410,7 @@ func (e *Executor) execute(ctx context.Context, safeSession *SafeSession, sql st } // addNeededBindVars adds bind vars that are needed by the plan -func (e *Executor) addNeededBindVars(bindVarNeeds *sqlparser.BindVarNeeds, bindVars map[string]*querypb.BindVariable, session *SafeSession) error { +func (e *Executor) addNeededBindVars(vcursor *vcursorImpl, bindVarNeeds *sqlparser.BindVarNeeds, bindVars map[string]*querypb.BindVariable, session *SafeSession) error { for _, funcName := range bindVarNeeds.NeedFunctionResult { switch funcName { case sqlparser.DBVarName: @@ -444,6 +459,8 @@ func (e *Executor) addNeededBindVars(bindVarNeeds *sqlparser.BindVarNeeds, bindV bindVars[key] = sqltypes.StringBindVariable(v) case sysvars.DDLStrategy.Name: bindVars[key] = sqltypes.StringBindVariable(session.DDLStrategy) + case sysvars.MigrationContext.Name: + bindVars[key] = sqltypes.StringBindVariable(session.MigrationContext) case sysvars.SessionUUID.Name: bindVars[key] = sqltypes.StringBindVariable(session.SessionUUID) case sysvars.SessionEnableSystemSettings.Name: @@ -489,7 +506,7 @@ func (e *Executor) addNeededBindVars(bindVarNeeds *sqlparser.BindVarNeeds, bindV if err != nil { return err } - bindVars[key] = sqltypes.ValueBindVariable(evaluated.Value()) + bindVars[key] = sqltypes.ValueBindVariable(evaluated.Value(vcursor.collation)) } } } @@ -623,6 +640,36 @@ func (e *Executor) executeSPInAllSessions(ctx context.Context, safeSession *Safe return qr, nil } +// handleKill executed the kill statement. +func (e *Executor) handleKill(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, stmt sqlparser.Statement, logStats *logstats.LogStats) (result *sqltypes.Result, err error) { + execStart := time.Now() + logStats.PlanTime = execStart.Sub(logStats.StartTime) + e.updateQueryCounts("Kill", "", "", 0) + defer func() { + logStats.ExecuteTime = time.Since(execStart) + }() + + if !allowKillStmt { + return nil, vterrors.VT07001("kill statement execution not permitted.") + } + + if mysqlCtx == nil { + return nil, vterrors.VT12001("kill statement works with access through mysql protocol") + } + + killStmt := stmt.(*sqlparser.Kill) + switch killStmt.Type { + case sqlparser.QueryType: + err = mysqlCtx.KillQuery(uint32(killStmt.ProcesslistID)) + default: + err = mysqlCtx.KillConnection(ctx, uint32(killStmt.ProcesslistID)) + } + if err != nil { + return nil, err + } + return &sqltypes.Result{}, nil +} + // CloseSession releases the current connection, which rollbacks open transactions and closes reserved connections. // It is called then the MySQL servers closes the connection to its client. func (e *Executor) CloseSession(ctx context.Context, safeSession *SafeSession) error { @@ -937,12 +984,19 @@ func (e *Executor) SaveVSchema(vschema *vindexes.VSchema, stats *VSchemaStats) { e.vschema = vschema } e.vschemaStats = stats - e.plans.Clear() + e.ClearPlans() if vschemaCounters != nil { vschemaCounters.Add("Reload", 1) } + if vindexUnknownParams != nil { + var unknownParams int + for _, ks := range stats.Keyspaces { + unknownParams += ks.VindexUnknownParamsCount + } + vindexUnknownParams.Set(int64(unknownParams)) + } } // ParseDestinationTarget parses destination target string and sets default keyspace if possible. @@ -964,92 +1018,119 @@ type iQueryOption interface { // getPlan computes the plan for the given query. If one is in // the cache, it reuses it. -func (e *Executor) getPlan(ctx context.Context, vcursor *vcursorImpl, sql string, comments sqlparser.MarginComments, bindVars map[string]*querypb.BindVariable, qo iQueryOption, logStats *logstats.LogStats) (*engine.Plan, sqlparser.Statement, error) { +func (e *Executor) getPlan( + ctx context.Context, + vcursor *vcursorImpl, + query string, + stmt sqlparser.Statement, + comments sqlparser.MarginComments, + bindVars map[string]*querypb.BindVariable, + reservedVars *sqlparser.ReservedVars, + allowParameterization bool, + logStats *logstats.LogStats, +) (*engine.Plan, error) { if e.VSchema() == nil { - return nil, nil, errors.New("vschema not initialized") + return nil, vterrors.VT13001("vschema not initialized") } - stmt, reserved, err := sqlparser.Parse2(sql) + vcursor.SetIgnoreMaxMemoryRows(sqlparser.IgnoreMaxMaxMemoryRowsDirective(stmt)) + vcursor.SetConsolidator(sqlparser.Consolidator(stmt)) + vcursor.SetWorkloadName(sqlparser.GetWorkloadNameFromStatement(stmt)) + priority, err := sqlparser.GetPriorityFromStatement(stmt) if err != nil { - return nil, nil, err - } - query := sql - statement := stmt - reservedVars := sqlparser.NewReservedVars("vtg", reserved) - bindVarNeeds := &sqlparser.BindVarNeeds{} - if !sqlparser.IgnoreMaxPayloadSizeDirective(statement) && !isValidPayloadSize(query) { - return nil, nil, vterrors.NewErrorf(vtrpcpb.Code_RESOURCE_EXHAUSTED, vterrors.NetPacketTooLarge, "query payload size above threshold") + return nil, err } - ignoreMaxMemoryRows := sqlparser.IgnoreMaxMaxMemoryRowsDirective(stmt) - vcursor.SetIgnoreMaxMemoryRows(ignoreMaxMemoryRows) - consolidator := sqlparser.Consolidator(stmt) - vcursor.SetConsolidator(consolidator) + vcursor.SetPriority(priority) setVarComment, err := prepareSetVarComment(vcursor, stmt) if err != nil { - return nil, nil, err + return nil, err } - // Normalize if possible and retry. - if e.canNormalizeStatement(stmt, qo, setVarComment) { - parameterize := e.normalize // the public flag is called normalize - result, err := sqlparser.PrepareAST( - stmt, - reservedVars, - bindVars, - parameterize, - vcursor.keyspace, - qo.getSelectLimit(), - setVarComment, - vcursor.safeSession.SystemVariables, - vcursor, - ) - if err != nil { - return nil, nil, err - } - statement = result.AST - bindVarNeeds = result.BindVarNeeds - query = sqlparser.String(statement) + + // Normalize if possible + shouldNormalize := e.canNormalizeStatement(stmt, setVarComment) + parameterize := allowParameterization && shouldNormalize + + rewriteASTResult, err := sqlparser.PrepareAST( + stmt, + reservedVars, + bindVars, + parameterize, + vcursor.keyspace, + vcursor.safeSession.getSelectLimit(), + setVarComment, + vcursor.safeSession.SystemVariables, + vcursor, + ) + if err != nil { + return nil, err + } + stmt = rewriteASTResult.AST + bindVarNeeds := rewriteASTResult.BindVarNeeds + if shouldNormalize { + query = sqlparser.String(stmt) } logStats.SQL = comments.Leading + query + comments.Trailing logStats.BindVariables = sqltypes.CopyBindVariables(bindVars) - return e.cacheAndBuildStatement(ctx, vcursor, query, statement, qo, logStats, stmt, reservedVars, bindVarNeeds) + return e.cacheAndBuildStatement(ctx, vcursor, query, stmt, reservedVars, bindVarNeeds, logStats) } -func (e *Executor) cacheAndBuildStatement(ctx context.Context, vcursor *vcursorImpl, query string, statement sqlparser.Statement, qo iQueryOption, logStats *logstats.LogStats, stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, bindVarNeeds *sqlparser.BindVarNeeds) (*engine.Plan, sqlparser.Statement, error) { - planHash := sha256.New() - _, _ = planHash.Write([]byte(vcursor.planPrefixKey(ctx))) - _, _ = planHash.Write([]byte{':'}) - _, _ = planHash.Write(hack.StringBytes(query)) - planKey := hex.EncodeToString(planHash.Sum(nil)) +func (e *Executor) hashPlan(ctx context.Context, vcursor *vcursorImpl, query string) PlanCacheKey { + hasher := vthash.New256() + vcursor.keyForPlan(ctx, query, hasher) - if sqlparser.CachePlan(statement) && qo.cachePlan() { - if plan, ok := e.plans.Get(planKey); ok { - logStats.CachedPlan = true - return plan.(*engine.Plan), stmt, nil - } - } + var planKey PlanCacheKey + hasher.Sum(planKey[:0]) + return planKey +} - plan, err := planbuilder.BuildFromStmt(query, statement, reservedVars, vcursor, bindVarNeeds, enableOnlineDDL, enableDirectDDL) +func (e *Executor) buildStatement( + ctx context.Context, + vcursor *vcursorImpl, + query string, + stmt sqlparser.Statement, + reservedVars *sqlparser.ReservedVars, + bindVarNeeds *sqlparser.BindVarNeeds, +) (*engine.Plan, error) { + plan, err := planbuilder.BuildFromStmt(ctx, query, stmt, reservedVars, vcursor, bindVarNeeds, enableOnlineDDL, enableDirectDDL) if err != nil { - return nil, nil, err + return nil, err } plan.Warnings = vcursor.warnings vcursor.warnings = nil err = e.checkThatPlanIsValid(stmt, plan) - // Only cache the plan if it is valid (i.e. does not scatter) - if err == nil && qo.cachePlan() && sqlparser.CachePlan(statement) { - e.plans.Set(planKey, plan) + return plan, err +} + +func (e *Executor) cacheAndBuildStatement( + ctx context.Context, + vcursor *vcursorImpl, + query string, + stmt sqlparser.Statement, + reservedVars *sqlparser.ReservedVars, + bindVarNeeds *sqlparser.BindVarNeeds, + logStats *logstats.LogStats, +) (*engine.Plan, error) { + planCachable := sqlparser.CachePlan(stmt) && vcursor.safeSession.cachePlan() + if planCachable { + planKey := e.hashPlan(ctx, vcursor, query) + + var plan *engine.Plan + var err error + plan, logStats.CachedPlan, err = e.plans.GetOrLoad(planKey, e.epoch.Load(), func() (*engine.Plan, error) { + return e.buildStatement(ctx, vcursor, query, stmt, reservedVars, bindVarNeeds) + }) + return plan, err } - return plan, stmt, err + return e.buildStatement(ctx, vcursor, query, stmt, reservedVars, bindVarNeeds) } -func (e *Executor) canNormalizeStatement(stmt sqlparser.Statement, qo iQueryOption, setVarComment string) bool { - return (e.normalize && sqlparser.CanNormalize(stmt)) || - sqlparser.MustRewriteAST(stmt, qo.getSelectLimit() > 0) || setVarComment != "" +func (e *Executor) canNormalizeStatement(stmt sqlparser.Statement, setVarComment string) bool { + return sqlparser.CanNormalize(stmt) || setVarComment != "" } func prepareSetVarComment(vcursor *vcursorImpl, stmt sqlparser.Statement) (string, error) { @@ -1080,27 +1161,10 @@ func prepareSetVarComment(vcursor *vcursorImpl, stmt sqlparser.Statement) (strin return strings.TrimSpace(res.String()), nil } -func (e *Executor) debugGetPlan(planKey string) (*engine.Plan, bool) { - planHash := sha256.Sum256([]byte(planKey)) - planHex := hex.EncodeToString(planHash[:]) - if plan, ok := e.plans.Get(planHex); ok { - return plan.(*engine.Plan), true - } - return nil, false -} - -type cacheItem struct { - Key string - Value *engine.Plan -} - -func (e *Executor) debugCacheEntries() (items []cacheItem) { - e.plans.ForEach(func(value any) bool { - plan := value.(*engine.Plan) - items = append(items, cacheItem{ - Key: plan.Original, - Value: plan, - }) +func (e *Executor) debugCacheEntries() (items map[string]*engine.Plan) { + items = make(map[string]*engine.Plan) + e.ForEachPlan(func(plan *engine.Plan) bool { + items[plan.Original] = plan return true }) return @@ -1138,10 +1202,20 @@ func returnAsJSON(response http.ResponseWriter, stuff any) { } // Plans returns the LRU plan cache -func (e *Executor) Plans() cache.Cache { +func (e *Executor) Plans() *PlanCache { return e.plans } +func (e *Executor) ForEachPlan(each func(plan *engine.Plan) bool) { + e.plans.Range(e.epoch.Load(), func(_ PlanCacheKey, value *engine.Plan) bool { + return each(value) + }) +} + +func (e *Executor) ClearPlans() { + e.epoch.Add(1) +} + func (e *Executor) updateQueryCounts(planType, keyspace, tableName string, shardQueries int64) { queriesProcessed.Add(planType, 1) queriesRouted.Add(planType, shardQueries) @@ -1169,7 +1243,7 @@ func buildVarCharFields(names ...string) []*querypb.Field { fields[i] = &querypb.Field{ Name: v, Type: sqltypes.VarChar, - Charset: collations.CollationUtf8ID, + Charset: uint32(collations.SystemCollation.Collation), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG), } } @@ -1210,9 +1284,9 @@ func (e *Executor) Prepare(ctx context.Context, method string, safeSession *Safe // it was a no-op record (i.e. didn't issue any queries) if !(logStats.StmtType == "ROLLBACK" && logStats.ShardQueries == 0) { logStats.SaveEndTime() - QueryLogger.Send(logStats) + e.queryLogger.Send(logStats) } - return fld, err + return fld, vterrors.TruncateError(err, truncateErrorLen) } func (e *Executor) prepare(ctx context.Context, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) ([]*querypb.Field, error) { @@ -1245,17 +1319,22 @@ func (e *Executor) prepare(ctx context.Context, safeSession *SafeSession, sql st case sqlparser.StmtSelect, sqlparser.StmtShow: return e.handlePrepare(ctx, safeSession, sql, bindVars, logStats) case sqlparser.StmtDDL, sqlparser.StmtBegin, sqlparser.StmtCommit, sqlparser.StmtRollback, sqlparser.StmtSet, sqlparser.StmtInsert, sqlparser.StmtReplace, sqlparser.StmtUpdate, sqlparser.StmtDelete, - sqlparser.StmtUse, sqlparser.StmtOther, sqlparser.StmtComment, sqlparser.StmtExplain, sqlparser.StmtFlush: + sqlparser.StmtUse, sqlparser.StmtOther, sqlparser.StmtComment, sqlparser.StmtExplain, sqlparser.StmtFlush, sqlparser.StmtKill: return nil, nil } return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unrecognized prepare statement: %s", sql) } func (e *Executor) handlePrepare(ctx context.Context, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) ([]*querypb.Field, error) { - // V3 mode. query, comments := sqlparser.SplitMarginComments(sql) vcursor, _ := newVCursorImpl(safeSession, comments, e, logStats, e.vm, e.VSchema(), e.resolver.resolver, e.serv, e.warnShardedOnly, e.pv) - plan, _, err := e.getPlan(ctx, vcursor, query, comments, bindVars, safeSession, logStats) + + stmt, reservedVars, err := parseAndValidateQuery(query) + if err != nil { + return nil, err + } + + plan, err := e.getPlan(ctx, vcursor, sql, stmt, comments, bindVars, reservedVars /* parameterize */, false, logStats) execStart := time.Now() logStats.PlanTime = execStart.Sub(logStats.StartTime) @@ -1264,7 +1343,7 @@ func (e *Executor) handlePrepare(ctx context.Context, safeSession *SafeSession, return nil, err } - err = e.addNeededBindVars(plan.BindVarNeeds, bindVars, safeSession) + err = e.addNeededBindVars(vcursor, plan.BindVarNeeds, bindVars, safeSession) if err != nil { logStats.Error = err return nil, err @@ -1285,6 +1364,17 @@ func (e *Executor) handlePrepare(ctx context.Context, safeSession *SafeSession, return qr.Fields, err } +func parseAndValidateQuery(query string) (sqlparser.Statement, *sqlparser.ReservedVars, error) { + stmt, reserved, err := sqlparser.Parse2(query) + if err != nil { + return nil, nil, err + } + if !sqlparser.IgnoreMaxPayloadSizeDirective(stmt) && !isValidPayloadSize(query) { + return nil, nil, vterrors.NewErrorf(vtrpcpb.Code_RESOURCE_EXHAUSTED, vterrors.NetPacketTooLarge, "query payload size above threshold") + } + return stmt, sqlparser.NewReservedVars("vtg", reserved), nil +} + // ExecuteMultiShard implements the IExecutor interface func (e *Executor) ExecuteMultiShard(ctx context.Context, primitive engine.Primitive, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, session *SafeSession, autocommit bool, ignoreMaxMemoryRows bool) (qr *sqltypes.Result, errs []error) { return e.scatterConn.ExecuteMultiShard(ctx, primitive, rss, queries, session, autocommit, ignoreMaxMemoryRows) @@ -1407,3 +1497,39 @@ func getTabletThrottlerStatus(tabletHostPort string) (string, error) { func (e *Executor) ReleaseLock(ctx context.Context, session *SafeSession) error { return e.txConn.ReleaseLock(ctx, session) } + +// planPrepareStmt implements the IExecutor interface +func (e *Executor) planPrepareStmt(ctx context.Context, vcursor *vcursorImpl, query string) (*engine.Plan, sqlparser.Statement, error) { + stmt, reservedVars, err := parseAndValidateQuery(query) + if err != nil { + return nil, nil, err + } + + // creating this log stats to not interfere with the original log stats. + lStats := logstats.NewLogStats(ctx, "prepare", query, vcursor.safeSession.SessionUUID, nil) + plan, err := e.getPlan( + ctx, + vcursor, + query, + sqlparser.CloneStatement(stmt), + vcursor.marginComments, + map[string]*querypb.BindVariable{}, + reservedVars, /* normalize */ + false, + lStats, + ) + if err != nil { + return nil, nil, err + } + return plan, stmt, nil +} + +func (e *Executor) Close() { + e.scatterConn.Close() + topo, err := e.serv.GetTopoServer() + if err != nil { + panic(err) + } + topo.Close() + e.plans.Close() +} diff --git a/go/vt/vtgate/executor_ddl_test.go b/go/vt/vtgate/executor_ddl_test.go index 951e25f2896..b2502ab247a 100644 --- a/go/vt/vtgate/executor_ddl_test.go +++ b/go/vt/vtgate/executor_ddl_test.go @@ -26,7 +26,7 @@ import ( ) func TestDDLFlags(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) session := NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}) defer func() { enableOnlineDDL = true @@ -59,7 +59,7 @@ func TestDDLFlags(t *testing.T) { t.Run(fmt.Sprintf("%s-%v-%v", testcase.sql, testcase.enableDirectDDL, testcase.enableOnlineDDL), func(t *testing.T) { enableDirectDDL = testcase.enableDirectDDL enableOnlineDDL = testcase.enableOnlineDDL - _, err := executor.Execute(ctx, "TestDDLFlags", session, testcase.sql, nil) + _, err := executor.Execute(ctx, nil, "TestDDLFlags", session, testcase.sql, nil) if testcase.wantErr { require.EqualError(t, err, testcase.err) } else { diff --git a/go/vt/vtgate/executor_dml_test.go b/go/vt/vtgate/executor_dml_test.go index 8a61c0e80d3..59fbe314346 100644 --- a/go/vt/vtgate/executor_dml_test.go +++ b/go/vt/vtgate/executor_dml_test.go @@ -22,29 +22,32 @@ import ( "strings" "testing" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/utils" - "vitess.io/vitess/go/vt/sqlparser" - _ "vitess.io/vitess/go/vt/vtgate/vindexes" - "vitess.io/vitess/go/vt/vttablet/sandboxconn" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/sqlerror" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" querypb "vitess.io/vitess/go/vt/proto/query" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" + _ "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/sandboxconn" ) func TestUpdateEqual(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) // Update by primary vindex. - _, err := executorExec(executor, "update user set a=2 where id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update user set a=2 where id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "update `user` set a = 2 where id = 1", @@ -52,10 +55,10 @@ func TestUpdateEqual(t *testing.T) { }} assertQueries(t, sbc1, wantQueries) assertQueries(t, sbc2, nil) - testQueryLog(t, logChan, "TestExecute", "UPDATE", "update user set a=2 where id = 1", 1) + testQueryLog(t, executor, logChan, "TestExecute", "UPDATE", "update `user` set a = 2 where id = 1", 1) sbc1.Queries = nil - _, err = executorExec(executor, "update user set a=2 where id = 3", nil) + _, err = executorExec(ctx, executor, session, "update user set a=2 where id = 3", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "update `user` set a = 2 where id = 3", @@ -68,7 +71,7 @@ func TestUpdateEqual(t *testing.T) { sbc1.Queries = nil sbc2.Queries = nil sbclookup.SetResults([]*sqltypes.Result{{}}) - _, err = executorExec(executor, "update music set a=2 where id = 2", nil) + _, err = executorExec(ctx, executor, session, "update music set a=2 where id = 2", nil) require.NoError(t, err) vars, err := sqltypes.BuildBindVariable([]any{sqltypes.NewInt64(2)}) require.NoError(t, err) @@ -92,7 +95,7 @@ func TestUpdateEqual(t *testing.T) { ), }) - _, err = executorExec(executor, "update user2 set `name`='myname', lastname='mylastname' where id = 1", nil) + _, err = executorExec(ctx, executor, session, "update user2 set `name`='myname', lastname='mylastname' where id = 1", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{ { @@ -130,10 +133,11 @@ func TestUpdateEqual(t *testing.T) { } func TestUpdateFromSubQuery(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + executor.pv = querypb.ExecuteOptions_Gen4 - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) fields := []*querypb.Field{ {Name: "count(*)", Type: sqltypes.Int64}, @@ -146,7 +150,10 @@ func TestUpdateFromSubQuery(t *testing.T) { }}) // Update by primary vindex, but first execute subquery - _, err := executorExec(executor, "update user set a=(select count(*) from user where id = 3) where id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update user set a=(select count(*) from user where id = 3) where id = 1", nil) require.NoError(t, err) wantQueriesSbc1 := []*querypb.BoundQuery{{ Sql: "update `user` set a = :__sq1 where id = 1", @@ -160,7 +167,7 @@ func TestUpdateFromSubQuery(t *testing.T) { }} assertQueries(t, sbc1, wantQueriesSbc1) assertQueries(t, sbc2, wantQueriesSbc2) - testQueryLog(t, logChan, "TestExecute", "UPDATE", "update user set a=(select count(*) from user where id = 3) where id = 1", 2) + testQueryLog(t, executor, logChan, "TestExecute", "UPDATE", "update `user` set a = (select count(*) from `user` where id = 3) where id = 1", 2) } func TestUpdateEqualWithNoVerifyAndWriteOnlyLookupUniqueVindexes(t *testing.T) { @@ -171,9 +178,12 @@ func TestUpdateEqualWithNoVerifyAndWriteOnlyLookupUniqueVindexes(t *testing.T) { ), "1|2|2|2|2|2|1|0", )} - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res) + executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, res) - _, err := executorExec(executor, "update t2_lookup set lu_col = 5 where wo_lu_col = 2", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update t2_lookup set lu_col = 5 where wo_lu_col = 2", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -222,10 +232,10 @@ func TestUpdateInTransactionLookupDefaultReadLock(t *testing.T) { ), "1|2|2|2|2|2|1|0", )} - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res) + executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, res) safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true}) - _, err := executorExecSession( + _, err := executorExecSession(ctx, executor, "update t2_lookup set lu_col = 5 where nv_lu_col = 2", nil, @@ -284,10 +294,10 @@ func TestUpdateInTransactionLookupExclusiveReadLock(t *testing.T) { ), "1|2|2|2|2|2|1|0", )} - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res) + executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, res) safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true}) - _, err := executorExecSession( + _, err := executorExecSession(ctx, executor, "update t2_lookup set lu_col = 5 where erl_lu_col = 2", nil, @@ -346,10 +356,10 @@ func TestUpdateInTransactionLookupSharedReadLock(t *testing.T) { ), "1|2|2|2|2|2|1|0", )} - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res) + executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, res) safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true}) - _, err := executorExecSession( + _, err := executorExecSession(ctx, executor, "update t2_lookup set lu_col = 5 where srl_lu_col = 2", nil, @@ -408,10 +418,10 @@ func TestUpdateInTransactionLookupNoReadLock(t *testing.T) { ), "1|2|2|2|2|2|1|0", )} - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res) + executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, res) safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true}) - _, err := executorExecSession( + _, err := executorExecSession(ctx, executor, "update t2_lookup set lu_col = 5 where nrl_lu_col = 2", nil, @@ -522,7 +532,7 @@ func TestUpdateMultiOwned(t *testing.T) { } } ` - executor, sbc1, sbc2, sbclookup := createCustomExecutor(vschema) + executor, sbc1, sbc2, sbclookup, ctx := createCustomExecutor(t, vschema) sbc1.SetResults([]*sqltypes.Result{ sqltypes.MakeTestResult( @@ -530,7 +540,10 @@ func TestUpdateMultiOwned(t *testing.T) { "1|10|20|30|40|50|60|0|0", ), }) - _, err := executorExec(executor, "update user set a=1, b=2, f=4, e=3 where id=1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update user set a=1, b=2, f=4, e=3 where id=1", nil) if err != nil { t.Fatal(err) } @@ -578,9 +591,12 @@ func TestUpdateMultiOwned(t *testing.T) { } func TestUpdateComments(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "update user set a=2 where id = 1 /* trailing */", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update user set a=2 where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "update `user` set a = 2 where id = 1 /* trailing */", @@ -591,13 +607,16 @@ func TestUpdateComments(t *testing.T) { } func TestUpdateNormalize(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) executor.normalize = true - _, err := executorExec(executor, "/* leading */ update user set a=2 where id = 1 /* trailing */", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "/* leading */ update user set a=2 where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "/* leading */ update `user` set a = :a where id = :id /* trailing */", + Sql: "/* leading */ update `user` set a = :a /* INT64 */ where id = :id /* INT64 */ /* trailing */", BindVariables: map[string]*querypb.BindVariable{ "a": sqltypes.TestBindVariable(int64(2)), "id": sqltypes.TestBindVariable(int64(1)), @@ -608,11 +627,11 @@ func TestUpdateNormalize(t *testing.T) { sbc1.Queries = nil // Force the query to go to the "wrong" shard and ensure that normalization still happens - primarySession.TargetString = "TestExecutor/40-60" - _, err = executorExec(executor, "/* leading */ update user set a=2 where id = 1 /* trailing */", nil) + session.TargetString = "TestExecutor/40-60" + _, err = executorExec(ctx, executor, session, "/* leading */ update user set a=2 where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "/* leading */ update `user` set a = :a where id = :id /* trailing */", + Sql: "/* leading */ update `user` set a = :a /* INT64 */ where id = :id /* INT64 */ /* trailing */", BindVariables: map[string]*querypb.BindVariable{ "a": sqltypes.TestBindVariable(int64(2)), "id": sqltypes.TestBindVariable(int64(1)), @@ -621,11 +640,10 @@ func TestUpdateNormalize(t *testing.T) { assertQueries(t, sbc1, nil) assertQueries(t, sbc2, wantQueries) sbc2.Queries = nil - primarySession.TargetString = "" } func TestDeleteEqual(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ @@ -639,7 +657,10 @@ func TestDeleteEqual(t *testing.T) { sqltypes.NewVarChar("myname"), }}, }}) - _, err := executorExec(executor, "delete from user where id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from user where id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select Id, `name` from `user` where id = 1 for update", @@ -662,7 +683,7 @@ func TestDeleteEqual(t *testing.T) { sbc.Queries = nil sbclookup.Queries = nil sbc.SetResults([]*sqltypes.Result{{}}) - _, err = executorExec(executor, "delete from user where id = 1", nil) + _, err = executorExec(ctx, executor, session, "delete from user where id = 1", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "select Id, `name` from `user` where id = 1 for update", @@ -677,7 +698,7 @@ func TestDeleteEqual(t *testing.T) { sbc.Queries = nil sbclookup.Queries = nil sbclookup.SetResults([]*sqltypes.Result{{}}) - _, err = executorExec(executor, "delete from music where id = 1", nil) + _, err = executorExec(ctx, executor, session, "delete from music where id = 1", nil) require.NoError(t, err) vars, err := sqltypes.BuildBindVariable([]any{sqltypes.NewInt64(1)}) require.NoError(t, err) @@ -693,7 +714,7 @@ func TestDeleteEqual(t *testing.T) { sbc.Queries = nil sbclookup.Queries = nil sbclookup.SetResults([]*sqltypes.Result{{}}) - _, err = executorExec(executor, "delete from user_extra where user_id = 1", nil) + _, err = executorExec(ctx, executor, session, "delete from user_extra where user_id = 1", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "delete from user_extra where user_id = 1", @@ -709,7 +730,7 @@ func TestDeleteEqual(t *testing.T) { "1|1|foo", ), }) - _, err = executorExec(executor, "delete from user2 where id = 1", nil) + _, err = executorExec(ctx, executor, session, "delete from user2 where id = 1", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{ { @@ -738,8 +759,12 @@ func TestDeleteEqual(t *testing.T) { } func TestUpdateScatter(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - _, err := executorExec(executor, "update user_extra set col = 2", nil) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update user_extra set col = 2", nil) require.NoError(t, err) // Queries get annotatted. wantQueries := []*querypb.BoundQuery{{ @@ -751,8 +776,12 @@ func TestUpdateScatter(t *testing.T) { } func TestDeleteScatter(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - _, err := executorExec(executor, "delete from user_extra", nil) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from user_extra", nil) require.NoError(t, err) // Queries get annotatted. wantQueries := []*querypb.BoundQuery{{ @@ -764,7 +793,7 @@ func TestDeleteScatter(t *testing.T) { } func TestUpdateEqualWithMultipleLookupVindex(t *testing.T) { - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, nil) + executor, sbc1, sbc2, sbcLookup, ctx := createExecutorEnv(t) sbcLookup.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("lu_col|keyspace_id", "int64|varbinary"), @@ -779,7 +808,10 @@ func TestUpdateEqualWithMultipleLookupVindex(t *testing.T) { "1|2|2|2|2|2|1|0", )}) - _, err := executorExec(executor, "update t2_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update t2_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -817,7 +849,7 @@ func TestUpdateEqualWithMultipleLookupVindex(t *testing.T) { } func TestUpdateUseHigherCostVindexIfBackfilling(t *testing.T) { - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, nil) + executor, sbc1, sbc2, sbcLookup, ctx := createExecutorEnv(t) sbcLookup.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("lu_col|keyspace_id", "int64|varbinary"), @@ -834,7 +866,10 @@ func TestUpdateUseHigherCostVindexIfBackfilling(t *testing.T) { "1|2|2|2|2|2|2|0", )}) - _, err := executorExec(executor, "update t2_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col in (1, 2)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update t2_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col in (1, 2)", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -892,9 +927,12 @@ func TestDeleteEqualWithNoVerifyAndWriteOnlyLookupUniqueVindex(t *testing.T) { ), "1|1|1|1|1|1|1", )} - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, res) + executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, res) - _, err := executorExec(executor, "delete from t2_lookup where wo_lu_col = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from t2_lookup where wo_lu_col = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -963,7 +1001,7 @@ func TestDeleteEqualWithNoVerifyAndWriteOnlyLookupUniqueVindex(t *testing.T) { } func TestDeleteEqualWithMultipleLookupVindex(t *testing.T) { - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, nil) + executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, nil) sbcLookup.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("lu_col|keyspace_id", "int64|varbinary"), @@ -978,7 +1016,10 @@ func TestDeleteEqualWithMultipleLookupVindex(t *testing.T) { "1|1|1|1|1|1|1", )}) - _, err := executorExec(executor, "delete from t2_lookup where wo_lu_col = 1 and lu_col = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from t2_lookup where wo_lu_col = 1 and lu_col = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -1041,7 +1082,7 @@ func TestDeleteEqualWithMultipleLookupVindex(t *testing.T) { } func TestDeleteUseHigherCostVindexIfBackfilling(t *testing.T) { - executor, sbc1, sbc2, sbcLookup := createCustomExecutorSetValues(executorVSchema, nil) + executor, sbc1, sbc2, sbcLookup, ctx := createCustomExecutorSetValues(t, executorVSchema, nil) sbcLookup.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("lu_col|keyspace_id", "int64|varbinary"), @@ -1058,7 +1099,10 @@ func TestDeleteUseHigherCostVindexIfBackfilling(t *testing.T) { "1|1|1|1|1|1|2", )}) - _, err := executorExec(executor, "delete from t2_lookup where wo_lu_col = 1 and lu_col in (1, 2)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from t2_lookup where wo_lu_col = 1 and lu_col in (1, 2)", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -1158,9 +1202,12 @@ func TestDeleteUseHigherCostVindexIfBackfilling(t *testing.T) { } func TestDeleteByDestination(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - // This query is not supported in v3, so we know for sure is taking the DeleteByDestination route - _, err := executorExec(executor, "delete from `TestExecutor[-]`.user_extra limit 10", nil) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from `TestExecutor[-]`.user_extra limit 10", nil) require.NoError(t, err) // Queries get annotatted. wantQueries := []*querypb.BoundQuery{{ @@ -1172,7 +1219,7 @@ func TestDeleteByDestination(t *testing.T) { } func TestDeleteComments(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ @@ -1186,7 +1233,10 @@ func TestDeleteComments(t *testing.T) { sqltypes.NewVarChar("myname"), }}, }}) - _, err := executorExec(executor, "delete from user where id = 1 /* trailing */", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from user where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select Id, `name` from `user` where id = 1 for update /* trailing */", @@ -1208,12 +1258,15 @@ func TestDeleteComments(t *testing.T) { } func TestInsertSharded(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - _, err := executorExec(executor, "insert into user(id, v, name) values (1, 2, 'myname')", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into user(id, v, name) values (1, 2, 'myname')", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 2, :_name_0)", @@ -1234,13 +1287,13 @@ func TestInsertSharded(t *testing.T) { }} assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 0) - testQueryLog(t, logChan, "VindexCreate", "INSERT", "insert into name_user_map(name, user_id) values(:name_0, :user_id_0)", 1) - testQueryLog(t, logChan, "TestExecute", "INSERT", "insert into user(id, v, name) values (1, 2, 'myname')", 1) + testQueryLog(t, executor, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 0) + testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0)", 1) + testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into `user`(id, v, `name`) values (1, 2, 'myname')", 1) sbc1.Queries = nil sbclookup.Queries = nil - _, err = executorExec(executor, "insert into user(id, v, name) values (3, 2, 'myname2')", nil) + _, err = executorExec(ctx, executor, session, "insert into user(id, v, name) values (3, 2, 'myname2')", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 2, :_name_0)", @@ -1260,12 +1313,12 @@ func TestInsertSharded(t *testing.T) { }, }} assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 2) - testQueryLog(t, logChan, "VindexCreate", "INSERT", "insert into name_user_map(name, user_id) values(:name_0, :user_id_0)", 1) - testQueryLog(t, logChan, "TestExecute", "INSERT", "insert into user(id, v, name) values (3, 2, 'myname2')", 1) + testQueryLog(t, executor, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 2) + testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0)", 1) + testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into `user`(id, v, `name`) values (3, 2, 'myname2')", 1) sbc1.Queries = nil - _, err = executorExec(executor, "insert into user2(id, name, lastname) values (2, 'myname', 'mylastname')", nil) + _, err = executorExec(ctx, executor, session, "insert into user2(id, name, lastname) values (2, 'myname', 'mylastname')", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "insert into user2(id, `name`, lastname) values (:_id_0, :_name_0, :_lastname_0)", @@ -1276,19 +1329,19 @@ func TestInsertSharded(t *testing.T) { }, }} assertQueries(t, sbc1, wantQueries) - testQueryLog(t, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 3) - testQueryLog(t, logChan, "VindexCreate", "INSERT", "insert into name_lastname_keyspace_id_map(name, lastname, keyspace_id) values(:name_0, :lastname_0, :keyspace_id_0)", 1) - testQueryLog(t, logChan, "TestExecute", "INSERT", "insert into user2(id, name, lastname) values (2, 'myname', 'mylastname')", 1) + testQueryLog(t, executor, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 3) + testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into name_lastname_keyspace_id_map(`name`, lastname, keyspace_id) values (:name_0, :lastname_0, :keyspace_id_0)", 1) + testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into user2(id, `name`, lastname) values (2, 'myname', 'mylastname')", 1) // insert with binary values executor.normalize = true sbc1.Queries = nil sbc2.Queries = nil sbclookup.Queries = nil - _, err = executorExec(executor, "insert into user(id, v, name) values (1, 2, _binary 'myname')", nil) + _, err = executorExec(ctx, executor, session, "insert into user(id, v, name) values (1, 2, _binary 'myname')", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "insert into `user`(id, v, `name`) values (:_Id_0, :vtg2, :_name_0)", + Sql: "insert into `user`(id, v, `name`) values (:_Id_0, :vtg2 /* INT64 */, :_name_0)", BindVariables: map[string]*querypb.BindVariable{ "_Id_0": sqltypes.Int64BindVariable(1), "_name_0": sqltypes.BytesBindVariable([]byte("myname")), @@ -1309,16 +1362,19 @@ func TestInsertSharded(t *testing.T) { }} assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 3) - testQueryLog(t, logChan, "VindexCreate", "INSERT", "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0)", 1) - testQueryLog(t, logChan, "TestExecute", "INSERT", "insert into `user`(id, v, `name`) values (:vtg1, :vtg2, _binary :vtg3)", 1) + testQueryLog(t, executor, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 3) + testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0)", 1) + testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into `user`(id, v, `name`) values (:vtg1 /* INT64 */, :vtg2 /* INT64 */, _binary :vtg3 /* VARCHAR */)", 1) } func TestInsertShardedKeyrange(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) // If a unique vindex returns a keyrange, we fail the insert - _, err := executorExec(executor, "insert into keyrange_table(krcol_unique, krcol) values(1, 1)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into keyrange_table(krcol_unique, krcol) values(1, 1)", nil) require.EqualError(t, err, "could not map [INT64(1)] to a unique keyspace id: DestinationKeyRange(-10)") } @@ -1382,9 +1438,9 @@ func TestInsertShardedAutocommitLookup(t *testing.T) { } } ` - executor, sbc1, sbc2, sbclookup := createCustomExecutor(vschema) + executor, sbc1, sbc2, sbclookup, ctx := createCustomExecutor(t, vschema) - _, err := executorExecSession(executor, "insert into user(id, v, name, music) values (1, 2, 'myname', 'star')", nil, &vtgatepb.Session{}) + _, err := executorExecSession(ctx, executor, "insert into user(id, v, name, music) values (1, 2, 'myname', 'star')", nil, &vtgatepb.Session{}) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into `user`(id, v, `name`, music) values (:_Id_0, 2, :_name_0, :_music_0)", @@ -1415,7 +1471,7 @@ func TestInsertShardedAutocommitLookup(t *testing.T) { } func TestInsertShardedIgnore(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) // Build the sequence of responses for sbclookup. This should // match the sequence of queries we validate below. @@ -1444,7 +1500,10 @@ func TestInsertShardedIgnore(t *testing.T) { // Fifth row: first shard. // Sixth row: second shard (because 3 hash maps to 40-60). query := "insert ignore into insert_ignore_test(pv, owned, verify) values (1, 1, 1), (2, 2, 2), (3, 3, 1), (4, 4, 4), (5, 5, 1), (6, 6, 3)" - _, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert ignore into insert_ignore_test(pv, owned, verify) values (:_pv_0, :_owned_0, :_verify_0),(:_pv_4, :_owned_4, :_verify_4)", @@ -1546,7 +1605,7 @@ func TestInsertShardedIgnore(t *testing.T) { {}, }) query = "insert ignore into insert_ignore_test(pv, owned, verify) values (1, 1, 1)" - qr, err := executorExec(executor, query, nil) + qr, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) if !qr.Equal(&sqltypes.Result{}) { t.Errorf("qr: %v, want empty result", qr) @@ -1567,13 +1626,16 @@ func TestInsertShardedIgnore(t *testing.T) { func TestInsertOnDupKey(t *testing.T) { // This test just sanity checks that the statement is getting passed through // correctly. The full set of use cases are covered by TestInsertShardedIgnore. - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) sbclookup.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("b|a", "int64|varbinary"), "1|1", )}) query := "insert into insert_ignore_test(pv, owned, verify) values (1, 1, 1) on duplicate key update col = 2" - _, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into insert_ignore_test(pv, owned, verify) values (:_pv_0, :_owned_0, :_verify_0) on duplicate key update col = 2", @@ -1609,26 +1671,29 @@ func TestInsertOnDupKey(t *testing.T) { } func TestAutocommitFail(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) query := "insert into user (id) values (1)" sbc1.MustFailCodes[vtrpcpb.Code_ALREADY_EXISTS] = 1 - primarySession.Reset() - primarySession.Autocommit = true - defer func() { - primarySession.Autocommit = false - }() - _, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + Autocommit: true, + } + + _, err := executorExec(ctx, executor, session, query, nil) require.Error(t, err) // make sure we have closed and rolled back any transactions started - assert.False(t, primarySession.InTransaction, "left with tx open") + assert.False(t, session.InTransaction, "left with tx open") } func TestInsertComments(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "insert into user(id, v, name) values (1, 2, 'myname') /* trailing */", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into user(id, v, name) values (1, 2, 'myname') /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 2, :_name_0) /* trailing */", @@ -1651,7 +1716,7 @@ func TestInsertComments(t *testing.T) { } func TestInsertGeneratorSharded(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) sbclookup.SetResults([]*sqltypes.Result{{ Rows: [][]sqltypes.Value{{ @@ -1660,7 +1725,10 @@ func TestInsertGeneratorSharded(t *testing.T) { RowsAffected: 1, InsertID: 1, }}) - result, err := executorExec(executor, "insert into user(v, `name`) values (2, 'myname')", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "insert into user(v, `name`) values (2, 'myname')", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into `user`(v, `name`, id) values (2, :_name_0, :_Id_0)", @@ -1672,7 +1740,7 @@ func TestInsertGeneratorSharded(t *testing.T) { }} assertQueries(t, sbc, wantQueries) wantQueries = []*querypb.BoundQuery{{ - Sql: "select next :n values from user_seq", + Sql: "select next :n /* INT64 */ values from user_seq", BindVariables: map[string]*querypb.BindVariable{"n": sqltypes.Int64BindVariable(1)}, }, { Sql: "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0)", @@ -1690,7 +1758,7 @@ func TestInsertGeneratorSharded(t *testing.T) { } func TestInsertAutoincSharded(t *testing.T) { - router, sbc, _, _ := createExecutorEnv() + router, sbc, _, _, ctx := createExecutorEnv(t) // Fake a mysql auto-inc response. wantResult := &sqltypes.Result{ @@ -1701,7 +1769,10 @@ func TestInsertAutoincSharded(t *testing.T) { InsertID: 2, } sbc.SetResults([]*sqltypes.Result{wantResult}) - result, err := executorExec(router, "insert into user_extra(user_id) values (2)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, router, session, "insert into user_extra(user_id) values (2)", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into user_extra(user_id) values (:_user_id_0)", @@ -1713,15 +1784,18 @@ func TestInsertAutoincSharded(t *testing.T) { if !result.Equal(wantResult) { t.Errorf("result: %+v, want %+v", result, wantResult) } - assert.EqualValues(t, 2, primarySession.LastInsertId) + assert.EqualValues(t, 2, session.LastInsertId) } func TestInsertGeneratorUnsharded(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() - result, err := executorExec(executor, "insert into main1(id, name) values (null, 'myname')", nil) + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "insert into main1(id, name) values (null, 'myname')", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select next :n values from user_seq", + Sql: "select next :n /* INT64 */ values from user_seq", BindVariables: map[string]*querypb.BindVariable{"n": sqltypes.Int64BindVariable(1)}, }, { Sql: "insert into main1(id, `name`) values (:__seq0, 'myname')", @@ -1738,10 +1812,10 @@ func TestInsertGeneratorUnsharded(t *testing.T) { } func TestInsertAutoincUnsharded(t *testing.T) { - router, _, _, sbclookup := createExecutorEnv() + router, _, _, sbclookup, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := router.queryLogger.Subscribe("Test") + defer router.queryLogger.Unsubscribe(logChan) // Fake a mysql auto-inc response. query := "insert into `simple`(val) values ('val')" @@ -1754,7 +1828,10 @@ func TestInsertAutoincUnsharded(t *testing.T) { } sbclookup.SetResults([]*sqltypes.Result{wantResult}) - result, err := executorExec(router, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, router, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: query, @@ -1763,13 +1840,16 @@ func TestInsertAutoincUnsharded(t *testing.T) { assertQueries(t, sbclookup, wantQueries) assert.Equal(t, result, wantResult) - testQueryLog(t, logChan, "TestExecute", "INSERT", "insert into `simple`(val) values ('val')", 1) + testQueryLog(t, router, logChan, "TestExecute", "INSERT", "insert into `simple`(val) values ('val')", 1) } func TestInsertLookupOwned(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "insert into music(user_id, id) values (2, 3)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into music(user_id, id) values (2, 3)", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into music(user_id, id) values (:_user_id_0, :_id_0)", @@ -1791,7 +1871,7 @@ func TestInsertLookupOwned(t *testing.T) { } func TestInsertLookupOwnedGenerator(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) sbclookup.SetResults([]*sqltypes.Result{{ Rows: [][]sqltypes.Value{{ @@ -1800,7 +1880,10 @@ func TestInsertLookupOwnedGenerator(t *testing.T) { RowsAffected: 1, InsertID: 1, }}) - result, err := executorExec(executor, "insert into music(user_id) values (2)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "insert into music(user_id) values (2)", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into music(user_id, id) values (:_user_id_0, :_id_0)", @@ -1812,7 +1895,7 @@ func TestInsertLookupOwnedGenerator(t *testing.T) { }} assertQueries(t, sbc, wantQueries) wantQueries = []*querypb.BoundQuery{{ - Sql: "select next :n values from user_seq", + Sql: "select next :n /* INT64 */ values from user_seq", BindVariables: map[string]*querypb.BindVariable{"n": sqltypes.Int64BindVariable(1)}, }, { Sql: "insert into music_user_map(music_id, user_id) values (:music_id_0, :user_id_0)", @@ -1830,9 +1913,12 @@ func TestInsertLookupOwnedGenerator(t *testing.T) { } func TestInsertLookupUnowned(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "insert into music_extra(user_id, music_id) values (2, 3)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into music_extra(user_id, music_id) values (2, 3)", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into music_extra(user_id, music_id) values (:_user_id_0, :_music_id_0)", @@ -1853,12 +1939,15 @@ func TestInsertLookupUnowned(t *testing.T) { } func TestInsertLookupUnownedUnsupplied(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) sbclookup.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("b|a", "int64|varbinary"), "3|1", )}) - _, err := executorExec(executor, "insert into music_extra_reversed(music_id) values (3)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into music_extra_reversed(music_id) values (3)", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into music_extra_reversed(music_id, user_id) values (:_music_id_0, :_user_id_0)", @@ -1882,13 +1971,14 @@ func TestInsertLookupUnownedUnsupplied(t *testing.T) { // If a statement gets broken up into two, and the first one fails, // then an error should be returned normally. func TestInsertPartialFail1(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, _ := createExecutorEnv(t) // Make the first DML fail, there should be no rollback. sbclookup.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 _, err := executor.Execute( context.Background(), + nil, "TestExecute", NewSafeSession(&vtgatepb.Session{InTransaction: true}), "insert into user(id, v, name) values (1, 2, 'myname')", @@ -1901,7 +1991,7 @@ func TestInsertPartialFail1(t *testing.T) { // after successful execution of the first, then the transaction must // be rolled back due to partial execution. func TestInsertPartialFail2(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) // Make the second DML fail, it should result in a rollback. sbc1.MustFailExecute[sqlparser.StmtInsert] = 1 @@ -1909,6 +1999,7 @@ func TestInsertPartialFail2(t *testing.T) { safeSession := NewSafeSession(&vtgatepb.Session{InTransaction: true}) _, err := executor.Execute( context.Background(), + nil, "TestExecute", safeSession, "insert into user(id, v, name) values (1, 2, 'myname')", @@ -1940,9 +2031,12 @@ func TestInsertPartialFail2(t *testing.T) { } func TestMultiInsertSharded(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "insert into user(id, v, name) values (1, 1, 'myname1'),(3, 3, 'myname3')", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into user(id, v, name) values (1, 1, 'myname1'),(3, 3, 'myname3')", nil) require.NoError(t, err) wantQueries1 := []*querypb.BoundQuery{{ Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 1, :_name_0)", @@ -1984,7 +2078,7 @@ func TestMultiInsertSharded(t *testing.T) { sbc1.Queries = nil sbclookup.Queries = nil sbc2.Queries = nil - _, err = executorExec(executor, "insert into user(id, v, name) values (1, 1, 'myname1'),(2, 2, 'myname2')", nil) + _, err = executorExec(ctx, executor, session, "insert into user(id, v, name) values (1, 1, 'myname1'),(2, 2, 'myname2')", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into `user`(id, v, `name`) values (:_Id_0, 1, :_name_0),(:_Id_1, 2, :_name_1)", @@ -2015,7 +2109,7 @@ func TestMultiInsertSharded(t *testing.T) { sbc1.Queries = nil sbclookup.Queries = nil sbc2.Queries = nil - _, err = executorExec(executor, "insert into user2(id, `name`, lastname) values (2, 'myname', 'mylastname'), (3, 'myname2', 'mylastname2')", nil) + _, err = executorExec(ctx, executor, session, "insert into user2(id, `name`, lastname) values (2, 'myname', 'mylastname'), (3, 'myname2', 'mylastname2')", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "insert into user2(id, `name`, lastname) values (:_id_0, :_name_0, :_lastname_0)", @@ -2044,7 +2138,7 @@ func TestMultiInsertSharded(t *testing.T) { } func TestMultiInsertGenerator(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) sbclookup.SetResults([]*sqltypes.Result{{ Rows: [][]sqltypes.Value{{ @@ -2053,7 +2147,10 @@ func TestMultiInsertGenerator(t *testing.T) { RowsAffected: 1, InsertID: 1, }}) - result, err := executorExec(executor, "insert into music(user_id, `name`) values (:u, 'myname1'),(:u, 'myname2')", map[string]*querypb.BindVariable{"u": sqltypes.Int64BindVariable(2)}) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "insert into music(user_id, `name`) values (:u, 'myname1'),(:u, 'myname2')", map[string]*querypb.BindVariable{"u": sqltypes.Int64BindVariable(2)}) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into music(user_id, `name`, id) values (:_user_id_0, 'myname1', :_id_0),(:_user_id_1, 'myname2', :_id_1)", @@ -2069,7 +2166,7 @@ func TestMultiInsertGenerator(t *testing.T) { }} assertQueries(t, sbc, wantQueries) wantQueries = []*querypb.BoundQuery{{ - Sql: "select next :n values from user_seq", + Sql: "select next :n /* INT64 */ values from user_seq", BindVariables: map[string]*querypb.BindVariable{"n": sqltypes.Int64BindVariable(2)}, }, { Sql: "insert into music_user_map(music_id, user_id) values (:music_id_0, :user_id_0), (:music_id_1, :user_id_1)", @@ -2089,7 +2186,7 @@ func TestMultiInsertGenerator(t *testing.T) { } func TestMultiInsertGeneratorSparse(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) sbclookup.SetResults([]*sqltypes.Result{{ Rows: [][]sqltypes.Value{{ @@ -2098,7 +2195,10 @@ func TestMultiInsertGeneratorSparse(t *testing.T) { RowsAffected: 1, InsertID: 1, }}) - result, err := executorExec(executor, "insert into music(id, user_id, name) values (NULL, :u, 'myname1'),(2, :u, 'myname2'), (NULL, :u, 'myname3')", map[string]*querypb.BindVariable{"u": sqltypes.Int64BindVariable(2)}) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "insert into music(id, user_id, name) values (NULL, :u, 'myname1'),(2, :u, 'myname2'), (NULL, :u, 'myname3')", map[string]*querypb.BindVariable{"u": sqltypes.Int64BindVariable(2)}) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into music(id, user_id, `name`) values (:_id_0, :_user_id_0, 'myname1'),(:_id_1, :_user_id_1, 'myname2'),(:_id_2, :_user_id_2, 'myname3')", @@ -2117,7 +2217,7 @@ func TestMultiInsertGeneratorSparse(t *testing.T) { }} assertQueries(t, sbc, wantQueries) wantQueries = []*querypb.BoundQuery{{ - Sql: "select next :n values from user_seq", + Sql: "select next :n /* INT64 */ values from user_seq", BindVariables: map[string]*querypb.BindVariable{"n": sqltypes.Int64BindVariable(2)}, }, { Sql: "insert into music_user_map(music_id, user_id) values (:music_id_0, :user_id_0), (:music_id_1, :user_id_1), (:music_id_2, :user_id_2)", @@ -2163,10 +2263,13 @@ func TestInsertBadAutoInc(t *testing.T) { } } ` - executor, _, _, _ := createCustomExecutor(vschema) + executor, _, _, _, ctx := createCustomExecutor(t, vschema) // If auto inc table cannot be found, the table should not be added to vschema. - _, err := executorExec(executor, "insert into bad_auto(v, name) values (1, 'myname')", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into bad_auto(v, name) values (1, 'myname')", nil) want := "table bad_auto not found" if err == nil || err.Error() != want { t.Errorf("bad auto inc err: %v, want %v", err, want) @@ -2233,10 +2336,12 @@ func TestKeyDestRangeQuery(t *testing.T) { for _, tc := range tests { t.Run(tc.targetString+" - "+tc.inputQuery, func(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) - primarySession.TargetString = tc.targetString - _, err := executorExec(executor, tc.inputQuery, nil) + session := &vtgatepb.Session{ + TargetString: tc.targetString, + } + _, err := executorExec(ctx, executor, session, tc.inputQuery, nil) require.NoError(t, err) if tc.expectedSbc1Query == "" { @@ -2254,13 +2359,13 @@ func TestKeyDestRangeQuery(t *testing.T) { } // it does not work for inserts - executor, _, _, _ := createExecutorEnv() - primarySession.TargetString = "TestExecutor[-]" - _, err := executorExec(executor, insertInput, nil) + executor, _, _, _, ctx := createExecutorEnv(t) + session := &vtgatepb.Session{ + TargetString: "TestExecutor[-]", + } + _, err := executorExec(ctx, executor, session, insertInput, nil) require.EqualError(t, err, "VT03023: INSERT not supported when targeting a key range: TestExecutor[-]") - - primarySession.TargetString = "" } func assertQueriesContain(t *testing.T, sql, sbcName string, sbc *sandboxconn.SandboxConn) { @@ -2274,12 +2379,15 @@ func assertQueriesContain(t *testing.T, sql, sbcName string, sbc *sandboxconn.Sa // Prepared statement tests func TestUpdateEqualWithPrepare(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - _, err := executorPrepare(executor, "update music set a = :a0 where id = :id0", map[string]*querypb.BindVariable{ + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorPrepare(ctx, executor, session, "update music set a = :a0 where id = :id0", map[string]*querypb.BindVariable{ "a0": sqltypes.Int64BindVariable(3), "id0": sqltypes.Int64BindVariable(2), }) @@ -2292,12 +2400,15 @@ func TestUpdateEqualWithPrepare(t *testing.T) { assertQueries(t, sbc1, nil) } func TestInsertShardedWithPrepare(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - _, err := executorPrepare(executor, "insert into user(id, v, name) values (:_Id0, 2, ':_name_0')", map[string]*querypb.BindVariable{ + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorPrepare(ctx, executor, session, "insert into user(id, v, name) values (:_Id0, 2, ':_name_0')", map[string]*querypb.BindVariable{ "_Id0": sqltypes.Int64BindVariable(1), "_name_0": sqltypes.BytesBindVariable([]byte("myname")), "__seq0": sqltypes.Int64BindVariable(1), @@ -2313,8 +2424,12 @@ func TestInsertShardedWithPrepare(t *testing.T) { } func TestDeleteEqualWithPrepare(t *testing.T) { - executor, sbc, _, sbclookup := createExecutorEnv() - _, err := executorPrepare(executor, "delete from user where id = :id0", map[string]*querypb.BindVariable{ + executor, sbc, _, sbclookup, ctx := createExecutorEnv(t) + + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorPrepare(ctx, executor, session, "delete from user where id = :id0", map[string]*querypb.BindVariable{ "id0": sqltypes.Int64BindVariable(1), }) require.NoError(t, err) @@ -2327,15 +2442,19 @@ func TestDeleteEqualWithPrepare(t *testing.T) { } func TestUpdateLastInsertID(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) + executor.normalize = true sql := "update user set a = last_insert_id() where id = 1" - primarySession.LastInsertId = 43 - _, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) + session := &vtgatepb.Session{ + TargetString: "@primary", + LastInsertId: 43, + } + _, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "update `user` set a = :__lastInsertId where id = :id", + Sql: "update `user` set a = :__lastInsertId where id = :id /* INT64 */", BindVariables: map[string]*querypb.BindVariable{ "__lastInsertId": sqltypes.Uint64BindVariable(43), "id": sqltypes.Int64BindVariable(1)}, @@ -2345,12 +2464,15 @@ func TestUpdateLastInsertID(t *testing.T) { } func TestUpdateReference(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - _, err := executorExec(executor, "update zip_detail set status = 'CLOSED' where id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "update zip_detail set status = 'CLOSED' where id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "update zip_detail set `status` = 'CLOSED' where id = 1", @@ -2360,11 +2482,11 @@ func TestUpdateReference(t *testing.T) { assertQueries(t, sbc2, nil) assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, "TestExecute", "UPDATE", "update zip_detail set status = 'CLOSED' where id = 1", 1) + testQueryLog(t, executor, logChan, "TestExecute", "UPDATE", "update zip_detail set `status` = 'CLOSED' where id = 1", 1) sbclookup.Queries = nil - _, err = executorExec(executor, "update TestUnsharded.zip_detail set status = 'CLOSED' where id = 1", nil) + _, err = executorExec(ctx, executor, session, "update TestUnsharded.zip_detail set status = 'CLOSED' where id = 1", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "update zip_detail set `status` = 'CLOSED' where id = 1", @@ -2374,22 +2496,25 @@ func TestUpdateReference(t *testing.T) { assertQueries(t, sbc2, nil) assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, "TestExecute", "UPDATE", - "update TestUnsharded.zip_detail set status = 'CLOSED' where id = 1", 1) + testQueryLog(t, executor, logChan, "TestExecute", "UPDATE", + "update TestUnsharded.zip_detail set `status` = 'CLOSED' where id = 1", 1) sbclookup.Queries = nil - _, err = executorExec(executor, "update TestExecutor.zip_detail set status = 'CLOSED' where id = 1", nil) - require.Error(t, err) + _, err = executorExec(ctx, executor, session, "update TestExecutor.zip_detail set status = 'CLOSED' where id = 1", nil) + require.NoError(t, err) // Gen4 planner can redirect the query to correct source for update when reference table is involved. } func TestDeleteLookupOwnedEqual(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) sbc1.SetResults([]*sqltypes.Result{ sqltypes.MakeTestResult(sqltypes.MakeTestFields("uniq_col|keyspace_id", "int64|varbinary"), "1|N±\u0090ɢú\u0016\u009C"), }) - _, err := executorExec(executor, "delete from t1 where unq_col = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from t1 where unq_col = 1", nil) require.NoError(t, err) tupleBindVar, _ := sqltypes.BuildBindVariable([]int64{1}) sbc1wantQueries := []*querypb.BoundQuery{{ @@ -2411,12 +2536,15 @@ func TestDeleteLookupOwnedEqual(t *testing.T) { } func TestDeleteReference(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - _, err := executorExec(executor, "delete from zip_detail where id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "delete from zip_detail where id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "delete from zip_detail where id = 1", @@ -2426,11 +2554,11 @@ func TestDeleteReference(t *testing.T) { assertQueries(t, sbc2, nil) assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, "TestExecute", "DELETE", "delete from zip_detail where id = 1", 1) + testQueryLog(t, executor, logChan, "TestExecute", "DELETE", "delete from zip_detail where id = 1", 1) sbclookup.Queries = nil - _, err = executorExec(executor, "delete from zip_detail where id = 1", nil) + _, err = executorExec(ctx, executor, session, "delete from zip_detail where id = 1", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "delete from zip_detail where id = 1", @@ -2440,24 +2568,23 @@ func TestDeleteReference(t *testing.T) { assertQueries(t, sbc2, nil) assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, "TestExecute", "DELETE", "delete from zip_detail where id = 1", 1) + testQueryLog(t, executor, logChan, "TestExecute", "DELETE", "delete from zip_detail where id = 1", 1) sbclookup.Queries = nil - _, err = executorExec(executor, "delete from TestExecutor.zip_detail where id = 1", nil) - require.Error(t, err) + _, err = executorExec(ctx, executor, session, "delete from TestExecutor.zip_detail where id = 1", nil) + require.NoError(t, err) // Gen4 planner can redirect the query to correct source for update when reference table is involved. } func TestReservedConnDML(t *testing.T) { - executor, _, _, sbc := createExecutorEnv() + executor, _, _, sbc, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("TestReservedConnDML") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("TestReservedConnDML") + defer executor.queryLogger.Unsubscribe(logChan) - ctx := context.Background() session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true}) - _, err := executor.Execute(ctx, "TestReservedConnDML", session, "use "+KsTestUnsharded, nil) + _, err := executor.Execute(ctx, nil, "TestReservedConnDML", session, "use "+KsTestUnsharded, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ @@ -2466,48 +2593,47 @@ func TestReservedConnDML(t *testing.T) { sbc.SetResults([]*sqltypes.Result{ sqltypes.MakeTestResult(sqltypes.MakeTestFields("id", "int64"), "1"), }) - _, err = executor.Execute(ctx, "TestReservedConnDML", session, "set default_week_format = 1", nil) + _, err = executor.Execute(ctx, nil, "TestReservedConnDML", session, "set default_week_format = 1", nil) require.NoError(t, err) assertQueries(t, sbc, wantQueries) - _, err = executor.Execute(ctx, "TestReservedConnDML", session, "begin", nil) + _, err = executor.Execute(ctx, nil, "TestReservedConnDML", session, "begin", nil) require.NoError(t, err) wantQueries = append(wantQueries, &querypb.BoundQuery{Sql: "set default_week_format = 1", BindVariables: map[string]*querypb.BindVariable{}}, &querypb.BoundQuery{Sql: "insert into `simple`() values ()", BindVariables: map[string]*querypb.BindVariable{}}) - _, err = executor.Execute(ctx, "TestReservedConnDML", session, "insert into `simple`() values ()", nil) + _, err = executor.Execute(ctx, nil, "TestReservedConnDML", session, "insert into `simple`() values ()", nil) require.NoError(t, err) assertQueries(t, sbc, wantQueries) - _, err = executor.Execute(ctx, "TestReservedConnDML", session, "commit", nil) + _, err = executor.Execute(ctx, nil, "TestReservedConnDML", session, "commit", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, "TestReservedConnDML", session, "begin", nil) + _, err = executor.Execute(ctx, nil, "TestReservedConnDML", session, "begin", nil) require.NoError(t, err) - sbc.EphemeralShardErr = mysql.NewSQLError(mysql.CRServerGone, mysql.SSNetError, "connection gone") + sbc.EphemeralShardErr = sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSNetError, "connection gone") // as the first time the query fails due to connection loss i.e. reserved conn lost. It will be recreated to set statement will be executed again. wantQueries = append(wantQueries, &querypb.BoundQuery{Sql: "set default_week_format = 1", BindVariables: map[string]*querypb.BindVariable{}}, &querypb.BoundQuery{Sql: "insert into `simple`() values ()", BindVariables: map[string]*querypb.BindVariable{}}) - _, err = executor.Execute(ctx, "TestReservedConnDML", session, "insert into `simple`() values ()", nil) + _, err = executor.Execute(ctx, nil, "TestReservedConnDML", session, "insert into `simple`() values ()", nil) require.NoError(t, err) assertQueries(t, sbc, wantQueries) - _, err = executor.Execute(ctx, "TestReservedConnDML", session, "commit", nil) + _, err = executor.Execute(ctx, nil, "TestReservedConnDML", session, "commit", nil) require.NoError(t, err) } func TestStreamingDML(t *testing.T) { method := "TestStreamingDML" - executor, _, _, sbc := createExecutorEnv() + executor, _, _, sbc, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe(method) - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe(method) + defer executor.queryLogger.Unsubscribe(logChan) - ctx := context.Background() session := NewAutocommitSession(&vtgatepb.Session{}) tcases := []struct { @@ -2568,7 +2694,7 @@ func TestStreamingDML(t *testing.T) { for _, tcase := range tcases { sbc.Queries = nil sbc.SetResults([]*sqltypes.Result{tcase.result}) - err := executor.StreamExecute(ctx, method, session, tcase.query, nil, func(result *sqltypes.Result) error { + err := executor.StreamExecute(ctx, nil, method, session, tcase.query, nil, func(result *sqltypes.Result) error { qr = result return nil }) @@ -2587,16 +2713,16 @@ func TestStreamingDML(t *testing.T) { } func TestPartialVindexInsertQueryFailure(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) session := NewAutocommitSession(&vtgatepb.Session{}) require.True(t, session.GetAutocommit()) require.False(t, session.InTransaction()) - _, err := executorExecSession(executor, "begin", nil, session.Session) + _, err := executorExecSession(ctx, executor, "begin", nil, session.Session) require.NoError(t, err) require.True(t, session.GetAutocommit()) require.True(t, session.InTransaction()) @@ -2621,7 +2747,7 @@ func TestPartialVindexInsertQueryFailure(t *testing.T) { BindVariables: map[string]*querypb.BindVariable{}, }} - _, err = executorExecSession(executor, "insert into t1(id, unq_col) values (1, 1), (2, 3)", nil, session.Session) + _, err = executorExecSession(ctx, executor, "insert into t1(id, unq_col) values (1, 1), (2, 3)", nil, session.Session) require.Error(t, err) require.Contains(t, err.Error(), "reverted partial DML execution failure") require.True(t, session.GetAutocommit()) @@ -2633,17 +2759,17 @@ func TestPartialVindexInsertQueryFailure(t *testing.T) { wantQ[1].Sql = "insert into t1_lkp_idx(unq_col, keyspace_id) values (:_unq_col_1, :keyspace_id_1)" assertQueriesWithSavepoint(t, sbc2, wantQ) - testQueryLog(t, logChan, "TestExecute", "BEGIN", "begin", 0) - testQueryLog(t, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 0) - testQueryLog(t, logChan, "VindexCreate", "SAVEPOINT_ROLLBACK", "rollback to x", 0) - testQueryLog(t, logChan, "TestExecute", "INSERT", "insert into t1(id, unq_col) values (1, 1), (2, 3)", 0) + testQueryLog(t, executor, logChan, "TestExecute", "BEGIN", "begin", 0) + testQueryLog(t, executor, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 0) + testQueryLog(t, executor, logChan, "VindexCreate", "SAVEPOINT_ROLLBACK", "rollback to x", 0) + testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into t1(id, unq_col) values (1, 1), (2, 3)", 0) } func TestPartialVindexInsertQueryFailureAutoCommit(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) session := NewAutocommitSession(&vtgatepb.Session{}) require.True(t, session.GetAutocommit()) @@ -2663,7 +2789,7 @@ func TestPartialVindexInsertQueryFailureAutoCommit(t *testing.T) { }, }} - _, err := executorExecSession(executor, "insert into t1(id, unq_col) values (1, 1), (2, 3)", nil, session.Session) + _, err := executorExecSession(ctx, executor, "insert into t1(id, unq_col) values (1, 1), (2, 3)", nil, session.Session) require.Error(t, err) assert.Contains(t, err.Error(), "transaction rolled back to reverse changes of partial DML execution") assert.True(t, session.GetAutocommit()) @@ -2675,8 +2801,8 @@ func TestPartialVindexInsertQueryFailureAutoCommit(t *testing.T) { wantQ[0].Sql = "insert into t1_lkp_idx(unq_col, keyspace_id) values (:_unq_col_1, :keyspace_id_1)" assertQueriesWithSavepoint(t, sbc2, wantQ) - testQueryLog(t, logChan, "VindexCreate", "INSERT", "insert into t1_lkp_idx(unq_col, keyspace_id) values(:unq_col_0, :keyspace_id_0), (:unq_col_1, :keyspace_id_1)", 2) - testQueryLog(t, logChan, "TestExecute", "INSERT", "insert into t1(id, unq_col) values (1, 1), (2, 3)", 0) + testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into t1_lkp_idx(unq_col, keyspace_id) values (:unq_col_0, :keyspace_id_0), (:unq_col_1, :keyspace_id_1)", 2) + testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into t1(id, unq_col) values (1, 1), (2, 3)", 0) } // TestMultiInternalSavepoint shows that the internal savepoint created for rolling back any partial dml changes on a failure is not removed from the savepoint list. @@ -2684,14 +2810,14 @@ func TestPartialVindexInsertQueryFailureAutoCommit(t *testing.T) { // The change for it cannot be done as the executor level and will be made at the VTGate entry point. // Test TestMultiInternalSavepointVtGate shows that it fixes the behaviour. func TestMultiInternalSavepoint(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) session := NewAutocommitSession(&vtgatepb.Session{}) - _, err := executorExecSession(executor, "begin", nil, session.Session) + _, err := executorExecSession(ctx, executor, "begin", nil, session.Session) require.NoError(t, err) // this query goes to multiple shards so internal savepoint will be created. - _, err = executorExecSession(executor, "insert into user_extra(user_id) values (1), (4)", nil, session.Session) + _, err = executorExecSession(ctx, executor, "insert into user_extra(user_id) values (1), (4)", nil, session.Session) require.NoError(t, err) wantQ := []*querypb.BoundQuery{{ @@ -2708,7 +2834,7 @@ func TestMultiInternalSavepoint(t *testing.T) { require.Len(t, sbc2.Queries, 0) sbc1.Queries = nil - _, err = executorExecSession(executor, "insert into user_extra(user_id) values (3), (6)", nil, session.Session) + _, err = executorExecSession(ctx, executor, "insert into user_extra(user_id) values (3), (6)", nil, session.Session) require.NoError(t, err) wantQ = []*querypb.BoundQuery{{ Sql: "savepoint x", @@ -2732,15 +2858,18 @@ func TestMultiInternalSavepoint(t *testing.T) { } func TestInsertSelectFromDual(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, _ := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("TestInsertSelect") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("TestInsertSelect") + defer executor.queryLogger.Unsubscribe(logChan) session := NewAutocommitSession(&vtgatepb.Session{}) query := "insert into user(id, v, name) select 1, 2, 'myname' from dual" wantQueries := []*querypb.BoundQuery{{ + Sql: "select 1, 2, 'myname' from dual lock in share mode", + BindVariables: map[string]*querypb.BindVariable{}, + }, { Sql: "insert into `user`(id, v, `name`) values (:_c0_0, :_c0_1, :_c0_2)", BindVariables: map[string]*querypb.BindVariable{ "_c0_0": sqltypes.Int64BindVariable(1), @@ -2758,37 +2887,42 @@ func TestInsertSelectFromDual(t *testing.T) { }} for _, workload := range []string{"olap", "oltp"} { - sbc1.Queries = nil - sbc2.Queries = nil - sbclookup.Queries = nil - wQuery := fmt.Sprintf("set @@workload = %s", workload) - _, err := executor.Execute(context.Background(), "TestInsertSelect", session, wQuery, nil) - require.NoError(t, err) + t.Run(workload, func(t *testing.T) { + sbc1.Queries = nil + sbc2.Queries = nil + sbclookup.Queries = nil + wQuery := fmt.Sprintf("set @@workload = %s", workload) + // set result for dual query. + sbc1.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult(sqltypes.MakeTestFields("1|2|myname", "int64|int64|varchar"), "1|2|myname")}) + + _, err := executor.Execute(context.Background(), nil, "TestInsertSelect", session, wQuery, nil) + require.NoError(t, err) - _, err = executor.Execute(context.Background(), "TestInsertSelect", session, query, nil) - require.NoError(t, err) + _, err = executor.Execute(context.Background(), nil, "TestInsertSelect", session, query, nil) + require.NoError(t, err) - assertQueries(t, sbc1, wantQueries) - assertQueries(t, sbc2, nil) - assertQueries(t, sbclookup, wantlkpQueries) + assertQueries(t, sbc1, wantQueries) + assertQueries(t, sbc2, nil) + assertQueries(t, sbclookup, wantlkpQueries) - testQueryLog(t, logChan, "TestInsertSelect", "SET", wQuery, 0) - testQueryLog(t, logChan, "VindexCreate", "INSERT", "insert into name_user_map(name, user_id) values(:name_0, :user_id_0)", 1) - testQueryLog(t, logChan, "TestInsertSelect", "INSERT", "insert into user(id, v, name) select 1, 2, 'myname' from dual", 1) + testQueryLog(t, executor, logChan, "TestInsertSelect", "SET", wQuery, 0) + testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0)", 1) + testQueryLog(t, executor, logChan, "TestInsertSelect", "INSERT", "insert into `user`(id, v, `name`) select 1, 2, 'myname' from dual", 2) + }) } } func TestInsertSelectFromTable(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, _ := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("TestInsertSelect") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("TestInsertSelect") + defer executor.queryLogger.Unsubscribe(logChan) session := NewAutocommitSession(&vtgatepb.Session{}) query := "insert into user(id, name) select c1, c2 from music" wantQueries := []*querypb.BoundQuery{{ - Sql: "select c1, c2 from music for update", + Sql: "select c1, c2 from music lock in share mode", BindVariables: map[string]*querypb.BindVariable{}, }, { Sql: "insert into `user`(id, `name`) values (:_c0_0, :_c0_1), (:_c1_0, :_c1_1), (:_c2_0, :_c2_1), (:_c3_0, :_c3_1), (:_c4_0, :_c4_1), (:_c5_0, :_c5_1), (:_c6_0, :_c6_1), (:_c7_0, :_c7_1)", @@ -2823,29 +2957,32 @@ func TestInsertSelectFromTable(t *testing.T) { sbc2.Queries = nil sbclookup.Queries = nil wQuery := fmt.Sprintf("set @@workload = %s", workload) - _, err := executor.Execute(context.Background(), "TestInsertSelect", session, wQuery, nil) + _, err := executor.Execute(context.Background(), nil, "TestInsertSelect", session, wQuery, nil) require.NoError(t, err) - _, err = executor.Execute(context.Background(), "TestInsertSelect", session, query, nil) + _, err = executor.Execute(context.Background(), nil, "TestInsertSelect", session, query, nil) require.NoError(t, err) assertQueries(t, sbc1, wantQueries) assertQueries(t, sbc2, wantQueries[:1]) // select scatter query went scatter. assertQueries(t, sbclookup, wantlkpQueries) - testQueryLog(t, logChan, "TestInsertSelect", "SET", wQuery, 0) - testQueryLog(t, logChan, "VindexCreate", "INSERT", "insert into name_user_map(name, user_id) values(:name_0, :user_id_0), (:name_1, :user_id_1), (:name_2, :user_id_2), (:name_3, :user_id_3), (:name_4, :user_id_4), (:name_5, :user_id_5), (:name_6, :user_id_6), (:name_7, :user_id_7)", 1) - testQueryLog(t, logChan, "TestInsertSelect", "INSERT", "insert into user(id, name) select c1, c2 from music", 9) // 8 from select and 1 from insert. + testQueryLog(t, executor, logChan, "TestInsertSelect", "SET", wQuery, 0) + testQueryLog(t, executor, logChan, "VindexCreate", "INSERT", "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0), (:name_1, :user_id_1), (:name_2, :user_id_2), (:name_3, :user_id_3), (:name_4, :user_id_4), (:name_5, :user_id_5), (:name_6, :user_id_6), (:name_7, :user_id_7)", 1) + testQueryLog(t, executor, logChan, "TestInsertSelect", "INSERT", "insert into `user`(id, `name`) select c1, c2 from music", 9) // 8 from select and 1 from insert. } } func TestInsertReference(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - _, err := executorExec(executor, "insert into zip_detail(id, status) values (1, 'CLOSED')", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "insert into zip_detail(id, status) values (1, 'CLOSED')", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "insert into zip_detail(id, `status`) values (1, 'CLOSED')", @@ -2855,11 +2992,11 @@ func TestInsertReference(t *testing.T) { assertQueries(t, sbc2, nil) assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, "TestExecute", "INSERT", "insert into zip_detail(id, status) values (1, 'CLOSED')", 1) + testQueryLog(t, executor, logChan, "TestExecute", "INSERT", "insert into zip_detail(id, `status`) values (1, 'CLOSED')", 1) sbclookup.Queries = nil - _, err = executorExec(executor, "insert into TestUnsharded.zip_detail(id, status) values (1, 'CLOSED')", nil) + _, err = executorExec(ctx, executor, session, "insert into TestUnsharded.zip_detail(id, status) values (1, 'CLOSED')", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "insert into zip_detail(id, `status`) values (1, 'CLOSED')", @@ -2869,11 +3006,11 @@ func TestInsertReference(t *testing.T) { assertQueries(t, sbc2, nil) assertQueries(t, sbclookup, wantQueries) - testQueryLog(t, logChan, "TestExecute", "INSERT", - "insert into TestUnsharded.zip_detail(id, status) values (1, 'CLOSED')", 1) + testQueryLog(t, executor, logChan, "TestExecute", "INSERT", + "insert into TestUnsharded.zip_detail(id, `status`) values (1, 'CLOSED')", 1) sbclookup.Queries = nil - _, err = executorExec(executor, "insert into TestExecutor.zip_detail(id, status) values (1, 'CLOSED')", nil) - require.Error(t, err) + _, err = executorExec(ctx, executor, session, "insert into TestExecutor.zip_detail(id, status) values (1, 'CLOSED')", nil) + require.NoError(t, err) // Gen4 planner can redirect the query to correct source for update when reference table is involved. } diff --git a/go/vt/vtgate/executor_framework_test.go b/go/vt/vtgate/executor_framework_test.go index 2ea2f83071e..107215d6f4d 100644 --- a/go/vt/vtgate/executor_framework_test.go +++ b/go/vt/vtgate/executor_framework_test.go @@ -25,25 +25,27 @@ import ( "strings" "testing" - "vitess.io/vitess/go/vt/vtgate/logstats" - - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/cache/theine" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/cache" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/log" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/vtgate/logstats" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/sandboxconn" - - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) //go:embed testdata/executorVSchema.json @@ -126,13 +128,41 @@ func init() { vindexes.Register("keyrange_lookuper_unique", newKeyRangeLookuperUnique) } -func createExecutorEnv() (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn) { +func createExecutorEnv(t testing.TB) (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn, ctx context.Context) { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(context.Background()) cell := "aa" - hc := discovery.NewFakeHealthCheck(nil) + hc := discovery.NewFakeHealthCheck(make(chan *discovery.TabletHealth)) + s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + sb := createSandbox(KsTestUnsharded) + sb.VSchema = unshardedVSchema + // Use the 'X' in the name to ensure it's not alphabetically first. + // Otherwise, it would become the default keyspace for the dual table. + bad := createSandbox("TestXBadSharding") + bad.VSchema = badVSchema + + serv := newSandboxForCells(ctx, []string{cell}) + serv.topoServer.CreateKeyspace(ctx, KsTestSharded, &topodatapb.Keyspace{SidecarDbName: sidecar.DefaultName}) + // Force a new cache to use for lookups of the sidecar database identifier + // in use by each keyspace -- as we want to use a different load function + // than the one already created by the vtgate as it uses a different topo. + if sdbc, _ := sidecardb.GetIdentifierCache(); sdbc != nil { + sdbc.Destroy() + } + _, created := sidecardb.NewIdentifierCache(func(ctx context.Context, keyspace string) (string, error) { + ki, err := serv.topoServer.GetKeyspace(ctx, keyspace) + if err != nil { + return "", err + } + return ki.SidecarDbName, nil + }) + if !created { + log.Fatal("Failed to [re]create a sidecar database identifier cache!") + } + + resolver := newTestResolver(ctx, hc, serv, cell) sbc1 = hc.AddTestTablet(cell, "-20", 1, "TestExecutor", "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) sbc2 = hc.AddTestTablet(cell, "40-60", 1, "TestExecutor", "40-60", topodatapb.TabletType_PRIMARY, true, 1, nil) // Create these connections so scatter queries don't fail. @@ -143,56 +173,76 @@ func createExecutorEnv() (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn _ = hc.AddTestTablet(cell, "c0-e0", 1, "TestExecutor", "c0-e0", topodatapb.TabletType_PRIMARY, true, 1, nil) _ = hc.AddTestTablet(cell, "e0-", 1, "TestExecutor", "e0-", topodatapb.TabletType_PRIMARY, true, 1, nil) // Below is needed so that SendAnyWherePlan doesn't fail - _ = hc.AddTestTablet(cell, "random", 1, "TestXBadVSchema", "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - createSandbox(KsTestUnsharded) sbclookup = hc.AddTestTablet(cell, "0", 1, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) + _ = hc.AddTestTablet(cell, "2", 3, KsTestUnsharded, "0", topodatapb.TabletType_REPLICA, true, 1, nil) - // Ues the 'X' in the name to ensure it's not alphabetically first. - // Otherwise, it would become the default keyspace for the dual table. - bad := createSandbox("TestXBadSharding") - bad.VSchema = badVSchema + queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - executor = NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3) + // All these vtgate tests expect plans to be immediately cached after first use; + // this is not the actual behavior of the system in a production context because we use a doorkeeper + // that sometimes can cause a plan to not be cached the very first time it's seen, to prevent + // one-off queries from thrashing the cache. Disable the doorkeeper in the tests to prevent flakiness. + plans := theine.NewStore[PlanCacheKey, *engine.Plan](queryPlanCacheMemory, false) + + executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4) + executor.SetQueryLogger(queryLogger) key.AnyShardPicker = DestinationAnyShardPickerFirstShard{} - // create a new session each time so that ShardSessions don't get re-used across tests - primarySession = &vtgatepb.Session{ - TargetString: "@primary", - } - return executor, sbc1, sbc2, sbclookup + + t.Cleanup(func() { + defer utils.EnsureNoLeaks(t) + executor.Close() + cancel() + }) + + return executor, sbc1, sbc2, sbclookup, ctx } -func createCustomExecutor(vschema string) (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn) { +func createCustomExecutor(t testing.TB, vschema string) (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn, ctx context.Context) { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(context.Background()) cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + s := createSandbox(KsTestSharded) s.VSchema = vschema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) - sbc1 = hc.AddTestTablet(cell, "-20", 1, "TestExecutor", "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - sbc2 = hc.AddTestTablet(cell, "40-60", 1, "TestExecutor", "40-60", topodatapb.TabletType_PRIMARY, true, 1, nil) + sb := createSandbox(KsTestUnsharded) + sb.VSchema = unshardedVSchema - createSandbox(KsTestUnsharded) + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) + sbc1 = hc.AddTestTablet(cell, "-20", 1, KsTestSharded, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) + sbc2 = hc.AddTestTablet(cell, "40-60", 1, KsTestSharded, "40-60", topodatapb.TabletType_PRIMARY, true, 1, nil) sbclookup = hc.AddTestTablet(cell, "0", 1, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - executor = NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3) - // create a new session each time so that ShardSessions don't get re-used across tests - primarySession = &vtgatepb.Session{ - TargetString: "@primary", - } - return executor, sbc1, sbc2, sbclookup + queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) + plans := DefaultPlanCache() + executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4) + executor.SetQueryLogger(queryLogger) + + t.Cleanup(func() { + defer utils.EnsureNoLeaks(t) + executor.Close() + cancel() + }) + + return executor, sbc1, sbc2, sbclookup, ctx } -func createCustomExecutorSetValues(vschema string, values []*sqltypes.Result) (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn) { +func createCustomExecutorSetValues(t testing.TB, vschema string, values []*sqltypes.Result) (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn, ctx context.Context) { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(context.Background()) cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + s := createSandbox(KsTestSharded) s.VSchema = vschema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + sb := createSandbox(KsTestUnsharded) + sb.VSchema = unshardedVSchema + + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} sbcs := []*sandboxconn.SandboxConn{} for _, shard := range shards { @@ -202,45 +252,50 @@ func createCustomExecutorSetValues(vschema string, values []*sqltypes.Result) (e } sbcs = append(sbcs, sbc) } - - createSandbox(KsTestUnsharded) sbclookup = hc.AddTestTablet(cell, "0", 1, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - executor = NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3) - // create a new session each time so that ShardSessions don't get re-used across tests - primarySession = &vtgatepb.Session{ - TargetString: "@primary", - } - return executor, sbcs[0], sbcs[1], sbclookup + queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) + plans := DefaultPlanCache() + executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4) + executor.SetQueryLogger(queryLogger) + + t.Cleanup(func() { + defer utils.EnsureNoLeaks(t) + executor.Close() + cancel() + }) + + return executor, sbcs[0], sbcs[1], sbclookup, ctx } -func executorExecSession(executor *Executor, sql string, bv map[string]*querypb.BindVariable, session *vtgatepb.Session) (*sqltypes.Result, error) { +func executorExecSession(ctx context.Context, executor *Executor, sql string, bv map[string]*querypb.BindVariable, session *vtgatepb.Session) (*sqltypes.Result, error) { return executor.Execute( - context.Background(), + ctx, + nil, "TestExecute", NewSafeSession(session), sql, bv) } -func executorExec(executor *Executor, sql string, bv map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - return executorExecSession(executor, sql, bv, primarySession) +func executorExec(ctx context.Context, executor *Executor, session *vtgatepb.Session, sql string, bv map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + return executorExecSession(ctx, executor, sql, bv, session) } -func executorPrepare(executor *Executor, sql string, bv map[string]*querypb.BindVariable) ([]*querypb.Field, error) { +func executorPrepare(ctx context.Context, executor *Executor, session *vtgatepb.Session, sql string, bv map[string]*querypb.BindVariable) ([]*querypb.Field, error) { return executor.Prepare( - context.Background(), + ctx, "TestExecute", - NewSafeSession(primarySession), + NewSafeSession(session), sql, bv) } -func executorStream(executor *Executor, sql string) (qr *sqltypes.Result, err error) { +func executorStream(ctx context.Context, executor *Executor, sql string) (qr *sqltypes.Result, err error) { results := make(chan *sqltypes.Result, 100) err = executor.StreamExecute( - context.Background(), + ctx, + nil, "TestExecuteStream", NewSafeSession(nil), sql, @@ -344,14 +399,14 @@ func getQueryLog(logChan chan *logstats.LogStats) *logstats.LogStats { // is a repeat query. var testPlannedQueries = map[string]bool{} -func testQueryLog(t *testing.T, logChan chan *logstats.LogStats, method, stmtType, sql string, shardQueries int) *logstats.LogStats { +func testQueryLog(t *testing.T, executor *Executor, logChan chan *logstats.LogStats, method, stmtType, sql string, shardQueries int) *logstats.LogStats { t.Helper() logStats := getQueryLog(logChan) require.NotNil(t, logStats) var log bytes.Buffer - streamlog.GetFormatter(QueryLogger)(&log, nil, logStats) + streamlog.GetFormatter(executor.queryLogger)(&log, nil, logStats) fields := strings.Split(log.String(), "\t") // fields[0] is the method @@ -407,8 +462,8 @@ func testQueryLog(t *testing.T, logChan chan *logstats.LogStats, method, stmtTyp return logStats } -func newTestResolver(hc discovery.HealthCheck, serv srvtopo.Server, cell string) *Resolver { - sc := newTestScatterConn(hc, serv, cell) +func newTestResolver(ctx context.Context, hc discovery.HealthCheck, serv srvtopo.Server, cell string) *Resolver { + sc := newTestScatterConn(ctx, hc, serv, cell) srvResolver := srvtopo.NewResolver(serv, sc.gateway, cell) return NewResolver(srvResolver, serv, cell, sc) } diff --git a/go/vt/vtgate/executor_scatter_stats.go b/go/vt/vtgate/executor_scatter_stats.go index 946558e22fd..beaa60d7012 100644 --- a/go/vt/vtgate/executor_scatter_stats.go +++ b/go/vt/vtgate/executor_scatter_stats.go @@ -18,11 +18,12 @@ package vtgate import ( "fmt" - "html/template" "net/http" "sync/atomic" "time" + "github.com/google/safehtml/template" + "vitess.io/vitess/go/vt/logz" "vitess.io/vitess/go/vt/proto/vtrpc" @@ -61,8 +62,7 @@ func (e *Executor) gatherScatterStats() (statsResults, error) { plans := make([]*engine.Plan, 0) routes := make([]*engine.Route, 0) // First we go over all plans and collect statistics and all query plans for scatter queries - e.plans.ForEach(func(value any) bool { - plan := value.(*engine.Plan) + e.ForEachPlan(func(plan *engine.Plan) bool { scatter := engine.Find(findScatter, plan.Instructions) readOnly := !engine.Exists(isUpdating, plan.Instructions) isScatter := scatter != nil diff --git a/go/vt/vtgate/executor_scatter_stats_test.go b/go/vt/vtgate/executor_scatter_stats_test.go index eff8d4f6c77..84dd2744e8b 100644 --- a/go/vt/vtgate/executor_scatter_stats_test.go +++ b/go/vt/vtgate/executor_scatter_stats_test.go @@ -17,10 +17,9 @@ limitations under the License. package vtgate import ( - "context" - "testing" - "net/http/httptest" + "testing" + "time" "github.com/stretchr/testify/require" @@ -28,10 +27,11 @@ import ( ) func TestScatterStatsWithNoScatterQuery(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"}) - _, err := executor.Execute(context.Background(), "TestExecutorResultsExceeded", session, "select * from main1", nil) + _, err := executor.Execute(ctx, nil, "TestExecutorResultsExceeded", session, "select * from main1", nil) require.NoError(t, err) result, err := executor.gatherScatterStats() @@ -40,10 +40,10 @@ func TestScatterStatsWithNoScatterQuery(t *testing.T) { } func TestScatterStatsWithSingleScatterQuery(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"}) - _, err := executor.Execute(context.Background(), "TestExecutorResultsExceeded", session, "select * from user", nil) + _, err := executor.Execute(ctx, nil, "TestExecutorResultsExceeded", session, "select * from user", nil) require.NoError(t, err) result, err := executor.gatherScatterStats() @@ -52,29 +52,29 @@ func TestScatterStatsWithSingleScatterQuery(t *testing.T) { } func TestScatterStatsHttpWriting(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"}) - _, err := executor.Execute(context.Background(), "TestExecutorResultsExceeded", session, "select * from user", nil) + _, err := executor.Execute(ctx, nil, "TestExecutorResultsExceeded", session, "select * from user", nil) require.NoError(t, err) - _, err = executor.Execute(context.Background(), "TestExecutorResultsExceeded", session, "select * from user where Id = 15", nil) + _, err = executor.Execute(ctx, nil, "TestExecutorResultsExceeded", session, "select * from user where Id = 15", nil) require.NoError(t, err) - _, err = executor.Execute(context.Background(), "TestExecutorResultsExceeded", session, "select * from user where Id > 15", nil) + _, err = executor.Execute(ctx, nil, "TestExecutorResultsExceeded", session, "select * from user where Id > 15", nil) require.NoError(t, err) query4 := "select * from user as u1 join user as u2 on u1.Id = u2.Id" - _, err = executor.Execute(context.Background(), "TestExecutorResultsExceeded", session, query4, nil) + _, err = executor.Execute(ctx, nil, "TestExecutorResultsExceeded", session, query4, nil) require.NoError(t, err) - executor.plans.Wait() + time.Sleep(500 * time.Millisecond) recorder := httptest.NewRecorder() executor.WriteScatterStats(recorder) // Here we are checking that the template was executed correctly. // If it wasn't, instead of html, we'll get an error message - require.Contains(t, recorder.Body.String(), query4) + require.Contains(t, recorder.Body.String(), "select * from `user` as u1 join `user` as u2 on u1.Id = u2.Id") require.NoError(t, err) } diff --git a/go/vt/vtgate/executor_select_test.go b/go/vt/vtgate/executor_select_test.go index f76bd742d03..829cd844fd1 100644 --- a/go/vt/vtgate/executor_select_test.go +++ b/go/vt/vtgate/executor_select_test.go @@ -27,15 +27,16 @@ import ( "time" _flag "vitess.io/vitess/go/internal/flag" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/streamlog" + "vitess.io/vitess/go/vt/vtgate/logstats" "vitess.io/vitess/go/vt/sqlparser" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/cache" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/discovery" @@ -50,7 +51,7 @@ import ( ) func TestSelectNext(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, _ := createExecutorEnv(t) query := "select next :n values from user_seq" bv := map[string]*querypb.BindVariable{"n": sqltypes.Int64BindVariable(2)} @@ -61,7 +62,7 @@ func TestSelectNext(t *testing.T) { // Autocommit session := NewAutocommitSession(&vtgatepb.Session{}) - _, err := executor.Execute(context.Background(), "TestSelectNext", session, query, bv) + _, err := executor.Execute(context.Background(), nil, "TestSelectNext", session, query, bv) require.NoError(t, err) utils.MustMatch(t, wantQueries, sbclookup.Queries) @@ -72,7 +73,7 @@ func TestSelectNext(t *testing.T) { // Txn session = NewAutocommitSession(&vtgatepb.Session{}) session.Session.InTransaction = true - _, err = executor.Execute(context.Background(), "TestSelectNext", session, query, bv) + _, err = executor.Execute(context.Background(), nil, "TestSelectNext", session, query, bv) require.NoError(t, err) utils.MustMatch(t, wantQueries, sbclookup.Queries) @@ -83,7 +84,7 @@ func TestSelectNext(t *testing.T) { // Reserve session = NewAutocommitSession(&vtgatepb.Session{}) session.Session.InReservedConn = true - _, err = executor.Execute(context.Background(), "TestSelectNext", session, query, bv) + _, err = executor.Execute(context.Background(), nil, "TestSelectNext", session, query, bv) require.NoError(t, err) utils.MustMatch(t, wantQueries, sbclookup.Queries) @@ -95,7 +96,7 @@ func TestSelectNext(t *testing.T) { session = NewAutocommitSession(&vtgatepb.Session{}) session.Session.InReservedConn = true session.Session.InTransaction = true - _, err = executor.Execute(context.Background(), "TestSelectNext", session, query, bv) + _, err = executor.Execute(context.Background(), nil, "TestSelectNext", session, query, bv) require.NoError(t, err) utils.MustMatch(t, wantQueries, sbclookup.Queries) @@ -104,10 +105,10 @@ func TestSelectNext(t *testing.T) { } func TestSelectDBA(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) query := "select * from INFORMATION_SCHEMA.foo" - _, err := executor.Execute(context.Background(), "TestSelectDBA", + _, err := executor.Execute(context.Background(), nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{}, ) @@ -117,12 +118,12 @@ func TestSelectDBA(t *testing.T) { sbc1.Queries = nil query = "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES ist WHERE ist.table_schema = 'performance_schema' AND ist.table_name = 'foo'" - _, err = executor.Execute(context.Background(), "TestSelectDBA", + _, err = executor.Execute(context.Background(), nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{}, ) require.NoError(t, err) - wantQueries = []*querypb.BoundQuery{{Sql: "select count(*) from INFORMATION_SCHEMA.`TABLES` as ist where ist.table_schema = :__vtschemaname and ist.table_name = :ist_table_name", + wantQueries = []*querypb.BoundQuery{{Sql: "select count(*) from INFORMATION_SCHEMA.`TABLES` as ist where ist.table_schema = :__vtschemaname /* VARCHAR */ and ist.table_name = :ist_table_name /* VARCHAR */", BindVariables: map[string]*querypb.BindVariable{ "__vtschemaname": sqltypes.StringBindVariable("performance_schema"), "ist_table_name": sqltypes.StringBindVariable("foo"), @@ -131,12 +132,12 @@ func TestSelectDBA(t *testing.T) { sbc1.Queries = nil query = "select 1 from information_schema.table_constraints where constraint_schema = 'vt_ks' and table_name = 'user'" - _, err = executor.Execute(context.Background(), "TestSelectDBA", + _, err = executor.Execute(context.Background(), nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{}, ) require.NoError(t, err) - wantQueries = []*querypb.BoundQuery{{Sql: "select 1 from information_schema.table_constraints where constraint_schema = :__vtschemaname and table_name = :table_name", + wantQueries = []*querypb.BoundQuery{{Sql: "select 1 from information_schema.table_constraints where constraint_schema = :__vtschemaname /* VARCHAR */ and table_name = :table_name /* VARCHAR */", BindVariables: map[string]*querypb.BindVariable{ "__vtschemaname": sqltypes.StringBindVariable("vt_ks"), "table_name": sqltypes.StringBindVariable("user"), @@ -145,12 +146,12 @@ func TestSelectDBA(t *testing.T) { sbc1.Queries = nil query = "select 1 from information_schema.table_constraints where constraint_schema = 'vt_ks'" - _, err = executor.Execute(context.Background(), "TestSelectDBA", + _, err = executor.Execute(context.Background(), nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{}, ) require.NoError(t, err) - wantQueries = []*querypb.BoundQuery{{Sql: "select 1 from information_schema.table_constraints where constraint_schema = :__vtschemaname", + wantQueries = []*querypb.BoundQuery{{Sql: "select 1 from information_schema.table_constraints where constraint_schema = :__vtschemaname /* VARCHAR */", BindVariables: map[string]*querypb.BindVariable{ "__vtschemaname": sqltypes.StringBindVariable("vt_ks"), }}} @@ -158,7 +159,7 @@ func TestSelectDBA(t *testing.T) { } func TestSystemVariablesMySQLBelow80(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) executor.normalize = true sqlparser.SetParserVersion("57000") @@ -168,8 +169,8 @@ func TestSystemVariablesMySQLBelow80(t *testing.T) { sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "orig", Type: sqltypes.VarChar}, - {Name: "new", Type: sqltypes.VarChar}, + {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar(""), @@ -177,24 +178,24 @@ func TestSystemVariablesMySQLBelow80(t *testing.T) { }}, }}) - _, err := executor.Execute(context.Background(), "TestSetStmt", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) require.NoError(t, err) - _, err = executor.Execute(context.Background(), "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.True(t, session.InReservedConn()) wantQueries := []*querypb.BoundQuery{ {Sql: "select @@sql_mode orig, 'only_full_group_by' new"}, {Sql: "set sql_mode = 'only_full_group_by'", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, - {Sql: "select :vtg1 from information_schema.`table`", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, + {Sql: "select :vtg1 /* INT64 */ from information_schema.`table`", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, } utils.MustMatch(t, wantQueries, sbc1.Queries) } func TestSystemVariablesWithSetVarDisabled(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) executor.normalize = true sqlparser.SetParserVersion("80000") @@ -206,8 +207,8 @@ func TestSystemVariablesWithSetVarDisabled(t *testing.T) { sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "orig", Type: sqltypes.VarChar}, - {Name: "new", Type: sqltypes.VarChar}, + {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar(""), @@ -215,41 +216,41 @@ func TestSystemVariablesWithSetVarDisabled(t *testing.T) { }}, }}) - _, err := executor.Execute(context.Background(), "TestSetStmt", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) require.NoError(t, err) - _, err = executor.Execute(context.Background(), "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.True(t, session.InReservedConn()) wantQueries := []*querypb.BoundQuery{ {Sql: "select @@sql_mode orig, 'only_full_group_by' new"}, {Sql: "set sql_mode = 'only_full_group_by'", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, - {Sql: "select :vtg1 from information_schema.`table`", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, + {Sql: "select :vtg1 /* INT64 */ from information_schema.`table`", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, } utils.MustMatch(t, wantQueries, sbc1.Queries) } func TestSetSystemVariablesTx(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) executor.normalize = true sqlparser.SetParserVersion("80001") session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: "TestExecutor"}) - _, err := executor.Execute(context.Background(), "TestBegin", session, "begin", map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, "TestBegin", session, "begin", map[string]*querypb.BindVariable{}) require.NoError(t, err) - _, err = executor.Execute(context.Background(), "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.NotZero(t, session.ShardSessions) sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "orig", Type: sqltypes.VarChar}, - {Name: "new", Type: sqltypes.VarChar}, + {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar(""), @@ -257,30 +258,30 @@ func TestSetSystemVariablesTx(t *testing.T) { }}, }}) - _, err = executor.Execute(context.Background(), "TestSetStmt", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.False(t, session.InReservedConn()) - _, err = executor.Execute(context.Background(), "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) require.NoError(t, err) - _, err = executor.Execute(context.Background(), "TestCommit", session, "commit", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, "TestCommit", session, "commit", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.False(t, session.InReservedConn()) require.Zero(t, session.ShardSessions) wantQueries := []*querypb.BoundQuery{ - {Sql: "select :vtg1 from information_schema.`table`", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, + {Sql: "select :vtg1 /* INT64 */ from information_schema.`table`", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, {Sql: "select @@sql_mode orig, 'only_full_group_by' new"}, - {Sql: "select /*+ SET_VAR(sql_mode = 'only_full_group_by') */ :vtg1 from information_schema.`table`", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, + {Sql: "select /*+ SET_VAR(sql_mode = 'only_full_group_by') */ :vtg1 /* INT64 */ from information_schema.`table`", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, } utils.MustMatch(t, wantQueries, sbc1.Queries) } func TestSetSystemVariables(t *testing.T) { - executor, _, _, lookup := createExecutorEnv() + executor, _, _, lookup, _ := createExecutorEnv(t) executor.normalize = true sqlparser.SetParserVersion("80001") @@ -291,47 +292,47 @@ func TestSetSystemVariables(t *testing.T) { lookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "orig", Type: sqltypes.VarChar}, - {Name: "new", Type: sqltypes.VarChar}, + {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar(""), sqltypes.NewVarChar("only_full_group_by"), }}, }}) - _, err := executor.Execute(context.Background(), "TestSetStmt", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) require.NoError(t, err) - _, err = executor.Execute(context.Background(), "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.False(t, session.InReservedConn()) wantQueries := []*querypb.BoundQuery{ {Sql: "select @@sql_mode orig, 'only_full_group_by' new"}, - {Sql: "select /*+ SET_VAR(sql_mode = 'only_full_group_by') */ :vtg1 from information_schema.`table`", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, + {Sql: "select /*+ SET_VAR(sql_mode = 'only_full_group_by') */ :vtg1 /* INT64 */ from information_schema.`table`", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, } utils.MustMatch(t, wantQueries, lookup.Queries) lookup.Queries = nil // Execute a select with a comment that needs a query hint - _, err = executor.Execute(context.Background(), "TestSelect", session, "select /* comment */ 1 from information_schema.table", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select /* comment */ 1 from information_schema.table", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.False(t, session.InReservedConn()) wantQueries = []*querypb.BoundQuery{ - {Sql: "select /*+ SET_VAR(sql_mode = 'only_full_group_by') */ /* comment */ :vtg1 from information_schema.`table`", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, + {Sql: "select /*+ SET_VAR(sql_mode = 'only_full_group_by') */ /* comment */ :vtg1 /* INT64 */ from information_schema.`table`", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, } utils.MustMatch(t, wantQueries, lookup.Queries) lookup.Queries = nil lookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "sql_safe_updates", Type: sqltypes.VarChar}, + {Name: "sql_safe_updates", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("0"), }}, }}) - _, err = executor.Execute(context.Background(), "TestSetStmt", session, "set @@sql_safe_updates = 0", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @@sql_safe_updates = 0", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.False(t, session.InReservedConn()) wantQueries = []*querypb.BoundQuery{ @@ -340,7 +341,7 @@ func TestSetSystemVariables(t *testing.T) { utils.MustMatch(t, wantQueries, lookup.Queries) lookup.Queries = nil - _, err = executor.Execute(context.Background(), "TestSetStmt", session, "set @var = @@sql_mode", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @var = @@sql_mode", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.False(t, session.InReservedConn()) require.Nil(t, lookup.Queries) @@ -348,13 +349,13 @@ func TestSetSystemVariables(t *testing.T) { lookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "max_tmp_tables", Type: sqltypes.VarChar}, + {Name: "max_tmp_tables", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("4"), }}, }}) - _, err = executor.Execute(context.Background(), "TestSetStmt", session, "set @x = @@sql_mode, @y = @@max_tmp_tables", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @x = @@sql_mode, @y = @@max_tmp_tables", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.False(t, session.InReservedConn()) wantQueries = []*querypb.BoundQuery{ @@ -371,47 +372,47 @@ func TestSetSystemVariables(t *testing.T) { lookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "max_tmp_tables", Type: sqltypes.VarChar}, + {Name: "max_tmp_tables", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("1"), }}, }}) - _, err = executor.Execute(context.Background(), "TestSetStmt", session, "set @@max_tmp_tables = 1", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @@max_tmp_tables = 1", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.True(t, session.InReservedConn()) - _, err = executor.Execute(context.Background(), "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select 1 from information_schema.table", map[string]*querypb.BindVariable{}) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{ {Sql: "select 1 from dual where @@max_tmp_tables != 1"}, {Sql: "set max_tmp_tables = '1', sql_mode = 'only_full_group_by', sql_safe_updates = '0'", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, - {Sql: "select :vtg1 from information_schema.`table`", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, + {Sql: "select :vtg1 /* INT64 */ from information_schema.`table`", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, } utils.MustMatch(t, wantQueries, lookup.Queries) } func TestSetSystemVariablesWithReservedConnection(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) executor.normalize = true session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, SystemVariables: map[string]string{}}) sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "orig", Type: sqltypes.VarChar}, - {Name: "new", Type: sqltypes.VarChar}, + {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("only_full_group_by"), sqltypes.NewVarChar(""), }}, }}) - _, err := executor.Execute(context.Background(), "TestSetStmt", session, "set @@sql_mode = ''", map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, "TestSetStmt", session, "set @@sql_mode = ''", map[string]*querypb.BindVariable{}) require.NoError(t, err) - _, err = executor.Execute(context.Background(), "TestSelect", session, "select age, city from user group by age", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select age, city from user group by age", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.True(t, session.InReservedConn()) wantQueries := []*querypb.BoundQuery{ @@ -421,14 +422,14 @@ func TestSetSystemVariablesWithReservedConnection(t *testing.T) { } utils.MustMatch(t, wantQueries, sbc1.Queries) - _, err = executor.Execute(context.Background(), "TestSelect", session, "select age, city+1 from user group by age", map[string]*querypb.BindVariable{}) + _, err = executor.Execute(context.Background(), nil, "TestSelect", session, "select age, city+1 from user group by age", map[string]*querypb.BindVariable{}) require.NoError(t, err) require.True(t, session.InReservedConn()) wantQueries = []*querypb.BoundQuery{ {Sql: "select @@sql_mode orig, '' new"}, {Sql: "set sql_mode = ''"}, {Sql: "select age, city, weight_string(age) from `user` group by age, weight_string(age) order by age asc"}, - {Sql: "select age, city + :vtg1, weight_string(age) from `user` group by age, weight_string(age) order by age asc", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, + {Sql: "select age, city + :vtg1 /* INT64 */, weight_string(age) from `user` group by age, weight_string(age) order by age asc", BindVariables: map[string]*querypb.BindVariable{"vtg1": {Type: sqltypes.Int64, Value: []byte("1")}}}, } utils.MustMatch(t, wantQueries, sbc1.Queries) require.Equal(t, "''", session.SystemVariables["sql_mode"]) @@ -436,13 +437,13 @@ func TestSetSystemVariablesWithReservedConnection(t *testing.T) { } func TestCreateTableValidTimestamp(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) executor.normalize = true session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor", SystemVariables: map[string]string{"sql_mode": "ALLOW_INVALID_DATES"}}) query := "create table aa(t timestamp default 0)" - _, err := executor.Execute(context.Background(), "TestSelect", session, query, map[string]*querypb.BindVariable{}) + _, err := executor.Execute(context.Background(), nil, "TestSelect", session, query, map[string]*querypb.BindVariable{}) require.NoError(t, err) require.True(t, session.InReservedConn()) @@ -455,12 +456,12 @@ func TestCreateTableValidTimestamp(t *testing.T) { } func TestGen4SelectDBA(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) executor.normalize = true executor.pv = querypb.ExecuteOptions_Gen4 query := "select * from INFORMATION_SCHEMA.TABLE_CONSTRAINTS" - _, err := executor.Execute(context.Background(), "TestSelectDBA", + _, err := executor.Execute(context.Background(), nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{}, ) @@ -471,12 +472,12 @@ func TestGen4SelectDBA(t *testing.T) { sbc1.Queries = nil query = "SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES ist WHERE ist.table_schema = 'performance_schema' AND ist.table_name = 'foo'" - _, err = executor.Execute(context.Background(), "TestSelectDBA", + _, err = executor.Execute(context.Background(), nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{}, ) require.NoError(t, err) - wantQueries = []*querypb.BoundQuery{{Sql: "select count(*) from INFORMATION_SCHEMA.`TABLES` as ist where ist.table_schema = :__vtschemaname and ist.table_name = :ist_table_name1", + wantQueries = []*querypb.BoundQuery{{Sql: "select count(*) from INFORMATION_SCHEMA.`TABLES` as ist where ist.table_schema = :__vtschemaname /* VARCHAR */ and ist.table_name = :ist_table_name1 /* VARCHAR */", BindVariables: map[string]*querypb.BindVariable{ "ist_table_schema": sqltypes.StringBindVariable("performance_schema"), "__vtschemaname": sqltypes.StringBindVariable("performance_schema"), @@ -487,12 +488,12 @@ func TestGen4SelectDBA(t *testing.T) { sbc1.Queries = nil query = "select 1 from information_schema.table_constraints where constraint_schema = 'vt_ks' and table_name = 'user'" - _, err = executor.Execute(context.Background(), "TestSelectDBA", + _, err = executor.Execute(context.Background(), nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{}, ) require.NoError(t, err) - wantQueries = []*querypb.BoundQuery{{Sql: "select :vtg1 from information_schema.table_constraints where constraint_schema = :__vtschemaname and table_name = :table_name1", + wantQueries = []*querypb.BoundQuery{{Sql: "select :vtg1 /* INT64 */ from information_schema.table_constraints where constraint_schema = :__vtschemaname /* VARCHAR */ and table_name = :table_name1 /* VARCHAR */", BindVariables: map[string]*querypb.BindVariable{ "vtg1": sqltypes.Int64BindVariable(1), "constraint_schema": sqltypes.StringBindVariable("vt_ks"), @@ -504,12 +505,9 @@ func TestGen4SelectDBA(t *testing.T) { sbc1.Queries = nil query = "select 1 from information_schema.table_constraints where constraint_schema = 'vt_ks'" - _, err = executor.Execute(context.Background(), "TestSelectDBA", - NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), - query, map[string]*querypb.BindVariable{}, - ) + _, err = executor.Execute(context.Background(), nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}), query, map[string]*querypb.BindVariable{}) require.NoError(t, err) - wantQueries = []*querypb.BoundQuery{{Sql: "select :vtg1 from information_schema.table_constraints where constraint_schema = :__vtschemaname", + wantQueries = []*querypb.BoundQuery{{Sql: "select :vtg1 /* INT64 */ from information_schema.table_constraints where constraint_schema = :__vtschemaname /* VARCHAR */", BindVariables: map[string]*querypb.BindVariable{ "vtg1": sqltypes.Int64BindVariable(1), "constraint_schema": sqltypes.StringBindVariable("vt_ks"), @@ -519,12 +517,12 @@ func TestGen4SelectDBA(t *testing.T) { sbc1.Queries = nil query = "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'TestExecutor' and c.table_schema = 'TestExecutor' order by t.table_schema,t.table_name,c.column_name" - _, err = executor.Execute(context.Background(), "TestSelectDBA", + _, err = executor.Execute(context.Background(), nil, "TestSelectDBA", NewSafeSession(&vtgatepb.Session{TargetString: "information_schema"}), query, map[string]*querypb.BindVariable{}, ) require.NoError(t, err) - wantQueries = []*querypb.BoundQuery{{Sql: "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t, information_schema.`columns` as c where t.table_schema = :__vtschemaname and c.table_schema = :__vtschemaname and c.table_schema = t.table_schema and c.table_name = t.table_name order by t.table_schema asc, t.table_name asc, c.column_name asc", + wantQueries = []*querypb.BoundQuery{{Sql: "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t, information_schema.`columns` as c where t.table_schema = :__vtschemaname /* VARCHAR */ and c.table_schema = :__vtschemaname /* VARCHAR */ and c.table_schema = t.table_schema and c.table_name = t.table_name order by t.table_schema asc, t.table_name asc, c.column_name asc", BindVariables: map[string]*querypb.BindVariable{ "t_table_schema": sqltypes.StringBindVariable("TestExecutor"), "__replacevtschemaname": sqltypes.Int64BindVariable(1), @@ -533,9 +531,12 @@ func TestGen4SelectDBA(t *testing.T) { } func TestUnsharded(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "select id from music_user_map where id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select id from music_user_map where id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select id from music_user_map where id = 1", @@ -545,9 +546,12 @@ func TestUnsharded(t *testing.T) { } func TestUnshardedComments(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "/* leading */ select id from music_user_map where id = 1 /* trailing */", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "/* leading */ select id from music_user_map where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "/* leading */ select id from music_user_map where id = 1 /* trailing */", @@ -555,7 +559,7 @@ func TestUnshardedComments(t *testing.T) { }} utils.MustMatch(t, wantQueries, sbclookup.Queries) - _, err = executorExec(executor, "update music_user_map set id = 1 /* trailing */", nil) + _, err = executorExec(ctx, executor, session, "update music_user_map set id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "/* leading */ select id from music_user_map where id = 1 /* trailing */", @@ -567,7 +571,7 @@ func TestUnshardedComments(t *testing.T) { assertQueries(t, sbclookup, wantQueries) sbclookup.Queries = nil - _, err = executorExec(executor, "delete from music_user_map /* trailing */", nil) + _, err = executorExec(ctx, executor, session, "delete from music_user_map /* trailing */", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "delete from music_user_map /* trailing */", @@ -576,7 +580,7 @@ func TestUnshardedComments(t *testing.T) { assertQueries(t, sbclookup, wantQueries) sbclookup.Queries = nil - _, err = executorExec(executor, "insert into music_user_map values (1) /* trailing */", nil) + _, err = executorExec(ctx, executor, session, "insert into music_user_map values (1) /* trailing */", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "insert into music_user_map values (1) /* trailing */", @@ -586,30 +590,30 @@ func TestUnshardedComments(t *testing.T) { } func TestStreamUnsharded(t *testing.T) { - executor, _, _, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, _, _, _, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select id from music_user_map where id = 1" - result, err := executorStream(executor, sql) + result, err := executorStream(ctx, executor, sql) require.NoError(t, err) wantResult := sandboxconn.StreamRowResult if !result.Equal(wantResult) { diff := cmp.Diff(wantResult, result) t.Errorf("result: %+v, want %+v\ndiff: %s", result, wantResult, diff) } - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", sql, 1) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", sql, 1) } func TestStreamBuffering(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, _ := createExecutorEnv(t) // This test is similar to TestStreamUnsharded except that it returns a Result > 10 bytes, // such that the splitting of the Result into multiple Result responses gets tested. sbclookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "col", Type: sqltypes.VarChar}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), @@ -621,10 +625,15 @@ func TestStreamBuffering(t *testing.T) { }}) var results []*sqltypes.Result + session := &vtgatepb.Session{ + TargetString: "@primary", + } + err := executor.StreamExecute( context.Background(), + nil, "TestStreamBuffering", - NewSafeSession(primarySession), + NewSafeSession(session), "select id from music_user_map where id = 1", nil, func(qr *sqltypes.Result) error { @@ -635,8 +644,8 @@ func TestStreamBuffering(t *testing.T) { require.NoError(t, err) wantResults := []*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "col", Type: sqltypes.VarChar}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, }, }, { Rows: [][]sqltypes.Value{{ @@ -653,15 +662,15 @@ func TestStreamBuffering(t *testing.T) { } func TestStreamLimitOffset(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) // This test is similar to TestStreamUnsharded except that it returns a Result > 10 bytes, // such that the splitting of the Result into multiple Result responses gets tested. sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "textcol", Type: sqltypes.VarChar}, - {Name: "weight_string(id)", Type: sqltypes.VarBinary}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "weight_string(id)", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), @@ -676,9 +685,9 @@ func TestStreamLimitOffset(t *testing.T) { sbc2.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "textcol", Type: sqltypes.VarChar}, - {Name: "weight_string(id)", Type: sqltypes.VarBinary}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "weight_string(id)", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(2), @@ -688,10 +697,14 @@ func TestStreamLimitOffset(t *testing.T) { }}) results := make(chan *sqltypes.Result, 10) + session := &vtgatepb.Session{ + TargetString: "@primary", + } err := executor.StreamExecute( context.Background(), + nil, "TestStreamLimitOffset", - NewSafeSession(primarySession), + NewSafeSession(session), "select id, textcol from user order by id limit 2 offset 2", nil, func(qr *sqltypes.Result) error { @@ -703,8 +716,8 @@ func TestStreamLimitOffset(t *testing.T) { require.NoError(t, err) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "textcol", Type: sqltypes.VarChar}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, }, Rows: [][]sqltypes.Value{{ @@ -727,17 +740,20 @@ func TestStreamLimitOffset(t *testing.T) { } func TestSelectLastInsertId(t *testing.T) { - executor, _, _, _ := createExecutorEnv() - primarySession.LastInsertId = 52 + executor, _, _, _, ctx := createExecutorEnv(t) + session := &vtgatepb.Session{ + TargetString: "@primary", + LastInsertId: 52, + } executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select last_insert_id()" - result, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) + result, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "last_insert_id()", Type: sqltypes.Uint64}, + {Name: "last_insert_id()", Type: sqltypes.Uint64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewUint64(52), @@ -748,36 +764,41 @@ func TestSelectLastInsertId(t *testing.T) { } func TestSelectSystemVariables(t *testing.T) { - executor, _, _, _ := createExecutorEnv() - primarySession.ReadAfterWrite = &vtgatepb.ReadAfterWrite{ - ReadAfterWriteGtid: "a fine gtid", - ReadAfterWriteTimeout: 13, - SessionTrackGtids: true, + executor, _, _, _, ctx := createExecutorEnv(t) + + session := &vtgatepb.Session{ + TargetString: "@primary", + ReadAfterWrite: &vtgatepb.ReadAfterWrite{ + ReadAfterWriteGtid: "a fine gtid", + ReadAfterWriteTimeout: 13, + SessionTrackGtids: true, + }, } executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select @@autocommit, @@client_found_rows, @@skip_query_plan_cache, @@enable_system_settings, " + "@@sql_select_limit, @@transaction_mode, @@workload, @@read_after_write_gtid, " + - "@@read_after_write_timeout, @@session_track_gtids, @@ddl_strategy, @@socket, @@query_timeout" + "@@read_after_write_timeout, @@session_track_gtids, @@ddl_strategy, @@migration_context, @@socket, @@query_timeout" - result, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) + result, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "@@autocommit", Type: sqltypes.Int64}, - {Name: "@@client_found_rows", Type: sqltypes.Int64}, - {Name: "@@skip_query_plan_cache", Type: sqltypes.Int64}, - {Name: "@@enable_system_settings", Type: sqltypes.Int64}, - {Name: "@@sql_select_limit", Type: sqltypes.Int64}, - {Name: "@@transaction_mode", Type: sqltypes.VarChar}, - {Name: "@@workload", Type: sqltypes.VarChar}, - {Name: "@@read_after_write_gtid", Type: sqltypes.VarChar}, - {Name: "@@read_after_write_timeout", Type: sqltypes.Float64}, - {Name: "@@session_track_gtids", Type: sqltypes.VarChar}, - {Name: "@@ddl_strategy", Type: sqltypes.VarChar}, - {Name: "@@socket", Type: sqltypes.VarChar}, - {Name: "@@query_timeout", Type: sqltypes.Int64}, + {Name: "@@autocommit", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@client_found_rows", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@skip_query_plan_cache", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@enable_system_settings", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@sql_select_limit", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@transaction_mode", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, + {Name: "@@workload", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, + {Name: "@@read_after_write_gtid", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, + {Name: "@@read_after_write_timeout", Type: sqltypes.Float64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@session_track_gtids", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, + {Name: "@@ddl_strategy", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, + {Name: "@@migration_context", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, + {Name: "@@socket", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, + {Name: "@@query_timeout", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, }, Rows: [][]sqltypes.Value{{ // the following are the uninitialised session values @@ -794,6 +815,7 @@ func TestSelectSystemVariables(t *testing.T) { sqltypes.NewVarChar("own_gtid"), sqltypes.NewVarChar(""), sqltypes.NewVarChar(""), + sqltypes.NewVarChar(""), sqltypes.NewInt64(0), }}, } @@ -802,29 +824,26 @@ func TestSelectSystemVariables(t *testing.T) { } func TestSelectInitializedVitessAwareVariable(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) - - primarySession.Autocommit = true - primarySession.EnableSystemSettings = true - primarySession.QueryTimeout = 75 + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - defer func() { - primarySession.Autocommit = false - primarySession.EnableSystemSettings = false - primarySession.QueryTimeout = 0 - }() + session := &vtgatepb.Session{ + TargetString: "@primary", + Autocommit: true, + EnableSystemSettings: true, + QueryTimeout: 75, + } sql := "select @@autocommit, @@enable_system_settings, @@query_timeout" - result, err := executorExec(executor, sql, nil) + result, err := executorExec(ctx, executor, session, sql, nil) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "@@autocommit", Type: sqltypes.Int64}, - {Name: "@@enable_system_settings", Type: sqltypes.Int64}, - {Name: "@@query_timeout", Type: sqltypes.Int64}, + {Name: "@@autocommit", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@enable_system_settings", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@query_timeout", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt64(1), @@ -837,17 +856,20 @@ func TestSelectInitializedVitessAwareVariable(t *testing.T) { } func TestSelectUserDefinedVariable(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select @foo" - result, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "@foo", Type: sqltypes.Null}, + {Name: "@foo", Type: sqltypes.Null, Charset: collations.CollationBinaryID}, }, Rows: [][]sqltypes.Value{{ sqltypes.NULL, @@ -855,12 +877,12 @@ func TestSelectUserDefinedVariable(t *testing.T) { } utils.MustMatch(t, wantResult, result, "Mismatch") - primarySession = &vtgatepb.Session{UserDefinedVariables: createMap([]string{"foo"}, []any{"bar"})} - result, err = executorExec(executor, sql, map[string]*querypb.BindVariable{}) + session = &vtgatepb.Session{UserDefinedVariables: createMap([]string{"foo"}, []any{"bar"})} + result, err = executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantResult = &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "@foo", Type: sqltypes.VarChar}, + {Name: "@foo", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("bar"), @@ -870,20 +892,23 @@ func TestSelectUserDefinedVariable(t *testing.T) { } func TestFoundRows(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) + session := &vtgatepb.Session{ + TargetString: "@primary", + } // run this extra query so we can assert on the number of rows found - _, err := executorExec(executor, "select 42", map[string]*querypb.BindVariable{}) + _, err := executorExec(ctx, executor, session, "select 42", map[string]*querypb.BindVariable{}) require.NoError(t, err) sql := "select found_rows()" - result, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) + result, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "found_rows()", Type: sqltypes.Int64}, + {Name: "found_rows()", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt64(1), @@ -894,26 +919,29 @@ func TestFoundRows(t *testing.T) { } func TestRowCount(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - _, err := executorExec(executor, "select 42", map[string]*querypb.BindVariable{}) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select 42", map[string]*querypb.BindVariable{}) require.NoError(t, err) - testRowCount(t, executor, -1) + testRowCount(t, ctx, executor, session, -1) - _, err = executorExec(executor, "delete from user where id in (42, 24)", map[string]*querypb.BindVariable{}) + _, err = executorExec(ctx, executor, session, "delete from user where id in (42, 24)", map[string]*querypb.BindVariable{}) require.NoError(t, err) - testRowCount(t, executor, 2) + testRowCount(t, ctx, executor, session, 2) } -func testRowCount(t *testing.T, executor *Executor, wantRowCount int64) { +func testRowCount(t *testing.T, ctx context.Context, executor *Executor, session *vtgatepb.Session, wantRowCount int64) { t.Helper() - result, err := executorExec(executor, "select row_count()", map[string]*querypb.BindVariable{}) + result, err := executorExec(ctx, executor, session, "select row_count()", map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "row_count()", Type: sqltypes.Int64}, + {Name: "row_count()", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt64(wantRowCount), @@ -924,13 +952,17 @@ func testRowCount(t *testing.T, executor *Executor, wantRowCount int64) { } func TestSelectLastInsertIdInUnion(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) executor.normalize = true - primarySession.LastInsertId = 52 + + session := &vtgatepb.Session{ + TargetString: "@primary", + LastInsertId: 52, + } result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -940,11 +972,11 @@ func TestSelectLastInsertIdInUnion(t *testing.T) { sbc1.SetResults(result1) sql := "select last_insert_id() as id union select last_insert_id() as id" - got, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) + got, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(52), @@ -954,13 +986,16 @@ func TestSelectLastInsertIdInUnion(t *testing.T) { } func TestSelectLastInsertIdInWhere(t *testing.T) { - executor, _, _, lookup := createExecutorEnv() + executor, _, _, lookup, ctx := createExecutorEnv(t) executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select id from music_user_map where id = last_insert_id()" - _, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select id from music_user_map where id = :__lastInsertId", @@ -971,12 +1006,12 @@ func TestSelectLastInsertIdInWhere(t *testing.T) { } func TestLastInsertIDInVirtualTable(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) executor.normalize = true result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "col", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -985,10 +1020,13 @@ func TestLastInsertIDInVirtualTable(t *testing.T) { }}, }} sbc1.SetResults(result1) - _, err := executorExec(executor, "select * from (select last_insert_id()) as t", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select * from (select last_insert_id()) as t", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select * from (select :__lastInsertId as `last_insert_id()` from dual) as t", + Sql: "select t.`last_insert_id()` from (select :__lastInsertId as `last_insert_id()` from dual) as t", BindVariables: map[string]*querypb.BindVariable{"__lastInsertId": sqltypes.Uint64BindVariable(0)}, }} @@ -996,18 +1034,17 @@ func TestLastInsertIDInVirtualTable(t *testing.T) { } func TestLastInsertIDInSubQueryExpression(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) executor.normalize = true - primarySession.LastInsertId = 12345 - defer func() { - // clean up global state - primarySession.LastInsertId = 0 - }() - rs, err := executorExec(executor, "select (select last_insert_id()) as x", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + LastInsertId: 12345, + } + rs, err := executorExec(ctx, executor, session, "select (select last_insert_id()) as x", nil) require.NoError(t, err) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "x", Type: sqltypes.Uint64}, + {Name: "x", Type: sqltypes.Uint64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewUint64(12345), @@ -1021,21 +1058,24 @@ func TestLastInsertIDInSubQueryExpression(t *testing.T) { } func TestSelectDatabase(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, _ := createExecutorEnv(t) executor.normalize = true sql := "select database()" - newSession := proto.Clone(primarySession).(*vtgatepb.Session) + newSession := &vtgatepb.Session{ + TargetString: "@primary", + } session := NewSafeSession(newSession) session.TargetString = "TestExecutor@primary" result, err := executor.Execute( context.Background(), + nil, "TestExecute", session, sql, map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "database()", Type: sqltypes.VarChar}, + {Name: "database()", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("TestExecutor@primary"), @@ -1047,9 +1087,9 @@ func TestSelectDatabase(t *testing.T) { } func TestSelectBindvars(t *testing.T) { - executor, sbc1, sbc2, lookup := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, lookup, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) lookup.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( sqltypes.MakeTestFields("b|a", "varbinary|varbinary"), @@ -1059,8 +1099,11 @@ func TestSelectBindvars(t *testing.T) { "foo2|1", )}) - sql := "select id from user where id = :id" - _, err := executorExec(executor, sql, map[string]*querypb.BindVariable{ + sql := "select id from `user` where id = :id" + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{ "id": sqltypes.Int64BindVariable(1), }) require.NoError(t, err) @@ -1071,11 +1114,11 @@ func TestSelectBindvars(t *testing.T) { utils.MustMatch(t, sbc1.Queries, wantQueries) assert.Empty(t, sbc2.Queries) sbc1.Queries = nil - testQueryLog(t, logChan, "TestExecute", "SELECT", sql, 1) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", sql, 1) // Test with StringBindVariable - sql = "select id from user where name in (:name1, :name2)" - _, err = executorExec(executor, sql, map[string]*querypb.BindVariable{ + sql = "select id from `user` where `name` in (:name1, :name2)" + _, err = executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{ "name1": sqltypes.StringBindVariable("foo1"), "name2": sqltypes.StringBindVariable("foo2"), }) @@ -1090,13 +1133,11 @@ func TestSelectBindvars(t *testing.T) { }} utils.MustMatch(t, wantQueries, sbc1.Queries) sbc1.Queries = nil - testQueryLog(t, logChan, "VindexLookup", "SELECT", "select name, user_id from name_user_map where name in ::name", 1) - testQueryLog(t, logChan, "VindexLookup", "SELECT", "select name, user_id from name_user_map where name in ::name", 1) - testQueryLog(t, logChan, "TestExecute", "SELECT", sql, 1) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user` where `name` in (:name1, :name2)", 3) // Test with BytesBindVariable - sql = "select id from user where name in (:name1, :name2)" - _, err = executorExec(executor, sql, map[string]*querypb.BindVariable{ + sql = "select id from `user` where `name` in (:name1, :name2)" + _, err = executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{ "name1": sqltypes.BytesBindVariable([]byte("foo1")), "name2": sqltypes.BytesBindVariable([]byte("foo2")), }) @@ -1104,21 +1145,20 @@ func TestSelectBindvars(t *testing.T) { wantQueries = []*querypb.BoundQuery{{ Sql: "select id from `user` where 1 != 1", BindVariables: map[string]*querypb.BindVariable{ - "name1": sqltypes.BytesBindVariable([]byte("foo1")), - "name2": sqltypes.BytesBindVariable([]byte("foo2")), + "__vals": sqltypes.TestBindVariable([]any{[]byte("foo1"), []byte("foo2")}), + "name1": sqltypes.BytesBindVariable([]byte("foo1")), + "name2": sqltypes.BytesBindVariable([]byte("foo2")), }, }} utils.MustMatch(t, wantQueries, sbc1.Queries) - testQueryLog(t, logChan, "VindexLookup", "SELECT", "select name, user_id from name_user_map where name in ::name", 1) - testQueryLog(t, logChan, "VindexLookup", "SELECT", "select name, user_id from name_user_map where name in ::name", 1) - testQueryLog(t, logChan, "TestExecute", "SELECT", sql, 1) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", sql, 3) // Test no match in the lookup vindex sbc1.Queries = nil lookup.Queries = nil lookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "user_id", Type: sqltypes.Int32}, + {Name: "user_id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, RowsAffected: 0, InsertID: 0, @@ -1126,7 +1166,7 @@ func TestSelectBindvars(t *testing.T) { }}) sql = "select id from user where name = :name" - _, err = executorExec(executor, sql, map[string]*querypb.BindVariable{ + _, err = executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{ "name": sqltypes.StringBindVariable("nonexistent"), }) require.NoError(t, err) @@ -1150,16 +1190,16 @@ func TestSelectBindvars(t *testing.T) { }} utils.MustMatch(t, wantLookupQueries, lookup.Queries) - - testQueryLog(t, logChan, "VindexLookup", "SELECT", "select name, user_id from name_user_map where name in ::name", 1) - testQueryLog(t, logChan, "TestExecute", "SELECT", sql, 1) - + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user` where `name` = :name", 2) } func TestSelectEqual(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "select id from user where id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select id from user where id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select id from `user` where id = 1", @@ -1171,7 +1211,7 @@ func TestSelectEqual(t *testing.T) { } sbc1.Queries = nil - _, err = executorExec(executor, "select id from user where id = 3", nil) + _, err = executorExec(ctx, executor, session, "select id from user where id = 3", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "select id from `user` where id = 3", @@ -1186,7 +1226,7 @@ func TestSelectEqual(t *testing.T) { } sbc2.Queries = nil - _, err = executorExec(executor, "select id from user where id = '3'", nil) + _, err = executorExec(ctx, executor, session, "select id from user where id = '3'", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "select id from `user` where id = '3'", @@ -1205,7 +1245,7 @@ func TestSelectEqual(t *testing.T) { sqltypes.MakeTestFields("b|a", "varbinary|varbinary"), "foo|1", )}) - _, err = executorExec(executor, "select id from user where name = 'foo'", nil) + _, err = executorExec(ctx, executor, session, "select id from user where name = 'foo'", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "select id from `user` where `name` = 'foo'", @@ -1224,10 +1264,13 @@ func TestSelectEqual(t *testing.T) { } func TestSelectINFromOR(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) executor.pv = querypb.ExecuteOptions_Gen4 - _, err := executorExec(executor, "select 1 from user where id = 1 and name = 'apa' or id = 2 and name = 'toto'", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select 1 from user where id = 1 and name = 'apa' or id = 2 and name = 'toto'", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select 1 from `user` where id = 1 and `name` = 'apa' or id = 2 and `name` = 'toto'", @@ -1239,9 +1282,12 @@ func TestSelectINFromOR(t *testing.T) { } func TestSelectDual(t *testing.T) { - executor, sbc1, _, lookup := createExecutorEnv() + executor, sbc1, _, lookup, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "select @@aa.bb from dual", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select @@aa.bb from dual", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select @@`aa.bb` from dual", @@ -1249,15 +1295,18 @@ func TestSelectDual(t *testing.T) { }} utils.MustMatch(t, wantQueries, sbc1.Queries) - _, err = executorExec(executor, "select @@aa.bb from TestUnsharded.dual", nil) + _, err = executorExec(ctx, executor, session, "select @@aa.bb from TestUnsharded.dual", nil) require.NoError(t, err) utils.MustMatch(t, wantQueries, lookup.Queries) } func TestSelectComments(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "/* leading */ select id from user where id = 1 /* trailing */", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "/* leading */ select id from user where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "/* leading */ select id from `user` where id = 1 /* trailing */", @@ -1271,13 +1320,16 @@ func TestSelectComments(t *testing.T) { } func TestSelectNormalize(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) executor.normalize = true - _, err := executorExec(executor, "/* leading */ select id from user where id = 1 /* trailing */", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "/* leading */ select id from user where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "/* leading */ select id from `user` where id = :id /* trailing */", + Sql: "/* leading */ select id from `user` where id = :id /* INT64 */ /* trailing */", BindVariables: map[string]*querypb.BindVariable{ "id": sqltypes.TestBindVariable(int64(1)), }, @@ -1289,11 +1341,11 @@ func TestSelectNormalize(t *testing.T) { sbc1.Queries = nil // Force the query to go to the "wrong" shard and ensure that normalization still happens - primarySession.TargetString = "TestExecutor/40-60" - _, err = executorExec(executor, "/* leading */ select id from user where id = 1 /* trailing */", nil) + session.TargetString = "TestExecutor/40-60" + _, err = executorExec(ctx, executor, session, "/* leading */ select id from user where id = 1 /* trailing */", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "/* leading */ select id from `user` where id = :id /* trailing */", + Sql: "/* leading */ select id from `user` where id = :id /* INT64 */ /* trailing */", BindVariables: map[string]*querypb.BindVariable{ "id": sqltypes.TestBindVariable(int64(1)), }, @@ -1301,13 +1353,15 @@ func TestSelectNormalize(t *testing.T) { require.Empty(t, sbc1.Queries) utils.MustMatch(t, wantQueries, sbc2.Queries, "sbc2.Queries") sbc2.Queries = nil - primarySession.TargetString = "" } func TestSelectCaseSensitivity(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "select Id from user where iD = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select Id from user where iD = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select Id from `user` where iD = 1", @@ -1321,10 +1375,10 @@ func TestSelectCaseSensitivity(t *testing.T) { } func TestStreamSelectEqual(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) sql := "select id from user where id = 1" - result, err := executorStream(executor, sql) + result, err := executorStream(ctx, executor, sql) require.NoError(t, err) wantResult := sandboxconn.StreamRowResult if !result.Equal(wantResult) { @@ -1333,9 +1387,12 @@ func TestStreamSelectEqual(t *testing.T) { } func TestSelectKeyRange(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "select krcol_unique, krcol from keyrange_table where krcol = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select krcol_unique, krcol from keyrange_table where krcol = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select krcol_unique, krcol from keyrange_table where krcol = 1", @@ -1349,9 +1406,12 @@ func TestSelectKeyRange(t *testing.T) { } func TestSelectKeyRangeUnique(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) - _, err := executorExec(executor, "select krcol_unique, krcol from keyrange_table where krcol_unique = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select krcol_unique, krcol from keyrange_table where krcol_unique = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select krcol_unique, krcol from keyrange_table where krcol_unique = 1", @@ -1365,16 +1425,17 @@ func TestSelectKeyRangeUnique(t *testing.T) { } func TestSelectIN(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) // Constant in IN clause is just a number, not a bind variable. - _, err := executorExec(executor, "select id from user where id in (1)", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, "select id from user where id in (1)", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select id from `user` where id in ::__vals", - BindVariables: map[string]*querypb.BindVariable{ - "__vals": sqltypes.TestBindVariable([]any{int64(1)}), - }, + Sql: "select id from `user` where id in (1)", + BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) if sbc2.Queries != nil { @@ -1385,7 +1446,7 @@ func TestSelectIN(t *testing.T) { // They result in two different queries on two shards. sbc1.Queries = nil sbc2.Queries = nil - _, err = executorExec(executor, "select id from user where id in (1, 3)", nil) + _, err = executorExec(ctx, executor, session, "select id from user where id in (1, 3)", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "select id from `user` where id in ::__vals", @@ -1406,7 +1467,7 @@ func TestSelectIN(t *testing.T) { // This is using []any for the bind variable list. sbc1.Queries = nil sbc2.Queries = nil - _, err = executorExec(executor, "select id from user where id in ::vals", map[string]*querypb.BindVariable{ + _, err = executorExec(ctx, executor, session, "select id from user where id in ::vals", map[string]*querypb.BindVariable{ "vals": sqltypes.TestBindVariable([]any{int64(1), int64(3)}), }) require.NoError(t, err) @@ -1434,7 +1495,7 @@ func TestSelectIN(t *testing.T) { sqltypes.MakeTestFields("b|a", "varbinary|varbinary"), "foo|1", )}) - _, err = executorExec(executor, "select id from user where name = 'foo'", nil) + _, err = executorExec(ctx, executor, session, "select id from user where name = 'foo'", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ Sql: "select id from `user` where `name` = 'foo'", @@ -1453,10 +1514,10 @@ func TestSelectIN(t *testing.T) { } func TestStreamSelectIN(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) sql := "select id from user where id in (1)" - result, err := executorStream(executor, sql) + result, err := executorStream(ctx, executor, sql) require.NoError(t, err) wantResult := sandboxconn.StreamRowResult if !result.Equal(wantResult) { @@ -1464,7 +1525,7 @@ func TestStreamSelectIN(t *testing.T) { } sql = "select id from user where id in (1, 3)" - result, err = executorStream(executor, sql) + result, err = executorStream(ctx, executor, sql) require.NoError(t, err) wantResult = &sqltypes.Result{ Fields: sandboxconn.StreamRowResult.Fields, @@ -1479,7 +1540,7 @@ func TestStreamSelectIN(t *testing.T) { } sql = "select id from user where name = 'foo'" - result, err = executorStream(executor, sql) + result, err = executorStream(ctx, executor, sql) require.NoError(t, err) wantResult = sandboxconn.StreamRowResult if !result.Equal(wantResult) { @@ -1497,31 +1558,42 @@ func TestStreamSelectIN(t *testing.T) { utils.MustMatch(t, wantQueries, sbclookup.Queries) } -func createExecutor(serv *sandboxTopo, cell string, resolver *Resolver) *Executor { - return NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3) +func createExecutor(ctx context.Context, serv *sandboxTopo, cell string, resolver *Resolver) *Executor { + queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) + plans := DefaultPlanCache() + ex := NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4) + ex.SetQueryLogger(queryLogger) + return ex } func TestSelectScatter(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - sql := "select id from user" - _, err := executorExec(executor, sql, nil) + sql := "select id from `user`" + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, sql, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select id from `user`", @@ -1530,21 +1602,24 @@ func TestSelectScatter(t *testing.T) { for _, conn := range conns { utils.MustMatch(t, wantQueries, conn.Queries) } - testQueryLog(t, logChan, "TestExecute", "SELECT", sql, 8) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", sql, 8) } func TestSelectScatterPartial(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. - primarySession = &vtgatepb.Session{ + session := &vtgatepb.Session{ TargetString: "@primary", } cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { @@ -1552,13 +1627,14 @@ func TestSelectScatterPartial(t *testing.T) { conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) // Fail 1 of N without the directive fails the whole operation conns[2].MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1000 - results, err := executorExec(executor, "select id from user", nil) + results, err := executorExec(ctx, executor, session, "select id from `user`", nil) wantErr := "TestExecutor.40-60.primary" if err == nil || !strings.Contains(err.Error(), wantErr) { t.Errorf("want error %v, got %v", wantErr, err) @@ -1569,15 +1645,15 @@ func TestSelectScatterPartial(t *testing.T) { if results != nil { t.Errorf("want nil results, got %v", results) } - testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from user", 8) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user`", 8) // Fail 1 of N with the directive succeeds with 7 rows - results, err = executorExec(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user", nil) + results, err = executorExec(ctx, executor, session, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user", nil) require.NoError(t, err) if results == nil || len(results.Rows) != 7 { t.Errorf("want 7 results, got %v", results) } - testQueryLog(t, logChan, "TestExecute", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user", 8) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user`", 8) // When all shards fail, the execution should also fail conns[0].MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1000 @@ -1588,23 +1664,26 @@ func TestSelectScatterPartial(t *testing.T) { conns[6].MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1000 conns[7].MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1000 - _, err = executorExec(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user", nil) + _, err = executorExec(ctx, executor, session, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user", nil) require.Error(t, err) - testQueryLog(t, logChan, "TestExecute", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user", 8) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user`", 8) - _, err = executorExec(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id", nil) + _, err = executorExec(ctx, executor, session, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id", nil) require.Error(t, err) } func TestSelectScatterPartialOLAP(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { @@ -1612,23 +1691,24 @@ func TestSelectScatterPartialOLAP(t *testing.T) { conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) // Fail 1 of N without the directive fails the whole operation conns[2].MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1000 - results, err := executorStream(executor, "select id from user") + results, err := executorStream(ctx, executor, "select id from `user`") assert.EqualError(t, err, "target: TestExecutor.40-60.primary: RESOURCE_EXHAUSTED error") assert.Equal(t, vtrpcpb.Code_RESOURCE_EXHAUSTED, vterrors.Code(err)) assert.Nil(t, results) - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", "select id from user", 8) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select id from `user`", 8) // Fail 1 of N with the directive succeeds with 7 rows - results, err = executorStream(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user") + results, err = executorStream(ctx, executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user") require.NoError(t, err) assert.EqualValues(t, 7, len(results.Rows)) - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user", 8) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user`", 8) // If all shards fail, the operation should also fail conns[0].MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1000 @@ -1639,23 +1719,26 @@ func TestSelectScatterPartialOLAP(t *testing.T) { conns[6].MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1000 conns[7].MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1000 - _, err = executorStream(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user") + _, err = executorStream(ctx, executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user") require.Error(t, err) - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user", 8) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user`", 8) - _, err = executorStream(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id") + _, err = executorStream(ctx, executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id") require.Error(t, err) } func TestSelectScatterPartialOLAP2(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { @@ -1663,9 +1746,10 @@ func TestSelectScatterPartialOLAP2(t *testing.T) { conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) // Fail 1 of N without the directive fails the whole operation tablet0 := conns[2].Tablet() @@ -1677,49 +1761,53 @@ func TestSelectScatterPartialOLAP2(t *testing.T) { sbc0Th := ths[0] sbc0Th.Serving = false - results, err := executorStream(executor, "select id from user") + results, err := executorStream(ctx, executor, "select id from `user`") require.Error(t, err) assert.Contains(t, err.Error(), `no healthy tablet available for 'keyspace:"TestExecutor" shard:"40-60"`) assert.Equal(t, vtrpcpb.Code_UNAVAILABLE, vterrors.Code(err)) assert.Nil(t, results) - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", "select id from user", 8) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select id from `user`", 8) // Fail 1 of N with the directive succeeds with 7 rows - results, err = executorStream(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user") + results, err = executorStream(ctx, executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user") require.NoError(t, err) assert.EqualValues(t, 7, len(results.Rows)) - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user", 8) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user`", 8) // order by - results, err = executorStream(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id") + results, err = executorStream(ctx, executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id") require.NoError(t, err) assert.EqualValues(t, 7, len(results.Rows)) - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id", 8) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user` order by id asc", 8) // order by and limit - results, err = executorStream(executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id limit 5") + results, err = executorStream(ctx, executor, "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id limit 5") require.NoError(t, err) assert.EqualValues(t, 5, len(results.Rows)) - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from user order by id limit 5", 8) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ id from `user` order by id asc limit 5", 8) } func TestStreamSelectScatter(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} for _, shard := range shards { _ = hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() - sql := "select id from user" - result, err := executorStream(executor, sql) + sql := "select id from `user`" + result, err := executorStream(ctx, executor, sql) require.NoError(t, err) wantResult := &sqltypes.Result{ Fields: sandboxconn.SingleRowResult.Fields, @@ -1739,23 +1827,26 @@ func TestStreamSelectScatter(t *testing.T) { // TestSelectScatterOrderBy will run an ORDER BY query that will scatter out to 8 shards and return the 8 rows (one per shard) sorted. func TestSelectScatterOrderBy(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for i, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil) sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "col1", Type: sqltypes.Int32}, - {Name: "col2", Type: sqltypes.Int32}, - {Name: "weight_string(col2)", Type: sqltypes.VarBinary}, + {Name: "col1", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col2", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "weight_string(col2)", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -1769,10 +1860,14 @@ func TestSelectScatterOrderBy(t *testing.T) { }}) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() query := "select col1, col2 from user order by col2 desc" - gotResult, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + gotResult, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ @@ -1785,8 +1880,8 @@ func TestSelectScatterOrderBy(t *testing.T) { wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "col1", Type: sqltypes.Int32}, - {Name: "col2", Type: sqltypes.Int32}, + {Name: "col1", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col2", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, InsertID: 0, } @@ -1805,22 +1900,25 @@ func TestSelectScatterOrderBy(t *testing.T) { // TestSelectScatterOrderByVarChar will run an ORDER BY query that will scatter out to 8 shards and return the 8 rows (one per shard) sorted. func TestSelectScatterOrderByVarChar(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for i, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil) sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "col1", Type: sqltypes.Int32}, - {Name: "textcol", Type: sqltypes.VarChar}, + {Name: "col1", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -1829,19 +1927,22 @@ func TestSelectScatterOrderByVarChar(t *testing.T) { // This will allow us to test that cross-shard ordering // still works correctly. sqltypes.NewVarChar(fmt.Sprintf("%d", i%4)), - sqltypes.NewVarBinary(fmt.Sprintf("%d", i%4)), }}, }}) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() query := "select col1, textcol from user order by textcol desc" - gotResult, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + gotResult, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select col1, textcol, weight_string(textcol) from `user` order by textcol desc", + Sql: "select col1, textcol from `user` order by textcol desc", BindVariables: map[string]*querypb.BindVariable{}, }} for _, conn := range conns { @@ -1850,8 +1951,8 @@ func TestSelectScatterOrderByVarChar(t *testing.T) { wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "col1", Type: sqltypes.Int32}, - {Name: "textcol", Type: sqltypes.VarChar}, + {Name: "col1", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, }, InsertID: 0, } @@ -1869,23 +1970,26 @@ func TestSelectScatterOrderByVarChar(t *testing.T) { } func TestStreamSelectScatterOrderBy(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for i, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil) sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "col", Type: sqltypes.Int32}, - {Name: "weight_string(col)", Type: sqltypes.VarBinary}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "weight_string(col)", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -1896,10 +2000,11 @@ func TestStreamSelectScatterOrderBy(t *testing.T) { }}) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() query := "select id, col from user order by col desc" - gotResult, err := executorStream(executor, query) + gotResult, err := executorStream(ctx, executor, query) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ @@ -1912,8 +2017,8 @@ func TestStreamSelectScatterOrderBy(t *testing.T) { wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "col", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, } for i := 0; i < 4; i++ { @@ -1927,40 +2032,43 @@ func TestStreamSelectScatterOrderBy(t *testing.T) { } func TestStreamSelectScatterOrderByVarChar(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for i, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil) sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "textcol", Type: sqltypes.VarChar}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), sqltypes.NewVarChar(fmt.Sprintf("%d", i%4)), - sqltypes.NewVarBinary(fmt.Sprintf("%d", i%4)), }}, }}) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() query := "select id, textcol from user order by textcol desc" - gotResult, err := executorStream(executor, query) + gotResult, err := executorStream(ctx, executor, query) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select id, textcol, weight_string(textcol) from `user` order by textcol desc", + Sql: "select id, textcol from `user` order by textcol desc", BindVariables: map[string]*querypb.BindVariable{}, }} for _, conn := range conns { @@ -1969,8 +2077,8 @@ func TestStreamSelectScatterOrderByVarChar(t *testing.T) { wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "textcol", Type: sqltypes.VarChar}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, }, } for i := 0; i < 4; i++ { @@ -1985,23 +2093,26 @@ func TestStreamSelectScatterOrderByVarChar(t *testing.T) { // TestSelectScatterAggregate will run an aggregate query that will scatter out to 8 shards and return 4 aggregated rows. func TestSelectScatterAggregate(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for i, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil) sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "col", Type: sqltypes.Int32}, - {Name: "sum(foo)", Type: sqltypes.Int32}, - {Name: "weight_string(col)", Type: sqltypes.VarBinary}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "sum(foo)", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "weight_string(col)", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -2012,10 +2123,14 @@ func TestSelectScatterAggregate(t *testing.T) { }}) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() query := "select col, sum(foo) from user group by col" - gotResult, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + gotResult, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ @@ -2028,15 +2143,15 @@ func TestSelectScatterAggregate(t *testing.T) { wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "col", Type: sqltypes.Int32}, - {Name: "sum(foo)", Type: sqltypes.Int32}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "sum(foo)", Type: sqltypes.Decimal, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, InsertID: 0, } for i := 0; i < 4; i++ { row := []sqltypes.Value{ sqltypes.NewInt32(int32(i)), - sqltypes.NewInt32(int32(i*2 + 4)), + sqltypes.NewDecimal(fmt.Sprintf("%d", i*2+4)), } wantResult.Rows = append(wantResult.Rows, row) } @@ -2044,23 +2159,26 @@ func TestSelectScatterAggregate(t *testing.T) { } func TestStreamSelectScatterAggregate(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for i, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil) sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "col", Type: sqltypes.Int32}, - {Name: "sum(foo)", Type: sqltypes.Int32}, - {Name: "weight_string(col)", Type: sqltypes.VarBinary}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "sum(foo)", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "weight_string(col)", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -2071,10 +2189,11 @@ func TestStreamSelectScatterAggregate(t *testing.T) { }}) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() query := "select col, sum(foo) from user group by col" - gotResult, err := executorStream(executor, query) + gotResult, err := executorStream(ctx, executor, query) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ @@ -2087,14 +2206,14 @@ func TestStreamSelectScatterAggregate(t *testing.T) { wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "col", Type: sqltypes.Int32}, - {Name: "sum(foo)", Type: sqltypes.Int32}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "sum(foo)", Type: sqltypes.Decimal, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, } for i := 0; i < 4; i++ { row := []sqltypes.Value{ sqltypes.NewInt32(int32(i)), - sqltypes.NewInt32(int32(i*2 + 4)), + sqltypes.NewDecimal(fmt.Sprintf("%d", i*2+4)), } wantResult.Rows = append(wantResult.Rows, row) } @@ -2104,23 +2223,26 @@ func TestStreamSelectScatterAggregate(t *testing.T) { // TestSelectScatterLimit will run a limit query (ordered for consistency) against // a scatter route and verify that the limit primitive works as intended. func TestSelectScatterLimit(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for i, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil) sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "col1", Type: sqltypes.Int32}, - {Name: "col2", Type: sqltypes.Int32}, - {Name: "weight_string(col2)", Type: sqltypes.VarBinary}, + {Name: "col1", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col2", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "weight_string(col2)", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -2131,10 +2253,14 @@ func TestSelectScatterLimit(t *testing.T) { }}) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() query := "select col1, col2 from user order by col2 desc limit 3" - gotResult, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + gotResult, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ @@ -2147,8 +2273,8 @@ func TestSelectScatterLimit(t *testing.T) { wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "col1", Type: sqltypes.Int32}, - {Name: "col2", Type: sqltypes.Int32}, + {Name: "col1", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col2", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, InsertID: 0, } @@ -2172,23 +2298,26 @@ func TestSelectScatterLimit(t *testing.T) { // TestStreamSelectScatterLimit will run a streaming limit query (ordered for consistency) against // a scatter route and verify that the limit primitive works as intended. func TestStreamSelectScatterLimit(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for i, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil) sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "col1", Type: sqltypes.Int32}, - {Name: "col2", Type: sqltypes.Int32}, - {Name: "weight_string(col2)", Type: sqltypes.VarBinary}, + {Name: "col1", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col2", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "weight_string(col2)", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -2199,10 +2328,11 @@ func TestStreamSelectScatterLimit(t *testing.T) { }}) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() query := "select col1, col2 from user order by col2 desc limit 3" - gotResult, err := executorStream(executor, query) + gotResult, err := executorStream(ctx, executor, query) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ @@ -2215,8 +2345,8 @@ func TestStreamSelectScatterLimit(t *testing.T) { wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "col1", Type: sqltypes.Int32}, - {Name: "col2", Type: sqltypes.Int32}, + {Name: "col1", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col2", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, } wantResult.Rows = append(wantResult.Rows, @@ -2239,12 +2369,15 @@ func TestStreamSelectScatterLimit(t *testing.T) { // TODO(sougou): stream and non-stream testing are very similar. // Could reuse code, func TestSimpleJoin(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select u1.id, u2.id from user u1 join user u2 where u1.id = 1 and u2.id = 3" - result, err := executorExec(executor, sql, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, sql, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id from `user` as u1 where u1.id = 1", @@ -2272,16 +2405,19 @@ func TestSimpleJoin(t *testing.T) { t.Errorf("result: %+v, want %+v", result, wantResult) } - testQueryLog(t, logChan, "TestExecute", "SELECT", sql, 2) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select u1.id, u2.id from `user` as u1 join `user` as u2 where u1.id = 1 and u2.id = 3", 2) } func TestJoinComments(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select u1.id, u2.id from user u1 join user u2 where u1.id = 1 and u2.id = 3 /* trailing */" - _, err := executorExec(executor, sql, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, sql, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id from `user` as u1 where u1.id = 1 /* trailing */", @@ -2294,16 +2430,16 @@ func TestJoinComments(t *testing.T) { }} utils.MustMatch(t, wantQueries, sbc2.Queries) - testQueryLog(t, logChan, "TestExecute", "SELECT", sql, 2) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select u1.id, u2.id from `user` as u1 join `user` as u2 where u1.id = 1 and u2.id = 3 /* trailing */", 2) } func TestSimpleJoinStream(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select u1.id, u2.id from user u1 join user u2 where u1.id = 1 and u2.id = 3" - result, err := executorStream(executor, sql) + result, err := executorStream(ctx, executor, sql) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id from `user` as u1 where u1.id = 1", @@ -2332,18 +2468,18 @@ func TestSimpleJoinStream(t *testing.T) { t.Errorf("result: %+v, want %+v", result, wantResult) } - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", sql, 2) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select u1.id, u2.id from `user` as u1 join `user` as u2 where u1.id = 1 and u2.id = 3", 2) } func TestVarJoin(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "col", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -2353,7 +2489,10 @@ func TestVarJoin(t *testing.T) { }} sbc1.SetResults(result1) sql := "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1" - _, err := executorExec(executor, sql, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, sql, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id, u1.col from `user` as u1 where u1.id = 1", @@ -2367,18 +2506,18 @@ func TestVarJoin(t *testing.T) { t.Errorf("sbc2.Queries: %s, want %s\n", got, want) } - testQueryLog(t, logChan, "TestExecute", "SELECT", sql, 2) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select u1.id, u2.id from `user` as u1 join `user` as u2 on u2.id = u1.col where u1.id = 1", 2) } func TestVarJoinStream(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "col", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -2388,7 +2527,7 @@ func TestVarJoinStream(t *testing.T) { }} sbc1.SetResults(result1) sql := "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1" - _, err := executorStream(executor, sql) + _, err := executorStream(ctx, executor, sql) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id, u1.col from `user` as u1 where u1.id = 1", @@ -2402,33 +2541,36 @@ func TestVarJoinStream(t *testing.T) { t.Errorf("sbc2.Queries: %s, want %s\n", got, want) } - testQueryLog(t, logChan, "TestExecuteStream", "SELECT", sql, 2) + testQueryLog(t, executor, logChan, "TestExecuteStream", "SELECT", "select u1.id, u2.id from `user` as u1 join `user` as u2 on u2.id = u1.col where u1.id = 1", 2) } func TestLeftJoin(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "col", Type: sqltypes.Int32}, - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ - sqltypes.NewInt32(3), sqltypes.NewInt32(1), + sqltypes.NewInt32(3), }}, }} emptyResult := []*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }} sbc1.SetResults(result1) sbc2.SetResults(emptyResult) sql := "select u1.id, u2.id from user u1 left join user u2 on u2.id = u1.col where u1.id = 1" - result, err := executorExec(executor, sql, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, sql, nil) require.NoError(t, err) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ @@ -2445,30 +2587,30 @@ func TestLeftJoin(t *testing.T) { if !result.Equal(wantResult) { t.Errorf("result: \n%+v, want \n%+v", result, wantResult) } - testQueryLog(t, logChan, "TestExecute", "SELECT", sql, 2) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select u1.id, u2.id from `user` as u1 left join `user` as u2 on u2.id = u1.col where u1.id = 1", 2) } func TestLeftJoinStream(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "col", Type: sqltypes.Int32}, - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ - sqltypes.NewInt32(3), sqltypes.NewInt32(1), + sqltypes.NewInt32(3), }}, }} emptyResult := []*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }} sbc1.SetResults(result1) sbc2.SetResults(emptyResult) - result, err := executorStream(executor, "select u1.id, u2.id from user u1 left join user u2 on u2.id = u1.col where u1.id = 1") + result, err := executorStream(ctx, executor, "select u1.id, u2.id from user u1 left join user u2 on u2.id = u1.col where u1.id = 1") require.NoError(t, err) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ @@ -2489,19 +2631,24 @@ func TestLeftJoinStream(t *testing.T) { } func TestEmptyJoin(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) // Empty result requires a field query for the second part of join, // which is sent to shard 0. sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }, { Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }}) - result, err := executorExec(executor, "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id, u1.col from `user` as u1 where u1.id = 1", @@ -2509,14 +2656,14 @@ func TestEmptyJoin(t *testing.T) { }, { Sql: "select u2.id from `user` as u2 where 1 != 1", BindVariables: map[string]*querypb.BindVariable{ - "u1_col": sqltypes.NullBindVariable, + "u1_col": sqltypes.Int32BindVariable(0), }, }} utils.MustMatch(t, wantQueries, sbc1.Queries) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, } if !result.Equal(wantResult) { @@ -2525,19 +2672,19 @@ func TestEmptyJoin(t *testing.T) { } func TestEmptyJoinStream(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) // Empty result requires a field query for the second part of join, // which is sent to shard 0. sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }, { Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }}) - result, err := executorStream(executor, "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1") + result, err := executorStream(ctx, executor, "select u1.id, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1") require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id, u1.col from `user` as u1 where u1.id = 1", @@ -2551,8 +2698,8 @@ func TestEmptyJoinStream(t *testing.T) { utils.MustMatch(t, wantQueries, sbc1.Queries) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, } if !result.Equal(wantResult) { @@ -2561,23 +2708,26 @@ func TestEmptyJoinStream(t *testing.T) { } func TestEmptyJoinRecursive(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) // Make sure it also works recursively. sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }, { Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "col", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }, { Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }}) - result, err := executorExec(executor, "select u1.id, u2.id, u3.id from user u1 join (user u2 join user u3 on u3.id = u2.col) where u1.id = 1", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "select u1.id, u2.id, u3.id from user u1 join (user u2 join user u3 on u3.id = u2.col) where u1.id = 1", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id from `user` as u1 where u1.id = 1", @@ -2594,9 +2744,9 @@ func TestEmptyJoinRecursive(t *testing.T) { utils.MustMatch(t, wantQueries, sbc1.Queries) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "id", Type: sqltypes.Int32}, - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, } if !result.Equal(wantResult) { @@ -2605,23 +2755,23 @@ func TestEmptyJoinRecursive(t *testing.T) { } func TestEmptyJoinRecursiveStream(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) // Make sure it also works recursively. sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }, { Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "col", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }, { Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }}) - result, err := executorStream(executor, "select u1.id, u2.id, u3.id from user u1 join (user u2 join user u3 on u3.id = u2.col) where u1.id = 1") + result, err := executorStream(ctx, executor, "select u1.id, u2.id, u3.id from user u1 join (user u2 join user u3 on u3.id = u2.col) where u1.id = 1") require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: "select u1.id from `user` as u1 where u1.id = 1", @@ -2638,9 +2788,9 @@ func TestEmptyJoinRecursiveStream(t *testing.T) { utils.MustMatch(t, wantQueries, sbc1.Queries) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "id", Type: sqltypes.Int32}, - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, } if !result.Equal(wantResult) { @@ -2649,7 +2799,7 @@ func TestEmptyJoinRecursiveStream(t *testing.T) { } func TestCrossShardSubquery(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32}, @@ -2662,39 +2812,35 @@ func TestCrossShardSubquery(t *testing.T) { }}, }} sbc1.SetResults(result1) - result, err := executorExec(executor, "select id1 from (select u1.id id1, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1) as t", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "select id1 from (select u1.id id1, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1) as t", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id as id1, u1.col from `user` as u1 where u1.id = 1", + Sql: "select id1, t.`u1.col` from (select u1.id as id1, u1.col as `u1.col` from `user` as u1 where u1.id = 1) as t", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) - // We have to use string representation because bindvars type is too complex. - got := fmt.Sprintf("%+v", sbc2.Queries) - want := `[sql:"select u2.id from ` + "`user`" + ` as u2 where u2.id = :u1_col" bind_variables:{key:"u1_col" value:{type:INT32 value:"3"}}]` - if got != want { - t.Errorf("sbc2.Queries: %s, want %s\n", got, want) - } - wantResult := &sqltypes.Result{ - Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - }, - Rows: [][]sqltypes.Value{{ - sqltypes.NewInt32(1), - }}, - } + wantQueries = []*querypb.BoundQuery{{ + Sql: "select 1 from (select u2.id from `user` as u2 where u2.id = :u1_col) as t", + BindVariables: map[string]*querypb.BindVariable{"u1_col": sqltypes.Int32BindVariable(3)}, + }} + utils.MustMatch(t, wantQueries, sbc2.Queries) + + wantResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields("id", "int32"), "1") if !result.Equal(wantResult) { t.Errorf("result: %+v, want %+v", result, wantResult) } } func TestSubQueryAndQueryWithLimit(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "col", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -2704,8 +2850,8 @@ func TestSubQueryAndQueryWithLimit(t *testing.T) { }} result2 := []*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "col", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -2732,11 +2878,11 @@ func TestSubQueryAndQueryWithLimit(t *testing.T) { } func TestCrossShardSubqueryStream(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "col", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -2745,23 +2891,22 @@ func TestCrossShardSubqueryStream(t *testing.T) { }}, }} sbc1.SetResults(result1) - result, err := executorStream(executor, "select id1 from (select u1.id id1, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1) as t") + result, err := executorStream(ctx, executor, "select id1 from (select u1.id id1, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1) as t") require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id as id1, u1.col from `user` as u1 where u1.id = 1", + Sql: "select id1, t.`u1.col` from (select u1.id as id1, u1.col as `u1.col` from `user` as u1 where u1.id = 1) as t", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) - // We have to use string representation because bindvars type is too complex. - got := fmt.Sprintf("%+v", sbc2.Queries) - want := `[sql:"select u2.id from ` + "`user`" + ` as u2 where u2.id = :u1_col" bind_variables:{key:"u1_col" value:{type:INT32 value:"3"}}]` - if got != want { - t.Errorf("sbc2.Queries:\n%s, want\n%s\n", got, want) - } + wantQueries = []*querypb.BoundQuery{{ + Sql: "select 1 from (select u2.id from `user` as u2 where u2.id = :u1_col) as t", + BindVariables: map[string]*querypb.BindVariable{"u1_col": sqltypes.Int32BindVariable(3)}, + }} + utils.MustMatch(t, wantQueries, sbc2.Queries) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), @@ -2773,26 +2918,29 @@ func TestCrossShardSubqueryStream(t *testing.T) { } func TestCrossShardSubqueryGetFields(t *testing.T) { - executor, sbc1, _, sbclookup := createExecutorEnv() + executor, sbc1, _, sbclookup, ctx := createExecutorEnv(t) sbclookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "col", Type: sqltypes.Int32}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }}) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "col", Type: sqltypes.Int32}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }} sbc1.SetResults(result1) - result, err := executorExec(executor, "select main1.col, t.id1 from main1 join (select u1.id id1, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1) as t", nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "select main1.col, t.id1 from main1 join (select u1.id id1, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1) as t", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select u1.id as id1, u1.col from `user` as u1 where 1 != 1", + Sql: "select t.id1, t.`u1.col` from (select u1.id as id1, u1.col as `u1.col` from `user` as u1 where 1 != 1) as t where 1 != 1", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "select u2.id from `user` as u2 where 1 != 1", + Sql: "select 1 from (select u2.id from `user` as u2 where 1 != 1) as t where 1 != 1", BindVariables: map[string]*querypb.BindVariable{ "u1_col": sqltypes.NullBindVariable, }, @@ -2801,8 +2949,8 @@ func TestCrossShardSubqueryGetFields(t *testing.T) { wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "col", Type: sqltypes.Int32}, - {Name: "id", Type: sqltypes.Int32}, + {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, } if !result.Equal(wantResult) { @@ -2811,12 +2959,15 @@ func TestCrossShardSubqueryGetFields(t *testing.T) { } func TestSelectBindvarswithPrepare(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - sql := "select id from user where id = :id" - _, err := executorPrepare(executor, sql, map[string]*querypb.BindVariable{ + sql := "select id from `user` where id = :id" + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorPrepare(ctx, executor, session, sql, map[string]*querypb.BindVariable{ "id": sqltypes.Int64BindVariable(1), }) require.NoError(t, err) @@ -2832,18 +2983,21 @@ func TestSelectBindvarswithPrepare(t *testing.T) { } func TestSelectDatabasePrepare(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "select database()" - _, err := executorPrepare(executor, sql, map[string]*querypb.BindVariable{}) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorPrepare(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) } func TestSelectWithUnionAll(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) executor.normalize = true sql := "select id from user where id in (1, 2, 3) union all select id from user where id in (1, 2, 3)" bv, _ := sqltypes.BuildBindVariable([]int64{1, 2, 3}) @@ -2879,7 +3033,10 @@ func TestSelectWithUnionAll(t *testing.T) { "vtg2": bv, }, }} - _, err := executorExec(executor, sql, map[string]*querypb.BindVariable{}) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) utils.MustMatch(t, sbc1WantQueries, sbc1.Queries, "sbc1") utils.MustMatch(t, sbc2WantQueries, sbc2.Queries, "sbc2") @@ -2888,14 +3045,14 @@ func TestSelectWithUnionAll(t *testing.T) { sbc1.Queries = nil sbc2.Queries = nil - _, err = executorStream(executor, sql) + _, err = executorStream(ctx, executor, sql) require.NoError(t, err) utils.MustMatch(t, sbc1WantQueries, sbc1.Queries, "sbc1") utils.MustMatch(t, sbc2WantQueries, sbc2.Queries, "sbc2") } func TestSelectLock(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) session := NewSafeSession(nil) session.Session.InTransaction = true session.ShardSessions = []*vtgatepb.Session_ShardSession{{ @@ -2954,6 +3111,8 @@ func TestSelectLock(t *testing.T) { } func TestLockReserve(t *testing.T) { + executor, _, _, _, _ := createExecutorEnv(t) + // no connection should be reserved for these queries. tcases := []string{ "select is_free_lock('lock name') from dual", @@ -2962,7 +3121,6 @@ func TestLockReserve(t *testing.T) { "select release_lock('lock name') from dual", } - executor, _, _, _ := createExecutorEnv() session := NewAutocommitSession(&vtgatepb.Session{}) for _, sql := range tcases { @@ -2981,7 +3139,7 @@ func TestLockReserve(t *testing.T) { } func TestSelectFromInformationSchema(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) session := NewSafeSession(nil) // check failure when trying to query two keyspaces @@ -2993,25 +3151,30 @@ func TestSelectFromInformationSchema(t *testing.T) { session.TargetString = "TestExecutor" _, err = exec(executor, session, "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()") require.NoError(t, err) - assert.Equal(t, sbc1.StringQueries(), []string{"select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()"}) + assert.Equal(t, []string{"select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()"}, + sbc1.StringQueries()) // `USE TestXBadSharding` and then query info_schema about TestExecutor - should target TestExecutor and not use the default keyspace sbc1.Queries = nil session.TargetString = "TestXBadSharding" _, err = exec(executor, session, "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'TestExecutor'") require.NoError(t, err) - assert.Equal(t, sbc1.StringQueries(), []string{"select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname"}) + assert.Equal(t, []string{"select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */"}, + sbc1.StringQueries()) } func TestStreamOrderByLimitWithMultipleResults(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} count := 1 for _, shard := range shards { @@ -3023,11 +3186,17 @@ func TestStreamOrderByLimitWithMultipleResults(t *testing.T) { count++ } - executor := NewExecutor(context.Background(), serv, cell, resolver, true, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3) + queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) + plans := DefaultPlanCache() + executor := NewExecutor(ctx, serv, cell, resolver, true, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4) + executor.SetQueryLogger(queryLogger) + defer executor.Close() + // some sleep for all goroutines to start + time.Sleep(100 * time.Millisecond) before := runtime.NumGoroutine() query := "select id, col from user order by id limit 2" - gotResult, err := executorStream(executor, query) + gotResult, err := executorStream(ctx, executor, query) require.NoError(t, err) wantResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields("id|col", "int32|int32"), "1|1", "2|2") @@ -3038,23 +3207,26 @@ func TestStreamOrderByLimitWithMultipleResults(t *testing.T) { } func TestSelectScatterFails(t *testing.T) { + ctx := utils.LeakCheckContext(t) + sess := &vtgatepb.Session{} cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox(KsTestSharded) s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} for i, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil) sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "col1", Type: sqltypes.Int32}, - {Name: "col2", Type: sqltypes.Int32}, - {Name: "weight_string(col2)", Type: sqltypes.VarBinary}, + {Name: "col1", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "col2", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "weight_string(col2)", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -3065,40 +3237,41 @@ func TestSelectScatterFails(t *testing.T) { }}) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() executor.allowScatter = false - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) - _, err := executorExecSession(executor, "select id from user", nil, sess) + _, err := executorExecSession(ctx, executor, "select id from `user`", nil, sess) require.Error(t, err) assert.Contains(t, err.Error(), "scatter") // Run the test again, to ensure it behaves the same for a cached query - _, err = executorExecSession(executor, "select id from user", nil, sess) + _, err = executorExecSession(ctx, executor, "select id from `user`", nil, sess) require.Error(t, err) assert.Contains(t, err.Error(), "scatter") - _, err = executorExecSession(executor, "select /*vt+ ALLOW_SCATTER */ id from user", nil, sess) + _, err = executorExecSession(ctx, executor, "select /*vt+ ALLOW_SCATTER */ id from user", nil, sess) require.NoError(t, err) - _, err = executorExecSession(executor, "begin", nil, sess) + _, err = executorExecSession(ctx, executor, "begin", nil, sess) require.NoError(t, err) - _, err = executorExecSession(executor, "commit", nil, sess) + _, err = executorExecSession(ctx, executor, "commit", nil, sess) require.NoError(t, err) - _, err = executorExecSession(executor, "savepoint a", nil, sess) + _, err = executorExecSession(ctx, executor, "savepoint a", nil, sess) require.NoError(t, err) } func TestGen4SelectStraightJoin(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, _ := createExecutorEnv(t) executor.normalize = true executor.pv = querypb.ExecuteOptions_Gen4 session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) query := "select u.id from user u straight_join user2 u2 on u.id = u2.id" - _, err := executor.Execute(context.Background(), + _, err := executor.Execute(context.Background(), nil, "TestGen4SelectStraightJoin", session, query, map[string]*querypb.BindVariable{}, @@ -3121,21 +3294,17 @@ func TestGen4SelectStraightJoin(t *testing.T) { } func TestGen4MultiColumnVindexEqual(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) executor.normalize = true executor.pv = querypb.ExecuteOptions_Gen4 session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) query := "select * from user_region where cola = 1 and colb = 2" - _, err := executor.Execute(context.Background(), - "TestGen4MultiColumnVindex", - session, - query, map[string]*querypb.BindVariable{}, - ) + _, err := executor.Execute(context.Background(), nil, "TestGen4MultiColumnVindex", session, query, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { - Sql: "select * from user_region where cola = :cola and colb = :colb", + Sql: "select * from user_region where cola = :cola /* INT64 */ and colb = :colb /* INT64 */", BindVariables: map[string]*querypb.BindVariable{ "cola": sqltypes.Int64BindVariable(1), "colb": sqltypes.Int64BindVariable(2), @@ -3148,15 +3317,11 @@ func TestGen4MultiColumnVindexEqual(t *testing.T) { sbc1.Queries = nil query = "select * from user_region where cola = 17984 and colb = 1" - _, err = executor.Execute(context.Background(), - "TestGen4MultiColumnVindex", - session, - query, map[string]*querypb.BindVariable{}, - ) + _, err = executor.Execute(context.Background(), nil, "TestGen4MultiColumnVindex", session, query, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{ { - Sql: "select * from user_region where cola = :cola and colb = :colb", + Sql: "select * from user_region where cola = :cola /* INT64 */ and colb = :colb /* INT64 */", BindVariables: map[string]*querypb.BindVariable{ "cola": sqltypes.Int64BindVariable(17984), "colb": sqltypes.Int64BindVariable(1), @@ -3168,17 +3333,13 @@ func TestGen4MultiColumnVindexEqual(t *testing.T) { } func TestGen4MultiColumnVindexIn(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) executor.normalize = true executor.pv = querypb.ExecuteOptions_Gen4 session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) query := "select * from user_region where cola IN (1,17984) and colb IN (2,3,4)" - _, err := executor.Execute(context.Background(), - "TestGen4MultiColumnVindex", - session, - query, map[string]*querypb.BindVariable{}, - ) + _, err := executor.Execute(context.Background(), nil, "TestGen4MultiColumnVindex", session, query, map[string]*querypb.BindVariable{}) require.NoError(t, err) bv1, _ := sqltypes.BuildBindVariable([]int64{1}) bv2, _ := sqltypes.BuildBindVariable([]int64{17984}) @@ -3211,17 +3372,13 @@ func TestGen4MultiColumnVindexIn(t *testing.T) { } func TestGen4MultiColMixedColComparision(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) executor.normalize = true executor.pv = querypb.ExecuteOptions_Gen4 session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) query := "select * from user_region where colb = 2 and cola IN (1,17984)" - _, err := executor.Execute(context.Background(), - "TestGen4MultiColMixedColComparision", - session, - query, map[string]*querypb.BindVariable{}, - ) + _, err := executor.Execute(context.Background(), nil, "TestGen4MultiColMixedColComparision", session, query, map[string]*querypb.BindVariable{}) require.NoError(t, err) bvtg1 := sqltypes.Int64BindVariable(2) bvtg2, _ := sqltypes.BuildBindVariable([]int64{1, 17984}) @@ -3229,7 +3386,7 @@ func TestGen4MultiColMixedColComparision(t *testing.T) { vals0sbc2, _ := sqltypes.BuildBindVariable([]int64{17984}) wantQueries := []*querypb.BoundQuery{ { - Sql: "select * from user_region where colb = :colb and cola in ::__vals0", + Sql: "select * from user_region where colb = :colb /* INT64 */ and cola in ::__vals0", BindVariables: map[string]*querypb.BindVariable{ "__vals0": vals0sbc1, "colb": bvtg1, @@ -3240,7 +3397,7 @@ func TestGen4MultiColMixedColComparision(t *testing.T) { utils.MustMatch(t, wantQueries, sbc1.Queries) wantQueries = []*querypb.BoundQuery{ { - Sql: "select * from user_region where colb = :colb and cola in ::__vals0", + Sql: "select * from user_region where colb = :colb /* INT64 */ and cola in ::__vals0", BindVariables: map[string]*querypb.BindVariable{ "__vals0": vals0sbc2, "colb": bvtg1, @@ -3252,22 +3409,18 @@ func TestGen4MultiColMixedColComparision(t *testing.T) { } func TestGen4MultiColBestVindexSel(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) executor.normalize = true executor.pv = querypb.ExecuteOptions_Gen4 session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) query := "select * from user_region where colb = 2 and cola IN (1,17984) and cola = 1" - _, err := executor.Execute(context.Background(), - "TestGen4MultiColBestVindexSel", - session, - query, map[string]*querypb.BindVariable{}, - ) + _, err := executor.Execute(context.Background(), nil, "TestGen4MultiColBestVindexSel", session, query, map[string]*querypb.BindVariable{}) require.NoError(t, err) bvtg2, _ := sqltypes.BuildBindVariable([]int64{1, 17984}) wantQueries := []*querypb.BoundQuery{ { - Sql: "select * from user_region where colb = :colb and cola in ::vtg1 and cola = :cola", + Sql: "select * from user_region where colb = :colb /* INT64 */ and cola in ::vtg1 and cola = :cola /* INT64 */", BindVariables: map[string]*querypb.BindVariable{ "colb": sqltypes.Int64BindVariable(2), "vtg1": bvtg2, @@ -3282,17 +3435,13 @@ func TestGen4MultiColBestVindexSel(t *testing.T) { sbc1.Queries = nil query = "select * from user_region where colb in (10,20) and cola IN (1,17984) and cola = 1 and colb = 2" - _, err = executor.Execute(context.Background(), - "TestGen4MultiColBestVindexSel", - session, - query, map[string]*querypb.BindVariable{}, - ) + _, err = executor.Execute(context.Background(), nil, "TestGen4MultiColBestVindexSel", session, query, map[string]*querypb.BindVariable{}) require.NoError(t, err) bvtg1, _ := sqltypes.BuildBindVariable([]int64{10, 20}) wantQueries = []*querypb.BoundQuery{ { - Sql: "select * from user_region where colb in ::vtg1 and cola in ::vtg2 and cola = :cola and colb = :colb", + Sql: "select * from user_region where colb in ::vtg1 and cola in ::vtg2 and cola = :cola /* INT64 */ and colb = :colb /* INT64 */", BindVariables: map[string]*querypb.BindVariable{ "vtg1": bvtg1, "vtg2": bvtg2, @@ -3306,21 +3455,17 @@ func TestGen4MultiColBestVindexSel(t *testing.T) { } func TestGen4MultiColMultiEqual(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) executor.normalize = true executor.pv = querypb.ExecuteOptions_Gen4 session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) query := "select * from user_region where (cola,colb) in ((17984,2),(17984,3))" - _, err := executor.Execute(context.Background(), - "TestGen4MultiColMultiEqual", - session, - query, map[string]*querypb.BindVariable{}, - ) + _, err := executor.Execute(context.Background(), nil, "TestGen4MultiColMultiEqual", session, query, map[string]*querypb.BindVariable{}) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { - Sql: "select * from user_region where (cola, colb) in ((:vtg1, :vtg2), (:vtg1, :vtg3))", + Sql: "select * from user_region where (cola, colb) in ((:vtg1 /* INT64 */, :vtg2 /* INT64 */), (:vtg1 /* INT64 */, :vtg3 /* INT64 */))", BindVariables: map[string]*querypb.BindVariable{ "vtg1": sqltypes.Int64BindVariable(17984), "vtg2": sqltypes.Int64BindVariable(2), @@ -3333,11 +3478,14 @@ func TestGen4MultiColMultiEqual(t *testing.T) { } func TestGen4SelectUnqualifiedReferenceTable(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) executor.pv = querypb.ExecuteOptions_Gen4 query := "select * from zip_detail" - _, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -3351,11 +3499,14 @@ func TestGen4SelectUnqualifiedReferenceTable(t *testing.T) { } func TestGen4SelectQualifiedReferenceTable(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) executor.pv = querypb.ExecuteOptions_Gen4 query := fmt.Sprintf("select * from %s.zip_detail", KsTestSharded) - _, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -3369,11 +3520,14 @@ func TestGen4SelectQualifiedReferenceTable(t *testing.T) { } func TestGen4JoinUnqualifiedReferenceTable(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) executor.pv = querypb.ExecuteOptions_Gen4 query := "select * from user join zip_detail on user.zip_detail_id = zip_detail.id" - _, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { @@ -3389,7 +3543,7 @@ func TestGen4JoinUnqualifiedReferenceTable(t *testing.T) { sbc2.Queries = nil query = "select * from simple join zip_detail on simple.zip_detail_id = zip_detail.id" - _, err = executorExec(executor, query, nil) + _, err = executorExec(ctx, executor, session, query, nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{ { @@ -3403,11 +3557,14 @@ func TestGen4JoinUnqualifiedReferenceTable(t *testing.T) { } func TestGen4CrossShardJoinQualifiedReferenceTable(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) executor.pv = querypb.ExecuteOptions_Gen4 query := "select user.id from user join TestUnsharded.zip_detail on user.zip_detail_id = TestUnsharded.zip_detail.id" - _, err := executorExec(executor, query, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + _, err := executorExec(ctx, executor, session, query, nil) require.NoError(t, err) shardedWantQueries := []*querypb.BoundQuery{ @@ -3425,7 +3582,7 @@ func TestGen4CrossShardJoinQualifiedReferenceTable(t *testing.T) { sbc2.Queries = nil query = "select simple.id from simple join TestExecutor.zip_detail on simple.zip_detail_id = TestExecutor.zip_detail.id" - _, err = executorExec(executor, query, nil) + _, err = executorExec(ctx, executor, session, query, nil) require.NoError(t, err) unshardedWantQueries := []*querypb.BoundQuery{ { @@ -3439,23 +3596,25 @@ func TestGen4CrossShardJoinQualifiedReferenceTable(t *testing.T) { } func TestRegionRange(t *testing.T) { - // Special setup: Don't use createExecutorEnv. + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "regioncell" ks := "TestExecutor" hc := discovery.NewFakeHealthCheck(nil) s := createSandbox(ks) s.ShardSpec = "-20-20a0-" s.VSchema = executorVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-20a0", "20a0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, ks, shard, topodatapb.TabletType_PRIMARY, true, 1, nil) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() executor.pv = querypb.ExecuteOptions_Gen4 tcases := []struct { @@ -3474,12 +3633,7 @@ func TestRegionRange(t *testing.T) { for _, tcase := range tcases { t.Run(strconv.Itoa(tcase.regionID), func(t *testing.T) { sql := fmt.Sprintf("select * from user_region where cola = %d", tcase.regionID) - _, err := executor.Execute( - context.Background(), - "TestRegionRange", - NewAutocommitSession(&vtgatepb.Session{}), - sql, - nil) + _, err := executor.Execute(context.Background(), nil, "TestRegionRange", NewAutocommitSession(&vtgatepb.Session{}), sql, nil) require.NoError(t, err) count := 0 for _, sbc := range conns { @@ -3492,6 +3646,8 @@ func TestRegionRange(t *testing.T) { } func TestMultiCol(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createLegacyExecutorEnv. cell := "multicol" ks := "TestMultiCol" @@ -3499,15 +3655,16 @@ func TestMultiCol(t *testing.T) { s := createSandbox(ks) s.ShardSpec = "-20-20a0-" s.VSchema = multiColVschema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-20a0", "20a0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, ks, shard, topodatapb.TabletType_PRIMARY, true, 1, nil) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() executor.pv = querypb.ExecuteOptions_Gen4 tcases := []struct { @@ -3524,13 +3681,12 @@ func TestMultiCol(t *testing.T) { shards: []string{"20a0-"}, }} - ctx := context.Background() session := NewAutocommitSession(&vtgatepb.Session{}) for _, tcase := range tcases { t.Run(fmt.Sprintf("%d_%d_%d", tcase.cola, tcase.colb, tcase.colc), func(t *testing.T) { sql := fmt.Sprintf("select * from multicoltbl where cola = %d and colb = %d and colc = '%d'", tcase.cola, tcase.colb, tcase.colc) - _, err := executor.Execute(ctx, "TestMultiCol", session, sql, nil) + _, err := executor.Execute(ctx, nil, "TestMultiCol", session, sql, nil) require.NoError(t, err) var shards []string for _, sbc := range conns { @@ -3571,6 +3727,8 @@ var multiColVschema = ` ` func TestMultiColPartial(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createLegacyExecutorEnv. cell := "multicol" ks := "TestMultiCol" @@ -3578,15 +3736,16 @@ func TestMultiColPartial(t *testing.T) { s := createSandbox(ks) s.ShardSpec = "-20-20a0c0-" s.VSchema = multiColVschema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-20a0c0", "20a0c0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, ks, shard, topodatapb.TabletType_PRIMARY, true, 1, nil) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() executor.pv = querypb.ExecuteOptions_Gen4 tcases := []struct { @@ -3612,13 +3771,12 @@ func TestMultiColPartial(t *testing.T) { shards: []string{"20a0c0-"}, }} - ctx := context.Background() session := NewAutocommitSession(&vtgatepb.Session{}) for _, tcase := range tcases { t.Run(tcase.where, func(t *testing.T) { sql := fmt.Sprintf("select * from multicoltbl where %s", tcase.where) - _, err := executor.Execute(ctx, "TestMultiCol", session, sql, nil) + _, err := executor.Execute(ctx, nil, "TestMultiCol", session, sql, nil) require.NoError(t, err) var shards []string for _, sbc := range conns { @@ -3633,20 +3791,25 @@ func TestMultiColPartial(t *testing.T) { } func TestSelectAggregationNoData(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) - createSandbox(KsTestSharded).VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u := createSandbox(KsTestUnsharded) + s := createSandbox(KsTestSharded) + s.VSchema = executorVSchema + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, KsTestSharded, shard, topodatapb.TabletType_PRIMARY, true, 1, nil) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() executor.pv = querypb.ExecuteOptions_Gen4 tcases := []struct { @@ -3680,21 +3843,21 @@ func TestSelectAggregationNoData(t *testing.T) { { sql: `select col, count(*) from user group by col limit 2`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col|count(*)", "int64|int64")), - expSandboxQ: "select col, count(*), weight_string(col) from `user` group by col, weight_string(col) order by col asc limit :__upper_limit", + expSandboxQ: "select col, count(*), weight_string(col) from `user` group by col, weight_string(col) order by col asc", expField: `[name:"col" type:INT64 name:"count(*)" type:INT64]`, expRow: `[]`, }, { sql: `select count(*) from (select col1, col2 from user limit 2) x`, - sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2", "int64|int64")), - expSandboxQ: "select col1, col2 from `user` limit :__upper_limit", + sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|1", "int64|int64|int64")), + expSandboxQ: "select col1, col2, 1 from (select col1, col2 from `user`) as x limit :__upper_limit", expField: `[name:"count(*)" type:INT64]`, expRow: `[[INT64(0)]]`, }, { sql: `select col2, count(*) from (select col1, col2 from user limit 2) x group by col2`, - sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col2)", "int64|int64|varbinary")), - expSandboxQ: "select col1, col2, weight_string(col2) from `user` order by col2 asc limit :__upper_limit", + sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|1|weight_string(col2)", "int64|int64|int64|varbinary")), + expSandboxQ: "select col1, col2, 1, weight_string(col2) from (select col1, col2 from `user`) as x limit :__upper_limit", expField: `[name:"col2" type:INT64 name:"count(*)" type:INT64]`, expRow: `[]`, }, @@ -3706,7 +3869,10 @@ func TestSelectAggregationNoData(t *testing.T) { sbc.SetResults([]*sqltypes.Result{tc.sandboxRes}) sbc.Queries = nil } - qr, err := executorExec(executor, tc.sql, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + qr, err := executorExec(ctx, executor, session, tc.sql, nil) require.NoError(t, err) assert.Equal(t, tc.expField, fmt.Sprintf("%v", qr.Fields)) assert.Equal(t, tc.expRow, fmt.Sprintf("%v", qr.Rows)) @@ -3717,20 +3883,25 @@ func TestSelectAggregationNoData(t *testing.T) { } func TestSelectAggregationData(t *testing.T) { + ctx := utils.LeakCheckContext(t) + // Special setup: Don't use createExecutorEnv. cell := "aa" hc := discovery.NewFakeHealthCheck(nil) - createSandbox(KsTestSharded).VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u := createSandbox(KsTestUnsharded) + s := createSandbox(KsTestSharded) + s.VSchema = executorVSchema + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} var conns []*sandboxconn.SandboxConn for _, shard := range shards { sbc := hc.AddTestTablet(cell, shard, 1, KsTestSharded, shard, topodatapb.TabletType_PRIMARY, true, 1, nil) conns = append(conns, sbc) } - executor := createExecutor(serv, cell, resolver) + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() executor.pv = querypb.ExecuteOptions_Gen4 tcases := []struct { @@ -3764,78 +3935,78 @@ func TestSelectAggregationData(t *testing.T) { { sql: `select col, count(*) from user group by col limit 2`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col|count(*)|weight_string(col)", "int64|int64|varbinary"), "1|2|NULL", "2|1|NULL", "3|4|NULL"), - expSandboxQ: "select col, count(*), weight_string(col) from `user` group by col, weight_string(col) order by col asc limit :__upper_limit", + expSandboxQ: "select col, count(*), weight_string(col) from `user` group by col, weight_string(col) order by col asc", expField: `[name:"col" type:INT64 name:"count(*)" type:INT64]`, expRow: `[[INT64(1) INT64(16)] [INT64(2) INT64(8)]]`, }, { sql: `select count(*) from (select col1, col2 from user limit 2) x`, - sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2", "int64|int64"), "1|2", "2|1"), - expSandboxQ: "select col1, col2 from `user` limit :__upper_limit", + sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|1", "int64|int64|int64"), "100|200|1", "200|300|1"), + expSandboxQ: "select col1, col2, 1 from (select col1, col2 from `user`) as x limit :__upper_limit", expField: `[name:"count(*)" type:INT64]`, expRow: `[[INT64(2)]]`, }, { sql: `select col2, count(*) from (select col1, col2 from user limit 9) x group by col2`, - sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col2)", "int64|int64|varbinary"), "3|1|NULL", "2|2|NULL"), - expSandboxQ: "select col1, col2, weight_string(col2) from `user` order by col2 asc limit :__upper_limit", + sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|1|weight_string(col2)", "int64|int64|int64|varbinary"), "100|3|1|NULL", "200|2|1|NULL"), + expSandboxQ: "select col1, col2, 1, weight_string(col2) from (select col1, col2 from `user`) as x limit :__upper_limit", expField: `[name:"col2" type:INT64 name:"count(*)" type:INT64]`, - expRow: `[[INT64(1) INT64(8)] [INT64(2) INT64(1)]]`, + expRow: `[[INT64(2) INT64(4)] [INT64(3) INT64(5)]]`, }, { sql: `select count(col1) from (select id, col1 from user limit 2) x`, - sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("id|col1", "int64|varchar"), "3|a", "2|b"), - expSandboxQ: "select id, col1 from `user` limit :__upper_limit", + sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("id|col1", "int64|varchar"), "1|a", "2|b"), + expSandboxQ: "select id, col1 from (select id, col1 from `user`) as x limit :__upper_limit", expField: `[name:"count(col1)" type:INT64]`, expRow: `[[INT64(2)]]`, }, { sql: `select count(col1), col2 from (select col2, col1 from user limit 9) x group by col2`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col2|col1|weight_string(col2)", "int64|varchar|varbinary"), "3|a|NULL", "2|b|NULL"), - expSandboxQ: "select col2, col1, weight_string(col2) from `user` order by col2 asc limit :__upper_limit", + expSandboxQ: "select col2, col1, weight_string(col2) from (select col2, col1 from `user`) as x limit :__upper_limit", expField: `[name:"count(col1)" type:INT64 name:"col2" type:INT64]`, - expRow: `[[INT64(8) INT64(2)] [INT64(1) INT64(3)]]`, + expRow: `[[INT64(4) INT64(2)] [INT64(5) INT64(3)]]`, }, { sql: `select col1, count(col2) from (select col1, col2 from user limit 9) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|int64|varbinary"), "a|1|a", "b|null|b"), - expSandboxQ: "select col1, col2, weight_string(col1) from `user` order by col1 asc limit :__upper_limit", + expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", expField: `[name:"col1" type:VARCHAR name:"count(col2)" type:INT64]`, - expRow: `[[VARCHAR("a") INT64(8)] [VARCHAR("b") INT64(0)]]`, + expRow: `[[VARCHAR("a") INT64(5)] [VARCHAR("b") INT64(0)]]`, }, { sql: `select col1, count(col2) from (select col1, col2 from user limit 32) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|int64|varbinary"), "null|1|null", "null|null|null", "a|1|a", "b|null|b"), - expSandboxQ: "select col1, col2, weight_string(col1) from `user` order by col1 asc limit :__upper_limit", + expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", expField: `[name:"col1" type:VARCHAR name:"count(col2)" type:INT64]`, expRow: `[[NULL INT64(8)] [VARCHAR("a") INT64(8)] [VARCHAR("b") INT64(0)]]`, }, { sql: `select col1, sum(col2) from (select col1, col2 from user limit 4) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|int64|varbinary"), "a|3|a"), - expSandboxQ: "select col1, col2, weight_string(col1) from `user` order by col1 asc limit :__upper_limit", + expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", expField: `[name:"col1" type:VARCHAR name:"sum(col2)" type:DECIMAL]`, expRow: `[[VARCHAR("a") DECIMAL(12)]]`, }, { sql: `select col1, sum(col2) from (select col1, col2 from user limit 4) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|varchar|varbinary"), "a|2|a"), - expSandboxQ: "select col1, col2, weight_string(col1) from `user` order by col1 asc limit :__upper_limit", - expField: `[name:"col1" type:VARCHAR name:"sum(col2)" type:DECIMAL]`, - expRow: `[[VARCHAR("a") DECIMAL(8)]]`, + expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", + expField: `[name:"col1" type:VARCHAR name:"sum(col2)" type:FLOAT64]`, + expRow: `[[VARCHAR("a") FLOAT64(8)]]`, }, { sql: `select col1, sum(col2) from (select col1, col2 from user limit 4) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|varchar|varbinary"), "a|x|a"), - expSandboxQ: "select col1, col2, weight_string(col1) from `user` order by col1 asc limit :__upper_limit", - expField: `[name:"col1" type:VARCHAR name:"sum(col2)" type:DECIMAL]`, - expRow: `[[VARCHAR("a") DECIMAL(0)]]`, + expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", + expField: `[name:"col1" type:VARCHAR name:"sum(col2)" type:FLOAT64]`, + expRow: `[[VARCHAR("a") FLOAT64(0)]]`, }, { sql: `select col1, sum(col2) from (select col1, col2 from user limit 4) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|varchar|varbinary"), "a|null|a"), - expSandboxQ: "select col1, col2, weight_string(col1) from `user` order by col1 asc limit :__upper_limit", - expField: `[name:"col1" type:VARCHAR name:"sum(col2)" type:DECIMAL]`, + expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", + expField: `[name:"col1" type:VARCHAR name:"sum(col2)" type:FLOAT64]`, expRow: `[[VARCHAR("a") NULL]]`, }, } @@ -3846,7 +4017,10 @@ func TestSelectAggregationData(t *testing.T) { sbc.SetResults([]*sqltypes.Result{tc.sandboxRes}) sbc.Queries = nil } - qr, err := executorExec(executor, tc.sql, nil) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + qr, err := executorExec(ctx, executor, session, tc.sql, nil) require.NoError(t, err) assert.Equal(t, tc.expField, fmt.Sprintf("%v", qr.Fields)) assert.Equal(t, tc.expRow, fmt.Sprintf("%v", qr.Rows)) @@ -3856,18 +4030,54 @@ func TestSelectAggregationData(t *testing.T) { } } +func TestSelectAggregationRandom(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + cell := "aa" + hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) + s := createSandbox(KsTestSharded) + s.VSchema = executorVSchema + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) + shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} + var conns []*sandboxconn.SandboxConn + for _, shard := range shards { + sbc := hc.AddTestTablet(cell, shard, 1, KsTestSharded, shard, topodatapb.TabletType_PRIMARY, true, 1, nil) + conns = append(conns, sbc) + + sbc.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( + sqltypes.MakeTestFields("a|b", "int64|int64"), + "null|null", + )}) + } + + conns[0].SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( + sqltypes.MakeTestFields("a|b", "int64|int64"), + "10|1", + )}) + + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() + executor.pv = querypb.ExecuteOptions_Gen4 + session := NewAutocommitSession(&vtgatepb.Session{}) + + rs, err := executor.Execute(context.Background(), nil, "TestSelectCFC", session, "select /*vt+ PLANNER=gen4 */ A.a, A.b, (A.a / A.b) as c from (select sum(a) as a, sum(b) as b from user) A", nil) + require.NoError(t, err) + assert.Equal(t, `[[DECIMAL(10) DECIMAL(1) DECIMAL(10.0000)]]`, fmt.Sprintf("%v", rs.Rows)) +} + func TestSelectHexAndBit(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, _ := createExecutorEnv(t) executor.normalize = true session := NewAutocommitSession(&vtgatepb.Session{}) - qr, err := executor.Execute(context.Background(), "TestSelectHexAndBit", session, - "select 0b1001, b'1001', 0x9, x'09'", nil) + qr, err := executor.Execute(context.Background(), nil, "TestSelectHexAndBit", session, "select 0b1001, b'1001', 0x9, x'09'", nil) require.NoError(t, err) require.Equal(t, `[[VARBINARY("\t") VARBINARY("\t") VARBINARY("\t") VARBINARY("\t")]]`, fmt.Sprintf("%v", qr.Rows)) - qr, err = executor.Execute(context.Background(), "TestSelectHexAndBit", session, - "select 1 + 0b1001, 1 + b'1001', 1 + 0x9, 1 + x'09'", nil) + qr, err = executor.Execute(context.Background(), nil, "TestSelectHexAndBit", session, "select 1 + 0b1001, 1 + b'1001', 1 + 0x9, 1 + x'09'", nil) require.NoError(t, err) require.Equal(t, `[[UINT64(10) UINT64(10) UINT64(10) UINT64(10)]]`, fmt.Sprintf("%v", qr.Rows)) } @@ -3875,21 +4085,32 @@ func TestSelectHexAndBit(t *testing.T) { // TestSelectCFC tests validates that cfc vindex plan gets cached and same plan is getting reused. // This also validates that cache_size is able to calculate the cfc vindex plan size. func TestSelectCFC(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, _ := createExecutorEnv(t) executor.normalize = true session := NewAutocommitSession(&vtgatepb.Session{}) - for i := 1; i < 100; i++ { - _, err := executor.Execute(context.Background(), "TestSelectCFC", session, - "select /*vt+ PLANNER=gen4 */ c2 from tbl_cfc where c1 like 'A%'", nil) - require.NoError(t, err) - assert.EqualValues(t, 1, executor.plans.Misses(), "missed count:") - assert.EqualValues(t, i-1, executor.plans.Hits(), "hit count:") + _, err := executor.Execute(context.Background(), nil, "TestSelectCFC", session, "select /*vt+ PLANNER=gen4 */ c2 from tbl_cfc where c1 like 'A%'", nil) + require.NoError(t, err) + + timeout := time.After(30 * time.Second) + for { + select { + case <-timeout: + t.Fatal("not able to cache a plan within 30 seconds.") + case <-time.After(5 * time.Millisecond): + // should be able to find cache entry before the timeout. + cacheItems := executor.debugCacheEntries() + for _, item := range cacheItems { + if strings.Contains(item.Original, "c2 from tbl_cfc where c1 like") { + return + } + } + } } } func TestSelectView(t *testing.T) { - executor, sbc, _, _ := createExecutorEnv() + executor, sbc, _, _, _ := createExecutorEnv(t) // add the view to local vschema err := executor.vschema.AddView(KsTestSharded, "user_details_view", "select user.id, user_extra.col from user join user_extra on user.id = user_extra.user_id") require.NoError(t, err) @@ -3897,21 +4118,19 @@ func TestSelectView(t *testing.T) { executor.normalize = true session := NewAutocommitSession(&vtgatepb.Session{}) - _, err = executor.Execute(context.Background(), "TestSelectView", session, - "select * from user_details_view", nil) + _, err = executor.Execute(context.Background(), nil, "TestSelectView", session, "select * from user_details_view", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id) as user_details_view", + Sql: "select user_details_view.id, user_details_view.col from (select `user`.id, user_extra.col from `user`, user_extra where `user`.id = user_extra.user_id) as user_details_view", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc.Queries) sbc.Queries = nil - _, err = executor.Execute(context.Background(), "TestSelectView", session, - "select * from user_details_view where id = 2", nil) + _, err = executor.Execute(context.Background(), nil, "TestSelectView", session, "select * from user_details_view where id = 2", nil) require.NoError(t, err) wantQueries = []*querypb.BoundQuery{{ - Sql: "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id) as user_details_view where id = :id", + Sql: "select user_details_view.id, user_details_view.col from (select `user`.id, user_extra.col from `user`, user_extra where `user`.id = :id /* INT64 */ and `user`.id = user_extra.user_id) as user_details_view", BindVariables: map[string]*querypb.BindVariable{ "id": sqltypes.Int64BindVariable(2), }, @@ -3919,13 +4138,12 @@ func TestSelectView(t *testing.T) { utils.MustMatch(t, wantQueries, sbc.Queries) sbc.Queries = nil - _, err = executor.Execute(context.Background(), "TestSelectView", session, - "select * from user_details_view where id in (1,2,3,4,5)", nil) + _, err = executor.Execute(context.Background(), nil, "TestSelectView", session, "select * from user_details_view where id in (1,2,3,4,5)", nil) require.NoError(t, err) bvtg1, _ := sqltypes.BuildBindVariable([]int64{1, 2, 3, 4, 5}) bvals, _ := sqltypes.BuildBindVariable([]int64{1, 2}) wantQueries = []*querypb.BoundQuery{{ - Sql: "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id) as user_details_view where id in ::__vals", + Sql: "select user_details_view.id, user_details_view.col from (select `user`.id, user_extra.col from `user`, user_extra where `user`.id in ::__vals and `user`.id = user_extra.user_id) as user_details_view", BindVariables: map[string]*querypb.BindVariable{ "vtg1": bvtg1, "__vals": bvals, @@ -3938,3 +4156,38 @@ func TestMain(m *testing.M) { _flag.ParseFlagsForTest() os.Exit(m.Run()) } + +func TestStreamJoinQuery(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + // Special setup: Don't use createExecutorEnv. + cell := "aa" + hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) + s := createSandbox(KsTestSharded) + s.VSchema = executorVSchema + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) + shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} + for _, shard := range shards { + _ = hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil) + } + executor := createExecutor(ctx, serv, cell, resolver) + defer executor.Close() + + sql := "select u.foo, u.apa, ue.bar, ue.apa from user u join user_extra ue on u.foo = ue.bar" + result, err := executorStream(ctx, executor, sql) + require.NoError(t, err) + wantResult := &sqltypes.Result{ + Fields: append(sandboxconn.SingleRowResult.Fields, sandboxconn.SingleRowResult.Fields...), + } + wantRow := append(sandboxconn.StreamRowResult.Rows[0], sandboxconn.StreamRowResult.Rows[0]...) + for i := 0; i < 64; i++ { + wantResult.Rows = append(wantResult.Rows, wantRow) + } + require.Equal(t, len(wantResult.Rows), len(result.Rows)) + for idx := 0; idx < 64; idx++ { + utils.MustMatch(t, wantResult.Rows[idx], result.Rows[idx], "mismatched on: ", strconv.Itoa(idx)) + } +} diff --git a/go/vt/vtgate/executor_set_test.go b/go/vt/vtgate/executor_set_test.go index 52f12354bc9..e71a41eeb7f 100644 --- a/go/vt/vtgate/executor_set_test.go +++ b/go/vt/vtgate/executor_set_test.go @@ -17,11 +17,10 @@ limitations under the License. package vtgate import ( - "context" "fmt" "testing" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/sqlparser" querypb "vitess.io/vitess/go/vt/proto/query" @@ -41,7 +40,7 @@ import ( ) func TestExecutorSet(t *testing.T) { - executorEnv, _, _, _ := createExecutorEnv() + executorEnv, _, _, _, ctx := createExecutorEnv(t) testcases := []struct { in string @@ -228,14 +227,14 @@ func TestExecutorSet(t *testing.T) { in: "set transaction isolation level serializable", out: &vtgatepb.Session{ Autocommit: true, - Warnings: []*querypb.QueryWarning{{Code: uint32(mysql.ERNotSupportedYet), Message: "converted 'next transaction' scope to 'session' scope"}}, + Warnings: []*querypb.QueryWarning{{Code: uint32(sqlerror.ERNotSupportedYet), Message: "converted 'next transaction' scope to 'session' scope"}}, }, }, { in: "set transaction read only", - out: &vtgatepb.Session{Autocommit: true, Warnings: []*querypb.QueryWarning{{Code: uint32(mysql.ERNotSupportedYet), Message: "converted 'next transaction' scope to 'session' scope"}}}, + out: &vtgatepb.Session{Autocommit: true, Warnings: []*querypb.QueryWarning{{Code: uint32(sqlerror.ERNotSupportedYet), Message: "converted 'next transaction' scope to 'session' scope"}}}, }, { in: "set transaction read write", - out: &vtgatepb.Session{Autocommit: true, Warnings: []*querypb.QueryWarning{{Code: uint32(mysql.ERNotSupportedYet), Message: "converted 'next transaction' scope to 'session' scope"}}}, + out: &vtgatepb.Session{Autocommit: true, Warnings: []*querypb.QueryWarning{{Code: uint32(sqlerror.ERNotSupportedYet), Message: "converted 'next transaction' scope to 'session' scope"}}}, }, { in: "set session transaction read write", out: &vtgatepb.Session{Autocommit: true}, @@ -270,7 +269,7 @@ func TestExecutorSet(t *testing.T) { for i, tcase := range testcases { t.Run(fmt.Sprintf("%d-%s", i, tcase.in), func(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{Autocommit: true}) - _, err := executorEnv.Execute(context.Background(), "TestExecute", session, tcase.in, nil) + _, err := executorEnv.Execute(ctx, nil, "TestExecute", session, tcase.in, nil) if tcase.err == "" { require.NoError(t, err) utils.MustMatch(t, tcase.out, session.Session, "new executor") @@ -282,7 +281,7 @@ func TestExecutorSet(t *testing.T) { } func TestExecutorSetOp(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) sysVarSetEnabled = true returnResult := func(columnName, typ, value string) *sqltypes.Result { @@ -366,16 +365,13 @@ func TestExecutorSetOp(t *testing.T) { }} for _, tcase := range testcases { t.Run(tcase.in, func(t *testing.T) { - session := NewAutocommitSession(primarySession) + session := NewAutocommitSession(&vtgatepb.Session{ + TargetString: "@primary", + }) session.TargetString = KsTestUnsharded session.EnableSystemSettings = !tcase.disallowResConn sbclookup.SetResults([]*sqltypes.Result{tcase.result}) - _, err := executor.Execute( - context.Background(), - "TestExecute", - session, - tcase.in, - nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, tcase.in, nil) require.NoError(t, err) utils.MustMatch(t, tcase.warning, session.Warnings, "") utils.MustMatch(t, tcase.sysVars, session.SystemVariables, "") @@ -384,64 +380,69 @@ func TestExecutorSetOp(t *testing.T) { } func TestExecutorSetMetadata(t *testing.T) { - executor, _, _, _ := createExecutorEnv() - session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true}) - set := "set @@vitess_metadata.app_keyspace_v1= '1'" - _, err := executor.Execute(context.Background(), "TestExecute", session, set, nil) - assert.Equalf(t, vtrpcpb.Code_PERMISSION_DENIED, vterrors.Code(err), "expected error %v, got error: %v", vtrpcpb.Code_PERMISSION_DENIED, err) + t.Run("Session 1", func(t *testing.T) { + executor, _, _, _, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true}) - vschemaacl.AuthorizedDDLUsers = "%" - defer func() { - vschemaacl.AuthorizedDDLUsers = "" - }() - - executor, _, _, _ = createExecutorEnv() - session = NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true}) - - set = "set @@vitess_metadata.app_keyspace_v1= '1'" - _, err = executor.Execute(context.Background(), "TestExecute", session, set, nil) - assert.NoError(t, err, "%s error: %v", set, err) - - show := `show vitess_metadata variables like 'app\\_keyspace\\_v_'` - result, err := executor.Execute(context.Background(), "TestExecute", session, show, nil) - assert.NoError(t, err) - - want := "1" - got := result.Rows[0][1].ToString() - assert.Equalf(t, want, got, "want migrations %s, result %s", want, got) - - // Update metadata - set = "set @@vitess_metadata.app_keyspace_v2='2'" - _, err = executor.Execute(context.Background(), "TestExecute", session, set, nil) - assert.NoError(t, err, "%s error: %v", set, err) - - show = `show vitess_metadata variables like 'app\\_keyspace\\_v%'` - gotqr, err := executor.Execute(context.Background(), "TestExecute", session, show, nil) - assert.NoError(t, err) + set := "set @@vitess_metadata.app_keyspace_v1= '1'" + _, err := executor.Execute(ctx, nil, "TestExecute", session, set, nil) + assert.Equalf(t, vtrpcpb.Code_PERMISSION_DENIED, vterrors.Code(err), "expected error %v, got error: %v", vtrpcpb.Code_PERMISSION_DENIED, err) + }) - wantqr := &sqltypes.Result{ - Fields: buildVarCharFields("Key", "Value"), - Rows: [][]sqltypes.Value{ - buildVarCharRow("app_keyspace_v1", "1"), - buildVarCharRow("app_keyspace_v2", "2"), - }, - RowsAffected: 2, - } + t.Run("Session 2", func(t *testing.T) { + vschemaacl.AuthorizedDDLUsers = "%" + defer func() { + vschemaacl.AuthorizedDDLUsers = "" + }() + + executor, _, _, _, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true}) + + set := "set @@vitess_metadata.app_keyspace_v1= '1'" + _, err := executor.Execute(ctx, nil, "TestExecute", session, set, nil) + assert.NoError(t, err, "%s error: %v", set, err) + + show := `show vitess_metadata variables like 'app\\_keyspace\\_v_'` + result, err := executor.Execute(ctx, nil, "TestExecute", session, show, nil) + assert.NoError(t, err) + + want := "1" + got := result.Rows[0][1].ToString() + assert.Equalf(t, want, got, "want migrations %s, result %s", want, got) + + // Update metadata + set = "set @@vitess_metadata.app_keyspace_v2='2'" + _, err = executor.Execute(ctx, nil, "TestExecute", session, set, nil) + assert.NoError(t, err, "%s error: %v", set, err) + + show = `show vitess_metadata variables like 'app\\_keyspace\\_v%'` + gotqr, err := executor.Execute(ctx, nil, "TestExecute", session, show, nil) + assert.NoError(t, err) + + wantqr := &sqltypes.Result{ + Fields: buildVarCharFields("Key", "Value"), + Rows: [][]sqltypes.Value{ + buildVarCharRow("app_keyspace_v1", "1"), + buildVarCharRow("app_keyspace_v2", "2"), + }, + RowsAffected: 2, + } - assert.Equal(t, wantqr.Fields, gotqr.Fields) - assert.ElementsMatch(t, wantqr.Rows, gotqr.Rows) + assert.Equal(t, wantqr.Fields, gotqr.Fields) + assert.ElementsMatch(t, wantqr.Rows, gotqr.Rows) - show = "show vitess_metadata variables" - gotqr, err = executor.Execute(context.Background(), "TestExecute", session, show, nil) - require.NoError(t, err) + show = "show vitess_metadata variables" + gotqr, err = executor.Execute(ctx, nil, "TestExecute", session, show, nil) + require.NoError(t, err) - assert.Equal(t, wantqr.Fields, gotqr.Fields) - assert.ElementsMatch(t, wantqr.Rows, gotqr.Rows) + assert.Equal(t, wantqr.Fields, gotqr.Fields) + assert.ElementsMatch(t, wantqr.Rows, gotqr.Rows) + }) } func TestPlanExecutorSetUDV(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) testcases := []struct { in string @@ -455,12 +456,12 @@ func TestPlanExecutorSetUDV(t *testing.T) { out: &vtgatepb.Session{UserDefinedVariables: createMap([]string{"foo"}, []any{2}), Autocommit: true}, }, { in: "set @foo = 2.1, @bar = 'baz'", - out: &vtgatepb.Session{UserDefinedVariables: createMap([]string{"foo", "bar"}, []any{sqltypes.DecimalFloat(2.1), "baz"}), Autocommit: true}, + out: &vtgatepb.Session{UserDefinedVariables: createMap([]string{"foo", "bar"}, []any{sqltypes.DecimalString("2.1"), "baz"}), Autocommit: true}, }} for _, tcase := range testcases { t.Run(tcase.in, func(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{Autocommit: true}) - _, err := executor.Execute(context.Background(), "TestExecute", session, tcase.in, nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, tcase.in, nil) if err != nil { require.EqualError(t, err, tcase.err) } else { @@ -471,7 +472,7 @@ func TestPlanExecutorSetUDV(t *testing.T) { } func TestSetUDVFromTabletInput(t *testing.T) { - executor, sbc1, _, _ := createExecutorEnv() + executor, sbc1, _, _, ctx := createExecutorEnv(t) fields := sqltypes.MakeTestFields("some", "VARCHAR") sbc1.SetResults([]*sqltypes.Result{ @@ -481,15 +482,12 @@ func TestSetUDVFromTabletInput(t *testing.T) { ), }) - primarySession.TargetString = "TestExecutor" - defer func() { - primarySession.TargetString = "" - }() - _, err := executorExec(executor, "set @foo = concat('a','b','c')", nil) + session := &vtgatepb.Session{TargetString: "TestExecutor"} + _, err := executorExec(ctx, executor, session, "set @foo = concat('a','b','c')", nil) require.NoError(t, err) want := map[string]*querypb.BindVariable{"foo": sqltypes.StringBindVariable("abc")} - utils.MustMatch(t, want, primarySession.UserDefinedVariables, "") + utils.MustMatch(t, want, session.UserDefinedVariables, "") } func createMap(keys []string, values []any) map[string]*querypb.BindVariable { @@ -505,7 +503,7 @@ func createMap(keys []string, values []any) map[string]*querypb.BindVariable { } func TestSetVar(t *testing.T) { - executor, _, _, sbc := createExecutorEnv() + executor, _, _, sbc, ctx := createExecutorEnv(t) executor.normalize = true oldVersion := sqlparser.GetParserVersion() @@ -519,7 +517,7 @@ func TestSetVar(t *testing.T) { sqltypes.MakeTestFields("orig|new", "varchar|varchar"), "|only_full_group_by")}) - _, err := executor.Execute(context.Background(), "TestSetVar", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) + _, err := executor.Execute(ctx, nil, "TestSetVar", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) require.NoError(t, err) tcases := []struct { @@ -541,7 +539,7 @@ func TestSetVar(t *testing.T) { // reset reserved conn need. session.SetReservedConn(false) - _, err = executor.Execute(context.Background(), "TestSetVar", session, tc.sql, map[string]*querypb.BindVariable{}) + _, err = executor.Execute(ctx, nil, "TestSetVar", session, tc.sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) assert.Equal(t, tc.rc, session.InReservedConn()) }) @@ -549,7 +547,7 @@ func TestSetVar(t *testing.T) { } func TestSetVarShowVariables(t *testing.T) { - executor, _, _, sbc := createExecutorEnv() + executor, _, _, sbc, ctx := createExecutorEnv(t) executor.normalize = true oldVersion := sqlparser.GetParserVersion() @@ -567,18 +565,18 @@ func TestSetVarShowVariables(t *testing.T) { sqltypes.MakeTestResult(sqltypes.MakeTestFields("Variable_name|Value", "varchar|varchar"), "sql_mode|ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE")}) - _, err := executor.Execute(context.Background(), "TestSetVar", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) + _, err := executor.Execute(ctx, nil, "TestSetVar", session, "set @@sql_mode = only_full_group_by", map[string]*querypb.BindVariable{}) require.NoError(t, err) // this should return the updated value of sql_mode. - qr, err := executor.Execute(context.Background(), "TestSetVar", session, "show variables like 'sql_mode'", map[string]*querypb.BindVariable{}) + qr, err := executor.Execute(ctx, nil, "TestSetVar", session, "show variables like 'sql_mode'", map[string]*querypb.BindVariable{}) require.NoError(t, err) assert.False(t, session.InReservedConn(), "reserved connection should not be used") assert.Equal(t, `[[VARCHAR("sql_mode") VARCHAR("only_full_group_by")]]`, fmt.Sprintf("%v", qr.Rows)) } func TestExecutorSetAndSelect(t *testing.T) { - e, _, _, sbc := createExecutorEnv() + e, _, _, sbc, ctx := createExecutorEnv(t) e.normalize = true testcases := []struct { @@ -614,7 +612,7 @@ func TestExecutorSetAndSelect(t *testing.T) { sqltypes.MakeTestResult(nil)}) // third one for new set query setQ := fmt.Sprintf("set %s = '%s'", tcase.sysVar, tcase.val) - _, err := e.Execute(context.Background(), "TestExecutorSetAndSelect", session, setQ, nil) + _, err := e.Execute(ctx, nil, "TestExecutorSetAndSelect", session, setQ, nil) require.NoError(t, err) } @@ -622,7 +620,7 @@ func TestExecutorSetAndSelect(t *testing.T) { // if the query reaches the shard, it will return REPEATABLE-READ isolation level. sbc.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult(sqltypes.MakeTestFields(tcase.sysVar, "varchar"), "REPEATABLE-READ")}) - qr, err := e.Execute(context.Background(), "TestExecutorSetAndSelect", session, selectQ, nil) + qr, err := e.Execute(ctx, nil, "TestExecutorSetAndSelect", session, selectQ, nil) require.NoError(t, err) assert.Equal(t, tcase.exp, fmt.Sprintf("%v", qr.Rows)) }) diff --git a/go/vt/vtgate/executor_stream_test.go b/go/vt/vtgate/executor_stream_test.go index 8fea4ed985f..e5c730eb157 100644 --- a/go/vt/vtgate/executor_stream_test.go +++ b/go/vt/vtgate/executor_stream_test.go @@ -17,17 +17,19 @@ limitations under the License. package vtgate import ( + "context" "testing" "time" + "vitess.io/vitess/go/streamlog" + "vitess.io/vitess/go/test/utils" querypb "vitess.io/vitess/go/vt/proto/query" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/vtgate/logstats" - "vitess.io/vitess/go/cache" "vitess.io/vitess/go/vt/discovery" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "context" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -36,9 +38,9 @@ import ( ) func TestStreamSQLUnsharded(t *testing.T) { - executor, _, _, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, _, _, _, _ := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) sql := "stream * from user_msgs" result, err := executorStreamMessages(executor, sql) @@ -50,18 +52,26 @@ func TestStreamSQLUnsharded(t *testing.T) { } func TestStreamSQLSharded(t *testing.T) { + ctx := utils.LeakCheckContext(t) cell := "aa" hc := discovery.NewFakeHealthCheck(nil) + u := createSandbox(KsTestUnsharded) s := createSandbox("TestExecutor") s.VSchema = executorVSchema - getSandbox(KsTestUnsharded).VSchema = unshardedVSchema - serv := newSandboxForCells([]string{cell}) - resolver := newTestResolver(hc, serv, cell) + u.VSchema = unshardedVSchema + serv := newSandboxForCells(ctx, []string{cell}) + resolver := newTestResolver(ctx, hc, serv, cell) shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} for _, shard := range shards { _ = hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_PRIMARY, true, 1, nil) } - executor := NewExecutor(context.Background(), serv, cell, resolver, false, false, testBufferSize, cache.DefaultConfig, nil, false, querypb.ExecuteOptions_V3) + queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) + plans := DefaultPlanCache() + + executor := NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4) + executor.SetQueryLogger(queryLogger) + + defer executor.Close() sql := "stream * from sharded_user_msgs" result, err := executorStreamMessages(executor, sql) @@ -88,10 +98,12 @@ func executorStreamMessages(executor *Executor, sql string) (qr *sqltypes.Result results := make(chan *sqltypes.Result, 100) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel() + session := &vtgatepb.Session{TargetString: "@primary"} err = executor.StreamExecute( ctx, + nil, "TestExecuteStream", - NewSafeSession(primarySession), + NewSafeSession(session), sql, nil, func(qr *sqltypes.Result) error { diff --git a/go/vt/vtgate/executor_test.go b/go/vt/vtgate/executor_test.go index 388f84d8c91..2ab45f1ef42 100644 --- a/go/vt/vtgate/executor_test.go +++ b/go/vt/vtgate/executor_test.go @@ -20,50 +20,50 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" - "html/template" "net/http" "net/http/httptest" "reflect" "sort" "strings" "testing" - - "vitess.io/vitess/go/vt/vtgate/logstats" - - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/cache" - "vitess.io/vitess/go/test/utils" - "vitess.io/vitess/go/vt/vtgate/engine" - - "vitess.io/vitess/go/vt/topo" + "time" "github.com/google/go-cmp/cmp" + "github.com/google/safehtml/template" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/callerid" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/vindexes" - "vitess.io/vitess/go/vt/vtgate/vschemaacl" - + "vitess.io/vitess/go/vt/discovery" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtgate/buffer" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/logstats" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vtgate/vschemaacl" + "vitess.io/vitess/go/vt/vtgate/vtgateservice" ) func TestExecutorResultsExceeded(t *testing.T) { + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + save := warnMemoryRows warnMemoryRows = 3 defer func() { warnMemoryRows = save }() - executor, _, _, sbclookup := createExecutorEnv() session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"}) initial := warnings.Counts()["ResultsExceeded"] @@ -72,21 +72,22 @@ func TestExecutorResultsExceeded(t *testing.T) { result2 := sqltypes.MakeTestResult(sqltypes.MakeTestFields("col", "int64"), "1", "2", "3", "4") sbclookup.SetResults([]*sqltypes.Result{result1, result2}) - _, err := executor.Execute(ctx, "TestExecutorResultsExceeded", session, "select * from main1", nil) + _, err := executor.Execute(ctx, nil, "TestExecutorResultsExceeded", session, "select * from main1", nil) require.NoError(t, err) assert.Equal(t, initial, warnings.Counts()["ResultsExceeded"], "warnings count") - _, err = executor.Execute(ctx, "TestExecutorResultsExceeded", session, "select * from main1", nil) + _, err = executor.Execute(ctx, nil, "TestExecutorResultsExceeded", session, "select * from main1", nil) require.NoError(t, err) assert.Equal(t, initial+1, warnings.Counts()["ResultsExceeded"], "warnings count") } func TestExecutorMaxMemoryRowsExceeded(t *testing.T) { + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + save := maxMemoryRows maxMemoryRows = 3 defer func() { maxMemoryRows = save }() - executor, _, _, sbclookup := createExecutorEnv() session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"}) result := sqltypes.MakeTestResult(sqltypes.MakeTestFields("col", "int64"), "1", "2", "3", "4") fn := func(r *sqltypes.Result) error { @@ -105,7 +106,7 @@ func TestExecutorMaxMemoryRowsExceeded(t *testing.T) { stmt, err := sqlparser.Parse(test.query) require.NoError(t, err) - _, err = executor.Execute(ctx, "TestExecutorMaxMemoryRowsExceeded", session, test.query, nil) + _, err = executor.Execute(ctx, nil, "TestExecutorMaxMemoryRowsExceeded", session, test.query, nil) if sqlparser.IgnoreMaxMaxMemoryRowsDirective(stmt) { require.NoError(t, err, "no error when DirectiveIgnoreMaxMemoryRows is provided") } else { @@ -113,36 +114,37 @@ func TestExecutorMaxMemoryRowsExceeded(t *testing.T) { } sbclookup.SetResults([]*sqltypes.Result{result}) - err = executor.StreamExecute(ctx, "TestExecutorMaxMemoryRowsExceeded", session, test.query, nil, fn) + err = executor.StreamExecute(ctx, nil, "TestExecutorMaxMemoryRowsExceeded", session, test.query, nil, fn) require.NoError(t, err, "maxMemoryRows limit does not apply to StreamExecute") } } func TestExecutorTransactionsNoAutoCommit(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", SessionUUID: "suuid"}) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) // begin. - _, err := executor.Execute(ctx, "TestExecute", session, "begin", nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, "begin", nil) require.NoError(t, err) wantSession := &vtgatepb.Session{InTransaction: true, TargetString: "@primary", SessionUUID: "suuid"} utils.MustMatch(t, wantSession, session.Session, "session") assert.EqualValues(t, 0, sbclookup.CommitCount.Load(), "commit count") - logStats := testQueryLog(t, logChan, "TestExecute", "BEGIN", "begin", 0) + logStats := testQueryLog(t, executor, logChan, "TestExecute", "BEGIN", "begin", 0) assert.EqualValues(t, 0, logStats.CommitTime, "logstats: expected zero CommitTime") assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") // commit. - _, err = executor.Execute(ctx, "TestExecute", session, "select id from main1", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "select id from main1", nil) require.NoError(t, err) - logStats = testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from main1", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from main1", 1) assert.EqualValues(t, 0, logStats.CommitTime, "logstats: expected zero CommitTime") assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") - _, err = executor.Execute(context.Background(), "TestExecute", session, "commit", nil) + _, err = executor.Execute(context.Background(), nil, "TestExecute", session, "commit", nil) if err != nil { t.Fatal(err) } @@ -153,25 +155,25 @@ func TestExecutorTransactionsNoAutoCommit(t *testing.T) { if commitCount := sbclookup.CommitCount.Load(); commitCount != 1 { t.Errorf("want 1, got %d", commitCount) } - logStats = testQueryLog(t, logChan, "TestExecute", "COMMIT", "commit", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "COMMIT", "commit", 1) if logStats.CommitTime == 0 { t.Errorf("logstats: expected non-zero CommitTime") } assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") // rollback. - _, err = executor.Execute(ctx, "TestExecute", session, "begin", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "begin", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, "TestExecute", session, "select id from main1", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "select id from main1", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, "TestExecute", session, "rollback", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "rollback", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{TargetString: "@primary", SessionUUID: "suuid"} utils.MustMatch(t, wantSession, session.Session, "session") assert.EqualValues(t, 1, sbclookup.RollbackCount.Load(), "rollback count") - _ = testQueryLog(t, logChan, "TestExecute", "BEGIN", "begin", 0) - _ = testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from main1", 1) - logStats = testQueryLog(t, logChan, "TestExecute", "ROLLBACK", "rollback", 1) + _ = testQueryLog(t, executor, logChan, "TestExecute", "BEGIN", "begin", 0) + _ = testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from main1", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "ROLLBACK", "rollback", 1) if logStats.CommitTime == 0 { t.Errorf("logstats: expected non-zero CommitTime") } @@ -187,12 +189,13 @@ func TestExecutorTransactionsNoAutoCommit(t *testing.T) { // Prevent use of non-primary if in_transaction is on. session = NewSafeSession(&vtgatepb.Session{TargetString: "@primary", InTransaction: true}) - _, err = executor.Execute(ctx, "TestExecute", session, "use @replica", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "use @replica", nil) require.EqualError(t, err, `can't execute the given command because you have an active transaction`) } func TestDirectTargetRewrites(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + executor.normalize = true session := &vtgatepb.Session{ @@ -202,7 +205,7 @@ func TestDirectTargetRewrites(t *testing.T) { } sql := "select database()" - _, err := executor.Execute(ctx, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) + _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) assertQueries(t, sbclookup, []*querypb.BoundQuery{{ Sql: "select :__vtdbname as `database()` from dual", @@ -211,59 +214,61 @@ func TestDirectTargetRewrites(t *testing.T) { } func TestExecutorTransactionsAutoCommit(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true, SessionUUID: "suuid"}) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) // begin. - _, err := executor.Execute(ctx, "TestExecute", session, "begin", nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, "begin", nil) require.NoError(t, err) wantSession := &vtgatepb.Session{InTransaction: true, TargetString: "@primary", Autocommit: true, SessionUUID: "suuid"} utils.MustMatch(t, wantSession, session.Session, "session") if commitCount := sbclookup.CommitCount.Load(); commitCount != 0 { t.Errorf("want 0, got %d", commitCount) } - logStats := testQueryLog(t, logChan, "TestExecute", "BEGIN", "begin", 0) + logStats := testQueryLog(t, executor, logChan, "TestExecute", "BEGIN", "begin", 0) assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") // commit. - _, err = executor.Execute(ctx, "TestExecute", session, "select id from main1", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "select id from main1", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, "TestExecute", session, "commit", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "commit", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{TargetString: "@primary", Autocommit: true, SessionUUID: "suuid"} utils.MustMatch(t, wantSession, session.Session, "session") assert.EqualValues(t, 1, sbclookup.CommitCount.Load()) - logStats = testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from main1", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from main1", 1) assert.EqualValues(t, 0, logStats.CommitTime) assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") - logStats = testQueryLog(t, logChan, "TestExecute", "COMMIT", "commit", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "COMMIT", "commit", 1) assert.NotEqual(t, 0, logStats.CommitTime) assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") // rollback. - _, err = executor.Execute(ctx, "TestExecute", session, "begin", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "begin", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, "TestExecute", session, "select id from main1", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "select id from main1", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, "TestExecute", session, "rollback", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "rollback", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{TargetString: "@primary", Autocommit: true, SessionUUID: "suuid"} utils.MustMatch(t, wantSession, session.Session, "session") if rollbackCount := sbclookup.RollbackCount.Load(); rollbackCount != 1 { t.Errorf("want 1, got %d", rollbackCount) } - _ = testQueryLog(t, logChan, "TestExecute", "BEGIN", "begin", 0) - _ = testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from main1", 1) - logStats = testQueryLog(t, logChan, "TestExecute", "ROLLBACK", "rollback", 1) + _ = testQueryLog(t, executor, logChan, "TestExecute", "BEGIN", "begin", 0) + _ = testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from main1", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "ROLLBACK", "rollback", 1) assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") } func TestExecutorTransactionsAutoCommitStreaming(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + oltpOptions := &querypb.ExecuteOptions{Workload: querypb.ExecuteOptions_OLTP} session := NewSafeSession(&vtgatepb.Session{ TargetString: "@primary", @@ -272,13 +277,13 @@ func TestExecutorTransactionsAutoCommitStreaming(t *testing.T) { SessionUUID: "suuid", }) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) var results []*sqltypes.Result // begin. - err := executor.StreamExecute(ctx, "TestExecute", session, "begin", nil, func(result *sqltypes.Result) error { + err := executor.StreamExecute(ctx, nil, "TestExecute", session, "begin", nil, func(result *sqltypes.Result) error { results = append(results, result) return nil }) @@ -296,31 +301,31 @@ func TestExecutorTransactionsAutoCommitStreaming(t *testing.T) { } utils.MustMatch(t, wantSession, session.Session, "session") assert.Zero(t, sbclookup.CommitCount.Load()) - logStats := testQueryLog(t, logChan, "TestExecute", "BEGIN", "begin", 0) + logStats := testQueryLog(t, executor, logChan, "TestExecute", "BEGIN", "begin", 0) assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") // commit. - _, err = executor.Execute(ctx, "TestExecute", session, "select id from main1", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "select id from main1", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, "TestExecute", session, "commit", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "commit", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{TargetString: "@primary", Autocommit: true, Options: oltpOptions, SessionUUID: "suuid"} utils.MustMatch(t, wantSession, session.Session, "session") assert.EqualValues(t, 1, sbclookup.CommitCount.Load()) - logStats = testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from main1", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from main1", 1) assert.EqualValues(t, 0, logStats.CommitTime) assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") - logStats = testQueryLog(t, logChan, "TestExecute", "COMMIT", "commit", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "COMMIT", "commit", 1) assert.NotEqual(t, 0, logStats.CommitTime) assert.EqualValues(t, "suuid", logStats.SessionUUID, "logstats: expected non-empty SessionUUID") // rollback. - _, err = executor.Execute(ctx, "TestExecute", session, "begin", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "begin", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, "TestExecute", session, "select id from main1", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "select id from main1", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, "TestExecute", session, "rollback", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "rollback", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{TargetString: "@primary", Autocommit: true, Options: oltpOptions, SessionUUID: "suuid"} utils.MustMatch(t, wantSession, session.Session, "session") @@ -333,49 +338,50 @@ func TestExecutorDeleteMetadata(t *testing.T) { vschemaacl.AuthorizedDDLUsers = "" }() - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true}) set := "set @@vitess_metadata.app_v1= '1'" - _, err := executor.Execute(ctx, "TestExecute", session, set, nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, set, nil) assert.NoError(t, err, "%s error: %v", set, err) show := `show vitess_metadata variables like 'app\\_%'` - result, _ := executor.Execute(ctx, "TestExecute", session, show, nil) + result, _ := executor.Execute(ctx, nil, "TestExecute", session, show, nil) assert.Len(t, result.Rows, 1) // Fails if deleting key that doesn't exist delQuery := "set @@vitess_metadata.doesn't_exist=''" - _, err = executor.Execute(ctx, "TestExecute", session, delQuery, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, delQuery, nil) assert.True(t, topo.IsErrType(err, topo.NoNode)) // Delete existing key, show should fail given the node doesn't exist delQuery = "set @@vitess_metadata.app_v1=''" - _, err = executor.Execute(ctx, "TestExecute", session, delQuery, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, delQuery, nil) assert.NoError(t, err) show = `show vitess_metadata variables like 'app\\_%'` - _, err = executor.Execute(ctx, "TestExecute", session, show, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, show, nil) assert.True(t, topo.IsErrType(err, topo.NoNode)) } func TestExecutorAutocommit(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"}) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) // autocommit = 0 startCount := sbclookup.CommitCount.Load() - _, err := executor.Execute(ctx, "TestExecute", session, "select id from main1", nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, "select id from main1", nil) require.NoError(t, err) wantSession := &vtgatepb.Session{TargetString: "@primary", InTransaction: true, FoundRows: 1, RowCount: -1} - testSession := proto.Clone(session.Session).(*vtgatepb.Session) + testSession := session.Session.CloneVT() testSession.ShardSessions = nil utils.MustMatch(t, wantSession, testSession, "session does not match for autocommit=0") - logStats := testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from main1", 1) + logStats := testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from main1", 1) if logStats.CommitTime != 0 { t.Errorf("logstats: expected zero CommitTime") } @@ -384,42 +390,42 @@ func TestExecutorAutocommit(t *testing.T) { } // autocommit = 1 - _, err = executor.Execute(ctx, "TestExecute", session, "set autocommit=1", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "set autocommit=1", nil) require.NoError(t, err) - _ = testQueryLog(t, logChan, "TestExecute", "SET", "set @@autocommit = 1", 0) + _ = testQueryLog(t, executor, logChan, "TestExecute", "SET", "set @@autocommit = 1", 0) // Setting autocommit=1 commits existing transaction. if got, want := sbclookup.CommitCount.Load(), startCount+1; got != want { t.Errorf("Commit count: %d, want %d", got, want) } - _, err = executor.Execute(ctx, "TestExecute", session, "update main1 set id=1", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "update main1 set id=1", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{Autocommit: true, TargetString: "@primary", FoundRows: 0, RowCount: 1} utils.MustMatch(t, wantSession, session.Session, "session does not match for autocommit=1") - logStats = testQueryLog(t, logChan, "TestExecute", "UPDATE", "update main1 set id=1", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "UPDATE", "update main1 set id = 1", 1) assert.NotZero(t, logStats.CommitTime, "logstats: expected non-zero CommitTime") assert.NotEqual(t, uint64(0), logStats.RowsAffected, "logstats: expected non-zero RowsAffected") // autocommit = 1, "begin" session.ResetTx() startCount = sbclookup.CommitCount.Load() - _, err = executor.Execute(ctx, "TestExecute", session, "begin", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "begin", nil) require.NoError(t, err) - _ = testQueryLog(t, logChan, "TestExecute", "BEGIN", "begin", 0) + _ = testQueryLog(t, executor, logChan, "TestExecute", "BEGIN", "begin", 0) - _, err = executor.Execute(ctx, "TestExecute", session, "update main1 set id=1", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "update main1 set id=1", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{InTransaction: true, Autocommit: true, TargetString: "@primary", FoundRows: 0, RowCount: 1} - testSession = proto.Clone(session.Session).(*vtgatepb.Session) + testSession = session.Session.CloneVT() testSession.ShardSessions = nil utils.MustMatch(t, wantSession, testSession, "session does not match for autocommit=1") if got, want := sbclookup.CommitCount.Load(), startCount; got != want { t.Errorf("Commit count: %d, want %d", got, want) } - logStats = testQueryLog(t, logChan, "TestExecute", "UPDATE", "update main1 set id=1", 1) + logStats = testQueryLog(t, executor, logChan, "TestExecute", "UPDATE", "update main1 set id = 1", 1) if logStats.CommitTime != 0 { t.Errorf("logstats: expected zero CommitTime") } @@ -427,7 +433,7 @@ func TestExecutorAutocommit(t *testing.T) { t.Errorf("logstats: expected non-zero RowsAffected") } - _, err = executor.Execute(ctx, "TestExecute", session, "commit", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "commit", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{Autocommit: true, TargetString: "@primary"} if !proto.Equal(session.Session, wantSession) { @@ -436,19 +442,19 @@ func TestExecutorAutocommit(t *testing.T) { if got, want := sbclookup.CommitCount.Load(), startCount+1; got != want { t.Errorf("Commit count: %d, want %d", got, want) } - _ = testQueryLog(t, logChan, "TestExecute", "COMMIT", "commit", 1) + _ = testQueryLog(t, executor, logChan, "TestExecute", "COMMIT", "commit", 1) // transition autocommit from 0 to 1 in the middle of a transaction. startCount = sbclookup.CommitCount.Load() session = NewSafeSession(&vtgatepb.Session{TargetString: "@primary"}) - _, err = executor.Execute(ctx, "TestExecute", session, "begin", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "begin", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, "TestExecute", session, "update main1 set id=1", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "update main1 set id=1", nil) require.NoError(t, err) if got, want := sbclookup.CommitCount.Load(), startCount; got != want { t.Errorf("Commit count: %d, want %d", got, want) } - _, err = executor.Execute(ctx, "TestExecute", session, "set autocommit=1", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "set autocommit=1", nil) require.NoError(t, err) wantSession = &vtgatepb.Session{Autocommit: true, TargetString: "@primary"} if !proto.Equal(session.Session, wantSession) { @@ -460,7 +466,8 @@ func TestExecutorAutocommit(t *testing.T) { } func TestExecutorShowColumns(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: ""}) queries := []string{ @@ -471,7 +478,7 @@ func TestExecutorShowColumns(t *testing.T) { } for _, query := range queries { t.Run(query, func(t *testing.T) { - _, err := executor.Execute(ctx, "TestExecute", session, query, nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ @@ -511,38 +518,39 @@ func assertMatchesNoOrder(t *testing.T, expected, got string) { } func TestExecutorShow(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) for _, query := range []string{"show vitess_keyspaces", "show keyspaces"} { - qr, err := executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) assertMatchesNoOrder(t, `[[VARCHAR("TestUnsharded")] [VARCHAR("TestMultiCol")] [VARCHAR("TestXBadVSchema")] [VARCHAR("TestXBadSharding")] [VARCHAR("TestExecutor")]]`, fmt.Sprintf("%v", qr.Rows)) } for _, query := range []string{"show databases", "show DATABASES", "show schemas", "show SCHEMAS"} { - qr, err := executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) // Showing default tables (5+4[default]) assertMatchesNoOrder(t, `[[VARCHAR("TestUnsharded")] [VARCHAR("TestMultiCol")] [VARCHAR("TestXBadVSchema")] [VARCHAR("TestXBadSharding")] [VARCHAR("TestExecutor")]] [VARCHAR("information_schema")] [VARCHAR("mysql")] [VARCHAR("sys")] [VARCHAR("performance_schema")]`, fmt.Sprintf("%v", qr.Rows)) } - _, err := executor.Execute(ctx, "TestExecute", session, "show variables", nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, "show variables", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, "TestExecute", session, "show collation", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "show collation", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, "TestExecute", session, "show collation where `Charset` = 'utf8' and `Collation` = 'utf8_bin'", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "show collation where `Charset` = 'utf8' and `Collation` = 'utf8_bin'", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, "TestExecute", session, "use @primary", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "use @primary", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, "TestExecute", session, "show tables", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "show tables", nil) assert.EqualError(t, err, errNoKeyspace.Error(), "'show tables' should fail without a keyspace") assert.Empty(t, sbclookup.Queries, "sbclookup unexpectedly has queries already") showResults := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "Tables_in_keyspace", Type: sqltypes.VarChar}, + {Name: "Tables_in_keyspace", Type: sqltypes.VarChar, Charset: uint32(collations.SystemCollation.Collation)}, }, RowsAffected: 1, InsertID: 0, @@ -553,7 +561,7 @@ func TestExecutorShow(t *testing.T) { sbclookup.SetResults([]*sqltypes.Result{showResults}) query := fmt.Sprintf("show tables from %v", KsTestUnsharded) - qr, err := executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) assert.Equal(t, 1, len(sbclookup.Queries), "Tablet should have received one 'show' query. Instead received: %v", sbclookup.Queries) @@ -565,129 +573,130 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, fmt.Sprintf("unexpected results running query: %s", query)) wantErrNoTable := "table unknown_table not found" - _, err = executor.Execute(ctx, "TestExecute", session, "show create table unknown_table", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "show create table unknown_table", nil) assert.EqualErrorf(t, err, wantErrNoTable, "Got: %v. Want: %v", wantErrNoTable) // SHOW CREATE table using vschema to find keyspace. - _, err = executor.Execute(ctx, "TestExecute", session, "show create table user_seq", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "show create table user_seq", nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql wantQuery := "show create table user_seq" assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) // SHOW CREATE table with query-provided keyspace - _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show create table %v.unknown", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show create table %v.unknown", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql wantQuery = "show create table unknown" assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) // SHOW KEYS with two different syntax - _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show keys from %v.unknown", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show keys from %v.unknown", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql wantQuery = "show indexes from unknown" assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) - _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show keys from unknown from %v", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show keys from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) // SHOW INDEX with two different syntax - _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show index from %v.unknown", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show index from %v.unknown", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) - _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show index from unknown from %v", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show index from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) // SHOW INDEXES with two different syntax - _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show indexes from %v.unknown", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show indexes from %v.unknown", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) - _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show indexes from unknown from %v", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show indexes from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) // SHOW EXTENDED {INDEX | INDEXES | KEYS} - _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show extended index from unknown from %v", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show extended index from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) - _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show extended indexes from unknown from %v", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show extended indexes from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) - _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show extended keys from unknown from %v", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show extended keys from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) // Set desitation keyspace in session session.TargetString = KsTestUnsharded - _, err = executor.Execute(ctx, "TestExecute", session, "show create table unknown", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "show create table unknown", nil) require.NoError(t, err) - _, err = executor.Execute(ctx, "TestExecute", session, "show full columns from table1", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "show full columns from table1", nil) require.NoError(t, err) // Reset target string so other tests dont fail. session.TargetString = "@primary" - _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show full columns from unknown from %v", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show full columns from unknown from %v", KsTestUnsharded), nil) require.NoError(t, err) - for _, query := range []string{"show charset", "show character set"} { - qr, err := executor.Execute(ctx, "TestExecute", session, query, nil) + for _, query := range []string{"show charset like 'utf8%'", "show character set like 'utf8%'"} { + qr, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr := &sqltypes.Result{ - Fields: append(buildVarCharFields("Charset", "Description", "Default collation"), &querypb.Field{Name: "Maxlen", Type: sqltypes.Int32}), + Fields: append(buildVarCharFields("Charset", "Description", "Default collation"), &querypb.Field{Name: "Maxlen", Type: sqltypes.Uint32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG | querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)}), Rows: [][]sqltypes.Value{ append(buildVarCharRow( - "utf8", + "utf8mb3", "UTF-8 Unicode", - "utf8_general_ci"), sqltypes.NewInt32(3)), + "utf8mb3_general_ci"), + sqltypes.NewUint32(3)), append(buildVarCharRow( "utf8mb4", "UTF-8 Unicode", - "utf8mb4_general_ci"), - sqltypes.NewInt32(4)), + collations.Local().LookupName(collations.Default())), + sqltypes.NewUint32(4)), }, } utils.MustMatch(t, wantqr, qr, query) } - for _, query := range []string{"show charset like '%foo'", "show character set like 'foo%'", "show charset like 'foo%'", "show character set where foo like 'utf8'", "show character set where charset like '%foo'", "show charset where charset = '%foo'"} { - qr, err := executor.Execute(ctx, "TestExecute", session, query, nil) + for _, query := range []string{"show charset like '%foo'", "show character set like 'foo%'", "show charset like 'foo%'", "show character set where charset like '%foo'", "show charset where charset = '%foo'"} { + qr, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr := &sqltypes.Result{ - Fields: append(buildVarCharFields("Charset", "Description", "Default collation"), &querypb.Field{Name: "Maxlen", Type: sqltypes.Int32}), - Rows: [][]sqltypes.Value{}, + Fields: append(buildVarCharFields("Charset", "Description", "Default collation"), &querypb.Field{Name: "Maxlen", Type: sqltypes.Uint32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG | querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)}), RowsAffected: 0, } utils.MustMatch(t, wantqr, qr, query) } - for _, query := range []string{"show charset like 'utf8'", "show character set like 'utf8'", "show charset where charset = 'utf8'", "show character set where charset = 'utf8'"} { - qr, err := executor.Execute(ctx, "TestExecute", session, query, nil) + for _, query := range []string{"show charset like 'utf8mb3'", "show character set like 'utf8mb3'", "show charset where charset = 'utf8mb3'", "show character set where charset = 'utf8mb3'"} { + qr, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr := &sqltypes.Result{ - Fields: append(buildVarCharFields("Charset", "Description", "Default collation"), &querypb.Field{Name: "Maxlen", Type: sqltypes.Int32}), + Fields: append(buildVarCharFields("Charset", "Description", "Default collation"), &querypb.Field{Name: "Maxlen", Type: sqltypes.Uint32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG | querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)}), Rows: [][]sqltypes.Value{ append(buildVarCharRow( - "utf8", + "utf8mb3", "UTF-8 Unicode", - "utf8_general_ci"), sqltypes.NewInt32(3)), + "utf8mb3_general_ci"), + sqltypes.NewUint32(3)), }, } @@ -695,23 +704,28 @@ func TestExecutorShow(t *testing.T) { } for _, query := range []string{"show charset like 'utf8mb4'", "show character set like 'utf8mb4'", "show charset where charset = 'utf8mb4'", "show character set where charset = 'utf8mb4'"} { - qr, err := executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr := &sqltypes.Result{ - Fields: append(buildVarCharFields("Charset", "Description", "Default collation"), &querypb.Field{Name: "Maxlen", Type: sqltypes.Int32}), + Fields: append(buildVarCharFields("Charset", "Description", "Default collation"), &querypb.Field{Name: "Maxlen", Type: sqltypes.Uint32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG | querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)}), Rows: [][]sqltypes.Value{ append(buildVarCharRow( "utf8mb4", "UTF-8 Unicode", - "utf8mb4_general_ci"), - sqltypes.NewInt32(4)), + collations.Local().LookupName(collations.Default())), + sqltypes.NewUint32(4)), }, } utils.MustMatch(t, wantqr, qr, query) } + for _, query := range []string{"show character set where foo like '%foo'"} { + _, err := executor.Execute(ctx, nil, "TestExecute", session, query, nil) + require.Error(t, err) + } + query = "show engines" - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Engine", "Support", "Comment", "Transactions", "XA", "Savepoints"), @@ -728,7 +742,7 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show plugins" - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Name", "Status", "Type", "Library", "License"), @@ -744,12 +758,12 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) for _, sql := range []string{"show session status", "show session status like 'Ssl_cipher'"} { - qr, err = executor.Execute(ctx, "TestExecute", session, sql, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, sql, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "value", Type: sqltypes.VarChar}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "value", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, }, Rows: [][]sqltypes.Value{ {sqltypes.NewInt32(1), sqltypes.NewVarChar("foo")}, @@ -760,11 +774,11 @@ func TestExecutorShow(t *testing.T) { } // Test SHOW FULL COLUMNS FROM where query has a qualifier - _, err = executor.Execute(ctx, "TestExecute", session, fmt.Sprintf("show full columns from %v.table1", KsTestUnsharded), nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show full columns from %v.table1", KsTestUnsharded), nil) require.NoError(t, err) query = "show vitess_shards" - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) // Just test for first & last. @@ -779,7 +793,7 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show vitess_shards like 'TestExecutor/%'" - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) // Just test for first & last. @@ -794,7 +808,7 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show vitess_shards like 'TestExec%/%'" - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) // Just test for first & last. @@ -809,7 +823,7 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show vitess_replication_status" - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) qr.Rows = [][]sqltypes.Value{} wantqr = &sqltypes.Result{ @@ -818,7 +832,7 @@ func TestExecutorShow(t *testing.T) { } utils.MustMatch(t, wantqr, qr, query) query = "show vitess_replication_status like 'x'" - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) qr.Rows = [][]sqltypes.Value{} wantqr = &sqltypes.Result{ @@ -828,7 +842,7 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show vitess_tablets" - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) // Just test for first & last. qr.Rows = [][]sqltypes.Value{qr.Rows[0], qr.Rows[len(qr.Rows)-1]} @@ -836,13 +850,13 @@ func TestExecutorShow(t *testing.T) { Fields: buildVarCharFields("Cell", "Keyspace", "Shard", "TabletType", "State", "Alias", "Hostname", "PrimaryTermStartTime"), Rows: [][]sqltypes.Value{ buildVarCharRow("aa", "TestExecutor", "-20", "PRIMARY", "SERVING", "aa-0000000001", "-20", "1970-01-01T00:00:01Z"), - buildVarCharRow("aa", "TestXBadVSchema", "-20", "PRIMARY", "SERVING", "aa-0000000009", "random", "1970-01-01T00:00:01Z"), + buildVarCharRow("aa", "TestUnsharded", "0", "REPLICA", "SERVING", "aa-0000000010", "2", "1970-01-01T00:00:01Z"), }, } utils.MustMatch(t, wantqr, qr, query) query = "show vitess_tablets like 'x'" - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Cell", "Keyspace", "Shard", "TabletType", "State", "Alias", "Hostname", "PrimaryTermStartTime"), @@ -851,7 +865,7 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, fmt.Sprintf("%q should be empty", query)) query = "show vitess_tablets like '-20%'" - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Cell", "Keyspace", "Shard", "TabletType", "State", "Alias", "Hostname", "PrimaryTermStartTime"), @@ -862,7 +876,7 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show vschema vindexes" - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Keyspace", "Name", "Type", "Params", "Owner"), @@ -892,7 +906,7 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show vschema vindexes on TestExecutor.user" - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -904,18 +918,18 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show vschema vindexes on user" - _, err = executor.Execute(ctx, "TestExecute", session, query, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) wantErr := errNoKeyspace.Error() assert.EqualError(t, err, wantErr, query) query = "show vschema vindexes on TestExecutor.garbage" - _, err = executor.Execute(ctx, "TestExecute", session, query, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) wantErr = "VT05005: table 'garbage' does not exist in keyspace 'TestExecutor'" assert.EqualError(t, err, wantErr, query) query = "show vschema vindexes on user" session.TargetString = "TestExecutor" - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -928,7 +942,7 @@ func TestExecutorShow(t *testing.T) { query = "show vschema vindexes on user2" session.TargetString = "TestExecutor" - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -940,18 +954,18 @@ func TestExecutorShow(t *testing.T) { utils.MustMatch(t, wantqr, qr, query) query = "show vschema vindexes on garbage" - _, err = executor.Execute(ctx, "TestExecute", session, query, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) wantErr = "VT05005: table 'garbage' does not exist in keyspace 'TestExecutor'" assert.EqualError(t, err, wantErr, query) query = "show warnings" - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "Level", Type: sqltypes.VarChar}, - {Name: "Code", Type: sqltypes.Uint16}, - {Name: "Message", Type: sqltypes.VarChar}, + {Name: "Level", Type: sqltypes.VarChar, Charset: uint32(collations.SystemCollation.Collation)}, + {Name: "Code", Type: sqltypes.Uint16, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG)}, + {Name: "Message", Type: sqltypes.VarChar, Charset: uint32(collations.SystemCollation.Collation)}, }, Rows: [][]sqltypes.Value{}, } @@ -959,13 +973,13 @@ func TestExecutorShow(t *testing.T) { query = "show warnings" session.Warnings = []*querypb.QueryWarning{} - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "Level", Type: sqltypes.VarChar}, - {Name: "Code", Type: sqltypes.Uint16}, - {Name: "Message", Type: sqltypes.VarChar}, + {Name: "Level", Type: sqltypes.VarChar, Charset: uint32(collations.SystemCollation.Collation)}, + {Name: "Code", Type: sqltypes.Uint16, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG)}, + {Name: "Message", Type: sqltypes.VarChar, Charset: uint32(collations.SystemCollation.Collation)}, }, Rows: [][]sqltypes.Value{}, } @@ -973,29 +987,29 @@ func TestExecutorShow(t *testing.T) { query = "show warnings" session.Warnings = []*querypb.QueryWarning{ - {Code: uint32(mysql.ERBadTable), Message: "bad table"}, - {Code: uint32(mysql.EROutOfResources), Message: "ks/-40: query timed out"}, + {Code: uint32(sqlerror.ERBadTable), Message: "bad table"}, + {Code: uint32(sqlerror.EROutOfResources), Message: "ks/-40: query timed out"}, } - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "Level", Type: sqltypes.VarChar}, - {Name: "Code", Type: sqltypes.Uint16}, - {Name: "Message", Type: sqltypes.VarChar}, + {Name: "Level", Type: sqltypes.VarChar, Charset: uint32(collations.SystemCollation.Collation)}, + {Name: "Code", Type: sqltypes.Uint16, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG)}, + {Name: "Message", Type: sqltypes.VarChar, Charset: uint32(collations.SystemCollation.Collation)}, }, Rows: [][]sqltypes.Value{ - {sqltypes.NewVarChar("Warning"), sqltypes.NewUint32(uint32(mysql.ERBadTable)), sqltypes.NewVarChar("bad table")}, - {sqltypes.NewVarChar("Warning"), sqltypes.NewUint32(uint32(mysql.EROutOfResources)), sqltypes.NewVarChar("ks/-40: query timed out")}, + {sqltypes.NewVarChar("Warning"), sqltypes.NewUint32(uint32(sqlerror.ERBadTable)), sqltypes.NewVarChar("bad table")}, + {sqltypes.NewVarChar("Warning"), sqltypes.NewUint32(uint32(sqlerror.EROutOfResources)), sqltypes.NewVarChar("ks/-40: query timed out")}, }, } utils.MustMatch(t, wantqr, qr, query) // Make sure it still works when one of the keyspaces is in a bad state - getSandbox("TestExecutor").SrvKeyspaceMustFail++ + getSandbox(KsTestSharded).SrvKeyspaceMustFail++ query = "show vitess_shards" - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) // Just test for first & last. qr.Rows = [][]sqltypes.Value{qr.Rows[0], qr.Rows[len(qr.Rows)-1]} @@ -1010,7 +1024,7 @@ func TestExecutorShow(t *testing.T) { query = "show vschema tables" session = NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}) - qr, err = executor.Execute(ctx, "TestExecute", session, query, nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Tables"), @@ -1036,34 +1050,35 @@ func TestExecutorShow(t *testing.T) { query = "show vschema tables" session = NewSafeSession(&vtgatepb.Session{}) - _, err = executor.Execute(ctx, "TestExecute", session, query, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) want = errNoKeyspace.Error() assert.EqualError(t, err, want, query) query = "show 10" - _, err = executor.Execute(ctx, "TestExecute", session, query, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) want = "syntax error at position 8 near '10'" assert.EqualError(t, err, want, query) query = "show vschema tables" session = NewSafeSession(&vtgatepb.Session{TargetString: "no_such_keyspace"}) - _, err = executor.Execute(ctx, "TestExecute", session, query, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) want = "VT05003: unknown database 'no_such_keyspace' in vschema" assert.EqualError(t, err, want, query) query = "show vitess_migrations" - _, err = executor.Execute(ctx, "TestExecute", session, query, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) want = "VT05003: unknown database 'no_such_keyspace' in vschema" assert.EqualError(t, err, want, query) query = "show vitess_migrations from ks like '9748c3b7_7fdb_11eb_ac2c_f875a4d24e90'" - _, err = executor.Execute(ctx, "TestExecute", session, query, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, query, nil) want = "VT05003: unknown database 'ks' in vschema" assert.EqualError(t, err, want, query) } func TestExecutorShowTargeted(t *testing.T) { - executor, _, sbc2, _ := createExecutorEnv() + executor, _, sbc2, _, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor/40-60"}) queries := []string{ @@ -1080,7 +1095,7 @@ func TestExecutorShowTargeted(t *testing.T) { } for _, sql := range queries { - _, err := executor.Execute(ctx, "TestExecutorShowTargeted", session, sql, nil) + _, err := executor.Execute(ctx, nil, "TestExecutorShowTargeted", session, sql, nil) require.NoError(t, err) assert.NotZero(t, len(sbc2.Queries), "Tablet should have received 'show' query") lastQuery := sbc2.Queries[len(sbc2.Queries)-1].Sql @@ -1089,7 +1104,8 @@ func TestExecutorShowTargeted(t *testing.T) { } func TestExecutorUse(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{Autocommit: true, TargetString: "@primary"}) stmts := []string{ @@ -1101,7 +1117,7 @@ func TestExecutorUse(t *testing.T) { "TestExecutor:-80@primary", } for i, stmt := range stmts { - _, err := executor.Execute(ctx, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) if err != nil { t.Error(err) } @@ -1109,18 +1125,18 @@ func TestExecutorUse(t *testing.T) { utils.MustMatch(t, wantSession, session.Session, "session does not match") } - _, err := executor.Execute(ctx, "TestExecute", NewSafeSession(&vtgatepb.Session{}), "use 1", nil) + _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{}), "use 1", nil) wantErr := "syntax error at position 6 near '1'" if err == nil || err.Error() != wantErr { t.Errorf("got: %v, want %v", err, wantErr) } - _, err = executor.Execute(ctx, "TestExecute", NewSafeSession(&vtgatepb.Session{}), "use UnexistentKeyspace", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{}), "use UnexistentKeyspace", nil) require.EqualError(t, err, "VT05003: unknown database 'UnexistentKeyspace' in vschema") } func TestExecutorComment(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) stmts := []string{ "/*! SET autocommit=1*/", @@ -1129,7 +1145,7 @@ func TestExecutorComment(t *testing.T) { wantResult := &sqltypes.Result{} for _, stmt := range stmts { - gotResult, err := executor.Execute(ctx, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), stmt, nil) + gotResult, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), stmt, nil) if err != nil { t.Error(err) } @@ -1140,7 +1156,7 @@ func TestExecutorComment(t *testing.T) { } func TestExecutorOther(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) type cnts struct { Sbc1Cnt int64 @@ -1212,7 +1228,7 @@ func TestExecutorOther(t *testing.T) { sbc2.ExecCount.Store(0) sbclookup.ExecCount.Store(0) - _, err := executor.Execute(ctx, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) + _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) if tc.hasNoKeyspaceErr { assert.Error(t, err, errNoKeyspace) } else if tc.hasDestinationShardErr { @@ -1232,10 +1248,10 @@ func TestExecutorOther(t *testing.T) { } func TestExecutorDDL(t *testing.T) { - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) type cnts struct { Sbc1Cnt int64 @@ -1306,7 +1322,7 @@ func TestExecutorDDL(t *testing.T) { sbc2.ExecCount.Store(0) sbclookup.ExecCount.Store(0) stmtType := "DDL" - _, err := executor.Execute(ctx, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) + _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) if tc.hasNoKeyspaceErr { require.EqualError(t, err, errNoKeyspace.Error(), "expect query to fail: %q", stmt) stmtType = "" // For error case, plan is not generated to query log will not contain any stmtType. @@ -1323,7 +1339,7 @@ func TestExecutorDDL(t *testing.T) { t.Errorf("stmt: %s\ntc: %+v\n-want,+got:\n%s", stmt, tc, diff) } - testQueryLog(t, logChan, "TestExecute", stmtType, stmt, tc.shardQueryCnt) + testQueryLog(t, executor, logChan, "TestExecute", stmtType, stmt, tc.shardQueryCnt) } } @@ -1344,19 +1360,19 @@ func TestExecutorDDL(t *testing.T) { sbc1.ExecCount.Store(0) sbc2.ExecCount.Store(0) sbclookup.ExecCount.Store(0) - _, err := executor.Execute(ctx, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: ""}), stmt.input, nil) + _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: ""}), stmt.input, nil) if stmt.hasErr { require.EqualError(t, err, errNoKeyspace.Error(), "expect query to fail") - testQueryLog(t, logChan, "TestExecute", "", stmt.input, 0) + testQueryLog(t, executor, logChan, "TestExecute", "", stmt.input, 0) } else { require.NoError(t, err) - testQueryLog(t, logChan, "TestExecute", "DDL", stmt.input, 8) + testQueryLog(t, executor, logChan, "TestExecute", "DDL", stmt.input, 8) } } } func TestExecutorDDLFk(t *testing.T) { - executor, _, _, sbc := createExecutorEnv() + executor, _, _, sbc, ctx := createExecutorEnv(t) mName := "TestExecutorDDLFk" stmts := []string{ @@ -1369,7 +1385,7 @@ func TestExecutorDDLFk(t *testing.T) { t.Run(stmt+fkMode, func(t *testing.T) { sbc.ExecCount.Store(0) foreignKeyMode = fkMode - _, err := executor.Execute(ctx, mName, NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), stmt, nil) + _, err := executor.Execute(ctx, nil, mName, NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), stmt, nil) if fkMode == "allow" { require.NoError(t, err) require.EqualValues(t, 1, sbc.ExecCount.Load()) @@ -1387,7 +1403,8 @@ func TestExecutorAlterVSchemaKeyspace(t *testing.T) { defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, _, _, _ := createExecutorEnv() + + executor, _, _, _, ctx := createExecutorEnv(t) session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true}) vschemaUpdates := make(chan *vschemapb.SrvVSchema, 2) @@ -1403,7 +1420,7 @@ func TestExecutorAlterVSchemaKeyspace(t *testing.T) { } stmt := "alter vschema create vindex TestExecutor.test_vindex using hash" - _, err := executor.Execute(ctx, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _, vindex := waitForVindex(t, "TestExecutor", "test_vindex", vschemaUpdates, executor) @@ -1415,7 +1432,7 @@ func TestExecutorCreateVindexDDL(t *testing.T) { defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) ks := "TestExecutor" vschemaUpdates := make(chan *vschemapb.SrvVSchema, 4) @@ -1432,7 +1449,7 @@ func TestExecutorCreateVindexDDL(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) stmt := "alter vschema create vindex test_vindex using hash" - _, err := executor.Execute(ctx, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _, vindex := waitForVindex(t, ks, "test_vindex", vschemaUpdates, executor) @@ -1440,7 +1457,7 @@ func TestExecutorCreateVindexDDL(t *testing.T) { t.Errorf("updated vschema did not contain test_vindex") } - _, err = executor.Execute(ctx, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) wantErr := "vindex test_vindex already exists in keyspace TestExecutor" if err == nil || err.Error() != wantErr { t.Errorf("create duplicate vindex: %v, want %s", err, wantErr) @@ -1456,7 +1473,7 @@ func TestExecutorCreateVindexDDL(t *testing.T) { // ksNew := "test_new_keyspace" session = NewSafeSession(&vtgatepb.Session{TargetString: ks}) stmt = "alter vschema create vindex test_vindex2 using hash" - _, err = executor.Execute(ctx, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) if err != nil { t.Fatalf("error in %s: %v", stmt, err) } @@ -1485,7 +1502,7 @@ func TestExecutorAddDropVschemaTableDDL(t *testing.T) { defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) ks := KsTestUnsharded vschemaUpdates := make(chan *vschemapb.SrvVSchema, 4) @@ -1507,19 +1524,19 @@ func TestExecutorAddDropVschemaTableDDL(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) stmt := "alter vschema add table test_table" - _, err := executor.Execute(ctx, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _ = waitForVschemaTables(t, ks, append([]string{"test_table"}, vschemaTables...), executor) stmt = "alter vschema add table test_table2" - _, err = executor.Execute(ctx, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _ = waitForVschemaTables(t, ks, append([]string{"test_table", "test_table2"}, vschemaTables...), executor) // Should fail adding a table on a sharded keyspace session = NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) stmt = "alter vschema add table test_table" - _, err = executor.Execute(ctx, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, "add vschema table: unsupported on sharded keyspace TestExecutor") // No queries should have gone to any tablets @@ -1533,7 +1550,8 @@ func TestExecutorAddDropVschemaTableDDL(t *testing.T) { } func TestExecutorVindexDDLACL(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) + ks := "TestExecutor" session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) @@ -1542,21 +1560,21 @@ func TestExecutorVindexDDLACL(t *testing.T) { // test that by default no users can perform the operation stmt := "alter vschema create vindex test_hash using hash" - _, err := executor.Execute(ctxRedUser, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, `User 'redUser' is not authorized to perform vschema operations`) - _, err = executor.Execute(ctxBlueUser, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxBlueUser, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, `User 'blueUser' is not authorized to perform vschema operations`) // test when all users are enabled vschemaacl.AuthorizedDDLUsers = "%" vschemaacl.Init() - _, err = executor.Execute(ctxRedUser, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) if err != nil { t.Errorf("unexpected error '%v'", err) } stmt = "alter vschema create vindex test_hash2 using hash" - _, err = executor.Execute(ctxBlueUser, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxBlueUser, nil, "TestExecute", session, stmt, nil) if err != nil { t.Errorf("unexpected error '%v'", err) } @@ -1564,11 +1582,11 @@ func TestExecutorVindexDDLACL(t *testing.T) { // test when only one user is enabled vschemaacl.AuthorizedDDLUsers = "orangeUser, blueUser, greenUser" vschemaacl.Init() - _, err = executor.Execute(ctxRedUser, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, `User 'redUser' is not authorized to perform vschema operations`) stmt = "alter vschema create vindex test_hash3 using hash" - _, err = executor.Execute(ctxBlueUser, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxBlueUser, nil, "TestExecute", session, stmt, nil) if err != nil { t.Errorf("unexpected error '%v'", err) } @@ -1578,16 +1596,16 @@ func TestExecutorVindexDDLACL(t *testing.T) { } func TestExecutorUnrecognized(t *testing.T) { - executor, _, _, _ := createExecutorEnv() - _, err := executor.Execute(ctx, "TestExecute", NewSafeSession(&vtgatepb.Session{}), "invalid statement", nil) + executor, _, _, _, ctx := createExecutorEnv(t) + + _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{}), "invalid statement", nil) require.Error(t, err, "unrecognized statement: invalid statement'") } // TestVSchemaStats makes sure the building and displaying of the // VSchemaStats works. func TestVSchemaStats(t *testing.T) { - r, _, _, _ := createExecutorEnv() - + r, _, _, _, _ := createExecutorEnv(t) stats := r.VSchemaStats() templ := template.New("") @@ -1609,224 +1627,271 @@ func TestVSchemaStats(t *testing.T) { var pv = querypb.ExecuteOptions_Gen4 func TestGetPlanUnnormalized(t *testing.T) { - r, _, _, _ := createExecutorEnv() + r, _, _, _, ctx := createExecutorEnv(t) + emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) query1 := "select * from music_user_map where id = 1" - plan1, logStats1 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + plan1, logStats1 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) wantSQL := query1 + " /* comment */" if logStats1.SQL != wantSQL { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats1.SQL) } - plan2, logStats2 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + plan2, logStats2 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) if plan1 != plan2 { t.Errorf("getPlan(query1): plans must be equal: %p %p", plan1, plan2) } - want := []string{ - "@unknown:" + query1, - } - assertCacheContains(t, r, want) + assertCacheContains(t, r, emptyvc, query1) if logStats2.SQL != wantSQL { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats2.SQL) } - plan3, logStats3 := getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + plan3, logStats3 := getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) if plan1 == plan3 { t.Errorf("getPlan(query1, ks): plans must not be equal: %p %p", plan1, plan3) } if logStats3.SQL != wantSQL { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats3.SQL) } - plan4, logStats4 := getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + plan4, logStats4 := getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) if plan3 != plan4 { t.Errorf("getPlan(query1, ks): plans must be equal: %p %p", plan3, plan4) } - want = []string{ - KsTestUnsharded + "@unknown:" + query1, - "@unknown:" + query1, - } - assertCacheContains(t, r, want) + assertCacheContains(t, r, emptyvc, query1) + assertCacheContains(t, r, unshardedvc, query1) if logStats4.SQL != wantSQL { t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats4.SQL) } } -func assertCacheSize(t *testing.T, c cache.Cache, expected int) { +func assertCacheSize(t *testing.T, c *PlanCache, expected int) { t.Helper() - var size int - c.ForEach(func(_ any) bool { - size++ - return true - }) + size := c.Len() if size != expected { t.Errorf("getPlan() expected cache to have size %d, but got: %d", expected, size) } } -func assertCacheContains(t *testing.T, e *Executor, want []string) { +func assertCacheContains(t *testing.T, e *Executor, vc *vcursorImpl, sql string) *engine.Plan { t.Helper() - for _, wantKey := range want { - if _, ok := e.debugGetPlan(wantKey); !ok { - t.Errorf("missing key in plan cache: %v", wantKey) - } + + var plan *engine.Plan + if vc == nil { + e.ForEachPlan(func(p *engine.Plan) bool { + if p.Original == sql { + plan = p + } + return true + }) + } else { + h := e.hashPlan(context.Background(), vc, sql) + plan, _ = e.plans.Get(h, e.epoch.Load()) } + require.Truef(t, plan != nil, "plan not found for query: %s", sql) + return plan } -func getPlanCached(t *testing.T, e *Executor, vcursor *vcursorImpl, sql string, comments sqlparser.MarginComments, bindVars map[string]*querypb.BindVariable, skipQueryPlanCache bool) (*engine.Plan, *logstats.LogStats) { +func getPlanCached(t *testing.T, ctx context.Context, e *Executor, vcursor *vcursorImpl, sql string, comments sqlparser.MarginComments, bindVars map[string]*querypb.BindVariable, skipQueryPlanCache bool) (*engine.Plan, *logstats.LogStats) { logStats := logstats.NewLogStats(ctx, "Test", "", "", nil) - plan, _, err := e.getPlan(context.Background(), vcursor, sql, comments, bindVars, &SafeSession{ - Session: &vtgatepb.Session{Options: &querypb.ExecuteOptions{SkipQueryPlanCache: skipQueryPlanCache}}, - }, logStats) + vcursor.safeSession = &SafeSession{ + Session: &vtgatepb.Session{ + Options: &querypb.ExecuteOptions{SkipQueryPlanCache: skipQueryPlanCache}}, + } + + stmt, reservedVars, err := parseAndValidateQuery(sql) + require.NoError(t, err) + plan, err := e.getPlan(context.Background(), vcursor, sql, stmt, comments, bindVars, reservedVars /* normalize */, e.normalize, logStats) require.NoError(t, err) // Wait for cache to settle - e.plans.Wait() + time.Sleep(100 * time.Millisecond) return plan, logStats } func TestGetPlanCacheUnnormalized(t *testing.T) { - r, _, _, _ := createExecutorEnv() - emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) - query1 := "select * from music_user_map where id = 1" + t.Run("Cache", func(t *testing.T) { + r, _, _, _, ctx := createExecutorEnv(t) - _, logStats1 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, true) - assertCacheSize(t, r.plans, 0) + emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + query1 := "select * from music_user_map where id = 1" - wantSQL := query1 + " /* comment */" - if logStats1.SQL != wantSQL { - t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats1.SQL) - } + _, logStats1 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, true) + assertCacheSize(t, r.plans, 0) - _, logStats2 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment 2 */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 1) + wantSQL := query1 + " /* comment */" + if logStats1.SQL != wantSQL { + t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats1.SQL) + } - wantSQL = query1 + " /* comment 2 */" - if logStats2.SQL != wantSQL { - t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats2.SQL) - } + _, logStats2 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment 2 */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 1) - // Skip cache using directive - r, _, _, _ = createExecutorEnv() - unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + wantSQL = query1 + " /* comment 2 */" + if logStats2.SQL != wantSQL { + t.Errorf("logstats sql want \"%s\" got \"%s\"", wantSQL, logStats2.SQL) + } + }) - query1 = "insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)" - getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 0) + t.Run("Skip Cache", func(t *testing.T) { + // Skip cache using directive + r, _, _, _, ctx := createExecutorEnv(t) - query1 = "insert into user(id) values (1), (2)" - getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 1) + unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) - // the target string will be resolved and become part of the plan cache key, which adds a new entry - ksIDVc1, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) - getPlanCached(t, r, ksIDVc1, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 2) + query1 := "insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)" + getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 0) - // the target string will be resolved and become part of the plan cache key, as it's an unsharded ks, it will be the same entry as above - ksIDVc2, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) - getPlanCached(t, r, ksIDVc2, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 2) -} + query1 = "insert into user(id) values (1), (2)" + getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 1) -func TestGetPlanCacheNormalized(t *testing.T) { - r, _, _, _ := createExecutorEnv() - r.normalize = true - emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + // the target string will be resolved and become part of the plan cache key, which adds a new entry + ksIDVc1, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + getPlanCached(t, ctx, r, ksIDVc1, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 2) - query1 := "select * from music_user_map where id = 1" - _, logStats1 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, true /* skipQueryPlanCache */) - assertCacheSize(t, r.plans, 0) - wantSQL := "select * from music_user_map where id = :id /* comment */" - assert.Equal(t, wantSQL, logStats1.SQL) - - _, logStats2 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false /* skipQueryPlanCache */) - assertCacheSize(t, r.plans, 1) - assert.Equal(t, wantSQL, logStats2.SQL) - - // Skip cache using directive - r, _, _, _ = createExecutorEnv() - r.normalize = true - unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) - - query1 = "insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)" - getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 0) - - query1 = "insert into user(id) values (1), (2)" - getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 1) + // the target string will be resolved and become part of the plan cache key, as it's an unsharded ks, it will be the same entry as above + ksIDVc2, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + getPlanCached(t, ctx, r, ksIDVc2, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 2) + }) +} - // the target string will be resolved and become part of the plan cache key, which adds a new entry - ksIDVc1, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) - getPlanCached(t, r, ksIDVc1, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 2) +func TestGetPlanCacheNormalized(t *testing.T) { + t.Run("Cache", func(t *testing.T) { + r, _, _, _, ctx := createExecutorEnv(t) + r.normalize = true + emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + + query1 := "select * from music_user_map where id = 1" + _, logStats1 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, true /* skipQueryPlanCache */) + assertCacheSize(t, r.plans, 0) + wantSQL := "select * from music_user_map where id = :id /* INT64 */ /* comment */" + assert.Equal(t, wantSQL, logStats1.SQL) + + _, logStats2 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false /* skipQueryPlanCache */) + assertCacheSize(t, r.plans, 1) + assert.Equal(t, wantSQL, logStats2.SQL) + }) - // the target string will be resolved and become part of the plan cache key, as it's an unsharded ks, it will be the same entry as above - ksIDVc2, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) - getPlanCached(t, r, ksIDVc2, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) - assertCacheSize(t, r.plans, 2) + t.Run("Skip Cache", func(t *testing.T) { + // Skip cache using directive + r, _, _, _, ctx := createExecutorEnv(t) + r.normalize = true + unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + + query1 := "insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)" + getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 0) + + query1 = "insert into user(id) values (1), (2)" + getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 1) + + // the target string will be resolved and become part of the plan cache key, which adds a new entry + ksIDVc1, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[deadbeef]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + getPlanCached(t, ctx, r, ksIDVc1, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 2) + + // the target string will be resolved and become part of the plan cache key, as it's an unsharded ks, it will be the same entry as above + ksIDVc2, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "[beefdead]"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + getPlanCached(t, ctx, r, ksIDVc2, query1, makeComments(" /* comment */"), map[string]*querypb.BindVariable{}, false) + assertCacheSize(t, r.plans, 2) + }) } func TestGetPlanNormalized(t *testing.T) { - r, _, _, _ := createExecutorEnv() + r, _, _, _, ctx := createExecutorEnv(t) + r.normalize = true emptyvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) unshardedvc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded + "@unknown"}), makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) query1 := "select * from music_user_map where id = 1" query2 := "select * from music_user_map where id = 2" - normalized := "select * from music_user_map where id = :id" + normalized := "select * from music_user_map where id = :id /* INT64 */" - plan1, logStats1 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment 1 */"), map[string]*querypb.BindVariable{}, false) - plan2, logStats2 := getPlanCached(t, r, emptyvc, query1, makeComments(" /* comment 2 */"), map[string]*querypb.BindVariable{}, false) + plan1, logStats1 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment 1 */"), map[string]*querypb.BindVariable{}, false) + plan2, logStats2 := getPlanCached(t, ctx, r, emptyvc, query1, makeComments(" /* comment 2 */"), map[string]*querypb.BindVariable{}, false) assert.Equal(t, plan1, plan2) - want := []string{ - "@unknown:" + normalized, - } - assertCacheContains(t, r, want) + assertCacheContains(t, r, emptyvc, normalized) wantSQL := normalized + " /* comment 1 */" assert.Equal(t, wantSQL, logStats1.SQL) wantSQL = normalized + " /* comment 2 */" assert.Equal(t, wantSQL, logStats2.SQL) - plan3, logStats3 := getPlanCached(t, r, emptyvc, query2, makeComments(" /* comment 3 */"), map[string]*querypb.BindVariable{}, false) + plan3, logStats3 := getPlanCached(t, ctx, r, emptyvc, query2, makeComments(" /* comment 3 */"), map[string]*querypb.BindVariable{}, false) assert.Equal(t, plan1, plan3) wantSQL = normalized + " /* comment 3 */" assert.Equal(t, wantSQL, logStats3.SQL) - plan4, logStats4 := getPlanCached(t, r, emptyvc, normalized, makeComments(" /* comment 4 */"), map[string]*querypb.BindVariable{}, false) - assert.Equal(t, plan1, plan4) - wantSQL = normalized + " /* comment 4 */" - assert.Equal(t, wantSQL, logStats4.SQL) - var logStats5 *logstats.LogStats - plan3, logStats5 = getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment 5 */"), map[string]*querypb.BindVariable{}, false) + plan3, logStats5 = getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment 5 */"), map[string]*querypb.BindVariable{}, false) assert.Equal(t, plan1, plan3) wantSQL = normalized + " /* comment 5 */" assert.Equal(t, wantSQL, logStats5.SQL) - plan4, _ = getPlanCached(t, r, unshardedvc, query1, makeComments(" /* comment 6 */"), map[string]*querypb.BindVariable{}, false) + plan4, _ := getPlanCached(t, ctx, r, unshardedvc, query1, makeComments(" /* comment 6 */"), map[string]*querypb.BindVariable{}, false) assert.Equal(t, plan1, plan4) - want = []string{ - KsTestUnsharded + "@unknown:" + normalized, - "@unknown:" + normalized, + assertCacheContains(t, r, emptyvc, normalized) + assertCacheContains(t, r, unshardedvc, normalized) +} + +func TestGetPlanPriority(t *testing.T) { + + testCases := []struct { + name string + sql string + expectedPriority string + expectedError error + }{ + {name: "Invalid priority", sql: "select /*vt+ PRIORITY=something */ * from music_user_map", expectedPriority: "", expectedError: sqlparser.ErrInvalidPriority}, + {name: "Valid priority", sql: "select /*vt+ PRIORITY=33 */ * from music_user_map", expectedPriority: "33", expectedError: nil}, + {name: "empty priority", sql: "select * from music_user_map", expectedPriority: "", expectedError: nil}, + } + + session := NewSafeSession(&vtgatepb.Session{TargetString: "@unknown", Options: &querypb.ExecuteOptions{}}) + + for _, aTestCase := range testCases { + testCase := aTestCase + + t.Run(testCase.name, func(t *testing.T) { + r, _, _, _, ctx := createExecutorEnv(t) + + r.normalize = true + logStats := logstats.NewLogStats(ctx, "Test", "", "", nil) + vCursor, err := newVCursorImpl(session, makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) + assert.NoError(t, err) + + stmt, err := sqlparser.Parse(testCase.sql) + assert.NoError(t, err) + crticalityFromStatement, _ := sqlparser.GetPriorityFromStatement(stmt) + + _, err = r.getPlan(context.Background(), vCursor, testCase.sql, stmt, makeComments("/* some comment */"), map[string]*querypb.BindVariable{}, nil, true, logStats) + if testCase.expectedError != nil { + assert.ErrorIs(t, err, testCase.expectedError) + } else { + assert.NoError(t, err) + assert.Equal(t, testCase.expectedPriority, crticalityFromStatement) + assert.Equal(t, testCase.expectedPriority, vCursor.safeSession.Options.Priority) + } + }) } - assertCacheContains(t, r, want) - _, _, err := r.getPlan(context.Background(), emptyvc, "syntax", makeComments(""), map[string]*querypb.BindVariable{}, nil, nil) - assert.EqualError(t, err, "syntax error at position 7 near 'syntax'") - assertCacheContains(t, r, want) } func TestPassthroughDDL(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - primarySession.TargetString = "TestExecutor" + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + session := &vtgatepb.Session{ + TargetString: "TestExecutor", + } - alterDDL := "/* leading */ alter table passthrough_ddl add columne col bigint default 123 /* trailing */" - _, err := executorExec(executor, alterDDL, nil) + alterDDL := "/* leading */ alter table passthrough_ddl add column col bigint default 123 /* trailing */" + _, err := executorExec(ctx, executor, session, alterDDL, nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ Sql: alterDDL, @@ -1842,23 +1907,22 @@ func TestPassthroughDDL(t *testing.T) { sbc2.Queries = nil // Force the query to go to only one shard. Normalization doesn't make any difference. - primarySession.TargetString = "TestExecutor/40-60" + session.TargetString = "TestExecutor/40-60" executor.normalize = true - _, err = executorExec(executor, alterDDL, nil) + _, err = executorExec(ctx, executor, session, alterDDL, nil) require.NoError(t, err) require.Nil(t, sbc1.Queries) if !reflect.DeepEqual(sbc2.Queries, wantQueries) { t.Errorf("sbc2.Queries: %+v, want %+v\n", sbc2.Queries, wantQueries) } sbc2.Queries = nil - primarySession.TargetString = "" // Use range query - primarySession.TargetString = "TestExecutor[-]" + session.TargetString = "TestExecutor[-]" executor.normalize = true - _, err = executorExec(executor, alterDDL, nil) + _, err = executorExec(ctx, executor, session, alterDDL, nil) require.NoError(t, err) if !reflect.DeepEqual(sbc1.Queries, wantQueries) { t.Errorf("sbc2.Queries: %+v, want %+v\n", sbc1.Queries, wantQueries) @@ -1867,11 +1931,11 @@ func TestPassthroughDDL(t *testing.T) { t.Errorf("sbc2.Queries: %+v, want %+v\n", sbc2.Queries, wantQueries) } sbc2.Queries = nil - primarySession.TargetString = "" } func TestParseEmptyTargetSingleKeyspace(t *testing.T) { - r, _, _, _ := createExecutorEnv() + r, _, _, _, _ := createExecutorEnv(t) + altVSchema := &vindexes.VSchema{ Keyspaces: map[string]*vindexes.KeyspaceSchema{ KsTestUnsharded: r.vschema.Keyspaces[KsTestUnsharded], @@ -1893,7 +1957,8 @@ func TestParseEmptyTargetSingleKeyspace(t *testing.T) { } func TestParseEmptyTargetMultiKeyspace(t *testing.T) { - r, _, _, _ := createExecutorEnv() + r, _, _, _, _ := createExecutorEnv(t) + altVSchema := &vindexes.VSchema{ Keyspaces: map[string]*vindexes.KeyspaceSchema{ KsTestUnsharded: r.vschema.Keyspaces[KsTestUnsharded], @@ -1916,7 +1981,8 @@ func TestParseEmptyTargetMultiKeyspace(t *testing.T) { } func TestParseTargetSingleKeyspace(t *testing.T) { - r, _, _, _ := createExecutorEnv() + r, _, _, _, _ := createExecutorEnv(t) + altVSchema := &vindexes.VSchema{ Keyspaces: map[string]*vindexes.KeyspaceSchema{ KsTestUnsharded: r.vschema.Keyspaces[KsTestUnsharded], @@ -1938,10 +2004,11 @@ func TestParseTargetSingleKeyspace(t *testing.T) { } func TestDebugVSchema(t *testing.T) { + executor, _, _, _, _ := createExecutorEnv(t) + resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/debug/vschema", nil) - executor, _, _, _ := createExecutorEnv() executor.ServeHTTP(resp, req) v := make(map[string]any) if err := json.Unmarshal(resp.Body.Bytes(), &v); err != nil { @@ -1965,7 +2032,8 @@ func TestExecutorMaxPayloadSizeExceeded(t *testing.T) { warnPayloadSize = saveWarn }() - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, _ := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary"}) warningCount := warnings.Counts()["WarnPayloadSizeExceeded"] testMaxPayloadSizeExceeded := []string{ @@ -1975,7 +2043,7 @@ func TestExecutorMaxPayloadSizeExceeded(t *testing.T) { "delete from main1 where id=1", } for _, query := range testMaxPayloadSizeExceeded { - _, err := executor.Execute(context.Background(), "TestExecutorMaxPayloadSizeExceeded", session, query, nil) + _, err := executor.Execute(context.Background(), nil, "TestExecutorMaxPayloadSizeExceeded", session, query, nil) require.NotNil(t, err) assert.EqualError(t, err, "query payload size above threshold") } @@ -1988,21 +2056,22 @@ func TestExecutorMaxPayloadSizeExceeded(t *testing.T) { "delete /*vt+ IGNORE_MAX_PAYLOAD_SIZE=1 */ from main1 where id=1", } for _, query := range testMaxPayloadSizeOverride { - _, err := executor.Execute(context.Background(), "TestExecutorMaxPayloadSizeWithOverride", session, query, nil) + _, err := executor.Execute(context.Background(), nil, "TestExecutorMaxPayloadSizeWithOverride", session, query, nil) assert.Equal(t, nil, err, "err should be nil") } assert.Equal(t, warningCount, warnings.Counts()["WarnPayloadSizeExceeded"], "warnings count") maxPayloadSize = 1000 for _, query := range testMaxPayloadSizeExceeded { - _, err := executor.Execute(context.Background(), "TestExecutorMaxPayloadSizeExceeded", session, query, nil) + _, err := executor.Execute(context.Background(), nil, "TestExecutorMaxPayloadSizeExceeded", session, query, nil) assert.Equal(t, nil, err, "err should be nil") } assert.Equal(t, warningCount+4, warnings.Counts()["WarnPayloadSizeExceeded"], "warnings count") } func TestOlapSelectDatabase(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, _ := createExecutorEnv(t) + executor.normalize = true session := &vtgatepb.Session{Autocommit: true} @@ -2013,23 +2082,73 @@ func TestOlapSelectDatabase(t *testing.T) { cbInvoked = true return nil } - err := executor.StreamExecute(context.Background(), "TestExecute", NewSafeSession(session), sql, nil, cb) + err := executor.StreamExecute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, nil, cb) assert.NoError(t, err) assert.True(t, cbInvoked) } func TestExecutorClearsWarnings(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, _ := createExecutorEnv(t) + session := NewSafeSession(&vtgatepb.Session{ Warnings: []*querypb.QueryWarning{{Code: 234, Message: "oh noes"}}, }) - _, err := executor.Execute(context.Background(), "TestExecute", session, "select 42", nil) + _, err := executor.Execute(context.Background(), nil, "TestExecute", session, "select 42", nil) require.NoError(t, err) require.Empty(t, session.Warnings) } +// TestServingKeyspaces tests that the dual queries are routed to the correct keyspaces from the list of serving keyspaces. +func TestServingKeyspaces(t *testing.T) { + buffer.SetBufferingModeInTestingEnv(true) + defer func() { + buffer.SetBufferingModeInTestingEnv(false) + }() + + executor, sbc1, _, sbclookup, ctx := createExecutorEnv(t) + + executor.pv = querypb.ExecuteOptions_Gen4 + gw, ok := executor.resolver.resolver.GetGateway().(*TabletGateway) + require.True(t, ok) + hc := gw.hc.(*discovery.FakeHealthCheck) + + // We broadcast twice because we want to ensure the keyspace event watcher has processed all the healthcheck updates + // from the first broadcast. Since we use a channel for broadcasting, it is blocking and hence the second call ensures + // all the updates (specifically the last one) has been processed by the keyspace-event-watcher. + hc.BroadcastAll() + hc.BroadcastAll() + + sbc1.SetResults([]*sqltypes.Result{ + sqltypes.MakeTestResult(sqltypes.MakeTestFields("keyspace", "varchar"), "TestExecutor"), + }) + sbclookup.SetResults([]*sqltypes.Result{ + sqltypes.MakeTestResult(sqltypes.MakeTestFields("keyspace", "varchar"), "TestUnsharded"), + }) + + require.ElementsMatch(t, []string{"TestExecutor", "TestUnsharded"}, gw.GetServingKeyspaces()) + result, err := executor.Execute(ctx, nil, "TestServingKeyspaces", NewSafeSession(&vtgatepb.Session{}), "select keyspace_name from dual", nil) + require.NoError(t, err) + require.Equal(t, `[[VARCHAR("TestExecutor")]]`, fmt.Sprintf("%v", result.Rows)) + + for _, tablet := range hc.GetAllTablets() { + if tablet.Keyspace == "TestExecutor" { + hc.SetServing(tablet, false) + } + } + // Two broadcast calls for the same reason as above. + hc.BroadcastAll() + hc.BroadcastAll() + + // Clear plan cache, to force re-planning of the query. + executor.ClearPlans() + require.ElementsMatch(t, []string{"TestUnsharded"}, gw.GetServingKeyspaces()) + result, err = executor.Execute(ctx, nil, "TestServingKeyspaces", NewSafeSession(&vtgatepb.Session{}), "select keyspace_name from dual", nil) + require.NoError(t, err) + require.Equal(t, `[[VARCHAR("TestUnsharded")]]`, fmt.Sprintf("%v", result.Rows)) +} + func TestExecutorOtherRead(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, _ := createExecutorEnv(t) type cnts struct { Sbc1Cnt int64 @@ -2084,7 +2203,7 @@ func TestExecutorOtherRead(t *testing.T) { sbc2.ExecCount.Store(0) sbclookup.ExecCount.Store(0) - _, err := executor.Execute(context.Background(), "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) + _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) if tc.hasNoKeyspaceErr { assert.EqualError(t, err, errNoKeyspace.Error()) } else if tc.hasDestinationShardErr { @@ -2104,27 +2223,31 @@ func TestExecutorOtherRead(t *testing.T) { } func TestExecutorVExplain(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) + executor.normalize = true - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) bindVars := map[string]*querypb.BindVariable{} - result, err := executorExec(executor, "vexplain plan select * from user", bindVars) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + result, err := executorExec(ctx, executor, session, "vexplain plan select * from user", bindVars) require.NoError(t, err) require.Equal(t, `[[VARCHAR("{\n\t\"OperatorType\": \"Route\",\n\t\"Variant\": \"Scatter\",\n\t\"Keyspace\": {\n\t\t\"Name\": \"TestExecutor\",\n\t\t\"Sharded\": true\n\t},\n\t\"FieldQuery\": \"select * from `+"`user`"+` where 1 != 1\",\n\t\"Query\": \"select * from `+"`user`"+`\",\n\t\"Table\": \"`+"`user`"+`\"\n}")]]`, fmt.Sprintf("%v", result.Rows)) - result, err = executorExec(executor, "vexplain plan select 42", bindVars) + result, err = executorExec(ctx, executor, session, "vexplain plan select 42", bindVars) require.NoError(t, err) expected := `[[VARCHAR("{\n\t\"OperatorType\": \"Projection\",\n\t\"Expressions\": [\n\t\t\"INT64(42) as 42\"\n\t],\n\t\"Inputs\": [\n\t\t{\n\t\t\t\"OperatorType\": \"SingleRow\"\n\t\t}\n\t]\n}")]]` require.Equal(t, expected, fmt.Sprintf("%v", result.Rows)) } func TestExecutorOtherAdmin(t *testing.T) { - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, _ := createExecutorEnv(t) type cnts struct { Sbc1Cnt int64 @@ -2176,7 +2299,7 @@ func TestExecutorOtherAdmin(t *testing.T) { sbc2.ExecCount.Store(0) sbclookup.ExecCount.Store(0) - _, err := executor.Execute(context.Background(), "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) + _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) if tc.hasNoKeyspaceErr { assert.Error(t, err, errNoKeyspace) } else if tc.hasDestinationShardErr { @@ -2198,9 +2321,10 @@ func TestExecutorOtherAdmin(t *testing.T) { } func TestExecutorSavepointInTx(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("TestExecutorSavepoint") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) + + logChan := executor.queryLogger.Subscribe("TestExecutorSavepoint") + defer executor.queryLogger.Unsubscribe(logChan) session := NewSafeSession(&vtgatepb.Session{Autocommit: false, TargetString: "@primary"}) _, err := exec(executor, session, "savepoint a") @@ -2268,21 +2392,22 @@ func TestExecutorSavepointInTx(t *testing.T) { }} utils.MustMatch(t, sbc1WantQueries, sbc1.Queries, "") utils.MustMatch(t, sbc2WantQueries, sbc2.Queries, "") - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT", "savepoint a", 0) - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT_ROLLBACK", "rollback to a", 0) - testQueryLog(t, logChan, "TestExecute", "RELEASE", "release savepoint a", 0) - testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from user where id = 1", 1) - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT", "savepoint b", 1) - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT_ROLLBACK", "rollback to b", 1) - testQueryLog(t, logChan, "TestExecute", "RELEASE", "release savepoint b", 1) - testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from user where id = 3", 1) - testQueryLog(t, logChan, "TestExecute", "ROLLBACK", "rollback", 2) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT", "savepoint a", 0) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT_ROLLBACK", "rollback to a", 0) + testQueryLog(t, executor, logChan, "TestExecute", "RELEASE", "release savepoint a", 0) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user` where id = 1", 1) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT", "savepoint b", 1) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT_ROLLBACK", "rollback to b", 1) + testQueryLog(t, executor, logChan, "TestExecute", "RELEASE", "release savepoint b", 1) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user` where id = 3", 1) + testQueryLog(t, executor, logChan, "TestExecute", "ROLLBACK", "rollback", 2) } func TestExecutorSavepointInTxWithReservedConn(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("TestExecutorSavepoint") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) + + logChan := executor.queryLogger.Subscribe("TestExecutorSavepoint") + defer executor.queryLogger.Unsubscribe(logChan) session := NewSafeSession(&vtgatepb.Session{Autocommit: true, TargetString: "TestExecutor", EnableSystemSettings: true}) sbc1.SetResults([]*sqltypes.Result{ @@ -2335,20 +2460,21 @@ func TestExecutorSavepointInTxWithReservedConn(t *testing.T) { utils.MustMatch(t, sbc1WantQueries, sbc1.Queries, "") utils.MustMatch(t, sbc2WantQueries, sbc2.Queries, "") - testQueryLog(t, logChan, "TestExecute", "SET", "set @@sql_mode = ''", 1) - testQueryLog(t, logChan, "TestExecute", "BEGIN", "begin", 0) - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT", "savepoint a", 0) - testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from user where id = 1", 1) - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT", "savepoint b", 1) - testQueryLog(t, logChan, "TestExecute", "RELEASE", "release savepoint a", 1) - testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from user where id = 3", 1) - testQueryLog(t, logChan, "TestExecute", "COMMIT", "commit", 2) + testQueryLog(t, executor, logChan, "TestExecute", "SET", "set @@sql_mode = ''", 1) + testQueryLog(t, executor, logChan, "TestExecute", "BEGIN", "begin", 0) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT", "savepoint a", 0) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user` where id = 1", 1) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT", "savepoint b", 1) + testQueryLog(t, executor, logChan, "TestExecute", "RELEASE", "release savepoint a", 1) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user` where id = 3", 1) + testQueryLog(t, executor, logChan, "TestExecute", "COMMIT", "commit", 2) } func TestExecutorSavepointWithoutTx(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() - logChan := QueryLogger.Subscribe("TestExecutorSavepoint") - defer QueryLogger.Unsubscribe(logChan) + executor, sbc1, sbc2, _, _ := createExecutorEnv(t) + + logChan := executor.queryLogger.Subscribe("TestExecutorSavepoint") + defer executor.queryLogger.Unsubscribe(logChan) session := NewSafeSession(&vtgatepb.Session{Autocommit: true, TargetString: "@primary", InTransaction: false}) _, err := exec(executor, session, "savepoint a") @@ -2378,18 +2504,18 @@ func TestExecutorSavepointWithoutTx(t *testing.T) { }} utils.MustMatch(t, sbc1WantQueries, sbc1.Queries, "") utils.MustMatch(t, sbc2WantQueries, sbc2.Queries, "") - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT", "savepoint a", 0) - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT_ROLLBACK", "rollback to a", 0) - testQueryLog(t, logChan, "TestExecute", "RELEASE", "release savepoint a", 0) - testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from user where id = 1", 1) - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT", "savepoint b", 0) - testQueryLog(t, logChan, "TestExecute", "SAVEPOINT_ROLLBACK", "rollback to b", 0) - testQueryLog(t, logChan, "TestExecute", "RELEASE", "release savepoint b", 0) - testQueryLog(t, logChan, "TestExecute", "SELECT", "select id from user where id = 3", 1) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT", "savepoint a", 0) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT_ROLLBACK", "rollback to a", 0) + testQueryLog(t, executor, logChan, "TestExecute", "RELEASE", "release savepoint a", 0) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user` where id = 1", 1) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT", "savepoint b", 0) + testQueryLog(t, executor, logChan, "TestExecute", "SAVEPOINT_ROLLBACK", "rollback to b", 0) + testQueryLog(t, executor, logChan, "TestExecute", "RELEASE", "release savepoint b", 0) + testQueryLog(t, executor, logChan, "TestExecute", "SELECT", "select id from `user` where id = 3", 1) } func TestExecutorCallProc(t *testing.T) { - executor, sbc1, sbc2, sbcUnsharded := createExecutorEnv() + executor, sbc1, sbc2, sbcUnsharded, _ := createExecutorEnv(t) type cnts struct { Sbc1Cnt int64 @@ -2435,7 +2561,7 @@ func TestExecutorCallProc(t *testing.T) { sbc2.ExecCount.Store(0) sbcUnsharded.ExecCount.Store(0) - _, err := executor.Execute(context.Background(), "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), "CALL proc()", nil) + _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), "CALL proc()", nil) if tc.hasNoKeyspaceErr { assert.EqualError(t, err, errNoKeyspace.Error()) } else if tc.unshardedOnlyErr { @@ -2454,52 +2580,53 @@ func TestExecutorCallProc(t *testing.T) { } func TestExecutorTempTable(t *testing.T) { - executor, _, _, sbcUnsharded := createExecutorEnv() + executor, _, _, sbcUnsharded, ctx := createExecutorEnv(t) + executor.warnShardedOnly = true creatQuery := "create temporary table temp_t(id bigint primary key)" session := NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}) - ctx := context.Background() - _, err := executor.Execute(ctx, "TestExecutorTempTable", session, creatQuery, nil) + _, err := executor.Execute(ctx, nil, "TestExecutorTempTable", session, creatQuery, nil) require.NoError(t, err) assert.EqualValues(t, 1, sbcUnsharded.ExecCount.Load()) assert.NotEmpty(t, session.Warnings) before := executor.plans.Len() - _, err = executor.Execute(ctx, "TestExecutorTempTable", session, "select * from temp_t", nil) + _, err = executor.Execute(ctx, nil, "TestExecutorTempTable", session, "select * from temp_t", nil) require.NoError(t, err) assert.Equal(t, before, executor.plans.Len()) } func TestExecutorShowVitessMigrations(t *testing.T) { - executor, sbc1, sbc2, _ := createExecutorEnv() + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + showQuery := "show vitess_migrations" session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) - ctx := context.Background() - _, err := executor.Execute(ctx, "", session, showQuery, nil) + _, err := executor.Execute(ctx, nil, "", session, showQuery, nil) require.NoError(t, err) - assert.Contains(t, sbc1.StringQueries(), "SELECT * FROM _vt.schema_migrations") - assert.Contains(t, sbc2.StringQueries(), "SELECT * FROM _vt.schema_migrations") + assert.Contains(t, sbc1.StringQueries(), "show vitess_migrations") + assert.Contains(t, sbc2.StringQueries(), "show vitess_migrations") } func TestExecutorDescHash(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) + showQuery := "desc hash_index" session := NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) - ctx := context.Background() - _, err := executor.Execute(ctx, "", session, showQuery, nil) + _, err := executor.Execute(ctx, nil, "", session, showQuery, nil) require.NoError(t, err) } func TestExecutorVExplainQueries(t *testing.T) { - executor, _, _, sbclookup := createExecutorEnv() + executor, _, _, sbclookup, ctx := createExecutorEnv(t) + session := NewAutocommitSession(&vtgatepb.Session{}) sbclookup.SetResults([]*sqltypes.Result{ sqltypes.MakeTestResult(sqltypes.MakeTestFields("name|user_id", "varchar|int64"), "apa|1", "apa|2"), }) - qr, err := executor.Execute(ctx, "TestExecutorVExplainQueries", session, "vexplain queries select * from user where name = 'apa'", nil) + qr, err := executor.Execute(ctx, nil, "TestExecutorVExplainQueries", session, "vexplain queries select * from user where name = 'apa'", nil) require.NoError(t, err) txt := fmt.Sprintf("%v\n", qr.Rows) lookupQuery := "select `name`, user_id from name_user_map where `name` in" @@ -2508,7 +2635,7 @@ func TestExecutorVExplainQueries(t *testing.T) { // Test the streaming side as well var results []sqltypes.Row session = NewAutocommitSession(&vtgatepb.Session{}) - err = executor.StreamExecute(ctx, "TestExecutorVExplainQueries", session, "vexplain queries select * from user where name = 'apa'", nil, func(result *sqltypes.Result) error { + err = executor.StreamExecute(ctx, nil, "TestExecutorVExplainQueries", session, "vexplain queries select * from user where name = 'apa'", nil, func(result *sqltypes.Result) error { results = append(results, result.Rows...) return nil }) @@ -2518,7 +2645,8 @@ func TestExecutorVExplainQueries(t *testing.T) { } func TestExecutorStartTxnStmt(t *testing.T) { - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) + session := NewAutocommitSession(&vtgatepb.Session{}) tcases := []struct { @@ -2550,20 +2678,220 @@ func TestExecutorStartTxnStmt(t *testing.T) { for _, tcase := range tcases { t.Run(tcase.beginSQL, func(t *testing.T) { - _, err := executor.Execute(ctx, "TestExecutorStartTxnStmt", session, tcase.beginSQL, nil) + _, err := executor.Execute(ctx, nil, "TestExecutorStartTxnStmt", session, tcase.beginSQL, nil) require.NoError(t, err) assert.Equal(t, tcase.expTxAccessMode, session.GetOrCreateOptions().TransactionAccessMode) - _, err = executor.Execute(ctx, "TestExecutorStartTxnStmt", session, "rollback", nil) + _, err = executor.Execute(ctx, nil, "TestExecutorStartTxnStmt", session, "rollback", nil) require.NoError(t, err) }) } } +func TestExecutorPrepareExecute(t *testing.T) { + executor, _, _, _, _ := createExecutorEnv(t) + + executor.normalize = true + session := NewAutocommitSession(&vtgatepb.Session{}) + + // prepare statement. + _, err := executor.Execute(context.Background(), nil, "TestExecutorPrepareExecute", session, "prepare prep_user from 'select * from user where id = ?'", nil) + require.NoError(t, err) + prepData := session.PrepareStatement["prep_user"] + require.NotNil(t, prepData) + require.Equal(t, "select * from `user` where id = :v1", prepData.PrepareStatement) + require.EqualValues(t, 1, prepData.ParamsCount) + + // prepare statement using user defined variable + _, err = executor.Execute(context.Background(), nil, "TestExecutorPrepareExecute", session, "set @udv_query = 'select * from user where id in (?,?,?)'", nil) + require.NoError(t, err) + + _, err = executor.Execute(context.Background(), nil, "TestExecutorPrepareExecute", session, "prepare prep_user2 from @udv_query", nil) + require.NoError(t, err) + prepData = session.PrepareStatement["prep_user2"] + require.NotNil(t, prepData) + require.Equal(t, "select * from `user` where id in (:v1, :v2, :v3)", prepData.PrepareStatement) + require.EqualValues(t, 3, prepData.ParamsCount) + + // syntax error on prepared query + _, err = executor.Execute(context.Background(), nil, "TestExecutorPrepareExecute", session, "prepare prep_user2 from 'select'", nil) + require.Error(t, err) + require.Nil(t, session.PrepareStatement["prep_user2"]) // prepared statement is cleared from the session. + + // user defined variable does not exists on prepared query + _, err = executor.Execute(context.Background(), nil, "TestExecutorPrepareExecute", session, "prepare prep_user from @foo", nil) + require.Error(t, err) + require.Nil(t, session.PrepareStatement["prep_user"]) // prepared statement is cleared from the session. + + // empty prepared query + _, err = executor.Execute(context.Background(), nil, "TestExecutorPrepareExecute", session, "prepare prep_user from ''", nil) + require.Error(t, err) +} + +func TestExecutorTruncateErrors(t *testing.T) { + executor, _, _, _, ctx := createExecutorEnv(t) + + save := truncateErrorLen + truncateErrorLen = 32 + defer func() { truncateErrorLen = save }() + + session := NewSafeSession(&vtgatepb.Session{}) + fn := func(r *sqltypes.Result) error { + return nil + } + + _, err := executor.Execute(ctx, nil, "TestExecute", session, "invalid statement", nil) + assert.EqualError(t, err, "syntax error at posi [TRUNCATED]") + + err = executor.StreamExecute(ctx, nil, "TestExecute", session, "invalid statement", nil, fn) + assert.EqualError(t, err, "syntax error at posi [TRUNCATED]") + + _, err = executor.Prepare(context.Background(), "TestExecute", session, "invalid statement", nil) + assert.EqualError(t, err, "[BUG] unrecognized p [TRUNCATED]") +} + +func TestExecutorFlushStmt(t *testing.T) { + executor, _, _, _, _ := createExecutorEnv(t) + + tcs := []struct { + targetStr string + query string + + expectedErr string + }{{ + targetStr: KsTestUnsharded, + query: "flush status", + }, { + targetStr: KsTestUnsharded, + query: "flush tables user", + }, { + targetStr: "TestUnsharded@replica", + query: "flush tables user", + expectedErr: "VT09012: FLUSH statement with REPLICA tablet not allowed", + }, { + targetStr: "TestUnsharded@replica", + query: "flush binary logs", + expectedErr: "VT09012: FLUSH statement with REPLICA tablet not allowed", + }, { + targetStr: "TestUnsharded@replica", + query: "flush NO_WRITE_TO_BINLOG binary logs", + }, { + targetStr: KsTestUnsharded, + query: "flush NO_WRITE_TO_BINLOG tables user", + }, { + targetStr: "TestUnsharded@replica", + query: "flush LOCAL binary logs", + }, { + targetStr: KsTestUnsharded, + query: "flush LOCAL tables user", + }, { + targetStr: "", + query: "flush LOCAL binary logs", + expectedErr: "VT09005: no database selected", // no database selected error. + }, { + targetStr: "", + query: "flush LOCAL tables user", + }} + + for _, tc := range tcs { + t.Run(tc.query+tc.targetStr, func(t *testing.T) { + _, err := executor.Execute(context.Background(), nil, "TestExecutorFlushStmt", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), tc.query, nil) + if tc.expectedErr == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.ErrorContains(t, err, tc.expectedErr) + } + }) + } +} + +// TestExecutorKillStmt tests the kill statements on executor. +func TestExecutorKillStmt(t *testing.T) { + executor, _, _, _, _ := createExecutorEnv(t) + + tcs := []struct { + errStr string + query string + disallow bool + + expectedLog string + }{{ + query: "kill 42", + expectedLog: "kill connection: 42", + }, { + query: "kill query 42", + expectedLog: "kill query: 42", + }, { + query: "kill 42", + errStr: "connection does not exists: 42", + }, { + query: "kill query 24", + errStr: "connection does not exists: 24", + }, { + query: "kill connection 1", + disallow: true, + errStr: "VT07001: kill statement execution not permitted.", + }, { + query: "kill query 1", + disallow: true, + errStr: "VT07001: kill statement execution not permitted.", + }} + + for _, tc := range tcs { + allowKillStmt = !tc.disallow + t.Run("execute:"+tc.query+tc.errStr, func(t *testing.T) { + mysqlCtx := &fakeMysqlConnection{ErrMsg: tc.errStr} + _, err := executor.Execute(context.Background(), mysqlCtx, "TestExecutorKillStmt", NewAutocommitSession(&vtgatepb.Session{}), tc.query, nil) + if tc.errStr != "" { + require.ErrorContains(t, err, tc.errStr) + } else { + require.NoError(t, err) + require.Equal(t, mysqlCtx.Log[0], tc.expectedLog) + } + }) + t.Run("stream:"+tc.query+tc.errStr, func(t *testing.T) { + mysqlCtx := &fakeMysqlConnection{ErrMsg: tc.errStr} + err := executor.StreamExecute(context.Background(), mysqlCtx, "TestExecutorKillStmt", NewAutocommitSession(&vtgatepb.Session{}), tc.query, nil, func(result *sqltypes.Result) error { + return nil + }) + if tc.errStr != "" { + require.ErrorContains(t, err, tc.errStr) + } else { + require.NoError(t, err) + require.Contains(t, mysqlCtx.Log[0], tc.expectedLog) + } + }) + } +} + +type fakeMysqlConnection struct { + ErrMsg string + Log []string +} + +func (f *fakeMysqlConnection) KillQuery(connID uint32) error { + if f.ErrMsg != "" { + return errors.New(f.ErrMsg) + } + f.Log = append(f.Log, fmt.Sprintf("kill query: %d", connID)) + return nil +} + +func (f *fakeMysqlConnection) KillConnection(ctx context.Context, connID uint32) error { + if f.ErrMsg != "" { + return errors.New(f.ErrMsg) + } + f.Log = append(f.Log, fmt.Sprintf("kill connection: %d", connID)) + return nil +} + +var _ vtgateservice.MySQLConnection = (*fakeMysqlConnection)(nil) + func exec(executor *Executor, session *SafeSession, sql string) (*sqltypes.Result, error) { - return executor.Execute(context.Background(), "TestExecute", session, sql, nil) + return executor.Execute(context.Background(), nil, "TestExecute", session, sql, nil) } func makeComments(text string) sqlparser.MarginComments { diff --git a/go/vt/vtgate/executor_vschema_ddl_test.go b/go/vt/vtgate/executor_vschema_ddl_test.go index 7e71012f128..a7708f3e255 100644 --- a/go/vt/vtgate/executor_vschema_ddl_test.go +++ b/go/vt/vtgate/executor_vschema_ddl_test.go @@ -28,8 +28,6 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "context" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/vtgate/vschemaacl" @@ -96,7 +94,6 @@ func waitForVschemaTables(t *testing.T, ks string, tables []string, executor *Ex return nil } -// nolint func waitForColVindexes(t *testing.T, ks, table string, names []string, executor *Executor) *vschemapb.SrvVSchema { t.Helper() @@ -136,11 +133,11 @@ func TestPlanExecutorAlterVSchemaKeyspace(t *testing.T) { defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true}) vschemaUpdates := make(chan *vschemapb.SrvVSchema, 2) - executor.serv.WatchSrvVSchema(context.Background(), "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { + executor.serv.WatchSrvVSchema(ctx, "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { vschemaUpdates <- vschema return true }) @@ -152,7 +149,7 @@ func TestPlanExecutorAlterVSchemaKeyspace(t *testing.T) { } stmt := "alter vschema create vindex TestExecutor.test_vindex using hash" - _, err := executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _, vindex := waitForVindex(t, "TestExecutor", "test_vindex", vschemaUpdates, executor) @@ -164,11 +161,11 @@ func TestPlanExecutorCreateVindexDDL(t *testing.T) { defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) ks := "TestExecutor" vschemaUpdates := make(chan *vschemapb.SrvVSchema, 4) - executor.serv.WatchSrvVSchema(context.Background(), "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { + executor.serv.WatchSrvVSchema(ctx, "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { vschemaUpdates <- vschema return true }) @@ -181,7 +178,7 @@ func TestPlanExecutorCreateVindexDDL(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) stmt := "alter vschema create vindex test_vindex using hash" - _, err := executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _, vindex := waitForVindex(t, ks, "test_vindex", vschemaUpdates, executor) @@ -189,7 +186,7 @@ func TestPlanExecutorCreateVindexDDL(t *testing.T) { t.Errorf("updated vschema did not contain test_vindex") } - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) wantErr := "vindex test_vindex already exists in keyspace TestExecutor" if err == nil || err.Error() != wantErr { t.Errorf("create duplicate vindex: %v, want %s", err, wantErr) @@ -206,11 +203,11 @@ func TestPlanExecutorDropVindexDDL(t *testing.T) { defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) ks := "TestExecutor" vschemaUpdates := make(chan *vschemapb.SrvVSchema, 4) - executor.serv.WatchSrvVSchema(context.Background(), "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { + executor.serv.WatchSrvVSchema(ctx, "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { vschemaUpdates <- vschema return true }) @@ -223,14 +220,14 @@ func TestPlanExecutorDropVindexDDL(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) stmt := "alter vschema drop vindex test_vindex" - _, err := executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) wantErr := "vindex test_vindex does not exists in keyspace TestExecutor" if err == nil || err.Error() != wantErr { t.Errorf("want error %v got %v", wantErr, err) } stmt = "alter vschema drop vindex TestExecutor.test_vindex" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) wantErr = "vindex test_vindex does not exists in keyspace TestExecutor" if err == nil || err.Error() != wantErr { t.Errorf("want error %v got %v", wantErr, err) @@ -238,7 +235,7 @@ func TestPlanExecutorDropVindexDDL(t *testing.T) { // add one vindex that has never been used by the tables stmt = "alter vschema create vindex test_vindex using hash" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _, vindex := waitForVindex(t, ks, "test_vindex", vschemaUpdates, executor) @@ -248,7 +245,7 @@ func TestPlanExecutorDropVindexDDL(t *testing.T) { // drop an existing vindex that has never been used by the tables stmt = "alter vschema drop vindex TestExecutor.test_vindex" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) vschema = <-vschemaUpdates _, ok = vschema.Keyspaces[ks].Vindexes["test_vindex"] @@ -258,7 +255,7 @@ func TestPlanExecutorDropVindexDDL(t *testing.T) { // drop an existing vindex that is used by at least one table stmt = "alter vschema drop vindex TestExecutor.keyspace_id" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) wantErr = "can not drop vindex cause keyspace_id still defined on table ksid_table" if err == nil || err.Error() != wantErr { t.Errorf("drop vindex still defined: %v, want %s", err, wantErr) @@ -275,11 +272,11 @@ func TestPlanExecutorAddDropVschemaTableDDL(t *testing.T) { defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, sbc1, sbc2, sbclookup := createExecutorEnv() + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) ks := KsTestUnsharded vschemaUpdates := make(chan *vschemapb.SrvVSchema, 4) - executor.serv.WatchSrvVSchema(context.Background(), "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { + executor.serv.WatchSrvVSchema(ctx, "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { vschemaUpdates <- vschema return true }) @@ -297,19 +294,19 @@ func TestPlanExecutorAddDropVschemaTableDDL(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) stmt := "alter vschema add table test_table" - _, err := executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _ = waitForVschemaTables(t, ks, append([]string{"test_table"}, vschemaTables...), executor) stmt = "alter vschema add table test_table2" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _ = waitForVschemaTables(t, ks, append([]string{"test_table", "test_table2"}, vschemaTables...), executor) // Should fail adding a table on a sharded keyspace session = NewSafeSession(&vtgatepb.Session{TargetString: "TestExecutor"}) stmt = "alter vschema add table test_table" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) wantErr := "add vschema table: unsupported on sharded keyspace TestExecutor" if err == nil || err.Error() != wantErr { t.Errorf("want error %v got %v", wantErr, err) @@ -332,7 +329,7 @@ func TestExecutorAddSequenceDDL(t *testing.T) { defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) ks := KsTestUnsharded vschema := executor.vm.GetCurrentSrvVschema() @@ -344,7 +341,7 @@ func TestExecutorAddSequenceDDL(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) stmt := "alter vschema add sequence test_seq" - _, err := executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _ = waitForVschemaTables(t, ks, append(vschemaTables, []string{"test_seq"}...), executor) vschema = executor.vm.GetCurrentSrvVschema() @@ -359,7 +356,7 @@ func TestExecutorAddSequenceDDL(t *testing.T) { session = NewSafeSession(&vtgatepb.Session{TargetString: ksSharded}) stmt = "alter vschema add sequence sequence_table" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) wantErr := "add sequence table: unsupported on sharded keyspace TestExecutor" if err == nil || err.Error() != wantErr { @@ -368,13 +365,13 @@ func TestExecutorAddSequenceDDL(t *testing.T) { // Should be able to add autoincrement to table in sharded keyspace stmt = "alter vschema on test_table add vindex hash_index (id)" - if _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil); err != nil { + if _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil); err != nil { t.Error(err) } time.Sleep(10 * time.Millisecond) stmt = "alter vschema on test_table add auto_increment id using `db-name`.`test_seq`" - if _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil); err != nil { + if _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil); err != nil { t.Error(err) } time.Sleep(10 * time.Millisecond) @@ -392,11 +389,11 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { defer func() { vschemaacl.AuthorizedDDLUsers = "" }() - executor, sbc1, sbc2, sbclookup := createExecutorEnv() // nolint + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) ks := "TestExecutor" session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) vschemaUpdates := make(chan *vschemapb.SrvVSchema, 4) - executor.serv.WatchSrvVSchema(context.Background(), "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { + executor.serv.WatchSrvVSchema(ctx, "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { vschemaUpdates <- vschema return true }) @@ -407,14 +404,14 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { // Create a new vindex implicitly with the statement stmt := "alter vschema on test add vindex test_hash (id) using hash " - _, err := executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _, vindex := waitForVindex(t, ks, "test_hash", vschemaUpdates, executor) require.Equal(t, "hash", vindex.Type) _ = waitForColVindexes(t, ks, "test", []string{"test_hash"}, executor) - qr, err := executor.Execute(context.Background(), "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) + qr, err := executor.Execute(ctx, nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) require.NoError(t, err) wantqr := &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -426,17 +423,17 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { // Drop it stmt = "alter vschema on test drop vindex test_hash" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _, _ = waitForVindex(t, ks, "test_hash", vschemaUpdates, executor) _ = waitForColVindexes(t, ks, "test", []string{}, executor) - _, err = executor.Execute(context.Background(), "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) require.EqualError(t, err, "VT05005: table 'test' does not exist in keyspace 'TestExecutor'") // add it again using the same syntax stmt = "alter vschema on test add vindex test_hash (id) using hash " - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) _, vindex = waitForVindex(t, ks, "test_hash", vschemaUpdates, executor) @@ -444,7 +441,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { _ = waitForColVindexes(t, ks, "test", []string{"test_hash"}, executor) - qr, err = executor.Execute(context.Background(), "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -457,7 +454,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { // add another stmt = "alter vschema on test add vindex test_lookup (c1,c2) using lookup with owner=`test`, from=`c1,c2`, table=test_lookup, to=keyspace_id" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) vschema, vindex = waitForVindex(t, ks, "test_lookup", vschemaUpdates, executor) @@ -474,7 +471,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { t.Fatalf("table test not defined in vschema") } - qr, err = executor.Execute(context.Background(), "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -486,7 +483,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { utils.MustMatch(t, wantqr, qr) stmt = "alter vschema on test add vindex test_hash_id2 (id2) using hash" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) vschema, vindex = waitForVindex(t, ks, "test_hash_id2", vschemaUpdates, executor) @@ -503,7 +500,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { t.Fatalf("table test not defined in vschema") } - qr, err = executor.Execute(context.Background(), "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -517,13 +514,13 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { // drop one stmt = "alter vschema on test drop vindex test_lookup" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) // wait for up to 50ms for it to disappear deadline := time.Now().Add(50 * time.Millisecond) for { - qr, err = executor.Execute(context.Background(), "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test", nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -544,7 +541,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { // use the newly created vindex on a new table stmt = "alter vschema on test2 add vindex test_hash (id)" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) vschema, vindex = waitForVindex(t, ks, "test_hash", vschemaUpdates, executor) @@ -557,7 +554,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { // create an identical vindex definition on a different table stmt = "alter vschema on test2 add vindex test_lookup (c1,c2) using lookup with owner=`test`, from=`c1,c2`, table=test_lookup, to=keyspace_id" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) vschema, vindex = waitForVindex(t, ks, "test_lookup", vschemaUpdates, executor) @@ -568,7 +565,7 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { require.Len(t, table.ColumnVindexes, 2) require.Equal(t, "test_lookup", table.ColumnVindexes[1].Name) - qr, err = executor.Execute(context.Background(), "TestExecute", session, "show vschema vindexes on TestExecutor.test2", nil) + qr, err = executor.Execute(ctx, nil, "TestExecute", session, "show vschema vindexes on TestExecutor.test2", nil) require.NoError(t, err) wantqr = &sqltypes.Result{ Fields: buildVarCharFields("Columns", "Name", "Type", "Params", "Owner"), @@ -579,36 +576,48 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { } utils.MustMatch(t, wantqr, qr) + // now make sure we can create another vindex that references a table with dashes (i.e. escaping is necessary) + stmt = "alter vschema on test2 add vindex test_lookup_fqn(c1,c2) using consistent_lookup_unique with owner=`test`, from=`c1,c2`, table=`test-keyspace`.`lookup-fqn`, to=`keyspace_id`" + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) + require.NoError(t, err) + + _, vindex = waitForVindex(t, ks, "test_lookup_fqn", vschemaUpdates, executor) + require.Equal(t, "consistent_lookup_unique", vindex.Type) + require.Equal(t, "test", vindex.Owner) + require.Equal(t, "c1,c2", vindex.Params["from"]) + require.Equal(t, "`test-keyspace`.`lookup-fqn`", vindex.Params["table"]) + require.Equal(t, "keyspace_id", vindex.Params["to"]) + stmt = "alter vschema on test2 add vindex nonexistent (c1,c2)" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, "vindex nonexistent does not exist in keyspace TestExecutor") stmt = "alter vschema on test2 add vindex test_hash (c1,c2) using lookup" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, "vindex test_hash defined with type hash not lookup") stmt = "alter vschema on test2 add vindex test_lookup (c1,c2) using lookup with owner=xyz" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, "vindex test_lookup defined with owner test not xyz") stmt = "alter vschema on test2 add vindex test_lookup (c1,c2) using lookup with owner=`test`, foo=bar" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, "vindex test_lookup defined with different parameters") stmt = "alter vschema on nonexistent drop vindex test_lookup" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, "table TestExecutor.nonexistent not defined in vschema") stmt = "alter vschema on nonexistent drop vindex test_lookup" - _, err = executor.Execute(context.Background(), "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: "InvalidKeyspace"}), stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: "InvalidKeyspace"}), stmt, nil) require.EqualError(t, err, "VT05003: unknown database 'InvalidKeyspace' in vschema") stmt = "alter vschema on nowhere.nohow drop vindex test_lookup" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, "VT05003: unknown database 'nowhere' in vschema") stmt = "alter vschema on test drop vindex test_lookup" - _, err = executor.Execute(context.Background(), "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, "vindex test_lookup not defined in table TestExecutor.test") // no queries should have gone to any tablets @@ -623,30 +632,30 @@ func TestExecutorAddDropVindexDDL(t *testing.T) { func TestPlanExecutorVindexDDLACL(t *testing.T) { // t.Skip("not yet planned") - executor, _, _, _ := createExecutorEnv() + executor, _, _, _, ctx := createExecutorEnv(t) ks := "TestExecutor" session := NewSafeSession(&vtgatepb.Session{TargetString: ks}) - ctxRedUser := callerid.NewContext(context.Background(), &vtrpcpb.CallerID{}, &querypb.VTGateCallerID{Username: "redUser"}) - ctxBlueUser := callerid.NewContext(context.Background(), &vtrpcpb.CallerID{}, &querypb.VTGateCallerID{Username: "blueUser"}) + ctxRedUser := callerid.NewContext(ctx, &vtrpcpb.CallerID{}, &querypb.VTGateCallerID{Username: "redUser"}) + ctxBlueUser := callerid.NewContext(ctx, &vtrpcpb.CallerID{}, &querypb.VTGateCallerID{Username: "blueUser"}) // test that by default no users can perform the operation stmt := "alter vschema create vindex test_hash using hash" - _, err := executor.Execute(ctxRedUser, "TestExecute", session, stmt, nil) + _, err := executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, `User 'redUser' is not authorized to perform vschema operations`) - _, err = executor.Execute(ctxBlueUser, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxBlueUser, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, `User 'blueUser' is not authorized to perform vschema operations`) // test when all users are enabled vschemaacl.AuthorizedDDLUsers = "%" vschemaacl.Init() - _, err = executor.Execute(ctxRedUser, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) if err != nil { t.Errorf("unexpected error '%v'", err) } stmt = "alter vschema create vindex test_hash2 using hash" - _, err = executor.Execute(ctxBlueUser, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxBlueUser, nil, "TestExecute", session, stmt, nil) if err != nil { t.Errorf("unexpected error '%v'", err) } @@ -654,11 +663,11 @@ func TestPlanExecutorVindexDDLACL(t *testing.T) { // test when only one user is enabled vschemaacl.AuthorizedDDLUsers = "orangeUser, blueUser, greenUser" vschemaacl.Init() - _, err = executor.Execute(ctxRedUser, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxRedUser, nil, "TestExecute", session, stmt, nil) require.EqualError(t, err, `User 'redUser' is not authorized to perform vschema operations`) stmt = "alter vschema create vindex test_hash3 using hash" - _, err = executor.Execute(ctxBlueUser, "TestExecute", session, stmt, nil) + _, err = executor.Execute(ctxBlueUser, nil, "TestExecute", session, stmt, nil) if err != nil { t.Errorf("unexpected error '%v'", err) } diff --git a/go/vt/vtgate/executor_vstream_test.go b/go/vt/vtgate/executor_vstream_test.go index 828cdea2d40..5466e9e8f3f 100644 --- a/go/vt/vtgate/executor_vstream_test.go +++ b/go/vt/vtgate/executor_vstream_test.go @@ -28,8 +28,6 @@ import ( vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - "context" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -38,10 +36,10 @@ import ( // TestVStreamSQLUnsharded tests the experimental 'vstream * from' vtgate olap query func TestVStreamSQLUnsharded(t *testing.T) { - t.Skip("this test is failing due to races") //FIXME - executor, _, _, sbcLookup := createExecutorEnv() - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + t.Skip("this test is failing due to races") // FIXME + executor, _, _, sbcLookup, ctx := createExecutorEnv(t) + logChan := executor.queryLogger.Subscribe("Test") + defer executor.queryLogger.Unsubscribe(logChan) send1 := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_GTID, Gtid: "gtid01"}, {Type: binlogdatapb.VEventType_FIELD, FieldEvent: &binlogdatapb.FieldEvent{TableName: "t1", Fields: []*querypb.Field{ @@ -77,20 +75,11 @@ func TestVStreamSQLUnsharded(t *testing.T) { sql := "vstream * from t1" results := make(chan *sqltypes.Result, 20) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() go func() { - err := executor.StreamExecute( - ctx, - "TestExecuteStream", - NewAutocommitSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), - sql, - nil, - func(qr *sqltypes.Result) error { - results <- qr - return nil - }, - ) + err := executor.StreamExecute(ctx, nil, "TestExecuteStream", NewAutocommitSession(&vtgatepb.Session{TargetString: KsTestUnsharded}), sql, nil, func(qr *sqltypes.Result) error { + results <- qr + return nil + }) require.NoError(t, err) }() timer := time.NewTimer(5 * time.Second) diff --git a/go/vt/vtgate/fakerpcvtgateconn/conn.go b/go/vt/vtgate/fakerpcvtgateconn/conn.go index fb711820912..442c8997979 100644 --- a/go/vt/vtgate/fakerpcvtgateconn/conn.go +++ b/go/vt/vtgate/fakerpcvtgateconn/conn.go @@ -109,7 +109,7 @@ func (conn *FakeVTGateConn) ExecuteBatch(ctx context.Context, session *vtgatepb. } // StreamExecute please see vtgateconn.Impl.StreamExecute -func (conn *FakeVTGateConn) StreamExecute(ctx context.Context, session *vtgatepb.Session, sql string, bindVars map[string]*querypb.BindVariable) (sqltypes.ResultStream, error) { +func (conn *FakeVTGateConn) StreamExecute(ctx context.Context, session *vtgatepb.Session, sql string, bindVars map[string]*querypb.BindVariable, _ func(response *vtgatepb.StreamExecuteResponse)) (sqltypes.ResultStream, error) { response, ok := conn.execMap[sql] if !ok { return nil, fmt.Errorf("no match for: %s", sql) diff --git a/go/vt/vtgate/grpcvtgateconn/conn.go b/go/vt/vtgate/grpcvtgateconn/conn.go index 4988f44b392..a681e3661cd 100644 --- a/go/vt/vtgate/grpcvtgateconn/conn.go +++ b/go/vt/vtgate/grpcvtgateconn/conn.go @@ -156,7 +156,16 @@ type streamExecuteAdapter struct { } func (a *streamExecuteAdapter) Recv() (*sqltypes.Result, error) { - qr, err := a.recv() + var qr *querypb.QueryResult + var err error + for { + qr, err = a.recv() + if qr != nil || err != nil { + break + } + // we reach here, only when it is the last packet. + // as in the last packet we receive the session and there is no result + } if err != nil { return nil, vterrors.FromGRPC(err) } @@ -166,7 +175,7 @@ func (a *streamExecuteAdapter) Recv() (*sqltypes.Result, error) { return sqltypes.CustomProto3ToResult(a.fields, qr), nil } -func (conn *vtgateConn) StreamExecute(ctx context.Context, session *vtgatepb.Session, query string, bindVars map[string]*querypb.BindVariable) (sqltypes.ResultStream, error) { +func (conn *vtgateConn) StreamExecute(ctx context.Context, session *vtgatepb.Session, query string, bindVars map[string]*querypb.BindVariable, processResponse func(response *vtgatepb.StreamExecuteResponse)) (sqltypes.ResultStream, error) { req := &vtgatepb.StreamExecuteRequest{ CallerId: callerid.EffectiveCallerIDFromContext(ctx), Query: &querypb.BoundQuery{ @@ -185,6 +194,7 @@ func (conn *vtgateConn) StreamExecute(ctx context.Context, session *vtgatepb.Ses if err != nil { return nil, err } + processResponse(ser) return ser.Result, nil }, }, nil diff --git a/go/vt/vtgate/grpcvtgateconn/suite_test.go b/go/vt/vtgate/grpcvtgateconn/suite_test.go index 02d73dc0a81..e5cd5c3ac81 100644 --- a/go/vt/vtgate/grpcvtgateconn/suite_test.go +++ b/go/vt/vtgate/grpcvtgateconn/suite_test.go @@ -22,30 +22,27 @@ package grpcvtgateconn // moved back to its own package for reusability. import ( + "context" "errors" "fmt" "io" "strings" "testing" - "google.golang.org/protobuf/proto" - - "context" - "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/callerid" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/vtgateconn" - "vitess.io/vitess/go/vt/vtgate/vtgateservice" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vtgateconn" + "vitess.io/vitess/go/vt/vtgate/vtgateservice" ) // fakeVTGateService has the server side of this fake @@ -95,7 +92,7 @@ func (q *queryExecute) equal(q2 *queryExecute) bool { } // Execute is part of the VTGateService interface -func (f *fakeVTGateService) Execute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { +func (f *fakeVTGateService) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) { if f.hasError { return session, nil, errTestVtGateError } @@ -156,13 +153,13 @@ func (f *fakeVTGateService) ExecuteBatch(ctx context.Context, session *vtgatepb. } // StreamExecute is part of the VTGateService interface -func (f *fakeVTGateService) StreamExecute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) error { +func (f *fakeVTGateService) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { if f.panics { panic(fmt.Errorf("test forced panic")) } execCase, ok := execMap[sql] if !ok { - return fmt.Errorf("no match for: %s", sql) + return session, fmt.Errorf("no match for: %s", sql) } f.checkCallerID(ctx, "StreamExecute") query := &queryExecute{ @@ -172,32 +169,32 @@ func (f *fakeVTGateService) StreamExecute(ctx context.Context, session *vtgatepb } if !query.equal(execCase.execQuery) { f.t.Errorf("StreamExecute:\n%+v, want\n%+v", query, execCase.execQuery) - return nil + return session, nil } if execCase.result != nil { result := &sqltypes.Result{ Fields: execCase.result.Fields, } if err := callback(result); err != nil { - return err + return execCase.outSession, err } if f.hasError { // wait until the client has the response, since all streaming implementation may not // send previous messages if an error has been triggered. <-f.errorWait f.errorWait = make(chan struct{}) // for next test - return errTestVtGateError + return execCase.outSession, errTestVtGateError } for _, row := range execCase.result.Rows { result := &sqltypes.Result{ Rows: [][]sqltypes.Value{row}, } if err := callback(result); err != nil { - return err + return execCase.outSession, err } } } - return nil + return execCase.outSession, nil } // Prepare is part of the VTGateService interface @@ -504,9 +501,9 @@ func testPrepare(t *testing.T, session *vtgateconn.VTGateSession) { execCase := execMap["request1"] _, err := session.Prepare(ctx, execCase.execQuery.SQL, execCase.execQuery.BindVariables) require.NoError(t, err) - //if !qr.Equal(execCase.result) { + // if !qr.Equal(execCase.result) { // t.Errorf("Unexpected result from Execute: got\n%#v want\n%#v", qr, execCase.result) - //} + // } _, err = session.Prepare(ctx, "none", nil) require.EqualError(t, err, "no match for: none") diff --git a/go/vt/vtgate/grpcvtgateservice/server.go b/go/vt/vtgate/grpcvtgateservice/server.go index d012786d6eb..7baff6cefe8 100644 --- a/go/vt/vtgate/grpcvtgateservice/server.go +++ b/go/vt/vtgate/grpcvtgateservice/server.go @@ -28,17 +28,16 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/callinfo" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate" - "vitess.io/vitess/go/vt/vtgate/vtgateservice" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtgateservicepb "vitess.io/vitess/go/vt/proto/vtgateservice" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate" + "vitess.io/vitess/go/vt/vtgate/vtgateservice" ) const ( @@ -46,13 +45,15 @@ const ( ) var ( - useEffective bool - useEffectiveGroups bool + useEffective bool + useEffectiveGroups bool + useStaticAuthenticationIdentity bool ) func registerFlags(fs *pflag.FlagSet) { fs.BoolVar(&useEffective, "grpc_use_effective_callerid", false, "If set, and SSL is not used, will set the immediate caller id from the effective caller id's principal.") fs.BoolVar(&useEffectiveGroups, "grpc-use-effective-groups", false, "If set, and SSL is not used, will set the immediate caller's security groups from the effective caller id's groups.") + fs.BoolVar(&useStaticAuthenticationIdentity, "grpc-use-static-authentication-callerid", false, "If set, will set the immediate caller id to the username authenticated by the static auth plugin.") } func init() { @@ -94,23 +95,35 @@ func immediateCallerIDFromCert(ctx context.Context) (string, []string) { return cert.Subject.CommonName, cert.DNSNames } -func immediateCallerID(ctx context.Context) (string, []string) { +// immediateCallerIdFromStaticAuthentication extracts the username of the current +// static authentication context and returns that to the caller. +func immediateCallerIdFromStaticAuthentication(ctx context.Context) (string, []string) { if immediate := servenv.StaticAuthUsernameFromContext(ctx); immediate != "" { return immediate, nil } - return immediateCallerIDFromCert(ctx) + + return "", nil } // withCallerIDContext creates a context that extracts what we need // from the incoming call and can be forwarded for use when talking to vttablet. func withCallerIDContext(ctx context.Context, effectiveCallerID *vtrpcpb.CallerID) context.Context { - immediate, securityGroups := immediateCallerID(ctx) + // The client cert common name (if using mTLS) + immediate, securityGroups := immediateCallerIDFromCert(ctx) + + // The effective caller id (if --grpc_use_effective_callerid=true) if immediate == "" && useEffective && effectiveCallerID != nil { immediate = effectiveCallerID.Principal if useEffectiveGroups && len(effectiveCallerID.Groups) > 0 { securityGroups = effectiveCallerID.Groups } } + + // The static auth username (if --grpc-use-static-authentication-callerid=true) + if immediate == "" && useStaticAuthenticationIdentity { + immediate, securityGroups = immediateCallerIdFromStaticAuthentication(ctx) + } + if immediate == "" { immediate = unsecureClient } @@ -129,7 +142,7 @@ func (vtg *VTGate) Execute(ctx context.Context, request *vtgatepb.ExecuteRequest if session == nil { session = &vtgatepb.Session{Autocommit: true} } - session, result, err := vtg.server.Execute(ctx, session, request.Query.Sql, request.Query.BindVariables) + session, result, err := vtg.server.Execute(ctx, nil, session, request.Query.Sql, request.Query.BindVariables) return &vtgatepb.ExecuteResponse{ Result: sqltypes.ResultToProto3(result), Session: session, @@ -171,14 +184,28 @@ func (vtg *VTGate) StreamExecute(request *vtgatepb.StreamExecuteRequest, stream session = &vtgatepb.Session{Autocommit: true} } - vtgErr := vtg.server.StreamExecute(ctx, session, request.Query.Sql, request.Query.BindVariables, func(value *sqltypes.Result) error { + session, vtgErr := vtg.server.StreamExecute(ctx, nil, session, request.Query.Sql, request.Query.BindVariables, func(value *sqltypes.Result) error { // Send is not safe to call concurrently, but vtgate // guarantees that it's not. return stream.Send(&vtgatepb.StreamExecuteResponse{ Result: sqltypes.ResultToProto3(value), }) }) - return vterrors.ToGRPC(vtgErr) + + // even if there is an error, session could have been modified. + // So, this needs to be sent back to the client. Session is sent in the last stream response. + lastErr := stream.Send(&vtgatepb.StreamExecuteResponse{ + Session: session, + }) + + var errs []error + if vtgErr != nil { + errs = append(errs, vtgErr) + } + if lastErr != nil { + errs = append(errs, lastErr) + } + return vterrors.ToGRPC(vterrors.Aggregate(errs)) } // Prepare is the RPC version of vtgateservice.VTGateService method diff --git a/go/vt/vtgate/legacy_scatter_conn_test.go b/go/vt/vtgate/legacy_scatter_conn_test.go index 601b07f94c6..7ada1c3ac31 100644 --- a/go/vt/vtgate/legacy_scatter_conn_test.go +++ b/go/vt/vtgate/legacy_scatter_conn_test.go @@ -26,6 +26,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/discovery" @@ -42,10 +44,11 @@ import ( // This file uses the sandbox_test framework. func TestLegacyExecuteFailOnAutocommit(t *testing.T) { + ctx := utils.LeakCheckContext(t) createSandbox("TestExecuteFailOnAutocommit") hc := discovery.NewFakeHealthCheck(nil) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 := hc.AddTestTablet("aa", "0", 1, "TestExecuteFailOnAutocommit", "0", topodatapb.TabletType_PRIMARY, true, 1, nil) sbc1 := hc.AddTestTablet("aa", "1", 1, "TestExecuteFailOnAutocommit", "1", topodatapb.TabletType_PRIMARY, true, 1, nil) @@ -105,8 +108,8 @@ func TestLegacyExecuteFailOnAutocommit(t *testing.T) { } func TestScatterConnExecuteMulti(t *testing.T) { - testScatterConnGeneric(t, "TestScatterConnExecuteMultiShard", func(sc *ScatterConn, shards []string) (*sqltypes.Result, error) { - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + testScatterConnGeneric(t, "TestScatterConnExecuteMultiShard", func(ctx context.Context, sc *ScatterConn, shards []string) (*sqltypes.Result, error) { + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") rss, err := res.ResolveDestination(ctx, "TestScatterConnExecuteMultiShard", topodatapb.TabletType_REPLICA, key.DestinationShards(shards)) if err != nil { return nil, err @@ -126,8 +129,8 @@ func TestScatterConnExecuteMulti(t *testing.T) { } func TestScatterConnStreamExecuteMulti(t *testing.T) { - testScatterConnGeneric(t, "TestScatterConnStreamExecuteMulti", func(sc *ScatterConn, shards []string) (*sqltypes.Result, error) { - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + testScatterConnGeneric(t, "TestScatterConnStreamExecuteMulti", func(ctx context.Context, sc *ScatterConn, shards []string) (*sqltypes.Result, error) { + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") rss, err := res.ResolveDestination(ctx, "TestScatterConnStreamExecuteMulti", topodatapb.TabletType_REPLICA, key.DestinationShards(shards)) if err != nil { return nil, err @@ -153,13 +156,15 @@ func verifyScatterConnError(t *testing.T, err error, wantErr string, wantCode vt assert.Equal(t, wantCode, vterrors.Code(err)) } -func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, shards []string) (*sqltypes.Result, error)) { +func testScatterConnGeneric(t *testing.T, name string, f func(ctx context.Context, sc *ScatterConn, shards []string) (*sqltypes.Result, error)) { + ctx := utils.LeakCheckContext(t) + hc := discovery.NewFakeHealthCheck(nil) // no shard s := createSandbox(name) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") - qr, err := f(sc, nil) + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") + qr, err := f(ctx, sc, nil) require.NoError(t, err) if qr.RowsAffected != 0 { t.Errorf("want 0, got %v", qr.RowsAffected) @@ -167,10 +172,10 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s // single shard s.Reset() - sc = newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc = newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc := hc.AddTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 - _, err = f(sc, []string{"0"}) + _, err = f(ctx, sc, []string{"0"}) want := fmt.Sprintf("target: %v.0.replica: INVALID_ARGUMENT error", name) // Verify server error string. if err == nil || err.Error() != want { @@ -184,12 +189,12 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s // two shards s.Reset() hc.Reset() - sc = newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc = newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 := hc.AddTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc1 := hc.AddTestTablet("aa", "1", 1, name, "1", topodatapb.TabletType_REPLICA, true, 1, nil) sbc0.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 sbc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 - _, err = f(sc, []string{"0", "1"}) + _, err = f(ctx, sc, []string{"0", "1"}) // Verify server errors are consolidated. want = fmt.Sprintf("target: %v.0.replica: INVALID_ARGUMENT error\ntarget: %v.1.replica: INVALID_ARGUMENT error", name, name) verifyScatterConnError(t, err, want, vtrpcpb.Code_INVALID_ARGUMENT) @@ -204,12 +209,12 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s // two shards with different errors s.Reset() hc.Reset() - sc = newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc = newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 = hc.AddTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc1 = hc.AddTestTablet("aa", "1", 1, name, "1", topodatapb.TabletType_REPLICA, true, 1, nil) sbc0.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 sbc1.MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 1 - _, err = f(sc, []string{"0", "1"}) + _, err = f(ctx, sc, []string{"0", "1"}) // Verify server errors are consolidated. want = fmt.Sprintf("target: %v.0.replica: INVALID_ARGUMENT error\ntarget: %v.1.replica: RESOURCE_EXHAUSTED error", name, name) // We should only surface the higher priority error code @@ -225,9 +230,9 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s // duplicate shards s.Reset() hc.Reset() - sc = newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc = newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc = hc.AddTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil) - _, _ = f(sc, []string{"0", "0"}) + _, _ = f(ctx, sc, []string{"0", "0"}) // Ensure that we executed only once. if execCount := sbc.ExecCount.Load(); execCount != 1 { t.Errorf("want 1, got %v", execCount) @@ -236,10 +241,10 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s // no errors s.Reset() hc.Reset() - sc = newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc = newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 = hc.AddTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc1 = hc.AddTestTablet("aa", "1", 1, name, "1", topodatapb.TabletType_REPLICA, true, 1, nil) - qr, err = f(sc, []string{"0", "1"}) + qr, err = f(ctx, sc, []string{"0", "1"}) if err != nil { t.Fatalf("want nil, got %v", err) } @@ -258,17 +263,19 @@ func testScatterConnGeneric(t *testing.T, name string, f func(sc *ScatterConn, s } func TestMaxMemoryRows(t *testing.T) { + ctx := utils.LeakCheckContext(t) + save := maxMemoryRows maxMemoryRows = 3 defer func() { maxMemoryRows = save }() createSandbox("TestMaxMemoryRows") hc := discovery.NewFakeHealthCheck(nil) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 := hc.AddTestTablet("aa", "0", 1, "TestMaxMemoryRows", "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc1 := hc.AddTestTablet("aa", "1", 1, "TestMaxMemoryRows", "1", topodatapb.TabletType_REPLICA, true, 1, nil) - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") rss, _, err := res.ResolveDestinations(ctx, "TestMaxMemoryRows", topodatapb.TabletType_REPLICA, nil, []key.Destination{key.DestinationShard("0"), key.DestinationShard("1")}) require.NoError(t, err) @@ -313,12 +320,13 @@ func TestMaxMemoryRows(t *testing.T) { } func TestLegaceHealthCheckFailsOnReservedConnections(t *testing.T) { + ctx := utils.LeakCheckContext(t) keyspace := "keyspace" createSandbox(keyspace) hc := discovery.NewFakeHealthCheck(nil) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") session := NewSafeSession(&vtgatepb.Session{InTransaction: false, InReservedConn: true}) destinations := []key.Destination{key.DestinationShard("0")} @@ -338,12 +346,12 @@ func TestLegaceHealthCheckFailsOnReservedConnections(t *testing.T) { require.Error(t, vterrors.Aggregate(errs)) } -func executeOnShards(t *testing.T, res *srvtopo.Resolver, keyspace string, sc *ScatterConn, session *SafeSession, destinations []key.Destination) { +func executeOnShards(t *testing.T, ctx context.Context, res *srvtopo.Resolver, keyspace string, sc *ScatterConn, session *SafeSession, destinations []key.Destination) { t.Helper() - require.Empty(t, executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations)) + require.Empty(t, executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations)) } -func executeOnShardsReturnsErr(t *testing.T, res *srvtopo.Resolver, keyspace string, sc *ScatterConn, session *SafeSession, destinations []key.Destination) error { +func executeOnShardsReturnsErr(t *testing.T, ctx context.Context, res *srvtopo.Resolver, keyspace string, sc *ScatterConn, session *SafeSession, destinations []key.Destination) error { t.Helper() rss, _, err := res.ResolveDestinations(ctx, keyspace, topodatapb.TabletType_REPLICA, nil, destinations) require.NoError(t, err) @@ -362,9 +370,10 @@ func executeOnShardsReturnsErr(t *testing.T, res *srvtopo.Resolver, keyspace str } func TestMultiExecs(t *testing.T) { + ctx := utils.LeakCheckContext(t) createSandbox("TestMultiExecs") hc := discovery.NewFakeHealthCheck(nil) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 := hc.AddTestTablet("aa", "0", 1, "TestMultiExecs", "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc1 := hc.AddTestTablet("aa", "1", 1, "TestMultiExecs", "1", topodatapb.TabletType_REPLICA, true, 1, nil) @@ -458,15 +467,16 @@ func TestMultiExecs(t *testing.T) { } func TestScatterConnSingleDB(t *testing.T) { + ctx := utils.LeakCheckContext(t) createSandbox("TestScatterConnSingleDB") hc := discovery.NewFakeHealthCheck(nil) hc.Reset() - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") hc.AddTestTablet("aa", "0", 1, "TestScatterConnSingleDB", "0", topodatapb.TabletType_PRIMARY, true, 1, nil) hc.AddTestTablet("aa", "1", 1, "TestScatterConnSingleDB", "1", topodatapb.TabletType_PRIMARY, true, 1, nil) - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") rss0, err := res.ResolveDestination(ctx, "TestScatterConnSingleDB", topodatapb.TabletType_PRIMARY, key.DestinationShard("0")) require.NoError(t, err) rss1, err := res.ResolveDestination(ctx, "TestScatterConnSingleDB", topodatapb.TabletType_PRIMARY, key.DestinationShard("1")) @@ -509,7 +519,7 @@ func TestAppendResult(t *testing.T) { } innerqr2 := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "foo", Type: sqltypes.Int8}, + {Name: "foo", Type: sqltypes.Int8, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, RowsAffected: 1, InsertID: 1, @@ -551,10 +561,11 @@ func TestAppendResult(t *testing.T) { } func TestReservePrequeries(t *testing.T) { + ctx := utils.LeakCheckContext(t) keyspace := "keyspace" createSandbox(keyspace) hc := discovery.NewFakeHealthCheck(nil) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 := hc.AddTestTablet("aa", "0", 1, keyspace, "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc1 := hc.AddTestTablet("aa", "1", 1, keyspace, "1", topodatapb.TabletType_REPLICA, true, 1, nil) @@ -562,7 +573,7 @@ func TestReservePrequeries(t *testing.T) { sbc0.SetResults([]*sqltypes.Result{{}}) sbc1.SetResults([]*sqltypes.Result{{}}) - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") session := NewSafeSession(&vtgatepb.Session{ InTransaction: false, @@ -574,11 +585,11 @@ func TestReservePrequeries(t *testing.T) { }) destinations := []key.Destination{key.DestinationShard("0")} - executeOnShards(t, res, keyspace, sc, session, destinations) + executeOnShards(t, ctx, res, keyspace, sc, session, destinations) assert.Equal(t, 1+1, len(sbc0.StringQueries())) } -func newTestScatterConn(hc discovery.HealthCheck, serv srvtopo.Server, cell string) *ScatterConn { +func newTestScatterConn(ctx context.Context, hc discovery.HealthCheck, serv srvtopo.Server, cell string) *ScatterConn { // The topo.Server is used to start watching the cells described // in '-cells_to_watch' command line parameter, which is // empty by default. So it's unused in this test, set to nil. @@ -586,5 +597,3 @@ func newTestScatterConn(hc discovery.HealthCheck, serv srvtopo.Server, cell stri tc := NewTxConn(gw, vtgatepb.TransactionMode_TWOPC) return NewScatterConn("", tc, gw) } - -var ctx = context.Background() diff --git a/go/vt/vtgate/logstats/logstats.go b/go/vt/vtgate/logstats/logstats.go index 1d598e5c5e2..5ea7820a72e 100644 --- a/go/vt/vtgate/logstats/logstats.go +++ b/go/vt/vtgate/logstats/logstats.go @@ -20,11 +20,12 @@ import ( "context" "encoding/json" "fmt" - "html/template" "io" "net/url" "time" + "github.com/google/safehtml" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/tb" @@ -99,7 +100,7 @@ func (stats *LogStats) TotalTime() time.Duration { // ContextHTML returns the HTML version of the context that was used, or "". // This is a method on LogStats instead of a field so that it doesn't need // to be passed by value everywhere. -func (stats *LogStats) ContextHTML() template.HTML { +func (stats *LogStats) ContextHTML() safehtml.HTML { return callinfo.HTMLFromContext(stats.Ctx) } diff --git a/go/vt/vtgate/logstats/logstats_test.go b/go/vt/vtgate/logstats/logstats_test.go index 17b250d3ded..dbe49b200b8 100644 --- a/go/vt/vtgate/logstats/logstats_test.go +++ b/go/vt/vtgate/logstats/logstats_test.go @@ -29,6 +29,7 @@ import ( "testing" "time" + "github.com/google/safehtml/testconversions" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -194,12 +195,12 @@ func TestLogStatsRowThreshold(t *testing.T) { func TestLogStatsContextHTML(t *testing.T) { html := "HtmlContext" callInfo := &fakecallinfo.FakeCallInfo{ - Html: html, + Html: testconversions.MakeHTMLForTest(html), } ctx := callinfo.NewContext(context.Background(), callInfo) logStats := NewLogStats(ctx, "test", "sql1", "", map[string]*querypb.BindVariable{}) - if string(logStats.ContextHTML()) != html { - t.Fatalf("expect to get html: %s, but got: %s", html, string(logStats.ContextHTML())) + if logStats.ContextHTML().String() != html { + t.Fatalf("expect to get html: %s, but got: %s", html, logStats.ContextHTML().String()) } } diff --git a/go/vt/vtgate/mysql_protocol_test.go b/go/vt/vtgate/mysql_protocol_test.go deleted file mode 100644 index dcd12de4bb4..00000000000 --- a/go/vt/vtgate/mysql_protocol_test.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vtgate - -import ( - "net" - "strconv" - "testing" - - "vitess.io/vitess/go/test/utils" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "context" - - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/vttablet/sandboxconn" - - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -func TestMySQLProtocolExecute(t *testing.T) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - - c, err := mysqlConnect(&mysql.ConnParams{}) - if err != nil { - t.Fatal(err) - } - defer c.Close() - - qr, err := c.ExecuteFetch("select id from t1", 10, true /* wantfields */) - require.NoError(t, err) - utils.MustMatch(t, sandboxconn.SingleRowResult, qr, "mismatch in rows") - - options := &querypb.ExecuteOptions{ - IncludedFields: querypb.ExecuteOptions_ALL, - Workload: querypb.ExecuteOptions_OLTP, - } - if !proto.Equal(sbc.Options[0], options) { - t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc.Options[0], options) - } -} - -func TestMySQLProtocolStreamExecute(t *testing.T) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - - c, err := mysqlConnect(&mysql.ConnParams{}) - if err != nil { - t.Fatal(err) - } - defer c.Close() - - _, err = c.ExecuteFetch("set workload='olap'", 1, true /* wantfields */) - require.NoError(t, err) - - qr, err := c.ExecuteFetch("select id from t1", 10, true /* wantfields */) - require.NoError(t, err) - utils.MustMatch(t, sandboxconn.SingleRowResult, qr, "mismatch in rows") - - options := &querypb.ExecuteOptions{ - IncludedFields: querypb.ExecuteOptions_ALL, - Workload: querypb.ExecuteOptions_OLAP, - } - if !proto.Equal(sbc.Options[0], options) { - t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc.Options[0], options) - } -} - -func TestMySQLProtocolExecuteUseStatement(t *testing.T) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - - c, err := mysqlConnect(&mysql.ConnParams{DbName: "@primary"}) - if err != nil { - t.Fatal(err) - } - defer c.Close() - - qr, err := c.ExecuteFetch("select id from t1", 10, true /* wantfields */) - require.NoError(t, err) - utils.MustMatch(t, sandboxconn.SingleRowResult, qr) - - qr, err = c.ExecuteFetch("show vitess_target", 1, false) - require.NoError(t, err) - assert.Equal(t, "VARCHAR(\"@primary\")", qr.Rows[0][0].String()) - - _, err = c.ExecuteFetch("use TestUnsharded", 0, false) - require.NoError(t, err) - - qr, err = c.ExecuteFetch("select id from t1", 10, true /* wantfields */) - require.NoError(t, err) - utils.MustMatch(t, sandboxconn.SingleRowResult, qr) - - // No such keyspace this will fail - _, err = c.ExecuteFetch("use InvalidKeyspace", 0, false) - require.Error(t, err) - assert.Contains(t, err.Error(), "VT05003: unknown database 'InvalidKeyspace' in vschema (errno 1049) (sqlstate 42000)") - - // That doesn't reset the vitess_target - qr, err = c.ExecuteFetch("show vitess_target", 1, false) - require.NoError(t, err) - assert.Equal(t, "VARCHAR(\"TestUnsharded\")", qr.Rows[0][0].String()) - - _, err = c.ExecuteFetch("use @replica", 0, false) - require.NoError(t, err) - - // No replica tablets, this should also fail - _, err = c.ExecuteFetch("select id from t1", 10, true /* wantfields */) - require.Error(t, err) - assert.Contains(t, err.Error(), `no healthy tablet available for 'keyspace:"TestUnsharded" shard:"0" tablet_type:REPLICA`) -} - -func TestMysqlProtocolInvalidDB(t *testing.T) { - _, err := mysqlConnect(&mysql.ConnParams{DbName: "invalidDB"}) - require.EqualError(t, err, "VT05003: unknown database 'invalidDB' in vschema (errno 1049) (sqlstate 42000)") -} - -func TestMySQLProtocolClientFoundRows(t *testing.T) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - - c, err := mysqlConnect(&mysql.ConnParams{Flags: mysql.CapabilityClientFoundRows}) - if err != nil { - t.Fatal(err) - } - defer c.Close() - - qr, err := c.ExecuteFetch("select id from t1", 10, true /* wantfields */) - require.NoError(t, err) - utils.MustMatch(t, sandboxconn.SingleRowResult, qr) - - options := &querypb.ExecuteOptions{ - IncludedFields: querypb.ExecuteOptions_ALL, - ClientFoundRows: true, - Workload: querypb.ExecuteOptions_OLTP, - } - - if !proto.Equal(sbc.Options[0], options) { - t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc.Options[0], options) - } -} - -// mysqlConnect fills the host & port into params and connects -// to the mysql protocol port. -func mysqlConnect(params *mysql.ConnParams) (*mysql.Conn, error) { - host, port, err := net.SplitHostPort(mysqlListener.Addr().String()) - if err != nil { - return nil, err - } - portnum, _ := strconv.Atoi(port) - params.Host = host - params.Port = portnum - return mysql.Connect(context.Background(), params) -} diff --git a/go/vt/vtgate/plan_execute.go b/go/vt/vtgate/plan_execute.go index 91a451ece4d..5d2414ac275 100644 --- a/go/vt/vtgate/plan_execute.go +++ b/go/vt/vtgate/plan_execute.go @@ -18,23 +18,46 @@ package vtgate import ( "context" + "fmt" + "strings" "time" - "vitess.io/vitess/go/vt/vtgate/logstats" - "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/logstats" + "vitess.io/vitess/go/vt/vtgate/vtgateservice" ) type planExec func(ctx context.Context, plan *engine.Plan, vc *vcursorImpl, bindVars map[string]*querypb.BindVariable, startTime time.Time) error type txResult func(sqlparser.StatementType, *sqltypes.Result) error +func waitForNewerVSchema(ctx context.Context, e *Executor, lastVSchemaCreated time.Time) bool { + timeout := 30 * time.Second + pollingInterval := 10 * time.Millisecond + waitCtx, cancel := context.WithTimeout(ctx, timeout) + ticker := time.NewTicker(pollingInterval) + defer ticker.Stop() + defer cancel() + for { + select { + case <-waitCtx.Done(): + return false + case <-ticker.C: + if e.VSchema().GetCreated().After(lastVSchemaCreated) { + return true + } + } + } +} + func (e *Executor) newExecute( ctx context.Context, + mysqlCtx vtgateservice.MySQLConnection, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, @@ -55,57 +78,102 @@ func (e *Executor) newExecute( } query, comments := sqlparser.SplitMarginComments(sql) - vcursor, err := newVCursorImpl(safeSession, comments, e, logStats, e.vm, e.VSchema(), e.resolver.resolver, e.serv, e.warnShardedOnly, e.pv) + + // 2: Parse and Validate query + stmt, reservedVars, err := parseAndValidateQuery(query) if err != nil { return err } - // 2: Create a plan for the query - plan, stmt, err := e.getPlan(ctx, vcursor, query, comments, bindVars, safeSession, logStats) - execStart := e.logPlanningFinished(logStats, plan) + var lastVSchemaCreated time.Time + vs := e.VSchema() + lastVSchemaCreated = vs.GetCreated() + for try := 0; try < MaxBufferingRetries; try++ { + if try > 0 && !vs.GetCreated().After(lastVSchemaCreated) { + // There is a race due to which the executor's vschema may not have been updated yet. + // Without a wait we fail non-deterministically since the previous vschema will not have the updated routing rules + if waitForNewerVSchema(ctx, e, lastVSchemaCreated) { + vs = e.VSchema() + } + } - if err != nil { - safeSession.ClearWarnings() - return err - } + vcursor, err := newVCursorImpl(safeSession, comments, e, logStats, e.vm, vs, e.resolver.resolver, e.serv, e.warnShardedOnly, e.pv) + if err != nil { + return err + } - if plan.Type != sqlparser.StmtShow { - safeSession.ClearWarnings() - } + // 3: Create a plan for the query + // If we are retrying, it is likely that the routing rules have changed and hence we need to + // replan the query since the target keyspace of the resolved shards may have changed as a + // result of MoveTables. So we cannot reuse the plan from the first try. + // When buffering ends, many queries might be getting planned at the same time. Ideally we + // should be able to reuse plans once the first drained query has been planned. For now, we + // punt on this and choose not to prematurely optimize since it is not clear how much caching + // will help and if it will result in hard-to-track edge cases. + + var plan *engine.Plan + plan, err = e.getPlan(ctx, vcursor, query, stmt, comments, bindVars, reservedVars, e.normalize, logStats) + execStart := e.logPlanningFinished(logStats, plan) + + if err != nil { + safeSession.ClearWarnings() + return err + } - // add any warnings that the planner wants to add - for _, warning := range plan.Warnings { - safeSession.RecordWarning(warning) - } + if plan.Type != sqlparser.StmtShow { + safeSession.ClearWarnings() + } - result, err := e.handleTransactions(ctx, safeSession, plan, logStats, vcursor, stmt) - if err != nil { - return err - } - if result != nil { - return recResult(plan.Type, result) - } + // add any warnings that the planner wants to add + for _, warning := range plan.Warnings { + safeSession.RecordWarning(warning) + } - // 3: Prepare for execution - err = e.addNeededBindVars(plan.BindVarNeeds, bindVars, safeSession) - if err != nil { - logStats.Error = err - return err - } + result, err := e.handleTransactions(ctx, mysqlCtx, safeSession, plan, logStats, vcursor, stmt) + if err != nil { + return err + } + if result != nil { + return recResult(plan.Type, result) + } - if plan.Instructions.NeedsTransaction() { - return e.insideTransaction(ctx, safeSession, logStats, - func() error { - return execPlan(ctx, plan, vcursor, bindVars, execStart) - }) - } + // 4: Prepare for execution + err = e.addNeededBindVars(vcursor, plan.BindVarNeeds, bindVars, safeSession) + if err != nil { + logStats.Error = err + return err + } + + // 5: Execute the plan and retry if needed + if plan.Instructions.NeedsTransaction() { + err = e.insideTransaction(ctx, safeSession, logStats, + func() error { + return execPlan(ctx, plan, vcursor, bindVars, execStart) + }) + } else { + err = execPlan(ctx, plan, vcursor, bindVars, execStart) + } + + if err == nil || safeSession.InTransaction() { + return err + } - return execPlan(ctx, plan, vcursor, bindVars, execStart) + rootCause := vterrors.RootCause(err) + if rootCause != nil && strings.Contains(rootCause.Error(), "enforce denied tables") { + log.V(2).Infof("Retry: %d, will retry query %s due to %v", try, query, err) + lastVSchemaCreated = vs.GetCreated() + continue + } + + return err + } + return vterrors.New(vtrpcpb.Code_INTERNAL, fmt.Sprintf("query %s failed after retries: %v ", query, err)) } // handleTransactions deals with transactional queries: begin, commit, rollback and savepoint management func (e *Executor) handleTransactions( ctx context.Context, + mysqlCtx vtgateservice.MySQLConnection, safeSession *SafeSession, plan *engine.Plan, logStats *logstats.LogStats, @@ -142,6 +210,8 @@ func (e *Executor) handleTransactions( return nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.SPDoesNotExist, "SAVEPOINT does not exist: %s", query) }, vcursor.ignoreMaxMemoryRows) return qr, err + case sqlparser.StmtKill: + return e.handleKill(ctx, mysqlCtx, stmt, logStats) } return nil, nil } @@ -234,25 +304,37 @@ func (e *Executor) rollbackExecIfNeeded(ctx context.Context, safeSession *SafeSe // If it fails to rollback to the previous savepoint then, the transaction is forced to be rolled back. func (e *Executor) rollbackPartialExec(ctx context.Context, safeSession *SafeSession, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) error { var err error + var errMsg strings.Builder + + // If the context got cancelled we still have to revert the partial DML execution. + // We cannot use the parent context here anymore. + if ctx.Err() != nil { + errMsg.WriteString("context canceled: ") + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + } // needs to rollback only once. rQuery := safeSession.rollbackOnPartialExec if rQuery != txRollback { safeSession.SavepointRollback() - _, _, err := e.execute(ctx, safeSession, rQuery, bindVars, logStats) + _, _, err = e.execute(ctx, nil, safeSession, rQuery, bindVars, logStats) + // If no error, the revert is successful with the savepoint. Notify the reason as error to the client. if err == nil { - return vterrors.New(vtrpcpb.Code_ABORTED, "reverted partial DML execution failure") + errMsg.WriteString("reverted partial DML execution failure") + return vterrors.New(vtrpcpb.Code_ABORTED, errMsg.String()) } // not able to rollback changes of the failed query, so have to abort the complete transaction. } // abort the transaction. _ = e.txConn.Rollback(ctx, safeSession) - var errMsg = "transaction rolled back to reverse changes of partial DML execution" + errMsg.WriteString("transaction rolled back to reverse changes of partial DML execution") if err != nil { - return vterrors.Wrap(err, errMsg) + return vterrors.Wrap(err, errMsg.String()) } - return vterrors.New(vtrpcpb.Code_ABORTED, errMsg) + return vterrors.New(vtrpcpb.Code_ABORTED, errMsg.String()) } func (e *Executor) setLogStats(logStats *logstats.LogStats, plan *engine.Plan, vcursor *vcursorImpl, execStart time.Time, err error, qr *sqltypes.Result) { diff --git a/go/vt/vtgate/planbuilder/aggregation_pushing.go b/go/vt/vtgate/planbuilder/aggregation_pushing.go index 15367f9e3e8..677fabbac18 100644 --- a/go/vt/vtgate/planbuilder/aggregation_pushing.go +++ b/go/vt/vtgate/planbuilder/aggregation_pushing.go @@ -22,7 +22,7 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" + popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) @@ -44,12 +44,12 @@ func (hp *horizonPlanning) pushAggregation( err error) { pushed = true switch plan := plan.(type) { - case *routeGen4: + case *route: output = plan groupingOffsets, outputAggrsOffset, _, err = pushAggrOnRoute(ctx, plan, aggregations, grouping, ignoreOutputOrder) return - case *joinGen4: + case *join: output = plan groupingOffsets, outputAggrsOffset, err = hp.pushAggrOnJoin(ctx, plan, grouping, aggregations) return @@ -69,7 +69,7 @@ func (hp *horizonPlanning) pushAggregation( pushed = false for _, grp := range grouping { - offset, wOffset, err := wrapAndPushExpr(ctx, grp.Inner, grp.WeightStrExpr, plan.input) + offset, wOffset, err := wrapAndPushExpr(ctx, grp.Inner, grp.SimplifiedExpr, plan.input) if err != nil { return nil, nil, nil, false, err } @@ -112,7 +112,7 @@ func (hp *horizonPlanning) pushAggregation( func pushAggrOnRoute( ctx *plancontext.PlanningContext, - plan *routeGen4, + plan *route, aggregations []operators.Aggr, grouping []operators.GroupBy, ignoreOutputOrder bool, @@ -166,8 +166,8 @@ func pushAggrOnRoute( pos = newOffset(groupingCols[idx]) } - if expr.WeightStrExpr != nil && ctx.SemTable.NeedsWeightString(expr.Inner) { - wsExpr := weightStringFor(expr.WeightStrExpr) + if expr.SimplifiedExpr != nil && ctx.SemTable.NeedsWeightString(expr.Inner) { + wsExpr := weightStringFor(expr.SimplifiedExpr) wsCol, _, err := addExpressionToRoute(ctx, plan, &sqlparser.AliasedExpr{Expr: wsExpr}, true) if err != nil { return nil, nil, nil, err @@ -184,7 +184,7 @@ func pushAggrOnRoute( func pushAggrsAndGroupingInOrder( ctx *plancontext.PlanningContext, - plan *routeGen4, + plan *route, it *sortedIterator, sel *sqlparser.Select, vtgateAggregation [][]offsets, @@ -228,12 +228,8 @@ func addAggregationToSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Sel func countStarAggr() *operators.Aggr { f := &sqlparser.CountStar{} - - return &operators.Aggr{ - Original: &sqlparser.AliasedExpr{Expr: f}, - OpCode: engine.AggregateCountStar, - Alias: "count(*)", - } + aggr := operators.NewAggr(popcode.AggregateCountStar, f, &sqlparser.AliasedExpr{Expr: f}, "count(*)") + return &aggr } /* @@ -250,7 +246,7 @@ vtgate level, we can offload most of the work to MySQL, and at the vtgate just s */ func (hp *horizonPlanning) pushAggrOnJoin( ctx *plancontext.PlanningContext, - join *joinGen4, + join *join, grouping []operators.GroupBy, aggregations []operators.Aggr, ) ([]offsets, [][]offsets, error) { @@ -287,7 +283,7 @@ func (hp *horizonPlanning) pushAggrOnJoin( return nil, nil, err } l = sqlparser.NewIntLiteral(strconv.Itoa(offset + 1)) - rhsGrouping = append(rhsGrouping, operators.GroupBy{Inner: l}) + rhsGrouping = append(rhsGrouping, operators.NewGroupBy(l, nil, nil)) } // Next we push the aggregations to both sides @@ -420,23 +416,23 @@ func (hp *horizonPlanning) filteredPushAggregation( return newplan, groupingOffsets, outputAggrs, pushed, nil } -func isMinOrMax(in engine.AggregateOpcode) bool { +func isMinOrMax(in popcode.AggregateOpcode) bool { switch in { - case engine.AggregateMin, engine.AggregateMax: + case popcode.AggregateMin, popcode.AggregateMax: return true default: return false } } -func isRandom(in engine.AggregateOpcode) bool { - return in == engine.AggregateRandom +func isAnyValue(in popcode.AggregateOpcode) bool { + return in == popcode.AggregateAnyValue } func splitAggregationsToLeftAndRight( ctx *plancontext.PlanningContext, aggregations []operators.Aggr, - join *joinGen4, + join *join, ) ([]*operators.Aggr, []*operators.Aggr, error) { var lhsAggrs, rhsAggrs []*operators.Aggr for _, aggr := range aggregations { @@ -448,7 +444,7 @@ func splitAggregationsToLeftAndRight( deps := ctx.SemTable.RecursiveDeps(aggr.Original.Expr) var other *operators.Aggr // if we are sending down min/max/random, we don't have to multiply the results with anything - if !isMinOrMax(aggr.OpCode) && !isRandom(aggr.OpCode) { + if !isMinOrMax(aggr.OpCode) && !isAnyValue(aggr.OpCode) { other = countStarAggr() } switch { @@ -468,7 +464,7 @@ func splitAggregationsToLeftAndRight( func splitGroupingsToLeftAndRight( ctx *plancontext.PlanningContext, - join *joinGen4, + join *join, grouping, lhsGrouping []operators.GroupBy, ) ([]operators.GroupBy, []operators.GroupBy, []int, error) { var rhsGrouping []operators.GroupBy diff --git a/go/vt/vtgate/planbuilder/builder.go b/go/vt/vtgate/planbuilder/builder.go index 1181c9b2c8b..0b090e2472e 100644 --- a/go/vt/vtgate/planbuilder/builder.go +++ b/go/vt/vtgate/planbuilder/builder.go @@ -17,40 +17,33 @@ limitations under the License. package planbuilder import ( + "context" "fmt" "sort" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/sqltypes" - querypb "vitess.io/vitess/go/vt/proto/query" - - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/log" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/semantics" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/vindexes" ) const ( - // V3 is also the default planner - V3 = querypb.ExecuteOptions_V3 // Gen4 uses the default Gen4 planner, which is the greedy planner Gen4 = querypb.ExecuteOptions_Gen4 // Gen4GreedyOnly uses only the faster greedy planner Gen4GreedyOnly = querypb.ExecuteOptions_Gen4Greedy - // Gen4Left2Right tries to emulate the V3 planner by only joining plans in the order they are listed in the FROM-clause + // Gen4Left2Right joins table in the order they are listed in the FROM-clause Gen4Left2Right = querypb.ExecuteOptions_Gen4Left2Right - // Gen4WithFallback first attempts to use the Gen4 planner, and if that fails, uses the V3 planner instead - Gen4WithFallback = querypb.ExecuteOptions_Gen4WithFallback - // Gen4CompareV3 executes queries on both Gen4 and V3 to compare their results. - Gen4CompareV3 = querypb.ExecuteOptions_Gen4CompareV3 ) var ( - plannerVersions = []plancontext.PlannerVersion{V3, Gen4, Gen4GreedyOnly, Gen4Left2Right, Gen4WithFallback, Gen4CompareV3} + plannerVersions = []plancontext.PlannerVersion{Gen4, Gen4GreedyOnly, Gen4Left2Right} ) type ( @@ -74,24 +67,6 @@ func singleTable(ks, tbl string) string { return fmt.Sprintf("%s.%s", ks, tbl) } -func tablesFromSemantics(semTable *semantics.SemTable) []string { - tables := make(map[string]any, len(semTable.Tables)) - for _, info := range semTable.Tables { - vindexTable := info.GetVindexTable() - if vindexTable == nil { - continue - } - tables[vindexTable.String()] = nil - } - - names := make([]string, 0, len(tables)) - for tbl := range tables { - names = append(names, tbl) - } - sort.Strings(names) - return names -} - // TestBuilder builds a plan for a query based on the specified vschema. // This method is only used from tests func TestBuilder(query string, vschema plancontext.VSchema, keyspace string) (*engine.Plan, error) { @@ -105,12 +80,12 @@ func TestBuilder(query string, vschema plancontext.VSchema, keyspace string) (*e } reservedVars := sqlparser.NewReservedVars("vtg", reserved) - return BuildFromStmt(query, result.AST, reservedVars, vschema, result.BindVarNeeds, true, true) + return BuildFromStmt(context.Background(), query, result.AST, reservedVars, vschema, result.BindVarNeeds, true, true) } // BuildFromStmt builds a plan based on the AST provided. -func BuildFromStmt(query string, stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, bindVarNeeds *sqlparser.BindVarNeeds, enableOnlineDDL, enableDirectDDL bool) (*engine.Plan, error) { - planResult, err := createInstructionFor(query, stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) +func BuildFromStmt(ctx context.Context, query string, stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, bindVarNeeds *sqlparser.BindVarNeeds, enableOnlineDDL, enableDirectDDL bool) (*engine.Plan, error) { + planResult, err := createInstructionFor(ctx, query, stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) if err != nil { return nil, err } @@ -131,54 +106,20 @@ func BuildFromStmt(query string, stmt sqlparser.Statement, reservedVars *sqlpars return plan, nil } -func getConfiguredPlanner(vschema plancontext.VSchema, v3planner func(string) stmtPlanner, stmt sqlparser.Statement, query string) (stmtPlanner, error) { - planner, ok := getPlannerFromQuery(stmt) - if !ok { +func getConfiguredPlanner(vschema plancontext.VSchema, stmt sqlparser.Statement, query string) (stmtPlanner, error) { + planner, found := getPlannerFromQueryHint(stmt) + if !found { // if the query doesn't specify the planner, we check what the configuration is planner = vschema.Planner() } switch planner { - case Gen4CompareV3: - return gen4CompareV3Planner(query), nil - case Gen4Left2Right, Gen4GreedyOnly: - return gen4Planner(query, planner), nil - case Gen4WithFallback: - fp := &fallbackPlanner{ - primary: gen4Planner(query, querypb.ExecuteOptions_Gen4), - fallback: v3planner(query), - } - return fp.plan, nil - case V3: - return v3planner(query), nil + case Gen4Left2Right, Gen4GreedyOnly, Gen4: default: // default is gen4 plan - return gen4Planner(query, Gen4), nil + log.Infof("Using Gen4 planner instead of %s", planner.String()) + planner = Gen4 } -} - -// getPlannerFromQuery chooses the planner to use based on the query -// The default planner can be overridden using /*vt+ PLANNER=gen4 */ -// We will also fall back on the gen4 planner if we encounter outer join, -// since there are known problems with the v3 planner and outer joins -func getPlannerFromQuery(stmt sqlparser.Statement) (version plancontext.PlannerVersion, found bool) { - version, found = getPlannerFromQueryHint(stmt) - if found { - return - } - - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - join, ok := node.(*sqlparser.JoinTableExpr) - if ok { - if join.Join == sqlparser.LeftJoinType || join.Join == sqlparser.RightJoinType { - version = querypb.ExecuteOptions_Gen4 - found = true - return false, nil - } - } - return true, nil - }, stmt) - - return + return gen4Planner(query, planner), nil } func getPlannerFromQueryHint(stmt sqlparser.Statement) (plancontext.PlannerVersion, bool) { @@ -202,38 +143,42 @@ func buildRoutePlan(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVa return f(stmt, reservedVars, vschema) } -func createInstructionFor(query string, stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) { +func createInstructionFor(ctx context.Context, query string, stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) { switch stmt := stmt.(type) { case *sqlparser.Select: - configuredPlanner, err := getConfiguredPlanner(vschema, buildSelectPlan, stmt, query) + configuredPlanner, err := getConfiguredPlanner(vschema, stmt, query) if err != nil { return nil, err } return buildRoutePlan(stmt, reservedVars, vschema, configuredPlanner) case *sqlparser.Insert: - return buildRoutePlan(stmt, reservedVars, vschema, buildInsertPlan) + configuredPlanner, err := getConfiguredPlanner(vschema, stmt, query) + if err != nil { + return nil, err + } + return buildRoutePlan(stmt, reservedVars, vschema, configuredPlanner) case *sqlparser.Update: - configuredPlanner, err := getConfiguredPlanner(vschema, buildUpdatePlan, stmt, query) + configuredPlanner, err := getConfiguredPlanner(vschema, stmt, query) if err != nil { return nil, err } return buildRoutePlan(stmt, reservedVars, vschema, configuredPlanner) case *sqlparser.Delete: - configuredPlanner, err := getConfiguredPlanner(vschema, buildDeletePlan, stmt, query) + configuredPlanner, err := getConfiguredPlanner(vschema, stmt, query) if err != nil { return nil, err } return buildRoutePlan(stmt, reservedVars, vschema, configuredPlanner) case *sqlparser.Union: - configuredPlanner, err := getConfiguredPlanner(vschema, buildUnionPlan, stmt, query) + configuredPlanner, err := getConfiguredPlanner(vschema, stmt, query) if err != nil { return nil, err } return buildRoutePlan(stmt, reservedVars, vschema, configuredPlanner) case sqlparser.DDLStatement: - return buildGeneralDDLPlan(query, stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) + return buildGeneralDDLPlan(ctx, query, stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) case *sqlparser.AlterMigration: - return buildAlterMigrationPlan(query, vschema, enableOnlineDDL) + return buildAlterMigrationPlan(query, stmt, vschema, enableOnlineDDL) case *sqlparser.RevertMigration: return buildRevertMigrationPlan(query, stmt, vschema, enableOnlineDDL) case *sqlparser.ShowMigrationLogs: @@ -247,9 +192,9 @@ func createInstructionFor(query string, stmt sqlparser.Statement, reservedVars * case *sqlparser.Use: return buildUsePlan(stmt) case sqlparser.Explain: - return buildExplainPlan(stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) + return buildExplainPlan(ctx, stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) case *sqlparser.VExplainStmt: - return buildVExplainPlan(stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) + return buildVExplainPlan(ctx, stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) case *sqlparser.OtherRead, *sqlparser.OtherAdmin: return buildOtherReadAndAdmin(query, vschema) case *sqlparser.Set: @@ -258,7 +203,9 @@ func createInstructionFor(query string, stmt sqlparser.Statement, reservedVars * return buildLoadPlan(query, vschema) case sqlparser.DBDDLStatement: return buildRoutePlan(stmt, reservedVars, vschema, buildDBDDLPlan) - case *sqlparser.Begin, *sqlparser.Commit, *sqlparser.Rollback, *sqlparser.Savepoint, *sqlparser.SRollback, *sqlparser.Release: + case *sqlparser.Begin, *sqlparser.Commit, *sqlparser.Rollback, + *sqlparser.Savepoint, *sqlparser.SRollback, *sqlparser.Release, + *sqlparser.Kill: // Empty by design. Not executed by a plan return nil, nil case *sqlparser.Show: @@ -275,6 +222,12 @@ func createInstructionFor(query string, stmt sqlparser.Statement, reservedVars * return buildStreamPlan(stmt, vschema) case *sqlparser.VStream: return buildVStreamPlan(stmt, vschema) + case *sqlparser.PrepareStmt: + return prepareStmt(ctx, vschema, stmt) + case *sqlparser.DeallocateStmt: + return dropPreparedStatement(vschema, stmt) + case *sqlparser.ExecuteStmt: + return buildExecuteStmtPlan(ctx, vschema, stmt) case *sqlparser.CommentOnly: // There is only a comment in the input. // This is essentially a No-op @@ -364,13 +317,20 @@ func buildFlushPlan(stmt *sqlparser.Flush, vschema plancontext.VSchema) (*planRe } func buildFlushOptions(stmt *sqlparser.Flush, vschema plancontext.VSchema) (*planResult, error) { - dest, keyspace, _, err := vschema.TargetDestination("") + if !stmt.IsLocal && vschema.TabletType() != topodatapb.TabletType_PRIMARY { + return nil, vterrors.VT09012("FLUSH", vschema.TabletType().String()) + } + + keyspace, err := vschema.DefaultKeyspace() if err != nil { return nil, err } + + dest := vschema.Destination() if dest == nil { dest = key.DestinationAllShards{} } + tc := &tableCollector{} for _, tbl := range stmt.TableNames { tc.addASTTable(keyspace.Name, tbl) @@ -386,6 +346,9 @@ func buildFlushOptions(stmt *sqlparser.Flush, vschema plancontext.VSchema) (*pla } func buildFlushTables(stmt *sqlparser.Flush, vschema plancontext.VSchema) (*planResult, error) { + if !stmt.IsLocal && vschema.TabletType() != topodatapb.TabletType_PRIMARY { + return nil, vterrors.VT09012("FLUSH", vschema.TabletType().String()) + } tc := &tableCollector{} type sendDest struct { ks *vindexes.Keyspace @@ -401,20 +364,18 @@ func buildFlushTables(stmt *sqlparser.Flush, vschema plancontext.VSchema) (*plan var keys []sendDest for i, tab := range stmt.TableNames { var ksTab *vindexes.Keyspace - var table *vindexes.Table - var err error - table, _, _, _, _, err = vschema.FindTableOrVindex(tab) + tbl, _, _, _, _, err := vschema.FindTableOrVindex(tab) if err != nil { return nil, err } - if table == nil { + if tbl == nil { return nil, vindexes.NotFoundError{TableName: tab.Name.String()} } - tc.addTable(table.Keyspace.Name, table.Name.String()) - ksTab = table.Keyspace + tc.addTable(tbl.Keyspace.Name, tbl.Name.String()) + ksTab = tbl.Keyspace stmt.TableNames[i] = sqlparser.TableName{ - Name: table.Name, + Name: tbl.Name, } key := sendDest{ksTab, dest} @@ -477,26 +438,6 @@ func (tc *tableCollector) getTables() []string { return tableNames } -func (tc *tableCollector) addVindexTable(t *vindexes.Table) { - if t == nil { - return - } - ks, tbl := "", t.Name.String() - if t.Keyspace != nil { - ks = t.Keyspace.Name - } - tc.addTable(ks, tbl) -} - -func (tc *tableCollector) addAllTables(tables []string) { - if tc.tables == nil { - tc.tables = map[string]any{} - } - for _, tbl := range tables { - tc.tables[tbl] = nil - } -} - func newFlushStmt(stmt *sqlparser.Flush, tables sqlparser.TableNames) *sqlparser.Flush { return &sqlparser.Flush{ IsLocal: stmt.IsLocal, diff --git a/go/vt/vtgate/planbuilder/bypass.go b/go/vt/vtgate/planbuilder/bypass.go index a5490e2231e..52286816a11 100644 --- a/go/vt/vtgate/planbuilder/bypass.go +++ b/go/vt/vtgate/planbuilder/bypass.go @@ -30,7 +30,6 @@ func buildPlanForBypass(stmt sqlparser.Statement, _ *sqlparser.ReservedVars, vsc if err != nil { return nil, err } - switch dest := vschema.Destination().(type) { case key.DestinationExactKeyRange: if _, ok := stmt.(*sqlparser.Insert); ok { diff --git a/go/vt/vtgate/planbuilder/collations_test.go b/go/vt/vtgate/planbuilder/collations_test.go index 2a7ffebf91c..24fb038b4c2 100644 --- a/go/vt/vtgate/planbuilder/collations_test.go +++ b/go/vt/vtgate/planbuilder/collations_test.go @@ -20,6 +20,8 @@ import ( "fmt" "testing" + "vitess.io/vitess/go/test/vschemawrapper" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/collations" @@ -39,21 +41,21 @@ type collationTestCase struct { } func (tc *collationTestCase) run(t *testing.T) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", false), - sysVarEnabled: true, - version: Gen4, + vschemaWrapper := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", false), + SysVarEnabled: true, + Version: Gen4, } tc.addCollationsToSchema(vschemaWrapper) - plan, err := TestBuilder(tc.query, vschemaWrapper, vschemaWrapper.currentDb()) + plan, err := TestBuilder(tc.query, vschemaWrapper, vschemaWrapper.CurrentDb()) require.NoError(t, err) tc.check(t, tc.collations, plan.Instructions) } -func (tc *collationTestCase) addCollationsToSchema(vschema *vschemaWrapper) { +func (tc *collationTestCase) addCollationsToSchema(vschema *vschemawrapper.VSchemaWrapper) { for _, collation := range tc.collations { - tbl := vschema.v.Keyspaces[collation.ks].Tables[collation.table] + tbl := vschema.V.Keyspaces[collation.ks].Tables[collation.table] for i, c := range tbl.Columns { if c.Name.EqualString(collation.colName) { tbl.Columns[i].CollationName = collation.collationName @@ -65,7 +67,7 @@ func (tc *collationTestCase) addCollationsToSchema(vschema *vschemaWrapper) { func TestOrderedAggregateCollations(t *testing.T) { collid := func(collname string) collations.ID { - return collations.Local().LookupByName(collname).ID() + return collations.Local().LookupByName(collname) } testCases := []collationTestCase{ { @@ -81,9 +83,9 @@ func TestOrderedAggregateCollations(t *testing.T) { collations: []collationInTable{{ks: "user", table: "user", collationName: "utf8mb4_bin", colName: "textcol1"}}, query: "select distinct textcol1 from user", check: func(t *testing.T, colls []collationInTable, primitive engine.Primitive) { - oa, isOA := primitive.(*engine.OrderedAggregate) - require.True(t, isOA, "should be an OrderedAggregate") - require.Equal(t, collid(colls[0].collationName), oa.GroupByKeys[0].CollationID) + distinct, isDistinct := primitive.(*engine.Distinct) + require.True(t, isDistinct, "should be a distinct") + require.Equal(t, collid(colls[0].collationName), distinct.CheckCols[0].Collation) }, }, { diff --git a/go/vt/vtgate/planbuilder/concatenate.go b/go/vt/vtgate/planbuilder/concatenate.go index 70b867b1146..378c0049ed2 100644 --- a/go/vt/vtgate/planbuilder/concatenate.go +++ b/go/vt/vtgate/planbuilder/concatenate.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Vitess Authors. +Copyright 2021 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,69 +20,65 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" ) type concatenate struct { - v3Plan - lhs, rhs logicalPlan - order int -} - -var _ logicalPlan = (*concatenate)(nil) - -func (c *concatenate) Order() int { - return c.order -} + sources []logicalPlan -func (c *concatenate) ResultColumns() []*resultColumn { - return c.lhs.ResultColumns() + // These column offsets do not need to be typed checked - they usually contain weight_string() + // columns that are not going to be returned to the user + noNeedToTypeCheck []int } -func (c *concatenate) Reorder(order int) { - c.lhs.Reorder(order) - c.rhs.Reorder(c.lhs.Order()) - c.order = c.rhs.Order() + 1 -} +var _ logicalPlan = (*concatenate)(nil) -func (c *concatenate) Wireup(plan logicalPlan, jt *jointab) error { - // TODO systay should we do something different here? - err := c.lhs.Wireup(plan, jt) - if err != nil { - return err +// WireupGen4 implements the logicalPlan interface +func (c *concatenate) Wireup(ctx *plancontext.PlanningContext) error { + for _, source := range c.sources { + err := source.Wireup(ctx) + if err != nil { + return err + } } - return c.rhs.Wireup(plan, jt) -} - -func (c *concatenate) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { - panic("implement me") -} - -func (c *concatenate) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - panic("implement me") -} - -func (c *concatenate) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) { - panic("implement me") + return nil } +// Primitive implements the logicalPlan interface func (c *concatenate) Primitive() engine.Primitive { - lhs := c.lhs.Primitive() - rhs := c.rhs.Primitive() + var sources []engine.Primitive + for _, source := range c.sources { + sources = append(sources, source.Primitive()) + } - return engine.NewConcatenate([]engine.Primitive{lhs, rhs}, nil) + return engine.NewConcatenate(sources, c.noNeedToTypeCheck) } // Rewrite implements the logicalPlan interface func (c *concatenate) Rewrite(inputs ...logicalPlan) error { - if len(inputs) != 2 { + if len(inputs) != len(c.sources) { return vterrors.VT13001("concatenate: wrong number of inputs") } - c.lhs = inputs[0] - c.rhs = inputs[1] + c.sources = inputs return nil } +// ContainsTables implements the logicalPlan interface +func (c *concatenate) ContainsTables() semantics.TableSet { + var tableSet semantics.TableSet + for _, source := range c.sources { + tableSet = tableSet.Merge(source.ContainsTables()) + } + return tableSet +} + // Inputs implements the logicalPlan interface func (c *concatenate) Inputs() []logicalPlan { - return []logicalPlan{c.lhs, c.rhs} + return c.sources +} + +// OutputColumns implements the logicalPlan interface +func (c *concatenate) OutputColumns() []sqlparser.SelectExpr { + return c.sources[0].OutputColumns() } diff --git a/go/vt/vtgate/planbuilder/concatenateGen4.go b/go/vt/vtgate/planbuilder/concatenateGen4.go deleted file mode 100644 index fa12d24cf73..00000000000 --- a/go/vt/vtgate/planbuilder/concatenateGen4.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" -) - -type concatenateGen4 struct { - sources []logicalPlan - - // These column offsets do not need to be typed checked - they usually contain weight_string() - // columns that are not going to be returned to the user - noNeedToTypeCheck []int -} - -var _ logicalPlan = (*concatenateGen4)(nil) - -// Order implements the logicalPlan interface -func (c *concatenateGen4) Order() int { - panic("implement me") -} - -// ResultColumns implements the logicalPlan interface -func (c *concatenateGen4) ResultColumns() []*resultColumn { - panic("implement me") -} - -// Reorder implements the logicalPlan interface -func (c *concatenateGen4) Reorder(order int) { - panic("implement me") -} - -// Wireup implements the logicalPlan interface -func (c *concatenateGen4) Wireup(plan logicalPlan, jt *jointab) error { - panic("implement me") -} - -// WireupGen4 implements the logicalPlan interface -func (c *concatenateGen4) WireupGen4(ctx *plancontext.PlanningContext) error { - for _, source := range c.sources { - err := source.WireupGen4(ctx) - if err != nil { - return err - } - } - return nil -} - -// SupplyVar implements the logicalPlan interface -func (c *concatenateGen4) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { - panic("implement me") -} - -// SupplyCol implements the logicalPlan interface -func (c *concatenateGen4) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - panic("implement me") -} - -// SupplyWeightString implements the logicalPlan interface -func (c *concatenateGen4) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) { - panic("implement me") -} - -// Primitive implements the logicalPlan interface -func (c *concatenateGen4) Primitive() engine.Primitive { - var sources []engine.Primitive - for _, source := range c.sources { - sources = append(sources, source.Primitive()) - } - - return engine.NewConcatenate(sources, c.noNeedToTypeCheck) -} - -// Rewrite implements the logicalPlan interface -func (c *concatenateGen4) Rewrite(inputs ...logicalPlan) error { - if len(inputs) != len(c.sources) { - return vterrors.VT13001("concatenateGen4: wrong number of inputs") - } - c.sources = inputs - return nil -} - -// ContainsTables implements the logicalPlan interface -func (c *concatenateGen4) ContainsTables() semantics.TableSet { - var tableSet semantics.TableSet - for _, source := range c.sources { - tableSet = tableSet.Merge(source.ContainsTables()) - } - return tableSet -} - -// Inputs implements the logicalPlan interface -func (c *concatenateGen4) Inputs() []logicalPlan { - return c.sources -} - -// OutputColumns implements the logicalPlan interface -func (c *concatenateGen4) OutputColumns() []sqlparser.SelectExpr { - return c.sources[0].OutputColumns() -} diff --git a/go/vt/vtgate/planbuilder/ddl.go b/go/vt/vtgate/planbuilder/ddl.go index 797f3a2e52c..f366a169d69 100644 --- a/go/vt/vtgate/planbuilder/ddl.go +++ b/go/vt/vtgate/planbuilder/ddl.go @@ -1,9 +1,11 @@ package planbuilder import ( + "context" "fmt" "vitess.io/vitess/go/vt/key" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" @@ -18,18 +20,6 @@ const ( DifferentDestinations string = "Tables or Views specified in the query do not belong to the same destination" ) -type fkStrategy int - -const ( - fkAllow fkStrategy = iota - fkDisallow -) - -var fkStrategyMap = map[string]fkStrategy{ - "allow": fkAllow, - "disallow": fkDisallow, -} - type fkContraint struct { found bool } @@ -52,11 +42,11 @@ func (fk *fkContraint) FkWalk(node sqlparser.SQLNode) (kontinue bool, err error) // a session context. It's only when we Execute() the primitive that we have that context. // This is why we return a compound primitive (DDL) which contains fully populated primitives (Send & OnlineDDL), // and which chooses which of the two to invoke at runtime. -func buildGeneralDDLPlan(sql string, ddlStatement sqlparser.DDLStatement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) { +func buildGeneralDDLPlan(ctx context.Context, sql string, ddlStatement sqlparser.DDLStatement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) { if vschema.Destination() != nil { return buildByPassDDLPlan(sql, vschema) } - normalDDLPlan, onlineDDLPlan, err := buildDDLPlans(sql, ddlStatement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) + normalDDLPlan, onlineDDLPlan, err := buildDDLPlans(ctx, sql, ddlStatement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) if err != nil { return nil, err } @@ -102,17 +92,13 @@ func buildByPassDDLPlan(sql string, vschema plancontext.VSchema) (*planResult, e return newPlanResult(send), nil } -func buildDDLPlans(sql string, ddlStatement sqlparser.DDLStatement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*engine.Send, *engine.OnlineDDL, error) { +func buildDDLPlans(ctx context.Context, sql string, ddlStatement sqlparser.DDLStatement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*engine.Send, *engine.OnlineDDL, error) { var destination key.Destination var keyspace *vindexes.Keyspace var err error switch ddl := ddlStatement.(type) { case *sqlparser.AlterTable, *sqlparser.CreateTable, *sqlparser.TruncateTable: - err = checkFKError(vschema, ddlStatement) - if err != nil { - return nil, nil, err - } // For ALTER TABLE and TRUNCATE TABLE, the table must already exist // // For CREATE TABLE, the table may (in the case of --declarative) @@ -120,10 +106,14 @@ func buildDDLPlans(sql string, ddlStatement sqlparser.DDLStatement, reservedVars // // We should find the target of the query from this tables location. destination, keyspace, err = findTableDestinationAndKeyspace(vschema, ddlStatement) + if err != nil { + return nil, nil, err + } + err = checkFKError(vschema, ddlStatement, keyspace) case *sqlparser.CreateView: - destination, keyspace, err = buildCreateView(vschema, ddl, reservedVars, enableOnlineDDL, enableDirectDDL) + destination, keyspace, err = buildCreateView(ctx, vschema, ddl, reservedVars, enableOnlineDDL, enableDirectDDL) case *sqlparser.AlterView: - destination, keyspace, err = buildAlterView(vschema, ddl, reservedVars, enableOnlineDDL, enableDirectDDL) + destination, keyspace, err = buildAlterView(ctx, vschema, ddl, reservedVars, enableOnlineDDL, enableDirectDDL) case *sqlparser.DropView: destination, keyspace, err = buildDropView(vschema, ddlStatement) case *sqlparser.DropTable: @@ -160,8 +150,12 @@ func buildDDLPlans(sql string, ddlStatement sqlparser.DDLStatement, reservedVars }, nil } -func checkFKError(vschema plancontext.VSchema, ddlStatement sqlparser.DDLStatement) error { - if fkStrategyMap[vschema.ForeignKeyMode()] == fkDisallow { +func checkFKError(vschema plancontext.VSchema, ddlStatement sqlparser.DDLStatement, keyspace *vindexes.Keyspace) error { + fkMode, err := vschema.ForeignKeyMode(keyspace.Name) + if err != nil { + return err + } + if fkMode == vschemapb.Keyspace_FK_DISALLOW { fk := &fkContraint{} _ = sqlparser.Walk(fk.FkWalk, ddlStatement) if fk.found { @@ -196,7 +190,7 @@ func findTableDestinationAndKeyspace(vschema plancontext.VSchema, ddlStatement s return destination, keyspace, nil } -func buildAlterView(vschema plancontext.VSchema, ddl *sqlparser.AlterView, reservedVars *sqlparser.ReservedVars, enableOnlineDDL, enableDirectDDL bool) (key.Destination, *vindexes.Keyspace, error) { +func buildAlterView(ctx context.Context, vschema plancontext.VSchema, ddl *sqlparser.AlterView, reservedVars *sqlparser.ReservedVars, enableOnlineDDL, enableDirectDDL bool) (key.Destination, *vindexes.Keyspace, error) { // For Alter View, we require that the view exist and the select query can be satisfied within the keyspace itself // We should remove the keyspace name from the table name, as the database name in MySQL might be different than the keyspace name destination, keyspace, err := findTableDestinationAndKeyspace(vschema, ddl) @@ -204,7 +198,7 @@ func buildAlterView(vschema plancontext.VSchema, ddl *sqlparser.AlterView, reser return nil, nil, err } - selectPlan, err := createInstructionFor(sqlparser.String(ddl.Select), ddl.Select, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) + selectPlan, err := createInstructionFor(ctx, sqlparser.String(ddl.Select), ddl.Select, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) if err != nil { return nil, nil, err } @@ -237,7 +231,7 @@ func buildAlterView(vschema plancontext.VSchema, ddl *sqlparser.AlterView, reser return destination, keyspace, nil } -func buildCreateView(vschema plancontext.VSchema, ddl *sqlparser.CreateView, reservedVars *sqlparser.ReservedVars, enableOnlineDDL, enableDirectDDL bool) (key.Destination, *vindexes.Keyspace, error) { +func buildCreateView(ctx context.Context, vschema plancontext.VSchema, ddl *sqlparser.CreateView, reservedVars *sqlparser.ReservedVars, enableOnlineDDL, enableDirectDDL bool) (key.Destination, *vindexes.Keyspace, error) { // For Create View, we require that the keyspace exist and the select query can be satisfied within the keyspace itself // We should remove the keyspace name from the table name, as the database name in MySQL might be different than the keyspace name destination, keyspace, _, err := vschema.TargetDestination(ddl.ViewName.Qualifier.String()) @@ -246,7 +240,7 @@ func buildCreateView(vschema plancontext.VSchema, ddl *sqlparser.CreateView, res } ddl.ViewName.Qualifier = sqlparser.NewIdentifierCS("") - selectPlan, err := createInstructionFor(sqlparser.String(ddl.Select), ddl.Select, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) + selectPlan, err := createInstructionFor(ctx, sqlparser.String(ddl.Select), ddl.Select, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) if err != nil { return nil, nil, err } @@ -409,8 +403,6 @@ func tryToGetRoutePlan(selectPlan engine.Primitive) (valid bool, opCode engine.O switch plan := selectPlan.(type) { case *engine.Route: return true, plan.Opcode - case engine.Gen4Comparer: - return tryToGetRoutePlan(plan.GetGen4Primitive()) default: return false, engine.Opcode(0) } diff --git a/go/vt/vtgate/planbuilder/delete.go b/go/vt/vtgate/planbuilder/delete.go index 876d4aa73e4..5fa743e7034 100644 --- a/go/vt/vtgate/planbuilder/delete.go +++ b/go/vt/vtgate/planbuilder/delete.go @@ -17,60 +17,99 @@ limitations under the License. package planbuilder import ( + querypb "vitess.io/vitess/go/vt/proto/query" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" + "vitess.io/vitess/go/vt/vtgate/vindexes" ) -// buildDeletePlan builds the instructions for a DELETE statement. -func buildDeletePlan(string) stmtPlanner { - return func(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - del := stmt.(*sqlparser.Delete) - if del.With != nil { - return nil, vterrors.VT12001("WITH expression in DELETE statement") - } - var err error - if len(del.TableExprs) == 1 && len(del.Targets) == 1 { - del, err = rewriteSingleTbl(del) - if err != nil { - return nil, err - } - } - dml, tables, ksidVindex, err := buildDMLPlan(vschema, "delete", del, reservedVars, del.TableExprs, del.Where, del.OrderBy, del.Limit, del.Comments, del.Targets) +func gen4DeleteStmtPlanner( + version querypb.ExecuteOptions_PlannerVersion, + deleteStmt *sqlparser.Delete, + reservedVars *sqlparser.ReservedVars, + vschema plancontext.VSchema, +) (*planResult, error) { + if deleteStmt.With != nil { + return nil, vterrors.VT12001("WITH expression in DELETE statement") + } + + var err error + if len(deleteStmt.TableExprs) == 1 && len(deleteStmt.Targets) == 1 { + deleteStmt, err = rewriteSingleTbl(deleteStmt) if err != nil { return nil, err } - edel := &engine.Delete{DML: dml} - if dml.Opcode == engine.Unsharded { - return newPlanResult(edel, tables...), nil - } + } + + ctx, err := plancontext.CreatePlanningContext(deleteStmt, reservedVars, vschema, version) + if err != nil { + return nil, err + } - if len(del.Targets) > 1 { - return nil, vterrors.VT12001("multi-table DELETE statement in a sharded keyspace") + err = rewriteRoutedTables(deleteStmt, vschema) + if err != nil { + return nil, err + } + + if ks, tables := ctx.SemTable.SingleUnshardedKeyspace(); ks != nil { + if fkManagementNotRequired(ctx, vschema, tables) { + plan := deleteUnshardedShortcut(deleteStmt, ks, tables) + plan = pushCommentDirectivesOnPlan(plan, deleteStmt) + return newPlanResult(plan.Primitive(), operators.QualifiedTables(ks, tables)...), nil } + } + + if err := checkIfDeleteSupported(deleteStmt, ctx.SemTable); err != nil { + return nil, err + } + + err = queryRewrite(ctx.SemTable, reservedVars, deleteStmt) + if err != nil { + return nil, err + } + + op, err := operators.PlanQuery(ctx, deleteStmt) + if err != nil { + return nil, err + } + + plan, err := transformToLogicalPlan(ctx, op) + if err != nil { + return nil, err + } + + plan = pushCommentDirectivesOnPlan(plan, deleteStmt) + + setLockOnAllSelect(plan) + + if err := plan.Wireup(ctx); err != nil { + return nil, err + } + + return newPlanResult(plan.Primitive(), operators.TablesUsed(op)...), nil +} - edelTable, err := edel.GetSingleTable() +func fkManagementNotRequired(ctx *plancontext.PlanningContext, vschema plancontext.VSchema, vTables []*vindexes.Table) bool { + // Find the foreign key mode and check for any managed child foreign keys. + for _, vTable := range vTables { + ksMode, err := vschema.ForeignKeyMode(vTable.Keyspace.Name) if err != nil { - return nil, err + return false } - if len(del.Targets) == 1 && del.Targets[0].Name != edelTable.Name { - return nil, vterrors.VT03003(del.Targets[0].Name.String()) + if ksMode != vschemapb.Keyspace_FK_MANAGED { + continue } - - if len(edelTable.Owned) > 0 { - aTblExpr, ok := del.TableExprs[0].(*sqlparser.AliasedTableExpr) - if !ok { - return nil, vterrors.VT12001("deleting from a complex table expression") - } - tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: edelTable.Name}, As: aTblExpr.As} - edel.OwnedVindexQuery = generateDMLSubquery(tblExpr, del.Where, del.OrderBy, del.Limit, edelTable, ksidVindex.Columns) - edel.KsidVindex = ksidVindex.Vindex - edel.KsidLength = len(ksidVindex.Columns) + childFks := vTable.ChildFKsNeedsHandling(ctx.VerifyAllFKs, vindexes.DeleteAction) + if len(childFks) > 0 { + return false } - - return newPlanResult(edel, tables...), nil } + return true } func rewriteSingleTbl(del *sqlparser.Delete) (*sqlparser.Delete, error) { @@ -109,3 +148,51 @@ func rewriteSingleTbl(del *sqlparser.Delete) (*sqlparser.Delete, error) { } return del, nil } + +func deleteUnshardedShortcut(stmt *sqlparser.Delete, ks *vindexes.Keyspace, tables []*vindexes.Table) logicalPlan { + edml := engine.NewDML() + edml.Keyspace = ks + edml.Opcode = engine.Unsharded + edml.Query = generateQuery(stmt) + for _, tbl := range tables { + edml.TableNames = append(edml.TableNames, tbl.Name.String()) + } + return &primitiveWrapper{prim: &engine.Delete{DML: edml}} +} + +// checkIfDeleteSupported checks if the delete query is supported or we must return an error. +func checkIfDeleteSupported(del *sqlparser.Delete, semTable *semantics.SemTable) error { + if semTable.NotUnshardedErr != nil { + return semTable.NotUnshardedErr + } + + // Delete is only supported for a single TableExpr which is supposed to be an aliased expression + multiShardErr := vterrors.VT12001("multi-shard or vindex write statement") + if len(del.TableExprs) != 1 { + return multiShardErr + } + _, isAliasedExpr := del.TableExprs[0].(*sqlparser.AliasedTableExpr) + if !isAliasedExpr { + return multiShardErr + } + + if len(del.Targets) > 1 { + return vterrors.VT12001("multi-table DELETE statement in a sharded keyspace") + } + + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node.(type) { + case *sqlparser.Subquery, *sqlparser.DerivedTable: + // We have a subquery, so we must fail the planning. + // If this subquery and the table expression were all belonging to the same unsharded keyspace, + // we would have already created a plan for them before doing these checks. + return false, vterrors.VT12001("subqueries in DML") + } + return true, nil + }, del) + if err != nil { + return err + } + + return nil +} diff --git a/go/vt/vtgate/planbuilder/distinct.go b/go/vt/vtgate/planbuilder/distinct.go index 98e6b550b8b..8b81fa4a8ce 100644 --- a/go/vt/vtgate/planbuilder/distinct.go +++ b/go/vt/vtgate/planbuilder/distinct.go @@ -27,34 +27,33 @@ var _ logicalPlan = (*distinct)(nil) type distinct struct { logicalPlanCommon checkCols []engine.CheckCol + truncateColumn int + + // needToTruncate is the old way to check weight_string column and set truncation. needToTruncate bool } -func newDistinct(source logicalPlan, checkCols []engine.CheckCol, needToTruncate bool) logicalPlan { +func newDistinct(source logicalPlan, checkCols []engine.CheckCol, truncateColumn int) logicalPlan { return &distinct{ logicalPlanCommon: newBuilderCommon(source), checkCols: checkCols, - needToTruncate: needToTruncate, + truncateColumn: truncateColumn, } } -func newDistinctV3(source logicalPlan) logicalPlan { - return &distinct{logicalPlanCommon: newBuilderCommon(source)} -} - func (d *distinct) Primitive() engine.Primitive { - if d.checkCols == nil { - // If we are missing the checkCols information, we are on the V3 planner and should produce a V3 Distinct - return &engine.DistinctV3{Source: d.input.Primitive()} - } - truncate := false + truncate := d.truncateColumn if d.needToTruncate { + wsColFound := false for _, col := range d.checkCols { if col.WsCol != nil { - truncate = true + wsColFound = true break } } + if wsColFound { + truncate = len(d.checkCols) + } } return &engine.Distinct{ Source: d.input.Primitive(), diff --git a/go/vt/vtgate/planbuilder/dml_planner.go b/go/vt/vtgate/planbuilder/dml_planner.go index 9a4608b295c..a85d10b742a 100644 --- a/go/vt/vtgate/planbuilder/dml_planner.go +++ b/go/vt/vtgate/planbuilder/dml_planner.go @@ -17,379 +17,56 @@ limitations under the License. package planbuilder import ( - "fmt" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "vitess.io/vitess/go/vt/vtgate/semantics" - "vitess.io/vitess/go/vt/vtgate/vindexes" -) - -type ( - // costDML is used to compare the cost of vindexOptionDML - costDML struct { - vindexCost int - isUnique bool - opCode engine.Opcode - } - - // vindexPlusPredicatesDML is a struct used to store all the predicates that the vindex can be used to query - vindexPlusPredicatesDML struct { - colVindex *vindexes.ColumnVindex - - // during planning, we store the alternatives found for this DML in this slice - options []*vindexOptionDML - } - - // vindexOptionDML stores the information needed to know if we have all the information needed to use a vindex - vindexOptionDML struct { - ready bool - values []evalengine.Expr - // columns that we have seen so far. Used only for multi-column vindexes so that we can track how many columns part of the vindex we have seen - colsSeen map[string]any - opcode engine.Opcode - foundVindex vindexes.Vindex - cost costDML - } ) -// getDMLRouting returns the vindex and values for the DML, -// If it cannot find a unique vindex match, it returns an error. -func getDMLRouting(where *sqlparser.Where, table *vindexes.Table) ( - engine.Opcode, - *vindexes.ColumnVindex, - vindexes.Vindex, - []evalengine.Expr, - error, -) { - // Check that we have a primary vindex which is valid - if len(table.ColumnVindexes) == 0 || !table.ColumnVindexes[0].IsUnique() { - return engine.Scatter, nil, nil, nil, vterrors.VT09001(table.Name) - } - // ksidVindex is the primary vindex - ksidVindex := table.ColumnVindexes[0] - if where == nil { - return engine.Scatter, ksidVindex, nil, nil, nil - } - - filters := sqlparser.SplitAndExpression(nil, where.Expr) - // go over the vindexes in the order of increasing cost - for _, colVindex := range table.Ordered { - if lu, isLu := colVindex.Vindex.(vindexes.LookupBackfill); isLu && lu.IsBackfilling() { - // Checking if the Vindex is currently backfilling or not, if it isn't we can read from the vindex table - // and we will be able to do a delete equal. Otherwise, we continue to look for next best vindex. - continue +func rewriteRoutedTables(stmt sqlparser.Statement, vschema plancontext.VSchema) error { + // Rewrite routed tables + return sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { + aliasTbl, isAlias := node.(*sqlparser.AliasedTableExpr) + if !isAlias { + return true, nil } - // get the best vindex option that can be used for this vindexes.ColumnVindex - if vindexOption := getBestVindexOption(filters, colVindex); vindexOption != nil { - return vindexOption.opcode, ksidVindex, colVindex.Vindex, vindexOption.values, nil - } - } - return engine.Scatter, ksidVindex, nil, nil, nil -} - -// getBestVindexOption returns the best vindex option that can be used for this vindexes.ColumnVindex -// It returns nil if there is no suitable way to use the ColumnVindex -func getBestVindexOption(exprs []sqlparser.Expr, index *vindexes.ColumnVindex) *vindexOptionDML { - vindexPlusPredicates := &vindexPlusPredicatesDML{ - colVindex: index, - } - for _, filter := range exprs { - comparison, ok := filter.(*sqlparser.ComparisonExpr) + tableName, ok := aliasTbl.Expr.(sqlparser.TableName) if !ok { - continue - } - var colName *sqlparser.ColName - var valExpr sqlparser.Expr - if col, ok := comparison.Left.(*sqlparser.ColName); ok { - colName = col - valExpr = comparison.Right - } else if col, ok := comparison.Right.(*sqlparser.ColName); ok { - colName = col - valExpr = comparison.Left - } else { - continue - } - - var opcode engine.Opcode - switch comparison.Operator { - case sqlparser.EqualOp: - if !sqlparser.IsValue(valExpr) { - continue - } - opcode = engine.Equal - case sqlparser.InOp: - if !sqlparser.IsSimpleTuple(valExpr) { - continue - } - opcode = engine.IN - default: - continue + return true, nil } - expr, err := evalengine.Translate(comparison.Right, semantics.EmptySemTable()) + vschemaTable, vindexTbl, _, _, _, err := vschema.FindTableOrVindex(tableName) if err != nil { - continue + return false, err } - addVindexOptions(colName, expr, opcode, vindexPlusPredicates) - } - return vindexPlusPredicates.bestOption() -} - -// bestOption returns the option which is ready and has the lowest associated cost -func (vpp *vindexPlusPredicatesDML) bestOption() *vindexOptionDML { - var best *vindexOptionDML - for _, option := range vpp.options { - if option.ready { - if best == nil || lessCostDML(option.cost, best.cost) { - best = option - } + if vindexTbl != nil { + // vindex cannot be present in a dml statement. + return false, vterrors.VT09014() } - } - return best -} - -// lessCostDML compares two costDML and returns true if the first cost is cheaper than the second -func lessCostDML(c1, c2 costDML) bool { - switch { - case c1.opCode != c2.opCode: - return c1.opCode < c2.opCode - case c1.isUnique == c2.isUnique: - return c1.vindexCost <= c2.vindexCost - default: - return c1.isUnique - } -} -// addVindexOptions adds new vindexOptionDML if it matches any column of the vindexes.ColumnVindex -func addVindexOptions(column *sqlparser.ColName, value evalengine.Expr, opcode engine.Opcode, v *vindexPlusPredicatesDML) { - switch v.colVindex.Vindex.(type) { - case vindexes.SingleColumn: - col := v.colVindex.Columns[0] - if column.Name.Equal(col) { - // single column vindex - just add the option - vindex := v.colVindex - v.options = append(v.options, &vindexOptionDML{ - values: []evalengine.Expr{value}, - opcode: opcode, - foundVindex: vindex.Vindex, - cost: costForDML(v.colVindex, opcode), - ready: true, - }) - } - case vindexes.MultiColumn: - colLoweredName := "" - indexOfCol := -1 - for idx, col := range v.colVindex.Columns { - if column.Name.Equal(col) { - colLoweredName = column.Name.Lowered() - indexOfCol = idx - break + if vschemaTable.Name.String() != tableName.Name.String() { + name := tableName.Name + if aliasTbl.As.IsEmpty() { + // if the user hasn't specified an alias, we'll insert one here so the old table name still works + aliasTbl.As = sqlparser.NewIdentifierCS(name.String()) } - } - if colLoweredName == "" { - break + tableName.Name = sqlparser.NewIdentifierCS(vschemaTable.Name.String()) + aliasTbl.Expr = tableName } - var newOption []*vindexOptionDML - for _, op := range v.options { - if op.ready { - continue - } - _, isPresent := op.colsSeen[colLoweredName] - if isPresent { - continue - } - option := copyOptionDML(op) - option.updateWithNewColumn(colLoweredName, indexOfCol, value, v.colVindex, opcode) - newOption = append(newOption, option) - } - v.options = append(v.options, newOption...) - - // multi column vindex - just always add as new option - option := createOptionDML(v.colVindex) - option.updateWithNewColumn(colLoweredName, indexOfCol, value, v.colVindex, opcode) - v.options = append(v.options, option) - } + return true, nil + }, stmt) } -// copyOptionDML is used to copy vindexOptionDML -func copyOptionDML(orig *vindexOptionDML) *vindexOptionDML { - colsSeen := make(map[string]any, len(orig.colsSeen)) - values := make([]evalengine.Expr, len(orig.values)) - - copy(values, orig.values) - for k, v := range orig.colsSeen { - colsSeen[k] = v - } - vo := &vindexOptionDML{ - values: values, - colsSeen: colsSeen, - opcode: orig.opcode, - foundVindex: orig.foundVindex, - cost: orig.cost, - } - return vo -} - -// updateWithNewColumn is used to update vindexOptionDML with a new column that matches one of its unseen columns -func (option *vindexOptionDML) updateWithNewColumn(colLoweredName string, indexOfCol int, value evalengine.Expr, colVindex *vindexes.ColumnVindex, opcode engine.Opcode) { - option.colsSeen[colLoweredName] = true - option.values[indexOfCol] = value - option.ready = len(option.colsSeen) == len(colVindex.Columns) - if option.opcode < opcode { - option.opcode = opcode - option.cost = costForDML(colVindex, opcode) - } -} - -// createOptionDML is used to create a vindexOptionDML -func createOptionDML( - colVindex *vindexes.ColumnVindex, -) *vindexOptionDML { - values := make([]evalengine.Expr, len(colVindex.Columns)) - vindex := colVindex.Vindex - - return &vindexOptionDML{ - values: values, - colsSeen: map[string]any{}, - foundVindex: vindex, - } -} - -// costForDML returns a cost struct to make route choices easier to compare -func costForDML(foundVindex *vindexes.ColumnVindex, opcode engine.Opcode) costDML { - switch opcode { - // For these opcodes, we should not have a vindex, so we just return the opcode as the cost - case engine.Unsharded, engine.Scatter: - return costDML{ - opCode: opcode, - } - } - - return costDML{ - vindexCost: foundVindex.Cost(), - isUnique: foundVindex.IsUnique(), - opCode: opcode, - } -} - -func buildDMLPlan( - vschema plancontext.VSchema, - dmlType string, - stmt sqlparser.Statement, - reservedVars *sqlparser.ReservedVars, - tableExprs sqlparser.TableExprs, - where *sqlparser.Where, - orderBy sqlparser.OrderBy, - limit *sqlparser.Limit, - comments *sqlparser.ParsedComments, - nodes ...sqlparser.SQLNode, -) (*engine.DML, []string, *vindexes.ColumnVindex, error) { - edml := engine.NewDML() - pb := newPrimitiveBuilder(vschema, newJointab(reservedVars)) - rb, err := pb.processDMLTable(tableExprs, reservedVars, nil) - if err != nil { - return nil, nil, nil, err - } - edml.Keyspace = rb.eroute.Keyspace - tc := &tableCollector{} - for _, tval := range pb.st.tables { - tc.addVindexTable(tval.vschemaTable) - } - - edml.Table, err = pb.st.AllVschemaTableNames() - if err != nil { - return nil, nil, nil, err - } - if !edml.Keyspace.Sharded { - // We only validate non-table subexpressions because the previous analysis has already validated them. - var subqueryArgs []sqlparser.SQLNode - subqueryArgs = append(subqueryArgs, nodes...) - subqueryArgs = append(subqueryArgs, where, orderBy, limit) - subqueryIsUnsharded, subqueryTables := pb.finalizeUnshardedDMLSubqueries(reservedVars, subqueryArgs...) - if subqueryIsUnsharded { - vschema.WarnUnshardedOnly("subqueries can't be sharded in DML") - } else { - return nil, nil, nil, vterrors.VT12001("sharded subqueries in DML") - } - edml.Opcode = engine.Unsharded - // Generate query after all the analysis. Otherwise table name substitutions for - // routed tables won't happen. - edml.Query = generateQuery(stmt) - edml.Table = append(edml.Table, subqueryTables...) - return edml, tc.getTables(), nil, nil - } - - if hasSubquery(stmt) { - return nil, nil, nil, vterrors.VT12001("sharded subqueries in DML") - } - - // Generate query after all the analysis. Otherwise table name substitutions for - // routed tables won't happen. - edml.Query = generateQuery(stmt) - - directives := comments.Directives() - if directives.IsSet(sqlparser.DirectiveMultiShardAutocommit) { - edml.MultiShardAutocommit = true - } - - edml.QueryTimeout = queryTimeout(directives) - - if len(pb.st.tables) != 1 { - return nil, nil, nil, vterrors.VT12001(fmt.Sprintf("multi-table %s statement in a sharded keyspace", dmlType)) - } - edmlTable, err := edml.GetSingleTable() - if err != nil { - return nil, nil, nil, err - } - routingType, ksidVindex, vindex, values, err := getDMLRouting(where, edmlTable) - if err != nil { - return nil, nil, nil, err - } - - if rb.eroute.TargetDestination != nil { - if rb.eroute.TargetTabletType != topodatapb.TabletType_PRIMARY { - return nil, nil, nil, vterrors.VT09002(dmlType) - } - edml.Opcode = engine.ByDestination - edml.TargetDestination = rb.eroute.TargetDestination - return edml, tc.getTables(), ksidVindex, nil - } - - edml.Opcode = routingType - if routingType == engine.Scatter { - if limit != nil { - return nil, nil, nil, vterrors.VT12001(fmt.Sprintf("multi-shard %s with LIMIT", dmlType)) - } - } else { - edml.Vindex = vindex - edml.Values = values - } - - return edml, tc.getTables(), ksidVindex, nil -} - -func generateDMLSubquery(tblExpr sqlparser.TableExpr, where *sqlparser.Where, orderBy sqlparser.OrderBy, limit *sqlparser.Limit, table *vindexes.Table, ksidCols []sqlparser.IdentifierCI) string { - buf := sqlparser.NewTrackedBuffer(nil) - for idx, col := range ksidCols { - if idx == 0 { - buf.Myprintf("select %v", col) - } else { - buf.Myprintf(", %v", col) - } - } - for _, cv := range table.Owned { - for _, column := range cv.Columns { - buf.Myprintf(", %v", column) +func setLockOnAllSelect(plan logicalPlan) { + _, _ = visit(plan, func(plan logicalPlan) (bool, logicalPlan, error) { + switch node := plan.(type) { + case *route: + if node.Select.GetLock() == sqlparser.NoLock { + node.Select.SetLock(sqlparser.ShareModeLock) + } + return true, node, nil } - } - buf.Myprintf(" from %v%v%v%v for update", tblExpr, where, orderBy, limit) - return buf.String() + return true, plan, nil + }) } func generateQuery(statement sqlparser.Statement) string { @@ -397,13 +74,3 @@ func generateQuery(statement sqlparser.Statement) string { statement.Format(buf) return buf.String() } - -// dmlFormatter strips out keyspace name from dmls. -func dmlFormatter(buf *sqlparser.TrackedBuffer, node sqlparser.SQLNode) { - switch node := node.(type) { - case sqlparser.TableName: - node.Name.Format(buf) - return - } - node.Format(buf) -} diff --git a/go/vt/vtgate/planbuilder/expr.go b/go/vt/vtgate/planbuilder/expr.go deleted file mode 100644 index dfbe23b1640..00000000000 --- a/go/vt/vtgate/planbuilder/expr.go +++ /dev/null @@ -1,338 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "bytes" - "fmt" - - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/vindexes" -) - -type subqueryInfo struct { - ast *sqlparser.Subquery - plan logicalPlan - origin logicalPlan -} - -// findOrigin identifies the right-most origin referenced by expr. In situations where -// the expression references columns from multiple origins, the expression will be -// pushed to the right-most origin, and the executor will use the results of -// the previous origins to feed the necessary values to the primitives on the right. -// -// If the expression contains a subquery, the right-most origin identification -// also follows the same rules of a normal expression. This is achieved by -// looking at the Externs field of its symbol table that contains the list of -// external references. -// -// Once the target origin is identified, we have to verify that the subquery's -// route can be merged with it. If it cannot, we fail the query. This is because -// we don't have the ability to wire up subqueries through expression evaluation -// primitives. Consequently, if the plan for a subquery comes out as a Join, -// we can immediately error out. -// -// Since findOrigin can itself be called from within a subquery, it has to assume -// that some of the external references may actually be pointing to an outer -// query. The isLocal response from the symtab is used to make sure that we -// only analyze symbols that point to the current symtab. -// -// If an expression has no references to the current query, then the left-most -// origin is chosen as the default. -func (pb *primitiveBuilder) findOrigin(expr sqlparser.Expr, reservedVars *sqlparser.ReservedVars) (pullouts []*pulloutSubquery, origin logicalPlan, pushExpr sqlparser.Expr, err error) { - // highestOrigin tracks the highest origin referenced by the expression. - // Default is the first. - highestOrigin := first(pb.plan) - - // subqueries tracks the list of subqueries encountered. - var subqueries []subqueryInfo - - // constructsMap tracks the sub-construct in which a subquery - // occurred. The construct type decides on how the query gets - // pulled out. - constructsMap := make(map[*sqlparser.Subquery]sqlparser.Expr) - - err = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - switch node := node.(type) { - case *sqlparser.ColName: - newOrigin, isLocal, err := pb.st.Find(node) - if err != nil { - return false, err - } - if isLocal && newOrigin.Order() > highestOrigin.Order() { - highestOrigin = newOrigin - } - case *sqlparser.ComparisonExpr: - if node.Operator == sqlparser.InOp || node.Operator == sqlparser.NotInOp { - if sq, ok := node.Right.(*sqlparser.Subquery); ok { - constructsMap[sq] = node - } - } - case *sqlparser.ExistsExpr: - constructsMap[node.Subquery] = node - case *sqlparser.Subquery: - spb := newPrimitiveBuilder(pb.vschema, pb.jt) - switch stmt := node.Select.(type) { - case *sqlparser.Select: - if err := spb.processSelect(stmt, reservedVars, pb.st, ""); err != nil { - return false, err - } - case *sqlparser.Union: - if err := spb.processUnion(stmt, reservedVars, pb.st); err != nil { - return false, err - } - default: - return false, vterrors.VT13001(fmt.Sprintf("unexpected SELECT type: %T", node)) - } - sqi := subqueryInfo{ - ast: node, - plan: spb.plan, - } - for _, extern := range spb.st.Externs { - // No error expected. These are resolved externs. - newOrigin, isLocal, _ := pb.st.Find(extern) - if !isLocal { - continue - } - if highestOrigin.Order() < newOrigin.Order() { - highestOrigin = newOrigin - } - if sqi.origin == nil { - sqi.origin = newOrigin - } else if sqi.origin.Order() < newOrigin.Order() { - sqi.origin = newOrigin - } - } - subqueries = append(subqueries, sqi) - return false, nil - } - return true, nil - }, expr) - if err != nil { - return nil, nil, nil, err - } - - highestRoute, _ := highestOrigin.(*route) - for _, sqi := range subqueries { - subroute, _ := sqi.plan.(*route) - if highestRoute != nil && subroute != nil && highestRoute.MergeSubquery(pb, subroute) { - continue - } - if sqi.origin != nil { - return nil, nil, nil, vterrors.VT12001("cross-shard correlated subquery") - } - - sqName, hasValues := pb.jt.GenerateSubqueryVars() - construct, ok := constructsMap[sqi.ast] - if !ok { - // (subquery) -> :_sq - expr = sqlparser.ReplaceExpr(expr, sqi.ast, sqlparser.NewArgument(sqName)) - pullouts = append(pullouts, newPulloutSubquery(engine.PulloutValue, sqName, hasValues, sqi.plan)) - continue - } - switch construct := construct.(type) { - case *sqlparser.ComparisonExpr: - if construct.Operator == sqlparser.InOp { - // a in (subquery) -> (:__sq_has_values = 1 and (a in ::__sq)) - right := &sqlparser.ComparisonExpr{ - Operator: construct.Operator, - Left: construct.Left, - Right: sqlparser.ListArg(sqName), - } - left := &sqlparser.ComparisonExpr{ - Left: sqlparser.NewArgument(hasValues), - Operator: sqlparser.EqualOp, - Right: sqlparser.NewIntLiteral("1"), - } - newExpr := &sqlparser.AndExpr{ - Left: left, - Right: right, - } - expr = sqlparser.ReplaceExpr(expr, construct, newExpr) - pullouts = append(pullouts, newPulloutSubquery(engine.PulloutIn, sqName, hasValues, sqi.plan)) - } else { - // a not in (subquery) -> (:__sq_has_values = 0 or (a not in ::__sq)) - left := &sqlparser.ComparisonExpr{ - Left: sqlparser.NewArgument(hasValues), - Operator: sqlparser.EqualOp, - Right: sqlparser.NewIntLiteral("0"), - } - right := &sqlparser.ComparisonExpr{ - Operator: construct.Operator, - Left: construct.Left, - Right: sqlparser.ListArg(sqName), - } - newExpr := &sqlparser.OrExpr{ - Left: left, - Right: right, - } - expr = sqlparser.ReplaceExpr(expr, construct, newExpr) - pullouts = append(pullouts, newPulloutSubquery(engine.PulloutNotIn, sqName, hasValues, sqi.plan)) - } - case *sqlparser.ExistsExpr: - // exists (subquery) -> :__sq_has_values - expr = sqlparser.ReplaceExpr(expr, construct, sqlparser.NewArgument(hasValues)) - pullouts = append(pullouts, newPulloutSubquery(engine.PulloutExists, sqName, hasValues, sqi.plan)) - } - } - return pullouts, highestOrigin, expr, nil -} - -var dummyErr = vterrors.VT13001("dummy") - -func hasSubquery(node sqlparser.SQLNode) bool { - has := false - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - switch node.(type) { - case *sqlparser.DerivedTable, *sqlparser.Subquery: - has = true - return false, dummyErr - } - return true, nil - }, node) - return has -} - -func (pb *primitiveBuilder) finalizeUnshardedDMLSubqueries(reservedVars *sqlparser.ReservedVars, nodes ...sqlparser.SQLNode) (bool, []*vindexes.Table) { - var keyspace string - var tables []*vindexes.Table - if rb, ok := pb.plan.(*route); ok { - keyspace = rb.eroute.Keyspace.Name - } else { - // This code is unreachable because the caller checks. - return false, nil - } - - for _, node := range nodes { - samePlan := true - inSubQuery := false - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - switch nodeType := node.(type) { - case *sqlparser.Subquery, *sqlparser.Insert: - inSubQuery = true - return true, nil - case *sqlparser.Select: - if !inSubQuery { - return true, nil - } - spb := newPrimitiveBuilder(pb.vschema, pb.jt) - if err := spb.processSelect(nodeType, reservedVars, pb.st, ""); err != nil { - samePlan = false - return false, err - } - innerRoute, ok := spb.plan.(*route) - if !ok { - samePlan = false - return false, dummyErr - } - if innerRoute.eroute.Keyspace.Name != keyspace { - samePlan = false - return false, dummyErr - } - for _, sub := range innerRoute.substitutions { - *sub.oldExpr = *sub.newExpr - } - spbTables, err := spb.st.AllVschemaTableNames() - if err != nil { - return false, err - } - tables = append(tables, spbTables...) - case *sqlparser.Union: - if !inSubQuery { - return true, nil - } - spb := newPrimitiveBuilder(pb.vschema, pb.jt) - if err := spb.processUnion(nodeType, reservedVars, pb.st); err != nil { - samePlan = false - return false, err - } - innerRoute, ok := spb.plan.(*route) - if !ok { - samePlan = false - return false, dummyErr - } - if innerRoute.eroute.Keyspace.Name != keyspace { - samePlan = false - return false, dummyErr - } - } - - return true, nil - }, node) - if !samePlan { - return false, nil - } - } - return true, tables -} - -func valEqual(a, b sqlparser.Expr) bool { - switch a := a.(type) { - case *sqlparser.ColName: - if b, ok := b.(*sqlparser.ColName); ok { - return a.Metadata == b.Metadata - } - case sqlparser.Argument: - b, ok := b.(sqlparser.Argument) - if !ok { - return false - } - return a == b - case *sqlparser.Literal: - b, ok := b.(*sqlparser.Literal) - if !ok { - return false - } - switch a.Type { - case sqlparser.StrVal: - switch b.Type { - case sqlparser.StrVal: - return a.Val == b.Val - case sqlparser.HexVal: - return hexEqual(b, a) - } - case sqlparser.HexVal: - return hexEqual(a, b) - case sqlparser.IntVal: - if b.Type == (sqlparser.IntVal) { - return a.Val == b.Val - } - } - } - return false -} - -func hexEqual(a, b *sqlparser.Literal) bool { - v, err := a.HexDecode() - if err != nil { - return false - } - switch b.Type { - case sqlparser.StrVal: - return bytes.Equal(v, b.Bytes()) - case sqlparser.HexVal: - v2, err := b.HexDecode() - if err != nil { - return false - } - return bytes.Equal(v, v2) - } - return false -} diff --git a/go/vt/vtgate/planbuilder/expr_test.go b/go/vt/vtgate/planbuilder/expr_test.go deleted file mode 100644 index b59bd034810..00000000000 --- a/go/vt/vtgate/planbuilder/expr_test.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "testing" - - "vitess.io/vitess/go/vt/sqlparser" -) - -func TestValEqual(t *testing.T) { - c1 := &column{} - c2 := &column{} - testcases := []struct { - in1, in2 sqlparser.Expr - out bool - }{{ - in1: &sqlparser.ColName{Metadata: c1, Name: sqlparser.NewIdentifierCI("c1")}, - in2: &sqlparser.ColName{Metadata: c1, Name: sqlparser.NewIdentifierCI("c1")}, - out: true, - }, { - // Objects that have the same name need not be the same because - // they might have appeared in different scopes and could have - // resolved to different columns. - in1: &sqlparser.ColName{Metadata: c1, Name: sqlparser.NewIdentifierCI("c1")}, - in2: &sqlparser.ColName{Metadata: c2, Name: sqlparser.NewIdentifierCI("c1")}, - out: false, - }, { - in1: sqlparser.NewArgument("aa"), - in2: &sqlparser.ColName{Metadata: c1, Name: sqlparser.NewIdentifierCI("c1")}, - out: false, - }, { - in1: sqlparser.NewArgument("aa"), - in2: sqlparser.NewArgument("aa"), - out: true, - }, { - in1: sqlparser.NewArgument("aa"), - in2: sqlparser.NewArgument("bb"), - }, { - in1: sqlparser.NewStrLiteral("aa"), - in2: sqlparser.NewStrLiteral("aa"), - out: true, - }, { - in1: sqlparser.NewStrLiteral("11"), - in2: sqlparser.NewHexLiteral("3131"), - out: true, - }, { - in1: sqlparser.NewHexLiteral("3131"), - in2: sqlparser.NewStrLiteral("11"), - out: true, - }, { - in1: sqlparser.NewHexLiteral("3131"), - in2: sqlparser.NewHexLiteral("3131"), - out: true, - }, { - in1: sqlparser.NewHexLiteral("3131"), - in2: sqlparser.NewHexLiteral("3132"), - out: false, - }, { - in1: sqlparser.NewHexLiteral("313"), - in2: sqlparser.NewHexLiteral("3132"), - out: false, - }, { - in1: sqlparser.NewHexLiteral("3132"), - in2: sqlparser.NewHexLiteral("313"), - out: false, - }, { - in1: sqlparser.NewIntLiteral("313"), - in2: sqlparser.NewHexLiteral("3132"), - out: false, - }, { - in1: sqlparser.NewHexLiteral("3132"), - in2: sqlparser.NewIntLiteral("313"), - out: false, - }, { - in1: sqlparser.NewIntLiteral("313"), - in2: sqlparser.NewIntLiteral("313"), - out: true, - }, { - in1: sqlparser.NewIntLiteral("313"), - in2: sqlparser.NewIntLiteral("314"), - out: false, - }} - for _, tc := range testcases { - out := valEqual(tc.in1, tc.in2) - if out != tc.out { - t.Errorf("valEqual(%#v, %#v): %v, want %v", tc.in1, tc.in2, out, tc.out) - } - } -} diff --git a/go/vt/vtgate/planbuilder/expression_converter.go b/go/vt/vtgate/planbuilder/expression_converter.go index ac538f356ce..f100d0d93e0 100644 --- a/go/vt/vtgate/planbuilder/expression_converter.go +++ b/go/vt/vtgate/planbuilder/expression_converter.go @@ -20,6 +20,7 @@ import ( "fmt" "strings" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/mysql/collations" @@ -63,8 +64,7 @@ func booleanValues(astExpr sqlparser.Expr) evalengine.Expr { func identifierAsStringValue(astExpr sqlparser.Expr) evalengine.Expr { colName, isColName := astExpr.(*sqlparser.ColName) if isColName { - // TODO@collations: proper collation for column name - return evalengine.NewLiteralString([]byte(colName.Name.Lowered()), collations.TypedCollation{}) + return evalengine.NewLiteralString([]byte(colName.Name.Lowered()), collations.SystemCollation) } return nil } @@ -87,7 +87,7 @@ func (ec *expressionConverter) convert(astExpr sqlparser.Expr, boolean, identifi if !strings.Contains(err.Error(), evalengine.ErrTranslateExprNotSupported) { return nil, err } - evalExpr = &evalengine.Column{Offset: len(ec.tabletExpressions)} + evalExpr = evalengine.NewColumn(len(ec.tabletExpressions), sqltypes.Unknown, collations.Unknown) ec.tabletExpressions = append(ec.tabletExpressions, astExpr) } return evalExpr, nil diff --git a/go/vt/vtgate/planbuilder/expression_converter_test.go b/go/vt/vtgate/planbuilder/expression_converter_test.go index b1d4e9b851a..e59df3c7fd1 100644 --- a/go/vt/vtgate/planbuilder/expression_converter_test.go +++ b/go/vt/vtgate/planbuilder/expression_converter_test.go @@ -21,7 +21,10 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/evalengine" ) @@ -41,7 +44,7 @@ func TestConversion(t *testing.T) { expressionsOut: e(evalengine.NewLiteralInt(1)), }, { expressionsIn: "@@foo", - expressionsOut: e(evalengine.NewColumn(0, collations.TypedCollation{})), + expressionsOut: e(evalengine.NewColumn(0, sqltypes.Unknown, collations.Unknown)), }} for _, tc := range queries { diff --git a/go/vt/vtgate/planbuilder/fallback_planner.go b/go/vt/vtgate/planbuilder/fallback_planner.go deleted file mode 100644 index 8b76efb2a21..00000000000 --- a/go/vt/vtgate/planbuilder/fallback_planner.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "vitess.io/vitess/go/vt/sqlparser" -) - -type fallbackPlanner struct { - primary, fallback stmtPlanner -} - -var _ stmtPlanner = (*fallbackPlanner)(nil).plan - -func (fp *fallbackPlanner) safePrimary(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (res *planResult, err error) { - defer func() { - // if the primary planner panics, we want to catch it here so we can fall back - if r := recover(); r != nil { - err = fmt.Errorf("%v", r) // not using vterror since this will only be used for logging - } - }() - res, err = fp.primary(stmt, reservedVars, vschema) - return -} - -func (fp *fallbackPlanner) plan(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - res, err := fp.safePrimary(sqlparser.CloneStatement(stmt), reservedVars, vschema) - if err != nil { - return fp.fallback(stmt, reservedVars, vschema) - } - return res, nil -} diff --git a/go/vt/vtgate/planbuilder/fallback_planner_test.go b/go/vt/vtgate/planbuilder/fallback_planner_test.go deleted file mode 100644 index d0110f72428..00000000000 --- a/go/vt/vtgate/planbuilder/fallback_planner_test.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - "testing" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/vt/sqlparser" - - "vitess.io/vitess/go/vt/vtgate/engine" -) - -type testPlanner struct { - panic any - err error - res engine.Primitive - messWithAST func(sqlparser.Statement) - called bool -} - -var _ stmtPlanner = (*testPlanner)(nil).plan - -func (tp *testPlanner) plan(statement sqlparser.Statement, vars *sqlparser.ReservedVars, schema plancontext.VSchema) (*planResult, error) { - tp.called = true - if tp.panic != nil { - panic(tp.panic) - } - if tp.messWithAST != nil { - tp.messWithAST(statement) - } - return newPlanResult(tp.res), tp.err -} - -func TestFallbackPlanner(t *testing.T) { - a := &testPlanner{} - b := &testPlanner{} - fb := &fallbackPlanner{ - primary: a.plan, - fallback: b.plan, - } - - stmt := &sqlparser.Select{} - var vschema plancontext.VSchema - - // first planner succeeds - _, _ = fb.plan(stmt, nil, vschema) - assert.True(t, a.called) - assert.False(t, b.called) - a.called = false - - // first planner errors - a.err = fmt.Errorf("fail") - _, _ = fb.plan(stmt, nil, vschema) - assert.True(t, a.called) - assert.True(t, b.called) - - a.called = false - b.called = false - - // first planner panics - a.panic = "oh noes" - _, _ = fb.plan(stmt, nil, vschema) - assert.True(t, a.called) - assert.True(t, b.called) -} - -func TestFallbackClonesBeforePlanning(t *testing.T) { - a := &testPlanner{ - messWithAST: func(statement sqlparser.Statement) { - sel := statement.(*sqlparser.Select) - sel.SelectExprs = nil - }, - } - b := &testPlanner{} - fb := &fallbackPlanner{ - primary: a.plan, - fallback: b.plan, - } - - stmt := &sqlparser.Select{ - SelectExprs: sqlparser.SelectExprs{&sqlparser.StarExpr{}}, - } - var vschema plancontext.VSchema - - // first planner succeeds - _, _ = fb.plan(stmt, nil, vschema) - - assert.NotNilf(t, stmt.SelectExprs, "should not have changed") -} diff --git a/go/vt/vtgate/planbuilder/filter.go b/go/vt/vtgate/planbuilder/filter.go index efc8d6089e4..589287495a7 100644 --- a/go/vt/vtgate/planbuilder/filter.go +++ b/go/vt/vtgate/planbuilder/filter.go @@ -17,7 +17,6 @@ limitations under the License. package planbuilder import ( - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" @@ -31,43 +30,30 @@ type ( logicalPlanCommon efilter *engine.Filter } - - simpleConverterLookup struct { - ctx *plancontext.PlanningContext - plan logicalPlan - canPushProjection bool - } ) var _ logicalPlan = (*filter)(nil) -var _ evalengine.TranslationLookup = (*simpleConverterLookup)(nil) -func (s *simpleConverterLookup) ColumnLookup(col *sqlparser.ColName) (int, error) { - offset, added, err := pushProjection(s.ctx, &sqlparser.AliasedExpr{Expr: col}, s.plan, true, true, false) - if err != nil { - return 0, err +func resolveFromPlan(ctx *plancontext.PlanningContext, plan logicalPlan, canPushProjection bool) evalengine.ColumnResolver { + return func(expr *sqlparser.ColName) (int, error) { + offset, added, err := pushProjection(ctx, &sqlparser.AliasedExpr{Expr: expr}, plan, true, true, false) + if err != nil { + return 0, err + } + if added && !canPushProjection { + return 0, vterrors.VT13001("column should not be pushed to projection while doing a column lookup") + } + return offset, nil } - if added && !s.canPushProjection { - return 0, vterrors.VT13001("column should not be pushed to projection while doing a column lookup") - } - return offset, nil -} - -func (s *simpleConverterLookup) CollationForExpr(expr sqlparser.Expr) collations.ID { - return s.ctx.SemTable.CollationForExpr(expr) -} - -func (s *simpleConverterLookup) DefaultCollation() collations.ID { - return s.ctx.SemTable.Collation } // newFilter builds a new filter. func newFilter(ctx *plancontext.PlanningContext, plan logicalPlan, expr sqlparser.Expr) (*filter, error) { - scl := &simpleConverterLookup{ - ctx: ctx, - plan: plan, - } - predicate, err := evalengine.Translate(expr, scl) + predicate, err := evalengine.Translate(expr, &evalengine.Config{ + ResolveColumn: resolveFromPlan(ctx, plan, false), + ResolveType: ctx.SemTable.TypeForExpr, + Collation: ctx.SemTable.Collation, + }) if err != nil { return nil, err } diff --git a/go/vt/vtgate/planbuilder/filtering.go b/go/vt/vtgate/planbuilder/filtering.go deleted file mode 100644 index 0dd4c889e80..00000000000 --- a/go/vt/vtgate/planbuilder/filtering.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" - "vitess.io/vitess/go/vt/vtgate/semantics" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" -) - -// planFilter solves this particular expression, either by pushing it down to a child or changing this logicalPlan -func planFilter(pb *primitiveBuilder, input logicalPlan, filter sqlparser.Expr, whereType string, origin logicalPlan) (logicalPlan, error) { - switch node := input.(type) { - case *join: - isLeft := true - var in logicalPlan - if node.isOnLeft(origin.Order()) { - in = node.Left - } else { - if node.ejoin.Opcode == engine.LeftJoin { - return nil, vterrors.VT12001("cross-shard LEFT JOIN and WHERE clause") - } - isLeft = false - in = node.Right - } - - filtered, err := planFilter(pb, in, filter, whereType, origin) - if err != nil { - return nil, err - } - if isLeft { - node.Left = filtered - } else { - node.Right = filtered - } - return node, nil - - case *route: - sel := node.Select.(*sqlparser.Select) - switch whereType { - case sqlparser.WhereStr: - sel.AddWhere(filter) - case sqlparser.HavingStr: - sel.AddHaving(filter) - } - node.UpdatePlan(pb, filter) - return node, nil - case *pulloutSubquery: - plan, err := planFilter(pb, node.underlying, filter, whereType, origin) - if err != nil { - return nil, err - } - node.underlying = plan - return node, nil - case *vindexFunc: - return filterVindexFunc(node, filter) - case *simpleProjection: - return nil, vterrors.VT12001("filtering on results of cross-shard subquery") - case *orderedAggregate: - return nil, vterrors.VT12001("filtering on results of aggregates") - } - - return nil, vterrors.VT13001(fmt.Sprintf("unreachable %T.filtering", input)) -} - -func filterVindexFunc(node *vindexFunc, filter sqlparser.Expr) (logicalPlan, error) { - if node.eVindexFunc.Opcode != engine.VindexNone { - return nil, vterrors.VT12001(operators.VindexUnsupported + " (multiple filters)") - } - - // Check LHS. - comparison, ok := filter.(*sqlparser.ComparisonExpr) - if !ok { - return nil, vterrors.VT12001(operators.VindexUnsupported + " (not a comparison)") - } - if comparison.Operator != sqlparser.EqualOp && comparison.Operator != sqlparser.InOp { - return nil, vterrors.VT12001(operators.VindexUnsupported + " (not equality)") - } - colname, ok := comparison.Left.(*sqlparser.ColName) - if !ok { - return nil, vterrors.VT12001(operators.VindexUnsupported + " (lhs is not a column)") - } - if !colname.Name.EqualString("id") { - return nil, vterrors.VT12001(operators.VindexUnsupported + " (lhs is not id)") - } - - // Check RHS. - // We have to check before calling NewPlanValue because NewPlanValue allows lists also. - if !sqlparser.IsValue(comparison.Right) && !sqlparser.IsSimpleTuple(comparison.Right) { - return nil, vterrors.VT12001(operators.VindexUnsupported + " (rhs is not a value)") - } - var err error - node.eVindexFunc.Value, err = evalengine.Translate(comparison.Right, semantics.EmptySemTable()) - if err != nil { - return nil, vterrors.VT12001(fmt.Sprintf("%s: %v", operators.VindexUnsupported, err)) - } - - node.eVindexFunc.Opcode = engine.VindexMap - return node, nil -} diff --git a/go/vt/vtgate/planbuilder/fk_cascade.go b/go/vt/vtgate/planbuilder/fk_cascade.go new file mode 100644 index 00000000000..5a709156955 --- /dev/null +++ b/go/vt/vtgate/planbuilder/fk_cascade.go @@ -0,0 +1,85 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import ( + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +var _ logicalPlan = (*fkCascade)(nil) + +// fkCascade is the logicalPlan for engine.FkCascade. +type fkCascade struct { + parent logicalPlan + selection logicalPlan + children []*engine.FkChild +} + +// newFkCascade builds a new fkCascade. +func newFkCascade(parent, selection logicalPlan, children []*engine.FkChild) *fkCascade { + return &fkCascade{ + parent: parent, + selection: selection, + children: children, + } +} + +// Primitive implements the logicalPlan interface +func (fkc *fkCascade) Primitive() engine.Primitive { + return &engine.FkCascade{ + Parent: fkc.parent.Primitive(), + Selection: fkc.selection.Primitive(), + Children: fkc.children, + } +} + +// Wireup implements the logicalPlan interface +func (fkc *fkCascade) Wireup(ctx *plancontext.PlanningContext) error { + if err := fkc.parent.Wireup(ctx); err != nil { + return err + } + return fkc.selection.Wireup(ctx) +} + +// Rewrite implements the logicalPlan interface +func (fkc *fkCascade) Rewrite(inputs ...logicalPlan) error { + if len(inputs) != 2 { + return vterrors.VT13001("fkCascade: wrong number of inputs") + } + fkc.parent = inputs[0] + fkc.selection = inputs[1] + return nil +} + +// ContainsTables implements the logicalPlan interface +func (fkc *fkCascade) ContainsTables() semantics.TableSet { + return fkc.parent.ContainsTables() +} + +// Inputs implements the logicalPlan interface +func (fkc *fkCascade) Inputs() []logicalPlan { + return []logicalPlan{fkc.parent, fkc.selection} +} + +// OutputColumns implements the logicalPlan interface +func (fkc *fkCascade) OutputColumns() []sqlparser.SelectExpr { + return nil +} diff --git a/go/vt/vtgate/planbuilder/fk_verify.go b/go/vt/vtgate/planbuilder/fk_verify.go new file mode 100644 index 00000000000..71638f88b9b --- /dev/null +++ b/go/vt/vtgate/planbuilder/fk_verify.go @@ -0,0 +1,103 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import ( + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +var _ logicalPlan = (*fkVerify)(nil) + +type verifyLP struct { + verify logicalPlan + typ string +} + +// fkVerify is the logicalPlan for engine.FkVerify. +type fkVerify struct { + input logicalPlan + verify []*verifyLP +} + +// newFkVerify builds a new fkVerify. +func newFkVerify(input logicalPlan, verify []*verifyLP) *fkVerify { + return &fkVerify{ + input: input, + verify: verify, + } +} + +// Primitive implements the logicalPlan interface +func (fkc *fkVerify) Primitive() engine.Primitive { + var verify []*engine.Verify + for _, v := range fkc.verify { + verify = append(verify, &engine.Verify{ + Exec: v.verify.Primitive(), + Typ: v.typ, + }) + } + return &engine.FkVerify{ + Exec: fkc.input.Primitive(), + Verify: verify, + } +} + +// Wireup implements the logicalPlan interface +func (fkc *fkVerify) Wireup(ctx *plancontext.PlanningContext) error { + for _, v := range fkc.verify { + err := v.verify.Wireup(ctx) + if err != nil { + return err + } + } + return fkc.input.Wireup(ctx) +} + +// Rewrite implements the logicalPlan interface +func (fkc *fkVerify) Rewrite(inputs ...logicalPlan) error { + if len(fkc.verify) != len(inputs)-1 { + return vterrors.VT13001("fkVerify: wrong number of inputs") + } + fkc.input = inputs[0] + for i := 1; i < len(inputs); i++ { + fkc.verify[i-1].verify = inputs[i] + } + return nil +} + +// ContainsTables implements the logicalPlan interface +func (fkc *fkVerify) ContainsTables() semantics.TableSet { + return fkc.input.ContainsTables() +} + +// Inputs implements the logicalPlan interface +func (fkc *fkVerify) Inputs() []logicalPlan { + inputs := []logicalPlan{fkc.input} + for _, v := range fkc.verify { + inputs = append(inputs, v.verify) + } + return inputs +} + +// OutputColumns implements the logicalPlan interface +func (fkc *fkVerify) OutputColumns() []sqlparser.SelectExpr { + return nil +} diff --git a/go/vt/vtgate/planbuilder/from.go b/go/vt/vtgate/planbuilder/from.go deleted file mode 100644 index 59df24146fb..00000000000 --- a/go/vt/vtgate/planbuilder/from.go +++ /dev/null @@ -1,426 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - "sort" - "strings" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/vtgate/evalengine" - - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/vindexes" -) - -// This file has functions to analyze the FROM clause. - -// processDMLTable analyzes the FROM clause for DMLs and returns a route. -func (pb *primitiveBuilder) processDMLTable(tableExprs sqlparser.TableExprs, reservedVars *sqlparser.ReservedVars, where sqlparser.Expr) (*route, error) { - if err := pb.processTableExprs(tableExprs, reservedVars, where); err != nil { - return nil, err - } - rb, ok := pb.plan.(*route) - if !ok { - return nil, vterrors.VT12001("multi-shard or vindex write statement") - } - for _, sub := range rb.substitutions { - *sub.oldExpr = *sub.newExpr - } - return rb, nil -} - -// processTableExprs analyzes the FROM clause. It produces a logicalPlan -// with all the routes identified. -func (pb *primitiveBuilder) processTableExprs(tableExprs sqlparser.TableExprs, reservedVars *sqlparser.ReservedVars, where sqlparser.Expr) error { - if len(tableExprs) == 1 { - return pb.processTableExpr(tableExprs[0], reservedVars, where) - } - - if err := pb.processTableExpr(tableExprs[0], reservedVars, where); err != nil { - return err - } - rpb := newPrimitiveBuilder(pb.vschema, pb.jt) - if err := rpb.processTableExprs(tableExprs[1:], reservedVars, where); err != nil { - return err - } - return pb.join(rpb, nil, reservedVars, where) -} - -// processTableExpr produces a logicalPlan subtree for the given TableExpr. -func (pb *primitiveBuilder) processTableExpr(tableExpr sqlparser.TableExpr, reservedVars *sqlparser.ReservedVars, where sqlparser.Expr) error { - switch tableExpr := tableExpr.(type) { - case *sqlparser.AliasedTableExpr: - return pb.processAliasedTable(tableExpr, reservedVars) - case *sqlparser.ParenTableExpr: - err := pb.processTableExprs(tableExpr.Exprs, reservedVars, where) - // If it's a route, preserve the parenthesis so things - // don't associate differently when more things are pushed - // into it. FROM a, (b, c) should not become FROM a, b, c. - if rb, ok := pb.plan.(*route); ok { - sel, ok := rb.Select.(*sqlparser.Select) - if !ok { - return vterrors.VT13002(sqlparser.String(rb.Select)) - } - - sel.From = sqlparser.TableExprs{&sqlparser.ParenTableExpr{Exprs: sel.From}} - } - return err - case *sqlparser.JoinTableExpr: - return pb.processJoin(tableExpr, reservedVars, where) - case *sqlparser.JSONTableExpr: - return vterrors.VT12001("JSON_TABLE expressions") - } - return vterrors.VT13001(fmt.Sprintf("unexpected table expression type: %T", tableExpr)) -} - -// processAliasedTable produces a logicalPlan subtree for the given AliasedTableExpr. -// If the expression is a subquery, then the primitive will create a table -// for it in the symtab. If the subquery is a route, then we build a route -// primitive with the subquery in the From clause, because a route is more -// versatile than a subquery. If a subquery becomes a route, then any result -// columns that represent underlying vindex columns are also exposed as -// vindex columns. -func (pb *primitiveBuilder) processAliasedTable(tableExpr *sqlparser.AliasedTableExpr, reservedVars *sqlparser.ReservedVars) error { - if tableExpr.Columns != nil { - return vterrors.VT12001("column aliases in derived table") - } - switch expr := tableExpr.Expr.(type) { - case sqlparser.TableName: - return pb.buildTablePrimitive(tableExpr, expr) - case *sqlparser.DerivedTable: - if expr.Lateral { - return vterrors.VT12001("lateral derived tables") - } - spb := newPrimitiveBuilder(pb.vschema, pb.jt) - switch stmt := expr.Select.(type) { - case *sqlparser.Select: - if err := spb.processSelect(stmt, reservedVars, nil, ""); err != nil { - return err - } - case *sqlparser.Union: - if err := spb.processUnion(stmt, reservedVars, nil); err != nil { - return err - } - default: - return vterrors.VT13001(fmt.Sprintf("unexpected SELECT type: %T", stmt)) - } - - subroute, ok := spb.plan.(*route) - if !ok { - var err error - pb.plan, pb.st, err = newSimpleProjection(tableExpr.As, spb.plan) - if err != nil { - return err - } - pb.plan.Reorder(0) - return nil - } - - // Since a route is more versatile than a subquery, we - // build a route primitive that has the subquery in its - // FROM clause. This allows for other constructs to be - // later pushed into it. - rb, st := newRoute(&sqlparser.Select{From: []sqlparser.TableExpr{tableExpr}}) - rb.substitutions = subroute.substitutions - rb.condition = subroute.condition - rb.eroute = subroute.eroute - subroute.Redirect = rb - - // The subquery needs to be represented as a new logical table in the symtab. - // The new route will inherit the routeOptions of the underlying subquery. - // For this, we first build new vschema tables based on the columns returned - // by the subquery, and re-expose possible vindexes. When added to the symtab, - // a new set of column references will be generated against the new tables, - // and those vindex maps will be returned. They have to replace the old vindex - // maps of the inherited route options. - var tableNames []string - spbTables, err := spb.st.AllVschemaTableNames() - if err != nil { - return err - } - for _, table := range spbTables { - tableNames = append(tableNames, table.Name.String()) - } - sort.Strings(tableNames) - vschemaTable := &vindexes.Table{ - Keyspace: subroute.eroute.Keyspace, - Name: sqlparser.NewIdentifierCS(strings.Join(tableNames, ", ")), - } - for _, rc := range subroute.ResultColumns() { - if rc.column.vindex == nil { - continue - } - // Check if a colvindex of the same name already exists. - // Dups are not allowed in subqueries in this situation. - for _, colVindex := range vschemaTable.ColumnVindexes { - if colVindex.Columns[0].Equal(rc.alias) { - return vterrors.VT12001(fmt.Sprintf("duplicate column aliases: %v", rc.alias)) - } - } - vschemaTable.ColumnVindexes = append(vschemaTable.ColumnVindexes, &vindexes.ColumnVindex{ - Columns: []sqlparser.IdentifierCI{rc.alias}, - Vindex: rc.column.vindex, - }) - } - if err := st.AddVSchemaTable(sqlparser.TableName{Name: tableExpr.As}, vschemaTable, rb); err != nil { - return err - } - - pb.plan, pb.st = rb, st - return nil - } - return vterrors.VT13001(fmt.Sprintf("unexpected table expression type: %T", tableExpr.Expr)) -} - -// buildTablePrimitive builds a primitive based on the table name. -func (pb *primitiveBuilder) buildTablePrimitive(tableExpr *sqlparser.AliasedTableExpr, tableName sqlparser.TableName) error { - alias := tableName - if !tableExpr.As.IsEmpty() { - alias = sqlparser.TableName{Name: tableExpr.As} - } - sel := &sqlparser.Select{From: sqlparser.TableExprs([]sqlparser.TableExpr{tableExpr})} - - if sqlparser.SystemSchema(tableName.Qualifier.String()) { - ks, err := pb.vschema.AnyKeyspace() - if err != nil { - return err - } - rb, st := newRoute(sel) - rb.eroute = engine.NewSimpleRoute(engine.DBA, ks) - rb.eroute.TableName = sqlparser.String(tableName) - pb.plan, pb.st = rb, st - // Add the table to symtab - return st.AddTable(&table{ - alias: alias, - origin: rb, - }) - } - - vschemaTable, vindex, _, destTableType, destTarget, err := pb.vschema.FindTableOrVindex(tableName) - if err != nil { - return err - } - if vindex != nil { - single, ok := vindex.(vindexes.SingleColumn) - if !ok { - return vterrors.VT12001("multi-column vindexes") - } - pb.plan, pb.st = newVindexFunc(alias, single) - return nil - } - - sourceTable, err := pb.tryRedirectGen4InsertToSource(vschemaTable) - if err != nil { - return err - } - if sourceTable != nil { - vschemaTable = sourceTable - } - - rb, st := newRoute(sel) - pb.plan, pb.st = rb, st - if err := st.AddVSchemaTable(alias, vschemaTable, rb); err != nil { - return err - } - - sub := &tableSubstitution{ - oldExpr: tableExpr, - } - if tableExpr.As.IsEmpty() { - if tableName.Name != vschemaTable.Name { - // Table name does not match. Change and alias it to old name. - sub.newExpr = &sqlparser.AliasedTableExpr{ - Expr: sqlparser.TableName{Name: vschemaTable.Name}, - As: tableName.Name, - } - } - } else { - // Table is already aliased. - if tableName.Name != vschemaTable.Name { - // Table name does not match. Change it and reuse existing alias. - sub.newExpr = &sqlparser.AliasedTableExpr{ - Expr: sqlparser.TableName{Name: vschemaTable.Name}, - As: tableExpr.As, - } - } - } - if sub != nil && sub.newExpr != nil { - rb.substitutions = []*tableSubstitution{sub} - } - - var eroute *engine.Route - switch { - case vschemaTable.Type == vindexes.TypeSequence: - eroute = engine.NewSimpleRoute(engine.Next, vschemaTable.Keyspace) - case vschemaTable.Type == vindexes.TypeReference: - eroute = engine.NewSimpleRoute(engine.Reference, vschemaTable.Keyspace) - case !vschemaTable.Keyspace.Sharded: - eroute = engine.NewSimpleRoute(engine.Unsharded, vschemaTable.Keyspace) - case vschemaTable.Pinned == nil: - eroute = engine.NewSimpleRoute(engine.Scatter, vschemaTable.Keyspace) - eroute.TargetDestination = destTarget - eroute.TargetTabletType = destTableType - default: - // Pinned tables have their keyspace ids already assigned. - // Use the Binary vindex, which is the identity function - // for keyspace id. - eroute = engine.NewSimpleRoute(engine.EqualUnique, vschemaTable.Keyspace) - vindex, _ = vindexes.NewBinary("binary", nil) - eroute.Vindex = vindex - lit := evalengine.NewLiteralString(vschemaTable.Pinned, collations.TypedCollation{}) - eroute.Values = []evalengine.Expr{lit} - } - eroute.TableName = sqlparser.String(vschemaTable.Name) - rb.eroute = eroute - - return nil -} - -// processJoin produces a logicalPlan subtree for the given Join. -// If the left and right nodes can be part of the same route, -// then it's a route. Otherwise, it's a join. -func (pb *primitiveBuilder) processJoin(ajoin *sqlparser.JoinTableExpr, reservedVars *sqlparser.ReservedVars, where sqlparser.Expr) error { - switch ajoin.Join { - case sqlparser.NormalJoinType, sqlparser.StraightJoinType, sqlparser.LeftJoinType: - case sqlparser.RightJoinType: - convertToLeftJoin(ajoin) - default: - return vterrors.VT12001(ajoin.Join.ToString()) - } - if err := pb.processTableExpr(ajoin.LeftExpr, reservedVars, where); err != nil { - return err - } - rpb := newPrimitiveBuilder(pb.vschema, pb.jt) - if err := rpb.processTableExpr(ajoin.RightExpr, reservedVars, where); err != nil { - return err - } - return pb.join(rpb, ajoin, reservedVars, where) -} - -// If the primitiveBuilder context is a Gen4 planner, the statement is an -// INSERT, and the vschema table is a reference with a valid source reference, -// then redirect the INSERT back to the source. -func (pb *primitiveBuilder) tryRedirectGen4InsertToSource(vschemaTable *vindexes.Table) (*vindexes.Table, error) { - if pb.stmt == nil { - return nil, nil - } - if _, ok := pb.stmt.(*sqlparser.Insert); !ok { - return nil, nil - } - if pb.vschema.Planner() == querypb.ExecuteOptions_V3 { - return nil, nil - } - if vschemaTable.Type != vindexes.TypeReference || vschemaTable.Source == nil { - return nil, nil - } - vschemaTable, _, _, _, _, err := pb.vschema.FindTableOrVindex(vschemaTable.Source.TableName) - return vschemaTable, err -} - -// convertToLeftJoin converts a right join into a left join. -func convertToLeftJoin(ajoin *sqlparser.JoinTableExpr) { - newRHS := ajoin.LeftExpr - // If the LHS is a join, we have to parenthesize it. - // Otherwise, it can be used as is. - if _, ok := newRHS.(*sqlparser.JoinTableExpr); ok { - newRHS = &sqlparser.ParenTableExpr{ - Exprs: sqlparser.TableExprs{newRHS}, - } - } - ajoin.LeftExpr, ajoin.RightExpr = ajoin.RightExpr, newRHS - ajoin.Join = sqlparser.LeftJoinType -} - -func (pb *primitiveBuilder) join(rpb *primitiveBuilder, ajoin *sqlparser.JoinTableExpr, reservedVars *sqlparser.ReservedVars, where sqlparser.Expr) error { - // Merge the symbol tables. In the case of a left join, we have to - // ideally create new symbols that originate from the join primitive. - // However, this is not worth it for now, because the Push functions - // verify that only valid constructs are passed through in case of left join. - err := pb.st.Merge(rpb.st) - if err != nil { - return err - } - - lRoute, leftIsRoute := pb.plan.(*route) - rRoute, rightIsRoute := rpb.plan.(*route) - if !leftIsRoute || !rightIsRoute { - return newJoin(pb, rpb, ajoin, reservedVars) - } - - // Try merging the routes. - if !lRoute.JoinCanMerge(pb, rRoute, ajoin, where) { - return newJoin(pb, rpb, ajoin, reservedVars) - } - - if lRoute.eroute.Opcode == engine.Reference { - // Swap the conditions & eroutes, and then merge. - lRoute.condition, rRoute.condition = rRoute.condition, lRoute.condition - lRoute.eroute, rRoute.eroute = rRoute.eroute, lRoute.eroute - } - lRoute.substitutions = append(lRoute.substitutions, rRoute.substitutions...) - rRoute.Redirect = lRoute - - // Merge the AST. - sel, ok := lRoute.Select.(*sqlparser.Select) - if !ok { - return vterrors.VT13002(sqlparser.String(lRoute.Select)) - } - if ajoin == nil { - rhsSel, ok := rRoute.Select.(*sqlparser.Select) - if !ok { - return vterrors.VT13002(sqlparser.String(rRoute.Select)) - } - sel.From = append(sel.From, rhsSel.From...) - } else { - sel.From = sqlparser.TableExprs{ajoin} - } - // join table name - if lRoute.eroute.TableName != rRoute.eroute.TableName { - lRoute.eroute.TableName = strings.Join([]string{lRoute.eroute.TableName, rRoute.eroute.TableName}, ", ") - } - - // join sysTableNames - for tableName, expr := range rRoute.eroute.SysTableTableName { - _, ok := lRoute.eroute.SysTableTableName[tableName] - if !ok { - lRoute.eroute.SysTableTableName[tableName] = expr - } - } - - // Since the routes have merged, set st.singleRoute to point at - // the merged route. - pb.st.singleRoute = lRoute - if ajoin == nil { - return nil - } - pullouts, _, expr, err := pb.findOrigin(ajoin.Condition.On, reservedVars) - if err != nil { - return err - } - ajoin.Condition.On = expr - pb.addPullouts(pullouts) - for _, filter := range sqlparser.SplitAndExpression(nil, ajoin.Condition.On) { - lRoute.UpdatePlan(pb, filter) - } - return nil -} diff --git a/go/vt/vtgate/planbuilder/gen4_compare_v3_planner.go b/go/vt/vtgate/planbuilder/gen4_compare_v3_planner.go deleted file mode 100644 index 28879258dd0..00000000000 --- a/go/vt/vtgate/planbuilder/gen4_compare_v3_planner.go +++ /dev/null @@ -1,132 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine" -) - -func gen4CompareV3Planner(query string) func(sqlparser.Statement, *sqlparser.ReservedVars, plancontext.VSchema) (*planResult, error) { - return func(statement sqlparser.Statement, vars *sqlparser.ReservedVars, ctxVSchema plancontext.VSchema) (*planResult, error) { - // we will be switching the planner version to Gen4 and V3 in order to - // create instructions using them, thus we make sure to switch back to - // the Gen4CompareV3 planner before exiting this method. - defer ctxVSchema.SetPlannerVersion(Gen4CompareV3) - switch statement.(type) { - case *sqlparser.Select, *sqlparser.Union: - // These we can compare. Everything else we'll just use the Gen4 planner - default: - return planWithPlannerVersion(statement, vars, ctxVSchema, query, Gen4) - } - - // preliminary checks on the given statement - onlyGen4, hasOrderBy, err := preliminaryChecks(statement) - if err != nil { - return nil, err - } - - // plan statement using Gen4 - gen4Primitive, gen4Err := planWithPlannerVersion(statement, vars, ctxVSchema, query, Gen4) - - // if onlyGen4 is set to true or Gen4's instruction contain a lock primitive, - // we use only Gen4's primitive and exit early without using V3's. - // since lock primitives can imply the creation or deletion of locks, - // we want to execute them once using Gen4 to avoid the duplicated locks - // or double lock-releases. - if onlyGen4 || (gen4Primitive != nil && hasLockPrimitive(gen4Primitive.primitive)) { - return gen4Primitive, gen4Err - } - - // get V3's plan - v3Primitive, v3Err := planWithPlannerVersion(statement, vars, ctxVSchema, query, V3) - - // check potential errors from Gen4 and V3 - err = engine.CompareErrors(v3Err, gen4Err, "v3", "Gen4") - if err != nil { - return nil, err - } - - primitive := &engine.Gen4CompareV3{ - V3: v3Primitive.primitive, - Gen4: gen4Primitive.primitive, - HasOrderBy: hasOrderBy, - } - - return newPlanResult(primitive, gen4Primitive.tables...), nil - } -} - -func preliminaryChecks(statement sqlparser.Statement) (bool, bool, error) { - var onlyGen4, hasOrderBy bool - switch s := statement.(type) { - case *sqlparser.Union: - hasOrderBy = len(s.OrderBy) > 0 - - // walk through the union and search for select statements that have - // a next val select expression, in which case we need to only use - // the Gen4 planner instead of using both Gen4 and V3 to avoid unintended - // double-incrementation of sequence. - err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - if _, isNextVal := node.(*sqlparser.Nextval); isNextVal { - onlyGen4 = true - return false, nil - } - return true, nil - }, s) - if err != nil { - return false, false, err - } - case *sqlparser.Select: - hasOrderBy = len(s.OrderBy) > 0 - - for _, expr := range s.SelectExprs { - // we are not executing the plan a second time if the query is a select next val, - // since the first execution might increment the `next` value, results will almost - // always be different between v3 and Gen4. - if _, nextVal := expr.(*sqlparser.Nextval); nextVal { - onlyGen4 = true - break - } - } - } - return onlyGen4, hasOrderBy, nil -} - -func planWithPlannerVersion(statement sqlparser.Statement, vars *sqlparser.ReservedVars, ctxVSchema plancontext.VSchema, query string, version plancontext.PlannerVersion) (*planResult, error) { - ctxVSchema.SetPlannerVersion(version) - stmt := sqlparser.CloneStatement(statement) - return createInstructionFor(query, stmt, vars, ctxVSchema, false, false) -} - -// hasLockPrimitive recursively walks through the given primitive and its children -// to see if there are any engine.Lock primitive. -func hasLockPrimitive(primitive engine.Primitive) bool { - switch primitive.(type) { - case *engine.Lock: - return true - default: - for _, p := range primitive.Inputs() { - if hasLockPrimitive(p) { - return true - } - } - } - return false -} diff --git a/go/vt/vtgate/planbuilder/gen4_planner.go b/go/vt/vtgate/planbuilder/gen4_planner.go deleted file mode 100644 index dc49ae0a700..00000000000 --- a/go/vt/vtgate/planbuilder/gen4_planner.go +++ /dev/null @@ -1,592 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" - "vitess.io/vitess/go/vt/vtgate/vindexes" -) - -func gen4Planner(query string, plannerVersion querypb.ExecuteOptions_PlannerVersion) stmtPlanner { - return func(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - switch stmt := stmt.(type) { - case sqlparser.SelectStatement: - return gen4SelectStmtPlanner(query, plannerVersion, stmt, reservedVars, vschema) - case *sqlparser.Update: - return gen4UpdateStmtPlanner(plannerVersion, stmt, reservedVars, vschema) - case *sqlparser.Delete: - return gen4DeleteStmtPlanner(plannerVersion, stmt, reservedVars, vschema) - default: - return nil, vterrors.VT12001(fmt.Sprintf("%T", stmt)) - } - } -} - -func gen4SelectStmtPlanner( - query string, - plannerVersion querypb.ExecuteOptions_PlannerVersion, - stmt sqlparser.SelectStatement, - reservedVars *sqlparser.ReservedVars, - vschema plancontext.VSchema, -) (*planResult, error) { - switch node := stmt.(type) { - case *sqlparser.Select: - if node.With != nil { - return nil, vterrors.VT12001("WITH expression in SELECT statement") - } - case *sqlparser.Union: - if node.With != nil { - return nil, vterrors.VT12001("WITH expression in UNION statement") - } - } - - sel, isSel := stmt.(*sqlparser.Select) - if isSel { - // handle dual table for processing at vtgate. - p, err := handleDualSelects(sel, vschema) - if err != nil { - return nil, err - } - if p != nil { - used := "dual" - keyspace, ksErr := vschema.DefaultKeyspace() - if ksErr == nil { - // we are just getting the ks to log the correct table use. - // no need to fail this if we can't find the default keyspace - used = keyspace.Name + ".dual" - } - return newPlanResult(p, used), nil - } - - if sel.SQLCalcFoundRows && sel.Limit != nil { - return gen4planSQLCalcFoundRows(vschema, sel, query, reservedVars) - } - // if there was no limit, we can safely ignore the SQLCalcFoundRows directive - sel.SQLCalcFoundRows = false - } - - getPlan := func(selStatement sqlparser.SelectStatement) (logicalPlan, *semantics.SemTable, []string, error) { - return newBuildSelectPlan(selStatement, reservedVars, vschema, plannerVersion) - } - - plan, _, tablesUsed, err := getPlan(stmt) - if err != nil { - return nil, err - } - - if shouldRetryAfterPredicateRewriting(plan) { - // by transforming the predicates to CNF, the planner will sometimes find better plans - plan2, _, tablesUsed := gen4PredicateRewrite(stmt, getPlan) - if plan2 != nil { - return newPlanResult(plan2.Primitive(), tablesUsed...), nil - } - } - - primitive := plan.Primitive() - if !isSel { - return newPlanResult(primitive, tablesUsed...), nil - } - - // this is done because engine.Route doesn't handle the empty result well - // if it doesn't find a shard to send the query to. - // All other engine primitives can handle this, so we only need it when - // Route is the last (and only) instruction before the user sees a result - if isOnlyDual(sel) || (len(sel.GroupBy) == 0 && sel.SelectExprs.AllAggregation()) { - switch prim := primitive.(type) { - case *engine.Route: - prim.NoRoutesSpecialHandling = true - case *engine.VindexLookup: - prim.SendTo.NoRoutesSpecialHandling = true - } - } - return newPlanResult(primitive, tablesUsed...), nil -} - -func gen4planSQLCalcFoundRows(vschema plancontext.VSchema, sel *sqlparser.Select, query string, reservedVars *sqlparser.ReservedVars) (*planResult, error) { - ksName := "" - if ks, _ := vschema.DefaultKeyspace(); ks != nil { - ksName = ks.Name - } - semTable, err := semantics.Analyze(sel, ksName, vschema) - if err != nil { - return nil, err - } - // record any warning as planner warning. - vschema.PlannerWarning(semTable.Warning) - - plan, tablesUsed, err := buildSQLCalcFoundRowsPlan(query, sel, reservedVars, vschema, planSelectGen4) - if err != nil { - return nil, err - } - return newPlanResult(plan.Primitive(), tablesUsed...), nil -} - -func planSelectGen4(reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, sel *sqlparser.Select) (*jointab, logicalPlan, []string, error) { - plan, _, tablesUsed, err := newBuildSelectPlan(sel, reservedVars, vschema, 0) - if err != nil { - return nil, nil, nil, err - } - return nil, plan, tablesUsed, nil -} - -func gen4PredicateRewrite(stmt sqlparser.Statement, getPlan func(selStatement sqlparser.SelectStatement) (logicalPlan, *semantics.SemTable, []string, error)) (logicalPlan, *semantics.SemTable, []string) { - rewritten, isSel := sqlparser.RewritePredicate(stmt).(sqlparser.SelectStatement) - if !isSel { - // Fail-safe code, should never happen - return nil, nil, nil - } - plan2, st, op, err := getPlan(rewritten) - if err == nil && !shouldRetryAfterPredicateRewriting(plan2) { - // we only use this new plan if it's better than the old one we got - return plan2, st, op - } - return nil, nil, nil -} - -func newBuildSelectPlan( - selStmt sqlparser.SelectStatement, - reservedVars *sqlparser.ReservedVars, - vschema plancontext.VSchema, - version querypb.ExecuteOptions_PlannerVersion, -) (plan logicalPlan, semTable *semantics.SemTable, tablesUsed []string, err error) { - ksName := "" - if ks, _ := vschema.DefaultKeyspace(); ks != nil { - ksName = ks.Name - } - semTable, err = semantics.Analyze(selStmt, ksName, vschema) - if err != nil { - return nil, nil, nil, err - } - // record any warning as planner warning. - vschema.PlannerWarning(semTable.Warning) - - ctx := plancontext.NewPlanningContext(reservedVars, semTable, vschema, version) - - if ks, _ := semTable.SingleUnshardedKeyspace(); ks != nil { - plan, tablesUsed, err = unshardedShortcut(ctx, selStmt, ks) - if err != nil { - return nil, nil, nil, err - } - plan, err = pushCommentDirectivesOnPlan(plan, selStmt) - if err != nil { - return nil, nil, nil, err - } - return plan, semTable, tablesUsed, err - } - - // From this point on, we know it is not an unsharded query and return the NotUnshardedErr if there is any - if semTable.NotUnshardedErr != nil { - return nil, nil, nil, semTable.NotUnshardedErr - } - - err = queryRewrite(semTable, reservedVars, selStmt) - if err != nil { - return nil, nil, nil, err - } - - op, err := operators.PlanQuery(ctx, selStmt) - if err != nil { - return nil, nil, nil, err - } - - plan, err = transformToLogicalPlan(ctx, op, true) - if err != nil { - return nil, nil, nil, err - } - - plan = optimizePlan(plan) - - sel, isSel := selStmt.(*sqlparser.Select) - if isSel { - if err = setMiscFunc(plan, sel); err != nil { - return nil, nil, nil, err - } - } - - if err = plan.WireupGen4(ctx); err != nil { - return nil, nil, nil, err - } - - plan, err = pushCommentDirectivesOnPlan(plan, selStmt) - if err != nil { - return nil, nil, nil, err - } - - return plan, semTable, operators.TablesUsed(op), nil -} - -// optimizePlan removes unnecessary simpleProjections that have been created while planning -func optimizePlan(plan logicalPlan) logicalPlan { - newPlan, _ := visit(plan, func(plan logicalPlan) (bool, logicalPlan, error) { - this, ok := plan.(*simpleProjection) - if !ok { - return true, plan, nil - } - - input, ok := this.input.(*simpleProjection) - if !ok { - return true, plan, nil - } - - for i, col := range this.eSimpleProj.Cols { - this.eSimpleProj.Cols[i] = input.eSimpleProj.Cols[col] - } - this.input = input.input - return true, this, nil - }) - return newPlan -} - -func gen4UpdateStmtPlanner( - version querypb.ExecuteOptions_PlannerVersion, - updStmt *sqlparser.Update, - reservedVars *sqlparser.ReservedVars, - vschema plancontext.VSchema, -) (*planResult, error) { - if updStmt.With != nil { - return nil, vterrors.VT12001("WITH expression in UPDATE statement") - } - - ksName := "" - if ks, _ := vschema.DefaultKeyspace(); ks != nil { - ksName = ks.Name - } - semTable, err := semantics.Analyze(updStmt, ksName, vschema) - if err != nil { - return nil, err - } - // record any warning as planner warning. - vschema.PlannerWarning(semTable.Warning) - - err = rewriteRoutedTables(updStmt, vschema) - if err != nil { - return nil, err - } - - if ks, tables := semTable.SingleUnshardedKeyspace(); ks != nil { - edml := engine.NewDML() - edml.Keyspace = ks - edml.Table = tables - edml.Opcode = engine.Unsharded - edml.Query = generateQuery(updStmt) - upd := &engine.Update{DML: edml} - return newPlanResult(upd, operators.QualifiedTables(ks, tables)...), nil - } - - if semTable.NotUnshardedErr != nil { - return nil, semTable.NotUnshardedErr - } - - err = queryRewrite(semTable, reservedVars, updStmt) - if err != nil { - return nil, err - } - - ctx := plancontext.NewPlanningContext(reservedVars, semTable, vschema, version) - - op, err := operators.PlanQuery(ctx, updStmt) - if err != nil { - return nil, err - } - - plan, err := transformToLogicalPlan(ctx, op, true) - if err != nil { - return nil, err - } - - plan, err = pushCommentDirectivesOnPlan(plan, updStmt) - if err != nil { - return nil, err - } - - setLockOnAllSelect(plan) - - if err := plan.WireupGen4(ctx); err != nil { - return nil, err - } - - return newPlanResult(plan.Primitive(), operators.TablesUsed(op)...), nil -} - -func gen4DeleteStmtPlanner( - version querypb.ExecuteOptions_PlannerVersion, - deleteStmt *sqlparser.Delete, - reservedVars *sqlparser.ReservedVars, - vschema plancontext.VSchema, -) (*planResult, error) { - if deleteStmt.With != nil { - return nil, vterrors.VT12001("WITH expression in DELETE statement") - } - - var err error - if len(deleteStmt.TableExprs) == 1 && len(deleteStmt.Targets) == 1 { - deleteStmt, err = rewriteSingleTbl(deleteStmt) - if err != nil { - return nil, err - } - } - - ksName := "" - if ks, _ := vschema.DefaultKeyspace(); ks != nil { - ksName = ks.Name - } - semTable, err := semantics.Analyze(deleteStmt, ksName, vschema) - if err != nil { - return nil, err - } - - // record any warning as planner warning. - vschema.PlannerWarning(semTable.Warning) - err = rewriteRoutedTables(deleteStmt, vschema) - if err != nil { - return nil, err - } - - if ks, tables := semTable.SingleUnshardedKeyspace(); ks != nil { - edml := engine.NewDML() - edml.Keyspace = ks - edml.Table = tables - edml.Opcode = engine.Unsharded - edml.Query = generateQuery(deleteStmt) - del := &engine.Delete{DML: edml} - return newPlanResult(del, operators.QualifiedTables(ks, tables)...), nil - } - - if err := checkIfDeleteSupported(deleteStmt, semTable); err != nil { - return nil, err - } - - err = queryRewrite(semTable, reservedVars, deleteStmt) - if err != nil { - return nil, err - } - - ctx := plancontext.NewPlanningContext(reservedVars, semTable, vschema, version) - op, err := operators.PlanQuery(ctx, deleteStmt) - if err != nil { - return nil, err - } - - plan, err := transformToLogicalPlan(ctx, op, true) - if err != nil { - return nil, err - } - - plan, err = pushCommentDirectivesOnPlan(plan, deleteStmt) - if err != nil { - return nil, err - } - - setLockOnAllSelect(plan) - - if err := plan.WireupGen4(ctx); err != nil { - return nil, err - } - - return newPlanResult(plan.Primitive(), operators.TablesUsed(op)...), nil -} - -func rewriteRoutedTables(stmt sqlparser.Statement, vschema plancontext.VSchema) error { - // Rewrite routed tables - return sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - aliasTbl, isAlias := node.(*sqlparser.AliasedTableExpr) - if !isAlias { - return true, nil - } - tableName, ok := aliasTbl.Expr.(sqlparser.TableName) - if !ok { - return true, nil - } - var vschemaTable *vindexes.Table - vschemaTable, _, _, _, _, err = vschema.FindTableOrVindex(tableName) - if err != nil { - return false, err - } - - if vschemaTable.Name.String() != tableName.Name.String() { - name := tableName.Name - if aliasTbl.As.IsEmpty() { - // if the user hasn't specified an alias, we'll insert one here so the old table name still works - aliasTbl.As = sqlparser.NewIdentifierCS(name.String()) - } - tableName.Name = sqlparser.NewIdentifierCS(vschemaTable.Name.String()) - aliasTbl.Expr = tableName - } - - return true, nil - }, stmt) -} - -func setLockOnAllSelect(plan logicalPlan) { - _, _ = visit(plan, func(plan logicalPlan) (bool, logicalPlan, error) { - switch node := plan.(type) { - case *routeGen4: - node.Select.SetLock(sqlparser.ShareModeLock) - return true, node, nil - } - return true, plan, nil - }) -} - -func planLimit(limit *sqlparser.Limit, plan logicalPlan) (logicalPlan, error) { - if limit == nil { - return plan, nil - } - rb, ok := plan.(*routeGen4) - if ok && rb.isSingleShard() { - rb.SetLimit(limit) - return plan, nil - } - - lPlan, err := createLimit(plan, limit) - if err != nil { - return nil, err - } - - // visit does not modify the plan. - _, err = visit(lPlan, setUpperLimit) - if err != nil { - return nil, err - } - return lPlan, nil -} - -func planHorizon(ctx *plancontext.PlanningContext, plan logicalPlan, in sqlparser.SelectStatement, truncateColumns bool) (logicalPlan, error) { - switch node := in.(type) { - case *sqlparser.Select: - hp := horizonPlanning{ - sel: node, - } - - replaceSubQuery(ctx, node) - var err error - plan, err = hp.planHorizon(ctx, plan, truncateColumns) - if err != nil { - return nil, err - } - plan, err = planLimit(node.Limit, plan) - if err != nil { - return nil, err - } - case *sqlparser.Union: - var err error - rb, isRoute := plan.(*routeGen4) - if !isRoute && ctx.SemTable.NotSingleRouteErr != nil { - return nil, ctx.SemTable.NotSingleRouteErr - } - if isRoute && rb.isSingleShard() { - err = planSingleShardRoutePlan(node, rb) - } else { - plan, err = planOrderByOnUnion(ctx, plan, node) - } - if err != nil { - return nil, err - } - - plan, err = planLimit(node.Limit, plan) - if err != nil { - return nil, err - } - } - return plan, nil - -} - -func planOrderByOnUnion(ctx *plancontext.PlanningContext, plan logicalPlan, union *sqlparser.Union) (logicalPlan, error) { - qp, err := operators.CreateQPFromUnion(union) - if err != nil { - return nil, err - } - hp := horizonPlanning{ - qp: qp, - } - if len(qp.OrderExprs) > 0 { - plan, err = hp.planOrderBy(ctx, qp.OrderExprs, plan) - if err != nil { - return nil, err - } - } - return plan, nil -} - -func pushCommentDirectivesOnPlan(plan logicalPlan, stmt sqlparser.Statement) (logicalPlan, error) { - var directives *sqlparser.CommentDirectives - cmt, ok := stmt.(sqlparser.Commented) - if ok { - directives = cmt.GetParsedComments().Directives() - scatterAsWarns := directives.IsSet(sqlparser.DirectiveScatterErrorsAsWarnings) - timeout := queryTimeout(directives) - - if scatterAsWarns || timeout > 0 { - _, _ = visit(plan, func(logicalPlan logicalPlan) (bool, logicalPlan, error) { - switch plan := logicalPlan.(type) { - case *routeGen4: - plan.eroute.ScatterErrorsAsWarnings = scatterAsWarns - plan.eroute.QueryTimeout = timeout - } - return true, logicalPlan, nil - }) - } - } - - return plan, nil -} - -// checkIfDeleteSupported checks if the delete query is supported or we must return an error. -func checkIfDeleteSupported(del *sqlparser.Delete, semTable *semantics.SemTable) error { - if semTable.NotUnshardedErr != nil { - return semTable.NotUnshardedErr - } - - // Delete is only supported for a single TableExpr which is supposed to be an aliased expression - multiShardErr := vterrors.VT12001("multi-shard or vindex write statement") - if len(del.TableExprs) != 1 { - return multiShardErr - } - _, isAliasedExpr := del.TableExprs[0].(*sqlparser.AliasedTableExpr) - if !isAliasedExpr { - return multiShardErr - } - - if len(del.Targets) > 1 { - return vterrors.VT12001("multi-table DELETE statement in a sharded keyspace") - } - - err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - switch node.(type) { - case *sqlparser.Subquery, *sqlparser.DerivedTable: - // We have a subquery, so we must fail the planning. - // If this subquery and the table expression were all belonging to the same unsharded keyspace, - // we would have already created a plan for them before doing these checks. - return false, vterrors.VT12001("subqueries in DML") - } - return true, nil - }, del) - if err != nil { - return err - } - - return nil -} diff --git a/go/vt/vtgate/planbuilder/grouping.go b/go/vt/vtgate/planbuilder/grouping.go deleted file mode 100644 index 0bd10666029..00000000000 --- a/go/vt/vtgate/planbuilder/grouping.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" -) - -func planGroupBy(pb *primitiveBuilder, input logicalPlan, groupBy sqlparser.GroupBy) (logicalPlan, error) { - if len(groupBy) == 0 { - // if we have no grouping declared, we only want to visit orderedAggregate - _, isOrdered := input.(*orderedAggregate) - if !isOrdered { - return input, nil - } - } - - switch node := input.(type) { - case *mergeSort, *pulloutSubquery, *distinct: - inputs := node.Inputs() - input := inputs[0] - - newInput, err := planGroupBy(pb, input, groupBy) - if err != nil { - return nil, err - } - inputs[0] = newInput - err = node.Rewrite(inputs...) - if err != nil { - return nil, err - } - return node, nil - case *route: - node.Select.(*sqlparser.Select).GroupBy = groupBy - return node, nil - case *orderedAggregate: - for _, expr := range groupBy { - colNumber := -1 - switch e := expr.(type) { - case *sqlparser.ColName: - c := e.Metadata.(*column) - if c.Origin() == node { - return nil, vterrors.VT03005(sqlparser.String(e)) - } - for i, rc := range node.resultColumns { - if rc.column == c { - colNumber = i - break - } - } - if colNumber == -1 { - return nil, vterrors.VT12001("in scatter query: GROUP BY column must reference column in SELECT list") - } - case *sqlparser.Literal: - num, err := ResultFromNumber(node.resultColumns, e, "group statement") - if err != nil { - return nil, err - } - colNumber = num - default: - return nil, vterrors.VT12001("in scatter query: only simple references are allowed") - } - node.groupByKeys = append(node.groupByKeys, &engine.GroupByParams{KeyCol: colNumber, WeightStringCol: -1, FromGroupBy: true}) - } - // Append the distinct aggregate if any. - if node.extraDistinct != nil { - groupBy = append(groupBy, node.extraDistinct) - } - - newInput, err := planGroupBy(pb, node.input, groupBy) - if err != nil { - return nil, err - } - node.input = newInput - - return node, nil - } - return nil, vterrors.VT13001(fmt.Sprintf("unreachable %T.groupBy: ", input)) -} - -// planDistinct makes the output distinct -func planDistinct(input logicalPlan) (logicalPlan, error) { - switch node := input.(type) { - case *route: - node.Select.MakeDistinct() - return node, nil - case *orderedAggregate: - for i, rc := range node.resultColumns { - // If the column origin is oa (and not the underlying route), - // it means that it's an aggregate function supplied by oa. - // So, the distinct 'operator' cannot be pushed down into the - // route. - if rc.column.Origin() == node { - return newDistinctV3(node), nil - } - node.groupByKeys = append(node.groupByKeys, &engine.GroupByParams{KeyCol: i, WeightStringCol: -1, FromGroupBy: false}) - } - newInput, err := planDistinct(node.input) - if err != nil { - return nil, err - } - node.input = newInput - return node, nil - - case *distinct: - return input, nil - } - - return nil, vterrors.VT13001(fmt.Sprintf("unreachable %T.distinct", input)) -} diff --git a/go/vt/vtgate/planbuilder/hash_join.go b/go/vt/vtgate/planbuilder/hash_join.go index cef2f30bead..3b60d6a4efd 100644 --- a/go/vt/vtgate/planbuilder/hash_join.go +++ b/go/vt/vtgate/planbuilder/hash_join.go @@ -32,7 +32,6 @@ var _ logicalPlan = (*hashJoin)(nil) // hashJoin is used to build a HashJoin primitive. type hashJoin struct { - gen4Plan // Left and Right are the nodes for the join. Left, Right logicalPlan @@ -51,12 +50,12 @@ type hashJoin struct { } // WireupGen4 implements the logicalPlan interface -func (hj *hashJoin) WireupGen4(ctx *plancontext.PlanningContext) error { - err := hj.Left.WireupGen4(ctx) +func (hj *hashJoin) Wireup(ctx *plancontext.PlanningContext) error { + err := hj.Left.Wireup(ctx) if err != nil { return err } - return hj.Right.WireupGen4(ctx) + return hj.Right.Wireup(ctx) } // Primitive implements the logicalPlan interface diff --git a/go/vt/vtgate/planbuilder/horizon_planning.go b/go/vt/vtgate/planbuilder/horizon_planning.go index eea1400b916..f6c470d3e8b 100644 --- a/go/vt/vtgate/planbuilder/horizon_planning.go +++ b/go/vt/vtgate/planbuilder/horizon_planning.go @@ -20,15 +20,14 @@ import ( "fmt" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" + popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" - - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine" ) type horizonPlanning struct { @@ -37,14 +36,14 @@ type horizonPlanning struct { } func (hp *horizonPlanning) planHorizon(ctx *plancontext.PlanningContext, plan logicalPlan, truncateColumns bool) (logicalPlan, error) { - rb, isRoute := plan.(*routeGen4) + rb, isRoute := plan.(*route) if !isRoute && ctx.SemTable.NotSingleRouteErr != nil { // If we got here, we don't have a single shard plan return nil, ctx.SemTable.NotSingleRouteErr } if isRoute && rb.isSingleShard() { - err := planSingleShardRoutePlan(hp.sel, rb) + err := planSingleRoutePlan(hp.sel, rb) if err != nil { return nil, err } @@ -60,7 +59,8 @@ func (hp *horizonPlanning) planHorizon(ctx *plancontext.PlanningContext, plan lo // a simpleProjection. We create a new Route that contains the derived table in the // FROM clause. Meaning that, when we push expressions to the select list of this // new Route, we do not want them to rewrite them. - if _, isSimpleProj := plan.(*simpleProjection); isSimpleProj { + sp, derivedTable := plan.(*simpleProjection) + if derivedTable { oldRewriteDerivedExpr := ctx.RewriteDerivedExpr defer func() { ctx.RewriteDerivedExpr = oldRewriteDerivedExpr @@ -69,16 +69,17 @@ func (hp *horizonPlanning) planHorizon(ctx *plancontext.PlanningContext, plan lo } var err error - hp.qp, err = operators.CreateQPFromSelect(ctx, hp.sel) + hp.qp, err = operators.CreateQPFromSelectStatement(ctx, hp.sel) if err != nil { return nil, err } needsOrdering := len(hp.qp.OrderExprs) > 0 - canShortcut := isRoute && hp.sel.Having == nil && !needsOrdering // If we still have a HAVING clause, it's because it could not be pushed to the WHERE, // so it probably has aggregations + canShortcut := isRoute && hp.sel.Having == nil && !needsOrdering + switch { case hp.qp.NeedsAggregation() || hp.sel.Having != nil: plan, err = hp.planAggregations(ctx, plan) @@ -88,10 +89,30 @@ func (hp *horizonPlanning) planHorizon(ctx *plancontext.PlanningContext, plan lo // if we already did sorting, we don't need to do it again needsOrdering = needsOrdering && !hp.qp.CanPushDownSorting case canShortcut: - err = planSingleShardRoutePlan(hp.sel, rb) + err = planSingleRoutePlan(hp.sel, rb) + if err != nil { + return nil, err + } + case derivedTable: + pusher := func(ae *sqlparser.AliasedExpr) (int, error) { + offset, _, err := pushProjection(ctx, ae, sp.input, true, true, false) + return offset, err + } + needsVtGate, projections, colNames, err := hp.qp.NeedsProjecting(ctx, pusher) if err != nil { return nil, err } + if !needsVtGate { + break + } + + // there were some expressions we could not push down entirely, + // so replace the simpleProjection with a real projection + plan = &projection{ + source: sp.input, + columns: projections, + columnNames: colNames, + } default: err = pushProjections(ctx, plan, hp.qp.SelectExprs) if err != nil { @@ -142,9 +163,9 @@ func (hp *horizonPlanning) truncateColumnsIfNeeded(ctx *plancontext.PlanningCont return plan, nil } switch p := plan.(type) { - case *routeGen4: + case *route: p.eroute.SetTruncateColumnCount(hp.qp.GetColumnCount()) - case *joinGen4, *semiJoin, *hashJoin: + case *join, *semiJoin, *hashJoin: // since this is a join, we can safely add extra columns and not need to truncate them case *orderedAggregate: p.truncateColumnCount = hp.qp.GetColumnCount() @@ -243,9 +264,9 @@ func (hp *horizonPlanning) planAggrUsingOA( groupByKeys: make([]*engine.GroupByParams, 0, len(grouping)), } - var order []operators.OrderBy + var order []ops.OrderBy if hp.qp.CanPushDownSorting { - hp.qp.AlignGroupByAndOrderBy(ctx) + hp.qp.OldAlignGroupByAndOrderBy(ctx) // the grouping order might have changed, so we reload the grouping expressions grouping = hp.qp.GetGrouping() order = hp.qp.OrderExprs @@ -258,10 +279,12 @@ func (hp *horizonPlanning) planAggrUsingOA( // here we are building up the grouping keys for the OA, // but they are lacking the input offsets because we have yet to push the columns down for _, expr := range grouping { + typ, col, _ := ctx.SemTable.TypeForExpr(expr.Inner) oa.groupByKeys = append(oa.groupByKeys, &engine.GroupByParams{ Expr: expr.Inner, FromGroupBy: true, - CollationID: ctx.SemTable.CollationForExpr(expr.Inner), + Type: typ, + CollationID: col, }) } @@ -273,7 +296,7 @@ func (hp *horizonPlanning) planAggrUsingOA( } } - aggregationExprs, err := hp.qp.AggregationExpressions(ctx) + aggregationExprs, _, err := hp.qp.AggregationExpressions(ctx, false) if err != nil { return nil, err } @@ -289,21 +312,20 @@ func (hp *horizonPlanning) planAggrUsingOA( grouping = append(grouping, distinctGroupBy...) // all the distinct grouping aggregates use the same expression, so it should be OK to just add it once order = append(order, distinctGroupBy[0].AsOrderBy()) - oa.preProcess = true + } + + if err = unsupportedAggregations(aggrs); err != nil { + return nil, err } newPlan, groupingOffsets, aggrParamOffsets, pushed, err := hp.pushAggregation(ctx, plan, grouping, aggrs, false) if err != nil { return nil, err } - if !pushed { - oa.preProcess = true - oa.aggrOnEngine = true - } plan = newPlan - _, isRoute := plan.(*routeGen4) + _, isRoute := plan.(*route) needsProj := !isRoute var aggPlan = plan var proj *projection @@ -337,14 +359,20 @@ func (hp *horizonPlanning) planAggrUsingOA( return nil, err } - oa.resultsBuilder = resultsBuilder{ - logicalPlanCommon: newBuilderCommon(aggPlan), - weightStrings: make(map[*resultColumn]int), - } + oa.resultsBuilder = newResultsBuilder(aggPlan, nil) return hp.planHaving(ctx, oa) } +func unsupportedAggregations(aggrs []operators.Aggr) error { + for _, aggr := range aggrs { + if aggr.OpCode == popcode.AggregateGroupConcat { + return vterrors.VT12001(fmt.Sprintf("in scatter query: aggregation function '%s'", sqlparser.String(aggr.Func))) + } + } + return nil +} + func passGroupingColumns(proj *projection, groupings []offsets, grouping []operators.GroupBy) (projGrpOffsets []offsets, err error) { for idx, grp := range groupings { origGrp := grouping[idx] @@ -374,7 +402,7 @@ func generateAggregateParams(aggrs []operators.Aggr, aggrParamOffsets [][]offset if proj != nil { var aggrExpr sqlparser.Expr for _, ofs := range paramOffset { - curr := &sqlparser.Offset{V: ofs.col} + curr := sqlparser.NewOffset(ofs.col, aggr.Func) if aggrExpr == nil { aggrExpr = curr } else { @@ -401,24 +429,21 @@ func generateAggregateParams(aggrs []operators.Aggr, aggrParamOffsets [][]offset offset = incomingOffset } - opcode := engine.AggregateSum + opcode := popcode.AggregateSum switch aggr.OpCode { - case engine.AggregateMin, engine.AggregateMax, engine.AggregateRandom: + case popcode.AggregateMin, popcode.AggregateMax, popcode.AggregateAnyValue: opcode = aggr.OpCode - case engine.AggregateCount, engine.AggregateCountStar, engine.AggregateCountDistinct, engine.AggregateSumDistinct: + case popcode.AggregateCount, popcode.AggregateCountStar, popcode.AggregateCountDistinct, popcode.AggregateSumDistinct: if !pushed { opcode = aggr.OpCode } } - aggrParams[idx] = &engine.AggregateParams{ - Opcode: opcode, - Col: offset, - Alias: aggr.Alias, - Expr: aggr.Original.Expr, - Original: aggr.Original, - OrigOpcode: aggr.OpCode, - } + aggrParam := engine.NewAggregateParam(opcode, offset, aggr.Alias) + aggrParam.Expr = aggr.Original.Expr + aggrParam.Original = aggr.Original + aggrParam.OrigOpcode = aggr.OpCode + aggrParams[idx] = aggrParam } return aggrParams, nil } @@ -448,17 +473,12 @@ func addColumnsToOA( o := groupings[count] count++ a := aggregationExprs[offset] - collID := ctx.SemTable.CollationForExpr(a.Func.GetArg()) - oa.aggregates = append(oa.aggregates, &engine.AggregateParams{ - Opcode: a.OpCode, - Col: o.col, - KeyCol: o.col, - WAssigned: o.wsCol >= 0, - WCol: o.wsCol, - Alias: a.Alias, - Original: a.Original, - CollationID: collID, - }) + aggr := engine.NewAggregateParam(a.OpCode, o.col, a.Alias) + aggr.KeyCol = o.col + aggr.WCol = o.wsCol + aggr.Original = a.Original + aggr.Type, aggr.CollationID, _ = ctx.SemTable.TypeForExpr(a.Func.GetArg()) + oa.aggregates = append(oa.aggregates, aggr) } lastOffset := distinctOffsets[len(distinctOffsets)-1] distinctIdx := 0 @@ -495,7 +515,8 @@ func (hp *horizonPlanning) handleDistinctAggr(ctx *plancontext.PlanningContext, continue } - inner, innerWS, err := hp.qp.GetSimplifiedExpr(expr.Func.GetArg()) + inner := expr.Func.GetArg() + innerWS := hp.qp.GetSimplifiedExpr(inner) if err != nil { return nil, nil, nil, err } @@ -511,11 +532,9 @@ func (hp *horizonPlanning) handleDistinctAggr(ctx *plancontext.PlanningContext, return nil, nil, nil, err } } - distincts = append(distincts, operators.GroupBy{ - Inner: inner, - WeightStrExpr: innerWS, - InnerIndex: expr.Index, - }) + groupBy := operators.NewGroupBy(inner, innerWS, nil) + groupBy.InnerIndex = expr.Index + distincts = append(distincts, groupBy) offsets = append(offsets, i) } return @@ -554,53 +573,33 @@ func newOffset(col int) offsets { func (hp *horizonPlanning) createGroupingsForColumns(columns []*sqlparser.ColName) ([]operators.GroupBy, error) { var lhsGrouping []operators.GroupBy for _, lhsColumn := range columns { - expr, wsExpr, err := hp.qp.GetSimplifiedExpr(lhsColumn) - if err != nil { - return nil, err - } + wsExpr := hp.qp.GetSimplifiedExpr(lhsColumn) - lhsGrouping = append(lhsGrouping, operators.GroupBy{ - Inner: expr, - WeightStrExpr: wsExpr, - }) + lhsGrouping = append(lhsGrouping, operators.NewGroupBy(lhsColumn, wsExpr, nil)) } return lhsGrouping, nil } func hasUniqueVindex(semTable *semantics.SemTable, groupByExprs []operators.GroupBy) bool { for _, groupByExpr := range groupByExprs { - if exprHasUniqueVindex(semTable, groupByExpr.WeightStrExpr) { + if exprHasUniqueVindex(semTable, groupByExpr.SimplifiedExpr) { return true } } return false } -func (hp *horizonPlanning) planOrderBy(ctx *plancontext.PlanningContext, orderExprs []operators.OrderBy, plan logicalPlan) (logicalPlan, error) { +func (hp *horizonPlanning) planOrderBy(ctx *plancontext.PlanningContext, orderExprs []ops.OrderBy, plan logicalPlan) (logicalPlan, error) { switch plan := plan.(type) { - case *routeGen4: - newPlan, err := planOrderByForRoute(ctx, orderExprs, plan, hp.qp.HasStar) - if err != nil { - return nil, err - } - return newPlan, nil - case *joinGen4: - newPlan, err := hp.planOrderByForJoin(ctx, orderExprs, plan) - if err != nil { - return nil, err - } - - return newPlan, nil + case *route: + return planOrderByForRoute(ctx, orderExprs, plan, hp.qp.HasStar) + case *join: + return hp.planOrderByForJoin(ctx, orderExprs, plan) case *hashJoin: - newPlan, err := hp.planOrderByForHashJoin(ctx, orderExprs, plan) - if err != nil { - return nil, err - } - - return newPlan, nil + return hp.planOrderByForHashJoin(ctx, orderExprs, plan) case *orderedAggregate: // remove ORDER BY NULL from the list of order by expressions since we will be doing the ordering on vtgate level so NULL is not useful - var orderExprsWithoutNils []operators.OrderBy + var orderExprsWithoutNils []ops.OrderBy for _, expr := range orderExprs { if sqlparser.IsNull(expr.Inner.Expr) { continue @@ -610,7 +609,7 @@ func (hp *horizonPlanning) planOrderBy(ctx *plancontext.PlanningContext, orderEx orderExprs = orderExprsWithoutNils for _, order := range orderExprs { - if sqlparser.ContainsAggregation(order.WeightStrExpr) { + if sqlparser.ContainsAggregation(order.SimplifiedExpr) { ms, err := createMemorySortPlanOnAggregation(ctx, plan, orderExprs) if err != nil { return nil, err @@ -650,7 +649,7 @@ func (hp *horizonPlanning) planOrderBy(ctx *plancontext.PlanningContext, orderEx return nil, vterrors.VT13001(fmt.Sprintf("ORDER BY in complex query %T", plan)) } -func isSpecialOrderBy(o operators.OrderBy) bool { +func isSpecialOrderBy(o ops.OrderBy) bool { if sqlparser.IsNull(o.Inner.Expr) { return true } @@ -658,7 +657,7 @@ func isSpecialOrderBy(o operators.OrderBy) bool { return isFunction && f.Name.Lowered() == "rand" } -func planOrderByForRoute(ctx *plancontext.PlanningContext, orderExprs []operators.OrderBy, plan *routeGen4, hasStar bool) (logicalPlan, error) { +func planOrderByForRoute(ctx *plancontext.PlanningContext, orderExprs []ops.OrderBy, plan *route, hasStar bool) (logicalPlan, error) { for _, order := range orderExprs { err := checkOrderExprCanBePlannedInScatter(ctx, plan, order, hasStar) if err != nil { @@ -670,18 +669,20 @@ func planOrderByForRoute(ctx *plancontext.PlanningContext, orderExprs []operator } var wsExpr sqlparser.Expr if ctx.SemTable.NeedsWeightString(order.Inner.Expr) { - wsExpr = order.WeightStrExpr + wsExpr = order.SimplifiedExpr } offset, weightStringOffset, err := wrapAndPushExpr(ctx, order.Inner.Expr, wsExpr, plan) if err != nil { return nil, err } + typ, col, _ := ctx.SemTable.TypeForExpr(order.Inner.Expr) plan.eroute.OrderBy = append(plan.eroute.OrderBy, engine.OrderByParams{ Col: offset, WeightStringCol: weightStringOffset, Desc: order.Inner.Direction == sqlparser.DescOrder, - CollationID: ctx.SemTable.CollationForExpr(order.Inner.Expr), + Type: typ, + CollationID: col, }) } return plan, nil @@ -689,7 +690,7 @@ func planOrderByForRoute(ctx *plancontext.PlanningContext, orderExprs []operator // checkOrderExprCanBePlannedInScatter verifies that the given order by expression can be planned. // It checks if the expression exists in the plan's select list when the query is a scatter. -func checkOrderExprCanBePlannedInScatter(ctx *plancontext.PlanningContext, plan *routeGen4, order operators.OrderBy, hasStar bool) error { +func checkOrderExprCanBePlannedInScatter(ctx *plancontext.PlanningContext, plan *route, order ops.OrderBy, hasStar bool) error { if !hasStar { return nil } @@ -729,9 +730,9 @@ func wrapAndPushExpr(ctx *plancontext.PlanningContext, expr sqlparser.Expr, weig return 0, 0, vterrors.VT13001(fmt.Sprintf("in scatter query: complex ORDER BY expression: %s", sqlparser.String(expr))) } } - qt := ctx.SemTable.TypeFor(expr) + qt, _, found := ctx.SemTable.TypeForExpr(expr) wsNeeded := true - if qt != nil && sqltypes.IsNumber(*qt) { + if found && sqltypes.IsNumber(qt) { wsNeeded = false } @@ -750,7 +751,7 @@ func weightStringFor(expr sqlparser.Expr) sqlparser.Expr { return &sqlparser.WeightStringFuncExpr{Expr: expr} } -func (hp *horizonPlanning) planOrderByForHashJoin(ctx *plancontext.PlanningContext, orderExprs []operators.OrderBy, plan *hashJoin) (logicalPlan, error) { +func (hp *horizonPlanning) planOrderByForHashJoin(ctx *plancontext.PlanningContext, orderExprs []ops.OrderBy, plan *hashJoin) (logicalPlan, error) { if len(orderExprs) == 1 && isSpecialOrderBy(orderExprs[0]) { rhs, err := hp.planOrderBy(ctx, orderExprs, plan.Right) if err != nil { @@ -774,7 +775,7 @@ func (hp *horizonPlanning) planOrderByForHashJoin(ctx *plancontext.PlanningConte return sortPlan, nil } -func (hp *horizonPlanning) planOrderByForJoin(ctx *plancontext.PlanningContext, orderExprs []operators.OrderBy, plan *joinGen4) (logicalPlan, error) { +func (hp *horizonPlanning) planOrderByForJoin(ctx *plancontext.PlanningContext, orderExprs []ops.OrderBy, plan *join) (logicalPlan, error) { if len(orderExprs) == 1 && isSpecialOrderBy(orderExprs[0]) { lhs, err := hp.planOrderBy(ctx, orderExprs, plan.Left) if err != nil { @@ -805,15 +806,11 @@ func (hp *horizonPlanning) planOrderByForJoin(ctx *plancontext.PlanningContext, return sortPlan, nil } -func createMemorySortPlanOnAggregation(ctx *plancontext.PlanningContext, plan *orderedAggregate, orderExprs []operators.OrderBy) (logicalPlan, error) { +func createMemorySortPlanOnAggregation(ctx *plancontext.PlanningContext, plan *orderedAggregate, orderExprs []ops.OrderBy) (logicalPlan, error) { primitive := &engine.MemorySort{} ms := &memorySort{ - resultsBuilder: resultsBuilder{ - logicalPlanCommon: newBuilderCommon(plan), - weightStrings: make(map[*resultColumn]int), - truncater: primitive, - }, - eMemorySort: primitive, + resultsBuilder: newResultsBuilder(plan, primitive), + eMemorySort: primitive, } for _, order := range orderExprs { @@ -822,27 +819,28 @@ func createMemorySortPlanOnAggregation(ctx *plancontext.PlanningContext, plan *o return nil, vterrors.VT13001(fmt.Sprintf("expected to find ORDER BY expression (%s) in orderedAggregate", sqlparser.String(order.Inner))) } - collationID := ctx.SemTable.CollationForExpr(order.WeightStrExpr) + typ, collationID, _ := ctx.SemTable.TypeForExpr(order.SimplifiedExpr) ms.eMemorySort.OrderBy = append(ms.eMemorySort.OrderBy, engine.OrderByParams{ Col: offset, WeightStringCol: woffset, Desc: order.Inner.Direction == sqlparser.DescOrder, StarColFixedIndex: offset, + Type: typ, CollationID: collationID, }) } return ms, nil } -func findExprInOrderedAggr(ctx *plancontext.PlanningContext, plan *orderedAggregate, order operators.OrderBy) (keyCol int, weightStringCol int, found bool) { +func findExprInOrderedAggr(ctx *plancontext.PlanningContext, plan *orderedAggregate, order ops.OrderBy) (keyCol int, weightStringCol int, found bool) { for _, key := range plan.groupByKeys { - if ctx.SemTable.EqualsExpr(order.WeightStrExpr, key.Expr) || + if ctx.SemTable.EqualsExpr(order.SimplifiedExpr, key.Expr) || ctx.SemTable.EqualsExpr(order.Inner.Expr, key.Expr) { return key.KeyCol, key.WeightStringCol, true } } for _, aggregate := range plan.aggregates { - if ctx.SemTable.EqualsExpr(order.WeightStrExpr, aggregate.Original.Expr) || + if ctx.SemTable.EqualsExpr(order.SimplifiedExpr, aggregate.Original.Expr) || ctx.SemTable.EqualsExpr(order.Inner.Expr, aggregate.Original.Expr) { return aggregate.Col, -1, true } @@ -850,19 +848,15 @@ func findExprInOrderedAggr(ctx *plancontext.PlanningContext, plan *orderedAggreg return 0, 0, false } -func (hp *horizonPlanning) createMemorySortPlan(ctx *plancontext.PlanningContext, plan logicalPlan, orderExprs []operators.OrderBy, useWeightStr bool) (logicalPlan, error) { +func (hp *horizonPlanning) createMemorySortPlan(ctx *plancontext.PlanningContext, plan logicalPlan, orderExprs []ops.OrderBy, useWeightStr bool) (logicalPlan, error) { primitive := &engine.MemorySort{} ms := &memorySort{ - resultsBuilder: resultsBuilder{ - logicalPlanCommon: newBuilderCommon(plan), - weightStrings: make(map[*resultColumn]int), - truncater: primitive, - }, - eMemorySort: primitive, + resultsBuilder: newResultsBuilder(plan, primitive), + eMemorySort: primitive, } for _, order := range orderExprs { - wsExpr := order.WeightStrExpr + wsExpr := order.SimplifiedExpr if !useWeightStr { wsExpr = nil } @@ -870,18 +864,20 @@ func (hp *horizonPlanning) createMemorySortPlan(ctx *plancontext.PlanningContext if err != nil { return nil, err } + typ, col, _ := ctx.SemTable.TypeForExpr(order.Inner.Expr) ms.eMemorySort.OrderBy = append(ms.eMemorySort.OrderBy, engine.OrderByParams{ Col: offset, WeightStringCol: weightStringOffset, Desc: order.Inner.Direction == sqlparser.DescOrder, StarColFixedIndex: offset, - CollationID: ctx.SemTable.CollationForExpr(order.Inner.Expr), + Type: typ, + CollationID: col, }) } return ms, nil } -func orderExprsDependsOnTableSet(orderExprs []operators.OrderBy, semTable *semantics.SemTable, ts semantics.TableSet) bool { +func orderExprsDependsOnTableSet(orderExprs []ops.OrderBy, semTable *semantics.SemTable, ts semantics.TableSet) bool { for _, expr := range orderExprs { exprDependencies := semTable.RecursiveDeps(expr.Inner.Expr) if !exprDependencies.IsSolvedBy(ts) { @@ -896,7 +892,7 @@ func (hp *horizonPlanning) planDistinct(ctx *plancontext.PlanningContext, plan l return plan, nil } switch p := plan.(type) { - case *routeGen4: + case *route: // we always make the underlying query distinct, // and then we might also add a distinct operator on top if it is needed p.Select.MakeDistinct() @@ -905,7 +901,7 @@ func (hp *horizonPlanning) planDistinct(ctx *plancontext.PlanningContext, plan l } return hp.addDistinct(ctx, plan) - case *joinGen4, *pulloutSubquery: + case *join, *pulloutSubquery: return hp.addDistinct(ctx, plan) case *orderedAggregate: return hp.planDistinctOA(ctx.SemTable, p) @@ -916,10 +912,7 @@ func (hp *horizonPlanning) planDistinct(ctx *plancontext.PlanningContext, plan l func (hp *horizonPlanning) planDistinctOA(semTable *semantics.SemTable, currPlan *orderedAggregate) (logicalPlan, error) { oa := &orderedAggregate{ - resultsBuilder: resultsBuilder{ - logicalPlanCommon: newBuilderCommon(currPlan), - weightStrings: make(map[*resultColumn]int), - }, + resultsBuilder: newResultsBuilder(currPlan, nil), } for _, sExpr := range hp.qp.SelectExprs { expr, err := sExpr.GetExpr() @@ -940,7 +933,8 @@ func (hp *horizonPlanning) planDistinctOA(semTable *semantics.SemTable, currPlan for _, aggrParam := range currPlan.aggregates { if semTable.EqualsExpr(expr, aggrParam.Expr) { found = true - oa.groupByKeys = append(oa.groupByKeys, &engine.GroupByParams{KeyCol: aggrParam.Col, WeightStringCol: -1, CollationID: semTable.CollationForExpr(expr)}) + typ, col, _ := semTable.TypeForExpr(expr) + oa.groupByKeys = append(oa.groupByKeys, &engine.GroupByParams{KeyCol: aggrParam.Col, WeightStringCol: -1, Type: typ, CollationID: col}) break } } @@ -952,7 +946,7 @@ func (hp *horizonPlanning) planDistinctOA(semTable *semantics.SemTable, currPlan } func (hp *horizonPlanning) addDistinct(ctx *plancontext.PlanningContext, plan logicalPlan) (logicalPlan, error) { - var orderExprs []operators.OrderBy + var orderExprs []ops.OrderBy var groupByKeys []*engine.GroupByParams for index, sExpr := range hp.qp.SelectExprs { aliasExpr, err := sExpr.GetAliasedExpr() @@ -972,7 +966,8 @@ func (hp *horizonPlanning) addDistinct(ctx *plancontext.PlanningContext, plan lo inner = sqlparser.NewColName(aliasExpr.As.String()) ctx.SemTable.CopyDependencies(aliasExpr.Expr, inner) } - grpParam := &engine.GroupByParams{KeyCol: index, WeightStringCol: -1, CollationID: ctx.SemTable.CollationForExpr(inner), Expr: inner} + typ, col, _ := ctx.SemTable.TypeForExpr(inner) + grpParam := &engine.GroupByParams{KeyCol: index, WeightStringCol: -1, Type: typ, CollationID: col, Expr: inner} _, wOffset, err := wrapAndPushExpr(ctx, aliasExpr.Expr, aliasExpr.Expr, plan) if err != nil { return nil, err @@ -980,9 +975,9 @@ func (hp *horizonPlanning) addDistinct(ctx *plancontext.PlanningContext, plan lo grpParam.WeightStringCol = wOffset groupByKeys = append(groupByKeys, grpParam) - orderExprs = append(orderExprs, operators.OrderBy{ - Inner: &sqlparser.Order{Expr: inner}, - WeightStrExpr: aliasExpr.Expr}, + orderExprs = append(orderExprs, ops.OrderBy{ + Inner: &sqlparser.Order{Expr: inner}, + SimplifiedExpr: aliasExpr.Expr}, ) } innerPlan, err := hp.planOrderBy(ctx, orderExprs, plan) @@ -990,11 +985,8 @@ func (hp *horizonPlanning) addDistinct(ctx *plancontext.PlanningContext, plan lo return nil, err } oa := &orderedAggregate{ - resultsBuilder: resultsBuilder{ - logicalPlanCommon: newBuilderCommon(innerPlan), - weightStrings: make(map[*resultColumn]int), - }, - groupByKeys: groupByKeys, + resultsBuilder: newResultsBuilder(innerPlan, nil), + groupByKeys: groupByKeys, } return oa, nil } @@ -1048,7 +1040,7 @@ func (hp *horizonPlanning) planHaving(ctx *plancontext.PlanningContext, plan log func pushHaving(ctx *plancontext.PlanningContext, expr sqlparser.Expr, plan logicalPlan) (logicalPlan, error) { switch node := plan.(type) { - case *routeGen4: + case *route: sel := sqlparser.GetFirstSelect(node.Select) sel.AddHaving(expr) return plan, nil @@ -1064,7 +1056,7 @@ func pushHaving(ctx *plancontext.PlanningContext, expr sqlparser.Expr, plan logi func isJoin(plan logicalPlan) bool { switch plan.(type) { - case *joinGen4, *hashJoin: + case *join, *hashJoin: return true default: return false @@ -1097,7 +1089,7 @@ func exprHasVindex(semTable *semantics.SemTable, expr sqlparser.Expr, hasToBeUni return false } -func planSingleShardRoutePlan(sel sqlparser.SelectStatement, rb *routeGen4) error { +func planSingleRoutePlan(sel sqlparser.SelectStatement, rb *route) error { err := stripDownQuery(sel, rb.Select) if err != nil { return err @@ -1160,14 +1152,14 @@ func stripDownQuery(from, to sqlparser.SelectStatement) error { func planGroupByGen4(ctx *plancontext.PlanningContext, groupExpr operators.GroupBy, plan logicalPlan, wsAdded bool) error { switch node := plan.(type) { - case *routeGen4: + case *route: sel := node.Select.(*sqlparser.Select) sel.AddGroupBy(groupExpr.Inner) // If a weight_string function is added to the select list, // then we need to add that to the group by clause otherwise the query will fail on mysql with full_group_by error // as the weight_string function might not be functionally dependent on the group by. if wsAdded { - sel.AddGroupBy(weightStringFor(groupExpr.WeightStrExpr)) + sel.AddGroupBy(weightStringFor(groupExpr.SimplifiedExpr)) } return nil case *pulloutSubquery: diff --git a/go/vt/vtgate/planbuilder/insert.go b/go/vt/vtgate/planbuilder/insert.go index d74d8fcebcc..55ef9148b1a 100644 --- a/go/vt/vtgate/planbuilder/insert.go +++ b/go/vt/vtgate/planbuilder/insert.go @@ -17,442 +17,160 @@ limitations under the License. package planbuilder import ( - "fmt" - "strconv" - "strings" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/semantics" - + querypb "vitess.io/vitess/go/vt/proto/query" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) -// buildInsertPlan builds the route for an INSERT statement. -func buildInsertPlan(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - pb := newStmtAwarePrimitiveBuilder(vschema, newJointab(reservedVars), stmt) - ins := stmt.(*sqlparser.Insert) - exprs := sqlparser.TableExprs{&sqlparser.AliasedTableExpr{Expr: ins.Table}} - rb, err := pb.processDMLTable(exprs, reservedVars, nil) +func gen4InsertStmtPlanner(version querypb.ExecuteOptions_PlannerVersion, insStmt *sqlparser.Insert, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { + ctx, err := plancontext.CreatePlanningContext(insStmt, reservedVars, vschema, version) if err != nil { return nil, err } - // The table might have been routed to a different one. - ins.Table = exprs[0].(*sqlparser.AliasedTableExpr).Expr.(sqlparser.TableName) - if rb.eroute.TargetDestination != nil { - return nil, vterrors.VT12001("INSERT with a target destination") - } - - if len(pb.st.tables) != 1 { - // Unreachable. - return nil, vterrors.VT12001("multi-table INSERT statement in a sharded keyspace") - } - var vschemaTable *vindexes.Table - for _, tval := range pb.st.tables { - // There is only one table. - vschemaTable = tval.vschemaTable - } - if !rb.eroute.Keyspace.Sharded { - return buildInsertUnshardedPlan(ins, vschemaTable, reservedVars, vschema) - } - if ins.Action == sqlparser.ReplaceAct { - return nil, vterrors.VT12001("REPLACE INTO with sharded keyspace") - } - return buildInsertShardedPlan(ins, vschemaTable, reservedVars, vschema) -} - -func buildInsertUnshardedPlan(ins *sqlparser.Insert, table *vindexes.Table, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - eins := engine.NewSimpleInsert( - engine.InsertUnsharded, - table, - table.Keyspace, - ) - applyCommentDirectives(ins, eins) - - var rows sqlparser.Values - tc := &tableCollector{} - tc.addVindexTable(table) - switch insertValues := ins.Rows.(type) { - case *sqlparser.Select, *sqlparser.Union: - if eins.Table.AutoIncrement != nil { - return nil, vterrors.VT12001("auto-increment and SELECT in INSERT") - } - plan, err := subquerySelectPlan(ins, vschema, reservedVars, false) - if err != nil { - return nil, err - } - tc.addAllTables(plan.tables) - if route, ok := plan.primitive.(*engine.Route); ok && !route.Keyspace.Sharded && table.Keyspace.Name == route.Keyspace.Name { - eins.Query = generateQuery(ins) - } else { - eins.Input = plan.primitive - generateInsertSelectQuery(ins, eins) - } - return newPlanResult(eins, tc.getTables()...), nil - case sqlparser.Values: - rows = insertValues - default: - return nil, vterrors.VT13001(fmt.Sprintf("unexpected construct in INSERT: %T", insertValues)) - } - if eins.Table.AutoIncrement == nil { - eins.Query = generateQuery(ins) - } else { - // Table has auto-inc and has a VALUES clause. - // If the column list is nil then add all the columns - // If the column list is empty then add only the auto-inc column and this happens on calling modifyForAutoinc - if ins.Columns == nil { - if table.ColumnListAuthoritative { - populateInsertColumnlist(ins, table) - } else { - return nil, vterrors.VT13001("column list required for tables with auto-inc columns") - } - } - for _, row := range rows { - if len(ins.Columns) != len(row) { - return nil, vterrors.VT13001("column list does not match values") - } - } - if err := modifyForAutoinc(ins, eins); err != nil { - return nil, err - } - eins.Query = generateQuery(ins) - } - - return newPlanResult(eins, tc.getTables()...), nil -} - -func buildInsertShardedPlan(ins *sqlparser.Insert, table *vindexes.Table, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - eins := &engine.Insert{ - Table: table, - Keyspace: table.Keyspace, - } - tc := &tableCollector{} - tc.addVindexTable(table) - eins.Ignore = bool(ins.Ignore) - if ins.OnDup != nil { - if isVindexChanging(sqlparser.UpdateExprs(ins.OnDup), eins.Table.ColumnVindexes) { - return nil, vterrors.VT12001("DML cannot update vindex column") - } - eins.Ignore = true - } - if ins.Columns == nil && table.ColumnListAuthoritative { - populateInsertColumnlist(ins, table) - } - - applyCommentDirectives(ins, eins) - eins.ColVindexes = getColVindexes(eins.Table.ColumnVindexes) - // Till here common plan building done for insert by providing values or select query. - - rows, isRowValues := ins.Rows.(sqlparser.Values) - if !isRowValues { - return buildInsertSelectPlan(ins, table, reservedVars, vschema, eins) - } - eins.Opcode = engine.InsertSharded - - for _, value := range rows { - if len(ins.Columns) != len(value) { - return nil, vterrors.VT13001("column list does not match values") - } - } - - if err := modifyForAutoinc(ins, eins); err != nil { + err = rewriteRoutedTables(insStmt, vschema) + if err != nil { return nil, err } + // remove any alias added from routing table. + // insert query does not support table alias. + insStmt.Table.As = sqlparser.NewIdentifierCS("") - // Fill out the 3-d Values structure. Please see documentation of Insert.Values for details. - colVindexes := eins.ColVindexes - routeValues := make([][][]evalengine.Expr, len(colVindexes)) - for vIdx, colVindex := range colVindexes { - routeValues[vIdx] = make([][]evalengine.Expr, len(colVindex.Columns)) - for colIdx, col := range colVindex.Columns { - routeValues[vIdx][colIdx] = make([]evalengine.Expr, len(rows)) - colNum := findOrAddColumn(ins, col) - for rowNum, row := range rows { - innerpv, err := evalengine.Translate(row[colNum], semantics.EmptySemTable()) - if err != nil { - return nil, err - } - routeValues[vIdx][colIdx][rowNum] = innerpv - } - } - } - for _, colVindex := range colVindexes { - for _, col := range colVindex.Columns { - colNum := findOrAddColumn(ins, col) - for rowNum, row := range rows { - name := engine.InsertVarName(col, rowNum) - row[colNum] = sqlparser.NewArgument(name) - } - } - } - eins.VindexValues = routeValues - eins.Query = generateQuery(ins) - generateInsertShardedQuery(ins, eins, rows) - return newPlanResult(eins, tc.getTables()...), nil -} - -// buildInsertSelectPlan builds an insert using select plan. -func buildInsertSelectPlan(ins *sqlparser.Insert, table *vindexes.Table, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, eins *engine.Insert) (*planResult, error) { - eins.Opcode = engine.InsertSelect - tc := &tableCollector{} - tc.addVindexTable(table) - - // check if column list is provided if not, then vschema should be able to provide the column list. - if len(ins.Columns) == 0 { - if !table.ColumnListAuthoritative { - return nil, vterrors.VT09004() + // Check single unsharded. Even if the table is for single unsharded but sequence table is used. + // We cannot shortcut here as sequence column needs additional planning. + ks, tables := ctx.SemTable.SingleUnshardedKeyspace() + fkPlanNeeded := false + if ks != nil { + noAutoInc := tables[0].AutoIncrement == nil + fkPlanNeeded = fkManagementRequiredForInsert(ctx, tables[0], sqlparser.UpdateExprs(insStmt.OnDup), insStmt.Action == sqlparser.ReplaceAct) + if noAutoInc && !fkPlanNeeded { + plan := insertUnshardedShortcut(insStmt, ks, tables) + plan = pushCommentDirectivesOnPlan(plan, insStmt) + return newPlanResult(plan.Primitive(), operators.QualifiedTables(ks, tables)...), nil } - populateInsertColumnlist(ins, table) } - // select plan will be taken as input to insert rows into the table. - plan, err := subquerySelectPlan(ins, vschema, reservedVars, true) + tblInfo, err := ctx.SemTable.TableInfoFor(ctx.SemTable.TableSetFor(insStmt.Table)) if err != nil { return nil, err } - tc.addAllTables(plan.tables) - eins.Input = plan.primitive - // When the table you are steaming data from and table you are inserting from are same. - // Then due to locking of the index range on the table we might not be able to insert into the table. - // Therefore, instead of streaming, this flag will ensure the records are first read and then inserted. - if strings.Contains(plan.primitive.GetTableName(), table.Name.String()) { - eins.ForceNonStreaming = true + if err = errOutIfPlanCannotBeConstructed(ctx, tblInfo.GetVindexTable(), insStmt, fkPlanNeeded); err != nil { + return nil, err } - // auto-increment column is added explicitly if not provided. - if err := modifyForAutoinc(ins, eins); err != nil { + err = queryRewrite(ctx.SemTable, reservedVars, insStmt) + if err != nil { return nil, err } - // Fill out the 3-d Values structure - eins.VindexValueOffset, err = extractColVindexOffsets(ins, eins.ColVindexes) + op, err := operators.PlanQuery(ctx, insStmt) if err != nil { return nil, err } - generateInsertSelectQuery(ins, eins) - return newPlanResult(eins, tc.getTables()...), nil -} - -func subquerySelectPlan(ins *sqlparser.Insert, vschema plancontext.VSchema, reservedVars *sqlparser.ReservedVars, sharded bool) (*planResult, error) { - selectStmt, queryPlanner, err := getStatementAndPlanner(ins, vschema) + plan, err := transformToLogicalPlan(ctx, op) if err != nil { return nil, err } - // validate the columns to match on insert and select - // for sharded insert table only - if sharded { - if err := checkColumnCounts(ins, selectStmt); err != nil { - return nil, err - } - } + plan = pushCommentDirectivesOnPlan(plan, insStmt) - // Override the locking with `for update` to lock the rows for inserting the data. - selectStmt.SetLock(sqlparser.ForUpdateLock) + setLockOnAllSelect(plan) - return queryPlanner(selectStmt, reservedVars, vschema) -} - -func getStatementAndPlanner( - ins *sqlparser.Insert, - vschema plancontext.VSchema, -) (selectStmt sqlparser.SelectStatement, configuredPlanner stmtPlanner, err error) { - switch stmt := ins.Rows.(type) { - case *sqlparser.Select: - configuredPlanner, err = getConfiguredPlanner(vschema, buildSelectPlan, stmt, "") - selectStmt = stmt - case *sqlparser.Union: - configuredPlanner, err = getConfiguredPlanner(vschema, buildUnionPlan, stmt, "") - selectStmt = stmt - default: - err = vterrors.VT12001(fmt.Sprintf("INSERT plan with %T", ins.Rows)) - } - - if err != nil { - return nil, nil, err + if err := plan.Wireup(ctx); err != nil { + return nil, err } - return selectStmt, configuredPlanner, nil + return newPlanResult(plan.Primitive(), operators.TablesUsed(op)...), nil } -func checkColumnCounts(ins *sqlparser.Insert, selectStmt sqlparser.SelectStatement) error { - if len(ins.Columns) < selectStmt.GetColumnCount() { - return vterrors.VT03006() +func errOutIfPlanCannotBeConstructed(ctx *plancontext.PlanningContext, vTbl *vindexes.Table, insStmt *sqlparser.Insert, fkPlanNeeded bool) error { + if vTbl.Keyspace.Sharded && ctx.SemTable.NotUnshardedErr != nil { + return ctx.SemTable.NotUnshardedErr } - if len(ins.Columns) > selectStmt.GetColumnCount() { - sel := sqlparser.GetFirstSelect(selectStmt) - var hasStarExpr bool - for _, sExpr := range sel.SelectExprs { - if _, hasStarExpr = sExpr.(*sqlparser.StarExpr); hasStarExpr { - break - } - } - if !hasStarExpr { - return vterrors.VT03006() - } + if insStmt.Action != sqlparser.ReplaceAct { + return nil + } + if fkPlanNeeded { + return vterrors.VT12001("REPLACE INTO with foreign keys") } return nil } -func applyCommentDirectives(ins *sqlparser.Insert, eins *engine.Insert) { - directives := ins.Comments.Directives() - if directives.IsSet(sqlparser.DirectiveMultiShardAutocommit) { - eins.MultiShardAutocommit = true +// TODO: Handle all this in semantic analysis. +func fkManagementRequiredForInsert(ctx *plancontext.PlanningContext, vTbl *vindexes.Table, updateExprs sqlparser.UpdateExprs, replace bool) bool { + ksMode, err := ctx.VSchema.ForeignKeyMode(vTbl.Keyspace.Name) + if err != nil || ksMode != vschemapb.Keyspace_FK_MANAGED { + return false } - eins.QueryTimeout = queryTimeout(directives) -} -func getColVindexes(allColVindexes []*vindexes.ColumnVindex) (colVindexes []*vindexes.ColumnVindex) { - for _, colVindex := range allColVindexes { - if colVindex.IsPartialVindex() { - continue - } - colVindexes = append(colVindexes, colVindex) + if len(vTbl.ParentFKsNeedsHandling(ctx.VerifyAllFKs, "")) > 0 { + return true } - return -} -func extractColVindexOffsets(ins *sqlparser.Insert, colVindexes []*vindexes.ColumnVindex) ([][]int, error) { - vv := make([][]int, len(colVindexes)) - for idx, colVindex := range colVindexes { - for _, col := range colVindex.Columns { - colNum := findColumn(ins, col) - // sharding column values should be provided in the insert. - if colNum == -1 && idx == 0 { - return nil, vterrors.VT09003(col) - } - vv[idx] = append(vv[idx], colNum) - } + childFks := vTbl.ChildFKsNeedsHandling(ctx.VerifyAllFKs, vindexes.UpdateAction) + if len(childFks) > 0 && replace { + return true } - return vv, nil -} -// findColumn returns the column index where it is placed on the insert column list. -// Otherwise, return -1 when not found. -func findColumn(ins *sqlparser.Insert, col sqlparser.IdentifierCI) int { - for i, column := range ins.Columns { - if col.Equal(column) { - return i - } - } - return -1 + // Check if any column in the parent table is being updated which has a child foreign key. + return columnModified(updateExprs, func(expr *sqlparser.UpdateExpr) ([]vindexes.ParentFKInfo, []vindexes.ChildFKInfo) { + return nil, childFks + }) } -func populateInsertColumnlist(ins *sqlparser.Insert, table *vindexes.Table) { - cols := make(sqlparser.Columns, 0, len(table.Columns)) - for _, c := range table.Columns { - cols = append(cols, c.Name) - } - ins.Columns = cols +func insertUnshardedShortcut(stmt *sqlparser.Insert, ks *vindexes.Keyspace, tables []*vindexes.Table) logicalPlan { + eIns := &engine.Insert{} + eIns.Keyspace = ks + eIns.TableName = tables[0].Name.String() + eIns.Opcode = engine.InsertUnsharded + eIns.Query = generateQuery(stmt) + return &insert{eInsert: eIns} } -func generateInsertShardedQuery(node *sqlparser.Insert, eins *engine.Insert, valueTuples sqlparser.Values) { - prefixBuf := sqlparser.NewTrackedBuffer(dmlFormatter) - midBuf := sqlparser.NewTrackedBuffer(dmlFormatter) - suffixBuf := sqlparser.NewTrackedBuffer(dmlFormatter) - eins.Mid = make([]string, len(valueTuples)) - prefixBuf.Myprintf("insert %v%sinto %v%v values ", - node.Comments, node.Ignore.ToString(), - node.Table, node.Columns) - eins.Prefix = prefixBuf.String() - for rowNum, val := range valueTuples { - midBuf.Myprintf("%v", val) - eins.Mid[rowNum] = midBuf.String() - midBuf.Reset() - } - suffixBuf.Myprintf("%v", node.OnDup) - eins.Suffix = suffixBuf.String() +type insert struct { + eInsert *engine.Insert + source logicalPlan } -func generateInsertSelectQuery(node *sqlparser.Insert, eins *engine.Insert) { - prefixBuf := sqlparser.NewTrackedBuffer(dmlFormatter) - suffixBuf := sqlparser.NewTrackedBuffer(dmlFormatter) - prefixBuf.Myprintf("insert %v%sinto %v%v ", - node.Comments, node.Ignore.ToString(), - node.Table, node.Columns) - eins.Prefix = prefixBuf.String() - suffixBuf.Myprintf("%v", node.OnDup) - eins.Suffix = suffixBuf.String() -} +var _ logicalPlan = (*insert)(nil) -// modifyForAutoinc modifies the AST and the plan to generate necessary autoinc values. -// For row values cases, bind variable names are generated using baseName. -func modifyForAutoinc(ins *sqlparser.Insert, eins *engine.Insert) error { - if eins.Table.AutoIncrement == nil { +func (i *insert) Wireup(ctx *plancontext.PlanningContext) error { + if i.source == nil { return nil } - colNum := findOrAddColumn(ins, eins.Table.AutoIncrement.Column) - eins.Generate = &engine.Generate{ - Keyspace: eins.Table.AutoIncrement.Sequence.Keyspace, - Query: fmt.Sprintf("select next :n values from %s", sqlparser.String(eins.Table.AutoIncrement.Sequence.Name)), + return i.source.Wireup(ctx) +} + +func (i *insert) Primitive() engine.Primitive { + if i.source != nil { + i.eInsert.Input = i.source.Primitive() } - switch rows := ins.Rows.(type) { - case sqlparser.SelectStatement: - eins.Generate.Offset = colNum - return nil - case sqlparser.Values: - autoIncValues := make([]evalengine.Expr, 0, len(rows)) - for rowNum, row := range rows { - // Support the DEFAULT keyword by treating it as null - if _, ok := row[colNum].(*sqlparser.Default); ok { - row[colNum] = &sqlparser.NullVal{} - } + return i.eInsert +} - pv, err := evalengine.Translate(row[colNum], semantics.EmptySemTable()) - if err != nil { - return err - } - autoIncValues = append(autoIncValues, pv) - row[colNum] = sqlparser.NewArgument(engine.SeqVarName + strconv.Itoa(rowNum)) - } - eins.Generate.Values = evalengine.NewTupleExpr(autoIncValues...) +func (i *insert) Inputs() []logicalPlan { + if i.source == nil { return nil } - return vterrors.VT13001(fmt.Sprintf("unexpected construct in INSERT: %T", ins.Rows)) + return []logicalPlan{i.source} } -// findOrAddColumn finds the position of a column in the insert. If it's -// absent it appends it to the with NULL values and returns that position. -func findOrAddColumn(ins *sqlparser.Insert, col sqlparser.IdentifierCI) int { - colNum := findColumn(ins, col) - if colNum >= 0 { - return colNum - } - colOffset := len(ins.Columns) - ins.Columns = append(ins.Columns, col) - if rows, ok := ins.Rows.(sqlparser.Values); ok { - for i := range rows { - rows[i] = append(rows[i], &sqlparser.NullVal{}) - } - } - return colOffset +func (i *insert) Rewrite(inputs ...logicalPlan) error { + panic("does not expect insert to get rewrite call") } -// isVindexChanging returns true if any of the update -// expressions modify a vindex column. -func isVindexChanging(setClauses sqlparser.UpdateExprs, colVindexes []*vindexes.ColumnVindex) bool { - for _, assignment := range setClauses { - for _, vcol := range colVindexes { - for _, col := range vcol.Columns { - if col.Equal(assignment.Name.Name) { - valueExpr, isValuesFuncExpr := assignment.Expr.(*sqlparser.ValuesFuncExpr) - if !isValuesFuncExpr { - return true - } - // update on duplicate key is changing the vindex column, not supported. - if !valueExpr.Name.Name.Equal(assignment.Name.Name) { - return true - } - } - } - } - } - return false +func (i *insert) ContainsTables() semantics.TableSet { + panic("does not expect insert to get contains tables call") +} + +func (i *insert) OutputColumns() []sqlparser.SelectExpr { + panic("does not expect insert to get output columns call") } diff --git a/go/vt/vtgate/planbuilder/join.go b/go/vt/vtgate/planbuilder/join.go index 0fc9b5f2ce3..f3929f9a8fd 100644 --- a/go/vt/vtgate/planbuilder/join.go +++ b/go/vt/vtgate/planbuilder/join.go @@ -19,230 +19,78 @@ package planbuilder import ( "fmt" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" ) var _ logicalPlan = (*join)(nil) // join is used to build a Join primitive. -// It's used to build a normal join or a left join -// operation. +// It's used to build an inner join and only used by the Gen4 planner type join struct { - v3Plan - order int - resultColumns []*resultColumn - weightStrings map[*resultColumn]int - - // leftOrder stores the order number of the left node. This is - // used for a b-tree style traversal towards the target route. - // Let us assume the following execution tree: - // J9 - // / \ - // / \ - // J3 J8 - // / \ / \ - // R1 R2 J6 R7 - // / \ - // R4 R5 - // - // In the above trees, the suffix numbers indicate the - // execution order. The leftOrder for the joins will then - // be as follows: - // J3: 1 - // J6: 4 - // J8: 6 - // J9: 3 - // - // The route to R4 would be: - // Go right from J9->J8 because Left(J9)==3, which is <4. - // Go left from J8->J6 because Left(J8)==6, which is >=4. - // Go left from J6->R4 because Left(J6)==4, the destination. - // Look for 'isOnLeft' to see how these numbers are used. - leftOrder int - // Left and Right are the nodes for the join. Left, Right logicalPlan - ejoin *engine.Join -} - -// newJoin makes a new join using the two planBuilder. ajoin can be nil -// if the join is on a ',' operator. lpb will contain the resulting join. -// rpb will be discarded. -func newJoin(lpb, rpb *primitiveBuilder, ajoin *sqlparser.JoinTableExpr, reservedVars *sqlparser.ReservedVars) error { - // This function converts ON clauses to WHERE clauses. The WHERE clause - // scope can see all tables, whereas the ON clause can only see the - // participants of the JOIN. However, since the ON clause doesn't allow - // external references, and the FROM clause doesn't allow duplicates, - // it's safe to perform this conversion and still expect the same behavior. - - opcode := engine.InnerJoin - if ajoin != nil { - switch { - case ajoin.Join == sqlparser.LeftJoinType: - opcode = engine.LeftJoin - - // For left joins, we have to push the ON clause into the RHS. - // We do this before creating the join primitive. - // However, variables of LHS need to be visible. To allow this, - // we mark the LHS symtab as outer scope to the RHS, just like - // a subquery. This make the RHS treat the LHS symbols as external. - // This will prevent constructs from escaping out of the rpb scope. - // At this point, the LHS symtab also contains symbols of the RHS. - // But the RHS will hide those, as intended. - rpb.st.Outer = lpb.st - if err := rpb.pushFilter(ajoin.Condition.On, sqlparser.WhereStr, reservedVars); err != nil { - return err - } - case ajoin.Condition.Using != nil: - return vterrors.VT12001("JOIN with USING(column_list) clause for complex queries") - } - } - lpb.plan = &join{ - weightStrings: make(map[*resultColumn]int), - Left: lpb.plan, - Right: rpb.plan, - ejoin: &engine.Join{ - Opcode: opcode, - Vars: make(map[string]int), - }, - } - lpb.plan.Reorder(0) - if ajoin == nil || opcode == engine.LeftJoin { - return nil - } - return lpb.pushFilter(ajoin.Condition.On, sqlparser.WhereStr, reservedVars) -} - -// Order implements the logicalPlan interface -func (jb *join) Order() int { - return jb.order -} + // The Opcode tells us if this is an inner or outer join + Opcode engine.JoinOpcode -// Reorder implements the logicalPlan interface -func (jb *join) Reorder(order int) { - jb.Left.Reorder(order) - jb.leftOrder = jb.Left.Order() - jb.Right.Reorder(jb.leftOrder) - jb.order = jb.Right.Order() + 1 -} + // These are the columns that will be produced by this plan. + // Negative offsets come from the LHS, and positive from the RHS + Cols []int -// Primitive implements the logicalPlan interface -func (jb *join) Primitive() engine.Primitive { - jb.ejoin.Left = jb.Left.Primitive() - jb.ejoin.Right = jb.Right.Primitive() - return jb.ejoin -} + // Vars are the columns that will be sent from the LHS to the RHS + // the number is the offset on the LHS result, and the string is the bind variable name used in the RHS + Vars map[string]int -// ResultColumns implements the logicalPlan interface -func (jb *join) ResultColumns() []*resultColumn { - return jb.resultColumns + // LHSColumns are the columns from the LHS used for the join. + // These are the same columns pushed on the LHS that are now used in the Vars field + LHSColumns []*sqlparser.ColName } -// Wireup implements the logicalPlan interface -func (jb *join) Wireup(plan logicalPlan, jt *jointab) error { - err := jb.Right.Wireup(plan, jt) +// WireupGen4 implements the logicalPlan interface +func (j *join) Wireup(ctx *plancontext.PlanningContext) error { + err := j.Left.Wireup(ctx) if err != nil { return err } - return jb.Left.Wireup(plan, jt) -} - -// SupplyVar implements the logicalPlan interface -func (jb *join) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { - if !jb.isOnLeft(from) { - jb.Right.SupplyVar(from, to, col, varname) - return - } - if jb.isOnLeft(to) { - jb.Left.SupplyVar(from, to, col, varname) - return - } - if _, ok := jb.ejoin.Vars[varname]; ok { - // Looks like somebody else already requested this. - return - } - c := col.Metadata.(*column) - for i, rc := range jb.resultColumns { - if jb.ejoin.Cols[i] > 0 { - continue - } - if rc.column == c { - jb.ejoin.Vars[varname] = -jb.ejoin.Cols[i] - 1 - return - } - } - _, jb.ejoin.Vars[varname] = jb.Left.SupplyCol(col) + return j.Right.Wireup(ctx) } -// SupplyCol implements the logicalPlan interface -func (jb *join) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - c := col.Metadata.(*column) - for i, rc := range jb.resultColumns { - if rc.column == c { - return rc, i - } - } - - routeNumber := c.Origin().Order() - var sourceCol int - if jb.isOnLeft(routeNumber) { - rc, sourceCol = jb.Left.SupplyCol(col) - jb.ejoin.Cols = append(jb.ejoin.Cols, -sourceCol-1) - } else { - rc, sourceCol = jb.Right.SupplyCol(col) - jb.ejoin.Cols = append(jb.ejoin.Cols, sourceCol+1) +// Primitive implements the logicalPlan interface +func (j *join) Primitive() engine.Primitive { + return &engine.Join{ + Left: j.Left.Primitive(), + Right: j.Right.Primitive(), + Cols: j.Cols, + Vars: j.Vars, + Opcode: j.Opcode, } - jb.resultColumns = append(jb.resultColumns, rc) - return rc, len(jb.ejoin.Cols) - 1 } -// SupplyWeightString implements the logicalPlan interface -func (jb *join) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) { - rc := jb.resultColumns[colNumber] - if weightcolNumber, ok := jb.weightStrings[rc]; ok { - return weightcolNumber, nil - } - routeNumber := rc.column.Origin().Order() - if jb.isOnLeft(routeNumber) { - sourceCol, err := jb.Left.SupplyWeightString(-jb.ejoin.Cols[colNumber]-1, alsoAddToGroupBy) - if err != nil { - return 0, err - } - jb.ejoin.Cols = append(jb.ejoin.Cols, -sourceCol-1) - } else { - sourceCol, err := jb.Right.SupplyWeightString(jb.ejoin.Cols[colNumber]-1, alsoAddToGroupBy) - if err != nil { - return 0, err - } - jb.ejoin.Cols = append(jb.ejoin.Cols, sourceCol+1) - } - jb.resultColumns = append(jb.resultColumns, rc) - jb.weightStrings[rc] = len(jb.ejoin.Cols) - 1 - return len(jb.ejoin.Cols) - 1, nil +// Inputs implements the logicalPlan interface +func (j *join) Inputs() []logicalPlan { + return []logicalPlan{j.Left, j.Right} } // Rewrite implements the logicalPlan interface -func (jb *join) Rewrite(inputs ...logicalPlan) error { +func (j *join) Rewrite(inputs ...logicalPlan) error { if len(inputs) != 2 { - return vterrors.VT13001(fmt.Sprintf("join: wrong number of inputs, got: %d, expect: 2", len(inputs))) + return vterrors.VT13001(fmt.Sprintf("wrong number of children in join rewrite, got: %d, expect: 2", len(inputs))) } - jb.Left = inputs[0] - jb.Right = inputs[1] + j.Left = inputs[0] + j.Right = inputs[1] return nil } -// Inputs implements the logicalPlan interface -func (jb *join) Inputs() []logicalPlan { - return []logicalPlan{jb.Left, jb.Right} +// ContainsTables implements the logicalPlan interface +func (j *join) ContainsTables() semantics.TableSet { + return j.Left.ContainsTables().Merge(j.Right.ContainsTables()) } -// isOnLeft returns true if the specified route number -// is on the left side of the join. If false, it means -// the node is on the right. -func (jb *join) isOnLeft(nodeNum int) bool { - return nodeNum <= jb.leftOrder +// OutputColumns implements the logicalPlan interface +func (j *join) OutputColumns() []sqlparser.SelectExpr { + return getOutputColumnsFromJoin(j.Cols, j.Left.OutputColumns(), j.Right.OutputColumns()) } diff --git a/go/vt/vtgate/planbuilder/joinGen4.go b/go/vt/vtgate/planbuilder/joinGen4.go deleted file mode 100644 index 04a408b1fb4..00000000000 --- a/go/vt/vtgate/planbuilder/joinGen4.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" -) - -var _ logicalPlan = (*joinGen4)(nil) - -// joinGen4 is used to build a Join primitive. -// It's used to build an inner join and only used by the Gen4 planner -type joinGen4 struct { - // Left and Right are the nodes for the join. - Left, Right logicalPlan - - // The Opcode tells us if this is an inner or outer join - Opcode engine.JoinOpcode - - // These are the columns that will be produced by this plan. - // Negative offsets come from the LHS, and positive from the RHS - Cols []int - - // Vars are the columns that will be sent from the LHS to the RHS - // the number is the offset on the LHS result, and the string is the bind variable name used in the RHS - Vars map[string]int - - // LHSColumns are the columns from the LHS used for the join. - // These are the same columns pushed on the LHS that are now used in the Vars field - LHSColumns []*sqlparser.ColName - - gen4Plan -} - -// WireupGen4 implements the logicalPlan interface -func (j *joinGen4) WireupGen4(ctx *plancontext.PlanningContext) error { - err := j.Left.WireupGen4(ctx) - if err != nil { - return err - } - return j.Right.WireupGen4(ctx) -} - -// Primitive implements the logicalPlan interface -func (j *joinGen4) Primitive() engine.Primitive { - return &engine.Join{ - Left: j.Left.Primitive(), - Right: j.Right.Primitive(), - Cols: j.Cols, - Vars: j.Vars, - Opcode: j.Opcode, - } -} - -// Inputs implements the logicalPlan interface -func (j *joinGen4) Inputs() []logicalPlan { - return []logicalPlan{j.Left, j.Right} -} - -// Rewrite implements the logicalPlan interface -func (j *joinGen4) Rewrite(inputs ...logicalPlan) error { - if len(inputs) != 2 { - return vterrors.VT13001(fmt.Sprintf("wrong number of children in joinGen4 rewrite, got: %d, expect: 2", len(inputs))) - } - j.Left = inputs[0] - j.Right = inputs[1] - return nil -} - -// ContainsTables implements the logicalPlan interface -func (j *joinGen4) ContainsTables() semantics.TableSet { - return j.Left.ContainsTables().Merge(j.Right.ContainsTables()) -} - -// OutputColumns implements the logicalPlan interface -func (j *joinGen4) OutputColumns() []sqlparser.SelectExpr { - return getOutputColumnsFromJoin(j.Cols, j.Left.OutputColumns(), j.Right.OutputColumns()) -} diff --git a/go/vt/vtgate/planbuilder/jointab.go b/go/vt/vtgate/planbuilder/jointab.go deleted file mode 100644 index 956f7330bda..00000000000 --- a/go/vt/vtgate/planbuilder/jointab.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/vt/sqlparser" -) - -// jointab manages procurement and naming of join -// variables across primitives. -type jointab struct { - refs map[*column]string - reserved *sqlparser.ReservedVars - varIndex int -} - -// newJointab creates a new jointab for the current plan -// being built. It also needs the current list of bind vars -// used in the original query to make sure that the names -// it generates don't collide with those already in use. -func newJointab(reserved *sqlparser.ReservedVars) *jointab { - return &jointab{ - refs: make(map[*column]string), - reserved: reserved, - } -} - -// Procure requests for the specified column from the plan -// and returns the join var name for it. -func (jt *jointab) Procure(plan logicalPlan, col *sqlparser.ColName, to int) string { - from, joinVar := jt.Lookup(col) - // If joinVar is empty, generate a unique name. - if joinVar == "" { - joinVar = jt.reserved.ReserveColName(col) - jt.refs[col.Metadata.(*column)] = joinVar - } - plan.SupplyVar(from, to, col, joinVar) - return joinVar -} - -// GenerateSubqueryVars generates substitution variable names for -// a subquery. It returns two names based on: __sq, __sq_has_values. -// The appropriate names can be used for substitution -// depending on the scenario. -func (jt *jointab) GenerateSubqueryVars() (sq, hasValues string) { - for { - jt.varIndex++ - var1 := fmt.Sprintf("__sq%d", jt.varIndex) - var2 := fmt.Sprintf("__sq_has_values%d", jt.varIndex) - if !jt.reserved.ReserveAll(var1, var2) { - continue - } - return var1, var2 - } -} - -// Lookup returns the order of the route that supplies the column and -// the join var name if one has already been assigned for it. -func (jt *jointab) Lookup(col *sqlparser.ColName) (order int, joinVar string) { - c := col.Metadata.(*column) - return c.Origin().Order(), jt.refs[c] -} diff --git a/go/vt/vtgate/planbuilder/jointab_test.go b/go/vt/vtgate/planbuilder/jointab_test.go deleted file mode 100644 index 6bfc23c155c..00000000000 --- a/go/vt/vtgate/planbuilder/jointab_test.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "reflect" - "testing" - - "vitess.io/vitess/go/vt/sqlparser" -) - -func TestGenerateSubqueryVars(t *testing.T) { - reserved := sqlparser.NewReservedVars("vtg", map[string]struct{}{ - "__sq1": {}, - "__sq_has_values3": {}, - }) - jt := newJointab(reserved) - - v1, v2 := jt.GenerateSubqueryVars() - combined := []string{v1, v2} - want := []string{"__sq2", "__sq_has_values2"} - if !reflect.DeepEqual(combined, want) { - t.Errorf("jt.GenerateSubqueryVars: %v, want %v", combined, want) - } - - v1, v2 = jt.GenerateSubqueryVars() - combined = []string{v1, v2} - want = []string{"__sq4", "__sq_has_values4"} - if !reflect.DeepEqual(combined, want) { - t.Errorf("jt.GenerateSubqueryVars: %v, want %v", combined, want) - } -} diff --git a/go/vt/vtgate/planbuilder/logical_plan.go b/go/vt/vtgate/planbuilder/logical_plan.go index 363c012daf8..51ed8e72b0e 100644 --- a/go/vt/vtgate/planbuilder/logical_plan.go +++ b/go/vt/vtgate/planbuilder/logical_plan.go @@ -29,50 +29,9 @@ import ( // logicalPlan defines the interface that a primitive must // satisfy. type logicalPlan interface { - // Order is the execution order of the primitive. If there are subprimitives, - // the order is one above the order of the subprimitives. - // This is because the primitive executes its subprimitives first and - // processes their results to generate its own values. - // Please copy code from an existing primitive to define this function. - Order() int - // ResultColumns returns the list of result columns the - // primitive returns. - // Please copy code from an existing primitive to define this function. - ResultColumns() []*resultColumn - - // Reorder reassigns order for the primitive and its sub-primitives. - // The input is the order of the previous primitive that should - // execute before this one. - Reorder(int) - - // Wireup performs the wire-up work. Nodes should be traversed - // from right to left because the rhs nodes can request vars from - // the lhs nodes. - Wireup(lp logicalPlan, jt *jointab) error - - // WireupGen4 does the wire up work for the Gen4 planner - WireupGen4(*plancontext.PlanningContext) error - - // SupplyVar finds the common root between from and to. If it's - // the common root, it supplies the requested var to the rhs tree. - // If the primitive already has the column in its list, it should - // just supply it to the 'to' node. Otherwise, it should request - // for it by calling SupplyCol on the 'from' sub-tree to request the - // column, and then supply it to the 'to' node. - SupplyVar(from, to int, col *sqlparser.ColName, varname string) - - // SupplyCol is meant to be used for the wire-up process. This function - // changes the primitive to supply the requested column and returns - // the resultColumn and column number of the result. SupplyCol - // is different from PushSelect because it may reuse an existing - // resultColumn, whereas PushSelect guarantees the addition of a new - // result column and returns a distinct symbol for it. - SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) - - // SupplyWeightString must supply a weight_string expression of the - // specified column. It returns an error if we cannot supply a weight column for it. - SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) + // Wireup does the wire up of primitive with the source. + Wireup(*plancontext.PlanningContext) error // Primitive returns the underlying primitive. // This function should only be called after Wireup is finished. @@ -92,59 +51,6 @@ type logicalPlan interface { OutputColumns() []sqlparser.SelectExpr } -// gen4Plan implements a few methods from logicalPlan that are unused by Gen4. -type gen4Plan struct{} - -// Order implements the logicalPlan interface -func (*gen4Plan) Order() int { - panic("[BUG]: should not be called. This is a Gen4 primitive") -} - -// ResultColumns implements the logicalPlan interface -func (*gen4Plan) ResultColumns() []*resultColumn { - panic("[BUG]: should not be called. This is a Gen4 primitive") -} - -// Reorder implements the logicalPlan interface -func (*gen4Plan) Reorder(int) { - panic("[BUG]: should not be called. This is a Gen4 primitive") -} - -// Wireup implements the logicalPlan interface -func (*gen4Plan) Wireup(logicalPlan, *jointab) error { - panic("[BUG]: should not be called. This is a Gen4 primitive") -} - -// SupplyVar implements the logicalPlan interface -func (*gen4Plan) SupplyVar(int, int, *sqlparser.ColName, string) { - panic("[BUG]: should not be called. This is a Gen4 primitive") -} - -// SupplyCol implements the logicalPlan interface -func (*gen4Plan) SupplyCol(*sqlparser.ColName) (rc *resultColumn, colNumber int) { - panic("[BUG]: should not be called. This is a Gen4 primitive") -} - -// SupplyWeightString implements the logicalPlan interface -func (*gen4Plan) SupplyWeightString(int, bool) (weightcolNumber int, err error) { - panic("[BUG]: should not be called. This is a Gen4 primitive") -} - -// v3Plan implements methods that are only used by gen4 -type v3Plan struct{} - -func (*v3Plan) WireupGen4(*plancontext.PlanningContext) error { - panic("[BUG]: should not be called. This is a V3 primitive") -} - -func (*v3Plan) ContainsTables() semantics.TableSet { - panic("[BUG]: should not be called. This is a V3 primitive") -} - -func (*v3Plan) OutputColumns() []sqlparser.SelectExpr { - panic("[BUG]: should not be called. This is a V3 primitive") -} - type planVisitor func(logicalPlan) (bool, logicalPlan, error) func visit(node logicalPlan, visitor planVisitor) (logicalPlan, error) { @@ -180,16 +86,6 @@ func visit(node logicalPlan, visitor planVisitor) (logicalPlan, error) { return node, nil } -// first returns the first logical plan of the tree, -// which is usually the left most leaf. -func first(input logicalPlan) logicalPlan { - inputs := input.Inputs() - if len(inputs) == 0 { - return input - } - return first(inputs[0]) -} - // ------------------------------------------------------------------------- // logicalPlanCommon implements some common functionality of builders. @@ -207,33 +103,8 @@ func (bc *logicalPlanCommon) Order() int { return bc.order } -func (bc *logicalPlanCommon) Reorder(order int) { - bc.input.Reorder(order) - bc.order = bc.input.Order() + 1 -} - -func (bc *logicalPlanCommon) ResultColumns() []*resultColumn { - return bc.input.ResultColumns() -} - -func (bc *logicalPlanCommon) Wireup(plan logicalPlan, jt *jointab) error { - return bc.input.Wireup(plan, jt) -} - -func (bc *logicalPlanCommon) WireupGen4(ctx *plancontext.PlanningContext) error { - return bc.input.WireupGen4(ctx) -} - -func (bc *logicalPlanCommon) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { - bc.input.SupplyVar(from, to, col, varname) -} - -func (bc *logicalPlanCommon) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - return bc.input.SupplyCol(col) -} - -func (bc *logicalPlanCommon) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) { - return bc.input.SupplyWeightString(colNumber, alsoAddToGroupBy) +func (bc *logicalPlanCommon) Wireup(ctx *plancontext.PlanningContext) error { + return bc.input.Wireup(ctx) } // Rewrite implements the logicalPlan interface @@ -266,67 +137,12 @@ func (bc *logicalPlanCommon) OutputColumns() []sqlparser.SelectExpr { // resultsColumn functionality. type resultsBuilder struct { logicalPlanCommon - resultColumns []*resultColumn - weightStrings map[*resultColumn]int - truncater truncater + truncater truncater } func newResultsBuilder(input logicalPlan, truncater truncater) resultsBuilder { return resultsBuilder{ logicalPlanCommon: newBuilderCommon(input), - resultColumns: input.ResultColumns(), - weightStrings: make(map[*resultColumn]int), truncater: truncater, } } - -func (rsb *resultsBuilder) ResultColumns() []*resultColumn { - return rsb.resultColumns -} - -// SupplyCol is currently unreachable because the builders using resultsBuilder -// are currently above a join, which is the only logicalPlan that uses it for now. -// This can change if we start supporting correlated subqueries. -func (rsb *resultsBuilder) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - c := col.Metadata.(*column) - for i, rc := range rsb.resultColumns { - if rc.column == c { - return rc, i - } - } - rc, colNumber = rsb.input.SupplyCol(col) - if colNumber < len(rsb.resultColumns) { - return rc, colNumber - } - // Add result columns from input until colNumber is reached. - for colNumber >= len(rsb.resultColumns) { - rsb.resultColumns = append(rsb.resultColumns, rsb.input.ResultColumns()[len(rsb.resultColumns)]) - } - rsb.truncater.SetTruncateColumnCount(len(rsb.resultColumns)) - return rc, colNumber -} - -func (rsb *resultsBuilder) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) { - rc := rsb.resultColumns[colNumber] - var ok bool - weightcolNumber, ok = rsb.weightStrings[rc] - if !alsoAddToGroupBy && ok { - return weightcolNumber, nil - } - weightcolNumber, err = rsb.input.SupplyWeightString(colNumber, alsoAddToGroupBy) - if err != nil { - return 0, nil - } - rsb.weightStrings[rc] = weightcolNumber - if weightcolNumber < len(rsb.resultColumns) { - return weightcolNumber, nil - } - // Add result columns from input until weightcolNumber is reached. - for weightcolNumber >= len(rsb.resultColumns) { - rsb.resultColumns = append(rsb.resultColumns, rsb.input.ResultColumns()[len(rsb.resultColumns)]) - } - rsb.truncater.SetTruncateColumnCount(len(rsb.resultColumns)) - return weightcolNumber, nil -} - -// ------------------------------------------------------------------------- diff --git a/go/vt/vtgate/planbuilder/memory_sort.go b/go/vt/vtgate/planbuilder/memory_sort.go index 20dd125ecd0..d32777ac123 100644 --- a/go/vt/vtgate/planbuilder/memory_sort.go +++ b/go/vt/vtgate/planbuilder/memory_sort.go @@ -17,14 +17,10 @@ limitations under the License. package planbuilder import ( - "fmt" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" ) @@ -41,67 +37,6 @@ type memorySort struct { eMemorySort *engine.MemorySort } -func findColNumber(ms *memorySort, expr *sqlparser.ColName) int { - c := expr.Metadata.(*column) - for i, rc := range ms.ResultColumns() { - if rc.column == c { - return i - } - } - return -1 -} - -// newMemorySort builds a new memorySort. -func newMemorySort(plan logicalPlan, orderBy v3OrderBy) (*memorySort, error) { - eMemorySort := &engine.MemorySort{} - ms := &memorySort{ - resultsBuilder: newResultsBuilder(plan, eMemorySort), - eMemorySort: eMemorySort, - } - for _, order := range orderBy { - var colNumber int - switch expr := order.Expr.(type) { - case *sqlparser.Literal: - var err error - if colNumber, err = ResultFromNumber(ms.ResultColumns(), expr, "order clause"); err != nil { - return nil, err - } - case *sqlparser.ColName: - colNumber = findColNumber(ms, expr) - case *sqlparser.CastExpr: - colName, ok := expr.Expr.(*sqlparser.ColName) - if !ok { - return nil, vterrors.VT12001(fmt.Sprintf("memory sort: complex ORDER BY expression: %s", sqlparser.String(expr))) - } - colNumber = findColNumber(ms, colName) - case *sqlparser.ConvertExpr: - colName, ok := expr.Expr.(*sqlparser.ColName) - if !ok { - return nil, vterrors.VT12001(fmt.Sprintf("memory sort: complex ORDER BY expression: %s", sqlparser.String(expr))) - } - colNumber = findColNumber(ms, colName) - default: - return nil, vterrors.VT12001(fmt.Sprintf("memory sort: complex ORDER BY expression: %s", sqlparser.String(expr))) - } - // If column is not found, then the order by is referencing - // a column that's not on the select list. - if colNumber == -1 { - return nil, vterrors.VT12001(fmt.Sprintf("memory sort: ORDER BY must reference a column in the SELECT list: %s", sqlparser.String(order))) - } - // TODO(king-11) need to pass in collation here - ob := engine.OrderByParams{ - Col: colNumber, - WeightStringCol: -1, - Desc: order.Direction == sqlparser.DescOrder, - StarColFixedIndex: colNumber, - FromGroupBy: order.fromGroupBy, - CollationID: collations.Unknown, - } - ms.eMemorySort.OrderBy = append(ms.eMemorySort.OrderBy, ob) - } - return ms, nil -} - // Primitive implements the logicalPlan interface func (ms *memorySort) Primitive() engine.Primitive { ms.eMemorySort.Input = ms.input.Primitive() @@ -113,32 +48,6 @@ func (ms *memorySort) SetLimit(limit *sqlparser.Limit) error { return vterrors.VT13001("memorySort.Limit: unreachable") } -// Wireup implements the logicalPlan interface -// If text columns are detected in the keys, then the function modifies -// the primitive to pull a corresponding weight_string from mysql and -// compare those instead. This is because we currently don't have the -// ability to mimic mysql's collation behavior. -func (ms *memorySort) Wireup(plan logicalPlan, jt *jointab) error { - for i, orderby := range ms.eMemorySort.OrderBy { - rc := ms.resultColumns[orderby.Col] - // Add a weight_string column if we know that the column is a textual column or if its type is unknown - if sqltypes.IsText(rc.column.typ) || rc.column.typ == sqltypes.Null { - weightcolNumber, err := ms.input.SupplyWeightString(orderby.Col, orderby.FromGroupBy) - if err != nil { - _, isUnsupportedErr := err.(UnsupportedSupplyWeightString) - if isUnsupportedErr { - continue - } - return err - } - ms.weightStrings[rc] = weightcolNumber - ms.eMemorySort.OrderBy[i].WeightStringCol = weightcolNumber - ms.eMemorySort.TruncateColumnCount = len(ms.resultColumns) - } - } - return ms.input.Wireup(plan, jt) -} - -func (ms *memorySort) WireupGen4(ctx *plancontext.PlanningContext) error { - return ms.input.WireupGen4(ctx) +func (ms *memorySort) Wireup(ctx *plancontext.PlanningContext) error { + return ms.input.Wireup(ctx) } diff --git a/go/vt/vtgate/planbuilder/merge_sort.go b/go/vt/vtgate/planbuilder/merge_sort.go index 4e72d062241..0da5b5fc135 100644 --- a/go/vt/vtgate/planbuilder/merge_sort.go +++ b/go/vt/vtgate/planbuilder/merge_sort.go @@ -17,7 +17,6 @@ limitations under the License. package planbuilder import ( - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" @@ -37,15 +36,6 @@ type mergeSort struct { truncateColumnCount int } -// newMergeSort builds a new mergeSort. -func newMergeSort(rb *route) *mergeSort { - ms := &mergeSort{ - resultsBuilder: newResultsBuilder(rb, nil), - } - ms.truncater = ms - return ms -} - // SetTruncateColumnCount satisfies the truncater interface. // This function records the truncate column count and sets // it later on the eroute during wire-up phase. @@ -58,35 +48,8 @@ func (ms *mergeSort) Primitive() engine.Primitive { return ms.input.Primitive() } -// Wireup implements the logicalPlan interface -func (ms *mergeSort) Wireup(plan logicalPlan, jt *jointab) error { - // If the route has to do the ordering, and if any columns are Text, - // we have to request the corresponding weight_string from mysql - // and use that value instead. This is because we cannot mimic - // mysql's collation behavior yet. - rb := ms.input.(*route) - for i, orderby := range rb.eroute.OrderBy { - rc := ms.resultColumns[orderby.Col] - // Add a weight_string column if we know that the column is a textual column or if its type is unknown - if sqltypes.IsText(rc.column.typ) || rc.column.typ == sqltypes.Null { - var err error - rb.eroute.OrderBy[i].WeightStringCol, err = rb.SupplyWeightString(orderby.Col, orderby.FromGroupBy) - if err != nil { - _, isUnsupportedErr := err.(UnsupportedSupplyWeightString) - if isUnsupportedErr { - continue - } - return err - } - ms.truncateColumnCount = len(ms.resultColumns) - } - } - rb.eroute.TruncateColumnCount = ms.truncateColumnCount - return ms.input.Wireup(plan, jt) -} - -func (ms *mergeSort) WireupGen4(ctx *plancontext.PlanningContext) error { - return ms.input.WireupGen4(ctx) +func (ms *mergeSort) Wireup(ctx *plancontext.PlanningContext) error { + return ms.input.Wireup(ctx) } // OutputColumns implements the logicalPlan interface diff --git a/go/vt/vtgate/planbuilder/migration.go b/go/vt/vtgate/planbuilder/migration.go index 468c86d3ffb..6fb73a9039d 100644 --- a/go/vt/vtgate/planbuilder/migration.go +++ b/go/vt/vtgate/planbuilder/migration.go @@ -17,19 +17,74 @@ limitations under the License. package planbuilder import ( + "strconv" + "time" + + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/key" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" ) -func buildAlterMigrationPlan(query string, vschema plancontext.VSchema, enableOnlineDDL bool) (*planResult, error) { +func validateThrottleParams(alterMigrationType sqlparser.AlterMigrationType, expireString string, ratioLiteral *sqlparser.Literal) (duration time.Duration, ratio float64, err error) { + switch alterMigrationType { + case sqlparser.UnthrottleMigrationType, + sqlparser.UnthrottleAllMigrationType: + // Unthrottling is like throttling with duration=0 + duration = 0 + default: + duration = throttle.DefaultAppThrottleDuration + if expireString != "" { + duration, err = time.ParseDuration(expireString) + if err != nil || duration < 0 { + return duration, ratio, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid EXPIRE value: %s. Try '120s', '30m', '1h', etc. Allowed units are (s)ec, (m)in, (h)hour", expireString) + } + } + } + ratio = 1.0 + if ratioLiteral != nil { + ratio, err = strconv.ParseFloat(ratioLiteral.Val, 64) + if err != nil || ratio < 0 || ratio > 1 { + return duration, ratio, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid RATIO value: %s. Try any decimal number between '0.0' (no throttle) and `1.0` (fully throttled)", ratioLiteral.Val) + } + } + return duration, ratio, nil +} + +func buildAlterMigrationThrottleAppPlan(query string, alterMigration *sqlparser.AlterMigration, keyspace *vindexes.Keyspace) (*planResult, error) { + duration, ratio, err := validateThrottleParams(alterMigration.Type, alterMigration.Expire, alterMigration.Ratio) + if err != nil { + return nil, err + } + expireAt := time.Now().Add(duration) + appName := alterMigration.UUID + if appName == "" { + appName = throttlerapp.OnlineDDLName.String() + } + throttledAppRule := &topodatapb.ThrottledAppRule{ + Name: appName, + ExpiresAt: protoutil.TimeToProto(expireAt), + Ratio: ratio, + } + return newPlanResult(&engine.ThrottleApp{ + Keyspace: keyspace, + ThrottledAppRule: throttledAppRule, + }), nil +} + +func buildAlterMigrationPlan(query string, alterMigration *sqlparser.AlterMigration, vschema plancontext.VSchema, enableOnlineDDL bool) (*planResult, error) { if !enableOnlineDDL { return nil, schema.ErrOnlineDDLDisabled } + dest, ks, tabletType, err := vschema.TargetDestination("") if err != nil { return nil, err @@ -38,6 +93,15 @@ func buildAlterMigrationPlan(query string, vschema plancontext.VSchema, enableOn return nil, vterrors.VT09005() } + switch alterMigration.Type { + case sqlparser.ThrottleMigrationType, + sqlparser.ThrottleAllMigrationType, + sqlparser.UnthrottleMigrationType, + sqlparser.UnthrottleAllMigrationType: + // ALTER VITESS_MIGRATION ... THROTTLE ... queries go to topo (similarly to `vtctldclient UpdateThrottlerConfig`) + return buildAlterMigrationThrottleAppPlan(query, alterMigration, ks) + } + if tabletType != topodatapb.TabletType_PRIMARY { return nil, vterrors.VT09006("ALTER") } diff --git a/go/vt/vtgate/planbuilder/operator_transformers.go b/go/vt/vtgate/planbuilder/operator_transformers.go index 2ec3efbac46..04ce68564c0 100644 --- a/go/vt/vtgate/planbuilder/operator_transformers.go +++ b/go/vt/vtgate/planbuilder/operator_transformers.go @@ -17,84 +17,329 @@ limitations under the License. package planbuilder import ( + "bytes" "fmt" "sort" "strconv" "strings" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/sqltypes" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "vitess.io/vitess/go/mysql/collations" - - "vitess.io/vitess/go/vt/vtgate/evalengine" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" - "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/engine/opcode" + "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/vindexes" - - "vitess.io/vitess/go/vt/vterrors" ) -func transformToLogicalPlan(ctx *plancontext.PlanningContext, op ops.Operator, isRoot bool) (logicalPlan, error) { +func transformToLogicalPlan(ctx *plancontext.PlanningContext, op ops.Operator) (logicalPlan, error) { switch op := op.(type) { case *operators.Route: return transformRoutePlan(ctx, op) case *operators.ApplyJoin: return transformApplyJoinPlan(ctx, op) case *operators.Union: - return transformUnionPlan(ctx, op, isRoot) + return transformUnionPlan(ctx, op) case *operators.Vindex: return transformVindexPlan(ctx, op) case *operators.SubQueryOp: return transformSubQueryPlan(ctx, op) case *operators.CorrelatedSubQueryOp: return transformCorrelatedSubQueryPlan(ctx, op) - case *operators.Derived: - return transformDerivedPlan(ctx, op) case *operators.Filter: - plan, err := transformToLogicalPlan(ctx, op.Source, false) + return transformFilter(ctx, op) + case *operators.Horizon: + return transformHorizon(ctx, op) + case *operators.Projection: + return transformProjection(ctx, op) + case *operators.Limit: + return transformLimit(ctx, op) + case *operators.Ordering: + return transformOrdering(ctx, op) + case *operators.Aggregator: + return transformAggregator(ctx, op) + case *operators.Distinct: + return transformDistinct(ctx, op) + case *operators.FkCascade: + return transformFkCascade(ctx, op) + case *operators.FkVerify: + return transformFkVerify(ctx, op) + } + + return nil, vterrors.VT13001(fmt.Sprintf("unknown type encountered: %T (transformToLogicalPlan)", op)) +} + +// transformFkCascade transforms a FkCascade operator into a logical plan. +func transformFkCascade(ctx *plancontext.PlanningContext, fkc *operators.FkCascade) (logicalPlan, error) { + // We convert the parent operator to a logical plan. + parentLP, err := transformToLogicalPlan(ctx, fkc.Parent) + if err != nil { + return nil, nil + } + + // Once we have the parent logical plan, we can create the selection logical plan and the primitives for the children operators. + // For all of these, we don't need the semTable anymore. We set it to nil, to avoid using an incorrect one. + ctx.SemTable = nil + selLP, err := transformToLogicalPlan(ctx, fkc.Selection) + if err != nil { + return nil, err + } + + // Go over the children and convert them to Primitives too. + var children []*engine.FkChild + for _, child := range fkc.Children { + childLP, err := transformToLogicalPlan(ctx, child.Op) if err != nil { return nil, err } - scl := &simpleConverterLookup{ - canPushProjection: true, - ctx: ctx, - plan: plan, + err = childLP.Wireup(ctx) + if err != nil { + return nil, err } - ast := ctx.SemTable.AndExpressions(op.Predicates...) - predicate, err := evalengine.Translate(ast, scl) + childEngine := childLP.Primitive() + children = append(children, &engine.FkChild{ + BVName: child.BVName, + Cols: child.Cols, + Exec: childEngine, + }) + } + + return newFkCascade(parentLP, selLP, children), nil +} + +// transformFkVerify transforms a FkVerify operator into a logical plan. +func transformFkVerify(ctx *plancontext.PlanningContext, fkv *operators.FkVerify) (logicalPlan, error) { + inputLP, err := transformToLogicalPlan(ctx, fkv.Input) + if err != nil { + return nil, err + } + + // Once we have the input logical plan, we can create the primitives for the verification operators. + // For all of these, we don't need the semTable anymore. We set it to nil, to avoid using an incorrect one. + ctx.SemTable = nil + + // Go over the children and convert them to Primitives too. + var verify []*verifyLP + for _, v := range fkv.Verify { + lp, err := transformToLogicalPlan(ctx, v.Op) if err != nil { return nil, err } + verify = append(verify, &verifyLP{ + verify: lp, + typ: v.Typ, + }) + } - return &filter{ - logicalPlanCommon: newBuilderCommon(plan), - efilter: &engine.Filter{ - Predicate: predicate, - ASTPredicate: ast, - }, - }, nil - case *operators.Horizon: - return transformHorizon(ctx, op, isRoot) + return newFkVerify(inputLP, verify), nil +} + +func transformAggregator(ctx *plancontext.PlanningContext, op *operators.Aggregator) (logicalPlan, error) { + plan, err := transformToLogicalPlan(ctx, op.Source) + if err != nil { + return nil, err } - return nil, vterrors.VT13001(fmt.Sprintf("unknown type encountered: %T (transformToLogicalPlan)", op)) + oa := &orderedAggregate{ + resultsBuilder: newResultsBuilder(plan, nil), + } + + for _, aggr := range op.Aggregations { + if aggr.OpCode == opcode.AggregateUnassigned { + return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: aggregation function '%s'", sqlparser.String(aggr.Original))) + } + aggrParam := engine.NewAggregateParam(aggr.OpCode, aggr.ColOffset, aggr.Alias) + aggrParam.Expr = aggr.Func + aggrParam.Original = aggr.Original + aggrParam.OrigOpcode = aggr.OriginalOpCode + aggrParam.WCol = aggr.WSOffset + aggrParam.Type, aggrParam.CollationID = aggr.GetTypeCollation(ctx) + oa.aggregates = append(oa.aggregates, aggrParam) + } + for _, groupBy := range op.Grouping { + typ, col, _ := ctx.SemTable.TypeForExpr(groupBy.SimplifiedExpr) + oa.groupByKeys = append(oa.groupByKeys, &engine.GroupByParams{ + KeyCol: groupBy.ColOffset, + WeightStringCol: groupBy.WSOffset, + Expr: groupBy.AsAliasedExpr().Expr, + Type: typ, + CollationID: col, + }) + } + + if err != nil { + return nil, err + } + oa.truncateColumnCount = op.ResultColumns + return oa, nil +} + +func transformDistinct(ctx *plancontext.PlanningContext, op *operators.Distinct) (logicalPlan, error) { + src, err := transformToLogicalPlan(ctx, op.Source) + if err != nil { + return nil, err + } + return newDistinct(src, op.Columns, op.Truncate), nil +} + +func transformOrdering(ctx *plancontext.PlanningContext, op *operators.Ordering) (logicalPlan, error) { + plan, err := transformToLogicalPlan(ctx, op.Source) + if err != nil { + return nil, err + } + + return createMemorySort(ctx, plan, op) +} + +func createMemorySort(ctx *plancontext.PlanningContext, src logicalPlan, ordering *operators.Ordering) (logicalPlan, error) { + primitive := &engine.MemorySort{ + TruncateColumnCount: ordering.ResultColumns, + } + ms := &memorySort{ + resultsBuilder: newResultsBuilder(src, primitive), + eMemorySort: primitive, + } + + for idx, order := range ordering.Order { + typ, collationID, _ := ctx.SemTable.TypeForExpr(order.SimplifiedExpr) + ms.eMemorySort.OrderBy = append(ms.eMemorySort.OrderBy, engine.OrderByParams{ + Col: ordering.Offset[idx], + WeightStringCol: ordering.WOffset[idx], + Desc: order.Inner.Direction == sqlparser.DescOrder, + StarColFixedIndex: ordering.Offset[idx], + Type: typ, + CollationID: collationID, + }) + } + + return ms, nil +} + +func transformProjection(ctx *plancontext.PlanningContext, op *operators.Projection) (logicalPlan, error) { + src, err := transformToLogicalPlan(ctx, op.Source) + if err != nil { + return nil, err + } + + if cols := op.AllOffsets(); cols != nil { + // if all this op is doing is passing through columns from the input, we + // can use the faster SimpleProjection + return useSimpleProjection(ctx, op, cols, src) + } + + expressions := slice.Map(op.Projections, func(from operators.ProjExpr) sqlparser.Expr { + return from.GetExpr() + }) + + failed := false + evalengineExprs := slice.Map(op.Projections, func(from operators.ProjExpr) evalengine.Expr { + switch e := from.(type) { + case operators.Eval: + return e.EExpr + case operators.Offset: + typ, col, _ := ctx.SemTable.TypeForExpr(e.Expr) + return evalengine.NewColumn(e.Offset, typ, col) + default: + failed = true + return nil + } + }) + var primitive *engine.Projection + columnNames := slice.Map(op.Columns, func(from *sqlparser.AliasedExpr) string { + return from.ColumnName() + }) + + if !failed { + primitive = &engine.Projection{ + Cols: columnNames, + Exprs: evalengineExprs, + } + } + + return &projection{ + source: src, + columnNames: columnNames, + columns: expressions, + primitive: primitive, + }, nil +} + +// useSimpleProjection uses nothing at all if the output is already correct, +// or SimpleProjection when we have to reorder or truncate the columns +func useSimpleProjection(ctx *plancontext.PlanningContext, op *operators.Projection, cols []int, src logicalPlan) (logicalPlan, error) { + columns, err := op.Source.GetColumns(ctx) + if err != nil { + return nil, err + } + if len(columns) == len(cols) && elementsMatchIndices(cols) { + // the columns are already in the right order. we don't need anything at all here + return src, nil + } + return &simpleProjection{ + logicalPlanCommon: newBuilderCommon(src), + eSimpleProj: &engine.SimpleProjection{ + Cols: cols, + }, + }, nil } -func transformHorizon(ctx *plancontext.PlanningContext, op *operators.Horizon, isRoot bool) (logicalPlan, error) { - source, err := transformToLogicalPlan(ctx, op.Source, isRoot) +// elementsMatchIndices checks if the elements of the input slice match +// their corresponding index values. It returns true if all elements match +// their indices, and false otherwise. +func elementsMatchIndices(in []int) bool { + for idx, val := range in { + if val != idx { + return false + } + } + return true +} + +func transformFilter(ctx *plancontext.PlanningContext, op *operators.Filter) (logicalPlan, error) { + plan, err := transformToLogicalPlan(ctx, op.Source) + if err != nil { + return nil, err + } + + predicate := op.FinalPredicate + ast := ctx.SemTable.AndExpressions(op.Predicates...) + + // this might already have been done on the operators + if predicate == nil { + predicate, err = evalengine.Translate(ast, &evalengine.Config{ + ResolveType: ctx.SemTable.TypeForExpr, + ResolveColumn: resolveFromPlan(ctx, plan, true), + Collation: ctx.SemTable.Collation, + }) + if err != nil { + return nil, err + } + } + + return &filter{ + logicalPlanCommon: newBuilderCommon(plan), + efilter: &engine.Filter{ + Predicate: predicate, + ASTPredicate: ast, + Truncate: op.Truncate, + }, + }, nil +} + +func transformHorizon(ctx *plancontext.PlanningContext, op *operators.Horizon) (logicalPlan, error) { + if op.IsDerived() { + return transformDerivedPlan(ctx, op) + } + source, err := transformToLogicalPlan(ctx, op.Source) if err != nil { return nil, err } - switch node := op.Select.(type) { + switch node := op.Query.(type) { case *sqlparser.Select: hp := horizonPlanning{ sel: node, @@ -108,13 +353,13 @@ func transformHorizon(ctx *plancontext.PlanningContext, op *operators.Horizon, i return planLimit(node.Limit, plan) case *sqlparser.Union: var err error - rb, isRoute := source.(*routeGen4) + rb, isRoute := source.(*route) if !isRoute && ctx.SemTable.NotSingleRouteErr != nil { return nil, ctx.SemTable.NotSingleRouteErr } var plan logicalPlan if isRoute && rb.isSingleShard() { - err = planSingleShardRoutePlan(node, rb) + err = planSingleRoutePlan(node, rb) plan = rb } else { plan, err = planOrderByOnUnion(ctx, source, node) @@ -129,11 +374,11 @@ func transformHorizon(ctx *plancontext.PlanningContext, op *operators.Horizon, i } func transformApplyJoinPlan(ctx *plancontext.PlanningContext, n *operators.ApplyJoin) (logicalPlan, error) { - lhs, err := transformToLogicalPlan(ctx, n.LHS, false) + lhs, err := transformToLogicalPlan(ctx, n.LHS) if err != nil { return nil, err } - rhs, err := transformToLogicalPlan(ctx, n.RHS, false) + rhs, err := transformToLogicalPlan(ctx, n.RHS) if err != nil { return nil, err } @@ -142,7 +387,7 @@ func transformApplyJoinPlan(ctx *plancontext.PlanningContext, n *operators.Apply opCode = engine.LeftJoin } - return &joinGen4{ + return &join{ Left: lhs, Right: rhs, Cols: n.Columns, @@ -165,8 +410,9 @@ func routeToEngineRoute(ctx *plancontext.PlanningContext, op *operators.Route) ( } return &engine.Route{ - TableName: strings.Join(tableNames, ", "), - RoutingParameters: rp, + TableName: strings.Join(tableNames, ", "), + RoutingParameters: rp, + TruncateColumnCount: op.ResultColumns, }, nil } @@ -184,49 +430,184 @@ func newRoutingParams(ctx *plancontext.PlanningContext, opCode engine.Opcode) *e } func transformRoutePlan(ctx *plancontext.PlanningContext, op *operators.Route) (logicalPlan, error) { - switch src := op.Source.(type) { - case *operators.Update: - return transformUpdatePlan(ctx, op, src) - case *operators.Delete: - return transformDeletePlan(ctx, op, src) - } - condition := getVindexPredicate(ctx, op) - sel, err := operators.ToSQL(ctx, op.Source) + stmt, dmlOp, err := operators.ToSQL(ctx, op.Source) if err != nil { return nil, err } - replaceSubQuery(ctx, sel) + + replaceSubQuery(ctx, stmt) + + if stmtWithComments, ok := stmt.(sqlparser.Commented); ok && op.Comments != nil { + stmtWithComments.SetComments(op.Comments.GetComments()) + } + + switch stmt := stmt.(type) { + case sqlparser.SelectStatement: + if op.Lock != sqlparser.NoLock { + stmt.SetLock(op.Lock) + } + return buildRouteLogicalPlan(ctx, op, stmt) + case *sqlparser.Update: + return buildUpdateLogicalPlan(ctx, op, dmlOp, stmt) + case *sqlparser.Delete: + return buildDeleteLogicalPlan(ctx, op, dmlOp, stmt) + case *sqlparser.Insert: + return buildInsertLogicalPlan(ctx, op, dmlOp, stmt) + default: + return nil, vterrors.VT13001(fmt.Sprintf("dont know how to %T", stmt)) + } +} + +func buildRouteLogicalPlan(ctx *plancontext.PlanningContext, op *operators.Route, stmt sqlparser.SelectStatement) (logicalPlan, error) { + condition := getVindexPredicate(ctx, op) eroute, err := routeToEngineRoute(ctx, op) + for _, order := range op.Ordering { + typ, collation, _ := ctx.SemTable.TypeForExpr(order.AST) + eroute.OrderBy = append(eroute.OrderBy, engine.OrderByParams{ + Col: order.Offset, + WeightStringCol: order.WOffset, + Desc: order.Direction == sqlparser.DescOrder, + Type: typ, + CollationID: collation, + }) + } if err != nil { return nil, err } - return &routeGen4{ + return &route{ eroute: eroute, - Select: sel, + Select: stmt, tables: operators.TableID(op), condition: condition, }, nil +} + +func buildInsertLogicalPlan(ctx *plancontext.PlanningContext, rb *operators.Route, op ops.Operator, stmt *sqlparser.Insert) (logicalPlan, error) { + ins := op.(*operators.Insert) + eins := &engine.Insert{ + Opcode: mapToInsertOpCode(rb.Routing.OpCode(), ins.Input != nil), + Keyspace: rb.Routing.Keyspace(), + TableName: ins.VTable.Name.String(), + Ignore: ins.Ignore, + ForceNonStreaming: ins.ForceNonStreaming, + Generate: autoIncGenerate(ins.AutoIncrement), + ColVindexes: ins.ColVindexes, + VindexValues: ins.VindexValues, + VindexValueOffset: ins.VindexValueOffset, + } + lp := &insert{eInsert: eins} + + // we would need to generate the query on the fly. The only exception here is + // when unsharded query with autoincrement for that there is no input operator. + if eins.Opcode != engine.InsertUnsharded || ins.Input != nil { + eins.Prefix, eins.Mid, eins.Suffix = generateInsertShardedQuery(ins.AST) + } + + if ins.Input == nil { + eins.Query = generateQuery(stmt) + } else { + newSrc, err := transformToLogicalPlan(ctx, ins.Input) + if err != nil { + return nil, err + } + lp.source = newSrc + } + + return lp, nil +} + +func mapToInsertOpCode(code engine.Opcode, insertSelect bool) engine.InsertOpcode { + if code == engine.Unsharded { + return engine.InsertUnsharded + } + if insertSelect { + return engine.InsertSelect + } + return engine.InsertSharded +} + +func autoIncGenerate(gen *operators.Generate) *engine.Generate { + if gen == nil { + return nil + } + selNext := &sqlparser.Select{ + From: []sqlparser.TableExpr{&sqlparser.AliasedTableExpr{Expr: gen.TableName}}, + SelectExprs: sqlparser.SelectExprs{&sqlparser.Nextval{Expr: &sqlparser.Argument{Name: "n", Type: sqltypes.Int64}}}, + } + return &engine.Generate{ + Keyspace: gen.Keyspace, + Query: sqlparser.String(selNext), + Values: gen.Values, + Offset: gen.Offset, + } +} +func generateInsertShardedQuery(ins *sqlparser.Insert) (prefix string, mid []string, suffix string) { + valueTuples, isValues := ins.Rows.(sqlparser.Values) + prefixFormat := "insert %v%sinto %v%v " + if isValues { + // the mid values are filled differently + // with select uses sqlparser.String for sqlparser.Values + // with rows uses string. + prefixFormat += "values " + } + prefixBuf := sqlparser.NewTrackedBuffer(dmlFormatter) + prefixBuf.Myprintf(prefixFormat, + ins.Comments, ins.Ignore.ToString(), + ins.Table, ins.Columns) + prefix = prefixBuf.String() + + suffixBuf := sqlparser.NewTrackedBuffer(dmlFormatter) + suffixBuf.Myprintf("%v", ins.OnDup) + suffix = suffixBuf.String() + + if !isValues { + // this is a insert query using select to insert the rows. + return + } + + midBuf := sqlparser.NewTrackedBuffer(dmlFormatter) + mid = make([]string, len(valueTuples)) + for rowNum, val := range valueTuples { + midBuf.Myprintf("%v", val) + mid[rowNum] = midBuf.String() + midBuf.Reset() + } + + return +} + +// dmlFormatter strips out keyspace name from dmls. +func dmlFormatter(buf *sqlparser.TrackedBuffer, node sqlparser.SQLNode) { + switch node := node.(type) { + case sqlparser.TableName: + node.Name.Format(buf) + return + } + node.Format(buf) } -func transformUpdatePlan(ctx *plancontext.PlanningContext, op *operators.Route, upd *operators.Update) (logicalPlan, error) { - ast := upd.AST - replaceSubQuery(ctx, ast) +func buildUpdateLogicalPlan( + ctx *plancontext.PlanningContext, + op *operators.Route, + dmlOp ops.Operator, + stmt *sqlparser.Update, +) (logicalPlan, error) { + upd := dmlOp.(*operators.Update) rp := newRoutingParams(ctx, op.Routing.OpCode()) err := op.Routing.UpdateRoutingParams(ctx, rp) if err != nil { return nil, err } edml := &engine.DML{ - Query: generateQuery(ast), - Table: []*vindexes.Table{ - upd.VTable, - }, + Query: generateQuery(stmt), + TableNames: []string{upd.VTable.Name.String()}, + Vindexes: upd.VTable.ColumnVindexes, OwnedVindexQuery: upd.OwnedVindexQuery, RoutingParameters: rp, } - transformDMLPlan(upd.AST, upd.VTable, edml, op.Routing, len(upd.ChangedVindexValues) > 0) + transformDMLPlan(upd.VTable, edml, op.Routing, len(upd.ChangedVindexValues) > 0) e := &engine.Update{ ChangedVindexValues: upd.ChangedVindexValues, @@ -236,24 +617,27 @@ func transformUpdatePlan(ctx *plancontext.PlanningContext, op *operators.Route, return &primitiveWrapper{prim: e}, nil } -func transformDeletePlan(ctx *plancontext.PlanningContext, op *operators.Route, del *operators.Delete) (logicalPlan, error) { - ast := del.AST - replaceSubQuery(ctx, ast) - rp := newRoutingParams(ctx, op.Routing.OpCode()) - err := op.Routing.UpdateRoutingParams(ctx, rp) +func buildDeleteLogicalPlan( + ctx *plancontext.PlanningContext, + rb *operators.Route, + dmlOp ops.Operator, + ast *sqlparser.Delete, +) (logicalPlan, error) { + del := dmlOp.(*operators.Delete) + rp := newRoutingParams(ctx, rb.Routing.OpCode()) + err := rb.Routing.UpdateRoutingParams(ctx, rp) if err != nil { return nil, err } edml := &engine.DML{ - Query: generateQuery(ast), - Table: []*vindexes.Table{ - del.VTable, - }, + Query: generateQuery(ast), + TableNames: []string{del.VTable.Name.String()}, + Vindexes: del.VTable.Owned, OwnedVindexQuery: del.OwnedVindexQuery, RoutingParameters: rp, } - transformDMLPlan(del.AST, del.VTable, edml, op.Routing, del.OwnedVindexQuery != "") + transformDMLPlan(del.VTable, edml, rb.Routing, del.OwnedVindexQuery != "") e := &engine.Delete{ DML: edml, @@ -262,13 +646,7 @@ func transformDeletePlan(ctx *plancontext.PlanningContext, op *operators.Route, return &primitiveWrapper{prim: e}, nil } -func transformDMLPlan(stmt sqlparser.Commented, vtable *vindexes.Table, edml *engine.DML, routing operators.Routing, setVindex bool) { - directives := stmt.GetParsedComments().Directives() - if directives.IsSet(sqlparser.DirectiveMultiShardAutocommit) { - edml.MultiShardAutocommit = true - } - edml.QueryTimeout = queryTimeout(directives) - +func transformDMLPlan(vtable *vindexes.Table, edml *engine.DML, routing operators.Routing, setVindex bool) { if routing.OpCode() != engine.Unsharded && setVindex { primary := vtable.ColumnVindexes[0] edml.KsidVindex = primary.Vindex @@ -354,237 +732,36 @@ func getAllTableNames(op *operators.Route) ([]string, error) { return tableNames, nil } -func transformUnionPlan(ctx *plancontext.PlanningContext, op *operators.Union, isRoot bool) (logicalPlan, error) { - var sources []logicalPlan - var err error - if op.Distinct { - sources, err = transformAndMerge(ctx, op) - if err != nil { - return nil, err - } - for _, source := range sources { - pushDistinct(source) - } - } else { - sources, err = transformAndMergeInOrder(ctx, op) - if err != nil { - return nil, err - } - } - var result logicalPlan - if len(sources) == 1 { - src := sources[0] - if rb, isRoute := src.(*routeGen4); isRoute && rb.isSingleShard() { - // if we have a single shard route, we don't need to do anything to make it distinct - // TODO - // rb.Select.SetLimit(op.limit) - // rb.Select.SetOrderBy(op.ordering) - return src, nil - } - result = src - } else { - if len(op.Ordering) > 0 { - return nil, vterrors.VT12001("ORDER BY on top of UNION") - } - result = &concatenateGen4{sources: sources} - } - if op.Distinct { - colls := getCollationsFor(ctx, op) - checkCols, err := getCheckColsForUnion(ctx, result, colls) - if err != nil { - return nil, err - } - return newDistinct(result, checkCols, isRoot), nil - } - return result, nil - -} - -func getWeightStringForSelectExpr(selectExpr sqlparser.SelectExpr) (*sqlparser.AliasedExpr, error) { - expr, isAliased := selectExpr.(*sqlparser.AliasedExpr) - if !isAliased { - return nil, vterrors.VT12001("get weight string expression for non-aliased expression") - } - return &sqlparser.AliasedExpr{Expr: weightStringFor(expr.Expr)}, nil -} - -func getCheckColsForUnion(ctx *plancontext.PlanningContext, result logicalPlan, colls []collations.ID) ([]engine.CheckCol, error) { - checkCols := make([]engine.CheckCol, 0, len(colls)) - for i, coll := range colls { - checkCol := engine.CheckCol{Col: i, Collation: coll} - if coll != collations.Unknown { - checkCols = append(checkCols, checkCol) - continue - } - // We might need a weight string - let's push one - // `might` because we just don't know what type we are dealing with. - // If we encounter a numerical value, we don't need any weight_string values - newOffset, err := pushWeightStringForDistinct(ctx, result, i) +func transformUnionPlan(ctx *plancontext.PlanningContext, op *operators.Union) (logicalPlan, error) { + sources, err := slice.MapWithError(op.Sources, func(src ops.Operator) (logicalPlan, error) { + plan, err := transformToLogicalPlan(ctx, src) if err != nil { return nil, err } - checkCol.WsCol = &newOffset - checkCols = append(checkCols, checkCol) - } - return checkCols, nil -} - -// pushWeightStringForDistinct adds a weight_string projection -func pushWeightStringForDistinct(ctx *plancontext.PlanningContext, plan logicalPlan, offset int) (newOffset int, err error) { - switch node := plan.(type) { - case *routeGen4: - allSelects := sqlparser.GetAllSelects(node.Select) - for _, sel := range allSelects { - expr, err := getWeightStringForSelectExpr(sel.SelectExprs[offset]) - if err != nil { - return 0, err - } - if i := checkIfAlreadyExists(expr, sel, ctx.SemTable); i != -1 { - return i, nil - } - sel.SelectExprs = append(sel.SelectExprs, expr) - newOffset = len(sel.SelectExprs) - 1 - } - // we leave the responsibility of truncating to distinct - node.eroute.TruncateColumnCount = 0 - case *concatenateGen4: - for _, source := range node.sources { - newOffset, err = pushWeightStringForDistinct(ctx, source, offset) - if err != nil { - return 0, err - } - } - node.noNeedToTypeCheck = append(node.noNeedToTypeCheck, newOffset) - case *joinGen4: - lhsSolves := node.Left.ContainsTables() - rhsSolves := node.Right.ContainsTables() - expr := node.OutputColumns()[offset] - aliasedExpr, isAliased := expr.(*sqlparser.AliasedExpr) - if !isAliased { - return 0, vterrors.VT13001("cannot convert JOIN output columns to an aliased-expression") - } - deps := ctx.SemTable.RecursiveDeps(aliasedExpr.Expr) - switch { - case deps.IsSolvedBy(lhsSolves): - offset, err = pushWeightStringForDistinct(ctx, node.Left, offset) - node.Cols = append(node.Cols, -(offset + 1)) - case deps.IsSolvedBy(rhsSolves): - offset, err = pushWeightStringForDistinct(ctx, node.Right, offset) - node.Cols = append(node.Cols, offset+1) - default: - return 0, vterrors.VT12001("push DISTINCT WEIGHT_STRING to both sides of the join") - } - newOffset = len(node.Cols) - 1 - default: - return 0, vterrors.VT13001(fmt.Sprintf("pushWeightStringForDistinct on %T", plan)) - } - return -} - -func transformAndMerge(ctx *plancontext.PlanningContext, op *operators.Union) (sources []logicalPlan, err error) { - for _, source := range op.Sources { - // first we go over all the operator inputs and turn them into logical plans, - // including horizon planning - plan, err := transformToLogicalPlan(ctx, source, false) - if err != nil { - return nil, err - } - sources = append(sources, plan) - } - - // next we'll go over all the plans from and check if any two can be merged. if they can, they are merged, - // and we continue checking for pairs of plans that can be merged into a single route - idx := 0 - for idx < len(sources) { - keep := make([]bool, len(sources)) - srcA := sources[idx] - merged := false - for j, srcB := range sources { - if j <= idx { - continue - } - newPlan := mergeUnionLogicalPlans(ctx, srcA, srcB) - if newPlan != nil { - sources[idx] = newPlan - srcA = newPlan - merged = true - } else { - keep[j] = true - } - } - if !merged { - return sources, nil - } - var phase []logicalPlan - for i, source := range sources { - if keep[i] || i <= idx { - phase = append(phase, source) - } - } - idx++ - sources = phase + return plan, nil + }) + if err != nil { + return nil, err } - return sources, nil -} - -func transformAndMergeInOrder(ctx *plancontext.PlanningContext, op *operators.Union) (sources []logicalPlan, err error) { - // We go over all the input operators and turn them into logical plans - for i, source := range op.Sources { - plan, err := transformToLogicalPlan(ctx, source, false) - if err != nil { - return nil, err - } - if i == 0 { - sources = append(sources, plan) - continue - } - // next we check if the last plan we produced can be merged with this new plan - last := sources[len(sources)-1] - newPlan := mergeUnionLogicalPlans(ctx, last, plan) - if newPlan != nil { - // if we could merge them, let's replace the last plan with this new merged one - sources[len(sources)-1] = newPlan - continue - } - // else we just add the new plan to the end of list - sources = append(sources, plan) + if len(sources) == 1 { + return sources[0], nil } - return sources, nil -} - -func getCollationsFor(ctx *plancontext.PlanningContext, n *operators.Union) []collations.ID { - // TODO: coerce selects' select expressions' collations - var colls []collations.ID + return &concatenate{ + sources: sources, + noNeedToTypeCheck: nil, + }, nil - sel, err := n.GetSelectFor(0) - if err != nil { - return nil - } - for _, expr := range sel.SelectExprs { - aliasedE, ok := expr.(*sqlparser.AliasedExpr) - if !ok { - return nil - } - typ := ctx.SemTable.CollationForExpr(aliasedE.Expr) - if typ == collations.Unknown { - if t, hasT := ctx.SemTable.ExprTypes[aliasedE.Expr]; hasT && sqltypes.IsNumber(t.Type) { - typ = collations.CollationBinaryID - } - } - colls = append(colls, typ) - } - return colls } -func transformDerivedPlan(ctx *plancontext.PlanningContext, op *operators.Derived) (logicalPlan, error) { +func transformDerivedPlan(ctx *plancontext.PlanningContext, op *operators.Horizon) (logicalPlan, error) { // transforming the inner part of the derived table into a logical plan // so that we can do horizon planning on the inner. If the logical plan // we've produced is a Route, we set its Select.From field to be an aliased // expression containing our derived table's inner select and the derived // table's alias. - plan, err := transformToLogicalPlan(ctx, op.Source, false) + plan, err := transformToLogicalPlan(ctx, op.Source) if err != nil { return nil, err } @@ -594,7 +771,7 @@ func transformDerivedPlan(ctx *plancontext.PlanningContext, op *operators.Derive return nil, err } - rb, isRoute := plan.(*routeGen4) + rb, isRoute := plan.(*route) if !isRoute { return &simpleProjection{ logicalPlanCommon: newBuilderCommon(plan), @@ -623,6 +800,15 @@ func transformDerivedPlan(ctx *plancontext.PlanningContext, op *operators.Derive return plan, nil } +func transformLimit(ctx *plancontext.PlanningContext, op *operators.Limit) (logicalPlan, error) { + plan, err := transformToLogicalPlan(ctx, op.Source) + if err != nil { + return nil, err + } + + return createLimit(plan, op.AST) +} + type subQReplacer struct { subqueryToReplace []*sqlparser.ExtractedSubquery replaced bool @@ -644,62 +830,7 @@ func (sqr *subQReplacer) replacer(cursor *sqlparser.Cursor) bool { return true } -func pushDistinct(plan logicalPlan) { - switch n := plan.(type) { - case *routeGen4: - n.Select.MakeDistinct() - case *concatenateGen4: - for _, source := range n.sources { - pushDistinct(source) - } - } -} - -func mergeUnionLogicalPlans(ctx *plancontext.PlanningContext, left logicalPlan, right logicalPlan) logicalPlan { - lroute, ok := left.(*routeGen4) - if !ok { - return nil - } - rroute, ok := right.(*routeGen4) - if !ok { - return nil - } - - if canMergeUnionPlans(ctx, lroute, rroute) { - lroute.Select = &sqlparser.Union{Left: lroute.Select, Distinct: false, Right: rroute.Select} - return mergeSystemTableInformation(lroute, rroute) - } - return nil -} - -func canMergeUnionPlans(ctx *plancontext.PlanningContext, a, b *routeGen4) bool { - // this method should be close to tryMerge below. it does the same thing, but on logicalPlans instead of queryTrees - if a.eroute.Keyspace.Name != b.eroute.Keyspace.Name { - return false - } - switch a.eroute.Opcode { - case engine.Unsharded, engine.Reference: - return a.eroute.Opcode == b.eroute.Opcode - case engine.DBA: - return canSelectDBAMerge(a, b) - case engine.EqualUnique: - // Check if they target the same shard. - if b.eroute.Opcode == engine.EqualUnique && - a.eroute.Vindex == b.eroute.Vindex && - a.condition != nil && - b.condition != nil && - gen4ValuesEqual(ctx, []sqlparser.Expr{a.condition}, []sqlparser.Expr{b.condition}) { - return true - } - case engine.Scatter: - return b.eroute.Opcode == engine.Scatter - case engine.Next: - return false - } - return false -} - -func canSelectDBAMerge(a, b *routeGen4) bool { +func canSelectDBAMerge(a, b *route) bool { if a.eroute.Opcode != engine.DBA { return false } @@ -756,12 +887,12 @@ func gen4ValEqual(ctx *plancontext.PlanningContext, a, b sqlparser.Expr) bool { return ctx.SemTable.DirectDeps(a) == ctx.SemTable.DirectDeps(b) } - case sqlparser.Argument: - b, ok := b.(sqlparser.Argument) + case *sqlparser.Argument: + b, ok := b.(*sqlparser.Argument) if !ok { return false } - return a == b + return a.Name == b.Name case *sqlparser.Literal: b, ok := b.(*sqlparser.Literal) if !ok { @@ -785,3 +916,21 @@ func gen4ValEqual(ctx *plancontext.PlanningContext, a, b sqlparser.Expr) bool { } return false } + +func hexEqual(a, b *sqlparser.Literal) bool { + v, err := a.HexDecode() + if err != nil { + return false + } + switch b.Type { + case sqlparser.StrVal: + return bytes.Equal(v, b.Bytes()) + case sqlparser.HexVal: + v2, err := b.HexDecode() + if err != nil { + return false + } + return bytes.Equal(v, v2) + } + return false +} diff --git a/go/vt/vtgate/planbuilder/operators/SQL_builder.go b/go/vt/vtgate/planbuilder/operators/SQL_builder.go index 93618a95675..ba8e56b4f1c 100644 --- a/go/vt/vtgate/planbuilder/operators/SQL_builder.go +++ b/go/vt/vtgate/planbuilder/operators/SQL_builder.go @@ -18,33 +18,39 @@ package operators import ( "fmt" + "slices" "sort" - "strings" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) type ( queryBuilder struct { - ctx *plancontext.PlanningContext - sel sqlparser.SelectStatement - tableNames []string + ctx *plancontext.PlanningContext + stmt sqlparser.Statement + tableNames []string + dmlOperator ops.Operator } ) -func ToSQL(ctx *plancontext.PlanningContext, op ops.Operator) (sqlparser.SelectStatement, error) { +func (qb *queryBuilder) asSelectStatement() sqlparser.SelectStatement { + return qb.stmt.(sqlparser.SelectStatement) +} + +func ToSQL(ctx *plancontext.PlanningContext, op ops.Operator) (sqlparser.Statement, ops.Operator, error) { q := &queryBuilder{ctx: ctx} err := buildQuery(op, q) if err != nil { - return nil, err + return nil, nil, err + } + if ctx.SemTable != nil { + q.sortTables() } - q.sortTables() - return q.sel, nil + return q.stmt, q.dmlOperator, nil } func (qb *queryBuilder) addTable(db, tableName, alias string, tableID semantics.TableSet, hints sqlparser.IndexHints) { @@ -62,10 +68,10 @@ func (qb *queryBuilder) addTableExpr( hints sqlparser.IndexHints, columnAliases sqlparser.Columns, ) { - if qb.sel == nil { - qb.sel = &sqlparser.Select{} + if qb.stmt == nil { + qb.stmt = &sqlparser.Select{} } - sel := qb.sel.(*sqlparser.Select) + sel := qb.stmt.(*sqlparser.Select) elems := &sqlparser.AliasedTableExpr{ Expr: tblExpr, Partitions: nil, @@ -75,7 +81,7 @@ func (qb *queryBuilder) addTableExpr( } qb.ctx.SemTable.ReplaceTableSetFor(tableID, elems) sel.From = append(sel.From, elems) - qb.sel = sel + qb.stmt = sel qb.tableNames = append(qb.tableNames, tableName) } @@ -86,24 +92,121 @@ func (qb *queryBuilder) addPredicate(expr sqlparser.Expr) { return } - sel := qb.sel.(*sqlparser.Select) - if sel.Where == nil { - sel.AddWhere(expr) - return + _, isSubQuery := expr.(*sqlparser.ExtractedSubquery) + var addPred func(sqlparser.Expr) + + switch stmt := qb.stmt.(type) { + case *sqlparser.Select: + if sqlparser.ContainsAggregation(expr) && !isSubQuery { + addPred = stmt.AddHaving + } else { + addPred = stmt.AddWhere + } + case *sqlparser.Update: + addPred = stmt.AddWhere + case *sqlparser.Delete: + addPred = stmt.AddWhere + default: + panic(fmt.Sprintf("cant add WHERE to %T", qb.stmt)) } + for _, exp := range sqlparser.SplitAndExpression(nil, expr) { - sel.AddWhere(exp) + addPred(exp) + } +} + +func (qb *queryBuilder) addGroupBy(original sqlparser.Expr) { + sel := qb.stmt.(*sqlparser.Select) + sel.GroupBy = append(sel.GroupBy, original) +} + +func (qb *queryBuilder) addProjection(projection *sqlparser.AliasedExpr) error { + switch stmt := qb.stmt.(type) { + case *sqlparser.Select: + stmt.SelectExprs = append(stmt.SelectExprs, projection) + return nil + case *sqlparser.Union: + switch expr := projection.Expr.(type) { + case *sqlparser.ColName: + return checkUnionColumnByName(expr, stmt) + default: + // if there is more than just column names, we'll just push the UNION + // inside a derived table and then recurse into this method again + qb.pushUnionInsideDerived() + return qb.addProjection(projection) + } + } + return vterrors.VT13001(fmt.Sprintf("unknown select statement type: %T", qb.stmt)) } -func (qb *queryBuilder) addProjection(projection *sqlparser.AliasedExpr) { - sel := qb.sel.(*sqlparser.Select) - sel.SelectExprs = append(sel.SelectExprs, projection) +func (qb *queryBuilder) pushUnionInsideDerived() { + selStmt := qb.asSelectStatement() + dt := &sqlparser.DerivedTable{ + Lateral: false, + Select: selStmt, + } + sel := &sqlparser.Select{ + From: []sqlparser.TableExpr{&sqlparser.AliasedTableExpr{ + Expr: dt, + As: sqlparser.NewIdentifierCS("dt"), + }}, + } + sel.SelectExprs = unionSelects(sqlparser.GetFirstSelect(selStmt).SelectExprs) + qb.stmt = sel +} + +func unionSelects(exprs sqlparser.SelectExprs) (selectExprs sqlparser.SelectExprs) { + for _, col := range exprs { + switch col := col.(type) { + case *sqlparser.AliasedExpr: + expr := sqlparser.NewColName(col.ColumnName()) + selectExprs = append(selectExprs, &sqlparser.AliasedExpr{Expr: expr}) + default: + selectExprs = append(selectExprs, col) + } + } + return +} + +func checkUnionColumnByName(column *sqlparser.ColName, sel sqlparser.SelectStatement) error { + colName := column.Name.String() + exprs := sqlparser.GetFirstSelect(sel).SelectExprs + offset := slices.IndexFunc(exprs, func(expr sqlparser.SelectExpr) bool { + switch ae := expr.(type) { + case *sqlparser.StarExpr: + return true + case *sqlparser.AliasedExpr: + // When accessing columns on top of a UNION, we fall back to this simple strategy of string comparisons + return ae.ColumnName() == colName + } + return false + }) + if offset == -1 { + return vterrors.VT12001(fmt.Sprintf("did not find column [%s] on UNION", sqlparser.String(column))) + } + return nil +} + +func (qb *queryBuilder) clearProjections() { + sel, isSel := qb.stmt.(*sqlparser.Select) + if !isSel { + return + } + sel.SelectExprs = nil +} + +func (qb *queryBuilder) unionWith(other *queryBuilder, distinct bool) { + qb.stmt = &sqlparser.Union{ + Left: qb.asSelectStatement(), + Right: other.asSelectStatement(), + Distinct: distinct, + } } func (qb *queryBuilder) joinInnerWith(other *queryBuilder, onCondition sqlparser.Expr) { - sel := qb.sel.(*sqlparser.Select) - otherSel := other.sel.(*sqlparser.Select) + sel := qb.stmt.(*sqlparser.Select) + otherSel := other.stmt.(*sqlparser.Select) sel.From = append(sel.From, otherSel.From...) sel.SelectExprs = append(sel.SelectExprs, otherSel.SelectExprs...) @@ -124,8 +227,8 @@ func (qb *queryBuilder) joinInnerWith(other *queryBuilder, onCondition sqlparser } func (qb *queryBuilder) joinOuterWith(other *queryBuilder, onCondition sqlparser.Expr) { - sel := qb.sel.(*sqlparser.Select) - otherSel := other.sel.(*sqlparser.Select) + sel := qb.stmt.(*sqlparser.Select) + otherSel := other.stmt.(*sqlparser.Select) var lhs sqlparser.TableExpr if len(sel.From) == 1 { lhs = sel.From[0] @@ -160,31 +263,6 @@ func (qb *queryBuilder) joinOuterWith(other *queryBuilder, onCondition sqlparser } } -func (qb *queryBuilder) rewriteExprForDerivedTable(expr sqlparser.Expr, dtName string) { - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - col, ok := node.(*sqlparser.ColName) - if !ok { - return true, nil - } - hasTable := qb.hasTable(col.Qualifier.Name.String()) - if hasTable { - col.Qualifier = sqlparser.TableName{ - Name: sqlparser.NewIdentifierCS(dtName), - } - } - return true, nil - }, expr) -} - -func (qb *queryBuilder) hasTable(tableName string) bool { - for _, name := range qb.tableNames { - if strings.EqualFold(tableName, name) { - return true - } - } - return false -} - func (qb *queryBuilder) sortTables() { _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { sel, isSel := node.(*sqlparser.Select) @@ -197,7 +275,7 @@ func (qb *queryBuilder) sortTables() { } sort.Sort(ts) return true, nil - }, qb.sel) + }, qb.stmt) } @@ -232,20 +310,6 @@ func (ts *tableSorter) Swap(i, j int) { ts.sel.From[i], ts.sel.From[j] = ts.sel.From[j], ts.sel.From[i] } -func (h *Horizon) toSQL(qb *queryBuilder) error { - err := stripDownQuery(h.Select, qb.sel) - if err != nil { - return err - } - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - if aliasedExpr, ok := node.(sqlparser.SelectExpr); ok { - removeKeyspaceFromSelectExpr(aliasedExpr) - } - return true, nil - }, qb.sel) - return nil -} - func removeKeyspaceFromSelectExpr(expr sqlparser.SelectExpr) { switch expr := expr.(type) { case *sqlparser.AliasedExpr: @@ -269,6 +333,7 @@ func stripDownQuery(from, to sqlparser.SelectStatement) error { toNode.Having = node.Having toNode.OrderBy = node.OrderBy toNode.Comments = node.Comments + toNode.Limit = node.Limit toNode.SelectExprs = node.SelectExprs for _, expr := range toNode.SelectExprs { removeKeyspaceFromSelectExpr(expr) @@ -293,90 +358,313 @@ func stripDownQuery(from, to sqlparser.SelectStatement) error { return nil } +// buildQuery recursively builds the query into an AST, from an operator tree func buildQuery(op ops.Operator, qb *queryBuilder) error { switch op := op.(type) { case *Table: - dbName := "" - - if op.QTable.IsInfSchema { - dbName = op.QTable.Table.Qualifier.String() - } - qb.addTable(dbName, op.QTable.Table.Name.String(), op.QTable.Alias.As.String(), TableID(op), op.QTable.Alias.Hints) - for _, pred := range op.QTable.Predicates { - qb.addPredicate(pred) - } - for _, name := range op.Columns { - qb.addProjection(&sqlparser.AliasedExpr{Expr: name}) - } + buildTable(op, qb) + case *Projection: + return buildProjection(op, qb) case *ApplyJoin: - err := buildQuery(op.LHS, qb) - if err != nil { - return err - } - // If we are going to add the predicate used in join here - // We should not add the predicate's copy of when it was split into - // two parts. To avoid this, we use the SkipPredicates map. - for _, expr := range qb.ctx.JoinPredicates[op.Predicate] { - qb.ctx.SkipPredicates[expr] = nil + return buildApplyJoin(op, qb) + case *Filter: + return buildFilter(op, qb) + case *Horizon: + if op.TableId != nil { + return buildDerived(op, qb) } - qbR := &queryBuilder{ctx: qb.ctx} - err = buildQuery(op.RHS, qbR) + return buildHorizon(op, qb) + case *Limit: + return buildLimit(op, qb) + case *Ordering: + return buildOrdering(op, qb) + case *Aggregator: + return buildAggregation(op, qb) + case *Union: + return buildUnion(op, qb) + case *Distinct: + err := buildQuery(op.Source, qb) if err != nil { return err } - if op.LeftJoin { - qb.joinOuterWith(qbR, op.Predicate) - } else { - qb.joinInnerWith(qbR, op.Predicate) - } - case *Filter: - err := buildQuery(op.Source, qb) + qb.asSelectStatement().MakeDistinct() + case *Update: + buildDML(op, qb) + case *Delete: + buildDML(op, qb) + case *Insert: + buildDML(op, qb) + default: + return vterrors.VT13001(fmt.Sprintf("unknown operator to convert to SQL: %T", op)) + } + return nil +} + +type OpWithAST interface { + ops.Operator + Statement() sqlparser.Statement +} + +func buildDML(op OpWithAST, qb *queryBuilder) { + qb.stmt = op.Statement() + qb.dmlOperator = op +} + +func buildAggregation(op *Aggregator, qb *queryBuilder) error { + err := buildQuery(op.Source, qb) + if err != nil { + return err + } + + qb.clearProjections() + + cols, err := op.GetColumns(qb.ctx) + if err != nil { + return err + } + for _, column := range cols { + err := qb.addProjection(column) if err != nil { return err } - for _, pred := range op.Predicates { - qb.addPredicate(pred) + } + + for _, by := range op.Grouping { + qb.addGroupBy(by.Inner) + simplified := by.SimplifiedExpr + if by.WSOffset != -1 { + qb.addGroupBy(weightStringFor(simplified)) } - case *Derived: - err := buildQuery(op.Source, qb) + } + + return nil +} + +func buildOrdering(op *Ordering, qb *queryBuilder) error { + err := buildQuery(op.Source, qb) + if err != nil { + return err + } + + for _, order := range op.Order { + qb.asSelectStatement().AddOrder(order.Inner) + } + return nil +} + +func buildLimit(op *Limit, qb *queryBuilder) error { + err := buildQuery(op.Source, qb) + if err != nil { + return err + } + qb.asSelectStatement().SetLimit(op.AST) + return nil +} + +func buildTable(op *Table, qb *queryBuilder) { + dbName := "" + + if op.QTable.IsInfSchema { + dbName = op.QTable.Table.Qualifier.String() + } + qb.addTable(dbName, op.QTable.Table.Name.String(), op.QTable.Alias.As.String(), TableID(op), op.QTable.Alias.Hints) + for _, pred := range op.QTable.Predicates { + qb.addPredicate(pred) + } + for _, name := range op.Columns { + err := qb.addProjection(&sqlparser.AliasedExpr{Expr: name}) if err != nil { - return err + return + } + } +} + +func buildProjection(op *Projection, qb *queryBuilder) error { + err := buildQuery(op.Source, qb) + if err != nil { + return err + } + + _, isSel := qb.stmt.(*sqlparser.Select) + if isSel { + qb.clearProjections() + + for _, column := range op.Columns { + err := qb.addProjection(column) + if err != nil { + return err + } } - sel := qb.sel.(*sqlparser.Select) // we can only handle SELECT in derived tables at the moment - qb.sel = nil - sqlparser.RemoveKeyspace(op.Query) - opQuery := op.Query.(*sqlparser.Select) - sel.Limit = opQuery.Limit - sel.OrderBy = opQuery.OrderBy - sel.GroupBy = opQuery.GroupBy - sel.Having = opQuery.Having - sel.SelectExprs = opQuery.SelectExprs + } + + // if the projection is on derived table, we use the select we have + // created above and transform it into a derived table + if op.TableID != nil { + sel := qb.asSelectStatement() + qb.stmt = nil qb.addTableExpr(op.Alias, op.Alias, TableID(op), &sqlparser.DerivedTable{ Select: sel, - }, nil, op.ColumnAliases) - for _, col := range op.Columns { - qb.addProjection(&sqlparser.AliasedExpr{Expr: col}) + }, nil, nil) + } + + if !isSel { + for _, column := range op.Columns { + err := qb.addProjection(column) + if err != nil { + return err + } } - case *Horizon: - err := buildQuery(op.Source, qb) + } + + return nil +} + +func buildApplyJoin(op *ApplyJoin, qb *queryBuilder) error { + err := buildQuery(op.LHS, qb) + if err != nil { + return err + } + // If we are going to add the predicate used in join here + // We should not add the predicate's copy of when it was split into + // two parts. To avoid this, we use the SkipPredicates map. + for _, expr := range qb.ctx.JoinPredicates[op.Predicate] { + qb.ctx.SkipPredicates[expr] = nil + } + qbR := &queryBuilder{ctx: qb.ctx} + err = buildQuery(op.RHS, qbR) + if err != nil { + return err + } + if op.LeftJoin { + qb.joinOuterWith(qbR, op.Predicate) + } else { + qb.joinInnerWith(qbR, op.Predicate) + } + return nil +} + +func buildUnion(op *Union, qb *queryBuilder) error { + // the first input is built first + err := buildQuery(op.Sources[0], qb) + if err != nil { + return err + } + + for i, src := range op.Sources { + if i == 0 { + continue + } + + // now we can go over the remaining inputs and UNION them together + qbOther := &queryBuilder{ctx: qb.ctx} + err = buildQuery(src, qbOther) if err != nil { return err } + qb.unionWith(qbOther, op.distinct) + } + + return nil +} + +func buildFilter(op *Filter, qb *queryBuilder) error { + err := buildQuery(op.Source, qb) + if err != nil { + return err + } + for _, pred := range op.Predicates { + qb.addPredicate(pred) + } + return nil +} + +func buildDerived(op *Horizon, qb *queryBuilder) error { + err := buildQuery(op.Source, qb) + if err != nil { + return err + } + sqlparser.RemoveKeyspace(op.Query) + + stmt := qb.stmt + qb.stmt = nil + switch sel := stmt.(type) { + case *sqlparser.Select: + return buildDerivedSelect(op, qb, sel) + case *sqlparser.Union: + return buildDerivedUnion(op, qb, sel) + } + panic(fmt.Sprintf("unknown select statement type: %T", stmt)) +} + +func buildDerivedUnion(op *Horizon, qb *queryBuilder, union *sqlparser.Union) error { + opQuery, ok := op.Query.(*sqlparser.Union) + if !ok { + return vterrors.VT12001("Horizon contained SELECT but statement was UNION") + } + + union.Limit = opQuery.Limit + union.OrderBy = opQuery.OrderBy + union.Distinct = opQuery.Distinct + + qb.addTableExpr(op.Alias, op.Alias, TableID(op), &sqlparser.DerivedTable{ + Select: union, + }, nil, op.ColumnAliases) + + return nil +} - err = stripDownQuery(op.Select, qb.sel) +func buildDerivedSelect(op *Horizon, qb *queryBuilder, sel *sqlparser.Select) error { + opQuery, ok := op.Query.(*sqlparser.Select) + if !ok { + return vterrors.VT12001("Horizon contained UNION but statement was SELECT") + } + sel.Limit = opQuery.Limit + sel.OrderBy = opQuery.OrderBy + sel.GroupBy = opQuery.GroupBy + sel.Having = mergeHaving(sel.Having, opQuery.Having) + sel.SelectExprs = opQuery.SelectExprs + qb.addTableExpr(op.Alias, op.Alias, TableID(op), &sqlparser.DerivedTable{ + Select: sel, + }, nil, op.ColumnAliases) + for _, col := range op.Columns { + err := qb.addProjection(&sqlparser.AliasedExpr{Expr: col}) if err != nil { return err } - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - if aliasedExpr, ok := node.(sqlparser.SelectExpr); ok { - removeKeyspaceFromSelectExpr(aliasedExpr) - } - return true, nil - }, qb.sel) - return nil + } + return nil - default: - return vterrors.VT13001(fmt.Sprintf("do not know how to turn %T into SQL", op)) +} + +func buildHorizon(op *Horizon, qb *queryBuilder) error { + err := buildQuery(op.Source, qb) + if err != nil { + return err } + + err = stripDownQuery(op.Query, qb.asSelectStatement()) + if err != nil { + return err + } + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + if aliasedExpr, ok := node.(sqlparser.SelectExpr); ok { + removeKeyspaceFromSelectExpr(aliasedExpr) + } + return true, nil + }, qb.stmt) return nil } + +func mergeHaving(h1, h2 *sqlparser.Where) *sqlparser.Where { + switch { + case h1 == nil && h2 == nil: + return nil + case h1 == nil: + return h2 + case h2 == nil: + return h1 + default: + h1.Expr = sqlparser.AndExpressions(h1.Expr, h2.Expr) + return h1 + } +} diff --git a/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go b/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go new file mode 100644 index 00000000000..0d085f2e718 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go @@ -0,0 +1,743 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "fmt" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine/opcode" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +func tryPushingDownAggregator(ctx *plancontext.PlanningContext, aggregator *Aggregator) (output ops.Operator, applyResult *rewrite.ApplyResult, err error) { + if aggregator.Pushed { + return aggregator, rewrite.SameTree, nil + } + switch src := aggregator.Source.(type) { + case *Route: + // if we have a single sharded route, we can push it down + output, applyResult, err = pushDownAggregationThroughRoute(ctx, aggregator, src) + case *ApplyJoin: + if ctx.DelegateAggregation { + output, applyResult, err = pushDownAggregationThroughJoin(ctx, aggregator, src) + } + case *Filter: + if ctx.DelegateAggregation { + output, applyResult, err = pushDownAggregationThroughFilter(ctx, aggregator, src) + } + default: + return aggregator, rewrite.SameTree, nil + } + + if err != nil { + return nil, nil, err + } + + if output == nil { + return aggregator, rewrite.SameTree, nil + } + + aggregator.Pushed = true + + return +} + +func (a *Aggregator) aggregateTheAggregates() { + for i := range a.Aggregations { + aggregateTheAggregate(a, i) + } +} + +func aggregateTheAggregate(a *Aggregator, i int) { + aggr := a.Aggregations[i] + switch aggr.OpCode { + case opcode.AggregateCount, opcode.AggregateCountStar, opcode.AggregateCountDistinct, opcode.AggregateSumDistinct: + // All count variations turn into SUM above the Route. This is also applied for Sum distinct when it is pushed down. + // Think of it as we are SUMming together a bunch of distributed COUNTs. + aggr.OriginalOpCode, aggr.OpCode = aggr.OpCode, opcode.AggregateSum + a.Aggregations[i] = aggr + } +} + +func pushDownAggregationThroughRoute( + ctx *plancontext.PlanningContext, + aggregator *Aggregator, + route *Route, +) (ops.Operator, *rewrite.ApplyResult, error) { + // If the route is single-shard, or we are grouping by sharding keys, we can just push down the aggregation + if route.IsSingleShard() || overlappingUniqueVindex(ctx, aggregator.Grouping) { + return rewrite.Swap(aggregator, route, "push down aggregation under route - remove original") + } + + if !ctx.DelegateAggregation { + return nil, nil, nil + } + + // Create a new aggregator to be placed below the route. + aggrBelowRoute := aggregator.SplitAggregatorBelowRoute(route.Inputs()) + aggrBelowRoute.Aggregations = nil + + err := pushDownAggregations(ctx, aggregator, aggrBelowRoute) + if err != nil { + return nil, nil, err + } + + // Set the source of the route to the new aggregator placed below the route. + route.Source = aggrBelowRoute + + if !aggregator.Original { + // we only keep the root aggregation, if this aggregator was created + // by splitting one and pushing under a join, we can get rid of this one + return aggregator.Source, rewrite.NewTree("push aggregation under route - remove original", aggregator), nil + } + + return aggregator, rewrite.NewTree("push aggregation under route - keep original", aggregator), nil +} + +// pushDownAggregations splits aggregations between the original aggregator and the one we are pushing down +func pushDownAggregations(ctx *plancontext.PlanningContext, aggregator *Aggregator, aggrBelowRoute *Aggregator) error { + canPushDownDistinctAggr, distinctExpr, err := checkIfWeCanPushDown(ctx, aggregator) + if err != nil { + return err + } + + distinctAggrGroupByAdded := false + + for i, aggr := range aggregator.Aggregations { + if !aggr.Distinct || canPushDownDistinctAggr { + aggrBelowRoute.Aggregations = append(aggrBelowRoute.Aggregations, aggr) + aggregateTheAggregate(aggregator, i) + continue + } + + // We handle a distinct aggregation by turning it into a group by and + // doing the aggregating on the vtgate level instead + aeDistinctExpr := aeWrap(distinctExpr) + aggrBelowRoute.Columns[aggr.ColOffset] = aeDistinctExpr + + // We handle a distinct aggregation by turning it into a group by and + // doing the aggregating on the vtgate level instead + // Adding to group by can be done only once even though there are multiple distinct aggregation with same expression. + if !distinctAggrGroupByAdded { + groupBy := NewGroupBy(distinctExpr, distinctExpr, aeDistinctExpr) + groupBy.ColOffset = aggr.ColOffset + aggrBelowRoute.Grouping = append(aggrBelowRoute.Grouping, groupBy) + distinctAggrGroupByAdded = true + } + } + + if !canPushDownDistinctAggr { + aggregator.DistinctExpr = distinctExpr + } + + return nil +} + +func checkIfWeCanPushDown(ctx *plancontext.PlanningContext, aggregator *Aggregator) (bool, sqlparser.Expr, error) { + canPushDown := true + var distinctExpr sqlparser.Expr + var differentExpr *sqlparser.AliasedExpr + + for _, aggr := range aggregator.Aggregations { + if !aggr.Distinct { + continue + } + + innerExpr := aggr.Func.GetArg() + if !exprHasUniqueVindex(ctx, innerExpr) { + canPushDown = false + } + if distinctExpr == nil { + distinctExpr = innerExpr + } + if !ctx.SemTable.EqualsExpr(distinctExpr, innerExpr) { + differentExpr = aggr.Original + } + } + + if !canPushDown && differentExpr != nil { + return false, nil, vterrors.VT12001(fmt.Sprintf("only one DISTINCT aggregation is allowed in a SELECT: %s", sqlparser.String(differentExpr))) + } + + return canPushDown, distinctExpr, nil +} + +func pushDownAggregationThroughFilter( + ctx *plancontext.PlanningContext, + aggregator *Aggregator, + filter *Filter, +) (ops.Operator, *rewrite.ApplyResult, error) { + + columnsNeeded := collectColNamesNeeded(ctx, filter) + + // Create a new aggregator to be placed below the route. + pushedAggr := aggregator.Clone([]ops.Operator{filter.Source}).(*Aggregator) + pushedAggr.Pushed = false + pushedAggr.Original = false + +withNextColumn: + for _, col := range columnsNeeded { + for _, gb := range pushedAggr.Grouping { + if ctx.SemTable.EqualsExpr(col, gb.SimplifiedExpr) { + continue withNextColumn + } + } + pushedAggr.addColumnWithoutPushing(aeWrap(col), true) + } + + // Set the source of the filter to the new aggregator placed below the route. + filter.Source = pushedAggr + + if !aggregator.Original { + // we only keep the root aggregation, if this aggregator was created + // by splitting one and pushing under a join, we can get rid of this one + return aggregator.Source, rewrite.NewTree("push aggregation under filter - remove original", aggregator), nil + } + aggregator.aggregateTheAggregates() + return aggregator, rewrite.NewTree("push aggregation under filter - keep original", aggregator), nil +} + +func collectColNamesNeeded(ctx *plancontext.PlanningContext, f *Filter) (columnsNeeded []*sqlparser.ColName) { + for _, p := range f.Predicates { + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + col, ok := node.(*sqlparser.ColName) + if !ok { + return true, nil + } + for _, existing := range columnsNeeded { + if ctx.SemTable.EqualsExpr(col, existing) { + return true, nil + } + } + columnsNeeded = append(columnsNeeded, col) + return true, nil + }, p) + } + return +} + +func overlappingUniqueVindex(ctx *plancontext.PlanningContext, groupByExprs []GroupBy) bool { + for _, groupByExpr := range groupByExprs { + if exprHasUniqueVindex(ctx, groupByExpr.SimplifiedExpr) { + return true + } + } + return false +} + +func exprHasUniqueVindex(ctx *plancontext.PlanningContext, expr sqlparser.Expr) bool { + return exprHasVindex(ctx, expr, true) +} + +func exprHasVindex(ctx *plancontext.PlanningContext, expr sqlparser.Expr, hasToBeUnique bool) bool { + col, isCol := expr.(*sqlparser.ColName) + if !isCol { + return false + } + ts := ctx.SemTable.RecursiveDeps(expr) + tableInfo, err := ctx.SemTable.TableInfoFor(ts) + if err != nil { + return false + } + vschemaTable := tableInfo.GetVindexTable() + for _, vindex := range vschemaTable.ColumnVindexes { + // TODO: Support composite vindexes (multicol, etc). + if len(vindex.Columns) > 1 || hasToBeUnique && !vindex.IsUnique() { + return false + } + if col.Name.Equal(vindex.Columns[0]) { + return true + } + } + return false +} + +/* +We push down aggregations using the logic from the paper Orthogonal Optimization of Subqueries and Aggregation, by +Cesar A. Galindo-Legaria and Milind M. Joshi from Microsoft Corp. + +It explains how one can split an aggregation into local aggregates that depend on only one side of the join. +The local aggregates can then be gathered together to produce the global +group by/aggregate query that the user asked for. + +In Vitess, this is particularly useful because it allows us to push aggregation down to the routes, even when +we have to join the results at the vtgate level. Instead of doing all the grouping and aggregation at the +vtgate level, we can offload most of the work to MySQL, and at the vtgate just summarize the results. + +# For a query, such as + +select count(*) from R1 JOIN R2 on R1.id = R2.id + +Original: + + GB <- This is the original grouping, doing count(*) + | + JOIN + / \ + R1 R2 + +Transformed: + + rootAggr <- This grouping is now SUMing together the distributed `count(*)` we got back + | + Proj <- This projection makes sure that the columns are lined up as expected + | + Sort <- Here we are sorting the input so that the OrderedAggregate can do its thing + | + JOIN + / \ + lAggr rAggr + / \ + R1 R2 +*/ +func pushDownAggregationThroughJoin(ctx *plancontext.PlanningContext, rootAggr *Aggregator, join *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { + lhs := &joinPusher{ + orig: rootAggr, + pushed: &Aggregator{ + Source: join.LHS, + QP: rootAggr.QP, + }, + columns: initColReUse(len(rootAggr.Columns)), + tableID: TableID(join.LHS), + } + rhs := &joinPusher{ + orig: rootAggr, + pushed: &Aggregator{ + Source: join.RHS, + QP: rootAggr.QP, + }, + columns: initColReUse(len(rootAggr.Columns)), + tableID: TableID(join.RHS), + } + + joinColumns, output, err := splitAggrColumnsToLeftAndRight(ctx, rootAggr, join, lhs, rhs) + if err != nil { + // if we get this error, we just abort the splitting and fall back on simpler ways of solving the same query + if err == errAbortAggrPushing { + return nil, nil, nil + } + return nil, nil, err + } + + groupingJCs, err := splitGroupingToLeftAndRight(ctx, rootAggr, lhs, rhs) + if err != nil { + return nil, nil, err + } + joinColumns = append(joinColumns, groupingJCs...) + + // We need to add any columns coming from the lhs of the join to the group by on that side + // If we don't, the LHS will not be able to return the column, and it can't be used to send down to the RHS + err = addColumnsFromLHSInJoinPredicates(ctx, rootAggr, join, lhs) + if err != nil { + return nil, nil, err + } + + join.LHS, join.RHS = lhs.pushed, rhs.pushed + join.JoinColumns = joinColumns + + if !rootAggr.Original { + // we only keep the root aggregation, if this aggregator was created + // by splitting one and pushing under a join, we can get rid of this one + return output, rewrite.NewTree("push Aggregation under join - keep original", rootAggr), nil + } + + rootAggr.aggregateTheAggregates() + rootAggr.Source = output + return rootAggr, rewrite.NewTree("push Aggregation under join", rootAggr), nil +} + +var errAbortAggrPushing = fmt.Errorf("abort aggregation pushing") + +func addColumnsFromLHSInJoinPredicates(ctx *plancontext.PlanningContext, rootAggr *Aggregator, join *ApplyJoin, lhs *joinPusher) error { + for _, pred := range join.JoinPredicates { + for _, expr := range pred.LHSExprs { + wexpr := rootAggr.QP.GetSimplifiedExpr(expr) + idx, found := canReuseColumn(ctx, lhs.pushed.Columns, expr, extractExpr) + if !found { + idx = len(lhs.pushed.Columns) + lhs.pushed.Columns = append(lhs.pushed.Columns, aeWrap(expr)) + } + _, found = canReuseColumn(ctx, lhs.pushed.Grouping, wexpr, func(by GroupBy) sqlparser.Expr { + return by.SimplifiedExpr + }) + + if found { + continue + } + + lhs.pushed.Grouping = append(lhs.pushed.Grouping, GroupBy{ + Inner: expr, + SimplifiedExpr: wexpr, + ColOffset: idx, + WSOffset: -1, + }) + } + } + return nil +} + +func splitGroupingToLeftAndRight(ctx *plancontext.PlanningContext, rootAggr *Aggregator, lhs, rhs *joinPusher) ([]JoinColumn, error) { + var groupingJCs []JoinColumn + + for _, groupBy := range rootAggr.Grouping { + deps := ctx.SemTable.RecursiveDeps(groupBy.Inner) + expr := groupBy.Inner + switch { + case deps.IsSolvedBy(lhs.tableID): + lhs.addGrouping(ctx, groupBy) + groupingJCs = append(groupingJCs, JoinColumn{ + Original: aeWrap(groupBy.Inner), + LHSExprs: []sqlparser.Expr{expr}, + }) + case deps.IsSolvedBy(rhs.tableID): + rhs.addGrouping(ctx, groupBy) + groupingJCs = append(groupingJCs, JoinColumn{ + Original: aeWrap(groupBy.Inner), + RHSExpr: expr, + }) + case deps.IsSolvedBy(lhs.tableID.Merge(rhs.tableID)): + jc, err := BreakExpressionInLHSandRHS(ctx, groupBy.SimplifiedExpr, lhs.tableID) + if err != nil { + return nil, err + } + for _, lhsExpr := range jc.LHSExprs { + lhs.addGrouping(ctx, NewGroupBy(lhsExpr, lhsExpr, aeWrap(lhsExpr))) + } + rhs.addGrouping(ctx, NewGroupBy(jc.RHSExpr, jc.RHSExpr, aeWrap(jc.RHSExpr))) + default: + return nil, vterrors.VT13001(fmt.Sprintf("grouping with bad dependencies %s", groupBy.SimplifiedExpr)) + } + } + return groupingJCs, nil +} + +// splitAggrColumnsToLeftAndRight pushes all aggregations on the aggregator above a join and +// pushes them to one or both sides of the join, and also provides the projections needed to re-assemble the +// aggregations that have been spread across the join +func splitAggrColumnsToLeftAndRight( + ctx *plancontext.PlanningContext, + aggregator *Aggregator, + join *ApplyJoin, + lhs, rhs *joinPusher, +) ([]JoinColumn, ops.Operator, error) { + builder := &aggBuilder{ + lhs: lhs, + rhs: rhs, + proj: &Projection{Source: join, FromAggr: true}, + outerJoin: join.LeftJoin, + } + + canPushDownDistinctAggr, distinctExpr, err := checkIfWeCanPushDown(ctx, aggregator) + if err != nil { + return nil, nil, err + } + + // Distinct aggregation cannot be pushed down in the join. + // We keep node of the distinct aggregation expression to be used later for ordering. + if !canPushDownDistinctAggr { + aggregator.DistinctExpr = distinctExpr + return nil, nil, errAbortAggrPushing + } + +outer: + // we prefer adding the aggregations in the same order as the columns are declared + for colIdx, col := range aggregator.Columns { + for _, aggr := range aggregator.Aggregations { + if aggr.ColOffset == colIdx { + err := builder.handleAggr(ctx, aggr) + if err != nil { + return nil, nil, err + } + continue outer + } + } + builder.proj.addUnexploredExpr(col, col.Expr) + } + return builder.joinColumns, builder.proj, nil +} + +type ( + // aggBuilder is a helper struct that aids in pushing down an Aggregator through a join + // it accumulates the projections (if any) that need to be evaluated on top of the join + aggBuilder struct { + lhs, rhs *joinPusher + joinColumns []JoinColumn + proj *Projection + outerJoin bool + } + // joinPusher is a helper struct that aids in pushing down an Aggregator into one side of a Join. + // It creates a new Aggregator that is pushed down and keeps track of the column dependencies that the new Aggregator has. + joinPusher struct { + orig *Aggregator // The original Aggregator before pushing. + pushed *Aggregator // The new Aggregator created for push-down. + columns []int // List of column offsets used in the new Aggregator. + tableID semantics.TableSet // The TableSet denoting the side of the Join where the new Aggregator is pushed. + + // csAE keeps the copy of the countStar expression that has already been added to split an aggregation. + // No need to have multiple countStars, so we cache it here + csAE *sqlparser.AliasedExpr + } +) + +func (ab *aggBuilder) leftCountStar(ctx *plancontext.PlanningContext) *sqlparser.AliasedExpr { + ae, created := ab.lhs.countStar(ctx) + if created { + ab.joinColumns = append(ab.joinColumns, JoinColumn{ + Original: ae, + LHSExprs: []sqlparser.Expr{ae.Expr}, + }) + } + return ae +} + +func (ab *aggBuilder) rightCountStar(ctx *plancontext.PlanningContext) *sqlparser.AliasedExpr { + ae, created := ab.rhs.countStar(ctx) + if created { + ab.joinColumns = append(ab.joinColumns, JoinColumn{ + Original: ae, + RHSExpr: ae.Expr, + }) + } + return ae +} + +func (p *joinPusher) countStar(ctx *plancontext.PlanningContext) (*sqlparser.AliasedExpr, bool) { + if p.csAE != nil { + return p.csAE, false + } + cs := &sqlparser.CountStar{} + ae := aeWrap(cs) + csAggr := NewAggr(opcode.AggregateCountStar, cs, ae, "") + expr := p.addAggr(ctx, csAggr) + p.csAE = aeWrap(expr) + return p.csAE, true +} + +func (ab *aggBuilder) handleAggr(ctx *plancontext.PlanningContext, aggr Aggr) error { + switch aggr.OpCode { + case opcode.AggregateCountStar: + ab.handleCountStar(ctx, aggr) + return nil + case opcode.AggregateCount, opcode.AggregateSum: + return ab.handleAggrWithCountStarMultiplier(ctx, aggr) + case opcode.AggregateMax, opcode.AggregateMin, opcode.AggregateAnyValue: + return ab.handlePushThroughAggregation(ctx, aggr) + case opcode.AggregateGroupConcat: + f := aggr.Func.(*sqlparser.GroupConcatExpr) + if f.Distinct || len(f.OrderBy) > 0 || f.Separator != "" { + panic("fail here") + } + // this needs special handling, currently aborting the push of function + // and later will try pushing the column instead. + // TODO: this should be handled better by pushing the function down. + return errAbortAggrPushing + case opcode.AggregateUnassigned: + return vterrors.VT12001(fmt.Sprintf("in scatter query: aggregation function '%s'", sqlparser.String(aggr.Original))) + case opcode.AggregateGtid: + // this is only used for SHOW GTID queries that will never contain joins + return vterrors.VT13001("cannot do join with vgtid") + case opcode.AggregateSumDistinct, opcode.AggregateCountDistinct: + // we are not going to see values multiple times, so we don't need to multiply with the count(*) from the other side + return ab.handlePushThroughAggregation(ctx, aggr) + default: + return vterrors.VT12001(fmt.Sprintf("aggregation not planned: %s", aggr.OpCode.String())) + } +} + +// pushThroughLeft and Right are used for extremums and random, +// which are not split and then arithmetics is used to aggregate the per-shard aggregations. +// For these, we just copy the aggregation to one side of the join and then pick the max of the max:es returned +func (ab *aggBuilder) pushThroughLeft(aggr Aggr) { + ab.lhs.pushThroughAggr(aggr) + ab.joinColumns = append(ab.joinColumns, JoinColumn{ + Original: aggr.Original, + LHSExprs: []sqlparser.Expr{aggr.Original.Expr}, + }) +} +func (ab *aggBuilder) pushThroughRight(aggr Aggr) { + ab.rhs.pushThroughAggr(aggr) + ab.joinColumns = append(ab.joinColumns, JoinColumn{ + Original: aggr.Original, + RHSExpr: aggr.Original.Expr, + }) +} + +func (ab *aggBuilder) handlePushThroughAggregation(ctx *plancontext.PlanningContext, aggr Aggr) error { + ab.proj.addUnexploredExpr(aggr.Original, aggr.Original.Expr) + + deps := ctx.SemTable.RecursiveDeps(aggr.Original.Expr) + switch { + case deps.IsSolvedBy(ab.lhs.tableID): + ab.pushThroughLeft(aggr) + case deps.IsSolvedBy(ab.rhs.tableID): + ab.pushThroughRight(aggr) + default: + return errAbortAggrPushing + } + return nil +} + +func (ab *aggBuilder) handleCountStar(ctx *plancontext.PlanningContext, aggr Aggr) { + // Add the aggregate to both sides of the join. + lhsAE := ab.leftCountStar(ctx) + rhsAE := ab.rightCountStar(ctx) + + ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, true) +} + +func (ab *aggBuilder) handleAggrWithCountStarMultiplier(ctx *plancontext.PlanningContext, aggr Aggr) error { + var lhsAE, rhsAE *sqlparser.AliasedExpr + var addCoalesce bool + + deps := ctx.SemTable.RecursiveDeps(aggr.Original.Expr) + switch { + case deps.IsSolvedBy(ab.lhs.tableID): + ab.pushThroughLeft(aggr) + lhsAE = aggr.Original + rhsAE = ab.rightCountStar(ctx) + if ab.outerJoin { + addCoalesce = true + } + + case deps.IsSolvedBy(ab.rhs.tableID): + ab.pushThroughRight(aggr) + lhsAE = ab.leftCountStar(ctx) + rhsAE = aggr.Original + + default: + return errAbortAggrPushing + } + + ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, addCoalesce) + return nil +} + +func (ab *aggBuilder) buildProjectionForAggr(lhsAE *sqlparser.AliasedExpr, rhsAE *sqlparser.AliasedExpr, aggr Aggr, coalesce bool) { + // We expect the expressions to be different on each side of the join, otherwise it's an error. + if lhsAE.Expr == rhsAE.Expr { + panic(fmt.Sprintf("Need the two produced expressions to be different. %T %T", lhsAE, rhsAE)) + } + + rhsExpr := rhsAE.Expr + + // When dealing with outer joins, we don't want null values from the RHS to ruin the calculations we are doing, + // so we use the MySQL `coalesce` after the join is applied to multiply the count from LHS with 1. + if ab.outerJoin && coalesce { + rhsExpr = coalesceFunc(rhsExpr) + } + + // The final COUNT is obtained by multiplying the counts from both sides. + // This is equivalent to transforming a "select count(*) from t1 join t2" into + // "select count_t1*count_t2 from + // (select count(*) as count_t1 from t1) as x, + // (select count(*) as count_t2 from t2) as y". + projExpr := &sqlparser.BinaryExpr{ + Operator: sqlparser.MultOp, + Left: lhsAE.Expr, + Right: rhsExpr, + } + projAE := &sqlparser.AliasedExpr{ + Expr: aggr.Original.Expr, + As: sqlparser.NewIdentifierCI(aggr.Original.ColumnName()), + } + + ab.proj.addUnexploredExpr(projAE, projExpr) +} + +func coalesceFunc(e sqlparser.Expr) sqlparser.Expr { + // `coalesce(e,1)` will return `e` if `e` is not `NULL`, otherwise it will return `1` + return &sqlparser.FuncExpr{ + Name: sqlparser.NewIdentifierCI("coalesce"), + Exprs: sqlparser.SelectExprs{ + aeWrap(e), + aeWrap(sqlparser.NewIntLiteral("1")), + }, + } +} + +// addAggr creates a copy of the given aggregation, updates its column offset to point to the correct location in the new Aggregator, +// and adds it to the list of Aggregations of the new Aggregator. It also updates the semantic analysis information to reflect the new structure. +// It returns the expression of the aggregation as it should be used in the parent Aggregator. +func (p *joinPusher) addAggr(ctx *plancontext.PlanningContext, aggr Aggr) sqlparser.Expr { + copyAggr := aggr + expr := sqlparser.CloneExpr(aggr.Original.Expr) + copyAggr.Original = aeWrap(expr) + // copy dependencies so we can keep track of which side expressions need to be pushed to + ctx.SemTable.Direct[expr] = p.tableID + ctx.SemTable.Recursive[expr] = p.tableID + copyAggr.ColOffset = len(p.pushed.Columns) + p.pushed.Columns = append(p.pushed.Columns, copyAggr.Original) + p.pushed.Aggregations = append(p.pushed.Aggregations, copyAggr) + return expr +} + +// pushThroughAggr pushes through an aggregation without changing dependencies. +// Can be used for aggregations we can push in one piece +func (p *joinPusher) pushThroughAggr(aggr Aggr) { + newAggr := NewAggr(aggr.OpCode, aggr.Func, aggr.Original, aggr.Alias) + newAggr.ColOffset = len(p.pushed.Columns) + p.pushed.Columns = append(p.pushed.Columns, newAggr.Original) + p.pushed.Aggregations = append(p.pushed.Aggregations, newAggr) +} + +// addGrouping creates a copy of the given GroupBy, updates its column offset to point to the correct location in the new Aggregator, +// and adds it to the list of GroupBy expressions of the new Aggregator. It also updates the semantic analysis information to reflect the new structure. +// It returns the expression of the GroupBy as it should be used in the parent Aggregator. +func (p *joinPusher) addGrouping(ctx *plancontext.PlanningContext, gb GroupBy) sqlparser.Expr { + copyGB := gb + expr := sqlparser.CloneExpr(gb.Inner) + // copy dependencies so we can keep track of which side expressions need to be pushed to + ctx.SemTable.CopyDependencies(gb.Inner, expr) + // if the column exists in the selection then copy it down to the pushed aggregator operator. + if copyGB.ColOffset != -1 { + offset := p.useColumn(copyGB.ColOffset) + copyGB.ColOffset = offset + } else { + copyGB.ColOffset = len(p.pushed.Columns) + p.pushed.Columns = append(p.pushed.Columns, aeWrap(copyGB.Inner)) + } + p.pushed.Grouping = append(p.pushed.Grouping, copyGB) + return expr +} + +// useColumn checks whether the column corresponding to the given offset has been used in the new Aggregator. +// If it has not been used before, it adds the column to the new Aggregator +// and updates the columns mapping to reflect the new location of the column. +// It returns the offset of the column in the new Aggregator. +func (p *joinPusher) useColumn(offset int) int { + if p.columns[offset] == -1 { + p.columns[offset] = len(p.pushed.Columns) + // still haven't used this expression on this side + p.pushed.Columns = append(p.pushed.Columns, p.orig.Columns[offset]) + } + return p.columns[offset] +} + +func initColReUse(size int) []int { + cols := make([]int, size) + for i := 0; i < size; i++ { + cols[i] = -1 + } + return cols +} + +func extractExpr(expr *sqlparser.AliasedExpr) sqlparser.Expr { return expr.Expr } diff --git a/go/vt/vtgate/planbuilder/operators/aggregator.go b/go/vt/vtgate/planbuilder/operators/aggregator.go new file mode 100644 index 00000000000..b7c6e4a87d2 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/aggregator.go @@ -0,0 +1,497 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "fmt" + "slices" + "strings" + + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine/opcode" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +type ( + // Aggregator represents a GroupBy γ relational operator. + // Both all aggregations and no grouping, and the inverse + // of all grouping and no aggregations are valid configurations of this operator + Aggregator struct { + Source ops.Operator + Columns []*sqlparser.AliasedExpr + + Grouping []GroupBy + Aggregations []Aggr + + // We support a single distinct aggregation per aggregator. It is stored here. + // When planning the ordering that the OrderedAggregate will require, + // this needs to be the last ORDER BY expression + DistinctExpr sqlparser.Expr + + // Pushed will be set to true once this aggregation has been pushed deeper in the tree + Pushed bool + offsetPlanned bool + + // Original will only be true for the original aggregator created from the AST + Original bool + ResultColumns int + + QP *QueryProjection + // TableID will be non-nil for derived tables + TableID *semantics.TableSet + Alias string + } +) + +func (a *Aggregator) Clone(inputs []ops.Operator) ops.Operator { + kopy := *a + kopy.Source = inputs[0] + kopy.Columns = slices.Clone(a.Columns) + kopy.Grouping = slices.Clone(a.Grouping) + kopy.Aggregations = slices.Clone(a.Aggregations) + return &kopy +} + +func (a *Aggregator) Inputs() []ops.Operator { + return []ops.Operator{a.Source} +} + +func (a *Aggregator) SetInputs(operators []ops.Operator) { + if len(operators) != 1 { + panic(fmt.Sprintf("unexpected number of operators as input in aggregator: %d", len(operators))) + } + a.Source = operators[0] +} + +func (a *Aggregator) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { + newOp, err := a.Source.AddPredicate(ctx, expr) + if err != nil { + return nil, err + } + a.Source = newOp + return a, nil +} + +func (a *Aggregator) addColumnWithoutPushing(expr *sqlparser.AliasedExpr, addToGroupBy bool) int { + offset := len(a.Columns) + a.Columns = append(a.Columns, expr) + + if addToGroupBy { + groupBy := NewGroupBy(expr.Expr, expr.Expr, expr) + groupBy.ColOffset = offset + a.Grouping = append(a.Grouping, groupBy) + } else { + var aggr Aggr + switch e := expr.Expr.(type) { + case sqlparser.AggrFunc: + aggr = createAggrFromAggrFunc(e, expr) + default: + aggr = NewAggr(opcode.AggregateAnyValue, nil, expr, expr.As.String()) + } + aggr.ColOffset = offset + a.Aggregations = append(a.Aggregations, aggr) + } + return offset +} + +func (a *Aggregator) addColumnsWithoutPushing(ctx *plancontext.PlanningContext, reuse bool, groupby []bool, expr []*sqlparser.AliasedExpr) (offsets []int) { + for i, ae := range expr { + offsets = append(offsets, a.addColumnWithoutPushing(ae, groupby[i])) + } + return +} + +func (a *Aggregator) isDerived() bool { + return a.TableID != nil +} + +func (a *Aggregator) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, _ bool) (int, error) { + if a.isDerived() { + derivedTBL, err := ctx.SemTable.TableInfoFor(*a.TableID) + if err != nil { + return 0, err + } + expr = semantics.RewriteDerivedTableExpression(expr, derivedTBL) + } + if offset, found := canReuseColumn(ctx, a.Columns, expr, extractExpr); found { + return offset, nil + } + return -1, nil +} + +func (a *Aggregator) AddColumns(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) ([]int, error) { + offsets := make([]int, len(exprs)) + + var groupBys []bool + var exprsNeeded []*sqlparser.AliasedExpr + var offsetExpected []int + + for i, expr := range exprs { + addToGroupBy := addToGroupBy[i] + + if reuse { + offset, err := a.findColInternal(ctx, expr, addToGroupBy) + if err != nil { + return nil, err + } + if offset >= 0 { + offsets[i] = offset + continue + } + } + + // If weight string function is received from above operator. Then check if we have a group on the expression used. + // If it is found, then continue to push it down but with addToGroupBy true so that is the added to group by sql down in the AddColumn. + // This also set the weight string column offset so that we would not need to add it later in aggregator operator planOffset. + if wsExpr, isWS := expr.Expr.(*sqlparser.WeightStringFuncExpr); isWS { + idx := slices.IndexFunc(a.Grouping, func(by GroupBy) bool { + return ctx.SemTable.EqualsExprWithDeps(wsExpr.Expr, by.SimplifiedExpr) + }) + if idx >= 0 { + a.Grouping[idx].WSOffset = len(a.Columns) + addToGroupBy = true + } + } + + if !addToGroupBy { + aggr := NewAggr(opcode.AggregateAnyValue, nil, expr, expr.As.String()) + aggr.ColOffset = len(a.Columns) + a.Aggregations = append(a.Aggregations, aggr) + } + + offsets[i] = len(a.Columns) + a.Columns = append(a.Columns, expr) + groupBys = append(groupBys, addToGroupBy) + exprsNeeded = append(exprsNeeded, expr) + offsetExpected = append(offsetExpected, offsets[i]) + } + + incomingOffsets, err := a.Source.AddColumns(ctx, false, groupBys, exprsNeeded) + if err != nil { + return nil, err + } + + for i, offset := range offsetExpected { + if offset != incomingOffsets[i] { + return nil, errFailedToPlan(exprsNeeded[i]) + } + } + + return offsets, nil +} + +func (a *Aggregator) findColInternal(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, addToGroupBy bool) (int, error) { + offset, err := a.FindCol(ctx, expr.Expr, false) + if err != nil { + return 0, err + } + if offset >= 0 { + return offset, err + } + if a.isDerived() { + derivedTBL, err := ctx.SemTable.TableInfoFor(*a.TableID) + if err != nil { + return 0, err + } + expr.Expr = semantics.RewriteDerivedTableExpression(expr.Expr, derivedTBL) + } + + // Aggregator is little special and cannot work if the input offset are not matched with the aggregation columns. + // So, before pushing anything from above the aggregator offset planning needs to be completed. + err = a.planOffsets(ctx) + if err != nil { + return 0, err + } + + if offset, found := canReuseColumn(ctx, a.Columns, expr.Expr, extractExpr); found { + return offset, nil + } + colName, isColName := expr.Expr.(*sqlparser.ColName) + for i, col := range a.Columns { + if isColName && colName.Name.EqualString(col.As.String()) { + return i, nil + } + } + + if addToGroupBy { + return 0, vterrors.VT13001("did not expect to add group by here") + } + + return -1, nil +} + +func (a *Aggregator) GetColumns(ctx *plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + if _, isSourceDerived := a.Source.(*Horizon); isSourceDerived { + return a.Columns, nil + } + + // we update the incoming columns, so we know about any new columns that have been added + // in the optimization phase, other operators could be pushed down resulting in additional columns for aggregator. + // Aggregator should be made aware of these to truncate them in final result. + columns, err := a.Source.GetColumns(ctx) + if err != nil { + return nil, err + } + + // if this operator is producing more columns than expected, we want to know about it + if len(columns) > len(a.Columns) { + a.Columns = append(a.Columns, columns[len(a.Columns):]...) + } + + return a.Columns, nil +} + +func (a *Aggregator) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return transformColumnsToSelectExprs(ctx, a) +} + +func (a *Aggregator) ShortDescription() string { + columns := slice.Map(a.Columns, func(from *sqlparser.AliasedExpr) string { + return sqlparser.String(from) + }) + if a.Alias != "" { + columns = append([]string{"derived[" + a.Alias + "]"}, columns...) + } + + org := "" + if a.Original { + org = "ORG " + } + + if len(a.Grouping) == 0 { + return fmt.Sprintf("%s%s", org, strings.Join(columns, ", ")) + } + + var grouping []string + for _, gb := range a.Grouping { + grouping = append(grouping, sqlparser.String(gb.SimplifiedExpr)) + } + + return fmt.Sprintf("%s%s group by %s", org, strings.Join(columns, ", "), strings.Join(grouping, ",")) +} + +func (a *Aggregator) GetOrdering() ([]ops.OrderBy, error) { + return a.Source.GetOrdering() +} + +func (a *Aggregator) planOffsets(ctx *plancontext.PlanningContext) error { + if a.offsetPlanned { + return nil + } + defer func() { + a.offsetPlanned = true + }() + if !a.Pushed { + return a.planOffsetsNotPushed(ctx) + } + + for idx, gb := range a.Grouping { + if gb.ColOffset == -1 { + offset, err := a.internalAddColumn(ctx, aeWrap(gb.Inner), false) + if err != nil { + return err + } + a.Grouping[idx].ColOffset = offset + } + if gb.WSOffset != -1 || !ctx.SemTable.NeedsWeightString(gb.SimplifiedExpr) { + continue + } + + offset, err := a.internalAddColumn(ctx, aeWrap(weightStringFor(gb.SimplifiedExpr)), true) + if err != nil { + return err + } + a.Grouping[idx].WSOffset = offset + } + + for idx, aggr := range a.Aggregations { + if !aggr.NeedsWeightString(ctx) { + continue + } + offset, err := a.internalAddColumn(ctx, aeWrap(weightStringFor(aggr.Func.GetArg())), true) + if err != nil { + return err + } + a.Aggregations[idx].WSOffset = offset + } + + return nil +} + +func (aggr Aggr) getPushDownColumn() sqlparser.Expr { + switch aggr.OpCode { + case opcode.AggregateAnyValue: + return aggr.Original.Expr + case opcode.AggregateCountStar: + return sqlparser.NewIntLiteral("1") + case opcode.AggregateGroupConcat: + if len(aggr.Func.GetArgs()) > 1 { + panic("more than 1 column") + } + fallthrough + default: + return aggr.Func.GetArg() + } +} + +func (a *Aggregator) planOffsetsNotPushed(ctx *plancontext.PlanningContext) error { + a.Source = &Projection{Source: a.Source} + // we need to keep things in the column order, so we can't iterate over the aggregations or groupings + for colIdx := range a.Columns { + idx, err := a.addIfGroupingColumn(ctx, colIdx) + if err != nil { + return err + } + if idx >= 0 { + continue + } + + idx, err = a.addIfAggregationColumn(ctx, colIdx) + if err != nil { + return err + } + + if idx < 0 { + return vterrors.VT13001("failed to find the corresponding column") + } + } + + return a.pushRemainingGroupingColumnsAndWeightStrings(ctx) +} + +func (a *Aggregator) addIfAggregationColumn(ctx *plancontext.PlanningContext, colIdx int) (int, error) { + for _, aggr := range a.Aggregations { + if aggr.ColOffset != colIdx { + continue + } + + wrap := aeWrap(aggr.getPushDownColumn()) + offsets, err := a.Source.AddColumns(ctx, false, []bool{false}, []*sqlparser.AliasedExpr{wrap}) + if err != nil { + return 0, err + } + offset := offsets[0] + if aggr.ColOffset != offset { + return -1, errFailedToPlan(aggr.Original) + } + + return offset, nil + } + return -1, nil +} + +func errFailedToPlan(original *sqlparser.AliasedExpr) *vterrors.VitessError { + return vterrors.VT12001(fmt.Sprintf("failed to plan aggregation on: %s", sqlparser.String(original))) +} + +func (a *Aggregator) addIfGroupingColumn(ctx *plancontext.PlanningContext, colIdx int) (int, error) { + for _, gb := range a.Grouping { + if gb.ColOffset != colIdx { + continue + } + + expr := a.Columns[colIdx] + offsets, err := a.Source.AddColumns(ctx, false, []bool{true}, []*sqlparser.AliasedExpr{expr}) + if err != nil { + return -1, err + } + offset := offsets[0] + if gb.ColOffset != offset { + return -1, errFailedToPlan(expr) + } + + return offset, nil + } + return -1, nil +} + +// pushRemainingGroupingColumnsAndWeightStrings pushes any grouping column that is not part of the columns list and weight strings needed for performing grouping aggregations. +func (a *Aggregator) pushRemainingGroupingColumnsAndWeightStrings(ctx *plancontext.PlanningContext) error { + for idx, gb := range a.Grouping { + if gb.ColOffset == -1 { + offset, err := a.internalAddColumn(ctx, aeWrap(gb.Inner), false) + if err != nil { + return err + } + a.Grouping[idx].ColOffset = offset + } + + if gb.WSOffset != -1 || !ctx.SemTable.NeedsWeightString(gb.SimplifiedExpr) { + continue + } + + offset, err := a.internalAddColumn(ctx, aeWrap(weightStringFor(gb.SimplifiedExpr)), false) + if err != nil { + return err + } + a.Grouping[idx].WSOffset = offset + } + for idx, aggr := range a.Aggregations { + if aggr.WSOffset != -1 || !aggr.NeedsWeightString(ctx) { + continue + } + + offset, err := a.internalAddColumn(ctx, aeWrap(weightStringFor(aggr.Func.GetArg())), false) + if err != nil { + return err + } + a.Aggregations[idx].WSOffset = offset + } + return nil +} + +func (a *Aggregator) setTruncateColumnCount(offset int) { + a.ResultColumns = offset +} + +func (a *Aggregator) internalAddColumn(ctx *plancontext.PlanningContext, aliasedExpr *sqlparser.AliasedExpr, addToGroupBy bool) (int, error) { + offsets, err := a.Source.AddColumns(ctx, true, []bool{addToGroupBy}, []*sqlparser.AliasedExpr{aliasedExpr}) + if err != nil { + return 0, err + } + offset := offsets[0] + if offset == len(a.Columns) { + // if we get an offset at the end of our current column list, it means we added a new column + a.Columns = append(a.Columns, aliasedExpr) + } + return offset, nil +} + +// SplitAggregatorBelowRoute returns the aggregator that will live under the Route. +// This is used when we are splitting the aggregation so one part is done +// at the mysql level and one part at the vtgate level +func (a *Aggregator) SplitAggregatorBelowRoute(input []ops.Operator) *Aggregator { + newOp := a.Clone(input).(*Aggregator) + newOp.Pushed = false + newOp.Original = false + newOp.Alias = "" + newOp.TableID = nil + return newOp +} + +func (a *Aggregator) introducesTableID() semantics.TableSet { + if a.TableID == nil { + return semantics.EmptyTableSet() + } + return *a.TableID +} + +var _ ops.Operator = (*Aggregator)(nil) diff --git a/go/vt/vtgate/planbuilder/operators/apply_join.go b/go/vt/vtgate/planbuilder/operators/apply_join.go index 2968d463b1c..123633f0c1c 100644 --- a/go/vt/vtgate/planbuilder/operators/apply_join.go +++ b/go/vt/vtgate/planbuilder/operators/apply_join.go @@ -17,13 +17,15 @@ limitations under the License. package operators import ( - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "fmt" + "maps" + "slices" + "strings" + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) @@ -32,16 +34,6 @@ import ( type ApplyJoin struct { LHS, RHS ops.Operator - // Columns stores the column indexes of the columns coming from the left and right side - // negative value comes from LHS and positive from RHS - Columns []int - - // ColumnsAST keeps track of what AST expression is represented in the Columns array - ColumnsAST []sqlparser.Expr - - // Vars are the arguments that need to be copied from the LHS to the RHS - Vars map[string]int - // LeftJoin will be true in the case of an outer join LeftJoin bool @@ -49,10 +41,42 @@ type ApplyJoin struct { // These are the same columns pushed on the LHS that are now used in the Vars field LHSColumns []*sqlparser.ColName + // Before offset planning Predicate sqlparser.Expr + + // JoinColumns keeps track of what AST expression is represented in the Columns array + JoinColumns []JoinColumn + + // JoinPredicates are join predicates that have been broken up into left hand side and right hand side parts. + JoinPredicates []JoinColumn + + // After offset planning + + // Columns stores the column indexes of the columns coming from the left and right side + // negative value comes from LHS and positive from RHS + Columns []int + + // Vars are the arguments that need to be copied from the LHS to the RHS + Vars map[string]int } -var _ ops.PhysicalOperator = (*ApplyJoin)(nil) +// JoinColumn is where we store information about columns passing through the join operator +// It can be in one of three possible configurations: +// - Pure left +// We are projecting a column that comes from the left. The RHSExpr will be nil for these +// - Pure right +// We are projecting a column that comes from the right. The LHSExprs will be empty for these +// - Mix of data from left and right +// Here we need to transmit columns from the LHS to the RHS, +// so they can be used for the result of this expression that is using data from both sides. +// All fields will be used for these +type JoinColumn struct { + Original *sqlparser.AliasedExpr // this is the original expression being passed through + BvNames []string // the BvNames and LHSCols line up + LHSExprs []sqlparser.Expr + RHSExpr sqlparser.Expr + GroupBy bool // if this is true, we need to push this down to our inputs with addToGroupBy set to true +} func NewApplyJoin(lhs, rhs ops.Operator, predicate sqlparser.Expr, leftOuterJoin bool) *ApplyJoin { return &ApplyJoin{ @@ -64,25 +88,23 @@ func NewApplyJoin(lhs, rhs ops.Operator, predicate sqlparser.Expr, leftOuterJoin } } -// IPhysical implements the PhysicalOperator interface -func (a *ApplyJoin) IPhysical() {} - // Clone implements the Operator interface func (a *ApplyJoin) Clone(inputs []ops.Operator) ops.Operator { return &ApplyJoin{ - LHS: inputs[0], - RHS: inputs[1], - Columns: slices.Clone(a.Columns), - ColumnsAST: slices.Clone(a.ColumnsAST), - Vars: maps.Clone(a.Vars), - LeftJoin: a.LeftJoin, - Predicate: sqlparser.CloneExpr(a.Predicate), - LHSColumns: slices.Clone(a.LHSColumns), + LHS: inputs[0], + RHS: inputs[1], + Columns: slices.Clone(a.Columns), + JoinColumns: slices.Clone(a.JoinColumns), + JoinPredicates: slices.Clone(a.JoinPredicates), + Vars: maps.Clone(a.Vars), + LeftJoin: a.LeftJoin, + Predicate: sqlparser.CloneExpr(a.Predicate), + LHSColumns: slices.Clone(a.LHSColumns), } } func (a *ApplyJoin) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { - return AddPredicate(a, ctx, expr, false, newFilter) + return AddPredicate(ctx, a, expr, false, newFilter) } // Inputs implements the Operator interface @@ -90,6 +112,11 @@ func (a *ApplyJoin) Inputs() []ops.Operator { return []ops.Operator{a.LHS, a.RHS} } +// SetInputs implements the Operator interface +func (a *ApplyJoin) SetInputs(inputs []ops.Operator) { + a.LHS, a.RHS = inputs[0], inputs[1] +} + var _ JoinOp = (*ApplyJoin)(nil) func (a *ApplyJoin) GetLHS() ops.Operator { @@ -117,77 +144,208 @@ func (a *ApplyJoin) IsInner() bool { } func (a *ApplyJoin) AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) error { - bvName, cols, predicate, err := BreakExpressionInLHSandRHS(ctx, expr, TableID(a.LHS)) + a.Predicate = ctx.SemTable.AndExpressions(expr, a.Predicate) + + col, err := BreakExpressionInLHSandRHS(ctx, expr, TableID(a.LHS)) if err != nil { return err } - for i, col := range cols { - offset, err := a.LHS.AddColumn(ctx, col) - if err != nil { - return err - } - a.Vars[bvName[i]] = offset - } - a.LHSColumns = append(a.LHSColumns, cols...) - - rhs, err := a.RHS.AddPredicate(ctx, predicate) + a.JoinPredicates = append(a.JoinPredicates, col) + rhs, err := a.RHS.AddPredicate(ctx, col.RHSExpr) if err != nil { return err } a.RHS = rhs - a.Predicate = ctx.SemTable.AndExpressions(expr, a.Predicate) return nil } -func (a *ApplyJoin) AddColumn(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (int, error) { - // first check if we already are passing through this expression - for i, existing := range a.ColumnsAST { - if ctx.SemTable.EqualsExpr(existing, expr) { - return i, nil - } +func (a *ApplyJoin) pushColLeft(ctx *plancontext.PlanningContext, e *sqlparser.AliasedExpr, addToGroupBy bool) (int, error) { + offsets, err := a.LHS.AddColumns(ctx, true, []bool{addToGroupBy}, []*sqlparser.AliasedExpr{e}) + if err != nil { + return 0, err } + return offsets[0], nil +} + +func (a *ApplyJoin) pushColRight(ctx *plancontext.PlanningContext, e *sqlparser.AliasedExpr, addToGroupBy bool) (int, error) { + offsets, err := a.RHS.AddColumns(ctx, true, []bool{addToGroupBy}, []*sqlparser.AliasedExpr{e}) + if err != nil { + return 0, err + } + return offsets[0], nil +} + +func (a *ApplyJoin) GetColumns(*plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return slice.Map(a.JoinColumns, joinColumnToAliasedExpr), nil +} + +func (a *ApplyJoin) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return transformColumnsToSelectExprs(ctx, a) +} + +func (a *ApplyJoin) GetOrdering() ([]ops.OrderBy, error) { + return a.LHS.GetOrdering() +} + +func joinColumnToAliasedExpr(c JoinColumn) *sqlparser.AliasedExpr { + return c.Original +} + +func joinColumnToExpr(column JoinColumn) sqlparser.Expr { + return column.Original.Expr +} +func (a *ApplyJoin) getJoinColumnFor(ctx *plancontext.PlanningContext, e *sqlparser.AliasedExpr, addToGroupBy bool) (col JoinColumn, err error) { + defer func() { + col.Original = e + }() lhs := TableID(a.LHS) rhs := TableID(a.RHS) both := lhs.Merge(rhs) + expr := e.Expr deps := ctx.SemTable.RecursiveDeps(expr) + col.GroupBy = addToGroupBy - // if we get here, it's a new expression we are dealing with. - // We need to decide if we can push it all on either side, - // or if we have to break the expression into left and right parts switch { case deps.IsSolvedBy(lhs): - offset, err := a.LHS.AddColumn(ctx, expr) + col.LHSExprs = []sqlparser.Expr{expr} + case deps.IsSolvedBy(rhs): + col.RHSExpr = expr + case deps.IsSolvedBy(both): + col, err = BreakExpressionInLHSandRHS(ctx, expr, TableID(a.LHS)) if err != nil { - return 0, err + return JoinColumn{}, err } - a.Columns = append(a.Columns, -offset-1) - case deps.IsSolvedBy(both): - bvNames, lhsExprs, rhsExpr, err := BreakExpressionInLHSandRHS(ctx, expr, lhs) + default: + return JoinColumn{}, vterrors.VT13002(sqlparser.String(e)) + } + + return +} + +func (a *ApplyJoin) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, _ bool) (int, error) { + offset, found := canReuseColumn(ctx, a.JoinColumns, expr, joinColumnToExpr) + if !found { + return -1, nil + } + return offset, nil +} + +func (a *ApplyJoin) AddColumn(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, _, addToGroupBy bool) (ops.Operator, int, error) { + if offset, err := a.FindCol(ctx, expr.Expr, false); err != nil || offset != -1 { + return a, offset, err + } + + if offset, found := canReuseColumn(ctx, a.JoinColumns, expr.Expr, joinColumnToExpr); found { + return a, offset, nil + } + col, err := a.getJoinColumnFor(ctx, expr, addToGroupBy) + if err != nil { + return nil, 0, err + } + a.JoinColumns = append(a.JoinColumns, col) + return a, len(a.JoinColumns) - 1, nil +} + +func (a *ApplyJoin) AddColumns( + ctx *plancontext.PlanningContext, + reuse bool, + addToGroupBy []bool, + exprs []*sqlparser.AliasedExpr, +) (offsets []int, err error) { + offsets = make([]int, len(exprs)) + for i, expr := range exprs { + if reuse { + offset, err := a.FindCol(ctx, expr.Expr, false) + if err != nil { + return nil, err + } + if offset != -1 { + offsets[i] = offset + continue + } + } + + col, err := a.getJoinColumnFor(ctx, expr, addToGroupBy[i]) if err != nil { - return 0, err + return nil, err + } + + offsets[i] = len(a.JoinColumns) + a.JoinColumns = append(a.JoinColumns, col) + } + return +} + +func (a *ApplyJoin) planOffsets(ctx *plancontext.PlanningContext) (err error) { + for _, col := range a.JoinColumns { + // Read the type description for JoinColumn to understand the following code + for i, lhsExpr := range col.LHSExprs { + offset, err := a.pushColLeft(ctx, aeWrap(lhsExpr), col.GroupBy) + if err != nil { + return err + } + if col.RHSExpr == nil { + // if we don't have an RHS expr, it means that this is a pure LHS expression + a.addOffset(-offset - 1) + } else { + a.Vars[col.BvNames[i]] = offset + } } - for i, lhsExpr := range lhsExprs { - offset, err := a.LHS.AddColumn(ctx, lhsExpr) + if col.RHSExpr != nil { + offset, err := a.pushColRight(ctx, aeWrap(col.RHSExpr), col.GroupBy) if err != nil { - return 0, err + return err } - a.Vars[bvNames[i]] = offset + a.addOffset(offset + 1) } - expr = rhsExpr - fallthrough // now we just pass the rest to the RHS of the join - case deps.IsSolvedBy(rhs): - offset, err := a.RHS.AddColumn(ctx, expr) + } + + for _, col := range a.JoinPredicates { + for i, lhsExpr := range col.LHSExprs { + offset, err := a.pushColLeft(ctx, aeWrap(lhsExpr), false) + if err != nil { + return err + } + a.Vars[col.BvNames[i]] = offset + } + lhsColumns := slice.Map(col.LHSExprs, func(from sqlparser.Expr) *sqlparser.ColName { + col, ok := from.(*sqlparser.ColName) + if !ok { + // todo: there is no good reason to keep this limitation around + err = vterrors.VT13001("joins can only compare columns: %s", sqlparser.String(from)) + } + return col + }) if err != nil { - return 0, err + return err } - a.Columns = append(a.Columns, offset+1) - default: - return 0, vterrors.VT13002(sqlparser.String(expr)) + a.LHSColumns = append(a.LHSColumns, lhsColumns...) } + return nil +} + +func (a *ApplyJoin) addOffset(offset int) { + a.Columns = append(a.Columns, offset) +} + +func (a *ApplyJoin) ShortDescription() string { + pred := sqlparser.String(a.Predicate) + columns := slice.Map(a.JoinColumns, func(from JoinColumn) string { + return sqlparser.String(from.Original) + }) + return fmt.Sprintf("on %s columns: %s", pred, strings.Join(columns, ", ")) +} + +func (jc JoinColumn) IsPureLeft() bool { + return jc.RHSExpr == nil +} + +func (jc JoinColumn) IsPureRight() bool { + return len(jc.LHSExprs) == 0 +} - // the expression wasn't already there - let's add it - a.ColumnsAST = append(a.ColumnsAST, expr) - return len(a.Columns) - 1, nil +func (jc JoinColumn) IsMixedLeftAndRight() bool { + return len(jc.LHSExprs) > 0 && jc.RHSExpr != nil } diff --git a/go/vt/vtgate/planbuilder/operators/logical.go b/go/vt/vtgate/planbuilder/operators/ast2op.go similarity index 55% rename from go/vt/vtgate/planbuilder/operators/logical.go rename to go/vt/vtgate/planbuilder/operators/ast2op.go index 87bcd42709e..8e8d0ddde33 100644 --- a/go/vt/vtgate/planbuilder/operators/logical.go +++ b/go/vt/vtgate/planbuilder/operators/ast2op.go @@ -21,14 +21,16 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" + "vitess.io/vitess/go/vt/vtgate/vindexes" ) -// createLogicalOperatorFromAST creates an operator tree that represents the input SELECT or UNION query -func createLogicalOperatorFromAST(ctx *plancontext.PlanningContext, selStmt sqlparser.Statement) (op ops.Operator, err error) { +const foriegnKeyContraintValues = "fkc_vals" + +// translateQueryToOp creates an operator tree that represents the input SELECT or UNION query +func translateQueryToOp(ctx *plancontext.PlanningContext, selStmt sqlparser.Statement) (op ops.Operator, err error) { switch node := selStmt.(type) { case *sqlparser.Select: op, err = createOperatorFromSelect(ctx, node) @@ -38,6 +40,8 @@ func createLogicalOperatorFromAST(ctx *plancontext.PlanningContext, selStmt sqlp op, err = createOperatorFromUpdate(ctx, node) case *sqlparser.Delete: op, err = createOperatorFromDelete(ctx, node) + case *sqlparser.Insert: + op, err = createOperatorFromInsert(ctx, node) default: err = vterrors.VT12001(fmt.Sprintf("operator: %T", selStmt)) } @@ -48,7 +52,6 @@ func createLogicalOperatorFromAST(ctx *plancontext.PlanningContext, selStmt sqlp return op, nil } -// createOperatorFromSelect creates an operator tree that represents the input SELECT query func createOperatorFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) (ops.Operator, error) { subq, err := createSubqueryFromStatement(ctx, sel) if err != nil { @@ -69,21 +72,20 @@ func createOperatorFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.S addColumnEquality(ctx, expr) } } - if subq == nil { - return &Horizon{ - Source: op, - Select: sel, - }, nil + + if subq != nil { + subq.Outer = op + op = subq } - subq.Outer = op + return &Horizon{ - Source: subq, - Select: sel, + Source: op, + Query: sel, }, nil } func createOperatorFromUnion(ctx *plancontext.PlanningContext, node *sqlparser.Union) (ops.Operator, error) { - opLHS, err := createLogicalOperatorFromAST(ctx, node.Left) + opLHS, err := translateQueryToOp(ctx, node.Left) if err != nil { return nil, err } @@ -92,152 +94,38 @@ func createOperatorFromUnion(ctx *plancontext.PlanningContext, node *sqlparser.U if isRHSUnion { return nil, vterrors.VT12001("nesting of UNIONs on the right-hand side") } - opRHS, err := createLogicalOperatorFromAST(ctx, node.Right) - if err != nil { - return nil, err - } - - union := &Union{ - Distinct: node.Distinct, - Sources: []ops.Operator{opLHS, opRHS}, - Ordering: node.OrderBy, - } - return &Horizon{Source: union, Select: node}, nil -} - -func createOperatorFromUpdate(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update) (ops.Operator, error) { - tableInfo, qt, err := createQueryTableForDML(ctx, updStmt.TableExprs[0], updStmt.Where) - if err != nil { - return nil, err - } - - assignments := make(map[string]sqlparser.Expr) - for _, set := range updStmt.Exprs { - assignments[set.Name.Name.String()] = set.Expr - } - - vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "update") - if err != nil { - return nil, err - } - - vp, cvv, ovq, err := getUpdateVindexInformation(updStmt, vindexTable, qt.ID, qt.Predicates) + opRHS, err := translateQueryToOp(ctx, node.Right) if err != nil { return nil, err } - tr, ok := routing.(*ShardedRouting) - if ok { - tr.VindexPreds = vp - } - - for _, predicate := range qt.Predicates { - var err error - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } - } + lexprs := ctx.SemTable.SelectExprs(node.Left) + rexprs := ctx.SemTable.SelectExprs(node.Right) - if routing.OpCode() == engine.Scatter && updStmt.Limit != nil { - // TODO systay: we should probably check for other op code types - IN could also hit multiple shards (2022-04-07) - return nil, vterrors.VT12001("multi shard UPDATE with LIMIT") - } - - r := &Route{ - Source: &Update{ - QTable: qt, - VTable: vindexTable, - Assignments: assignments, - ChangedVindexValues: cvv, - OwnedVindexQuery: ovq, - AST: updStmt, - }, - Routing: routing, - } - - subq, err := createSubqueryFromStatement(ctx, updStmt) - if err != nil { - return nil, err - } - if subq == nil { - return r, nil - } - subq.Outer = r - return subq, nil + unionCols := ctx.SemTable.SelectExprs(node) + union := newUnion([]ops.Operator{opLHS, opRHS}, []sqlparser.SelectExprs{lexprs, rexprs}, unionCols, node.Distinct) + return &Horizon{Source: union, Query: node}, nil } -func createOperatorFromDelete(ctx *plancontext.PlanningContext, deleteStmt *sqlparser.Delete) (ops.Operator, error) { - tableInfo, qt, err := createQueryTableForDML(ctx, deleteStmt.TableExprs[0], deleteStmt.Where) - if err != nil { - return nil, err - } - - vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "delete") +// createOpFromStmt creates an operator from the given statement. It takes in two additional arguments— +// 1. verifyAllFKs: For this given statement, do we need to verify validity of all the foreign keys on the vtgate level. +// 2. fkToIgnore: The foreign key constraint to specifically ignore while planning the statement. +func createOpFromStmt(ctx *plancontext.PlanningContext, stmt sqlparser.Statement, verifyAllFKs bool, fkToIgnore string) (ops.Operator, error) { + newCtx, err := plancontext.CreatePlanningContext(stmt, ctx.ReservedVars, ctx.VSchema, ctx.PlannerVersion) if err != nil { return nil, err } - del := &Delete{ - QTable: qt, - VTable: vindexTable, - AST: deleteStmt, - } - route := &Route{ - Source: del, - Routing: routing, - } - - if !vindexTable.Keyspace.Sharded { - return route, nil - } - - primaryVindex, vindexAndPredicates, err := getVindexInformation(qt.ID, qt.Predicates, vindexTable) - if err != nil { - return nil, err - } - - tr, ok := routing.(*ShardedRouting) - if ok { - tr.VindexPreds = vindexAndPredicates - } - - var ovq string - if len(vindexTable.Owned) > 0 { - tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: vindexTable.Name}, As: qt.Alias.As} - ovq = generateOwnedVindexQuery(tblExpr, deleteStmt, vindexTable, primaryVindex.Columns) - } - - del.OwnedVindexQuery = ovq - - for _, predicate := range qt.Predicates { - var err error - route.Routing, err = UpdateRoutingLogic(ctx, predicate, route.Routing) - if err != nil { - return nil, err - } - } - - if routing.OpCode() == engine.Scatter && deleteStmt.Limit != nil { - // TODO systay: we should probably check for other op code types - IN could also hit multiple shards (2022-04-07) - return nil, vterrors.VT12001("multi shard DELETE with LIMIT") - } + newCtx.VerifyAllFKs = verifyAllFKs + newCtx.ParentFKToIgnore = fkToIgnore - subq, err := createSubqueryFromStatement(ctx, deleteStmt) - if err != nil { - return nil, err - } - if subq == nil { - return route, nil - } - subq.Outer = route - return subq, nil + return PlanQuery(newCtx, stmt) } -func getOperatorFromTableExpr(ctx *plancontext.PlanningContext, tableExpr sqlparser.TableExpr) (ops.Operator, error) { +func getOperatorFromTableExpr(ctx *plancontext.PlanningContext, tableExpr sqlparser.TableExpr, onlyTable bool) (ops.Operator, error) { switch tableExpr := tableExpr.(type) { case *sqlparser.AliasedTableExpr: - return getOperatorFromAliasedTableExpr(ctx, tableExpr) + return getOperatorFromAliasedTableExpr(ctx, tableExpr, onlyTable) case *sqlparser.JoinTableExpr: return getOperatorFromJoinTableExpr(ctx, tableExpr) case *sqlparser.ParenTableExpr: @@ -248,11 +136,11 @@ func getOperatorFromTableExpr(ctx *plancontext.PlanningContext, tableExpr sqlpar } func getOperatorFromJoinTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr) (ops.Operator, error) { - lhs, err := getOperatorFromTableExpr(ctx, tableExpr.LeftExpr) + lhs, err := getOperatorFromTableExpr(ctx, tableExpr.LeftExpr, false) if err != nil { return nil, err } - rhs, err := getOperatorFromTableExpr(ctx, tableExpr.RightExpr) + rhs, err := getOperatorFromTableExpr(ctx, tableExpr.RightExpr, false) if err != nil { return nil, err } @@ -267,17 +155,17 @@ func getOperatorFromJoinTableExpr(ctx *plancontext.PlanningContext, tableExpr *s } } -func getOperatorFromAliasedTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.AliasedTableExpr) (ops.Operator, error) { +func getOperatorFromAliasedTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.AliasedTableExpr, onlyTable bool) (ops.Operator, error) { + tableID := ctx.SemTable.TableSetFor(tableExpr) switch tbl := tableExpr.Expr.(type) { case sqlparser.TableName: - tableID := ctx.SemTable.TableSetFor(tableExpr) tableInfo, err := ctx.SemTable.TableInfoFor(tableID) if err != nil { return nil, err } if vt, isVindex := tableInfo.(*semantics.VindexTable); isVindex { - solves := ctx.SemTable.TableSetFor(tableExpr) + solves := tableID return &Vindex{ Table: VindexTable{ TableID: tableID, @@ -295,7 +183,7 @@ func getOperatorFromAliasedTableExpr(ctx *plancontext.PlanningContext, tableExpr qg.Tables = append(qg.Tables, qt) return qg, nil case *sqlparser.DerivedTable: - inner, err := createLogicalOperatorFromAST(ctx, tbl.Select) + inner, err := translateQueryToOp(ctx, tbl.Select) if err != nil { return nil, err } @@ -303,7 +191,22 @@ func getOperatorFromAliasedTableExpr(ctx *plancontext.PlanningContext, tableExpr inner = horizon.Source } - return &Derived{Alias: tableExpr.As.String(), Source: inner, Query: tbl.Select, ColumnAliases: tableExpr.Columns}, nil + if onlyTable && tbl.Select.GetLimit() == nil { + tbl.Select.SetOrderBy(nil) + } + qp, err := CreateQPFromSelectStatement(ctx, tbl.Select) + if err != nil { + return nil, err + } + + return &Horizon{ + TableId: &tableID, + Alias: tableExpr.As.String(), + Source: inner, + Query: tbl.Select, + ColumnAliases: tableExpr.Columns, + QP: qp, + }, nil default: return nil, vterrors.VT13001(fmt.Sprintf("unable to use: %T", tbl)) } @@ -312,7 +215,7 @@ func getOperatorFromAliasedTableExpr(ctx *plancontext.PlanningContext, tableExpr func crossJoin(ctx *plancontext.PlanningContext, exprs sqlparser.TableExprs) (ops.Operator, error) { var output ops.Operator for _, tableExpr := range exprs { - op, err := getOperatorFromTableExpr(ctx, tableExpr) + op, err := getOperatorFromTableExpr(ctx, tableExpr, len(exprs) == 1) if err != nil { return nil, err } @@ -350,11 +253,10 @@ func createQueryTableForDML(ctx *plancontext.PlanningContext, tableExpr sqlparse predicates = sqlparser.SplitAndExpression(nil, whereClause.Expr) } qt := &QueryTable{ - ID: tableID, - Alias: alTbl, - Table: tblName, - Predicates: predicates, - IsInfSchema: false, + ID: tableID, + Alias: alTbl, + Table: tblName, + Predicates: predicates, } return tableInfo, qt, nil } @@ -374,3 +276,29 @@ func addColumnEquality(ctx *plancontext.PlanningContext, expr sqlparser.Expr) { } } } + +// createSelectionOp creates the selection operator to select the parent columns for the foreign key constraints. +// The Select statement looks something like this - `SELECT FROM WHERE ` +// TODO (@Harshit, @GuptaManan100): Compress the columns in the SELECT statement, if there are multiple foreign key constraints using the same columns. +func createSelectionOp(ctx *plancontext.PlanningContext, selectExprs []sqlparser.SelectExpr, tableExprs sqlparser.TableExprs, where *sqlparser.Where, limit *sqlparser.Limit, lock sqlparser.Lock) (ops.Operator, error) { + selectionStmt := &sqlparser.Select{ + SelectExprs: selectExprs, + From: tableExprs, + Where: where, + Limit: limit, + Lock: lock, + } + // There are no foreign keys to check for a select query, so we can pass anything for verifyAllFKs and fkToIgnore. + return createOpFromStmt(ctx, selectionStmt, false /* verifyAllFKs */, "" /* fkToIgnore */) +} + +func selectParentColumns(fk vindexes.ChildFKInfo, lastOffset int) ([]int, []sqlparser.SelectExpr) { + var cols []int + var exprs []sqlparser.SelectExpr + for _, column := range fk.ParentColumns { + cols = append(cols, lastOffset) + exprs = append(exprs, aeWrap(sqlparser.NewColName(column.String()))) + lastOffset++ + } + return cols, exprs +} diff --git a/go/vt/vtgate/planbuilder/operators/ast2op_test.go b/go/vt/vtgate/planbuilder/operators/ast2op_test.go new file mode 100644 index 00000000000..4dbcf49e80a --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/ast2op_test.go @@ -0,0 +1,205 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +// Test_fkNeedsHandlingForUpdates tests the functionality of the function fkNeedsHandlingForUpdates. +// It verifies the different cases in which foreign key handling is required on vtgate level. +func Test_fkNeedsHandlingForUpdates(t *testing.T) { + t1 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: &vindexes.Keyspace{Name: "ks"}, + } + t2 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t2"), + Keyspace: &vindexes.Keyspace{Name: "ks2"}, + } + t3 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t3"), + Keyspace: &vindexes.Keyspace{Name: "ks"}, + } + + tests := []struct { + name string + verifyAllFks bool + parentFkToIgnore string + updateExprs sqlparser.UpdateExprs + parentFks []vindexes.ParentFKInfo + childFks []vindexes.ChildFKInfo + parentFKsWanted []bool + childFKsWanted []bool + }{{ + name: "No Fks filtered", + updateExprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("a"), Expr: sqlparser.NewIntLiteral("1")}, + }, + childFks: []vindexes.ChildFKInfo{ + {Table: t2, ParentColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFks: []vindexes.ParentFKInfo{ + {Table: t2, ChildColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFKsWanted: []bool{true}, + childFKsWanted: []bool{true}, + }, { + name: "Child Fks filtering", + updateExprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("a"), Expr: sqlparser.NewIntLiteral("1")}, + }, + childFks: []vindexes.ChildFKInfo{ + {Table: t2, ParentColumns: sqlparser.MakeColumns("b", "a", "c")}, + {Table: t2, ParentColumns: sqlparser.MakeColumns("d", "c")}, + }, + parentFks: []vindexes.ParentFKInfo{ + {Table: t2, ChildColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFKsWanted: []bool{true}, + childFKsWanted: []bool{true, false}, + }, { + name: "Parent Fks filtered based on columns", + updateExprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("a"), Expr: sqlparser.NewIntLiteral("1")}, + }, + childFks: []vindexes.ChildFKInfo{ + {Table: t2, ParentColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFks: []vindexes.ParentFKInfo{ + {Table: t2, ChildColumns: sqlparser.MakeColumns("b", "a", "c")}, + {Table: t2, ChildColumns: sqlparser.MakeColumns("d", "b")}, + }, + parentFKsWanted: []bool{true, false}, + childFKsWanted: []bool{true}, + }, { + name: "Parent Fks filtered because all null values", + updateExprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("a"), Expr: &sqlparser.NullVal{}}, + }, + childFks: []vindexes.ChildFKInfo{ + {Table: t2, ParentColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFks: []vindexes.ParentFKInfo{ + {Table: t2, ChildColumns: sqlparser.MakeColumns("b", "a", "c")}, + {Table: t2, ChildColumns: sqlparser.MakeColumns("a", "b")}, + }, + parentFKsWanted: []bool{false, false}, + childFKsWanted: []bool{true}, + }, { + name: "Parent Fks filtered because some column has null values", + updateExprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("a"), Expr: sqlparser.NewIntLiteral("1")}, + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("c"), Expr: &sqlparser.NullVal{}}, + }, + childFks: []vindexes.ChildFKInfo{ + {Table: t2, ParentColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFks: []vindexes.ParentFKInfo{ + {Table: t2, ChildColumns: sqlparser.MakeColumns("b", "a", "c")}, + {Table: t2, ChildColumns: sqlparser.MakeColumns("a", "b")}, + {Table: t3, ChildColumns: sqlparser.MakeColumns("a", "b")}, + }, + parentFKsWanted: []bool{false, true, false}, + childFKsWanted: []bool{true}, + }, { + name: "Unsharded fk with verifyAllFk", + verifyAllFks: true, + updateExprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("a"), Expr: sqlparser.NewIntLiteral("1")}, + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("c"), Expr: &sqlparser.NullVal{}}, + }, + childFks: []vindexes.ChildFKInfo{ + {Table: t2, ParentColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFks: []vindexes.ParentFKInfo{ + {Table: t2, ChildColumns: sqlparser.MakeColumns("b", "a", "c")}, + {Table: t2, ChildColumns: sqlparser.MakeColumns("a", "b")}, + {Table: t3, ChildColumns: sqlparser.MakeColumns("a", "b")}, + {Table: t3, ChildColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFKsWanted: []bool{false, true, true, false}, + childFKsWanted: []bool{true}, + }, { + name: "Mixed case", + verifyAllFks: true, + parentFkToIgnore: "ks.t1abks.t3", + updateExprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("a"), Expr: sqlparser.NewIntLiteral("1")}, + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("c"), Expr: &sqlparser.NullVal{}}, + }, + childFks: []vindexes.ChildFKInfo{ + {Table: t2, ParentColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFks: []vindexes.ParentFKInfo{ + {Table: t2, ChildColumns: sqlparser.MakeColumns("b", "a", "c")}, + {Table: t2, ChildColumns: sqlparser.MakeColumns("a", "b")}, + {Table: t3, ChildColumns: sqlparser.MakeColumns("a", "b")}, + {Table: t3, ChildColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFKsWanted: []bool{false, true, false, false}, + childFKsWanted: []bool{true}, + }, { + name: "Ignore Fk specified", + parentFkToIgnore: "ks.t1aefks2.t2", + updateExprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("a"), Expr: sqlparser.NewIntLiteral("1")}, + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("c"), Expr: &sqlparser.NullVal{}}, + }, + childFks: []vindexes.ChildFKInfo{ + {Table: t2, ParentColumns: sqlparser.MakeColumns("a", "b", "c")}, + }, + parentFks: []vindexes.ParentFKInfo{ + {Table: t2, ChildColumns: sqlparser.MakeColumns("b", "a", "c")}, + {Table: t2, ChildColumns: sqlparser.MakeColumns("a", "b")}, + {Table: t2, ChildColumns: sqlparser.MakeColumns("a", "e", "f")}, + }, + parentFKsWanted: []bool{false, true, false}, + childFKsWanted: []bool{true}, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t1.ParentForeignKeys = tt.parentFks + t1.ChildForeignKeys = tt.childFks + ctx := &plancontext.PlanningContext{ + VerifyAllFKs: tt.verifyAllFks, + ParentFKToIgnore: tt.parentFkToIgnore, + } + parentFksGot, childFksGot := getFKRequirementsForUpdate(ctx, tt.updateExprs, t1) + var pFks []vindexes.ParentFKInfo + for idx, expected := range tt.parentFKsWanted { + if expected { + pFks = append(pFks, tt.parentFks[idx]) + } + } + var cFks []vindexes.ChildFKInfo + for idx, expected := range tt.childFKsWanted { + if expected { + cFks = append(cFks, tt.childFks[idx]) + } + } + require.EqualValues(t, pFks, parentFksGot) + require.EqualValues(t, cFks, childFksGot) + }) + } +} diff --git a/go/vt/vtgate/planbuilder/operators/correlated_subquery.go b/go/vt/vtgate/planbuilder/operators/correlated_subquery.go index d95207f0a7a..1e59da8e2bc 100644 --- a/go/vt/vtgate/planbuilder/operators/correlated_subquery.go +++ b/go/vt/vtgate/planbuilder/operators/correlated_subquery.go @@ -46,12 +46,6 @@ type ( } ) -var _ ops.PhysicalOperator = (*SubQueryOp)(nil) -var _ ops.PhysicalOperator = (*CorrelatedSubQueryOp)(nil) - -// IPhysical implements the PhysicalOperator interface -func (s *SubQueryOp) IPhysical() {} - // Clone implements the Operator interface func (s *SubQueryOp) Clone(inputs []ops.Operator) ops.Operator { result := &SubQueryOp{ @@ -62,13 +56,23 @@ func (s *SubQueryOp) Clone(inputs []ops.Operator) ops.Operator { return result } +func (s *SubQueryOp) GetOrdering() ([]ops.OrderBy, error) { + return s.Outer.GetOrdering() +} + // Inputs implements the Operator interface func (s *SubQueryOp) Inputs() []ops.Operator { return []ops.Operator{s.Outer, s.Inner} } -// IPhysical implements the PhysicalOperator interface -func (c *CorrelatedSubQueryOp) IPhysical() {} +// SetInputs implements the Operator interface +func (s *SubQueryOp) SetInputs(ops []ops.Operator) { + s.Outer, s.Inner = ops[0], ops[1] +} + +func (s *SubQueryOp) ShortDescription() string { + return "" +} // Clone implements the Operator interface func (c *CorrelatedSubQueryOp) Clone(inputs []ops.Operator) ops.Operator { @@ -89,7 +93,20 @@ func (c *CorrelatedSubQueryOp) Clone(inputs []ops.Operator) ops.Operator { return result } +func (c *CorrelatedSubQueryOp) GetOrdering() ([]ops.OrderBy, error) { + return c.Outer.GetOrdering() +} + // Inputs implements the Operator interface func (c *CorrelatedSubQueryOp) Inputs() []ops.Operator { return []ops.Operator{c.Outer, c.Inner} } + +// SetInputs implements the Operator interface +func (c *CorrelatedSubQueryOp) SetInputs(ops []ops.Operator) { + c.Outer, c.Inner = ops[0], ops[1] +} + +func (c *CorrelatedSubQueryOp) ShortDescription() string { + return "" +} diff --git a/go/vt/vtgate/planbuilder/operators/delete.go b/go/vt/vtgate/planbuilder/operators/delete.go index d33acd8b013..bdac9e7c99a 100644 --- a/go/vt/vtgate/planbuilder/operators/delete.go +++ b/go/vt/vtgate/planbuilder/operators/delete.go @@ -17,8 +17,14 @@ limitations under the License. package operators import ( + "fmt" + + vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -34,18 +40,13 @@ type Delete struct { noPredicates } -var _ ops.PhysicalOperator = (*Delete)(nil) - // Introduces implements the PhysicalOperator interface -func (d *Delete) Introduces() semantics.TableSet { +func (d *Delete) introducesTableID() semantics.TableSet { return d.QTable.ID } -// IPhysical implements the PhysicalOperator interface -func (d *Delete) IPhysical() {} - // Clone implements the Operator interface -func (d *Delete) Clone(inputs []ops.Operator) ops.Operator { +func (d *Delete) Clone([]ops.Operator) ops.Operator { return &Delete{ QTable: d.QTable, VTable: d.VTable, @@ -60,3 +61,203 @@ func (d *Delete) TablesUsed() []string { } return nil } + +func (d *Delete) GetOrdering() ([]ops.OrderBy, error) { + return nil, nil +} + +func (d *Delete) ShortDescription() string { + return fmt.Sprintf("%s.%s %s", d.VTable.Keyspace.Name, d.VTable.Name.String(), sqlparser.String(d.AST.Where)) +} + +func (d *Delete) Statement() sqlparser.Statement { + return d.AST +} + +func createOperatorFromDelete(ctx *plancontext.PlanningContext, deleteStmt *sqlparser.Delete) (ops.Operator, error) { + tableInfo, qt, err := createQueryTableForDML(ctx, deleteStmt.TableExprs[0], deleteStmt.Where) + if err != nil { + return nil, err + } + + vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "delete") + if err != nil { + return nil, err + } + + delClone := sqlparser.CloneRefOfDelete(deleteStmt) + // Create the delete operator first. + delOp, err := createDeleteOperator(ctx, deleteStmt, qt, vindexTable, routing) + if err != nil { + return nil, err + } + + // Now we check for the foreign key mode and make changes if required. + ksMode, err := ctx.VSchema.ForeignKeyMode(vindexTable.Keyspace.Name) + if err != nil { + return nil, err + } + + // Unmanaged foreign-key-mode, we don't need to do anything. + if ksMode != vschemapb.Keyspace_FK_MANAGED { + return delOp, nil + } + + childFks := vindexTable.ChildFKsNeedsHandling(ctx.VerifyAllFKs, vindexes.DeleteAction) + // If there are no foreign key constraints, then we don't need to do anything. + if len(childFks) == 0 { + return delOp, nil + } + // If the delete statement has a limit, we don't support it yet. + if deleteStmt.Limit != nil { + return nil, vterrors.VT12001("foreign keys management at vitess with limit") + } + + return createFkCascadeOpForDelete(ctx, delOp, delClone, childFks) +} + +func createDeleteOperator( + ctx *plancontext.PlanningContext, + deleteStmt *sqlparser.Delete, + qt *QueryTable, + vindexTable *vindexes.Table, + routing Routing) (ops.Operator, error) { + del := &Delete{ + QTable: qt, + VTable: vindexTable, + AST: deleteStmt, + } + route := &Route{ + Source: del, + Routing: routing, + } + + if !vindexTable.Keyspace.Sharded { + return route, nil + } + + primaryVindex, vindexAndPredicates, err := getVindexInformation(qt.ID, qt.Predicates, vindexTable) + if err != nil { + return nil, err + } + + tr, ok := routing.(*ShardedRouting) + if ok { + tr.VindexPreds = vindexAndPredicates + } + + var ovq string + if len(vindexTable.Owned) > 0 { + tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: vindexTable.Name}, As: qt.Alias.As} + ovq = generateOwnedVindexQuery(tblExpr, deleteStmt, vindexTable, primaryVindex.Columns) + } + + del.OwnedVindexQuery = ovq + + for _, predicate := range qt.Predicates { + var err error + route.Routing, err = UpdateRoutingLogic(ctx, predicate, route.Routing) + if err != nil { + return nil, err + } + } + + if routing.OpCode() == engine.Scatter && deleteStmt.Limit != nil { + // TODO systay: we should probably check for other op code types - IN could also hit multiple shards (2022-04-07) + return nil, vterrors.VT12001("multi shard DELETE with LIMIT") + } + + subq, err := createSubqueryFromStatement(ctx, deleteStmt) + if err != nil { + return nil, err + } + if subq == nil { + return route, nil + } + subq.Outer = route + return subq, nil +} + +func createFkCascadeOpForDelete(ctx *plancontext.PlanningContext, parentOp ops.Operator, delStmt *sqlparser.Delete, childFks []vindexes.ChildFKInfo) (ops.Operator, error) { + var fkChildren []*FkChild + var selectExprs []sqlparser.SelectExpr + for _, fk := range childFks { + // Any RESTRICT type foreign keys that arrive here, + // are cross-shard/cross-keyspace RESTRICT cases, which we don't currently support. + if fk.OnDelete.IsRestrict() { + return nil, vterrors.VT12002() + } + + // We need to select all the parent columns for the foreign key constraint, to use in the update of the child table. + cols, exprs := selectParentColumns(fk, len(selectExprs)) + selectExprs = append(selectExprs, exprs...) + + fkChild, err := createFkChildForDelete(ctx, fk, cols) + if err != nil { + return nil, err + } + fkChildren = append(fkChildren, fkChild) + } + selectionOp, err := createSelectionOp(ctx, selectExprs, delStmt.TableExprs, delStmt.Where, nil, sqlparser.ForUpdateLock) + if err != nil { + return nil, err + } + + return &FkCascade{ + Selection: selectionOp, + Children: fkChildren, + Parent: parentOp, + }, nil +} + +func createFkChildForDelete(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, cols []int) (*FkChild, error) { + bvName := ctx.ReservedVars.ReserveVariable(foriegnKeyContraintValues) + + var childStmt sqlparser.Statement + switch fk.OnDelete { + case sqlparser.Cascade: + // We now construct the delete query for the child table. + // The query looks something like this - `DELETE FROM WHERE IN ()` + var valTuple sqlparser.ValTuple + for _, column := range fk.ChildColumns { + valTuple = append(valTuple, sqlparser.NewColName(column.String())) + } + compExpr := sqlparser.NewComparisonExpr(sqlparser.InOp, valTuple, sqlparser.NewListArg(bvName), nil) + childStmt = &sqlparser.Delete{ + TableExprs: []sqlparser.TableExpr{sqlparser.NewAliasedTableExpr(fk.Table.GetTableName(), "")}, + Where: &sqlparser.Where{Type: sqlparser.WhereClause, Expr: compExpr}, + } + case sqlparser.SetNull: + // We now construct the update query for the child table. + // The query looks something like this - `UPDATE SET = NULL [AND = NULL]... WHERE IN ()` + var valTuple sqlparser.ValTuple + var updExprs sqlparser.UpdateExprs + for _, column := range fk.ChildColumns { + valTuple = append(valTuple, sqlparser.NewColName(column.String())) + updExprs = append(updExprs, &sqlparser.UpdateExpr{ + Name: sqlparser.NewColName(column.String()), + Expr: &sqlparser.NullVal{}, + }) + } + compExpr := sqlparser.NewComparisonExpr(sqlparser.InOp, valTuple, sqlparser.NewListArg(bvName), nil) + childStmt = &sqlparser.Update{ + Exprs: updExprs, + TableExprs: []sqlparser.TableExpr{sqlparser.NewAliasedTableExpr(fk.Table.GetTableName(), "")}, + Where: &sqlparser.Where{Type: sqlparser.WhereClause, Expr: compExpr}, + } + case sqlparser.SetDefault: + return nil, vterrors.VT09016() + } + + // For the child statement of a DELETE query, we don't need to verify all the FKs on VTgate or ignore any foreign key explicitly. + childOp, err := createOpFromStmt(ctx, childStmt, false /* verifyAllFKs */, "" /* fkToIgnore */) + if err != nil { + return nil, err + } + + return &FkChild{ + BVName: bvName, + Cols: cols, + Op: childOp, + }, nil +} diff --git a/go/vt/vtgate/planbuilder/operators/derived.go b/go/vt/vtgate/planbuilder/operators/derived.go deleted file mode 100644 index 1488162bf46..00000000000 --- a/go/vt/vtgate/planbuilder/operators/derived.go +++ /dev/null @@ -1,167 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package operators - -import ( - "golang.org/x/exp/slices" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" -) - -type Derived struct { - Source ops.Operator - - Query sqlparser.SelectStatement - Alias string - ColumnAliases sqlparser.Columns - - // Columns needed to feed other plans - Columns []*sqlparser.ColName - ColumnsOffset []int -} - -var _ ops.PhysicalOperator = (*Derived)(nil) - -// IPhysical implements the PhysicalOperator interface -func (d *Derived) IPhysical() {} - -// Clone implements the Operator interface -func (d *Derived) Clone(inputs []ops.Operator) ops.Operator { - return &Derived{ - Source: inputs[0], - Query: d.Query, - Alias: d.Alias, - ColumnAliases: sqlparser.CloneColumns(d.ColumnAliases), - Columns: slices.Clone(d.Columns), - ColumnsOffset: slices.Clone(d.ColumnsOffset), - } -} - -// findOutputColumn returns the index on which the given name is found in the slice of -// *sqlparser.SelectExprs of the derivedTree. The *sqlparser.SelectExpr must be of type -// *sqlparser.AliasedExpr and match the given name. -// If name is not present but the query's select expressions contain a *sqlparser.StarExpr -// the function will return no error and an index equal to -1. -// If name is not present and the query does not have a *sqlparser.StarExpr, the function -// will return an unknown column error. -func (d *Derived) findOutputColumn(name *sqlparser.ColName) (int, error) { - hasStar := false - for j, exp := range sqlparser.GetFirstSelect(d.Query).SelectExprs { - switch exp := exp.(type) { - case *sqlparser.AliasedExpr: - if !exp.As.IsEmpty() && exp.As.Equal(name.Name) { - return j, nil - } - if exp.As.IsEmpty() { - col, ok := exp.Expr.(*sqlparser.ColName) - if !ok { - return 0, vterrors.VT12001("complex expression needs column alias: %s", sqlparser.String(exp)) - } - if name.Name.Equal(col.Name) { - return j, nil - } - } - case *sqlparser.StarExpr: - hasStar = true - } - } - - // we have found a star but no matching *sqlparser.AliasedExpr, thus we return -1 with no error. - if hasStar { - return -1, nil - } - return 0, vterrors.VT03014(name.Name.String(), "field list") -} - -// IsMergeable is not a great name for this function. Suggestions for a better one are welcome! -// This function will return false if the derived table inside it has to run on the vtgate side, and so can't be merged with subqueries -// This logic can also be used to check if this is a derived table that can be had on the left hand side of a vtgate join. -// Since vtgate joins are always nested loop joins, we can't execute them on the RHS -// if they do some things, like LIMIT or GROUP BY on wrong columns -func (d *Derived) IsMergeable(ctx *plancontext.PlanningContext) bool { - return isMergeable(ctx, d.Query, d) -} - -// Inputs implements the Operator interface -func (d *Derived) Inputs() []ops.Operator { - return []ops.Operator{d.Source} -} - -func (d *Derived) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { - if _, isUNion := d.Source.(*Union); isUNion { - // If we have a derived table on top of a UNION, we can let the UNION do the expression rewriting - var err error - d.Source, err = d.Source.AddPredicate(ctx, expr) - return d, err - } - tableInfo, err := ctx.SemTable.TableInfoForExpr(expr) - if err != nil { - if err == semantics.ErrNotSingleTable { - return &Filter{ - Source: d, - Predicates: []sqlparser.Expr{expr}, - }, nil - } - return nil, err - } - - newExpr := semantics.RewriteDerivedTableExpression(expr, tableInfo) - d.Source, err = d.Source.AddPredicate(ctx, newExpr) - if err != nil { - return nil, err - } - return d, nil -} - -func (d *Derived) AddColumn(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (int, error) { - col, ok := expr.(*sqlparser.ColName) - if !ok { - return 0, vterrors.VT13001("cannot push non-colname expression to a derived table") - } - - i, err := d.findOutputColumn(col) - if err != nil { - return 0, err - } - var pos int - d.ColumnsOffset, pos = addToIntSlice(d.ColumnsOffset, i) - - // add it to the source if we were not already passing it through - if i <= -1 { - d.Columns = append(d.Columns, col) - _, err := d.Source.AddColumn(ctx, sqlparser.NewColName(col.Name.String())) - if err != nil { - return 0, err - } - } - return pos, nil -} - -func addToIntSlice(columnOffset []int, valToAdd int) ([]int, int) { - for idx, val := range columnOffset { - if val == valToAdd { - return columnOffset, idx - } - } - columnOffset = append(columnOffset, valToAdd) - return columnOffset, len(columnOffset) - 1 -} diff --git a/go/vt/vtgate/planbuilder/operators/distinct.go b/go/vt/vtgate/planbuilder/operators/distinct.go new file mode 100644 index 00000000000..c6145aba3b2 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/distinct.go @@ -0,0 +1,155 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "slices" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +type ( + Distinct struct { + Source ops.Operator + QP *QueryProjection + + // When we go from AST to operator, we place DISTINCT ops in the required places in the op tree + // These are marked as `Required`, because they are semantically important to the results of the query. + // During planning, when we can't push down the DISTINCT op any further, we sometimes create and push down + // additional DISTINCT ops that are not strictly required, but that limit the number of incoming rows so less + // work has to be done. When we have pushed down these performance DISTINCTs, we set the `PushedPerformance` + // field to true on the originating op + Required bool + PushedPerformance bool + + // This is only filled in during offset planning + Columns []engine.CheckCol + + Truncate int + } +) + +func (d *Distinct) planOffsets(ctx *plancontext.PlanningContext) error { + columns, err := d.GetColumns(ctx) + if err != nil { + return err + } + var wsExprs []*sqlparser.AliasedExpr + var addToGroupBy []bool + wsNeeded := make([]bool, len(columns)) + for idx, col := range columns { + addToGroupBy = append(addToGroupBy, false) + e := d.QP.GetSimplifiedExpr(col.Expr) + if ctx.SemTable.NeedsWeightString(e) { + wsExprs = append(wsExprs, aeWrap(weightStringFor(e))) + addToGroupBy = append(addToGroupBy, false) + wsNeeded[idx] = true + } + } + offsets, err := d.Source.AddColumns(ctx, true, addToGroupBy, append(columns, wsExprs...)) + if err != nil { + return err + } + modifiedCols, err := d.GetColumns(ctx) + if err != nil { + return err + } + if len(modifiedCols) < len(columns) { + return vterrors.VT12001("unable to plan the distinct query as not able to align the columns") + } + n := len(columns) + wsOffset := 0 + for i, col := range columns { + var wsCol *int + if wsNeeded[i] { + wsCol = &offsets[n+wsOffset] + wsOffset++ + } + e := d.QP.GetSimplifiedExpr(col.Expr) + typ, coll, _ := ctx.SemTable.TypeForExpr(e) + d.Columns = append(d.Columns, engine.CheckCol{ + Col: i, + WsCol: wsCol, + Type: typ, + Collation: coll, + }) + } + return nil +} + +func (d *Distinct) Clone(inputs []ops.Operator) ops.Operator { + return &Distinct{ + Required: d.Required, + Source: inputs[0], + Columns: slices.Clone(d.Columns), + QP: d.QP, + PushedPerformance: d.PushedPerformance, + Truncate: d.Truncate, + } +} + +func (d *Distinct) Inputs() []ops.Operator { + return []ops.Operator{d.Source} +} + +func (d *Distinct) SetInputs(operators []ops.Operator) { + d.Source = operators[0] +} + +func (d *Distinct) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { + newSrc, err := d.Source.AddPredicate(ctx, expr) + if err != nil { + return nil, err + } + d.Source = newSrc + return d, nil +} + +func (d *Distinct) AddColumns(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) ([]int, error) { + return d.Source.AddColumns(ctx, reuse, addToGroupBy, exprs) +} + +func (d *Distinct) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + return d.Source.FindCol(ctx, expr, underRoute) +} + +func (d *Distinct) GetColumns(ctx *plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return d.Source.GetColumns(ctx) +} + +func (d *Distinct) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return d.Source.GetSelectExprs(ctx) +} + +func (d *Distinct) ShortDescription() string { + if d.Required { + return "Required" + } + return "Performance" +} + +func (d *Distinct) GetOrdering() ([]ops.OrderBy, error) { + return d.Source.GetOrdering() +} + +func (d *Distinct) setTruncateColumnCount(offset int) { + d.Truncate = offset +} diff --git a/go/vt/vtgate/planbuilder/operators/dml_planning.go b/go/vt/vtgate/planbuilder/operators/dml_planning.go index d1d331abe4b..a9c5c4b8871 100644 --- a/go/vt/vtgate/planbuilder/operators/dml_planning.go +++ b/go/vt/vtgate/planbuilder/operators/dml_planning.go @@ -46,7 +46,7 @@ func getVindexInformation( var vindexesAndPredicates []*VindexPlusPredicates for _, colVindex := range table.Ordered { if lu, isLu := colVindex.Vindex.(vindexes.LookupBackfill); isLu && lu.IsBackfilling() { - // Checking if the Vindex is currently backfilling or not, if it isn't we can read from the vindex table + // Checking if the Vindex is currently backfilling or not, if it isn't we can read from the vindex table, // and we will be able to do a delete equal. Otherwise, we continue to look for next best vindex. continue } @@ -156,7 +156,7 @@ func extractValueFromUpdate(upd *sqlparser.UpdateExpr) (evalengine.Expr, error) expr = sqlparser.NewArgument(sq.GetArgName()) } - pv, err := evalengine.Translate(expr, semantics.EmptySemTable()) + pv, err := evalengine.Translate(expr, nil) if err != nil || sqlparser.IsSimpleTuple(expr) { return nil, invalidUpdateExpr(upd, expr) } diff --git a/go/vt/vtgate/planbuilder/operators/expressions.go b/go/vt/vtgate/planbuilder/operators/expressions.go index 5a49bb3a058..246a6702142 100644 --- a/go/vt/vtgate/planbuilder/operators/expressions.go +++ b/go/vt/vtgate/planbuilder/operators/expressions.go @@ -29,13 +29,15 @@ func BreakExpressionInLHSandRHS( ctx *plancontext.PlanningContext, expr sqlparser.Expr, lhs semantics.TableSet, -) (bvNames []string, columns []*sqlparser.ColName, rewrittenExpr sqlparser.Expr, err error) { - rewrittenExpr = sqlparser.CopyOnRewrite(expr, nil, func(cursor *sqlparser.CopyOnWriteCursor) { - node, ok := cursor.Node().(*sqlparser.ColName) - if !ok { +) (col JoinColumn, err error) { + rewrittenExpr := sqlparser.CopyOnRewrite(expr, nil, func(cursor *sqlparser.CopyOnWriteCursor) { + node := cursor.Node() + reservedName := getReservedBVName(node) + if reservedName == "" { return } - deps := ctx.SemTable.RecursiveDeps(node) + nodeExpr := node.(sqlparser.Expr) + deps := ctx.SemTable.RecursiveDeps(nodeExpr) if deps.IsEmpty() { err = vterrors.VT13001("unknown column. has the AST been copied?") cursor.StopTreeWalk() @@ -45,20 +47,34 @@ func BreakExpressionInLHSandRHS( return } - node.Qualifier.Qualifier = sqlparser.NewIdentifierCS("") - columns = append(columns, node) - bvName := node.CompliantName() - bvNames = append(bvNames, bvName) + col.LHSExprs = append(col.LHSExprs, nodeExpr) + bvName := ctx.GetArgumentFor(nodeExpr, func() string { + return ctx.ReservedVars.ReserveVariable(reservedName) + }) + + col.BvNames = append(col.BvNames, bvName) arg := sqlparser.NewArgument(bvName) // we are replacing one of the sides of the comparison with an argument, // but we don't want to lose the type information we have, so we copy it over - ctx.SemTable.CopyExprInfo(node, arg) + ctx.SemTable.CopyExprInfo(nodeExpr, arg) cursor.Replace(arg) }, nil).(sqlparser.Expr) if err != nil { - return nil, nil, nil, err + return JoinColumn{}, err } ctx.JoinPredicates[expr] = append(ctx.JoinPredicates[expr], rewrittenExpr) + col.RHSExpr = rewrittenExpr return } + +func getReservedBVName(node sqlparser.SQLNode) string { + switch node := node.(type) { + case *sqlparser.ColName: + node.Qualifier.Qualifier = sqlparser.NewIdentifierCS("") + return node.CompliantName() + case sqlparser.AggrFunc: + return sqlparser.CompliantString(node) + } + return "" +} diff --git a/go/vt/vtgate/planbuilder/operators/filter.go b/go/vt/vtgate/planbuilder/operators/filter.go index d28511dbe86..a05d0e6eee2 100644 --- a/go/vt/vtgate/planbuilder/operators/filter.go +++ b/go/vt/vtgate/planbuilder/operators/filter.go @@ -17,7 +17,13 @@ limitations under the License. package operators import ( + "slices" + "strings" + + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" @@ -27,9 +33,13 @@ import ( type Filter struct { Source ops.Operator Predicates []sqlparser.Expr -} -var _ ops.PhysicalOperator = (*Filter)(nil) + // FinalPredicate is the evalengine expression that will finally be used. + // It contains the ANDed predicates in Predicates, with ColName:s replaced by Offset:s + FinalPredicate evalengine.Expr + + Truncate int +} func newFilter(op ops.Operator, expr sqlparser.Expr) ops.Operator { return &Filter{ @@ -37,16 +47,13 @@ func newFilter(op ops.Operator, expr sqlparser.Expr) ops.Operator { } } -// IPhysical implements the PhysicalOperator interface -func (f *Filter) IPhysical() {} - // Clone implements the Operator interface func (f *Filter) Clone(inputs []ops.Operator) ops.Operator { - predicatesClone := make([]sqlparser.Expr, len(f.Predicates)) - copy(predicatesClone, f.Predicates) return &Filter{ - Source: inputs[0], - Predicates: predicatesClone, + Source: inputs[0], + Predicates: slices.Clone(f.Predicates), + FinalPredicate: f.FinalPredicate, + Truncate: f.Truncate, } } @@ -55,6 +62,11 @@ func (f *Filter) Inputs() []ops.Operator { return []ops.Operator{f.Source} } +// SetInputs implements the Operator interface +func (f *Filter) SetInputs(ops []ops.Operator) { + f.Source = ops[0] +} + // UnsolvedPredicates implements the unresolved interface func (f *Filter) UnsolvedPredicates(st *semantics.SemTable) []sqlparser.Expr { var result []sqlparser.Expr @@ -77,13 +89,29 @@ func (f *Filter) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.E return f, nil } -func (f *Filter) AddColumn(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (int, error) { - return f.Source.AddColumn(ctx, expr) +func (f *Filter) AddColumns(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) ([]int, error) { + return f.Source.AddColumns(ctx, reuse, addToGroupBy, exprs) +} + +func (f *Filter) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + return f.Source.FindCol(ctx, expr, underRoute) +} + +func (f *Filter) GetColumns(ctx *plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return f.Source.GetColumns(ctx) +} + +func (f *Filter) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return f.Source.GetSelectExprs(ctx) +} + +func (f *Filter) GetOrdering() ([]ops.OrderBy, error) { + return f.Source.GetOrdering() } -func (f *Filter) Compact(*plancontext.PlanningContext) (ops.Operator, rewrite.TreeIdentity, error) { +func (f *Filter) Compact(*plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) { if len(f.Predicates) == 0 { - return f.Source, rewrite.NewTree, nil + return f.Source, rewrite.NewTree("filter with no predicates removed", f), nil } other, isFilter := f.Source.(*Filter) @@ -92,5 +120,36 @@ func (f *Filter) Compact(*plancontext.PlanningContext) (ops.Operator, rewrite.Tr } f.Source = other.Source f.Predicates = append(f.Predicates, other.Predicates...) - return f, rewrite.NewTree, nil + return f, rewrite.NewTree("two filters merged into one", f), nil +} + +func (f *Filter) planOffsets(ctx *plancontext.PlanningContext) error { + cfg := &evalengine.Config{ + ResolveType: ctx.SemTable.TypeForExpr, + Collation: ctx.SemTable.Collation, + } + + predicate := sqlparser.AndExpressions(f.Predicates...) + rewritten, err := useOffsets(ctx, predicate, f) + if err != nil { + return err + } + eexpr, err := evalengine.Translate(rewritten, cfg) + if err != nil { + if strings.HasPrefix(err.Error(), evalengine.ErrTranslateExprNotSupported) { + return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "%s: %s", evalengine.ErrTranslateExprNotSupported, sqlparser.String(predicate)) + } + return err + } + + f.FinalPredicate = eexpr + return nil +} + +func (f *Filter) ShortDescription() string { + return sqlparser.String(sqlparser.AndExpressions(f.Predicates...)) +} + +func (f *Filter) setTruncateColumnCount(offset int) { + f.Truncate = offset } diff --git a/go/vt/vtgate/planbuilder/operators/fk_cascade.go b/go/vt/vtgate/planbuilder/operators/fk_cascade.go new file mode 100644 index 00000000000..a9afbde0a7c --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/fk_cascade.go @@ -0,0 +1,106 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "slices" + + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" +) + +// FkChild is used to represent a foreign key child table operation +type FkChild struct { + BVName string + Cols []int // indexes + Op ops.Operator + + noColumns + noPredicates +} + +// FkCascade is used to represent a foreign key cascade operation +// as an operator. This operator is created for DML queries that require +// cascades (for example, ON DELETE CASCADE). +type FkCascade struct { + Selection ops.Operator + Children []*FkChild + Parent ops.Operator + + noColumns + noPredicates +} + +var _ ops.Operator = (*FkCascade)(nil) + +// Inputs implements the Operator interface +func (fkc *FkCascade) Inputs() []ops.Operator { + var inputs []ops.Operator + inputs = append(inputs, fkc.Parent) + inputs = append(inputs, fkc.Selection) + for _, child := range fkc.Children { + inputs = append(inputs, child.Op) + } + return inputs +} + +// SetInputs implements the Operator interface +func (fkc *FkCascade) SetInputs(operators []ops.Operator) { + if len(operators) < 2 { + panic("incorrect count of inputs for FkCascade") + } + fkc.Parent = operators[0] + fkc.Selection = operators[1] + for idx, operator := range operators { + if idx < 2 { + continue + } + fkc.Children[idx-2].Op = operator + } +} + +// Clone implements the Operator interface +func (fkc *FkCascade) Clone(inputs []ops.Operator) ops.Operator { + if len(inputs) < 2 { + panic("incorrect count of inputs for FkCascade") + } + newFkc := &FkCascade{ + Parent: inputs[0], + Selection: inputs[1], + } + for idx, operator := range inputs { + if idx < 2 { + continue + } + + newFkc.Children = append(newFkc.Children, &FkChild{ + BVName: fkc.Children[idx-2].BVName, + Cols: slices.Clone(fkc.Children[idx-2].Cols), + Op: operator, + }) + } + return newFkc +} + +// GetOrdering implements the Operator interface +func (fkc *FkCascade) GetOrdering() ([]ops.OrderBy, error) { + return nil, nil +} + +// ShortDescription implements the Operator interface +func (fkc *FkCascade) ShortDescription() string { + return "" +} diff --git a/go/vt/vtgate/planbuilder/operators/fk_verify.go b/go/vt/vtgate/planbuilder/operators/fk_verify.go new file mode 100644 index 00000000000..8c2431d26fc --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/fk_verify.go @@ -0,0 +1,80 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" +) + +// VerifyOp keeps the information about the foreign key verification operation. +// It is a Parent verification or a Child verification. +type VerifyOp struct { + Op ops.Operator + Typ string +} + +// FkVerify is used to represent a foreign key verification operation +// as an operator. This operator is created for DML queries that require +// verifications on the existence of the rows in the parent table (for example, INSERT and UPDATE). +type FkVerify struct { + Verify []*VerifyOp + Input ops.Operator + + noColumns + noPredicates +} + +var _ ops.Operator = (*FkVerify)(nil) + +// Inputs implements the Operator interface +func (fkv *FkVerify) Inputs() []ops.Operator { + inputs := []ops.Operator{fkv.Input} + for _, v := range fkv.Verify { + inputs = append(inputs, v.Op) + } + return inputs +} + +// SetInputs implements the Operator interface +func (fkv *FkVerify) SetInputs(operators []ops.Operator) { + fkv.Input = operators[0] + if len(fkv.Verify) != len(operators)-1 { + panic("mismatched number of verify inputs") + } + for i := 1; i < len(operators); i++ { + fkv.Verify[i-1].Op = operators[i] + } +} + +// Clone implements the Operator interface +func (fkv *FkVerify) Clone(inputs []ops.Operator) ops.Operator { + newFkv := &FkVerify{ + Verify: fkv.Verify, + } + newFkv.SetInputs(inputs) + return newFkv +} + +// GetOrdering implements the Operator interface +func (fkv *FkVerify) GetOrdering() ([]ops.OrderBy, error) { + return nil, nil +} + +// ShortDescription implements the Operator interface +func (fkv *FkVerify) ShortDescription() string { + return "" +} diff --git a/go/vt/vtgate/planbuilder/operators/fuzz.go b/go/vt/vtgate/planbuilder/operators/fuzz.go index bb8c508e56b..6ee6b0bab83 100644 --- a/go/vt/vtgate/planbuilder/operators/fuzz.go +++ b/go/vt/vtgate/planbuilder/operators/fuzz.go @@ -17,10 +17,10 @@ limitations under the License. package operators import ( + fuzz "github.com/AdaLogics/go-fuzz-headers" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/semantics" - - fuzz "github.com/AdaLogics/go-fuzz-headers" ) // FuzzAnalyse implements the fuzzer diff --git a/go/vt/vtgate/planbuilder/operators/helpers.go b/go/vt/vtgate/planbuilder/operators/helpers.go index 3ac028851b4..21be634d7d8 100644 --- a/go/vt/vtgate/planbuilder/operators/helpers.go +++ b/go/vt/vtgate/planbuilder/operators/helpers.go @@ -28,24 +28,24 @@ import ( "vitess.io/vitess/go/vt/vtgate/vindexes" ) -// Compact will optimise the operator tree into a smaller but equivalent version -func Compact(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { +// compact will optimise the operator tree into a smaller but equivalent version +func compact(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { type compactable interface { // Compact implement this interface for operators that have easy to see optimisations - Compact(ctx *plancontext.PlanningContext) (ops.Operator, rewrite.TreeIdentity, error) + Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) } - newOp, err := rewrite.BottomUp(op, semantics.EmptyTableSet(), TableID, func(_ semantics.TableSet, op ops.Operator) (ops.Operator, rewrite.TreeIdentity, error) { + newOp, err := rewrite.BottomUp(op, TableID, func(op ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { newOp, ok := op.(compactable) if !ok { return op, rewrite.SameTree, nil } return newOp.Compact(ctx) - }) + }, stopAtRoute) return newOp, err } -func CheckValid(op ops.Operator) error { +func checkValid(op ops.Operator) error { type checkable interface { CheckValid() error } @@ -67,15 +67,15 @@ func Clone(op ops.Operator) ops.Operator { return op.Clone(clones) } -// TableIDIntroducer is used to signal that this operator introduces data from a new source -type TableIDIntroducer interface { - Introduces() semantics.TableSet +// tableIDIntroducer is used to signal that this operator introduces data from a new source +type tableIDIntroducer interface { + introducesTableID() semantics.TableSet } func TableID(op ops.Operator) (result semantics.TableSet) { _ = rewrite.Visit(op, func(this ops.Operator) error { - if tbl, ok := this.(TableIDIntroducer); ok { - result = result.Merge(tbl.Introduces()) + if tbl, ok := this.(tableIDIntroducer); ok { + result = result.Merge(tbl.introducesTableID()) } return nil }) @@ -103,7 +103,7 @@ func TablesUsed(op ops.Operator) []string { func UnresolvedPredicates(op ops.Operator, st *semantics.SemTable) (result []sqlparser.Expr) { type unresolved interface { // UnsolvedPredicates returns any predicates that have dependencies on the given Operator and - // on the outside of it (a parent Select expression, any other table not used by Operator, etc). + // on the outside of it (a parent Select expression, any other table not used by Operator, etc.). // This is used for sub-queries. An example query could be: // SELECT * FROM tbl WHERE EXISTS (SELECT 1 FROM otherTbl WHERE tbl.col = otherTbl.col) // The subquery would have one unsolved predicate: `tbl.col = otherTbl.col` @@ -148,14 +148,6 @@ func QualifiedString(ks *vindexes.Keyspace, s string) string { return fmt.Sprintf("%s.%s", ks.Name, s) } -func QualifiedStrings(ks *vindexes.Keyspace, ss []string) []string { - add, collect := collectSortedUniqueStrings() - for _, s := range ss { - add(QualifiedString(ks, s)) - } - return collect() -} - func QualifiedTableName(ks *vindexes.Keyspace, t sqlparser.TableName) string { return QualifiedIdentifier(ks, t.Name) } @@ -184,10 +176,6 @@ func SingleQualifiedString(ks *vindexes.Keyspace, s string) []string { return []string{QualifiedString(ks, s)} } -func SingleQualifiedTableName(ks *vindexes.Keyspace, t sqlparser.TableName) []string { - return SingleQualifiedIdentifier(ks, t.Name) -} - func collectSortedUniqueStrings() (add func(string), collect func() []string) { uniq := make(map[string]any) add = func(v string) { diff --git a/go/vt/vtgate/planbuilder/operators/horizon.go b/go/vt/vtgate/planbuilder/operators/horizon.go index 7bbe3eb9e98..da97e74d8cc 100644 --- a/go/vt/vtgate/planbuilder/operators/horizon.go +++ b/go/vt/vtgate/planbuilder/operators/horizon.go @@ -17,41 +17,215 @@ limitations under the License. package operators import ( + "slices" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" ) -// Horizon is an operator we use until we decide how to handle the source to the horizon. +// Horizon is an operator that allows us to postpone planning things like SELECT/GROUP BY/ORDER BY/LIMIT until later. // It contains information about the planning we have to do after deciding how we will send the query to the tablets. +// If we are able to push down the Horizon under a route, we don't have to plan these things separately and can +// just copy over the AST constructs to the query being sent to a tablet. +// If we are not able to push it down, this operator needs to be split up into smaller +// Project/Aggregate/Sort/Limit operations, some which can be pushed down, +// and some that have to be evaluated at the vtgate level. type Horizon struct { Source ops.Operator - Select sqlparser.SelectStatement - noColumns + // If this is a derived table, the two following fields will contain the tableID and name of it + TableId *semantics.TableSet + Alias string + + // QP contains the QueryProjection for this op + QP *QueryProjection + + Query sqlparser.SelectStatement + ColumnAliases sqlparser.Columns + + // Columns needed to feed other plans + Columns []*sqlparser.ColName + ColumnsOffset []int +} + +// Clone implements the Operator interface +func (h *Horizon) Clone(inputs []ops.Operator) ops.Operator { + return &Horizon{ + Source: inputs[0], + Query: h.Query, + Alias: h.Alias, + ColumnAliases: sqlparser.CloneColumns(h.ColumnAliases), + Columns: slices.Clone(h.Columns), + ColumnsOffset: slices.Clone(h.ColumnsOffset), + TableId: h.TableId, + QP: h.QP, + } +} + +// IsMergeable is not a great name for this function. Suggestions for a better one are welcome! +// This function will return false if the derived table inside it has to run on the vtgate side, and so can't be merged with subqueries +// This logic can also be used to check if this is a derived table that can be had on the left hand side of a vtgate join. +// Since vtgate joins are always nested loop joins, we can't execute them on the RHS +// if they do some things, like LIMIT or GROUP BY on wrong columns +func (h *Horizon) IsMergeable(ctx *plancontext.PlanningContext) bool { + return isMergeable(ctx, h.Query, h) } -var _ ops.Operator = (*Horizon)(nil) -var _ ops.PhysicalOperator = (*Horizon)(nil) +// Inputs implements the Operator interface +func (h *Horizon) Inputs() []ops.Operator { + return []ops.Operator{h.Source} +} -func (h *Horizon) IPhysical() {} +// SetInputs implements the Operator interface +func (h *Horizon) SetInputs(ops []ops.Operator) { + h.Source = ops[0] +} func (h *Horizon) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { - newSrc, err := h.Source.AddPredicate(ctx, expr) + if _, isUNion := h.Source.(*Union); isUNion { + // If we have a derived table on top of a UNION, we can let the UNION do the expression rewriting + var err error + h.Source, err = h.Source.AddPredicate(ctx, expr) + return h, err + } + tableInfo, err := ctx.SemTable.TableInfoForExpr(expr) + if err != nil { + if err == semantics.ErrNotSingleTable { + return &Filter{ + Source: h, + Predicates: []sqlparser.Expr{expr}, + }, nil + } + return nil, err + } + + newExpr := semantics.RewriteDerivedTableExpression(expr, tableInfo) + if sqlparser.ContainsAggregation(newExpr) { + return &Filter{Source: h, Predicates: []sqlparser.Expr{expr}}, nil + } + h.Source, err = h.Source.AddPredicate(ctx, newExpr) if err != nil { return nil, err } - h.Source = newSrc return h, nil } -func (h *Horizon) Clone(inputs []ops.Operator) ops.Operator { - return &Horizon{ - Source: inputs[0], - Select: h.Select, +func (h *Horizon) AddColumns(ctx *plancontext.PlanningContext, reuse bool, _ []bool, exprs []*sqlparser.AliasedExpr) ([]int, error) { + if !reuse { + return nil, errNoNewColumns } + offsets := make([]int, len(exprs)) + for i, expr := range exprs { + col, ok := expr.Expr.(*sqlparser.ColName) + if !ok { + return nil, vterrors.VT13001("cannot push non-ColName expression to horizon") + } + offset, err := h.FindCol(ctx, col, false) + if err != nil { + return nil, err + } + + if offset < 0 { + return nil, errNoNewColumns + } + offsets[i] = offset + } + + return offsets, nil } -func (h *Horizon) Inputs() []ops.Operator { - return []ops.Operator{h.Source} +var errNoNewColumns = vterrors.VT13001("can't add new columns to Horizon") + +// canReuseColumn is generic, so it can be used with slices of different types. +// We don't care about the actual type, as long as we know it's a sqlparser.Expr +func canReuseColumn[T any]( + ctx *plancontext.PlanningContext, + columns []T, + col sqlparser.Expr, + f func(T) sqlparser.Expr, +) (offset int, found bool) { + for offset, column := range columns { + if ctx.SemTable.EqualsExprWithDeps(col, f(column)) { + return offset, true + } + } + + return +} + +func (h *Horizon) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, _ bool) (int, error) { + for idx, se := range sqlparser.GetFirstSelect(h.Query).SelectExprs { + ae, ok := se.(*sqlparser.AliasedExpr) + if !ok { + return 0, vterrors.VT09015() + } + if ctx.SemTable.EqualsExprWithDeps(ae.Expr, expr) { + return idx, nil + } + } + + return -1, nil +} + +func (h *Horizon) GetColumns(ctx *plancontext.PlanningContext) (exprs []*sqlparser.AliasedExpr, err error) { + for _, expr := range ctx.SemTable.SelectExprs(h.Query) { + ae, ok := expr.(*sqlparser.AliasedExpr) + if !ok { + return nil, vterrors.VT09015() + } + exprs = append(exprs, ae) + } + + return exprs, nil +} + +func (h *Horizon) GetSelectExprs(*plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return sqlparser.GetFirstSelect(h.Query).SelectExprs, nil +} + +func (h *Horizon) GetOrdering() ([]ops.OrderBy, error) { + if h.QP == nil { + return nil, vterrors.VT13001("QP should already be here") + } + return h.QP.OrderExprs, nil +} + +// TODO: REMOVE +func (h *Horizon) selectStatement() sqlparser.SelectStatement { + return h.Query +} + +func (h *Horizon) src() ops.Operator { + return h.Source +} + +func (h *Horizon) getQP(ctx *plancontext.PlanningContext) (*QueryProjection, error) { + if h.QP != nil { + return h.QP, nil + } + qp, err := CreateQPFromSelectStatement(ctx, h.Query) + if err != nil { + return nil, err + } + h.QP = qp + return h.QP, nil +} + +func (h *Horizon) ShortDescription() string { + return h.Alias +} + +func (h *Horizon) introducesTableID() semantics.TableSet { + if h.TableId == nil { + return semantics.EmptyTableSet() + } + + return *h.TableId +} + +func (h *Horizon) IsDerived() bool { + return h.TableId != nil } diff --git a/go/vt/vtgate/planbuilder/operators/horizon_expanding.go b/go/vt/vtgate/planbuilder/operators/horizon_expanding.go new file mode 100644 index 00000000000..42c91958a61 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/horizon_expanding.go @@ -0,0 +1,250 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "fmt" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +func expandHorizon(ctx *plancontext.PlanningContext, horizon *Horizon) (ops.Operator, *rewrite.ApplyResult, error) { + statement := horizon.selectStatement() + switch sel := statement.(type) { + case *sqlparser.Select: + return expandSelectHorizon(ctx, horizon, sel) + case *sqlparser.Union: + return expandUnionHorizon(ctx, horizon, sel) + } + return nil, nil, vterrors.VT13001(fmt.Sprintf("unexpected statement type %T", statement)) +} + +func expandUnionHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, union *sqlparser.Union) (ops.Operator, *rewrite.ApplyResult, error) { + op := horizon.Source + + qp, err := horizon.getQP(ctx) + if err != nil { + return nil, nil, err + } + + if len(qp.OrderExprs) > 0 { + op = &Ordering{ + Source: op, + Order: qp.OrderExprs, + } + } + + if union.Limit != nil { + op = &Limit{ + Source: op, + AST: union.Limit, + } + } + + if horizon.TableId != nil { + op = &Projection{ + Source: op, + TableID: horizon.TableId, + Alias: horizon.Alias, + } + } + + if op == horizon.Source { + return op, rewrite.NewTree("removed UNION horizon not used", op), nil + } + + return op, rewrite.NewTree("expand UNION horizon into smaller components", op), nil +} + +func expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel *sqlparser.Select) (ops.Operator, *rewrite.ApplyResult, error) { + op, err := createProjectionFromSelect(ctx, horizon) + if err != nil { + return nil, nil, err + } + + qp, err := horizon.getQP(ctx) + if err != nil { + return nil, nil, err + } + + if qp.NeedsDistinct() { + op = &Distinct{ + Required: true, + Source: op, + QP: qp, + } + } + + if sel.Having != nil { + op = &Filter{ + Source: op, + Predicates: sqlparser.SplitAndExpression(nil, sel.Having.Expr), + FinalPredicate: nil, + } + } + + if len(qp.OrderExprs) > 0 { + op = &Ordering{ + Source: op, + Order: qp.OrderExprs, + } + } + + if sel.Limit != nil { + op = &Limit{ + Source: op, + AST: sel.Limit, + } + } + + return op, rewrite.NewTree("expand SELECT horizon into smaller components", op), nil +} + +func createProjectionFromSelect(ctx *plancontext.PlanningContext, horizon *Horizon) (out ops.Operator, err error) { + qp, err := horizon.getQP(ctx) + if err != nil { + return nil, err + } + + if !qp.NeedsAggregation() { + projX, err := createProjectionWithoutAggr(qp, horizon.src()) + if err != nil { + return nil, err + } + projX.TableID = horizon.TableId + projX.Alias = horizon.Alias + out = projX + + return out, nil + } + + aggregations, complexAggr, err := qp.AggregationExpressions(ctx, true) + if err != nil { + return nil, err + } + + a := &Aggregator{ + Source: horizon.src(), + Original: true, + QP: qp, + Grouping: qp.GetGrouping(), + Aggregations: aggregations, + TableID: horizon.TableId, + Alias: horizon.Alias, + } + + if complexAggr { + return createProjectionForComplexAggregation(a, qp) + } + return createProjectionForSimpleAggregation(ctx, a, qp) +} + +func createProjectionForSimpleAggregation(ctx *plancontext.PlanningContext, a *Aggregator, qp *QueryProjection) (ops.Operator, error) { +outer: + for colIdx, expr := range qp.SelectExprs { + ae, err := expr.GetAliasedExpr() + if err != nil { + return nil, err + } + addedToCol := false + for idx, groupBy := range a.Grouping { + if ctx.SemTable.EqualsExprWithDeps(groupBy.SimplifiedExpr, ae.Expr) { + if !addedToCol { + a.Columns = append(a.Columns, ae) + addedToCol = true + } + if groupBy.ColOffset < 0 { + a.Grouping[idx].ColOffset = colIdx + } + } + } + if addedToCol { + continue + } + for idx, aggr := range a.Aggregations { + if ctx.SemTable.EqualsExprWithDeps(aggr.Original.Expr, ae.Expr) && aggr.ColOffset < 0 { + a.Columns = append(a.Columns, ae) + a.Aggregations[idx].ColOffset = colIdx + continue outer + } + } + return nil, vterrors.VT13001(fmt.Sprintf("Could not find the %s in aggregation in the original query", sqlparser.String(ae))) + } + return a, nil +} + +func createProjectionForComplexAggregation(a *Aggregator, qp *QueryProjection) (ops.Operator, error) { + p := &Projection{ + Source: a, + Alias: a.Alias, + TableID: a.TableID, + } + + for _, expr := range qp.SelectExprs { + ae, err := expr.GetAliasedExpr() + if err != nil { + return nil, err + } + p.Columns = append(p.Columns, ae) + p.Projections = append(p.Projections, UnexploredExpression{E: ae.Expr}) + } + for i, by := range a.Grouping { + a.Grouping[i].ColOffset = len(a.Columns) + a.Columns = append(a.Columns, aeWrap(by.SimplifiedExpr)) + } + for i, aggregation := range a.Aggregations { + a.Aggregations[i].ColOffset = len(a.Columns) + a.Columns = append(a.Columns, aggregation.Original) + } + return p, nil +} + +func createProjectionWithoutAggr(qp *QueryProjection, src ops.Operator) (*Projection, error) { + proj := &Projection{ + Source: src, + } + + for _, e := range qp.SelectExprs { + if _, isStar := e.Col.(*sqlparser.StarExpr); isStar { + return nil, errHorizonNotPlanned() + } + ae, err := e.GetAliasedExpr() + + if err != nil { + return nil, err + } + expr := ae.Expr + if sqlparser.ContainsAggregation(expr) { + aggr, ok := expr.(sqlparser.AggrFunc) + if !ok { + // need to add logic to extract aggregations and pushed them to the top level + return nil, vterrors.VT12001(fmt.Sprintf("unsupported aggregation expression: %s", sqlparser.String(expr))) + } + expr = aggr.GetArg() + if expr == nil { + expr = sqlparser.NewIntLiteral("1") + } + } + + proj.addUnexploredExpr(ae, expr) + } + return proj, nil +} diff --git a/go/vt/vtgate/planbuilder/operators/horizon_planning.go b/go/vt/vtgate/planbuilder/operators/horizon_planning.go index e8d881a3dda..f55a84fb6a1 100644 --- a/go/vt/vtgate/planbuilder/operators/horizon_planning.go +++ b/go/vt/vtgate/planbuilder/operators/horizon_planning.go @@ -17,43 +17,709 @@ limitations under the License. package operators import ( + "fmt" + "io" + + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" +type ( + projector struct { + cols []ProjExpr + names []*sqlparser.AliasedExpr + } ) -var errNotHorizonPlanned = vterrors.VT12001("query cannot be fully operator planned") +func errHorizonNotPlanned() error { + if rewrite.DebugOperatorTree { + fmt.Println("ERROR! Falling back on the old horizon planner") + } + return _errHorizonNotPlanned +} + +var _errHorizonNotPlanned = vterrors.VT12001("query cannot be fully operator planned") + +func tryHorizonPlanning(ctx *plancontext.PlanningContext, root ops.Operator) (output ops.Operator, err error) { + backup := Clone(root) + defer func() { + // If we encounter the _errHorizonNotPlanned error, we'll revert to using the old horizon planning strategy. + if err == _errHorizonNotPlanned { + // The only offset planning we did before was on joins. + // Therefore, we traverse the tree to find all joins and calculate the joinColumns offsets. + // Our fallback strategy is to clone the original operator tree, compute the join offsets, + // and allow the legacy horizonPlanner to handle this query using logical plans. + err = planOffsetsOnJoins(ctx, backup) + if err == nil { + output = backup + } + } + }() + + _, ok := root.(*Horizon) + + if !ok || len(ctx.SemTable.SubqueryMap) > 0 || len(ctx.SemTable.SubqueryRef) > 0 { + // we are not ready to deal with subqueries yet + return root, errHorizonNotPlanned() + } + + output, err = planHorizons(ctx, root) + if err != nil { + return nil, err + } + + output, err = planOffsets(ctx, output) + if err != nil { + return nil, err + } + + if rewrite.DebugOperatorTree { + fmt.Println("After offset planning:") + fmt.Println(ops.ToTree(output)) + } + + output, err = compact(ctx, output) + if err != nil { + return nil, err + } + + return addTruncationOrProjectionToReturnOutput(ctx, root, output) +} + +// planHorizons is the process of figuring out how to perform the operations in the Horizon +// If we can push it under a route - done. +// If we can't, we will instead expand the Horizon into +// smaller operators and try to push these down as far as possible +func planHorizons(ctx *plancontext.PlanningContext, root ops.Operator) (op ops.Operator, err error) { + phases := getPhases() + op = root + + for _, phase := range phases { + if phase.action != nil { + op, err = phase.action(ctx, op) + if err != nil { + return nil, err + } + } + if rewrite.DebugOperatorTree { + fmt.Printf("PHASE: %s\n", phase.Name) + } + op, err = optimizeHorizonPlanning(ctx, op) + if err != nil { + return nil, err + } -func planHorizons(in ops.Operator) (ops.Operator, error) { - return rewrite.TopDown(in, func(in ops.Operator) (ops.Operator, rewrite.TreeIdentity, rewrite.VisitRule, error) { + op, err = compact(ctx, op) + if err != nil { + return nil, err + } + } + + return addGroupByOnRHSOfJoin(op) +} + +func optimizeHorizonPlanning(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { + visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { switch in := in.(type) { case *Horizon: - op, err := planHorizon(in) + return pushOrExpandHorizon(ctx, in) + case *Projection: + return tryPushingDownProjection(ctx, in) + case *Limit: + return tryPushingDownLimit(in) + case *Ordering: + return tryPushingDownOrdering(ctx, in) + case *Aggregator: + return tryPushingDownAggregator(ctx, in) + case *Filter: + return tryPushingDownFilter(ctx, in) + case *Distinct: + return tryPushingDownDistinct(in) + case *Union: + return tryPushDownUnion(ctx, in) + default: + return in, rewrite.SameTree, nil + } + } + + return rewrite.FixedPointBottomUp(root, TableID, visitor, stopAtRoute) +} + +func pushOrExpandHorizon(ctx *plancontext.PlanningContext, in *Horizon) (ops.Operator, *rewrite.ApplyResult, error) { + if len(in.ColumnAliases) > 0 { + return nil, nil, errHorizonNotPlanned() + } + + rb, isRoute := in.src().(*Route) + if isRoute && rb.IsSingleShard() { + return rewrite.Swap(in, rb, "push horizon into route") + } + + sel, isSel := in.selectStatement().(*sqlparser.Select) + + qp, err := in.getQP(ctx) + if err != nil { + return nil, nil, err + } + + needsOrdering := len(qp.OrderExprs) > 0 + hasHaving := isSel && sel.Having != nil + + canPushDown := isRoute && + !hasHaving && + !needsOrdering && + !qp.NeedsAggregation() && + !in.selectStatement().IsDistinct() && + in.selectStatement().GetLimit() == nil + + if canPushDown { + return rewrite.Swap(in, rb, "push horizon into route") + } + + return expandHorizon(ctx, in) +} + +func tryPushingDownProjection( + ctx *plancontext.PlanningContext, + p *Projection, +) (ops.Operator, *rewrite.ApplyResult, error) { + switch src := p.Source.(type) { + case *Route: + return rewrite.Swap(p, src, "pushed projection under route") + case *ApplyJoin: + if p.FromAggr { + return p, rewrite.SameTree, nil + } + return pushDownProjectionInApplyJoin(ctx, p, src) + case *Vindex: + return pushDownProjectionInVindex(ctx, p, src) + default: + return p, rewrite.SameTree, nil + } +} + +func pushDownProjectionInVindex( + ctx *plancontext.PlanningContext, + p *Projection, + src *Vindex, +) (ops.Operator, *rewrite.ApplyResult, error) { + for _, column := range p.Projections { + expr := column.GetExpr() + _, err := src.AddColumns(ctx, true, []bool{false}, []*sqlparser.AliasedExpr{aeWrap(expr)}) + if err != nil { + return nil, nil, err + } + } + return src, rewrite.NewTree("push projection into vindex", p), nil +} + +func (p *projector) add(e ProjExpr, alias *sqlparser.AliasedExpr) { + p.cols = append(p.cols, e) + p.names = append(p.names, alias) +} + +// pushDownProjectionInApplyJoin pushes down a projection operation into an ApplyJoin operation. +// It processes each input column and creates new JoinColumns for the ApplyJoin operation based on +// the input column's expression. It also creates new Projection operators for the left and right +// children of the ApplyJoin operation, if needed. +func pushDownProjectionInApplyJoin( + ctx *plancontext.PlanningContext, + p *Projection, + src *ApplyJoin, +) (ops.Operator, *rewrite.ApplyResult, error) { + if src.LeftJoin { + // we can't push down expression evaluation to the rhs if we are not sure if it will even be executed + return p, rewrite.SameTree, nil + } + lhs, rhs := &projector{}, &projector{} + + src.JoinColumns = nil + for idx := 0; idx < len(p.Projections); idx++ { + err := splitProjectionAcrossJoin(ctx, src, lhs, rhs, p.Projections[idx], p.Columns[idx]) + if err != nil { + return nil, nil, err + } + } + + if p.TableID != nil { + err := exposeColumnsThroughDerivedTable(ctx, p, src, lhs) + if err != nil { + return nil, nil, err + } + } + + var err error + + // Create and update the Projection operators for the left and right children, if needed. + src.LHS, err = createProjectionWithTheseColumns(ctx, src.LHS, lhs, p.TableID, p.Alias) + if err != nil { + return nil, nil, err + } + + src.RHS, err = createProjectionWithTheseColumns(ctx, src.RHS, rhs, p.TableID, p.Alias) + if err != nil { + return nil, nil, err + } + + return src, rewrite.NewTree("split projection to either side of join", src), nil +} + +// splitProjectionAcrossJoin creates JoinColumns for all projections, +// and pushes down columns as needed between the LHS and RHS of a join +func splitProjectionAcrossJoin( + ctx *plancontext.PlanningContext, + join *ApplyJoin, + lhs, rhs *projector, + in ProjExpr, + colName *sqlparser.AliasedExpr, +) error { + expr := in.GetExpr() + + // Check if the current expression can reuse an existing column in the ApplyJoin. + if _, found := canReuseColumn(ctx, join.JoinColumns, expr, joinColumnToExpr); found { + return nil + } + + // Get a JoinColumn for the current expression. + col, err := join.getJoinColumnFor(ctx, colName, false) + if err != nil { + return err + } + + // Update the left and right child columns and names based on the JoinColumn type. + switch { + case col.IsPureLeft(): + lhs.add(in, colName) + case col.IsPureRight(): + rhs.add(in, colName) + case col.IsMixedLeftAndRight(): + for _, lhsExpr := range col.LHSExprs { + lhs.add(&UnexploredExpression{E: lhsExpr}, aeWrap(lhsExpr)) + } + rhs.add(&UnexploredExpression{E: col.RHSExpr}, &sqlparser.AliasedExpr{Expr: col.RHSExpr, As: colName.As}) + } + + // Add the new JoinColumn to the ApplyJoin's JoinColumns. + join.JoinColumns = append(join.JoinColumns, col) + return nil +} + +// exposeColumnsThroughDerivedTable rewrites expressions within a join that is inside a derived table +// in order to make them accessible outside the derived table. This is necessary when swapping the +// positions of the derived table and join operation. +// +// For example, consider the input query: +// select ... from (select T1.foo from T1 join T2 on T1.id = T2.id) as t +// If we push the derived table under the join, with T1 on the LHS of the join, we need to expose +// the values of T1.id through the derived table, or they will not be accessible on the RHS. +// +// The function iterates through each join predicate, rewriting the expressions in the predicate's +// LHS expressions to include the derived table. This allows the expressions to be accessed outside +// the derived table. +func exposeColumnsThroughDerivedTable(ctx *plancontext.PlanningContext, p *Projection, src *ApplyJoin, lhs *projector) error { + derivedTbl, err := ctx.SemTable.TableInfoFor(*p.TableID) + if err != nil { + return err + } + derivedTblName, err := derivedTbl.Name() + if err != nil { + return err + } + for _, predicate := range src.JoinPredicates { + for idx, expr := range predicate.LHSExprs { + tbl, err := ctx.SemTable.TableInfoForExpr(expr) + if err != nil { + return err + } + tblExpr := tbl.GetExpr() + tblName, err := tblExpr.TableName() if err != nil { - return nil, rewrite.SameTree, rewrite.SkipChildren, err + return err } - return op, rewrite.NewTree, rewrite.VisitChildren, nil + + expr = semantics.RewriteDerivedTableExpression(expr, derivedTbl) + out := prefixColNames(tblName, expr) + + alias := sqlparser.UnescapedString(out) + predicate.LHSExprs[idx] = sqlparser.NewColNameWithQualifier(alias, derivedTblName) + lhs.add(&UnexploredExpression{E: out}, &sqlparser.AliasedExpr{Expr: out, As: sqlparser.NewIdentifierCI(alias)}) + } + } + return nil +} + +// prefixColNames adds qualifier prefixes to all ColName:s. +// We want to be more explicit than the user was to make sure we never produce invalid SQL +func prefixColNames(tblName sqlparser.TableName, e sqlparser.Expr) sqlparser.Expr { + return sqlparser.CopyOnRewrite(e, nil, func(cursor *sqlparser.CopyOnWriteCursor) { + col, ok := cursor.Node().(*sqlparser.ColName) + if !ok { + return + } + col.Qualifier = tblName + }, nil).(sqlparser.Expr) +} + +func createProjectionWithTheseColumns( + ctx *plancontext.PlanningContext, + src ops.Operator, + p *projector, + tableID *semantics.TableSet, + alias string, +) (ops.Operator, error) { + if len(p.cols) == 0 { + return src, nil + } + proj, err := createProjection(ctx, src) + if err != nil { + return nil, err + } + proj.Columns = p.names + proj.Projections = p.cols + proj.TableID = tableID + proj.Alias = alias + return proj, nil +} + +func tryPushingDownLimit(in *Limit) (ops.Operator, *rewrite.ApplyResult, error) { + switch src := in.Source.(type) { + case *Route: + return tryPushingDownLimitInRoute(in, src) + case *Projection: + return rewrite.Swap(in, src, "push limit under projection") + case *Aggregator: + return in, rewrite.SameTree, nil + default: + return setUpperLimit(in) + } +} + +func tryPushingDownLimitInRoute(in *Limit, src *Route) (ops.Operator, *rewrite.ApplyResult, error) { + if src.IsSingleShard() { + return rewrite.Swap(in, src, "limit pushed into single sharded route") + } + + return setUpperLimit(in) +} + +func setUpperLimit(in *Limit) (ops.Operator, *rewrite.ApplyResult, error) { + if in.Pushed { + return in, rewrite.SameTree, nil + } + in.Pushed = true + visitor := func(op ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { + return op, rewrite.SameTree, nil + } + shouldVisit := func(op ops.Operator) rewrite.VisitRule { + switch op := op.(type) { + case *Join, *ApplyJoin: + // we can't push limits down on either side + return rewrite.SkipChildren case *Route: - return in, rewrite.SameTree, rewrite.SkipChildren, nil + newSrc := &Limit{ + Source: op.Source, + AST: &sqlparser.Limit{Rowcount: sqlparser.NewArgument("__upper_limit")}, + Pushed: false, + } + op.Source = newSrc + return rewrite.SkipChildren default: - return in, rewrite.SameTree, rewrite.VisitChildren, nil + return rewrite.VisitChildren + } + } + + _, err := rewrite.TopDown(in.Source, TableID, visitor, shouldVisit) + if err != nil { + return nil, nil, err + } + return in, rewrite.SameTree, nil +} + +func tryPushingDownOrdering(ctx *plancontext.PlanningContext, in *Ordering) (ops.Operator, *rewrite.ApplyResult, error) { + switch src := in.Source.(type) { + case *Route: + return rewrite.Swap(in, src, "push ordering under route") + case *ApplyJoin: + if canPushLeft(ctx, src, in.Order) { + // ApplyJoin is stable in regard to the columns coming from the LHS, + // so if all the ordering columns come from the LHS, we can push down the Ordering there + src.LHS, in.Source = in, src.LHS + return src, rewrite.NewTree("push down ordering on the LHS of a join", in), nil + } + case *Ordering: + // we'll just remove the order underneath. The top order replaces whatever was incoming + in.Source = src.Source + return in, rewrite.NewTree("remove double ordering", src), nil + case *Projection: + // we can move ordering under a projection if it's not introducing a column we're sorting by + for _, by := range in.Order { + if !fetchByOffset(by.SimplifiedExpr) { + return in, rewrite.SameTree, nil + } + } + return rewrite.Swap(in, src, "push ordering under projection") + case *Aggregator: + if !src.QP.AlignGroupByAndOrderBy(ctx) && !overlaps(ctx, in.Order, src.Grouping) { + return in, rewrite.SameTree, nil + } + + return pushOrderingUnderAggr(ctx, in, src) + + } + return in, rewrite.SameTree, nil +} + +func overlaps(ctx *plancontext.PlanningContext, order []ops.OrderBy, grouping []GroupBy) bool { +ordering: + for _, orderBy := range order { + for _, groupBy := range grouping { + if ctx.SemTable.EqualsExprWithDeps(orderBy.SimplifiedExpr, groupBy.SimplifiedExpr) { + continue ordering + } + } + return false + } + + return true +} + +func pushOrderingUnderAggr(ctx *plancontext.PlanningContext, order *Ordering, aggregator *Aggregator) (ops.Operator, *rewrite.ApplyResult, error) { + // Step 1: Align the GROUP BY and ORDER BY. + // Reorder the GROUP BY columns to match the ORDER BY columns. + // Since the GB clause is a set, we can reorder these columns freely. + var newGrouping []GroupBy + used := make([]bool, len(aggregator.Grouping)) + for _, orderExpr := range order.Order { + for grpIdx, by := range aggregator.Grouping { + if !used[grpIdx] && ctx.SemTable.EqualsExprWithDeps(by.SimplifiedExpr, orderExpr.SimplifiedExpr) { + newGrouping = append(newGrouping, by) + used[grpIdx] = true + } + } + } + + // Step 2: Add any missing columns from the ORDER BY. + // The ORDER BY column is not a set, but we can add more elements + // to the end without changing the semantics of the query. + if len(newGrouping) != len(aggregator.Grouping) { + // we are missing some groupings. We need to add them both to the new groupings list, but also to the ORDER BY + for i, added := range used { + if !added { + groupBy := aggregator.Grouping[i] + newGrouping = append(newGrouping, groupBy) + order.Order = append(order.Order, groupBy.AsOrderBy()) + } } - }) + } + + aggregator.Grouping = newGrouping + aggrSource, isOrdering := aggregator.Source.(*Ordering) + if isOrdering { + // Transform the query plan tree: + // From: Ordering(1) To: Aggregation + // | | + // Aggregation Ordering(1) + // | | + // Ordering(2) + // | + // + // + // Remove Ordering(2) from the plan tree, as it's redundant + // after pushing down the higher ordering. + order.Source = aggrSource.Source + aggrSource.Source = nil // removing from plan tree + aggregator.Source = order + return aggregator, rewrite.NewTree("push ordering under aggregation, removing extra ordering", aggregator), nil + } + return rewrite.Swap(order, aggregator, "push ordering under aggregation") } -func planHorizon(in *Horizon) (ops.Operator, error) { - rb, isRoute := in.Source.(*Route) - if !isRoute { - return in, nil +func canPushLeft(ctx *plancontext.PlanningContext, aj *ApplyJoin, order []ops.OrderBy) bool { + lhs := TableID(aj.LHS) + for _, order := range order { + deps := ctx.SemTable.DirectDeps(order.Inner.Expr) + if !deps.IsSolvedBy(lhs) { + return false + } } - if isRoute && rb.IsSingleShard() && in.Select.GetLimit() == nil { - return planSingleShardRoute(rb, in) + return true +} + +func tryPushingDownFilter(ctx *plancontext.PlanningContext, in *Filter) (ops.Operator, *rewrite.ApplyResult, error) { + switch src := in.Source.(type) { + case *Projection: + return pushFilterUnderProjection(ctx, in, src) + case *Route: + return rewrite.Swap(in, src, "push filter into Route") } - return nil, errNotHorizonPlanned + return in, rewrite.SameTree, nil } -func planSingleShardRoute(rb *Route, horizon *Horizon) (ops.Operator, error) { - rb.Source, horizon.Source = horizon, rb.Source - return rb, nil + +func pushFilterUnderProjection(ctx *plancontext.PlanningContext, filter *Filter, projection *Projection) (ops.Operator, *rewrite.ApplyResult, error) { + for _, p := range filter.Predicates { + cantPushDown := false + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + if !fetchByOffset(node) { + return true, nil + } + + if projection.needsEvaluation(ctx, node.(sqlparser.Expr)) { + cantPushDown = true + return false, io.EOF + } + + return true, nil + }, p) + + if cantPushDown { + return filter, rewrite.SameTree, nil + } + } + return rewrite.Swap(filter, projection, "push filter under projection") + +} + +func tryPushingDownDistinct(in *Distinct) (ops.Operator, *rewrite.ApplyResult, error) { + if in.Required && in.PushedPerformance { + return in, rewrite.SameTree, nil + } + switch src := in.Source.(type) { + case *Route: + if isDistinct(src.Source) && src.IsSingleShard() { + return src, rewrite.NewTree("distinct not needed", in), nil + } + if src.IsSingleShard() || !in.Required { + return rewrite.Swap(in, src, "push distinct under route") + } + + if isDistinct(src.Source) { + return in, rewrite.SameTree, nil + } + + src.Source = &Distinct{Source: src.Source} + in.PushedPerformance = true + + return in, rewrite.NewTree("added distinct under route - kept original", src), nil + case *Distinct: + src.Required = false + src.PushedPerformance = false + return src, rewrite.NewTree("removed double distinct", src), nil + case *Union: + for i := range src.Sources { + src.Sources[i] = &Distinct{Source: src.Sources[i]} + } + in.PushedPerformance = true + + return in, rewrite.NewTree("pushed down DISTINCT under UNION", src), nil + case *ApplyJoin: + src.LHS = &Distinct{Source: src.LHS} + src.RHS = &Distinct{Source: src.RHS} + in.PushedPerformance = true + + if in.Required { + return in, rewrite.NewTree("pushed distinct under join - kept original", in.Source), nil + } + + return in.Source, rewrite.NewTree("pushed distinct under join", in.Source), nil + case *Ordering: + in.Source = src.Source + return in, rewrite.NewTree("removed ordering under distinct", in), nil + } + + return in, rewrite.SameTree, nil +} + +func isDistinct(op ops.Operator) bool { + switch op := op.(type) { + case *Distinct: + return true + case *Union: + return op.distinct + case *Horizon: + return op.Query.IsDistinct() + case *Limit: + return isDistinct(op.Source) + default: + return false + } +} + +func tryPushDownUnion(ctx *plancontext.PlanningContext, op *Union) (ops.Operator, *rewrite.ApplyResult, error) { + if res := compactUnion(op); res != rewrite.SameTree { + return op, res, nil + } + + var sources []ops.Operator + var selects []sqlparser.SelectExprs + var err error + + if op.distinct { + sources, selects, err = mergeUnionInputInAnyOrder(ctx, op) + } else { + sources, selects, err = mergeUnionInputsInOrder(ctx, op) + } + if err != nil { + return nil, nil, err + } + + if len(sources) == 1 { + result := sources[0].(*Route) + if result.IsSingleShard() || !op.distinct { + return result, rewrite.NewTree("pushed union under route", op), nil + } + + return &Distinct{ + Source: result, + Required: true, + }, rewrite.NewTree("pushed union under route", op), nil + } + + if len(sources) == len(op.Sources) { + return op, rewrite.SameTree, nil + } + return newUnion(sources, selects, op.unionColumns, op.distinct), rewrite.NewTree("merged union inputs", op), nil +} + +// addTruncationOrProjectionToReturnOutput uses the original Horizon to make sure that the output columns line up with what the user asked for +func addTruncationOrProjectionToReturnOutput(ctx *plancontext.PlanningContext, oldHorizon ops.Operator, output ops.Operator) (ops.Operator, error) { + cols, err := output.GetSelectExprs(ctx) + if err != nil { + return nil, err + } + + horizon := oldHorizon.(*Horizon) + + sel := sqlparser.GetFirstSelect(horizon.Query) + + if len(sel.SelectExprs) == len(cols) { + return output, nil + } + + if tryTruncateColumnsAt(output, len(sel.SelectExprs)) { + return output, nil + } + + qp, err := horizon.getQP(ctx) + if err != nil { + return nil, err + } + proj, err := createSimpleProjection(ctx, qp, output) + if err != nil { + return nil, err + } + return proj, nil +} + +func stopAtRoute(operator ops.Operator) rewrite.VisitRule { + _, isRoute := operator.(*Route) + return rewrite.VisitRule(!isRoute) +} + +func aeWrap(e sqlparser.Expr) *sqlparser.AliasedExpr { + return &sqlparser.AliasedExpr{Expr: e} } diff --git a/go/vt/vtgate/planbuilder/operators/info_schema_planning.go b/go/vt/vtgate/planbuilder/operators/info_schema_planning.go index 2b665fea91d..26ada14b6d7 100644 --- a/go/vt/vtgate/planbuilder/operators/info_schema_planning.go +++ b/go/vt/vtgate/planbuilder/operators/info_schema_planning.go @@ -17,14 +17,10 @@ limitations under the License. package operators import ( + "maps" + "slices" "strings" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/vindexes" - - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/servenv" @@ -32,7 +28,9 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/vindexes" ) // InfoSchemaRouting used for information_schema queries. @@ -47,7 +45,10 @@ type InfoSchemaRouting struct { func (isr *InfoSchemaRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { rp.SysTableTableSchema = nil for _, expr := range isr.SysTableTableSchema { - eexpr, err := evalengine.Translate(expr, ¬ImplementedSchemaInfoConverter{}) + eexpr, err := evalengine.Translate(expr, &evalengine.Config{ + Collation: collations.SystemCollation.Collation, + ResolveColumn: NotImplementedSchemaInfoResolver, + }) if err != nil { return err } @@ -56,7 +57,10 @@ func (isr *InfoSchemaRouting) UpdateRoutingParams(_ *plancontext.PlanningContext rp.SysTableTableName = make(map[string]evalengine.Expr, len(isr.SysTableTableName)) for k, expr := range isr.SysTableTableName { - eexpr, err := evalengine.Translate(expr, ¬ImplementedSchemaInfoConverter{}) + eexpr, err := evalengine.Translate(expr, &evalengine.Config{ + Collation: collations.SystemCollation.Collation, + ResolveColumn: NotImplementedSchemaInfoResolver, + }) if err != nil { return err } @@ -127,7 +131,10 @@ func extractInfoSchemaRoutingPredicate(in sqlparser.Expr, reservedVars *sqlparse // here we are just checking if this query can be translated to an evalengine expression // we'll need to do this translation again later when building the engine.Route - _, err := evalengine.Translate(rhs, ¬ImplementedSchemaInfoConverter{}) + _, err := evalengine.Translate(rhs, &evalengine.Config{ + Collation: collations.SystemCollation.Collation, + ResolveColumn: NotImplementedSchemaInfoResolver, + }) if err != nil { // if we can't translate this to an evalengine expression, // we are not going to be able to route based on this expression, @@ -140,7 +147,7 @@ func extractInfoSchemaRoutingPredicate(in sqlparser.Expr, reservedVars *sqlparse } else { name = reservedVars.ReserveColName(col) } - cmp.Right = sqlparser.NewArgument(name) + cmp.Right = sqlparser.NewTypedArgument(name, sqltypes.VarChar) return isSchemaName, name, rhs } @@ -306,16 +313,6 @@ func isTableNameCol(col *sqlparser.ColName) bool { return col.Name.EqualString("table_name") || col.Name.EqualString("referenced_table_name") } -type notImplementedSchemaInfoConverter struct{} - -func (f *notImplementedSchemaInfoConverter) ColumnLookup(*sqlparser.ColName) (int, error) { +func NotImplementedSchemaInfoResolver(*sqlparser.ColName) (int, error) { return 0, vterrors.VT12001("comparing table schema name with a column name") } - -func (f *notImplementedSchemaInfoConverter) CollationForExpr(sqlparser.Expr) collations.ID { - return collations.Unknown -} - -func (f *notImplementedSchemaInfoConverter) DefaultCollation() collations.ID { - return collations.Default() -} diff --git a/go/vt/vtgate/planbuilder/operators/insert.go b/go/vt/vtgate/planbuilder/operators/insert.go new file mode 100644 index 00000000000..9853e714c47 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/insert.go @@ -0,0 +1,451 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "strconv" + + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +// Insert represents an insert operation on a table. +type Insert struct { + // VTable represents the target table for the insert operation. + VTable *vindexes.Table + // AST represents the insert statement from the SQL syntax. + AST *sqlparser.Insert + + // AutoIncrement represents the auto-increment generator for the insert operation. + AutoIncrement *Generate + // Ignore specifies whether to ignore duplicate key errors during insertion. + Ignore bool + // ForceNonStreaming when true, select first then insert, this is to avoid locking rows by select for insert. + ForceNonStreaming bool + + // ColVindexes are the vindexes that will use the VindexValues or VindexValueOffset + ColVindexes []*vindexes.ColumnVindex + + // VindexValues specifies values for all the vindex columns. + VindexValues [][][]evalengine.Expr + + // VindexValueOffset stores the offset for each column in the ColumnVindex + // that will appear in the result set of the select query. + VindexValueOffset [][]int + + // Insert using select query will have select plan as input operator for the insert operation. + Input ops.Operator + + noColumns + noPredicates +} + +func (i *Insert) Inputs() []ops.Operator { + if i.Input == nil { + return nil + } + return []ops.Operator{i.Input} +} + +func (i *Insert) SetInputs(inputs []ops.Operator) { + if len(inputs) > 0 { + i.Input = inputs[0] + } +} + +// Generate represents an auto-increment generator for the insert operation. +type Generate struct { + // Keyspace represents the keyspace information for the table. + Keyspace *vindexes.Keyspace + // TableName represents the name of the table. + TableName sqlparser.TableName + + // Values are the supplied values for the column, which + // will be stored as a list within the expression. New + // values will be generated based on how many were not + // supplied (NULL). + Values evalengine.Expr + // Insert using Select, offset for auto increment column + Offset int + + // added indicates whether the auto-increment column was already present in the insert column list or added. + added bool +} + +func (i *Insert) ShortDescription() string { + return i.VTable.String() +} + +func (i *Insert) GetOrdering() ([]ops.OrderBy, error) { + return nil, nil +} + +var _ ops.Operator = (*Insert)(nil) + +func (i *Insert) Clone(inputs []ops.Operator) ops.Operator { + var input ops.Operator + if len(inputs) > 0 { + input = inputs[0] + } + return &Insert{ + Input: input, + VTable: i.VTable, + AST: i.AST, + AutoIncrement: i.AutoIncrement, + Ignore: i.Ignore, + ForceNonStreaming: i.ForceNonStreaming, + ColVindexes: i.ColVindexes, + VindexValues: i.VindexValues, + VindexValueOffset: i.VindexValueOffset, + } +} + +func (i *Insert) TablesUsed() []string { + return SingleQualifiedIdentifier(i.VTable.Keyspace, i.VTable.Name) +} + +func (i *Insert) Statement() sqlparser.Statement { + return i.AST +} + +func createOperatorFromInsert(ctx *plancontext.PlanningContext, ins *sqlparser.Insert) (ops.Operator, error) { + tableInfo, qt, err := createQueryTableForDML(ctx, ins.Table, nil) + if err != nil { + return nil, err + } + + vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "insert") + if err != nil { + return nil, err + } + + insOp, err := createInsertOperator(ctx, ins, vindexTable, routing) + if err != nil { + return nil, err + } + + // Find the foreign key mode and for unmanaged foreign-key-mode, we don't need to do anything. + ksMode, err := ctx.VSchema.ForeignKeyMode(vindexTable.Keyspace.Name) + if err != nil { + return nil, err + } + if ksMode != vschemapb.Keyspace_FK_MANAGED { + return insOp, nil + } + + parentFKsForInsert := vindexTable.ParentFKsNeedsHandling(ctx.VerifyAllFKs, ctx.ParentFKToIgnore) + if len(parentFKsForInsert) > 0 { + return nil, vterrors.VT12002() + } + if len(ins.OnDup) == 0 { + return insOp, nil + } + + parentFksForUpdate, childFksForUpdate := getFKRequirementsForUpdate(ctx, sqlparser.UpdateExprs(ins.OnDup), vindexTable) + if len(parentFksForUpdate) == 0 && len(childFksForUpdate) == 0 { + return insOp, nil + } + return nil, vterrors.VT12001("ON DUPLICATE KEY UPDATE with foreign keys") +} + +func createInsertOperator(ctx *plancontext.PlanningContext, insStmt *sqlparser.Insert, vTbl *vindexes.Table, routing Routing) (ops.Operator, error) { + if _, target := routing.(*TargetedRouting); target { + return nil, vterrors.VT12001("INSERT with a target destination") + } + + insOp := &Insert{ + VTable: vTbl, + AST: insStmt, + } + route := &Route{ + Source: insOp, + Routing: routing, + } + + // Table column list is nil then add all the columns + // If the column list is empty then add only the auto-inc column and + // this happens on calling modifyForAutoinc + if insStmt.Columns == nil && valuesProvided(insStmt.Rows) { + if vTbl.ColumnListAuthoritative { + insStmt = populateInsertColumnlist(insStmt, vTbl) + } else { + return nil, vterrors.VT09004() + } + } + + // modify column list or values for autoincrement column. + autoIncGen, err := modifyForAutoinc(insStmt, vTbl) + if err != nil { + return nil, err + } + insOp.AutoIncrement = autoIncGen + + // set insert ignore. + insOp.Ignore = bool(insStmt.Ignore) || insStmt.OnDup != nil + + insOp.ColVindexes = getColVindexes(insOp) + switch rows := insStmt.Rows.(type) { + case sqlparser.Values: + route.Source, err = insertRowsPlan(insOp, insStmt, rows) + if err != nil { + return nil, err + } + case sqlparser.SelectStatement: + route.Source, err = insertSelectPlan(ctx, insOp, insStmt, rows) + if err != nil { + return nil, err + } + } + return route, nil +} + +func insertSelectPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlparser.Insert, sel sqlparser.SelectStatement) (*Insert, error) { + if columnMismatch(insOp.AutoIncrement, ins, sel) { + return nil, vterrors.VT03006() + } + + selOp, err := PlanQuery(ctx, sel) + if err != nil { + return nil, err + } + + // select plan will be taken as input to insert rows into the table. + insOp.Input = selOp + + // When the table you are steaming data from and table you are inserting from are same. + // Then due to locking of the index range on the table we might not be able to insert into the table. + // Therefore, instead of streaming, this flag will ensure the records are first read and then inserted. + insertTbl := insOp.TablesUsed()[0] + selTables := TablesUsed(selOp) + for _, tbl := range selTables { + if insertTbl == tbl { + insOp.ForceNonStreaming = true + break + } + } + + if len(insOp.ColVindexes) == 0 { + return insOp, nil + } + + colVindexes := insOp.ColVindexes + vv := make([][]int, len(colVindexes)) + for idx, colVindex := range colVindexes { + for _, col := range colVindex.Columns { + err := checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) + if err != nil { + return nil, err + } + + colNum := findColumn(ins, col) + // sharding column values should be provided in the insert. + if colNum == -1 && idx == 0 { + return nil, vterrors.VT09003(col) + } + vv[idx] = append(vv[idx], colNum) + } + } + insOp.VindexValueOffset = vv + return insOp, nil +} + +func columnMismatch(gen *Generate, ins *sqlparser.Insert, sel sqlparser.SelectStatement) bool { + origColCount := len(ins.Columns) + if gen != nil && gen.added { + // One column got added to the insert query ast for auto increment column. + // adjusting it here for comparison. + origColCount-- + } + if origColCount < sel.GetColumnCount() { + return true + } + if origColCount > sel.GetColumnCount() { + sel := sqlparser.GetFirstSelect(sel) + var hasStarExpr bool + for _, sExpr := range sel.SelectExprs { + if _, hasStarExpr = sExpr.(*sqlparser.StarExpr); hasStarExpr { + break + } + } + if !hasStarExpr { + return true + } + } + return false +} + +func insertRowsPlan(insOp *Insert, ins *sqlparser.Insert, rows sqlparser.Values) (*Insert, error) { + for _, row := range rows { + if len(ins.Columns) != len(row) { + return nil, vterrors.VT03006() + } + } + + if len(insOp.ColVindexes) == 0 { + return insOp, nil + } + + colVindexes := insOp.ColVindexes + routeValues := make([][][]evalengine.Expr, len(colVindexes)) + for vIdx, colVindex := range colVindexes { + routeValues[vIdx] = make([][]evalengine.Expr, len(colVindex.Columns)) + for colIdx, col := range colVindex.Columns { + err := checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) + if err != nil { + return nil, err + } + routeValues[vIdx][colIdx] = make([]evalengine.Expr, len(rows)) + colNum, _ := findOrAddColumn(ins, col) + for rowNum, row := range rows { + innerpv, err := evalengine.Translate(row[colNum], nil) + if err != nil { + return nil, err + } + routeValues[vIdx][colIdx][rowNum] = innerpv + } + } + } + // here we are replacing the row value with the argument. + for _, colVindex := range colVindexes { + for _, col := range colVindex.Columns { + colNum, _ := findOrAddColumn(ins, col) + for rowNum, row := range rows { + name := engine.InsertVarName(col, rowNum) + row[colNum] = sqlparser.NewArgument(name) + } + } + } + insOp.VindexValues = routeValues + return insOp, nil +} + +func valuesProvided(rows sqlparser.InsertRows) bool { + switch values := rows.(type) { + case sqlparser.Values: + return len(values) >= 0 && len(values[0]) > 0 + case sqlparser.SelectStatement: + return true + } + return false +} + +func getColVindexes(insOp *Insert) (colVindexes []*vindexes.ColumnVindex) { + // For unsharded table the Column Vindex does not mean anything. + // And therefore should be ignored. + if !insOp.VTable.Keyspace.Sharded { + return + } + for _, colVindex := range insOp.VTable.ColumnVindexes { + if colVindex.IsPartialVindex() { + continue + } + colVindexes = append(colVindexes, colVindex) + } + return +} + +func checkAndErrIfVindexChanging(setClauses sqlparser.UpdateExprs, col sqlparser.IdentifierCI) error { + for _, assignment := range setClauses { + if col.Equal(assignment.Name.Name) { + valueExpr, isValuesFuncExpr := assignment.Expr.(*sqlparser.ValuesFuncExpr) + // update on duplicate key is changing the vindex column, not supported. + if !isValuesFuncExpr || !valueExpr.Name.Name.Equal(assignment.Name.Name) { + return vterrors.VT12001("DML cannot update vindex column") + } + return nil + } + } + return nil +} + +// findOrAddColumn finds the position of a column in the insert. If it's +// absent it appends it to the with NULL values. +// It returns the position of the column and also boolean representing whether it was added or already present. +func findOrAddColumn(ins *sqlparser.Insert, col sqlparser.IdentifierCI) (int, bool) { + colNum := findColumn(ins, col) + if colNum >= 0 { + return colNum, false + } + colOffset := len(ins.Columns) + ins.Columns = append(ins.Columns, col) + if rows, ok := ins.Rows.(sqlparser.Values); ok { + for i := range rows { + rows[i] = append(rows[i], &sqlparser.NullVal{}) + } + } + return colOffset, true +} + +// findColumn returns the column index where it is placed on the insert column list. +// Otherwise, return -1 when not found. +func findColumn(ins *sqlparser.Insert, col sqlparser.IdentifierCI) int { + for i, column := range ins.Columns { + if col.Equal(column) { + return i + } + } + return -1 +} + +func populateInsertColumnlist(ins *sqlparser.Insert, table *vindexes.Table) *sqlparser.Insert { + cols := make(sqlparser.Columns, 0, len(table.Columns)) + for _, c := range table.Columns { + cols = append(cols, c.Name) + } + ins.Columns = cols + return ins +} + +// modifyForAutoinc modifies the AST and the plan to generate necessary autoinc values. +// For row values cases, bind variable names are generated using baseName. +func modifyForAutoinc(ins *sqlparser.Insert, vTable *vindexes.Table) (*Generate, error) { + if vTable.AutoIncrement == nil { + return nil, nil + } + gen := &Generate{ + Keyspace: vTable.AutoIncrement.Sequence.Keyspace, + TableName: sqlparser.TableName{Name: vTable.AutoIncrement.Sequence.Name}, + } + colNum, newColAdded := findOrAddColumn(ins, vTable.AutoIncrement.Column) + switch rows := ins.Rows.(type) { + case sqlparser.SelectStatement: + gen.Offset = colNum + gen.added = newColAdded + case sqlparser.Values: + autoIncValues := make([]evalengine.Expr, 0, len(rows)) + for rowNum, row := range rows { + // Support the DEFAULT keyword by treating it as null + if _, ok := row[colNum].(*sqlparser.Default); ok { + row[colNum] = &sqlparser.NullVal{} + } + expr, err := evalengine.Translate(row[colNum], nil) + if err != nil { + return nil, err + } + autoIncValues = append(autoIncValues, expr) + row[colNum] = sqlparser.NewArgument(engine.SeqVarName + strconv.Itoa(rowNum)) + } + gen.Values = evalengine.NewTupleExpr(autoIncValues...) + } + return gen, nil +} diff --git a/go/vt/vtgate/planbuilder/operators/join.go b/go/vt/vtgate/planbuilder/operators/join.go index 3cad6c6bd80..dd119625902 100644 --- a/go/vt/vtgate/planbuilder/operators/join.go +++ b/go/vt/vtgate/planbuilder/operators/join.go @@ -47,12 +47,21 @@ func (j *Join) Clone(inputs []ops.Operator) ops.Operator { } } +func (j *Join) GetOrdering() ([]ops.OrderBy, error) { + return nil, nil +} + // Inputs implements the Operator interface func (j *Join) Inputs() []ops.Operator { return []ops.Operator{j.LHS, j.RHS} } -func (j *Join) Compact(ctx *plancontext.PlanningContext) (ops.Operator, rewrite.TreeIdentity, error) { +// SetInputs implements the Operator interface +func (j *Join) SetInputs(ops []ops.Operator) { + j.LHS, j.RHS = ops[0], ops[1] +} + +func (j *Join) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) { if j.LeftJoin { // we can't merge outer joins into a single QG return j, rewrite.SameTree, nil @@ -70,12 +79,9 @@ func (j *Join) Compact(ctx *plancontext.PlanningContext) (ops.Operator, rewrite. NoDeps: ctx.SemTable.AndExpressions(lqg.NoDeps, rqg.NoDeps), } if j.Predicate != nil { - err := newOp.collectPredicate(ctx, j.Predicate) - if err != nil { - return nil, rewrite.SameTree, err - } + newOp.collectPredicate(ctx, j.Predicate) } - return newOp, rewrite.NewTree, nil + return newOp, rewrite.NewTree("merge querygraphs into a single one", newOp), nil } func createOuterJoin(tableExpr *sqlparser.JoinTableExpr, lhs, rhs ops.Operator) (ops.Operator, error) { @@ -116,7 +122,7 @@ func createInnerJoin(ctx *plancontext.PlanningContext, tableExpr *sqlparser.Join } func (j *Join) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { - return AddPredicate(j, ctx, expr, false, newFilter) + return AddPredicate(ctx, j, expr, false, newFilter) } var _ JoinOp = (*Join)(nil) @@ -149,3 +155,7 @@ func (j *Join) AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser j.Predicate = ctx.SemTable.AndExpressions(j.Predicate, expr) return nil } + +func (j *Join) ShortDescription() string { + return sqlparser.String(j.Predicate) +} diff --git a/go/vt/vtgate/planbuilder/operators/merging.go b/go/vt/vtgate/planbuilder/operators/join_merging.go similarity index 87% rename from go/vt/vtgate/planbuilder/operators/merging.go rename to go/vt/vtgate/planbuilder/operators/join_merging.go index 369016e28b9..b39949e2d2a 100644 --- a/go/vt/vtgate/planbuilder/operators/merging.go +++ b/go/vt/vtgate/planbuilder/operators/join_merging.go @@ -21,30 +21,19 @@ import ( "reflect" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) -// Merge checks whether two operators can be merged into a single one. +// mergeJoinInputs checks whether two operators can be merged into a single one. // If they can be merged, a new operator with the merged routing is returned // If they cannot be merged, nil is returned. -func Merge(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr, m merger) (ops.Operator, error) { - lhsRoute, rhsRoute := operatorsToRoutes(lhs, rhs) - if lhsRoute == nil || rhsRoute == nil { +func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr, m merger) (ops.Operator, error) { + lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(lhs, rhs) + if lhsRoute == nil { return nil, nil } - lhsRoute, rhsRoute, routingA, routingB, sameKeyspace := getRoutesOrAlternates(lhsRoute, rhsRoute) - - a, b := getRoutingType(routingA), getRoutingType(routingB) - if getTypeName(routingA) < getTypeName(routingB) { - // while deciding if two routes can be merged, the LHS/RHS order of the routes is not important. - // for the actual merging, we still need to remember which side was inner and which was outer for subqueries - a, b = b, a - routingA, routingB = routingB, routingA - } - switch { // if either side is a dual query, we can always merge them together case a == dual: @@ -70,16 +59,35 @@ func Merge(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPredicat // sharded routing is complex, so we handle it in a separate method case a == sharded && b == sharded: - return tryMergeShardedRouting(ctx, lhsRoute, rhsRoute, m, joinPredicates) + return tryMergeJoinShardedRouting(ctx, lhsRoute, rhsRoute, m, joinPredicates) default: return nil, nil } } +func prepareInputRoutes(lhs ops.Operator, rhs ops.Operator) (*Route, *Route, Routing, Routing, routingType, routingType, bool) { + lhsRoute, rhsRoute := operatorsToRoutes(lhs, rhs) + if lhsRoute == nil || rhsRoute == nil { + return nil, nil, nil, nil, 0, 0, false + } + + lhsRoute, rhsRoute, routingA, routingB, sameKeyspace := getRoutesOrAlternates(lhsRoute, rhsRoute) + + a, b := getRoutingType(routingA), getRoutingType(routingB) + if getTypeName(routingA) < getTypeName(routingB) { + // while deciding if two routes can be merged, the LHS/RHS order of the routes is not important. + // for the actual merging, we still need to remember which side was inner and which was outer for subqueries + a, b = b, a + routingA, routingB = routingB, routingA + } + + return lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace +} + type ( merger interface { - mergeTables(r1, r2 *ShardedRouting, op1, op2 *Route) (*Route, error) + mergeShardedRouting(r1, r2 *ShardedRouting, op1, op2 *Route) (*Route, error) merge(op1, op2 *Route, r Routing) (*Route, error) } @@ -189,12 +197,12 @@ func newJoinMerge(ctx *plancontext.PlanningContext, predicates []sqlparser.Expr, } } -func (jm *joinMerger) mergeTables(r1, r2 *ShardedRouting, op1, op2 *Route) (*Route, error) { +func (jm *joinMerger) mergeShardedRouting(r1, r2 *ShardedRouting, op1, op2 *Route) (*Route, error) { tr := &ShardedRouting{ VindexPreds: append(r1.VindexPreds, r2.VindexPreds...), keyspace: r1.keyspace, RouteOpCode: r1.RouteOpCode, - SeenPredicates: r1.SeenPredicates, + SeenPredicates: append(r1.SeenPredicates, r2.SeenPredicates...), } if r1.SelectedVindex() == r2.SelectedVindex() { tr.Selected = r1.Selected @@ -237,7 +245,7 @@ func (s *subQueryMerger) markPredicateInOuterRouting(outer *ShardedRouting, inne // predicates list, so this might be a no-op. subQueryWasPredicate := false for i, predicate := range outer.SeenPredicates { - if s.ctx.SemTable.EqualsExpr(predicate, s.subq.ExtractedSubquery) { + if s.ctx.SemTable.EqualsExprWithDeps(predicate, s.subq.ExtractedSubquery) { outer.SeenPredicates = append(outer.SeenPredicates[:i], outer.SeenPredicates[i+1:]...) subQueryWasPredicate = true @@ -273,7 +281,7 @@ func (s *subQueryMerger) markPredicateInOuterRouting(outer *ShardedRouting, inne } } -func (s *subQueryMerger) mergeTables(outer, inner *ShardedRouting, op1, op2 *Route) (*Route, error) { +func (s *subQueryMerger) mergeShardedRouting(outer, inner *ShardedRouting, op1, op2 *Route) (*Route, error) { s.subq.ExtractedSubquery.Merged = true routing, err := s.markPredicateInOuterRouting(outer, inner) @@ -301,8 +309,8 @@ func (s *subQueryMerger) merge(outer, inner *Route, routing Routing) (*Route, er return outer, nil } -func (d *mergeDecorator) mergeTables(outer, inner *ShardedRouting, op1, op2 *Route) (*Route, error) { - merged, err := d.inner.mergeTables(outer, inner, op1, op2) +func (d *mergeDecorator) mergeShardedRouting(outer, inner *ShardedRouting, op1, op2 *Route) (*Route, error) { + merged, err := d.inner.mergeShardedRouting(outer, inner, op1, op2) if err != nil { return nil, err } diff --git a/go/vt/vtgate/planbuilder/operators/joins.go b/go/vt/vtgate/planbuilder/operators/joins.go index a91f6b43ffc..2d7451b83d4 100644 --- a/go/vt/vtgate/planbuilder/operators/joins.go +++ b/go/vt/vtgate/planbuilder/operators/joins.go @@ -34,7 +34,13 @@ type JoinOp interface { AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) error } -func AddPredicate(join JoinOp, ctx *plancontext.PlanningContext, expr sqlparser.Expr, joinPredicates bool, newFilter func(ops.Operator, sqlparser.Expr) ops.Operator) (ops.Operator, error) { +func AddPredicate( + ctx *plancontext.PlanningContext, + join JoinOp, + expr sqlparser.Expr, + joinPredicates bool, + newFilter func(ops.Operator, sqlparser.Expr) ops.Operator, +) (ops.Operator, error) { deps := ctx.SemTable.RecursiveDeps(expr) switch { case deps.IsSolvedBy(TableID(join.GetLHS())): @@ -48,7 +54,7 @@ func AddPredicate(join JoinOp, ctx *plancontext.PlanningContext, expr sqlparser. case deps.IsSolvedBy(TableID(join.GetRHS())): // if we are dealing with an outer join, always start by checking if this predicate can turn // the join into an inner join - if !join.IsInner() && canConvertToInner(ctx, expr, TableID(join.GetRHS())) { + if !joinPredicates && !join.IsInner() && canConvertToInner(ctx, expr, TableID(join.GetRHS())) { join.MakeInner() } @@ -100,7 +106,7 @@ func AddPredicate(join JoinOp, ctx *plancontext.PlanningContext, expr sqlparser. // matched no rows on the right-hand, if we are later going to remove all the rows where the right-hand // side did not match, we might as well turn the join into an inner join. // -// This is based on the paper "Canonical Abstraction for Outerjoin Optimization" by J Rao et al +// This is based on the paper "Canonical Abstraction for Outerjoin Optimization" by J Rao et al. func canConvertToInner(ctx *plancontext.PlanningContext, expr sqlparser.Expr, rhs semantics.TableSet) bool { isColNameFromRHS := func(e sqlparser.Expr) bool { return sqlparser.IsColName(e) && ctx.SemTable.RecursiveDeps(e).IsSolvedBy(rhs) diff --git a/go/vt/vtgate/planbuilder/operators/limit.go b/go/vt/vtgate/planbuilder/operators/limit.go new file mode 100644 index 00000000000..ff4b46ad78c --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/limit.go @@ -0,0 +1,81 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +type Limit struct { + Source ops.Operator + AST *sqlparser.Limit + + // Pushed marks whether the limit has been pushed down to the inputs but still need to keep the operator around. + // For example, `select * from user order by id limit 10`. Even after we push the limit to the route, we need a limit on top + // since it is a scatter. + Pushed bool +} + +func (l *Limit) Clone(inputs []ops.Operator) ops.Operator { + return &Limit{ + Source: inputs[0], + AST: sqlparser.CloneRefOfLimit(l.AST), + } +} + +func (l *Limit) Inputs() []ops.Operator { + return []ops.Operator{l.Source} +} + +func (l *Limit) SetInputs(operators []ops.Operator) { + l.Source = operators[0] +} + +func (l *Limit) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { + newSrc, err := l.Source.AddPredicate(ctx, expr) + if err != nil { + return nil, err + } + l.Source = newSrc + return l, nil +} + +func (l *Limit) AddColumns(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) ([]int, error) { + return l.Source.AddColumns(ctx, reuse, addToGroupBy, exprs) +} + +func (l *Limit) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + return l.Source.FindCol(ctx, expr, underRoute) +} + +func (l *Limit) GetColumns(ctx *plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return l.Source.GetColumns(ctx) +} + +func (l *Limit) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return l.Source.GetSelectExprs(ctx) +} + +func (l *Limit) GetOrdering() ([]ops.OrderBy, error) { + return l.Source.GetOrdering() +} + +func (l *Limit) ShortDescription() string { + return sqlparser.String(l.AST) +} diff --git a/go/vt/vtgate/planbuilder/operators/offset_planning.go b/go/vt/vtgate/planbuilder/operators/offset_planning.go new file mode 100644 index 00000000000..8a8095a58e6 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/offset_planning.go @@ -0,0 +1,195 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "fmt" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +// planOffsets will walk the tree top down, adding offset information to columns in the tree for use in further optimization, +func planOffsets(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { + type offsettable interface { + planOffsets(ctx *plancontext.PlanningContext) error + } + + visitor := func(in ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { + var err error + switch op := in.(type) { + case *Horizon: + return nil, nil, vterrors.VT13001(fmt.Sprintf("should not see %T here", in)) + case offsettable: + err = op.planOffsets(ctx) + } + if err != nil { + return nil, nil, err + } + return in, rewrite.SameTree, nil + } + + return rewrite.TopDown(root, TableID, visitor, stopAtRoute) +} + +func fetchByOffset(e sqlparser.SQLNode) bool { + switch e.(type) { + case *sqlparser.ColName, sqlparser.AggrFunc: + return true + default: + return false + } +} + +func planOffsetsOnJoins(ctx *plancontext.PlanningContext, op ops.Operator) error { + err := rewrite.Visit(op, func(current ops.Operator) error { + join, ok := current.(*ApplyJoin) + if !ok { + return nil + } + return join.planOffsets(ctx) + }) + return err +} + +// useOffsets rewrites an expression to use values from the input +func useOffsets(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op ops.Operator) (sqlparser.Expr, error) { + var exprOffset *sqlparser.Offset + + in := op.Inputs()[0] + found := func(e sqlparser.Expr, offset int) { exprOffset = sqlparser.NewOffset(offset, e) } + + notFound := func(e sqlparser.Expr) error { + _, addToGroupBy := e.(*sqlparser.ColName) + offsets, err := in.AddColumns(ctx, true, []bool{addToGroupBy}, []*sqlparser.AliasedExpr{aeWrap(e)}) + if err != nil { + return err + } + exprOffset = sqlparser.NewOffset(offsets[0], e) + return nil + } + + visitor := getVisitor(ctx, in.FindCol, found, notFound) + + // The cursor replace is not available while walking `down`, so `up` is used to do the replacement. + up := func(cursor *sqlparser.CopyOnWriteCursor) { + if exprOffset != nil { + cursor.Replace(exprOffset) + exprOffset = nil + } + } + + rewritten := sqlparser.CopyOnRewrite(expr, visitor, up, ctx.SemTable.CopyDependenciesOnSQLNodes) + + return rewritten.(sqlparser.Expr), nil +} + +// addColumnsToInput adds columns needed by an operator to its input. +// This happens only when the filter expression can be retrieved as an offset from the underlying mysql. +func addColumnsToInput(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { + visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { + filter, ok := in.(*Filter) + if !ok { + return in, rewrite.SameTree, nil + } + + proj, areOnTopOfProj := filter.Source.(selectExpressions) + if !areOnTopOfProj { + // not much we can do here + return in, rewrite.SameTree, nil + } + addedColumns := false + found := func(expr sqlparser.Expr, i int) {} + notFound := func(e sqlparser.Expr) error { + _, addToGroupBy := e.(*sqlparser.ColName) + proj.addColumnWithoutPushing(aeWrap(e), addToGroupBy) + addedColumns = true + return nil + } + visitor := getVisitor(ctx, proj.FindCol, found, notFound) + + for _, expr := range filter.Predicates { + _ = sqlparser.CopyOnRewrite(expr, visitor, nil, ctx.SemTable.CopyDependenciesOnSQLNodes) + } + if addedColumns { + return in, rewrite.NewTree("added columns because filter needs it", in), nil + } + + return in, rewrite.SameTree, nil + } + + return rewrite.TopDown(root, TableID, visitor, stopAtRoute) +} + +// addColumnsToInput adds columns needed by an operator to its input. +// This happens only when the filter expression can be retrieved as an offset from the underlying mysql. +func pullDistinctFromUNION(root ops.Operator) (ops.Operator, error) { + visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { + union, ok := in.(*Union) + if !ok || !union.distinct { + return in, rewrite.SameTree, nil + } + + union.distinct = false + + distinct := &Distinct{ + Required: true, + Source: union, + } + return distinct, rewrite.NewTree("pulled out DISTINCT from union", union), nil + } + + return rewrite.TopDown(root, TableID, visitor, stopAtRoute) +} + +func getVisitor( + ctx *plancontext.PlanningContext, + findCol func(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error), + found func(sqlparser.Expr, int), + notFound func(sqlparser.Expr) error, +) func(node, parent sqlparser.SQLNode) bool { + var err error + return func(node, parent sqlparser.SQLNode) bool { + if err != nil { + return false + } + e, ok := node.(sqlparser.Expr) + if !ok { + return true + } + var offset int + offset, err = findCol(ctx, e, false) + if err != nil { + return false + } + if offset >= 0 { + found(e, offset) + return false + } + + if fetchByOffset(e) { + err = notFound(e) + return false + } + + return true + } +} diff --git a/go/vt/vtgate/planbuilder/operators/operator.go b/go/vt/vtgate/planbuilder/operators/operator.go index 2621fa9875b..7ee780472bc 100644 --- a/go/vt/vtgate/planbuilder/operators/operator.go +++ b/go/vt/vtgate/planbuilder/operators/operator.go @@ -22,7 +22,7 @@ The operators go through a few phases while planning: It will contain logical joins - we still haven't decided on the join algorithm to use yet. At the leaves, it will contain QueryGraphs - these are the tables in the FROM clause that we can easily do join ordering on. The logical tree will represent the full query, - including projections, grouping, ordering and so on. + including projections, Grouping, ordering and so on. 2. Physical Once the logical plan has been fully built, we go bottom up and plan which routes that will be used. During this phase, we will also decide which join algorithms should be used on the vtgate level @@ -34,9 +34,13 @@ The operators go through a few phases while planning: package operators import ( + "fmt" + + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) @@ -51,34 +55,43 @@ type ( noPredicates struct{} ) -func PlanQuery(ctx *plancontext.PlanningContext, selStmt sqlparser.Statement) (ops.Operator, error) { - op, err := createLogicalOperatorFromAST(ctx, selStmt) +// PlanQuery creates a query plan for a given SQL statement +func PlanQuery(ctx *plancontext.PlanningContext, stmt sqlparser.Statement) (ops.Operator, error) { + op, err := translateQueryToOp(ctx, stmt) if err != nil { return nil, err } - if err = CheckValid(op); err != nil { - return nil, err + if rewrite.DebugOperatorTree { + fmt.Println("Initial tree:") + fmt.Println(ops.ToTree(op)) } - op, err = transformToPhysical(ctx, op) - if err != nil { + if op, err = compact(ctx, op); err != nil { return nil, err } - backup := Clone(op) + if err = checkValid(op); err != nil { + return nil, err + } - op, err = planHorizons(op) - if err == errNotHorizonPlanned { - op = backup - } else if err != nil { + if op, err = transformToPhysical(ctx, op); err != nil { return nil, err } - if op, err = Compact(ctx, op); err != nil { + if op, err = tryHorizonPlanning(ctx, op); err != nil { return nil, err } + _, isRoute := op.(*Route) + if !isRoute && ctx.SemTable.NotSingleRouteErr != nil { + // If we got here, we don't have a single shard plan + return nil, ctx.SemTable.NotSingleRouteErr + } + + // set lock and comments on the route to be set on the sql query on conversion. + setCommentsAndLockOnRoute(op, stmt) + return op, err } @@ -87,12 +100,88 @@ func (noInputs) Inputs() []ops.Operator { return nil } +// SetInputs implements the Operator interface +func (noInputs) SetInputs(ops []ops.Operator) { + if len(ops) > 0 { + panic("the noInputs operator does not have inputs") + } +} + // AddColumn implements the Operator interface -func (noColumns) AddColumn(*plancontext.PlanningContext, sqlparser.Expr) (int, error) { - return 0, vterrors.VT13001("the noColumns operator cannot accept columns") +func (noColumns) AddColumn(*plancontext.PlanningContext, *sqlparser.AliasedExpr, bool, bool) (ops.Operator, int, error) { + return nil, 0, vterrors.VT13001("noColumns operators have no column") +} + +func (noColumns) AddColumns(*plancontext.PlanningContext, bool, []bool, []*sqlparser.AliasedExpr) ([]int, error) { + return nil, vterrors.VT13001("noColumns operators have no column") +} + +func (noColumns) GetColumns(*plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return nil, vterrors.VT13001("noColumns operators have no column") +} +func (noColumns) FindCol(*plancontext.PlanningContext, sqlparser.Expr, bool) (int, error) { + return 0, vterrors.VT13001("noColumns operators have no column") +} + +func (noColumns) GetSelectExprs(*plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return nil, vterrors.VT13001("noColumns operators have no column") } // AddPredicate implements the Operator interface func (noPredicates) AddPredicate(*plancontext.PlanningContext, sqlparser.Expr) (ops.Operator, error) { return nil, vterrors.VT13001("the noColumns operator cannot accept predicates") } + +// tryTruncateColumnsAt will see if we can truncate the columns by just asking the operator to do it for us +func tryTruncateColumnsAt(op ops.Operator, truncateAt int) bool { + type columnTruncator interface { + setTruncateColumnCount(offset int) + } + + truncator, ok := op.(columnTruncator) + if ok { + truncator.setTruncateColumnCount(truncateAt) + return true + } + + inputs := op.Inputs() + if len(inputs) != 1 { + return false + } + + switch op.(type) { + case *Limit: + // empty by design + default: + return false + } + + return tryTruncateColumnsAt(inputs[0], truncateAt) +} + +func transformColumnsToSelectExprs(ctx *plancontext.PlanningContext, op ops.Operator) (sqlparser.SelectExprs, error) { + columns, err := op.GetColumns(ctx) + if err != nil { + return nil, err + } + selExprs := slice.Map(columns, func(from *sqlparser.AliasedExpr) sqlparser.SelectExpr { + return from + }) + return selExprs, nil +} + +func setCommentsAndLockOnRoute(op ops.Operator, stmt sqlparser.Statement) { + _ = rewrite.Visit(op, func(op ops.Operator) error { + route, ok := op.(*Route) + if !ok { + return nil + } + if stmtWithComments, ok := stmt.(sqlparser.Commented); ok { + route.Comments = stmtWithComments.GetParsedComments() + } + if stmtWithLock, ok := stmt.(sqlparser.SelectStatement); ok { + route.Lock = stmtWithLock.GetLock() + } + return nil + }) +} diff --git a/go/vt/vtgate/planbuilder/operators/operator_funcs.go b/go/vt/vtgate/planbuilder/operators/operator_funcs.go index ee23175eac3..7f7aaff29c5 100644 --- a/go/vt/vtgate/planbuilder/operators/operator_funcs.go +++ b/go/vt/vtgate/planbuilder/operators/operator_funcs.go @@ -59,7 +59,7 @@ func RemovePredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op o var keep []sqlparser.Expr for _, e := range sqlparser.SplitAndExpression(nil, op.Predicate) { - if ctx.SemTable.EqualsExpr(expr, e) { + if ctx.SemTable.EqualsExprWithDeps(expr, e) { isRemoved = true } else { keep = append(keep, e) @@ -75,7 +75,7 @@ func RemovePredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op o case *Filter: idx := -1 for i, predicate := range op.Predicates { - if ctx.SemTable.EqualsExpr(predicate, expr) { + if ctx.SemTable.EqualsExprWithDeps(predicate, expr) { idx = i } } diff --git a/go/vt/vtgate/planbuilder/operators/operator_test.go b/go/vt/vtgate/planbuilder/operators/operator_test.go deleted file mode 100644 index 4ba5588f22e..00000000000 --- a/go/vt/vtgate/planbuilder/operators/operator_test.go +++ /dev/null @@ -1,251 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package operators - -import ( - "bufio" - "fmt" - "io" - "os" - "sort" - "strings" - "testing" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "vitess.io/vitess/go/vt/vtgate/engine" - - "vitess.io/vitess/go/vt/vtgate/vindexes" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/semantics" -) - -type lineCountingReader struct { - line int - r *bufio.Reader -} - -func (lcr *lineCountingReader) nextLine() (string, error) { - queryBytes, err := lcr.r.ReadBytes('\n') - lcr.line++ - return string(queryBytes), err -} - -func readTestCase(lcr *lineCountingReader) (testCase, error) { - query := "" - var err error - for query == "" || query == "\n" || strings.HasPrefix(query, "#") { - query, err = lcr.nextLine() - if err != nil { - return testCase{}, err - } - } - - tc := testCase{query: query, line: lcr.line} - - for { - jsonPart, err := lcr.nextLine() - if err != nil { - if err == io.EOF { - return tc, fmt.Errorf("test data is bad. expectation not finished") - } - return tc, err - } - if jsonPart == "}\n" { - tc.expected += "}" - break - } - tc.expected += jsonPart - } - return tc, nil -} - -type testCase struct { - line int - query, expected string -} - -func TestOperator(t *testing.T) { - fd, err := os.OpenFile("operator_test_data.txt", os.O_RDONLY, 0) - require.NoError(t, err) - r := bufio.NewReader(fd) - - hash, _ := vindexes.NewHash("user_index", map[string]string{}) - si := &semantics.FakeSI{VindexTables: map[string]vindexes.Vindex{"user_index": hash}} - lcr := &lineCountingReader{r: r} - for { - tc, err := readTestCase(lcr) - if err == io.EOF { - break - } - t.Run(fmt.Sprintf("%d:%s", tc.line, tc.query), func(t *testing.T) { - require.NoError(t, err) - stmt, err := sqlparser.Parse(tc.query) - require.NoError(t, err) - semTable, err := semantics.Analyze(stmt, "", si) - require.NoError(t, err) - ctx := plancontext.NewPlanningContext(nil, semTable, nil, 0) - optree, err := createLogicalOperatorFromAST(ctx, stmt) - require.NoError(t, err) - optree, err = Compact(ctx, optree) - require.NoError(t, err) - output := testString(optree) - assert.Equal(t, tc.expected, output) - if t.Failed() { - fmt.Println(output) - } - }) - } -} - -func testString(op interface{}) string { // TODO - switch op := op.(type) { - case *QueryGraph: - return fmt.Sprintf("QueryGraph: %s", op.testString()) - case *Join: - leftStr := indent(testString(op.LHS)) - rightStr := indent(testString(op.RHS)) - if op.LeftJoin { - return fmt.Sprintf("OuterJoin: {\n\tInner: %s\n\tOuter: %s\n\tPredicate: %s\n}", leftStr, rightStr, sqlparser.String(op.Predicate)) - } - return fmt.Sprintf("Join: {\n\tLHS: %s\n\tRHS: %s\n\tPredicate: %s\n}", leftStr, rightStr, sqlparser.String(op.Predicate)) - case *Derived: - inner := indent(testString(op.Source)) - query := sqlparser.String(op.Query) - return fmt.Sprintf("Derived %s: {\n\tQuery: %s\n\tInner:%s\n}", op.Alias, query, inner) - case *SubQuery: - var inners []string - for _, sqOp := range op.Inner { - subquery := fmt.Sprintf("{\n\tType: %s", engine.PulloutOpcode(sqOp.ExtractedSubquery.OpCode).String()) - if sqOp.ExtractedSubquery.GetArgName() != "" { - subquery += fmt.Sprintf("\n\tArgName: %s", sqOp.ExtractedSubquery.GetArgName()) - } - subquery += fmt.Sprintf("\n\tQuery: %s\n}", indent(testString(sqOp.Inner))) - subquery = indent(subquery) - inners = append(inners, subquery) - } - outer := indent(testString(op.Outer)) - join := strings.Join(inners, "\n") - sprintf := fmt.Sprintf("SubQuery: {\n\tSubQueries: [\n%s]\n\tOuter: %s\n}", join, outer) - return sprintf - case *Vindex: - value := sqlparser.String(op.Value) - return fmt.Sprintf("Vindex: {\n\tName: %s\n\tValue: %s\n}", op.Vindex.String(), value) - case *Union: - var inners []string - for _, source := range op.Sources { - inners = append(inners, indent(testString(source))) - } - if len(op.Ordering) > 0 { - inners = append(inners, indent(sqlparser.String(op.Ordering)[1:])) - } - dist := "" - if op.Distinct { - dist = "(distinct)" - } - return fmt.Sprintf("Concatenate%s {\n%s\n}", dist, strings.Join(inners, ",\n")) - case *Update: - tbl := "table: " + op.QTable.testString() - var assignments []string - // sort to produce stable results, otherwise test is flaky - keys := make([]string, 0, len(op.Assignments)) - for k := range op.Assignments { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - assignments = append(assignments, fmt.Sprintf("\t%s = %s", k, sqlparser.String(op.Assignments[k]))) - } - return fmt.Sprintf("Update {\n\t%s\nassignments:\n%s\n}", tbl, strings.Join(assignments, "\n")) - case *Horizon: - src := indent(testString(op.Source)) - return fmt.Sprintf("Horizon {\n\tQuery: \"%s\"\n\tInner:%s\n}", sqlparser.String(op.Select), src) - } - panic(fmt.Sprintf("%T", op)) -} - -func indent(s string) string { - lines := strings.Split(s, "\n") - for i, line := range lines { - lines[i] = "\t" + line - } - return strings.Join(lines, "\n") -} - -// the following code is only used by tests - -func (qt *QueryTable) testString() string { - var alias string - if !qt.Alias.As.IsEmpty() { - alias = " AS " + sqlparser.String(qt.Alias.As) - } - var preds []string - for _, predicate := range qt.Predicates { - preds = append(preds, sqlparser.String(predicate)) - } - var where string - if len(preds) > 0 { - where = " where " + strings.Join(preds, " and ") - } - - return fmt.Sprintf("\t%v:%s%s%s", qt.ID, sqlparser.String(qt.Table), alias, where) -} - -func (qg *QueryGraph) testString() string { - return fmt.Sprintf(`{ -Tables: -%s%s%s -}`, strings.Join(qg.tableNames(), "\n"), qg.crossPredicateString(), qg.noDepsString()) -} - -func (qg *QueryGraph) crossPredicateString() string { - if len(qg.innerJoins) == 0 { - return "" - } - var joinPreds []string - for _, join := range qg.innerJoins { - deps, predicates := join.deps, join.exprs - var expressions []string - for _, expr := range predicates { - expressions = append(expressions, sqlparser.String(expr)) - } - - exprConcat := strings.Join(expressions, " and ") - joinPreds = append(joinPreds, fmt.Sprintf("\t%v - %s", deps, exprConcat)) - } - sort.Strings(joinPreds) - return fmt.Sprintf("\nJoinPredicates:\n%s", strings.Join(joinPreds, "\n")) -} - -func (qg *QueryGraph) tableNames() []string { - var tables []string - for _, t := range qg.Tables { - tables = append(tables, t.testString()) - } - return tables -} - -func (qg *QueryGraph) noDepsString() string { - if qg.NoDeps == nil { - return "" - } - return fmt.Sprintf("\nForAll: %s", sqlparser.String(qg.NoDeps)) -} diff --git a/go/vt/vtgate/planbuilder/operators/operator_test_data.txt b/go/vt/vtgate/planbuilder/operators/operator_test_data.txt deleted file mode 100644 index 2854a43f6bb..00000000000 --- a/go/vt/vtgate/planbuilder/operators/operator_test_data.txt +++ /dev/null @@ -1,628 +0,0 @@ -(select id from unsharded union all select id from unsharded_auto order by id) union select id from user -Horizon { - Query: "(select id from unsharded union all select id from unsharded_auto order by id asc) union select id from `user`" - Inner: Concatenate(distinct) { - Horizon { - Query: "select id from unsharded" - Inner: QueryGraph: { - Tables: - TableSet{0}:unsharded - } - }, - Horizon { - Query: "select id from unsharded_auto" - Inner: QueryGraph: { - Tables: - TableSet{1}:unsharded_auto - } - }, - Horizon { - Query: "select id from `user`" - Inner: QueryGraph: { - Tables: - TableSet{2}:`user` - } - } - } -} - -select id from unsharded union select id from unsharded_auto -Horizon { - Query: "select id from unsharded union select id from unsharded_auto" - Inner: Concatenate(distinct) { - Horizon { - Query: "select id from unsharded" - Inner: QueryGraph: { - Tables: - TableSet{0}:unsharded - } - }, - Horizon { - Query: "select id from unsharded_auto" - Inner: QueryGraph: { - Tables: - TableSet{1}:unsharded_auto - } - } - } -} - -select id from unsharded union all select id from unsharded_auto -Horizon { - Query: "select id from unsharded union all select id from unsharded_auto" - Inner: Concatenate { - Horizon { - Query: "select id from unsharded" - Inner: QueryGraph: { - Tables: - TableSet{0}:unsharded - } - }, - Horizon { - Query: "select id from unsharded_auto" - Inner: QueryGraph: { - Tables: - TableSet{1}:unsharded_auto - } - } - } -} - -(select id from unsharded union all select id from unsharded_auto limit 10) union select id from x order by id -Horizon { - Query: "(select id from unsharded union all select id from unsharded_auto limit 10) union select id from x order by id asc" - Inner: Concatenate(distinct) { - Horizon { - Query: "select id from unsharded" - Inner: QueryGraph: { - Tables: - TableSet{0}:unsharded - } - }, - Horizon { - Query: "select id from unsharded_auto" - Inner: QueryGraph: { - Tables: - TableSet{1}:unsharded_auto - } - }, - Horizon { - Query: "select id from x" - Inner: QueryGraph: { - Tables: - TableSet{2}:x - } - }, - order by id asc - } -} - -(select id from unsharded union all select id from unsharded_auto) union all select id from x -Horizon { - Query: "select id from unsharded union all select id from unsharded_auto union all select id from x" - Inner: Concatenate { - Horizon { - Query: "select id from unsharded" - Inner: QueryGraph: { - Tables: - TableSet{0}:unsharded - } - }, - Horizon { - Query: "select id from unsharded_auto" - Inner: QueryGraph: { - Tables: - TableSet{1}:unsharded_auto - } - }, - Horizon { - Query: "select id from x" - Inner: QueryGraph: { - Tables: - TableSet{2}:x - } - } - } -} - -(select id from unsharded union select id from unsharded_auto) union select id from x -Horizon { - Query: "select id from unsharded union select id from unsharded_auto union select id from x" - Inner: Concatenate(distinct) { - Horizon { - Query: "select id from unsharded" - Inner: QueryGraph: { - Tables: - TableSet{0}:unsharded - } - }, - Horizon { - Query: "select id from unsharded_auto" - Inner: QueryGraph: { - Tables: - TableSet{1}:unsharded_auto - } - }, - Horizon { - Query: "select id from x" - Inner: QueryGraph: { - Tables: - TableSet{2}:x - } - } - } -} - -(select id from unsharded union select id from unsharded_auto) union all select id from x -Horizon { - Query: "select id from unsharded union select id from unsharded_auto union all select id from x" - Inner: Concatenate { - Concatenate(distinct) { - Horizon { - Query: "select id from unsharded" - Inner: QueryGraph: { - Tables: - TableSet{0}:unsharded - } - }, - Horizon { - Query: "select id from unsharded_auto" - Inner: QueryGraph: { - Tables: - TableSet{1}:unsharded_auto - } - } - }, - Horizon { - Query: "select id from x" - Inner: QueryGraph: { - Tables: - TableSet{2}:x - } - } - } -} - -select * from t -Horizon { - Query: "select * from t" - Inner: QueryGraph: { - Tables: - TableSet{0}:t - } -} - -select t.c from t,y,z where t.c = y.c and (t.a = z.a or t.a = y.a) and 1 < 2 -Horizon { - Query: "select t.c from t, y, z where t.c = y.c and (t.a = z.a or t.a = y.a) and 1 < 2" - Inner: QueryGraph: { - Tables: - TableSet{0}:t - TableSet{1}:y - TableSet{2}:z - JoinPredicates: - TableSet{0,1,2} - t.a = z.a or t.a = y.a - TableSet{0,1} - t.c = y.c - ForAll: 1 < 2 - } -} - -select t.c from t join y on t.id = y.t_id join z on t.id = z.t_id where t.name = 'foo' and y.col = 42 and z.baz = 101 -Horizon { - Query: "select t.c from t join y on t.id = y.t_id join z on t.id = z.t_id where t.`name` = 'foo' and y.col = 42 and z.baz = 101" - Inner: QueryGraph: { - Tables: - TableSet{0}:t where t.`name` = 'foo' - TableSet{1}:y where y.col = 42 - TableSet{2}:z where z.baz = 101 - JoinPredicates: - TableSet{0,1} - t.id = y.t_id - TableSet{0,2} - t.id = z.t_id - } -} - -select t.c from t,y,z where t.name = 'foo' and y.col = 42 and z.baz = 101 and t.id = y.t_id and t.id = z.t_id -Horizon { - Query: "select t.c from t, y, z where t.`name` = 'foo' and y.col = 42 and z.baz = 101 and t.id = y.t_id and t.id = z.t_id" - Inner: QueryGraph: { - Tables: - TableSet{0}:t where t.`name` = 'foo' - TableSet{1}:y where y.col = 42 - TableSet{2}:z where z.baz = 101 - JoinPredicates: - TableSet{0,1} - t.id = y.t_id - TableSet{0,2} - t.id = z.t_id - } -} - -select 1 from t where '1' = 1 and 12 = '12' -Horizon { - Query: "select 1 from t where '1' = 1 and 12 = '12'" - Inner: QueryGraph: { - Tables: - TableSet{0}:t - ForAll: '1' = 1 and 12 = '12' - } -} - -select 1 from t left join s on t.id = s.id -Horizon { - Query: "select 1 from t left join s on t.id = s.id" - Inner: OuterJoin: { - Inner: QueryGraph: { - Tables: - TableSet{0}:t - } - Outer: QueryGraph: { - Tables: - TableSet{1}:s - } - Predicate: t.id = s.id - } -} - -select 1 from t join s on t.id = s.id and t.name = s.name -Horizon { - Query: "select 1 from t join s on t.id = s.id and t.`name` = s.`name`" - Inner: QueryGraph: { - Tables: - TableSet{0}:t - TableSet{1}:s - JoinPredicates: - TableSet{0,1} - t.id = s.id and t.`name` = s.`name` - } -} - -select 1 from t left join s on t.id = s.id where t.name = 'Mister' -Horizon { - Query: "select 1 from t left join s on t.id = s.id where t.`name` = 'Mister'" - Inner: OuterJoin: { - Inner: QueryGraph: { - Tables: - TableSet{0}:t where t.`name` = 'Mister' - } - Outer: QueryGraph: { - Tables: - TableSet{1}:s - } - Predicate: t.id = s.id - } -} - -select 1 from t right join s on t.id = s.id -Horizon { - Query: "select 1 from t right join s on t.id = s.id" - Inner: OuterJoin: { - Inner: QueryGraph: { - Tables: - TableSet{1}:s - } - Outer: QueryGraph: { - Tables: - TableSet{0}:t - } - Predicate: t.id = s.id - } -} - -select 1 from (a left join b on a.id = b.id) join (c left join d on c.id = d.id) on a.id = c.id -Horizon { - Query: "select 1 from (a left join b on a.id = b.id) join (c left join d on c.id = d.id) on a.id = c.id" - Inner: Join: { - LHS: OuterJoin: { - Inner: QueryGraph: { - Tables: - TableSet{0}:a - } - Outer: QueryGraph: { - Tables: - TableSet{1}:b - } - Predicate: a.id = b.id - } - RHS: OuterJoin: { - Inner: QueryGraph: { - Tables: - TableSet{2}:c - } - Outer: QueryGraph: { - Tables: - TableSet{3}:d - } - Predicate: c.id = d.id - } - Predicate: a.id = c.id - } -} - -select 1 from (select 42 as id from tbl) as t -Horizon { - Query: "select 1 from (select 42 as id from tbl) as t" - Inner: Derived t: { - Query: select 42 as id from tbl - Inner: QueryGraph: { - Tables: - TableSet{0}:tbl - } - } -} - -select 1 from (select id from tbl limit 10) as t join (select foo, count(*) from usr group by foo) as s on t.id = s.foo -Horizon { - Query: "select 1 from (select id from tbl limit 10) as t join (select foo, count(*) from usr group by foo) as s on t.id = s.foo" - Inner: Join: { - LHS: Derived t: { - Query: select id from tbl limit 10 - Inner: QueryGraph: { - Tables: - TableSet{0}:tbl - } - } - RHS: Derived s: { - Query: select foo, count(*) from usr group by foo - Inner: QueryGraph: { - Tables: - TableSet{2}:usr - } - } - Predicate: t.id = s.foo - } -} - -select (select 1) from t where exists (select 1) and id in (select 1) -Horizon { - Query: "select (select 1 from dual) from t where exists (select 1 from dual) and id in (select 1 from dual)" - Inner: SubQuery: { - SubQueries: [ - { - Type: PulloutValue - Query: QueryGraph: { - Tables: - TableSet{1}:dual - } - } - { - Type: PulloutExists - Query: QueryGraph: { - Tables: - TableSet{2}:dual - } - } - { - Type: PulloutIn - Query: QueryGraph: { - Tables: - TableSet{3}:dual - } - }] - Outer: QueryGraph: { - Tables: - TableSet{0}:t where id in (select 1 from dual) - ForAll: exists (select 1 from dual) - } - } -} - -select u.id from user u where u.id = (select id from user_extra where id = u.id) -Horizon { - Query: "select u.id from `user` as u where u.id = (select id from user_extra where id = u.id)" - Inner: SubQuery: { - SubQueries: [ - { - Type: PulloutValue - Query: QueryGraph: { - Tables: - TableSet{1}:user_extra - JoinPredicates: - TableSet{0,1} - id = u.id - } - }] - Outer: QueryGraph: { - Tables: - TableSet{0}:`user` AS u where u.id = (select id from user_extra where id = u.id) - } - } -} - -select id from user_index where id = :id -Horizon { - Query: "select id from user_index where id = :id" - Inner: Vindex: { - Name: user_index - Value: :id - } -} - -select ui.id from user_index as ui join user as u where ui.id = 1 and ui.id = u.id -Horizon { - Query: "select ui.id from user_index as ui join `user` as u where ui.id = 1 and ui.id = u.id" - Inner: Join: { - LHS: Vindex: { - Name: user_index - Value: 1 - } - RHS: QueryGraph: { - Tables: - TableSet{1}:`user` AS u - } - Predicate: ui.id = u.id - } -} - -select u.id from (select id from user_index where id = 2) as u -Horizon { - Query: "select u.id from (select id from user_index where id = 2) as u" - Inner: Derived u: { - Query: select id from user_index where id = 2 - Inner: Vindex: { - Name: user_index - Value: 2 - } - } -} - -select 1 from a union select 2 from b -Horizon { - Query: "select 1 from a union select 2 from b" - Inner: Concatenate(distinct) { - Horizon { - Query: "select 1 from a" - Inner: QueryGraph: { - Tables: - TableSet{0}:a - } - }, - Horizon { - Query: "select 2 from b" - Inner: QueryGraph: { - Tables: - TableSet{1}:b - } - } - } -} - -select 1 from a union select 2 from b union select 3 from c -Horizon { - Query: "select 1 from a union select 2 from b union select 3 from c" - Inner: Concatenate(distinct) { - Horizon { - Query: "select 1 from a" - Inner: QueryGraph: { - Tables: - TableSet{0}:a - } - }, - Horizon { - Query: "select 2 from b" - Inner: QueryGraph: { - Tables: - TableSet{1}:b - } - }, - Horizon { - Query: "select 3 from c" - Inner: QueryGraph: { - Tables: - TableSet{2}:c - } - } - } -} - -select 1 from a union select 2 from b union select 3 from c union all select 4 from d -Horizon { - Query: "select 1 from a union select 2 from b union select 3 from c union all select 4 from d" - Inner: Concatenate { - Concatenate(distinct) { - Horizon { - Query: "select 1 from a" - Inner: QueryGraph: { - Tables: - TableSet{0}:a - } - }, - Horizon { - Query: "select 2 from b" - Inner: QueryGraph: { - Tables: - TableSet{1}:b - } - }, - Horizon { - Query: "select 3 from c" - Inner: QueryGraph: { - Tables: - TableSet{2}:c - } - } - }, - Horizon { - Query: "select 4 from d" - Inner: QueryGraph: { - Tables: - TableSet{3}:d - } - } - } -} - -select id from unsharded union select id from unsharded_auto order by id -Horizon { - Query: "select id from unsharded union select id from unsharded_auto order by id asc" - Inner: Concatenate(distinct) { - Horizon { - Query: "select id from unsharded" - Inner: QueryGraph: { - Tables: - TableSet{0}:unsharded - } - }, - Horizon { - Query: "select id from unsharded_auto" - Inner: QueryGraph: { - Tables: - TableSet{1}:unsharded_auto - } - }, - order by id asc - } -} - -select id from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) -Horizon { - Query: "select id from `user` where exists (select user_id from user_extra where user_id = 3 and user_id < `user`.id)" - Inner: SubQuery: { - SubQueries: [ - { - Type: PulloutExists - Query: QueryGraph: { - Tables: - TableSet{1}:user_extra where user_id = 3 - JoinPredicates: - TableSet{0,1} - user_id < `user`.id - } - }] - Outer: QueryGraph: { - Tables: - TableSet{0}:`user` where exists (select user_id from user_extra where user_id = 3 and user_id < `user`.id) - } - } -} - -select ks.tbl.col from ks.tbl where ks.tbl.id = 1 -Horizon { - Query: "select ks.tbl.col from ks.tbl where tbl.id = 1" - Inner: QueryGraph: { - Tables: - TableSet{0}:ks.tbl where tbl.id = 1 - } -} - -select 1 from ks.t join ks.y on ks.t.id = ks.y.t_id -Horizon { - Query: "select 1 from ks.t join ks.y on t.id = y.t_id" - Inner: QueryGraph: { - Tables: - TableSet{0}:ks.t - TableSet{1}:ks.y - JoinPredicates: - TableSet{0,1} - t.id = y.t_id - } -} - -select 1 from ks.t left join ks.y on ks.t.id = ks.y.t_id -Horizon { - Query: "select 1 from ks.t left join ks.y on t.id = y.t_id" - Inner: OuterJoin: { - Inner: QueryGraph: { - Tables: - TableSet{0}:ks.t - } - Outer: QueryGraph: { - Tables: - TableSet{1}:ks.y - } - Predicate: t.id = y.t_id - } -} diff --git a/go/vt/vtgate/planbuilder/operators/ops/op.go b/go/vt/vtgate/planbuilder/operators/ops/op.go index 4deeb5cee1e..f8c48fcd719 100644 --- a/go/vt/vtgate/planbuilder/operators/ops/op.go +++ b/go/vt/vtgate/planbuilder/operators/ops/op.go @@ -29,22 +29,38 @@ type ( // In some situation we go straight to the physical operator - when there are no options to consider, // we can go straight to the end result. Operator interface { + // Clone will return a copy of this operator, protected so changed to the original will not impact the clone Clone(inputs []Operator) Operator + + // Inputs returns the inputs for this operator Inputs() []Operator + // SetInputs changes the inputs for this op + SetInputs([]Operator) + // AddPredicate is used to push predicates. It pushed it as far down as is possible in the tree. // If we encounter a join and the predicate depends on both sides of the join, the predicate will be split into two parts, // where data is fetched from the LHS of the join to be used in the evaluation on the RHS + // TODO: we should remove this and replace it with rewriters AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (Operator, error) - // AddColumn tells an operator to also output an additional column specified. - // The offset to the column is returned. - AddColumn(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (int, error) + AddColumns(ctx *plancontext.PlanningContext, reuseExisting bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) ([]int, error) + + FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) + + GetColumns(ctx *plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) + GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) + + ShortDescription() string + + GetOrdering() ([]OrderBy, error) } - // PhysicalOperator means that this operator is ready to be turned into a logical plan - PhysicalOperator interface { - Operator - IPhysical() + // OrderBy contains the expression to used in order by and also if ordering is needed at VTGate level then what the weight_string function expression to be sent down for evaluation. + OrderBy struct { + Inner *sqlparser.Order + + // See GroupBy#SimplifiedExpr for more details about this + SimplifiedExpr sqlparser.Expr } ) diff --git a/go/vt/vtgate/planbuilder/operators/ops/to_json.go b/go/vt/vtgate/planbuilder/operators/ops/to_json.go new file mode 100644 index 00000000000..2b8b747f433 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/ops/to_json.go @@ -0,0 +1,53 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ops + +import ( + "fmt" + "reflect" + + "github.com/xlab/treeprint" +) + +// ToTree returns the operator as ascii tree. Should only be used for debugging +func ToTree(op Operator) string { + tree := asTree(op, nil) + return tree.String() +} + +func opDescr(op Operator) string { + typ := reflect.TypeOf(op).Elem().Name() + shortDescription := op.ShortDescription + if shortDescription() == "" { + return typ + } + return fmt.Sprintf("%s (%s)", typ, shortDescription()) +} + +func asTree(op Operator, root treeprint.Tree) treeprint.Tree { + txt := opDescr(op) + var branch treeprint.Tree + if root == nil { + branch = treeprint.NewWithRoot(txt) + } else { + branch = root.AddBranch(txt) + } + for _, child := range op.Inputs() { + asTree(child, branch) + } + return branch +} diff --git a/go/vt/vtgate/planbuilder/operators/ordering.go b/go/vt/vtgate/planbuilder/operators/ordering.go new file mode 100644 index 00000000000..786edbd482f --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/ordering.go @@ -0,0 +1,118 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "slices" + "strings" + + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +type Ordering struct { + Source ops.Operator + Offset []int + WOffset []int + + Order []ops.OrderBy + ResultColumns int +} + +func (o *Ordering) Clone(inputs []ops.Operator) ops.Operator { + return &Ordering{ + Source: inputs[0], + Offset: slices.Clone(o.Offset), + WOffset: slices.Clone(o.WOffset), + Order: slices.Clone(o.Order), + ResultColumns: o.ResultColumns, + } +} + +func (o *Ordering) Inputs() []ops.Operator { + return []ops.Operator{o.Source} +} + +func (o *Ordering) SetInputs(operators []ops.Operator) { + o.Source = operators[0] +} + +func (o *Ordering) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { + newSrc, err := o.Source.AddPredicate(ctx, expr) + if err != nil { + return nil, err + } + o.Source = newSrc + return o, nil +} + +func (o *Ordering) AddColumns(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) ([]int, error) { + return o.Source.AddColumns(ctx, reuse, addToGroupBy, exprs) +} + +func (o *Ordering) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + return o.Source.FindCol(ctx, expr, underRoute) +} + +func (o *Ordering) GetColumns(ctx *plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return o.Source.GetColumns(ctx) +} + +func (o *Ordering) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return o.Source.GetSelectExprs(ctx) +} + +func (o *Ordering) GetOrdering() ([]ops.OrderBy, error) { + return o.Order, nil +} + +func (o *Ordering) planOffsets(ctx *plancontext.PlanningContext) error { + for _, order := range o.Order { + offsets, err := o.Source.AddColumns(ctx, true, []bool{false}, []*sqlparser.AliasedExpr{aeWrap(order.SimplifiedExpr)}) + if err != nil { + return err + } + o.Offset = append(o.Offset, offsets[0]) + + if !ctx.SemTable.NeedsWeightString(order.SimplifiedExpr) { + o.WOffset = append(o.WOffset, -1) + continue + } + + wsExpr := &sqlparser.WeightStringFuncExpr{Expr: order.SimplifiedExpr} + offsets, err = o.Source.AddColumns(ctx, true, []bool{false}, []*sqlparser.AliasedExpr{aeWrap(wsExpr)}) + if err != nil { + return err + } + o.WOffset = append(o.WOffset, offsets[0]) + } + + return nil +} + +func (o *Ordering) ShortDescription() string { + ordering := slice.Map(o.Order, func(o ops.OrderBy) string { + return sqlparser.String(o.Inner) + }) + return strings.Join(ordering, ", ") +} + +func (o *Ordering) setTruncateColumnCount(offset int) { + o.ResultColumns = offset +} diff --git a/go/vt/vtgate/planbuilder/operators/phases.go b/go/vt/vtgate/planbuilder/operators/phases.go new file mode 100644 index 00000000000..44e86be5813 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/phases.go @@ -0,0 +1,164 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +// Phase defines the different planning phases to go through to produce an optimized plan for the input query. +type Phase struct { + Name string + // action is the action to be taken before calling plan optimization operation. + action func(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) +} + +// getPhases returns the phases the planner will go through. +// It's used to control so rewriters collaborate correctly +func getPhases() []Phase { + return []Phase{{ + // Initial optimization + Name: "initial horizon planning optimization phase", + }, { + Name: "pull distinct from UNION", + // to make it easier to compact UNIONs together, we keep the `distinct` flag in the UNION op until this + // phase. Here we will place a DISTINCT op on top of the UNION, and turn the UNION into a UNION ALL + action: func(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { + return pullDistinctFromUNION(op) + }, + }, { + // after the initial pushing down of aggregations and filtering, we add columns for the filter ops that + // need it their inputs, and then we start splitting the aggregation + // so parts run on MySQL and parts run on VTGate + Name: "add filter columns to projection or aggregation", + action: func(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { + ctx.DelegateAggregation = true + return addColumnsToInput(ctx, op) + }, + }, { + // addOrderBysForAggregations runs after we have pushed aggregations as far down as they'll go + // addOrderBysForAggregations will find Aggregators that have not been pushed under routes and + // add the necessary Ordering operators for them + Name: "add ORDER BY to aggregations above the route and add GROUP BY to aggregations on the RHS of join", + action: addOrderBysForAggregations, + }, { + Name: "remove Distinct operator that are not required and still above a route", + action: func(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { + return rewrite.BottomUp(op, TableID, func(innerOp ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { + d, ok := innerOp.(*Distinct) + if !ok || d.Required { + return innerOp, rewrite.SameTree, nil + } + + return d.Source, rewrite.NewTree("removed distinct not required that was not pushed under route", d), nil + }, stopAtRoute) + }, + }} +} + +func addOrderBysForAggregations(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { + visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { + aggrOp, ok := in.(*Aggregator) + if !ok { + return in, rewrite.SameTree, nil + } + + requireOrdering, err := needsOrdering(ctx, aggrOp) + if err != nil { + return nil, nil, err + } + if !requireOrdering { + return in, rewrite.SameTree, nil + } + orderBys := slice.Map(aggrOp.Grouping, func(from GroupBy) ops.OrderBy { + return from.AsOrderBy() + }) + if aggrOp.DistinctExpr != nil { + orderBys = append(orderBys, ops.OrderBy{ + Inner: &sqlparser.Order{ + Expr: aggrOp.DistinctExpr, + }, + SimplifiedExpr: aggrOp.DistinctExpr, + }) + } + aggrOp.Source = &Ordering{ + Source: aggrOp.Source, + Order: orderBys, + } + return in, rewrite.NewTree("added ordering before aggregation", in), nil + } + + return rewrite.BottomUp(root, TableID, visitor, stopAtRoute) +} + +func needsOrdering(ctx *plancontext.PlanningContext, in *Aggregator) (bool, error) { + requiredOrder := slice.Map(in.Grouping, func(from GroupBy) sqlparser.Expr { + return from.SimplifiedExpr + }) + if in.DistinctExpr != nil { + requiredOrder = append(requiredOrder, in.DistinctExpr) + } + if len(requiredOrder) == 0 { + return false, nil + } + srcOrdering, err := in.Source.GetOrdering() + if err != nil { + return false, err + } + if len(srcOrdering) < len(requiredOrder) { + return true, nil + } + for idx, gb := range requiredOrder { + if !ctx.SemTable.EqualsExprWithDeps(srcOrdering[idx].SimplifiedExpr, gb) { + return true, nil + } + } + return false, nil +} + +func addGroupByOnRHSOfJoin(root ops.Operator) (ops.Operator, error) { + visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { + join, ok := in.(*ApplyJoin) + if !ok { + return in, rewrite.SameTree, nil + } + + return addLiteralGroupingToRHS(join) + } + + return rewrite.TopDown(root, TableID, visitor, stopAtRoute) +} + +func addLiteralGroupingToRHS(in *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { + _ = rewrite.Visit(in.RHS, func(op ops.Operator) error { + aggr, isAggr := op.(*Aggregator) + if !isAggr { + return nil + } + if len(aggr.Grouping) == 0 { + gb := sqlparser.NewIntLiteral(".0") + aggr.Grouping = append(aggr.Grouping, NewGroupBy(gb, gb, aeWrap(gb))) + } + return nil + }) + return in, rewrite.SameTree, nil +} diff --git a/go/vt/vtgate/planbuilder/operators/projection.go b/go/vt/vtgate/planbuilder/operators/projection.go new file mode 100644 index 00000000000..b9929b29609 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/projection.go @@ -0,0 +1,444 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "fmt" + "slices" + "strings" + + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +type ( + // Projection is used when we need to evaluate expressions on the vtgate + // It uses the evalengine to accomplish its goal + Projection struct { + Source ops.Operator + + // TODO: we should replace these two slices with a single slice that contains both items. Keeping these two slices in sync leads to fragile code (systay 2023-07-25) + // Columns contain the expressions as viewed from the outside of this operator + Columns []*sqlparser.AliasedExpr + + // Projections will contain the actual evaluations we need to + // do if this operator is still above a route after optimisation + Projections []ProjExpr + + // TableID will be non-nil for derived tables + TableID *semantics.TableSet + Alias string + + FromAggr bool + } + + ProjExpr interface { + GetExpr() sqlparser.Expr + } + + // Offset is used when we are only passing through data from an incoming column + Offset struct { + Expr sqlparser.Expr + Offset int + } + + // Eval is used for expressions that have to be evaluated in the vtgate using the evalengine + Eval struct { + Expr sqlparser.Expr + EExpr evalengine.Expr + } + + // UnexploredExpression is used before we have planned - one of two end results are possible for it + // - we are able to push this projection under a route, and then this is not used at all - we'll just + // use the ColumnNames field of the Projection struct + // - we have to evaluate this on the vtgate, and either it's just a copy from the input, + // or it's an evalengine expression that we have to evaluate + UnexploredExpression struct { + E sqlparser.Expr + } +) + +var _ selectExpressions = (*Projection)(nil) + +// createSimpleProjection returns a projection where all columns are offsets. +// used to change the name and order of the columns in the final output +func createSimpleProjection(ctx *plancontext.PlanningContext, qp *QueryProjection, src ops.Operator) (*Projection, error) { + p := &Projection{ + Source: src, + } + + var groupby []bool + exprs, err := slice.MapWithError(qp.SelectExprs, func(from SelectExpr) (*sqlparser.AliasedExpr, error) { + groupby = append(groupby, false) + return from.GetAliasedExpr() + }) + if err != nil { + return nil, err + } + + offsets, err := p.Source.AddColumns(ctx, true, groupby, exprs) + if err != nil { + return nil, err + } + for i := range exprs { + offset, ae := offsets[i], exprs[i] + p.Projections = append(p.Projections, Offset{Expr: ae.Expr, Offset: offset}) + p.Columns = append(p.Columns, ae) + } + return p, nil +} + +func (p *Projection) addUnexploredExpr(ae *sqlparser.AliasedExpr, e sqlparser.Expr) int { + p.Projections = append(p.Projections, UnexploredExpression{E: e}) + p.Columns = append(p.Columns, ae) + return len(p.Projections) - 1 +} + +func (p *Projection) addColumnWithoutPushing(expr *sqlparser.AliasedExpr, _ bool) int { + return p.addUnexploredExpr(expr, expr.Expr) +} + +func (p *Projection) addColumnsWithoutPushing(ctx *plancontext.PlanningContext, reuse bool, _ []bool, exprs []*sqlparser.AliasedExpr) []int { + offsets := make([]int, len(exprs)) + for idx, expr := range exprs { + if reuse { + offset, _ := p.FindCol(ctx, expr.Expr, true) + if offset != -1 { + offsets[idx] = offset + continue + } + } + offsets[idx] = p.addUnexploredExpr(expr, expr.Expr) + } + return offsets +} + +func (p *Projection) isDerived() bool { + return p.TableID != nil +} + +func (p *Projection) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + if !(underRoute && p.isDerived()) { + if offset, found := canReuseColumn(ctx, p.Columns, expr, extractExpr); found { + return offset, nil + } + } + + return -1, nil +} + +// fetchExpr is used to accumulate all expressions we'll need from the input, +// and store in which column on the projection we want to store the offset returned +type fetchExpr struct { + expr sqlparser.Expr + colIdx []int + groupBy bool +} + +func (p *Projection) AddColumns(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) ([]int, error) { + offsets := make([]int, len(exprs)) + var fetch []fetchExpr + startOffset := len(p.Columns) + for i, ae := range exprs { + colIdx := i + startOffset + expr := ae.Expr + + if p.TableID != nil { + vt, err := ctx.SemTable.TableInfoFor(*p.TableID) + if err != nil { + return nil, err + } + expr = semantics.RewriteDerivedTableExpression(expr, vt) + } + + if reuse { + offset, err := p.FindCol(ctx, expr, false) + if err != nil { + return nil, err + } + if offset >= 0 { + offsets[i] = offset + continue + } + } + + // we add the column here, so we can find the expression in the next iteration of this loop, + // but we wait with the actual projection until we have fetched it from the input + offsets[i] = len(p.Columns) + p.Columns = append(p.Columns, aeWrap(expr)) + p.Projections = append(p.Projections, nil) + + // even if the receiver of the Projection output does not want to reuse column, + // we can reuse columns from this input + fIdx := slices.IndexFunc(fetch, func(f fetchExpr) bool { + return ctx.SemTable.EqualsExprWithDeps(expr, f.expr) + }) + + if fIdx == -1 { + // if we are not already asking for this expression, we add it to the list of expressions we'll ask for + fIdx = len(fetch) + fetch = append(fetch, fetchExpr{ + expr: expr, + }) + } + + fetch[fIdx].colIdx = append(fetch[fIdx].colIdx, colIdx) + fetch[fIdx].groupBy = fetch[fIdx].groupBy || addToGroupBy[i] + } + + askForExprs := make([]*sqlparser.AliasedExpr, len(fetch)) + askForGB := make([]bool, len(fetch)) + for i, f := range fetch { + askForExprs[i] = aeWrap(f.expr) + askForGB[i] = f.groupBy + } + + inputOffsets, err := p.Source.AddColumns(ctx, true, askForGB, askForExprs) + if err != nil { + return nil, err + } + + for fIdx, fetched := range fetch { + for _, colIdx := range fetched.colIdx { + p.Projections[colIdx] = Offset{Offset: inputOffsets[fIdx], Expr: fetched.expr} + } + } + + return offsets, nil +} + +func (po Offset) GetExpr() sqlparser.Expr { return po.Expr } +func (po Eval) GetExpr() sqlparser.Expr { return po.Expr } +func (po UnexploredExpression) GetExpr() sqlparser.Expr { return po.E } + +func (p *Projection) Clone(inputs []ops.Operator) ops.Operator { + return &Projection{ + Source: inputs[0], + Columns: slices.Clone(p.Columns), + Projections: slices.Clone(p.Projections), + TableID: p.TableID, + Alias: p.Alias, + FromAggr: p.FromAggr, + } +} + +func (p *Projection) Inputs() []ops.Operator { + return []ops.Operator{p.Source} +} + +func (p *Projection) SetInputs(operators []ops.Operator) { + p.Source = operators[0] +} + +func (p *Projection) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { + // we just pass through the predicate to our source + src, err := p.Source.AddPredicate(ctx, expr) + if err != nil { + return nil, err + } + p.Source = src + return p, nil +} + +func (p *Projection) GetColumns(*plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return p.Columns, nil +} + +func (p *Projection) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return transformColumnsToSelectExprs(ctx, p) +} + +func (p *Projection) GetOrdering() ([]ops.OrderBy, error) { + return p.Source.GetOrdering() +} + +// AllOffsets returns a slice of integer offsets for all columns in the Projection +// if all columns are of type Offset. If any column is not of type Offset, it returns nil. +func (p *Projection) AllOffsets() (cols []int) { + for _, c := range p.Projections { + offset, ok := c.(Offset) + if !ok { + return nil + } + + cols = append(cols, offset.Offset) + } + return +} + +func (p *Projection) ShortDescription() string { + var columns []string + if p.Alias != "" { + columns = append(columns, "derived["+p.Alias+"]") + } + for i, col := range p.Projections { + aliasExpr := p.Columns[i] + if aliasExpr.Expr == col.GetExpr() { + columns = append(columns, sqlparser.String(aliasExpr)) + } else { + columns = append(columns, fmt.Sprintf("%s AS %s", sqlparser.String(col.GetExpr()), aliasExpr.ColumnName())) + } + } + return strings.Join(columns, ", ") +} + +func (p *Projection) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) { + if p.isDerived() { + return p, rewrite.SameTree, nil + } + + // for projections that are not derived tables, we can check if it is safe to remove or not + needed := false + for i, projection := range p.Projections { + e, ok := projection.(Offset) + if !ok || e.Offset != i { + needed = true + break + } + } + + if !needed { + return p.Source, rewrite.NewTree("removed projection only passing through the input", p), nil + } + + switch src := p.Source.(type) { + case *Route: + return p.compactWithRoute(ctx, src) + case *ApplyJoin: + return p.compactWithJoin(ctx, src) + } + return p, rewrite.SameTree, nil +} + +func (p *Projection) compactWithJoin(ctx *plancontext.PlanningContext, src *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { + var newColumns []int + var newColumnsAST []JoinColumn + for idx, col := range p.Projections { + switch col := col.(type) { + case Offset: + newColumns = append(newColumns, src.Columns[col.Offset]) + newColumnsAST = append(newColumnsAST, src.JoinColumns[col.Offset]) + case UnexploredExpression: + if !ctx.SemTable.EqualsExprWithDeps(col.E, p.Columns[idx].Expr) { + // the inner expression is different from what we are presenting to the outside - this means we need to evaluate + return p, rewrite.SameTree, nil + } + offset := slices.IndexFunc(src.JoinColumns, func(jc JoinColumn) bool { + return ctx.SemTable.EqualsExprWithDeps(jc.Original.Expr, col.E) + }) + if offset < 0 { + return p, rewrite.SameTree, nil + } + if len(src.Columns) > 0 { + newColumns = append(newColumns, src.Columns[offset]) + } + newColumnsAST = append(newColumnsAST, src.JoinColumns[offset]) + default: + return p, rewrite.SameTree, nil + } + } + src.Columns = newColumns + src.JoinColumns = newColumnsAST + return src, rewrite.NewTree("remove projection from before join", src), nil +} + +func (p *Projection) compactWithRoute(ctx *plancontext.PlanningContext, rb *Route) (ops.Operator, *rewrite.ApplyResult, error) { + for i, col := range p.Projections { + offset, ok := col.(Offset) + if !ok || offset.Offset != i { + return p, rewrite.SameTree, nil + } + } + columns, err := rb.GetColumns(ctx) + if err != nil { + return nil, nil, err + } + + if len(columns) == len(p.Projections) { + return rb, rewrite.NewTree("remove projection from before route", rb), nil + } + rb.ResultColumns = len(columns) + return rb, rewrite.SameTree, nil +} + +func (p *Projection) needsEvaluation(ctx *plancontext.PlanningContext, e sqlparser.Expr) bool { + offset := slices.IndexFunc(p.Columns, func(expr *sqlparser.AliasedExpr) bool { + return ctx.SemTable.EqualsExprWithDeps(expr.Expr, e) + }) + + if offset < 0 { + return false + } + + inside := p.Projections[offset].GetExpr() + outside := p.Columns[offset].Expr + return inside != outside +} + +func (p *Projection) planOffsets(ctx *plancontext.PlanningContext) error { + for i, col := range p.Projections { + _, unexplored := col.(UnexploredExpression) + if !unexplored { + continue + } + + // first step is to replace the expressions we expect to get from our input with the offsets for these + expr := col.GetExpr() + rewritten, err := useOffsets(ctx, expr, p) + if err != nil { + return err + } + + offset, ok := rewritten.(*sqlparser.Offset) + if ok { + // we got a pure offset back. No need to do anything else + p.Projections[i] = Offset{ + Expr: expr, + Offset: offset.V, + } + continue + } + + // for everything else, we'll turn to the evalengine + eexpr, err := evalengine.Translate(rewritten, nil) + if err != nil { + return err + } + + p.Projections[i] = Eval{ + Expr: rewritten, + EExpr: eexpr, + } + } + + p.TableID = nil + p.Alias = "" + + return nil +} + +func (p *Projection) introducesTableID() semantics.TableSet { + if p.TableID == nil { + return semantics.EmptyTableSet() + } + return *p.TableID +} diff --git a/go/vt/vtgate/planbuilder/operators/querygraph.go b/go/vt/vtgate/planbuilder/operators/querygraph.go index b22fdf6907e..f384607fe10 100644 --- a/go/vt/vtgate/planbuilder/operators/querygraph.go +++ b/go/vt/vtgate/planbuilder/operators/querygraph.go @@ -17,6 +17,8 @@ limitations under the License. package operators import ( + "strings" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" @@ -63,8 +65,8 @@ type ( var _ ops.Operator = (*QueryGraph)(nil) -// Introduces implements the TableIDIntroducer interface -func (qg *QueryGraph) Introduces() semantics.TableSet { +// Introduces implements the tableIDIntroducer interface +func (qg *QueryGraph) introducesTableID() semantics.TableSet { var ts semantics.TableSet for _, table := range qg.Tables { ts = ts.Merge(table.ID) @@ -90,26 +92,6 @@ func newQueryGraph() *QueryGraph { return &QueryGraph{} } -func (qg *QueryGraph) collectPredicates(ctx *plancontext.PlanningContext, sel *sqlparser.Select) error { - predicates := sqlparser.SplitAndExpression(nil, sel.Where.Expr) - - for _, predicate := range predicates { - err := qg.collectPredicate(ctx, predicate) - if err != nil { - return err - } - } - return nil -} - -func (qg *QueryGraph) getPredicateByDeps(ts semantics.TableSet) ([]sqlparser.Expr, bool) { - for _, join := range qg.innerJoins { - if join.deps == ts { - return join.exprs, true - } - } - return nil, false -} func (qg *QueryGraph) addJoinPredicates(ctx *plancontext.PlanningContext, ts semantics.TableSet, predicate sqlparser.Expr) { for _, join := range qg.innerJoins { if join.deps == ts { @@ -128,7 +110,7 @@ func (qg *QueryGraph) addJoinPredicates(ctx *plancontext.PlanningContext, ts sem }) } -func (qg *QueryGraph) collectPredicate(ctx *plancontext.PlanningContext, predicate sqlparser.Expr) error { +func (qg *QueryGraph) collectPredicate(ctx *plancontext.PlanningContext, predicate sqlparser.Expr) { deps := ctx.SemTable.RecursiveDeps(predicate) switch deps.NumberOfTables() { case 0: @@ -142,7 +124,6 @@ func (qg *QueryGraph) collectPredicate(ctx *plancontext.PlanningContext, predica default: qg.addJoinPredicates(ctx, deps, predicate) } - return nil } func (qg *QueryGraph) addToSingleTable(ctx *plancontext.PlanningContext, table semantics.TableSet, predicate sqlparser.Expr) bool { @@ -182,7 +163,7 @@ func (qg *QueryGraph) UnsolvedPredicates(_ *semantics.SemTable) []sqlparser.Expr } // Clone implements the Operator interface -func (qg *QueryGraph) Clone(inputs []ops.Operator) ops.Operator { +func (qg *QueryGraph) Clone([]ops.Operator) ops.Operator { result := &QueryGraph{ Tables: nil, innerJoins: nil, @@ -195,12 +176,13 @@ func (qg *QueryGraph) Clone(inputs []ops.Operator) ops.Operator { return result } +func (qg *QueryGraph) GetOrdering() ([]ops.OrderBy, error) { + return nil, nil +} + func (qg *QueryGraph) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (ops.Operator, error) { for _, e := range sqlparser.SplitAndExpression(nil, expr) { - err := qg.collectPredicate(ctx, e) - if err != nil { - return nil, err - } + qg.collectPredicate(ctx, e) } return qg, nil } @@ -215,3 +197,14 @@ func (qt *QueryTable) Clone() *QueryTable { IsInfSchema: qt.IsInfSchema, } } + +func (qg *QueryGraph) tableNames() (tables []string) { + for _, table := range qg.Tables { + tables = append(tables, sqlparser.String(table.Table)) + } + return +} + +func (qg *QueryGraph) ShortDescription() string { + return strings.Join(qg.tableNames(), ", ") +} diff --git a/go/vt/vtgate/planbuilder/operators/queryprojection.go b/go/vt/vtgate/planbuilder/operators/queryprojection.go index 29e356c6650..9d6aabf9dda 100644 --- a/go/vt/vtgate/planbuilder/operators/queryprojection.go +++ b/go/vt/vtgate/planbuilder/operators/queryprojection.go @@ -19,20 +19,22 @@ package operators import ( "encoding/json" "fmt" + "slices" "sort" "strings" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" - - "vitess.io/vitess/go/vt/vtgate/engine" - + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine/opcode" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" ) type ( - // SelectExpr provides whether the columns is aggregation expression or not. + // SelectExpr provides whether the column is aggregation expression or not. SelectExpr struct { Col sqlparser.SelectExpr Aggr bool @@ -41,45 +43,63 @@ type ( // QueryProjection contains the information about the projections, group by and order by expressions used to do horizon planning. QueryProjection struct { // If you change the contents here, please update the toString() method - SelectExprs []SelectExpr - HasAggr bool - Distinct bool - groupByExprs []GroupBy - OrderExprs []OrderBy - CanPushDownSorting bool - HasStar bool + SelectExprs []SelectExpr + HasAggr bool + Distinct bool + groupByExprs []GroupBy + OrderExprs []ops.OrderBy + HasStar bool // AddedColumn keeps a counter for expressions added to solve HAVING expressions the user is not selecting AddedColumn int - } - // OrderBy contains the expression to used in order by and also if ordering is needed at VTGate level then what the weight_string function expression to be sent down for evaluation. - OrderBy struct { - Inner *sqlparser.Order - WeightStrExpr sqlparser.Expr + hasCheckedAlignment bool + + // TODO Remove once all horizon planning is done on the operators + CanPushDownSorting bool } // GroupBy contains the expression to used in group by and also if grouping is needed at VTGate level then what the weight_string function expression to be sent down for evaluation. GroupBy struct { - Inner sqlparser.Expr - WeightStrExpr sqlparser.Expr + Inner sqlparser.Expr + + // The simplified expressions is the "unaliased expression". + // In the following query, the group by has the inner expression + // `x` and the `SimplifiedExpr` is `table.col + 10`: + // select table.col + 10 as x, count(*) from tbl group by x + SimplifiedExpr sqlparser.Expr // The index at which the user expects to see this column. Set to nil, if the user does not ask for it InnerIndex *int // The original aliased expression that this group by is referring aliasedExpr *sqlparser.AliasedExpr + + // points to the column on the same aggregator + ColOffset int + WSOffset int } // Aggr encodes all information needed for aggregation functions Aggr struct { Original *sqlparser.AliasedExpr Func sqlparser.AggrFunc - OpCode engine.AggregateOpcode - Alias string + OpCode opcode.AggregateOpcode + + // OriginalOpCode will contain opcode.AggregateUnassigned unless we are changing opcode while pushing them down + OriginalOpCode opcode.AggregateOpcode + + Alias string + // The index at which the user expects to see this aggregated function. Set to nil, if the user does not ask for it - Index *int + // Only used in the old Horizon Planner + Index *int + Distinct bool + + // the offsets point to columns on the same aggregator + ColOffset int + WSOffset int } AggrRewriter struct { @@ -89,13 +109,52 @@ type ( } ) -func (b GroupBy) AsOrderBy() OrderBy { - return OrderBy{ +func (aggr Aggr) NeedsWeightString(ctx *plancontext.PlanningContext) bool { + return aggr.OpCode.NeedsComparableValues() && ctx.SemTable.NeedsWeightString(aggr.Func.GetArg()) +} + +func (aggr Aggr) GetTypeCollation(ctx *plancontext.PlanningContext) (sqltypes.Type, collations.ID) { + if aggr.Func == nil { + return sqltypes.Unknown, collations.Unknown + } + switch aggr.OpCode { + case opcode.AggregateMin, opcode.AggregateMax, opcode.AggregateSumDistinct, opcode.AggregateCountDistinct: + typ, col, _ := ctx.SemTable.TypeForExpr(aggr.Func.GetArg()) + return typ, col + + } + return sqltypes.Unknown, collations.Unknown +} + +// NewGroupBy creates a new group by from the given fields. +func NewGroupBy(inner, simplified sqlparser.Expr, aliasedExpr *sqlparser.AliasedExpr) GroupBy { + return GroupBy{ + Inner: inner, + SimplifiedExpr: simplified, + aliasedExpr: aliasedExpr, + ColOffset: -1, + WSOffset: -1, + } +} + +func NewAggr(opCode opcode.AggregateOpcode, f sqlparser.AggrFunc, original *sqlparser.AliasedExpr, alias string) Aggr { + return Aggr{ + Original: original, + Func: f, + OpCode: opCode, + Alias: alias, + ColOffset: -1, + WSOffset: -1, + } +} + +func (b GroupBy) AsOrderBy() ops.OrderBy { + return ops.OrderBy{ Inner: &sqlparser.Order{ Expr: b.Inner, Direction: sqlparser.AscOrder, }, - WeightStrExpr: b.WeightStrExpr, + SimplifiedExpr: b.SimplifiedExpr, } } @@ -104,18 +163,18 @@ func (b GroupBy) AsAliasedExpr() *sqlparser.AliasedExpr { return b.aliasedExpr } col, isColName := b.Inner.(*sqlparser.ColName) - if isColName && b.WeightStrExpr != b.Inner { + if isColName && b.SimplifiedExpr != b.Inner { return &sqlparser.AliasedExpr{ - Expr: b.WeightStrExpr, + Expr: b.SimplifiedExpr, As: col.Name, } } - if !isColName && b.WeightStrExpr != b.Inner { + if !isColName && b.SimplifiedExpr != b.Inner { panic("this should not happen - different inner and weighStringExpr and not a column alias") } return &sqlparser.AliasedExpr{ - Expr: b.WeightStrExpr, + Expr: b.SimplifiedExpr, } } @@ -142,45 +201,25 @@ func (s SelectExpr) GetAliasedExpr() (*sqlparser.AliasedExpr, error) { } } -// CreateQPFromSelect creates the QueryProjection for the input *sqlparser.Select -func CreateQPFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) (*QueryProjection, error) { +// createQPFromSelect creates the QueryProjection for the input *sqlparser.Select +func createQPFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) (*QueryProjection, error) { qp := &QueryProjection{ Distinct: sel.Distinct, } - err := qp.addSelectExpressions(sel) - if err != nil { + if err := qp.addSelectExpressions(sel); err != nil { return nil, err } - for _, group := range sel.GroupBy { - selectExprIdx, aliasExpr := qp.FindSelectExprIndexForExpr(ctx, group) - expr, weightStrExpr, err := qp.GetSimplifiedExpr(group) - if err != nil { - return nil, err - } - err = checkForInvalidGroupingExpressions(weightStrExpr) - if err != nil { - return nil, err - } - - groupBy := GroupBy{ - Inner: expr, - WeightStrExpr: weightStrExpr, - InnerIndex: selectExprIdx, - aliasedExpr: aliasExpr, - } - - qp.groupByExprs = append(qp.groupByExprs, groupBy) + if err := qp.addGroupBy(ctx, sel.GroupBy); err != nil { + return nil, err } - - err = qp.addOrderBy(sel.OrderBy) - if err != nil { + if err := qp.addOrderBy(ctx, sel.OrderBy); err != nil { return nil, err } - - if qp.Distinct && !qp.HasAggr { - qp.groupByExprs = nil + if !qp.HasAggr && sel.Having != nil { + qp.HasAggr = sqlparser.ContainsAggregation(sel.Having.Expr) } + qp.calculateDistinct(ctx) return qp, nil } @@ -213,7 +252,7 @@ func (ar *AggrRewriter) RewriteUp() func(*sqlparser.Cursor) bool { ar.Err = err return false } - if ar.st.EqualsExpr(ae.Expr, fExp) { + if ar.st.EqualsExprWithDeps(ae.Expr, fExp) { cursor.Replace(sqlparser.NewOffset(offset, fExp)) return true } @@ -224,7 +263,6 @@ func (ar *AggrRewriter) RewriteUp() func(*sqlparser.Cursor) bool { Col: &sqlparser.AliasedExpr{Expr: fExp}, } ar.qp.HasAggr = true - cursor.Replace(sqlparser.NewOffset(len(ar.qp.SelectExprs), fExp)) ar.qp.SelectExprs = append(ar.qp.SelectExprs, col) ar.qp.AddedColumn++ @@ -271,8 +309,8 @@ func (qp *QueryProjection) addSelectExpressions(sel *sqlparser.Select) error { return nil } -// CreateQPFromUnion creates the QueryProjection for the input *sqlparser.Union -func CreateQPFromUnion(union *sqlparser.Union) (*QueryProjection, error) { +// createQPFromUnion creates the QueryProjection for the input *sqlparser.Union +func createQPFromUnion(ctx *plancontext.PlanningContext, union *sqlparser.Union) (*QueryProjection, error) { qp := &QueryProjection{} sel := sqlparser.GetFirstSelect(union) @@ -281,7 +319,7 @@ func CreateQPFromUnion(union *sqlparser.Union) (*QueryProjection, error) { return nil, err } - err = qp.addOrderBy(union.OrderBy) + err = qp.addOrderBy(ctx, union.OrderBy) if err != nil { return nil, err } @@ -289,58 +327,122 @@ func CreateQPFromUnion(union *sqlparser.Union) (*QueryProjection, error) { return qp, nil } -func (qp *QueryProjection) addOrderBy(orderBy sqlparser.OrderBy) error { +type expressionSet struct { + exprs []sqlparser.Expr +} + +func (es *expressionSet) add(ctx *plancontext.PlanningContext, e sqlparser.Expr) bool { + idx := slices.IndexFunc(es.exprs, func(expr sqlparser.Expr) bool { + return ctx.SemTable.EqualsExprWithDeps(e, expr) + }) + + // if we already have this expression, there is no need to repeat it + if idx >= 0 { + return false + } + es.exprs = append(es.exprs, e) + return true +} + +func (qp *QueryProjection) addOrderBy(ctx *plancontext.PlanningContext, orderBy sqlparser.OrderBy) error { canPushDownSorting := true + es := &expressionSet{} for _, order := range orderBy { - expr, weightStrExpr, err := qp.GetSimplifiedExpr(order.Expr) - if err != nil { - return err - } - if sqlparser.IsNull(weightStrExpr) { + simpleExpr := qp.GetSimplifiedExpr(order.Expr) + if sqlparser.IsNull(simpleExpr) { // ORDER BY null can safely be ignored continue } - qp.OrderExprs = append(qp.OrderExprs, OrderBy{ - Inner: &sqlparser.Order{ - Expr: expr, - Direction: order.Direction, - }, - WeightStrExpr: weightStrExpr, + if !es.add(ctx, simpleExpr) { + continue + } + qp.OrderExprs = append(qp.OrderExprs, ops.OrderBy{ + Inner: sqlparser.CloneRefOfOrder(order), + SimplifiedExpr: simpleExpr, }) - canPushDownSorting = canPushDownSorting && !sqlparser.ContainsAggregation(weightStrExpr) + canPushDownSorting = canPushDownSorting && !sqlparser.ContainsAggregation(simpleExpr) } qp.CanPushDownSorting = canPushDownSorting return nil } +func (qp *QueryProjection) calculateDistinct(ctx *plancontext.PlanningContext) { + if qp.Distinct && !qp.HasAggr { + // grouping and distinct both lead to unique results, so we don't need + qp.groupByExprs = nil + } + + if qp.HasAggr && len(qp.groupByExprs) == 0 { + // this is a scalar aggregation and is inherently distinct + qp.Distinct = false + } + + if !qp.Distinct || len(qp.groupByExprs) == 0 { + return + } + + for _, gb := range qp.groupByExprs { + _, found := canReuseColumn(ctx, qp.SelectExprs, gb.SimplifiedExpr, func(expr SelectExpr) sqlparser.Expr { + getExpr, err := expr.GetExpr() + if err != nil { + panic(err) + } + return getExpr + }) + if !found { + return + } + } + + // since we are returning all grouping expressions, we know the results are guaranteed to be unique + qp.Distinct = false +} + +func (qp *QueryProjection) addGroupBy(ctx *plancontext.PlanningContext, groupBy sqlparser.GroupBy) error { + es := &expressionSet{} + for _, group := range groupBy { + selectExprIdx, aliasExpr := qp.FindSelectExprIndexForExpr(ctx, group) + simpleExpr := qp.GetSimplifiedExpr(group) + err := checkForInvalidGroupingExpressions(simpleExpr) + if err != nil { + return err + } + + if !es.add(ctx, simpleExpr) { + continue + } + + groupBy := NewGroupBy(group, simpleExpr, aliasExpr) + groupBy.InnerIndex = selectExprIdx + + qp.groupByExprs = append(qp.groupByExprs, groupBy) + } + return nil +} + // GetGrouping returns a copy of the grouping parameters of the QP func (qp *QueryProjection) GetGrouping() []GroupBy { - out := make([]GroupBy, len(qp.groupByExprs)) - copy(out, qp.groupByExprs) - return out + return slices.Clone(qp.groupByExprs) } func checkForInvalidAggregations(exp *sqlparser.AliasedExpr) error { return sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - if aggrFunc, isAggregate := node.(sqlparser.AggrFunc); isAggregate { - if aggrFunc.GetArgs() != nil && - len(aggrFunc.GetArgs()) != 1 { - return false, vterrors.VT03001(sqlparser.String(node)) - } + aggrFunc, isAggregate := node.(sqlparser.AggrFunc) + if !isAggregate { return true, nil } - + args := aggrFunc.GetArgs() + if args != nil && len(args) != 1 { + return false, vterrors.VT03001(sqlparser.String(node)) + } return true, nil + }, exp.Expr) } -func (qp *QueryProjection) isExprInGroupByExprs(ctx *plancontext.PlanningContext, expr SelectExpr) bool { +func (qp *QueryProjection) isExprInGroupByExprs(ctx *plancontext.PlanningContext, expr sqlparser.Expr) bool { for _, groupByExpr := range qp.groupByExprs { - exp, err := expr.GetExpr() - if err != nil { - return false - } - if ctx.SemTable.EqualsExpr(groupByExpr.WeightStrExpr, exp) { + if ctx.SemTable.EqualsExprWithDeps(groupByExpr.SimplifiedExpr, expr) { return true } } @@ -348,34 +450,32 @@ func (qp *QueryProjection) isExprInGroupByExprs(ctx *plancontext.PlanningContext } // GetSimplifiedExpr takes an expression used in ORDER BY or GROUP BY, and returns an expression that is simpler to evaluate -func (qp *QueryProjection) GetSimplifiedExpr(e sqlparser.Expr) (expr sqlparser.Expr, weightStrExpr sqlparser.Expr, err error) { +func (qp *QueryProjection) GetSimplifiedExpr(e sqlparser.Expr) sqlparser.Expr { + if qp == nil { + return e + } // If the ORDER BY is against a column alias, we need to remember the expression // behind the alias. The weightstring(.) calls needs to be done against that expression and not the alias. // Eg - select music.foo as bar, weightstring(music.foo) from music order by bar colExpr, isColName := e.(*sqlparser.ColName) - if !isColName { - return e, e, nil + if !(isColName && colExpr.Qualifier.IsEmpty()) { + // we are only interested in unqualified column names. if it's not a column name and not + return e } - if sqlparser.IsNull(e) { - return e, nil, nil - } - - if colExpr.Qualifier.IsEmpty() { - for _, selectExpr := range qp.SelectExprs { - aliasedExpr, isAliasedExpr := selectExpr.Col.(*sqlparser.AliasedExpr) - if !isAliasedExpr { - continue - } - isAliasExpr := !aliasedExpr.As.IsEmpty() - if isAliasExpr && colExpr.Name.Equal(aliasedExpr.As) { - return e, aliasedExpr.Expr, nil - } + for _, selectExpr := range qp.SelectExprs { + aliasedExpr, isAliasedExpr := selectExpr.Col.(*sqlparser.AliasedExpr) + if !isAliasedExpr { + continue + } + aliased := !aliasedExpr.As.IsEmpty() + if aliased && colExpr.Name.Equal(aliasedExpr.As) { + return aliasedExpr.Expr } } - return e, e, nil + return e } // toString should only be used for tests @@ -418,7 +518,84 @@ func (qp *QueryProjection) NeedsAggregation() bool { return qp.HasAggr || len(qp.groupByExprs) > 0 } -func (qp QueryProjection) onlyAggr() bool { +// NeedsProjecting returns true if we have projections that need to be evaluated at the vtgate level +// and can't be pushed down to MySQL +func (qp *QueryProjection) NeedsProjecting( + ctx *plancontext.PlanningContext, + pusher func(expr *sqlparser.AliasedExpr) (int, error), +) (needsVtGateEval bool, expressions []sqlparser.Expr, colNames []string, err error) { + for _, se := range qp.SelectExprs { + var ae *sqlparser.AliasedExpr + ae, err = se.GetAliasedExpr() + if err != nil { + return false, nil, nil, err + } + + expr := ae.Expr + colNames = append(colNames, ae.ColumnName()) + + if _, isCol := expr.(*sqlparser.ColName); isCol { + offset, err := pusher(ae) + if err != nil { + return false, nil, nil, err + } + expressions = append(expressions, sqlparser.NewOffset(offset, expr)) + continue + } + + stopOnError := func(sqlparser.SQLNode, sqlparser.SQLNode) bool { + return err == nil + } + rewriter := func(cursor *sqlparser.CopyOnWriteCursor) { + col, isCol := cursor.Node().(*sqlparser.ColName) + if !isCol { + return + } + var tableInfo semantics.TableInfo + tableInfo, err = ctx.SemTable.TableInfoForExpr(col) + if err != nil { + return + } + dt, isDT := tableInfo.(*semantics.DerivedTable) + if !isDT { + return + } + + rewritten := semantics.RewriteDerivedTableExpression(col, dt) + if sqlparser.ContainsAggregation(rewritten) { + offset, tErr := pusher(&sqlparser.AliasedExpr{Expr: col}) + if tErr != nil { + err = tErr + return + } + cursor.Replace(sqlparser.NewOffset(offset, col)) + } + } + newExpr := sqlparser.CopyOnRewrite(expr, stopOnError, rewriter, nil) + + if err != nil { + return + } + + if newExpr != expr { + // if we changed the expression, it means that we have to evaluate the rest at the vtgate level + expressions = append(expressions, newExpr.(sqlparser.Expr)) + needsVtGateEval = true + continue + } + + // we did not need to push any parts of this expression down. Let's check if we can push all of it + offset, err := pusher(ae) + if err != nil { + return false, nil, nil, err + } + expressions = append(expressions, sqlparser.NewOffset(offset, expr)) + } + + return +} + +func (qp *QueryProjection) onlyAggr() bool { if !qp.HasAggr { return false } @@ -441,16 +618,16 @@ func (qp *QueryProjection) NeedsDistinct() bool { return true } -func (qp *QueryProjection) AggregationExpressions(ctx *plancontext.PlanningContext) (out []Aggr, err error) { +func (qp *QueryProjection) AggregationExpressions(ctx *plancontext.PlanningContext, allowComplexExpression bool) (out []Aggr, complex bool, err error) { orderBy: for _, orderExpr := range qp.OrderExprs { - orderExpr := orderExpr.WeightStrExpr + orderExpr := orderExpr.SimplifiedExpr for _, expr := range qp.SelectExprs { col, ok := expr.Col.(*sqlparser.AliasedExpr) if !ok { continue } - if ctx.SemTable.EqualsExpr(col.Expr, orderExpr) { + if ctx.SemTable.EqualsExprWithDeps(col.Expr, orderExpr) { continue orderBy // we found the expression we were looking for! } } @@ -461,62 +638,86 @@ orderBy: qp.AddedColumn++ } + // Here we go over the expressions we are returning. Since we know we are aggregating, + // all expressions have to be either grouping expressions or aggregate expressions. + // If we find an expression that is neither, we treat is as a special aggregation function AggrRandom for idx, expr := range qp.SelectExprs { aliasedExpr, err := expr.GetAliasedExpr() if err != nil { - return nil, err + return nil, false, err } idxCopy := idx if !sqlparser.ContainsAggregation(expr.Col) { - if !qp.isExprInGroupByExprs(ctx, expr) { - out = append(out, Aggr{ - Original: aliasedExpr, - OpCode: engine.AggregateRandom, - Alias: aliasedExpr.ColumnName(), - Index: &idxCopy, - }) + getExpr, err := expr.GetExpr() + if err != nil { + return nil, false, err + } + if !qp.isExprInGroupByExprs(ctx, getExpr) { + aggr := NewAggr(opcode.AggregateAnyValue, nil, aliasedExpr, aliasedExpr.ColumnName()) + aggr.Index = &idxCopy + out = append(out, aggr) } continue } - fnc, isAggregate := aliasedExpr.Expr.(sqlparser.AggrFunc) - if !isAggregate { - return nil, vterrors.VT12001("in scatter query: complex aggregate expression") - } - - opcode, found := engine.SupportedAggregates[strings.ToLower(fnc.AggrName())] - if !found { - return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: aggregation function '%s'", fnc.AggrName())) + _, isAggregate := aliasedExpr.Expr.(sqlparser.AggrFunc) + if !isAggregate && !allowComplexExpression { + return nil, false, vterrors.VT12001("in scatter query: complex aggregate expression") } - if opcode == engine.AggregateCount { - if _, isStar := fnc.(*sqlparser.CountStar); isStar { - opcode = engine.AggregateCountStar + sqlparser.CopyOnRewrite(aliasedExpr.Expr, func(node, parent sqlparser.SQLNode) bool { + ex, isExpr := node.(sqlparser.Expr) + if !isExpr { + return true } - } + if aggr, isAggr := node.(sqlparser.AggrFunc); isAggr { + ae := aeWrap(aggr) + if aggr == aliasedExpr.Expr { + ae = aliasedExpr + } + aggrFunc := createAggrFromAggrFunc(aggr, ae) + aggrFunc.Index = &idxCopy + out = append(out, aggrFunc) + return false + } + if sqlparser.ContainsAggregation(node) { + complex = true + return true + } + if !qp.isExprInGroupByExprs(ctx, ex) { + aggr := NewAggr(opcode.AggregateAnyValue, nil, aeWrap(ex), "") + aggr.Index = &idxCopy + out = append(out, aggr) + } + return false + }, nil, nil) + } + return +} - aggr, _ := aliasedExpr.Expr.(sqlparser.AggrFunc) +func createAggrFromAggrFunc(fnc sqlparser.AggrFunc, aliasedExpr *sqlparser.AliasedExpr) Aggr { + code := opcode.SupportedAggregates[fnc.AggrName()] - if aggr.IsDistinct() { - switch opcode { - case engine.AggregateCount: - opcode = engine.AggregateCountDistinct - case engine.AggregateSum: - opcode = engine.AggregateSumDistinct - } + if code == opcode.AggregateCount { + if _, isStar := fnc.(*sqlparser.CountStar); isStar { + code = opcode.AggregateCountStar } + } - out = append(out, Aggr{ - Original: aliasedExpr, - Func: aggr, - OpCode: opcode, - Alias: aliasedExpr.ColumnName(), - Index: &idxCopy, - Distinct: aggr.IsDistinct(), - }) + distinct := sqlparser.IsDistinct(fnc) + if distinct { + switch code { + case opcode.AggregateCount: + code = opcode.AggregateCountDistinct + case opcode.AggregateSum: + code = opcode.AggregateSumDistinct + } } - return + + aggr := NewAggr(code, fnc, aliasedExpr, aliasedExpr.ColumnName()) + aggr.Distinct = distinct + return aggr } // FindSelectExprIndexForExpr returns the index of the given expression in the select expressions, if it is part of it @@ -535,19 +736,15 @@ func (qp *QueryProjection) FindSelectExprIndexForExpr(ctx *plancontext.PlanningC return &idx, aliasedExpr } } - if ctx.SemTable.EqualsExpr(aliasedExpr.Expr, expr) { + if ctx.SemTable.EqualsExprWithDeps(aliasedExpr.Expr, expr) { return &idx, aliasedExpr } } return nil, nil } -// AlignGroupByAndOrderBy aligns the group by and order by columns, so they are in the same order -// The GROUP BY clause is a set - the order between the elements does not make any difference, -// so we can simply re-arrange the column order -// We are also free to add more ORDER BY columns than the user asked for which we leverage, -// so the input is already ordered according to the GROUP BY columns used -func (qp *QueryProjection) AlignGroupByAndOrderBy(ctx *plancontext.PlanningContext) { +// OldAlignGroupByAndOrderBy TODO Remove once all of horizon planning is done on the operators +func (qp *QueryProjection) OldAlignGroupByAndOrderBy(ctx *plancontext.PlanningContext) { // The ORDER BY can be performed before the OA var newGrouping []GroupBy @@ -565,7 +762,7 @@ func (qp *QueryProjection) AlignGroupByAndOrderBy(ctx *plancontext.PlanningConte used := make([]bool, len(qp.groupByExprs)) for _, orderExpr := range qp.OrderExprs { for i, groupingExpr := range qp.groupByExprs { - if !used[i] && ctx.SemTable.EqualsExpr(groupingExpr.WeightStrExpr, orderExpr.WeightStrExpr) { + if !used[i] && ctx.SemTable.EqualsExpr(groupingExpr.SimplifiedExpr, orderExpr.SimplifiedExpr) { newGrouping = append(newGrouping, groupingExpr) used[i] = true } @@ -586,6 +783,45 @@ func (qp *QueryProjection) AlignGroupByAndOrderBy(ctx *plancontext.PlanningConte qp.groupByExprs = newGrouping } +// AlignGroupByAndOrderBy aligns the group by and order by columns, so they are in the same order +// The GROUP BY clause is a set - the order between the elements does not make any difference, +// so we can simply re-arrange the column order +// We are also free to add more ORDER BY columns than the user asked for which we leverage, +// so the input is already ordered according to the GROUP BY columns used +func (qp *QueryProjection) AlignGroupByAndOrderBy(ctx *plancontext.PlanningContext) bool { + if qp == nil { + return false + } + if qp.hasCheckedAlignment { + return false + } + qp.hasCheckedAlignment = true + newGrouping := make([]GroupBy, 0, len(qp.groupByExprs)) + used := make([]bool, len(qp.groupByExprs)) + +outer: + for _, orderBy := range qp.OrderExprs { + for gidx, groupBy := range qp.groupByExprs { + if ctx.SemTable.EqualsExprWithDeps(groupBy.SimplifiedExpr, orderBy.SimplifiedExpr) { + newGrouping = append(newGrouping, groupBy) + used[gidx] = true + continue outer + } + } + return false + } + + // if we get here, it means that all the OrderBy expressions are also in the GroupBy clause + for gidx, gb := range qp.groupByExprs { + if !used[gidx] { + newGrouping = append(newGrouping, gb) + qp.OrderExprs = append(qp.OrderExprs, gb.AsOrderBy()) + } + } + qp.groupByExprs = newGrouping + return true +} + // AddGroupBy does just that func (qp *QueryProjection) AddGroupBy(by GroupBy) { qp.groupByExprs = append(qp.groupByExprs, by) @@ -601,8 +837,8 @@ func checkForInvalidGroupingExpressions(expr sqlparser.Expr) error { return false, vterrors.VT03005(sqlparser.String(expr)) } _, isSubQ := node.(*sqlparser.Subquery) - arg, isArg := node.(sqlparser.Argument) - if isSubQ || (isArg && strings.HasPrefix(string(arg), "__sq")) { + arg, isArg := node.(*sqlparser.Argument) + if isSubQ || (isArg && strings.HasPrefix(arg.Name, "__sq")) { return false, vterrors.VT12001("subqueries in GROUP BY") } return true, nil @@ -632,3 +868,13 @@ func CompareRefInt(a *int, b *int) bool { } return *a < *b } + +func CreateQPFromSelectStatement(ctx *plancontext.PlanningContext, stmt sqlparser.SelectStatement) (*QueryProjection, error) { + switch sel := stmt.(type) { + case *sqlparser.Select: + return createQPFromSelect(ctx, sel) + case *sqlparser.Union: + return createQPFromUnion(ctx, sel) + } + return nil, vterrors.VT13001("can only create query projection from Union and Select statements") +} diff --git a/go/vt/vtgate/planbuilder/operators/queryprojection_test.go b/go/vt/vtgate/planbuilder/operators/queryprojection_test.go index 13a5291f680..7c92b716d7c 100644 --- a/go/vt/vtgate/planbuilder/operators/queryprojection_test.go +++ b/go/vt/vtgate/planbuilder/operators/queryprojection_test.go @@ -19,14 +19,13 @@ package operators import ( "testing" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" ) func TestQP(t *testing.T) { @@ -34,7 +33,7 @@ func TestQP(t *testing.T) { sql string expErr string - expOrder []OrderBy + expOrder []ops.OrderBy }{ { sql: "select * from user", @@ -47,24 +46,23 @@ func TestQP(t *testing.T) { }, { sql: "select 1, count(1) from user order by 1", - expOrder: []OrderBy{ - {Inner: &sqlparser.Order{Expr: sqlparser.NewIntLiteral("1")}, WeightStrExpr: sqlparser.NewIntLiteral("1")}, + expOrder: []ops.OrderBy{ + {Inner: &sqlparser.Order{Expr: sqlparser.NewIntLiteral("1")}, SimplifiedExpr: sqlparser.NewIntLiteral("1")}, }, }, { sql: "select id from user order by col, id, 1", - expOrder: []OrderBy{ - {Inner: &sqlparser.Order{Expr: sqlparser.NewColName("col")}, WeightStrExpr: sqlparser.NewColName("col")}, - {Inner: &sqlparser.Order{Expr: sqlparser.NewColName("id")}, WeightStrExpr: sqlparser.NewColName("id")}, - {Inner: &sqlparser.Order{Expr: sqlparser.NewColName("id")}, WeightStrExpr: sqlparser.NewColName("id")}, + expOrder: []ops.OrderBy{ + {Inner: &sqlparser.Order{Expr: sqlparser.NewColName("col")}, SimplifiedExpr: sqlparser.NewColName("col")}, + {Inner: &sqlparser.Order{Expr: sqlparser.NewColName("id")}, SimplifiedExpr: sqlparser.NewColName("id")}, }, }, { sql: "SELECT CONCAT(last_name,', ',first_name) AS full_name FROM mytable ORDER BY full_name", // alias in order not supported - expOrder: []OrderBy{ + expOrder: []ops.OrderBy{ { Inner: &sqlparser.Order{Expr: sqlparser.NewColName("full_name")}, - WeightStrExpr: &sqlparser.FuncExpr{ + SimplifiedExpr: &sqlparser.FuncExpr{ Name: sqlparser.NewIdentifierCI("CONCAT"), Exprs: sqlparser.SelectExprs{ &sqlparser.AliasedExpr{Expr: sqlparser.NewColName("last_name")}, @@ -89,7 +87,7 @@ func TestQP(t *testing.T) { _, err = semantics.Analyze(sel, "", &semantics.FakeSI{}) require.NoError(t, err) - qp, err := CreateQPFromSelect(ctx, sel) + qp, err := createQPFromSelect(ctx, sel) if tcase.expErr != "" { require.Error(t, err) require.Contains(t, err.Error(), tcase.expErr) @@ -99,7 +97,7 @@ func TestQP(t *testing.T) { require.Equal(t, len(tcase.expOrder), len(qp.OrderExprs), "not enough order expressions in QP") for index, expOrder := range tcase.expOrder { assert.True(t, sqlparser.Equals.SQLNode(expOrder.Inner, qp.OrderExprs[index].Inner), "want: %+v, got %+v", sqlparser.String(expOrder.Inner), sqlparser.String(qp.OrderExprs[index].Inner)) - assert.True(t, sqlparser.Equals.SQLNode(expOrder.WeightStrExpr, qp.OrderExprs[index].WeightStrExpr), "want: %v, got %v", sqlparser.String(expOrder.WeightStrExpr), sqlparser.String(qp.OrderExprs[index].WeightStrExpr)) + assert.True(t, sqlparser.Equals.SQLNode(expOrder.SimplifiedExpr, qp.OrderExprs[index].SimplifiedExpr), "want: %v, got %v", sqlparser.String(expOrder.SimplifiedExpr), sqlparser.String(qp.OrderExprs[index].SimplifiedExpr)) } } }) @@ -196,7 +194,7 @@ func TestQPSimplifiedExpr(t *testing.T) { _, err = semantics.Analyze(sel, "", &semantics.FakeSI{}) require.NoError(t, err) ctx := &plancontext.PlanningContext{SemTable: semantics.EmptySemTable()} - qp, err := CreateQPFromSelect(ctx, sel) + qp, err := createQPFromSelect(ctx, sel) require.NoError(t, err) require.Equal(t, tc.expected[1:], qp.toString()) }) diff --git a/go/vt/vtgate/planbuilder/operators/rewrite/rewriters.go b/go/vt/vtgate/planbuilder/operators/rewrite/rewriters.go index 30d6891c193..d90bcf41c36 100644 --- a/go/vt/vtgate/planbuilder/operators/rewrite/rewriters.go +++ b/go/vt/vtgate/planbuilder/operators/rewrite/rewriters.go @@ -17,34 +17,74 @@ limitations under the License. package rewrite import ( + "fmt" + "slices" + + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/semantics" ) type ( - Func func(semantics.TableSet, ops.Operator) (ops.Operator, TreeIdentity, error) - BreakableFunc func(ops.Operator) (ops.Operator, TreeIdentity, VisitRule, error) + // VisitF is the visitor that walks an operator tree + VisitF func( + op ops.Operator, // op is the operator being visited + lhsTables semantics.TableSet, // lhsTables contains the TableSet for all table on the LHS of our parent + isRoot bool, // isRoot will be true for the root of the operator tree + ) (ops.Operator, *ApplyResult, error) + + // ShouldVisit is used when we want to control which nodes and ancestors to visit and which to skip + ShouldVisit func(ops.Operator) VisitRule - // TreeIdentity tracks modifications to node and expression trees. + // ApplyResult tracks modifications to node and expression trees. // Only return SameTree when it is acceptable to return the original // input and discard the returned result as a performance improvement. - TreeIdentity bool + ApplyResult struct { + Transformations []Rewrite + } + + Rewrite struct { + Message string + Op ops.Operator + } // VisitRule signals to the rewriter if the children of this operator should be visited or not VisitRule bool ) -const ( - SameTree TreeIdentity = false - NewTree TreeIdentity = true +var ( + SameTree *ApplyResult = nil +) +const ( VisitChildren VisitRule = true SkipChildren VisitRule = false ) +func NewTree(message string, op ops.Operator) *ApplyResult { + if DebugOperatorTree { + fmt.Println(">>>>>>>> " + message) + } + return &ApplyResult{Transformations: []Rewrite{{Message: message, Op: op}}} +} + +func (ar *ApplyResult) Merge(other *ApplyResult) *ApplyResult { + if ar == nil { + return other + } + if other == nil { + return ar + } + return &ApplyResult{Transformations: append(ar.Transformations, other.Transformations...)} +} + +func (ar *ApplyResult) Changed() bool { + return ar != nil +} + // Visit allows for the walking of the operator tree. If any error is returned, the walk is aborted func Visit(root ops.Operator, visitor func(ops.Operator) error) error { - _, err := TopDown(root, func(op ops.Operator) (ops.Operator, TreeIdentity, VisitRule, error) { + _, _, err := breakableTopDown(root, func(op ops.Operator) (ops.Operator, *ApplyResult, VisitRule, error) { err := visitor(op) if err != nil { return nil, SameTree, SkipChildren, err @@ -55,28 +95,136 @@ func Visit(root ops.Operator, visitor func(ops.Operator) error) error { } // BottomUp rewrites an operator tree from the bottom up. BottomUp applies a transformation function to -// the given operator tree from the bottom up. Each callback [f] returns a TreeIdentity that is aggregated +// the given operator tree from the bottom up. Each callback [f] returns a ApplyResult that is aggregated // into a final output indicating whether the operator tree was changed. -func BottomUp(root ops.Operator, rootID semantics.TableSet, resolveID func(ops.Operator) semantics.TableSet, f Func) (ops.Operator, error) { - op, _, err := bottomUp(root, rootID, resolveID, f) +func BottomUp( + root ops.Operator, + resolveID func(ops.Operator) semantics.TableSet, + visit VisitF, + shouldVisit ShouldVisit, +) (ops.Operator, error) { + op, _, err := bottomUp(root, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) if err != nil { return nil, err } return op, nil } -// TopDown applies a transformation function to the given operator tree from the bottom up. = -// Each callback [f] returns a TreeIdentity that is aggregated into a final output indicating whether the -// operator tree was changed. -// The callback also returns a VisitRule that signals whether the children of this operator should be visited or not -func TopDown(in ops.Operator, rewriter BreakableFunc) (ops.Operator, error) { - op, _, err := breakableTopDown(in, rewriter) - return op, err +var DebugOperatorTree = false + +func EnableDebugPrinting() (reset func()) { + t := DebugOperatorTree + DebugOperatorTree = true + return func() { + DebugOperatorTree = t + } +} + +// FixedPointBottomUp rewrites an operator tree much like BottomUp does, +// but does the rewriting repeatedly, until a tree walk is done with no changes to the tree. +func FixedPointBottomUp( + root ops.Operator, + resolveID func(ops.Operator) semantics.TableSet, + visit VisitF, + shouldVisit ShouldVisit, +) (op ops.Operator, err error) { + var id *ApplyResult + op = root + // will loop while the rewriting changes anything + for ok := true; ok; ok = id != SameTree { + if DebugOperatorTree { + fmt.Println(ops.ToTree(op)) + } + // Continue the top-down rewriting process as long as changes were made during the last traversal + op, id, err = bottomUp(op, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) + if err != nil { + return nil, err + } + } + + return op, nil +} + +// BottomUpAll rewrites an operator tree from the bottom up. BottomUp applies a transformation function to +// the given operator tree from the bottom up. Each callback [f] returns a ApplyResult that is aggregated +// into a final output indicating whether the operator tree was changed. +func BottomUpAll( + root ops.Operator, + resolveID func(ops.Operator) semantics.TableSet, + visit VisitF, +) (ops.Operator, error) { + return BottomUp(root, resolveID, visit, func(ops.Operator) VisitRule { + return VisitChildren + }) +} + +// TopDown rewrites an operator tree from the bottom up. BottomUp applies a transformation function to +// the given operator tree from the bottom up. Each callback [f] returns a ApplyResult that is aggregated +// into a final output indicating whether the operator tree was changed. +// +// Parameters: +// - root: The root operator of the tree to be traversed. +// - resolveID: A function to resolve the TableSet of an operator. +// - visit: The VisitF function to be called for each visited operator. +// - shouldVisit: The ShouldVisit function to control which nodes and ancestors to visit and which to skip. +// +// Returns: +// - ops.Operator: The root of the (potentially) transformed operator tree. +// - error: An error if any occurred during the traversal. +func TopDown( + root ops.Operator, + resolveID func(ops.Operator) semantics.TableSet, + visit VisitF, + shouldVisit ShouldVisit, +) (op ops.Operator, err error) { + op, _, err = topDown(root, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) + if err != nil { + return nil, err + } + + return op, nil } -func bottomUp(root ops.Operator, rootID semantics.TableSet, resolveID func(ops.Operator) semantics.TableSet, rewriter Func) (ops.Operator, TreeIdentity, error) { +// Swap takes a tree like a->b->c and swaps `a` and `b`, so we end up with b->a->c +func Swap(parent, child ops.Operator, message string) (ops.Operator, *ApplyResult, error) { + c := child.Inputs() + if len(c) != 1 { + return nil, nil, vterrors.VT13001("Swap can only be used on single input operators") + } + + aInputs := slices.Clone(parent.Inputs()) + var tmp ops.Operator + for i, in := range aInputs { + if in == child { + tmp = aInputs[i] + aInputs[i] = c[0] + break + } + } + if tmp == nil { + return nil, nil, vterrors.VT13001("Swap can only be used when the second argument is an input to the first") + } + + child.SetInputs([]ops.Operator{parent}) + parent.SetInputs(aInputs) + + return child, NewTree(message, parent), nil +} + +func bottomUp( + root ops.Operator, + rootID semantics.TableSet, + resolveID func(ops.Operator) semantics.TableSet, + rewriter VisitF, + shouldVisit ShouldVisit, + isRoot bool, +) (ops.Operator, *ApplyResult, error) { + if !shouldVisit(root) { + return root, SameTree, nil + } + oldInputs := root.Inputs() - anythingChanged := false + var anythingChanged *ApplyResult newInputs := make([]ops.Operator, len(oldInputs)) childID := rootID @@ -88,56 +236,104 @@ func bottomUp(root ops.Operator, rootID semantics.TableSet, resolveID func(ops.O for i, operator := range oldInputs { // We merge the table set of all the LHS above the current root so that we can // send it down to the current RHS. - // We don't want to send the LHS table set to the RHS if the root is an UNION. + // We don't want to send the LHS table set to the RHS if the root is a UNION. // Some operators, like SubQuery, can have multiple child operators on the RHS if _, isUnion := root.(noLHSTableSet); !isUnion && i > 0 { childID = childID.Merge(resolveID(oldInputs[0])) } - in, changed, err := bottomUp(operator, childID, resolveID, rewriter) + in, changed, err := bottomUp(operator, childID, resolveID, rewriter, shouldVisit, false) if err != nil { - return nil, SameTree, err - } - if changed == NewTree { - anythingChanged = true + return nil, nil, err } + anythingChanged = anythingChanged.Merge(changed) newInputs[i] = in } - if anythingChanged { + if anythingChanged.Changed() { root = root.Clone(newInputs) } - newOp, treeIdentity, err := rewriter(rootID, root) + newOp, treeIdentity, err := rewriter(root, rootID, isRoot) if err != nil { - return nil, SameTree, err - } - if anythingChanged { - treeIdentity = NewTree + return nil, nil, err } - return newOp, treeIdentity, nil + anythingChanged = anythingChanged.Merge(treeIdentity) + return newOp, anythingChanged, nil } -func breakableTopDown(in ops.Operator, rewriter BreakableFunc) (ops.Operator, TreeIdentity, error) { +func breakableTopDown( + in ops.Operator, + rewriter func(ops.Operator) (ops.Operator, *ApplyResult, VisitRule, error), +) (ops.Operator, *ApplyResult, error) { newOp, identity, visit, err := rewriter(in) if err != nil || visit == SkipChildren { return newOp, identity, err } - anythingChanged := identity == NewTree + var anythingChanged *ApplyResult oldInputs := newOp.Inputs() newInputs := make([]ops.Operator, len(oldInputs)) for i, oldInput := range oldInputs { newInputs[i], identity, err = breakableTopDown(oldInput, rewriter) - anythingChanged = anythingChanged || identity == NewTree + anythingChanged = anythingChanged.Merge(identity) if err != nil { return nil, SameTree, err } } - if anythingChanged { - return newOp.Clone(newInputs), NewTree, nil + if anythingChanged.Changed() { + return newOp, SameTree, nil + } + + return newOp.Clone(newInputs), anythingChanged, nil +} + +// topDown is a helper function that recursively traverses the operator tree from the +// top down and applies the given transformation function. It also returns the ApplyResult +// indicating whether the tree was changed +func topDown( + root ops.Operator, + rootID semantics.TableSet, + resolveID func(ops.Operator) semantics.TableSet, + rewriter VisitF, + shouldVisit ShouldVisit, + isRoot bool, +) (ops.Operator, *ApplyResult, error) { + newOp, anythingChanged, err := rewriter(root, rootID, isRoot) + if err != nil { + return nil, nil, err + } + + if !shouldVisit(root) { + return newOp, anythingChanged, nil + } + + if anythingChanged.Changed() { + root = newOp + } + + oldInputs := root.Inputs() + newInputs := make([]ops.Operator, len(oldInputs)) + childID := rootID + + type noLHSTableSet interface{ NoLHSTableSet() } + + for i, operator := range oldInputs { + if _, isUnion := root.(noLHSTableSet); !isUnion && i > 0 { + childID = childID.Merge(resolveID(oldInputs[0])) + } + in, changed, err := topDown(operator, childID, resolveID, rewriter, shouldVisit, false) + if err != nil { + return nil, nil, err + } + anythingChanged = anythingChanged.Merge(changed) + newInputs[i] = in + } + + if anythingChanged != SameTree { + return root.Clone(newInputs), anythingChanged, nil } - return newOp, SameTree, nil + return root, SameTree, nil } diff --git a/go/vt/vtgate/planbuilder/operators/route.go b/go/vt/vtgate/planbuilder/operators/route.go index 276de2a23c1..eef886249af 100644 --- a/go/vt/vtgate/planbuilder/operators/route.go +++ b/go/vt/vtgate/planbuilder/operators/route.go @@ -17,13 +17,16 @@ limitations under the License. package operators import ( + "fmt" + "strings" + + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -36,6 +39,20 @@ type ( MergedWith []*Route Routing Routing + + Ordering []RouteOrdering + + Comments *sqlparser.ParsedComments + Lock sqlparser.Lock + + ResultColumns int + } + + RouteOrdering struct { + AST sqlparser.Expr + // Offset and WOffset will contain the offset to the column (and the weightstring column). -1 if it's missing + Offset, WOffset int + Direction sqlparser.OrderDirection } // VindexPlusPredicates is a struct used to store all the predicates that the vindex can be used to query @@ -51,7 +68,7 @@ type ( VindexOption struct { Ready bool Values []evalengine.Expr - // columns that we have seen so far. Used only for multi-column vindexes so that we can track how many columns part of the vindex we have seen + // Columns that we have seen so far. Used only for multi-column vindexes so that we can track how many Columns part of the vindex we have seen ColsSeen map[string]any ValueExprs []sqlparser.Expr Predicates []sqlparser.Expr @@ -90,8 +107,6 @@ type ( } ) -var _ ops.PhysicalOperator = (*Route)(nil) - // UpdateRoutingLogic first checks if we are dealing with a predicate that func UpdateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr, r Routing) (Routing, error) { ks := r.Keyspace() @@ -158,7 +173,7 @@ func isConstantFalse(expr sqlparser.Expr) bool { if err != nil { return false } - if eres.Value().IsNull() { + if eres.Value(collations.Default()).IsNull() { return false } b, err := eres.ToBooleanStrict() @@ -168,9 +183,6 @@ func isConstantFalse(expr sqlparser.Expr) bool { return !b } -// IPhysical implements the PhysicalOperator interface -func (*Route) IPhysical() {} - // Cost implements the Operator interface func (r *Route) Cost() int { return r.Routing.Cost() @@ -189,6 +201,11 @@ func (r *Route) Inputs() []ops.Operator { return []ops.Operator{r.Source} } +// SetInputs implements the Operator interface +func (r *Route) SetInputs(ops []ops.Operator) { + r.Source = ops[0] +} + func createOption( colVindex *vindexes.ColumnVindex, vfunc func(*vindexes.ColumnVindex) vindexes.Vindex, @@ -517,8 +534,146 @@ func (r *Route) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Ex return r, err } -func (r *Route) AddColumn(ctx *plancontext.PlanningContext, e sqlparser.Expr) (int, error) { - return r.Source.AddColumn(ctx, e) +func createProjection(ctx *plancontext.PlanningContext, src ops.Operator) (*Projection, error) { + proj := &Projection{Source: src} + cols, err := src.GetColumns(ctx) + if err != nil { + return nil, err + } + for _, col := range cols { + proj.addUnexploredExpr(col, col.Expr) + } + return proj, nil +} + +func (r *Route) AddColumns(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) ([]int, error) { + offsets := make([]int, len(exprs)) + var notFoundExprs []*sqlparser.AliasedExpr + var pendingOffsetIdx []int + for idx, expr := range exprs { + removeKeyspaceFromSelectExpr(expr) + + if reuse { + offset, err := r.FindCol(ctx, expr.Expr, true) + if err != nil { + return nil, err + } + if offset != -1 { + offsets[idx] = offset + continue + } + } + notFoundExprs = append(notFoundExprs, expr) + pendingOffsetIdx = append(pendingOffsetIdx, idx) + } + + if len(notFoundExprs) == 0 { + // we were able to find all columns, so we don't need to fetch anything else + return offsets, nil + } + + // if at least one column is not already present, we check if we can easily find a projection + // or aggregation in our source that we can add to + op, ok, remainingOffsets := addMultipleColumnsToInput(ctx, r.Source, reuse, addToGroupBy, notFoundExprs) + r.Source = op + if ok { + for i, offsetIdx := range pendingOffsetIdx { + offsets[offsetIdx] = remainingOffsets[i] + } + return offsets, nil + } + + // If no-one could be found, we probably don't have one yet, so we add one here + src, err := createProjection(ctx, r.Source) + if err != nil { + return nil, err + } + r.Source = src + + return src.addColumnsWithoutPushing(ctx, reuse, addToGroupBy, exprs), nil +} + +type selectExpressions interface { + ops.Operator + addColumnWithoutPushing(expr *sqlparser.AliasedExpr, addToGroupBy bool) int + addColumnsWithoutPushing(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) []int + isDerived() bool +} + +// addColumnToInput adds a column to an operator without pushing it down. +// It will return a bool indicating whether the addition was succesful or not, and an offset to where the column can be found +func addMultipleColumnsToInput(ctx *plancontext.PlanningContext, operator ops.Operator, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) (ops.Operator, bool, []int) { + switch op := operator.(type) { + case *CorrelatedSubQueryOp: + src, added, offset := addMultipleColumnsToInput(ctx, op.Outer, reuse, addToGroupBy, exprs) + if added { + op.Outer = src + } + return op, added, offset + + case *Distinct: + src, added, offset := addMultipleColumnsToInput(ctx, op.Source, reuse, addToGroupBy, exprs) + if added { + op.Source = src + } + return op, added, offset + + case *Limit: + src, added, offset := addMultipleColumnsToInput(ctx, op.Source, reuse, addToGroupBy, exprs) + if added { + op.Source = src + } + return op, added, offset + + case *Ordering: + src, added, offset := addMultipleColumnsToInput(ctx, op.Source, reuse, addToGroupBy, exprs) + if added { + op.Source = src + } + return op, added, offset + + case selectExpressions: + if op.isDerived() { + // if the only thing we can push to is a derived table, + // we have to add a new projection and can't build on this one + return op, false, nil + } + offset := op.addColumnsWithoutPushing(ctx, reuse, addToGroupBy, exprs) + return op, true, offset + case *Union: + tableID := semantics.SingleTableSet(len(ctx.SemTable.Tables)) + ctx.SemTable.Tables = append(ctx.SemTable.Tables, nil) + unionColumns, err := op.GetColumns(ctx) + if err != nil { + return op, false, nil + } + proj := &Projection{ + Source: op, + Columns: unionColumns, + Projections: nil, + TableID: &tableID, + Alias: "dt", + } + return addMultipleColumnsToInput(ctx, proj, reuse, addToGroupBy, exprs) + default: + return op, false, nil + } +} + +func (r *Route) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, _ bool) (int, error) { + return r.Source.FindCol(ctx, expr, true) +} + +func (r *Route) GetColumns(ctx *plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return r.Source.GetColumns(ctx) +} + +func (r *Route) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return r.Source.GetSelectExprs(ctx) +} + +func (r *Route) GetOrdering() ([]ops.OrderBy, error) { + return r.Source.GetOrdering() } // TablesUsed returns tables used by MergedWith routes, which are not included @@ -532,3 +687,107 @@ func (r *Route) TablesUsed() []string { } return collect() } +func isSpecialOrderBy(o ops.OrderBy) bool { + if sqlparser.IsNull(o.Inner.Expr) { + return true + } + f, isFunction := o.Inner.Expr.(*sqlparser.FuncExpr) + return isFunction && f.Name.Lowered() == "rand" +} + +func (r *Route) planOffsets(ctx *plancontext.PlanningContext) (err error) { + // if operator is returning data from a single shard, we don't need to do anything more + if r.IsSingleShard() { + return nil + } + + // if we are getting results from multiple shards, we need to do a merge-sort + // between them to get the final output correctly sorted + ordering, err := r.Source.GetOrdering() + if err != nil || len(ordering) == 0 { + return err + } + + columns, err := r.Source.GetColumns(ctx) + if err != nil { + return err + } + + for _, order := range ordering { + if isSpecialOrderBy(order) { + continue + } + offset, err := r.getOffsetFor(ctx, order, columns) + if err != nil { + return err + } + + if err != nil { + return err + } + o := RouteOrdering{ + AST: order.Inner.Expr, + Offset: offset, + WOffset: -1, + Direction: order.Inner.Direction, + } + if ctx.SemTable.NeedsWeightString(order.SimplifiedExpr) { + wrap := aeWrap(weightStringFor(order.SimplifiedExpr)) + offsets, err := r.AddColumns(ctx, true, []bool{false}, []*sqlparser.AliasedExpr{wrap}) + if err != nil { + return err + } + o.WOffset = offsets[0] + } + r.Ordering = append(r.Ordering, o) + } + + return nil +} + +func weightStringFor(expr sqlparser.Expr) sqlparser.Expr { + return &sqlparser.WeightStringFuncExpr{Expr: expr} +} + +func (r *Route) getOffsetFor(ctx *plancontext.PlanningContext, order ops.OrderBy, columns []*sqlparser.AliasedExpr) (int, error) { + for idx, column := range columns { + if sqlparser.Equals.Expr(order.SimplifiedExpr, column.Expr) { + return idx, nil + } + } + + offsets, err := r.AddColumns(ctx, true, []bool{false}, []*sqlparser.AliasedExpr{aeWrap(order.Inner.Expr)}) + if err != nil { + return 0, err + } + return offsets[0], nil +} + +func (r *Route) ShortDescription() string { + first := r.Routing.OpCode().String() + + ks := r.Routing.Keyspace() + if ks != nil { + first = fmt.Sprintf("%s on %s", r.Routing.OpCode().String(), ks.Name) + } + + orderBy, err := r.Source.GetOrdering() + if err != nil { + return first + } + + ordering := "" + if len(orderBy) > 0 { + var oo []string + for _, o := range orderBy { + oo = append(oo, sqlparser.String(o.Inner)) + } + ordering = " order by " + strings.Join(oo, ",") + } + + return first + ordering +} + +func (r *Route) setTruncateColumnCount(offset int) { + r.ResultColumns = offset +} diff --git a/go/vt/vtgate/planbuilder/operators/route_planning.go b/go/vt/vtgate/planbuilder/operators/route_planning.go index 7ba3c402f8b..6bf88e15847 100644 --- a/go/vt/vtgate/planbuilder/operators/route_planning.go +++ b/go/vt/vtgate/planbuilder/operators/route_planning.go @@ -18,23 +18,19 @@ package operators import ( "bytes" - "fmt" "io" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/key" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" - - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) type ( @@ -50,52 +46,41 @@ type ( // Here we try to merge query parts into the same route primitives. At the end of this process, // all the operators in the tree are guaranteed to be PhysicalOperators func transformToPhysical(ctx *plancontext.PlanningContext, in ops.Operator) (ops.Operator, error) { - op, err := rewrite.BottomUp(in, semantics.EmptyTableSet(), TableID, func(ts semantics.TableSet, operator ops.Operator) (ops.Operator, rewrite.TreeIdentity, error) { + op, err := rewrite.BottomUpAll(in, TableID, func(operator ops.Operator, ts semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { switch op := operator.(type) { case *QueryGraph: return optimizeQueryGraph(ctx, op) case *Join: return optimizeJoin(ctx, op) - case *Derived: - return optimizeDerived(ctx, op) + case *Horizon: + if op.TableId != nil { + return pushDownDerived(ctx, op) + } case *SubQuery: return optimizeSubQuery(ctx, op, ts) case *Filter: - return optimizeFilter(op) - default: - return operator, rewrite.SameTree, nil + return pushDownFilter(op) } + return operator, rewrite.SameTree, nil }) if err != nil { return nil, err } - err = rewrite.Visit(op, func(op ops.Operator) error { - if _, isPhys := op.(ops.PhysicalOperator); !isPhys { - return vterrors.VT13001(fmt.Sprintf("failed to transform %T to a physical operator", op)) - } - return nil - }) - if err != nil { - return nil, err - } - - return Compact(ctx, op) + return compact(ctx, op) } -func optimizeFilter(op *Filter) (ops.Operator, rewrite.TreeIdentity, error) { - if route, ok := op.Source.(*Route); ok { - // let's push the filter into the route - op.Source = route.Source - route.Source = op - return route, rewrite.NewTree, nil +func pushDownFilter(op *Filter) (ops.Operator, *rewrite.ApplyResult, error) { + // TODO: once all horizon planning has been moved to the operators, we can remove this method + if _, ok := op.Source.(*Route); ok { + return rewrite.Swap(op, op.Source, "push filter into Route") } return op, rewrite.SameTree, nil } -func optimizeDerived(ctx *plancontext.PlanningContext, op *Derived) (ops.Operator, rewrite.TreeIdentity, error) { +func pushDownDerived(ctx *plancontext.PlanningContext, op *Horizon) (ops.Operator, *rewrite.ApplyResult, error) { innerRoute, ok := op.Source.(*Route) if !ok { return op, rewrite.SameTree, nil @@ -106,22 +91,15 @@ func optimizeDerived(ctx *plancontext.PlanningContext, op *Derived) (ops.Operato return op, rewrite.SameTree, nil } - op.Source = innerRoute.Source - innerRoute.Source = op - - return innerRoute, rewrite.NewTree, nil + return rewrite.Swap(op, op.Source, "push derived under route") } -func optimizeJoin(ctx *plancontext.PlanningContext, op *Join) (ops.Operator, rewrite.TreeIdentity, error) { - join, err := mergeOrJoin(ctx, op.LHS, op.RHS, sqlparser.SplitAndExpression(nil, op.Predicate), !op.LeftJoin) - if err != nil { - return nil, rewrite.SameTree, err - } - return join, rewrite.NewTree, nil +func optimizeJoin(ctx *plancontext.PlanningContext, op *Join) (ops.Operator, *rewrite.ApplyResult, error) { + return mergeOrJoin(ctx, op.LHS, op.RHS, sqlparser.SplitAndExpression(nil, op.Predicate), !op.LeftJoin) } -func optimizeQueryGraph(ctx *plancontext.PlanningContext, op *QueryGraph) (result ops.Operator, changed rewrite.TreeIdentity, err error) { - changed = rewrite.NewTree +func optimizeQueryGraph(ctx *plancontext.PlanningContext, op *QueryGraph) (result ops.Operator, changed *rewrite.ApplyResult, err error) { + switch { case ctx.PlannerVersion == querypb.ExecuteOptions_Gen4Left2Right: result, err = leftToRightSolve(ctx, op) @@ -136,6 +114,7 @@ func optimizeQueryGraph(ctx *plancontext.PlanningContext, op *QueryGraph) (resul result = newFilter(result, ctx.SemTable.AndExpressions(unresolved...)) } + changed = rewrite.NewTree("solved query graph", result) return } @@ -182,7 +161,7 @@ func buildVindexTableForDML( return nil, nil, vterrors.VT09002(dmlType) } - // we are dealing with an explicitly targeted UPDATE + // we are dealing with an explicitly targeted DML routing := &TargetedRouting{ keyspace: vindexTable.Keyspace, TargetDestination: dest, @@ -263,7 +242,7 @@ func leftToRightSolve(ctx *plancontext.PlanningContext, qg *QueryGraph) (ops.Ope continue } joinPredicates := qg.GetPredicates(TableID(acc), TableID(plan)) - acc, err = mergeOrJoin(ctx, acc, plan, joinPredicates, true) + acc, _, err = mergeOrJoin(ctx, acc, plan, joinPredicates, true) if err != nil { return nil, err } @@ -397,7 +376,7 @@ func getJoinFor(ctx *plancontext.PlanningContext, cm opCacheMap, lhs, rhs ops.Op return cachedPlan, nil } - join, err := mergeOrJoin(ctx, lhs, rhs, joinPredicates, true) + join, _, err := mergeOrJoin(ctx, lhs, rhs, joinPredicates, true) if err != nil { return nil, err } @@ -411,9 +390,9 @@ func requiresSwitchingSides(ctx *plancontext.PlanningContext, op ops.Operator) b required := false _ = rewrite.Visit(op, func(current ops.Operator) error { - derived, isDerived := current.(*Derived) + horizon, isHorizon := current.(*Horizon) - if isDerived && !derived.IsMergeable(ctx) { + if isHorizon && horizon.IsDerived() && !horizon.IsMergeable(ctx) { required = true return io.EOF } @@ -424,30 +403,38 @@ func requiresSwitchingSides(ctx *plancontext.PlanningContext, op ops.Operator) b return required } -func mergeOrJoin(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr, inner bool) (ops.Operator, error) { - newPlan, err := Merge(ctx, lhs, rhs, joinPredicates, newJoinMerge(ctx, joinPredicates, inner)) +func mergeOrJoin(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr, inner bool) (ops.Operator, *rewrite.ApplyResult, error) { + newPlan, err := mergeJoinInputs(ctx, lhs, rhs, joinPredicates, newJoinMerge(ctx, joinPredicates, inner)) if err != nil { - return nil, err + return nil, nil, err } if newPlan != nil { - return newPlan, nil + return newPlan, rewrite.NewTree("merge routes into single operator", newPlan), nil } if len(joinPredicates) > 0 && requiresSwitchingSides(ctx, rhs) { if !inner { - return nil, vterrors.VT12001("LEFT JOIN with derived tables") + return nil, nil, vterrors.VT12001("LEFT JOIN with derived tables") } if requiresSwitchingSides(ctx, lhs) { - return nil, vterrors.VT12001("JOIN between derived tables") + return nil, nil, vterrors.VT12001("JOIN between derived tables") } join := NewApplyJoin(Clone(rhs), Clone(lhs), nil, !inner) - return pushJoinPredicates(ctx, joinPredicates, join) + newOp, err := pushJoinPredicates(ctx, joinPredicates, join) + if err != nil { + return nil, nil, err + } + return newOp, rewrite.NewTree("merge routes, but switch sides", newOp), nil } join := NewApplyJoin(Clone(lhs), Clone(rhs), nil, !inner) - return pushJoinPredicates(ctx, joinPredicates, join) + newOp, err := pushJoinPredicates(ctx, joinPredicates, join) + if err != nil { + return nil, nil, err + } + return newOp, rewrite.NewTree("logical join to applyJoin ", newOp), nil } func operatorsToRoutes(a, b ops.Operator) (*Route, *Route) { @@ -514,11 +501,11 @@ func findColumnVindex(ctx *plancontext.PlanningContext, a ops.Operator, exp sqlp deps := ctx.SemTable.RecursiveDeps(expr) _ = rewrite.Visit(a, func(rel ops.Operator) error { - to, isTableOp := rel.(TableIDIntroducer) + to, isTableOp := rel.(tableIDIntroducer) if !isTableOp { return nil } - id := to.Introduces() + id := to.introducesTableID() if deps.IsSolvedBy(id) { tableInfo, err := ctx.SemTable.TableInfoFor(id) if err != nil { @@ -561,8 +548,9 @@ func unwrapDerivedTables(ctx *plancontext.PlanningContext, exp sqlparser.Expr) s } exp = semantics.RewriteDerivedTableExpression(exp, tbl) - exp = getColName(exp) - if exp == nil { + if col := getColName(exp); col != nil { + exp = col + } else { return nil } } @@ -621,12 +609,12 @@ func gen4ValEqual(ctx *plancontext.PlanningContext, a, b sqlparser.Expr) bool { return ctx.SemTable.DirectDeps(a) == ctx.SemTable.DirectDeps(b) } - case sqlparser.Argument: - b, ok := b.(sqlparser.Argument) + case *sqlparser.Argument: + b, ok := b.(*sqlparser.Argument) if !ok { return false } - return a == b + return a.Name == b.Name case *sqlparser.Literal: b, ok := b.(*sqlparser.Literal) if !ok { @@ -675,7 +663,7 @@ func pushJoinPredicates(ctx *plancontext.PlanningContext, exprs []sqlparser.Expr } for _, expr := range exprs { - _, err := AddPredicate(op, ctx, expr, true, newFilter) + _, err := AddPredicate(ctx, op, expr, true, newFilter) if err != nil { return nil, err } diff --git a/go/vt/vtgate/planbuilder/operators/sharded_routing.go b/go/vt/vtgate/planbuilder/operators/sharded_routing.go index be23698975e..a1babd90e32 100644 --- a/go/vt/vtgate/planbuilder/operators/sharded_routing.go +++ b/go/vt/vtgate/planbuilder/operators/sharded_routing.go @@ -17,15 +17,16 @@ limitations under the License. package operators import ( - "golang.org/x/exp/slices" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "slices" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -63,10 +64,10 @@ func newShardedRouting(vtable *vindexes.Table, id semantics.TableSet) Routing { // Use the Binary vindex, which is the identity function // for keyspace id. routing.RouteOpCode = engine.EqualUnique - vindex, _ := vindexes.NewBinary("binary", nil) + vindex, _ := vindexes.CreateVindex("binary", "binary", nil) routing.Selected = &VindexOption{ Ready: true, - Values: []evalengine.Expr{evalengine.NewLiteralString(vtable.Pinned, collations.TypedCollation{})}, + Values: []evalengine.Expr{evalengine.NewLiteralString(vtable.Pinned, collations.SystemCollation)}, ValueExprs: nil, Predicates: nil, OpCode: engine.EqualUnique, @@ -78,6 +79,10 @@ func newShardedRouting(vtable *vindexes.Table, id semantics.TableSet) Routing { } for _, columnVindex := range vtable.ColumnVindexes { + // ignore any backfilling vindexes from vindex selection. + if columnVindex.IsBackfilling() { + continue + } routing.VindexPreds = append(routing.VindexPreds, &VindexPlusPredicates{ColVindex: columnVindex, TableID: id}) } return routing @@ -90,7 +95,7 @@ func (tr *ShardedRouting) isScatter() bool { // tryImprove rewrites the predicates for this query to see if we can produce a better plan. // The rewrites are two: // 1. first we turn the predicate a conjunctive normal form - an AND of ORs. -// This can sometimes push a predicate to the top so it's not hiding inside of an OR +// This can sometimes push a predicate to the top, so it's not hiding inside an OR // 2. If that is not enough, an additional rewrite pass is performed where we try to // turn ORs into IN, which is easier for the planner to plan func (tr *ShardedRouting) tryImprove(ctx *plancontext.PlanningContext, queryTable *QueryTable) (Routing, error) { @@ -150,7 +155,11 @@ func (tr *ShardedRouting) Clone() Routing { selected = &t } return &ShardedRouting{ - VindexPreds: slices.Clone(tr.VindexPreds), + VindexPreds: slice.Map(tr.VindexPreds, func(from *VindexPlusPredicates) *VindexPlusPredicates { + // we do this to create a copy of the struct + p := *from + return &p + }), Selected: selected, keyspace: tr.keyspace, RouteOpCode: tr.RouteOpCode, @@ -318,7 +327,7 @@ func (tr *ShardedRouting) Cost() int { switch tr.RouteOpCode { case engine.EqualUnique: return 1 - case engine.Equal: + case engine.Equal, engine.SubShard: return 5 case engine.IN: return 10 @@ -360,76 +369,116 @@ func (tr *ShardedRouting) haveMatchingVindex( vfunc func(*vindexes.ColumnVindex) vindexes.Vindex, ) bool { newVindexFound := false + for _, v := range tr.VindexPreds { - // check that the + // Check if the dependency is solved by the table ID. if !ctx.SemTable.DirectDeps(column).IsSolvedBy(v.TableID) { continue } + switch v.ColVindex.Vindex.(type) { case vindexes.SingleColumn: - col := v.ColVindex.Columns[0] - if column.Name.Equal(col) { - // single column vindex - just add the option - routeOpcode := opcode(v.ColVindex) - vindex := vfunc(v.ColVindex) - if vindex == nil || routeOpcode == engine.Scatter { - continue - } - v.Options = append(v.Options, &VindexOption{ - Values: []evalengine.Expr{value}, - ValueExprs: []sqlparser.Expr{valueExpr}, - Predicates: []sqlparser.Expr{node}, - OpCode: routeOpcode, - FoundVindex: vindex, - Cost: costFor(v.ColVindex, routeOpcode), - Ready: true, - }) - newVindexFound = true - } + newVindexFound = tr.processSingleColumnVindex(node, valueExpr, column, value, opcode, vfunc, v, newVindexFound) + case vindexes.MultiColumn: - colLoweredName := "" - indexOfCol := -1 - for idx, col := range v.ColVindex.Columns { - if column.Name.Equal(col) { - colLoweredName = column.Name.Lowered() - indexOfCol = idx - break - } - } - if colLoweredName == "" { - break - } + newVindexFound = tr.processMultiColumnVindex(node, valueExpr, column, value, opcode, vfunc, v, newVindexFound) + } + } - var newOption []*VindexOption - for _, op := range v.Options { - if op.Ready { - continue - } - _, isPresent := op.ColsSeen[colLoweredName] - if isPresent { - continue - } - option := copyOption(op) - optionReady := option.updateWithNewColumn(colLoweredName, valueExpr, indexOfCol, value, node, v.ColVindex, opcode) - if optionReady { - newVindexFound = true - } - newOption = append(newOption, option) - } - v.Options = append(v.Options, newOption...) + return newVindexFound +} - // multi-column vindex - just always add as new option - option := createOption(v.ColVindex, vfunc) - optionReady := option.updateWithNewColumn(colLoweredName, valueExpr, indexOfCol, value, node, v.ColVindex, opcode) - if optionReady { - newVindexFound = true - } - v.Options = append(v.Options, option) +func (tr *ShardedRouting) processSingleColumnVindex( + node sqlparser.Expr, + valueExpr sqlparser.Expr, + column *sqlparser.ColName, + value evalengine.Expr, + opcode func(*vindexes.ColumnVindex) engine.Opcode, + vfunc func(*vindexes.ColumnVindex) vindexes.Vindex, + vindexPlusPredicates *VindexPlusPredicates, + newVindexFound bool, +) bool { + col := vindexPlusPredicates.ColVindex.Columns[0] + if !column.Name.Equal(col) { + return newVindexFound + } + + routeOpcode := opcode(vindexPlusPredicates.ColVindex) + vindex := vfunc(vindexPlusPredicates.ColVindex) + if vindex == nil || routeOpcode == engine.Scatter { + return newVindexFound + } + + vindexPlusPredicates.Options = append(vindexPlusPredicates.Options, &VindexOption{ + Values: []evalengine.Expr{value}, + ValueExprs: []sqlparser.Expr{valueExpr}, + Predicates: []sqlparser.Expr{node}, + OpCode: routeOpcode, + FoundVindex: vindex, + Cost: costFor(vindexPlusPredicates.ColVindex, routeOpcode), + Ready: true, + }) + return true +} + +func (tr *ShardedRouting) processMultiColumnVindex( + node sqlparser.Expr, + valueExpr sqlparser.Expr, + column *sqlparser.ColName, + value evalengine.Expr, + opcode func(*vindexes.ColumnVindex) engine.Opcode, + vfunc func(*vindexes.ColumnVindex) vindexes.Vindex, + v *VindexPlusPredicates, + newVindexFound bool, +) bool { + colLoweredName, indexOfCol := tr.getLoweredNameAndIndex(v.ColVindex, column) + + if colLoweredName == "" { + return newVindexFound + } + + var newOption []*VindexOption + for _, op := range v.Options { + if op.Ready { + continue + } + _, isPresent := op.ColsSeen[colLoweredName] + if isPresent { + continue } + option := copyOption(op) + optionReady := option.updateWithNewColumn(colLoweredName, valueExpr, indexOfCol, value, node, v.ColVindex, opcode) + if optionReady { + newVindexFound = true + } + newOption = append(newOption, option) + } + v.Options = append(v.Options, newOption...) + + // Multi-column vindex - just always add as new option + option := createOption(v.ColVindex, vfunc) + optionReady := option.updateWithNewColumn(colLoweredName, valueExpr, indexOfCol, value, node, v.ColVindex, opcode) + if optionReady { + newVindexFound = true } + v.Options = append(v.Options, option) + return newVindexFound } +func (tr *ShardedRouting) getLoweredNameAndIndex(colVindex *vindexes.ColumnVindex, column *sqlparser.ColName) (string, int) { + colLoweredName := "" + indexOfCol := -1 + for idx, col := range colVindex.Columns { + if column.Name.Equal(col) { + colLoweredName = column.Name.Lowered() + indexOfCol = idx + break + } + } + return colLoweredName, indexOfCol +} + func (tr *ShardedRouting) planEqualOp(ctx *plancontext.PlanningContext, node *sqlparser.ComparisonExpr) bool { column, ok := node.Left.(*sqlparser.ColName) other := node.Right @@ -508,29 +557,6 @@ func (tr *ShardedRouting) hasVindex(column *sqlparser.ColName) bool { return false } -// Reset all vindex predicates on this route and re-build their options from -// the list of seen routing predicates. -func (tr *ShardedRouting) resetRoutingSelections(ctx *plancontext.PlanningContext) error { - tr.RouteOpCode = engine.Scatter - tr.Selected = nil - for i, vp := range tr.VindexPreds { - tr.VindexPreds[i] = &VindexPlusPredicates{ColVindex: vp.ColVindex, TableID: vp.TableID} - } - - var routing Routing = tr - for _, predicate := range tr.SeenPredicates { - var err error - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return err - } - } - if routing != tr { - return vterrors.VT13001("uh-oh. we ended up with a different type of routing") - } - return nil -} - func (tr *ShardedRouting) SelectedVindex() vindexes.Vindex { if tr.Selected == nil { return nil @@ -545,7 +571,13 @@ func (tr *ShardedRouting) VindexExpressions() []sqlparser.Expr { return tr.Selected.ValueExprs } -func tryMergeShardedRouting(ctx *plancontext.PlanningContext, routeA *Route, routeB *Route, m merger, joinPredicates []sqlparser.Expr) (ops.Operator, error) { +func tryMergeJoinShardedRouting( + ctx *plancontext.PlanningContext, + routeA *Route, + routeB *Route, + m merger, + joinPredicates []sqlparser.Expr, +) (ops.Operator, error) { sameKeyspace := routeA.Routing.Keyspace() == routeB.Routing.Keyspace() tblA := routeA.Routing.(*ShardedRouting) tblB := routeB.Routing.(*ShardedRouting) @@ -559,7 +591,7 @@ func tryMergeShardedRouting(ctx *plancontext.PlanningContext, routeA *Route, rou aExpr := tblA.VindexExpressions() bExpr := tblB.VindexExpressions() if aVdx == bVdx && gen4ValuesEqual(ctx, aExpr, bExpr) { - return m.mergeTables(tblA, tblB, routeA, routeB) + return m.mergeShardedRouting(tblA, tblB, routeA, routeB) } } @@ -583,7 +615,7 @@ func tryMergeShardedRouting(ctx *plancontext.PlanningContext, routeA *Route, rou if !canMerge { return nil, nil } - return m.mergeTables(tblA, tblB, routeA, routeB) + return m.mergeShardedRouting(tblA, tblB, routeA, routeB) } return nil, nil } @@ -600,14 +632,17 @@ func makeEvalEngineExpr(ctx *plancontext.PlanningContext, n sqlparser.Expr) eval if extractedSubquery == nil { continue } - switch engine.PulloutOpcode(extractedSubquery.OpCode) { - case engine.PulloutIn, engine.PulloutNotIn: + switch popcode.PulloutOpcode(extractedSubquery.OpCode) { + case popcode.PulloutIn, popcode.PulloutNotIn: expr = sqlparser.NewListArg(extractedSubquery.GetArgName()) - case engine.PulloutValue, engine.PulloutExists: + case popcode.PulloutValue, popcode.PulloutExists: expr = sqlparser.NewArgument(extractedSubquery.GetArgName()) } } - ee, _ := evalengine.Translate(expr, ctx.SemTable) + ee, _ := evalengine.Translate(expr, &evalengine.Config{ + Collation: ctx.SemTable.Collation, + ResolveType: ctx.SemTable.TypeForExpr, + }) if ee != nil { return ee } diff --git a/go/vt/vtgate/planbuilder/operators/subquery.go b/go/vt/vtgate/planbuilder/operators/subquery.go index e6dbb2f22ed..8966c30e192 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery.go +++ b/go/vt/vtgate/planbuilder/operators/subquery.go @@ -58,11 +58,20 @@ func (s *SubQueryInner) Clone(inputs []ops.Operator) ops.Operator { } } +func (s *SubQueryInner) GetOrdering() ([]ops.OrderBy, error) { + return s.Inner.GetOrdering() +} + // Inputs implements the Operator interface func (s *SubQueryInner) Inputs() []ops.Operator { return []ops.Operator{s.Inner} } +// SetInputs implements the Operator interface +func (s *SubQueryInner) SetInputs(ops []ops.Operator) { + s.Inner = ops[0] +} + // Clone implements the Operator interface func (s *SubQuery) Clone(inputs []ops.Operator) ops.Operator { result := &SubQuery{ @@ -78,6 +87,10 @@ func (s *SubQuery) Clone(inputs []ops.Operator) ops.Operator { return result } +func (s *SubQuery) GetOrdering() ([]ops.OrderBy, error) { + return s.Outer.GetOrdering() +} + // Inputs implements the Operator interface func (s *SubQuery) Inputs() []ops.Operator { operators := []ops.Operator{s.Outer} @@ -87,13 +100,18 @@ func (s *SubQuery) Inputs() []ops.Operator { return operators } +// SetInputs implements the Operator interface +func (s *SubQuery) SetInputs(ops []ops.Operator) { + s.Outer = ops[0] +} + func createSubqueryFromStatement(ctx *plancontext.PlanningContext, stmt sqlparser.Statement) (*SubQuery, error) { if len(ctx.SemTable.SubqueryMap[stmt]) == 0 { return nil, nil } subq := &SubQuery{} for _, sq := range ctx.SemTable.SubqueryMap[stmt] { - opInner, err := createLogicalOperatorFromAST(ctx, sq.Subquery.Select) + opInner, err := translateQueryToOp(ctx, sq.Subquery.Select) if err != nil { return nil, err } @@ -108,3 +126,11 @@ func createSubqueryFromStatement(ctx *plancontext.PlanningContext, stmt sqlparse } return subq, nil } + +func (s *SubQuery) ShortDescription() string { + return "" +} + +func (s *SubQueryInner) ShortDescription() string { + return "" +} diff --git a/go/vt/vtgate/planbuilder/operators/subquery_planning.go b/go/vt/vtgate/planbuilder/operators/subquery_planning.go index 6dd5220e389..61f71024626 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery_planning.go +++ b/go/vt/vtgate/planbuilder/operators/subquery_planning.go @@ -20,13 +20,14 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) -func optimizeSubQuery(ctx *plancontext.PlanningContext, op *SubQuery, ts semantics.TableSet) (ops.Operator, rewrite.TreeIdentity, error) { +func optimizeSubQuery(ctx *plancontext.PlanningContext, op *SubQuery, ts semantics.TableSet) (ops.Operator, *rewrite.ApplyResult, error) { var unmerged []*SubQueryOp // first loop over the subqueries and try to merge them into the outer plan @@ -43,7 +44,7 @@ func optimizeSubQuery(ctx *plancontext.PlanningContext, op *SubQuery, ts semanti } merged, err := tryMergeSubQueryOp(ctx, outer, innerOp, newInner, preds, newSubQueryMerge(ctx, newInner), ts) if err != nil { - return nil, rewrite.SameTree, err + return nil, nil, err } if merged != nil { @@ -61,23 +62,23 @@ func optimizeSubQuery(ctx *plancontext.PlanningContext, op *SubQuery, ts semanti continue } - if inner.ExtractedSubquery.OpCode == int(engine.PulloutExists) { + if inner.ExtractedSubquery.OpCode == int(popcode.PulloutExists) { correlatedTree, err := createCorrelatedSubqueryOp(ctx, innerOp, outer, preds, inner.ExtractedSubquery) if err != nil { - return nil, rewrite.SameTree, err + return nil, nil, err } outer = correlatedTree continue } - return nil, rewrite.SameTree, vterrors.VT12001("cross-shard correlated subquery") + return nil, nil, vterrors.VT12001("cross-shard correlated subquery") } for _, tree := range unmerged { tree.Outer = outer outer = tree } - return outer, rewrite.NewTree, nil + return outer, rewrite.NewTree("merged subqueries", outer), nil } func unresolvedAndSource(ctx *plancontext.PlanningContext, op ops.Operator) ([]sqlparser.Expr, ops.Operator) { @@ -93,65 +94,6 @@ func unresolvedAndSource(ctx *plancontext.PlanningContext, op ops.Operator) ([]s return preds, op } -func mergeSubQueryOp(ctx *plancontext.PlanningContext, outer *Route, inner *Route, subq *SubQueryInner, mergedRouting Routing) (*Route, error) { - subq.ExtractedSubquery.Merged = true - - switch outerRouting := outer.Routing.(type) { - case *ShardedRouting: - return mergeSubQueryFromTableRouting(ctx, outer, inner, outerRouting, subq) - default: - outer.Routing = mergedRouting - } - - outer.MergedWith = append(outer.MergedWith, inner) - - return outer, nil -} - -func mergeSubQueryFromTableRouting( - ctx *plancontext.PlanningContext, - outer, inner *Route, - outerRouting *ShardedRouting, - subq *SubQueryInner, -) (*Route, error) { - // When merging an inner query with its outer query, we can remove the - // inner query from the list of predicates that can influence routing of - // the outer query. - // - // Note that not all inner queries necessarily are part of the routing - // predicates list, so this might be a no-op. - subQueryWasPredicate := false - for i, predicate := range outerRouting.SeenPredicates { - if ctx.SemTable.EqualsExpr(predicate, subq.ExtractedSubquery) { - outerRouting.SeenPredicates = append(outerRouting.SeenPredicates[:i], outerRouting.SeenPredicates[i+1:]...) - - subQueryWasPredicate = true - - // The `ExtractedSubquery` of an inner query is unique (due to the uniqueness of bind variable names) - // so we can stop after the first match. - break - } - } - - err := outerRouting.resetRoutingSelections(ctx) - if err != nil { - return nil, err - } - - if subQueryWasPredicate { - if innerTR, isTR := inner.Routing.(*ShardedRouting); isTR { - // Copy Vindex predicates from the inner route to the upper route. - // If we can route based on some of these predicates, the routing can improve - outerRouting.VindexPreds = append(outerRouting.VindexPreds, innerTR.VindexPreds...) - } - - if inner.Routing.OpCode() == engine.None { - outer.Routing = &NoneRouting{keyspace: outerRouting.keyspace} - } - } - return outer, nil -} - func isMergeable(ctx *plancontext.PlanningContext, query sqlparser.SelectStatement, op ops.Operator) bool { validVindex := func(expr sqlparser.Expr) bool { sc := findColumnVindex(ctx, op, expr) @@ -241,7 +183,7 @@ func tryMergeSubqueryWithRoute( return nil, nil } - merged, err := Merge(ctx, outerOp, subq, joinPredicates, merger) + merged, err := mergeJoinInputs(ctx, outerOp, subq, joinPredicates, merger) if err != nil { return nil, err } @@ -268,7 +210,7 @@ func tryMergeSubqueryWithRoute( if !ok { return nil, nil } - merged, err := merger.mergeTables(outerRouting, innerRouting, outerOp, subqueryRoute) + merged, err := merger.mergeShardedRouting(outerRouting, innerRouting, outerOp, subqueryRoute) mergedRouting := merged.Routing.(*ShardedRouting) mergedRouting.PickBestAvailableVindex() return merged, err @@ -284,7 +226,7 @@ func tryMergeSubqueryWithJoin( merger merger, subQueryInner *SubQueryInner, lhs semantics.TableSet, // these are the tables made available because we are on the RHS of a join -) (ops.PhysicalOperator, error) { +) (ops.Operator, error) { // Trying to merge the subquery with the left-hand or right-hand side of the join if outerOp.LeftJoin { @@ -336,7 +278,6 @@ func rewriteColumnsInSubqueryOpForJoin( outerTree *ApplyJoin, subQueryInner *SubQueryInner, ) (ops.Operator, error) { - resultInnerOp := innerOp var rewriteError error // go over the entire expression in the subquery sqlparser.SafeRewrite(subQueryInner.ExtractedSubquery.Original, nil, func(cursor *sqlparser.Cursor) bool { @@ -346,36 +287,39 @@ func rewriteColumnsInSubqueryOpForJoin( } // check whether the column name belongs to the other side of the join tree - if !ctx.SemTable.RecursiveDeps(node).IsSolvedBy(TableID(resultInnerOp)) { + if !ctx.SemTable.RecursiveDeps(node).IsSolvedBy(TableID(innerOp)) { return true } // get the bindVariable for that column name and replace it in the subquery - bindVar := ctx.ReservedVars.ReserveColName(node) - cursor.Replace(sqlparser.NewArgument(bindVar)) + typ, _, _ := ctx.SemTable.TypeForExpr(node) + bindVar := ctx.GetArgumentFor(node, func() string { + return ctx.ReservedVars.ReserveColName(node) + }) + cursor.Replace(sqlparser.NewTypedArgument(bindVar, typ)) // check whether the bindVariable already exists in the joinVars of the other tree _, alreadyExists := outerTree.Vars[bindVar] if alreadyExists { return true } // if it does not exist, then push this as an output column there and add it to the joinVars - offset, err := resultInnerOp.AddColumn(ctx, node) + offsets, err := innerOp.AddColumns(ctx, true, []bool{false}, []*sqlparser.AliasedExpr{aeWrap(node)}) if err != nil { rewriteError = err return false } - outerTree.Vars[bindVar] = offset + outerTree.Vars[bindVar] = offsets[0] return true }) // update the dependencies for the subquery by removing the dependencies from the innerOp tableSet := ctx.SemTable.Direct[subQueryInner.ExtractedSubquery.Subquery] - ctx.SemTable.Direct[subQueryInner.ExtractedSubquery.Subquery] = tableSet.Remove(TableID(resultInnerOp)) + ctx.SemTable.Direct[subQueryInner.ExtractedSubquery.Subquery] = tableSet.Remove(TableID(innerOp)) tableSet = ctx.SemTable.Recursive[subQueryInner.ExtractedSubquery.Subquery] - ctx.SemTable.Recursive[subQueryInner.ExtractedSubquery.Subquery] = tableSet.Remove(TableID(resultInnerOp)) + ctx.SemTable.Recursive[subQueryInner.ExtractedSubquery.Subquery] = tableSet.Remove(TableID(innerOp)) // return any error while rewriting - return resultInnerOp, rewriteError + return innerOp, rewriteError } func createCorrelatedSubqueryOp( @@ -389,7 +333,6 @@ func createCorrelatedSubqueryOp( return nil, vterrors.VT12001("EXISTS sub-queries are only supported with AND clause") } - resultOuterOp := newOuter vars := map[string]int{} bindVars := map[*sqlparser.ColName]string{} var lhsCols []*sqlparser.ColName @@ -402,7 +345,7 @@ func createCorrelatedSubqueryOp( } nodeDeps := ctx.SemTable.RecursiveDeps(node) - if !nodeDeps.IsSolvedBy(TableID(resultOuterOp)) { + if !nodeDeps.IsSolvedBy(TableID(newOuter)) { return true } @@ -410,26 +353,27 @@ func createCorrelatedSubqueryOp( // we do so by checking that the column names are the same and their recursive dependencies are the same // so the column names `user.a` and `a` would be considered equal as long as both are bound to the same table for colName, bindVar := range bindVars { - if ctx.SemTable.EqualsExpr(node, colName) { + if ctx.SemTable.EqualsExprWithDeps(node, colName) { cursor.Replace(sqlparser.NewArgument(bindVar)) return true } } // get the bindVariable for that column name and replace it in the predicate + typ, _, _ := ctx.SemTable.TypeForExpr(node) bindVar := ctx.ReservedVars.ReserveColName(node) - cursor.Replace(sqlparser.NewArgument(bindVar)) + cursor.Replace(sqlparser.NewTypedArgument(bindVar, typ)) // store it in the map for future comparisons bindVars[node] = bindVar // if it does not exist, then push this as an output column in the outerOp and add it to the joinVars - offset, err := resultOuterOp.AddColumn(ctx, node) + offsets, err := newOuter.AddColumns(ctx, true, []bool{false}, []*sqlparser.AliasedExpr{aeWrap(node)}) if err != nil { rewriteError = err return true } lhsCols = append(lhsCols, node) - vars[bindVar] = offset + vars[bindVar] = offsets[0] return true }) if rewriteError != nil { @@ -442,7 +386,7 @@ func createCorrelatedSubqueryOp( } } return &CorrelatedSubQueryOp{ - Outer: resultOuterOp, + Outer: newOuter, Inner: innerOp, Extracted: extractedSubquery, Vars: vars, @@ -456,7 +400,7 @@ func createCorrelatedSubqueryOp( func canMergeSubqueryOnColumnSelection(ctx *plancontext.PlanningContext, a, b *Route, predicate *sqlparser.ExtractedSubquery) bool { left := predicate.OtherSide opCode := predicate.OpCode - if opCode != int(engine.PulloutValue) && opCode != int(engine.PulloutIn) { + if opCode != int(popcode.PulloutValue) && opCode != int(popcode.PulloutIn) { return false } diff --git a/go/vt/vtgate/planbuilder/operators/table.go b/go/vt/vtgate/planbuilder/operators/table.go index 593dfe3ec7a..94ae634a0bf 100644 --- a/go/vt/vtgate/planbuilder/operators/table.go +++ b/go/vt/vtgate/planbuilder/operators/table.go @@ -17,6 +17,9 @@ limitations under the License. package operators import ( + "fmt" + + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" @@ -34,16 +37,11 @@ type ( noInputs } ColNameColumns interface { - GetColumns() []*sqlparser.ColName + GetColNames() []*sqlparser.ColName AddCol(*sqlparser.ColName) } ) -var _ ops.PhysicalOperator = (*Table)(nil) - -// IPhysical implements the PhysicalOperator interface -func (to *Table) IPhysical() {} - // Clone implements the Operator interface func (to *Table) Clone([]ops.Operator) ops.Operator { var columns []*sqlparser.ColName @@ -58,7 +56,7 @@ func (to *Table) Clone([]ops.Operator) ops.Operator { } // Introduces implements the PhysicalOperator interface -func (to *Table) Introduces() semantics.TableSet { +func (to *Table) introducesTableID() semantics.TableSet { return to.QTable.ID } @@ -67,11 +65,38 @@ func (to *Table) AddPredicate(_ *plancontext.PlanningContext, expr sqlparser.Exp return newFilter(to, expr), nil } -func (to *Table) AddColumn(_ *plancontext.PlanningContext, e sqlparser.Expr) (int, error) { - return addColumn(to, e) +func (to *Table) AddColumns(*plancontext.PlanningContext, bool, []bool, []*sqlparser.AliasedExpr) ([]int, error) { + return nil, vterrors.VT13001("did not expect this method to be called") +} + +func (to *Table) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + colToFind, ok := expr.(*sqlparser.ColName) + if !ok { + return -1, nil + } + + for idx, colName := range to.Columns { + if colName.Name.Equal(colToFind.Name) { + return idx, nil + } + } + + return -1, nil +} + +func (to *Table) GetColumns(*plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return slice.Map(to.Columns, colNameToExpr), nil } -func (to *Table) GetColumns() []*sqlparser.ColName { +func (to *Table) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return transformColumnsToSelectExprs(ctx, to) +} + +func (to *Table) GetOrdering() ([]ops.OrderBy, error) { + return nil, nil +} + +func (to *Table) GetColNames() []*sqlparser.ColName { return to.Columns } func (to *Table) AddCol(col *sqlparser.ColName) { @@ -85,18 +110,22 @@ func (to *Table) TablesUsed() []string { return SingleQualifiedIdentifier(to.VTable.Keyspace, to.VTable.Name) } -func addColumn(op ColNameColumns, e sqlparser.Expr) (int, error) { +func addColumn(ctx *plancontext.PlanningContext, op ColNameColumns, e sqlparser.Expr) (int, error) { col, ok := e.(*sqlparser.ColName) if !ok { - return 0, vterrors.VT13001("cannot push this expression to a table/vindex") + return 0, vterrors.VT12001(fmt.Sprintf("cannot add '%s' expression to a table/vindex", sqlparser.String(e))) } - cols := op.GetColumns() - for idx, column := range cols { - if col.Name.Equal(column.Name) { - return idx, nil - } + sqlparser.RemoveKeyspaceFromColName(col) + cols := op.GetColNames() + colAsExpr := func(c *sqlparser.ColName) sqlparser.Expr { return c } + if offset, found := canReuseColumn(ctx, cols, e, colAsExpr); found { + return offset, nil } offset := len(cols) op.AddCol(col) return offset, nil } + +func (to *Table) ShortDescription() string { + return to.VTable.String() +} diff --git a/go/vt/vtgate/planbuilder/operators/union.go b/go/vt/vtgate/planbuilder/operators/union.go index ecfa2d366a8..f6258ebee2d 100644 --- a/go/vt/vtgate/planbuilder/operators/union.go +++ b/go/vt/vtgate/planbuilder/operators/union.go @@ -17,40 +17,61 @@ limitations under the License. package operators import ( + "fmt" + "slices" + + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type Union struct { - Sources []ops.Operator - Distinct bool + Sources []ops.Operator - // TODO this should be removed. For now it's used to fail queries - Ordering sqlparser.OrderBy + // These are the select expressions coming from each source + Selects []sqlparser.SelectExprs + distinct bool - noColumns + unionColumns sqlparser.SelectExprs + unionColumnsAsAlisedExprs []*sqlparser.AliasedExpr } -var _ ops.PhysicalOperator = (*Union)(nil) - -// IPhysical implements the PhysicalOperator interface -func (u *Union) IPhysical() {} +func newUnion(srcs []ops.Operator, sourceSelects []sqlparser.SelectExprs, columns sqlparser.SelectExprs, distinct bool) *Union { + if columns == nil { + panic("rt") + } + return &Union{ + Sources: srcs, + Selects: sourceSelects, + distinct: distinct, + unionColumns: columns, + } +} // Clone implements the Operator interface func (u *Union) Clone(inputs []ops.Operator) ops.Operator { newOp := *u newOp.Sources = inputs + newOp.Selects = slices.Clone(u.Selects) return &newOp } +func (u *Union) GetOrdering() ([]ops.OrderBy, error) { + return nil, nil +} + // Inputs implements the Operator interface func (u *Union) Inputs() []ops.Operator { return u.Sources } +// SetInputs implements the Operator interface +func (u *Union) SetInputs(ops []ops.Operator) { + u.Sources = ops +} + // AddPredicate adds a predicate a UNION by pushing the predicate to all sources of the UNION. /* this is done by offset and expression rewriting. Say we have a query like so: select * ( @@ -83,16 +104,33 @@ func (u *Union) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Ex if !ok { return nil, vterrors.VT12001("pushing predicates on UNION where the first SELECT contains * or NEXT") } - if !ae.As.IsEmpty() { - offsets[ae.As.String()] = i - continue - } - col, ok := ae.Expr.(*sqlparser.ColName) - if ok { - offsets[col.Name.Lowered()] = i + offsets[ae.ColumnName()] = i + } + + needsFilter, exprPerSource, err := u.predicatePerSource(expr, offsets) + if err != nil { + return nil, err + } + if needsFilter { + return &Filter{ + Source: u, + Predicates: []sqlparser.Expr{expr}, + }, nil + } + + for i, src := range u.Sources { + u.Sources[i], err = src.AddPredicate(ctx, exprPerSource[i]) + if err != nil { + return nil, err } } + return u, nil +} + +func (u *Union) predicatePerSource(expr sqlparser.Expr, offsets map[string]int) (bool, []sqlparser.Expr, error) { + needsFilter := false + exprPerSource := make([]sqlparser.Expr, len(u.Sources)) for i := range u.Sources { var err error predicate := sqlparser.CopyOnRewrite(expr, nil, func(cursor *sqlparser.CopyOnWriteCursor) { @@ -103,7 +141,7 @@ func (u *Union) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Ex idx, ok := offsets[col.Name.Lowered()] if !ok { - err = vterrors.VT13001("cannot push predicates on concatenate, missing columns from the UNION") + needsFilter = true cursor.StopTreeWalk() return } @@ -117,22 +155,18 @@ func (u *Union) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Ex ae, ok := sel.SelectExprs[idx].(*sqlparser.AliasedExpr) if !ok { - err = vterrors.VT12001("pushing non-aliased expression predicates on concatenate") + err = vterrors.VT09015() cursor.StopTreeWalk() return } cursor.Replace(ae.Expr) }, nil).(sqlparser.Expr) - if err != nil { - return nil, err - } - u.Sources[i], err = u.Sources[i].AddPredicate(ctx, predicate) - if err != nil { - return nil, err + if err != nil || needsFilter { + return needsFilter, nil, err } + exprPerSource[i] = predicate } - - return u, nil + return needsFilter, exprPerSource, nil } func (u *Union) GetSelectFor(source int) (*sqlparser.Select, error) { @@ -140,7 +174,7 @@ func (u *Union) GetSelectFor(source int) (*sqlparser.Select, error) { for { switch op := src.(type) { case *Horizon: - return sqlparser.GetFirstSelect(op.Select), nil + return sqlparser.GetFirstSelect(op.Query), nil case *Route: src = op.Source default: @@ -149,43 +183,150 @@ func (u *Union) GetSelectFor(source int) (*sqlparser.Select, error) { } } -func (u *Union) Compact(*plancontext.PlanningContext) (ops.Operator, rewrite.TreeIdentity, error) { - var newSources []ops.Operator - anythingChanged := false - for _, source := range u.Sources { - var other *Union - horizon, ok := source.(*Horizon) - if ok { - union, ok := horizon.Source.(*Union) - if ok { - other = union +func (u *Union) AddColumns(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) ([]int, error) { + offsets := make([]int, len(exprs)) + cols, err := u.GetColumns(ctx) + if err != nil { + return nil, err + } + for i, ae := range exprs { + if reuse { + offset, err := u.FindCol(ctx, ae.Expr, false) + if err != nil { + return nil, err + } + + if offset >= 0 { + offsets[i] = offset + continue } } - if other == nil { - newSources = append(newSources, source) - continue - } - anythingChanged = true - switch { - case len(other.Ordering) == 0 && !other.Distinct: - fallthrough - case u.Distinct: - // if the current UNION is a DISTINCT, we can safely ignore everything from children UNIONs, except LIMIT - newSources = append(newSources, other.Sources...) + switch e := ae.Expr.(type) { + case *sqlparser.ColName: + // here we deal with pure column access on top of the union + offset := slices.IndexFunc(cols, func(expr *sqlparser.AliasedExpr) bool { + return e.Name.EqualString(expr.ColumnName()) + }) + if offset == -1 { + return nil, vterrors.VT13001(fmt.Sprintf("could not find the column '%s' on the UNION", sqlparser.String(e))) + } + offsets[i] = offset + case *sqlparser.WeightStringFuncExpr: + wsArg := e.Expr + argIdx := slices.IndexFunc(cols, func(expr *sqlparser.AliasedExpr) bool { + return ctx.SemTable.EqualsExprWithDeps(wsArg, expr.Expr) + }) + + if argIdx == -1 { + return nil, vterrors.VT13001(fmt.Sprintf("could not find the argument to the weight_string function: %s", sqlparser.String(wsArg))) + } + + outputOffset, err := u.addWeightStringToOffset(ctx, argIdx, addToGroupBy[i]) + if err != nil { + return nil, err + } + + offsets[i] = outputOffset default: - newSources = append(newSources, other) + return nil, vterrors.VT13001(fmt.Sprintf("only weight_string function is expected - got %s", sqlparser.String(ae))) } } - if anythingChanged { - u.Sources = newSources + + return offsets, nil +} + +func (u *Union) addWeightStringToOffset(ctx *plancontext.PlanningContext, argIdx int, addToGroupBy bool) (outputOffset int, err error) { + for i, src := range u.Sources { + exprs := u.Selects[i] + selectExpr := exprs[argIdx] + ae, ok := selectExpr.(*sqlparser.AliasedExpr) + if !ok { + return 0, vterrors.VT09015() + } + offsets, err := src.AddColumns(ctx, false, []bool{addToGroupBy}, []*sqlparser.AliasedExpr{aeWrap(weightStringFor(ae.Expr))}) + if err != nil { + return 0, err + } + thisOffset := offsets[0] + // all offsets for the newly added ws need to line up + if i == 0 { + outputOffset = thisOffset + } else { + if thisOffset != outputOffset { + return 0, vterrors.VT12001("weight_string offsets did not line up for UNION") + } + } } - identity := rewrite.SameTree - if anythingChanged { - identity = rewrite.NewTree + return +} + +func (u *Union) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + columns, err := u.GetColumns(ctx) + if err != nil { + return 0, err + } + + for idx, col := range columns { + if ctx.SemTable.EqualsExprWithDeps(expr, col.Expr) { + return idx, nil + } } - return u, identity, nil + return -1, nil +} + +func (u *Union) GetColumns(ctx *plancontext.PlanningContext) (result []*sqlparser.AliasedExpr, err error) { + if u.unionColumnsAsAlisedExprs == nil { + allOk := true + u.unionColumnsAsAlisedExprs = slice.Map(u.unionColumns, func(from sqlparser.SelectExpr) *sqlparser.AliasedExpr { + expr, ok := from.(*sqlparser.AliasedExpr) + allOk = allOk && ok + return expr + }) + if !allOk { + return nil, vterrors.VT09015() + } + } + + // if any of the inputs has more columns that we expect, we want to show on top of UNION, so the results can + // be truncated to the expected result columns and nothing else + for _, src := range u.Sources { + columns, err := src.GetColumns(ctx) + if err != nil { + return nil, err + } + + for len(columns) > len(u.unionColumnsAsAlisedExprs) { + u.unionColumnsAsAlisedExprs = append(u.unionColumnsAsAlisedExprs, aeWrap(sqlparser.NewIntLiteral("0"))) + } + } + + return u.unionColumnsAsAlisedExprs, nil +} + +func (u *Union) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + // if any of the inputs has more columns that we expect, we want to show on top of UNION, so the results can + // be truncated to the expected result columns and nothing else + for _, src := range u.Sources { + columns, err := src.GetSelectExprs(ctx) + if err != nil { + return nil, err + } + + for len(columns) > len(u.unionColumns) { + u.unionColumns = append(u.unionColumns, aeWrap(sqlparser.NewIntLiteral("0"))) + } + } + + return u.unionColumns, nil } func (u *Union) NoLHSTableSet() {} + +func (u *Union) ShortDescription() string { + if u.distinct { + return "DISTINCT" + } + return "" +} diff --git a/go/vt/vtgate/planbuilder/operators/union_merging.go b/go/vt/vtgate/planbuilder/operators/union_merging.go new file mode 100644 index 00000000000..4c8b02f76d8 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/union_merging.go @@ -0,0 +1,259 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +// mergeUnionInputInAnyOrder merges sources the sources of the union in any order +// can be used for UNION DISTINCT +func mergeUnionInputInAnyOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops.Operator, []sqlparser.SelectExprs, error) { + sources := op.Sources + selects := op.Selects + + // next we'll go over all the plans from and check if any two can be merged. if they can, they are merged, + // and we continue checking for pairs of plans that can be merged into a single route + idx := 0 + for idx < len(sources) { + keep := make([]bool, len(sources)) + srcA := sources[idx] + merged := false + for j, srcB := range sources { + if j <= idx { + continue + } + selA := selects[idx] + selB := selects[j] + newPlan, sel, err := mergeUnionInputs(ctx, srcA, srcB, selA, selB, op.distinct) + if err != nil { + return nil, nil, err + } + if newPlan != nil { + sources[idx] = newPlan + selects[idx] = sel + srcA = newPlan + merged = true + } else { + keep[j] = true + } + } + if !merged { + return sources, selects, nil + } + + var newSources []ops.Operator + var newSelects []sqlparser.SelectExprs + for i, source := range sources { + if keep[i] || i <= idx { + newSources = append(newSources, source) + newSelects = append(newSelects, selects[i]) + } + } + idx++ + sources = newSources + selects = newSelects + } + + return sources, selects, nil +} + +func mergeUnionInputsInOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops.Operator, []sqlparser.SelectExprs, error) { + sources := op.Sources + selects := op.Selects + for { + merged := false + for i := 0; i < len(sources)-1; i++ { + j := i + 1 + srcA, selA := sources[i], selects[i] + srcB, selB := sources[j], selects[j] + newPlan, sel, err := mergeUnionInputs(ctx, srcA, srcB, selA, selB, op.distinct) + if err != nil { + return nil, nil, err + } + if newPlan != nil { + sources[i] = newPlan + selects[i] = sel + merged = true + sources = append(sources[:i+1], sources[j+1:]...) + selects = append(selects[:i+1], selects[j+1:]...) + } + } + if !merged { + break + } + } + + return sources, selects, nil +} + +// mergeUnionInputs checks whether two operators can be merged into a single one. +// If they can be merged, a new operator with the merged routing is returned +// If they cannot be merged, nil is returned. +// this function is very similar to mergeJoinInputs +func mergeUnionInputs( + ctx *plancontext.PlanningContext, + lhs, rhs ops.Operator, + lhsExprs, rhsExprs sqlparser.SelectExprs, + distinct bool, +) (ops.Operator, sqlparser.SelectExprs, error) { + lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(lhs, rhs) + if lhsRoute == nil { + return nil, nil, nil + } + + switch { + // if either side is a dual query, we can always merge them together + // an unsharded/reference route can be merged with anything going to that keyspace + case b == dual || (b == anyShard && sameKeyspace): + return createMergedUnion(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct, routingA) + case a == dual || (a == anyShard && sameKeyspace): + return createMergedUnion(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct, routingB) + + case a == none: + return createMergedUnion(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct, routingB) + case b == none: + return createMergedUnion(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct, routingA) + + case a == sharded && b == sharded && sameKeyspace: + res, exprs, err := tryMergeUnionShardedRouting(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct) + if err != nil || res != nil { + return res, exprs, err + } + } + return nil, nil, nil +} + +func tryMergeUnionShardedRouting( + ctx *plancontext.PlanningContext, + routeA, routeB *Route, + exprsA, exprsB sqlparser.SelectExprs, + distinct bool, +) (ops.Operator, sqlparser.SelectExprs, error) { + tblA := routeA.Routing.(*ShardedRouting) + tblB := routeB.Routing.(*ShardedRouting) + + scatterA := tblA.RouteOpCode == engine.Scatter + scatterB := tblB.RouteOpCode == engine.Scatter + uniqueA := tblA.RouteOpCode == engine.EqualUnique + uniqueB := tblB.RouteOpCode == engine.EqualUnique + + switch { + case scatterA: + return createMergedUnion(ctx, routeA, routeB, exprsA, exprsB, distinct, tblA) + + case scatterB: + return createMergedUnion(ctx, routeA, routeB, exprsA, exprsB, distinct, tblB) + + case uniqueA && uniqueB: + aVdx := tblA.SelectedVindex() + bVdx := tblB.SelectedVindex() + aExpr := tblA.VindexExpressions() + bExpr := tblB.VindexExpressions() + if aVdx == bVdx && gen4ValuesEqual(ctx, aExpr, bExpr) { + return createMergedUnion(ctx, routeA, routeB, exprsA, exprsB, distinct, tblA) + } + } + + return nil, nil, nil +} + +func createMergedUnion( + ctx *plancontext.PlanningContext, + lhsRoute, rhsRoute *Route, + lhsExprs, rhsExprs sqlparser.SelectExprs, + distinct bool, + routing Routing) (ops.Operator, sqlparser.SelectExprs, error) { + + // if there are `*` on either side, or a different number of SelectExpr items, + // we give up aligning the expressions and trust that we can push everything down + cols := make(sqlparser.SelectExprs, len(lhsExprs)) + noDeps := len(lhsExprs) != len(rhsExprs) + for idx, col := range lhsExprs { + ae, ok := col.(*sqlparser.AliasedExpr) + if !ok { + cols[idx] = col + noDeps = true + continue + } + col := sqlparser.NewColName(ae.ColumnName()) + cols[idx] = aeWrap(col) + if noDeps { + continue + } + + deps := ctx.SemTable.RecursiveDeps(ae.Expr) + ae, ok = rhsExprs[idx].(*sqlparser.AliasedExpr) + if !ok { + noDeps = true + continue + } + deps = deps.Merge(ctx.SemTable.RecursiveDeps(ae.Expr)) + ctx.SemTable.Recursive[col] = deps + } + + union := newUnion([]ops.Operator{lhsRoute.Source, rhsRoute.Source}, []sqlparser.SelectExprs{lhsExprs, rhsExprs}, cols, distinct) + selectExprs := unionSelects(lhsExprs) + return &Route{ + Source: union, + MergedWith: []*Route{rhsRoute}, + Routing: routing, + }, selectExprs, nil +} + +func compactUnion(u *Union) *rewrite.ApplyResult { + if u.distinct { + // first we remove unnecessary DISTINCTs + for idx, source := range u.Sources { + d, ok := source.(*Distinct) + if !ok || !d.Required { + continue + } + u.Sources[idx] = d.Source + } + } + + var newSources []ops.Operator + var newSelects []sqlparser.SelectExprs + merged := false + + for idx, source := range u.Sources { + other, ok := source.(*Union) + + if ok && (u.distinct || !other.distinct) { + newSources = append(newSources, other.Sources...) + newSelects = append(newSelects, other.Selects...) + merged = true + continue + } + + newSources = append(newSources, source) + newSelects = append(newSelects, u.Selects[idx]) + } + + if !merged { + return rewrite.SameTree + } + + u.Sources = newSources + u.Selects = newSelects + return rewrite.NewTree("merged UNIONs", u) +} diff --git a/go/vt/vtgate/planbuilder/operators/update.go b/go/vt/vtgate/planbuilder/operators/update.go index 11c46a326a4..8a16b97117e 100644 --- a/go/vt/vtgate/planbuilder/operators/update.go +++ b/go/vt/vtgate/planbuilder/operators/update.go @@ -17,9 +17,12 @@ limitations under the License. package operators import ( + vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -37,18 +40,13 @@ type Update struct { noPredicates } -var _ ops.PhysicalOperator = (*Update)(nil) - // Introduces implements the PhysicalOperator interface -func (u *Update) Introduces() semantics.TableSet { +func (u *Update) introducesTableID() semantics.TableSet { return u.QTable.ID } -// IPhysical implements the PhysicalOperator interface -func (u *Update) IPhysical() {} - // Clone implements the Operator interface -func (u *Update) Clone(inputs []ops.Operator) ops.Operator { +func (u *Update) Clone([]ops.Operator) ops.Operator { return &Update{ QTable: u.QTable, VTable: u.VTable, @@ -59,9 +57,580 @@ func (u *Update) Clone(inputs []ops.Operator) ops.Operator { } } +func (u *Update) GetOrdering() ([]ops.OrderBy, error) { + return nil, nil +} + func (u *Update) TablesUsed() []string { if u.VTable != nil { return SingleQualifiedIdentifier(u.VTable.Keyspace, u.VTable.Name) } return nil } + +func (u *Update) ShortDescription() string { + return u.VTable.String() +} + +func (u *Update) Statement() sqlparser.Statement { + return u.AST +} + +func createOperatorFromUpdate(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update) (ops.Operator, error) { + tableInfo, qt, err := createQueryTableForDML(ctx, updStmt.TableExprs[0], updStmt.Where) + if err != nil { + return nil, err + } + + vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "update") + if err != nil { + return nil, err + } + + updClone := sqlparser.CloneRefOfUpdate(updStmt) + updOp, err := createUpdateOperator(ctx, updStmt, vindexTable, qt, routing) + if err != nil { + return nil, err + } + + ksMode, err := ctx.VSchema.ForeignKeyMode(vindexTable.Keyspace.Name) + if err != nil { + return nil, err + } + // Unmanaged foreign-key-mode, we don't need to do anything. + if ksMode != vschemapb.Keyspace_FK_MANAGED { + return updOp, nil + } + + parentFks, childFks := getFKRequirementsForUpdate(ctx, updStmt.Exprs, vindexTable) + if len(childFks) == 0 && len(parentFks) == 0 { + return updOp, nil + } + + // If the delete statement has a limit, we don't support it yet. + if updStmt.Limit != nil { + return nil, vterrors.VT12001("update with limit with foreign key constraints") + } + + return buildFkOperator(ctx, updOp, updClone, parentFks, childFks, vindexTable) +} + +func createUpdateOperator(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update, vindexTable *vindexes.Table, qt *QueryTable, routing Routing) (ops.Operator, error) { + assignments := make(map[string]sqlparser.Expr) + for _, set := range updStmt.Exprs { + assignments[set.Name.Name.String()] = set.Expr + } + + vp, cvv, ovq, err := getUpdateVindexInformation(updStmt, vindexTable, qt.ID, qt.Predicates) + if err != nil { + return nil, err + } + + tr, ok := routing.(*ShardedRouting) + if ok { + tr.VindexPreds = vp + } + + for _, predicate := range qt.Predicates { + routing, err = UpdateRoutingLogic(ctx, predicate, routing) + if err != nil { + return nil, err + } + } + + if routing.OpCode() == engine.Scatter && updStmt.Limit != nil { + // TODO systay: we should probably check for other op code types - IN could also hit multiple shards (2022-04-07) + return nil, vterrors.VT12001("multi shard UPDATE with LIMIT") + } + + r := &Route{ + Source: &Update{ + QTable: qt, + VTable: vindexTable, + Assignments: assignments, + ChangedVindexValues: cvv, + OwnedVindexQuery: ovq, + AST: updStmt, + }, + Routing: routing, + } + + subq, err := createSubqueryFromStatement(ctx, updStmt) + if err != nil { + return nil, err + } + if subq == nil { + return r, nil + } + subq.Outer = r + return subq, nil +} + +// getFKRequirementsForUpdate analyzes update expressions to determine which foreign key constraints needs management at the VTGate. +// It identifies parent and child foreign keys that require verification or cascade operations due to column updates. +func getFKRequirementsForUpdate(ctx *plancontext.PlanningContext, updateExprs sqlparser.UpdateExprs, vindexTable *vindexes.Table) ([]vindexes.ParentFKInfo, []vindexes.ChildFKInfo) { + parentFks := vindexTable.ParentFKsNeedsHandling(ctx.VerifyAllFKs, ctx.ParentFKToIgnore) + childFks := vindexTable.ChildFKsNeedsHandling(ctx.VerifyAllFKs, vindexes.UpdateAction) + if len(childFks) == 0 && len(parentFks) == 0 { + return nil, nil + } + + pFksRequired := make([]bool, len(parentFks)) + cFksRequired := make([]bool, len(childFks)) + // Go over all the update expressions + for _, updateExpr := range updateExprs { + // Any foreign key to a child table for a column that has been updated + // will require the cascade operations or restrict verification to happen, so we include all such foreign keys. + for idx, childFk := range childFks { + if childFk.ParentColumns.FindColumn(updateExpr.Name.Name) >= 0 { + cFksRequired[idx] = true + } + } + // If we are setting a column to NULL, then we don't need to verify the existance of an + // equivalent row in the parent table, even if this column was part of a foreign key to a parent table. + if sqlparser.IsNull(updateExpr.Expr) { + continue + } + // We add all the possible parent foreign key constraints that need verification that an equivalent row + // exists, given that this column has changed. + for idx, parentFk := range parentFks { + if parentFk.ChildColumns.FindColumn(updateExpr.Name.Name) >= 0 { + pFksRequired[idx] = true + } + } + } + // For the parent foreign keys, if any of the columns part of the fk is set to NULL, + // then, we don't care for the existance of an equivalent row in the parent table. + for idx, parentFk := range parentFks { + for _, updateExpr := range updateExprs { + if !sqlparser.IsNull(updateExpr.Expr) { + continue + } + if parentFk.ChildColumns.FindColumn(updateExpr.Name.Name) >= 0 { + pFksRequired[idx] = false + } + } + } + // Get the filtered lists and return them. + var pFksNeedsHandling []vindexes.ParentFKInfo + var cFksNeedsHandling []vindexes.ChildFKInfo + for idx, parentFk := range parentFks { + if pFksRequired[idx] { + pFksNeedsHandling = append(pFksNeedsHandling, parentFk) + } + } + for idx, childFk := range childFks { + if cFksRequired[idx] { + cFksNeedsHandling = append(cFksNeedsHandling, childFk) + } + } + return pFksNeedsHandling, cFksNeedsHandling +} + +func buildFkOperator(ctx *plancontext.PlanningContext, updOp ops.Operator, updClone *sqlparser.Update, parentFks []vindexes.ParentFKInfo, childFks []vindexes.ChildFKInfo, updatedTable *vindexes.Table) (ops.Operator, error) { + // We only support simple expressions in update queries for foreign key handling. + if isNonLiteral(updClone.Exprs, parentFks, childFks) { + return nil, vterrors.VT12001("update expression with non-literal values with foreign key constraints") + } + + restrictChildFks, cascadeChildFks := splitChildFks(childFks) + + op, err := createFKCascadeOp(ctx, updOp, updClone, cascadeChildFks, updatedTable) + if err != nil { + return nil, err + } + + return createFKVerifyOp(ctx, op, updClone, parentFks, restrictChildFks) +} + +func isNonLiteral(updExprs sqlparser.UpdateExprs, parentFks []vindexes.ParentFKInfo, childFks []vindexes.ChildFKInfo) bool { + for _, updateExpr := range updExprs { + if sqlparser.IsLiteral(updateExpr.Expr) { + continue + } + for _, parentFk := range parentFks { + if parentFk.ChildColumns.FindColumn(updateExpr.Name.Name) >= 0 { + return true + } + } + for _, childFk := range childFks { + if childFk.ParentColumns.FindColumn(updateExpr.Name.Name) >= 0 { + return true + } + } + } + return false +} + +// splitChildFks splits the child foreign keys into restrict and cascade list as restrict is handled through Verify operator and cascade is handled through Cascade operator. +func splitChildFks(fks []vindexes.ChildFKInfo) (restrictChildFks, cascadeChildFks []vindexes.ChildFKInfo) { + for _, fk := range fks { + // Any RESTRICT type foreign keys that arrive here for 2 reasons— + // 1. cross-shard/cross-keyspace RESTRICT cases. + // 2. shard-scoped/unsharded RESTRICT cases arising because we have to validate all the foreign keys on VTGate. + if fk.OnUpdate.IsRestrict() { + // For RESTRICT foreign keys, we need to verify that there are no child rows corresponding to the rows being updated. + // This is done using a FkVerify Operator. + restrictChildFks = append(restrictChildFks, fk) + } else { + // For all the other foreign keys like CASCADE, SET NULL, we have to cascade the update to the children, + // This is done by using a FkCascade Operator. + cascadeChildFks = append(cascadeChildFks, fk) + } + } + return +} + +func createFKCascadeOp(ctx *plancontext.PlanningContext, parentOp ops.Operator, updStmt *sqlparser.Update, childFks []vindexes.ChildFKInfo, updatedTable *vindexes.Table) (ops.Operator, error) { + if len(childFks) == 0 { + return parentOp, nil + } + + var fkChildren []*FkChild + var selectExprs []sqlparser.SelectExpr + + for _, fk := range childFks { + // We should have already filtered out update restrict foreign keys. + if fk.OnUpdate.IsRestrict() { + return nil, vterrors.VT13001("ON UPDATE RESTRICT foreign keys should already be filtered") + } + + // We need to select all the parent columns for the foreign key constraint, to use in the update of the child table. + cols, exprs := selectParentColumns(fk, len(selectExprs)) + selectExprs = append(selectExprs, exprs...) + + fkChild, err := createFkChildForUpdate(ctx, fk, updStmt, cols, updatedTable) + if err != nil { + return nil, err + } + fkChildren = append(fkChildren, fkChild) + } + + selectionOp, err := createSelectionOp(ctx, selectExprs, updStmt.TableExprs, updStmt.Where, nil, sqlparser.ForUpdateLock) + if err != nil { + return nil, err + } + + return &FkCascade{ + Selection: selectionOp, + Children: fkChildren, + Parent: parentOp, + }, nil +} + +// createFkChildForUpdate creates the update query operator for the child table based on the foreign key constraints. +func createFkChildForUpdate(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, updStmt *sqlparser.Update, cols []int, updatedTable *vindexes.Table) (*FkChild, error) { + // Create a ValTuple of child column names + var valTuple sqlparser.ValTuple + for _, column := range fk.ChildColumns { + valTuple = append(valTuple, sqlparser.NewColName(column.String())) + } + + // Reserve a bind variable name + bvName := ctx.ReservedVars.ReserveVariable(foriegnKeyContraintValues) + // Create a comparison expression for WHERE clause + compExpr := sqlparser.NewComparisonExpr(sqlparser.InOp, valTuple, sqlparser.NewListArg(bvName), nil) + var childWhereExpr sqlparser.Expr = compExpr + + var childOp ops.Operator + var err error + switch fk.OnUpdate { + case sqlparser.Cascade: + childOp, err = buildChildUpdOpForCascade(ctx, fk, updStmt, childWhereExpr, updatedTable) + case sqlparser.SetNull: + childOp, err = buildChildUpdOpForSetNull(ctx, fk, updStmt, childWhereExpr, valTuple) + case sqlparser.SetDefault: + return nil, vterrors.VT09016() + } + if err != nil { + return nil, err + } + + return &FkChild{ + BVName: bvName, + Cols: cols, + Op: childOp, + }, nil +} + +// buildChildUpdOpForCascade builds the child update statement operator for the CASCADE type foreign key constraint. +// The query looks like this - +// +// `UPDATE SET WHERE IN ()` +func buildChildUpdOpForCascade(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, updStmt *sqlparser.Update, childWhereExpr sqlparser.Expr, updatedTable *vindexes.Table) (ops.Operator, error) { + // The update expressions are the same as the update expressions in the parent update query + // with the column names replaced with the child column names. + var childUpdateExprs sqlparser.UpdateExprs + for _, updateExpr := range updStmt.Exprs { + colIdx := fk.ParentColumns.FindColumn(updateExpr.Name.Name) + if colIdx == -1 { + continue + } + + // The where condition is the same as the comparison expression above + // with the column names replaced with the child column names. + childUpdateExprs = append(childUpdateExprs, &sqlparser.UpdateExpr{ + Name: sqlparser.NewColName(fk.ChildColumns[colIdx].String()), + Expr: updateExpr.Expr, + }) + } + // Because we could be updating the child to a non-null value, + // We have to run with foreign key checks OFF because the parent isn't guaranteed to have + // the data being updated to. + parsedComments := sqlparser.Comments{ + "/*+ SET_VAR(foreign_key_checks=OFF) */", + }.Parsed() + childUpdStmt := &sqlparser.Update{ + Comments: parsedComments, + Exprs: childUpdateExprs, + TableExprs: []sqlparser.TableExpr{sqlparser.NewAliasedTableExpr(fk.Table.GetTableName(), "")}, + Where: &sqlparser.Where{Type: sqlparser.WhereClause, Expr: childWhereExpr}, + } + // Since we are running the child update with foreign key checks turned off, + // we need to verify the validity of the remaining foreign keys on VTGate, + // while specifically ignoring the parent foreign key in question. + return createOpFromStmt(ctx, childUpdStmt, true, fk.String(updatedTable)) + +} + +// buildChildUpdOpForSetNull builds the child update statement operator for the SET NULL type foreign key constraint. +// The query looks like this - +// +// `UPDATE SET +// WHERE IN () +// [AND NOT IN ()]` +func buildChildUpdOpForSetNull(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, updStmt *sqlparser.Update, childWhereExpr sqlparser.Expr, valTuple sqlparser.ValTuple) (ops.Operator, error) { + // For the SET NULL type constraint, we need to set all the child columns to NULL. + var childUpdateExprs sqlparser.UpdateExprs + for _, column := range fk.ChildColumns { + childUpdateExprs = append(childUpdateExprs, &sqlparser.UpdateExpr{ + Name: sqlparser.NewColName(column.String()), + Expr: &sqlparser.NullVal{}, + }) + } + + // SET NULL cascade should be avoided for the case where the parent columns remains unchanged on the update. + // We need to add a condition to the where clause to handle this case. + // The additional condition looks like [AND NOT IN ()]. + // If any of the parent columns is being set to NULL, then we don't need this condition. + var updateValues sqlparser.ValTuple + colSetToNull := false + for _, updateExpr := range updStmt.Exprs { + colIdx := fk.ParentColumns.FindColumn(updateExpr.Name.Name) + if colIdx >= 0 { + if sqlparser.IsNull(updateExpr.Expr) { + colSetToNull = true + break + } + updateValues = append(updateValues, updateExpr.Expr) + } + } + if !colSetToNull { + childWhereExpr = &sqlparser.AndExpr{ + Left: childWhereExpr, + Right: sqlparser.NewComparisonExpr(sqlparser.NotInOp, valTuple, sqlparser.ValTuple{updateValues}, nil), + } + } + childUpdStmt := &sqlparser.Update{ + Exprs: childUpdateExprs, + TableExprs: []sqlparser.TableExpr{sqlparser.NewAliasedTableExpr(fk.Table.GetTableName(), "")}, + Where: &sqlparser.Where{Type: sqlparser.WhereClause, Expr: childWhereExpr}, + } + return createOpFromStmt(ctx, childUpdStmt, false, "") +} + +// createFKVerifyOp creates the verify operator for the parent foreign key constraints. +func createFKVerifyOp(ctx *plancontext.PlanningContext, childOp ops.Operator, updStmt *sqlparser.Update, parentFks []vindexes.ParentFKInfo, restrictChildFks []vindexes.ChildFKInfo) (ops.Operator, error) { + if len(parentFks) == 0 && len(restrictChildFks) == 0 { + return childOp, nil + } + + var Verify []*VerifyOp + // This validates that new values exists on the parent table. + for _, fk := range parentFks { + op, err := createFkVerifyOpForParentFKForUpdate(ctx, updStmt, fk) + if err != nil { + return nil, err + } + Verify = append(Verify, &VerifyOp{ + Op: op, + Typ: engine.ParentVerify, + }) + } + // This validates that the old values don't exist on the child table. + for _, fk := range restrictChildFks { + op, err := createFkVerifyOpForChildFKForUpdate(ctx, updStmt, fk) + if err != nil { + return nil, err + } + Verify = append(Verify, &VerifyOp{ + Op: op, + Typ: engine.ChildVerify, + }) + } + + return &FkVerify{ + Verify: Verify, + Input: childOp, + }, nil +} + +// Each parent foreign key constraint is verified by an anti join query of the form: +// select 1 from child_tbl left join parent_tbl on +// where and and limit 1 +// E.g: +// Child (c1, c2) references Parent (p1, p2) +// update Child set c1 = 1 where id = 1 +// verify query: +// select 1 from Child left join Parent on Parent.p1 = 1 and Parent.p2 = Child.c2 +// where Parent.p1 is null and Parent.p2 is null and Child.id = 1 +// and Child.c2 is not null +// limit 1 +func createFkVerifyOpForParentFKForUpdate(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update, pFK vindexes.ParentFKInfo) (ops.Operator, error) { + childTblExpr := updStmt.TableExprs[0].(*sqlparser.AliasedTableExpr) + childTbl, err := childTblExpr.TableName() + if err != nil { + return nil, err + } + parentTbl := pFK.Table.GetTableName() + var whereCond sqlparser.Expr + var joinCond sqlparser.Expr + for idx, column := range pFK.ChildColumns { + var matchedExpr *sqlparser.UpdateExpr + for _, updateExpr := range updStmt.Exprs { + if column.Equal(updateExpr.Name.Name) { + matchedExpr = updateExpr + break + } + } + parentIsNullExpr := &sqlparser.IsExpr{ + Left: sqlparser.NewColNameWithQualifier(pFK.ParentColumns[idx].String(), parentTbl), + Right: sqlparser.IsNullOp, + } + var predicate sqlparser.Expr = parentIsNullExpr + var joinExpr sqlparser.Expr + if matchedExpr == nil { + predicate = &sqlparser.AndExpr{ + Left: parentIsNullExpr, + Right: &sqlparser.IsExpr{ + Left: sqlparser.NewColNameWithQualifier(pFK.ChildColumns[idx].String(), childTbl), + Right: sqlparser.IsNotNullOp, + }, + } + joinExpr = &sqlparser.ComparisonExpr{ + Operator: sqlparser.EqualOp, + Left: sqlparser.NewColNameWithQualifier(pFK.ParentColumns[idx].String(), parentTbl), + Right: sqlparser.NewColNameWithQualifier(pFK.ChildColumns[idx].String(), childTbl), + } + } else { + joinExpr = &sqlparser.ComparisonExpr{ + Operator: sqlparser.EqualOp, + Left: sqlparser.NewColNameWithQualifier(pFK.ParentColumns[idx].String(), parentTbl), + Right: prefixColNames(childTbl, matchedExpr.Expr), + } + } + + if idx == 0 { + joinCond, whereCond = joinExpr, predicate + continue + } + joinCond = &sqlparser.AndExpr{Left: joinCond, Right: joinExpr} + whereCond = &sqlparser.AndExpr{Left: whereCond, Right: predicate} + } + // add existing where condition on the update statement + if updStmt.Where != nil { + whereCond = &sqlparser.AndExpr{Left: whereCond, Right: prefixColNames(childTbl, updStmt.Where.Expr)} + } + return createSelectionOp(ctx, + sqlparser.SelectExprs{sqlparser.NewAliasedExpr(sqlparser.NewIntLiteral("1"), "")}, + []sqlparser.TableExpr{ + sqlparser.NewJoinTableExpr( + childTblExpr, + sqlparser.LeftJoinType, + sqlparser.NewAliasedTableExpr(parentTbl, ""), + sqlparser.NewJoinCondition(joinCond, nil)), + }, + sqlparser.NewWhere(sqlparser.WhereClause, whereCond), + sqlparser.NewLimitWithoutOffset(1), + sqlparser.ShareModeLock) +} + +// Each child foreign key constraint is verified by a join query of the form: +// select 1 from child_tbl join parent_tbl on where [AND NOT IN ()] limit 1 +// E.g: +// Child (c1, c2) references Parent (p1, p2) +// update Parent set p1 = 1 where id = 1 +// verify query: +// select 1 from Child join Parent on Parent.p1 = Child.c1 and Parent.p2 = Child.c2 +// where Parent.id = 1 and (parent.p1) NOT IN ((1)) limit 1 +func createFkVerifyOpForChildFKForUpdate(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update, cFk vindexes.ChildFKInfo) (ops.Operator, error) { + // ON UPDATE RESTRICT foreign keys that require validation, should only be allowed in the case where we + // are verifying all the FKs on vtgate level. + if !ctx.VerifyAllFKs { + return nil, vterrors.VT12002() + } + parentTblExpr := updStmt.TableExprs[0].(*sqlparser.AliasedTableExpr) + parentTbl, err := parentTblExpr.TableName() + if err != nil { + return nil, err + } + childTbl := cFk.Table.GetTableName() + var joinCond sqlparser.Expr + for idx := range cFk.ParentColumns { + joinExpr := &sqlparser.ComparisonExpr{ + Operator: sqlparser.EqualOp, + Left: sqlparser.NewColNameWithQualifier(cFk.ParentColumns[idx].String(), parentTbl), + Right: sqlparser.NewColNameWithQualifier(cFk.ChildColumns[idx].String(), childTbl), + } + + if idx == 0 { + joinCond = joinExpr + continue + } + joinCond = &sqlparser.AndExpr{Left: joinCond, Right: joinExpr} + } + + var whereCond sqlparser.Expr + // add existing where condition on the update statement + if updStmt.Where != nil { + whereCond = prefixColNames(parentTbl, updStmt.Where.Expr) + } + + // We don't want to fail the RESTRICT for the case where the parent columns remains unchanged on the update. + // We need to add a condition to the where clause to handle this case. + // The additional condition looks like [AND NOT IN ()]. + // If any of the parent columns is being set to NULL, then we don't need this condition. + var updateValues sqlparser.ValTuple + colSetToNull := false + for _, updateExpr := range updStmt.Exprs { + colIdx := cFk.ParentColumns.FindColumn(updateExpr.Name.Name) + if colIdx >= 0 { + if sqlparser.IsNull(updateExpr.Expr) { + colSetToNull = true + break + } + updateValues = append(updateValues, updateExpr.Expr) + } + } + if !colSetToNull { + // Create a ValTuple of child column names + var valTuple sqlparser.ValTuple + for _, column := range cFk.ParentColumns { + valTuple = append(valTuple, sqlparser.NewColNameWithQualifier(column.String(), parentTbl)) + } + whereCond = sqlparser.AndExpressions(whereCond, sqlparser.NewComparisonExpr(sqlparser.NotInOp, valTuple, sqlparser.ValTuple{updateValues}, nil)) + } + + return createSelectionOp(ctx, + sqlparser.SelectExprs{sqlparser.NewAliasedExpr(sqlparser.NewIntLiteral("1"), "")}, + []sqlparser.TableExpr{ + sqlparser.NewJoinTableExpr( + parentTblExpr, + sqlparser.NormalJoinType, + sqlparser.NewAliasedTableExpr(childTbl, ""), + sqlparser.NewJoinCondition(joinCond, nil)), + }, + sqlparser.NewWhere(sqlparser.WhereClause, whereCond), + sqlparser.NewLimitWithoutOffset(1), + sqlparser.ShareModeLock) +} diff --git a/go/vt/vtgate/planbuilder/operators/vindex.go b/go/vt/vtgate/planbuilder/operators/vindex.go index 0c0d6976fb5..eeb57561afb 100644 --- a/go/vt/vtgate/planbuilder/operators/vindex.go +++ b/go/vt/vtgate/planbuilder/operators/vindex.go @@ -17,6 +17,7 @@ limitations under the License. package operators import ( + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" @@ -51,33 +52,81 @@ type ( const VindexUnsupported = "WHERE clause for vindex function must be of the form id = or id in(,...)" // Introduces implements the Operator interface -func (v *Vindex) Introduces() semantics.TableSet { +func (v *Vindex) introducesTableID() semantics.TableSet { return v.Solved } -// IPhysical implements the PhysicalOperator interface -func (v *Vindex) IPhysical() {} - // Clone implements the Operator interface func (v *Vindex) Clone([]ops.Operator) ops.Operator { clone := *v return &clone } -var _ ops.PhysicalOperator = (*Vindex)(nil) +func (v *Vindex) AddColumns(ctx *plancontext.PlanningContext, reuse bool, groupBys []bool, exprs []*sqlparser.AliasedExpr) ([]int, error) { + offsets := make([]int, len(exprs)) + for idx, ae := range exprs { + if groupBys[idx] { + return nil, vterrors.VT13001("tried to add group by to a table") + } + + if reuse { + offset, err := v.FindCol(ctx, ae.Expr, true) + if err != nil { + return nil, err + } + if offset > -1 { + offsets[idx] = offset + continue + } + } + + offset, err := addColumn(ctx, v, ae.Expr) + if err != nil { + return nil, err + } -func (v *Vindex) AddColumn(_ *plancontext.PlanningContext, expr sqlparser.Expr) (int, error) { - return addColumn(v, expr) + offsets[idx] = offset + } + + return offsets, nil +} + +func colNameToExpr(c *sqlparser.ColName) *sqlparser.AliasedExpr { + return &sqlparser.AliasedExpr{ + Expr: c, + As: sqlparser.IdentifierCI{}, + } } -func (v *Vindex) GetColumns() []*sqlparser.ColName { +func (v *Vindex) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) (int, error) { + for idx, col := range v.Columns { + if ctx.SemTable.EqualsExprWithDeps(expr, col) { + return idx, nil + } + } + + return -1, nil +} + +func (v *Vindex) GetColumns(*plancontext.PlanningContext) ([]*sqlparser.AliasedExpr, error) { + return slice.Map(v.Columns, colNameToExpr), nil +} + +func (v *Vindex) GetSelectExprs(ctx *plancontext.PlanningContext) (sqlparser.SelectExprs, error) { + return transformColumnsToSelectExprs(ctx, v) +} + +func (v *Vindex) GetOrdering() ([]ops.OrderBy, error) { + return nil, nil +} + +func (v *Vindex) GetColNames() []*sqlparser.ColName { return v.Columns } func (v *Vindex) AddCol(col *sqlparser.ColName) { v.Columns = append(v.Columns, col) } -// checkValid implements the Operator interface func (v *Vindex) CheckValid() error { if len(v.Table.Predicates) == 0 { return vterrors.VT12001(VindexUnsupported + " (where clause missing)") @@ -134,3 +183,7 @@ func (v *Vindex) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.E func (v *Vindex) TablesUsed() []string { return []string{v.Table.Table.Name.String()} } + +func (v *Vindex) ShortDescription() string { + return v.Vindex.String() +} diff --git a/go/vt/vtgate/planbuilder/ordered_aggregate.go b/go/vt/vtgate/planbuilder/ordered_aggregate.go index 9458e85de66..6163900e674 100644 --- a/go/vt/vtgate/planbuilder/ordered_aggregate.go +++ b/go/vt/vtgate/planbuilder/ordered_aggregate.go @@ -17,20 +17,9 @@ limitations under the License. package planbuilder import ( - "fmt" - "strconv" - "strings" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "vitess.io/vitess/go/mysql/collations" - - "vitess.io/vitess/go/sqltypes" - - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) var _ logicalPlan = (*orderedAggregate)(nil) @@ -63,12 +52,6 @@ var _ logicalPlan = (*orderedAggregate)(nil) // } type orderedAggregate struct { resultsBuilder - extraDistinct *sqlparser.ColName - - // preProcess is true if one of the aggregates needs preprocessing. - preProcess bool - - aggrOnEngine bool // aggregates specifies the aggregation parameters for each // aggregation function: function opcode and input column number. @@ -81,320 +64,27 @@ type orderedAggregate struct { truncateColumnCount int } -// checkAggregates analyzes the select expression for aggregates. If it determines -// that a primitive is needed to handle the aggregation, it builds an orderedAggregate -// primitive and returns it. It returns a groupByHandler if there is aggregation it -// can handle. -func (pb *primitiveBuilder) checkAggregates(sel *sqlparser.Select) error { - rb, isRoute := pb.plan.(*route) - if isRoute && rb.isSingleShard() { - // since we can push down all of the aggregation to the route, - // we don't need to do anything else here - return nil - } - - // Check if we can allow aggregates. - hasAggregates := sqlparser.ContainsAggregation(sel.SelectExprs) || len(sel.GroupBy) > 0 - if !hasAggregates && !sel.Distinct { - return nil - } - - // The query has aggregates. We can proceed only - // if the underlying primitive is a route because - // we need the ability to push down group by and - // order by clauses. - if !isRoute { - if hasAggregates { - return vterrors.VT12001("cross-shard query with aggregates") - } - pb.plan = newDistinctV3(pb.plan) - return nil - } - - // If there is a distinct clause, we can check the select list - // to see if it has a unique vindex reference. For example, - // if the query was 'select distinct id, col from t' (with id - // as a unique vindex), then the distinct operation can be - // safely pushed down because the unique vindex guarantees - // that each id can only be in a single shard. Without the - // unique vindex property, the id could come from multiple - // shards, which will require us to perform the grouping - // at the vtgate level. - if sel.Distinct { - for _, selectExpr := range sel.SelectExprs { - switch selectExpr := selectExpr.(type) { - case *sqlparser.AliasedExpr: - vindex := pb.st.Vindex(selectExpr.Expr, rb) - if vindex != nil && vindex.IsUnique() { - return nil - } - } - } - } - - // The group by clause could also reference a unique vindex. The above - // example could itself have been written as - // 'select id, col from t group by id, col', or a query could be like - // 'select id, count(*) from t group by id'. In the above cases, - // the grouping can be done at the shard level, which allows the entire query - // to be pushed down. In order to perform this analysis, we're going to look - // ahead at the group by clause to see if it references a unique vindex. - if pb.groupByHasUniqueVindex(sel, rb) { - return nil - } - - // We need an aggregator primitive. - oa := &orderedAggregate{} - oa.resultsBuilder = newResultsBuilder(rb, oa) - pb.plan = oa - pb.plan.Reorder(0) - return nil -} - -// groupbyHasUniqueVindex looks ahead at the group by expression to see if -// it references a unique vindex. -// -// The vitess group by rules are different from MySQL because it's not possible -// to match the MySQL behavior without knowing the schema. For example: -// 'select id as val from t group by val' will have different interpretations -// under MySQL depending on whether t has a val column or not. -// In vitess, we always assume that 'val' references 'id'. This is achieved -// by the symbol table resolving against the select list before searching -// the tables. -// -// In order to look ahead, we have to overcome the chicken-and-egg problem: -// group by needs the select aliases to be built. Select aliases are built -// on push-down. But push-down decision depends on whether group by expressions -// reference a vindex. -// To overcome this, the look-ahead has to perform a search that matches -// the group by analyzer. The flow is similar to oa.PushGroupBy, except that -// we don't search the ResultColumns because they're not created yet. Also, -// error conditions are treated as no match for simplicity; They will be -// subsequently caught downstream. -func (pb *primitiveBuilder) groupByHasUniqueVindex(sel *sqlparser.Select, rb *route) bool { - for _, expr := range sel.GroupBy { - var matchedExpr sqlparser.Expr - switch node := expr.(type) { - case *sqlparser.ColName: - if expr := findAlias(node, sel.SelectExprs); expr != nil { - matchedExpr = expr - } else { - matchedExpr = node - } - case *sqlparser.Literal: - if node.Type != sqlparser.IntVal { - continue - } - num, err := strconv.ParseInt(string(node.Val), 0, 64) - if err != nil { - continue - } - if num < 1 || num > int64(len(sel.SelectExprs)) { - continue - } - expr, ok := sel.SelectExprs[num-1].(*sqlparser.AliasedExpr) - if !ok { - continue - } - matchedExpr = expr.Expr - default: - continue - } - vindex := pb.st.Vindex(matchedExpr, rb) - if vindex != nil && vindex.IsUnique() { - return true - } - } - return false -} - -func findAlias(colname *sqlparser.ColName, selects sqlparser.SelectExprs) sqlparser.Expr { - // Qualified column names cannot match an (unqualified) alias. - if !colname.Qualifier.IsEmpty() { - return nil - } - // See if this references an alias. - for _, selectExpr := range selects { - selectExpr, ok := selectExpr.(*sqlparser.AliasedExpr) - if !ok { - continue - } - if colname.Name.Equal(selectExpr.As) { - return selectExpr.Expr - } - } - return nil -} - // Primitive implements the logicalPlan interface func (oa *orderedAggregate) Primitive() engine.Primitive { - colls := map[int]collations.ID{} - for _, key := range oa.aggregates { - if key.CollationID != collations.Unknown { - colls[key.KeyCol] = key.CollationID - } - } - for _, key := range oa.groupByKeys { - if key.CollationID != collations.Unknown { - colls[key.KeyCol] = key.CollationID - } - } - input := oa.input.Primitive() if len(oa.groupByKeys) == 0 { return &engine.ScalarAggregate{ - PreProcess: oa.preProcess, - AggrOnEngine: oa.aggrOnEngine, Aggregates: oa.aggregates, TruncateColumnCount: oa.truncateColumnCount, - Collations: colls, Input: input, } } return &engine.OrderedAggregate{ - PreProcess: oa.preProcess, - AggrOnEngine: oa.aggrOnEngine, Aggregates: oa.aggregates, GroupByKeys: oa.groupByKeys, TruncateColumnCount: oa.truncateColumnCount, - Collations: colls, Input: input, } } -func (oa *orderedAggregate) pushAggr(pb *primitiveBuilder, expr *sqlparser.AliasedExpr, origin logicalPlan) (rc *resultColumn, colNumber int, err error) { - aggrFunc, _ := expr.Expr.(sqlparser.AggrFunc) - origOpcode := engine.SupportedAggregates[strings.ToLower(aggrFunc.AggrName())] - opcode := origOpcode - if aggrFunc.GetArgs() != nil && - len(aggrFunc.GetArgs()) != 1 { - return nil, 0, vterrors.VT12001(fmt.Sprintf("only one expression is allowed inside aggregates: %s", sqlparser.String(expr))) - } - - handleDistinct, innerAliased, err := oa.needDistinctHandling(pb, expr, opcode) - if err != nil { - return nil, 0, err - } - if handleDistinct { - if oa.extraDistinct != nil { - return nil, 0, vterrors.VT12001(fmt.Sprintf("only one DISTINCT aggregation allowed in a SELECT: %s", sqlparser.String(expr))) - } - // Push the expression that's inside the aggregate. - // The column will eventually get added to the group by and order by clauses. - newBuilder, _, innerCol, err := planProjection(pb, oa.input, innerAliased, origin) - if err != nil { - return nil, 0, err - } - pb.plan = newBuilder - col, err := BuildColName(oa.input.ResultColumns(), innerCol) - if err != nil { - return nil, 0, err - } - oa.extraDistinct = col - oa.preProcess = true - switch opcode { - case engine.AggregateCount: - opcode = engine.AggregateCountDistinct - case engine.AggregateSum: - opcode = engine.AggregateSumDistinct - } - oa.aggregates = append(oa.aggregates, &engine.AggregateParams{ - Opcode: opcode, - Col: innerCol, - Alias: expr.ColumnName(), - OrigOpcode: origOpcode, - }) - } else { - newBuilder, _, innerCol, err := planProjection(pb, oa.input, expr, origin) - if err != nil { - return nil, 0, err - } - pb.plan = newBuilder - oa.aggregates = append(oa.aggregates, &engine.AggregateParams{ - Opcode: opcode, - Col: innerCol, - OrigOpcode: origOpcode, - }) - } - - // Build a new rc with oa as origin because it's semantically different - // from the expression we pushed down. - rc = newResultColumn(expr, oa) - oa.resultColumns = append(oa.resultColumns, rc) - return rc, len(oa.resultColumns) - 1, nil -} - -// needDistinctHandling returns true if oa needs to handle the distinct clause. -// If true, it will also return the aliased expression that needs to be pushed -// down into the underlying route. -func (oa *orderedAggregate) needDistinctHandling(pb *primitiveBuilder, expr *sqlparser.AliasedExpr, opcode engine.AggregateOpcode) (bool, *sqlparser.AliasedExpr, error) { - var innerAliased *sqlparser.AliasedExpr - aggr, ok := expr.Expr.(sqlparser.AggrFunc) - - if !ok { - return false, nil, vterrors.VT03012(sqlparser.String(expr)) - } - - if !aggr.IsDistinct() { - return false, nil, nil - } - if opcode != engine.AggregateCount && opcode != engine.AggregateSum && opcode != engine.AggregateCountStar { - return false, nil, nil - } - - innerAliased = &sqlparser.AliasedExpr{Expr: aggr.GetArg()} - - rb, ok := oa.input.(*route) - if !ok { - // Unreachable - return true, innerAliased, nil - } - vindex := pb.st.Vindex(innerAliased.Expr, rb) - if vindex != nil && vindex.IsUnique() { - return false, nil, nil - } - return true, innerAliased, nil -} - -// Wireup implements the logicalPlan interface -// If text columns are detected in the keys, then the function modifies -// the primitive to pull a corresponding weight_string from mysql and -// compare those instead. This is because we currently don't have the -// ability to mimic mysql's collation behavior. -func (oa *orderedAggregate) Wireup(plan logicalPlan, jt *jointab) error { - for i, gbk := range oa.groupByKeys { - rc := oa.resultColumns[gbk.KeyCol] - if sqltypes.IsText(rc.column.typ) { - weightcolNumber, err := oa.input.SupplyWeightString(gbk.KeyCol, gbk.FromGroupBy) - if err != nil { - _, isUnsupportedErr := err.(UnsupportedSupplyWeightString) - if isUnsupportedErr { - continue - } - return err - } - oa.weightStrings[rc] = weightcolNumber - oa.groupByKeys[i].WeightStringCol = weightcolNumber - oa.groupByKeys[i].KeyCol = weightcolNumber - oa.truncateColumnCount = len(oa.resultColumns) - } - } - for _, key := range oa.aggregates { - switch key.Opcode { - case engine.AggregateCount: - if key.Alias == "" { - key.Alias = key.Opcode.String() - } - key.Opcode = engine.AggregateSum - } - } - - return oa.input.Wireup(plan, jt) -} - -func (oa *orderedAggregate) WireupGen4(ctx *plancontext.PlanningContext) error { - return oa.input.WireupGen4(ctx) +func (oa *orderedAggregate) Wireup(ctx *plancontext.PlanningContext) error { + return oa.input.Wireup(ctx) } // OutputColumns implements the logicalPlan interface diff --git a/go/vt/vtgate/planbuilder/ordering.go b/go/vt/vtgate/planbuilder/ordering.go deleted file mode 100644 index 5abf2823e9e..00000000000 --- a/go/vt/vtgate/planbuilder/ordering.go +++ /dev/null @@ -1,352 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" -) - -type v3Order struct { - *sqlparser.Order - fromGroupBy bool -} - -type v3OrderBy []*v3Order - -func planOrdering(pb *primitiveBuilder, input logicalPlan, orderBy v3OrderBy) (logicalPlan, error) { - switch node := input.(type) { - case *simpleProjection, *vindexFunc: - if len(orderBy) == 0 { - return node, nil - } - return newMemorySort(node, orderBy) - case *distinct: - // TODO: this is weird, but needed - newInput, err := planOrdering(pb, node.input, orderBy) - node.input = newInput - return node, err - case *pulloutSubquery: - plan, err := planOrdering(pb, node.underlying, orderBy) - if err != nil { - return nil, err - } - node.underlying = plan - return node, nil - case *route: - return planRouteOrdering(orderBy, node) - case *join: - return planJoinOrdering(pb, orderBy, node) - case *orderedAggregate: - return planOAOrdering(pb, orderBy, node) - case *mergeSort: - return nil, vterrors.VT12001("ORDER BY on top of ORDER BY") - case *concatenate: - if len(orderBy) == 0 { - return input, nil - } - return nil, vterrors.VT12001("ORDER BY on top of UNION") - } - return nil, vterrors.VT13001(fmt.Sprintf("unreachable %T.ordering", input)) -} - -func planOAOrdering(pb *primitiveBuilder, orderBy v3OrderBy, oa *orderedAggregate) (logicalPlan, error) { - // The requested order must be such that the ordering can be done - // before the group by, which will allow us to push it down to the - // route. This is actually true in most use cases, except for situations - // where ordering is requested on values of an aggregate result. - // Such constructs will need to be handled by a separate 'Sorter' - // primitive, after aggregation is done. For example, the following - // constructs are allowed: - // 'select a, b, count(*) from t group by a, b order by a desc, b asc' - // 'select a, b, count(*) from t group by a, b order by b' - // The following construct is not allowed: - // 'select a, count(*) from t group by a order by count(*)' - // Treat order by null as nil order by. - if len(orderBy) == 1 { - if _, ok := orderBy[0].Expr.(*sqlparser.NullVal); ok { - orderBy = nil - } - } - - // referenced tracks the keys referenced by the order by clause. - referenced := make([]bool, len(oa.groupByKeys)) - postSort := false - selOrderBy := make(v3OrderBy, 0, len(orderBy)) - for _, order := range orderBy { - // Identify the order by column. - var orderByCol *column - switch expr := order.Expr.(type) { - case *sqlparser.Literal: - num, err := ResultFromNumber(oa.resultColumns, expr, "order clause") - if err != nil { - return nil, err - } - orderByCol = oa.resultColumns[num].column - case *sqlparser.ColName: - orderByCol = expr.Metadata.(*column) - case *sqlparser.CastExpr: - col, ok := expr.Expr.(*sqlparser.ColName) - if !ok { - return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: complex ORDER BY expression: %s", sqlparser.String(expr))) - } - orderByCol = col.Metadata.(*column) - case *sqlparser.ConvertExpr: - col, ok := expr.Expr.(*sqlparser.ColName) - if !ok { - return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: complex ORDER BY expression: %s", sqlparser.String(expr))) - } - orderByCol = col.Metadata.(*column) - default: - return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: complex ORDER BY expression: %v", sqlparser.String(expr))) - } - - // Match orderByCol against the group by columns. - found := false - for j, groupBy := range oa.groupByKeys { - if oa.resultColumns[groupBy.KeyCol].column != orderByCol { - continue - } - - found = true - referenced[j] = true - order.fromGroupBy = groupBy.FromGroupBy - selOrderBy = append(selOrderBy, order) - break - } - if !found { - postSort = true - } - } - - // Append any unreferenced keys at the end of the order by. - for i, groupByKey := range oa.groupByKeys { - if referenced[i] { - continue - } - // Build a brand new reference for the key. - col, err := BuildColName(oa.input.ResultColumns(), groupByKey.KeyCol) - if err != nil { - return nil, vterrors.Wrapf(err, "generating ORDER BY clause") - } - selOrderBy = append(selOrderBy, &v3Order{ - Order: &sqlparser.Order{Expr: col, Direction: sqlparser.AscOrder}, - fromGroupBy: groupByKey.FromGroupBy, - }) - } - - // Append the distinct aggregate if any. - if oa.extraDistinct != nil { - selOrderBy = append(selOrderBy, &v3Order{ - Order: &sqlparser.Order{Expr: oa.extraDistinct, Direction: sqlparser.AscOrder}, - fromGroupBy: true, - }) - } - - // Push down the order by. - // It's ok to push the original AST down because all references - // should point to the route. Only aggregate functions are originated - // by node, and we currently don't allow the ORDER BY to reference them. - plan, err := planOrdering(pb, oa.input, selOrderBy) - if err != nil { - return nil, err - } - oa.input = plan - if postSort { - return newMemorySort(oa, orderBy) - } - return oa, nil -} - -func planJoinOrdering(pb *primitiveBuilder, orderBy v3OrderBy, node *join) (logicalPlan, error) { - isSpecial := false - switch len(orderBy) { - case 0: - isSpecial = true - case 1: - if _, ok := orderBy[0].Expr.(*sqlparser.NullVal); ok { - isSpecial = true - } else if f, ok := orderBy[0].Expr.(*sqlparser.FuncExpr); ok { - if f.Name.Lowered() == "rand" { - isSpecial = true - } - } - } - if isSpecial { - l, err := planOrdering(pb, node.Left, orderBy) - if err != nil { - return nil, err - } - node.Left = l - r, err := planOrdering(pb, node.Right, orderBy) - if err != nil { - return nil, err - } - node.Right = r - return node, nil - } - - for _, order := range orderBy { - if e, ok := order.Expr.(*sqlparser.Literal); ok { - // This block handles constructs that use ordinals for 'ORDER BY'. For example: - // SELECT a, b, c FROM t1, t2 ORDER BY 1, 2, 3. - num, err := ResultFromNumber(node.ResultColumns(), e, "order clause") - if err != nil { - return nil, err - } - if node.ResultColumns()[num].column.Origin().Order() > node.Left.Order() { - return newMemorySort(node, orderBy) - } - } else { - // Analyze column references within the expression to make sure they all - // go to the left. - err := sqlparser.Walk(func(in sqlparser.SQLNode) (kontinue bool, err error) { - switch e := in.(type) { - case *sqlparser.ColName: - if e.Metadata.(*column).Origin().Order() > node.Left.Order() { - return false, vterrors.VT12001("ORDER BY spans across shards") - } - case *sqlparser.Subquery: - // Unreachable because ResolveSymbols perfoms this check up above. - return false, vterrors.VT12001("ORDER BY has subquery") - } - return true, nil - }, order.Expr) - if err != nil { - return newMemorySort(node, orderBy) - } - } - } - - // There were no errors. We can push the order by to the left-most route. - l, err := planOrdering(pb, node.Left, orderBy) - if err != nil { - return nil, err - } - node.Left = l - // Still need to push an empty order by to the right. - r, err := planOrdering(pb, node.Right, nil) - if err != nil { - return nil, err - } - node.Right = r - return node, nil -} - -func planRouteOrdering(orderBy v3OrderBy, node *route) (logicalPlan, error) { - switch len(orderBy) { - case 0: - return node, nil - case 1: - isSpecial := false - if _, ok := orderBy[0].Expr.(*sqlparser.NullVal); ok { - isSpecial = true - } else if f, ok := orderBy[0].Expr.(*sqlparser.FuncExpr); ok { - if f.Name.Lowered() == "rand" { - isSpecial = true - } - } - if isSpecial { - node.Select.AddOrder(orderBy[0].Order) - return node, nil - } - } - - if node.isSingleShard() { - for _, order := range orderBy { - node.Select.AddOrder(order.Order) - } - return node, nil - } - - // If it's a scatter, we have to populate the OrderBy field. - for _, order := range orderBy { - colNumber := -1 - switch expr := order.Expr.(type) { - case *sqlparser.Literal: - var err error - if colNumber, err = ResultFromNumber(node.resultColumns, expr, "order clause"); err != nil { - return nil, err - } - case *sqlparser.ColName: - c := expr.Metadata.(*column) - for i, rc := range node.resultColumns { - if rc.column == c { - colNumber = i - break - } - } - case *sqlparser.UnaryExpr: - col, ok := expr.Expr.(*sqlparser.ColName) - if !ok { - return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: complex ORDER BY expression: %s", sqlparser.String(expr))) - } - c := col.Metadata.(*column) - for i, rc := range node.resultColumns { - if rc.column == c { - colNumber = i - break - } - } - default: - return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: complex ORDER BY expression: %s", sqlparser.String(expr))) - } - // If column is not found, then the order by is referencing - // a column that's not on the select list. - if colNumber == -1 { - return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: ORDER BY must reference a column in the SELECT list: %s", sqlparser.String(order))) - } - starColFixedIndex := colNumber - if selectStatement, ok := node.Select.(*sqlparser.Select); ok { - for i, selectExpr := range selectStatement.SelectExprs { - if starExpr, ok := selectExpr.(*sqlparser.StarExpr); ok { - if i < colNumber { - tableName := starExpr.TableName - tableMap := node.resultColumns[i].column.st.tables - var tableMeta *table - if tableName.IsEmpty() && len(tableMap) == 1 { - for j := range tableMap { - tableMeta = tableMap[j] - } - } else { - tableMeta = tableMap[tableName] - } - if tableMeta == nil || !tableMeta.isAuthoritative { - return nil, vterrors.VT12001("in scatter query, cannot ORDER BY a column that comes after `*` expressions in the SELECT list") - } - starColFixedIndex += len(tableMeta.columnNames) - 1 - } - } - } - } - - // TODO(king-11) pass in collation here - ob := engine.OrderByParams{ - Col: colNumber, - WeightStringCol: -1, - Desc: order.Direction == sqlparser.DescOrder, - StarColFixedIndex: starColFixedIndex, - FromGroupBy: order.fromGroupBy, - CollationID: collations.Unknown, - } - node.eroute.OrderBy = append(node.eroute.OrderBy, ob) - - node.Select.AddOrder(order.Order) - } - return newMergeSort(node), nil -} diff --git a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go index cc823587408..d79436f3850 100644 --- a/go/vt/vtgate/planbuilder/plan_test.go +++ b/go/vt/vtgate/planbuilder/plan_test.go @@ -31,185 +31,22 @@ import ( "github.com/nsf/jsondiff" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/vt/servenv" - - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - - "vitess.io/vitess/go/test/utils" - vschemapb "vitess.io/vitess/go/vt/proto/vschema" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/vtgate/semantics" - - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/test/vschemawrapper" "vitess.io/vitess/go/vt/key" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtgate/engine" + oprewriters "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) -// hashIndex is a functional, unique Vindex. -type hashIndex struct{ name string } - -func (v *hashIndex) String() string { return v.name } -func (*hashIndex) Cost() int { return 1 } -func (*hashIndex) IsUnique() bool { return true } -func (*hashIndex) NeedsVCursor() bool { return false } -func (*hashIndex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { - return []bool{}, nil -} -func (*hashIndex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) { - return nil, nil -} - -func newHashIndex(name string, _ map[string]string) (vindexes.Vindex, error) { - return &hashIndex{name: name}, nil -} - -// lookupIndex is a unique Vindex, and satisfies Lookup. -type lookupIndex struct{ name string } - -func (v *lookupIndex) String() string { return v.name } -func (*lookupIndex) Cost() int { return 2 } -func (*lookupIndex) IsUnique() bool { return true } -func (*lookupIndex) NeedsVCursor() bool { return false } -func (*lookupIndex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { - return []bool{}, nil -} -func (*lookupIndex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) { - return nil, nil -} -func (*lookupIndex) Create(context.Context, vindexes.VCursor, [][]sqltypes.Value, [][]byte, bool) error { - return nil -} -func (*lookupIndex) Delete(context.Context, vindexes.VCursor, [][]sqltypes.Value, []byte) error { - return nil -} -func (*lookupIndex) Update(context.Context, vindexes.VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error { - return nil -} - -func newLookupIndex(name string, _ map[string]string) (vindexes.Vindex, error) { - return &lookupIndex{name: name}, nil -} - -var _ vindexes.Lookup = (*lookupIndex)(nil) - -// nameLkpIndex satisfies Lookup, NonUnique. -type nameLkpIndex struct{ name string } - -func (v *nameLkpIndex) String() string { return v.name } -func (*nameLkpIndex) Cost() int { return 3 } -func (*nameLkpIndex) IsUnique() bool { return false } -func (*nameLkpIndex) NeedsVCursor() bool { return false } -func (*nameLkpIndex) AllowBatch() bool { return true } -func (*nameLkpIndex) AutoCommitEnabled() bool { return false } -func (*nameLkpIndex) GetCommitOrder() vtgatepb.CommitOrder { return vtgatepb.CommitOrder_NORMAL } -func (*nameLkpIndex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { - return []bool{}, nil -} -func (*nameLkpIndex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) { - return nil, nil -} -func (*nameLkpIndex) Create(context.Context, vindexes.VCursor, [][]sqltypes.Value, [][]byte, bool) error { - return nil -} -func (*nameLkpIndex) Delete(context.Context, vindexes.VCursor, [][]sqltypes.Value, []byte) error { - return nil -} -func (*nameLkpIndex) Update(context.Context, vindexes.VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error { - return nil -} -func (v *nameLkpIndex) Query() (string, []string) { - return "select name, keyspace_id from name_user_vdx where name in ::name", []string{"name"} -} -func (*nameLkpIndex) MapResult([]sqltypes.Value, []*sqltypes.Result) ([]key.Destination, error) { - return nil, nil -} - -func newNameLkpIndex(name string, _ map[string]string) (vindexes.Vindex, error) { - return &nameLkpIndex{name: name}, nil -} - -var _ vindexes.Vindex = (*nameLkpIndex)(nil) -var _ vindexes.Lookup = (*nameLkpIndex)(nil) -var _ vindexes.LookupPlanable = (*nameLkpIndex)(nil) - -// costlyIndex satisfies Lookup, NonUnique. -type costlyIndex struct{ name string } - -func (v *costlyIndex) String() string { return v.name } -func (*costlyIndex) Cost() int { return 10 } -func (*costlyIndex) IsUnique() bool { return false } -func (*costlyIndex) NeedsVCursor() bool { return false } -func (*costlyIndex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { - return []bool{}, nil -} -func (*costlyIndex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) { - return nil, nil -} -func (*costlyIndex) Create(context.Context, vindexes.VCursor, [][]sqltypes.Value, [][]byte, bool) error { - return nil -} -func (*costlyIndex) Delete(context.Context, vindexes.VCursor, [][]sqltypes.Value, []byte) error { - return nil -} -func (*costlyIndex) Update(context.Context, vindexes.VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error { - return nil -} - -func newCostlyIndex(name string, _ map[string]string) (vindexes.Vindex, error) { - return &costlyIndex{name: name}, nil -} - -var _ vindexes.Vindex = (*costlyIndex)(nil) -var _ vindexes.Lookup = (*costlyIndex)(nil) - -// multiColIndex satisfies multi column vindex. -type multiColIndex struct { - name string -} - -func newMultiColIndex(name string, _ map[string]string) (vindexes.Vindex, error) { - return &multiColIndex{name: name}, nil -} - -var _ vindexes.MultiColumn = (*multiColIndex)(nil) - -func (m *multiColIndex) String() string { return m.name } - -func (m *multiColIndex) Cost() int { return 1 } - -func (m *multiColIndex) IsUnique() bool { return true } - -func (m *multiColIndex) NeedsVCursor() bool { return false } - -func (m *multiColIndex) Map(ctx context.Context, vcursor vindexes.VCursor, rowsColValues [][]sqltypes.Value) ([]key.Destination, error) { - return nil, nil -} - -func (m *multiColIndex) Verify(ctx context.Context, vcursor vindexes.VCursor, rowsColValues [][]sqltypes.Value, ksids [][]byte) ([]bool, error) { - return []bool{}, nil -} - -func (m *multiColIndex) PartialVindex() bool { - return true -} - -func init() { - vindexes.Register("hash_test", newHashIndex) - vindexes.Register("lookup_test", newLookupIndex) - vindexes.Register("name_lkp_test", newNameLkpIndex) - vindexes.Register("costly", newCostlyIndex) - vindexes.Register("multiCol_test", newMultiColIndex) -} - func makeTestOutput(t *testing.T) string { testOutputTempDir := utils.MakeTestOutput(t, "testdata", "plan_test") @@ -217,9 +54,12 @@ func makeTestOutput(t *testing.T) string { } func TestPlan(t *testing.T) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - sysVarEnabled: true, + defer utils.EnsureNoLeaks(t) + vschemaWrapper := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + TabletType_: topodatapb.TabletType_PRIMARY, + SysVarEnabled: true, + TestBuilder: TestBuilder, } testOutputTempDir := makeTestOutput(t) @@ -254,47 +94,142 @@ func TestPlan(t *testing.T) { testFile(t, "info_schema80_cases.json", testOutputTempDir, vschemaWrapper, false) testFile(t, "reference_cases.json", testOutputTempDir, vschemaWrapper, false) testFile(t, "vexplain_cases.json", testOutputTempDir, vschemaWrapper, false) + testFile(t, "misc_cases.json", testOutputTempDir, vschemaWrapper, false) +} + +// TestForeignKeyPlanning tests the planning of foreign keys in a managed mode by Vitess. +func TestForeignKeyPlanning(t *testing.T) { + vschema := loadSchema(t, "vschemas/schema.json", true) + setFks(t, vschema) + vschemaWrapper := &vschemawrapper.VSchemaWrapper{ + V: vschema, + } + + testOutputTempDir := makeTestOutput(t) + + testFile(t, "foreignkey_cases.json", testOutputTempDir, vschemaWrapper, false) +} + +func setFks(t *testing.T, vschema *vindexes.VSchema) { + if vschema.Keyspaces["sharded_fk_allow"] != nil { + // FK from multicol_tbl2 referencing multicol_tbl1 that is shard scoped. + _ = vschema.AddForeignKey("sharded_fk_allow", "multicol_tbl2", createFkDefinition([]string{"colb", "cola", "x", "colc", "y"}, "multicol_tbl1", []string{"colb", "cola", "y", "colc", "x"}, sqlparser.Cascade, sqlparser.Cascade)) + + // FK from tbl2 referencing tbl1 that is shard scoped. + _ = vschema.AddForeignKey("sharded_fk_allow", "tbl2", createFkDefinition([]string{"col2"}, "tbl1", []string{"col1"}, sqlparser.Restrict, sqlparser.Restrict)) + _ = vschema.AddForeignKey("sharded_fk_allow", "tbl2", createFkDefinition([]string{"col2", "col"}, "tbl1", []string{"col1", "col"}, sqlparser.Restrict, sqlparser.Restrict)) + // FK from tbl3 referencing tbl1 that is not shard scoped. + _ = vschema.AddForeignKey("sharded_fk_allow", "tbl3", createFkDefinition([]string{"coly"}, "tbl1", []string{"t1col1"}, sqlparser.DefaultAction, sqlparser.DefaultAction)) + // FK from tbl10 referencing tbl2 that is shard scoped. + _ = vschema.AddForeignKey("sharded_fk_allow", "tbl10", createFkDefinition([]string{"sk", "col"}, "tbl2", []string{"col2", "col"}, sqlparser.Restrict, sqlparser.Restrict)) + // FK from tbl10 referencing tbl3 that is not shard scoped. + _ = vschema.AddForeignKey("sharded_fk_allow", "tbl10", createFkDefinition([]string{"col"}, "tbl3", []string{"col"}, sqlparser.Restrict, sqlparser.Restrict)) + + // FK from tbl4 referencing tbl5 that is shard scoped. + _ = vschema.AddForeignKey("sharded_fk_allow", "tbl4", createFkDefinition([]string{"col4"}, "tbl5", []string{"col5"}, sqlparser.SetNull, sqlparser.Cascade)) + _ = vschema.AddForeignKey("sharded_fk_allow", "tbl4", createFkDefinition([]string{"t4col4"}, "tbl5", []string{"t5col5"}, sqlparser.SetNull, sqlparser.Cascade)) + + // FK from tbl5 referencing tbl8 that is shard scoped of SET-NULL types. + _ = vschema.AddForeignKey("sharded_fk_allow", "tbl5", createFkDefinition([]string{"col5"}, "tbl8", []string{"col8"}, sqlparser.SetNull, sqlparser.SetNull)) + + // FK from tbl4 referencing tbl9 that is not shard scoped of SET-NULL types. + _ = vschema.AddForeignKey("sharded_fk_allow", "tbl4", createFkDefinition([]string{"col_ref"}, "tbl9", []string{"col9"}, sqlparser.SetNull, sqlparser.SetNull)) + + // FK from tbl6 referencing tbl7 that is shard scoped. + _ = vschema.AddForeignKey("sharded_fk_allow", "tbl6", createFkDefinition([]string{"col6"}, "tbl7", []string{"col7"}, sqlparser.NoAction, sqlparser.NoAction)) + _ = vschema.AddForeignKey("sharded_fk_allow", "tbl6", createFkDefinition([]string{"t6col6"}, "tbl7", []string{"t7col7"}, sqlparser.NoAction, sqlparser.NoAction)) + _ = vschema.AddForeignKey("sharded_fk_allow", "tbl6", createFkDefinition([]string{"t6col62"}, "tbl7", []string{"t7col72"}, sqlparser.NoAction, sqlparser.NoAction)) + + // FK from tblrefDef referencing tbl20 that is shard scoped of SET-Default types. + _ = vschema.AddForeignKey("sharded_fk_allow", "tblrefDef", createFkDefinition([]string{"ref"}, "tbl20", []string{"col2"}, sqlparser.SetDefault, sqlparser.SetDefault)) + + } + if vschema.Keyspaces["unsharded_fk_allow"] != nil { + // u_tbl2(col2) -> u_tbl1(col1) Cascade. + // u_tbl4(col41) -> u_tbl1(col14) Restrict. + // u_tbl9(col9) -> u_tbl1(col1) Cascade Null. + // u_tbl3(col2) -> u_tbl2(col2) Cascade Null. + // u_tbl4(col4) -> u_tbl3(col3) Restrict. + // u_tbl6(col6) -> u_tbl5(col5) Restrict. + // u_tbl8(col8) -> u_tbl9(col9) Null Null. + // u_tbl8(col8) -> u_tbl6(col6) Cascade Null. + // u_tbl4(col4) -> u_tbl7(col7) Cascade Cascade. + // u_tbl9(col9) -> u_tbl4(col4) Restrict Restrict. + // u_multicol_tbl2(cola, colb) -> u_multicol_tbl1(cola, colb) Null Null. + // u_multicol_tbl3(cola, colb) -> u_multicol_tbl2(cola, colb) Cascade Cascade. + + _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl2", createFkDefinition([]string{"col2"}, "u_tbl1", []string{"col1"}, sqlparser.Cascade, sqlparser.Cascade)) + _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl9", createFkDefinition([]string{"col9"}, "u_tbl1", []string{"col1"}, sqlparser.SetNull, sqlparser.NoAction)) + _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl4", createFkDefinition([]string{"col41"}, "u_tbl1", []string{"col14"}, sqlparser.NoAction, sqlparser.NoAction)) + _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl3", createFkDefinition([]string{"col3"}, "u_tbl2", []string{"col2"}, sqlparser.SetNull, sqlparser.SetNull)) + _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl4", createFkDefinition([]string{"col4"}, "u_tbl3", []string{"col3"}, sqlparser.Restrict, sqlparser.Restrict)) + _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl6", createFkDefinition([]string{"col6"}, "u_tbl5", []string{"col5"}, sqlparser.DefaultAction, sqlparser.DefaultAction)) + _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl8", createFkDefinition([]string{"col8"}, "u_tbl9", []string{"col9"}, sqlparser.SetNull, sqlparser.SetNull)) + _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl8", createFkDefinition([]string{"col8"}, "u_tbl6", []string{"col6"}, sqlparser.Cascade, sqlparser.CASCADE)) + _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl4", createFkDefinition([]string{"col4"}, "u_tbl7", []string{"col7"}, sqlparser.Cascade, sqlparser.Cascade)) + _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl9", createFkDefinition([]string{"col9"}, "u_tbl4", []string{"col4"}, sqlparser.Restrict, sqlparser.Restrict)) + _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl", createFkDefinition([]string{"col"}, "sharded_fk_allow.s_tbl", []string{"col"}, sqlparser.Restrict, sqlparser.Restrict)) + + _ = vschema.AddForeignKey("unsharded_fk_allow", "u_multicol_tbl2", createFkDefinition([]string{"cola", "colb"}, "u_multicol_tbl1", []string{"cola", "colb"}, sqlparser.SetNull, sqlparser.SetNull)) + _ = vschema.AddForeignKey("unsharded_fk_allow", "u_multicol_tbl3", createFkDefinition([]string{"cola", "colb"}, "u_multicol_tbl2", []string{"cola", "colb"}, sqlparser.Cascade, sqlparser.Cascade)) + } } func TestSystemTables57(t *testing.T) { // first we move everything to use 5.7 logic + oldVer := servenv.MySQLServerVersion() servenv.SetMySQLServerVersionForTest("5.7") - defer servenv.SetMySQLServerVersionForTest("") - vschemaWrapper := &vschemaWrapper{v: loadSchema(t, "vschemas/schema.json", true)} + defer func() { + servenv.SetMySQLServerVersionForTest(oldVer) + }() + vschemaWrapper := &vschemawrapper.VSchemaWrapper{V: loadSchema(t, "vschemas/schema.json", true)} testOutputTempDir := makeTestOutput(t) testFile(t, "info_schema57_cases.json", testOutputTempDir, vschemaWrapper, false) } func TestSysVarSetDisabled(t *testing.T) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - sysVarEnabled: false, + vschemaWrapper := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + SysVarEnabled: false, } testFile(t, "set_sysvar_disabled_cases.json", makeTestOutput(t), vschemaWrapper, false) } func TestViews(t *testing.T) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - enableViews: true, + vschemaWrapper := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + EnableViews: true, } testFile(t, "view_cases.json", makeTestOutput(t), vschemaWrapper, false) } func TestOne(t *testing.T) { - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), + reset := oprewriters.EnableDebugPrinting() + defer reset() + + lv := loadSchema(t, "vschemas/schema.json", true) + setFks(t, lv) + vschema := &vschemawrapper.VSchemaWrapper{ + V: lv, + } + + testFile(t, "onecase.json", "", vschema, false) +} + +func TestOneTPCC(t *testing.T) { + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/tpcc_schema.json", true), } testFile(t, "onecase.json", "", vschema, false) } func TestOneWithMainAsDefault(t *testing.T) { - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + Keyspace: &vindexes.Keyspace{ Name: "main", Sharded: false, }, @@ -304,9 +239,9 @@ func TestOneWithMainAsDefault(t *testing.T) { } func TestOneWithSecondUserAsDefault(t *testing.T) { - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + Keyspace: &vindexes.Keyspace{ Name: "second_user", Sharded: true, }, @@ -316,9 +251,9 @@ func TestOneWithSecondUserAsDefault(t *testing.T) { } func TestOneWithUserAsDefault(t *testing.T) { - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + Keyspace: &vindexes.Keyspace{ Name: "user", Sharded: true, }, @@ -328,9 +263,9 @@ func TestOneWithUserAsDefault(t *testing.T) { } func TestOneWithTPCHVSchema(t *testing.T) { - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/tpch_schema.json", true), - sysVarEnabled: true, + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/tpch_schema.json", true), + SysVarEnabled: true, } testFile(t, "onecase.json", "", vschema, false) @@ -338,44 +273,47 @@ func TestOneWithTPCHVSchema(t *testing.T) { func TestOneWith57Version(t *testing.T) { // first we move everything to use 5.7 logic + oldVer := servenv.MySQLServerVersion() servenv.SetMySQLServerVersionForTest("5.7") - defer servenv.SetMySQLServerVersionForTest("") - vschema := &vschemaWrapper{v: loadSchema(t, "vschemas/schema.json", true)} + defer func() { + servenv.SetMySQLServerVersionForTest(oldVer) + }() + vschema := &vschemawrapper.VSchemaWrapper{V: loadSchema(t, "vschemas/schema.json", true)} testFile(t, "onecase.json", "", vschema, false) } func TestRubyOnRailsQueries(t *testing.T) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(t, "vschemas/rails_schema.json", true), - sysVarEnabled: true, + vschemaWrapper := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/rails_schema.json", true), + SysVarEnabled: true, } testFile(t, "rails_cases.json", makeTestOutput(t), vschemaWrapper, false) } func TestOLTP(t *testing.T) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(t, "vschemas/oltp_schema.json", true), - sysVarEnabled: true, + vschemaWrapper := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/oltp_schema.json", true), + SysVarEnabled: true, } testFile(t, "oltp_cases.json", makeTestOutput(t), vschemaWrapper, false) } func TestTPCC(t *testing.T) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(t, "vschemas/tpcc_schema.json", true), - sysVarEnabled: true, + vschemaWrapper := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/tpcc_schema.json", true), + SysVarEnabled: true, } testFile(t, "tpcc_cases.json", makeTestOutput(t), vschemaWrapper, false) } func TestTPCH(t *testing.T) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(t, "vschemas/tpch_schema.json", true), - sysVarEnabled: true, + vschemaWrapper := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/tpch_schema.json", true), + SysVarEnabled: true, } testFile(t, "tpch_cases.json", makeTestOutput(t), vschemaWrapper, false) @@ -394,9 +332,9 @@ func BenchmarkTPCH(b *testing.B) { } func benchmarkWorkload(b *testing.B, name string) { - vschemaWrapper := &vschemaWrapper{ - v: loadSchema(b, name+"vschemas/_schema.json", true), - sysVarEnabled: true, + vschemaWrapper := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(b, "vschemas/"+name+"_schema.json", true), + SysVarEnabled: true, } testCases := readJSONTests(name + "_cases.json") @@ -409,43 +347,59 @@ func benchmarkWorkload(b *testing.B, name string) { } func TestBypassPlanningShardTargetFromFile(t *testing.T) { - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + Keyspace: &vindexes.Keyspace{ Name: "main", Sharded: false, }, - tabletType: topodatapb.TabletType_PRIMARY, - dest: key.DestinationShard("-80")} + TabletType_: topodatapb.TabletType_PRIMARY, + Dest: key.DestinationShard("-80")} testFile(t, "bypass_shard_cases.json", makeTestOutput(t), vschema, false) } + func TestBypassPlanningKeyrangeTargetFromFile(t *testing.T) { keyRange, _ := key.ParseShardingSpec("-") - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + Keyspace: &vindexes.Keyspace{ Name: "main", Sharded: false, }, - tabletType: topodatapb.TabletType_PRIMARY, - dest: key.DestinationExactKeyRange{KeyRange: keyRange[0]}, + TabletType_: topodatapb.TabletType_PRIMARY, + Dest: key.DestinationExactKeyRange{KeyRange: keyRange[0]}, } testFile(t, "bypass_keyrange_cases.json", makeTestOutput(t), vschema, false) } func TestWithDefaultKeyspaceFromFile(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // We are testing this separately so we can set a default keyspace - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + Keyspace: &vindexes.Keyspace{ Name: "main", Sharded: false, }, - tabletType: topodatapb.TabletType_PRIMARY, - } + TabletType_: topodatapb.TabletType_PRIMARY, + } + ts := memorytopo.NewServer(ctx, "cell1") + ts.CreateKeyspace(ctx, "main", &topodatapb.Keyspace{}) + ts.CreateKeyspace(ctx, "user", &topodatapb.Keyspace{}) + // Create a cache to use for lookups of the sidecar database identifier + // in use by each keyspace. + _, created := sidecardb.NewIdentifierCache(func(ctx context.Context, keyspace string) (string, error) { + ki, err := ts.GetKeyspace(ctx, keyspace) + if err != nil { + return "", err + } + return ki.SidecarDbName, nil + }) + require.True(t, created) testOutputTempDir := makeTestOutput(t) testFile(t, "alterVschema_cases.json", testOutputTempDir, vschema, false) @@ -458,13 +412,13 @@ func TestWithDefaultKeyspaceFromFile(t *testing.T) { func TestWithDefaultKeyspaceFromFileSharded(t *testing.T) { // We are testing this separately so we can set a default keyspace - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + Keyspace: &vindexes.Keyspace{ Name: "second_user", Sharded: true, }, - tabletType: topodatapb.TabletType_PRIMARY, + TabletType_: topodatapb.TabletType_PRIMARY, } testOutputTempDir := makeTestOutput(t) @@ -473,13 +427,13 @@ func TestWithDefaultKeyspaceFromFileSharded(t *testing.T) { func TestWithUserDefaultKeyspaceFromFileSharded(t *testing.T) { // We are testing this separately so we can set a default keyspace - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + Keyspace: &vindexes.Keyspace{ Name: "user", Sharded: true, }, - tabletType: topodatapb.TabletType_PRIMARY, + TabletType_: topodatapb.TabletType_PRIMARY, } testOutputTempDir := makeTestOutput(t) @@ -488,10 +442,10 @@ func TestWithUserDefaultKeyspaceFromFileSharded(t *testing.T) { func TestWithSystemSchemaAsDefaultKeyspace(t *testing.T) { // We are testing this separately so we can set a default keyspace - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{Name: "information_schema"}, - tabletType: topodatapb.TabletType_PRIMARY, + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + Keyspace: &vindexes.Keyspace{Name: "information_schema"}, + TabletType_: topodatapb.TabletType_PRIMARY, } testFile(t, "sysschema_default.json", makeTestOutput(t), vschema, false) @@ -499,13 +453,13 @@ func TestWithSystemSchemaAsDefaultKeyspace(t *testing.T) { func TestOtherPlanningFromFile(t *testing.T) { // We are testing this separately so we can set a default keyspace - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - keyspace: &vindexes.Keyspace{ + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + Keyspace: &vindexes.Keyspace{ Name: "main", Sharded: false, }, - tabletType: topodatapb.TabletType_PRIMARY, + TabletType_: topodatapb.TabletType_PRIMARY, } testOutputTempDir := makeTestOutput(t) @@ -552,260 +506,34 @@ func loadSchema(t testing.TB, filename string, setCollation bool) *vindexes.VSch return vschema } -var _ plancontext.VSchema = (*vschemaWrapper)(nil) - -type vschemaWrapper struct { - v *vindexes.VSchema - keyspace *vindexes.Keyspace - tabletType topodatapb.TabletType - dest key.Destination - sysVarEnabled bool - version plancontext.PlannerVersion - enableViews bool -} - -func (vw *vschemaWrapper) IsShardRoutingEnabled() bool { - return false -} - -func (vw *vschemaWrapper) GetVSchema() *vindexes.VSchema { - return vw.v -} - -func (vw *vschemaWrapper) GetSrvVschema() *vschemapb.SrvVSchema { - return &vschemapb.SrvVSchema{ - Keyspaces: map[string]*vschemapb.Keyspace{ - "user": { - Sharded: true, - Vindexes: map[string]*vschemapb.Vindex{}, - Tables: map[string]*vschemapb.Table{ - "user": {}, - }, - }, +// createFkDefinition is a helper function to create a Foreign key definition struct from the columns used in it provided as list of strings. +func createFkDefinition(childCols []string, parentTableName string, parentCols []string, onUpdate, onDelete sqlparser.ReferenceAction) *sqlparser.ForeignKeyDefinition { + pKs, pTbl, _ := sqlparser.ParseTable(parentTableName) + return &sqlparser.ForeignKeyDefinition{ + Source: sqlparser.MakeColumns(childCols...), + ReferenceDefinition: &sqlparser.ReferenceDefinition{ + ReferencedTable: sqlparser.NewTableNameWithQualifier(pTbl, pKs), + ReferencedColumns: sqlparser.MakeColumns(parentCols...), + OnUpdate: onUpdate, + OnDelete: onDelete, }, } } -func (vw *vschemaWrapper) ConnCollation() collations.ID { - return collations.CollationUtf8ID -} - -func (vw *vschemaWrapper) PlannerWarning(_ string) { -} - -func (vw *vschemaWrapper) ForeignKeyMode() string { - return "allow" -} - -func (vw *vschemaWrapper) AllKeyspace() ([]*vindexes.Keyspace, error) { - if vw.keyspace == nil { - return nil, vterrors.VT13001("keyspace not available") - } - return []*vindexes.Keyspace{vw.keyspace}, nil -} - -// FindKeyspace implements the VSchema interface -func (vw *vschemaWrapper) FindKeyspace(keyspace string) (*vindexes.Keyspace, error) { - if vw.keyspace == nil { - return nil, vterrors.VT13001("keyspace not available") - } - if vw.keyspace.Name == keyspace { - return vw.keyspace, nil - } - return nil, nil -} - -func (vw *vschemaWrapper) Planner() plancontext.PlannerVersion { - return vw.version -} - -// SetPlannerVersion implements the ContextVSchema interface -func (vw *vschemaWrapper) SetPlannerVersion(v plancontext.PlannerVersion) { - vw.version = v -} - -func (vw *vschemaWrapper) GetSemTable() *semantics.SemTable { - return nil -} - -func (vw *vschemaWrapper) KeyspaceExists(keyspace string) bool { - if vw.keyspace != nil { - return vw.keyspace.Name == keyspace - } - return false -} - -func (vw *vschemaWrapper) SysVarSetEnabled() bool { - return vw.sysVarEnabled -} - -func (vw *vschemaWrapper) TargetDestination(qualifier string) (key.Destination, *vindexes.Keyspace, topodatapb.TabletType, error) { - var keyspaceName string - if vw.keyspace != nil { - keyspaceName = vw.keyspace.Name - } - if vw.dest == nil && qualifier != "" { - keyspaceName = qualifier - } - if keyspaceName == "" { - return nil, nil, 0, vterrors.VT03007() - } - keyspace := vw.v.Keyspaces[keyspaceName] - if keyspace == nil { - return nil, nil, 0, vterrors.VT05003(keyspaceName) - } - return vw.dest, keyspace.Keyspace, vw.tabletType, nil - -} - -func (vw *vschemaWrapper) TabletType() topodatapb.TabletType { - return vw.tabletType -} - -func (vw *vschemaWrapper) Destination() key.Destination { - return vw.dest -} - -func (vw *vschemaWrapper) FindTable(tab sqlparser.TableName) (*vindexes.Table, string, topodatapb.TabletType, key.Destination, error) { - destKeyspace, destTabletType, destTarget, err := topoproto.ParseDestination(tab.Qualifier.String(), topodatapb.TabletType_PRIMARY) - if err != nil { - return nil, destKeyspace, destTabletType, destTarget, err - } - table, err := vw.v.FindTable(destKeyspace, tab.Name.String()) - if err != nil { - return nil, destKeyspace, destTabletType, destTarget, err - } - return table, destKeyspace, destTabletType, destTarget, nil -} - -func (vw *vschemaWrapper) FindView(tab sqlparser.TableName) sqlparser.SelectStatement { - destKeyspace, _, _, err := topoproto.ParseDestination(tab.Qualifier.String(), topodatapb.TabletType_PRIMARY) - if err != nil { - return nil - } - return vw.v.FindView(destKeyspace, tab.Name.String()) -} - -func (vw *vschemaWrapper) FindTableOrVindex(tab sqlparser.TableName) (*vindexes.Table, vindexes.Vindex, string, topodatapb.TabletType, key.Destination, error) { - if tab.Qualifier.IsEmpty() && tab.Name.String() == "dual" { - ksName := vw.getActualKeyspace() - var ks *vindexes.Keyspace - if ksName == "" { - ks = vw.getfirstKeyspace() - ksName = ks.Name - } else { - ks = vw.v.Keyspaces[ksName].Keyspace - } - tbl := &vindexes.Table{ - Name: sqlparser.NewIdentifierCS("dual"), - Keyspace: ks, - Type: vindexes.TypeReference, - } - return tbl, nil, ksName, topodatapb.TabletType_PRIMARY, nil, nil - } - destKeyspace, destTabletType, destTarget, err := topoproto.ParseDestination(tab.Qualifier.String(), topodatapb.TabletType_PRIMARY) - if err != nil { - return nil, nil, destKeyspace, destTabletType, destTarget, err - } - if destKeyspace == "" { - destKeyspace = vw.getActualKeyspace() - } - table, vindex, err := vw.v.FindTableOrVindex(destKeyspace, tab.Name.String(), topodatapb.TabletType_PRIMARY) - if err != nil { - return nil, nil, destKeyspace, destTabletType, destTarget, err - } - return table, vindex, destKeyspace, destTabletType, destTarget, nil -} - -func (vw *vschemaWrapper) getfirstKeyspace() (ks *vindexes.Keyspace) { - var f string - for name, schema := range vw.v.Keyspaces { - if f == "" || f > name { - f = name - ks = schema.Keyspace - } - } - return -} -func (vw *vschemaWrapper) getActualKeyspace() string { - if vw.keyspace == nil { - return "" - } - if !sqlparser.SystemSchema(vw.keyspace.Name) { - return vw.keyspace.Name - } - ks, err := vw.AnyKeyspace() - if err != nil { - return "" - } - return ks.Name -} - -func (vw *vschemaWrapper) DefaultKeyspace() (*vindexes.Keyspace, error) { - return vw.v.Keyspaces["main"].Keyspace, nil -} - -func (vw *vschemaWrapper) AnyKeyspace() (*vindexes.Keyspace, error) { - return vw.DefaultKeyspace() -} - -func (vw *vschemaWrapper) FirstSortedKeyspace() (*vindexes.Keyspace, error) { - return vw.v.Keyspaces["main"].Keyspace, nil -} - -func (vw *vschemaWrapper) TargetString() string { - return "targetString" -} - -func (vw *vschemaWrapper) WarnUnshardedOnly(_ string, _ ...any) { - -} - -func (vw *vschemaWrapper) ErrorIfShardedF(keyspace *vindexes.Keyspace, _, errFmt string, params ...any) error { - if keyspace.Sharded { - return fmt.Errorf(errFmt, params...) - } - return nil -} - -func (vw *vschemaWrapper) currentDb() string { - ksName := "" - if vw.keyspace != nil { - ksName = vw.keyspace.Name - } - return ksName -} - -func (vw *vschemaWrapper) FindRoutedShard(keyspace, shard string) (string, error) { - return "", nil -} - -func (vw *vschemaWrapper) IsViewsEnabled() bool { - return vw.enableViews -} - type ( planTest struct { - Comment string `json:"comment,omitempty"` - Query string `json:"query,omitempty"` - Plan json.RawMessage `json:"plan,omitempty"` - V3Plan json.RawMessage `json:"v3-plan,omitempty"` - Gen4Plan json.RawMessage `json:"gen4-plan,omitempty"` + Comment string `json:"comment,omitempty"` + Query string `json:"query,omitempty"` + Plan json.RawMessage `json:"plan,omitempty"` } ) -func testFile(t *testing.T, filename, tempDir string, vschema *vschemaWrapper, render bool) { +func testFile(t *testing.T, filename, tempDir string, vschema *vschemawrapper.VSchemaWrapper, render bool) { opts := jsondiff.DefaultConsoleOptions() t.Run(filename, func(t *testing.T) { var expected []planTest - var outFirstPlanner string for _, tcase := range readJSONTests(filename) { - if tcase.V3Plan == nil { - tcase.V3Plan = tcase.Plan - tcase.Gen4Plan = tcase.Plan - } - current := planTest{} testName := tcase.Comment if testName == "" { testName = tcase.Query @@ -813,52 +541,23 @@ func testFile(t *testing.T, filename, tempDir string, vschema *vschemaWrapper, r if tcase.Query == "" { continue } - t.Run(fmt.Sprintf("V3: %s", testName), func(t *testing.T) { - vschema.version = V3 - plan, err := TestBuilder(tcase.Query, vschema, vschema.currentDb()) - if render && plan != nil { - viz, err := engine.GraphViz(plan.Instructions) - if err == nil { - _ = viz.Render() - } - } - out := getPlanOrErrorOutput(err, plan) - - compare, s := jsondiff.Compare(tcase.V3Plan, []byte(out), &opts) - if compare != jsondiff.FullMatch { - t.Errorf("V3 - %s\nDiff:\n%s\n[%s] \n[%s]", filename, s, tcase.V3Plan, out) - } - - outFirstPlanner = out - current.Comment = testName - current.Query = tcase.Query - }) - - vschema.version = Gen4 - out, err := getPlanOutput(tcase, vschema, render) - if err != nil && len(tcase.Gen4Plan) == 0 && strings.HasPrefix(err.Error(), "gen4 does not yet support") { - continue + current := planTest{ + Comment: testName, + Query: tcase.Query, } - - // our expectation for the new planner on this query is one of three - // - it produces the same plan as V3 - this is shown using empty brackets: {\n} - // - it produces a different but accepted plan - this is shown using the accepted plan - // - or it produces a different plan that has not yet been accepted, or it fails to produce a plan - // this is shown by not having any info at all after the result for the V3 planner - // with this last expectation, it is an error if the Gen4 planner - // produces the same plan as the V3 planner does - t.Run(fmt.Sprintf("Gen4: %s", testName), func(t *testing.T) { - compare, s := jsondiff.Compare(tcase.Gen4Plan, []byte(out), &opts) + vschema.Version = Gen4 + out := getPlanOutput(tcase, vschema, render) + + // our expectation for the planner on the query is one of three + // - produces same plan as expected + // - produces a different plan than expected + // - fails to produce a plan + t.Run(testName, func(t *testing.T) { + compare, s := jsondiff.Compare(tcase.Plan, []byte(out), &opts) if compare != jsondiff.FullMatch { - t.Errorf("Gen4 - %s\nDiff:\n%s\n[%s] \n[%s]", filename, s, tcase.Gen4Plan, out) - } - - if outFirstPlanner == out { - current.Plan = []byte(out) - } else { - current.V3Plan = []byte(outFirstPlanner) - current.Gen4Plan = []byte(out) + t.Errorf("%s\nDiff:\n%s\n[%s] \n[%s]", filename, s, tcase.Plan, out) } + current.Plan = []byte(out) }) expected = append(expected, current) } @@ -892,21 +591,20 @@ func readJSONTests(filename string) []planTest { return output } -func getPlanOutput(tcase planTest, vschema *vschemaWrapper, render bool) (out string, err error) { +func getPlanOutput(tcase planTest, vschema *vschemawrapper.VSchemaWrapper, render bool) (out string) { defer func() { if r := recover(); r != nil { out = fmt.Sprintf("panicked: %v\n%s", r, string(debug.Stack())) } }() - plan, err := TestBuilder(tcase.Query, vschema, vschema.currentDb()) + plan, err := TestBuilder(tcase.Query, vschema, vschema.CurrentDb()) if render && plan != nil { viz, err := engine.GraphViz(plan.Instructions) if err == nil { _ = viz.Render() } } - out = getPlanOrErrorOutput(err, plan) - return out, err + return getPlanOrErrorOutput(err, plan) } func getPlanOrErrorOutput(err error, plan *engine.Plan) string { @@ -931,15 +629,12 @@ func locateFile(name string) string { var benchMarkFiles = []string{"from_cases.json", "filter_cases.json", "large_cases.json", "aggr_cases.json", "select_cases.json", "union_cases.json"} func BenchmarkPlanner(b *testing.B) { - vschema := &vschemaWrapper{ - v: loadSchema(b, "vschemas/schema.json", true), - sysVarEnabled: true, + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(b, "vschemas/schema.json", true), + SysVarEnabled: true, } for _, filename := range benchMarkFiles { testCases := readJSONTests(filename) - b.Run(filename+"-v3", func(b *testing.B) { - benchmarkPlanner(b, V3, testCases, vschema) - }) b.Run(filename+"-gen4", func(b *testing.B) { benchmarkPlanner(b, Gen4, testCases, vschema) }) @@ -950,15 +645,15 @@ func BenchmarkPlanner(b *testing.B) { } func BenchmarkSemAnalysis(b *testing.B) { - vschema := &vschemaWrapper{ - v: loadSchema(b, "vschemas/schema.json", true), - sysVarEnabled: true, + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(b, "vschemas/schema.json", true), + SysVarEnabled: true, } for i := 0; i < b.N; i++ { for _, filename := range benchMarkFiles { for _, tc := range readJSONTests(filename) { - exerciseAnalyzer(tc.Query, vschema.currentDb(), vschema) + exerciseAnalyzer(tc.Query, vschema.CurrentDb(), vschema) } } } @@ -983,10 +678,10 @@ func exerciseAnalyzer(query, database string, s semantics.SchemaInformation) { } func BenchmarkSelectVsDML(b *testing.B) { - vschema := &vschemaWrapper{ - v: loadSchema(b, "vschemas/schema.json", true), - sysVarEnabled: true, - version: V3, + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(b, "vschemas/schema.json", true), + SysVarEnabled: true, + Version: Gen4, } dmlCases := readJSONTests("dml_cases.json") @@ -1001,21 +696,21 @@ func BenchmarkSelectVsDML(b *testing.B) { }) b.Run("DML (random sample, N=32)", func(b *testing.B) { - benchmarkPlanner(b, V3, dmlCases[:32], vschema) + benchmarkPlanner(b, Gen4, dmlCases[:32], vschema) }) b.Run("Select (random sample, N=32)", func(b *testing.B) { - benchmarkPlanner(b, V3, selectCases[:32], vschema) + benchmarkPlanner(b, Gen4, selectCases[:32], vschema) }) } -func benchmarkPlanner(b *testing.B, version plancontext.PlannerVersion, testCases []planTest, vschema *vschemaWrapper) { +func benchmarkPlanner(b *testing.B, version plancontext.PlannerVersion, testCases []planTest, vschema *vschemawrapper.VSchemaWrapper) { b.ReportAllocs() for n := 0; n < b.N; n++ { for _, tcase := range testCases { - if len(tcase.Gen4Plan) > 0 { - vschema.version = version - _, _ = TestBuilder(tcase.Query, vschema, vschema.currentDb()) + if len(tcase.Plan) > 0 { + vschema.Version = version + _, _ = TestBuilder(tcase.Query, vschema, vschema.CurrentDb()) } } } diff --git a/go/vt/vtgate/planbuilder/plan_test_vindex.go b/go/vt/vtgate/planbuilder/plan_test_vindex.go new file mode 100644 index 00000000000..432ef7b8479 --- /dev/null +++ b/go/vt/vtgate/planbuilder/plan_test_vindex.go @@ -0,0 +1,222 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import ( + "context" + "strconv" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/key" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +// hashIndex is a functional, unique Vindex. +type hashIndex struct{ name string } + +func (v *hashIndex) String() string { return v.name } +func (*hashIndex) Cost() int { return 1 } +func (*hashIndex) IsUnique() bool { return true } +func (*hashIndex) NeedsVCursor() bool { return false } +func (*hashIndex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { + return []bool{}, nil +} +func (*hashIndex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) { + return nil, nil +} +func newHashIndex(name string, _ map[string]string) (vindexes.Vindex, error) { + return &hashIndex{name: name}, nil +} + +// lookupIndex is a unique Vindex, and satisfies Lookup. +type lookupIndex struct{ name string } + +func (v *lookupIndex) String() string { return v.name } +func (*lookupIndex) Cost() int { return 2 } +func (*lookupIndex) IsUnique() bool { return true } +func (*lookupIndex) NeedsVCursor() bool { return false } +func (*lookupIndex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { + return []bool{}, nil +} +func (*lookupIndex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) { + return nil, nil +} +func (*lookupIndex) Create(context.Context, vindexes.VCursor, [][]sqltypes.Value, [][]byte, bool) error { + return nil +} +func (*lookupIndex) Delete(context.Context, vindexes.VCursor, [][]sqltypes.Value, []byte) error { + return nil +} +func (*lookupIndex) Update(context.Context, vindexes.VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error { + return nil +} +func newLookupIndex(name string, _ map[string]string) (vindexes.Vindex, error) { + return &lookupIndex{name: name}, nil +} + +var _ vindexes.Lookup = (*lookupIndex)(nil) + +// nameLkpIndex satisfies Lookup, NonUnique. +type nameLkpIndex struct{ name string } + +func (v *nameLkpIndex) String() string { return v.name } +func (*nameLkpIndex) Cost() int { return 3 } +func (*nameLkpIndex) IsUnique() bool { return false } +func (*nameLkpIndex) NeedsVCursor() bool { return false } +func (*nameLkpIndex) AllowBatch() bool { return true } +func (*nameLkpIndex) AutoCommitEnabled() bool { return false } +func (*nameLkpIndex) GetCommitOrder() vtgatepb.CommitOrder { return vtgatepb.CommitOrder_NORMAL } +func (*nameLkpIndex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { + return []bool{}, nil +} +func (*nameLkpIndex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) { + return nil, nil +} +func (*nameLkpIndex) Create(context.Context, vindexes.VCursor, [][]sqltypes.Value, [][]byte, bool) error { + return nil +} +func (*nameLkpIndex) Delete(context.Context, vindexes.VCursor, [][]sqltypes.Value, []byte) error { + return nil +} +func (*nameLkpIndex) Update(context.Context, vindexes.VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error { + return nil +} +func (*nameLkpIndex) Query() (string, []string) { + return "select name, keyspace_id from name_user_vdx where name in ::name", []string{"name"} +} +func (*nameLkpIndex) MapResult([]sqltypes.Value, []*sqltypes.Result) ([]key.Destination, error) { + return nil, nil +} +func newNameLkpIndex(name string, _ map[string]string) (vindexes.Vindex, error) { + return &nameLkpIndex{name: name}, nil +} + +var _ vindexes.Vindex = (*nameLkpIndex)(nil) +var _ vindexes.Lookup = (*nameLkpIndex)(nil) +var _ vindexes.LookupPlanable = (*nameLkpIndex)(nil) + +// costlyIndex satisfies Lookup, NonUnique. +type costlyIndex struct{ name string } + +func (v *costlyIndex) String() string { return v.name } +func (*costlyIndex) Cost() int { return 10 } +func (*costlyIndex) IsUnique() bool { return false } +func (*costlyIndex) NeedsVCursor() bool { return false } +func (*costlyIndex) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { + return []bool{}, nil +} +func (*costlyIndex) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) { + return nil, nil +} +func (*costlyIndex) Create(context.Context, vindexes.VCursor, [][]sqltypes.Value, [][]byte, bool) error { + return nil +} +func (*costlyIndex) Delete(context.Context, vindexes.VCursor, [][]sqltypes.Value, []byte) error { + return nil +} +func (*costlyIndex) Update(context.Context, vindexes.VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error { + return nil +} +func newCostlyIndex(name string, _ map[string]string) (vindexes.Vindex, error) { + return &costlyIndex{name: name}, nil +} + +var _ vindexes.Vindex = (*costlyIndex)(nil) +var _ vindexes.Lookup = (*costlyIndex)(nil) + +// multiColIndex satisfies multi column vindex. +type multiColIndex struct{ name string } + +func (m *multiColIndex) String() string { return m.name } +func (*multiColIndex) Cost() int { return 1 } +func (*multiColIndex) IsUnique() bool { return true } +func (*multiColIndex) NeedsVCursor() bool { return false } +func (*multiColIndex) Map(ctx context.Context, vcursor vindexes.VCursor, rowsColValues [][]sqltypes.Value) ([]key.Destination, error) { + return nil, nil +} +func (*multiColIndex) Verify(ctx context.Context, vcursor vindexes.VCursor, rowsColValues [][]sqltypes.Value, ksids [][]byte) ([]bool, error) { + return []bool{}, nil +} +func (*multiColIndex) PartialVindex() bool { return true } +func newMultiColIndex(name string, _ map[string]string) (vindexes.Vindex, error) { + return &multiColIndex{name: name}, nil +} + +var _ vindexes.MultiColumn = (*multiColIndex)(nil) + +// unqLkpVdxBackfill satisfies Lookup, Unique. +type unqLkpVdxBackfill struct { + name string + inBackfill bool + cost int +} + +func (u *unqLkpVdxBackfill) String() string { return u.name } +func (u *unqLkpVdxBackfill) Cost() int { return u.cost } +func (*unqLkpVdxBackfill) IsUnique() bool { return false } +func (*unqLkpVdxBackfill) NeedsVCursor() bool { return false } +func (*unqLkpVdxBackfill) AllowBatch() bool { return true } +func (*unqLkpVdxBackfill) AutoCommitEnabled() bool { return false } +func (*unqLkpVdxBackfill) GetCommitOrder() vtgatepb.CommitOrder { return vtgatepb.CommitOrder_NORMAL } +func (*unqLkpVdxBackfill) Verify(context.Context, vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { + return []bool{}, nil +} +func (*unqLkpVdxBackfill) Map(ctx context.Context, vcursor vindexes.VCursor, ids []sqltypes.Value) ([]key.Destination, error) { + return nil, nil +} +func (*unqLkpVdxBackfill) Create(context.Context, vindexes.VCursor, [][]sqltypes.Value, [][]byte, bool) error { + return nil +} +func (*unqLkpVdxBackfill) Delete(context.Context, vindexes.VCursor, [][]sqltypes.Value, []byte) error { + return nil +} +func (*unqLkpVdxBackfill) Update(context.Context, vindexes.VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error { + return nil +} +func (*unqLkpVdxBackfill) Query() (string, []string) { + return "select unq_key, keyspace_id from unq_lkp_idx where unq_key in ::unq_key", []string{"unq_key"} +} +func (*unqLkpVdxBackfill) MapResult([]sqltypes.Value, []*sqltypes.Result) ([]key.Destination, error) { + return nil, nil +} +func (u *unqLkpVdxBackfill) IsBackfilling() bool { return u.inBackfill } + +func newUnqLkpVdxBackfill(name string, m map[string]string) (vindexes.Vindex, error) { + vdx := &unqLkpVdxBackfill{name: name} + if val, ok := m["write_only"]; ok { + vdx.inBackfill = val == "true" + } + if val, ok := m["cost"]; ok { + vdx.cost, _ = strconv.Atoi(val) + } + return vdx, nil +} + +var _ vindexes.Vindex = (*unqLkpVdxBackfill)(nil) +var _ vindexes.Lookup = (*unqLkpVdxBackfill)(nil) +var _ vindexes.LookupPlanable = (*unqLkpVdxBackfill)(nil) +var _ vindexes.LookupBackfill = (*unqLkpVdxBackfill)(nil) + +func init() { + vindexes.Register("hash_test", newHashIndex) + vindexes.Register("lookup_test", newLookupIndex) + vindexes.Register("name_lkp_test", newNameLkpIndex) + vindexes.Register("costly", newCostlyIndex) + vindexes.Register("multiCol_test", newMultiColIndex) + vindexes.Register("unq_lkp_test", newUnqLkpVdxBackfill) +} diff --git a/go/vt/vtgate/planbuilder/plancontext/planning_context.go b/go/vt/vtgate/planbuilder/plancontext/planning_context.go index 653691a4697..28c592758f9 100644 --- a/go/vt/vtgate/planbuilder/plancontext/planning_context.go +++ b/go/vt/vtgate/planbuilder/plancontext/planning_context.go @@ -35,29 +35,70 @@ type PlanningContext struct { SkipPredicates map[sqlparser.Expr]any PlannerVersion querypb.ExecuteOptions_PlannerVersion RewriteDerivedExpr bool + + // If we during planning have turned this expression into an argument name, + // we can continue using the same argument name + ReservedArguments map[sqlparser.Expr]string + + // DelegateAggregation tells us when we are allowed to split an aggregation across vtgate and mysql + // We aggregate within a shard, and then at the vtgate level we aggregate the incoming shard aggregates + DelegateAggregation bool + + // VerifyAllFKs tells whether we need verification for all the fk constraints on VTGate. + // This is required for queries we are running with /*+ SET_VAR(foreign_key_checks=OFF) */ + VerifyAllFKs bool + + // ParentFKToIgnore stores a specific parent foreign key that we would need to ignore while planning + // a certain query. This field is used in UPDATE CASCADE planning, wherein while planning the child update + // query, we need to ignore the parent foreign key constraint that caused the cascade in question. + ParentFKToIgnore string } -func NewPlanningContext(reservedVars *sqlparser.ReservedVars, semTable *semantics.SemTable, vschema VSchema, version querypb.ExecuteOptions_PlannerVersion) *PlanningContext { - ctx := &PlanningContext{ - ReservedVars: reservedVars, - SemTable: semTable, - VSchema: vschema, - JoinPredicates: map[sqlparser.Expr][]sqlparser.Expr{}, - SkipPredicates: map[sqlparser.Expr]any{}, - PlannerVersion: version, +func CreatePlanningContext(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema VSchema, version querypb.ExecuteOptions_PlannerVersion) (*PlanningContext, error) { + ksName := "" + if ks, _ := vschema.DefaultKeyspace(); ks != nil { + ksName = ks.Name + } + + semTable, err := semantics.Analyze(stmt, ksName, vschema) + if err != nil { + return nil, err } - return ctx + + // record any warning as planner warning. + vschema.PlannerWarning(semTable.Warning) + + return &PlanningContext{ + ReservedVars: reservedVars, + SemTable: semTable, + VSchema: vschema, + JoinPredicates: map[sqlparser.Expr][]sqlparser.Expr{}, + SkipPredicates: map[sqlparser.Expr]any{}, + PlannerVersion: version, + ReservedArguments: map[sqlparser.Expr]string{}, + }, nil } -func (c PlanningContext) IsSubQueryToReplace(e sqlparser.Expr) bool { +func (ctx *PlanningContext) IsSubQueryToReplace(e sqlparser.Expr) bool { ext, ok := e.(*sqlparser.Subquery) if !ok { return false } - for _, extractedSubq := range c.SemTable.GetSubqueryNeedingRewrite() { - if extractedSubq.Merged && c.SemTable.EqualsExpr(extractedSubq.Subquery, ext) { + for _, extractedSubq := range ctx.SemTable.GetSubqueryNeedingRewrite() { + if extractedSubq.Merged && ctx.SemTable.EqualsExpr(extractedSubq.Subquery, ext) { return true } } return false } + +func (ctx *PlanningContext) GetArgumentFor(expr sqlparser.Expr, f func() string) string { + for key, name := range ctx.ReservedArguments { + if ctx.SemTable.EqualsExpr(key, expr) { + return name + } + } + bvName := f() + ctx.ReservedArguments[expr] = bvName + return bvName +} diff --git a/go/vt/vtgate/planbuilder/plancontext/vschema.go b/go/vt/vtgate/planbuilder/plancontext/vschema.go index 281a3bd9eb0..fc5ee6d9207 100644 --- a/go/vt/vtgate/planbuilder/plancontext/vschema.go +++ b/go/vt/vtgate/planbuilder/plancontext/vschema.go @@ -1,10 +1,12 @@ package plancontext import ( + "context" "strings" - "vitess.io/vitess/go/vt/log" vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/key" @@ -53,13 +55,14 @@ type VSchema interface { PlannerWarning(message string) // ForeignKeyMode returns the foreign_key flag value - ForeignKeyMode() string + ForeignKeyMode(keyspace string) (vschemapb.Keyspace_ForeignKeyMode, error) // GetVSchema returns the latest cached vindexes.VSchema GetVSchema() *vindexes.VSchema // GetSrvVschema returns the latest cached vschema.SrvVSchema GetSrvVschema() *vschemapb.SrvVSchema + // FindRoutedShard looks up shard routing rules for a shard FindRoutedShard(keyspace, shard string) (string, error) @@ -68,26 +71,32 @@ type VSchema interface { // IsViewsEnabled returns true if Vitess manages the views. IsViewsEnabled() bool + + // GetUDV returns user defined value from the variable passed. + GetUDV(name string) *querypb.BindVariable + + // PlanPrepareStatement plans the prepared statement. + PlanPrepareStatement(ctx context.Context, query string) (*engine.Plan, sqlparser.Statement, error) + + // ClearPrepareData clears the prepared data from the session. + ClearPrepareData(stmtName string) + + // GetPrepareData returns the prepared data for the statement from the session. + GetPrepareData(stmtName string) *vtgatepb.PrepareData + + // StorePrepareData stores the prepared data in the session. + StorePrepareData(name string, v *vtgatepb.PrepareData) } // PlannerNameToVersion returns the numerical representation of the planner func PlannerNameToVersion(s string) (PlannerVersion, bool) { - deprecationMessage := "The V3 planner is deprecated and will be removed in V17 of Vitess" switch strings.ToLower(s) { - case "v3": - log.Warning(deprecationMessage) - return querypb.ExecuteOptions_V3, true case "gen4": return querypb.ExecuteOptions_Gen4, true case "gen4greedy", "greedy": return querypb.ExecuteOptions_Gen4Greedy, true case "left2right": return querypb.ExecuteOptions_Gen4Left2Right, true - case "gen4fallback": - return querypb.ExecuteOptions_Gen4WithFallback, true - case "gen4comparev3": - log.Warning(deprecationMessage) - return querypb.ExecuteOptions_Gen4CompareV3, true } return 0, false } diff --git a/go/vt/vtgate/planbuilder/planner.go b/go/vt/vtgate/planbuilder/planner.go new file mode 100644 index 00000000000..ab965351ac5 --- /dev/null +++ b/go/vt/vtgate/planbuilder/planner.go @@ -0,0 +1,96 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import ( + "fmt" + "strconv" + + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +func gen4Planner(query string, plannerVersion querypb.ExecuteOptions_PlannerVersion) stmtPlanner { + return func(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { + switch stmt := stmt.(type) { + case sqlparser.SelectStatement: + return gen4SelectStmtPlanner(query, plannerVersion, stmt, reservedVars, vschema) + case *sqlparser.Update: + return gen4UpdateStmtPlanner(plannerVersion, stmt, reservedVars, vschema) + case *sqlparser.Delete: + return gen4DeleteStmtPlanner(plannerVersion, stmt, reservedVars, vschema) + case *sqlparser.Insert: + return gen4InsertStmtPlanner(plannerVersion, stmt, reservedVars, vschema) + default: + return nil, vterrors.VT12001(fmt.Sprintf("%T", stmt)) + } + } +} + +func pushCommentDirectivesOnPlan(plan logicalPlan, stmt sqlparser.Statement) logicalPlan { + var directives *sqlparser.CommentDirectives + cmt, ok := stmt.(sqlparser.Commented) + if ok { + directives = cmt.GetParsedComments().Directives() + scatterAsWarns := directives.IsSet(sqlparser.DirectiveScatterErrorsAsWarnings) + timeout := queryTimeout(directives) + multiShardAutoCommit := directives.IsSet(sqlparser.DirectiveMultiShardAutocommit) + + if scatterAsWarns || timeout > 0 || multiShardAutoCommit { + _, _ = visit(plan, func(logicalPlan logicalPlan) (bool, logicalPlan, error) { + switch plan := logicalPlan.(type) { + case *route: + plan.eroute.ScatterErrorsAsWarnings = scatterAsWarns + plan.eroute.QueryTimeout = timeout + case *primitiveWrapper: + setDirective(plan.prim, multiShardAutoCommit, timeout) + case *insert: + setDirective(plan.eInsert, multiShardAutoCommit, timeout) + } + return true, logicalPlan, nil + }) + } + } + + return plan +} + +func setDirective(prim engine.Primitive, msac bool, timeout int) { + switch edml := prim.(type) { + case *engine.Insert: + edml.MultiShardAutocommit = msac + edml.QueryTimeout = timeout + case *engine.Update: + edml.MultiShardAutocommit = msac + edml.QueryTimeout = timeout + case *engine.Delete: + edml.MultiShardAutocommit = msac + edml.QueryTimeout = timeout + } +} + +// queryTimeout returns DirectiveQueryTimeout value if set, otherwise returns 0. +func queryTimeout(d *sqlparser.CommentDirectives) int { + val, _ := d.GetString(sqlparser.DirectiveQueryTimeout, "0") + if intVal, err := strconv.Atoi(val); err == nil { + return intVal + } + return 0 +} diff --git a/go/vt/vtgate/planbuilder/gen4_planner_test.go b/go/vt/vtgate/planbuilder/planner_test.go similarity index 100% rename from go/vt/vtgate/planbuilder/gen4_planner_test.go rename to go/vt/vtgate/planbuilder/planner_test.go diff --git a/go/vt/vtgate/planbuilder/postprocess.go b/go/vt/vtgate/planbuilder/postprocess.go index 3c71fdb97fe..655d9c0e053 100644 --- a/go/vt/vtgate/planbuilder/postprocess.go +++ b/go/vt/vtgate/planbuilder/postprocess.go @@ -18,83 +18,15 @@ package planbuilder import ( "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/semantics" ) // This file has functions to analyze postprocessing // clauses like ORDER BY, etc. -// pushGroupBy processes the group by clause. It resolves all symbols -// and ensures that there are no subqueries. -func (pb *primitiveBuilder) pushGroupBy(sel *sqlparser.Select) error { - if sel.Distinct { - newBuilder, err := planDistinct(pb.plan) - if err != nil { - return err - } - pb.plan = newBuilder - } - - if err := pb.st.ResolveSymbols(sel.GroupBy); err != nil { - return err - } - - newInput, err := planGroupBy(pb, pb.plan, sel.GroupBy) - if err != nil { - return err - } - pb.plan = newInput - - return nil -} - -// pushOrderBy pushes the order by clause into the primitives. -// It resolves all symbols and ensures that there are no subqueries. -func (pb *primitiveBuilder) pushOrderBy(orderBy sqlparser.OrderBy) error { - if err := pb.st.ResolveSymbols(orderBy); err != nil { - return err - } - var v3OrderBylist v3OrderBy - for _, order := range orderBy { - v3OrderBylist = append(v3OrderBylist, &v3Order{Order: order}) - } - plan, err := planOrdering(pb, pb.plan, v3OrderBylist) - if err != nil { - return err - } - pb.plan = plan - pb.plan.Reorder(0) - return nil -} - -func (pb *primitiveBuilder) pushLimit(limit *sqlparser.Limit) error { - if limit == nil { - return nil - } - rb, ok := pb.plan.(*route) - if ok && rb.isSingleShard() { - rb.SetLimit(limit) - return nil - } - - lb, err := createLimit(pb.plan, limit) - if err != nil { - return err - } - - plan, err := visit(lb, setUpperLimit) - if err != nil { - return err - } - - pb.plan = plan - pb.plan.Reorder(0) - return nil -} - // make sure we have the right signature for this function var _ planVisitor = setUpperLimit @@ -103,10 +35,10 @@ var _ planVisitor = setUpperLimit // A primitive that cannot perform this can ignore the request. func setUpperLimit(plan logicalPlan) (bool, logicalPlan, error) { switch node := plan.(type) { - case *join, *joinGen4, *hashJoin: + case *join, *hashJoin: return false, node, nil case *memorySort: - pv := evalengine.NewBindVar("__upper_limit", collations.TypedCollation{}) + pv := evalengine.NewBindVar("__upper_limit", sqltypes.Int64, collations.CollationBinaryID) node.eMemorySort.UpperLimit = pv // we don't want to go down to the rest of the tree return false, node, nil @@ -125,28 +57,20 @@ func setUpperLimit(plan logicalPlan) (bool, logicalPlan, error) { // If it's a scatter query, the rows returned will be // more than the upper limit, but enough for the limit node.Select.SetLimit(&sqlparser.Limit{Rowcount: sqlparser.NewArgument("__upper_limit")}) - case *routeGen4: - // The route pushes the limit regardless of the plan. - // If it's a scatter query, the rows returned will be - // more than the upper limit, but enough for the limit - node.Select.SetLimit(&sqlparser.Limit{Rowcount: sqlparser.NewArgument("__upper_limit")}) - case *concatenate: - return false, node, nil } return true, plan, nil } func createLimit(input logicalPlan, limit *sqlparser.Limit) (logicalPlan, error) { plan := newLimit(input) - emptySemTable := semantics.EmptySemTable() - pv, err := evalengine.Translate(limit.Rowcount, emptySemTable) + pv, err := evalengine.Translate(limit.Rowcount, nil) if err != nil { return nil, vterrors.Wrap(err, "unexpected expression in LIMIT") } plan.elimit.Count = pv if limit.Offset != nil { - pv, err = evalengine.Translate(limit.Offset, emptySemTable) + pv, err = evalengine.Translate(limit.Offset, nil) if err != nil { return nil, vterrors.Wrap(err, "unexpected expression in OFFSET") } diff --git a/go/vt/vtgate/planbuilder/predicate_rewrite_test.go b/go/vt/vtgate/planbuilder/predicate_rewrite_test.go new file mode 100644 index 00000000000..369a99bf5d3 --- /dev/null +++ b/go/vt/vtgate/planbuilder/predicate_rewrite_test.go @@ -0,0 +1,152 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import ( + "fmt" + "math/rand" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/collations" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/evalengine" +) + +type testCase struct { + nodes int + depth int +} + +type nodeType int + +const ( + NODE nodeType = iota + NOT + AND + OR + XOR + SIZE +) + +func (tc testCase) createPredicate(lvl int) sqlparser.Expr { + if lvl >= tc.depth { + // we're at max depth, so we just return one of the nodes + n := rand.Intn(tc.nodes) + return sqlparser.NewColName(fmt.Sprintf("n%d", n)) + } + switch nodeType(rand.Intn(int(SIZE))) { + case NODE: + n := rand.Intn(tc.nodes) + return sqlparser.NewColName(fmt.Sprintf("n%d", n)) + case NOT: + return &sqlparser.NotExpr{ + Expr: tc.createPredicate(lvl + 1), + } + case AND: + return &sqlparser.AndExpr{ + Left: tc.createPredicate(lvl + 1), + Right: tc.createPredicate(lvl + 1), + } + case OR: + return &sqlparser.OrExpr{ + Left: tc.createPredicate(lvl + 1), + Right: tc.createPredicate(lvl + 1), + } + case XOR: + return &sqlparser.XorExpr{ + Left: tc.createPredicate(lvl + 1), + Right: tc.createPredicate(lvl + 1), + } + } + panic("unexpected nodeType") +} + +func TestFuzzRewriting(t *testing.T) { + // This test, that runs for one second only, will produce lots of random boolean expressions, + // mixing AND, NOT, OR, XOR and column expressions. + // It then takes the predicate and simplifies it + // Finally, it runs both the original and simplified predicate with all combinations of column + // values - trying TRUE, FALSE and NULL. If the two expressions do not return the same value, + // this is considered a test failure. + + start := time.Now() + for time.Since(start) < 1*time.Second { + tc := testCase{ + nodes: rand.Intn(4) + 1, + depth: rand.Intn(4) + 1, + } + + predicate := tc.createPredicate(0) + name := sqlparser.String(predicate) + t.Run(name, func(t *testing.T) { + simplified := sqlparser.RewritePredicate(predicate) + + original, err := evalengine.Translate(predicate, &evalengine.Config{ + Collation: collations.Default(), + ResolveColumn: resolveForFuzz, + }) + require.NoError(t, err) + simpler, err := evalengine.Translate(simplified.(sqlparser.Expr), &evalengine.Config{ + Collation: collations.Default(), + ResolveColumn: resolveForFuzz, + }) + require.NoError(t, err) + + env := evalengine.EmptyExpressionEnv() + env.Row = make([]sqltypes.Value, tc.nodes) + for i := range env.Row { + env.Row[i] = sqltypes.NewInt32(1) + } + + testValues(t, env, 0, original, simpler) + }) + } +} + +func testValues(t *testing.T, env *evalengine.ExpressionEnv, i int, original, simpler evalengine.Expr) { + for n := 0; n < 3; n++ { + switch n { + case 0: + env.Row[i] = sqltypes.NewInt32(0) + case 1: + env.Row[i] = sqltypes.NewInt32(1) + case 2: + env.Row[i] = sqltypes.NULL + } + + v1, err := env.Evaluate(original) + require.NoError(t, err) + v2, err := env.Evaluate(simpler) + require.NoError(t, err) + assert.Equal(t, v1.Value(collations.Default()), v2.Value(collations.Default())) + if len(env.Row) > i+1 { + testValues(t, env, i+1, original, simpler) + } + } +} + +func resolveForFuzz(colname *sqlparser.ColName) (int, error) { + offsetStr := colname.Name.String()[1:] + return strconv.Atoi(offsetStr) +} diff --git a/go/vt/vtgate/planbuilder/prepared_statement.go b/go/vt/vtgate/planbuilder/prepared_statement.go new file mode 100644 index 00000000000..88be1f2b168 --- /dev/null +++ b/go/vt/vtgate/planbuilder/prepared_statement.go @@ -0,0 +1,137 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import ( + "context" + "regexp" + "strings" + + "vitess.io/vitess/go/sqltypes" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +// regexParams checks that argument names are in the form v1, v2, v3... +var regexParams = regexp.MustCompile(`^v\d+`) + +func prepareStmt(ctx context.Context, vschema plancontext.VSchema, pStmt *sqlparser.PrepareStmt) (*planResult, error) { + stmtName := pStmt.Name.Lowered() + vschema.ClearPrepareData(stmtName) + + var pQuery string + var err error + switch expr := pStmt.Statement.(type) { + case *sqlparser.Literal: + pQuery = expr.Val + case *sqlparser.Variable: + pQuery, err = fetchUDVValue(vschema, expr.Name.Lowered()) + case *sqlparser.Argument: + udv, _ := strings.CutPrefix(expr.Name, sqlparser.UserDefinedVariableName) + pQuery, err = fetchUDVValue(vschema, udv) + default: + return nil, vterrors.VT13002("prepare statement should not have : %T", pStmt.Statement) + } + if err != nil { + return nil, err + } + + plan, stmt, err := vschema.PlanPrepareStatement(ctx, pQuery) + if err != nil { + return nil, err + } + + count := countArguments(stmt) + vschema.StorePrepareData(stmtName, &vtgatepb.PrepareData{ + PrepareStatement: sqlparser.String(stmt), + ParamsCount: count, + }) + + return &planResult{ + primitive: engine.NewRowsPrimitive(nil, nil), + tables: plan.TablesUsed, + }, nil +} + +func countArguments(stmt sqlparser.Statement) (paramsCount int32) { + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { + switch node := node.(type) { + case *sqlparser.Argument: + if regexParams.MatchString(node.Name) { + paramsCount++ + } + } + return true, nil + }, stmt) + return +} + +func fetchUDVValue(vschema plancontext.VSchema, udv string) (string, error) { + bv := vschema.GetUDV(udv) + if bv == nil { + return "", vterrors.VT03024(udv) + } + val, err := sqltypes.BindVariableToValue(bv) + if err != nil { + return "", err + } + return val.ToString(), nil +} + +func buildExecuteStmtPlan(ctx context.Context, vschema plancontext.VSchema, eStmt *sqlparser.ExecuteStmt) (*planResult, error) { + stmtName := eStmt.Name.Lowered() + prepareData := vschema.GetPrepareData(stmtName) + if prepareData == nil { + return nil, vterrors.VT09011(stmtName, "EXECUTE") + } + if int(prepareData.ParamsCount) != len(eStmt.Arguments) { + return nil, vterrors.VT03025("EXECUTE") + } + + plan, _, err := vschema.PlanPrepareStatement(ctx, prepareData.PrepareStatement) + if err != nil { + return nil, err + } + + return &planResult{ + primitive: &engine.ExecStmt{ + Params: eStmt.Arguments, + Input: plan.Instructions, + }, + tables: plan.TablesUsed, + }, nil + +} + +func dropPreparedStatement( + vschema plancontext.VSchema, + stmt *sqlparser.DeallocateStmt, +) (*planResult, error) { + stmtName := stmt.Name.Lowered() + prepareData := vschema.GetPrepareData(stmtName) + if prepareData == nil { + return nil, vterrors.VT09011(stmtName, "DEALLOCATE PREPARE") + } + + vschema.ClearPrepareData(stmtName) + return &planResult{ + primitive: engine.NewRowsPrimitive(nil, nil), + }, nil +} diff --git a/go/vt/vtgate/planbuilder/primitive_builder.go b/go/vt/vtgate/planbuilder/primitive_builder.go deleted file mode 100644 index b7c557518e5..00000000000 --- a/go/vt/vtgate/planbuilder/primitive_builder.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" -) - -// primitiveBuilder is the top level type for building plans. -// It contains the current logicalPlan tree, the symtab and -// the jointab. It can create transient planBuilders due -// to the recursive nature of SQL. -type primitiveBuilder struct { - vschema plancontext.VSchema - jt *jointab - plan logicalPlan - st *symtab - stmt sqlparser.Statement -} - -func newStmtAwarePrimitiveBuilder(vschema plancontext.VSchema, jt *jointab, stmt sqlparser.Statement) *primitiveBuilder { - return &primitiveBuilder{ - vschema: vschema, - jt: jt, - stmt: stmt, - } -} - -func newPrimitiveBuilder(vschema plancontext.VSchema, jt *jointab) *primitiveBuilder { - return &primitiveBuilder{ - vschema: vschema, - jt: jt, - } -} diff --git a/go/vt/vtgate/planbuilder/primitive_wrapper.go b/go/vt/vtgate/planbuilder/primitive_wrapper.go index b4ed7c8aa39..cb6a65aba04 100644 --- a/go/vt/vtgate/planbuilder/primitive_wrapper.go +++ b/go/vt/vtgate/planbuilder/primitive_wrapper.go @@ -27,10 +27,9 @@ import ( // primitiveWrapper is used when only need a logical plan that supports plan.Primitive() and nothing else type primitiveWrapper struct { prim engine.Primitive - gen4Plan } -func (p *primitiveWrapper) WireupGen4(*plancontext.PlanningContext) error { +func (p *primitiveWrapper) Wireup(*plancontext.PlanningContext) error { return nil } diff --git a/go/vt/vtgate/planbuilder/project.go b/go/vt/vtgate/planbuilder/project.go deleted file mode 100644 index 6dfea3fcec2..00000000000 --- a/go/vt/vtgate/planbuilder/project.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - "strings" - - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" -) - -// planProjection pushes the select expression to the specified -// originator. If successful, the originator must create -// a resultColumn entry and return it. The top level caller -// must accumulate these result columns and set the symtab -// after analysis. -func planProjection(pb *primitiveBuilder, in logicalPlan, expr *sqlparser.AliasedExpr, origin logicalPlan) (logicalPlan, *resultColumn, int, error) { - switch node := in.(type) { - case *join: - var rc *resultColumn - if node.isOnLeft(origin.Order()) { - newLeft, col, colNumber, err := planProjection(pb, node.Left, expr, origin) - if err != nil { - return nil, nil, 0, err - } - node.ejoin.Cols = append(node.ejoin.Cols, -colNumber-1) - rc = col - node.Left = newLeft - } else { - // Pushing of non-trivial expressions not allowed for RHS of left joins. - if _, ok := expr.Expr.(*sqlparser.ColName); !ok && node.ejoin.Opcode == engine.LeftJoin { - return nil, nil, 0, vterrors.VT12001("cross-shard LEFT JOIN and column expressions") - } - - newRight, col, colNumber, err := planProjection(pb, node.Right, expr, origin) - if err != nil { - return nil, nil, 0, err - } - node.ejoin.Cols = append(node.ejoin.Cols, colNumber+1) - rc = col - node.Right = newRight - } - node.resultColumns = append(node.resultColumns, rc) - return in, rc, len(node.resultColumns) - 1, nil - - // orderedAggregate can accept expressions that are normal (a+b), or aggregate (MAX(v)). - // Normal expressions are pushed through to the underlying route. But aggregate - // expressions require post-processing. In such cases, oa shares the work with - // the underlying route: It asks the scatter route to perform the MAX operation - // also, and only performs the final aggregation with what the route returns. - // Since the results are expected to be ordered, this is something that can - // be performed 'as they come'. In this respect, oa is the originator for - // aggregate expressions like MAX, which will be added to symtab. The underlying - // MAX sent to the route will not be added to symtab and will not be reachable by - // others. This functionality depends on the PushOrderBy to request that - // the rows be correctly ordered. - case *orderedAggregate: - if aggrFunc, isAggregate := expr.Expr.(sqlparser.AggrFunc); isAggregate { - if _, ok := engine.SupportedAggregates[strings.ToLower(aggrFunc.AggrName())]; ok { - rc, colNumber, err := node.pushAggr(pb, expr, origin) - if err != nil { - return nil, nil, 0, err - } - return node, rc, colNumber, nil - } - } - - // Ensure that there are no aggregates in the expression. - if sqlparser.ContainsAggregation(expr.Expr) { - return nil, nil, 0, vterrors.VT12001("in scatter query: complex aggregate expression") - } - - newInput, innerRC, _, err := planProjection(pb, node.input, expr, origin) - if err != nil { - return nil, nil, 0, err - } - node.input = newInput - node.resultColumns = append(node.resultColumns, innerRC) - return node, innerRC, len(node.resultColumns) - 1, nil - case *route: - sel := node.Select.(*sqlparser.Select) - sel.SelectExprs = append(sel.SelectExprs, expr) - - rc := newResultColumn(expr, node) - node.resultColumns = append(node.resultColumns, rc) - - return node, rc, len(node.resultColumns) - 1, nil - case *mergeSort: - projectedInput, rc, idx, err := planProjection(pb, node.input, expr, origin) - if err != nil { - return nil, nil, 0, err - } - err = node.Rewrite(projectedInput) - if err != nil { - return nil, nil, 0, err - } - return node, rc, idx, nil - case *distinct: - projectedInput, rc, idx, err := planProjection(pb, node.input, expr, origin) - if err != nil { - return nil, nil, 0, err - } - err = node.Rewrite(projectedInput) - if err != nil { - return nil, nil, 0, err - } - return node, rc, idx, nil - case *pulloutSubquery: - projectedInput, rc, idx, err := planProjection(pb, node.underlying, expr, origin) - if err != nil { - return nil, nil, 0, err - } - err = node.Rewrite(projectedInput, node.subquery) - if err != nil { - return nil, nil, 0, err - } - return node, rc, idx, nil - case *simpleProjection: - col, ok := expr.Expr.(*sqlparser.ColName) - if !ok { - return nil, nil, 0, vterrors.VT12001("expression on results of a cross-shard subquery") - } - - // colNumber should already be set for subquery columns. - inner := col.Metadata.(*column).colNumber - node.eSimpleProj.Cols = append(node.eSimpleProj.Cols, inner) - - // Build a new column reference to represent the result column. - rc := newResultColumn(expr, node) - node.resultColumns = append(node.resultColumns, rc) - - return node, rc, len(node.resultColumns) - 1, nil - case *vindexFunc: - // Catch the case where no where clause was specified. If so, the opcode - // won't be set. - if node.eVindexFunc.Opcode == engine.VindexNone { - return nil, nil, 0, vterrors.VT12001(operators.VindexUnsupported + " (where clause missing)") - } - col, ok := expr.Expr.(*sqlparser.ColName) - if !ok { - return nil, nil, 0, vterrors.VT12001("expression on results of a vindex function") - } - rc := newResultColumn(expr, node) - node.resultColumns = append(node.resultColumns, rc) - node.eVindexFunc.Fields = append(node.eVindexFunc.Fields, &querypb.Field{ - Name: rc.alias.String(), - Type: querypb.Type_VARBINARY, - }) - node.eVindexFunc.Cols = append(node.eVindexFunc.Cols, col.Metadata.(*column).colNumber) - return node, rc, len(node.resultColumns) - 1, nil - - } - return nil, nil, 0, vterrors.VT13001(fmt.Sprintf("unreachable %T.projection", in)) -} diff --git a/go/vt/vtgate/planbuilder/projection.go b/go/vt/vtgate/planbuilder/projection.go index 6c942037490..2f9cb7983cc 100644 --- a/go/vt/vtgate/planbuilder/projection.go +++ b/go/vt/vtgate/planbuilder/projection.go @@ -28,7 +28,6 @@ import ( ) type projection struct { - gen4Plan source logicalPlan columnNames []string columns []sqlparser.Expr @@ -41,10 +40,21 @@ type projection struct { var _ logicalPlan = (*projection)(nil) // WireupGen4 implements the logicalPlan interface -func (p *projection) WireupGen4(ctx *plancontext.PlanningContext) error { +func (p *projection) Wireup(ctx *plancontext.PlanningContext) error { + if p.primitive != nil { + // if primitive is not nil, it means that the horizon planning in the operator phase already + // created all the needed evalengine expressions. + // we don't need to do anything here, let's just shortcut out of this call + return p.source.Wireup(ctx) + } + columns := make([]evalengine.Expr, 0, len(p.columns)) for _, expr := range p.columns { - convert, err := evalengine.Translate(expr, ctx.SemTable) + convert, err := evalengine.Translate(expr, &evalengine.Config{ + ResolveColumn: resolveFromPlan(ctx, p.source, false), + ResolveType: ctx.SemTable.TypeForExpr, + Collation: ctx.SemTable.Collation, + }) if err != nil { return err } @@ -55,7 +65,7 @@ func (p *projection) WireupGen4(ctx *plancontext.PlanningContext) error { Exprs: columns, } - return p.source.WireupGen4(ctx) + return p.source.Wireup(ctx) } // Inputs implements the logicalPlan interface diff --git a/go/vt/vtgate/planbuilder/projection_pushing.go b/go/vt/vtgate/planbuilder/projection_pushing.go index e770ef1c9bd..e335c1c9ab5 100644 --- a/go/vt/vtgate/planbuilder/projection_pushing.go +++ b/go/vt/vtgate/planbuilder/projection_pushing.go @@ -22,6 +22,7 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" @@ -39,11 +40,11 @@ func pushProjection( // All of these either push to the single source, or push to the LHS src := node.Inputs()[0] return pushProjection(ctx, expr, src, inner, reuseCol, hasAggregation) - case *routeGen4: + case *route: return addExpressionToRoute(ctx, node, expr, reuseCol) case *hashJoin: return pushProjectionIntoHashJoin(ctx, expr, node, reuseCol, inner, hasAggregation) - case *joinGen4: + case *join: return pushProjectionIntoJoin(ctx, expr, node, reuseCol, inner, hasAggregation) case *simpleProjection: return pushProjectionIntoSimpleProj(ctx, expr, node, inner, hasAggregation, reuseCol) @@ -53,7 +54,7 @@ func pushProjection( return pushProjectionIntoVindexFunc(node, expr, reuseCol) case *semiJoin: return pushProjectionIntoSemiJoin(ctx, expr, reuseCol, node, inner, hasAggregation) - case *concatenateGen4: + case *concatenate: return pushProjectionIntoConcatenate(ctx, expr, hasAggregation, node, inner, reuseCol) default: return 0, false, vterrors.VT13001(fmt.Sprintf("push projection does not yet support: %T", node)) @@ -69,7 +70,7 @@ func pushProjectionIntoVindexFunc(node *vindexFunc, expr *sqlparser.AliasedExpr, return i /* col added */, len(node.eVindexFunc.Cols) > colsBefore, nil } -func pushProjectionIntoConcatenate(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, hasAggregation bool, node *concatenateGen4, inner bool, reuseCol bool) (int, bool, error) { +func pushProjectionIntoConcatenate(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, hasAggregation bool, node *concatenate, inner bool, reuseCol bool) (int, bool, error) { if hasAggregation { return 0, false, vterrors.VT12001("aggregation on UNIONs") } @@ -137,13 +138,10 @@ func pushProjectionIntoOA(ctx *plancontext.PlanningContext, expr *sqlparser.Alia if err != nil { return 0, false, err } - node.aggregates = append(node.aggregates, &engine.AggregateParams{ - Opcode: engine.AggregateRandom, - Col: offset, - Alias: expr.ColumnName(), - Expr: expr.Expr, - Original: expr, - }) + aggr := engine.NewAggregateParam(popcode.AggregateAnyValue, offset, expr.ColumnName()) + aggr.Expr = expr.Expr + aggr.Original = expr + node.aggregates = append(node.aggregates, aggr) return offset, true, nil } @@ -171,7 +169,7 @@ func pushProjectionIntoSimpleProj( func pushProjectionIntoJoin( ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, - node *joinGen4, + node *join, reuseCol, inner, hasAggregation bool, ) (int, bool, error) { lhsSolves := node.Left.ContainsTables() @@ -206,22 +204,22 @@ func pushProjectionIntoJoin( return 0, false, vterrors.VT12001("cross-shard query with aggregates") } // now we break the expression into left and right side dependencies and rewrite the left ones to bind variables - bvName, cols, rewrittenExpr, err := operators.BreakExpressionInLHSandRHS(ctx, expr.Expr, lhsSolves) + joinCol, err := operators.BreakExpressionInLHSandRHS(ctx, expr.Expr, lhsSolves) if err != nil { return 0, false, err } // go over all the columns coming from the left side of the tree and push them down. While at it, also update the bind variable map. // It is okay to reuse the columns on the left side since // the final expression which will be selected will be pushed into the right side. - for i, col := range cols { + for i, col := range joinCol.LHSExprs { colOffset, _, err := pushProjection(ctx, &sqlparser.AliasedExpr{Expr: col}, node.Left, inner, true, false) if err != nil { return 0, false, err } - node.Vars[bvName[i]] = colOffset + node.Vars[joinCol.BvNames[i]] = colOffset } // push the rewritten expression on the right side of the tree. Here we should take care whether we want to reuse the expression or not. - expr.Expr = rewrittenExpr + expr.Expr = joinCol.RHSExpr offset, added, err := pushProjection(ctx, expr, node.Right, inner && node.Opcode != engine.LeftJoin, passDownReuseCol, false) if err != nil { return 0, false, err @@ -294,7 +292,7 @@ func pushProjectionIntoHashJoin( return len(node.Cols) - 1, true, nil } -func addExpressionToRoute(ctx *plancontext.PlanningContext, rb *routeGen4, expr *sqlparser.AliasedExpr, reuseCol bool) (int, bool, error) { +func addExpressionToRoute(ctx *plancontext.PlanningContext, rb *route, expr *sqlparser.AliasedExpr, reuseCol bool) (int, bool, error) { if reuseCol { if i := checkIfAlreadyExists(expr, rb.Select, ctx.SemTable); i != -1 { return i, false, nil diff --git a/go/vt/vtgate/planbuilder/pullout_subquery.go b/go/vt/vtgate/planbuilder/pullout_subquery.go index a70fb5efdc4..06c24cad1f5 100644 --- a/go/vt/vtgate/planbuilder/pullout_subquery.go +++ b/go/vt/vtgate/planbuilder/pullout_subquery.go @@ -20,6 +20,7 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -30,14 +31,13 @@ var _ logicalPlan = (*pulloutSubquery)(nil) // This gets built if a subquery is not correlated and can // therefore can be pulled out and executed upfront. type pulloutSubquery struct { - order int subquery logicalPlan underlying logicalPlan eSubquery *engine.PulloutSubquery } // newPulloutSubquery builds a new pulloutSubquery. -func newPulloutSubquery(opcode engine.PulloutOpcode, sqName, hasValues string, subquery logicalPlan) *pulloutSubquery { +func newPulloutSubquery(opcode popcode.PulloutOpcode, sqName, hasValues string, subquery logicalPlan) *pulloutSubquery { return &pulloutSubquery{ subquery: subquery, eSubquery: &engine.PulloutSubquery{ @@ -48,25 +48,6 @@ func newPulloutSubquery(opcode engine.PulloutOpcode, sqName, hasValues string, s } } -// setUnderlying sets the underlying primitive. -func (ps *pulloutSubquery) setUnderlying(underlying logicalPlan) { - ps.underlying = underlying - ps.underlying.Reorder(ps.subquery.Order()) - ps.order = ps.underlying.Order() + 1 -} - -// Order implements the logicalPlan interface -func (ps *pulloutSubquery) Order() int { - return ps.order -} - -// Reorder implements the logicalPlan interface -func (ps *pulloutSubquery) Reorder(order int) { - ps.subquery.Reorder(order) - ps.underlying.Reorder(ps.subquery.Order()) - ps.order = ps.underlying.Order() + 1 -} - // Primitive implements the logicalPlan interface func (ps *pulloutSubquery) Primitive() engine.Primitive { ps.eSubquery.Subquery = ps.subquery.Primitive() @@ -74,44 +55,12 @@ func (ps *pulloutSubquery) Primitive() engine.Primitive { return ps.eSubquery } -// ResultColumns implements the logicalPlan interface -func (ps *pulloutSubquery) ResultColumns() []*resultColumn { - return ps.underlying.ResultColumns() -} - -// Wireup implements the logicalPlan interface -func (ps *pulloutSubquery) Wireup(plan logicalPlan, jt *jointab) error { - if err := ps.underlying.Wireup(plan, jt); err != nil { +// WireupGen4 implements the logicalPlan interface +func (ps *pulloutSubquery) Wireup(ctx *plancontext.PlanningContext) error { + if err := ps.underlying.Wireup(ctx); err != nil { return err } - return ps.subquery.Wireup(plan, jt) -} - -// Wireup2 implements the logicalPlan interface -func (ps *pulloutSubquery) WireupGen4(ctx *plancontext.PlanningContext) error { - if err := ps.underlying.WireupGen4(ctx); err != nil { - return err - } - return ps.subquery.WireupGen4(ctx) -} - -// SupplyVar implements the logicalPlan interface -func (ps *pulloutSubquery) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { - if from <= ps.subquery.Order() { - ps.subquery.SupplyVar(from, to, col, varname) - return - } - ps.underlying.SupplyVar(from, to, col, varname) -} - -// SupplyCol implements the logicalPlan interface -func (ps *pulloutSubquery) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - return ps.underlying.SupplyCol(col) -} - -// SupplyWeightString implements the logicalPlan interface -func (ps *pulloutSubquery) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) { - return ps.underlying.SupplyWeightString(colNumber, alsoAddToGroupBy) + return ps.subquery.Wireup(ctx) } // Rewrite implements the logicalPlan interface diff --git a/go/vt/vtgate/planbuilder/rewrite.go b/go/vt/vtgate/planbuilder/rewrite.go index 93884ecf863..4a95696c0f0 100644 --- a/go/vt/vtgate/planbuilder/rewrite.go +++ b/go/vt/vtgate/planbuilder/rewrite.go @@ -19,7 +19,7 @@ package planbuilder import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" + popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -131,7 +131,7 @@ func rewriteSubquery(cursor *sqlparser.Cursor, r *rewriter, node *sqlparser.Subq if err != nil { return err } - if semTableSQ.GetArgName() != "" || engine.PulloutOpcode(semTableSQ.OpCode) != engine.PulloutValue { + if semTableSQ.GetArgName() != "" || popcode.PulloutOpcode(semTableSQ.OpCode) != popcode.PulloutValue { return nil } r.inSubquery++ diff --git a/go/vt/vtgate/planbuilder/route.go b/go/vt/vtgate/planbuilder/route.go index a7429417cc6..6a668b2c5c1 100644 --- a/go/vt/vtgate/planbuilder/route.go +++ b/go/vt/vtgate/planbuilder/route.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Vitess Authors. +Copyright 2021 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,16 +17,12 @@ limitations under the License. package planbuilder import ( - "fmt" - "strconv" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/semantics" - + querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -38,94 +34,28 @@ var _ logicalPlan = (*route)(nil) // are moved into this node, which will be used to build // the final SQL for this route. type route struct { - v3Plan - order int - - // Redirect may point to another route if this route - // was merged with it. The Resolve function chases - // this pointer till the last un-redirected route. - Redirect *route // Select is the AST for the query fragment that will be // executed by this route. Select sqlparser.SelectStatement - // resultColumns represent the columns returned by this route. - resultColumns []*resultColumn - - // weight_string keeps track of the weight_string expressions - // that were added additionally for each column. These expressions - // are added to be used for collation of text columns. - weightStrings map[*resultColumn]int - - // substitutions contain the list of table expressions that - // have to be substituted in the route's query. - substitutions []*tableSubstitution - // condition stores the AST condition that will be used // to resolve the ERoute Values field. condition sqlparser.Expr // eroute is the primitive being built. eroute *engine.Route -} - -type tableSubstitution struct { - newExpr, oldExpr *sqlparser.AliasedTableExpr -} - -func newRoute(stmt sqlparser.SelectStatement) (*route, *symtab) { - rb := &route{ - Select: stmt, - order: 1, - weightStrings: make(map[*resultColumn]int), - } - return rb, newSymtabWithRoute(rb) -} - -// Resolve resolves redirects, and returns the last -// un-redirected route. -func (rb *route) Resolve() *route { - for rb.Redirect != nil { - rb = rb.Redirect - } - return rb -} -// Order implements the logicalPlan interface -func (rb *route) Order() int { - return rb.order -} + // is the engine primitive we will return from the Primitive() method. Note that it could be different than eroute + enginePrimitive engine.Primitive -// Reorder implements the logicalPlan interface -func (rb *route) Reorder(order int) { - rb.order = order + 1 + // tables keeps track of which tables this route is covering + tables semantics.TableSet } // Primitive implements the logicalPlan interface func (rb *route) Primitive() engine.Primitive { - return rb.eroute -} - -// ResultColumns implements the logicalPlan interface -func (rb *route) ResultColumns() []*resultColumn { - return rb.resultColumns -} - -// PushAnonymous pushes an anonymous expression like '*' or NEXT VALUES -// into the select expression list of the route. This function is -// similar to PushSelect. -func (rb *route) PushAnonymous(expr sqlparser.SelectExpr) *resultColumn { - // TODO: we should not assume that the query is a SELECT - sel := rb.Select.(*sqlparser.Select) - sel.SelectExprs = append(sel.SelectExprs, expr) - - // We just create a place-holder resultColumn. It won't - // match anything. - rc := &resultColumn{column: &column{origin: rb}} - rb.resultColumns = append(rb.resultColumns, rc) - - return rc + return rb.enginePrimitive } // SetLimit adds a LIMIT clause to the route. @@ -133,82 +63,63 @@ func (rb *route) SetLimit(limit *sqlparser.Limit) { rb.Select.SetLimit(limit) } -// Wireup implements the logicalPlan interface -func (rb *route) Wireup(plan logicalPlan, jt *jointab) error { - // Precaution: update ERoute.Values only if it's not set already. - if rb.eroute.Values == nil { - // Resolve values stored in the logical plan. - switch vals := rb.condition.(type) { - case *sqlparser.ComparisonExpr: - pv, err := rb.procureValues(plan, jt, vals.Right) - if err != nil { - return err - } - rb.eroute.Values = []evalengine.Expr{pv} - vals.Right = sqlparser.ListArg(engine.ListVarName) - case nil: - // no-op. - default: - pv, err := rb.procureValues(plan, jt, vals) - if err != nil { - return err - } - rb.eroute.Values = []evalengine.Expr{pv} - } +// WireupGen4 implements the logicalPlan interface +func (rb *route) Wireup(ctx *plancontext.PlanningContext) error { + rb.prepareTheAST() + + // prepare the queries we will pass down + rb.eroute.Query = sqlparser.String(rb.Select) + buffer := sqlparser.NewTrackedBuffer(sqlparser.FormatImpossibleQuery) + node := buffer.WriteNode(rb.Select) + parsedQuery := node.ParsedQuery() + rb.eroute.FieldQuery = parsedQuery.Query + + // if we have a planable vindex lookup, let's extract it into its own primitive + planableVindex, ok := rb.eroute.RoutingParameters.Vindex.(vindexes.LookupPlanable) + if !ok { + rb.enginePrimitive = rb.eroute + return nil } - // Fix up the AST. - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { - switch node := node.(type) { - case *sqlparser.Select: - if len(node.SelectExprs) == 0 { - node.SelectExprs = []sqlparser.SelectExpr{ - &sqlparser.AliasedExpr{ - Expr: sqlparser.NewIntLiteral("1"), - }, - } - } - case *sqlparser.ComparisonExpr: - if node.Operator == sqlparser.EqualOp { - if rb.exprIsValue(node.Left) && !rb.exprIsValue(node.Right) { - node.Left, node.Right = node.Right, node.Left - } - } - } - return true, nil - }, rb.Select) + query, args := planableVindex.Query() + stmt, reserved, err := sqlparser.Parse2(query) + if err != nil { + return err + } + reservedVars := sqlparser.NewReservedVars("vtg", reserved) - // Substitute table names - for _, sub := range rb.substitutions { - *sub.oldExpr = *sub.newExpr + lookupPrimitive, err := gen4SelectStmtPlanner(query, querypb.ExecuteOptions_Gen4, stmt.(sqlparser.SelectStatement), reservedVars, ctx.VSchema) + if err != nil { + return vterrors.Wrapf(err, "failed to plan the lookup query: [%s]", query) } - // Generate query while simultaneously resolving values. - varFormatter := func(buf *sqlparser.TrackedBuffer, node sqlparser.SQLNode) { - switch node := node.(type) { - case *sqlparser.ColName: - if !rb.isLocal(node) { - joinVar := jt.Procure(plan, node, rb.Order()) - buf.WriteArg(":", joinVar) - return - } - case sqlparser.TableName: - if !sqlparser.SystemSchema(node.Qualifier.String()) { - node.Name.Format(buf) - return - } - node.Format(buf) - return - } - node.Format(buf) + rb.enginePrimitive = &engine.VindexLookup{ + Opcode: rb.eroute.Opcode, + Vindex: planableVindex, + Keyspace: rb.eroute.Keyspace, + Values: rb.eroute.Values, + SendTo: rb.eroute, + Arguments: args, + Lookup: lookupPrimitive.primitive, } - buf := sqlparser.NewTrackedBuffer(varFormatter) - varFormatter(buf, rb.Select) - rb.eroute.Query = buf.ParsedQuery().Query - rb.eroute.FieldQuery = rb.generateFieldQuery(rb.Select, jt) + + rb.eroute.RoutingParameters.Opcode = engine.ByDestination + rb.eroute.RoutingParameters.Values = nil + rb.eroute.RoutingParameters.Vindex = nil + return nil } +// ContainsTables implements the logicalPlan interface +func (rb *route) ContainsTables() semantics.TableSet { + return rb.tables +} + +// OutputColumns implements the logicalPlan interface +func (rb *route) OutputColumns() []sqlparser.SelectExpr { + return sqlparser.GetFirstSelect(rb.Select).SelectExprs +} + // prepareTheAST does minor fixups of the SELECT struct before producing the query string func (rb *route) prepareTheAST() { _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { @@ -236,130 +147,6 @@ func (rb *route) prepareTheAST() { }, rb.Select) } -// procureValues procures and converts the input into -// the expected types for rb.Values. -func (rb *route) procureValues(plan logicalPlan, jt *jointab, val sqlparser.Expr) (evalengine.Expr, error) { - switch typedVal := val.(type) { - case sqlparser.ValTuple: - exprs := make([]evalengine.Expr, 0, len(typedVal)) - for _, item := range typedVal { - v, err := rb.procureValues(plan, jt, item) - if err != nil { - return nil, err - } - exprs = append(exprs, v) - } - return evalengine.NewTupleExpr(exprs...), nil - case *sqlparser.ColName: - joinVar := jt.Procure(plan, typedVal, rb.Order()) - return evalengine.NewBindVar(joinVar, collations.TypedCollation{}), nil - default: - return evalengine.Translate(typedVal, semantics.EmptySemTable()) - } -} - -func (rb *route) isLocal(col *sqlparser.ColName) bool { - return col.Metadata.(*column).Origin() == rb -} - -// generateFieldQuery generates a query with an impossible where. -// This will be used on the RHS node to fetch field info if the LHS -// returns no result. -func (rb *route) generateFieldQuery(sel sqlparser.SelectStatement, jt *jointab) string { - formatter := func(buf *sqlparser.TrackedBuffer, node sqlparser.SQLNode) { - switch node := node.(type) { - case *sqlparser.ColName: - if !rb.isLocal(node) { - _, joinVar := jt.Lookup(node) - buf.WriteArg(":", joinVar) - return - } - case sqlparser.TableName: - if !sqlparser.SystemSchema(node.Qualifier.String()) { - node.Name.Format(buf) - return - } - node.Format(buf) - return - } - sqlparser.FormatImpossibleQuery(buf, node) - } - - buffer := sqlparser.NewTrackedBuffer(formatter) - node := buffer.WriteNode(sel) - query := node.ParsedQuery() - return query.Query -} - -// SupplyVar implements the logicalPlan interface -func (rb *route) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { - // route is an atomic primitive. So, SupplyVar cannot be - // called on it. - panic("BUG: route is an atomic node.") -} - -// SupplyCol implements the logicalPlan interface -func (rb *route) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - c := col.Metadata.(*column) - for i, rc := range rb.resultColumns { - if rc.column == c { - return rc, i - } - } - - // A new result has to be returned. - rc = &resultColumn{column: c} - rb.resultColumns = append(rb.resultColumns, rc) - // TODO: we should not assume that the query is a SELECT query - sel := rb.Select.(*sqlparser.Select) - sel.SelectExprs = append(sel.SelectExprs, &sqlparser.AliasedExpr{Expr: col}) - return rc, len(rb.resultColumns) - 1 -} - -// SupplyWeightString implements the logicalPlan interface -func (rb *route) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) { - rc := rb.resultColumns[colNumber] - s, ok := rb.Select.(*sqlparser.Select) - if !ok { - return 0, vterrors.VT13001("unexpected AST struct for query") - } - - aliasExpr, ok := s.SelectExprs[colNumber].(*sqlparser.AliasedExpr) - if !ok { - return 0, vterrors.VT13001(fmt.Sprintf("unexpected AST struct for query %T", s.SelectExprs[colNumber])) - } - weightStringExpr := &sqlparser.FuncExpr{ - Name: sqlparser.NewIdentifierCI("weight_string"), - Exprs: []sqlparser.SelectExpr{ - &sqlparser.AliasedExpr{ - Expr: aliasExpr.Expr, - }, - }, - } - expr := &sqlparser.AliasedExpr{ - Expr: weightStringExpr, - } - if alsoAddToGroupBy { - sel, isSelect := rb.Select.(*sqlparser.Select) - if !isSelect { - return 0, vterrors.VT13001(fmt.Sprintf("cannot add weight string in %T", rb.Select)) - } - sel.AddGroupBy(weightStringExpr) - } - - if weightcolNumber, ok := rb.weightStrings[rc]; ok { - return weightcolNumber, nil - } - // It's ok to pass nil for pb and logicalPlan because PushSelect doesn't use them. - // TODO: we are ignoring a potential error here. need to clean this up - _, _, weightcolNumber, err = planProjection(nil, rb, expr, nil) - if err != nil { - return 0, err - } - rb.weightStrings[rc] = weightcolNumber - return weightcolNumber, nil -} - // Rewrite implements the logicalPlan interface func (rb *route) Rewrite(inputs ...logicalPlan) error { if len(inputs) != 0 { @@ -373,487 +160,6 @@ func (rb *route) Inputs() []logicalPlan { return []logicalPlan{} } -// MergeSubquery returns true if the subquery route could successfully be merged -// with the outer route. -func (rb *route) MergeSubquery(pb *primitiveBuilder, inner *route) bool { - if rb.SubqueryCanMerge(pb, inner) { - if inner.eroute.Opcode == engine.DBA && (len(inner.eroute.SysTableTableName) > 0 || len(inner.eroute.SysTableTableSchema) > 0) { - switch rb.eroute.Opcode { - case engine.DBA, engine.Reference: - rb.eroute.SysTableTableSchema = append(rb.eroute.SysTableTableSchema, inner.eroute.SysTableTableSchema...) - for k, v := range inner.eroute.SysTableTableName { - if rb.eroute.SysTableTableName == nil { - rb.eroute.SysTableTableName = map[string]evalengine.Expr{} - } - rb.eroute.SysTableTableName[k] = v - } - rb.eroute.Opcode = engine.DBA - default: - return false - } - } else { - if rb.eroute.Opcode == engine.Reference { - rb.eroute.RoutingParameters = inner.eroute.RoutingParameters - rb.condition = inner.condition - } - } - - rb.substitutions = append(rb.substitutions, inner.substitutions...) - inner.Redirect = rb - return true - } - return false -} - -// MergeUnion returns true if the rhs route could successfully be merged -// with the rb route. -func (rb *route) MergeUnion(right *route, isDistinct bool) bool { - if rb.unionCanMerge(right, isDistinct) { - rb.substitutions = append(rb.substitutions, right.substitutions...) - right.Redirect = rb - return true - } - return false -} - func (rb *route) isSingleShard() bool { return rb.eroute.Opcode.IsSingleShard() } - -// JoinCanMerge, SubqueryCanMerge and unionCanMerge have subtly different behaviors. -// The difference in behavior is around SelectReference. -// It's not worth trying to reuse the code between them. -func (rb *route) JoinCanMerge(pb *primitiveBuilder, rrb *route, ajoin *sqlparser.JoinTableExpr, where sqlparser.Expr) bool { - if rb.eroute.Keyspace.Name != rrb.eroute.Keyspace.Name { - return false - } - if rrb.eroute.Opcode == engine.Reference { - // Any opcode can join with a reference table. - return true - } - switch rb.eroute.Opcode { - case engine.Unsharded: - return rb.eroute.Opcode == rrb.eroute.Opcode - case engine.EqualUnique: - // Check if they target the same shard. - if rrb.eroute.Opcode == engine.EqualUnique && rb.eroute.Vindex == rrb.eroute.Vindex && valEqual(rb.condition, rrb.condition) { - return true - } - case engine.Reference: - return true - case engine.Next: - return false - case engine.DBA: - if rrb.eroute.Opcode != engine.DBA { - return false - } - if where == nil { - return true - } - return ajoin != nil - } - if ajoin == nil { - return false - } - for _, filter := range sqlparser.SplitAndExpression(nil, ajoin.Condition.On) { - if rb.canMergeOnFilter(pb, rrb, filter) { - return true - } - } - return false -} - -func (rb *route) SubqueryCanMerge(pb *primitiveBuilder, inner *route) bool { - if rb.eroute.Keyspace.Name != inner.eroute.Keyspace.Name { - return false - } - - // if either side is a reference table, and we know the other side will only run once, - // we can just merge them and use the opcode of the other side - if rb.eroute.Opcode == engine.Reference || inner.eroute.Opcode == engine.Reference { - return rb.isSingleShard() && inner.isSingleShard() - } - - switch rb.eroute.Opcode { - case engine.Unsharded, engine.DBA: - return rb.eroute.Opcode == inner.eroute.Opcode - case engine.EqualUnique: - // Check if they target the same shard. - if inner.eroute.Opcode == engine.EqualUnique && rb.eroute.Vindex == inner.eroute.Vindex && valEqual(rb.condition, inner.condition) { - return true - } - case engine.Next: - return false - } - - switch vals := inner.condition.(type) { - case *sqlparser.ColName: - if pb.st.Vindex(vals, rb) == inner.eroute.Vindex { - return true - } - } - return false -} - -func (rb *route) unionCanMerge(other *route, distinct bool) bool { - if rb.eroute.Keyspace.Name != other.eroute.Keyspace.Name { - return false - } - switch rb.eroute.Opcode { - case engine.Unsharded, engine.Reference: - return rb.eroute.Opcode == other.eroute.Opcode - case engine.DBA: - return other.eroute.Opcode == engine.DBA && - len(rb.eroute.SysTableTableSchema) == 0 && - len(rb.eroute.SysTableTableName) == 0 && - len(other.eroute.SysTableTableSchema) == 0 && - len(other.eroute.SysTableTableName) == 0 - case engine.EqualUnique: - // Check if they target the same shard. - if other.eroute.Opcode == engine.EqualUnique && rb.eroute.Vindex == other.eroute.Vindex && valEqual(rb.condition, other.condition) { - return true - } - case engine.Scatter: - return other.eroute.Opcode == engine.Scatter && !distinct - case engine.Next: - return false - } - return false -} - -// canMergeOnFilter returns true if the join constraint makes the routes -// mergeable by unique vindex. The constraint has to be an equality -// like a.id = b.id where both columns have the same unique vindex. -func (rb *route) canMergeOnFilter(pb *primitiveBuilder, rrb *route, filter sqlparser.Expr) bool { - comparison, ok := filter.(*sqlparser.ComparisonExpr) - if !ok { - return false - } - if comparison.Operator != sqlparser.EqualOp { - return false - } - left := comparison.Left - right := comparison.Right - lVindex := pb.st.Vindex(left, rb) - if lVindex == nil { - left, right = right, left - lVindex = pb.st.Vindex(left, rb) - } - if lVindex == nil || !lVindex.IsUnique() { - return false - } - rVindex := pb.st.Vindex(right, rrb) - if rVindex == nil { - return false - } - return rVindex == lVindex -} - -// UpdatePlan evaluates the primitive against the specified -// filter. If it's an improvement, the primitive is updated. -// We assume that the filter has already been pushed into -// the route. -func (rb *route) UpdatePlan(pb *primitiveBuilder, filter sqlparser.Expr) { - switch rb.eroute.Opcode { - // For these opcodes, a new filter will not make any difference, so we can just exit early - case engine.Unsharded, engine.Next, engine.DBA, engine.Reference, engine.None: - return - } - opcode, vindex, values := rb.computePlan(pb, filter) - if opcode == engine.Scatter { - return - } - // If we get SelectNone in next filters, override the previous route plan. - if opcode == engine.None { - rb.updateRoute(opcode, vindex, values) - return - } - switch rb.eroute.Opcode { - case engine.EqualUnique: - if opcode == engine.EqualUnique && vindex.Cost() < rb.eroute.Vindex.Cost() { - rb.updateRoute(opcode, vindex, values) - } - case engine.Equal: - switch opcode { - case engine.EqualUnique: - rb.updateRoute(opcode, vindex, values) - case engine.Equal: - if vindex.Cost() < rb.eroute.Vindex.Cost() { - rb.updateRoute(opcode, vindex, values) - } - } - case engine.IN: - switch opcode { - case engine.EqualUnique, engine.Equal: - rb.updateRoute(opcode, vindex, values) - case engine.IN: - if vindex.Cost() < rb.eroute.Vindex.Cost() { - rb.updateRoute(opcode, vindex, values) - } - } - case engine.MultiEqual: - switch opcode { - case engine.EqualUnique, engine.Equal, engine.IN: - rb.updateRoute(opcode, vindex, values) - case engine.MultiEqual: - if vindex.Cost() < rb.eroute.Vindex.Cost() { - rb.updateRoute(opcode, vindex, values) - } - } - case engine.Scatter: - switch opcode { - case engine.EqualUnique, engine.Equal, engine.IN, engine.MultiEqual, engine.None: - rb.updateRoute(opcode, vindex, values) - } - } -} - -func (rb *route) updateRoute(opcode engine.Opcode, vindex vindexes.SingleColumn, condition sqlparser.Expr) { - rb.eroute.Opcode = opcode - rb.eroute.Vindex = vindex - rb.condition = condition -} - -// computePlan computes the plan for the specified filter. -func (rb *route) computePlan(pb *primitiveBuilder, filter sqlparser.Expr) (opcode engine.Opcode, vindex vindexes.SingleColumn, condition sqlparser.Expr) { - switch node := filter.(type) { - case *sqlparser.ComparisonExpr: - switch node.Operator { - case sqlparser.EqualOp: - return rb.computeEqualPlan(pb, node) - case sqlparser.InOp: - return rb.computeINPlan(pb, node) - case sqlparser.NotInOp: - return rb.computeNotInPlan(node.Right), nil, nil - case sqlparser.LikeOp: - return rb.computeLikePlan(pb, node) - } - case *sqlparser.IsExpr: - return rb.computeISPlan(pb, node) - } - return engine.Scatter, nil, nil -} - -// computeLikePlan computes the plan for 'LIKE' constraint -func (rb *route) computeLikePlan(pb *primitiveBuilder, comparison *sqlparser.ComparisonExpr) (opcode engine.Opcode, vindex vindexes.SingleColumn, condition sqlparser.Expr) { - - left := comparison.Left - right := comparison.Right - - if sqlparser.IsNull(right) { - return engine.None, nil, nil - } - if !rb.exprIsValue(right) { - return engine.Scatter, nil, nil - } - vindex = pb.st.Vindex(left, rb) - if vindex == nil { - // if there is no vindex defined, scatter - return engine.Scatter, nil, nil - } - if subsharding, ok := vindex.(vindexes.Prefixable); ok { - return engine.Equal, subsharding.PrefixVindex(), right - } - - return engine.Scatter, nil, nil -} - -// computeEqualPlan computes the plan for an equality constraint. -func (rb *route) computeEqualPlan(pb *primitiveBuilder, comparison *sqlparser.ComparisonExpr) (opcode engine.Opcode, vindex vindexes.SingleColumn, condition sqlparser.Expr) { - left := comparison.Left - right := comparison.Right - - if sqlparser.IsNull(right) { - return engine.None, nil, nil - } - - vindex = pb.st.Vindex(left, rb) - if vindex == nil { - left, right = right, left - vindex = pb.st.Vindex(left, rb) - if vindex == nil { - return engine.Scatter, nil, nil - } - } - if !rb.exprIsValue(right) { - return engine.Scatter, nil, nil - } - if vindex.IsUnique() { - return engine.EqualUnique, vindex, right - } - return engine.Equal, vindex, right -} - -// computeIS computes the plan for an equality constraint. -func (rb *route) computeISPlan(pb *primitiveBuilder, comparison *sqlparser.IsExpr) (opcode engine.Opcode, vindex vindexes.SingleColumn, expr sqlparser.Expr) { - // we only handle IS NULL correct. IsExpr can contain other expressions as well - if comparison.Right != sqlparser.IsNullOp { - return engine.Scatter, nil, nil - } - - vindex = pb.st.Vindex(comparison.Left, rb) - // fallback to scatter gather if there is no vindex - if vindex == nil { - return engine.Scatter, nil, nil - } - if _, isLookup := vindex.(vindexes.Lookup); isLookup { - // the lookup table is keyed by the lookup value, so it does not support nulls - return engine.Scatter, nil, nil - } - if vindex.IsUnique() { - return engine.EqualUnique, vindex, &sqlparser.NullVal{} - } - return engine.Equal, vindex, &sqlparser.NullVal{} -} - -// computeINPlan computes the plan for an IN constraint. -func (rb *route) computeINPlan(pb *primitiveBuilder, comparison *sqlparser.ComparisonExpr) (opcode engine.Opcode, vindex vindexes.SingleColumn, expr sqlparser.Expr) { - switch comparison.Left.(type) { - case *sqlparser.ColName: - return rb.computeSimpleINPlan(pb, comparison) - case sqlparser.ValTuple: - return rb.computeCompositeINPlan(pb, comparison) - } - return engine.Scatter, nil, nil -} - -// computeSimpleINPlan computes the plan for a simple IN constraint. -func (rb *route) computeSimpleINPlan(pb *primitiveBuilder, comparison *sqlparser.ComparisonExpr) (opcode engine.Opcode, vindex vindexes.SingleColumn, expr sqlparser.Expr) { - vindex = pb.st.Vindex(comparison.Left, rb) - if vindex == nil { - return engine.Scatter, nil, nil - } - switch node := comparison.Right.(type) { - case sqlparser.ValTuple: - if len(node) == 1 && sqlparser.IsNull(node[0]) { - return engine.None, nil, nil - } - - for _, n := range node { - if !rb.exprIsValue(n) { - return engine.Scatter, nil, nil - } - } - return engine.IN, vindex, comparison - case sqlparser.ListArg: - return engine.IN, vindex, comparison - } - return engine.Scatter, nil, nil -} - -// computeCompositeINPlan computes the plan for a composite IN constraint. -func (rb *route) computeCompositeINPlan(pb *primitiveBuilder, comparison *sqlparser.ComparisonExpr) (opcode engine.Opcode, vindex vindexes.SingleColumn, values sqlparser.Expr) { - leftTuple := comparison.Left.(sqlparser.ValTuple) - return rb.iterateCompositeIN(pb, comparison, nil, leftTuple) -} - -// iterateCompositeIN recursively walks the LHS tuple of the IN clause looking -// for column names. For those that match a vindex, it builds a multi-value plan -// using the corresponding values in the RHS. It returns the best of the plans built. -func (rb *route) iterateCompositeIN( - pb *primitiveBuilder, - comparison *sqlparser.ComparisonExpr, - coordinates []int, - tuple sqlparser.ValTuple, -) (opcode engine.Opcode, vindex vindexes.SingleColumn, values sqlparser.Expr) { - opcode = engine.Scatter - - cindex := len(coordinates) - coordinates = append(coordinates, 0) - for idx, expr := range tuple { - coordinates[cindex] = idx - switch expr := expr.(type) { - case sqlparser.ValTuple: - newOpcode, newVindex, newValues := rb.iterateCompositeIN(pb, comparison, coordinates, expr) - opcode, vindex, values = bestOfComposite(opcode, newOpcode, vindex, newVindex, values, newValues) - case *sqlparser.ColName: - newVindex := pb.st.Vindex(expr, rb) - if newVindex != nil { - newOpcode, newValues := rb.compositePlanForCol(pb, comparison, coordinates) - opcode, vindex, values = bestOfComposite(opcode, newOpcode, vindex, newVindex, values, newValues) - } - } - } - return opcode, vindex, values -} - -// compositePlanForCol builds a plan for a matched column in the LHS -// of a composite IN clause. -func (rb *route) compositePlanForCol(pb *primitiveBuilder, comparison *sqlparser.ComparisonExpr, coordinates []int) (opcode engine.Opcode, values sqlparser.Expr) { - rightTuple, ok := comparison.Right.(sqlparser.ValTuple) - if !ok { - return engine.Scatter, nil - } - retVal := make(sqlparser.ValTuple, len(rightTuple)) - for i, rval := range rightTuple { - val := tupleAccess(rval, coordinates) - if val == nil { - return engine.Scatter, nil - } - if !rb.exprIsValue(val) { - return engine.Scatter, nil - } - retVal[i] = val - } - return engine.MultiEqual, retVal -} - -// tupleAccess returns the value of the expression that corresponds -// to the specified coordinates. -func tupleAccess(expr sqlparser.Expr, coordinates []int) sqlparser.Expr { - tuple, _ := expr.(sqlparser.ValTuple) - for _, idx := range coordinates { - if idx >= len(tuple) { - return nil - } - expr = tuple[idx] - tuple, _ = expr.(sqlparser.ValTuple) - } - return expr -} - -// bestOfComposite returns the best of two composite IN clause plans. -func bestOfComposite(opcode1, opcode2 engine.Opcode, vindex1, vindex2 vindexes.SingleColumn, values1, values2 sqlparser.Expr) (opcode engine.Opcode, vindex vindexes.SingleColumn, values sqlparser.Expr) { - if opcode1 == engine.Scatter { - return opcode2, vindex2, values2 - } - if opcode2 == engine.Scatter { - return opcode1, vindex1, values1 - } - if vindex1.Cost() < vindex2.Cost() { - return opcode1, vindex1, values1 - } - return opcode2, vindex2, values2 -} - -// computeNotInPlan looks for null values to produce a SelectNone if found -func (rb *route) computeNotInPlan(right sqlparser.Expr) engine.Opcode { - switch node := right.(type) { - case sqlparser.ValTuple: - for _, n := range node { - if sqlparser.IsNull(n) { - return engine.None - } - } - } - - return engine.Scatter -} - -// exprIsValue returns true if the expression can be treated as a value -// for the routeOption. External references are treated as value. -func (rb *route) exprIsValue(expr sqlparser.Expr) bool { - if node, ok := expr.(*sqlparser.ColName); ok { - return node.Metadata.(*column).Origin() != rb - } - return sqlparser.IsValue(expr) -} - -// queryTimeout returns DirectiveQueryTimeout value if set, otherwise returns 0. -func queryTimeout(d *sqlparser.CommentDirectives) int { - val, _ := d.GetString(sqlparser.DirectiveQueryTimeout, "0") - if intVal, err := strconv.Atoi(val); err == nil { - return intVal - } - return 0 -} diff --git a/go/vt/vtgate/planbuilder/routeGen4.go b/go/vt/vtgate/planbuilder/routeGen4.go deleted file mode 100644 index a5b6982319e..00000000000 --- a/go/vt/vtgate/planbuilder/routeGen4.go +++ /dev/null @@ -1,255 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/vindexes" -) - -var _ logicalPlan = (*routeGen4)(nil) - -// routeGen4 is used to build a Route primitive. -// It's used to build one of the Select routes like -// SelectScatter, etc. Portions of the original Select AST -// are moved into this node, which will be used to build -// the final SQL for this route. -type routeGen4 struct { - gen4Plan - - // Select is the AST for the query fragment that will be - // executed by this route. - Select sqlparser.SelectStatement - - // condition stores the AST condition that will be used - // to resolve the ERoute Values field. - condition sqlparser.Expr - - // eroute is the primitive being built. - eroute *engine.Route - - // is the engine primitive we will return from the Primitive() method. Note that it could be different than eroute - enginePrimitive engine.Primitive - - // tables keeps track of which tables this route is covering - tables semantics.TableSet -} - -// Primitive implements the logicalPlan interface -func (rb *routeGen4) Primitive() engine.Primitive { - return rb.enginePrimitive -} - -// SetLimit adds a LIMIT clause to the route. -func (rb *routeGen4) SetLimit(limit *sqlparser.Limit) { - rb.Select.SetLimit(limit) -} - -// WireupGen4 implements the logicalPlan interface -func (rb *routeGen4) WireupGen4(ctx *plancontext.PlanningContext) error { - rb.prepareTheAST() - - // prepare the queries we will pass down - rb.eroute.Query = sqlparser.String(rb.Select) - buffer := sqlparser.NewTrackedBuffer(sqlparser.FormatImpossibleQuery) - node := buffer.WriteNode(rb.Select) - parsedQuery := node.ParsedQuery() - rb.eroute.FieldQuery = parsedQuery.Query - - // if we have a planable vindex lookup, let's extract it into its own primitive - planableVindex, ok := rb.eroute.RoutingParameters.Vindex.(vindexes.LookupPlanable) - if !ok { - rb.enginePrimitive = rb.eroute - return nil - } - - query, args := planableVindex.Query() - stmt, reserved, err := sqlparser.Parse2(query) - if err != nil { - return err - } - reservedVars := sqlparser.NewReservedVars("vtg", reserved) - - lookupPrimitive, err := gen4SelectStmtPlanner(query, querypb.ExecuteOptions_Gen4, stmt.(sqlparser.SelectStatement), reservedVars, ctx.VSchema) - if err != nil { - return vterrors.Wrapf(err, "failed to plan the lookup query: [%s]", query) - } - - rb.enginePrimitive = &engine.VindexLookup{ - Opcode: rb.eroute.Opcode, - Vindex: planableVindex, - Keyspace: rb.eroute.Keyspace, - Values: rb.eroute.Values, - SendTo: rb.eroute, - Arguments: args, - Lookup: lookupPrimitive.primitive, - } - - rb.eroute.RoutingParameters.Opcode = engine.ByDestination - rb.eroute.RoutingParameters.Values = nil - rb.eroute.RoutingParameters.Vindex = nil - - return nil -} - -// ContainsTables implements the logicalPlan interface -func (rb *routeGen4) ContainsTables() semantics.TableSet { - return rb.tables -} - -// OutputColumns implements the logicalPlan interface -func (rb *routeGen4) OutputColumns() []sqlparser.SelectExpr { - return sqlparser.GetFirstSelect(rb.Select).SelectExprs -} - -// prepareTheAST does minor fixups of the SELECT struct before producing the query string -func (rb *routeGen4) prepareTheAST() { - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { - switch node := node.(type) { - case *sqlparser.Select: - if len(node.SelectExprs) == 0 { - node.SelectExprs = []sqlparser.SelectExpr{ - &sqlparser.AliasedExpr{ - Expr: sqlparser.NewIntLiteral("1"), - }, - } - } - case *sqlparser.ComparisonExpr: - // 42 = colName -> colName = 42 - b := node.Operator == sqlparser.EqualOp - value := sqlparser.IsValue(node.Left) - name := sqlparser.IsColName(node.Right) - if b && - value && - name { - node.Left, node.Right = node.Right, node.Left - } - } - return true, nil - }, rb.Select) -} - -func (rb *routeGen4) isLocal(col *sqlparser.ColName) bool { - return col.Metadata.(*column).Origin() == rb -} - -// generateFieldQuery generates a query with an impossible where. -// This will be used on the RHS node to fetch field info if the LHS -// returns no result. -func (rb *routeGen4) generateFieldQuery(sel sqlparser.SelectStatement, jt *jointab) string { - formatter := func(buf *sqlparser.TrackedBuffer, node sqlparser.SQLNode) { - switch node := node.(type) { - case *sqlparser.ColName: - if !rb.isLocal(node) { - _, joinVar := jt.Lookup(node) - buf.WriteArg(":", joinVar) - return - } - case sqlparser.TableName: - if !sqlparser.SystemSchema(node.Qualifier.String()) { - node.Name.Format(buf) - return - } - node.Format(buf) - return - } - sqlparser.FormatImpossibleQuery(buf, node) - } - - buffer := sqlparser.NewTrackedBuffer(formatter) - node := buffer.WriteNode(sel) - query := node.ParsedQuery() - return query.Query -} - -// Rewrite implements the logicalPlan interface -func (rb *routeGen4) Rewrite(inputs ...logicalPlan) error { - if len(inputs) != 0 { - return vterrors.VT13001("route: wrong number of inputs") - } - return nil -} - -// Inputs implements the logicalPlan interface -func (rb *routeGen4) Inputs() []logicalPlan { - return []logicalPlan{} -} - -func (rb *routeGen4) isSingleShard() bool { - return rb.eroute.Opcode.IsSingleShard() -} - -func (rb *routeGen4) unionCanMerge(other *routeGen4, distinct bool) bool { - if rb.eroute.Keyspace.Name != other.eroute.Keyspace.Name { - return false - } - switch rb.eroute.Opcode { - case engine.Unsharded, engine.Reference: - return rb.eroute.Opcode == other.eroute.Opcode - case engine.DBA: - return other.eroute.Opcode == engine.DBA && - len(rb.eroute.SysTableTableSchema) == 0 && - len(rb.eroute.SysTableTableName) == 0 && - len(other.eroute.SysTableTableSchema) == 0 && - len(other.eroute.SysTableTableName) == 0 - case engine.EqualUnique: - // Check if they target the same shard. - if other.eroute.Opcode == engine.EqualUnique && rb.eroute.Vindex == other.eroute.Vindex && valEqual(rb.condition, other.condition) { - return true - } - case engine.Scatter: - return other.eroute.Opcode == engine.Scatter && !distinct - case engine.Next: - return false - } - return false -} - -func (rb *routeGen4) updateRoute(opcode engine.Opcode, vindex vindexes.SingleColumn, condition sqlparser.Expr) { - rb.eroute.Opcode = opcode - rb.eroute.Vindex = vindex - rb.condition = condition -} - -// computeNotInPlan looks for null values to produce a SelectNone if found -func (rb *routeGen4) computeNotInPlan(right sqlparser.Expr) engine.Opcode { - switch node := right.(type) { - case sqlparser.ValTuple: - for _, n := range node { - if sqlparser.IsNull(n) { - return engine.None - } - } - } - - return engine.Scatter -} - -// exprIsValue returns true if the expression can be treated as a value -// for the routeOption. External references are treated as value. -func (rb *routeGen4) exprIsValue(expr sqlparser.Expr) bool { - if node, ok := expr.(*sqlparser.ColName); ok { - return node.Metadata.(*column).Origin() != rb - } - return sqlparser.IsValue(expr) -} diff --git a/go/vt/vtgate/planbuilder/route_test.go b/go/vt/vtgate/planbuilder/route_test.go deleted file mode 100644 index 9f4c8fa3b97..00000000000 --- a/go/vt/vtgate/planbuilder/route_test.go +++ /dev/null @@ -1,168 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/vindexes" -) - -/* - -This test file only tests the V3 planner. It does not test the Subshard opcode - -For easy reference, opcodes are: - Unsharded 0 - EqualUnique 1 - Equal 2 - IN 3 - MultiEqual 4 - Scatter 5 - Next 6 - DBA 7 - Reference 8 - None 9 -*/ - -func TestJoinCanMerge(t *testing.T) { - testcases := [][]bool{ - {true, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - {false, true, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - - {false, false, false, false, false, false, false, false, false, false, false, false}, // this whole line is not tested - - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, true, true, false, false}, - {true, true, true, true, true /*not tested*/, false, true, true, true, true, true, true}, - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - } - - ks := &vindexes.Keyspace{} - for left, vals := range testcases { - for right, val := range vals { - name := fmt.Sprintf("%s:%s", engine.Opcode(left).String(), engine.Opcode(right).String()) - if left == int(engine.SubShard) || right == int(engine.SubShard) { - continue // not used by v3 - } - - t.Run(name, func(t *testing.T) { - lRoute := &route{ - // Setting condition will make SelectEqualUnique match itself. - condition: &sqlparser.ColName{}, - } - pb := &primitiveBuilder{ - plan: lRoute, - } - rRoute := &route{ - condition: &sqlparser.ColName{}, - } - lRoute.eroute = engine.NewSimpleRoute(engine.Opcode(left), ks) - rRoute.eroute = engine.NewSimpleRoute(engine.Opcode(right), ks) - assert.Equal(t, val, lRoute.JoinCanMerge(pb, rRoute, nil, nil), fmt.Sprintf("%v:%v", lRoute.eroute.RouteType(), rRoute.eroute.RouteType())) - }) - } - } -} - -func TestSubqueryCanMerge(t *testing.T) { - testcases := [][]bool{ - // US EU E IN ME subShard scatter nxt dba ref none byD - {true, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, // unsharded - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, // equalUnique - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, // equal - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, // in - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, // multiEqual - - {false, false, false, false, false, false, false, false, false, false, false, false, false}, // subshard - this whole line is not tested - - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, // scatter - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, // next - {false, false, false, false, false /*not tested*/, false, false, false, true, true, false, false}, // dba - {true, true, false, false, false /*not tested*/, false, false, true, true, true, false, false}, // reference - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, // none - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, // byDestination - } - - ks := &vindexes.Keyspace{} - lRoute := &route{} - pb := &primitiveBuilder{ - plan: lRoute, - } - rRoute := &route{} - for left, vals := range testcases { - lRoute.eroute = engine.NewSimpleRoute(engine.Opcode(left), ks) - for right, val := range vals { - name := fmt.Sprintf("%s:%s", engine.Opcode(left).String(), engine.Opcode(right).String()) - t.Run(name, func(t *testing.T) { - if left == int(engine.SubShard) || right == int(engine.SubShard) { - t.Skip("not used by v3") - } - - rRoute.eroute = engine.NewSimpleRoute(engine.Opcode(right), ks) - assert.Equal(t, val, lRoute.SubqueryCanMerge(pb, rRoute), fmt.Sprintf("%v:%v", lRoute.eroute.RouteType(), rRoute.eroute.RouteType())) - }) - } - } -} - -func TestUnionCanMerge(t *testing.T) { - testcases := [][]bool{ - {true, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, - - {false, false, false, false, false, false, false, false, false, false, false, false, false}, // this whole line is not tested - - {false, false, false, false, false /*not tested*/, false, true, false, false, false, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, true, false, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, true, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, - {false, false, false, false, false /*not tested*/, false, false, false, false, false, false, false}, - } - - ks := &vindexes.Keyspace{} - lRoute := &route{} - rRoute := &route{} - for left, vals := range testcases { - lRoute.eroute = engine.NewSimpleRoute(engine.Opcode(left), ks) - for right, val := range vals { - name := fmt.Sprintf("%s:%s", engine.Opcode(left).String(), engine.Opcode(right).String()) - t.Run(name, func(t *testing.T) { - if left == int(engine.SubShard) || right == int(engine.SubShard) { - t.Skip("not used by v3") - } - - rRoute.eroute = engine.NewSimpleRoute(engine.Opcode(right), ks) - assert.Equal(t, val, lRoute.unionCanMerge(rRoute, false), fmt.Sprintf("can't create a single route from these two inputs %v:%v", lRoute.eroute.RouteType(), rRoute.eroute.RouteType())) - }) - } - } -} diff --git a/go/vt/vtgate/planbuilder/select.go b/go/vt/vtgate/planbuilder/select.go index 7ba5e27dc07..52f0384981a 100644 --- a/go/vt/vtgate/planbuilder/select.go +++ b/go/vt/vtgate/planbuilder/select.go @@ -19,258 +19,115 @@ package planbuilder import ( "fmt" - "vitess.io/vitess/go/vt/log" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/key" - - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/vt/vtgate/evalengine" - + querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" ) -func buildSelectPlan(query string) stmtPlanner { - return func(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - sel := stmt.(*sqlparser.Select) - if sel.With != nil { +func gen4SelectStmtPlanner( + query string, + plannerVersion querypb.ExecuteOptions_PlannerVersion, + stmt sqlparser.SelectStatement, + reservedVars *sqlparser.ReservedVars, + vschema plancontext.VSchema, +) (*planResult, error) { + switch node := stmt.(type) { + case *sqlparser.Select: + if node.With != nil { return nil, vterrors.VT12001("WITH expression in SELECT statement") } + case *sqlparser.Union: + if node.With != nil { + return nil, vterrors.VT12001("WITH expression in UNION statement") + } + } + sel, isSel := stmt.(*sqlparser.Select) + if isSel { + // handle dual table for processing at vtgate. p, err := handleDualSelects(sel, vschema) if err != nil { return nil, err } if p != nil { - return newPlanResult(p), nil - } - - getPlan := func(sel *sqlparser.Select) (logicalPlan, error) { - pb := newPrimitiveBuilder(vschema, newJointab(reservedVars)) - if err := pb.processSelect(sel, reservedVars, nil, query); err != nil { - return nil, err - } - if err := pb.plan.Wireup(pb.plan, pb.jt); err != nil { - return nil, err + used := "dual" + keyspace, ksErr := vschema.DefaultKeyspace() + if ksErr == nil { + // we are just getting the ks to log the correct table use. + // no need to fail this if we can't find the default keyspace + used = keyspace.Name + ".dual" } - return pb.plan, nil + return newPlanResult(p, used), nil } - plan, err := getPlan(sel) - if err != nil { - return nil, err - } - - if shouldRetryAfterPredicateRewriting(plan) { - // by transforming the predicates to CNF, the planner will sometimes find better plans - primitive := rewriteToCNFAndReplan(stmt, getPlan) - if primitive != nil { - return newPlanResult(primitive), nil - } - } - primitive := plan.Primitive() - if rb, ok := primitive.(*engine.Route); ok { - // this is done because engine.Route doesn't handle the empty result well - // if it doesn't find a shard to send the query to. - // All other engine primitives can handle this, so we only need it when - // Route is the last (and only) instruction before the user sees a result - if isOnlyDual(sel) || (len(sel.GroupBy) == 0 && sel.SelectExprs.AllAggregation()) { - rb.NoRoutesSpecialHandling = true - } - } - - return newPlanResult(primitive), nil - } -} - -func rewriteToCNFAndReplan(stmt sqlparser.Statement, getPlan func(sel *sqlparser.Select) (logicalPlan, error)) engine.Primitive { - rewritten := sqlparser.RewritePredicate(stmt) - sel2, isSelect := rewritten.(*sqlparser.Select) - if isSelect { - log.Infof("retrying plan after cnf: %s", sqlparser.String(sel2)) - plan2, err := getPlan(sel2) - if err == nil && !shouldRetryAfterPredicateRewriting(plan2) { - // we only use this new plan if it's better than the old one we got - return plan2.Primitive() - } - } - return nil -} - -func shouldRetryAfterPredicateRewriting(plan logicalPlan) bool { - // if we have a I_S query, but have not found table_schema or table_name, let's try CNF - var opcode engine.Opcode - var sysTableTableName map[string]evalengine.Expr - var sysTableTableSchema []evalengine.Expr - - switch routePlan := plan.(type) { - case *routeGen4: - opcode = routePlan.eroute.Opcode - sysTableTableName = routePlan.eroute.SysTableTableName - sysTableTableSchema = routePlan.eroute.SysTableTableSchema - case *route: - opcode = routePlan.eroute.Opcode - sysTableTableName = routePlan.eroute.SysTableTableName - sysTableTableSchema = routePlan.eroute.SysTableTableSchema - default: - return false - } - - return opcode == engine.DBA && - len(sysTableTableName) == 0 && - len(sysTableTableSchema) == 0 -} - -// processSelect builds a primitive tree for the given query or subquery. -// The tree built by this function has the following general structure: -// -// The leaf nodes can be a route, vindexFunc or subquery. In the symtab, -// the tables map has columns that point to these leaf nodes. A subquery -// itself contains a logicalPlan tree, but it's opaque and is made to look -// like a table for the analysis of the current tree. -// -// The leaf nodes are usually tied together by join nodes. While the join -// nodes are built, they have ON clauses. Those are analyzed and pushed -// down into the leaf nodes as the tree is formed. Join nodes are formed -// during analysis of the FROM clause. -// -// During the WHERE clause analysis, the target leaf node is identified -// for each part, and the PushFilter function is used to push the condition -// down. The same strategy is used for the other clauses. -// -// So, a typical plan would either be a simple leaf node, or may consist -// of leaf nodes tied together by join nodes. -// -// If a query has aggregates that cannot be pushed down, an aggregator -// primitive is built. The current orderedAggregate primitive can only -// be built on top of a route. The orderedAggregate expects the rows -// to be ordered as they are returned. This work is performed by the -// underlying route. This means that a compatible ORDER BY clause -// can also be handled by this combination of primitives. In this case, -// the tree would consist of an orderedAggregate whose input is a route. -// -// If a query has an ORDER BY, but the route is a scatter, then the -// ordering is pushed down into the route itself. This results in a simple -// route primitive. -// -// The LIMIT clause is the last construct of a query. If it cannot be -// pushed into a route, then a primitive is created on top of any -// of the above trees to make it discard unwanted rows. -func (pb *primitiveBuilder) processSelect(sel *sqlparser.Select, reservedVars *sqlparser.ReservedVars, outer *symtab, query string) error { - // Check and error if there is any locking function present in select expression. - for _, expr := range sel.SelectExprs { - if aExpr, ok := expr.(*sqlparser.AliasedExpr); ok && sqlparser.IsLockingFunc(aExpr.Expr) { - return vterrors.VT12001(fmt.Sprintf("%v is allowed only with dual", sqlparser.String(aExpr))) - } - } - if sel.SQLCalcFoundRows { - if outer != nil || query == "" { - return vterrors.VT03008("SQL_CALC_FOUND_ROWS") + if sel.SQLCalcFoundRows && sel.Limit != nil { + return gen4planSQLCalcFoundRows(vschema, sel, query, reservedVars) } + // if there was no limit, we can safely ignore the SQLCalcFoundRows directive sel.SQLCalcFoundRows = false - if sel.Limit != nil { - plan, _, err := buildSQLCalcFoundRowsPlan(query, sel, reservedVars, pb.vschema, planSelectV3) - if err != nil { - return err - } - pb.plan = plan - return nil - } } - // Into is not supported in subquery. - if sel.Into != nil && (outer != nil || query == "") { - return vterrors.VT03008("INTO") + getPlan := func(selStatement sqlparser.SelectStatement) (logicalPlan, []string, error) { + return newBuildSelectPlan(selStatement, reservedVars, vschema, plannerVersion) } - var where sqlparser.Expr - if sel.Where != nil { - where = sel.Where.Expr - } - if err := pb.processTableExprs(sel.From, reservedVars, where); err != nil { - return err + plan, tablesUsed, err := getPlan(stmt) + if err != nil { + return nil, err } - if rb, ok := pb.plan.(*route); ok { - // TODO(sougou): this can probably be improved. - directives := sel.Comments.Directives() - rb.eroute.QueryTimeout = queryTimeout(directives) - if rb.eroute.TargetDestination != nil { - return vterrors.VT12001("SELECT with a target destination") - } - if directives.IsSet(sqlparser.DirectiveScatterErrorsAsWarnings) { - rb.eroute.ScatterErrorsAsWarnings = true + if shouldRetryAfterPredicateRewriting(plan) { + // by transforming the predicates to CNF, the planner will sometimes find better plans + plan2, tablesUsed := gen4PredicateRewrite(stmt, getPlan) + if plan2 != nil { + return newPlanResult(plan2.Primitive(), tablesUsed...), nil } } - // Set the outer symtab after processing of FROM clause. - // This is because correlation is not allowed there. - pb.st.Outer = outer - if sel.Where != nil { - if err := pb.pushFilter(sel.Where.Expr, sqlparser.WhereStr, reservedVars); err != nil { - return err - } - } - if err := pb.checkAggregates(sel); err != nil { - return err + primitive := plan.Primitive() + if !isSel { + return newPlanResult(primitive, tablesUsed...), nil } - if err := pb.pushSelectExprs(sel, reservedVars); err != nil { - return err - } - if sel.Having != nil { - if err := pb.pushFilter(sel.Having.Expr, sqlparser.HavingStr, reservedVars); err != nil { - return err + + // this is done because engine.Route doesn't handle the empty result well + // if it doesn't find a shard to send the query to. + // All other engine primitives can handle this, so we only need it when + // Route is the last (and only) instruction before the user sees a result + if isOnlyDual(sel) || (len(sel.GroupBy) == 0 && sel.SelectExprs.AllAggregation()) { + switch prim := primitive.(type) { + case *engine.Route: + prim.NoRoutesSpecialHandling = true + case *engine.VindexLookup: + prim.SendTo.NoRoutesSpecialHandling = true } } - if err := pb.pushOrderBy(sel.OrderBy); err != nil { - return err - } - if err := pb.pushLimit(sel.Limit); err != nil { - return err - } - - return setMiscFunc(pb.plan, sel) + return newPlanResult(primitive, tablesUsed...), nil } -func setMiscFunc(in logicalPlan, sel *sqlparser.Select) error { - _, err := visit(in, func(plan logicalPlan) (bool, logicalPlan, error) { - switch node := plan.(type) { - case *route: - err := copyCommentsAndLocks(node.Select, sel, node.eroute.Opcode) - if err != nil { - return false, nil, err - } - return true, node, nil - case *routeGen4: - err := copyCommentsAndLocks(node.Select, sel, node.eroute.Opcode) - if err != nil { - return false, nil, err - } - return true, node, nil - } - return true, plan, nil - }) - +func gen4planSQLCalcFoundRows(vschema plancontext.VSchema, sel *sqlparser.Select, query string, reservedVars *sqlparser.ReservedVars) (*planResult, error) { + ksName := "" + if ks, _ := vschema.DefaultKeyspace(); ks != nil { + ksName = ks.Name + } + semTable, err := semantics.Analyze(sel, ksName, vschema) if err != nil { - return err + return nil, err } - return nil -} + // record any warning as planner warning. + vschema.PlannerWarning(semTable.Warning) -func copyCommentsAndLocks(statement sqlparser.SelectStatement, sel *sqlparser.Select, opcode engine.Opcode) error { - query := sqlparser.GetFirstSelect(statement) - query.Comments = sel.Comments - query.Lock = sel.Lock - if sel.Into != nil { - if opcode != engine.Unsharded { - return vterrors.VT12001("INTO on sharded keyspace") - } - query.Into = sel.Into + plan, tablesUsed, err := buildSQLCalcFoundRowsPlan(query, sel, reservedVars, vschema) + if err != nil { + return nil, err } - return nil + return newPlanResult(plan.Primitive(), tablesUsed...), nil } func buildSQLCalcFoundRowsPlan( @@ -278,9 +135,8 @@ func buildSQLCalcFoundRowsPlan( sel *sqlparser.Select, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, - planSelect func(reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, sel *sqlparser.Select) (*jointab, logicalPlan, []string, error), ) (logicalPlan, []string, error) { - ljt, limitPlan, _, err := planSelect(reservedVars, vschema, sel) + limitPlan, _, err := newBuildSelectPlan(sel, reservedVars, vschema, Gen4) if err != nil { return nil, nil, err } @@ -320,18 +176,218 @@ func buildSQLCalcFoundRowsPlan( reservedVars2 := sqlparser.NewReservedVars("vtg", reserved2) - cjt, countPlan, tablesUsed, err := planSelect(reservedVars2, vschema, sel2) + countPlan, tablesUsed, err := newBuildSelectPlan(sel2, reservedVars2, vschema, Gen4) + if err != nil { + return nil, nil, err + } + return &sqlCalcFoundRows{LimitQuery: limitPlan, CountQuery: countPlan}, tablesUsed, nil +} + +func gen4PredicateRewrite(stmt sqlparser.Statement, getPlan func(selStatement sqlparser.SelectStatement) (logicalPlan, []string, error)) (logicalPlan, []string) { + rewritten, isSel := sqlparser.RewritePredicate(stmt).(sqlparser.SelectStatement) + if !isSel { + // Fail-safe code, should never happen + return nil, nil + } + plan2, op, err := getPlan(rewritten) + if err == nil && !shouldRetryAfterPredicateRewriting(plan2) { + // we only use this new plan if it's better than the old one we got + return plan2, op + } + return nil, nil +} + +func newBuildSelectPlan( + selStmt sqlparser.SelectStatement, + reservedVars *sqlparser.ReservedVars, + vschema plancontext.VSchema, + version querypb.ExecuteOptions_PlannerVersion, +) (plan logicalPlan, tablesUsed []string, err error) { + ctx, err := plancontext.CreatePlanningContext(selStmt, reservedVars, vschema, version) + if err != nil { + return nil, nil, err + } + + if ks, _ := ctx.SemTable.SingleUnshardedKeyspace(); ks != nil { + plan, tablesUsed, err = selectUnshardedShortcut(ctx, selStmt, ks) + if err != nil { + return nil, nil, err + } + plan = pushCommentDirectivesOnPlan(plan, selStmt) + return plan, tablesUsed, err + } + + // From this point on, we know it is not an unsharded query and return the NotUnshardedErr if there is any + if ctx.SemTable.NotUnshardedErr != nil { + return nil, nil, ctx.SemTable.NotUnshardedErr + } + + op, err := createSelectOperator(ctx, selStmt, reservedVars) if err != nil { return nil, nil, err } - return &sqlCalcFoundRows{LimitQuery: limitPlan, CountQuery: countPlan, ljt: ljt, cjt: cjt}, tablesUsed, nil + + plan, err = transformToLogicalPlan(ctx, op) + if err != nil { + return nil, nil, err + } + + optimizePlan(plan) + + if err = plan.Wireup(ctx); err != nil { + return nil, nil, err + } + return pushCommentDirectivesOnPlan(plan, selStmt), operators.TablesUsed(op), nil +} + +func createSelectOperator(ctx *plancontext.PlanningContext, selStmt sqlparser.SelectStatement, reservedVars *sqlparser.ReservedVars) (ops.Operator, error) { + err := queryRewrite(ctx.SemTable, reservedVars, selStmt) + if err != nil { + return nil, err + } + + return operators.PlanQuery(ctx, selStmt) +} + +// optimizePlan removes unnecessary simpleProjections that have been created while planning +func optimizePlan(plan logicalPlan) { + for _, lp := range plan.Inputs() { + optimizePlan(lp) + } + + this, ok := plan.(*simpleProjection) + if !ok { + return + } + + input, ok := this.input.(*simpleProjection) + if !ok { + return + } + + for i, col := range this.eSimpleProj.Cols { + this.eSimpleProj.Cols[i] = input.eSimpleProj.Cols[col] + } + this.input = input.input +} + +func planLimit(limit *sqlparser.Limit, plan logicalPlan) (logicalPlan, error) { + if limit == nil { + return plan, nil + } + rb, ok := plan.(*route) + if ok && rb.isSingleShard() { + rb.SetLimit(limit) + return plan, nil + } + + lPlan, err := createLimit(plan, limit) + if err != nil { + return nil, err + } + + // visit does not modify the plan. + _, err = visit(lPlan, setUpperLimit) + if err != nil { + return nil, err + } + return lPlan, nil +} + +func planHorizon(ctx *plancontext.PlanningContext, plan logicalPlan, in sqlparser.SelectStatement, truncateColumns bool) (logicalPlan, error) { + switch node := in.(type) { + case *sqlparser.Select: + hp := horizonPlanning{ + sel: node, + } + + replaceSubQuery(ctx, node) + var err error + plan, err = hp.planHorizon(ctx, plan, truncateColumns) + if err != nil { + return nil, err + } + plan, err = planLimit(node.Limit, plan) + if err != nil { + return nil, err + } + case *sqlparser.Union: + var err error + rb, isRoute := plan.(*route) + if !isRoute && ctx.SemTable.NotSingleRouteErr != nil { + return nil, ctx.SemTable.NotSingleRouteErr + } + if isRoute && rb.isSingleShard() { + err = planSingleRoutePlan(node, rb) + } else { + plan, err = planOrderByOnUnion(ctx, plan, node) + } + if err != nil { + return nil, err + } + + plan, err = planLimit(node.Limit, plan) + if err != nil { + return nil, err + } + } + return plan, nil + } -func planSelectV3(reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, sel *sqlparser.Select) (*jointab, logicalPlan, []string, error) { - ljt := newJointab(reservedVars) - frpb := newPrimitiveBuilder(vschema, ljt) - err := frpb.processSelect(sel, reservedVars, nil, "") - return ljt, frpb.plan, nil, err +func planOrderByOnUnion(ctx *plancontext.PlanningContext, plan logicalPlan, union *sqlparser.Union) (logicalPlan, error) { + qp, err := operators.CreateQPFromSelectStatement(ctx, union) + if err != nil { + return nil, err + } + hp := horizonPlanning{ + qp: qp, + } + if len(qp.OrderExprs) > 0 { + plan, err = hp.planOrderBy(ctx, qp.OrderExprs, plan) + if err != nil { + return nil, err + } + } + return plan, nil +} + +func isOnlyDual(sel *sqlparser.Select) bool { + if sel.Where != nil || sel.GroupBy != nil || sel.Having != nil || sel.Limit != nil || sel.OrderBy != nil { + // we can only deal with queries without any other subclauses - just SELECT and FROM, nothing else is allowed + return false + } + + if len(sel.From) > 1 { + return false + } + table, ok := sel.From[0].(*sqlparser.AliasedTableExpr) + if !ok { + return false + } + tableName, ok := table.Expr.(sqlparser.TableName) + + return ok && tableName.Name.String() == "dual" && tableName.Qualifier.IsEmpty() +} + +func shouldRetryAfterPredicateRewriting(plan logicalPlan) bool { + // if we have a I_S query, but have not found table_schema or table_name, let's try CNF + var opcode engine.Opcode + var sysTableTableName map[string]evalengine.Expr + var sysTableTableSchema []evalengine.Expr + + switch routePlan := plan.(type) { + case *route: + opcode = routePlan.eroute.Opcode + sysTableTableName = routePlan.eroute.SysTableTableName + sysTableTableSchema = routePlan.eroute.SysTableTableSchema + default: + return false + } + + return opcode == engine.DBA && + len(sysTableTableName) == 0 && + len(sysTableTableSchema) == 0 } func handleDualSelects(sel *sqlparser.Select, vschema plancontext.VSchema) (engine.Primitive, error) { @@ -364,7 +420,7 @@ func handleDualSelects(sel *sqlparser.Select, vschema plancontext.VSchema) (engi if len(lockFunctions) > 0 { return nil, vterrors.VT12001(fmt.Sprintf("LOCK function and other expression: [%s] in same select query", sqlparser.String(expr))) } - exprs[i], err = evalengine.Translate(expr.Expr, evalengine.LookupDefaultCollation(vschema.ConnCollation())) + exprs[i], err = evalengine.Translate(expr.Expr, &evalengine.Config{Collation: vschema.ConnCollation()}) if err != nil { return nil, nil } @@ -396,236 +452,3 @@ func buildLockingPrimitive(sel *sqlparser.Select, vschema plancontext.VSchema, l LockFunctions: lockFunctions, }, nil } - -func isOnlyDual(sel *sqlparser.Select) bool { - if sel.Where != nil || sel.GroupBy != nil || sel.Having != nil || sel.Limit != nil || sel.OrderBy != nil { - // we can only deal with queries without any other subclauses - just SELECT and FROM, nothing else is allowed - return false - } - - if len(sel.From) > 1 { - return false - } - table, ok := sel.From[0].(*sqlparser.AliasedTableExpr) - if !ok { - return false - } - tableName, ok := table.Expr.(sqlparser.TableName) - - return ok && tableName.Name.String() == "dual" && tableName.Qualifier.IsEmpty() -} - -// pushFilter identifies the target route for the specified bool expr, -// pushes it down, and updates the route info if the new constraint improves -// the primitive. This function can push to a WHERE or HAVING clause. -func (pb *primitiveBuilder) pushFilter(in sqlparser.Expr, whereType string, reservedVars *sqlparser.ReservedVars) error { - filters := sqlparser.SplitAndExpression(nil, in) - reorderBySubquery(filters) - for _, filter := range filters { - pullouts, origin, expr, err := pb.findOrigin(filter, reservedVars) - if err != nil { - return err - } - rut, isRoute := origin.(*route) - if isRoute && rut.eroute.Opcode == engine.DBA { - err := pb.findSysInfoRoutingPredicates(expr, rut, reservedVars) - if err != nil { - return err - } - } - // The returned expression may be complex. Resplit before pushing. - for _, subexpr := range sqlparser.SplitAndExpression(nil, expr) { - pb.plan, err = planFilter(pb, pb.plan, subexpr, whereType, origin) - if err != nil { - return err - } - } - pb.addPullouts(pullouts) - } - return nil -} - -// reorderBySubquery reorders the filters by pushing subqueries -// to the end. This allows the non-subquery filters to be -// pushed first because they can potentially improve the routing -// plan, which can later allow a filter containing a subquery -// to successfully merge with the corresponding route. -func reorderBySubquery(filters []sqlparser.Expr) { - max := len(filters) - for i := 0; i < max; i++ { - if !hasSubquery(filters[i]) { - continue - } - saved := filters[i] - for j := i; j < len(filters)-1; j++ { - filters[j] = filters[j+1] - } - filters[len(filters)-1] = saved - max-- - } -} - -// addPullouts adds the pullout subqueries to the primitiveBuilder. -func (pb *primitiveBuilder) addPullouts(pullouts []*pulloutSubquery) { - for _, pullout := range pullouts { - pullout.setUnderlying(pb.plan) - pb.plan = pullout - pb.plan.Reorder(0) - } -} - -// pushSelectExprs identifies the target route for the -// select expressions and pushes them down. -func (pb *primitiveBuilder) pushSelectExprs(sel *sqlparser.Select, reservedVars *sqlparser.ReservedVars) error { - resultColumns, err := pb.pushSelectRoutes(sel.SelectExprs, reservedVars) - if err != nil { - return err - } - pb.st.SetResultColumns(resultColumns) - return pb.pushGroupBy(sel) -} - -// pushSelectRoutes is a convenience function that pushes all the select -// expressions and returns the list of resultColumns generated for it. -func (pb *primitiveBuilder) pushSelectRoutes(selectExprs sqlparser.SelectExprs, reservedVars *sqlparser.ReservedVars) ([]*resultColumn, error) { - resultColumns := make([]*resultColumn, 0, len(selectExprs)) - for _, node := range selectExprs { - switch node := node.(type) { - case *sqlparser.AliasedExpr: - pullouts, origin, expr, err := pb.findOrigin(node.Expr, reservedVars) - if err != nil { - return nil, err - } - node.Expr = expr - newBuilder, rc, _, err := planProjection(pb, pb.plan, node, origin) - if err != nil { - return nil, err - } - pb.plan = newBuilder - resultColumns = append(resultColumns, rc) - pb.addPullouts(pullouts) - case *sqlparser.StarExpr: - var expanded bool - var err error - resultColumns, expanded, err = pb.expandStar(resultColumns, node) - if err != nil { - return nil, err - } - if expanded { - continue - } - // We'll allow select * for simple routes. - rb, ok := pb.plan.(*route) - if !ok { - return nil, vterrors.VT12001("'*' expression in cross-shard query") - } - // Validate keyspace reference if any. - if !node.TableName.IsEmpty() { - if _, err := pb.st.FindTable(node.TableName); err != nil { - return nil, err - } - } - resultColumns = append(resultColumns, rb.PushAnonymous(node)) - case *sqlparser.Nextval: - rb, ok := pb.plan.(*route) - if !ok { - // This code is unreachable because the parser doesn't allow joins for next val statements. - return nil, vterrors.VT12001("SELECT NEXT query in cross-shard query") - } - if rb.eroute.Opcode != engine.Next { - return nil, vterrors.VT03018() - } - rb.eroute.Opcode = engine.Next - resultColumns = append(resultColumns, rb.PushAnonymous(node)) - default: - return nil, vterrors.VT13001(fmt.Sprintf("unexpected SELECT expression type: %T", node)) - } - } - return resultColumns, nil -} - -// expandStar expands a StarExpr and pushes the expanded -// expressions down if the tables have authoritative column lists. -// If not, it returns false. -// This function breaks the abstraction a bit: it directly sets the -// the Metadata for newly created expressions. In all other cases, -// the Metadata is set through a symtab Find. -func (pb *primitiveBuilder) expandStar(inrcs []*resultColumn, expr *sqlparser.StarExpr) (outrcs []*resultColumn, expanded bool, err error) { - tables := pb.st.AllTables() - if tables == nil { - // no table metadata available. - return inrcs, false, nil - } - if expr.TableName.IsEmpty() { - for _, t := range tables { - // All tables must have authoritative column lists. - if !t.isAuthoritative { - return inrcs, false, nil - } - } - singleTable := false - if len(tables) == 1 { - singleTable = true - } - for _, t := range tables { - for _, col := range t.columnNames { - var expr *sqlparser.AliasedExpr - if singleTable { - // If there's only one table, we use unqualified column names. - expr = &sqlparser.AliasedExpr{ - Expr: &sqlparser.ColName{ - Metadata: t.columns[col.Lowered()], - Name: col, - }, - } - } else { - // If a and b have id as their column, then - // select * from a join b should result in - // select a.id as id, b.id as id from a join b. - expr = &sqlparser.AliasedExpr{ - Expr: &sqlparser.ColName{ - Metadata: t.columns[col.Lowered()], - Name: col, - Qualifier: t.alias, - }, - As: col, - } - } - newBuilder, rc, _, err := planProjection(pb, pb.plan, expr, t.Origin()) - if err != nil { - // Unreachable because PushSelect won't fail on ColName. - return inrcs, false, err - } - pb.plan = newBuilder - inrcs = append(inrcs, rc) - } - } - return inrcs, true, nil - } - - // Expression qualified with table name. - t, err := pb.st.FindTable(expr.TableName) - if err != nil { - return inrcs, false, err - } - if !t.isAuthoritative { - return inrcs, false, nil - } - for _, col := range t.columnNames { - expr := &sqlparser.AliasedExpr{ - Expr: &sqlparser.ColName{ - Metadata: t.columns[col.Lowered()], - Name: col, - Qualifier: expr.TableName, - }, - } - newBuilder, rc, _, err := planProjection(pb, pb.plan, expr, t.Origin()) - if err != nil { - // Unreachable because PushSelect won't fail on ColName. - return inrcs, false, err - } - pb.plan = newBuilder - inrcs = append(inrcs, rc) - } - return inrcs, true, nil -} diff --git a/go/vt/vtgate/planbuilder/semi_join.go b/go/vt/vtgate/planbuilder/semi_join.go index 44d99942fe4..5d530c7bce4 100644 --- a/go/vt/vtgate/planbuilder/semi_join.go +++ b/go/vt/vtgate/planbuilder/semi_join.go @@ -30,7 +30,6 @@ var _ logicalPlan = (*semiJoin)(nil) // This gets built if a rhs is correlated and can // be pulled out but requires some variables to be supplied from outside. type semiJoin struct { - gen4Plan rhs logicalPlan lhs logicalPlan cols []int @@ -63,11 +62,11 @@ func (ps *semiJoin) Primitive() engine.Primitive { } // WireupGen4 implements the logicalPlan interface -func (ps *semiJoin) WireupGen4(ctx *plancontext.PlanningContext) error { - if err := ps.lhs.WireupGen4(ctx); err != nil { +func (ps *semiJoin) Wireup(ctx *plancontext.PlanningContext) error { + if err := ps.lhs.Wireup(ctx); err != nil { return err } - return ps.rhs.WireupGen4(ctx) + return ps.rhs.Wireup(ctx) } // Rewrite implements the logicalPlan interface diff --git a/go/vt/vtgate/planbuilder/set.go b/go/vt/vtgate/planbuilder/set.go index 8508a791d41..7b1e584132d 100644 --- a/go/vt/vtgate/planbuilder/set.go +++ b/go/vt/vtgate/planbuilder/set.go @@ -261,7 +261,7 @@ func extractValue(expr *sqlparser.SetExpr, boolean bool) (string, error) { } case *sqlparser.ColName: // this is a little of a hack. it's used when the setting is not a normal expression, but rather - // an enumeration, such as utf8, utf8mb4, etc + // an enumeration, such as utf8mb3, utf8mb4, etc switch node.Name.Lowered() { case "on": return "1", nil diff --git a/go/vt/vtgate/planbuilder/show.go b/go/vt/vtgate/planbuilder/show.go index 49208af21f0..b45ae23bfbc 100644 --- a/go/vt/vtgate/planbuilder/show.go +++ b/go/vt/vtgate/planbuilder/show.go @@ -21,27 +21,24 @@ import ( "regexp" "sort" "strings" - - vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "sync" "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/vtgate/vindexes" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/vindexes" ) const ( - utf8 = "utf8" - utf8mb4 = "utf8mb4" - both = "both" charset = "charset" ) @@ -99,7 +96,7 @@ func buildShowBasicPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) case sqlparser.StatusGlobal, sqlparser.StatusSession: return buildSendAnywherePlan(show, vschema) case sqlparser.VitessMigrations: - return buildShowVMigrationsPlan(show, vschema) + return buildShowVitessMigrationsPlan(show, vschema) case sqlparser.VGtidExecGlobal: return buildShowVGtidPlan(show, vschema) case sqlparser.GtidExecGlobal: @@ -134,16 +131,13 @@ func buildShowTargetPlan(vschema plancontext.VSchema) (engine.Primitive, error) func buildCharsetPlan(show *sqlparser.ShowBasic) (engine.Primitive, error) { fields := buildVarCharFields("Charset", "Description", "Default collation") - maxLenField := &querypb.Field{Name: "Maxlen", Type: sqltypes.Int32} + maxLenField := &querypb.Field{Name: "Maxlen", Type: sqltypes.Uint32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG | querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)} fields = append(fields, maxLenField) - - charsets := []string{utf8, utf8mb4} - rows, err := generateCharsetRows(show.Filter, charsets) + cs, err := generateCharsetRows(show.Filter) if err != nil { return nil, err } - - return engine.NewRowsPrimitive(rows, fields), nil + return engine.NewRowsPrimitive(cs, fields), nil } func buildSendAnywherePlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) (engine.Primitive, error) { @@ -247,8 +241,9 @@ func buildDBPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) (engine return engine.NewRowsPrimitive(rows, buildVarCharFields("Database")), nil } -// buildShowVMigrationsPlan serves `SHOW VITESS_MIGRATIONS ...` queries. It invokes queries on _vt.schema_migrations on all PRIMARY tablets on keyspace's shards. -func buildShowVMigrationsPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) (engine.Primitive, error) { +// buildShowVitessMigrationsPlan serves `SHOW VITESS_MIGRATIONS ...` queries. +// It sends down the SHOW command to the PRIMARY shard tablets (on all shards) +func buildShowVitessMigrationsPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) (engine.Primitive, error) { dest, ks, tabletType, err := vschema.TargetDestination(show.DbName.String()) if err != nil { return nil, err @@ -265,20 +260,11 @@ func buildShowVMigrationsPlan(show *sqlparser.ShowBasic, vschema plancontext.VSc dest = key.DestinationAllShards{} } - sql := "SELECT * FROM _vt.schema_migrations" - - if show.Filter != nil { - if show.Filter.Filter != nil { - sql += fmt.Sprintf(" where %s", sqlparser.String(show.Filter.Filter)) - } else if show.Filter.Like != "" { - lit := sqlparser.String(sqlparser.NewStrLiteral(show.Filter.Like)) - sql += fmt.Sprintf(" where migration_uuid LIKE %s OR migration_context LIKE %s OR migration_status LIKE %s", lit, lit, lit) - } - } return &engine.Send{ Keyspace: ks, TargetDestination: dest, - Query: sql, + Query: sqlparser.String(show), + IsDML: false, }, nil } @@ -332,7 +318,7 @@ func buildVarCharFields(names ...string) []*querypb.Field { fields[i] = &querypb.Field{ Name: v, Type: sqltypes.VarChar, - Charset: collations.CollationUtf8ID, + Charset: uint32(collations.SystemCollation.Collation), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG), } } @@ -347,20 +333,13 @@ func buildVarCharRow(values ...string) []sqltypes.Value { return row } -func generateCharsetRows(showFilter *sqlparser.ShowFilter, colNames []string) ([][]sqltypes.Value, error) { +func generateCharsetRows(showFilter *sqlparser.ShowFilter) ([][]sqltypes.Value, error) { if showFilter == nil { - return buildCharsetRows(both), nil + return charsets(), nil } - var filteredColName string - var err error - if showFilter.Like != "" { - filteredColName, err = checkLikeOpt(showFilter.Like, colNames) - if err != nil { - return nil, err - } - + return filterLike(showFilter.Like, charsets()) } else { cmpExp, ok := showFilter.Filter.(*sqlparser.ComparisonExpr) if !ok { @@ -382,61 +361,84 @@ func generateCharsetRows(showFilter *sqlparser.ShowFilter, colNames []string) ([ switch cmpExp.Operator { case sqlparser.EqualOp: - for _, colName := range colNames { + for _, row := range charsets() { + colName := row[0].ToString() if rightString == colName { - filteredColName = colName + return [][]sqltypes.Value{row}, nil } } + return nil, nil case sqlparser.LikeOp: - filteredColName, err = checkLikeOpt(rightString, colNames) - if err != nil { - return nil, err - } + return filterLike(rightString, charsets()) } + } else { + return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.BadFieldError, "Unknown column '%s' in 'where clause'", left.Name.String()) } - } - return buildCharsetRows(filteredColName), nil + return charsets(), nil } -func buildCharsetRows(colName string) [][]sqltypes.Value { - row0 := buildVarCharRow( - "utf8", - "UTF-8 Unicode", - "utf8_general_ci") - row0 = append(row0, sqltypes.NewInt32(3)) - row1 := buildVarCharRow( - "utf8mb4", - "UTF-8 Unicode", - "utf8mb4_general_ci") - row1 = append(row1, sqltypes.NewInt32(4)) - - switch colName { - case utf8: - return [][]sqltypes.Value{row0} - case utf8mb4: - return [][]sqltypes.Value{row1} - case both: - return [][]sqltypes.Value{row0, row1} - } - - return [][]sqltypes.Value{} +var once sync.Once +var charsetRows [][]sqltypes.Value + +func charsets() [][]sqltypes.Value { + once.Do(func() { + charsetRows = [][]sqltypes.Value{ + append(buildVarCharRow("armscii8", "ARMSCII-8 Armenian", "armscii8_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("ascii", "US ASCII", "ascii_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("binary", "Binary pseudo charset", "binary"), sqltypes.NewUint32(1)), + append(buildVarCharRow("cp1250", "Windows Central European", "cp1250_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("cp1251", "Windows Cyrillic", "cp1251_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("cp1256", "Windows Arabic", "cp1256_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("cp1257", "Windows Baltic", "cp1257_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("cp850", "DOS West European", "cp850_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("cp852", "DOS Central European", "cp852_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("cp866", "DOS Russian", "cp866_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("cp932", "SJIS for Windows Japanese", "cp932_japanese_ci"), sqltypes.NewUint32(2)), + append(buildVarCharRow("dec8", "DEC West European", "dec8_swedish_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("eucjpms", "UJIS for Windows Japanese", "eucjpms_japanese_ci"), sqltypes.NewUint32(3)), + append(buildVarCharRow("euckr", "EUC-KR Korean", "euckr_korean_ci"), sqltypes.NewUint32(2)), + append(buildVarCharRow("gb2312", "GB2312 Simplified Chinese", "gb2312_chinese_ci"), sqltypes.NewUint32(2)), + append(buildVarCharRow("geostd8", "GEOSTD8 Georgian", "geostd8_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("greek", "ISO 8859-7 Greek", "greek_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("hebrew", "ISO 8859-8 Hebrew", "hebrew_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("hp8", "HP West European", "hp8_english_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("keybcs2", "DOS Kamenicky Czech-Slovak", "keybcs2_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("koi8r", "KOI8-R Relcom Russian", "koi8r_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("koi8u", "KOI8-U Ukrainian", "koi8u_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("latin1", "cp1252 West European", "latin1_swedish_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("latin2", "ISO 8859-2 Central European", "latin2_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("latin5", "ISO 8859-9 Turkish", "latin5_turkish_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("latin7", "ISO 8859-13 Baltic", "latin7_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("macce", "Mac Central European", "macce_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("macroman", "Mac West European", "macroman_general_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("sjis", "Shift-JIS Japanese", "sjis_japanese_ci"), sqltypes.NewUint32(2)), + append(buildVarCharRow("swe7", "7bit Swedish", "swe7_swedish_ci"), sqltypes.NewUint32(1)), + append(buildVarCharRow("ucs2", "UCS-2 Unicode", "ucs2_general_ci"), sqltypes.NewUint32(2)), + append(buildVarCharRow("ujis", "EUC-JP Japanese", "ujis_japanese_ci"), sqltypes.NewUint32(3)), + append(buildVarCharRow("utf16", "UTF-16 Unicode", "utf16_general_ci"), sqltypes.NewUint32(4)), + append(buildVarCharRow("utf16le", "UTF-16LE Unicode", "utf16le_general_ci"), sqltypes.NewUint32(4)), + append(buildVarCharRow("utf32", "UTF-32 Unicode", "utf32_general_ci"), sqltypes.NewUint32(4)), + append(buildVarCharRow("utf8mb3", "UTF-8 Unicode", "utf8mb3_general_ci"), sqltypes.NewUint32(3)), + append(buildVarCharRow("utf8mb4", "UTF-8 Unicode", "utf8mb4_0900_ai_ci"), sqltypes.NewUint32(4)), + } + }) + + return charsetRows } -func checkLikeOpt(likeOpt string, colNames []string) (string, error) { - likeRegexp := strings.ReplaceAll(likeOpt, "%", ".*") - for _, v := range colNames { - match, err := regexp.MatchString(likeRegexp, v) - if err != nil { - return "", err - } - if match { - return v, nil +func filterLike(likeOpt string, charsets [][]sqltypes.Value) ([][]sqltypes.Value, error) { + likeRegexp := sqlparser.LikeToRegexp(likeOpt) + var results [][]sqltypes.Value + for _, row := range charsets { + colName := row[0].ToString() + if likeRegexp.MatchString(colName) { + results = append(results, row) } } - return "", nil + return results, nil } func buildShowCreatePlan(show *sqlparser.ShowCreate, vschema plancontext.VSchema) (engine.Primitive, error) { @@ -555,13 +557,8 @@ func buildShowVGtidPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) return nil, err } return &engine.OrderedAggregate{ - PreProcess: true, Aggregates: []*engine.AggregateParams{ - { - Opcode: engine.AggregateGtid, - Col: 1, - Alias: "global vgtid_executed", - }, + engine.NewAggregateParam(popcode.AggregateGtid, 1, "global vgtid_executed"), }, TruncateColumnCount: 2, Input: send, @@ -593,9 +590,9 @@ func buildWarnings() (engine.Primitive, error) { f := func(sa engine.SessionActions) (*sqltypes.Result, error) { fields := []*querypb.Field{ - {Name: "Level", Type: sqltypes.VarChar}, - {Name: "Code", Type: sqltypes.Uint16}, - {Name: "Message", Type: sqltypes.VarChar}, + {Name: "Level", Type: sqltypes.VarChar, Charset: uint32(collations.SystemCollation.Collation)}, + {Name: "Code", Type: sqltypes.Uint16, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG)}, + {Name: "Message", Type: sqltypes.VarChar, Charset: uint32(collations.SystemCollation.Collation)}, } warns := sa.GetWarnings() diff --git a/go/vt/vtgate/planbuilder/show_test.go b/go/vt/vtgate/planbuilder/show_test.go index 5d84a77c0a9..b36133bb1c7 100644 --- a/go/vt/vtgate/planbuilder/show_test.go +++ b/go/vt/vtgate/planbuilder/show_test.go @@ -21,16 +21,20 @@ import ( "fmt" "testing" + "vitess.io/vitess/go/test/vschemawrapper" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vindexes" ) func TestBuildDBPlan(t *testing.T) { - vschema := &vschemaWrapper{ - keyspace: &vindexes.Keyspace{Name: "main"}, + vschema := &vschemawrapper.VSchemaWrapper{ + Keyspace: &vindexes.Keyspace{Name: "main"}, } testCases := []struct { @@ -61,56 +65,56 @@ func TestBuildDBPlan(t *testing.T) { } func TestGenerateCharsetRows(t *testing.T) { - rows := make([][]sqltypes.Value, 0, 4) rows0 := [][]sqltypes.Value{ append(buildVarCharRow( - "utf8", + "utf8mb3", "UTF-8 Unicode", - "utf8_general_ci"), - sqltypes.NewInt32(3)), + "utf8mb3_general_ci"), + sqltypes.NewUint32(3)), } rows1 := [][]sqltypes.Value{ append(buildVarCharRow( "utf8mb4", "UTF-8 Unicode", - "utf8mb4_general_ci"), - sqltypes.NewInt32(4)), + collations.Local().LookupName(collations.Default())), + sqltypes.NewUint32(4)), } rows2 := [][]sqltypes.Value{ append(buildVarCharRow( - "utf8", + "utf8mb3", "UTF-8 Unicode", - "utf8_general_ci"), - sqltypes.NewInt32(3)), + "utf8mb3_general_ci"), + sqltypes.NewUint32(3)), append(buildVarCharRow( "utf8mb4", "UTF-8 Unicode", - "utf8mb4_general_ci"), - sqltypes.NewInt32(4)), + collations.Local().LookupName(collations.Default())), + sqltypes.NewUint32(4)), } testcases := []struct { input string expected [][]sqltypes.Value }{ - {input: "show charset", expected: rows2}, - {input: "show character set", expected: rows2}, - {input: "show charset where charset like 'foo%'", expected: rows}, - {input: "show charset where charset like 'utf8%'", expected: rows0}, - {input: "show charset where charset = 'utf8'", expected: rows0}, - {input: "show charset where charset = 'foo%'", expected: rows}, + {input: "show charset", expected: charsets()}, + {input: "show character set", expected: charsets()}, + {input: "show charset where charset like 'foo%'", expected: nil}, + {input: "show charset where charset like 'utf8%'", expected: rows2}, + {input: "show charset where charset like 'utf8mb3%'", expected: rows0}, + {input: "show charset where charset like 'foo%'", expected: nil}, + {input: "show character set where charset like '%foo'", expected: nil}, + {input: "show charset where charset = 'utf8mb3'", expected: rows0}, + {input: "show charset where charset = 'foo%'", expected: nil}, {input: "show charset where charset = 'utf8mb4'", expected: rows1}, } - charsets := []string{"utf8", "utf8mb4"} - for _, tc := range testcases { t.Run(tc.input, func(t *testing.T) { stmt, err := sqlparser.Parse(tc.input) require.NoError(t, err) match := stmt.(*sqlparser.Show).Internal.(*sqlparser.ShowBasic) filter := match.Filter - actual, err := generateCharsetRows(filter, charsets) + actual, err := generateCharsetRows(filter) require.NoError(t, err) require.Equal(t, tc.expected, actual) }) diff --git a/go/vt/vtgate/planbuilder/simple_projection.go b/go/vt/vtgate/planbuilder/simple_projection.go index fb9894a89e9..e9e8a146b59 100644 --- a/go/vt/vtgate/planbuilder/simple_projection.go +++ b/go/vt/vtgate/planbuilder/simple_projection.go @@ -17,10 +17,7 @@ limitations under the License. package planbuilder import ( - "fmt" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" ) @@ -35,38 +32,9 @@ var _ logicalPlan = (*simpleProjection)(nil) // a new route that keeps the subquery in the FROM // clause, because a route is more versatile than // a simpleProjection. -// this should not be used by the gen4 planner type simpleProjection struct { logicalPlanCommon - resultColumns []*resultColumn - eSimpleProj *engine.SimpleProjection -} - -// newSimpleProjection builds a new simpleProjection. -func newSimpleProjection(alias sqlparser.IdentifierCS, plan logicalPlan) (*simpleProjection, *symtab, error) { - sq := &simpleProjection{ - logicalPlanCommon: newBuilderCommon(plan), - eSimpleProj: &engine.SimpleProjection{}, - } - - // Create a 'table' that represents the derived table. - t := &table{ - alias: sqlparser.TableName{Name: alias}, - origin: sq, - } - - // Create column symbols based on the result column names. - for _, rc := range plan.ResultColumns() { - if _, ok := t.columns[rc.alias.Lowered()]; ok { - return nil, nil, vterrors.VT12001(fmt.Sprintf("duplicate column names in subquery: %s", sqlparser.String(rc.alias))) - } - t.addColumn(rc.alias, &column{origin: sq}) - } - t.isAuthoritative = true - st := newSymtab() - // AddTable will not fail because symtab is empty. - _ = st.AddTable(t) - return sq, st, nil + eSimpleProj *engine.SimpleProjection } // Primitive implements the logicalPlan interface @@ -75,27 +43,6 @@ func (sq *simpleProjection) Primitive() engine.Primitive { return sq.eSimpleProj } -// ResultColumns implements the logicalPlan interface -func (sq *simpleProjection) ResultColumns() []*resultColumn { - return sq.resultColumns -} - -// SupplyCol implements the logicalPlan interface -func (sq *simpleProjection) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - c := col.Metadata.(*column) - for i, rc := range sq.resultColumns { - if rc.column == c { - return rc, i - } - } - - // columns that reference subqueries will have their colNumber set. - // Let's use it here. - sq.eSimpleProj.Cols = append(sq.eSimpleProj.Cols, c.colNumber) - sq.resultColumns = append(sq.resultColumns, &resultColumn{column: c}) - return rc, len(sq.resultColumns) - 1 -} - // OutputColumns implements the logicalPlan interface func (sq *simpleProjection) OutputColumns() []sqlparser.SelectExpr { exprs := make([]sqlparser.SelectExpr, 0, len(sq.eSimpleProj.Cols)) diff --git a/go/vt/vtgate/planbuilder/simplifier_test.go b/go/vt/vtgate/planbuilder/simplifier_test.go index 057fb5ab136..1e106adacc0 100644 --- a/go/vt/vtgate/planbuilder/simplifier_test.go +++ b/go/vt/vtgate/planbuilder/simplifier_test.go @@ -17,9 +17,12 @@ limitations under the License. package planbuilder import ( + "context" "fmt" "testing" + "vitess.io/vitess/go/test/vschemawrapper" + "vitess.io/vitess/go/vt/vterrors" "github.com/stretchr/testify/assert" @@ -36,19 +39,21 @@ import ( // TestSimplifyBuggyQuery should be used to whenever we get a planner bug reported // It will try to minimize the query to make it easier to understand and work with the bug. func TestSimplifyBuggyQuery(t *testing.T) { - query := "(select id from unsharded union select id from unsharded_auto) union (select id from user union select name from unsharded)" - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - version: Gen4, + query := "select distinct count(distinct a), count(distinct 4) from user left join unsharded on 0 limit 5" + // select 0 from unsharded union select 0 from `user` union select 0 from unsharded + // select 0 from unsharded union (select 0 from `user` union select 0 from unsharded) + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + Version: Gen4, } stmt, reserved, err := sqlparser.Parse2(query) require.NoError(t, err) - rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.currentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) + rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) reservedVars := sqlparser.NewReservedVars("vtg", reserved) simplified := simplifier.SimplifyStatement( stmt.(sqlparser.SelectStatement), - vschema.currentDb(), + vschema.CurrentDb(), vschema, keepSameError(query, reservedVars, vschema, rewritten.BindVarNeeds), ) @@ -59,18 +64,18 @@ func TestSimplifyBuggyQuery(t *testing.T) { func TestSimplifyPanic(t *testing.T) { t.Skip("not needed to run") query := "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)" - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - version: Gen4, + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + Version: Gen4, } stmt, reserved, err := sqlparser.Parse2(query) require.NoError(t, err) - rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.currentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) + rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) reservedVars := sqlparser.NewReservedVars("vtg", reserved) simplified := simplifier.SimplifyStatement( stmt.(sqlparser.SelectStatement), - vschema.currentDb(), + vschema.CurrentDb(), vschema, keepPanicking(query, reservedVars, vschema, rewritten.BindVarNeeds), ) @@ -80,9 +85,9 @@ func TestSimplifyPanic(t *testing.T) { func TestUnsupportedFile(t *testing.T) { t.Skip("run manually to see if any queries can be simplified") - vschema := &vschemaWrapper{ - v: loadSchema(t, "vschemas/schema.json", true), - version: Gen4, + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(t, "vschemas/schema.json", true), + Version: Gen4, } fmt.Println(vschema) for _, tcase := range readJSONTests("unsupported_cases.txt") { @@ -95,11 +100,11 @@ func TestUnsupportedFile(t *testing.T) { t.Skip() return } - rewritten, err := sqlparser.RewriteAST(stmt, vschema.currentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) + rewritten, err := sqlparser.RewriteAST(stmt, vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) if err != nil { t.Skip() } - vschema.currentDb() + vschema.CurrentDb() reservedVars := sqlparser.NewReservedVars("vtg", reserved) ast := rewritten.AST @@ -107,7 +112,7 @@ func TestUnsupportedFile(t *testing.T) { stmt, _, _ = sqlparser.Parse2(tcase.Query) simplified := simplifier.SimplifyStatement( stmt.(sqlparser.SelectStatement), - vschema.currentDb(), + vschema.CurrentDb(), vschema, keepSameError(tcase.Query, reservedVars, vschema, rewritten.BindVarNeeds), ) @@ -124,19 +129,19 @@ func TestUnsupportedFile(t *testing.T) { } } -func keepSameError(query string, reservedVars *sqlparser.ReservedVars, vschema *vschemaWrapper, needs *sqlparser.BindVarNeeds) func(statement sqlparser.SelectStatement) bool { +func keepSameError(query string, reservedVars *sqlparser.ReservedVars, vschema *vschemawrapper.VSchemaWrapper, needs *sqlparser.BindVarNeeds) func(statement sqlparser.SelectStatement) bool { stmt, _, err := sqlparser.Parse2(query) if err != nil { panic(err) } - rewritten, _ := sqlparser.RewriteAST(stmt, vschema.currentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) + rewritten, _ := sqlparser.RewriteAST(stmt, vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) ast := rewritten.AST - _, expected := BuildFromStmt(query, ast, reservedVars, vschema, rewritten.BindVarNeeds, true, true) + _, expected := BuildFromStmt(context.Background(), query, ast, reservedVars, vschema, rewritten.BindVarNeeds, true, true) if expected == nil { panic("query does not fail to plan") } return func(statement sqlparser.SelectStatement) bool { - _, myErr := BuildFromStmt(query, statement, reservedVars, vschema, needs, true, true) + _, myErr := BuildFromStmt(context.Background(), query, statement, reservedVars, vschema, needs, true, true) if myErr == nil { return false } @@ -148,7 +153,7 @@ func keepSameError(query string, reservedVars *sqlparser.ReservedVars, vschema * } } -func keepPanicking(query string, reservedVars *sqlparser.ReservedVars, vschema *vschemaWrapper, needs *sqlparser.BindVarNeeds) func(statement sqlparser.SelectStatement) bool { +func keepPanicking(query string, reservedVars *sqlparser.ReservedVars, vschema *vschemawrapper.VSchemaWrapper, needs *sqlparser.BindVarNeeds) func(statement sqlparser.SelectStatement) bool { cmp := func(statement sqlparser.SelectStatement) (res bool) { defer func() { r := recover() @@ -158,7 +163,7 @@ func keepPanicking(query string, reservedVars *sqlparser.ReservedVars, vschema * } }() log.Errorf("trying %s", sqlparser.String(statement)) - _, _ = BuildFromStmt(query, statement, reservedVars, vschema, needs, true, true) + _, _ = BuildFromStmt(context.Background(), query, statement, reservedVars, vschema, needs, true, true) log.Errorf("did not panic") return false diff --git a/go/vt/vtgate/planbuilder/single_sharded_shortcut.go b/go/vt/vtgate/planbuilder/single_sharded_shortcut.go index 80d3b0fba11..3c763ab7060 100644 --- a/go/vt/vtgate/planbuilder/single_sharded_shortcut.go +++ b/go/vt/vtgate/planbuilder/single_sharded_shortcut.go @@ -29,7 +29,7 @@ import ( "vitess.io/vitess/go/vt/vtgate/vindexes" ) -func unshardedShortcut(ctx *plancontext.PlanningContext, stmt sqlparser.SelectStatement, ks *vindexes.Keyspace) (logicalPlan, []string, error) { +func selectUnshardedShortcut(ctx *plancontext.PlanningContext, stmt sqlparser.SelectStatement, ks *vindexes.Keyspace) (logicalPlan, []string, error) { // this method is used when the query we are handling has all tables in the same unsharded keyspace sqlparser.SafeRewrite(stmt, nil, func(cursor *sqlparser.Cursor) bool { switch node := cursor.Node().(type) { @@ -47,7 +47,7 @@ func unshardedShortcut(ctx *plancontext.PlanningContext, stmt sqlparser.SelectSt if err != nil { return nil, nil, err } - plan := &routeGen4{ + plan := &route{ eroute: &engine.Route{ RoutingParameters: &engine.RoutingParameters{ Opcode: engine.Unsharded, @@ -58,7 +58,7 @@ func unshardedShortcut(ctx *plancontext.PlanningContext, stmt sqlparser.SelectSt Select: stmt, } - if err := plan.WireupGen4(ctx); err != nil { + if err := plan.Wireup(ctx); err != nil { return nil, nil, err } return plan, operators.QualifiedTableNames(ks, tableNames), nil diff --git a/go/vt/vtgate/planbuilder/sql_calc_found_rows.go b/go/vt/vtgate/planbuilder/sql_calc_found_rows.go index 72850361a9e..b67b6a0db3e 100644 --- a/go/vt/vtgate/planbuilder/sql_calc_found_rows.go +++ b/go/vt/vtgate/planbuilder/sql_calc_found_rows.go @@ -30,27 +30,15 @@ var _ logicalPlan = (*sqlCalcFoundRows)(nil) type sqlCalcFoundRows struct { LimitQuery, CountQuery logicalPlan - - // only used by WireUp for V3 - ljt, cjt *jointab -} - -// Wireup implements the logicalPlan interface -func (s *sqlCalcFoundRows) Wireup(logicalPlan, *jointab) error { - err := s.LimitQuery.Wireup(s.LimitQuery, s.ljt) - if err != nil { - return err - } - return s.CountQuery.Wireup(s.CountQuery, s.cjt) } // WireupGen4 implements the logicalPlan interface -func (s *sqlCalcFoundRows) WireupGen4(ctx *plancontext.PlanningContext) error { - err := s.LimitQuery.WireupGen4(ctx) +func (s *sqlCalcFoundRows) Wireup(ctx *plancontext.PlanningContext) error { + err := s.LimitQuery.Wireup(ctx) if err != nil { return err } - return s.CountQuery.WireupGen4(ctx) + return s.CountQuery.Wireup(ctx) } // ContainsTables implements the logicalPlan interface @@ -72,38 +60,6 @@ func (s *sqlCalcFoundRows) Primitive() engine.Primitive { } } -// All the methods below are not implemented. They should not be called on a sqlCalcFoundRows plan - -// Order implements the logicalPlan interface -func (s *sqlCalcFoundRows) Order() int { - return s.LimitQuery.Order() -} - -// ResultColumns implements the logicalPlan interface -func (s *sqlCalcFoundRows) ResultColumns() []*resultColumn { - return s.LimitQuery.ResultColumns() -} - -// Reorder implements the logicalPlan interface -func (s *sqlCalcFoundRows) Reorder(order int) { - s.LimitQuery.Reorder(order) -} - -// SupplyVar implements the logicalPlan interface -func (s *sqlCalcFoundRows) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { - s.LimitQuery.SupplyVar(from, to, col, varname) -} - -// SupplyCol implements the logicalPlan interface -func (s *sqlCalcFoundRows) SupplyCol(col *sqlparser.ColName) (*resultColumn, int) { - return s.LimitQuery.SupplyCol(col) -} - -// SupplyWeightString implements the logicalPlan interface -func (s *sqlCalcFoundRows) SupplyWeightString(int, bool) (weightcolNumber int, err error) { - return 0, UnsupportedSupplyWeightString{Type: "sqlCalcFoundRows"} -} - // Rewrite implements the logicalPlan interface func (s *sqlCalcFoundRows) Rewrite(inputs ...logicalPlan) error { if len(inputs) != 2 { diff --git a/go/vt/vtgate/planbuilder/subquery_op.go b/go/vt/vtgate/planbuilder/subquery_op.go index 93596a5c55e..7faf7291a79 100644 --- a/go/vt/vtgate/planbuilder/subquery_op.go +++ b/go/vt/vtgate/planbuilder/subquery_op.go @@ -19,12 +19,13 @@ package planbuilder import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) func transformSubQueryPlan(ctx *plancontext.PlanningContext, op *operators.SubQueryOp) (logicalPlan, error) { - innerPlan, err := transformToLogicalPlan(ctx, op.Inner, false) + innerPlan, err := transformToLogicalPlan(ctx, op.Inner) if err != nil { return nil, err } @@ -35,13 +36,13 @@ func transformSubQueryPlan(ctx *plancontext.PlanningContext, op *operators.SubQu argName := op.Extracted.GetArgName() hasValuesArg := op.Extracted.GetHasValuesArg() - outerPlan, err := transformToLogicalPlan(ctx, op.Outer, false) + outerPlan, err := transformToLogicalPlan(ctx, op.Outer) merged := mergeSubQueryOpPlan(ctx, innerPlan, outerPlan, op) if merged != nil { return merged, nil } - plan := newPulloutSubquery(engine.PulloutOpcode(op.Extracted.OpCode), argName, hasValuesArg, innerPlan) + plan := newPulloutSubquery(opcode.PulloutOpcode(op.Extracted.OpCode), argName, hasValuesArg, innerPlan) if err != nil { return nil, err } @@ -50,11 +51,11 @@ func transformSubQueryPlan(ctx *plancontext.PlanningContext, op *operators.SubQu } func transformCorrelatedSubQueryPlan(ctx *plancontext.PlanningContext, op *operators.CorrelatedSubQueryOp) (logicalPlan, error) { - outer, err := transformToLogicalPlan(ctx, op.Outer, false) + outer, err := transformToLogicalPlan(ctx, op.Outer) if err != nil { return nil, err } - inner, err := transformToLogicalPlan(ctx, op.Inner, false) + inner, err := transformToLogicalPlan(ctx, op.Inner) if err != nil { return nil, err } @@ -62,11 +63,11 @@ func transformCorrelatedSubQueryPlan(ctx *plancontext.PlanningContext, op *opera } func mergeSubQueryOpPlan(ctx *plancontext.PlanningContext, inner, outer logicalPlan, n *operators.SubQueryOp) logicalPlan { - iroute, ok := inner.(*routeGen4) + iroute, ok := inner.(*route) if !ok { return nil } - oroute, ok := outer.(*routeGen4) + oroute, ok := outer.(*route) if !ok { return nil } @@ -82,7 +83,7 @@ func mergeSubQueryOpPlan(ctx *plancontext.PlanningContext, inner, outer logicalP } // mergeSystemTableInformation copies over information from the second route to the first and appends to it -func mergeSystemTableInformation(a *routeGen4, b *routeGen4) logicalPlan { +func mergeSystemTableInformation(a *route, b *route) logicalPlan { // safe to append system table schema and system table names, since either the routing will match or either side would be throwing an error // during run-time which we want to preserve. For example outer side has User in sys table schema and inner side has User and Main in sys table schema // Inner might end up throwing an error at runtime, but if it doesn't then it is safe to merge. @@ -93,7 +94,7 @@ func mergeSystemTableInformation(a *routeGen4, b *routeGen4) logicalPlan { return a } -func canMergeSubqueryPlans(ctx *plancontext.PlanningContext, a, b *routeGen4) bool { +func canMergeSubqueryPlans(ctx *plancontext.PlanningContext, a, b *route) bool { // this method should be close to tryMerge below. it does the same thing, but on logicalPlans instead of queryTrees if a.eroute.Keyspace.Name != b.eroute.Keyspace.Name { return false diff --git a/go/vt/vtgate/planbuilder/symtab.go b/go/vt/vtgate/planbuilder/symtab.go deleted file mode 100644 index 7853899b4f6..00000000000 --- a/go/vt/vtgate/planbuilder/symtab.go +++ /dev/null @@ -1,617 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - "strconv" - "strings" - - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/vindexes" - - querypb "vitess.io/vitess/go/vt/proto/query" -) - -// symtab represents the symbol table for a SELECT statement -// or a subquery. The symtab evolves over time. -// As a query is analyzed, multiple independent -// symtabs are created, and they are later merged as each -// sub-expression of a FROM clause is merged. -// -// A symtab maintains uniqueColumns, which is a list of unique -// vindex column names. These names can be resolved without the -// need to qualify them by their table names. If there are -// duplicates during a merge, those columns are removed from -// the unique list, thereby disallowing unqualified references -// to such columns. -// -// After a select expression is analyzed, the -// ResultColumns field is set. In the case of a subquery, the -// Outer field points to the outer symtab. Any symbols that -// are not resolved locally are added to the Externs field, -// which is later used to determine if the subquery can be -// merged with an outer route. -type symtab struct { - tables map[sqlparser.TableName]*table - tableNames []sqlparser.TableName - - // uniqueColumns has the column name as key - // and points at the columns that tables contains. - uniqueColumns map[string]*column - - // singleRoute is set only if all the symbols in - // the symbol table are part of the same route. - singleRoute *route - - ResultColumns []*resultColumn - Outer *symtab - Externs []*sqlparser.ColName -} - -// newSymtab creates a new symtab. -func newSymtab() *symtab { - return &symtab{ - tables: make(map[sqlparser.TableName]*table), - uniqueColumns: make(map[string]*column), - } -} - -// newSymtab creates a new symtab initialized -// to contain just one route. -func newSymtabWithRoute(rb *route) *symtab { - return &symtab{ - tables: make(map[sqlparser.TableName]*table), - uniqueColumns: make(map[string]*column), - singleRoute: rb, - } -} - -// AddVSchemaTable adds a vschema table to symtab. -func (st *symtab) AddVSchemaTable(alias sqlparser.TableName, vschemaTable *vindexes.Table, rb *route) error { - t := &table{ - alias: alias, - origin: rb, - vschemaTable: vschemaTable, - } - - for _, col := range vschemaTable.Columns { - if _, err := t.mergeColumn(col.Name, &column{ - origin: rb, - st: st, - typ: col.Type, - }); err != nil { - return err - } - } - if vschemaTable.ColumnListAuthoritative { - // This will prevent new columns from being added. - t.isAuthoritative = true - } - - for _, cv := range vschemaTable.ColumnVindexes { - single, ok := cv.Vindex.(vindexes.SingleColumn) - if !ok { - continue - } - for i, cvcol := range cv.Columns { - col, err := t.mergeColumn(cvcol, &column{ - origin: rb, - st: st, - }) - if err != nil { - return err - } - if i == 0 { - if col.vindex == nil || col.vindex.Cost() > single.Cost() { - col.vindex = single - } - } - } - } - - if ai := vschemaTable.AutoIncrement; ai != nil { - if _, ok := t.columns[ai.Column.Lowered()]; !ok { - if _, err := t.mergeColumn(ai.Column, &column{ - origin: rb, - st: st, - }); err != nil { - return err - } - } - } - if err := st.AddTable(t); err != nil { - return err - } - return nil -} - -// Merge merges the new symtab into the current one. -// Duplicate table aliases return an error. -// uniqueColumns is updated, but duplicates are removed. -// Merges are only performed during the FROM clause analysis. -// At this point, only tables and uniqueColumns are set. -// All other fields are ignored. -func (st *symtab) Merge(newsyms *symtab) error { - if st.tableNames == nil || newsyms.tableNames == nil { - // If any side of symtab has anonymous tables, - // we treat the merged symtab as having anonymous tables. - return nil - } - for _, t := range newsyms.tables { - if err := st.AddTable(t); err != nil { - return err - } - } - return nil -} - -// AddTable adds a table to symtab. -func (st *symtab) AddTable(t *table) error { - if rb, ok := t.origin.(*route); !ok || rb.Resolve() != st.singleRoute { - st.singleRoute = nil - } - if _, ok := st.tables[t.alias]; ok { - return vterrors.VT03013(t.alias.Name.String()) - } - st.tables[t.alias] = t - st.tableNames = append(st.tableNames, t.alias) - - // update the uniqueColumns list, and eliminate - // duplicate symbols if found. - for colname, c := range t.columns { - c.st = st - if _, ok := st.uniqueColumns[colname]; ok { - // Keep the entry, but make it nil. This will - // ensure that yet another column of the same name - // doesn't get added back in. - st.uniqueColumns[colname] = nil - continue - } - st.uniqueColumns[colname] = c - } - return nil -} - -// AllTables returns an ordered list of all current tables. -func (st *symtab) AllTables() []*table { - if len(st.tableNames) == 0 { - return nil - } - tables := make([]*table, 0, len(st.tableNames)) - for _, tname := range st.tableNames { - tables = append(tables, st.tables[tname]) - } - return tables -} - -// AllVschemaTableNames returns an ordered list of all current vschema tables. -func (st *symtab) AllVschemaTableNames() ([]*vindexes.Table, error) { - if len(st.tableNames) == 0 { - return nil, nil - } - tables := make([]*vindexes.Table, 0, len(st.tableNames)) - for _, tname := range st.tableNames { - t, ok := st.tables[tname] - if !ok { - return nil, vterrors.VT05004(sqlparser.String(tname)) - } - if t.vschemaTable != nil { - tables = append(tables, t.vschemaTable) - } - } - return tables, nil -} - -// FindTable finds a table in symtab. This function is specifically used -// for expanding 'select a.*' constructs. If you're in a subquery, -// you're most likely referring to a table in the local 'from' clause. -// For this reason, the search is only performed in the current scope. -// This may be a deviation from the formal definition of SQL, but there -// are currently no use cases that require the full support. -func (st *symtab) FindTable(tname sqlparser.TableName) (*table, error) { - if st.tableNames == nil { - // Unreachable because current code path checks for this condition - // before invoking this function. - return nil, vterrors.VT05007() - } - t, ok := st.tables[tname] - if !ok { - return nil, vterrors.VT05004(sqlparser.String(tname)) - } - return t, nil -} - -// SetResultColumns sets the result columns. -func (st *symtab) SetResultColumns(rcs []*resultColumn) { - for _, rc := range rcs { - rc.column.st = st - } - st.ResultColumns = rcs -} - -// Find returns the logicalPlan for the symbol referenced by col. -// If a reference is found, col.Metadata is set to point -// to it. Subsequent searches will reuse this metadata. -// -// Unqualified columns are searched in the following order: -// 1. ResultColumns -// 2. uniqueColumns -// 3. symtab has only one table. The column is presumed to -// belong to that table. -// 4. symtab has more than one table, but all tables belong -// to the same route. An anonymous column is created against -// the current route. -// If all the above fail, an error is returned. This means -// that an unqualified reference can only be locally resolved. -// -// For qualified columns, we first look for the table. If one -// is found, we look for a column in the pre-existing list. -// If one is not found, we optimistically create an entry -// presuming that the table has such a column. If this is -// not the case, the query will fail when sent to vttablet. -// If the table is not found in the local scope, the search -// is continued in the outer scope, but only if ResultColumns -// is not set (this is MySQL behavior). -// -// For symbols that were found locally, isLocal is returned -// as true. Otherwise, it's returned as false and the symbol -// gets added to the Externs list, which can later be used -// to decide where to push-down the subquery. -func (st *symtab) Find(col *sqlparser.ColName) (origin logicalPlan, isLocal bool, err error) { - // Return previously cached info if present. - if column, ok := col.Metadata.(*column); ok { - return column.Origin(), column.st == st, nil - } - - // Unqualified column case. - if col.Qualifier.IsEmpty() { - // Step 1. Search ResultColumns. - c, err := st.searchResultColumn(col) - if err != nil { - return nil, false, err - } - if c != nil { - col.Metadata = c - return c.Origin(), true, nil - } - } - - // Steps 2-4 performed by searchTables. - c, err := st.searchTables(col) - if err != nil { - return nil, false, err - } - if c != nil { - col.Metadata = c - return c.Origin(), true, nil - } - - if st.Outer == nil { - return nil, false, vterrors.VT03019(sqlparser.String(col)) - } - // Search is not continued if ResultColumns already has values: - // select a ... having ... (select b ... having a...). In this case, - // a (in having) should not match the outer-most 'a'. This is to - // match MySQL's behavior. - if len(st.ResultColumns) != 0 { - return nil, false, vterrors.VT03020(sqlparser.String(col)) - } - - if origin, _, err = st.Outer.Find(col); err != nil { - return nil, false, err - } - st.Externs = append(st.Externs, col) - return origin, false, nil -} - -// searchResultColumn looks for col in the results columns. -func (st *symtab) searchResultColumn(col *sqlparser.ColName) (c *column, err error) { - var cursym *resultColumn - for _, rc := range st.ResultColumns { - if rc.alias.Equal(col.Name) { - if cursym != nil { - return nil, vterrors.VT03021(sqlparser.String(col)) - } - cursym = rc - } - } - if cursym != nil { - return cursym.column, nil - } - return nil, nil -} - -// searchTables looks for the column in the tables. The search order -// is as described in Find. -func (st *symtab) searchTables(col *sqlparser.ColName) (*column, error) { - var t *table - // @@ syntax is only allowed for dual tables, in which case there should be - // only one in the symtab. So, such expressions will be implicitly matched. - if col.Qualifier.IsEmpty() || strings.HasPrefix(col.Qualifier.Name.String(), "@@") { - // Search uniqueColumns first. If found, our job is done. - // Check for nil because there can be nil entries if there - // are duplicate columns across multiple tables. - if c := st.uniqueColumns[col.Name.Lowered()]; c != nil { - return c, nil - } - - switch { - case len(st.tables) == 1: - // If there's only one table match against it. - // Loop executes once to match the only table. - for _, v := range st.tables { - t = v - } - // No return: break out. - case st.singleRoute != nil: - // If there's only one route, create an anonymous symbol. - return &column{origin: st.singleRoute, st: st}, nil - default: - // If none of the above, the symbol is unresolvable. - return nil, vterrors.VT03019(sqlparser.String(col)) - } - } else { - var ok bool - t, ok = st.tables[col.Qualifier] - if !ok { - return nil, nil - } - } - - // At this point, t should be set. - c, ok := t.columns[col.Name.Lowered()] - if !ok { - // We know all the column names of a subquery. Might as well return an error if it's not found. - if t.isAuthoritative { - return nil, vterrors.VT03019(sqlparser.String(col)) - } - c = &column{ - origin: t.Origin(), - st: st, - } - t.addColumn(col.Name, c) - } - return c, nil -} - -// ResultFromNumber returns the result column index based on the column -// order expression. -func ResultFromNumber(rcs []*resultColumn, val *sqlparser.Literal, caller string) (int, error) { - if val.Type != sqlparser.IntVal { - return 0, vterrors.VT13001("column number is not an INT") - } - num, err := strconv.ParseInt(val.Val, 0, 64) - if err != nil { - return 0, vterrors.VT13001(fmt.Sprintf("error parsing column number: %s", sqlparser.String(val))) - } - if num < 1 || num > int64(len(rcs)) { - return 0, vterrors.VT03014(num, caller) - } - return int(num - 1), nil -} - -// Vindex returns the vindex if the expression is a plain column reference -// that is part of the specified route, and has an associated vindex. -func (st *symtab) Vindex(expr sqlparser.Expr, scope *route) vindexes.SingleColumn { - col, ok := expr.(*sqlparser.ColName) - if !ok { - return nil - } - if col.Metadata == nil { - // Find will set the Metadata. - if _, _, err := st.Find(col); err != nil { - return nil - } - } - c := col.Metadata.(*column) - if c.Origin() != scope { - return nil - } - return c.vindex -} - -// BuildColName builds a *sqlparser.ColName for the resultColumn specified -// by the index. The built ColName will correctly reference the resultColumn -// it was built from. -func BuildColName(rcs []*resultColumn, index int) (*sqlparser.ColName, error) { - alias := rcs[index].alias - if alias.IsEmpty() { - return nil, vterrors.VT12001("reference a complex expression") - } - for i, rc := range rcs { - if i == index { - continue - } - if rc.alias.Equal(alias) { - return nil, vterrors.VT03021(alias) - } - } - return &sqlparser.ColName{ - Metadata: rcs[index].column, - Name: alias, - }, nil -} - -// ResolveSymbols resolves all column references against symtab. -// This makes sure that they all have their Metadata initialized. -// If a symbol cannot be resolved or if the expression contains -// a subquery, an error is returned. -func (st *symtab) ResolveSymbols(node sqlparser.SQLNode) error { - return sqlparser.Walk(func(currNode sqlparser.SQLNode) (kontinue bool, err error) { - switch currNode := currNode.(type) { - case *sqlparser.ColName: - if _, _, err := st.Find(currNode); err != nil { - return false, err - } - case *sqlparser.Subquery: - return false, vterrors.VT12001(fmt.Sprintf("subqueries disallowed in %T", node)) - } - return true, nil - }, node) -} - -// table is part of symtab. -// It represents a table alias in a FROM clause. It points -// to the logicalPlan that represents it. -type table struct { - alias sqlparser.TableName - columns map[string]*column - columnNames []sqlparser.IdentifierCI - isAuthoritative bool - origin logicalPlan - vschemaTable *vindexes.Table -} - -func (t *table) addColumn(alias sqlparser.IdentifierCI, c *column) { - if t.columns == nil { - t.columns = make(map[string]*column) - } - lowered := alias.Lowered() - // Dups are allowed, but first one wins if referenced. - if _, ok := t.columns[lowered]; !ok { - c.colNumber = len(t.columnNames) - t.columns[lowered] = c - } - t.columnNames = append(t.columnNames, alias) -} - -// mergeColumn merges or creates a new column for the table. -// If the table is authoritative and the column doesn't already -// exist, it returns an error. If the table is not authoritative, -// the column is added if not already present. -func (t *table) mergeColumn(alias sqlparser.IdentifierCI, c *column) (*column, error) { - if t.columns == nil { - t.columns = make(map[string]*column) - } - lowered := alias.Lowered() - if col, ok := t.columns[lowered]; ok { - return col, nil - } - if t.isAuthoritative { - return nil, vterrors.VT03022(sqlparser.String(alias), sqlparser.String(t.alias)) - } - c.colNumber = len(t.columnNames) - t.columns[lowered] = c - t.columnNames = append(t.columnNames, alias) - return c, nil -} - -// Origin returns the route that originates the table. -func (t *table) Origin() logicalPlan { - // If it's a route, we have to resolve it. - if rb, ok := t.origin.(*route); ok { - return rb.Resolve() - } - return t.origin -} - -// column represents a unique symbol in the query that other -// parts can refer to. -// Every column contains the logicalPlan it originates from. -// If a column has associated vindexes, then the one with the -// lowest cost is set. -// -// Two columns are equal if their pointer values match. -// -// For subquery and vindexFunc, the colNumber is also set because -// the column order is known and unchangeable. -type column struct { - origin logicalPlan - st *symtab - vindex vindexes.SingleColumn - typ querypb.Type - colNumber int -} - -// Origin returns the route that originates the column. -func (c *column) Origin() logicalPlan { - // If it's a route, we have to resolve it. - if rb, ok := c.origin.(*route); ok { - return rb.Resolve() - } - return c.origin -} - -// resultColumn contains symbol info about a select expression. If the -// expression represents an underlying column, then it points to it. -// Otherwise, an anonymous column is created as place-holder. -type resultColumn struct { - // alias will represent the unqualified symbol name for that expression. - // If the statement provides an explicit alias, that name will be used. - // If the expression is a simple column, then the base name of the - // column will be used as the alias. If the expression is non-trivial, - // alias will be empty, and cannot be referenced from other parts of - // the query. - alias sqlparser.IdentifierCI - column *column -} - -// NewResultColumn creates a new resultColumn based on the supplied expression. -// The created symbol is not remembered until it is later set as ResultColumns -// after all select expressions are analyzed. -func newResultColumn(expr *sqlparser.AliasedExpr, origin logicalPlan) *resultColumn { - rc := &resultColumn{ - alias: expr.As, - } - if col, ok := expr.Expr.(*sqlparser.ColName); ok { - // If no alias was specified, then the base name - // of the column becomes the alias. - if rc.alias.IsEmpty() { - rc.alias = col.Name - } - // If it's a col it should already have metadata. - rc.column = col.Metadata.(*column) - } else { - // We don't generate an alias if the expression is non-trivial. - // Just to be safe, generate an anonymous column for the expression. - typ, err := GetReturnType(expr.Expr) - rc.column = &column{ - origin: origin, - } - if err == nil { - rc.column.typ = typ - } - } - return rc -} - -// GetReturnType returns the type of the select expression that MySQL will return -func GetReturnType(input sqlparser.Expr) (querypb.Type, error) { - switch node := input.(type) { - case *sqlparser.FuncExpr: - functionName := strings.ToUpper(node.Name.String()) - switch functionName { - case "ABS": - // Returned value depends on the return type of the input - if len(node.Exprs) == 1 { - expr, isAliasedExpr := node.Exprs[0].(*sqlparser.AliasedExpr) - if isAliasedExpr { - return GetReturnType(expr.Expr) - } - } - } - case *sqlparser.ColName: - col := node.Metadata.(*column) - return col.typ, nil - case *sqlparser.Count, *sqlparser.CountStar: - return querypb.Type_INT64, nil - } - return 0, vterrors.VT12001(fmt.Sprintf("evaluate return type for %T", input)) -} diff --git a/go/vt/vtgate/planbuilder/symtab_test.go b/go/vt/vtgate/planbuilder/symtab_test.go deleted file mode 100644 index 725eeaa541a..00000000000 --- a/go/vt/vtgate/planbuilder/symtab_test.go +++ /dev/null @@ -1,227 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "testing" - - "github.com/stretchr/testify/require" - - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/sqlparser" -) - -/* -func TestSymtabAddVSchemaTable(t *testing.T) { - tname := sqlparser.TableName{Name: sqlparser.NewIdentifierCS("t")} - rb := &route{} - - null, _ := vindexes.CreateVindex("null", "null", nil) - - tcases := []struct { - in *vindexes.Table - authoritative bool - vindex []string - err string - }{{ - // Single table. - in: &vindexes.Table{ - Columns: []vindexes.Column{{ - Name: sqlparser.NewIdentifierCI("C1"), - }, { - Name: sqlparser.NewIdentifierCI("C2"), - }}, - }, - authoritative: false, - vindex: []string{}, - }, { - // Column vindex specified. - in: &vindexes.Table{ - ColumnVindexes: []*vindexes.ColumnVindex{{ - Columns: []sqlparser.IdentifierCI{sqlparser.NewIdentifierCI("C1")}, - Vindex: null, - }}, - Columns: []vindexes.Column{{ - Name: sqlparser.NewIdentifierCI("C1"), - }, { - Name: sqlparser.NewIdentifierCI("C2"), - }}, - }, - authoritative: false, - vindex: []string{"c1"}, - }, { - // Multi-column vindex. - in: &vindexes.Table{ - ColumnVindexes: []*vindexes.ColumnVindex{{ - Columns: []sqlparser.IdentifierCI{ - sqlparser.NewIdentifierCI("C1"), - sqlparser.NewIdentifierCI("C2"), - }, - Vindex: null, - }}, - Columns: []vindexes.Column{{ - Name: sqlparser.NewIdentifierCI("C1"), - }, { - Name: sqlparser.NewIdentifierCI("C2"), - }}, - }, - authoritative: false, - vindex: []string{"c1"}, - }, { - // AutoIncrement. - in: &vindexes.Table{ - AutoIncrement: &vindexes.AutoIncrement{ - Column: sqlparser.NewIdentifierCI("C1"), - }, - Columns: []vindexes.Column{{ - Name: sqlparser.NewIdentifierCI("C1"), - }, { - Name: sqlparser.NewIdentifierCI("C2"), - }}, - }, - authoritative: false, - vindex: []string{}, - }, { - // Column vindex specifies a column not in list. - in: &vindexes.Table{ - ColumnVindexes: []*vindexes.ColumnVindex{{ - Columns: []sqlparser.IdentifierCI{sqlparser.NewIdentifierCI("C1")}, - Vindex: null, - }}, - Columns: []vindexes.Column{{ - Name: sqlparser.NewIdentifierCI("C2"), - }}, - }, - authoritative: false, - vindex: []string{"c1"}, - }, { - // Column vindex specifies columns with none in list. - in: &vindexes.Table{ - ColumnVindexes: []*vindexes.ColumnVindex{{ - Columns: []sqlparser.IdentifierCI{ - sqlparser.NewIdentifierCI("C1"), - sqlparser.NewIdentifierCI("C2"), - }, - Vindex: null, - }}, - }, - authoritative: false, - vindex: []string{"c1"}, - }, { - // AutoIncrement specifies a column not in list. - in: &vindexes.Table{ - AutoIncrement: &vindexes.AutoIncrement{ - Column: sqlparser.NewIdentifierCI("C1"), - }, - Columns: []vindexes.Column{{ - Name: sqlparser.NewIdentifierCI("C2"), - }}, - }, - authoritative: false, - vindex: []string{}, - }, { - // Two column vindexes. - in: &vindexes.Table{ - ColumnVindexes: []*vindexes.ColumnVindex{{ - Columns: []sqlparser.IdentifierCI{ - sqlparser.NewIdentifierCI("C1"), - }, - Vindex: null, - }, { - Columns: []sqlparser.IdentifierCI{ - sqlparser.NewIdentifierCI("C2"), - }, - Vindex: null, - }}, - }, - authoritative: false, - vindex: []string{"c1", "c2"}, - }} - - out := []string{"c1", "c2"} - for _, tcase := range tcases { - st := newSymtab() - vindexMap, err := st.AddVSchemaTable(tname, tcase.in, rb) - tcasein, _ := json.Marshal(tcase.in) - if err != nil { - if err.Error() != tcase.err { - t.Errorf("st.AddVSchemaTable(%s) err: %v, want %s", tcasein, err, tcase.err) - } - continue - } else if tcase.err != "" { - t.Errorf("st.AddVSchemaTable(%s) succeeded, want error: %s", tcasein, tcase.err) - continue - } - tab := st.tables[tname] - for _, col := range out { - if tab.columns[col] == nil { - t.Errorf("st.AddVSchemaTable(%s): column %s not found", tcasein, col) - } - } - for _, col := range tcase.vindex { - c := tab.columns[col] - if c == nil { - t.Errorf("st.AddVSchemaTable(%s): column %s not found", tcasein, col) - } - if _, ok := vindexMap[c]; !ok { - t.Errorf("st.AddVSchemaTable(%s).vindexMap: column %s not found", tcasein, col) - } - } - if tab.isAuthoritative != tcase.authoritative { - t.Errorf("st.AddVSchemaTable(%s).authoritative: %v want %v", tcasein, tab.isAuthoritative, tcase.authoritative) - } - } -} -*/ - -func TestGetReturnType(t *testing.T) { - tests := []struct { - input sqlparser.Expr - output querypb.Type - expectedErr error - }{{ - input: &sqlparser.FuncExpr{Name: sqlparser.NewIdentifierCI("Abs"), Exprs: sqlparser.SelectExprs{ - &sqlparser.AliasedExpr{ - Expr: &sqlparser.ColName{ - Name: sqlparser.NewIdentifierCI("A"), - Metadata: &column{ - typ: querypb.Type_DECIMAL, - }, - }, - }, - }}, - output: querypb.Type_DECIMAL, - }, { - input: &sqlparser.Count{}, - output: querypb.Type_INT64, - }, { - input: &sqlparser.CountStar{}, - output: querypb.Type_INT64, - }} - - for _, test := range tests { - t.Run(sqlparser.String(test.input), func(t *testing.T) { - got, err := GetReturnType(test.input) - if test.expectedErr != nil { - require.EqualError(t, err, test.expectedErr.Error()) - } else { - require.NoError(t, err) - require.Equal(t, test.output, got) - } - }) - } -} diff --git a/go/vt/vtgate/planbuilder/system_tables.go b/go/vt/vtgate/planbuilder/system_tables.go deleted file mode 100644 index ba061af909f..00000000000 --- a/go/vt/vtgate/planbuilder/system_tables.go +++ /dev/null @@ -1,153 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "strings" - - "vitess.io/vitess/go/mysql/collations" - - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/evalengine" -) - -type notImplementedSchemaInfoConverter struct{} - -func (f *notImplementedSchemaInfoConverter) ColumnLookup(*sqlparser.ColName) (int, error) { - return 0, vterrors.VT12001("comparing table schema name with a column name") -} - -func (f *notImplementedSchemaInfoConverter) CollationForExpr(sqlparser.Expr) collations.ID { - return collations.Unknown -} - -func (f *notImplementedSchemaInfoConverter) DefaultCollation() collations.ID { - return collations.Default() -} - -func (pb *primitiveBuilder) findSysInfoRoutingPredicates(expr sqlparser.Expr, rut *route, reservedVars *sqlparser.ReservedVars) error { - isTableSchema, bvName, out, err := extractInfoSchemaRoutingPredicate(expr, reservedVars) - if err != nil { - return err - } - if out == nil { - // we didn't find a predicate to use for routing, so we just exit early - return nil - } - - if isTableSchema { - rut.eroute.SysTableTableSchema = append(rut.eroute.SysTableTableSchema, out) - } else { - if rut.eroute.SysTableTableName == nil { - rut.eroute.SysTableTableName = map[string]evalengine.Expr{} - } - rut.eroute.SysTableTableName[bvName] = out - } - - return nil -} - -func findOtherComparator(cmp *sqlparser.ComparisonExpr) (bool, sqlparser.Expr, sqlparser.Expr, func(arg sqlparser.Argument)) { - if schema, table := isTableSchemaOrName(cmp.Left); schema || table { - return schema, cmp.Left, cmp.Right, func(arg sqlparser.Argument) { - cmp.Right = arg - } - } - if schema, table := isTableSchemaOrName(cmp.Right); schema || table { - return schema, cmp.Right, cmp.Left, func(arg sqlparser.Argument) { - cmp.Left = arg - } - } - - return false, nil, nil, nil -} - -func isTableSchemaOrName(e sqlparser.Expr) (isTableSchema bool, isTableName bool) { - col, ok := e.(*sqlparser.ColName) - if !ok { - return false, false - } - return isDbNameCol(col), isTableNameCol(col) -} - -var schemaColumns = map[string]any{ - "table_schema": nil, - "constraint_schema": nil, - "schema_name": nil, - "routine_schema": nil, - "specific_schema": nil, - "event_schema": nil, - "referenced_table_schema": nil, - "index_schema": nil, - "trigger_schema": nil, - "event_object_schema": nil, -} - -func isDbNameCol(col *sqlparser.ColName) bool { - _, found := schemaColumns[col.Name.Lowered()] - return found -} - -func isTableNameCol(col *sqlparser.ColName) bool { - return col.Name.EqualString("table_name") || col.Name.EqualString("referenced_table_name") -} - -func extractInfoSchemaRoutingPredicate( - in sqlparser.Expr, - reservedVars *sqlparser.ReservedVars, -) (isSchemaName bool, name string, evalExpr evalengine.Expr, err error) { - cmp, ok := in.(*sqlparser.ComparisonExpr) - if !ok || cmp.Operator != sqlparser.EqualOp { - return - } - - isSchemaName, col, other, replaceOther := findOtherComparator(cmp) - if col == nil || !shouldRewrite(other) { - return - } - - evalExpr, err = evalengine.Translate(other, ¬ImplementedSchemaInfoConverter{}) - if err != nil { - if strings.Contains(err.Error(), evalengine.ErrTranslateExprNotSupported) { - // This just means we can't rewrite this particular expression, - // not that we have to exit altogether - err = nil - return - } - return false, "", nil, err - } - - if isSchemaName { - name = sqltypes.BvSchemaName - } else { - name = reservedVars.ReserveColName(col.(*sqlparser.ColName)) - } - replaceOther(sqlparser.NewArgument(name)) - return isSchemaName, name, evalExpr, nil -} - -func shouldRewrite(e sqlparser.Expr) bool { - switch node := e.(type) { - case *sqlparser.FuncExpr: - // we should not rewrite database() calls against information_schema - return !(node.Name.EqualString("database") || node.Name.EqualString("schema")) - } - return true -} diff --git a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json index f8e6c7fcde1..6ca0c3cf0d8 100644 --- a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json @@ -1,57 +1,203 @@ [ { - "comment": "group by a unique vindex should revert to simple route, and having clause should find the correct symbols.", - "query": "select id, count(*) c from user group by id having max(col) > 10", - "v3-plan": { + "comment": "count(*) spread across join", + "query": "select count(*) from user join user_extra on user.foo = user_extra.bar", + "plan": { "QueryType": "SELECT", - "Original": "select id, count(*) c from user group by id having max(col) > 10", + "Original": "select count(*) from user join user_extra on user.foo = user_extra.bar", "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, count(*) as c from `user` where 1 != 1 group by id", - "Query": "select id, count(*) as c from `user` group by id having max(col) > 10", - "Table": "`user`" - } - }, - "gen4-plan": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count_star(0) AS count(*)", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_foo": 1 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), `user`.foo from `user` where 1 != 1 group by `user`.foo", + "Query": "select count(*), `user`.foo from `user` group by `user`.foo", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) from user_extra where 1 != 1 group by .0", + "Query": "select count(*) from user_extra where user_extra.bar = :user_foo group by .0", + "Table": "user_extra" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "sum spread across join", + "query": "select sum(user.col) from user join user_extra on user.foo = user_extra.bar", + "plan": { "QueryType": "SELECT", - "Original": "select id, count(*) c from user group by id having max(col) > 10", + "Original": "select sum(user.col) from user join user_extra on user.foo = user_extra.bar", "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, count(*) as c from `user` where 1 != 1 group by id", - "Query": "select id, count(*) as c from `user` group by id having max(col) > 10", - "Table": "`user`" + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum(0) AS sum(`user`.col)", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as sum(`user`.col)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_foo": 1 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select sum(`user`.col), `user`.foo from `user` where 1 != 1 group by `user`.foo", + "Query": "select sum(`user`.col), `user`.foo from `user` group by `user`.foo", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) from user_extra where 1 != 1 group by .0", + "Query": "select count(*) from user_extra where user_extra.bar = :user_foo group by .0", + "Table": "user_extra" + } + ] + } + ] + } + ] }, "TablesUsed": [ - "user.user" + "user.user", + "user.user_extra" ] } }, { - "comment": "scatter aggregate in a subquery", - "query": "select a from (select count(*) as a from user) t", - "v3-plan": { + "comment": "count spread across join", + "query": "select count(user.col) from user join user_extra on user.foo = user_extra.bar", + "plan": { "QueryType": "SELECT", - "Original": "select a from (select count(*) as a from user) t", + "Original": "select count(user.col) from user join user_extra on user.foo = user_extra.bar", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count(0) AS count(`user`.col)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(`user`.col)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_foo": 1 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(`user`.col), `user`.foo from `user` where 1 != 1 group by `user`.foo", + "Query": "select count(`user`.col), `user`.foo from `user` group by `user`.foo", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) from user_extra where 1 != 1 group by .0", + "Query": "select count(*) from user_extra where user_extra.bar = :user_foo group by .0", + "Table": "user_extra" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "max spread across join", + "query": "select max(user.col) from user join user_extra on user.foo = user_extra.bar", + "plan": { + "QueryType": "SELECT", + "Original": "select max(user.col) from user join user_extra on user.foo = user_extra.bar", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "max(0) AS max(`user`.col)", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "JoinVars": { + "user_foo": 1 + }, + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -60,28 +206,50 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select count(*) as a from `user` where 1 != 1", - "Query": "select count(*) as a from `user`", + "FieldQuery": "select max(`user`.col), `user`.foo from `user` where 1 != 1 group by `user`.foo", + "Query": "select max(`user`.col), `user`.foo from `user` group by `user`.foo", "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1 group by .0", + "Query": "select 1 from user_extra where user_extra.bar = :user_foo group by .0", + "Table": "user_extra" } ] } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "min spread across join RHS", + "query": "select min(user_extra.col) from user join user_extra on user.foo = user_extra.bar", + "plan": { "QueryType": "SELECT", - "Original": "select a from (select count(*) as a from user) t", + "Original": "select min(user_extra.col) from user join user_extra on user.foo = user_extra.bar", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "min(0) AS min(user_extra.col)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count_star(0) AS a", + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "user_foo": 0 + }, + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -90,29 +258,63 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select count(*) as a from `user` where 1 != 1", - "Query": "select count(*) as a from `user`", + "FieldQuery": "select `user`.foo from `user` where 1 != 1 group by `user`.foo", + "Query": "select `user`.foo from `user` group by `user`.foo", "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select min(user_extra.col) from user_extra where 1 != 1 group by .0", + "Query": "select min(user_extra.col) from user_extra where user_extra.bar = :user_foo group by .0", + "Table": "user_extra" } ] } ] }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "group by a unique vindex should revert to simple route, and having clause should find the correct symbols.", + "query": "select id, count(*) c from user group by id having max(col) > 10", + "plan": { + "QueryType": "SELECT", + "Original": "select id, count(*) c from user group by id having max(col) > 10", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, count(*) as c from `user` where 1 != 1 group by id", + "Query": "select id, count(*) as c from `user` group by id having max(col) > 10", + "Table": "`user`" + }, "TablesUsed": [ "user.user" ] } }, { - "comment": "scatter aggregate with non-aggregate expressions.", - "query": "select id, count(*) from user", - "v3-plan": { + "comment": "scatter aggregate in a subquery", + "query": "select a from (select count(*) as a from user) t", + "plan": { "QueryType": "SELECT", - "Original": "select id, count(*) from user", + "Original": "select a from (select count(*) as a from user) t", "Instructions": { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "sum_count(1) AS count", + "Aggregates": "sum_count_star(0) AS a", "Inputs": [ { "OperatorType": "Route", @@ -121,20 +323,27 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, count(*) from `user` where 1 != 1", - "Query": "select id, count(*) from `user`", + "FieldQuery": "select count(*) as a from `user` where 1 != 1", + "Query": "select count(*) as a from `user`", "Table": "`user`" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "scatter aggregate with non-aggregate expressions.", + "query": "select id, count(*) from user", + "plan": { "QueryType": "SELECT", "Original": "select id, count(*) from user", "Instructions": { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)", + "Aggregates": "any_value(0) AS id, sum_count_star(1) AS count(*)", "Inputs": [ { "OperatorType": "Route", @@ -157,13 +366,14 @@ { "comment": "scatter aggregate using distinctdistinct", "query": "select distinct col from user", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select distinct col from user", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "0", + "OperatorType": "Distinct", + "Collations": [ + "0" + ], "Inputs": [ { "OperatorType": "Route", @@ -173,31 +383,7 @@ "Sharded": true }, "FieldQuery": "select col from `user` where 1 != 1", - "OrderBy": "0 ASC", - "Query": "select distinct col from `user` order by col asc", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select distinct col from user", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "OrderBy": "0 ASC", - "Query": "select distinct col from `user` order by col asc", + "Query": "select distinct col from `user`", "Table": "`user`" } ] @@ -210,30 +396,7 @@ { "comment": "scatter aggregate group by select col", "query": "select col from user group by col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user group by col", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1 group by col", - "OrderBy": "0 ASC", - "Query": "select col from `user` group by col order by col asc", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user group by col", "Instructions": { @@ -263,22 +426,7 @@ { "comment": "count with distinct group by unique vindex", "query": "select id, count(distinct col) from user group by id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, count(distinct col) from user group by id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, count(distinct col) from `user` where 1 != 1 group by id", - "Query": "select id, count(distinct col) from `user` group by id", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, count(distinct col) from user group by id", "Instructions": { @@ -299,38 +447,14 @@ }, { "comment": "count with distinct unique vindex", - "query": "select col, count(distinct id) from user group by col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col, count(distinct id) from user group by col", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col, count(distinct id) from `user` where 1 != 1 group by col", - "OrderBy": "0 ASC", - "Query": "select col, count(distinct id) from `user` group by col order by col asc", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "query": "select col, count(distinct id), sum(distinct id) from user group by col", + "plan": { "QueryType": "SELECT", - "Original": "select col, count(distinct id) from user group by col", + "Original": "select col, count(distinct id), sum(distinct id) from user group by col", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "sum_count_distinct(1) AS count(distinct id)", + "Aggregates": "sum_count_distinct(1) AS count(distinct id), sum_sum_distinct(2) AS sum(distinct id)", "GroupBy": "0", "Inputs": [ { @@ -340,9 +464,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col, count(distinct id) from `user` where 1 != 1 group by col", + "FieldQuery": "select col, count(distinct id), sum(distinct id) from `user` where 1 != 1 group by col", "OrderBy": "0 ASC", - "Query": "select col, count(distinct id) from `user` group by col order by col asc", + "Query": "select col, count(distinct id), sum(distinct id) from `user` group by col order by col asc", "Table": "`user`" } ] @@ -355,32 +479,7 @@ { "comment": "count with distinct no unique vindex", "query": "select col1, count(distinct col2) from user group by col1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1, count(distinct col2) from user group by col1", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "count_distinct_count(1) AS count(distinct col2)", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", - "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1, count(distinct col2) from user group by col1", "Instructions": { @@ -397,9 +496,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)", + "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc", + "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", "Table": "`user`" } ] @@ -412,31 +511,7 @@ { "comment": "count with distinct no unique vindex and no group by", "query": "select count(distinct col2) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select count(distinct col2) from user", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "count_distinct_count(0) AS count(distinct col2)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col2, weight_string(col2) from `user` where 1 != 1 group by col2, weight_string(col2)", - "OrderBy": "(0|1) ASC", - "Query": "select col2, weight_string(col2) from `user` group by col2, weight_string(col2) order by col2 asc", - "ResultColumns": 1, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(distinct col2) from user", "Instructions": { @@ -467,32 +542,7 @@ { "comment": "count with distinct no unique vindex, count expression aliased", "query": "select col1, count(distinct col2) c2 from user group by col1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1, count(distinct col2) c2 from user group by col1", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "count_distinct_count(1) AS c2", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", - "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1, count(distinct col2) c2 from user group by col1", "Instructions": { @@ -509,9 +559,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)", + "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc", + "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", "Table": "`user`" } ] @@ -524,32 +574,7 @@ { "comment": "sum with distinct no unique vindex", "query": "select col1, sum(distinct col2) from user group by col1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1, sum(distinct col2) from user group by col1", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_distinct_sum(1) AS sum(distinct col2)", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", - "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1, sum(distinct col2) from user group by col1", "Instructions": { @@ -566,9 +591,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)", + "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc", + "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", "Table": "`user`" } ] @@ -581,32 +606,7 @@ { "comment": "min with distinct no unique vindex. distinct is ignored.", "query": "select col1, min(distinct col2) from user group by col1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1, min(distinct col2) from user group by col1", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "min(1)", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, min(distinct col2), weight_string(col1) from `user` where 1 != 1 group by col1, weight_string(col1)", - "OrderBy": "(0|2) ASC", - "Query": "select col1, min(distinct col2), weight_string(col1) from `user` group by col1, weight_string(col1) order by col1 asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1, min(distinct col2) from user group by col1", "Instructions": { @@ -623,9 +623,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)", - "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc", + "FieldQuery": "select col1, min(col2) as `min(distinct col2)`, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), weight_string(col2)", + "OrderBy": "(0|2) ASC", + "Query": "select col1, min(col2) as `min(distinct col2)`, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), weight_string(col2) order by col1 asc", "Table": "`user`" } ] @@ -638,39 +638,7 @@ { "comment": "order by count distinct", "query": "select col1, count(distinct col2) k from user group by col1 order by k", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1, count(distinct col2) k from user group by col1 order by k", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "1 ASC", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "count_distinct_count(1) AS k", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", - "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1, count(distinct col2) k from user group by col1 order by k", "Instructions": { @@ -692,9 +660,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)", + "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc", + "Query": "select col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", "Table": "`user`" } ] @@ -709,40 +677,14 @@ { "comment": "scatter aggregate group by aggregate function", "query": "select count(*) b from user group by b", - "v3-plan": "VT03005: cannot group on 'b'", - "gen4-plan": "VT03005: cannot group on 'count(*)'" + "plan": "VT03005: cannot group on 'count(*)'" }, { "comment": "scatter aggregate multiple group by (columns)", - "query": "select a, b, count(*) from user group by b, a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, count(*) from user group by b, a", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(2) AS count", - "GroupBy": "1, 0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by b, a, weight_string(b), weight_string(a)", - "OrderBy": "(1|3) ASC, (0|4) ASC", - "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by b, a, weight_string(b), weight_string(a) order by b asc, a asc", - "ResultColumns": 3, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "query": "select a, b, count(*) from user group by a, b", + "plan": { "QueryType": "SELECT", - "Original": "select a, b, count(*) from user group by b, a", + "Original": "select a, b, count(*) from user group by a, b", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", @@ -757,9 +699,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b)", + "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, b, weight_string(a), weight_string(b)", "OrderBy": "(0|3) ASC, (1|4) ASC", - "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a), b, weight_string(b) order by a asc, b asc", + "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, b, weight_string(a), weight_string(b) order by a asc, b asc", "Table": "`user`" } ] @@ -772,14 +714,15 @@ { "comment": "scatter aggregate multiple group by (numbers)", "query": "select a, b, count(*) from user group by 2, 1", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, count(*) from user group by 2, 1", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "sum_count(2) AS count", - "GroupBy": "1, 0", + "Aggregates": "sum_count_star(2) AS count(*)", + "GroupBy": "(1|3), (0|4)", + "ResultColumns": 3, "Inputs": [ { "OperatorType": "Route", @@ -788,55 +731,30 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by 2, 1, weight_string(b), weight_string(a)", + "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by b, a, weight_string(b), weight_string(a)", "OrderBy": "(1|3) ASC, (0|4) ASC", - "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by 2, 1, weight_string(b), weight_string(a) order by b asc, a asc", - "ResultColumns": 3, + "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by b, a, weight_string(b), weight_string(a) order by b asc, a asc", "Table": "`user`" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "scatter aggregate multiple group by columns inverse order", + "query": "select a, b, count(*) from user group by b, a", + "plan": { "QueryType": "SELECT", - "Original": "select a, b, count(*) from user group by 2, 1", + "Original": "select a, b, count(*) from user group by b, a", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", "Aggregates": "sum_count_star(2) AS count(*)", - "GroupBy": "(0|3), (1|4)", + "GroupBy": "(1|3), (0|4)", "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b)", - "OrderBy": "(0|3) ASC, (1|4) ASC", - "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a), b, weight_string(b) order by a asc, b asc", - "Table": "`user`" - } - ] - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "scatter aggregate multiple group by columns inverse order", - "query": "select a, b, count(*) from user group by b, a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, count(*) from user group by b, a", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(2) AS count", - "GroupBy": "1, 0", "Inputs": [ { "OperatorType": "Route", @@ -848,32 +766,6 @@ "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by b, a, weight_string(b), weight_string(a)", "OrderBy": "(1|3) ASC, (0|4) ASC", "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by b, a, weight_string(b), weight_string(a) order by b asc, a asc", - "ResultColumns": 3, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select a, b, count(*) from user group by b, a", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count_star(2) AS count(*)", - "GroupBy": "(0|3), (1|4)", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b)", - "OrderBy": "(0|3) ASC, (1|4) ASC", - "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a), b, weight_string(b) order by a asc, b asc", "Table": "`user`" } ] @@ -886,30 +778,7 @@ { "comment": "scatter aggregate group by column number", "query": "select col from user group by 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user group by 1", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1 group by 1", - "OrderBy": "0 ASC", - "Query": "select col from `user` group by 1 order by col asc", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user group by 1", "Instructions": { @@ -939,35 +808,12 @@ { "comment": "scatter aggregate group by invalid column number", "query": "select col from user group by 2", - "v3-plan": "VT03014: unknown column '2' in 'group statement'", - "gen4-plan": "Unknown column '2' in 'group statement'" + "plan": "Unknown column '2' in 'group statement'" }, { "comment": "scatter aggregate order by null", "query": "select count(*) from user order by null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select count(*) from user order by null", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from `user` where 1 != 1", - "Query": "select count(*) from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) from user order by null", "Instructions": { @@ -996,38 +842,13 @@ { "comment": "scatter aggregate with numbered order by columns", "query": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(4) AS count", - "GroupBy": "0, 1, 2", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c)", - "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC", - "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c) order by 1 asc, 2 asc, 3 asc", - "ResultColumns": 5, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by 1, 2, 3", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "random(3) AS d, sum_count_star(4) AS count(*)", + "Aggregates": "any_value(3) AS d, sum_count_star(4) AS count(*)", "GroupBy": "(0|5), (1|6), (2|7)", "ResultColumns": 5, "Inputs": [ @@ -1038,9 +859,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c)", + "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, b, c, weight_string(a), weight_string(b), weight_string(c)", "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC", - "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c) order by a asc, b asc, c asc", + "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, b, c, weight_string(a), weight_string(b), weight_string(c) order by a asc, b asc, c asc", "Table": "`user`" } ] @@ -1053,38 +874,13 @@ { "comment": "scatter aggregate with named order by columns", "query": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(4) AS count", - "GroupBy": "0, 1, 2", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c)", - "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC", - "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by 1, 2, 3, weight_string(a), weight_string(b), weight_string(c) order by a asc, b asc, c asc", - "ResultColumns": 5, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3 order by a, b, c", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "random(3) AS d, sum_count_star(4) AS count(*)", + "Aggregates": "any_value(3) AS d, sum_count_star(4) AS count(*)", "GroupBy": "(0|5), (1|6), (2|7)", "ResultColumns": 5, "Inputs": [ @@ -1095,9 +891,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c)", + "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, b, c, weight_string(a), weight_string(b), weight_string(c)", "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC", - "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c) order by a asc, b asc, c asc", + "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, b, c, weight_string(a), weight_string(b), weight_string(c) order by a asc, b asc, c asc", "Table": "`user`" } ] @@ -1110,39 +906,14 @@ { "comment": "scatter aggregate with jumbled order by columns", "query": "select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(4) AS count", - "GroupBy": "0, 1, 2, 3", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by 1, 2, 3, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c)", - "OrderBy": "(3|5) ASC, (1|6) ASC, (0|7) ASC, (2|8) ASC", - "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by 1, 2, 3, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc", - "ResultColumns": 5, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, c, d, count(*) from user group by 1, 2, 3, 4 order by d, b, a, c", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", "Aggregates": "sum_count_star(4) AS count(*)", - "GroupBy": "(3|8), (1|6), (0|5), (2|7)", + "GroupBy": "(3|5), (1|6), (0|7), (2|8)", "ResultColumns": 5, "Inputs": [ { @@ -1152,9 +923,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d)", - "OrderBy": "(3|8) ASC, (1|6) ASC, (0|5) ASC, (2|7) ASC", - "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d) order by d asc, b asc, a asc, c asc", + "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by d, b, a, c, weight_string(d), weight_string(b), weight_string(a), weight_string(c)", + "OrderBy": "(3|5) ASC, (1|6) ASC, (0|7) ASC, (2|8) ASC", + "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by d, b, a, c, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc", "Table": "`user`" } ] @@ -1167,39 +938,14 @@ { "comment": "scatter aggregate with jumbled group by and order by columns", "query": "select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(4) AS count", - "GroupBy": "2, 1, 0, 3", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by 3, 2, 1, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c)", - "OrderBy": "(3|5) ASC, (1|6) ASC, (0|7) ASC, (2|8) ASC", - "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by 3, 2, 1, 4, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc", - "ResultColumns": 5, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, c, d, count(*) from user group by 3, 2, 1, 4 order by d, b, a, c", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", "Aggregates": "sum_count_star(4) AS count(*)", - "GroupBy": "(3|8), (1|6), (0|5), (2|7)", + "GroupBy": "(3|5), (1|6), (0|7), (2|8)", "ResultColumns": 5, "Inputs": [ { @@ -1209,9 +955,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d)", - "OrderBy": "(3|8) ASC, (1|6) ASC, (0|5) ASC, (2|7) ASC", - "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c), weight_string(d) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c), d, weight_string(d) order by d asc, b asc, a asc, c asc", + "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by d, b, a, c, weight_string(d), weight_string(b), weight_string(a), weight_string(c)", + "OrderBy": "(3|5) ASC, (1|6) ASC, (0|7) ASC, (2|8) ASC", + "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by d, b, a, c, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc", "Table": "`user`" } ] @@ -1224,39 +970,14 @@ { "comment": "scatter aggregate with some descending order by cols", "query": "select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(3) AS count", - "GroupBy": "2, 1, 0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` where 1 != 1 group by 3, 2, 1, weight_string(a), weight_string(c), weight_string(b)", - "OrderBy": "(0|4) DESC, (2|5) DESC, (1|6) ASC", - "Query": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` group by 3, 2, 1, weight_string(a), weight_string(c), weight_string(b) order by 1 desc, 3 desc, b asc", - "ResultColumns": 4, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, c, count(*) from user group by 3, 2, 1 order by 1 desc, 3 desc, b", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", "Aggregates": "sum_count_star(3) AS count(*)", - "GroupBy": "(0|4), (2|6), (1|5)", + "GroupBy": "(0|4), (2|5), (1|6)", "ResultColumns": 4, "Inputs": [ { @@ -1266,9 +987,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, c, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, weight_string(a), b, weight_string(b), c, weight_string(c)", - "OrderBy": "(0|4) DESC, (2|6) DESC, (1|5) ASC", - "Query": "select a, b, c, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, weight_string(a), b, weight_string(b), c, weight_string(c) order by a desc, c desc, b asc", + "FieldQuery": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` where 1 != 1 group by a, c, b, weight_string(a), weight_string(c), weight_string(b)", + "OrderBy": "(0|4) DESC, (2|5) DESC, (1|6) ASC", + "Query": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` group by a, c, b, weight_string(a), weight_string(c), weight_string(b) order by a desc, c desc, b asc", "Table": "`user`" } ] @@ -1281,43 +1002,12 @@ { "comment": "invalid order by column numner for scatter", "query": "select col, count(*) from user group by col order by 5 limit 10", - "v3-plan": "VT03014: unknown column '5' in 'order clause'", - "gen4-plan": "Unknown column '5' in 'order clause'" + "plan": "Unknown column '5' in 'order clause'" }, { "comment": "aggregate with limit", "query": "select col, count(*) from user group by col limit 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col, count(*) from user group by col limit 10", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col, count(*) from `user` where 1 != 1 group by col", - "OrderBy": "0 ASC", - "Query": "select col, count(*) from `user` group by col order by col asc limit :__upper_limit", - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, count(*) from user group by col limit 10", "Instructions": { @@ -1339,7 +1029,7 @@ }, "FieldQuery": "select col, count(*) from `user` where 1 != 1 group by col", "OrderBy": "0 ASC", - "Query": "select col, count(*) from `user` group by col order by col asc limit :__upper_limit", + "Query": "select col, count(*) from `user` group by col order by col asc", "Table": "`user`" } ] @@ -1354,26 +1044,7 @@ { "comment": "Group by with collate operator", "query": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col1 as a from `user` where 1 != 1 group by a collate utf8_general_ci", - "Query": "select `user`.col1 as a from `user` where `user`.id = 5 group by a collate utf8_general_ci", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci", "Instructions": { @@ -1399,22 +1070,7 @@ { "comment": "routing rules for aggregates", "query": "select id, count(*) from route2 group by id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, count(*) from route2 group by id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select id, count(*) from unsharded as route2 where 1 != 1 group by id", - "Query": "select id, count(*) from unsharded as route2 group by id", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, count(*) from route2 group by id", "Instructions": { @@ -1436,22 +1092,7 @@ { "comment": "order by on a reference table", "query": "select col from ref order by col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from ref order by col", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from ref where 1 != 1", - "Query": "select col from ref order by col asc", - "Table": "ref" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from ref order by col", "Instructions": { @@ -1473,61 +1114,24 @@ { "comment": "distinct and aggregate functions missing group by", "query": "select distinct a, count(*) from user", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select distinct a, count(*) from user", "Instructions": { - "OperatorType": "Distinct", + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "any_value(0) AS a, sum_count_star(1) AS count(*)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1", - "OrderBy": "(0|2) ASC", - "Query": "select a, count(*), weight_string(a) from `user` order by a asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select distinct a, count(*) from user", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "0, 1", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "random(0) AS a, sum_count_star(1) AS count(*)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, count(*) from `user` where 1 != 1", - "Query": "select a, count(*) from `user`", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select a, count(*) from `user` where 1 != 1", + "Query": "select a, count(*) from `user`", + "Table": "`user`" } ] }, @@ -1539,64 +1143,27 @@ { "comment": "distinct and aggregate functions", "query": "select distinct a, count(*) from user group by a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select distinct a, count(*) from user group by a", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0, 0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", - "OrderBy": "(0|2) ASC, (0|2) ASC", - "Query": "select a, count(*), weight_string(a) from `user` group by a, weight_string(a) order by a asc, a asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select distinct a, count(*) from user group by a", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "GroupBy": "(0|2), 1", + "Aggregates": "sum_count_star(1) AS count(*)", + "GroupBy": "(0|2)", "ResultColumns": 2, "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count_star(1) AS count(*)", - "GroupBy": "(0|2)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", - "OrderBy": "(0|2) ASC", - "Query": "select a, count(*), weight_string(a) from `user` group by a, weight_string(a) order by a asc", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select a, count(*), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", + "OrderBy": "(0|2) ASC", + "Query": "select a, count(*), weight_string(a) from `user` group by a, weight_string(a) order by a asc", + "Table": "`user`" } ] }, @@ -1608,14 +1175,13 @@ { "comment": "Group by invalid column number (code is duplicated from symab).", "query": "select id from user group by 1.1", - "v3-plan": "VT13001: [BUG] column number is not an INT", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user group by 1.1", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "random(0) AS id", + "Aggregates": "any_value(0) AS id", "GroupBy": "1", "ResultColumns": 1, "Inputs": [ @@ -1641,14 +1207,12 @@ { "comment": "Group by out of range column number (code is duplicated from symab).", "query": "select id from user group by 2", - "v3-plan": "VT03014: unknown column '2' in 'group statement'", - "gen4-plan": "Unknown column '2' in 'group statement'" + "plan": "Unknown column '2' in 'group statement'" }, { "comment": "here it is safe to remove the order by on the derived table since it will not influence the output of the count(*)", "query": "select count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a", "Instructions": { @@ -1664,7 +1228,7 @@ "Sharded": true }, "FieldQuery": "select count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1", - "Query": "select count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a", + "Query": "select count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id) as a", "Table": "`user`, user_extra" } ] @@ -1678,32 +1242,7 @@ { "comment": "order by inside derived tables can be ignored", "query": "select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col, user_extra.extra, weight_string(user_extra.extra) from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1", - "OrderBy": "(1|2) ASC", - "Query": "select `user`.col, user_extra.extra, weight_string(user_extra.extra) from `user` join user_extra on `user`.id = user_extra.user_id order by user_extra.extra asc", - "ResultColumns": 2, - "Table": "`user`, user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a", "Instructions": { @@ -1714,7 +1253,7 @@ "Sharded": true }, "FieldQuery": "select col from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1", - "Query": "select col from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a", + "Query": "select col from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id) as a", "Table": "`user`, user_extra" }, "TablesUsed": [ @@ -1726,8 +1265,7 @@ { "comment": "here we keep the order since the column is visible on the outside, and used by the orderedAggregate", "query": "select col, count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a group by col", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, count(*) from (select user.col, user_extra.extra from user join user_extra on user.id = user_extra.user_id order by user_extra.extra) a group by col", "Instructions": { @@ -1745,7 +1283,7 @@ }, "FieldQuery": "select col, count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where 1 != 1) as a where 1 != 1 group by col", "OrderBy": "0 ASC", - "Query": "select col, count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id order by user_extra.extra asc) as a group by col order by col asc", + "Query": "select col, count(*) from (select `user`.col, user_extra.extra from `user`, user_extra where `user`.id = user_extra.user_id) as a group by col order by col asc", "Table": "`user`, user_extra" } ] @@ -1759,37 +1297,15 @@ { "comment": "optimize group by when using distinct with no aggregation", "query": "select distinct col1, col2 from user group by col1, col2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select distinct col1, col2 from user group by col1, col2", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "0, 1, 0, 1", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", - "OrderBy": "(0|2) ASC, (1|3) ASC, (0|2) ASC, (1|3) ASC", - "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc, col1 asc, col2 asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select distinct col1, col2 from user group by col1, col2", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "(0|2), (1|3)", + "OperatorType": "Distinct", + "Collations": [ + "(0:2)", + "(1:3)" + ], "ResultColumns": 2, "Inputs": [ { @@ -1799,9 +1315,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2", - "OrderBy": "(0|2) ASC, (1|3) ASC", - "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2 order by col1 asc, col2 asc", + "FieldQuery": "select col1, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1", + "Query": "select distinct col1, col2, weight_string(col1), weight_string(col2) from `user`", "Table": "`user`" } ] @@ -1814,34 +1329,7 @@ { "comment": "do not use distinct when using only aggregates and no group by", "query": "select distinct count(*) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select distinct count(*) from user", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from `user` where 1 != 1", - "Query": "select count(*) from `user`", - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select distinct count(*) from user", "Instructions": { @@ -1870,8 +1358,7 @@ { "comment": "Grouping on join", "query": "select user.a from user join user_extra group by user.a", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.a from user join user_extra group by user.a", "Instructions": { @@ -1881,42 +1368,33 @@ "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 0] as a", - "[COLUMN 1]" - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:1", + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.a, weight_string(`user`.a) from `user` where 1 != 1 group by `user`.a, weight_string(`user`.a)", - "OrderBy": "(0|1) ASC", - "Query": "select `user`.a, weight_string(`user`.a) from `user` group by `user`.a, weight_string(`user`.a) order by `user`.a asc", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.a, weight_string(`user`.a) from `user` where 1 != 1 group by `user`.a, weight_string(`user`.a)", + "OrderBy": "(0|1) ASC", + "Query": "select `user`.a, weight_string(`user`.a) from `user` group by `user`.a, weight_string(`user`.a) order by `user`.a asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1 group by .0", + "Query": "select 1 from user_extra group by .0", + "Table": "user_extra" } ] } @@ -1928,17 +1406,10 @@ ] } }, - { - "comment": "Cannot have more than one aggr(distinct...", - "query": "select count(distinct a), count(distinct b) from user", - "v3-plan": "VT12001: unsupported: only one DISTINCT aggregation allowed in a SELECT: count(distinct b)", - "gen4-plan": "VT12001: unsupported: only one DISTINCT aggregation is allowed in a SELECT: count(distinct b)" - }, { "comment": "multiple distinct functions with grouping.", "query": "select col1, count(distinct col2), sum(distinct col2) from user group by col1", - "v3-plan": "VT12001: unsupported: only one DISTINCT aggregation allowed in a SELECT: sum(distinct col2)", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1, count(distinct col2), sum(distinct col2) from user group by col1", "Instructions": { @@ -1955,9 +1426,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, col2, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, weight_string(col1), col2, weight_string(col2)", + "FieldQuery": "select col1, col2, col2, weight_string(col1), weight_string(col2) from `user` where 1 != 1 group by col1, col2, weight_string(col1), weight_string(col2)", "OrderBy": "(0|3) ASC, (1|4) ASC", - "Query": "select col1, col2, col2, weight_string(col1), weight_string(col2) from `user` group by col1, weight_string(col1), col2, weight_string(col2) order by col1 asc, col2 asc", + "Query": "select col1, col2, col2, weight_string(col1), weight_string(col2) from `user` group by col1, col2, weight_string(col1), weight_string(col2) order by col1 asc, col2 asc", "Table": "`user`" } ] @@ -1970,8 +1441,7 @@ { "comment": "aggregate query with order by aggregate column along with NULL", "query": "select col, count(*) k from user group by col order by null, k", - "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: null", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, count(*) k from user group by col order by null, k", "Instructions": { @@ -2009,31 +1479,7 @@ { "comment": "aggregate query with order by NULL", "query": "select col, count(*) k from user group by col order by null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col, count(*) k from user group by col order by null", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col, count(*) as k from `user` where 1 != 1 group by col", - "OrderBy": "0 ASC", - "Query": "select col, count(*) as k from `user` group by col order by col asc", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, count(*) k from user group by col order by null", "Instructions": { @@ -2064,8 +1510,7 @@ { "comment": "join query on sharding key with group by a unique vindex with having clause.", "query": "select user.id, count(*) c from user, user_extra where user.id = user_extra.user_id group by user.id having max(user.col) > 10", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.id, count(*) c from user, user_extra where user.id = user_extra.user_id group by user.id having max(user.col) > 10", "Instructions": { @@ -2088,29 +1533,7 @@ { "comment": "correlated subquery on sharding key with group by a unique vindex with having clause.", "query": "select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) > 10)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) > 10)", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from `user` where 1 != 1", - "Query": "select count(*) from `user` where exists (select 1 from user_extra where user_id = `user`.id group by user_id having max(col) > 10 limit 1)", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) from user where exists (select 1 from user_extra where user_id = user.id group by user_id having max(col) > 10)", "Instructions": { @@ -2140,22 +1563,7 @@ { "comment": "aggregation filtering by having on a route", "query": "select id from user group by id having count(id) = 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user group by id having count(id) = 10", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1 group by id", - "Query": "select id from `user` group by id having count(id) = 10", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user group by id having count(id) = 10", "Instructions": { @@ -2177,40 +1585,15 @@ { "comment": "weight_string addition to group by", "query": "select lower(textcol1) as v, count(*) from user group by v", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select lower(textcol1) as v, count(*) from user group by v", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` where 1 != 1 group by v, weight_string(lower(textcol1))", - "OrderBy": "(0|2) ASC", - "Query": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` group by v, weight_string(lower(textcol1)) order by v asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select lower(textcol1) as v, count(*) from user group by v", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count_star(1) AS count(*)", - "GroupBy": "(0|2)", - "ResultColumns": 2, + "Aggregates": "sum_count_star(1) AS count(*)", + "GroupBy": "(0|2)", + "ResultColumns": 2, "Inputs": [ { "OperatorType": "Route", @@ -2234,32 +1617,7 @@ { "comment": "weight_string addition to group by when also there in order by", "query": "select char_length(texcol1) as a, count(*) from user group by a order by a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select char_length(texcol1) as a, count(*) from user group by a order by a", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` where 1 != 1 group by a, weight_string(char_length(texcol1))", - "OrderBy": "(0|2) ASC", - "Query": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` group by a, weight_string(char_length(texcol1)) order by a asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select char_length(texcol1) as a, count(*) from user group by a order by a", "Instructions": { @@ -2291,30 +1649,7 @@ { "comment": "order by inside and outside parenthesis select", "query": "(select id from user order by 1 desc) order by 1 asc limit 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(select id from user order by 1 desc) order by 1 asc limit 2", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(2)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by 1 asc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select id from user order by 1 desc) order by 1 asc limit 2", "Instructions": { @@ -2344,8 +1679,7 @@ { "comment": "correlated subquery in exists clause with an ordering", "query": "select col, id from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by id", - "v3-plan": "VT12001: unsupported: cross-shard correlated subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, id from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by id", "Instructions": { @@ -2357,6 +1691,7 @@ "TableName": "`user`_user_extra", "Inputs": [ { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -2369,6 +1704,7 @@ "Table": "`user`" }, { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -2394,13 +1730,12 @@ { "comment": "Column and Literal equality filter on scatter aggregates", "query": "select count(*) a from user having a = 10", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a = 10", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 = 10", + "Predicate": "count(*) = 10", "Inputs": [ { "OperatorType": "Aggregate", @@ -2430,13 +1765,12 @@ { "comment": "Equality filtering with column and string literal on scatter aggregates", "query": "select count(*) a from user having a = '1'", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a = '1'", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 = '1'", + "Predicate": "count(*) = '1'", "Inputs": [ { "OperatorType": "Aggregate", @@ -2466,13 +1800,12 @@ { "comment": "Column and Literal not equal filter on scatter aggregates", "query": "select count(*) a from user having a != 10", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a != 10", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 != 10", + "Predicate": "count(*) != 10", "Inputs": [ { "OperatorType": "Aggregate", @@ -2502,13 +1835,12 @@ { "comment": "Not equal filter with column and string literal on scatter aggregates", "query": "select count(*) a from user having a != '1'", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a != '1'", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 != '1'", + "Predicate": "count(*) != '1'", "Inputs": [ { "OperatorType": "Aggregate", @@ -2538,13 +1870,12 @@ { "comment": "Greater than filter on scatter aggregates", "query": "select count(*) a from user having a > 10", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a > 10", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 > 10", + "Predicate": "count(*) > 10", "Inputs": [ { "OperatorType": "Aggregate", @@ -2574,13 +1905,12 @@ { "comment": "Greater Equal filter on scatter aggregates", "query": "select count(*) a from user having a >= 10", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a >= 10", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 >= 10", + "Predicate": "count(*) >= 10", "Inputs": [ { "OperatorType": "Aggregate", @@ -2610,13 +1940,12 @@ { "comment": "Less than filter on scatter aggregates", "query": "select count(*) a from user having a < 10", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a < 10", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 < 10", + "Predicate": "count(*) < 10", "Inputs": [ { "OperatorType": "Aggregate", @@ -2646,13 +1975,12 @@ { "comment": "Less Equal filter on scatter aggregates", "query": "select count(*) a from user having a <= 10", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a <= 10", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 <= 10", + "Predicate": "count(*) <= 10", "Inputs": [ { "OperatorType": "Aggregate", @@ -2682,13 +2010,12 @@ { "comment": "Less Equal filter on scatter with grouping", "query": "select col, count(*) a from user group by col having a <= 10", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, count(*) a from user group by col having a <= 10", "Instructions": { "OperatorType": "Filter", - "Predicate": ":1 <= 10", + "Predicate": "count(*) <= 10", "Inputs": [ { "OperatorType": "Aggregate", @@ -2720,40 +2047,31 @@ { "comment": "We should be able to find grouping keys on ordered aggregates", "query": "select count(*) as a, val1 from user group by val1 having a = 1.00", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) as a, val1 from user group by val1 having a = 1.00", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 1 - ], + "OperatorType": "Filter", + "Predicate": "count(*) = 1.00", + "ResultColumns": 2, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":0 = 1.00", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(0) AS a", + "GroupBy": "(1|2)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count_star(0) AS a", - "GroupBy": "(1|2)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) as a, val1, weight_string(val1) from `user` where 1 != 1 group by val1, weight_string(val1)", - "OrderBy": "(1|2) ASC", - "Query": "select count(*) as a, val1, weight_string(val1) from `user` group by val1, weight_string(val1) order by val1 asc", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) as a, val1, weight_string(val1) from `user` where 1 != 1 group by val1, weight_string(val1)", + "OrderBy": "(1|2) ASC", + "Query": "select count(*) as a, val1, weight_string(val1) from `user` group by val1, weight_string(val1) order by val1 asc", + "Table": "`user`" } ] } @@ -2767,32 +2085,7 @@ { "comment": "distinct on text column with collation", "query": "select col, count(distinct textcol1) from user group by col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col, count(distinct textcol1) from user group by col", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "count_distinct_count(1) AS count(distinct textcol1)", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col, textcol1, weight_string(textcol1) from `user` where 1 != 1 group by col, textcol1, weight_string(textcol1)", - "OrderBy": "0 ASC, (1|2) ASC", - "Query": "select col, textcol1, weight_string(textcol1) from `user` group by col, textcol1, weight_string(textcol1) order by col asc, textcol1 asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, count(distinct textcol1) from user group by col", "Instructions": { @@ -2823,82 +2116,56 @@ { "comment": "aggregation filtering by having on a route with no group by with non-unique vindex filter", "query": "select 1 from user having count(id) = 10 and name = 'a'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from user having count(id) = 10 and name = 'a'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` having count(id) = 10 and `name` = 'a'", - "Table": "`user`", - "Values": [ - "VARCHAR(\"a\")" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 from user having count(id) = 10 and name = 'a'", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Filter", + "Predicate": "count(id) = 10", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 = 10", + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "any_value(0) AS 1, sum_count(1) AS count(id)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "random(0) AS 1, sum_count(1) AS count(id)", + "OperatorType": "VindexLookup", + "Variant": "Equal", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Values": [ + "VARCHAR(\"a\")" + ], + "Vindex": "name_user_map", "Inputs": [ { - "OperatorType": "VindexLookup", - "Variant": "Equal", + "OperatorType": "Route", + "Variant": "IN", "Keyspace": { "Name": "user", "Sharded": true }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", "Values": [ - "VARCHAR(\"a\")" + "::name" ], - "Vindex": "name_user_map", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", - "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", - "Table": "name_user_vdx", - "Values": [ - "::name" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "ByDestination", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1, count(id) from `user` where 1 != 1", - "Query": "select 1, count(id) from `user` where `name` = 'a'", - "Table": "`user`" - } - ] + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1, count(id) from `user` where 1 != 1", + "Query": "select 1, count(id) from `user` where `name` = 'a'", + "Table": "`user`" } ] } @@ -2914,8 +2181,7 @@ { "comment": "Aggregates and joins", "query": "select count(*) from user join user_extra", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) from user join user_extra", "Instructions": { @@ -2926,13 +2192,13 @@ { "OperatorType": "Projection", "Expressions": [ - "[COLUMN 0] * COALESCE([COLUMN 1], INT64(1)) as count(*)" + "[COLUMN 0] * [COLUMN 1] as count(*)" ], "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:1", + "JoinColumnIndexes": "L:0,R:0", "TableName": "`user`_user_extra", "Inputs": [ { @@ -2953,8 +2219,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1", - "Query": "select 1, count(*) from user_extra group by 1", + "FieldQuery": "select count(*) from user_extra where 1 != 1 group by .0", + "Query": "select count(*) from user_extra group by .0", "Table": "user_extra" } ] @@ -2972,51 +2238,29 @@ { "comment": "aggregation filtering by having on a route with no group by", "query": "select 1 from user having count(id) = 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from user having count(id) = 10", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` having count(id) = 10", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 from user having count(id) = 10", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Filter", + "Predicate": "count(id) = 10", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 = 10", + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "any_value(0) AS 1, sum_count(1) AS count(id)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "random(0) AS 1, sum_count(1) AS count(id)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1, count(id) from `user` where 1 != 1", - "Query": "select 1, count(id) from `user`", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1, count(id) from `user` where 1 != 1", + "Query": "select 1, count(id) from `user`", + "Table": "`user`" } ] } @@ -3030,8 +2274,7 @@ { "comment": "Aggregate on join", "query": "select user.a, count(*) from user join user_extra group by user.a", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.a, count(*) from user join user_extra group by user.a", "Instructions": { @@ -3044,15 +2287,15 @@ { "OperatorType": "Projection", "Expressions": [ - "[COLUMN 0] as a", - "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as count(*)", - "[COLUMN 1]" + "[COLUMN 2] as a", + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 3] as weight_string(`user`.a)" ], "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1,L:2,L:0,R:1", + "JoinColumnIndexes": "L:0,R:0,L:1,L:2", "TableName": "`user`_user_extra", "Inputs": [ { @@ -3074,8 +2317,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1", - "Query": "select 1, count(*) from user_extra group by 1", + "FieldQuery": "select count(*) from user_extra where 1 != 1 group by .0", + "Query": "select count(*) from user_extra group by .0", "Table": "user_extra" } ] @@ -3093,8 +2336,7 @@ { "comment": "Aggregate on other table in join", "query": "select user.a, count(user_extra.a) from user join user_extra group by user.a", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.a, count(user_extra.a) from user join user_extra group by user.a", "Instructions": { @@ -3107,15 +2349,15 @@ { "OperatorType": "Projection", "Expressions": [ - "[COLUMN 0] as a", - "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as count(user_extra.a)", - "[COLUMN 1]" + "[COLUMN 2] as a", + "[COLUMN 1] * [COLUMN 0] as count(user_extra.a)", + "[COLUMN 3] as weight_string(`user`.a)" ], "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1,L:2,L:0,R:1", + "JoinColumnIndexes": "R:0,L:0,L:1,L:2", "TableName": "`user`_user_extra", "Inputs": [ { @@ -3137,8 +2379,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1, count(user_extra.a) from user_extra where 1 != 1 group by 1", - "Query": "select 1, count(user_extra.a) from user_extra group by 1", + "FieldQuery": "select count(user_extra.a) from user_extra where 1 != 1 group by .0", + "Query": "select count(user_extra.a) from user_extra group by .0", "Table": "user_extra" } ] @@ -3156,8 +2398,7 @@ { "comment": "aggregation spread out across three routes", "query": "select count(u.textcol1), count(ue.foo), us.bar from user u join user_extra ue on u.foo = ue.bar join unsharded us on ue.bar = us.baz group by us.bar", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(u.textcol1), count(ue.foo), us.bar from user u join user_extra ue on u.foo = ue.bar join unsharded us on ue.bar = us.baz group by us.bar", "Instructions": { @@ -3170,23 +2411,23 @@ { "OperatorType": "Projection", "Expressions": [ - "([COLUMN 2] * COALESCE([COLUMN 3], INT64(1))) * COALESCE([COLUMN 4], INT64(1)) as count(u.textcol1)", - "([COLUMN 5] * COALESCE([COLUMN 6], INT64(1))) * COALESCE([COLUMN 7], INT64(1)) as count(ue.foo)", - "[COLUMN 0] as bar", - "[COLUMN 1]" + "[COLUMN 0] * [COLUMN 1] as count(u.textcol1)", + "[COLUMN 3] * [COLUMN 2] as count(ue.foo)", + "[COLUMN 4] as bar", + "[COLUMN 5] as weight_string(us.bar)" ], "Inputs": [ { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "(0|1) ASC", + "OrderBy": "(4|5) ASC", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "R:0,R:1,L:1,R:2,R:3,L:2,R:4,R:5", + "JoinColumnIndexes": "L:0,R:0,R:1,L:1,R:2,R:3", "JoinVars": { - "u_foo": 0 + "u_foo": 2 }, "TableName": "`user`_user_extra_unsharded", "Inputs": [ @@ -3197,40 +2438,51 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.foo, count(u.textcol1), count(*), weight_string(u.foo) from `user` as u where 1 != 1 group by u.foo, weight_string(u.foo)", - "Query": "select u.foo, count(u.textcol1), count(*), weight_string(u.foo) from `user` as u group by u.foo, weight_string(u.foo)", + "FieldQuery": "select count(u.textcol1), count(*), u.foo from `user` as u where 1 != 1 group by u.foo", + "Query": "select count(u.textcol1), count(*), u.foo from `user` as u group by u.foo", "Table": "`user`" }, { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:1,R:2,L:1,R:0,L:2,R:0", - "JoinVars": { - "ue_bar": 0 - }, - "TableName": "user_extra_unsharded", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 2] * [COLUMN 1] as count(ue.foo)", + "[COLUMN 3] as bar", + "[COLUMN 4] as weight_string(us.bar)" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select ue.bar, count(*), count(ue.foo), weight_string(ue.bar) from user_extra as ue where 1 != 1 group by ue.bar, weight_string(ue.bar)", - "Query": "select ue.bar, count(*), count(ue.foo), weight_string(ue.bar) from user_extra as ue where ue.bar = :u_foo group by ue.bar, weight_string(ue.bar)", - "Table": "user_extra" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,R:1,R:2", + "JoinVars": { + "ue_bar": 2 }, - "FieldQuery": "select count(*), us.bar, weight_string(us.bar) from unsharded as us where 1 != 1 group by us.bar, weight_string(us.bar)", - "Query": "select count(*), us.bar, weight_string(us.bar) from unsharded as us where us.baz = :ue_bar group by us.bar, weight_string(us.bar)", - "Table": "unsharded" + "TableName": "user_extra_unsharded", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), count(ue.foo), ue.bar from user_extra as ue where 1 != 1 group by ue.bar", + "Query": "select count(*), count(ue.foo), ue.bar from user_extra as ue where ue.bar = :u_foo group by ue.bar", + "Table": "user_extra" + }, + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select count(*), us.bar, weight_string(us.bar) from unsharded as us where 1 != 1 group by us.bar, weight_string(us.bar)", + "Query": "select count(*), us.bar, weight_string(us.bar) from unsharded as us where us.baz = :ue_bar group by us.bar, weight_string(us.bar)", + "Table": "unsharded" + } + ] } ] } @@ -3252,14 +2504,15 @@ { "comment": "using two distinct columns - min with distinct vindex, sum with distinct without vindex", "query": "select col1, min(distinct id), sum(distinct col3) from user group by col1", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1, min(distinct id), sum(distinct col3) from user group by col1", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "min(1), sum_distinct_sum(2) AS sum(distinct col3)", - "GroupBy": "0", + "Aggregates": "min(1|4) AS min(distinct id), sum_distinct(2|5) AS sum(distinct col3)", + "GroupBy": "(0|3)", + "ResultColumns": 3, "Inputs": [ { "OperatorType": "Route", @@ -3268,35 +2521,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` where 1 != 1 group by col1, col3, weight_string(col1), weight_string(col3)", - "OrderBy": "(0|3) ASC, (2|4) ASC", - "Query": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` group by col1, col3, weight_string(col1), weight_string(col3) order by col1 asc, col3 asc", - "ResultColumns": 3, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select col1, min(distinct id), sum(distinct col3) from user group by col1", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "min(1) AS min(distinct id), sum_distinct(2|4) AS sum(distinct col3)", - "GroupBy": "(0|3)", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` where 1 != 1 group by col1, weight_string(col1), col3, weight_string(col3)", - "OrderBy": "(0|3) ASC, (2|4) ASC", - "Query": "select col1, min(distinct id), col3, weight_string(col1), weight_string(col3) from `user` group by col1, weight_string(col1), col3, weight_string(col3) order by col1 asc, col3 asc", + "FieldQuery": "select col1, min(id) as `min(distinct id)`, col3, weight_string(col1), weight_string(id), weight_string(col3) from `user` where 1 != 1 group by col1, col3, weight_string(col1), weight_string(id), weight_string(col3)", + "OrderBy": "(0|3) ASC, (2|5) ASC", + "Query": "select col1, min(id) as `min(distinct id)`, col3, weight_string(col1), weight_string(id), weight_string(col3) from `user` group by col1, col3, weight_string(col1), weight_string(id), weight_string(col3) order by col1 asc, col3 asc", "Table": "`user`" } ] @@ -3309,8 +2536,7 @@ { "comment": "aggregation on top of semijoin", "query": "select count(*) from user where exists (select 0 from user_extra where user.apa = user_extra.bar)", - "v3-plan": "VT12001: unsupported: cross-shard correlated subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) from user where exists (select 0 from user_extra where user.apa = user_extra.bar)", "Instructions": { @@ -3332,6 +2558,7 @@ "TableName": "`user`_user_extra", "Inputs": [ { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3343,6 +2570,7 @@ "Table": "`user`" }, { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3368,32 +2596,7 @@ { "comment": "we have to track the order of distinct aggregation expressions", "query": "select val2, count(distinct val1), count(*) from user group by val2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select val2, count(distinct val1), count(*) from user group by val2", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "count_distinct_count(1) AS count(distinct val1), sum_count(2) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` where 1 != 1 group by val2, val1, weight_string(val2), weight_string(val1)", - "OrderBy": "(0|3) ASC, (1|4) ASC", - "Query": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` group by val2, val1, weight_string(val2), weight_string(val1) order by val2 asc, val1 asc", - "ResultColumns": 3, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select val2, count(distinct val1), count(*) from user group by val2", "Instructions": { @@ -3410,9 +2613,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` where 1 != 1 group by val2, weight_string(val2), val1, weight_string(val1)", + "FieldQuery": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` where 1 != 1 group by val2, val1, weight_string(val2), weight_string(val1)", "OrderBy": "(0|3) ASC, (1|4) ASC", - "Query": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` group by val2, weight_string(val2), val1, weight_string(val1) order by val2 asc, val1 asc", + "Query": "select val2, val1, count(*), weight_string(val2), weight_string(val1) from `user` group by val2, val1, weight_string(val2), weight_string(val1) order by val2 asc, val1 asc", "Table": "`user`" } ] @@ -3425,32 +2628,7 @@ { "comment": "group by column alias", "query": "select ascii(val1) as a, count(*) from user group by a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select ascii(val1) as a, count(*) from user group by a", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` where 1 != 1 group by a, weight_string(ascii(val1))", - "OrderBy": "(0|2) ASC", - "Query": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` group by a, weight_string(ascii(val1)) order by a asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select ascii(val1) as a, count(*) from user group by a", "Instructions": { @@ -3482,8 +2660,7 @@ { "comment": "multiple distinct aggregations on the same column is allowed", "query": "select tcol1, count(distinct tcol2), sum(distinct tcol2) from user group by tcol1", - "v3-plan": "VT12001: unsupported: only one DISTINCT aggregation allowed in a SELECT: sum(distinct tcol2)", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select tcol1, count(distinct tcol2), sum(distinct tcol2) from user group by tcol1", "Instructions": { @@ -3500,9 +2677,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select tcol1, tcol2, tcol2, weight_string(tcol1), weight_string(tcol2) from `user` where 1 != 1 group by tcol1, weight_string(tcol1), tcol2, weight_string(tcol2)", + "FieldQuery": "select tcol1, tcol2, tcol2, weight_string(tcol1), weight_string(tcol2) from `user` where 1 != 1 group by tcol1, tcol2, weight_string(tcol1), weight_string(tcol2)", "OrderBy": "(0|3) ASC, (1|4) ASC", - "Query": "select tcol1, tcol2, tcol2, weight_string(tcol1), weight_string(tcol2) from `user` group by tcol1, weight_string(tcol1), tcol2, weight_string(tcol2) order by tcol1 asc, tcol2 asc", + "Query": "select tcol1, tcol2, tcol2, weight_string(tcol1), weight_string(tcol2) from `user` group by tcol1, tcol2, weight_string(tcol1), weight_string(tcol2) order by tcol1 asc, tcol2 asc", "Table": "`user`" } ] @@ -3515,15 +2692,14 @@ { "comment": "multiple distinct aggregations on the same column in different positions", "query": "select count(distinct tcol2), tcol1, count(*), sum(distinct tcol2) from user group by tcol1", - "v3-plan": "VT12001: unsupported: only one DISTINCT aggregation allowed in a SELECT: sum(distinct tcol2)", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(distinct tcol2), tcol1, count(*), sum(distinct tcol2) from user group by tcol1", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "count_distinct(0|4) AS count(distinct tcol2), sum_count_star(2) AS count(*), sum_distinct(3|4) AS sum(distinct tcol2)", - "GroupBy": "(1|5)", + "Aggregates": "count_distinct(0|5) AS count(distinct tcol2), sum_count_star(2) AS count(*), sum_distinct(3|5) AS sum(distinct tcol2)", + "GroupBy": "(1|4)", "ResultColumns": 4, "Inputs": [ { @@ -3533,9 +2709,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select tcol2, tcol1, count(*), tcol2, weight_string(tcol2), weight_string(tcol1) from `user` where 1 != 1 group by tcol2, weight_string(tcol2), tcol1, weight_string(tcol1)", - "OrderBy": "(1|5) ASC, (0|4) ASC", - "Query": "select tcol2, tcol1, count(*), tcol2, weight_string(tcol2), weight_string(tcol1) from `user` group by tcol2, weight_string(tcol2), tcol1, weight_string(tcol1) order by tcol1 asc, tcol2 asc", + "FieldQuery": "select tcol2, tcol1, count(*), tcol2, weight_string(tcol1), weight_string(tcol2) from `user` where 1 != 1 group by tcol1, tcol2, weight_string(tcol1), weight_string(tcol2)", + "OrderBy": "(1|4) ASC, (0|5) ASC", + "Query": "select tcol2, tcol1, count(*), tcol2, weight_string(tcol1), weight_string(tcol2) from `user` group by tcol1, tcol2, weight_string(tcol1), weight_string(tcol2) order by tcol1 asc, tcol2 asc", "Table": "`user`" } ] @@ -3548,8 +2724,7 @@ { "comment": "distinct aggregation will 3 table join query", "query": "select u.textcol1, count(distinct u.val2) from user u join user u2 on u.val2 = u2.id join music m on u2.val2 = m.id group by u.textcol1", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.textcol1, count(distinct u.val2) from user u join user u2 on u.val2 = u2.id join music m on u2.val2 = m.id group by u.textcol1", "Instructions": { @@ -3560,119 +2735,22 @@ "ResultColumns": 2, "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 0] as textcol1", - "[COLUMN 1] as val2", - "[COLUMN 2]" - ], - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:2,L:3,L:5", - "JoinVars": { - "u2_val2": 0 - }, - "TableName": "`user`_`user`_music", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0,R:0,L:2,L:0,R:1,L:1", - "JoinVars": { - "u_val2": 0 - }, - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.val2, weight_string(u.val2), u.textcol1 from `user` as u where 1 != 1 group by u.val2, weight_string(u.val2), u.textcol1", - "OrderBy": "2 ASC COLLATE latin1_swedish_ci, (0|1) ASC", - "Query": "select u.val2, weight_string(u.val2), u.textcol1 from `user` as u group by u.val2, weight_string(u.val2), u.textcol1 order by u.textcol1 asc, u.val2 asc", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u2.val2, weight_string(u2.val2) from `user` as u2 where 1 != 1 group by u2.val2, weight_string(u2.val2)", - "Query": "select u2.val2, weight_string(u2.val2) from `user` as u2 where u2.id = :u_val2 group by u2.val2, weight_string(u2.val2)", - "Table": "`user`", - "Values": [ - ":u_val2" - ], - "Vindex": "user_index" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music as m where 1 != 1", - "Query": "select 1 from music as m where m.id = :u2_val2", - "Table": "music", - "Values": [ - ":u2_val2" - ], - "Vindex": "music_user_map" - } - ] - } - ] - } - ] - }, - "TablesUsed": [ - "user.music", - "user.user" - ] - } - }, - { - "comment": "interleaving grouping, aggregation and join", - "query": "select user.col, min(user_extra.foo), user.bar, max(user_extra.bar) from user join user_extra on user.col = user_extra.bar group by user.col, user.bar", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user.col, min(user_extra.foo), user.bar, max(user_extra.bar) from user join user_extra on user.col = user_extra.bar group by user.col, user.bar", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "min(1) AS min(user_extra.foo), max(3) AS max(user_extra.bar)", - "GroupBy": "0, (2|4)", - "ResultColumns": 4, - "Inputs": [ - { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 0] as col", - "[COLUMN 3] as min(user_extra.foo)", - "[COLUMN 1] as bar", - "[COLUMN 4] as max(user_extra.bar)", - "[COLUMN 2]" - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:1,L:2", + "JoinVars": { + "u2_val2": 3 + }, + "TableName": "`user`_`user`_music", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,L:2,R:1,R:2", + "JoinColumnIndexes": "L:0,L:1,L:2,R:0", "JoinVars": { - "user_col": 0 + "u_val2": 1 }, - "TableName": "`user`_user_extra", + "TableName": "`user`_`user`", "Inputs": [ { "OperatorType": "Route", @@ -3681,53 +2759,57 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col, `user`.bar, weight_string(`user`.bar) from `user` where 1 != 1 group by `user`.col, `user`.bar, weight_string(`user`.bar)", - "OrderBy": "0 ASC, (1|2) ASC", - "Query": "select `user`.col, `user`.bar, weight_string(`user`.bar) from `user` group by `user`.col, `user`.bar, weight_string(`user`.bar) order by `user`.col asc, `user`.bar asc", + "FieldQuery": "select u.textcol1, u.val2, weight_string(u.val2) from `user` as u where 1 != 1", + "OrderBy": "0 ASC COLLATE latin1_swedish_ci, (1|2) ASC", + "Query": "select u.textcol1, u.val2, weight_string(u.val2) from `user` as u order by u.textcol1 asc, u.val2 asc", "Table": "`user`" }, { "OperatorType": "Route", - "Variant": "Scatter", + "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select 1, min(user_extra.foo), max(user_extra.bar) from user_extra where 1 != 1 group by 1", - "Query": "select 1, min(user_extra.foo), max(user_extra.bar) from user_extra where user_extra.bar = :user_col group by 1", - "Table": "user_extra" + "FieldQuery": "select u2.val2 from `user` as u2 where 1 != 1", + "Query": "select u2.val2 from `user` as u2 where u2.id = :u_val2", + "Table": "`user`", + "Values": [ + ":u_val2" + ], + "Vindex": "user_index" } ] + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music as m where 1 != 1", + "Query": "select 1 from music as m where m.id = :u2_val2", + "Table": "music", + "Values": [ + ":u2_val2" + ], + "Vindex": "music_user_map" } ] } ] }, "TablesUsed": [ - "user.user", - "user.user_extra" + "user.music", + "user.user" ] } }, { "comment": "group_concat on single shards", "query": "select group_concat(user_id order by name), id from user group by id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select group_concat(user_id order by name), id from user group by id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select group_concat(user_id order by `name` asc), id from `user` where 1 != 1 group by id", - "Query": "select group_concat(user_id order by `name` asc), id from `user` group by id", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select group_concat(user_id order by name), id from user group by id", "Instructions": { @@ -3749,22 +2831,7 @@ { "comment": "select count(distinct user_id, name) from unsharded", "query": "select count(distinct user_id, name) from unsharded", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select count(distinct user_id, name) from unsharded", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select count(distinct user_id, `name`) from unsharded where 1 != 1", - "Query": "select count(distinct user_id, `name`) from unsharded", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(distinct user_id, name) from unsharded", "Instructions": { @@ -3786,14 +2853,12 @@ { "comment": "select count(distinct user_id, name) from user", "query": "select count(distinct user_id, name) from user", - "v3-plan": "VT12001: unsupported: only one expression is allowed inside aggregates: count(distinct user_id, `name`)", - "gen4-plan": "VT03001: aggregate functions take a single argument 'count(distinct user_id, `name`)'" + "plan": "VT03001: aggregate functions take a single argument 'count(distinct user_id, `name`)'" }, { "comment": "select sum(col) from (select user.col as col, 32 from user join user_extra) t", "query": "select sum(col) from (select user.col as col, 32 from user join user_extra) t", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sum(col) from (select user.col as col, 32 from user join user_extra) t", "Instructions": { @@ -3804,13 +2869,13 @@ { "OperatorType": "Projection", "Expressions": [ - "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as sum(col)" + "[COLUMN 0] * [COLUMN 1] as sum(col)" ], "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,L:2,R:1", + "JoinColumnIndexes": "L:0,R:0", "TableName": "`user`_user_extra", "Inputs": [ { @@ -3820,8 +2885,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col as col, 32, sum(col) from `user` where 1 != 1", - "Query": "select `user`.col as col, 32, sum(col) from `user`", + "FieldQuery": "select sum(col), 32 from (select `user`.col as col, 32 from `user` where 1 != 1) as t where 1 != 1", + "Query": "select sum(col), 32 from (select `user`.col as col, 32 from `user`) as t", "Table": "`user`" }, { @@ -3831,8 +2896,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1, count(*) from user_extra where 1 != 1 group by 1", - "Query": "select 1, count(*) from user_extra group by 1", + "FieldQuery": "select count(*) from user_extra where 1 != 1 group by .0", + "Query": "select count(*) from user_extra group by .0", "Table": "user_extra" } ] @@ -3850,40 +2915,31 @@ { "comment": "find aggregation expression and use column offset in filter", "query": "select foo, count(*) from user group by foo having count(*) = 3", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select foo, count(*) from user group by foo having count(*) = 3", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 1 - ], + "OperatorType": "Filter", + "Predicate": "count(*) = 3", + "ResultColumns": 2, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 = 3", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(1) AS count(*)", + "GroupBy": "(0|2)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count_star(1) AS count(*)", - "GroupBy": "(0|2)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select foo, count(*), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)", - "OrderBy": "(0|2) ASC", - "Query": "select foo, count(*), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, count(*), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)", + "OrderBy": "(0|2) ASC", + "Query": "select foo, count(*), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc", + "Table": "`user`" } ] } @@ -3897,41 +2953,31 @@ { "comment": "find aggregation expression and use column offset in filter times two", "query": "select foo, sum(foo), sum(bar) from user group by foo having sum(foo)+sum(bar) = 42", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select foo, sum(foo), sum(bar) from user group by foo having sum(foo)+sum(bar) = 42", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 1, - 2 - ], + "OperatorType": "Filter", + "Predicate": "sum(foo) + sum(bar) = 42", + "ResultColumns": 3, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 + :2 = 42", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum(1) AS sum(foo), sum(2) AS sum(bar)", + "GroupBy": "(0|3)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum(1) AS sum(foo), sum(2) AS sum(bar)", - "GroupBy": "(0|3)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select foo, sum(foo), sum(bar), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)", - "OrderBy": "(0|3) ASC", - "Query": "select foo, sum(foo), sum(bar), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, sum(foo), sum(bar), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)", + "OrderBy": "(0|3) ASC", + "Query": "select foo, sum(foo), sum(bar), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc", + "Table": "`user`" } ] } @@ -3945,41 +2991,31 @@ { "comment": "find aggregation expression and use column offset in filter times three", "query": "select foo, sum(foo) as fooSum, sum(bar) as barSum from user group by foo having fooSum+sum(bar) = 42", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select foo, sum(foo) as fooSum, sum(bar) as barSum from user group by foo having fooSum+sum(bar) = 42", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 1, - 2 - ], + "OperatorType": "Filter", + "Predicate": "sum(foo) + sum(bar) = 42", + "ResultColumns": 3, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 + :2 = 42", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum(1) AS fooSum, sum(2) AS barSum", + "GroupBy": "(0|3)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum(1) AS fooSum, sum(2) AS barSum", - "GroupBy": "(0|3)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select foo, sum(foo) as fooSum, sum(bar) as barSum, weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)", - "OrderBy": "(0|3) ASC", - "Query": "select foo, sum(foo) as fooSum, sum(bar) as barSum, weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, sum(foo) as fooSum, sum(bar) as barSum, weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)", + "OrderBy": "(0|3) ASC", + "Query": "select foo, sum(foo) as fooSum, sum(bar) as barSum, weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc", + "Table": "`user`" } ] } @@ -3993,39 +3029,31 @@ { "comment": "having should be able to add new aggregation expressions in having", "query": "select foo from user group by foo having count(*) = 3", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select foo from user group by foo having count(*) = 3", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Filter", + "Predicate": "count(*) = 3", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 = 3", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(1) AS count(*)", + "GroupBy": "(0|2)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count_star(1) AS count(*)", - "GroupBy": "(0|2)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select foo, count(*), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)", - "OrderBy": "(0|2) ASC", - "Query": "select foo, count(*), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, count(*), weight_string(foo) from `user` where 1 != 1 group by foo, weight_string(foo)", + "OrderBy": "(0|2) ASC", + "Query": "select foo, count(*), weight_string(foo) from `user` group by foo, weight_string(foo) order by foo asc", + "Table": "`user`" } ] } @@ -4039,75 +3067,67 @@ { "comment": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(u.name) = 3", "query": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(u.name) = 3", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(u.name) = 3", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Filter", + "Predicate": "count(u.`name`) = 3", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 = 3", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count(1) AS count(u.`name`)", + "GroupBy": "(0|2)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count(u.`name`)", - "GroupBy": "(0|2)", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 2] as id", + "[COLUMN 1] * [COLUMN 0] as count(u.`name`)", + "[COLUMN 3] as weight_string(u.id)" + ], "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 0] as id", - "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as count(u.`name`)", - "[COLUMN 1]" - ], + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(2|3) ASC", "Inputs": [ { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|1) ASC", + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,L:0,R:1,R:2", + "JoinVars": { + "ue_id": 1 + }, + "TableName": "user_extra_`user`", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:1,R:2,L:1,R:0", - "JoinVars": { - "ue_id": 0 + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "user_extra_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue where 1 != 1 group by ue.id, weight_string(ue.id)", - "Query": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue group by ue.id, weight_string(ue.id)", - "Table": "user_extra" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(u.`name`), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)", - "Query": "select count(u.`name`), u.id, weight_string(u.id) from `user` as u where u.id = :ue_id group by u.id, weight_string(u.id)", - "Table": "`user`", - "Values": [ - ":ue_id" - ], - "Vindex": "user_index" - } - ] + "FieldQuery": "select count(*), ue.id from user_extra as ue where 1 != 1 group by ue.id", + "Query": "select count(*), ue.id from user_extra as ue group by ue.id", + "Table": "user_extra" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(u.`name`), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)", + "Query": "select count(u.`name`), u.id, weight_string(u.id) from `user` as u where u.id = :ue_id group by u.id, weight_string(u.id)", + "Table": "`user`", + "Values": [ + ":ue_id" + ], + "Vindex": "user_index" } ] } @@ -4128,22 +3148,7 @@ { "comment": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3", "query": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id from `user` as u join user_extra as ue on ue.user_id = u.id where 1 != 1 group by u.id", - "Query": "select u.id from `user` as u join user_extra as ue on ue.user_id = u.id group by u.id having count(u.`name`) = 3", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id from user u join user_extra ue on ue.user_id = u.id group by u.id having count(u.name) = 3", "Instructions": { @@ -4166,75 +3171,67 @@ { "comment": "only extract the aggregation once, even if used twice", "query": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(*) < 3 and count(*) > 5", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id from user u join user_extra ue on ue.id = u.id group by u.id having count(*) < 3 and count(*) > 5", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Filter", + "Predicate": "count(*) < 3 and count(*) > 5", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 < 3 and :1 > 5", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(1) AS count(*)", + "GroupBy": "(0|2)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count_star(1) AS count(*)", - "GroupBy": "(0|2)", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 2] as id", + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 3] as weight_string(u.id)" + ], "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 0] as id", - "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as count(*)", - "[COLUMN 1]" - ], + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(2|3) ASC", "Inputs": [ { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|1) ASC", + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,R:1,R:2", + "JoinVars": { + "ue_id": 1 + }, + "TableName": "user_extra_`user`", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:1,R:2,L:1,R:0", - "JoinVars": { - "ue_id": 0 + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "user_extra_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue where 1 != 1 group by ue.id, weight_string(ue.id)", - "Query": "select ue.id, count(*), weight_string(ue.id) from user_extra as ue group by ue.id, weight_string(ue.id)", - "Table": "user_extra" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)", - "Query": "select count(*), u.id, weight_string(u.id) from `user` as u where u.id = :ue_id group by u.id, weight_string(u.id)", - "Table": "`user`", - "Values": [ - ":ue_id" - ], - "Vindex": "user_index" - } - ] + "FieldQuery": "select count(*), ue.id from user_extra as ue where 1 != 1 group by ue.id", + "Query": "select count(*), ue.id from user_extra as ue group by ue.id", + "Table": "user_extra" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)", + "Query": "select count(*), u.id, weight_string(u.id) from `user` as u where u.id = :ue_id group by u.id, weight_string(u.id)", + "Table": "`user`", + "Values": [ + ":ue_id" + ], + "Vindex": "user_index" } ] } @@ -4255,8 +3252,7 @@ { "comment": "select (select 1 from user u having count(ue.col) > 10) from user_extra ue", "query": "select (select 1 from user u having count(ue.col) > 10) from user_extra ue", - "v3-plan": "VT03020: symbol ue.col not found in subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select (select 1 from user u having count(ue.col) > 10) from user_extra ue", "Instructions": { @@ -4267,6 +3263,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "SimpleProjection", "Columns": [ 0 @@ -4279,7 +3276,7 @@ { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "random(0) AS 1, sum_count(1) AS count(ue.col)", + "Aggregates": "any_value(0) AS 1, sum_count(1) AS count(ue.col)", "Inputs": [ { "OperatorType": "Route", @@ -4299,6 +3296,7 @@ ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -4320,58 +3318,47 @@ { "comment": "group by and ',' joins with condition", "query": "select user.col from user join user_extra on user_extra.col = user.col group by user.id", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user_extra.col = user.col group by user.id", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "random(0) AS col", - "GroupBy": "(2|1)", + "Aggregates": "any_value(0) AS col", + "GroupBy": "(1|2)", "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 2] as col", - "[COLUMN 1]", - "[COLUMN 0] as id" - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:1,L:2", + "JoinVars": { + "user_col": 0 + }, + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,L:2,L:0", - "JoinVars": { - "user_col": 0 + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col, `user`.id, weight_string(`user`.id) from `user` where 1 != 1 group by `user`.col, `user`.id, weight_string(`user`.id)", - "OrderBy": "(1|2) ASC", - "Query": "select `user`.col, `user`.id, weight_string(`user`.id) from `user` group by `user`.col, `user`.id, weight_string(`user`.id) order by `user`.id asc", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1 group by 1", - "Query": "select 1 from user_extra where user_extra.col = :user_col group by 1", - "Table": "user_extra" - } - ] + "FieldQuery": "select `user`.col, `user`.id, weight_string(`user`.id) from `user` where 1 != 1 group by `user`.id, `user`.col, weight_string(`user`.id)", + "OrderBy": "(1|2) ASC", + "Query": "select `user`.col, `user`.id, weight_string(`user`.id) from `user` group by `user`.id, `user`.col, weight_string(`user`.id) order by `user`.id asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1 group by .0", + "Query": "select 1 from user_extra where user_extra.col = :user_col group by .0", + "Table": "user_extra" } ] } @@ -4386,15 +3373,50 @@ { "comment": "scatter aggregate symtab lookup error", "query": "select id, b as id, count(*) from user order by id", - "v3-plan": "VT03021: ambiguous symbol reference: id", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, b as id, count(*) from user order by id", + "Instructions": { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(1|3) ASC", + "ResultColumns": 3, + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "any_value(0) AS id, any_value(1) AS id, sum_count_star(2) AS count(*), any_value(3)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, b as id, count(*), weight_string(b) from `user` where 1 != 1", + "Query": "select id, b as id, count(*), weight_string(b) from `user`", + "Table": "`user`" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "aggr and non-aggr without group by (with query does not give useful result out)", + "query": "select id, count(*) from user", + "plan": { + "QueryType": "SELECT", + "Original": "select id, count(*) from user", "Instructions": { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "random(0) AS id, random(1) AS id, sum_count_star(2) AS count(*)", - "ResultColumns": 3, + "Aggregates": "any_value(0) AS id, sum_count_star(1) AS count(*)", "Inputs": [ { "OperatorType": "Route", @@ -4403,61 +3425,1961 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, b as id, count(*), weight_string(b) from `user` where 1 != 1", - "OrderBy": "(1|3) ASC", - "Query": "select id, b as id, count(*), weight_string(b) from `user` order by id asc", + "FieldQuery": "select id, count(*) from `user` where 1 != 1", + "Query": "select id, count(*) from `user`", "Table": "`user`" } ] }, "TablesUsed": [ - "user.user" + "user.user" + ] + } + }, + { + "comment": "group by and ',' joins", + "query": "select user.id from user, user_extra group by id", + "plan": { + "QueryType": "SELECT", + "Original": "select user.id from user, user_extra group by id", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "GroupBy": "(0|1)", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:1", + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id, weight_string(id) from `user` where 1 != 1 group by id, weight_string(id)", + "OrderBy": "(0|1) ASC", + "Query": "select `user`.id, weight_string(id) from `user` group by id, weight_string(id) order by id asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1 group by .0", + "Query": "select 1 from user_extra group by .0", + "Table": "user_extra" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "count on column from LIMIT", + "query": "select count(city) from (select phone, id, city from user where id > 12 limit 10) as x", + "plan": { + "QueryType": "SELECT", + "Original": "select count(city) from (select phone, id, city from user where id > 12 limit 10) as x", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "count(0) AS count(city)", + "Inputs": [ + { + "OperatorType": "SimpleProjection", + "Columns": [ + 2 + ], + "Inputs": [ + { + "OperatorType": "Limit", + "Count": "INT64(10)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select phone, id, city from (select phone, id, city from `user` where 1 != 1) as x where 1 != 1", + "Query": "select phone, id, city from (select phone, id, city from `user` where id > 12) as x limit :__upper_limit", + "Table": "`user`" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "count(*) on column from LIMIT", + "query": "select count(*) from (select phone, id, city from user where id > 12 limit 10) as x", + "plan": { + "QueryType": "SELECT", + "Original": "select count(*) from (select phone, id, city from user where id > 12 limit 10) as x", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "count_star(0) AS count(*)", + "Inputs": [ + { + "OperatorType": "SimpleProjection", + "Columns": [ + 3 + ], + "Inputs": [ + { + "OperatorType": "Limit", + "Count": "INT64(10)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select phone, id, city, 1 from (select phone, id, city from `user` where 1 != 1) as x where 1 != 1", + "Query": "select phone, id, city, 1 from (select phone, id, city from `user` where id > 12) as x limit :__upper_limit", + "Table": "`user`" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "count non-null columns incoming from outer joins should work well", + "query": "select count(col) from (select user_extra.col as col from user left join user_extra on user.id = user_extra.id limit 10) as x", + "plan": { + "QueryType": "SELECT", + "Original": "select count(col) from (select user_extra.col as col from user left join user_extra on user.id = user_extra.id limit 10) as x", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "count(0) AS count(col)", + "Inputs": [ + { + "OperatorType": "Limit", + "Count": "INT64(10)", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "user_id": 0 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id from `user` where 1 != 1", + "Query": "select `user`.id from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.col from user_extra where 1 != 1", + "Query": "select user_extra.col from user_extra where user_extra.id = :user_id", + "Table": "user_extra" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "grouping on data from derived table", + "query": "select val1, count(*) from (select id, val1 from user where val2 < 4 order by val1 limit 2) as x group by val1", + "plan": { + "QueryType": "SELECT", + "Original": "select val1, count(*) from (select id, val1 from user where val2 < 4 order by val1 limit 2) as x group by val1", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "count_star(1) AS count(*)", + "GroupBy": "(0|2)", + "ResultColumns": 2, + "Inputs": [ + { + "OperatorType": "SimpleProjection", + "Columns": [ + 1, + 2, + 3 + ], + "Inputs": [ + { + "OperatorType": "Limit", + "Count": "INT64(2)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, val1, 1, weight_string(val1) from (select id, val1 from `user` where 1 != 1) as x where 1 != 1", + "OrderBy": "(1|3) ASC", + "Query": "select id, val1, 1, weight_string(val1) from (select id, val1 from `user` where val2 < 4) as x order by val1 asc limit :__upper_limit", + "Table": "`user`" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Can't inline derived table when it has HAVING with aggregation function", + "query": "select * from (select id from user having count(*) = 1) s", + "plan": { + "QueryType": "SELECT", + "Original": "select * from (select id from user having count(*) = 1) s", + "Instructions": { + "OperatorType": "Filter", + "Predicate": "count(*) = 1", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "any_value(0) AS id, sum_count_star(1) AS count(*)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, count(*) from `user` where 1 != 1", + "Query": "select id, count(*) from `user`", + "Table": "`user`" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Group By X Order By X", + "query": "SELECT user.intcol FROM user GROUP BY user.intcol ORDER BY COUNT(user.intcol)", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT user.intcol FROM user GROUP BY user.intcol ORDER BY COUNT(user.intcol)", + "Instructions": { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "1 ASC", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count(1) AS count(`user`.intcol)", + "GroupBy": "0", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.intcol, count(`user`.intcol) from `user` where 1 != 1 group by `user`.intcol", + "OrderBy": "0 ASC", + "Query": "select `user`.intcol, count(`user`.intcol) from `user` group by `user`.intcol order by `user`.intcol asc", + "Table": "`user`" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "AggregateAnyValue in non full group by query", + "query": "select u.id, u.name, count(m.predef1) from user.user as u join user.user_extra as m on u.id = m.order group by u.id", + "plan": { + "QueryType": "SELECT", + "Original": "select u.id, u.name, count(m.predef1) from user.user as u join user.user_extra as m on u.id = m.order group by u.id", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "any_value(1) AS name, sum_count(2) AS count(m.predef1)", + "GroupBy": "(0|3)", + "ResultColumns": 3, + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 3] as id", + "[COLUMN 0] as name", + "[COLUMN 1] * [COLUMN 2] as count(m.predef1)", + "[COLUMN 4] as weight_string(u.id)" + ], + "Inputs": [ + { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(3|4) ASC", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,L:0,R:1,R:2,R:3", + "JoinVars": { + "m_order": 1 + }, + "TableName": "user_extra_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(m.predef1), m.`order` from user_extra as m where 1 != 1 group by m.`order`", + "Query": "select count(m.predef1), m.`order` from user_extra as m group by m.`order`", + "Table": "user_extra" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.`name`, count(*), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)", + "Query": "select u.`name`, count(*), u.id, weight_string(u.id) from `user` as u where u.id = :m_order group by u.id, weight_string(u.id)", + "Table": "`user`", + "Values": [ + ":m_order" + ], + "Vindex": "user_index" + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "Aggregation on column from inner side in a left join query", + "query": "select count (u.id) from user u left join user_extra ue on u.col = ue.col", + "plan": { + "QueryType": "SELECT", + "Original": "select count (u.id) from user u left join user_extra ue on u.col = ue.col", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count(0) AS count(u.id)", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * COALESCE([COLUMN 1], INT64(1)) as count(u.id)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "u_col": 1 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(u.id), u.col from `user` as u where 1 != 1 group by u.col", + "Query": "select count(u.id), u.col from `user` as u group by u.col", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) from user_extra as ue where 1 != 1 group by .0", + "Query": "select count(*) from user_extra as ue where ue.col = :u_col group by .0", + "Table": "user_extra" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "Aggregation on outer side in a left join query", + "query": "select count(ue.id) from user u left join user_extra ue on u.col = ue.col", + "plan": { + "QueryType": "SELECT", + "Original": "select count(ue.id) from user u left join user_extra ue on u.col = ue.col", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count(0) AS count(ue.id)", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 1] * [COLUMN 0] as count(ue.id)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "R:0,L:0", + "JoinVars": { + "u_col": 1 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), u.col from `user` as u where 1 != 1 group by u.col", + "Query": "select count(*), u.col from `user` as u group by u.col", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(ue.id) from user_extra as ue where 1 != 1 group by .0", + "Query": "select count(ue.id) from user_extra as ue where ue.col = :u_col group by .0", + "Table": "user_extra" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "Aggregations from derived table used in arithmetic outside derived table", + "query": "select A.a, A.b, (A.a / A.b) as d from (select sum(a) as a, sum(b) as b from user) A", + "plan": { + "QueryType": "SELECT", + "Original": "select A.a, A.b, (A.a / A.b) as d from (select sum(a) as a, sum(b) as b from user) A", + "Instructions": { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] as a", + "[COLUMN 1] as b", + "[COLUMN 0] / [COLUMN 1] as d" + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum(0) AS a, sum(1) AS b", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select sum(a) as a, sum(b) as b from `user` where 1 != 1", + "Query": "select sum(a) as a, sum(b) as b from `user`", + "Table": "`user`" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "when pushing predicates into derived tables, make sure to put them in HAVING when they contain aggregations", + "query": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as count from user_extra where localDate > :v1 group by user_id, flowId order by null) as t1 where count >= :v2", + "plan": { + "QueryType": "SELECT", + "Original": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as count from user_extra where localDate > :v1 group by user_id, flowId order by null) as t1 where count >= :v2", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as `count` from user_extra where 1 != 1 group by user_id, flowId) as t1 where 1 != 1", + "Query": "select t1.portalId, t1.flowId from (select portalId, flowId, count(*) as `count` from user_extra where localDate > :v1 group by user_id, flowId) as t1 where `count` >= :v2", + "Table": "user_extra" + }, + "TablesUsed": [ + "user.user_extra" + ] + } + }, + { + "comment": "aggregation, where and derived tables - we can push extremums", + "query": "SELECT foo FROM (SELECT foo, max(baz) as bazo FROM (SELECT foo, baz FROM user) f GROUP BY foo) tt WHERE bazo BETWEEN 100 AND 200", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT foo FROM (SELECT foo, max(baz) as bazo FROM (SELECT foo, baz FROM user) f GROUP BY foo) tt WHERE bazo BETWEEN 100 AND 200", + "Instructions": { + "OperatorType": "Filter", + "Predicate": "bazo between 100 and 200", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "max(1|3) AS bazo", + "GroupBy": "(0|2)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, max(baz) as bazo, weight_string(foo), weight_string(baz) from (select foo, baz from `user` where 1 != 1) as f where 1 != 1 group by foo, weight_string(foo), weight_string(baz)", + "OrderBy": "(0|2) ASC", + "Query": "select foo, max(baz) as bazo, weight_string(foo), weight_string(baz) from (select foo, baz from `user`) as f group by foo, weight_string(foo), weight_string(baz) order by foo asc", + "Table": "`user`" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "aggregation, where and derived tables - we can't push aggregations that might need a second layer of aggregation", + "query": "SELECT foo FROM (SELECT foo, count(baz) as bazo FROM (SELECT foo, baz FROM user) f GROUP BY foo) tt WHERE bazo BETWEEN 100 AND 200", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT foo FROM (SELECT foo, count(baz) as bazo FROM (SELECT foo, baz FROM user) f GROUP BY foo) tt WHERE bazo BETWEEN 100 AND 200", + "Instructions": { + "OperatorType": "Filter", + "Predicate": "bazo between 100 and 200", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count(1) AS bazo", + "GroupBy": "(0|2)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, count(baz) as bazo, weight_string(foo) from (select foo, baz from `user` where 1 != 1) as f where 1 != 1 group by foo, weight_string(foo)", + "OrderBy": "(0|2) ASC", + "Query": "select foo, count(baz) as bazo, weight_string(foo) from (select foo, baz from `user`) as f group by foo, weight_string(foo) order by foo asc", + "Table": "`user`" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Scatter order by is complex with aggregates in select", + "query": "select col, count(*) from user group by col order by col+1", + "plan": { + "QueryType": "SELECT", + "Original": "select col, count(*) from user group by col order by col+1", + "Instructions": { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(2|3) ASC", + "ResultColumns": 2, + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(1) AS count(*), any_value(2) AS col + 1, any_value(3)", + "GroupBy": "0", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select col, count(*), col + 1, weight_string(col + 1) from `user` where 1 != 1 group by col", + "OrderBy": "0 ASC", + "Query": "select col, count(*), col + 1, weight_string(col + 1) from `user` group by col order by col asc", + "Table": "`user`" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "scatter aggregate complex order by", + "query": "select id from user group by id order by id+1", + "plan": { + "QueryType": "SELECT", + "Original": "select id from user group by id order by id+1", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, id + 1, weight_string(id + 1) from `user` where 1 != 1 group by id", + "OrderBy": "(1|2) ASC", + "Query": "select id, id + 1, weight_string(id + 1) from `user` group by id order by id + 1 asc", + "ResultColumns": 1, + "Table": "`user`" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "select expression does not directly depend on grouping expression", + "query": "select a from user group by a+1", + "plan": { + "QueryType": "SELECT", + "Original": "select a from user group by a+1", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "any_value(0) AS a", + "GroupBy": "(1|2)", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select a, a + 1, weight_string(a + 1) from `user` where 1 != 1 group by a + 1, weight_string(a + 1)", + "OrderBy": "(1|2) ASC", + "Query": "select a, a + 1, weight_string(a + 1) from `user` group by a + 1, weight_string(a + 1) order by a + 1 asc", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "inner join with scalar aggregation", + "query": "select count(*) from user join music on user.foo = music.bar", + "plan": { + "QueryType": "SELECT", + "Original": "select count(*) from user join music on user.foo = music.bar", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count_star(0) AS count(*)", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_foo": 1 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), `user`.foo from `user` where 1 != 1 group by `user`.foo", + "Query": "select count(*), `user`.foo from `user` group by `user`.foo", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) from music where 1 != 1 group by .0", + "Query": "select count(*) from music where music.bar = :user_foo group by .0", + "Table": "music" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "left outer join with scalar aggregation", + "query": "select count(*) from user left join music on user.foo = music.bar", + "plan": { + "QueryType": "SELECT", + "Original": "select count(*) from user left join music on user.foo = music.bar", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count_star(0) AS count(*)", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * COALESCE([COLUMN 1], INT64(1)) as count(*)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_foo": 1 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), `user`.foo from `user` where 1 != 1 group by `user`.foo", + "Query": "select count(*), `user`.foo from `user` group by `user`.foo", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) from music where 1 != 1 group by .0", + "Query": "select count(*) from music where music.bar = :user_foo group by .0", + "Table": "music" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "inner join with left grouping", + "query": "select count(*) from user left join music on user.foo = music.bar group by user.col", + "plan": { + "QueryType": "SELECT", + "Original": "select count(*) from user left join music on user.foo = music.bar group by user.col", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(0) AS count(*)", + "GroupBy": "1", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * COALESCE([COLUMN 1], INT64(1)) as count(*)", + "[COLUMN 2] as col" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,R:0,L:1", + "JoinVars": { + "user_foo": 2 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), `user`.col, `user`.foo from `user` where 1 != 1 group by `user`.col, `user`.foo", + "OrderBy": "1 ASC", + "Query": "select count(*), `user`.col, `user`.foo from `user` group by `user`.col, `user`.foo order by `user`.col asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) from music where 1 != 1 group by .0", + "Query": "select count(*) from music where music.bar = :user_foo group by .0", + "Table": "music" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "inner join with right grouping", + "query": "select count(*) from user left join music on user.foo = music.bar group by music.col", + "plan": { + "QueryType": "SELECT", + "Original": "select count(*) from user left join music on user.foo = music.bar group by music.col", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(0) AS count(*)", + "GroupBy": "(1|2)", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * COALESCE([COLUMN 1], INT64(1)) as count(*)", + "[COLUMN 2] as col", + "[COLUMN 3] as weight_string(music.col)" + ], + "Inputs": [ + { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(2|3) ASC", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,R:0,R:1,R:2", + "JoinVars": { + "user_foo": 1 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), `user`.foo from `user` where 1 != 1 group by `user`.foo", + "Query": "select count(*), `user`.foo from `user` group by `user`.foo", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), music.col, weight_string(music.col) from music where 1 != 1 group by music.col, weight_string(music.col)", + "Query": "select count(*), music.col, weight_string(music.col) from music where music.bar = :user_foo group by music.col, weight_string(music.col)", + "Table": "music" + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "left outer join with left grouping", + "query": "select count(*) from user left join music on user.foo = music.bar group by user.col", + "plan": { + "QueryType": "SELECT", + "Original": "select count(*) from user left join music on user.foo = music.bar group by user.col", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(0) AS count(*)", + "GroupBy": "1", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * COALESCE([COLUMN 1], INT64(1)) as count(*)", + "[COLUMN 2] as col" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,R:0,L:1", + "JoinVars": { + "user_foo": 2 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), `user`.col, `user`.foo from `user` where 1 != 1 group by `user`.col, `user`.foo", + "OrderBy": "1 ASC", + "Query": "select count(*), `user`.col, `user`.foo from `user` group by `user`.col, `user`.foo order by `user`.col asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) from music where 1 != 1 group by .0", + "Query": "select count(*) from music where music.bar = :user_foo group by .0", + "Table": "music" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "left outer join with right grouping", + "query": "select count(*) from user left join music on user.foo = music.bar group by music.col", + "plan": { + "QueryType": "SELECT", + "Original": "select count(*) from user left join music on user.foo = music.bar group by music.col", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(0) AS count(*)", + "GroupBy": "(1|2)", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * COALESCE([COLUMN 1], INT64(1)) as count(*)", + "[COLUMN 2] as col", + "[COLUMN 3] as weight_string(music.col)" + ], + "Inputs": [ + { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(2|3) ASC", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,R:0,R:1,R:2", + "JoinVars": { + "user_foo": 1 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), `user`.foo from `user` where 1 != 1 group by `user`.foo", + "Query": "select count(*), `user`.foo from `user` group by `user`.foo", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), music.col, weight_string(music.col) from music where 1 != 1 group by music.col, weight_string(music.col)", + "Query": "select count(*), music.col, weight_string(music.col) from music where music.bar = :user_foo group by music.col, weight_string(music.col)", + "Table": "music" + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "3 table inner join with scalar aggregation", + "query": "select count(*) from user join music on user.foo = music.bar join user_extra on user.foo = user_extra.baz", + "plan": { + "QueryType": "SELECT", + "Original": "select count(*) from user join music on user.foo = music.bar join user_extra on user.foo = user_extra.baz", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count_star(0) AS count(*)", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_extra_baz": 1 + }, + "TableName": "user_extra_`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), user_extra.baz from user_extra where 1 != 1 group by user_extra.baz", + "Query": "select count(*), user_extra.baz from user_extra group by user_extra.baz", + "Table": "user_extra" + }, + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_foo": 1 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), `user`.foo from `user` where 1 != 1 group by `user`.foo", + "Query": "select count(*), `user`.foo from `user` where `user`.foo = :user_extra_baz group by `user`.foo", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) from music where 1 != 1 group by .0", + "Query": "select count(*) from music where music.bar = :user_foo group by .0", + "Table": "music" + } + ] + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "3 table with mixed join with scalar aggregation", + "query": "select count(*) from user left join music on user.foo = music.bar join user_extra on user.foo = user_extra.baz", + "plan": { + "QueryType": "SELECT", + "Original": "select count(*) from user left join music on user.foo = music.bar join user_extra on user.foo = user_extra.baz", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count_star(0) AS count(*)", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_foo": 1 + }, + "TableName": "`user`_music_user_extra", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * COALESCE([COLUMN 1], INT64(1)) as count(*)", + "[COLUMN 2] as foo" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,R:0,L:1", + "JoinVars": { + "user_foo": 1 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), `user`.foo from `user` where 1 != 1 group by `user`.foo", + "Query": "select count(*), `user`.foo from `user` group by `user`.foo", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) from music where 1 != 1 group by .0", + "Query": "select count(*) from music where music.bar = :user_foo group by .0", + "Table": "music" + } + ] + } + ] + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) from user_extra where 1 != 1 group by .0", + "Query": "select count(*) from user_extra where user_extra.baz = :user_foo group by .0", + "Table": "user_extra" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "ordering have less column than grouping columns, grouping gets rearranged as order by and missing columns gets added to ordering", + "query": "select u.col, u.intcol, count(*) from user u join music group by 1,2 order by 2", + "plan": { + "QueryType": "SELECT", + "Original": "select u.col, u.intcol, count(*) from user u join music group by 1,2 order by 2", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(2) AS count(*)", + "GroupBy": "1, 0", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 3] as col", + "[COLUMN 2] as intcol", + "[COLUMN 0] * [COLUMN 1] as count(*)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,L:2", + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), u.intcol, u.col from `user` as u where 1 != 1 group by u.intcol, u.col", + "OrderBy": "1 ASC, 2 ASC", + "Query": "select count(*), u.intcol, u.col from `user` as u group by u.intcol, u.col order by u.intcol asc, u.col asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) from music where 1 != 1 group by .0", + "Query": "select count(*) from music group by .0", + "Table": "music" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "redundant group by columns are not added", + "query": "select col, val, id from user group by col, val, id, id, val, col", + "plan": { + "QueryType": "SELECT", + "Original": "select col, val, id from user group by col, val, id, id, val, col", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select col, val, id from `user` where 1 != 1 group by col, val, id", + "Query": "select col, val, id from `user` group by col, val, id", + "Table": "`user`" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "scatter aggregate with ambiguous aliases", + "query": "select distinct a, b as a from user", + "plan": { + "QueryType": "SELECT", + "Original": "select distinct a, b as a from user", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "(0:2)", + "(1:2)" + ], + "ResultColumns": 2, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select a, b as a, weight_string(b) from `user` where 1 != 1", + "Query": "select distinct a, b as a, weight_string(b) from `user`", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "scatter aggregate with complex select list (can't build order by)", + "query": "select distinct a+1 from user", + "plan": { + "QueryType": "SELECT", + "Original": "select distinct a+1 from user", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "(0:1)" + ], + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select a + 1, weight_string(a + 1) from `user` where 1 != 1", + "Query": "select distinct a + 1, weight_string(a + 1) from `user`", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "distinct on top of aggregation", + "query": "select distinct count(*) from user group by col", + "plan": { + "QueryType": "SELECT", + "Original": "select distinct count(*) from user group by col", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "0" + ], + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(0) AS count(*)", + "GroupBy": "1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), col from `user` where 1 != 1 group by col", + "OrderBy": "1 ASC", + "Query": "select count(*), col from `user` group by col order by col asc", + "Table": "`user`" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "scalar aggregates with min, max, sum distinct and count distinct using collations", + "query": "select min(textcol1), max(textcol2), sum(distinct textcol1), count(distinct textcol1) from user", + "plan": { + "QueryType": "SELECT", + "Original": "select min(textcol1), max(textcol2), sum(distinct textcol1), count(distinct textcol1) from user", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "min(0 COLLATE latin1_swedish_ci) AS min(textcol1), max(1 COLLATE latin1_swedish_ci) AS max(textcol2), sum_distinct(2 COLLATE latin1_swedish_ci) AS sum(distinct textcol1), count_distinct(3 COLLATE latin1_swedish_ci) AS count(distinct textcol1)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select min(textcol1), max(textcol2), textcol1, textcol1 from `user` where 1 != 1 group by textcol1", + "OrderBy": "2 ASC COLLATE latin1_swedish_ci", + "Query": "select min(textcol1), max(textcol2), textcol1, textcol1 from `user` group by textcol1 order by textcol1 asc", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "grouping aggregates with mi, max, sum distinct and count distinct using collations", + "query": "select col, min(textcol1), max(textcol2), sum(distinct textcol1), count(distinct textcol1) from user group by col", + "plan": { + "QueryType": "SELECT", + "Original": "select col, min(textcol1), max(textcol2), sum(distinct textcol1), count(distinct textcol1) from user group by col", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "min(1 COLLATE latin1_swedish_ci) AS min(textcol1), max(2 COLLATE latin1_swedish_ci) AS max(textcol2), sum_distinct(3 COLLATE latin1_swedish_ci) AS sum(distinct textcol1), count_distinct(4 COLLATE latin1_swedish_ci) AS count(distinct textcol1)", + "GroupBy": "0", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select col, min(textcol1), max(textcol2), textcol1, textcol1 from `user` where 1 != 1 group by col, textcol1", + "OrderBy": "0 ASC, 3 ASC COLLATE latin1_swedish_ci", + "Query": "select col, min(textcol1), max(textcol2), textcol1, textcol1 from `user` group by col, textcol1 order by col asc, textcol1 asc", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "using a grouping column multiple times should be OK", + "query": "select col, col, count(*) from user group by col", + "plan": { + "QueryType": "SELECT", + "Original": "select col, col, count(*) from user group by col", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(2) AS count(*)", + "GroupBy": "0", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select col, col, count(*) from `user` where 1 != 1 group by col", + "OrderBy": "0 ASC", + "Query": "select col, col, count(*) from `user` group by col order by col asc", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "multiple count star and a count with 3 table join", + "query": "select count(*), count(*), count(u.col) from user u, user u2, user_extra ue", + "plan": { + "QueryType": "SELECT", + "Original": "select count(*), count(*), count(u.col) from user u, user u2, user_extra ue", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count_star(0) AS count(*), sum_count_star(1) AS count(*), sum_count(2) AS count(u.col)", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 0] * [COLUMN 2] as count(u.col)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,R:1", + "TableName": "user_extra_`user`_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) from user_extra as ue where 1 != 1", + "Query": "select count(*) from user_extra as ue", + "Table": "user_extra" + }, + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 2] * [COLUMN 1] as count(u.col)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1", + "TableName": "`user`_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), count(u.col) from `user` as u where 1 != 1 group by .0", + "Query": "select count(*), count(u.col) from `user` as u group by .0", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) from `user` as u2 where 1 != 1 group by .0", + "Query": "select count(*) from `user` as u2 group by .0", + "Table": "`user`" + } + ] + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "interleaving grouping, aggregation and join with min, max columns", + "query": "select user.col, min(user_extra.foo), user.bar, max(user_extra.bar) from user join user_extra on user.col = user_extra.bar group by user.col, user.bar", + "plan": { + "QueryType": "SELECT", + "Original": "select user.col, min(user_extra.foo), user.bar, max(user_extra.bar) from user join user_extra on user.col = user_extra.bar group by user.col, user.bar", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "min(1|5) AS min(user_extra.foo), max(3|6) AS max(user_extra.bar)", + "GroupBy": "0, (2|4)", + "ResultColumns": 4, + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,R:1,L:2,R:2,R:3", + "JoinVars": { + "user_col": 0 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.col, `user`.bar, weight_string(`user`.bar) from `user` where 1 != 1 group by `user`.col, `user`.bar, weight_string(`user`.bar)", + "OrderBy": "0 ASC, (1|2) ASC", + "Query": "select `user`.col, `user`.bar, weight_string(`user`.bar) from `user` group by `user`.col, `user`.bar, weight_string(`user`.bar) order by `user`.col asc, `user`.bar asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select min(user_extra.foo), max(user_extra.bar), weight_string(user_extra.foo), weight_string(user_extra.bar) from user_extra where 1 != 1 group by .0, weight_string(user_extra.foo), weight_string(user_extra.bar)", + "Query": "select min(user_extra.foo), max(user_extra.bar), weight_string(user_extra.foo), weight_string(user_extra.bar) from user_extra where user_extra.bar = :user_col group by .0, weight_string(user_extra.foo), weight_string(user_extra.bar)", + "Table": "user_extra" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "extremum on input from both sides", + "query": "select max(u.foo*ue.bar) from user u join user_extra ue", + "plan": { + "QueryType": "SELECT", + "Original": "select max(u.foo*ue.bar) from user u join user_extra ue", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "max(0|1) AS max(u.foo * ue.bar)", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,R:1", + "JoinVars": { + "u_foo": 0 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.foo from `user` as u where 1 != 1", + "Query": "select u.foo from `user` as u", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select :u_foo * ue.bar, weight_string(:u_foo * ue.bar) from user_extra as ue where 1 != 1", + "Query": "select :u_foo * ue.bar, weight_string(:u_foo * ue.bar) from user_extra as ue", + "Table": "user_extra" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "aggregate on input from both sides - TODO optimize more", + "query": "select sum(user.foo+user_extra.bar) from user, user_extra", + "plan": { + "QueryType": "SELECT", + "Original": "select sum(user.foo+user_extra.bar) from user, user_extra", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum(0) AS sum(`user`.foo + user_extra.bar)", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "user_foo": 0 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.foo from `user` where 1 != 1", + "Query": "select `user`.foo from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select :user_foo + user_extra.bar from user_extra where 1 != 1", + "Query": "select :user_foo + user_extra.bar from user_extra", + "Table": "user_extra" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" ] } }, { - "comment": "aggr and non-aggr without group by (with query does not give useful result out)", - "query": "select id, count(*) from user", - "v3-plan": { + "comment": "grouping column could be coming from multiple sides", + "query": "select count(*) from user, user_extra group by user.id+user_extra.id", + "plan": { "QueryType": "SELECT", - "Original": "select id, count(*) from user", + "Original": "select count(*) from user, user_extra group by user.id+user_extra.id", "Instructions": { "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(1) AS count", + "Variant": "Ordered", + "Aggregates": "sum_count_star(0) AS count(*)", + "GroupBy": "(1|2)", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, count(*) from `user` where 1 != 1", - "Query": "select id, count(*) from `user`", - "Table": "`user`" + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(1|2) ASC", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 2] as `user`.id + user_extra.id", + "[COLUMN 3] as weight_string(`user`.id + user_extra.id)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,R:1,R:2", + "JoinVars": { + "user_id": 1 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), `user`.id from `user` where 1 != 1 group by `user`.id", + "Query": "select count(*), `user`.id from `user` group by `user`.id", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), :user_id + user_extra.id, weight_string(:user_id + user_extra.id) from user_extra where 1 != 1 group by :user_id + user_extra.id", + "Query": "select count(*), :user_id + user_extra.id, weight_string(:user_id + user_extra.id) from user_extra group by :user_id + user_extra.id", + "Table": "user_extra" + } + ] + } + ] + } + ] } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "Complex aggregate expression on scatter", + "query": "select 1+count(*) from user", + "plan": { "QueryType": "SELECT", - "Original": "select id, count(*) from user", + "Original": "select 1+count(*) from user", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] + [COLUMN 1] as 1 + count(*)" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, count(*) from `user` where 1 != 1", - "Query": "select id, count(*) from `user`", - "Table": "`user`" + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "any_value(0), sum_count_star(1) AS count(*)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1, count(*) from `user` where 1 != 1", + "Query": "select 1, count(*) from `user`", + "Table": "`user`" + } + ] } ] }, @@ -4467,53 +5389,61 @@ } }, { - "comment": "group by and ',' joins", - "query": "select user.id from user, user_extra group by id", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "comment": "combine the output of two aggregations in the final result", + "query": "select greatest(sum(user.foo), sum(user_extra.bar)) from user join user_extra on user.col = user_extra.col", + "plan": { "QueryType": "SELECT", - "Original": "select user.id from user, user_extra group by id", + "Original": "select greatest(sum(user.foo), sum(user_extra.bar)) from user join user_extra on user.col = user_extra.col", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "(0|1)", - "ResultColumns": 1, + "OperatorType": "Projection", + "Expressions": [ + "GREATEST([COLUMN 0], [COLUMN 1]) as greatest(sum(`user`.foo), sum(user_extra.bar))" + ], "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 0] as id", - "[COLUMN 1]" - ], + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum(0) AS sum(`user`.foo), sum(1) AS sum(user_extra.bar)", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_user_extra", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as sum(`user`.foo)", + "[COLUMN 3] * [COLUMN 2] as sum(user_extra.bar)" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 group by id, weight_string(id)", - "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` group by id, weight_string(id) order by id asc", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,R:1,L:1", + "JoinVars": { + "user_col": 2 }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select sum(`user`.foo), count(*), `user`.col from `user` where 1 != 1 group by `user`.col", + "Query": "select sum(`user`.foo), count(*), `user`.col from `user` group by `user`.col", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), sum(user_extra.bar) from user_extra where 1 != 1 group by .0", + "Query": "select count(*), sum(user_extra.bar) from user_extra where user_extra.col = :user_col group by .0", + "Table": "user_extra" + } + ] } ] } @@ -4528,70 +5458,81 @@ } }, { - "comment": "count on column from LIMIT", - "query": "select count(city) from (select phone, id, city from user where id > 12 limit 10) as x", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "comment": "Aggregate detection (group_concat)", + "query": "select group_concat(user.a) from user join user_extra", + "plan": { "QueryType": "SELECT", - "Original": "select count(city) from (select phone, id, city from user where id > 12 limit 10) as x", + "Original": "select group_concat(user.a) from user join user_extra", "Instructions": { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "count(0) AS count(city)", + "Aggregates": "group_concat(0) AS group_concat(`user`.a)", "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 2] as count(city)" - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select phone, id, city from `user` where 1 != 1", - "Query": "select phone, id, city from `user` where id > 12 limit :__upper_limit", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.a from `user` where 1 != 1", + "Query": "select `user`.a from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra", + "Table": "user_extra" } ] } ] }, "TablesUsed": [ - "user.user" + "user.user", + "user.user_extra" ] } }, { - "comment": "count(*) on column from LIMIT", - "query": "select count(*) from (select phone, id, city from user where id > 12 limit 10) as x", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "comment": "plan a query with any_value()", + "query": "select count(*), any_value(u.name), any_value(ue.title) from user u join user_extra ue on u.bar = ue.foo ", + "plan": { "QueryType": "SELECT", - "Original": "select count(*) from (select phone, id, city from user where id > 12 limit 10) as x", + "Original": "select count(*), any_value(u.name), any_value(ue.title) from user u join user_extra ue on u.bar = ue.foo ", "Instructions": { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "count_star(0) AS count(*)", + "Aggregates": "sum_count_star(0) AS count(*), any_value(1) AS any_value(u.`name`), any_value(2) AS any_value(ue.title)", "Inputs": [ { "OperatorType": "Projection", "Expressions": [ - "[COLUMN 0] as count(*)" + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 2] as any_value(u.`name`)", + "[COLUMN 3] as any_value(ue.title)" ], "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(10)", + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,R:1", + "JoinVars": { + "u_bar": 2 + }, + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -4600,9 +5541,20 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select phone, id, city from `user` where 1 != 1", - "Query": "select phone, id, city from `user` where id > 12 limit :__upper_limit", + "FieldQuery": "select count(*), any_value(u.`name`), u.bar from `user` as u where 1 != 1 group by u.bar", + "Query": "select count(*), any_value(u.`name`), u.bar from `user` as u group by u.bar", "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), any_value(ue.title) from user_extra as ue where 1 != 1 group by .0", + "Query": "select count(*), any_value(ue.title) from user_extra as ue where ue.foo = :u_bar group by .0", + "Table": "user_extra" } ] } @@ -4611,63 +5563,63 @@ ] }, "TablesUsed": [ - "user.user" + "user.user", + "user.user_extra" ] } }, { - "comment": "count non-null columns incoming from outer joins should work well", - "query": "select count(col) from (select user_extra.col as col from user left join user_extra on user.id = user_extra.id limit 10) as x", + "comment": "group_concat with group by without in select list", + "query": "select group_concat(user.id) from user, music where user.id = music.foo group by user.bar", "plan": { "QueryType": "SELECT", - "Original": "select count(col) from (select user_extra.col as col from user left join user_extra on user.id = user_extra.id limit 10) as x", + "Original": "select group_concat(user.id) from user, music where user.id = music.foo group by user.bar", "Instructions": { "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "count(0) AS count(col)", + "Variant": "Ordered", + "Aggregates": "group_concat(0) AS group_concat(`user`.id)", + "GroupBy": "(1|2)", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 0] as count(col)" - ], + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(1|2) ASC", "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(10)", + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,R:1,R:2", + "JoinVars": { + "music_foo": 0 + }, + "TableName": "music_`user`", "Inputs": [ { - "OperatorType": "Join", - "Variant": "LeftJoin", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_id": 0 + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col as col from user_extra where 1 != 1", - "Query": "select user_extra.col as col from user_extra where user_extra.id = :user_id", - "Table": "user_extra" - } - ] + "FieldQuery": "select music.foo from music where 1 != 1", + "Query": "select music.foo from music", + "Table": "music" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id, `user`.bar, weight_string(`user`.bar) from `user` where 1 != 1", + "Query": "select `user`.id, `user`.bar, weight_string(`user`.bar) from `user` where `user`.id = :music_foo", + "Table": "`user`", + "Values": [ + ":music_foo" + ], + "Vindex": "user_index" } ] } @@ -4676,36 +5628,68 @@ ] }, "TablesUsed": [ - "user.user", - "user.user_extra" + "user.music", + "user.user" ] } }, { - "comment": "grouping on data from derived table", - "query": "select val1, count(*) from (select id, val1 from user where val2 < 4 order by val1 limit 2) as x group by val1", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "comment": "group_concat aggregation on top of route", + "query": "select intcol, group_concat(foo) from user group by intcol", + "plan": { "QueryType": "SELECT", - "Original": "select val1, count(*) from (select id, val1 from user where val2 < 4 order by val1 limit 2) as x group by val1", + "Original": "select intcol, group_concat(foo) from user group by intcol", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "count_star(1) AS count(*)", - "GroupBy": "(0|2)", + "Aggregates": "group_concat(1) AS group_concat(foo)", + "GroupBy": "0", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select intcol, group_concat(foo) from `user` where 1 != 1 group by intcol", + "OrderBy": "0 ASC", + "Query": "select intcol, group_concat(foo) from `user` group by intcol order by intcol asc", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "ordering on top of aggregator without pushing the column down during the horizon phase", + "query": "select u.foo, group_concat(u.bar) from user u, music m where u.col = m.col group by u.foo order by u.baz", + "plan": { + "QueryType": "SELECT", + "Original": "select u.foo, group_concat(u.bar) from user u, music m where u.col = m.col group by u.foo order by u.baz", + "Instructions": { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(2|4) ASC", "ResultColumns": 2, "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 1] as val1", - "[COLUMN 0] as count(*)", - "[COLUMN 2]" - ], + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "group_concat(1) AS group_concat(u.bar), any_value(2) AS baz, any_value(4)", + "GroupBy": "(0|3)", "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(2)", + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4", + "JoinVars": { + "u_col": 5 + }, + "TableName": "`user`_music", "Inputs": [ { "OperatorType": "Route", @@ -4714,10 +5698,21 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, val1, weight_string(val1) from `user` where 1 != 1", - "OrderBy": "(1|2) ASC, (1|2) ASC", - "Query": "select id, val1, weight_string(val1) from `user` where val2 < 4 order by val1 asc, val1 asc limit :__upper_limit", + "FieldQuery": "select u.foo, u.bar, u.baz, weight_string(u.foo), weight_string(u.baz), u.col from `user` as u where 1 != 1", + "OrderBy": "(0|3) ASC", + "Query": "select u.foo, u.bar, u.baz, weight_string(u.foo), weight_string(u.baz), u.col from `user` as u order by u.foo asc", "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music as m where 1 != 1", + "Query": "select 1 from music as m where m.col = :u_col", + "Table": "music" } ] } @@ -4726,86 +5721,108 @@ ] }, "TablesUsed": [ + "user.music", "user.user" ] } }, { - "comment": "Can't inline derived table when it has HAVING with aggregation function", - "query": "select * from (select id from user having count(*) = 1) s", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from (select id from user having count(*) = 1) s", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from (select id from `user` where 1 != 1) as s where 1 != 1", - "Query": "select * from (select id from `user` having count(*) = 1) as s", - "Table": "`user`" - } - }, - "gen4-plan": { + "comment": "count distinct and sum distinct on join query pushed down - unique vindex", + "query": "select u.col1, count(distinct m.user_id), sum(distinct m.user_id) from user u join music m group by u.col1", + "plan": { "QueryType": "SELECT", - "Original": "select * from (select id from user having count(*) = 1) s", + "Original": "select u.col1, count(distinct m.user_id), sum(distinct m.user_id) from user u join music m group by u.col1", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_distinct(1) AS count(distinct m.user_id), sum_sum_distinct(2) AS sum(distinct m.user_id)", + "GroupBy": "(0|3)", + "ResultColumns": 3, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": ":1 = 1", + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,R:1,L:1", + "TableName": "`user`_music", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "random(0) AS id, sum_count_star(1) AS count(*)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, count(*) from `user` where 1 != 1", - "Query": "select id, count(*) from `user`", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.col1, weight_string(u.col1) from `user` as u where 1 != 1 group by u.col1, weight_string(u.col1)", + "OrderBy": "(0|1) ASC", + "Query": "select u.col1, weight_string(u.col1) from `user` as u group by u.col1, weight_string(u.col1) order by u.col1 asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(distinct m.user_id), sum(distinct m.user_id) from music as m where 1 != 1 group by .0", + "Query": "select count(distinct m.user_id), sum(distinct m.user_id) from music as m group by .0", + "Table": "music" } ] } ] }, "TablesUsed": [ + "user.music", "user.user" ] } }, { - "comment": "Group By X Order By X", - "query": "SELECT user.intcol FROM user GROUP BY user.intcol ORDER BY COUNT(user.intcol)", - "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: count(`user`.intcol)", - "gen4-plan": { + "comment": "count and sum distinct with min distinct on different expressions", + "query": "select foo, min(distinct bar), count(distinct baz), sum(distinct baz), max(distinct toto) from user group by foo", + "plan": { "QueryType": "SELECT", - "Original": "SELECT user.intcol FROM user GROUP BY user.intcol ORDER BY COUNT(user.intcol)", + "Original": "select foo, min(distinct bar), count(distinct baz), sum(distinct baz), max(distinct toto) from user group by foo", "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "1 ASC", - "ResultColumns": 1, + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "min(1|6) AS min(distinct bar), count_distinct(2|7) AS count(distinct baz), sum_distinct(3|7) AS sum(distinct baz), max(4|8) AS max(distinct toto)", + "GroupBy": "(0|5)", + "ResultColumns": 5, "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count(`user`.intcol)", - "GroupBy": "0", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, min(bar) as `min(distinct bar)`, baz, baz, max(toto) as `max(distinct toto)`, weight_string(foo), weight_string(bar), weight_string(baz), weight_string(toto) from `user` where 1 != 1 group by foo, baz, weight_string(foo), weight_string(bar), weight_string(baz), weight_string(toto)", + "OrderBy": "(0|5) ASC, (2|7) ASC", + "Query": "select foo, min(bar) as `min(distinct bar)`, baz, baz, max(toto) as `max(distinct toto)`, weight_string(foo), weight_string(bar), weight_string(baz), weight_string(toto) from `user` group by foo, baz, weight_string(foo), weight_string(bar), weight_string(baz), weight_string(toto) order by foo asc, baz asc", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "aggregation on union", + "query": "select sum(col) from (select col from user union all select col from unsharded) t", + "plan": { + "QueryType": "SELECT", + "Original": "select sum(col) from (select col from user union all select col from unsharded) t", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum(0) AS sum(col)", + "Inputs": [ + { + "OperatorType": "Concatenate", "Inputs": [ { "OperatorType": "Route", @@ -4814,84 +5831,63 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.intcol, count(`user`.intcol) from `user` where 1 != 1 group by `user`.intcol", - "OrderBy": "0 ASC", - "Query": "select `user`.intcol, count(`user`.intcol) from `user` group by `user`.intcol order by `user`.intcol asc", + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user`", "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select col from unsharded where 1 != 1", + "Query": "select col from unsharded", + "Table": "unsharded" } ] } ] }, "TablesUsed": [ + "main.unsharded", "user.user" ] } }, { - "comment": "AggregateRandom in non full group by query", - "query": "select u.id, u.name, count(m.predef1) from user.user as u join user.user_extra as m on u.id = m.order group by u.id", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "comment": "aggregation on top of derived table with limit", + "query": "select count(val2), sum(val2) from (select id, val2 from user where val2 is null limit 2) as x", + "plan": { "QueryType": "SELECT", - "Original": "select u.id, u.name, count(m.predef1) from user.user as u join user.user_extra as m on u.id = m.order group by u.id", + "Original": "select count(val2), sum(val2) from (select id, val2 from user where val2 is null limit 2) as x", "Instructions": { "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "random(1) AS name, sum_count(2) AS count(m.predef1)", - "GroupBy": "(0|3)", - "ResultColumns": 3, + "Variant": "Scalar", + "Aggregates": "count(0) AS count(val2), sum(1) AS sum(val2)", "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 0] as id", - "[COLUMN 2] as name", - "[COLUMN 3] * COALESCE([COLUMN 4], INT64(1)) as count(m.predef1)", - "[COLUMN 1]" + "OperatorType": "SimpleProjection", + "Columns": [ + 1, + 1 ], "Inputs": [ { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|1) ASC", + "OperatorType": "Limit", + "Count": "INT64(2)", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:2,R:3,R:0,L:1,R:1", - "JoinVars": { - "m_order": 0 + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "user_extra_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select m.`order`, count(m.predef1), weight_string(m.`order`) from user_extra as m where 1 != 1 group by m.`order`, weight_string(m.`order`)", - "Query": "select m.`order`, count(m.predef1), weight_string(m.`order`) from user_extra as m group by m.`order`, weight_string(m.`order`)", - "Table": "user_extra" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.`name`, count(*), u.id, weight_string(u.id) from `user` as u where 1 != 1 group by u.id, weight_string(u.id)", - "Query": "select u.`name`, count(*), u.id, weight_string(u.id) from `user` as u where u.id = :m_order group by u.id, weight_string(u.id)", - "Table": "`user`", - "Values": [ - ":m_order" - ], - "Vindex": "user_index" - } - ] + "FieldQuery": "select id, val2 from (select id, val2 from `user` where 1 != 1) as x where 1 != 1", + "Query": "select id, val2 from (select id, val2 from `user` where val2 is null) as x limit :__upper_limit", + "Table": "`user`" } ] } @@ -4900,36 +5896,32 @@ ] }, "TablesUsed": [ - "user.user", - "user.user_extra" + "user.user" ] } }, { - "comment": "Aggregation in a left join query", - "query": "select count (u.id) from user u left join user_extra ue on u.col = ue.col", + "comment": "aggregation on top of aggregation works fine", + "query": "select distinct count(*) from user, (select distinct count(*) from user) X", "plan": { "QueryType": "SELECT", - "Original": "select count (u.id) from user u left join user_extra ue on u.col = ue.col", + "Original": "select distinct count(*) from user, (select distinct count(*) from user) X", "Instructions": { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count(u.id)", + "Aggregates": "sum_count_star(0) AS count(*)", "Inputs": [ { "OperatorType": "Projection", "Expressions": [ - "[COLUMN 0] * COALESCE([COLUMN 1], INT64(1)) as count(u.id)" + "[COLUMN 0] * [COLUMN 1] as count(*)" ], "Inputs": [ { "OperatorType": "Join", - "Variant": "LeftJoin", - "JoinColumnIndexes": "L:1,R:1", - "JoinVars": { - "u_col": 0 - }, - "TableName": "`user`_user_extra", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0", + "TableName": "`user`_`user`", "Inputs": [ { "OperatorType": "Route", @@ -4938,20 +5930,45 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.col, count(u.id) from `user` as u where 1 != 1 group by u.col", - "Query": "select u.col, count(u.id) from `user` as u group by u.col", + "FieldQuery": "select count(*) from `user` where 1 != 1", + "Query": "select count(*) from `user`", "Table": "`user`" }, { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1, count(*) from user_extra as ue where 1 != 1 group by 1", - "Query": "select 1, count(*) from user_extra as ue where ue.col = :u_col group by 1", - "Table": "user_extra" + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "count_star(0)", + "GroupBy": "1", + "Inputs": [ + { + "OperatorType": "SimpleProjection", + "Columns": [ + 2, + 1 + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(0) AS count(*), any_value(2)", + "GroupBy": "1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), .0, 1 from `user` where 1 != 1 group by .0", + "Query": "select count(*), .0, 1 from `user` group by .0", + "Table": "`user`" + } + ] + } + ] + } + ] } ] } @@ -4960,8 +5977,7 @@ ] }, "TablesUsed": [ - "user.user", - "user.user_extra" + "user.user" ] } } diff --git a/go/vt/vtgate/planbuilder/testdata/ddl_cases.json b/go/vt/vtgate/planbuilder/testdata/ddl_cases.json index 8326922225c..eec1a0ce101 100644 --- a/go/vt/vtgate/planbuilder/testdata/ddl_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/ddl_cases.json @@ -251,22 +251,7 @@ { "comment": "create view with subquery in unsharded keyspace", "query": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a", - "v3-plan": { - "QueryType": "DDL", - "Original": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a", - "Instructions": { - "OperatorType": "DDL", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "Query": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a" - }, - "TablesUsed": [ - "main.view_a" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DDL", "Original": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a", "Instructions": { @@ -380,22 +365,7 @@ { "comment": "Create View with authoritative column", "query": "create view user.tmp_view as select * from user.authoritative", - "v3-plan": { - "QueryType": "DDL", - "Original": "create view user.tmp_view as select * from user.authoritative", - "Instructions": { - "OperatorType": "DDL", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "Query": "create view tmp_view as select * from authoritative" - }, - "TablesUsed": [ - "user.tmp_view" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DDL", "Original": "create view user.tmp_view as select * from user.authoritative", "Instructions": { diff --git a/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json b/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json index 30547db61c4..d05631cbff5 100644 --- a/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json +++ b/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json @@ -116,22 +116,7 @@ { "comment": "create view with select * from authoritative table", "query": "create view user.view_a as select * from authoritative", - "v3-plan": { - "QueryType": "DDL", - "Original": "create view user.view_a as select * from authoritative", - "Instructions": { - "OperatorType": "DDL", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "Query": "create view view_a as select * from authoritative" - }, - "TablesUsed": [ - "user.view_a" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DDL", "Original": "create view user.view_a as select * from authoritative", "Instructions": { @@ -150,22 +135,7 @@ { "comment": "create view with select * from join of authoritative tables", "query": "create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id", - "v3-plan": { - "QueryType": "DDL", - "Original": "create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id", - "Instructions": { - "OperatorType": "DDL", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "Query": "create view view_a as select * from authoritative as a join authoritative as b on a.user_id = b.user_id" - }, - "TablesUsed": [ - "user.view_a" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DDL", "Original": "create view user.view_a as select * from authoritative a join authoritative b on a.user_id=b.user_id", "Instructions": { @@ -184,22 +154,7 @@ { "comment": "create view with select * from qualified authoritative table", "query": "create view user.view_a as select a.* from authoritative a", - "v3-plan": { - "QueryType": "DDL", - "Original": "create view user.view_a as select a.* from authoritative a", - "Instructions": { - "OperatorType": "DDL", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "Query": "create view view_a as select a.* from authoritative as a" - }, - "TablesUsed": [ - "user.view_a" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DDL", "Original": "create view user.view_a as select a.* from authoritative a", "Instructions": { @@ -237,22 +192,7 @@ { "comment": "create view with select authoritative.* with intermixing still expands", "query": "create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id", - "v3-plan": { - "QueryType": "DDL", - "Original": "create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id", - "Instructions": { - "OperatorType": "DDL", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "Query": "create view view_a as select `user`.id, a.*, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id" - }, - "TablesUsed": [ - "user.view_a" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DDL", "Original": "create view user.view_a as select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id", "Instructions": { @@ -697,11 +637,6 @@ ] } }, - { - "comment": "drop table with incompatible tables", - "query": "drop table user, unsharded_a", - "plan": "VT12001: unsupported: Tables or Views specified in the query do not belong to the same destination" - }, { "comment": "drop table with unknown table", "query": "drop table unknown", @@ -727,11 +662,6 @@ ] } }, - { - "comment": "drop view with incompatible views", - "query": "drop view user, unsharded_a", - "plan": "VT12001: unsupported: Tables or Views specified in the query do not belong to the same destination" - }, { "comment": "drop view with unknown view", "query": "drop view unknown", @@ -776,11 +706,6 @@ ] } }, - { - "comment": "Rename table with different keyspace tables", - "query": "rename table user_extra to b, main.a to b", - "plan": "VT12001: unsupported: Tables or Views specified in the query do not belong to the same destination" - }, { "comment": "Rename table with change in keyspace name", "query": "rename table user_extra to main.b", diff --git a/go/vt/vtgate/planbuilder/testdata/dml_cases.json b/go/vt/vtgate/planbuilder/testdata/dml_cases.json index d41e586019b..90c1cd5e89a 100644 --- a/go/vt/vtgate/planbuilder/testdata/dml_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/dml_cases.json @@ -23,7 +23,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update m1 set val = 1", "Table": "m1" }, @@ -46,7 +45,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update unsharded set val = 1", "Table": "unsharded" }, @@ -69,7 +67,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update unsharded set col = (select col from unsharded limit 1)", "Table": "unsharded" }, @@ -92,7 +89,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update unsharded set col = (select id from unsharded union select id from unsharded)", "Table": "unsharded" }, @@ -115,7 +111,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update unsharded set col = (select id from unsharded as a join unsharded as b on a.id = b.id)", "Table": "unsharded" }, @@ -138,7 +133,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id set col1 = 'asdf' where keepers.id is null and foo.col is not null and foo.col < 1000", "Table": "unsharded" }, @@ -150,30 +144,7 @@ { "comment": "routing rules: updated of a routed table", "query": "update route1 set a=1 where id=1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update route1 set a=1 where id=1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update `user` as route1 set a = 1 where id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update route1 set a=1 where id=1", "Instructions": { @@ -184,7 +155,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update `user` as route1 set a = 1 where id = 1", "Table": "user", "Values": [ @@ -200,26 +170,7 @@ { "comment": "update: routing rules for subquery.", "query": "update unsharded_a set a=(select a from route2)", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update unsharded_a set a=(select a from route2)", - "Instructions": { - "OperatorType": "Update", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update unsharded_a set a = (select a from unsharded as route2)", - "Table": "unsharded, unsharded_a" - }, - "TablesUsed": [ - "main.unsharded_a" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update unsharded_a set a=(select a from route2)", "Instructions": { @@ -230,7 +181,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update unsharded_a set a = (select a from unsharded as route2)", "Table": "unsharded, unsharded_a" }, @@ -254,7 +204,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from unsharded", "Table": "unsharded" }, @@ -277,7 +226,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from seq", "Table": "seq" }, @@ -300,7 +248,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from unsharded_ref", "Table": "unsharded_ref" }, @@ -312,30 +259,7 @@ { "comment": "update by primary keyspace id", "query": "update user set val = 1 where id = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user set val = 1 where id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update `user` set val = 1 where id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set val = 1 where id = 1", "Instructions": { @@ -346,7 +270,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update `user` set val = 1 where id = 1", "Table": "user", "Values": [ @@ -362,30 +285,7 @@ { "comment": "update by primary keyspace id with alias", "query": "update user as user_alias set val = 1 where user_alias.id = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user as user_alias set val = 1 where user_alias.id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update `user` as user_alias set val = 1 where user_alias.id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user as user_alias set val = 1 where user_alias.id = 1", "Instructions": { @@ -396,7 +296,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update `user` as user_alias set val = 1 where user_alias.id = 1", "Table": "user", "Values": [ @@ -412,30 +311,7 @@ { "comment": "update by primary keyspace id with parenthesized expression", "query": "update user set val = 1 where (id = 1)", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user set val = 1 where (id = 1)", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update `user` set val = 1 where id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set val = 1 where (id = 1)", "Instructions": { @@ -446,7 +322,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update `user` set val = 1 where id = 1", "Table": "user", "Values": [ @@ -462,30 +337,7 @@ { "comment": "update by primary keyspace id with multi-part where clause with parens", "query": "update user set val = 1 where (name = 'foo' and id = 1)", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user set val = 1 where (name = 'foo' and id = 1)", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update `user` set val = 1 where `name` = 'foo' and id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set val = 1 where (name = 'foo' and id = 1)", "Instructions": { @@ -496,7 +348,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update `user` set val = 1 where `name` = 'foo' and id = 1", "Table": "user", "Values": [ @@ -512,36 +363,7 @@ { "comment": "update by primary keyspace id, changing one vindex column", "query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "email_user_map:3" - ], - "KsidLength": 1, - "KsidVindex": "user_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 for update", - "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1", - "Table": "user_metadata", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user_metadata" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1", "Instructions": { @@ -553,12 +375,11 @@ }, "TargetTabletType": "PRIMARY", "ChangedVindexValues": [ - "email_user_map:3" + "email_user_map:4" ], "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 for update", + "OwnedVindexQuery": "select user_id, email, address, non_planable, email = 'juan@vitess.io' from user_metadata where user_id = 1 for update", "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1", "Table": "user_metadata", "Values": [ @@ -579,37 +400,7 @@ { "comment": "update by primary keyspace id, changing multiple vindex columns", "query": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "address_user_map:4", - "email_user_map:3" - ], - "KsidLength": 1, - "KsidVindex": "user_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io', address = '155 5th street' from user_metadata where user_id = 1 for update", - "Query": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1", - "Table": "user_metadata", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user_metadata" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1", "Instructions": { @@ -621,13 +412,12 @@ }, "TargetTabletType": "PRIMARY", "ChangedVindexValues": [ - "address_user_map:4", - "email_user_map:3" + "address_user_map:5", + "email_user_map:4" ], "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io', address = '155 5th street' from user_metadata where user_id = 1 for update", + "OwnedVindexQuery": "select user_id, email, address, non_planable, email = 'juan@vitess.io', address = '155 5th street' from user_metadata where user_id = 1 for update", "Query": "update user_metadata set email = 'juan@vitess.io', address = '155 5th street' where user_id = 1", "Table": "user_metadata", "Values": [ @@ -643,36 +433,7 @@ { "comment": "update by primary keyspace id, changing one vindex column, using order by and limit", "query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "email_user_map:3" - ], - "KsidLength": 1, - "KsidVindex": "user_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 order by user_id asc limit 10 for update", - "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10", - "Table": "user_metadata", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user_metadata" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10", "Instructions": { @@ -684,12 +445,11 @@ }, "TargetTabletType": "PRIMARY", "ChangedVindexValues": [ - "email_user_map:3" + "email_user_map:4" ], "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select user_id, email, address, email = 'juan@vitess.io' from user_metadata where user_id = 1 order by user_id asc limit 10 for update", + "OwnedVindexQuery": "select user_id, email, address, non_planable, email = 'juan@vitess.io' from user_metadata where user_id = 1 order by user_id asc limit 10 for update", "Query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 order by user_id asc limit 10", "Table": "user_metadata", "Values": [ @@ -705,36 +465,7 @@ { "comment": "update changes non owned vindex column", "query": "update music_extra set music_id = 1 where user_id = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update music_extra set music_id = 1 where user_id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "music_user_map:1" - ], - "KsidLength": 1, - "KsidVindex": "user_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select user_id, music_id = 1 from music_extra where user_id = 1 for update", - "Query": "update music_extra set music_id = 1 where user_id = 1", - "Table": "music_extra", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.music_extra" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update music_extra set music_id = 1 where user_id = 1", "Instructions": { @@ -750,7 +481,6 @@ ], "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select user_id, music_id = 1 from music_extra where user_id = 1 for update", "Query": "update music_extra set music_id = 1 where user_id = 1", "Table": "music_extra", @@ -767,18 +497,17 @@ { "comment": "update by primary keyspace id, stray where clause", "query": "update user set val = 1 where id = id2 and id = 1", - "v3-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set val = 1 where id = id2 and id = 1", "Instructions": { "OperatorType": "Update", - "Variant": "Equal", + "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update `user` set val = 1 where id = id2 and id = 1", "Table": "user", "Values": [ @@ -789,10 +518,14 @@ "TablesUsed": [ "user.user" ] - }, - "gen4-plan": { + } + }, + { + "comment": "update by primary keyspace id, stray where clause with conversion error", + "query": "update user set val = 1 where id = 18446744073709551616 and id = 1", + "plan": { "QueryType": "UPDATE", - "Original": "update user set val = 1 where id = id2 and id = 1", + "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1", "Instructions": { "OperatorType": "Update", "Variant": "EqualUnique", @@ -801,8 +534,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update `user` set val = 1 where id = id2 and id = 1", + "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1", "Table": "user", "Values": [ "INT64(1)" @@ -815,100 +547,23 @@ } }, { - "comment": "update by primary keyspace id, stray where clause with conversion error", - "query": "update user set val = 1 where id = 18446744073709551616 and id = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1", + "comment": "delete from by primary keyspace id", + "query": "delete from user where id = 1", + "plan": { + "QueryType": "DELETE", + "Original": "delete from user where id = 1", "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", + "OperatorType": "Delete", + "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { - "QueryType": "UPDATE", - "Original": "update user set val = 1 where id = 18446744073709551616 and id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update `user` set val = 1 where id = 18446744073709551616 and id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "delete from by primary keyspace id", - "query": "delete from user where id = 1", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from user where id = 1", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": "user_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id = 1 for update", - "Query": "delete from `user` where id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { - "QueryType": "DELETE", - "Original": "delete from user where id = 1", - "Instructions": { - "OperatorType": "Delete", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": "user_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id = 1 for update", - "Query": "delete from `user` where id = 1", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id = 1 for update", + "Query": "delete from `user` where id = 1", "Table": "user", "Values": [ "INT64(1)" @@ -934,7 +589,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete a from unsharded_a as a, unsharded_b as b where a.id = b.id and b.val = 1", "Table": "unsharded_a, unsharded_b" }, @@ -958,7 +612,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete a from unsharded_a as a join unsharded_b as b on a.id = b.id where b.val = 1", "Table": "unsharded_a, unsharded_b" }, @@ -982,7 +635,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete foo from unsharded as foo left join (select id from unsharded where col is not null order by col desc limit 10) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000", "Table": "unsharded" }, @@ -994,33 +646,7 @@ { "comment": "routing rules: deleted from a routed table", "query": "delete from route1 where id = 1", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from route1 where id = 1", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": "user_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as route1 where id = 1 for update", - "Query": "delete from `user` as route1 where id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from route1 where id = 1", "Instructions": { @@ -1033,7 +659,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as route1 where id = 1 for update", "Query": "delete from `user` as route1 where id = 1", "Table": "user", @@ -1050,26 +675,7 @@ { "comment": "delete: routing rules for subquery", "query": "delete from unsharded_a where a=(select a from route2)", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from unsharded_a where a=(select a from route2)", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "delete from unsharded_a where a = (select a from unsharded as route2)", - "Table": "unsharded, unsharded_a" - }, - "TablesUsed": [ - "main.unsharded_a" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from unsharded_a where a=(select a from route2)", "Instructions": { @@ -1080,7 +686,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from unsharded_a where a = (select a from unsharded as route2)", "Table": "unsharded, unsharded_a" }, @@ -1093,30 +698,7 @@ { "comment": "update by lookup", "query": "update music set val = 1 where id = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update music set val = 1 where id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update music set val = 1 where id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "music_user_map" - }, - "TablesUsed": [ - "user.music" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update music set val = 1 where id = 1", "Instructions": { @@ -1127,7 +709,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update music set val = 1 where id = 1", "Table": "music", "Values": [ @@ -1154,7 +735,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update unsharded_a as a join unsharded_b as b on a.id = b.id set a.val = 'foo' where b.val = 1", "Table": "unsharded_a, unsharded_b" }, @@ -1178,7 +758,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update unsharded_a as a, unsharded_b as b set a.val = 'foo' where a.id = b.id and b.val = 1", "Table": "unsharded_a, unsharded_b" }, @@ -1191,33 +770,7 @@ { "comment": "delete from by lookup", "query": "delete from music where id = 1", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from music where id = 1", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": "user_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select user_id, id from music where id = 1 for update", - "Query": "delete from music where id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "music_user_map" - }, - "TablesUsed": [ - "user.music" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from music where id = 1", "Instructions": { @@ -1230,7 +783,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select user_id, id from music where id = 1 for update", "Query": "delete from music where id = 1", "Table": "music", @@ -1247,30 +799,7 @@ { "comment": "delete from, no owned vindexes", "query": "delete from music_extra where user_id = 1", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from music_extra where user_id = 1", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "delete from music_extra where user_id = 1", - "Table": "music_extra", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.music_extra" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from music_extra where user_id = 1", "Instructions": { @@ -1281,7 +810,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from music_extra where user_id = 1", "Table": "music_extra", "Values": [ @@ -1308,7 +836,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into unsharded values ()", "TableName": "unsharded" }, @@ -1331,7 +858,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into unsharded values (1, 2)", "TableName": "unsharded" }, @@ -1354,7 +880,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into unsharded values (1, 2) on duplicate key update x = 3", "TableName": "unsharded" }, @@ -1377,7 +902,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(INT64(1))", "Query": "insert into unsharded_authoritative(col1, col2) values (:__seq0, 1)", "TableName": "unsharded_authoritative" }, @@ -1401,7 +926,6 @@ }, "TargetTabletType": "PRIMARY", "InsertIgnore": true, - "MultiShardAutocommit": false, "Query": "insert into music(user_id, id) values (:_user_id_0, :_id_0) on duplicate key update user_id = values(user_id)", "TableName": "music", "VindexValues": { @@ -1429,7 +953,6 @@ }, "TargetTabletType": "PRIMARY", "InsertIgnore": true, - "MultiShardAutocommit": false, "Query": "insert into music(user_id, id) values (:_user_id_0, :_id_0), (:_user_id_1, :_id_1) on duplicate key update user_id = values(user_id)", "TableName": "music", "VindexValues": { @@ -1445,26 +968,7 @@ { "comment": "insert unsharded with select", "query": "insert into unsharded select id from unsharded_auto", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into unsharded select id from unsharded_auto", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "insert into unsharded select id from unsharded_auto for update", - "TableName": "unsharded" - }, - "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into unsharded select id from unsharded_auto", "Instructions": { @@ -1475,8 +979,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "insert into unsharded select id from unsharded_auto for update", + "Query": "insert into unsharded select id from unsharded_auto", "TableName": "unsharded" }, "TablesUsed": [ @@ -1488,26 +991,7 @@ { "comment": "insert unsharded with select with join", "query": "insert into unsharded select id from unsharded join unsharded_auto", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into unsharded select id from unsharded join unsharded_auto", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "insert into unsharded select id from unsharded join unsharded_auto for update", - "TableName": "unsharded" - }, - "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into unsharded select id from unsharded join unsharded_auto", "Instructions": { @@ -1518,8 +1002,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "insert into unsharded select id from unsharded join unsharded_auto for update", + "Query": "insert into unsharded select id from unsharded join unsharded_auto", "TableName": "unsharded" }, "TablesUsed": [ @@ -1542,7 +1025,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(DECIMAL(18446744073709551616))", "Query": "insert into unsharded_auto(id, val) values (:__seq0, 'aa')", "TableName": "unsharded_auto" }, @@ -1565,7 +1048,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(INT64(1))", "Query": "insert into unsharded_auto(id, val) values (:__seq0, 'aa')", "TableName": "unsharded_auto" }, @@ -1588,7 +1071,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(NULL)", "Query": "insert into unsharded_auto(val, id) values ('aa', :__seq0)", "TableName": "unsharded_auto" }, @@ -1611,7 +1094,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(NULL)", "Query": "insert into unsharded_auto(val, id) values (false, :__seq0)", "TableName": "unsharded_auto" }, @@ -1634,7 +1117,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(INT64(1), NULL)", "Query": "insert into unsharded_auto(id, val) values (:__seq0, 'aa'), (:__seq1, 'bb')", "TableName": "unsharded_auto" }, @@ -1657,7 +1140,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into unsharded values (1, 1)", "TableName": "unsharded" }, @@ -1680,7 +1162,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(INT64(1))", "Query": "insert into `user`(id, val, `Name`, Costly) values (:_Id_0, 1, :_Name_0, :_Costly_0)", "TableName": "user", "VindexValues": { @@ -1708,7 +1190,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(INT64(1))", "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)", "TableName": "user", "VindexValues": { @@ -1725,7 +1207,7 @@ { "comment": "insert with mimatched column list", "query": "insert into user(id) values (1, 2)", - "plan": "VT13001: [BUG] column list does not match values" + "plan": "VT03006: column count does not match value count at row 1" }, { "comment": "insert no column list for sharded authoritative table", @@ -1741,7 +1223,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into authoritative(user_id, col1, col2) values (:_user_id_0, 2, 3)", "TableName": "authoritative", "VindexValues": { @@ -1767,7 +1248,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(NULL)", "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)", "TableName": "user", "VindexValues": { @@ -1795,7 +1276,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(INT64(1))", "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)", "TableName": "user", "VindexValues": { @@ -1823,8 +1304,8 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(INT64(1))", "InsertIgnore": true, - "MultiShardAutocommit": false, "Query": "insert ignore into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)", "TableName": "user", "VindexValues": { @@ -1852,8 +1333,8 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(INT64(1))", "InsertIgnore": true, - "MultiShardAutocommit": false, "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0) on duplicate key update col = 2", "TableName": "user", "VindexValues": { @@ -1881,7 +1362,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(:aa)", "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0)", "TableName": "user", "VindexValues": { @@ -1909,7 +1390,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(NULL)", "Query": "insert into `user`(nonid, id, `Name`, Costly) values (2, :_Id_0, :_Name_0, :_Costly_0)", "TableName": "user", "VindexValues": { @@ -1937,7 +1418,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(NULL)", "Query": "insert into `user`(id, nonid, `Name`, Costly) values (:_Id_0, 2, :_Name_0, :_Costly_0)", "TableName": "user", "VindexValues": { @@ -1965,7 +1446,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(NULL)", "Query": "insert into `user`(nonid, id, `Name`, Costly) values (true, :_Id_0, :_Name_0, :_Costly_0)", "TableName": "user", "VindexValues": { @@ -1993,7 +1474,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(INT64(1))", "Query": "insert into `user`(nonid, `name`, id, Costly) values (2, :_Name_0, :_Id_0, :_Costly_0)", "TableName": "user", "VindexValues": { @@ -2021,7 +1502,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(NULL)", "Query": "insert into user_extra(nonid, extra_id, user_id) values (2, :__seq0, :_user_id_0)", "TableName": "user_extra", "VindexValues": { @@ -2047,7 +1528,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into `weird``name`(`a``b*c`, `b*c`) values (:_a_b_c_0, 2)", "TableName": "weird`name", "VindexValues": { @@ -2062,26 +1542,7 @@ { "comment": "unsharded insert from union", "query": "insert into unsharded select 1 from dual union select 1 from dual", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into unsharded select 1 from dual union select 1 from dual", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "insert into unsharded select 1 from dual union select 1 from dual for update", - "TableName": "unsharded" - }, - "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into unsharded select 1 from dual union select 1 from dual", "Instructions": { @@ -2092,8 +1553,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "insert into unsharded select 1 from dual union select 1 from dual for update", + "Query": "insert into unsharded select 1 from dual union select 1 from dual", "TableName": "unsharded" }, "TablesUsed": [ @@ -2116,7 +1576,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(DECIMAL(18446744073709551616))", "Query": "insert into user_extra(nonid, extra_id, user_id) values (2, :__seq0, :_user_id_0)", "TableName": "user_extra", "VindexValues": { @@ -2142,7 +1602,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into music_extra(music_id, user_id) values (:_music_id_0, :_user_id_0)", "TableName": "music_extra", "VindexValues": { @@ -2158,7 +1617,7 @@ { "comment": "insert invalid index value", "query": "insert into music_extra(music_id, user_id) values(1, id)", - "plan": "column access not supported here" + "plan": "cannot lookup column 'id' (column access not supported here)" }, { "comment": "insert invalid table", @@ -2179,7 +1638,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(INT64(1), INT64(2))", "Query": "insert into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)", "TableName": "user", "VindexValues": { @@ -2207,7 +1666,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(INT64(1), INT64(2))", "Query": "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)", "QueryTimeout": 1, "TableName": "user", @@ -2236,6 +1695,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(INT64(1), INT64(2))", "MultiShardAutocommit": true, "Query": "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */ into `user`(id, `Name`, Costly) values (:_Id_0, :_Name_0, :_Costly_0), (:_Id_1, :_Name_1, :_Costly_1)", "TableName": "user", @@ -2253,7 +1713,7 @@ { "comment": "insert into a vindex not allowed", "query": "insert into user_index(id) values(1)", - "plan": "VT12001: unsupported: multi-shard or vindex write statement" + "plan": "VT09014: vindex cannot be modified" }, { "comment": "simple replace unsharded", @@ -2269,7 +1729,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "replace into unsharded values (1, 2)", "TableName": "unsharded" }, @@ -2281,26 +1740,7 @@ { "comment": "replace unsharded with select", "query": "replace into unsharded select id from unsharded_auto", - "v3-plan": { - "QueryType": "INSERT", - "Original": "replace into unsharded select id from unsharded_auto", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "replace into unsharded select id from unsharded_auto for update", - "TableName": "unsharded" - }, - "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "replace into unsharded select id from unsharded_auto", "Instructions": { @@ -2311,8 +1751,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "replace into unsharded select id from unsharded_auto for update", + "Query": "replace into unsharded select id from unsharded_auto", "TableName": "unsharded" }, "TablesUsed": [ @@ -2335,7 +1774,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(DECIMAL(18446744073709551616))", "Query": "replace into unsharded_auto(id, val) values (:__seq0, 'aa')", "TableName": "unsharded_auto" }, @@ -2358,7 +1797,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(INT64(1))", "Query": "replace into unsharded_auto(id, val) values (:__seq0, 'aa')", "TableName": "unsharded_auto" }, @@ -2381,7 +1820,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(NULL)", "Query": "replace into unsharded_auto(val, id) values ('aa', :__seq0)", "TableName": "unsharded_auto" }, @@ -2404,7 +1843,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(INT64(1), NULL)", "Query": "replace into unsharded_auto(id, val) values (:__seq0, 'aa'), (:__seq1, 'bb')", "TableName": "unsharded_auto" }, @@ -2432,7 +1871,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into multicolvin(column_a, column_b, column_c, kid) values (:_column_a_0, :_column_b_0, :_column_c_0, :_kid_0)", "TableName": "multicolvin", "VindexValues": { @@ -2460,7 +1898,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into overlap_vindex(kid, column_a, column_b) values (:_kid_0, :_column_a_0, 3)", "TableName": "overlap_vindex", "VindexValues": { @@ -2487,7 +1924,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into multicolvin(column_a, column_b, column_c, kid) values (:_column_a_0, :_column_b_0, :_column_c_0, :_kid_0), (:_column_a_1, :_column_b_1, :_column_c_1, :_kid_1)", "TableName": "multicolvin", "VindexValues": { @@ -2504,12 +1940,12 @@ { "comment": "delete row in a multi column vindex table", "query": "delete from multicolvin where kid=1", - "v3-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from multicolvin where kid=1", "Instructions": { "OperatorType": "Delete", - "Variant": "Equal", + "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true @@ -2517,7 +1953,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "kid_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select kid, column_a, column_b, column_c from multicolvin where kid = 1 for update", "Query": "delete from multicolvin where kid = 1", "Table": "multicolvin", @@ -2529,69 +1964,14 @@ "TablesUsed": [ "user.multicolvin" ] - }, - "gen4-plan": { - "QueryType": "DELETE", - "Original": "delete from multicolvin where kid=1", - "Instructions": { - "OperatorType": "Delete", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": "kid_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select kid, column_a, column_b, column_c from multicolvin where kid = 1 for update", - "Query": "delete from multicolvin where kid = 1", - "Table": "multicolvin", - "Values": [ - "INT64(1)" - ], - "Vindex": "kid_index" - }, - "TablesUsed": [ - "user.multicolvin" - ] - } - }, - { - "comment": "update columns of multi column vindex", - "query": "update multicolvin set column_b = 1, column_c = 2 where kid = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update multicolvin set column_b = 1, column_c = 2 where kid = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "colb_colc_map:4" - ], - "KsidLength": 1, - "KsidVindex": "kid_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update", - "Query": "update multicolvin set column_b = 1, column_c = 2 where kid = 1", - "Table": "multicolvin", - "Values": [ - "INT64(1)" - ], - "Vindex": "kid_index" - }, - "TablesUsed": [ - "user.multicolvin" - ] - }, - "gen4-plan": { - "QueryType": "UPDATE", - "Original": "update multicolvin set column_b = 1, column_c = 2 where kid = 1", + } + }, + { + "comment": "update columns of multi column vindex", + "query": "update multicolvin set column_b = 1, column_c = 2 where kid = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update multicolvin set column_b = 1, column_c = 2 where kid = 1", "Instructions": { "OperatorType": "Update", "Variant": "EqualUnique", @@ -2605,7 +1985,6 @@ ], "KsidLength": 1, "KsidVindex": "kid_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update", "Query": "update multicolvin set column_b = 1, column_c = 2 where kid = 1", "Table": "multicolvin", @@ -2622,37 +2001,7 @@ { "comment": "update multiple vindexes, with multi column vindex", "query": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "cola_map:4", - "colb_colc_map:5" - ], - "KsidLength": 1, - "KsidVindex": "kid_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_a = 0, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update", - "Query": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1", - "Table": "multicolvin", - "Values": [ - "INT64(1)" - ], - "Vindex": "kid_index" - }, - "TablesUsed": [ - "user.multicolvin" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1", "Instructions": { @@ -2669,7 +2018,6 @@ ], "KsidLength": 1, "KsidVindex": "kid_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_a = 0, column_b = 1 and column_c = 2 from multicolvin where kid = 1 for update", "Query": "update multicolvin set column_a = 0, column_b = 1, column_c = 2 where kid = 1", "Table": "multicolvin", @@ -2697,7 +2045,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update user_extra set val = 1", "Table": "user_extra" }, @@ -2720,7 +2067,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update user_extra set val = 1", "Table": "user_extra" }, @@ -2766,7 +2112,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update /*vt+ QUERY_TIMEOUT_MS=1 */ user_extra set val = 1", "QueryTimeout": 1, "Table": "user_extra" @@ -2790,7 +2135,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update user_extra set val = 1 where id between 1 and 2", "Table": "user_extra" }, @@ -2813,7 +2157,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update user_extra set val = 1 where user_id in (1, 2)", "Table": "user_extra", "Values": [ @@ -2840,7 +2183,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update user_extra set val = 1 where `name` = 'foo'", "Table": "user_extra" }, @@ -2863,7 +2205,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update user_extra set val = 1 where id in (1, 2)", "Table": "user_extra" }, @@ -2886,7 +2227,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update user_extra set val = 1 where `name` = 'foo' or id = 1", "Table": "user_extra" }, @@ -2909,7 +2249,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from user_extra", "Table": "user_extra" }, @@ -2932,7 +2271,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from user_extra", "Table": "user_extra" }, @@ -2955,7 +2293,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from user_extra where user_id between 1 and 2", "Table": "user_extra" }, @@ -2978,7 +2315,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from user_extra where `name` = 'jose'", "Table": "user_extra" }, @@ -3024,7 +2360,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete /*vt+ QUERY_TIMEOUT_MS=1 */ from user_extra where `name` = 'jose'", "QueryTimeout": 1, "Table": "user_extra" @@ -3048,7 +2383,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from user_extra where user_id in (1, 2)", "Table": "user_extra", "Values": [ @@ -3064,26 +2398,7 @@ { "comment": "unsharded update where inner query references outer query", "query": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)", - "Instructions": { - "OperatorType": "Update", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)", - "Table": "unsharded, unsharded_a, unsharded_b" - }, - "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)", "Instructions": { @@ -3094,7 +2409,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)", "Table": "unsharded, unsharded_a, unsharded_b" }, @@ -3108,26 +2422,7 @@ { "comment": "unsharded delete where inner query references outer query", "query": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)", - "Table": "unsharded, unsharded_a" - }, - "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)", "Instructions": { @@ -3138,7 +2433,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)", "Table": "unsharded, unsharded_a" }, @@ -3151,36 +2445,7 @@ { "comment": "update vindex value to null", "query": "update user set name = null where id = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user set name = null where id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "name_user_map:3" - ], - "KsidLength": 1, - "KsidVindex": "user_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id = 1 for update", - "Query": "update `user` set `name` = null where id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set name = null where id = 1", "Instructions": { @@ -3196,7 +2461,6 @@ ], "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id = 1 for update", "Query": "update `user` set `name` = null where id = 1", "Table": "user", @@ -3224,7 +2488,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into unsharded values (:__lastInsertId, 2)", "TableName": "unsharded" }, @@ -3252,7 +2515,6 @@ ], "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id in (1, 2, 3) for update", "Query": "update `user` set `name` = null where id in (1, 2, 3)", "Table": "user", @@ -3285,7 +2547,6 @@ ], "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` for update", "Query": "update `user` set `name` = null", "Table": "user" @@ -3314,7 +2575,6 @@ ], "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id + 1 = 2 for update", "Query": "update `user` set `name` = null where id + 1 = 2", "Table": "user" @@ -3340,7 +2600,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id in (1, 2, 3) for update", "Query": "delete from `user` where id in (1, 2, 3)", "Table": "user", @@ -3370,7 +2629,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id + 1 = 2 for update", "Query": "delete from `user` where id + 1 = 2", "Table": "user" @@ -3396,7 +2654,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update", "Query": "delete from `user`", "Table": "user" @@ -3409,33 +2666,7 @@ { "comment": "delete with single table targets", "query": "delete music from music where id = 1", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete music from music where id = 1", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": "user_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select user_id, id from music where id = 1 for update", - "Query": "delete from music where id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "music_user_map" - }, - "TablesUsed": [ - "user.music" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete music from music where id = 1", "Instructions": { @@ -3448,7 +2679,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select user_id, id from music where id = 1 for update", "Query": "delete from music where id = 1", "Table": "music", @@ -3476,7 +2706,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update `user` set val = 1", "Table": "user" }, @@ -3501,7 +2730,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update", "Query": "delete from `user`", "Table": "user" @@ -3514,36 +2742,7 @@ { "comment": "update multi column vindex, without values for all the vindex columns", "query": "update multicolvin set column_c = 2 where kid = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update multicolvin set column_c = 2 where kid = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "colb_colc_map:4" - ], - "KsidLength": 1, - "KsidVindex": "kid_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_c = 2 from multicolvin where kid = 1 for update", - "Query": "update multicolvin set column_c = 2 where kid = 1", - "Table": "multicolvin", - "Values": [ - "INT64(1)" - ], - "Vindex": "kid_index" - }, - "TablesUsed": [ - "user.multicolvin" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update multicolvin set column_c = 2 where kid = 1", "Instructions": { @@ -3559,7 +2758,6 @@ ], "KsidLength": 1, "KsidVindex": "kid_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select kid, column_a, column_b, column_c, column_c = 2 from multicolvin where kid = 1 for update", "Query": "update multicolvin set column_c = 2 where kid = 1", "Table": "multicolvin", @@ -3576,36 +2774,7 @@ { "comment": "update with binary value", "query": "update user set name = _binary 'abc' where id = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user set name = _binary 'abc' where id = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "name_user_map:3" - ], - "KsidLength": 1, - "KsidVindex": "user_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = _binary 'abc' from `user` where id = 1 for update", - "Query": "update `user` set `name` = _binary 'abc' where id = 1", - "Table": "user", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set name = _binary 'abc' where id = 1", "Instructions": { @@ -3621,7 +2790,6 @@ ], "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = _binary 'abc' from `user` where id = 1 for update", "Query": "update `user` set `name` = _binary 'abc' where id = 1", "Table": "user", @@ -3638,29 +2806,7 @@ { "comment": "delete with binary value", "query": "delete from user where name = _binary 'abc'", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from user where name = _binary 'abc'", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": "user_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `name` = _binary 'abc' for update", - "Query": "delete from `user` where `name` = _binary 'abc'", - "Table": "user" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from user where name = _binary 'abc'", "Instructions": { @@ -3673,7 +2819,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `name` = _binary 'abc' for update", "Query": "delete from `user` where `name` = _binary 'abc'", "Table": "user", @@ -3703,7 +2848,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update", "Query": "delete from `user`", "Table": "user" @@ -3732,7 +2876,6 @@ ], "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = 'myname' from `user` for update", "Query": "update `user` set `name` = 'myname'", "Table": "user" @@ -3756,7 +2899,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update user_extra set val = 1", "Table": "user_extra" }, @@ -3781,7 +2923,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `user`.id * `user`.col = `user`.foo for update", "Query": "delete from `user` where `user`.id * `user`.col = `user`.foo", "Table": "user" @@ -3809,26 +2950,7 @@ { "comment": "INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)", "query": "INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)", - "v3-plan": { - "QueryType": "INSERT", - "Original": "INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "insert into user_privacy_consents(user_id, accepted_at) select user_id, accepted_at from (select 1 as user_id, 1629194864 as accepted_at from dual) as tmp where not exists (select 1 from user_privacy_consents where user_id = 1 limit 1) for update", - "TableName": "user_privacy_consents" - }, - "TablesUsed": [ - "main.user_privacy_consents" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "INSERT INTO main.user_privacy_consents (user_id, accepted_at) SELECT user_id, accepted_at FROM (SELECT 1 as user_id, 1629194864 as accepted_at) AS tmp WHERE NOT EXISTS (SELECT user_id FROM main.user_privacy_consents WHERE user_id = 1)", "Instructions": { @@ -3839,8 +2961,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "insert into user_privacy_consents(user_id, accepted_at) select user_id, accepted_at from (select 1 as user_id, 1629194864 as accepted_at from dual) as tmp where not exists (select 1 from user_privacy_consents where user_id = 1 limit 1) for update", + "Query": "insert into user_privacy_consents(user_id, accepted_at) select user_id, accepted_at from (select 1 as user_id, 1629194864 as accepted_at from dual) as tmp where not exists (select 1 from user_privacy_consents where user_id = 1 limit 1)", "TableName": "user_privacy_consents" }, "TablesUsed": [ @@ -3865,7 +2986,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "xxhash", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 20 for update", "Query": "delete from t1 where c2 = 20", "Table": "t1" @@ -3894,7 +3014,6 @@ ], "KsidLength": 1, "KsidVindex": "xxhash", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 20 for update", "Query": "update t1 set c2 = 1 where c2 = 20", "Table": "t1" @@ -3907,33 +3026,7 @@ { "comment": "Delete on backfilling and non-backfilling unique lookup vindexes should be a delete equal", "query": "delete from zlookup_unique.t1 where c2 = 10 and c3 = 20", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from zlookup_unique.t1 where c2 = 10 and c3 = 20", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "zlookup_unique", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": "xxhash", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 10 and c3 = 20 for update", - "Query": "delete from t1 where c2 = 10 and c3 = 20", - "Table": "t1", - "Values": [ - "INT64(20)" - ], - "Vindex": "lookup_t1_2" - }, - "TablesUsed": [ - "zlookup_unique.t1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from zlookup_unique.t1 where c2 = 10 and c3 = 20", "Instructions": { @@ -3946,7 +3039,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "xxhash", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 10 and c3 = 20 for update", "Query": "delete from t1 where c2 = 10 and c3 = 20", "Table": "t1", @@ -3963,36 +3055,7 @@ { "comment": "Update on backfilling and non-backfilling unique lookup vindexes should be an equal", "query": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "zlookup_unique", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "lookup_t1:3" - ], - "KsidLength": 1, - "KsidVindex": "xxhash", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 10 and c3 = 20 for update", - "Query": "update t1 set c2 = 1 where c2 = 10 and c3 = 20", - "Table": "t1", - "Values": [ - "INT64(20)" - ], - "Vindex": "lookup_t1_2" - }, - "TablesUsed": [ - "zlookup_unique.t1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update zlookup_unique.t1 set c2 = 1 where c2 = 10 and c3 = 20", "Instructions": { @@ -4008,7 +3071,6 @@ ], "KsidLength": 1, "KsidVindex": "xxhash", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 10 and c3 = 20 for update", "Query": "update t1 set c2 = 1 where c2 = 10 and c3 = 20", "Table": "t1", @@ -4038,7 +3100,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "xxhash", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 10 and c3 in (20, 21) for update", "Query": "delete from t1 where c2 = 10 and c3 in (20, 21)", "Table": "t1", @@ -4071,7 +3132,6 @@ ], "KsidLength": 1, "KsidVindex": "xxhash", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 10 and c3 in (20, 21) for update", "Query": "update t1 set c2 = 1 where c2 = 10 and c3 in (20, 21)", "Table": "t1", @@ -4104,7 +3164,6 @@ ], "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly, u.`name` = 'john' from `user` as u where u.col > 20 for update", "Query": "update `user` as u set u.`name` = 'john' where u.col > 20", "Table": "user" @@ -4130,7 +3189,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as u where u.col > 20 for update", "Query": "delete from `user` as u where u.col > 20", "Table": "user" @@ -4143,31 +3201,7 @@ { "comment": "update with a multicol vindex", "query": "update multicol_tbl set x = 1 where cola = 1 and colb = 2", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update multicol_tbl set x = 1 where cola = 1 and colb = 2", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update multicol_tbl set x = 1 where cola = 1 and colb = 2", - "Table": "multicol_tbl", - "Values": [ - "INT64(1)", - "INT64(2)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update multicol_tbl set x = 1 where cola = 1 and colb = 2", "Instructions": { @@ -4178,7 +3212,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update multicol_tbl set x = 1 where cola = 1 and colb = 2", "Table": "multicol_tbl", "Values": [ @@ -4195,31 +3228,7 @@ { "comment": "update with a multicol vindex - reverse order", "query": "update multicol_tbl set x = 1 where colb = 2 and cola = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update multicol_tbl set x = 1 where colb = 2 and cola = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update multicol_tbl set x = 1 where colb = 2 and cola = 1", - "Table": "multicol_tbl", - "Values": [ - "INT64(1)", - "INT64(2)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update multicol_tbl set x = 1 where colb = 2 and cola = 1", "Instructions": { @@ -4230,7 +3239,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update multicol_tbl set x = 1 where colb = 2 and cola = 1", "Table": "multicol_tbl", "Values": [ @@ -4258,7 +3266,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update multicol_tbl set x = 1 where colb in (1, 2) and cola = 1", "Table": "multicol_tbl", "Values": [ @@ -4286,7 +3293,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update multicol_tbl set x = 1 where colb in (1, 2) and cola in (3, 4)", "Table": "multicol_tbl", "Values": [ @@ -4303,34 +3309,7 @@ { "comment": "delete with a multicol vindex", "query": "delete from multicol_tbl where cola = 1 and colb = 2", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from multicol_tbl where cola = 1 and colb = 2", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 2, - "KsidVindex": "multicolIdx", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 and colb = 2 for update", - "Query": "delete from multicol_tbl where cola = 1 and colb = 2", - "Table": "multicol_tbl", - "Values": [ - "INT64(1)", - "INT64(2)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from multicol_tbl where cola = 1 and colb = 2", "Instructions": { @@ -4343,7 +3322,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 2, "KsidVindex": "multicolIdx", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 and colb = 2 for update", "Query": "delete from multicol_tbl where cola = 1 and colb = 2", "Table": "multicol_tbl", @@ -4361,34 +3339,7 @@ { "comment": "delete with a multicol vindex - reverse order", "query": "delete from multicol_tbl where colb = 2 and cola = 1", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from multicol_tbl where colb = 2 and cola = 1", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 2, - "KsidVindex": "multicolIdx", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb = 2 and cola = 1 for update", - "Query": "delete from multicol_tbl where colb = 2 and cola = 1", - "Table": "multicol_tbl", - "Values": [ - "INT64(1)", - "INT64(2)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from multicol_tbl where colb = 2 and cola = 1", "Instructions": { @@ -4401,7 +3352,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 2, "KsidVindex": "multicolIdx", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb = 2 and cola = 1 for update", "Query": "delete from multicol_tbl where colb = 2 and cola = 1", "Table": "multicol_tbl", @@ -4432,7 +3382,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 2, "KsidVindex": "multicolIdx", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb in (1, 2) and cola = 1 for update", "Query": "delete from multicol_tbl where colb in (1, 2) and cola = 1", "Table": "multicol_tbl", @@ -4463,7 +3412,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 2, "KsidVindex": "multicolIdx", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb in (1, 2) and cola in (3, 4) for update", "Query": "delete from multicol_tbl where colb in (1, 2) and cola in (3, 4)", "Table": "multicol_tbl", @@ -4481,37 +3429,7 @@ { "comment": "update with multicol and an owned vindex which changes", "query": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "colc_map:4" - ], - "KsidLength": 2, - "KsidVindex": "multicolIdx", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select cola, colb, colc, `name`, colc = 1 from multicol_tbl where cola = 1 and colb = 2 for update", - "Query": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2", - "Table": "multicol_tbl", - "Values": [ - "INT64(1)", - "INT64(2)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2", "Instructions": { @@ -4527,7 +3445,6 @@ ], "KsidLength": 2, "KsidVindex": "multicolIdx", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select cola, colb, colc, `name`, colc = 1 from multicol_tbl where cola = 1 and colb = 2 for update", "Query": "update multicol_tbl set colc = 1 where cola = 1 and colb = 2", "Table": "multicol_tbl", @@ -4556,7 +3473,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update multicol_tbl set x = 42 where `name` = 'foo'", "Table": "multicol_tbl", "Values": [ @@ -4572,30 +3488,7 @@ { "comment": "update with routing using subsharding column", "query": "update multicol_tbl set x = 42 where cola = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update multicol_tbl set x = 42 where cola = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update multicol_tbl set x = 42 where cola = 1", - "Table": "multicol_tbl", - "Values": [ - "INT64(1)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update multicol_tbl set x = 42 where cola = 1", "Instructions": { @@ -4606,7 +3499,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update multicol_tbl set x = 42 where cola = 1", "Table": "multicol_tbl", "Values": [ @@ -4622,36 +3514,7 @@ { "comment": "update with routing using subsharding column on lookup vindex", "query": "update multicol_tbl set name = 'bar' where cola = 1", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update multicol_tbl set name = 'bar' where cola = 1", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "ChangedVindexValues": [ - "name_muticoltbl_map:4" - ], - "KsidLength": 2, - "KsidVindex": "multicolIdx", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select cola, colb, colc, `name`, `name` = 'bar' from multicol_tbl where cola = 1 for update", - "Query": "update multicol_tbl set `name` = 'bar' where cola = 1", - "Table": "multicol_tbl", - "Values": [ - "INT64(1)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update multicol_tbl set name = 'bar' where cola = 1", "Instructions": { @@ -4667,7 +3530,6 @@ ], "KsidLength": 2, "KsidVindex": "multicolIdx", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select cola, colb, colc, `name`, `name` = 'bar' from multicol_tbl where cola = 1 for update", "Query": "update multicol_tbl set `name` = 'bar' where cola = 1", "Table": "multicol_tbl", @@ -4700,7 +3562,6 @@ ], "KsidLength": 2, "KsidVindex": "multicolIdx", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select cola, colb, colc, `name`, `name` = 'bar' from multicol_tbl where cola in (1, 2) for update", "Query": "update multicol_tbl set `name` = 'bar' where cola in (1, 2)", "Table": "multicol_tbl", @@ -4717,30 +3578,7 @@ { "comment": "update with routing using subsharding column with in query as lower cost over lookup vindex", "query": "update multicol_tbl set x = 1 where name = 'foo' and cola = 2", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update multicol_tbl set x = 1 where name = 'foo' and cola = 2", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update multicol_tbl set x = 1 where `name` = 'foo' and cola = 2", - "Table": "multicol_tbl", - "Values": [ - "INT64(2)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update multicol_tbl set x = 1 where name = 'foo' and cola = 2", "Instructions": { @@ -4751,7 +3589,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update multicol_tbl set x = 1 where `name` = 'foo' and cola = 2", "Table": "multicol_tbl", "Values": [ @@ -4780,7 +3617,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 2, "KsidVindex": "multicolIdx", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where `name` = 'foo' for update", "Query": "delete from multicol_tbl where `name` = 'foo'", "Table": "multicol_tbl", @@ -4797,12 +3633,12 @@ { "comment": "delete with routing using subsharding column", "query": "delete from multicol_tbl where cola = 1", - "v3-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from multicol_tbl where cola = 1", "Instructions": { "OperatorType": "Delete", - "Variant": "Equal", + "Variant": "SubShard", "Keyspace": { "Name": "user", "Sharded": true @@ -4810,7 +3646,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 2, "KsidVindex": "multicolIdx", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 for update", "Query": "delete from multicol_tbl where cola = 1", "Table": "multicol_tbl", @@ -4822,13 +3657,17 @@ "TablesUsed": [ "user.multicol_tbl" ] - }, - "gen4-plan": { + } + }, + { + "comment": "delete with routing using subsharding column with in query", + "query": "delete from multicol_tbl where cola in (1,2)", + "plan": { "QueryType": "DELETE", - "Original": "delete from multicol_tbl where cola = 1", + "Original": "delete from multicol_tbl where cola in (1,2)", "Instructions": { "OperatorType": "Delete", - "Variant": "SubShard", + "Variant": "IN", "Keyspace": { "Name": "user", "Sharded": true @@ -4836,37 +3675,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 2, "KsidVindex": "multicolIdx", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola = 1 for update", - "Query": "delete from multicol_tbl where cola = 1", - "Table": "multicol_tbl", - "Values": [ - "INT64(1)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - } - }, - { - "comment": "delete with routing using subsharding column with in query", - "query": "delete from multicol_tbl where cola in (1,2)", - "plan": { - "QueryType": "DELETE", - "Original": "delete from multicol_tbl where cola in (1,2)", - "Instructions": { - "OperatorType": "Delete", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 2, - "KsidVindex": "multicolIdx", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola in (1, 2) for update", "Query": "delete from multicol_tbl where cola in (1, 2)", "Table": "multicol_tbl", @@ -4883,33 +3691,7 @@ { "comment": "delete with routing using subsharding column with in query as lower cost over lookup vindex", "query": "delete from multicol_tbl where name = 'foo' and cola = 2", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from multicol_tbl where name = 'foo' and cola = 2", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 2, - "KsidVindex": "multicolIdx", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where `name` = 'foo' and cola = 2 for update", - "Query": "delete from multicol_tbl where `name` = 'foo' and cola = 2", - "Table": "multicol_tbl", - "Values": [ - "INT64(2)" - ], - "Vindex": "multicolIdx" - }, - "TablesUsed": [ - "user.multicol_tbl" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from multicol_tbl where name = 'foo' and cola = 2", "Instructions": { @@ -4922,7 +3704,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 2, "KsidVindex": "multicolIdx", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where `name` = 'foo' and cola = 2 for update", "Query": "delete from multicol_tbl where `name` = 'foo' and cola = 2", "Table": "multicol_tbl", @@ -4939,42 +3720,7 @@ { "comment": "insert using select with simple table.", "query": "insert into music(id, user_id) select * from user", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into music(id, user_id) select * from user", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "TableName": "music", - "VindexOffsetFromSelect": { - "music_user_map": "[0]", - "user_index": "[1]" - }, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` for update", - "Table": "`user`" - } - ] - }, - "TablesUsed": [ - "user.music" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into music(id, user_id) select * from user", "Instructions": { @@ -4985,7 +3731,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "TableName": "music", "VindexOffsetFromSelect": { "music_user_map": "[0]", @@ -5000,7 +3745,7 @@ "Sharded": true }, "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` for update", + "Query": "select * from `user` lock in share mode", "Table": "`user`" } ] @@ -5029,42 +3774,7 @@ { "comment": "insert using select with auto-inc column using vitess sequence, sequence column not present", "query": "insert into user_extra(user_id) select id from user", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into user_extra(user_id) select id from user", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "AutoIncrement": "main:1", - "MultiShardAutocommit": false, - "TableName": "user_extra", - "VindexOffsetFromSelect": { - "user_index": "[0]" - }, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` for update", - "Table": "`user`" - } - ] - }, - "TablesUsed": [ - "user.user_extra" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into user_extra(user_id) select id from user", "Instructions": { @@ -5075,8 +3785,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "AutoIncrement": "main:1", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(1)", "TableName": "user_extra", "VindexOffsetFromSelect": { "user_index": "[0]" @@ -5090,7 +3799,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` for update", + "Query": "select id from `user` lock in share mode", "Table": "`user`" } ] @@ -5104,42 +3813,7 @@ { "comment": "insert using select with auto-inc column using vitess sequence, sequence column present", "query": "insert into user_extra(id, user_id) select null, id from user", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into user_extra(id, user_id) select null, id from user", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "AutoIncrement": "main:2", - "MultiShardAutocommit": false, - "TableName": "user_extra", - "VindexOffsetFromSelect": { - "user_index": "[1]" - }, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select null, id from `user` where 1 != 1", - "Query": "select null, id from `user` for update", - "Table": "`user`" - } - ] - }, - "TablesUsed": [ - "user.user_extra" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into user_extra(id, user_id) select null, id from user", "Instructions": { @@ -5150,8 +3824,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "AutoIncrement": "main:2", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(2)", "TableName": "user_extra", "VindexOffsetFromSelect": { "user_index": "[1]" @@ -5165,7 +3838,7 @@ "Sharded": true }, "FieldQuery": "select null, id from `user` where 1 != 1", - "Query": "select null, id from `user` for update", + "Query": "select null, id from `user` lock in share mode", "Table": "`user`" } ] @@ -5179,44 +3852,7 @@ { "comment": "sharded insert from select", "query": "insert into user(id) select 1 from dual", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into user(id) select 1 from dual", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "AutoIncrement": "main:0", - "MultiShardAutocommit": false, - "TableName": "user", - "VindexOffsetFromSelect": { - "costly_map": "[-1]", - "name_user_map": "[-1]", - "user_index": "[0]" - }, - "Inputs": [ - { - "OperatorType": "Projection", - "Expressions": [ - "INT64(1) as 1" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] - } - ] - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into user(id) select 1 from dual", "Instructions": { @@ -5227,8 +3863,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "AutoIncrement": "main:0", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(0)", "TableName": "user", "VindexOffsetFromSelect": { "costly_map": "[-1]", @@ -5237,15 +3872,15 @@ }, "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "INT64(1) as 1" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] + "OperatorType": "Route", + "Variant": "Reference", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 from dual where 1 != 1", + "Query": "select 1 from dual lock in share mode", + "Table": "dual" } ] }, @@ -5258,44 +3893,7 @@ { "comment": "insert using select with sharding column is autoinc and not present in the insert column query", "query": "insert into user(pattern) SELECT 1", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into user(pattern) SELECT 1", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "AutoIncrement": "main:1", - "MultiShardAutocommit": false, - "TableName": "user", - "VindexOffsetFromSelect": { - "costly_map": "[-1]", - "name_user_map": "[-1]", - "user_index": "[1]" - }, - "Inputs": [ - { - "OperatorType": "Projection", - "Expressions": [ - "INT64(1) as 1" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] - } - ] - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into user(pattern) SELECT 1", "Instructions": { @@ -5306,8 +3904,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "AutoIncrement": "main:1", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(1)", "TableName": "user", "VindexOffsetFromSelect": { "costly_map": "[-1]", @@ -5316,15 +3913,15 @@ }, "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "INT64(1) as 1" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] + "OperatorType": "Route", + "Variant": "Reference", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 from dual where 1 != 1", + "Query": "select 1 from dual lock in share mode", + "Table": "dual" } ] }, @@ -5342,7 +3939,7 @@ { "comment": "sharded same keyspace", "query": "insert into user_extra(user_id, col) select col1, col2 from user", - "v3-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into user_extra(user_id, col) select col1, col2 from user", "Instructions": { @@ -5353,8 +3950,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "AutoIncrement": "main:2", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(2)", "TableName": "user_extra", "VindexOffsetFromSelect": { "user_index": "[0]" @@ -5368,87 +3964,32 @@ "Sharded": true }, "FieldQuery": "select col1, col2 from `user` where 1 != 1", - "Query": "select col1, col2 from `user` for update", + "Query": "select col1, col2 from `user` lock in share mode", "Table": "`user`" } ] }, "TablesUsed": [ + "user.user", "user.user_extra" ] - }, - "gen4-plan": { + } + }, + { + "comment": "unsharded same keyspace", + "query": "insert into unsharded(col) select col from unsharded_auto", + "plan": { "QueryType": "INSERT", - "Original": "insert into user_extra(user_id, col) select col1, col2 from user", + "Original": "insert into unsharded(col) select col from unsharded_auto", "Instructions": { "OperatorType": "Insert", - "Variant": "Select", + "Variant": "Unsharded", "Keyspace": { - "Name": "user", - "Sharded": true + "Name": "main", + "Sharded": false }, "TargetTabletType": "PRIMARY", - "AutoIncrement": "main:2", - "MultiShardAutocommit": false, - "TableName": "user_extra", - "VindexOffsetFromSelect": { - "user_index": "[0]" - }, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, col2 from `user` where 1 != 1", - "Query": "select col1, col2 from `user` for update", - "Table": "`user`" - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "unsharded same keyspace", - "query": "insert into unsharded(col) select col from unsharded_auto", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into unsharded(col) select col from unsharded_auto", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "insert into unsharded(col) select col from unsharded_auto for update", - "TableName": "unsharded" - }, - "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { - "QueryType": "INSERT", - "Original": "insert into unsharded(col) select col from unsharded_auto", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "insert into unsharded(col) select col from unsharded_auto for update", + "Query": "insert into unsharded(col) select col from unsharded_auto", "TableName": "unsharded" }, "TablesUsed": [ @@ -5460,42 +4001,7 @@ { "comment": "sharded different keyspace", "query": "insert into user_extra(user_id, col) select col1, col2 from t1", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into user_extra(user_id, col) select col1, col2 from t1", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "AutoIncrement": "main:2", - "MultiShardAutocommit": false, - "TableName": "user_extra", - "VindexOffsetFromSelect": { - "user_index": "[0]" - }, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "zlookup_unique", - "Sharded": true - }, - "FieldQuery": "select col1, col2 from t1 where 1 != 1", - "Query": "select col1, col2 from t1 for update", - "Table": "t1" - } - ] - }, - "TablesUsed": [ - "user.user_extra" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into user_extra(user_id, col) select col1, col2 from t1", "Instructions": { @@ -5506,8 +4012,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "AutoIncrement": "main:2", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(2)", "TableName": "user_extra", "VindexOffsetFromSelect": { "user_index": "[0]" @@ -5521,7 +4026,7 @@ "Sharded": true }, "FieldQuery": "select col1, col2 from t1 where 1 != 1", - "Query": "select col1, col2 from t1 for update", + "Query": "select col1, col2 from t1 lock in share mode", "Table": "t1" } ] @@ -5535,42 +4040,7 @@ { "comment": "sharded insert table, unsharded select table", "query": "insert into user_extra(user_id, col) select col1, col2 from unsharded_tab", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into user_extra(user_id, col) select col1, col2 from unsharded_tab", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Select", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "AutoIncrement": "main:2", - "MultiShardAutocommit": false, - "TableName": "user_extra", - "VindexOffsetFromSelect": { - "user_index": "[0]" - }, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main_2", - "Sharded": false - }, - "FieldQuery": "select col1, col2 from unsharded_tab where 1 != 1", - "Query": "select col1, col2 from unsharded_tab for update", - "Table": "unsharded_tab" - } - ] - }, - "TablesUsed": [ - "user.user_extra" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into user_extra(user_id, col) select col1, col2 from unsharded_tab", "Instructions": { @@ -5581,8 +4051,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "AutoIncrement": "main:2", - "MultiShardAutocommit": false, + "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(2)", "TableName": "user_extra", "VindexOffsetFromSelect": { "user_index": "[0]" @@ -5596,7 +4065,7 @@ "Sharded": false }, "FieldQuery": "select col1, col2 from unsharded_tab where 1 != 1", - "Query": "select col1, col2 from unsharded_tab for update", + "Query": "select col1, col2 from unsharded_tab lock in share mode", "Table": "unsharded_tab" } ] @@ -5610,38 +4079,7 @@ { "comment": "unsharded different keyspace", "query": "insert into unsharded(col) select col from unsharded_tab", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into unsharded(col) select col from unsharded_tab", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "TableName": "unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main_2", - "Sharded": false - }, - "FieldQuery": "select col from unsharded_tab where 1 != 1", - "Query": "select col from unsharded_tab for update", - "Table": "unsharded_tab" - } - ] - }, - "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into unsharded(col) select col from unsharded_tab", "Instructions": { @@ -5652,7 +4090,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "TableName": "unsharded", "Inputs": [ { @@ -5663,7 +4100,7 @@ "Sharded": false }, "FieldQuery": "select col from unsharded_tab where 1 != 1", - "Query": "select col from unsharded_tab for update", + "Query": "select col from unsharded_tab lock in share mode", "Table": "unsharded_tab" } ] @@ -5677,38 +4114,7 @@ { "comment": "unsharded insert table, sharded select table", "query": "insert into unsharded(col) select col from t1", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into unsharded(col) select col from t1", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "TableName": "unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "zlookup_unique", - "Sharded": true - }, - "FieldQuery": "select col from t1 where 1 != 1", - "Query": "select col from t1 for update", - "Table": "t1" - } - ] - }, - "TablesUsed": [ - "main.unsharded" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into unsharded(col) select col from t1", "Instructions": { @@ -5719,7 +4125,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "TableName": "unsharded", "Inputs": [ { @@ -5730,7 +4135,7 @@ "Sharded": true }, "FieldQuery": "select col from t1 where 1 != 1", - "Query": "select col from t1 for update", + "Query": "select col from t1 lock in share mode", "Table": "t1" } ] @@ -5744,8 +4149,7 @@ { "comment": "unsharded subquery in sharded update, not the same keyspace between outer and inner", "query": "update user set col = (select id from unsharded)", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set col = (select id from unsharded)", "Instructions": { @@ -5756,6 +4160,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { @@ -5767,6 +4172,7 @@ "Table": "unsharded" }, { + "InputName": "Outer", "OperatorType": "Update", "Variant": "Scatter", "Keyspace": { @@ -5774,7 +4180,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update `user` set col = :__sq1", "Table": "user" } @@ -5789,8 +4194,7 @@ { "comment": "sharded subquery in unsharded update, not the same keyspace", "query": "update unsharded set col = (select id from user)", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update unsharded set col = (select id from user)", "Instructions": { @@ -5801,6 +4205,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -5812,6 +4217,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Update", "Variant": "Unsharded", "Keyspace": { @@ -5819,7 +4225,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update unsharded set col = :__sq1", "Table": "unsharded" } @@ -5834,8 +4239,7 @@ { "comment": "sharded join unsharded subqueries in unsharded update", "query": "update unsharded set col = (select id from unsharded join user on unsharded.id = user.id)", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update unsharded set col = (select id from unsharded join user on unsharded.id = user.id)", "Instructions": { @@ -5846,6 +4250,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "R:0", @@ -5883,6 +4288,7 @@ ] }, { + "InputName": "Outer", "OperatorType": "Update", "Variant": "Unsharded", "Keyspace": { @@ -5890,7 +4296,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update unsharded set col = :__sq1", "Table": "unsharded" } @@ -5905,8 +4310,7 @@ { "comment": "sharded update with sub query where the sources can be merged into a single query", "query": "update user set col = (select count(*) from user_extra where user_extra.user_id = 5) where id = 5", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = 5) where id = 5", "Instructions": { @@ -5917,7 +4321,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update `user` set col = (select count(*) from user_extra where user_extra.user_id = 5) where id = 5", "Table": "user", "Values": [ @@ -5934,8 +4337,7 @@ { "comment": "merge through correlated subquery", "query": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id = 5", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id = 5", "Instructions": { @@ -5946,7 +4348,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update `user` set col = (select count(*) from user_extra where user_extra.user_id = `user`.id) where id = 5", "Table": "user", "Values": [ @@ -5963,8 +4364,7 @@ { "comment": "merge through correlated subquery #2", "query": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id > 5", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set col = (select count(*) from user_extra where user_extra.user_id = user.id) where id > 5", "Instructions": { @@ -5975,7 +4375,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update `user` set col = (select count(*) from user_extra where user_extra.user_id = `user`.id) where id > 5", "Table": "user" }, @@ -5999,7 +4398,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into authoritative(user_id) values (:_user_id_0)", "TableName": "authoritative", "VindexValues": { @@ -6036,7 +4434,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update", "Query": "delete from `user`", "Table": "user" @@ -6068,7 +4465,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly from `user` for update", "Query": "delete from `user`", "Table": "user" @@ -6081,29 +4477,9 @@ } }, { - "comment": "Here V3 populates the TablesUsed incorrectly\n# delete with join from multi table join subquery.", + "comment": "delete with join from multi table join subquery", "query": "delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "delete foo from unsharded as foo join (select id from unsharded as a join unsharded_b as b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000", - "Table": "unsharded, unsharded, unsharded_b" - }, - "TablesUsed": [ - "main.unsharded", - "main.unsharded, unsharded_b" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete foo from unsharded as foo join (select id from unsharded a join unsharded_b b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000", "Instructions": { @@ -6114,7 +4490,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete foo from unsharded as foo join (select id from unsharded as a join unsharded_b as b on a.user_id = b.user_id) as keepers on foo.id = keepers.id where keepers.id is null and foo.col is not null and foo.col < 1000", "Table": "unsharded, unsharded_b" }, @@ -6127,26 +4502,7 @@ { "comment": "update with routing using multi column vindex", "query": "update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", - "Instructions": { - "OperatorType": "Update", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update `user` set col = 1 where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "user" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user set col = 1 where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", "Instructions": { @@ -6157,7 +4513,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update `user` set col = 1 where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))", "Table": "user", "Values": [ @@ -6173,29 +4528,7 @@ { "comment": "delete with routing using multi column vindex", "query": "delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", - "v3-plan": { - "QueryType": "DELETE", - "Original": "delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "KsidLength": 1, - "KsidVindex": "user_index", - "MultiShardAutocommit": false, - "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd')) for update", - "Query": "delete from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "user" - }, - "TablesUsed": [ - "user.user" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", "Instructions": { @@ -6208,7 +4541,6 @@ "TargetTabletType": "PRIMARY", "KsidLength": 1, "KsidVindex": "user_index", - "MultiShardAutocommit": false, "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd')) for update", "Query": "delete from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))", "Table": "user", @@ -6236,7 +4568,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into ref(col) values (1)", "TableName": "ref" }, @@ -6244,5 +4575,304 @@ "user.ref" ] } + }, + { + "comment": "update using last_insert_id with an argument", + "query": "update main.m1 set foo = last_insert_id(foo+1) where id = 12345", + "plan": { + "QueryType": "UPDATE", + "Original": "update main.m1 set foo = last_insert_id(foo+1) where id = 12345", + "Instructions": { + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update m1 set foo = last_insert_id(foo + 1) where id = 12345", + "Table": "m1" + }, + "TablesUsed": [ + "main.m1" + ] + } + }, + { + "comment": "unsharded update query with comment directive", + "query": "update /*vt+ QUERY_TIMEOUT_MS=1 */ unsharded set val = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update /*vt+ QUERY_TIMEOUT_MS=1 */ unsharded set val = 1", + "Instructions": { + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*vt+ QUERY_TIMEOUT_MS=1 */ unsharded set val = 1", + "QueryTimeout": 1, + "Table": "unsharded" + }, + "TablesUsed": [ + "main.unsharded" + ] + } + }, + { + "comment": "unsharded insert query with comment directive", + "query": "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into unsharded values ()", + "plan": { + "QueryType": "INSERT", + "Original": "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into unsharded values ()", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into unsharded values ()", + "QueryTimeout": 1, + "TableName": "unsharded" + }, + "TablesUsed": [ + "main.unsharded" + ] + } + }, + { + "comment": "insert with select using same tables, cannot stream parallel", + "query": "insert into music(id, user_id) select id, user_id from music where user_id = 1", + "plan": { + "QueryType": "INSERT", + "Original": "insert into music(id, user_id) select id, user_id from music where user_id = 1", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Select", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "InputAsNonStreaming": true, + "TableName": "music", + "VindexOffsetFromSelect": { + "music_user_map": "[0]", + "user_index": "[1]" + }, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, user_id from music where 1 != 1", + "Query": "select id, user_id from music where user_id = 1 lock in share mode", + "Table": "music", + "Values": [ + "INT64(1)" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "insert + lookup vindex + auto increment on lookup column - not provided", + "query": "insert into mixed_tbl(shard_key) values (1),(4),(9)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into mixed_tbl(shard_key) values (1),(4),(9)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(NULL, NULL, NULL)", + "Query": "insert into mixed_tbl(shard_key, lkp_key) values (:_shard_key_0, :_lkp_key_0), (:_shard_key_1, :_lkp_key_1), (:_shard_key_2, :_lkp_key_2)", + "TableName": "mixed_tbl", + "VindexValues": { + "lkp_shard_map": ":__seq0, :__seq1, :__seq2", + "shard_index": "INT64(1), INT64(4), INT64(9)" + } + }, + "TablesUsed": [ + "user.mixed_tbl" + ] + } + }, + { + "comment": "insert + lookup vindex + auto increment on lookup column - partially provided", + "query": "insert into mixed_tbl(shard_key, lkp_key) values (1, 1),(4, null),(9, 27)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into mixed_tbl(shard_key, lkp_key) values (1, 1),(4, null),(9, 27)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "AutoIncrement": "select next :n /* INT64 */ values from seq:Values::(INT64(1), NULL, INT64(27))", + "Query": "insert into mixed_tbl(shard_key, lkp_key) values (:_shard_key_0, :_lkp_key_0), (:_shard_key_1, :_lkp_key_1), (:_shard_key_2, :_lkp_key_2)", + "TableName": "mixed_tbl", + "VindexValues": { + "lkp_shard_map": ":__seq0, :__seq1, :__seq2", + "shard_index": "INT64(1), INT64(4), INT64(9)" + } + }, + "TablesUsed": [ + "user.mixed_tbl" + ] + } + }, + { + "comment": "insert + lookup vindex + auto increment on lookup column + select - not provided", + "query": "insert into mixed_tbl(shard_key) select foo from user where id = 1", + "plan": { + "QueryType": "INSERT", + "Original": "insert into mixed_tbl(shard_key) select foo from user where id = 1", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Select", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(1)", + "TableName": "mixed_tbl", + "VindexOffsetFromSelect": { + "lkp_shard_map": "[1]", + "shard_index": "[0]" + }, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo from `user` where 1 != 1", + "Query": "select foo from `user` where id = 1 lock in share mode", + "Table": "`user`", + "Values": [ + "INT64(1)" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.mixed_tbl", + "user.user" + ] + } + }, + { + "comment": "insert + lookup vindex + auto increment on lookup column + select - provided", + "query": "insert into mixed_tbl(shard_key, lkp_key) select foo, bar from user where id = 1", + "plan": { + "QueryType": "INSERT", + "Original": "insert into mixed_tbl(shard_key, lkp_key) select foo, bar from user where id = 1", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Select", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(1)", + "TableName": "mixed_tbl", + "VindexOffsetFromSelect": { + "lkp_shard_map": "[1]", + "shard_index": "[0]" + }, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, bar from `user` where 1 != 1", + "Query": "select foo, bar from `user` where id = 1 lock in share mode", + "Table": "`user`", + "Values": [ + "INT64(1)" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.mixed_tbl", + "user.user" + ] + } + }, + { + "comment": "insert into a vindex not allowed", + "query": "insert into user_index(id) values(1)", + "plan": "VT09014: vindex cannot be modified" + }, + { + "comment": "insert with select takes shared lock", + "query": "insert into user(id) select id from user", + "plan": { + "QueryType": "INSERT", + "Original": "insert into user(id) select id from user", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Select", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "AutoIncrement": "select next :n /* INT64 */ values from seq:Offset(0)", + "InputAsNonStreaming": true, + "TableName": "user", + "VindexOffsetFromSelect": { + "costly_map": "[-1]", + "name_user_map": "[-1]", + "user_index": "[0]" + }, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` lock in share mode", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/filter_cases.json b/go/vt/vtgate/planbuilder/testdata/filter_cases.json index 96ebc8dda10..e14c0b6b4c8 100644 --- a/go/vt/vtgate/planbuilder/testdata/filter_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/filter_cases.json @@ -2,22 +2,7 @@ { "comment": "No where clause", "query": "select id from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user", "Instructions": { @@ -39,22 +24,7 @@ { "comment": "Query that always return empty", "query": "select id from user where someColumn = null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where someColumn = null", - "Instructions": { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where someColumn = null", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where someColumn = null", "Instructions": { @@ -76,22 +46,7 @@ { "comment": "Null Safe Equality Operator is handled correctly", "query": "SELECT id from user where someColumn <=> null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT id from user where someColumn <=> null", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where someColumn <=> null", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT id from user where someColumn <=> null", "Instructions": { @@ -113,26 +68,7 @@ { "comment": "Single table unique vindex route", "query": "select id from user where user.id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.id = 5", "Instructions": { @@ -158,22 +94,7 @@ { "comment": "Single table unique vindex route, but complex expr", "query": "select id from user where user.id = 5+5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.id = 5+5", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.id = 5 + 5", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.id = 5+5", "Instructions": { @@ -199,26 +120,7 @@ { "comment": "Single table multiple unique vindex match", "query": "select id from music where id = 5 and user_id = 4", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where id = 5 and user_id = 4", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where id = 5 and user_id = 4", - "Table": "music", - "Values": [ - "INT64(4)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where id = 5 and user_id = 4", "Instructions": { @@ -244,26 +146,7 @@ { "comment": "Single table multiple non-unique vindex match", "query": "select id from user where costly = 'aa' and name = 'bb'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where costly = 'aa' and name = 'bb'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where costly = 'aa' and `name` = 'bb'", - "Table": "`user`", - "Values": [ - "VARCHAR(\"bb\")" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where costly = 'aa' and name = 'bb'", "Instructions": { @@ -314,26 +197,7 @@ { "comment": "Single table multiple non-unique vindex match for IN clause", "query": "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')", - "Instructions": { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where costly in ('aa', 'bb') and `name` in ::__vals", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"aa\"), VARCHAR(\"bb\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where costly in ('aa', 'bb') and name in ('aa', 'bb')", "Instructions": { @@ -384,26 +248,7 @@ { "comment": "Composite IN clause", "query": "select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", - "Instructions": { - "OperatorType": "Route", - "Variant": "MultiEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (`name`, col) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (name, col) in (('aa', 'bb'), ('cc', 'dd'))", "Instructions": { @@ -454,26 +299,7 @@ { "comment": "Composite IN clause, swapped columns", "query": "select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))", - "Instructions": { - "OperatorType": "Route", - "Variant": "MultiEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (col, `name`) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (col, name) in (('aa', 'bb'), ('cc', 'dd'))", "Instructions": { @@ -524,26 +350,7 @@ { "comment": "Composite IN clause, choose cost within tuple", "query": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))", - "Instructions": { - "OperatorType": "Route", - "Variant": "MultiEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))", "Instructions": { @@ -594,26 +401,7 @@ { "comment": "Composite IN clause, choose cost within tuple, swapped", "query": "select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))", - "Instructions": { - "OperatorType": "Route", - "Variant": "MultiEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (`name`, costly) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"aa\"), VARCHAR(\"cc\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (name, costly) in (('aa', 'bb'), ('cc', 'dd'))", "Instructions": { @@ -664,26 +452,7 @@ { "comment": "Composite IN clause, choose cost", "query": "select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))", - "Instructions": { - "OperatorType": "Route", - "Variant": "MultiEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (col, costly) in (('aa', 'bb')) and (col, `name`) in (('cc', 'dd'))", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"dd\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (col, costly) in (('aa', 'bb')) and (col, name) in (('cc', 'dd'))", "Instructions": { @@ -734,26 +503,7 @@ { "comment": "Composite IN clause vs equality", "query": "select id from user where (col, name) in (('aa', 'bb')) and id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (col, name) in (('aa', 'bb')) and id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (col, `name`) in (('aa', 'bb')) and id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (col, name) in (('aa', 'bb')) and id = 5", "Instructions": { @@ -779,26 +529,7 @@ { "comment": "Composite IN: multiple vindex matches", "query": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))", - "Instructions": { - "OperatorType": "Route", - "Variant": "MultiEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (costly, `name`) in (('aa', 'bb'), ('cc', 'dd'))", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"bb\"), VARCHAR(\"dd\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (costly, name) in (('aa', 'bb'), ('cc', 'dd'))", "Instructions": { @@ -849,26 +580,7 @@ { "comment": "Composite IN: tuple inside tuple", "query": "select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))", - "Instructions": { - "OperatorType": "Route", - "Variant": "MultiEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where ((col1, `name`), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"bb\"), VARCHAR(\"ee\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where ((col1, name), col2) in ((('aa', 'bb'), 'cc'), (('dd', 'ee'), 'ff'))", "Instructions": { @@ -919,26 +631,7 @@ { "comment": "Composite IN: tuple inside tuple, but no match in tuple", "query": "select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))", - "Instructions": { - "OperatorType": "Route", - "Variant": "MultiEqual", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (`name`, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))", - "Table": "`user`", - "Values": [ - "(VARCHAR(\"aa\"), VARCHAR(\"dd\"))" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (name, (col1, col2)) in (('aa', ('bb', 'cc')), ('dd', ('ee', 'ff')))", "Instructions": { @@ -989,22 +682,7 @@ { "comment": "Composite IN: tuple inside tuple, mismiatched values", "query": "select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where ((col1, `name`), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where ((col1, name), col2) in (('aa', 'bb', 'cc'), (('dd', 'ee'), 'ff'))", "Instructions": { @@ -1026,22 +704,7 @@ { "comment": "Composite IN: RHS not tuple", "query": "select id from user where (col1, name) in (select * from music where music.user_id=user.id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (col1, name) in (select * from music where music.user_id=user.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (col1, `name`) in (select * from music where music.user_id = `user`.id)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (col1, name) in (select * from music where music.user_id=user.id)", "Instructions": { @@ -1064,22 +727,7 @@ { "comment": "Composite IN: RHS has no simple values", "query": "select id from user where (col1, name) in (('aa', 1+1))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (col1, name) in (('aa', 1+1))", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (col1, `name`) in (('aa', 1 + 1))", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (col1, name) in (('aa', 1+1))", "Instructions": { @@ -1130,22 +778,7 @@ { "comment": "IN clause: LHS is neither column nor composite tuple", "query": "select Id from user where 1 in ('aa', 'bb')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select Id from user where 1 in ('aa', 'bb')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select Id from `user` where 1 != 1", - "Query": "select Id from `user` where 1 in ('aa', 'bb')", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select Id from user where 1 in ('aa', 'bb')", "Instructions": { @@ -1167,22 +800,7 @@ { "comment": "Single table complex in clause", "query": "select id from user where name in (col, 'bb')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where name in (col, 'bb')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `name` in (col, 'bb')", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where name in (col, 'bb')", "Instructions": { @@ -1204,26 +822,7 @@ { "comment": "Single table equality route with val arg", "query": "select id from user where name = :a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where name = :a", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `name` = :a", - "Table": "`user`", - "Values": [ - ":a" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where name = :a", "Instructions": { @@ -1274,26 +873,7 @@ { "comment": "Single table equality route with unsigned value", "query": "select id from user where name = 18446744073709551615", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where name = 18446744073709551615", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `name` = 18446744073709551615", - "Table": "`user`", - "Values": [ - "UINT64(18446744073709551615)" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where name = 18446744073709551615", "Instructions": { @@ -1344,26 +924,7 @@ { "comment": "Single table in clause list arg", "query": "select id from user where name in ::list", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where name in ::list", - "Instructions": { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `name` in ::__vals", - "Table": "`user`", - "Values": [ - "::list" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where name in ::list", "Instructions": { @@ -1414,26 +975,7 @@ { "comment": "Multi-table unique vindex constraint", "query": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1", - "Query": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where `user`.id = 5", - "Table": "`user`, user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user.id = 5", "Instructions": { @@ -1460,26 +1002,7 @@ { "comment": "Multi-table unique vindex constraint on right table", "query": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1", - "Query": "select user_extra.id from `user` join user_extra on `user`.id = user_extra.user_id where user_extra.user_id = 5", - "Table": "`user`, user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_extra.id from user join user_extra on user.id = user_extra.user_id where user_extra.user_id = 5", "Instructions": { @@ -1560,48 +1083,7 @@ { "comment": "Multi-route unique vindex constraint", "query": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_col": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` where `user`.id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra where user_extra.col = :user_col", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5", "Instructions": { @@ -1650,52 +1132,7 @@ { "comment": "Multi-route unique vindex route on both routes", "query": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_col": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` where `user`.id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and user_extra.user_id = 5", - "Table": "user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5 and user_extra.user_id = 5", "Instructions": { @@ -1722,7 +1159,7 @@ { "comment": "Multi-route with cross-route constraint", "query": "select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col", "Instructions": { @@ -1753,48 +1190,7 @@ "Sharded": true }, "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and user_extra.user_id = :user_col", - "Table": "user_extra", - "Values": [ - ":user_col" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where user_extra.user_id = user.col", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_col": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and user_extra.user_id = :user_col", + "Query": "select user_extra.id from user_extra where user_extra.user_id = :user_col and user_extra.col = :user_col", "Table": "user_extra", "Values": [ ":user_col" @@ -1812,44 +1208,7 @@ { "comment": "Multi-route with non-route constraint, should use first route.", "query": "select user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_col": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` where 1 = 1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra where user_extra.col = :user_col", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1", "Instructions": { @@ -1880,7 +1239,7 @@ "Sharded": true }, "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra where 1 = 1 and user_extra.col = :user_col", + "Query": "select user_extra.id from user_extra where user_extra.col = :user_col and 1 = 1", "Table": "user_extra" } ] @@ -1894,26 +1253,7 @@ { "comment": "Route with multiple route constraints, SelectIN is the best constraint.", "query": "select id from user where user.col = 5 and user.id in (1, 2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.col = 5 and user.id in (1, 2)", - "Instructions": { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.col = 5 and `user`.id in ::__vals", - "Table": "`user`", - "Values": [ - "(INT64(1), INT64(2))" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.col = 5 and user.id in (1, 2)", "Instructions": { @@ -1939,26 +1279,7 @@ { "comment": "Route with multiple route constraints and boolean, SelectIN is the best constraint.", "query": "select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)", - "Instructions": { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.col = case `user`.col when 'foo' then true else false end and `user`.id in ::__vals", - "Table": "`user`", - "Values": [ - "(INT64(1), INT64(2))" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.col = case user.col when 'foo' then true else false end and user.id in (1, 2)", "Instructions": { @@ -1984,26 +1305,7 @@ { "comment": "Route with multiple route constraints and boolean, SelectEqual is the best constraint.", "query": "select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id or col as val from `user` where 1 != 1", - "Query": "select id or col as val from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa'", - "Table": "`user`", - "Values": [ - "VARCHAR(\"aa\")" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select (id or col) as val from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa'", "Instructions": { @@ -2054,26 +1356,7 @@ { "comment": "Route with multiple route constraints, SelectEqual is the best constraint.", "query": "select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.col = false and `user`.id in (1, 2) and `user`.`name` = 'aa'", - "Table": "`user`", - "Values": [ - "VARCHAR(\"aa\")" - ], - "Vindex": "name_user_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.col = false and user.id in (1, 2) and user.name = 'aa'", "Instructions": { @@ -2124,26 +1407,7 @@ { "comment": "Route with multiple route constraints, SelectEqualUnique is the best constraint.", "query": "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.col = 5 and `user`.id in (1, 2) and `user`.`name` = 'aa' and `user`.id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.col = 5 and user.id in (1, 2) and user.name = 'aa' and user.id = 1", "Instructions": { @@ -2169,26 +1433,7 @@ { "comment": "Route with multiple route constraints, SelectEqualUnique is the best constraint, order reversed.", "query": "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.id = 1 and `user`.`name` = 'aa' and `user`.id in (1, 2) and `user`.col = 5", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.id = 1 and user.name = 'aa' and user.id in (1, 2) and user.col = 5", "Instructions": { @@ -2214,22 +1459,7 @@ { "comment": "Route with OR and AND clause, must parenthesize correctly.", "query": "select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.id = 1 or `user`.`name` = 'aa' and `user`.id in (1, 2)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.id = 1 or user.name = 'aa' and user.id in (1, 2)", "Instructions": { @@ -2255,44 +1485,7 @@ { "comment": "Unsharded route", "query": "select unsharded.id from user join unsharded where unsharded.id = user.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select unsharded.id from user join unsharded where unsharded.id = user.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.id from unsharded where 1 != 1", - "Query": "select unsharded.id from unsharded where unsharded.id = :user_id", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select unsharded.id from user join unsharded where unsharded.id = user.id", "Instructions": { @@ -2341,26 +1534,7 @@ { "comment": "routing rules: choose the redirected table", "query": "select col from route1 where id = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from route1 where id = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` as route1 where 1 != 1", - "Query": "select col from `user` as route1 where id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from route1 where id = 1", "Instructions": { @@ -2386,7 +1560,7 @@ { "comment": "subquery", "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)", "Instructions": { @@ -2417,7 +1591,7 @@ "Sharded": true }, "FieldQuery": "select u.m from `user` as u where 1 != 1", - "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col)", + "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col /* INT16 */) and u.id in ::__vals", "Table": "`user`", "Values": [ "(:user_extra_col, INT64(1))" @@ -2425,11 +1599,19 @@ "Vindex": "user_index" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "correlated subquery merge-able into a route of a join tree", + "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)", + "plan": { "QueryType": "SELECT", - "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col) and u.id in (user_extra.col, 1)", + "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)", "Instructions": { "OperatorType": "Join", "Variant": "Join", @@ -2458,7 +1640,7 @@ "Sharded": true }, "FieldQuery": "select u.m from `user` as u where 1 != 1", - "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col) and u.id in ::__vals", + "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id) and u.id in ::__vals", "Table": "`user`", "Values": [ "(:user_extra_col, INT64(1))" @@ -2474,139 +1656,11 @@ } }, { - "comment": "correlated subquery merge-able into a route of a join tree", - "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)", - "v3-plan": { + "comment": "ensure subquery reordering gets us a better plan", + "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5", + "plan": { "QueryType": "SELECT", - "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_extra_col": 0 - }, - "TableName": "user_extra_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra", - "Table": "user_extra" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.m from `user` as u where 1 != 1", - "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id)", - "Table": "`user`", - "Values": [ - "(:user_extra_col, INT64(1))" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id) and u.id in (user_extra.col, 1)", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_extra_col": 0 - }, - "TableName": "user_extra_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra", - "Table": "user_extra" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.m from `user` as u where 1 != 1", - "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id) and u.id in ::__vals", - "Table": "`user`", - "Values": [ - "(:user_extra_col, INT64(1))" - ], - "Vindex": "user_index" - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "ensure subquery reordering gets us a better plan", - "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "TableName": "user_extra_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.m from `user` as u where 1 != 1", - "Query": "select u.m from `user` as u where u.id = 5 and u.id in (select m2 from `user` where `user`.id = 5)", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5", + "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = 5) and u.id = 5", "Instructions": { "OperatorType": "Join", "Variant": "Join", @@ -2650,48 +1704,7 @@ { "comment": "nested subquery", "query": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_extra_col": 0 - }, - "TableName": "user_extra_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra", - "Table": "user_extra" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.m from `user` as u where 1 != 1", - "Query": "select u.m from `user` as u where u.id in ::__vals and u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col and `user`.id in (select m3 from user_extra where user_extra.user_id = `user`.id))", - "Table": "`user`", - "Values": [ - "(:user_extra_col, INT64(1))" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.m from user_extra join user u where u.id in (select m2 from user where user.id = u.id and user_extra.col = user.col and user.id in (select m3 from user_extra where user_extra.user_id = user.id)) and u.id in (user_extra.col, 1)", "Instructions": { @@ -2722,7 +1735,7 @@ "Sharded": true }, "FieldQuery": "select u.m from `user` as u where 1 != 1", - "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col and `user`.id in (select m3 from user_extra where user_extra.user_id = `user`.id)) and u.id in ::__vals", + "Query": "select u.m from `user` as u where u.id in (select m2 from `user` where `user`.id = u.id and `user`.col = :user_extra_col /* INT16 */ and `user`.id in (select m3 from user_extra where user_extra.user_id = `user`.id)) and u.id in ::__vals", "Table": "`user`", "Values": [ "(:user_extra_col, INT64(1))" @@ -2740,22 +1753,7 @@ { "comment": "Correlated subquery in where clause", "query": "select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.col in (select user_extra.col from user_extra where user_extra.user_id = `user`.id)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.col in (select user_extra.col from user_extra where user_extra.user_id = user.id)", "Instructions": { @@ -2778,26 +1776,7 @@ { "comment": "outer and inner subquery route by same int val", "query": "select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 5 and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 5)", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id = 5 and user.col in (select user_extra.col from user_extra where user_extra.user_id = 5)", "Instructions": { @@ -2824,26 +1803,7 @@ { "comment": "outer and inner subquery route by same str val", "query": "select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 'aa' and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')", - "Table": "`user`", - "Values": [ - "VARCHAR(\"aa\")" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id = 'aa' and user.col in (select user_extra.col from user_extra where user_extra.user_id = 'aa')", "Instructions": { @@ -2870,26 +1830,7 @@ { "comment": "outer and inner subquery route by same val arg", "query": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = :a and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = :a)", - "Table": "`user`", - "Values": [ - ":a" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a)", "Instructions": { @@ -2916,28 +1857,12 @@ { "comment": "unresolved symbol in inner subquery.", "query": "select id from user where id = :a and user.col in (select user_extra.col from user_extra where user_extra.user_id = :a and foo.id = 1)", - "v3-plan": "VT03019: symbol foo.id not found", - "gen4-plan": "symbol foo.id not found" + "plan": "column 'foo.id' not found" }, { "comment": "outer and inner subquery route by same outermost column value", "query": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id2 from `user` as uu where 1 != 1", - "Query": "select id2 from `user` as uu where id in (select id from `user` where id = uu.id and `user`.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select user_extra.col from user_extra where user_extra.user_id = uu.id))", "Instructions": { @@ -2960,47 +1885,7 @@ { "comment": "cross-shard subquery in IN clause.\n# Note the improved Underlying plan as SelectIN.", "query": "select id from user where id in (select col from user)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id in (select col from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals", - "Table": "`user`", - "Values": [ - "::__sq1" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id in (select col from user)", "Instructions": { @@ -3012,6 +1897,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3023,6 +1909,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -3047,43 +1934,7 @@ { "comment": "cross-shard subquery in NOT IN clause.", "query": "select id from user where id not in (select col from user)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id not in (select col from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutNotIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where :__sq_has_values1 = 0 or id not in ::__sq1", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id not in (select col from user)", "Instructions": { @@ -3095,6 +1946,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3106,6 +1958,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3126,49 +1979,7 @@ { "comment": "cross-shard subquery in EXISTS clause.", "query": "select id from user where exists (select col from user)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where exists (select col from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutExists", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` limit :__upper_limit", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where :__sq_has_values1", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where exists (select col from user)", "Instructions": { @@ -3179,6 +1990,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Limit", "Count": "INT64(1)", "Inputs": [ @@ -3196,6 +2008,7 @@ ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3212,51 +2025,11 @@ "user.user" ] } - }, - { - "comment": "cross-shard subquery as expression", - "query": "select id from user where id = (select col from user)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id = (select col from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = :__sq1", - "Table": "`user`", - "Values": [ - ":__sq1" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + }, + { + "comment": "cross-shard subquery as expression", + "query": "select id from user where id = (select col from user)", + "plan": { "QueryType": "SELECT", "Original": "select id from user where id = (select col from user)", "Instructions": { @@ -3268,6 +2041,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3279,6 +2053,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -3303,68 +2078,7 @@ { "comment": "multi-level pullout", "query": "select id1 from user where id = (select id2 from user where id2 in (select id3 from user))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id1 from user where id = (select id2 from user where id2 in (select id3 from user))", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values2", - "__sq2" - ], - "Inputs": [ - { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id3 from `user` where 1 != 1", - "Query": "select id3 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id2 from `user` where 1 != 1", - "Query": "select id2 from `user` where :__sq_has_values1 = 1 and id2 in ::__sq1", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id1 from `user` where 1 != 1", - "Query": "select id1 from `user` where id = :__sq2", - "Table": "`user`", - "Values": [ - ":__sq2" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id1 from user where id = (select id2 from user where id2 in (select id3 from user))", "Instructions": { @@ -3376,6 +2090,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Subquery", "Variant": "PulloutIn", "PulloutVars": [ @@ -3384,6 +2099,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3395,6 +2111,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3408,6 +2125,7 @@ ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -3432,22 +2150,7 @@ { "comment": "routing rules subquery merge", "query": "select col from user where id = (select id from route1 where route1.id = user.id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where id = (select id from route1 where route1.id = user.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where id = (select id from `user` as route1 where route1.id = `user`.id)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where id = (select id from route1 where route1.id = user.id)", "Instructions": { @@ -3469,47 +2172,7 @@ { "comment": "routing rules subquery pullout", "query": "select col from user where id = (select id from route2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where id = (select id from route2)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select id from unsharded as route2 where 1 != 1", - "Query": "select id from unsharded as route2", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where id = :__sq1", - "Table": "`user`", - "Values": [ - ":__sq1" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where id = (select id from route2)", "Instructions": { @@ -3521,6 +2184,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { @@ -3532,6 +2196,7 @@ "Table": "unsharded" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -3557,26 +2222,7 @@ { "comment": "Case preservation test", "query": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.Id from `user` join user_extra on `user`.iD = user_extra.User_Id where 1 != 1", - "Query": "select user_extra.Id from `user` join user_extra on `user`.iD = user_extra.User_Id where `user`.Id = 5", - "Table": "`user`, user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_extra.Id from user join user_extra on user.iD = user_extra.User_Id where user.Id = 5", "Instructions": { @@ -3603,22 +2249,7 @@ { "comment": "database() call in where clause.", "query": "select id from user where database()", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where database()", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where database()", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where database()", "Instructions": { @@ -3640,22 +2271,7 @@ { "comment": "Select with equals null", "query": "select id from music where id = null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where id = null", - "Instructions": { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where id = null", - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where id = null", "Instructions": { @@ -3677,22 +2293,7 @@ { "comment": "SELECT with IS NULL", "query": "select id from music where id is null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where id is null", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where id is null", - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where id is null", "Instructions": { @@ -3714,22 +2315,7 @@ { "comment": "SELECT with IS NOT NULL", "query": "select id from music where id is not null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where id is not null", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where id is not null", - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where id is not null", "Instructions": { @@ -3751,22 +2337,7 @@ { "comment": "Single table with unique vindex match and null match", "query": "select id from music where user_id = 4 and id = null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where user_id = 4 and id = null", - "Instructions": { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where user_id = 4 and id = null", - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where user_id = 4 and id = null", "Instructions": { @@ -3788,22 +2359,7 @@ { "comment": "Single table with unique vindex match and IN (null)", "query": "select id from music where user_id = 4 and id IN (null)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where user_id = 4 and id IN (null)", - "Instructions": { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where user_id = 4 and id in (null)", - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where user_id = 4 and id IN (null)", "Instructions": { @@ -3825,26 +2381,7 @@ { "comment": "Single table with unique vindex match and IN (null, 1, 2)", "query": "select id from music where user_id = 4 and id IN (null, 1, 2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where user_id = 4 and id IN (null, 1, 2)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where user_id = 4 and id in (null, 1, 2)", - "Table": "music", - "Values": [ - "INT64(4)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where user_id = 4 and id IN (null, 1, 2)", "Instructions": { @@ -3860,32 +2397,17 @@ "Values": [ "INT64(4)" ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.music" - ] - } - }, - { - "comment": "Single table with unique vindex match and NOT IN (null, 1, 2)", - "query": "select id from music where user_id = 4 and id NOT IN (null, 1, 2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where user_id = 4 and id NOT IN (null, 1, 2)", - "Instructions": { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where user_id = 4 and id not in (null, 1, 2)", - "Table": "music" - } - }, - "gen4-plan": { + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "Single table with unique vindex match and NOT IN (null, 1, 2)", + "query": "select id from music where user_id = 4 and id NOT IN (null, 1, 2)", + "plan": { "QueryType": "SELECT", "Original": "select id from music where user_id = 4 and id NOT IN (null, 1, 2)", "Instructions": { @@ -3907,22 +2429,7 @@ { "comment": "Single table with unique vindex match and NOT IN (null, 1, 2) predicates inverted", "query": "select id from music where id NOT IN (null, 1, 2) and user_id = 4", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where id NOT IN (null, 1, 2) and user_id = 4", - "Instructions": { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where id not in (null, 1, 2) and user_id = 4", - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where id NOT IN (null, 1, 2) and user_id = 4", "Instructions": { @@ -3944,76 +2451,7 @@ { "comment": "pullout sq after pullout sq", "query": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutNotIn", - "PulloutVars": [ - "__sq_has_values2", - "__sq2" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra where user_extra.user_id = 42", - "Table": "user_extra", - "Values": [ - "INT64(42)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra where user_extra.user_id = 411", - "Table": "user_extra", - "Values": [ - "INT64(411)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals and (:__sq_has_values2 = 0 or id not in ::__sq2)", - "Table": "`user`", - "Values": [ - "::__sq1" - ], - "Vindex": "user_index" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where not id in (select user_extra.col from user_extra where user_extra.user_id = 42) and id in (select user_extra.col from user_extra where user_extra.user_id = 411)", "Instructions": { @@ -4025,6 +2463,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -4040,6 +2479,7 @@ "Vindex": "user_index" }, { + "InputName": "Outer", "OperatorType": "Subquery", "Variant": "PulloutNotIn", "PulloutVars": [ @@ -4048,6 +2488,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -4063,6 +2504,7 @@ "Vindex": "user_index" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -4090,26 +2532,7 @@ { "comment": "solving LIKE query with a CFC prefix vindex", "query": "select c2 from cfc_vindex_col where c1 like 'A%'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select c2 from cfc_vindex_col where c1 like 'A%'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select c2 from cfc_vindex_col where 1 != 1", - "Query": "select c2 from cfc_vindex_col where c1 like 'A%'", - "Table": "cfc_vindex_col", - "Values": [ - "VARCHAR(\"A%\")" - ], - "Vindex": "cfc" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select c2 from cfc_vindex_col where c1 like 'A%'", "Instructions": { @@ -4135,26 +2558,7 @@ { "comment": "select * from samecolvin where col = :col", "query": "select * from samecolvin where col = :col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from samecolvin where col = :col", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from samecolvin where 1 != 1", - "Query": "select col from samecolvin where col = :col", - "Table": "samecolvin", - "Values": [ - ":col" - ], - "Vindex": "vindex1" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from samecolvin where col = :col", "Instructions": { @@ -4180,22 +2584,7 @@ { "comment": "non unique predicate on vindex", "query": "select id from user where user.id > 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.id > 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.id > 5", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.id > 5", "Instructions": { @@ -4217,22 +2606,7 @@ { "comment": "select from unsharded keyspace with uncorrelated subquery which should be merged to a single route", "query": "select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.id from unsharded where 1 != 1", - "Query": "select unsharded.id from unsharded where unsharded.`name` in (select `name` from unsharded_a)", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select unsharded.id from unsharded where unsharded.name in (select name from unsharded_a)", "Instructions": { @@ -4255,47 +2629,7 @@ { "comment": "in subquery the id will be scoped to local table as there is no qualifier associated with it.", "query": "select id from user where id in (select col from unsharded where col = id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id in (select col from unsharded where col = id)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select col from unsharded where 1 != 1", - "Query": "select col from unsharded where col = id", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where :__sq_has_values1 = 1 and id in ::__vals", - "Table": "`user`", - "Values": [ - "::__sq1" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id in (select col from unsharded where col = id)", "Instructions": { @@ -4307,6 +2641,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { @@ -4318,6 +2653,7 @@ "Table": "unsharded" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -4340,30 +2676,10 @@ ] } }, - { - "comment": "correlated subquery with different keyspace tables involved", - "query": "select id from user where id in (select col from unsharded where col = user.id)", - "plan": "VT12001: unsupported: cross-shard correlated subquery" - }, { "comment": "correlated subquery with same keyspace", "query": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id from `user` as u where 1 != 1", - "Query": "select u.id from `user` as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)", "Instructions": { @@ -4405,22 +2721,7 @@ { "comment": "SelectReference with uncorrelated subqueries", "query": "select ref.col from ref where ref.col in (select ref.col from ref)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select ref.col from ref where ref.col in (select ref.col from ref)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select ref.col from ref where 1 != 1", - "Query": "select ref.col from ref where ref.col in (select ref.col from ref)", - "Table": "ref" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select ref.col from ref where ref.col in (select ref.col from ref)", "Instructions": { @@ -4442,26 +2743,7 @@ { "comment": "SelectEqualUnique with uncorrelated subqueries", "query": "select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.col from `user` as u1 where u1.id = 5 and u1.`name` in (select u2.`name` from `user` as u2 where u2.id = 5)", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.col from user as u1 where u1.id = 5 and u1.name in (select u2.name from user u2 where u2.id = 5)", "Instructions": { @@ -4487,26 +2769,7 @@ { "comment": "SelectEqualUnique with EXISTS uncorrelated subquery", "query": "select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.col from `user` as u1 where u1.id = 5 and exists (select 1 from `user` as u2 where u2.id = 5 limit 1)", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.col from user as u1 where u1.id = 5 and exists (select u2.name from user u2 where u2.id = 5)", "Instructions": { @@ -4532,26 +2795,7 @@ { "comment": "SelectEqualUnique with NOT EXISTS uncorrelated subquery", "query": "select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from user u2 where u2.id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from user u2 where u2.id = 5)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.col from `user` as u1 where u1.id = 5 and not exists (select 1 from `user` as u2 where u2.id = 5 limit 1)", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.col from user as u1 where u1.id = 5 and not exists (select u2.name from user u2 where u2.id = 5)", "Instructions": { @@ -4577,47 +2821,7 @@ { "comment": "SelectScatter with NOT EXISTS uncorrelated subquery", "query": "select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutExists", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u2 where 1 != 1", - "Query": "select 1 from `user` as u2 where u2.id = 5 limit 1", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.col from `user` as u1 where not :__sq_has_values1", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.col from user as u1 where not exists (select u2.name from user u2 where u2.id = 5)", "Instructions": { @@ -4628,6 +2832,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -4643,6 +2848,7 @@ "Vindex": "user_index" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -4663,51 +2869,7 @@ { "comment": "The outer and first inner are SelectEqualUnique with same Vindex value, the second inner has different Vindex value", "query": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra where user_extra.user_id = 4", - "Table": "user_extra", - "Values": [ - "INT64(4)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 5 and :__sq_has_values1 = 1 and id in ::__sq1 and id not in (select user_extra.col from user_extra where user_extra.user_id = 5)", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 5) and id in (select user_extra.col from user_extra where user_extra.user_id = 4)", "Instructions": { @@ -4719,6 +2881,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -4734,6 +2897,7 @@ "Vindex": "user_index" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -4759,51 +2923,7 @@ { "comment": "The outer and second inner are SelectEqualUnique with same Vindex value, the first inner has different Vindex value", "query": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutNotIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra where user_extra.user_id = 4", - "Table": "user_extra", - "Values": [ - "INT64(4)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 5 and id in (select user_extra.col from user_extra where user_extra.user_id = 5) and (:__sq_has_values1 = 0 or id not in ::__sq1)", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id = 5 and not id in (select user_extra.col from user_extra where user_extra.user_id = 4) and id in (select user_extra.col from user_extra where user_extra.user_id = 5)", "Instructions": { @@ -4815,6 +2935,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -4830,6 +2951,7 @@ "Vindex": "user_index" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -4855,22 +2977,7 @@ { "comment": "two correlated subqueries that can be merge in a single route", "query": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id from `user` as u where 1 != 1", - "Query": "select u.id from `user` as u where u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id from user as u where u.col in (select ue.user_id from user_extra as ue where ue.user_id = u.id) and u.col2 in (select ue.user_id from user_extra as ue where ue.user_id = u.id)", "Instructions": { @@ -4893,22 +3000,7 @@ { "comment": "transitive closures for the win", "query": "select id from user where user.id = user.col and user.col = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where user.id = user.col and user.col = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where `user`.id = `user`.col and `user`.col = 5", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where user.id = user.col and user.col = 5", "Instructions": { @@ -4934,44 +3026,7 @@ { "comment": "join with transitive closures", "query": "select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.col = :user_id and user_extra.col = user_extra.user_id", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user, user_extra where user.id = user_extra.col and user_extra.col = user_extra.user_id", "Instructions": { @@ -4994,44 +3049,7 @@ { "comment": "not supported transitive closures with equality inside of an OR", "query": "select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.col = :user_id and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.`name`)", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user, user_extra where user.id = user_extra.col and (user_extra.col = user_extra.user_id or user_extra.col2 = user_extra.name)", "Instructions": { @@ -5071,31 +3089,16 @@ } ] }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "routing rules subquery merge with alias", - "query": "select col from user where id = (select id from route1 as a where a.id = user.id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where id = (select id from route1 as a where a.id = user.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where id = (select id from `user` as a where a.id = `user`.id)", - "Table": "`user`" - } - }, - "gen4-plan": { + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "routing rules subquery merge with alias", + "query": "select col from user where id = (select id from route1 as a where a.id = user.id)", + "plan": { "QueryType": "SELECT", "Original": "select col from user where id = (select id from route1 as a where a.id = user.id)", "Instructions": { @@ -5123,9 +3126,9 @@ "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1", + "JoinColumnIndexes": "L:0", "JoinVars": { - "user_col": 0 + "user_col": 1 }, "TableName": "`user`_user_extra", "Inputs": [ @@ -5136,8 +3139,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1", - "Query": "select `user`.col, `user`.id from `user`", + "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1", + "Query": "select `user`.id, `user`.col from `user`", "Table": "`user`" }, { @@ -5171,74 +3174,18 @@ "QueryType": "SELECT", "Original": "select user.id from user left join user_extra on user.col = user_extra.col where user_extra.id is null", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 1 - ], - "Inputs": [ - { - "OperatorType": "Filter", - "Predicate": "user_extra.id is null", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "LeftJoin", - "JoinColumnIndexes": "R:0,L:1", - "JoinVars": { - "user_col": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1", - "Query": "select `user`.col, `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra where user_extra.col = :user_col", - "Table": "user_extra" - } - ] - } - ] - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "subquery on other table", - "query": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')", - "Instructions": { - "OperatorType": "Distinct", + "OperatorType": "Filter", + "Predicate": "user_extra.id is null", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_col": 1 + }, + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -5247,9 +3194,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where col2 = 'a'", - "Table": "music" + "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1", + "Query": "select `user`.id, `user`.col from `user`", + "Table": "`user`" }, { "OperatorType": "Route", @@ -5258,16 +3205,24 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1", - "Table": "`user`" + "FieldQuery": "select user_extra.id from user_extra where 1 != 1", + "Query": "select user_extra.id from user_extra where user_extra.col = :user_col", + "Table": "user_extra" } ] } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "subquery on other table", + "query": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')", + "plan": { "QueryType": "SELECT", "Original": "select distinct user.id, user.col from user where user.col in (select id from music where col2 = 'a')", "Instructions": { @@ -5285,6 +3240,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -5296,6 +3252,7 @@ "Table": "music" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -5320,26 +3277,7 @@ { "comment": "should use colb_colc_map as first column of the vindex is present in predicate", "query": "select * from multicolvin where column_b = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicolvin where column_b = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicolvin where 1 != 1", - "Query": "select * from multicolvin where column_b = 1", - "Table": "multicolvin", - "Values": [ - "INT64(1)" - ], - "Vindex": "colb_colc_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicolvin where column_b = 1", "Instructions": { @@ -5365,26 +3303,7 @@ { "comment": "should only use first column of the vindex colb_colc_map", "query": "select * from multicolvin where column_b = 1 and column_c = 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicolvin where column_b = 1 and column_c = 2", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicolvin where 1 != 1", - "Query": "select * from multicolvin where column_b = 1 and column_c = 2", - "Table": "multicolvin", - "Values": [ - "INT64(1)" - ], - "Vindex": "colb_colc_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicolvin where column_b = 1 and column_c = 2", "Instructions": { @@ -5410,26 +3329,7 @@ { "comment": "uses vindex colb_colc_map", "query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicolvin where 1 != 1", - "Query": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3", - "Table": "multicolvin", - "Values": [ - "INT64(1)" - ], - "Vindex": "colb_colc_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicolvin where column_b = 1 and column_c = 2 and column_a = 3", "Instructions": { @@ -5453,28 +3353,9 @@ } }, { - "comment": "v3 takes cola_map, gen4 takes colb_colc_map, may be based on map key ordering", + "comment": "colb_colc_map vindex for routing", "query": "select * from multicolvin where column_a = 3 and column_b = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicolvin where column_a = 3 and column_b = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicolvin where 1 != 1", - "Query": "select * from multicolvin where column_a = 3 and column_b = 1", - "Table": "multicolvin", - "Values": [ - "INT64(3)" - ], - "Vindex": "cola_map" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicolvin where column_a = 3 and column_b = 1", "Instructions": { @@ -5498,24 +3379,9 @@ } }, { - "comment": "multi column vindex produces Equal plan in gen4 and Scatter in v3", + "comment": "multi column vindex produces Equal plan", "query": "select * from multicol_tbl where cola = 1 and colb = 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where cola = 1 and colb = 2", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where cola = 1 and colb = 2", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where cola = 1 and colb = 2", "Instructions": { @@ -5542,22 +3408,7 @@ { "comment": "multi column vindex with different order places the vindex keys in correct order", "query": "select * from multicol_tbl where colb = 2 and cola = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where colb = 2 and cola = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where colb = 2 and cola = 1", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where colb = 2 and cola = 1", "Instructions": { @@ -5582,24 +3433,9 @@ } }, { - "comment": "multi column vindex produces IN plan in gen4 and Scatter in v3", + "comment": "multi column vindex produces IN plan", "query": "select * from multicol_tbl where cola in (1,2) and colb in (3,4)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where cola in (1,2) and colb in (3,4)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where cola in (1, 2) and colb in (3, 4)", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where cola in (1,2) and colb in (3,4)", "Instructions": { @@ -5624,24 +3460,9 @@ } }, { - "comment": "multi column vindex with different order places the vindex keys in correct order in IN plan in gen4", + "comment": "multi column vindex with different order places the vindex keys in correct order in IN plan", "query": "select * from multicol_tbl where colb in (3,4) and cola in (1,2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where colb in (3,4) and cola in (1,2)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where colb in (3, 4) and cola in (1, 2)", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where colb in (3,4) and cola in (1,2)", "Instructions": { @@ -5668,22 +3489,7 @@ { "comment": "multi column vindex with different order with one IN predicate and one equality", "query": "select * from multicol_tbl where colb = 1 and cola in (3,4)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where colb = 1 and cola in (3,4)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where colb = 1 and cola in (3, 4)", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where colb = 1 and cola in (3,4)", "Instructions": { @@ -5710,22 +3516,7 @@ { "comment": "deconstruct tuple equality comparisons", "query": "select id from user where (id, name) = (34, 'apa')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (id, name) = (34, 'apa')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where (id, `name`) = (34, 'apa')", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (id, name) = (34, 'apa')", "Instructions": { @@ -5751,22 +3542,7 @@ { "comment": "multi column vindex with both IN predicate and equality predicate", "query": "select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 7", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 7", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where cola in (1, 10) and cola = 4 and colb in (5, 6) and colb = 7", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where cola in (1,10) and cola = 4 and colb in (5,6) and colb = 7", "Instructions": { @@ -5793,22 +3569,7 @@ { "comment": "multi column vindex with one column with equal followed by IN predicate, ordering matters for now", "query": "select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where colb = 4 and colb in (1, 10) and cola in (5, 6)", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where colb = 4 and colb in (1,10) and cola in (5,6)", "Instructions": { @@ -5829,28 +3590,13 @@ }, "TablesUsed": [ "user.multicol_tbl" - ] - } - }, - { - "comment": "multi column vindex with one column with IN followed by equal predicate, ordering matters for now", - "query": "select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where colb in (1, 10) and colb = 4 and cola in (5, 6)", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + ] + } + }, + { + "comment": "multi column vindex with one column with IN followed by equal predicate, ordering matters for now", + "query": "select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)", + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where colb in (1,10) and colb = 4 and cola in (5,6)", "Instructions": { @@ -5877,22 +3623,7 @@ { "comment": "multi column vindex with better plan selection", "query": "select * from multicol_tbl where colb in (1,2) and cola IN (3,4) and cola = 5 and colb = 6", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where colb in (1,2) and cola IN (3,4) and cola = 5 and colb = 6", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where colb in (1, 2) and cola in (3, 4) and cola = 5 and colb = 6", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where colb in (1,2) and cola IN (3,4) and cola = 5 and colb = 6", "Instructions": { @@ -5919,22 +3650,7 @@ { "comment": "multi column vindex as tuple", "query": "select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where (cola, colb) in ((1, 2), (3, 4))", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where (cola,colb) in ((1,2),(3,4))", "Instructions": { @@ -5961,22 +3677,7 @@ { "comment": "multi column vindex, partial vindex with SelectEqual", "query": "select * from multicol_tbl where cola = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where cola = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where cola = 1", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where cola = 1", "Instructions": { @@ -6002,22 +3703,7 @@ { "comment": "multi column vindex, partial vindex with SelectEqual over full vindex with SelectIN", "query": "select * from multicol_tbl where cola = 1 and colb in (2,3)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from multicol_tbl where cola = 1 and colb in (2,3)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from multicol_tbl where 1 != 1", - "Query": "select * from multicol_tbl where cola = 1 and colb in (2, 3)", - "Table": "multicol_tbl" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from multicol_tbl where cola = 1 and colb in (2,3)", "Instructions": { @@ -6042,7 +3728,7 @@ } }, { - "comment": "left join with where clause - should be handled by gen4 but still isn't", + "comment": "left join with where clause", "query": "select 0 from unsharded_a left join unsharded_b on unsharded_a.col = unsharded_b.col where coalesce(unsharded_b.col, 4) = 5", "plan": { "QueryType": "SELECT", @@ -6225,22 +3911,7 @@ { "comment": "optimize ORs to IN route op codes #1", "query": "select col from user where id = 1 or id = 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where id = 1 or id = 2", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where id = 1 or id = 2", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where id = 1 or id = 2", "Instructions": { @@ -6266,22 +3937,7 @@ { "comment": "optimize ORs to IN route op codes #2", "query": "select col from user where id = 1 or id = 2 or id = 3", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where id = 1 or id = 2 or id = 3", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where id = 1 or id = 2 or id = 3", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where id = 1 or id = 2 or id = 3", "Instructions": { @@ -6307,22 +3963,7 @@ { "comment": "optimize ORs to IN route op codes #3", "query": "select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where id = 1 or id = 2 or (id = 3 or id = 4)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where (id = 1 or id = 2) or (id = 3 or id = 4)", "Instructions": { @@ -6348,26 +3989,7 @@ { "comment": "Don't pick a vindex for an IS NULL predicate if it's a lookup vindex", "query": "select id from music where id is null and user_id in (1,2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from music where id is null and user_id in (1,2)", - "Instructions": { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music where id is null and user_id in ::__vals", - "Table": "music", - "Values": [ - "(INT64(1), INT64(2))" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from music where id is null and user_id in (1,2)", "Instructions": { @@ -6393,22 +4015,7 @@ { "comment": "Self referencing columns in HAVING should work", "query": "select a+2 as a from user having a = 42", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a+2 as a from user having a = 42", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a + 2 as a from `user` where 1 != 1", - "Query": "select a + 2 as a from `user` having a = 42", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a+2 as a from user having a = 42", "Instructions": { @@ -6430,22 +4037,7 @@ { "comment": "HAVING predicates that use table columns are safe to rewrite if we can move them to the WHERE clause", "query": "select user.col + 2 as a from user having a = 42", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col + 2 as a from user having a = 42", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col + 2 as a from `user` where 1 != 1", - "Query": "select `user`.col + 2 as a from `user` having a = 42", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col + 2 as a from user having a = 42", "Instructions": { @@ -6467,22 +4059,7 @@ { "comment": "HAVING predicates that use table columns should not get rewritten on unsharded keyspaces", "query": "select col + 2 as a from unsharded having a = 42", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col + 2 as a from unsharded having a = 42", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select col + 2 as a from unsharded where 1 != 1", - "Query": "select col + 2 as a from unsharded having a = 42", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col + 2 as a from unsharded having a = 42", "Instructions": { @@ -6504,22 +4081,7 @@ { "comment": "Single table unique vindex route hiding behind a silly OR", "query": "select id from user where (id = 5 and name ='apa') or (id = 5 and foo = 'bar')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (id = 5 and name ='apa') or (id = 5 and foo = 'bar')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 5 and `name` = 'apa' or id = 5 and foo = 'bar'", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (id = 5 and name ='apa') or (id = 5 and foo = 'bar')", "Instructions": { @@ -6545,22 +4107,7 @@ { "comment": "Single table IN vindex route hiding behind OR", "query": "select id from user where (id = 5 and name ='foo') or (id = 12 and name = 'bar')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where (id = 5 and name ='foo') or (id = 12 and name = 'bar')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 5 and `name` = 'foo' or id = 12 and `name` = 'bar'", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where (id = 5 and name ='foo') or (id = 12 and name = 'bar')", "Instructions": { @@ -6586,66 +4133,58 @@ { "comment": "Like clause evaluated on the vtgate", "query": "select a.textcol1 from user a join user b where a.textcol1 = b.textcol2 group by a.textcol1 having repeat(a.textcol1,sum(a.id)) like \"And%res\"", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a.textcol1 from user a join user b where a.textcol1 = b.textcol2 group by a.textcol1 having repeat(a.textcol1,sum(a.id)) like \"And%res\"", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Filter", + "Predicate": "repeat(a.textcol1, sum(a.id)) like 'And%res'", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": "repeat(a.textcol1, :1) like 'And%res'", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum(1) AS sum(a.id)", + "GroupBy": "0 COLLATE latin1_swedish_ci", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum(1) AS sum(a.id)", - "GroupBy": "0 COLLATE latin1_swedish_ci", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 2] as textcol1", + "[COLUMN 0] * [COLUMN 1] as sum(a.id)" + ], "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 0] as textcol1", - "[COLUMN 1] * COALESCE([COLUMN 2], INT64(1)) as sum(a.id)" - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1", + "JoinVars": { + "a_textcol1": 1 + }, + "TableName": "`user`_`user`", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,R:1", - "JoinVars": { - "a_textcol1": 0 + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select sum(a.id), a.textcol1 from `user` as a where 1 != 1 group by a.textcol1", + "OrderBy": "1 ASC COLLATE latin1_swedish_ci", + "Query": "select sum(a.id), a.textcol1 from `user` as a group by a.textcol1 order by a.textcol1 asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a.textcol1, sum(a.id) from `user` as a where 1 != 1 group by a.textcol1", - "OrderBy": "0 ASC COLLATE latin1_swedish_ci", - "Query": "select a.textcol1, sum(a.id) from `user` as a group by a.textcol1 order by a.textcol1 asc", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1, count(*) from `user` as b where 1 != 1 group by 1", - "Query": "select 1, count(*) from `user` as b where b.textcol2 = :a_textcol1 group by 1", - "Table": "`user`" - } - ] + "FieldQuery": "select count(*) from `user` as b where 1 != 1 group by .0", + "Query": "select count(*) from `user` as b where b.textcol2 = :a_textcol1 group by .0", + "Table": "`user`" } ] } @@ -6663,22 +4202,7 @@ { "comment": "two predicates that mean the same thing", "query": "select textcol1 from user where foo = 42 and user.foo = 42", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select textcol1 from user where foo = 42 and user.foo = 42", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select textcol1 from `user` where 1 != 1", - "Query": "select textcol1 from `user` where foo = 42 and `user`.foo = 42", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select textcol1 from user where foo = 42 and user.foo = 42", "Instructions": { @@ -6700,8 +4224,7 @@ { "comment": "must merge subquery with the right side of the join", "query": "select 1 from unsharded join user u1 where exists (select 1 from unsharded u2 where u1.bar = u2.baz)", - "v3-plan": "VT12001: unsupported: cross-shard correlated subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 from unsharded join user u1 where exists (select 1 from unsharded u2 where u1.bar = u2.baz)", "Instructions": { @@ -6713,6 +4236,7 @@ "TableName": "unsharded_`user`_unsharded", "Inputs": [ { + "InputName": "Outer", "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "R:0,L:0", @@ -6743,6 +4267,7 @@ ] }, { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { @@ -6760,5 +4285,103 @@ "user.user" ] } + }, + { + "comment": "push filter under aggregation", + "query": "select count(*) from user left join user_extra on user.id = user_extra.bar where IFNULL(user_extra.collections_status, 'NOTSET') != 'collections_lock'", + "plan": { + "QueryType": "SELECT", + "Original": "select count(*) from user left join user_extra on user.id = user_extra.bar where IFNULL(user_extra.collections_status, 'NOTSET') != 'collections_lock'", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count_star(0) AS count(*)", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * COALESCE([COLUMN 1], INT64(1)) as count(*)", + "[COLUMN 2] as collections_status" + ], + "Inputs": [ + { + "OperatorType": "Filter", + "Predicate": "IFNULL(user_extra.collections_status, 'NOTSET') != 'collections_lock'", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,R:0,R:1", + "JoinVars": { + "user_id": 1 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), `user`.id from `user` where 1 != 1 group by `user`.id", + "Query": "select count(*), `user`.id from `user` group by `user`.id", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), user_extra.collections_status from user_extra where 1 != 1 group by user_extra.collections_status", + "Query": "select count(*), user_extra.collections_status from user_extra where user_extra.bar = :user_id group by user_extra.collections_status", + "Table": "user_extra" + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "query that would time out because planning was too slow", + "query": "select 1 from user where shard_key = 1 and is_removed = 1 and cmd in ('A','B','C') and not (user_id = 1 and user_id is not null and ts >= 1 and ts <= 2) and not (user_id = 1 and user_id is not null and ts >= 12 and ts <= 13) and not (user_id = 1 and user_id is not null and ts >= 14 and ts <= 15) and not (user_id = 1 and user_id is not null and ts >= 16 and ts <= 17) and not (user_id = 1 and user_id is not null and ts >= 18 and ts <= 19) and not (user_id = 1 and user_id is not null and ts >= 110 and ts <= 111) and not (user_id = 1 and user_id is not null and ts >= 112 and ts <= 113) and not (user_id = 1 and user_id is not null and ts >= 114 and ts <= 115) and not (user_id = 1 and user_id is not null and ts >= 116 and ts <= 117) and not (user_id = 1 and user_id is not null and ts >= 118 and ts <= 119) and not (user_id = 1 and user_id is not null and ts >= 120 and ts <= 121) and not (user_id = 1 and user_id is not null and ts >= 122 and ts <= 123) and not (user_id = 1 and user_id is not null and ts >= 124 and ts <= 125) and not (user_id = 1 and user_id is not null and ts >= 126 and ts <= 127) and not (user_id = 1 and user_id is not null and ts >= 128 and ts <= 129) and not (user_id = 1 and user_id is not null and ts >= 130 and ts <= 131) and not (user_id = 1 and user_id is not null and ts >= 132 and ts <= 133) and not (user_id = 1 and user_id is not null and ts >= 134 and ts <= 135) and not (user_id = 1 and user_id is not null and ts >= 136 and ts <= 137) and not (user_id = 1 and user_id is not null and ts >= 138 and ts <= 139) and not (user_id = 1 and user_id is not null and ts >= 140 and ts <= 141) and not (user_id = 1 and user_id is not null and ts >= 142 and ts <= 143) and not (user_id = 1 and user_id is not null and ts >= 144 and ts <= 145) and not (user_id = 1 and user_id is not null and ts >= 146 and ts <= 147) and not (user_id = 1 and user_id is not null and ts >= 148 and ts <= 149) and not (user_id = 1 and user_id is not null and ts >= 150 and ts <= 151) and not (user_id = 1 and user_id is not null and ts >= 152 and ts <= 153) and not (user_id = 1 and user_id is not null and ts >= 154 and ts <= 155) and not (user_id = 1 and user_id is not null and ts >= 156 and ts <= 157) and not (user_id = 1 and user_id is not null and ts >= 158 and ts <= 159) and not (user_id = 1 and user_id is not null and ts >= 160 and ts <= 161) and not (user_id = 1 and user_id is not null and ts >= 162 and ts <= 163) and not (user_id = 1 and user_id is not null and ts >= 164 and ts <= 165) and not (user_id = 1 and user_id is not null and ts >= 166 and ts <= 167) and not (user_id = 1 and user_id is not null and ts >= 168 and ts <= 169) and not (user_id = 1 and user_id is not null and ts >= 170 and ts <= 171) and not (user_id = 1 and user_id is not null and ts >= 172 and ts <= 173) and not (user_id = 1 and user_id is not null and ts >= 174 and ts <= 175) and not (user_id = 1 and user_id is not null and ts >= 176 and ts <= 177) and not (user_id = 1 and user_id is not null and ts >= 178 and ts <= 179) and not (user_id = 1 and user_id is not null and ts >= 180 and ts <= 181) and not (user_id = 1 and user_id is not null and ts >= 182 and ts <= 183) and not (user_id = 1 and user_id is not null and ts >= 184 and ts <= 185) and not (user_id = 1 and user_id is not null and ts >= 186 and ts <= 187) and not (user_id = 1 and user_id is not null and ts >= 188 and ts <= 189) and not (user_id = 1 and user_id is not null and ts >= 190 and ts <= 191) and not (user_id = 1 and user_id is not null and ts >= 192 and ts <= 193) and not (user_id = 1 and user_id is not null and ts >= 194 and ts <= 195) and not (user_id = 1 and user_id is not null and ts >= 196 and ts <= 197) and not (user_id = 1 and user_id is not null and ts >= 198 and ts <= 199) and not (user_id = 1 and user_id is not null and ts >= 1100 and ts <= 1101) and not (user_id = 1 and user_id is not null and ts >= 1102 and ts <= 1103) and not (user_id = 1 and user_id is not null and ts >= 1104 and ts <= 1105) and not (user_id = 1 and user_id is not null and ts >= 1106 and ts <= 1107) and not (user_id = 1 and user_id is not null and ts >= 1108 and ts <= 1109) and not (user_id = 1 and user_id is not null and ts >= 1110 and ts <= 1111) and not (user_id = 1 and user_id is not null and ts >= 1112 and ts <= 1113) and not (user_id = 1 and user_id is not null and ts >= 1114 and ts <= 1115) and not (user_id = 1 and user_id is not null and ts >= 1116 and ts <= 1117) and not (user_id = 1 and user_id is not null and ts >= 1118 and ts <= 1119) and not (user_id = 1 and user_id is not null and ts >= 1120 and ts <= 1121) and not (user_id = 1 and user_id is not null and ts >= 1122 and ts <= 1123) and not (user_id = 1 and user_id is not null and ts >= 1124 and ts <= 1125) and not (user_id = 1 and user_id is not null and ts >= 1126 and ts <= 1127) and not (user_id = 1 and user_id is not null and ts >= 1128 and ts <= 1129) and not (user_id = 1 and user_id is not null and ts >= 1130 and ts <= 1131) and not (user_id = 1 and user_id is not null and ts >= 1132 and ts <= 1133) and not (user_id = 1 and user_id is not null and ts >= 1134 and ts <= 1135) and not (user_id = 1 and user_id is not null and ts >= 1136 and ts <= 1137) and not (user_id = 1 and user_id is not null and ts >= 1138 and ts <= 1139) and not (user_id = 1 and user_id is not null and ts >= 1140 and ts <= 1141) and not (user_id = 1 and user_id is not null and ts >= 1142 and ts <= 1143) and not (user_id = 1 and user_id is not null and ts >= 1144 and ts <= 1145) and not (user_id = 1 and user_id is not null and ts >= 1146 and ts <= 1147) and not (user_id = 1 and user_id is not null and ts >= 1148 and ts <= 1149) and not (user_id = 1 and user_id is not null and ts >= 1150 and ts <= 1151) and not (user_id = 1 and user_id is not null and ts >= 1152 and ts <= 1153) and not (user_id = 1 and user_id is not null and ts >= 1154 and ts <= 1155) and not (user_id = 1 and user_id is not null and ts >= 1156 and ts <= 1157) and not (user_id = 1 and user_id is not null and ts >= 1158 and ts <= 1159) and not (user_id = 1 and user_id is not null and ts >= 1160 and ts <= 1161) and not (user_id = 1 and user_id is not null and ts >= 1162 and ts <= 1163) and not (user_id = 1 and user_id is not null and ts >= 1164 and ts <= 1165) and not (user_id = 1 and user_id is not null and ts >= 1166 and ts <= 1167) and not (user_id = 1 and user_id is not null and ts >= 1168 and ts <= 1169) and not (user_id = 1 and user_id is not null and ts >= 1170 and ts <= 1171) and not (user_id = 1 and user_id is not null and ts >= 1172 and ts <= 1173) and not (user_id = 1 and user_id is not null and ts >= 1174 and ts <= 1175) and not (user_id = 1 and user_id is not null and ts >= 1176 and ts <= 1177) and not (user_id = 1 and user_id is not null and ts >= 1178 and ts <= 1179) and not (user_id = 1 and user_id is not null and ts >= 1180 and ts <= 1181) and not (user_id = 1 and user_id is not null and ts >= 1182 and ts <= 1183) and not (user_id = 1 and user_id is not null and ts >= 1184 and ts <= 1185) and not (user_id = 1 and user_id is not null and ts >= 1186 and ts <= 1187) and not (user_id = 1 and user_id is not null and ts >= 1188 and ts <= 1189) and not (user_id = 1 and user_id is not null and ts >= 1190 and ts <= 1191) and not (user_id = 1 and user_id is not null and ts >= 1192 and ts <= 1193) and not (user_id = 1 and user_id is not null and ts >= 1194 and ts <= 1195) and not (user_id = 1 and user_id is not null and ts >= 1196 and ts <= 1197) and not (user_id = 1 and user_id is not null and ts >= 1198 and ts <= 1199) and not (user_id = 1 and user_id is not null and ts >= 1200 and ts <= 1201) and not (user_id = 1 and user_id is not null and ts >= 1202 and ts <= 1203) and not (user_id = 1 and user_id is not null and ts >= 1204 and ts <= 1205) and not (user_id = 1 and user_id is not null and ts >= 1206 and ts <= 1207) and not (user_id = 1 and user_id is not null and ts >= 1208 and ts <= 1209) and not (user_id = 1 and user_id is not null and ts >= 1210 and ts <= 1211) and not (user_id = 1 and user_id is not null and ts >= 1212 and ts <= 1213) and not (user_id = 1 and user_id is not null and ts >= 1214 and ts <= 1215) and not (user_id = 1 and user_id is not null and ts >= 1216 and ts <= 1217) and not (user_id = 1 and user_id is not null and ts >= 1218 and ts <= 1219) and not (user_id = 1 and user_id is not null and ts >= 1220 and ts <= 1221) and not (user_id = 1 and user_id is not null and ts >= 1222 and ts <= 1223) and not (user_id = 1 and user_id is not null and ts >= 1224 and ts <= 1225) and not (user_id = 1 and user_id is not null and ts >= 1226 and ts <= 1227) and not (user_id = 1 and user_id is not null and ts >= 1228 and ts <= 1229) and not (user_id = 1 and user_id is not null and ts >= 1230 and ts <= 1231) and not (user_id = 1 and user_id is not null and ts >= 1232 and ts <= 1233) and not (user_id = 1 and user_id is not null and ts >= 1234 and ts <= 1235) and not (user_id = 1 and user_id is not null and ts >= 1236 and ts <= 1237) and not (user_id = 1 and user_id is not null and ts >= 1238 and ts <= 1239) and not (user_id = 1 and user_id is not null and ts >= 1240 and ts <= 1241) and not (user_id = 1 and user_id is not null and ts >= 1242 and ts <= 1243) and not (user_id = 1 and user_id is not null and ts >= 1244 and ts <= 1245) and not (user_id = 1 and user_id is not null and ts >= 1246 and ts <= 1247) and not (user_id = 1 and user_id is not null and ts >= 1248 and ts <= 1249) and not (user_id = 1 and user_id is not null and ts >= 1250 and ts <= 1251) and not (user_id = 1 and user_id is not null and ts >= 1252 and ts <= 1253) and not (user_id = 1 and user_id is not null and ts >= 1254 and ts <= 1255) and not (user_id = 1 and user_id is not null and ts >= 1256 and ts <= 1257) and not (user_id = 1 and user_id is not null and ts >= 1258 and ts <= 1259) and not (user_id = 1 and user_id is not null and ts >= 1260 and ts <= 1261) and not (user_id = 1 and user_id is not null and ts >= 1262 and ts <= 1263) and not (user_id = 1 and user_id is not null and ts >= 1264 and ts <= 1265) and not (user_id = 1 and user_id is not null and ts >= 1266 and ts <= 1267) and not (user_id = 1 and user_id is not null and ts >= 1268 and ts <= 1269) and not (user_id = 1 and user_id is not null and ts >= 1270 and ts <= 1271) and not (user_id = 1 and user_id is not null and ts >= 1272 and ts <= 1273) and not (user_id = 1 and user_id is not null and ts >= 1274 and ts <= 1275) and not (user_id = 1 and user_id is not null and ts >= 1276 and ts <= 1277) and not (user_id = 1 and user_id is not null and ts >= 1278 and ts <= 1279) and not (user_id = 1 and user_id is not null and ts >= 1280 and ts <= 1281) and not (user_id = 1 and user_id is not null and ts >= 1282 and ts <= 1283) and not (user_id = 1 and user_id is not null and ts >= 1284 and ts <= 1285) and not (user_id = 1 and user_id is not null and ts >= 1286 and ts <= 1287) and not (user_id = 1 and user_id is not null and ts >= 1288 and ts <= 1289) and not (user_id = 1 and user_id is not null and ts >= 1290 and ts <= 1291) and not (user_id = 1 and user_id is not null and ts >= 1292 and ts <= 1293) and not (user_id = 1 and user_id is not null and ts >= 1294 and ts <= 1295) and not (user_id = 1 and user_id is not null and ts >= 1296 and ts <= 1297) and not (user_id = 1 and user_id is not null and ts >= 1298 and ts <= 1299) and not (user_id = 1 and user_id is not null and ts >= 1300 and ts <= 1301) and not (user_id = 1 and user_id is not null and ts >= 1302 and ts <= 1303) and not (user_id = 1 and user_id is not null and ts >= 1304 and ts <= 1305) and not (user_id = 1 and user_id is not null and ts >= 1306 and ts <= 1307) and not (user_id = 1 and user_id is not null and ts >= 1308 and ts <= 1309) and not (user_id = 1 and user_id is not null and ts >= 1310 and ts <= 1311) and not (user_id = 1 and user_id is not null and ts >= 1312 and ts <= 1313) and not (user_id = 1 and user_id is not null and ts >= 1314 and ts <= 1315) and not (user_id = 1 and user_id is not null and ts >= 1316 and ts <= 1317) and not (user_id = 1 and user_id is not null and ts >= 1318 and ts <= 1319) and not (user_id = 1 and user_id is not null and ts >= 1320 and ts <= 1321) and not (user_id = 1 and user_id is not null and ts >= 1322 and ts <= 1323) and not (user_id = 1 and user_id is not null and ts >= 1324 and ts <= 1325) and not (user_id = 1 and user_id is not null and ts >= 1326 and ts <= 1327) and not (user_id = 1 and user_id is not null and ts >= 1328 and ts <= 1329) and not (user_id = 1 and user_id is not null and ts >= 1330 and ts <= 1331) and not (user_id = 1 and user_id is not null and ts >= 1332 and ts <= 1333) and not (user_id = 1 and user_id is not null and ts >= 1334 and ts <= 1335) and not (user_id = 1 and user_id is not null and ts >= 1336 and ts <= 1337) and not (user_id = 1 and user_id is not null and ts >= 1338 and ts <= 1339) and not (user_id = 1 and user_id is not null and ts >= 1340 and ts <= 1341) and not (user_id = 1 and user_id is not null and ts >= 1342 and ts <= 1343) and not (user_id = 1 and user_id is not null and ts >= 1344 and ts <= 1345) and not (user_id = 1 and user_id is not null and ts >= 1346 and ts <= 1347) and not (user_id = 1 and user_id is not null and ts >= 1348 and ts <= 1349) and not (user_id = 1 and user_id is not null and ts >= 1350 and ts <= 1351) and not (user_id = 1 and user_id is not null and ts >= 1352 and ts <= 1353) and not (user_id = 1 and user_id is not null and ts >= 1354 and ts <= 1355) and not (user_id = 1 and user_id is not null and ts >= 1356 and ts <= 1357) and not (user_id = 1 and user_id is not null and ts >= 1358 and ts <= 1359) and not (user_id = 1 and user_id is not null and ts >= 1360 and ts <= 1361) and not (user_id = 1 and user_id is not null and ts >= 1362 and ts <= 1363) and not (user_id = 1 and user_id is not null and ts >= 1364 and ts <= 1365) and not (user_id = 1 and user_id is not null and ts >= 1366 and ts <= 1367) and not (user_id = 1 and user_id is not null and ts >= 1368 and ts <= 1369) and not (user_id = 1 and user_id is not null and ts >= 1370 and ts <= 1371) and not (user_id = 1 and user_id is not null and ts >= 1372 and ts <= 1373) and not (user_id = 1 and user_id is not null and ts >= 1374 and ts <= 1375) and not (user_id = 1 and user_id is not null and ts >= 1376 and ts <= 1377) and not (user_id = 1 and user_id is not null and ts >= 1378 and ts <= 1379) and not (user_id = 1 and user_id is not null and ts >= 1380 and ts <= 1381) and not (user_id = 1 and user_id is not null and ts >= 1382 and ts <= 1383) and not (user_id = 1 and user_id is not null and ts >= 1384 and ts <= 1385) and not (user_id = 1 and user_id is not null and ts >= 1386 and ts <= 1387) and not (user_id = 1 and user_id is not null and ts >= 1388 and ts <= 1389) and not (user_id = 1 and user_id is not null and ts >= 1390 and ts <= 1391) and not (user_id = 1 and user_id is not null and ts >= 1392 and ts <= 1393) and not (user_id = 1 and user_id is not null and ts >= 1394 and ts <= 1395) and not (user_id = 1 and user_id is not null and ts >= 1396 and ts <= 1397) and not (user_id = 1 and user_id is not null and ts >= 1398 and ts <= 1399) and not (user_id = 1 and user_id is not null and ts >= 1400 and ts <= 1401) and not (user_id = 1 and user_id is not null and ts >= 1402 and ts <= 1403) and not (user_id = 1 and user_id is not null and ts >= 1404 and ts <= 1405) and not (user_id = 1 and user_id is not null and ts >= 1406 and ts <= 1407) and not (user_id = 1 and user_id is not null and ts >= 1408 and ts <= 1409) and not (user_id = 1 and user_id is not null and ts >= 1410 and ts <= 1411) and not (user_id = 1 and user_id is not null and ts >= 1412 and ts <= 1413) and not (user_id = 1 and user_id is not null and ts >= 1414 and ts <= 1415) and not (user_id = 1 and user_id is not null and ts >= 1416 and ts <= 1417) and not (user_id = 1 and user_id is not null and ts >= 1418 and ts <= 1419) and not (user_id = 1 and user_id is not null and ts >= 1420 and ts <= 1421) and not (user_id = 1 and user_id is not null and ts >= 1422 and ts <= 1423) and not (user_id = 1 and user_id is not null and ts >= 1424 and ts <= 1425) and not (user_id = 1 and user_id is not null and ts >= 1426 and ts <= 1427) and not (user_id = 1 and user_id is not null and ts >= 1428 and ts <= 1429) and not (user_id = 1 and user_id is not null and ts >= 1430 and ts <= 1431) and not (user_id = 1 and user_id is not null and ts >= 1432 and ts <= 1433) and not (user_id = 1 and user_id is not null and ts >= 1434 and ts <= 1435) and not (user_id = 1 and user_id is not null and ts >= 1436 and ts <= 1437) and not (user_id = 1 and user_id is not null and ts >= 1438 and ts <= 1439) and not (user_id = 1 and user_id is not null and ts >= 1440 and ts <= 1441) and not (user_id = 1 and user_id is not null and ts >= 1442 and ts <= 1443) and not (user_id = 1 and user_id is not null and ts >= 1444 and ts <= 1445) and not (user_id = 1 and user_id is not null and ts >= 1446 and ts <= 1447) and not (user_id = 1 and user_id is not null and ts >= 1448 and ts <= 1449) and not (user_id = 1 and user_id is not null and ts >= 1450 and ts <= 1451) and not (user_id = 1 and user_id is not null and ts >= 1452 and ts <= 1453) and not (user_id = 1 and user_id is not null and ts >= 1454 and ts <= 1455) and not (user_id = 1 and user_id is not null and ts >= 1456 and ts <= 1457) and not (user_id = 1 and user_id is not null and ts >= 1458 and ts <= 1459) and not (user_id = 1 and user_id is not null and ts >= 1460 and ts <= 1461) and not (user_id = 1 and user_id is not null and ts >= 1462 and ts <= 1463) and not (user_id = 1 and user_id is not null and ts >= 1464 and ts <= 1465) and not (user_id = 1 and user_id is not null and ts >= 1466 and ts <= 1467) and not (user_id = 1 and user_id is not null and ts >= 1468 and ts <= 1469) and not (user_id = 1 and user_id is not null and ts >= 1470 and ts <= 1471) and not (user_id = 1 and user_id is not null and ts >= 1472 and ts <= 1473) and not (user_id = 1 and user_id is not null and ts >= 1474 and ts <= 1475) and not (user_id = 1 and user_id is not null and ts >= 1476 and ts <= 1477) and not (user_id = 1 and user_id is not null and ts >= 1478 and ts <= 1479) and not (user_id = 1 and user_id is not null and ts >= 1480 and ts <= 1481) and not (user_id = 1 and user_id is not null and ts >= 1482 and ts <= 1483) and not (user_id = 1 and user_id is not null and ts >= 1484 and ts <= 1485) and not (user_id = 1 and user_id is not null and ts >= 1486 and ts <= 1487) and not (user_id = 1 and user_id is not null and ts >= 1488 and ts <= 1489) and not (user_id = 1 and user_id is not null and ts >= 1490 and ts <= 1491) and not (user_id = 1 and user_id is not null and ts >= 1492 and ts <= 1493) and not (user_id = 1 and user_id is not null and ts >= 1494 and ts <= 1495) and not (user_id = 1 and user_id is not null and ts >= 1496 and ts <= 1497) and not (user_id = 1 and user_id is not null and ts >= 1498 and ts <= 1499) and not (user_id = 1 and user_id is not null and ts >= 1500 and ts <= 1501) and not (user_id = 1 and user_id is not null and ts >= 1502 and ts <= 1503) and not (user_id = 1 and user_id is not null and ts >= 1504 and ts <= 1505) and not (user_id = 1 and user_id is not null and ts >= 1506 and ts <= 1507) and not (user_id = 1 and user_id is not null and ts >= 1508 and ts <= 1509) and not (user_id = 1 and user_id is not null and ts >= 1510 and ts <= 1511) and not (user_id = 1 and user_id is not null and ts >= 1512 and ts <= 1513) and not (user_id = 1 and user_id is not null and ts >= 1514 and ts <= 1515) and not (user_id = 1 and user_id is not null and ts >= 1516 and ts <= 1517) and not (user_id = 1 and user_id is not null and ts >= 1518 and ts <= 1519) and not (user_id = 1 and user_id is not null and ts >= 1520 and ts <= 1521) and not (user_id = 1 and user_id is not null and ts >= 1522 and ts <= 1523) and not (user_id = 1 and user_id is not null and ts >= 1524 and ts <= 1525) and not (user_id = 1 and user_id is not null and ts >= 1526 and ts <= 1527) and not (user_id = 1 and user_id is not null and ts >= 1528 and ts <= 1529) and not (user_id = 1 and user_id is not null and ts >= 1530 and ts <= 1531) and not (user_id = 1 and user_id is not null and ts >= 1532 and ts <= 1533) and not (user_id = 1 and user_id is not null and ts >= 1534 and ts <= 1535) and not (user_id = 1 and user_id is not null and ts >= 1536 and ts <= 1537) and not (user_id = 1 and user_id is not null and ts >= 1538 and ts <= 1539) and not (user_id = 1 and user_id is not null and ts >= 1540 and ts <= 1541) and not (user_id = 1 and user_id is not null and ts >= 1542 and ts <= 1543) and not (user_id = 1 and user_id is not null and ts >= 1544 and ts <= 1545) and not (user_id = 1 and user_id is not null and ts >= 1546 and ts <= 1547) and not (user_id = 1 and user_id is not null and ts >= 1548 and ts <= 1549) and not (user_id = 1 and user_id is not null and ts >= 1550 and ts <= 1551) and not (user_id = 1 and user_id is not null and ts >= 1552 and ts <= 1553) and not (user_id = 1 and user_id is not null and ts >= 1554 and ts <= 1555) and not (user_id = 1 and user_id is not null and ts >= 1556 and ts <= 1557) and not (user_id = 1 and user_id is not null and ts >= 1558 and ts <= 1559) and not (user_id = 1 and user_id is not null and ts >= 1560 and ts <= 1561) and not (user_id = 1 and user_id is not null and ts >= 1562 and ts <= 1563) and not (user_id = 1 and user_id is not null and ts >= 1564 and ts <= 1565) and not (user_id = 1 and user_id is not null and ts >= 1566 and ts <= 1567) and not (user_id = 1 and user_id is not null and ts >= 1568 and ts <= 1569) and not (user_id = 1 and user_id is not null and ts >= 1570 and ts <= 1571) and not (user_id = 1 and user_id is not null and ts >= 1572 and ts <= 1573) and not (user_id = 1 and user_id is not null and ts >= 1574 and ts <= 1575) and not (user_id = 1 and user_id is not null and ts >= 1576 and ts <= 1577) and not (user_id = 1 and user_id is not null and ts >= 1578 and ts <= 1579) and not (user_id = 1 and user_id is not null and ts >= 1580 and ts <= 1581) and not (user_id = 1 and user_id is not null and ts >= 1582 and ts <= 1583) and not (user_id = 1 and user_id is not null and ts >= 1584 and ts <= 1585) and not (user_id = 1 and user_id is not null and ts >= 1586 and ts <= 1587) and not (user_id = 1 and user_id is not null and ts >= 1588 and ts <= 1589) and not (user_id = 1 and user_id is not null and ts >= 1590 and ts <= 1591) and not (user_id = 1 and user_id is not null and ts >= 1592 and ts <= 1593) and not (user_id = 1 and user_id is not null and ts >= 1594 and ts <= 1595) and not (user_id = 1 and user_id is not null and ts >= 1596 and ts <= 1597) and not (user_id = 1 and user_id is not null and ts >= 1598 and ts <= 1599) and not (user_id = 1 and user_id is not null and ts >= 1600 and ts <= 1601) and not (user_id = 1 and user_id is not null and ts >= 1602 and ts <= 1603) and not (user_id = 1 and user_id is not null and ts >= 1604 and ts <= 1605) and not (user_id = 1 and user_id is not null and ts >= 1606 and ts <= 1607) and not (user_id = 1 and user_id is not null and ts >= 1608 and ts <= 1609) and not (user_id = 1 and user_id is not null and ts >= 1610 and ts <= 1611) and not (user_id = 1 and user_id is not null and ts >= 1612 and ts <= 1613) and not (user_id = 1 and user_id is not null and ts >= 1614 and ts <= 1615) and not (user_id = 1 and user_id is not null and ts >= 1616 and ts <= 1617) and not (user_id = 1 and user_id is not null and ts >= 1618 and ts <= 1619) and not (user_id = 1 and user_id is not null and ts >= 1620 and ts <= 1621) and not (user_id = 1 and user_id is not null and ts >= 1622 and ts <= 1623) and not (user_id = 1 and user_id is not null and ts >= 1624 and ts <= 1625) and not (user_id = 1 and user_id is not null and ts >= 1626 and ts <= 1627) and not (user_id = 1 and user_id is not null and ts >= 1628 and ts <= 1629) and not (user_id = 1 and user_id is not null and ts >= 1630 and ts <= 1631) and not (user_id = 1 and user_id is not null and ts >= 1632 and ts <= 1633) and not (user_id = 1 and user_id is not null and ts >= 1634 and ts <= 1635) and not (user_id = 1 and user_id is not null and ts >= 1636 and ts <= 1637) and not (user_id = 1 and user_id is not null and ts >= 1638 and ts <= 1639) and not (user_id = 1 and user_id is not null and ts >= 1640 and ts <= 1641) and not (user_id = 1 and user_id is not null and ts >= 1642 and ts <= 1643) and not (user_id = 1 and user_id is not null and ts >= 1644 and ts <= 1645) and not (user_id = 1 and user_id is not null and ts >= 1646 and ts <= 1647) and not (user_id = 1 and user_id is not null and ts >= 1648 and ts <= 1649) and not (user_id = 1 and user_id is not null and ts >= 1650 and ts <= 1651) and not (user_id = 1 and user_id is not null and ts >= 1652 and ts <= 1653) and not (user_id = 1 and user_id is not null and ts >= 1654 and ts <= 1655) and not (user_id = 1 and user_id is not null and ts >= 1656 and ts <= 1657) and not (user_id = 1 and user_id is not null and ts >= 1658 and ts <= 1659) and not (user_id = 1 and user_id is not null and ts >= 1660 and ts <= 1661) and not (user_id = 1 and user_id is not null and ts >= 1662 and ts <= 1663) and not (user_id = 1 and user_id is not null and ts >= 1664 and ts <= 1665) and not (user_id = 1 and user_id is not null and ts >= 1666 and ts <= 1667) and not (user_id = 1 and user_id is not null and ts >= 1668 and ts <= 1669) and not (user_id = 1 and user_id is not null and ts >= 1670 and ts <= 1671) and not (user_id = 1 and user_id is not null and ts >= 1672 and ts <= 1673) and not (user_id = 1 and user_id is not null and ts >= 1674 and ts <= 1675) and not (user_id = 1 and user_id is not null and ts >= 1676 and ts <= 1677) and not (user_id = 1 and user_id is not null and ts >= 1678 and ts <= 1679) and not (user_id = 1 and user_id is not null and ts >= 1680 and ts <= 1681) and not (user_id = 1 and user_id is not null and ts >= 1682 and ts <= 1683) and not (user_id = 1 and user_id is not null and ts >= 1684 and ts <= 1685) and not (user_id = 1 and user_id is not null and ts >= 1686 and ts <= 1687) and not (user_id = 1 and user_id is not null and ts >= 1688 and ts <= 1689) and not (user_id = 1 and user_id is not null and ts >= 1690 and ts <= 1691) and not (user_id = 1 and user_id is not null and ts >= 1692 and ts <= 1693) and not (user_id = 1 and user_id is not null and ts >= 1694 and ts <= 1695) and not (user_id = 1 and user_id is not null and ts >= 1696 and ts <= 1697) and not (user_id = 1 and user_id is not null and ts >= 1698 and ts <= 1699) and not (user_id = 1 and user_id is not null and ts >= 1700 and ts <= 1701) and not (user_id = 1 and user_id is not null and ts >= 1702 and ts <= 1703) and not (user_id = 1 and user_id is not null and ts >= 1704 and ts <= 1705) and not (user_id = 1 and user_id is not null and ts >= 1706 and ts <= 1707) and not (user_id = 1 and user_id is not null and ts >= 1708 and ts <= 1709) and not (user_id = 1 and user_id is not null and ts >= 1710 and ts <= 1711) and not (user_id = 1 and user_id is not null and ts >= 1712 and ts <= 1713) and not (user_id = 1 and user_id is not null and ts >= 1714 and ts <= 1715) and not (user_id = 1 and user_id is not null and ts >= 1716 and ts <= 1717) and not (user_id = 1 and user_id is not null and ts >= 1718 and ts <= 1719) and not (user_id = 1 and user_id is not null and ts >= 1720 and ts <= 1721) and not (user_id = 1 and user_id is not null and ts >= 1722 and ts <= 1723) and not (user_id = 1 and user_id is not null and ts >= 1724 and ts <= 1725) and not (user_id = 1 and user_id is not null and ts >= 1726 and ts <= 1727) and not (user_id = 1 and user_id is not null and ts >= 1728 and ts <= 1729) and not (user_id = 1 and user_id is not null and ts >= 1730 and ts <= 1731) and not (user_id = 1 and user_id is not null and ts >= 1732 and ts <= 1733) and not (user_id = 1 and user_id is not null and ts >= 1734 and ts <= 1735) and not (user_id = 1 and user_id is not null and ts >= 1736 and ts <= 1737) and not (user_id = 1 and user_id is not null and ts >= 1738 and ts <= 1739) and not (user_id = 1 and user_id is not null and ts >= 1740 and ts <= 1741) and not (user_id = 1 and user_id is not null and ts >= 1742 and ts <= 1743) and not (user_id = 1 and user_id is not null and ts >= 1744 and ts <= 1745) and not (user_id = 1 and user_id is not null and ts >= 1746 and ts <= 1747) and not (user_id = 1 and user_id is not null and ts >= 1748 and ts <= 1749) and not (user_id = 1 and user_id is not null and ts >= 1750 and ts <= 1751) and not (user_id = 1 and user_id is not null and ts >= 1752 and ts <= 1753) and not (user_id = 1 and user_id is not null and ts >= 1754 and ts <= 1755) and not (user_id = 1 and user_id is not null and ts >= 1756 and ts <= 1757) and not (user_id = 1 and user_id is not null and ts >= 1758 and ts <= 1759) and not (user_id = 1 and user_id is not null and ts >= 1760 and ts <= 1761) and not (user_id = 1 and user_id is not null and ts >= 1762 and ts <= 1763) and not (user_id = 1 and user_id is not null and ts >= 1764 and ts <= 1765) and not (user_id = 1 and user_id is not null and ts >= 1766 and ts <= 1767) and not (user_id = 1 and user_id is not null and ts >= 1768 and ts <= 1769) and not (user_id = 1 and user_id is not null and ts >= 1770 and ts <= 1771) and not (user_id = 1 and user_id is not null and ts >= 1772 and ts <= 1773) and not (user_id = 1 and user_id is not null and ts >= 1774 and ts <= 1775) and not (user_id = 1 and user_id is not null and ts >= 1776 and ts <= 1777) and not (user_id = 1 and user_id is not null and ts >= 1778 and ts <= 1779) and not (user_id = 1 and user_id is not null and ts >= 1780 and ts <= 1781) and not (user_id = 1 and user_id is not null and ts >= 1782 and ts <= 1783) and not (user_id = 1 and user_id is not null and ts >= 1784 and ts <= 1785) and not (user_id = 1 and user_id is not null and ts >= 1786 and ts <= 1787) and not (user_id = 1 and user_id is not null and ts >= 1788 and ts <= 1789) and not (user_id = 1 and user_id is not null and ts >= 1790 and ts <= 1791) and not (user_id = 1 and user_id is not null and ts >= 1792 and ts <= 1793) and not (user_id = 1 and user_id is not null and ts >= 1794 and ts <= 1795) and not (user_id = 1 and user_id is not null and ts >= 1796 and ts <= 1797) and not (user_id = 1 and user_id is not null and ts >= 1798 and ts <= 1799) and not (user_id = 1 and user_id is not null and ts >= 1800 and ts <= 1801) and not (user_id = 1 and user_id is not null and ts >= 1802 and ts <= 1803) and not (user_id = 1 and user_id is not null and ts >= 1804 and ts <= 1805) and not (user_id = 1 and user_id is not null and ts >= 1806 and ts <= 1807) and not (user_id = 1 and user_id is not null and ts >= 1808 and ts <= 1809) and not (user_id = 1 and user_id is not null and ts >= 1810 and ts <= 1811) and not (user_id = 1 and user_id is not null and ts >= 1812 and ts <= 1813) and not (user_id = 1 and user_id is not null and ts >= 1814 and ts <= 1815) and not (user_id = 1 and user_id is not null and ts >= 1816 and ts <= 1817) and not (user_id = 1 and user_id is not null and ts >= 1818 and ts <= 1819) and not (user_id = 1 and user_id is not null and ts >= 1820 and ts <= 1821) and not (user_id = 1 and user_id is not null and ts >= 1822 and ts <= 1823) and not (user_id = 1 and user_id is not null and ts >= 1824 and ts <= 1825) and not (user_id = 1 and user_id is not null and ts >= 1826 and ts <= 1827) and not (user_id = 1 and user_id is not null and ts >= 1828 and ts <= 1829) and not (user_id = 1 and user_id is not null and ts >= 1830 and ts <= 1831) and not (user_id = 1 and user_id is not null and ts >= 1832 and ts <= 1833) and not (user_id = 1 and user_id is not null and ts >= 1834 and ts <= 1835) and not (user_id = 1 and user_id is not null and ts >= 1836 and ts <= 1837) and not (user_id = 1 and user_id is not null and ts >= 1838 and ts <= 1839) and not (user_id = 1 and user_id is not null and ts >= 1840 and ts <= 1841) and not (user_id = 1 and user_id is not null and ts >= 1842 and ts <= 1843) and not (user_id = 1 and user_id is not null and ts >= 1844 and ts <= 1845) and not (user_id = 1 and user_id is not null and ts >= 1846 and ts <= 1847) and not (user_id = 1 and user_id is not null and ts >= 1848 and ts <= 1849) and not (user_id = 1 and user_id is not null and ts >= 1850 and ts <= 1851) and not (user_id = 1 and user_id is not null and ts >= 1852 and ts <= 1853) and not (user_id = 1 and user_id is not null and ts >= 1854 and ts <= 1855) and not (user_id = 1 and user_id is not null and ts >= 1856 and ts <= 1857) and not (user_id = 1 and user_id is not null and ts >= 1858 and ts <= 1859) and not (user_id = 1 and user_id is not null and ts >= 1860 and ts <= 1861) and not (user_id = 1 and user_id is not null and ts >= 1862 and ts <= 1863) and not (user_id = 1 and user_id is not null and ts >= 1864 and ts <= 1865) and not (user_id = 1 and user_id is not null and ts >= 1866 and ts <= 1867) and not (user_id = 1 and user_id is not null and ts >= 1868 and ts <= 1869) and not (user_id = 1 and user_id is not null and ts >= 1870 and ts <= 1871) and not (user_id = 1 and user_id is not null and ts >= 1872 and ts <= 1873) and not (user_id = 1 and user_id is not null and ts >= 1874 and ts <= 1875) and not (user_id = 1 and user_id is not null and ts >= 1876 and ts <= 1877) and not (user_id = 1 and user_id is not null and ts >= 1878 and ts <= 1879) and not (user_id = 1 and user_id is not null and ts >= 1880 and ts <= 1881) and not (user_id = 1 and user_id is not null and ts >= 1882 and ts <= 1883) and not (user_id = 1 and user_id is not null and ts >= 1884 and ts <= 1885) and not (user_id = 1 and user_id is not null and ts >= 1886 and ts <= 1887) and not (user_id = 1 and user_id is not null and ts >= 1888 and ts <= 1889) and not (user_id = 1 and user_id is not null and ts >= 1890 and ts <= 1891) and not (user_id = 1 and user_id is not null and ts >= 1892 and ts <= 1893) and not (user_id = 1 and user_id is not null and ts >= 1894 and ts <= 1895) and not (user_id = 1 and user_id is not null and ts >= 1896 and ts <= 1897) and not (user_id = 1 and user_id is not null and ts >= 1898 and ts <= 1899) and not (user_id = 1 and user_id is not null and ts >= 1900 and ts <= 1901) and not (user_id = 1 and user_id is not null and ts >= 1902 and ts <= 1903) and not (user_id = 1 and user_id is not null and ts >= 1904 and ts <= 1905) and not (user_id = 1 and user_id is not null and ts >= 1906 and ts <= 1907) and not (user_id = 1 and user_id is not null and ts >= 1908 and ts <= 1909) and not (user_id = 1 and user_id is not null and ts >= 1910 and ts <= 1911) and not (user_id = 1 and user_id is not null and ts >= 1912 and ts <= 1913) and not (user_id = 1 and user_id is not null and ts >= 1914 and ts <= 1915) and not (user_id = 1 and user_id is not null and ts >= 1916 and ts <= 1917) and not (user_id = 1 and user_id is not null and ts >= 1918 and ts <= 1919) and not (user_id = 1 and user_id is not null and ts >= 1920 and ts <= 1921) and not (user_id = 1 and user_id is not null and ts >= 1922 and ts <= 1923) and not (user_id = 1 and user_id is not null and ts >= 1924 and ts <= 1925) and not (user_id = 1 and user_id is not null and ts >= 1926 and ts <= 1927) and not (user_id = 1 and user_id is not null and ts >= 1928 and ts <= 1929) and not (user_id = 1 and user_id is not null and ts >= 1930 and ts <= 1931) and not (user_id = 1 and user_id is not null and ts >= 1932 and ts <= 1933) and not (user_id = 1 and user_id is not null and ts >= 1934 and ts <= 1935) and not (user_id = 1 and user_id is not null and ts >= 1936 and ts <= 1937) and not (user_id = 1 and user_id is not null and ts >= 1938 and ts <= 1939) and not (user_id = 1 and user_id is not null and ts >= 1940 and ts <= 1941) and not (user_id = 1 and user_id is not null and ts >= 1942 and ts <= 1943) and not (user_id = 1 and user_id is not null and ts >= 1944 and ts <= 1945) and not (user_id = 1 and user_id is not null and ts >= 1946 and ts <= 1947) and not (user_id = 1 and user_id is not null and ts >= 1948 and ts <= 1949) and not (user_id = 1 and user_id is not null and ts >= 1950 and ts <= 1951) and not (user_id = 1 and user_id is not null and ts >= 1952 and ts <= 1953) and not (user_id = 1 and user_id is not null and ts >= 1954 and ts <= 1955) and not (user_id = 1 and user_id is not null and ts >= 1956 and ts <= 1957) and not (user_id = 1 and user_id is not null and ts >= 1958 and ts <= 1959) and not (user_id = 1 and user_id is not null and ts >= 1960 and ts <= 1961) and not (user_id = 1 and user_id is not null and ts >= 1962 and ts <= 1963) and not (user_id = 1 and user_id is not null and ts >= 1964 and ts <= 1965) and not (user_id = 1 and user_id is not null and ts >= 1966 and ts <= 1967) and not (user_id = 1 and user_id is not null and ts >= 1968 and ts <= 1969) and not (user_id = 1 and user_id is not null and ts >= 1970 and ts <= 1971) and not (user_id = 1 and user_id is not null and ts >= 1972 and ts <= 1973) and not (user_id = 1 and user_id is not null and ts >= 1974 and ts <= 1975) and not (user_id = 1 and user_id is not null and ts >= 1976 and ts <= 1977) and not (user_id = 1 and user_id is not null and ts >= 1978 and ts <= 1979) and not (user_id = 1 and user_id is not null and ts >= 1980 and ts <= 1981) and not (user_id = 1 and user_id is not null and ts >= 1982 and ts <= 1983) and not (user_id = 1 and user_id is not null and ts >= 1984 and ts <= 1985) and not (user_id = 1 and user_id is not null and ts >= 1986 and ts <= 1987) and not (user_id = 1 and user_id is not null and ts >= 1988 and ts <= 1989) and not (user_id = 1 and user_id is not null and ts >= 1990 and ts <= 1991) and not (user_id = 1 and user_id is not null and ts >= 1992 and ts <= 1993) and not (user_id = 1 and user_id is not null and ts >= 1994 and ts <= 1995) and not (user_id = 1 and user_id is not null and ts >= 1996 and ts <= 1997) and not (user_id = 1 and user_id is not null and ts >= 1998 and ts <= 1999) and not (user_id = 1 and user_id is not null and ts >= 11000 and ts <= 11001) and not (user_id = 1 and user_id is not null and ts >= 11002 and ts <= 11003) and not (user_id = 1 and user_id is not null and ts >= 11004 and ts <= 11005) and not (user_id = 1 and user_id is not null and ts >= 11006 and ts <= 11007) and not (user_id = 1 and user_id is not null and ts >= 11008 and ts <= 11009) and not (user_id = 1 and user_id is not null and ts >= 11010 and ts <= 11011) and not (user_id = 1 and user_id is not null and ts >= 11012 and ts <= 11013) and not (user_id = 1 and user_id is not null and ts >= 11014 and ts <= 11015) and not (user_id = 1 and user_id is not null and ts >= 11016 and ts <= 11017) and not (user_id = 1 and user_id is not null and ts >= 11018 and ts <= 11019) and not (user_id = 1 and user_id is not null and ts >= 11020 and ts <= 11021) and not (user_id = 1 and user_id is not null and ts >= 11022 and ts <= 11023) and not (user_id = 1 and user_id is not null and ts >= 11024 and ts <= 11025) and not (user_id = 1 and user_id is not null and ts >= 11026 and ts <= 11027) and not (user_id = 1 and user_id is not null and ts >= 11028 and ts <= 11029) and not (user_id = 1 and user_id is not null and ts >= 11030 and ts <= 11031) and not (user_id = 1 and user_id is not null and ts >= 11032 and ts <= 11033) and not (user_id = 1 and user_id is not null and ts >= 11034 and ts <= 11035) and not (user_id = 1 and user_id is not null and ts >= 11036 and ts <= 11037) and not (user_id = 1 and user_id is not null and ts >= 11038 and ts <= 11039) and not (user_id = 1 and user_id is not null and ts >= 11040 and ts <= 11041) and not (user_id = 1 and user_id is not null and ts >= 11042 and ts <= 11043) and not (user_id = 1 and user_id is not null and ts >= 11044 and ts <= 11045) and not (user_id = 1 and user_id is not null and ts >= 11046 and ts <= 11047) and not (user_id = 1 and user_id is not null and ts >= 11048 and ts <= 11049) and not (user_id = 1 and user_id is not null and ts >= 11050 and ts <= 11051) and not (user_id = 1 and user_id is not null and ts >= 11052 and ts <= 11053) and not (user_id = 1 and user_id is not null and ts >= 11054 and ts <= 11055) and not (user_id = 1 and user_id is not null and ts >= 11056 and ts <= 11057) and not (user_id = 1 and user_id is not null and ts >= 11058 and ts <= 11059) and not (user_id = 1 and user_id is not null and ts >= 11060 and ts <= 11061) and not (user_id = 1 and user_id is not null and ts >= 11062 and ts <= 11063) and not (user_id = 1 and user_id is not null and ts >= 11064 and ts <= 11065) and not (user_id = 1 and user_id is not null and ts >= 11066 and ts <= 11067) and not (user_id = 1 and user_id is not null and ts >= 11068 and ts <= 11069) and not (user_id = 1 and user_id is not null and ts >= 11070 and ts <= 11071) and not (user_id = 1 and user_id is not null and ts >= 11072 and ts <= 11073) and not (user_id = 1 and user_id is not null and ts >= 11074 and ts <= 11075) and not (user_id = 1 and user_id is not null and ts >= 11076 and ts <= 11077) and not (user_id = 1 and user_id is not null and ts >= 11078 and ts <= 11079) and not (user_id = 1 and user_id is not null and ts >= 11080 and ts <= 11081) and not (user_id = 1 and user_id is not null and ts >= 11082 and ts <= 11083) and not (user_id = 1 and user_id is not null and ts >= 11084 and ts <= 11085) and not (user_id = 1 and user_id is not null and ts >= 11086 and ts <= 11087) and not (user_id = 1 and user_id is not null and ts >= 11088 and ts <= 11089) and not (user_id = 1 and user_id is not null and ts >= 11090 and ts <= 11091) and not (user_id = 1 and user_id is not null and ts >= 11092 and ts <= 11093) and not (user_id = 1 and user_id is not null and ts >= 11094 and ts <= 11095) and not (user_id = 1 and user_id is not null and ts >= 11096 and ts <= 11097) and not (user_id = 1 and user_id is not null and ts >= 11098 and ts <= 11099) and not (user_id = 1 and user_id is not null and ts >= 11100 and ts <= 11101) and not (user_id = 1 and user_id is not null and ts >= 11102 and ts <= 11103) and not (user_id = 1 and user_id is not null and ts >= 11104 and ts <= 11105) and not (user_id = 1 and user_id is not null and ts >= 11106 and ts <= 11107) and not (user_id = 1 and user_id is not null and ts >= 11108 and ts <= 11109) and not (user_id = 1 and user_id is not null and ts >= 11110 and ts <= 11111) and not (user_id = 1 and user_id is not null and ts >= 11112 and ts <= 11113) and not (user_id = 1 and user_id is not null and ts >= 11114 and ts <= 11115) and not (user_id = 1 and user_id is not null and ts >= 11116 and ts <= 11117) and not (user_id = 1 and user_id is not null and ts >= 11118 and ts <= 11119) and not (user_id = 1 and user_id is not null and ts >= 11120 and ts <= 11121) and not (user_id = 1 and user_id is not null and ts >= 11122 and ts <= 11123) and not (user_id = 1 and user_id is not null and ts >= 11124 and ts <= 11125) and not (user_id = 1 and user_id is not null and ts >= 11126 and ts <= 11127) and not (user_id = 1 and user_id is not null and ts >= 11128 and ts <= 11129) and not (user_id = 1 and user_id is not null and ts >= 11130 and ts <= 11131) and not (user_id = 1 and user_id is not null and ts >= 11132 and ts <= 11133) and not (user_id = 1 and user_id is not null and ts >= 11134 and ts <= 11135) and not (user_id = 1 and user_id is not null and ts >= 11136 and ts <= 11137) and not (user_id = 1 and user_id is not null and ts >= 11138 and ts <= 11139) and not (user_id = 1 and user_id is not null and ts >= 11140 and ts <= 11141) and not (user_id = 1 and user_id is not null and ts >= 11142 and ts <= 11143) and not (user_id = 1 and user_id is not null and ts >= 11144 and ts <= 11145) and not (user_id = 1 and user_id is not null and ts >= 11146 and ts <= 11147) and not (user_id = 1 and user_id is not null and ts >= 11148 and ts <= 11149) and not (user_id = 1 and user_id is not null and ts >= 11150 and ts <= 11151) and not (user_id = 1 and user_id is not null and ts >= 11152 and ts <= 11153) and not (user_id = 1 and user_id is not null and ts >= 11154 and ts <= 11155) and not (user_id = 1 and user_id is not null and ts >= 11156 and ts <= 11157) and not (user_id = 1 and user_id is not null and ts >= 11158 and ts <= 11159) and not (user_id = 1 and user_id is not null and ts >= 11160 and ts <= 11161) and not (user_id = 1 and user_id is not null and ts >= 11162 and ts <= 11163) and not (user_id = 1 and user_id is not null and ts >= 11164 and ts <= 11165) and not (user_id = 1 and user_id is not null and ts >= 11166 and ts <= 11167) and not (user_id = 1 and user_id is not null and ts >= 11168 and ts <= 11169) and not (user_id = 1 and user_id is not null and ts >= 11170 and ts <= 11171) and not (user_id = 1 and user_id is not null and ts >= 11172 and ts <= 11173) and not (user_id = 1 and user_id is not null and ts >= 11174 and ts <= 11175) and not (user_id = 1 and user_id is not null and ts >= 11176 and ts <= 11177) and not (user_id = 1 and user_id is not null and ts >= 11178 and ts <= 11179) and not (user_id = 1 and user_id is not null and ts >= 11180 and ts <= 11181) and not (user_id = 1 and user_id is not null and ts >= 11182 and ts <= 11183) and not (user_id = 1 and user_id is not null and ts >= 11184 and ts <= 11185) and not (user_id = 1 and user_id is not null and ts >= 11186 and ts <= 11187) and not (user_id = 1 and user_id is not null and ts >= 11188 and ts <= 11189) and not (user_id = 1 and user_id is not null and ts >= 11190 and ts <= 11191) and not (user_id = 1 and user_id is not null and ts >= 11192 and ts <= 11193) and not (user_id = 1 and user_id is not null and ts >= 11194 and ts <= 11195) and not (user_id = 1 and user_id is not null and ts >= 11196 and ts <= 11197) and not (user_id = 1 and user_id is not null and ts >= 11198 and ts <= 11199) and not (user_id = 1 and user_id is not null and ts >= 11200 and ts <= 11201) and not (user_id = 1 and user_id is not null and ts >= 11202 and ts <= 11203) and not (user_id = 1 and user_id is not null and ts >= 11204 and ts <= 11205) and not (user_id = 1 and user_id is not null and ts >= 11206 and ts <= 11207) and not (user_id = 1 and user_id is not null and ts >= 11208 and ts <= 11209) and not (user_id = 1 and user_id is not null and ts >= 11210 and ts <= 11211) and not (user_id = 1 and user_id is not null and ts >= 11212 and ts <= 11213) and not (user_id = 1 and user_id is not null and ts >= 11214 and ts <= 11215) and not (user_id = 1 and user_id is not null and ts >= 11216 and ts <= 11217) and not (user_id = 1 and user_id is not null and ts >= 11218 and ts <= 11219) and not (user_id = 1 and user_id is not null and ts >= 11220 and ts <= 11221) and not (user_id = 1 and user_id is not null and ts >= 11222 and ts <= 11223) and not (user_id = 1 and user_id is not null and ts >= 11224 and ts <= 11225) and not (user_id = 1 and user_id is not null and ts >= 11226 and ts <= 11227) and not (user_id = 1 and user_id is not null and ts >= 11228 and ts <= 11229) and not (user_id = 1 and user_id is not null and ts >= 11230 and ts <= 11231) and not (user_id = 1 and user_id is not null and ts >= 11232 and ts <= 11233) and not (user_id = 1 and user_id is not null and ts >= 11234 and ts <= 11235) and not (user_id = 1 and user_id is not null and ts >= 11236 and ts <= 11237) and not (user_id = 1 and user_id is not null and ts >= 11238 and ts <= 11239) and not (user_id = 1 and user_id is not null and ts >= 11240 and ts <= 11241) and not (user_id = 1 and user_id is not null and ts >= 11242 and ts <= 11243) and not (user_id = 1 and user_id is not null and ts >= 11244 and ts <= 11245) and not (user_id = 1 and user_id is not null and ts >= 11246 and ts <= 11247) and not (user_id = 1 and user_id is not null and ts >= 11248 and ts <= 11249) and not (user_id = 1 and user_id is not null and ts >= 11250 and ts <= 11251) and not (user_id = 1 and user_id is not null and ts >= 11252 and ts <= 11253) and not (user_id = 1 and user_id is not null and ts >= 11254 and ts <= 11255) and not (user_id = 1 and user_id is not null and ts >= 11256 and ts <= 11257) and not (user_id = 1 and user_id is not null and ts >= 11258 and ts <= 11259) and not (user_id = 1 and user_id is not null and ts >= 11260 and ts <= 11261) and not (user_id = 1 and user_id is not null and ts >= 11262 and ts <= 11263) and not (user_id = 1 and user_id is not null and ts >= 11264 and ts <= 11265) and not (user_id = 1 and user_id is not null and ts >= 11266 and ts <= 11267) and not (user_id = 1 and user_id is not null and ts >= 11268 and ts <= 11269) and not (user_id = 1 and user_id is not null and ts >= 11270 and ts <= 11271) and not (user_id = 1 and user_id is not null and ts >= 11272 and ts <= 11273) and not (user_id = 1 and user_id is not null and ts >= 11274 and ts <= 11275) and not (user_id = 1 and user_id is not null and ts >= 11276 and ts <= 11277) and not (user_id = 1 and user_id is not null and ts >= 11278 and ts <= 11279) and not (user_id = 1 and user_id is not null and ts >= 11280 and ts <= 11281) and not (user_id = 1 and user_id is not null and ts >= 11282 and ts <= 11283) and not (user_id = 1 and user_id is not null and ts >= 11284 and ts <= 11285) and not (user_id = 1 and user_id is not null and ts >= 11286 and ts <= 11287) and not (user_id = 1 and user_id is not null and ts >= 11288 and ts <= 11289) and not (user_id = 1 and user_id is not null and ts >= 11290 and ts <= 11291) and not (user_id = 1 and user_id is not null and ts >= 11292 and ts <= 11293) and not (user_id = 1 and user_id is not null and ts >= 11294 and ts <= 11295) and not (user_id = 1 and user_id is not null and ts >= 11296 and ts <= 11297) and not (user_id = 1 and user_id is not null and ts >= 11298 and ts <= 11299) and not (user_id = 1 and user_id is not null and ts >= 11300 and ts <= 11301) and not (user_id = 1 and user_id is not null and ts >= 11302 and ts <= 11303) and not (user_id = 1 and user_id is not null and ts >= 11304 and ts <= 11305) and not (user_id = 1 and user_id is not null and ts >= 11306 and ts <= 11307) and not (user_id = 1 and user_id is not null and ts >= 11308 and ts <= 11309) and not (user_id = 1 and user_id is not null and ts >= 11310 and ts <= 11311) and not (user_id = 1 and user_id is not null and ts >= 11312 and ts <= 11313) and not (user_id = 1 and user_id is not null and ts >= 11314 and ts <= 11315) and not (user_id = 1 and user_id is not null and ts >= 11316 and ts <= 11317) and not (user_id = 1 and user_id is not null and ts >= 11318 and ts <= 11319) and not (user_id = 1 and user_id is not null and ts >= 11320 and ts <= 11321) and not (user_id = 1 and user_id is not null and ts >= 11322 and ts <= 11323) and not (user_id = 1 and user_id is not null and ts >= 11324 and ts <= 11325) and not (user_id = 1 and user_id is not null and ts >= 11326 and ts <= 11327) and not (user_id = 1 and user_id is not null and ts >= 11328 and ts <= 11329) and not (user_id = 1 and user_id is not null and ts >= 11330 and ts <= 11331) and not (user_id = 1 and user_id is not null and ts >= 11332 and ts <= 11333) and not (user_id = 1 and user_id is not null and ts >= 11334 and ts <= 11335) and not (user_id = 1 and user_id is not null and ts >= 11336 and ts <= 11337) and not (user_id = 1 and user_id is not null and ts >= 11338 and ts <= 11339) and not (user_id = 1 and user_id is not null and ts >= 11340 and ts <= 11341) and not (user_id = 1 and user_id is not null and ts >= 11342 and ts <= 11343) and not (user_id = 1 and user_id is not null and ts >= 11344 and ts <= 11345) and not (user_id = 1 and user_id is not null and ts >= 11346 and ts <= 11347) and not (user_id = 1 and user_id is not null and ts >= 11348 and ts <= 11349) and not (user_id = 1 and user_id is not null and ts >= 11350 and ts <= 11351) and not (user_id = 1 and user_id is not null and ts >= 11352 and ts <= 11353) and not (user_id = 1 and user_id is not null and ts >= 11354 and ts <= 11355) and not (user_id = 1 and user_id is not null and ts >= 11356 and ts <= 11357) and not (user_id = 1 and user_id is not null and ts >= 11358 and ts <= 11359) and not (user_id = 1 and user_id is not null and ts >= 11360 and ts <= 11361) and not (user_id = 1 and user_id is not null and ts >= 11362 and ts <= 11363) and not (user_id = 1 and user_id is not null and ts >= 11364 and ts <= 11365) and not (user_id = 1 and user_id is not null and ts >= 11366 and ts <= 11367) and not (user_id = 1 and user_id is not null and ts >= 11368 and ts <= 11369) and not (user_id = 1 and user_id is not null and ts >= 11370 and ts <= 11371) and not (user_id = 1 and user_id is not null and ts >= 11372 and ts <= 11373) and not (user_id = 1 and user_id is not null and ts >= 11374 and ts <= 11375) and not (user_id = 1 and user_id is not null and ts >= 11376 and ts <= 11377) and not (user_id = 1 and user_id is not null and ts >= 11378 and ts <= 11379) and not (user_id = 1 and user_id is not null and ts >= 11380 and ts <= 11381) and not (user_id = 1 and user_id is not null and ts >= 11382 and ts <= 11383) and not (user_id = 1 and user_id is not null and ts >= 11384 and ts <= 11385) and not (user_id = 1 and user_id is not null and ts >= 11386 and ts <= 11387) and not (user_id = 1 and user_id is not null and ts >= 11388 and ts <= 11389) and not (user_id = 1 and user_id is not null and ts >= 11390 and ts <= 11391) and not (user_id = 1 and user_id is not null and ts >= 11392 and ts <= 11393) and not (user_id = 1 and user_id is not null and ts >= 11394 and ts <= 11395) and not (user_id = 1 and user_id is not null and ts >= 11396 and ts <= 11397) and not (user_id = 1 and user_id is not null and ts >= 11398 and ts <= 11399) and not (user_id = 1 and user_id is not null and ts >= 11400 and ts <= 11401) and not (user_id = 1 and user_id is not null and ts >= 11402 and ts <= 11403) and not (user_id = 1 and user_id is not null and ts >= 11404 and ts <= 11405) and not (user_id = 1 and user_id is not null and ts >= 11406 and ts <= 11407) and not (user_id = 1 and user_id is not null and ts >= 11408 and ts <= 11409) and not (user_id = 1 and user_id is not null and ts >= 11410 and ts <= 11411) and not (user_id = 1 and user_id is not null and ts >= 11412 and ts <= 11413) and not (user_id = 1 and user_id is not null and ts >= 11414 and ts <= 11415) and not (user_id = 1 and user_id is not null and ts >= 11416 and ts <= 11417) and not (user_id = 1 and user_id is not null and ts >= 11418 and ts <= 11419) and not (user_id = 1 and user_id is not null and ts >= 11420 and ts <= 11421) and not (user_id = 1 and user_id is not null and ts >= 11422 and ts <= 11423) and not (user_id = 1 and user_id is not null and ts >= 11424 and ts <= 11425) and not (user_id = 1 and user_id is not null and ts >= 11426 and ts <= 11427) and not (user_id = 1 and user_id is not null and ts >= 11428 and ts <= 11429) and not (user_id = 1 and user_id is not null and ts >= 11430 and ts <= 11431) and not (user_id = 1 and user_id is not null and ts >= 11432 and ts <= 11433) and not (user_id = 1 and user_id is not null and ts >= 11434 and ts <= 11435) and not (user_id = 1 and user_id is not null and ts >= 11436 and ts <= 11437) and not (user_id = 1 and user_id is not null and ts >= 11438 and ts <= 11439) and not (user_id = 1 and user_id is not null and ts >= 11440 and ts <= 11441) and not (user_id = 1 and user_id is not null and ts >= 11442 and ts <= 11443) and not (user_id = 1 and user_id is not null and ts >= 11444 and ts <= 11445) and not (user_id = 1 and user_id is not null and ts >= 11446 and ts <= 11447) and not (user_id = 1 and user_id is not null and ts >= 11448 and ts <= 11449) and not (user_id = 1 and user_id is not null and ts >= 11450 and ts <= 11451) and not (user_id = 1 and user_id is not null and ts >= 11452 and ts <= 11453) and not (user_id = 1 and user_id is not null and ts >= 11454 and ts <= 11455) and not (user_id = 1 and user_id is not null and ts >= 11456 and ts <= 11457) and not (user_id = 1 and user_id is not null and ts >= 11458 and ts <= 11459) and not (user_id = 1 and user_id is not null and ts >= 11460 and ts <= 11461) and not (user_id = 1 and user_id is not null and ts >= 11462 and ts <= 11463) and not (user_id = 1 and user_id is not null and ts >= 11464 and ts <= 11465) and not (user_id = 1 and user_id is not null and ts >= 11466 and ts <= 11467) and not (user_id = 1 and user_id is not null and ts >= 11468 and ts <= 11469) and not (user_id = 1 and user_id is not null and ts >= 11470 and ts <= 11471) and not (user_id = 1 and user_id is not null and ts >= 11472 and ts <= 11473) and not (user_id = 1 and user_id is not null and ts >= 11474 and ts <= 11475) and not (user_id = 1 and user_id is not null and ts >= 11476 and ts <= 11477) and not (user_id = 1 and user_id is not null and ts >= 11478 and ts <= 11479) and not (user_id = 1 and user_id is not null and ts >= 11480 and ts <= 11481) and not (user_id = 1 and user_id is not null and ts >= 11482 and ts <= 11483) and not (user_id = 1 and user_id is not null and ts >= 11484 and ts <= 11485) and not (user_id = 1 and user_id is not null and ts >= 11486 and ts <= 11487) and not (user_id = 1 and user_id is not null and ts >= 11488 and ts <= 11489) and not (user_id = 1 and user_id is not null and ts >= 11490 and ts <= 11491) and not (user_id = 1 and user_id is not null and ts >= 11492 and ts <= 11493) and not (user_id = 1 and user_id is not null and ts >= 11494 and ts <= 11495) and not (user_id = 1 and user_id is not null and ts >= 11496 and ts <= 11497) and not (user_id = 1 and user_id is not null and ts >= 11498 and ts <= 11499) and not (user_id = 1 and user_id is not null and ts >= 11500 and ts <= 11501) and not (user_id = 1 and user_id is not null and ts >= 11502 and ts <= 11503) and not (user_id = 1 and user_id is not null and ts >= 11504 and ts <= 11505) and not (user_id = 1 and user_id is not null and ts >= 11506 and ts <= 11507) and not (user_id = 1 and user_id is not null and ts >= 11508 and ts <= 11509) and not (user_id = 1 and user_id is not null and ts >= 11510 and ts <= 11511) and not (user_id = 1 and user_id is not null and ts >= 11512 and ts <= 11513) and not (user_id = 1 and user_id is not null and ts >= 11514 and ts <= 11515) and not (user_id = 1 and user_id is not null and ts >= 11516 and ts <= 11517) and not (user_id = 1 and user_id is not null and ts >= 11518 and ts <= 11519) and not (user_id = 1 and user_id is not null and ts >= 11520 and ts <= 11521) and not (user_id = 1 and user_id is not null and ts >= 11522 and ts <= 11523) and not (user_id = 1 and user_id is not null and ts >= 11524 and ts <= 11525) and not (user_id = 1 and user_id is not null and ts >= 11526 and ts <= 11527) and not (user_id = 1 and user_id is not null and ts >= 11528 and ts <= 11529) and not (user_id = 1 and user_id is not null and ts >= 11530 and ts <= 11531) and not (user_id = 1 and user_id is not null and ts >= 11532 and ts <= 11533) and not (user_id = 1 and user_id is not null and ts >= 11534 and ts <= 11535) and not (user_id = 1 and user_id is not null and ts >= 11536 and ts <= 11537) and not (user_id = 1 and user_id is not null and ts >= 11538 and ts <= 11539) and not (user_id = 1 and user_id is not null and ts >= 11540 and ts <= 11541) and not (user_id = 1 and user_id is not null and ts >= 11542 and ts <= 11543) and not (user_id = 1 and user_id is not null and ts >= 11544 and ts <= 11545) and not (user_id = 1 and user_id is not null and ts >= 11546 and ts <= 11547) and not (user_id = 1 and user_id is not null and ts >= 11548 and ts <= 11549) and not (user_id = 1 and user_id is not null and ts >= 11550 and ts <= 11551) and not (user_id = 1 and user_id is not null and ts >= 11552 and ts <= 11553) and not (user_id = 1 and user_id is not null and ts >= 11554 and ts <= 11555) and not (user_id = 1 and user_id is not null and ts >= 11556 and ts <= 11557) and not (user_id = 1 and user_id is not null and ts >= 11558 and ts <= 11559) and not (user_id = 1 and user_id is not null and ts >= 11560 and ts <= 11561) and not (user_id = 1 and user_id is not null and ts >= 11562 and ts <= 11563) and not (user_id = 1 and user_id is not null and ts >= 11564 and ts <= 11565) and not (user_id = 1 and user_id is not null and ts >= 11566 and ts <= 11567) and not (user_id = 1 and user_id is not null and ts >= 11568 and ts <= 11569) and not (user_id = 1 and user_id is not null and ts >= 11570 and ts <= 11571) and not (user_id = 1 and user_id is not null and ts >= 11572 and ts <= 11573) and not (user_id = 1 and user_id is not null and ts >= 11574 and ts <= 11575) and not (user_id = 1 and user_id is not null and ts >= 11576 and ts <= 11577) and not (user_id = 1 and user_id is not null and ts >= 11578 and ts <= 11579) and not (user_id = 1 and user_id is not null and ts >= 11580 and ts <= 11581) and not (user_id = 1 and user_id is not null and ts >= 11582 and ts <= 11583) and not (user_id = 1 and user_id is not null and ts >= 11584 and ts <= 11585) and not (user_id = 1 and user_id is not null and ts >= 11586 and ts <= 11587) and not (user_id = 1 and user_id is not null and ts >= 11588 and ts <= 11589) and not (user_id = 1 and user_id is not null and ts >= 11590 and ts <= 11591) and not (user_id = 1 and user_id is not null and ts >= 11592 and ts <= 11593) and not (user_id = 1 and user_id is not null and ts >= 11594 and ts <= 11595) and not (user_id = 1 and user_id is not null and ts >= 11596 and ts <= 11597) and not (user_id = 1 and user_id is not null and ts >= 11598 and ts <= 11599) and not (user_id = 1 and user_id is not null and ts >= 11600 and ts <= 11601) and not (user_id = 1 and user_id is not null and ts >= 11602 and ts <= 11603) and not (user_id = 1 and user_id is not null and ts >= 11604 and ts <= 11605) and not (user_id = 1 and user_id is not null and ts >= 11606 and ts <= 11607) and not (user_id = 1 and user_id is not null and ts >= 11608 and ts <= 11609) and not (user_id = 1 and user_id is not null and ts >= 11610 and ts <= 11611) and not (user_id = 1 and user_id is not null and ts >= 11612 and ts <= 11613) and not (user_id = 1 and user_id is not null and ts >= 11614 and ts <= 11615) and not (user_id = 1 and user_id is not null and ts >= 11616 and ts <= 11617) and not (user_id = 1 and user_id is not null and ts >= 11618 and ts <= 11619) and not (user_id = 1 and user_id is not null and ts >= 11620 and ts <= 11621) and not (user_id = 1 and user_id is not null and ts >= 11622 and ts <= 11623) and not (user_id = 1 and user_id is not null and ts >= 11624 and ts <= 11625) and not (user_id = 1 and user_id is not null and ts >= 11626 and ts <= 11627) and not (user_id = 1 and user_id is not null and ts >= 11628 and ts <= 11629) and not (user_id = 1 and user_id is not null and ts >= 11630 and ts <= 11631) and not (user_id = 1 and user_id is not null and ts >= 11632 and ts <= 11633) and not (user_id = 1 and user_id is not null and ts >= 11634 and ts <= 11635) and not (user_id = 1 and user_id is not null and ts >= 11636 and ts <= 11637) and not (user_id = 1 and user_id is not null and ts >= 11638 and ts <= 11639) and not (user_id = 1 and user_id is not null and ts >= 11640 and ts <= 11641) and not (user_id = 1 and user_id is not null and ts >= 11642 and ts <= 11643) and not (user_id = 1 and user_id is not null and ts >= 11644 and ts <= 11645) and not (user_id = 1 and user_id is not null and ts >= 11646 and ts <= 11647) and not (user_id = 1 and user_id is not null and ts >= 11648 and ts <= 11649) and not (user_id = 1 and user_id is not null and ts >= 11650 and ts <= 11651) and not (user_id = 1 and user_id is not null and ts >= 11652 and ts <= 11653) and not (user_id = 1 and user_id is not null and ts >= 11654 and ts <= 11655) and not (user_id = 1 and user_id is not null and ts >= 11656 and ts <= 11657) and not (user_id = 1 and user_id is not null and ts >= 11658 and ts <= 11659) and not (user_id = 1 and user_id is not null and ts >= 11660 and ts <= 11661) and not (user_id = 1 and user_id is not null and ts >= 11662 and ts <= 11663) and not (user_id = 1 and user_id is not null and ts >= 11664 and ts <= 11665) and not (user_id = 1 and user_id is not null and ts >= 11666 and ts <= 11667) and not (user_id = 1 and user_id is not null and ts >= 11668 and ts <= 11669) and not (user_id = 1 and user_id is not null and ts >= 11670 and ts <= 11671) and not (user_id = 1 and user_id is not null and ts >= 11672 and ts <= 11673) and not (user_id = 1 and user_id is not null and ts >= 11674 and ts <= 11675) and not (user_id = 1 and user_id is not null and ts >= 11676 and ts <= 11677) and not (user_id = 1 and user_id is not null and ts >= 11678 and ts <= 11679) and not (user_id = 1 and user_id is not null and ts >= 11680 and ts <= 11681) and not (user_id = 1 and user_id is not null and ts >= 11682 and ts <= 11683) and not (user_id = 1 and user_id is not null and ts >= 11684 and ts <= 11685) and not (user_id = 1 and user_id is not null and ts >= 11686 and ts <= 11687) and not (user_id = 1 and user_id is not null and ts >= 11688 and ts <= 11689) and not (user_id = 1 and user_id is not null and ts >= 11690 and ts <= 11691) and not (user_id = 1 and user_id is not null and ts >= 11692 and ts <= 11693) and not (user_id = 1 and user_id is not null and ts >= 11694 and ts <= 11695) and not (user_id = 1 and user_id is not null and ts >= 11696 and ts <= 11697) and not (user_id = 1 and user_id is not null and ts >= 11698 and ts <= 11699) and not (user_id = 1 and user_id is not null and ts >= 11700 and ts <= 11701) and not (user_id = 1 and user_id is not null and ts >= 11702 and ts <= 11703) and not (user_id = 1 and user_id is not null and ts >= 11704 and ts <= 11705) and not (user_id = 1 and user_id is not null and ts >= 11706 and ts <= 11707) and not (user_id = 1 and user_id is not null and ts >= 11708 and ts <= 11709) and not (user_id = 1 and user_id is not null and ts >= 11710 and ts <= 11711) and not (user_id = 1 and user_id is not null and ts >= 11712 and ts <= 11713) and not (user_id = 1 and user_id is not null and ts >= 11714 and ts <= 11715) and not (user_id = 1 and user_id is not null and ts >= 11716 and ts <= 11717) and not (user_id = 1 and user_id is not null and ts >= 11718 and ts <= 11719) and not (user_id = 1 and user_id is not null and ts >= 11720 and ts <= 11721) and not (user_id = 1 and user_id is not null and ts >= 11722 and ts <= 11723) and not (user_id = 1 and user_id is not null and ts >= 11724 and ts <= 11725) and not (user_id = 1 and user_id is not null and ts >= 11726 and ts <= 11727) and not (user_id = 1 and user_id is not null and ts >= 11728 and ts <= 11729) and not (user_id = 1 and user_id is not null and ts >= 11730 and ts <= 11731) and not (user_id = 1 and user_id is not null and ts >= 11732 and ts <= 11733) and not (user_id = 1 and user_id is not null and ts >= 11734 and ts <= 11735) and not (user_id = 1 and user_id is not null and ts >= 11736 and ts <= 11737) and not (user_id = 1 and user_id is not null and ts >= 11738 and ts <= 11739) and not (user_id = 1 and user_id is not null and ts >= 11740 and ts <= 11741) and not (user_id = 1 and user_id is not null and ts >= 11742 and ts <= 11743) and not (user_id = 1 and user_id is not null and ts >= 11744 and ts <= 11745) and not (user_id = 1 and user_id is not null and ts >= 11746 and ts <= 11747) and not (user_id = 1 and user_id is not null and ts >= 11748 and ts <= 11749) and not (user_id = 1 and user_id is not null and ts >= 11750 and ts <= 11751) and not (user_id = 1 and user_id is not null and ts >= 11752 and ts <= 11753) and not (user_id = 1 and user_id is not null and ts >= 11754 and ts <= 11755) and not (user_id = 1 and user_id is not null and ts >= 11756 and ts <= 11757) and not (user_id = 1 and user_id is not null and ts >= 11758 and ts <= 11759) and not (user_id = 1 and user_id is not null and ts >= 11760 and ts <= 11761) and not (user_id = 1 and user_id is not null and ts >= 11762 and ts <= 11763) and not (user_id = 1 and user_id is not null and ts >= 11764 and ts <= 11765) and not (user_id = 1 and user_id is not null and ts >= 11766 and ts <= 11767) and not (user_id = 1 and user_id is not null and ts >= 11768 and ts <= 11769) and not (user_id = 1 and user_id is not null and ts >= 11770 and ts <= 11771) and not (user_id = 1 and user_id is not null and ts >= 11772 and ts <= 11773) and not (user_id = 1 and user_id is not null and ts >= 11774 and ts <= 11775) and not (user_id = 1 and user_id is not null and ts >= 11776 and ts <= 11777) and not (user_id = 1 and user_id is not null and ts >= 11778 and ts <= 11779) and not (user_id = 1 and user_id is not null and ts >= 11780 and ts <= 11781) and not (user_id = 1 and user_id is not null and ts >= 11782 and ts <= 11783) and not (user_id = 1 and user_id is not null and ts >= 11784 and ts <= 11785) and not (user_id = 1 and user_id is not null and ts >= 11786 and ts <= 11787) and not (user_id = 1 and user_id is not null and ts >= 11788 and ts <= 11789) and not (user_id = 1 and user_id is not null and ts >= 11790 and ts <= 11791) and not (user_id = 1 and user_id is not null and ts >= 11792 and ts <= 11793) and not (user_id = 1 and user_id is not null and ts >= 11794 and ts <= 11795) and not (user_id = 1 and user_id is not null and ts >= 11796 and ts <= 11797) and not (user_id = 1 and user_id is not null and ts >= 11798 and ts <= 11799) and not (user_id = 1 and user_id is not null and ts >= 11800 and ts <= 11801) and not (user_id = 1 and user_id is not null and ts >= 11802 and ts <= 11803) and not (user_id = 1 and user_id is not null and ts >= 11804 and ts <= 11805) and not (user_id = 1 and user_id is not null and ts >= 11806 and ts <= 11807) and not (user_id = 1 and user_id is not null and ts >= 11808 and ts <= 11809) and not (user_id = 1 and user_id is not null and ts >= 11810 and ts <= 11811) and not (user_id = 1 and user_id is not null and ts >= 11812 and ts <= 11813) and not (user_id = 1 and user_id is not null and ts >= 11814 and ts <= 11815) and not (user_id = 1 and user_id is not null and ts >= 11816 and ts <= 11817) and not (user_id = 1 and user_id is not null and ts >= 11818 and ts <= 11819) and not (user_id = 1 and user_id is not null and ts >= 11820 and ts <= 11821) and not (user_id = 1 and user_id is not null and ts >= 11822 and ts <= 11823) and not (user_id = 1 and user_id is not null and ts >= 11824 and ts <= 11825) and not (user_id = 1 and user_id is not null and ts >= 11826 and ts <= 11827) and not (user_id = 1 and user_id is not null and ts >= 11828 and ts <= 11829) and not (user_id = 1 and user_id is not null and ts >= 11830 and ts <= 11831) and not (user_id = 1 and user_id is not null and ts >= 11832 and ts <= 11833) and not (user_id = 1 and user_id is not null and ts >= 11834 and ts <= 11835) and not (user_id = 1 and user_id is not null and ts >= 11836 and ts <= 11837) and not (user_id = 1 and user_id is not null and ts >= 11838 and ts <= 11839) and not (user_id = 1 and user_id is not null and ts >= 11840 and ts <= 11841) and not (user_id = 1 and user_id is not null and ts >= 11842 and ts <= 11843) and not (user_id = 1 and user_id is not null and ts >= 11844 and ts <= 11845) and not (user_id = 1 and user_id is not null and ts >= 11846 and ts <= 11847) and not (user_id = 1 and user_id is not null and ts >= 11848 and ts <= 11849) and not (user_id = 1 and user_id is not null and ts >= 11850 and ts <= 11851) and not (user_id = 1 and user_id is not null and ts >= 11852 and ts <= 11853) and not (user_id = 1 and user_id is not null and ts >= 11854 and ts <= 11855) and not (user_id = 1 and user_id is not null and ts >= 11856 and ts <= 11857) and not (user_id = 1 and user_id is not null and ts >= 11858 and ts <= 11859) and not (user_id = 1 and user_id is not null and ts >= 11860 and ts <= 11861) and not (user_id = 1 and user_id is not null and ts >= 11862 and ts <= 11863) and not (user_id = 1 and user_id is not null and ts >= 11864 and ts <= 11865) and not (user_id = 1 and user_id is not null and ts >= 11866 and ts <= 11867) and not (user_id = 1 and user_id is not null and ts >= 11868 and ts <= 11869) and not (user_id = 1 and user_id is not null and ts >= 11870 and ts <= 11871) and not (user_id = 1 and user_id is not null and ts >= 11872 and ts <= 11873) and not (user_id = 1 and user_id is not null and ts >= 11874 and ts <= 11875) and not (user_id = 1 and user_id is not null and ts >= 11876 and ts <= 11877) and not (user_id = 1 and user_id is not null and ts >= 11878 and ts <= 11879) and not (user_id = 1 and user_id is not null and ts >= 11880 and ts <= 11881) and not (user_id = 1 and user_id is not null and ts >= 11882 and ts <= 11883) and not (user_id = 1 and user_id is not null and ts >= 11884 and ts <= 11885) and not (user_id = 1 and user_id is not null and ts >= 11886 and ts <= 11887) and not (user_id = 1 and user_id is not null and ts >= 11888 and ts <= 11889) and not (user_id = 1 and user_id is not null and ts >= 11890 and ts <= 11891) and not (user_id = 1 and user_id is not null and ts >= 11892 and ts <= 11893) and not (user_id = 1 and user_id is not null and ts >= 11894 and ts <= 11895) and not (user_id = 1 and user_id is not null and ts >= 11896 and ts <= 11897) and not (user_id = 1 and user_id is not null and ts >= 11898 and ts <= 11899) and not (user_id = 1 and user_id is not null and ts >= 11900 and ts <= 11901) and not (user_id = 1 and user_id is not null and ts >= 11902 and ts <= 11903) and not (user_id = 1 and user_id is not null and ts >= 11904 and ts <= 11905) and not (user_id = 1 and user_id is not null and ts >= 11906 and ts <= 11907) and not (user_id = 1 and user_id is not null and ts >= 11908 and ts <= 11909) and not (user_id = 1 and user_id is not null and ts >= 11910 and ts <= 11911) and not (user_id = 1 and user_id is not null and ts >= 11912 and ts <= 11913) and not (user_id = 1 and user_id is not null and ts >= 11914 and ts <= 11915) and not (user_id = 1 and user_id is not null and ts >= 11916 and ts <= 11917) and not (user_id = 1 and user_id is not null and ts >= 11918 and ts <= 11919) and not (user_id = 1 and user_id is not null and ts >= 11920 and ts <= 11921) and not (user_id = 1 and user_id is not null and ts >= 11922 and ts <= 11923) and not (user_id = 1 and user_id is not null and ts >= 11924 and ts <= 11925) and not (user_id = 1 and user_id is not null and ts >= 11926 and ts <= 11927) and not (user_id = 1 and user_id is not null and ts >= 11928 and ts <= 11929) and not (user_id = 1 and user_id is not null and ts >= 11930 and ts <= 11931) and not (user_id = 1 and user_id is not null and ts >= 11932 and ts <= 11933) and not (user_id = 1 and user_id is not null and ts >= 11934 and ts <= 11935) and not (user_id = 1 and user_id is not null and ts >= 11936 and ts <= 11937) and not (user_id = 1 and user_id is not null and ts >= 11938 and ts <= 11939) and not (user_id = 1 and user_id is not null and ts >= 11940 and ts <= 11941) and not (user_id = 1 and user_id is not null and ts >= 11942 and ts <= 11943) and not (user_id = 1 and user_id is not null and ts >= 11944 and ts <= 11945) and not (user_id = 1 and user_id is not null and ts >= 11946 and ts <= 11947) and not (user_id = 1 and user_id is not null and ts >= 11948 and ts <= 11949) and not (user_id = 1 and user_id is not null and ts >= 11950 and ts <= 11951) and not (user_id = 1 and user_id is not null and ts >= 11952 and ts <= 11953) and not (user_id = 1 and user_id is not null and ts >= 11954 and ts <= 11955) and not (user_id = 1 and user_id is not null and ts >= 11956 and ts <= 11957) and not (user_id = 1 and user_id is not null and ts >= 11958 and ts <= 11959) and not (user_id = 1 and user_id is not null and ts >= 11960 and ts <= 11961) and not (user_id = 1 and user_id is not null and ts >= 11962 and ts <= 11963) and not (user_id = 1 and user_id is not null and ts >= 11964 and ts <= 11965) and not (user_id = 1 and user_id is not null and ts >= 11966 and ts <= 11967) and not (user_id = 1 and user_id is not null and ts >= 11968 and ts <= 11969) and not (user_id = 1 and user_id is not null and ts >= 11970 and ts <= 11971) and not (user_id = 1 and user_id is not null and ts >= 11972 and ts <= 11973) and not (user_id = 1 and user_id is not null and ts >= 11974 and ts <= 11975) and not (user_id = 1 and user_id is not null and ts >= 11976 and ts <= 11977) and not (user_id = 1 and user_id is not null and ts >= 11978 and ts <= 11979) and not (user_id = 1 and user_id is not null and ts >= 11980 and ts <= 11981) and not (user_id = 1 and user_id is not null and ts >= 11982 and ts <= 11983) and not (user_id = 1 and user_id is not null and ts >= 11984 and ts <= 11985) and not (user_id = 1 and user_id is not null and ts >= 11986 and ts <= 11987) and not (user_id = 1 and user_id is not null and ts >= 11988 and ts <= 11989) and not (user_id = 1 and user_id is not null and ts >= 11990 and ts <= 11991) and not (user_id = 1 and user_id is not null and ts >= 11992 and ts <= 11993) and ts >= 113898 and parent_id = 1 order by ts asc limit 100", + "plan": { + "QueryType": "SELECT", + "Original": "select 1 from user where shard_key = 1 and is_removed = 1 and cmd in ('A','B','C') and not (user_id = 1 and user_id is not null and ts >= 1 and ts <= 2) and not (user_id = 1 and user_id is not null and ts >= 12 and ts <= 13) and not (user_id = 1 and user_id is not null and ts >= 14 and ts <= 15) and not (user_id = 1 and user_id is not null and ts >= 16 and ts <= 17) and not (user_id = 1 and user_id is not null and ts >= 18 and ts <= 19) and not (user_id = 1 and user_id is not null and ts >= 110 and ts <= 111) and not (user_id = 1 and user_id is not null and ts >= 112 and ts <= 113) and not (user_id = 1 and user_id is not null and ts >= 114 and ts <= 115) and not (user_id = 1 and user_id is not null and ts >= 116 and ts <= 117) and not (user_id = 1 and user_id is not null and ts >= 118 and ts <= 119) and not (user_id = 1 and user_id is not null and ts >= 120 and ts <= 121) and not (user_id = 1 and user_id is not null and ts >= 122 and ts <= 123) and not (user_id = 1 and user_id is not null and ts >= 124 and ts <= 125) and not (user_id = 1 and user_id is not null and ts >= 126 and ts <= 127) and not (user_id = 1 and user_id is not null and ts >= 128 and ts <= 129) and not (user_id = 1 and user_id is not null and ts >= 130 and ts <= 131) and not (user_id = 1 and user_id is not null and ts >= 132 and ts <= 133) and not (user_id = 1 and user_id is not null and ts >= 134 and ts <= 135) and not (user_id = 1 and user_id is not null and ts >= 136 and ts <= 137) and not (user_id = 1 and user_id is not null and ts >= 138 and ts <= 139) and not (user_id = 1 and user_id is not null and ts >= 140 and ts <= 141) and not (user_id = 1 and user_id is not null and ts >= 142 and ts <= 143) and not (user_id = 1 and user_id is not null and ts >= 144 and ts <= 145) and not (user_id = 1 and user_id is not null and ts >= 146 and ts <= 147) and not (user_id = 1 and user_id is not null and ts >= 148 and ts <= 149) and not (user_id = 1 and user_id is not null and ts >= 150 and ts <= 151) and not (user_id = 1 and user_id is not null and ts >= 152 and ts <= 153) and not (user_id = 1 and user_id is not null and ts >= 154 and ts <= 155) and not (user_id = 1 and user_id is not null and ts >= 156 and ts <= 157) and not (user_id = 1 and user_id is not null and ts >= 158 and ts <= 159) and not (user_id = 1 and user_id is not null and ts >= 160 and ts <= 161) and not (user_id = 1 and user_id is not null and ts >= 162 and ts <= 163) and not (user_id = 1 and user_id is not null and ts >= 164 and ts <= 165) and not (user_id = 1 and user_id is not null and ts >= 166 and ts <= 167) and not (user_id = 1 and user_id is not null and ts >= 168 and ts <= 169) and not (user_id = 1 and user_id is not null and ts >= 170 and ts <= 171) and not (user_id = 1 and user_id is not null and ts >= 172 and ts <= 173) and not (user_id = 1 and user_id is not null and ts >= 174 and ts <= 175) and not (user_id = 1 and user_id is not null and ts >= 176 and ts <= 177) and not (user_id = 1 and user_id is not null and ts >= 178 and ts <= 179) and not (user_id = 1 and user_id is not null and ts >= 180 and ts <= 181) and not (user_id = 1 and user_id is not null and ts >= 182 and ts <= 183) and not (user_id = 1 and user_id is not null and ts >= 184 and ts <= 185) and not (user_id = 1 and user_id is not null and ts >= 186 and ts <= 187) and not (user_id = 1 and user_id is not null and ts >= 188 and ts <= 189) and not (user_id = 1 and user_id is not null and ts >= 190 and ts <= 191) and not (user_id = 1 and user_id is not null and ts >= 192 and ts <= 193) and not (user_id = 1 and user_id is not null and ts >= 194 and ts <= 195) and not (user_id = 1 and user_id is not null and ts >= 196 and ts <= 197) and not (user_id = 1 and user_id is not null and ts >= 198 and ts <= 199) and not (user_id = 1 and user_id is not null and ts >= 1100 and ts <= 1101) and not (user_id = 1 and user_id is not null and ts >= 1102 and ts <= 1103) and not (user_id = 1 and user_id is not null and ts >= 1104 and ts <= 1105) and not (user_id = 1 and user_id is not null and ts >= 1106 and ts <= 1107) and not (user_id = 1 and user_id is not null and ts >= 1108 and ts <= 1109) and not (user_id = 1 and user_id is not null and ts >= 1110 and ts <= 1111) and not (user_id = 1 and user_id is not null and ts >= 1112 and ts <= 1113) and not (user_id = 1 and user_id is not null and ts >= 1114 and ts <= 1115) and not (user_id = 1 and user_id is not null and ts >= 1116 and ts <= 1117) and not (user_id = 1 and user_id is not null and ts >= 1118 and ts <= 1119) and not (user_id = 1 and user_id is not null and ts >= 1120 and ts <= 1121) and not (user_id = 1 and user_id is not null and ts >= 1122 and ts <= 1123) and not (user_id = 1 and user_id is not null and ts >= 1124 and ts <= 1125) and not (user_id = 1 and user_id is not null and ts >= 1126 and ts <= 1127) and not (user_id = 1 and user_id is not null and ts >= 1128 and ts <= 1129) and not (user_id = 1 and user_id is not null and ts >= 1130 and ts <= 1131) and not (user_id = 1 and user_id is not null and ts >= 1132 and ts <= 1133) and not (user_id = 1 and user_id is not null and ts >= 1134 and ts <= 1135) and not (user_id = 1 and user_id is not null and ts >= 1136 and ts <= 1137) and not (user_id = 1 and user_id is not null and ts >= 1138 and ts <= 1139) and not (user_id = 1 and user_id is not null and ts >= 1140 and ts <= 1141) and not (user_id = 1 and user_id is not null and ts >= 1142 and ts <= 1143) and not (user_id = 1 and user_id is not null and ts >= 1144 and ts <= 1145) and not (user_id = 1 and user_id is not null and ts >= 1146 and ts <= 1147) and not (user_id = 1 and user_id is not null and ts >= 1148 and ts <= 1149) and not (user_id = 1 and user_id is not null and ts >= 1150 and ts <= 1151) and not (user_id = 1 and user_id is not null and ts >= 1152 and ts <= 1153) and not (user_id = 1 and user_id is not null and ts >= 1154 and ts <= 1155) and not (user_id = 1 and user_id is not null and ts >= 1156 and ts <= 1157) and not (user_id = 1 and user_id is not null and ts >= 1158 and ts <= 1159) and not (user_id = 1 and user_id is not null and ts >= 1160 and ts <= 1161) and not (user_id = 1 and user_id is not null and ts >= 1162 and ts <= 1163) and not (user_id = 1 and user_id is not null and ts >= 1164 and ts <= 1165) and not (user_id = 1 and user_id is not null and ts >= 1166 and ts <= 1167) and not (user_id = 1 and user_id is not null and ts >= 1168 and ts <= 1169) and not (user_id = 1 and user_id is not null and ts >= 1170 and ts <= 1171) and not (user_id = 1 and user_id is not null and ts >= 1172 and ts <= 1173) and not (user_id = 1 and user_id is not null and ts >= 1174 and ts <= 1175) and not (user_id = 1 and user_id is not null and ts >= 1176 and ts <= 1177) and not (user_id = 1 and user_id is not null and ts >= 1178 and ts <= 1179) and not (user_id = 1 and user_id is not null and ts >= 1180 and ts <= 1181) and not (user_id = 1 and user_id is not null and ts >= 1182 and ts <= 1183) and not (user_id = 1 and user_id is not null and ts >= 1184 and ts <= 1185) and not (user_id = 1 and user_id is not null and ts >= 1186 and ts <= 1187) and not (user_id = 1 and user_id is not null and ts >= 1188 and ts <= 1189) and not (user_id = 1 and user_id is not null and ts >= 1190 and ts <= 1191) and not (user_id = 1 and user_id is not null and ts >= 1192 and ts <= 1193) and not (user_id = 1 and user_id is not null and ts >= 1194 and ts <= 1195) and not (user_id = 1 and user_id is not null and ts >= 1196 and ts <= 1197) and not (user_id = 1 and user_id is not null and ts >= 1198 and ts <= 1199) and not (user_id = 1 and user_id is not null and ts >= 1200 and ts <= 1201) and not (user_id = 1 and user_id is not null and ts >= 1202 and ts <= 1203) and not (user_id = 1 and user_id is not null and ts >= 1204 and ts <= 1205) and not (user_id = 1 and user_id is not null and ts >= 1206 and ts <= 1207) and not (user_id = 1 and user_id is not null and ts >= 1208 and ts <= 1209) and not (user_id = 1 and user_id is not null and ts >= 1210 and ts <= 1211) and not (user_id = 1 and user_id is not null and ts >= 1212 and ts <= 1213) and not (user_id = 1 and user_id is not null and ts >= 1214 and ts <= 1215) and not (user_id = 1 and user_id is not null and ts >= 1216 and ts <= 1217) and not (user_id = 1 and user_id is not null and ts >= 1218 and ts <= 1219) and not (user_id = 1 and user_id is not null and ts >= 1220 and ts <= 1221) and not (user_id = 1 and user_id is not null and ts >= 1222 and ts <= 1223) and not (user_id = 1 and user_id is not null and ts >= 1224 and ts <= 1225) and not (user_id = 1 and user_id is not null and ts >= 1226 and ts <= 1227) and not (user_id = 1 and user_id is not null and ts >= 1228 and ts <= 1229) and not (user_id = 1 and user_id is not null and ts >= 1230 and ts <= 1231) and not (user_id = 1 and user_id is not null and ts >= 1232 and ts <= 1233) and not (user_id = 1 and user_id is not null and ts >= 1234 and ts <= 1235) and not (user_id = 1 and user_id is not null and ts >= 1236 and ts <= 1237) and not (user_id = 1 and user_id is not null and ts >= 1238 and ts <= 1239) and not (user_id = 1 and user_id is not null and ts >= 1240 and ts <= 1241) and not (user_id = 1 and user_id is not null and ts >= 1242 and ts <= 1243) and not (user_id = 1 and user_id is not null and ts >= 1244 and ts <= 1245) and not (user_id = 1 and user_id is not null and ts >= 1246 and ts <= 1247) and not (user_id = 1 and user_id is not null and ts >= 1248 and ts <= 1249) and not (user_id = 1 and user_id is not null and ts >= 1250 and ts <= 1251) and not (user_id = 1 and user_id is not null and ts >= 1252 and ts <= 1253) and not (user_id = 1 and user_id is not null and ts >= 1254 and ts <= 1255) and not (user_id = 1 and user_id is not null and ts >= 1256 and ts <= 1257) and not (user_id = 1 and user_id is not null and ts >= 1258 and ts <= 1259) and not (user_id = 1 and user_id is not null and ts >= 1260 and ts <= 1261) and not (user_id = 1 and user_id is not null and ts >= 1262 and ts <= 1263) and not (user_id = 1 and user_id is not null and ts >= 1264 and ts <= 1265) and not (user_id = 1 and user_id is not null and ts >= 1266 and ts <= 1267) and not (user_id = 1 and user_id is not null and ts >= 1268 and ts <= 1269) and not (user_id = 1 and user_id is not null and ts >= 1270 and ts <= 1271) and not (user_id = 1 and user_id is not null and ts >= 1272 and ts <= 1273) and not (user_id = 1 and user_id is not null and ts >= 1274 and ts <= 1275) and not (user_id = 1 and user_id is not null and ts >= 1276 and ts <= 1277) and not (user_id = 1 and user_id is not null and ts >= 1278 and ts <= 1279) and not (user_id = 1 and user_id is not null and ts >= 1280 and ts <= 1281) and not (user_id = 1 and user_id is not null and ts >= 1282 and ts <= 1283) and not (user_id = 1 and user_id is not null and ts >= 1284 and ts <= 1285) and not (user_id = 1 and user_id is not null and ts >= 1286 and ts <= 1287) and not (user_id = 1 and user_id is not null and ts >= 1288 and ts <= 1289) and not (user_id = 1 and user_id is not null and ts >= 1290 and ts <= 1291) and not (user_id = 1 and user_id is not null and ts >= 1292 and ts <= 1293) and not (user_id = 1 and user_id is not null and ts >= 1294 and ts <= 1295) and not (user_id = 1 and user_id is not null and ts >= 1296 and ts <= 1297) and not (user_id = 1 and user_id is not null and ts >= 1298 and ts <= 1299) and not (user_id = 1 and user_id is not null and ts >= 1300 and ts <= 1301) and not (user_id = 1 and user_id is not null and ts >= 1302 and ts <= 1303) and not (user_id = 1 and user_id is not null and ts >= 1304 and ts <= 1305) and not (user_id = 1 and user_id is not null and ts >= 1306 and ts <= 1307) and not (user_id = 1 and user_id is not null and ts >= 1308 and ts <= 1309) and not (user_id = 1 and user_id is not null and ts >= 1310 and ts <= 1311) and not (user_id = 1 and user_id is not null and ts >= 1312 and ts <= 1313) and not (user_id = 1 and user_id is not null and ts >= 1314 and ts <= 1315) and not (user_id = 1 and user_id is not null and ts >= 1316 and ts <= 1317) and not (user_id = 1 and user_id is not null and ts >= 1318 and ts <= 1319) and not (user_id = 1 and user_id is not null and ts >= 1320 and ts <= 1321) and not (user_id = 1 and user_id is not null and ts >= 1322 and ts <= 1323) and not (user_id = 1 and user_id is not null and ts >= 1324 and ts <= 1325) and not (user_id = 1 and user_id is not null and ts >= 1326 and ts <= 1327) and not (user_id = 1 and user_id is not null and ts >= 1328 and ts <= 1329) and not (user_id = 1 and user_id is not null and ts >= 1330 and ts <= 1331) and not (user_id = 1 and user_id is not null and ts >= 1332 and ts <= 1333) and not (user_id = 1 and user_id is not null and ts >= 1334 and ts <= 1335) and not (user_id = 1 and user_id is not null and ts >= 1336 and ts <= 1337) and not (user_id = 1 and user_id is not null and ts >= 1338 and ts <= 1339) and not (user_id = 1 and user_id is not null and ts >= 1340 and ts <= 1341) and not (user_id = 1 and user_id is not null and ts >= 1342 and ts <= 1343) and not (user_id = 1 and user_id is not null and ts >= 1344 and ts <= 1345) and not (user_id = 1 and user_id is not null and ts >= 1346 and ts <= 1347) and not (user_id = 1 and user_id is not null and ts >= 1348 and ts <= 1349) and not (user_id = 1 and user_id is not null and ts >= 1350 and ts <= 1351) and not (user_id = 1 and user_id is not null and ts >= 1352 and ts <= 1353) and not (user_id = 1 and user_id is not null and ts >= 1354 and ts <= 1355) and not (user_id = 1 and user_id is not null and ts >= 1356 and ts <= 1357) and not (user_id = 1 and user_id is not null and ts >= 1358 and ts <= 1359) and not (user_id = 1 and user_id is not null and ts >= 1360 and ts <= 1361) and not (user_id = 1 and user_id is not null and ts >= 1362 and ts <= 1363) and not (user_id = 1 and user_id is not null and ts >= 1364 and ts <= 1365) and not (user_id = 1 and user_id is not null and ts >= 1366 and ts <= 1367) and not (user_id = 1 and user_id is not null and ts >= 1368 and ts <= 1369) and not (user_id = 1 and user_id is not null and ts >= 1370 and ts <= 1371) and not (user_id = 1 and user_id is not null and ts >= 1372 and ts <= 1373) and not (user_id = 1 and user_id is not null and ts >= 1374 and ts <= 1375) and not (user_id = 1 and user_id is not null and ts >= 1376 and ts <= 1377) and not (user_id = 1 and user_id is not null and ts >= 1378 and ts <= 1379) and not (user_id = 1 and user_id is not null and ts >= 1380 and ts <= 1381) and not (user_id = 1 and user_id is not null and ts >= 1382 and ts <= 1383) and not (user_id = 1 and user_id is not null and ts >= 1384 and ts <= 1385) and not (user_id = 1 and user_id is not null and ts >= 1386 and ts <= 1387) and not (user_id = 1 and user_id is not null and ts >= 1388 and ts <= 1389) and not (user_id = 1 and user_id is not null and ts >= 1390 and ts <= 1391) and not (user_id = 1 and user_id is not null and ts >= 1392 and ts <= 1393) and not (user_id = 1 and user_id is not null and ts >= 1394 and ts <= 1395) and not (user_id = 1 and user_id is not null and ts >= 1396 and ts <= 1397) and not (user_id = 1 and user_id is not null and ts >= 1398 and ts <= 1399) and not (user_id = 1 and user_id is not null and ts >= 1400 and ts <= 1401) and not (user_id = 1 and user_id is not null and ts >= 1402 and ts <= 1403) and not (user_id = 1 and user_id is not null and ts >= 1404 and ts <= 1405) and not (user_id = 1 and user_id is not null and ts >= 1406 and ts <= 1407) and not (user_id = 1 and user_id is not null and ts >= 1408 and ts <= 1409) and not (user_id = 1 and user_id is not null and ts >= 1410 and ts <= 1411) and not (user_id = 1 and user_id is not null and ts >= 1412 and ts <= 1413) and not (user_id = 1 and user_id is not null and ts >= 1414 and ts <= 1415) and not (user_id = 1 and user_id is not null and ts >= 1416 and ts <= 1417) and not (user_id = 1 and user_id is not null and ts >= 1418 and ts <= 1419) and not (user_id = 1 and user_id is not null and ts >= 1420 and ts <= 1421) and not (user_id = 1 and user_id is not null and ts >= 1422 and ts <= 1423) and not (user_id = 1 and user_id is not null and ts >= 1424 and ts <= 1425) and not (user_id = 1 and user_id is not null and ts >= 1426 and ts <= 1427) and not (user_id = 1 and user_id is not null and ts >= 1428 and ts <= 1429) and not (user_id = 1 and user_id is not null and ts >= 1430 and ts <= 1431) and not (user_id = 1 and user_id is not null and ts >= 1432 and ts <= 1433) and not (user_id = 1 and user_id is not null and ts >= 1434 and ts <= 1435) and not (user_id = 1 and user_id is not null and ts >= 1436 and ts <= 1437) and not (user_id = 1 and user_id is not null and ts >= 1438 and ts <= 1439) and not (user_id = 1 and user_id is not null and ts >= 1440 and ts <= 1441) and not (user_id = 1 and user_id is not null and ts >= 1442 and ts <= 1443) and not (user_id = 1 and user_id is not null and ts >= 1444 and ts <= 1445) and not (user_id = 1 and user_id is not null and ts >= 1446 and ts <= 1447) and not (user_id = 1 and user_id is not null and ts >= 1448 and ts <= 1449) and not (user_id = 1 and user_id is not null and ts >= 1450 and ts <= 1451) and not (user_id = 1 and user_id is not null and ts >= 1452 and ts <= 1453) and not (user_id = 1 and user_id is not null and ts >= 1454 and ts <= 1455) and not (user_id = 1 and user_id is not null and ts >= 1456 and ts <= 1457) and not (user_id = 1 and user_id is not null and ts >= 1458 and ts <= 1459) and not (user_id = 1 and user_id is not null and ts >= 1460 and ts <= 1461) and not (user_id = 1 and user_id is not null and ts >= 1462 and ts <= 1463) and not (user_id = 1 and user_id is not null and ts >= 1464 and ts <= 1465) and not (user_id = 1 and user_id is not null and ts >= 1466 and ts <= 1467) and not (user_id = 1 and user_id is not null and ts >= 1468 and ts <= 1469) and not (user_id = 1 and user_id is not null and ts >= 1470 and ts <= 1471) and not (user_id = 1 and user_id is not null and ts >= 1472 and ts <= 1473) and not (user_id = 1 and user_id is not null and ts >= 1474 and ts <= 1475) and not (user_id = 1 and user_id is not null and ts >= 1476 and ts <= 1477) and not (user_id = 1 and user_id is not null and ts >= 1478 and ts <= 1479) and not (user_id = 1 and user_id is not null and ts >= 1480 and ts <= 1481) and not (user_id = 1 and user_id is not null and ts >= 1482 and ts <= 1483) and not (user_id = 1 and user_id is not null and ts >= 1484 and ts <= 1485) and not (user_id = 1 and user_id is not null and ts >= 1486 and ts <= 1487) and not (user_id = 1 and user_id is not null and ts >= 1488 and ts <= 1489) and not (user_id = 1 and user_id is not null and ts >= 1490 and ts <= 1491) and not (user_id = 1 and user_id is not null and ts >= 1492 and ts <= 1493) and not (user_id = 1 and user_id is not null and ts >= 1494 and ts <= 1495) and not (user_id = 1 and user_id is not null and ts >= 1496 and ts <= 1497) and not (user_id = 1 and user_id is not null and ts >= 1498 and ts <= 1499) and not (user_id = 1 and user_id is not null and ts >= 1500 and ts <= 1501) and not (user_id = 1 and user_id is not null and ts >= 1502 and ts <= 1503) and not (user_id = 1 and user_id is not null and ts >= 1504 and ts <= 1505) and not (user_id = 1 and user_id is not null and ts >= 1506 and ts <= 1507) and not (user_id = 1 and user_id is not null and ts >= 1508 and ts <= 1509) and not (user_id = 1 and user_id is not null and ts >= 1510 and ts <= 1511) and not (user_id = 1 and user_id is not null and ts >= 1512 and ts <= 1513) and not (user_id = 1 and user_id is not null and ts >= 1514 and ts <= 1515) and not (user_id = 1 and user_id is not null and ts >= 1516 and ts <= 1517) and not (user_id = 1 and user_id is not null and ts >= 1518 and ts <= 1519) and not (user_id = 1 and user_id is not null and ts >= 1520 and ts <= 1521) and not (user_id = 1 and user_id is not null and ts >= 1522 and ts <= 1523) and not (user_id = 1 and user_id is not null and ts >= 1524 and ts <= 1525) and not (user_id = 1 and user_id is not null and ts >= 1526 and ts <= 1527) and not (user_id = 1 and user_id is not null and ts >= 1528 and ts <= 1529) and not (user_id = 1 and user_id is not null and ts >= 1530 and ts <= 1531) and not (user_id = 1 and user_id is not null and ts >= 1532 and ts <= 1533) and not (user_id = 1 and user_id is not null and ts >= 1534 and ts <= 1535) and not (user_id = 1 and user_id is not null and ts >= 1536 and ts <= 1537) and not (user_id = 1 and user_id is not null and ts >= 1538 and ts <= 1539) and not (user_id = 1 and user_id is not null and ts >= 1540 and ts <= 1541) and not (user_id = 1 and user_id is not null and ts >= 1542 and ts <= 1543) and not (user_id = 1 and user_id is not null and ts >= 1544 and ts <= 1545) and not (user_id = 1 and user_id is not null and ts >= 1546 and ts <= 1547) and not (user_id = 1 and user_id is not null and ts >= 1548 and ts <= 1549) and not (user_id = 1 and user_id is not null and ts >= 1550 and ts <= 1551) and not (user_id = 1 and user_id is not null and ts >= 1552 and ts <= 1553) and not (user_id = 1 and user_id is not null and ts >= 1554 and ts <= 1555) and not (user_id = 1 and user_id is not null and ts >= 1556 and ts <= 1557) and not (user_id = 1 and user_id is not null and ts >= 1558 and ts <= 1559) and not (user_id = 1 and user_id is not null and ts >= 1560 and ts <= 1561) and not (user_id = 1 and user_id is not null and ts >= 1562 and ts <= 1563) and not (user_id = 1 and user_id is not null and ts >= 1564 and ts <= 1565) and not (user_id = 1 and user_id is not null and ts >= 1566 and ts <= 1567) and not (user_id = 1 and user_id is not null and ts >= 1568 and ts <= 1569) and not (user_id = 1 and user_id is not null and ts >= 1570 and ts <= 1571) and not (user_id = 1 and user_id is not null and ts >= 1572 and ts <= 1573) and not (user_id = 1 and user_id is not null and ts >= 1574 and ts <= 1575) and not (user_id = 1 and user_id is not null and ts >= 1576 and ts <= 1577) and not (user_id = 1 and user_id is not null and ts >= 1578 and ts <= 1579) and not (user_id = 1 and user_id is not null and ts >= 1580 and ts <= 1581) and not (user_id = 1 and user_id is not null and ts >= 1582 and ts <= 1583) and not (user_id = 1 and user_id is not null and ts >= 1584 and ts <= 1585) and not (user_id = 1 and user_id is not null and ts >= 1586 and ts <= 1587) and not (user_id = 1 and user_id is not null and ts >= 1588 and ts <= 1589) and not (user_id = 1 and user_id is not null and ts >= 1590 and ts <= 1591) and not (user_id = 1 and user_id is not null and ts >= 1592 and ts <= 1593) and not (user_id = 1 and user_id is not null and ts >= 1594 and ts <= 1595) and not (user_id = 1 and user_id is not null and ts >= 1596 and ts <= 1597) and not (user_id = 1 and user_id is not null and ts >= 1598 and ts <= 1599) and not (user_id = 1 and user_id is not null and ts >= 1600 and ts <= 1601) and not (user_id = 1 and user_id is not null and ts >= 1602 and ts <= 1603) and not (user_id = 1 and user_id is not null and ts >= 1604 and ts <= 1605) and not (user_id = 1 and user_id is not null and ts >= 1606 and ts <= 1607) and not (user_id = 1 and user_id is not null and ts >= 1608 and ts <= 1609) and not (user_id = 1 and user_id is not null and ts >= 1610 and ts <= 1611) and not (user_id = 1 and user_id is not null and ts >= 1612 and ts <= 1613) and not (user_id = 1 and user_id is not null and ts >= 1614 and ts <= 1615) and not (user_id = 1 and user_id is not null and ts >= 1616 and ts <= 1617) and not (user_id = 1 and user_id is not null and ts >= 1618 and ts <= 1619) and not (user_id = 1 and user_id is not null and ts >= 1620 and ts <= 1621) and not (user_id = 1 and user_id is not null and ts >= 1622 and ts <= 1623) and not (user_id = 1 and user_id is not null and ts >= 1624 and ts <= 1625) and not (user_id = 1 and user_id is not null and ts >= 1626 and ts <= 1627) and not (user_id = 1 and user_id is not null and ts >= 1628 and ts <= 1629) and not (user_id = 1 and user_id is not null and ts >= 1630 and ts <= 1631) and not (user_id = 1 and user_id is not null and ts >= 1632 and ts <= 1633) and not (user_id = 1 and user_id is not null and ts >= 1634 and ts <= 1635) and not (user_id = 1 and user_id is not null and ts >= 1636 and ts <= 1637) and not (user_id = 1 and user_id is not null and ts >= 1638 and ts <= 1639) and not (user_id = 1 and user_id is not null and ts >= 1640 and ts <= 1641) and not (user_id = 1 and user_id is not null and ts >= 1642 and ts <= 1643) and not (user_id = 1 and user_id is not null and ts >= 1644 and ts <= 1645) and not (user_id = 1 and user_id is not null and ts >= 1646 and ts <= 1647) and not (user_id = 1 and user_id is not null and ts >= 1648 and ts <= 1649) and not (user_id = 1 and user_id is not null and ts >= 1650 and ts <= 1651) and not (user_id = 1 and user_id is not null and ts >= 1652 and ts <= 1653) and not (user_id = 1 and user_id is not null and ts >= 1654 and ts <= 1655) and not (user_id = 1 and user_id is not null and ts >= 1656 and ts <= 1657) and not (user_id = 1 and user_id is not null and ts >= 1658 and ts <= 1659) and not (user_id = 1 and user_id is not null and ts >= 1660 and ts <= 1661) and not (user_id = 1 and user_id is not null and ts >= 1662 and ts <= 1663) and not (user_id = 1 and user_id is not null and ts >= 1664 and ts <= 1665) and not (user_id = 1 and user_id is not null and ts >= 1666 and ts <= 1667) and not (user_id = 1 and user_id is not null and ts >= 1668 and ts <= 1669) and not (user_id = 1 and user_id is not null and ts >= 1670 and ts <= 1671) and not (user_id = 1 and user_id is not null and ts >= 1672 and ts <= 1673) and not (user_id = 1 and user_id is not null and ts >= 1674 and ts <= 1675) and not (user_id = 1 and user_id is not null and ts >= 1676 and ts <= 1677) and not (user_id = 1 and user_id is not null and ts >= 1678 and ts <= 1679) and not (user_id = 1 and user_id is not null and ts >= 1680 and ts <= 1681) and not (user_id = 1 and user_id is not null and ts >= 1682 and ts <= 1683) and not (user_id = 1 and user_id is not null and ts >= 1684 and ts <= 1685) and not (user_id = 1 and user_id is not null and ts >= 1686 and ts <= 1687) and not (user_id = 1 and user_id is not null and ts >= 1688 and ts <= 1689) and not (user_id = 1 and user_id is not null and ts >= 1690 and ts <= 1691) and not (user_id = 1 and user_id is not null and ts >= 1692 and ts <= 1693) and not (user_id = 1 and user_id is not null and ts >= 1694 and ts <= 1695) and not (user_id = 1 and user_id is not null and ts >= 1696 and ts <= 1697) and not (user_id = 1 and user_id is not null and ts >= 1698 and ts <= 1699) and not (user_id = 1 and user_id is not null and ts >= 1700 and ts <= 1701) and not (user_id = 1 and user_id is not null and ts >= 1702 and ts <= 1703) and not (user_id = 1 and user_id is not null and ts >= 1704 and ts <= 1705) and not (user_id = 1 and user_id is not null and ts >= 1706 and ts <= 1707) and not (user_id = 1 and user_id is not null and ts >= 1708 and ts <= 1709) and not (user_id = 1 and user_id is not null and ts >= 1710 and ts <= 1711) and not (user_id = 1 and user_id is not null and ts >= 1712 and ts <= 1713) and not (user_id = 1 and user_id is not null and ts >= 1714 and ts <= 1715) and not (user_id = 1 and user_id is not null and ts >= 1716 and ts <= 1717) and not (user_id = 1 and user_id is not null and ts >= 1718 and ts <= 1719) and not (user_id = 1 and user_id is not null and ts >= 1720 and ts <= 1721) and not (user_id = 1 and user_id is not null and ts >= 1722 and ts <= 1723) and not (user_id = 1 and user_id is not null and ts >= 1724 and ts <= 1725) and not (user_id = 1 and user_id is not null and ts >= 1726 and ts <= 1727) and not (user_id = 1 and user_id is not null and ts >= 1728 and ts <= 1729) and not (user_id = 1 and user_id is not null and ts >= 1730 and ts <= 1731) and not (user_id = 1 and user_id is not null and ts >= 1732 and ts <= 1733) and not (user_id = 1 and user_id is not null and ts >= 1734 and ts <= 1735) and not (user_id = 1 and user_id is not null and ts >= 1736 and ts <= 1737) and not (user_id = 1 and user_id is not null and ts >= 1738 and ts <= 1739) and not (user_id = 1 and user_id is not null and ts >= 1740 and ts <= 1741) and not (user_id = 1 and user_id is not null and ts >= 1742 and ts <= 1743) and not (user_id = 1 and user_id is not null and ts >= 1744 and ts <= 1745) and not (user_id = 1 and user_id is not null and ts >= 1746 and ts <= 1747) and not (user_id = 1 and user_id is not null and ts >= 1748 and ts <= 1749) and not (user_id = 1 and user_id is not null and ts >= 1750 and ts <= 1751) and not (user_id = 1 and user_id is not null and ts >= 1752 and ts <= 1753) and not (user_id = 1 and user_id is not null and ts >= 1754 and ts <= 1755) and not (user_id = 1 and user_id is not null and ts >= 1756 and ts <= 1757) and not (user_id = 1 and user_id is not null and ts >= 1758 and ts <= 1759) and not (user_id = 1 and user_id is not null and ts >= 1760 and ts <= 1761) and not (user_id = 1 and user_id is not null and ts >= 1762 and ts <= 1763) and not (user_id = 1 and user_id is not null and ts >= 1764 and ts <= 1765) and not (user_id = 1 and user_id is not null and ts >= 1766 and ts <= 1767) and not (user_id = 1 and user_id is not null and ts >= 1768 and ts <= 1769) and not (user_id = 1 and user_id is not null and ts >= 1770 and ts <= 1771) and not (user_id = 1 and user_id is not null and ts >= 1772 and ts <= 1773) and not (user_id = 1 and user_id is not null and ts >= 1774 and ts <= 1775) and not (user_id = 1 and user_id is not null and ts >= 1776 and ts <= 1777) and not (user_id = 1 and user_id is not null and ts >= 1778 and ts <= 1779) and not (user_id = 1 and user_id is not null and ts >= 1780 and ts <= 1781) and not (user_id = 1 and user_id is not null and ts >= 1782 and ts <= 1783) and not (user_id = 1 and user_id is not null and ts >= 1784 and ts <= 1785) and not (user_id = 1 and user_id is not null and ts >= 1786 and ts <= 1787) and not (user_id = 1 and user_id is not null and ts >= 1788 and ts <= 1789) and not (user_id = 1 and user_id is not null and ts >= 1790 and ts <= 1791) and not (user_id = 1 and user_id is not null and ts >= 1792 and ts <= 1793) and not (user_id = 1 and user_id is not null and ts >= 1794 and ts <= 1795) and not (user_id = 1 and user_id is not null and ts >= 1796 and ts <= 1797) and not (user_id = 1 and user_id is not null and ts >= 1798 and ts <= 1799) and not (user_id = 1 and user_id is not null and ts >= 1800 and ts <= 1801) and not (user_id = 1 and user_id is not null and ts >= 1802 and ts <= 1803) and not (user_id = 1 and user_id is not null and ts >= 1804 and ts <= 1805) and not (user_id = 1 and user_id is not null and ts >= 1806 and ts <= 1807) and not (user_id = 1 and user_id is not null and ts >= 1808 and ts <= 1809) and not (user_id = 1 and user_id is not null and ts >= 1810 and ts <= 1811) and not (user_id = 1 and user_id is not null and ts >= 1812 and ts <= 1813) and not (user_id = 1 and user_id is not null and ts >= 1814 and ts <= 1815) and not (user_id = 1 and user_id is not null and ts >= 1816 and ts <= 1817) and not (user_id = 1 and user_id is not null and ts >= 1818 and ts <= 1819) and not (user_id = 1 and user_id is not null and ts >= 1820 and ts <= 1821) and not (user_id = 1 and user_id is not null and ts >= 1822 and ts <= 1823) and not (user_id = 1 and user_id is not null and ts >= 1824 and ts <= 1825) and not (user_id = 1 and user_id is not null and ts >= 1826 and ts <= 1827) and not (user_id = 1 and user_id is not null and ts >= 1828 and ts <= 1829) and not (user_id = 1 and user_id is not null and ts >= 1830 and ts <= 1831) and not (user_id = 1 and user_id is not null and ts >= 1832 and ts <= 1833) and not (user_id = 1 and user_id is not null and ts >= 1834 and ts <= 1835) and not (user_id = 1 and user_id is not null and ts >= 1836 and ts <= 1837) and not (user_id = 1 and user_id is not null and ts >= 1838 and ts <= 1839) and not (user_id = 1 and user_id is not null and ts >= 1840 and ts <= 1841) and not (user_id = 1 and user_id is not null and ts >= 1842 and ts <= 1843) and not (user_id = 1 and user_id is not null and ts >= 1844 and ts <= 1845) and not (user_id = 1 and user_id is not null and ts >= 1846 and ts <= 1847) and not (user_id = 1 and user_id is not null and ts >= 1848 and ts <= 1849) and not (user_id = 1 and user_id is not null and ts >= 1850 and ts <= 1851) and not (user_id = 1 and user_id is not null and ts >= 1852 and ts <= 1853) and not (user_id = 1 and user_id is not null and ts >= 1854 and ts <= 1855) and not (user_id = 1 and user_id is not null and ts >= 1856 and ts <= 1857) and not (user_id = 1 and user_id is not null and ts >= 1858 and ts <= 1859) and not (user_id = 1 and user_id is not null and ts >= 1860 and ts <= 1861) and not (user_id = 1 and user_id is not null and ts >= 1862 and ts <= 1863) and not (user_id = 1 and user_id is not null and ts >= 1864 and ts <= 1865) and not (user_id = 1 and user_id is not null and ts >= 1866 and ts <= 1867) and not (user_id = 1 and user_id is not null and ts >= 1868 and ts <= 1869) and not (user_id = 1 and user_id is not null and ts >= 1870 and ts <= 1871) and not (user_id = 1 and user_id is not null and ts >= 1872 and ts <= 1873) and not (user_id = 1 and user_id is not null and ts >= 1874 and ts <= 1875) and not (user_id = 1 and user_id is not null and ts >= 1876 and ts <= 1877) and not (user_id = 1 and user_id is not null and ts >= 1878 and ts <= 1879) and not (user_id = 1 and user_id is not null and ts >= 1880 and ts <= 1881) and not (user_id = 1 and user_id is not null and ts >= 1882 and ts <= 1883) and not (user_id = 1 and user_id is not null and ts >= 1884 and ts <= 1885) and not (user_id = 1 and user_id is not null and ts >= 1886 and ts <= 1887) and not (user_id = 1 and user_id is not null and ts >= 1888 and ts <= 1889) and not (user_id = 1 and user_id is not null and ts >= 1890 and ts <= 1891) and not (user_id = 1 and user_id is not null and ts >= 1892 and ts <= 1893) and not (user_id = 1 and user_id is not null and ts >= 1894 and ts <= 1895) and not (user_id = 1 and user_id is not null and ts >= 1896 and ts <= 1897) and not (user_id = 1 and user_id is not null and ts >= 1898 and ts <= 1899) and not (user_id = 1 and user_id is not null and ts >= 1900 and ts <= 1901) and not (user_id = 1 and user_id is not null and ts >= 1902 and ts <= 1903) and not (user_id = 1 and user_id is not null and ts >= 1904 and ts <= 1905) and not (user_id = 1 and user_id is not null and ts >= 1906 and ts <= 1907) and not (user_id = 1 and user_id is not null and ts >= 1908 and ts <= 1909) and not (user_id = 1 and user_id is not null and ts >= 1910 and ts <= 1911) and not (user_id = 1 and user_id is not null and ts >= 1912 and ts <= 1913) and not (user_id = 1 and user_id is not null and ts >= 1914 and ts <= 1915) and not (user_id = 1 and user_id is not null and ts >= 1916 and ts <= 1917) and not (user_id = 1 and user_id is not null and ts >= 1918 and ts <= 1919) and not (user_id = 1 and user_id is not null and ts >= 1920 and ts <= 1921) and not (user_id = 1 and user_id is not null and ts >= 1922 and ts <= 1923) and not (user_id = 1 and user_id is not null and ts >= 1924 and ts <= 1925) and not (user_id = 1 and user_id is not null and ts >= 1926 and ts <= 1927) and not (user_id = 1 and user_id is not null and ts >= 1928 and ts <= 1929) and not (user_id = 1 and user_id is not null and ts >= 1930 and ts <= 1931) and not (user_id = 1 and user_id is not null and ts >= 1932 and ts <= 1933) and not (user_id = 1 and user_id is not null and ts >= 1934 and ts <= 1935) and not (user_id = 1 and user_id is not null and ts >= 1936 and ts <= 1937) and not (user_id = 1 and user_id is not null and ts >= 1938 and ts <= 1939) and not (user_id = 1 and user_id is not null and ts >= 1940 and ts <= 1941) and not (user_id = 1 and user_id is not null and ts >= 1942 and ts <= 1943) and not (user_id = 1 and user_id is not null and ts >= 1944 and ts <= 1945) and not (user_id = 1 and user_id is not null and ts >= 1946 and ts <= 1947) and not (user_id = 1 and user_id is not null and ts >= 1948 and ts <= 1949) and not (user_id = 1 and user_id is not null and ts >= 1950 and ts <= 1951) and not (user_id = 1 and user_id is not null and ts >= 1952 and ts <= 1953) and not (user_id = 1 and user_id is not null and ts >= 1954 and ts <= 1955) and not (user_id = 1 and user_id is not null and ts >= 1956 and ts <= 1957) and not (user_id = 1 and user_id is not null and ts >= 1958 and ts <= 1959) and not (user_id = 1 and user_id is not null and ts >= 1960 and ts <= 1961) and not (user_id = 1 and user_id is not null and ts >= 1962 and ts <= 1963) and not (user_id = 1 and user_id is not null and ts >= 1964 and ts <= 1965) and not (user_id = 1 and user_id is not null and ts >= 1966 and ts <= 1967) and not (user_id = 1 and user_id is not null and ts >= 1968 and ts <= 1969) and not (user_id = 1 and user_id is not null and ts >= 1970 and ts <= 1971) and not (user_id = 1 and user_id is not null and ts >= 1972 and ts <= 1973) and not (user_id = 1 and user_id is not null and ts >= 1974 and ts <= 1975) and not (user_id = 1 and user_id is not null and ts >= 1976 and ts <= 1977) and not (user_id = 1 and user_id is not null and ts >= 1978 and ts <= 1979) and not (user_id = 1 and user_id is not null and ts >= 1980 and ts <= 1981) and not (user_id = 1 and user_id is not null and ts >= 1982 and ts <= 1983) and not (user_id = 1 and user_id is not null and ts >= 1984 and ts <= 1985) and not (user_id = 1 and user_id is not null and ts >= 1986 and ts <= 1987) and not (user_id = 1 and user_id is not null and ts >= 1988 and ts <= 1989) and not (user_id = 1 and user_id is not null and ts >= 1990 and ts <= 1991) and not (user_id = 1 and user_id is not null and ts >= 1992 and ts <= 1993) and not (user_id = 1 and user_id is not null and ts >= 1994 and ts <= 1995) and not (user_id = 1 and user_id is not null and ts >= 1996 and ts <= 1997) and not (user_id = 1 and user_id is not null and ts >= 1998 and ts <= 1999) and not (user_id = 1 and user_id is not null and ts >= 11000 and ts <= 11001) and not (user_id = 1 and user_id is not null and ts >= 11002 and ts <= 11003) and not (user_id = 1 and user_id is not null and ts >= 11004 and ts <= 11005) and not (user_id = 1 and user_id is not null and ts >= 11006 and ts <= 11007) and not (user_id = 1 and user_id is not null and ts >= 11008 and ts <= 11009) and not (user_id = 1 and user_id is not null and ts >= 11010 and ts <= 11011) and not (user_id = 1 and user_id is not null and ts >= 11012 and ts <= 11013) and not (user_id = 1 and user_id is not null and ts >= 11014 and ts <= 11015) and not (user_id = 1 and user_id is not null and ts >= 11016 and ts <= 11017) and not (user_id = 1 and user_id is not null and ts >= 11018 and ts <= 11019) and not (user_id = 1 and user_id is not null and ts >= 11020 and ts <= 11021) and not (user_id = 1 and user_id is not null and ts >= 11022 and ts <= 11023) and not (user_id = 1 and user_id is not null and ts >= 11024 and ts <= 11025) and not (user_id = 1 and user_id is not null and ts >= 11026 and ts <= 11027) and not (user_id = 1 and user_id is not null and ts >= 11028 and ts <= 11029) and not (user_id = 1 and user_id is not null and ts >= 11030 and ts <= 11031) and not (user_id = 1 and user_id is not null and ts >= 11032 and ts <= 11033) and not (user_id = 1 and user_id is not null and ts >= 11034 and ts <= 11035) and not (user_id = 1 and user_id is not null and ts >= 11036 and ts <= 11037) and not (user_id = 1 and user_id is not null and ts >= 11038 and ts <= 11039) and not (user_id = 1 and user_id is not null and ts >= 11040 and ts <= 11041) and not (user_id = 1 and user_id is not null and ts >= 11042 and ts <= 11043) and not (user_id = 1 and user_id is not null and ts >= 11044 and ts <= 11045) and not (user_id = 1 and user_id is not null and ts >= 11046 and ts <= 11047) and not (user_id = 1 and user_id is not null and ts >= 11048 and ts <= 11049) and not (user_id = 1 and user_id is not null and ts >= 11050 and ts <= 11051) and not (user_id = 1 and user_id is not null and ts >= 11052 and ts <= 11053) and not (user_id = 1 and user_id is not null and ts >= 11054 and ts <= 11055) and not (user_id = 1 and user_id is not null and ts >= 11056 and ts <= 11057) and not (user_id = 1 and user_id is not null and ts >= 11058 and ts <= 11059) and not (user_id = 1 and user_id is not null and ts >= 11060 and ts <= 11061) and not (user_id = 1 and user_id is not null and ts >= 11062 and ts <= 11063) and not (user_id = 1 and user_id is not null and ts >= 11064 and ts <= 11065) and not (user_id = 1 and user_id is not null and ts >= 11066 and ts <= 11067) and not (user_id = 1 and user_id is not null and ts >= 11068 and ts <= 11069) and not (user_id = 1 and user_id is not null and ts >= 11070 and ts <= 11071) and not (user_id = 1 and user_id is not null and ts >= 11072 and ts <= 11073) and not (user_id = 1 and user_id is not null and ts >= 11074 and ts <= 11075) and not (user_id = 1 and user_id is not null and ts >= 11076 and ts <= 11077) and not (user_id = 1 and user_id is not null and ts >= 11078 and ts <= 11079) and not (user_id = 1 and user_id is not null and ts >= 11080 and ts <= 11081) and not (user_id = 1 and user_id is not null and ts >= 11082 and ts <= 11083) and not (user_id = 1 and user_id is not null and ts >= 11084 and ts <= 11085) and not (user_id = 1 and user_id is not null and ts >= 11086 and ts <= 11087) and not (user_id = 1 and user_id is not null and ts >= 11088 and ts <= 11089) and not (user_id = 1 and user_id is not null and ts >= 11090 and ts <= 11091) and not (user_id = 1 and user_id is not null and ts >= 11092 and ts <= 11093) and not (user_id = 1 and user_id is not null and ts >= 11094 and ts <= 11095) and not (user_id = 1 and user_id is not null and ts >= 11096 and ts <= 11097) and not (user_id = 1 and user_id is not null and ts >= 11098 and ts <= 11099) and not (user_id = 1 and user_id is not null and ts >= 11100 and ts <= 11101) and not (user_id = 1 and user_id is not null and ts >= 11102 and ts <= 11103) and not (user_id = 1 and user_id is not null and ts >= 11104 and ts <= 11105) and not (user_id = 1 and user_id is not null and ts >= 11106 and ts <= 11107) and not (user_id = 1 and user_id is not null and ts >= 11108 and ts <= 11109) and not (user_id = 1 and user_id is not null and ts >= 11110 and ts <= 11111) and not (user_id = 1 and user_id is not null and ts >= 11112 and ts <= 11113) and not (user_id = 1 and user_id is not null and ts >= 11114 and ts <= 11115) and not (user_id = 1 and user_id is not null and ts >= 11116 and ts <= 11117) and not (user_id = 1 and user_id is not null and ts >= 11118 and ts <= 11119) and not (user_id = 1 and user_id is not null and ts >= 11120 and ts <= 11121) and not (user_id = 1 and user_id is not null and ts >= 11122 and ts <= 11123) and not (user_id = 1 and user_id is not null and ts >= 11124 and ts <= 11125) and not (user_id = 1 and user_id is not null and ts >= 11126 and ts <= 11127) and not (user_id = 1 and user_id is not null and ts >= 11128 and ts <= 11129) and not (user_id = 1 and user_id is not null and ts >= 11130 and ts <= 11131) and not (user_id = 1 and user_id is not null and ts >= 11132 and ts <= 11133) and not (user_id = 1 and user_id is not null and ts >= 11134 and ts <= 11135) and not (user_id = 1 and user_id is not null and ts >= 11136 and ts <= 11137) and not (user_id = 1 and user_id is not null and ts >= 11138 and ts <= 11139) and not (user_id = 1 and user_id is not null and ts >= 11140 and ts <= 11141) and not (user_id = 1 and user_id is not null and ts >= 11142 and ts <= 11143) and not (user_id = 1 and user_id is not null and ts >= 11144 and ts <= 11145) and not (user_id = 1 and user_id is not null and ts >= 11146 and ts <= 11147) and not (user_id = 1 and user_id is not null and ts >= 11148 and ts <= 11149) and not (user_id = 1 and user_id is not null and ts >= 11150 and ts <= 11151) and not (user_id = 1 and user_id is not null and ts >= 11152 and ts <= 11153) and not (user_id = 1 and user_id is not null and ts >= 11154 and ts <= 11155) and not (user_id = 1 and user_id is not null and ts >= 11156 and ts <= 11157) and not (user_id = 1 and user_id is not null and ts >= 11158 and ts <= 11159) and not (user_id = 1 and user_id is not null and ts >= 11160 and ts <= 11161) and not (user_id = 1 and user_id is not null and ts >= 11162 and ts <= 11163) and not (user_id = 1 and user_id is not null and ts >= 11164 and ts <= 11165) and not (user_id = 1 and user_id is not null and ts >= 11166 and ts <= 11167) and not (user_id = 1 and user_id is not null and ts >= 11168 and ts <= 11169) and not (user_id = 1 and user_id is not null and ts >= 11170 and ts <= 11171) and not (user_id = 1 and user_id is not null and ts >= 11172 and ts <= 11173) and not (user_id = 1 and user_id is not null and ts >= 11174 and ts <= 11175) and not (user_id = 1 and user_id is not null and ts >= 11176 and ts <= 11177) and not (user_id = 1 and user_id is not null and ts >= 11178 and ts <= 11179) and not (user_id = 1 and user_id is not null and ts >= 11180 and ts <= 11181) and not (user_id = 1 and user_id is not null and ts >= 11182 and ts <= 11183) and not (user_id = 1 and user_id is not null and ts >= 11184 and ts <= 11185) and not (user_id = 1 and user_id is not null and ts >= 11186 and ts <= 11187) and not (user_id = 1 and user_id is not null and ts >= 11188 and ts <= 11189) and not (user_id = 1 and user_id is not null and ts >= 11190 and ts <= 11191) and not (user_id = 1 and user_id is not null and ts >= 11192 and ts <= 11193) and not (user_id = 1 and user_id is not null and ts >= 11194 and ts <= 11195) and not (user_id = 1 and user_id is not null and ts >= 11196 and ts <= 11197) and not (user_id = 1 and user_id is not null and ts >= 11198 and ts <= 11199) and not (user_id = 1 and user_id is not null and ts >= 11200 and ts <= 11201) and not (user_id = 1 and user_id is not null and ts >= 11202 and ts <= 11203) and not (user_id = 1 and user_id is not null and ts >= 11204 and ts <= 11205) and not (user_id = 1 and user_id is not null and ts >= 11206 and ts <= 11207) and not (user_id = 1 and user_id is not null and ts >= 11208 and ts <= 11209) and not (user_id = 1 and user_id is not null and ts >= 11210 and ts <= 11211) and not (user_id = 1 and user_id is not null and ts >= 11212 and ts <= 11213) and not (user_id = 1 and user_id is not null and ts >= 11214 and ts <= 11215) and not (user_id = 1 and user_id is not null and ts >= 11216 and ts <= 11217) and not (user_id = 1 and user_id is not null and ts >= 11218 and ts <= 11219) and not (user_id = 1 and user_id is not null and ts >= 11220 and ts <= 11221) and not (user_id = 1 and user_id is not null and ts >= 11222 and ts <= 11223) and not (user_id = 1 and user_id is not null and ts >= 11224 and ts <= 11225) and not (user_id = 1 and user_id is not null and ts >= 11226 and ts <= 11227) and not (user_id = 1 and user_id is not null and ts >= 11228 and ts <= 11229) and not (user_id = 1 and user_id is not null and ts >= 11230 and ts <= 11231) and not (user_id = 1 and user_id is not null and ts >= 11232 and ts <= 11233) and not (user_id = 1 and user_id is not null and ts >= 11234 and ts <= 11235) and not (user_id = 1 and user_id is not null and ts >= 11236 and ts <= 11237) and not (user_id = 1 and user_id is not null and ts >= 11238 and ts <= 11239) and not (user_id = 1 and user_id is not null and ts >= 11240 and ts <= 11241) and not (user_id = 1 and user_id is not null and ts >= 11242 and ts <= 11243) and not (user_id = 1 and user_id is not null and ts >= 11244 and ts <= 11245) and not (user_id = 1 and user_id is not null and ts >= 11246 and ts <= 11247) and not (user_id = 1 and user_id is not null and ts >= 11248 and ts <= 11249) and not (user_id = 1 and user_id is not null and ts >= 11250 and ts <= 11251) and not (user_id = 1 and user_id is not null and ts >= 11252 and ts <= 11253) and not (user_id = 1 and user_id is not null and ts >= 11254 and ts <= 11255) and not (user_id = 1 and user_id is not null and ts >= 11256 and ts <= 11257) and not (user_id = 1 and user_id is not null and ts >= 11258 and ts <= 11259) and not (user_id = 1 and user_id is not null and ts >= 11260 and ts <= 11261) and not (user_id = 1 and user_id is not null and ts >= 11262 and ts <= 11263) and not (user_id = 1 and user_id is not null and ts >= 11264 and ts <= 11265) and not (user_id = 1 and user_id is not null and ts >= 11266 and ts <= 11267) and not (user_id = 1 and user_id is not null and ts >= 11268 and ts <= 11269) and not (user_id = 1 and user_id is not null and ts >= 11270 and ts <= 11271) and not (user_id = 1 and user_id is not null and ts >= 11272 and ts <= 11273) and not (user_id = 1 and user_id is not null and ts >= 11274 and ts <= 11275) and not (user_id = 1 and user_id is not null and ts >= 11276 and ts <= 11277) and not (user_id = 1 and user_id is not null and ts >= 11278 and ts <= 11279) and not (user_id = 1 and user_id is not null and ts >= 11280 and ts <= 11281) and not (user_id = 1 and user_id is not null and ts >= 11282 and ts <= 11283) and not (user_id = 1 and user_id is not null and ts >= 11284 and ts <= 11285) and not (user_id = 1 and user_id is not null and ts >= 11286 and ts <= 11287) and not (user_id = 1 and user_id is not null and ts >= 11288 and ts <= 11289) and not (user_id = 1 and user_id is not null and ts >= 11290 and ts <= 11291) and not (user_id = 1 and user_id is not null and ts >= 11292 and ts <= 11293) and not (user_id = 1 and user_id is not null and ts >= 11294 and ts <= 11295) and not (user_id = 1 and user_id is not null and ts >= 11296 and ts <= 11297) and not (user_id = 1 and user_id is not null and ts >= 11298 and ts <= 11299) and not (user_id = 1 and user_id is not null and ts >= 11300 and ts <= 11301) and not (user_id = 1 and user_id is not null and ts >= 11302 and ts <= 11303) and not (user_id = 1 and user_id is not null and ts >= 11304 and ts <= 11305) and not (user_id = 1 and user_id is not null and ts >= 11306 and ts <= 11307) and not (user_id = 1 and user_id is not null and ts >= 11308 and ts <= 11309) and not (user_id = 1 and user_id is not null and ts >= 11310 and ts <= 11311) and not (user_id = 1 and user_id is not null and ts >= 11312 and ts <= 11313) and not (user_id = 1 and user_id is not null and ts >= 11314 and ts <= 11315) and not (user_id = 1 and user_id is not null and ts >= 11316 and ts <= 11317) and not (user_id = 1 and user_id is not null and ts >= 11318 and ts <= 11319) and not (user_id = 1 and user_id is not null and ts >= 11320 and ts <= 11321) and not (user_id = 1 and user_id is not null and ts >= 11322 and ts <= 11323) and not (user_id = 1 and user_id is not null and ts >= 11324 and ts <= 11325) and not (user_id = 1 and user_id is not null and ts >= 11326 and ts <= 11327) and not (user_id = 1 and user_id is not null and ts >= 11328 and ts <= 11329) and not (user_id = 1 and user_id is not null and ts >= 11330 and ts <= 11331) and not (user_id = 1 and user_id is not null and ts >= 11332 and ts <= 11333) and not (user_id = 1 and user_id is not null and ts >= 11334 and ts <= 11335) and not (user_id = 1 and user_id is not null and ts >= 11336 and ts <= 11337) and not (user_id = 1 and user_id is not null and ts >= 11338 and ts <= 11339) and not (user_id = 1 and user_id is not null and ts >= 11340 and ts <= 11341) and not (user_id = 1 and user_id is not null and ts >= 11342 and ts <= 11343) and not (user_id = 1 and user_id is not null and ts >= 11344 and ts <= 11345) and not (user_id = 1 and user_id is not null and ts >= 11346 and ts <= 11347) and not (user_id = 1 and user_id is not null and ts >= 11348 and ts <= 11349) and not (user_id = 1 and user_id is not null and ts >= 11350 and ts <= 11351) and not (user_id = 1 and user_id is not null and ts >= 11352 and ts <= 11353) and not (user_id = 1 and user_id is not null and ts >= 11354 and ts <= 11355) and not (user_id = 1 and user_id is not null and ts >= 11356 and ts <= 11357) and not (user_id = 1 and user_id is not null and ts >= 11358 and ts <= 11359) and not (user_id = 1 and user_id is not null and ts >= 11360 and ts <= 11361) and not (user_id = 1 and user_id is not null and ts >= 11362 and ts <= 11363) and not (user_id = 1 and user_id is not null and ts >= 11364 and ts <= 11365) and not (user_id = 1 and user_id is not null and ts >= 11366 and ts <= 11367) and not (user_id = 1 and user_id is not null and ts >= 11368 and ts <= 11369) and not (user_id = 1 and user_id is not null and ts >= 11370 and ts <= 11371) and not (user_id = 1 and user_id is not null and ts >= 11372 and ts <= 11373) and not (user_id = 1 and user_id is not null and ts >= 11374 and ts <= 11375) and not (user_id = 1 and user_id is not null and ts >= 11376 and ts <= 11377) and not (user_id = 1 and user_id is not null and ts >= 11378 and ts <= 11379) and not (user_id = 1 and user_id is not null and ts >= 11380 and ts <= 11381) and not (user_id = 1 and user_id is not null and ts >= 11382 and ts <= 11383) and not (user_id = 1 and user_id is not null and ts >= 11384 and ts <= 11385) and not (user_id = 1 and user_id is not null and ts >= 11386 and ts <= 11387) and not (user_id = 1 and user_id is not null and ts >= 11388 and ts <= 11389) and not (user_id = 1 and user_id is not null and ts >= 11390 and ts <= 11391) and not (user_id = 1 and user_id is not null and ts >= 11392 and ts <= 11393) and not (user_id = 1 and user_id is not null and ts >= 11394 and ts <= 11395) and not (user_id = 1 and user_id is not null and ts >= 11396 and ts <= 11397) and not (user_id = 1 and user_id is not null and ts >= 11398 and ts <= 11399) and not (user_id = 1 and user_id is not null and ts >= 11400 and ts <= 11401) and not (user_id = 1 and user_id is not null and ts >= 11402 and ts <= 11403) and not (user_id = 1 and user_id is not null and ts >= 11404 and ts <= 11405) and not (user_id = 1 and user_id is not null and ts >= 11406 and ts <= 11407) and not (user_id = 1 and user_id is not null and ts >= 11408 and ts <= 11409) and not (user_id = 1 and user_id is not null and ts >= 11410 and ts <= 11411) and not (user_id = 1 and user_id is not null and ts >= 11412 and ts <= 11413) and not (user_id = 1 and user_id is not null and ts >= 11414 and ts <= 11415) and not (user_id = 1 and user_id is not null and ts >= 11416 and ts <= 11417) and not (user_id = 1 and user_id is not null and ts >= 11418 and ts <= 11419) and not (user_id = 1 and user_id is not null and ts >= 11420 and ts <= 11421) and not (user_id = 1 and user_id is not null and ts >= 11422 and ts <= 11423) and not (user_id = 1 and user_id is not null and ts >= 11424 and ts <= 11425) and not (user_id = 1 and user_id is not null and ts >= 11426 and ts <= 11427) and not (user_id = 1 and user_id is not null and ts >= 11428 and ts <= 11429) and not (user_id = 1 and user_id is not null and ts >= 11430 and ts <= 11431) and not (user_id = 1 and user_id is not null and ts >= 11432 and ts <= 11433) and not (user_id = 1 and user_id is not null and ts >= 11434 and ts <= 11435) and not (user_id = 1 and user_id is not null and ts >= 11436 and ts <= 11437) and not (user_id = 1 and user_id is not null and ts >= 11438 and ts <= 11439) and not (user_id = 1 and user_id is not null and ts >= 11440 and ts <= 11441) and not (user_id = 1 and user_id is not null and ts >= 11442 and ts <= 11443) and not (user_id = 1 and user_id is not null and ts >= 11444 and ts <= 11445) and not (user_id = 1 and user_id is not null and ts >= 11446 and ts <= 11447) and not (user_id = 1 and user_id is not null and ts >= 11448 and ts <= 11449) and not (user_id = 1 and user_id is not null and ts >= 11450 and ts <= 11451) and not (user_id = 1 and user_id is not null and ts >= 11452 and ts <= 11453) and not (user_id = 1 and user_id is not null and ts >= 11454 and ts <= 11455) and not (user_id = 1 and user_id is not null and ts >= 11456 and ts <= 11457) and not (user_id = 1 and user_id is not null and ts >= 11458 and ts <= 11459) and not (user_id = 1 and user_id is not null and ts >= 11460 and ts <= 11461) and not (user_id = 1 and user_id is not null and ts >= 11462 and ts <= 11463) and not (user_id = 1 and user_id is not null and ts >= 11464 and ts <= 11465) and not (user_id = 1 and user_id is not null and ts >= 11466 and ts <= 11467) and not (user_id = 1 and user_id is not null and ts >= 11468 and ts <= 11469) and not (user_id = 1 and user_id is not null and ts >= 11470 and ts <= 11471) and not (user_id = 1 and user_id is not null and ts >= 11472 and ts <= 11473) and not (user_id = 1 and user_id is not null and ts >= 11474 and ts <= 11475) and not (user_id = 1 and user_id is not null and ts >= 11476 and ts <= 11477) and not (user_id = 1 and user_id is not null and ts >= 11478 and ts <= 11479) and not (user_id = 1 and user_id is not null and ts >= 11480 and ts <= 11481) and not (user_id = 1 and user_id is not null and ts >= 11482 and ts <= 11483) and not (user_id = 1 and user_id is not null and ts >= 11484 and ts <= 11485) and not (user_id = 1 and user_id is not null and ts >= 11486 and ts <= 11487) and not (user_id = 1 and user_id is not null and ts >= 11488 and ts <= 11489) and not (user_id = 1 and user_id is not null and ts >= 11490 and ts <= 11491) and not (user_id = 1 and user_id is not null and ts >= 11492 and ts <= 11493) and not (user_id = 1 and user_id is not null and ts >= 11494 and ts <= 11495) and not (user_id = 1 and user_id is not null and ts >= 11496 and ts <= 11497) and not (user_id = 1 and user_id is not null and ts >= 11498 and ts <= 11499) and not (user_id = 1 and user_id is not null and ts >= 11500 and ts <= 11501) and not (user_id = 1 and user_id is not null and ts >= 11502 and ts <= 11503) and not (user_id = 1 and user_id is not null and ts >= 11504 and ts <= 11505) and not (user_id = 1 and user_id is not null and ts >= 11506 and ts <= 11507) and not (user_id = 1 and user_id is not null and ts >= 11508 and ts <= 11509) and not (user_id = 1 and user_id is not null and ts >= 11510 and ts <= 11511) and not (user_id = 1 and user_id is not null and ts >= 11512 and ts <= 11513) and not (user_id = 1 and user_id is not null and ts >= 11514 and ts <= 11515) and not (user_id = 1 and user_id is not null and ts >= 11516 and ts <= 11517) and not (user_id = 1 and user_id is not null and ts >= 11518 and ts <= 11519) and not (user_id = 1 and user_id is not null and ts >= 11520 and ts <= 11521) and not (user_id = 1 and user_id is not null and ts >= 11522 and ts <= 11523) and not (user_id = 1 and user_id is not null and ts >= 11524 and ts <= 11525) and not (user_id = 1 and user_id is not null and ts >= 11526 and ts <= 11527) and not (user_id = 1 and user_id is not null and ts >= 11528 and ts <= 11529) and not (user_id = 1 and user_id is not null and ts >= 11530 and ts <= 11531) and not (user_id = 1 and user_id is not null and ts >= 11532 and ts <= 11533) and not (user_id = 1 and user_id is not null and ts >= 11534 and ts <= 11535) and not (user_id = 1 and user_id is not null and ts >= 11536 and ts <= 11537) and not (user_id = 1 and user_id is not null and ts >= 11538 and ts <= 11539) and not (user_id = 1 and user_id is not null and ts >= 11540 and ts <= 11541) and not (user_id = 1 and user_id is not null and ts >= 11542 and ts <= 11543) and not (user_id = 1 and user_id is not null and ts >= 11544 and ts <= 11545) and not (user_id = 1 and user_id is not null and ts >= 11546 and ts <= 11547) and not (user_id = 1 and user_id is not null and ts >= 11548 and ts <= 11549) and not (user_id = 1 and user_id is not null and ts >= 11550 and ts <= 11551) and not (user_id = 1 and user_id is not null and ts >= 11552 and ts <= 11553) and not (user_id = 1 and user_id is not null and ts >= 11554 and ts <= 11555) and not (user_id = 1 and user_id is not null and ts >= 11556 and ts <= 11557) and not (user_id = 1 and user_id is not null and ts >= 11558 and ts <= 11559) and not (user_id = 1 and user_id is not null and ts >= 11560 and ts <= 11561) and not (user_id = 1 and user_id is not null and ts >= 11562 and ts <= 11563) and not (user_id = 1 and user_id is not null and ts >= 11564 and ts <= 11565) and not (user_id = 1 and user_id is not null and ts >= 11566 and ts <= 11567) and not (user_id = 1 and user_id is not null and ts >= 11568 and ts <= 11569) and not (user_id = 1 and user_id is not null and ts >= 11570 and ts <= 11571) and not (user_id = 1 and user_id is not null and ts >= 11572 and ts <= 11573) and not (user_id = 1 and user_id is not null and ts >= 11574 and ts <= 11575) and not (user_id = 1 and user_id is not null and ts >= 11576 and ts <= 11577) and not (user_id = 1 and user_id is not null and ts >= 11578 and ts <= 11579) and not (user_id = 1 and user_id is not null and ts >= 11580 and ts <= 11581) and not (user_id = 1 and user_id is not null and ts >= 11582 and ts <= 11583) and not (user_id = 1 and user_id is not null and ts >= 11584 and ts <= 11585) and not (user_id = 1 and user_id is not null and ts >= 11586 and ts <= 11587) and not (user_id = 1 and user_id is not null and ts >= 11588 and ts <= 11589) and not (user_id = 1 and user_id is not null and ts >= 11590 and ts <= 11591) and not (user_id = 1 and user_id is not null and ts >= 11592 and ts <= 11593) and not (user_id = 1 and user_id is not null and ts >= 11594 and ts <= 11595) and not (user_id = 1 and user_id is not null and ts >= 11596 and ts <= 11597) and not (user_id = 1 and user_id is not null and ts >= 11598 and ts <= 11599) and not (user_id = 1 and user_id is not null and ts >= 11600 and ts <= 11601) and not (user_id = 1 and user_id is not null and ts >= 11602 and ts <= 11603) and not (user_id = 1 and user_id is not null and ts >= 11604 and ts <= 11605) and not (user_id = 1 and user_id is not null and ts >= 11606 and ts <= 11607) and not (user_id = 1 and user_id is not null and ts >= 11608 and ts <= 11609) and not (user_id = 1 and user_id is not null and ts >= 11610 and ts <= 11611) and not (user_id = 1 and user_id is not null and ts >= 11612 and ts <= 11613) and not (user_id = 1 and user_id is not null and ts >= 11614 and ts <= 11615) and not (user_id = 1 and user_id is not null and ts >= 11616 and ts <= 11617) and not (user_id = 1 and user_id is not null and ts >= 11618 and ts <= 11619) and not (user_id = 1 and user_id is not null and ts >= 11620 and ts <= 11621) and not (user_id = 1 and user_id is not null and ts >= 11622 and ts <= 11623) and not (user_id = 1 and user_id is not null and ts >= 11624 and ts <= 11625) and not (user_id = 1 and user_id is not null and ts >= 11626 and ts <= 11627) and not (user_id = 1 and user_id is not null and ts >= 11628 and ts <= 11629) and not (user_id = 1 and user_id is not null and ts >= 11630 and ts <= 11631) and not (user_id = 1 and user_id is not null and ts >= 11632 and ts <= 11633) and not (user_id = 1 and user_id is not null and ts >= 11634 and ts <= 11635) and not (user_id = 1 and user_id is not null and ts >= 11636 and ts <= 11637) and not (user_id = 1 and user_id is not null and ts >= 11638 and ts <= 11639) and not (user_id = 1 and user_id is not null and ts >= 11640 and ts <= 11641) and not (user_id = 1 and user_id is not null and ts >= 11642 and ts <= 11643) and not (user_id = 1 and user_id is not null and ts >= 11644 and ts <= 11645) and not (user_id = 1 and user_id is not null and ts >= 11646 and ts <= 11647) and not (user_id = 1 and user_id is not null and ts >= 11648 and ts <= 11649) and not (user_id = 1 and user_id is not null and ts >= 11650 and ts <= 11651) and not (user_id = 1 and user_id is not null and ts >= 11652 and ts <= 11653) and not (user_id = 1 and user_id is not null and ts >= 11654 and ts <= 11655) and not (user_id = 1 and user_id is not null and ts >= 11656 and ts <= 11657) and not (user_id = 1 and user_id is not null and ts >= 11658 and ts <= 11659) and not (user_id = 1 and user_id is not null and ts >= 11660 and ts <= 11661) and not (user_id = 1 and user_id is not null and ts >= 11662 and ts <= 11663) and not (user_id = 1 and user_id is not null and ts >= 11664 and ts <= 11665) and not (user_id = 1 and user_id is not null and ts >= 11666 and ts <= 11667) and not (user_id = 1 and user_id is not null and ts >= 11668 and ts <= 11669) and not (user_id = 1 and user_id is not null and ts >= 11670 and ts <= 11671) and not (user_id = 1 and user_id is not null and ts >= 11672 and ts <= 11673) and not (user_id = 1 and user_id is not null and ts >= 11674 and ts <= 11675) and not (user_id = 1 and user_id is not null and ts >= 11676 and ts <= 11677) and not (user_id = 1 and user_id is not null and ts >= 11678 and ts <= 11679) and not (user_id = 1 and user_id is not null and ts >= 11680 and ts <= 11681) and not (user_id = 1 and user_id is not null and ts >= 11682 and ts <= 11683) and not (user_id = 1 and user_id is not null and ts >= 11684 and ts <= 11685) and not (user_id = 1 and user_id is not null and ts >= 11686 and ts <= 11687) and not (user_id = 1 and user_id is not null and ts >= 11688 and ts <= 11689) and not (user_id = 1 and user_id is not null and ts >= 11690 and ts <= 11691) and not (user_id = 1 and user_id is not null and ts >= 11692 and ts <= 11693) and not (user_id = 1 and user_id is not null and ts >= 11694 and ts <= 11695) and not (user_id = 1 and user_id is not null and ts >= 11696 and ts <= 11697) and not (user_id = 1 and user_id is not null and ts >= 11698 and ts <= 11699) and not (user_id = 1 and user_id is not null and ts >= 11700 and ts <= 11701) and not (user_id = 1 and user_id is not null and ts >= 11702 and ts <= 11703) and not (user_id = 1 and user_id is not null and ts >= 11704 and ts <= 11705) and not (user_id = 1 and user_id is not null and ts >= 11706 and ts <= 11707) and not (user_id = 1 and user_id is not null and ts >= 11708 and ts <= 11709) and not (user_id = 1 and user_id is not null and ts >= 11710 and ts <= 11711) and not (user_id = 1 and user_id is not null and ts >= 11712 and ts <= 11713) and not (user_id = 1 and user_id is not null and ts >= 11714 and ts <= 11715) and not (user_id = 1 and user_id is not null and ts >= 11716 and ts <= 11717) and not (user_id = 1 and user_id is not null and ts >= 11718 and ts <= 11719) and not (user_id = 1 and user_id is not null and ts >= 11720 and ts <= 11721) and not (user_id = 1 and user_id is not null and ts >= 11722 and ts <= 11723) and not (user_id = 1 and user_id is not null and ts >= 11724 and ts <= 11725) and not (user_id = 1 and user_id is not null and ts >= 11726 and ts <= 11727) and not (user_id = 1 and user_id is not null and ts >= 11728 and ts <= 11729) and not (user_id = 1 and user_id is not null and ts >= 11730 and ts <= 11731) and not (user_id = 1 and user_id is not null and ts >= 11732 and ts <= 11733) and not (user_id = 1 and user_id is not null and ts >= 11734 and ts <= 11735) and not (user_id = 1 and user_id is not null and ts >= 11736 and ts <= 11737) and not (user_id = 1 and user_id is not null and ts >= 11738 and ts <= 11739) and not (user_id = 1 and user_id is not null and ts >= 11740 and ts <= 11741) and not (user_id = 1 and user_id is not null and ts >= 11742 and ts <= 11743) and not (user_id = 1 and user_id is not null and ts >= 11744 and ts <= 11745) and not (user_id = 1 and user_id is not null and ts >= 11746 and ts <= 11747) and not (user_id = 1 and user_id is not null and ts >= 11748 and ts <= 11749) and not (user_id = 1 and user_id is not null and ts >= 11750 and ts <= 11751) and not (user_id = 1 and user_id is not null and ts >= 11752 and ts <= 11753) and not (user_id = 1 and user_id is not null and ts >= 11754 and ts <= 11755) and not (user_id = 1 and user_id is not null and ts >= 11756 and ts <= 11757) and not (user_id = 1 and user_id is not null and ts >= 11758 and ts <= 11759) and not (user_id = 1 and user_id is not null and ts >= 11760 and ts <= 11761) and not (user_id = 1 and user_id is not null and ts >= 11762 and ts <= 11763) and not (user_id = 1 and user_id is not null and ts >= 11764 and ts <= 11765) and not (user_id = 1 and user_id is not null and ts >= 11766 and ts <= 11767) and not (user_id = 1 and user_id is not null and ts >= 11768 and ts <= 11769) and not (user_id = 1 and user_id is not null and ts >= 11770 and ts <= 11771) and not (user_id = 1 and user_id is not null and ts >= 11772 and ts <= 11773) and not (user_id = 1 and user_id is not null and ts >= 11774 and ts <= 11775) and not (user_id = 1 and user_id is not null and ts >= 11776 and ts <= 11777) and not (user_id = 1 and user_id is not null and ts >= 11778 and ts <= 11779) and not (user_id = 1 and user_id is not null and ts >= 11780 and ts <= 11781) and not (user_id = 1 and user_id is not null and ts >= 11782 and ts <= 11783) and not (user_id = 1 and user_id is not null and ts >= 11784 and ts <= 11785) and not (user_id = 1 and user_id is not null and ts >= 11786 and ts <= 11787) and not (user_id = 1 and user_id is not null and ts >= 11788 and ts <= 11789) and not (user_id = 1 and user_id is not null and ts >= 11790 and ts <= 11791) and not (user_id = 1 and user_id is not null and ts >= 11792 and ts <= 11793) and not (user_id = 1 and user_id is not null and ts >= 11794 and ts <= 11795) and not (user_id = 1 and user_id is not null and ts >= 11796 and ts <= 11797) and not (user_id = 1 and user_id is not null and ts >= 11798 and ts <= 11799) and not (user_id = 1 and user_id is not null and ts >= 11800 and ts <= 11801) and not (user_id = 1 and user_id is not null and ts >= 11802 and ts <= 11803) and not (user_id = 1 and user_id is not null and ts >= 11804 and ts <= 11805) and not (user_id = 1 and user_id is not null and ts >= 11806 and ts <= 11807) and not (user_id = 1 and user_id is not null and ts >= 11808 and ts <= 11809) and not (user_id = 1 and user_id is not null and ts >= 11810 and ts <= 11811) and not (user_id = 1 and user_id is not null and ts >= 11812 and ts <= 11813) and not (user_id = 1 and user_id is not null and ts >= 11814 and ts <= 11815) and not (user_id = 1 and user_id is not null and ts >= 11816 and ts <= 11817) and not (user_id = 1 and user_id is not null and ts >= 11818 and ts <= 11819) and not (user_id = 1 and user_id is not null and ts >= 11820 and ts <= 11821) and not (user_id = 1 and user_id is not null and ts >= 11822 and ts <= 11823) and not (user_id = 1 and user_id is not null and ts >= 11824 and ts <= 11825) and not (user_id = 1 and user_id is not null and ts >= 11826 and ts <= 11827) and not (user_id = 1 and user_id is not null and ts >= 11828 and ts <= 11829) and not (user_id = 1 and user_id is not null and ts >= 11830 and ts <= 11831) and not (user_id = 1 and user_id is not null and ts >= 11832 and ts <= 11833) and not (user_id = 1 and user_id is not null and ts >= 11834 and ts <= 11835) and not (user_id = 1 and user_id is not null and ts >= 11836 and ts <= 11837) and not (user_id = 1 and user_id is not null and ts >= 11838 and ts <= 11839) and not (user_id = 1 and user_id is not null and ts >= 11840 and ts <= 11841) and not (user_id = 1 and user_id is not null and ts >= 11842 and ts <= 11843) and not (user_id = 1 and user_id is not null and ts >= 11844 and ts <= 11845) and not (user_id = 1 and user_id is not null and ts >= 11846 and ts <= 11847) and not (user_id = 1 and user_id is not null and ts >= 11848 and ts <= 11849) and not (user_id = 1 and user_id is not null and ts >= 11850 and ts <= 11851) and not (user_id = 1 and user_id is not null and ts >= 11852 and ts <= 11853) and not (user_id = 1 and user_id is not null and ts >= 11854 and ts <= 11855) and not (user_id = 1 and user_id is not null and ts >= 11856 and ts <= 11857) and not (user_id = 1 and user_id is not null and ts >= 11858 and ts <= 11859) and not (user_id = 1 and user_id is not null and ts >= 11860 and ts <= 11861) and not (user_id = 1 and user_id is not null and ts >= 11862 and ts <= 11863) and not (user_id = 1 and user_id is not null and ts >= 11864 and ts <= 11865) and not (user_id = 1 and user_id is not null and ts >= 11866 and ts <= 11867) and not (user_id = 1 and user_id is not null and ts >= 11868 and ts <= 11869) and not (user_id = 1 and user_id is not null and ts >= 11870 and ts <= 11871) and not (user_id = 1 and user_id is not null and ts >= 11872 and ts <= 11873) and not (user_id = 1 and user_id is not null and ts >= 11874 and ts <= 11875) and not (user_id = 1 and user_id is not null and ts >= 11876 and ts <= 11877) and not (user_id = 1 and user_id is not null and ts >= 11878 and ts <= 11879) and not (user_id = 1 and user_id is not null and ts >= 11880 and ts <= 11881) and not (user_id = 1 and user_id is not null and ts >= 11882 and ts <= 11883) and not (user_id = 1 and user_id is not null and ts >= 11884 and ts <= 11885) and not (user_id = 1 and user_id is not null and ts >= 11886 and ts <= 11887) and not (user_id = 1 and user_id is not null and ts >= 11888 and ts <= 11889) and not (user_id = 1 and user_id is not null and ts >= 11890 and ts <= 11891) and not (user_id = 1 and user_id is not null and ts >= 11892 and ts <= 11893) and not (user_id = 1 and user_id is not null and ts >= 11894 and ts <= 11895) and not (user_id = 1 and user_id is not null and ts >= 11896 and ts <= 11897) and not (user_id = 1 and user_id is not null and ts >= 11898 and ts <= 11899) and not (user_id = 1 and user_id is not null and ts >= 11900 and ts <= 11901) and not (user_id = 1 and user_id is not null and ts >= 11902 and ts <= 11903) and not (user_id = 1 and user_id is not null and ts >= 11904 and ts <= 11905) and not (user_id = 1 and user_id is not null and ts >= 11906 and ts <= 11907) and not (user_id = 1 and user_id is not null and ts >= 11908 and ts <= 11909) and not (user_id = 1 and user_id is not null and ts >= 11910 and ts <= 11911) and not (user_id = 1 and user_id is not null and ts >= 11912 and ts <= 11913) and not (user_id = 1 and user_id is not null and ts >= 11914 and ts <= 11915) and not (user_id = 1 and user_id is not null and ts >= 11916 and ts <= 11917) and not (user_id = 1 and user_id is not null and ts >= 11918 and ts <= 11919) and not (user_id = 1 and user_id is not null and ts >= 11920 and ts <= 11921) and not (user_id = 1 and user_id is not null and ts >= 11922 and ts <= 11923) and not (user_id = 1 and user_id is not null and ts >= 11924 and ts <= 11925) and not (user_id = 1 and user_id is not null and ts >= 11926 and ts <= 11927) and not (user_id = 1 and user_id is not null and ts >= 11928 and ts <= 11929) and not (user_id = 1 and user_id is not null and ts >= 11930 and ts <= 11931) and not (user_id = 1 and user_id is not null and ts >= 11932 and ts <= 11933) and not (user_id = 1 and user_id is not null and ts >= 11934 and ts <= 11935) and not (user_id = 1 and user_id is not null and ts >= 11936 and ts <= 11937) and not (user_id = 1 and user_id is not null and ts >= 11938 and ts <= 11939) and not (user_id = 1 and user_id is not null and ts >= 11940 and ts <= 11941) and not (user_id = 1 and user_id is not null and ts >= 11942 and ts <= 11943) and not (user_id = 1 and user_id is not null and ts >= 11944 and ts <= 11945) and not (user_id = 1 and user_id is not null and ts >= 11946 and ts <= 11947) and not (user_id = 1 and user_id is not null and ts >= 11948 and ts <= 11949) and not (user_id = 1 and user_id is not null and ts >= 11950 and ts <= 11951) and not (user_id = 1 and user_id is not null and ts >= 11952 and ts <= 11953) and not (user_id = 1 and user_id is not null and ts >= 11954 and ts <= 11955) and not (user_id = 1 and user_id is not null and ts >= 11956 and ts <= 11957) and not (user_id = 1 and user_id is not null and ts >= 11958 and ts <= 11959) and not (user_id = 1 and user_id is not null and ts >= 11960 and ts <= 11961) and not (user_id = 1 and user_id is not null and ts >= 11962 and ts <= 11963) and not (user_id = 1 and user_id is not null and ts >= 11964 and ts <= 11965) and not (user_id = 1 and user_id is not null and ts >= 11966 and ts <= 11967) and not (user_id = 1 and user_id is not null and ts >= 11968 and ts <= 11969) and not (user_id = 1 and user_id is not null and ts >= 11970 and ts <= 11971) and not (user_id = 1 and user_id is not null and ts >= 11972 and ts <= 11973) and not (user_id = 1 and user_id is not null and ts >= 11974 and ts <= 11975) and not (user_id = 1 and user_id is not null and ts >= 11976 and ts <= 11977) and not (user_id = 1 and user_id is not null and ts >= 11978 and ts <= 11979) and not (user_id = 1 and user_id is not null and ts >= 11980 and ts <= 11981) and not (user_id = 1 and user_id is not null and ts >= 11982 and ts <= 11983) and not (user_id = 1 and user_id is not null and ts >= 11984 and ts <= 11985) and not (user_id = 1 and user_id is not null and ts >= 11986 and ts <= 11987) and not (user_id = 1 and user_id is not null and ts >= 11988 and ts <= 11989) and not (user_id = 1 and user_id is not null and ts >= 11990 and ts <= 11991) and not (user_id = 1 and user_id is not null and ts >= 11992 and ts <= 11993) and ts >= 113898 and parent_id = 1 order by ts asc limit 100", + "Instructions": { + "OperatorType": "Limit", + "Count": "INT64(100)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1, ts, weight_string(ts) from `user` where 1 != 1", + "OrderBy": "(1|2) ASC", + "Query": "select 1, ts, weight_string(ts) from `user` where shard_key = 1 and is_removed = 1 and cmd in ('A', 'B', 'C') and (not user_id = 1 or not user_id is not null or not ts >= 1 or not ts <= 2) and (not user_id = 1 or not user_id is not null or not ts >= 12 or not ts <= 13) and (not user_id = 1 or not user_id is not null or not ts >= 14 or not ts <= 15) and (not user_id = 1 or not user_id is not null or not ts >= 16 or not ts <= 17) and (not user_id = 1 or not user_id is not null or not ts >= 18 or not ts <= 19) and (not user_id = 1 or not user_id is not null or not ts >= 110 or not ts <= 111) and (not user_id = 1 or not user_id is not null or not ts >= 112 or not ts <= 113) and (not user_id = 1 or not user_id is not null or not ts >= 114 or not ts <= 115) and (not user_id = 1 or not user_id is not null or not ts >= 116 or not ts <= 117) and (not user_id = 1 or not user_id is not null or not ts >= 118 or not ts <= 119) and (not user_id = 1 or not user_id is not null or not ts >= 120 or not ts <= 121) and (not user_id = 1 or not user_id is not null or not ts >= 122 or not ts <= 123) and (not user_id = 1 or not user_id is not null or not ts >= 124 or not ts <= 125) and (not user_id = 1 or not user_id is not null or not ts >= 126 or not ts <= 127) and (not user_id = 1 or not user_id is not null or not ts >= 128 or not ts <= 129) and (not user_id = 1 or not user_id is not null or not ts >= 130 or not ts <= 131) and (not user_id = 1 or not user_id is not null or not ts >= 132 or not ts <= 133) and (not user_id = 1 or not user_id is not null or not ts >= 134 or not ts <= 135) and (not user_id = 1 or not user_id is not null or not ts >= 136 or not ts <= 137) and (not user_id = 1 or not user_id is not null or not ts >= 138 or not ts <= 139) and (not user_id = 1 or not user_id is not null or not ts >= 140 or not ts <= 141) and (not user_id = 1 or not user_id is not null or not ts >= 142 or not ts <= 143) and (not user_id = 1 or not user_id is not null or not ts >= 144 or not ts <= 145) and (not user_id = 1 or not user_id is not null or not ts >= 146 or not ts <= 147) and (not user_id = 1 or not user_id is not null or not ts >= 148 or not ts <= 149) and (not user_id = 1 or not user_id is not null or not ts >= 150 or not ts <= 151) and (not user_id = 1 or not user_id is not null or not ts >= 152 or not ts <= 153) and (not user_id = 1 or not user_id is not null or not ts >= 154 or not ts <= 155) and (not user_id = 1 or not user_id is not null or not ts >= 156 or not ts <= 157) and (not user_id = 1 or not user_id is not null or not ts >= 158 or not ts <= 159) and (not user_id = 1 or not user_id is not null or not ts >= 160 or not ts <= 161) and (not user_id = 1 or not user_id is not null or not ts >= 162 or not ts <= 163) and (not user_id = 1 or not user_id is not null or not ts >= 164 or not ts <= 165) and (not user_id = 1 or not user_id is not null or not ts >= 166 or not ts <= 167) and (not user_id = 1 or not user_id is not null or not ts >= 168 or not ts <= 169) and (not user_id = 1 or not user_id is not null or not ts >= 170 or not ts <= 171) and (not user_id = 1 or not user_id is not null or not ts >= 172 or not ts <= 173) and (not user_id = 1 or not user_id is not null or not ts >= 174 or not ts <= 175) and (not user_id = 1 or not user_id is not null or not ts >= 176 or not ts <= 177) and (not user_id = 1 or not user_id is not null or not ts >= 178 or not ts <= 179) and (not user_id = 1 or not user_id is not null or not ts >= 180 or not ts <= 181) and (not user_id = 1 or not user_id is not null or not ts >= 182 or not ts <= 183) and (not user_id = 1 or not user_id is not null or not ts >= 184 or not ts <= 185) and (not user_id = 1 or not user_id is not null or not ts >= 186 or not ts <= 187) and (not user_id = 1 or not user_id is not null or not ts >= 188 or not ts <= 189) and (not user_id = 1 or not user_id is not null or not ts >= 190 or not ts <= 191) and (not user_id = 1 or not user_id is not null or not ts >= 192 or not ts <= 193) and (not user_id = 1 or not user_id is not null or not ts >= 194 or not ts <= 195) and (not user_id = 1 or not user_id is not null or not ts >= 196 or not ts <= 197) and (not user_id = 1 or not user_id is not null or not ts >= 198 or not ts <= 199) and (not user_id = 1 or not user_id is not null or not ts >= 1100 or not ts <= 1101) and (not user_id = 1 or not user_id is not null or not ts >= 1102 or not ts <= 1103) and (not user_id = 1 or not user_id is not null or not ts >= 1104 or not ts <= 1105) and (not user_id = 1 or not user_id is not null or not ts >= 1106 or not ts <= 1107) and (not user_id = 1 or not user_id is not null or not ts >= 1108 or not ts <= 1109) and (not user_id = 1 or not user_id is not null or not ts >= 1110 or not ts <= 1111) and (not user_id = 1 or not user_id is not null or not ts >= 1112 or not ts <= 1113) and (not user_id = 1 or not user_id is not null or not ts >= 1114 or not ts <= 1115) and (not user_id = 1 or not user_id is not null or not ts >= 1116 or not ts <= 1117) and (not user_id = 1 or not user_id is not null or not ts >= 1118 or not ts <= 1119) and (not user_id = 1 or not user_id is not null or not ts >= 1120 or not ts <= 1121) and (not user_id = 1 or not user_id is not null or not ts >= 1122 or not ts <= 1123) and (not user_id = 1 or not user_id is not null or not ts >= 1124 or not ts <= 1125) and (not user_id = 1 or not user_id is not null or not ts >= 1126 or not ts <= 1127) and (not user_id = 1 or not user_id is not null or not ts >= 1128 or not ts <= 1129) and (not user_id = 1 or not user_id is not null or not ts >= 1130 or not ts <= 1131) and (not user_id = 1 or not user_id is not null or not ts >= 1132 or not ts <= 1133) and (not user_id = 1 or not user_id is not null or not ts >= 1134 or not ts <= 1135) and (not user_id = 1 or not user_id is not null or not ts >= 1136 or not ts <= 1137) and (not user_id = 1 or not user_id is not null or not ts >= 1138 or not ts <= 1139) and (not user_id = 1 or not user_id is not null or not ts >= 1140 or not ts <= 1141) and (not user_id = 1 or not user_id is not null or not ts >= 1142 or not ts <= 1143) and (not user_id = 1 or not user_id is not null or not ts >= 1144 or not ts <= 1145) and (not user_id = 1 or not user_id is not null or not ts >= 1146 or not ts <= 1147) and (not user_id = 1 or not user_id is not null or not ts >= 1148 or not ts <= 1149) and (not user_id = 1 or not user_id is not null or not ts >= 1150 or not ts <= 1151) and (not user_id = 1 or not user_id is not null or not ts >= 1152 or not ts <= 1153) and (not user_id = 1 or not user_id is not null or not ts >= 1154 or not ts <= 1155) and (not user_id = 1 or not user_id is not null or not ts >= 1156 or not ts <= 1157) and (not user_id = 1 or not user_id is not null or not ts >= 1158 or not ts <= 1159) and (not user_id = 1 or not user_id is not null or not ts >= 1160 or not ts <= 1161) and (not user_id = 1 or not user_id is not null or not ts >= 1162 or not ts <= 1163) and (not user_id = 1 or not user_id is not null or not ts >= 1164 or not ts <= 1165) and (not user_id = 1 or not user_id is not null or not ts >= 1166 or not ts <= 1167) and (not user_id = 1 or not user_id is not null or not ts >= 1168 or not ts <= 1169) and (not user_id = 1 or not user_id is not null or not ts >= 1170 or not ts <= 1171) and (not user_id = 1 or not user_id is not null or not ts >= 1172 or not ts <= 1173) and (not user_id = 1 or not user_id is not null or not ts >= 1174 or not ts <= 1175) and (not user_id = 1 or not user_id is not null or not ts >= 1176 or not ts <= 1177) and (not user_id = 1 or not user_id is not null or not ts >= 1178 or not ts <= 1179) and (not user_id = 1 or not user_id is not null or not ts >= 1180 or not ts <= 1181) and (not user_id = 1 or not user_id is not null or not ts >= 1182 or not ts <= 1183) and (not user_id = 1 or not user_id is not null or not ts >= 1184 or not ts <= 1185) and (not user_id = 1 or not user_id is not null or not ts >= 1186 or not ts <= 1187) and (not user_id = 1 or not user_id is not null or not ts >= 1188 or not ts <= 1189) and (not user_id = 1 or not user_id is not null or not ts >= 1190 or not ts <= 1191) and (not user_id = 1 or not user_id is not null or not ts >= 1192 or not ts <= 1193) and (not user_id = 1 or not user_id is not null or not ts >= 1194 or not ts <= 1195) and (not user_id = 1 or not user_id is not null or not ts >= 1196 or not ts <= 1197) and (not user_id = 1 or not user_id is not null or not ts >= 1198 or not ts <= 1199) and (not user_id = 1 or not user_id is not null or not ts >= 1200 or not ts <= 1201) and (not user_id = 1 or not user_id is not null or not ts >= 1202 or not ts <= 1203) and (not user_id = 1 or not user_id is not null or not ts >= 1204 or not ts <= 1205) and (not user_id = 1 or not user_id is not null or not ts >= 1206 or not ts <= 1207) and (not user_id = 1 or not user_id is not null or not ts >= 1208 or not ts <= 1209) and (not user_id = 1 or not user_id is not null or not ts >= 1210 or not ts <= 1211) and (not user_id = 1 or not user_id is not null or not ts >= 1212 or not ts <= 1213) and (not user_id = 1 or not user_id is not null or not ts >= 1214 or not ts <= 1215) and (not user_id = 1 or not user_id is not null or not ts >= 1216 or not ts <= 1217) and (not user_id = 1 or not user_id is not null or not ts >= 1218 or not ts <= 1219) and (not user_id = 1 or not user_id is not null or not ts >= 1220 or not ts <= 1221) and (not user_id = 1 or not user_id is not null or not ts >= 1222 or not ts <= 1223) and (not user_id = 1 or not user_id is not null or not ts >= 1224 or not ts <= 1225) and (not user_id = 1 or not user_id is not null or not ts >= 1226 or not ts <= 1227) and (not user_id = 1 or not user_id is not null or not ts >= 1228 or not ts <= 1229) and (not user_id = 1 or not user_id is not null or not ts >= 1230 or not ts <= 1231) and (not user_id = 1 or not user_id is not null or not ts >= 1232 or not ts <= 1233) and (not user_id = 1 or not user_id is not null or not ts >= 1234 or not ts <= 1235) and (not user_id = 1 or not user_id is not null or not ts >= 1236 or not ts <= 1237) and (not user_id = 1 or not user_id is not null or not ts >= 1238 or not ts <= 1239) and (not user_id = 1 or not user_id is not null or not ts >= 1240 or not ts <= 1241) and (not user_id = 1 or not user_id is not null or not ts >= 1242 or not ts <= 1243) and (not user_id = 1 or not user_id is not null or not ts >= 1244 or not ts <= 1245) and (not user_id = 1 or not user_id is not null or not ts >= 1246 or not ts <= 1247) and (not user_id = 1 or not user_id is not null or not ts >= 1248 or not ts <= 1249) and (not user_id = 1 or not user_id is not null or not ts >= 1250 or not ts <= 1251) and (not user_id = 1 or not user_id is not null or not ts >= 1252 or not ts <= 1253) and (not user_id = 1 or not user_id is not null or not ts >= 1254 or not ts <= 1255) and (not user_id = 1 or not user_id is not null or not ts >= 1256 or not ts <= 1257) and (not user_id = 1 or not user_id is not null or not ts >= 1258 or not ts <= 1259) and (not user_id = 1 or not user_id is not null or not ts >= 1260 or not ts <= 1261) and (not user_id = 1 or not user_id is not null or not ts >= 1262 or not ts <= 1263) and (not user_id = 1 or not user_id is not null or not ts >= 1264 or not ts <= 1265) and (not user_id = 1 or not user_id is not null or not ts >= 1266 or not ts <= 1267) and (not user_id = 1 or not user_id is not null or not ts >= 1268 or not ts <= 1269) and (not user_id = 1 or not user_id is not null or not ts >= 1270 or not ts <= 1271) and (not user_id = 1 or not user_id is not null or not ts >= 1272 or not ts <= 1273) and (not user_id = 1 or not user_id is not null or not ts >= 1274 or not ts <= 1275) and (not user_id = 1 or not user_id is not null or not ts >= 1276 or not ts <= 1277) and (not user_id = 1 or not user_id is not null or not ts >= 1278 or not ts <= 1279) and (not user_id = 1 or not user_id is not null or not ts >= 1280 or not ts <= 1281) and (not user_id = 1 or not user_id is not null or not ts >= 1282 or not ts <= 1283) and (not user_id = 1 or not user_id is not null or not ts >= 1284 or not ts <= 1285) and (not user_id = 1 or not user_id is not null or not ts >= 1286 or not ts <= 1287) and (not user_id = 1 or not user_id is not null or not ts >= 1288 or not ts <= 1289) and (not user_id = 1 or not user_id is not null or not ts >= 1290 or not ts <= 1291) and (not user_id = 1 or not user_id is not null or not ts >= 1292 or not ts <= 1293) and (not user_id = 1 or not user_id is not null or not ts >= 1294 or not ts <= 1295) and (not user_id = 1 or not user_id is not null or not ts >= 1296 or not ts <= 1297) and (not user_id = 1 or not user_id is not null or not ts >= 1298 or not ts <= 1299) and (not user_id = 1 or not user_id is not null or not ts >= 1300 or not ts <= 1301) and (not user_id = 1 or not user_id is not null or not ts >= 1302 or not ts <= 1303) and (not user_id = 1 or not user_id is not null or not ts >= 1304 or not ts <= 1305) and (not user_id = 1 or not user_id is not null or not ts >= 1306 or not ts <= 1307) and (not user_id = 1 or not user_id is not null or not ts >= 1308 or not ts <= 1309) and (not user_id = 1 or not user_id is not null or not ts >= 1310 or not ts <= 1311) and (not user_id = 1 or not user_id is not null or not ts >= 1312 or not ts <= 1313) and (not user_id = 1 or not user_id is not null or not ts >= 1314 or not ts <= 1315) and (not user_id = 1 or not user_id is not null or not ts >= 1316 or not ts <= 1317) and (not user_id = 1 or not user_id is not null or not ts >= 1318 or not ts <= 1319) and (not user_id = 1 or not user_id is not null or not ts >= 1320 or not ts <= 1321) and (not user_id = 1 or not user_id is not null or not ts >= 1322 or not ts <= 1323) and (not user_id = 1 or not user_id is not null or not ts >= 1324 or not ts <= 1325) and (not user_id = 1 or not user_id is not null or not ts >= 1326 or not ts <= 1327) and (not user_id = 1 or not user_id is not null or not ts >= 1328 or not ts <= 1329) and (not user_id = 1 or not user_id is not null or not ts >= 1330 or not ts <= 1331) and (not user_id = 1 or not user_id is not null or not ts >= 1332 or not ts <= 1333) and (not user_id = 1 or not user_id is not null or not ts >= 1334 or not ts <= 1335) and (not user_id = 1 or not user_id is not null or not ts >= 1336 or not ts <= 1337) and (not user_id = 1 or not user_id is not null or not ts >= 1338 or not ts <= 1339) and (not user_id = 1 or not user_id is not null or not ts >= 1340 or not ts <= 1341) and (not user_id = 1 or not user_id is not null or not ts >= 1342 or not ts <= 1343) and (not user_id = 1 or not user_id is not null or not ts >= 1344 or not ts <= 1345) and (not user_id = 1 or not user_id is not null or not ts >= 1346 or not ts <= 1347) and (not user_id = 1 or not user_id is not null or not ts >= 1348 or not ts <= 1349) and (not user_id = 1 or not user_id is not null or not ts >= 1350 or not ts <= 1351) and (not user_id = 1 or not user_id is not null or not ts >= 1352 or not ts <= 1353) and (not user_id = 1 or not user_id is not null or not ts >= 1354 or not ts <= 1355) and (not user_id = 1 or not user_id is not null or not ts >= 1356 or not ts <= 1357) and (not user_id = 1 or not user_id is not null or not ts >= 1358 or not ts <= 1359) and (not user_id = 1 or not user_id is not null or not ts >= 1360 or not ts <= 1361) and (not user_id = 1 or not user_id is not null or not ts >= 1362 or not ts <= 1363) and (not user_id = 1 or not user_id is not null or not ts >= 1364 or not ts <= 1365) and (not user_id = 1 or not user_id is not null or not ts >= 1366 or not ts <= 1367) and (not user_id = 1 or not user_id is not null or not ts >= 1368 or not ts <= 1369) and (not user_id = 1 or not user_id is not null or not ts >= 1370 or not ts <= 1371) and (not user_id = 1 or not user_id is not null or not ts >= 1372 or not ts <= 1373) and (not user_id = 1 or not user_id is not null or not ts >= 1374 or not ts <= 1375) and (not user_id = 1 or not user_id is not null or not ts >= 1376 or not ts <= 1377) and (not user_id = 1 or not user_id is not null or not ts >= 1378 or not ts <= 1379) and (not user_id = 1 or not user_id is not null or not ts >= 1380 or not ts <= 1381) and (not user_id = 1 or not user_id is not null or not ts >= 1382 or not ts <= 1383) and (not user_id = 1 or not user_id is not null or not ts >= 1384 or not ts <= 1385) and (not user_id = 1 or not user_id is not null or not ts >= 1386 or not ts <= 1387) and (not user_id = 1 or not user_id is not null or not ts >= 1388 or not ts <= 1389) and (not user_id = 1 or not user_id is not null or not ts >= 1390 or not ts <= 1391) and (not user_id = 1 or not user_id is not null or not ts >= 1392 or not ts <= 1393) and (not user_id = 1 or not user_id is not null or not ts >= 1394 or not ts <= 1395) and (not user_id = 1 or not user_id is not null or not ts >= 1396 or not ts <= 1397) and (not user_id = 1 or not user_id is not null or not ts >= 1398 or not ts <= 1399) and (not user_id = 1 or not user_id is not null or not ts >= 1400 or not ts <= 1401) and (not user_id = 1 or not user_id is not null or not ts >= 1402 or not ts <= 1403) and (not user_id = 1 or not user_id is not null or not ts >= 1404 or not ts <= 1405) and (not user_id = 1 or not user_id is not null or not ts >= 1406 or not ts <= 1407) and (not user_id = 1 or not user_id is not null or not ts >= 1408 or not ts <= 1409) and (not user_id = 1 or not user_id is not null or not ts >= 1410 or not ts <= 1411) and (not user_id = 1 or not user_id is not null or not ts >= 1412 or not ts <= 1413) and (not user_id = 1 or not user_id is not null or not ts >= 1414 or not ts <= 1415) and (not user_id = 1 or not user_id is not null or not ts >= 1416 or not ts <= 1417) and (not user_id = 1 or not user_id is not null or not ts >= 1418 or not ts <= 1419) and (not user_id = 1 or not user_id is not null or not ts >= 1420 or not ts <= 1421) and (not user_id = 1 or not user_id is not null or not ts >= 1422 or not ts <= 1423) and (not user_id = 1 or not user_id is not null or not ts >= 1424 or not ts <= 1425) and (not user_id = 1 or not user_id is not null or not ts >= 1426 or not ts <= 1427) and (not user_id = 1 or not user_id is not null or not ts >= 1428 or not ts <= 1429) and (not user_id = 1 or not user_id is not null or not ts >= 1430 or not ts <= 1431) and (not user_id = 1 or not user_id is not null or not ts >= 1432 or not ts <= 1433) and (not user_id = 1 or not user_id is not null or not ts >= 1434 or not ts <= 1435) and (not user_id = 1 or not user_id is not null or not ts >= 1436 or not ts <= 1437) and (not user_id = 1 or not user_id is not null or not ts >= 1438 or not ts <= 1439) and (not user_id = 1 or not user_id is not null or not ts >= 1440 or not ts <= 1441) and (not user_id = 1 or not user_id is not null or not ts >= 1442 or not ts <= 1443) and (not user_id = 1 or not user_id is not null or not ts >= 1444 or not ts <= 1445) and (not user_id = 1 or not user_id is not null or not ts >= 1446 or not ts <= 1447) and (not user_id = 1 or not user_id is not null or not ts >= 1448 or not ts <= 1449) and (not user_id = 1 or not user_id is not null or not ts >= 1450 or not ts <= 1451) and (not user_id = 1 or not user_id is not null or not ts >= 1452 or not ts <= 1453) and (not user_id = 1 or not user_id is not null or not ts >= 1454 or not ts <= 1455) and (not user_id = 1 or not user_id is not null or not ts >= 1456 or not ts <= 1457) and (not user_id = 1 or not user_id is not null or not ts >= 1458 or not ts <= 1459) and (not user_id = 1 or not user_id is not null or not ts >= 1460 or not ts <= 1461) and (not user_id = 1 or not user_id is not null or not ts >= 1462 or not ts <= 1463) and (not user_id = 1 or not user_id is not null or not ts >= 1464 or not ts <= 1465) and (not user_id = 1 or not user_id is not null or not ts >= 1466 or not ts <= 1467) and (not user_id = 1 or not user_id is not null or not ts >= 1468 or not ts <= 1469) and (not user_id = 1 or not user_id is not null or not ts >= 1470 or not ts <= 1471) and (not user_id = 1 or not user_id is not null or not ts >= 1472 or not ts <= 1473) and (not user_id = 1 or not user_id is not null or not ts >= 1474 or not ts <= 1475) and (not user_id = 1 or not user_id is not null or not ts >= 1476 or not ts <= 1477) and (not user_id = 1 or not user_id is not null or not ts >= 1478 or not ts <= 1479) and (not user_id = 1 or not user_id is not null or not ts >= 1480 or not ts <= 1481) and (not user_id = 1 or not user_id is not null or not ts >= 1482 or not ts <= 1483) and (not user_id = 1 or not user_id is not null or not ts >= 1484 or not ts <= 1485) and (not user_id = 1 or not user_id is not null or not ts >= 1486 or not ts <= 1487) and (not user_id = 1 or not user_id is not null or not ts >= 1488 or not ts <= 1489) and (not user_id = 1 or not user_id is not null or not ts >= 1490 or not ts <= 1491) and (not user_id = 1 or not user_id is not null or not ts >= 1492 or not ts <= 1493) and (not user_id = 1 or not user_id is not null or not ts >= 1494 or not ts <= 1495) and (not user_id = 1 or not user_id is not null or not ts >= 1496 or not ts <= 1497) and (not user_id = 1 or not user_id is not null or not ts >= 1498 or not ts <= 1499) and (not user_id = 1 or not user_id is not null or not ts >= 1500 or not ts <= 1501) and (not user_id = 1 or not user_id is not null or not ts >= 1502 or not ts <= 1503) and (not user_id = 1 or not user_id is not null or not ts >= 1504 or not ts <= 1505) and (not user_id = 1 or not user_id is not null or not ts >= 1506 or not ts <= 1507) and (not user_id = 1 or not user_id is not null or not ts >= 1508 or not ts <= 1509) and (not user_id = 1 or not user_id is not null or not ts >= 1510 or not ts <= 1511) and (not user_id = 1 or not user_id is not null or not ts >= 1512 or not ts <= 1513) and (not user_id = 1 or not user_id is not null or not ts >= 1514 or not ts <= 1515) and (not user_id = 1 or not user_id is not null or not ts >= 1516 or not ts <= 1517) and (not user_id = 1 or not user_id is not null or not ts >= 1518 or not ts <= 1519) and (not user_id = 1 or not user_id is not null or not ts >= 1520 or not ts <= 1521) and (not user_id = 1 or not user_id is not null or not ts >= 1522 or not ts <= 1523) and (not user_id = 1 or not user_id is not null or not ts >= 1524 or not ts <= 1525) and (not user_id = 1 or not user_id is not null or not ts >= 1526 or not ts <= 1527) and (not user_id = 1 or not user_id is not null or not ts >= 1528 or not ts <= 1529) and (not user_id = 1 or not user_id is not null or not ts >= 1530 or not ts <= 1531) and (not user_id = 1 or not user_id is not null or not ts >= 1532 or not ts <= 1533) and (not user_id = 1 or not user_id is not null or not ts >= 1534 or not ts <= 1535) and (not user_id = 1 or not user_id is not null or not ts >= 1536 or not ts <= 1537) and (not user_id = 1 or not user_id is not null or not ts >= 1538 or not ts <= 1539) and (not user_id = 1 or not user_id is not null or not ts >= 1540 or not ts <= 1541) and (not user_id = 1 or not user_id is not null or not ts >= 1542 or not ts <= 1543) and (not user_id = 1 or not user_id is not null or not ts >= 1544 or not ts <= 1545) and (not user_id = 1 or not user_id is not null or not ts >= 1546 or not ts <= 1547) and (not user_id = 1 or not user_id is not null or not ts >= 1548 or not ts <= 1549) and (not user_id = 1 or not user_id is not null or not ts >= 1550 or not ts <= 1551) and (not user_id = 1 or not user_id is not null or not ts >= 1552 or not ts <= 1553) and (not user_id = 1 or not user_id is not null or not ts >= 1554 or not ts <= 1555) and (not user_id = 1 or not user_id is not null or not ts >= 1556 or not ts <= 1557) and (not user_id = 1 or not user_id is not null or not ts >= 1558 or not ts <= 1559) and (not user_id = 1 or not user_id is not null or not ts >= 1560 or not ts <= 1561) and (not user_id = 1 or not user_id is not null or not ts >= 1562 or not ts <= 1563) and (not user_id = 1 or not user_id is not null or not ts >= 1564 or not ts <= 1565) and (not user_id = 1 or not user_id is not null or not ts >= 1566 or not ts <= 1567) and (not user_id = 1 or not user_id is not null or not ts >= 1568 or not ts <= 1569) and (not user_id = 1 or not user_id is not null or not ts >= 1570 or not ts <= 1571) and (not user_id = 1 or not user_id is not null or not ts >= 1572 or not ts <= 1573) and (not user_id = 1 or not user_id is not null or not ts >= 1574 or not ts <= 1575) and (not user_id = 1 or not user_id is not null or not ts >= 1576 or not ts <= 1577) and (not user_id = 1 or not user_id is not null or not ts >= 1578 or not ts <= 1579) and (not user_id = 1 or not user_id is not null or not ts >= 1580 or not ts <= 1581) and (not user_id = 1 or not user_id is not null or not ts >= 1582 or not ts <= 1583) and (not user_id = 1 or not user_id is not null or not ts >= 1584 or not ts <= 1585) and (not user_id = 1 or not user_id is not null or not ts >= 1586 or not ts <= 1587) and (not user_id = 1 or not user_id is not null or not ts >= 1588 or not ts <= 1589) and (not user_id = 1 or not user_id is not null or not ts >= 1590 or not ts <= 1591) and (not user_id = 1 or not user_id is not null or not ts >= 1592 or not ts <= 1593) and (not user_id = 1 or not user_id is not null or not ts >= 1594 or not ts <= 1595) and (not user_id = 1 or not user_id is not null or not ts >= 1596 or not ts <= 1597) and (not user_id = 1 or not user_id is not null or not ts >= 1598 or not ts <= 1599) and (not user_id = 1 or not user_id is not null or not ts >= 1600 or not ts <= 1601) and (not user_id = 1 or not user_id is not null or not ts >= 1602 or not ts <= 1603) and (not user_id = 1 or not user_id is not null or not ts >= 1604 or not ts <= 1605) and (not user_id = 1 or not user_id is not null or not ts >= 1606 or not ts <= 1607) and (not user_id = 1 or not user_id is not null or not ts >= 1608 or not ts <= 1609) and (not user_id = 1 or not user_id is not null or not ts >= 1610 or not ts <= 1611) and (not user_id = 1 or not user_id is not null or not ts >= 1612 or not ts <= 1613) and (not user_id = 1 or not user_id is not null or not ts >= 1614 or not ts <= 1615) and (not user_id = 1 or not user_id is not null or not ts >= 1616 or not ts <= 1617) and (not user_id = 1 or not user_id is not null or not ts >= 1618 or not ts <= 1619) and (not user_id = 1 or not user_id is not null or not ts >= 1620 or not ts <= 1621) and (not user_id = 1 or not user_id is not null or not ts >= 1622 or not ts <= 1623) and (not user_id = 1 or not user_id is not null or not ts >= 1624 or not ts <= 1625) and (not user_id = 1 or not user_id is not null or not ts >= 1626 or not ts <= 1627) and (not user_id = 1 or not user_id is not null or not ts >= 1628 or not ts <= 1629) and (not user_id = 1 or not user_id is not null or not ts >= 1630 or not ts <= 1631) and (not user_id = 1 or not user_id is not null or not ts >= 1632 or not ts <= 1633) and (not user_id = 1 or not user_id is not null or not ts >= 1634 or not ts <= 1635) and (not user_id = 1 or not user_id is not null or not ts >= 1636 or not ts <= 1637) and (not user_id = 1 or not user_id is not null or not ts >= 1638 or not ts <= 1639) and (not user_id = 1 or not user_id is not null or not ts >= 1640 or not ts <= 1641) and (not user_id = 1 or not user_id is not null or not ts >= 1642 or not ts <= 1643) and (not user_id = 1 or not user_id is not null or not ts >= 1644 or not ts <= 1645) and (not user_id = 1 or not user_id is not null or not ts >= 1646 or not ts <= 1647) and (not user_id = 1 or not user_id is not null or not ts >= 1648 or not ts <= 1649) and (not user_id = 1 or not user_id is not null or not ts >= 1650 or not ts <= 1651) and (not user_id = 1 or not user_id is not null or not ts >= 1652 or not ts <= 1653) and (not user_id = 1 or not user_id is not null or not ts >= 1654 or not ts <= 1655) and (not user_id = 1 or not user_id is not null or not ts >= 1656 or not ts <= 1657) and (not user_id = 1 or not user_id is not null or not ts >= 1658 or not ts <= 1659) and (not user_id = 1 or not user_id is not null or not ts >= 1660 or not ts <= 1661) and (not user_id = 1 or not user_id is not null or not ts >= 1662 or not ts <= 1663) and (not user_id = 1 or not user_id is not null or not ts >= 1664 or not ts <= 1665) and (not user_id = 1 or not user_id is not null or not ts >= 1666 or not ts <= 1667) and (not user_id = 1 or not user_id is not null or not ts >= 1668 or not ts <= 1669) and (not user_id = 1 or not user_id is not null or not ts >= 1670 or not ts <= 1671) and (not user_id = 1 or not user_id is not null or not ts >= 1672 or not ts <= 1673) and (not user_id = 1 or not user_id is not null or not ts >= 1674 or not ts <= 1675) and (not user_id = 1 or not user_id is not null or not ts >= 1676 or not ts <= 1677) and (not user_id = 1 or not user_id is not null or not ts >= 1678 or not ts <= 1679) and (not user_id = 1 or not user_id is not null or not ts >= 1680 or not ts <= 1681) and (not user_id = 1 or not user_id is not null or not ts >= 1682 or not ts <= 1683) and (not user_id = 1 or not user_id is not null or not ts >= 1684 or not ts <= 1685) and (not user_id = 1 or not user_id is not null or not ts >= 1686 or not ts <= 1687) and (not user_id = 1 or not user_id is not null or not ts >= 1688 or not ts <= 1689) and (not user_id = 1 or not user_id is not null or not ts >= 1690 or not ts <= 1691) and (not user_id = 1 or not user_id is not null or not ts >= 1692 or not ts <= 1693) and (not user_id = 1 or not user_id is not null or not ts >= 1694 or not ts <= 1695) and (not user_id = 1 or not user_id is not null or not ts >= 1696 or not ts <= 1697) and (not user_id = 1 or not user_id is not null or not ts >= 1698 or not ts <= 1699) and (not user_id = 1 or not user_id is not null or not ts >= 1700 or not ts <= 1701) and (not user_id = 1 or not user_id is not null or not ts >= 1702 or not ts <= 1703) and (not user_id = 1 or not user_id is not null or not ts >= 1704 or not ts <= 1705) and (not user_id = 1 or not user_id is not null or not ts >= 1706 or not ts <= 1707) and (not user_id = 1 or not user_id is not null or not ts >= 1708 or not ts <= 1709) and (not user_id = 1 or not user_id is not null or not ts >= 1710 or not ts <= 1711) and (not user_id = 1 or not user_id is not null or not ts >= 1712 or not ts <= 1713) and (not user_id = 1 or not user_id is not null or not ts >= 1714 or not ts <= 1715) and (not user_id = 1 or not user_id is not null or not ts >= 1716 or not ts <= 1717) and (not user_id = 1 or not user_id is not null or not ts >= 1718 or not ts <= 1719) and (not user_id = 1 or not user_id is not null or not ts >= 1720 or not ts <= 1721) and (not user_id = 1 or not user_id is not null or not ts >= 1722 or not ts <= 1723) and (not user_id = 1 or not user_id is not null or not ts >= 1724 or not ts <= 1725) and (not user_id = 1 or not user_id is not null or not ts >= 1726 or not ts <= 1727) and (not user_id = 1 or not user_id is not null or not ts >= 1728 or not ts <= 1729) and (not user_id = 1 or not user_id is not null or not ts >= 1730 or not ts <= 1731) and (not user_id = 1 or not user_id is not null or not ts >= 1732 or not ts <= 1733) and (not user_id = 1 or not user_id is not null or not ts >= 1734 or not ts <= 1735) and (not user_id = 1 or not user_id is not null or not ts >= 1736 or not ts <= 1737) and (not user_id = 1 or not user_id is not null or not ts >= 1738 or not ts <= 1739) and (not user_id = 1 or not user_id is not null or not ts >= 1740 or not ts <= 1741) and (not user_id = 1 or not user_id is not null or not ts >= 1742 or not ts <= 1743) and (not user_id = 1 or not user_id is not null or not ts >= 1744 or not ts <= 1745) and (not user_id = 1 or not user_id is not null or not ts >= 1746 or not ts <= 1747) and (not user_id = 1 or not user_id is not null or not ts >= 1748 or not ts <= 1749) and (not user_id = 1 or not user_id is not null or not ts >= 1750 or not ts <= 1751) and (not user_id = 1 or not user_id is not null or not ts >= 1752 or not ts <= 1753) and (not user_id = 1 or not user_id is not null or not ts >= 1754 or not ts <= 1755) and (not user_id = 1 or not user_id is not null or not ts >= 1756 or not ts <= 1757) and (not user_id = 1 or not user_id is not null or not ts >= 1758 or not ts <= 1759) and (not user_id = 1 or not user_id is not null or not ts >= 1760 or not ts <= 1761) and (not user_id = 1 or not user_id is not null or not ts >= 1762 or not ts <= 1763) and (not user_id = 1 or not user_id is not null or not ts >= 1764 or not ts <= 1765) and (not user_id = 1 or not user_id is not null or not ts >= 1766 or not ts <= 1767) and (not user_id = 1 or not user_id is not null or not ts >= 1768 or not ts <= 1769) and (not user_id = 1 or not user_id is not null or not ts >= 1770 or not ts <= 1771) and (not user_id = 1 or not user_id is not null or not ts >= 1772 or not ts <= 1773) and (not user_id = 1 or not user_id is not null or not ts >= 1774 or not ts <= 1775) and (not user_id = 1 or not user_id is not null or not ts >= 1776 or not ts <= 1777) and (not user_id = 1 or not user_id is not null or not ts >= 1778 or not ts <= 1779) and (not user_id = 1 or not user_id is not null or not ts >= 1780 or not ts <= 1781) and (not user_id = 1 or not user_id is not null or not ts >= 1782 or not ts <= 1783) and (not user_id = 1 or not user_id is not null or not ts >= 1784 or not ts <= 1785) and (not user_id = 1 or not user_id is not null or not ts >= 1786 or not ts <= 1787) and (not user_id = 1 or not user_id is not null or not ts >= 1788 or not ts <= 1789) and (not user_id = 1 or not user_id is not null or not ts >= 1790 or not ts <= 1791) and (not user_id = 1 or not user_id is not null or not ts >= 1792 or not ts <= 1793) and (not user_id = 1 or not user_id is not null or not ts >= 1794 or not ts <= 1795) and (not user_id = 1 or not user_id is not null or not ts >= 1796 or not ts <= 1797) and (not user_id = 1 or not user_id is not null or not ts >= 1798 or not ts <= 1799) and (not user_id = 1 or not user_id is not null or not ts >= 1800 or not ts <= 1801) and (not user_id = 1 or not user_id is not null or not ts >= 1802 or not ts <= 1803) and (not user_id = 1 or not user_id is not null or not ts >= 1804 or not ts <= 1805) and (not user_id = 1 or not user_id is not null or not ts >= 1806 or not ts <= 1807) and (not user_id = 1 or not user_id is not null or not ts >= 1808 or not ts <= 1809) and (not user_id = 1 or not user_id is not null or not ts >= 1810 or not ts <= 1811) and (not user_id = 1 or not user_id is not null or not ts >= 1812 or not ts <= 1813) and (not user_id = 1 or not user_id is not null or not ts >= 1814 or not ts <= 1815) and (not user_id = 1 or not user_id is not null or not ts >= 1816 or not ts <= 1817) and (not user_id = 1 or not user_id is not null or not ts >= 1818 or not ts <= 1819) and (not user_id = 1 or not user_id is not null or not ts >= 1820 or not ts <= 1821) and (not user_id = 1 or not user_id is not null or not ts >= 1822 or not ts <= 1823) and (not user_id = 1 or not user_id is not null or not ts >= 1824 or not ts <= 1825) and (not user_id = 1 or not user_id is not null or not ts >= 1826 or not ts <= 1827) and (not user_id = 1 or not user_id is not null or not ts >= 1828 or not ts <= 1829) and (not user_id = 1 or not user_id is not null or not ts >= 1830 or not ts <= 1831) and (not user_id = 1 or not user_id is not null or not ts >= 1832 or not ts <= 1833) and (not user_id = 1 or not user_id is not null or not ts >= 1834 or not ts <= 1835) and (not user_id = 1 or not user_id is not null or not ts >= 1836 or not ts <= 1837) and (not user_id = 1 or not user_id is not null or not ts >= 1838 or not ts <= 1839) and (not user_id = 1 or not user_id is not null or not ts >= 1840 or not ts <= 1841) and (not user_id = 1 or not user_id is not null or not ts >= 1842 or not ts <= 1843) and (not user_id = 1 or not user_id is not null or not ts >= 1844 or not ts <= 1845) and (not user_id = 1 or not user_id is not null or not ts >= 1846 or not ts <= 1847) and (not user_id = 1 or not user_id is not null or not ts >= 1848 or not ts <= 1849) and (not user_id = 1 or not user_id is not null or not ts >= 1850 or not ts <= 1851) and (not user_id = 1 or not user_id is not null or not ts >= 1852 or not ts <= 1853) and (not user_id = 1 or not user_id is not null or not ts >= 1854 or not ts <= 1855) and (not user_id = 1 or not user_id is not null or not ts >= 1856 or not ts <= 1857) and (not user_id = 1 or not user_id is not null or not ts >= 1858 or not ts <= 1859) and (not user_id = 1 or not user_id is not null or not ts >= 1860 or not ts <= 1861) and (not user_id = 1 or not user_id is not null or not ts >= 1862 or not ts <= 1863) and (not user_id = 1 or not user_id is not null or not ts >= 1864 or not ts <= 1865) and (not user_id = 1 or not user_id is not null or not ts >= 1866 or not ts <= 1867) and (not user_id = 1 or not user_id is not null or not ts >= 1868 or not ts <= 1869) and (not user_id = 1 or not user_id is not null or not ts >= 1870 or not ts <= 1871) and (not user_id = 1 or not user_id is not null or not ts >= 1872 or not ts <= 1873) and (not user_id = 1 or not user_id is not null or not ts >= 1874 or not ts <= 1875) and (not user_id = 1 or not user_id is not null or not ts >= 1876 or not ts <= 1877) and (not user_id = 1 or not user_id is not null or not ts >= 1878 or not ts <= 1879) and (not user_id = 1 or not user_id is not null or not ts >= 1880 or not ts <= 1881) and (not user_id = 1 or not user_id is not null or not ts >= 1882 or not ts <= 1883) and (not user_id = 1 or not user_id is not null or not ts >= 1884 or not ts <= 1885) and (not user_id = 1 or not user_id is not null or not ts >= 1886 or not ts <= 1887) and (not user_id = 1 or not user_id is not null or not ts >= 1888 or not ts <= 1889) and (not user_id = 1 or not user_id is not null or not ts >= 1890 or not ts <= 1891) and (not user_id = 1 or not user_id is not null or not ts >= 1892 or not ts <= 1893) and (not user_id = 1 or not user_id is not null or not ts >= 1894 or not ts <= 1895) and (not user_id = 1 or not user_id is not null or not ts >= 1896 or not ts <= 1897) and (not user_id = 1 or not user_id is not null or not ts >= 1898 or not ts <= 1899) and (not user_id = 1 or not user_id is not null or not ts >= 1900 or not ts <= 1901) and (not user_id = 1 or not user_id is not null or not ts >= 1902 or not ts <= 1903) and (not user_id = 1 or not user_id is not null or not ts >= 1904 or not ts <= 1905) and (not user_id = 1 or not user_id is not null or not ts >= 1906 or not ts <= 1907) and (not user_id = 1 or not user_id is not null or not ts >= 1908 or not ts <= 1909) and (not user_id = 1 or not user_id is not null or not ts >= 1910 or not ts <= 1911) and (not user_id = 1 or not user_id is not null or not ts >= 1912 or not ts <= 1913) and (not user_id = 1 or not user_id is not null or not ts >= 1914 or not ts <= 1915) and (not user_id = 1 or not user_id is not null or not ts >= 1916 or not ts <= 1917) and (not user_id = 1 or not user_id is not null or not ts >= 1918 or not ts <= 1919) and (not user_id = 1 or not user_id is not null or not ts >= 1920 or not ts <= 1921) and (not user_id = 1 or not user_id is not null or not ts >= 1922 or not ts <= 1923) and (not user_id = 1 or not user_id is not null or not ts >= 1924 or not ts <= 1925) and (not user_id = 1 or not user_id is not null or not ts >= 1926 or not ts <= 1927) and (not user_id = 1 or not user_id is not null or not ts >= 1928 or not ts <= 1929) and (not user_id = 1 or not user_id is not null or not ts >= 1930 or not ts <= 1931) and (not user_id = 1 or not user_id is not null or not ts >= 1932 or not ts <= 1933) and (not user_id = 1 or not user_id is not null or not ts >= 1934 or not ts <= 1935) and (not user_id = 1 or not user_id is not null or not ts >= 1936 or not ts <= 1937) and (not user_id = 1 or not user_id is not null or not ts >= 1938 or not ts <= 1939) and (not user_id = 1 or not user_id is not null or not ts >= 1940 or not ts <= 1941) and (not user_id = 1 or not user_id is not null or not ts >= 1942 or not ts <= 1943) and (not user_id = 1 or not user_id is not null or not ts >= 1944 or not ts <= 1945) and (not user_id = 1 or not user_id is not null or not ts >= 1946 or not ts <= 1947) and (not user_id = 1 or not user_id is not null or not ts >= 1948 or not ts <= 1949) and (not user_id = 1 or not user_id is not null or not ts >= 1950 or not ts <= 1951) and (not user_id = 1 or not user_id is not null or not ts >= 1952 or not ts <= 1953) and (not user_id = 1 or not user_id is not null or not ts >= 1954 or not ts <= 1955) and (not user_id = 1 or not user_id is not null or not ts >= 1956 or not ts <= 1957) and (not user_id = 1 or not user_id is not null or not ts >= 1958 or not ts <= 1959) and (not user_id = 1 or not user_id is not null or not ts >= 1960 or not ts <= 1961) and (not user_id = 1 or not user_id is not null or not ts >= 1962 or not ts <= 1963) and (not user_id = 1 or not user_id is not null or not ts >= 1964 or not ts <= 1965) and (not user_id = 1 or not user_id is not null or not ts >= 1966 or not ts <= 1967) and (not user_id = 1 or not user_id is not null or not ts >= 1968 or not ts <= 1969) and (not user_id = 1 or not user_id is not null or not ts >= 1970 or not ts <= 1971) and (not user_id = 1 or not user_id is not null or not ts >= 1972 or not ts <= 1973) and (not user_id = 1 or not user_id is not null or not ts >= 1974 or not ts <= 1975) and (not user_id = 1 or not user_id is not null or not ts >= 1976 or not ts <= 1977) and (not user_id = 1 or not user_id is not null or not ts >= 1978 or not ts <= 1979) and (not user_id = 1 or not user_id is not null or not ts >= 1980 or not ts <= 1981) and (not user_id = 1 or not user_id is not null or not ts >= 1982 or not ts <= 1983) and (not user_id = 1 or not user_id is not null or not ts >= 1984 or not ts <= 1985) and (not user_id = 1 or not user_id is not null or not ts >= 1986 or not ts <= 1987) and (not user_id = 1 or not user_id is not null or not ts >= 1988 or not ts <= 1989) and (not user_id = 1 or not user_id is not null or not ts >= 1990 or not ts <= 1991) and (not user_id = 1 or not user_id is not null or not ts >= 1992 or not ts <= 1993) and (not user_id = 1 or not user_id is not null or not ts >= 1994 or not ts <= 1995) and (not user_id = 1 or not user_id is not null or not ts >= 1996 or not ts <= 1997) and (not user_id = 1 or not user_id is not null or not ts >= 1998 or not ts <= 1999) and (not user_id = 1 or not user_id is not null or not ts >= 11000 or not ts <= 11001) and (not user_id = 1 or not user_id is not null or not ts >= 11002 or not ts <= 11003) and (not user_id = 1 or not user_id is not null or not ts >= 11004 or not ts <= 11005) and (not user_id = 1 or not user_id is not null or not ts >= 11006 or not ts <= 11007) and (not user_id = 1 or not user_id is not null or not ts >= 11008 or not ts <= 11009) and (not user_id = 1 or not user_id is not null or not ts >= 11010 or not ts <= 11011) and (not user_id = 1 or not user_id is not null or not ts >= 11012 or not ts <= 11013) and (not user_id = 1 or not user_id is not null or not ts >= 11014 or not ts <= 11015) and (not user_id = 1 or not user_id is not null or not ts >= 11016 or not ts <= 11017) and (not user_id = 1 or not user_id is not null or not ts >= 11018 or not ts <= 11019) and (not user_id = 1 or not user_id is not null or not ts >= 11020 or not ts <= 11021) and (not user_id = 1 or not user_id is not null or not ts >= 11022 or not ts <= 11023) and (not user_id = 1 or not user_id is not null or not ts >= 11024 or not ts <= 11025) and (not user_id = 1 or not user_id is not null or not ts >= 11026 or not ts <= 11027) and (not user_id = 1 or not user_id is not null or not ts >= 11028 or not ts <= 11029) and (not user_id = 1 or not user_id is not null or not ts >= 11030 or not ts <= 11031) and (not user_id = 1 or not user_id is not null or not ts >= 11032 or not ts <= 11033) and (not user_id = 1 or not user_id is not null or not ts >= 11034 or not ts <= 11035) and (not user_id = 1 or not user_id is not null or not ts >= 11036 or not ts <= 11037) and (not user_id = 1 or not user_id is not null or not ts >= 11038 or not ts <= 11039) and (not user_id = 1 or not user_id is not null or not ts >= 11040 or not ts <= 11041) and (not user_id = 1 or not user_id is not null or not ts >= 11042 or not ts <= 11043) and (not user_id = 1 or not user_id is not null or not ts >= 11044 or not ts <= 11045) and (not user_id = 1 or not user_id is not null or not ts >= 11046 or not ts <= 11047) and (not user_id = 1 or not user_id is not null or not ts >= 11048 or not ts <= 11049) and (not user_id = 1 or not user_id is not null or not ts >= 11050 or not ts <= 11051) and (not user_id = 1 or not user_id is not null or not ts >= 11052 or not ts <= 11053) and (not user_id = 1 or not user_id is not null or not ts >= 11054 or not ts <= 11055) and (not user_id = 1 or not user_id is not null or not ts >= 11056 or not ts <= 11057) and (not user_id = 1 or not user_id is not null or not ts >= 11058 or not ts <= 11059) and (not user_id = 1 or not user_id is not null or not ts >= 11060 or not ts <= 11061) and (not user_id = 1 or not user_id is not null or not ts >= 11062 or not ts <= 11063) and (not user_id = 1 or not user_id is not null or not ts >= 11064 or not ts <= 11065) and (not user_id = 1 or not user_id is not null or not ts >= 11066 or not ts <= 11067) and (not user_id = 1 or not user_id is not null or not ts >= 11068 or not ts <= 11069) and (not user_id = 1 or not user_id is not null or not ts >= 11070 or not ts <= 11071) and (not user_id = 1 or not user_id is not null or not ts >= 11072 or not ts <= 11073) and (not user_id = 1 or not user_id is not null or not ts >= 11074 or not ts <= 11075) and (not user_id = 1 or not user_id is not null or not ts >= 11076 or not ts <= 11077) and (not user_id = 1 or not user_id is not null or not ts >= 11078 or not ts <= 11079) and (not user_id = 1 or not user_id is not null or not ts >= 11080 or not ts <= 11081) and (not user_id = 1 or not user_id is not null or not ts >= 11082 or not ts <= 11083) and (not user_id = 1 or not user_id is not null or not ts >= 11084 or not ts <= 11085) and (not user_id = 1 or not user_id is not null or not ts >= 11086 or not ts <= 11087) and (not user_id = 1 or not user_id is not null or not ts >= 11088 or not ts <= 11089) and (not user_id = 1 or not user_id is not null or not ts >= 11090 or not ts <= 11091) and (not user_id = 1 or not user_id is not null or not ts >= 11092 or not ts <= 11093) and (not user_id = 1 or not user_id is not null or not ts >= 11094 or not ts <= 11095) and (not user_id = 1 or not user_id is not null or not ts >= 11096 or not ts <= 11097) and (not user_id = 1 or not user_id is not null or not ts >= 11098 or not ts <= 11099) and (not user_id = 1 or not user_id is not null or not ts >= 11100 or not ts <= 11101) and (not user_id = 1 or not user_id is not null or not ts >= 11102 or not ts <= 11103) and (not user_id = 1 or not user_id is not null or not ts >= 11104 or not ts <= 11105) and (not user_id = 1 or not user_id is not null or not ts >= 11106 or not ts <= 11107) and (not user_id = 1 or not user_id is not null or not ts >= 11108 or not ts <= 11109) and (not user_id = 1 or not user_id is not null or not ts >= 11110 or not ts <= 11111) and (not user_id = 1 or not user_id is not null or not ts >= 11112 or not ts <= 11113) and (not user_id = 1 or not user_id is not null or not ts >= 11114 or not ts <= 11115) and (not user_id = 1 or not user_id is not null or not ts >= 11116 or not ts <= 11117) and (not user_id = 1 or not user_id is not null or not ts >= 11118 or not ts <= 11119) and (not user_id = 1 or not user_id is not null or not ts >= 11120 or not ts <= 11121) and (not user_id = 1 or not user_id is not null or not ts >= 11122 or not ts <= 11123) and (not user_id = 1 or not user_id is not null or not ts >= 11124 or not ts <= 11125) and (not user_id = 1 or not user_id is not null or not ts >= 11126 or not ts <= 11127) and (not user_id = 1 or not user_id is not null or not ts >= 11128 or not ts <= 11129) and (not user_id = 1 or not user_id is not null or not ts >= 11130 or not ts <= 11131) and (not user_id = 1 or not user_id is not null or not ts >= 11132 or not ts <= 11133) and (not user_id = 1 or not user_id is not null or not ts >= 11134 or not ts <= 11135) and (not user_id = 1 or not user_id is not null or not ts >= 11136 or not ts <= 11137) and (not user_id = 1 or not user_id is not null or not ts >= 11138 or not ts <= 11139) and (not user_id = 1 or not user_id is not null or not ts >= 11140 or not ts <= 11141) and (not user_id = 1 or not user_id is not null or not ts >= 11142 or not ts <= 11143) and (not user_id = 1 or not user_id is not null or not ts >= 11144 or not ts <= 11145) and (not user_id = 1 or not user_id is not null or not ts >= 11146 or not ts <= 11147) and (not user_id = 1 or not user_id is not null or not ts >= 11148 or not ts <= 11149) and (not user_id = 1 or not user_id is not null or not ts >= 11150 or not ts <= 11151) and (not user_id = 1 or not user_id is not null or not ts >= 11152 or not ts <= 11153) and (not user_id = 1 or not user_id is not null or not ts >= 11154 or not ts <= 11155) and (not user_id = 1 or not user_id is not null or not ts >= 11156 or not ts <= 11157) and (not user_id = 1 or not user_id is not null or not ts >= 11158 or not ts <= 11159) and (not user_id = 1 or not user_id is not null or not ts >= 11160 or not ts <= 11161) and (not user_id = 1 or not user_id is not null or not ts >= 11162 or not ts <= 11163) and (not user_id = 1 or not user_id is not null or not ts >= 11164 or not ts <= 11165) and (not user_id = 1 or not user_id is not null or not ts >= 11166 or not ts <= 11167) and (not user_id = 1 or not user_id is not null or not ts >= 11168 or not ts <= 11169) and (not user_id = 1 or not user_id is not null or not ts >= 11170 or not ts <= 11171) and (not user_id = 1 or not user_id is not null or not ts >= 11172 or not ts <= 11173) and (not user_id = 1 or not user_id is not null or not ts >= 11174 or not ts <= 11175) and (not user_id = 1 or not user_id is not null or not ts >= 11176 or not ts <= 11177) and (not user_id = 1 or not user_id is not null or not ts >= 11178 or not ts <= 11179) and (not user_id = 1 or not user_id is not null or not ts >= 11180 or not ts <= 11181) and (not user_id = 1 or not user_id is not null or not ts >= 11182 or not ts <= 11183) and (not user_id = 1 or not user_id is not null or not ts >= 11184 or not ts <= 11185) and (not user_id = 1 or not user_id is not null or not ts >= 11186 or not ts <= 11187) and (not user_id = 1 or not user_id is not null or not ts >= 11188 or not ts <= 11189) and (not user_id = 1 or not user_id is not null or not ts >= 11190 or not ts <= 11191) and (not user_id = 1 or not user_id is not null or not ts >= 11192 or not ts <= 11193) and (not user_id = 1 or not user_id is not null or not ts >= 11194 or not ts <= 11195) and (not user_id = 1 or not user_id is not null or not ts >= 11196 or not ts <= 11197) and (not user_id = 1 or not user_id is not null or not ts >= 11198 or not ts <= 11199) and (not user_id = 1 or not user_id is not null or not ts >= 11200 or not ts <= 11201) and (not user_id = 1 or not user_id is not null or not ts >= 11202 or not ts <= 11203) and (not user_id = 1 or not user_id is not null or not ts >= 11204 or not ts <= 11205) and (not user_id = 1 or not user_id is not null or not ts >= 11206 or not ts <= 11207) and (not user_id = 1 or not user_id is not null or not ts >= 11208 or not ts <= 11209) and (not user_id = 1 or not user_id is not null or not ts >= 11210 or not ts <= 11211) and (not user_id = 1 or not user_id is not null or not ts >= 11212 or not ts <= 11213) and (not user_id = 1 or not user_id is not null or not ts >= 11214 or not ts <= 11215) and (not user_id = 1 or not user_id is not null or not ts >= 11216 or not ts <= 11217) and (not user_id = 1 or not user_id is not null or not ts >= 11218 or not ts <= 11219) and (not user_id = 1 or not user_id is not null or not ts >= 11220 or not ts <= 11221) and (not user_id = 1 or not user_id is not null or not ts >= 11222 or not ts <= 11223) and (not user_id = 1 or not user_id is not null or not ts >= 11224 or not ts <= 11225) and (not user_id = 1 or not user_id is not null or not ts >= 11226 or not ts <= 11227) and (not user_id = 1 or not user_id is not null or not ts >= 11228 or not ts <= 11229) and (not user_id = 1 or not user_id is not null or not ts >= 11230 or not ts <= 11231) and (not user_id = 1 or not user_id is not null or not ts >= 11232 or not ts <= 11233) and (not user_id = 1 or not user_id is not null or not ts >= 11234 or not ts <= 11235) and (not user_id = 1 or not user_id is not null or not ts >= 11236 or not ts <= 11237) and (not user_id = 1 or not user_id is not null or not ts >= 11238 or not ts <= 11239) and (not user_id = 1 or not user_id is not null or not ts >= 11240 or not ts <= 11241) and (not user_id = 1 or not user_id is not null or not ts >= 11242 or not ts <= 11243) and (not user_id = 1 or not user_id is not null or not ts >= 11244 or not ts <= 11245) and (not user_id = 1 or not user_id is not null or not ts >= 11246 or not ts <= 11247) and (not user_id = 1 or not user_id is not null or not ts >= 11248 or not ts <= 11249) and (not user_id = 1 or not user_id is not null or not ts >= 11250 or not ts <= 11251) and (not user_id = 1 or not user_id is not null or not ts >= 11252 or not ts <= 11253) and (not user_id = 1 or not user_id is not null or not ts >= 11254 or not ts <= 11255) and (not user_id = 1 or not user_id is not null or not ts >= 11256 or not ts <= 11257) and (not user_id = 1 or not user_id is not null or not ts >= 11258 or not ts <= 11259) and (not user_id = 1 or not user_id is not null or not ts >= 11260 or not ts <= 11261) and (not user_id = 1 or not user_id is not null or not ts >= 11262 or not ts <= 11263) and (not user_id = 1 or not user_id is not null or not ts >= 11264 or not ts <= 11265) and (not user_id = 1 or not user_id is not null or not ts >= 11266 or not ts <= 11267) and (not user_id = 1 or not user_id is not null or not ts >= 11268 or not ts <= 11269) and (not user_id = 1 or not user_id is not null or not ts >= 11270 or not ts <= 11271) and (not user_id = 1 or not user_id is not null or not ts >= 11272 or not ts <= 11273) and (not user_id = 1 or not user_id is not null or not ts >= 11274 or not ts <= 11275) and (not user_id = 1 or not user_id is not null or not ts >= 11276 or not ts <= 11277) and (not user_id = 1 or not user_id is not null or not ts >= 11278 or not ts <= 11279) and (not user_id = 1 or not user_id is not null or not ts >= 11280 or not ts <= 11281) and (not user_id = 1 or not user_id is not null or not ts >= 11282 or not ts <= 11283) and (not user_id = 1 or not user_id is not null or not ts >= 11284 or not ts <= 11285) and (not user_id = 1 or not user_id is not null or not ts >= 11286 or not ts <= 11287) and (not user_id = 1 or not user_id is not null or not ts >= 11288 or not ts <= 11289) and (not user_id = 1 or not user_id is not null or not ts >= 11290 or not ts <= 11291) and (not user_id = 1 or not user_id is not null or not ts >= 11292 or not ts <= 11293) and (not user_id = 1 or not user_id is not null or not ts >= 11294 or not ts <= 11295) and (not user_id = 1 or not user_id is not null or not ts >= 11296 or not ts <= 11297) and (not user_id = 1 or not user_id is not null or not ts >= 11298 or not ts <= 11299) and (not user_id = 1 or not user_id is not null or not ts >= 11300 or not ts <= 11301) and (not user_id = 1 or not user_id is not null or not ts >= 11302 or not ts <= 11303) and (not user_id = 1 or not user_id is not null or not ts >= 11304 or not ts <= 11305) and (not user_id = 1 or not user_id is not null or not ts >= 11306 or not ts <= 11307) and (not user_id = 1 or not user_id is not null or not ts >= 11308 or not ts <= 11309) and (not user_id = 1 or not user_id is not null or not ts >= 11310 or not ts <= 11311) and (not user_id = 1 or not user_id is not null or not ts >= 11312 or not ts <= 11313) and (not user_id = 1 or not user_id is not null or not ts >= 11314 or not ts <= 11315) and (not user_id = 1 or not user_id is not null or not ts >= 11316 or not ts <= 11317) and (not user_id = 1 or not user_id is not null or not ts >= 11318 or not ts <= 11319) and (not user_id = 1 or not user_id is not null or not ts >= 11320 or not ts <= 11321) and (not user_id = 1 or not user_id is not null or not ts >= 11322 or not ts <= 11323) and (not user_id = 1 or not user_id is not null or not ts >= 11324 or not ts <= 11325) and (not user_id = 1 or not user_id is not null or not ts >= 11326 or not ts <= 11327) and (not user_id = 1 or not user_id is not null or not ts >= 11328 or not ts <= 11329) and (not user_id = 1 or not user_id is not null or not ts >= 11330 or not ts <= 11331) and (not user_id = 1 or not user_id is not null or not ts >= 11332 or not ts <= 11333) and (not user_id = 1 or not user_id is not null or not ts >= 11334 or not ts <= 11335) and (not user_id = 1 or not user_id is not null or not ts >= 11336 or not ts <= 11337) and (not user_id = 1 or not user_id is not null or not ts >= 11338 or not ts <= 11339) and (not user_id = 1 or not user_id is not null or not ts >= 11340 or not ts <= 11341) and (not user_id = 1 or not user_id is not null or not ts >= 11342 or not ts <= 11343) and (not user_id = 1 or not user_id is not null or not ts >= 11344 or not ts <= 11345) and (not user_id = 1 or not user_id is not null or not ts >= 11346 or not ts <= 11347) and (not user_id = 1 or not user_id is not null or not ts >= 11348 or not ts <= 11349) and (not user_id = 1 or not user_id is not null or not ts >= 11350 or not ts <= 11351) and (not user_id = 1 or not user_id is not null or not ts >= 11352 or not ts <= 11353) and (not user_id = 1 or not user_id is not null or not ts >= 11354 or not ts <= 11355) and (not user_id = 1 or not user_id is not null or not ts >= 11356 or not ts <= 11357) and (not user_id = 1 or not user_id is not null or not ts >= 11358 or not ts <= 11359) and (not user_id = 1 or not user_id is not null or not ts >= 11360 or not ts <= 11361) and (not user_id = 1 or not user_id is not null or not ts >= 11362 or not ts <= 11363) and (not user_id = 1 or not user_id is not null or not ts >= 11364 or not ts <= 11365) and (not user_id = 1 or not user_id is not null or not ts >= 11366 or not ts <= 11367) and (not user_id = 1 or not user_id is not null or not ts >= 11368 or not ts <= 11369) and (not user_id = 1 or not user_id is not null or not ts >= 11370 or not ts <= 11371) and (not user_id = 1 or not user_id is not null or not ts >= 11372 or not ts <= 11373) and (not user_id = 1 or not user_id is not null or not ts >= 11374 or not ts <= 11375) and (not user_id = 1 or not user_id is not null or not ts >= 11376 or not ts <= 11377) and (not user_id = 1 or not user_id is not null or not ts >= 11378 or not ts <= 11379) and (not user_id = 1 or not user_id is not null or not ts >= 11380 or not ts <= 11381) and (not user_id = 1 or not user_id is not null or not ts >= 11382 or not ts <= 11383) and (not user_id = 1 or not user_id is not null or not ts >= 11384 or not ts <= 11385) and (not user_id = 1 or not user_id is not null or not ts >= 11386 or not ts <= 11387) and (not user_id = 1 or not user_id is not null or not ts >= 11388 or not ts <= 11389) and (not user_id = 1 or not user_id is not null or not ts >= 11390 or not ts <= 11391) and (not user_id = 1 or not user_id is not null or not ts >= 11392 or not ts <= 11393) and (not user_id = 1 or not user_id is not null or not ts >= 11394 or not ts <= 11395) and (not user_id = 1 or not user_id is not null or not ts >= 11396 or not ts <= 11397) and (not user_id = 1 or not user_id is not null or not ts >= 11398 or not ts <= 11399) and (not user_id = 1 or not user_id is not null or not ts >= 11400 or not ts <= 11401) and (not user_id = 1 or not user_id is not null or not ts >= 11402 or not ts <= 11403) and (not user_id = 1 or not user_id is not null or not ts >= 11404 or not ts <= 11405) and (not user_id = 1 or not user_id is not null or not ts >= 11406 or not ts <= 11407) and (not user_id = 1 or not user_id is not null or not ts >= 11408 or not ts <= 11409) and (not user_id = 1 or not user_id is not null or not ts >= 11410 or not ts <= 11411) and (not user_id = 1 or not user_id is not null or not ts >= 11412 or not ts <= 11413) and (not user_id = 1 or not user_id is not null or not ts >= 11414 or not ts <= 11415) and (not user_id = 1 or not user_id is not null or not ts >= 11416 or not ts <= 11417) and (not user_id = 1 or not user_id is not null or not ts >= 11418 or not ts <= 11419) and (not user_id = 1 or not user_id is not null or not ts >= 11420 or not ts <= 11421) and (not user_id = 1 or not user_id is not null or not ts >= 11422 or not ts <= 11423) and (not user_id = 1 or not user_id is not null or not ts >= 11424 or not ts <= 11425) and (not user_id = 1 or not user_id is not null or not ts >= 11426 or not ts <= 11427) and (not user_id = 1 or not user_id is not null or not ts >= 11428 or not ts <= 11429) and (not user_id = 1 or not user_id is not null or not ts >= 11430 or not ts <= 11431) and (not user_id = 1 or not user_id is not null or not ts >= 11432 or not ts <= 11433) and (not user_id = 1 or not user_id is not null or not ts >= 11434 or not ts <= 11435) and (not user_id = 1 or not user_id is not null or not ts >= 11436 or not ts <= 11437) and (not user_id = 1 or not user_id is not null or not ts >= 11438 or not ts <= 11439) and (not user_id = 1 or not user_id is not null or not ts >= 11440 or not ts <= 11441) and (not user_id = 1 or not user_id is not null or not ts >= 11442 or not ts <= 11443) and (not user_id = 1 or not user_id is not null or not ts >= 11444 or not ts <= 11445) and (not user_id = 1 or not user_id is not null or not ts >= 11446 or not ts <= 11447) and (not user_id = 1 or not user_id is not null or not ts >= 11448 or not ts <= 11449) and (not user_id = 1 or not user_id is not null or not ts >= 11450 or not ts <= 11451) and (not user_id = 1 or not user_id is not null or not ts >= 11452 or not ts <= 11453) and (not user_id = 1 or not user_id is not null or not ts >= 11454 or not ts <= 11455) and (not user_id = 1 or not user_id is not null or not ts >= 11456 or not ts <= 11457) and (not user_id = 1 or not user_id is not null or not ts >= 11458 or not ts <= 11459) and (not user_id = 1 or not user_id is not null or not ts >= 11460 or not ts <= 11461) and (not user_id = 1 or not user_id is not null or not ts >= 11462 or not ts <= 11463) and (not user_id = 1 or not user_id is not null or not ts >= 11464 or not ts <= 11465) and (not user_id = 1 or not user_id is not null or not ts >= 11466 or not ts <= 11467) and (not user_id = 1 or not user_id is not null or not ts >= 11468 or not ts <= 11469) and (not user_id = 1 or not user_id is not null or not ts >= 11470 or not ts <= 11471) and (not user_id = 1 or not user_id is not null or not ts >= 11472 or not ts <= 11473) and (not user_id = 1 or not user_id is not null or not ts >= 11474 or not ts <= 11475) and (not user_id = 1 or not user_id is not null or not ts >= 11476 or not ts <= 11477) and (not user_id = 1 or not user_id is not null or not ts >= 11478 or not ts <= 11479) and (not user_id = 1 or not user_id is not null or not ts >= 11480 or not ts <= 11481) and (not user_id = 1 or not user_id is not null or not ts >= 11482 or not ts <= 11483) and (not user_id = 1 or not user_id is not null or not ts >= 11484 or not ts <= 11485) and (not user_id = 1 or not user_id is not null or not ts >= 11486 or not ts <= 11487) and (not user_id = 1 or not user_id is not null or not ts >= 11488 or not ts <= 11489) and (not user_id = 1 or not user_id is not null or not ts >= 11490 or not ts <= 11491) and (not user_id = 1 or not user_id is not null or not ts >= 11492 or not ts <= 11493) and (not user_id = 1 or not user_id is not null or not ts >= 11494 or not ts <= 11495) and (not user_id = 1 or not user_id is not null or not ts >= 11496 or not ts <= 11497) and (not user_id = 1 or not user_id is not null or not ts >= 11498 or not ts <= 11499) and (not user_id = 1 or not user_id is not null or not ts >= 11500 or not ts <= 11501) and (not user_id = 1 or not user_id is not null or not ts >= 11502 or not ts <= 11503) and (not user_id = 1 or not user_id is not null or not ts >= 11504 or not ts <= 11505) and (not user_id = 1 or not user_id is not null or not ts >= 11506 or not ts <= 11507) and (not user_id = 1 or not user_id is not null or not ts >= 11508 or not ts <= 11509) and (not user_id = 1 or not user_id is not null or not ts >= 11510 or not ts <= 11511) and (not user_id = 1 or not user_id is not null or not ts >= 11512 or not ts <= 11513) and (not user_id = 1 or not user_id is not null or not ts >= 11514 or not ts <= 11515) and (not user_id = 1 or not user_id is not null or not ts >= 11516 or not ts <= 11517) and (not user_id = 1 or not user_id is not null or not ts >= 11518 or not ts <= 11519) and (not user_id = 1 or not user_id is not null or not ts >= 11520 or not ts <= 11521) and (not user_id = 1 or not user_id is not null or not ts >= 11522 or not ts <= 11523) and (not user_id = 1 or not user_id is not null or not ts >= 11524 or not ts <= 11525) and (not user_id = 1 or not user_id is not null or not ts >= 11526 or not ts <= 11527) and (not user_id = 1 or not user_id is not null or not ts >= 11528 or not ts <= 11529) and (not user_id = 1 or not user_id is not null or not ts >= 11530 or not ts <= 11531) and (not user_id = 1 or not user_id is not null or not ts >= 11532 or not ts <= 11533) and (not user_id = 1 or not user_id is not null or not ts >= 11534 or not ts <= 11535) and (not user_id = 1 or not user_id is not null or not ts >= 11536 or not ts <= 11537) and (not user_id = 1 or not user_id is not null or not ts >= 11538 or not ts <= 11539) and (not user_id = 1 or not user_id is not null or not ts >= 11540 or not ts <= 11541) and (not user_id = 1 or not user_id is not null or not ts >= 11542 or not ts <= 11543) and (not user_id = 1 or not user_id is not null or not ts >= 11544 or not ts <= 11545) and (not user_id = 1 or not user_id is not null or not ts >= 11546 or not ts <= 11547) and (not user_id = 1 or not user_id is not null or not ts >= 11548 or not ts <= 11549) and (not user_id = 1 or not user_id is not null or not ts >= 11550 or not ts <= 11551) and (not user_id = 1 or not user_id is not null or not ts >= 11552 or not ts <= 11553) and (not user_id = 1 or not user_id is not null or not ts >= 11554 or not ts <= 11555) and (not user_id = 1 or not user_id is not null or not ts >= 11556 or not ts <= 11557) and (not user_id = 1 or not user_id is not null or not ts >= 11558 or not ts <= 11559) and (not user_id = 1 or not user_id is not null or not ts >= 11560 or not ts <= 11561) and (not user_id = 1 or not user_id is not null or not ts >= 11562 or not ts <= 11563) and (not user_id = 1 or not user_id is not null or not ts >= 11564 or not ts <= 11565) and (not user_id = 1 or not user_id is not null or not ts >= 11566 or not ts <= 11567) and (not user_id = 1 or not user_id is not null or not ts >= 11568 or not ts <= 11569) and (not user_id = 1 or not user_id is not null or not ts >= 11570 or not ts <= 11571) and (not user_id = 1 or not user_id is not null or not ts >= 11572 or not ts <= 11573) and (not user_id = 1 or not user_id is not null or not ts >= 11574 or not ts <= 11575) and (not user_id = 1 or not user_id is not null or not ts >= 11576 or not ts <= 11577) and (not user_id = 1 or not user_id is not null or not ts >= 11578 or not ts <= 11579) and (not user_id = 1 or not user_id is not null or not ts >= 11580 or not ts <= 11581) and (not user_id = 1 or not user_id is not null or not ts >= 11582 or not ts <= 11583) and (not user_id = 1 or not user_id is not null or not ts >= 11584 or not ts <= 11585) and (not user_id = 1 or not user_id is not null or not ts >= 11586 or not ts <= 11587) and (not user_id = 1 or not user_id is not null or not ts >= 11588 or not ts <= 11589) and (not user_id = 1 or not user_id is not null or not ts >= 11590 or not ts <= 11591) and (not user_id = 1 or not user_id is not null or not ts >= 11592 or not ts <= 11593) and (not user_id = 1 or not user_id is not null or not ts >= 11594 or not ts <= 11595) and (not user_id = 1 or not user_id is not null or not ts >= 11596 or not ts <= 11597) and (not user_id = 1 or not user_id is not null or not ts >= 11598 or not ts <= 11599) and (not user_id = 1 or not user_id is not null or not ts >= 11600 or not ts <= 11601) and (not user_id = 1 or not user_id is not null or not ts >= 11602 or not ts <= 11603) and (not user_id = 1 or not user_id is not null or not ts >= 11604 or not ts <= 11605) and (not user_id = 1 or not user_id is not null or not ts >= 11606 or not ts <= 11607) and (not user_id = 1 or not user_id is not null or not ts >= 11608 or not ts <= 11609) and (not user_id = 1 or not user_id is not null or not ts >= 11610 or not ts <= 11611) and (not user_id = 1 or not user_id is not null or not ts >= 11612 or not ts <= 11613) and (not user_id = 1 or not user_id is not null or not ts >= 11614 or not ts <= 11615) and (not user_id = 1 or not user_id is not null or not ts >= 11616 or not ts <= 11617) and (not user_id = 1 or not user_id is not null or not ts >= 11618 or not ts <= 11619) and (not user_id = 1 or not user_id is not null or not ts >= 11620 or not ts <= 11621) and (not user_id = 1 or not user_id is not null or not ts >= 11622 or not ts <= 11623) and (not user_id = 1 or not user_id is not null or not ts >= 11624 or not ts <= 11625) and (not user_id = 1 or not user_id is not null or not ts >= 11626 or not ts <= 11627) and (not user_id = 1 or not user_id is not null or not ts >= 11628 or not ts <= 11629) and (not user_id = 1 or not user_id is not null or not ts >= 11630 or not ts <= 11631) and (not user_id = 1 or not user_id is not null or not ts >= 11632 or not ts <= 11633) and (not user_id = 1 or not user_id is not null or not ts >= 11634 or not ts <= 11635) and (not user_id = 1 or not user_id is not null or not ts >= 11636 or not ts <= 11637) and (not user_id = 1 or not user_id is not null or not ts >= 11638 or not ts <= 11639) and (not user_id = 1 or not user_id is not null or not ts >= 11640 or not ts <= 11641) and (not user_id = 1 or not user_id is not null or not ts >= 11642 or not ts <= 11643) and (not user_id = 1 or not user_id is not null or not ts >= 11644 or not ts <= 11645) and (not user_id = 1 or not user_id is not null or not ts >= 11646 or not ts <= 11647) and (not user_id = 1 or not user_id is not null or not ts >= 11648 or not ts <= 11649) and (not user_id = 1 or not user_id is not null or not ts >= 11650 or not ts <= 11651) and (not user_id = 1 or not user_id is not null or not ts >= 11652 or not ts <= 11653) and (not user_id = 1 or not user_id is not null or not ts >= 11654 or not ts <= 11655) and (not user_id = 1 or not user_id is not null or not ts >= 11656 or not ts <= 11657) and (not user_id = 1 or not user_id is not null or not ts >= 11658 or not ts <= 11659) and (not user_id = 1 or not user_id is not null or not ts >= 11660 or not ts <= 11661) and (not user_id = 1 or not user_id is not null or not ts >= 11662 or not ts <= 11663) and (not user_id = 1 or not user_id is not null or not ts >= 11664 or not ts <= 11665) and (not user_id = 1 or not user_id is not null or not ts >= 11666 or not ts <= 11667) and (not user_id = 1 or not user_id is not null or not ts >= 11668 or not ts <= 11669) and (not user_id = 1 or not user_id is not null or not ts >= 11670 or not ts <= 11671) and (not user_id = 1 or not user_id is not null or not ts >= 11672 or not ts <= 11673) and (not user_id = 1 or not user_id is not null or not ts >= 11674 or not ts <= 11675) and (not user_id = 1 or not user_id is not null or not ts >= 11676 or not ts <= 11677) and (not user_id = 1 or not user_id is not null or not ts >= 11678 or not ts <= 11679) and (not user_id = 1 or not user_id is not null or not ts >= 11680 or not ts <= 11681) and (not user_id = 1 or not user_id is not null or not ts >= 11682 or not ts <= 11683) and (not user_id = 1 or not user_id is not null or not ts >= 11684 or not ts <= 11685) and (not user_id = 1 or not user_id is not null or not ts >= 11686 or not ts <= 11687) and (not user_id = 1 or not user_id is not null or not ts >= 11688 or not ts <= 11689) and (not user_id = 1 or not user_id is not null or not ts >= 11690 or not ts <= 11691) and (not user_id = 1 or not user_id is not null or not ts >= 11692 or not ts <= 11693) and (not user_id = 1 or not user_id is not null or not ts >= 11694 or not ts <= 11695) and (not user_id = 1 or not user_id is not null or not ts >= 11696 or not ts <= 11697) and (not user_id = 1 or not user_id is not null or not ts >= 11698 or not ts <= 11699) and (not user_id = 1 or not user_id is not null or not ts >= 11700 or not ts <= 11701) and (not user_id = 1 or not user_id is not null or not ts >= 11702 or not ts <= 11703) and (not user_id = 1 or not user_id is not null or not ts >= 11704 or not ts <= 11705) and (not user_id = 1 or not user_id is not null or not ts >= 11706 or not ts <= 11707) and (not user_id = 1 or not user_id is not null or not ts >= 11708 or not ts <= 11709) and (not user_id = 1 or not user_id is not null or not ts >= 11710 or not ts <= 11711) and (not user_id = 1 or not user_id is not null or not ts >= 11712 or not ts <= 11713) and (not user_id = 1 or not user_id is not null or not ts >= 11714 or not ts <= 11715) and (not user_id = 1 or not user_id is not null or not ts >= 11716 or not ts <= 11717) and (not user_id = 1 or not user_id is not null or not ts >= 11718 or not ts <= 11719) and (not user_id = 1 or not user_id is not null or not ts >= 11720 or not ts <= 11721) and (not user_id = 1 or not user_id is not null or not ts >= 11722 or not ts <= 11723) and (not user_id = 1 or not user_id is not null or not ts >= 11724 or not ts <= 11725) and (not user_id = 1 or not user_id is not null or not ts >= 11726 or not ts <= 11727) and (not user_id = 1 or not user_id is not null or not ts >= 11728 or not ts <= 11729) and (not user_id = 1 or not user_id is not null or not ts >= 11730 or not ts <= 11731) and (not user_id = 1 or not user_id is not null or not ts >= 11732 or not ts <= 11733) and (not user_id = 1 or not user_id is not null or not ts >= 11734 or not ts <= 11735) and (not user_id = 1 or not user_id is not null or not ts >= 11736 or not ts <= 11737) and (not user_id = 1 or not user_id is not null or not ts >= 11738 or not ts <= 11739) and (not user_id = 1 or not user_id is not null or not ts >= 11740 or not ts <= 11741) and (not user_id = 1 or not user_id is not null or not ts >= 11742 or not ts <= 11743) and (not user_id = 1 or not user_id is not null or not ts >= 11744 or not ts <= 11745) and (not user_id = 1 or not user_id is not null or not ts >= 11746 or not ts <= 11747) and (not user_id = 1 or not user_id is not null or not ts >= 11748 or not ts <= 11749) and (not user_id = 1 or not user_id is not null or not ts >= 11750 or not ts <= 11751) and (not user_id = 1 or not user_id is not null or not ts >= 11752 or not ts <= 11753) and (not user_id = 1 or not user_id is not null or not ts >= 11754 or not ts <= 11755) and (not user_id = 1 or not user_id is not null or not ts >= 11756 or not ts <= 11757) and (not user_id = 1 or not user_id is not null or not ts >= 11758 or not ts <= 11759) and (not user_id = 1 or not user_id is not null or not ts >= 11760 or not ts <= 11761) and (not user_id = 1 or not user_id is not null or not ts >= 11762 or not ts <= 11763) and (not user_id = 1 or not user_id is not null or not ts >= 11764 or not ts <= 11765) and (not user_id = 1 or not user_id is not null or not ts >= 11766 or not ts <= 11767) and (not user_id = 1 or not user_id is not null or not ts >= 11768 or not ts <= 11769) and (not user_id = 1 or not user_id is not null or not ts >= 11770 or not ts <= 11771) and (not user_id = 1 or not user_id is not null or not ts >= 11772 or not ts <= 11773) and (not user_id = 1 or not user_id is not null or not ts >= 11774 or not ts <= 11775) and (not user_id = 1 or not user_id is not null or not ts >= 11776 or not ts <= 11777) and (not user_id = 1 or not user_id is not null or not ts >= 11778 or not ts <= 11779) and (not user_id = 1 or not user_id is not null or not ts >= 11780 or not ts <= 11781) and (not user_id = 1 or not user_id is not null or not ts >= 11782 or not ts <= 11783) and (not user_id = 1 or not user_id is not null or not ts >= 11784 or not ts <= 11785) and (not user_id = 1 or not user_id is not null or not ts >= 11786 or not ts <= 11787) and (not user_id = 1 or not user_id is not null or not ts >= 11788 or not ts <= 11789) and (not user_id = 1 or not user_id is not null or not ts >= 11790 or not ts <= 11791) and (not user_id = 1 or not user_id is not null or not ts >= 11792 or not ts <= 11793) and (not user_id = 1 or not user_id is not null or not ts >= 11794 or not ts <= 11795) and (not user_id = 1 or not user_id is not null or not ts >= 11796 or not ts <= 11797) and (not user_id = 1 or not user_id is not null or not ts >= 11798 or not ts <= 11799) and (not user_id = 1 or not user_id is not null or not ts >= 11800 or not ts <= 11801) and (not user_id = 1 or not user_id is not null or not ts >= 11802 or not ts <= 11803) and (not user_id = 1 or not user_id is not null or not ts >= 11804 or not ts <= 11805) and (not user_id = 1 or not user_id is not null or not ts >= 11806 or not ts <= 11807) and (not user_id = 1 or not user_id is not null or not ts >= 11808 or not ts <= 11809) and (not user_id = 1 or not user_id is not null or not ts >= 11810 or not ts <= 11811) and (not user_id = 1 or not user_id is not null or not ts >= 11812 or not ts <= 11813) and (not user_id = 1 or not user_id is not null or not ts >= 11814 or not ts <= 11815) and (not user_id = 1 or not user_id is not null or not ts >= 11816 or not ts <= 11817) and (not user_id = 1 or not user_id is not null or not ts >= 11818 or not ts <= 11819) and (not user_id = 1 or not user_id is not null or not ts >= 11820 or not ts <= 11821) and (not user_id = 1 or not user_id is not null or not ts >= 11822 or not ts <= 11823) and (not user_id = 1 or not user_id is not null or not ts >= 11824 or not ts <= 11825) and (not user_id = 1 or not user_id is not null or not ts >= 11826 or not ts <= 11827) and (not user_id = 1 or not user_id is not null or not ts >= 11828 or not ts <= 11829) and (not user_id = 1 or not user_id is not null or not ts >= 11830 or not ts <= 11831) and (not user_id = 1 or not user_id is not null or not ts >= 11832 or not ts <= 11833) and (not user_id = 1 or not user_id is not null or not ts >= 11834 or not ts <= 11835) and (not user_id = 1 or not user_id is not null or not ts >= 11836 or not ts <= 11837) and (not user_id = 1 or not user_id is not null or not ts >= 11838 or not ts <= 11839) and (not user_id = 1 or not user_id is not null or not ts >= 11840 or not ts <= 11841) and (not user_id = 1 or not user_id is not null or not ts >= 11842 or not ts <= 11843) and (not user_id = 1 or not user_id is not null or not ts >= 11844 or not ts <= 11845) and (not user_id = 1 or not user_id is not null or not ts >= 11846 or not ts <= 11847) and (not user_id = 1 or not user_id is not null or not ts >= 11848 or not ts <= 11849) and (not user_id = 1 or not user_id is not null or not ts >= 11850 or not ts <= 11851) and (not user_id = 1 or not user_id is not null or not ts >= 11852 or not ts <= 11853) and (not user_id = 1 or not user_id is not null or not ts >= 11854 or not ts <= 11855) and (not user_id = 1 or not user_id is not null or not ts >= 11856 or not ts <= 11857) and (not user_id = 1 or not user_id is not null or not ts >= 11858 or not ts <= 11859) and (not user_id = 1 or not user_id is not null or not ts >= 11860 or not ts <= 11861) and (not user_id = 1 or not user_id is not null or not ts >= 11862 or not ts <= 11863) and (not user_id = 1 or not user_id is not null or not ts >= 11864 or not ts <= 11865) and (not user_id = 1 or not user_id is not null or not ts >= 11866 or not ts <= 11867) and (not user_id = 1 or not user_id is not null or not ts >= 11868 or not ts <= 11869) and (not user_id = 1 or not user_id is not null or not ts >= 11870 or not ts <= 11871) and (not user_id = 1 or not user_id is not null or not ts >= 11872 or not ts <= 11873) and (not user_id = 1 or not user_id is not null or not ts >= 11874 or not ts <= 11875) and (not user_id = 1 or not user_id is not null or not ts >= 11876 or not ts <= 11877) and (not user_id = 1 or not user_id is not null or not ts >= 11878 or not ts <= 11879) and (not user_id = 1 or not user_id is not null or not ts >= 11880 or not ts <= 11881) and (not user_id = 1 or not user_id is not null or not ts >= 11882 or not ts <= 11883) and (not user_id = 1 or not user_id is not null or not ts >= 11884 or not ts <= 11885) and (not user_id = 1 or not user_id is not null or not ts >= 11886 or not ts <= 11887) and (not user_id = 1 or not user_id is not null or not ts >= 11888 or not ts <= 11889) and (not user_id = 1 or not user_id is not null or not ts >= 11890 or not ts <= 11891) and (not user_id = 1 or not user_id is not null or not ts >= 11892 or not ts <= 11893) and (not user_id = 1 or not user_id is not null or not ts >= 11894 or not ts <= 11895) and (not user_id = 1 or not user_id is not null or not ts >= 11896 or not ts <= 11897) and (not user_id = 1 or not user_id is not null or not ts >= 11898 or not ts <= 11899) and (not user_id = 1 or not user_id is not null or not ts >= 11900 or not ts <= 11901) and (not user_id = 1 or not user_id is not null or not ts >= 11902 or not ts <= 11903) and (not user_id = 1 or not user_id is not null or not ts >= 11904 or not ts <= 11905) and (not user_id = 1 or not user_id is not null or not ts >= 11906 or not ts <= 11907) and (not user_id = 1 or not user_id is not null or not ts >= 11908 or not ts <= 11909) and (not user_id = 1 or not user_id is not null or not ts >= 11910 or not ts <= 11911) and (not user_id = 1 or not user_id is not null or not ts >= 11912 or not ts <= 11913) and (not user_id = 1 or not user_id is not null or not ts >= 11914 or not ts <= 11915) and (not user_id = 1 or not user_id is not null or not ts >= 11916 or not ts <= 11917) and (not user_id = 1 or not user_id is not null or not ts >= 11918 or not ts <= 11919) and (not user_id = 1 or not user_id is not null or not ts >= 11920 or not ts <= 11921) and (not user_id = 1 or not user_id is not null or not ts >= 11922 or not ts <= 11923) and (not user_id = 1 or not user_id is not null or not ts >= 11924 or not ts <= 11925) and (not user_id = 1 or not user_id is not null or not ts >= 11926 or not ts <= 11927) and (not user_id = 1 or not user_id is not null or not ts >= 11928 or not ts <= 11929) and (not user_id = 1 or not user_id is not null or not ts >= 11930 or not ts <= 11931) and (not user_id = 1 or not user_id is not null or not ts >= 11932 or not ts <= 11933) and (not user_id = 1 or not user_id is not null or not ts >= 11934 or not ts <= 11935) and (not user_id = 1 or not user_id is not null or not ts >= 11936 or not ts <= 11937) and (not user_id = 1 or not user_id is not null or not ts >= 11938 or not ts <= 11939) and (not user_id = 1 or not user_id is not null or not ts >= 11940 or not ts <= 11941) and (not user_id = 1 or not user_id is not null or not ts >= 11942 or not ts <= 11943) and (not user_id = 1 or not user_id is not null or not ts >= 11944 or not ts <= 11945) and (not user_id = 1 or not user_id is not null or not ts >= 11946 or not ts <= 11947) and (not user_id = 1 or not user_id is not null or not ts >= 11948 or not ts <= 11949) and (not user_id = 1 or not user_id is not null or not ts >= 11950 or not ts <= 11951) and (not user_id = 1 or not user_id is not null or not ts >= 11952 or not ts <= 11953) and (not user_id = 1 or not user_id is not null or not ts >= 11954 or not ts <= 11955) and (not user_id = 1 or not user_id is not null or not ts >= 11956 or not ts <= 11957) and (not user_id = 1 or not user_id is not null or not ts >= 11958 or not ts <= 11959) and (not user_id = 1 or not user_id is not null or not ts >= 11960 or not ts <= 11961) and (not user_id = 1 or not user_id is not null or not ts >= 11962 or not ts <= 11963) and (not user_id = 1 or not user_id is not null or not ts >= 11964 or not ts <= 11965) and (not user_id = 1 or not user_id is not null or not ts >= 11966 or not ts <= 11967) and (not user_id = 1 or not user_id is not null or not ts >= 11968 or not ts <= 11969) and (not user_id = 1 or not user_id is not null or not ts >= 11970 or not ts <= 11971) and (not user_id = 1 or not user_id is not null or not ts >= 11972 or not ts <= 11973) and (not user_id = 1 or not user_id is not null or not ts >= 11974 or not ts <= 11975) and (not user_id = 1 or not user_id is not null or not ts >= 11976 or not ts <= 11977) and (not user_id = 1 or not user_id is not null or not ts >= 11978 or not ts <= 11979) and (not user_id = 1 or not user_id is not null or not ts >= 11980 or not ts <= 11981) and (not user_id = 1 or not user_id is not null or not ts >= 11982 or not ts <= 11983) and (not user_id = 1 or not user_id is not null or not ts >= 11984 or not ts <= 11985) and (not user_id = 1 or not user_id is not null or not ts >= 11986 or not ts <= 11987) and (not user_id = 1 or not user_id is not null or not ts >= 11988 or not ts <= 11989) and (not user_id = 1 or not user_id is not null or not ts >= 11990 or not ts <= 11991) and (not user_id = 1 or not user_id is not null or not ts >= 11992 or not ts <= 11993) and ts >= 113898 and parent_id = 1 order by ts asc limit :__upper_limit", + "ResultColumns": 1, + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.json b/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.json index 871c30c2ea6..a3370a74f5d 100644 --- a/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.json +++ b/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.json @@ -38,7 +38,19 @@ { "comment": "Flush statement with flush options", "query": "flush no_write_to_binlog hosts, logs", - "plan": "VT03007: keyspace not specified" + "plan": { + "QueryType": "FLUSH", + "Original": "flush no_write_to_binlog hosts, logs", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AllShards()", + "Query": "flush local hosts, logs" + } + } }, { "comment": "Flush statement with routing rules", diff --git a/go/vt/vtgate/planbuilder/testdata/foreignkey_cases.json b/go/vt/vtgate/planbuilder/testdata/foreignkey_cases.json new file mode 100644 index 00000000000..c9d9097e9a4 --- /dev/null +++ b/go/vt/vtgate/planbuilder/testdata/foreignkey_cases.json @@ -0,0 +1,1393 @@ +[ + { + "comment": "Insertion in a table with cross-shard foreign keys disallowed", + "query": "insert into tbl3 (col3, coly) values (1, 3)", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Insertion in a table with shard-scoped foreign keys is allowed", + "query": "insert into tbl2 (col2, coly) values (1, 3)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into tbl2 (col2, coly) values (1, 3)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "insert into tbl2(col2, coly) values (:_col2_0, 3)", + "TableName": "tbl2", + "VindexValues": { + "hash_vin": "INT64(1)" + } + }, + "TablesUsed": [ + "sharded_fk_allow.tbl2" + ] + } + }, + { + "comment": "Insertion in a table with shard-scoped multiple column foreign key is allowed", + "query": "insert into multicol_tbl2 (cola, colb, colc) values (1, 2, 3)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into multicol_tbl2 (cola, colb, colc) values (1, 2, 3)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "insert into multicol_tbl2(cola, colb, colc) values (:_cola_0, :_colb_0, :_colc_0)", + "TableName": "multicol_tbl2", + "VindexValues": { + "multicolIdx": "INT64(1), INT64(2), INT64(3)" + } + }, + "TablesUsed": [ + "sharded_fk_allow.multicol_tbl2" + ] + } + }, + { + "comment": "Delete in a table with cross-shard foreign keys disallowed", + "query": "delete from tbl1", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Delete in a table with not all column shard-scoped foreign keys - disallowed", + "query": "delete from tbl7", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Delete in a table with shard-scoped multiple column foreign key with cascade", + "query": "delete from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "plan": { + "QueryType": "DELETE", + "Original": "delete from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select colb, cola, y, colc, x from multicol_tbl1 where 1 != 1", + "Query": "select colb, cola, y, colc, x from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3 for update", + "Table": "multicol_tbl1", + "Values": [ + "INT64(1)", + "INT64(2)", + "INT64(3)" + ], + "Vindex": "multicolIdx" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0, + 1, + 2, + 3, + 4 + ], + "Query": "delete from multicol_tbl2 where (colb, cola, x, colc, y) in ::fkc_vals", + "Table": "multicol_tbl2" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "Table": "multicol_tbl1", + "Values": [ + "INT64(1)", + "INT64(2)", + "INT64(3)" + ], + "Vindex": "multicolIdx" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.multicol_tbl1", + "sharded_fk_allow.multicol_tbl2" + ] + } + }, + { + "comment": "Delete in a table with shard-scoped foreign keys with cascade", + "query": "delete from tbl5", + "plan": { + "QueryType": "DELETE", + "Original": "delete from tbl5", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select col5, t5col5 from tbl5 where 1 != 1", + "Query": "select col5, t5col5 from tbl5 for update", + "Table": "tbl5" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "delete from tbl4 where (col4) in ::fkc_vals", + "Table": "tbl4" + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 1 + ], + "Query": "delete from tbl4 where (t4col4) in ::fkc_vals1", + "Table": "tbl4" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from tbl5", + "Table": "tbl5" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl4", + "sharded_fk_allow.tbl5" + ] + } + }, + { + "comment": "Delete in a table with shard-scoped foreign keys with SET NULL", + "query": "delete from tbl8 where col8 = 1", + "plan": "VT12001: unsupported: you cannot UPDATE primary vindex columns; invalid update on vindex: hash_vin" + }, + { + "comment": "Delete in a table with unsharded foreign key with SET NULL", + "query": "delete from u_tbl9 where col9 = 5", + "plan": { + "QueryType": "DELETE", + "Original": "delete from u_tbl9 where col9 = 5", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col9 from u_tbl9 where 1 != 1", + "Query": "select col9 from u_tbl9 where col9 = 5 for update", + "Table": "u_tbl9" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update u_tbl8 set col8 = null where (col8) in ::fkc_vals", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from u_tbl9 where col9 = 5", + "Table": "u_tbl9" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "update in unsharded table with restrict", + "query": "update u_tbl5 set col5 = 'foo' where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl5 set col5 = 'foo' where id = 1", + "Instructions": { + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl5 set col5 = 'foo' where id = 1", + "Table": "u_tbl5" + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl5" + ] + } + }, + { + "comment": "update in unsharded table with cascade", + "query": "update u_tbl2 set col2 = 'bar' where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl2 set col2 = 'bar' where id = 1", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col2 from u_tbl2 where 1 != 1", + "Query": "select col2 from u_tbl2 where id = 1 for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals and (col3) not in (('bar'))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl2 set col2 = 'bar' where id = 1", + "Table": "u_tbl2" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" + ] + } + }, + { + "comment": "update in unsharded table with cascade - on non-referenced column", + "query": "update u_tbl2 set col_no_ref = 'baz' where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl2 set col_no_ref = 'baz' where id = 1", + "Instructions": { + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl2 set col_no_ref = 'baz' where id = 1", + "Table": "u_tbl2" + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl2" + ] + } + }, + { + "comment": "Update in a table with cross-shard foreign keys disallowed", + "query": "update tbl1 set t1col1 = 'foo' where col1 = 1", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Update in a table with cross-shard foreign keys, column not in update expression - allowed", + "query": "update tbl1 set not_ref_col = 'foo' where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl1 set not_ref_col = 'foo' where id = 1", + "Instructions": { + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update tbl1 set not_ref_col = 'foo' where id = 1", + "Table": "tbl1" + }, + "TablesUsed": [ + "sharded_fk_allow.tbl1" + ] + } + }, + { + "comment": "Update in a table with column modified not shard-scoped foreign key whereas other column referencing same table is - disallowed", + "query": "update tbl7 set t7col7 = 'foo', t7col72 = 42", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Update in a table with shard-scoped foreign keys with cascade", + "query": "update tbl5 set t5col5 = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl5 set t5col5 = 'foo'", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select t5col5 from tbl5 where 1 != 1", + "Query": "select t5col5 from tbl5 for update", + "Table": "tbl5" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update tbl4 set t4col4 = null where (t4col4) in ::fkc_vals and (t4col4) not in (('foo'))", + "Table": "tbl4" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update tbl5 set t5col5 = 'foo'", + "Table": "tbl5" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl4", + "sharded_fk_allow.tbl5" + ] + } + }, + { + "comment": "Insertion in a table with 2 foreign keys constraint with same table on different columns - both are not shard scoped - disallowed", + "query": "insert into tbl6 (col6, t6col6) values (100, 'foo')", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Update a table with parent and child foreign keys - shard scoped", + "query": "update tbl2 set col = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl2 set col = 'foo'", + "Instructions": { + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update tbl2 set col = 'foo'", + "Table": "tbl2" + }, + "TablesUsed": [ + "sharded_fk_allow.tbl2" + ] + } + }, + { + "comment": "update table with column's parent foreign key cross shard", + "query": "update tbl10 set col = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl10 set col = 'foo'", + "Instructions": { + "OperatorType": "FKVerify", + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Projection", + "Expressions": [ + "INT64(1) as 1" + ], + "Inputs": [ + { + "OperatorType": "Limit", + "Count": "INT64(1)", + "Inputs": [ + { + "OperatorType": "Filter", + "Predicate": "tbl3.col is null", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "R:0,R:0", + "TableName": "tbl10_tbl3", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select 1 from tbl10 where 1 != 1", + "Query": "select 1 from tbl10 lock in share mode", + "Table": "tbl10" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl3.col from tbl3 where 1 != 1", + "Query": "select tbl3.col from tbl3 where tbl3.col = 'foo' lock in share mode", + "Table": "tbl3" + } + ] + } + ] + } + ] + } + ] + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update tbl10 set col = 'foo'", + "Table": "tbl10" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl10", + "sharded_fk_allow.tbl3" + ] + } + }, + { + "comment": "delete table with shard scoped foreign key set default - disallowed", + "query": "delete from tbl20 where col = 'bar'", + "plan": "VT09016: Cannot delete or update a parent row: a foreign key constraint fails" + }, + { + "comment": "Delete table with cross-shard foreign key with set null - should be eventually allowed", + "query": "delete from tbl9 where col9 = 34", + "plan": { + "QueryType": "DELETE", + "Original": "delete from tbl9 where col9 = 34", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select col9 from tbl9 where 1 != 1", + "Query": "select col9 from tbl9 where col9 = 34 for update", + "Table": "tbl9", + "Values": [ + "INT64(34)" + ], + "Vindex": "hash_vin" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update tbl4 set col_ref = null where (col_ref) in ::fkc_vals", + "Table": "tbl4" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from tbl9 where col9 = 34", + "Table": "tbl9", + "Values": [ + "INT64(34)" + ], + "Vindex": "hash_vin" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl4", + "sharded_fk_allow.tbl9" + ] + } + }, + { + "comment": "update table with same column having reference to different tables, one with on update cascade other with on update set null - child table have further reference", + "query": "update u_tbl1 set col1 = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl1 set col1 = 'foo'", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col1, col1 from u_tbl1 where 1 != 1", + "Query": "select col1, col1 from u_tbl1 for update", + "Table": "u_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col2 from u_tbl2 where 1 != 1", + "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals1 and (col3) not in (('foo'))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set col2 = 'foo' where (col2) in ::fkc_vals", + "Table": "u_tbl2" + } + ] + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "FkCascade", + "BvName": "fkc_vals2", + "Cols": [ + 1 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col9 from u_tbl9 where 1 != 1", + "Query": "select col9 from u_tbl9 where (col9) in ::fkc_vals2 and (col9) not in (('foo')) for update", + "Table": "u_tbl9" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals3", + "Cols": [ + 0 + ], + "Query": "update u_tbl8 set col8 = null where (col8) in ::fkc_vals3", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl9 set col9 = null where (col9) in ::fkc_vals2 and (col9) not in (('foo'))", + "Table": "u_tbl9" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl1 set col1 = 'foo'", + "Table": "u_tbl1" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "update in a table with limit - disallowed", + "query": "update u_tbl2 set col2 = 'bar' limit 2", + "plan": "VT12001: unsupported: update with limit with foreign key constraints" + }, + { + "comment": "update in a table with non-literal value - set null fail due to child update where condition", + "query": "update u_tbl2 set m = 2, col2 = col1 + 'bar' where id = 1", + "plan": "VT12001: unsupported: update expression with non-literal values with foreign key constraints" + }, + { + "comment": "update in a table with non-literal value - with cascade fail as the cascade value is not known", + "query": "update u_tbl1 set m = 2, col1 = x + 'bar' where id = 1", + "plan": "VT12001: unsupported: update expression with non-literal values with foreign key constraints" + }, + { + "comment": "update in a table with set null, non-literal value on non-foreign key column - allowed", + "query": "update u_tbl2 set m = col1 + 'bar', col2 = 2 where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl2 set m = col1 + 'bar', col2 = 2 where id = 1", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col2 from u_tbl2 where 1 != 1", + "Query": "select col2 from u_tbl2 where id = 1 for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals and (col3) not in ((2))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl2 set m = col1 + 'bar', col2 = 2 where id = 1", + "Table": "u_tbl2" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" + ] + } + }, + { + "comment": "update in a table with cascade, non-literal value on non-foreign key column - allowed", + "query": "update u_tbl1 set m = x + 'bar', col1 = 2 where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl1 set m = x + 'bar', col1 = 2 where id = 1", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col1, col1 from u_tbl1 where 1 != 1", + "Query": "select col1, col1 from u_tbl1 where id = 1 for update", + "Table": "u_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col2 from u_tbl2 where 1 != 1", + "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals1 and (col3) not in ((2))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set col2 = 2 where (col2) in ::fkc_vals", + "Table": "u_tbl2" + } + ] + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "FkCascade", + "BvName": "fkc_vals2", + "Cols": [ + 1 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col9 from u_tbl9 where 1 != 1", + "Query": "select col9 from u_tbl9 where (col9) in ::fkc_vals2 and (col9) not in ((2)) for update", + "Table": "u_tbl9" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals3", + "Cols": [ + 0 + ], + "Query": "update u_tbl8 set col8 = null where (col8) in ::fkc_vals3", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl9 set col9 = null where (col9) in ::fkc_vals2 and (col9) not in ((2))", + "Table": "u_tbl9" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl1 set m = x + 'bar', col1 = 2 where id = 1", + "Table": "u_tbl1" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "update in a table with a child table having SET DEFAULT constraint - disallowed", + "query": "update tbl20 set col2 = 'bar'", + "plan": "VT09016: Cannot delete or update a parent row: a foreign key constraint fails" + }, + { + "comment": "delete in a table with limit - disallowed", + "query": "delete from u_tbl2 limit 2", + "plan": "VT12001: unsupported: foreign keys management at vitess with limit" + }, + { + "comment": "update with fk on cross-shard with a where condition on non-literal value - disallowed", + "query": "update tbl3 set coly = colx + 10 where coly = 10", + "plan": "VT12001: unsupported: update expression with non-literal values with foreign key constraints" + }, + { + "comment": "update with fk on cross-shard with a where condition", + "query": "update tbl3 set coly = 20 where coly = 10", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl3 set coly = 20 where coly = 10", + "Instructions": { + "OperatorType": "FKVerify", + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Projection", + "Expressions": [ + "INT64(1) as 1" + ], + "Inputs": [ + { + "OperatorType": "Limit", + "Count": "INT64(1)", + "Inputs": [ + { + "OperatorType": "Filter", + "Predicate": "tbl1.t1col1 is null", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "R:0,R:0", + "TableName": "tbl3_tbl1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select 1 from tbl3 where 1 != 1", + "Query": "select 1 from tbl3 where tbl3.coly = 10 lock in share mode", + "Table": "tbl3" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl1.t1col1 from tbl1 where 1 != 1", + "Query": "select tbl1.t1col1 from tbl1 where tbl1.t1col1 = 20 lock in share mode", + "Table": "tbl1" + } + ] + } + ] + } + ] + } + ] + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update tbl3 set coly = 20 where tbl3.coly = 10", + "Table": "tbl3" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl1", + "sharded_fk_allow.tbl3" + ] + } + }, + { + "comment": "Update in a table with shard-scoped foreign keys with cascade that requires a validation of a different parent foreign key", + "query": "update u_tbl6 set col6 = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl6 set col6 = 'foo'", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col6 from u_tbl6 where 1 != 1", + "Query": "select col6 from u_tbl6 for update", + "Table": "u_tbl6" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FKVerify", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl8 left join u_tbl9 on u_tbl9.col9 = 'foo' where 1 != 1", + "Query": "select 1 from u_tbl8 left join u_tbl9 on u_tbl9.col9 = 'foo' where (u_tbl8.col8) in ::fkc_vals and u_tbl9.col9 is null limit 1 lock in share mode", + "Table": "u_tbl8, u_tbl9" + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl8 set col8 = 'foo' where (u_tbl8.col8) in ::fkc_vals", + "Table": "u_tbl8" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl6 set col6 = 'foo'", + "Table": "u_tbl6" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl6", + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "Update that cascades and requires parent fk and restrict child fk verification", + "query": "update u_tbl7 set col7 = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl7 set col7 = 'foo'", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col7 from u_tbl7 where 1 != 1", + "Query": "select col7 from u_tbl7 for update", + "Table": "u_tbl7" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FKVerify", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = 'foo' where 1 != 1", + "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = 'foo' where (u_tbl4.col4) in ::fkc_vals and u_tbl3.col3 is null limit 1 lock in share mode", + "Table": "u_tbl3, u_tbl4" + }, + { + "InputName": "VerifyChild-2", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl4, u_tbl9 where 1 != 1", + "Query": "select 1 from u_tbl4, u_tbl9 where (u_tbl4.col4) in ::fkc_vals and (u_tbl4.col4) not in (('foo')) and u_tbl4.col4 = u_tbl9.col9 limit 1 lock in share mode", + "Table": "u_tbl4, u_tbl9" + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl4 set col4 = 'foo' where (u_tbl4.col4) in ::fkc_vals", + "Table": "u_tbl4" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl7 set col7 = 'foo'", + "Table": "u_tbl7" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl4", + "unsharded_fk_allow.u_tbl7", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "Insert with on duplicate key update - foreign keys disallowed", + "query": "insert into u_tbl1 (id, col1) values (1, 3) on duplicate key update col1 = 5", + "plan": "VT12001: unsupported: ON DUPLICATE KEY UPDATE with foreign keys" + }, + { + "comment": "Insert with on duplicate key update - foreign keys not on update column - allowed", + "query": "insert into u_tbl1 (id, col1, foo) values (1, 3, 'bar') on duplicate key update foo = 'baz'", + "plan": { + "QueryType": "INSERT", + "Original": "insert into u_tbl1 (id, col1, foo) values (1, 3, 'bar') on duplicate key update foo = 'baz'", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "insert into u_tbl1(id, col1, foo) values (1, 3, 'bar') on duplicate key update foo = 'baz'", + "TableName": "u_tbl1" + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1" + ] + } + }, + { + "comment": "Insert with unsharded table having fk reference in sharded table", + "query": "insert into u_tbl (id, col) values (1, 2)", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "replace with fk reference unsupported", + "query": "replace into u_tbl1 (id, col1) values (1, 2)", + "plan": "VT12001: unsupported: REPLACE INTO with foreign keys" + }, + { + "comment": "update on a multicol foreign key that set nulls and then cascades", + "query": "update u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select cola, colb from u_multicol_tbl1 where 1 != 1", + "Query": "select cola, colb from u_multicol_tbl1 where id = 3 for update", + "Table": "u_multicol_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0, + 1 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select cola, colb from u_multicol_tbl2 where 1 != 1", + "Query": "select cola, colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (cola, colb) not in ((1, 2)) for update", + "Table": "u_multicol_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0, + 1 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_multicol_tbl3 set cola = null, colb = null where (cola, colb) in ::fkc_vals1", + "Table": "u_multicol_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_multicol_tbl2 set cola = null, colb = null where (cola, colb) in ::fkc_vals and (cola, colb) not in ((1, 2))", + "Table": "u_multicol_tbl2" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "Table": "u_multicol_tbl1" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_multicol_tbl1", + "unsharded_fk_allow.u_multicol_tbl2", + "unsharded_fk_allow.u_multicol_tbl3" + ] + } + } +] diff --git a/go/vt/vtgate/planbuilder/testdata/from_cases.json b/go/vt/vtgate/planbuilder/testdata/from_cases.json index 340d98cf3b4..805ce85dad9 100644 --- a/go/vt/vtgate/planbuilder/testdata/from_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/from_cases.json @@ -2,22 +2,7 @@ { "comment": "Single table sharded scatter", "query": "select col from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user", "Instructions": { @@ -39,22 +24,7 @@ { "comment": "Single table unsharded", "query": "select col from unsharded", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from unsharded", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select col from unsharded where 1 != 1", - "Query": "select col from unsharded", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from unsharded", "Instructions": { @@ -76,22 +46,7 @@ { "comment": "Select from sequence", "query": "select next 2 values from seq", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select next 2 values from seq", - "Instructions": { - "OperatorType": "Route", - "Variant": "Next", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select next 2 values from seq where 1 != 1", - "Query": "select next 2 values from seq", - "Table": "seq" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select next 2 values from seq", "Instructions": { @@ -113,154 +68,32 @@ { "comment": "select next from non-sequence table", "query": "select next value from user", - "v3-plan": "VT03018: NEXT used on a non-sequence table", - "gen4-plan": "NEXT used on a non-sequence table" + "plan": "NEXT used on a non-sequence table `user`" }, { "comment": "select next in derived table", "query": "select 1 from (select next value from seq) t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from (select next value from seq) t", - "Instructions": { - "OperatorType": "Route", - "Variant": "Next", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from (select next 1 values from seq where 1 != 1) as t where 1 != 1", - "Query": "select 1 from (select next 1 values from seq) as t", - "Table": "seq" - } - }, - "gen4-plan": "Incorrect usage/placement of 'NEXT'" + "plan": "Incorrect usage/placement of 'NEXT'" }, { "comment": "select next in derived table", "query": "select * from (select next value from seq) t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from (select next value from seq) t", - "Instructions": { - "OperatorType": "Route", - "Variant": "Next", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from (select next 1 values from seq where 1 != 1) as t where 1 != 1", - "Query": "select * from (select next 1 values from seq) as t", - "Table": "seq" - } - }, - "gen4-plan": "Incorrect usage/placement of 'NEXT'" + "plan": "Incorrect usage/placement of 'NEXT'" }, { "comment": "select next in subquery", "query": "select 1 from user where id in (select next value from seq)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from user where id in (select next value from seq)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Next", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select next 1 values from seq where 1 != 1", - "Query": "select next 1 values from seq", - "Table": "seq" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where :__sq_has_values1 = 1 and id in ::__vals", - "Table": "`user`", - "Values": [ - "::__sq1" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": "Incorrect usage/placement of 'NEXT'" + "plan": "Incorrect usage/placement of 'NEXT'" }, { "comment": "select next in projection", "query": "select (select next value from seq) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select (select next value from seq) from user", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Next", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select next 1 values from seq where 1 != 1", - "Query": "select next 1 values from seq", - "Table": "seq" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :__sq1 from `user` where 1 != 1", - "Query": "select :__sq1 from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": "Incorrect usage/placement of 'NEXT'" + "plan": "Incorrect usage/placement of 'NEXT'" }, { "comment": "Select from reference", "query": "select * from ref", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from ref", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from ref where 1 != 1", - "Query": "select * from ref", - "Table": "ref" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from ref", "Instructions": { @@ -282,22 +115,7 @@ { "comment": "Multi-table unsharded", "query": "select m1.col from unsharded as m1 join unsharded as m2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select m1.col from unsharded as m1 join unsharded as m2", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 where 1 != 1", - "Query": "select m1.col from unsharded as m1 join unsharded as m2", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select m1.col from unsharded as m1 join unsharded as m2", "Instructions": { @@ -319,41 +137,7 @@ { "comment": "Multi-table, multi-chunk", "query": "select music.col from user join music", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select music.col from user join music", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.col from music where 1 != 1", - "Query": "select music.col from music", - "Table": "music" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select music.col from user join music", "Instructions": { @@ -395,22 +179,7 @@ { "comment": "routing rules where table name matches, and there's no alias.", "query": "select * from second_user.user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from second_user.user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from second_user.user", "Instructions": { @@ -432,22 +201,7 @@ { "comment": "routing rules where table name matches, and there's an alias.", "query": "select * from second_user.user as a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from second_user.user as a", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` as a where 1 != 1", - "Query": "select * from `user` as a", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from second_user.user as a", "Instructions": { @@ -469,22 +223,7 @@ { "comment": "routing rules where table name does not match, and there's no alias.", "query": "select * from route1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from route1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` as route1 where 1 != 1", - "Query": "select * from `user` as route1", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from route1", "Instructions": { @@ -506,22 +245,7 @@ { "comment": "routing rules where table name does not match, and there's an alias.", "query": "select * from route1 as a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from route1 as a", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` as a where 1 != 1", - "Query": "select * from `user` as a", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from route1 as a", "Instructions": { @@ -543,22 +267,7 @@ { "comment": "routing rules with primary targeting", "query": "select * from primary_redirect", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from primary_redirect", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` as primary_redirect where 1 != 1", - "Query": "select * from `user` as primary_redirect", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from primary_redirect", "Instructions": { @@ -590,22 +299,7 @@ { "comment": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42", "query": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select foo.col from `user` as foo join `user` on foo.id = `user`.id where 1 != 1", - "Query": "select foo.col from `user` as foo join `user` on foo.id = `user`.id where foo.col = 42", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select second_user.foo.col from second_user.foo join user on second_user.foo.id = user.id where second_user.foo.col = 42", "Instructions": { @@ -627,7 +321,7 @@ { "comment": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42", "query": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42", "Instructions": { @@ -666,47 +360,6 @@ "Vindex": "user_index" } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user.music.foo from user.music join user on user.music.id = user.id where user.music.col = 42", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1", - "JoinVars": { - "music_id": 0 - }, - "TableName": "music_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id, music.foo from music where 1 != 1", - "Query": "select music.id, music.foo from music where music.col = 42", - "Table": "music" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where `user`.id = :music_id", - "Table": "`user`", - "Values": [ - ":music_id" - ], - "Vindex": "user_index" - } - ] }, "TablesUsed": [ "user.music", @@ -717,41 +370,7 @@ { "comment": "',' join", "query": "select music.col from user, music", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select music.col from user, music", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.col from music where 1 != 1", - "Query": "select music.col from music", - "Table": "music" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select music.col from user, music", "Instructions": { @@ -793,22 +412,7 @@ { "comment": "',' join unsharded", "query": "select u1.a, u2.a from unsharded u1, unsharded u2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.a, u2.a from unsharded u1, unsharded u2", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2 where 1 != 1", - "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.a, u2.a from unsharded u1, unsharded u2", "Instructions": { @@ -830,22 +434,7 @@ { "comment": "',' 3-way join unsharded", "query": "select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3 where 1 != 1", - "Query": "select u1.a, u2.a from unsharded as u1, unsharded as u2, unsharded as u3", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.a, u2.a from unsharded u1, unsharded u2, unsharded u3", "Instructions": { @@ -895,9 +484,9 @@ "Instructions": { "OperatorType": "Join", "Variant": "LeftJoin", - "JoinColumnIndexes": "L:1", + "JoinColumnIndexes": "L:0", "JoinVars": { - "u_a": 0 + "u_a": 1 }, "TableName": "`user`_unsharded", "Inputs": [ @@ -908,8 +497,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.a, u.col from `user` as u where 1 != 1", - "Query": "select u.a, u.col from `user` as u", + "FieldQuery": "select u.col, u.a from `user` as u where 1 != 1", + "Query": "select u.col, u.a from `user` as u", "Table": "`user`" }, { @@ -940,16 +529,16 @@ "Instructions": { "OperatorType": "Join", "Variant": "LeftJoin", - "JoinColumnIndexes": "L:1,R:0", + "JoinColumnIndexes": "L:0,R:0", "JoinVars": { - "m1_col": 0 + "m1_col": 1 }, "TableName": "`user`_unsharded_unsharded", "Inputs": [ { "OperatorType": "Join", "Variant": "LeftJoin", - "JoinColumnIndexes": "R:0,L:0", + "JoinColumnIndexes": "L:0,R:0", "JoinVars": { "user_col": 0 }, @@ -1110,24 +699,9 @@ } }, { - "comment": "Straight-join (Gen4 ignores the straight_join hint)", + "comment": "Straight-join (ignores the straight_join hint)", "query": "select m1.col from unsharded as m1 straight_join unsharded as m2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select m1.col from unsharded as m1 straight_join unsharded as m2", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select m1.col from unsharded as m1 straight_join unsharded as m2 where 1 != 1", - "Query": "select m1.col from unsharded as m1 straight_join unsharded as m2", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select m1.col from unsharded as m1 straight_join unsharded as m2", "Instructions": { @@ -1149,60 +723,7 @@ { "comment": "Three-way join", "query": "select user.col from user join unsharded as m1 join unsharded as m2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join unsharded as m1 join unsharded as m2", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_unsharded_unsharded", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded as m1 where 1 != 1", - "Query": "select 1 from unsharded as m1", - "Table": "unsharded" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded as m2 where 1 != 1", - "Query": "select 1 from unsharded as m2", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join unsharded as m1 join unsharded as m2", "Instructions": { @@ -1244,41 +765,7 @@ { "comment": "Parenthesized, single chunk", "query": "select user.col from user join (unsharded as m1 join unsharded as m2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join (unsharded as m1 join unsharded as m2)", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from (unsharded as m1 join unsharded as m2) where 1 != 1", - "Query": "select 1 from (unsharded as m1 join unsharded as m2)", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join (unsharded as m1 join unsharded as m2)", "Instructions": { @@ -1320,59 +807,7 @@ { "comment": "Parenthesized, multi-chunk", "query": "select user.col from user join (user as u1 join unsharded)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join (user as u1 join unsharded)", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u1 where 1 != 1", - "Query": "select 1 from `user` as u1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded where 1 != 1", - "Query": "select 1 from unsharded", - "Table": "unsharded" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join (user as u1 join unsharded)", "Instructions": { @@ -1433,22 +868,7 @@ { "comment": "index hints, make sure they are not stripped.", "query": "select user.col from user use index(a)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user use index(a)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` use index (a) where 1 != 1", - "Query": "select `user`.col from `user` use index (a)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user use index(a)", "Instructions": { @@ -1470,22 +890,7 @@ { "comment": "multiple index hints, make sure they are not stripped.", "query": "select user.col from user use index(a) use index for group by (b)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user use index(a) use index for group by (b)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` use index (a) use index for group by (b) where 1 != 1", - "Query": "select `user`.col from `user` use index (a) use index for group by (b)", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user use index(a) use index for group by (b)", "Instructions": { @@ -1507,7 +912,7 @@ { "comment": "mergeable sharded join on unique vindex", "query": "select user.col from user join user_extra on user.id = user_extra.user_id", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user.id = user_extra.user_id", "Instructions": { @@ -1517,23 +922,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1", - "Query": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on user.id = user_extra.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1", - "Query": "select `user`.col from `user`, user_extra where `user`.id = user_extra.user_id", + "FieldQuery": "select `user`.col from `user`, user_extra where 1 != 1", + "Query": "select `user`.col from `user`, user_extra where `user`.id = user_extra.user_id", "Table": "`user`, user_extra" }, "TablesUsed": [ @@ -1545,22 +935,7 @@ { "comment": "mergeable sharded join on unique vindex (parenthesized ON clause)", "query": "select user.col from user join user_extra on (user.id = user_extra.user_id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on (user.id = user_extra.user_id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1", - "Query": "select `user`.col from `user` join user_extra on `user`.id = user_extra.user_id", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on (user.id = user_extra.user_id)", "Instructions": { @@ -1583,22 +958,7 @@ { "comment": "mergeable sharded join on unique vindex, with a stray condition", "query": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` join user_extra on `user`.col between 1 and 2 and `user`.id = user_extra.user_id where 1 != 1", - "Query": "select `user`.col from `user` join user_extra on `user`.col between 1 and 2 and `user`.id = user_extra.user_id", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user.col between 1 and 2 and user.id = user_extra.user_id", "Instructions": { @@ -1621,22 +981,7 @@ { "comment": "mergeable sharded join on unique vindex, swapped operands", "query": "select user.col from user join user_extra on user_extra.user_id = user.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on user_extra.user_id = user.id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` join user_extra on user_extra.user_id = `user`.id where 1 != 1", - "Query": "select `user`.col from `user` join user_extra on user_extra.user_id = `user`.id", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user_extra.user_id = user.id", "Instructions": { @@ -1659,26 +1004,7 @@ { "comment": "mergeable sharded join on unique vindex, and condition", "query": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` join user_extra on `user`.id = 5 and `user`.id = user_extra.user_id where 1 != 1", - "Query": "select `user`.col from `user` join user_extra on `user`.id = 5 and `user`.id = user_extra.user_id", - "Table": "`user`, user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user.id = 5 and user.id = user_extra.user_id", "Instructions": { @@ -1705,7 +1031,7 @@ { "comment": "sharded join on unique vindex, inequality", "query": "select user.col from user join user_extra on user.id < user_extra.user_id", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user.id < user_extra.user_id", "Instructions": { @@ -1740,43 +1066,6 @@ "Table": "user_extra" } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on user.id < user_extra.user_id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where :user_id < user_extra.user_id", - "Table": "user_extra" - } - ] }, "TablesUsed": [ "user.user", @@ -1787,45 +1076,7 @@ { "comment": "sharded join, non-col reference RHS", "query": "select user.col from user join user_extra on user.id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on user.id = 5", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` where `user`.id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user.id = 5", "Instructions": { @@ -1871,45 +1122,7 @@ { "comment": "sharded join, non-col reference LHS", "query": "select user.col from user join user_extra on 5 = user.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on 5 = user.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` where `user`.id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on 5 = user.id", "Instructions": { @@ -1955,44 +1168,7 @@ { "comment": "sharded join, non-vindex col", "query": "select user.col from user join user_extra on user.id = user_extra.col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on user.id = user_extra.col", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "user_id": 1 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1", - "Query": "select `user`.col, `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.col = :user_id", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user.id = user_extra.col", "Instructions": { @@ -2041,56 +1217,15 @@ { "comment": "sharded join, non-unique vindex", "query": "select user.col from user_extra join user on user_extra.user_id = user.name", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user_extra join user on user_extra.user_id = user.name", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_extra_user_id": 0 - }, - "TableName": "user_extra_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1", - "Query": "select user_extra.user_id from user_extra", - "Table": "user_extra" - }, - { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` where `user`.`name` = :user_extra_user_id", - "Table": "`user`", - "Values": [ - ":user_extra_user_id" - ], - "Vindex": "name_user_map" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user_extra join user on user_extra.user_id = user.name", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1", + "JoinColumnIndexes": "L:0", "JoinVars": { - "user_name": 0 + "user_name": 1 }, "TableName": "`user`_user_extra", "Inputs": [ @@ -2101,8 +1236,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.`name`, `user`.col from `user` where 1 != 1", - "Query": "select `user`.`name`, `user`.col from `user`", + "FieldQuery": "select `user`.col, `user`.`name` from `user` where 1 != 1", + "Query": "select `user`.col, `user`.`name` from `user`", "Table": "`user`" }, { @@ -2131,22 +1266,7 @@ { "comment": "join with reference table", "query": "select user.col from user join ref", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join ref", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` join ref where 1 != 1", - "Query": "select `user`.col from `user` join ref", - "Table": "`user`, ref" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join ref", "Instructions": { @@ -2169,22 +1289,7 @@ { "comment": "reference table self-join", "query": "select r1.col from ref r1 join ref", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select r1.col from ref r1 join ref", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select r1.col from ref as r1 join ref where 1 != 1", - "Query": "select r1.col from ref as r1 join ref", - "Table": "ref" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select r1.col from ref r1 join ref", "Instructions": { @@ -2206,7 +1311,7 @@ { "comment": "reference table can merge with other opcodes left to right.", "query": "select ref.col from ref join user", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select ref.col from ref join user", "Instructions": { @@ -2216,23 +1321,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select ref.col from ref join `user` where 1 != 1", - "Query": "select ref.col from ref join `user`", - "Table": "`user`, ref" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select ref.col from ref join user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select ref.col from ref, `user` where 1 != 1", - "Query": "select ref.col from ref, `user`", + "FieldQuery": "select ref.col from ref, `user` where 1 != 1", + "Query": "select ref.col from ref, `user`", "Table": "`user`, ref" }, "TablesUsed": [ @@ -2244,26 +1334,7 @@ { "comment": "reference table can merge with other opcodes left to right and vindex value is in the plan.\n# This tests that route.Merge also copies the condition to the LHS.", "query": "select ref.col from ref join (select aa from user where user.id=1) user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select ref.col from ref join (select aa from user where user.id=1) user", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select ref.col from ref join (select aa from `user` where 1 != 1) as `user` where 1 != 1", - "Query": "select ref.col from ref join (select aa from `user` where `user`.id = 1) as `user`", - "Table": "`user`, ref", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select ref.col from ref join (select aa from user where user.id=1) user", "Instructions": { @@ -2273,8 +1344,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select ref.col from ref, (select aa from `user` where 1 != 1) as `user` where 1 != 1", - "Query": "select ref.col from ref, (select aa from `user` where `user`.id = 1) as `user`", + "FieldQuery": "select ref.col from (select aa from `user` where 1 != 1) as `user`, ref where 1 != 1", + "Query": "select ref.col from (select aa from `user` where `user`.id = 1) as `user`, ref", "Table": "`user`, ref", "Values": [ "INT64(1)" @@ -2290,41 +1361,7 @@ { "comment": "routing rules for join, unsharded route wins if we can't find a merged route", "query": "select route2.col from route2 join user_extra", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select route2.col from route2 join user_extra", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "unsharded_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select route2.col from unsharded as route2 where 1 != 1", - "Query": "select route2.col from unsharded as route2", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select route2.col from route2 join user_extra", "Instructions": { @@ -2366,26 +1403,7 @@ { "comment": "derived table", "query": "select id from (select id, col from user where id = 5) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from (select id, col from user where id = 5) as t", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from (select id, col from `user` where 1 != 1) as t where 1 != 1", - "Query": "select id from (select id, col from `user` where id = 5) as t", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from (select id, col from user where id = 5) as t", "Instructions": { @@ -2411,26 +1429,7 @@ { "comment": "derived table with join", "query": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1", - "Query": "select t.id from (select id from `user` where id = 5) as t join user_extra on t.id = user_extra.user_id", - "Table": "`user`, user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.user_id", "Instructions": { @@ -2457,26 +1456,7 @@ { "comment": "derived table with join, and aliased references", "query": "select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select t.id from (select `user`.id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1", - "Query": "select t.id from (select `user`.id from `user` where `user`.id = 5) as t join user_extra on t.id = user_extra.user_id", - "Table": "`user`, user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.id from (select user.id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id", "Instructions": { @@ -2503,28 +1483,12 @@ { "comment": "derived table with join, duplicate columns", "query": "select t.id from (select user.id, id from user where user.id = 5) as t join user_extra on t.id = user_extra.user_id", - "v3-plan": "VT12001: unsupported: duplicate column aliases: id", - "gen4-plan": "Duplicate column name 'id'" + "plan": "Duplicate column name 'id'" }, { "comment": "derived table in RHS of join", "query": "select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select t.id from user_extra join (select id from `user` where 1 != 1) as t on t.id = user_extra.user_id where 1 != 1", - "Query": "select t.id from user_extra join (select id from `user` where id = 5) as t on t.id = user_extra.user_id", - "Table": "user_extra, `user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.id from user_extra join (select id from user where id = 5) as t on t.id = user_extra.user_id", "Instructions": { @@ -2534,8 +1498,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select t.id from user_extra, (select id from `user` where 1 != 1) as t where 1 != 1", - "Query": "select t.id from user_extra, (select id from `user` where id = 5) as t where t.id = user_extra.user_id", + "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t, user_extra where 1 != 1", + "Query": "select t.id from (select id from `user` where id = 5) as t, user_extra where t.id = user_extra.user_id", "Table": "`user`, user_extra", "Values": [ "INT64(5)" @@ -2551,48 +1515,7 @@ { "comment": "derived table in FROM with cross-shard join", "query": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "t_id": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t where 1 != 1", - "Query": "select t.id from (select id from `user` where id = 5) as t", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.col = :t_id", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.id from (select id from user where id = 5) as t join user_extra on t.id = user_extra.col", "Instructions": { @@ -2641,26 +1564,7 @@ { "comment": "routing rules for derived table", "query": "select id from (select id, col from route1 where id = 5) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from (select id, col from route1 where id = 5) as t", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1", - "Query": "select id from (select id, col from `user` as route1 where id = 5) as t", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from (select id, col from route1 where id = 5) as t", "Instructions": { @@ -2686,46 +1590,12 @@ { "comment": "derived table missing columns", "query": "select t.id from (select id from user) as t join user_extra on t.id = user_extra.user_id where t.col = 42", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.id from (select id from user) as t join user_extra on t.id = user_extra.user_id where t.col = 42", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select t.id from (select id from `user` where 1 != 1) as t join user_extra on t.id = user_extra.user_id where 1 != 1", - "Query": "select t.id from (select id from `user`) as t join user_extra on t.id = user_extra.user_id where t.col = 42", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": "symbol t.col not found" + "plan": "column 't.col' not found" }, { "comment": "routing rules for derived table where the constraint is in the outer query", "query": "select id from (select id, col from route1) as t where id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from (select id, col from route1) as t where id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from (select id, col from `user` as route1 where 1 != 1) as t where 1 != 1", - "Query": "select id from (select id, col from `user` as route1) as t where id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from (select id, col from route1) as t where id = 5", "Instructions": { @@ -2751,42 +1621,12 @@ { "comment": "routing rules for derived table where the constraint is in the outer query", "query": "select id from (select id+col as foo from route1) as t where foo = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from (select id+col as foo from route1) as t where foo = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from (select id + col as foo from `user` as route1 where 1 != 1) as t where 1 != 1", - "Query": "select id from (select id + col as foo from `user` as route1) as t where foo = 5", - "Table": "`user`" - } - }, - "gen4-plan": "symbol id not found" + "plan": "column 'id' not found in table 't'" }, { "comment": "push predicate on joined derived tables", "query": "select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select t.id from (select id, textcol1 as baz from `user` as route1 where 1 != 1) as t join (select id, textcol1 + textcol1 as baz from `user` where 1 != 1) as s on t.id = s.id where 1 != 1", - "Query": "select t.id from (select id, textcol1 as baz from `user` as route1) as t join (select id, textcol1 + textcol1 as baz from `user`) as s on t.id = s.id where t.baz = '3' and s.baz = '3'", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.id from (select id, textcol1 as baz from route1) as t join (select id, textcol1+textcol1 as baz from user) as s ON t.id = s.id WHERE t.baz = '3' AND s.baz = '3'", "Instructions": { @@ -2808,22 +1648,7 @@ { "comment": "recursive derived table predicate push down", "query": "select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1", - "Query": "select bar from (select foo + 4 as bar from (select colA + colB as foo from `user`) as u) as t where bar = 5", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select bar from (select foo+4 as bar from (select colA+colB as foo from user) as u) as t where bar = 5", "Instructions": { @@ -2845,26 +1670,7 @@ { "comment": "recursive derived table lookups", "query": "select id from (select id from (select id from user) as u) as t where id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from (select id from (select id from user) as u) as t where id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from (select id from (select id from `user` where 1 != 1) as u where 1 != 1) as t where 1 != 1", - "Query": "select id from (select id from (select id from `user`) as u) as t where id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from (select id from (select id from user) as u) as t where id = 5", "Instructions": { @@ -2890,7 +1696,7 @@ { "comment": "merge derived tables with single-shard routes", "query": "select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e", "Instructions": { @@ -2900,27 +1706,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.col, e.col from (select col from `user` where 1 != 1) as u join (select col from user_extra where 1 != 1) as e where 1 != 1", - "Query": "select u.col, e.col from (select col from `user` where id = 5) as u join (select col from user_extra where user_id = 5) as e", - "Table": "`user`, user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select u.col, e.col from (select col from user where id = 5) as u join (select col from user_extra where user_id = 5) as e", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.col, e.col from (select col from `user` where 1 != 1) as u, (select col from user_extra where 1 != 1) as e where 1 != 1", - "Query": "select u.col, e.col from (select col from `user` where id = 5) as u, (select col from user_extra where user_id = 5) as e", + "FieldQuery": "select u.col, e.col from (select col from `user` where 1 != 1) as u, (select col from user_extra where 1 != 1) as e where 1 != 1", + "Query": "select u.col, e.col from (select col from `user` where id = 5) as u, (select col from user_extra where user_id = 5) as e", "Table": "`user`, user_extra", "Values": [ "INT64(5)" @@ -2936,41 +1723,7 @@ { "comment": "join of information_schema with normal table", "query": "select unsharded.foo from information_schema.CHARACTER_SETS join unsharded", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select unsharded.foo from information_schema.CHARACTER_SETS join unsharded", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "TableName": "information_schema.CHARACTER_SETS_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from information_schema.CHARACTER_SETS where 1 != 1", - "Query": "select 1 from information_schema.CHARACTER_SETS", - "Table": "information_schema.CHARACTER_SETS" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.foo from unsharded where 1 != 1", - "Query": "select unsharded.foo from unsharded", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select unsharded.foo from information_schema.CHARACTER_SETS join unsharded", "Instructions": { @@ -3011,41 +1764,7 @@ { "comment": "join of normal table with information_schema", "query": "select unsharded.foo from unsharded join information_schema.CHARACTER_SETS", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select unsharded.foo from unsharded join information_schema.CHARACTER_SETS", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "unsharded_information_schema.CHARACTER_SETS", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.foo from unsharded where 1 != 1", - "Query": "select unsharded.foo from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from information_schema.CHARACTER_SETS where 1 != 1", - "Query": "select 1 from information_schema.CHARACTER_SETS", - "Table": "information_schema.CHARACTER_SETS" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select unsharded.foo from unsharded join information_schema.CHARACTER_SETS", "Instructions": { @@ -3086,7 +1805,7 @@ { "comment": "wire-up on join with cross-shard derived table", "query": "select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id", "Instructions": { @@ -3100,41 +1819,32 @@ "TableName": "`user`_user_extra_unsharded", "Inputs": [ { - "OperatorType": "SimpleProjection", - "Columns": [ - 1, - 0 - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:1", + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select t.col1, t.id from (select `user`.id, `user`.col1 from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.col1, t.id from (select `user`.id, `user`.col1 from `user`) as t", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra", + "Table": "user_extra" } ] }, @@ -3146,148 +1856,90 @@ "Sharded": false }, "FieldQuery": "select 1 from unsharded where 1 != 1", - "Query": "select 1 from unsharded where unsharded.col1 = :t_col1 and unsharded.id = :t_id", + "Query": "select 1 from unsharded where unsharded.id = :t_id and unsharded.col1 = :t_col1", "Table": "unsharded" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "main.unsharded", + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "wire-up on within cross-shard derived table", + "query": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t", + "plan": { "QueryType": "SELECT", - "Original": "select t.col1 from (select user.id, user.col1 from user join user_extra) as t join unsharded on unsharded.col1 = t.col1 and unsharded.id = t.id", + "Original": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t", "Instructions": { "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "L:0", "JoinVars": { - "t_col1": 0, - "t_id": 1 + "user_col": 1 }, - "TableName": "`user`_user_extra_unsharded", + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "SimpleProjection", - "Columns": [ - 1, - 0 - ], - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select t.id, t.`user.col` from (select `user`.id, `user`.col1, `user`.col as `user.col` from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.id, t.`user.col` from (select `user`.id, `user`.col1, `user`.col as `user.col` from `user`) as t", + "Table": "`user`" }, { "OperatorType": "Route", - "Variant": "Unsharded", + "Variant": "Scatter", "Keyspace": { - "Name": "main", - "Sharded": false + "Name": "user", + "Sharded": true }, - "FieldQuery": "select 1 from unsharded where 1 != 1", - "Query": "select 1 from unsharded where unsharded.col1 = :t_col1 and unsharded.id = :t_id", - "Table": "unsharded" + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra where user_extra.col = :user_col", + "Table": "user_extra" } ] }, "TablesUsed": [ - "main.unsharded", "user.user", "user.user_extra" ] } }, { - "comment": "wire-up on within cross-shard derived table", - "query": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t", - "v3-plan": { + "comment": "Join with cross-shard derived table on rhs", + "query": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t", + "plan": { "QueryType": "SELECT", - "Original": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t", + "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "TableName": "unsharded_a_`user`_user_extra", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "JoinVars": { - "user_col": 2 + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col1, `user`.col from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col1, `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.col = :user_col", - "Table": "user_extra" - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ + "FieldQuery": "select 1 from unsharded_a as ua where 1 != 1", + "Query": "select 1 from unsharded_a as ua", + "Table": "unsharded_a" + }, { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1,L:2", - "JoinVars": { - "user_col": 0 - }, + "JoinColumnIndexes": "L:0", "TableName": "`user`_user_extra", "Inputs": [ { @@ -3297,8 +1949,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col, `user`.id, `user`.col1 from `user` where 1 != 1", - "Query": "select `user`.col, `user`.id, `user`.col1 from `user`", + "FieldQuery": "select t.col1 from (select `user`.id, `user`.col1 from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.col1 from (select `user`.id, `user`.col1 from `user`) as t", "Table": "`user`" }, { @@ -3309,7 +1961,7 @@ "Sharded": true }, "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.col = :user_col", + "Query": "select 1 from user_extra", "Table": "user_extra" } ] @@ -3317,147 +1969,16 @@ ] }, "TablesUsed": [ + "main.unsharded_a", "user.user", "user.user_extra" ] } }, { - "comment": "Join with cross-shard derived table on rhs", - "query": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "TableName": "unsharded_a_`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded_a as ua where 1 != 1", - "Query": "select 1 from unsharded_a as ua", - "Table": "unsharded_a" - }, - { - "OperatorType": "SimpleProjection", - "Columns": [ - 1 - ], - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "TableName": "unsharded_a_`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded_a as ua where 1 != 1", - "Query": "select 1 from unsharded_a as ua", - "Table": "unsharded_a" - }, - { - "OperatorType": "SimpleProjection", - "Columns": [ - 1 - ], - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] - } - ] - }, - "TablesUsed": [ - "main.unsharded_a", - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "Join with cross-shard derived table on rhs - push down join predicate to derived table", - "query": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t on t.id = ua.id", - "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery", - "gen4-plan": { + "comment": "Join with cross-shard derived table on rhs - push down join predicate to derived table", + "query": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t on t.id = ua.id", + "plan": { "QueryType": "SELECT", "Original": "select t.col1 from unsharded_a ua join (select user.id, user.col1 from user join user_extra) as t on t.id = ua.id", "Instructions": { @@ -3481,44 +2002,36 @@ "Table": "unsharded_a" }, { - "OperatorType": "SimpleProjection", - "Columns": [ - 1 - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col1 from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col1 from `user` where `user`.id = :ua_id", - "Table": "`user`", - "Values": [ - ":ua_id" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select t.col1 from (select `user`.id, `user`.col1 from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.col1 from (select `user`.id, `user`.col1 from `user` where `user`.id = :ua_id) as t", + "Table": "`user`", + "Values": [ + ":ua_id" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra", + "Table": "user_extra" } ] } @@ -3534,43 +2047,7 @@ { "comment": "subquery in ON clause, single route", "query": "select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq1 where 1 != 1", - "Query": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq1", - "Table": "unsharded_a, unsharded_b" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select unsharded_a.col from unsharded_a join unsharded_b on (select col from user)", "Instructions": { @@ -3581,6 +2058,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3592,6 +2070,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { @@ -3614,98 +2093,18 @@ { "comment": "subquery in ON clause as sub-expression", "query": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col + :__sq1 where 1 != 1", - "Query": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col + :__sq1", - "Table": "unsharded_a, unsharded_b" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1", - "Query": "select unsharded_a.col from unsharded_a, unsharded_b where unsharded_a.col + :__sq1", - "Table": "unsharded_a, unsharded_b" - } - ] - }, - "TablesUsed": [ - "main.unsharded_a", - "main.unsharded_b", - "user.user" - ] - } - }, - { - "comment": "IN subquery in ON clause, single route", - "query": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)", - "v3-plan": { + "plan": { "QueryType": "SELECT", - "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)", + "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col+(select col from user)", "Instructions": { "OperatorType": "Subquery", - "Variant": "PulloutIn", + "Variant": "PulloutValue", "PulloutVars": [ - "__sq_has_values1", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3717,20 +2116,30 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { "Name": "main", "Sharded": false }, - "FieldQuery": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq_has_values1 = 1 and unsharded_a.col in ::__sq1 where 1 != 1", - "Query": "select unsharded_a.col from unsharded_a join unsharded_b on :__sq_has_values1 = 1 and unsharded_a.col in ::__sq1", + "FieldQuery": "select unsharded_a.col from unsharded_a, unsharded_b where 1 != 1", + "Query": "select unsharded_a.col from unsharded_a, unsharded_b where unsharded_a.col + :__sq1", "Table": "unsharded_a, unsharded_b" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "main.unsharded_a", + "main.unsharded_b", + "user.user" + ] + } + }, + { + "comment": "IN subquery in ON clause, single route", + "query": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)", + "plan": { "QueryType": "SELECT", "Original": "select unsharded_a.col from unsharded_a join unsharded_b on unsharded_a.col in (select col from user)", "Instructions": { @@ -3742,6 +2151,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3753,6 +2163,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { @@ -3775,62 +2186,7 @@ { "comment": "subquery in ON clause, with join primitives", "query": "select unsharded.col from unsharded join user on user.col in (select col from user)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select unsharded.col from unsharded join user on user.col in (select col from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "unsharded_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.col from unsharded where 1 != 1", - "Query": "select unsharded.col from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1", - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select unsharded.col from unsharded join user on user.col in (select col from user)", "Instructions": { @@ -3842,6 +2198,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3853,6 +2210,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "L:0", @@ -3905,6 +2263,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -3916,6 +2275,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Join", "Variant": "LeftJoin", "JoinColumnIndexes": "L:0", @@ -3956,81 +2316,7 @@ { "comment": "subquery in ON clause, with join primitives, and join on top\n# The subquery is not pulled all the way out.", "query": "select unsharded.col from unsharded join user on user.col in (select col from user) join unsharded_a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select unsharded.col from unsharded join user on user.col in (select col from user) join unsharded_a", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "unsharded_`user`_unsharded_a", - "Inputs": [ - { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "unsharded_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.col from unsharded where 1 != 1", - "Query": "select unsharded.col from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where :__sq_has_values1 = 1 and `user`.col in ::__sq1", - "Table": "`user`" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded_a where 1 != 1", - "Query": "select 1 from unsharded_a", - "Table": "unsharded_a" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select unsharded.col from unsharded join user on user.col in (select col from user) join unsharded_a", "Instructions": { @@ -4042,6 +2328,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -4053,6 +2340,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "R:0", @@ -4094,7 +2382,7 @@ { "comment": "keyspace-qualified queries", "query": "select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2", "Instructions": { @@ -4129,43 +2417,6 @@ "Table": "unsharded" } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user.user.col1, main.unsharded.col1 from user.user join main.unsharded where main.unsharded.col2 = user.user.col2", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,R:0", - "JoinVars": { - "user_col2": 0 - }, - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col2, `user`.col1 from `user` where 1 != 1", - "Query": "select `user`.col2, `user`.col1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.col1 from unsharded where 1 != 1", - "Query": "select unsharded.col1 from unsharded where unsharded.col2 = :user_col2", - "Table": "unsharded" - } - ] }, "TablesUsed": [ "main.unsharded", @@ -4176,22 +2427,7 @@ { "comment": "implicit table reference for unsharded keyspace", "query": "select main.foo.col from main.foo", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select main.foo.col from main.foo", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select foo.col from foo where 1 != 1", - "Query": "select foo.col from foo", - "Table": "foo" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select main.foo.col from main.foo", "Instructions": { @@ -4213,22 +2449,7 @@ { "comment": "col refs should be case-insensitive", "query": "select user.col from user join user_extra on user.ID = user_extra.User_Id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra on user.ID = user_extra.User_Id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` join user_extra on `user`.ID = user_extra.User_Id where 1 != 1", - "Query": "select `user`.col from `user` join user_extra on `user`.ID = user_extra.User_Id", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra on user.ID = user_extra.User_Id", "Instructions": { @@ -4251,50 +2472,7 @@ { "comment": "derived table with join primitive (FROM)", "query": "select id, t.id from (select user.id from user join user_extra) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, t.id from (select user.id from user join user_extra) as t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 0 - ], - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, t.id from (select user.id from user join user_extra) as t", "Instructions": { @@ -4317,8 +2495,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", + "FieldQuery": "select id from (select `user`.id from `user` where 1 != 1) as t where 1 != 1", + "Query": "select id from (select `user`.id from `user`) as t", "Table": "`user`" }, { @@ -4345,22 +2523,7 @@ { "comment": "database call in ON clause.\n# The on clause is weird because the substitution must even for root expressions.", "query": "select u1.a from unsharded u1 join unsharded u2 on database()", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.a from unsharded u1 join unsharded u2 on database()", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select u1.a from unsharded as u1 join unsharded as u2 on database() where 1 != 1", - "Query": "select u1.a from unsharded as u1 join unsharded as u2 on database()", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.a from unsharded u1 join unsharded u2 on database()", "Instructions": { @@ -4374,30 +2537,15 @@ "Query": "select u1.a from unsharded as u1 join unsharded as u2 on database()", "Table": "unsharded" }, - "TablesUsed": [ - "main.unsharded" - ] - } - }, - { - "comment": "last_insert_id for dual", - "query": "select last_insert_id()", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select last_insert_id()", - "Instructions": { - "OperatorType": "Projection", - "Expressions": [ - ":__lastInsertId as last_insert_id()" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] - } - }, - "gen4-plan": { + "TablesUsed": [ + "main.unsharded" + ] + } + }, + { + "comment": "last_insert_id for dual", + "query": "select last_insert_id()", + "plan": { "QueryType": "SELECT", "Original": "select last_insert_id()", "Instructions": { @@ -4419,22 +2567,7 @@ { "comment": "last_insert_id for sharded keyspace", "query": "select last_insert_id() from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select last_insert_id() from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :__lastInsertId as `last_insert_id()` from `user` where 1 != 1", - "Query": "select :__lastInsertId as `last_insert_id()` from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select last_insert_id() from user", "Instructions": { @@ -4456,22 +2589,7 @@ { "comment": "last_insert_id for unsharded route", "query": "select last_insert_id() from main.unsharded", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select last_insert_id() from main.unsharded", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select :__lastInsertId as `last_insert_id()` from unsharded where 1 != 1", - "Query": "select :__lastInsertId as `last_insert_id()` from unsharded", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select last_insert_id() from main.unsharded", "Instructions": { @@ -4493,48 +2611,7 @@ { "comment": "join with bindvariables", "query": "SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.assembly_id = :user_id and user_extra.user_id = 2", - "Table": "user_extra", - "Values": [ - "INT64(2)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT `user`.`id` FROM `user` INNER JOIN `user_extra` ON `user`.`id` = `user_extra`.`assembly_id` WHERE `user_extra`.`user_id` = 2", "Instructions": { @@ -4587,8 +2664,24 @@ { "comment": "verify ',' vs JOIN precedence", "query": "select u1.a from unsharded u1, unsharded u2 join unsharded u3 on u1.a = u2.a", - "v3-plan": "VT03019: symbol u1.a not found", - "gen4-plan": "symbol u1.a not found" + "plan": { + "QueryType": "SELECT", + "Original": "select u1.a from unsharded u1, unsharded u2 join unsharded u3 on u1.a = u2.a", + "Instructions": { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select u1.a from unsharded as u1, unsharded as u2 join unsharded as u3 on u1.a = u2.a where 1 != 1", + "Query": "select u1.a from unsharded as u1, unsharded as u2 join unsharded as u3 on u1.a = u2.a", + "Table": "unsharded" + }, + "TablesUsed": [ + "main.unsharded" + ] + } }, { "comment": "first expression fails for ',' join (code coverage: ensure error is returned)", @@ -4598,8 +2691,24 @@ { "comment": "table names should be case-sensitive", "query": "select unsharded.id from unsharded where Unsharded.val = 1", - "v3-plan": "VT03019: symbol Unsharded.val not found", - "gen4-plan": "symbol Unsharded.val not found" + "plan": { + "QueryType": "SELECT", + "Original": "select unsharded.id from unsharded where Unsharded.val = 1", + "Instructions": { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select unsharded.id from unsharded where 1 != 1", + "Query": "select unsharded.id from unsharded where Unsharded.val = 1", + "Table": "unsharded" + }, + "TablesUsed": [ + "main.unsharded" + ] + } }, { "comment": "implicit table reference for sharded keyspace", @@ -4634,22 +2743,7 @@ { "comment": "query with parens is planned correctly", "query": "select m1.col from (unsharded as m1, unsharded as m2)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select m1.col from (unsharded as m1, unsharded as m2)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select m1.col from (unsharded as m1, unsharded as m2) where 1 != 1", - "Query": "select m1.col from (unsharded as m1, unsharded as m2)", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select m1.col from (unsharded as m1, unsharded as m2)", "Instructions": { @@ -4669,9 +2763,9 @@ } }, { - "comment": "gen4 - optimise plan by merging user_extra and music first, and then querying for user info", + "comment": "optimise plan by merging user_extra and music first, and then querying for user info", "query": "select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id", "Instructions": { @@ -4679,70 +2773,7 @@ "Variant": "Join", "JoinColumnIndexes": "L:0", "JoinVars": { - "ue_user_id": 1 - }, - "TableName": "`user`_user_extra_music", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "u_id": 1 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1, u.id from `user` as u where 1 != 1", - "Query": "select 1, u.id from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select ue.user_id from user_extra as ue where 1 != 1", - "Query": "select ue.user_id from user_extra as ue where ue.id = :u_id", - "Table": "user_extra" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music as m where 1 != 1", - "Query": "select 1 from music as m where m.user_id = :ue_user_id", - "Table": "music", - "Values": [ - ":ue_user_id" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 from user u join user_extra ue on ue.id = u.id join music m on m.user_id = ue.user_id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1", - "JoinVars": { - "ue_id": 0 + "ue_id": 1 }, "TableName": "music, user_extra_`user`", "Inputs": [ @@ -4753,8 +2784,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select ue.id, 1 from user_extra as ue, music as m where 1 != 1", - "Query": "select ue.id, 1 from user_extra as ue, music as m where m.user_id = ue.user_id", + "FieldQuery": "select 1, ue.id from user_extra as ue, music as m where 1 != 1", + "Query": "select 1, ue.id from user_extra as ue, music as m where m.user_id = ue.user_id", "Table": "music, user_extra" }, { @@ -4784,50 +2815,13 @@ { "comment": "join column selected as alias", "query": "SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "u_id": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id as uid from `user` as u where 1 != 1", - "Query": "select u.id as uid from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select ue.id as ueid from user_extra as ue where 1 != 1", - "Query": "select ue.id as ueid from user_extra as ue where ue.id = :u_id", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT u.id as uid, ue.id as ueid FROM user u join user_extra ue where u.id = ue.id", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "R:0,L:1", + "JoinColumnIndexes": "R:0,L:0", "JoinVars": { "ue_id": 0 }, @@ -4840,8 +2834,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select ue.id, ue.id as ueid from user_extra as ue where 1 != 1", - "Query": "select ue.id, ue.id as ueid from user_extra as ue", + "FieldQuery": "select ue.id as ueid from user_extra as ue where 1 != 1", + "Query": "select ue.id as ueid from user_extra as ue", "Table": "user_extra" }, { @@ -4870,62 +2864,24 @@ { "comment": "alias on column from derived table. TODO: to support alias in SimpleProjection engine primitive.", "query": "select a as k from (select count(*) as a from user) t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a as k from (select count(*) as a from user) t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) as a from `user` where 1 != 1", - "Query": "select count(*) as a from `user`", - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a as k from (select count(*) as a from user) t", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count_star(0) AS a", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count_star(0) AS a", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) as a from `user` where 1 != 1", - "Query": "select count(*) as a from `user`", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) as a from `user` where 1 != 1", + "Query": "select count(*) as a from `user`", + "Table": "`user`" } ] }, @@ -4936,23 +2892,8 @@ }, { "comment": "select star from derived table on expandable and unsharded table", - "query": "select u.* from (select * from unsharded) u", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.* from (select * from unsharded) u", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select u.* from (select * from unsharded where 1 != 1) as u where 1 != 1", - "Query": "select u.* from (select * from unsharded) as u", - "Table": "unsharded" - } - }, - "gen4-plan": { + "query": "select u.* from (select * from unsharded) u", + "plan": { "QueryType": "SELECT", "Original": "select u.* from (select * from unsharded) u", "Instructions": { @@ -4974,49 +2915,40 @@ { "comment": "filtering on a cross-shard derived table", "query": "select id from (select user.id, user.col from user join user_extra) as t where id=5", - "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from (select user.id, user.col from user join user_extra) as t where id=5", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col from `user` where `user`.id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id from (select `user`.id, `user`.col from `user` where 1 != 1) as t where 1 != 1", + "Query": "select id from (select `user`.id, `user`.col from `user` where `user`.id = 5) as t", + "Table": "`user`", + "Values": [ + "INT64(5)" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra", + "Table": "user_extra" } ] }, @@ -5029,45 +2961,36 @@ { "comment": "expression on a cross-shard derived table", "query": "select id+1 from (select user.id, user.col from user join user_extra) as t", - "v3-plan": "VT12001: unsupported: expression on results of a cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id+1 from (select user.id, user.col from user join user_extra) as t", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 2 - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,L:2", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col, `user`.id + 1 from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col, `user`.id + 1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id + 1 from (select `user`.id, `user`.col from `user` where 1 != 1) as t where 1 != 1", + "Query": "select id + 1 from (select `user`.id, `user`.col from `user`) as t", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra", + "Table": "user_extra" } ] }, @@ -5080,8 +3003,7 @@ { "comment": "derived table with aliased columns and outer predicate pushed in derived table", "query": "select u.a from (select id as b, name from user) u(a, n) where u.n = 1", - "v3-plan": "VT12001: unsupported: column aliases in derived table", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.a from (select id as b, name from user) u(a, n) where u.n = 1", "Instructions": { @@ -5132,8 +3054,7 @@ { "comment": "derived table with aliased columns predicate in both the outer and inner", "query": "select u.a from (select id as b, name from user where b = 1) u(a, n) where u.n = 1", - "v3-plan": "VT12001: unsupported: column aliases in derived table", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.a from (select id as b, name from user where b = 1) u(a, n) where u.n = 1", "Instructions": { @@ -5184,8 +3105,7 @@ { "comment": "derived table with aliased columns and a join that requires pushProjection", "query": "select i+1 from (select user.id from user join user_extra) t(i)", - "v3-plan": "VT12001: unsupported: column aliases in derived table", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select i+1 from (select user.id from user join user_extra) t(i)", "Instructions": { @@ -5235,74 +3155,7 @@ { "comment": "two subqueries with different Select and OpCode", "query": "select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values2", - "__sq2" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from user_extra where 1 != 1", - "Query": "select id from user_extra", - "Table": "user_extra" - }, - { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id from user_extra where 1 != 1", - "Query": "select user_id from user_extra limit :__upper_limit", - "Table": "user_extra" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where col = :__sq1 and :__sq_has_values2 = 1 and id in ::__vals", - "Table": "`user`", - "Values": [ - "::__sq2" - ], - "Vindex": "user_index" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id in (select id from user_extra) and col = (select user_id from user_extra limit 1)", "Instructions": { @@ -5314,6 +3167,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Limit", "Count": "INT64(1)", "Inputs": [ @@ -5331,6 +3185,7 @@ ] }, { + "InputName": "Outer", "OperatorType": "Subquery", "Variant": "PulloutIn", "PulloutVars": [ @@ -5339,6 +3194,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -5350,6 +3206,7 @@ "Table": "user_extra" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -5377,7 +3234,7 @@ { "comment": "join on int columns", "query": "select u.id from user as u join user as uu on u.intcol = uu.intcol", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id from user as u join user as uu on u.intcol = uu.intcol", "Instructions": { @@ -5412,43 +3269,6 @@ "Table": "`user`" } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select u.id from user as u join user as uu on u.intcol = uu.intcol", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1", - "JoinVars": { - "u_intcol": 0 - }, - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.intcol, u.id from `user` as u where 1 != 1", - "Query": "select u.intcol, u.id from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as uu where 1 != 1", - "Query": "select 1 from `user` as uu where uu.intcol = :u_intcol", - "Table": "`user`" - } - ] }, "TablesUsed": [ "user.user" @@ -5458,55 +3278,45 @@ { "comment": "Duplicate output column from derived table having a join", "query": "select 0 from (select `user`.col1 from `user` join unsharded) as t join unsharded on unsharded.col1 = t.col1 and unsharded.a = t.col1", - "v3-plan": "VT12001: unsupported: expression on results of a cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 0 from (select `user`.col1 from `user` join unsharded) as t join unsharded on unsharded.col1 = t.col1 and unsharded.a = t.col1", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1", + "JoinColumnIndexes": "L:0", "JoinVars": { - "t_col1": 0 + "t_col1": 1 }, "TableName": "`user`_unsharded_unsharded", "Inputs": [ { - "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 1 - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:1", + "TableName": "`user`_unsharded", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col1, 0 from `user` where 1 != 1", - "Query": "select `user`.col1, 0 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded where 1 != 1", - "Query": "select 1 from unsharded", - "Table": "unsharded" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 0, t.col1 from (select `user`.col1 from `user` where 1 != 1) as t where 1 != 1", + "Query": "select 0, t.col1 from (select `user`.col1 from `user`) as t", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 from unsharded where 1 != 1", + "Query": "select 1 from unsharded", + "Table": "unsharded" } ] }, @@ -5518,7 +3328,7 @@ "Sharded": false }, "FieldQuery": "select 1 from unsharded where 1 != 1", - "Query": "select 1 from unsharded where unsharded.col1 = :t_col1 and unsharded.a = :t_col1", + "Query": "select 1 from unsharded where unsharded.a = :t_col1 and unsharded.col1 = :t_col1", "Table": "unsharded" } ] @@ -5536,47 +3346,40 @@ "QueryType": "SELECT", "Original": "select user.id from user left join user_extra on user.col = user_extra.col where coalesce(user_extra.col, 4) = 5", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 1 - ], + "OperatorType": "Filter", + "Predicate": "coalesce(user_extra.col, 4) = 5", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": "coalesce(user_extra.col, 4) = 5", + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_col": 1 + }, + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Join", - "Variant": "LeftJoin", - "JoinColumnIndexes": "R:0,L:1", - "JoinVars": { - "user_col": 0 + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1", - "Query": "select `user`.col, `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra where user_extra.col = :user_col", - "Table": "user_extra" - } - ] + "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1", + "Query": "select `user`.id, `user`.col from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.col from user_extra where 1 != 1", + "Query": "select user_extra.col from user_extra where user_extra.col = :user_col", + "Table": "user_extra" } ] } @@ -5591,41 +3394,7 @@ { "comment": "dont merge unsharded tables from different keyspaces", "query": "select 1 from main.unsharded join main_2.unsharded_tab", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from main.unsharded join main_2.unsharded_tab", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "unsharded_unsharded_tab", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded where 1 != 1", - "Query": "select 1 from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main_2", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded_tab where 1 != 1", - "Query": "select 1 from unsharded_tab", - "Table": "unsharded_tab" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 from main.unsharded join main_2.unsharded_tab", "Instructions": { @@ -5667,22 +3436,7 @@ { "comment": "Unsharded join with using", "query": "select * from unsharded_a join unsharded_b using (propertyId);", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from unsharded_a join unsharded_b using (propertyId);", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from unsharded_a join unsharded_b using (propertyId) where 1 != 1", - "Query": "select * from unsharded_a join unsharded_b using (propertyId)", - "Table": "unsharded_a, unsharded_b" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from unsharded_a join unsharded_b using (propertyId);", "Instructions": { @@ -5705,8 +3459,7 @@ { "comment": "Column aliases in Derived Table", "query": "select id2 from (select id from user) as x (id2)", - "v3-plan": "VT12001: unsupported: column aliases in derived table", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id2 from (select id from user) as x (id2)", "Instructions": { @@ -5728,22 +3481,7 @@ { "comment": "single unsharded keyspace with derived table", "query": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select col from (select col from unsharded join unsharded_b where 1 != 1) as u join unsharded_a as ua where 1 != 1", - "Query": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a as ua limit 1", - "Table": "unsharded, unsharded_b, unsharded_a" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a ua limit 1", "Instructions": { @@ -5767,7 +3505,7 @@ { "comment": "query builder with derived table having join inside it", "query": "select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1", "Instructions": { @@ -5781,107 +3519,32 @@ "TableName": "`user`_user_extra_user_extra", "Inputs": [ { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra as ue where 1 != 1", - "Query": "select 1 from user_extra as ue", - "Table": "user_extra" - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select u.col from (select user.col from user join user_extra) as u join user_extra ue limit 1", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra_user_extra", - "Inputs": [ - { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.col from (select `user`.col from `user` where 1 != 1) as u where 1 != 1", + "Query": "select u.col from (select `user`.col from `user`) as u", + "Table": "`user`" + }, { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra", + "Table": "user_extra" } ] }, @@ -5913,35 +3576,43 @@ "QueryType": "SELECT", "Original": "select user_extra.col+1 from user left join user_extra on user.col = user_extra.col", "Instructions": { - "OperatorType": "Join", - "Variant": "LeftJoin", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_col": 0 - }, - "TableName": "`user`_user_extra", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] + INT64(1) as user_extra.col + 1" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "user_col": 0 }, - "FieldQuery": "select user_extra.col + 1 from user_extra where 1 != 1", - "Query": "select user_extra.col + 1 from user_extra where user_extra.col = :user_col", - "Table": "user_extra" + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.col from user_extra where 1 != 1", + "Query": "select user_extra.col from user_extra where user_extra.col = :user_col", + "Table": "user_extra" + } + ] } ] }, @@ -5964,35 +3635,44 @@ "TableName": "`user`_user_extra_user_extra", "Inputs": [ { - "OperatorType": "Join", - "Variant": "LeftJoin", - "JoinColumnIndexes": "L:1,R:0", - "JoinVars": { - "user_col": 0 - }, - "TableName": "`user`_user_extra", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] as id", + "[COLUMN 1] + INT64(1) as user_extra.col + 1" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1", - "Query": "select `user`.col, `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_col": 1 }, - "FieldQuery": "select user_extra.col + 1 from user_extra where 1 != 1", - "Query": "select user_extra.col + 1 from user_extra where user_extra.col = :user_col", - "Table": "user_extra" + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1", + "Query": "select `user`.id, `user`.col from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.col from user_extra where 1 != 1", + "Query": "select user_extra.col from user_extra where user_extra.col = :user_col", + "Table": "user_extra" + } + ] } ] }, @@ -6022,36 +3702,43 @@ "QueryType": "SELECT", "Original": "select user.foo+user_extra.col+1 from user left join user_extra on user.col = user_extra.col", "Instructions": { - "OperatorType": "Join", - "Variant": "LeftJoin", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_col": 0, - "user_foo": 1 - }, - "TableName": "`user`_user_extra", + "OperatorType": "Projection", + "Expressions": [ + "([COLUMN 0] + [COLUMN 1]) + INT64(1) as `user`.foo + user_extra.col + 1" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col, `user`.foo from `user` where 1 != 1", - "Query": "select `user`.col, `user`.foo from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_col": 1 }, - "FieldQuery": "select :user_foo + user_extra.col + 1 from user_extra where 1 != 1", - "Query": "select :user_foo + user_extra.col + 1 from user_extra where user_extra.col = :user_col", - "Table": "user_extra" + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.foo, `user`.col from `user` where 1 != 1", + "Query": "select `user`.foo, `user`.col from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.col from user_extra where 1 != 1", + "Query": "select user_extra.col from user_extra where user_extra.col = :user_col", + "Table": "user_extra" + } + ] } ] }, @@ -6064,32 +3751,7 @@ { "comment": "Do not rewrite derived expressions when the derived table is merged with the outer", "query": "select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user` where 1 != 1) as tbl where 1 != 1 group by col1, weight_string(col1)", - "OrderBy": "(0|2) ASC", - "Query": "select col1, count(*), weight_string(col1) from (select colC + colD as col1 from `user`) as tbl group by col1, weight_string(col1) order by col1 asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1, count(*) from (select colC+colD as col1 from user) as tbl group by col1", "Instructions": { @@ -6121,14 +3783,13 @@ { "comment": "join with USING construct", "query": "select * from authoritative join unsharded_authoritative using(col1)", - "v3-plan": "VT12001: unsupported: JOIN with USING(column_list) clause for complex queries", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from authoritative join unsharded_authoritative using(col1)", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1,L:2,L:3,R:0", + "JoinColumnIndexes": "L:0,L:1,L:2,R:0", "JoinVars": { "authoritative_col1": 0 }, @@ -6141,8 +3802,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select authoritative.col1, authoritative.col1 as col1, authoritative.user_id as user_id, authoritative.col2 as col2 from authoritative where 1 != 1", - "Query": "select authoritative.col1, authoritative.col1 as col1, authoritative.user_id as user_id, authoritative.col2 as col2 from authoritative", + "FieldQuery": "select authoritative.col1 as col1, authoritative.user_id as user_id, authoritative.col2 as col2 from authoritative where 1 != 1", + "Query": "select authoritative.col1 as col1, authoritative.user_id as user_id, authoritative.col2 as col2 from authoritative", "Table": "authoritative" }, { @@ -6167,26 +3828,7 @@ { "comment": "derived table inside derived table with a where clause depending on columns from the derived table", "query": "select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from (select bar as push_it from (select foo as bar from (select id as foo from `user` where 1 != 1) as t1 where 1 != 1) as t2 where 1 != 1) as t3 where 1 != 1", - "Query": "select * from (select bar as push_it from (select foo as bar from (select id as foo from `user`) as t1) as t2) as t3 where push_it = 12", - "Table": "`user`", - "Values": [ - "INT64(12)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from (select bar as push_it from (select foo as bar from (select id as foo from user) as t1) as t2) as t3 where push_it = 12", "Instructions": { @@ -6212,22 +3854,7 @@ { "comment": "use a view", "query": "select * from user.user_details_view", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user.user_details_view", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1) as user_details_view where 1 != 1", - "Query": "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id) as user_details_view", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user.user_details_view", "Instructions": { @@ -6250,22 +3877,7 @@ { "comment": "use a view without qualifying the keyspace", "query": "select * from user_details_view", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user_details_view", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1) as user_details_view where 1 != 1", - "Query": "select * from (select `user`.id, user_extra.col from `user` join user_extra on `user`.id = user_extra.user_id) as user_details_view", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user_details_view", "Instructions": { @@ -6292,47 +3904,40 @@ "QueryType": "SELECT", "Original": "select user.id from user left join user_extra on user.col = user_extra.col where user_extra.col between 10 and 20", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 1 - ], + "OperatorType": "Filter", + "Predicate": "user_extra.col between 10 and 20", + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": "user_extra.col between 10 and 20", + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_col": 1 + }, + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Join", - "Variant": "LeftJoin", - "JoinColumnIndexes": "R:0,L:1", - "JoinVars": { - "user_col": 0 + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1", - "Query": "select `user`.col, `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra where user_extra.col = :user_col", - "Table": "user_extra" - } - ] + "FieldQuery": "select `user`.id, `user`.col from `user` where 1 != 1", + "Query": "select `user`.id, `user`.col from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.col from user_extra where 1 != 1", + "Query": "select user_extra.col from user_extra where user_extra.col = :user_col", + "Table": "user_extra" } ] } @@ -6343,5 +3948,144 @@ "user.user_extra" ] } + }, + { + "comment": "missing and ambiguous column info is OK as long as we can send the query to a single unsharded keyspace", + "query": "select missing_column from unsharded, unsharded_a", + "plan": { + "QueryType": "SELECT", + "Original": "select missing_column from unsharded, unsharded_a", + "Instructions": { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select missing_column from unsharded, unsharded_a where 1 != 1", + "Query": "select missing_column from unsharded, unsharded_a", + "Table": "unsharded, unsharded_a" + }, + "TablesUsed": [ + "main.unsharded", + "main.unsharded_a" + ] + } + }, + { + "comment": "missing and ambiguous column info is not valid when we have two different unsharded keyspaces in the query", + "query": "select missing_column from unsharded, unsharded_tab", + "plan": "Column 'missing_column' in field list is ambiguous" + }, + { + "comment": "join predicate only depending on the RHS should not turn outer join into inner join", + "query": "select t1.id1, t2.id1 from t1 left join t1 as t2 on t2.id1 = t2.id2", + "plan": { + "QueryType": "SELECT", + "Original": "select t1.id1, t2.id1 from t1 left join t1 as t2 on t2.id1 = t2.id2", + "Instructions": { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,R:0", + "TableName": "t1_t1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "zlookup_unique", + "Sharded": true + }, + "FieldQuery": "select t1.id1 from t1 where 1 != 1", + "Query": "select t1.id1 from t1", + "Table": "t1" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "zlookup_unique", + "Sharded": true + }, + "FieldQuery": "select t2.id1 from t1 as t2 where 1 != 1", + "Query": "select t2.id1 from t1 as t2 where t2.id1 = t2.id2", + "Table": "t1" + } + ] + }, + "TablesUsed": [ + "zlookup_unique.t1" + ] + } + }, + { + "comment": "left join with using has to be transformed into inner join with on condition", + "query": "SELECT * FROM unsharded_authoritative as A LEFT JOIN unsharded_authoritative as B USING(col1)", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT * FROM unsharded_authoritative as A LEFT JOIN unsharded_authoritative as B USING(col1)", + "Instructions": { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select A.col1 as col1, A.col2 as col2, B.col2 as col2 from unsharded_authoritative as A left join unsharded_authoritative as B on A.col1 = B.col1 where 1 != 1", + "Query": "select A.col1 as col1, A.col2 as col2, B.col2 as col2 from unsharded_authoritative as A left join unsharded_authoritative as B on A.col1 = B.col1", + "Table": "unsharded_authoritative" + }, + "TablesUsed": [ + "main.unsharded_authoritative" + ] + } + }, + { + "comment": "join query using table with muticolumn vindex", + "query": "select 1 from multicol_tbl m1 join multicol_tbl m2 on m1.cola = m2.cola", + "plan": { + "QueryType": "SELECT", + "Original": "select 1 from multicol_tbl m1 join multicol_tbl m2 on m1.cola = m2.cola", + "Instructions": { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "JoinVars": { + "m1_cola": 1 + }, + "TableName": "multicol_tbl_multicol_tbl", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1, m1.cola from multicol_tbl as m1 where 1 != 1", + "Query": "select 1, m1.cola from multicol_tbl as m1", + "Table": "multicol_tbl" + }, + { + "OperatorType": "Route", + "Variant": "SubShard", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from multicol_tbl as m2 where 1 != 1", + "Query": "select 1 from multicol_tbl as m2 where m2.cola = :m1_cola", + "Table": "multicol_tbl", + "Values": [ + ":m1_cola" + ], + "Vindex": "multicolIdx" + } + ] + }, + "TablesUsed": [ + "user.multicol_tbl" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json b/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json index bc589fe29be..d3789673d9d 100644 --- a/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json @@ -21,22 +21,7 @@ { "comment": "',' join information_schema", "query": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b where 1 != 1", - "Query": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b", - "Table": "information_schema.`TABLES`, information_schema.`COLUMNS`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b", "Instructions": { @@ -74,8 +59,7 @@ { "comment": "information schema join", "query": "select tables.TABLE_SCHEMA, files.`STATUS` from information_schema.tables join information_schema.files", - "v3-plan": "VT03019: symbol `tables`.TABLE_SCHEMA not found", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select tables.TABLE_SCHEMA, files.`STATUS` from information_schema.tables join information_schema.files", "Instructions": { @@ -94,22 +78,7 @@ { "comment": "access to qualified column names in information_schema", "query": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from information_schema.`COLUMNS` where 1 != 1", - "Query": "select * from information_schema.`COLUMNS` where information_schema.`COLUMNS`.COLUMN_NAME = 'toto'", - "Table": "information_schema.`COLUMNS`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'", "Instructions": { @@ -131,27 +100,11 @@ "plan": { "QueryType": "SELECT", "Original": "select TABLE_NAME from information_schema.columns union select table_schema from information_schema.tables", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1 union select table_schema from information_schema.`tables` where 1 != 1", - "Query": "select TABLE_NAME from information_schema.`columns` union select table_schema from information_schema.`tables`", - "Table": "information_schema.`columns`" - } - } - }, - { - "comment": "union between information_schema tables that should not be merged", - "query": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'", "Instructions": { "OperatorType": "Distinct", + "Collations": [ + "0: utf8mb4_0900_ai_ci" + ], "Inputs": [ { "OperatorType": "Concatenate", @@ -163,10 +116,9 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select * from information_schema.`tables` where 1 != 1", - "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname", - "SysTableTableSchema": "[VARCHAR(\"user\")]", - "Table": "information_schema.`tables`" + "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1", + "Query": "select distinct TABLE_NAME from information_schema.`columns`", + "Table": "information_schema.`columns`" }, { "OperatorType": "Route", @@ -175,45 +127,47 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select * from information_schema.`tables` where 1 != 1", - "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname", - "SysTableTableSchema": "[VARCHAR(\"main\")]", + "FieldQuery": "select table_schema from information_schema.`tables` where 1 != 1", + "Query": "select distinct table_schema from information_schema.`tables`", "Table": "information_schema.`tables`" } ] } ] } - }, - "gen4-plan": { + } + }, + { + "comment": "union between information_schema tables that should not be merged", + "query": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'", + "plan": { "QueryType": "SELECT", "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'", "Instructions": { "OperatorType": "Distinct", "Collations": [ - "(0:21)", - "(1:22)", - "(2:23)", - "(3:24)", - "(4:25)", - "5: binary", - "(6:26)", - "7: binary", - "8: binary", - "9: binary", - "10: binary", - "11: binary", - "12: binary", - "13: binary", - "(14:27)", - "(15:28)", - "(16:29)", - "(17:30)", - "18: binary", - "(19:31)", - "(20:32)" + "0: utf8mb4_0900_ai_ci", + "1: utf8mb4_0900_ai_ci", + "2: utf8mb4_0900_ai_ci", + "3: utf8mb4_0900_ai_ci", + "4: utf8mb4_0900_ai_ci", + "5", + "6: utf8mb4_0900_ai_ci", + "7", + "8", + "9", + "10", + "11", + "12", + "13", + "14", + "15", + "16", + "17: utf8mb4_0900_ai_ci", + "18", + "19: utf8mb4_0900_ai_ci", + "20: utf8mb4_0900_ai_ci" ], - "ResultColumns": 21, "Inputs": [ { "OperatorType": "Concatenate", @@ -225,8 +179,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where 1 != 1", - "Query": "select distinct TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where table_schema = :__vtschemaname", + "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where 1 != 1", + "Query": "select distinct TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"user\")]", "Table": "information_schema.`tables`" }, @@ -237,8 +191,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where 1 != 1", - "Query": "select distinct TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where table_schema = :__vtschemaname", + "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where 1 != 1", + "Query": "select distinct TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"main\")]", "Table": "information_schema.`tables`" } @@ -251,24 +205,7 @@ { "comment": "Select from information schema query with two tables that route should be merged", "query": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1", - "Query": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = :__vtschemaname and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", - "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]", - "SysTableTableSchema": "[VARCHAR(\"test\"), VARCHAR(\"test\")]", - "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", "Instructions": { @@ -279,7 +216,7 @@ "Sharded": false }, "FieldQuery": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where 1 != 1", - "Query": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = :__vtschemaname and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", + "Query": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where KCU.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and KCU.TABLE_NAME = :KCU_TABLE_NAME /* VARCHAR */ and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]", "SysTableTableSchema": "[VARCHAR(\"test\")]", "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS" @@ -289,45 +226,7 @@ { "comment": "Select from information schema query with three tables such that route for 2 should be merged but not for the last.", "query": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS_INFORMATION_SCHEMA.`TABLES`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select KCU.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1", - "Query": "select KCU.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.TABLE_NAME = :KCU_TABLE_NAME1 order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", - "SysTableTableName": "[KCU_TABLE_NAME1:VARCHAR(\"data_type_table\"), KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]", - "SysTableTableSchema": "[VARCHAR(\"test\")]", - "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS" - }, - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select S.TABLE_NAME from INFORMATION_SCHEMA.`TABLES` as S where 1 != 1", - "Query": "select S.TABLE_NAME from INFORMATION_SCHEMA.`TABLES` as S where S.TABLE_SCHEMA = :__vtschemaname and S.TABLE_NAME = :S_TABLE_NAME", - "SysTableTableName": "[S_TABLE_NAME:VARCHAR(\"sc\")]", - "SysTableTableSchema": "[VARCHAR(\"test\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", "Instructions": { @@ -338,7 +237,7 @@ "Sharded": false }, "FieldQuery": "select KCU.TABLE_NAME, S.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC, INFORMATION_SCHEMA.`TABLES` as S where 1 != 1", - "Query": "select KCU.TABLE_NAME, S.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC, INFORMATION_SCHEMA.`TABLES` as S where S.TABLE_SCHEMA = :__vtschemaname and S.TABLE_NAME = :S_TABLE_NAME and KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", + "Query": "select KCU.TABLE_NAME, S.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC, INFORMATION_SCHEMA.`TABLES` as S where S.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and S.TABLE_NAME = :S_TABLE_NAME /* VARCHAR */ and KCU.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and KCU.TABLE_NAME = :KCU_TABLE_NAME /* VARCHAR */ and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\"), S_TABLE_NAME:VARCHAR(\"sc\")]", "SysTableTableSchema": "[VARCHAR(\"test\")]", "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS, INFORMATION_SCHEMA.`TABLES`" @@ -359,7 +258,7 @@ "Sharded": false }, "FieldQuery": "select routine_name as `name`, routine_definition as definition from information_schema.routines where 1 != 1", - "Query": "select routine_name as `name`, routine_definition as definition from information_schema.routines where ROUTINE_SCHEMA = :__vtschemaname and ROUTINE_TYPE = 'PROCEDURE'", + "Query": "select routine_name as `name`, routine_definition as definition from information_schema.routines where ROUTINE_SCHEMA = :__vtschemaname /* VARCHAR */ and ROUTINE_TYPE = 'PROCEDURE'", "SysTableTableSchema": "[:v1]", "Table": "information_schema.routines" } @@ -379,7 +278,7 @@ "Sharded": false }, "FieldQuery": "select sum(data_length + index_length) as size from information_schema.`TABLES` where 1 != 1", - "Query": "select sum(data_length + index_length) as size from information_schema.`TABLES` where table_schema = :__vtschemaname", + "Query": "select sum(data_length + index_length) as size from information_schema.`TABLES` where table_schema = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[:v1]", "Table": "information_schema.`TABLES`" } @@ -388,29 +287,13 @@ { "comment": "information_schema referential contraints", "query": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where 1 != 1", - "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where kcu.table_schema = :__vtschemaname and rc.constraint_schema = :__vtschemaname and kcu.referenced_column_name is not null order by ordinal_position asc", - "SysTableTableSchema": "[:v1, :v2]", - "Table": "information_schema.key_column_usage, information_schema.referential_constraints" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1,L:2,L:3,L:4,L:5,L:6,R:0,R:1", + "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4,L:5,R:0,R:1", "JoinVars": { "kcu_constraint_name": 0 }, @@ -423,9 +306,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select kcu.constraint_name, kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name from information_schema.key_column_usage as kcu where 1 != 1", - "OrderBy": "5 ASC", - "Query": "select kcu.constraint_name, kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname and kcu.referenced_column_name is not null order by ordinal_position asc", + "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name from information_schema.key_column_usage as kcu where 1 != 1", + "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.referenced_column_name is not null order by ordinal_position asc", "SysTableTableSchema": "[:v1]", "Table": "information_schema.key_column_usage" }, @@ -437,7 +319,7 @@ "Sharded": false }, "FieldQuery": "select rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.referential_constraints as rc where 1 != 1", - "Query": "select rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.referential_constraints as rc where rc.constraint_schema = :__vtschemaname and rc.constraint_name = :kcu_constraint_name", + "Query": "select rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.referential_constraints as rc where rc.constraint_schema = :__vtschemaname /* VARCHAR */ and rc.constraint_name = :kcu_constraint_name", "SysTableTableSchema": "[:v2]", "Table": "information_schema.referential_constraints" } @@ -448,23 +330,7 @@ { "comment": "rails query", "query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1", - "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name and rc.constraint_schema = database() and rc.table_name = :rc_table_name", - "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]", - "Table": "information_schema.referential_constraints, information_schema.key_column_usage" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'", "Instructions": { @@ -475,7 +341,7 @@ "Sharded": false }, "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where 1 != 1", - "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = database() and rc.table_name = :rc_table_name and fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name and rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name", + "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = database() and rc.table_name = :rc_table_name /* VARCHAR */ and fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name /* VARCHAR */ and rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name", "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]", "Table": "information_schema.key_column_usage, information_schema.referential_constraints" } @@ -484,23 +350,7 @@ { "comment": "rails_query 2", "query": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from information_schema.schemata where 1 != 1", - "Query": "select * from information_schema.schemata where schema_name = :__vtschemaname", - "SysTableTableSchema": "[VARCHAR(\"user\")]", - "Table": "information_schema.schemata" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'", "Instructions": { @@ -511,7 +361,7 @@ "Sharded": false }, "FieldQuery": "select CATALOG_NAME, SCHEMA_NAME, DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME, SQL_PATH from information_schema.schemata where 1 != 1", - "Query": "select CATALOG_NAME, SCHEMA_NAME, DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME, SQL_PATH from information_schema.schemata where schema_name = :__vtschemaname", + "Query": "select CATALOG_NAME, SCHEMA_NAME, DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME, SQL_PATH from information_schema.schemata where schema_name = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"user\")]", "Table": "information_schema.schemata" } @@ -531,7 +381,7 @@ "Sharded": false }, "FieldQuery": "select table_comment from information_schema.`tables` where 1 != 1", - "Query": "select table_comment from information_schema.`tables` where table_schema = :__vtschemaname and table_name = :table_name", + "Query": "select table_comment from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ and table_name = :table_name /* VARCHAR */", "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]", "SysTableTableSchema": "[VARCHAR(\"schema_name\")]", "Table": "information_schema.`tables`" @@ -541,24 +391,7 @@ { "comment": "rails_query 4", "query": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1", - "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :fk_table_name and rc.constraint_schema = :__vtschemaname and rc.table_name = :rc_table_name", - "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]", - "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"table_schema\")]", - "Table": "information_schema.referential_constraints, information_schema.key_column_usage" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'", "Instructions": { @@ -569,7 +402,7 @@ "Sharded": false }, "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where 1 != 1", - "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = :__vtschemaname and rc.table_name = :rc_table_name and fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :fk_table_name and rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name", + "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = :__vtschemaname /* VARCHAR */ and rc.table_name = :rc_table_name /* VARCHAR */ and fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname /* VARCHAR */ and fk.table_name = :fk_table_name /* VARCHAR */ and rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name", "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]", "SysTableTableSchema": "[VARCHAR(\"table_schema\")]", "Table": "information_schema.key_column_usage, information_schema.referential_constraints" @@ -590,7 +423,7 @@ "Sharded": false }, "FieldQuery": "select column_name from information_schema.statistics where 1 != 1", - "Query": "select column_name from information_schema.statistics where index_name = 'PRIMARY' and table_schema = :__vtschemaname and table_name = :table_name order by seq_in_index asc", + "Query": "select column_name from information_schema.statistics where index_name = 'PRIMARY' and table_schema = :__vtschemaname /* VARCHAR */ and table_name = :table_name /* VARCHAR */ order by seq_in_index asc", "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]", "SysTableTableSchema": "[VARCHAR(\"table_schema\")]", "Table": "information_schema.statistics" @@ -611,7 +444,7 @@ "Sharded": false }, "FieldQuery": "select generation_expression from information_schema.`columns` where 1 != 1", - "Query": "select generation_expression from information_schema.`columns` where table_schema = :__vtschemaname and table_name = :table_name and column_name = 'column_name'", + "Query": "select generation_expression from information_schema.`columns` where table_schema = :__vtschemaname /* VARCHAR */ and table_name = :table_name /* VARCHAR */ and column_name = 'column_name'", "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]", "SysTableTableSchema": "[VARCHAR(\"table_schema\")]", "Table": "information_schema.`columns`" @@ -640,23 +473,7 @@ { "comment": "rails_query 9", "query": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1", - "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery", - "SysTableTableSchema": "[VARCHAR(\"table_schema\")]", - "Table": "information_schema.`tables`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery", "Instructions": { @@ -667,7 +484,7 @@ "Sharded": false }, "FieldQuery": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1", - "Query": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery", + "Query": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */) as _subquery", "SysTableTableSchema": "[VARCHAR(\"table_schema\")]", "Table": "information_schema.`tables`" } @@ -676,24 +493,7 @@ { "comment": "rails_query 10", "query": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1", - "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery where _subquery.table_type = 'table_type' and _subquery.table_name = :_subquery_table_name", - "SysTableTableName": "[_subquery_table_name:VARCHAR(\"table_name\")]", - "SysTableTableSchema": "[VARCHAR(\"table_schema\")]", - "Table": "information_schema.`tables`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'", "Instructions": { @@ -704,7 +504,7 @@ "Sharded": false }, "FieldQuery": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1", - "Query": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname and table_type = 'table_type' and table_name = 'table_name') as _subquery", + "Query": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ and table_type = 'table_type' and table_name = 'table_name') as _subquery", "SysTableTableSchema": "[VARCHAR(\"table_schema\")]", "Table": "information_schema.`tables`" } @@ -724,7 +524,7 @@ "Sharded": false }, "FieldQuery": "select count(*) from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select count(*) from INFORMATION_SCHEMA.`TABLES` where table_schema = :__vtschemaname and table_name = :table_name", + "Query": "select count(*) from INFORMATION_SCHEMA.`TABLES` where table_schema = :__vtschemaname /* VARCHAR */ and table_name = :table_name /* VARCHAR */", "SysTableTableName": "[table_name:VARCHAR(\"foo\")]", "SysTableTableSchema": "[VARCHAR(\"performance_schema\")]", "Table": "INFORMATION_SCHEMA.`TABLES`" @@ -734,8 +534,7 @@ { "comment": "subquery of information_schema with itself", "query": "select TABLES.CHECKSUM from information_schema.`TABLES` where `TABLE_NAME` in (select `TABLE_NAME` from information_schema.`COLUMNS`)", - "v3-plan": "VT03019: symbol `TABLES`.`CHECKSUM` not found", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select TABLES.CHECKSUM from information_schema.`TABLES` where `TABLE_NAME` in (select `TABLE_NAME` from information_schema.`COLUMNS`)", "Instructions": { @@ -754,23 +553,7 @@ { "comment": "query trying to query two different keyspaces at the same time", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname", - "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'", "Instructions": { @@ -781,7 +564,7 @@ "Sharded": false }, "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname", + "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]", "Table": "INFORMATION_SCHEMA.`TABLES`" } @@ -790,22 +573,7 @@ { "comment": "information_schema query using database() func", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()", "Instructions": { @@ -824,23 +592,7 @@ { "comment": "table_schema predicate the wrong way around", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname", - "SysTableTableSchema": "[VARCHAR(\"ks\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA", "Instructions": { @@ -851,7 +603,7 @@ "Sharded": false }, "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname", + "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"ks\")]", "Table": "INFORMATION_SCHEMA.`TABLES`" } @@ -860,24 +612,7 @@ { "comment": "table_name predicate against a routed table", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and TABLE_NAME = :TABLE_NAME", - "SysTableTableName": "[TABLE_NAME:VARCHAR(\"route1\")]", - "SysTableTableSchema": "[VARCHAR(\"ks\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'", "Instructions": { @@ -888,7 +623,7 @@ "Sharded": false }, "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and TABLE_NAME = :TABLE_NAME", + "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and TABLE_NAME = :TABLE_NAME /* VARCHAR */", "SysTableTableName": "[TABLE_NAME:VARCHAR(\"route1\")]", "SysTableTableSchema": "[VARCHAR(\"ks\")]", "Table": "INFORMATION_SCHEMA.`TABLES`" @@ -909,7 +644,7 @@ "Sharded": false }, "FieldQuery": "select TABLE_NAME from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select TABLE_NAME from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and DATA_FREE = 42", + "Query": "select TABLE_NAME from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and DATA_FREE = 42", "SysTableTableSchema": "[VARCHAR(\"ks\")]", "Table": "INFORMATION_SCHEMA.`TABLES`" } @@ -918,23 +653,7 @@ { "comment": "able to isolate table_schema value even when hidden inside of ORs", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and (DATA_FREE = 42 or `CHECKSUM` = 'value')", - "SysTableTableSchema": "[VARCHAR(\"ks\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')", "Instructions": { @@ -945,7 +664,7 @@ "Sharded": false }, "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and (DATA_FREE = 42 or `CHECKSUM` = 'value')", + "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and (DATA_FREE = 42 or `CHECKSUM` = 'value')", "SysTableTableSchema": "[VARCHAR(\"ks\")]", "Table": "INFORMATION_SCHEMA.`TABLES`" } @@ -954,22 +673,7 @@ { "comment": "expand star with information schema", "query": "select x.table_name from (select a.* from information_schema.key_column_usage a) x", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select x.table_name from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1", - "Query": "select x.table_name from (select a.* from information_schema.key_column_usage as a) as x", - "Table": "information_schema.key_column_usage" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x", "Instructions": { @@ -988,7 +692,7 @@ { "comment": "expand star with information schema in a derived table", "query": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id", "Instructions": { @@ -1007,49 +711,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select x.table_name, x.COLUMN_NAME from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1", - "Query": "select x.table_name, x.COLUMN_NAME from (select a.* from information_schema.key_column_usage as a) as x", - "Table": "information_schema.key_column_usage" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where `user`.id = :x_COLUMN_NAME", - "Table": "`user`", - "Values": [ - ":x_COLUMN_NAME" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "x_COLUMN_NAME": 0 - }, - "TableName": "information_schema.key_column_usage_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1", - "Query": "select x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a) as x", + "FieldQuery": "select x.table_name, x.COLUMN_NAME from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1", + "Query": "select x.table_name, x.COLUMN_NAME from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a) as x", "Table": "information_schema.key_column_usage" }, { @@ -1077,22 +740,7 @@ { "comment": "join of information_schema queries with select stars exprs", "query": "select a.*, b.* from information_schema.GLOBAL_STATUS a, information_schema.CHARACTER_SETS b", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a.*, b.* from information_schema.GLOBAL_STATUS a, information_schema.CHARACTER_SETS b", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select a.*, b.* from information_schema.GLOBAL_STATUS as a, information_schema.CHARACTER_SETS as b where 1 != 1", - "Query": "select a.*, b.* from information_schema.GLOBAL_STATUS as a, information_schema.CHARACTER_SETS as b", - "Table": "information_schema.GLOBAL_STATUS, information_schema.CHARACTER_SETS" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a.*, b.* from information_schema.GLOBAL_STATUS a, information_schema.CHARACTER_SETS b", "Instructions": { @@ -1111,23 +759,7 @@ { "comment": "join two routes with SysTableTableName entries in LHS and RHS", "query": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select a.table_name from (select * from information_schema.key_column_usage as a where 1 != 1) as a join (select * from information_schema.referential_constraints where 1 != 1) as b where 1 != 1", - "Query": "select a.table_name from (select * from information_schema.key_column_usage as a where a.table_name = :a_table_name) as a join (select * from information_schema.referential_constraints where table_name = :table_name) as b", - "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]", - "Table": "information_schema.key_column_usage, information_schema.referential_constraints" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b", "Instructions": { @@ -1138,7 +770,7 @@ "Sharded": false }, "FieldQuery": "select a.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where 1 != 1) as a, (select CONSTRAINT_CATALOG, CONSTRAINT_SCHEMA, CONSTRAINT_NAME, UNIQUE_CONSTRAINT_CATALOG, UNIQUE_CONSTRAINT_SCHEMA, UNIQUE_CONSTRAINT_NAME, MATCH_OPTION, UPDATE_RULE, DELETE_RULE, TABLE_NAME, REFERENCED_TABLE_NAME from information_schema.referential_constraints where 1 != 1) as b where 1 != 1", - "Query": "select a.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where a.table_name = :a_table_name) as a, (select CONSTRAINT_CATALOG, CONSTRAINT_SCHEMA, CONSTRAINT_NAME, UNIQUE_CONSTRAINT_CATALOG, UNIQUE_CONSTRAINT_SCHEMA, UNIQUE_CONSTRAINT_NAME, MATCH_OPTION, UPDATE_RULE, DELETE_RULE, TABLE_NAME, REFERENCED_TABLE_NAME from information_schema.referential_constraints where table_name = :table_name) as b", + "Query": "select a.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where a.table_name = :a_table_name /* VARCHAR */) as a, (select CONSTRAINT_CATALOG, CONSTRAINT_SCHEMA, CONSTRAINT_NAME, UNIQUE_CONSTRAINT_CATALOG, UNIQUE_CONSTRAINT_SCHEMA, UNIQUE_CONSTRAINT_NAME, MATCH_OPTION, UPDATE_RULE, DELETE_RULE, TABLE_NAME, REFERENCED_TABLE_NAME from information_schema.referential_constraints where table_name = :table_name /* VARCHAR */) as b", "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]", "Table": "information_schema.key_column_usage, information_schema.referential_constraints" } @@ -1147,35 +779,13 @@ { "comment": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", "query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select sum(found) from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1", - "Query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)) as t", - "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]", - "Table": "information_schema.`tables`" - } - } - }, - { - "comment": "union as a derived table", - "query": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum(0) AS sum(found)", "Inputs": [ { "OperatorType": "Concatenate", @@ -1188,7 +798,7 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname", + "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"music\")]", "Table": "information_schema.`tables`" }, @@ -1200,7 +810,7 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", - "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1", + "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1", "SysTableTableSchema": "[VARCHAR(\"music\")]", "Table": "information_schema.views" } @@ -1208,28 +818,49 @@ } ] } - }, - "gen4-plan": { + } + }, + { + "comment": "union as a derived table", + "query": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", + "plan": { "QueryType": "SELECT", "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select found from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1", - "Query": "select found from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)) as t", - "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]", - "Table": "information_schema.`tables`" + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", + "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */", + "SysTableTableSchema": "[VARCHAR(\"music\")]", + "Table": "information_schema.`tables`" + }, + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", + "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1", + "SysTableTableSchema": "[VARCHAR(\"music\")]", + "Table": "information_schema.views" + } + ] } } }, { "comment": "merge system schema queries as long as they have any same table_schema", "query": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", "Instructions": { @@ -1243,7 +874,7 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname", + "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ and table_schema = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]", "Table": "information_schema.`tables`" }, @@ -1255,34 +886,18 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", - "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1", + "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ and table_schema = :__vtschemaname /* VARCHAR */ limit 1", "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]", "Table": "information_schema.views" } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)", - "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)", - "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]", - "Table": "information_schema.`tables`" - } } }, { "comment": "merge system schema queries as long as they have any same table_name", "query": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", "Instructions": { @@ -1296,7 +911,7 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname", + "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ and table_schema = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]", "Table": "information_schema.`tables`" }, @@ -1308,45 +923,29 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", - "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1", + "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ and table_schema = :__vtschemaname /* VARCHAR */ limit 1", "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]", "Table": "information_schema.views" } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)", - "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)", - "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]", - "Table": "information_schema.`tables`" - } } }, { "comment": "merge union subquery with outer query referencing the same system schemas", "query": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))", "Instructions": { "OperatorType": "Subquery", "Variant": "PulloutExists", "PulloutVars": [ - "__sq_has_values1", - "__sq1" + "__sq_has_values1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Concatenate", "Inputs": [ { @@ -1357,7 +956,7 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name2 and table_name = :table_name3", + "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name2 /* VARCHAR */ and table_name = :table_name3 /* VARCHAR */", "SysTableTableName": "[table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\")]", "Table": "information_schema.`tables`" }, @@ -1369,13 +968,14 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", - "Query": "select 1 as found from information_schema.views where table_name = :table_name4 and table_name = :table_name5 limit 1", + "Query": "select 1 as found from information_schema.views where table_name = :table_name4 /* VARCHAR */ and table_name = :table_name5 /* VARCHAR */ limit 1", "SysTableTableName": "[table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\")]", "Table": "information_schema.views" } ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "DBA", "Keyspace": { @@ -1383,99 +983,62 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name and table_name = :table_name1 and :__sq_has_values1", + "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name /* VARCHAR */ and table_name = :table_name1 /* VARCHAR */ and :__sq_has_values1", "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name:VARCHAR(\"music\")]", "Table": "information_schema.`tables`" } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name and table_name = :table_name1 and exists (select 1 as found from information_schema.`tables` where table_name = :table_name2 and table_name = :table_name3 union all (select 1 as found from information_schema.views where table_name = :table_name4 and table_name = :table_name5 limit 1))", - "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\"), table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\"), table_name:VARCHAR(\"music\")]", - "Table": "information_schema.`tables`" - } } }, { "comment": "merge even one side have schema name in derived table", "query": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 + "OperatorType": "Distinct", + "Collations": [ + "0: utf8mb4_0900_ai_ci" ], "Inputs": [ { - "OperatorType": "Distinct", + "OperatorType": "Concatenate", "Inputs": [ { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select TABLE_NAME from information_schema.`tables` as t where 1 != 1", - "Query": "select TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname", - "SysTableTableSchema": "[VARCHAR(\"a\")]", - "Table": "information_schema.`tables`" - }, - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1", - "Query": "select TABLE_NAME from information_schema.`columns`", - "Table": "information_schema.`columns`" - } - ] + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select TABLE_NAME from information_schema.`tables` as t where 1 != 1", + "Query": "select distinct TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", + "SysTableTableSchema": "[VARCHAR(\"a\")]", + "Table": "information_schema.`tables`" + }, + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1", + "Query": "select distinct TABLE_NAME from information_schema.`columns`", + "Table": "information_schema.`columns`" } ] } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select dt.TABLE_NAME from (select TABLE_NAME from information_schema.`tables` as t where 1 != 1 union select TABLE_NAME from information_schema.`columns` where 1 != 1) as dt where 1 != 1", - "Query": "select dt.TABLE_NAME from (select TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname union select TABLE_NAME from information_schema.`columns`) as dt", - "SysTableTableSchema": "[VARCHAR(\"a\")]", - "Table": "information_schema.`tables`" - } } }, { "comment": "merge even one side have schema name in subquery", "query": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", "Instructions": { @@ -1487,39 +1050,36 @@ ], "Inputs": [ { - "OperatorType": "Distinct", + "InputName": "SubQuery", + "OperatorType": "Concatenate", "Inputs": [ { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select COLUMN_NAME from information_schema.`tables` as t where 1 != 1", - "Query": "select COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname", - "SysTableTableSchema": "[VARCHAR(\"a\")]", - "Table": "information_schema.`tables`" - }, - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select COLUMN_NAME from information_schema.`columns` where 1 != 1", - "Query": "select COLUMN_NAME from information_schema.`columns`", - "Table": "information_schema.`columns`" - } - ] + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select COLUMN_NAME from information_schema.`tables` as t where 1 != 1", + "Query": "select COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", + "SysTableTableSchema": "[VARCHAR(\"a\")]", + "Table": "information_schema.`tables`" + }, + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select COLUMN_NAME from information_schema.`columns` where 1 != 1", + "Query": "select COLUMN_NAME from information_schema.`columns`", + "Table": "information_schema.`columns`" } ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "DBA", "Keyspace": { @@ -1532,43 +1092,12 @@ } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select COLLATION_NAME from information_schema.`COLUMNS` as t where 1 != 1", - "Query": "select COLLATION_NAME from information_schema.`COLUMNS` as t where COLUMN_NAME in (select COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname union select COLUMN_NAME from information_schema.`columns`)", - "SysTableTableSchema": "[VARCHAR(\"a\")]", - "Table": "information_schema.`COLUMNS`" - } } }, { "comment": "table_schema OR predicate\n# It is unsupported because we do not route queries to multiple keyspaces right now", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = 'ks' or TABLE_SCHEMA = 'main'", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'", "Instructions": { diff --git a/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json b/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json index 1021cee9094..88df818a050 100644 --- a/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json @@ -21,22 +21,7 @@ { "comment": "',' join information_schema", "query": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b where 1 != 1", - "Query": "select a.`ENGINE`, b.DATA_TYPE from information_schema.`TABLES` as a, information_schema.`COLUMNS` as b", - "Table": "information_schema.`TABLES`, information_schema.`COLUMNS`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a.ENGINE, b.DATA_TYPE from information_schema.TABLES as a, information_schema.COLUMNS as b", "Instructions": { @@ -74,8 +59,7 @@ { "comment": "information schema join", "query": "select tables.TABLE_SCHEMA, files.`STATUS` from information_schema.tables join information_schema.files", - "v3-plan": "VT03019: symbol `tables`.TABLE_SCHEMA not found", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select tables.TABLE_SCHEMA, files.`STATUS` from information_schema.tables join information_schema.files", "Instructions": { @@ -94,22 +78,7 @@ { "comment": "access to qualified column names in information_schema", "query": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from information_schema.`COLUMNS` where 1 != 1", - "Query": "select * from information_schema.`COLUMNS` where information_schema.`COLUMNS`.COLUMN_NAME = 'toto'", - "Table": "information_schema.`COLUMNS`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from information_schema.COLUMNS where information_schema.COLUMNS.COLUMN_NAME='toto'", "Instructions": { @@ -131,27 +100,11 @@ "plan": { "QueryType": "SELECT", "Original": "select TABLE_NAME from information_schema.columns union select table_schema from information_schema.tables", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1 union select table_schema from information_schema.`tables` where 1 != 1", - "Query": "select TABLE_NAME from information_schema.`columns` union select table_schema from information_schema.`tables`", - "Table": "information_schema.`columns`" - } - } - }, - { - "comment": "union between information_schema tables that should not be merged", - "query": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'", "Instructions": { "OperatorType": "Distinct", + "Collations": [ + "0: utf8mb4_0900_ai_ci" + ], "Inputs": [ { "OperatorType": "Concatenate", @@ -163,10 +116,9 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select * from information_schema.`tables` where 1 != 1", - "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname", - "SysTableTableSchema": "[VARCHAR(\"user\")]", - "Table": "information_schema.`tables`" + "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1", + "Query": "select distinct TABLE_NAME from information_schema.`columns`", + "Table": "information_schema.`columns`" }, { "OperatorType": "Route", @@ -175,45 +127,47 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select * from information_schema.`tables` where 1 != 1", - "Query": "select * from information_schema.`tables` where table_schema = :__vtschemaname", - "SysTableTableSchema": "[VARCHAR(\"main\")]", + "FieldQuery": "select table_schema from information_schema.`tables` where 1 != 1", + "Query": "select distinct table_schema from information_schema.`tables`", "Table": "information_schema.`tables`" } ] } ] } - }, - "gen4-plan": { + } + }, + { + "comment": "union between information_schema tables that should not be merged", + "query": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'", + "plan": { "QueryType": "SELECT", "Original": "select * from information_schema.tables where table_schema = 'user' union select * from information_schema.tables where table_schema = 'main'", "Instructions": { "OperatorType": "Distinct", "Collations": [ - "(0:21)", - "(1:22)", - "(2:23)", - "(3:24)", - "(4:25)", - "5: binary", - "(6:26)", - "7: binary", - "8: binary", - "9: binary", - "10: binary", - "11: binary", - "12: binary", - "13: binary", - "(14:27)", - "(15:28)", - "(16:29)", - "(17:30)", - "18: binary", - "(19:31)", - "(20:32)" + "0: utf8mb4_0900_ai_ci", + "1: utf8mb4_0900_ai_ci", + "2: utf8mb4_0900_ai_ci", + "3", + "4: utf8mb4_0900_ai_ci", + "5", + "6", + "7", + "8", + "9", + "10", + "11", + "12", + "13", + "14", + "15", + "16", + "17: utf8mb4_0900_ai_ci", + "18", + "19: utf8mb4_0900_ai_ci", + "20: utf8mb4_0900_ai_ci" ], - "ResultColumns": 21, "Inputs": [ { "OperatorType": "Concatenate", @@ -225,8 +179,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where 1 != 1", - "Query": "select distinct TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where table_schema = :__vtschemaname", + "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where 1 != 1", + "Query": "select distinct TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"user\")]", "Table": "information_schema.`tables`" }, @@ -237,8 +191,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where 1 != 1", - "Query": "select distinct TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT, weight_string(TABLE_CATALOG), weight_string(TABLE_SCHEMA), weight_string(TABLE_NAME), weight_string(TABLE_TYPE), weight_string(`ENGINE`), weight_string(`ROW_FORMAT`), weight_string(CREATE_TIME), weight_string(UPDATE_TIME), weight_string(CHECK_TIME), weight_string(TABLE_COLLATION), weight_string(CREATE_OPTIONS), weight_string(TABLE_COMMENT) from information_schema.`tables` where table_schema = :__vtschemaname", + "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where 1 != 1", + "Query": "select distinct TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"main\")]", "Table": "information_schema.`tables`" } @@ -251,24 +205,7 @@ { "comment": "Select from information schema query with two tables that route should be merged", "query": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1", - "Query": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = :__vtschemaname and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", - "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]", - "SysTableTableSchema": "[VARCHAR(\"test\"), VARCHAR(\"test\")]", - "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT RC.CONSTRAINT_NAME, ORDINAL_POSITION FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.COLUMN_NAME = 'id' AND KCU.REFERENCED_TABLE_SCHEMA = 'test' AND KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", "Instructions": { @@ -279,7 +216,7 @@ "Sharded": false }, "FieldQuery": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where 1 != 1", - "Query": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = :__vtschemaname and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", + "Query": "select RC.CONSTRAINT_NAME, ORDINAL_POSITION from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC where KCU.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and KCU.TABLE_NAME = :KCU_TABLE_NAME /* VARCHAR */ and KCU.COLUMN_NAME = 'id' and KCU.REFERENCED_TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and KCU.CONSTRAINT_NAME = 'data_type_table_id_fkey' and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]", "SysTableTableSchema": "[VARCHAR(\"test\")]", "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS" @@ -289,45 +226,7 @@ { "comment": "Select from information schema query with three tables such that route for 2 should be merged but not for the last.", "query": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS_INFORMATION_SCHEMA.`TABLES`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select KCU.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where 1 != 1", - "Query": "select KCU.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU join INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC on KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME where KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.TABLE_NAME = :KCU_TABLE_NAME1 order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", - "SysTableTableName": "[KCU_TABLE_NAME1:VARCHAR(\"data_type_table\"), KCU_TABLE_NAME:VARCHAR(\"data_type_table\")]", - "SysTableTableSchema": "[VARCHAR(\"test\")]", - "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS" - }, - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select S.TABLE_NAME from INFORMATION_SCHEMA.`TABLES` as S where 1 != 1", - "Query": "select S.TABLE_NAME from INFORMATION_SCHEMA.`TABLES` as S where S.TABLE_SCHEMA = :__vtschemaname and S.TABLE_NAME = :S_TABLE_NAME", - "SysTableTableName": "[S_TABLE_NAME:VARCHAR(\"sc\")]", - "SysTableTableSchema": "[VARCHAR(\"test\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT KCU.`TABLE_NAME`, S.`TABLE_NAME` FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS KCU INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS RC ON KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME, INFORMATION_SCHEMA.`TABLES` AS S WHERE KCU.TABLE_SCHEMA = 'test' AND KCU.TABLE_NAME = 'data_type_table' AND KCU.TABLE_NAME = 'data_type_table' AND S.TABLE_SCHEMA = 'test' AND S.TABLE_NAME = 'sc' ORDER BY KCU.CONSTRAINT_NAME, KCU.COLUMN_NAME", "Instructions": { @@ -338,7 +237,7 @@ "Sharded": false }, "FieldQuery": "select KCU.TABLE_NAME, S.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC, INFORMATION_SCHEMA.`TABLES` as S where 1 != 1", - "Query": "select KCU.TABLE_NAME, S.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC, INFORMATION_SCHEMA.`TABLES` as S where S.TABLE_SCHEMA = :__vtschemaname and S.TABLE_NAME = :S_TABLE_NAME and KCU.TABLE_SCHEMA = :__vtschemaname and KCU.TABLE_NAME = :KCU_TABLE_NAME and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", + "Query": "select KCU.TABLE_NAME, S.TABLE_NAME from INFORMATION_SCHEMA.KEY_COLUMN_USAGE as KCU, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS as RC, INFORMATION_SCHEMA.`TABLES` as S where S.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and S.TABLE_NAME = :S_TABLE_NAME /* VARCHAR */ and KCU.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and KCU.TABLE_NAME = :KCU_TABLE_NAME /* VARCHAR */ and KCU.CONSTRAINT_NAME = RC.CONSTRAINT_NAME order by KCU.CONSTRAINT_NAME asc, KCU.COLUMN_NAME asc", "SysTableTableName": "[KCU_TABLE_NAME:VARCHAR(\"data_type_table\"), S_TABLE_NAME:VARCHAR(\"sc\")]", "SysTableTableSchema": "[VARCHAR(\"test\")]", "Table": "INFORMATION_SCHEMA.KEY_COLUMN_USAGE, INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS, INFORMATION_SCHEMA.`TABLES`" @@ -359,7 +258,7 @@ "Sharded": false }, "FieldQuery": "select routine_name as `name`, routine_definition as definition from information_schema.routines where 1 != 1", - "Query": "select routine_name as `name`, routine_definition as definition from information_schema.routines where ROUTINE_SCHEMA = :__vtschemaname and ROUTINE_TYPE = 'PROCEDURE'", + "Query": "select routine_name as `name`, routine_definition as definition from information_schema.routines where ROUTINE_SCHEMA = :__vtschemaname /* VARCHAR */ and ROUTINE_TYPE = 'PROCEDURE'", "SysTableTableSchema": "[:v1]", "Table": "information_schema.routines" } @@ -379,7 +278,7 @@ "Sharded": false }, "FieldQuery": "select sum(data_length + index_length) as size from information_schema.`TABLES` where 1 != 1", - "Query": "select sum(data_length + index_length) as size from information_schema.`TABLES` where table_schema = :__vtschemaname", + "Query": "select sum(data_length + index_length) as size from information_schema.`TABLES` where table_schema = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[:v1]", "Table": "information_schema.`TABLES`" } @@ -388,29 +287,13 @@ { "comment": "information_schema referential contraints - cant merge without knowing values", "query": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where 1 != 1", - "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name, rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.key_column_usage as kcu join information_schema.referential_constraints as rc on kcu.constraint_name = rc.constraint_name where kcu.table_schema = :__vtschemaname and rc.constraint_schema = :__vtschemaname and kcu.referenced_column_name is not null order by ordinal_position asc", - "SysTableTableSchema": "[:v1, :v2]", - "Table": "information_schema.key_column_usage, information_schema.referential_constraints" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT kcu.constraint_name constraint_name, kcu.column_name column_name, kcu.referenced_table_name referenced_table_name, kcu.referenced_column_name referenced_column_name, kcu.ordinal_position ordinal_position, kcu.table_name table_name, rc.delete_rule delete_rule, rc.update_rule update_rule FROM information_schema.key_column_usage AS kcu INNER JOIN information_schema.referential_constraints AS rc ON kcu.constraint_name = rc.constraint_name WHERE kcu.table_schema = ? AND rc.constraint_schema = ? AND kcu.referenced_column_name IS NOT NULL ORDER BY ordinal_position", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1,L:2,L:3,L:4,L:5,L:6,R:0,R:1", + "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4,L:5,R:0,R:1", "JoinVars": { "kcu_constraint_name": 0 }, @@ -423,9 +306,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select kcu.constraint_name, kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name from information_schema.key_column_usage as kcu where 1 != 1", - "OrderBy": "5 ASC", - "Query": "select kcu.constraint_name, kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname and kcu.referenced_column_name is not null order by ordinal_position asc", + "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name from information_schema.key_column_usage as kcu where 1 != 1", + "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.referenced_column_name is not null order by ordinal_position asc", "SysTableTableSchema": "[:v1]", "Table": "information_schema.key_column_usage" }, @@ -437,7 +319,7 @@ "Sharded": false }, "FieldQuery": "select rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.referential_constraints as rc where 1 != 1", - "Query": "select rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.referential_constraints as rc where rc.constraint_schema = :__vtschemaname and rc.constraint_name = :kcu_constraint_name", + "Query": "select rc.delete_rule as delete_rule, rc.update_rule as update_rule from information_schema.referential_constraints as rc where rc.constraint_schema = :__vtschemaname /* VARCHAR */ and rc.constraint_name = :kcu_constraint_name", "SysTableTableSchema": "[:v2]", "Table": "information_schema.referential_constraints" } @@ -448,23 +330,7 @@ { "comment": "rails query", "query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1", - "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name and rc.constraint_schema = database() and rc.table_name = :rc_table_name", - "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]", - "Table": "information_schema.referential_constraints, information_schema.key_column_usage" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as name, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = ':vtg1' and rc.constraint_schema = database() and rc.table_name = ':vtg1'", "Instructions": { @@ -475,7 +341,7 @@ "Sharded": false }, "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where 1 != 1", - "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = database() and rc.table_name = :rc_table_name and fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name and rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name", + "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = database() and rc.table_name = :rc_table_name /* VARCHAR */ and fk.referenced_column_name is not null and fk.table_schema = database() and fk.table_name = :fk_table_name /* VARCHAR */ and rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name", "SysTableTableName": "[fk_table_name:VARCHAR(\":vtg1\"), rc_table_name:VARCHAR(\":vtg1\")]", "Table": "information_schema.key_column_usage, information_schema.referential_constraints" } @@ -484,23 +350,7 @@ { "comment": "rails_query 2", "query": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from information_schema.schemata where 1 != 1", - "Query": "select * from information_schema.schemata where schema_name = :__vtschemaname", - "SysTableTableSchema": "[VARCHAR(\"user\")]", - "Table": "information_schema.schemata" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM information_schema.schemata WHERE schema_name = 'user'", "Instructions": { @@ -511,7 +361,7 @@ "Sharded": false }, "FieldQuery": "select CATALOG_NAME, SCHEMA_NAME, DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME, SQL_PATH, DEFAULT_ENCRYPTION from information_schema.schemata where 1 != 1", - "Query": "select CATALOG_NAME, SCHEMA_NAME, DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME, SQL_PATH, DEFAULT_ENCRYPTION from information_schema.schemata where schema_name = :__vtschemaname", + "Query": "select CATALOG_NAME, SCHEMA_NAME, DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME, SQL_PATH, DEFAULT_ENCRYPTION from information_schema.schemata where schema_name = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"user\")]", "Table": "information_schema.schemata" } @@ -531,7 +381,7 @@ "Sharded": false }, "FieldQuery": "select table_comment from information_schema.`tables` where 1 != 1", - "Query": "select table_comment from information_schema.`tables` where table_schema = :__vtschemaname and table_name = :table_name", + "Query": "select table_comment from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ and table_name = :table_name /* VARCHAR */", "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]", "SysTableTableSchema": "[VARCHAR(\"schema_name\")]", "Table": "information_schema.`tables`" @@ -541,24 +391,7 @@ { "comment": "rails_query 4", "query": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where 1 != 1", - "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc join information_schema.key_column_usage as fk using (constraint_schema, constraint_name) where fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :fk_table_name and rc.constraint_schema = :__vtschemaname and rc.table_name = :rc_table_name", - "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]", - "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"table_schema\")]", - "Table": "information_schema.referential_constraints, information_schema.key_column_usage" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT fk.referenced_table_name AS 'to_table', fk.referenced_column_name AS 'primary_key',fk.column_name AS 'column',fk.constraint_name AS 'name',rc.update_rule AS 'on_update',rc.delete_rule AS 'on_delete' FROM information_schema.referential_constraints rc JOIN information_schema.key_column_usage fk USING (constraint_schema, constraint_name) WHERE fk.referenced_column_name IS NOT NULL AND fk.table_schema = 'table_schema' AND fk.table_name = 'table_name' AND rc.constraint_schema = 'table_schema' AND rc.table_name = 'table_name'", "Instructions": { @@ -569,7 +402,7 @@ "Sharded": false }, "FieldQuery": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where 1 != 1", - "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = :__vtschemaname and rc.table_name = :rc_table_name and fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname and fk.table_name = :fk_table_name and rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name", + "Query": "select fk.referenced_table_name as to_table, fk.referenced_column_name as primary_key, fk.column_name as `column`, fk.constraint_name as `name`, rc.update_rule as on_update, rc.delete_rule as on_delete from information_schema.referential_constraints as rc, information_schema.key_column_usage as fk where rc.constraint_schema = :__vtschemaname /* VARCHAR */ and rc.table_name = :rc_table_name /* VARCHAR */ and fk.referenced_column_name is not null and fk.table_schema = :__vtschemaname /* VARCHAR */ and fk.table_name = :fk_table_name /* VARCHAR */ and rc.constraint_schema = fk.constraint_schema and rc.constraint_name = fk.constraint_name", "SysTableTableName": "[fk_table_name:VARCHAR(\"table_name\"), rc_table_name:VARCHAR(\"table_name\")]", "SysTableTableSchema": "[VARCHAR(\"table_schema\")]", "Table": "information_schema.key_column_usage, information_schema.referential_constraints" @@ -579,33 +412,16 @@ { "comment": "rails_query 5", "query": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc join information_schema.table_constraints as tc using (constraint_schema, constraint_name) where 1 != 1", - "Query": "select cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc join information_schema.table_constraints as tc using (constraint_schema, constraint_name) where tc.table_schema = :__vtschemaname and tc.table_name = :tc_table_name and cc.constraint_schema = :__vtschemaname", - "SysTableTableName": "[tc_table_name:VARCHAR(\"table_name\")]", - "SysTableTableSchema": "[VARCHAR(\"table_schema\"), VARCHAR(\"constraint_schema\")]", - "Table": "information_schema.check_constraints, information_schema.table_constraints" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT cc.constraint_name AS 'name', cc.check_clause AS 'expression' FROM information_schema.check_constraints cc JOIN information_schema.table_constraints tc USING (constraint_schema, constraint_name) WHERE tc.table_schema = 'table_schema' AND tc.table_name = 'table_name' AND cc.constraint_schema = 'constraint_schema'", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:2,L:3", + "JoinColumnIndexes": "L:0,L:1", "JoinVars": { - "cc_constraint_name": 1, - "cc_constraint_schema": 0 + "cc_constraint_name": 0, + "cc_constraint_schema": 2 }, "TableName": "information_schema.check_constraints_information_schema.table_constraints", "Inputs": [ @@ -616,8 +432,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select cc.constraint_schema, cc.constraint_name, cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc where 1 != 1", - "Query": "select cc.constraint_schema, cc.constraint_name, cc.constraint_name as `name`, cc.check_clause as expression from information_schema.check_constraints as cc where cc.constraint_schema = :__vtschemaname", + "FieldQuery": "select cc.constraint_name as `name`, cc.check_clause as expression, cc.constraint_schema from information_schema.check_constraints as cc where 1 != 1", + "Query": "select cc.constraint_name as `name`, cc.check_clause as expression, cc.constraint_schema from information_schema.check_constraints as cc where cc.constraint_schema = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"constraint_schema\")]", "Table": "information_schema.check_constraints" }, @@ -629,7 +445,7 @@ "Sharded": false }, "FieldQuery": "select 1 from information_schema.table_constraints as tc where 1 != 1", - "Query": "select 1 from information_schema.table_constraints as tc where tc.table_schema = :__vtschemaname and tc.table_name = :tc_table_name and tc.constraint_schema = :__vtschemaname and tc.constraint_name = :cc_constraint_name", + "Query": "select 1 from information_schema.table_constraints as tc where tc.table_schema = :__vtschemaname /* VARCHAR */ and tc.table_name = :tc_table_name /* VARCHAR */ and tc.constraint_name = :cc_constraint_name and tc.constraint_schema = :__vtschemaname /* VARCHAR */", "SysTableTableName": "[tc_table_name:VARCHAR(\"table_name\")]", "SysTableTableSchema": "[VARCHAR(\"table_schema\"), :cc_constraint_schema]", "Table": "information_schema.table_constraints" @@ -652,7 +468,7 @@ "Sharded": false }, "FieldQuery": "select column_name from information_schema.statistics where 1 != 1", - "Query": "select column_name from information_schema.statistics where index_name = 'PRIMARY' and table_schema = :__vtschemaname and table_name = :table_name order by seq_in_index asc", + "Query": "select column_name from information_schema.statistics where index_name = 'PRIMARY' and table_schema = :__vtschemaname /* VARCHAR */ and table_name = :table_name /* VARCHAR */ order by seq_in_index asc", "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]", "SysTableTableSchema": "[VARCHAR(\"table_schema\")]", "Table": "information_schema.statistics" @@ -673,7 +489,7 @@ "Sharded": false }, "FieldQuery": "select generation_expression from information_schema.`columns` where 1 != 1", - "Query": "select generation_expression from information_schema.`columns` where table_schema = :__vtschemaname and table_name = :table_name and column_name = 'column_name'", + "Query": "select generation_expression from information_schema.`columns` where table_schema = :__vtschemaname /* VARCHAR */ and table_name = :table_name /* VARCHAR */ and column_name = 'column_name'", "SysTableTableName": "[table_name:VARCHAR(\"table_name\")]", "SysTableTableSchema": "[VARCHAR(\"table_schema\")]", "Table": "information_schema.`columns`" @@ -702,23 +518,7 @@ { "comment": "rails_query 9", "query": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1", - "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery", - "SysTableTableSchema": "[VARCHAR(\"table_schema\")]", - "Table": "information_schema.`tables`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery", "Instructions": { @@ -729,7 +529,7 @@ "Sharded": false }, "FieldQuery": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1", - "Query": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery", + "Query": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */) as _subquery", "SysTableTableSchema": "[VARCHAR(\"table_schema\")]", "Table": "information_schema.`tables`" } @@ -738,24 +538,7 @@ { "comment": "rails_query 10", "query": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select table_name from (select * from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1", - "Query": "select table_name from (select * from information_schema.`tables` where table_schema = :__vtschemaname) as _subquery where _subquery.table_type = 'table_type' and _subquery.table_name = :_subquery_table_name", - "SysTableTableName": "[_subquery_table_name:VARCHAR(\"table_name\")]", - "SysTableTableSchema": "[VARCHAR(\"table_schema\")]", - "Table": "information_schema.`tables`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT table_name FROM (SELECT * FROM information_schema.tables WHERE table_schema = 'table_schema') _subquery WHERE _subquery.table_type = 'table_type' AND _subquery.table_name = 'table_name'", "Instructions": { @@ -766,7 +549,7 @@ "Sharded": false }, "FieldQuery": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where 1 != 1) as _subquery where 1 != 1", - "Query": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname and table_type = 'table_type' and table_name = 'table_name') as _subquery", + "Query": "select table_name from (select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ and table_type = 'table_type' and table_name = 'table_name') as _subquery", "SysTableTableSchema": "[VARCHAR(\"table_schema\")]", "Table": "information_schema.`tables`" } @@ -786,7 +569,7 @@ "Sharded": false }, "FieldQuery": "select cc.constraint_name as `name` from information_schema.check_constraints as cc where 1 != 1", - "Query": "select cc.constraint_name as `name` from information_schema.check_constraints as cc where cc.constraint_schema = :__vtschemaname and cc.CONSTRAINT_CATALOG = 'a'", + "Query": "select cc.constraint_name as `name` from information_schema.check_constraints as cc where cc.constraint_schema = :__vtschemaname /* VARCHAR */ and cc.CONSTRAINT_CATALOG = 'a'", "SysTableTableSchema": "[VARCHAR(\"a\")]", "Table": "information_schema.check_constraints" } @@ -806,7 +589,7 @@ "Sharded": false }, "FieldQuery": "select count(*) from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select count(*) from INFORMATION_SCHEMA.`TABLES` where table_schema = :__vtschemaname and table_name = :table_name", + "Query": "select count(*) from INFORMATION_SCHEMA.`TABLES` where table_schema = :__vtschemaname /* VARCHAR */ and table_name = :table_name /* VARCHAR */", "SysTableTableName": "[table_name:VARCHAR(\"foo\")]", "SysTableTableSchema": "[VARCHAR(\"performance_schema\")]", "Table": "INFORMATION_SCHEMA.`TABLES`" @@ -816,8 +599,7 @@ { "comment": "subquery of information_schema with itself", "query": "select TABLES.CHECKSUM from information_schema.`TABLES` where `TABLE_NAME` in (select `TABLE_NAME` from information_schema.`COLUMNS`)", - "v3-plan": "VT03019: symbol `TABLES`.`CHECKSUM` not found", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select TABLES.CHECKSUM from information_schema.`TABLES` where `TABLE_NAME` in (select `TABLE_NAME` from information_schema.`COLUMNS`)", "Instructions": { @@ -836,23 +618,7 @@ { "comment": "query trying to query two different keyspaces at the same time", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname", - "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'user' AND TABLE_SCHEMA = 'main'", "Instructions": { @@ -863,7 +629,7 @@ "Sharded": false }, "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname", + "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"main\")]", "Table": "INFORMATION_SCHEMA.`TABLES`" } @@ -872,22 +638,7 @@ { "comment": "information_schema query using database() func", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = database()", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = database()", "Instructions": { @@ -906,23 +657,7 @@ { "comment": "table_schema predicate the wrong way around", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname", - "SysTableTableSchema": "[VARCHAR(\"ks\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE 'ks' = TABLE_SCHEMA", "Instructions": { @@ -933,7 +668,7 @@ "Sharded": false }, "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname", + "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"ks\")]", "Table": "INFORMATION_SCHEMA.`TABLES`" } @@ -942,24 +677,7 @@ { "comment": "table_name predicate against a routed table", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and TABLE_NAME = :TABLE_NAME", - "SysTableTableName": "[TABLE_NAME:VARCHAR(\"route1\")]", - "SysTableTableSchema": "[VARCHAR(\"ks\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 'route1'", "Instructions": { @@ -970,7 +688,7 @@ "Sharded": false }, "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and TABLE_NAME = :TABLE_NAME", + "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and TABLE_NAME = :TABLE_NAME /* VARCHAR */", "SysTableTableName": "[TABLE_NAME:VARCHAR(\"route1\")]", "SysTableTableSchema": "[VARCHAR(\"ks\")]", "Table": "INFORMATION_SCHEMA.`TABLES`" @@ -991,7 +709,7 @@ "Sharded": false }, "FieldQuery": "select TABLE_NAME from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select TABLE_NAME from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and DATA_FREE = 42", + "Query": "select TABLE_NAME from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and DATA_FREE = 42", "SysTableTableSchema": "[VARCHAR(\"ks\")]", "Table": "INFORMATION_SCHEMA.`TABLES`" } @@ -1000,23 +718,7 @@ { "comment": "able to isolate table_schema value even when hidden inside of ORs", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and (DATA_FREE = 42 or `CHECKSUM` = 'value')", - "SysTableTableSchema": "[VARCHAR(\"ks\")]", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE (TABLE_SCHEMA = 'ks' and DATA_FREE = 42) OR (TABLE_SCHEMA = 'ks' and CHECKSUM = 'value')", "Instructions": { @@ -1027,7 +729,7 @@ "Sharded": false }, "FieldQuery": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname and (DATA_FREE = 42 or `CHECKSUM` = 'value')", + "Query": "select TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME, TABLE_TYPE, `ENGINE`, VERSION, `ROW_FORMAT`, TABLE_ROWS, `AVG_ROW_LENGTH`, DATA_LENGTH, MAX_DATA_LENGTH, INDEX_LENGTH, DATA_FREE, `AUTO_INCREMENT`, CREATE_TIME, UPDATE_TIME, CHECK_TIME, TABLE_COLLATION, `CHECKSUM`, CREATE_OPTIONS, TABLE_COMMENT from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ and (DATA_FREE = 42 or `CHECKSUM` = 'value')", "SysTableTableSchema": "[VARCHAR(\"ks\")]", "Table": "INFORMATION_SCHEMA.`TABLES`" } @@ -1036,22 +738,7 @@ { "comment": "expand star with information schema", "query": "select x.table_name from (select a.* from information_schema.key_column_usage a) x", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select x.table_name from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1", - "Query": "select x.table_name from (select a.* from information_schema.key_column_usage as a) as x", - "Table": "information_schema.key_column_usage" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x", "Instructions": { @@ -1070,7 +757,7 @@ { "comment": "expand star with information schema in a derived table", "query": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id", "Instructions": { @@ -1089,49 +776,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select x.table_name, x.COLUMN_NAME from (select a.* from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1", - "Query": "select x.table_name, x.COLUMN_NAME from (select a.* from information_schema.key_column_usage as a) as x", - "Table": "information_schema.key_column_usage" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where `user`.id = :x_COLUMN_NAME", - "Table": "`user`", - "Values": [ - ":x_COLUMN_NAME" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select x.table_name from (select a.* from information_schema.key_column_usage a) x join user on x.`COLUMN_NAME` = user.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "x_COLUMN_NAME": 0 - }, - "TableName": "information_schema.key_column_usage_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1", - "Query": "select x.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a) as x", + "FieldQuery": "select x.table_name, x.COLUMN_NAME from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where 1 != 1) as x where 1 != 1", + "Query": "select x.table_name, x.COLUMN_NAME from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a) as x", "Table": "information_schema.key_column_usage" }, { @@ -1159,22 +805,7 @@ { "comment": "join of information_schema queries with select stars exprs", "query": "select a.*, b.* from information_schema.CHECK_CONSTRAINTS a, information_schema.CHARACTER_SETS b", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a.*, b.* from information_schema.CHECK_CONSTRAINTS a, information_schema.CHARACTER_SETS b", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select a.*, b.* from information_schema.CHECK_CONSTRAINTS as a, information_schema.CHARACTER_SETS as b where 1 != 1", - "Query": "select a.*, b.* from information_schema.CHECK_CONSTRAINTS as a, information_schema.CHARACTER_SETS as b", - "Table": "information_schema.CHECK_CONSTRAINTS, information_schema.CHARACTER_SETS" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a.*, b.* from information_schema.CHECK_CONSTRAINTS a, information_schema.CHARACTER_SETS b", "Instructions": { @@ -1193,23 +824,7 @@ { "comment": "join two routes with SysTableTableName entries in LHS and RHS", "query": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select a.table_name from (select * from information_schema.key_column_usage as a where 1 != 1) as a join (select * from information_schema.referential_constraints where 1 != 1) as b where 1 != 1", - "Query": "select a.table_name from (select * from information_schema.key_column_usage as a where a.table_name = :a_table_name) as a join (select * from information_schema.referential_constraints where table_name = :table_name) as b", - "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]", - "Table": "information_schema.key_column_usage, information_schema.referential_constraints" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a.table_name from (select * from information_schema.key_column_usage a where a.table_name = 'users') a join (select * from information_schema.referential_constraints where table_name = 'users') b", "Instructions": { @@ -1220,7 +835,7 @@ "Sharded": false }, "FieldQuery": "select a.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where 1 != 1) as a, (select CONSTRAINT_CATALOG, CONSTRAINT_SCHEMA, CONSTRAINT_NAME, UNIQUE_CONSTRAINT_CATALOG, UNIQUE_CONSTRAINT_SCHEMA, UNIQUE_CONSTRAINT_NAME, MATCH_OPTION, UPDATE_RULE, DELETE_RULE, TABLE_NAME, REFERENCED_TABLE_NAME from information_schema.referential_constraints where 1 != 1) as b where 1 != 1", - "Query": "select a.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where a.table_name = :a_table_name) as a, (select CONSTRAINT_CATALOG, CONSTRAINT_SCHEMA, CONSTRAINT_NAME, UNIQUE_CONSTRAINT_CATALOG, UNIQUE_CONSTRAINT_SCHEMA, UNIQUE_CONSTRAINT_NAME, MATCH_OPTION, UPDATE_RULE, DELETE_RULE, TABLE_NAME, REFERENCED_TABLE_NAME from information_schema.referential_constraints where table_name = :table_name) as b", + "Query": "select a.table_name from (select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.TABLE_CATALOG, a.TABLE_SCHEMA, a.TABLE_NAME, a.COLUMN_NAME, a.ORDINAL_POSITION, a.POSITION_IN_UNIQUE_CONSTRAINT, a.REFERENCED_TABLE_SCHEMA, a.REFERENCED_TABLE_NAME, a.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as a where a.table_name = :a_table_name /* VARCHAR */) as a, (select CONSTRAINT_CATALOG, CONSTRAINT_SCHEMA, CONSTRAINT_NAME, UNIQUE_CONSTRAINT_CATALOG, UNIQUE_CONSTRAINT_SCHEMA, UNIQUE_CONSTRAINT_NAME, MATCH_OPTION, UPDATE_RULE, DELETE_RULE, TABLE_NAME, REFERENCED_TABLE_NAME from information_schema.referential_constraints where table_name = :table_name /* VARCHAR */) as b", "SysTableTableName": "[a_table_name:VARCHAR(\"users\"), table_name:VARCHAR(\"users\")]", "Table": "information_schema.key_column_usage, information_schema.referential_constraints" } @@ -1229,35 +844,13 @@ { "comment": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", "query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select sum(found) from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1", - "Query": "select sum(found) from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)) as t", - "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]", - "Table": "information_schema.`tables`" - } - } - }, - { - "comment": "union as a derived table", - "query": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum(0) AS sum(found)", "Inputs": [ { "OperatorType": "Concatenate", @@ -1270,7 +863,7 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname", + "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"music\")]", "Table": "information_schema.`tables`" }, @@ -1282,7 +875,7 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", - "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1", + "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1", "SysTableTableSchema": "[VARCHAR(\"music\")]", "Table": "information_schema.views" } @@ -1290,28 +883,49 @@ } ] } - }, - "gen4-plan": { + } + }, + { + "comment": "union as a derived table", + "query": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", + "plan": { "QueryType": "SELECT", "Original": "select found from (select 1 as found from information_schema.`tables` where table_schema = 'music' union all (select 1 as found from information_schema.views where table_schema = 'music' limit 1)) as t", "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select found from (select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)) as t where 1 != 1", - "Query": "select found from (select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)) as t", - "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"music\")]", - "Table": "information_schema.`tables`" + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", + "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */", + "SysTableTableSchema": "[VARCHAR(\"music\")]", + "Table": "information_schema.`tables`" + }, + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", + "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ limit 1", + "SysTableTableSchema": "[VARCHAR(\"music\")]", + "Table": "information_schema.views" + } + ] } } }, { "comment": "merge system schema queries as long as they have any same table_schema", "query": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", "Instructions": { @@ -1325,7 +939,7 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname", + "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ and table_schema = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]", "Table": "information_schema.`tables`" }, @@ -1337,34 +951,18 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", - "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1", + "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ and table_schema = :__vtschemaname /* VARCHAR */ limit 1", "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]", "Table": "information_schema.views" } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)", - "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)", - "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]", - "Table": "information_schema.`tables`" - } } }, { "comment": "merge system schema queries as long as they have any same table_name", "query": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", "Instructions": { @@ -1378,7 +976,7 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname", + "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname /* VARCHAR */ and table_schema = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\")]", "Table": "information_schema.`tables`" }, @@ -1390,45 +988,29 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", - "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1", + "Query": "select 1 as found from information_schema.views where table_schema = :__vtschemaname /* VARCHAR */ and table_schema = :__vtschemaname /* VARCHAR */ limit 1", "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"user\")]", "Table": "information_schema.views" } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 as found from information_schema.`tables` where table_schema = 'music' and table_schema = 'Music' union all (select 1 as found from information_schema.views where table_schema = 'music' and table_schema = 'user' limit 1)", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1 union all (select 1 as found from information_schema.views where 1 != 1)", - "Query": "select 1 as found from information_schema.`tables` where table_schema = :__vtschemaname union all (select 1 as found from information_schema.views where table_schema = :__vtschemaname limit 1)", - "SysTableTableSchema": "[VARCHAR(\"music\"), VARCHAR(\"Music\"), VARCHAR(\"music\"), VARCHAR(\"user\")]", - "Table": "information_schema.`tables`" - } } }, { "comment": "merge union subquery with outer query referencing the same system schemas", "query": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))", "Instructions": { "OperatorType": "Subquery", "Variant": "PulloutExists", "PulloutVars": [ - "__sq_has_values1", - "__sq1" + "__sq_has_values1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Concatenate", "Inputs": [ { @@ -1439,7 +1021,7 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name2 and table_name = :table_name3", + "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name2 /* VARCHAR */ and table_name = :table_name3 /* VARCHAR */", "SysTableTableName": "[table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\")]", "Table": "information_schema.`tables`" }, @@ -1451,13 +1033,14 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.views where 1 != 1", - "Query": "select 1 as found from information_schema.views where table_name = :table_name4 and table_name = :table_name5 limit 1", + "Query": "select 1 as found from information_schema.views where table_name = :table_name4 /* VARCHAR */ and table_name = :table_name5 /* VARCHAR */ limit 1", "SysTableTableName": "[table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\")]", "Table": "information_schema.views" } ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "DBA", "Keyspace": { @@ -1465,99 +1048,62 @@ "Sharded": false }, "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name and table_name = :table_name1 and :__sq_has_values1", + "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name /* VARCHAR */ and table_name = :table_name1 /* VARCHAR */ and :__sq_has_values1", "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name:VARCHAR(\"music\")]", "Table": "information_schema.`tables`" } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' and exists (select 1 as found from information_schema.`tables` where table_name = 'music' and table_name = 'Music' union all (select 1 as found from information_schema.views where table_name = 'music' and table_name = 'user' limit 1))", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 as found from information_schema.`tables` where 1 != 1", - "Query": "select 1 as found from information_schema.`tables` where table_name = :table_name and table_name = :table_name1 and exists (select 1 as found from information_schema.`tables` where table_name = :table_name2 and table_name = :table_name3 union all (select 1 as found from information_schema.views where table_name = :table_name4 and table_name = :table_name5 limit 1))", - "SysTableTableName": "[table_name1:VARCHAR(\"Music\"), table_name2:VARCHAR(\"music\"), table_name3:VARCHAR(\"Music\"), table_name4:VARCHAR(\"music\"), table_name5:VARCHAR(\"user\"), table_name:VARCHAR(\"music\")]", - "Table": "information_schema.`tables`" - } } }, { "comment": "merge even one side have schema name in derived table", "query": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 + "OperatorType": "Distinct", + "Collations": [ + "0: utf8mb4_0900_ai_ci" ], "Inputs": [ { - "OperatorType": "Distinct", + "OperatorType": "Concatenate", "Inputs": [ { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select TABLE_NAME from information_schema.`tables` as t where 1 != 1", - "Query": "select TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname", - "SysTableTableSchema": "[VARCHAR(\"a\")]", - "Table": "information_schema.`tables`" - }, - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1", - "Query": "select TABLE_NAME from information_schema.`columns`", - "Table": "information_schema.`columns`" - } - ] + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select TABLE_NAME from information_schema.`tables` as t where 1 != 1", + "Query": "select distinct TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", + "SysTableTableSchema": "[VARCHAR(\"a\")]", + "Table": "information_schema.`tables`" + }, + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select TABLE_NAME from information_schema.`columns` where 1 != 1", + "Query": "select distinct TABLE_NAME from information_schema.`columns`", + "Table": "information_schema.`columns`" } ] } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from (select TABLE_NAME from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select TABLE_NAME from information_schema.columns) dt", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select dt.TABLE_NAME from (select TABLE_NAME from information_schema.`tables` as t where 1 != 1 union select TABLE_NAME from information_schema.`columns` where 1 != 1) as dt where 1 != 1", - "Query": "select dt.TABLE_NAME from (select TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname union select TABLE_NAME from information_schema.`columns`) as dt", - "SysTableTableSchema": "[VARCHAR(\"a\")]", - "Table": "information_schema.`tables`" - } } }, { "comment": "merge even one side have schema name in subquery", "query": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", "Instructions": { @@ -1569,39 +1115,36 @@ ], "Inputs": [ { - "OperatorType": "Distinct", + "InputName": "SubQuery", + "OperatorType": "Concatenate", "Inputs": [ { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select COLUMN_NAME from information_schema.`tables` as t where 1 != 1", - "Query": "select COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname", - "SysTableTableSchema": "[VARCHAR(\"a\")]", - "Table": "information_schema.`tables`" - }, - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select COLUMN_NAME from information_schema.`columns` where 1 != 1", - "Query": "select COLUMN_NAME from information_schema.`columns`", - "Table": "information_schema.`columns`" - } - ] + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select COLUMN_NAME from information_schema.`tables` as t where 1 != 1", + "Query": "select COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", + "SysTableTableSchema": "[VARCHAR(\"a\")]", + "Table": "information_schema.`tables`" + }, + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select COLUMN_NAME from information_schema.`columns` where 1 != 1", + "Query": "select COLUMN_NAME from information_schema.`columns`", + "Table": "information_schema.`columns`" } ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "DBA", "Keyspace": { @@ -1614,43 +1157,12 @@ } ] } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select COLLATION_NAME from information_schema.`COLUMNS` as t where 1 != 1", - "Query": "select COLLATION_NAME from information_schema.`COLUMNS` as t where COLUMN_NAME in (select COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname union select COLUMN_NAME from information_schema.`columns`)", - "SysTableTableSchema": "[VARCHAR(\"a\")]", - "Table": "information_schema.`COLUMNS`" - } } }, { "comment": "table_schema OR predicate\n# It is unsupported because we do not route queries to multiple keyspaces right now", "query": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from INFORMATION_SCHEMA.`TABLES` where 1 != 1", - "Query": "select * from INFORMATION_SCHEMA.`TABLES` where TABLE_SCHEMA = 'ks' or TABLE_SCHEMA = 'main'", - "Table": "INFORMATION_SCHEMA.`TABLES`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'ks' OR TABLE_SCHEMA = 'main'", "Instructions": { diff --git a/go/vt/vtgate/planbuilder/testdata/large_cases.json b/go/vt/vtgate/planbuilder/testdata/large_cases.json index 4b2fae633ab..43adc1f5343 100644 --- a/go/vt/vtgate/planbuilder/testdata/large_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/large_cases.json @@ -2,191 +2,7 @@ { "comment": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y", "query": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_user_extra_user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinVars": { - "user_extra_user_id": 0 - }, - "TableName": "user_extra_user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1", - "Query": "select user_extra.user_id from user_extra where user_extra.user_id = :user_id", - "Table": "user_extra", - "Values": [ - ":user_id" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "TableName": "user_metadata_music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_metadata where 1 != 1", - "Query": "select 1 from user_metadata where user_metadata.user_id = :user_extra_user_id", - "Table": "user_metadata", - "Values": [ - ":user_extra_user_id" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinVars": { - "music_id": 0 - }, - "TableName": "music_unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music", - "Table": "music" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinVars": { - "unsharded_x": 0 - }, - "TableName": "unsharded_unsharded_a_unsharded_b_unsharded_auto_music_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.x from unsharded where 1 != 1", - "Query": "select unsharded.x from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "TableName": "unsharded_a_unsharded_b_unsharded_auto_music_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded_a where 1 != 1", - "Query": "select 1 from unsharded_a where unsharded_a.y = :unsharded_x", - "Table": "unsharded_a" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "TableName": "unsharded_b_unsharded_auto_music_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded_b where 1 != 1", - "Query": "select 1 from unsharded_b", - "Table": "unsharded_b" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "TableName": "unsharded_auto_music_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded_auto where 1 != 1", - "Query": "select 1 from unsharded_auto", - "Table": "unsharded_auto" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music_extra where 1 != 1", - "Query": "select 1 from music_extra where music_extra.music_id = :music_id", - "Table": "music_extra", - "Values": [ - ":music_id" - ], - "Vindex": "music_user_map" - } - ] - } - ] - } - ] - } - ] - } - ] - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.id from user, user_extra, user_metadata, music, unsharded, unsharded_a, unsharded_b, unsharded_auto, music_extra where user.id = user_extra.user_id and user_metadata.user_id = user_extra.user_id and music.id = music_extra.music_id and unsharded.x = unsharded_a.y", "Instructions": { diff --git a/go/vt/vtgate/planbuilder/testdata/large_union_cases.json b/go/vt/vtgate/planbuilder/testdata/large_union_cases.json index 9120e39bfd6..1ad7b33d589 100644 --- a/go/vt/vtgate/planbuilder/testdata/large_union_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/large_union_cases.json @@ -2,1626 +2,7 @@ { "comment": "this testcase breaks goland, so it lives on its own file", "query": "(SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270703806 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270714657 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270721330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270812079 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271011532 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034164 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034177 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271066849 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271098740 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271355000 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271924504 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272086055 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270650576 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270652906 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270660650 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270649256 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270653671 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270717223 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270720898 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271346411 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271352121 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271354908 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271367516 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271472522 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271821733 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272068709 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270703806 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270714657 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270721330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270812079 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271011532 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034164 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034177 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271066849 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271098740 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271355000 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271924504 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272086055 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270650576 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270652906 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270660650 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270649256 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270653671 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270717223 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270720898 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271346411 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271352121 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271354908 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271367516 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271472522 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271821733 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272068709 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11)", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270698330 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270698330)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270699497 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270699497)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270703806 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270703806)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270707364 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270707364)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270714657 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270714657)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270721330 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270721330)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270812079 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270812079)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271011532 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271011532)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271034164 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271034164)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271034177 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271034177)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271066849 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271066849)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271098740 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271098740)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271355000 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271355000)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271639345 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271639345)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271914117 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271914117)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271924504 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271924504)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272086055 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272086055)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272127855 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272127855)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272191137 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272191137)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272468271 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272468271)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270637436 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270637436)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270644941 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270644941)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270650576 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270650576)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270652906 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270652906)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270660650 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270660650)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270670201 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270670201)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270698330 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270698330)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270699497 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270699497)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270707364 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270707364)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271365691)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271799956 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271799956)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271914117 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271914117)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270637436 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270637436)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271799956 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271799956)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270637436 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270637436)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271639345 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271639345)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270644941 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270644941)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270649256 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270649256)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270653671 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270653671)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270670201 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270670201)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270717223 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270717223)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270720898 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270720898)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270982590 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270982590)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271346411 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271346411)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271352121 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271352121)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271354908 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271354908)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271365691)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271367516 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271367516)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271472522 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271472522)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271607757 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271607757)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271639345 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271639345)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271821733 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271821733)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271914117 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271914117)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272068709 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272068709)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272127855 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272127855)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272191137 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272191137)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272244005 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272244005)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272468271 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272468271)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270982590 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270982590)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271365691)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271607757 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271607757)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1270982590 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1270982590)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271365691)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1271607757 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1271607757)" - ], - "Vindex": "user_index" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select content, user_id from music where 1 != 1", - "Query": "select content, user_id from music where user_id = 1272244005 order by created_at asc, id asc limit 11", - "Table": "music", - "Values": [ - "INT64(1272244005)" - ], - "Vindex": "user_index" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270703806 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270714657 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270721330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270812079 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271011532 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034164 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271034177 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271066849 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271098740 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271355000 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271924504 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272086055 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270650576 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270652906 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270660650 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270698330 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270699497 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270707364 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271799956 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270637436 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270644941 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270649256 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270653671 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270670201 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270717223 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270720898 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271346411 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271352121 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271354908 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271367516 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271472522 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271639345 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271821733 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271914117 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272068709 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272127855 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272191137 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272468271 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1270982590 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271365691 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1271607757 ORDER BY created_at ASC, id ASC LIMIT 11) UNION (SELECT `content`, `user_id` FROM `music` WHERE `user_id` = 1272244005 ORDER BY created_at ASC, id ASC LIMIT 11)", "Instructions": { @@ -1642,8 +23,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "(select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1) union (select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1)", - "Query": "(select content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270698330 order by created_at asc, id asc limit 11) union (select content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270698330 order by created_at asc, id asc limit 11)", + "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from ((select content, user_id from music where 1 != 1) union (select content, user_id from music where 1 != 1)) as dt where 1 != 1", + "Query": "select content, user_id, weight_string(content), weight_string(user_id) from ((select content, user_id from music where user_id = 1270698330 order by created_at asc, id asc limit 11) union (select content, user_id from music where user_id = 1270698330 order by created_at asc, id asc limit 11)) as dt", "Table": "music", "Values": [ "INT64(1270698330)" @@ -1657,8 +38,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "(select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1) union (select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1)", - "Query": "(select content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270699497 order by created_at asc, id asc limit 11) union (select content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270699497 order by created_at asc, id asc limit 11)", + "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from ((select content, user_id from music where 1 != 1) union (select content, user_id from music where 1 != 1)) as dt where 1 != 1", + "Query": "select content, user_id, weight_string(content), weight_string(user_id) from ((select content, user_id from music where user_id = 1270699497 order by created_at asc, id asc limit 11) union (select content, user_id from music where user_id = 1270699497 order by created_at asc, id asc limit 11)) as dt", "Table": "music", "Values": [ "INT64(1270699497)" diff --git a/go/vt/vtgate/planbuilder/testdata/lock_cases.json b/go/vt/vtgate/planbuilder/testdata/lock_cases.json index 98ffa9d1bb9..c14ba026869 100644 --- a/go/vt/vtgate/planbuilder/testdata/lock_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/lock_cases.json @@ -2,23 +2,7 @@ { "comment": "get_lock from dual", "query": "select get_lock('xyz', 10) from dual", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select get_lock('xyz', 10) from dual", - "Instructions": { - "OperatorType": "Lock", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetDestination": "KeyspaceID(00)", - "FieldQuery": "select get_lock('xyz', 10) from dual where 1 != 1", - "lock_func": [ - "get_lock('xyz', 10)" - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select get_lock('xyz', 10) from dual", "Instructions": { @@ -41,23 +25,7 @@ { "comment": "is_free_lock from dual", "query": "select is_free_lock('xyz') from dual", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select is_free_lock('xyz') from dual", - "Instructions": { - "OperatorType": "Lock", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetDestination": "KeyspaceID(00)", - "FieldQuery": "select is_free_lock('xyz') from dual where 1 != 1", - "lock_func": [ - "is_free_lock('xyz')" - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select is_free_lock('xyz') from dual", "Instructions": { @@ -80,23 +48,7 @@ { "comment": "get_lock from dual prepare query", "query": "select get_lock(?, ?)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select get_lock(?, ?)", - "Instructions": { - "OperatorType": "Lock", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetDestination": "KeyspaceID(00)", - "FieldQuery": "select get_lock(:v1, :v2) from dual where 1 != 1", - "lock_func": [ - "get_lock(:v1, :v2)" - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select get_lock(?, ?)", "Instructions": { @@ -152,24 +104,7 @@ { "comment": "multiple lock functions", "query": "select get_lock('xyz', 10), is_free_lock('abc') from dual", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select get_lock('xyz', 10), is_free_lock('abc') from dual", - "Instructions": { - "OperatorType": "Lock", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "TargetDestination": "KeyspaceID(00)", - "FieldQuery": "select get_lock('xyz', 10), is_free_lock('abc') from dual where 1 != 1", - "lock_func": [ - "get_lock('xyz', 10)", - "is_free_lock('abc')" - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select get_lock('xyz', 10), is_free_lock('abc') from dual", "Instructions": { diff --git a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json index c1b4fbe83b7..58e6744f1a6 100644 --- a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json @@ -2,21 +2,20 @@ { "comment": "Test cases in this file follow the code in memory_sort.go.\n# scatter aggregate order by references ungrouped column", "query": "select a, b, count(*) from user group by a order by b", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, count(*) from user group by a order by b", "Instructions": { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "(1|3) ASC", + "OrderBy": "(1|4) ASC", "ResultColumns": 3, "Inputs": [ { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "sum_count(2) AS count", - "GroupBy": "0", - "ResultColumns": 4, + "Aggregates": "any_value(1) AS b, sum_count_star(2) AS count(*), any_value(4)", + "GroupBy": "(0|3)", "Inputs": [ { "OperatorType": "Route", @@ -25,40 +24,14 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, count(*), weight_string(b), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", - "OrderBy": "(0|4) ASC", - "Query": "select a, b, count(*), weight_string(b), weight_string(a) from `user` group by a, weight_string(a) order by a asc", - "ResultColumns": 4, + "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a)", + "OrderBy": "(0|3) ASC", + "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a) order by a asc", "Table": "`user`" } ] } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select a, b, count(*) from user group by a order by b", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "random(1) AS b, sum_count_star(2) AS count(*)", - "GroupBy": "(0|3)", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a)", - "OrderBy": "(1|4) ASC, (0|3) ASC", - "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a) order by b asc, a asc", - "Table": "`user`" - } - ] }, "TablesUsed": [ "user.user" @@ -68,39 +41,7 @@ { "comment": "scatter aggregate order by references aggregate expression", "query": "select a, b, count(*) k from user group by a order by k", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, count(*) k from user group by a order by k", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "2 ASC", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(2) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", - "OrderBy": "(0|3) ASC", - "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc", - "ResultColumns": 3, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, count(*) k from user group by a order by k", "Instructions": { @@ -112,7 +53,7 @@ { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "random(1) AS b, sum_count_star(2) AS k", + "Aggregates": "any_value(1) AS b, sum_count_star(2) AS k", "GroupBy": "(0|3)", "Inputs": [ { @@ -139,53 +80,19 @@ { "comment": "select a, b, count(*) k from user group by a order by b, a, k", "query": "select a, b, count(*) k from user group by a order by b, a, k", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, count(*) k from user group by a order by b, a, k", "Instructions": { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "(1|3) ASC, (0|4) ASC, 2 ASC", + "OrderBy": "(1|4) ASC, (0|3) ASC, 2 ASC", "ResultColumns": 3, "Inputs": [ { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "sum_count(2) AS count", - "GroupBy": "0", - "ResultColumns": 5, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, count(*) as k, weight_string(b), weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", - "OrderBy": "(0|4) ASC", - "Query": "select a, b, count(*) as k, weight_string(b), weight_string(a) from `user` group by a, weight_string(a) order by a asc", - "ResultColumns": 5, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select a, b, count(*) k from user group by a order by b, a, k", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "1 ASC, (0|3) ASC, 2 ASC", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "random(1) AS b, sum_count_star(2) AS k", + "Aggregates": "any_value(1) AS b, sum_count_star(2) AS k, any_value(4)", "GroupBy": "(0|3)", "Inputs": [ { @@ -195,9 +102,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", + "FieldQuery": "select a, b, count(*) as k, weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a)", "OrderBy": "(0|3) ASC", - "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc", + "Query": "select a, b, count(*) as k, weight_string(a), weight_string(b) from `user` group by a, weight_string(a) order by a asc", "Table": "`user`" } ] @@ -212,45 +119,7 @@ { "comment": "scatter aggregate with memory sort and limit", "query": "select a, b, count(*) k from user group by a order by k desc limit 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, count(*) k from user group by a order by k desc limit 10", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "2 DESC", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(2) AS count", - "GroupBy": "0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", - "OrderBy": "(0|3) ASC", - "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc", - "ResultColumns": 3, - "Table": "`user`" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, count(*) k from user group by a order by k desc limit 10", "Instructions": { @@ -266,7 +135,7 @@ { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "random(1) AS b, sum_count_star(2) AS k", + "Aggregates": "any_value(1) AS b, sum_count_star(2) AS k", "GroupBy": "(0|3)", "Inputs": [ { @@ -278,7 +147,7 @@ }, "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", "OrderBy": "(0|3) ASC", - "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc", + "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by a asc limit :__upper_limit", "Table": "`user`" } ] @@ -295,41 +164,7 @@ { "comment": "scatter aggregate with memory sort and order by number", "query": "select a, b, count(*) k from user group by a order by 1,3", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, b, count(*) k from user group by a order by 1,3", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|3) ASC, 2 ASC", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(2) AS count", - "GroupBy": "0", - "ResultColumns": 4, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, b, count(*) as k, weight_string(a) from `user` where 1 != 1 group by a, weight_string(a)", - "OrderBy": "(0|3) ASC", - "Query": "select a, b, count(*) as k, weight_string(a) from `user` group by a, weight_string(a) order by 1 asc", - "ResultColumns": 4, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, b, count(*) k from user group by a order by 1,3", "Instructions": { @@ -341,7 +176,7 @@ { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "random(1) AS b, sum_count_star(2) AS k", + "Aggregates": "any_value(1) AS b, sum_count_star(2) AS k", "GroupBy": "(0|3)", "Inputs": [ { @@ -366,49 +201,15 @@ } }, { - "comment": "scatter aggregate with memory sort and order by number, reuse weight_string\n# we have to use a meaningless construct to test this. TODO: improve to do ordering once for textcol1", + "comment": "scatter aggregate with memory sort and order by number, reuse weight_string\n# we have to use a meaningless construct to test this", "query": "select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|2) ASC, 1 ASC, (0|2) ASC", - "ResultColumns": 2, - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(1) AS count", - "GroupBy": "2", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select textcol1 as t, count(*) as k, weight_string(textcol1) from `user` where 1 != 1 group by textcol1, weight_string(textcol1)", - "OrderBy": "(0|2) ASC, (0|2) ASC", - "Query": "select textcol1 as t, count(*) as k, weight_string(textcol1) from `user` group by textcol1, weight_string(textcol1) order by textcol1 asc, textcol1 asc", - "ResultColumns": 3, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select textcol1 as t, count(*) k from user group by textcol1 order by textcol1, k, textcol1", "Instructions": { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "0 ASC COLLATE latin1_swedish_ci, 1 ASC, 0 ASC COLLATE latin1_swedish_ci", + "OrderBy": "0 ASC COLLATE latin1_swedish_ci, 1 ASC", "Inputs": [ { "OperatorType": "Aggregate", @@ -440,103 +241,37 @@ { "comment": "order by on a cross-shard derived table", "query": "select id from (select user.id, user.col from user join user_extra) as t order by id", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from (select user.id, user.col from user join user_extra) as t order by id", "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|2) ASC", - "ResultColumns": 1, + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,L:2", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select id from (select user.id, user.col from user join user_extra) as t order by id", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|1) ASC", - "ResultColumns": 1, - "Inputs": [ + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from (select `user`.id, `user`.col from `user` where 1 != 1) as t where 1 != 1", + "OrderBy": "(0|1) ASC", + "Query": "select id, weight_string(id) from (select `user`.id, `user`.col from `user`) as t order by id asc", + "Table": "`user`" + }, { - "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 2 - ], - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,L:2", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col, weight_string(`user`.id) from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col, weight_string(`user`.id) from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra", + "Table": "user_extra" } ] }, @@ -549,7 +284,7 @@ { "comment": "order by on a cross-shard query. Note: this happens only when an order by column is from the second table", "query": "select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c", "Instructions": { @@ -600,59 +335,6 @@ ] } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a, user.col2 b, music.col3 c from user, music where user.id = music.id and user.id = 1 order by c", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(2|3) ASC", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,L:2,R:0,R:1", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2 as b from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col1 as a, `user`.col2 as b from `user` where `user`.id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.col3 as c, weight_string(music.col3) from music where 1 != 1", - "Query": "select music.col3 as c, weight_string(music.col3) from music where music.id = :user_id", - "Table": "music", - "Values": [ - ":user_id" - ], - "Vindex": "music_user_map" - } - ] - } - ] }, "TablesUsed": [ "user.music", @@ -663,7 +345,7 @@ { "comment": "Order by for join, with mixed cross-shard ordering", "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc", "Instructions": { @@ -714,59 +396,6 @@ ] } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by 1 asc, 3 desc, 2 asc", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|3) ASC, (2|4) DESC, (1|5) ASC", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,L:2,R:0,L:3,R:1,L:4", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2) from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1), weight_string(`user`.col2) from `user` where `user`.id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.col3, weight_string(music.col3) from music where 1 != 1", - "Query": "select music.col3, weight_string(music.col3) from music where music.id = :user_id", - "Table": "music", - "Values": [ - ":user_id" - ], - "Vindex": "music_user_map" - } - ] - } - ] }, "TablesUsed": [ "user.music", @@ -777,61 +406,19 @@ { "comment": "Order by for join, on text column in LHS.", "query": "select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(1|3) ASC, (2|4) ASC", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1", - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1", - "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1", - "Query": "select un.col2, weight_string(un.col2) from unsharded as un", - "Table": "unsharded" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.a, u.textcol1, un.col2 from user u join unsharded un order by u.textcol1, un.col2", "Instructions": { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "(1|3) ASC COLLATE latin1_swedish_ci, (2|4) ASC", + "OrderBy": "1 ASC COLLATE latin1_swedish_ci, (2|3) ASC", "ResultColumns": 3, "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,R:0,L:2,R:1", + "JoinColumnIndexes": "L:0,L:1,R:0,R:1", "TableName": "`user`_unsharded", "Inputs": [ { @@ -841,8 +428,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1", - "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u", + "FieldQuery": "select u.a, u.textcol1 from `user` as u where 1 != 1", + "Query": "select u.a, u.textcol1 from `user` as u", "Table": "`user`" }, { @@ -869,19 +456,19 @@ { "comment": "Order by for join, on text column in RHS.", "query": "select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2", "Instructions": { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "(1|3) ASC, (2|4) ASC", + "OrderBy": "1 ASC COLLATE latin1_swedish_ci, (2|3) ASC", "ResultColumns": 3, "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "R:0,R:1,L:0,R:2,L:1", + "JoinColumnIndexes": "R:0,R:1,L:0,L:1", "TableName": "unsharded_`user`", "Inputs": [ { @@ -902,50 +489,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1", - "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u", - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select u.a, u.textcol1, un.col2 from unsharded un join user u order by u.textcol1, un.col2", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(1|3) ASC COLLATE latin1_swedish_ci, (2|4) ASC", - "ResultColumns": 3, - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0,R:1,L:0,R:2,L:1", - "TableName": "unsharded_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select un.col2, weight_string(un.col2) from unsharded as un where 1 != 1", - "Query": "select un.col2, weight_string(un.col2) from unsharded as un", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u where 1 != 1", - "Query": "select u.a, u.textcol1, weight_string(u.textcol1) from `user` as u", + "FieldQuery": "select u.a, u.textcol1 from `user` as u where 1 != 1", + "Query": "select u.a, u.textcol1 from `user` as u", "Table": "`user`" } ] @@ -961,36 +506,7 @@ { "comment": "order by for vindex func", "query": "select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "2 ASC", - "Inputs": [ - { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 0, - 1, - 2, - 3 - ], - "Fields": { - "id": "VARBINARY", - "keyspace_id": "VARBINARY", - "range_end": "VARBINARY", - "range_start": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, keyspace_id, range_start, range_end from user_index where id = :id order by range_start", "Instructions": { @@ -1026,8 +542,7 @@ { "comment": "unary expression", "query": "select a from user order by binary a desc", - "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: convert(a, binary)", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a from user order by binary a desc", "Instructions": { @@ -1051,8 +566,7 @@ { "comment": "unary expression in join query", "query": "select u.a from user u join music m on u.a = m.a order by binary a desc", - "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: convert(a, binary)", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.a from user u join music m on u.a = m.a order by binary a desc", "Instructions": { @@ -1098,23 +612,7 @@ { "comment": "intcol order by", "query": "select id, intcol from user order by intcol", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, intcol from user order by intcol", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, intcol from `user` where 1 != 1", - "OrderBy": "1 ASC", - "Query": "select id, intcol from `user` order by intcol asc", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, intcol from user order by intcol", "Instructions": { @@ -1137,8 +635,7 @@ { "comment": "scatter order by with order by column not present", "query": "select col from user order by id", - "v3-plan": "VT12001: unsupported: in scatter query: ORDER BY must reference a column in the SELECT list: id asc", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user order by id", "Instructions": { diff --git a/go/vt/vtgate/planbuilder/testdata/misc_cases.json b/go/vt/vtgate/planbuilder/testdata/misc_cases.json new file mode 100644 index 00000000000..399cebe8939 --- /dev/null +++ b/go/vt/vtgate/planbuilder/testdata/misc_cases.json @@ -0,0 +1,210 @@ +[ + { + "comment": "prepare statement with select", + "query": "prepare prep from 'select * from user where id = ?'", + "plan": { + "QueryType": "PREPARE", + "Original": "prepare prep from 'select * from user where id = ?'", + "Instructions": { + "OperatorType": "Rows" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "prepare statement with delete", + "query": "prepare prep from 'delete from user where id between ? and ?'", + "plan": { + "QueryType": "PREPARE", + "Original": "prepare prep from 'delete from user where id between ? and ?'", + "Instructions": { + "OperatorType": "Rows" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "prepare statement with drop", + "query": "prepare prep from 'drop table user'", + "plan": { + "QueryType": "PREPARE", + "Original": "prepare prep from 'drop table user'", + "Instructions": { + "OperatorType": "Rows" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "prepare statement with user defined variable", + "query": "prepare prep from @prep_stmt", + "plan": { + "QueryType": "PREPARE", + "Original": "prepare prep from @prep_stmt", + "Instructions": { + "OperatorType": "Rows" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "prepare statement with invalid query", + "query": "prepare prep from 'wrong query syntax'", + "plan": "syntax error at position 6 near 'wrong'" + }, + { + "comment": "prepare statement with non existing variable", + "query": "prepare prep from @foo", + "plan": "VT03024: 'foo' user defined variable does not exists" + }, + { + "comment": "execute one param statement", + "query": "execute prep_one_param using @foo", + "plan": { + "QueryType": "EXECUTE", + "Original": "execute prep_one_param using @foo", + "Instructions": { + "OperatorType": "EXECUTE", + "Parameters": [ + "foo" + ], + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user` where id = :v1", + "Table": "`user`", + "Values": [ + ":v1" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "execute in param statement", + "query": "execute prep_in_param using @x, @y", + "plan": { + "QueryType": "EXECUTE", + "Original": "execute prep_in_param using @x, @y", + "Instructions": { + "OperatorType": "EXECUTE", + "Parameters": [ + "x", + "y" + ], + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user` where id in ::__vals", + "Table": "`user`", + "Values": [ + "(:v1, :v2)" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "execute no param statement", + "query": "execute prep_no_param", + "plan": { + "QueryType": "EXECUTE", + "Original": "execute prep_no_param", + "Instructions": { + "OperatorType": "EXECUTE", + "Parameters": null, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user`", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "execute prepared statement does not exists", + "query": "execute not_prepared using @foo", + "plan": "VT09011: Unknown prepared statement handler (not_prepared) given to EXECUTE" + }, + { + "comment": "execute wrong number of parameters", + "query": "execute prep_one_param", + "plan": "VT03025: Incorrect arguments to EXECUTE" + }, + { + "comment": "execute wrong number of parameters", + "query": "execute prep_one_param using @foo, @bar", + "plan": "VT03025: Incorrect arguments to EXECUTE" + }, + { + "comment": "prepare a dual query", + "query": "prepare prep_dual from 'select 1+?, 10/?'", + "plan": { + "QueryType": "PREPARE", + "Original": "prepare prep_dual from 'select 1+?, 10/?'", + "Instructions": { + "OperatorType": "Rows" + }, + "TablesUsed": [ + "main.dual" + ] + } + }, + { + "comment": "drop prepare", + "query": "drop prepare prep_no_param", + "plan": { + "QueryType": "DEALLOCATE PREPARE", + "Original": "drop prepare prep_no_param", + "Instructions": { + "OperatorType": "Rows" + } + } + }, + { + "comment": "drop prepare that does not exists", + "query": "drop prepare prep_not_exist", + "plan": "VT09011: Unknown prepared statement handler (prep_not_exist) given to DEALLOCATE PREPARE" + } +] diff --git a/go/vt/vtgate/planbuilder/testdata/oltp_cases.json b/go/vt/vtgate/planbuilder/testdata/oltp_cases.json index 88717292379..810f58b4ea9 100644 --- a/go/vt/vtgate/planbuilder/testdata/oltp_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/oltp_cases.json @@ -2,26 +2,7 @@ { "comment": "OLTP simple select", "query": "SELECT c FROM sbtest34 WHERE id=15", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c FROM sbtest34 WHERE id=15", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c from sbtest34 where 1 != 1", - "Query": "select c from sbtest34 where id = 15", - "Table": "sbtest34", - "Values": [ - "INT64(15)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c FROM sbtest34 WHERE id=15", "Instructions": { @@ -47,22 +28,7 @@ { "comment": "OLTP simple range select", "query": "SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c from sbtest12 where 1 != 1", - "Query": "select c from sbtest12 where id between 1 and 10", - "Table": "sbtest12" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c FROM sbtest12 WHERE id BETWEEN 1 AND 10", "Instructions": { @@ -84,29 +50,7 @@ { "comment": "OLTP sum range select", "query": "SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum(0)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select sum(k) from sbtest43 where 1 != 1", - "Query": "select sum(k) from sbtest43 where id between 90 and 990", - "Table": "sbtest43" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT SUM(k) FROM sbtest43 WHERE id BETWEEN 90 AND 990", "Instructions": { @@ -135,24 +79,7 @@ { "comment": "OLTP order range select", "query": "SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c, weight_string(c) from sbtest1 where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select c, weight_string(c) from sbtest1 where id between 50 and 235 order by c asc", - "ResultColumns": 1, - "Table": "sbtest1" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c FROM sbtest1 WHERE id BETWEEN 50 AND 235 ORDER BY c", "Instructions": { @@ -175,51 +102,32 @@ { "comment": "OLTP distinct range select", "query": "SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "1", - "ResultColumns": 1, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c, weight_string(c) from sbtest30 where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select distinct c, weight_string(c) from sbtest30 where id between 1 and 10 order by c asc", - "ResultColumns": 2, - "Table": "sbtest30" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT DISTINCT c FROM sbtest30 WHERE id BETWEEN 1 AND 10 ORDER BY c", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "(0|1) COLLATE latin1_swedish_ci", - "ResultColumns": 1, + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "0 ASC COLLATE latin1_swedish_ci", "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c, weight_string(c) from sbtest30 where 1 != 1", - "OrderBy": "0 ASC COLLATE latin1_swedish_ci, 0 ASC COLLATE latin1_swedish_ci", - "Query": "select distinct c, weight_string(c) from sbtest30 where id between 1 and 10 order by c asc, c asc", - "Table": "sbtest30" + "OperatorType": "Distinct", + "Collations": [ + "0: latin1_swedish_ci" + ], + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select c from sbtest30 where 1 != 1", + "Query": "select distinct c from sbtest30 where id between 1 and 10", + "Table": "sbtest30" + } + ] } ] }, @@ -231,30 +139,7 @@ { "comment": "OLTP index udpate", "query": "UPDATE sbtest6 SET k=k+1 WHERE id=5", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE sbtest6 SET k=k+1 WHERE id=5", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update sbtest6 set k = k + 1 where id = 5", - "Table": "sbtest6", - "Values": [ - "INT64(5)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.sbtest6" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE sbtest6 SET k=k+1 WHERE id=5", "Instructions": { @@ -265,7 +150,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update sbtest6 set k = k + 1 where id = 5", "Table": "sbtest6", "Values": [ @@ -281,30 +165,7 @@ { "comment": "OLTP non index update", "query": "UPDATE sbtest9 SET c=7 WHERE id=8", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE sbtest9 SET c=7 WHERE id=8", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update sbtest9 set c = 7 where id = 8", - "Table": "sbtest9", - "Values": [ - "INT64(8)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.sbtest9" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE sbtest9 SET c=7 WHERE id=8", "Instructions": { @@ -315,7 +176,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update sbtest9 set c = 7 where id = 8", "Table": "sbtest9", "Values": [ @@ -331,30 +191,7 @@ { "comment": "OLTP delete", "query": "DELETE FROM sbtest15 WHERE id=7525", - "v3-plan": { - "QueryType": "DELETE", - "Original": "DELETE FROM sbtest15 WHERE id=7525", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "delete from sbtest15 where id = 7525", - "Table": "sbtest15", - "Values": [ - "INT64(7525)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.sbtest15" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "DELETE FROM sbtest15 WHERE id=7525", "Instructions": { @@ -365,7 +202,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from sbtest15 where id = 7525", "Table": "sbtest15", "Values": [ @@ -392,7 +228,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into sbtest16(id, k, c, pad) values (:_id_0, 1, 2, 50)", "TableName": "sbtest16", "VindexValues": { @@ -404,4 +239,4 @@ ] } } -] \ No newline at end of file +] diff --git a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json index d2018535e51..43a9af95744 100644 --- a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json @@ -2,22 +2,7 @@ { "comment": "HAVING implicitly references table col", "query": "select user.col1 from user having col2 = 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 from user having col2 = 2", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col1 from `user` where 1 != 1", - "Query": "select `user`.col1 from `user` having col2 = 2", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1 from user having col2 = 2", "Instructions": { @@ -39,47 +24,12 @@ { "comment": "ambiguous symbol reference", "query": "select user.col1, user_extra.col1 from user join user_extra having col1 = 2", - "v3-plan": "VT03021: ambiguous symbol reference: col1", - "gen4-plan": "Column 'col1' in field list is ambiguous" + "plan": "Column 'col1' in field list is ambiguous" }, { "comment": "TODO: this should be 'Column 'col1' in having clause is ambiguous'\n# non-ambiguous symbol reference", "query": "select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col1 from `user` where 1 != 1", - "Query": "select `user`.col1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col1 from user_extra where 1 != 1", - "Query": "select user_extra.col1 from user_extra having user_extra.col1 = 2", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1, user_extra.col1 from user join user_extra having user_extra.col1 = 2", "Instructions": { @@ -121,41 +71,7 @@ { "comment": "HAVING multi-route", "query": "select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,R:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col1 as a, `user`.col2 from `user` where 1 != 1", - "Query": "select `user`.col1 as a, `user`.col2 from `user` having 1 = 1 and a = 1 and a = `user`.col2", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col3 from user_extra where 1 != 1", - "Query": "select user_extra.col3 from user_extra having user_extra.col3 = 1", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1 as a, user.col2, user_extra.col3 from user join user_extra having 1 = 1 and a = 1 and a = user.col2 and user_extra.col3 = 1", "Instructions": { @@ -197,47 +113,7 @@ { "comment": "HAVING uses subquery", "query": "select id from user having id in (select col from user)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user having id in (select col from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` having :__sq_has_values1 = 1 and id in ::__vals", - "Table": "`user`", - "Values": [ - "::__sq1" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user having id in (select col from user)", "Instructions": { @@ -249,6 +125,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -260,6 +137,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -284,26 +162,7 @@ { "comment": "ORDER BY, reference col from local table.", "query": "select col from user where id = 5 order by aa", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where id = 5 order by aa", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where id = 5 order by aa asc", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where id = 5 order by aa", "Instructions": { @@ -329,26 +188,7 @@ { "comment": "ORDER BY uses column numbers", "query": "select col from user where id = 1 order by 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where id = 1 order by 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where id = 1 order by 1 asc", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where id = 1 order by 1", "Instructions": { @@ -374,23 +214,7 @@ { "comment": "ORDER BY on scatter", "query": "select col from user order by col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user order by col", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "OrderBy": "0 ASC", - "Query": "select col from `user` order by col asc", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user order by col", "Instructions": { @@ -410,51 +234,10 @@ ] } }, - { - "comment": "ORDER BY on select t.*", - "query": "select t.*, t.col from user t order by t.col", - "v3-plan": "VT12001: unsupported: in scatter query, cannot ORDER BY a column that comes after `*` expressions in the SELECT list", - "gen4-plan": "VT12001: unsupported: '*' expression in cross-shard query" - }, - { - "comment": "ORDER BY on select *", - "query": "select *, col from user order by col", - "v3-plan": "VT12001: unsupported: in scatter query, cannot ORDER BY a column that comes after `*` expressions in the SELECT list", - "gen4-plan": "VT12001: unsupported: '*' expression in cross-shard query" - }, - { - "comment": "ORDER BY on select multi t.*", - "query": "select t.*, t.name, t.*, t.col from user t order by t.col", - "v3-plan": "VT12001: unsupported: in scatter query, cannot ORDER BY a column that comes after `*` expressions in the SELECT list", - "gen4-plan": "VT12001: unsupported: '*' expression in cross-shard query" - }, - { - "comment": "ORDER BY on select multi *", - "query": "select *, name, *, col from user order by col", - "v3-plan": "VT12001: unsupported: in scatter query, cannot ORDER BY a column that comes after `*` expressions in the SELECT list", - "gen4-plan": "VT12001: unsupported: '*' expression in cross-shard query" - }, { "comment": "ORDER BY works for select * from authoritative table", "query": "select * from authoritative order by user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from authoritative order by user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id, col1, col2, weight_string(user_id) from authoritative where 1 != 1", - "OrderBy": "(0|3) ASC", - "Query": "select user_id, col1, col2, weight_string(user_id) from authoritative order by user_id asc", - "ResultColumns": 3, - "Table": "authoritative" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from authoritative order by user_id", "Instructions": { @@ -478,24 +261,7 @@ { "comment": "ORDER BY works for select * from authoritative table", "query": "select * from authoritative order by col1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from authoritative order by col1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id, col1, col2, weight_string(col1) from authoritative where 1 != 1", - "OrderBy": "(1|3) ASC", - "Query": "select user_id, col1, col2, weight_string(col1) from authoritative order by col1 asc", - "ResultColumns": 3, - "Table": "authoritative" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from authoritative order by col1", "Instructions": { @@ -518,24 +284,7 @@ { "comment": "ORDER BY on scatter with text column", "query": "select a, textcol1, b from user order by a, textcol1, b", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, textcol1, b from user order by a, textcol1, b", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, textcol1, b, weight_string(a), weight_string(textcol1), weight_string(b) from `user` where 1 != 1", - "OrderBy": "(0|3) ASC, (1|4) ASC, (2|5) ASC", - "Query": "select a, textcol1, b, weight_string(a), weight_string(textcol1), weight_string(b) from `user` order by a asc, textcol1 asc, b asc", - "ResultColumns": 3, - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, textcol1, b from user order by a, textcol1, b", "Instructions": { @@ -559,24 +308,7 @@ { "comment": "ORDER BY on scatter with text column, qualified name TODO: can plan better", "query": "select a, user.textcol1, b from user order by a, textcol1, b", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, user.textcol1, b from user order by a, textcol1, b", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, `user`.textcol1, b, weight_string(a), weight_string(`user`.textcol1), weight_string(b) from `user` where 1 != 1", - "OrderBy": "(0|3) ASC, (1|4) ASC, (2|5) ASC", - "Query": "select a, `user`.textcol1, b, weight_string(a), weight_string(`user`.textcol1), weight_string(b) from `user` order by a asc, textcol1 asc, b asc", - "ResultColumns": 3, - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, user.textcol1, b from user order by a, textcol1, b", "Instructions": { @@ -600,24 +332,7 @@ { "comment": "ORDER BY on scatter with multiple text columns", "query": "select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a, textcol1, b, textcol2, weight_string(a), weight_string(textcol1), weight_string(b), weight_string(textcol2) from `user` where 1 != 1", - "OrderBy": "(0|4) ASC, (1|5) ASC, (2|6) ASC, (3|7) ASC", - "Query": "select a, textcol1, b, textcol2, weight_string(a), weight_string(textcol1), weight_string(b), weight_string(textcol2) from `user` order by a asc, textcol1 asc, b asc, textcol2 asc", - "ResultColumns": 4, - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, textcol1, b, textcol2 from user order by a, textcol1, b, textcol2", "Instructions": { @@ -641,30 +356,12 @@ { "comment": "ORDER BY invalid col number on scatter", "query": "select col from user order by 2", - "v3-plan": "VT03014: unknown column '2' in 'order clause'", - "gen4-plan": "Unknown column '2' in 'order clause'" + "plan": "Unknown column '2' in 'order clause'" }, { "comment": "ORDER BY column offset", "query": "select id as foo from music order by 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id as foo from music order by 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id as foo, weight_string(id) from music order by 1 asc", - "ResultColumns": 1, - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id as foo from music order by 1", "Instructions": { @@ -688,22 +385,7 @@ { "comment": "ORDER BY NULL", "query": "select col from user order by null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user order by null", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` order by null", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user order by null", "Instructions": { @@ -725,7 +407,7 @@ { "comment": "ORDER BY after pull-out subquery", "query": "select col from user where col in (select col2 from user) order by col", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where col in (select col2 from user) order by col", "Instructions": { @@ -737,6 +419,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -748,6 +431,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -760,55 +444,18 @@ "Table": "`user`" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "ORDER BY NULL for join", + "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null", + "plan": { "QueryType": "SELECT", - "Original": "select col from user where col in (select col2 from user) order by col", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col2 from `user` where 1 != 1", - "Query": "select col2 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "OrderBy": "0 ASC", - "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by col asc", - "Table": "`user`" - } - ] - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "ORDER BY NULL for join", - "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null", + "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null", "Instructions": { "OperatorType": "Join", "Variant": "Join", @@ -826,52 +473,7 @@ "Sharded": true }, "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1", - "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by null", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id order by null", - "Table": "music", - "Values": [ - ":user_id" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by null", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,L:2,R:0", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where `user`.id = 1", + "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1", "Table": "`user`", "Values": [ "INT64(1)" @@ -904,7 +506,7 @@ { "comment": "ORDER BY non-key column for join", "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a", "Instructions": { @@ -947,52 +549,6 @@ "Vindex": "music_user_map" } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by a", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,L:2,R:0", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where 1 != 1", - "OrderBy": "(1|3) ASC", - "Query": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where `user`.id = 1 order by a asc", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id", - "Table": "music", - "Values": [ - ":user_id" - ], - "Vindex": "music_user_map" - } - ] }, "TablesUsed": [ "user.music", @@ -1003,7 +559,7 @@ { "comment": "ORDER BY non-key column for implicit join", "query": "select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a", "Instructions": { @@ -1046,52 +602,6 @@ "Vindex": "music_user_map" } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a, user.col2, music.col3 from user, music where user.id = music.id and user.id = 1 order by a", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,L:2,R:0", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where 1 != 1", - "OrderBy": "(1|3) ASC", - "Query": "select `user`.id, `user`.col1 as a, `user`.col2, weight_string(`user`.col1) from `user` where `user`.id = 1 order by a asc", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id", - "Table": "music", - "Values": [ - ":user_id" - ], - "Vindex": "music_user_map" - } - ] }, "TablesUsed": [ "user.music", @@ -1102,43 +612,7 @@ { "comment": "ORDER BY NULL after pull-out subquery", "query": "select col from user where col in (select col2 from user) order by null", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where col in (select col2 from user) order by null", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col2 from `user` where 1 != 1", - "Query": "select col2 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by null", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where col in (select col2 from user) order by null", "Instructions": { @@ -1150,6 +624,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -1161,6 +636,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -1181,22 +657,7 @@ { "comment": "ORDER BY RAND()", "query": "select col from user order by RAND()", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user order by RAND()", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` order by RAND()", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user order by RAND()", "Instructions": { @@ -1218,7 +679,7 @@ { "comment": "ORDER BY RAND() for join", "query": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()", "Instructions": { @@ -1253,52 +714,7 @@ "Sharded": true }, "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id order by RAND()", - "Table": "music", - "Values": [ - ":user_id" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a, user.col2, music.col3 from user join music on user.id = music.id where user.id = 1 order by RAND()", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,L:2,R:0", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where 1 != 1", - "Query": "select `user`.id, `user`.col1 as a, `user`.col2 from `user` where `user`.id = 1 order by RAND()", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.col3 from music where 1 != 1", - "Query": "select music.col3 from music where music.id = :user_id order by RAND()", + "Query": "select music.col3 from music where music.id = :user_id", "Table": "music", "Values": [ ":user_id" @@ -1316,7 +732,7 @@ { "comment": "ORDER BY RAND() after pull-out subquery", "query": "select col from user where col in (select col2 from user) order by rand()", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where col in (select col2 from user) order by rand()", "Instructions": { @@ -1328,6 +744,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -1339,6 +756,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -1350,73 +768,18 @@ "Table": "`user`" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Order by, '*' expression", + "query": "select * from user where id = 5 order by col", + "plan": { "QueryType": "SELECT", - "Original": "select col from user where col in (select col2 from user) order by rand()", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col2 from `user` where 1 != 1", - "Query": "select col2 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 order by rand()", - "Table": "`user`" - } - ] - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "Order by, '*' expression", - "query": "select * from user where id = 5 order by col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 5 order by col", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 5 order by col asc", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 5 order by col", + "Original": "select * from user where id = 5 order by col", "Instructions": { "OperatorType": "Route", "Variant": "EqualUnique", @@ -1440,26 +803,7 @@ { "comment": "Order by, qualified '*' expression", "query": "select user.* from user where id = 5 order by user.col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.* from user where id = 5 order by user.col", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.* from `user` where 1 != 1", - "Query": "select `user`.* from `user` where id = 5 order by `user`.col asc", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.* from user where id = 5 order by user.col", "Instructions": { @@ -1485,26 +829,7 @@ { "comment": "Order by, '*' expression with qualified reference", "query": "select * from user where id = 5 order by user.col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 5 order by user.col", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 5 order by `user`.col asc", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user where id = 5 order by user.col", "Instructions": { @@ -1530,44 +855,7 @@ { "comment": "Order by, '*' expression in a subquery", "query": "select u.id, e.id from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.id, e.id from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "u_col": 1 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1", - "Query": "select u.id, u.col from `user` as u where u.col in (select * from `user` where `user`.id = u.id order by col asc)", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select e.id from user_extra as e where 1 != 1", - "Query": "select e.id from user_extra as e where e.col = :u_col", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id, e.id from user u join user_extra e where u.col = e.col and u.col in (select * from user where user.id = u.id order by col)", "Instructions": { @@ -1612,8 +900,7 @@ { "comment": "Order by, verify outer symtab is searched according to its own context.", "query": "select u.id from user u having u.id in (select col2 from user where user.id = u.id order by u.col)", - "v3-plan": "VT03020: symbol u.col not found in subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id from user u having u.id in (select col2 from user where user.id = u.id order by u.col)", "Instructions": { @@ -1635,44 +922,22 @@ { "comment": "Order by, qualified '*' expression, name mismatched.", "query": "select user.* from user where id = 5 order by e.col", - "v3-plan": "VT03019: symbol e.col not found", - "gen4-plan": "symbol e.col not found" + "plan": "column 'e.col' not found" }, { "comment": "Order by, invalid column number", "query": "select col from user order by 18446744073709551616", - "v3-plan": "VT13001: [BUG] error parsing column number: 18446744073709551616", - "gen4-plan": "error parsing column number: 18446744073709551616" + "plan": "error parsing column number: 18446744073709551616" }, { "comment": "Order by, out of range column number", "query": "select col from user order by 2", - "v3-plan": "VT03014: unknown column '2' in 'order clause'", - "gen4-plan": "Unknown column '2' in 'order clause'" + "plan": "Unknown column '2' in 'order clause'" }, { "comment": "Order by, '*' expression with qualified reference and using collate", "query": "select * from user where id = 5 order by user.col collate utf8_general_ci", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 5 order by user.col collate utf8_general_ci", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 5 order by `user`.col collate utf8_general_ci asc", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user where id = 5 order by user.col collate utf8_general_ci", "Instructions": { @@ -1698,26 +963,7 @@ { "comment": "Order by with math functions", "query": "select * from user where id = 5 order by -col1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 5 order by -col1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 5 order by -col1 asc", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user where id = 5 order by -col1", "Instructions": { @@ -1743,26 +989,7 @@ { "comment": "Order by with string operations", "query": "select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 5 order by concat(col, col1) collate utf8_general_ci desc", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user where id = 5 order by concat(col,col1) collate utf8_general_ci desc", "Instructions": { @@ -1788,26 +1015,7 @@ { "comment": "Order by with math operations", "query": "select * from user where id = 5 order by id+col collate utf8_general_ci desc", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 5 order by id+col collate utf8_general_ci desc", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 5 order by id + col collate utf8_general_ci desc", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user where id = 5 order by id+col collate utf8_general_ci desc", "Instructions": { @@ -1833,26 +1041,7 @@ { "comment": "Order by derived table column", "query": "select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` as u join (select user_id from user_extra where 1 != 1) as eu on u.id = eu.user_id where 1 != 1", - "Query": "select * from `user` as u join (select user_id from user_extra where user_id = 5) as eu on u.id = eu.user_id where u.id = 5 order by eu.user_id asc", - "Table": "`user`, user_extra", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user u join (select user_id from user_extra where user_id = 5) eu on u.id = eu.user_id where u.id = 5 order by eu.user_id", "Instructions": { @@ -1862,8 +1051,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from `user` as u, (select user_id from user_extra where 1 != 1) as eu where 1 != 1", - "Query": "select * from `user` as u, (select user_id from user_extra where user_id = 5) as eu where u.id = 5 and u.id = eu.user_id order by eu.user_id asc", + "FieldQuery": "select * from (select user_id from user_extra where 1 != 1) as eu, `user` as u where 1 != 1", + "Query": "select * from (select user_id from user_extra where user_id = 5) as eu, `user` as u where u.id = 5 and u.id = eu.user_id order by eu.user_id asc", "Table": "`user`, user_extra", "Values": [ "INT64(5)" @@ -1879,26 +1068,7 @@ { "comment": "routing rules: order by gets pushed for routes", "query": "select col from route1 where id = 1 order by col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from route1 where id = 1 order by col", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` as route1 where 1 != 1", - "Query": "select col from `user` as route1 where id = 1 order by col asc", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from route1 where id = 1 order by col", "Instructions": { @@ -1924,26 +1094,7 @@ { "comment": "LIMIT", "query": "select col1 from user where id = 1 limit 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1 from user where id = 1 limit 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1 from `user` where 1 != 1", - "Query": "select col1 from `user` where id = 1 limit 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1 from user where id = 1 limit 1", "Instructions": { @@ -1969,7 +1120,7 @@ { "comment": "limit for joins. Can't push down the limit because result\n# counts get multiplied by join operations.", "query": "select user.col from user join user_extra limit 1", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join user_extra limit 1", "Instructions": { @@ -2007,80 +1158,19 @@ ] } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "limit for scatter", + "query": "select col from user limit 1", + "plan": { "QueryType": "SELECT", - "Original": "select user.col from user join user_extra limit 1", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "limit for scatter", - "query": "select col from user limit 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user limit 1", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` limit :__upper_limit", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select col from user limit 1", + "Original": "select col from user limit 1", "Instructions": { "OperatorType": "Limit", "Count": "INT64(1)", @@ -2106,28 +1196,7 @@ { "comment": "limit for scatter with bind var", "query": "select col from user limit :a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user limit :a", - "Instructions": { - "OperatorType": "Limit", - "Count": ":a", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` limit :__upper_limit", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user limit :a", "Instructions": { @@ -2155,28 +1224,7 @@ { "comment": "cross-shard expression in parenthesis with limit", "query": "select * from user where (id1 = 4 AND name1 ='abc') limit 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where (id1 = 4 AND name1 ='abc') limit 5", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(5)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id1 = 4 and name1 = 'abc' limit :__upper_limit", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user where (id1 = 4 AND name1 ='abc') limit 5", "Instructions": { @@ -2204,49 +1252,7 @@ { "comment": "scatter limit after pullout subquery", "query": "select col from user where col in (select col1 from user) limit 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from user where col in (select col1 from user) limit 1", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1 from `user` where 1 != 1", - "Query": "select col1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` where :__sq_has_values1 = 1 and col in ::__sq1 limit :__upper_limit", - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from user where col in (select col1 from user) limit 1", "Instructions": { @@ -2262,6 +1268,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -2273,6 +1280,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -2295,22 +1303,7 @@ { "comment": "limit on reference table", "query": "select col from ref limit 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from ref limit 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from ref where 1 != 1", - "Query": "select col from ref limit 1", - "Table": "ref" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from ref limit 1", "Instructions": { @@ -2332,28 +1325,7 @@ { "comment": "arithmetic limit", "query": "select id from user limit 1+1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user limit 1+1", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(2)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` limit :__upper_limit", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user limit 1+1", "Instructions": { @@ -2381,24 +1353,7 @@ { "comment": "order by column alias", "query": "select id as foo from music order by foo", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id as foo from music order by foo", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id as foo, weight_string(id) from music order by foo asc", - "ResultColumns": 1, - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id as foo from music order by foo", "Instructions": { @@ -2422,24 +1377,7 @@ { "comment": "column alias for a table column in order by", "query": "select id as foo, id2 as id from music order by id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id as foo, id2 as id from music order by id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id as foo, id2 as id, weight_string(id2) from music where 1 != 1", - "OrderBy": "(1|2) ASC", - "Query": "select id as foo, id2 as id, weight_string(id2) from music order by id asc", - "ResultColumns": 2, - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id as foo, id2 as id from music order by id", "Instructions": { @@ -2463,43 +1401,7 @@ { "comment": "ordering on the left side of the join", "query": "select name from user, music order by name", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select name from user, music order by name", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc", - "ResultColumns": 1, - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music where 1 != 1", - "Query": "select 1 from music", - "Table": "music" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select name from user, music order by name", "Instructions": { @@ -2542,35 +1444,13 @@ { "comment": "aggregation and non-aggregations column without group by", "query": "select count(id), num from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select count(id), num from user", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(id), num from `user` where 1 != 1", - "Query": "select count(id), num from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(id), num from user", "Instructions": { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count(id), random(1) AS num", + "Aggregates": "sum_count(0) AS count(id), any_value(1) AS num", "Inputs": [ { "OperatorType": "Route", @@ -2593,7 +1473,7 @@ { "comment": "aggregation and non-aggregations column with order by", "query": "select count(id), num from user order by 2", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(id), num from user order by 2", "Instructions": { @@ -2605,8 +1485,7 @@ { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "ResultColumns": 3, + "Aggregates": "sum_count(0) AS count(id), any_value(1) AS num, any_value(2)", "Inputs": [ { "OperatorType": "Route", @@ -2622,30 +1501,6 @@ ] } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select count(id), num from user order by 2", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count(id), random(1) AS num", - "ResultColumns": 2, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1", - "OrderBy": "(1|2) ASC", - "Query": "select count(id), num, weight_string(num) from `user` order by num asc", - "Table": "`user`" - } - ] }, "TablesUsed": [ "user.user" @@ -2655,14 +1510,15 @@ { "comment": "aggregation and non-aggregations column with group by", "query": "select count(id), num from user group by 2", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(id), num from user group by 2", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "sum_count(0) AS count", - "GroupBy": "1", + "Aggregates": "sum_count(0) AS count(id)", + "GroupBy": "(1|2)", + "ResultColumns": 2, "Inputs": [ { "OperatorType": "Route", @@ -2671,80 +1527,22 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by 2, weight_string(num)", + "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by num, weight_string(num)", "OrderBy": "(1|2) ASC", - "Query": "select count(id), num, weight_string(num) from `user` group by 2, weight_string(num) order by num asc", - "ResultColumns": 2, + "Query": "select count(id), num, weight_string(num) from `user` group by num, weight_string(num) order by num asc", "Table": "`user`" } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select count(id), num from user group by 2", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(0) AS count(id)", - "GroupBy": "(1|2)", - "ResultColumns": 2, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by num, weight_string(num)", - "OrderBy": "(1|2) ASC", - "Query": "select count(id), num, weight_string(num) from `user` group by num, weight_string(num) order by num asc", - "Table": "`user`" - } - ] - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "aggregation and non-aggregations column with group by and order by", - "query": "select count(id), num from user group by 2 order by 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select count(id), num from user group by 2 order by 1", - "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "0 ASC", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count(0) AS count", - "GroupBy": "1", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(id), num, weight_string(num) from `user` where 1 != 1 group by 2, weight_string(num)", - "OrderBy": "(1|2) ASC", - "Query": "select count(id), num, weight_string(num) from `user` group by 2, weight_string(num) order by num asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "aggregation and non-aggregations column with group by and order by", + "query": "select count(id), num from user group by 2 order by 1", + "plan": { "QueryType": "SELECT", "Original": "select count(id), num from user group by 2 order by 1", "Instructions": { @@ -2783,38 +1581,46 @@ { "comment": "join order by with ambiguous column reference ; valid in MySQL", "query": "select name, name from user, music order by name", - "v3-plan": "VT03021: ambiguous symbol reference: `name`", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select name, name from user, music order by name", "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:0", - "TableName": "`user`_music", + "OperatorType": "SimpleProjection", + "Columns": [ + 0, + 0 + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music where 1 != 1", - "Query": "select 1 from music", - "Table": "music" + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1", + "OrderBy": "(0|1) ASC", + "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music where 1 != 1", + "Query": "select 1 from music", + "Table": "music" + } + ] } ] }, @@ -2827,8 +1633,7 @@ { "comment": "order by with ambiguous column reference ; valid in MySQL", "query": "select id, id from user order by id", - "v3-plan": "VT03021: ambiguous symbol reference: id", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, id from user order by id", "Instructions": { @@ -2852,50 +1657,20 @@ { "comment": "Scatter order by and aggregation: order by column must reference column from select list", "query": "select col, count(*) from user group by col order by c1", - "v3-plan": "VT12001: unsupported: memory sort: ORDER BY must reference a column in the SELECT list: c1 asc", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col, count(*) from user group by col order by c1", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "sum_count_star(1) AS count(*), random(2) AS c1", - "GroupBy": "0", + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(2|3) ASC", "ResultColumns": 2, "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col, count(*), c1, weight_string(c1) from `user` where 1 != 1 group by col", - "OrderBy": "(2|3) ASC, 0 ASC", - "Query": "select col, count(*), c1, weight_string(c1) from `user` group by col order by c1 asc, col asc", - "Table": "`user`" - } - ] - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "Distinct with cross shard query", - "query": "select distinct user.a from user join user_extra", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select distinct user.a from user join user_extra", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(1) AS count(*), any_value(2) AS c1, any_value(3)", + "GroupBy": "0", "Inputs": [ { "OperatorType": "Route", @@ -2904,33 +1679,31 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.a from `user` where 1 != 1", - "Query": "select `user`.a from `user`", + "FieldQuery": "select col, count(*), c1, weight_string(c1) from `user` where 1 != 1 group by col", + "OrderBy": "0 ASC", + "Query": "select col, count(*), c1, weight_string(c1) from `user` group by col order by col asc", "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" } ] } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Distinct with cross shard query", + "query": "select distinct user.a from user join user_extra", + "plan": { "QueryType": "SELECT", "Original": "select distinct user.a from user join user_extra", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "(0|1)", + "OperatorType": "Distinct", + "Collations": [ + "(0:1)" + ], "ResultColumns": 1, "Inputs": [ { @@ -2947,8 +1720,7 @@ "Sharded": true }, "FieldQuery": "select `user`.a, weight_string(`user`.a) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select `user`.a, weight_string(`user`.a) from `user` order by `user`.a asc", + "Query": "select distinct `user`.a, weight_string(`user`.a) from `user`", "Table": "`user`" }, { @@ -2959,7 +1731,7 @@ "Sharded": true }, "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", + "Query": "select distinct 1 from user_extra", "Table": "user_extra" } ] @@ -2975,37 +1747,15 @@ { "comment": "Distinct with column alias", "query": "select distinct a as c, a from user", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select distinct a as c, a from user", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "0, 1", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a as c, a, weight_string(a) from `user` where 1 != 1", - "OrderBy": "(0|2) ASC, (0|2) ASC", - "Query": "select distinct a as c, a, weight_string(a) from `user` order by c asc, a asc", - "ResultColumns": 2, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select distinct a as c, a from user", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "(0|2), (1|2)", + "OperatorType": "Distinct", + "Collations": [ + "(0:2)", + "(1:2)" + ], "ResultColumns": 2, "Inputs": [ { @@ -3016,8 +1766,7 @@ "Sharded": true }, "FieldQuery": "select a as c, a, weight_string(a) from `user` where 1 != 1", - "OrderBy": "(0|2) ASC, (0|2) ASC", - "Query": "select distinct a as c, a, weight_string(a) from `user` order by c asc, a asc", + "Query": "select distinct a as c, a, weight_string(a) from `user`", "Table": "`user`" } ] @@ -3030,14 +1779,15 @@ { "comment": "Distinct with same column", "query": "select distinct a, a from user", - "v3-plan": "generating ORDER BY clause: VT03021: ambiguous symbol reference: a", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select distinct a, a from user", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "GroupBy": "(0|2), (1|2)", + "OperatorType": "Distinct", + "Collations": [ + "(0:2)", + "(1:2)" + ], "ResultColumns": 2, "Inputs": [ { @@ -3048,8 +1798,7 @@ "Sharded": true }, "FieldQuery": "select a, a, weight_string(a) from `user` where 1 != 1", - "OrderBy": "(0|2) ASC, (0|2) ASC", - "Query": "select distinct a, a, weight_string(a) from `user` order by a asc, a asc", + "Query": "select distinct a, a, weight_string(a) from `user`", "Table": "`user`" } ] @@ -3062,8 +1811,7 @@ { "comment": "Order by has subqueries", "query": "select id from unsharded order by (select id from unsharded)", - "v3-plan": "VT12001: unsupported: subqueries disallowed in sqlparser.OrderBy", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from unsharded order by (select id from unsharded)", "Instructions": { @@ -3085,13 +1833,12 @@ { "comment": "Equal filter with hexadecimal value", "query": "select count(*) a from user having a = 0x01", - "v3-plan": "VT12001: unsupported: filtering on results of aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) a from user having a = 0x01", "Instructions": { "OperatorType": "Filter", - "Predicate": ":0 = 0x01", + "Predicate": "count(*) = 0x01", "Inputs": [ { "OperatorType": "Aggregate", @@ -3117,5 +1864,222 @@ "user.user" ] } + }, + { + "comment": "Order by uses cross-shard expression", + "query": "select id from user order by id+1", + "plan": { + "QueryType": "SELECT", + "Original": "select id from user order by id+1", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, id + 1, weight_string(id + 1) from `user` where 1 != 1", + "OrderBy": "(1|2) ASC", + "Query": "select id, id + 1, weight_string(id + 1) from `user` order by id + 1 asc", + "ResultColumns": 1, + "Table": "`user`" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Order by column number with collate", + "query": "select user.col1 as a from user order by 1 collate utf8_general_ci", + "plan": { + "QueryType": "SELECT", + "Original": "select user.col1 as a from user order by 1 collate utf8_general_ci", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.col1 as a, a collate utf8_general_ci, weight_string(a collate utf8_general_ci) from `user` where 1 != 1", + "OrderBy": "(1|2) ASC", + "Query": "select `user`.col1 as a, a collate utf8_general_ci, weight_string(a collate utf8_general_ci) from `user` order by a collate utf8_general_ci asc", + "ResultColumns": 1, + "Table": "`user`" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Order by uses cross-shard expression", + "query": "select id from user order by id+1", + "plan": { + "QueryType": "SELECT", + "Original": "select id from user order by id+1", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, id + 1, weight_string(id + 1) from `user` where 1 != 1", + "OrderBy": "(1|2) ASC", + "Query": "select id, id + 1, weight_string(id + 1) from `user` order by id + 1 asc", + "ResultColumns": 1, + "Table": "`user`" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Order by column number with collate", + "query": "select user.col1 as a from user order by 1 collate utf8_general_ci", + "plan": { + "QueryType": "SELECT", + "Original": "select user.col1 as a from user order by 1 collate utf8_general_ci", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.col1 as a, a collate utf8_general_ci, weight_string(a collate utf8_general_ci) from `user` where 1 != 1", + "OrderBy": "(1|2) ASC", + "Query": "select `user`.col1 as a, a collate utf8_general_ci, weight_string(a collate utf8_general_ci) from `user` order by a collate utf8_general_ci asc", + "ResultColumns": 1, + "Table": "`user`" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Order by column number with coalesce with columns from both sides", + "query": "select id from user, user_extra order by coalesce(user.col, user_extra.col)", + "plan": { + "QueryType": "SELECT", + "Original": "select id from user, user_extra order by coalesce(user.col, user_extra.col)", + "Instructions": { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(1|2) ASC", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,R:1", + "JoinVars": { + "user_col": 1 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, `user`.col from `user` where 1 != 1", + "Query": "select id, `user`.col from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select coalesce(:user_col, user_extra.col), weight_string(coalesce(:user_col, user_extra.col)) from user_extra where 1 != 1", + "Query": "select coalesce(:user_col, user_extra.col), weight_string(coalesce(:user_col, user_extra.col)) from user_extra", + "Table": "user_extra" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "having filter with %", + "query": "select a.tcol1 from user a join music b where a.tcol1 = b.tcol2 group by a.tcol1 having repeat(a.tcol1,min(a.id)) like \"A\\%B\" order by a.tcol1", + "plan": { + "QueryType": "SELECT", + "Original": "select a.tcol1 from user a join music b where a.tcol1 = b.tcol2 group by a.tcol1 having repeat(a.tcol1,min(a.id)) like \"A\\%B\" order by a.tcol1", + "Instructions": { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(0|2) ASC", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Filter", + "Predicate": "repeat(a.tcol1, min(a.id)) like 'A\\%B'", + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "min(1|3) AS min(a.id)", + "GroupBy": "(0|2)", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:1,L:0,L:2,L:3", + "JoinVars": { + "a_tcol1": 1 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select min(a.id), a.tcol1, weight_string(a.tcol1), weight_string(a.id) from `user` as a where 1 != 1 group by a.tcol1, weight_string(a.tcol1), weight_string(a.id)", + "OrderBy": "(1|2) ASC", + "Query": "select min(a.id), a.tcol1, weight_string(a.tcol1), weight_string(a.id) from `user` as a group by a.tcol1, weight_string(a.tcol1), weight_string(a.id) order by a.tcol1 asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music as b where 1 != 1 group by .0", + "Query": "select 1 from music as b where b.tcol2 = :a_tcol1 group by .0", + "Table": "music" + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/rails_cases.json b/go/vt/vtgate/planbuilder/testdata/rails_cases.json index 89fdc4ff059..ef36b79c855 100644 --- a/go/vt/vtgate/planbuilder/testdata/rails_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/rails_cases.json @@ -2,122 +2,7 @@ { "comment": "Author5.joins(books: [{orders: :customer}, :supplier])", "query": "select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,L:2,L:3", - "JoinVars": { - "book6s_supplier5_id": 4 - }, - "TableName": "author5s, book6s_book6s_order2s_order2s_customer2s_supplier5s", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4", - "JoinVars": { - "order2s_customer2_id": 5 - }, - "TableName": "author5s, book6s_book6s_order2s_order2s_customer2s", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4,R:0", - "JoinVars": { - "book6s_order2s_order2_id": 5 - }, - "TableName": "author5s, book6s_book6s_order2s_order2s", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4,R:0", - "JoinVars": { - "book6s_id": 5 - }, - "TableName": "author5s, book6s_book6s_order2s", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select author5s.id, author5s.`name`, author5s.created_at, author5s.updated_at, book6s.supplier5_id, book6s.id from author5s join book6s on book6s.author5_id = author5s.id where 1 != 1", - "Query": "select author5s.id, author5s.`name`, author5s.created_at, author5s.updated_at, book6s.supplier5_id, book6s.id from author5s join book6s on book6s.author5_id = author5s.id", - "Table": "author5s, book6s" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select book6s_order2s.order2_id from book6s_order2s where 1 != 1", - "Query": "select book6s_order2s.order2_id from book6s_order2s where book6s_order2s.book6_id = :book6s_id", - "Table": "book6s_order2s", - "Values": [ - ":book6s_id" - ], - "Vindex": "binary_md5" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select order2s.customer2_id from order2s where 1 != 1", - "Query": "select order2s.customer2_id from order2s where order2s.id = :book6s_order2s_order2_id", - "Table": "order2s" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from customer2s where 1 != 1", - "Query": "select 1 from customer2s where customer2s.id = :order2s_customer2_id", - "Table": "customer2s", - "Values": [ - ":order2s_customer2_id" - ], - "Vindex": "binary_md5" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from supplier5s where 1 != 1", - "Query": "select 1 from supplier5s where supplier5s.id = :book6s_supplier5_id", - "Table": "supplier5s", - "Values": [ - ":book6s_supplier5_id" - ], - "Vindex": "binary_md5" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id", "Instructions": { @@ -143,18 +28,18 @@ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1,L:2,L:3,L:4", + "JoinColumnIndexes": "L:0,L:1,L:2,L:3", "JoinVars": { - "book6s_supplier5_id": 0 + "book6s_supplier5_id": 4 }, "TableName": "author5s, book6s_book6s_order2s_supplier5s", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1,L:2,L:3,L:4,L:5", + "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4", "JoinVars": { - "book6s_id": 0 + "book6s_id": 5 }, "TableName": "author5s, book6s_book6s_order2s", "Inputs": [ @@ -165,8 +50,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select book6s.id, book6s.supplier5_id, author5s.id as id, author5s.`name` as `name`, author5s.created_at as created_at, author5s.updated_at as updated_at from author5s, book6s where 1 != 1", - "Query": "select book6s.id, book6s.supplier5_id, author5s.id as id, author5s.`name` as `name`, author5s.created_at as created_at, author5s.updated_at as updated_at from author5s, book6s where book6s.author5_id = author5s.id", + "FieldQuery": "select author5s.id as id, author5s.`name` as `name`, author5s.created_at as created_at, author5s.updated_at as updated_at, book6s.supplier5_id, book6s.id from author5s, book6s where 1 != 1", + "Query": "select author5s.id as id, author5s.`name` as `name`, author5s.created_at as created_at, author5s.updated_at as updated_at, book6s.supplier5_id, book6s.id from author5s, book6s where book6s.author5_id = author5s.id", "Table": "author5s, book6s" }, { @@ -177,7 +62,7 @@ "Sharded": true }, "FieldQuery": "select 1 from book6s_order2s where 1 != 1", - "Query": "select 1 from book6s_order2s where book6s_order2s.book6_id = :book6s_id and book6s_order2s.order2_id = :order2s_id", + "Query": "select 1 from book6s_order2s where book6s_order2s.order2_id = :order2s_id and book6s_order2s.book6_id = :book6s_id", "Table": "book6s_order2s", "Values": [ ":book6s_id" @@ -215,4 +100,4 @@ ] } } -] \ No newline at end of file +] diff --git a/go/vt/vtgate/planbuilder/testdata/reference_cases.json b/go/vt/vtgate/planbuilder/testdata/reference_cases.json index a5078bd89d1..351cb54190e 100644 --- a/go/vt/vtgate/planbuilder/testdata/reference_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/reference_cases.json @@ -2,22 +2,7 @@ { "comment": "select from unqualified ambiguous reference routes to reference source", "query": "select * from ambiguous_ref_with_source", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from ambiguous_ref_with_source", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from ambiguous_ref_with_source where 1 != 1", - "Query": "select * from ambiguous_ref_with_source", - "Table": "ambiguous_ref_with_source" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from ambiguous_ref_with_source", "Instructions": { @@ -39,41 +24,7 @@ { "comment": "join with unqualified ambiguous reference table routes to optimal keyspace", "query": "select user.col from user join ambiguous_ref_with_source", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join ambiguous_ref_with_source", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_ambiguous_ref_with_source", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from ambiguous_ref_with_source where 1 != 1", - "Query": "select 1 from ambiguous_ref_with_source", - "Table": "ambiguous_ref_with_source" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join ambiguous_ref_with_source", "Instructions": { @@ -96,12 +47,12 @@ { "comment": "ambiguous unqualified reference table self-join routes to reference source", "query": "select r1.col from ambiguous_ref_with_source r1 join ambiguous_ref_with_source", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select r1.col from ambiguous_ref_with_source r1 join ambiguous_ref_with_source", "Instructions": { "OperatorType": "Route", - "Variant": "Reference", + "Variant": "Unsharded", "Keyspace": { "Name": "main", "Sharded": false @@ -109,21 +60,6 @@ "FieldQuery": "select r1.col from ambiguous_ref_with_source as r1 join ambiguous_ref_with_source where 1 != 1", "Query": "select r1.col from ambiguous_ref_with_source as r1 join ambiguous_ref_with_source", "Table": "ambiguous_ref_with_source" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select r1.col from ambiguous_ref_with_source r1 join ambiguous_ref_with_source", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select r1.col from ambiguous_ref_with_source as r1, ambiguous_ref_with_source where 1 != 1", - "Query": "select r1.col from ambiguous_ref_with_source as r1, ambiguous_ref_with_source", - "Table": "ambiguous_ref_with_source" }, "TablesUsed": [ "main.ambiguous_ref_with_source" @@ -133,41 +69,7 @@ { "comment": "ambiguous unqualified reference table can merge with other opcodes left to right.", "query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join user", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "ambiguous_ref_with_source_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source where 1 != 1", - "Query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source", - "Table": "ambiguous_ref_with_source" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join user", "Instructions": { @@ -190,45 +92,7 @@ { "comment": "ambiguous unqualified reference table can merge with other opcodes left to right and vindex value is in the plan", "query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join (select aa from user where user.id=1) user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join (select aa from user where user.id=1) user", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "ambiguous_ref_with_source_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source where 1 != 1", - "Query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source", - "Table": "ambiguous_ref_with_source" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from (select aa from `user` where 1 != 1) as `user` where 1 != 1", - "Query": "select 1 from (select aa from `user` where `user`.id = 1) as `user`", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source join (select aa from user where user.id=1) user", "Instructions": { @@ -238,8 +102,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source, (select aa from `user` where 1 != 1) as `user` where 1 != 1", - "Query": "select ambiguous_ref_with_source.col from ambiguous_ref_with_source, (select aa from `user` where `user`.id = 1) as `user`", + "FieldQuery": "select ambiguous_ref_with_source.col from (select aa from `user` where 1 != 1) as `user`, ambiguous_ref_with_source where 1 != 1", + "Query": "select ambiguous_ref_with_source.col from (select aa from `user` where `user`.id = 1) as `user`, ambiguous_ref_with_source", "Table": "`user`, ambiguous_ref_with_source", "Values": [ "INT64(1)" @@ -255,41 +119,7 @@ { "comment": "qualified join to reference table routes to optimal keyspace", "query": "select user.col from user join main.ambiguous_ref_with_source", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join main.ambiguous_ref_with_source", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_ambiguous_ref_with_source", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from ambiguous_ref_with_source where 1 != 1", - "Query": "select 1 from ambiguous_ref_with_source", - "Table": "ambiguous_ref_with_source" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join main.ambiguous_ref_with_source", "Instructions": { @@ -310,7 +140,7 @@ } }, { - "comment": "insert into ambiguous qualified reference table routes to source", + "comment": "insert into ambiguous unqualified reference table routes to source", "query": "insert into ambiguous_ref_with_source(col) values(1)", "plan": { "QueryType": "INSERT", @@ -323,7 +153,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into ambiguous_ref_with_source(col) values (1)", "TableName": "ambiguous_ref_with_source" }, @@ -333,28 +162,9 @@ } }, { - "comment": "insert into qualified ambiguous reference table routes v3 to requested keyspace gen4 to source", + "comment": "insert into qualified ambiguous reference table routes to source", "query": "insert into user.ambiguous_ref_with_source(col) values(1)", - "v3-plan": { - "QueryType": "INSERT", - "Original": "insert into user.ambiguous_ref_with_source(col) values(1)", - "Instructions": { - "OperatorType": "Insert", - "Variant": "Sharded", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "insert into ambiguous_ref_with_source(col) values (1)", - "TableName": "ambiguous_ref_with_source" - }, - "TablesUsed": [ - "user.ambiguous_ref_with_source" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "INSERT", "Original": "insert into user.ambiguous_ref_with_source(col) values(1)", "Instructions": { @@ -365,7 +175,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into ambiguous_ref_with_source(col) values (1)", "TableName": "ambiguous_ref_with_source" }, @@ -388,7 +197,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update ambiguous_ref_with_source set col = 1", "Table": "ambiguous_ref_with_source" }, @@ -398,10 +206,9 @@ } }, { - "comment": "update qualified ambiguous reference table v3 error no primary vindex v4 route to source", + "comment": "update qualified ambiguous reference table route to source", "query": "update user.ambiguous_ref_with_source set col = 1", - "v3-plan": "VT09001: table 'ambiguous_ref_with_source' does not have a primary vindex", - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "update user.ambiguous_ref_with_source set col = 1", "Instructions": { @@ -412,7 +219,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update ambiguous_ref_with_source set col = 1", "Table": "ambiguous_ref_with_source" }, @@ -435,7 +241,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from ambiguous_ref_with_source where col = 1", "Table": "ambiguous_ref_with_source" }, @@ -445,10 +250,9 @@ } }, { - "comment": "delete from qualified ambiguous reference table v3 error no primary vindex v4 route to source", + "comment": "delete from qualified ambiguous reference table route to source", "query": "delete from user.ambiguous_ref_with_source where col = 1", - "v3-plan": "VT09001: table 'ambiguous_ref_with_source' does not have a primary vindex", - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "delete from user.ambiguous_ref_with_source where col = 1", "Instructions": { @@ -459,7 +263,6 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from ambiguous_ref_with_source where col = 1", "Table": "ambiguous_ref_with_source" }, @@ -471,22 +274,7 @@ { "comment": "join with unqualified unambiguous ref with source routes to requested table", "query": "select user.col from user join ref_with_source", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join ref_with_source", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` join ref_with_source where 1 != 1", - "Query": "select `user`.col from `user` join ref_with_source", - "Table": "`user`, ref_with_source" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join ref_with_source", "Instructions": { @@ -508,44 +296,10 @@ }, { "comment": "join with unqualified reference optimize routes when source & reference have different names", - "query": "select user.col from user join ref_in_source", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join ref_in_source", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_ref_in_source", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from ref_in_source where 1 != 1", - "Query": "select 1 from ref_in_source", - "Table": "ref_in_source" - } - ] - } - }, - "gen4-plan": { + "query": "select user.col from user join source_of_ref", + "plan": { "QueryType": "SELECT", - "Original": "select user.col from user join ref_in_source", + "Original": "select user.col from user join source_of_ref", "Instructions": { "OperatorType": "Route", "Variant": "Scatter", @@ -553,8 +307,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col from `user`, ref_with_source as ref_in_source where 1 != 1", - "Query": "select `user`.col from `user`, ref_with_source as ref_in_source", + "FieldQuery": "select `user`.col from `user`, ref_with_source as source_of_ref where 1 != 1", + "Query": "select `user`.col from `user`, ref_with_source as source_of_ref", "Table": "`user`, ref_with_source" }, "TablesUsed": [ @@ -566,41 +320,7 @@ { "comment": "join with unqualified reference respects routing rules", "query": "select user.col from user join rerouted_ref", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join rerouted_ref", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_rerouted_ref", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from rerouted_ref where 1 != 1", - "Query": "select 1 from rerouted_ref", - "Table": "rerouted_ref" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.col from user join rerouted_ref", "Instructions": { @@ -619,5 +339,50 @@ "user.user" ] } + }, + { + "comment": "join with reference to unqualified source routes to optimal keyspace", + "query": "select user.col from user join global_ref", + "plan": { + "QueryType": "SELECT", + "Original": "select user.col from user join global_ref", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.col from `user`, global_ref where 1 != 1", + "Query": "select `user`.col from `user`, global_ref", + "Table": "`user`, global_ref" + }, + "TablesUsed": [ + "user.global_ref", + "user.user" + ] + } + }, + { + "comment": "insert into qualified reference with unqualified source routes to source", + "query": "insert into user.global_ref(col) values(1)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into user.global_ref(col) values(1)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "insert into global_ref(col) values (1)", + "TableName": "global_ref" + }, + "TablesUsed": [ + "main.global_ref" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases.json b/go/vt/vtgate/planbuilder/testdata/select_cases.json index 0366686cb74..148a6389dfb 100644 --- a/go/vt/vtgate/planbuilder/testdata/select_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/select_cases.json @@ -2,22 +2,7 @@ { "comment": "No column referenced", "query": "select 1 from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 from user", "Instructions": { @@ -39,22 +24,7 @@ { "comment": "'*' expression for simple route", "query": "select user.* from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.* from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.* from `user` where 1 != 1", - "Query": "select `user`.* from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.* from user", "Instructions": { @@ -76,22 +46,7 @@ { "comment": "unqualified '*' expression for simple route", "query": "select * from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user", "Instructions": { @@ -113,23 +68,7 @@ { "comment": "select with timeout directive sets QueryTimeout in the route", "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user`", - "QueryTimeout": 1000, - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user", "Instructions": { @@ -152,30 +91,7 @@ { "comment": "select aggregation with timeout directive sets QueryTimeout in the route", "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from `user` where 1 != 1", - "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from `user`", - "QueryTimeout": 1000, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ count(*) from user", "Instructions": { @@ -205,29 +121,7 @@ { "comment": "select limit with timeout directive sets QueryTimeout in the route", "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user` limit :__upper_limit", - "QueryTimeout": 1000, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from user limit 10", "Instructions": { @@ -256,23 +150,7 @@ { "comment": "select limit with timeout directive sets QueryTimeout in the route", "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from main.unsharded limit 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from main.unsharded limit 10", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from unsharded where 1 != 1", - "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from unsharded limit 10", - "QueryTimeout": 1000, - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from main.unsharded limit 10", "Instructions": { @@ -295,23 +173,7 @@ { "comment": "select with partial scatter directive", "query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from `user`", - "ScatterErrorsAsWarnings": true, - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ * from user", "Instructions": { @@ -334,30 +196,7 @@ { "comment": "select aggregation with partial scatter directive", "query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from `user` where 1 != 1", - "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`", - "ScatterErrorsAsWarnings": true, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user", "Instructions": { @@ -387,30 +226,7 @@ { "comment": "select aggregation with partial scatter directive - added comments to try to confuse the hint extraction", "query": "/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from `user` where 1 != 1", - "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from `user`", - "ScatterErrorsAsWarnings": true, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "/*VT_SPAN_CONTEXT=123*/select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ count(*) from user", "Instructions": { @@ -440,29 +256,7 @@ { "comment": "select limit with partial scatter directive", "query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from `user` limit :__upper_limit", - "ScatterErrorsAsWarnings": true, - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from user limit 10", "Instructions": { @@ -491,22 +285,7 @@ { "comment": "qualified '*' expression for simple route", "query": "select user.* from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.* from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.* from `user` where 1 != 1", - "Query": "select `user`.* from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.* from user", "Instructions": { @@ -528,22 +307,7 @@ { "comment": "fully qualified '*' expression for simple route", "query": "select user.user.* from user.user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.user.* from user.user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.* from `user` where 1 != 1", - "Query": "select `user`.* from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.user.* from user.user", "Instructions": { @@ -565,22 +329,7 @@ { "comment": "select * from authoritative table", "query": "select * from authoritative", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from authoritative", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1", - "Query": "select user_id, col1, col2 from authoritative", - "Table": "authoritative" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from authoritative", "Instructions": { @@ -602,22 +351,7 @@ { "comment": "select * from join of authoritative tables", "query": "select * from authoritative a join authoritative b on a.user_id=b.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from authoritative a join authoritative b on a.user_id=b.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a join authoritative as b on a.user_id = b.user_id where 1 != 1", - "Query": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a join authoritative as b on a.user_id = b.user_id", - "Table": "authoritative" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from authoritative a join authoritative b on a.user_id=b.user_id", "Instructions": { @@ -639,28 +373,12 @@ { "comment": "test table lookup failure for authoritative code path", "query": "select a.* from authoritative", - "v3-plan": "VT05004: table 'a' does not exist", - "gen4-plan": "Unknown table 'a'" + "plan": "Unknown table 'a'" }, { "comment": "select * from qualified authoritative table", "query": "select a.* from authoritative a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a.* from authoritative a", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a.user_id, a.col1, a.col2 from authoritative as a where 1 != 1", - "Query": "select a.user_id, a.col1, a.col2 from authoritative as a", - "Table": "authoritative" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a.* from authoritative a", "Instructions": { @@ -682,22 +400,7 @@ { "comment": "select * from intermixing of authoritative table with non-authoritative results in no expansion", "query": "select * from authoritative join user on authoritative.user_id=user.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from authoritative join user on authoritative.user_id=user.id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from authoritative join `user` on authoritative.user_id = `user`.id where 1 != 1", - "Query": "select * from authoritative join `user` on authoritative.user_id = `user`.id", - "Table": "authoritative, `user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from authoritative join user on authoritative.user_id=user.id", "Instructions": { @@ -720,22 +423,7 @@ { "comment": "select authoritative.* with intermixing still expands", "query": "select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, a.user_id, a.col1, a.col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id where 1 != 1", - "Query": "select `user`.id, a.user_id, a.col1, a.col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id", - "Table": "authoritative, `user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.id, a.*, user.col1 from authoritative a join user on a.user_id=user.id", "Instructions": { @@ -758,22 +446,7 @@ { "comment": "auto-resolve anonymous columns for simple route", "query": "select anon_col from user join user_extra on user.id = user_extra.user_id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select anon_col from user join user_extra on user.id = user_extra.user_id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select anon_col from `user` join user_extra on `user`.id = user_extra.user_id where 1 != 1", - "Query": "select anon_col from `user` join user_extra on `user`.id = user_extra.user_id", - "Table": "`user`, user_extra" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select anon_col from user join user_extra on user.id = user_extra.user_id", "Instructions": { @@ -796,47 +469,12 @@ { "comment": "Cannot auto-resolve for cross-shard joins", "query": "select col from user join user_extra", - "v3-plan": "VT03019: symbol col not found", - "gen4-plan": "Column 'col' in field list is ambiguous" + "plan": "Column 'col' in field list is ambiguous" }, { "comment": "Auto-resolve should work if unique vindex columns are referenced", "query": "select id, user_id from user join user_extra", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, user_id from user join user_extra", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id from user_extra where 1 != 1", - "Query": "select user_id from user_extra", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, user_id from user join user_extra", "Instructions": { @@ -878,22 +516,7 @@ { "comment": "database calls should be substituted", "query": "select database() from dual", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select database() from dual", - "Instructions": { - "OperatorType": "Projection", - "Expressions": [ - ":__vtdbname as database()" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select database() from dual", "Instructions": { @@ -915,22 +538,7 @@ { "comment": "last_insert_id for unsharded route", "query": "select last_insert_id() as x from main.unsharded", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select last_insert_id() as x from main.unsharded", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select :__lastInsertId as x from unsharded where 1 != 1", - "Query": "select :__lastInsertId as x from unsharded", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select last_insert_id() as x from main.unsharded", "Instructions": { @@ -952,22 +560,7 @@ { "comment": "select from dual on unqualified keyspace", "query": "select @@session.auto_increment_increment from dual", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select @@session.auto_increment_increment from dual", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select @@auto_increment_increment from dual where 1 != 1", - "Query": "select @@auto_increment_increment from dual", - "Table": "dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select @@session.auto_increment_increment from dual", "Instructions": { @@ -989,26 +582,7 @@ { "comment": "select from pinned table", "query": "select * from pin_test", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from pin_test", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from pin_test where 1 != 1", - "Query": "select * from pin_test", - "Table": "pin_test", - "Values": [ - "VARCHAR(\"\\x80\")" - ], - "Vindex": "binary" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from pin_test", "Instructions": { @@ -1039,7 +613,7 @@ { "comment": "RHS route referenced", "query": "select user_extra.id from user join user_extra", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_extra.id from user join user_extra", "Instructions": { @@ -1071,15 +645,23 @@ "Table": "user_extra" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "Both routes referenced", + "query": "select user.col, user_extra.id from user join user_extra", + "plan": { "QueryType": "SELECT", - "Original": "select user_extra.id from user join user_extra", + "Original": "select user.col, user_extra.id from user join user_extra", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "R:0", + "JoinColumnIndexes": "L:0,R:0", "TableName": "`user`_user_extra", "Inputs": [ { @@ -1089,8 +671,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user`", + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user`", "Table": "`user`" }, { @@ -1113,11 +695,11 @@ } }, { - "comment": "Both routes referenced", - "query": "select user.col, user_extra.id from user join user_extra", - "v3-plan": { + "comment": "Expression with single-route reference", + "query": "select user.col, user_extra.id + user_extra.col from user join user_extra", + "plan": { "QueryType": "SELECT", - "Original": "select user.col, user_extra.id from user join user_extra", + "Original": "select user.col, user_extra.id + user_extra.col from user join user_extra", "Instructions": { "OperatorType": "Join", "Variant": "Join", @@ -1142,20 +724,28 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra", + "FieldQuery": "select user_extra.id + user_extra.col from user_extra where 1 != 1", + "Query": "select user_extra.id + user_extra.col from user_extra", "Table": "user_extra" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "Jumbled references", + "query": "select user.col, user_extra.id, user.col2 from user join user_extra", + "plan": { "QueryType": "SELECT", - "Original": "select user.col, user_extra.id from user join user_extra", + "Original": "select user.col, user_extra.id, user.col2 from user join user_extra", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", + "JoinColumnIndexes": "L:0,R:0,L:1", "TableName": "`user`_user_extra", "Inputs": [ { @@ -1165,8 +755,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", + "FieldQuery": "select `user`.col, `user`.col2 from `user` where 1 != 1", + "Query": "select `user`.col, `user`.col2 from `user`", "Table": "`user`" }, { @@ -1189,15 +779,15 @@ } }, { - "comment": "Expression with single-route reference", - "query": "select user.col, user_extra.id + user_extra.col from user join user_extra", - "v3-plan": { + "comment": "Comments", + "query": "select /* comment */ user.col from user join user_extra", + "plan": { "QueryType": "SELECT", - "Original": "select user.col, user_extra.id + user_extra.col from user join user_extra", + "Original": "select /* comment */ user.col from user join user_extra", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", + "JoinColumnIndexes": "L:0", "TableName": "`user`_user_extra", "Inputs": [ { @@ -1208,7 +798,7 @@ "Sharded": true }, "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", + "Query": "select /* comment */ `user`.col from `user`", "Table": "`user`" }, { @@ -1218,20 +808,28 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user_extra.id + user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.id + user_extra.col from user_extra", + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select /* comment */ 1 from user_extra", "Table": "user_extra" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "for update", + "query": "select user.col from user join user_extra for update", + "plan": { "QueryType": "SELECT", - "Original": "select user.col, user_extra.id + user_extra.col from user join user_extra", + "Original": "select user.col from user join user_extra for update", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", + "JoinColumnIndexes": "L:0", "TableName": "`user`_user_extra", "Inputs": [ { @@ -1242,7 +840,7 @@ "Sharded": true }, "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user`", + "Query": "select `user`.col from `user` for update", "Table": "`user`" }, { @@ -1252,8 +850,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user_extra.id + user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.id + user_extra.col from user_extra", + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra for update", "Table": "user_extra" } ] @@ -1265,16 +863,19 @@ } }, { - "comment": "Jumbled references", - "query": "select user.col, user_extra.id, user.col2 from user join user_extra", - "v3-plan": { + "comment": "Field query should work for joins select bind vars", + "query": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm", + "plan": { "QueryType": "SELECT", - "Original": "select user.col, user_extra.id, user.col2 from user join user_extra", + "Original": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,L:1", - "TableName": "`user`_user_extra", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_id": 0 + }, + "TableName": "`user`_unsharded", "Inputs": [ { "OperatorType": "Route", @@ -1283,31 +884,39 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col, `user`.col2 from `user` where 1 != 1", - "Query": "select `user`.col, `user`.col2 from `user`", + "FieldQuery": "select `user`.id from `user` where 1 != 1", + "Query": "select `user`.id from `user`", "Table": "`user`" }, { "OperatorType": "Route", - "Variant": "Scatter", + "Variant": "Unsharded", "Keyspace": { - "Name": "user", - "Sharded": true + "Name": "main", + "Sharded": false }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra", - "Table": "user_extra" + "FieldQuery": "select (select :user_id + outm.m + unsharded.m from unsharded where 1 != 1) from unsharded as outm where 1 != 1", + "Query": "select (select :user_id + outm.m + unsharded.m from unsharded) from unsharded as outm", + "Table": "unsharded" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "main.unsharded", + "user.user" + ] + } + }, + { + "comment": "Case preservation", + "query": "select user.Col, user_extra.Id from user join user_extra", + "plan": { "QueryType": "SELECT", - "Original": "select user.col, user_extra.id, user.col2 from user join user_extra", + "Original": "select user.Col, user_extra.Id from user join user_extra", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,L:1", + "JoinColumnIndexes": "L:0,R:0", "TableName": "`user`_user_extra", "Inputs": [ { @@ -1317,8 +926,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col, `user`.col2 from `user` where 1 != 1", - "Query": "select `user`.col, `user`.col2 from `user`", + "FieldQuery": "select `user`.Col from `user` where 1 != 1", + "Query": "select `user`.Col from `user`", "Table": "`user`" }, { @@ -1328,8 +937,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select user_extra.id from user_extra", + "FieldQuery": "select user_extra.Id from user_extra where 1 != 1", + "Query": "select user_extra.Id from user_extra", "Table": "user_extra" } ] @@ -1341,126 +950,46 @@ } }, { - "comment": "Comments", - "query": "select /* comment */ user.col from user join user_extra", - "v3-plan": { + "comment": "syntax error", + "query": "the quick brown fox", + "plan": "syntax error at position 4 near 'the'" + }, + { + "comment": "Hex number is not treated as a simple value", + "query": "select * from user where id = 0x04", + "plan": { "QueryType": "SELECT", - "Original": "select /* comment */ user.col from user join user_extra", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select /* comment */ `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select /* comment */ 1 from user_extra", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select /* comment */ user.col from user join user_extra", + "Original": "select * from user where id = 0x04", "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select /* comment */ `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select /* comment */ 1 from user_extra", - "Table": "user_extra" - } - ] + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 0x04", + "Table": "`user`", + "Values": [ + "VARBINARY(\"\\x04\")" + ], + "Vindex": "user_index" }, "TablesUsed": [ - "user.user", - "user.user_extra" + "user.user" ] } }, { - "comment": "for update", - "query": "select user.col from user join user_extra for update", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.col from user join user_extra for update", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` for update", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra for update", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "comment": "sharded limit offset", + "query": "select user_id from music order by user_id limit 10, 20", + "plan": { "QueryType": "SELECT", - "Original": "select user.col from user join user_extra for update", + "Original": "select user_id from music order by user_id limit 10, 20", "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", + "OperatorType": "Limit", + "Count": "INT64(20)", + "Offset": "INT64(10)", "Inputs": [ { "OperatorType": "Route", @@ -1469,213 +998,77 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` for update", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra for update", - "Table": "user_extra" + "FieldQuery": "select user_id, weight_string(user_id) from music where 1 != 1", + "OrderBy": "(0|1) ASC", + "Query": "select user_id, weight_string(user_id) from music order by user_id asc limit :__upper_limit", + "ResultColumns": 1, + "Table": "music" } ] }, "TablesUsed": [ - "user.user", - "user.user_extra" + "user.music" ] } }, { - "comment": "Field query should work for joins select bind vars", - "query": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm", - "v3-plan": { + "comment": "Sharding Key Condition in Parenthesis", + "query": "select * from user where name ='abc' AND (id = 4) limit 5", + "plan": { "QueryType": "SELECT", - "Original": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm", + "Original": "select * from user where name ='abc' AND (id = 4) limit 5", "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "user_id": 0 + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select (select :user_id + outm.m + unsharded.m from unsharded where 1 != 1) from unsharded as outm where 1 != 1", - "Query": "select (select :user_id + outm.m + unsharded.m from unsharded) from unsharded as outm", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where `name` = 'abc' and id = 4 limit 5", + "Table": "`user`", + "Values": [ + "INT64(4)" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Multiple parenthesized expressions", + "query": "select * from user where (id = 4) AND (name ='abc') limit 5", + "plan": { "QueryType": "SELECT", - "Original": "select user.id, (select user.id+outm.m+unsharded.m from unsharded) from user join unsharded outm", + "Original": "select * from user where (id = 4) AND (name ='abc') limit 5", "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "user_id": 0 + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select (select :user_id + outm.m + unsharded.m from unsharded where 1 != 1) from unsharded as outm where 1 != 1", - "Query": "select (select :user_id + outm.m + unsharded.m from unsharded) from unsharded as outm", - "Table": "unsharded" - } - ] + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5", + "Table": "`user`", + "Values": [ + "INT64(4)" + ], + "Vindex": "user_index" }, "TablesUsed": [ - "main.unsharded", "user.user" ] } }, { - "comment": "Case preservation", - "query": "select user.Col, user_extra.Id from user join user_extra", - "v3-plan": { + "comment": "Multiple parenthesized expressions", + "query": "select * from user where (id = 4 and name ='abc') limit 5", + "plan": { "QueryType": "SELECT", - "Original": "select user.Col, user_extra.Id from user join user_extra", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.Col from `user` where 1 != 1", - "Query": "select `user`.Col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.Id from user_extra where 1 != 1", - "Query": "select user_extra.Id from user_extra", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user.Col, user_extra.Id from user join user_extra", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.Col from `user` where 1 != 1", - "Query": "select `user`.Col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.Id from user_extra where 1 != 1", - "Query": "select user_extra.Id from user_extra", - "Table": "user_extra" - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "syntax error", - "query": "the quick brown fox", - "plan": "syntax error at position 4 near 'the'" - }, - { - "comment": "Hex number is not treated as a simple value", - "query": "select * from user where id = 0x04", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 0x04", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 0x04", - "Table": "`user`" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from user where id = 0x04", + "Original": "select * from user where (id = 4 and name ='abc') limit 5", "Instructions": { "OperatorType": "Route", "Variant": "EqualUnique", @@ -1684,10 +1077,10 @@ "Sharded": true }, "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 0x04", + "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5", "Table": "`user`", "Values": [ - "VARBINARY(\"\\x04\")" + "INT64(4)" ], "Vindex": "user_index" }, @@ -1697,85 +1090,11 @@ } }, { - "comment": "sharded limit offset", - "query": "select user_id from music order by user_id limit 10, 20", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_id from music order by user_id limit 10, 20", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(20)", - "Offset": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id, weight_string(user_id) from music where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select user_id, weight_string(user_id) from music order by user_id asc limit :__upper_limit", - "ResultColumns": 1, - "Table": "music" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user_id from music order by user_id limit 10, 20", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(20)", - "Offset": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id, weight_string(user_id) from music where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select user_id, weight_string(user_id) from music order by user_id asc limit :__upper_limit", - "ResultColumns": 1, - "Table": "music" - } - ] - }, - "TablesUsed": [ - "user.music" - ] - } - }, - { - "comment": "Sharding Key Condition in Parenthesis", - "query": "select * from user where name ='abc' AND (id = 4) limit 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where name ='abc' AND (id = 4) limit 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where `name` = 'abc' and id = 4 limit 5", - "Table": "`user`", - "Values": [ - "INT64(4)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "comment": "Column Aliasing with Table.Column", + "query": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2", + "plan": { "QueryType": "SELECT", - "Original": "select * from user where name ='abc' AND (id = 4) limit 5", + "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2", "Instructions": { "OperatorType": "Route", "Variant": "EqualUnique", @@ -1783,11 +1102,11 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where `name` = 'abc' and id = 4 limit 5", + "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1", + "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col desc limit 2", "Table": "`user`", "Values": [ - "INT64(4)" + "INT64(1)" ], "Vindex": "user_index" }, @@ -1797,11 +1116,11 @@ } }, { - "comment": "Multiple parenthesized expressions", - "query": "select * from user where (id = 4) AND (name ='abc') limit 5", - "v3-plan": { + "comment": "Column Aliasing with Column", + "query": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3", + "plan": { "QueryType": "SELECT", - "Original": "select * from user where (id = 4) AND (name ='abc') limit 5", + "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3", "Instructions": { "OperatorType": "Route", "Variant": "EqualUnique", @@ -1809,18 +1128,25 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5", + "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1", + "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc limit 3", "Table": "`user`", "Values": [ - "INT64(4)" + "INT64(1)" ], "Vindex": "user_index" - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Booleans and parenthesis", + "query": "select * from user where (id = 1) AND name = true limit 5", + "plan": { "QueryType": "SELECT", - "Original": "select * from user where (id = 4) AND (name ='abc') limit 5", + "Original": "select * from user where (id = 1) AND name = true limit 5", "Instructions": { "OperatorType": "Route", "Variant": "EqualUnique", @@ -1829,10 +1155,10 @@ "Sharded": true }, "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5", + "Query": "select * from `user` where id = 1 and `name` = true limit 5", "Table": "`user`", "Values": [ - "INT64(4)" + "INT64(1)" ], "Vindex": "user_index" }, @@ -1842,11 +1168,11 @@ } }, { - "comment": "Multiple parenthesized expressions", - "query": "select * from user where (id = 4 and name ='abc') limit 5", - "v3-plan": { + "comment": "Column as boolean-ish", + "query": "select * from user where (id = 1) AND name limit 5", + "plan": { "QueryType": "SELECT", - "Original": "select * from user where (id = 4 and name ='abc') limit 5", + "Original": "select * from user where (id = 1) AND name limit 5", "Instructions": { "OperatorType": "Route", "Variant": "EqualUnique", @@ -1855,17 +1181,24 @@ "Sharded": true }, "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5", + "Query": "select * from `user` where id = 1 and `name` limit 5", "Table": "`user`", "Values": [ - "INT64(4)" + "INT64(1)" ], "Vindex": "user_index" - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "PK as fake boolean, and column as boolean-ish", + "query": "select * from user where (id = 5) AND name = true limit 5", + "plan": { "QueryType": "SELECT", - "Original": "select * from user where (id = 4 and name ='abc') limit 5", + "Original": "select * from user where (id = 5) AND name = true limit 5", "Instructions": { "OperatorType": "Route", "Variant": "EqualUnique", @@ -1874,10 +1207,10 @@ "Sharded": true }, "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 4 and `name` = 'abc' limit 5", + "Query": "select * from `user` where id = 5 and `name` = true limit 5", "Table": "`user`", "Values": [ - "INT64(4)" + "INT64(5)" ], "Vindex": "user_index" }, @@ -1887,280 +1220,20 @@ } }, { - "comment": "Column Aliasing with Table.Column", - "query": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2", - "v3-plan": { + "comment": "top level subquery in select", + "query": "select a, (select col from user) from unsharded", + "plan": { "QueryType": "SELECT", - "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2", + "Original": "select a, (select col from user) from unsharded", "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1", - "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col desc limit 2", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by user0_.col desc limit 2", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1", - "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col desc limit 2", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "Column Aliasing with Column", - "query": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1", - "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc limit 3", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user0_.col as col0_ from user user0_ where id = 1 order by col0_ desc limit 3", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1", - "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc limit 3", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "Booleans and parenthesis", - "query": "select * from user where (id = 1) AND name = true limit 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where (id = 1) AND name = true limit 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 1 and `name` = true limit 5", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from user where (id = 1) AND name = true limit 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 1 and `name` = true limit 5", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "Column as boolean-ish", - "query": "select * from user where (id = 1) AND name limit 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where (id = 1) AND name limit 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 1 and `name` limit 5", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from user where (id = 1) AND name limit 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 1 and `name` limit 5", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "PK as fake boolean, and column as boolean-ish", - "query": "select * from user where (id = 5) AND name = true limit 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user where (id = 5) AND name = true limit 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 5 and `name` = true limit 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from user where (id = 5) AND name = true limit 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 5 and `name` = true limit 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "top level subquery in select", - "query": "select a, (select col from user) from unsharded", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select a, (select col from user) from unsharded", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select a, :__sq1 from unsharded where 1 != 1", - "Query": "select a, :__sq1 from unsharded", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select a, (select col from user) from unsharded", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq1" + "OperatorType": "Subquery", + "Variant": "PulloutValue", + "PulloutVars": [ + "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -2172,6 +1245,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { @@ -2193,18 +1267,18 @@ { "comment": "sub-expression subquery in select", "query": "select a, 1+(select col from user) from unsharded", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select a, 1+(select col from user) from unsharded", "Instructions": { "OperatorType": "Subquery", "Variant": "PulloutValue", "PulloutVars": [ - "__sq_has_values1", "__sq1" ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -2216,6 +1290,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { @@ -2227,17 +1302,24 @@ "Table": "unsharded" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "main.unsharded", + "user.user" + ] + } + }, + { + "comment": "select * from derived table expands specific columns", + "query": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t", + "plan": { "QueryType": "SELECT", - "Original": "select a, 1+(select col from user) from unsharded", + "Original": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq1" - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0", + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", @@ -2246,114 +1328,20 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", + "FieldQuery": "select t.id1 from (select `user`.id as id1 from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.id1 from (select `user`.id as id1 from `user`) as t", "Table": "`user`" }, { "OperatorType": "Route", - "Variant": "Unsharded", + "Variant": "Scatter", "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select a, 1 + :__sq1 from unsharded where 1 != 1", - "Query": "select a, 1 + :__sq1 from unsharded", - "Table": "unsharded" - } - ] - }, - "TablesUsed": [ - "main.unsharded", - "user.user" - ] - } - }, - { - "comment": "select * from derived table expands specific columns", - "query": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 1 - ], - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id as id1 from `user` where 1 != 1", - "Query": "select `user`.id as id1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id as id2 from user_extra where 1 != 1", - "Query": "select user_extra.id as id2 from user_extra", - "Table": "user_extra" - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from (select user.id id1, user_extra.id id2 from user join user_extra) as t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 1 - ], - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id as id1 from `user` where 1 != 1", - "Query": "select `user`.id as id1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id as id2 from user_extra where 1 != 1", - "Query": "select user_extra.id as id2 from user_extra", - "Table": "user_extra" - } - ] + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select t.id2 from (select user_extra.id as id2 from user_extra where 1 != 1) as t where 1 != 1", + "Query": "select t.id2 from (select user_extra.id as id2 from user_extra) as t", + "Table": "user_extra" } ] }, @@ -2366,38 +1354,17 @@ { "comment": "duplicate columns not allowed in derived table", "query": "select * from (select user.id, user_extra.id from user join user_extra) as t", - "v3-plan": "VT12001: unsupported: duplicate column names in subquery: id", - "gen4-plan": "Duplicate column name 'id'" + "plan": "Duplicate column name 'id'" }, { "comment": "non-existent symbol in cross-shard derived table", "query": "select t.col from (select user.id from user join user_extra) as t", - "v3-plan": "VT03019: symbol t.col not found", - "gen4-plan": "symbol t.col not found" + "plan": "column 't.col' not found" }, { "comment": "union with the same target shard", "query": "select * from music where user_id = 1 union select * from user where id = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from music where user_id = 1 union select * from user where id = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from music where 1 != 1 union select * from `user` where 1 != 1", - "Query": "select * from music where user_id = 1 union select * from `user` where id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from music where user_id = 1 union select * from user where id = 1", "Instructions": { @@ -2409,7 +1376,7 @@ }, "FieldQuery": "select * from music where 1 != 1 union select * from `user` where 1 != 1", "Query": "select * from music where user_id = 1 union select * from `user` where id = 1", - "Table": "music", + "Table": "`user`, music", "Values": [ "INT64(1)" ], @@ -2424,26 +1391,7 @@ { "comment": "union with the same target shard last_insert_id", "query": "select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select *, :__lastInsertId as `last_insert_id()` from music where 1 != 1 union select * from `user` where 1 != 1", - "Query": "select *, :__lastInsertId as `last_insert_id()` from music where user_id = 1 union select * from `user` where id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select *, last_insert_id() from music where user_id = 1 union select * from user where id = 1", "Instructions": { @@ -2455,7 +1403,7 @@ }, "FieldQuery": "select *, :__lastInsertId as `last_insert_id()` from music where 1 != 1 union select * from `user` where 1 != 1", "Query": "select *, :__lastInsertId as `last_insert_id()` from music where user_id = 1 union select * from `user` where id = 1", - "Table": "music", + "Table": "`user`, music", "Values": [ "INT64(1)" ], @@ -2470,22 +1418,7 @@ { "comment": "unsharded union in derived table", "query": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from (select col1, col2 from unsharded where 1 != 1 union select col1, col2 from unsharded where 1 != 1) as a where 1 != 1", - "Query": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) a", "Instructions": { @@ -2507,22 +1440,7 @@ { "comment": "unsharded union in subquery", "query": "select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select id, `name` from unsharded where 1 != 1", - "Query": "select id, `name` from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, name from unsharded where id in (select id from unsharded where id = 1 union select id from unsharded where id = 3)", "Instructions": { @@ -2544,22 +1462,7 @@ { "comment": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5", "query": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1", - "Query": "select id from unsharded union select id from unsharded_auto order by id asc limit 5", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select id from unsharded) union (select id from unsharded_auto) order by id limit 5", "Instructions": { @@ -2582,22 +1485,7 @@ { "comment": "unsharded union", "query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1", - "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto where id in (132)", "Instructions": { @@ -2620,22 +1508,7 @@ { "comment": "unsharded nested union", "query": "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select id from unsharded where 1 != 1 union select id from unsharded_auto where 1 != 1 union select id from unsharded_auto where 1 != 1 union select `name` from unsharded where 1 != 1", - "Query": "select id from unsharded union select id from unsharded_auto union select id from unsharded_auto union select `name` from unsharded", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select id from unsharded union select id from unsharded_auto) union (select id from unsharded_auto union select name from unsharded)", "Instructions": { @@ -2658,22 +1531,7 @@ { "comment": "unsharded nested union with limit", "query": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "(select id from unsharded where 1 != 1) union (select id from unsharded where 1 != 1)", - "Query": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select id from unsharded order by id asc limit 1) union (select id from unsharded order by id desc limit 1) order by id asc limit 1", "Instructions": { @@ -2695,23 +1553,7 @@ { "comment": "routing rules: ensure directives are not lost", "query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from unsharded as route2 where 1 != 1", - "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from unsharded as route2", - "QueryTimeout": 1000, - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from route2", "Instructions": { @@ -2734,22 +1576,7 @@ { "comment": "testing SingleRow Projection", "query": "select 42", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 42", - "Instructions": { - "OperatorType": "Projection", - "Expressions": [ - "INT64(42) as 42" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 42", "Instructions": { @@ -2771,22 +1598,7 @@ { "comment": "don't filter on the vtgate", "query": "select 42 from dual where false", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 42 from dual where false", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 42 from dual where 1 != 1", - "Query": "select 42 from dual where false", - "Table": "dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 42 from dual where false", "Instructions": { @@ -2808,22 +1620,7 @@ { "comment": "testing SingleRow Projection with arithmetics", "query": "select 42+2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 42+2", - "Instructions": { - "OperatorType": "Projection", - "Expressions": [ - "INT64(44) as 42 + 2" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 42+2", "Instructions": { @@ -2845,26 +1642,7 @@ { "comment": "sql_calc_found_rows without limit", "query": "select sql_calc_found_rows * from music where user_id = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select sql_calc_found_rows * from music where user_id = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from music where 1 != 1", - "Query": "select * from music where user_id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sql_calc_found_rows * from music where user_id = 1", "Instructions": { @@ -2890,51 +1668,7 @@ { "comment": "sql_calc_found_rows with limit", "query": "select sql_calc_found_rows * from music limit 100", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select sql_calc_found_rows * from music limit 100", - "Instructions": { - "OperatorType": "SQL_CALC_FOUND_ROWS", - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(100)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from music where 1 != 1", - "Query": "select * from music limit :__upper_limit", - "Table": "music" - } - ] - }, - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from music where 1 != 1", - "Query": "select count(*) from music", - "Table": "music" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sql_calc_found_rows * from music limit 100", "Instructions": { @@ -2985,46 +1719,7 @@ { "comment": "sql_calc_found_rows with SelectEqualUnique plans", "query": "select sql_calc_found_rows * from music where user_id = 1 limit 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select sql_calc_found_rows * from music where user_id = 1 limit 2", - "Instructions": { - "OperatorType": "SQL_CALC_FOUND_ROWS", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from music where 1 != 1", - "Query": "select * from music where user_id = 1 limit 2", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from music where 1 != 1", - "Query": "select count(*) from music where user_id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sql_calc_found_rows * from music where user_id = 1 limit 2", "Instructions": { @@ -3070,53 +1765,7 @@ { "comment": "sql_calc_found_rows with group by and having", "query": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2", - "Instructions": { - "OperatorType": "SQL_CALC_FOUND_ROWS", - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(2)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id, count(id), weight_string(user_id) from music where 1 != 1 group by user_id", - "OrderBy": "(0|2) ASC", - "Query": "select user_id, count(id), weight_string(user_id) from music group by user_id having count(user_id) = 1 order by user_id asc limit :__upper_limit", - "ResultColumns": 2, - "Table": "music" - } - ] - }, - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from (select user_id, count(id) from music where 1 != 1 group by user_id) as t where 1 != 1", - "Query": "select count(*) from (select user_id, count(id) from music group by user_id having count(user_id) = 1) as t", - "Table": "music" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sql_calc_found_rows user_id, count(id) from music group by user_id having count(user_id) = 1 order by user_id limit 2", "Instructions": { @@ -3169,34 +1818,17 @@ { "comment": "sql_calc_found_rows in sub queries", "query": "select * from music where user_id IN (select sql_calc_found_rows * from music limit 10)", - "v3-plan": "VT03008: incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'", - "gen4-plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'" + "plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'" }, { "comment": "sql_calc_found_rows in derived table", "query": "select sql_calc_found_rows * from (select sql_calc_found_rows * from music limit 10) t limit 1", - "v3-plan": "VT03008: incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'", - "gen4-plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'" + "plan": "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'" }, { "comment": "select from unsharded keyspace into dumpfile", "query": "select * from main.unsharded into Dumpfile 'x.txt'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from main.unsharded into Dumpfile 'x.txt'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from unsharded where 1 != 1", - "Query": "select * from unsharded into dumpfile 'x.txt'", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from main.unsharded into Dumpfile 'x.txt'", "Instructions": { @@ -3218,22 +1850,7 @@ { "comment": "select from unsharded keyspace into outfile", "query": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from unsharded where 1 != 1", - "Query": "select * from unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n'", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from main.unsharded into outfile 'x.txt' character set binary fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n'", "Instructions": { @@ -3255,22 +1872,7 @@ { "comment": "select from unsharded keyspace into outfile s3", "query": "select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from unsharded where 1 != 1", - "Query": "select * from unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\\n' manifest on overwrite off", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from main.unsharded into outfile s3 'out_file_name' character set binary format csv header fields terminated by 'term' optionally enclosed by 'c' escaped by 'e' lines starting by 'a' terminated by '\n' manifest on overwrite off", "Instructions": { @@ -3307,26 +1909,7 @@ { "comment": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1", "query": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select (select u.id from `user` as u where 1 != 1), a.id from `user` as a where 1 != 1", - "Query": "select (select u.id from `user` as u where u.id = 1), a.id from `user` as a where a.id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select (select u.id from user as u where u.id = 1), a.id from user as a where a.id = 1", "Instructions": { @@ -3352,41 +1935,7 @@ { "comment": "Add two tables with the same column in a join", "query": "select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`, user_extra_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select t.id, s.id from `user` as t join user_extra as s on t.id = s.user_id where 1 != 1", - "Query": "select t.id, s.id from `user` as t join user_extra as s on t.id = s.user_id", - "Table": "`user`, user_extra" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from unsharded where 1 != 1", - "Query": "select 1 from unsharded", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.id, s.id from user t join user_extra s on t.id = s.user_id join unsharded", "Instructions": { @@ -3429,22 +1978,7 @@ { "comment": "((((select 1))))", "query": "((((select 1))))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "((((select 1))))", - "Instructions": { - "OperatorType": "Projection", - "Expressions": [ - "INT64(1) as 1" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "((((select 1))))", "Instructions": { @@ -3466,41 +2000,7 @@ { "comment": "Merging dual with user", "query": "select 42, id from dual, user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 42, id from dual, user", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "dual_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 42 from dual where 1 != 1", - "Query": "select 42 from dual", - "Table": "dual" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 42, id from dual, user", "Instructions": { @@ -3523,70 +2023,7 @@ { "comment": "select (select col from user limit 1) as a from user join user_extra order by a", "query": "select (select col from user limit 1) as a from user join user_extra order by a", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select (select col from user limit 1) as a from user join user_extra order by a", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` limit :__upper_limit", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :__sq1 as a, weight_string(:__sq1) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select :__sq1 as a, weight_string(:__sq1) from `user` order by a asc", - "ResultColumns": 1, - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select (select col from user limit 1) as a from user join user_extra order by a", "Instructions": { @@ -3597,6 +2034,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Limit", "Count": "INT64(1)", "Inputs": [ @@ -3614,6 +2052,7 @@ ] }, { + "InputName": "Outer", "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "L:0", @@ -3655,76 +2094,7 @@ { "comment": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t", "query": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` limit :__upper_limit", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :__sq1 as a from `user` where 1 != 1", - "Query": "select :__sq1 as a from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t", "Instructions": { @@ -3741,6 +2111,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Limit", "Count": "INT64(1)", "Inputs": [ @@ -3758,6 +2129,7 @@ ] }, { + "InputName": "Outer", "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "L:0", @@ -3798,1212 +2170,21 @@ } }, { - "comment": "select (select col from user where user_extra.id = 4 limit 1) as a from user join user_extra", - "query": "select (select col from user where user_extra.id = 4 limit 1) as a from user join user_extra", - "plan": "VT12001: unsupported: cross-shard correlated subquery" - }, - { - "comment": "plan test for a natural character set string", - "query": "select N'string' from dual", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select N'string' from dual", - "Instructions": { - "OperatorType": "Projection", - "Expressions": [ - "VARCHAR(\"string\") as N'string'" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select N'string' from dual", - "Instructions": { - "OperatorType": "Projection", - "Expressions": [ - "VARCHAR(\"string\") as N'string'" - ], - "Inputs": [ - { - "OperatorType": "SingleRow" - } - ] - }, - "TablesUsed": [ - "main.dual" - ] - } - }, - { - "comment": "select expression having dependencies on both sides of a join", - "query": "select user.id * user_id as amount from user, user_extra", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.id * user_id as amount from user, user_extra", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :user_id * user_id as amount from user_extra where 1 != 1", - "Query": "select :user_id * user_id as amount from user_extra", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user.id * user_id as amount from user, user_extra", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_id": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :user_id * user_id as amount from user_extra where 1 != 1", - "Query": "select :user_id * user_id as amount from user_extra", - "Table": "user_extra" - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "correlated subquery in exists clause", - "query": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id)", - "v3-plan": "VT12001: unsupported: cross-shard correlated subquery", - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id)", - "Instructions": { - "OperatorType": "SemiJoin", - "JoinVars": { - "user_id": 0 - }, - "ProjectedIndexes": "-2", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, col from `user` where 1 != 1", - "Query": "select `user`.id, col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id", - "Table": "user_extra", - "Values": [ - "INT64(3)" - ], - "Vindex": "user_index" - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "correlated subquery in exists clause with an order by", - "query": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by col", - "v3-plan": "VT12001: unsupported: cross-shard correlated subquery", - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by col", - "Instructions": { - "OperatorType": "SemiJoin", - "JoinVars": { - "user_id": 0 - }, - "ProjectedIndexes": "-2", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, col from `user` where 1 != 1", - "OrderBy": "1 ASC", - "Query": "select `user`.id, col from `user` order by col asc", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id", - "Table": "user_extra", - "Values": [ - "INT64(3)" - ], - "Vindex": "user_index" - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "correlated subquery having dependencies on two tables", - "query": "select 1 from user u1, user u2 where exists (select 1 from user_extra ue where ue.col = u1.col and ue.col = u2.col)", - "v3-plan": "VT12001: unsupported: cross-shard correlated subquery", - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 from user u1, user u2 where exists (select 1 from user_extra ue where ue.col = u1.col and ue.col = u2.col)", - "Instructions": { - "OperatorType": "SemiJoin", - "JoinVars": { - "u1_col": 0, - "u2_col": 1 - }, - "ProjectedIndexes": "-3", - "TableName": "`user`_`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,L:1", - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.col, 1 from `user` as u1 where 1 != 1", - "Query": "select u1.col, 1 from `user` as u1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u2.col from `user` as u2 where 1 != 1", - "Query": "select u2.col from `user` as u2", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra as ue where 1 != 1", - "Query": "select 1 from user_extra as ue where ue.col = :u1_col and ue.col = :u2_col", - "Table": "user_extra" - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "correlated subquery using a column twice", - "query": "select 1 from user u where exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)", - "v3-plan": "VT12001: unsupported: cross-shard correlated subquery", - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 from user u where exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)", - "Instructions": { - "OperatorType": "SemiJoin", - "JoinVars": { - "u_col": 0 - }, - "ProjectedIndexes": "-2", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.col, 1 from `user` as u where 1 != 1", - "Query": "select u.col, 1 from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra as ue where 1 != 1", - "Query": "select 1 from user_extra as ue where ue.col = :u_col and ue.col2 = :u_col", - "Table": "user_extra" - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "correlated subquery part of an OR clause", - "query": "select 1 from user u where u.col = 6 or exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)", - "v3-plan": "VT12001: unsupported: cross-shard correlated subquery", - "gen4-plan": "VT12001: unsupported: EXISTS sub-queries are only supported with AND clause" - }, - { - "comment": "correlated subquery that is dependent on one side of a join, fully mergeable", - "query": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music join `user` on music.user_id = `user`.id where 1 != 1", - "Query": "select music.id from music join `user` on music.user_id = `user`.id where music.user_id = 5 and music.id = (select max(m2.id) from music as m2 where m2.user_id = `user`.id)", - "Table": "music, `user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music, `user` where 1 != 1", - "Query": "select music.id from music, `user` where music.user_id = 5 and music.id = (select max(m2.id) from music as m2 where m2.user_id = `user`.id) and music.user_id = `user`.id", - "Table": "`user`, music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.music", - "user.user" - ] - } - }, - { - "comment": "union as a derived table", - "query": "select found from (select id as found from user union all (select id from unsharded)) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select found from (select id as found from user union all (select id from unsharded)) as t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id as found from `user` where 1 != 1", - "Query": "select id as found from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select id from unsharded where 1 != 1", - "Query": "select id from unsharded", - "Table": "unsharded" - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select found from (select id as found from user union all (select id from unsharded)) as t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id as found from `user` where 1 != 1", - "Query": "select id as found from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select id from unsharded where 1 != 1", - "Query": "select id from unsharded", - "Table": "unsharded" - } - ] - } - ] - }, - "TablesUsed": [ - "main.unsharded", - "user.user" - ] - } - }, - { - "comment": "use output column containing data from both sides of the join", - "query": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_col": 0, - "user_id": 1 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col, `user`.id from `user` where 1 != 1", - "Query": "select `user`.col, `user`.id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col + :user_col from user_extra where 1 != 1", - "Query": "select user_extra.col + :user_col from user_extra where user_extra.id = :user_id", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "user_extra_col": 1, - "user_extra_id": 0 - }, - "TableName": "user_extra_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id, user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.id, user_extra.col from user_extra", - "Table": "user_extra" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :user_extra_col + `user`.col from `user` where 1 != 1", - "Query": "select :user_extra_col + `user`.col from `user` where `user`.id = :user_extra_id", - "Table": "`user`", - "Values": [ - ":user_extra_id" - ], - "Vindex": "user_index" - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "mergeable derived table with order by and limit", - "query": "select 1 from (select col from main.unsharded order by main.unsharded.col1 desc limit 12 offset 0) as f left join unsharded as u on f.col = u.id", - "plan": { - "QueryType": "SELECT", - "Original": "select 1 from (select col from main.unsharded order by main.unsharded.col1 desc limit 12 offset 0) as f left join unsharded as u on f.col = u.id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from (select col from unsharded where 1 != 1) as f left join unsharded as u on f.col = u.id where 1 != 1", - "Query": "select 1 from (select col from unsharded order by unsharded.col1 desc limit 0, 12) as f left join unsharded as u on f.col = u.id", - "Table": "unsharded" - }, - "TablesUsed": [ - "main.unsharded" - ] - } - }, - { - "comment": "mergeable derived table with group by and limit", - "query": "select 1 from (select col, count(*) as a from main.unsharded group by col having a > 0 limit 12 offset 0) as f left join unsharded as u on f.col = u.id", - "plan": { - "QueryType": "SELECT", - "Original": "select 1 from (select col, count(*) as a from main.unsharded group by col having a > 0 limit 12 offset 0) as f left join unsharded as u on f.col = u.id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from (select col, count(*) as a from unsharded where 1 != 1 group by col) as f left join unsharded as u on f.col = u.id where 1 != 1", - "Query": "select 1 from (select col, count(*) as a from unsharded group by col having count(*) > 0 limit 0, 12) as f left join unsharded as u on f.col = u.id", - "Table": "unsharded" - }, - "TablesUsed": [ - "main.unsharded" - ] - } - }, - { - "comment": "select user.id, trim(leading 'x' from user.name) from user", - "query": "select user.id, trim(leading 'x' from user.name) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user.id, trim(leading 'x' from user.name) from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user` where 1 != 1", - "Query": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user.id, trim(leading 'x' from user.name) from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user` where 1 != 1", - "Query": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user`", - "Table": "`user`" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "json utility functions", - "query": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user` where 1 != 1", - "Query": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user` where 1 != 1", - "Query": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user`", - "Table": "`user`" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "dual query with exists clause", - "query": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from dual where 1 != 1", - "Query": "select 1 from dual where exists (select 1 from information_schema.`TABLES` where information_schema.`TABLES`.TABLE_NAME = :TABLES_TABLE_NAME and information_schema.`TABLES`.TABLE_SCHEMA = :__vtschemaname limit 1)", - "SysTableTableName": "[TABLES_TABLE_NAME:VARCHAR(\"proc\")]", - "SysTableTableSchema": "[VARCHAR(\"mysql\")]", - "Table": "dual" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from dual where 1 != 1", - "Query": "select 1 from dual where exists (select 1 from information_schema.`TABLES` where `TABLES`.TABLE_NAME = :TABLES_TABLE_NAME and `TABLES`.TABLE_SCHEMA = :__vtschemaname limit 1)", - "SysTableTableName": "[TABLES_TABLE_NAME:VARCHAR(\"proc\")]", - "SysTableTableSchema": "[VARCHAR(\"mysql\")]", - "Table": "dual" - }, - "TablesUsed": [ - "main.dual" - ] - } - }, - { - "comment": "json_quote, json_object and json_array", - "query": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual where 1 != 1", - "Query": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual", - "Table": "dual" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual where 1 != 1", - "Query": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, CURTIME()) from dual", - "Table": "dual" - }, - "TablesUsed": [ - "main.dual" - ] - } - }, - { - "comment": "select (select id from user order by id limit 1) from user_extra", - "query": "select (select id from user order by id limit 1) from user_extra", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select (select id from user order by id limit 1) from user_extra", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :__sq1 from user_extra where 1 != 1", - "Query": "select :__sq1 from user_extra", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select (select id from user order by id limit 1) from user_extra", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :__sq1 from user_extra where 1 != 1", - "Query": "select :__sq1 from user_extra", - "Table": "user_extra" - } - ] - }, - "TablesUsed": [ - "user.user", - "user.user_extra" - ] - } - }, - { - "comment": "yeah, it does not make sense, but it's valid", - "query": "select exists(select 1) from user where id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select exists(select 1) from user where id = 5", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutExists", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from dual where 1 != 1", - "Query": "select 1 from dual limit 1", - "Table": "dual" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :__sq_has_values1 from `user` where 1 != 1", - "Query": "select :__sq_has_values1 from `user` where id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select exists(select 1) from user where id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select exists (select 1 from dual where 1 != 1) from `user` where 1 != 1", - "Query": "select exists (select 1 from dual limit 1) from `user` where id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "main.dual", - "user.user" - ] - } - }, - { - "comment": "json schema validation functions", - "query": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual where 1 != 1", - "Query": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual", - "Table": "dual" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual where 1 != 1", - "Query": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual", - "Table": "dual" - }, - "TablesUsed": [ - "main.dual" - ] - } - }, - { - "comment": "json search functions", - "query": "SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual where 1 != 1", - "Query": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual", - "Table": "dual" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual where 1 != 1", - "Query": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual", - "Table": "dual" - }, - "TablesUsed": [ - "main.dual" - ] - } - }, - { - "comment": "Json extract and json unquote shorthands", - "query": "SELECT a->\"$[4]\", a->>\"$[3]\" FROM user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT a->\"$[4]\", a->>\"$[3]\" FROM user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a -> '$[4]', a ->> '$[3]' from `user` where 1 != 1", - "Query": "select a -> '$[4]', a ->> '$[3]' from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "SELECT a->\"$[4]\", a->>\"$[3]\" FROM user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select a -> '$[4]', a ->> '$[3]' from `user` where 1 != 1", - "Query": "select a -> '$[4]', a ->> '$[3]' from `user`", - "Table": "`user`" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "groupe by with non aggregated columns and table alias", - "query": "select u.id, u.age from user u group by u.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.id, u.age from user u group by u.id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id, u.age from `user` as u where 1 != 1 group by u.id", - "Query": "select u.id, u.age from `user` as u group by u.id", - "Table": "`user`" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select u.id, u.age from user u group by u.id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id, u.age from `user` as u where 1 != 1 group by u.id", - "Query": "select u.id, u.age from `user` as u group by u.id", - "Table": "`user`" - }, - "TablesUsed": [ - "user.user" - ] - } - }, - { - "comment": "Functions that return JSON value attributes", - "query": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual where 1 != 1", - "Query": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual", - "Table": "dual" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual where 1 != 1", - "Query": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual", - "Table": "dual" - }, - "TablesUsed": [ - "main.dual" - ] - } - }, - { - "comment": "Json array functions", - "query": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual where 1 != 1", - "Query": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual", - "Table": "dual" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual where 1 != 1", - "Query": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual", - "Table": "dual" - }, - "TablesUsed": [ - "main.dual" - ] - } - }, - { - "comment": "Json merge functions", - "query": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual where 1 != 1", - "Query": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual", - "Table": "dual" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual where 1 != 1", - "Query": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual", - "Table": "dual" - }, - "TablesUsed": [ - "main.dual" - ] - } - }, - { - "comment": "JSON modifier functions", - "query": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual where 1 != 1", - "Query": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual", - "Table": "dual" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual where 1 != 1", - "Query": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual", - "Table": "dual" + "comment": "plan test for a natural character set string", + "query": "select N'string' from dual", + "plan": { + "QueryType": "SELECT", + "Original": "select N'string' from dual", + "Instructions": { + "OperatorType": "Projection", + "Expressions": [ + "VARCHAR(\"string\") as N'string'" + ], + "Inputs": [ + { + "OperatorType": "SingleRow" + } + ] }, "TablesUsed": [ "main.dual" @@ -5011,297 +2192,172 @@ } }, { - "comment": "Reference with a subquery which can be merged", - "query": "select exists(select id from user where id = 4)", - "v3-plan": { + "comment": "select expression having dependencies on both sides of a join", + "query": "select user.id * user_id as amount from user, user_extra", + "plan": { "QueryType": "SELECT", - "Original": "select exists(select id from user where id = 4)", + "Original": "select user.id * user_id as amount from user, user_extra", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutExists", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "user_id": 0 + }, + "TableName": "`user`_user_extra", "Inputs": [ { "OperatorType": "Route", - "Variant": "EqualUnique", + "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where id = 4 limit 1", - "Table": "`user`", - "Values": [ - "INT64(4)" - ], - "Vindex": "user_index" + "FieldQuery": "select `user`.id from `user` where 1 != 1", + "Query": "select `user`.id from `user`", + "Table": "`user`" }, { "OperatorType": "Route", - "Variant": "Reference", + "Variant": "Scatter", "Keyspace": { - "Name": "main", - "Sharded": false + "Name": "user", + "Sharded": true }, - "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1", - "Query": "select :__sq_has_values1 from dual", - "Table": "dual" + "FieldQuery": "select :user_id * user_id as amount from user_extra where 1 != 1", + "Query": "select :user_id * user_id as amount from user_extra", + "Table": "user_extra" } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select exists(select id from user where id = 4)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1", - "Query": "select exists (select 1 from `user` where id = 4 limit 1) from dual", - "Table": "dual", - "Values": [ - "INT64(4)" - ], - "Vindex": "user_index" }, "TablesUsed": [ - "main.dual", - "user.user" + "user.user", + "user.user_extra" ] } }, { - "comment": "Reference with a subquery which cannot be merged", - "query": "select exists(select * from user)", - "v3-plan": { + "comment": "correlated subquery in exists clause", + "query": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id)", + "plan": { "QueryType": "SELECT", - "Original": "select exists(select * from user)", + "Original": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id)", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutExists", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], + "OperatorType": "SemiJoin", + "JoinVars": { + "user_id": 0 + }, + "ProjectedIndexes": "-2", + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` limit :__upper_limit", - "Table": "`user`" - } - ] - }, - { + "InputName": "Outer", "OperatorType": "Route", - "Variant": "Reference", + "Variant": "Scatter", "Keyspace": { - "Name": "main", - "Sharded": false + "Name": "user", + "Sharded": true }, - "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1", - "Query": "select :__sq_has_values1 from dual", - "Table": "dual" - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select exists(select * from user)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutExists", - "PulloutVars": [ - "__sq_has_values1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` limit :__upper_limit", - "Table": "`user`" - } - ] + "FieldQuery": "select `user`.id, col from `user` where 1 != 1", + "Query": "select `user`.id, col from `user`", + "Table": "`user`" }, { + "InputName": "SubQuery", "OperatorType": "Route", - "Variant": "Reference", + "Variant": "EqualUnique", "Keyspace": { - "Name": "main", - "Sharded": false + "Name": "user", + "Sharded": true }, - "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1", - "Query": "select :__sq_has_values1 from dual", - "Table": "dual" + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id", + "Table": "user_extra", + "Values": [ + "INT64(3)" + ], + "Vindex": "user_index" } ] }, "TablesUsed": [ - "main.dual", - "user.user" - ] - } - }, - { - "comment": "insert function not requiring any table", - "query": "select insert('Quadratic', 3, 4, 'What')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select insert('Quadratic', 3, 4, 'What')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select insert('Quadratic', 3, 4, 'What') from dual where 1 != 1", - "Query": "select insert('Quadratic', 3, 4, 'What') from dual", - "Table": "dual" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select insert('Quadratic', 3, 4, 'What')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select insert('Quadratic', 3, 4, 'What') from dual where 1 != 1", - "Query": "select insert('Quadratic', 3, 4, 'What') from dual", - "Table": "dual" - }, - "TablesUsed": [ - "main.dual" - ] - } - }, - { - "comment": "insert function using column names as arguments", - "query": "select insert(tcol1, id, 3, tcol2) from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select insert(tcol1, id, 3, tcol2) from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select insert(tcol1, id, 3, tcol2) from `user` where 1 != 1", - "Query": "select insert(tcol1, id, 3, tcol2) from `user`", - "Table": "`user`" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select insert(tcol1, id, 3, tcol2) from user", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select insert(tcol1, id, 3, tcol2) from `user` where 1 != 1", - "Query": "select insert(tcol1, id, 3, tcol2) from `user`", - "Table": "`user`" - }, - "TablesUsed": [ - "user.user" + "user.user", + "user.user_extra" ] } }, { - "comment": "gtid functions", - "query": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual where 1 != 1", - "Query": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual", - "Table": "dual" - } - }, - "gen4-plan": { + "comment": "correlated subquery in exists clause with an order by", + "query": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by col", + "plan": { "QueryType": "SELECT", - "Original": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')", + "Original": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by col", "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false + "OperatorType": "SemiJoin", + "JoinVars": { + "user_id": 0 }, - "FieldQuery": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual where 1 != 1", - "Query": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual", - "Table": "dual" + "ProjectedIndexes": "-2", + "TableName": "`user`_user_extra", + "Inputs": [ + { + "InputName": "Outer", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id, col from `user` where 1 != 1", + "OrderBy": "1 ASC", + "Query": "select `user`.id, col from `user` order by col asc", + "Table": "`user`" + }, + { + "InputName": "SubQuery", + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra where user_id = 3 and user_id < :user_id", + "Table": "user_extra", + "Values": [ + "INT64(3)" + ], + "Vindex": "user_index" + } + ] }, "TablesUsed": [ - "main.dual" + "user.user", + "user.user_extra" ] } }, { - "comment": "Predicate in apply join which is merged", - "query": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'", - "v3-plan": { + "comment": "correlated subquery having dependencies on two tables", + "query": "select 1 from user u1, user u2 where exists (select 1 from user_extra ue where ue.col = u1.col and ue.col = u2.col)", + "plan": { "QueryType": "SELECT", - "Original": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'", + "Original": "select 1 from user u1, user u2 where exists (select 1 from user_extra ue where ue.col = u1.col and ue.col = u2.col)", "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", + "OperatorType": "SemiJoin", "JoinVars": { - "user_extra_user_id": 1 + "u1_col": 0, + "u2_col": 1 }, - "TableName": "`user`_user_extra_user_metadata", + "ProjectedIndexes": "-3", + "TableName": "`user`_`user`_user_extra", "Inputs": [ { + "InputName": "Outer", "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "user_col": 0 - }, - "TableName": "`user`_user_extra", + "JoinColumnIndexes": "L:0,R:0,L:1", + "TableName": "`user`_`user`", "Inputs": [ { "OperatorType": "Route", @@ -5310,8 +2366,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` where `user`.textcol1 = 'alice@gmail.com'", + "FieldQuery": "select u1.col, 1 from `user` as u1 where 1 != 1", + "Query": "select u1.col, 1 from `user` as u1", "Table": "`user`" }, { @@ -5321,715 +2377,690 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1", - "Query": "select user_extra.user_id from user_extra where user_extra.col = :user_col", - "Table": "user_extra" + "FieldQuery": "select u2.col from `user` as u2 where 1 != 1", + "Query": "select u2.col from `user` as u2", + "Table": "`user`" } ] }, { + "InputName": "SubQuery", "OperatorType": "Route", - "Variant": "EqualUnique", + "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select user_metadata.user_id from user_metadata where 1 != 1", - "Query": "select user_metadata.user_id from user_metadata where user_metadata.user_id = :user_extra_user_id", - "Table": "user_metadata", - "Values": [ - ":user_extra_user_id" - ], - "Vindex": "user_index" + "FieldQuery": "select 1 from user_extra as ue where 1 != 1", + "Query": "select 1 from user_extra as ue where ue.col = :u1_col /* INT16 */ and ue.col = :u2_col /* INT16 */", + "Table": "user_extra" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "correlated subquery using a column twice", + "query": "select 1 from user u where exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)", + "plan": { "QueryType": "SELECT", - "Original": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'", + "Original": "select 1 from user u where exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)", "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", + "OperatorType": "SemiJoin", "JoinVars": { - "user_col": 0 + "u_col": 0 }, - "TableName": "`user`_user_extra, user_metadata", + "ProjectedIndexes": "-2", + "TableName": "`user`_user_extra", "Inputs": [ { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select `user`.col from `user` where `user`.textcol1 = 'alice@gmail.com'", + "FieldQuery": "select u.col, 1 from `user` as u where 1 != 1", + "Query": "select u.col, 1 from `user` as u", "Table": "`user`" }, { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select user_metadata.user_id from user_extra, user_metadata where 1 != 1", - "Query": "select user_metadata.user_id from user_extra, user_metadata where user_extra.col = :user_col and user_extra.user_id = user_metadata.user_id", - "Table": "user_extra, user_metadata" + "FieldQuery": "select 1 from user_extra as ue where 1 != 1", + "Query": "select 1 from user_extra as ue where ue.col = :u_col /* INT16 */ and ue.col2 = :u_col", + "Table": "user_extra" + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "correlated subquery that is dependent on one side of a join, fully mergeable", + "query": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT music.id FROM music INNER JOIN user ON music.user_id = user.id WHERE music.user_id = 5 AND music.id = (SELECT MAX(m2.id) FROM music m2 WHERE m2.user_id = user.id)", + "Instructions": { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music, `user` where 1 != 1", + "Query": "select music.id from music, `user` where music.user_id = 5 and music.id = (select max(m2.id) from music as m2 where m2.user_id = `user`.id) and music.user_id = `user`.id", + "Table": "`user`, music", + "Values": [ + "INT64(5)" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "union as a derived table", + "query": "select found from (select id as found from user union all (select id from unsharded)) as t", + "plan": { + "QueryType": "SELECT", + "Original": "select found from (select id as found from user union all (select id from unsharded)) as t", + "Instructions": { + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id as found from `user` where 1 != 1", + "Query": "select id as found from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select id from unsharded where 1 != 1", + "Query": "select id from unsharded", + "Table": "unsharded" + } + ] + }, + "TablesUsed": [ + "main.unsharded", + "user.user" + ] + } + }, + { + "comment": "use output column containing data from both sides of the join", + "query": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id", + "plan": { + "QueryType": "SELECT", + "Original": "select user_extra.col + user.col from user join user_extra on user.id = user_extra.id", + "Instructions": { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "user_extra_col": 0, + "user_extra_id": 1 + }, + "TableName": "user_extra_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.col, user_extra.id from user_extra where 1 != 1", + "Query": "select user_extra.col, user_extra.id from user_extra", + "Table": "user_extra" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select :user_extra_col + `user`.col from `user` where 1 != 1", + "Query": "select :user_extra_col + `user`.col from `user` where `user`.id = :user_extra_id", + "Table": "`user`", + "Values": [ + ":user_extra_id" + ], + "Vindex": "user_index" } ] }, "TablesUsed": [ - "user.user", - "user.user_extra", - "user.user_metadata" + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "mergeable derived table with order by and limit", + "query": "select 1 from (select col from main.unsharded order by main.unsharded.col1 desc limit 12 offset 0) as f left join unsharded as u on f.col = u.id", + "plan": { + "QueryType": "SELECT", + "Original": "select 1 from (select col from main.unsharded order by main.unsharded.col1 desc limit 12 offset 0) as f left join unsharded as u on f.col = u.id", + "Instructions": { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 from (select col from unsharded where 1 != 1) as f left join unsharded as u on f.col = u.id where 1 != 1", + "Query": "select 1 from (select col from unsharded order by unsharded.col1 desc limit 0, 12) as f left join unsharded as u on f.col = u.id", + "Table": "unsharded" + }, + "TablesUsed": [ + "main.unsharded" + ] + } + }, + { + "comment": "mergeable derived table with group by and limit", + "query": "select 1 from (select col, count(*) as a from main.unsharded group by col having a > 0 limit 12 offset 0) as f left join unsharded as u on f.col = u.id", + "plan": { + "QueryType": "SELECT", + "Original": "select 1 from (select col, count(*) as a from main.unsharded group by col having a > 0 limit 12 offset 0) as f left join unsharded as u on f.col = u.id", + "Instructions": { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 from (select col, count(*) as a from unsharded where 1 != 1 group by col) as f left join unsharded as u on f.col = u.id where 1 != 1", + "Query": "select 1 from (select col, count(*) as a from unsharded group by col having count(*) > 0 limit 0, 12) as f left join unsharded as u on f.col = u.id", + "Table": "unsharded" + }, + "TablesUsed": [ + "main.unsharded" ] } }, { - "comment": "Join across multiple tables, with conditions on different vindexes, but mergeable through join predicates", - "query": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456", - "v3-plan": { + "comment": "select user.id, trim(leading 'x' from user.name) from user", + "query": "select user.id, trim(leading 'x' from user.name) from user", + "plan": { "QueryType": "SELECT", - "Original": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456", + "Original": "select user.id, trim(leading 'x' from user.name) from user", "Instructions": { "OperatorType": "Route", - "Variant": "EqualUnique", + "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.id from `user` join music_extra on `user`.id = music_extra.user_id join music on music_extra.user_id = music.user_id where 1 != 1", - "Query": "select `user`.id from `user` join music_extra on `user`.id = music_extra.user_id join music on music_extra.user_id = music.user_id where `user`.id = 123 and music.id = 456", - "Table": "`user`, music_extra, music", - "Values": [ - "INT64(123)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "FieldQuery": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user` where 1 != 1", + "Query": "select `user`.id, trim(leading 'x' from `user`.`name`) from `user`", + "Table": "`user`" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "json utility functions", + "query": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user", + "plan": { "QueryType": "SELECT", - "Original": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456", + "Original": "select jcol, JSON_STORAGE_SIZE(jcol), JSON_STORAGE_FREE(jcol), JSON_PRETTY(jcol) from user", "Instructions": { "OperatorType": "Route", - "Variant": "EqualUnique", + "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.id from `user`, music_extra, music where 1 != 1", - "Query": "select `user`.id from `user`, music_extra, music where music.id = 456 and `user`.id = 123 and `user`.id = music_extra.user_id and music_extra.user_id = music.user_id", - "Table": "`user`, music, music_extra", - "Values": [ - "INT64(123)" - ], - "Vindex": "user_index" + "FieldQuery": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user` where 1 != 1", + "Query": "select jcol, json_storage_size(jcol), json_storage_free(jcol), json_pretty(jcol) from `user`", + "Table": "`user`" }, "TablesUsed": [ - "user.music", - "user.music_extra", "user.user" ] } }, { - "comment": "SQL_CALC_FOUND_ROWS with vindex lookup", - "query": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2", - "v3-plan": { + "comment": "dual query with exists clause", + "query": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')", + "plan": { "QueryType": "SELECT", - "Original": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2", + "Original": "select 1 from dual where exists (select 1 from information_schema.TABLES where information_schema.TABLES.TABLE_NAME = 'proc' and information_schema.TABLES.TABLE_SCHEMA = 'mysql')", "Instructions": { - "OperatorType": "SQL_CALC_FOUND_ROWS", - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(2)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, `name`, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|2) ASC", - "Query": "select id, `name`, weight_string(id) from `user` where `name` = 'aa' order by id asc limit :__upper_limit", - "ResultColumns": 2, - "Table": "`user`", - "Values": [ - "VARCHAR(\"aa\")" - ], - "Vindex": "name_user_map" - } - ] - }, - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from `user` where 1 != 1", - "Query": "select count(*) from `user` where `name` = 'aa'", - "Table": "`user`", - "Values": [ - "VARCHAR(\"aa\")" - ], - "Vindex": "name_user_map" - } - ] - } - ] - } - }, - "gen4-plan": { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 from dual where 1 != 1", + "Query": "select 1 from dual where exists (select 1 from information_schema.`TABLES` where `TABLES`.TABLE_NAME = :TABLES_TABLE_NAME /* VARCHAR */ and `TABLES`.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */ limit 1)", + "SysTableTableName": "[TABLES_TABLE_NAME:VARCHAR(\"proc\")]", + "SysTableTableSchema": "[VARCHAR(\"mysql\")]", + "Table": "dual" + }, + "TablesUsed": [ + "main.dual" + ] + } + }, + { + "comment": "json_quote, json_object and json_array", + "query": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())", + "plan": { "QueryType": "SELECT", - "Original": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2", + "Original": "SELECT JSON_QUOTE('null'), JSON_QUOTE('\"null\"'), JSON_OBJECT(BIN(1),2,'abc',ASCII(4)), JSON_ARRAY(1, \"abc\", NULL, TRUE, CURTIME())", "Instructions": { - "OperatorType": "SQL_CALC_FOUND_ROWS", + "OperatorType": "Route", + "Variant": "Reference", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, curtime()) from dual where 1 != 1", + "Query": "select json_quote('null'), json_quote('\\\"null\\\"'), json_object(BIN(1), 2, 'abc', ASCII(4)), json_array(1, 'abc', null, true, curtime()) from dual", + "Table": "dual" + }, + "TablesUsed": [ + "main.dual" + ] + } + }, + { + "comment": "select (select id from user order by id limit 1) from user_extra", + "query": "select (select id from user order by id limit 1) from user_extra", + "plan": { + "QueryType": "SELECT", + "Original": "select (select id from user order by id limit 1) from user_extra", + "Instructions": { + "OperatorType": "Subquery", + "Variant": "PulloutValue", + "PulloutVars": [ + "__sq1" + ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Limit", - "Count": "INT64(2)", + "Count": "INT64(1)", "Inputs": [ { - "OperatorType": "VindexLookup", - "Variant": "Equal", + "OperatorType": "Route", + "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "Values": [ - "VARCHAR(\"aa\")" - ], - "Vindex": "name_user_map", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", - "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", - "Table": "name_user_vdx", - "Values": [ - "::name" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "ByDestination", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, `name`, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|2) ASC", - "Query": "select id, `name`, weight_string(id) from `user` where `name` = 'aa' order by id asc limit :__upper_limit", - "ResultColumns": 2, - "Table": "`user`" - } - ] + "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", + "OrderBy": "(0|1) ASC", + "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", + "ResultColumns": 1, + "Table": "`user`" } ] }, { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count_star(0) AS count(*)", - "Inputs": [ - { - "OperatorType": "VindexLookup", - "Variant": "Equal", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "Values": [ - "VARCHAR(\"aa\")" - ], - "Vindex": "name_user_map", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", - "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", - "Table": "name_user_vdx", - "Values": [ - "::name" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "ByDestination", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) from `user` where 1 != 1", - "Query": "select count(*) from `user` where `name` = 'aa'", - "Table": "`user`" - } - ] - } - ] + "InputName": "Outer", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select :__sq1 from user_extra where 1 != 1", + "Query": "select :__sq1 from user_extra", + "Table": "user_extra" } ] }, "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "yeah, it does not make sense, but it's valid", + "query": "select exists(select 1) from user where id = 5", + "plan": { + "QueryType": "SELECT", + "Original": "select exists(select 1) from user where id = 5", + "Instructions": { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select exists (select 1 from dual where 1 != 1) from `user` where 1 != 1", + "Query": "select exists (select 1 from dual limit 1) from `user` where id = 5", + "Table": "`user`", + "Values": [ + "INT64(5)" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "main.dual", "user.user" ] } }, { - "comment": "`None` route being merged with another route via join predicate on Vindex columns", - "query": "SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5", - "v3-plan": { + "comment": "json schema validation functions", + "query": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')", + "plan": { "QueryType": "SELECT", - "Original": "SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5", + "Original": "SELECT JSON_SCHEMA_VALID('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"'), JSON_SCHEMA_VALIDATION_REPORT('{\"type\":\"string\",\"pattern\":\"(\"}', '\"abc\"')", "Instructions": { "OperatorType": "Route", - "Variant": "None", + "Variant": "Reference", "Keyspace": { - "Name": "user", - "Sharded": true + "Name": "main", + "Sharded": false }, - "FieldQuery": "select music.id from music join `user` on music.user_id = `user`.id where 1 != 1", - "Query": "select music.id from music join `user` on music.user_id = `user`.id where music.user_id in (null) and `user`.id = 5", - "Table": "music, `user`" - } - }, - "gen4-plan": { + "FieldQuery": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual where 1 != 1", + "Query": "select json_schema_valid('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"'), json_schema_validation_report('{\\\"type\\\":\\\"string\\\",\\\"pattern\\\":\\\"(\\\"}', '\\\"abc\\\"') from dual", + "Table": "dual" + }, + "TablesUsed": [ + "main.dual" + ] + } + }, + { + "comment": "json search functions", + "query": "SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')", + "plan": { "QueryType": "SELECT", - "Original": "SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5", + "Original": "SELECT JSON_CONTAINS('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', '1'), JSON_CONTAINS_PATH('{\"a\": 1, \"b\": 2, \"c\": {\"d\": 4}}', 'one', '$.a', '$.e'), JSON_EXTRACT('[10, 20, [30, 40]]', '$[1]'), JSON_UNQUOTE(JSON_EXTRACT('[\"a\",\"b\"]', '$[1]')), JSON_KEYS('{\"a\": 1, \"b\": {\"c\": 30}}'), JSON_OVERLAPS(\"[1,3,5,7]\", \"[2,5,7]\"), JSON_SEARCH('[\"abc\"]', 'one', 'abc'), JSON_VALUE('{\"fname\": \"Joe\", \"lname\": \"Palmer\"}', '$.fname'), JSON_ARRAY(4,5) MEMBER OF('[[3,4],[4,5]]')", "Instructions": { "OperatorType": "Route", - "Variant": "None", + "Variant": "Reference", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual where 1 != 1", + "Query": "select json_contains('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', '1'), json_contains_path('{\\\"a\\\": 1, \\\"b\\\": 2, \\\"c\\\": {\\\"d\\\": 4}}', 'one', '$.a', '$.e'), json_extract('[10, 20, [30, 40]]', '$[1]'), json_unquote(json_extract('[\\\"a\\\",\\\"b\\\"]', '$[1]')), json_keys('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}'), json_overlaps('[1,3,5,7]', '[2,5,7]'), json_search('[\\\"abc\\\"]', 'one', 'abc'), json_value('{\\\"fname\\\": \\\"Joe\\\", \\\"lname\\\": \\\"Palmer\\\"}', '$.fname'), json_array(4, 5) member of ('[[3,4],[4,5]]') from dual", + "Table": "dual" + }, + "TablesUsed": [ + "main.dual" + ] + } + }, + { + "comment": "Json extract and json unquote shorthands", + "query": "SELECT a->\"$[4]\", a->>\"$[3]\" FROM user", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT a->\"$[4]\", a->>\"$[3]\" FROM user", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music, `user` where 1 != 1", - "Query": "select music.id from music, `user` where music.user_id in (null) and `user`.id = 5 and music.user_id = `user`.id", - "Table": "`user`, music" + "FieldQuery": "select a -> '$[4]', a ->> '$[3]' from `user` where 1 != 1", + "Query": "select a -> '$[4]', a ->> '$[3]' from `user`", + "Table": "`user`" }, "TablesUsed": [ - "user.music", "user.user" ] } }, { - "comment": "Treating single value tuples as `EqualUnique` routes", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in ::__vals", - "Table": "music", - "Values": [ - "(INT64(5))" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "comment": "groupe by with non aggregated columns and table alias", + "query": "select u.id, u.age from user u group by u.id", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5", + "Original": "select u.id, u.age from user u group by u.id", "Instructions": { "OperatorType": "Route", - "Variant": "EqualUnique", + "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (5)) and music.user_id = 5", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" + "FieldQuery": "select u.id, u.age from `user` as u where 1 != 1 group by u.id", + "Query": "select u.id, u.age from `user` as u group by u.id", + "Table": "`user`" }, "TablesUsed": [ - "user.music" + "user.user" ] } }, { - "comment": "Subquery with `IN` condition using columns with matching lookup vindexes", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in ::__vals", - "Table": "music", - "Values": [ - "(INT64(1), INT64(2), INT64(3))" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "comment": "Functions that return JSON value attributes", + "query": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))", + "Original": "select JSON_DEPTH('{}'), JSON_LENGTH('{\"a\": 1, \"b\": {\"c\": 30}}', '$.b'), JSON_TYPE(JSON_EXTRACT('{\"a\": [10, true]}', '$.a')), JSON_VALID('{\"a\": 1}')", "Instructions": { "OperatorType": "Route", - "Variant": "IN", + "Variant": "Reference", "Keyspace": { - "Name": "user", - "Sharded": true + "Name": "main", + "Sharded": false }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in ::__vals)", - "Table": "music", - "Values": [ - "(INT64(1), INT64(2), INT64(3))" - ], - "Vindex": "user_index" + "FieldQuery": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual where 1 != 1", + "Query": "select json_depth('{}'), json_length('{\\\"a\\\": 1, \\\"b\\\": {\\\"c\\\": 30}}', '$.b'), json_type(json_extract('{\\\"a\\\": [10, true]}', '$.a')), json_valid('{\\\"a\\\": 1}') from dual", + "Table": "dual" }, "TablesUsed": [ - "user.music" + "main.dual" ] } }, { - "comment": "Subquery with `IN` condition using columns with matching lookup vindexes, with derived table", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)", - "v3-plan": { + "comment": "Json array functions", + "query": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)", + "Original": "select JSON_ARRAY_APPEND('{\"a\": 1}', '$', 'z'), JSON_ARRAY_INSERT('[\"a\", {\"b\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), JSON_INSERT('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', CAST('[true, false]' AS JSON))", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from (select music.id from music where 1 != 1) as _inner where 1 != 1", - "Query": "select * from (select music.id from music where music.user_id in ::__vals) as _inner", - "Table": "music", - "Values": [ - "(INT64(1), INT64(2), INT64(3))" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "OperatorType": "Route", + "Variant": "Reference", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual where 1 != 1", + "Query": "select json_array_append('{\\\"a\\\": 1}', '$', 'z'), json_array_insert('[\\\"a\\\", {\\\"b\\\": [1, 2]}, [3, 4]]', '$[0]', 'x', '$[2][1]', 'y'), json_insert('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', cast('[true, false]' as JSON)) from dual", + "Table": "dual" + }, + "TablesUsed": [ + "main.dual" + ] + } + }, + { + "comment": "Json merge functions", + "query": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)", + "Original": "select JSON_MERGE('[1, 2]', '[true, false]'), JSON_MERGE_PATCH('{\"name\": \"x\"}', '{\"id\": 47}'), JSON_MERGE_PRESERVE('[1, 2]', '{\"id\": 47}')", "Instructions": { "OperatorType": "Route", - "Variant": "IN", + "Variant": "Reference", "Keyspace": { - "Name": "user", - "Sharded": true + "Name": "main", + "Sharded": false }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select _inner.id from (select music.id from music where music.user_id in ::__vals) as _inner)", - "Table": "music", - "Values": [ - "(INT64(1), INT64(2), INT64(3))" - ], - "Vindex": "user_index" + "FieldQuery": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual where 1 != 1", + "Query": "select json_merge('[1, 2]', '[true, false]'), json_merge_patch('{\\\"name\\\": \\\"x\\\"}', '{\\\"id\\\": 47}'), json_merge_preserve('[1, 2]', '{\\\"id\\\": 47}') from dual", + "Table": "dual" }, "TablesUsed": [ - "user.music" + "main.dual" ] } }, { - "comment": "Subquery with `IN` condition using columns with matching lookup vindexes, with inner scatter query", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)", - "v3-plan": { + "comment": "JSON modifier functions", + "query": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)", + "Original": "select JSON_REMOVE('[1, [2, 3], 4]', '$[1]'), JSON_REPLACE('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_SET('{ \"a\": 1, \"b\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), JSON_UNQUOTE('\"abc\"')", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.foo = 'bar'", - "Table": "music" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in ::__vals and :__sq_has_values1 = 1 and music.id in ::__sq1", - "Table": "music", - "Values": [ - "(INT64(3), INT64(4), INT64(5))" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "OperatorType": "Route", + "Variant": "Reference", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual where 1 != 1", + "Query": "select json_remove('[1, [2, 3], 4]', '$[1]'), json_replace('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_set('{ \\\"a\\\": 1, \\\"b\\\": [2, 3]}', '$.a', 10, '$.c', '[true, false]'), json_unquote('\\\"abc\\\"') from dual", + "Table": "dual" + }, + "TablesUsed": [ + "main.dual" + ] + } + }, + { + "comment": "Reference with a subquery which can be merged", + "query": "select exists(select id from user where id = 4)", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)", + "Original": "select exists(select id from user where id = 4)", "Instructions": { "OperatorType": "Route", - "Variant": "IN", + "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.foo = 'bar') and music.user_id in ::__vals", - "Table": "music", + "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1", + "Query": "select exists (select 1 from `user` where id = 4 limit 1) from dual", + "Table": "dual", "Values": [ - "(INT64(3), INT64(4), INT64(5))" + "INT64(4)" ], "Vindex": "user_index" }, "TablesUsed": [ - "user.music" + "main.dual", + "user.user" ] } }, { - "comment": "Subquery with `IN` condition using columns with matching lookup vindexes", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5", - "v3-plan": { + "comment": "Reference with a subquery which cannot be merged", + "query": "select exists(select * from user)", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5", + "Original": "select exists(select * from user)", "Instructions": { "OperatorType": "Subquery", - "Variant": "PulloutIn", + "Variant": "PulloutExists", "PulloutVars": [ - "__sq_has_values1", - "__sq1" + "__sq_has_values1" ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in ::__vals", - "Table": "music", - "Values": [ - "(INT64(1), INT64(2), INT64(3))" - ], - "Vindex": "user_index" + "InputName": "SubQuery", + "OperatorType": "Limit", + "Count": "INT64(1)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user` limit :__upper_limit", + "Table": "`user`" + } + ] }, { + "InputName": "Outer", "OperatorType": "Route", - "Variant": "EqualUnique", + "Variant": "Reference", "Keyspace": { - "Name": "user", - "Sharded": true + "Name": "main", + "Sharded": false }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" + "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1", + "Query": "select :__sq_has_values1 from dual", + "Table": "dual" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "main.dual", + "user.user" + ] + } + }, + { + "comment": "insert function not requiring any table", + "query": "select insert('Quadratic', 3, 4, 'What')", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5", + "Original": "select insert('Quadratic', 3, 4, 'What')", "Instructions": { "OperatorType": "Route", - "Variant": "EqualUnique", + "Variant": "Reference", "Keyspace": { - "Name": "user", - "Sharded": true + "Name": "main", + "Sharded": false }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (1, 2, 3)) and music.user_id = 5", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" + "FieldQuery": "select insert('Quadratic', 3, 4, 'What') from dual where 1 != 1", + "Query": "select insert('Quadratic', 3, 4, 'What') from dual", + "Table": "dual" }, "TablesUsed": [ - "user.music" + "main.dual" ] } }, { - "comment": "Subquery with `IN` condition using columns with matching lookup vindexes, but not a top level predicate", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in ::__vals", - "Table": "music", - "Values": [ - "(INT64(1), INT64(2), INT64(3))" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5", - "Table": "music" - } - ] - } - }, - "gen4-plan": { + "comment": "insert function using column names as arguments", + "query": "select insert(tcol1, id, 3, tcol2) from user", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5", + "Original": "select insert(tcol1, id, 3, tcol2) from user", "Instructions": { "OperatorType": "Route", "Variant": "Scatter", @@ -6037,191 +3068,291 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (1, 2, 3)) or music.user_id = 5", - "Table": "music" + "FieldQuery": "select insert(tcol1, id, 3, tcol2) from `user` where 1 != 1", + "Query": "select insert(tcol1, id, 3, tcol2) from `user`", + "Table": "`user`" }, "TablesUsed": [ - "user.music" + "user.user" ] } }, { - "comment": "`IN` comparison on Vindex with `None` subquery, as routing predicate", - "query": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5", - "v3-plan": { + "comment": "gtid functions", + "query": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')", + "plan": { "QueryType": "SELECT", - "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5", + "Original": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23','3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57')", + "Instructions": { + "OperatorType": "Route", + "Variant": "Reference", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual where 1 != 1", + "Query": "select gtid_subset('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57'), gtid_subtract('3E11FA47-71CA-11E1-9E33-C80AA9429562:23', '3E11FA47-71CA-11E1-9E33-C80AA9429562:21-57') from dual", + "Table": "dual" + }, + "TablesUsed": [ + "main.dual" + ] + } + }, + { + "comment": "Predicate in apply join which is merged", + "query": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'", + "plan": { + "QueryType": "SELECT", + "Original": "select user.col, user_metadata.user_id from user join user_extra on user.col = user_extra.col join user_metadata on user_extra.user_id = user_metadata.user_id where user.textcol1 = 'alice@gmail.com'", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_col": 0 + }, + "TableName": "`user`_user_extra, user_metadata", "Inputs": [ { "OperatorType": "Route", - "Variant": "None", + "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in (null)", - "Table": "music" + "FieldQuery": "select `user`.col from `user` where 1 != 1", + "Query": "select `user`.col from `user` where `user`.textcol1 = 'alice@gmail.com'", + "Table": "`user`" }, { "OperatorType": "Route", - "Variant": "EqualUnique", + "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" + "FieldQuery": "select user_metadata.user_id from user_extra, user_metadata where 1 != 1", + "Query": "select user_metadata.user_id from user_extra, user_metadata where user_extra.col = :user_col and user_extra.user_id = user_metadata.user_id", + "Table": "user_extra, user_metadata" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user", + "user.user_extra", + "user.user_metadata" + ] + } + }, + { + "comment": "Join across multiple tables, with conditions on different vindexes, but mergeable through join predicates", + "query": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456", + "plan": { "QueryType": "SELECT", - "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5", + "Original": "SELECT user.id FROM user INNER JOIN music_extra ON user.id = music_extra.user_id INNER JOIN music ON music_extra.user_id = music.user_id WHERE user.id = 123 and music.id = 456", "Instructions": { "OperatorType": "Route", - "Variant": "None", + "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) and music.user_id = 5", - "Table": "music" + "FieldQuery": "select `user`.id from `user`, music_extra, music where 1 != 1", + "Query": "select `user`.id from `user`, music_extra, music where music.id = 456 and `user`.id = 123 and `user`.id = music_extra.user_id and music_extra.user_id = music.user_id", + "Table": "`user`, music, music_extra", + "Values": [ + "INT64(123)" + ], + "Vindex": "user_index" }, "TablesUsed": [ - "user.music" + "user.music", + "user.music_extra", + "user.user" ] } }, { - "comment": "`IN` comparison on Vindex with `None` subquery, as non-routing predicate", - "query": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5", - "v3-plan": { + "comment": "SQL_CALC_FOUND_ROWS with vindex lookup", + "query": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2", + "plan": { "QueryType": "SELECT", - "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5", + "Original": "select SQL_CALC_FOUND_ROWS id, name from user where name = 'aa' order by id limit 2", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], + "OperatorType": "SQL_CALC_FOUND_ROWS", "Inputs": [ { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in (null)", - "Table": "music" + "OperatorType": "Limit", + "Count": "INT64(2)", + "Inputs": [ + { + "OperatorType": "VindexLookup", + "Variant": "Equal", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Values": [ + "VARCHAR(\"aa\")" + ], + "Vindex": "name_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, `name`, weight_string(id) from `user` where 1 != 1", + "OrderBy": "(0|2) ASC", + "Query": "select id, `name`, weight_string(id) from `user` where `name` = 'aa' order by id asc limit :__upper_limit", + "ResultColumns": 2, + "Table": "`user`" + } + ] + } + ] }, { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5", - "Table": "music" + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count_star(0) AS count(*)", + "Inputs": [ + { + "OperatorType": "VindexLookup", + "Variant": "Equal", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Values": [ + "VARCHAR(\"aa\")" + ], + "Vindex": "name_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) from `user` where 1 != 1", + "Query": "select count(*) from `user` where `name` = 'aa'", + "Table": "`user`" + } + ] + } + ] } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "`None` route being merged with another route via join predicate on Vindex columns", + "query": "SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5", + "plan": { "QueryType": "SELECT", - "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5", + "Original": "SELECT `music`.id FROM `music` INNER JOIN `user` ON music.user_id = user.id WHERE music.user_id IN (NULL) AND user.id = 5", "Instructions": { "OperatorType": "Route", - "Variant": "Scatter", + "Variant": "None", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) or music.user_id = 5", - "Table": "music" + "FieldQuery": "select music.id from music, `user` where 1 != 1", + "Query": "select music.id from music, `user` where music.user_id in (null) and `user`.id = 5 and music.user_id = `user`.id", + "Table": "`user`, music" }, "TablesUsed": [ - "user.music" + "user.music", + "user.user" ] } }, { - "comment": "Mergeable scatter subquery", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')", - "v3-plan": { + "comment": "Treating single value tuples as `EqualUnique` routes", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (5)) AND music.user_id = 5", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (5)) and music.user_id = 5", + "Table": "music", + "Values": [ + "INT64(5)" ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.genre = 'pop'", - "Table": "music" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "Subquery with `IN` condition using columns with matching lookup vindexes", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3))", "Instructions": { "OperatorType": "Route", - "Variant": "Scatter", + "Variant": "IN", "Keyspace": { "Name": "user", "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.genre = 'pop')", - "Table": "music" + "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in ::__vals)", + "Table": "music", + "Values": [ + "(INT64(1), INT64(2), INT64(3))" + ], + "Vindex": "user_index" }, "TablesUsed": [ "user.music" @@ -6229,61 +3360,25 @@ } }, { - "comment": "Mergeable scatter subquery with `GROUP BY` on unique vindex column", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1 group by music.id", - "Query": "select music.id from music where music.genre = 'pop' group by music.id", - "Table": "music" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "comment": "Subquery with `IN` condition using columns with matching lookup vindexes, with derived table", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) _inner)", "Instructions": { "OperatorType": "Route", - "Variant": "Scatter", + "Variant": "IN", "Keyspace": { "Name": "user", "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select music.id from music where music.genre = 'pop' group by music.id)", - "Table": "music" + "Query": "select music.id from music where music.id in (select _inner.id from (select music.id from music where music.user_id in ::__vals) as _inner)", + "Table": "music", + "Values": [ + "(INT64(1), INT64(2), INT64(3))" + ], + "Vindex": "user_index" }, "TablesUsed": [ "user.music" @@ -6291,57 +3386,25 @@ } }, { - "comment": "Unmergeable scatter subquery with `GROUP BY` on-non vindex column", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.genre)", - "v3-plan": "VT12001: unsupported: in scatter query: GROUP BY column must reference column in SELECT list", - "gen4-plan": { + "comment": "Subquery with `IN` condition using columns with matching lookup vindexes, with inner scatter query", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.genre)", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.foo = 'bar') AND music.user_id IN (3, 4, 5)", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where music.id in (select music.id from music where music.foo = 'bar') and music.user_id in ::__vals", + "Table": "music", + "Values": [ + "(INT64(3), INT64(4), INT64(5))" ], - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "random(0) AS id", - "GroupBy": "(1|2)", - "ResultColumns": 1, - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id, music.genre, weight_string(music.genre) from music where 1 != 1 group by music.genre, weight_string(music.genre)", - "OrderBy": "(1|2) ASC", - "Query": "select music.id, music.genre, weight_string(music.genre) from music where music.genre = 'pop' group by music.genre, weight_string(music.genre) order by music.genre asc", - "Table": "music" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] + "Vindex": "user_index" }, "TablesUsed": [ "user.music" @@ -6349,98 +3412,69 @@ } }, { - "comment": "Unmergeable scatter subquery with LIMIT", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)", - "v3-plan": { + "comment": "Subquery with `IN` condition using columns with matching lookup vindexes", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) and music.user_id = 5", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (1, 2, 3)) and music.user_id = 5", + "Table": "music", + "Values": [ + "INT64(5)" ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.genre = 'pop' limit :__upper_limit", - "Table": "music" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "Subquery with `IN` condition using columns with matching lookup vindexes, but not a top level predicate", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (1, 2, 3)) OR music.user_id = 5", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.genre = 'pop' limit :__upper_limit", - "Table": "music" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (1, 2, 3)) or music.user_id = 5", + "Table": "music" + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "`IN` comparison on Vindex with `None` subquery, as routing predicate", + "query": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5", + "Instructions": { + "OperatorType": "Route", + "Variant": "None", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) and music.user_id = 5", + "Table": "music" }, "TablesUsed": [ "user.music" @@ -6448,69 +3482,43 @@ } }, { - "comment": "Mergeable subquery with `MAX` aggregate and grouped by unique vindex", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)", - "v3-plan": { + "comment": "`IN` comparison on Vindex with `None` subquery, as non-routing predicate", + "query": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)", + "Original": "SELECT `music`.id FROM `music` WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select max(music.id) from music where 1 != 1 group by music.user_id", - "Query": "select max(music.id) from music where music.user_id in ::__vals group by music.user_id", - "Table": "music", - "Values": [ - "(INT64(5), INT64(6))" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where music.id in (select music.id from music where music.user_id in (null)) or music.user_id = 5", + "Table": "music" + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "Mergeable scatter subquery", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop')", "Instructions": { "OperatorType": "Route", - "Variant": "IN", + "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id in (select max(music.id) from music where music.user_id in ::__vals group by music.user_id)", - "Table": "music", - "Values": [ - "(INT64(5), INT64(6))" - ], - "Vindex": "user_index" + "Query": "select music.id from music where music.id in (select music.id from music where music.genre = 'pop')", + "Table": "music" }, "TablesUsed": [ "user.music" @@ -6518,11 +3526,33 @@ } }, { - "comment": "Unmergeable subquery with `MAX` aggregate", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))", - "v3-plan": { + "comment": "Mergeable scatter subquery with `GROUP BY` on unique vindex column", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.id)", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where music.id in (select music.id from music where music.genre = 'pop' group by music.id)", + "Table": "music" + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "Unmergeable scatter subquery with `GROUP BY` on-non vindex column", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.genre)", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' GROUP BY music.genre)", "Instructions": { "OperatorType": "Subquery", "Variant": "PulloutIn", @@ -6532,28 +3562,29 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "max(0)", + "Variant": "Ordered", + "Aggregates": "any_value(0) AS id", + "GroupBy": "(1|2)", + "ResultColumns": 1, "Inputs": [ { "OperatorType": "Route", - "Variant": "IN", + "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select max(music.id) from music where 1 != 1", - "Query": "select max(music.id) from music where music.user_id in ::__vals", - "Table": "music", - "Values": [ - "(INT64(5), INT64(6))" - ], - "Vindex": "user_index" + "FieldQuery": "select music.id, music.genre, weight_string(music.genre) from music where 1 != 1 group by music.genre, weight_string(music.genre)", + "OrderBy": "(1|2) ASC", + "Query": "select music.id, music.genre, weight_string(music.genre) from music where music.genre = 'pop' group by music.genre, weight_string(music.genre) order by music.genre asc", + "Table": "music" } ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -6569,11 +3600,18 @@ "Vindex": "music_user_map" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "Unmergeable scatter subquery with LIMIT", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.genre = 'pop' LIMIT 10)", "Instructions": { "OperatorType": "Subquery", "Variant": "PulloutIn", @@ -6583,28 +3621,25 @@ ], "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "max(0) AS max(music.id)", + "InputName": "SubQuery", + "OperatorType": "Limit", + "Count": "INT64(10)", "Inputs": [ { "OperatorType": "Route", - "Variant": "IN", + "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select max(music.id) from music where 1 != 1", - "Query": "select max(music.id) from music where music.user_id in ::__vals", - "Table": "music", - "Values": [ - "(INT64(5), INT64(6))" - ], - "Vindex": "user_index" + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where music.genre = 'pop' limit :__upper_limit", + "Table": "music" } ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -6627,94 +3662,25 @@ } }, { - "comment": "Mergeable subquery with `MAX` aggregate with `EqualUnique` route operator", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select max(music.id) from music where 1 != 1", - "Query": "select max(music.id) from music where music.user_id = 5", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "comment": "Mergeable subquery with `MAX` aggregate and grouped by unique vindex", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6) GROUP BY music.user_id)", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select max(music.id) from music where 1 != 1", - "Query": "select max(music.id) from music where music.user_id = 5", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music where music.id in (select max(music.id) from music where music.user_id in ::__vals group by music.user_id)", + "Table": "music", + "Values": [ + "(INT64(5), INT64(6))" + ], + "Vindex": "user_index" }, "TablesUsed": [ "user.music" @@ -6722,11 +3688,11 @@ } }, { - "comment": "Mergeable subquery with `LIMIT` due to `EqualUnique` route", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)", - "v3-plan": { + "comment": "Unmergeable subquery with `MAX` aggregate", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id IN (5, 6))", "Instructions": { "OperatorType": "Subquery", "Variant": "PulloutIn", @@ -6736,21 +3702,30 @@ ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select max(music.id) from music where 1 != 1", - "Query": "select max(music.id) from music where music.user_id = 5 limit 10", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" + "InputName": "SubQuery", + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "max(0) AS max(music.id)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select max(music.id) from music where 1 != 1", + "Query": "select max(music.id) from music where music.user_id in ::__vals", + "Table": "music", + "Values": [ + "(INT64(5), INT64(6))" + ], + "Vindex": "user_index" + } + ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -6766,11 +3741,18 @@ "Vindex": "music_user_map" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "Mergeable subquery with `MAX` aggregate with `EqualUnique` route operator", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5)", "Instructions": { "OperatorType": "Subquery", "Variant": "PulloutIn", @@ -6780,6 +3762,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { @@ -6787,7 +3770,7 @@ "Sharded": true }, "FieldQuery": "select max(music.id) from music where 1 != 1", - "Query": "select max(music.id) from music where music.user_id = 5 limit 10", + "Query": "select max(music.id) from music where music.user_id = 5", "Table": "music", "Values": [ "INT64(5)" @@ -6795,6 +3778,7 @@ "Vindex": "user_index" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -6817,11 +3801,11 @@ } }, { - "comment": "Mergeable subquery with multiple levels of derived statements", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)", - "v3-plan": { + "comment": "Mergeable subquery with `LIMIT` due to `EqualUnique` route", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)", + "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT MAX(music.id) FROM music WHERE music.user_id = 5 LIMIT 10)", "Instructions": { "OperatorType": "Subquery", "Variant": "PulloutIn", @@ -6831,14 +3815,15 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select * from (select * from (select music.id from music where 1 != 1) as subquery_for_limit where 1 != 1) as subquery_for_limit where 1 != 1", - "Query": "select * from (select * from (select music.id from music where music.user_id = 5 limit 10) as subquery_for_limit) as subquery_for_limit", + "FieldQuery": "select max(music.id) from music where 1 != 1", + "Query": "select max(music.id) from music where music.user_id = 5 limit 10", "Table": "music", "Values": [ "INT64(5)" @@ -6846,6 +3831,7 @@ "Vindex": "user_index" }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -6861,9 +3847,16 @@ "Vindex": "music_user_map" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "Mergeable subquery with multiple levels of derived statements", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)", + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id = 5 LIMIT 10) subquery_for_limit) subquery_for_limit)", "Instructions": { @@ -6889,73 +3882,7 @@ { "comment": "Mergeable subquery with multiple levels of derived statements, using a single value `IN` predicate", "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in ::__vals limit :__upper_limit", - "Table": "music", - "Values": [ - "(INT64(5))" - ], - "Vindex": "user_index" - } - ] - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5) LIMIT 10) subquery_for_limit) subquery_for_limit)", "Instructions": { @@ -6981,73 +3908,7 @@ { "comment": "Unmergeable subquery with multiple levels of derived statements, using a multi value `IN` predicate", "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in ::__vals limit :__upper_limit", - "Table": "music", - "Values": [ - "(INT64(5), INT64(6))" - ], - "Vindex": "user_index" - } - ] - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music WHERE music.user_id IN (5, 6) LIMIT 10) subquery_for_limit) subquery_for_limit)", "Instructions": { @@ -7059,6 +3920,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "SimpleProjection", "Columns": [ 0 @@ -7088,6 +3950,7 @@ ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -7112,69 +3975,7 @@ { "comment": "Unmergeable subquery with multiple levels of derived statements", "query": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music limit :__upper_limit", - "Table": "music" - } - ] - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT * FROM (SELECT * FROM (SELECT music.id FROM music LIMIT 10) subquery_for_limit) subquery_for_limit)", "Instructions": { @@ -7186,6 +3987,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "SimpleProjection", "Columns": [ 0 @@ -7206,58 +4008,12 @@ "Query": "select music.id from music limit :__upper_limit", "Table": "music" } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__vals", - "Table": "music", - "Values": [ - "::__sq1" - ], - "Vindex": "music_user_map" - } - ] - }, - "TablesUsed": [ - "user.music" - ] - } - }, - { - "comment": "`None` subquery as top level predicate - outer query changes from `Scatter` to `None` on merge", - "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in (null)", - "Table": "music" + ] + } + ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -7273,9 +4029,16 @@ "Vindex": "music_user_map" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "`None` subquery as top level predicate - outer query changes from `Scatter` to `None` on merge", + "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))", + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL))", "Instructions": { @@ -7297,47 +4060,7 @@ { "comment": "`None` subquery as top level predicate - outer query changes from `EqualUnique` to `None` on merge", "query": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in (null)", - "Table": "music" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id = 5 and :__sq_has_values1 = 1 and music.id in ::__sq1", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music WHERE music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) AND music.user_id = 5", "Instructions": { @@ -7359,43 +4082,7 @@ { "comment": "`None` subquery nested inside `OR` expression - outer query keeps routing information", "query": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "None", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.user_id in (null)", - "Table": "music" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where :__sq_has_values1 = 1 and music.id in ::__sq1 or music.user_id = 5", - "Table": "music" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music WHERE (music.id IN (SELECT music.id FROM music WHERE music.user_id IN (NULL)) OR music.user_id = 5)", "Instructions": { @@ -7417,48 +4104,7 @@ { "comment": "Joining with a subquery that uses an aggregate column and an `EqualUnique` route can be merged together", "query": "SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "music_id": 0 - }, - "TableName": "music_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music", - "Table": "music" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from (select max(id) as maxt from music where 1 != 1) as other where 1 != 1", - "Query": "select 1 from (select max(id) as maxt from music where music.user_id = 5) as other where other.maxt = :music_id", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music INNER JOIN (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other ON other.maxt = music.id", "Instructions": { @@ -7468,8 +4114,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music, (select max(id) as maxt from music where 1 != 1) as other where 1 != 1", - "Query": "select music.id from music, (select max(id) as maxt from music where music.user_id = 5) as other where other.maxt = music.id", + "FieldQuery": "select music.id from (select max(id) as maxt from music where 1 != 1) as other, music where 1 != 1", + "Query": "select music.id from (select max(id) as maxt from music where music.user_id = 5) as other, music where other.maxt = music.id", "Table": "music", "Values": [ "INT64(5)" @@ -7484,22 +4130,7 @@ { "comment": "Joining with a subquery that uses an `EqualUnique` route can be merged", "query": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music join (select id from music where 1 != 1) as other on other.id = music.id where 1 != 1", - "Query": "select music.id from music join (select id from music where music.user_id = 5) as other on other.id = music.id", - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id = 5) other ON other.id = music.id", "Instructions": { @@ -7509,8 +4140,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music, (select id from music where 1 != 1) as other where 1 != 1", - "Query": "select music.id from music, (select id from music where music.user_id = 5) as other where other.id = music.id", + "FieldQuery": "select music.id from (select id from music where 1 != 1) as other, music where 1 != 1", + "Query": "select music.id from (select id from music where music.user_id = 5) as other, music where other.id = music.id", "Table": "music", "Values": [ "INT64(5)" @@ -7525,22 +4156,7 @@ { "comment": "Joining with a subquery that has an `IN` route can be merged", "query": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music join (select id from music where 1 != 1) as other on other.id = music.id where 1 != 1", - "Query": "select music.id from music join (select id from music where music.user_id in (5, 6, 7)) as other on other.id = music.id", - "Table": "music" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT music.id FROM music INNER JOIN (SELECT id FROM music WHERE music.user_id IN (5, 6, 7)) other ON other.id = music.id", "Instructions": { @@ -7550,8 +4166,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from music, (select id from music where 1 != 1) as other where 1 != 1", - "Query": "select music.id from music, (select id from music where music.user_id in ::__vals) as other where other.id = music.id", + "FieldQuery": "select music.id from (select id from music where 1 != 1) as other, music where 1 != 1", + "Query": "select music.id from (select id from music where music.user_id in ::__vals) as other, music where other.id = music.id", "Table": "music", "Values": [ "(INT64(5), INT64(6), INT64(7))" @@ -7566,8 +4182,7 @@ { "comment": "limit on the vtgate has to be executed on the LHS of a join", "query": "select id from user join (select user_id from user_extra limit 10) ue on user.id = ue.user_id", - "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user join (select user_id from user_extra limit 10) ue on user.id = ue.user_id", "Instructions": { @@ -7580,27 +4195,19 @@ "TableName": "user_extra_`user`", "Inputs": [ { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Limit", + "Count": "INT64(10)", "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id from user_extra where 1 != 1", - "Query": "select user_id from user_extra limit :__upper_limit", - "Table": "user_extra" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_id from (select user_id from user_extra where 1 != 1) as ue where 1 != 1", + "Query": "select user_id from (select user_id from user_extra) as ue limit :__upper_limit", + "Table": "user_extra" } ] }, @@ -7630,31 +4237,30 @@ { "comment": "select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id", "query": "select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id", - "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user.a, t.b from user join (select id, count(*) b, req from user_extra group by req, id) as t on user.id = t.id", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "R:0,L:1", + "JoinColumnIndexes": "R:0,L:0", "JoinVars": { - "t_id": 0 + "t_id": 1 }, "TableName": "user_extra_`user`", "Inputs": [ { "OperatorType": "SimpleProjection", "Columns": [ - 0, - 1 + 1, + 0 ], "Inputs": [ { "OperatorType": "Aggregate", "Variant": "Ordered", "Aggregates": "sum_count_star(1) AS b", - "GroupBy": "(0|3), (2|4)", + "GroupBy": "(2|3), (0|4)", "Inputs": [ { "OperatorType": "Route", @@ -7663,9 +4269,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, count(*) as b, req, weight_string(id), weight_string(req) from user_extra where 1 != 1 group by id, weight_string(id), req, weight_string(req)", - "OrderBy": "(0|3) ASC, (2|4) ASC", - "Query": "select id, count(*) as b, req, weight_string(id), weight_string(req) from user_extra group by id, weight_string(id), req, weight_string(req) order by id asc, req asc", + "FieldQuery": "select id, count(*) as b, req, weight_string(req), weight_string(id) from user_extra where 1 != 1 group by req, id, weight_string(req), weight_string(id)", + "OrderBy": "(2|3) ASC, (0|4) ASC", + "Query": "select id, count(*) as b, req, weight_string(req), weight_string(id) from user_extra group by req, id, weight_string(req), weight_string(id) order by req asc, id asc", "Table": "user_extra" } ] @@ -7696,67 +4302,85 @@ } }, { - "comment": "cant switch sides for outer joins", - "query": "select id from user left join (select user_id from user_extra limit 10) ue on user.id = ue.user_id", - "plan": "VT12001: unsupported: LEFT JOIN with derived tables" + "comment": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id", + "query": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id", + "Instructions": { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from (select max(id) as maxt from music where 1 != 1) as other, music where 1 != 1", + "Query": "select music.id from (select max(id) as maxt from music where music.user_id = 5) as other, music where other.maxt = music.id", + "Table": "music", + "Values": [ + "INT64(5)" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.music" + ] + } }, { - "comment": "limit on both sides means that we can't evaluate this at all", - "query": "select id from (select id from user limit 10) u join (select user_id from user_extra limit 10) ue on u.id = ue.user_id", - "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery", - "gen4-plan": "VT12001: unsupported: JOIN between derived tables" + "comment": "Earlier columns are in scope in subqueries https://github.com/vitessio/vitess/issues/11246", + "query": "SELECT 1 as x, (SELECT x)", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT 1 as x, (SELECT x)", + "Instructions": { + "OperatorType": "Route", + "Variant": "Reference", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 1 as x, (select x from dual where 1 != 1) from dual where 1 != 1", + "Query": "select 1 as x, (select x from dual) from dual", + "Table": "dual" + }, + "TablesUsed": [ + "main.dual" + ] + } }, { - "comment": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id", - "query": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id", - "v3-plan": { + "comment": "(OR 1 = 0) doesn't cause unnecessary scatter", + "query": "select * from user where id = 1 or 1 = 0", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id", + "Original": "select * from user where id = 1 or 1 = 0", "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "other_maxt": 0 + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "music_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select other.maxt from (select max(id) as maxt from music where 1 != 1) as other where 1 != 1", - "Query": "select other.maxt from (select max(id) as maxt from music where music.user_id = 5) as other", - "Table": "music", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.id = :other_maxt", - "Table": "music", - "Values": [ - ":other_maxt" - ], - "Vindex": "music_user_map" - } - ] - } - }, - "gen4-plan": { + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 1", + "Table": "`user`", + "Values": [ + "INT64(1)" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "(OR 2 < 1) doesn't cause unnecessary scatter", + "query": "select * from user where id = 1 or 2 < 1", + "plan": { "QueryType": "SELECT", - "Original": "SELECT music.id FROM (SELECT MAX(id) as maxt FROM music WHERE music.user_id = 5) other JOIN music ON other.maxt = music.id", + "Original": "select * from user where id = 1 or 2 < 1", "Instructions": { "OperatorType": "Route", "Variant": "EqualUnique", @@ -7764,62 +4388,74 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select music.id from (select max(id) as maxt from music where 1 != 1) as other, music where 1 != 1", - "Query": "select music.id from (select max(id) as maxt from music where music.user_id = 5) as other, music where other.maxt = music.id", - "Table": "music", + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 1", + "Table": "`user`", "Values": [ - "INT64(5)" + "INT64(1)" ], "Vindex": "user_index" }, "TablesUsed": [ - "user.music" + "user.user" ] } }, { - "comment": "Earlier columns are in scope in subqueries https://github.com/vitessio/vitess/issues/11246", - "query": "SELECT 1 as x, (SELECT x)", - "v3-plan": { + "comment": "query with a derived table and dual table in unsharded keyspace", + "query": "SELECT * FROM unsharded_a AS t1 JOIN (SELECT trim((SELECT MAX(name) FROM unsharded_a)) AS name) AS t2 WHERE t1.name >= t2.name ORDER BY t1.name ASC LIMIT 1;", + "plan": { "QueryType": "SELECT", - "Original": "SELECT 1 as x, (SELECT x)", + "Original": "SELECT * FROM unsharded_a AS t1 JOIN (SELECT trim((SELECT MAX(name) FROM unsharded_a)) AS name) AS t2 WHERE t1.name >= t2.name ORDER BY t1.name ASC LIMIT 1;", "Instructions": { "OperatorType": "Route", - "Variant": "Reference", + "Variant": "Unsharded", "Keyspace": { "Name": "main", "Sharded": false }, - "FieldQuery": "select 1 as x, (select x from dual where 1 != 1) from dual where 1 != 1", - "Query": "select 1 as x, (select x from dual) from dual", - "Table": "dual" - } - }, - "gen4-plan": { + "FieldQuery": "select * from unsharded_a as t1 join (select trim((select max(`name`) from unsharded_a where 1 != 1)) as `name` from dual where 1 != 1) as t2 where 1 != 1", + "Query": "select * from unsharded_a as t1 join (select trim((select max(`name`) from unsharded_a)) as `name` from dual) as t2 where t1.`name` >= t2.`name` order by t1.`name` asc limit 1", + "Table": "dual, unsharded_a" + }, + "TablesUsed": [ + "main.dual", + "main.unsharded_a" + ] + } + }, + { + "comment": "subquery having join table on clause, using column reference of outer select table", + "query": "select (select 1 from user u1 join user u2 on u1.id = u2.id and u1.id = u3.id) subquery from user u3 where u3.id = 1", + "plan": { "QueryType": "SELECT", - "Original": "SELECT 1 as x, (SELECT x)", + "Original": "select (select 1 from user u1 join user u2 on u1.id = u2.id and u1.id = u3.id) subquery from user u3 where u3.id = 1", "Instructions": { "OperatorType": "Route", - "Variant": "Reference", + "Variant": "EqualUnique", "Keyspace": { - "Name": "main", - "Sharded": false + "Name": "user", + "Sharded": true }, - "FieldQuery": "select 1 as x, (select x from dual where 1 != 1) from dual where 1 != 1", - "Query": "select 1 as x, (select x from dual) from dual", - "Table": "dual" + "FieldQuery": "select (select 1 from `user` as u1 join `user` as u2 on u1.id = u2.id and u1.id = u3.id where 1 != 1) as subquery from `user` as u3 where 1 != 1", + "Query": "select (select 1 from `user` as u1 join `user` as u2 on u1.id = u2.id and u1.id = u3.id) as subquery from `user` as u3 where u3.id = 1", + "Table": "`user`", + "Values": [ + "INT64(1)" + ], + "Vindex": "user_index" }, "TablesUsed": [ - "main.dual" + "user.user" ] } }, { - "comment": "(OR 1 = 0) doesn't cause unnecessary scatter", - "query": "select * from user where id = 1 or 1 = 0", - "v3-plan": { + "comment": "allow last_insert_id with argument", + "query": "select last_insert_id(id) from user", + "plan": { "QueryType": "SELECT", - "Original": "select * from user where id = 1 or 1 = 0", + "Original": "select last_insert_id(id) from user", "Instructions": { "OperatorType": "Route", "Variant": "Scatter", @@ -7827,110 +4463,288 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 1 or 1 = 0", + "FieldQuery": "select last_insert_id(id) from `user` where 1 != 1", + "Query": "select last_insert_id(id) from `user`", "Table": "`user`" - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "merge subquery using MAX and join into single route", + "query": "select 1 from user join music_extra on user.id = music_extra.user_id where music_extra.music_id = (select max(music_id) from music_extra where user_id = user.id)", + "plan": { "QueryType": "SELECT", - "Original": "select * from user where id = 1 or 1 = 0", + "Original": "select 1 from user join music_extra on user.id = music_extra.user_id where music_extra.music_id = (select max(music_id) from music_extra where user_id = user.id)", "Instructions": { "OperatorType": "Route", - "Variant": "EqualUnique", + "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" + "FieldQuery": "select 1 from `user`, music_extra where 1 != 1", + "Query": "select 1 from `user`, music_extra where music_extra.music_id = (select max(music_id) from music_extra where user_id = `user`.id) and `user`.id = music_extra.user_id", + "Table": "`user`, music_extra" }, "TablesUsed": [ + "user.music_extra", "user.user" ] } }, { - "comment": "(OR 2 < 1) doesn't cause unnecessary scatter", - "query": "select * from user where id = 1 or 2 < 1", - "v3-plan": { + "comment": "Query with non-plannable lookup vindex", + "query": "SELECT * FROM user_metadata WHERE user_metadata.non_planable = 'foo'", + "plan": { "QueryType": "SELECT", - "Original": "select * from user where id = 1 or 2 < 1", + "Original": "SELECT * FROM user_metadata WHERE user_metadata.non_planable = 'foo'", "Instructions": { "OperatorType": "Route", - "Variant": "Scatter", + "Variant": "Equal", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 1 or 2 < 1", - "Table": "`user`" - } - }, - "gen4-plan": { + "FieldQuery": "select * from user_metadata where 1 != 1", + "Query": "select * from user_metadata where user_metadata.non_planable = 'foo'", + "Table": "user_metadata", + "Values": [ + "VARCHAR(\"foo\")" + ], + "Vindex": "non_planable_user_map" + }, + "TablesUsed": [ + "user.user_metadata" + ] + } + }, + { + "comment": "join query with lookup and join on different vindex column", + "query": "select u.id from user u, user_metadata um where u.name = 'foo' and u.id = um.user_id", + "plan": { "QueryType": "SELECT", - "Original": "select * from user where id = 1 or 2 < 1", + "Original": "select u.id from user u, user_metadata um where u.name = 'foo' and u.id = um.user_id", "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", + "OperatorType": "VindexLookup", + "Variant": "Equal", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 1", - "Table": "`user`", "Values": [ - "INT64(1)" + "VARCHAR(\"foo\")" ], - "Vindex": "user_index" + "Vindex": "name_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.id from `user` as u, user_metadata as um where 1 != 1", + "Query": "select u.id from `user` as u, user_metadata as um where u.`name` = 'foo' and u.id = um.user_id", + "Table": "`user`, user_metadata" + } + ] }, "TablesUsed": [ - "user.user" + "user.user", + "user.user_metadata" ] } }, { - "comment": "query with a derived table and dual table in unsharded keyspace", - "query": "SELECT * FROM unsharded_a AS t1 JOIN (SELECT trim((SELECT MAX(name) FROM unsharded_a)) AS name) AS t2 WHERE t1.name >= t2.name ORDER BY t1.name ASC LIMIT 1;", - "v3-plan": { + "comment": "pick email as vindex lookup", + "query": "select * from customer where email = 'a@mail.com'", + "plan": { + "QueryType": "SELECT", + "Original": "select * from customer where email = 'a@mail.com'", "Instructions": { - "FieldQuery": "select * from unsharded_a as t1 join (select trim((select max(`name`) from unsharded_a where 1 != 1)) as `name` from dual where 1 != 1) as t2 where 1 != 1", + "OperatorType": "VindexLookup", + "Variant": "Equal", "Keyspace": { - "Name": "main", - "Sharded": false + "Name": "user", + "Sharded": true }, + "Values": [ + "VARCHAR(\"a@mail.com\")" + ], + "Vindex": "unq_lkp_vdx", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select unq_key, keyspace_id from unq_lkp_idx where 1 != 1", + "Query": "select unq_key, keyspace_id from unq_lkp_idx where unq_key in ::__vals", + "Table": "unq_lkp_idx", + "Values": [ + "::unq_key" + ], + "Vindex": "shard_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from customer where 1 != 1", + "Query": "select * from customer where email = 'a@mail.com'", + "Table": "customer" + } + ] + }, + "TablesUsed": [ + "user.customer" + ] + } + }, + { + "comment": "phone is in backfill vindex - not selected for vindex lookup", + "query": "select * from customer where phone = 123456", + "plan": { + "QueryType": "SELECT", + "Original": "select * from customer where phone = 123456", + "Instructions": { "OperatorType": "Route", - "Query": "select * from unsharded_a as t1 join (select trim((select max(`name`) from unsharded_a)) as `name` from dual) as t2 where t1.`name` >= t2.`name` order by t1.`name` asc limit 1", - "Table": "unsharded_a, dual", - "Variant": "Unsharded" + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from customer where 1 != 1", + "Query": "select * from customer where phone = 123456", + "Table": "customer" }, - "Original": "SELECT * FROM unsharded_a AS t1 JOIN (SELECT trim((SELECT MAX(name) FROM unsharded_a)) AS name) AS t2 WHERE t1.name >= t2.name ORDER BY t1.name ASC LIMIT 1;", - "QueryType": "SELECT" - }, - "gen4-plan": { + "TablesUsed": [ + "user.customer" + ] + } + }, + { + "comment": "email vindex is costly than phone vindex - but phone vindex is backfiling hence ignored", + "query": "select * from customer where email = 'a@mail.com' and phone = 123456", + "plan": { + "QueryType": "SELECT", + "Original": "select * from customer where email = 'a@mail.com' and phone = 123456", "Instructions": { - "FieldQuery": "select * from unsharded_a as t1 join (select trim((select max(`name`) from unsharded_a where 1 != 1)) as `name` from dual where 1 != 1) as t2 where 1 != 1", + "OperatorType": "VindexLookup", + "Variant": "Equal", "Keyspace": { - "Name": "main", - "Sharded": false + "Name": "user", + "Sharded": true }, - "OperatorType": "Route", - "Query": "select * from unsharded_a as t1 join (select trim((select max(`name`) from unsharded_a)) as `name` from dual) as t2 where t1.`name` >= t2.`name` order by t1.`name` asc limit 1", - "Table": "dual, unsharded_a", - "Variant": "Unsharded" + "Values": [ + "VARCHAR(\"a@mail.com\")" + ], + "Vindex": "unq_lkp_vdx", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select unq_key, keyspace_id from unq_lkp_idx where 1 != 1", + "Query": "select unq_key, keyspace_id from unq_lkp_idx where unq_key in ::__vals", + "Table": "unq_lkp_idx", + "Values": [ + "::unq_key" + ], + "Vindex": "shard_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from customer where 1 != 1", + "Query": "select * from customer where email = 'a@mail.com' and phone = 123456", + "Table": "customer" + } + ] }, - "Original": "SELECT * FROM unsharded_a AS t1 JOIN (SELECT trim((SELECT MAX(name) FROM unsharded_a)) AS name) AS t2 WHERE t1.name >= t2.name ORDER BY t1.name ASC LIMIT 1;", + "TablesUsed": [ + "user.customer" + ] + } + }, + { + "comment": "predicate order changed: email vindex is costly than phone vindex - but phone vindex is backfiling hence ignored", + "query": "select * from customer where phone = 123456 and email = 'a@mail.com'", + "plan": { "QueryType": "SELECT", + "Original": "select * from customer where phone = 123456 and email = 'a@mail.com'", + "Instructions": { + "OperatorType": "VindexLookup", + "Variant": "Equal", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Values": [ + "VARCHAR(\"a@mail.com\")" + ], + "Vindex": "unq_lkp_vdx", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select unq_key, keyspace_id from unq_lkp_idx where 1 != 1", + "Query": "select unq_key, keyspace_id from unq_lkp_idx where unq_key in ::__vals", + "Table": "unq_lkp_idx", + "Values": [ + "::unq_key" + ], + "Vindex": "shard_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from customer where 1 != 1", + "Query": "select * from customer where phone = 123456 and email = 'a@mail.com'", + "Table": "customer" + } + ] + }, "TablesUsed": [ - "main.dual", - "main.unsharded_a" + "user.customer" ] } } diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.json b/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.json index 5817157752b..02fd7330a8f 100644 --- a/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.json +++ b/go/vt/vtgate/planbuilder/testdata/select_cases_with_default.json @@ -2,47 +2,7 @@ { "comment": "EXISTS subquery when the default ks is different than the inner query", "query": "select exists(select * from user where id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select exists(select * from user where id = 5)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutExists", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where id = 5 limit 1", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "second_user", - "Sharded": true - }, - "FieldQuery": "select :__sq_has_values1 from dual where 1 != 1", - "Query": "select :__sq_has_values1 from dual", - "Table": "dual" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select exists(select * from user where id = 5)", "Instructions": { @@ -66,4 +26,4 @@ ] } } -] \ No newline at end of file +] diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.json b/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.json index 822ed6c2307..6f1145b345e 100644 --- a/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.json +++ b/go/vt/vtgate/planbuilder/testdata/select_cases_with_user_as_default.json @@ -2,26 +2,7 @@ { "comment": "EXISTS subquery", "query": "select exists(select * from user where id = 5)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select exists(select * from user where id = 5)", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select exists (select 1 from `user` where 1 != 1) from dual where 1 != 1", - "Query": "select exists (select 1 from `user` where id = 5 limit 1) from dual", - "Table": "dual", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select exists(select * from user where id = 5)", "Instructions": { @@ -45,4 +26,4 @@ ] } } -] \ No newline at end of file +] diff --git a/go/vt/vtgate/planbuilder/testdata/set_cases.json b/go/vt/vtgate/planbuilder/testdata/set_cases.json index b6b14665f1a..00ead0033f5 100644 --- a/go/vt/vtgate/planbuilder/testdata/set_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/set_cases.json @@ -80,10 +80,10 @@ }, { "comment": "set UDV to expression that can't be evaluated at vtgate", - "query": "set @foo = CONCAT('Any','Expression','Is','Valid')", + "query": "set @foo = SOUNDEX('Hello')", "plan": { "QueryType": "SET", - "Original": "set @foo = CONCAT('Any','Expression','Is','Valid')", + "Original": "set @foo = SOUNDEX('Hello')", "Instructions": { "OperatorType": "Set", "Ops": [ @@ -101,7 +101,7 @@ "Sharded": false }, "TargetDestination": "AnyShard()", - "Query": "select CONCAT('Any', 'Expression', 'Is', 'Valid') from dual", + "Query": "select SOUNDEX('Hello') from dual", "SingleShardOnly": true } ] diff --git a/go/vt/vtgate/planbuilder/testdata/show_cases.json b/go/vt/vtgate/planbuilder/testdata/show_cases.json index 3750aa9c4bc..c20a1c79f5a 100644 --- a/go/vt/vtgate/planbuilder/testdata/show_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/show_cases.json @@ -165,9 +165,9 @@ "Charset": "VARCHAR", "Default collation": "VARCHAR", "Description": "VARCHAR", - "Maxlen": "INT32" + "Maxlen": "UINT32" }, - "RowCount": 2 + "RowCount": 37 } } }, @@ -535,7 +535,7 @@ "Sharded": true }, "TargetDestination": "AllShards()", - "Query": "SELECT * FROM _vt.schema_migrations where migration_uuid LIKE '%format' OR migration_context LIKE '%format' OR migration_status LIKE '%format'" + "Query": "show vitess_migrations from `user` like '%format'" } } }, @@ -552,7 +552,7 @@ "Sharded": true }, "TargetDestination": "AllShards()", - "Query": "SELECT * FROM _vt.schema_migrations where id = 5" + "Query": "show vitess_migrations from `user` where id = 5" } } }, @@ -716,7 +716,7 @@ "Fields": { "Tables": "VARCHAR" }, - "RowCount": 10 + "RowCount": 11 } } }, diff --git a/go/vt/vtgate/planbuilder/testdata/symtab_cases.json b/go/vt/vtgate/planbuilder/testdata/symtab_cases.json index 40558770196..db9fe66d41e 100644 --- a/go/vt/vtgate/planbuilder/testdata/symtab_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/symtab_cases.json @@ -2,44 +2,7 @@ { "comment": "Tests in this file are for testing symtab functionality\n#\n# Column names need not be qualified if they are predefined in vschema and unambiguous.", "query": "select predef2, predef3 from user join unsharded on predef2 = predef3", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select predef2, predef3 from user join unsharded on predef2 = predef3", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "predef2": 0 - }, - "TableName": "`user`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select predef2 from `user` where 1 != 1", - "Query": "select predef2 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select predef3 from unsharded where 1 != 1", - "Query": "select predef3 from unsharded where predef3 = :predef2", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select predef2, predef3 from user join unsharded on predef2 = predef3", "Instructions": { @@ -84,7 +47,6 @@ { "comment": "predef1 is in both user and unsharded. So, it's ambiguous.", "query": "select predef1, predef3 from user join unsharded on predef1 = predef3", - "v3-plan": "VT03019: symbol predef1 not found", - "gen4-plan": "Column 'predef1' in field list is ambiguous" + "plan": "Column 'predef1' in field list is ambiguous" } ] diff --git a/go/vt/vtgate/planbuilder/testdata/sysschema_default.json b/go/vt/vtgate/planbuilder/testdata/sysschema_default.json index 3dcf08b102b..0d2bbfa4adc 100644 --- a/go/vt/vtgate/planbuilder/testdata/sysschema_default.json +++ b/go/vt/vtgate/planbuilder/testdata/sysschema_default.json @@ -2,22 +2,7 @@ { "comment": "max_allowed_packet", "query": "select @@max_allowed_packet from dual", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select @@max_allowed_packet from dual", - "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select @@max_allowed_packet from dual where 1 != 1", - "Query": "select @@max_allowed_packet from dual", - "Table": "dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select @@max_allowed_packet from dual", "Instructions": { @@ -39,23 +24,7 @@ { "comment": "unqualified table name", "query": "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t join information_schema.`columns` as c on c.table_schema = t.table_schema and c.table_name = t.table_name where 1 != 1", - "Query": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t join information_schema.`columns` as c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = :__vtschemaname and c.table_schema = :__vtschemaname order by t.table_schema asc, t.table_name asc, c.column_name asc", - "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]", - "Table": "information_schema.`tables`, information_schema.`columns`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select t.table_schema,t.table_name,c.column_name,c.column_type from tables t join columns c on c.table_schema = t.table_schema and c.table_name = t.table_name where t.table_schema = 'user' and c.table_schema = 'user' order by t.table_schema,t.table_name,c.column_name", "Instructions": { @@ -66,7 +35,7 @@ "Sharded": false }, "FieldQuery": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t, information_schema.`columns` as c where 1 != 1", - "Query": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t, information_schema.`columns` as c where t.table_schema = :__vtschemaname and c.table_schema = :__vtschemaname and c.table_schema = t.table_schema and c.table_name = t.table_name order by t.table_schema asc, t.table_name asc, c.column_name asc", + "Query": "select t.table_schema, t.table_name, c.column_name, c.column_type from information_schema.`tables` as t, information_schema.`columns` as c where t.table_schema = :__vtschemaname /* VARCHAR */ and c.table_schema = :__vtschemaname /* VARCHAR */ and c.table_schema = t.table_schema and c.table_name = t.table_name order by t.table_schema asc, t.table_name asc, c.column_name asc", "SysTableTableSchema": "[VARCHAR(\"user\")]", "Table": "information_schema.`columns`, information_schema.`tables`" } @@ -75,23 +44,7 @@ { "comment": "system schema query as a subquery", "query": "SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select (select 1 from information_schema.schemata where 1 != 1) from dual where 1 != 1", - "Query": "select (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) from dual", - "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]", - "Table": "dual" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1);", "Instructions": { @@ -102,7 +55,7 @@ "Sharded": false }, "FieldQuery": "select (select 1 from information_schema.schemata where 1 != 1) from dual where 1 != 1", - "Query": "select (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) from dual", + "Query": "select (select 1 from information_schema.schemata where schema_name = :__vtschemaname /* VARCHAR */ limit 1) from dual", "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]", "Table": "dual" }, @@ -114,23 +67,7 @@ { "comment": "system schema query as a derived table", "query": "SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x", - "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from (select 1 from information_schema.schemata where 1 != 1) as x where 1 != 1", - "Query": "select * from (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) as x", - "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]", - "Table": "information_schema.schemata" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT * from (SELECT 1 FROM information_schema.schemata WHERE schema_name='MyDatabase' LIMIT 1) x", "Instructions": { @@ -141,7 +78,7 @@ "Sharded": false }, "FieldQuery": "select x.`1` from (select 1 from information_schema.schemata where 1 != 1) as x where 1 != 1", - "Query": "select x.`1` from (select 1 from information_schema.schemata where schema_name = :__vtschemaname limit 1) as x", + "Query": "select x.`1` from (select 1 from information_schema.schemata where schema_name = :__vtschemaname /* VARCHAR */ limit 1) as x", "SysTableTableSchema": "[VARCHAR(\"MyDatabase\")]", "Table": "information_schema.schemata" } diff --git a/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json b/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json index ed28ddf599b..331a9cdfa13 100644 --- a/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json @@ -2,26 +2,7 @@ { "comment": "TPC-C select join customer1 and warehouse1", "query": "SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c_discount, c_last, c_credit, w_tax from customer1 as c join warehouse1 as w on c_w_id = w_id where 1 != 1", - "Query": "select c_discount, c_last, c_credit, w_tax from customer1 as c join warehouse1 as w on c_w_id = w_id where w_id = 1 and c_d_id = 15 and c_id = 10", - "Table": "customer1, warehouse1", - "Values": [ - "INT64(1)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c_discount, c_last, c_credit, w_tax FROM customer1 AS c JOIN warehouse1 AS w ON c_w_id=w_id WHERE w_id = 1 AND c_d_id = 15 AND c_id = 10", "Instructions": { @@ -48,26 +29,7 @@ { "comment": "TPC-C select district1 for update", "query": "SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select d_next_o_id, d_tax from district1 where 1 != 1", - "Query": "select d_next_o_id, d_tax from district1 where d_w_id = 15 and d_id = 95 for update", - "Table": "district1", - "Values": [ - "INT64(15)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT d_next_o_id, d_tax FROM district1 WHERE d_w_id = 15 AND d_id = 95 FOR UPDATE", "Instructions": { @@ -93,30 +55,7 @@ { "comment": "TPC-C update district1 unique", "query": "UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update district1 set d_next_o_id = 56 where d_id = 9842 and d_w_id = 8546", - "Table": "district1", - "Values": [ - "INT64(8546)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.district1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE district1 SET d_next_o_id = 56 WHERE d_id = 9842 AND d_w_id= 8546", "Instructions": { @@ -127,7 +66,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update district1 set d_next_o_id = 56 where d_id = 9842 and d_w_id = 8546", "Table": "district1", "Values": [ @@ -154,7 +92,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into orders1(o_id, o_d_id, o_w_id, o_c_id, o_entry_d, o_ol_cnt, o_all_local) values (334983, 59896, :_o_w_id_0, 156, now(), 781038, 'hello')", "TableName": "orders1", "VindexValues": { @@ -180,7 +117,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into new_orders1(no_o_id, no_d_id, no_w_id) values (8, 9, :_no_w_id_0)", "TableName": "new_orders1", "VindexValues": { @@ -195,26 +131,7 @@ { "comment": "TPC-C select unique item1", "query": "SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select i_price, i_name, i_data from item1 where 1 != 1", - "Query": "select i_price, i_name, i_data from item1 where i_id = 9654", - "Table": "item1", - "Values": [ - "INT64(9654)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT i_price, i_name, i_data FROM item1 WHERE i_id = 9654", "Instructions": { @@ -240,26 +157,7 @@ { "comment": "TPC-C select stock1 for update", "query": "SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where 1 != 1", - "Query": "select s_quantity, s_data, s_dist_01 as s_dist from stock1 where s_i_id = 2198 and s_w_id = 89 for update", - "Table": "stock1", - "Values": [ - "INT64(89)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT s_quantity, s_data, s_dist_01 s_dist FROM stock1 WHERE s_i_id = 2198 AND s_w_id = 89 FOR UPDATE", "Instructions": { @@ -285,30 +183,7 @@ { "comment": "TPC-C update stock1", "query": "UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update stock1 set s_quantity = 894 where s_i_id = 156 and s_w_id = 6", - "Table": "stock1", - "Values": [ - "INT64(6)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.stock1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE stock1 SET s_quantity = 894 WHERE s_i_id = 156 AND s_w_id= 6", "Instructions": { @@ -319,7 +194,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update stock1 set s_quantity = 894 where s_i_id = 156 and s_w_id = 6", "Table": "stock1", "Values": [ @@ -346,7 +220,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into order_line1(ol_o_id, ol_d_id, ol_w_id, ol_number, ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_dist_info) values (648, 36812, :_ol_w_id_0, 4946378, 3, 7, 89, 1, 'info')", "TableName": "order_line1", "VindexValues": { @@ -361,30 +234,7 @@ { "comment": "TPC-C update warehouse1 unique", "query": "UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update warehouse1 set w_ytd = w_ytd + 946879 where w_id = 3", - "Table": "warehouse1", - "Values": [ - "INT64(3)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.warehouse1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE warehouse1 SET w_ytd = w_ytd + 946879 WHERE w_id = 3", "Instructions": { @@ -395,7 +245,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update warehouse1 set w_ytd = w_ytd + 946879 where w_id = 3", "Table": "warehouse1", "Values": [ @@ -411,26 +260,7 @@ { "comment": "TPC-C select warehouse1 unique", "query": "SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where 1 != 1", - "Query": "select w_street_1, w_street_2, w_city, w_state, w_zip, w_name from warehouse1 where w_id = 998", - "Table": "warehouse1", - "Values": [ - "INT64(998)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse1 WHERE w_id = 998", "Instructions": { @@ -456,30 +286,7 @@ { "comment": "TPC-C update district1 unique", "query": "UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update district1 set d_ytd = d_ytd + 2 where d_w_id = 89 and d_id = 9", - "Table": "district1", - "Values": [ - "INT64(89)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.district1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE district1 SET d_ytd = d_ytd + 2 WHERE d_w_id = 89 AND d_id= 9", "Instructions": { @@ -490,7 +297,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update district1 set d_ytd = d_ytd + 2 where d_w_id = 89 and d_id = 9", "Table": "district1", "Values": [ @@ -506,26 +312,7 @@ { "comment": "TPC-C select district1 unique", "query": "SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where 1 != 1", - "Query": "select d_street_1, d_street_2, d_city, d_state, d_zip, d_name from district1 where d_w_id = 896 and d_id = 9", - "Table": "district1", - "Values": [ - "INT64(896)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district1 WHERE d_w_id = 896 AND d_id = 9", "Instructions": { @@ -551,26 +338,7 @@ { "comment": "TPC-C select aggr from customer1", "query": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1", - "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 5 and c_d_id = 1 and c_last = 'last'", - "Table": "customer1", - "Values": [ - "INT64(5)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 5 AND c_d_id= 1 AND c_last='last'", "Instructions": { @@ -596,26 +364,7 @@ { "comment": "TPC-C select customer1 order by", "query": "SELECT c_id FROM customer1 WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c_id FROM customer1 WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c_id from customer1 where 1 != 1", - "Query": "select c_id from customer1 where c_w_id = 8 and c_d_id = 5 and c_last = 'item_last' order by c_first asc", - "Table": "customer1", - "Values": [ - "INT64(8)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c_id FROM customer1 WHERE c_w_id = 8 AND c_d_id = 5 AND c_last='item_last' ORDER BY c_first", "Instructions": { @@ -641,26 +390,7 @@ { "comment": "TPC-C select for update customer1 unique", "query": "SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where 1 != 1", - "Query": "select c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since from customer1 where c_w_id = 8965 and c_d_id = 1 and c_id = 9 for update", - "Table": "customer1", - "Values": [ - "INT64(8965)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_since FROM customer1 WHERE c_w_id = 8965 AND c_d_id = 1 AND c_id = 9 FOR UPDATE", "Instructions": { @@ -686,26 +416,7 @@ { "comment": "TPC-C select customer1 unique", "query": "SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c_data from customer1 where 1 != 1", - "Query": "select c_data from customer1 where c_w_id = 32 and c_d_id = 68 and c_id = 5", - "Table": "customer1", - "Values": [ - "INT64(32)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c_data FROM customer1 WHERE c_w_id = 32 AND c_d_id=68 AND c_id = 5", "Instructions": { @@ -731,18 +442,17 @@ { "comment": "TPC-C update customer1 unique and float value", "query": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98", - "v3-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98", "Instructions": { "OperatorType": "Update", - "Variant": "Equal", + "Variant": "EqualUnique", "Keyspace": { "Name": "main", "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301, c_data = 'i am data' where c_w_id = 20 and c_d_id = 387 and c_id = 98", "Table": "customer1", "Values": [ @@ -753,58 +463,12 @@ "TablesUsed": [ "main.customer1" ] - }, - "gen4-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301, c_data='i am data' WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98", - "Instructions": { - "OperatorType": "Update", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301, c_data = 'i am data' where c_w_id = 20 and c_d_id = 387 and c_id = 98", - "Table": "customer1", - "Values": [ - "INT64(20)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.customer1" - ] - } - }, - { - "comment": "TPC-C update customer1 unique and float value", - "query": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301 where c_w_id = 20 and c_d_id = 387 and c_id = 98", - "Table": "customer1", - "Values": [ - "INT64(20)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.customer1" - ] - }, - "gen4-plan": { + } + }, + { + "comment": "TPC-C update customer1 unique and float value", + "query": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98", + "plan": { "QueryType": "UPDATE", "Original": "UPDATE customer1 SET c_balance=508.98, c_ytd_payment=48941.980301 WHERE c_w_id = 20 AND c_d_id=387 AND c_id=98", "Instructions": { @@ -815,7 +479,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update customer1 set c_balance = 508.98, c_ytd_payment = 48941.980301 where c_w_id = 20 and c_d_id = 387 and c_id = 98", "Table": "customer1", "Values": [ @@ -842,7 +505,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "insert into history1(h_c_d_id, h_c_w_id, h_c_id, h_d_id, h_w_id, h_date, h_amount, h_data) values (6809887, 38748, 8746, 210, :_h_w_id_0, now(), 8907, 'data')", "TableName": "history1", "VindexValues": { @@ -857,26 +519,7 @@ { "comment": "TPC-C select aggr customer1", "query": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select count(c_id) as namecnt from customer1 where 1 != 1", - "Query": "select count(c_id) as namecnt from customer1 where c_w_id = 870 and c_d_id = 780 and c_last = 'last'", - "Table": "customer1", - "Values": [ - "INT64(870)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT count(c_id) namecnt FROM customer1 WHERE c_w_id = 870 AND c_d_id= 780 AND c_last='last'", "Instructions": { @@ -902,26 +545,7 @@ { "comment": "TPC-C select order by customer1", "query": "SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c_balance, c_first, c_middle, c_id from customer1 where 1 != 1", - "Query": "select c_balance, c_first, c_middle, c_id from customer1 where c_w_id = 840 and c_d_id = 1 and c_last = 'test' order by c_first asc", - "Table": "customer1", - "Values": [ - "INT64(840)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c_balance, c_first, c_middle, c_id FROM customer1 WHERE c_w_id = 840 AND c_d_id= 1 AND c_last='test' ORDER BY c_first", "Instructions": { @@ -947,26 +571,7 @@ { "comment": "TPC-C select unique customer1", "query": "SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c_balance, c_first, c_middle, c_last from customer1 where 1 != 1", - "Query": "select c_balance, c_first, c_middle, c_last from customer1 where c_w_id = 15 and c_d_id = 5169 and c_id = 1", - "Table": "customer1", - "Values": [ - "INT64(15)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT c_balance, c_first, c_middle, c_last FROM customer1 WHERE c_w_id = 15 AND c_d_id=5169 AND c_id=1", "Instructions": { @@ -992,26 +597,7 @@ { "comment": "TPC-C select order by orders1", "query": "SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select o_id, o_carrier_id, o_entry_d from orders1 where 1 != 1", - "Query": "select o_id, o_carrier_id, o_entry_d from orders1 where o_w_id = 9894 and o_d_id = 3 and o_c_id = 159 order by o_id desc", - "Table": "orders1", - "Values": [ - "INT64(9894)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT o_id, o_carrier_id, o_entry_d FROM orders1 WHERE o_w_id = 9894 AND o_d_id = 3 AND o_c_id = 159 ORDER BY o_id DESC", "Instructions": { @@ -1037,26 +623,7 @@ { "comment": "TPC-C select order_line1", "query": "SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where 1 != 1", - "Query": "select ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d from order_line1 where ol_w_id = 92 and ol_d_id = 5 and ol_o_id = 1", - "Table": "order_line1", - "Values": [ - "INT64(92)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line1 WHERE ol_w_id = 92 AND ol_d_id = 5 AND ol_o_id = 1", "Instructions": { @@ -1082,26 +649,7 @@ { "comment": "TPC-C select for update new_orders1", "query": "SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select no_o_id from new_orders1 where 1 != 1", - "Query": "select no_o_id from new_orders1 where no_d_id = 689 and no_w_id = 15 order by no_o_id asc limit 1 for update", - "Table": "new_orders1", - "Values": [ - "INT64(15)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT no_o_id FROM new_orders1 WHERE no_d_id = 689 AND no_w_id = 15 ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE", "Instructions": { @@ -1127,30 +675,7 @@ { "comment": "TPC-C delete new_orders1", "query": "DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 AND no_w_id = 98465", - "v3-plan": { - "QueryType": "DELETE", - "Original": "DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 AND no_w_id = 98465", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "delete from new_orders1 where no_o_id = 2218 and no_d_id = 358 and no_w_id = 98465", - "Table": "new_orders1", - "Values": [ - "INT64(98465)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.new_orders1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "DELETE FROM new_orders1 WHERE no_o_id = 2218 AND no_d_id = 358 AND no_w_id = 98465", "Instructions": { @@ -1161,7 +686,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from new_orders1 where no_o_id = 2218 and no_d_id = 358 and no_w_id = 98465", "Table": "new_orders1", "Values": [ @@ -1177,26 +701,7 @@ { "comment": "TPC-C select unique orders1", "query": "SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select o_c_id from orders1 where 1 != 1", - "Query": "select o_c_id from orders1 where o_id = 6 and o_d_id = 1983 and o_w_id = 894605", - "Table": "orders1", - "Values": [ - "INT64(894605)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT o_c_id FROM orders1 WHERE o_id = 6 AND o_d_id = 1983 AND o_w_id = 894605", "Instructions": { @@ -1222,30 +727,7 @@ { "comment": "TPC-C update orders1 unique", "query": "UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update orders1 set o_carrier_id = 9 where o_id = 56 and o_d_id = 98 and o_w_id = 897", - "Table": "orders1", - "Values": [ - "INT64(897)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.orders1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE orders1 SET o_carrier_id = 9 WHERE o_id = 56 AND o_d_id = 98 AND o_w_id = 897", "Instructions": { @@ -1256,7 +738,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update orders1 set o_carrier_id = 9 where o_id = 56 and o_d_id = 98 and o_w_id = 897", "Table": "orders1", "Values": [ @@ -1272,30 +753,7 @@ { "comment": "TPC-C update order_line1", "query": "UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update order_line1 set ol_delivery_d = now() where ol_o_id = 235 and ol_d_id = 315 and ol_w_id = 8", - "Table": "order_line1", - "Values": [ - "INT64(8)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.order_line1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE order_line1 SET ol_delivery_d = NOW() WHERE ol_o_id = 235 AND ol_d_id = 315 AND ol_w_id = 8", "Instructions": { @@ -1306,7 +764,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update order_line1 set ol_delivery_d = now() where ol_o_id = 235 and ol_d_id = 315 and ol_w_id = 8", "Table": "order_line1", "Values": [ @@ -1322,26 +779,7 @@ { "comment": "TPC-C select sum order_line1", "query": "SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select sum(ol_amount) as sm from order_line1 where 1 != 1", - "Query": "select sum(ol_amount) as sm from order_line1 where ol_o_id = 680 and ol_d_id = 201 and ol_w_id = 87", - "Table": "order_line1", - "Values": [ - "INT64(87)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT SUM(ol_amount) sm FROM order_line1 WHERE ol_o_id = 680 AND ol_d_id = 201 AND ol_w_id = 87", "Instructions": { @@ -1367,30 +805,7 @@ { "comment": "TPC-C update customer1", "query": "UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160", - "v3-plan": { - "QueryType": "UPDATE", - "Original": "UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160", - "Instructions": { - "OperatorType": "Update", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "update customer1 set c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 where c_id = 6 and c_d_id = 5 and c_w_id = 160", - "Table": "customer1", - "Values": [ - "INT64(160)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.customer1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "UPDATE", "Original": "UPDATE customer1 SET c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_id = 6 AND c_d_id = 5 AND c_w_id = 160", "Instructions": { @@ -1401,7 +816,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "update customer1 set c_balance = c_balance + 988.01, c_delivery_cnt = c_delivery_cnt + 1 where c_id = 6 and c_d_id = 5 and c_w_id = 160", "Table": "customer1", "Values": [ @@ -1417,26 +831,7 @@ { "comment": "TPC-C select unique district1", "query": "SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select d_next_o_id from district1 where 1 != 1", - "Query": "select d_next_o_id from district1 where d_id = 6 and d_w_id = 21", - "Table": "district1", - "Values": [ - "INT64(21)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT d_next_o_id FROM district1 WHERE d_id = 6 AND d_w_id= 21", "Instructions": { @@ -1462,26 +857,7 @@ { "comment": "TPC-C select count distinct stock1 join order_line1", "query": "SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id < 30 AND ol.ol_o_id >= 15 AND s.s_w_id= 12 AND s.s_quantity < 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id < 30 AND ol.ol_o_id >= 15 AND s.s_w_id= 12 AND s.s_quantity < 10", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select count(distinct s.s_i_id) from stock1 as s join order_line1 as ol on ol.ol_w_id = s.s_w_id and ol.ol_i_id = s.s_i_id where 1 != 1", - "Query": "select count(distinct s.s_i_id) from stock1 as s join order_line1 as ol on ol.ol_w_id = s.s_w_id and ol.ol_i_id = s.s_i_id where ol.ol_w_id = 12 and ol.ol_d_id = 1908 and ol.ol_o_id < 30 and ol.ol_o_id >= 15 and s.s_w_id = 12 and s.s_quantity < 10", - "Table": "stock1, order_line1", - "Values": [ - "INT64(12)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT COUNT(DISTINCT(s.s_i_id)) FROM stock1 AS s JOIN order_line1 AS ol ON ol.ol_w_id=s.s_w_id AND ol.ol_i_id=s.s_i_id WHERE ol.ol_w_id = 12 AND ol.ol_d_id = 1908 AND ol.ol_o_id < 30 AND ol.ol_o_id >= 15 AND s.s_w_id= 12 AND s.s_quantity < 10", "Instructions": { @@ -1508,26 +884,7 @@ { "comment": "TPC-C select distinct order_line1", "query": "SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id < 500 AND ol_o_id >= 56", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id < 500 AND ol_o_id >= 56", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select ol_i_id from order_line1 where 1 != 1", - "Query": "select distinct ol_i_id from order_line1 where ol_w_id = 1 and ol_d_id = 156 and ol_o_id < 500 and ol_o_id >= 56", - "Table": "order_line1", - "Values": [ - "INT64(1)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT DISTINCT ol_i_id FROM order_line1 WHERE ol_w_id = 1 AND ol_d_id = 156 AND ol_o_id < 500 AND ol_o_id >= 56", "Instructions": { @@ -1553,26 +910,7 @@ { "comment": "TPC-C", "query": "SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity < 1000", - "v3-plan": { - "QueryType": "SELECT", - "Original": "SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity < 1000", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select count(*) from stock1 where 1 != 1", - "Query": "select count(*) from stock1 where s_w_id = 1 and s_i_id = 8 and s_quantity < 1000", - "Table": "stock1", - "Values": [ - "INT64(1)" - ], - "Vindex": "hash" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "SELECT count(*) FROM stock1 WHERE s_w_id = 1 AND s_i_id = 8 AND s_quantity < 1000", "Instructions": { @@ -1598,56 +936,7 @@ { "comment": "TPC-C select with subquery,aggr,distinct,having,limit", "query": "select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id > 2100 and o_id < 11153 group by o_c_id,o_d_id,o_w_id having count( distinct o_id) > 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id > 2100 and o_id < 11153 group by o_c_id,o_d_id,o_w_id having count( distinct o_id) > 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "JoinVars": { - "o_o_c_id": 3, - "o_o_d_id": 1, - "o_o_w_id": 2 - }, - "TableName": "orders1_orders1", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select o.o_id, o.o_d_id, o.o_w_id, o.o_c_id from orders1 as o where 1 != 1", - "Query": "select o.o_id, o.o_d_id, o.o_w_id, o.o_c_id from orders1 as o", - "Table": "orders1" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select 1 from (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where 1 != 1 group by o_c_id, o_d_id, o_w_id) as t where 1 != 1", - "Query": "select 1 from (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where o_w_id = 1 and o_id > 2100 and o_id < 11153 group by o_c_id, o_d_id, o_w_id having count(distinct o_id) > 1 limit 1) as t where t.o_w_id = :o_o_w_id and t.o_d_id = :o_o_d_id and t.o_c_id = :o_o_c_id", - "Table": "orders1", - "Values": [ - "INT64(1)" - ], - "Vindex": "hash" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select o.o_id,o.o_d_id from orders1 o, (select o_c_id,o_w_id,o_d_id,count(distinct o_w_id),o_id from orders1 where o_w_id=1 and o_id > 2100 and o_id < 11153 group by o_c_id,o_d_id,o_w_id having count( distinct o_id) > 1 limit 1) t where t.o_w_id=o.o_w_id and t.o_d_id=o.o_d_id and t.o_c_id=o.o_c_id limit 1", "Instructions": { @@ -1657,8 +946,8 @@ "Name": "main", "Sharded": true }, - "FieldQuery": "select o.o_id, o.o_d_id from orders1 as o, (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where 1 != 1 group by o_c_id, o_d_id, o_w_id) as t where 1 != 1", - "Query": "select o.o_id, o.o_d_id from orders1 as o, (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where o_w_id = 1 and o_id > 2100 and o_id < 11153 group by o_c_id, o_d_id, o_w_id having count(distinct o_id) > 1 limit 1) as t where t.o_w_id = o.o_w_id and t.o_d_id = o.o_d_id and t.o_c_id = o.o_c_id limit 1", + "FieldQuery": "select o.o_id, o.o_d_id from (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where 1 != 1 group by o_c_id, o_d_id, o_w_id) as t, orders1 as o where 1 != 1", + "Query": "select o.o_id, o.o_d_id from (select o_c_id, o_w_id, o_d_id, count(distinct o_w_id), o_id from orders1 where o_w_id = 1 and o_id > 2100 and o_id < 11153 group by o_c_id, o_d_id, o_w_id having count(distinct o_id) > 1 limit 1) as t, orders1 as o where t.o_w_id = o.o_w_id and t.o_d_id = o.o_d_id and t.o_c_id = o.o_c_id limit 1", "Table": "orders1", "Values": [ "INT64(1)" @@ -1673,30 +962,7 @@ { "comment": "TPC-C delete order_line1", "query": "DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84", - "v3-plan": { - "QueryType": "DELETE", - "Original": "DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "delete from order_line1 where ol_w_id = 178 and ol_d_id = 1 and ol_o_id = 84", - "Table": "order_line1", - "Values": [ - "INT64(178)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.order_line1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "DELETE FROM order_line1 where ol_w_id=178 AND ol_d_id=1 AND ol_o_id=84", "Instructions": { @@ -1707,7 +973,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from order_line1 where ol_w_id = 178 and ol_d_id = 1 and ol_o_id = 84", "Table": "order_line1", "Values": [ @@ -1723,30 +988,7 @@ { "comment": "TPC-C delete orders1", "query": "DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384", - "v3-plan": { - "QueryType": "DELETE", - "Original": "DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "delete from orders1 where o_w_id = 1 and o_d_id = 3 and o_id = 384", - "Table": "orders1", - "Values": [ - "INT64(1)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.orders1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "DELETE FROM orders1 where o_w_id=1 AND o_d_id=3 and o_id=384", "Instructions": { @@ -1757,7 +999,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from orders1 where o_w_id = 1 and o_d_id = 3 and o_id = 384", "Table": "orders1", "Values": [ @@ -1773,30 +1014,7 @@ { "comment": "TPC-C delete history1", "query": "DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10", - "v3-plan": { - "QueryType": "DELETE", - "Original": "DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10", - "Instructions": { - "OperatorType": "Delete", - "Variant": "Equal", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, - "Query": "delete from history1 where h_w_id = 75 and h_d_id = 102 limit 10", - "Table": "history1", - "Values": [ - "INT64(75)" - ], - "Vindex": "hash" - }, - "TablesUsed": [ - "main.history1" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DELETE", "Original": "DELETE FROM history1 where h_w_id=75 AND h_d_id=102 LIMIT 10", "Instructions": { @@ -1807,7 +1025,6 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "MultiShardAutocommit": false, "Query": "delete from history1 where h_w_id = 75 and h_d_id = 102 limit 10", "Table": "history1", "Values": [ @@ -1820,4 +1037,4 @@ ] } } -] \ No newline at end of file +] diff --git a/go/vt/vtgate/planbuilder/testdata/tpch_cases.json b/go/vt/vtgate/planbuilder/testdata/tpch_cases.json index c41dd7468d2..5cb16682a46 100644 --- a/go/vt/vtgate/planbuilder/testdata/tpch_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/tpch_cases.json @@ -2,20 +2,17 @@ { "comment": "TPC-H query 1", "query": "select l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order from lineitem where l_shipdate <= '1998-12-01' - interval '108' day group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus", - "v3-plan": "VT12001: unsupported: in scatter query: complex aggregate expression", - "gen4-plan": "VT12001: unsupported: in scatter query: aggregation function 'avg'" + "plan": "VT12001: unsupported: in scatter query: aggregation function 'avg(l_quantity) as avg_qty'" }, { "comment": "TPC-H query 2", "query": "select s_acctbal, s_name, n_name, p_partkey, p_mfgr, s_address, s_phone, s_comment from part, supplier, partsupp, nation, region where p_partkey = ps_partkey and s_suppkey = ps_suppkey and p_size = 15 and p_type like '%BRASS' and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'EUROPE' and ps_supplycost = ( select min(ps_supplycost) from partsupp, supplier, nation, region where p_partkey = ps_partkey and s_suppkey = ps_suppkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'EUROPE' ) order by s_acctbal desc, n_name, s_name, p_partkey limit 10", - "v3-plan": "VT03019: symbol p_partkey not found", - "gen4-plan": "VT12001: unsupported: cross-shard correlated subquery" + "plan": "VT12001: unsupported: cross-shard correlated subquery" }, { "comment": "TPC-H query 3", "query": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer, orders, lineitem where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate < date('1995-03-15') and l_shipdate > date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, o_orderdate, o_shippriority from customer, orders, lineitem where c_mktsegment = 'BUILDING' and c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate < date('1995-03-15') and l_shipdate > date('1995-03-15') group by l_orderkey, o_orderdate, o_shippriority order by revenue desc, o_orderdate limit 10", "Instructions": { @@ -32,31 +29,31 @@ "OperatorType": "Aggregate", "Variant": "Ordered", "Aggregates": "sum(1) AS revenue", - "GroupBy": "(0|6), (2|5), (3|4)", + "GroupBy": "(0|4), (2|5), (3|6)", "Inputs": [ { "OperatorType": "Projection", "Expressions": [ - "[COLUMN 0] as l_orderkey", - "([COLUMN 6] * COALESCE([COLUMN 7], INT64(1))) * COALESCE([COLUMN 8], INT64(1)) as revenue", - "[COLUMN 1] as o_orderdate", - "[COLUMN 2] as o_shippriority", - "[COLUMN 5]", - "[COLUMN 4]", - "[COLUMN 3]" + "[COLUMN 2] as l_orderkey", + "[COLUMN 0] * [COLUMN 1] as revenue", + "[COLUMN 3] as o_orderdate", + "[COLUMN 4] as o_shippriority", + "[COLUMN 5] as weight_string(l_orderkey)", + "[COLUMN 6] as weight_string(o_orderdate)", + "[COLUMN 7] as weight_string(o_shippriority)" ], "Inputs": [ { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "(0|3) ASC, (1|4) ASC, (2|5) ASC", + "OrderBy": "(2|5) ASC, (3|6) ASC, (4|7) ASC", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,R:1,L:2,R:2,R:3,L:1,R:4,R:5", + "JoinColumnIndexes": "L:0,R:0,L:1,R:1,R:2,L:2,R:3,R:4", "JoinVars": { - "l_orderkey": 0 + "l_orderkey": 1 }, "TableName": "lineitem_orders_customer", "Inputs": [ @@ -67,48 +64,60 @@ "Name": "main", "Sharded": true }, - "FieldQuery": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_orderkey) from lineitem where 1 != 1 group by l_orderkey, weight_string(l_orderkey)", - "Query": "select l_orderkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_orderkey) from lineitem where l_shipdate > date('1995-03-15') group by l_orderkey, weight_string(l_orderkey)", + "FieldQuery": "select sum(l_extendedprice * (1 - l_discount)) as revenue, l_orderkey, weight_string(l_orderkey) from lineitem where 1 != 1 group by l_orderkey, weight_string(l_orderkey)", + "Query": "select sum(l_extendedprice * (1 - l_discount)) as revenue, l_orderkey, weight_string(l_orderkey) from lineitem where l_shipdate > date('1995-03-15') group by l_orderkey, weight_string(l_orderkey)", "Table": "lineitem" }, { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:3,L:5,L:4,L:6,L:1,R:1", - "JoinVars": { - "o_custkey": 0 - }, - "TableName": "orders_customer", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 2] as o_orderdate", + "[COLUMN 3] as o_shippriority", + "[COLUMN 4] as weight_string(o_orderdate)", + "[COLUMN 5] as weight_string(o_shippriority)" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select o_custkey, count(*), weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority)", - "Query": "select o_custkey, count(*), weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority) from orders where o_orderdate < date('1995-03-15') and o_orderkey = :l_orderkey group by o_custkey, weight_string(o_custkey), o_orderdate, weight_string(o_orderdate), o_shippriority, weight_string(o_shippriority)", - "Table": "orders", - "Values": [ - ":l_orderkey" - ], - "Vindex": "hash" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,L:2,L:4,L:5", + "JoinVars": { + "o_custkey": 3 }, - "FieldQuery": "select 1, count(*) from customer where 1 != 1 group by 1", - "Query": "select 1, count(*) from customer where c_mktsegment = 'BUILDING' and c_custkey = :o_custkey group by 1", - "Table": "customer", - "Values": [ - ":o_custkey" - ], - "Vindex": "hash" + "TableName": "orders_customer", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*), o_orderdate, o_shippriority, o_custkey, weight_string(o_orderdate), weight_string(o_shippriority) from orders where 1 != 1 group by o_orderdate, o_shippriority, o_custkey, weight_string(o_orderdate), weight_string(o_shippriority)", + "Query": "select count(*), o_orderdate, o_shippriority, o_custkey, weight_string(o_orderdate), weight_string(o_shippriority) from orders where o_orderdate < date('1995-03-15') and o_orderkey = :l_orderkey group by o_orderdate, o_shippriority, o_custkey, weight_string(o_orderdate), weight_string(o_shippriority)", + "Table": "orders", + "Values": [ + ":l_orderkey" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*) from customer where 1 != 1 group by .0", + "Query": "select count(*) from customer where c_mktsegment = 'BUILDING' and c_custkey = :o_custkey group by .0", + "Table": "customer", + "Values": [ + ":o_custkey" + ], + "Vindex": "hash" + } + ] } ] } @@ -134,8 +143,7 @@ { "comment": "TPC-H query 4", "query": "select o_orderpriority, count(*) as order_count from orders where o_orderdate >= date('1993-07-01') and o_orderdate < date('1993-07-01') + interval '3' month and exists ( select * from lineitem where l_orderkey = o_orderkey and l_commitdate < l_receiptdate ) group by o_orderpriority order by o_orderpriority", - "v3-plan": "VT03019: symbol o_orderkey not found", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select o_orderpriority, count(*) as order_count from orders where o_orderdate >= date('1993-07-01') and o_orderdate < date('1993-07-01') + interval '3' month and exists ( select * from lineitem where l_orderkey = o_orderkey and l_commitdate < l_receiptdate ) group by o_orderpriority order by o_orderpriority", "Instructions": { @@ -161,6 +169,7 @@ "TableName": "orders_lineitem", "Inputs": [ { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -173,6 +182,7 @@ "Table": "orders" }, { + "InputName": "SubQuery", "OperatorType": "VindexLookup", "Variant": "EqualUnique", "Keyspace": { @@ -225,10 +235,9 @@ } }, { - "comment": "TPC-H query 5 - Gen4 produces plan but the plan output is flaky", + "comment": "TPC-H query 5", "query": "select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer, orders, lineitem, supplier, nation, region where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'ASIA' and o_orderdate >= date('1994-01-01') and o_orderdate < date('1994-01-01') + interval '1' year group by n_name order by revenue desc", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue from customer, orders, lineitem, supplier, nation, region where c_custkey = o_custkey and l_orderkey = o_orderkey and l_suppkey = s_suppkey and c_nationkey = s_nationkey and s_nationkey = n_nationkey and n_regionkey = r_regionkey and r_name = 'ASIA' and o_orderdate >= date('1994-01-01') and o_orderdate < date('1994-01-01') + interval '1' year group by n_name order by revenue desc", "Instructions": { @@ -246,120 +255,199 @@ { "OperatorType": "Projection", "Expressions": [ - "[COLUMN 0] as n_name", - "(((([COLUMN 2] * COALESCE([COLUMN 3], INT64(1))) * COALESCE([COLUMN 4], INT64(1))) * COALESCE([COLUMN 5], INT64(1))) * COALESCE([COLUMN 6], INT64(1))) * COALESCE([COLUMN 7], INT64(1)) as revenue", - "[COLUMN 1]" + "[COLUMN 2] as n_name", + "[COLUMN 0] * [COLUMN 1] as revenue", + "[COLUMN 3] as weight_string(n_name)" ], "Inputs": [ { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "(0|1) ASC", + "OrderBy": "(2|3) ASC", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "R:0,R:1,L:3,L:4,L:5,L:6,R:2,R:3", + "JoinColumnIndexes": "L:0,R:0,R:1,R:2", "JoinVars": { - "s_nationkey": 0 + "s_nationkey": 1 }, "TableName": "orders_customer_lineitem_supplier_nation_region", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0,R:1,R:2,L:6,L:7,R:3,R:4", - "JoinVars": { - "c_nationkey": 1, - "o_orderkey": 0 - }, - "TableName": "orders_customer_lineitem_supplier", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 1] * [COLUMN 0] as revenue", + "[COLUMN 2] as s_nationkey" + ], "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1,R:0,L:1,R:0,L:4,R:2,L:2,R:1", + "JoinColumnIndexes": "R:0,L:0,R:1", "JoinVars": { - "o_custkey": 0 + "c_nationkey": 2, + "o_orderkey": 1 }, - "TableName": "orders_customer", + "TableName": "orders_customer_lineitem_supplier", "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey)", - "Query": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey) from orders where o_orderdate >= date('1994-01-01') and o_orderdate < date('1994-01-01') + interval '1' year group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey)", - "Table": "orders" + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 2] as o_orderkey", + "[COLUMN 3] as c_nationkey" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,R:1", + "JoinVars": { + "o_custkey": 2 + }, + "TableName": "orders_customer", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*), o_orderkey, o_custkey from orders where 1 != 1 group by o_orderkey, o_custkey", + "Query": "select count(*), o_orderkey, o_custkey from orders where o_orderdate >= date('1994-01-01') and o_orderdate < date('1994-01-01') + interval '1' year group by o_orderkey, o_custkey", + "Table": "orders" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*), c_nationkey from customer where 1 != 1 group by c_nationkey", + "Query": "select count(*), c_nationkey from customer where c_custkey = :o_custkey group by c_nationkey", + "Table": "customer", + "Values": [ + ":o_custkey" + ], + "Vindex": "hash" + } + ] + } + ] }, { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where 1 != 1 group by c_nationkey, weight_string(c_nationkey)", - "Query": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where c_custkey = :o_custkey group by c_nationkey, weight_string(c_nationkey)", - "Table": "customer", - "Values": [ - ":o_custkey" + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as revenue", + "[COLUMN 2] as s_nationkey" ], - "Vindex": "hash" + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,R:1", + "JoinVars": { + "l_suppkey": 1 + }, + "TableName": "lineitem_supplier", + "Inputs": [ + { + "OperatorType": "VindexLookup", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "Values": [ + ":o_orderkey" + ], + "Vindex": "lineitem_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1", + "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals", + "Table": "lineitem_map", + "Values": [ + "::l_orderkey" + ], + "Vindex": "md5" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select sum(l_extendedprice * (1 - l_discount)) as revenue, l_suppkey from lineitem where 1 != 1 group by l_suppkey", + "Query": "select sum(l_extendedprice * (1 - l_discount)) as revenue, l_suppkey from lineitem where l_orderkey = :o_orderkey group by l_suppkey", + "Table": "lineitem" + } + ] + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*), s_nationkey from supplier where 1 != 1 group by s_nationkey", + "Query": "select count(*), s_nationkey from supplier where s_nationkey = :c_nationkey and s_suppkey = :l_suppkey group by s_nationkey", + "Table": "supplier", + "Values": [ + ":l_suppkey" + ], + "Vindex": "hash" + } + ] + } + ] } ] - }, + } + ] + }, + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 2] as n_name", + "[COLUMN 3] as weight_string(n_name)" + ], + "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "R:0,R:0,R:2,L:1,R:1", + "JoinColumnIndexes": "L:0,R:0,L:1,L:3", "JoinVars": { - "l_suppkey": 0 + "n_regionkey": 2 }, - "TableName": "lineitem_supplier", + "TableName": "nation_region", "Inputs": [ { - "OperatorType": "VindexLookup", + "OperatorType": "Route", "Variant": "EqualUnique", "Keyspace": { "Name": "main", "Sharded": true }, + "FieldQuery": "select count(*), n_name, n_regionkey, weight_string(n_name) from nation where 1 != 1 group by n_name, n_regionkey, weight_string(n_name)", + "Query": "select count(*), n_name, n_regionkey, weight_string(n_name) from nation where n_nationkey = :s_nationkey group by n_name, n_regionkey, weight_string(n_name)", + "Table": "nation", "Values": [ - ":o_orderkey" + ":s_nationkey" ], - "Vindex": "lineitem_map", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1", - "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals", - "Table": "lineitem_map", - "Values": [ - "::l_orderkey" - ], - "Vindex": "md5" - }, - { - "OperatorType": "Route", - "Variant": "ByDestination", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select l_suppkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_suppkey) from lineitem where 1 != 1 group by l_suppkey, weight_string(l_suppkey)", - "Query": "select l_suppkey, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_suppkey) from lineitem where l_orderkey = :o_orderkey group by l_suppkey, weight_string(l_suppkey)", - "Table": "lineitem" - } - ] + "Vindex": "hash" }, { "OperatorType": "Route", @@ -368,58 +456,17 @@ "Name": "main", "Sharded": true }, - "FieldQuery": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where 1 != 1 group by s_nationkey, weight_string(s_nationkey)", - "Query": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where s_suppkey = :l_suppkey and s_nationkey = :c_nationkey group by s_nationkey, weight_string(s_nationkey)", - "Table": "supplier", + "FieldQuery": "select count(*) from region where 1 != 1 group by .0", + "Query": "select count(*) from region where r_name = 'ASIA' and r_regionkey = :n_regionkey group by .0", + "Table": "region", "Values": [ - ":l_suppkey" + ":n_regionkey" ], "Vindex": "hash" } ] } ] - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:3,L:4,L:1,R:1", - "JoinVars": { - "n_regionkey": 0 - }, - "TableName": "nation_region", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select n_regionkey, count(*), weight_string(n_regionkey), n_name, weight_string(n_name) from nation where 1 != 1 group by n_regionkey, weight_string(n_regionkey), n_name, weight_string(n_name)", - "Query": "select n_regionkey, count(*), weight_string(n_regionkey), n_name, weight_string(n_name) from nation where n_nationkey = :s_nationkey group by n_regionkey, weight_string(n_regionkey), n_name, weight_string(n_name)", - "Table": "nation", - "Values": [ - ":s_nationkey" - ], - "Vindex": "hash" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select 1, count(*) from region where 1 != 1 group by 1", - "Query": "select 1, count(*) from region where r_name = 'ASIA' and r_regionkey = :n_regionkey group by 1", - "Table": "region", - "Values": [ - ":n_regionkey" - ], - "Vindex": "hash" - } - ] } ] } @@ -444,29 +491,7 @@ { "comment": "TPC-H query 6", "query": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24", - "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum(0)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select sum(l_extendedprice * l_discount) as revenue from lineitem where 1 != 1", - "Query": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24", - "Table": "lineitem" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sum(l_extendedprice * l_discount) as revenue from lineitem where l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24", "Instructions": { @@ -495,139 +520,179 @@ { "comment": "TPC-H query 7", "query": "select supp_nation, cust_nation, l_year, sum(volume) as revenue from (select n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier, lineitem, orders, customer, nation n1, nation n2 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n2.n_nationkey and ((n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE')) and l_shipdate between date('1995-01-01') and date('1996-12-31')) as shipping group by supp_nation, cust_nation, l_year order by supp_nation, cust_nation, l_year", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select supp_nation, cust_nation, l_year, sum(volume) as revenue from (select n1.n_name as supp_nation, n2.n_name as cust_nation, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume from supplier, lineitem, orders, customer, nation n1, nation n2 where s_suppkey = l_suppkey and o_orderkey = l_orderkey and c_custkey = o_custkey and s_nationkey = n1.n_nationkey and c_nationkey = n2.n_nationkey and ((n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE')) and l_shipdate between date('1995-01-01') and date('1996-12-31')) as shipping group by supp_nation, cust_nation, l_year order by supp_nation, cust_nation, l_year", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", "Aggregates": "sum(3) AS revenue", - "GroupBy": "(0|6), (1|5), (2|4)", + "GroupBy": "(0|4), (1|5), (2|6)", "ResultColumns": 4, "Inputs": [ { "OperatorType": "Projection", "Expressions": [ - "[COLUMN 4] as supp_nation", - "[COLUMN 5] as cust_nation", - "[COLUMN 6] as l_year", - "(((([COLUMN 10] * COALESCE([COLUMN 11], INT64(1))) * COALESCE([COLUMN 12], INT64(1))) * COALESCE([COLUMN 13], INT64(1))) * COALESCE([COLUMN 14], INT64(1))) * COALESCE([COLUMN 15], INT64(1)) as revenue", - "[COLUMN 9]", - "[COLUMN 8]", - "[COLUMN 7]" + "[COLUMN 2] as supp_nation", + "[COLUMN 3] as cust_nation", + "[COLUMN 4] as l_year", + "[COLUMN 0] * [COLUMN 1] as revenue", + "[COLUMN 5] as weight_string(supp_nation)", + "[COLUMN 6] as weight_string(cust_nation)", + "[COLUMN 7] as weight_string(l_year)" ], "Inputs": [ { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|16) ASC, (1|17) ASC, (2|18) ASC", + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,R:1,L:2,L:5,R:2,L:6", + "JoinVars": { + "n1_n_name": 4, + "o_custkey": 3 + }, + "TableName": "lineitem_orders_supplier_nation_customer_nation", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:2,R:0,L:3,L:4,L:8,R:1,L:9,L:13,R:2,L:14,L:15,L:16,L:17,L:18,R:3,R:4,L:19,R:5,L:20", - "JoinVars": { - "n1_n_name": 1, - "o_custkey": 0 - }, - "TableName": "lineitem_orders_supplier_nation_customer_nation", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as revenue", + "[COLUMN 2] as supp_nation", + "[COLUMN 3] as l_year", + "[COLUMN 4] as orders.o_custkey", + "[COLUMN 5] as n1.n_name", + "[COLUMN 6] as weight_string(supp_nation)", + "[COLUMN 7] as weight_string(l_year)" + ], "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1,R:0,R:1,L:2,L:3,L:5,R:2,R:3,R:4,L:6,L:8,R:5,R:6,R:7,L:9,L:10,L:11,R:8,R:9,R:10,L:12", + "JoinColumnIndexes": "L:0,R:0,R:1,L:1,L:2,L:3,R:2,L:5", "JoinVars": { - "l_suppkey": 0 + "l_suppkey": 4 }, "TableName": "lineitem_orders_supplier_nation", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,R:0,L:2,L:3,L:1,R:0,L:2,L:6,R:2,L:7,L:4,R:1,L:8", - "JoinVars": { - "l_orderkey": 0 - }, - "TableName": "lineitem_orders", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as revenue", + "[COLUMN 2] as l_year", + "[COLUMN 3] as orders.o_custkey", + "[COLUMN 4] as n1.n_name", + "[COLUMN 5] as lineitem.l_suppkey", + "[COLUMN 6] as weight_string(l_year)" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select l_orderkey, l_suppkey, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, sum(volume) as revenue, weight_string(l_orderkey), weight_string(l_suppkey), weight_string(extract(year from l_shipdate)), weight_string(extract(year from l_shipdate)) from lineitem where 1 != 1 group by l_orderkey, weight_string(l_orderkey), l_suppkey, weight_string(l_suppkey), l_year, weight_string(l_year)", - "Query": "select l_orderkey, l_suppkey, extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, sum(volume) as revenue, weight_string(l_orderkey), weight_string(l_suppkey), weight_string(extract(year from l_shipdate)), weight_string(extract(year from l_shipdate)) from lineitem where l_shipdate between date('1995-01-01') and date('1996-12-31') group by l_orderkey, weight_string(l_orderkey), l_suppkey, weight_string(l_suppkey), l_year, weight_string(l_year)", - "Table": "lineitem" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,L:2,L:3,L:4,L:6", + "JoinVars": { + "l_orderkey": 5 }, - "FieldQuery": "select o_custkey, count(*), weight_string(o_custkey) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey)", - "Query": "select o_custkey, count(*), weight_string(o_custkey) from orders where o_orderkey = :l_orderkey group by o_custkey, weight_string(o_custkey)", - "Table": "orders", - "Values": [ - ":l_orderkey" - ], - "Vindex": "hash" + "TableName": "lineitem_orders", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select sum(volume) as revenue, l_year, shipping.`orders.o_custkey`, shipping.`n1.n_name`, shipping.`lineitem.l_suppkey`, shipping.`lineitem.l_orderkey`, weight_string(l_year), supp_nation, weight_string(supp_nation), cust_nation, weight_string(cust_nation) from (select extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, orders.o_custkey as `orders.o_custkey`, lineitem.l_suppkey as `lineitem.l_suppkey`, lineitem.l_orderkey as `lineitem.l_orderkey` from lineitem where 1 != 1) as shipping where 1 != 1 group by l_year, shipping.`orders.o_custkey`, shipping.`n1.n_name`, shipping.`lineitem.l_suppkey`, shipping.`lineitem.l_orderkey`, weight_string(l_year)", + "OrderBy": "(7|8) ASC, (9|10) ASC, (1|6) ASC", + "Query": "select sum(volume) as revenue, l_year, shipping.`orders.o_custkey`, shipping.`n1.n_name`, shipping.`lineitem.l_suppkey`, shipping.`lineitem.l_orderkey`, weight_string(l_year), supp_nation, weight_string(supp_nation), cust_nation, weight_string(cust_nation) from (select extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, orders.o_custkey as `orders.o_custkey`, lineitem.l_suppkey as `lineitem.l_suppkey`, lineitem.l_orderkey as `lineitem.l_orderkey` from lineitem where l_shipdate between date('1995-01-01') and date('1996-12-31')) as shipping group by l_year, shipping.`orders.o_custkey`, shipping.`n1.n_name`, shipping.`lineitem.l_suppkey`, shipping.`lineitem.l_orderkey`, weight_string(l_year) order by supp_nation asc, cust_nation asc, l_year asc", + "Table": "lineitem" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*) from orders where 1 != 1 group by .0", + "Query": "select count(*) from orders where o_orderkey = :l_orderkey group by .0", + "Table": "orders", + "Values": [ + ":l_orderkey" + ], + "Vindex": "hash" + } + ] } ] }, { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0,R:1,R:0,R:0,R:1,R:3,R:3,R:4,L:1,R:2,R:5", - "JoinVars": { - "s_nationkey": 0 - }, - "TableName": "supplier_nation", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 2] as supp_nation", + "[COLUMN 3] as weight_string(supp_nation)" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where 1 != 1 group by s_nationkey, weight_string(s_nationkey)", - "Query": "select s_nationkey, count(*), weight_string(s_nationkey) from supplier where s_suppkey = :l_suppkey group by s_nationkey, weight_string(s_nationkey)", - "Table": "supplier", - "Values": [ - ":l_suppkey" - ], - "Vindex": "hash" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,R:1,R:2", + "JoinVars": { + "s_nationkey": 1 }, - "FieldQuery": "select n1.n_name, n1.n_name as supp_nation, count(*), weight_string(n1.n_name), weight_string(n1.n_name), weight_string(n1.n_name) from nation as n1 where 1 != 1 group by n1.n_name, weight_string(n1.n_name), supp_nation, weight_string(supp_nation)", - "Query": "select n1.n_name, n1.n_name as supp_nation, count(*), weight_string(n1.n_name), weight_string(n1.n_name), weight_string(n1.n_name) from nation as n1 where n1.n_nationkey = :s_nationkey group by n1.n_name, weight_string(n1.n_name), supp_nation, weight_string(supp_nation)", - "Table": "nation", - "Values": [ - ":s_nationkey" - ], - "Vindex": "hash" + "TableName": "supplier_nation", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*), shipping.`supplier.s_nationkey` from (select supplier.s_nationkey as `supplier.s_nationkey` from supplier where 1 != 1) as shipping where 1 != 1 group by shipping.`supplier.s_nationkey`", + "Query": "select count(*), shipping.`supplier.s_nationkey` from (select supplier.s_nationkey as `supplier.s_nationkey` from supplier where s_suppkey = :l_suppkey) as shipping group by shipping.`supplier.s_nationkey`", + "Table": "supplier", + "Values": [ + ":l_suppkey" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*), supp_nation, weight_string(supp_nation) from (select n1.n_name as supp_nation from nation as n1 where 1 != 1) as shipping where 1 != 1 group by supp_nation, weight_string(supp_nation)", + "Query": "select count(*), supp_nation, weight_string(supp_nation) from (select n1.n_name as supp_nation from nation as n1 where n1.n_nationkey = :s_nationkey) as shipping group by supp_nation, weight_string(supp_nation)", + "Table": "nation", + "Values": [ + ":s_nationkey" + ], + "Vindex": "hash" + } + ] } ] } ] - }, + } + ] + }, + { + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 2] as cust_nation", + "[COLUMN 3] as weight_string(cust_nation)" + ], + "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "R:0,R:0,R:2,L:1,R:1,R:3", + "JoinColumnIndexes": "L:0,R:0,R:1,R:2", "JoinVars": { - "c_nationkey": 0 + "c_nationkey": 1 }, "TableName": "customer_nation", "Inputs": [ @@ -638,8 +703,8 @@ "Name": "main", "Sharded": true }, - "FieldQuery": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where 1 != 1 group by c_nationkey, weight_string(c_nationkey)", - "Query": "select c_nationkey, count(*), weight_string(c_nationkey) from customer where c_custkey = :o_custkey group by c_nationkey, weight_string(c_nationkey)", + "FieldQuery": "select count(*), shipping.`customer.c_nationkey` from (select customer.c_nationkey as `customer.c_nationkey` from customer where 1 != 1) as shipping where 1 != 1 group by shipping.`customer.c_nationkey`", + "Query": "select count(*), shipping.`customer.c_nationkey` from (select customer.c_nationkey as `customer.c_nationkey` from customer where c_custkey = :o_custkey) as shipping group by shipping.`customer.c_nationkey`", "Table": "customer", "Values": [ ":o_custkey" @@ -653,8 +718,8 @@ "Name": "main", "Sharded": true }, - "FieldQuery": "select n2.n_name as cust_nation, count(*), weight_string(n2.n_name), weight_string(n2.n_name) from nation as n2 where 1 != 1 group by cust_nation, weight_string(cust_nation)", - "Query": "select n2.n_name as cust_nation, count(*), weight_string(n2.n_name), weight_string(n2.n_name) from nation as n2 where n2.n_nationkey = :c_nationkey and (:n1_n_name = 'FRANCE' and n2.n_name = 'GERMANY' or :n1_n_name = 'GERMANY' and n2.n_name = 'FRANCE') group by cust_nation, weight_string(cust_nation)", + "FieldQuery": "select count(*), cust_nation, weight_string(cust_nation) from (select n2.n_name as cust_nation from nation as n2 where 1 != 1) as shipping where 1 != 1 group by cust_nation, weight_string(cust_nation)", + "Query": "select count(*), cust_nation, weight_string(cust_nation) from (select n2.n_name as cust_nation from nation as n2 where (:n1_n_name = 'FRANCE' and n2.n_name = 'GERMANY' or :n1_n_name = 'GERMANY' and n2.n_name = 'FRANCE') and n2.n_nationkey = :c_nationkey) as shipping group by cust_nation, weight_string(cust_nation)", "Table": "nation", "Values": [ ":c_nationkey" @@ -683,20 +748,17 @@ { "comment": "TPC-H query 8", "query": "select o_year, sum(case when nation = 'BRAZIL' then volume else 0 end) / sum(volume) as mkt_share from ( select extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) as volume, n2.n_name as nation from part, supplier, lineitem, orders, customer, nation n1, nation n2, region where p_partkey = l_partkey and s_suppkey = l_suppkey and l_orderkey = o_orderkey and o_custkey = c_custkey and c_nationkey = n1.n_nationkey and n1.n_regionkey = r_regionkey and r_name = 'AMERICA' and s_nationkey = n2.n_nationkey and o_orderdate between date '1995-01-01' and date('1996-12-31') and p_type = 'ECONOMY ANODIZED STEEL' ) as all_nations group by o_year order by o_year", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: in scatter query: complex aggregate expression" + "plan": "VT13002: unexpected AST struct for query: o_year" }, { "comment": "TPC-H query 9", "query": "select nation, o_year, sum(amount) as sum_profit from ( select n_name as nation, extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount from part, supplier, lineitem, partsupp, orders, nation where s_suppkey = l_suppkey and ps_suppkey = l_suppkey and ps_partkey = l_partkey and p_partkey = l_partkey and o_orderkey = l_orderkey and s_nationkey = n_nationkey and p_name like '%green%' ) as profit group by nation, o_year order by nation, o_year desc", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: aggregation on columns from different sources" + "plan": "VT13002: unexpected AST struct for query: nation" }, { "comment": "TPC-H query 10", "query": "select c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment from customer, orders, lineitem, nation where c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate >= date('1993-10-01') and o_orderdate < date('1993-10-01') + interval '3' month and l_returnflag = 'R' and c_nationkey = n_nationkey group by c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment order by revenue desc limit 20", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment from customer, orders, lineitem, nation where c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate >= date('1993-10-01') and o_orderdate < date('1993-10-01') + interval '3' month and l_returnflag = 'R' and c_nationkey = n_nationkey group by c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment order by revenue desc limit 20", "Instructions": { @@ -713,142 +775,173 @@ "OperatorType": "Aggregate", "Variant": "Ordered", "Aggregates": "sum(2) AS revenue", - "GroupBy": "(0|14), (1|13), (3|12), (6|11), (4|10), (5|9), (7|8)", + "GroupBy": "(0|8), (1|9), (3|10), (6|11), (4|12), (5|13), (7|14)", "Inputs": [ { "OperatorType": "Projection", "Expressions": [ - "[COLUMN 0] as c_custkey", - "[COLUMN 1] as c_name", - "(([COLUMN 14] * COALESCE([COLUMN 15], INT64(1))) * COALESCE([COLUMN 16], INT64(1))) * COALESCE([COLUMN 17], INT64(1)) as revenue", - "[COLUMN 2] as c_acctbal", - "[COLUMN 4] as n_name", - "[COLUMN 5] as c_address", - "[COLUMN 3] as c_phone", - "[COLUMN 6] as c_comment", - "[COLUMN 13]", - "[COLUMN 12]", - "[COLUMN 11]", - "[COLUMN 10]", - "[COLUMN 9]", - "[COLUMN 8]", - "[COLUMN 7]" + "[COLUMN 2] as c_custkey", + "[COLUMN 3] as c_name", + "[COLUMN 0] * [COLUMN 1] as revenue", + "[COLUMN 4] as c_acctbal", + "[COLUMN 6] as n_name", + "[COLUMN 7] as c_address", + "[COLUMN 5] as c_phone", + "[COLUMN 8] as c_comment", + "[COLUMN 9] as weight_string(c_custkey)", + "[COLUMN 10] as weight_string(c_name)", + "[COLUMN 11] as weight_string(c_acctbal)", + "[COLUMN 12] as weight_string(c_phone)", + "[COLUMN 13] as weight_string(n_name)", + "[COLUMN 14] as weight_string(c_address)", + "[COLUMN 15] as weight_string(c_comment)" ], "Inputs": [ { "OperatorType": "Sort", "Variant": "Memory", - "OrderBy": "(0|7) ASC, (1|8) ASC, (2|9) ASC, (3|10) ASC, (4|11) ASC, (5|12) ASC, (6|13) ASC", + "OrderBy": "(2|9) ASC, (3|10) ASC, (4|11) ASC, (5|12) ASC, (6|13) ASC, (7|14) ASC, (8|15) ASC", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "R:0,R:1,R:2,R:3,R:4,R:5,R:6,R:7,R:8,R:9,R:10,R:11,R:12,R:13,L:3,L:4,R:14,R:15", + "JoinColumnIndexes": "L:0,R:0,R:1,R:2,R:3,R:4,R:5,R:6,R:7,R:8,R:9,R:10,R:11,R:12,R:13,R:14", "JoinVars": { - "o_custkey": 0 + "o_custkey": 1 }, "TableName": "orders_lineitem_customer_nation", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,L:1,L:4,L:2,R:1", - "JoinVars": { - "o_orderkey": 0 - }, - "TableName": "orders_lineitem", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 1] * [COLUMN 0] as revenue", + "[COLUMN 2] as o_custkey" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,L:0,L:1", + "JoinVars": { + "o_orderkey": 2 }, - "FieldQuery": "select o_orderkey, o_custkey, count(*), weight_string(o_orderkey), weight_string(o_custkey) from orders where 1 != 1 group by o_orderkey, weight_string(o_orderkey), o_custkey, weight_string(o_custkey)", - "Query": "select o_orderkey, o_custkey, count(*), weight_string(o_orderkey), weight_string(o_custkey) from orders where o_orderdate >= date('1993-10-01') and o_orderdate < date('1993-10-01') + interval '3' month group by o_orderkey, weight_string(o_orderkey), o_custkey, weight_string(o_custkey)", - "Table": "orders" - }, - { - "OperatorType": "VindexLookup", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "Values": [ - ":o_orderkey" - ], - "Vindex": "lineitem_map", + "TableName": "orders_lineitem", "Inputs": [ { "OperatorType": "Route", - "Variant": "IN", + "Variant": "Scatter", "Keyspace": { "Name": "main", "Sharded": true }, - "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1", - "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals", - "Table": "lineitem_map", - "Values": [ - "::l_orderkey" - ], - "Vindex": "md5" + "FieldQuery": "select count(*), o_custkey, o_orderkey from orders where 1 != 1 group by o_custkey, o_orderkey", + "Query": "select count(*), o_custkey, o_orderkey from orders where o_orderdate >= date('1993-10-01') and o_orderdate < date('1993-10-01') + interval '3' month group by o_custkey, o_orderkey", + "Table": "orders" }, { - "OperatorType": "Route", - "Variant": "ByDestination", + "OperatorType": "VindexLookup", + "Variant": "EqualUnique", "Keyspace": { "Name": "main", "Sharded": true }, - "FieldQuery": "select 1, sum(l_extendedprice * (1 - l_discount)) as revenue from lineitem where 1 != 1 group by 1", - "Query": "select 1, sum(l_extendedprice * (1 - l_discount)) as revenue from lineitem where l_returnflag = 'R' and l_orderkey = :o_orderkey group by 1", - "Table": "lineitem" + "Values": [ + ":o_orderkey" + ], + "Vindex": "lineitem_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1", + "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals", + "Table": "lineitem_map", + "Values": [ + "::l_orderkey" + ], + "Vindex": "md5" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select sum(l_extendedprice * (1 - l_discount)) as revenue from lineitem where 1 != 1 group by .0", + "Query": "select sum(l_extendedprice * (1 - l_discount)) as revenue from lineitem where l_returnflag = 'R' and l_orderkey = :o_orderkey group by .0", + "Table": "lineitem" + } + ] } ] } ] }, { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:3,L:5,L:7,L:9,R:1,L:11,L:13,L:4,L:6,L:8,L:10,R:2,L:12,L:14,L:1,R:0", - "JoinVars": { - "c_nationkey": 0 - }, - "TableName": "customer_nation", + "OperatorType": "Projection", + "Expressions": [ + "[COLUMN 0] * [COLUMN 1] as count(*)", + "[COLUMN 2] as c_custkey", + "[COLUMN 3] as c_name", + "[COLUMN 4] as c_acctbal", + "[COLUMN 5] as c_phone", + "[COLUMN 6] as n_name", + "[COLUMN 7] as c_address", + "[COLUMN 8] as c_comment", + "[COLUMN 9] as weight_string(c_custkey)", + "[COLUMN 10] as weight_string(c_name)", + "[COLUMN 11] as weight_string(c_acctbal)", + "[COLUMN 12] as weight_string(c_phone)", + "[COLUMN 13] as weight_string(n_name)", + "[COLUMN 14] as weight_string(c_address)", + "[COLUMN 15] as weight_string(c_comment)" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select c_nationkey, count(*), weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment) from customer where 1 != 1 group by c_nationkey, weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment)", - "Query": "select c_nationkey, count(*), weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment) from customer where c_custkey = :o_custkey group by c_nationkey, weight_string(c_nationkey), c_custkey, weight_string(c_custkey), c_name, weight_string(c_name), c_acctbal, weight_string(c_acctbal), c_phone, weight_string(c_phone), c_address, weight_string(c_address), c_comment, weight_string(c_comment)", - "Table": "customer", - "Values": [ - ":o_custkey" - ], - "Vindex": "hash" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,L:2,L:3,L:4,R:1,L:5,L:6,L:8,L:9,L:10,L:11,R:2,L:12,L:13", + "JoinVars": { + "c_nationkey": 7 }, - "FieldQuery": "select count(*), n_name, weight_string(n_name) from nation where 1 != 1 group by n_name, weight_string(n_name)", - "Query": "select count(*), n_name, weight_string(n_name) from nation where n_nationkey = :c_nationkey group by n_name, weight_string(n_name)", - "Table": "nation", - "Values": [ - ":c_nationkey" - ], - "Vindex": "hash" + "TableName": "customer_nation", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*), c_custkey, c_name, c_acctbal, c_phone, c_address, c_comment, c_nationkey, weight_string(c_custkey), weight_string(c_name), weight_string(c_acctbal), weight_string(c_phone), weight_string(c_address), weight_string(c_comment) from customer where 1 != 1 group by c_custkey, c_name, c_acctbal, c_phone, c_address, c_comment, c_nationkey, weight_string(c_custkey), weight_string(c_name), weight_string(c_acctbal), weight_string(c_phone), weight_string(c_address), weight_string(c_comment)", + "Query": "select count(*), c_custkey, c_name, c_acctbal, c_phone, c_address, c_comment, c_nationkey, weight_string(c_custkey), weight_string(c_name), weight_string(c_acctbal), weight_string(c_phone), weight_string(c_address), weight_string(c_comment) from customer where c_custkey = :o_custkey group by c_custkey, c_name, c_acctbal, c_phone, c_address, c_comment, c_nationkey, weight_string(c_custkey), weight_string(c_name), weight_string(c_acctbal), weight_string(c_phone), weight_string(c_address), weight_string(c_comment)", + "Table": "customer", + "Values": [ + ":o_custkey" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select count(*), n_name, weight_string(n_name) from nation where 1 != 1 group by n_name, weight_string(n_name)", + "Query": "select count(*), n_name, weight_string(n_name) from nation where n_nationkey = :c_nationkey group by n_name, weight_string(n_name)", + "Table": "nation", + "Values": [ + ":c_nationkey" + ], + "Vindex": "hash" + } + ] } ] } @@ -875,14 +968,12 @@ { "comment": "TPC-H query 11", "query": "select ps_partkey, sum(ps_supplycost * ps_availqty) as value from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'GERMANY' group by ps_partkey having sum(ps_supplycost * ps_availqty) > ( select sum(ps_supplycost * ps_availqty) * 0.00001000000 from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'GERMANY' ) order by value desc", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: in scatter query: complex aggregate expression" + "plan": "VT12001: unsupported: in scatter query: complex aggregate expression" }, { "comment": "TPC-H query 12", "query": "select l_shipmode, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' and o_orderpriority <> '2-HIGH' then 1 else 0 end) as low_line_count from orders, lineitem where o_orderkey = l_orderkey and l_shipmode in ('MAIL', 'SHIP') and l_commitdate < l_receiptdate and l_shipdate < l_commitdate and l_receiptdate >= date('1994-01-01') and l_receiptdate < date('1994-01-01') + interval '1' year group by l_shipmode order by l_shipmode", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select l_shipmode, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority <> '1-URGENT' and o_orderpriority <> '2-HIGH' then 1 else 0 end) as low_line_count from orders, lineitem where o_orderkey = l_orderkey and l_shipmode in ('MAIL', 'SHIP') and l_commitdate < l_receiptdate and l_shipdate < l_commitdate and l_receiptdate >= date('1994-01-01') and l_receiptdate < date('1994-01-01') + interval '1' year group by l_shipmode order by l_shipmode", "Instructions": { @@ -893,78 +984,67 @@ "ResultColumns": 3, "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "[COLUMN 0] as l_shipmode", - "[COLUMN 2] * COALESCE([COLUMN 3], INT64(1)) as high_line_count", - "[COLUMN 4] * COALESCE([COLUMN 5], INT64(1)) as low_line_count", - "[COLUMN 1]" - ], + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(0|3) ASC", "Inputs": [ { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|1) ASC", + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,L:0,L:1,R:1", + "JoinVars": { + "o_orderkey": 2 + }, + "TableName": "orders_lineitem", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:1,R:2,L:1,R:0,L:2,R:0", - "JoinVars": { - "o_orderkey": 0 + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end, case when o_orderpriority != '1-URGENT' and o_orderpriority != '2-HIGH' then 1 else 0 end, o_orderkey from orders where 1 != 1", + "Query": "select case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end, case when o_orderpriority != '1-URGENT' and o_orderpriority != '2-HIGH' then 1 else 0 end, o_orderkey from orders", + "Table": "orders" + }, + { + "OperatorType": "VindexLookup", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true }, - "TableName": "orders_lineitem", + "Values": [ + ":o_orderkey" + ], + "Vindex": "lineitem_map", "Inputs": [ { "OperatorType": "Route", - "Variant": "Scatter", + "Variant": "IN", "Keyspace": { "Name": "main", "Sharded": true }, - "FieldQuery": "select o_orderkey, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority != '1-URGENT' and o_orderpriority != '2-HIGH' then 1 else 0 end) as low_line_count, weight_string(o_orderkey) from orders where 1 != 1 group by o_orderkey, weight_string(o_orderkey)", - "Query": "select o_orderkey, sum(case when o_orderpriority = '1-URGENT' or o_orderpriority = '2-HIGH' then 1 else 0 end) as high_line_count, sum(case when o_orderpriority != '1-URGENT' and o_orderpriority != '2-HIGH' then 1 else 0 end) as low_line_count, weight_string(o_orderkey) from orders group by o_orderkey, weight_string(o_orderkey)", - "Table": "orders" + "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1", + "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals", + "Table": "lineitem_map", + "Values": [ + "::l_orderkey" + ], + "Vindex": "md5" }, { - "OperatorType": "VindexLookup", - "Variant": "EqualUnique", + "OperatorType": "Route", + "Variant": "ByDestination", "Keyspace": { "Name": "main", "Sharded": true }, - "Values": [ - ":o_orderkey" - ], - "Vindex": "lineitem_map", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1", - "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals", - "Table": "lineitem_map", - "Values": [ - "::l_orderkey" - ], - "Vindex": "md5" - }, - { - "OperatorType": "Route", - "Variant": "ByDestination", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select count(*), l_shipmode, weight_string(l_shipmode) from lineitem where 1 != 1 group by l_shipmode, weight_string(l_shipmode)", - "Query": "select count(*), l_shipmode, weight_string(l_shipmode) from lineitem where l_shipmode in ('MAIL', 'SHIP') and l_commitdate < l_receiptdate and l_shipdate < l_commitdate and l_receiptdate >= date('1994-01-01') and l_receiptdate < date('1994-01-01') + interval '1' year and l_orderkey = :o_orderkey group by l_shipmode, weight_string(l_shipmode)", - "Table": "lineitem" - } - ] + "FieldQuery": "select l_shipmode, weight_string(l_shipmode) from lineitem where 1 != 1", + "Query": "select l_shipmode, weight_string(l_shipmode) from lineitem where l_shipmode in ('MAIL', 'SHIP') and l_commitdate < l_receiptdate and l_shipdate < l_commitdate and l_receiptdate >= date('1994-01-01') and l_receiptdate < date('1994-01-01') + interval '1' year and l_orderkey = :o_orderkey", + "Table": "lineitem" } ] } @@ -988,84 +1068,73 @@ { "comment": "TPC-H query 14", "query": "select 100.00 * sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue from lineitem, part where l_partkey = p_partkey and l_shipdate >= date('1995-09-01') and l_shipdate < date('1995-09-01') + interval '1' month", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: in scatter query: complex aggregate expression" - }, - { - "comment": "TPC-H query 15 view\n#\"with revenue0(supplier_no, total_revenue) as (select l_suppkey, sum(l_extendedprice * (1 - l_discount)) from lineitem where l_shipdate >= date('1996-01-01') and l_shipdate < date('1996-01-01') + interval '3' month group by l_suppkey )\"\n#\"syntax error at position 236\"\n#Gen4 plan same as above\n# TPC-H query 15", - "query": "select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey", - "v3-plan": { + "plan": { "QueryType": "SELECT", - "Original": "select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey", + "Original": "select 100.00 * sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue from lineitem, part where l_partkey = p_partkey and l_shipdate >= date('1995-09-01') and l_shipdate < date('1995-09-01') + interval '1' month", "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" + "OperatorType": "Projection", + "Expressions": [ + "([COLUMN 0] * [COLUMN 1]) / [COLUMN 2] as promo_revenue" ], "Inputs": [ { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "max(0)", + "Aggregates": "any_value(0), sum(1) AS sum(case when p_type like 'PROMO%' then l_extendedprice * (1 - l_discount) else 0 end), sum(2) AS sum(l_extendedprice * (1 - l_discount))", "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select max(total_revenue) from revenue0 where 1 != 1", - "Query": "select max(total_revenue) from revenue0", - "Table": "revenue0" - } - ] - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,L:2,L:3,R:0", - "JoinVars": { - "s_suppkey": 0 - }, - "TableName": "supplier_revenue0", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "main", - "Sharded": true - }, - "FieldQuery": "select s_suppkey, s_name, s_address, s_phone, weight_string(s_suppkey) from supplier where 1 != 1", - "OrderBy": "(0|4) ASC", - "Query": "select s_suppkey, s_name, s_address, s_phone, weight_string(s_suppkey) from supplier order by s_suppkey asc", - "ResultColumns": 4, - "Table": "supplier" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "main", - "Sharded": true + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:3", + "JoinVars": { + "l_discount": 2, + "l_extendedprice": 1, + "l_partkey": 4 }, - "FieldQuery": "select total_revenue from revenue0 where 1 != 1", - "Query": "select total_revenue from revenue0 where supplier_no = :s_suppkey and total_revenue = :__sq1", - "Table": "revenue0", - "Values": [ - ":s_suppkey" - ], - "Vindex": "hash" + "TableName": "lineitem_part", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select 100.00, l_extendedprice, l_discount, l_extendedprice * (1 - l_discount), l_partkey from lineitem where 1 != 1", + "Query": "select 100.00, l_extendedprice, l_discount, l_extendedprice * (1 - l_discount), l_partkey from lineitem where l_shipdate >= date('1995-09-01') and l_shipdate < date('1995-09-01') + interval '1' month", + "Table": "lineitem" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select case when p_type like 'PROMO%' then :l_extendedprice * (1 - :l_discount) else 0 end from part where 1 != 1", + "Query": "select case when p_type like 'PROMO%' then :l_extendedprice * (1 - :l_discount) else 0 end from part where p_partkey = :l_partkey", + "Table": "part", + "Values": [ + ":l_partkey" + ], + "Vindex": "hash" + } + ] } ] } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "main.lineitem", + "main.part" + ] + } + }, + { + "comment": "TPC-H query 15", + "query": "select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey", + "plan": { "QueryType": "SELECT", "Original": "select s_suppkey, s_name, s_address, s_phone, total_revenue from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = ( select max(total_revenue) from revenue0 ) order by s_suppkey", "Instructions": { @@ -1077,6 +1146,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Aggregate", "Variant": "Scalar", "Aggregates": "max(0) AS max(total_revenue)", @@ -1095,6 +1165,7 @@ ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -1118,20 +1189,17 @@ { "comment": "TPC-H query 16", "query": "select p_brand, p_type, p_size, count(distinct ps_suppkey) as supplier_cnt from partsupp, part where p_partkey = ps_partkey and p_brand <> 'Brand#45' and p_type not like 'MEDIUM POLISHED%' and p_size in (49, 14, 23, 45, 19, 3, 36, 9) and ps_suppkey not in ( select s_suppkey from supplier where s_comment like '%Customer%Complaints%' ) group by p_brand, p_type, p_size order by supplier_cnt desc, p_brand, p_type, p_size", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: using aggregation on top of a *planbuilder.pulloutSubquery plan" + "plan": "VT12001: unsupported: using aggregation on top of a *planbuilder.pulloutSubquery plan" }, { "comment": "TPC-H query 17", "query": "select sum(l_extendedprice) / 7.0 as avg_yearly from lineitem, part where p_partkey = l_partkey and p_brand = 'Brand#23' and p_container = 'MED BOX' and l_quantity < ( select 0.2 * avg(l_quantity) from lineitem where l_partkey = p_partkey )", - "v3-plan": "VT03019: symbol p_partkey not found", - "gen4-plan": "VT12001: unsupported: cross-shard correlated subquery" + "plan": "VT12001: unsupported: cross-shard correlated subquery" }, { "comment": "TPC-H query 18", "query": "select c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, sum(l_quantity) from customer, orders, lineitem where o_orderkey in ( select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) > 300 ) and c_custkey = o_custkey and o_orderkey = l_orderkey group by c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice order by o_totalprice desc, o_orderdate limit 100", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, sum(l_quantity) from customer, orders, lineitem where o_orderkey in ( select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) > 300 ) and c_custkey = o_custkey and o_orderkey = l_orderkey group by c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice order by o_totalprice desc, o_orderdate limit 100", "Instructions": { @@ -1178,9 +1246,9 @@ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1,L:1,L:5,L:7,R:1,R:3,L:1,L:4,L:6,L:8,R:2,R:4,L:4,L:2,R:0", + "JoinColumnIndexes": "L:0,L:0,L:5,L:7,R:1,R:3,L:0,L:4,L:6,L:8,R:2,R:4,L:4,L:2,R:0", "JoinVars": { - "o_custkey": 0 + "o_custkey": 1 }, "TableName": "orders_customer", "Inputs": [ @@ -1191,8 +1259,8 @@ "Name": "main", "Sharded": true }, - "FieldQuery": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate)", - "Query": "select o_custkey, o_orderkey, count(*), weight_string(o_custkey), weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate) from orders where :o_orderkey in (select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) > 300) group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate)", + "FieldQuery": "select o_orderkey, o_custkey, count(*), weight_string(o_custkey), weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate) from orders where 1 != 1 group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate)", + "Query": "select o_orderkey, o_custkey, count(*), weight_string(o_custkey), weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate) from orders where :o_orderkey in (select l_orderkey from lineitem group by l_orderkey having sum(l_quantity) > 300) group by o_custkey, weight_string(o_custkey), o_orderkey, weight_string(o_orderkey), o_totalprice, weight_string(o_totalprice), o_orderdate, weight_string(o_orderdate)", "Table": "orders", "Values": [ "::__sq1" @@ -1276,8 +1344,7 @@ { "comment": "TPC-H query 19", "query": "select sum(l_extendedprice* (1 - l_discount)) as revenue from lineitem, part where ( p_partkey = l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and l_quantity >= 1 and l_quantity <= 1 + 10 and p_size between 1 and 5 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and l_quantity >= 10 and l_quantity <= 10 + 10 and p_size between 1 and 10 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and l_quantity >= 20 and l_quantity <= 20 + 10 and p_size between 1 and 15 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' )", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select sum(l_extendedprice* (1 - l_discount)) as revenue from lineitem, part where ( p_partkey = l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and l_quantity >= 1 and l_quantity <= 1 + 10 and p_size between 1 and 5 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and l_quantity >= 10 and l_quantity <= 10 + 10 and p_size between 1 and 10 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' ) or ( p_partkey = l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and l_quantity >= 20 and l_quantity <= 20 + 10 and p_size between 1 and 15 and l_shipmode in ('AIR', 'AIR REG') and l_shipinstruct = 'DELIVER IN PERSON' )", "Instructions": { @@ -1288,18 +1355,18 @@ { "OperatorType": "Projection", "Expressions": [ - "[COLUMN 0] * COALESCE([COLUMN 1], INT64(1)) as revenue" + "[COLUMN 0] * [COLUMN 1] as revenue" ], "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:4,R:1", + "JoinColumnIndexes": "L:0,R:0", "JoinVars": { - "l_partkey": 0, - "l_quantity": 1, - "l_shipinstruct": 3, - "l_shipmode": 2 + "l_partkey": 1, + "l_quantity": 2, + "l_shipinstruct": 4, + "l_shipmode": 3 }, "TableName": "lineitem_part", "Inputs": [ @@ -1310,8 +1377,8 @@ "Name": "main", "Sharded": true }, - "FieldQuery": "select l_partkey, l_quantity, l_shipmode, l_shipinstruct, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_partkey), weight_string(l_quantity), weight_string(l_shipmode), weight_string(l_shipinstruct) from lineitem where 1 != 1 group by l_partkey, weight_string(l_partkey), l_quantity, weight_string(l_quantity), l_shipmode, weight_string(l_shipmode), l_shipinstruct, weight_string(l_shipinstruct)", - "Query": "select l_partkey, l_quantity, l_shipmode, l_shipinstruct, sum(l_extendedprice * (1 - l_discount)) as revenue, weight_string(l_partkey), weight_string(l_quantity), weight_string(l_shipmode), weight_string(l_shipinstruct) from lineitem group by l_partkey, weight_string(l_partkey), l_quantity, weight_string(l_quantity), l_shipmode, weight_string(l_shipmode), l_shipinstruct, weight_string(l_shipinstruct)", + "FieldQuery": "select sum(l_extendedprice * (1 - l_discount)) as revenue, l_partkey, l_quantity, l_shipmode, l_shipinstruct from lineitem where 1 != 1 group by l_partkey, l_quantity, l_shipmode, l_shipinstruct", + "Query": "select sum(l_extendedprice * (1 - l_discount)) as revenue, l_partkey, l_quantity, l_shipmode, l_shipinstruct from lineitem group by l_partkey, l_quantity, l_shipmode, l_shipinstruct", "Table": "lineitem" }, { @@ -1321,8 +1388,8 @@ "Name": "main", "Sharded": true }, - "FieldQuery": "select 1, count(*) from part where 1 != 1 group by 1", - "Query": "select 1, count(*) from part where p_partkey = :l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and :l_quantity >= 1 and :l_quantity <= 1 + 10 and p_size between 1 and 5 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' or p_partkey = :l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and :l_quantity >= 10 and :l_quantity <= 10 + 10 and p_size between 1 and 10 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' or p_partkey = :l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and :l_quantity >= 20 and :l_quantity <= 20 + 10 and p_size between 1 and 15 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' group by 1", + "FieldQuery": "select count(*) from part where 1 != 1 group by .0", + "Query": "select count(*) from part where p_partkey = :l_partkey and p_brand = 'Brand#12' and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') and :l_quantity >= 1 and :l_quantity <= 1 + 10 and p_size between 1 and 5 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' or p_partkey = :l_partkey and p_brand = 'Brand#23' and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') and :l_quantity >= 10 and :l_quantity <= 10 + 10 and p_size between 1 and 10 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' or p_partkey = :l_partkey and p_brand = 'Brand#34' and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') and :l_quantity >= 20 and :l_quantity <= 20 + 10 and p_size between 1 and 15 and :l_shipmode in ('AIR', 'AIR REG') and :l_shipinstruct = 'DELIVER IN PERSON' group by .0", "Table": "part" } ] @@ -1340,14 +1407,12 @@ { "comment": "TPC-H query 20", "query": "select s_name, s_address from supplier, nation where s_suppkey in ( select ps_suppkey from partsupp where ps_partkey in ( select p_partkey from part where p_name like 'forest%' ) and ps_availqty > ( select 0.5 * sum(l_quantity) from lineitem where l_partkey = ps_partkey and l_suppkey = ps_suppkey and l_shipdate >= date('1994-01-01') and l_shipdate < date('1994-01-01') + interval '1' year ) ) and s_nationkey = n_nationkey and n_name = 'CANADA' order by s_name", - "v3-plan": "VT03019: symbol ps_partkey not found", - "gen4-plan": "VT12001: unsupported: cross-shard correlated subquery" + "plan": "VT12001: unsupported: cross-shard correlated subquery" }, { "comment": "TPC-H query 21", "query": "select s_name, count(*) as numwait from supplier, lineitem l1, orders, nation where s_suppkey = l1.l_suppkey and o_orderkey = l1.l_orderkey and o_orderstatus = 'F' and l1.l_receiptdate > l1.l_commitdate and exists ( select * from lineitem l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey <> l1.l_suppkey ) and not exists ( select * from lineitem l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey <> l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate ) and s_nationkey = n_nationkey and n_name = 'SAUDI ARABIA' group by s_name order by numwait desc, s_name limit 100", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select s_name, count(*) as numwait from supplier, lineitem l1, orders, nation where s_suppkey = l1.l_suppkey and o_orderkey = l1.l_orderkey and o_orderstatus = 'F' and l1.l_receiptdate > l1.l_commitdate and exists ( select * from lineitem l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey <> l1.l_suppkey ) and not exists ( select * from lineitem l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey <> l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate ) and s_nationkey = n_nationkey and n_name = 'SAUDI ARABIA' group by s_name order by numwait desc, s_name limit 100", "Instructions": { @@ -1391,9 +1456,9 @@ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1,L:1,L:4,L:2,R:1", + "JoinColumnIndexes": "L:0,L:0,L:4,L:2,R:1", "JoinVars": { - "l1_l_orderkey": 0 + "l1_l_orderkey": 1 }, "TableName": "lineitem_orders", "Inputs": [ @@ -1404,8 +1469,8 @@ "Name": "main", "Sharded": true }, - "FieldQuery": "select l1.l_orderkey, l1.l_suppkey, count(*) as numwait, weight_string(l1.l_orderkey), weight_string(l1.l_suppkey) from lineitem as l1 where 1 != 1 group by l1.l_orderkey, weight_string(l1.l_orderkey), l1.l_suppkey, weight_string(l1.l_suppkey)", - "Query": "select l1.l_orderkey, l1.l_suppkey, count(*) as numwait, weight_string(l1.l_orderkey), weight_string(l1.l_suppkey) from lineitem as l1 where l1.l_receiptdate > l1.l_commitdate and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) group by l1.l_orderkey, weight_string(l1.l_orderkey), l1.l_suppkey, weight_string(l1.l_suppkey)", + "FieldQuery": "select l1.l_suppkey, l1.l_orderkey, count(*) as numwait, weight_string(l1.l_orderkey), weight_string(l1.l_suppkey) from lineitem as l1 where 1 != 1 group by l1.l_orderkey, weight_string(l1.l_orderkey), l1.l_suppkey, weight_string(l1.l_suppkey)", + "Query": "select l1.l_suppkey, l1.l_orderkey, count(*) as numwait, weight_string(l1.l_orderkey), weight_string(l1.l_suppkey) from lineitem as l1 where l1.l_receiptdate > l1.l_commitdate and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) group by l1.l_orderkey, weight_string(l1.l_orderkey), l1.l_suppkey, weight_string(l1.l_suppkey)", "Table": "lineitem" }, { @@ -1416,7 +1481,7 @@ "Sharded": true }, "FieldQuery": "select 1, count(*) as numwait from orders where 1 != 1 group by 1", - "Query": "select 1, count(*) as numwait from orders where o_orderstatus = 'F' and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) and o_orderkey = :l1_l_orderkey group by 1", + "Query": "select 1, count(*) as numwait from orders where o_orderstatus = 'F' and o_orderkey = :l1_l_orderkey and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) group by 1", "Table": "orders", "Values": [ ":l1_l_orderkey" @@ -1442,7 +1507,7 @@ "Sharded": true }, "FieldQuery": "select s_nationkey, count(*) as numwait, weight_string(s_nationkey), s_name, weight_string(s_name) from supplier where 1 != 1 group by s_nationkey, weight_string(s_nationkey), s_name, weight_string(s_name)", - "Query": "select s_nationkey, count(*) as numwait, weight_string(s_nationkey), s_name, weight_string(s_name) from supplier where exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) and s_suppkey = :l1_l_suppkey group by s_nationkey, weight_string(s_nationkey), s_name, weight_string(s_name)", + "Query": "select s_nationkey, count(*) as numwait, weight_string(s_nationkey), s_name, weight_string(s_name) from supplier where s_suppkey = :l1_l_suppkey and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) group by s_nationkey, weight_string(s_nationkey), s_name, weight_string(s_name)", "Table": "supplier", "Values": [ ":l1_l_suppkey" @@ -1457,7 +1522,7 @@ "Sharded": true }, "FieldQuery": "select 1, count(*) as numwait from nation where 1 != 1 group by 1", - "Query": "select 1, count(*) as numwait from nation where n_name = 'SAUDI ARABIA' and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) and n_nationkey = :s_nationkey group by 1", + "Query": "select 1, count(*) as numwait from nation where n_name = 'SAUDI ARABIA' and n_nationkey = :s_nationkey and exists (select 1 from lineitem as l2 where l2.l_orderkey = l1.l_orderkey and l2.l_suppkey != l1.l_suppkey limit 1) and not exists (select 1 from lineitem as l3 where l3.l_orderkey = l1.l_orderkey and l3.l_suppkey != l1.l_suppkey and l3.l_receiptdate > l3.l_commitdate limit 1) group by 1", "Table": "nation", "Values": [ ":s_nationkey" @@ -1489,7 +1554,6 @@ { "comment": "TPC-H query 22", "query": "select cntrycode, count(*) as numcust, sum(c_acctbal) as totacctbal from ( select substring(c_phone from 1 for 2) as cntrycode, c_acctbal from customer where substring(c_phone from 1 for 2) in ('13', '31', '23', '29', '30', '18', '17') and c_acctbal > ( select avg(c_acctbal) from customer where c_acctbal > 0.00 and substring(c_phone from 1 for 2) in ('13', '31', '23', '29', '30', '18', '17') ) and not exists ( select * from orders where o_custkey = c_custkey ) ) as custsale group by cntrycode order by cntrycode", - "v3-plan": "VT03019: symbol c_custkey not found", - "gen4-plan": "VT12001: unsupported: EXISTS sub-queries are only supported with AND clause" + "plan": "VT12001: unsupported: EXISTS sub-queries are only supported with AND clause" } ] diff --git a/go/vt/vtgate/planbuilder/testdata/union_cases.json b/go/vt/vtgate/planbuilder/testdata/union_cases.json index 5aba78fcb40..e6f84dd1631 100644 --- a/go/vt/vtgate/planbuilder/testdata/union_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/union_cases.json @@ -2,7 +2,7 @@ { "comment": "union all between two scatter selects", "query": "select id from user union all select id from music", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user union all select id from music", "Instructions": { @@ -14,22 +14,7 @@ }, "FieldQuery": "select id from `user` where 1 != 1 union all select id from music where 1 != 1", "Query": "select id from `user` union all select id from music", - "Table": "`user`" - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select id from user union all select id from music", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1 union all select id from music where 1 != 1", - "Query": "select id from `user` union all select id from music", - "Table": "`user`" + "Table": "`user`, music" }, "TablesUsed": [ "user.music", @@ -40,43 +25,7 @@ { "comment": "union distinct between two scatter selects", "query": "select id from user union select id from music", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user union select id from music", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music", - "Table": "music" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user union select id from music", "Instructions": { @@ -93,9 +42,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1", - "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music", - "Table": "`user`" + "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select id from music where 1 != 1) as dt where 1 != 1", + "Query": "select id, weight_string(id) from (select id from `user` union select id from music) as dt", + "Table": "`user`, music" } ] }, @@ -108,46 +57,7 @@ { "comment": "union all between two SelectEqualUnique", "query": "select id from user where id = 1 union all select id from user where id = 5", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id = 1 union all select id from user where id = 5", - "Instructions": { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id = 1 union all select id from user where id = 5", "Instructions": { @@ -193,94 +103,53 @@ { "comment": "almost dereks query - two queries with order by and limit being scattered to two different sets of tablets", "query": "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)", - "Instructions": { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from music where 1 != 1", - "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit", - "ResultColumns": 1, - "Table": "music" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)", "Instructions": { - "OperatorType": "Concatenate", + "OperatorType": "SimpleProjection", + "Columns": [ + 0 + ], "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(1)", + "OperatorType": "Concatenate", "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Limit", - "Count": "INT64(1)", - "Inputs": [ + "OperatorType": "Limit", + "Count": "INT64(1)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", + "OrderBy": "(0|1) DESC", + "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit", + "Table": "`user`" + } + ] + }, { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from music where 1 != 1", - "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit", - "ResultColumns": 1, - "Table": "music" + "OperatorType": "Limit", + "Count": "INT64(1)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from music where 1 != 1", + "OrderBy": "(0|1) DESC", + "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit", + "Table": "music" + } + ] } ] } @@ -295,22 +164,7 @@ { "comment": "Union all", "query": "select col1, col2 from user union all select col1, col2 from user_extra", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1, col2 from user union all select col1, col2 from user_extra", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1", - "Query": "select col1, col2 from `user` union all select col1, col2 from user_extra", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1, col2 from user union all select col1, col2 from user_extra", "Instructions": { @@ -322,7 +176,7 @@ }, "FieldQuery": "select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1", "Query": "select col1, col2 from `user` union all select col1, col2 from user_extra", - "Table": "`user`" + "Table": "`user`, user_extra" }, "TablesUsed": [ "user.user", @@ -333,22 +187,7 @@ { "comment": "union operations in subqueries (FROM)", "query": "select * from (select * from user union all select * from user_extra) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from (select * from user union all select * from user_extra) as t", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from (select * from `user` where 1 != 1 union all select * from user_extra where 1 != 1) as t where 1 != 1", - "Query": "select * from (select * from `user` union all select * from user_extra) as t", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from (select * from user union all select * from user_extra) as t", "Instructions": { @@ -360,7 +199,7 @@ }, "FieldQuery": "select * from (select * from `user` where 1 != 1 union all select * from user_extra where 1 != 1) as t where 1 != 1", "Query": "select * from (select * from `user` union all select * from user_extra) as t", - "Table": "`user`" + "Table": "`user`, user_extra" }, "TablesUsed": [ "user.user", @@ -371,22 +210,7 @@ { "comment": "union operations in derived table, without star expression (FROM)¡", "query": "select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t", - "Instructions": { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col1, col2 from (select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1) as t where 1 != 1", - "Query": "select col1, col2 from (select col1, col2 from `user` union all select col1, col2 from user_extra) as t", - "Table": "`user`" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col1,col2 from (select col1, col2 from user union all select col1, col2 from user_extra) as t", "Instructions": { @@ -398,7 +222,7 @@ }, "FieldQuery": "select col1, col2 from (select col1, col2 from `user` where 1 != 1 union all select col1, col2 from user_extra where 1 != 1) as t where 1 != 1", "Query": "select col1, col2 from (select col1, col2 from `user` union all select col1, col2 from user_extra) as t", - "Table": "`user`" + "Table": "`user`, user_extra" }, "TablesUsed": [ "user.user", @@ -409,94 +233,53 @@ { "comment": "union all between two scatter selects, with order by", "query": "(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)", "Instructions": { - "OperatorType": "Concatenate", + "OperatorType": "SimpleProjection", + "Columns": [ + 0 + ], "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(5)", + "OperatorType": "Concatenate", "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Limit", - "Count": "INT64(5)", - "Inputs": [ + "OperatorType": "Limit", + "Count": "INT64(5)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", + "OrderBy": "(0|1) ASC", + "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", + "Table": "`user`" + } + ] + }, { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from music where 1 != 1", - "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit", - "ResultColumns": 1, - "Table": "music" - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)", - "Instructions": { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(5)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Limit", - "Count": "INT64(5)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from music where 1 != 1", - "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit", - "ResultColumns": 1, - "Table": "music" + "OperatorType": "Limit", + "Count": "INT64(5)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from music where 1 != 1", + "OrderBy": "(0|1) DESC", + "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit", + "Table": "music" + } + ] } ] } @@ -511,74 +294,19 @@ { "comment": "union all on scatter and single route", "query": "select id from user where id = 1 union select id from user where id = 1 union all select id from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id = 1 union select id from user where id = 1 union all select id from user", - "Instructions": { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1 union select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 1 union select id from `user` where id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id = 1 union select id from user where id = 1 union all select id from user", "Instructions": { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1 union select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 1 union select id from `user` where id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id from `user` where 1 != 1 union select id from `user` where 1 != 1 union all select id from `user` where 1 != 1", + "Query": "select id from `user` where id = 1 union select id from `user` where id = 1 union all select id from `user`", + "Table": "`user`" }, "TablesUsed": [ "user.user" @@ -588,51 +316,14 @@ { "comment": "union of information_schema with normal table", "query": "select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS union select user_name from unsharded", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS union select user_name from unsharded", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS where 1 != 1", - "Query": "select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS", - "Table": "information_schema.CHARACTER_SETS" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select user_name from unsharded where 1 != 1", - "Query": "select user_name from unsharded", - "Table": "unsharded" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS union select user_name from unsharded", "Instructions": { "OperatorType": "Distinct", "Collations": [ - "(0:1)" + "0: utf8mb4_0900_ai_ci" ], - "ResultColumns": 1, "Inputs": [ { "OperatorType": "Concatenate", @@ -644,8 +335,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select CHARACTER_SET_NAME, weight_string(CHARACTER_SET_NAME) from information_schema.CHARACTER_SETS where 1 != 1", - "Query": "select distinct CHARACTER_SET_NAME, weight_string(CHARACTER_SET_NAME) from information_schema.CHARACTER_SETS", + "FieldQuery": "select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS where 1 != 1", + "Query": "select distinct CHARACTER_SET_NAME from information_schema.CHARACTER_SETS", "Table": "information_schema.CHARACTER_SETS" }, { @@ -655,8 +346,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select user_name, weight_string(user_name) from unsharded where 1 != 1", - "Query": "select distinct user_name, weight_string(user_name) from unsharded", + "FieldQuery": "select user_name from unsharded where 1 != 1", + "Query": "select distinct user_name from unsharded", "Table": "unsharded" } ] @@ -671,143 +362,12 @@ { "comment": "union of information_schema with normal table", "query": "select * from unsharded union select * from information_schema.CHARACTER_SETS", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from unsharded union select * from information_schema.CHARACTER_SETS", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from unsharded where 1 != 1", - "Query": "select * from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from information_schema.CHARACTER_SETS where 1 != 1", - "Query": "select * from information_schema.CHARACTER_SETS", - "Table": "information_schema.CHARACTER_SETS" - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from unsharded union select * from information_schema.CHARACTER_SETS", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select * from unsharded where 1 != 1", - "Query": "select distinct * from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select CHARACTER_SET_NAME, DEFAULT_COLLATE_NAME, DESCRIPTION, MAXLEN from information_schema.CHARACTER_SETS where 1 != 1", - "Query": "select distinct CHARACTER_SET_NAME, DEFAULT_COLLATE_NAME, DESCRIPTION, MAXLEN from information_schema.CHARACTER_SETS", - "Table": "information_schema.CHARACTER_SETS" - } - ] - } - ] - }, - "TablesUsed": [ - "main.unsharded" - ] - } + "plan": "VT09015: schema tracking required" }, { "comment": "multi-shard union", "query": "(select id from user union select id from music) union select 1 from dual", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(select id from user union select id from music) union select 1 from dual", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from music where 1 != 1", - "Query": "select id from music", - "Table": "music" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from dual where 1 != 1", - "Query": "select 1 from dual", - "Table": "dual" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select id from user union select id from music) union select 1 from dual", "Instructions": { @@ -818,310 +378,67 @@ "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1", - "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1, weight_string(1) from dual where 1 != 1", - "Query": "select distinct 1, weight_string(1) from dual", - "Table": "dual" - } - ] - } - ] - }, - "TablesUsed": [ - "main.dual", - "user.music", - "user.user" - ] - } - }, - { - "comment": "multi-shard union", - "query": "select 1 from music union (select id from user union all select name from unsharded)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from music union (select id from user union all select name from unsharded)", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music where 1 != 1", - "Query": "select 1 from music", - "Table": "music" - }, - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select `name` from unsharded where 1 != 1", - "Query": "select `name` from unsharded", - "Table": "unsharded" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": "VT12001: unsupported: nesting of UNIONs on the right-hand side" - }, - { - "comment": "multi-shard union", - "query": "select 1 from music union (select id from user union select name from unsharded)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from music union (select id from user union select name from unsharded)", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music where 1 != 1", - "Query": "select 1 from music", - "Table": "music" - }, - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select `name` from unsharded where 1 != 1", - "Query": "select `name` from unsharded", - "Table": "unsharded" - } - ] - } - ] - } - ] - } - ] - } - }, - "gen4-plan": "VT12001: unsupported: nesting of UNIONs on the right-hand side" - }, - { - "comment": "union with the same target shard because of vindex", - "query": "select * from music where id = 1 union select * from user where id = 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from music where id = 1 union select * from user where id = 1", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from music where 1 != 1", - "Query": "select * from music where id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "music_user_map" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select * from music where id = 1 union select * from user where id = 1", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from music where 1 != 1", - "Query": "select distinct * from music where id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "music_user_map" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select distinct * from `user` where id = 1", - "Table": "`user`", - "Values": [ - "INT64(1)" - ], - "Vindex": "user_index" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select id from music where 1 != 1 union select 1 from dual where 1 != 1) as dt where 1 != 1", + "Query": "select id, weight_string(id) from (select id from `user` union select id from music union select 1 from dual) as dt", + "Table": "`user`, dual, music" } ] }, "TablesUsed": [ + "main.dual", "user.music", "user.user" ] } }, { - "comment": "union with different target shards", - "query": "select 1 from music where id = 1 union select 1 from music where id = 2", - "v3-plan": { + "comment": "union with the same target shard because of vindex", + "query": "select * from music where user_id = 1 union select * from user where id = 1", + "plan": { "QueryType": "SELECT", - "Original": "select 1 from music where id = 1 union select 1 from music where id = 2", + "Original": "select * from music where user_id = 1 union select * from user where id = 1", "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music where 1 != 1", - "Query": "select 1 from music where id = 1", - "Table": "music", - "Values": [ - "INT64(1)" - ], - "Vindex": "music_user_map" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music where 1 != 1", - "Query": "select 1 from music where id = 2", - "Table": "music", - "Values": [ - "INT64(2)" - ], - "Vindex": "music_user_map" - } - ] - } - ] - } - }, - "gen4-plan": { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from music where 1 != 1 union select * from `user` where 1 != 1", + "Query": "select * from music where user_id = 1 union select * from `user` where id = 1", + "Table": "`user`, music", + "Values": [ + "INT64(1)" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "union with the different target shard because of vindex (music -> lookup vindex, user -> hash vindex)", + "query": "select * from music where id = 1 union select * from user where id = 1", + "plan": "VT09015: schema tracking required" + }, + { + "comment": "union with different target shards", + "query": "select 1 from music where id = 1 union select 1 from music where id = 2", + "plan": { "QueryType": "SELECT", "Original": "select 1 from music where id = 1 union select 1 from music where id = 2", "Instructions": { "OperatorType": "Distinct", "Collations": [ - "0: binary" + "0" ], "Inputs": [ { @@ -1169,47 +486,7 @@ { "comment": "multiple select statement have inner order by with union - TODO (systay) no need to send down ORDER BY if we are going to loose it with UNION DISTINCT", "query": "(select id from user order by 1 desc) union (select id from user order by 1 asc)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(select id from user order by 1 desc) union (select id from user order by 1 asc)", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from `user` order by 1 desc", - "ResultColumns": 1, - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by 1 asc", - "ResultColumns": 1, - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select id from user order by 1 desc) union (select id from user order by 1 asc)", "Instructions": { @@ -1226,10 +503,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "(select id, weight_string(id) from `user` where 1 != 1) union (select id, weight_string(id) from `user` where 1 != 1)", - "OrderBy": "(0|1) DESC", - "Query": "(select id, weight_string(id) from `user` order by id desc) union (select id, weight_string(id) from `user` order by id asc)", - "ResultColumns": 1, + "FieldQuery": "select id, weight_string(id) from ((select id from `user` where 1 != 1) union (select id from `user` where 1 != 1)) as dt where 1 != 1", + "Query": "select id, weight_string(id) from ((select id from `user` order by id desc) union (select id from `user` order by id asc)) as dt", "Table": "`user`" } ] @@ -1242,77 +517,26 @@ { "comment": "multiple unions", "query": "select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from dual where 1 != 1 union select null from dual where 1 != 1 union select 1.0 from dual where 1 != 1 union select '1' from dual where 1 != 1 union select 2 from dual where 1 != 1", - "Query": "select 1 from dual union select null from dual union select 1.0 from dual union select '1' from dual union select 2 from dual", - "Table": "dual" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 2.0 from `user` where 1 != 1", - "Query": "select 2.0 from `user`", - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 union select null union select 1.0 union select '1' union select 2 union select 2.0 from user", "Instructions": { "OperatorType": "Distinct", "Collations": [ - "0: binary" + "(0:1)" ], + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 1 from dual where 1 != 1 union all select null from dual where 1 != 1 union all select 1.0 from dual where 1 != 1 union all select '1' from dual where 1 != 1 union select 2 from dual where 1 != 1", - "Query": "select 1 from dual union all select null from dual union all select 1.0 from dual union all select '1' from dual union select 2 from dual", - "Table": "dual" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 2.0 from `user` where 1 != 1", - "Query": "select distinct 2.0 from `user`", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `1`, weight_string(`1`) from (select 1 from dual where 1 != 1 union select null from dual where 1 != 1 union select 1.0 from dual where 1 != 1 union select '1' from dual where 1 != 1 union select 2 from dual where 1 != 1 union select 2.0 from `user` where 1 != 1) as dt where 1 != 1", + "Query": "select `1`, weight_string(`1`) from (select 1 from dual union select null from dual union select 1.0 from dual union select '1' from dual union select 2 from dual union select 2.0 from `user`) as dt", + "Table": "`user`, dual" } ] }, @@ -1325,62 +549,7 @@ { "comment": "union distinct between a scatter query and a join (other side)", "query": "(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.`name` from `user` where 1 != 1", - "Query": "select `user`.id, `user`.`name` from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.extra = 'asdf'", - "Table": "user_extra" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 'b', 'c' from `user` where 1 != 1", - "Query": "select 'b', 'c' from `user`", - "Table": "`user`" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select user.id, user.name from user join user_extra where user_extra.extra = 'asdf') union select 'b','c' from user", "Instructions": { @@ -1408,7 +577,7 @@ "Sharded": true }, "FieldQuery": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user` where 1 != 1", - "Query": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user`", + "Query": "select distinct `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user`", "Table": "`user`" }, { @@ -1419,7 +588,7 @@ "Sharded": true }, "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.extra = 'asdf'", + "Query": "select distinct 1 from user_extra where user_extra.extra = 'asdf'", "Table": "user_extra" } ] @@ -1448,71 +617,15 @@ { "comment": "union distinct between a scatter query and a join (other side)", "query": "select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 'b', 'c' from `user` where 1 != 1", - "Query": "select 'b', 'c' from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.`name` from `user` where 1 != 1", - "Query": "select `user`.id, `user`.`name` from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.extra = 'asdf'", - "Table": "user_extra" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 'b','c' from user union (select user.id, user.name from user join user_extra where user_extra.extra = 'asdf')", "Instructions": { "OperatorType": "Distinct", "Collations": [ - "(0:2)", - "(1:3)" + "0: utf8mb4_0900_ai_ci", + "1: utf8mb4_0900_ai_ci" ], - "ResultColumns": 2, "Inputs": [ { "OperatorType": "Concatenate", @@ -1524,14 +637,14 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 'b', 'c', weight_string('b'), weight_string('c') from `user` where 1 != 1", - "Query": "select distinct 'b', 'c', weight_string('b'), weight_string('c') from `user`", + "FieldQuery": "select 'b', 'c' from `user` where 1 != 1", + "Query": "select distinct 'b', 'c' from `user`", "Table": "`user`" }, { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,L:2,L:3", + "JoinColumnIndexes": "L:0,L:1", "TableName": "`user`_user_extra", "Inputs": [ { @@ -1541,8 +654,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user` where 1 != 1", - "Query": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user`", + "FieldQuery": "select `user`.id, `user`.`name` from `user` where 1 != 1", + "Query": "select distinct `user`.id, `user`.`name` from `user`", "Table": "`user`" }, { @@ -1553,7 +666,7 @@ "Sharded": true }, "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra where user_extra.extra = 'asdf'", + "Query": "select distinct 1 from user_extra where user_extra.extra = 'asdf'", "Table": "user_extra" } ] @@ -1571,63 +684,13 @@ { "comment": "unmergable because we are using aggregation", "query": "select count(*) as s from user union select count(*) as s from music", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select count(*) as s from user union select count(*) as s from music", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) as s from `user` where 1 != 1", - "Query": "select count(*) as s from `user`", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count(0) AS count", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) as s from music where 1 != 1", - "Query": "select count(*) as s from music", - "Table": "music" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select count(*) as s from user union select count(*) as s from music", "Instructions": { "OperatorType": "Distinct", "Collations": [ - "0: binary" + "0" ], "Inputs": [ { @@ -1682,98 +745,26 @@ { "comment": "Union in derived table with first SELECT being an UNION", "query": "select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id + 1 from `user` where 1 != 1", - "Query": "select id + 1 from `user`", - "Table": "`user`" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_id from user_extra where 1 != 1", - "Query": "select user_id from user_extra", - "Table": "user_extra" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from ((select id from user union select id+1 from user) union select user_id from user_extra) as t", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 + "OperatorType": "Distinct", + "Collations": [ + "(0:1)" ], + "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Distinct", - "Collations": [ - "(0:1)" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union all select id + 1, weight_string(id + 1) from `user` where 1 != 1 union select user_id, weight_string(user_id) from user_extra where 1 != 1", - "Query": "select id, weight_string(id) from `user` union all select id + 1, weight_string(id + 1) from `user` union select user_id, weight_string(user_id) from user_extra", - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select id + 1 from `user` where 1 != 1 union select user_id from user_extra where 1 != 1) as dt where 1 != 1", + "Query": "select id, weight_string(id) from (select id from `user` union select id + 1 from `user` union select user_id from user_extra) as dt", + "Table": "`user`, user_extra" } ] }, @@ -1786,8 +777,7 @@ { "comment": "gen4 optimises away ORDER BY when it's safe to do", "query": "(select id from user union select id from music order by id) union select 1 from unsharded", - "v3-plan": "VT12001: unsupported: ORDER BY on top of UNION", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select id from user union select id from music order by id) union select 1 from unsharded", "Instructions": { @@ -1807,9 +797,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select id, weight_string(id) from music where 1 != 1", - "Query": "select id, weight_string(id) from `user` union select id, weight_string(id) from music", - "Table": "`user`" + "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select id from music where 1 != 1) as dt where 1 != 1", + "Query": "select id, weight_string(id) from (select id from `user` union select id from music) as dt", + "Table": "`user`, music" }, { "OperatorType": "Route", @@ -1836,49 +826,7 @@ { "comment": "push down the ::upper_limit to the sources, since we are doing DISTINCT on them, it's safe", "query": "select id from user union select 3 limit 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user union select 3 limit 10", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 3 from dual where 1 != 1", - "Query": "select 3 from dual", - "Table": "dual" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user union select 3 limit 10", "Instructions": { @@ -1893,31 +841,15 @@ "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "Query": "select distinct id, weight_string(id) from `user` limit :__upper_limit", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select 3, weight_string(3) from dual where 1 != 1", - "Query": "select distinct 3, weight_string(3) from dual limit :__upper_limit", - "Table": "dual" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select 3 from dual where 1 != 1) as dt where 1 != 1", + "Query": "select id, weight_string(id) from (select id from `user` union select 3 from dual limit :__upper_limit) as dt", + "Table": "`user`, dual" } ] } @@ -1932,22 +864,7 @@ { "comment": "silly query that should be collapsed into a single unsharded UNION route", "query": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1", - "v3-plan": { - "QueryType": "SELECT", - "Original": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1", - "Instructions": { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "(select 1 from unsharded where 1 != 1 union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1) union select 1 from unsharded where 1 != 1 union all select 1 from unsharded where 1 != 1", - "Query": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1 asc) union select 1 from unsharded union all select 1 from unsharded order by 1 asc", - "Table": "unsharded" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "(select 1 from unsharded union select 1 from unsharded union all select 1 from unsharded order by 1) union select 1 from unsharded union all select 1 from unsharded order by 1", "Instructions": { @@ -1969,85 +886,7 @@ { "comment": "UNION that needs to be reordered to be merged more aggressively. Gen4 is able to get it down to 2 routes", "query": "select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra", - "Instructions": { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Distinct", - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select col from unsharded where 1 != 1", - "Query": "select col from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user`", - "Table": "`user`" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select col2 from unsharded where 1 != 1", - "Query": "select col2 from unsharded", - "Table": "unsharded" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from user_extra where 1 != 1", - "Query": "select col from user_extra", - "Table": "user_extra" - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra", "Instructions": { @@ -2067,8 +906,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select col, weight_string(col) from unsharded where 1 != 1 union select col2, weight_string(col2) from unsharded where 1 != 1", - "Query": "select col, weight_string(col) from unsharded union select col2, weight_string(col2) from unsharded", + "FieldQuery": "select col, weight_string(col) from (select col from unsharded where 1 != 1 union select col2 from unsharded where 1 != 1) as dt where 1 != 1", + "Query": "select col, weight_string(col) from (select col from unsharded union select col2 from unsharded) as dt", "Table": "unsharded" }, { @@ -2078,9 +917,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1 union select col, weight_string(col) from user_extra where 1 != 1", - "Query": "select id, weight_string(id) from `user` union select col, weight_string(col) from user_extra", - "Table": "`user`" + "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select col from user_extra where 1 != 1) as dt where 1 != 1", + "Query": "select id, weight_string(id) from (select id from `user` union select col from user_extra) as dt", + "Table": "`user`, user_extra" } ] } @@ -2096,88 +935,7 @@ { "comment": "derived table with union", "query": "select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "tbl1_id": 0 - }, - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Concatenate", - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(5)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Limit", - "Count": "INT64(5)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" - } - ] - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select tbl2.id from `user` as tbl2 where 1 != 1", - "Query": "select tbl2.id from `user` as tbl2 where tbl2.id = :tbl1_id", - "Table": "`user`", - "Values": [ - ":tbl1_id" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select tbl2.id FROM ((select id from user order by id limit 5) union all (select id from user order by id desc limit 5)) as tbl1 INNER JOIN user as tbl2 ON tbl1.id = tbl2.id", "Instructions": { @@ -2190,51 +948,41 @@ "TableName": "`user`_`user`", "Inputs": [ { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "OperatorType": "Concatenate", "Inputs": [ { - "OperatorType": "Concatenate", + "OperatorType": "Limit", + "Count": "INT64(5)", "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(5)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" - } - ] - }, + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", + "OrderBy": "(0|1) ASC", + "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", + "Table": "`user`" + } + ] + }, + { + "OperatorType": "Limit", + "Count": "INT64(5)", + "Inputs": [ { - "OperatorType": "Limit", - "Count": "INT64(5)", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", - "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit", - "ResultColumns": 1, - "Table": "`user`" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", + "OrderBy": "(0|1) DESC", + "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit", + "Table": "`user`" } ] } @@ -2275,31 +1023,63 @@ { "comment": "different number of columns", "query": "select id, 42 from user where id = 1 union all select id from user where id = 5", - "v3-plan": "The used SELECT statements have a different number of columns (errno 1222) (sqlstate 21000) during query: select id, 42 from `user` where id = 1 union all select id from `user` where id = 5", - "gen4-plan": "The used SELECT statements have a different number of columns" + "plan": "The used SELECT statements have a different number of columns: 2, 1" }, { "comment": "union with invalid order by clause with table qualifier", "query": "select id from user union select 3 order by user.id", - "v3-plan": "VT12001: unsupported: ORDER BY on top of UNION", - "gen4-plan": "Table `user` from one of the SELECTs cannot be used in global ORDER clause" + "plan": "Table `user` from one of the SELECTs cannot be used in global ORDER clause" }, { "comment": "union with invalid order by clause with table qualifier", "query": "select id from user union select 3 order by id", - "plan": "VT12001: unsupported: ORDER BY on top of UNION" + "plan": { + "QueryType": "SELECT", + "Original": "select id from user union select 3 order by id", + "Instructions": { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(0|1) ASC", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Distinct", + "Collations": [ + "(0:1)", + "1" + ], + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select 3 from dual where 1 != 1) as dt where 1 != 1", + "Query": "select id, weight_string(id) from (select id from `user` union select 3 from dual) as dt", + "Table": "`user`, dual" + } + ] + } + ] + }, + "TablesUsed": [ + "main.dual", + "user.user" + ] + } }, { "comment": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t", "query": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t", - "v3-plan": "VT12001: unsupported: expression on results of a cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 2 + "OperatorType": "Projection", + "Expressions": [ + "INT64(1) as 1" ], "Inputs": [ { @@ -2318,8 +1098,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id + 42 as foo, weight_string(id + 42), 1 from `user` where 1 != 1", - "Query": "select distinct id + 42 as foo, weight_string(id + 42), 1 from `user`", + "FieldQuery": "select id + 42 as foo, weight_string(id + 42) from `user` where 1 != 1", + "Query": "select distinct id + 42 as foo, weight_string(id + 42) from `user`", "Table": "`user`" }, { @@ -2329,8 +1109,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select 1 + id as foo, weight_string(1 + id), 1 from unsharded where 1 != 1", - "Query": "select distinct 1 + id as foo, weight_string(1 + id), 1 from unsharded", + "FieldQuery": "select 1 + id as foo, weight_string(1 + id) from unsharded where 1 != 1", + "Query": "select distinct 1 + id as foo, weight_string(1 + id) from unsharded", "Table": "unsharded" } ] @@ -2348,30 +1128,60 @@ { "comment": "systable union query in derived table with constraint on outside (without star projection)", "query": "select * from (select kcu.`COLUMN_NAME` from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select kcu.`COLUMN_NAME` from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `COLUMN_NAME` = 'primary'", - "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from (select kcu.`COLUMN_NAME` from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select kcu.`COLUMN_NAME` from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `COLUMN_NAME` = 'primary'", "Instructions": { - "OperatorType": "Route", - "Variant": "DBA", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select kcu.COLUMN_NAME from (select kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1 union select kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1) as kcu where 1 != 1", - "Query": "select kcu.COLUMN_NAME from (select kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname and kcu.table_name = :kcu_table_name and kcu.COLUMN_NAME = 'primary' union select kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname and kcu.table_name = :kcu_table_name1 and kcu.COLUMN_NAME = 'primary') as kcu", - "SysTableTableName": "[kcu_table_name1:VARCHAR(\"music\"), kcu_table_name:VARCHAR(\"user_extra\")]", - "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]", - "Table": "information_schema.key_column_usage" + "OperatorType": "Filter", + "Predicate": "COLUMN_NAME = 'primary'", + "Inputs": [ + { + "OperatorType": "Distinct", + "Collations": [ + "0: utf8mb4_0900_ai_ci" + ], + "Inputs": [ + { + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1", + "Query": "select distinct kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.table_name = :kcu_table_name /* VARCHAR */", + "SysTableTableName": "[kcu_table_name:VARCHAR(\"user_extra\")]", + "SysTableTableSchema": "[VARCHAR(\"user\")]", + "Table": "information_schema.key_column_usage" + }, + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1", + "Query": "select distinct kcu.COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.table_name = :kcu_table_name1 /* VARCHAR */", + "SysTableTableName": "[kcu_table_name1:VARCHAR(\"music\")]", + "SysTableTableSchema": "[VARCHAR(\"user\")]", + "Table": "information_schema.key_column_usage" + } + ] + } + ] + } + ] } } }, { "comment": "pushes predicate on both sides of UNION", "query": "select * from (select name, id as foo from user union select 'extra', user_id from user_extra) X where X.foo = 3", - "v3-plan": "VT12001: unsupported: filtering on results of cross-shard subquery", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from (select name, id as foo from user union select 'extra', user_id from user_extra) X where X.foo = 3", "Instructions": { @@ -2383,7 +1193,7 @@ }, "FieldQuery": "select X.`name`, X.foo from (select `name`, id as foo from `user` where 1 != 1 union select 'extra', user_id from user_extra where 1 != 1) as X where 1 != 1", "Query": "select X.`name`, X.foo from (select `name`, id as foo from `user` where id = 3 union select 'extra', user_id from user_extra where user_id = 3) as X", - "Table": "`user`", + "Table": "`user`, user_extra", "Values": [ "INT64(3)" ], @@ -2398,23 +1208,271 @@ { "comment": "systable union query in derived table with constraint on outside (star projection)", "query": "select * from (select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `constraint_name` = 'primary'", - "v3-plan": "VT03019: symbol constraint_name not found", - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from (select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'user_extra' union select * from `information_schema`.`key_column_usage` `kcu` where `kcu`.`table_schema` = 'user' and `kcu`.`table_name` = 'music') `kcu` where `constraint_name` = 'primary'", + "Instructions": { + "OperatorType": "Filter", + "Predicate": "constraint_name = 'primary'", + "Inputs": [ + { + "OperatorType": "Distinct", + "Collations": [ + "0: utf8mb4_0900_ai_ci", + "1: utf8mb4_0900_ai_ci", + "2: utf8mb4_0900_ai_ci", + "3: utf8mb4_0900_ai_ci", + "4: utf8mb4_0900_ai_ci", + "5: utf8mb4_0900_ai_ci", + "6: utf8mb4_0900_ai_ci", + "7", + "8", + "9: utf8mb4_0900_ai_ci", + "10: utf8mb4_0900_ai_ci", + "11: utf8mb4_0900_ai_ci" + ], + "Inputs": [ + { + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1", + "Query": "select distinct kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.table_name = :kcu_table_name /* VARCHAR */", + "SysTableTableName": "[kcu_table_name:VARCHAR(\"user_extra\")]", + "SysTableTableSchema": "[VARCHAR(\"user\")]", + "Table": "information_schema.key_column_usage" + }, + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1", + "Query": "select distinct kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.table_name = :kcu_table_name1 /* VARCHAR */", + "SysTableTableName": "[kcu_table_name1:VARCHAR(\"music\")]", + "SysTableTableSchema": "[VARCHAR(\"user\")]", + "Table": "information_schema.key_column_usage" + } + ] + } + ] + } + ] + } + } + }, + { + "comment": "unknown columns are OK as long as the whole query is unsharded", + "query": "(SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'FAILED' ORDER BY buildNumber DESC LIMIT 1) AS last_failed) UNION ALL (SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'SUCCEEDED' ORDER BY buildNumber DESC LIMIT 1) AS last_succeeded) ORDER BY buildNumber DESC LIMIT 1", + "plan": { + "QueryType": "SELECT", + "Original": "(SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'FAILED' ORDER BY buildNumber DESC LIMIT 1) AS last_failed) UNION ALL (SELECT * FROM (SELECT * FROM unsharded WHERE branchId = 203622 AND buildNumber <= 113893 AND state = 'SUCCEEDED' ORDER BY buildNumber DESC LIMIT 1) AS last_succeeded) ORDER BY buildNumber DESC LIMIT 1", "Instructions": { "OperatorType": "Route", - "Variant": "DBA", + "Variant": "Unsharded", "Keyspace": { "Name": "main", "Sharded": false }, - "FieldQuery": "select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from (select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1 union select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where 1 != 1) as kcu where 1 != 1", - "Query": "select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from (select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname and kcu.table_name = :kcu_table_name and kcu.CONSTRAINT_NAME = 'primary' union select kcu.CONSTRAINT_CATALOG, kcu.CONSTRAINT_SCHEMA, kcu.CONSTRAINT_NAME, kcu.TABLE_CATALOG, kcu.TABLE_SCHEMA, kcu.TABLE_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION, kcu.POSITION_IN_UNIQUE_CONSTRAINT, kcu.REFERENCED_TABLE_SCHEMA, kcu.REFERENCED_TABLE_NAME, kcu.REFERENCED_COLUMN_NAME from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname and kcu.table_name = :kcu_table_name1 and kcu.CONSTRAINT_NAME = 'primary') as kcu", - "SysTableTableName": "[kcu_table_name1:VARCHAR(\"music\"), kcu_table_name:VARCHAR(\"user_extra\")]", - "SysTableTableSchema": "[VARCHAR(\"user\"), VARCHAR(\"user\")]", - "Table": "information_schema.key_column_usage" - } + "FieldQuery": "select * from (select * from unsharded where 1 != 1) as last_failed where 1 != 1 union all select * from (select * from unsharded where 1 != 1) as last_succeeded where 1 != 1", + "Query": "select * from (select * from unsharded where branchId = 203622 and buildNumber <= 113893 and state = 'FAILED' order by buildNumber desc limit 1) as last_failed union all select * from (select * from unsharded where branchId = 203622 and buildNumber <= 113893 and state = 'SUCCEEDED' order by buildNumber desc limit 1) as last_succeeded order by buildNumber desc limit 1", + "Table": "unsharded" + }, + "TablesUsed": [ + "main.unsharded" + ] + } + }, + { + "comment": "union of unsharded route with sharded join with involvement of weight string", + "query": "select id, foo, bar from unsharded union select user.intcol, user.textcol2, authoritative.col2 from user join authoritative", + "plan": { + "QueryType": "SELECT", + "Original": "select id, foo, bar from unsharded union select user.intcol, user.textcol2, authoritative.col2 from user join authoritative", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "(0:3)", + "(1:4)", + "(2:5)" + ], + "ResultColumns": 3, + "Inputs": [ + { + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select id, foo, bar, weight_string(id), weight_string(foo), weight_string(bar) from unsharded where 1 != 1", + "Query": "select distinct id, foo, bar, weight_string(id), weight_string(foo), weight_string(bar) from unsharded", + "Table": "unsharded" + }, + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:1,R:0,L:2,L:3,R:1", + "TableName": "`user`_authoritative", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.intcol, `user`.textcol2, weight_string(`user`.intcol), weight_string(`user`.textcol2) from `user` where 1 != 1", + "Query": "select distinct `user`.intcol, `user`.textcol2, weight_string(`user`.intcol), weight_string(`user`.textcol2) from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select authoritative.col2, weight_string(authoritative.col2) from authoritative where 1 != 1", + "Query": "select distinct authoritative.col2, weight_string(authoritative.col2) from authoritative", + "Table": "authoritative" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "main.unsharded", + "user.authoritative", + "user.user" + ] + } + }, + { + "comment": "UNION ALL with repeating column on the LHS", + "query": "select foo, foo, foo from user union all select bar, baz, toto from music", + "plan": { + "QueryType": "SELECT", + "Original": "select foo, foo, foo from user union all select bar, baz, toto from music", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, foo, foo from `user` where 1 != 1 union all select bar, baz, toto from music where 1 != 1", + "Query": "select foo, foo, foo from `user` union all select bar, baz, toto from music", + "Table": "`user`, music" + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "UNION ALL with repeating column on the RHS", + "query": "select bar, baz, toto from music union all select foo, foo, foo from user", + "plan": { + "QueryType": "SELECT", + "Original": "select bar, baz, toto from music union all select foo, foo, foo from user", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select bar, baz, toto from music where 1 != 1 union all select foo, foo, foo from `user` where 1 != 1", + "Query": "select bar, baz, toto from music union all select foo, foo, foo from `user`", + "Table": "`user`, music" + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "UNION with repeating column on the RHS", + "query": "select bar, baz, toto from music union select foo, foo, foo from user", + "plan": { + "QueryType": "SELECT", + "Original": "select bar, baz, toto from music union select foo, foo, foo from user", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "(0:3)", + "(1:4)", + "(2:5)" + ], + "ResultColumns": 3, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select bar, baz, toto, weight_string(bar), weight_string(baz), weight_string(toto) from (select bar, baz, toto from music where 1 != 1 union select foo, foo, foo from `user` where 1 != 1) as dt where 1 != 1", + "Query": "select bar, baz, toto, weight_string(bar), weight_string(baz), weight_string(toto) from (select bar, baz, toto from music union select foo, foo, foo from `user`) as dt", + "Table": "`user`, music" + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "UNION with repeating column on the LHS", + "query": "select foo, foo, foo from user union select bar, baz, toto from music", + "plan": { + "QueryType": "SELECT", + "Original": "select foo, foo, foo from user union select bar, baz, toto from music", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "(0:3)", + "(1:3)", + "(2:3)" + ], + "ResultColumns": 3, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select foo, foo, foo, weight_string(foo) from (select foo, foo, foo from `user` where 1 != 1 union select bar, baz, toto from music where 1 != 1) as dt where 1 != 1", + "Query": "select foo, foo, foo, weight_string(foo) from (select foo, foo, foo from `user` union select bar, baz, toto from music) as dt", + "Table": "`user`, music" + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json index 5a2c92451d4..9919b600b23 100644 --- a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json @@ -27,14 +27,7 @@ { "comment": "scatter order by with * expression", "query": "select * from user order by id", - "v3-plan": "VT12001: unsupported: in scatter query: ORDER BY must reference a column in the SELECT list: id asc", - "gen4-plan": "VT12001: unsupported: '*' expression in cross-shard query" - }, - { - "comment": "order by rand on a cross-shard subquery", - "query": "select id from (select user.id, user.col from user join user_extra) as t order by rand()", - "v3-plan": "VT12001: unsupported: memory sort: complex ORDER BY expression: rand()", - "gen4-plan": "VT13001: [BUG] in scatter query: complex ORDER BY expression: rand()" + "plan": "VT12001: unsupported: '*' expression in cross-shard query" }, { "comment": "natural join", @@ -44,14 +37,12 @@ { "comment": "join with USING construct", "query": "select * from user join user_extra using(id)", - "v3-plan": "VT12001: unsupported: JOIN with USING(column_list) clause for complex queries", - "gen4-plan": "can't handle JOIN USING without authoritative tables" + "plan": "VT09015: schema tracking required" }, { "comment": "join with USING construct with 3 tables", "query": "select user.id from user join user_extra using(id) join music using(id2)", - "v3-plan": "VT12001: unsupported: JOIN with USING(column_list) clause for complex queries", - "gen4-plan": "can't handle JOIN USING without authoritative tables" + "plan": "VT09015: schema tracking required" }, { "comment": "natural left join", @@ -71,97 +62,47 @@ { "comment": "Group by column number, used with non-aliased expression (duplicated code)", "query": "select * from user group by 1", - "v3-plan": "VT12001: unsupported: '*' expression in cross-shard query", - "gen4-plan": "cannot use column offsets in group statement when using `*`" - }, - { - "comment": "complex group by expression", - "query": "select a from user group by a+1", - "v3-plan": "VT12001: unsupported: in scatter query: only simple references are allowed", - "gen4-plan": "VT13001: [BUG] in scatter query: complex ORDER BY expression: a + 1" - }, - { - "comment": "Complex aggregate expression on scatter", - "query": "select 1+count(*) from user", - "plan": "VT12001: unsupported: in scatter query: complex aggregate expression" + "plan": "cannot use column offsets in group statement when using `*`" }, { "comment": "Multi-value aggregates not supported", "query": "select count(a,b) from user", - "v3-plan": "VT12001: unsupported: only one expression is allowed inside aggregates: count(a, b)", - "gen4-plan": "VT03001: aggregate functions take a single argument 'count(a, b)'" - }, - { - "comment": "scatter aggregate complex order by", - "query": "select id from user group by id order by id+1", - "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: id + 1", - "gen4-plan": "VT13001: [BUG] in scatter query: complex ORDER BY expression: id + 1" - }, - { - "comment": "Scatter order by is complex with aggregates in select", - "query": "select col, count(*) from user group by col order by col+1", - "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: col + 1", - "gen4-plan": "VT13001: [BUG] in scatter query: complex ORDER BY expression: col + 1" - }, - { - "comment": "Aggregate detection (group_concat)", - "query": "select group_concat(user.a) from user join user_extra", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: in scatter query: aggregation function 'group_concat'" + "plan": "VT03001: aggregate functions take a single argument 'count(a, b)'" }, { "comment": "subqueries not supported in group by", "query": "select id from user group by id, (select id from user_extra)", - "v3-plan": "VT12001: unsupported: subqueries disallowed in sqlparser.GroupBy", - "gen4-plan": "VT12001: unsupported: subqueries in GROUP BY" - }, - { - "comment": "Order by uses cross-shard expression", - "query": "select id from user order by id+1", - "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: id + 1", - "gen4-plan": "VT13001: [BUG] in scatter query: complex ORDER BY expression: id + 1" - }, - { - "comment": "Order by column number with collate", - "query": "select user.col1 as a from user order by 1 collate utf8_general_ci", - "v3-plan": "VT12001: unsupported: in scatter query: complex ORDER BY expression: 1 collate utf8_general_ci", - "gen4-plan": "VT13001: [BUG] in scatter query: complex ORDER BY expression: a collate utf8_general_ci" + "plan": "VT12001: unsupported: subqueries in GROUP BY" }, { "comment": "subqueries in delete", "query": "delete from user where col = (select id from unsharded)", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": "VT12001: unsupported: subqueries in DML" + "plan": "VT12001: unsupported: subqueries in DML" }, { "comment": "sharded subqueries in unsharded delete", "query": "delete from unsharded where col = (select id from user)", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": "VT12001: unsupported: subqueries in DML" + "plan": "VT12001: unsupported: subqueries in DML" }, { "comment": "sharded delete with limit clasue", "query": "delete from user_extra limit 10", - "v3-plan": "VT12001: unsupported: multi-shard delete with LIMIT", - "gen4-plan": "VT12001: unsupported: multi shard DELETE with LIMIT" + "plan": "VT12001: unsupported: multi shard DELETE with LIMIT" }, { "comment": "sharded subquery in unsharded subquery in unsharded delete", "query": "delete from unsharded where col = (select id from unsharded where id = (select id from user))", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": "VT12001: unsupported: subqueries in DML" + "plan": "VT12001: unsupported: subqueries in DML" }, { "comment": "sharded join unsharded subqueries in unsharded delete", "query": "delete from unsharded where col = (select id from unsharded join user on unsharded.id = user.id)", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": "VT12001: unsupported: subqueries in DML" + "plan": "VT12001: unsupported: subqueries in DML" }, { "comment": "scatter update with limit clause", "query": "update user_extra set val = 1 where (name = 'foo' or id = 1) limit 1", - "v3-plan": "VT12001: unsupported: multi-shard update with LIMIT", - "gen4-plan": "VT12001: unsupported: multi shard UPDATE with LIMIT" + "plan": "VT12001: unsupported: multi shard UPDATE with LIMIT" }, { "comment": "multi delete multi table", @@ -171,65 +112,57 @@ { "comment": "update changes primary vindex column", "query": "update user set id = 1 where id = 1", - "v3-plan": "VT12001: unsupported: you cannot update primary vindex columns; invalid update on vindex: user_index", - "gen4-plan": "VT12001: unsupported: you cannot UPDATE primary vindex columns; invalid update on vindex: user_index" + "plan": "VT12001: unsupported: you cannot UPDATE primary vindex columns; invalid update on vindex: user_index" }, { "comment": "update change in multicol vindex column", "query": "update multicol_tbl set colc = 5, colb = 4 where cola = 1 and colb = 2", - "v3-plan": "VT12001: unsupported: you cannot update primary vindex columns; invalid update on vindex: multicolIdx", - "gen4-plan": "VT12001: unsupported: you cannot UPDATE primary vindex columns; invalid update on vindex: multicolIdx" + "plan": "VT12001: unsupported: you cannot UPDATE primary vindex columns; invalid update on vindex: multicolIdx" }, { "comment": "update changes non lookup vindex column", "query": "update user_metadata set md5 = 1 where user_id = 1", - "v3-plan": "VT12001: unsupported: you can only update lookup vindexes; invalid update on vindex: user_md5_index", - "gen4-plan": "VT12001: unsupported: you can only UPDATE lookup vindexes; invalid update on vindex: user_md5_index" + "plan": "VT12001: unsupported: you can only UPDATE lookup vindexes; invalid update on vindex: user_md5_index" }, { "comment": "update with complex set clause", "query": "update music set id = id + 1 where id = 1", - "v3-plan": "VT12001: unsupported: only values are supported: invalid update on column: `id` with expr: [id + 1]", - "gen4-plan": "VT12001: unsupported: only values are supported; invalid update on column: `id` with expr: [id + 1]" + "plan": "VT12001: unsupported: only values are supported; invalid update on column: `id` with expr: [id + 1]" }, { "comment": "update by primary keyspace id, changing one vindex column, limit without order clause", "query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 limit 10", - "v3-plan": "VT12001: unsupported: need to provide ORDER BY clause when using LIMIT; invalid update on vindex: email_user_map", - "gen4-plan": "VT12001: unsupported: you need to provide the ORDER BY clause when using LIMIT; invalid update on vindex: email_user_map" + "plan": "VT12001: unsupported: you need to provide the ORDER BY clause when using LIMIT; invalid update on vindex: email_user_map" }, { "comment": "update with derived table", "query": "update (select id from user) as u set id = 4", - "v3-plan": "VT12001: unsupported: sharded subqueries in DML", - "gen4-plan": "The target table u of the UPDATE is not updatable" + "plan": "The target table u of the UPDATE is not updatable" }, { "comment": "join in update tables", "query": "update user join user_extra on user.id = user_extra.id set user.name = 'foo'", - "v3-plan": "VT12001: unsupported: multi-shard or vindex write statement", - "gen4-plan": "VT12001: unsupported: multiple tables in update" + "plan": "VT12001: unsupported: unaliased multiple tables in update" }, { "comment": "multiple tables in update", "query": "update user as u, user_extra as ue set u.name = 'foo' where u.id = ue.id", - "v3-plan": "VT12001: unsupported: multi-shard or vindex write statement", - "gen4-plan": "VT12001: unsupported: multiple tables in update" + "plan": "VT12001: unsupported: multiple (2) tables in update" }, { "comment": "unsharded insert, unqualified names and auto-inc combined", "query": "insert into unsharded_auto select col from unsharded", - "plan": "VT12001: unsupported: auto-increment and SELECT in INSERT" + "plan": "VT09004: INSERT should contain column list or the table should have authoritative columns in vschema" }, { "comment": "unsharded insert, no col list with auto-inc", "query": "insert into unsharded_auto values(1,1)", - "plan": "VT13001: [BUG] column list required for tables with auto-inc columns" + "plan": "VT09004: INSERT should contain column list or the table should have authoritative columns in vschema" }, { "comment": "unsharded insert, col list does not match values", "query": "insert into unsharded_auto(id, val) values(1)", - "plan": "VT13001: [BUG] column list does not match values" + "plan": "VT03006: column count does not match value count at row 1" }, { "comment": "sharded upsert can't change vindex", @@ -329,7 +262,7 @@ { "comment": "select func(keyspace_id) from user_index where id = :id", "query": "select func(keyspace_id) from user_index where id = :id", - "plan": "VT12001: unsupported: expression on results of a vindex function" + "plan": "VT12001: unsupported: cannot add 'func(keyspace_id)' expression to a table/vindex" }, { "comment": "delete with multi-table targets", @@ -339,20 +272,17 @@ { "comment": "select get_lock with non-dual table", "query": "select get_lock('xyz', 10) from user", - "v3-plan": "VT12001: unsupported: get_lock('xyz', 10) is allowed only with dual", - "gen4-plan": "get_lock('xyz', 10) allowed only with dual" + "plan": "get_lock('xyz', 10) allowed only with dual" }, { "comment": "select is_free_lock with non-dual table", "query": "select is_free_lock('xyz') from user", - "v3-plan": "VT12001: unsupported: is_free_lock('xyz') is allowed only with dual", - "gen4-plan": "is_free_lock('xyz') allowed only with dual" + "plan": "is_free_lock('xyz') allowed only with dual" }, { "comment": "union with SQL_CALC_FOUND_ROWS", "query": "(select sql_calc_found_rows id from user where id = 1 limit 1) union select id from user where id = 1", - "v3-plan": "VT12001: unsupported: SQL_CALC_FOUND_ROWS not supported with UNION", - "gen4-plan": "VT12001: unsupported: SQL_CALC_FOUND_ROWS not supported with union" + "plan": "VT12001: unsupported: SQL_CALC_FOUND_ROWS not supported with union" }, { "comment": "set with DEFAULT - vitess aware", @@ -372,8 +302,7 @@ { "comment": "create view with Cannot auto-resolve for cross-shard joins", "query": "create view user.view_a as select col from user join user_extra", - "v3-plan": "VT03019: symbol col not found", - "gen4-plan": "Column 'col' in field list is ambiguous" + "plan": "Column 'col' in field list is ambiguous" }, { "comment": "create view with join that cannot be served in each shard separately", @@ -408,14 +337,7 @@ { "comment": "avg function on scatter query", "query": "select avg(id) from user", - "v3-plan": "VT12001: unsupported: in scatter query: complex aggregate expression", - "gen4-plan": "VT12001: unsupported: in scatter query: aggregation function 'avg'" - }, - { - "comment": "scatter aggregate with ambiguous aliases", - "query": "select distinct a, b as a from user", - "v3-plan": "generating ORDER BY clause: VT03021: ambiguous symbol reference: a", - "gen4-plan": "VT13001: [BUG] generating ORDER BY clause: ambiguous symbol reference: a" + "plan": "VT12001: unsupported: in scatter query: aggregation function 'avg(id)'" }, { "comment": "outer and inner subquery route reference the same \"uu.id\" name\n# but they refer to different things. The first reference is to the outermost query,\n# and the second reference is to the innermost 'from' subquery.\n# This query will never work as the inner derived table is only selecting one of the column", @@ -428,10 +350,9 @@ "plan": "VT12001: unsupported: cross-shard correlated subquery" }, { - "comment": "Gen4 does a rewrite of 'order by 2' that becomes 'order by id', leading to ambiguous binding.", + "comment": "rewrite of 'order by 2' that becomes 'order by id', leading to ambiguous binding.", "query": "select a.id, b.id from user as a, user_extra as b union select 1, 2 order by 2", - "v3-plan": "VT12001: unsupported: ORDER BY on top of UNION", - "gen4-plan": "Column 'id' in field list is ambiguous" + "plan": "Column 'id' in field list is ambiguous" }, { "comment": "unsupported with clause in delete statement", @@ -453,18 +374,6 @@ "query": "with x as (select * from user) select * from x union select * from x", "plan": "VT12001: unsupported: WITH expression in UNION statement" }, - { - "comment": "scatter aggregate with complex select list (can't build order by)", - "query": "select distinct a+1 from user", - "v3-plan": "generating ORDER BY clause: VT12001: unsupported: reference a complex expression", - "gen4-plan": "VT13001: [BUG] in scatter query: complex ORDER BY expression: a + 1" - }, - { - "comment": "aggregation on union", - "query": "select sum(col) from (select col from user union all select col from unsharded) t", - "v3-plan": "VT12001: unsupported: cross-shard query with aggregates", - "gen4-plan": "VT12001: unsupported: using aggregation on top of a *planbuilder.concatenateGen4 plan" - }, { "comment": "insert having subquery in row values", "query": "insert into user(id, name) values ((select 1 from user where id = 1), 'A')", @@ -478,12 +387,116 @@ { "comment": "json_table expressions", "query": "SELECT * FROM JSON_TABLE('[ {\"c1\": null} ]','$[*]' COLUMNS( c1 INT PATH '$.c1' ERROR ON ERROR )) as jt", - "v3-plan": "VT12001: unsupported: JSON_TABLE expressions", - "gen4-plan": "VT12001: unsupported: json_table expressions" + "plan": "VT12001: unsupported: json_table expressions" }, { "comment": "mix lock with other expr", "query": "select get_lock('xyz', 10), 1 from dual", "plan": "VT12001: unsupported: LOCK function and other expression: [1] in same select query" + }, + { + "comment": "Assignment expression in select statements", + "query": "select @val := 42", + "plan": "VT12001: unsupported: Assignment expression" + }, + { + "comment": "Assignment expression in union statements", + "query": "select @val := 42 union select 1", + "plan": "VT12001: unsupported: Assignment expression" + }, + { + "comment": "Assignment expression in update statements", + "query": "update user set name = @val := 42", + "plan": "VT12001: unsupported: Assignment expression" + }, + { + "comment": "Assignment expression in insert statement", + "query": "insert into user (id) values (@val := 42)", + "plan": "VT12001: unsupported: Assignment expression" + }, + { + "comment": "Assignment expression in delete statement", + "query": "delete from user where x = (@val := 42)", + "plan": "VT12001: unsupported: Assignment expression" + }, + { + "comment": "extremum on input from both sides", + "query": "insert into music(user_id, id) select foo, bar from music on duplicate key update id = id+1", + "plan": "VT12001: unsupported: DML cannot update vindex column" + }, + { + "comment": "drop table with incompatible tables", + "query": "drop table user, unsharded_a", + "plan": "VT12001: unsupported: Tables or Views specified in the query do not belong to the same destination" + }, + { + "comment": "drop view with incompatible views", + "query": "drop view user, unsharded_a", + "plan": "VT12001: unsupported: Tables or Views specified in the query do not belong to the same destination" + }, + { + "comment": "Rename table with different keyspace tables", + "query": "rename table user_extra to b, main.a to b", + "plan": "VT12001: unsupported: Tables or Views specified in the query do not belong to the same destination" + }, + { + "comment": "correlated subquery with different keyspace tables involved", + "query": "select id from user where id in (select col from unsharded where col = user.id)", + "plan": "VT12001: unsupported: cross-shard correlated subquery" + }, + { + "comment": "ORDER BY on select t.*", + "query": "select t.*, t.col from user t order by t.col", + "plan": "VT12001: unsupported: '*' expression in cross-shard query" + }, + { + "comment": "ORDER BY on select *", + "query": "select *, col from user order by col", + "plan": "VT12001: unsupported: '*' expression in cross-shard query" + }, + { + "comment": "ORDER BY on select multi t.*", + "query": "select t.*, t.name, t.*, t.col from user t order by t.col", + "plan": "VT12001: unsupported: '*' expression in cross-shard query" + }, + { + "comment": "ORDER BY on select multi *", + "query": "select *, name, *, col from user order by col", + "plan": "VT12001: unsupported: '*' expression in cross-shard query" + }, + { + "comment": "select (select col from user where user_extra.id = 4 limit 1) as a from user join user_extra", + "query": "select (select col from user where user_extra.id = 4 limit 1) as a from user join user_extra", + "plan": "VT12001: unsupported: cross-shard correlated subquery" + }, + { + "comment": "correlated subquery part of an OR clause", + "query": "select 1 from user u where u.col = 6 or exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)", + "plan": "VT12001: unsupported: EXISTS sub-queries are only supported with AND clause" + }, + { + "comment": "cant switch sides for outer joins", + "query": "select id from user left join (select user_id from user_extra limit 10) ue on user.id = ue.user_id", + "plan": "VT12001: unsupported: LEFT JOIN with derived tables" + }, + { + "comment": "limit on both sides means that we can't evaluate this at all", + "query": "select id from (select id from user limit 10) u join (select user_id from user_extra limit 10) ue on u.id = ue.user_id", + "plan": "VT12001: unsupported: JOIN between derived tables" + }, + { + "comment": "multi-shard union", + "query": "select 1 from music union (select id from user union all select name from unsharded)", + "plan": "VT12001: unsupported: nesting of UNIONs on the right-hand side" + }, + { + "comment": "multi-shard union", + "query": "select 1 from music union (select id from user union select name from unsharded)", + "plan": "VT12001: unsupported: nesting of UNIONs on the right-hand side" + }, + { + "comment": "Cannot have more than one aggr(distinct...", + "query": "select count(distinct a), count(distinct b) from user", + "plan": "VT12001: unsupported: only one DISTINCT aggregation is allowed in a SELECT: count(distinct b)" } ] diff --git a/go/vt/vtgate/planbuilder/testdata/vexplain_cases.json b/go/vt/vtgate/planbuilder/testdata/vexplain_cases.json index bd828fe2dbf..630e59f3526 100644 --- a/go/vt/vtgate/planbuilder/testdata/vexplain_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/vexplain_cases.json @@ -17,28 +17,7 @@ { "comment": "vexplain queries", "query": "vexplain QUERIES select * from user", - "v3-plan": { - "QueryType": "EXPLAIN", - "Original": "vexplain QUERIES select * from user", - "Instructions": { - "OperatorType": "VEXPLAIN", - "Type": "queries", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "EXPLAIN", "Original": "vexplain QUERIES select * from user", "Instructions": { @@ -66,28 +45,7 @@ { "comment": "vexplain table", "query": "vexplain ALL select * from user", - "v3-plan": { - "QueryType": "EXPLAIN", - "Original": "vexplain ALL select * from user", - "Instructions": { - "OperatorType": "VEXPLAIN", - "Type": "all", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user`", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "EXPLAIN", "Original": "vexplain ALL select * from user", "Instructions": { diff --git a/go/vt/vtgate/planbuilder/testdata/view_cases.json b/go/vt/vtgate/planbuilder/testdata/view_cases.json index 5b5e76fe9ed..decc6a117cf 100644 --- a/go/vt/vtgate/planbuilder/testdata/view_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/view_cases.json @@ -26,22 +26,7 @@ { "comment": "create view with authoritative columns", "query": "create view user.view_ac as select * from authoritative", - "v3-plan": { - "QueryType": "DDL", - "Original": "create view user.view_ac as select * from authoritative", - "Instructions": { - "OperatorType": "DDL", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "Query": "create view view_ac as select * from authoritative" - }, - "TablesUsed": [ - "user.view_ac" - ] - }, - "gen4-plan": { + "plan": { "QueryType": "DDL", "Original": "create view user.view_ac as select * from authoritative", "Instructions": { @@ -107,4 +92,4 @@ "query": "drop view main.a, main.b, main.a", "plan": "VT03013: not unique table/alias: 'a'" } -] \ No newline at end of file +] diff --git a/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.json b/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.json index 59e83eb5e10..320b5ae7bac 100644 --- a/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.json @@ -2,33 +2,7 @@ { "comment": "vindex func read all cols", "query": "select id, keyspace_id, range_start, range_end, hex_keyspace_id, shard from user_index where id = :id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, keyspace_id, range_start, range_end, hex_keyspace_id, shard from user_index where id = :id", - "Instructions": { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 0, - 1, - 2, - 3, - 4, - 5 - ], - "Fields": { - "hex_keyspace_id": "VARBINARY", - "id": "VARBINARY", - "keyspace_id": "VARBINARY", - "range_end": "VARBINARY", - "range_start": "VARBINARY", - "shard": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, keyspace_id, range_start, range_end, hex_keyspace_id, shard from user_index where id = :id", "Instructions": { @@ -61,33 +35,7 @@ { "comment": "vindex func select *", "query": "select * from user_index where id = :id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select * from user_index where id = :id", - "Instructions": { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 0, - 1, - 2, - 3, - 4, - 5 - ], - "Fields": { - "hex_keyspace_id": "VARBINARY", - "id": "VARBINARY", - "keyspace_id": "VARBINARY", - "range_end": "VARBINARY", - "range_start": "VARBINARY", - "shard": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select * from user_index where id = :id", "Instructions": { @@ -120,42 +68,32 @@ { "comment": "vindex func read with id repeated", "query": "select id, keyspace_id, id from user_index where id = :id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, keyspace_id, id from user_index where id = :id", - "Instructions": { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 0, - 1, - 0 - ], - "Fields": { - "id": "VARBINARY", - "keyspace_id": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, keyspace_id, id from user_index where id = :id", "Instructions": { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", + "OperatorType": "SimpleProjection", "Columns": [ 0, 1, 0 ], - "Fields": { - "id": "VARBINARY", - "keyspace_id": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" + "Inputs": [ + { + "OperatorType": "VindexFunc", + "Variant": "VindexMap", + "Columns": [ + 0, + 1 + ], + "Fields": { + "id": "VARBINARY", + "keyspace_id": "VARBINARY" + }, + "Value": ":id", + "Vindex": "user_index" + } + ] }, "TablesUsed": [ "user_index" @@ -170,87 +108,42 @@ { "comment": "disambiguated vindex reference", "query": "select id, keyspace_id, id from second_user.hash_dup where id = :id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id, keyspace_id, id from second_user.hash_dup where id = :id", - "Instructions": { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 0, - 1, - 0 - ], - "Fields": { - "id": "VARBINARY", - "keyspace_id": "VARBINARY" - }, - "Value": ":id", - "Vindex": "hash_dup" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id, keyspace_id, id from second_user.hash_dup where id = :id", "Instructions": { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", + "OperatorType": "SimpleProjection", "Columns": [ 0, 1, 0 ], - "Fields": { - "id": "VARBINARY", - "keyspace_id": "VARBINARY" - }, - "Value": ":id", - "Vindex": "hash_dup" - }, - "TablesUsed": [ - "hash_dup" - ] - } - }, - { - "comment": "You can even join with a vindexFunc primitive", - "query": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "_unsharded", "Inputs": [ { "OperatorType": "VindexFunc", "Variant": "VindexMap", "Columns": [ + 0, 1 ], "Fields": { + "id": "VARBINARY", "keyspace_id": "VARBINARY" }, "Value": ":id", - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.id from unsharded where 1 != 1", - "Query": "select unsharded.id from unsharded", - "Table": "unsharded" + "Vindex": "hash_dup" } ] - } - }, - "gen4-plan": { + }, + "TablesUsed": [ + "hash_dup" + ] + } + }, + { + "comment": "You can even join with a vindexFunc primitive", + "query": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id", + "plan": { "QueryType": "SELECT", "Original": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id", "Instructions": { @@ -293,42 +186,7 @@ { "comment": "Join vindexFunc on RHS", "query": "select user_index.keyspace_id, unsharded.id from unsharded join user_index where user_index.id = :id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_index.keyspace_id, unsharded.id from unsharded join user_index where user_index.id = :id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0,L:0", - "TableName": "unsharded_", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.id from unsharded where 1 != 1", - "Query": "select unsharded.id from unsharded", - "Table": "unsharded" - }, - { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 1 - ], - "Fields": { - "keyspace_id": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_index.keyspace_id, unsharded.id from unsharded join user_index where user_index.id = :id", "Instructions": { @@ -371,47 +229,7 @@ { "comment": "Join with vindexFunc on a column of it, already present in select list", "query": "select user_index.id, user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select user_index.id, user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1,R:0", - "JoinVars": { - "user_index_id": 0 - }, - "TableName": "_unsharded", - "Inputs": [ - { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 0, - 1 - ], - "Fields": { - "id": "VARBINARY", - "keyspace_id": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.id from unsharded where 1 != 1", - "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_index.id, user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id", "Instructions": { @@ -459,7 +277,7 @@ { "comment": "Join with vindexFunc on a column of it, already present at the end of the select list", "query": "select user_index.keyspace_id, user_index.id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_index.keyspace_id, user_index.id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id", "Instructions": { @@ -497,46 +315,6 @@ "Table": "unsharded" } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user_index.keyspace_id, user_index.id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,L:0,R:0", - "JoinVars": { - "user_index_id": 0 - }, - "TableName": "_unsharded", - "Inputs": [ - { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 0, - 1 - ], - "Fields": { - "id": "VARBINARY", - "keyspace_id": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.id from unsharded where 1 != 1", - "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id", - "Table": "unsharded" - } - ] }, "TablesUsed": [ "main.unsharded", @@ -547,7 +325,7 @@ { "comment": "Join with vindexFunc on a column of it, not present in select list", "query": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id", "Instructions": { @@ -585,46 +363,6 @@ "Table": "unsharded" } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select user_index.keyspace_id, unsharded.id from user_index join unsharded where user_index.id = :id and unsharded.id = user_index.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,R:0", - "JoinVars": { - "user_index_id": 0 - }, - "TableName": "_unsharded", - "Inputs": [ - { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 0, - 1 - ], - "Fields": { - "id": "VARBINARY", - "keyspace_id": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.id from unsharded where 1 != 1", - "Query": "select unsharded.id from unsharded where unsharded.id = :user_index_id", - "Table": "unsharded" - } - ] }, "TablesUsed": [ "main.unsharded", @@ -635,7 +373,7 @@ { "comment": "Join with aliased table name", "query": "select ui.keyspace_id, unsharded.id from user_index ui join unsharded where ui.id = :id and unsharded.id = ui.id", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select ui.keyspace_id, unsharded.id from user_index ui join unsharded where ui.id = :id and unsharded.id = ui.id", "Instructions": { @@ -673,46 +411,6 @@ "Table": "unsharded" } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select ui.keyspace_id, unsharded.id from user_index ui join unsharded where ui.id = :id and unsharded.id = ui.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,R:0", - "JoinVars": { - "ui_id": 0 - }, - "TableName": "_unsharded", - "Inputs": [ - { - "OperatorType": "VindexFunc", - "Variant": "VindexMap", - "Columns": [ - 0, - 1 - ], - "Fields": { - "id": "VARBINARY", - "keyspace_id": "VARBINARY" - }, - "Value": ":id", - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.id from unsharded where 1 != 1", - "Query": "select unsharded.id from unsharded where unsharded.id = :ui_id", - "Table": "unsharded" - } - ] }, "TablesUsed": [ "main.unsharded", @@ -723,7 +421,6 @@ { "comment": "select none from user_index where id = :id", "query": "select none from user_index where id = :id", - "v3-plan": "VT03019: symbol `none` not found", - "gen4-plan": "symbol `none` not found" + "plan": "column '`none`' not found in table 'user_index'" } ] diff --git a/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json index 8c38997f06e..50a09c97a48 100644 --- a/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json +++ b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json @@ -124,6 +124,48 @@ "name_muticoltbl_map": { "type": "name_lkp_test", "owner": "multicol_tbl" + }, + "non_planable_user_map": { + "type": "lookup_unicodeloosemd5_hash", + "params": { + "table": "non_planable_user_vdx", + "from": "non_planable", + "to": "keyspace_id" + }, + "owner": "user_metadata" + }, + "lkp_shard_map": { + "type": "name_lkp_test", + "owner": "mixed_tbl", + "params": { + "table": "lkp_shard_vdx", + "from": "lkp_key", + "to": "keyspace_id" + } + }, + "shard_index": { + "type": "xxhash" + }, + "unq_lkp_bf_vdx": { + "type": "unq_lkp_test", + "owner": "customer", + "params": { + "table": "unq_lkp_idx", + "from": " ", + "to": "keyspace_id", + "cost": "100", + "write_only": "true" + } + }, + "unq_lkp_vdx": { + "type": "unq_lkp_test", + "owner": "customer", + "params": { + "table": "unq_lkp_idx", + "from": "unq_key", + "to": "keyspace_id", + "cost": "300" + } } }, "tables": { @@ -188,6 +230,10 @@ { "column": "md5", "name": "user_md5_index" + }, + { + "column": "non_planable", + "name": "non_planable_user_map" } ] }, @@ -315,12 +361,16 @@ }, "ref_with_source": { "type": "reference", - "source": "main.ref_in_source" + "source": "main.source_of_ref" }, "rerouted_ref": { "type": "reference", "source": "main.rerouted_ref" }, + "global_ref": { + "type": "reference", + "source": "global_ref" + }, "pin_test": { "pinned": "80" }, @@ -376,6 +426,62 @@ "name": "user_index" } ] + }, + "non_planable_user_vdx": { + "column_vindexes": [ + { + "column": "non_planable", + "name": "user_index" + } + ] + }, + "mixed_tbl": { + "column_vindexes": [ + { + "column": "shard_key", + "name": "shard_index" + }, + { + "column": "lkp_key", + "name": "lkp_shard_map" + } + ], + "auto_increment": { + "column": "lkp_key", + "sequence": "seq" + } + }, + "lkp_shard_vdx": { + "column_vindexes": [ + { + "column": "lkp_key", + "name": "shard_index" + } + ] + }, + "customer": { + "column_vindexes": [ + { + "column": "id", + "name": "shard_index" + }, + { + "column": "email", + "name": "unq_lkp_vdx" + }, + { + "column": "phone", + "name": "unq_lkp_bf_vdx" + } + ] + }, + "unq_lkp_idx": { + "column_vindexes": [ + { + "column": "unq_key", + "name": "shard_index" + } + ] } } }, @@ -476,18 +582,11 @@ "seq": { "type": "sequence" }, - "unsharded_ref": { - "type": "reference" - }, - "ambiguous_ref_with_source": { - "type": "reference" - }, - "ref_in_source": { - "type": "reference" - }, - "rerouted_ref": { - "type": "reference" - } + "ambiguous_ref_with_source": {}, + "global_ref": {}, + "rerouted_ref": {}, + "source_of_ref": {}, + "unsharded_ref": {} } }, "main_2": { @@ -503,6 +602,166 @@ ] } } + }, + "sharded_fk_allow": { + "sharded": true, + "foreignKeyMode": "FK_MANAGED", + "vindexes": { + "hash_vin": { + "type": "hash_test" + }, + "multicolIdx": { + "type": "multiCol_test" + } + }, + "tables": { + "multicol_tbl1": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb", + "colc" + ], + "name": "multicolIdx" + } + ] + }, + "multicol_tbl2": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb", + "colc" + ], + "name": "multicolIdx" + } + ] + }, + "tbl1": { + "column_vindexes": [ + { + "column": "col1", + "name": "hash_vin" + } + ] + }, + "tbl2": { + "column_vindexes": [ + { + "column": "col2", + "name": "hash_vin" + } + ] + }, + "tbl3": { + "column_vindexes": [ + { + "column": "col3", + "name": "hash_vin" + } + ] + }, + "tbl4": { + "column_vindexes": [ + { + "column": "col4", + "name": "hash_vin" + } + ] + }, + "tbl5": { + "column_vindexes": [ + { + "column": "col5", + "name": "hash_vin" + } + ] + }, + "tbl6": { + "column_vindexes": [ + { + "column": "col6", + "name": "hash_vin" + } + ] + }, + "tbl7": { + "column_vindexes": [ + { + "column": "col7", + "name": "hash_vin" + } + ] + }, + "tbl8": { + "column_vindexes": [ + { + "column": "col8", + "name": "hash_vin" + } + ] + }, + "tbl9": { + "column_vindexes": [ + { + "column": "col9", + "name": "hash_vin" + } + ] + }, + "tbl10": { + "column_vindexes": [ + { + "column": "sk", + "name": "hash_vin" + } + ] + }, + "tblrefDef": { + "column_vindexes": [ + { + "column": "ref", + "name": "hash_vin" + } + ] + }, + "tbl20": { + "column_vindexes": [ + { + "column": "col", + "name": "hash_vin" + } + ] + }, + "s_tbl": { + "column_vindexes": [ + { + "column": "col", + "name": "hash_vin" + } + ] + } + } + }, + "unsharded_fk_allow": { + "foreignKeyMode": "FK_MANAGED", + "tables": { + "u_tbl1": {}, + "u_tbl2": {}, + "u_tbl3": {}, + "u_tbl4": {}, + "u_tbl5": {}, + "u_tbl6": {}, + "u_tbl7": {}, + "u_tbl8": {}, + "u_tbl9": {}, + "u_tbl": {}, + "u_multicol_tbl1": {}, + "u_multicol_tbl2": {}, + "u_multicol_tbl3": {} + } } } } diff --git a/go/vt/vtgate/planbuilder/testdata/wireup_cases.json b/go/vt/vtgate/planbuilder/testdata/wireup_cases.json index 8a894895edf..cb1e67c021e 100644 --- a/go/vt/vtgate/planbuilder/testdata/wireup_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/wireup_cases.json @@ -2,52 +2,15 @@ { "comment": "join on having clause", "query": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0,L:0,R:1", - "JoinVars": { - "uid": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id as uid from `user` as u where 1 != 1", - "Query": "select u.id as uid from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select e.col, e.id as eid from user_extra as e where 1 != 1", - "Query": "select e.col, e.id as eid from user_extra as e having eid = :uid", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1,R:0,L:2", + "JoinColumnIndexes": "L:0,R:0,L:1", "JoinVars": { - "e_id": 0 + "e_id": 1 }, "TableName": "user_extra_`user`", "Inputs": [ @@ -58,8 +21,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select e.id, e.col, e.id as eid from user_extra as e where 1 != 1", - "Query": "select e.id, e.col, e.id as eid from user_extra as e", + "FieldQuery": "select e.col, e.id as eid from user_extra as e where 1 != 1", + "Query": "select e.col, e.id as eid from user_extra as e", "Table": "user_extra" }, { @@ -88,52 +51,15 @@ { "comment": "bind var already in use", "query": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid and e.col = :uid", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid and e.col = :uid", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0,L:0,R:1", - "JoinVars": { - "uid1": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id as uid from `user` as u where 1 != 1", - "Query": "select u.id as uid from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select e.col, e.id as eid from user_extra as e where 1 != 1", - "Query": "select e.col, e.id as eid from user_extra as e having eid = :uid1 and e.col = :uid", - "Table": "user_extra" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select e.col, u.id uid, e.id eid from user u join user_extra e having uid = eid and e.col = :uid", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1,R:0,L:2", + "JoinColumnIndexes": "L:0,R:0,L:1", "JoinVars": { - "e_id": 0 + "e_id": 1 }, "TableName": "user_extra_`user`", "Inputs": [ @@ -144,8 +70,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select e.id, e.col, e.id as eid from user_extra as e where 1 != 1", - "Query": "select e.id, e.col, e.id as eid from user_extra as e where e.col = :uid", + "FieldQuery": "select e.col, e.id as eid from user_extra as e where 1 != 1", + "Query": "select e.col, e.id as eid from user_extra as e where e.col = :uid", "Table": "user_extra" }, { @@ -174,63 +100,7 @@ { "comment": "wire-up join with join, going left", "query": "select u1.id from user u1 join user u2 join user u3 where u3.col = u1.col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.id from user u1 join user u2 join user u3 where u3.col = u1.col", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "u1_col": 1 - }, - "TableName": "`user`_`user`_`user`", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.id, u1.col from `user` as u1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u2 where 1 != 1", - "Query": "select 1 from `user` as u2", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u3 where 1 != 1", - "Query": "select 1 from `user` as u3 where u3.col = :u1_col", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.id from user u1 join user u2 join user u3 where u3.col = u1.col", "Instructions": { @@ -253,9 +123,9 @@ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1", + "JoinColumnIndexes": "L:0", "JoinVars": { - "u1_col": 0 + "u1_col": 1 }, "TableName": "`user`_`user`", "Inputs": [ @@ -266,8 +136,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u1.col, u1.id from `user` as u1 where 1 != 1", - "Query": "select u1.col, u1.id from `user` as u1", + "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1", + "Query": "select u1.id, u1.col from `user` as u1", "Table": "`user`" }, { @@ -293,63 +163,7 @@ { "comment": "wire-up join with join, going left, then right", "query": "select u1.id from user u1 join user u2 join user u3 where u3.col = u2.col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.id from user u1 join user u2 join user u3 where u3.col = u2.col", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "u2_col": 1 - }, - "TableName": "`user`_`user`_`user`", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.id from `user` as u1 where 1 != 1", - "Query": "select u1.id from `user` as u1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u2.col from `user` as u2 where 1 != 1", - "Query": "select u2.col from `user` as u2", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u3 where 1 != 1", - "Query": "select 1 from `user` as u3 where u3.col = :u2_col", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.id from user u1 join user u2 join user u3 where u3.col = u2.col", "Instructions": { @@ -411,66 +225,7 @@ { "comment": "wire-up join with join, reuse existing result from a lower join", "query": "select u1.id from user u1 join user u2 on u2.col = u1.col join user u3 where u3.col = u1.col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.id from user u1 join user u2 on u2.col = u1.col join user u3 where u3.col = u1.col", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "u1_col": 1 - }, - "TableName": "`user`_`user`_`user`", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "JoinVars": { - "u1_col": 1 - }, - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.id, u1.col from `user` as u1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u2 where 1 != 1", - "Query": "select 1 from `user` as u2 where u2.col = :u1_col", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u3 where 1 != 1", - "Query": "select 1 from `user` as u3 where u3.col = :u1_col", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.id from user u1 join user u2 on u2.col = u1.col join user u3 where u3.col = u1.col", "Instructions": { @@ -496,9 +251,9 @@ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1", + "JoinColumnIndexes": "L:0", "JoinVars": { - "u1_col": 0 + "u1_col": 1 }, "TableName": "`user`_`user`", "Inputs": [ @@ -509,8 +264,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u1.col, u1.id from `user` as u1 where 1 != 1", - "Query": "select u1.col, u1.id from `user` as u1 where u1.col = :u3_col", + "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1", + "Query": "select u1.id, u1.col from `user` as u1 where u1.col = :u3_col", "Table": "`user`" }, { @@ -536,89 +291,7 @@ { "comment": "wire-up join with join, reuse existing result from a lower join.\n# You need two levels of join nesting to test this: when u3 requests\n# col from u1, the u1-u2 joins exports the column to u2-u3. When\n# u4 requests it, it should be reused from the u1-u2 join.", "query": "select u1.id from user u1 join user u2 join user u3 on u3.id = u1.col join user u4 where u4.col = u1.col", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.id from user u1 join user u2 join user u3 on u3.id = u1.col join user u4 where u4.col = u1.col", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "JoinVars": { - "u1_col": 1 - }, - "TableName": "`user`_`user`_`user`_`user`", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "JoinVars": { - "u1_col": 1 - }, - "TableName": "`user`_`user`_`user`", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.id, u1.col from `user` as u1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u2 where 1 != 1", - "Query": "select 1 from `user` as u2", - "Table": "`user`" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u3 where 1 != 1", - "Query": "select 1 from `user` as u3 where u3.id = :u1_col", - "Table": "`user`", - "Values": [ - ":u1_col" - ], - "Vindex": "user_index" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u4 where 1 != 1", - "Query": "select 1 from `user` as u4 where u4.col = :u1_col", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.id from user u1 join user u2 join user u3 on u3.id = u1.col join user u4 where u4.col = u1.col", "Instructions": { @@ -661,9 +334,9 @@ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1", + "JoinColumnIndexes": "L:0", "JoinVars": { - "u1_col": 0 + "u1_col": 1 }, "TableName": "`user`_`user`", "Inputs": [ @@ -674,8 +347,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u1.col, u1.id from `user` as u1 where 1 != 1", - "Query": "select u1.col, u1.id from `user` as u1 where u1.col = :u4_col", + "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1", + "Query": "select u1.id, u1.col from `user` as u1 where u1.col = :u4_col", "Table": "`user`" }, { @@ -707,7 +380,7 @@ { "comment": "Test reuse of join var already being supplied to the right of a node.", "query": "select u1.id from user u1 join (user u2 join user u3) where u2.id = u1.col and u3.id = u1.col", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.id from user u1 join (user u2 join user u3) where u2.id = u1.col and u3.id = u1.col", "Instructions": { @@ -718,76 +391,13 @@ "u1_col": 1 }, "TableName": "`user`_`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1", - "Query": "select u1.id, u1.col from `user` as u1", - "Table": "`user`" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u2 where 1 != 1", - "Query": "select 1 from `user` as u2 where u2.id = :u1_col", - "Table": "`user`", - "Values": [ - ":u1_col" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u3 where 1 != 1", - "Query": "select 1 from `user` as u3 where u3.id = :u1_col", - "Table": "`user`", - "Values": [ - ":u1_col" - ], - "Vindex": "user_index" - } - ] - } - ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select u1.id from user u1 join (user u2 join user u3) where u2.id = u1.col and u3.id = u1.col", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1", - "JoinVars": { - "u1_col": 0 - }, - "TableName": "`user`_`user`_`user`", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "L:0,L:1", "JoinVars": { - "u1_col": 0 + "u1_col": 1 }, "TableName": "`user`_`user`", "Inputs": [ @@ -798,8 +408,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u1.col, u1.id from `user` as u1 where 1 != 1", - "Query": "select u1.col, u1.id from `user` as u1", + "FieldQuery": "select u1.id, u1.col from `user` as u1 where 1 != 1", + "Query": "select u1.id, u1.col from `user` as u1", "Table": "`user`" }, { @@ -844,52 +454,15 @@ { "comment": "Join on weird columns.", "query": "select `weird``name`.a, unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select `weird``name`.a, unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "weird_name_a_b_c": 1 - }, - "TableName": "`weird``name`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `weird``name`.a, `weird``name`.`a``b*c` from `weird``name` where 1 != 1", - "Query": "select `weird``name`.a, `weird``name`.`a``b*c` from `weird``name`", - "Table": "`weird``name`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.b from unsharded where 1 != 1", - "Query": "select unsharded.b from unsharded where unsharded.id = :weird_name_a_b_c", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select `weird``name`.a, unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "R:0,L:1", + "JoinColumnIndexes": "R:0,L:0", "JoinVars": { - "unsharded_id": 0 + "unsharded_id": 1 }, "TableName": "unsharded_`weird``name`", "Inputs": [ @@ -900,8 +473,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select unsharded.id, unsharded.b from unsharded where 1 != 1", - "Query": "select unsharded.id, unsharded.b from unsharded", + "FieldQuery": "select unsharded.b, unsharded.id from unsharded where 1 != 1", + "Query": "select unsharded.b, unsharded.id from unsharded", "Table": "unsharded" }, { @@ -923,59 +496,22 @@ }, "TablesUsed": [ "main.unsharded", - "user.weird`name" - ] - } - }, - { - "comment": "Join on weird column (col is not in select)", - "query": "select unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "R:0", - "JoinVars": { - "weird_name_a_b_c": 0 - }, - "TableName": "`weird``name`_unsharded", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `weird``name`.`a``b*c` from `weird``name` where 1 != 1", - "Query": "select `weird``name`.`a``b*c` from `weird``name`", - "Table": "`weird``name`" - }, - { - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select unsharded.b from unsharded where 1 != 1", - "Query": "select unsharded.b from unsharded where unsharded.id = :weird_name_a_b_c", - "Table": "unsharded" - } - ] - } - }, - "gen4-plan": { + "user.weird`name" + ] + } + }, + { + "comment": "Join on weird column (col is not in select)", + "query": "select unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id", + "plan": { "QueryType": "SELECT", "Original": "select unsharded.b from `weird``name` join unsharded on `weird``name`.`a``b*c` = unsharded.id", "Instructions": { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:1", + "JoinColumnIndexes": "L:0", "JoinVars": { - "unsharded_id": 0 + "unsharded_id": 1 }, "TableName": "unsharded_`weird``name`", "Inputs": [ @@ -986,8 +522,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select unsharded.id, unsharded.b from unsharded where 1 != 1", - "Query": "select unsharded.id, unsharded.b from unsharded", + "FieldQuery": "select unsharded.b, unsharded.id from unsharded where 1 != 1", + "Query": "select unsharded.b, unsharded.id from unsharded", "Table": "unsharded" }, { @@ -1016,7 +552,7 @@ { "comment": "wire-up with limit primitive", "query": "select u.id, e.id from user u join user_extra e where e.id = u.col limit 10", - "v3-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id, e.id from user u join user_extra e where e.id = u.col limit 10", "Instructions": { @@ -1057,49 +593,6 @@ ] } ] - } - }, - "gen4-plan": { - "QueryType": "SELECT", - "Original": "select u.id, e.id from user u join user_extra e where e.id = u.col limit 10", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,R:0", - "JoinVars": { - "u_col": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.col, u.id from `user` as u where 1 != 1", - "Query": "select u.col, u.id from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select e.id from user_extra as e where 1 != 1", - "Query": "select e.id from user_extra as e where e.id = :u_col", - "Table": "user_extra" - } - ] - } - ] }, "TablesUsed": [ "user.user", @@ -1110,75 +603,7 @@ { "comment": "Wire-up in subquery", "query": "select 1 from user where id in (select u.id, e.id from user u join user_extra e where e.id = u.col limit 10)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select 1 from user where id in (select u.id, e.id from user u join user_extra e where e.id = u.col limit 10)", - "Instructions": { - "OperatorType": "Subquery", - "Variant": "PulloutIn", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0", - "JoinVars": { - "u_col": 1 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1", - "Query": "select u.id, u.col from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select e.id from user_extra as e where 1 != 1", - "Query": "select e.id from user_extra as e where e.id = :u_col", - "Table": "user_extra" - } - ] - } - ] - }, - { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` where 1 != 1", - "Query": "select 1 from `user` where :__sq_has_values1 = 1 and id in ::__vals", - "Table": "`user`", - "Values": [ - "::__sq1" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select 1 from user where id in (select u.id, e.id from user u join user_extra e where e.id = u.col limit 10)", "Instructions": { @@ -1190,6 +615,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Limit", "Count": "INT64(10)", "Inputs": [ @@ -1229,6 +655,7 @@ ] }, { + "InputName": "Outer", "OperatorType": "Route", "Variant": "IN", "Keyspace": { @@ -1254,71 +681,7 @@ { "comment": "Wire-up in underlying primitive after pullout", "query": "select u.id, e.id, (select col from user) from user u join user_extra e where e.id = u.col limit 10", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u.id, e.id, (select col from user) from user u join user_extra e where e.id = u.col limit 10", - "Instructions": { - "OperatorType": "Limit", - "Count": "INT64(10)", - "Inputs": [ - { - "OperatorType": "Subquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq_has_values1", - "__sq1" - ], - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,L:1", - "JoinVars": { - "u_col": 2 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id, :__sq1, u.col from `user` as u where 1 != 1", - "Query": "select u.id, :__sq1, u.col from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select e.id from user_extra as e where 1 != 1", - "Query": "select e.id from user_extra as e where e.id = :u_col", - "Table": "user_extra" - } - ] - } - ] - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u.id, e.id, (select col from user) from user u join user_extra e where e.id = u.col limit 10", "Instructions": { @@ -1333,6 +696,7 @@ ], "Inputs": [ { + "InputName": "SubQuery", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { @@ -1344,6 +708,7 @@ "Table": "`user`" }, { + "InputName": "Outer", "OperatorType": "Join", "Variant": "Join", "JoinColumnIndexes": "L:1,R:0,L:2", @@ -1389,26 +754,7 @@ { "comment": "Invalid value in IN clause", "query": "select id from user where id in (18446744073709551616, 1)", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select id from user where id in (18446744073709551616, 1)", - "Instructions": { - "OperatorType": "Route", - "Variant": "IN", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id in ::__vals", - "Table": "`user`", - "Values": [ - "(DECIMAL(18446744073709551616), INT64(1))" - ], - "Vindex": "user_index" - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select id from user where id in (18446744073709551616, 1)", "Instructions": { @@ -1434,45 +780,7 @@ { "comment": "Invalid value in IN clause from LHS of join", "query": "select u1.id from user u1 join user u2 where u1.id = 18446744073709551616", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.id from user u1 join user u2 where u1.id = 18446744073709551616", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.id from `user` as u1 where 1 != 1", - "Query": "select u1.id from `user` as u1 where u1.id = 18446744073709551616", - "Table": "`user`", - "Values": [ - "DECIMAL(18446744073709551616)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u2 where 1 != 1", - "Query": "select 1 from `user` as u2", - "Table": "`user`" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.id from user u1 join user u2 where u1.id = 18446744073709551616", "Instructions": { @@ -1517,45 +825,7 @@ { "comment": "Invalid value in IN clause from RHS of join", "query": "select u1.id from user u1 join user u2 where u2.id = 18446744073709551616", - "v3-plan": { - "QueryType": "SELECT", - "Original": "select u1.id from user u1 join user u2 where u2.id = 18446744073709551616", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u1.id from `user` as u1 where 1 != 1", - "Query": "select u1.id from `user` as u1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as u2 where 1 != 1", - "Query": "select 1 from `user` as u2 where u2.id = 18446744073709551616", - "Table": "`user`", - "Values": [ - "DECIMAL(18446744073709551616)" - ], - "Vindex": "user_index" - } - ] - } - }, - "gen4-plan": { + "plan": { "QueryType": "SELECT", "Original": "select u1.id from user u1 join user u2 where u2.id = 18446744073709551616", "Instructions": { @@ -1598,11 +868,11 @@ } }, { - "comment": "derived table with column aliases not supported by v3, but planner is overridden with hint", - "query": "select /*vt+ PLANNER=gen4 */ u.a from (select id as b, name from user) u(a, n) where u.n = 1", + "comment": "derived table with column aliases", + "query": "select u.a from (select id as b, name from user) u(a, n) where u.n = 1", "plan": { "QueryType": "SELECT", - "Original": "select /*vt+ PLANNER=gen4 */ u.a from (select id as b, name from user) u(a, n) where u.n = 1", + "Original": "select u.a from (select id as b, name from user) u(a, n) where u.n = 1", "Instructions": { "OperatorType": "VindexLookup", "Variant": "Equal", @@ -1638,7 +908,7 @@ "Sharded": true }, "FieldQuery": "select u.a from (select id as b, `name` from `user` where 1 != 1) as u(a, n) where 1 != 1", - "Query": "select /*vt+ PLANNER=gen4 */ u.a from (select id as b, `name` from `user` where `name` = 1) as u(a, n)", + "Query": "select u.a from (select id as b, `name` from `user` where `name` = 1) as u(a, n)", "Table": "`user`" } ] @@ -1648,11 +918,6 @@ ] } }, - { - "comment": "derived table with column aliases not supported by v3, but planner is overridden with hint", - "query": "select /*vt+ PLANNER=v3 */ u.a from (select id as b, name from user) u(a, n) where u.n = 1", - "plan": "VT12001: unsupported: column aliases in derived table" - }, { "comment": "Three-way join using the left2right. The normal gen4 planner would merge m1 and m2 first, but the left to right doesnt", "query": "select /*vt+ PLANNER=left2right */ user.col from user join unsharded as m1 join unsharded as m2", diff --git a/go/vt/vtgate/planbuilder/union.go b/go/vt/vtgate/planbuilder/union.go deleted file mode 100644 index f92c3390303..00000000000 --- a/go/vt/vtgate/planbuilder/union.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/mysql" - - "vitess.io/vitess/go/vt/sqlparser" -) - -func buildUnionPlan(string) stmtPlanner { - return func(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - union := stmt.(*sqlparser.Union) - if union.With != nil { - return nil, vterrors.VT12001("WITH expression in UNION statement") - } - // For unions, create a pb with anonymous scope. - pb := newPrimitiveBuilder(vschema, newJointab(reservedVars)) - if err := pb.processUnion(union, reservedVars, nil); err != nil { - return nil, err - } - if err := pb.plan.Wireup(pb.plan, pb.jt); err != nil { - return nil, err - } - return newPlanResult(pb.plan.Primitive()), nil - } -} - -func (pb *primitiveBuilder) processUnion(union *sqlparser.Union, reservedVars *sqlparser.ReservedVars, outer *symtab) error { - if err := pb.processPart(union.Left, reservedVars, outer); err != nil { - return err - } - - rpb := newPrimitiveBuilder(pb.vschema, pb.jt) - if err := rpb.processPart(union.Right, reservedVars, outer); err != nil { - return err - } - err := unionRouteMerge(pb.plan, rpb.plan, union) - if err != nil { - // we are merging between two routes - let's check if we can see so that we have the same amount of columns on both sides of the union - lhsCols := len(pb.plan.ResultColumns()) - rhsCols := len(rpb.plan.ResultColumns()) - if lhsCols != rhsCols { - return &mysql.SQLError{ - Num: mysql.ERWrongNumberOfColumnsInSelect, - State: "21000", - Message: "The used SELECT statements have a different number of columns", - Query: sqlparser.String(union), - } - } - - pb.plan = &concatenate{ - lhs: pb.plan, - rhs: rpb.plan, - } - - if union.Distinct { - pb.plan = newDistinctV3(pb.plan) - } - } - pb.st.Outer = outer - - if err := setLock(pb.plan, union.Lock); err != nil { - return err - } - - if err := pb.pushOrderBy(union.OrderBy); err != nil { - return err - } - return pb.pushLimit(union.Limit) -} - -func (pb *primitiveBuilder) processPart(part sqlparser.SelectStatement, reservedVars *sqlparser.ReservedVars, outer *symtab) error { - switch part := part.(type) { - case *sqlparser.Union: - return pb.processUnion(part, reservedVars, outer) - case *sqlparser.Select: - if part.SQLCalcFoundRows { - return vterrors.VT12001("SQL_CALC_FOUND_ROWS not supported with UNION") - } - return pb.processSelect(part, reservedVars, outer, "") - } - return vterrors.VT13001(fmt.Sprintf("unexpected SELECT type: %T", part)) -} - -// TODO (systay) we never use this as an actual error. we should rethink the return type -func unionRouteMerge(left, right logicalPlan, us *sqlparser.Union) error { - lroute, ok := left.(*route) - if !ok { - return vterrors.VT12001("SELECT of UNION is non-trivial") - } - rroute, ok := right.(*route) - if !ok { - return vterrors.VT12001("SELECT of UNION is non-trivial") - } - mergeSuccess := lroute.MergeUnion(rroute, us.Distinct) - if !mergeSuccess { - return vterrors.VT12001("execute UNION as a single route") - } - - lroute.Select = &sqlparser.Union{Left: lroute.Select, Right: us.Right, Distinct: us.Distinct} - - return nil -} - -// planLock pushes "FOR UPDATE", "LOCK IN SHARE MODE" down to all routes -func setLock(in logicalPlan, lock sqlparser.Lock) error { - _, err := visit(in, func(plan logicalPlan) (bool, logicalPlan, error) { - switch node := in.(type) { - case *route: - node.Select.SetLock(lock) - return false, node, nil - case *sqlCalcFoundRows, *vindexFunc: - return false, nil, vterrors.VT13001(fmt.Sprintf("unreachable %T.locking", in)) - } - return true, plan, nil - }) - if err != nil { - return err - } - return nil -} diff --git a/go/vt/vtgate/planbuilder/update.go b/go/vt/vtgate/planbuilder/update.go new file mode 100644 index 00000000000..b99631f6b55 --- /dev/null +++ b/go/vt/vtgate/planbuilder/update.go @@ -0,0 +1,150 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import ( + querypb "vitess.io/vitess/go/vt/proto/query" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +func gen4UpdateStmtPlanner( + version querypb.ExecuteOptions_PlannerVersion, + updStmt *sqlparser.Update, + reservedVars *sqlparser.ReservedVars, + vschema plancontext.VSchema, +) (*planResult, error) { + if updStmt.With != nil { + return nil, vterrors.VT12001("WITH expression in UPDATE statement") + } + + ctx, err := plancontext.CreatePlanningContext(updStmt, reservedVars, vschema, version) + if err != nil { + return nil, err + } + + err = rewriteRoutedTables(updStmt, vschema) + if err != nil { + return nil, err + } + + if ks, tables := ctx.SemTable.SingleUnshardedKeyspace(); ks != nil { + if fkManagementNotRequiredForUpdate(ctx, tables, updStmt.Exprs) { + plan := updateUnshardedShortcut(updStmt, ks, tables) + plan = pushCommentDirectivesOnPlan(plan, updStmt) + return newPlanResult(plan.Primitive(), operators.QualifiedTables(ks, tables)...), nil + } + } + + if ctx.SemTable.NotUnshardedErr != nil { + return nil, ctx.SemTable.NotUnshardedErr + } + + err = queryRewrite(ctx.SemTable, reservedVars, updStmt) + if err != nil { + return nil, err + } + + op, err := operators.PlanQuery(ctx, updStmt) + if err != nil { + return nil, err + } + + plan, err := transformToLogicalPlan(ctx, op) + if err != nil { + return nil, err + } + + plan = pushCommentDirectivesOnPlan(plan, updStmt) + + setLockOnAllSelect(plan) + + if err := plan.Wireup(ctx); err != nil { + return nil, err + } + + return newPlanResult(plan.Primitive(), operators.TablesUsed(op)...), nil +} + +// TODO: Handle all this in semantic analysis. +func fkManagementNotRequiredForUpdate(ctx *plancontext.PlanningContext, vTables []*vindexes.Table, updateExprs sqlparser.UpdateExprs) bool { + childFkMap := make(map[string][]vindexes.ChildFKInfo) + + // Find the foreign key mode and check for any managed child foreign keys. + for _, vTable := range vTables { + ksMode, err := ctx.VSchema.ForeignKeyMode(vTable.Keyspace.Name) + if err != nil { + return false + } + if ksMode != vschemapb.Keyspace_FK_MANAGED { + continue + } + childFks := vTable.ChildFKsNeedsHandling(ctx.VerifyAllFKs, vindexes.UpdateAction) + if len(childFks) > 0 { + childFkMap[vTable.String()] = childFks + } + } + + getFKInfo := func(expr *sqlparser.UpdateExpr) ([]vindexes.ParentFKInfo, []vindexes.ChildFKInfo) { + tblInfo, err := ctx.SemTable.TableInfoForExpr(expr.Name) + if err != nil { + return nil, nil + } + vTable := tblInfo.GetVindexTable() + return vTable.ParentForeignKeys, childFkMap[vTable.String()] + } + + // Check if any column in the parent table is being updated which has a child foreign key. + return !columnModified(updateExprs, getFKInfo) +} + +// columnModified checks if any column in the parent table is being updated which has a child foreign key. +func columnModified(exprs sqlparser.UpdateExprs, getFks func(expr *sqlparser.UpdateExpr) ([]vindexes.ParentFKInfo, []vindexes.ChildFKInfo)) bool { + for _, updateExpr := range exprs { + parentFKs, childFks := getFks(updateExpr) + for _, childFk := range childFks { + if childFk.ParentColumns.FindColumn(updateExpr.Name.Name) >= 0 { + return true + } + } + if sqlparser.IsNull(updateExpr.Expr) { + continue + } + for _, parentFk := range parentFKs { + if parentFk.ChildColumns.FindColumn(updateExpr.Name.Name) >= 0 { + return true + } + } + } + return false +} + +func updateUnshardedShortcut(stmt *sqlparser.Update, ks *vindexes.Keyspace, tables []*vindexes.Table) logicalPlan { + edml := engine.NewDML() + edml.Keyspace = ks + edml.Opcode = engine.Unsharded + edml.Query = generateQuery(stmt) + for _, tbl := range tables { + edml.TableNames = append(edml.TableNames, tbl.Name.String()) + } + return &primitiveWrapper{prim: &engine.Update{DML: edml}} +} diff --git a/go/vt/vtgate/planbuilder/update_planner.go b/go/vt/vtgate/planbuilder/update_planner.go deleted file mode 100644 index 26884525d8c..00000000000 --- a/go/vt/vtgate/planbuilder/update_planner.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "fmt" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - - "vitess.io/vitess/go/vt/vtgate/semantics" - "vitess.io/vitess/go/vt/vtgate/vindexes" -) - -// buildUpdatePlan returns a stmtPlanner that builds the instructions for an UPDATE statement. -func buildUpdatePlan(string) stmtPlanner { - return func(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { - upd := stmt.(*sqlparser.Update) - if upd.With != nil { - return nil, vterrors.VT12001("WITH expression in UPDATE statement") - } - dml, tables, ksidVindex, err := buildDMLPlan(vschema, "update", stmt, reservedVars, upd.TableExprs, upd.Where, upd.OrderBy, upd.Limit, upd.Comments, upd.Exprs) - if err != nil { - return nil, err - } - eupd := &engine.Update{DML: dml} - - if dml.Opcode == engine.Unsharded { - return newPlanResult(eupd, tables...), nil - } - eupdTable, err := eupd.GetSingleTable() - if err != nil { - return nil, err - } - cvv, ovq, err := buildChangedVindexesValues(upd, eupdTable, ksidVindex.Columns) - if err != nil { - return nil, err - } - eupd.ChangedVindexValues = cvv - eupd.OwnedVindexQuery = ovq - if len(eupd.ChangedVindexValues) != 0 { - eupd.KsidVindex = ksidVindex.Vindex - eupd.KsidLength = len(ksidVindex.Columns) - } - return newPlanResult(eupd, tables...), nil - } -} - -// buildChangedVindexesValues adds to the plan all the lookup vindexes that are changing. -// Updates can only be performed to secondary lookup vindexes with no complex expressions -// in the set clause. -func buildChangedVindexesValues(update *sqlparser.Update, table *vindexes.Table, ksidCols []sqlparser.IdentifierCI) (map[string]*engine.VindexValues, string, error) { - changedVindexes := make(map[string]*engine.VindexValues) - buf, offset := initialQuery(ksidCols, table) - for i, vindex := range table.ColumnVindexes { - vindexValueMap := make(map[string]evalengine.Expr) - first := true - for _, vcol := range vindex.Columns { - // Searching in order of columns in colvindex. - found := false - for _, assignment := range update.Exprs { - if !vcol.Equal(assignment.Name.Name) { - continue - } - if found { - return nil, "", vterrors.VT03015(assignment.Name.Name) - } - found = true - pv, err := extractValueFromUpdate(assignment) - if err != nil { - return nil, "", err - } - vindexValueMap[vcol.String()] = pv - if first { - buf.Myprintf(", %v", assignment) - first = false - } else { - buf.Myprintf(" and %v", assignment) - } - } - } - if len(vindexValueMap) == 0 { - // Vindex not changing, continue - continue - } - - if update.Limit != nil && len(update.OrderBy) == 0 { - return nil, "", vterrors.VT12001(fmt.Sprintf("need to provide ORDER BY clause when using LIMIT; invalid update on vindex: %v", vindex.Name)) - } - if i == 0 { - return nil, "", vterrors.VT12001(fmt.Sprintf("you cannot update primary vindex columns; invalid update on vindex: %v", vindex.Name)) - } - if _, ok := vindex.Vindex.(vindexes.Lookup); !ok { - return nil, "", vterrors.VT12001(fmt.Sprintf("you can only update lookup vindexes; invalid update on vindex: %v", vindex.Name)) - } - changedVindexes[vindex.Name] = &engine.VindexValues{ - PvMap: vindexValueMap, - Offset: offset, - } - offset++ - } - if len(changedVindexes) == 0 { - return nil, "", nil - } - // generate rest of the owned vindex query. - aTblExpr, ok := update.TableExprs[0].(*sqlparser.AliasedTableExpr) - if !ok { - return nil, "", vterrors.VT12001("UPDATE on complex table expression") - } - tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: table.Name}, As: aTblExpr.As} - buf.Myprintf(" from %v%v%v%v for update", tblExpr, update.Where, update.OrderBy, update.Limit) - return changedVindexes, buf.String(), nil -} - -func initialQuery(ksidCols []sqlparser.IdentifierCI, table *vindexes.Table) (*sqlparser.TrackedBuffer, int) { - buf := sqlparser.NewTrackedBuffer(nil) - offset := 0 - for _, col := range ksidCols { - if offset == 0 { - buf.Myprintf("select %v", col) - } else { - buf.Myprintf(", %v", col) - } - offset++ - } - for _, cv := range table.Owned { - for _, column := range cv.Columns { - buf.Myprintf(", %v", column) - offset++ - } - } - return buf, offset -} - -// extractValueFromUpdate given an UpdateExpr attempts to extracts the Value -// it's holding. At the moment it only supports: StrVal, HexVal, IntVal, ValArg. -// If a complex expression is provided (e.g set name = name + 1), the update will be rejected. -func extractValueFromUpdate(upd *sqlparser.UpdateExpr) (evalengine.Expr, error) { - pv, err := evalengine.Translate(upd.Expr, semantics.EmptySemTable()) - if err != nil || sqlparser.IsSimpleTuple(upd.Expr) { - err := vterrors.VT12001(fmt.Sprintf("only values are supported: invalid update on column: `%s` with expr: [%s]", upd.Name.Name.String(), sqlparser.String(upd.Expr))) - return nil, err - } - return pv, nil -} diff --git a/go/vt/vtgate/planbuilder/vexplain.go b/go/vt/vtgate/planbuilder/vexplain.go index 86d650ec57c..1e01576b25d 100644 --- a/go/vt/vtgate/planbuilder/vexplain.go +++ b/go/vt/vtgate/planbuilder/vexplain.go @@ -17,6 +17,7 @@ limitations under the License. package planbuilder import ( + "context" "encoding/json" "fmt" @@ -32,7 +33,7 @@ import ( ) // Builds an explain-plan for the given Primitive -func buildExplainPlan(stmt sqlparser.Explain, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) { +func buildExplainPlan(ctx context.Context, stmt sqlparser.Explain, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) { switch explain := stmt.(type) { case *sqlparser.ExplainTab: return explainTabPlan(explain, vschema) @@ -40,10 +41,10 @@ func buildExplainPlan(stmt sqlparser.Explain, reservedVars *sqlparser.ReservedVa switch explain.Type { case sqlparser.VitessType: vschema.PlannerWarning("EXPLAIN FORMAT = VITESS is deprecated, please use VEXPLAIN PLAN instead.") - return buildVExplainVtgatePlan(explain.Statement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) + return buildVExplainVtgatePlan(ctx, explain.Statement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) case sqlparser.VTExplainType: vschema.PlannerWarning("EXPLAIN FORMAT = VTEXPLAIN is deprecated, please use VEXPLAIN QUERIES instead.") - return buildVExplainLoggingPlan(&sqlparser.VExplainStmt{Type: sqlparser.QueriesVExplainType, Statement: explain.Statement, Comments: explain.Comments}, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) + return buildVExplainLoggingPlan(ctx, &sqlparser.VExplainStmt{Type: sqlparser.QueriesVExplainType, Statement: explain.Statement, Comments: explain.Comments}, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) default: return buildOtherReadAndAdmin(sqlparser.String(explain), vschema) } @@ -51,12 +52,12 @@ func buildExplainPlan(stmt sqlparser.Explain, reservedVars *sqlparser.ReservedVa return nil, vterrors.VT13001(fmt.Sprintf("unexpected explain type: %T", stmt)) } -func buildVExplainPlan(vexplainStmt *sqlparser.VExplainStmt, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) { +func buildVExplainPlan(ctx context.Context, vexplainStmt *sqlparser.VExplainStmt, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) { switch vexplainStmt.Type { case sqlparser.QueriesVExplainType, sqlparser.AllVExplainType: - return buildVExplainLoggingPlan(vexplainStmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) + return buildVExplainLoggingPlan(ctx, vexplainStmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) case sqlparser.PlanVExplainType: - return buildVExplainVtgatePlan(vexplainStmt.Statement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) + return buildVExplainVtgatePlan(ctx, vexplainStmt.Statement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) } return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unexpected vtexplain type: %s", vexplainStmt.Type.ToString()) } @@ -88,8 +89,8 @@ func explainTabPlan(explain *sqlparser.ExplainTab, vschema plancontext.VSchema) }, singleTable(keyspace.Name, explain.Table.Name.String())), nil } -func buildVExplainVtgatePlan(explainStatement sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) { - innerInstruction, err := createInstructionFor(sqlparser.String(explainStatement), explainStatement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) +func buildVExplainVtgatePlan(ctx context.Context, explainStatement sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) { + innerInstruction, err := createInstructionFor(ctx, sqlparser.String(explainStatement), explainStatement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) if err != nil { return nil, err } @@ -109,8 +110,8 @@ func buildVExplainVtgatePlan(explainStatement sqlparser.Statement, reservedVars return newPlanResult(engine.NewRowsPrimitive(rows, fields)), nil } -func buildVExplainLoggingPlan(explain *sqlparser.VExplainStmt, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) { - input, err := createInstructionFor(sqlparser.String(explain.Statement), explain.Statement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) +func buildVExplainLoggingPlan(ctx context.Context, explain *sqlparser.VExplainStmt, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) { + input, err := createInstructionFor(ctx, sqlparser.String(explain.Statement), explain.Statement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) if err != nil { return nil, err } diff --git a/go/vt/vtgate/planbuilder/vindex_func.go b/go/vt/vtgate/planbuilder/vindex_func.go index e72a35e7814..be0f4e0ffa7 100644 --- a/go/vt/vtgate/planbuilder/vindex_func.go +++ b/go/vt/vtgate/planbuilder/vindex_func.go @@ -19,17 +19,16 @@ package planbuilder import ( "fmt" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vterrors" + querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/vindexes" - - querypb "vitess.io/vitess/go/vt/proto/query" ) var _ logicalPlan = (*vindexFunc)(nil) @@ -41,9 +40,6 @@ type vindexFunc struct { // the tableID field is only used by the gen4 planner tableID semantics.TableSet - // resultColumns represent the columns returned by this route. - resultColumns []*resultColumn - // eVindexFunc is the primitive being built. eVindexFunc *engine.VindexFunc } @@ -57,89 +53,16 @@ var colnames = []string{ "shard", } -func newVindexFunc(alias sqlparser.TableName, vindex vindexes.SingleColumn) (*vindexFunc, *symtab) { - vf := &vindexFunc{ - order: 1, - eVindexFunc: &engine.VindexFunc{ - Vindex: vindex, - }, - } - - // Create a 'table' that represents the vindex. - t := &table{ - alias: alias, - origin: vf, - } - - for _, colName := range colnames { - t.addColumn(sqlparser.NewIdentifierCI(colName), &column{origin: vf}) - } - t.isAuthoritative = true - - st := newSymtab() - // AddTable will not fail because symtab is empty. - _ = st.AddTable(t) - return vf, st -} - -// Order implements the logicalPlan interface -func (vf *vindexFunc) Order() int { - return vf.order -} - -// Reorder implements the logicalPlan interface -func (vf *vindexFunc) Reorder(order int) { - vf.order = order + 1 -} - // Primitive implements the logicalPlan interface func (vf *vindexFunc) Primitive() engine.Primitive { return vf.eVindexFunc } -// ResultColumns implements the logicalPlan interface -func (vf *vindexFunc) ResultColumns() []*resultColumn { - return vf.resultColumns -} - -// Wireup implements the logicalPlan interface -func (vf *vindexFunc) Wireup(logicalPlan, *jointab) error { - return nil -} - // WireupGen4 implements the logicalPlan interface -func (vf *vindexFunc) WireupGen4(*plancontext.PlanningContext) error { +func (vf *vindexFunc) Wireup(*plancontext.PlanningContext) error { return nil } -// SupplyVar implements the logicalPlan interface -func (vf *vindexFunc) SupplyVar(from, to int, col *sqlparser.ColName, varname string) { - // vindexFunc is an atomic primitive. So, SupplyVar cannot be - // called on it. - panic("BUG: vindexFunc is an atomic node.") -} - -// SupplyCol implements the logicalPlan interface -func (vf *vindexFunc) SupplyCol(col *sqlparser.ColName) (rc *resultColumn, colNumber int) { - c := col.Metadata.(*column) - for i, rc := range vf.resultColumns { - if rc.column == c { - return rc, i - } - } - - vf.resultColumns = append(vf.resultColumns, &resultColumn{column: c}) - vf.eVindexFunc.Fields = append(vf.eVindexFunc.Fields, &querypb.Field{ - Name: col.Name.String(), - Type: querypb.Type_VARBINARY, - }) - - // columns that reference vindexFunc will have their colNumber set. - // Let's use it here. - vf.eVindexFunc.Cols = append(vf.eVindexFunc.Cols, c.colNumber) - return rc, len(vf.resultColumns) - 1 -} - // SupplyProjection pushes the given aliased expression into the fields and cols slices of the // vindexFunc engine primitive. The method returns the offset of the new expression in the columns // list. @@ -163,8 +86,10 @@ func (vf *vindexFunc) SupplyProjection(expr *sqlparser.AliasedExpr, reuse bool) } vf.eVindexFunc.Fields = append(vf.eVindexFunc.Fields, &querypb.Field{ - Name: expr.ColumnName(), - Type: querypb.Type_VARBINARY, + Name: expr.ColumnName(), + Type: querypb.Type_VARBINARY, + Charset: collations.CollationBinaryID, + Flags: uint32(querypb.MySqlFlag_BINARY_FLAG), }) vf.eVindexFunc.Cols = append(vf.eVindexFunc.Cols, enum) return len(vf.eVindexFunc.Cols) - 1, nil @@ -180,11 +105,6 @@ func (err UnsupportedSupplyWeightString) Error() string { return fmt.Sprintf("cannot do collation on %s", err.Type) } -// SupplyWeightString implements the logicalPlan interface -func (vf *vindexFunc) SupplyWeightString(colNumber int, alsoAddToGroupBy bool) (weightcolNumber int, err error) { - return 0, UnsupportedSupplyWeightString{Type: "vindex function"} -} - // Rewrite implements the logicalPlan interface func (vf *vindexFunc) Rewrite(inputs ...logicalPlan) error { if len(inputs) != 0 { diff --git a/go/vt/vtgate/planbuilder/vindex_op.go b/go/vt/vtgate/planbuilder/vindex_op.go index 665ed70c4ca..c439dec1701 100644 --- a/go/vt/vtgate/planbuilder/vindex_op.go +++ b/go/vt/vtgate/planbuilder/vindex_op.go @@ -32,14 +32,16 @@ func transformVindexPlan(ctx *plancontext.PlanningContext, op *operators.Vindex) return nil, vterrors.VT12001("multi-column vindexes not supported") } - expr, err := evalengine.Translate(op.Value, ctx.SemTable) + expr, err := evalengine.Translate(op.Value, &evalengine.Config{ + Collation: ctx.SemTable.Collation, + ResolveType: ctx.SemTable.TypeForExpr, + }) if err != nil { return nil, err } plan := &vindexFunc{ - order: 1, - tableID: op.Solved, - resultColumns: nil, + order: 1, + tableID: op.Solved, eVindexFunc: &engine.VindexFunc{ Opcode: op.OpCode, Vindex: single, diff --git a/go/vt/vtgate/plugin_mysql_server.go b/go/vt/vtgate/plugin_mysql_server.go index 1dbd9074485..bfbb7b105f8 100644 --- a/go/vt/vtgate/plugin_mysql_server.go +++ b/go/vt/vtgate/plugin_mysql_server.go @@ -29,10 +29,11 @@ import ( "syscall" "time" + "github.com/google/uuid" "github.com/spf13/pflag" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" @@ -40,13 +41,12 @@ import ( "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/callinfo" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vttls" - - "github.com/google/uuid" - querypb "vitess.io/vitess/go/vt/proto/query" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttls" ) var ( @@ -65,6 +65,7 @@ var ( mysqlSslServerCA string mysqlTLSMinVersion string + mysqlKeepAlivePeriod time.Duration mysqlConnReadTimeout time.Duration mysqlConnWriteTimeout time.Duration mysqlQueryTimeout time.Duration @@ -73,8 +74,6 @@ var ( mysqlDefaultWorkloadName = "OLTP" mysqlDefaultWorkload int32 - - busyConnections int32 ) func registerPluginFlags(fs *pflag.FlagSet) { @@ -97,6 +96,7 @@ func registerPluginFlags(fs *pflag.FlagSet) { fs.DurationVar(&mysqlConnWriteTimeout, "mysql_server_write_timeout", mysqlConnWriteTimeout, "connection write timeout") fs.DurationVar(&mysqlQueryTimeout, "mysql_server_query_timeout", mysqlQueryTimeout, "mysql query timeout") fs.BoolVar(&mysqlConnBufferPooling, "mysql-server-pool-conn-read-buffers", mysqlConnBufferPooling, "If set, the server will pool incoming connection read buffers") + fs.DurationVar(&mysqlKeepAlivePeriod, "mysql-server-keepalive-period", mysqlKeepAlivePeriod, "TCP period between keep-alives") fs.StringVar(&mysqlDefaultWorkloadName, "mysql_default_workload", mysqlDefaultWorkloadName, "Default session workload (OLTP, OLAP, DBA)") } @@ -107,20 +107,22 @@ type vtgateHandler struct { mu sync.Mutex vtg *VTGate - connections map[*mysql.Conn]bool + connections map[uint32]*mysql.Conn + + busyConnections atomic.Int32 } func newVtgateHandler(vtg *VTGate) *vtgateHandler { return &vtgateHandler{ vtg: vtg, - connections: make(map[*mysql.Conn]bool), + connections: make(map[uint32]*mysql.Conn), } } func (vh *vtgateHandler) NewConnection(c *mysql.Conn) { vh.mu.Lock() defer vh.mu.Unlock() - vh.connections[c] = true + vh.connections[c.ConnectionID] = c } func (vh *vtgateHandler) numConnections() int { @@ -133,7 +135,7 @@ func (vh *vtgateHandler) ComResetConnection(c *mysql.Conn) { ctx := context.Background() session := vh.session(c) if session.InTransaction { - defer atomic.AddInt32(&busyConnections, -1) + defer vh.busyConnections.Add(-1) } err := vh.vtg.CloseSession(ctx, session) if err != nil { @@ -145,8 +147,8 @@ func (vh *vtgateHandler) ConnectionClosed(c *mysql.Conn) { // Rollback if there is an ongoing transaction. Ignore error. defer func() { vh.mu.Lock() - defer vh.mu.Unlock() - delete(vh.connections, c) + delete(vh.connections, c.ConnectionID) + vh.mu.Unlock() }() var ctx context.Context @@ -159,7 +161,7 @@ func (vh *vtgateHandler) ConnectionClosed(c *mysql.Conn) { } session := vh.session(c) if session.InTransaction { - defer atomic.AddInt32(&busyConnections, -1) + defer vh.busyConnections.Add(-1) } _ = vh.vtg.CloseSession(ctx, session) } @@ -199,8 +201,9 @@ func startSpan(ctx context.Context, query, label string) (trace.Span, context.Co } func (vh *vtgateHandler) ComQuery(c *mysql.Conn, query string, callback func(*sqltypes.Result) error) error { - ctx := context.Background() - var cancel context.CancelFunc + ctx, cancel := context.WithCancel(context.Background()) + c.UpdateCancelCtx(cancel) + if mysqlQueryTimeout != 0 { ctx, cancel = context.WithTimeout(ctx, mysqlQueryTimeout) defer cancel() @@ -228,21 +231,25 @@ func (vh *vtgateHandler) ComQuery(c *mysql.Conn, query string, callback func(*sq session := vh.session(c) if !session.InTransaction { - atomic.AddInt32(&busyConnections, 1) + vh.busyConnections.Add(1) } defer func() { if !session.InTransaction { - atomic.AddInt32(&busyConnections, -1) + vh.busyConnections.Add(-1) } }() if session.Options.Workload == querypb.ExecuteOptions_OLAP { - err := vh.vtg.StreamExecute(ctx, session, query, make(map[string]*querypb.BindVariable), callback) - return mysql.NewSQLErrorFromError(err) + session, err := vh.vtg.StreamExecute(ctx, vh, session, query, make(map[string]*querypb.BindVariable), callback) + if err != nil { + return sqlerror.NewSQLErrorFromError(err) + } + fillInTxStatusFlags(c, session) + return nil } - session, result, err := vh.vtg.Execute(ctx, session, query, make(map[string]*querypb.BindVariable)) + session, result, err := vh.vtg.Execute(ctx, vh, session, query, make(map[string]*querypb.BindVariable)) - if err := mysql.NewSQLErrorFromError(err); err != nil { + if err := sqlerror.NewSQLErrorFromError(err); err != nil { return err } fillInTxStatusFlags(c, session) @@ -289,16 +296,16 @@ func (vh *vtgateHandler) ComPrepare(c *mysql.Conn, query string, bindVars map[st session := vh.session(c) if !session.InTransaction { - atomic.AddInt32(&busyConnections, 1) + vh.busyConnections.Add(1) } defer func() { if !session.InTransaction { - atomic.AddInt32(&busyConnections, -1) + vh.busyConnections.Add(-1) } }() session, fld, err := vh.vtg.Prepare(ctx, session, query, bindVars) - err = mysql.NewSQLErrorFromError(err) + err = sqlerror.NewSQLErrorFromError(err) if err != nil { return nil, err } @@ -306,13 +313,12 @@ func (vh *vtgateHandler) ComPrepare(c *mysql.Conn, query string, bindVars map[st } func (vh *vtgateHandler) ComStmtExecute(c *mysql.Conn, prepare *mysql.PrepareData, callback func(*sqltypes.Result) error) error { - var ctx context.Context - var cancel context.CancelFunc + ctx, cancel := context.WithCancel(context.Background()) + c.UpdateCancelCtx(cancel) + if mysqlQueryTimeout != 0 { - ctx, cancel = context.WithTimeout(context.Background(), mysqlQueryTimeout) + ctx, cancel = context.WithTimeout(ctx, mysqlQueryTimeout) defer cancel() - } else { - ctx = context.Background() } ctx = callinfo.MysqlCallInfo(ctx, c) @@ -331,22 +337,25 @@ func (vh *vtgateHandler) ComStmtExecute(c *mysql.Conn, prepare *mysql.PrepareDat session := vh.session(c) if !session.InTransaction { - atomic.AddInt32(&busyConnections, 1) + vh.busyConnections.Add(1) } defer func() { if !session.InTransaction { - atomic.AddInt32(&busyConnections, -1) + vh.busyConnections.Add(-1) } }() if session.Options.Workload == querypb.ExecuteOptions_OLAP { - err := vh.vtg.StreamExecute(ctx, session, prepare.PrepareStmt, prepare.BindVars, callback) - return mysql.NewSQLErrorFromError(err) + _, err := vh.vtg.StreamExecute(ctx, vh, session, prepare.PrepareStmt, prepare.BindVars, callback) + if err != nil { + return sqlerror.NewSQLErrorFromError(err) + } + fillInTxStatusFlags(c, session) + return nil } - _, qr, err := vh.vtg.Execute(ctx, session, prepare.PrepareStmt, prepare.BindVars) + _, qr, err := vh.vtg.Execute(ctx, vh, session, prepare.PrepareStmt, prepare.BindVars) if err != nil { - err = mysql.NewSQLErrorFromError(err) - return err + return sqlerror.NewSQLErrorFromError(err) } fillInTxStatusFlags(c, session) @@ -368,10 +377,41 @@ func (vh *vtgateHandler) ComBinlogDump(c *mysql.Conn, logFile string, binlogPos } // ComBinlogDumpGTID is part of the mysql.Handler interface. -func (vh *vtgateHandler) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet mysql.GTIDSet) error { +func (vh *vtgateHandler) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error { return vterrors.VT12001("ComBinlogDumpGTID for the VTGate handler") } +// KillConnection closes an open connection by connection ID. +func (vh *vtgateHandler) KillConnection(ctx context.Context, connectionID uint32) error { + vh.mu.Lock() + defer vh.mu.Unlock() + + c, exists := vh.connections[connectionID] + if !exists { + return sqlerror.NewSQLError(sqlerror.ERNoSuchThread, sqlerror.SSUnknownSQLState, "Unknown thread id: %d", connectionID) + } + + // First, we mark the connection for close, so that even when the context is cancelled, while returning the response back to client, + // the connection can get closed, + // Closing the connection will trigger ConnectionClosed method which rollback any open transaction. + c.MarkForClose() + c.CancelCtx() + + return nil +} + +// KillQuery cancels any execution query on the provided connection ID. +func (vh *vtgateHandler) KillQuery(connectionID uint32) error { + vh.mu.Lock() + defer vh.mu.Unlock() + c, exists := vh.connections[connectionID] + if !exists { + return sqlerror.NewSQLError(sqlerror.ERNoSuchThread, sqlerror.SSUnknownSQLState, "Unknown thread id: %d", connectionID) + } + c.CancelCtx() + return nil +} + func (vh *vtgateHandler) session(c *mysql.Conn) *vtgatepb.Session { session, _ := c.ClientData.(*vtgatepb.Session) if session == nil { @@ -385,6 +425,7 @@ func (vh *vtgateHandler) session(c *mysql.Conn) *vtgatepb.Session { }, Autocommit: true, DDLStrategy: defaultDDLStrategy, + MigrationContext: "", SessionUUID: u.String(), EnableSystemSettings: sysVarSetEnabled, } @@ -396,30 +437,37 @@ func (vh *vtgateHandler) session(c *mysql.Conn) *vtgatepb.Session { return session } -var mysqlListener *mysql.Listener -var mysqlUnixListener *mysql.Listener -var sigChan chan os.Signal -var vtgateHandle *vtgateHandler +type mysqlServer struct { + tcpListener *mysql.Listener + unixListener *mysql.Listener + sigChan chan os.Signal + vtgateHandle *vtgateHandler +} // initTLSConfig inits tls config for the given mysql listener -func initTLSConfig(mysqlListener *mysql.Listener, mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA string, mysqlServerRequireSecureTransport bool, mysqlMinTLSVersion uint16) error { +func initTLSConfig(ctx context.Context, srv *mysqlServer, mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA string, mysqlServerRequireSecureTransport bool, mysqlMinTLSVersion uint16) error { serverConfig, err := vttls.ServerConfig(mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA, mysqlMinTLSVersion) if err != nil { log.Exitf("grpcutils.TLSServerConfig failed: %v", err) return err } - mysqlListener.TLSConfig.Store(serverConfig) - mysqlListener.RequireSecureTransport = mysqlServerRequireSecureTransport - sigChan = make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGHUP) + srv.tcpListener.TLSConfig.Store(serverConfig) + srv.tcpListener.RequireSecureTransport = mysqlServerRequireSecureTransport + srv.sigChan = make(chan os.Signal, 1) + signal.Notify(srv.sigChan, syscall.SIGHUP) go func() { - for range sigChan { - serverConfig, err := vttls.ServerConfig(mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA, mysqlMinTLSVersion) - if err != nil { - log.Errorf("grpcutils.TLSServerConfig failed: %v", err) - } else { - log.Info("grpcutils.TLSServerConfig updated") - mysqlListener.TLSConfig.Store(serverConfig) + for { + select { + case <-ctx.Done(): + return + case <-srv.sigChan: + serverConfig, err := vttls.ServerConfig(mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA, mysqlMinTLSVersion) + if err != nil { + log.Errorf("grpcutils.TLSServerConfig failed: %v", err) + } else { + log.Info("grpcutils.TLSServerConfig updated") + srv.tcpListener.TLSConfig.Store(serverConfig) + } } } }() @@ -428,15 +476,15 @@ func initTLSConfig(mysqlListener *mysql.Listener, mysqlSslCert, mysqlSslKey, mys // initiMySQLProtocol starts the mysql protocol. // It should be called only once in a process. -func initMySQLProtocol() { +func initMySQLProtocol(vtgate *VTGate) *mysqlServer { // Flag is not set, just return. if mysqlServerPort < 0 && mysqlServerSocketPath == "" { - return + return nil } // If no VTGate was created, just return. - if rpcVTGate == nil { - return + if vtgate == nil { + return nil } // Initialize registered AuthServer implementations (or other plugins) @@ -460,53 +508,56 @@ func initMySQLProtocol() { // Create a Listener. var err error - vtgateHandle = newVtgateHandler(rpcVTGate) + srv := &mysqlServer{} + srv.vtgateHandle = newVtgateHandler(vtgate) if mysqlServerPort >= 0 { - mysqlListener, err = mysql.NewListener( + srv.tcpListener, err = mysql.NewListener( mysqlTCPVersion, net.JoinHostPort(mysqlServerBindAddress, fmt.Sprintf("%v", mysqlServerPort)), authServer, - vtgateHandle, + srv.vtgateHandle, mysqlConnReadTimeout, mysqlConnWriteTimeout, mysqlProxyProtocol, mysqlConnBufferPooling, + mysqlKeepAlivePeriod, ) if err != nil { log.Exitf("mysql.NewListener failed: %v", err) } - mysqlListener.ServerVersion = servenv.MySQLServerVersion() + srv.tcpListener.ServerVersion = servenv.MySQLServerVersion() if mysqlSslCert != "" && mysqlSslKey != "" { tlsVersion, err := vttls.TLSVersionToNumber(mysqlTLSMinVersion) if err != nil { log.Exitf("mysql.NewListener failed: %v", err) } - _ = initTLSConfig(mysqlListener, mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA, mysqlServerRequireSecureTransport, tlsVersion) + _ = initTLSConfig(context.Background(), srv, mysqlSslCert, mysqlSslKey, mysqlSslCa, mysqlSslCrl, mysqlSslServerCA, mysqlServerRequireSecureTransport, tlsVersion) } - mysqlListener.AllowClearTextWithoutTLS.Store(mysqlAllowClearTextWithoutTLS) + srv.tcpListener.AllowClearTextWithoutTLS.Store(mysqlAllowClearTextWithoutTLS) // Check for the connection threshold if mysqlSlowConnectWarnThreshold != 0 { log.Infof("setting mysql slow connection threshold to %v", mysqlSlowConnectWarnThreshold) - mysqlListener.SlowConnectWarnThreshold.Store(mysqlSlowConnectWarnThreshold.Nanoseconds()) + srv.tcpListener.SlowConnectWarnThreshold.Store(mysqlSlowConnectWarnThreshold.Nanoseconds()) } // Start listening for tcp - go mysqlListener.Accept() + go srv.tcpListener.Accept() } if mysqlServerSocketPath != "" { // Let's create this unix socket with permissions to all users. In this way, // clients can connect to vtgate mysql server without being vtgate user oldMask := syscall.Umask(000) - mysqlUnixListener, err = newMysqlUnixSocket(mysqlServerSocketPath, authServer, vtgateHandle) + srv.unixListener, err = newMysqlUnixSocket(mysqlServerSocketPath, authServer, srv.vtgateHandle) _ = syscall.Umask(oldMask) if err != nil { log.Exitf("mysql.NewListener failed: %v", err) - return + return nil } // Listen for unix socket - go mysqlUnixListener.Accept() + go srv.unixListener.Accept() } + return srv } // newMysqlUnixSocket creates a new unix socket mysql listener. If a socket file already exists, attempts @@ -521,6 +572,7 @@ func newMysqlUnixSocket(address string, authServer mysql.AuthServer, handler mys mysqlConnWriteTimeout, false, mysqlConnBufferPooling, + mysqlKeepAlivePeriod, ) switch err := err.(type) { @@ -552,6 +604,7 @@ func newMysqlUnixSocket(address string, authServer mysql.AuthServer, handler mys mysqlConnWriteTimeout, false, mysqlConnBufferPooling, + mysqlKeepAlivePeriod, ) return listener, listenerErr default: @@ -559,37 +612,38 @@ func newMysqlUnixSocket(address string, authServer mysql.AuthServer, handler mys } } -func shutdownMysqlProtocolAndDrain() { - if mysqlListener != nil { - mysqlListener.Close() - mysqlListener = nil +func (srv *mysqlServer) shutdownMysqlProtocolAndDrain() { + if srv.tcpListener != nil { + srv.tcpListener.Close() + srv.tcpListener = nil } - if mysqlUnixListener != nil { - mysqlUnixListener.Close() - mysqlUnixListener = nil + if srv.unixListener != nil { + srv.unixListener.Close() + srv.unixListener = nil } - if sigChan != nil { - signal.Stop(sigChan) + if srv.sigChan != nil { + signal.Stop(srv.sigChan) } - if atomic.LoadInt32(&busyConnections) > 0 { - log.Infof("Waiting for all client connections to be idle (%d active)...", atomic.LoadInt32(&busyConnections)) + if busy := srv.vtgateHandle.busyConnections.Load(); busy > 0 { + log.Infof("Waiting for all client connections to be idle (%d active)...", busy) start := time.Now() reported := start - for atomic.LoadInt32(&busyConnections) != 0 { + for busy > 0 { if time.Since(reported) > 2*time.Second { - log.Infof("Still waiting for client connections to be idle (%d active)...", atomic.LoadInt32(&busyConnections)) + log.Infof("Still waiting for client connections to be idle (%d active)...", busy) reported = time.Now() } time.Sleep(1 * time.Millisecond) + busy = srv.vtgateHandle.busyConnections.Load() } } } -func rollbackAtShutdown() { +func (srv *mysqlServer) rollbackAtShutdown() { defer log.Flush() - if vtgateHandle == nil { + if srv.vtgateHandle == nil { // we still haven't been able to initialise the vtgateHandler, so we don't need to rollback anything return } @@ -597,12 +651,12 @@ func rollbackAtShutdown() { // Close all open connections. If they're waiting for reads, this will cause // them to error out, which will automatically rollback open transactions. func() { - if vtgateHandle != nil { - vtgateHandle.mu.Lock() - defer vtgateHandle.mu.Unlock() - for c := range vtgateHandle.connections { + if srv.vtgateHandle != nil { + srv.vtgateHandle.mu.Lock() + defer srv.vtgateHandle.mu.Unlock() + for id, c := range srv.vtgateHandle.connections { if c != nil { - log.Infof("Rolling back transactions associated with connection ID: %v", c.ConnectionID) + log.Infof("Rolling back transactions associated with connection ID: %v", id) c.Close() } } @@ -612,7 +666,7 @@ func rollbackAtShutdown() { // If vtgate is instead busy executing a query, the number of open conns // will be non-zero. Give another second for those queries to finish. for i := 0; i < 100; i++ { - if vtgateHandle.numConnections() == 0 { + if srv.vtgateHandle.numConnections() == 0 { log.Infof("All connections have been rolled back.") return } @@ -631,10 +685,6 @@ func mysqlSocketPath() string { func init() { servenv.OnParseFor("vtgate", registerPluginFlags) servenv.OnParseFor("vtcombo", registerPluginFlags) - - servenv.OnRun(initMySQLProtocol) - servenv.OnTermSync(shutdownMysqlProtocolAndDrain) - servenv.OnClose(rollbackAtShutdown) } var pluginInitializers []func() diff --git a/go/vt/vtgate/plugin_mysql_server_test.go b/go/vt/vtgate/plugin_mysql_server_test.go index ceccadf706a..1b161dfb171 100644 --- a/go/vt/vtgate/plugin_mysql_server_test.go +++ b/go/vt/vtgate/plugin_mysql_server_test.go @@ -28,11 +28,15 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" - "vitess.io/vitess/go/trace" + "vitess.io/vitess/go/test/utils" + + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/trace" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/tlstest" ) @@ -71,7 +75,7 @@ func (th *testHandler) ComBinlogDump(c *mysql.Conn, logFile string, binlogPos ui return nil } -func (th *testHandler) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet mysql.GTIDSet) error { +func (th *testHandler) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error { return nil } @@ -244,6 +248,7 @@ func newTestAuthServerStatic() *mysql.AuthServerStatic { func TestDefaultWorkloadEmpty(t *testing.T) { vh := &vtgateHandler{} + mysqlDefaultWorkload = int32(querypb.ExecuteOptions_OLTP) sess := vh.session(&mysql.Conn{}) if sess.Options.Workload != querypb.ExecuteOptions_OLTP { t.Fatalf("Expected default workload OLTP") @@ -269,6 +274,8 @@ func TestInitTLSConfigWithServerCA(t *testing.T) { func testInitTLSConfig(t *testing.T, serverCA bool) { // Create the certs. + ctx := utils.LeakCheckContext(t) + root := t.TempDir() tlstest.CreateCA(root) tlstest.CreateCRL(root, tlstest.CA) @@ -279,20 +286,59 @@ func testInitTLSConfig(t *testing.T, serverCA bool) { serverCACert = path.Join(root, "ca-cert.pem") } - listener := &mysql.Listener{} - if err := initTLSConfig(listener, path.Join(root, "server-cert.pem"), path.Join(root, "server-key.pem"), path.Join(root, "ca-cert.pem"), path.Join(root, "ca-crl.pem"), serverCACert, true, tls.VersionTLS12); err != nil { + srv := &mysqlServer{tcpListener: &mysql.Listener{}} + if err := initTLSConfig(ctx, srv, path.Join(root, "server-cert.pem"), path.Join(root, "server-key.pem"), path.Join(root, "ca-cert.pem"), path.Join(root, "ca-crl.pem"), serverCACert, true, tls.VersionTLS12); err != nil { t.Fatalf("init tls config failure due to: +%v", err) } - serverConfig := listener.TLSConfig.Load() + serverConfig := srv.tcpListener.TLSConfig.Load() if serverConfig == nil { t.Fatalf("init tls config shouldn't create nil server config") } - sigChan <- syscall.SIGHUP + srv.sigChan <- syscall.SIGHUP time.Sleep(100 * time.Millisecond) // wait for signal handler - if listener.TLSConfig.Load() == serverConfig { + if srv.tcpListener.TLSConfig.Load() == serverConfig { t.Fatalf("init tls config should have been recreated after SIGHUP") } } + +// TestKillMethods test the mysql plugin for kill method calls. +func TestKillMethods(t *testing.T) { + executor, _, _, _, _ := createExecutorEnv(t) + vh := newVtgateHandler(&VTGate{executor: executor}) + + // connection does not exist + err := vh.KillQuery(12345) + assert.ErrorContains(t, err, "Unknown thread id: 12345 (errno 1094) (sqlstate HY000)") + + err = vh.KillConnection(context.Background(), 12345) + assert.ErrorContains(t, err, "Unknown thread id: 12345 (errno 1094) (sqlstate HY000)") + + // add a connection + mysqlConn := mysql.GetTestConn() + mysqlConn.ConnectionID = 1 + vh.connections[1] = mysqlConn + + // connection exists + + // updating context. + cancelCtx, cancelFunc := context.WithCancel(context.Background()) + mysqlConn.UpdateCancelCtx(cancelFunc) + + // kill query + err = vh.KillQuery(1) + assert.NoError(t, err) + require.EqualError(t, cancelCtx.Err(), "context canceled") + + // updating context. + cancelCtx, cancelFunc = context.WithCancel(context.Background()) + mysqlConn.UpdateCancelCtx(cancelFunc) + + // kill connection + err = vh.KillConnection(context.Background(), 1) + assert.NoError(t, err) + require.EqualError(t, cancelCtx.Err(), "context canceled") + require.True(t, mysqlConn.IsMarkedForClose()) +} diff --git a/go/vt/vtgate/querylog.go b/go/vt/vtgate/querylog.go index f7d1af01613..7425f2feba9 100644 --- a/go/vt/vtgate/querylog.go +++ b/go/vt/vtgate/querylog.go @@ -18,9 +18,9 @@ package vtgate import ( "net/http" - "sync" "vitess.io/vitess/go/streamlog" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vtgate/logstats" ) @@ -33,38 +33,33 @@ var ( // QueryzHandler is the debug UI path for exposing query plan stats QueryzHandler = "/debug/queryz" - - // QueryLogger enables streaming logging of queries - QueryLogger *streamlog.StreamLogger[*logstats.LogStats] - queryLoggerMu sync.Mutex ) -func SetQueryLogger(logger *streamlog.StreamLogger[*logstats.LogStats]) { - queryLoggerMu.Lock() - defer queryLoggerMu.Unlock() - QueryLogger = logger -} - -func initQueryLogger(vtg *VTGate) error { - SetQueryLogger(streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize)) - QueryLogger.ServeLogs(QueryLogHandler, streamlog.GetFormatter(QueryLogger)) +func (e *Executor) defaultQueryLogger() error { + queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) + queryLogger.ServeLogs(QueryLogHandler, streamlog.GetFormatter(queryLogger)) - http.HandleFunc(QueryLogzHandler, func(w http.ResponseWriter, r *http.Request) { - ch := QueryLogger.Subscribe("querylogz") - defer QueryLogger.Unsubscribe(ch) + servenv.HTTPHandleFunc(QueryLogzHandler, func(w http.ResponseWriter, r *http.Request) { + ch := queryLogger.Subscribe("querylogz") + defer queryLogger.Unsubscribe(ch) querylogzHandler(ch, w, r) }) - http.HandleFunc(QueryzHandler, func(w http.ResponseWriter, r *http.Request) { - queryzHandler(vtg.executor, w, r) + servenv.HTTPHandleFunc(QueryzHandler, func(w http.ResponseWriter, r *http.Request) { + queryzHandler(e, w, r) }) if queryLogToFile != "" { - _, err := QueryLogger.LogToFile(queryLogToFile, streamlog.GetFormatter(QueryLogger)) + _, err := queryLogger.LogToFile(queryLogToFile, streamlog.GetFormatter(queryLogger)) if err != nil { return err } } + e.queryLogger = queryLogger return nil } + +func (e *Executor) SetQueryLogger(ql *streamlog.StreamLogger[*logstats.LogStats]) { + e.queryLogger = ql +} diff --git a/go/vt/vtgate/queryz.go b/go/vt/vtgate/queryz.go index e301a9ead44..e546fc68c6f 100644 --- a/go/vt/vtgate/queryz.go +++ b/go/vt/vtgate/queryz.go @@ -18,11 +18,12 @@ package vtgate import ( "fmt" - "html/template" "net/http" "sort" "time" + "github.com/google/safehtml/template" + "vitess.io/vitess/go/acl" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logz" @@ -142,8 +143,7 @@ func queryzHandler(e *Executor, w http.ResponseWriter, r *http.Request) { }, } - e.plans.ForEach(func(value any) bool { - plan := value.(*engine.Plan) + e.ForEachPlan(func(plan *engine.Plan) bool { Value := &queryzRow{ Query: logz.Wrappable(sqlparser.TruncateForUI(plan.Original)), } diff --git a/go/vt/vtgate/queryz_test.go b/go/vt/vtgate/queryz_test.go index f45d1fa7eb8..826cb8641d8 100644 --- a/go/vt/vtgate/queryz_test.go +++ b/go/vt/vtgate/queryz_test.go @@ -27,6 +27,8 @@ import ( "github.com/stretchr/testify/require" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/vtgate/engine" @@ -34,52 +36,43 @@ import ( ) func TestQueryzHandler(t *testing.T) { + executor, _, _, _, ctx := createExecutorEnv(t) + resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/schemaz", nil) - executor, _, _, _ := createExecutorEnv() - + session := &vtgatepb.Session{TargetString: "@primary"} // single shard query sql := "select id from user where id = 1" - _, err := executorExec(executor, sql, nil) + _, err := executorExec(ctx, executor, session, sql, nil) require.NoError(t, err) - executor.plans.Wait() - plan1, ok := executor.debugGetPlan("@primary:" + sql) - if !ok { - t.Fatalf("couldn't get plan from cache") - } + time.Sleep(100 * time.Millisecond) + plan1 := assertCacheContains(t, executor, nil, "select id from `user` where id = 1") plan1.ExecTime = uint64(1 * time.Millisecond) // scatter sql = "select id from user" - _, err = executorExec(executor, sql, nil) + _, err = executorExec(ctx, executor, session, sql, nil) require.NoError(t, err) - executor.plans.Wait() - plan2, ok := executor.debugGetPlan("@primary:" + sql) - if !ok { - t.Fatalf("couldn't get plan from cache") - } + time.Sleep(100 * time.Millisecond) + plan2 := assertCacheContains(t, executor, nil, "select id from `user`") plan2.ExecTime = uint64(1 * time.Second) sql = "insert into user (id, name) values (:id, :name)" - _, err = executorExec(executor, sql, map[string]*querypb.BindVariable{ + _, err = executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{ "id": sqltypes.Uint64BindVariable(1), "name": sqltypes.BytesBindVariable([]byte("myname")), }) require.NoError(t, err) - executor.plans.Wait() - plan3, ok := executor.debugGetPlan("@primary:" + sql) - if !ok { - t.Fatalf("couldn't get plan from cache") - } + time.Sleep(100 * time.Millisecond) + plan3 := assertCacheContains(t, executor, nil, "insert into `user`(id, `name`) values (:id, :name)") // vindex insert from above execution - plan4, ok := executor.debugGetPlan("@primary:" + "insert into name_user_map(name, user_id) values(:name_0, :user_id_0)") - require.True(t, ok, "couldn't get plan from cache") + plan4 := assertCacheContains(t, executor, nil, "insert into name_user_map(`name`, user_id) values (:name_0, :user_id_0)") // same query again should add query counts to existing plans sql = "insert into user (id, name) values (:id, :name)" - _, err = executorExec(executor, sql, map[string]*querypb.BindVariable{ + _, err = executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{ "id": sqltypes.Uint64BindVariable(1), "name": sqltypes.BytesBindVariable([]byte("myname")), }) @@ -93,7 +86,7 @@ func TestQueryzHandler(t *testing.T) { body, _ := io.ReadAll(resp.Body) planPattern1 := []string{ `
`, - ``, + "", ``, ``, ``, @@ -110,7 +103,7 @@ func TestQueryzHandler(t *testing.T) { checkQueryzHasPlan(t, planPattern1, plan1, body) planPattern2 := []string{ ``, - ``, + "", ``, ``, ``, @@ -127,7 +120,7 @@ func TestQueryzHandler(t *testing.T) { checkQueryzHasPlan(t, planPattern2, plan2, body) planPattern3 := []string{ ``, - ``, + "", ``, ``, ``, diff --git a/go/vt/vtgate/safe_session.go b/go/vt/vtgate/safe_session.go index 7ecb2943265..e2f3c235c94 100644 --- a/go/vt/vtgate/safe_session.go +++ b/go/vt/vtgate/safe_session.go @@ -25,6 +25,8 @@ import ( "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/mysql/datetime" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/sysvars" @@ -133,7 +135,7 @@ func NewSafeSession(sessn *vtgatepb.Session) *SafeSession { // NewAutocommitSession returns a SafeSession based on the original // session, but with autocommit enabled. func NewAutocommitSession(sessn *vtgatepb.Session) *SafeSession { - newSession := proto.Clone(sessn).(*vtgatepb.Session) + newSession := sessn.CloneVT() newSession.InTransaction = false newSession.ShardSessions = nil newSession.PreSessions = nil @@ -149,11 +151,22 @@ func (session *SafeSession) ResetTx() { session.mu.Lock() defer session.mu.Unlock() session.resetCommonLocked() - if !session.Session.InReservedConn { - session.ShardSessions = nil - session.PreSessions = nil - session.PostSessions = nil + // If settings pools is enabled on the vttablet. + // This variable will be true but there will not be a shard session with reserved connection id. + // So, we should check the shard session and not just this variable. + if session.Session.InReservedConn { + allSessions := append(session.ShardSessions, append(session.PreSessions, session.PostSessions...)...) + for _, ss := range allSessions { + if ss.ReservedId != 0 { + // found that reserved connection exists. + // abort here, we should keep the shard sessions. + return + } + } } + session.ShardSessions = nil + session.PreSessions = nil + session.PostSessions = nil } // Reset clears the session @@ -547,6 +560,18 @@ func (session *SafeSession) HasSystemVariables() (found bool) { return } +func (session *SafeSession) TimeZone() *time.Location { + session.mu.Lock() + tz, ok := session.SystemVariables["time_zone"] + session.mu.Unlock() + + if !ok { + return nil + } + loc, _ := datetime.ParseTimeZone(tz) + return loc +} + // SetOptions sets the options func (session *SafeSession) SetOptions(options *querypb.ExecuteOptions) { session.mu.Lock() @@ -691,6 +716,20 @@ func (session *SafeSession) GetDDLStrategy() string { return session.DDLStrategy } +// SetMigrationContext set the migration_context setting. +func (session *SafeSession) SetMigrationContext(migrationContext string) { + session.mu.Lock() + defer session.mu.Unlock() + session.MigrationContext = migrationContext +} + +// GetMigrationContext returns the migration_context value. +func (session *SafeSession) GetMigrationContext() string { + session.mu.Lock() + defer session.mu.Unlock() + return session.MigrationContext +} + // GetSessionUUID returns the SessionUUID value. func (session *SafeSession) GetSessionUUID() string { session.mu.Lock() @@ -889,6 +928,39 @@ func (session *SafeSession) EnableLogging() { session.logging = &executeLogger{} } +// GetUDV returns the bind variable value for the user defined variable. +func (session *SafeSession) GetUDV(name string) *querypb.BindVariable { + session.mu.Lock() + defer session.mu.Unlock() + + if session.UserDefinedVariables == nil { + return nil + } + return session.UserDefinedVariables[name] +} + +// StorePrepareData stores the prepared data information for the given key. +func (session *SafeSession) StorePrepareData(key string, value *vtgatepb.PrepareData) { + session.mu.Lock() + defer session.mu.Unlock() + + if session.PrepareStatement == nil { + session.PrepareStatement = map[string]*vtgatepb.PrepareData{} + } + session.PrepareStatement[key] = value +} + +// GetPrepareData returns the prepared data information for the given key. +func (session *SafeSession) GetPrepareData(name string) *vtgatepb.PrepareData { + session.mu.Lock() + defer session.mu.Unlock() + + if session.PrepareStatement == nil { + return nil + } + return session.PrepareStatement[name] +} + func (l *executeLogger) log(primitive engine.Primitive, target *querypb.Target, gateway srvtopo.Gateway, query string, begin bool, bv map[string]*querypb.BindVariable) { if l == nil { return diff --git a/go/vt/vtgate/safe_session_test.go b/go/vt/vtgate/safe_session_test.go index 4bcd095362c..21bb2d6697a 100644 --- a/go/vt/vtgate/safe_session_test.go +++ b/go/vt/vtgate/safe_session_test.go @@ -19,7 +19,9 @@ package vtgate import ( "reflect" "testing" + "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" querypb "vitess.io/vitess/go/vt/proto/query" @@ -64,3 +66,35 @@ func TestPrequeries(t *testing.T) { t.Errorf("got %v but wanted %v", preQueries, want) } } + +func TestTimeZone(t *testing.T) { + testCases := []struct { + tz string + want string + }{ + { + tz: "Europe/Amsterdam", + want: "Europe/Amsterdam", + }, + { + tz: "+02:00", + want: "UTC+02:00", + }, + { + tz: "foo", + want: (*time.Location)(nil).String(), + }, + } + + for _, tc := range testCases { + t.Run(tc.tz, func(t *testing.T) { + session := NewSafeSession(&vtgatepb.Session{ + SystemVariables: map[string]string{ + "time_zone": tc.tz, + }, + }) + + assert.Equal(t, tc.want, session.TimeZone().String()) + }) + } +} diff --git a/go/vt/vtgate/sandbox_test.go b/go/vt/vtgate/sandbox_test.go index 4197e6ef231..1629e9a4faa 100644 --- a/go/vt/vtgate/sandbox_test.go +++ b/go/vt/vtgate/sandbox_test.go @@ -19,6 +19,8 @@ package vtgate import ( "context" "fmt" + "hash/fnv" + "strconv" "sync" "vitess.io/vitess/go/json2" @@ -218,9 +220,9 @@ type sandboxTopo struct { // the given cells. // // when this version is used, WatchSrvVSchema can properly simulate watches -func newSandboxForCells(cells []string) *sandboxTopo { +func newSandboxForCells(ctx context.Context, cells []string) *sandboxTopo { return &sandboxTopo{ - topoServer: memorytopo.NewServer(cells...), + topoServer: memorytopo.NewServer(ctx, cells...), } } @@ -284,6 +286,16 @@ func (sct *sandboxTopo) WatchSrvKeyspace(ctx context.Context, cell, keyspace str // panic("not supported: WatchSrvKeyspace") } +func hash(s string) uint32 { + h := fnv.New32a() + h.Write([]byte(s)) + return h.Sum32() +} + +func GetSrvVSchemaHash(vs *vschemapb.SrvVSchema) string { + return strconv.Itoa(int(hash(vs.String()))) +} + // WatchSrvVSchema is part of the srvtopo.Server interface. // // If the sandbox was created with a backing topo service, piggy back on it @@ -302,11 +314,31 @@ func (sct *sandboxTopo) WatchSrvVSchema(ctx context.Context, cell string, callba if !callback(current.Value, nil) { panic("sandboxTopo callback returned false") } + if updateChan == nil { + panic("sandboxTopo updateChan is nil") + } + currentHash := GetSrvVSchemaHash(current.Value) go func() { for { - update := <-updateChan - if !callback(update.Value, update.Err) { - panic("sandboxTopo callback returned false") + select { + case <-ctx.Done(): + return + case update := <-updateChan: + // If the channel was closed, we're done. + if update == nil { + return + } + newHash := GetSrvVSchemaHash(update.Value) + if newHash == currentHash { + // sometimes we get the same update multiple times. This results in the plan cache to be cleared + // causing tests to fail. So we just ignore the duplicate updates. + continue + } + currentHash = newHash + if !callback(update.Value, update.Err) { + panic("sandboxTopo callback returned false") + } + } } }() diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go index 7b89872ca67..ede88e2d9b8 100644 --- a/go/vt/vtgate/scatter_conn.go +++ b/go/vt/vtgate/scatter_conn.go @@ -22,11 +22,11 @@ import ( "sync" "time" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/sqlparser" "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/concurrency" @@ -755,13 +755,13 @@ func (stc *ScatterConn) ExecuteLock(ctx context.Context, rs *srvtopo.ResolvedSha } func wasConnectionClosed(err error) bool { - sqlErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) + sqlErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) message := sqlErr.Error() switch sqlErr.Number() { - case mysql.CRServerGone, mysql.CRServerLost: + case sqlerror.CRServerGone, sqlerror.CRServerLost: return true - case mysql.ERQueryInterrupted: + case sqlerror.ERQueryInterrupted: return vterrors.TxClosed.MatchString(message) default: return false diff --git a/go/vt/vtgate/scatter_conn_test.go b/go/vt/vtgate/scatter_conn_test.go index 7fe751c9a00..6e57c10bbbd 100644 --- a/go/vt/vtgate/scatter_conn_test.go +++ b/go/vt/vtgate/scatter_conn_test.go @@ -19,11 +19,11 @@ package vtgate import ( "testing" + "vitess.io/vitess/go/mysql/sqlerror" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "github.com/stretchr/testify/assert" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/test/utils" @@ -42,10 +42,11 @@ import ( // This file uses the sandbox_test framework. func TestExecuteFailOnAutocommit(t *testing.T) { + ctx := utils.LeakCheckContext(t) createSandbox("TestExecuteFailOnAutocommit") hc := discovery.NewFakeHealthCheck(nil) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 := hc.AddTestTablet("aa", "0", 1, "TestExecuteFailOnAutocommit", "0", topodatapb.TabletType_PRIMARY, true, 1, nil) sbc1 := hc.AddTestTablet("aa", "1", 1, "TestExecuteFailOnAutocommit", "1", topodatapb.TabletType_PRIMARY, true, 1, nil) @@ -105,10 +106,12 @@ func TestExecuteFailOnAutocommit(t *testing.T) { } func TestReservedOnMultiReplica(t *testing.T) { + ctx := utils.LeakCheckContext(t) + keyspace := "keyspace" createSandbox(keyspace) hc := discovery.NewFakeHealthCheck(nil) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0_1 := hc.AddTestTablet("aa", "0", 1, keyspace, "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc0_2 := hc.AddTestTablet("aa", "2", 1, keyspace, "0", topodatapb.TabletType_REPLICA, true, 1, nil) // sbc1 := hc.AddTestTablet("aa", "1", 1, keyspace, "1", topodatapb.TabletType_REPLICA, true, 1, nil) @@ -117,18 +120,20 @@ func TestReservedOnMultiReplica(t *testing.T) { sbc0_1.SetResults([]*sqltypes.Result{{}}) sbc0_2.SetResults([]*sqltypes.Result{{}}) - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") session := NewSafeSession(&vtgatepb.Session{InTransaction: false, InReservedConn: true}) destinations := []key.Destination{key.DestinationShard("0")} for i := 0; i < 10; i++ { - executeOnShards(t, res, keyspace, sc, session, destinations) + executeOnShards(t, ctx, res, keyspace, sc, session, destinations) assert.EqualValues(t, 1, sbc0_1.ReserveCount.Load()+sbc0_2.ReserveCount.Load(), "sbc0 reserve count") assert.EqualValues(t, 0, sbc0_1.BeginCount.Load()+sbc0_2.BeginCount.Load(), "sbc0 begin count") } } func TestReservedBeginTableDriven(t *testing.T) { + ctx := utils.LeakCheckContext(t) + type testAction struct { transaction, reserved bool shards []string @@ -253,7 +258,7 @@ func TestReservedBeginTableDriven(t *testing.T) { keyspace := "keyspace" createSandbox(keyspace) hc := discovery.NewFakeHealthCheck(nil) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 := hc.AddTestTablet("aa", "0", 1, keyspace, "0", topodatapb.TabletType_REPLICA, true, 1, nil) sbc1 := hc.AddTestTablet("aa", "1", 1, keyspace, "1", topodatapb.TabletType_REPLICA, true, 1, nil) @@ -261,7 +266,7 @@ func TestReservedBeginTableDriven(t *testing.T) { sbc0.SetResults([]*sqltypes.Result{{}}) sbc1.SetResults([]*sqltypes.Result{{}}) - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") t.Run(test.name, func(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{}) @@ -272,7 +277,7 @@ func TestReservedBeginTableDriven(t *testing.T) { for _, shard := range action.shards { destinations = append(destinations, key.DestinationShard(shard)) } - executeOnShards(t, res, keyspace, sc, session, destinations) + executeOnShards(t, ctx, res, keyspace, sc, session, destinations) assert.EqualValues(t, action.sbc0Reserve, sbc0.ReserveCount.Load(), "sbc0 reserve count") assert.EqualValues(t, action.sbc0Begin, sbc0.BeginCount.Load(), "sbc0 begin count") assert.EqualValues(t, action.sbc1Reserve, sbc1.ReserveCount.Load(), "sbc1 reserve count") @@ -287,47 +292,49 @@ func TestReservedBeginTableDriven(t *testing.T) { } func TestReservedConnFail(t *testing.T) { + ctx := utils.LeakCheckContext(t) + keyspace := "keyspace" createSandbox(keyspace) hc := discovery.NewFakeHealthCheck(nil) - sc := newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 := hc.AddTestTablet("aa", "0", 1, keyspace, "0", topodatapb.TabletType_REPLICA, true, 1, nil) _ = hc.AddTestTablet("aa", "1", 1, keyspace, "1", topodatapb.TabletType_REPLICA, true, 1, nil) - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") session := NewSafeSession(&vtgatepb.Session{InTransaction: false, InReservedConn: true}) destinations := []key.Destination{key.DestinationShard("0")} - executeOnShards(t, res, keyspace, sc, session, destinations) + executeOnShards(t, ctx, res, keyspace, sc, session, destinations) assert.Equal(t, 1, len(session.ShardSessions)) oldRId := session.Session.ShardSessions[0].ReservedId - sbc0.EphemeralShardErr = mysql.NewSQLError(mysql.CRServerGone, mysql.SSUnknownSQLState, "lost connection") - _ = executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations) + sbc0.EphemeralShardErr = sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "lost connection") + _ = executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations) assert.Equal(t, 3, len(sbc0.Queries), "1 for the successful run, one for the failed attempt, and one for the retry") require.Equal(t, 1, len(session.ShardSessions)) assert.NotEqual(t, oldRId, session.Session.ShardSessions[0].ReservedId, "should have recreated a reserved connection since the last connection was lost") oldRId = session.Session.ShardSessions[0].ReservedId sbc0.Queries = nil - sbc0.EphemeralShardErr = mysql.NewSQLError(mysql.ERQueryInterrupted, mysql.SSUnknownSQLState, "transaction 123 not found") - _ = executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations) + sbc0.EphemeralShardErr = sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, sqlerror.SSUnknownSQLState, "transaction 123 not found") + _ = executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations) assert.Equal(t, 2, len(sbc0.Queries), "one for the failed attempt, and one for the retry") require.Equal(t, 1, len(session.ShardSessions)) assert.NotEqual(t, oldRId, session.Session.ShardSessions[0].ReservedId, "should have recreated a reserved connection since the last connection was lost") oldRId = session.Session.ShardSessions[0].ReservedId sbc0.Queries = nil - sbc0.EphemeralShardErr = mysql.NewSQLError(mysql.ERQueryInterrupted, mysql.SSUnknownSQLState, "transaction 123 ended at 2020-01-20") - _ = executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations) + sbc0.EphemeralShardErr = sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, sqlerror.SSUnknownSQLState, "transaction 123 ended at 2020-01-20") + _ = executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations) assert.Equal(t, 2, len(sbc0.Queries), "one for the failed attempt, and one for the retry") require.Equal(t, 1, len(session.ShardSessions)) assert.NotEqual(t, oldRId, session.Session.ShardSessions[0].ReservedId, "should have recreated a reserved connection since the last connection was lost") oldRId = session.Session.ShardSessions[0].ReservedId sbc0.Queries = nil - sbc0.EphemeralShardErr = mysql.NewSQLError(mysql.ERQueryInterrupted, mysql.SSUnknownSQLState, "transaction 123 in use: for tx killer rollback") - _ = executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations) + sbc0.EphemeralShardErr = sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, sqlerror.SSUnknownSQLState, "transaction 123 in use: for tx killer rollback") + _ = executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations) assert.Equal(t, 2, len(sbc0.Queries), "one for the failed attempt, and one for the retry") require.Equal(t, 1, len(session.ShardSessions)) assert.NotEqual(t, oldRId, session.Session.ShardSessions[0].ReservedId, "should have recreated a reserved connection since the last connection was lost") @@ -335,7 +342,7 @@ func TestReservedConnFail(t *testing.T) { sbc0.Queries = nil sbc0.EphemeralShardErr = vterrors.New(vtrpcpb.Code_CLUSTER_EVENT, "operation not allowed in state NOT_SERVING during query: query1") - _ = executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations) + _ = executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations) assert.Equal(t, 2, len(sbc0.Queries), "one for the failed attempt, and one for the retry") require.Equal(t, 1, len(session.ShardSessions)) assert.NotEqual(t, oldRId, session.Session.ShardSessions[0].ReservedId, "should have recreated a reserved connection since the last connection was lost") @@ -343,7 +350,7 @@ func TestReservedConnFail(t *testing.T) { sbc0.Queries = nil sbc0.EphemeralShardErr = vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "invalid tablet type: REPLICA, want: PRIMARY") - _ = executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations) + _ = executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations) assert.Equal(t, 2, len(sbc0.Queries), "one for the failed attempt, and one for the retry") require.Equal(t, 1, len(session.ShardSessions)) assert.NotEqual(t, oldRId, session.Session.ShardSessions[0].ReservedId, "should have recreated a reserved connection since the last connection was lost") @@ -364,7 +371,7 @@ func TestReservedConnFail(t *testing.T) { sbc0.Queries = nil sbc0.ExecCount.Store(0) - _ = executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations) + _ = executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations) assert.EqualValues(t, 1, sbc0.ExecCount.Load(), "first attempt should be made on original tablet") assert.EqualValues(t, 0, len(sbc0.Queries), "no query should be executed on it") assert.Equal(t, 1, len(sbc0Rep.Queries), "this attempt on new healthy tablet should pass") @@ -394,7 +401,7 @@ func TestReservedConnFail(t *testing.T) { sbc0Rep.Queries = nil sbc0Rep.ExecCount.Store(0) - _ = executeOnShardsReturnsErr(t, res, keyspace, sc, session, destinations) + _ = executeOnShardsReturnsErr(t, ctx, res, keyspace, sc, session, destinations) assert.EqualValues(t, 1, sbc0Rep.ExecCount.Load(), "first attempt should be made on the changed tablet type") assert.EqualValues(t, 0, len(sbc0Rep.Queries), "no query should be executed on it") assert.Equal(t, 1, len(sbc0.Queries), "this attempt should pass as it is on new healthy tablet and matches the target") @@ -410,27 +417,27 @@ func TestIsConnClosed(t *testing.T) { conClosed bool }{{ "server gone", - mysql.NewSQLError(mysql.CRServerGone, mysql.SSNetError, ""), + sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSNetError, ""), true, }, { "connection lost", - mysql.NewSQLError(mysql.CRServerLost, mysql.SSNetError, ""), + sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSNetError, ""), true, }, { "tx ended", - mysql.NewSQLError(mysql.ERQueryInterrupted, mysql.SSUnknownSQLState, "transaction 111 ended at ..."), + sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, sqlerror.SSUnknownSQLState, "transaction 111 ended at ..."), true, }, { "tx not found", - mysql.NewSQLError(mysql.ERQueryInterrupted, mysql.SSUnknownSQLState, "transaction 111 not found ..."), + sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, sqlerror.SSUnknownSQLState, "transaction 111 not found ..."), true, }, { "tx not found missing tx id", - mysql.NewSQLError(mysql.ERQueryInterrupted, mysql.SSUnknownSQLState, "transaction not found"), + sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, sqlerror.SSUnknownSQLState, "transaction not found"), false, }, { "tx getting killed by tx killer", - mysql.NewSQLError(mysql.ERQueryInterrupted, mysql.SSUnknownSQLState, "transaction 111 in use: for tx killer rollback"), + sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, sqlerror.SSUnknownSQLState, "transaction 111 in use: for tx killer rollback"), true, }} diff --git a/go/vt/vtgate/schema/tracker.go b/go/vt/vtgate/schema/tracker.go index a75a583ae11..3a03ea83a84 100644 --- a/go/vt/vtgate/schema/tracker.go +++ b/go/vt/vtgate/schema/tracker.go @@ -18,24 +18,17 @@ package schema import ( "context" + "maps" + "strings" "sync" "time" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/vt/callerid" - - "vitess.io/vitess/go/vt/vttablet/queryservice" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/sqltypes" - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" + querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/queryservice" ) type ( @@ -63,22 +56,12 @@ type ( // defaultConsumeDelay is the default time, the updateController will wait before checking the schema fetch request queue. const defaultConsumeDelay = 1 * time.Second -// aclErrorMessageLog is for logging a warning when an acl error message is received for querying schema tracking table. -const aclErrorMessageLog = "Table ACL might be enabled, --schema_change_signal_user needs to be passed to VTGate for schema tracking to work. Check 'schema tracking' docs on vitess.io" - // NewTracker creates the tracker object. -func NewTracker(ch chan *discovery.TabletHealth, user string, enableViews bool) *Tracker { - ctx := context.Background() - // Set the caller on the context if the user is provided. - // This user that will be sent down to vttablet calls. - if user != "" { - ctx = callerid.NewContext(ctx, nil, callerid.NewImmediateCallerID(user)) - } - +func NewTracker(ch chan *discovery.TabletHealth, enableViews bool) *Tracker { t := &Tracker{ - ctx: ctx, + ctx: context.Background(), ch: ch, - tables: &tableMap{m: map[keyspaceStr]map[tableNameStr][]vindexes.Column{}}, + tables: &tableMap{m: make(map[keyspaceStr]map[tableNameStr]*vindexes.TableInfo)}, tracked: map[keyspaceStr]*updateController{}, consumeDelay: defaultConsumeDelay, } @@ -110,10 +93,6 @@ func (t *Tracker) loadTables(conn queryservice.QueryService, target *querypb.Tar return nil } - ftRes, err := conn.Execute(t.ctx, target, mysql.FetchTables, nil, 0, 0, nil) - if err != nil { - return err - } t.mu.Lock() defer t.mu.Unlock() @@ -122,8 +101,17 @@ func (t *Tracker) loadTables(conn queryservice.QueryService, target *querypb.Tar // clearing out the previous schema we can end up with duplicate entries when the // tablet is simply restarted or potentially when we elect a new primary. t.clearKeyspaceTables(target.Keyspace) - t.updateTables(target.Keyspace, ftRes) - log.Infof("finished loading schema for keyspace %s. Found %d columns in total across the tables", target.Keyspace, len(ftRes.Rows)) + + var numTables int + err := conn.GetSchema(t.ctx, target, querypb.SchemaTableType_TABLES, nil, func(schemaRes *querypb.GetSchemaResponse) error { + t.updateTables(target.Keyspace, schemaRes.TableDefinition) + numTables += len(schemaRes.TableDefinition) + return nil + }) + if err != nil { + return err + } + log.Infof("finished loading tables for keyspace %s. Found %d tables", target.Keyspace, numTables) return nil } @@ -163,6 +151,10 @@ func (t *Tracker) Start() { for { select { case th := <-t.ch: + if th == nil { + // channel closed + return + } ksUpdater := t.getKeyspaceUpdateController(th) ksUpdater.add(th) case <-ctx.Done(): @@ -195,10 +187,6 @@ func (t *Tracker) initKeyspace(th *discovery.TabletHealth) error { err := t.LoadKeyspace(th.Conn, th.Target) if err != nil { log.Warningf("Unable to add the %s keyspace to the schema tracker: %v", th.Target.Keyspace, err) - code := vterrors.Code(err) - if code == vtrpcpb.Code_UNAUTHENTICATED || code == vtrpcpb.Code_PERMISSION_DENIED { - log.Warning(aclErrorMessageLog) - } return err } return nil @@ -215,20 +203,30 @@ func (t *Tracker) GetColumns(ks string, tbl string) []vindexes.Column { t.mu.Lock() defer t.mu.Unlock() - return t.tables.get(ks, tbl) + tblInfo := t.tables.get(ks, tbl) + return tblInfo.Columns +} + +// GetForeignKeys returns the foreign keys for table in the given keyspace. +func (t *Tracker) GetForeignKeys(ks string, tbl string) []*sqlparser.ForeignKeyDefinition { + t.mu.Lock() + defer t.mu.Unlock() + + tblInfo := t.tables.get(ks, tbl) + return tblInfo.ForeignKeys } // Tables returns a map with the columns for all known tables in the keyspace -func (t *Tracker) Tables(ks string) map[string][]vindexes.Column { +func (t *Tracker) Tables(ks string) map[string]*vindexes.TableInfo { t.mu.Lock() defer t.mu.Unlock() m := t.tables.m[ks] if m == nil { - return map[string][]vindexes.Column{} // we know nothing about this KS, so that is the info we can give out + return map[string]*vindexes.TableInfo{} // we know nothing about this KS, so that is the info we can give out } - return m + return maps.Clone(m) } // Views returns all known views in the keyspace with their definition. @@ -239,7 +237,9 @@ func (t *Tracker) Views(ks string) map[string]sqlparser.SelectStatement { if t.views == nil { return nil } - return t.views.m[ks] + + m := t.views.m[ks] + return maps.Clone(m) } func (t *Tracker) updateSchema(th *discovery.TabletHealth) bool { @@ -255,50 +255,96 @@ func (t *Tracker) updateSchema(th *discovery.TabletHealth) bool { } func (t *Tracker) updatedTableSchema(th *discovery.TabletHealth) bool { + t.mu.Lock() + defer t.mu.Unlock() + tablesUpdated := th.Stats.TableSchemaChanged - tables, err := sqltypes.BuildBindVariable(tablesUpdated) - if err != nil { - log.Errorf("failed to read updated tables from TabletHealth: %v", err) - return false + + // first we empty all prior schema. deleted tables will not show up in the result, + // so this is the only chance to delete + for _, tbl := range tablesUpdated { + t.tables.delete(th.Target.Keyspace, tbl) } - bv := map[string]*querypb.BindVariable{"tableNames": tables} - res, err := th.Conn.Execute(t.ctx, th.Target, mysql.FetchUpdatedTables, bv, 0, 0, nil) + err := th.Conn.GetSchema(t.ctx, th.Target, querypb.SchemaTableType_TABLES, tablesUpdated, func(schemaRes *querypb.GetSchemaResponse) error { + t.updateTables(th.Target.Keyspace, schemaRes.TableDefinition) + return nil + }) if err != nil { t.tracked[th.Target.Keyspace].setLoaded(false) // TODO: optimize for the tables that got errored out. log.Warningf("error fetching new schema for %v, making them non-authoritative: %v", tablesUpdated, err) - code := vterrors.Code(err) - if code == vtrpcpb.Code_UNAUTHENTICATED || code == vtrpcpb.Code_PERMISSION_DENIED { - log.Warning(aclErrorMessageLog) - } return false } + return true +} - t.mu.Lock() - defer t.mu.Unlock() +func (t *Tracker) updateTables(keyspace string, res map[string]string) { + for tableName, tableDef := range res { + stmt, err := sqlparser.Parse(tableDef) + if err != nil { + log.Warningf("error parsing table definition for %s: %v", tableName, err) + continue + } + ddl, ok := stmt.(*sqlparser.CreateTable) + if !ok { + log.Warningf("parsed table definition for '%s' is not a create table definition", tableName) + continue + } - // first we empty all prior schema. deleted tables will not show up in the result, - // so this is the only chance to delete - for _, tbl := range tablesUpdated { - t.tables.delete(th.Target.Keyspace, tbl) + cols := getColumns(ddl.TableSpec) + fks := getForeignKeys(ddl.TableSpec) + t.tables.set(keyspace, tableName, cols, fks) } - t.updateTables(th.Target.Keyspace, res) - return true } -func (t *Tracker) updateTables(keyspace string, res *sqltypes.Result) { - for _, row := range res.Rows { - tbl := row[0].ToString() - colName := row[1].ToString() - colType := row[2].ToString() - collation := row[3].ToString() +func getColumns(tblSpec *sqlparser.TableSpec) []vindexes.Column { + tblCollation := getTableCollation(tblSpec) + cols := make([]vindexes.Column, 0, len(tblSpec.Columns)) + for _, column := range tblSpec.Columns { + colCollation := getColumnCollation(tblCollation, column) + cols = append(cols, + vindexes.Column{ + Name: column.Name, + Type: column.Type.SQLType(), + CollationName: colCollation, + }) + } + return cols +} - cType := sqlparser.ColumnType{Type: colType} - col := vindexes.Column{Name: sqlparser.NewIdentifierCI(colName), Type: cType.SQLType(), CollationName: collation} - cols := t.tables.get(keyspace, tbl) +func getForeignKeys(tblSpec *sqlparser.TableSpec) []*sqlparser.ForeignKeyDefinition { + if tblSpec.Constraints == nil { + return nil + } + var fks []*sqlparser.ForeignKeyDefinition + for _, constraint := range tblSpec.Constraints { + fkDef, ok := constraint.Details.(*sqlparser.ForeignKeyDefinition) + if !ok { + continue + } + fks = append(fks, fkDef) + } + return fks +} - t.tables.set(keyspace, tbl, append(cols, col)) +func getTableCollation(tblSpec *sqlparser.TableSpec) string { + if tblSpec.Options == nil { + return "" + } + collate := sqlparser.KeywordString(sqlparser.COLLATE) + for _, option := range tblSpec.Options { + if strings.EqualFold(option.Name, collate) { + return option.String + } } + return "" +} + +func getColumnCollation(defaultCollation string, column *sqlparser.ColumnDefinition) string { + if column.Type.Options == nil || column.Type.Options.Collate == "" { + return defaultCollation + } + return column.Type.Options.Collate } func (t *Tracker) updatedViewSchema(th *discovery.TabletHealth) bool { @@ -353,22 +399,22 @@ func (t *Tracker) AddNewKeyspace(conn queryservice.QueryService, target *querypb } type tableMap struct { - m map[keyspaceStr]map[tableNameStr][]vindexes.Column + m map[keyspaceStr]map[tableNameStr]*vindexes.TableInfo } -func (tm *tableMap) set(ks, tbl string, cols []vindexes.Column) { +func (tm *tableMap) set(ks, tbl string, cols []vindexes.Column, fks []*sqlparser.ForeignKeyDefinition) { m := tm.m[ks] if m == nil { - m = make(map[tableNameStr][]vindexes.Column) + m = make(map[tableNameStr]*vindexes.TableInfo) tm.m[ks] = m } - m[tbl] = cols + m[tbl] = &vindexes.TableInfo{Columns: cols, ForeignKeys: fks} } -func (tm *tableMap) get(ks, tbl string) []vindexes.Column { +func (tm *tableMap) get(ks, tbl string) *vindexes.TableInfo { m := tm.m[ks] if m == nil { - return nil + return &vindexes.TableInfo{} } return m[tbl] } diff --git a/go/vt/vtgate/schema/tracker_test.go b/go/vt/vtgate/schema/tracker_test.go index d515c6b6bdc..4f514fec101 100644 --- a/go/vt/vtgate/schema/tracker_test.go +++ b/go/vt/vtgate/schema/tracker_test.go @@ -17,190 +17,61 @@ limitations under the License. package schema import ( - "fmt" + "context" + "os" "sync" "testing" "time" - "vitess.io/vitess/go/mysql" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/sandboxconn" ) -func TestTracking(t *testing.T) { - target := &querypb.Target{ - Keyspace: "ks", - Shard: "-80", - TabletType: topodatapb.TabletType_PRIMARY, - Cell: "aa", - } - tablet := &topodatapb.Tablet{ - Keyspace: target.Keyspace, - Shard: target.Shard, - Type: target.TabletType, - } - fields := sqltypes.MakeTestFields( - "table_name|col_name|col_type|collation_name", - "varchar|varchar|varchar|varchar", - ) - - type delta struct { - result *sqltypes.Result - updTbl []string - } - var ( - d0 = delta{ - result: sqltypes.MakeTestResult( - fields, - "prior|id|int|", - ), - updTbl: []string{"prior"}, - } - - d1 = delta{ - result: sqltypes.MakeTestResult( - fields, - "t1|id|int|", - "t1|name|varchar|utf8_bin", - "t2|id|varchar|utf8_bin", - ), - updTbl: []string{"t1", "t2"}, - } - - d2 = delta{ - result: sqltypes.MakeTestResult( - fields, - "t2|id|varchar|utf8_bin", - "t2|name|varchar|utf8_bin", - "t3|id|datetime|", - ), - updTbl: []string{"prior", "t1", "t2", "t3"}, - } - - d3 = delta{ - result: sqltypes.MakeTestResult( - fields, - "t4|name|varchar|utf8_bin", - ), - updTbl: []string{"t4"}, - } - ) - - testcases := []struct { - tName string - deltas []delta - exp map[string][]vindexes.Column - }{{ - tName: "new tables", - deltas: []delta{d0, d1}, - exp: map[string][]vindexes.Column{ - "t1": { - {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT32}, - {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "utf8_bin"}}, - "t2": { - {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR, CollationName: "utf8_bin"}}, - "prior": { - {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT32}}, - }, - }, { - tName: "delete t1 and prior, updated t2 and new t3", - deltas: []delta{d0, d1, d2}, - exp: map[string][]vindexes.Column{ - "t2": { - {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR, CollationName: "utf8_bin"}, - {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "utf8_bin"}}, - "t3": { - {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_DATETIME}}, - }, - }, { - tName: "new t4", - deltas: []delta{d0, d1, d2, d3}, - exp: map[string][]vindexes.Column{ - "t2": { - {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR, CollationName: "utf8_bin"}, - {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "utf8_bin"}}, - "t3": { - {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_DATETIME}}, - "t4": { - {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "utf8_bin"}}, - }, - }, - } - for i, tcase := range testcases { - t.Run(fmt.Sprintf("%d - %s", i, tcase.tName), func(t *testing.T) { - sbc := sandboxconn.NewSandboxConn(tablet) - ch := make(chan *discovery.TabletHealth) - tracker := NewTracker(ch, "", false) - tracker.consumeDelay = 1 * time.Millisecond - tracker.Start() - defer tracker.Stop() - - results := []*sqltypes.Result{{}} - for _, d := range tcase.deltas { - for _, deltaRow := range d.result.Rows { - same := false - for _, row := range results[0].Rows { - if row[0].String() == deltaRow[0].String() && row[1].String() == deltaRow[1].String() { - same = true - break - } - } - if same == false { - results[0].Rows = append(results[0].Rows, deltaRow) - } - } - } - - sbc.SetResults(results) - sbc.Queries = nil - - wg := sync.WaitGroup{} - wg.Add(1) - tracker.RegisterSignalReceiver(func() { - wg.Done() - }) - - for _, d := range tcase.deltas { - ch <- &discovery.TabletHealth{ - Conn: sbc, - Tablet: tablet, - Target: target, - Serving: true, - Stats: &querypb.RealtimeStats{TableSchemaChanged: d.updTbl}, - } - } - - require.False(t, waitTimeout(&wg, time.Second), "schema was updated but received no signal") - - require.Equal(t, 1, len(sbc.StringQueries())) - - _, keyspacePresent := tracker.tracked[target.Keyspace] - require.Equal(t, true, keyspacePresent) +var ( + keyspace = "ks" + cell = "aa" +) - for k, v := range tcase.exp { - utils.MustMatch(t, v, tracker.GetColumns("ks", k), "mismatch for table: ", k) +func TestMain(m *testing.M) { + exitCode := func() int { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, cell) + ts.CreateKeyspace(ctx, keyspace, &topodatapb.Keyspace{}) + _, created := sidecardb.NewIdentifierCache(func(ctx context.Context, keyspace string) (string, error) { + ki, err := ts.GetKeyspace(ctx, keyspace) + if err != nil { + return "", err } + return ki.SidecarDbName, nil }) - } + if !created { + log.Error("Failed to create a new sidecar database identifier cache as one already existed!") + return 1 + } + return m.Run() + }() + os.Exit(exitCode) } +// TestTrackingUnHealthyTablet tests that the tracker is sending GetSchema calls only when the tablet is healthy. func TestTrackingUnHealthyTablet(t *testing.T) { target := &querypb.Target{ - Keyspace: "ks", + Keyspace: keyspace, Shard: "-80", TabletType: topodatapb.TabletType_PRIMARY, - Cell: "aa", + Cell: cell, } tablet := &topodatapb.Tablet{ Keyspace: target.Keyspace, @@ -210,7 +81,7 @@ func TestTrackingUnHealthyTablet(t *testing.T) { sbc := sandboxconn.NewSandboxConn(tablet) ch := make(chan *discovery.TabletHealth) - tracker := NewTracker(ch, "", false) + tracker := NewTracker(ch, false) tracker.consumeDelay = 1 * time.Millisecond tracker.Start() defer tracker.Stop() @@ -247,7 +118,6 @@ func TestTrackingUnHealthyTablet(t *testing.T) { }, } - sbc.SetResults([]*sqltypes.Result{{}, {}, {}}) for _, tcase := range tcases { ch <- &discovery.TabletHealth{ Conn: sbc, @@ -260,23 +130,10 @@ func TestTrackingUnHealthyTablet(t *testing.T) { } require.False(t, waitTimeout(&wg, 5*time.Second), "schema was updated but received no signal") - require.Equal(t, []string{mysql.FetchTables, mysql.FetchUpdatedTables, mysql.FetchTables}, sbc.StringQueries()) -} - -func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool { - c := make(chan struct{}) - go func() { - defer close(c) - wg.Wait() - }() - select { - case <-c: - return false // completed normally - case <-time.After(timeout): - return true // timed out - } + require.EqualValues(t, 3, sbc.GetSchemaCount.Load()) } +// TestTrackerGetKeyspaceUpdateController tests table update controller initialization. func TestTrackerGetKeyspaceUpdateController(t *testing.T) { ks3 := &updateController{} tracker := Tracker{ @@ -309,15 +166,66 @@ func TestTrackerGetKeyspaceUpdateController(t *testing.T) { assert.Nil(t, ks3.reloadKeyspace, "ks3 already initialized") } +// TestTableTracking tests that the tracker is able to track table schema changes. +func TestTableTracking(t *testing.T) { + schemaDefResult := []map[string]string{{ + "prior": "create table prior(id int primary key)", + }, { + // initial load of view - kept empty + }, { + "t1": "create table t1(id bigint primary key, name varchar(50))", + "t2": "create table t2(id varchar(50) primary key)", + }, { + "t2": "create table t2(id varchar(50) primary key, name varchar(50))", + "t3": "create table t3(id datetime primary key)", + }, { + "t4": "create table t4(name varchar(50) primary key)", + }} + + testcases := []testCases{{ + testName: "initial table load", + expTbl: map[string][]vindexes.Column{ + "prior": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT32}}, + }, + }, { + testName: "new tables", + updTbl: []string{"t1", "t2"}, + expTbl: map[string][]vindexes.Column{ + "prior": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT32}}, + "t1": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, + "t2": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR}}, + }, + }, { + testName: "delete prior, updated t2 and new t3", + updTbl: []string{"prior", "t2", "t3"}, + expTbl: map[string][]vindexes.Column{ + "t1": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, + "t2": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, + "t3": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_DATETIME}}, + }, + }, { + testName: "new t4", + updTbl: []string{"t4"}, + expTbl: map[string][]vindexes.Column{ + "t1": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, + "t2": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, + "t3": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_DATETIME}}, + "t4": {{Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, + }, + }} + + testTracker(t, schemaDefResult, testcases) +} + // TestViewsTracking tests that the tracker is able to track views. func TestViewsTracking(t *testing.T) { - target := &querypb.Target{Cell: "aa", Keyspace: "ks", Shard: "-80", TabletType: topodatapb.TabletType_PRIMARY} - tablet := &topodatapb.Tablet{Keyspace: target.Keyspace, Shard: target.Shard, Type: target.TabletType} - schemaDefResult := []map[string]string{{ + // initial load of table - kept empty + }, { "prior": "create view prior as select 1 from tbl", - "t1": "create view t1 as select 1 from tbl1", - "t2": "create view t2 as select 1 from tbl2", + }, { + "t1": "create view t1 as select 1 from tbl1", + "t2": "create view t2 as select 1 from tbl2", }, { "t2": "create view t2 as select 1,2 from tbl2", "t3": "create view t3 as select 1 from tbl3", @@ -325,37 +233,109 @@ func TestViewsTracking(t *testing.T) { "t4": "create view t4 as select 1 from tbl4", }} - testcases := []struct { - testName string - updView []string - exp map[string]string - }{{ - testName: "new views", - updView: []string{"prior", "t1", "t2"}, - exp: map[string]string{ + testcases := []testCases{{ + testName: "initial view load", + expView: map[string]string{ + "prior": "select 1 from tbl"}, + }, { + testName: "new view t1, t2", + updView: []string{"t1", "t2"}, + expView: map[string]string{ "t1": "select 1 from tbl1", "t2": "select 1 from tbl2", "prior": "select 1 from tbl"}, }, { testName: "delete prior, updated t2 and new t3", updView: []string{"prior", "t2", "t3"}, - exp: map[string]string{ + expView: map[string]string{ "t1": "select 1 from tbl1", "t2": "select 1, 2 from tbl2", "t3": "select 1 from tbl3"}, }, { testName: "new t4", updView: []string{"t4"}, - exp: map[string]string{ + expView: map[string]string{ "t1": "select 1 from tbl1", "t2": "select 1, 2 from tbl2", "t3": "select 1 from tbl3", "t4": "select 1 from tbl4"}, }} + testTracker(t, schemaDefResult, testcases) +} + +// TestTableInfoRetrieval tests that the tracker is able to retrieve required information from ddl statement. +func TestTableInfoRetrieval(t *testing.T) { + schemaDefResult := []map[string]string{{ + "my_tbl": "CREATE TABLE `my_tbl` (" + + "`id` bigint NOT NULL AUTO_INCREMENT," + + "`name` varchar(50) CHARACTER SET latin1 COLLATE latin1_swedish_ci DEFAULT NULL," + + "`email` varbinary(100) DEFAULT NULL," + + "PRIMARY KEY (`id`)," + + "KEY `id` (`id`,`name`)) " + + "ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", + }, { + // initial load of view - kept empty + }, { + "my_child_tbl": "CREATE TABLE `my_child_tbl` (" + + "`id` bigint NOT NULL AUTO_INCREMENT," + + "`name` varchar(50) CHARACTER SET latin1 COLLATE latin1_swedish_ci DEFAULT NULL," + + "`code` varchar(6) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT NULL," + + "`my_id` bigint DEFAULT NULL," + + "PRIMARY KEY (`id`)," + + "KEY `my_id` (`my_id`,`name`)," + + "CONSTRAINT `my_child_tbl_ibfk_1` FOREIGN KEY (`my_id`, `name`) REFERENCES `my_tbl` (`id`, `name`) ON DELETE CASCADE) " + + "ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", + }} + + testcases := []testCases{{ + testName: "initial table load", + expTbl: map[string][]vindexes.Column{ + "my_tbl": { + {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64, CollationName: "utf8mb4_0900_ai_ci"}, + {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "latin1_swedish_ci"}, + {Name: sqlparser.NewIdentifierCI("email"), Type: querypb.Type_VARBINARY, CollationName: "utf8mb4_0900_ai_ci"}, + }, + }, + }, { + testName: "new tables", + updTbl: []string{"my_child_tbl"}, + expTbl: map[string][]vindexes.Column{ + "my_tbl": { + {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64, CollationName: "utf8mb4_0900_ai_ci"}, + {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "latin1_swedish_ci"}, + {Name: sqlparser.NewIdentifierCI("email"), Type: querypb.Type_VARBINARY, CollationName: "utf8mb4_0900_ai_ci"}, + }, + "my_child_tbl": { + {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64, CollationName: "utf8mb4_0900_ai_ci"}, + {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "latin1_swedish_ci"}, + {Name: sqlparser.NewIdentifierCI("code"), Type: querypb.Type_VARCHAR, CollationName: "utf8mb4_0900_ai_ci"}, + {Name: sqlparser.NewIdentifierCI("my_id"), Type: querypb.Type_INT64, CollationName: "utf8mb4_0900_ai_ci"}, + }, + }, + expFk: map[string]string{ + "my_tbl": "", + "my_child_tbl": "foreign key (my_id, `name`) references my_tbl (id, `name`) on delete cascade", + }, + }} + + testTracker(t, schemaDefResult, testcases) +} + +type testCases struct { + testName string + + updTbl []string + expTbl map[string][]vindexes.Column + expFk map[string]string + + updView []string + expView map[string]string +} + +func testTracker(t *testing.T, schemaDefResult []map[string]string, tcases []testCases) { ch := make(chan *discovery.TabletHealth) - tracker := NewTracker(ch, "", true) - tracker.tables = nil // making tables map nil - so load keyspace does not try to load the tables information. + tracker := NewTracker(ch, true) tracker.consumeDelay = 1 * time.Millisecond tracker.Start() defer tracker.Stop() @@ -365,10 +345,13 @@ func TestViewsTracking(t *testing.T) { wg.Done() }) + target := &querypb.Target{Cell: cell, Keyspace: keyspace, Shard: "-80", TabletType: topodatapb.TabletType_PRIMARY} + tablet := &topodatapb.Tablet{Keyspace: target.Keyspace, Shard: target.Shard, Type: target.TabletType} + sbc := sandboxconn.NewSandboxConn(tablet) sbc.SetSchemaResult(schemaDefResult) - for count, tcase := range testcases { + for count, tcase := range tcases { t.Run(tcase.testName, func(t *testing.T) { wg.Add(1) ch <- &discovery.TabletHealth{ @@ -376,18 +359,42 @@ func TestViewsTracking(t *testing.T) { Tablet: tablet, Target: target, Serving: true, - Stats: &querypb.RealtimeStats{ViewSchemaChanged: tcase.updView}, + Stats: &querypb.RealtimeStats{TableSchemaChanged: tcase.updTbl, ViewSchemaChanged: tcase.updView}, } require.False(t, waitTimeout(&wg, time.Second), "schema was updated but received no signal") - require.EqualValues(t, count+1, sbc.GetSchemaCount.Load()) + require.EqualValues(t, count+2, sbc.GetSchemaCount.Load()) _, keyspacePresent := tracker.tracked[target.Keyspace] require.Equal(t, true, keyspacePresent) - for k, v := range tcase.exp { - utils.MustMatch(t, v, sqlparser.String(tracker.GetViews("ks", k)), "mismatch for table: ", k) + for k, v := range tcase.expTbl { + utils.MustMatch(t, v, tracker.GetColumns(keyspace, k), "mismatch columns for table: ", k) + if len(tcase.expFk[k]) > 0 { + fks := tracker.GetForeignKeys(keyspace, k) + for _, fk := range fks { + utils.MustMatch(t, tcase.expFk[k], sqlparser.String(fk), "mismatch foreign keys for table: ", k) + } + } + } + + for k, v := range tcase.expView { + utils.MustMatch(t, v, sqlparser.String(tracker.GetViews(keyspace, k)), "mismatch for view: ", k) } }) } } + +func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool { + c := make(chan struct{}) + go func() { + defer close(c) + wg.Wait() + }() + select { + case <-c: + return false // completed normally + case <-time.After(timeout): + return true // timed out + } +} diff --git a/go/vt/vtgate/schema/update_controller.go b/go/vt/vtgate/schema/update_controller.go index 0d595a0897d..f68a9448d55 100644 --- a/go/vt/vtgate/schema/update_controller.go +++ b/go/vt/vtgate/schema/update_controller.go @@ -20,7 +20,7 @@ import ( "sync" "time" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -84,8 +84,8 @@ func (u *updateController) consume() { // checkIfWeShouldIgnoreKeyspace inspects an error and // will mark a keyspace as failed and won't try to load more information from it func checkIfWeShouldIgnoreKeyspace(err error) bool { - sqlErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) - if sqlErr.Num == mysql.ERBadDb || sqlErr.Num == mysql.ERNoSuchTable { + sqlErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) + if sqlErr.Num == sqlerror.ERBadDb || sqlErr.Num == sqlerror.ERNoSuchTable { // if we are missing the db or table, no point in retrying return true } diff --git a/go/vt/vtgate/semantics/analyzer.go b/go/vt/vtgate/semantics/analyzer.go index 1375e3914ab..6955f4bafcd 100644 --- a/go/vt/vtgate/semantics/analyzer.go +++ b/go/vt/vtgate/semantics/analyzer.go @@ -18,10 +18,8 @@ package semantics import ( "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/vindexes" - "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" ) // analyzer controls the flow of the analysis. @@ -80,19 +78,39 @@ func Analyze(statement sqlparser.Statement, currentDb string, si SchemaInformati return semTable, nil } +// AnalyzeStrict analyzes the parsed query, and fails the analysis for any possible errors +func AnalyzeStrict(statement sqlparser.Statement, currentDb string, si SchemaInformation) (*SemTable, error) { + st, err := Analyze(statement, currentDb, si) + if err != nil { + return nil, err + } + + if st.NotUnshardedErr != nil { + return nil, st.NotUnshardedErr + } + if st.NotSingleRouteErr != nil { + return nil, st.NotSingleRouteErr + } + + return st, nil +} + func (a *analyzer) newSemTable(statement sqlparser.Statement, coll collations.ID) *SemTable { var comments *sqlparser.ParsedComments commentedStmt, isCommented := statement.(sqlparser.Commented) if isCommented { comments = commentedStmt.GetParsedComments() } + columns := map[*sqlparser.Union]sqlparser.SelectExprs{} + for union, info := range a.tables.unionInfo { + columns[union] = info.exprs + } return &SemTable{ Recursive: a.binder.recursive, Direct: a.binder.direct, ExprTypes: a.typer.exprTypes, Tables: a.tables.Tables, - selectScope: a.scoper.rScope, NotSingleRouteErr: a.projErr, NotUnshardedErr: a.unshardedErr, Warning: a.warning, @@ -102,6 +120,7 @@ func (a *analyzer) newSemTable(statement sqlparser.Statement, coll collations.ID ColumnEqualities: map[columnName][]sqlparser.Expr{}, Collation: coll, ExpandedColumns: a.rewriter.expandedColumns, + columns: columns, } } @@ -109,7 +128,7 @@ func (a *analyzer) setError(err error) { switch err := err.(type) { case ProjError: a.projErr = err.Inner - case UnshardedError: + case ShardedError: a.unshardedErr = err.Inner default: if a.inProjection > 0 && vterrors.ErrState(err) == vterrors.NonUniqError { @@ -154,11 +173,6 @@ func (a *analyzer) analyzeUp(cursor *sqlparser.Cursor) bool { return false } - if err := a.binder.up(cursor); err != nil { - a.setError(err) - return true - } - if err := a.scoper.up(cursor); err != nil { a.setError(err) return false @@ -167,11 +181,22 @@ func (a *analyzer) analyzeUp(cursor *sqlparser.Cursor) bool { a.setError(err) return false } + + if err := a.binder.up(cursor); err != nil { + a.setError(err) + return true + } + if err := a.typer.up(cursor); err != nil { a.setError(err) return false } + if err := a.rewriter.up(cursor); err != nil { + a.setError(err) + return true + } + a.leaveProjection(cursor) return a.shouldContinue() } @@ -193,15 +218,14 @@ func checkUnionColumns(union *sqlparser.Union) error { // we'll fail it at run time instead return nil } - count := len(firstProj) secondProj := sqlparser.GetFirstSelect(union.Right).SelectExprs if containsStar(secondProj) { return nil } - if len(secondProj) != count { - return NewError(UnionColumnsDoNotMatch) + if len(secondProj) != len(firstProj) { + return &UnionColumnsDoNotMatchError{FirstProj: len(firstProj), SecondProj: len(secondProj)} } return nil @@ -256,89 +280,6 @@ func (a *analyzer) analyze(statement sqlparser.Statement) error { return a.err } -func (a *analyzer) checkForInvalidConstructs(cursor *sqlparser.Cursor) error { - switch node := cursor.Node().(type) { - case *sqlparser.Update: - if len(node.TableExprs) != 1 { - return UnshardedError{Inner: NewError(UnsupportedMultiTablesInUpdate)} - } - alias, isAlias := node.TableExprs[0].(*sqlparser.AliasedTableExpr) - if !isAlias { - return UnshardedError{Inner: NewError(UnsupportedMultiTablesInUpdate)} - } - _, isDerived := alias.Expr.(*sqlparser.DerivedTable) - if isDerived { - return NewError(TableNotUpdatable, alias.As.String()) - } - case *sqlparser.Select: - parent := cursor.Parent() - if _, isUnion := parent.(*sqlparser.Union); isUnion && node.SQLCalcFoundRows { - return NewError(UnionWithSQLCalcFoundRows) - } - if _, isRoot := parent.(*sqlparser.RootNode); !isRoot && node.SQLCalcFoundRows { - return NewError(SQLCalcFoundRowsUsage) - } - errMsg := "INTO" - nextVal := false - if len(node.SelectExprs) == 1 { - if _, isNextVal := node.SelectExprs[0].(*sqlparser.Nextval); isNextVal { - nextVal = true - errMsg = "NEXT" - } - } - if !nextVal && node.Into == nil { - return nil - } - if a.scoper.currentScope().parent != nil { - return NewError(CantUseOptionHere, errMsg) - } - case *sqlparser.Nextval: - currScope := a.scoper.currentScope() - if currScope.parent != nil { - return NewError(CantUseOptionHere, "Incorrect usage/placement of 'INTO'") - } - if len(currScope.tables) != 1 { - return NewError(NextWithMultipleTables) - } - vindexTbl := currScope.tables[0].GetVindexTable() - if vindexTbl == nil { - return NewError(MissingInVSchema) - } - if vindexTbl.Type != vindexes.TypeSequence { - return NewError(NotSequenceTable) - } - case *sqlparser.JoinTableExpr: - if node.Join == sqlparser.NaturalJoinType || node.Join == sqlparser.NaturalRightJoinType || node.Join == sqlparser.NaturalLeftJoinType { - return NewError(UnsupportedNaturalJoin, node.Join.ToString()) - } - case *sqlparser.LockingFunc: - return NewError(LockOnlyWithDual, node) - case *sqlparser.Union: - err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - switch node := node.(type) { - case *sqlparser.ColName: - if !node.Qualifier.IsEmpty() { - return false, NewError(QualifiedOrderInUnion, node.Qualifier.Name) - } - case *sqlparser.Subquery: - return false, nil - } - return true, nil - }, node.OrderBy) - if err != nil { - return err - } - err = checkUnionColumns(node) - if err != nil { - return err - } - case *sqlparser.JSONTableExpr: - return NewError(JSONTables) - } - - return nil -} - func (a *analyzer) shouldContinue() bool { return a.err == nil } @@ -357,12 +298,12 @@ func (p ProjError) Error() string { return p.Inner.Error() } -// UnshardedError is used to mark an error as something that should only be returned +// ShardedError is used to mark an error as something that should only be returned // if the query is not unsharded -type UnshardedError struct { +type ShardedError struct { Inner error } -func (p UnshardedError) Error() string { +func (p ShardedError) Error() string { return p.Inner.Error() } diff --git a/go/vt/vtgate/semantics/analyzer_update_test.go b/go/vt/vtgate/semantics/analyzer_dml_test.go similarity index 92% rename from go/vt/vtgate/semantics/analyzer_update_test.go rename to go/vt/vtgate/semantics/analyzer_dml_test.go index bb72a21811e..7c87066dced 100644 --- a/go/vt/vtgate/semantics/analyzer_update_test.go +++ b/go/vt/vtgate/semantics/analyzer_dml_test.go @@ -45,8 +45,8 @@ func TestUpdBindingColName(t *testing.T) { updExpr := extractFromUpdateSet(upd, 0) recursiveDeps := semTable.RecursiveDeps(updExpr.Name) - assert.Equal(t, T1, recursiveDeps, query) - assert.Equal(t, T1, semTable.DirectDeps(updExpr.Name), query) + assert.Equal(t, TS0, recursiveDeps, query) + assert.Equal(t, TS0, semTable.DirectDeps(updExpr.Name), query) assert.Equal(t, 1, recursiveDeps.NumberOfTables(), "number of tables is wrong") recursiveDeps = semTable.RecursiveDeps(updExpr.Expr) @@ -73,13 +73,13 @@ func TestUpdBindingExpr(t *testing.T) { updExpr := extractFromUpdateSet(upd, 0) recursiveDeps := semTable.RecursiveDeps(updExpr.Name) - assert.Equal(t, T1, recursiveDeps, query) - assert.Equal(t, T1, semTable.DirectDeps(updExpr.Name), query) + assert.Equal(t, TS0, recursiveDeps, query) + assert.Equal(t, TS0, semTable.DirectDeps(updExpr.Name), query) assert.Equal(t, 1, recursiveDeps.NumberOfTables(), "number of tables is wrong") recursiveDeps = semTable.RecursiveDeps(updExpr.Expr) - assert.Equal(t, T1, recursiveDeps, query) - assert.Equal(t, T1, semTable.DirectDeps(updExpr.Expr), query) + assert.Equal(t, TS0, recursiveDeps, query) + assert.Equal(t, TS0, semTable.DirectDeps(updExpr.Expr), query) assert.Equal(t, 1, recursiveDeps.NumberOfTables(), "number of tables is wrong") }) } @@ -102,8 +102,8 @@ func TestUpdSetSubquery(t *testing.T) { updExpr := extractFromUpdateSet(upd, 0) recursiveDeps := semTable.RecursiveDeps(updExpr.Name) - assert.Equal(t, T1, recursiveDeps, query) - assert.Equal(t, T1, semTable.DirectDeps(updExpr.Name), query) + assert.Equal(t, TS0, recursiveDeps, query) + assert.Equal(t, TS0, semTable.DirectDeps(updExpr.Name), query) assert.Equal(t, 1, recursiveDeps.NumberOfTables(), "number of tables is wrong") extractedSubqs := semTable.SubqueryMap[upd] diff --git a/go/vt/vtgate/semantics/analyzer_test.go b/go/vt/vtgate/semantics/analyzer_test.go index c88c295ac68..ec6c69960b0 100644 --- a/go/vt/vtgate/semantics/analyzer_test.go +++ b/go/vt/vtgate/semantics/analyzer_test.go @@ -20,13 +20,13 @@ import ( "fmt" "testing" - "vitess.io/vitess/go/vt/vtgate/engine" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -35,11 +35,11 @@ var T0 TableSet var ( // Just here to make outputs more readable None = EmptyTableSet() - T1 = SingleTableSet(0) - T2 = SingleTableSet(1) - T3 = SingleTableSet(2) - T4 = SingleTableSet(3) - T5 = SingleTableSet(4) + TS0 = SingleTableSet(0) + TS1 = SingleTableSet(1) + TS2 = SingleTableSet(2) + TS3 = SingleTableSet(3) + TS4 = SingleTableSet(4) ) func extract(in *sqlparser.Select, idx int) sqlparser.Expr { @@ -68,8 +68,8 @@ func TestBindingSingleTablePositive(t *testing.T) { assert.Equal(t, SingleTableSet(0), ts) recursiveDeps := semTable.RecursiveDeps(extract(sel, 0)) - assert.Equal(t, T1, recursiveDeps, query) - assert.Equal(t, T1, semTable.DirectDeps(extract(sel, 0)), query) + assert.Equal(t, TS0, recursiveDeps, query) + assert.Equal(t, TS0, semTable.DirectDeps(extract(sel, 0)), query) assert.Equal(t, 1, recursiveDeps.NumberOfTables(), "number of tables is wrong") }) } @@ -105,7 +105,7 @@ func TestBindingSingleAliasedTablePositive(t *testing.T) { assert.Equal(t, SingleTableSet(0), ts) recursiveDeps := semTable.RecursiveDeps(extract(sel, 0)) - require.Equal(t, T1, recursiveDeps, query) + require.Equal(t, TS0, recursiveDeps, query) assert.Equal(t, 1, recursiveDeps.NumberOfTables(), "number of tables is wrong") }) } @@ -124,10 +124,10 @@ func TestBindingSingleTableNegative(t *testing.T) { t.Run(query, func(t *testing.T) { parse, err := sqlparser.Parse(query) require.NoError(t, err) - _, err = Analyze(parse.(sqlparser.SelectStatement), "d", &FakeSI{}) - require.Error(t, err) - require.Contains(t, err.Error(), "symbol") - require.Contains(t, err.Error(), "not found") + st, err := Analyze(parse, "d", &FakeSI{}) + require.NoError(t, err) + require.ErrorContains(t, st.NotUnshardedErr, "column") + require.ErrorContains(t, st.NotUnshardedErr, "not found") }) } } @@ -144,12 +144,13 @@ func TestBindingSingleAliasedTableNegative(t *testing.T) { t.Run(query, func(t *testing.T) { parse, err := sqlparser.Parse(query) require.NoError(t, err) - _, err = Analyze(parse.(sqlparser.SelectStatement), "", &FakeSI{ + st, err := Analyze(parse, "", &FakeSI{ Tables: map[string]*vindexes.Table{ "t": {Name: sqlparser.NewIdentifierCS("t")}, }, }) - require.Error(t, err) + require.NoError(t, err) + require.Error(t, st.NotUnshardedErr) }) } } @@ -162,35 +163,35 @@ func TestBindingMultiTablePositive(t *testing.T) { } queries := []testCase{{ query: "select t.col from t, s", - deps: T1, + deps: TS0, numberOfTables: 1, }, { query: "select s.col from t join s", - deps: T2, + deps: TS1, numberOfTables: 1, }, { query: "select max(t.col+s.col) from t, s", - deps: MergeTableSets(T1, T2), + deps: MergeTableSets(TS0, TS1), numberOfTables: 2, }, { query: "select max(t.col+s.col) from t join s", - deps: MergeTableSets(T1, T2), + deps: MergeTableSets(TS0, TS1), numberOfTables: 2, }, { query: "select case t.col when s.col then r.col else u.col end from t, s, r, w, u", - deps: MergeTableSets(T1, T2, T3, T5), + deps: MergeTableSets(TS0, TS1, TS2, TS4), numberOfTables: 4, // }, { // TODO: move to subquery // make sure that we don't let sub-query dependencies leak out by mistake // query: "select t.col + (select 42 from s) from t", - // deps: T1, + // deps: TS0, // }, { // query: "select (select 42 from s where r.id = s.id) from r", - // deps: T1 | T2, + // deps: TS0 | TS1, }, { query: "select u1.a + u2.a from u1, u2", - deps: MergeTableSets(T1, T2), + deps: MergeTableSets(TS0, TS1), numberOfTables: 2, }} for _, query := range queries { @@ -212,19 +213,19 @@ func TestBindingMultiAliasedTablePositive(t *testing.T) { } queries := []testCase{{ query: "select X.col from t as X, s as S", - deps: T1, + deps: TS0, numberOfTables: 1, }, { query: "select X.col+S.col from t as X, s as S", - deps: MergeTableSets(T1, T2), + deps: MergeTableSets(TS0, TS1), numberOfTables: 2, }, { query: "select max(X.col+S.col) from t as X, s as S", - deps: MergeTableSets(T1, T2), + deps: MergeTableSets(TS0, TS1), numberOfTables: 2, }, { query: "select max(X.col+s.col) from t as X, s", - deps: MergeTableSets(T1, T2), + deps: MergeTableSets(TS0, TS1), numberOfTables: 2, }} for _, query := range queries { @@ -249,7 +250,7 @@ func TestBindingMultiTableNegative(t *testing.T) { t.Run(query, func(t *testing.T) { parse, err := sqlparser.Parse(query) require.NoError(t, err) - _, err = Analyze(parse.(sqlparser.SelectStatement), "d", &FakeSI{ + _, err = Analyze(parse, "d", &FakeSI{ Tables: map[string]*vindexes.Table{ "tabl": {Name: sqlparser.NewIdentifierCS("tabl")}, "foo": {Name: sqlparser.NewIdentifierCS("foo")}, @@ -273,7 +274,7 @@ func TestBindingMultiAliasedTableNegative(t *testing.T) { t.Run(query, func(t *testing.T) { parse, err := sqlparser.Parse(query) require.NoError(t, err) - _, err = Analyze(parse.(sqlparser.SelectStatement), "d", &FakeSI{ + _, err = Analyze(parse, "d", &FakeSI{ Tables: map[string]*vindexes.Table{ "tabl": {Name: sqlparser.NewIdentifierCS("tabl")}, "foo": {Name: sqlparser.NewIdentifierCS("foo")}, @@ -295,7 +296,7 @@ func TestNotUniqueTableName(t *testing.T) { for _, query := range queries { t.Run(query, func(t *testing.T) { parse, _ := sqlparser.Parse(query) - _, err := Analyze(parse.(sqlparser.SelectStatement), "test", &FakeSI{}) + _, err := Analyze(parse, "test", &FakeSI{}) require.Error(t, err) require.Contains(t, err.Error(), "VT03013: not unique table/alias") }) @@ -310,22 +311,19 @@ func TestMissingTable(t *testing.T) { for _, query := range queries { t.Run(query, func(t *testing.T) { parse, _ := sqlparser.Parse(query) - _, err := Analyze(parse.(sqlparser.SelectStatement), "", &FakeSI{}) - require.Error(t, err) - require.Contains(t, err.Error(), "symbol t.col not found") + st, err := Analyze(parse, "", &FakeSI{}) + require.NoError(t, err) + require.ErrorContains(t, st.NotUnshardedErr, "column 't.col' not found") }) } } func TestUnknownColumnMap2(t *testing.T) { - varchar := querypb.Type_VARCHAR - int := querypb.Type_INT32 - authoritativeTblA := vindexes.Table{ Name: sqlparser.NewIdentifierCS("a"), Columns: []vindexes.Column{{ Name: sqlparser.NewIdentifierCI("col2"), - Type: varchar, + Type: sqltypes.VarChar, }}, ColumnListAuthoritative: true, } @@ -333,7 +331,7 @@ func TestUnknownColumnMap2(t *testing.T) { Name: sqlparser.NewIdentifierCS("b"), Columns: []vindexes.Column{{ Name: sqlparser.NewIdentifierCI("col"), - Type: varchar, + Type: sqltypes.VarChar, }}, ColumnListAuthoritative: true, } @@ -345,7 +343,7 @@ func TestUnknownColumnMap2(t *testing.T) { Name: sqlparser.NewIdentifierCS("a"), Columns: []vindexes.Column{{ Name: sqlparser.NewIdentifierCI("col"), - Type: int, + Type: sqltypes.Int64, }}, ColumnListAuthoritative: true, } @@ -353,7 +351,7 @@ func TestUnknownColumnMap2(t *testing.T) { Name: sqlparser.NewIdentifierCS("b"), Columns: []vindexes.Column{{ Name: sqlparser.NewIdentifierCI("col"), - Type: int, + Type: sqltypes.Int64, }}, ColumnListAuthoritative: true, } @@ -362,7 +360,7 @@ func TestUnknownColumnMap2(t *testing.T) { name string schema map[string]*vindexes.Table err bool - typ *querypb.Type + typ querypb.Type }{{ name: "no info about tables", schema: map[string]*vindexes.Table{"a": {}, "b": {}}, @@ -375,22 +373,22 @@ func TestUnknownColumnMap2(t *testing.T) { name: "non authoritative columns - one authoritative and one not", schema: map[string]*vindexes.Table{"a": &nonAuthoritativeTblA, "b": &authoritativeTblB}, err: false, - typ: &varchar, + typ: sqltypes.VarChar, }, { name: "non authoritative columns - one authoritative and one not", schema: map[string]*vindexes.Table{"a": &authoritativeTblA, "b": &nonAuthoritativeTblB}, err: false, - typ: &varchar, + typ: sqltypes.VarChar, }, { name: "authoritative columns", schema: map[string]*vindexes.Table{"a": &authoritativeTblA, "b": &authoritativeTblB}, err: false, - typ: &varchar, + typ: sqltypes.VarChar, }, { name: "authoritative columns", schema: map[string]*vindexes.Table{"a": &authoritativeTblA, "b": &authoritativeTblBWithInt}, err: false, - typ: &int, + typ: sqltypes.Int64, }, { name: "authoritative columns with overlap", schema: map[string]*vindexes.Table{"a": &authoritativeTblAWithConflict, "b": &authoritativeTblB}, @@ -406,13 +404,14 @@ func TestUnknownColumnMap2(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { si := &FakeSI{Tables: test.schema} - tbl, err := Analyze(parse.(sqlparser.SelectStatement), "", si) + tbl, err := Analyze(parse, "", si) if test.err { require.True(t, err != nil || tbl.NotSingleRouteErr != nil) } else { require.NoError(t, err) require.NoError(t, tbl.NotSingleRouteErr) - typ := tbl.TypeFor(expr) + typ, _, found := tbl.TypeForExpr(expr) + assert.True(t, found) assert.Equal(t, test.typ, typ) } }) @@ -446,7 +445,7 @@ func TestUnknownPredicate(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { si := &FakeSI{Tables: test.schema} - _, err := Analyze(parse.(sqlparser.SelectStatement), "", si) + _, err := Analyze(parse, "", si) if test.err { require.Error(t, err) } else { @@ -463,23 +462,20 @@ func TestScoping(t *testing.T) { }{ { query: "select 1 from u1, u2 left join u3 on u1.a = u2.a", - errorMessage: "symbol u1.a not found", + errorMessage: "column 'u1.a' not found", }, } for _, query := range queries { t.Run(query.query, func(t *testing.T) { parse, err := sqlparser.Parse(query.query) require.NoError(t, err) - _, err = Analyze(parse.(sqlparser.SelectStatement), "user", &FakeSI{ + st, err := Analyze(parse, "user", &FakeSI{ Tables: map[string]*vindexes.Table{ "t": {Name: sqlparser.NewIdentifierCS("t")}, }, }) - if query.errorMessage == "" { - require.NoError(t, err) - } else { - require.EqualError(t, err, query.errorMessage) - } + require.NoError(t, err) + require.EqualError(t, st.NotUnshardedErr, query.errorMessage) }) } } @@ -491,13 +487,13 @@ func TestScopeForSubqueries(t *testing.T) { }{ { sql: `select t.col1, (select t.col2 from z as t) from x as t`, - deps: T2, + deps: TS1, }, { sql: `select t.col1, (select t.col2 from z) from x as t`, - deps: T1, + deps: TS0, }, { sql: `select t.col1, (select (select z.col2 from y) from z) from x as t`, - deps: T2, + deps: TS1, }, { sql: `select t.col1, (select (select y.col2 from y) from z) from x as t`, deps: None, @@ -506,7 +502,7 @@ func TestScopeForSubqueries(t *testing.T) { deps: None, }, { sql: `select t.col1, (select id from t) from x as t`, - deps: T2, + deps: TS1, }, } for _, tc := range tcases { @@ -528,32 +524,32 @@ func TestScopeForSubqueries(t *testing.T) { func TestSubqueriesMappingWhereClause(t *testing.T) { tcs := []struct { sql string - opCode engine.PulloutOpcode + opCode opcode.PulloutOpcode otherSideName string }{ { sql: "select id from t1 where id in (select uid from t2)", - opCode: engine.PulloutIn, + opCode: opcode.PulloutIn, otherSideName: "id", }, { sql: "select id from t1 where id not in (select uid from t2)", - opCode: engine.PulloutNotIn, + opCode: opcode.PulloutNotIn, otherSideName: "id", }, { sql: "select id from t where col1 = (select uid from t2 order by uid desc limit 1)", - opCode: engine.PulloutValue, + opCode: opcode.PulloutValue, otherSideName: "col1", }, { sql: "select id from t where exists (select uid from t2 where uid = 42)", - opCode: engine.PulloutExists, + opCode: opcode.PulloutExists, otherSideName: "", }, { sql: "select id from t where col1 >= (select uid from t2 where uid = 42)", - opCode: engine.PulloutValue, + opCode: opcode.PulloutValue, otherSideName: "col1", }, } @@ -608,7 +604,7 @@ func TestSubqueriesMappingSelectExprs(t *testing.T) { extractedSubq := semTable.SubqueryRef[subq] assert.True(t, sqlparser.Equals.Expr(extractedSubq.Subquery, subq)) assert.True(t, sqlparser.Equals.Expr(extractedSubq.Original, subq)) - assert.EqualValues(t, engine.PulloutValue, extractedSubq.OpCode) + assert.EqualValues(t, opcode.PulloutValue, extractedSubq.OpCode) }) } } @@ -619,22 +615,22 @@ func TestSubqueryOrderByBinding(t *testing.T) { expected TableSet }{{ query: "select * from user u where exists (select * from user order by col)", - expected: T2, + expected: TS1, }, { query: "select * from user u where exists (select * from user order by user.col)", - expected: T2, + expected: TS1, }, { query: "select * from user u where exists (select * from user order by u.col)", - expected: T1, + expected: TS0, }, { query: "select * from dbName.user as u where exists (select * from dbName.user order by u.col)", - expected: T1, + expected: TS0, }, { query: "select * from dbName.user where exists (select * from otherDb.user order by dbName.user.col)", - expected: T1, + expected: TS0, }, { query: "select id from dbName.t1 where exists (select * from dbName.t2 order by dbName.t1.id)", - expected: T1, + expected: TS0, }} for _, tc := range queries { @@ -659,52 +655,52 @@ func TestOrderByBindingTable(t *testing.T) { deps TableSet }{{ "select col from tabl order by col", - T1, + TS0, }, { "select tabl.col from d.tabl order by col", - T1, + TS0, }, { "select d.tabl.col from d.tabl order by col", - T1, + TS0, }, { "select col from tabl order by tabl.col", - T1, + TS0, }, { "select col from tabl order by d.tabl.col", - T1, + TS0, }, { "select col from tabl order by 1", - T1, + TS0, }, { "select col as c from tabl order by c", - T1, + TS0, }, { "select 1 as c from tabl order by c", T0, }, { "select name, name from t1, t2 order by name", - T2, + TS1, }, { "(select id from t1) union (select uid from t2) order by id", - MergeTableSets(T1, T2), + MergeTableSets(TS0, TS1), }, { "select id from t1 union (select uid from t2) order by 1", - MergeTableSets(T1, T2), + MergeTableSets(TS0, TS1), }, { "select id from t1 union select uid from t2 union (select name from t) order by 1", - MergeTableSets(T1, T2, T3), + MergeTableSets(TS0, TS1, TS2), }, { "select a.id from t1 as a union (select uid from t2) order by 1", - MergeTableSets(T1, T2), + MergeTableSets(TS0, TS1), }, { "select b.id as a from t1 as b union (select uid as c from t2) order by 1", - MergeTableSets(T1, T2), + MergeTableSets(TS0, TS1), }, { "select a.id from t1 as a union (select uid from t2, t union (select name from t) order by 1) order by 1", - MergeTableSets(T1, T2, T4), + MergeTableSets(TS0, TS1, TS3), }, { "select a.id from t1 as a union (select uid from t2, t union (select name from t) order by 1) order by id", - MergeTableSets(T1, T2, T4), + MergeTableSets(TS0, TS1, TS3), }} for _, tc := range tcases { t.Run(tc.sql, func(t *testing.T) { @@ -731,49 +727,49 @@ func TestGroupByBinding(t *testing.T) { deps TableSet }{{ "select col from tabl group by col", - T1, + TS0, }, { "select col from tabl group by tabl.col", - T1, + TS0, }, { "select col from tabl group by d.tabl.col", - T1, + TS0, }, { "select tabl.col as x from tabl group by x", - T1, + TS0, }, { "select tabl.col as x from tabl group by col", - T1, + TS0, }, { "select d.tabl.col as x from tabl group by x", - T1, + TS0, }, { "select d.tabl.col as x from tabl group by col", - T1, + TS0, }, { "select col from tabl group by 1", - T1, + TS0, }, { "select col as c from tabl group by c", - T1, + TS0, }, { "select 1 as c from tabl group by c", T0, }, { "select t1.id from t1, t2 group by id", - T1, + TS0, }, { "select id from t, t1 group by id", - T2, + TS1, }, { "select id from t, t1 group by id", - T2, + TS1, }, { "select a.id from t as a, t1 group by id", - T1, + TS0, }, { "select a.id from t, t1 as a group by id", - T2, + TS1, }} for _, tc := range tcases { t.Run(tc.sql, func(t *testing.T) { @@ -792,43 +788,43 @@ func TestHavingBinding(t *testing.T) { deps TableSet }{{ "select col from tabl having col = 1", - T1, + TS0, }, { "select col from tabl having tabl.col = 1", - T1, + TS0, }, { "select col from tabl having d.tabl.col = 1", - T1, + TS0, }, { "select tabl.col as x from tabl having x = 1", - T1, + TS0, }, { "select tabl.col as x from tabl having col", - T1, + TS0, }, { "select col from tabl having 1 = 1", T0, }, { "select col as c from tabl having c = 1", - T1, + TS0, }, { "select 1 as c from tabl having c = 1", T0, }, { "select t1.id from t1, t2 having id = 1", - T1, + TS0, }, { "select t.id from t, t1 having id = 1", - T1, + TS0, }, { "select t.id, count(*) as a from t, t1 group by t.id having a = 1", - MergeTableSets(T1, T2), + MergeTableSets(TS0, TS1), }, { "select t.id, sum(t2.name) as a from t, t2 group by t.id having a = 1", - T2, + TS1, }, { sql: "select u2.a, u1.a from u1, u2 having u2.a = 2", - deps: T2, + deps: TS1, }} for _, tc := range tcases { t.Run(tc.sql, func(t *testing.T) { @@ -858,8 +854,8 @@ func TestUnionCheckFirstAndLastSelectsDeps(t *testing.T) { d1 := semTable.RecursiveDeps(extract(sel1, 0)) d2 := semTable.RecursiveDeps(extract(sel2, 0)) - assert.Equal(t, T1, d1) - assert.Equal(t, T2, d2) + assert.Equal(t, TS0, d1) + assert.Equal(t, TS1, d2) } func TestUnionOrderByRewrite(t *testing.T) { @@ -871,44 +867,75 @@ func TestUnionOrderByRewrite(t *testing.T) { func TestInvalidQueries(t *testing.T) { tcases := []struct { - sql string - err string + sql string + serr string + err error + notUnshardedErr string }{{ sql: "select t1.id, t1.col1 from t1 union select t2.uid from t2", - err: "The used SELECT statements have a different number of columns", + err: &UnionColumnsDoNotMatchError{FirstProj: 2, SecondProj: 1}, }, { sql: "select t1.id from t1 union select t2.uid, t2.price from t2", - err: "The used SELECT statements have a different number of columns", + err: &UnionColumnsDoNotMatchError{FirstProj: 1, SecondProj: 2}, }, { sql: "select t1.id from t1 union select t2.uid, t2.price from t2", - err: "The used SELECT statements have a different number of columns", + err: &UnionColumnsDoNotMatchError{FirstProj: 1, SecondProj: 2}, }, { sql: "(select 1,2 union select 3,4) union (select 5,6 union select 7)", - err: "The used SELECT statements have a different number of columns", + err: &UnionColumnsDoNotMatchError{FirstProj: 2, SecondProj: 1}, + }, { + sql: "select id from a union select 3 order by a.id", + err: &QualifiedOrderInUnionError{Table: "a"}, + serr: "Table `a` from one of the SELECTs cannot be used in global ORDER clause", + }, { + sql: "select a.id, b.id from a, b union select 1, 2 order by id", + serr: "Column 'id' in field list is ambiguous", + }, { + sql: "select sql_calc_found_rows id from a union select 1 limit 109", + err: &UnionWithSQLCalcFoundRowsError{}, + serr: "VT12001: unsupported: SQL_CALC_FOUND_ROWS not supported with union", + }, { + sql: "select * from (select sql_calc_found_rows id from a) as t", + serr: "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'", + }, { + sql: "select (select sql_calc_found_rows id from a) as t", + serr: "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'", }, { - sql: "select id from a union select 3 order by a.id", - err: "Table a from one of the SELECTs cannot be used in global ORDER clause", + sql: "select id from t1 natural join t2", + serr: "VT12001: unsupported: natural join", }, { - sql: "select a.id, b.id from a, b union select 1, 2 order by id", - err: "Column 'id' in field list is ambiguous", + sql: "select * from music where user_id IN (select sql_calc_found_rows * from music limit 10)", + err: &SQLCalcFoundRowsUsageError{}, }, { - sql: "select sql_calc_found_rows id from a union select 1 limit 109", - err: "VT12001: unsupported: SQL_CALC_FOUND_ROWS not supported with union", + sql: "select is_free_lock('xyz') from user", + serr: "is_free_lock('xyz') allowed only with dual", }, { - sql: "select * from (select sql_calc_found_rows id from a) as t", - err: "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'", + sql: "SELECT * FROM JSON_TABLE('[ {\"c1\": null} ]','$[*]' COLUMNS( c1 INT PATH '$.c1' ERROR ON ERROR )) as jt", + err: &JSONTablesError{}, }, { - sql: "select (select sql_calc_found_rows id from a) as t", - err: "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'", + sql: "select does_not_exist from t1", + notUnshardedErr: "column 'does_not_exist' not found in table 't1'", + }, { + sql: "select t1.does_not_exist from t1, t2", + notUnshardedErr: "column 't1.does_not_exist' not found", }} + for _, tc := range tcases { t.Run(tc.sql, func(t *testing.T) { parse, err := sqlparser.Parse(tc.sql) require.NoError(t, err) - _, err = Analyze(parse.(sqlparser.SelectStatement), "dbName", fakeSchemaInfo()) - require.Error(t, err) - require.Equal(t, tc.err, err.Error()) + st, err := Analyze(parse, "dbName", fakeSchemaInfo()) + + switch { + case tc.err != nil: + require.Error(t, err) + require.Equal(t, tc.err, err) + case tc.serr != "": + require.EqualError(t, err, tc.serr) + case tc.notUnshardedErr != "": + require.EqualError(t, st.NotUnshardedErr, tc.notUnshardedErr) + } }) } } @@ -930,8 +957,8 @@ func TestUnionWithOrderBy(t *testing.T) { d1 := semTable.RecursiveDeps(extract(sel1, 0)) d2 := semTable.RecursiveDeps(extract(sel2, 0)) - assert.Equal(t, T1, d1) - assert.Equal(t, T2, d2) + assert.Equal(t, TS0, d1) + assert.Equal(t, TS1, d2) } func TestScopingWDerivedTables(t *testing.T) { @@ -943,70 +970,70 @@ func TestScopingWDerivedTables(t *testing.T) { }{ { query: "select id from (select x as id from user) as t", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select id from (select foo as id from user) as t", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select id from (select foo as id from (select x as foo from user) as c) as t", - recursiveExpectation: T1, - expectation: T3, + recursiveExpectation: TS0, + expectation: TS2, }, { query: "select t.id from (select foo as id from user) as t", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select t.id2 from (select foo as id from user) as t", - errorMessage: "symbol t.id2 not found", + errorMessage: "column 't.id2' not found", }, { query: "select id from (select 42 as id) as t", recursiveExpectation: T0, - expectation: T2, + expectation: TS1, }, { query: "select t.id from (select 42 as id) as t", recursiveExpectation: T0, - expectation: T2, + expectation: TS1, }, { query: "select ks.t.id from (select 42 as id) as t", - errorMessage: "symbol ks.t.id not found", + errorMessage: "column 'ks.t.id' not found", }, { query: "select * from (select id, id from user) as t", errorMessage: "Duplicate column name 'id'", }, { query: "select t.baz = 1 from (select id as baz from user) as t", - expectation: T2, - recursiveExpectation: T1, + expectation: TS1, + recursiveExpectation: TS0, }, { query: "select t.id from (select * from user, music) as t", - expectation: T3, - recursiveExpectation: MergeTableSets(T1, T2), + expectation: TS2, + recursiveExpectation: MergeTableSets(TS0, TS1), }, { query: "select t.id from (select * from user, music) as t order by t.id", - expectation: T3, - recursiveExpectation: MergeTableSets(T1, T2), + expectation: TS2, + recursiveExpectation: MergeTableSets(TS0, TS1), }, { query: "select t.id from (select * from user) as t join user as u on t.id = u.id", - expectation: T2, - recursiveExpectation: T1, + expectation: TS1, + recursiveExpectation: TS0, }, { query: "select t.col1 from t3 ua join (select t1.id, t1.col1 from t1 join t2) as t", - expectation: T4, - recursiveExpectation: T2, + expectation: TS3, + recursiveExpectation: TS1, }, { query: "select uu.test from (select id from t1) uu", - errorMessage: "symbol uu.test not found", + errorMessage: "column 'uu.test' not found", }, { query: "select uu.id from (select id as col from t1) uu", - errorMessage: "symbol uu.id not found", + errorMessage: "column 'uu.id' not found", }, { query: "select uu.id from (select id as col from t1) uu", - errorMessage: "symbol uu.id not found", + errorMessage: "column 'uu.id' not found", }, { query: "select uu.id from (select id from t1) as uu where exists (select * from t2 as uu where uu.id = uu.uid)", - expectation: T2, - recursiveExpectation: T1, + expectation: TS1, + recursiveExpectation: TS0, }, { query: "select 1 from user uu where exists (select 1 from user where exists (select 1 from (select 1 from t1) uu where uu.user_id = uu.id))", expectation: T0, @@ -1016,14 +1043,18 @@ func TestScopingWDerivedTables(t *testing.T) { t.Run(query.query, func(t *testing.T) { parse, err := sqlparser.Parse(query.query) require.NoError(t, err) - st, err := Analyze(parse.(sqlparser.SelectStatement), "user", &FakeSI{ + st, err := Analyze(parse, "user", &FakeSI{ Tables: map[string]*vindexes.Table{ "t": {Name: sqlparser.NewIdentifierCS("t")}, }, }) - if query.errorMessage != "" { + + switch { + case query.errorMessage != "" && err != nil: require.EqualError(t, err, query.errorMessage) - } else { + case query.errorMessage != "": + require.EqualError(t, st.NotUnshardedErr, query.errorMessage) + default: require.NoError(t, err) sel := parse.(*sqlparser.Select) assert.Equal(t, query.recursiveExpectation, st.RecursiveDeps(extract(sel, 0)), "RecursiveDeps") @@ -1033,6 +1064,48 @@ func TestScopingWDerivedTables(t *testing.T) { } } +func TestJoinPredicateDependencies(t *testing.T) { + // create table t() + // create table t1(id bigint) + // create table t2(uid bigint, name varchar(255)) + + queries := []struct { + query string + recursiveExpect TableSet + directExpect TableSet + }{{ + query: "select 1 from t1 join t2 on t1.id = t2.uid", + recursiveExpect: MergeTableSets(TS0, TS1), + directExpect: MergeTableSets(TS0, TS1), + }, { + query: "select 1 from (select * from t1) x join t2 on x.id = t2.uid", + recursiveExpect: MergeTableSets(TS0, TS2), + directExpect: MergeTableSets(TS1, TS2), + }, { + query: "select 1 from (select id from t1) x join t2 on x.id = t2.uid", + recursiveExpect: MergeTableSets(TS0, TS2), + directExpect: MergeTableSets(TS1, TS2), + }, { + query: "select 1 from (select id from t1 union select id from t) x join t2 on x.id = t2.uid", + recursiveExpect: MergeTableSets(TS0, TS1, TS3), + directExpect: MergeTableSets(TS2, TS3), + }} + for _, query := range queries { + t.Run(query.query, func(t *testing.T) { + parse, err := sqlparser.Parse(query.query) + require.NoError(t, err) + + st, err := Analyze(parse, "user", fakeSchemaInfo()) + require.NoError(t, err) + + sel := parse.(*sqlparser.Select) + expr := sel.From[0].(*sqlparser.JoinTableExpr).Condition.On + assert.Equal(t, query.recursiveExpect, st.RecursiveDeps(expr), "RecursiveDeps") + assert.Equal(t, query.directExpect, st.DirectDeps(expr), "DirectDeps") + }) + } +} + func TestDerivedTablesOrderClause(t *testing.T) { queries := []struct { query string @@ -1040,40 +1113,40 @@ func TestDerivedTablesOrderClause(t *testing.T) { expectation TableSet }{{ query: "select 1 from (select id from user) as t order by id", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select id from (select id from user) as t order by id", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select id from (select id from user) as t order by t.id", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select id as foo from (select id from user) as t order by foo", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select bar from (select id as bar from user) as t order by bar", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select bar as foo from (select id as bar from user) as t order by bar", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select bar as foo from (select id as bar from user) as t order by foo", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select bar as foo from (select id as bar, oo from user) as t order by oo", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }, { query: "select bar as foo from (select id, oo from user) as t(bar,oo) order by bar", - recursiveExpectation: T1, - expectation: T2, + recursiveExpectation: TS0, + expectation: TS1, }} si := &FakeSI{Tables: map[string]*vindexes.Table{"t": {Name: sqlparser.NewIdentifierCS("t")}}} for _, query := range queries { @@ -1081,7 +1154,7 @@ func TestDerivedTablesOrderClause(t *testing.T) { parse, err := sqlparser.Parse(query.query) require.NoError(t, err) - st, err := Analyze(parse.(sqlparser.SelectStatement), "user", si) + st, err := Analyze(parse, "user", si) require.NoError(t, err) sel := parse.(*sqlparser.Select) @@ -1101,20 +1174,20 @@ func TestScopingWComplexDerivedTables(t *testing.T) { }{ { query: "select 1 from user uu where exists (select 1 from user where exists (select 1 from (select 1 from t1) uu where uu.user_id = uu.id))", - rightExpectation: T1, - leftExpectation: T1, + rightExpectation: TS0, + leftExpectation: TS0, }, { query: "select 1 from user.user uu where exists (select 1 from user.user as uu where exists (select 1 from (select 1 from user.t1) uu where uu.user_id = uu.id))", - rightExpectation: T2, - leftExpectation: T2, + rightExpectation: TS1, + leftExpectation: TS1, }, } for _, query := range queries { t.Run(query.query, func(t *testing.T) { parse, err := sqlparser.Parse(query.query) require.NoError(t, err) - st, err := Analyze(parse.(sqlparser.SelectStatement), "user", &FakeSI{ + st, err := Analyze(parse, "user", &FakeSI{ Tables: map[string]*vindexes.Table{ "t": {Name: sqlparser.NewIdentifierCS("t")}, }, @@ -1143,20 +1216,20 @@ func TestScopingWVindexTables(t *testing.T) { }{ { query: "select id from user_index where id = 1", - recursiveExpectation: T1, - expectation: T1, + recursiveExpectation: TS0, + expectation: TS0, }, { query: "select u.id + t.id from t as t join user_index as u where u.id = 1 and u.id = t.id", - recursiveExpectation: MergeTableSets(T1, T2), - expectation: MergeTableSets(T1, T2), + recursiveExpectation: MergeTableSets(TS0, TS1), + expectation: MergeTableSets(TS0, TS1), }, } for _, query := range queries { t.Run(query.query, func(t *testing.T) { parse, err := sqlparser.Parse(query.query) require.NoError(t, err) - hash, _ := vindexes.NewHash("user_index", nil) - st, err := Analyze(parse.(sqlparser.SelectStatement), "user", &FakeSI{ + hash, _ := vindexes.CreateVindex("hash", "user_index", nil) + st, err := Analyze(parse, "user", &FakeSI{ Tables: map[string]*vindexes.Table{ "t": {Name: sqlparser.NewIdentifierCS("t")}, }, @@ -1198,7 +1271,7 @@ func BenchmarkAnalyzeMultipleDifferentQueries(b *testing.B) { parse, err := sqlparser.Parse(query) require.NoError(b, err) - _, _ = Analyze(parse.(sqlparser.SelectStatement), "d", fakeSchemaInfo()) + _, _ = Analyze(parse, "d", fakeSchemaInfo()) } } } @@ -1222,7 +1295,7 @@ func BenchmarkAnalyzeUnionQueries(b *testing.B) { parse, err := sqlparser.Parse(query) require.NoError(b, err) - _, _ = Analyze(parse.(sqlparser.SelectStatement), "d", fakeSchemaInfo()) + _, _ = Analyze(parse, "d", fakeSchemaInfo()) } } } @@ -1248,7 +1321,7 @@ func BenchmarkAnalyzeSubQueries(b *testing.B) { parse, err := sqlparser.Parse(query) require.NoError(b, err) - _, _ = Analyze(parse.(sqlparser.SelectStatement), "d", fakeSchemaInfo()) + _, _ = Analyze(parse, "d", fakeSchemaInfo()) } } } @@ -1278,7 +1351,7 @@ func BenchmarkAnalyzeDerivedTableQueries(b *testing.B) { parse, err := sqlparser.Parse(query) require.NoError(b, err) - _, _ = Analyze(parse.(sqlparser.SelectStatement), "d", fakeSchemaInfo()) + _, _ = Analyze(parse, "d", fakeSchemaInfo()) } } } @@ -1304,7 +1377,7 @@ func BenchmarkAnalyzeHavingQueries(b *testing.B) { parse, err := sqlparser.Parse(query) require.NoError(b, err) - _, _ = Analyze(parse.(sqlparser.SelectStatement), "d", fakeSchemaInfo()) + _, _ = Analyze(parse, "d", fakeSchemaInfo()) } } } @@ -1333,7 +1406,7 @@ func BenchmarkAnalyzeGroupByQueries(b *testing.B) { parse, err := sqlparser.Parse(query) require.NoError(b, err) - _, _ = Analyze(parse.(sqlparser.SelectStatement), "d", fakeSchemaInfo()) + _, _ = Analyze(parse, "d", fakeSchemaInfo()) } } } @@ -1356,7 +1429,7 @@ func BenchmarkAnalyzeOrderByQueries(b *testing.B) { parse, err := sqlparser.Parse(query) require.NoError(b, err) - _, _ = Analyze(parse.(sqlparser.SelectStatement), "d", fakeSchemaInfo()) + _, _ = Analyze(parse, "d", fakeSchemaInfo()) } } } @@ -1396,6 +1469,13 @@ func TestSingleUnshardedKeyspace(t *testing.T) { {Keyspace: ks1, Name: sqlparser.NewIdentifierCS("t")}, {Keyspace: ks1, Name: sqlparser.NewIdentifierCS("t")}, }, + }, { + query: "insert into t select * from t", + unsharded: ks1, + tables: []*vindexes.Table{ + {Keyspace: ks1, Name: sqlparser.NewIdentifierCS("t")}, + {Keyspace: ks1, Name: sqlparser.NewIdentifierCS("t")}, + }, }, } @@ -1409,6 +1489,82 @@ func TestSingleUnshardedKeyspace(t *testing.T) { } } +func TestNextErrors(t *testing.T) { + tests := []struct { + query, expectedError string + }{ + { + query: "select next 2 values from dual", + expectedError: "Table information is not provided in vschema for table `dual`", + }, { + query: "select next 2 values from t1", + expectedError: "NEXT used on a non-sequence table `t1`", + }, { + query: "select * from (select next 2 values from t1) dt", + expectedError: "Incorrect usage/placement of 'NEXT'", + }, + } + + for _, test := range tests { + t.Run(test.query, func(t *testing.T) { + parse, err := sqlparser.Parse(test.query) + require.NoError(t, err) + + _, err = Analyze(parse, "d", fakeSchemaInfo()) + assert.EqualError(t, err, test.expectedError) + }) + } +} + +func TestUpdateErrors(t *testing.T) { + tests := []struct { + query, expectedError string + }{ + { + query: "update t1, t2 set id = 12", + expectedError: "VT12001: unsupported: multiple (2) tables in update", + }, { + query: "update (select 1 from dual) dt set id = 1", + expectedError: "The target table dt of the UPDATE is not updatable", + }, + } + + for _, test := range tests { + t.Run(test.query, func(t *testing.T) { + parse, err := sqlparser.Parse(test.query) + require.NoError(t, err) + + st, err := Analyze(parse, "d", fakeSchemaInfo()) + if err == nil { + err = st.NotUnshardedErr + } + assert.EqualError(t, err, test.expectedError) + }) + } +} + +// TestScopingSubQueryJoinClause tests the scoping behavior of a subquery containing a join clause. +// The test ensures that the scoping analysis correctly identifies and handles the relationships +// between the tables involved in the join operation with the outer query. +func TestScopingSubQueryJoinClause(t *testing.T) { + query := "select (select 1 from u1 join u2 on u1.id = u2.id and u2.id = u3.id) x from u3" + + parse, err := sqlparser.Parse(query) + require.NoError(t, err) + + st, err := Analyze(parse, "user", &FakeSI{ + Tables: map[string]*vindexes.Table{ + "t": {Name: sqlparser.NewIdentifierCS("t")}, + }, + }) + require.NoError(t, err) + require.NoError(t, st.NotUnshardedErr) + + tb := st.DirectDeps(parse.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr.(*sqlparser.Subquery).Select.(*sqlparser.Select).From[0].(*sqlparser.JoinTableExpr).Condition.On) + require.Equal(t, 3, tb.NumberOfTables()) + +} + var ks1 = &vindexes.Keyspace{ Name: "ks1", Sharded: false, @@ -1422,6 +1578,9 @@ var ks3 = &vindexes.Keyspace{ Sharded: true, } +// create table t() +// create table t1(id bigint) +// create table t2(uid bigint, name varchar(255)) func fakeSchemaInfo() *FakeSI { cols1 := []vindexes.Column{{ Name: sqlparser.NewIdentifierCI("id"), diff --git a/go/vt/vtgate/semantics/binder.go b/go/vt/vtgate/semantics/binder.go index 446489928fc..d467a97c130 100644 --- a/go/vt/vtgate/semantics/binder.go +++ b/go/vt/vtgate/semantics/binder.go @@ -19,9 +19,8 @@ package semantics import ( "strings" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/engine/opcode" ) // binder is responsible for finding all the column references in @@ -62,6 +61,10 @@ func (b *binder) up(cursor *sqlparser.Cursor) error { switch node := cursor.Node().(type) { case *sqlparser.Subquery: currScope := b.scoper.currentScope() + // do not extract subquery in insert statement. + if _, isInsert := currScope.stmt.(*sqlparser.Insert); isInsert { + return nil + } sq, err := b.createExtractedSubquery(cursor, currScope, node) if err != nil { return err @@ -81,13 +84,6 @@ func (b *binder) up(cursor *sqlparser.Cursor) error { } currScope.joinUsing[ident.Lowered()] = deps.direct } - if len(node.Using) > 0 { - err := rewriteJoinUsing(currScope, node.Using, b.org) - if err != nil { - return err - } - node.Using = nil - } case *sqlparser.ColName: currentScope := b.scoper.currentScope() deps, err := b.resolveColumn(node, currentScope, false) @@ -113,6 +109,20 @@ func (b *binder) up(cursor *sqlparser.Cursor) error { } case *sqlparser.CountStar: b.bindCountStar(node) + case *sqlparser.Union: + info := b.tc.unionInfo[node] + // TODO: this check can be removed and available type information should be used. + if !info.isAuthoritative { + return nil + } + + for i, expr := range info.exprs { + ae := expr.(*sqlparser.AliasedExpr) + b.recursive[ae.Expr] = info.recursive[i] + if t := info.types[i]; t != nil { + b.typer.exprTypes[ae.Expr] = *t + } + } } return nil } @@ -129,7 +139,7 @@ func (b *binder) bindCountStar(node *sqlparser.CountStar) { } } default: - expr := tbl.getExpr() + expr := tbl.GetExpr() if expr != nil { setFor := b.tc.tableSetFor(expr) ts = ts.Merge(setFor) @@ -143,14 +153,14 @@ func (b *binder) bindCountStar(node *sqlparser.CountStar) { func (b *binder) rewriteJoinUsingColName(deps dependency, node *sqlparser.ColName, currentScope *scope) (dependency, error) { constituents := deps.recursive.Constituents() if len(constituents) < 1 { - return dependency{}, NewError(Buggy, "we should not have a *ColName that depends on nothing") + return dependency{}, &BuggyError{Msg: "we should not have a *ColName that depends on nothing"} } newTbl := constituents[0] infoFor, err := b.tc.tableInfoFor(newTbl) if err != nil { return dependency{}, err } - alias := infoFor.getExpr().As + alias := infoFor.GetExpr().As if alias.IsEmpty() { name, err := infoFor.Name() if err != nil { @@ -205,22 +215,22 @@ func (b *binder) setSubQueryDependencies(subq *sqlparser.Subquery, currScope *sc func (b *binder) createExtractedSubquery(cursor *sqlparser.Cursor, currScope *scope, subq *sqlparser.Subquery) (*sqlparser.ExtractedSubquery, error) { if currScope.stmt == nil { - return nil, NewError(Buggy, "unable to bind subquery to select statement") + return nil, &BuggyError{Msg: "unable to bind subquery to select statement"} } sq := &sqlparser.ExtractedSubquery{ Subquery: subq, Original: subq, - OpCode: int(engine.PulloutValue), + OpCode: int(opcode.PulloutValue), } switch par := cursor.Parent().(type) { case *sqlparser.ComparisonExpr: switch par.Operator { case sqlparser.InOp: - sq.OpCode = int(engine.PulloutIn) + sq.OpCode = int(opcode.PulloutIn) case sqlparser.NotInOp: - sq.OpCode = int(engine.PulloutNotIn) + sq.OpCode = int(opcode.PulloutNotIn) } subq, exp := GetSubqueryAndOtherSide(par) sq.Original = &sqlparser.ComparisonExpr{ @@ -230,7 +240,7 @@ func (b *binder) createExtractedSubquery(cursor *sqlparser.Cursor, currScope *sc } sq.OtherSide = exp case *sqlparser.ExistsExpr: - sq.OpCode = int(engine.PulloutExists) + sq.OpCode = int(opcode.PulloutExists) sq.Original = par } return sq, nil @@ -238,6 +248,8 @@ func (b *binder) createExtractedSubquery(cursor *sqlparser.Cursor, currScope *sc func (b *binder) resolveColumn(colName *sqlparser.ColName, current *scope, allowMulti bool) (dependency, error) { var thisDeps dependencies + first := true + var tableName *sqlparser.TableName for current != nil { var err error thisDeps, err = b.resolveColumnInScope(current, colName, allowMulti) @@ -256,9 +268,22 @@ func (b *binder) resolveColumn(colName *sqlparser.ColName, current *scope, allow } else if err != nil { return dependency{}, err } + if current.parent == nil && len(current.tables) == 1 && first && colName.Qualifier.IsEmpty() { + // if this is the top scope, and we still haven't been able to find a match, we know we are about to fail + // we can check this last scope and see if there is a single table. if there is just one table in the scope + // we assume that the column is meant to come from this table. + // we also check that this is the first scope we are looking in. + // If there are more scopes the column could come from, we can't assume anything + // This is just used for a clearer error message + name, err := current.tables[0].Name() + if err == nil { + tableName = &name + } + } + first = false current = current.parent } - return dependency{}, NewError(ColumnNotFound, colName) + return dependency{}, ShardedError{&ColumnNotFoundError{Column: colName, Table: tableName}} } func (b *binder) resolveColumnInScope(current *scope, expr *sqlparser.ColName, allowMulti bool) (dependencies, error) { @@ -275,14 +300,14 @@ func (b *binder) resolveColumnInScope(current *scope, expr *sqlparser.ColName, a } if deps, isUncertain := deps.(*uncertain); isUncertain && deps.fail { // if we have a failure from uncertain, we matched the column to multiple non-authoritative tables - return nil, ProjError{Inner: NewError(AmbiguousColumn, expr)} + return nil, ProjError{Inner: &AmbiguousColumnError{Column: sqlparser.String(expr)}} } return deps, nil } func makeAmbiguousError(colName *sqlparser.ColName, err error) error { if err == ambigousErr { - err = NewError(AmbiguousColumn, colName) + err = &AmbiguousColumnError{Column: sqlparser.String(colName)} } return err } diff --git a/go/vt/vtgate/semantics/bitset/bitset.go b/go/vt/vtgate/semantics/bitset/bitset.go index 6bb1e2785aa..898d55e1d95 100644 --- a/go/vt/vtgate/semantics/bitset/bitset.go +++ b/go/vt/vtgate/semantics/bitset/bitset.go @@ -50,16 +50,9 @@ func toBitset(words []byte) Bitset { return *(*Bitset)(unsafe.Pointer(&words)) } -func minlen(a, b Bitset) int { - if len(a) < len(b) { - return len(a) - } - return len(b) -} - // Overlaps returns whether this Bitset and the input have any bits in common func (bs Bitset) Overlaps(b2 Bitset) bool { - min := minlen(bs, b2) + min := min(len(bs), len(b2)) for i := 0; i < min; i++ { if bs[i]&b2[i] != 0 { return true @@ -126,7 +119,7 @@ func (bs Bitset) And(b2 Bitset) Bitset { return "" } - merged := make([]byte, minlen(bs, b2)) + merged := make([]byte, min(len(bs), len(b2))) m := 0 for m = 0; m < len(merged); m++ { diff --git a/go/vt/vtgate/semantics/check_invalid.go b/go/vt/vtgate/semantics/check_invalid.go new file mode 100644 index 00000000000..c5f5c016398 --- /dev/null +++ b/go/vt/vtgate/semantics/check_invalid.go @@ -0,0 +1,154 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package semantics + +import ( + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +func (a *analyzer) checkForInvalidConstructs(cursor *sqlparser.Cursor) error { + switch node := cursor.Node().(type) { + case *sqlparser.Update: + return checkUpdate(node) + case *sqlparser.Select: + return a.checkSelect(cursor, node) + case *sqlparser.Nextval: + return a.checkNextVal() + case *sqlparser.JoinTableExpr: + return a.checkJoin(node) + case *sqlparser.LockingFunc: + return &LockOnlyWithDualError{Node: node} + case *sqlparser.Union: + return checkUnion(node) + case *sqlparser.JSONTableExpr: + return &JSONTablesError{} + case *sqlparser.DerivedTable: + return checkDerived(node) + case *sqlparser.AssignmentExpr: + return vterrors.VT12001("Assignment expression") + case *sqlparser.Insert: + if node.Action == sqlparser.ReplaceAct { + return ShardedError{Inner: &UnsupportedConstruct{errString: "REPLACE INTO with sharded keyspace"}} + } + } + + return nil +} + +func checkDerived(node *sqlparser.DerivedTable) error { + if node.Lateral { + return vterrors.VT12001("lateral derived tables") + } + return nil +} + +func checkUnion(node *sqlparser.Union) error { + err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + case *sqlparser.ColName: + if !node.Qualifier.IsEmpty() { + return false, &QualifiedOrderInUnionError{Table: node.Qualifier.Name.String()} + } + case *sqlparser.Subquery: + return false, nil + } + return true, nil + }, node.OrderBy) + if err != nil { + return err + } + err = checkUnionColumns(node) + if err != nil { + return err + } + return nil +} + +func (a *analyzer) checkJoin(j *sqlparser.JoinTableExpr) error { + if j.Join == sqlparser.NaturalJoinType || j.Join == sqlparser.NaturalRightJoinType || j.Join == sqlparser.NaturalLeftJoinType { + return &UnsupportedNaturalJoinError{JoinExpr: j} + } + return nil +} +func (a *analyzer) checkNextVal() error { + currScope := a.scoper.currentScope() + if currScope.parent != nil { + // This is defensively checking that we are not inside a subquery or derived table + // Will probably already have been checked on the SELECT level + return &CantUseOptionHereError{Msg: "INTO"} + } + if len(currScope.tables) != 1 { + // This is defensively checking that we don't have too many tables. + // Hard to check this with unit tests, since the parser does not accept these queries + return &NextWithMultipleTablesError{CountTables: len(currScope.tables)} + } + vindexTbl := currScope.tables[0].GetVindexTable() + if vindexTbl == nil { + return &MissingInVSchemaError{ + Table: currScope.tables[0], + } + } + if vindexTbl.Type != vindexes.TypeSequence { + return &NotSequenceTableError{Table: vindexTbl.Name.String()} + } + return nil +} + +func (a *analyzer) checkSelect(cursor *sqlparser.Cursor, node *sqlparser.Select) error { + parent := cursor.Parent() + if _, isUnion := parent.(*sqlparser.Union); isUnion && node.SQLCalcFoundRows { + return &UnionWithSQLCalcFoundRowsError{} + } + if _, isRoot := parent.(*sqlparser.RootNode); !isRoot && node.SQLCalcFoundRows { + return &SQLCalcFoundRowsUsageError{} + } + errMsg := "INTO" + nextVal := false + if len(node.SelectExprs) == 1 { + if _, isNextVal := node.SelectExprs[0].(*sqlparser.Nextval); isNextVal { + nextVal = true + errMsg = "NEXT" + } + } + if !nextVal && node.Into == nil { + return nil + } + if a.scoper.currentScope().parent != nil { + return &CantUseOptionHereError{Msg: errMsg} + } + if node.Into != nil { + return ShardedError{Inner: &UnsupportedConstruct{errString: "INTO on sharded keyspace"}} + } + return nil +} + +func checkUpdate(node *sqlparser.Update) error { + if len(node.TableExprs) != 1 { + return ShardedError{Inner: &UnsupportedMultiTablesInUpdateError{ExprCount: len(node.TableExprs)}} + } + alias, isAlias := node.TableExprs[0].(*sqlparser.AliasedTableExpr) + if !isAlias { + return ShardedError{Inner: &UnsupportedMultiTablesInUpdateError{NotAlias: true}} + } + _, isDerived := alias.Expr.(*sqlparser.DerivedTable) + if isDerived { + return &TableNotUpdatableError{Table: alias.As.String()} + } + return nil +} diff --git a/go/vt/vtgate/semantics/derived_table.go b/go/vt/vtgate/semantics/derived_table.go index 7379fa43f4e..a88f39cf8af 100644 --- a/go/vt/vtgate/semantics/derived_table.go +++ b/go/vt/vtgate/semantics/derived_table.go @@ -33,41 +33,72 @@ type DerivedTable struct { cols []sqlparser.Expr tables TableSet isAuthoritative bool + + recursive []TableSet + types []*Type +} + +type unionInfo struct { + isAuthoritative bool + recursive []TableSet + types []*Type + exprs sqlparser.SelectExprs } var _ TableInfo = (*DerivedTable)(nil) -func createDerivedTableForExpressions(expressions sqlparser.SelectExprs, cols sqlparser.Columns, tables []TableInfo, org originable) *DerivedTable { - vTbl := &DerivedTable{isAuthoritative: true} +func createDerivedTableForExpressions( + expressions sqlparser.SelectExprs, + cols sqlparser.Columns, + tables []TableInfo, + org originable, + expanded bool, + recursiveDeps []TableSet, + types []*Type, +) *DerivedTable { + vTbl := &DerivedTable{isAuthoritative: expanded, recursive: recursiveDeps, types: types} for i, selectExpr := range expressions { switch expr := selectExpr.(type) { case *sqlparser.AliasedExpr: - vTbl.cols = append(vTbl.cols, expr.Expr) - if len(cols) > 0 { - vTbl.columnNames = append(vTbl.columnNames, cols[i].String()) - } else if expr.As.IsEmpty() { - switch expr := expr.Expr.(type) { - case *sqlparser.ColName: - // for projections, we strip out the qualifier and keep only the column name - vTbl.columnNames = append(vTbl.columnNames, expr.Name.String()) - default: - vTbl.columnNames = append(vTbl.columnNames, sqlparser.String(expr)) - } - } else { - vTbl.columnNames = append(vTbl.columnNames, expr.As.String()) - } + handleAliasedExpr(vTbl, expr, cols, i) case *sqlparser.StarExpr: - for _, table := range tables { - vTbl.tables = vTbl.tables.Merge(table.getTableSet(org)) - if !table.authoritative() { - vTbl.isAuthoritative = false - } - } + handleUnexpandedStarExpression(tables, vTbl, org) } } return vTbl } +func handleAliasedExpr(vTbl *DerivedTable, expr *sqlparser.AliasedExpr, cols sqlparser.Columns, i int) { + vTbl.cols = append(vTbl.cols, expr.Expr) + + if len(cols) > 0 { + vTbl.columnNames = append(vTbl.columnNames, cols[i].String()) + return + } + + if !expr.As.IsEmpty() { + vTbl.columnNames = append(vTbl.columnNames, expr.As.String()) + return + } + + switch expr := expr.Expr.(type) { + case *sqlparser.ColName: + // for projections, we strip out the qualifier and keep only the column name + vTbl.columnNames = append(vTbl.columnNames, expr.Name.String()) + default: + vTbl.columnNames = append(vTbl.columnNames, sqlparser.String(expr)) + } +} + +func handleUnexpandedStarExpression(tables []TableInfo, vTbl *DerivedTable, org originable) { + for _, table := range tables { + vTbl.tables = vTbl.tables.Merge(table.getTableSet(org)) + if !table.authoritative() { + vTbl.isAuthoritative = false + } + } +} + // dependencies implements the TableInfo interface func (dt *DerivedTable) dependencies(colName string, org originable) (dependencies, error) { directDeps := org.tableSetFor(dt.ASTNode) @@ -75,7 +106,7 @@ func (dt *DerivedTable) dependencies(colName string, org originable) (dependenci if !strings.EqualFold(name, colName) { continue } - _, recursiveDeps, qt := org.depsForExpr(dt.cols[i]) + recursiveDeps, qt := dt.recursive[i], dt.types[i] return createCertain(directDeps, recursiveDeps, qt), nil } @@ -105,7 +136,7 @@ func (dt *DerivedTable) Name() (sqlparser.TableName, error) { return dt.ASTNode.TableName() } -func (dt *DerivedTable) getExpr() *sqlparser.AliasedTableExpr { +func (dt *DerivedTable) GetExpr() *sqlparser.AliasedTableExpr { return dt.ASTNode } @@ -135,6 +166,9 @@ func (dt *DerivedTable) getTableSet(_ originable) TableSet { // GetExprFor implements the TableInfo interface func (dt *DerivedTable) getExprFor(s string) (sqlparser.Expr, error) { + if !dt.isAuthoritative { + return nil, vterrors.VT09015() + } for i, colName := range dt.columnNames { if colName == s { return dt.cols[i], nil diff --git a/go/vt/vtgate/semantics/early_rewriter.go b/go/vt/vtgate/semantics/early_rewriter.go index 4071cfe3837..ca1ebc6d2f4 100644 --- a/go/vt/vtgate/semantics/early_rewriter.go +++ b/go/vt/vtgate/semantics/early_rewriter.go @@ -17,9 +17,10 @@ limitations under the License. package semantics import ( + "fmt" "strconv" - "strings" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/vtgate/evalengine" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -38,76 +39,143 @@ type earlyRewriter struct { func (r *earlyRewriter) down(cursor *sqlparser.Cursor) error { switch node := cursor.Node().(type) { case *sqlparser.Where: - if node.Type != sqlparser.HavingClause { - return nil - } - rewriteHavingAndOrderBy(node, cursor.Parent()) + handleWhereClause(node, cursor.Parent()) case sqlparser.SelectExprs: - _, isSel := cursor.Parent().(*sqlparser.Select) - if !isSel { - return nil - } - err := r.expandStar(cursor, node) - if err != nil { - return err - } + return handleSelectExprs(r, cursor, node) case *sqlparser.JoinTableExpr: - if node.Join == sqlparser.StraightJoinType { - node.Join = sqlparser.NormalJoinType - r.warning = "straight join is converted to normal join" - } + handleJoinTableExpr(r, node) case sqlparser.OrderBy: - r.clause = "order clause" - rewriteHavingAndOrderBy(node, cursor.Parent()) + handleOrderBy(r, cursor, node) case *sqlparser.OrExpr: - newNode := rewriteOrFalse(*node) - if newNode != nil { - cursor.Replace(newNode) - } + rewriteOrExpr(cursor, node) case sqlparser.GroupBy: r.clause = "group statement" - case *sqlparser.Literal: - newNode, err := r.rewriteOrderByExpr(node) - if err != nil { - return err - } - if newNode != nil { - cursor.Replace(newNode) - } + return handleLiteral(r, cursor, node) case *sqlparser.CollateExpr: - lit, ok := node.Expr.(*sqlparser.Literal) - if !ok { - return nil - } - newNode, err := r.rewriteOrderByExpr(lit) - if err != nil { - return err - } - if newNode != nil { - node.Expr = newNode - } + return handleCollateExpr(r, node) case *sqlparser.ComparisonExpr: - lft, lftOK := node.Left.(sqlparser.ValTuple) - rgt, rgtOK := node.Right.(sqlparser.ValTuple) - if !lftOK || !rgtOK || len(lft) != len(rgt) || node.Operator != sqlparser.EqualOp { - return nil - } - var predicates []sqlparser.Expr - for i, l := range lft { - r := rgt[i] - predicates = append(predicates, &sqlparser.ComparisonExpr{ - Operator: sqlparser.EqualOp, - Left: l, - Right: r, - Escape: node.Escape, - }) + return handleComparisonExpr(cursor, node) + } + return nil +} + +func (r *earlyRewriter) up(cursor *sqlparser.Cursor) error { + // this rewriting is done in the `up` phase, because we need the scope to have been + // filled in with the available tables + node, ok := cursor.Node().(*sqlparser.JoinTableExpr) + if !ok || len(node.Condition.Using) == 0 { + return nil + } + + err := rewriteJoinUsing(r.binder, node) + if err != nil { + return err + } + + // since the binder has already been over the join, we need to invoke it again so it + // can bind columns to the right tables + sqlparser.Rewrite(node.Condition.On, nil, func(cursor *sqlparser.Cursor) bool { + innerErr := r.binder.up(cursor) + if innerErr == nil { + return true } - cursor.Replace(sqlparser.AndExpressions(predicates...)) + + err = innerErr + return false + }) + return err +} + +// handleWhereClause processes WHERE clauses, specifically the HAVING clause. +func handleWhereClause(node *sqlparser.Where, parent sqlparser.SQLNode) { + if node.Type != sqlparser.HavingClause { + return + } + rewriteHavingAndOrderBy(node, parent) +} + +// handleSelectExprs expands * in SELECT expressions. +func handleSelectExprs(r *earlyRewriter, cursor *sqlparser.Cursor, node sqlparser.SelectExprs) error { + _, isSel := cursor.Parent().(*sqlparser.Select) + if !isSel { + return nil + } + return r.expandStar(cursor, node) +} + +// handleJoinTableExpr processes JOIN table expressions and handles the Straight Join type. +func handleJoinTableExpr(r *earlyRewriter, node *sqlparser.JoinTableExpr) { + if node.Join != sqlparser.StraightJoinType { + return + } + node.Join = sqlparser.NormalJoinType + r.warning = "straight join is converted to normal join" +} + +// handleOrderBy processes the ORDER BY clause. +func handleOrderBy(r *earlyRewriter, cursor *sqlparser.Cursor, node sqlparser.OrderBy) { + r.clause = "order clause" + rewriteHavingAndOrderBy(node, cursor.Parent()) +} + +// rewriteOrExpr rewrites OR expressions when the right side is FALSE. +func rewriteOrExpr(cursor *sqlparser.Cursor, node *sqlparser.OrExpr) { + newNode := rewriteOrFalse(*node) + if newNode != nil { + cursor.Replace(newNode) + } +} + +// handleLiteral processes literals within the context of ORDER BY expressions. +func handleLiteral(r *earlyRewriter, cursor *sqlparser.Cursor, node *sqlparser.Literal) error { + newNode, err := r.rewriteOrderByExpr(node) + if err != nil { + return err + } + if newNode != nil { + cursor.Replace(newNode) } return nil } +// handleCollateExpr processes COLLATE expressions. +func handleCollateExpr(r *earlyRewriter, node *sqlparser.CollateExpr) error { + lit, ok := node.Expr.(*sqlparser.Literal) + if !ok { + return nil + } + newNode, err := r.rewriteOrderByExpr(lit) + if err != nil { + return err + } + if newNode != nil { + node.Expr = newNode + } + return nil +} + +// handleComparisonExpr processes Comparison expressions, specifically for tuples with equal length and EqualOp operator. +func handleComparisonExpr(cursor *sqlparser.Cursor, node *sqlparser.ComparisonExpr) error { + lft, lftOK := node.Left.(sqlparser.ValTuple) + rgt, rgtOK := node.Right.(sqlparser.ValTuple) + if !lftOK || !rgtOK || len(lft) != len(rgt) || node.Operator != sqlparser.EqualOp { + return nil + } + var predicates []sqlparser.Expr + for i, l := range lft { + r := rgt[i] + predicates = append(predicates, &sqlparser.ComparisonExpr{ + Operator: sqlparser.EqualOp, + Left: l, + Right: r, + Escape: node.Escape, + }) + } + cursor.Replace(sqlparser.AndExpressions(predicates...)) + return nil +} + func (r *earlyRewriter) expandStar(cursor *sqlparser.Cursor, node sqlparser.SelectExprs) error { currentScope := r.scoper.currentScope() var selExprs sqlparser.SelectExprs @@ -135,61 +203,69 @@ func (r *earlyRewriter) expandStar(cursor *sqlparser.Cursor, node sqlparser.Sele return nil } -// rewriteHavingAndOrderBy rewrites columns on the ORDER BY/HAVING -// clauses to use aliases from the SELECT expressions when available. -// The scoping rules are: -// - A column identifier with no table qualifier that matches an alias introduced -// in SELECT points to that expression, and not at any table column -// - Except when expression aliased is an aggregation, and the column identifier in the -// HAVING/ORDER BY clause is inside an aggregation function -// -// This is a fucking weird scoping rule, but it's what MySQL seems to do... ¯\_(ツ)_/¯ +// rewriteHavingAndOrderBy rewrites columns in the ORDER BY and HAVING clauses to use aliases +// from the SELECT expressions when applicable, following MySQL scoping rules: +// - A column identifier without a table qualifier that matches an alias introduced +// in SELECT points to that expression, not any table column. +// - However, if the aliased expression is an aggregation and the column identifier in +// the HAVING/ORDER BY clause is inside an aggregation function, the rule does not apply. func rewriteHavingAndOrderBy(node, parent sqlparser.SQLNode) { - // TODO - clean up and comment this mess sel, isSel := parent.(*sqlparser.Select) if !isSel { return } - sqlparser.SafeRewrite(node, func(node, _ sqlparser.SQLNode) bool { - _, isSubQ := node.(*sqlparser.Subquery) - return !isSubQ - }, func(cursor *sqlparser.Cursor) bool { - col, ok := cursor.Node().(*sqlparser.ColName) - if !ok { - return true - } - if !col.Qualifier.IsEmpty() { - return true - } - _, parentIsAggr := cursor.Parent().(sqlparser.AggrFunc) - for _, e := range sel.SelectExprs { - ae, ok := e.(*sqlparser.AliasedExpr) - if !ok || !ae.As.Equal(col.Name) { - continue - } - _, aliasPointsToAggr := ae.Expr.(sqlparser.AggrFunc) - if parentIsAggr && aliasPointsToAggr { - return false + sqlparser.SafeRewrite(node, avoidSubqueries, + func(cursor *sqlparser.Cursor) bool { + col, ok := cursor.Node().(*sqlparser.ColName) + if !ok || !col.Qualifier.IsEmpty() { + // we are only interested in columns not qualified by table names + return true } - safeToRewrite := true - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - switch node.(type) { - case *sqlparser.ColName: - safeToRewrite = false - return false, nil - case sqlparser.AggrFunc: - return false, nil + _, parentIsAggr := cursor.Parent().(sqlparser.AggrFunc) + + // Iterate through SELECT expressions. + for _, e := range sel.SelectExprs { + ae, ok := e.(*sqlparser.AliasedExpr) + if !ok || !ae.As.Equal(col.Name) { + // we are searching for aliased expressions that match the column we have found + continue + } + + expr := ae.Expr + if parentIsAggr { + if _, aliasPointsToAggr := expr.(sqlparser.AggrFunc); aliasPointsToAggr { + return false + } + } + + if isSafeToRewrite(expr) { + cursor.Replace(expr) } - return true, nil - }, ae.Expr) - if safeToRewrite { - cursor.Replace(ae.Expr) } - } - return true - }) + return true + }) +} + +func avoidSubqueries(node, _ sqlparser.SQLNode) bool { + _, isSubQ := node.(*sqlparser.Subquery) + return !isSubQ +} + +func isSafeToRewrite(e sqlparser.Expr) bool { + safeToRewrite := true + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node.(type) { + case *sqlparser.ColName: + safeToRewrite = false + return false, nil + case sqlparser.AggrFunc: + return false, nil + } + return true, nil + }, e) + return safeToRewrite } func (r *earlyRewriter) rewriteOrderByExpr(node *sqlparser.Literal) (sqlparser.Expr, error) { @@ -262,7 +338,7 @@ func rewriteOrFalse(orExpr sqlparser.OrExpr) sqlparser.Expr { return false } - boolValue, err := res.Value().ToBool() + boolValue, err := res.Value(collations.Default()).ToBool() if err != nil { return false } @@ -279,67 +355,144 @@ func rewriteOrFalse(orExpr sqlparser.OrExpr) sqlparser.Expr { return nil } -func rewriteJoinUsing( - current *scope, - using sqlparser.Columns, - org originable, -) error { - joinUsing := current.prepareUsingMap() - predicates := make([]sqlparser.Expr, 0, len(using)) - for _, column := range using { - var foundTables []sqlparser.TableName - for _, tbl := range current.tables { - if !tbl.authoritative() { - return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "can't handle JOIN USING without authoritative tables") - } +// rewriteJoinUsing rewrites SQL JOINs that use the USING clause to their equivalent +// JOINs with the ON condition. This function finds all the tables that have the +// specified columns in the USING clause, constructs an equality predicate for +// each pair of tables, and adds the resulting predicates to the WHERE clause +// of the outermost SELECT statement. +// +// For example, given the query: +// +// SELECT * FROM t1 JOIN t2 USING (col1, col2) +// +// The rewriteJoinUsing function will rewrite the query to: +// +// SELECT * FROM t1 JOIN t2 ON (t1.col1 = t2.col1 AND t1.col2 = t2.col2) +// +// This function returns an error if it encounters a non-authoritative table or +// if it cannot find a SELECT statement to add the WHERE predicate to. +func rewriteJoinUsing(b *binder, join *sqlparser.JoinTableExpr) error { + predicates, err := buildJoinPredicates(b, join) + if err != nil { + return err + } + if len(predicates) > 0 { + join.Condition.On = sqlparser.AndExpressions(predicates...) + join.Condition.Using = nil + } + return nil +} - currTable := tbl.getTableSet(org) - usingCols := joinUsing[currTable] - if usingCols == nil { - usingCols = map[string]TableSet{} - } - for _, col := range tbl.getColumns() { - _, found := usingCols[strings.ToLower(col.Name)] - if found { - tblName, err := tbl.Name() - if err != nil { - return err - } +// buildJoinPredicates constructs the join predicates for a given set of USING columns. +// It returns a slice of sqlparser.Expr, each representing a join predicate for the given columns. +func buildJoinPredicates(b *binder, join *sqlparser.JoinTableExpr) ([]sqlparser.Expr, error) { + var predicates []sqlparser.Expr - foundTables = append(foundTables, tblName) - break // no need to look at other columns in this table - } + for _, column := range join.Condition.Using { + foundTables, err := findTablesWithColumn(b, join, column) + if err != nil { + return nil, err + } + + predicates = append(predicates, createComparisonPredicates(column, foundTables)...) + } + + return predicates, nil +} + +func findOnlyOneTableInfoThatHasColumn(b *binder, tbl sqlparser.TableExpr, column sqlparser.IdentifierCI) ([]TableInfo, error) { + switch tbl := tbl.(type) { + case *sqlparser.AliasedTableExpr: + ts := b.tc.tableSetFor(tbl) + tblInfo := b.tc.Tables[ts.TableOffset()] + for _, info := range tblInfo.getColumns() { + if column.EqualString(info.Name) { + return []TableInfo{tblInfo}, nil } } - for i, lft := range foundTables { - for j := i + 1; j < len(foundTables); j++ { - rgt := foundTables[j] - predicates = append(predicates, &sqlparser.ComparisonExpr{ - Operator: sqlparser.EqualOp, - Left: sqlparser.NewColNameWithQualifier(column.String(), lft), - Right: sqlparser.NewColNameWithQualifier(column.String(), rgt), - }) + return nil, nil + case *sqlparser.JoinTableExpr: + tblInfoR, err := findOnlyOneTableInfoThatHasColumn(b, tbl.RightExpr, column) + if err != nil { + return nil, err + } + tblInfoL, err := findOnlyOneTableInfoThatHasColumn(b, tbl.LeftExpr, column) + if err != nil { + return nil, err + } + + return append(tblInfoL, tblInfoR...), nil + case *sqlparser.ParenTableExpr: + var tblInfo []TableInfo + for _, parenTable := range tbl.Exprs { + newTblInfo, err := findOnlyOneTableInfoThatHasColumn(b, parenTable, column) + if err != nil { + return nil, err + } + if tblInfo != nil && newTblInfo != nil { + return nil, vterrors.VT03021(column.String()) + } + if newTblInfo != nil { + tblInfo = newTblInfo } } + return tblInfo, nil + default: + panic(fmt.Sprintf("unsupported TableExpr type in JOIN: %T", tbl)) } +} - // now, we go up the scope until we find a SELECT with a where clause we can add this predicate to - for current != nil { - sel, found := current.stmt.(*sqlparser.Select) - if found { - if sel.Where == nil { - sel.Where = &sqlparser.Where{ - Type: sqlparser.WhereClause, - Expr: sqlparser.AndExpressions(predicates...), - } - } else { - sel.Where.Expr = sqlparser.AndExpressions(append(predicates, sel.Where.Expr)...) - } - return nil +// findTablesWithColumn finds the tables with the specified column in the current scope. +func findTablesWithColumn(b *binder, join *sqlparser.JoinTableExpr, column sqlparser.IdentifierCI) ([]sqlparser.TableName, error) { + leftTableInfo, err := findOnlyOneTableInfoThatHasColumn(b, join.LeftExpr, column) + if err != nil { + return nil, err + } + + rightTableInfo, err := findOnlyOneTableInfoThatHasColumn(b, join.RightExpr, column) + if err != nil { + return nil, err + } + + if leftTableInfo == nil || rightTableInfo == nil { + return nil, ShardedError{Inner: vterrors.VT09015()} + } + var tableNames []sqlparser.TableName + for _, info := range leftTableInfo { + nm, err := info.Name() + if err != nil { + return nil, err + } + tableNames = append(tableNames, nm) + } + for _, info := range rightTableInfo { + nm, err := info.Name() + if err != nil { + return nil, err } - current = current.parent + tableNames = append(tableNames, nm) + } + return tableNames, nil +} + +// createComparisonPredicates creates a list of comparison predicates between the given column and foundTables. +func createComparisonPredicates(column sqlparser.IdentifierCI, foundTables []sqlparser.TableName) []sqlparser.Expr { + var predicates []sqlparser.Expr + for i, lft := range foundTables { + for j := i + 1; j < len(foundTables); j++ { + rgt := foundTables[j] + predicates = append(predicates, createComparisonBetween(column, lft, rgt)) + } + } + return predicates +} + +func createComparisonBetween(column sqlparser.IdentifierCI, lft, rgt sqlparser.TableName) *sqlparser.ComparisonExpr { + return &sqlparser.ComparisonExpr{ + Operator: sqlparser.EqualOp, + Left: sqlparser.NewColNameWithQualifier(column.String(), lft), + Right: sqlparser.NewColNameWithQualifier(column.String(), rgt), } - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "did not find WHERE clause") } func (r *earlyRewriter) expandTableColumns( @@ -349,9 +502,15 @@ func (r *earlyRewriter) expandTableColumns( org originable, ) (bool, sqlparser.SelectExprs, error) { unknownTbl := true - var colNames sqlparser.SelectExprs starExpanded := true - expandedColumns := map[sqlparser.TableName][]*sqlparser.ColName{} + state := &expanderState{ + colNames: []sqlparser.SelectExpr{}, + needsQualifier: len(tables) > 1, + joinUsing: joinUsing, + org: org, + expandedColumns: map[sqlparser.TableName][]*sqlparser.ColName{}, + } + for _, tbl := range tables { if !starExpr.TableName.IsEmpty() && !tbl.matches(starExpr.TableName) { continue @@ -361,88 +520,114 @@ func (r *earlyRewriter) expandTableColumns( starExpanded = false break } - tblName, err := tbl.Name() + err := state.processColumnsFor(tbl) if err != nil { return false, nil, err } + } - needsQualifier := len(tables) > 1 - tableAliased := !tbl.getExpr().As.IsEmpty() - withQualifier := needsQualifier || tableAliased - currTable := tbl.getTableSet(org) - usingCols := joinUsing[currTable] - if usingCols == nil { - usingCols = map[string]TableSet{} - } + if unknownTbl { + // This will only happen for case when starExpr has qualifier. + return false, nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.BadDb, "Unknown table '%s'", sqlparser.String(starExpr.TableName)) + } - addColName := func(col ColumnInfo) { - var colName *sqlparser.ColName - var alias sqlparser.IdentifierCI - if withQualifier { - colName = sqlparser.NewColNameWithQualifier(col.Name, tblName) - } else { - colName = sqlparser.NewColName(col.Name) - } - if needsQualifier { - alias = sqlparser.NewIdentifierCI(col.Name) - } - colNames = append(colNames, &sqlparser.AliasedExpr{Expr: colName, As: alias}) - vt := tbl.GetVindexTable() - if vt != nil { - keyspace := vt.Keyspace - var ks sqlparser.IdentifierCS - if keyspace != nil { - ks = sqlparser.NewIdentifierCS(keyspace.Name) - } - tblName := sqlparser.TableName{ - Name: tblName.Name, - Qualifier: ks, - } - expandedColumns[tblName] = append(expandedColumns[tblName], colName) - } + if starExpanded { + for k, v := range state.expandedColumns { + r.expandedColumns[k] = v } + } + + return starExpanded, state.colNames, nil +} + +func (e *expanderState) processColumnsFor(tbl TableInfo) error { + tblName, err := tbl.Name() + if err != nil { + return err + } + currTable := tbl.getTableSet(e.org) + usingCols := e.joinUsing[currTable] + if usingCols == nil { + usingCols = map[string]TableSet{} + } + + /* + Redundant column elimination and column ordering occurs according to standard SQL, producing this display order: + * First, coalesced common columns of the two joined tables, in the order in which they occur in the first table + * Second, columns unique to the first table, in order in which they occur in that table + * Third, columns unique to the second table, in order in which they occur in that table - /* - Redundant column elimination and column ordering occurs according to standard SQL, producing this display order: - * First, coalesced common columns of the two joined tables, in the order in which they occur in the first table - * Second, columns unique to the first table, in order in which they occur in that table - * Third, columns unique to the second table, in order in which they occur in that table - - From: https://dev.mysql.com/doc/refman/8.0/en/join.html - */ - outer: - // in this first loop we just find columns used in any JOIN USING used on this table - for _, col := range tbl.getColumns() { - ts, found := usingCols[col.Name] - if found { - for i, ts := range ts.Constituents() { - if ts == currTable { - if i == 0 { - addColName(col) - } else { - continue outer - } + From: https://dev.mysql.com/doc/refman/8.0/en/join.html + */ + +outer: + // in this first loop we just find columns used in any JOIN USING used on this table + for _, col := range tbl.getColumns() { + ts, found := usingCols[col.Name] + if found { + for i, ts := range ts.Constituents() { + if ts == currTable { + if i == 0 { + e.addColumn(col, tbl, tblName) + } else { + continue outer } } } } + } - // and this time around we are printing any columns not involved in any JOIN USING - for _, col := range tbl.getColumns() { - if ts, found := usingCols[col.Name]; found && currTable.IsSolvedBy(ts) { - continue - } - - addColName(col) + // and this time around we are printing any columns not involved in any JOIN USING + for _, col := range tbl.getColumns() { + if ts, found := usingCols[col.Name]; found && currTable.IsSolvedBy(ts) { + continue } + + e.addColumn(col, tbl, tblName) } + return nil +} - if unknownTbl { - // This will only happen for case when starExpr has qualifier. - return false, nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.BadDb, "Unknown table '%s'", sqlparser.String(starExpr.TableName)) +type expanderState struct { + needsQualifier bool + colNames sqlparser.SelectExprs + joinUsing map[TableSet]map[string]TableSet + org originable + expandedColumns map[sqlparser.TableName][]*sqlparser.ColName +} + +// addColumn adds columns to the expander state. If we have vschema info about the query, +// we also store which columns were expanded +func (e *expanderState) addColumn(col ColumnInfo, tbl TableInfo, tblName sqlparser.TableName) { + tableAliased := !tbl.GetExpr().As.IsEmpty() + withQualifier := e.needsQualifier || tableAliased + var colName *sqlparser.ColName + var alias sqlparser.IdentifierCI + if withQualifier { + colName = sqlparser.NewColNameWithQualifier(col.Name, tblName) + } else { + colName = sqlparser.NewColName(col.Name) } - if starExpanded { - r.expandedColumns = expandedColumns + if e.needsQualifier { + alias = sqlparser.NewIdentifierCI(col.Name) + } + e.colNames = append(e.colNames, &sqlparser.AliasedExpr{Expr: colName, As: alias}) + e.storeExpandInfo(tbl, tblName, colName) +} + +func (e *expanderState) storeExpandInfo(tbl TableInfo, tblName sqlparser.TableName, colName *sqlparser.ColName) { + vt := tbl.GetVindexTable() + if vt == nil { + return + } + keyspace := vt.Keyspace + var ks sqlparser.IdentifierCS + if keyspace != nil { + ks = sqlparser.NewIdentifierCS(keyspace.Name) + } + tblName = sqlparser.TableName{ + Name: tblName.Name, + Qualifier: ks, } - return starExpanded, colNames, nil + e.expandedColumns[tblName] = append(e.expandedColumns[tblName], colName) } diff --git a/go/vt/vtgate/semantics/early_rewriter_test.go b/go/vt/vtgate/semantics/early_rewriter_test.go index 1edd45a9c4d..2846bfd9366 100644 --- a/go/vt/vtgate/semantics/early_rewriter_test.go +++ b/go/vt/vtgate/semantics/early_rewriter_test.go @@ -17,6 +17,8 @@ limitations under the License. package semantics import ( + "sort" + "strings" "testing" "github.com/stretchr/testify/assert" @@ -98,10 +100,10 @@ func TestExpandStar(t *testing.T) { } cDB := "db" tcases := []struct { - sql string - expSQL string - expErr string - colExpandedNumber int + sql string + expSQL string + expErr string + expanded string }{{ sql: "select * from t1", expSQL: "select a, b, c from t1", @@ -109,22 +111,21 @@ func TestExpandStar(t *testing.T) { sql: "select t1.* from t1", expSQL: "select a, b, c from t1", }, { - sql: "select *, 42, t1.* from t1", - expSQL: "select a, b, c, 42, a, b, c from t1", - colExpandedNumber: 6, + sql: "select *, 42, t1.* from t1", + expSQL: "select a, b, c, 42, a, b, c from t1", }, { sql: "select 42, t1.* from t1", expSQL: "select 42, a, b, c from t1", }, { - sql: "select * from t1, t2", - expSQL: "select t1.a as a, t1.b as b, t1.c as c, t2.c1 as c1, t2.c2 as c2 from t1, t2", + sql: "select * from t1, t2", + expSQL: "select t1.a as a, t1.b as b, t1.c as c, t2.c1 as c1, t2.c2 as c2 from t1, t2", + expanded: "main.t1.a, main.t1.b, main.t1.c, main.t2.c1, main.t2.c2", }, { sql: "select t1.* from t1, t2", expSQL: "select t1.a as a, t1.b as b, t1.c as c from t1, t2", }, { - sql: "select *, t1.* from t1, t2", - expSQL: "select t1.a as a, t1.b as b, t1.c as c, t2.c1 as c1, t2.c2 as c2, t1.a as a, t1.b as b, t1.c as c from t1, t2", - colExpandedNumber: 6, + sql: "select *, t1.* from t1, t2", + expSQL: "select t1.a as a, t1.b as b, t1.c as c, t2.c1 as c1, t2.c2 as c2, t1.a as a, t1.b as b, t1.c as c from t1, t2", }, { // aliased table sql: "select * from t1 a, t2 b", expSQL: "select a.a as a, a.b as b, a.c as c, b.c1 as c1, b.c2 as c2 from t1 as a, t2 as b", @@ -144,23 +145,31 @@ func TestExpandStar(t *testing.T) { sql: "select * from t1 join t2 on t1.a = t2.c1", expSQL: "select t1.a as a, t1.b as b, t1.c as c, t2.c1 as c1, t2.c2 as c2 from t1 join t2 on t1.a = t2.c1", }, { - sql: "select * from t2 join t4 using (c1)", - expSQL: "select t2.c1 as c1, t2.c2 as c2, t4.c4 as c4 from t2 join t4 where t2.c1 = t4.c1", + sql: "select * from t1 left join t2 on t1.a = t2.c1", + expSQL: "select t1.a as a, t1.b as b, t1.c as c, t2.c1 as c1, t2.c2 as c2 from t1 left join t2 on t1.a = t2.c1", + }, { + sql: "select * from t1 right join t2 on t1.a = t2.c1", + expSQL: "select t1.a as a, t1.b as b, t1.c as c, t2.c1 as c1, t2.c2 as c2 from t1 right join t2 on t1.a = t2.c1", + }, { + sql: "select * from t2 join t4 using (c1)", + expSQL: "select t2.c1 as c1, t2.c2 as c2, t4.c4 as c4 from t2 join t4 on t2.c1 = t4.c1", + expanded: "main.t2.c1, main.t2.c2, main.t4.c4", }, { sql: "select * from t2 join t4 using (c1) join t2 as X using (c1)", - expSQL: "select t2.c1 as c1, t2.c2 as c2, t4.c4 as c4, X.c2 as c2 from t2 join t4 join t2 as X where t2.c1 = t4.c1 and t2.c1 = X.c1 and t4.c1 = X.c1", + expSQL: "select t2.c1 as c1, t2.c2 as c2, t4.c4 as c4, X.c2 as c2 from t2 join t4 on t2.c1 = t4.c1 join t2 as X on t2.c1 = t4.c1 and t2.c1 = X.c1 and t4.c1 = X.c1", }, { sql: "select * from t2 join t4 using (c1), t2 as t2b join t4 as t4b using (c1)", - expSQL: "select t2.c1 as c1, t2.c2 as c2, t4.c4 as c4, t2b.c1 as c1, t2b.c2 as c2, t4b.c4 as c4 from t2 join t4, t2 as t2b join t4 as t4b where t2b.c1 = t4b.c1 and t2.c1 = t4.c1", + expSQL: "select t2.c1 as c1, t2.c2 as c2, t4.c4 as c4, t2b.c1 as c1, t2b.c2 as c2, t4b.c4 as c4 from t2 join t4 on t2.c1 = t4.c1, t2 as t2b join t4 as t4b on t2b.c1 = t4b.c1", }, { - sql: "select * from t1 join t5 using (b)", - expSQL: "select t1.b as b, t1.a as a, t1.c as c, t5.a as a from t1 join t5 where t1.b = t5.b", + sql: "select * from t1 join t5 using (b)", + expSQL: "select t1.b as b, t1.a as a, t1.c as c, t5.a as a from t1 join t5 on t1.b = t5.b", + expanded: "main.t1.a, main.t1.b, main.t1.c, main.t5.a", }, { sql: "select * from t1 join t5 using (b) having b = 12", - expSQL: "select t1.b as b, t1.a as a, t1.c as c, t5.a as a from t1 join t5 where t1.b = t5.b having b = 12", + expSQL: "select t1.b as b, t1.a as a, t1.c as c, t5.a as a from t1 join t5 on t1.b = t5.b having b = 12", }, { sql: "select 1 from t1 join t5 using (b) having b = 12", - expSQL: "select 1 from t1 join t5 where t1.b = t5.b having t1.b = 12", + expSQL: "select 1 from t1 join t5 on t1.b = t5.b having t1.b = 12", }, { sql: "select * from (select 12) as t", expSQL: "select t.`12` from (select 12 from dual) as t", @@ -180,34 +189,11 @@ func TestExpandStar(t *testing.T) { require.True(t, isSelectStatement, "analyzer expects a select statement") st, err := Analyze(selectStatement, cDB, schemaInfo) if tcase.expErr == "" { - found := 0 - outer: - for _, selExpr := range selectStatement.SelectExprs { - aliasedExpr, isAliased := selExpr.(*sqlparser.AliasedExpr) - if !isAliased { - continue - } - for _, tbl := range st.ExpandedColumns { - for _, col := range tbl { - if sqlparser.Equals.Expr(aliasedExpr.Expr, col) { - found++ - continue outer - } - } - } - } - if tcase.colExpandedNumber == 0 { - for _, tbl := range st.ExpandedColumns { - found -= len(tbl) - } - require.Zero(t, found) - } else { - require.Equal(t, tcase.colExpandedNumber, found) - } require.NoError(t, err) require.NoError(t, st.NotUnshardedErr) require.NoError(t, st.NotSingleRouteErr) assert.Equal(t, tcase.expSQL, sqlparser.String(selectStatement)) + assertExpandedColumns(t, st, tcase.expanded) } else { require.EqualError(t, err, tcase.expErr) } @@ -215,6 +201,22 @@ func TestExpandStar(t *testing.T) { } } +func assertExpandedColumns(t *testing.T, st *SemTable, expandedColumns string) { + t.Helper() + if expandedColumns == "" { + return + } + var expanded []string + for tbl, cols := range st.ExpandedColumns { + for _, col := range cols { + col.Qualifier = tbl + expanded = append(expanded, sqlparser.String(col)) + } + } + sort.Strings(expanded) + assert.Equal(t, expandedColumns, strings.Join(expanded, ", ")) +} + func TestRewriteJoinUsingColumns(t *testing.T) { schemaInfo := &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -269,13 +271,16 @@ func TestRewriteJoinUsingColumns(t *testing.T) { expErr string }{{ sql: "select 1 from t1 join t2 using (a) where a = 42", - expSQL: "select 1 from t1 join t2 where t1.a = t2.a and t1.a = 42", + expSQL: "select 1 from t1 join t2 on t1.a = t2.a where t1.a = 42", }, { sql: "select 1 from t1 join t2 using (a), t3 where a = 42", expErr: "Column 'a' in field list is ambiguous", }, { sql: "select 1 from t1 join t2 using (a), t1 as b join t3 on (a) where a = 42", expErr: "Column 'a' in field list is ambiguous", + }, { + sql: "select 1 from t1 left join t2 using (a) where a = 42", + expSQL: "select 1 from t1 left join t2 on t1.a = t2.a where t1.a = 42", }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { diff --git a/go/vt/vtgate/semantics/errors.go b/go/vt/vtgate/semantics/errors.go index 3088fe19430..520dda98c42 100644 --- a/go/vt/vtgate/semantics/errors.go +++ b/go/vt/vtgate/semantics/errors.go @@ -25,179 +25,261 @@ import ( ) type ( - ErrorCode int - - Error struct { - Code ErrorCode - args []any + unsupportedError interface { + error + unsupported() } - ErrType int - - info struct { - format string - state vterrors.State - code vtrpcpb.Code - id string - typ ErrType + + bugError interface { + error + bug() } ) -const ( - Other ErrType = iota - Unsupported - Bug -) +func eprintf(e error, format string, args ...any) string { + switch e.(type) { + case unsupportedError: + format = "VT12001: unsupported: " + format + case bugError: + format = "VT13001: [BUG] " + format + } + return fmt.Sprintf(format, args...) +} -const ( - UnionColumnsDoNotMatch ErrorCode = iota - UnsupportedMultiTablesInUpdate - UnsupportedNaturalJoin - TableNotUpdatable - UnionWithSQLCalcFoundRows - SQLCalcFoundRowsUsage - CantUseOptionHere - MissingInVSchema - NotSequenceTable - NextWithMultipleTables - LockOnlyWithDual - QualifiedOrderInUnion - JSONTables - Buggy - ColumnNotFound - AmbiguousColumn -) +// Specific error implementations follow -func NewError(code ErrorCode, args ...any) *Error { - return &Error{ - Code: code, - args: args, - } +// UnionColumnsDoNotMatchError +type UnionColumnsDoNotMatchError struct { + FirstProj int + SecondProj int } -var errors = map[ErrorCode]info{ - UnionColumnsDoNotMatch: { - format: "The used SELECT statements have a different number of columns", - state: vterrors.WrongNumberOfColumnsInSelect, - code: vtrpcpb.Code_FAILED_PRECONDITION, - }, - UnsupportedMultiTablesInUpdate: { - format: "multiple tables in update", - typ: Unsupported, - }, - TableNotUpdatable: { - format: "The target table %s of the UPDATE is not updatable", - state: vterrors.NonUpdateableTable, - code: vtrpcpb.Code_INVALID_ARGUMENT, - }, - UnsupportedNaturalJoin: { - format: "%s", - typ: Unsupported, - }, - UnionWithSQLCalcFoundRows: { - format: "SQL_CALC_FOUND_ROWS not supported with union", - typ: Unsupported, - }, - SQLCalcFoundRowsUsage: { - format: "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'", - code: vtrpcpb.Code_INVALID_ARGUMENT, - }, - CantUseOptionHere: { - format: "Incorrect usage/placement of '%s'", - state: vterrors.CantUseOptionHere, - code: vtrpcpb.Code_INVALID_ARGUMENT, - }, - MissingInVSchema: { - format: "Table information is not provided in vschema", - code: vtrpcpb.Code_INVALID_ARGUMENT, - }, - NotSequenceTable: { - format: "NEXT used on a non-sequence table", - code: vtrpcpb.Code_INVALID_ARGUMENT, - }, - NextWithMultipleTables: { - format: "Next statement should not contain multiple tables", - typ: Bug, - }, - LockOnlyWithDual: { - format: "%v allowed only with dual", - code: vtrpcpb.Code_UNIMPLEMENTED, - }, - QualifiedOrderInUnion: { - format: "Table %s from one of the SELECTs cannot be used in global ORDER clause", - }, - JSONTables: { - format: "json_table expressions", - typ: Unsupported, - }, - Buggy: { - format: "%s", - typ: Bug, - }, - ColumnNotFound: { - format: "symbol %s not found", - state: vterrors.BadFieldError, - code: vtrpcpb.Code_INVALID_ARGUMENT, - }, - AmbiguousColumn: { - format: "Column '%s' in field list is ambiguous", - state: vterrors.BadFieldError, - code: vtrpcpb.Code_INVALID_ARGUMENT, - }, -} - -func (n *Error) Error() string { - f, ok := errors[n.Code] - if !ok { - return "unknown error" - } +func (e *UnionColumnsDoNotMatchError) ErrorState() vterrors.State { + return vterrors.WrongNumberOfColumnsInSelect +} - format := f.format +func (e *UnionColumnsDoNotMatchError) ErrorCode() vtrpcpb.Code { + return vtrpcpb.Code_FAILED_PRECONDITION +} - if f.id != "" { - format = fmt.Sprintf("%s: %s", f.id, format) - } +func (e *UnionColumnsDoNotMatchError) Error() string { + return eprintf(e, "The used SELECT statements have a different number of columns: %v, %v", e.FirstProj, e.SecondProj) +} - switch f.typ { - case Unsupported: - format = "VT12001: unsupported: " + format - case Bug: - format = "VT13001: [BUG] " + format - } +// UnsupportedMultiTablesInUpdateError +type UnsupportedMultiTablesInUpdateError struct { + ExprCount int + NotAlias bool +} - var args []any - for _, arg := range n.args { - ast, isAST := arg.(sqlparser.SQLNode) - if isAST { - args = append(args, sqlparser.String(ast)) - } else { - args = append(args, arg) - } +func (e *UnsupportedMultiTablesInUpdateError) Error() string { + switch { + case e.NotAlias: + return eprintf(e, "unaliased multiple tables in update") + default: + return eprintf(e, "multiple (%d) tables in update", e.ExprCount) } +} - return fmt.Sprintf(format, args...) +func (e *UnsupportedMultiTablesInUpdateError) unsupported() {} + +// UnsupportedNaturalJoinError +type UnsupportedNaturalJoinError struct { + JoinExpr *sqlparser.JoinTableExpr } -func (n *Error) ErrorState() vterrors.State { - f, ok := errors[n.Code] - if !ok { - return vterrors.Undefined - } +func (e *UnsupportedNaturalJoinError) Error() string { + return eprintf(e, "%s", e.JoinExpr.Join.ToString()) +} - return f.state +func (e *UnsupportedNaturalJoinError) unsupported() {} + +// UnionWithSQLCalcFoundRowsError +type UnionWithSQLCalcFoundRowsError struct { } -func (n *Error) ErrorCode() vtrpcpb.Code { - f, ok := errors[n.Code] - if !ok { - return vtrpcpb.Code_UNKNOWN - } +func (e *UnionWithSQLCalcFoundRowsError) Error() string { + return eprintf(e, "SQL_CALC_FOUND_ROWS not supported with union") +} - switch f.typ { - case Unsupported: - return vtrpcpb.Code_UNIMPLEMENTED - case Bug: - return vtrpcpb.Code_INTERNAL - default: - return f.code +func (e *UnionWithSQLCalcFoundRowsError) unsupported() {} + +// TableNotUpdatableError +type TableNotUpdatableError struct { + Table string +} + +func (e *TableNotUpdatableError) Error() string { + return eprintf(e, "The target table %s of the UPDATE is not updatable", e.Table) +} + +func (e *TableNotUpdatableError) ErrorState() vterrors.State { + return vterrors.NonUpdateableTable +} + +func (e *TableNotUpdatableError) ErrorCode() vtrpcpb.Code { + return vtrpcpb.Code_INVALID_ARGUMENT +} + +// SQLCalcFoundRowsUsageError +type SQLCalcFoundRowsUsageError struct { +} + +func (e *SQLCalcFoundRowsUsageError) Error() string { + return eprintf(e, "Incorrect usage/placement of 'SQL_CALC_FOUND_ROWS'") +} + +func (e *SQLCalcFoundRowsUsageError) ErrorCode() vtrpcpb.Code { + return vtrpcpb.Code_INVALID_ARGUMENT +} + +// CantUseOptionHereError +type CantUseOptionHereError struct { + Msg string +} + +func (e *CantUseOptionHereError) Error() string { + return eprintf(e, "Incorrect usage/placement of '%s'", e.Msg) +} + +func (e *CantUseOptionHereError) ErrorState() vterrors.State { + return vterrors.CantUseOptionHere +} + +func (e *CantUseOptionHereError) ErrorCode() vtrpcpb.Code { + return vtrpcpb.Code_INVALID_ARGUMENT +} + +// MissingInVSchemaError +type MissingInVSchemaError struct { + Table TableInfo +} + +func (e *MissingInVSchemaError) Error() string { + tableName, _ := e.Table.Name() + return eprintf(e, "Table information is not provided in vschema for table `%s`", sqlparser.String(tableName)) +} + +func (e *MissingInVSchemaError) ErrorCode() vtrpcpb.Code { + return vtrpcpb.Code_INVALID_ARGUMENT +} + +// NotSequenceTableError +type NotSequenceTableError struct { + Table string +} + +func (e *NotSequenceTableError) Error() string { + return eprintf(e, "NEXT used on a non-sequence table `%s`", e.Table) +} + +func (e *NotSequenceTableError) ErrorCode() vtrpcpb.Code { + return vtrpcpb.Code_INVALID_ARGUMENT +} + +// NextWithMultipleTablesError +type NextWithMultipleTablesError struct { + CountTables int +} + +func (e *NextWithMultipleTablesError) Error() string { + return eprintf(e, "Next statement should not contain multiple tables: found %d tables", e.CountTables) +} + +func (e *NextWithMultipleTablesError) bug() {} + +// LockOnlyWithDualError +type LockOnlyWithDualError struct { + Node *sqlparser.LockingFunc +} + +func (e *LockOnlyWithDualError) Error() string { + return eprintf(e, "%v allowed only with dual", sqlparser.String(e.Node)) +} + +func (e *LockOnlyWithDualError) ErrorCode() vtrpcpb.Code { + return vtrpcpb.Code_UNIMPLEMENTED +} + +// QualifiedOrderInUnionError +type QualifiedOrderInUnionError struct { + Table string +} + +func (e *QualifiedOrderInUnionError) Error() string { + return eprintf(e, "Table `%s` from one of the SELECTs cannot be used in global ORDER clause", e.Table) +} + +// JSONTablesError +type JSONTablesError struct { + Table string +} + +func (e *JSONTablesError) Error() string { + return eprintf(e, "json_table expressions") +} + +func (e *JSONTablesError) unsupported() {} + +// BuggyError is used for checking conditions that should never occur +type BuggyError struct { + Msg string +} + +func (e *BuggyError) Error() string { + return eprintf(e, e.Msg) +} + +func (e *BuggyError) bug() {} + +// ColumnNotFoundError +type ColumnNotFoundError struct { + Column *sqlparser.ColName + Table *sqlparser.TableName +} + +func (e *ColumnNotFoundError) Error() string { + if e.Table == nil { + return eprintf(e, "column '%s' not found", sqlparser.String(e.Column)) } + return eprintf(e, "column '%s' not found in table '%s'", sqlparser.String(e.Column), sqlparser.String(e.Table)) +} + +func (e *ColumnNotFoundError) ErrorCode() vtrpcpb.Code { + return vtrpcpb.Code_INVALID_ARGUMENT +} + +func (e *ColumnNotFoundError) ErrorState() vterrors.State { + return vterrors.BadFieldError +} + +// AmbiguousColumnError +type AmbiguousColumnError struct { + Column string +} + +func (e *AmbiguousColumnError) Error() string { + return eprintf(e, "Column '%s' in field list is ambiguous", e.Column) +} + +func (e *AmbiguousColumnError) ErrorState() vterrors.State { + return vterrors.BadFieldError +} + +func (e *AmbiguousColumnError) ErrorCode() vtrpcpb.Code { + return vtrpcpb.Code_INVALID_ARGUMENT +} + +type UnsupportedConstruct struct { + errString string +} + +func (e *UnsupportedConstruct) unsupported() {} + +func (e *UnsupportedConstruct) ErrorCode() vtrpcpb.Code { + return vtrpcpb.Code_UNIMPLEMENTED +} + +func (e *UnsupportedConstruct) Error() string { + return eprintf(e, e.errString) } diff --git a/go/vt/vtgate/semantics/info_schema.go b/go/vt/vtgate/semantics/info_schema.go index b27e197c16f..af050d5ff1b 100644 --- a/go/vt/vtgate/semantics/info_schema.go +++ b/go/vt/vtgate/semantics/info_schema.go @@ -1671,16 +1671,22 @@ type infoSchemaWithColumns struct { infoSchemaData map[string][]vindexes.Column } +// We cache this information, since these are maps that are not changed +var infoSchema57 = getInfoSchema57() +var infoSchema80 = getInfoSchema80() + // newSchemaInfo returns a SchemaInformation that has the column information for all info_schema tables func newSchemaInfo(inner SchemaInformation) SchemaInformation { + return &infoSchemaWithColumns{inner: inner, infoSchemaData: loadSchemaInfo()} +} + +func loadSchemaInfo() map[string][]vindexes.Column { version := servenv.MySQLServerVersion() - var infoSchema map[string][]vindexes.Column if strings.HasPrefix(version, "5.7") { - infoSchema = getInfoSchema57() - } else { - infoSchema = getInfoSchema80() + return infoSchema57 } - return &infoSchemaWithColumns{inner: inner, infoSchemaData: infoSchema} + + return infoSchema80 } // FindTableOrVindex implements the SchemaInformation interface diff --git a/go/vt/vtgate/semantics/info_schema_gen_test.go b/go/vt/vtgate/semantics/info_schema_gen_test.go index c5fe0123852..61241d96653 100644 --- a/go/vt/vtgate/semantics/info_schema_gen_test.go +++ b/go/vt/vtgate/semantics/info_schema_gen_test.go @@ -70,69 +70,6 @@ func TestGenerateInfoSchemaMap(t *testing.T) { } var ( - informationSchemaTables57 = []string{ - "CHARACTER_SETS", - "COLLATION_CHARACTER_SET_APPLICABILITY", - "COLLATIONS", - "COLUMN_PRIVILEGES", - "COLUMNS", - "ENGINES", - "EVENTS", - "FILES", - "GLOBAL_STATUS", - "GLOBAL_VARIABLES", - "INNODB_BUFFER_PAGE", - "INNODB_BUFFER_PAGE_LRU", - "INNODB_BUFFER_POOL_STATS", - "INNODB_CMP", - "INNODB_CMP_PER_INDEX", - "INNODB_CMP_PER_INDEX_RESET", - "INNODB_CMP_RESET", - "INNODB_CMPMEM", - "INNODB_CMPMEM_RESET", - "INNODB_FT_BEING_DELETED", - "INNODB_FT_CONFIG", - "INNODB_FT_DEFAULT_STOPWORD", - "INNODB_FT_DELETED", - "INNODB_FT_INDEX_CACHE", - "INNODB_FT_INDEX_TABLE", - "INNODB_LOCK_WAITS", - "INNODB_LOCKS", - "INNODB_METRICS", - "INNODB_SYS_COLUMNS", - "INNODB_SYS_DATAFILES", - "INNODB_SYS_FIELDS", - "INNODB_SYS_FOREIGN", - "INNODB_SYS_FOREIGN_COLS", - "INNODB_SYS_INDEXES", - "INNODB_SYS_TABLES", - "INNODB_SYS_TABLESPACES", - "INNODB_SYS_TABLESTATS", - "INNODB_SYS_VIRTUAL", - "INNODB_TEMP_TABLE_INFO", - "INNODB_TRX", - "KEY_COLUMN_USAGE", - "OPTIMIZER_TRACE", - "PARAMETERS", - "PARTITIONS", - "PLUGINS", - "PROCESSLIST", - "PROFILING", - "REFERENTIAL_CONSTRAINTS", - "ROUTINES", - "SCHEMA_PRIVILEGES", - "SCHEMATA", - "SESSION_STATUS", - "SESSION_VARIABLES", - "STATISTICS", - "TABLE_CONSTRAINTS", - "TABLE_PRIVILEGES", - "TABLES", - "TABLESPACES", - "TRIGGERS", - "USER_PRIVILEGES", - "VIEWS", - } informationSchemaTables80 = []string{ "ADMINISTRABLE_ROLE_AUTHORIZATIONS", "APPLICABLE_ROLES", diff --git a/go/vt/vtgate/semantics/real_table.go b/go/vt/vtgate/semantics/real_table.go index 5914b9324d9..bd57ab81474 100644 --- a/go/vt/vtgate/semantics/real_table.go +++ b/go/vt/vtgate/semantics/real_table.go @@ -73,7 +73,7 @@ func (r *RealTable) getColumns() []ColumnInfo { } // GetExpr implements the TableInfo interface -func (r *RealTable) getExpr() *sqlparser.AliasedTableExpr { +func (r *RealTable) GetExpr() *sqlparser.AliasedTableExpr { return r.ASTNode } @@ -104,10 +104,14 @@ func vindexTableToColumnInfo(tbl *vindexes.Table) []ColumnInfo { nameMap := map[string]any{} cols := make([]ColumnInfo, 0, len(tbl.Columns)) for _, col := range tbl.Columns { - var collation collations.ID + collation := collations.DefaultCollationForType(col.Type) if sqltypes.IsText(col.Type) { - collation, _ = collations.Local().LookupID(col.CollationName) + coll, found := collations.Local().LookupID(col.CollationName) + if found { + collation = coll + } } + cols = append(cols, ColumnInfo{ Name: col.Name.String(), Type: Type{ diff --git a/go/vt/vtgate/semantics/scoper.go b/go/vt/vtgate/semantics/scoper.go index adae1319e37..4df6fb06685 100644 --- a/go/vt/vtgate/semantics/scoper.go +++ b/go/vt/vtgate/semantics/scoper.go @@ -21,7 +21,6 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/sqlparser" ) @@ -46,6 +45,7 @@ type ( tables []TableInfo isUnion bool joinUsing map[string]TableSet + stmtScope bool } ) @@ -60,80 +60,104 @@ func newScoper() *scoper { func (s *scoper) down(cursor *sqlparser.Cursor) error { node := cursor.Node() switch node := node.(type) { - case *sqlparser.Update, *sqlparser.Delete: - currScope := newScope(s.currentScope()) - s.push(currScope) - - currScope.stmt = node.(sqlparser.Statement) + case *sqlparser.Update, *sqlparser.Delete, *sqlparser.Insert: + s.pushDMLScope(node) case *sqlparser.Select: - currScope := newScope(s.currentScope()) - s.push(currScope) - - // Needed for order by with Literal to find the Expression. - currScope.stmt = node - - s.rScope[node] = currScope - s.wScope[node] = newScope(nil) + s.pushSelectScope(node) case sqlparser.TableExpr: - if isParentSelect(cursor) { - // when checking the expressions used in JOIN conditions, special rules apply where the ON expression - // can only see the two tables involved in the JOIN, and no other tables. - // To create this special context, we create a special scope here that is then merged with - // the surrounding scope when we come back out from the JOIN - nScope := newScope(nil) - nScope.stmt = cursor.Parent().(*sqlparser.Select) - s.push(nScope) - } + s.enterJoinScope(cursor) case sqlparser.SelectExprs: - sel, parentIsSelect := cursor.Parent().(*sqlparser.Select) - if !parentIsSelect { + s.copySelectExprs(cursor, node) + case sqlparser.OrderBy: + return s.addColumnInfoForOrderBy(cursor, node) + case sqlparser.GroupBy: + return s.addColumnInfoForGroupBy(cursor, node) + case *sqlparser.Where: + if node.Type != sqlparser.HavingClause { break } + return s.createSpecialScopePostProjection(cursor.Parent()) + } + return nil +} - // adding a vTableInfo for each SELECT, so it can be used by GROUP BY, HAVING, ORDER BY - // the vTableInfo we are creating here should not be confused with derived tables' vTableInfo - wScope, exists := s.wScope[sel] - if !exists { - break - } - wScope.tables = []TableInfo{createVTableInfoForExpressions(node, s.currentScope().tables, s.org)} - case sqlparser.OrderBy: - if isParentSelectStatement(cursor) { - err := s.createSpecialScopePostProjection(cursor.Parent()) - if err != nil { - return err - } - for _, order := range node { - lit := keepIntLiteral(order.Expr) - if lit != nil { - s.specialExprScopes[lit] = s.currentScope() - } - } +func (s *scoper) addColumnInfoForGroupBy(cursor *sqlparser.Cursor, node sqlparser.GroupBy) error { + err := s.createSpecialScopePostProjection(cursor.Parent()) + if err != nil { + return err + } + for _, expr := range node { + lit := keepIntLiteral(expr) + if lit != nil { + s.specialExprScopes[lit] = s.currentScope() } - case sqlparser.GroupBy: + } + return nil +} + +func (s *scoper) addColumnInfoForOrderBy(cursor *sqlparser.Cursor, node sqlparser.OrderBy) error { + if isParentSelectStatement(cursor) { err := s.createSpecialScopePostProjection(cursor.Parent()) if err != nil { return err } - for _, expr := range node { - lit := keepIntLiteral(expr) + for _, order := range node { + lit := keepIntLiteral(order.Expr) if lit != nil { s.specialExprScopes[lit] = s.currentScope() } } - case *sqlparser.Where: - if node.Type != sqlparser.HavingClause { - break - } - return s.createSpecialScopePostProjection(cursor.Parent()) - case *sqlparser.DerivedTable: - if node.Lateral { - return vterrors.VT12001("lateral derived tables") - } } return nil } +func (s *scoper) copySelectExprs(cursor *sqlparser.Cursor, node sqlparser.SelectExprs) { + sel, parentIsSelect := cursor.Parent().(*sqlparser.Select) + if !parentIsSelect { + return + } + + // adding a vTableInfo for each SELECT, so it can be used by GROUP BY, HAVING, ORDER BY + // the vTableInfo we are creating here should not be confused with derived tables' vTableInfo + wScope, exists := s.wScope[sel] + if !exists { + return + } + wScope.tables = []TableInfo{createVTableInfoForExpressions(node, s.currentScope().tables, s.org)} +} + +func (s *scoper) enterJoinScope(cursor *sqlparser.Cursor) { + if isParentSelect(cursor) { + // when checking the expressions used in JOIN conditions, special rules apply where the ON expression + // can only see the two tables involved in the JOIN, and no other tables of that select statement. + // They are allowed to see the tables of the outer select query. + // To create this special context, we will find the parent scope of the select statement involved. + nScope := newScope(s.currentScope().findParentScopeOfStatement()) + nScope.stmt = cursor.Parent().(*sqlparser.Select) + s.push(nScope) + } +} + +func (s *scoper) pushSelectScope(node *sqlparser.Select) { + currScope := newScope(s.currentScope()) + currScope.stmtScope = true + s.push(currScope) + + // Needed for order by with Literal to find the Expression. + currScope.stmt = node + + s.rScope[node] = currScope + s.wScope[node] = newScope(nil) +} + +func (s *scoper) pushDMLScope(node sqlparser.SQLNode) { + currScope := newScope(s.currentScope()) + currScope.stmtScope = true + s.push(currScope) + + currScope.stmt = node.(sqlparser.Statement) +} + func keepIntLiteral(e sqlparser.Expr) *sqlparser.Literal { coll, ok := e.(*sqlparser.CollateExpr) if ok { @@ -156,7 +180,7 @@ func (s *scoper) up(cursor *sqlparser.Cursor) error { if isParentSelectStatement(cursor) { s.popScope() } - case *sqlparser.Select, sqlparser.GroupBy, *sqlparser.Update: + case *sqlparser.Select, sqlparser.GroupBy, *sqlparser.Update, *sqlparser.Delete, *sqlparser.Insert: s.popScope() case *sqlparser.Where: if node.Type != sqlparser.HavingClause { @@ -212,7 +236,7 @@ func (s *scoper) createSpecialScopePostProjection(parent sqlparser.SQLNode) erro } thisTableInfo := createVTableInfoForExpressions(sel.SelectExprs, nil /*needed for star expressions*/, s.org) if len(tableInfo.cols) != len(thisTableInfo.cols) { - return engine.ErrWrongNumberOfColumnsInSelect + return vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.WrongNumberOfColumnsInSelect, "The used SELECT statements have a different number of columns") } for i, col := range tableInfo.cols { // at this stage, we don't store the actual dependencies, we only store the expressions. @@ -289,3 +313,14 @@ func (s *scope) prepareUsingMap() (result map[TableSet]map[string]TableSet) { } return } + +// findParentScopeOfStatement finds the scope that belongs to a statement. +func (s *scope) findParentScopeOfStatement() *scope { + if s.stmtScope { + return s.parent + } + if s.parent == nil { + return nil + } + return s.parent.findParentScopeOfStatement() +} diff --git a/go/vt/vtgate/semantics/semantic_state.go b/go/vt/vtgate/semantics/semantic_state.go index 42b5f4d39c4..6f3a4962961 100644 --- a/go/vt/vtgate/semantics/semantic_state.go +++ b/go/vt/vtgate/semantics/semantic_state.go @@ -17,17 +17,16 @@ limitations under the License. package semantics import ( + "fmt" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" - querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/vindexes" - - "vitess.io/vitess/go/vt/sqlparser" ) type ( @@ -48,8 +47,8 @@ type ( // authoritative is true if we have exhaustive column information authoritative() bool - // getExpr returns the AST struct behind this table - getExpr() *sqlparser.AliasedTableExpr + // GetExpr returns the AST struct behind this table + GetExpr() *sqlparser.AliasedTableExpr // getColumns returns the known column information for this table getColumns() []ColumnInfo @@ -70,41 +69,52 @@ type ( // SemTable contains semantic analysis information about the query. SemTable struct { + // Tables stores information about the tables in the query, including derived tables Tables []TableInfo + // Comments stores any comments of the /* vt+ */ type in the query + Comments *sqlparser.ParsedComments + // Warning stores any warnings generated during semantic analysis. + Warning string + // Collation represents the default collation for the query, usually inherited + // from the connection's default collation. + Collation collations.ID + // ExprTypes maps expressions to their respective types in the query. + ExprTypes map[sqlparser.Expr]Type - // NotSingleRouteErr stores any errors that have to be generated if the query cannot be planned as a single route. + // NotSingleRouteErr stores errors related to missing schema information. + // This typically occurs when a column's existence is uncertain. + // Instead of failing early, the query is allowed to proceed, possibly + // succeeding once it reaches MySQL. NotSingleRouteErr error - // NotUnshardedErr stores any errors that have to be generated if the query is not unsharded. + + // NotUnshardedErr stores errors that occur if the query isn't planned as a single route + // targeting an unsharded keyspace. This typically arises when information is missing, but + // for unsharded tables, the code operates in a passthrough mode, relying on the underlying + // MySQL engine to handle errors appropriately. NotUnshardedErr error - // Recursive contains the dependencies from the expression to the actual tables - // in the query (i.e. not including derived tables). If an expression is a column on a derived table, - // this map will contain the accumulated dependencies for the column expression inside the derived table + // Recursive contains dependencies from the expression to the actual tables + // in the query (excluding derived tables). For columns in derived tables, + // this map holds the accumulated dependencies for the column expression. Recursive ExprDependencies - - // Direct keeps information about the closest dependency for an expression. - // It does not recurse inside derived tables and the like to find the original dependencies + // Direct stores information about the closest dependency for an expression. + // It doesn't recurse inside derived tables to find the original dependencies. Direct ExprDependencies - ExprTypes map[sqlparser.Expr]Type - selectScope map[*sqlparser.Select]*scope - Comments *sqlparser.ParsedComments + // SubqueryMap holds extracted subqueries for each statement. SubqueryMap map[sqlparser.Statement][]*sqlparser.ExtractedSubquery + // SubqueryRef maps subquery pointers to their extracted subquery. SubqueryRef map[*sqlparser.Subquery]*sqlparser.ExtractedSubquery - // ColumnEqualities is used to enable transitive closures - // if a == b and b == c then a == c + // ColumnEqualities is used for transitive closures (e.g., if a == b and b == c, then a == c). ColumnEqualities map[columnName][]sqlparser.Expr - // DefaultCollation is the default collation for this query, which is usually - // inherited from the connection's default collation. - Collation collations.ID - - Warning string - // ExpandedColumns is a map of all the added columns for a given table. + // The columns were added because of the use of `*` in the query ExpandedColumns map[sqlparser.TableName][]*sqlparser.ColName + columns map[*sqlparser.Union]sqlparser.SelectExprs + comparator *sqlparser.Comparator } @@ -131,19 +141,72 @@ func (st *SemTable) CopyDependencies(from, to sqlparser.Expr) { st.Direct[to] = st.DirectDeps(from) } +func (st *SemTable) SelectExprs(sel sqlparser.SelectStatement) sqlparser.SelectExprs { + switch sel := sel.(type) { + case *sqlparser.Select: + return sel.SelectExprs + case *sqlparser.Union: + exprs, found := st.columns[sel] + if found { + return exprs + } + panic("BUG: union not found in semantic table for select expressions") + } + panic(fmt.Sprintf("BUG: unexpected select statement type %T", sel)) +} + +func getColumnNames(exprs sqlparser.SelectExprs) (expanded bool, selectExprs sqlparser.SelectExprs) { + expanded = true + for _, col := range exprs { + switch col := col.(type) { + case *sqlparser.AliasedExpr: + expr := sqlparser.NewColName(col.ColumnName()) + selectExprs = append(selectExprs, &sqlparser.AliasedExpr{Expr: expr}) + default: + selectExprs = append(selectExprs, col) + expanded = false + } + } + return +} + +// CopyDependenciesOnSQLNodes copies the dependencies from one expression into the other +func (st *SemTable) CopyDependenciesOnSQLNodes(from, to sqlparser.SQLNode) { + f, ok := from.(sqlparser.Expr) + if !ok { + return + } + t, ok := to.(sqlparser.Expr) + if !ok { + return + } + st.CopyDependencies(f, t) +} + +// Cloned copies the dependencies from one expression into the other +func (st *SemTable) Cloned(from, to sqlparser.SQLNode) { + f, fromOK := from.(sqlparser.Expr) + t, toOK := to.(sqlparser.Expr) + if !(fromOK && toOK) { + return + } + st.CopyDependencies(f, t) +} + // EmptySemTable creates a new empty SemTable func EmptySemTable() *SemTable { return &SemTable{ Recursive: map[sqlparser.Expr]TableSet{}, Direct: map[sqlparser.Expr]TableSet{}, ColumnEqualities: map[columnName][]sqlparser.Expr{}, + columns: map[*sqlparser.Union]sqlparser.SelectExprs{}, } } // TableSetFor returns the bitmask for this particular table func (st *SemTable) TableSetFor(t *sqlparser.AliasedTableExpr) TableSet { for idx, t2 := range st.Tables { - if t == t2.getExpr() { + if t == t2.GetExpr() { return SingleTableSet(idx) } } @@ -152,6 +215,9 @@ func (st *SemTable) TableSetFor(t *sqlparser.AliasedTableExpr) TableSet { // ReplaceTableSetFor replaces the given single TabletSet with the new *sqlparser.AliasedTableExpr func (st *SemTable) ReplaceTableSetFor(id TableSet, t *sqlparser.AliasedTableExpr) { + if st == nil { + return + } if id.NumberOfTables() != 1 { // This is probably a derived table return @@ -218,12 +284,6 @@ func (st *SemTable) TableInfoForExpr(expr sqlparser.Expr) (TableInfo, error) { return st.TableInfoFor(st.Direct.dependencies(expr)) } -// GetSelectTables returns the table in the select. -func (st *SemTable) GetSelectTables(node *sqlparser.Select) []TableInfo { - scope := st.selectScope[node] - return scope.tables -} - // AddExprs adds new select exprs to the SemTable. func (st *SemTable) AddExprs(tbl *sqlparser.AliasedTableExpr, cols sqlparser.SelectExprs) { tableSet := st.TableSetFor(tbl) @@ -232,31 +292,35 @@ func (st *SemTable) AddExprs(tbl *sqlparser.AliasedTableExpr, cols sqlparser.Sel } } -// TypeFor returns the type of expressions in the query -func (st *SemTable) TypeFor(e sqlparser.Expr) *querypb.Type { - typ, found := st.ExprTypes[e] - if found { - return &typ.Type +// TypeForExpr returns the type of expressions in the query +func (st *SemTable) TypeForExpr(e sqlparser.Expr) (sqltypes.Type, collations.ID, bool) { + if typ, found := st.ExprTypes[e]; found { + return typ.Type, typ.Collation, true } - return nil -} -// CollationForExpr returns the collation name of expressions in the query -func (st *SemTable) CollationForExpr(e sqlparser.Expr) collations.ID { - typ, found := st.ExprTypes[e] - if found { - return typ.Collation + // We add a lot of WeightString() expressions to queries at late stages of the planning, + // which means that they don't have any type information. We can safely assume that they + // are VarBinary, since that's the only type that WeightString() can return. + _, isWS := e.(*sqlparser.WeightStringFuncExpr) + if isWS { + return sqltypes.VarBinary, collations.CollationBinaryID, true } - return collations.Unknown + + return sqltypes.Unknown, collations.Unknown, false } // NeedsWeightString returns true if the given expression needs weight_string to do safe comparisons func (st *SemTable) NeedsWeightString(e sqlparser.Expr) bool { - typ, found := st.ExprTypes[e] - if !found { - return true + switch e := e.(type) { + case *sqlparser.WeightStringFuncExpr, *sqlparser.Literal: + return false + default: + typ, found := st.ExprTypes[e] + if !found { + return true + } + return typ.Collation == collations.Unknown && !sqltypes.IsNumber(typ.Type) } - return typ.Collation == collations.Unknown && !sqltypes.IsNumber(typ.Type) } func (st *SemTable) DefaultCollation() collations.ID { @@ -341,6 +405,9 @@ func (st *SemTable) FindSubqueryReference(subquery *sqlparser.Subquery) *sqlpars // GetSubqueryNeedingRewrite returns a list of sub-queries that need to be rewritten func (st *SemTable) GetSubqueryNeedingRewrite() []*sqlparser.ExtractedSubquery { + if st == nil { + return nil + } var res []*sqlparser.ExtractedSubquery for _, extractedSubquery := range st.SubqueryRef { if extractedSubquery.Merged { @@ -359,8 +426,6 @@ func (st *SemTable) CopyExprInfo(src, dest sqlparser.Expr) { } } -var _ evalengine.TranslationLookup = (*SemTable)(nil) - var columnNotSupportedErr = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "column access not supported here") // ColumnLookup implements the TranslationLookup interface @@ -376,7 +441,7 @@ func (st *SemTable) SingleUnshardedKeyspace() (*vindexes.Keyspace, []*vindexes.T vindexTable := table.GetVindexTable() if vindexTable == nil { - _, isDT := table.getExpr().Expr.(*sqlparser.DerivedTable) + _, isDT := table.GetExpr().Expr.(*sqlparser.DerivedTable) if isDT { // derived tables are ok, as long as all real tables are from the same unsharded keyspace // we check the real tables inside the derived table as well for same unsharded keyspace. @@ -387,11 +452,12 @@ func (st *SemTable) SingleUnshardedKeyspace() (*vindexes.Keyspace, []*vindexes.T if vindexTable.Type != "" { // A reference table is not an issue when seeing if a query is going to an unsharded keyspace if vindexTable.Type == vindexes.TypeReference { + tables = append(tables, vindexTable) continue } return nil, nil } - name, ok := table.getExpr().Expr.(sqlparser.TableName) + name, ok := table.GetExpr().Expr.(sqlparser.TableName) if !ok { return nil, nil } @@ -425,6 +491,24 @@ func (st *SemTable) EqualsExpr(a, b sqlparser.Expr) bool { return st.ASTEquals().Expr(a, b) } +// EqualsExprWithDeps compares two expressions taking into account their semantic +// information. Dependency data typically pertains only to column expressions, +// this method considers them for all expression types. The method checks +// if dependency information exists for both expressions. If it does, the dependencies +// must match. If we are missing dependency information for either +func (st *SemTable) EqualsExprWithDeps(a, b sqlparser.Expr) bool { + eq := st.ASTEquals().Expr(a, b) + if !eq { + return false + } + adeps := st.RecursiveDeps(a) + bdeps := st.RecursiveDeps(b) + if adeps.IsEmpty() || bdeps.IsEmpty() || adeps == bdeps { + return true + } + return false +} + func (st *SemTable) ContainsExpr(e sqlparser.Expr, expres []sqlparser.Expr) bool { for _, expre := range expres { if st.EqualsExpr(e, expre) { diff --git a/go/vt/vtgate/semantics/table_collector.go b/go/vt/vtgate/semantics/table_collector.go index 16bebf364d7..d6fd4c6efd6 100644 --- a/go/vt/vtgate/semantics/table_collector.go +++ b/go/vt/vtgate/semantics/table_collector.go @@ -17,6 +17,7 @@ limitations under the License. package semantics import ( + querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" @@ -31,6 +32,7 @@ type tableCollector struct { si SchemaInformation currentDb string org originable + unionInfo map[*sqlparser.Union]unionInfo } func newTableCollector(scoper *scoper, si SchemaInformation, currentDb string) *tableCollector { @@ -38,44 +40,60 @@ func newTableCollector(scoper *scoper, si SchemaInformation, currentDb string) * scoper: scoper, si: si, currentDb: currentDb, + unionInfo: map[*sqlparser.Union]unionInfo{}, } } func (tc *tableCollector) up(cursor *sqlparser.Cursor) error { - node, ok := cursor.Node().(*sqlparser.AliasedTableExpr) - if !ok { - return nil + switch node := cursor.Node().(type) { + case *sqlparser.AliasedTableExpr: + return tc.visitAliasedTableExpr(node) + case *sqlparser.Union: + firstSelect := sqlparser.GetFirstSelect(node) + expanded, selectExprs := getColumnNames(firstSelect.SelectExprs) + info := unionInfo{ + isAuthoritative: expanded, + exprs: selectExprs, + } + tc.unionInfo[node] = info + if !expanded { + return nil + } + + size := len(firstSelect.SelectExprs) + info.recursive = make([]TableSet, size) + info.types = make([]*Type, size) + + _ = sqlparser.VisitAllSelects(node, func(s *sqlparser.Select, idx int) error { + for i, expr := range s.SelectExprs { + ae, ok := expr.(*sqlparser.AliasedExpr) + if !ok { + continue + } + _, recursiveDeps, qt := tc.org.depsForExpr(ae.Expr) + info.recursive[i] = info.recursive[i].Merge(recursiveDeps) + if idx == 0 { + // TODO: we probably should coerce these types together somehow, but I'm not sure how + info.types[i] = qt + } + } + return nil + }) + tc.unionInfo[node] = info } + + return nil +} + +func (tc *tableCollector) visitAliasedTableExpr(node *sqlparser.AliasedTableExpr) error { switch t := node.Expr.(type) { case *sqlparser.DerivedTable: switch sel := t.Select.(type) { case *sqlparser.Select: - tables := tc.scoper.wScope[sel] - tableInfo := createDerivedTableForExpressions(sqlparser.GetFirstSelect(sel).SelectExprs, node.Columns, tables.tables, tc.org) - if err := tableInfo.checkForDuplicates(); err != nil { - return err - } - - tableInfo.ASTNode = node - tableInfo.tableName = node.As.String() - - tc.Tables = append(tc.Tables, tableInfo) - scope := tc.scoper.currentScope() - return scope.addTable(tableInfo) + return tc.addSelectDerivedTable(sel, node) case *sqlparser.Union: - firstSelect := sqlparser.GetFirstSelect(sel) - tables := tc.scoper.wScope[firstSelect] - tableInfo := createDerivedTableForExpressions(firstSelect.SelectExprs, node.Columns, tables.tables, tc.org) - if err := tableInfo.checkForDuplicates(); err != nil { - return err - } - tableInfo.ASTNode = node - tableInfo.tableName = node.As.String() - - tc.Tables = append(tc.Tables, tableInfo) - scope := tc.scoper.currentScope() - return scope.addTable(tableInfo) + return tc.addUnionDerivedTable(sel, node) default: return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] %T in a derived table", sel) @@ -104,14 +122,62 @@ func (tc *tableCollector) up(cursor *sqlparser.Cursor) error { return nil } +func (tc *tableCollector) addSelectDerivedTable(sel *sqlparser.Select, node *sqlparser.AliasedTableExpr) error { + tables := tc.scoper.wScope[sel] + size := len(sel.SelectExprs) + deps := make([]TableSet, size) + types := make([]*Type, size) + expanded := true + for i, expr := range sel.SelectExprs { + ae, ok := expr.(*sqlparser.AliasedExpr) + if !ok { + expanded = false + continue + } + _, deps[i], types[i] = tc.org.depsForExpr(ae.Expr) + } + + tableInfo := createDerivedTableForExpressions(sel.SelectExprs, node.Columns, tables.tables, tc.org, expanded, deps, types) + if err := tableInfo.checkForDuplicates(); err != nil { + return err + } + + tableInfo.ASTNode = node + tableInfo.tableName = node.As.String() + + tc.Tables = append(tc.Tables, tableInfo) + scope := tc.scoper.currentScope() + return scope.addTable(tableInfo) +} + +func (tc *tableCollector) addUnionDerivedTable(union *sqlparser.Union, node *sqlparser.AliasedTableExpr) error { + firstSelect := sqlparser.GetFirstSelect(union) + tables := tc.scoper.wScope[firstSelect] + info, found := tc.unionInfo[union] + if !found { + return vterrors.VT13001("information about union is not available") + } + + tableInfo := createDerivedTableForExpressions(info.exprs, node.Columns, tables.tables, tc.org, info.isAuthoritative, info.recursive, info.types) + if err := tableInfo.checkForDuplicates(); err != nil { + return err + } + tableInfo.ASTNode = node + tableInfo.tableName = node.As.String() + + tc.Tables = append(tc.Tables, tableInfo) + scope := tc.scoper.currentScope() + return scope.addTable(tableInfo) +} + func newVindexTable(t sqlparser.IdentifierCS) *vindexes.Table { vindexCols := []vindexes.Column{ - {Name: sqlparser.NewIdentifierCI("id")}, - {Name: sqlparser.NewIdentifierCI("keyspace_id")}, - {Name: sqlparser.NewIdentifierCI("range_start")}, - {Name: sqlparser.NewIdentifierCI("range_end")}, - {Name: sqlparser.NewIdentifierCI("hex_keyspace_id")}, - {Name: sqlparser.NewIdentifierCI("shard")}, + {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARBINARY}, + {Name: sqlparser.NewIdentifierCI("keyspace_id"), Type: querypb.Type_VARBINARY}, + {Name: sqlparser.NewIdentifierCI("range_start"), Type: querypb.Type_VARBINARY}, + {Name: sqlparser.NewIdentifierCI("range_end"), Type: querypb.Type_VARBINARY}, + {Name: sqlparser.NewIdentifierCI("hex_keyspace_id"), Type: querypb.Type_VARBINARY}, + {Name: sqlparser.NewIdentifierCI("shard"), Type: querypb.Type_VARBINARY}, } return &vindexes.Table{ @@ -125,7 +191,7 @@ func newVindexTable(t sqlparser.IdentifierCS) *vindexes.Table { // The code lives in this file since it is only touching tableCollector data func (tc *tableCollector) tableSetFor(t *sqlparser.AliasedTableExpr) TableSet { for i, t2 := range tc.Tables { - if t == t2.getExpr() { + if t == t2.GetExpr() { return SingleTableSet(i) } } diff --git a/go/vt/vtgate/semantics/typer.go b/go/vt/vtgate/semantics/typer.go index 49f6da5bfe0..6652f1a476b 100644 --- a/go/vt/vtgate/semantics/typer.go +++ b/go/vt/vtgate/semantics/typer.go @@ -17,13 +17,11 @@ limitations under the License. package semantics import ( - "strings" - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/engine/opcode" ) // typer is responsible for setting the type for expressions @@ -44,31 +42,28 @@ func newTyper() *typer { } } -var typeInt32 = Type{Type: sqltypes.Int32} -var decimal = Type{Type: sqltypes.Decimal} -var floatval = Type{Type: sqltypes.Float64} - func (t *typer) up(cursor *sqlparser.Cursor) error { switch node := cursor.Node().(type) { case *sqlparser.Literal: - switch node.Type { - case sqlparser.IntVal: - t.exprTypes[node] = typeInt32 - case sqlparser.StrVal: - t.exprTypes[node] = Type{Type: sqltypes.VarChar} // TODO - add system default collation name - case sqlparser.DecimalVal: - t.exprTypes[node] = decimal - case sqlparser.FloatVal: - t.exprTypes[node] = floatval + t.exprTypes[node] = Type{Type: node.SQLType(), Collation: collations.DefaultCollationForType(node.SQLType())} + case *sqlparser.Argument: + if node.Type >= 0 { + t.exprTypes[node] = Type{Type: node.Type, Collation: collations.DefaultCollationForType(node.Type)} } case sqlparser.AggrFunc: - code, ok := engine.SupportedAggregates[strings.ToLower(node.AggrName())] - if ok { - typ, ok := engine.OpcodeType[code] + code, ok := opcode.SupportedAggregates[node.AggrName()] + if !ok { + return nil + } + var inputType sqltypes.Type + if arg := node.GetArg(); arg != nil { + t, ok := t.exprTypes[arg] if ok { - t.exprTypes[node] = Type{Type: typ} + inputType = t.Type } } + type_ := code.Type(inputType) + t.exprTypes[node] = Type{Type: type_, Collation: collations.DefaultCollationForType(type_)} } return nil } diff --git a/go/vt/vtgate/semantics/typer_test.go b/go/vt/vtgate/semantics/typer_test.go new file mode 100644 index 00000000000..4c77e6f5657 --- /dev/null +++ b/go/vt/vtgate/semantics/typer_test.go @@ -0,0 +1,58 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package semantics + +import ( + "testing" + + "github.com/stretchr/testify/require" + + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" +) + +func TestNormalizerAndSemanticAnalysisIntegration(t *testing.T) { + // This test runs the normalizer which extracts literals and replaces them with arguments + // It then tests that the semantic state contains the correct type + tests := []struct { + query, typ string + }{ + {query: "select 1", typ: "INT64"}, + {query: "select 1.2", typ: "DECIMAL"}, + {query: "select 'text'", typ: "VARCHAR"}, + {query: "select 0x1234", typ: "HEXNUM"}, + {query: "select x'7b7d'", typ: "HEXVAL"}, + } + + for _, test := range tests { + t.Run(test.query, func(t *testing.T) { + parse, err := sqlparser.Parse(test.query) + require.NoError(t, err) + + err = sqlparser.Normalize(parse, sqlparser.NewReservedVars("bv", sqlparser.BindVars{}), map[string]*querypb.BindVariable{}) + require.NoError(t, err) + + st, err := Analyze(parse, "d", fakeSchemaInfo()) + require.NoError(t, err) + bv := parse.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr.(*sqlparser.Argument) + typ, found := st.ExprTypes[bv] + require.True(t, found, "bindvar was not typed") + require.Equal(t, test.typ, typ.Type.String()) + }) + } + +} diff --git a/go/vt/vtgate/semantics/vindex_table.go b/go/vt/vtgate/semantics/vindex_table.go index 93e17fb37d0..46e5c2133ac 100644 --- a/go/vt/vtgate/semantics/vindex_table.go +++ b/go/vt/vtgate/semantics/vindex_table.go @@ -67,8 +67,8 @@ func (v *VindexTable) Name() (sqlparser.TableName, error) { } // GetExpr implements the TableInfo interface -func (v *VindexTable) getExpr() *sqlparser.AliasedTableExpr { - return v.Table.getExpr() +func (v *VindexTable) GetExpr() *sqlparser.AliasedTableExpr { + return v.Table.GetExpr() } // GetColumns implements the TableInfo interface diff --git a/go/vt/vtgate/semantics/vtable.go b/go/vt/vtgate/semantics/vtable.go index 48589ff7ffd..ce7efe22371 100644 --- a/go/vt/vtgate/semantics/vtable.go +++ b/go/vt/vtgate/semantics/vtable.go @@ -70,7 +70,7 @@ func (v *vTableInfo) Name() (sqlparser.TableName, error) { return sqlparser.TableName{}, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "oh noes") } -func (v *vTableInfo) getExpr() *sqlparser.AliasedTableExpr { +func (v *vTableInfo) GetExpr() *sqlparser.AliasedTableExpr { return nil } diff --git a/go/vt/vtgate/simplifier/expression_simplifier.go b/go/vt/vtgate/simplifier/expression_simplifier.go index 279cb1ac7dd..4537a137e76 100644 --- a/go/vt/vtgate/simplifier/expression_simplifier.go +++ b/go/vt/vtgate/simplifier/expression_simplifier.go @@ -21,104 +21,59 @@ import ( "strconv" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" ) // CheckF is used to see if the given expression exhibits the sought after issue type CheckF = func(sqlparser.Expr) bool -func SimplifyExpr(in sqlparser.Expr, test CheckF) (smallestKnown sqlparser.Expr) { - var maxDepth, level int - resetTo := func(e sqlparser.Expr) { - smallestKnown = e - maxDepth = depth(e) - level = 0 +func SimplifyExpr(in sqlparser.Expr, test CheckF) sqlparser.Expr { + // since we can't rewrite the top level, wrap the expr in an Exprs object + smallestKnown := sqlparser.Exprs{sqlparser.CloneExpr(in)} + + alwaysVisit := func(node, parent sqlparser.SQLNode) bool { + return true } - resetTo(in) - for level <= maxDepth { - current := sqlparser.CloneExpr(smallestKnown) - nodes, replaceF := getNodesAtLevel(current, level) - replace := func(e sqlparser.Expr, idx int) { - // if we are at the first level, we are replacing the root, - // not rewriting something deep in the tree - if level == 0 { - current = e + + up := func(cursor *sqlparser.Cursor) bool { + node := sqlparser.CloneSQLNode(cursor.Node()) + s := &shrinker{orig: node} + expr := s.Next() + for expr != nil { + cursor.Replace(expr) + + valid := test(smallestKnown[0]) + log.Errorf("test: %t: simplified %s to %s, full expr: %s", valid, sqlparser.String(node), sqlparser.String(expr), sqlparser.String(smallestKnown)) + if valid { + break // we will still continue trying to simplify other expressions at this level } else { - // replace `node` in current with the simplified expression - replaceF[idx](e) + // undo the change + cursor.Replace(node) } + expr = s.Next() } - simplified := false - for idx, node := range nodes { - // simplify each element and create a new expression with the node replaced by the simplification - // this means that we not only need the node, but also a way to replace the node - s := &shrinker{orig: node} - expr := s.Next() - for expr != nil { - replace(expr, idx) - - valid := test(current) - log.Errorf("test: %t - %s", valid, sqlparser.String(current)) - if valid { - simplified = true - break // we will still continue trying to simplify other expressions at this level - } else { - // undo the change - replace(node, idx) - } - expr = s.Next() - } - } - if simplified { - resetTo(current) - } else { - level++ - } - } - return smallestKnown -} - -func getNodesAtLevel(e sqlparser.Expr, level int) (result []sqlparser.Expr, replaceF []func(node sqlparser.SQLNode)) { - lvl := 0 - pre := func(cursor *sqlparser.Cursor) bool { - if expr, isExpr := cursor.Node().(sqlparser.Expr); level == lvl && isExpr { - result = append(result, expr) - replaceF = append(replaceF, cursor.ReplacerF()) - } - lvl++ - return true - } - post := func(cursor *sqlparser.Cursor) bool { - lvl-- return true } - sqlparser.Rewrite(e, pre, post) - return -} -func depth(e sqlparser.Expr) (depth int) { - lvl := 0 - pre := func(cursor *sqlparser.Cursor) bool { - lvl++ - if lvl > depth { - depth = lvl + // loop until rewriting introduces no more changes + for { + prevSmallest := sqlparser.CloneExprs(smallestKnown) + sqlparser.SafeRewrite(smallestKnown, alwaysVisit, up) + if sqlparser.Equals.Exprs(prevSmallest, smallestKnown) { + break } - return true } - post := func(cursor *sqlparser.Cursor) bool { - lvl-- - return true - } - sqlparser.Rewrite(e, pre, post) - return + + return smallestKnown[0] } type shrinker struct { - orig sqlparser.Expr - queue []sqlparser.Expr + orig sqlparser.SQLNode + queue []sqlparser.SQLNode } -func (s *shrinker) Next() sqlparser.Expr { +func (s *shrinker) Next() sqlparser.SQLNode { for { // first we check if there is already something in the queue. // note that we are doing a nil check and not a length check here. @@ -142,6 +97,10 @@ func (s *shrinker) Next() sqlparser.Expr { func (s *shrinker) fillQueue() bool { before := len(s.queue) switch e := s.orig.(type) { + case *sqlparser.AndExpr: + s.queue = append(s.queue, e.Left, e.Right) + case *sqlparser.OrExpr: + s.queue = append(s.queue, e.Left, e.Right) case *sqlparser.ComparisonExpr: s.queue = append(s.queue, e.Left, e.Right) case *sqlparser.BinaryExpr: @@ -228,9 +187,39 @@ func (s *shrinker) fillQueue() bool { for _, ae := range e.GetArgs() { s.queue = append(s.queue, ae) } + + clone := sqlparser.CloneAggrFunc(e) + if da, ok := clone.(sqlparser.DistinctableAggr); ok { + if da.IsDistinct() { + da.SetDistinct(false) + s.queue = append(s.queue, clone) + } + } case *sqlparser.ColName: // we can try to replace the column with a literal value - s.queue = []sqlparser.Expr{sqlparser.NewIntLiteral("0")} + s.queue = append(s.queue, sqlparser.NewIntLiteral("0")) + case *sqlparser.CaseExpr: + s.queue = append(s.queue, e.Expr, e.Else) + for _, when := range e.Whens { + s.queue = append(s.queue, when.Cond, when.Val) + } + + if len(e.Whens) > 1 { + for i := range e.Whens { + whensCopy := sqlparser.CloneSliceOfRefOfWhen(e.Whens) + // replace ith element with last element, then truncate last element + whensCopy[i] = whensCopy[len(whensCopy)-1] + whensCopy = whensCopy[:len(whensCopy)-1] + s.queue = append(s.queue, sqlparser.NewCaseExpr(e.Expr, whensCopy, e.Else)) + } + } + + if e.Else != nil { + s.queue = append(s.queue, sqlparser.NewCaseExpr(e.Expr, e.Whens, nil)) + } + if e.Expr != nil { + s.queue = append(s.queue, sqlparser.NewCaseExpr(nil, e.Whens, e.Else)) + } default: return false } diff --git a/go/vt/vtgate/simplifier/simplifier.go b/go/vt/vtgate/simplifier/simplifier.go index ef7be4e30e5..0e19935caba 100644 --- a/go/vt/vtgate/simplifier/simplifier.go +++ b/go/vt/vtgate/simplifier/simplifier.go @@ -40,12 +40,12 @@ func SimplifyStatement( return testF(sqlparser.CloneSelectStatement(s)) } + // first we try to simplify the query by removing any unions if success := trySimplifyUnions(sqlparser.CloneSelectStatement(in), test); success != nil { return SimplifyStatement(success, currentDB, si, testF) } - // first we try to simplify the query by removing any table. - // If we can remove a table and all uses of it, that's a good start + // then we try to remove a table and all uses of it if success := tryRemoveTable(tables, sqlparser.CloneSelectStatement(in), currentDB, si, testF); success != nil { return SimplifyStatement(success, currentDB, si, testF) } @@ -55,54 +55,93 @@ func SimplifyStatement( return SimplifyStatement(success, currentDB, si, testF) } - // we try to remove select expressions next + // we try to remove/replace any expressions next if success := trySimplifyExpressions(sqlparser.CloneSelectStatement(in), test); success != nil { return SimplifyStatement(success, currentDB, si, testF) } + + // we try to remove distinct last + if success := trySimplifyDistinct(sqlparser.CloneSelectStatement(in), test); success != nil { + return SimplifyStatement(success, currentDB, si, testF) + } + return in } +func trySimplifyDistinct(in sqlparser.SelectStatement, test func(statement sqlparser.SelectStatement) bool) sqlparser.SelectStatement { + simplified := false + alwaysVisitChildren := func(node, parent sqlparser.SQLNode) bool { + return true + } + + up := func(cursor *sqlparser.Cursor) bool { + if sel, ok := cursor.Node().(*sqlparser.Select); ok { + if sel.Distinct { + sel.Distinct = false + if test(sel) { + log.Errorf("removed distinct to yield: %s", sqlparser.String(sel)) + simplified = true + } else { + sel.Distinct = true + } + } + } + + return true + } + + sqlparser.SafeRewrite(in, alwaysVisitChildren, up) + + if simplified { + + return in + } + // we found no simplifications + return nil +} + func trySimplifyExpressions(in sqlparser.SelectStatement, test func(sqlparser.SelectStatement) bool) sqlparser.SelectStatement { simplified := false - visitAllExpressionsInAST(in, func(cursor expressionCursor) bool { + visit := func(cursor expressionCursor) bool { // first - let's try to remove the expression if cursor.remove() { if test(in) { log.Errorf("removed expression: %s", sqlparser.String(cursor.expr)) simplified = true - return false + // initially return false, but that made the rewriter prematurely abort, if it was the last selectExpr + return true } cursor.restore() } // ok, we seem to need this expression. let's see if we can find a simpler version - s := &shrinker{orig: cursor.expr} - newExpr := s.Next() - for newExpr != nil { - cursor.replace(newExpr) + newExpr := SimplifyExpr(cursor.expr, func(expr sqlparser.Expr) bool { + cursor.replace(expr) if test(in) { - log.Errorf("simplified expression: %s -> %s", sqlparser.String(cursor.expr), sqlparser.String(newExpr)) + log.Errorf("simplified expression: %s -> %s", sqlparser.String(cursor.expr), sqlparser.String(expr)) + cursor.restore() simplified = true - return false + return true } - newExpr = s.Next() - } - // if we get here, we failed to simplify this expression, - // so we put back in the original expression - cursor.restore() + cursor.restore() + return false + }) + + cursor.replace(newExpr) return true - }) + } + + visitAllExpressionsInAST(in, visit) if simplified { return in } - + // we found no simplifications return nil } func trySimplifyUnions(in sqlparser.SelectStatement, test func(sqlparser.SelectStatement) bool) (res sqlparser.SelectStatement) { - if union, ok := in.(*sqlparser.Union); ok { // the root object is an UNION if test(sqlparser.CloneSelectStatement(union.Left)) { @@ -113,9 +152,12 @@ func trySimplifyUnions(in sqlparser.SelectStatement, test func(sqlparser.SelectS } } - abort := false + simplified := false + alwaysVisitChildren := func(node, parent sqlparser.SQLNode) bool { + return true + } - sqlparser.Rewrite(in, func(cursor *sqlparser.Cursor) bool { + up := func(cursor *sqlparser.Cursor) bool { switch node := cursor.Node().(type) { case *sqlparser.Union: if _, ok := cursor.Parent().(*sqlparser.RootNode); ok { @@ -125,29 +167,30 @@ func trySimplifyUnions(in sqlparser.SelectStatement, test func(sqlparser.SelectS cursor.Replace(node.Left) clone := sqlparser.CloneSelectStatement(in) if test(clone) { - log.Errorf("replaced UNION with one of its children") - abort = true + log.Errorf("replaced UNION with its left child: %s -> %s", sqlparser.String(node), sqlparser.String(node.Left)) + simplified = true return true } cursor.Replace(node.Right) clone = sqlparser.CloneSelectStatement(in) if test(clone) { - log.Errorf("replaced UNION with one of its children") - abort = true + log.Errorf("replaced UNION with its right child: %s -> %s", sqlparser.String(node), sqlparser.String(node.Right)) + simplified = true return true } cursor.Replace(node) } return true - }, func(*sqlparser.Cursor) bool { - return !abort - }) + } - if !abort { - // we found no simplifications - return nil + sqlparser.SafeRewrite(in, alwaysVisitChildren, up) + + if simplified { + + return in } - return in + // we found no simplifications + return nil } func tryRemoveTable(tables []semantics.TableInfo, in sqlparser.SelectStatement, currentDB string, si semantics.SchemaInformation, test func(sqlparser.SelectStatement) bool) sqlparser.SelectStatement { @@ -158,11 +201,11 @@ func tryRemoveTable(tables []semantics.TableInfo, in sqlparser.SelectStatement, simplified := removeTable(clone, searchedTS, currentDB, si) name, _ := tbl.Name() if simplified && test(clone) { - log.Errorf("removed table %s", sqlparser.String(name)) + log.Errorf("removed table %s: %s -> %s", sqlparser.String(name), sqlparser.String(in), sqlparser.String(clone)) return clone } } - + // we found no simplifications return nil } @@ -178,7 +221,11 @@ func getTables(in sqlparser.SelectStatement, currentDB string, si semantics.Sche func simplifyStarExpr(in sqlparser.SelectStatement, test func(sqlparser.SelectStatement) bool) sqlparser.SelectStatement { simplified := false - sqlparser.Rewrite(in, func(cursor *sqlparser.Cursor) bool { + alwaysVisitChildren := func(node, parent sqlparser.SQLNode) bool { + return true + } + + up := func(cursor *sqlparser.Cursor) bool { se, ok := cursor.Node().(*sqlparser.StarExpr) if !ok { return true @@ -189,15 +236,19 @@ func simplifyStarExpr(in sqlparser.SelectStatement, test func(sqlparser.SelectSt if test(in) { log.Errorf("replaced star with literal") simplified = true - return false + return true } cursor.Replace(se) return true - }, nil) + } + + sqlparser.SafeRewrite(in, alwaysVisitChildren, up) + if simplified { return in } + // we found no simplifications return nil } @@ -209,92 +260,148 @@ func removeTable(clone sqlparser.SelectStatement, searchedTS semantics.TableSet, panic(err) } - simplified := true + simplified, kontinue := false, true shouldKeepExpr := func(expr sqlparser.Expr) bool { + // why do we keep if the expr contains an aggregation? return !semTable.RecursiveDeps(expr).IsOverlapping(searchedTS) || sqlparser.ContainsAggregation(expr) } - sqlparser.Rewrite(clone, func(cursor *sqlparser.Cursor) bool { + checkSelect := func(node, parent sqlparser.SQLNode) bool { + if sel, ok := node.(*sqlparser.Select); ok { + // remove the table from the from clause on the way down + // so that it happens before removing it anywhere else + kontinue, simplified = removeTableinSelect(sel, searchedTS, semTable, simplified) + } + + return kontinue + } + + up := func(cursor *sqlparser.Cursor) bool { switch node := cursor.Node().(type) { case *sqlparser.JoinTableExpr: - lft, ok := node.LeftExpr.(*sqlparser.AliasedTableExpr) - if ok { - ts := semTable.TableSetFor(lft) - if searchedTS == ts { - cursor.Replace(node.RightExpr) - } - } - rgt, ok := node.RightExpr.(*sqlparser.AliasedTableExpr) - if ok { - ts := semTable.TableSetFor(rgt) - if searchedTS == ts { - cursor.Replace(node.LeftExpr) - } - } - case *sqlparser.Select: - if len(node.From) == 1 { - _, notJoin := node.From[0].(*sqlparser.AliasedTableExpr) - if notJoin { - simplified = false - return false - } - } - for i, tbl := range node.From { - lft, ok := tbl.(*sqlparser.AliasedTableExpr) - if ok { - ts := semTable.TableSetFor(lft) - if searchedTS == ts { - node.From = append(node.From[:i], node.From[i+1:]...) - return true - } - } - } + simplified = removeTableinJoinTableExpr(node, searchedTS, semTable, cursor, simplified) case *sqlparser.Where: - exprs := sqlparser.SplitAndExpression(nil, node.Expr) - var newPredicate sqlparser.Expr - for _, expr := range exprs { - if !semTable.RecursiveDeps(expr).IsOverlapping(searchedTS) { - newPredicate = sqlparser.AndExpressions(newPredicate, expr) - } - } - node.Expr = newPredicate + simplified = removeTableinWhere(node, shouldKeepExpr, simplified) case sqlparser.SelectExprs: - _, isSel := cursor.Parent().(*sqlparser.Select) - if !isSel { - return true - } - - var newExprs sqlparser.SelectExprs - for _, ae := range node { - expr, ok := ae.(*sqlparser.AliasedExpr) - if !ok { - newExprs = append(newExprs, ae) - continue - } - if shouldKeepExpr(expr.Expr) { - newExprs = append(newExprs, ae) - } - } - cursor.Replace(newExprs) + simplified = removeTableinSelectExprs(node, cursor, shouldKeepExpr, simplified) case sqlparser.GroupBy: - var newExprs sqlparser.GroupBy - for _, expr := range node { - if shouldKeepExpr(expr) { - newExprs = append(newExprs, expr) - } - } - cursor.Replace(newExprs) + simplified = removeTableinGroupBy(node, cursor, shouldKeepExpr, simplified) case sqlparser.OrderBy: - var newExprs sqlparser.OrderBy - for _, expr := range node { - if shouldKeepExpr(expr.Expr) { - newExprs = append(newExprs, expr) - } + simplified = removeTableinOrderBy(node, cursor, shouldKeepExpr, simplified) + } + return true + } + + sqlparser.SafeRewrite(clone, checkSelect, up) + return simplified +} + +func removeTableinJoinTableExpr(node *sqlparser.JoinTableExpr, searchedTS semantics.TableSet, semTable *semantics.SemTable, cursor *sqlparser.Cursor, simplified bool) bool { + lft, ok := node.LeftExpr.(*sqlparser.AliasedTableExpr) + if ok { + ts := semTable.TableSetFor(lft) + if searchedTS == ts { + cursor.Replace(node.RightExpr) + simplified = true + } + } + rgt, ok := node.RightExpr.(*sqlparser.AliasedTableExpr) + if ok { + ts := semTable.TableSetFor(rgt) + if searchedTS == ts { + cursor.Replace(node.LeftExpr) + simplified = true + } + } + + return simplified +} + +func removeTableinSelect(node *sqlparser.Select, searchedTS semantics.TableSet, semTable *semantics.SemTable, simplified bool) (bool, bool) { + if len(node.From) == 1 { + _, notJoin := node.From[0].(*sqlparser.AliasedTableExpr) + if notJoin { + return false, simplified + } + } + for i, tbl := range node.From { + lft, ok := tbl.(*sqlparser.AliasedTableExpr) + if ok { + ts := semTable.TableSetFor(lft) + if searchedTS == ts { + node.From = append(node.From[:i], node.From[i+1:]...) + simplified = true } + } + } + + return true, simplified +} - cursor.Replace(newExprs) +func removeTableinWhere(node *sqlparser.Where, shouldKeepExpr func(sqlparser.Expr) bool, simplified bool) bool { + exprs := sqlparser.SplitAndExpression(nil, node.Expr) + var newPredicate sqlparser.Expr + for _, expr := range exprs { + if shouldKeepExpr(expr) { + newPredicate = sqlparser.AndExpressions(newPredicate, expr) + } else { + simplified = true } - return true - }, nil) + } + node.Expr = newPredicate + + return simplified +} + +func removeTableinSelectExprs(node sqlparser.SelectExprs, cursor *sqlparser.Cursor, shouldKeepExpr func(sqlparser.Expr) bool, simplified bool) bool { + _, isSel := cursor.Parent().(*sqlparser.Select) + if !isSel { + return simplified + } + + var newExprs sqlparser.SelectExprs + for _, ae := range node { + expr, ok := ae.(*sqlparser.AliasedExpr) + if !ok { + newExprs = append(newExprs, ae) + continue + } + if shouldKeepExpr(expr.Expr) { + newExprs = append(newExprs, ae) + } else { + simplified = true + } + } + cursor.Replace(newExprs) + + return simplified +} + +func removeTableinGroupBy(node sqlparser.GroupBy, cursor *sqlparser.Cursor, shouldKeepExpr func(sqlparser.Expr) bool, simplified bool) bool { + var newExprs sqlparser.GroupBy + for _, expr := range node { + if shouldKeepExpr(expr) { + newExprs = append(newExprs, expr) + } else { + simplified = true + } + } + cursor.Replace(newExprs) + + return simplified +} + +func removeTableinOrderBy(node sqlparser.OrderBy, cursor *sqlparser.Cursor, shouldKeepExpr func(sqlparser.Expr) bool, simplified bool) bool { + var newExprs sqlparser.OrderBy + for _, expr := range node { + if shouldKeepExpr(expr.Expr) { + newExprs = append(newExprs, expr) + } else { + simplified = true + } + } + + cursor.Replace(newExprs) + return simplified } @@ -315,180 +422,223 @@ func newExprCursor(expr sqlparser.Expr, replace func(replaceWith sqlparser.Expr) } // visitAllExpressionsInAST will walk the AST and visit all expressions -// This cursor has a few extra capabilities that the normal sqlparser.Rewrite does not have, +// This cursor has a few extra capabilities that the normal sqlparser.SafeRewrite does not have, // such as visiting and being able to change individual expressions in a AND tree +// if visit returns true, then traversal continues, otherwise traversal stops func visitAllExpressionsInAST(clone sqlparser.SelectStatement, visit func(expressionCursor) bool) { - abort := false - post := func(*sqlparser.Cursor) bool { - return !abort + alwaysVisitChildren := func(node, parent sqlparser.SQLNode) bool { + return true } - pre := func(cursor *sqlparser.Cursor) bool { - if abort { - return true - } + up := func(cursor *sqlparser.Cursor) bool { switch node := cursor.Node().(type) { case sqlparser.SelectExprs: - _, isSel := cursor.Parent().(*sqlparser.Select) - if !isSel { - return true - } - for idx := 0; idx < len(node); idx++ { - ae := node[idx] - expr, ok := ae.(*sqlparser.AliasedExpr) - if !ok { - continue - } - removed := false - original := sqlparser.CloneExpr(expr.Expr) - item := newExprCursor( - expr.Expr, - /*replace*/ func(replaceWith sqlparser.Expr) { - if removed { - panic("cant replace after remove without restore") - } - expr.Expr = replaceWith - }, - /*remove*/ func() bool { - if removed { - panic("can't remove twice, silly") - } - if len(node) == 1 { - // can't remove the last expressions - we'd end up with an empty SELECT clause - return false - } - withoutElement := append(node[:idx], node[idx+1:]...) - cursor.Replace(withoutElement) - node = withoutElement - removed = true - return true - }, - /*restore*/ func() { - if removed { - front := make(sqlparser.SelectExprs, idx) - copy(front, node[:idx]) - back := make(sqlparser.SelectExprs, len(node)-idx) - copy(back, node[idx:]) - frontWithRestoredExpr := append(front, ae) - node = append(frontWithRestoredExpr, back...) - cursor.Replace(node) - removed = false - return - } - expr.Expr = original - }, - ) - abort = !visit(item) - } + return visitSelectExprs(node, cursor, visit) case *sqlparser.Where: - exprs := sqlparser.SplitAndExpression(nil, node.Expr) - set := func(input []sqlparser.Expr) { - node.Expr = sqlparser.AndExpressions(input...) - exprs = input - } - abort = !visitExpressions(exprs, set, visit) + return visitWhere(node, visit) case *sqlparser.JoinCondition: - join, ok := cursor.Parent().(*sqlparser.JoinTableExpr) - if !ok { - return true - } - if join.Join != sqlparser.NormalJoinType || node.Using != nil { - return false - } - exprs := sqlparser.SplitAndExpression(nil, node.On) - set := func(input []sqlparser.Expr) { - node.On = sqlparser.AndExpressions(input...) - exprs = input - } - abort = !visitExpressions(exprs, set, visit) + return visitJoinCondition(node, cursor, visit) case sqlparser.GroupBy: - set := func(input []sqlparser.Expr) { - node = input - cursor.Replace(node) - } - abort = !visitExpressions(node, set, visit) + return visitGroupBy(node, cursor, visit) case sqlparser.OrderBy: - for idx := 0; idx < len(node); idx++ { - order := node[idx] - removed := false - original := sqlparser.CloneExpr(order.Expr) - item := newExprCursor( - order.Expr, - /*replace*/ func(replaceWith sqlparser.Expr) { - if removed { - panic("cant replace after remove without restore") - } - order.Expr = replaceWith - }, - /*remove*/ func() bool { - if removed { - panic("can't remove twice, silly") - } - withoutElement := append(node[:idx], node[idx+1:]...) - if len(withoutElement) == 0 { - var nilVal sqlparser.OrderBy // this is used to create a typed nil value - cursor.Replace(nilVal) - } else { - cursor.Replace(withoutElement) - } - node = withoutElement - removed = true - return true - }, - /*restore*/ func() { - if removed { - front := make(sqlparser.OrderBy, idx) - copy(front, node[:idx]) - back := make(sqlparser.OrderBy, len(node)-idx) - copy(back, node[idx:]) - frontWithRestoredExpr := append(front, order) - node = append(frontWithRestoredExpr, back...) - cursor.Replace(node) - removed = false - return - } - order.Expr = original - }, - ) - abort = visit(item) - if abort { - break - } - } + return visitOrderBy(node, cursor, visit) case *sqlparser.Limit: - if node.Offset != nil { - original := node.Offset - cursor := newExprCursor(node.Offset, - /*replace*/ func(replaceWith sqlparser.Expr) { - node.Offset = replaceWith - }, - /*remove*/ func() bool { - node.Offset = nil - return true - }, - /*restore*/ func() { - node.Offset = original - }) - abort = visit(cursor) - } - if !abort && node.Rowcount != nil { - original := node.Rowcount - cursor := newExprCursor(node.Rowcount, - /*replace*/ func(replaceWith sqlparser.Expr) { - node.Rowcount = replaceWith - }, - /*remove*/ func() bool { - // removing Rowcount is an invalid op - return false - }, - /*restore*/ func() { - node.Rowcount = original - }) - abort = visit(cursor) - } + return visitLimit(node, cursor, visit) } return true } - sqlparser.Rewrite(clone, pre, post) + sqlparser.SafeRewrite(clone, alwaysVisitChildren, up) +} + +func visitSelectExprs(node sqlparser.SelectExprs, cursor *sqlparser.Cursor, visit func(expressionCursor) bool) bool { + _, isSel := cursor.Parent().(*sqlparser.Select) + if !isSel { + return true + } + for idx := 0; idx < len(node); idx++ { + ae := node[idx] + expr, ok := ae.(*sqlparser.AliasedExpr) + if !ok { + continue + } + removed := false + original := sqlparser.CloneExpr(expr.Expr) + item := newExprCursor( + expr.Expr, + /*replace*/ func(replaceWith sqlparser.Expr) { + if removed { + panic("cant replace after remove without restore") + } + expr.Expr = replaceWith + }, + /*remove*/ func() bool { + if removed { + panic("can't remove twice, silly") + } + if len(node) == 1 { + // can't remove the last expressions - we'd end up with an empty SELECT clause + return false + } + withoutElement := append(node[:idx], node[idx+1:]...) + cursor.Replace(withoutElement) + node = withoutElement + removed = true + return true + }, + /*restore*/ func() { + if removed { + front := make(sqlparser.SelectExprs, idx) + copy(front, node[:idx]) + back := make(sqlparser.SelectExprs, len(node)-idx) + copy(back, node[idx:]) + frontWithRestoredExpr := append(front, ae) + node = append(frontWithRestoredExpr, back...) + cursor.Replace(node) + removed = false + return + } + expr.Expr = original + }, + ) + if !visit(item) { + return false + } + } + + return true +} + +func visitWhere(node *sqlparser.Where, visit func(expressionCursor) bool) bool { + exprs := sqlparser.SplitAndExpression(nil, node.Expr) + set := func(input []sqlparser.Expr) { + node.Expr = sqlparser.AndExpressions(input...) + exprs = input + } + return visitExpressions(exprs, set, visit, 0) +} + +func visitJoinCondition(node *sqlparser.JoinCondition, cursor *sqlparser.Cursor, visit func(expressionCursor) bool) bool { + join, ok := cursor.Parent().(*sqlparser.JoinTableExpr) + if !ok { + return true + } + + if node.Using != nil { + return true + } + + // for only left and right joins must the join condition be nonempty + minExprs := 0 + if join.Join == sqlparser.LeftJoinType || join.Join == sqlparser.RightJoinType { + minExprs = 1 + } + + exprs := sqlparser.SplitAndExpression(nil, node.On) + set := func(input []sqlparser.Expr) { + node.On = sqlparser.AndExpressions(input...) + exprs = input + } + return visitExpressions(exprs, set, visit, minExprs) +} + +func visitGroupBy(node sqlparser.GroupBy, cursor *sqlparser.Cursor, visit func(expressionCursor) bool) bool { + set := func(input []sqlparser.Expr) { + node = input + cursor.Replace(node) + } + return visitExpressions(node, set, visit, 0) +} + +func visitOrderBy(node sqlparser.OrderBy, cursor *sqlparser.Cursor, visit func(expressionCursor) bool) bool { + for idx := 0; idx < len(node); idx++ { + order := node[idx] + removed := false + original := sqlparser.CloneExpr(order.Expr) + item := newExprCursor( + order.Expr, + /*replace*/ func(replaceWith sqlparser.Expr) { + if removed { + panic("cant replace after remove without restore") + } + order.Expr = replaceWith + }, + /*remove*/ func() bool { + if removed { + panic("can't remove twice, silly") + } + withoutElement := append(node[:idx], node[idx+1:]...) + if len(withoutElement) == 0 { + var nilVal sqlparser.OrderBy // this is used to create a typed nil value + cursor.Replace(nilVal) + } else { + cursor.Replace(withoutElement) + } + node = withoutElement + removed = true + return true + }, + /*restore*/ func() { + if removed { + front := make(sqlparser.OrderBy, idx) + copy(front, node[:idx]) + back := make(sqlparser.OrderBy, len(node)-idx) + copy(back, node[idx:]) + frontWithRestoredExpr := append(front, order) + node = append(frontWithRestoredExpr, back...) + cursor.Replace(node) + removed = false + return + } + order.Expr = original + }, + ) + if !visit(item) { + return false + } + } + + return true +} + +func visitLimit(node *sqlparser.Limit, cursor *sqlparser.Cursor, visit func(expressionCursor) bool) bool { + if node.Offset != nil { + original := node.Offset + item := newExprCursor(node.Offset, + /*replace*/ func(replaceWith sqlparser.Expr) { + node.Offset = replaceWith + }, + /*remove*/ func() bool { + node.Offset = nil + return true + }, + /*restore*/ func() { + node.Offset = original + }) + if !visit(item) { + return false + } + } + if node.Rowcount != nil { + original := node.Rowcount + item := newExprCursor(node.Rowcount, + /*replace*/ func(replaceWith sqlparser.Expr) { + node.Rowcount = replaceWith + }, + // this removes the whole limit clause + /*remove*/ + func() bool { + var nilVal *sqlparser.Limit // this is used to create a typed nil value + cursor.Replace(nilVal) + return true + }, + /*restore*/ func() { + node.Rowcount = original + }) + if !visit(item) { + return false + } + } + + return true } // visitExpressions allows the cursor to visit all expressions in a slice, @@ -497,6 +647,7 @@ func visitExpressions( exprs []sqlparser.Expr, set func(input []sqlparser.Expr), visit func(expressionCursor) bool, + minExprs int, ) bool { for idx := 0; idx < len(exprs); idx++ { expr := exprs[idx] @@ -513,6 +664,10 @@ func visitExpressions( if removed { panic("can't remove twice, silly") } + // need to keep at least minExprs + if len(exprs) <= minExprs { + return false + } exprs = append(exprs[:idx], exprs[idx+1:]...) set(exprs) removed = true diff --git a/go/vt/vtgate/simplifier/simplifier_test.go b/go/vt/vtgate/simplifier/simplifier_test.go index bf8201dee46..c9edbbab8d8 100644 --- a/go/vt/vtgate/simplifier/simplifier_test.go +++ b/go/vt/vtgate/simplifier/simplifier_test.go @@ -21,6 +21,8 @@ import ( "testing" "vitess.io/vitess/go/vt/log" + + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/vtgate/evalengine" "github.com/stretchr/testify/require" @@ -53,11 +55,11 @@ limit 123 offset 456 require.NoError(t, err) visitAllExpressionsInAST(ast.(sqlparser.SelectStatement), func(cursor expressionCursor) bool { fmt.Printf(">> found expression: %s\n", sqlparser.String(cursor.expr)) - cursor.replace(sqlparser.NewIntLiteral("1")) + cursor.remove() fmt.Printf("remove: %s\n", sqlparser.String(ast)) cursor.restore() fmt.Printf("restore: %s\n", sqlparser.String(ast)) - cursor.remove() + cursor.replace(sqlparser.NewIntLiteral("1")) fmt.Printf("replace it with literal: %s\n", sqlparser.String(ast)) cursor.restore() fmt.Printf("restore: %s\n", sqlparser.String(ast)) @@ -81,26 +83,35 @@ func TestAbortExpressionCursor(t *testing.T) { func TestSimplifyEvalEngineExpr(t *testing.T) { // ast struct for L0 + - // L1 + + - // L2 + + + + - // L3 1 2 3 4 5 6 7 8 + // L1 + + + // L2 + + + + + // L3 1 2 3 4 5 6 + + + // L4 7 8 9 10 + + // L4 + i7, i8, i9, i10 := + sqlparser.NewIntLiteral("7"), + sqlparser.NewIntLiteral("8"), + sqlparser.NewIntLiteral("9"), + sqlparser.NewIntLiteral("10") // L3 - i1, i2, i3, i4, i5, i6, i7, i8 := + i1, i2, i3, i4, i5, i6, p31, p32 := sqlparser.NewIntLiteral("1"), sqlparser.NewIntLiteral("2"), sqlparser.NewIntLiteral("3"), sqlparser.NewIntLiteral("4"), sqlparser.NewIntLiteral("5"), sqlparser.NewIntLiteral("6"), - sqlparser.NewIntLiteral("7"), - sqlparser.NewIntLiteral("8") + plus(i7, i8), + plus(i9, i10) + // L2 p21, p22, p23, p24 := plus(i1, i2), plus(i3, i4), plus(i5, i6), - plus(i7, i8) + plus(p31, p32) // L1 p11, p12 := @@ -111,7 +122,7 @@ func TestSimplifyEvalEngineExpr(t *testing.T) { p0 := plus(p11, p12) expr := SimplifyExpr(p0, func(expr sqlparser.Expr) bool { - local, err := evalengine.TranslateEx(expr, nil, true) + local, err := evalengine.Translate(expr, nil) if err != nil { return false } @@ -119,13 +130,13 @@ func TestSimplifyEvalEngineExpr(t *testing.T) { if err != nil { return false } - toInt64, err := res.Value().ToInt64() + toInt64, err := res.Value(collations.Default()).ToInt64() if err != nil { return false } return toInt64 >= 8 }) - log.Infof("simplest expr to evaluate to >= 8: [%s], started from: [%s]", sqlparser.String(expr), sqlparser.String(p0)) + log.Errorf("simplest expr to evaluate to >= 8: [%s], started from: [%s]", sqlparser.String(expr), sqlparser.String(p0)) } func plus(a, b sqlparser.Expr) sqlparser.Expr { diff --git a/go/vt/vtgate/status.go b/go/vt/vtgate/status.go index 326e3da1a49..853d795b17e 100644 --- a/go/vt/vtgate/status.go +++ b/go/vt/vtgate/status.go @@ -53,7 +53,6 @@ const ( - @@ -64,7 +63,6 @@ const ( - diff --git a/go/vt/vtgate/status_test.go b/go/vt/vtgate/status_test.go index 06c624f2cfe..6ef108066f2 100644 --- a/go/vt/vtgate/status_test.go +++ b/go/vt/vtgate/status_test.go @@ -18,11 +18,12 @@ package vtgate import ( "bytes" - "html/template" "reflect" "testing" "time" + "github.com/google/safehtml/template" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) diff --git a/go/vt/vtgate/tabletgateway.go b/go/vt/vtgate/tabletgateway.go index c437721d58d..de63da87907 100644 --- a/go/vt/vtgate/tabletgateway.go +++ b/go/vt/vtgate/tabletgateway.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "math/rand" + "runtime/debug" "sort" "sync" "sync/atomic" @@ -58,6 +59,7 @@ func init() { servenv.OnParseFor("vtgate", func(fs *pflag.FlagSet) { fs.StringVar(&CellsToWatch, "cells_to_watch", "", "comma-separated list of cells for watching tablets") fs.StringVar(&bufferImplementation, "buffer_implementation", "keyspace_events", "Allowed values: healthcheck (legacy implementation), keyspace_events (default)") + fs.MarkDeprecated("buffer_implementation", "The 'healthcheck' buffer implementation has been removed in v18 and this option will be removed in v19") fs.DurationVar(&initialTabletTimeout, "gateway_initial_tablet_timeout", 30*time.Second, "At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type") fs.IntVar(&retryCount, "retry-count", 2, "retry count") }) @@ -116,57 +118,31 @@ func NewTabletGateway(ctx context.Context, hc discovery.HealthCheck, serv srvtop func (gw *TabletGateway) setupBuffering(ctx context.Context) { cfg := buffer.NewConfigFromFlags() + if !cfg.Enabled { + log.Info("Query buffering is disabled") + return + } gw.buffer = buffer.New(cfg) - switch bufferImplementation { - case "healthcheck": - // subscribe to healthcheck updates so that buffer can be notified if needed - // we run this in a separate goroutine so that normal processing doesn't need to block - hcChan := gw.hc.Subscribe() - bufferCtx, bufferCancel := context.WithCancel(ctx) - - go func(ctx context.Context, c chan *discovery.TabletHealth, buffer *buffer.Buffer) { - defer bufferCancel() - - for { - select { - case <-ctx.Done(): - return - case result := <-hcChan: - if result == nil { - return - } - if result.Target.TabletType == topodatapb.TabletType_PRIMARY { - buffer.ProcessPrimaryHealth(result) - } - } - } - }(bufferCtx, hcChan, gw.buffer) - - case "keyspace_events": - gw.kev = discovery.NewKeyspaceEventWatcher(ctx, gw.srvTopoServer, gw.hc, gw.localCell) - ksChan := gw.kev.Subscribe() - bufferCtx, bufferCancel := context.WithCancel(ctx) + gw.kev = discovery.NewKeyspaceEventWatcher(ctx, gw.srvTopoServer, gw.hc, gw.localCell) + ksChan := gw.kev.Subscribe() + bufferCtx, bufferCancel := context.WithCancel(ctx) - go func(ctx context.Context, c chan *discovery.KeyspaceEvent, buffer *buffer.Buffer) { - defer bufferCancel() + go func(ctx context.Context, c chan *discovery.KeyspaceEvent, buffer *buffer.Buffer) { + defer bufferCancel() - for { - select { - case <-ctx.Done(): + for { + select { + case <-ctx.Done(): + return + case result := <-ksChan: + if result == nil { return - case result := <-ksChan: - if result == nil { - return - } - buffer.HandleKeyspaceEvent(result) } + buffer.HandleKeyspaceEvent(result) } - }(bufferCtx, ksChan, gw.buffer) - - default: - log.Exitf("unknown buffering implementation for TabletGateway: %q", bufferImplementation) - } + } + }(bufferCtx, ksChan, gw.buffer) } // QueryServiceByAlias satisfies the Gateway interface @@ -175,6 +151,14 @@ func (gw *TabletGateway) QueryServiceByAlias(alias *topodatapb.TabletAlias, targ return queryservice.Wrap(qs, gw.withShardError), NewShardError(err, target) } +// GetServingKeyspaces returns list of serving keyspaces. +func (gw *TabletGateway) GetServingKeyspaces() []string { + if gw.kev == nil { + return nil + } + return gw.kev.GetServingKeyspaces() +} + // RegisterStats registers the stats to export the lag since the last refresh // and the checksum of the topology func (gw *TabletGateway) RegisterStats() { @@ -182,9 +166,9 @@ func (gw *TabletGateway) RegisterStats() { } // WaitForTablets is part of the Gateway interface. -func (gw *TabletGateway) WaitForTablets(tabletTypesToWait []topodatapb.TabletType) (err error) { +func (gw *TabletGateway) WaitForTablets(ctx context.Context, tabletTypesToWait []topodatapb.TabletType) (err error) { log.Infof("Gateway waiting for serving tablets of types %v ...", tabletTypesToWait) - ctx, cancel := context.WithTimeout(context.Background(), initialTabletTimeout) + ctx, cancel := context.WithTimeout(ctx, initialTabletTimeout) defer cancel() defer func() { @@ -217,7 +201,9 @@ func (gw *TabletGateway) WaitForTablets(tabletTypesToWait []topodatapb.TabletTyp // Close shuts down underlying connections. // This function hides the inner implementation. func (gw *TabletGateway) Close(_ context.Context) error { - gw.buffer.Shutdown() + if gw.buffer != nil { + gw.buffer.Shutdown() + } return gw.hc.Close() } @@ -244,6 +230,7 @@ func (gw *TabletGateway) CacheStatus() TabletCacheStatusList { // withShardError should not be combined with withRetry. func (gw *TabletGateway) withRetry(ctx context.Context, target *querypb.Target, _ queryservice.QueryService, _ string, inTransaction bool, inner func(ctx context.Context, target *querypb.Target, conn queryservice.QueryService) (bool, error)) error { + // for transactions, we connect to a specific tablet instead of letting gateway choose one if inTransaction && target.TabletType != topodatapb.TabletType_PRIMARY { return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "tabletGateway's query service can only be used for non-transactional queries on replicas") @@ -267,12 +254,11 @@ func (gw *TabletGateway) withRetry(ctx context.Context, target *querypb.Target, bufferedOnce := false for i := 0; i < gw.retryCount+1; i++ { - // Check if we should buffer PRIMARY queries which failed due to an ongoing - // failover. + // Check if we should buffer PRIMARY queries which failed due to an ongoing failover. // Note: We only buffer once and only "!inTransaction" queries i.e. // a) no transaction is necessary (e.g. critical reads) or // b) no transaction was created yet. - if !bufferedOnce && !inTransaction && target.TabletType == topodatapb.TabletType_PRIMARY { + if gw.buffer != nil && !bufferedOnce && !inTransaction && target.TabletType == topodatapb.TabletType_PRIMARY { // The next call blocks if we should buffer during a failover. retryDone, bufferErr := gw.buffer.WaitForFailoverEnd(ctx, target.Keyspace, target.Shard, err) @@ -297,12 +283,20 @@ func (gw *TabletGateway) withRetry(ctx context.Context, target *querypb.Target, // if we have a keyspace event watcher, check if the reason why our primary is not available is that it's currently being resharded // or if a reparent operation is in progress. if kev := gw.kev; kev != nil { - if kev.TargetIsBeingResharded(target) { - err = vterrors.Errorf(vtrpcpb.Code_CLUSTER_EVENT, "current keyspace is being resharded") + if kev.TargetIsBeingResharded(ctx, target) { + log.V(2).Infof("current keyspace is being resharded, retrying: %s: %s", target.Keyspace, debug.Stack()) + err = vterrors.Errorf(vtrpcpb.Code_CLUSTER_EVENT, buffer.ClusterEventReshardingInProgress) continue } - if kev.PrimaryIsNotServing(target) { - err = vterrors.Errorf(vtrpcpb.Code_CLUSTER_EVENT, "primary is not serving, there is a reparent operation in progress") + primary, notServing := kev.PrimaryIsNotServing(ctx, target) + if notServing { + err = vterrors.Errorf(vtrpcpb.Code_CLUSTER_EVENT, buffer.ClusterEventReparentInProgress) + continue + } + // if primary is serving, but we initially found no tablet, we're in an inconsistent state + // we then retry the entire loop + if primary != nil { + err = vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "inconsistent state detected, primary is serving but initially found no available tablet") continue } } @@ -311,6 +305,7 @@ func (gw *TabletGateway) withRetry(ctx context.Context, target *querypb.Target, err = vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "no healthy tablet available for '%s'", target.String()) break } + gw.shuffleTablets(gw.localCell, tablets) var th *discovery.TabletHealth diff --git a/go/vt/vtgate/tabletgateway_flaky_test.go b/go/vt/vtgate/tabletgateway_flaky_test.go index 34bb65363be..f625b5599cd 100644 --- a/go/vt/vtgate/tabletgateway_flaky_test.go +++ b/go/vt/vtgate/tabletgateway_flaky_test.go @@ -17,12 +17,15 @@ limitations under the License. package vtgate import ( - "context" "testing" "time" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/utils" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/discovery" querypb "vitess.io/vitess/go/vt/proto/query" @@ -33,11 +36,11 @@ import ( // TestGatewayBufferingWhenPrimarySwitchesServingState is used to test that the buffering mechanism buffers the queries when a primary goes to a non serving state and // stops buffering when the primary is healthy again func TestGatewayBufferingWhenPrimarySwitchesServingState(t *testing.T) { - bufferImplementation = "keyspace_events" + ctx := utils.LeakCheckContext(t) + buffer.SetBufferingModeInTestingEnv(true) defer func() { buffer.SetBufferingModeInTestingEnv(false) - bufferImplementation = "healthcheck" }() keyspace := "ks1" @@ -55,7 +58,8 @@ func TestGatewayBufferingWhenPrimarySwitchesServingState(t *testing.T) { // create a new fake health check. We want to check the buffering code which uses Subscribe, so we must also pass a channel hc := discovery.NewFakeHealthCheck(make(chan *discovery.TabletHealth)) // create a new tablet gateway - tg := NewTabletGateway(context.Background(), hc, ts, "cell") + tg := NewTabletGateway(ctx, hc, ts, "cell") + defer tg.Close(ctx) // add a primary tabelt which is serving sbc := hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) @@ -63,8 +67,9 @@ func TestGatewayBufferingWhenPrimarySwitchesServingState(t *testing.T) { // add a result to the sandbox connection sqlResult1 := &sqltypes.Result{ Fields: []*querypb.Field{{ - Name: "col1", - Type: sqltypes.VarChar, + Name: "col1", + Type: sqltypes.VarChar, + Charset: uint32(collations.Default()), }}, RowsAffected: 1, Rows: [][]sqltypes.Value{{ @@ -74,7 +79,7 @@ func TestGatewayBufferingWhenPrimarySwitchesServingState(t *testing.T) { sbc.SetResults([]*sqltypes.Result{sqlResult1}) // run a query that we indeed get the result added to the sandbox connection back - res, err := tg.Execute(context.Background(), target, "query", nil, 0, 0, nil) + res, err := tg.Execute(ctx, target, "query", nil, 0, 0, nil) require.NoError(t, err) require.Equal(t, res, sqlResult1) @@ -92,7 +97,7 @@ func TestGatewayBufferingWhenPrimarySwitchesServingState(t *testing.T) { // execute the query in a go routine since it should be buffered, and check that it eventually succeed queryChan := make(chan struct{}) go func() { - res, err = tg.Execute(context.Background(), target, "query", nil, 0, 0, nil) + res, err = tg.Execute(ctx, target, "query", nil, 0, 0, nil) queryChan <- struct{}{} }() @@ -116,11 +121,11 @@ func TestGatewayBufferingWhenPrimarySwitchesServingState(t *testing.T) { // TestGatewayBufferingWhileReparenting is used to test that the buffering mechanism buffers the queries when a PRS happens // the healthchecks that happen during a PRS are simulated in this test func TestGatewayBufferingWhileReparenting(t *testing.T) { - bufferImplementation = "keyspace_events" + ctx := utils.LeakCheckContext(t) + buffer.SetBufferingModeInTestingEnv(true) defer func() { buffer.SetBufferingModeInTestingEnv(false) - bufferImplementation = "healthcheck" }() keyspace := "ks1" @@ -140,7 +145,8 @@ func TestGatewayBufferingWhileReparenting(t *testing.T) { // create a new fake health check. We want to check the buffering code which uses Subscribe, so we must also pass a channel hc := discovery.NewFakeHealthCheck(make(chan *discovery.TabletHealth)) // create a new tablet gateway - tg := NewTabletGateway(context.Background(), hc, ts, "cell") + tg := NewTabletGateway(ctx, hc, ts, "cell") + defer tg.Close(ctx) // add a primary tabelt which is serving sbc := hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) @@ -150,8 +156,9 @@ func TestGatewayBufferingWhileReparenting(t *testing.T) { // add a result to the sandbox connection sqlResult1 := &sqltypes.Result{ Fields: []*querypb.Field{{ - Name: "col1", - Type: sqltypes.VarChar, + Name: "col1", + Type: sqltypes.VarChar, + Charset: uint32(collations.Default()), }}, RowsAffected: 1, Rows: [][]sqltypes.Value{{ @@ -162,7 +169,7 @@ func TestGatewayBufferingWhileReparenting(t *testing.T) { // run a query that we indeed get the result added to the sandbox connection back // this also checks that the query reaches the primary tablet and not the replica - res, err := tg.Execute(context.Background(), target, "query", nil, 0, 0, nil) + res, err := tg.Execute(ctx, target, "query", nil, 0, 0, nil) require.NoError(t, err) require.Equal(t, res, sqlResult1) @@ -185,15 +192,22 @@ func TestGatewayBufferingWhileReparenting(t *testing.T) { hc.Broadcast(primaryTablet) // set the serving type for the primary tablet false and broadcast it so that the buffering code registers this change hc.SetServing(primaryTablet, false) + // We call the broadcast twice to ensure that the change has been processed by the keyspace event watcher. + // The second broadcast call is blocking until the first one has been processed. + hc.Broadcast(primaryTablet) hc.Broadcast(primaryTablet) + require.Len(t, tg.hc.GetHealthyTabletStats(target), 0, "GetHealthyTabletStats has tablets even though it shouldn't") + _, isNotServing := tg.kev.PrimaryIsNotServing(ctx, target) + require.True(t, isNotServing) + // add a result to the sandbox connection of the new primary sbcReplica.SetResults([]*sqltypes.Result{sqlResult1}) // execute the query in a go routine since it should be buffered, and check that it eventually succeed queryChan := make(chan struct{}) go func() { - res, err = tg.Execute(context.Background(), target, "query", nil, 0, 0, nil) + res, err = tg.Execute(ctx, target, "query", nil, 0, 0, nil) queryChan <- struct{}{} }() @@ -208,11 +222,111 @@ func TestGatewayBufferingWhileReparenting(t *testing.T) { hc.SetServing(replicaTablet, true) hc.Broadcast(replicaTablet) + timeout := time.After(1 * time.Minute) +outer: + for { + select { + case <-timeout: + require.Fail(t, "timed out - could not verify the new primary") + case <-time.After(10 * time.Millisecond): + newPrimary, notServing := tg.kev.PrimaryIsNotServing(ctx, target) + if newPrimary != nil && newPrimary.Uid == 1 && !notServing { + break outer + } + } + } + // wait for the query to execute before checking for results select { case <-queryChan: require.NoError(t, err) - require.Equal(t, res, sqlResult1) + require.Equal(t, sqlResult1, res) + case <-time.After(15 * time.Second): + t.Fatalf("timed out waiting for query to execute") + } +} + +// TestInconsistentStateDetectedBuffering simulates the case where we have used up all our buffering retries and in the +// last attempt we are in an inconsistent state. Meaning that we initially thought that there are no available tablets +// but after a moment the primary is found to be serving. +// This is inconsistent and we want to fail properly. This scenario used to panic since no error and no results were +// returned. +func TestInconsistentStateDetectedBuffering(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + buffer.SetBufferingModeInTestingEnv(true) + defer func() { + buffer.SetBufferingModeInTestingEnv(false) + }() + + keyspace := "ks1" + shard := "-80" + tabletType := topodatapb.TabletType_PRIMARY + host := "1.1.1.1" + port := int32(1001) + target := &querypb.Target{ + Keyspace: keyspace, + Shard: shard, + TabletType: tabletType, + } + + ts := &fakeTopoServer{} + // create a new fake health check. We want to check the buffering code which uses Subscribe, so we must also pass a channel + hc := discovery.NewFakeHealthCheck(make(chan *discovery.TabletHealth)) + // create a new tablet gateway + tg := NewTabletGateway(ctx, hc, ts, "cell") + defer tg.Close(ctx) + + tg.retryCount = 0 + + // add a primary tabelt which is serving + sbc := hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) + + // add a result to the sandbox connection + sqlResult1 := &sqltypes.Result{ + Fields: []*querypb.Field{{ + Name: "col1", + Type: sqltypes.VarChar, + Charset: uint32(collations.Default()), + }}, + RowsAffected: 1, + Rows: [][]sqltypes.Value{{ + sqltypes.MakeTrusted(sqltypes.VarChar, []byte("bb")), + }}, + } + sbc.SetResults([]*sqltypes.Result{sqlResult1}) + + // get the primary and replica tablet from the fake health check + tablets := hc.GetAllTablets() + var primaryTablet *topodatapb.Tablet + + for _, tablet := range tablets { + if tablet.Type == topodatapb.TabletType_PRIMARY { + primaryTablet = tablet + } + } + require.NotNil(t, primaryTablet) + hc.SetServing(primaryTablet, true) + hc.Broadcast(primaryTablet) + hc.SetServing(primaryTablet, false) + + var res *sqltypes.Result + var err error + queryChan := make(chan struct{}) + go func() { + res, err = tg.Execute(ctx, target, "query", nil, 0, 0, nil) + queryChan <- struct{}{} + }() + + select { + case <-queryChan: + require.Nil(t, res) + require.Error(t, err) + // depending on whether the health check ticks before or after the buffering code, we might get different errors + if !(err.Error() == "target: ks1.-80.primary: inconsistent state detected, primary is serving but initially found no available tablet" || + err.Error() == "target: ks1.-80.primary: no healthy tablet available for 'keyspace:\"ks1\" shard:\"-80\" tablet_type:PRIMARY'") { + t.Fatalf("wrong error returned: %v", err) + } case <-time.After(15 * time.Second): t.Fatalf("timed out waiting for query to execute") } diff --git a/go/vt/vtgate/tabletgateway_test.go b/go/vt/vtgate/tabletgateway_test.go index 99388551ebf..32d18dcc9ab 100644 --- a/go/vt/vtgate/tabletgateway_test.go +++ b/go/vt/vtgate/tabletgateway_test.go @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/discovery" querypb "vitess.io/vitess/go/vt/proto/query" @@ -35,19 +37,21 @@ import ( ) func TestTabletGatewayExecute(t *testing.T) { - testTabletGatewayGeneric(t, func(tg *TabletGateway, target *querypb.Target) error { - _, err := tg.Execute(context.Background(), target, "query", nil, 0, 0, nil) + ctx := utils.LeakCheckContext(t) + testTabletGatewayGeneric(t, ctx, func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error { + _, err := tg.Execute(ctx, target, "query", nil, 0, 0, nil) return err }) - testTabletGatewayTransact(t, func(tg *TabletGateway, target *querypb.Target) error { - _, err := tg.Execute(context.Background(), target, "query", nil, 1, 0, nil) + testTabletGatewayTransact(t, ctx, func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error { + _, err := tg.Execute(ctx, target, "query", nil, 1, 0, nil) return err }) } func TestTabletGatewayExecuteStream(t *testing.T) { - testTabletGatewayGeneric(t, func(tg *TabletGateway, target *querypb.Target) error { - err := tg.StreamExecute(context.Background(), target, "query", nil, 0, 0, nil, func(qr *sqltypes.Result) error { + ctx := utils.LeakCheckContext(t) + testTabletGatewayGeneric(t, ctx, func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error { + err := tg.StreamExecute(ctx, target, "query", nil, 0, 0, nil, func(qr *sqltypes.Result) error { return nil }) return err @@ -55,36 +59,44 @@ func TestTabletGatewayExecuteStream(t *testing.T) { } func TestTabletGatewayBegin(t *testing.T) { - testTabletGatewayGeneric(t, func(tg *TabletGateway, target *querypb.Target) error { - _, err := tg.Begin(context.Background(), target, nil) + ctx := utils.LeakCheckContext(t) + testTabletGatewayGeneric(t, ctx, func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error { + _, err := tg.Begin(ctx, target, nil) return err }) } func TestTabletGatewayCommit(t *testing.T) { - testTabletGatewayTransact(t, func(tg *TabletGateway, target *querypb.Target) error { - _, err := tg.Commit(context.Background(), target, 1) + ctx := utils.LeakCheckContext(t) + testTabletGatewayTransact(t, ctx, func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error { + _, err := tg.Commit(ctx, target, 1) return err }) } func TestTabletGatewayRollback(t *testing.T) { - testTabletGatewayTransact(t, func(tg *TabletGateway, target *querypb.Target) error { - _, err := tg.Rollback(context.Background(), target, 1) + ctx := utils.LeakCheckContext(t) + testTabletGatewayTransact(t, ctx, func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error { + _, err := tg.Rollback(ctx, target, 1) return err }) } func TestTabletGatewayBeginExecute(t *testing.T) { - testTabletGatewayGeneric(t, func(tg *TabletGateway, target *querypb.Target) error { - _, _, err := tg.BeginExecute(context.Background(), target, nil, "query", nil, 0, nil) + ctx := utils.LeakCheckContext(t) + testTabletGatewayGeneric(t, ctx, func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error { + _, _, err := tg.BeginExecute(ctx, target, nil, "query", nil, 0, nil) return err }) } func TestTabletGatewayShuffleTablets(t *testing.T) { + ctx := utils.LeakCheckContext(t) + hc := discovery.NewFakeHealthCheck(nil) - tg := NewTabletGateway(context.Background(), hc, nil, "local") + ts := &fakeTopoServer{} + tg := NewTabletGateway(ctx, hc, ts, "local") + defer tg.Close(ctx) ts1 := &discovery.TabletHealth{ Tablet: topo.NewTablet(1, "cell1", "host1"), @@ -141,6 +153,8 @@ func TestTabletGatewayShuffleTablets(t *testing.T) { } func TestTabletGatewayReplicaTransactionError(t *testing.T) { + ctx := utils.LeakCheckContext(t) + keyspace := "ks" shard := "0" // transactions on REPLICA are not allowed from tabletgateway @@ -154,14 +168,16 @@ func TestTabletGatewayReplicaTransactionError(t *testing.T) { TabletType: tabletType, } hc := discovery.NewFakeHealthCheck(nil) - tg := NewTabletGateway(context.Background(), hc, nil, "cell") + ts := &fakeTopoServer{} + tg := NewTabletGateway(ctx, hc, ts, "cell") + defer tg.Close(ctx) _ = hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) - _, err := tg.Execute(context.Background(), target, "query", nil, 1, 0, nil) + _, err := tg.Execute(ctx, target, "query", nil, 1, 0, nil) verifyContainsError(t, err, "query service can only be used for non-transactional queries on replicas", vtrpcpb.Code_INTERNAL) } -func testTabletGatewayGeneric(t *testing.T, f func(tg *TabletGateway, target *querypb.Target) error) { +func testTabletGatewayGeneric(t *testing.T, ctx context.Context, f func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error) { t.Helper() keyspace := "ks" shard := "0" @@ -174,23 +190,25 @@ func testTabletGatewayGeneric(t *testing.T, f func(tg *TabletGateway, target *qu TabletType: tabletType, } hc := discovery.NewFakeHealthCheck(nil) - tg := NewTabletGateway(context.Background(), hc, nil, "cell") + ts := &fakeTopoServer{} + tg := NewTabletGateway(ctx, hc, ts, "cell") + defer tg.Close(ctx) // no tablet want := []string{"target: ks.0.replica", `no healthy tablet available for 'keyspace:"ks" shard:"0" tablet_type:REPLICA`} - err := f(tg, target) + err := f(ctx, tg, target) verifyShardErrors(t, err, want, vtrpcpb.Code_UNAVAILABLE) // tablet with error hc.Reset() hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, false, 10, fmt.Errorf("no connection")) - err = f(tg, target) + err = f(ctx, tg, target) verifyShardErrors(t, err, want, vtrpcpb.Code_UNAVAILABLE) // tablet without connection hc.Reset() _ = hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, false, 10, nil).Tablet() - err = f(tg, target) + err = f(ctx, tg, target) verifyShardErrors(t, err, want, vtrpcpb.Code_UNAVAILABLE) // retry error @@ -200,7 +218,7 @@ func testTabletGatewayGeneric(t *testing.T, f func(tg *TabletGateway, target *qu sc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 sc2.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 - err = f(tg, target) + err = f(ctx, tg, target) verifyContainsError(t, err, "target: ks.0.replica", vtrpcpb.Code_FAILED_PRECONDITION) // fatal error @@ -209,25 +227,26 @@ func testTabletGatewayGeneric(t *testing.T, f func(tg *TabletGateway, target *qu sc2 = hc.AddTestTablet("cell", host, port+1, keyspace, shard, tabletType, true, 10, nil) sc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 sc2.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 - err = f(tg, target) + err = f(ctx, tg, target) verifyContainsError(t, err, "target: ks.0.replica", vtrpcpb.Code_FAILED_PRECONDITION) // server error - no retry hc.Reset() sc1 = hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) sc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 - err = f(tg, target) + err = f(ctx, tg, target) assert.Equal(t, vtrpcpb.Code_INVALID_ARGUMENT, vterrors.Code(err)) // no failure hc.Reset() hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) - err = f(tg, target) + err = f(ctx, tg, target) assert.NoError(t, err) } -func testTabletGatewayTransact(t *testing.T, f func(tg *TabletGateway, target *querypb.Target) error) { +func testTabletGatewayTransact(t *testing.T, ctx context.Context, f func(ctx context.Context, tg *TabletGateway, target *querypb.Target) error) { t.Helper() + keyspace := "ks" shard := "0" // test with PRIMARY because replica transactions don't use gateway's queryservice @@ -241,7 +260,9 @@ func testTabletGatewayTransact(t *testing.T, f func(tg *TabletGateway, target *q TabletType: tabletType, } hc := discovery.NewFakeHealthCheck(nil) - tg := NewTabletGateway(context.Background(), hc, nil, "cell") + ts := &fakeTopoServer{} + tg := NewTabletGateway(ctx, hc, ts, "cell") + defer tg.Close(ctx) // retry error - no retry sc1 := hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) @@ -249,14 +270,14 @@ func testTabletGatewayTransact(t *testing.T, f func(tg *TabletGateway, target *q sc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 sc2.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 - err := f(tg, target) + err := f(ctx, tg, target) verifyContainsError(t, err, "target: ks.0.primary", vtrpcpb.Code_FAILED_PRECONDITION) // server error - no retry hc.Reset() sc1 = hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) sc1.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 - err = f(tg, target) + err = f(ctx, tg, target) verifyContainsError(t, err, "target: ks.0.primary", vtrpcpb.Code_INVALID_ARGUMENT) } diff --git a/go/vt/vtgate/tx_conn_test.go b/go/vt/vtgate/tx_conn_test.go index f7dff51accd..3fc141c64ac 100644 --- a/go/vt/vtgate/tx_conn_test.go +++ b/go/vt/vtgate/tx_conn_test.go @@ -43,7 +43,9 @@ var queries = []*querypb.BoundQuery{{Sql: "query1"}} var twoQueries = []*querypb.BoundQuery{{Sql: "query1"}, {Sql: "query1"}} func TestTxConnBegin(t *testing.T) { - sc, sbc0, _, rss0, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, _, rss0, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") session := &vtgatepb.Session{} // begin @@ -63,7 +65,9 @@ func TestTxConnBegin(t *testing.T) { } func TestTxConnCommitFailure(t *testing.T) { - sc, sbc0, sbc1, rss0, rss1, rss01 := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, rss1, rss01 := newTestTxConnEnv(t, ctx, "TestTxConn") sc.txConn.mode = vtgatepb.TransactionMode_MULTI // Sequence the executes to ensure commit order @@ -120,7 +124,9 @@ func TestTxConnCommitFailure(t *testing.T) { } func TestTxConnCommitSuccess(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConn") sc.txConn.mode = vtgatepb.TransactionMode_MULTI // Sequence the executes to ensure commit order @@ -171,7 +177,9 @@ func TestTxConnCommitSuccess(t *testing.T) { } func TestTxConnReservedCommitSuccess(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConn") sc.txConn.mode = vtgatepb.TransactionMode_MULTI // Sequence the executes to ensure commit order @@ -253,8 +261,10 @@ func TestTxConnReservedCommitSuccess(t *testing.T) { } func TestTxConnReservedOn2ShardTxOn1ShardAndCommit(t *testing.T) { + ctx := utils.LeakCheckContext(t) + keyspace := "TestTxConn" - sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, keyspace) + sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, keyspace) sc.txConn.mode = vtgatepb.TransactionMode_MULTI // Sequence the executes to ensure shard session order @@ -346,8 +356,10 @@ func TestTxConnReservedOn2ShardTxOn1ShardAndCommit(t *testing.T) { } func TestTxConnReservedOn2ShardTxOn1ShardAndRollback(t *testing.T) { + ctx := utils.LeakCheckContext(t) + keyspace := "TestTxConn" - sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, keyspace) + sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, keyspace) sc.txConn.mode = vtgatepb.TransactionMode_MULTI // Sequence the executes to ensure shard session order @@ -439,7 +451,9 @@ func TestTxConnReservedOn2ShardTxOn1ShardAndRollback(t *testing.T) { } func TestTxConnCommitOrderFailure1(t *testing.T) { - sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn") sc.txConn.mode = vtgatepb.TransactionMode_MULTI queries := []*querypb.BoundQuery{{Sql: "query1"}} @@ -470,7 +484,9 @@ func TestTxConnCommitOrderFailure1(t *testing.T) { } func TestTxConnCommitOrderFailure2(t *testing.T) { - sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn") sc.txConn.mode = vtgatepb.TransactionMode_MULTI queries := []*querypb.BoundQuery{{ @@ -502,7 +518,9 @@ func TestTxConnCommitOrderFailure2(t *testing.T) { } func TestTxConnCommitOrderFailure3(t *testing.T) { - sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn") sc.txConn.mode = vtgatepb.TransactionMode_MULTI queries := []*querypb.BoundQuery{{ @@ -542,7 +560,9 @@ func TestTxConnCommitOrderFailure3(t *testing.T) { } func TestTxConnCommitOrderSuccess(t *testing.T) { - sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn") sc.txConn.mode = vtgatepb.TransactionMode_MULTI queries := []*querypb.BoundQuery{{ @@ -638,7 +658,9 @@ func TestTxConnCommitOrderSuccess(t *testing.T) { } func TestTxConnReservedCommitOrderSuccess(t *testing.T) { - sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConn") sc.txConn.mode = vtgatepb.TransactionMode_MULTI queries := []*querypb.BoundQuery{{ @@ -779,7 +801,9 @@ func TestTxConnReservedCommitOrderSuccess(t *testing.T) { } func TestTxConnCommit2PC(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TestTxConnCommit2PC") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PC") session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -795,7 +819,9 @@ func TestTxConnCommit2PC(t *testing.T) { } func TestTxConnCommit2PCOneParticipant(t *testing.T) { - sc, sbc0, _, rss0, _, _ := newTestTxConnEnv(t, "TestTxConnCommit2PCOneParticipant") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, _, rss0, _, _ := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCOneParticipant") session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) session.TransactionMode = vtgatepb.TransactionMode_TWOPC @@ -805,7 +831,9 @@ func TestTxConnCommit2PCOneParticipant(t *testing.T) { } func TestTxConnCommit2PCCreateTransactionFail(t *testing.T) { - sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, "TestTxConnCommit2PCCreateTransactionFail") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, rss1, _ := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCCreateTransactionFail") session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -827,7 +855,9 @@ func TestTxConnCommit2PCCreateTransactionFail(t *testing.T) { } func TestTxConnCommit2PCPrepareFail(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TestTxConnCommit2PCPrepareFail") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCPrepareFail") session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -847,7 +877,9 @@ func TestTxConnCommit2PCPrepareFail(t *testing.T) { } func TestTxConnCommit2PCStartCommitFail(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TestTxConnCommit2PCStartCommitFail") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCStartCommitFail") session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -867,7 +899,9 @@ func TestTxConnCommit2PCStartCommitFail(t *testing.T) { } func TestTxConnCommit2PCCommitPreparedFail(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TestTxConnCommit2PCCommitPreparedFail") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCCommitPreparedFail") session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -887,7 +921,9 @@ func TestTxConnCommit2PCCommitPreparedFail(t *testing.T) { } func TestTxConnCommit2PCConcludeTransactionFail(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TestTxConnCommit2PCConcludeTransactionFail") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TestTxConnCommit2PCConcludeTransactionFail") session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -907,7 +943,9 @@ func TestTxConnCommit2PCConcludeTransactionFail(t *testing.T) { } func TestTxConnRollback(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TxConnRollback") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TxConnRollback") session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -921,7 +959,9 @@ func TestTxConnRollback(t *testing.T) { } func TestTxConnReservedRollback(t *testing.T) { - sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, "TxConnReservedRollback") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, _, rss01 := newTestTxConnEnv(t, ctx, "TxConnReservedRollback") session := NewSafeSession(&vtgatepb.Session{InTransaction: true, InReservedConn: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -956,7 +996,9 @@ func TestTxConnReservedRollback(t *testing.T) { } func TestTxConnReservedRollbackFailure(t *testing.T) { - sc, sbc0, sbc1, rss0, rss1, rss01 := newTestTxConnEnv(t, "TxConnReservedRollback") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, rss0, rss1, rss01 := newTestTxConnEnv(t, ctx, "TxConnReservedRollback") session := NewSafeSession(&vtgatepb.Session{InTransaction: true, InReservedConn: true}) sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) @@ -985,7 +1027,9 @@ func TestTxConnReservedRollbackFailure(t *testing.T) { } func TestTxConnResolveOnPrepare(t *testing.T) { - sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.ReadTransactionResults = []*querypb.TransactionMetadata{{ @@ -1006,7 +1050,9 @@ func TestTxConnResolveOnPrepare(t *testing.T) { } func TestTxConnResolveOnRollback(t *testing.T) { - sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.ReadTransactionResults = []*querypb.TransactionMetadata{{ @@ -1027,7 +1073,9 @@ func TestTxConnResolveOnRollback(t *testing.T) { } func TestTxConnResolveOnCommit(t *testing.T) { - sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.ReadTransactionResults = []*querypb.TransactionMetadata{{ @@ -1048,7 +1096,9 @@ func TestTxConnResolveOnCommit(t *testing.T) { } func TestTxConnResolveInvalidDTID(t *testing.T) { - sc, _, _, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, _, _, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") err := sc.txConn.Resolve(ctx, "abcd") want := "invalid parts in dtid: abcd" @@ -1056,7 +1106,9 @@ func TestTxConnResolveInvalidDTID(t *testing.T) { } func TestTxConnResolveReadTransactionFail(t *testing.T) { - sc, sbc0, _, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, _, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 @@ -1067,7 +1119,9 @@ func TestTxConnResolveReadTransactionFail(t *testing.T) { } func TestTxConnResolveInternalError(t *testing.T) { - sc, sbc0, _, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, _, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.ReadTransactionResults = []*querypb.TransactionMetadata{{ @@ -1086,7 +1140,9 @@ func TestTxConnResolveInternalError(t *testing.T) { } func TestTxConnResolveSetRollbackFail(t *testing.T) { - sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.ReadTransactionResults = []*querypb.TransactionMetadata{{ @@ -1110,7 +1166,9 @@ func TestTxConnResolveSetRollbackFail(t *testing.T) { } func TestTxConnResolveRollbackPreparedFail(t *testing.T) { - sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.ReadTransactionResults = []*querypb.TransactionMetadata{{ @@ -1134,7 +1192,9 @@ func TestTxConnResolveRollbackPreparedFail(t *testing.T) { } func TestTxConnResolveCommitPreparedFail(t *testing.T) { - sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.ReadTransactionResults = []*querypb.TransactionMetadata{{ @@ -1158,7 +1218,9 @@ func TestTxConnResolveCommitPreparedFail(t *testing.T) { } func TestTxConnResolveConcludeTransactionFail(t *testing.T) { - sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, sbc0, sbc1, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") dtid := "TestTxConn:0:1234" sbc0.ReadTransactionResults = []*querypb.TransactionMetadata{{ @@ -1182,6 +1244,8 @@ func TestTxConnResolveConcludeTransactionFail(t *testing.T) { } func TestTxConnMultiGoSessions(t *testing.T) { + ctx := utils.LeakCheckContext(t) + txc := &TxConn{} input := []*vtgatepb.Session_ShardSession{{ @@ -1249,7 +1313,9 @@ func TestTxConnMultiGoTargets(t *testing.T) { } func TestTxConnAccessModeReset(t *testing.T) { - sc, _, _, _, _, _ := newTestTxConnEnv(t, "TestTxConn") + ctx := utils.LeakCheckContext(t) + + sc, _, _, _, _, _ := newTestTxConnEnv(t, ctx, "TestTxConn") tcases := []struct { name string @@ -1290,14 +1356,14 @@ func TestTxConnAccessModeReset(t *testing.T) { } } -func newTestTxConnEnv(t *testing.T, name string) (sc *ScatterConn, sbc0, sbc1 *sandboxconn.SandboxConn, rss0, rss1, rss01 []*srvtopo.ResolvedShard) { +func newTestTxConnEnv(t *testing.T, ctx context.Context, name string) (sc *ScatterConn, sbc0, sbc1 *sandboxconn.SandboxConn, rss0, rss1, rss01 []*srvtopo.ResolvedShard) { t.Helper() createSandbox(name) hc := discovery.NewFakeHealthCheck(nil) - sc = newTestScatterConn(hc, newSandboxForCells([]string{"aa"}), "aa") + sc = newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") sbc0 = hc.AddTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) sbc1 = hc.AddTestTablet("aa", "1", 1, name, "1", topodatapb.TabletType_PRIMARY, true, 1, nil) - res := srvtopo.NewResolver(newSandboxForCells([]string{"aa"}), sc.gateway, "aa") + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") var err error rss0, err = res.ResolveDestination(ctx, name, topodatapb.TabletType_PRIMARY, key.DestinationShard("0")) require.NoError(t, err) diff --git a/go/vt/vtgate/vcursor_impl.go b/go/vt/vtgate/vcursor_impl.go index 179da80b279..8f65884dba3 100644 --- a/go/vt/vtgate/vcursor_impl.go +++ b/go/vt/vtgate/vcursor_impl.go @@ -19,17 +19,16 @@ package vtgate import ( "context" "fmt" + "io" "sort" "strings" "sync/atomic" - - "vitess.io/vitess/go/vt/vtgate/logstats" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "time" "github.com/google/uuid" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" @@ -39,6 +38,7 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" @@ -49,9 +49,12 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/buffer" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/logstats" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vtgate/vschemaacl" + "vitess.io/vitess/go/vt/vtgate/vtgateservice" ) var _ engine.VCursor = (*vcursorImpl)(nil) @@ -61,7 +64,7 @@ var _ vindexes.VCursor = (*vcursorImpl)(nil) // vcursor_impl needs these facilities to be able to be able to execute queries for vindexes type iExecute interface { - Execute(ctx context.Context, method string, session *SafeSession, s string, vars map[string]*querypb.BindVariable) (*sqltypes.Result, error) + Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, method string, session *SafeSession, s string, vars map[string]*querypb.BindVariable) (*sqltypes.Result, error) ExecuteMultiShard(ctx context.Context, primitive engine.Primitive, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, session *SafeSession, autocommit bool, ignoreMaxMemoryRows bool) (qr *sqltypes.Result, errs []error) StreamExecuteMulti(ctx context.Context, primitive engine.Primitive, query string, rss []*srvtopo.ResolvedShard, vars []map[string]*querypb.BindVariable, session *SafeSession, autocommit bool, callback func(reply *sqltypes.Result) error) []error ExecuteLock(ctx context.Context, rs *srvtopo.ResolvedShard, query *querypb.BoundQuery, session *SafeSession, lockFuncType sqlparser.LockingFuncType) (*sqltypes.Result, error) @@ -79,6 +82,7 @@ type iExecute interface { // TODO: remove when resolver is gone ParseDestinationTarget(targetString string) (string, topodatapb.TabletType, key.Destination, error) VSchema() *vindexes.VSchema + planPrepareStmt(ctx context.Context, vcursor *vcursorImpl, query string) (*engine.Plan, sqlparser.Statement, error) } // VSchemaOperator is an interface to Vschema Operations @@ -186,6 +190,10 @@ func (vc *vcursorImpl) ConnCollation() collations.ID { return vc.collation } +func (vc *vcursorImpl) TimeZone() *time.Location { + return vc.safeSession.TimeZone() +} + // MaxMemoryRows returns the maxMemoryRows flag value. func (vc *vcursorImpl) MaxMemoryRows() int { return maxMemoryRows @@ -336,13 +344,7 @@ func (vc *vcursorImpl) AnyKeyspace() (*vindexes.Keyspace, error) { return nil, errNoDbAvailable } - var keyspaces = make([]*vindexes.Keyspace, 0, len(vc.vschema.Keyspaces)) - for _, ks := range vc.vschema.Keyspaces { - keyspaces = append(keyspaces, ks.Keyspace) - } - sort.Slice(keyspaces, func(i, j int) bool { - return keyspaces[i].Name < keyspaces[j].Name - }) + keyspaces := vc.getSortedServingKeyspaces() // Look for any sharded keyspace if present, otherwise take the first keyspace, // sorted alphabetically @@ -354,18 +356,38 @@ func (vc *vcursorImpl) AnyKeyspace() (*vindexes.Keyspace, error) { return keyspaces[0], nil } +// getSortedServingKeyspaces gets the sorted serving keyspaces +func (vc *vcursorImpl) getSortedServingKeyspaces() []*vindexes.Keyspace { + var keyspaces []*vindexes.Keyspace + + if vc.resolver != nil && vc.resolver.GetGateway() != nil { + keyspaceNames := vc.resolver.GetGateway().GetServingKeyspaces() + for _, ksName := range keyspaceNames { + ks, exists := vc.vschema.Keyspaces[ksName] + if exists { + keyspaces = append(keyspaces, ks.Keyspace) + } + } + } + + if len(keyspaces) == 0 { + for _, ks := range vc.vschema.Keyspaces { + keyspaces = append(keyspaces, ks.Keyspace) + } + } + sort.Slice(keyspaces, func(i, j int) bool { + return keyspaces[i].Name < keyspaces[j].Name + }) + return keyspaces +} + func (vc *vcursorImpl) FirstSortedKeyspace() (*vindexes.Keyspace, error) { if len(vc.vschema.Keyspaces) == 0 { return nil, errNoDbAvailable } - kss := vc.vschema.Keyspaces - keys := make([]string, 0, len(kss)) - for ks := range kss { - keys = append(keys, ks) - } - sort.Strings(keys) + keyspaces := vc.getSortedServingKeyspaces() - return kss[keys[0]].Keyspace, nil + return keyspaces[0], nil } // SysVarSetEnabled implements the ContextVSchema interface @@ -491,7 +513,7 @@ func (vc *vcursorImpl) Execute(ctx context.Context, method string, query string, return nil, err } - qr, err := vc.executor.Execute(ctx, method, session, vc.marginComments.Leading+query+vc.marginComments.Trailing, bindVars) + qr, err := vc.executor.Execute(ctx, nil, method, session, vc.marginComments.Leading+query+vc.marginComments.Trailing, bindVars) vc.setRollbackOnPartialExecIfRequired(err != nil, rollbackOnError) return qr, err @@ -506,7 +528,7 @@ func (vc *vcursorImpl) markSavepoint(ctx context.Context, needsRollbackOnParialE } uID := fmt.Sprintf("_vt%s", strings.ReplaceAll(uuid.NewString(), "-", "_")) spQuery := fmt.Sprintf("%ssavepoint %s%s", vc.marginComments.Leading, uID, vc.marginComments.Trailing) - _, err := vc.executor.Execute(ctx, "MarkSavepoint", vc.safeSession, spQuery, bindVars) + _, err := vc.executor.Execute(ctx, nil, "MarkSavepoint", vc.safeSession, spQuery, bindVars) if err != nil { return err } @@ -841,6 +863,15 @@ func (vc *vcursorImpl) SetPlannerVersion(v plancontext.PlannerVersion) { vc.safeSession.GetOrCreateOptions().PlannerVersion = v } +func (vc *vcursorImpl) SetPriority(priority string) { + if priority != "" { + vc.safeSession.GetOrCreateOptions().Priority = priority + } else if vc.safeSession.Options != nil && vc.safeSession.Options.Priority != "" { + vc.safeSession.Options.Priority = "" + } + +} + // SetConsolidator implements the SessionActions interface func (vc *vcursorImpl) SetConsolidator(consolidator querypb.ExecuteOptions_Consolidator) { // Avoid creating session Options when they do not yet exist and the @@ -851,6 +882,12 @@ func (vc *vcursorImpl) SetConsolidator(consolidator querypb.ExecuteOptions_Conso vc.safeSession.GetOrCreateOptions().Consolidator = consolidator } +func (vc *vcursorImpl) SetWorkloadName(workloadName string) { + if workloadName != "" { + vc.safeSession.GetOrCreateOptions().WorkloadName = workloadName + } +} + // SetFoundRows implements the SessionActions interface func (vc *vcursorImpl) SetFoundRows(foundRows uint64) { vc.safeSession.FoundRows = foundRows @@ -867,6 +904,16 @@ func (vc *vcursorImpl) GetDDLStrategy() string { return vc.safeSession.GetDDLStrategy() } +// SetMigrationContext implements the SessionActions interface +func (vc *vcursorImpl) SetMigrationContext(migrationContext string) { + vc.safeSession.SetMigrationContext(migrationContext) +} + +// GetMigrationContext implements the SessionActions interface +func (vc *vcursorImpl) GetMigrationContext() string { + return vc.safeSession.GetMigrationContext() +} + // GetSessionUUID implements the SessionActions interface func (vc *vcursorImpl) GetSessionUUID() string { return vc.safeSession.GetSessionUUID() @@ -956,7 +1003,7 @@ func (vc *vcursorImpl) ErrorIfShardedF(ks *vindexes.Keyspace, warn, errFormat st func (vc *vcursorImpl) WarnUnshardedOnly(format string, params ...any) { if vc.warnShardedOnly { vc.warnings = append(vc.warnings, &querypb.QueryWarning{ - Code: uint32(mysql.ERNotSupportedYet), + Code: uint32(sqlerror.ERNotSupportedYet), Message: fmt.Sprintf(format, params...), }) } @@ -968,14 +1015,21 @@ func (vc *vcursorImpl) PlannerWarning(message string) { return } vc.warnings = append(vc.warnings, &querypb.QueryWarning{ - Code: uint32(mysql.ERNotSupportedYet), + Code: uint32(sqlerror.ERNotSupportedYet), Message: message, }) } // ForeignKeyMode implements the VCursor interface -func (vc *vcursorImpl) ForeignKeyMode() string { - return strings.ToLower(foreignKeyMode) +func (vc *vcursorImpl) ForeignKeyMode(keyspace string) (vschemapb.Keyspace_ForeignKeyMode, error) { + if strings.ToLower(foreignKeyMode) == "disallow" { + return vschemapb.Keyspace_FK_DISALLOW, nil + } + ks := vc.vschema.Keyspaces[keyspace] + if ks == nil { + return 0, vterrors.VT14004(keyspace) + } + return ks.ForeignKeyMode, nil } // ParseDestinationTarget parses destination target string and sets default keyspace if possible. @@ -990,7 +1044,12 @@ func parseDestinationTarget(targetString string, vschema *vindexes.VSchema) (str return destKeyspace, destTabletType, dest, err } -func (vc *vcursorImpl) planPrefixKey(ctx context.Context) string { +func (vc *vcursorImpl) keyForPlan(ctx context.Context, query string, buf io.StringWriter) { + _, _ = buf.WriteString(vc.keyspace) + _, _ = buf.WriteString(vindexes.TabletTypeSuffix[vc.tabletType]) + _, _ = buf.WriteString("+Collate:") + _, _ = buf.WriteString(collations.Local().LookupName(vc.collation)) + if vc.destination != nil { switch vc.destination.(type) { case key.DestinationKeyspaceID, key.DestinationKeyspaceIDs: @@ -1001,14 +1060,22 @@ func (vc *vcursorImpl) planPrefixKey(ctx context.Context) string { shards[i] = resolved[i].Target.GetShard() } sort.Strings(shards) - return fmt.Sprintf("%s%sKsIDsResolved(%s)", vc.keyspace, vindexes.TabletTypeSuffix[vc.tabletType], strings.Join(shards, ",")) + + _, _ = buf.WriteString("+KsIDsResolved:") + for i, s := range shards { + if i > 0 { + _, _ = buf.WriteString(",") + } + _, _ = buf.WriteString(s) + } } default: - // use destination string (out of the switch) + _, _ = buf.WriteString("+") + _, _ = buf.WriteString(vc.destination.String()) } - return fmt.Sprintf("%s%s%s", vc.keyspace, vindexes.TabletTypeSuffix[vc.tabletType], vc.destination.String()) } - return fmt.Sprintf("%s%s", vc.keyspace, vindexes.TabletTypeSuffix[vc.tabletType]) + _, _ = buf.WriteString("+Query:") + _, _ = buf.WriteString(query) } func (vc *vcursorImpl) GetKeyspace() string { @@ -1089,6 +1156,55 @@ func (vc *vcursorImpl) SetExec(ctx context.Context, name string, value string) e return vc.executor.setVitessMetadata(ctx, name, value) } +func (vc *vcursorImpl) ThrottleApp(ctx context.Context, throttledAppRule *topodatapb.ThrottledAppRule) (err error) { + if throttledAppRule == nil { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ThrottleApp: nil rule") + } + if throttledAppRule.Name == "" { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ThrottleApp: app name is empty") + } + // We don't strictly have to construct a UpdateThrottlerConfigRequest here, because we only populate it + // with a couple variables; we could do without it. However, constructing the request makes the remaining code + // consistent with vtctldclient/command/throttler.go and we prefer this consistency + req := &vtctldatapb.UpdateThrottlerConfigRequest{ + Keyspace: vc.keyspace, + ThrottledApp: throttledAppRule, + } + + update := func(throttlerConfig *topodatapb.ThrottlerConfig) *topodatapb.ThrottlerConfig { + if throttlerConfig == nil { + throttlerConfig = &topodatapb.ThrottlerConfig{} + } + if throttlerConfig.ThrottledApps == nil { + throttlerConfig.ThrottledApps = make(map[string]*topodatapb.ThrottledAppRule) + } + throttlerConfig.ThrottledApps[req.ThrottledApp.Name] = req.ThrottledApp + return throttlerConfig + } + + ctx, unlock, lockErr := vc.topoServer.LockKeyspace(ctx, req.Keyspace, "UpdateThrottlerConfig") + if lockErr != nil { + return lockErr + } + defer unlock(&err) + + ki, err := vc.topoServer.GetKeyspace(ctx, req.Keyspace) + if err != nil { + return err + } + + ki.ThrottlerConfig = update(ki.ThrottlerConfig) + + err = vc.topoServer.UpdateKeyspace(ctx, ki) + if err != nil { + return err + } + + _, err = vc.topoServer.UpdateSrvKeyspaceThrottlerConfig(ctx, req.Keyspace, []string{}, update) + + return err +} + func (vc *vcursorImpl) CanUseSetVar() bool { return sqlparser.IsMySQL80AndAbove() && setVarEnabled } @@ -1132,3 +1248,23 @@ func (vc *vcursorImpl) FindRoutedShard(keyspace, shard string) (keyspaceName str func (vc *vcursorImpl) IsViewsEnabled() bool { return enableViews } + +func (vc *vcursorImpl) GetUDV(name string) *querypb.BindVariable { + return vc.safeSession.GetUDV(name) +} + +func (vc *vcursorImpl) PlanPrepareStatement(ctx context.Context, query string) (*engine.Plan, sqlparser.Statement, error) { + return vc.executor.planPrepareStmt(ctx, vc, query) +} + +func (vc *vcursorImpl) ClearPrepareData(name string) { + delete(vc.safeSession.PrepareStatement, name) +} + +func (vc *vcursorImpl) StorePrepareData(stmtName string, prepareData *vtgatepb.PrepareData) { + vc.safeSession.StorePrepareData(stmtName, prepareData) +} + +func (vc *vcursorImpl) GetPrepareData(stmtName string) *vtgatepb.PrepareData { + return vc.safeSession.GetPrepareData(stmtName) +} diff --git a/go/vt/vtgate/vcursor_impl_test.go b/go/vt/vtgate/vcursor_impl_test.go index 5a22de0faef..3160b8a9b1a 100644 --- a/go/vt/vtgate/vcursor_impl_test.go +++ b/go/vt/vtgate/vcursor_impl_test.go @@ -5,23 +5,21 @@ import ( "encoding/hex" "fmt" "strconv" + "strings" "testing" - querypb "vitess.io/vitess/go/vt/proto/query" + "github.com/stretchr/testify/require" - "vitess.io/vitess/go/vt/proto/vschema" - vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" - - "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/vtgate/vindexes" - "github.com/stretchr/testify/require" - + querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - "vitess.io/vitess/go/vt/sqlparser" ) var _ VSchemaOperator = (*fakeVSchemaOperator)(nil) @@ -30,11 +28,11 @@ type fakeVSchemaOperator struct { vschema *vindexes.VSchema } -func (f fakeVSchemaOperator) GetCurrentSrvVschema() *vschema.SrvVSchema { +func (f fakeVSchemaOperator) GetCurrentSrvVschema() *vschemapb.SrvVSchema { panic("implement me") } -func (f fakeVSchemaOperator) UpdateVSchema(ctx context.Context, ksName string, vschema *vschema.SrvVSchema) error { +func (f fakeVSchemaOperator) UpdateVSchema(ctx context.Context, ksName string, vschema *vschemapb.SrvVSchema) error { panic("implement me") } @@ -259,7 +257,7 @@ func TestSetTarget(t *testing.T) { } } -func TestPlanPrefixKey(t *testing.T) { +func TestKeyForPlan(t *testing.T) { type testCase struct { vschema *vindexes.VSchema targetString string @@ -269,19 +267,19 @@ func TestPlanPrefixKey(t *testing.T) { tests := []testCase{{ vschema: vschemaWith1KS, targetString: "", - expectedPlanPrefixKey: "ks1@primary", + expectedPlanPrefixKey: "ks1@primary+Collate:utf8mb4_0900_ai_ci+Query:SELECT 1", }, { vschema: vschemaWith1KS, targetString: "ks1@replica", - expectedPlanPrefixKey: "ks1@replica", + expectedPlanPrefixKey: "ks1@replica+Collate:utf8mb4_0900_ai_ci+Query:SELECT 1", }, { vschema: vschemaWith1KS, targetString: "ks1:-80", - expectedPlanPrefixKey: "ks1@primaryDestinationShard(-80)", + expectedPlanPrefixKey: "ks1@primary+Collate:utf8mb4_0900_ai_ci+DestinationShard(-80)+Query:SELECT 1", }, { vschema: vschemaWith1KS, targetString: "ks1[deadbeef]", - expectedPlanPrefixKey: "ks1@primaryKsIDsResolved(80-)", + expectedPlanPrefixKey: "ks1@primary+Collate:utf8mb4_0900_ai_ci+KsIDsResolved:80-+Query:SELECT 1", }} for i, tc := range tests { @@ -291,7 +289,10 @@ func TestPlanPrefixKey(t *testing.T) { vc, err := newVCursorImpl(ss, sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil, false, querypb.ExecuteOptions_Gen4) require.NoError(t, err) vc.vschema = tc.vschema - require.Equal(t, tc.expectedPlanPrefixKey, vc.planPrefixKey(context.Background())) + + var buf strings.Builder + vc.keyForPlan(context.Background(), "SELECT 1", &buf) + require.Equal(t, tc.expectedPlanPrefixKey, buf.String()) }) } } diff --git a/go/vt/vtgate/vindexes/binary.go b/go/vt/vtgate/vindexes/binary.go index d4487ee84ab..b78451ca1fb 100644 --- a/go/vt/vtgate/vindexes/binary.go +++ b/go/vt/vtgate/vindexes/binary.go @@ -26,19 +26,24 @@ import ( ) var ( - _ SingleColumn = (*Binary)(nil) - _ Reversible = (*Binary)(nil) - _ Hashing = (*Binary)(nil) + _ SingleColumn = (*Binary)(nil) + _ Reversible = (*Binary)(nil) + _ Hashing = (*Binary)(nil) + _ ParamValidating = (*Binary)(nil) ) // Binary is a vindex that converts binary bits to a keyspace id. type Binary struct { - name string + name string + unknownParams []string } -// NewBinary creates a new Binary. -func NewBinary(name string, _ map[string]string) (Vindex, error) { - return &Binary{name: name}, nil +// newBinary creates a new Binary. +func newBinary(name string, params map[string]string) (Vindex, error) { + return &Binary{ + name: name, + unknownParams: FindUnknownParams(params, nil), + }, nil } // String returns the name of the vindex. @@ -103,6 +108,11 @@ func (*Binary) ReverseMap(_ VCursor, ksids [][]byte) ([]sqltypes.Value, error) { return reverseIds, nil } +// UnknownParams implements the ParamValidating interface. +func (vind *Binary) UnknownParams() []string { + return vind.unknownParams +} + func init() { - Register("binary", NewBinary) + Register("binary", newBinary) } diff --git a/go/vt/vtgate/vindexes/binary_test.go b/go/vt/vtgate/vindexes/binary_test.go index a1675b4b44d..27ae6ceca11 100644 --- a/go/vt/vtgate/vindexes/binary_test.go +++ b/go/vt/vtgate/vindexes/binary_test.go @@ -24,7 +24,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -34,15 +33,58 @@ import ( var binOnlyVindex SingleColumn func init() { - vindex, _ := CreateVindex("binary", "binary_varchar", nil) + vindex, err := CreateVindex("binary", "binary_varchar", nil) + if err != nil { + panic(err) + } binOnlyVindex = vindex.(SingleColumn) } -func TestBinaryInfo(t *testing.T) { - assert.Equal(t, 0, binOnlyVindex.Cost()) - assert.Equal(t, "binary_varchar", binOnlyVindex.String()) - assert.True(t, binOnlyVindex.IsUnique()) - assert.False(t, binOnlyVindex.NeedsVCursor()) +func binaryCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "binary", + vindexName: "binary", + vindexParams: vindexParams, + + expectCost: 0, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "binary", + expectUnknownParams: expectUnknownParams, + } +} + +func TestBinaryCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + binaryCreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + binaryCreateVindexTestCase( + "empty params", + map[string]string{}, + nil, + nil, + ), + binaryCreateVindexTestCase( + "unknown params", + map[string]string{"hello": "world"}, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestBinaryMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/binarymd5.go b/go/vt/vtgate/vindexes/binarymd5.go index 49d823a7ed7..d3495e28deb 100644 --- a/go/vt/vtgate/vindexes/binarymd5.go +++ b/go/vt/vtgate/vindexes/binarymd5.go @@ -26,18 +26,23 @@ import ( ) var ( - _ SingleColumn = (*BinaryMD5)(nil) - _ Hashing = (*BinaryMD5)(nil) + _ SingleColumn = (*BinaryMD5)(nil) + _ Hashing = (*BinaryMD5)(nil) + _ ParamValidating = (*BinaryMD5)(nil) ) // BinaryMD5 is a vindex that hashes binary bits to a keyspace id. type BinaryMD5 struct { - name string + name string + unknownParams []string } -// NewBinaryMD5 creates a new BinaryMD5. -func NewBinaryMD5(name string, _ map[string]string) (Vindex, error) { - return &BinaryMD5{name: name}, nil +// newBinaryMD5 creates a new BinaryMD5. +func newBinaryMD5(name string, params map[string]string) (Vindex, error) { + return &BinaryMD5{ + name: name, + unknownParams: FindUnknownParams(params, nil), + }, nil } // String returns the name of the vindex. @@ -94,11 +99,16 @@ func (vind *BinaryMD5) Hash(id sqltypes.Value) ([]byte, error) { return vMD5Hash(idBytes), nil } +// UnknownParams implements the ParamValidating interface. +func (vind *BinaryMD5) UnknownParams() []string { + return vind.unknownParams +} + func vMD5Hash(source []byte) []byte { sum := md5.Sum(source) return sum[:] } func init() { - Register("binary_md5", NewBinaryMD5) + Register("binary_md5", newBinaryMD5) } diff --git a/go/vt/vtgate/vindexes/binarymd5_test.go b/go/vt/vtgate/vindexes/binarymd5_test.go index c3c5dccb0aa..2c25bc794d4 100644 --- a/go/vt/vtgate/vindexes/binarymd5_test.go +++ b/go/vt/vtgate/vindexes/binarymd5_test.go @@ -23,7 +23,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -33,15 +32,58 @@ import ( var binVindex SingleColumn func init() { - vindex, _ := CreateVindex("binary_md5", "binary_md5_varchar", nil) + vindex, err := CreateVindex("binary_md5", "binary_md5_varchar", nil) + if err != nil { + panic(err) + } binVindex = vindex.(SingleColumn) } -func TestBinaryMD5Info(t *testing.T) { - assert.Equal(t, 1, binVindex.Cost()) - assert.Equal(t, "binary_md5_varchar", binVindex.String()) - assert.True(t, binVindex.IsUnique()) - assert.False(t, binVindex.NeedsVCursor()) +func binaryMD5CreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "binary_md5", + vindexName: "binary_md5", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "binary_md5", + expectUnknownParams: expectUnknownParams, + } +} + +func TestBinaryMD5CreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + binaryMD5CreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + binaryMD5CreateVindexTestCase( + "empty params", + map[string]string{}, + nil, + nil, + ), + binaryMD5CreateVindexTestCase( + "unknown params", + map[string]string{"hello": "world"}, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestBinaryMD5Map(t *testing.T) { @@ -134,3 +176,17 @@ func benchmarkMD5HashBytes(b *testing.B, input []byte) { sinkMD5 = vMD5Hash(input) } } + +func TestCreateVindexBinaryMD5Params(t *testing.T) { + vindex, err := CreateVindex("binary_md5", "binary_md5", nil) + require.NotNil(t, vindex) + unknownParams := vindex.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + require.NoError(t, err) + + vindex, err = CreateVindex("binary_md5", "binary_md5", map[string]string{"hello": "world"}) + require.NotNil(t, vindex) + unknownParams = vindex.(ParamValidating).UnknownParams() + require.Len(t, unknownParams, 1) + require.NoError(t, err) +} diff --git a/go/vt/vtgate/vindexes/cached_size.go b/go/vt/vtgate/vindexes/cached_size.go index 76fe7f4abf5..a97411a6ac8 100644 --- a/go/vt/vtgate/vindexes/cached_size.go +++ b/go/vt/vtgate/vindexes/cached_size.go @@ -29,30 +29,23 @@ type cachedObject interface { CachedSize(alloc bool) int64 } -func (cached *AutoIncrement) CachedSize(alloc bool) int64 { - if cached == nil { - return int64(0) - } - size := int64(0) - if alloc { - size += int64(48) - } - // field Column vitess.io/vitess/go/vt/sqlparser.IdentifierCI - size += cached.Column.CachedSize(false) - // field Sequence *vitess.io/vitess/go/vt/vtgate/vindexes.Table - size += cached.Sequence.CachedSize(true) - return size -} func (cached *Binary) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *BinaryMD5) CachedSize(alloc bool) int64 { @@ -61,10 +54,17 @@ func (cached *BinaryMD5) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *CFC) CachedSize(alloc bool) int64 { @@ -81,20 +81,6 @@ func (cached *CFC) CachedSize(alloc bool) int64 { size += cached.prefixCFC.CachedSize(true) return size } -func (cached *Column) CachedSize(alloc bool) int64 { - if cached == nil { - return int64(0) - } - size := int64(0) - if alloc { - size += int64(64) - } - // field Name vitess.io/vitess/go/vt/sqlparser.IdentifierCI - size += cached.Name.CachedSize(false) - // field CollationName string - size += hack.RuntimeAllocSize(int64(len(cached.CollationName))) - return size -} func (cached *ColumnVindex) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -126,10 +112,17 @@ func (cached *ConsistentLookup) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(8) + size += int64(32) } // field clCommon *vitess.io/vitess/go/vt/vtgate/vindexes.clCommon size += cached.clCommon.CachedSize(true) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *ConsistentLookupUnique) CachedSize(alloc bool) int64 { @@ -138,10 +131,17 @@ func (cached *ConsistentLookupUnique) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(8) + size += int64(32) } // field clCommon *vitess.io/vitess/go/vt/vtgate/vindexes.clCommon size += cached.clCommon.CachedSize(true) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *Hash) CachedSize(alloc bool) int64 { @@ -150,10 +150,17 @@ func (cached *Hash) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *Keyspace) CachedSize(alloc bool) int64 { @@ -174,12 +181,19 @@ func (cached *LookupHash) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(176) + size += int64(192) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal size += cached.lkp.CachedSize(false) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *LookupHashUnique) CachedSize(alloc bool) int64 { @@ -188,12 +202,19 @@ func (cached *LookupHashUnique) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(176) + size += int64(192) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal size += cached.lkp.CachedSize(false) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *LookupNonUnique) CachedSize(alloc bool) int64 { @@ -202,12 +223,19 @@ func (cached *LookupNonUnique) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(176) + size += int64(192) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal size += cached.lkp.CachedSize(false) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *LookupUnicodeLooseMD5Hash) CachedSize(alloc bool) int64 { @@ -216,12 +244,19 @@ func (cached *LookupUnicodeLooseMD5Hash) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(176) + size += int64(192) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal size += cached.lkp.CachedSize(false) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *LookupUnicodeLooseMD5HashUnique) CachedSize(alloc bool) int64 { @@ -230,12 +265,19 @@ func (cached *LookupUnicodeLooseMD5HashUnique) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(176) + size += int64(192) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal size += cached.lkp.CachedSize(false) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *LookupUnique) CachedSize(alloc bool) int64 { @@ -244,12 +286,19 @@ func (cached *LookupUnique) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(176) + size += int64(192) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) // field lkp vitess.io/vitess/go/vt/vtgate/vindexes.lookupInternal size += cached.lkp.CachedSize(false) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } @@ -299,10 +348,17 @@ func (cached *Null) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *Numeric) CachedSize(alloc bool) int64 { @@ -311,10 +367,17 @@ func (cached *Numeric) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } @@ -325,10 +388,14 @@ func (cached *NumericStaticMap) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(24) + size += int64(64) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field hashVdx vitess.io/vitess/go/vt/vtgate/vindexes.Hashing + if cc, ok := cached.hashVdx.(cachedObject); ok { + size += cc.CachedSize(true) + } // field lookup vitess.io/vitess/go/vt/vtgate/vindexes.NumericLookupTable if cached.lookup != nil { size += int64(48) @@ -340,6 +407,13 @@ func (cached *NumericStaticMap) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(numBuckets * 144)) } } + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *RegionExperimental) CachedSize(alloc bool) int64 { @@ -348,10 +422,17 @@ func (cached *RegionExperimental) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(24) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } @@ -362,7 +443,7 @@ func (cached *RegionJSON) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(32) + size += int64(64) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) @@ -380,6 +461,13 @@ func (cached *RegionJSON) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(len(k))) } } + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *ReverseBits) CachedSize(alloc bool) int64 { @@ -388,91 +476,17 @@ func (cached *ReverseBits) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) - return size -} -func (cached *Source) CachedSize(alloc bool) int64 { - if cached == nil { - return int64(0) - } - size := int64(0) - if alloc { - size += int64(32) - } - // field TableName vitess.io/vitess/go/vt/sqlparser.TableName - size += cached.TableName.CachedSize(false) - return size -} - -//go:nocheckptr -func (cached *Table) CachedSize(alloc bool) int64 { - if cached == nil { - return int64(0) - } - size := int64(0) - if alloc { - size += int64(192) - } - // field Type string - size += hack.RuntimeAllocSize(int64(len(cached.Type))) - // field Name vitess.io/vitess/go/vt/sqlparser.IdentifierCS - size += cached.Name.CachedSize(false) - // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace - size += cached.Keyspace.CachedSize(true) - // field ColumnVindexes []*vitess.io/vitess/go/vt/vtgate/vindexes.ColumnVindex - { - size += hack.RuntimeAllocSize(int64(cap(cached.ColumnVindexes)) * int64(8)) - for _, elem := range cached.ColumnVindexes { - size += elem.CachedSize(true) - } - } - // field Ordered []*vitess.io/vitess/go/vt/vtgate/vindexes.ColumnVindex - { - size += hack.RuntimeAllocSize(int64(cap(cached.Ordered)) * int64(8)) - for _, elem := range cached.Ordered { - size += elem.CachedSize(true) - } - } - // field Owned []*vitess.io/vitess/go/vt/vtgate/vindexes.ColumnVindex - { - size += hack.RuntimeAllocSize(int64(cap(cached.Owned)) * int64(8)) - for _, elem := range cached.Owned { - size += elem.CachedSize(true) - } - } - // field AutoIncrement *vitess.io/vitess/go/vt/vtgate/vindexes.AutoIncrement - size += cached.AutoIncrement.CachedSize(true) - // field Columns []vitess.io/vitess/go/vt/vtgate/vindexes.Column + // field unknownParams []string { - size += hack.RuntimeAllocSize(int64(cap(cached.Columns)) * int64(56)) - for _, elem := range cached.Columns { - size += elem.CachedSize(false) - } - } - // field Pinned []byte - { - size += hack.RuntimeAllocSize(int64(cap(cached.Pinned))) - } - // field ReferencedBy map[string]*vitess.io/vitess/go/vt/vtgate/vindexes.Table - if cached.ReferencedBy != nil { - size += int64(48) - hmap := reflect.ValueOf(cached.ReferencedBy) - numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) - numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) - size += hack.RuntimeAllocSize(int64(numOldBuckets * 208)) - if len(cached.ReferencedBy) > 0 || numBuckets > 1 { - size += hack.RuntimeAllocSize(int64(numBuckets * 208)) - } - for k, v := range cached.ReferencedBy { - size += hack.RuntimeAllocSize(int64(len(k))) - size += v.CachedSize(true) + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) } } - // field Source *vitess.io/vitess/go/vt/vtgate/vindexes.Source - size += cached.Source.CachedSize(true) return size } func (cached *UnicodeLooseMD5) CachedSize(alloc bool) int64 { @@ -481,10 +495,17 @@ func (cached *UnicodeLooseMD5) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *UnicodeLooseXXHash) CachedSize(alloc bool) int64 { @@ -493,10 +514,17 @@ func (cached *UnicodeLooseXXHash) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *XXHash) CachedSize(alloc bool) int64 { @@ -505,10 +533,17 @@ func (cached *XXHash) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(48) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *cfcCommon) CachedSize(alloc bool) int64 { @@ -517,7 +552,7 @@ func (cached *cfcCommon) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(48) + size += int64(80) } // field name string size += hack.RuntimeAllocSize(int64(len(cached.name))) @@ -525,6 +560,13 @@ func (cached *cfcCommon) CachedSize(alloc bool) int64 { { size += hack.RuntimeAllocSize(int64(cap(cached.offsets)) * int64(8)) } + // field unknownParams []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.unknownParams)) * int64(16)) + for _, elem := range cached.unknownParams { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *clCommon) CachedSize(alloc bool) int64 { diff --git a/go/vt/vtgate/vindexes/cfc.go b/go/vt/vtgate/vindexes/cfc.go index 0be28f96bc9..af269b1a0d9 100644 --- a/go/vt/vtgate/vindexes/cfc.go +++ b/go/vt/vtgate/vindexes/cfc.go @@ -28,6 +28,20 @@ import ( "vitess.io/vitess/go/vt/vterrors" ) +const ( + cfcParamHash = "hash" + cfcParamOffsets = "offsets" +) + +var ( + _ ParamValidating = (*CFC)(nil) + + cfcParams = []string{ + cfcParamHash, + cfcParamOffsets, + } +) + // CFC is Concatenated Fixed-width Composite Vindex. // // The purpose of this vindex is to shard the rows based on the prefix of @@ -94,15 +108,17 @@ type CFC struct { } type cfcCommon struct { - name string - hash func([]byte) []byte - offsets []int + name string + hash func([]byte) []byte + offsets []int + unknownParams []string } -// NewCFC creates a new CFC vindex -func NewCFC(name string, params map[string]string) (Vindex, error) { +// newCFC creates a new CFC vindex +func newCFC(name string, params map[string]string) (Vindex, error) { ss := &cfcCommon{ - name: name, + name: name, + unknownParams: FindUnknownParams(params, cfcParams), } cfc := &CFC{ cfcCommon: ss, @@ -113,7 +129,7 @@ func NewCFC(name string, params map[string]string) (Vindex, error) { return cfc, nil } - switch h := params["hash"]; h { + switch h := params[cfcParamHash]; h { case "": return cfc, nil case "md5": @@ -125,7 +141,7 @@ func NewCFC(name string, params map[string]string) (Vindex, error) { } var offsets []int - if p := params["offsets"]; p == "" { + if p := params[cfcParamOffsets]; p == "" { return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "CFC vindex requires offsets when hash is defined") } else if err := json.Unmarshal([]byte(p), &offsets); err != nil || !validOffsets(offsets) { return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets %s to CFC vindex %s. expected sorted positive ints in brackets", p, name) @@ -231,6 +247,11 @@ func (vind *cfcCommon) verify(ids []sqltypes.Value, ksids [][]byte) ([]bool, err return out, nil } +// UnknownParams implements the ParamValidating interface. +func (vind *cfcCommon) UnknownParams() []string { + return vind.unknownParams +} + // Verify returns true if ids maps to ksids. func (vind *CFC) Verify(_ context.Context, _ VCursor, ids []sqltypes.Value, ksids [][]byte) ([]bool, error) { return vind.verify(ids, ksids) @@ -406,5 +427,5 @@ func xxhash64(in []byte) []byte { } func init() { - Register("cfc", NewCFC) + Register("cfc", newCFC) } diff --git a/go/vt/vtgate/vindexes/cfc_test.go b/go/vt/vtgate/vindexes/cfc_test.go index 2e4ff7e6d00..553d36de6c6 100644 --- a/go/vt/vtgate/vindexes/cfc_test.go +++ b/go/vt/vtgate/vindexes/cfc_test.go @@ -30,94 +30,119 @@ import ( "vitess.io/vitess/go/vt/vterrors" ) -func assertEqualVtError(t *testing.T, expected, actual error) { - // vterrors.Errorf returns a struct containing a stacktrace, which fails - // assert.EqualError since the stacktrace would be guaranteed to be different. - // so just check the error message - if expected == nil { - assert.NoError(t, actual) - } else { - assert.EqualError(t, actual, expected.Error()) +func cfcCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "cfc", + vindexName: "cfc", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "cfc", + expectUnknownParams: expectUnknownParams, } } -func TestCFCBuildCFC(t *testing.T) { - cases := []struct { - testName string - params map[string]string - err error - offsets []int - }{ - { - testName: "no params", - }, - { - testName: "no hash", - params: map[string]string{}, - }, - { - testName: "no hash", - params: map[string]string{"offsets": "[1,2]"}, - }, - { - testName: "no offsets", - params: map[string]string{"hash": "md5"}, - err: vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "CFC vindex requires offsets when hash is defined"), - }, - { - testName: "invalid offset", - params: map[string]string{"hash": "md5", "offsets": "10,12"}, - err: vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets 10,12 to CFC vindex cfc. expected sorted positive ints in brackets"), - }, - { - testName: "invalid offset", - params: map[string]string{"hash": "md5", "offsets": "xxx"}, - err: vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets xxx to CFC vindex cfc. expected sorted positive ints in brackets"), - }, - { - testName: "empty offsets", - params: map[string]string{"hash": "md5", "offsets": "[]"}, - err: vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets [] to CFC vindex cfc. expected sorted positive ints in brackets"), - }, - { - testName: "unsorted offsets", - params: map[string]string{"hash": "md5", "offsets": "[10,3]"}, - err: vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets [10,3] to CFC vindex cfc. expected sorted positive ints in brackets"), - }, - { - testName: "negative offsets", - params: map[string]string{"hash": "md5", "offsets": "[-1,3]"}, - err: vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets [-1,3] to CFC vindex cfc. expected sorted positive ints in brackets"), - }, - { - testName: "normal", - params: map[string]string{"hash": "md5", "offsets": "[3, 7]"}, - offsets: []int{3, 7}, - }, - { - testName: "duplicated offsets", - params: map[string]string{"hash": "md5", "offsets": "[4,4,6]"}, - err: vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets [4,4,6] to CFC vindex cfc. expected sorted positive ints in brackets"), - }, +func TestCFCCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + cfcCreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + cfcCreateVindexTestCase( + "no hash", + map[string]string{}, + nil, + nil, + ), + cfcCreateVindexTestCase( + "no hash with offsets", + map[string]string{"offsets": "[1,2]"}, + nil, + nil, + ), + cfcCreateVindexTestCase( + "hash with no offsets", + map[string]string{"hash": "md5"}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "CFC vindex requires offsets when hash is defined"), + nil, + ), + cfcCreateVindexTestCase( + "invalid offsets 10,12", + map[string]string{"hash": "md5", "offsets": "10,12"}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets 10,12 to CFC vindex cfc. expected sorted positive ints in brackets"), + nil, + ), + cfcCreateVindexTestCase( + "invalid offsets xxx", + map[string]string{"hash": "md5", "offsets": "xxx"}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets xxx to CFC vindex cfc. expected sorted positive ints in brackets"), + nil, + ), + cfcCreateVindexTestCase( + "empty offsets", + map[string]string{"hash": "md5", "offsets": "[]"}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets [] to CFC vindex cfc. expected sorted positive ints in brackets"), + nil, + ), + cfcCreateVindexTestCase( + "unsorted offsets", + map[string]string{"hash": "md5", "offsets": "[10,3]"}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets [10,3] to CFC vindex cfc. expected sorted positive ints in brackets"), + nil, + ), + cfcCreateVindexTestCase( + "negative offsets", + map[string]string{"hash": "md5", "offsets": "[-1,3]"}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets [-1,3] to CFC vindex cfc. expected sorted positive ints in brackets"), + nil, + ), + cfcCreateVindexTestCase( + "duplicated offsets", + map[string]string{"hash": "md5", "offsets": "[4,4,6]"}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "invalid offsets [4,4,6] to CFC vindex cfc. expected sorted positive ints in brackets"), + nil, + ), + cfcCreateVindexTestCase( + "unknown params", + map[string]string{"hash": "md5", "offsets": "[3, 7]", "hello": "world"}, + nil, + []string{"hello"}, + ), } - for _, tc := range cases { - t.Run(tc.testName, func(t *testing.T) { - cfc, err := NewCFC("cfc", tc.params) - assertEqualVtError(t, tc.err, err) - if cfc != nil { - assert.EqualValues(t, tc.offsets, cfc.(*CFC).offsets) - assert.Equal(t, "cfc", cfc.String()) - assert.Equal(t, 1, cfc.Cost()) - assert.Equal(t, true, cfc.IsUnique()) - assert.Equal(t, false, cfc.NeedsVCursor()) - } - }) - } + testCreateVindexes(t, cases) +} + +func TestCFCCreateVindexOptions(t *testing.T) { + vdx, err := CreateVindex( + "cfc", + "normal", + map[string]string{ + "hash": "md5", + "offsets": "[3, 7]", + }, + ) + require.NotNil(t, vdx) + require.Nil(t, err) + unknownParams := vdx.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + require.EqualValues(t, vdx.(*CFC).offsets, []int{3, 7}) } func makeCFC(t *testing.T, params map[string]string) *CFC { - vind, err := NewCFC("cfc", params) + vind, err := newCFC("cfc", params) require.NoError(t, err) cfc, ok := vind.(*CFC) require.True(t, ok) @@ -225,7 +250,6 @@ func TestCFCComputeKsid(t *testing.T) { } }) } - } func TestCFCComputeKsidXxhash(t *testing.T) { @@ -406,7 +430,6 @@ func TestCFCPrefixMap(t *testing.T) { assert.EqualValues(t, tc.dest, dests[0]) }) } - } func TestCFCPrefixQueryMapNoHash(t *testing.T) { @@ -512,7 +535,6 @@ func TestCFCFindPrefixEscape(t *testing.T) { for _, tc := range cases { assert.EqualValues(t, tc.prefix, string(findPrefix([]byte(tc.str)))) } - } func TestDestinationKeyRangeFromPrefix(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/consistent_lookup.go b/go/vt/vtgate/vindexes/consistent_lookup.go index 3c2166c0aaf..d73631cc6ca 100644 --- a/go/vt/vtgate/vindexes/consistent_lookup.go +++ b/go/vt/vtgate/vindexes/consistent_lookup.go @@ -22,53 +22,66 @@ import ( "encoding/json" "fmt" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/vtgate/evalengine" - + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/evalengine" + querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/proto/vtgate" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - "vitess.io/vitess/go/vt/sqlparser" +) + +const ( + consistentLookupParamWriteOnly = "write_only" ) var ( - _ SingleColumn = (*ConsistentLookupUnique)(nil) - _ Lookup = (*ConsistentLookupUnique)(nil) - _ WantOwnerInfo = (*ConsistentLookupUnique)(nil) - _ LookupPlanable = (*ConsistentLookupUnique)(nil) - _ SingleColumn = (*ConsistentLookup)(nil) - _ Lookup = (*ConsistentLookup)(nil) - _ WantOwnerInfo = (*ConsistentLookup)(nil) - _ LookupPlanable = (*ConsistentLookup)(nil) + _ SingleColumn = (*ConsistentLookupUnique)(nil) + _ Lookup = (*ConsistentLookupUnique)(nil) + _ WantOwnerInfo = (*ConsistentLookupUnique)(nil) + _ LookupPlanable = (*ConsistentLookupUnique)(nil) + _ ParamValidating = (*ConsistentLookupUnique)(nil) + _ SingleColumn = (*ConsistentLookup)(nil) + _ Lookup = (*ConsistentLookup)(nil) + _ WantOwnerInfo = (*ConsistentLookup)(nil) + _ LookupPlanable = (*ConsistentLookup)(nil) + _ ParamValidating = (*ConsistentLookup)(nil) + + consistentLookupParams = append( + append(make([]string, 0), lookupInternalParams...), + consistentLookupParamWriteOnly, + ) ) func init() { - Register("consistent_lookup", NewConsistentLookup) - Register("consistent_lookup_unique", NewConsistentLookupUnique) + Register("consistent_lookup", newConsistentLookup) + Register("consistent_lookup_unique", newConsistentLookupUnique) } // ConsistentLookup is a non-unique lookup vindex that can stay // consistent with respect to its owner table. type ConsistentLookup struct { *clCommon + unknownParams []string } -// NewConsistentLookup creates a ConsistentLookup vindex. +// newConsistentLookup creates a ConsistentLookup vindex. // The supplied map has the following required fields: // // table: name of the backing table. It can be qualified by the keyspace. // from: list of columns in the table that have the 'from' values of the lookup vindex. // to: The 'to' column name of the table. -func NewConsistentLookup(name string, m map[string]string) (Vindex, error) { +func newConsistentLookup(name string, m map[string]string) (Vindex, error) { clc, err := newCLCommon(name, m) if err != nil { return nil, err } - return &ConsistentLookup{clCommon: clc}, nil + return &ConsistentLookup{ + clCommon: clc, + unknownParams: FindUnknownParams(m, consistentLookupParams), + }, nil } // Cost returns the cost of this vindex as 20. @@ -152,6 +165,11 @@ func (lu *ConsistentLookup) AutoCommitEnabled() bool { return lu.lkp.Autocommit } +// UnknownParams implements the ParamValidating interface. +func (lu *ConsistentLookup) UnknownParams() []string { + return lu.unknownParams +} + //==================================================================== // ConsistentLookupUnique defines a vindex that uses a lookup table. @@ -159,20 +177,24 @@ func (lu *ConsistentLookup) AutoCommitEnabled() bool { // Unique and a Lookup. type ConsistentLookupUnique struct { *clCommon + unknownParams []string } -// NewConsistentLookupUnique creates a ConsistentLookupUnique vindex. +// newConsistentLookupUnique creates a ConsistentLookupUnique vindex. // The supplied map has the following required fields: // // table: name of the backing table. It can be qualified by the keyspace. // from: list of columns in the table that have the 'from' values of the lookup vindex. // to: The 'to' column name of the table. -func NewConsistentLookupUnique(name string, m map[string]string) (Vindex, error) { +func newConsistentLookupUnique(name string, m map[string]string) (Vindex, error) { clc, err := newCLCommon(name, m) if err != nil { return nil, err } - return &ConsistentLookupUnique{clCommon: clc}, nil + return &ConsistentLookupUnique{ + clCommon: clc, + unknownParams: FindUnknownParams(m, consistentLookupParams), + }, nil } // Cost returns the cost of this vindex as 10. @@ -271,7 +293,7 @@ type clCommon struct { func newCLCommon(name string, m map[string]string) (*clCommon, error) { lu := &clCommon{name: name} var err error - lu.writeOnly, err = boolFromMap(m, "write_only") + lu.writeOnly, err = boolFromMap(m, consistentLookupParamWriteOnly) if err != nil { return nil, err } @@ -313,7 +335,7 @@ func (lu *clCommon) Verify(ctx context.Context, vcursor VCursor, ids []sqltypes. } return out, nil } - return lu.lkp.VerifyCustom(ctx, vcursor, ids, ksidsToValues(ksids), vtgate.CommitOrder_PRE) + return lu.lkp.VerifyCustom(ctx, vcursor, ids, ksidsToValues(ksids), vtgatepb.CommitOrder_PRE) } // Create reserves the id by inserting it into the vindex table. @@ -323,10 +345,10 @@ func (lu *clCommon) Create(ctx context.Context, vcursor VCursor, rowsColValues [ return nil } // Try and convert the error to a MySQL error - sqlErr, isSQLErr := mysql.NewSQLErrorFromError(origErr).(*mysql.SQLError) + sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(origErr).(*sqlerror.SQLError) // If it is a MySQL error and its code is of duplicate entry, then we would like to continue // Otherwise, we return the error - if !(isSQLErr && sqlErr != nil && sqlErr.Number() == mysql.ERDupEntry) { + if !(isSQLErr && sqlErr != nil && sqlErr.Number() == sqlerror.ERDupEntry) { return origErr } for i, row := range rowsColValues { @@ -389,8 +411,7 @@ func (lu *clCommon) Delete(ctx context.Context, vcursor VCursor, rowsColValues [ func (lu *clCommon) Update(ctx context.Context, vcursor VCursor, oldValues []sqltypes.Value, ksid []byte, newValues []sqltypes.Value) error { equal := true for i := range oldValues { - // TODO(king-11) make collation aware - result, err := evalengine.NullsafeCompare(oldValues[i], newValues[i], collations.Unknown) + result, err := evalengine.NullsafeCompare(oldValues[i], newValues[i], vcursor.ConnCollation()) // errors from NullsafeCompare can be ignored. if they are real problems, we'll see them in the Create/Update if err != nil || result != 0 { equal = false @@ -470,3 +491,8 @@ func (lu *clCommon) GetCommitOrder() vtgatepb.CommitOrder { func (lu *ConsistentLookupUnique) IsBackfilling() bool { return lu.writeOnly } + +// UnknownParams implements the ParamValidating interface. +func (lu *ConsistentLookupUnique) UnknownParams() []string { + return lu.unknownParams +} diff --git a/go/vt/vtgate/vindexes/consistent_lookup_test.go b/go/vt/vtgate/vindexes/consistent_lookup_test.go index 297732325ea..deecc23ebdd 100644 --- a/go/vt/vtgate/vindexes/consistent_lookup_test.go +++ b/go/vt/vtgate/vindexes/consistent_lookup_test.go @@ -29,7 +29,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" querypb "vitess.io/vitess/go/vt/proto/query" @@ -40,6 +43,60 @@ import ( "vitess.io/vitess/go/vt/vterrors" ) +func consistentLookupCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "consistent_lookup", + vindexName: "consistent_lookup", + vindexParams: vindexParams, + + expectCost: 20, + expectErr: expectErr, + expectIsUnique: false, + expectNeedsVCursor: true, + expectString: "consistent_lookup", + expectUnknownParams: expectUnknownParams, + } +} + +func consistentLookupUniqueCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "consistent_lookup_unique", + vindexName: "consistent_lookup_unique", + vindexParams: vindexParams, + + expectCost: 10, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: true, + expectString: "consistent_lookup_unique", + expectUnknownParams: expectUnknownParams, + } +} + +func TestConsistentLookupCreateVindex(t *testing.T) { + testCaseFs := []func(string, map[string]string, error, []string) createVindexTestCase{ + consistentLookupCreateVindexTestCase, + consistentLookupUniqueCreateVindexTestCase, + } + for _, testCaseF := range testCaseFs { + testLookupCreateVindexInternalCases(t, testCaseF) + } +} + func TestConsistentLookupInit(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", true) cols := []sqlparser.IdentifierCI{ @@ -55,22 +112,6 @@ func TestConsistentLookupInit(t *testing.T) { } } -func TestConsistentLookupInfo(t *testing.T) { - lookup := createConsistentLookup(t, "consistent_lookup", false) - assert.Equal(t, 20, lookup.Cost()) - assert.Equal(t, "consistent_lookup", lookup.String()) - assert.False(t, lookup.IsUnique()) - assert.True(t, lookup.NeedsVCursor()) -} - -func TestConsistentLookupUniqueInfo(t *testing.T) { - lookup := createConsistentLookup(t, "consistent_lookup_unique", false) - assert.Equal(t, 10, lookup.Cost()) - assert.Equal(t, "consistent_lookup_unique", lookup.String()) - assert.True(t, lookup.IsUnique()) - assert.True(t, lookup.NeedsVCursor()) -} - func TestConsistentLookupMap(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", false) vc := &loggingVCursor{} @@ -239,7 +280,7 @@ func TestConsistentLookupCreateSimple(t *testing.T) { func TestConsistentLookupCreateThenRecreate(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", false) vc := &loggingVCursor{} - vc.AddResult(nil, mysql.NewSQLError(mysql.ERDupEntry, mysql.SSConstraintViolation, "Duplicate entry")) + vc.AddResult(nil, sqlerror.NewSQLError(sqlerror.ERDupEntry, sqlerror.SSConstraintViolation, "Duplicate entry")) vc.AddResult(&sqltypes.Result{}, nil) vc.AddResult(&sqltypes.Result{}, nil) @@ -413,7 +454,7 @@ func TestConsistentLookupNoUpdate(t *testing.T) { vc.verifyLog(t, []string{}) } -func TestConsistentLookupUpdateBecauseUncomparableTypes(t *testing.T) { +func TestConsistentLookupUpdateBecauseComparableTypes(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", false) vc := &loggingVCursor{} @@ -426,7 +467,6 @@ func TestConsistentLookupUpdateBecauseUncomparableTypes(t *testing.T) { {querypb.Type_TEXT, "some string"}, {querypb.Type_VARCHAR, "some string"}, {querypb.Type_CHAR, "some string"}, - {querypb.Type_GEOMETRY, "some string"}, } for _, val := range tests { @@ -438,7 +478,7 @@ func TestConsistentLookupUpdateBecauseUncomparableTypes(t *testing.T) { err = lookup.(Lookup).Update(context.Background(), vc, []sqltypes.Value{literal, literal}, []byte("test"), []sqltypes.Value{literal, literal}) require.NoError(t, err) - require.NotEmpty(t, vc.log) + vc.verifyLog(t, []string{}) vc.log = nil }) } @@ -459,6 +499,7 @@ func createConsistentLookup(t *testing.T, name string, writeOnly bool) SingleCol if err != nil { t.Fatal(err) } + require.Empty(t, l.(ParamValidating).UnknownParams()) cols := []sqlparser.IdentifierCI{ sqlparser.NewIdentifierCI("fc1"), sqlparser.NewIdentifierCI("fc2"), @@ -486,6 +527,10 @@ func (vc *loggingVCursor) InTransactionAndIsDML() bool { return false } +func (vc *loggingVCursor) ConnCollation() collations.ID { + return collations.Default() +} + type bv struct { Name string Bv string diff --git a/go/vt/vtgate/vindexes/foreign_keys.go b/go/vt/vtgate/vindexes/foreign_keys.go new file mode 100644 index 00000000000..3fcbc719624 --- /dev/null +++ b/go/vt/vtgate/vindexes/foreign_keys.go @@ -0,0 +1,252 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vindexes + +import ( + "encoding/json" + "fmt" + "strings" + + "vitess.io/vitess/go/vt/sqlparser" +) + +// ParentFKInfo contains the parent foreign key info for the table. +type ParentFKInfo struct { + Table *Table + ParentColumns sqlparser.Columns + ChildColumns sqlparser.Columns +} + +// MarshalJSON returns a JSON representation of ParentFKInfo. +func (fk *ParentFKInfo) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Name string `json:"parent_table"` + ParentColumns sqlparser.Columns `json:"parent_columns"` + ChildColumns sqlparser.Columns `json:"child_columns"` + }{ + Name: fk.Table.Name.String(), + ChildColumns: fk.ChildColumns, + ParentColumns: fk.ParentColumns, + }) +} + +func (fk *ParentFKInfo) String(childTable *Table) string { + var str strings.Builder + str.WriteString(childTable.String()) + for _, column := range fk.ChildColumns { + str.WriteString(column.String()) + } + str.WriteString(fk.Table.String()) + for _, column := range fk.ParentColumns { + str.WriteString(column.String()) + } + return str.String() +} + +// NewParentFkInfo creates a new ParentFKInfo. +func NewParentFkInfo(parentTbl *Table, fkDef *sqlparser.ForeignKeyDefinition) ParentFKInfo { + return ParentFKInfo{ + Table: parentTbl, + ChildColumns: fkDef.Source, + ParentColumns: fkDef.ReferenceDefinition.ReferencedColumns, + } +} + +// ChildFKInfo contains the child foreign key info for the table. +type ChildFKInfo struct { + Table *Table + ChildColumns sqlparser.Columns + ParentColumns sqlparser.Columns + Match sqlparser.MatchAction + OnDelete sqlparser.ReferenceAction + OnUpdate sqlparser.ReferenceAction +} + +// MarshalJSON returns a JSON representation of ChildFKInfo. +func (fk *ChildFKInfo) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Name string `json:"child_table"` + ChildColumns sqlparser.Columns `json:"child_columns"` + ParentColumns sqlparser.Columns `json:"parent_columns"` + }{ + Name: fk.Table.Name.String(), + ChildColumns: fk.ChildColumns, + ParentColumns: fk.ParentColumns, + }) +} + +func (fk *ChildFKInfo) String(parentTable *Table) string { + var str strings.Builder + str.WriteString(fk.Table.String()) + for _, column := range fk.ChildColumns { + str.WriteString(column.String()) + } + str.WriteString(parentTable.String()) + for _, column := range fk.ParentColumns { + str.WriteString(column.String()) + } + return str.String() +} + +// NewChildFkInfo creates a new ChildFKInfo. +func NewChildFkInfo(childTbl *Table, fkDef *sqlparser.ForeignKeyDefinition) ChildFKInfo { + return ChildFKInfo{ + Table: childTbl, + ChildColumns: fkDef.Source, + ParentColumns: fkDef.ReferenceDefinition.ReferencedColumns, + Match: fkDef.ReferenceDefinition.Match, + OnDelete: fkDef.ReferenceDefinition.OnDelete, + OnUpdate: fkDef.ReferenceDefinition.OnUpdate, + } +} + +// ParentFKsNeedsHandling returns all the parent fk constraints on this table that are not shard scoped. +func (t *Table) ParentFKsNeedsHandling(verifyAllFKs bool, fkToIgnore string) (fks []ParentFKInfo) { + for _, fk := range t.ParentForeignKeys { + // Check if we need to specifically ignore this foreign key + if fkToIgnore != "" && fk.String(t) == fkToIgnore { + continue + } + + // If we require all the foreign keys, add them all. + if verifyAllFKs { + fks = append(fks, fk) + continue + } + + // If the keyspaces are different, then the fk definition + // is going to go across shards. + if fk.Table.Keyspace.Name != t.Keyspace.Name { + fks = append(fks, fk) + continue + } + // If the keyspaces match and they are unsharded, then the fk defintion + // is shard-scoped. + if !t.Keyspace.Sharded { + continue + } + + if !isShardScoped(fk.Table, t, fk.ParentColumns, fk.ChildColumns) { + fks = append(fks, fk) + } + } + return +} + +// ChildFKsNeedsHandling retuns the child foreign keys that needs to be handled by the vtgate. +// This can be either the foreign key is not shard scoped or the child tables needs cascading. +func (t *Table) ChildFKsNeedsHandling(verifyAllFKs bool, getAction func(fk ChildFKInfo) sqlparser.ReferenceAction) (fks []ChildFKInfo) { + // If we require all the foreign keys, return the entire list. + if verifyAllFKs { + return t.ChildForeignKeys + } + for _, fk := range t.ChildForeignKeys { + // If the keyspaces are different, then the fk definition + // is going to go across shards. + if fk.Table.Keyspace.Name != t.Keyspace.Name { + fks = append(fks, fk) + continue + } + // If the action is not Restrict, then it needs a cascade. + switch getAction(fk) { + case sqlparser.Cascade, sqlparser.SetNull, sqlparser.SetDefault: + fks = append(fks, fk) + continue + } + // sqlparser.Restrict, sqlparser.NoAction, sqlparser.DefaultAction + // all the actions means the same thing i.e. Restrict + // do not allow modification if there is a child row. + // Check if the restrict is shard scoped. + if !isShardScoped(t, fk.Table, fk.ParentColumns, fk.ChildColumns) { + fks = append(fks, fk) + } + } + return +} + +func UpdateAction(fk ChildFKInfo) sqlparser.ReferenceAction { return fk.OnUpdate } +func DeleteAction(fk ChildFKInfo) sqlparser.ReferenceAction { return fk.OnDelete } + +func isShardScoped(pTable *Table, cTable *Table, pCols sqlparser.Columns, cCols sqlparser.Columns) bool { + if !pTable.Keyspace.Sharded { + return true + } + + pPrimaryVdx := pTable.ColumnVindexes[0] + cPrimaryVdx := cTable.ColumnVindexes[0] + + // If the primary vindexes don't match between the parent and child table, + // we cannot infer that the fk constraint in shard scoped. + if cPrimaryVdx.Vindex != pPrimaryVdx.Vindex { + return false + } + + childFkContatined, childFkIndexes := cCols.Indexes(cPrimaryVdx.Columns) + if !childFkContatined { + // PrimaryVindex is not part of the foreign key constraint on the children side. + // So it is a cross-shard foreign key. + return false + } + + // We need to run the same check for the parent columns. + parentFkContatined, parentFkIndexes := pCols.Indexes(pPrimaryVdx.Columns) + if !parentFkContatined { + return false + } + + // Both the child and parent table contain the foreign key and that the vindexes are the same, + // now we need to make sure, that the indexes of both match. + // For example, consider the following tables, + // t1 (primary vindex (x,y)) + // t2 (primary vindex (a,b)) + // If we have a foreign key constraint from t1(x,y) to t2(b,a), then they are not shard scoped. + // Let's say in t1, (1,3) will be in -80 and (3,1) will be in 80-, then in t2 (1,3) will end up in 80-. + for i := range parentFkIndexes { + if parentFkIndexes[i] != childFkIndexes[i] { + return false + } + } + return true +} + +// AddForeignKey is for testing only. +func (vschema *VSchema) AddForeignKey(ksname, childTableName string, fkConstraint *sqlparser.ForeignKeyDefinition) error { + ks, ok := vschema.Keyspaces[ksname] + if !ok { + return fmt.Errorf("keyspace %s not found in vschema", ksname) + } + cTbl, ok := ks.Tables[childTableName] + if !ok { + return fmt.Errorf("child table %s not found in keyspace %s", childTableName, ksname) + } + pKsName := fkConstraint.ReferenceDefinition.ReferencedTable.Qualifier.String() + if pKsName != "" { + ks, ok = vschema.Keyspaces[pKsName] + if !ok { + return fmt.Errorf("keyspace %s not found in vschema", pKsName) + } + ksname = pKsName + } + parentTableName := fkConstraint.ReferenceDefinition.ReferencedTable.Name.String() + pTbl, ok := ks.Tables[parentTableName] + if !ok { + return fmt.Errorf("parent table %s not found in keyspace %s", parentTableName, ksname) + } + pTbl.ChildForeignKeys = append(pTbl.ChildForeignKeys, NewChildFkInfo(cTbl, fkConstraint)) + cTbl.ParentForeignKeys = append(cTbl.ParentForeignKeys, NewParentFkInfo(pTbl, fkConstraint)) + return nil +} diff --git a/go/vt/vtgate/vindexes/foreign_keys_test.go b/go/vt/vtgate/vindexes/foreign_keys_test.go new file mode 100644 index 00000000000..147614edcbf --- /dev/null +++ b/go/vt/vtgate/vindexes/foreign_keys_test.go @@ -0,0 +1,314 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vindexes + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" +) + +var ( + uks = &Keyspace{Name: "uks"} + uks2 = &Keyspace{Name: "uks2"} + sks = &Keyspace{Name: "sks", Sharded: true} +) + +// TestTable_CrossShardParentFKs tests the functionality of the method CrossShardParentFKs. +func TestTable_CrossShardParentFKs(t *testing.T) { + col1Vindex := &ColumnVindex{ + Name: "v1", + Vindex: binVindex, + Columns: sqlparser.MakeColumns("col1"), + } + col4DiffVindex := &ColumnVindex{ + Name: "v2", + Vindex: binOnlyVindex, + Columns: sqlparser.MakeColumns("col4"), + } + col123Vindex := &ColumnVindex{ + Name: "v2", + Vindex: binVindex, + Columns: sqlparser.MakeColumns("col1", "col2", "col3"), + } + col456Vindex := &ColumnVindex{ + Name: "v2", + Vindex: binVindex, + Columns: sqlparser.MakeColumns("col4", "col5", "col6"), + } + + unshardedTbl := &Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: uks2, + } + shardedSingleColTblWithDiffVindex := &Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col4DiffVindex}, + } + shardedMultiColTbl := &Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col456Vindex}, + } + + tests := []struct { + name string + table *Table + wantCrossShardFKTables []string + verifyAllFKs bool + fkToIgnore string + }{{ + name: "No Parent FKs", + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: sks, + }, + wantCrossShardFKTables: []string{}, + }, { + name: "Unsharded keyspace", + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: uks2, + ParentForeignKeys: []ParentFKInfo{pkInfo(unshardedTbl, []string{"col4"}, []string{"col1"})}, + }, + wantCrossShardFKTables: []string{}, + }, { + name: "Unsharded keyspace with verify all FKs", + verifyAllFKs: true, + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: uks2, + ParentForeignKeys: []ParentFKInfo{pkInfo(unshardedTbl, []string{"col4"}, []string{"col1"})}, + }, + wantCrossShardFKTables: []string{"t1"}, + }, { + name: "Keyspaces don't match", // parent table is on uks2 + table: &Table{ + Keyspace: uks, + ParentForeignKeys: []ParentFKInfo{pkInfo(unshardedTbl, []string{"col4"}, []string{"col1"})}, + }, + wantCrossShardFKTables: []string{"t1"}, + }, { + name: "Keyspaces don't match with ignore fk", // parent table is on uks2 + fkToIgnore: "uks.col1uks2.t1col4", + table: &Table{ + Keyspace: uks, + ParentForeignKeys: []ParentFKInfo{pkInfo(unshardedTbl, []string{"col4"}, []string{"col1"})}, + }, + wantCrossShardFKTables: []string{}, + }, { + name: "Unsharded keyspace with verify all FKs and fk to ignore", + verifyAllFKs: true, + fkToIgnore: "uks2.col1uks2.t1col4", + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: uks2, + ParentForeignKeys: []ParentFKInfo{pkInfo(unshardedTbl, []string{"col4"}, []string{"col1"})}, + }, + wantCrossShardFKTables: []string{}, + }, { + name: "Column Vindexes don't match", // primary vindexes on different vindex type + table: &Table{ + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col1Vindex}, + ParentForeignKeys: []ParentFKInfo{pkInfo(shardedSingleColTblWithDiffVindex, []string{"col4"}, []string{"col1"})}, + }, + wantCrossShardFKTables: []string{"t1"}, + }, { + name: "child table foreign key does not contain primary vindex columns", + table: &Table{ + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col123Vindex}, + ParentForeignKeys: []ParentFKInfo{pkInfo(shardedMultiColTbl, []string{"col4", "col5", "col6"}, []string{"col3", "col9", "col1"})}, + }, + wantCrossShardFKTables: []string{"t1"}, + }, { + name: "Parent FK doesn't contain primary vindex", + table: &Table{ + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col123Vindex}, + ParentForeignKeys: []ParentFKInfo{pkInfo(shardedMultiColTbl, []string{"col4", "col9", "col6"}, []string{"col1", "col2", "col3"})}, + }, + wantCrossShardFKTables: []string{"t1"}, + }, { + name: "Indexes of the two FKs with column vindexes don't line up", + table: &Table{ + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col123Vindex}, + ParentForeignKeys: []ParentFKInfo{pkInfo(shardedMultiColTbl, []string{"col4", "col9", "col5", "col6"}, []string{"col1", "col2", "col3", "col9"})}, + }, + wantCrossShardFKTables: []string{"t1"}, + }, { + name: "Shard scoped foreign key constraint", + table: &Table{ + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col123Vindex}, + ParentForeignKeys: []ParentFKInfo{pkInfo(shardedMultiColTbl, []string{"col4", "col9", "col5", "col6", "colc"}, []string{"col1", "cola", "col2", "col3", "colb"})}, + }, + wantCrossShardFKTables: []string{}, + }} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + crossShardFks := tt.table.ParentFKsNeedsHandling(tt.verifyAllFKs, tt.fkToIgnore) + var crossShardFkTables []string + for _, fk := range crossShardFks { + crossShardFkTables = append(crossShardFkTables, fk.Table.Name.String()) + } + require.ElementsMatch(t, tt.wantCrossShardFKTables, crossShardFkTables) + }) + } +} + +func pkInfo(parentTable *Table, pCols []string, cCols []string) ParentFKInfo { + return ParentFKInfo{ + Table: parentTable, + ParentColumns: sqlparser.MakeColumns(pCols...), + ChildColumns: sqlparser.MakeColumns(cCols...), + } +} + +// TestChildFKs tests the ChildFKsNeedsHandling method is provides the child foreign key table whose +// rows needs to be managed by vitess. +func TestChildFKs(t *testing.T) { + col1Vindex := &ColumnVindex{ + Name: "v1", + Vindex: binVindex, + Columns: sqlparser.MakeColumns("col1"), + } + col4DiffVindex := &ColumnVindex{ + Name: "v2", + Vindex: binOnlyVindex, + Columns: sqlparser.MakeColumns("col4"), + } + + unshardedTbl := &Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: uks2, + } + shardedSingleColTbl := &Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col1Vindex}, + } + shardedSingleColTblWithDiffVindex := &Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col4DiffVindex}, + } + + tests := []struct { + verifyAllFKs bool + name string + table *Table + expChildTbls []string + }{{ + name: "No Parent FKs", + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: sks, + }, + expChildTbls: []string{}, + }, { + name: "restrict unsharded", + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: uks2, + ChildForeignKeys: []ChildFKInfo{ckInfo(unshardedTbl, []string{"col4"}, []string{"col1"}, sqlparser.Restrict)}, + }, + expChildTbls: []string{}, + }, { + name: "restrict unsharded with verify all fks", + verifyAllFKs: true, + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: uks2, + ChildForeignKeys: []ChildFKInfo{ckInfo(unshardedTbl, []string{"col4"}, []string{"col1"}, sqlparser.Restrict)}, + }, + expChildTbls: []string{"t1"}, + }, { + name: "restrict shard scoped", + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: sks, + ChildForeignKeys: []ChildFKInfo{ckInfo(shardedSingleColTbl, []string{"col1"}, []string{"col1"}, sqlparser.Restrict)}, + }, + expChildTbls: []string{}, + }, { + name: "restrict shard scoped with verify all fks", + verifyAllFKs: true, + table: &Table{ + ColumnVindexes: []*ColumnVindex{col1Vindex}, + Keyspace: sks, + ChildForeignKeys: []ChildFKInfo{ckInfo(shardedSingleColTbl, []string{"col1"}, []string{"col1"}, sqlparser.Restrict)}, + }, + expChildTbls: []string{"t1"}, + }, { + name: "restrict Keyspaces don't match", + table: &Table{ + Keyspace: uks, + ChildForeignKeys: []ChildFKInfo{ckInfo(shardedSingleColTbl, []string{"col1"}, []string{"col1"}, sqlparser.Restrict)}, + }, + expChildTbls: []string{"t1"}, + }, { + name: "restrict cross shard", + table: &Table{ + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col1Vindex}, + ChildForeignKeys: []ChildFKInfo{ckInfo(shardedSingleColTblWithDiffVindex, []string{"col4"}, []string{"col1"}, sqlparser.Restrict)}, + }, + expChildTbls: []string{"t1"}, + }, { + name: "cascade unsharded", + table: &Table{ + Keyspace: uks2, + ColumnVindexes: []*ColumnVindex{col1Vindex}, + ChildForeignKeys: []ChildFKInfo{ckInfo(unshardedTbl, []string{"col4"}, []string{"col1"}, sqlparser.Cascade)}, + }, + expChildTbls: []string{"t1"}, + }, { + name: "cascade cross shard", + table: &Table{ + Keyspace: sks, + ColumnVindexes: []*ColumnVindex{col1Vindex}, + ChildForeignKeys: []ChildFKInfo{ckInfo(shardedSingleColTblWithDiffVindex, []string{"col4"}, []string{"col1"}, sqlparser.Cascade)}, + }, + expChildTbls: []string{"t1"}, + }} + deleteAction := func(fk ChildFKInfo) sqlparser.ReferenceAction { return fk.OnDelete } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + childFks := tt.table.ChildFKsNeedsHandling(tt.verifyAllFKs, deleteAction) + var actualChildTbls []string + for _, fk := range childFks { + actualChildTbls = append(actualChildTbls, fk.Table.Name.String()) + } + require.ElementsMatch(t, tt.expChildTbls, actualChildTbls) + }) + } +} + +func ckInfo(cTable *Table, pCols []string, cCols []string, refAction sqlparser.ReferenceAction) ChildFKInfo { + return ChildFKInfo{ + Table: cTable, + ParentColumns: sqlparser.MakeColumns(pCols...), + ChildColumns: sqlparser.MakeColumns(cCols...), + OnDelete: refAction, + } +} diff --git a/go/vt/vtgate/vindexes/hash.go b/go/vt/vtgate/vindexes/hash.go index a488809aeb8..d30895be48a 100644 --- a/go/vt/vtgate/vindexes/hash.go +++ b/go/vt/vtgate/vindexes/hash.go @@ -26,16 +26,15 @@ import ( "fmt" "strconv" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" ) var ( - _ SingleColumn = (*Hash)(nil) - _ Reversible = (*Hash)(nil) - _ Hashing = (*Hash)(nil) + _ SingleColumn = (*Hash)(nil) + _ Reversible = (*Hash)(nil) + _ Hashing = (*Hash)(nil) + _ ParamValidating = (*Hash)(nil) ) // Hash defines vindex that hashes an int64 to a KeyspaceId @@ -44,12 +43,16 @@ var ( // Note that at once stage we used a 3DES-based hash here, // but for a null key as in our case, they are completely equivalent. type Hash struct { - name string + name string + unknownParams []string } -// NewHash creates a new Hash. -func NewHash(name string, _ map[string]string) (Vindex, error) { - return &Hash{name: name}, nil +// newHash creates a new Hash. +func newHash(name string, params map[string]string) (Vindex, error) { + return &Hash{ + name: name, + unknownParams: FindUnknownParams(params, nil), + }, nil } // String returns the name of the vindex. @@ -90,7 +93,7 @@ func (vind *Hash) Map(ctx context.Context, vcursor VCursor, ids []sqltypes.Value func (vind *Hash) Verify(ctx context.Context, vcursor VCursor, ids []sqltypes.Value, ksids [][]byte) ([]bool, error) { out := make([]bool, len(ids)) for i := range ids { - num, err := evalengine.ToUint64(ids[i]) + num, err := ids[i].ToCastUint64() if err != nil { return nil, err } @@ -123,7 +126,7 @@ func (vind *Hash) Hash(id sqltypes.Value) ([]byte, error) { ival, err = strconv.ParseInt(str, 10, 64) num = uint64(ival) } else { - num, err = evalengine.ToUint64(id) + num, err = id.ToCastUint64() } if err != nil { @@ -132,6 +135,11 @@ func (vind *Hash) Hash(id sqltypes.Value) ([]byte, error) { return vhash(num), nil } +// UnknownParams implements the ParamValidating interface. +func (vind *Hash) UnknownParams() []string { + return vind.unknownParams +} + var blockDES cipher.Block func init() { @@ -140,7 +148,7 @@ func init() { if err != nil { panic(err) } - Register("hash", NewHash) + Register("hash", newHash) } func vhash(shardKey uint64) []byte { diff --git a/go/vt/vtgate/vindexes/hash_test.go b/go/vt/vtgate/vindexes/hash_test.go index 3a7d1125c3e..4a9df88180d 100644 --- a/go/vt/vtgate/vindexes/hash_test.go +++ b/go/vt/vtgate/vindexes/hash_test.go @@ -21,7 +21,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -31,18 +30,62 @@ import ( var hash SingleColumn func init() { - hv, err := CreateVindex("hash", "nn", map[string]string{"Table": "t", "Column": "c"}) + hv, err := CreateVindex("hash", "nn", map[string]string{}) + unknownParams := hv.(ParamValidating).UnknownParams() + if len(unknownParams) > 0 { + panic("hash test init: expected 0 unknown params") + } if err != nil { panic(err) } hash = hv.(SingleColumn) } -func TestHashInfo(t *testing.T) { - assert.Equal(t, 1, hash.Cost()) - assert.Equal(t, "nn", hash.String()) - assert.True(t, hash.IsUnique()) - assert.False(t, hash.NeedsVCursor()) +func hashCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "hash", + vindexName: "hash", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "hash", + expectUnknownParams: expectUnknownParams, + } +} + +func TestHashCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + hashCreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + hashCreateVindexTestCase( + "empty params", + map[string]string{}, + nil, + nil, + ), + hashCreateVindexTestCase( + "unknown params", + map[string]string{"hello": "world"}, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestHashMap(t *testing.T) { @@ -100,7 +143,7 @@ func TestHashVerify(t *testing.T) { // Failure test _, err = hash.Verify(context.Background(), nil, []sqltypes.Value{sqltypes.NewVarBinary("aa")}, [][]byte{nil}) - require.EqualError(t, err, "could not parse value: 'aa'") + require.EqualError(t, err, "cannot parse uint64 from \"aa\"") } func TestHashReverseMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/lookup.go b/go/vt/vtgate/vindexes/lookup.go index 9ac514175df..b3e14fa01f6 100644 --- a/go/vt/vtgate/vindexes/lookup.go +++ b/go/vt/vtgate/vindexes/lookup.go @@ -27,27 +27,41 @@ import ( vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" ) +const ( + lookupParamNoVerify = "no_verify" + lookupParamWriteOnly = "write_only" +) + var ( - _ SingleColumn = (*LookupUnique)(nil) - _ Lookup = (*LookupUnique)(nil) - _ LookupPlanable = (*LookupUnique)(nil) - _ SingleColumn = (*LookupNonUnique)(nil) - _ Lookup = (*LookupNonUnique)(nil) - _ LookupPlanable = (*LookupNonUnique)(nil) + _ SingleColumn = (*LookupUnique)(nil) + _ Lookup = (*LookupUnique)(nil) + _ LookupPlanable = (*LookupUnique)(nil) + _ ParamValidating = (*LookupUnique)(nil) + _ SingleColumn = (*LookupNonUnique)(nil) + _ Lookup = (*LookupNonUnique)(nil) + _ LookupPlanable = (*LookupNonUnique)(nil) + _ ParamValidating = (*LookupNonUnique)(nil) + + lookupParams = append( + append(make([]string, 0), lookupCommonParams...), + lookupParamNoVerify, + lookupParamWriteOnly, + ) ) func init() { - Register("lookup", NewLookup) - Register("lookup_unique", NewLookupUnique) + Register("lookup", newLookup) + Register("lookup_unique", newLookupUnique) } // LookupNonUnique defines a vindex that uses a lookup table and create a mapping between from ids and KeyspaceId. // It's NonUnique and a Lookup. type LookupNonUnique struct { - name string - writeOnly bool - noVerify bool - lkp lookupInternal + name string + writeOnly bool + noVerify bool + lkp lookupInternal + unknownParams []string } func (ln *LookupNonUnique) GetCommitOrder() vtgatepb.CommitOrder { @@ -172,7 +186,12 @@ func (ln *LookupNonUnique) Query() (selQuery string, arguments []string) { return ln.lkp.query() } -// NewLookup creates a LookupNonUnique vindex. +// UnknownParams implements the ParamValidating interface. +func (ln *LookupNonUnique) UnknownParams() []string { + return ln.unknownParams +} + +// newLookup creates a LookupNonUnique vindex. // The supplied map has the following required fields: // // table: name of the backing table. It can be qualified by the keyspace. @@ -184,19 +203,22 @@ func (ln *LookupNonUnique) Query() (selQuery string, arguments []string) { // autocommit: setting this to "true" will cause inserts to upsert and deletes to be ignored. // write_only: in this mode, Map functions return the full keyrange causing a full scatter. // no_verify: in this mode, Verify will always succeed. -func NewLookup(name string, m map[string]string) (Vindex, error) { - lookup := &LookupNonUnique{name: name} +func newLookup(name string, m map[string]string) (Vindex, error) { + lookup := &LookupNonUnique{ + name: name, + unknownParams: FindUnknownParams(m, lookupParams), + } cc, err := parseCommonConfig(m) if err != nil { return nil, err } - lookup.writeOnly, err = boolFromMap(m, "write_only") + lookup.writeOnly, err = boolFromMap(m, lookupParamWriteOnly) if err != nil { return nil, err } - lookup.noVerify, err = boolFromMap(m, "no_verify") + lookup.noVerify, err = boolFromMap(m, lookupParamNoVerify) if err != nil { return nil, err } @@ -223,10 +245,11 @@ func ksidsToValues(ksids [][]byte) []sqltypes.Value { // The table is expected to define the id column as unique. It's // Unique and a Lookup. type LookupUnique struct { - name string - writeOnly bool - noVerify bool - lkp lookupInternal + name string + writeOnly bool + noVerify bool + lkp lookupInternal + unknownParams []string } func (lu *LookupUnique) GetCommitOrder() vtgatepb.CommitOrder { @@ -241,7 +264,7 @@ func (lu *LookupUnique) AutoCommitEnabled() bool { return lu.lkp.Autocommit } -// NewLookupUnique creates a LookupUnique vindex. +// newLookupUnique creates a LookupUnique vindex. // The supplied map has the following required fields: // // table: name of the backing table. It can be qualified by the keyspace. @@ -252,19 +275,22 @@ func (lu *LookupUnique) AutoCommitEnabled() bool { // // autocommit: setting this to "true" will cause deletes to be ignored. // write_only: in this mode, Map functions return the full keyrange causing a full scatter. -func NewLookupUnique(name string, m map[string]string) (Vindex, error) { - lu := &LookupUnique{name: name} +func newLookupUnique(name string, m map[string]string) (Vindex, error) { + lu := &LookupUnique{ + name: name, + unknownParams: FindUnknownParams(m, lookupParams), + } cc, err := parseCommonConfig(m) if err != nil { return nil, err } - lu.writeOnly, err = boolFromMap(m, "write_only") + lu.writeOnly, err = boolFromMap(m, lookupParamWriteOnly) if err != nil { return nil, err } - lu.noVerify, err = boolFromMap(m, "no_verify") + lu.noVerify, err = boolFromMap(m, lookupParamNoVerify) if err != nil { return nil, err } @@ -375,3 +401,8 @@ func (lu *LookupUnique) LookupQuery() (string, error) { func (lu *LookupUnique) Query() (string, []string) { return lu.lkp.query() } + +// UnknownParams implements the ParamValidating interface. +func (ln *LookupUnique) UnknownParams() []string { + return ln.unknownParams +} diff --git a/go/vt/vtgate/vindexes/lookup_hash.go b/go/vt/vtgate/vindexes/lookup_hash.go index 993b9655660..de3d078f556 100644 --- a/go/vt/vtgate/vindexes/lookup_hash.go +++ b/go/vt/vtgate/vindexes/lookup_hash.go @@ -21,26 +21,35 @@ import ( "encoding/json" "fmt" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" ) +const ( + lookupHashParamWriteOnly = "write_only" +) + var ( - _ SingleColumn = (*LookupHash)(nil) - _ Lookup = (*LookupHash)(nil) - _ LookupPlanable = (*LookupHash)(nil) - _ SingleColumn = (*LookupHashUnique)(nil) - _ Lookup = (*LookupHashUnique)(nil) - _ LookupPlanable = (*LookupHashUnique)(nil) + _ SingleColumn = (*LookupHash)(nil) + _ Lookup = (*LookupHash)(nil) + _ LookupPlanable = (*LookupHash)(nil) + _ ParamValidating = (*LookupHash)(nil) + _ SingleColumn = (*LookupHashUnique)(nil) + _ Lookup = (*LookupHashUnique)(nil) + _ LookupPlanable = (*LookupHashUnique)(nil) + _ ParamValidating = (*LookupHashUnique)(nil) + + lookupHashParams = append( + append(make([]string, 0), lookupCommonParams...), + lookupHashParamWriteOnly, + ) ) func init() { - Register("lookup_hash", NewLookupHash) - Register("lookup_hash_unique", NewLookupHashUnique) + Register("lookup_hash", newLookupHash) + Register("lookup_hash_unique", newLookupHashUnique) } //==================================================================== @@ -50,12 +59,13 @@ func init() { // NonUnique and a Lookup. // Warning: This Vindex is being deprecated in favor of Lookup type LookupHash struct { - name string - writeOnly bool - lkp lookupInternal + name string + writeOnly bool + lkp lookupInternal + unknownParams []string } -// NewLookupHash creates a LookupHash vindex. +// newLookupHash creates a LookupHash vindex. // The supplied map has the following required fields: // // table: name of the backing table. It can be qualified by the keyspace. @@ -66,14 +76,17 @@ type LookupHash struct { // // autocommit: setting this to "true" will cause inserts to upsert and deletes to be ignored. // write_only: in this mode, Map functions return the full keyrange causing a full scatter. -func NewLookupHash(name string, m map[string]string) (Vindex, error) { - lh := &LookupHash{name: name} +func newLookupHash(name string, m map[string]string) (Vindex, error) { + lh := &LookupHash{ + name: name, + unknownParams: FindUnknownParams(m, lookupHashParams), + } cc, err := parseCommonConfig(m) if err != nil { return nil, err } - lh.writeOnly, err = boolFromMap(m, "write_only") + lh.writeOnly, err = boolFromMap(m, lookupHashParamWriteOnly) if err != nil { return nil, err } @@ -148,7 +161,7 @@ func (lh *LookupHash) MapResult(ids []sqltypes.Value, results []*sqltypes.Result } ksids := make([][]byte, 0, len(result.Rows)) for _, row := range result.Rows { - num, err := evalengine.ToUint64(row[0]) + num, err := row[0].ToCastUint64() if err != nil { // A failure to convert is equivalent to not being // able to map. @@ -229,6 +242,11 @@ func (lh *LookupHash) MarshalJSON() ([]byte, error) { return json.Marshal(lh.lkp) } +// UnknownParams satisifes the ParamValidating interface. +func (lh *LookupHash) UnknownParams() []string { + return lh.unknownParams +} + // unhashList unhashes a list of keyspace ids into []sqltypes.Value. func unhashList(ksids [][]byte) ([]sqltypes.Value, error) { values := make([]sqltypes.Value, 0, len(ksids)) @@ -249,14 +267,15 @@ func unhashList(ksids [][]byte) ([]sqltypes.Value, error) { // Unique and a Lookup. // Warning: This Vindex is being depcreated in favor of LookupUnique type LookupHashUnique struct { - name string - writeOnly bool - lkp lookupInternal + name string + writeOnly bool + lkp lookupInternal + unknownParams []string } var _ LookupPlanable = (*LookupHashUnique)(nil) -// NewLookupHashUnique creates a LookupHashUnique vindex. +// newLookupHashUnique creates a LookupHashUnique vindex. // The supplied map has the following required fields: // // table: name of the backing table. It can be qualified by the keyspace. @@ -267,14 +286,17 @@ var _ LookupPlanable = (*LookupHashUnique)(nil) // // autocommit: setting this to "true" will cause deletes to be ignored. // write_only: in this mode, Map functions return the full keyrange causing a full scatter. -func NewLookupHashUnique(name string, m map[string]string) (Vindex, error) { - lhu := &LookupHashUnique{name: name} +func newLookupHashUnique(name string, m map[string]string) (Vindex, error) { + lhu := &LookupHashUnique{ + name: name, + unknownParams: FindUnknownParams(m, lookupHashParams), + } cc, err := parseCommonConfig(m) if err != nil { return nil, err } - lhu.writeOnly, err = boolFromMap(m, "write_only") + lhu.writeOnly, err = boolFromMap(m, lookupHashParamWriteOnly) if err != nil { return nil, err } @@ -336,7 +358,7 @@ func (lhu *LookupHashUnique) MapResult(ids []sqltypes.Value, results []*sqltypes case 0: out = append(out, key.DestinationNone{}) case 1: - num, err := evalengine.ToUint64(result.Rows[0][0]) + num, err := result.Rows[0][0].ToCastUint64() if err != nil { out = append(out, key.DestinationNone{}) continue @@ -419,3 +441,8 @@ func (lhu *LookupHashUnique) Query() (selQuery string, arguments []string) { func (lhu *LookupHashUnique) GetCommitOrder() vtgatepb.CommitOrder { return vtgatepb.CommitOrder_NORMAL } + +// UnknownParams implements the ParamValidating interface. +func (lhu *LookupHashUnique) UnknownParams() []string { + return lhu.unknownParams +} diff --git a/go/vt/vtgate/vindexes/lookup_hash_test.go b/go/vt/vtgate/vindexes/lookup_hash_test.go index 7703bd4d344..69bff9f6f34 100644 --- a/go/vt/vtgate/vindexes/lookup_hash_test.go +++ b/go/vt/vtgate/vindexes/lookup_hash_test.go @@ -21,7 +21,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -29,6 +28,32 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) +func lookupHashCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "lookup_hash", + vindexName: "lookup_hash", + vindexParams: vindexParams, + + expectCost: 20, + expectErr: expectErr, + expectIsUnique: false, + expectNeedsVCursor: true, + expectString: "lookup_hash", + expectUnknownParams: expectUnknownParams, + } +} + +func TestLookupHashCreateVindex(t *testing.T) { + testLookupCreateVindexCommonCases(t, lookupHashCreateVindexTestCase) +} + func TestLookupHashNew(t *testing.T) { l := createLookup(t, "lookup_hash", false /* writeOnly */) if want, got := l.(*LookupHash).writeOnly, false; got != want { @@ -40,7 +65,7 @@ func TestLookupHashNew(t *testing.T) { t.Errorf("Create(lookup, false): %v, want %v", got, want) } - _, err := CreateVindex("lookup_hash", "lookup_hash", map[string]string{ + vdx, err := CreateVindex("lookup_hash", "lookup_hash", map[string]string{ "table": "t", "from": "fromc", "to": "toc", @@ -50,20 +75,10 @@ func TestLookupHashNew(t *testing.T) { if err == nil || err.Error() != want { t.Errorf("Create(bad_scatter): %v, want %s", err, want) } -} - -func TestLookupHashInfo(t *testing.T) { - lookuphash := createLookup(t, "lookup_hash", false /* writeOnly */) - assert.Equal(t, 20, lookuphash.Cost()) - assert.Equal(t, "lookup_hash", lookuphash.String()) - assert.False(t, lookuphash.IsUnique()) - assert.True(t, lookuphash.NeedsVCursor()) - - lookuphashunique := createLookup(t, "lookup_hash_unique", false /* writeOnly */) - assert.Equal(t, 10, lookuphashunique.Cost()) - assert.Equal(t, "lookup_hash_unique", lookuphashunique.String()) - assert.True(t, lookuphashunique.IsUnique()) - assert.True(t, lookuphashunique.NeedsVCursor()) + if err == nil { + unknownParams := vdx.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + } } func TestLookupHashMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/lookup_hash_unique_test.go b/go/vt/vtgate/vindexes/lookup_hash_unique_test.go index 9158f99dc04..67697fb5eac 100644 --- a/go/vt/vtgate/vindexes/lookup_hash_unique_test.go +++ b/go/vt/vtgate/vindexes/lookup_hash_unique_test.go @@ -21,7 +21,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -29,24 +28,54 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) +func lookupHashUniqueCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "lookup_hash_unique", + vindexName: "lookup_hash_unique", + vindexParams: vindexParams, + + expectCost: 10, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: true, + expectString: "lookup_hash_unique", + expectUnknownParams: expectUnknownParams, + } +} + +func TestLookupHashUniqueCreateVindex(t *testing.T) { + testLookupCreateVindexCommonCases(t, lookupHashUniqueCreateVindexTestCase) +} + func TestLookupHashUniqueNew(t *testing.T) { l := createLookup(t, "lookup_hash_unique", false /* writeOnly */) if want, got := l.(*LookupHashUnique).writeOnly, false; got != want { t.Errorf("Create(lookup, false): %v, want %v", got, want) } - vindex, _ := CreateVindex("lookup_hash_unique", "lookup_hash_unique", map[string]string{ + vindex, err := CreateVindex("lookup_hash_unique", "lookup_hash_unique", map[string]string{ "table": "t", "from": "fromc", "to": "toc", "write_only": "true", }) + unknownParams := vindex.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + require.NoError(t, err) + l = vindex.(SingleColumn) if want, got := l.(*LookupHashUnique).writeOnly, true; got != want { t.Errorf("Create(lookup, false): %v, want %v", got, want) } - _, err := CreateVindex("lookup_hash_unique", "lookup_hash_unique", map[string]string{ + vdx, err := CreateVindex("lookup_hash_unique", "lookup_hash_unique", map[string]string{ "table": "t", "from": "fromc", "to": "toc", @@ -56,14 +85,10 @@ func TestLookupHashUniqueNew(t *testing.T) { if err == nil || err.Error() != want { t.Errorf("Create(bad_scatter): %v, want %s", err, want) } -} - -func TestLookupHashUniqueInfo(t *testing.T) { - lhu := createLookup(t, "lookup_hash_unique", false /* writeOnly */) - assert.Equal(t, 10, lhu.Cost()) - assert.Equal(t, "lookup_hash_unique", lhu.String()) - assert.True(t, lhu.IsUnique()) - assert.True(t, lhu.NeedsVCursor()) + if err == nil { + unknownParams = vdx.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + } } func TestLookupHashUniqueMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/lookup_internal.go b/go/vt/vtgate/vindexes/lookup_internal.go index 2644c261407..673b3fcb64b 100644 --- a/go/vt/vtgate/vindexes/lookup_internal.go +++ b/go/vt/vtgate/vindexes/lookup_internal.go @@ -33,17 +33,47 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) -var ( +const ( readLockExclusive = "exclusive" readLockShared = "shared" readLockNone = "none" readLockDefault = readLockExclusive + lookupCommonParamAutocommit = "autocommit" + lookupCommonParamMultiShardAutocommit = "multi_shard_autocommit" + + lookupInternalParamTable = "table" + lookupInternalParamFrom = "from" + lookupInternalParamTo = "to" + lookupInternalParamIgnoreNulls = "ignore_nulls" + lookupInternalParamBatchLookup = "batch_lookup" + lookupInternalParamReadLock = "read_lock" +) + +var ( readLockExprs = map[string]string{ readLockExclusive: "for update", readLockShared: "lock in share mode", readLockNone: "", } + + // lookupCommonParams are used only by lookup_* vindexes. + lookupCommonParams = append( + append(make([]string, 0), lookupInternalParams...), + lookupCommonParamAutocommit, + lookupCommonParamMultiShardAutocommit, + ) + + // lookupInternalParams are used by both lookup_* vindexes and the newer + // consistent_lookup_* vindexes. + lookupInternalParams = []string{ + lookupInternalParamTable, + lookupInternalParamFrom, + lookupInternalParamTo, + lookupInternalParamIgnoreNulls, + lookupInternalParamBatchLookup, + lookupInternalParamReadLock, + } ) // lookupInternal implements the functions for the Lookup vindexes. @@ -61,26 +91,26 @@ type lookupInternal struct { } func (lkp *lookupInternal) Init(lookupQueryParams map[string]string, autocommit, upsert, multiShardAutocommit bool) error { - lkp.Table = lookupQueryParams["table"] - lkp.To = lookupQueryParams["to"] + lkp.Table = lookupQueryParams[lookupInternalParamTable] + lkp.To = lookupQueryParams[lookupInternalParamTo] var fromColumns []string - for _, from := range strings.Split(lookupQueryParams["from"], ",") { + for _, from := range strings.Split(lookupQueryParams[lookupInternalParamFrom], ",") { fromColumns = append(fromColumns, strings.TrimSpace(from)) } lkp.FromColumns = fromColumns var err error - lkp.IgnoreNulls, err = boolFromMap(lookupQueryParams, "ignore_nulls") + lkp.IgnoreNulls, err = boolFromMap(lookupQueryParams, lookupInternalParamIgnoreNulls) if err != nil { return err } - lkp.BatchLookup, err = boolFromMap(lookupQueryParams, "batch_lookup") + lkp.BatchLookup, err = boolFromMap(lookupQueryParams, lookupInternalParamBatchLookup) if err != nil { return err } - if readLock, ok := lookupQueryParams["read_lock"]; ok { + if readLock, ok := lookupQueryParams[lookupInternalParamReadLock]; ok { if _, valid := readLockExprs[readLock]; !valid { - return fmt.Errorf("invalid read_lock value: %s", readLock) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid %s value: %s", lookupInternalParamReadLock, readLock) } lkp.ReadLock = readLock } @@ -399,10 +429,10 @@ type commonConfig struct { func parseCommonConfig(m map[string]string) (*commonConfig, error) { var c commonConfig var err error - if c.autocommit, err = boolFromMap(m, "autocommit"); err != nil { + if c.autocommit, err = boolFromMap(m, lookupCommonParamAutocommit); err != nil { return nil, err } - if c.multiShardAutocommit, err = boolFromMap(m, "multi_shard_autocommit"); err != nil { + if c.multiShardAutocommit, err = boolFromMap(m, lookupCommonParamMultiShardAutocommit); err != nil { return nil, err } return &c, nil diff --git a/go/vt/vtgate/vindexes/lookup_test.go b/go/vt/vtgate/vindexes/lookup_test.go index df21f07c83d..a59fcbf1da9 100644 --- a/go/vt/vtgate/vindexes/lookup_test.go +++ b/go/vt/vtgate/vindexes/lookup_test.go @@ -19,12 +19,12 @@ package vindexes import ( "context" "errors" + "strings" "testing" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/test/utils" - "strings" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -34,6 +34,8 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" ) // LookupNonUnique tests are more comprehensive than others. @@ -112,6 +114,242 @@ func (vc *vcursor) execute(query string, bindvars map[string]*querypb.BindVariab panic("unexpected") } +func (vc *vcursor) ConnCollation() collations.ID { + return collations.Default() +} + +func lookupCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "lookup", + vindexName: "lookup", + vindexParams: vindexParams, + + expectCost: 20, + expectErr: expectErr, + expectIsUnique: false, + expectNeedsVCursor: true, + expectString: "lookup", + expectUnknownParams: expectUnknownParams, + } +} + +func lookupUniqueCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "lookup_unique", + vindexName: "lookup_unique", + vindexParams: vindexParams, + + expectCost: 10, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: true, + expectString: "lookup_unique", + expectUnknownParams: expectUnknownParams, + } +} + +func testLookupCreateVindexCommonCases(t *testing.T, testCaseF func(string, map[string]string, error, []string) createVindexTestCase) { + testLookupCreateVindexInternalCases(t, testCaseF) + + cases := []createVindexTestCase{ + testCaseF( + "autocommit true", + map[string]string{"autocommit": "true"}, + nil, + nil, + ), + testCaseF( + "autocommit false", + map[string]string{"autocommit": "false"}, + nil, + nil, + ), + testCaseF( + "autocommit reject not bool", + map[string]string{"autocommit": "hello"}, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "autocommit value must be 'true' or 'false': 'hello'"), + nil, + ), + testCaseF( + "multi_shard_autocommit true", + map[string]string{"multi_shard_autocommit": "true"}, + nil, + nil, + ), + testCaseF( + "multi_shard_autocommit false", + map[string]string{"multi_shard_autocommit": "false"}, + nil, + nil, + ), + testCaseF( + "multi_shard_autocommit reject not bool", + map[string]string{"multi_shard_autocommit": "hello"}, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "multi_shard_autocommit value must be 'true' or 'false': 'hello'"), + nil, + ), + } + + testCreateVindexes(t, cases) +} + +func testLookupCreateVindexInternalCases(t *testing.T, testCaseF func(string, map[string]string, error, []string) createVindexTestCase) { + cases := []createVindexTestCase{ + // TODO(maxeng): make table, to, from required params. + testCaseF( + "no params", + nil, + nil, + nil, + ), + testCaseF( + "empty params", + map[string]string{}, + nil, + nil, + ), + testCaseF( + "batch_lookup true", + map[string]string{"batch_lookup": "true"}, + nil, + nil, + ), + testCaseF( + "batch_lookup false", + map[string]string{"batch_lookup": "false"}, + nil, + nil, + ), + testCaseF( + "batch_lookup reject not bool", + map[string]string{"batch_lookup": "hello"}, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "batch_lookup value must be 'true' or 'false': 'hello'"), + nil, + ), + testCaseF( + "ignore_nulls true", + map[string]string{"ignore_nulls": "true"}, + nil, + nil, + ), + testCaseF( + "ignore_nulls false", + map[string]string{"ignore_nulls": "false"}, + nil, + nil, + ), + testCaseF( + "ignore_nulls reject not bool", + map[string]string{"ignore_nulls": "hello"}, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ignore_nulls value must be 'true' or 'false': 'hello'"), + nil, + ), + testCaseF( + "read_lock exclusive", + map[string]string{"read_lock": "exclusive"}, + nil, + nil, + ), + testCaseF( + "read_lock shared", + map[string]string{"read_lock": "shared"}, + nil, + nil, + ), + testCaseF( + "read_lock none", + map[string]string{"read_lock": "none"}, + nil, + nil, + ), + testCaseF( + "read_lock reject unknown values", + map[string]string{"read_lock": "unknown"}, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid read_lock value: unknown"), + nil, + ), + testCaseF( + "ignore_nulls reject not bool", + map[string]string{"ignore_nulls": "hello"}, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ignore_nulls value must be 'true' or 'false': 'hello'"), + nil, + ), + testCaseF( + "write_only true", + map[string]string{"write_only": "true"}, + nil, + nil, + ), + testCaseF( + "write_only false", + map[string]string{"write_only": "false"}, + nil, + nil, + ), + testCaseF( + "write_only reject not bool", + map[string]string{"write_only": "hello"}, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "write_only value must be 'true' or 'false': 'hello'"), + nil, + ), + testCaseF( + "unknown params", + map[string]string{"hello": "world"}, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) +} + +func TestLookupCreateVindex(t *testing.T) { + testCaseFs := []func(string, map[string]string, error, []string) createVindexTestCase{ + lookupCreateVindexTestCase, + lookupUniqueCreateVindexTestCase, + } + for _, testCaseF := range testCaseFs { + testLookupCreateVindexCommonCases(t, testCaseF) + + cases := []createVindexTestCase{ + testCaseF( + "no_verify true", + map[string]string{"no_verify": "true"}, + nil, + nil, + ), + testCaseF( + "no_verify false", + map[string]string{"no_verify": "false"}, + nil, + nil, + ), + testCaseF( + "no_verify reject not bool", + map[string]string{"no_verify": "hello"}, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "no_verify value must be 'true' or 'false': 'hello'"), + nil, + ), + } + + testCreateVindexes(t, cases) + } +} + func TestLookupNonUniqueNew(t *testing.T) { l := createLookup(t, "lookup", false /* writeOnly */) assert.False(t, l.(*LookupNonUnique).writeOnly, "Create(lookup, false)") @@ -128,25 +366,17 @@ func TestLookupNonUniqueNew(t *testing.T) { require.EqualError(t, err, "write_only value must be 'true' or 'false': 'invalid'") } -func TestLookupNonUniqueInfo(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup", false /* writeOnly */) - assert.Equal(t, 20, lookupNonUnique.Cost()) - assert.Equal(t, "lookup", lookupNonUnique.String()) - assert.False(t, lookupNonUnique.IsUnique()) - assert.True(t, lookupNonUnique.NeedsVCursor()) -} - func TestLookupNilVCursor(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup", false /* writeOnly */) - _, err := lookupNonUnique.Map(context.Background(), nil, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + lnu := createLookup(t, "lookup", false /* writeOnly */) + _, err := lnu.Map(context.Background(), nil, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) require.EqualError(t, err, "cannot perform lookup: no vcursor provided") } func TestLookupNonUniqueMap(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup", false /* writeOnly */) + lnu := createLookup(t, "lookup", false /* writeOnly */) vc := &vcursor{numRows: 2} - got, err := lookupNonUnique.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + got, err := lnu.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) require.NoError(t, err) want := []key.Destination{ key.DestinationKeyspaceIDs([][]byte{ @@ -172,7 +402,7 @@ func TestLookupNonUniqueMap(t *testing.T) { // Test query fail. vc.mustFail = true - _, err = lookupNonUnique.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}) + _, err = lnu.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}) require.EqualError(t, err, "lookup.Map: execute failed") } @@ -184,10 +414,11 @@ func TestLookupNonUniqueMapAutocommit(t *testing.T) { "autocommit": "true", }) require.NoError(t, err) - lookupNonUnique := vindex.(SingleColumn) + require.Empty(t, vindex.(ParamValidating).UnknownParams()) + lnu := vindex.(SingleColumn) vc := &vcursor{numRows: 2} - got, err := lookupNonUnique.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + got, err := lnu.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) require.NoError(t, err) want := []key.Destination{ key.DestinationKeyspaceIDs([][]byte{ @@ -214,10 +445,10 @@ func TestLookupNonUniqueMapAutocommit(t *testing.T) { } func TestLookupNonUniqueMapWriteOnly(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup", true) + lnu := createLookup(t, "lookup", true) vc := &vcursor{numRows: 0} - got, err := lookupNonUnique.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + got, err := lnu.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) require.NoError(t, err) want := []key.Destination{ key.DestinationKeyRange{ @@ -231,10 +462,10 @@ func TestLookupNonUniqueMapWriteOnly(t *testing.T) { } func TestLookupNonUniqueMapAbsent(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup", false /* writeOnly */) + lnu := createLookup(t, "lookup", false /* writeOnly */) vc := &vcursor{numRows: 0} - got, err := lookupNonUnique.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + got, err := lnu.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) require.NoError(t, err) want := []key.Destination{ key.DestinationNone{}, @@ -244,10 +475,10 @@ func TestLookupNonUniqueMapAbsent(t *testing.T) { } func TestLookupNonUniqueVerify(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup", false /* writeOnly */) + lnu := createLookup(t, "lookup", false /* writeOnly */) vc := &vcursor{numRows: 1} - _, err := lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) + _, err := lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ @@ -267,15 +498,15 @@ func TestLookupNonUniqueVerify(t *testing.T) { // Test query fail. vc.mustFail = true - _, err = lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}) + _, err = lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}) require.EqualError(t, err, "lookup.Verify: execute failed") vc.mustFail = false // writeOnly true should always yield true. - lookupNonUnique = createLookup(t, "lookup", true) + lnu = createLookup(t, "lookup", true) vc.queries = nil - got, err := lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte(""), []byte("")}) + got, err := lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte(""), []byte("")}) require.NoError(t, err) assert.Empty(t, vc.queries, "lookup verify queries") utils.MustMatch(t, []bool{true, true}, got) @@ -289,10 +520,11 @@ func TestLookupNonUniqueNoVerify(t *testing.T) { "no_verify": "true", }) require.NoError(t, err) - lookupNonUnique := vindex.(SingleColumn) + require.Empty(t, vindex.(ParamValidating).UnknownParams()) + lnu := vindex.(SingleColumn) vc := &vcursor{numRows: 1} - _, err = lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) + _, err = lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) require.NoError(t, err) var wantqueries []*querypb.BoundQuery @@ -300,7 +532,7 @@ func TestLookupNonUniqueNoVerify(t *testing.T) { // Test query fail. vc.mustFail = true - _, err = lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}) + _, err = lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}) require.NoError(t, err) } @@ -312,6 +544,7 @@ func TestLookupUniqueNoVerify(t *testing.T) { "no_verify": "true", }) require.NoError(t, err) + require.Empty(t, vindex.(ParamValidating).UnknownParams()) lookupUnique := vindex.(SingleColumn) vc := &vcursor{numRows: 1} @@ -335,10 +568,11 @@ func TestLookupNonUniqueVerifyAutocommit(t *testing.T) { "autocommit": "true", }) require.NoError(t, err) - lookupNonUnique := vindex.(SingleColumn) + require.Empty(t, vindex.(ParamValidating).UnknownParams()) + lnu := vindex.(SingleColumn) vc := &vcursor{numRows: 1} - _, err = lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) + _, err = lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ @@ -360,10 +594,10 @@ func TestLookupNonUniqueVerifyAutocommit(t *testing.T) { } func TestLookupNonUniqueCreate(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup", false /* writeOnly */) + lnu := createLookup(t, "lookup", false /* writeOnly */) vc := &vcursor{} - err := lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}, {sqltypes.NewInt64(2)}}, [][]byte{[]byte("test1"), []byte("test2")}, false /* ignoreMode */) + err := lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}, {sqltypes.NewInt64(2)}}, [][]byte{[]byte("test1"), []byte("test2")}, false /* ignoreMode */) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ @@ -379,19 +613,19 @@ func TestLookupNonUniqueCreate(t *testing.T) { // With ignore. vc.queries = nil - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(2)}, {sqltypes.NewInt64(1)}}, [][]byte{[]byte("test2"), []byte("test1")}, true /* ignoreMode */) + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(2)}, {sqltypes.NewInt64(1)}}, [][]byte{[]byte("test2"), []byte("test1")}, true /* ignoreMode */) require.NoError(t, err) wantqueries[0].Sql = "insert ignore into t(fromc, toc) values(:fromc_0, :toc_0), (:fromc_1, :toc_1)" utils.MustMatch(t, wantqueries, vc.queries) // With ignore_nulls off - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(2)}, {sqltypes.NULL}}, [][]byte{[]byte("test2"), []byte("test1")}, true /* ignoreMode */) + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(2)}, {sqltypes.NULL}}, [][]byte{[]byte("test2"), []byte("test1")}, true /* ignoreMode */) assert.EqualError(t, err, "lookup.Create: input has null values: row: 1, col: 0") // With ignore_nulls on vc.queries = nil - lookupNonUnique.(*LookupNonUnique).lkp.IgnoreNulls = true - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(2)}, {sqltypes.NULL}}, [][]byte{[]byte("test2"), []byte("test1")}, true /* ignoreMode */) + lnu.(*LookupNonUnique).lkp.IgnoreNulls = true + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(2)}, {sqltypes.NULL}}, [][]byte{[]byte("test2"), []byte("test1")}, true /* ignoreMode */) require.NoError(t, err) wantqueries = []*querypb.BoundQuery{{ Sql: "insert ignore into t(fromc, toc) values(:fromc_0, :toc_0)", @@ -404,26 +638,27 @@ func TestLookupNonUniqueCreate(t *testing.T) { // Test query fail. vc.mustFail = true - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false /* ignoreMode */) + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false /* ignoreMode */) assert.EqualError(t, err, "lookup.Create: execute failed") vc.mustFail = false // Test column mismatch. - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false /* ignoreMode */) + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false /* ignoreMode */) assert.EqualError(t, err, "lookup.Create: column vindex count does not match the columns in the lookup: 2 vs [fromc]") } func TestLookupNonUniqueCreateAutocommit(t *testing.T) { - lookupNonUnique, err := CreateVindex("lookup", "lookup", map[string]string{ + lnu, err := CreateVindex("lookup", "lookup", map[string]string{ "table": "t", "from": "from1,from2", "to": "toc", "autocommit": "true", }) require.NoError(t, err) + require.Empty(t, lnu.(ParamValidating).UnknownParams()) vc := &vcursor{} - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }, { sqltypes.NewInt64(3), sqltypes.NewInt64(4), @@ -446,10 +681,10 @@ func TestLookupNonUniqueCreateAutocommit(t *testing.T) { } func TestLookupNonUniqueDelete(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup", false /* writeOnly */) + lnu := createLookup(t, "lookup", false /* writeOnly */) vc := &vcursor{} - err := lookupNonUnique.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}, {sqltypes.NewInt64(2)}}, []byte("test")) + err := lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}, {sqltypes.NewInt64(2)}}, []byte("test")) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ @@ -469,35 +704,37 @@ func TestLookupNonUniqueDelete(t *testing.T) { // Test query fail. vc.mustFail = true - err = lookupNonUnique.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, []byte("\x16k@\xb4J\xbaK\xd6")) + err = lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, []byte("\x16k@\xb4J\xbaK\xd6")) assert.EqualError(t, err, "lookup.Delete: execute failed") vc.mustFail = false // Test column count fail. - err = lookupNonUnique.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}}, []byte("\x16k@\xb4J\xbaK\xd6")) + err = lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}}, []byte("\x16k@\xb4J\xbaK\xd6")) assert.EqualError(t, err, "lookup.Delete: column vindex count does not match the columns in the lookup: 2 vs [fromc]") } func TestLookupNonUniqueDeleteAutocommit(t *testing.T) { - lookupNonUnique, _ := CreateVindex("lookup", "lookup", map[string]string{ + lnu, err := CreateVindex("lookup", "lookup", map[string]string{ "table": "t", "from": "fromc", "to": "toc", "autocommit": "true", }) + require.NoError(t, err) + require.Empty(t, lnu.(ParamValidating).UnknownParams()) vc := &vcursor{} - err := lookupNonUnique.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}, {sqltypes.NewInt64(2)}}, []byte("test")) + err = lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}, {sqltypes.NewInt64(2)}}, []byte("test")) require.NoError(t, err) utils.MustMatch(t, []*querypb.BoundQuery(nil), vc.queries) } func TestLookupNonUniqueUpdate(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup", false /* writeOnly */) + lnu := createLookup(t, "lookup", false /* writeOnly */) vc := &vcursor{} - err := lookupNonUnique.(Lookup).Update(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, []byte("test"), []sqltypes.Value{sqltypes.NewInt64(2)}) + err := lnu.(Lookup).Update(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, []byte("test"), []sqltypes.Value{sqltypes.NewInt64(2)}) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ @@ -567,16 +804,17 @@ func TestLookupUniqueMapResult(t *testing.T) { } func TestLookupNonUniqueCreateMultiShardAutocommit(t *testing.T) { - lookupNonUnique, err := CreateVindex("lookup", "lookup", map[string]string{ + lnu, err := CreateVindex("lookup", "lookup", map[string]string{ "table": "t", "from": "from1,from2", "to": "toc", "multi_shard_autocommit": "true", }) require.NoError(t, err) + require.Empty(t, lnu.(ParamValidating).UnknownParams()) vc := &vcursor{} - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }, { sqltypes.NewInt64(3), sqltypes.NewInt64(4), @@ -611,5 +849,6 @@ func createLookup(t *testing.T, name string, writeOnly bool) SingleColumn { "write_only": write, }) require.NoError(t, err) + require.Empty(t, l.(ParamValidating).UnknownParams()) return l.(SingleColumn) } diff --git a/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash.go b/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash.go index 433234b82cb..74cbe1423a0 100644 --- a/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash.go +++ b/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash.go @@ -22,26 +22,33 @@ import ( "encoding/json" "fmt" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" ) +const ( + lookupUnicodeLooseMD5HashParamWriteOnly = "write_only" +) + var ( - _ SingleColumn = (*LookupUnicodeLooseMD5Hash)(nil) - _ Lookup = (*LookupUnicodeLooseMD5Hash)(nil) - _ LookupPlanable = (*LookupUnicodeLooseMD5Hash)(nil) - _ SingleColumn = (*LookupUnicodeLooseMD5HashUnique)(nil) - _ Lookup = (*LookupUnicodeLooseMD5HashUnique)(nil) - _ LookupPlanable = (*LookupUnicodeLooseMD5HashUnique)(nil) + _ SingleColumn = (*LookupUnicodeLooseMD5Hash)(nil) + _ Lookup = (*LookupUnicodeLooseMD5Hash)(nil) + _ ParamValidating = (*LookupUnicodeLooseMD5Hash)(nil) + _ SingleColumn = (*LookupUnicodeLooseMD5HashUnique)(nil) + _ Lookup = (*LookupUnicodeLooseMD5HashUnique)(nil) + _ ParamValidating = (*LookupUnicodeLooseMD5HashUnique)(nil) + + lookupUnicodeLooseMD5HashParams = append( + append(make([]string, 0), lookupCommonParams...), + lookupUnicodeLooseMD5HashParamWriteOnly, + ) ) func init() { - Register("lookup_unicodeloosemd5_hash", NewLookupUnicodeLooseMD5Hash) - Register("lookup_unicodeloosemd5_hash_unique", NewLookupUnicodeLooseMD5HashUnique) + Register("lookup_unicodeloosemd5_hash", newLookupUnicodeLooseMD5Hash) + Register("lookup_unicodeloosemd5_hash_unique", newLookupUnicodeLooseMD5HashUnique) } //==================================================================== @@ -51,12 +58,13 @@ func init() { // NonUnique and a Lookup and stores the from value in a hashed form. // Warning: This Vindex is being depcreated in favor of Lookup type LookupUnicodeLooseMD5Hash struct { - name string - writeOnly bool - lkp lookupInternal + name string + writeOnly bool + lkp lookupInternal + unknownParams []string } -// NewLookupUnicodeLooseMD5Hash creates a LookupUnicodeLooseMD5Hash vindex. +// newLookupUnicodeLooseMD5Hash creates a LookupUnicodeLooseMD5Hash vindex. // The supplied map has the following required fields: // // table: name of the backing table. It can be qualified by the keyspace. @@ -67,14 +75,17 @@ type LookupUnicodeLooseMD5Hash struct { // // autocommit: setting this to "true" will cause inserts to upsert and deletes to be ignored. // write_only: in this mode, Map functions return the full keyrange causing a full scatter. -func NewLookupUnicodeLooseMD5Hash(name string, m map[string]string) (Vindex, error) { - lh := &LookupUnicodeLooseMD5Hash{name: name} +func newLookupUnicodeLooseMD5Hash(name string, m map[string]string) (Vindex, error) { + lh := &LookupUnicodeLooseMD5Hash{ + name: name, + unknownParams: FindUnknownParams(m, lookupUnicodeLooseMD5HashParams), + } cc, err := parseCommonConfig(m) if err != nil { return nil, err } - lh.writeOnly, err = boolFromMap(m, "write_only") + lh.writeOnly, err = boolFromMap(m, lookupUnicodeLooseMD5HashParamWriteOnly) if err != nil { return nil, err } @@ -139,7 +150,7 @@ func (lh *LookupUnicodeLooseMD5Hash) Map(ctx context.Context, vcursor VCursor, i } ksids := make([][]byte, 0, len(result.Rows)) for _, row := range result.Rows { - num, err := evalengine.ToUint64(row[0]) + num, err := row[0].ToCastUint64() if err != nil { // A failure to convert is equivalent to not being // able to map. @@ -152,55 +163,10 @@ func (lh *LookupUnicodeLooseMD5Hash) Map(ctx context.Context, vcursor VCursor, i return out, nil } -// MapResult implements the LookupPlanable interface -func (lh *LookupUnicodeLooseMD5Hash) MapResult(ids []sqltypes.Value, results []*sqltypes.Result) ([]key.Destination, error) { - out := make([]key.Destination, 0, len(ids)) - if lh.writeOnly { - for range ids { - out = append(out, key.DestinationKeyRange{KeyRange: &topodatapb.KeyRange{}}) - } - return out, nil - } - - for _, result := range results { - if len(result.Rows) == 0 { - out = append(out, key.DestinationNone{}) - continue - } - ksids := make([][]byte, 0, len(result.Rows)) - for _, row := range result.Rows { - num, err := evalengine.ToUint64(row[0]) - if err != nil { - // A failure to convert is equivalent to not being - // able to map. - continue - } - ksids = append(ksids, vhash(num)) - } - out = append(out, key.DestinationKeyspaceIDs(ksids)) - } - return out, nil -} - -// Query implements the LookupPlanable interface -func (lh *LookupUnicodeLooseMD5Hash) Query() (selQuery string, arguments []string) { - return lh.lkp.query() -} - -// AllowBatch implements the LookupPlanable interface -func (lh *LookupUnicodeLooseMD5Hash) AllowBatch() bool { - return lh.lkp.BatchLookup -} - func (lh *LookupUnicodeLooseMD5Hash) AutoCommitEnabled() bool { return lh.lkp.Autocommit } -// GetCommitOrder implements the LookupPlanable interface -func (lh *LookupUnicodeLooseMD5Hash) GetCommitOrder() vtgatepb.CommitOrder { - return vtgatepb.CommitOrder_NORMAL -} - // Verify returns true if ids maps to ksids. func (lh *LookupUnicodeLooseMD5Hash) Verify(ctx context.Context, vcursor VCursor, ids []sqltypes.Value, ksids [][]byte) ([]bool, error) { if lh.writeOnly { @@ -270,6 +236,11 @@ func (lh *LookupUnicodeLooseMD5Hash) MarshalJSON() ([]byte, error) { return json.Marshal(lh.lkp) } +// UnknownParams implements the ParamValidating interface. +func (lh *LookupUnicodeLooseMD5Hash) UnknownParams() []string { + return lh.unknownParams +} + //==================================================================== // LookupUnicodeLooseMD5HashUnique defines a vindex that uses a lookup table. @@ -277,12 +248,13 @@ func (lh *LookupUnicodeLooseMD5Hash) MarshalJSON() ([]byte, error) { // Unique and a Lookup and will store the from value in a hashed format. // Warning: This Vindex is being depcreated in favor of LookupUnique type LookupUnicodeLooseMD5HashUnique struct { - name string - writeOnly bool - lkp lookupInternal + name string + writeOnly bool + lkp lookupInternal + unknownParams []string } -// NewLookupUnicodeLooseMD5HashUnique creates a LookupUnicodeLooseMD5HashUnique vindex. +// newLookupUnicodeLooseMD5HashUnique creates a LookupUnicodeLooseMD5HashUnique vindex. // The supplied map has the following required fields: // // table: name of the backing table. It can be qualified by the keyspace. @@ -293,20 +265,23 @@ type LookupUnicodeLooseMD5HashUnique struct { // // autocommit: setting this to "true" will cause deletes to be ignored. // write_only: in this mode, Map functions return the full keyrange causing a full scatter. -func NewLookupUnicodeLooseMD5HashUnique(name string, m map[string]string) (Vindex, error) { - lhu := &LookupUnicodeLooseMD5HashUnique{name: name} +func newLookupUnicodeLooseMD5HashUnique(name string, m map[string]string) (Vindex, error) { + lhu := &LookupUnicodeLooseMD5HashUnique{ + name: name, + unknownParams: FindUnknownParams(m, lookupUnicodeLooseMD5HashParams), + } cc, err := parseCommonConfig(m) if err != nil { return nil, err } - lhu.writeOnly, err = boolFromMap(m, "write_only") + lhu.writeOnly, err = boolFromMap(m, lookupUnicodeLooseMD5HashParamWriteOnly) if err != nil { return nil, err } // Don't allow upserts for unique vindexes. - if err := lhu.lkp.Init(m, cc.autocommit, false, cc.multiShardAutocommit); err != nil { + if err := lhu.lkp.Init(m, cc.autocommit, false /* upsert */, cc.multiShardAutocommit); err != nil { return nil, err } return lhu, nil @@ -355,35 +330,7 @@ func (lhu *LookupUnicodeLooseMD5HashUnique) Map(ctx context.Context, vcursor VCu case 0: out = append(out, key.DestinationNone{}) case 1: - num, err := evalengine.ToUint64(result.Rows[0][0]) - if err != nil { - out = append(out, key.DestinationNone{}) - continue - } - out = append(out, key.DestinationKeyspaceID(vhash(num))) - default: - return nil, fmt.Errorf("LookupUnicodeLooseMD5HashUnique.Map: unexpected multiple results from vindex %s: %v", lhu.lkp.Table, ids[i]) - } - } - return out, nil -} - -// MapResult implements the LookupPlanable interface -func (lhu *LookupUnicodeLooseMD5HashUnique) MapResult(ids []sqltypes.Value, results []*sqltypes.Result) ([]key.Destination, error) { - out := make([]key.Destination, 0, len(ids)) - if lhu.writeOnly { - for range ids { - out = append(out, key.DestinationKeyRange{KeyRange: &topodatapb.KeyRange{}}) - } - return out, nil - } - - for i, result := range results { - switch len(result.Rows) { - case 0: - out = append(out, key.DestinationNone{}) - case 1: - num, err := evalengine.ToUint64(result.Rows[0][0]) + num, err := result.Rows[0][0].ToCastUint64() if err != nil { out = append(out, key.DestinationNone{}) continue @@ -396,25 +343,10 @@ func (lhu *LookupUnicodeLooseMD5HashUnique) MapResult(ids []sqltypes.Value, resu return out, nil } -// Query implements the LookupPlanable interface -func (lhu *LookupUnicodeLooseMD5HashUnique) Query() (selQuery string, arguments []string) { - return lhu.lkp.query() -} - -// AllowBatch implements the LookupPlanable interface -func (lhu *LookupUnicodeLooseMD5HashUnique) AllowBatch() bool { - return lhu.lkp.BatchLookup -} - func (lhu *LookupUnicodeLooseMD5HashUnique) AutoCommitEnabled() bool { return lhu.lkp.Autocommit } -// GetCommitOrder implements the LookupPlanable interface -func (lhu *LookupUnicodeLooseMD5HashUnique) GetCommitOrder() vtgatepb.CommitOrder { - return vtgatepb.CommitOrder_NORMAL -} - // Verify returns true if ids maps to ksids. func (lhu *LookupUnicodeLooseMD5HashUnique) Verify(ctx context.Context, vcursor VCursor, ids []sqltypes.Value, ksids [][]byte) ([]bool, error) { if lhu.writeOnly { @@ -489,6 +421,11 @@ func (lhu *LookupUnicodeLooseMD5HashUnique) IsBackfilling() bool { return lhu.writeOnly } +// UnknownParams implements the ParamValidating interface. +func (lhu *LookupUnicodeLooseMD5HashUnique) UnknownParams() []string { + return lhu.unknownParams +} + func unicodeHashValue(value sqltypes.Value) (sqltypes.Value, error) { hash, err := unicodeHash(vMD5Hash, value) if err != nil { diff --git a/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash_test.go b/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash_test.go index 373bc374074..989458ccc13 100644 --- a/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash_test.go +++ b/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash_test.go @@ -37,6 +37,32 @@ const ( hashed40 uint64 = 16576388050845489136 ) +func lookupUnicodeLooseMD5HashCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "lookup_unicodeloosemd5_hash", + vindexName: "lookup_unicodeloosemd5_hash", + vindexParams: vindexParams, + + expectCost: 20, + expectErr: expectErr, + expectIsUnique: false, + expectNeedsVCursor: true, + expectString: "lookup_unicodeloosemd5_hash", + expectUnknownParams: expectUnknownParams, + } +} + +func TestLookupUnicodeLooseMD5HashCreateVindex(t *testing.T) { + testLookupCreateVindexCommonCases(t, lookupUnicodeLooseMD5HashCreateVindexTestCase) +} + func TestLookupUnicodeLooseMD5HashMap(t *testing.T) { lookup := createLookup(t, "lookup_unicodeloosemd5_hash", false) vc := &vcursor{numRows: 2, keys: []sqltypes.Value{sqltypes.NewUint64(hashed10), sqltypes.NewUint64(hashed20)}} @@ -84,16 +110,17 @@ func TestLookupUnicodeLooseMD5HashMapAutocommit(t *testing.T) { "table": "t", "from": "fromc", "to": "toc", - "hash_from": "true", "autocommit": "true", }) if err != nil { t.Fatal(err) } - lookupNonUnique := vindex.(SingleColumn) + unknownParams := vindex.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + lnu := vindex.(SingleColumn) vc := &vcursor{numRows: 2, keys: []sqltypes.Value{sqltypes.NewUint64(hashed10), sqltypes.NewUint64(hashed20)}} - got, err := lookupNonUnique.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}) + got, err := lnu.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}) require.NoError(t, err) want := []key.Destination{ key.DestinationKeyspaceIDs([][]byte{ @@ -127,10 +154,10 @@ func TestLookupUnicodeLooseMD5HashMapAutocommit(t *testing.T) { } func TestLookupUnicodeLooseMD5HashMapWriteOnly(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup_unicodeloosemd5_hash", true) + lnu := createLookup(t, "lookup_unicodeloosemd5_hash", true) vc := &vcursor{numRows: 0} - got, err := lookupNonUnique.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}) + got, err := lnu.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}) require.NoError(t, err) want := []key.Destination{ key.DestinationKeyRange{ @@ -146,10 +173,10 @@ func TestLookupUnicodeLooseMD5HashMapWriteOnly(t *testing.T) { } func TestLookupUnicodeLooseMD5HashMapAbsent(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup_unicodeloosemd5_hash", false) + lnu := createLookup(t, "lookup_unicodeloosemd5_hash", false) vc := &vcursor{numRows: 0} - got, err := lookupNonUnique.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}) + got, err := lnu.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}) require.NoError(t, err) want := []key.Destination{ key.DestinationNone{}, @@ -161,10 +188,10 @@ func TestLookupUnicodeLooseMD5HashMapAbsent(t *testing.T) { } func TestLookupUnicodeLooseMD5HashVerify(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup_unicodeloosemd5_hash", false) + lnu := createLookup(t, "lookup_unicodeloosemd5_hash", false) vc := &vcursor{numRows: 1} - got, err := lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6"), []byte("\x06\xe7\xea\"Βp\x8f")}) + got, err := lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6"), []byte("\x06\xe7\xea\"Βp\x8f")}) require.NoError(t, err) wantResult := []bool{true, true} if !reflect.DeepEqual(got, wantResult) { @@ -190,7 +217,7 @@ func TestLookupUnicodeLooseMD5HashVerify(t *testing.T) { // Test query fail. vc.mustFail = true - _, err = lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}) + _, err = lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}) want := "lookup.Verify: execute failed" if err == nil || err.Error() != want { t.Errorf("lookupNonUnique(query fail) err: %v, want %s", err, want) @@ -198,10 +225,10 @@ func TestLookupUnicodeLooseMD5HashVerify(t *testing.T) { vc.mustFail = false // writeOnly true should always yield true. - lookupNonUnique = createLookup(t, "lookup_unicodeloosemd5_hash", true) + lnu = createLookup(t, "lookup_unicodeloosemd5_hash", true) vc.queries = nil - got, err = lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}, [][]byte{[]byte(""), []byte("")}) + got, err = lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}, [][]byte{[]byte(""), []byte("")}) require.NoError(t, err) if vc.queries != nil { t.Errorf("lookup.Verify(writeOnly), queries: %v, want nil", vc.queries) @@ -222,10 +249,12 @@ func TestLookupUnicodeLooseMD5HashVerifyAutocommit(t *testing.T) { if err != nil { t.Fatal(err) } - lookupNonUnique := vindex.(SingleColumn) + unknownParams := vindex.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + lnu := vindex.(SingleColumn) vc := &vcursor{numRows: 1} - _, err = lookupNonUnique.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6"), []byte("\x06\xe7\xea\"Βp\x8f")}) + _, err = lnu.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6"), []byte("\x06\xe7\xea\"Βp\x8f")}) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ @@ -251,10 +280,10 @@ func TestLookupUnicodeLooseMD5HashVerifyAutocommit(t *testing.T) { } func TestLookupUnicodeLooseMD5HashCreate(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup_unicodeloosemd5_hash", false) + lnu := createLookup(t, "lookup_unicodeloosemd5_hash", false) vc := &vcursor{} - err := lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}, {sqltypes.NewInt64(20)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6"), []byte("\x06\xe7\xea\"Βp\x8f")}, false) + err := lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}, {sqltypes.NewInt64(20)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6"), []byte("\x06\xe7\xea\"Βp\x8f")}, false) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ @@ -272,7 +301,7 @@ func TestLookupUnicodeLooseMD5HashCreate(t *testing.T) { // With ignore. vc.queries = nil - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}, {sqltypes.NewInt64(20)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6"), []byte("\x06\xe7\xea\"Βp\x8f")}, true) + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}, {sqltypes.NewInt64(20)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6"), []byte("\x06\xe7\xea\"Βp\x8f")}, true) require.NoError(t, err) wantqueries[0].Sql = "insert ignore into t(fromc, toc) values(:fromc_0, :toc_0), (:fromc_1, :toc_1)" @@ -282,7 +311,7 @@ func TestLookupUnicodeLooseMD5HashCreate(t *testing.T) { // Test query fail. vc.mustFail = true - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false) + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false) want := "lookup.Create: execute failed" if err == nil || err.Error() != want { t.Errorf("lookupNonUnique(query fail) err: %v, want %s", err, want) @@ -290,7 +319,7 @@ func TestLookupUnicodeLooseMD5HashCreate(t *testing.T) { vc.mustFail = false // Test column mismatch. - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false) + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false) want = "lookup.Create: column vindex count does not match the columns in the lookup: 2 vs [fromc]" if err == nil || err.Error() != want { t.Errorf("lookupNonUnique(query fail) err: %v, want %s", err, want) @@ -298,7 +327,7 @@ func TestLookupUnicodeLooseMD5HashCreate(t *testing.T) { } func TestLookupUnicodeLooseMD5HashCreateAutocommit(t *testing.T) { - lookupNonUnique, err := CreateVindex("lookup_unicodeloosemd5_hash", "lookup", map[string]string{ + lnu, err := CreateVindex("lookup_unicodeloosemd5_hash", "lookup", map[string]string{ "table": "t", "from": "from1,from2", "to": "toc", @@ -307,9 +336,11 @@ func TestLookupUnicodeLooseMD5HashCreateAutocommit(t *testing.T) { if err != nil { t.Fatal(err) } + unknownParams := lnu.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) vc := &vcursor{} - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ sqltypes.NewInt64(10), sqltypes.NewInt64(20), }, { sqltypes.NewInt64(30), sqltypes.NewInt64(40), @@ -337,7 +368,7 @@ func TestLookupUnicodeLooseMD5HashCreateAutocommit(t *testing.T) { } func TestLookupUnicodeLooseMD5HashCreateMultiShardAutocommit(t *testing.T) { - lookupNonUnique, err := CreateVindex("lookup_unicodeloosemd5_hash", "lookup", map[string]string{ + lnu, err := CreateVindex("lookup_unicodeloosemd5_hash", "lookup", map[string]string{ "table": "t", "from": "from1,from2", "to": "toc", @@ -346,9 +377,11 @@ func TestLookupUnicodeLooseMD5HashCreateMultiShardAutocommit(t *testing.T) { if err != nil { t.Fatal(err) } + unknownParams := lnu.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) vc := &vcursor{} - err = lookupNonUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ sqltypes.NewInt64(10), sqltypes.NewInt64(20), }, { sqltypes.NewInt64(30), sqltypes.NewInt64(40), @@ -376,10 +409,10 @@ func TestLookupUnicodeLooseMD5HashCreateMultiShardAutocommit(t *testing.T) { } func TestLookupUnicodeLooseMD5HashDelete(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup_unicodeloosemd5_hash", false) + lnu := createLookup(t, "lookup_unicodeloosemd5_hash", false) vc := &vcursor{} - err := lookupNonUnique.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}, {sqltypes.NewInt64(20)}}, []byte("\x16k@\xb4J\xbaK\xd6")) + err := lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}, {sqltypes.NewInt64(20)}}, []byte("\x16k@\xb4J\xbaK\xd6")) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ @@ -401,7 +434,7 @@ func TestLookupUnicodeLooseMD5HashDelete(t *testing.T) { // Test query fail. vc.mustFail = true - err = lookupNonUnique.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, []byte("\x16k@\xb4J\xbaK\xd6")) + err = lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, []byte("\x16k@\xb4J\xbaK\xd6")) want := "lookup.Delete: execute failed" if err == nil || err.Error() != want { t.Errorf("lookupNonUnique(query fail) err: %v, want %s", err, want) @@ -409,7 +442,7 @@ func TestLookupUnicodeLooseMD5HashDelete(t *testing.T) { vc.mustFail = false // Test column count fail. - err = lookupNonUnique.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}}, []byte("\x16k@\xb4J\xbaK\xd6")) + err = lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}}, []byte("\x16k@\xb4J\xbaK\xd6")) want = "lookup.Delete: column vindex count does not match the columns in the lookup: 2 vs [fromc]" if err == nil || err.Error() != want { t.Errorf("lookupNonUnique(query fail) err: %v, want %s", err, want) @@ -417,15 +450,19 @@ func TestLookupUnicodeLooseMD5HashDelete(t *testing.T) { } func TestLookupUnicodeLooseMD5HashDeleteAutocommit(t *testing.T) { - lookupNonUnique, _ := CreateVindex("lookup_unicodeloosemd5_hash", "lookup", map[string]string{ + lnu, err := CreateVindex("lookup_unicodeloosemd5_hash", "lookup", map[string]string{ "table": "t", "from": "fromc", "to": "toc", "autocommit": "true", }) + unknownParams := lnu.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + require.NoError(t, err) + vc := &vcursor{} - err := lookupNonUnique.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}, {sqltypes.NewInt64(20)}}, []byte("\x16k@\xb4J\xbaK\xd6")) + err = lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10)}, {sqltypes.NewInt64(20)}}, []byte("\x16k@\xb4J\xbaK\xd6")) require.NoError(t, err) wantqueries := []*querypb.BoundQuery(nil) @@ -435,10 +472,10 @@ func TestLookupUnicodeLooseMD5HashDeleteAutocommit(t *testing.T) { } func TestLookupUnicodeLooseMD5HashUpdate(t *testing.T) { - lookupNonUnique := createLookup(t, "lookup_unicodeloosemd5_hash", false) + lnu := createLookup(t, "lookup_unicodeloosemd5_hash", false) vc := &vcursor{} - err := lookupNonUnique.(Lookup).Update(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10)}, []byte("\x16k@\xb4J\xbaK\xd6"), []sqltypes.Value{sqltypes.NewInt64(20)}) + err := lnu.(Lookup).Update(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(10)}, []byte("\x16k@\xb4J\xbaK\xd6"), []sqltypes.Value{sqltypes.NewInt64(20)}) require.NoError(t, err) wantqueries := []*querypb.BoundQuery{{ diff --git a/go/vt/vtgate/vindexes/lookup_unique_test.go b/go/vt/vtgate/vindexes/lookup_unique_test.go index cc04d5340c3..fd2a62c4d21 100644 --- a/go/vt/vtgate/vindexes/lookup_unique_test.go +++ b/go/vt/vtgate/vindexes/lookup_unique_test.go @@ -21,7 +21,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -36,18 +35,21 @@ func TestLookupUniqueNew(t *testing.T) { t.Errorf("Create(lookup, false): %v, want %v", got, want) } - vindex, _ := CreateVindex("lookup_unique", "lookup_unique", map[string]string{ + vindex, err := CreateVindex("lookup_unique", "lookup_unique", map[string]string{ "table": "t", "from": "fromc", "to": "toc", "write_only": "true", }) + require.NoError(t, err) + require.Empty(t, vindex.(ParamValidating).UnknownParams()) + l = vindex.(SingleColumn) if want, got := l.(*LookupUnique).writeOnly, true; got != want { t.Errorf("Create(lookup, false): %v, want %v", got, want) } - _, err := CreateVindex("lookup_unique", "lookup_unique", map[string]string{ + _, err = CreateVindex("lookup_unique", "lookup_unique", map[string]string{ "table": "t", "from": "fromc", "to": "toc", @@ -59,13 +61,6 @@ func TestLookupUniqueNew(t *testing.T) { } } -func TestLookupUniqueInfo(t *testing.T) { - lookupUnique := createLookup(t, "lookup_unique", false) - assert.Equal(t, 10, lookupUnique.Cost()) - assert.Equal(t, "lookup_unique", lookupUnique.String()) - assert.True(t, lookupUnique.IsUnique()) -} - func TestLookupUniqueMap(t *testing.T) { lookupUnique := createLookup(t, "lookup_unique", false) vc := &vcursor{numRows: 1} @@ -163,6 +158,7 @@ func TestLookupUniqueCreate(t *testing.T) { if err != nil { t.Fatal(err) } + require.Empty(t, lookupUnique.(ParamValidating).UnknownParams()) vc := &vcursor{} err = lookupUnique.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, [][]byte{[]byte("test")}, false /* ignoreMode */) diff --git a/go/vt/vtgate/vindexes/main_test.go b/go/vt/vtgate/vindexes/main_test.go new file mode 100644 index 00000000000..226ecfff431 --- /dev/null +++ b/go/vt/vtgate/vindexes/main_test.go @@ -0,0 +1,94 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vindexes + +import ( + "sort" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type createVindexTestCase struct { + testName string + + vindexType string + vindexName string + vindexParams map[string]string + + expectCost int + expectErr error + expectIsUnique bool + expectNeedsVCursor bool + expectString string + expectUnknownParams []string +} + +func assertEqualVtError(t *testing.T, expected, actual error) { + // vterrors.Errorf returns a struct containing a stacktrace, which fails + // assert.EqualError since the stacktrace would be guaranteed to be different. + // so just check the error message + if expected == nil { + assert.NoError(t, actual) + } else { + assert.EqualError(t, actual, expected.Error()) + } +} + +func testCreateVindex( + t *testing.T, + tc createVindexTestCase, + fns ...func(createVindexTestCase, Vindex, []error, error), +) { + t.Run(tc.testName, func(t *testing.T) { + vdx, err := CreateVindex( + tc.vindexType, + tc.vindexName, + tc.vindexParams, + ) + assertEqualVtError(t, tc.expectErr, err) + if err == nil { + assert.NotNil(t, vdx) + } + paramValidating, ok := vdx.(ParamValidating) + var unknownParams []string + if ok { + unknownParams = paramValidating.UnknownParams() + } + require.Equal(t, len(tc.expectUnknownParams), len(unknownParams)) + sort.Strings(tc.expectUnknownParams) + sort.Strings(unknownParams) + require.Equal(t, tc.expectUnknownParams, unknownParams) + if vdx != nil { + assert.Equal(t, tc.expectString, vdx.String()) + assert.Equal(t, tc.expectCost, vdx.Cost()) + assert.Equal(t, tc.expectIsUnique, vdx.IsUnique()) + assert.Equal(t, tc.expectNeedsVCursor, vdx.NeedsVCursor()) + } + }) +} + +func testCreateVindexes( + t *testing.T, + tcs []createVindexTestCase, + fns ...func(createVindexTestCase, Vindex, []error, error), +) { + for _, tc := range tcs { + testCreateVindex(t, tc, fns...) + } +} diff --git a/go/vt/vtgate/vindexes/multicol.go b/go/vt/vtgate/vindexes/multicol.go index 04d84fadb3a..ee53ea5bb60 100644 --- a/go/vt/vtgate/vindexes/multicol.go +++ b/go/vt/vtgate/vindexes/multicol.go @@ -29,7 +29,9 @@ import ( "vitess.io/vitess/go/vt/vterrors" ) -var _ MultiColumn = (*MultiCol)(nil) +var ( + _ MultiColumn = (*MultiCol)(nil) +) type MultiCol struct { name string @@ -46,8 +48,8 @@ const ( defaultVindex = "hash" ) -// NewMultiCol creates a new MultiCol. -func NewMultiCol(name string, m map[string]string) (Vindex, error) { +// newMultiCol creates a new MultiCol. +func newMultiCol(name string, m map[string]string) (Vindex, error) { colCount, err := getColumnCount(m) if err != nil { return nil, err @@ -150,7 +152,7 @@ func (m *MultiCol) mapKsid(colValues []sqltypes.Value) (bool, []byte, error) { } func init() { - Register("multicol", NewMultiCol) + Register("multicol", newMultiCol) } func getColumnVindex(m map[string]string, colCount int) (map[int]Hashing, int, error) { @@ -164,6 +166,15 @@ func getColumnVindex(m map[string]string, colCount int) (map[int]Hashing, int, e } columnVdx := make(map[int]Hashing, colCount) vindexCost := 0 + subParams := make(map[string]string) + for k, v := range m { + if k == paramColumnCount || + k == paramColumnBytes || + k == paramColumnVindex { + continue + } + subParams[k] = v + } for i := 0; i < colCount; i++ { selVdx := defaultVindex if len(colVdxs) > i { @@ -173,7 +184,7 @@ func getColumnVindex(m map[string]string, colCount int) (map[int]Hashing, int, e } } // TODO: reuse vindex. avoid creating same vindex. - vdx, err := CreateVindex(selVdx, selVdx, m) + vdx, err := CreateVindex(selVdx, selVdx, subParams) if err != nil { return nil, 0, err } diff --git a/go/vt/vtgate/vindexes/multicol_test.go b/go/vt/vtgate/vindexes/multicol_test.go index ce2e57dcb0e..e4e2098dd1b 100644 --- a/go/vt/vtgate/vindexes/multicol_test.go +++ b/go/vt/vtgate/vindexes/multicol_test.go @@ -23,29 +23,199 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func multicolCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectCost int, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "multicol", + vindexName: "multicol", + vindexParams: vindexParams, + + expectCost: expectCost, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "multicol", + expectUnknownParams: expectUnknownParams, + } +} + +func TestMulticolCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + multicolCreateVindexTestCase( + "column count 0 invalid", + map[string]string{ + "column_count": "0", + }, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "number of columns should be between 1 and 8 in the parameter 'column_count'"), + nil, + ), + multicolCreateVindexTestCase( + "column count 3 ok", + map[string]string{ + "column_count": "3", + }, + 3, + nil, + nil, + ), + multicolCreateVindexTestCase( + "column count 9 invalid", + map[string]string{ + "column_count": "9", + }, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "number of columns should be between 1 and 8 in the parameter 'column_count'"), + nil, + ), + multicolCreateVindexTestCase( + "column bytes ok", + map[string]string{ + "column_count": "3", + "column_bytes": "1,2,3", + }, + 3, + nil, + nil, + ), + multicolCreateVindexTestCase( + "column bytes more than column count invalid", + map[string]string{ + "column_count": "3", + "column_bytes": "1,2,3,4", + }, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "number of column bytes provided are more than column count in the parameter 'column_bytes'"), + nil, + ), + multicolCreateVindexTestCase( + "column bytes exceeds keyspace id length", + map[string]string{ + "column_count": "3", + "column_bytes": "100,200,300", + }, + 3, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "column bytes count exceeds the keyspace id length (total bytes count cannot exceed 8 bytes) in the parameter 'column_bytes'"), + nil, + ), + multicolCreateVindexTestCase( + "column vindex ok", + map[string]string{ + "column_count": "3", + "column_bytes": "1,2,3", + "column_vindex": "binary,binary,binary", + }, + 0, + nil, + nil, + ), + multicolCreateVindexTestCase( + "column vindex more than column count", + map[string]string{ + "column_count": "3", + "column_bytes": "1,2,3", + "column_vindex": "binary,binary,binary,binary", + }, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "number of vindex function provided are more than column count in the parameter 'column_vindex'"), + nil, + ), + multicolCreateVindexTestCase( + "column vindex non-hashing invalid", + map[string]string{ + "column_count": "3", + "column_bytes": "1,2,3", + "column_vindex": "binary,binary,null", + }, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "multicol vindex supports vindexes that exports hashing function, are unique and are non-lookup vindex, passed vindex 'null' is invalid"), + nil, + ), + multicolCreateVindexTestCase( + "column vindex non-unique invalid", + map[string]string{ + "column_count": "3", + "column_bytes": "1,2,3", + "column_vindex": "binary,binary,cfc", + }, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "multicol vindex supports vindexes that exports hashing function, are unique and are non-lookup vindex, passed vindex 'cfc' is invalid"), + nil, + ), + multicolCreateVindexTestCase( + "column vindex lookup or needs vcursor invalid", + map[string]string{ + "column_count": "3", + "column_bytes": "1,2,3", + "column_vindex": "binary,binary,lookup", + }, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "multicol vindex supports vindexes that exports hashing function, are unique and are non-lookup vindex, passed vindex 'lookup' is invalid"), + nil, + ), + multicolCreateVindexTestCase( + "no params", + nil, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "number of columns not provided in the parameter 'column_count'"), + nil, + ), + multicolCreateVindexTestCase( + "empty params", + map[string]string{}, + 0, + vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "number of columns not provided in the parameter 'column_count'"), + nil, + ), + multicolCreateVindexTestCase( + "allow unknown params", + map[string]string{ + "column_count": "1", + "hello": "world", + }, + 1, + nil, + nil, + ), + } + + testCreateVindexes(t, cases) +} + func TestMultiColMisc(t *testing.T) { - vindex, err := CreateVindex("multicol", "multicol", map[string]string{ + vindex, err := CreateVindex("multicol", "multicol_misc", map[string]string{ "column_count": "3", }) require.NoError(t, err) + _, ok := vindex.(ParamValidating) + require.False(t, ok) multiColVdx, isMultiColVdx := vindex.(*MultiCol) assert.True(t, isMultiColVdx) assert.Equal(t, 3, multiColVdx.Cost()) - assert.Equal(t, "multicol", multiColVdx.String()) + assert.Equal(t, "multicol_misc", multiColVdx.String()) assert.True(t, multiColVdx.IsUnique()) assert.False(t, multiColVdx.NeedsVCursor()) assert.True(t, multiColVdx.PartialVindex()) } func TestMultiColMap(t *testing.T) { - vindex, err := CreateVindex("multicol", "multicol", map[string]string{ + vindex, err := CreateVindex("multicol", "multicol_map", map[string]string{ "column_count": "3", }) require.NoError(t, err) diff --git a/go/vt/vtgate/vindexes/null.go b/go/vt/vtgate/vindexes/null.go index 3e8085b7501..58435643ea7 100644 --- a/go/vt/vtgate/vindexes/null.go +++ b/go/vt/vtgate/vindexes/null.go @@ -25,8 +25,10 @@ import ( ) var ( - _ Vindex = (*Null)(nil) - nullksid = []byte{0} + _ Vindex = (*Null)(nil) + _ ParamValidating = (*Null)(nil) + + nullksid = []byte{0} ) // Null defines a vindex that always return 0. It's Unique and @@ -36,12 +38,16 @@ var ( // Unlike other vindexes, this one will work even for NULL input values. This // will allow you to keep MySQL auto-inc columns unchanged. type Null struct { - name string + name string + unknownParams []string } -// NewNull creates a new Null. -func NewNull(name string, m map[string]string) (Vindex, error) { - return &Null{name: name}, nil +// newNull creates a new Null. +func newNull(name string, m map[string]string) (Vindex, error) { + return &Null{ + name: name, + unknownParams: FindUnknownParams(m, nil), + }, nil } // String returns the name of the vindex. @@ -82,6 +88,11 @@ func (vind *Null) Verify(ctx context.Context, vcursor VCursor, ids []sqltypes.Va return out, nil } +// UnknownParams implements the ParamValidating interface. +func (vind *Null) UnknownParams() []string { + return vind.unknownParams +} + func init() { - Register("null", NewNull) + Register("null", newNull) } diff --git a/go/vt/vtgate/vindexes/null_test.go b/go/vt/vtgate/vindexes/null_test.go index 87baea46ee3..03b97fe651b 100644 --- a/go/vt/vtgate/vindexes/null_test.go +++ b/go/vt/vtgate/vindexes/null_test.go @@ -21,7 +21,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -31,18 +30,62 @@ import ( var null SingleColumn func init() { - hv, err := CreateVindex("null", "nn", map[string]string{"Table": "t", "Column": "c"}) + hv, err := CreateVindex("null", "nn", map[string]string{}) if err != nil { panic(err) } + unknownParams := hv.(ParamValidating).UnknownParams() + if len(unknownParams) > 0 { + panic("null test init: expected 0 unknown params") + } null = hv.(SingleColumn) } -func TestNullInfo(t *testing.T) { - assert.Equal(t, 100, null.Cost()) - assert.Equal(t, "nn", null.String()) - assert.True(t, null.IsUnique()) - assert.False(t, null.NeedsVCursor()) +func nullCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "null", + vindexName: "null", + vindexParams: vindexParams, + + expectCost: 100, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "null", + expectUnknownParams: expectUnknownParams, + } +} + +func TestNullCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + nullCreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + nullCreateVindexTestCase( + "empty params", + map[string]string{}, + nil, + nil, + ), + nullCreateVindexTestCase( + "unknown params", + map[string]string{"hello": "world"}, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestNullMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/numeric.go b/go/vt/vtgate/vindexes/numeric.go index e2f8b512fb9..091807ec2cc 100644 --- a/go/vt/vtgate/vindexes/numeric.go +++ b/go/vt/vtgate/vindexes/numeric.go @@ -22,27 +22,30 @@ import ( "encoding/binary" "fmt" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" ) var ( - _ SingleColumn = (*Numeric)(nil) - _ Reversible = (*Numeric)(nil) - _ Hashing = (*Numeric)(nil) + _ SingleColumn = (*Numeric)(nil) + _ Reversible = (*Numeric)(nil) + _ Hashing = (*Numeric)(nil) + _ ParamValidating = (*Numeric)(nil) ) // Numeric defines a bit-pattern mapping of a uint64 to the KeyspaceId. // It's Unique and Reversible. type Numeric struct { - name string + name string + unknownParams []string } -// NewNumeric creates a Numeric vindex. -func NewNumeric(name string, _ map[string]string) (Vindex, error) { - return &Numeric{name: name}, nil +// newNumeric creates a Numeric vindex. +func newNumeric(name string, m map[string]string) (Vindex, error) { + return &Numeric{ + name: name, + unknownParams: FindUnknownParams(m, nil), + }, nil } // String returns the name of the vindex. @@ -105,8 +108,13 @@ func (*Numeric) ReverseMap(_ VCursor, ksids [][]byte) ([]sqltypes.Value, error) return reverseIds, nil } +// UnknownParams implements the ParamValidating interface. +func (vind *Numeric) UnknownParams() []string { + return vind.unknownParams +} + func (*Numeric) Hash(id sqltypes.Value) ([]byte, error) { - num, err := evalengine.ToUint64(id) + num, err := id.ToCastUint64() if err != nil { return nil, err } @@ -116,5 +124,5 @@ func (*Numeric) Hash(id sqltypes.Value) ([]byte, error) { } func init() { - Register("numeric", NewNumeric) + Register("numeric", newNumeric) } diff --git a/go/vt/vtgate/vindexes/numeric_static_map.go b/go/vt/vtgate/vindexes/numeric_static_map.go index 790832aa848..f97016d915f 100644 --- a/go/vt/vtgate/vindexes/numeric_static_map.go +++ b/go/vt/vtgate/vindexes/numeric_static_map.go @@ -21,19 +21,31 @@ import ( "context" "encoding/binary" "encoding/json" - "errors" "os" "strconv" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +const ( + numericStaticMapParamJSON = "json" + numericStaticMapParamJSONPath = "json_path" + numericStaticMapParamFallbackType = "fallback_type" ) var ( - _ SingleColumn = (*NumericStaticMap)(nil) - _ Hashing = (*NumericStaticMap)(nil) + _ SingleColumn = (*NumericStaticMap)(nil) + _ Hashing = (*NumericStaticMap)(nil) + _ ParamValidating = (*NumericStaticMap)(nil) + + numericStaticMapParams = []string{ + numericStaticMapParamJSON, + numericStaticMapParamJSONPath, + numericStaticMapParamFallbackType, + } ) // NumericLookupTable stores the mapping of keys. @@ -42,29 +54,61 @@ type NumericLookupTable map[uint64]uint64 // NumericStaticMap is similar to vindex Numeric but first attempts a lookup via // a JSON file. type NumericStaticMap struct { - name string - lookup NumericLookupTable + name string + hashVdx Hashing + lookup NumericLookupTable + unknownParams []string } func init() { - Register("numeric_static_map", NewNumericStaticMap) + Register("numeric_static_map", newNumericStaticMap) } -// NewNumericStaticMap creates a NumericStaticMap vindex. -func NewNumericStaticMap(name string, params map[string]string) (Vindex, error) { - jsonPath, ok := params["json_path"] - if !ok { - return nil, errors.New("NumericStaticMap: Could not find `json_path` param in vschema") +// newNumericStaticMap creates a NumericStaticMap vindex. +func newNumericStaticMap(name string, params map[string]string) (Vindex, error) { + jsonStr, jsok := params[numericStaticMapParamJSON] + jsonPath, jpok := params[numericStaticMapParamJSONPath] + + if !jsok && !jpok { + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "NumericStaticMap: Could not find either `json_path` or `json` params in vschema") } - lt, err := loadNumericLookupTable(jsonPath) - if err != nil { - return nil, err + if jsok && jpok { + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "NumericStaticMap: Found both `json` and `json_path` params in vschema") + } + + var err error + var lt NumericLookupTable + + if jpok { + lt, err = loadNumericLookupTable(jsonPath) + if err != nil { + return nil, err + } + } + + if jsok { + lt, err = parseNumericLookupTable([]byte(jsonStr)) + if err != nil { + return nil, err + } + } + + var hashVdx Hashing + + if s, ok := params[numericStaticMapParamFallbackType]; ok { + vindex, err := CreateVindex(s, name+"_hash", map[string]string{}) + if err != nil { + return nil, err + } + hashVdx, _ = vindex.(Hashing) // We know this will not fail } return &NumericStaticMap{ - name: name, - lookup: lt, + hashVdx: hashVdx, + lookup: lt, + name: name, + unknownParams: FindUnknownParams(params, numericStaticMapParams), }, nil } @@ -116,27 +160,42 @@ func (vind *NumericStaticMap) Map(ctx context.Context, vcursor VCursor, ids []sq } func (vind *NumericStaticMap) Hash(id sqltypes.Value) ([]byte, error) { - num, err := evalengine.ToUint64(id) + num, err := id.ToCastUint64() if err != nil { return nil, err } lookupNum, ok := vind.lookup[num] - if ok { + if !ok { + // Not in lookup, use fallback hash + if vind.hashVdx != nil { + return vind.hashVdx.Hash(id) + } + } else { num = lookupNum } + var keybytes [8]byte binary.BigEndian.PutUint64(keybytes[:], num) return keybytes[:], nil } +// UnknownParams implements the ParamValidating interface. +func (vind *NumericStaticMap) UnknownParams() []string { + return vind.unknownParams +} + func loadNumericLookupTable(path string) (NumericLookupTable, error) { - var m map[string]uint64 - lt := make(map[uint64]uint64) data, err := os.ReadFile(path) if err != nil { - return lt, err + return nil, err } - err = json.Unmarshal(data, &m) + return parseNumericLookupTable(data) +} + +func parseNumericLookupTable(data []byte) (NumericLookupTable, error) { + var m map[string]uint64 + lt := make(map[uint64]uint64) + err := json.Unmarshal(data, &m) if err != nil { return lt, err } diff --git a/go/vt/vtgate/vindexes/numeric_static_map_test.go b/go/vt/vtgate/vindexes/numeric_static_map_test.go index 45a66e6fb52..7a373060f16 100644 --- a/go/vt/vtgate/vindexes/numeric_static_map_test.go +++ b/go/vt/vtgate/vindexes/numeric_static_map_test.go @@ -18,6 +18,7 @@ package vindexes import ( "context" + "errors" "reflect" "testing" @@ -26,6 +27,8 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" ) // createVindex creates the "numeric_static_map" vindex object which is used by @@ -40,13 +43,105 @@ func createVindex() (SingleColumn, error) { return vindex.(SingleColumn), nil } -func TestNumericStaticMapInfo(t *testing.T) { - numericStaticMap, err := createVindex() - require.NoError(t, err) - assert.Equal(t, 1, numericStaticMap.Cost()) - assert.Equal(t, "numericStaticMap", numericStaticMap.String()) - assert.True(t, numericStaticMap.IsUnique()) - assert.False(t, numericStaticMap.NeedsVCursor()) +func numericStaticMapCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "numeric_static_map", + vindexName: "numeric_static_map", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "numeric_static_map", + expectUnknownParams: expectUnknownParams, + } +} + +func TestNumericStaticMapCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + numericStaticMapCreateVindexTestCase( + "no params invalid, require either json_path or json", + nil, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "NumericStaticMap: Could not find either `json_path` or `json` params in vschema"), + nil, + ), + numericStaticMapCreateVindexTestCase( + "empty params invalid, require either json_path or json", + map[string]string{}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "NumericStaticMap: Could not find either `json_path` or `json` params in vschema"), + nil, + ), + numericStaticMapCreateVindexTestCase( + "json_path and json mutually exclusive", + map[string]string{ + "json": "{}", + "json_path": "/path/to/map.json", + }, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "NumericStaticMap: Found both `json` and `json_path` params in vschema"), + nil, + ), + numericStaticMapCreateVindexTestCase( + "json_path must exist", + map[string]string{ + "json_path": "/path/to/map.json", + }, + errors.New("open /path/to/map.json: no such file or directory"), + nil, + ), + numericStaticMapCreateVindexTestCase( + "json ok", + map[string]string{ + "json": "{}", + }, + nil, + nil, + ), + numericStaticMapCreateVindexTestCase( + "json must be valid syntax", + map[string]string{ + "json": "{]", + }, + errors.New("invalid character ']' looking for beginning of object key string"), + nil, + ), + numericStaticMapCreateVindexTestCase( + "fallback_type ok", + map[string]string{ + "json": "{}", + "fallback_type": "binary", + }, + nil, + nil, + ), + numericStaticMapCreateVindexTestCase( + "fallback_type must be valid vindex type", + map[string]string{ + "json": "{}", + "fallback_type": "not_found", + }, + vterrors.Errorf(vtrpc.Code_NOT_FOUND, "vindexType %q not found", "not_found"), + nil, + ), + numericStaticMapCreateVindexTestCase( + "unknown params", + map[string]string{ + "json": "{}", + "hello": "world", + }, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestNumericStaticMapMap(t *testing.T) { @@ -101,5 +196,122 @@ func TestNumericStaticMapVerify(t *testing.T) { // Failure test _, err = numericStaticMap.Verify(context.Background(), nil, []sqltypes.Value{sqltypes.NewVarBinary("aa")}, [][]byte{nil}) - require.EqualError(t, err, "could not parse value: 'aa'") + require.EqualError(t, err, "cannot parse uint64 from \"aa\"") +} + +func TestNumericStaticMapWithJsonVdx(t *testing.T) { + withFallbackVdx, err := CreateVindex( + "numeric_static_map", + t.Name(), + map[string]string{ + "json": "{\"1\":2,\"3\":4,\"5\":6}", + }, + ) + + require.NoError(t, err) + require.Empty(t, withFallbackVdx.(ParamValidating).UnknownParams()) + assert.Equal(t, 1, withFallbackVdx.Cost()) + assert.Equal(t, t.Name(), withFallbackVdx.String()) + assert.True(t, withFallbackVdx.IsUnique()) + assert.False(t, withFallbackVdx.NeedsVCursor()) + + // Bad format tests + _, err = CreateVindex( + "numeric_static_map", + t.Name(), + map[string]string{ + "json": "{\"1\":2,\"3\":4,\"5\":6:8,\"10\":11}", + }, + ) + require.EqualError(t, err, "invalid character ':' after object key:value pair") + + // Letters in key or value not allowed + _, err = CreateVindex( + "numeric_static_map", + t.Name(), + map[string]string{"json": "{\"1\":a}"}, + ) + require.EqualError(t, err, "invalid character 'a' looking for beginning of value") + _, err = CreateVindex( + "numeric_static_map", + t.Name(), + map[string]string{"json": "{\"a\":1}"}, + ) + require.EqualError(t, err, "strconv.ParseUint: parsing \"a\": invalid syntax") +} + +// Test mapping of vindex, both for specified map keys and underlying xxhash +func TestNumericStaticMapWithFallback(t *testing.T) { + mapWithFallbackVdx, err := CreateVindex( + "numeric_static_map", + t.Name(), + map[string]string{ + "json": "{\"1\":2,\"3\":4,\"4\":5,\"5\":6,\"6\":7,\"7\":8,\"8\":9,\"10\":18446744073709551615}", + "fallback_type": "xxhash", + }, + ) + if err != nil { + t.Fatalf("failed to create vindex: %v", err) + } + require.Empty(t, mapWithFallbackVdx.(ParamValidating).UnknownParams()) + singleCol := mapWithFallbackVdx.(SingleColumn) + got, err := singleCol.Map(context.Background(), nil, []sqltypes.Value{ + sqltypes.NewInt64(1), + sqltypes.NewInt64(2), + sqltypes.NewInt64(3), + sqltypes.NewFloat64(1.1), + sqltypes.NewVarChar("test1"), + sqltypes.NewInt64(4), + sqltypes.NewInt64(5), + sqltypes.NewInt64(6), + sqltypes.NewInt64(7), + sqltypes.NewInt64(8), + sqltypes.NewInt64(10), + sqltypes.NULL, + }) + require.NoError(t, err) + + want := []key.Destination{ + key.DestinationKeyspaceID([]byte("\x00\x00\x00\x00\x00\x00\x00\x02")), + key.DestinationKeyspaceID([]byte("\x8b\x59\x80\x16\x62\xb5\x21\x60")), + key.DestinationKeyspaceID([]byte("\x00\x00\x00\x00\x00\x00\x00\x04")), + key.DestinationNone{}, + key.DestinationNone{}, // strings do not map + key.DestinationKeyspaceID([]byte("\x00\x00\x00\x00\x00\x00\x00\x05")), + key.DestinationKeyspaceID([]byte("\x00\x00\x00\x00\x00\x00\x00\x06")), + key.DestinationKeyspaceID([]byte("\x00\x00\x00\x00\x00\x00\x00\x07")), + key.DestinationKeyspaceID([]byte("\x00\x00\x00\x00\x00\x00\x00\x08")), + key.DestinationKeyspaceID([]byte("\x00\x00\x00\x00\x00\x00\x00\x09")), + key.DestinationKeyspaceID([]byte("\xff\xff\xff\xff\xff\xff\xff\xff")), + key.DestinationNone{}, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Map()\ngot: %+v\nwant: %+v", got, want) + } +} + +func TestNumericStaticMapWithFallbackVerify(t *testing.T) { + mapWithFallbackVdx, err := CreateVindex( + "numeric_static_map", + t.Name(), + map[string]string{ + "json": "{\"1\":2,\"3\":4,\"4\":5,\"5\":6,\"6\":7,\"7\":8,\"8\":9,\"10\":18446744073709551615}", + "fallback_type": "xxhash", + }, + ) + if err != nil { + t.Fatalf("failed to create vindex: %v", err) + } + require.Empty(t, mapWithFallbackVdx.(ParamValidating).UnknownParams()) + singleCol := mapWithFallbackVdx.(SingleColumn) + got, err := singleCol.Verify(context.Background(), nil, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2), sqltypes.NewInt64(11), sqltypes.NewInt64(10)}, [][]byte{[]byte("\x00\x00\x00\x00\x00\x00\x00\x02"), []byte("\x8b\x59\x80\x16\x62\xb5\x21\x60"), []byte("\xff\xff\xff\xff\xff\xff\xff\xff"), []byte("\xff\xff\xff\xff\xff\xff\xff\xff")}) + require.NoError(t, err) + want := []bool{true, true, false, true} + if !reflect.DeepEqual(got, want) { + t.Errorf("Verify(match): %v, want %v", got, want) + } + + // Failure test + _, err = singleCol.Verify(context.Background(), nil, []sqltypes.Value{sqltypes.NewVarBinary("aa")}, [][]byte{nil}) + require.EqualError(t, err, "cannot parse uint64 from \"aa\"") } diff --git a/go/vt/vtgate/vindexes/numeric_test.go b/go/vt/vtgate/vindexes/numeric_test.go index 5d317d3a161..612c0f3c5e7 100644 --- a/go/vt/vtgate/vindexes/numeric_test.go +++ b/go/vt/vtgate/vindexes/numeric_test.go @@ -21,7 +21,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -31,15 +30,58 @@ import ( var numeric SingleColumn func init() { - vindex, _ := CreateVindex("numeric", "num", nil) + vindex, err := CreateVindex("numeric", "num", nil) + if err != nil { + panic(err) + } numeric = vindex.(SingleColumn) } -func TestNumericInfo(t *testing.T) { - assert.Equal(t, 0, numeric.Cost()) - assert.Equal(t, "num", numeric.String()) - assert.True(t, numeric.IsUnique()) - assert.False(t, numeric.NeedsVCursor()) +func numericCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "numeric", + vindexName: "numeric", + vindexParams: vindexParams, + + expectCost: 0, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "numeric", + expectUnknownParams: expectUnknownParams, + } +} + +func TestNumericCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + numericCreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + numericCreateVindexTestCase( + "empty params", + map[string]string{}, + nil, + nil, + ), + numericCreateVindexTestCase( + "unknown params", + map[string]string{"hello": "world"}, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestNumericMap(t *testing.T) { @@ -85,7 +127,7 @@ func TestNumericVerify(t *testing.T) { // Failure test _, err = numeric.Verify(context.Background(), nil, []sqltypes.Value{sqltypes.NewVarBinary("aa")}, [][]byte{nil}) - require.EqualError(t, err, "could not parse value: 'aa'") + require.EqualError(t, err, "cannot parse uint64 from \"aa\"") } func TestNumericReverseMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/region_experimental.go b/go/vt/vtgate/vindexes/region_experimental.go index 14347e9d9a4..c116e9bd84d 100644 --- a/go/vt/vtgate/vindexes/region_experimental.go +++ b/go/vt/vtgate/vindexes/region_experimental.go @@ -22,18 +22,27 @@ import ( "encoding/binary" "fmt" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +const ( + regionExperimentalParamRegionBytes = "region_bytes" ) var ( - _ MultiColumn = (*RegionExperimental)(nil) + _ MultiColumn = (*RegionExperimental)(nil) + _ ParamValidating = (*RegionExperimental)(nil) + + regionExperimentalParams = []string{ + regionExperimentalParamRegionBytes, + } ) func init() { - Register("region_experimental", NewRegionExperimental) + Register("region_experimental", newRegionExperimental) } // RegionExperimental is a multi-column unique vindex. The first column is prefixed @@ -41,17 +50,18 @@ func init() { // RegionExperimental can be used for geo-partitioning because the first column can denote a region, // and its value will dictate the shard for that region. type RegionExperimental struct { - name string - regionBytes int + name string + regionBytes int + unknownParams []string } -// NewRegionExperimental creates a RegionExperimental vindex. +// newRegionExperimental creates a RegionExperimental vindex. // The supplied map requires all the fields of "consistent_lookup_unique". // Additionally, it requires a region_bytes argument whose value can be "1", or "2". -func NewRegionExperimental(name string, m map[string]string) (Vindex, error) { - rbs, ok := m["region_bytes"] +func newRegionExperimental(name string, m map[string]string) (Vindex, error) { + rbs, ok := m[regionExperimentalParamRegionBytes] if !ok { - return nil, fmt.Errorf("region_experimental missing region_bytes param") + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, fmt.Sprintf("region_experimental missing %s param", regionExperimentalParamRegionBytes)) } var rb int switch rbs { @@ -60,11 +70,12 @@ func NewRegionExperimental(name string, m map[string]string) (Vindex, error) { case "2": rb = 2 default: - return nil, fmt.Errorf("region_bits must be 1 or 2: %v", rbs) + return nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "region_bytes must be 1 or 2: %v", rbs) } return &RegionExperimental{ - name: name, - regionBytes: rb, + name: name, + regionBytes: rb, + unknownParams: FindUnknownParams(m, regionExperimentalParams), }, nil } @@ -97,7 +108,7 @@ func (ge *RegionExperimental) Map(ctx context.Context, vcursor VCursor, rowsColV continue } // Compute region prefix. - rn, err := evalengine.ToUint64(row[0]) + rn, err := row[0].ToCastUint64() if err != nil { destinations = append(destinations, key.DestinationNone{}) continue @@ -113,7 +124,7 @@ func (ge *RegionExperimental) Map(ctx context.Context, vcursor VCursor, rowsColV dest := r if len(row) == 2 { // Compute hash. - hn, err := evalengine.ToUint64(row[1]) + hn, err := row[1].ToCastUint64() if err != nil { destinations = append(destinations, key.DestinationNone{}) continue @@ -145,3 +156,8 @@ func (ge *RegionExperimental) Verify(ctx context.Context, vcursor VCursor, rowsC func (ge *RegionExperimental) PartialVindex() bool { return true } + +// UnknownParams implements the ParamValidating interface. +func (ge *RegionExperimental) UnknownParams() []string { + return ge.unknownParams +} diff --git a/go/vt/vtgate/vindexes/region_experimental_test.go b/go/vt/vtgate/vindexes/region_experimental_test.go index dde9a2f6ea9..56b16b8f3ee 100644 --- a/go/vt/vtgate/vindexes/region_experimental_test.go +++ b/go/vt/vtgate/vindexes/region_experimental_test.go @@ -22,6 +22,8 @@ import ( "testing" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -30,6 +32,88 @@ import ( "vitess.io/vitess/go/vt/key" ) +func regionExperimentalCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "region_experimental", + vindexName: "region_experimental", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "region_experimental", + expectUnknownParams: expectUnknownParams, + } +} + +func TestRegionExperimentalCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + regionExperimentalCreateVindexTestCase( + "no params invalid: region_bytes required", + nil, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "region_experimental missing region_bytes param"), + nil, + ), + regionExperimentalCreateVindexTestCase( + "empty params invalid: region_bytes required", + map[string]string{}, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "region_experimental missing region_bytes param"), + nil, + ), + regionExperimentalCreateVindexTestCase( + "region_bytes may not be 0", + map[string]string{ + "region_bytes": "0", + }, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "region_bytes must be 1 or 2: 0"), + nil, + ), + regionExperimentalCreateVindexTestCase( + "region_bytes may be 1", + map[string]string{ + "region_bytes": "1", + }, + nil, + nil, + ), + regionExperimentalCreateVindexTestCase( + "region_bytes may be 2", + map[string]string{ + "region_bytes": "2", + }, + nil, + nil, + ), + regionExperimentalCreateVindexTestCase( + "region_bytes may not be 3", + map[string]string{ + "region_bytes": "3", + }, + vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "region_bytes must be 1 or 2: 3"), + nil, + ), + regionExperimentalCreateVindexTestCase( + "unknown params", + map[string]string{ + "region_bytes": "1", + "hello": "world", + }, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) +} + func TestRegionExperimentalMisc(t *testing.T) { ge, err := createRegionVindex(t, "region_experimental", "f1,f2", 1) require.NoError(t, err) @@ -122,18 +206,8 @@ func TestRegionExperimentalVerifyMulti(t *testing.T) { assert.Equal(t, want, got) } -func TestRegionExperimentalCreateErrors(t *testing.T) { - _, err := createRegionVindex(t, "region_experimental", "f1,f2", 3) - assert.EqualError(t, err, "region_bits must be 1 or 2: 3") - _, err = CreateVindex("region_experimental", "region_experimental", nil) - assert.EqualError(t, err, "region_experimental missing region_bytes param") -} - func createRegionVindex(t *testing.T, name, from string, rb int) (Vindex, error) { return CreateVindex(name, name, map[string]string{ "region_bytes": strconv.Itoa(rb), - "table": "t", - "from": from, - "to": "toc", }) } diff --git a/go/vt/vtgate/vindexes/region_json.go b/go/vt/vtgate/vindexes/region_json.go index 093ccd9090b..f0ac2ef18fa 100644 --- a/go/vt/vtgate/vindexes/region_json.go +++ b/go/vt/vtgate/vindexes/region_json.go @@ -25,19 +25,27 @@ import ( "os" "strconv" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" ) +const ( + regionJSONParamRegionBytes = "region_bytes" + regionJSONParamRegionMap = "region_map" +) + var ( _ MultiColumn = (*RegionJSON)(nil) + + regionJSONParams = []string{ + regionJSONParamRegionBytes, + regionJSONParamRegionMap, + } ) func init() { - Register("region_json", NewRegionJSON) + Register("region_json", newRegionJSON) } // RegionMap is used to store mapping of country to region @@ -49,17 +57,18 @@ type RegionMap map[string]uint64 // RegionJson can be used for geo-partitioning because the first column can denote a region, // and it will dictate the shard range for that region. type RegionJSON struct { - name string - regionMap RegionMap - regionBytes int + name string + regionMap RegionMap + regionBytes int + unknownParams []string } -// NewRegionJSON creates a RegionJson vindex. +// newRegionJSON creates a RegionJson vindex. // The supplied map requires all the fields of "RegionExperimental". // Additionally, it requires a region_map argument representing the path to a json file // containing a map of country to region. -func NewRegionJSON(name string, m map[string]string) (Vindex, error) { - rmPath := m["region_map"] +func newRegionJSON(name string, m map[string]string) (Vindex, error) { + rmPath := m[regionJSONParamRegionMap] rmap := make(map[string]uint64) data, err := os.ReadFile(rmPath) if err != nil { @@ -70,7 +79,7 @@ func NewRegionJSON(name string, m map[string]string) (Vindex, error) { if err != nil { return nil, err } - rb, err := strconv.Atoi(m["region_bytes"]) + rb, err := strconv.Atoi(m[regionJSONParamRegionBytes]) if err != nil { return nil, err } @@ -81,9 +90,10 @@ func NewRegionJSON(name string, m map[string]string) (Vindex, error) { } return &RegionJSON{ - name: name, - regionMap: rmap, - regionBytes: rb, + name: name, + regionMap: rmap, + regionBytes: rb, + unknownParams: FindUnknownParams(m, regionJSONParams), }, nil } @@ -116,7 +126,7 @@ func (rv *RegionJSON) Map(ctx context.Context, vcursor VCursor, rowsColValues [] continue } // Compute hash. - hn, err := evalengine.ToUint64(row[0]) + hn, err := row[0].ToCastUint64() if err != nil { destinations = append(destinations, key.DestinationNone{}) continue @@ -158,3 +168,8 @@ func (rv *RegionJSON) Verify(ctx context.Context, vcursor VCursor, rowsColValues func (rv *RegionJSON) PartialVindex() bool { return false } + +// UnknownParams implements the ParamValidating interface. +func (rv *RegionJSON) UnknownParams() []string { + return rv.unknownParams +} diff --git a/go/vt/vtgate/vindexes/reverse_bits.go b/go/vt/vtgate/vindexes/reverse_bits.go index 332cae5dfce..80c72ca6924 100644 --- a/go/vt/vtgate/vindexes/reverse_bits.go +++ b/go/vt/vtgate/vindexes/reverse_bits.go @@ -24,8 +24,6 @@ import ( "fmt" "math/bits" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" ) @@ -39,12 +37,16 @@ var ( // ReverseBits defines vindex that reverses the bits of a number. // It's Unique, Reversible and Functional. type ReverseBits struct { - name string + name string + unknownParams []string } -// NewReverseBits creates a new ReverseBits. -func NewReverseBits(name string, m map[string]string) (Vindex, error) { - return &ReverseBits{name: name}, nil +// newReverseBits creates a new ReverseBits. +func newReverseBits(name string, m map[string]string) (Vindex, error) { + return &ReverseBits{ + name: name, + unknownParams: FindUnknownParams(m, nil), + }, nil } // String returns the name of the vindex. @@ -107,8 +109,13 @@ func (vind *ReverseBits) ReverseMap(_ VCursor, ksids [][]byte) ([]sqltypes.Value return reverseIds, nil } +// UnknownParams implements the ParamValidating interface. +func (vind *ReverseBits) UnknownParams() []string { + return vind.unknownParams +} + func (vind *ReverseBits) Hash(id sqltypes.Value) ([]byte, error) { - num, err := evalengine.ToUint64(id) + num, err := id.ToCastUint64() if err != nil { return nil, err } @@ -116,7 +123,7 @@ func (vind *ReverseBits) Hash(id sqltypes.Value) ([]byte, error) { } func init() { - Register("reverse_bits", NewReverseBits) + Register("reverse_bits", newReverseBits) } func reverse(shardKey uint64) []byte { diff --git a/go/vt/vtgate/vindexes/reverse_bits_test.go b/go/vt/vtgate/vindexes/reverse_bits_test.go index 14f3d59820a..dbc2d207919 100644 --- a/go/vt/vtgate/vindexes/reverse_bits_test.go +++ b/go/vt/vtgate/vindexes/reverse_bits_test.go @@ -21,7 +21,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" @@ -31,18 +30,64 @@ import ( var reverseBits SingleColumn func init() { - hv, err := CreateVindex("reverse_bits", "rr", map[string]string{"Table": "t", "Column": "c"}) + hv, err := CreateVindex("reverse_bits", "rr", map[string]string{}) if err != nil { panic(err) } + unknownParams := hv.(ParamValidating).UnknownParams() + if len(unknownParams) > 0 { + panic("reverse_bits test init: expected 0 unknown params") + } reverseBits = hv.(SingleColumn) } -func TestReverseBitsInfo(t *testing.T) { - assert.Equal(t, 1, reverseBits.Cost()) - assert.Equal(t, "rr", reverseBits.String()) - assert.True(t, reverseBits.IsUnique()) - assert.False(t, reverseBits.NeedsVCursor()) +func reverseBitsCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "reverse_bits", + vindexName: "reverse_bits", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "reverse_bits", + expectUnknownParams: expectUnknownParams, + } +} + +func TestReverseBitsCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + reverseBitsCreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + reverseBitsCreateVindexTestCase( + "empty params", + map[string]string{}, + nil, + nil, + ), + reverseBitsCreateVindexTestCase( + "unknown params", + map[string]string{ + "hello": "world", + }, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestReverseBitsMap(t *testing.T) { @@ -84,7 +129,7 @@ func TestReverseBitsVerify(t *testing.T) { // Failure test _, err = reverseBits.Verify(context.Background(), nil, []sqltypes.Value{sqltypes.NewVarBinary("aa")}, [][]byte{nil}) - require.EqualError(t, err, "could not parse value: 'aa'") + require.EqualError(t, err, "cannot parse uint64 from \"aa\"") } func TestReverseBitsReverseMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/unicodeloosemd5.go b/go/vt/vtgate/vindexes/unicodeloosemd5.go index dfe7d59f737..8fa6ac33bef 100644 --- a/go/vt/vtgate/vindexes/unicodeloosemd5.go +++ b/go/vt/vtgate/vindexes/unicodeloosemd5.go @@ -26,8 +26,9 @@ import ( ) var ( - _ SingleColumn = (*UnicodeLooseMD5)(nil) - _ Hashing = (*UnicodeLooseMD5)(nil) + _ SingleColumn = (*UnicodeLooseMD5)(nil) + _ Hashing = (*UnicodeLooseMD5)(nil) + _ ParamValidating = (*UnicodeLooseMD5)(nil) ) // UnicodeLooseMD5 is a vindex that normalizes and hashes unicode strings @@ -36,12 +37,16 @@ var ( // Ref: http://www.unicode.org/reports/tr10/#Multi_Level_Comparison. // This is compatible with MySQL's utf8_unicode_ci collation. type UnicodeLooseMD5 struct { - name string + name string + unknownParams []string } -// NewUnicodeLooseMD5 creates a new UnicodeLooseMD5. -func NewUnicodeLooseMD5(name string, _ map[string]string) (Vindex, error) { - return &UnicodeLooseMD5{name: name}, nil +// newUnicodeLooseMD5 creates a new UnicodeLooseMD5. +func newUnicodeLooseMD5(name string, m map[string]string) (Vindex, error) { + return &UnicodeLooseMD5{ + name: name, + unknownParams: FindUnknownParams(m, nil), + }, nil } // String returns the name of the vindex. @@ -94,6 +99,11 @@ func (vind *UnicodeLooseMD5) Hash(id sqltypes.Value) ([]byte, error) { return unicodeHash(vMD5Hash, id) } +// UnknownParams implements the ParamValidating interface. +func (vind *UnicodeLooseMD5) UnknownParams() []string { + return vind.unknownParams +} + func init() { - Register("unicode_loose_md5", NewUnicodeLooseMD5) + Register("unicode_loose_md5", newUnicodeLooseMD5) } diff --git a/go/vt/vtgate/vindexes/unicodeloosemd5_test.go b/go/vt/vtgate/vindexes/unicodeloosemd5_test.go index dea4a048783..879414c5be9 100644 --- a/go/vt/vtgate/vindexes/unicodeloosemd5_test.go +++ b/go/vt/vtgate/vindexes/unicodeloosemd5_test.go @@ -21,8 +21,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" ) @@ -30,15 +28,60 @@ import ( var charVindexMD5 SingleColumn func init() { - vindex, _ := CreateVindex("unicode_loose_md5", "utf8ch", nil) + vindex, err := CreateVindex("unicode_loose_md5", "utf8ch", nil) + if err != nil { + panic(err) + } charVindexMD5 = vindex.(SingleColumn) } -func TestUnicodeLooseMD5Info(t *testing.T) { - assert.Equal(t, 1, charVindexMD5.Cost()) - assert.Equal(t, "utf8ch", charVindexMD5.String()) - assert.True(t, charVindexMD5.IsUnique()) - assert.False(t, charVindexMD5.NeedsVCursor()) +func unicodeLooseMD5CreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "unicode_loose_md5", + vindexName: "unicode_loose_md5", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "unicode_loose_md5", + expectUnknownParams: expectUnknownParams, + } +} + +func TestUnicodeLooseMD5CreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + unicodeLooseMD5CreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + unicodeLooseMD5CreateVindexTestCase( + "empty params", + map[string]string{}, + nil, + nil, + ), + unicodeLooseMD5CreateVindexTestCase( + "unknown params", + map[string]string{ + "hello": "world", + }, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestUnicodeLooseMD5Map(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/unicodeloosexxhash.go b/go/vt/vtgate/vindexes/unicodeloosexxhash.go index dcd924131aa..5e04bff1866 100644 --- a/go/vt/vtgate/vindexes/unicodeloosexxhash.go +++ b/go/vt/vtgate/vindexes/unicodeloosexxhash.go @@ -26,8 +26,9 @@ import ( ) var ( - _ SingleColumn = (*UnicodeLooseXXHash)(nil) - _ Hashing = (*UnicodeLooseXXHash)(nil) + _ SingleColumn = (*UnicodeLooseXXHash)(nil) + _ Hashing = (*UnicodeLooseXXHash)(nil) + _ ParamValidating = (*UnicodeLooseXXHash)(nil) ) // UnicodeLooseXXHash is a vindex that normalizes and hashes unicode strings @@ -36,12 +37,16 @@ var ( // Ref: http://www.unicode.org/reports/tr10/#Multi_Level_Comparison. // This is compatible with MySQL's utf8_unicode_ci collation. type UnicodeLooseXXHash struct { - name string + name string + unknownParams []string } -// NewUnicodeLooseXXHash creates a new UnicodeLooseXXHash struct. -func NewUnicodeLooseXXHash(name string, _ map[string]string) (Vindex, error) { - return &UnicodeLooseXXHash{name: name}, nil +// newUnicodeLooseXXHash creates a new UnicodeLooseXXHash struct. +func newUnicodeLooseXXHash(name string, m map[string]string) (Vindex, error) { + return &UnicodeLooseXXHash{ + name: name, + unknownParams: FindUnknownParams(m, nil), + }, nil } // String returns the name of the vindex. @@ -94,6 +99,11 @@ func (vind *UnicodeLooseXXHash) Hash(id sqltypes.Value) ([]byte, error) { return unicodeHash(vXXHash, id) } +// UnknownParams implements the ParamValidating interface. +func (vind *UnicodeLooseXXHash) UnknownParams() []string { + return vind.unknownParams +} + func init() { - Register("unicode_loose_xxhash", NewUnicodeLooseXXHash) + Register("unicode_loose_xxhash", newUnicodeLooseXXHash) } diff --git a/go/vt/vtgate/vindexes/unicodeloosexxhash_test.go b/go/vt/vtgate/vindexes/unicodeloosexxhash_test.go index e5ae98cf87f..6836bfd4ffa 100644 --- a/go/vt/vtgate/vindexes/unicodeloosexxhash_test.go +++ b/go/vt/vtgate/vindexes/unicodeloosexxhash_test.go @@ -21,8 +21,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" ) @@ -30,15 +28,64 @@ import ( var charVindexXXHash SingleColumn func init() { - vindex, _ := CreateVindex("unicode_loose_xxhash", "utf8ch", nil) + vindex, err := CreateVindex("unicode_loose_xxhash", "utf8ch", nil) + if err != nil { + panic(err) + } + unknownParams := vindex.(ParamValidating).UnknownParams() + if len(unknownParams) > 0 { + panic("unicode_loose_xxhash test init: expected 0 unknown params") + } charVindexXXHash = vindex.(SingleColumn) } -func TestUnicodeLooseXXHashInfo(t *testing.T) { - assert.Equal(t, 1, charVindexXXHash.Cost()) - assert.Equal(t, "utf8ch", charVindexXXHash.String()) - assert.True(t, charVindexXXHash.IsUnique()) - assert.False(t, charVindexXXHash.NeedsVCursor()) +func unicodeLooseXXHashCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "unicode_loose_xxhash", + vindexName: "unicode_loose_xxhash", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "unicode_loose_xxhash", + expectUnknownParams: expectUnknownParams, + } +} + +func TestUnicodeLooseXXHashCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + unicodeLooseXXHashCreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + unicodeLooseXXHashCreateVindexTestCase( + "empty params", + map[string]string{}, + nil, + nil, + ), + unicodeLooseXXHashCreateVindexTestCase( + "unknown params", + map[string]string{ + "hello": "world", + }, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestUnicodeLooseXXHashMap(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/vindex.go b/go/vt/vtgate/vindexes/vindex.go index 700b8e6175c..a5295681248 100644 --- a/go/vt/vtgate/vindexes/vindex.go +++ b/go/vt/vtgate/vindexes/vindex.go @@ -19,7 +19,9 @@ package vindexes import ( "context" "fmt" + "sort" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/sqlparser" @@ -41,6 +43,7 @@ type ( ExecuteKeyspaceID(ctx context.Context, keyspace string, ksid []byte, query string, bindVars map[string]*querypb.BindVariable, rollbackOnError, autocommit bool) (*sqltypes.Result, error) InTransactionAndIsDML() bool LookupRowLockShardSession() vtgatepb.CommitOrder + ConnCollation() collations.ID } // Vindex defines the interface required to register a vindex. @@ -71,6 +74,22 @@ type ( NeedsVCursor() bool } + // ParamValidating is an optional interface that Vindexes may implement to + // report errors about unknown params encountered during Vindex creation. + ParamValidating interface { + // UnknownParams returns a slice of param names that were provided + // during Vindex creation, but were not known and therefore ignored by + // the Vindex. + UnknownParams() []string + } + + // ParamValidationOpts may be used by Vindexes that accept params to + // validate params with ValidateParams(params, opts). + ParamValidationOpts struct { + // Params contains param names known by the vindex. + Params []string + } + // SingleColumn defines the interface for a single column vindex. SingleColumn interface { Vindex @@ -166,7 +185,7 @@ type ( var registry = make(map[string]NewVindexFunc) -// Register registers a vindex under the specified vindexType. +// Register registers a vindex factory under the specified vindexType. // A duplicate vindexType will generate a panic. // New vindexes will be created using these functions at the // time of vschema loading. @@ -179,7 +198,7 @@ func Register(vindexType string, newVindexFunc NewVindexFunc) { // CreateVindex creates a vindex of the specified type using the // supplied params. The type must have been previously registered. -func CreateVindex(vindexType, name string, params map[string]string) (Vindex, error) { +func CreateVindex(vindexType, name string, params map[string]string) (vindex Vindex, err error) { f, ok := registry[vindexType] if !ok { return nil, fmt.Errorf("vindexType %q not found", vindexType) @@ -216,3 +235,19 @@ func firstColsOnly(rowsColValues [][]sqltypes.Value) []sqltypes.Value { } return firstCols } + +// FindUnknownParams a sorted slice of keys in params that are not present in knownParams. +func FindUnknownParams(params map[string]string, knownParams []string) []string { + var unknownParams []string + knownParamsByName := make(map[string]struct{}) + for _, knownParam := range knownParams { + knownParamsByName[knownParam] = struct{}{} + } + for name := range params { + if _, ok := knownParamsByName[name]; !ok { + unknownParams = append(unknownParams, name) + } + } + sort.Strings(unknownParams) + return unknownParams +} diff --git a/go/vt/vtgate/vindexes/vindex_test.go b/go/vt/vtgate/vindexes/vindex_test.go index 6e2952e6bca..97b17da2a4b 100644 --- a/go/vt/vtgate/vindexes/vindex_test.go +++ b/go/vt/vtgate/vindexes/vindex_test.go @@ -18,14 +18,68 @@ package vindexes import ( "context" + "sort" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" ) +type testVindex struct { + allowUnknownParams bool + knownParams []string + params map[string]string +} + +func (v *testVindex) Cost() int { + return 0 +} + +func (v *testVindex) String() string { + return "" +} + +func (v *testVindex) IsUnique() bool { + return false +} + +func (v *testVindex) NeedsVCursor() bool { + return false +} + +func (v *testVindex) UnknownParams() []string { + if v.allowUnknownParams { + return nil + } + return FindUnknownParams(v.params, v.knownParams) +} + +func init() { + Register("allow_unknown_params", func(_ string, params map[string]string) (Vindex, error) { + return &testVindex{ + allowUnknownParams: true, + knownParams: []string{ + "option1", + "option2", + }, + params: params, + }, nil + }) + Register("warn_unknown_params", func(_ string, params map[string]string) (Vindex, error) { + return &testVindex{ + allowUnknownParams: false, + knownParams: []string{ + "option1", + "option2", + }, + params: params, + }, nil + }) +} + func TestVindexMap(t *testing.T) { ge, err := createRegionVindex(t, "region_experimental", "f1,f2", 1) assert.NoError(t, err) @@ -42,6 +96,7 @@ func TestVindexMap(t *testing.T) { hash, err := CreateVindex("hash", "hash", nil) assert.NoError(t, err) + require.Empty(t, hash.(ParamValidating).UnknownParams()) got, err = Map(context.Background(), hash, nil, [][]sqltypes.Value{{ sqltypes.NewInt64(1), }}) @@ -55,6 +110,7 @@ func TestVindexMap(t *testing.T) { func TestVindexVerify(t *testing.T) { ge, err := createRegionVindex(t, "region_experimental", "f1,f2", 1) assert.NoError(t, err) + require.Empty(t, ge.(ParamValidating).UnknownParams()) got, err := Verify(context.Background(), ge, nil, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(1), @@ -67,6 +123,7 @@ func TestVindexVerify(t *testing.T) { assert.Equal(t, want, got) hash, err := CreateVindex("hash", "hash", nil) + require.Empty(t, hash.(ParamValidating).UnknownParams()) assert.NoError(t, err) got, err = Verify(context.Background(), hash, nil, [][]sqltypes.Value{{ sqltypes.NewInt64(1), @@ -76,3 +133,40 @@ func TestVindexVerify(t *testing.T) { assert.NoError(t, err) assert.Equal(t, want, got) } + +func TestCreateVindexAllowUnknownParams(t *testing.T) { + vindex, err := CreateVindex( + "allow_unknown_params", + "allow_unknown_params", + map[string]string{ + "option1": "value1", + "option2": "value2", + "option3": "value3", + "option4": "value4", + }, + ) + + require.NotNil(t, vindex) + require.NoError(t, err) +} + +func TestCreateVindexWarnUnknownParams(t *testing.T) { + vindex, err := CreateVindex( + "warn_unknown_params", + "warn_unknown_params", + map[string]string{ + "option1": "value1", + "option2": "value2", + "option3": "value3", + "option4": "value4", + }, + ) + + require.NotNil(t, vindex) + require.NoError(t, err) + + unknownParams := vindex.(ParamValidating).UnknownParams() + sort.Strings(unknownParams) + require.Len(t, unknownParams, 2) + require.Equal(t, []string{"option3", "option4"}, unknownParams) +} diff --git a/go/vt/vtgate/vindexes/vschema.go b/go/vt/vtgate/vindexes/vschema.go index be1223d0c23..95de008c705 100644 --- a/go/vt/vtgate/vindexes/vschema.go +++ b/go/vt/vtgate/vindexes/vschema.go @@ -23,6 +23,7 @@ import ( "os" "sort" "strings" + "time" "vitess.io/vitess/go/sqlescape" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -61,13 +62,17 @@ const ( type VSchema struct { RoutingRules map[string]*RoutingRule `json:"routing_rules"` - // uniqueTables contains the name of all tables in all keyspaces. if the table is uniquely named, the value will - // be the name of the keyspace where this table exists. if multiple keyspaces have a table with the same name, the - // value will be a `nil` value + // globalTables contains the name of all tables in all keyspaces. If the + // table is uniquely named, the value will be the qualified Table object + // with the keyspace where this table exists. If multiple keyspaces have a + // table with the same name, the value will be a `nil`. globalTables map[string]*Table uniqueVindexes map[string]Vindex Keyspaces map[string]*KeyspaceSchema `json:"keyspaces"` ShardRoutingRules map[string]string `json:"shard_routing_rules"` + // created is the time when the VSchema object was created. Used to detect if a cached + // copy of the vschema is stale. + created time.Time } // RoutingRule represents one routing rule. @@ -110,6 +115,14 @@ type Table struct { // Source is a keyspace-qualified table name that points to the source of a // reference table. Only applicable for tables with Type set to "reference". Source *Source `json:"source,omitempty"` + + ChildForeignKeys []ChildFKInfo `json:"child_foreign_keys,omitempty"` + ParentForeignKeys []ParentFKInfo `json:"parent_foreign_keys,omitempty"` +} + +// GetTableName gets the sqlparser.TableName for the vindex Table. +func (t *Table) GetTableName() sqlparser.TableName { + return sqlparser.NewTableNameWithQualifier(t.Name.String(), t.Keyspace.Name) } // Keyspace contains the keyspcae info for each Table. @@ -128,6 +141,13 @@ type ColumnVindex struct { isUnique bool cost int partial bool + backfill bool +} + +// TableInfo contains column and foreign key info for a table. +type TableInfo struct { + Columns []Column + ForeignKeys []*sqlparser.ForeignKeyDefinition } // IsUnique is used to tell whether the ColumnVindex @@ -148,6 +168,11 @@ func (c *ColumnVindex) IsPartialVindex() bool { return c.partial } +// IsBackfilling returns true if the vindex is in the process of backfilling the rows. +func (c *ColumnVindex) IsBackfilling() bool { + return c.backfill +} + // Column describes a column. type Column struct { Name sqlparser.IdentifierCI `json:"name"` @@ -168,27 +193,53 @@ func (col *Column) MarshalJSON() ([]byte, error) { // KeyspaceSchema contains the schema(table) for a keyspace. type KeyspaceSchema struct { - Keyspace *Keyspace - Tables map[string]*Table - Vindexes map[string]Vindex - Views map[string]sqlparser.SelectStatement - Error error + Keyspace *Keyspace + ForeignKeyMode vschemapb.Keyspace_ForeignKeyMode + Tables map[string]*Table + Vindexes map[string]Vindex + Views map[string]sqlparser.SelectStatement + Error error } type ksJSON struct { - Sharded bool `json:"sharded,omitempty"` - Tables map[string]*Table `json:"tables,omitempty"` - Vindexes map[string]Vindex `json:"vindexes,omitempty"` - Views map[string]string `json:"views,omitempty"` - Error string `json:"error,omitempty"` + Sharded bool `json:"sharded,omitempty"` + ForeignKeyMode string `json:"foreignKeyMode,omitempty"` + Tables map[string]*Table `json:"tables,omitempty"` + Vindexes map[string]Vindex `json:"vindexes,omitempty"` + Views map[string]string `json:"views,omitempty"` + Error string `json:"error,omitempty"` +} + +// findTable looks for the table with the requested tablename in the keyspace. +// +// If no table matching the requested tablename is found, and: +// +// - constructUnshardedIfNotFound is not requested, than no table is returned. +// - constructUnshardedIfNotFound is requested, and the keyspace is +// unsharded, then a *Table representing that table is returned. +func (ks *KeyspaceSchema) findTable( + tablename string, + constructUnshardedIfNotFound bool, +) *Table { + table := ks.Tables[tablename] + if table != nil { + return table + } + + if constructUnshardedIfNotFound && !ks.Keyspace.Sharded { + return &Table{Name: sqlparser.NewIdentifierCS(tablename), Keyspace: ks.Keyspace} + } + + return nil } // MarshalJSON returns a JSON representation of KeyspaceSchema. func (ks *KeyspaceSchema) MarshalJSON() ([]byte, error) { ksJ := ksJSON{ - Sharded: ks.Keyspace.Sharded, - Tables: ks.Tables, - Vindexes: ks.Vindexes, + Sharded: ks.Keyspace.Sharded, + Tables: ks.Tables, + ForeignKeyMode: ks.ForeignKeyMode.String(), + Vindexes: ks.Vindexes, } if ks.Error != nil { ksJ.Error = ks.Error.Error() @@ -226,13 +277,17 @@ func BuildVSchema(source *vschemapb.SrvVSchema) (vschema *VSchema) { globalTables: make(map[string]*Table), uniqueVindexes: make(map[string]Vindex), Keyspaces: make(map[string]*KeyspaceSchema), + created: time.Now(), } buildKeyspaces(source, vschema) - buildReferences(source, vschema) + // buildGlobalTables before buildReferences so that buildReferences can + // resolve sources which reference global tables. buildGlobalTables(source, vschema) - resolveAutoIncrement(source, vschema) + buildReferences(source, vschema) buildRoutingRule(source, vschema) buildShardRoutingRule(source, vschema) + // Resolve auto-increments after routing rules are built since sequence tables also obey routing rules. + resolveAutoIncrement(source, vschema) return vschema } @@ -258,11 +313,10 @@ func BuildKeyspaceSchema(input *vschemapb.Keyspace, keyspace string) (*KeyspaceS return vschema.Keyspaces[keyspace], err } -// ValidateKeyspace ensures that the keyspace vschema is valid. +// BuildKeyspace ensures that the keyspace vschema is valid. // External references (like sequence) are not validated. -func ValidateKeyspace(input *vschemapb.Keyspace) error { - _, err := BuildKeyspaceSchema(input, "") - return err +func BuildKeyspace(input *vschemapb.Keyspace) (*KeyspaceSchema, error) { + return BuildKeyspaceSchema(input, "") } func buildKeyspaces(source *vschemapb.SrvVSchema, vschema *VSchema) { @@ -272,14 +326,23 @@ func buildKeyspaces(source *vschemapb.SrvVSchema, vschema *VSchema) { Name: ksname, Sharded: ks.Sharded, }, - Tables: make(map[string]*Table), - Vindexes: make(map[string]Vindex), + ForeignKeyMode: replaceDefaultForeignKeyMode(ks.ForeignKeyMode), + Tables: make(map[string]*Table), + Vindexes: make(map[string]Vindex), } vschema.Keyspaces[ksname] = ksvschema ksvschema.Error = buildTables(ks, vschema, ksvschema) } } +// replaceDefaultForeignKeyMode replaces the default value of the foreign key mode enum with the default we want to keep. +func replaceDefaultForeignKeyMode(fkMode vschemapb.Keyspace_ForeignKeyMode) vschemapb.Keyspace_ForeignKeyMode { + if fkMode == vschemapb.Keyspace_FK_DEFAULT { + return vschemapb.Keyspace_FK_UNMANAGED + } + return fkMode +} + func (vschema *VSchema) AddView(ksname string, viewName, query string) error { ks, ok := vschema.Keyspaces[ksname] if !ok { @@ -327,18 +390,26 @@ func buildKeyspaceGlobalTables(vschema *VSchema, ksvschema *KeyspaceSchema) { if gt == nil { // Table name is already marked ambiguous, nothing to do. continue - } else if t.isReferencedInKeyspace(gt.Keyspace.Name) { - // If the stored table refers to this table, store this - // table instead. - vschema.globalTables[tname] = t - } else if gt.isReferencedInKeyspace(t.Keyspace.Name) { - // The source of this table is already stored. Do nothing. - continue } else { + // Special handling for reference tables which specify their source. + if t.Type == TypeReference && t.Source != nil { + // If the reference table points to the already stored + // global table, there is no ambiguity. + if t.Source.Qualifier.IsEmpty() || t.Source.Qualifier.String() == gt.Keyspace.Name { + continue + } + } // Otherwise, mark this table name ambiguous. vschema.globalTables[tname] = nil } } else { + // Reference tables which define a source with the same name may be + // globally routable through their source, as long as the source + // doesn't require explicit routing. + if t.Type == TypeReference && t.Source != nil && t.Source.Name.String() == t.Name.String() { + continue + } + vschema.globalTables[tname] = t } } @@ -363,6 +434,7 @@ func buildKeyspaceReferences(vschema *VSchema, ksvschema *KeyspaceSchema) error } sourceKsname := source.Qualifier.String() + sourceTname := source.Name.String() // Prohibit self-references. if sourceKsname == keyspace.Name { @@ -374,10 +446,32 @@ func buildKeyspaceReferences(vschema *VSchema, ksvschema *KeyspaceSchema) error ) } - // Validate that reference can be resolved. - _, sourceT, err := vschema.findKeyspaceAndTableBySource(source) + // Verify the reference source can be resolved. + sourceT, err := vschema.findTable( + sourceKsname, + sourceTname, + false, /* constructTableIfNotFound */ + ) + // Rephrase errors to be more helpful in the context of VSchema linting. + if err != nil { + if vterrors.Code(err) != vtrpcpb.Code_NOT_FOUND || vterrors.ErrState(err) != vterrors.BadDb { + return err + } + return vterrors.Errorf( + vtrpcpb.Code_NOT_FOUND, + "source %q references a non-existent keyspace %q", + source, + sourceKsname, + ) + } if sourceT == nil { - return err + return vterrors.Errorf( + vtrpcpb.Code_NOT_FOUND, + "source %q references a table %q that is not present in the VSchema of keyspace %q", + source, + sourceTname, + sourceKsname, + ) } // Validate source table types. @@ -405,8 +499,10 @@ func buildKeyspaceReferences(vschema *VSchema, ksvschema *KeyspaceSchema) error } sourceT.addReferenceInKeyspace(keyspace.Name, t) - // Forbid reference chains. - for sourceT.Source != nil { + // Forbid reference chains. This is not necessarily a technical + // limitation. If people want this, in theory it should be possible as + // long as reference chains are not circular. + if sourceT.Source != nil { chain := fmt.Sprintf("%s => %s => %s", tname, sourceT, sourceT.Source) return vterrors.Errorf( @@ -451,7 +547,7 @@ func buildTables(ks *vschemapb.Keyspace, vschema *VSchema, ksvschema *KeyspaceSc t.Type = table.Type case TypeReference: if table.Source != "" { - tableName, err := parseQualifiedTable(table.Source) + tableName, err := parseTable(table.Source) if err != nil { return vterrors.Errorf( vtrpcpb.Code_INVALID_ARGUMENT, @@ -557,6 +653,10 @@ func buildTables(ks *vschemapb.Keyspace, vschema *VSchema, ksvschema *KeyspaceSc columns = append(columns, sqlparser.NewIdentifierCI(indCol)) } } + backfill := false + if lkpBackfill, ok := vindex.(LookupBackfill); ok { + backfill = lkpBackfill.IsBackfilling() + } columnVindex := &ColumnVindex{ Columns: columns, Type: vindexInfo.Type, @@ -565,6 +665,7 @@ func buildTables(ks *vschemapb.Keyspace, vschema *VSchema, ksvschema *KeyspaceSc Vindex: vindex, isUnique: vindex.IsUnique(), cost: vindex.Cost(), + backfill: backfill, } if i == 0 { // Perform Primary vindex check. @@ -617,13 +718,14 @@ func buildTables(ks *vschemapb.Keyspace, vschema *VSchema, ksvschema *KeyspaceSc columnSubset := columns[:i] cost++ columnVindex = &ColumnVindex{ - Columns: columnSubset, - Type: vindexInfo.Type, - Name: ind.Name, - Owned: owned, - Vindex: vindex, - cost: cost, - partial: true, + Columns: columnSubset, + Type: vindexInfo.Type, + Name: ind.Name, + Owned: owned, + Vindex: vindex, + cost: cost, + partial: true, + backfill: backfill, } t.ColumnVindexes = append(t.ColumnVindexes, columnVindex) } @@ -657,7 +759,11 @@ func resolveAutoIncrement(source *vschemapb.SrvVSchema, vschema *VSchema) { seqks, seqtab, err := sqlparser.ParseTable(table.AutoIncrement.Sequence) var seq *Table if err == nil { - seq, err = vschema.FindTable(seqks, seqtab) + // Ensure that sequence tables also obey routing rules. + seq, err = vschema.FindRoutedTable(seqks, seqtab, topodatapb.TabletType_PRIMARY) + if seq == nil && err == nil { + err = vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "table %s not found", seqtab) + } } if err != nil { // Better to remove the table than to leave it partially initialized. @@ -682,7 +788,7 @@ func resolveAutoIncrement(source *vschemapb.SrvVSchema, vschema *VSchema) { // expects table name of the form . func escapeQualifiedTable(qualifiedTableName string) (string, error) { - keyspace, tableName, err := extractQualifiedTableParts(qualifiedTableName) + keyspace, tableName, err := extractTableParts(qualifiedTableName, false /* allowUnqualified */) if err != nil { return "", err } @@ -692,22 +798,31 @@ func escapeQualifiedTable(qualifiedTableName string) (string, error) { sqlescape.EscapeID(sqlescape.UnescapeID(tableName))), nil } -func extractQualifiedTableParts(qualifiedTableName string) (string, string, error) { - // It's possible to have a database or table name with a dot in it, but that's not otherwise supported within vitess today - arr := strings.Split(qualifiedTableName, ".") +func extractTableParts(tableName string, allowUnqualified bool) (string, string, error) { + errMsgFormat := "invalid table name: %s, it must be of the " + if allowUnqualified { + errMsgFormat = errMsgFormat + "unqualified form or the " + } + errMsgFormat = errMsgFormat + "qualified form . (dots are not allowed in either name)" + + // It's possible to have a database or table name with a dot in it, but + // that's not otherwise supported within vitess today + arr := strings.Split(tableName, ".") switch len(arr) { + case 1: + if allowUnqualified { + return "", arr[0], nil + } case 2: return arr[0], arr[1], nil } // Using fmt.Errorf instead of vterrors here because this error is always wrapped in vterrors. - return "", "", fmt.Errorf( - "invalid table name: %s, it must be of the qualified form . (dots are not allowed in either name)", - qualifiedTableName, - ) + return "", "", fmt.Errorf(errMsgFormat, tableName) + } -func parseQualifiedTable(qualifiedTableName string) (sqlparser.TableName, error) { - keyspace, tableName, err := extractQualifiedTableParts(qualifiedTableName) +func parseTable(tableName string) (sqlparser.TableName, error) { + keyspace, tableName, err := extractTableParts(tableName, true /* allowUnqualified */) if err != nil { return sqlparser.TableName{}, err } @@ -810,57 +925,92 @@ func buildShardRoutingRule(source *vschemapb.SrvVSchema, vschema *VSchema) { // valid and belonging to that keyspace. // FindTable bypasses routing rules and returns at most one table. func (vschema *VSchema) FindTable(keyspace, tablename string) (*Table, error) { - t, err := vschema.findTable(keyspace, tablename) - if err != nil { - return nil, err + table, err := vschema.findTable( + keyspace, + tablename, + true, /* constructUnshardedIfNotFound */ + ) + + if table != nil || err != nil { + return table, err } - if t == nil { - return nil, vterrors.NewErrorf( - vtrpcpb.Code_NOT_FOUND, - vterrors.UnknownTable, - "table %s not found", + + return nil, vterrors.NewErrorf( + vtrpcpb.Code_NOT_FOUND, + vterrors.UnknownTable, + "table %s not found", + tablename, + ) +} + +// findGlobalTable looks for a table that is uniquely named across all +// keyspaces. +// +// If multiple tables with the requested tablename are found, an error +// indicating this ambiguity is returned. +// +// If no table matching requested table name is found, and: +// +// - constructUnshardedIfNotFound is not requested, than no table is returned. +// - constructUnshardedIfNotFound is requested, and there is only one keyspace, +// and that keyspace is unsharded, then a *Table representing that table is +// returned. +func (vschema *VSchema) findGlobalTable( + tablename string, + constructUnshardedIfNotFound bool, +) (*Table, error) { + if len(vschema.Keyspaces) == 1 { + for _, ks := range vschema.Keyspaces { + table := ks.findTable(tablename, constructUnshardedIfNotFound) + return table, nil + } + } + + table, ok := vschema.globalTables[tablename] + + if table != nil { + return table, nil + } + + if ok { + return nil, vterrors.Errorf( + vtrpcpb.Code_FAILED_PRECONDITION, + "ambiguous table reference: %s", tablename, ) } - return t, nil + + return nil, nil } -// findTable is like FindTable, but does not return an error if a table is not found. -func (vschema *VSchema) findTable(keyspace, tablename string) (*Table, error) { +// findTable looks for a keyspace matching the provided keyspace, and then +// looks for a table in that keyspace using the provided tablename. +// +// If the requested keyspace is "", then (*VSchema).findGlobalTable is used to +// find a global table. +// +// Otherwise: +// +// - If no matching keyspace is found, an error is returned. +// - If a matching keyspace is found, (*Keyspace).findTable is used to find a +// matching table. +// +// constructUnshardedIfNotFound is passed to (*VSchema).findGlobalTable or +// (*Keyspace).findTable, and is used to construct a *Table representing a +// table present in an unsharded keyspace but not defined in the vschema. +func (vschema *VSchema) findTable( + keyspace, + tablename string, + constructUnshardedIfNotFound bool, +) (*Table, error) { if keyspace == "" { - t, ok := vschema.globalTables[tablename] - if t == nil { - if ok { - return nil, vterrors.Errorf( - vtrpcpb.Code_FAILED_PRECONDITION, - "ambiguous table reference: %s", - tablename, - ) - } - if len(vschema.Keyspaces) != 1 { - return nil, nil - } - // Loop happens only once. - for _, ks := range vschema.Keyspaces { - if ks.Keyspace.Sharded { - return nil, nil - } - return &Table{Name: sqlparser.NewIdentifierCS(tablename), Keyspace: ks.Keyspace}, nil - } - } - keyspace = t.Keyspace.Name + return vschema.findGlobalTable(tablename, constructUnshardedIfNotFound) } ks, ok := vschema.Keyspaces[keyspace] if !ok { return nil, vterrors.VT05003(keyspace) } - table := ks.Tables[tablename] - if table == nil { - if ks.Keyspace.Sharded { - return nil, nil - } - return &Table{Name: sqlparser.NewIdentifierCS(tablename), Keyspace: ks.Keyspace}, nil - } + table := ks.findTable(tablename, constructUnshardedIfNotFound) return table, nil } @@ -901,7 +1051,11 @@ func (vschema *VSchema) FindRoutedTable(keyspace, tablename string, tabletType t return rr.Tables[0], nil } } - return vschema.findTable(keyspace, tablename) + return vschema.findTable( + keyspace, + tablename, + true, /* constructUnshardedTableIfNotFound */ + ) } // FindTableOrVindex finds a table or a Vindex by name using Find and FindVindex. @@ -965,33 +1119,6 @@ func (vschema *VSchema) FindView(keyspace, name string) sqlparser.SelectStatemen return statement } -func (vschema *VSchema) findKeyspaceAndTableBySource(source *Source) (*Keyspace, *Table, error) { - sourceKsname := source.Qualifier.String() - sourceTname := source.Name.String() - - sourceKs, ok := vschema.Keyspaces[sourceKsname] - if !ok { - return nil, nil, vterrors.NewErrorf( - vtrpcpb.Code_NOT_FOUND, - vterrors.BadDb, - "source %q references a non-existent keyspace %q", - source, - sourceKsname, - ) - } - - sourceT, ok := sourceKs.Tables[sourceTname] - if !ok { - return sourceKs.Keyspace, nil, vterrors.NewErrorf( - vtrpcpb.Code_NOT_FOUND, - vterrors.UnknownTable, - "source %q references a table %q that is not present in the VSchema of keyspace %q", source, sourceTname, sourceKsname, - ) - } - - return sourceKs.Keyspace, sourceT, nil -} - // NotFoundError represents the error where the table name was not found type NotFoundError struct { TableName string @@ -1039,6 +1166,17 @@ func (vschema *VSchema) FindRoutedShard(keyspace, shard string) (string, error) return keyspace, nil } +// GetCreated returns the time when the VSchema was created. +func (vschema *VSchema) GetCreated() time.Time { + return vschema.created +} + +// ResetCreated resets the created time to zero value. +// Used only in tests where vschema protos are compared. +func (vschema *VSchema) ResetCreated() { + vschema.created = time.Time{} +} + // ByCost provides the interface needed for ColumnVindexes to // be sorted by cost order. type ByCost []*ColumnVindex @@ -1088,11 +1226,12 @@ func LoadFormalKeyspace(filename string) (*vschemapb.Keyspace, error) { return formal, nil } -// ChooseVindexForType chooses the most appropriate vindex for the give type. +// ChooseVindexForType chooses the most appropriate vindex type for +// the given SQL data type. func ChooseVindexForType(typ querypb.Type) (string, error) { switch { case sqltypes.IsIntegral(typ): - return "hash", nil + return "xxhash", nil case sqltypes.IsText(typ): return "unicode_loose_md5", nil case sqltypes.IsBinary(typ): @@ -1198,7 +1337,3 @@ func (t *Table) getReferenceInKeyspace(keyspace string) *Table { } return t } - -func (t *Table) isReferencedInKeyspace(keyspace string) bool { - return t.getReferenceInKeyspace(keyspace) != nil -} diff --git a/go/vt/vtgate/vindexes/vschema_test.go b/go/vt/vtgate/vindexes/vschema_test.go index 274ee89a61e..5b66059e25c 100644 --- a/go/vt/vtgate/vindexes/vschema_test.go +++ b/go/vt/vtgate/vindexes/vschema_test.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "errors" + "fmt" "reflect" "strings" "testing" @@ -41,8 +42,7 @@ import ( // cheapVindex is a Functional, Unique Vindex. type cheapVindex struct { - name string - Params map[string]string + name string } func (v *cheapVindex) String() string { return v.name } @@ -56,16 +56,15 @@ func (*cheapVindex) Map(ctx context.Context, vcursor VCursor, ids []sqltypes.Val return nil, nil } -func NewCheapVindex(name string, params map[string]string) (Vindex, error) { - return &cheapVindex{name: name, Params: params}, nil +func newCheapVindex(name string, _ map[string]string) (Vindex, error) { + return &cheapVindex{name: name}, nil } var _ SingleColumn = (*stFU)(nil) // stFU is a Functional, Unique Vindex. type stFU struct { - name string - Params map[string]string + name string } func (v *stFU) String() string { return v.name } @@ -79,16 +78,15 @@ func (*stFU) Map(ctx context.Context, vcursor VCursor, ids []sqltypes.Value) ([] return nil, nil } -func NewSTFU(name string, params map[string]string) (Vindex, error) { - return &stFU{name: name, Params: params}, nil +func newSTFU(name string, _ map[string]string) (Vindex, error) { + return &stFU{name: name}, nil } var _ SingleColumn = (*stFU)(nil) // stFN is a Functional, NonUnique Vindex. type stFN struct { - name string - Params map[string]string + name string } func (v *stFN) String() string { return v.name } @@ -102,16 +100,15 @@ func (*stFN) Map(ctx context.Context, vcursor VCursor, ids []sqltypes.Value) ([] return nil, nil } -func NewSTFN(name string, params map[string]string) (Vindex, error) { - return &stFN{name: name, Params: params}, nil +func newSTFN(name string, _ map[string]string) (Vindex, error) { + return &stFN{name: name}, nil } var _ SingleColumn = (*stFN)(nil) // stLN is a Lookup, NonUnique Vindex. type stLN struct { - name string - Params map[string]string + name string } func (v *stLN) String() string { return v.name } @@ -130,8 +127,8 @@ func (*stLN) Update(context.Context, VCursor, []sqltypes.Value, []byte, []sqltyp return nil } -func NewSTLN(name string, params map[string]string) (Vindex, error) { - return &stLN{name: name, Params: params}, nil +func newSTLN(name string, _ map[string]string) (Vindex, error) { + return &stLN{name: name}, nil } var _ SingleColumn = (*stLN)(nil) @@ -139,8 +136,7 @@ var _ Lookup = (*stLN)(nil) // stLU is a Lookup, Unique Vindex. type stLU struct { - name string - Params map[string]string + name string } func (v *stLU) String() string { return v.name } @@ -159,8 +155,8 @@ func (*stLU) Update(context.Context, VCursor, []sqltypes.Value, []byte, []sqltyp return nil } -func NewSTLU(name string, params map[string]string) (Vindex, error) { - return &stLU{name: name, Params: params}, nil +func newSTLU(name string, _ map[string]string) (Vindex, error) { + return &stLU{name: name}, nil } var _ SingleColumn = (*stLO)(nil) @@ -197,7 +193,7 @@ func (v *stLO) SetOwnerInfo(keyspace, table string, cols []sqlparser.IdentifierC return nil } -func NewSTLO(name string, _ map[string]string) (Vindex, error) { +func newSTLO(name string, _ map[string]string) (Vindex, error) { return &stLO{name: name}, nil } @@ -206,8 +202,7 @@ var _ Lookup = (*stLO)(nil) // mcFU is a multi-column Functional, Unique Vindex. type mcFU struct { - name string - Params map[string]string + name string } func (v *mcFU) String() string { return v.name } @@ -222,25 +217,33 @@ func (*mcFU) Map(ctx context.Context, vcursor VCursor, rowsColValues [][]sqltype } func (*mcFU) PartialVindex() bool { return false } -func NewMCFU(name string, params map[string]string) (Vindex, error) { - return &mcFU{name: name, Params: params}, nil +func newMCFU(name string, _ map[string]string) (Vindex, error) { + return &mcFU{name: name}, nil } var _ MultiColumn = (*mcFU)(nil) func init() { - Register("cheap", NewCheapVindex) - Register("stfu", NewSTFU) - Register("stfn", NewSTFN) - Register("stln", NewSTLN) - Register("stlu", NewSTLU) - Register("stlo", NewSTLO) - Register("region_experimental_test", NewRegionExperimental) - Register("mcfu", NewMCFU) + Register("cheap", newCheapVindex) + Register("stfu", newSTFU) + Register("stfn", newSTFN) + Register("stln", newSTLN) + Register("stlu", newSTLU) + Register("stlo", newSTLO) + Register("region_experimental_test", newRegionExperimental) + Register("mcfu", newMCFU) +} + +func buildVSchema(source *vschemapb.SrvVSchema) (vschema *VSchema) { + vs := BuildVSchema(source) + if vs != nil { + vs.ResetCreated() + } + return vs } func TestUnshardedVSchemaValid(t *testing.T) { - err := ValidateKeyspace(&vschemapb.Keyspace{ + _, err := BuildKeyspace(&vschemapb.Keyspace{ Sharded: false, Vindexes: make(map[string]*vschemapb.Vindex), Tables: make(map[string]*vschemapb.Table), @@ -248,6 +251,45 @@ func TestUnshardedVSchemaValid(t *testing.T) { require.NoError(t, err) } +func TestForeignKeyMode(t *testing.T) { + tests := []struct { + name string + fkMode vschemapb.Keyspace_ForeignKeyMode + wantedFkMode vschemapb.Keyspace_ForeignKeyMode + }{ + { + name: "Default Value", + wantedFkMode: vschemapb.Keyspace_FK_UNMANAGED, + }, { + name: "Managed Value", + fkMode: vschemapb.Keyspace_FK_MANAGED, + wantedFkMode: vschemapb.Keyspace_FK_MANAGED, + }, { + name: "Unmanaged Value", + fkMode: vschemapb.Keyspace_FK_UNMANAGED, + wantedFkMode: vschemapb.Keyspace_FK_UNMANAGED, + }, { + name: "Disallow Value", + fkMode: vschemapb.Keyspace_FK_DISALLOW, + wantedFkMode: vschemapb.Keyspace_FK_DISALLOW, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ksSchema, err := BuildKeyspace(&vschemapb.Keyspace{ + Sharded: false, + ForeignKeyMode: test.fkMode, + Vindexes: make(map[string]*vschemapb.Vindex), + Tables: make(map[string]*vschemapb.Table), + }) + require.NoError(t, err) + require.Equal(t, test.wantedFkMode, ksSchema.ForeignKeyMode) + }) + + } +} + func TestUnshardedVSchema(t *testing.T) { good := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ @@ -323,6 +365,7 @@ func TestVSchemaViews(t *testing.T) { got := string(out) want := ` { + "foreignKeyMode":"FK_UNMANAGED", "tables": { "t1": { "name": "t1", @@ -345,6 +388,76 @@ func TestVSchemaViews(t *testing.T) { require.JSONEq(t, want, got) } +func TestVSchemaForeignKeys(t *testing.T) { + good := vschemapb.SrvVSchema{ + Keyspaces: map[string]*vschemapb.Keyspace{ + "unsharded": { + Tables: map[string]*vschemapb.Table{ + "t1": { + Columns: []*vschemapb.Column{{ + Name: "c1", + }, { + Name: "c2", + Type: sqltypes.VarChar}}}}}, + "main": { + Tables: map[string]*vschemapb.Table{ + "t1": { + Columns: []*vschemapb.Column{{ + Name: "c1", + }, { + Name: "c2", + Type: sqltypes.VarChar}}}}}}} + vschema := BuildVSchema(&good) + require.NoError(t, vschema.Keyspaces["main"].Error) + + // add fk containst a keyspace. + vschema.AddForeignKey("main", "t1", &sqlparser.ForeignKeyDefinition{ + Source: sqlparser.Columns{sqlparser.NewIdentifierCI("c2")}, + ReferenceDefinition: &sqlparser.ReferenceDefinition{ + ReferencedTable: sqlparser.NewTableName("t1"), + ReferencedColumns: sqlparser.Columns{sqlparser.NewIdentifierCI("c1")}, + }, + }) + + out, err := json.MarshalIndent(vschema.Keyspaces["main"], "", " ") + require.NoError(t, err) + want := ` +{ + "foreignKeyMode": "FK_UNMANAGED", + "tables": { + "t1": { + "name": "t1", + "columns": [ + { + "name": "c1", + "type": "NULL_TYPE" + }, + { + "name": "c2", + "type": "VARCHAR" + } + ], + "parent_foreign_keys": [ + { + "parent_table": "t1", + "parent_columns": ["c1"], + "child_columns": ["c2"] + } + ], + "child_foreign_keys": [ + { + "child_table": "t1", + "child_columns": ["c2"], + "parent_columns": ["c1"] + } + ] + } + } +}` + got := string(out) + require.JSONEq(t, want, got) +} + func TestVSchemaColumnListAuthoritative(t *testing.T) { good := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ @@ -406,10 +519,9 @@ func TestShardedVSchemaOwned(t *testing.T) { Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ "stfu1": { - Type: "stfu", - Params: map[string]string{ - "stfu1": "1"}, - Owner: "t1"}, + Type: "stfu", + Params: map[string]string{}, + Owner: "t1"}, "stln1": { Type: "stln", Owner: "t1"}}, @@ -429,10 +541,7 @@ func TestShardedVSchemaOwned(t *testing.T) { t1, err := got.FindTable("sharded", "t1") require.NoError(t, err) - vindex1 := &stFU{ - name: "stfu1", - Params: map[string]string{ - "stfu1": "1"}} + vindex1 := &stFU{name: "stfu1"} assertVindexMatches(t, t1.ColumnVindexes[0], vindex1, "stfu1", false) vindex2 := &stLN{name: "stln1"} @@ -571,7 +680,8 @@ func TestVSchemaRoutingRules(t *testing.T) { }, Keyspaces: map[string]*vschemapb.Keyspace{ "ks1": { - Sharded: true, + Sharded: true, + ForeignKeyMode: vschemapb.Keyspace_FK_UNMANAGED, Vindexes: map[string]*vschemapb.Vindex{ "stfu1": { Type: "stfu", @@ -589,6 +699,7 @@ func TestVSchemaRoutingRules(t *testing.T) { }, }, "ks2": { + ForeignKeyMode: vschemapb.Keyspace_FK_MANAGED, Tables: map[string]*vschemapb.Table{ "t2": {}, }, @@ -661,7 +772,8 @@ func TestVSchemaRoutingRules(t *testing.T) { }, Keyspaces: map[string]*KeyspaceSchema{ "ks1": { - Keyspace: ks1, + Keyspace: ks1, + ForeignKeyMode: vschemapb.Keyspace_FK_UNMANAGED, Tables: map[string]*Table{ "t1": t1, }, @@ -670,7 +782,8 @@ func TestVSchemaRoutingRules(t *testing.T) { }, }, "ks2": { - Keyspace: ks2, + ForeignKeyMode: vschemapb.Keyspace_FK_MANAGED, + Keyspace: ks2, Tables: map[string]*Table{ "t2": t2, }, @@ -692,37 +805,37 @@ func TestChooseVindexForType(t *testing.T) { out: "", }, { in: sqltypes.Int8, - out: "hash", + out: "xxhash", }, { in: sqltypes.Uint8, - out: "hash", + out: "xxhash", }, { in: sqltypes.Int16, - out: "hash", + out: "xxhash", }, { in: sqltypes.Uint16, - out: "hash", + out: "xxhash", }, { in: sqltypes.Int24, - out: "hash", + out: "xxhash", }, { in: sqltypes.Uint24, - out: "hash", + out: "xxhash", }, { in: sqltypes.Int32, - out: "hash", + out: "xxhash", }, { in: sqltypes.Uint32, - out: "hash", + out: "xxhash", }, { in: sqltypes.Int64, - out: "hash", + out: "xxhash", }, { in: sqltypes.Uint64, - out: "hash", + out: "xxhash", }, { in: sqltypes.Float32, - out: "hash", + out: "", }, { in: sqltypes.Float64, out: "", @@ -740,7 +853,7 @@ func TestChooseVindexForType(t *testing.T) { out: "", }, { in: sqltypes.Year, - out: "hash", + out: "xxhash", }, { in: sqltypes.Decimal, out: "", @@ -784,11 +897,16 @@ func TestChooseVindexForType(t *testing.T) { for _, tcase := range testcases { out, err := ChooseVindexForType(tcase.in) - if out == "" { - assert.Error(t, err, tcase.in) + // If no type is returned then we do not recommend the column be + // used for a vindex. If the test case provides an empty output + // value then we expect an error. + if tcase.out == "" { + assert.Error(t, err, "unexpectedly got a recommended vindex type of %s for input column type %v", + out, tcase.in) continue } - assert.Equal(t, out, tcase.out, tcase.in) + assert.Equal(t, out, tcase.out, "expected a recommended vindex type of %s for input column type %v but got %s", + tcase.out, tcase.in, out) } } @@ -876,12 +994,7 @@ func TestFindVindexForSharding(t *testing.T) { Name: "sharded", Sharded: true, } - vindex1 := &stFU{ - name: "stfu1", - Params: map[string]string{ - "stfu1": "1", - }, - } + vindex1 := &stFU{name: "stfu1"} vindex2 := &stLN{name: "stln1"} t1 := &Table{ Name: sqlparser.NewIdentifierCS("t1"), @@ -959,12 +1072,7 @@ func TestFindVindexForSharding2(t *testing.T) { Sharded: true, } vindex1 := &stLU{name: "stlu1"} - vindex2 := &stFU{ - name: "stfu1", - Params: map[string]string{ - "stfu1": "1", - }, - } + vindex2 := &stFU{name: "stfu1"} t1 := &Table{ Name: sqlparser.NewIdentifierCS("t1"), Keyspace: ks, @@ -999,32 +1107,27 @@ func TestShardedVSchemaMultiColumnVindex(t *testing.T) { good := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ "sharded": { - Sharded: true, + Sharded: true, + ForeignKeyMode: vschemapb.Keyspace_FK_DISALLOW, Vindexes: map[string]*vschemapb.Vindex{ "stfu1": { - Type: "stfu", - Params: map[string]string{ - "stfu1": "1"}, - Owner: "t1"}}, + Type: "stfu", + Params: map[string]string{}, + Owner: "t1"}}, Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{{ Columns: []string{"c1", "c2"}, Name: "stfu1"}}}}}}} - got := BuildVSchema(&good) + got := buildVSchema(&good) err := got.Keyspaces["sharded"].Error require.NoError(t, err) ks := &Keyspace{ Name: "sharded", Sharded: true, } - vindex1 := &stFU{ - name: "stfu1", - Params: map[string]string{ - "stfu1": "1", - }, - } + vindex1 := &stFU{name: "stfu1"} t1 := &Table{ Name: sqlparser.NewIdentifierCS("t1"), Keyspace: ks, @@ -1052,7 +1155,8 @@ func TestShardedVSchemaMultiColumnVindex(t *testing.T) { }, Keyspaces: map[string]*KeyspaceSchema{ "sharded": { - Keyspace: ks, + ForeignKeyMode: vschemapb.Keyspace_FK_DISALLOW, + Keyspace: ks, Tables: map[string]*Table{ "t1": t1}, Vindexes: map[string]Vindex{ @@ -1069,7 +1173,8 @@ func TestShardedVSchemaNotOwned(t *testing.T) { good := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ "sharded": { - Sharded: true, + ForeignKeyMode: vschemapb.Keyspace_FK_MANAGED, + Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ "stlu1": { Type: "stlu", @@ -1084,7 +1189,7 @@ func TestShardedVSchemaNotOwned(t *testing.T) { Name: "stlu1"}, { Column: "c2", Name: "stfu1"}}}}}}} - got := BuildVSchema(&good) + got := buildVSchema(&good) err := got.Keyspaces["sharded"].Error require.NoError(t, err) ks := &Keyspace{ @@ -1126,7 +1231,8 @@ func TestShardedVSchemaNotOwned(t *testing.T) { "stfu1": vindex2}, Keyspaces: map[string]*KeyspaceSchema{ "sharded": { - Keyspace: ks, + ForeignKeyMode: vschemapb.Keyspace_FK_MANAGED, + Keyspace: ks, Tables: map[string]*Table{ "t1": t1, }, @@ -1200,10 +1306,12 @@ func TestBuildVSchemaDupSeq(t *testing.T) { good := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ "ksa": { + ForeignKeyMode: vschemapb.Keyspace_FK_MANAGED, Tables: map[string]*vschemapb.Table{ "t1": { Type: "sequence"}}}, "ksb": { + ForeignKeyMode: vschemapb.Keyspace_FK_MANAGED, Tables: map[string]*vschemapb.Table{ "t1": { Type: "sequence"}}}}} @@ -1211,7 +1319,7 @@ func TestBuildVSchemaDupSeq(t *testing.T) { Name: "ksa"} ksb := &Keyspace{ Name: "ksb"} - got := BuildVSchema(&good) + got := buildVSchema(&good) t1a := &Table{ Name: sqlparser.NewIdentifierCS("t1"), Keyspace: ksa, @@ -1228,14 +1336,16 @@ func TestBuildVSchemaDupSeq(t *testing.T) { uniqueVindexes: map[string]Vindex{}, Keyspaces: map[string]*KeyspaceSchema{ "ksa": { - Keyspace: ksa, + ForeignKeyMode: vschemapb.Keyspace_FK_MANAGED, + Keyspace: ksa, Tables: map[string]*Table{ "t1": t1a, }, Vindexes: map[string]Vindex{}, }, "ksb": { - Keyspace: ksb, + ForeignKeyMode: vschemapb.Keyspace_FK_MANAGED, + Keyspace: ksb, Tables: map[string]*Table{ "t1": t1b, }, @@ -1251,18 +1361,20 @@ func TestBuildVSchemaDupTable(t *testing.T) { good := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ "ksa": { + ForeignKeyMode: vschemapb.Keyspace_FK_UNMANAGED, Tables: map[string]*vschemapb.Table{ "t1": {}, }, }, "ksb": { + ForeignKeyMode: vschemapb.Keyspace_FK_UNMANAGED, Tables: map[string]*vschemapb.Table{ "t1": {}, }, }, }, } - got := BuildVSchema(&good) + got := buildVSchema(&good) ksa := &Keyspace{ Name: "ksa", } @@ -1285,14 +1397,16 @@ func TestBuildVSchemaDupTable(t *testing.T) { uniqueVindexes: map[string]Vindex{}, Keyspaces: map[string]*KeyspaceSchema{ "ksa": { - Keyspace: ksa, + ForeignKeyMode: vschemapb.Keyspace_FK_UNMANAGED, + Keyspace: ksa, Tables: map[string]*Table{ "t1": t1a, }, Vindexes: map[string]Vindex{}, }, "ksb": { - Keyspace: ksb, + ForeignKeyMode: vschemapb.Keyspace_FK_UNMANAGED, + Keyspace: ksb, Tables: map[string]*Table{ "t1": t1b, }, @@ -1311,7 +1425,8 @@ func TestBuildVSchemaDupVindex(t *testing.T) { good := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ "ksa": { - Sharded: true, + ForeignKeyMode: vschemapb.Keyspace_FK_UNMANAGED, + Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ "stlu1": { Type: "stlu", @@ -1330,7 +1445,8 @@ func TestBuildVSchemaDupVindex(t *testing.T) { }, }, "ksb": { - Sharded: true, + ForeignKeyMode: vschemapb.Keyspace_FK_UNMANAGED, + Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ "stlu1": { Type: "stlu", @@ -1350,7 +1466,7 @@ func TestBuildVSchemaDupVindex(t *testing.T) { }, }, } - got := BuildVSchema(&good) + got := buildVSchema(&good) err := got.Keyspaces["ksa"].Error err1 := got.Keyspaces["ksb"].Error require.NoError(t, err) @@ -1412,7 +1528,8 @@ func TestBuildVSchemaDupVindex(t *testing.T) { }, Keyspaces: map[string]*KeyspaceSchema{ "ksa": { - Keyspace: ksa, + ForeignKeyMode: vschemapb.Keyspace_FK_UNMANAGED, + Keyspace: ksa, Tables: map[string]*Table{ "t1": t1, }, @@ -1421,7 +1538,8 @@ func TestBuildVSchemaDupVindex(t *testing.T) { }, }, "ksb": { - Keyspace: ksb, + ForeignKeyMode: vschemapb.Keyspace_FK_UNMANAGED, + Keyspace: ksb, Tables: map[string]*Table{ "t1": t2, }, @@ -1594,7 +1712,7 @@ func TestBuildVSchemaPrimaryCannotBeOwned(t *testing.T) { } } -func TestBuildVSchemaReferenceTableSourceMustBeQualified(t *testing.T) { +func TestBuildVSchemaReferenceTableSourceMayBeUnqualified(t *testing.T) { input := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ "unsharded": { @@ -1616,9 +1734,7 @@ func TestBuildVSchemaReferenceTableSourceMustBeQualified(t *testing.T) { } vschema := BuildVSchema(&input) require.NoError(t, vschema.Keyspaces["unsharded"].Error) - require.Error(t, vschema.Keyspaces["sharded"].Error) - require.EqualError(t, vschema.Keyspaces["sharded"].Error, - "invalid source \"src\" for reference table: ref; invalid table name: src, it must be of the qualified form . (dots are not allowed in either name)") + require.NoError(t, vschema.Keyspaces["sharded"].Error) } func TestBuildVSchemaReferenceTableSourceMustBeInDifferentKeyspace(t *testing.T) { @@ -1737,7 +1853,7 @@ func TestBuildVSchemaReferenceTableSourceMayUseShardedKeyspace(t *testing.T) { require.NoError(t, vschema.Keyspaces["sharded2"].Error) } -func TestBuildVSchemaReferenceTableSourceTableMustBeBasicOrReference(t *testing.T) { +func TestBuildVSchemaReferenceTableSourceTableMustBeBasicOrReferenceWithoutSource(t *testing.T) { input := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ "unsharded1": { @@ -1764,6 +1880,11 @@ func TestBuildVSchemaReferenceTableSourceTableMustBeBasicOrReference(t *testing. }, }, }, + "unsharded4": { + Tables: map[string]*vschemapb.Table{ + "src4": {}, + }, + }, "sharded1": { Sharded: true, Tables: map[string]*vschemapb.Table{ @@ -1782,13 +1903,26 @@ func TestBuildVSchemaReferenceTableSourceTableMustBeBasicOrReference(t *testing. }, }, }, + "sharded3": { + Sharded: true, + Tables: map[string]*vschemapb.Table{ + "ref3": { + Type: "reference", + Source: "unsharded4.src4", + }, + }, + }, }, } + vschema := BuildVSchema(&input) require.Error(t, vschema.Keyspaces["sharded1"].Error) require.EqualError(t, vschema.Keyspaces["sharded1"].Error, "source \"unsharded1.src1\" may not reference a table of type \"sequence\": ref1") + require.NoError(t, vschema.Keyspaces["sharded2"].Error) + + require.NoError(t, vschema.Keyspaces["sharded3"].Error) } func TestBuildVSchemaSourceMayBeReferencedAtMostOncePerKeyspace(t *testing.T) { @@ -1863,6 +1997,7 @@ func TestSequence(t *testing.T) { good := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ "unsharded": { + ForeignKeyMode: vschemapb.Keyspace_FK_DISALLOW, Tables: map[string]*vschemapb.Table{ "seq": { Type: "sequence", @@ -1870,13 +2005,12 @@ func TestSequence(t *testing.T) { }, }, "sharded": { - Sharded: true, + Sharded: true, + ForeignKeyMode: vschemapb.Keyspace_FK_UNMANAGED, Vindexes: map[string]*vschemapb.Vindex{ "stfu1": { - Type: "stfu", - Params: map[string]string{ - "stfu1": "1", - }, + Type: "stfu", + Params: map[string]string{}, }, }, Tables: map[string]*vschemapb.Table{ @@ -1908,7 +2042,7 @@ func TestSequence(t *testing.T) { }, }, } - got := BuildVSchema(&good) + got := buildVSchema(&good) err := got.Keyspaces["sharded"].Error require.NoError(t, err) err1 := got.Keyspaces["unsharded"].Error @@ -1927,12 +2061,7 @@ func TestSequence(t *testing.T) { Keyspace: ksu, Type: "sequence", } - vindex1 := &stFU{ - name: "stfu1", - Params: map[string]string{ - "stfu1": "1", - }, - } + vindex1 := &stFU{name: "stfu1"} t1 := &Table{ Name: sqlparser.NewIdentifierCS("t1"), Keyspace: kss, @@ -1987,14 +2116,16 @@ func TestSequence(t *testing.T) { }, Keyspaces: map[string]*KeyspaceSchema{ "unsharded": { - Keyspace: ksu, + ForeignKeyMode: vschemapb.Keyspace_FK_DISALLOW, + Keyspace: ksu, Tables: map[string]*Table{ "seq": seq, }, Vindexes: map[string]Vindex{}, }, "sharded": { - Keyspace: kss, + ForeignKeyMode: vschemapb.Keyspace_FK_UNMANAGED, + Keyspace: kss, Tables: map[string]*Table{ "t1": t1, "t2": t2, @@ -2152,10 +2283,8 @@ func TestFindTable(t *testing.T) { Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ "stfu1": { - Type: "stfu", - Params: map[string]string{ - "stfu1": "1", - }, + Type: "stfu", + Params: map[string]string{}, }, }, Tables: map[string]*vschemapb.Table{ @@ -2425,7 +2554,8 @@ func TestBuildKeyspaceSchema(t *testing.T) { Keyspace: ks, } want := &KeyspaceSchema{ - Keyspace: ks, + Keyspace: ks, + ForeignKeyMode: vschemapb.Keyspace_FK_UNMANAGED, Tables: map[string]*Table{ "t1": t1, "t2": t2, @@ -2451,7 +2581,7 @@ func TestValidate(t *testing.T) { "t2": {}, }, } - err := ValidateKeyspace(good) + _, err := BuildKeyspace(good) require.NoError(t, err) bad := &vschemapb.Keyspace{ Sharded: true, @@ -2464,7 +2594,7 @@ func TestValidate(t *testing.T) { "t2": {}, }, } - err = ValidateKeyspace(bad) + _, err = BuildKeyspace(bad) want := `vindexType "absent" not found` if err == nil || !strings.HasPrefix(err.Error(), want) { t.Errorf("Validate: %v, must start with %s", err, want) @@ -2536,13 +2666,18 @@ func TestVSchemaPBJSON(t *testing.T) { } func TestVSchemaJSON(t *testing.T) { - lkp, _ := NewLookupHash("n2", map[string]string{ + lkp, err := newLookupHash("n2", map[string]string{ "from": "f", "table": "t", "to": "2", }) + unknownParams := lkp.(ParamValidating).UnknownParams() + require.Empty(t, unknownParams) + require.NoError(t, err) + in := map[string]*KeyspaceSchema{ "unsharded": { + ForeignKeyMode: vschemapb.Keyspace_FK_MANAGED, Keyspace: &Keyspace{ Name: "k1", }, @@ -2563,6 +2698,7 @@ func TestVSchemaJSON(t *testing.T) { }, }, "sharded": { + ForeignKeyMode: vschemapb.Keyspace_FK_DISALLOW, Keyspace: &Keyspace{ Name: "k2", Sharded: true, @@ -2589,6 +2725,7 @@ func TestVSchemaJSON(t *testing.T) { want := `{ "sharded": { "sharded": true, + "foreignKeyMode": "FK_DISALLOW", "tables": { "t3": { "name": "n3", @@ -2613,6 +2750,7 @@ func TestVSchemaJSON(t *testing.T) { } }, "unsharded": { + "foreignKeyMode": "FK_MANAGED", "tables": { "t1": { "name": "n1", @@ -2828,20 +2966,25 @@ func TestReferenceTableAndSourceAreGloballyRoutable(t *testing.T) { }, }, } + vs := BuildVSchema(&input) t1, err := vs.FindTable("unsharded", "t1") require.NoError(t, err) + // If the source of a reference table does not require explicit routing, + // then the source table can be globally routed. globalT1, err := vs.FindTable("", "t1") require.NoError(t, err) require.Equal(t, t1, globalT1) input.Keyspaces["unsharded"].RequireExplicitRouting = true vs = BuildVSchema(&input) - t1, err = vs.FindTable("sharded", "t1") - require.NoError(t, err) - globalT1, err = vs.FindTable("", "t1") + _, err = vs.FindTable("sharded", "t1") require.NoError(t, err) - require.Equal(t, t1, globalT1) + // If the source of a reference table requires explicit routing, then + // neither the reference table nor its souce can be globally routed. + _, err = vs.FindTable("", "t1") + require.Error(t, err) + require.EqualError(t, err, "table t1 not found") } func TestOtherTablesMakeReferenceTableAndSourceAmbiguous(t *testing.T) { @@ -2875,6 +3018,135 @@ func TestOtherTablesMakeReferenceTableAndSourceAmbiguous(t *testing.T) { require.Error(t, err) } +// TestFindTableWithSequences tests tables with an autoincrement column that are associated with a sequence. +// It validates that sequences obey routing rules, which might be set, for example, during a MoveTables +// when sequence tables are being migrated to a new cluster. +func TestFindTableWithSequences(t *testing.T) { + input := vschemapb.SrvVSchema{ + RoutingRules: &vschemapb.RoutingRules{ + Rules: []*vschemapb.RoutingRule{{ + FromTable: "seq3", + ToTables: []string{"ksb.seq3"}, + }, + { + FromTable: "seq4", + ToTables: []string{"ksb.seq4"}, + }}, + }, + Keyspaces: map[string]*vschemapb.Keyspace{ + "ksa": { + Vindexes: map[string]*vschemapb.Vindex{ + "stfu1": { + Type: "stfu", + }}, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{ + { + Column: "c1", + Name: "stfu1", + }, + }, + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c1", + Sequence: "seq1", + }, + }, + "t2": { + ColumnVindexes: []*vschemapb.ColumnVindex{ + { + Column: "c2", + Name: "stfu1", + }, + }, + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c2", + Sequence: "seq2", + }, + }, + "t3": { + ColumnVindexes: []*vschemapb.ColumnVindex{ + { + Column: "c3", + Name: "stfu1", + }, + }, + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c3", + Sequence: "seq3", + }, + }, + "t4": { + ColumnVindexes: []*vschemapb.ColumnVindex{ + { + Column: "c4", + Name: "stfu1", + }, + }, + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "c4", + Sequence: "ksa.seq4", + }, + }, + "seq1": { + Type: "sequence", + }, + "seq2": { + Type: "sequence", + }, + "seq3": { + Type: "sequence", + }, + "seq4": { + Type: "sequence", + }, + }, + }, + "ksb": { + Tables: map[string]*vschemapb.Table{ + "seq2": { + Type: "sequence", + }, + "seq3": { + Type: "sequence", + }, + }, + }, + }, + } + vschema := BuildVSchema(&input) + + notFoundError := func(table string) string { + return fmt.Sprintf("table %s not found", table) + } + + type testCase struct { + name string + keyspace string + table string + mustError bool + errorContains string + } + testCases := []testCase{ + {"unambiguous", "", "t1", false, ""}, + {"ambiguous", "", "t2", true, notFoundError("t2")}, + {"routed unambiguous", "", "t3", false, ""}, + {"routed qualified unambiguous", "", "t4", false, ""}, + {"keyspace specified", "ksa", "t2", false, ""}, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, _, err := vschema.FindTableOrVindex(tc.keyspace, tc.table, topodatapb.TabletType_PRIMARY) + if tc.mustError { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errorContains) + } else { + require.NoError(t, err) + } + }) + } +} + func vindexNames(vindexes []*ColumnVindex) (result []string) { for _, vindex := range vindexes { result = append(result, vindex.Name) diff --git a/go/vt/vtgate/vindexes/xxhash.go b/go/vt/vtgate/vindexes/xxhash.go index 471ad996757..3362cd0aab1 100644 --- a/go/vt/vtgate/vindexes/xxhash.go +++ b/go/vt/vtgate/vindexes/xxhash.go @@ -28,19 +28,24 @@ import ( ) var ( - _ SingleColumn = (*XXHash)(nil) - _ Hashing = (*XXHash)(nil) + _ SingleColumn = (*XXHash)(nil) + _ Hashing = (*XXHash)(nil) + _ ParamValidating = (*XXHash)(nil) ) // XXHash defines vindex that hashes any sql types to a KeyspaceId // by using xxhash64. It's Unique and works on any platform giving identical result. type XXHash struct { - name string + name string + unknownParams []string } -// NewXXHash creates a new XXHash. -func NewXXHash(name string, _ map[string]string) (Vindex, error) { - return &XXHash{name: name}, nil +// newXXHash creates a new XXHash. +func newXXHash(name string, m map[string]string) (Vindex, error) { + return &XXHash{ + name: name, + unknownParams: FindUnknownParams(m, nil), + }, nil } // String returns the name of the vindex. @@ -97,8 +102,13 @@ func (vind *XXHash) Hash(id sqltypes.Value) ([]byte, error) { return vXXHash(idBytes), nil } +// UnknownParams implements the ParamValidating interface. +func (vind *XXHash) UnknownParams() []string { + return vind.unknownParams +} + func init() { - Register("xxhash", NewXXHash) + Register("xxhash", newXXHash) } func vXXHash(shardKey []byte) []byte { diff --git a/go/vt/vtgate/vindexes/xxhash_test.go b/go/vt/vtgate/vindexes/xxhash_test.go index 148a2e7e7f8..b7bd77a1142 100644 --- a/go/vt/vtgate/vindexes/xxhash_test.go +++ b/go/vt/vtgate/vindexes/xxhash_test.go @@ -24,8 +24,6 @@ import ( "reflect" "testing" - "github.com/stretchr/testify/assert" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" ) @@ -33,18 +31,60 @@ import ( var xxHash SingleColumn func init() { - hv, err := CreateVindex("xxhash", "xxhash_name", map[string]string{"Table": "t", "Column": "c"}) + hv, err := CreateVindex("xxhash", "xxhash_name", map[string]string{}) if err != nil { panic(err) } xxHash = hv.(SingleColumn) } -func TestXXHashInfo(t *testing.T) { - assert.Equal(t, 1, xxHash.Cost()) - assert.Equal(t, "xxhash_name", xxHash.String()) - assert.True(t, xxHash.IsUnique()) - assert.False(t, xxHash.NeedsVCursor()) +func xxhashCreateVindexTestCase( + testName string, + vindexParams map[string]string, + expectErr error, + expectUnknownParams []string, +) createVindexTestCase { + return createVindexTestCase{ + testName: testName, + + vindexType: "xxhash", + vindexName: "xxhash", + vindexParams: vindexParams, + + expectCost: 1, + expectErr: expectErr, + expectIsUnique: true, + expectNeedsVCursor: false, + expectString: "xxhash", + expectUnknownParams: expectUnknownParams, + } +} + +func TestXXHashCreateVindex(t *testing.T) { + cases := []createVindexTestCase{ + xxhashCreateVindexTestCase( + "no params", + nil, + nil, + nil, + ), + xxhashCreateVindexTestCase( + "empty params", + map[string]string{}, + nil, + nil, + ), + xxhashCreateVindexTestCase( + "unknown params", + map[string]string{ + "hello": "world", + }, + nil, + []string{"hello"}, + ), + } + + testCreateVindexes(t, cases) } func TestXXHashMap(t *testing.T) { diff --git a/go/vt/vtgate/vschema_manager.go b/go/vt/vtgate/vschema_manager.go index baa232a87d8..3b99be052b0 100644 --- a/go/vt/vtgate/vschema_manager.go +++ b/go/vt/vtgate/vschema_manager.go @@ -20,12 +20,10 @@ import ( "context" "sync" + "vitess.io/vitess/go/vt/log" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" - - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -48,7 +46,7 @@ type VSchemaManager struct { // SchemaInfo is an interface to schema tracker. type SchemaInfo interface { - Tables(ks string) map[string][]vindexes.Column + Tables(ks string) map[string]*vindexes.TableInfo Views(ks string) map[string]sqlparser.SelectStatement } @@ -57,7 +55,7 @@ type SchemaInfo interface { func (vm *VSchemaManager) GetCurrentSrvVschema() *vschemapb.SrvVSchema { vm.mu.Lock() defer vm.mu.Unlock() - return proto.Clone(vm.currentSrvVschema).(*vschemapb.SrvVSchema) + return vm.currentSrvVschema.CloneVT() } // UpdateVSchema propagates the updated vschema to the topo. The entry for @@ -70,6 +68,12 @@ func (vm *VSchemaManager) UpdateVSchema(ctx context.Context, ksName string, vsch } ks := vschema.Keyspaces[ksName] + + _, err = vindexes.BuildKeyspace(ks) + if err != nil { + return err + } + err = topoServer.SaveVSchema(ctx, ksName, ks) if err != nil { return err @@ -191,23 +195,29 @@ func (vm *VSchemaManager) buildAndEnhanceVSchema(v *vschemapb.SrvVSchema) *vinde func (vm *VSchemaManager) updateFromSchema(vschema *vindexes.VSchema) { for ksName, ks := range vschema.Keyspaces { m := vm.schema.Tables(ksName) + // Before we add the foreign key definitions in the tables, we need to make sure that all the tables + // are created in the Vschema, so that later when we try to find the routed tables, we don't end up + // getting dummy tables. + for tblName, tblInfo := range m { + setColumns(ks, tblName, tblInfo.Columns) + } - for tblName, columns := range m { - vTbl := ks.Tables[tblName] - if vTbl == nil { - // a table that is unknown by the vschema. we add it as a normal table - ks.Tables[tblName] = &vindexes.Table{ - Name: sqlparser.NewIdentifierCS(tblName), - Keyspace: ks.Keyspace, - Columns: columns, - ColumnListAuthoritative: true, + // Now that we have ensured that all the tables are created, we can start populating the foreign keys + // in the tables. + for tblName, tblInfo := range m { + for _, fkDef := range tblInfo.ForeignKeys { + parentTbl, err := vschema.FindRoutedTable(ksName, fkDef.ReferenceDefinition.ReferencedTable.Name.String(), topodatapb.TabletType_PRIMARY) + if err != nil { + log.Errorf("error finding parent table %s: %v", fkDef.ReferenceDefinition.ReferencedTable.Name.String(), err) + continue } - continue - } - if !vTbl.ColumnListAuthoritative { - // if we found the matching table and the vschema view of it is not authoritative, then we just update the columns of the table - vTbl.Columns = columns - vTbl.ColumnListAuthoritative = true + childTbl, err := vschema.FindRoutedTable(ksName, tblName, topodatapb.TabletType_PRIMARY) + if err != nil { + log.Errorf("error finding child table %s: %v", tblName, err) + continue + } + childTbl.ParentForeignKeys = append(childTbl.ParentForeignKeys, vindexes.NewParentFkInfo(parentTbl, fkDef)) + parentTbl.ChildForeignKeys = append(parentTbl.ChildForeignKeys, vindexes.NewChildFkInfo(childTbl, fkDef)) } } @@ -220,3 +230,23 @@ func (vm *VSchemaManager) updateFromSchema(vschema *vindexes.VSchema) { } } } + +func setColumns(ks *vindexes.KeyspaceSchema, tblName string, columns []vindexes.Column) *vindexes.Table { + vTbl := ks.Tables[tblName] + if vTbl == nil { + // a table that is unknown by the vschema. we add it as a normal table + ks.Tables[tblName] = &vindexes.Table{ + Name: sqlparser.NewIdentifierCS(tblName), + Keyspace: ks.Keyspace, + Columns: columns, + ColumnListAuthoritative: true, + } + return ks.Tables[tblName] + } + // if we found the matching table and the vschema view of it is not authoritative, then we just update the columns of the table + if !vTbl.ColumnListAuthoritative { + vTbl.Columns = columns + vTbl.ColumnListAuthoritative = true + } + return ks.Tables[tblName] +} diff --git a/go/vt/vtgate/vschema_manager_test.go b/go/vt/vtgate/vschema_manager_test.go index 7e9a9224371..5d7af6fe5c6 100644 --- a/go/vt/vtgate/vschema_manager_test.go +++ b/go/vt/vtgate/vschema_manager_test.go @@ -29,11 +29,63 @@ func TestVSchemaUpdate(t *testing.T) { tblCol2 := &vindexes.Table{Name: sqlparser.NewIdentifierCS("tbl"), Keyspace: ks, Columns: cols2, ColumnListAuthoritative: true} tblCol2NA := &vindexes.Table{Name: sqlparser.NewIdentifierCS("tbl"), Keyspace: ks, Columns: cols2} + vindexTable_multicol_t1 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("multicol_t1"), + Keyspace: ks, + Columns: cols2, + ColumnListAuthoritative: true, + } + vindexTable_multicol_t2 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("multicol_t2"), + Keyspace: ks, + Columns: cols2, + ColumnListAuthoritative: true, + } + vindexTable_t1 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: ks, + Columns: cols1, + ColumnListAuthoritative: true, + } + vindexTable_t2 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t2"), + Keyspace: ks, + Columns: cols1, + ColumnListAuthoritative: true, + } + sqlparserCols1 := sqlparser.MakeColumns("id") + sqlparserCols2 := sqlparser.MakeColumns("uid", "name") + + vindexTable_multicol_t1.ChildForeignKeys = append(vindexTable_multicol_t1.ChildForeignKeys, vindexes.ChildFKInfo{ + Table: vindexTable_multicol_t2, + ChildColumns: sqlparserCols2, + ParentColumns: sqlparserCols2, + OnDelete: sqlparser.NoAction, + OnUpdate: sqlparser.Restrict, + }) + vindexTable_multicol_t2.ParentForeignKeys = append(vindexTable_multicol_t2.ParentForeignKeys, vindexes.ParentFKInfo{ + Table: vindexTable_multicol_t1, + ChildColumns: sqlparserCols2, + ParentColumns: sqlparserCols2, + }) + vindexTable_t1.ChildForeignKeys = append(vindexTable_t1.ChildForeignKeys, vindexes.ChildFKInfo{ + Table: vindexTable_t2, + ChildColumns: sqlparserCols1, + ParentColumns: sqlparserCols1, + OnDelete: sqlparser.SetNull, + OnUpdate: sqlparser.Cascade, + }) + vindexTable_t2.ParentForeignKeys = append(vindexTable_t2.ParentForeignKeys, vindexes.ParentFKInfo{ + Table: vindexTable_t1, + ChildColumns: sqlparserCols1, + ParentColumns: sqlparserCols1, + }) + tcases := []struct { name string srvVschema *vschemapb.SrvVSchema currentVSchema *vindexes.VSchema - schema map[string][]vindexes.Column + schema map[string]*vindexes.TableInfo expected *vindexes.VSchema }{{ name: "0 Schematracking- 1 srvVSchema", @@ -47,12 +99,12 @@ func TestVSchemaUpdate(t *testing.T) { }, { name: "1 Schematracking- 0 srvVSchema", srvVschema: makeTestSrvVSchema("ks", false, nil), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblCol1}), }, { name: "1 Schematracking - 1 srvVSchema (no columns) not authoritative", srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{"tbl": {}}), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, // schema will override what srvSchema has. expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblCol1}), }, { @@ -63,7 +115,7 @@ func TestVSchemaUpdate(t *testing.T) { ColumnListAuthoritative: false, }, }), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, // schema will override what srvSchema has. expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblCol1}), }, { @@ -71,7 +123,7 @@ func TestVSchemaUpdate(t *testing.T) { srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{"tbl": { ColumnListAuthoritative: true, }}), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, // schema will override what srvSchema has. expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblNoCol}), }, { @@ -82,24 +134,127 @@ func TestVSchemaUpdate(t *testing.T) { ColumnListAuthoritative: true, }, }), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, // schema tracker will be ignored for authoritative tables. expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblCol2}), }, { name: "srvVschema received as nil", - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, expected: makeTestEmptyVSchema(), }, { name: "srvVschema received as nil - have existing vschema", currentVSchema: &vindexes.VSchema{}, - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, expected: &vindexes.VSchema{}, + }, { + name: "foreign keys in schema", + currentVSchema: &vindexes.VSchema{}, + schema: map[string]*vindexes.TableInfo{ + "t1": { + Columns: cols1, + }, + "t2": { + Columns: cols1, + ForeignKeys: []*sqlparser.ForeignKeyDefinition{ + { + Source: sqlparser.MakeColumns("id"), + ReferenceDefinition: &sqlparser.ReferenceDefinition{ + ReferencedTable: sqlparser.NewTableName("t1"), + ReferencedColumns: sqlparserCols1, + OnUpdate: sqlparser.Cascade, + OnDelete: sqlparser.SetNull, + }, + }, + }, + }, + "multicol_t1": { + Columns: cols2, + }, + "multicol_t2": { + Columns: cols2, + ForeignKeys: []*sqlparser.ForeignKeyDefinition{ + { + Source: sqlparser.MakeColumns("uid", "name"), + ReferenceDefinition: &sqlparser.ReferenceDefinition{ + ReferencedTable: sqlparser.NewTableName("multicol_t1"), + ReferencedColumns: sqlparserCols2, + OnUpdate: sqlparser.Restrict, + OnDelete: sqlparser.NoAction, + }, + }, + }, + }, + }, + srvVschema: &vschemapb.SrvVSchema{ + Keyspaces: map[string]*vschemapb.Keyspace{ + "ks": { + Sharded: false, + ForeignKeyMode: vschemapb.Keyspace_FK_MANAGED, + Tables: map[string]*vschemapb.Table{ + "t1": { + Columns: []*vschemapb.Column{ + { + Name: "id", + Type: querypb.Type_INT64, + }, + }, + }, + "t2": { + Columns: []*vschemapb.Column{ + { + Name: "id", + Type: querypb.Type_INT64, + }, + }, + }, + "multicol_t1": { + Columns: []*vschemapb.Column{ + { + Name: "uid", + Type: querypb.Type_INT64, + }, { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + }, "multicol_t2": { + Columns: []*vschemapb.Column{ + { + Name: "uid", + Type: querypb.Type_INT64, + }, { + Name: "name", + Type: querypb.Type_VARCHAR, + }, + }, + }, + }, + }, + }, + }, + expected: &vindexes.VSchema{ + RoutingRules: map[string]*vindexes.RoutingRule{}, + Keyspaces: map[string]*vindexes.KeyspaceSchema{ + "ks": { + Keyspace: ks, + ForeignKeyMode: vschemapb.Keyspace_FK_MANAGED, + Vindexes: map[string]vindexes.Vindex{}, + Tables: map[string]*vindexes.Table{ + "t1": vindexTable_t1, + "t2": vindexTable_t2, + "multicol_t1": vindexTable_multicol_t1, + "multicol_t2": vindexTable_multicol_t2, + }, + }, + }, + }, }} vm := &VSchemaManager{} var vs *vindexes.VSchema vm.subscriber = func(vschema *vindexes.VSchema, _ *VSchemaStats) { vs = vschema + vs.ResetCreated() } for _, tcase := range tcases { t.Run(tcase.name, func(t *testing.T) { @@ -138,7 +293,7 @@ func TestRebuildVSchema(t *testing.T) { tcases := []struct { name string srvVschema *vschemapb.SrvVSchema - schema map[string][]vindexes.Column + schema map[string]*vindexes.TableInfo expected *vindexes.VSchema }{{ name: "0 Schematracking- 1 srvVSchema", @@ -152,12 +307,12 @@ func TestRebuildVSchema(t *testing.T) { }, { name: "1 Schematracking- 0 srvVSchema", srvVschema: makeTestSrvVSchema("ks", false, nil), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblCol1}), }, { name: "1 Schematracking - 1 srvVSchema (no columns) not authoritative", srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{"tbl": {}}), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, // schema will override what srvSchema has. expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblCol1}), }, { @@ -168,7 +323,7 @@ func TestRebuildVSchema(t *testing.T) { ColumnListAuthoritative: false, }, }), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, // schema will override what srvSchema has. expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblCol1}), }, { @@ -176,7 +331,7 @@ func TestRebuildVSchema(t *testing.T) { srvVschema: makeTestSrvVSchema("ks", false, map[string]*vschemapb.Table{"tbl": { ColumnListAuthoritative: true, }}), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, // schema will override what srvSchema has. expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblNoCol}), }, { @@ -187,18 +342,19 @@ func TestRebuildVSchema(t *testing.T) { ColumnListAuthoritative: true, }, }), - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, // schema tracker will be ignored for authoritative tables. expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"tbl": tblCol2}), }, { name: "srvVschema received as nil", - schema: map[string][]vindexes.Column{"tbl": cols1}, + schema: map[string]*vindexes.TableInfo{"tbl": {Columns: cols1}}, }} vm := &VSchemaManager{} var vs *vindexes.VSchema vm.subscriber = func(vschema *vindexes.VSchema, _ *VSchemaStats) { vs = vschema + vs.ResetCreated() } for _, tcase := range tcases { t.Run(tcase.name, func(t *testing.T) { @@ -222,11 +378,14 @@ func makeTestVSchema(ks string, sharded bool, tbls map[string]*vindexes.Table) * Name: ks, Sharded: sharded, }, - Tables: tbls, - Vindexes: map[string]vindexes.Vindex{}, + // Default foreign key mode + ForeignKeyMode: vschemapb.Keyspace_FK_UNMANAGED, + Tables: tbls, + Vindexes: map[string]vindexes.Vindex{}, } vs := makeTestEmptyVSchema() vs.Keyspaces[ks] = keyspaceSchema + vs.ResetCreated() return vs } @@ -241,6 +400,8 @@ func makeTestSrvVSchema(ks string, sharded bool, tbls map[string]*vschemapb.Tabl keyspaceSchema := &vschemapb.Keyspace{ Sharded: sharded, Tables: tbls, + // Default foreign key mode + ForeignKeyMode: vschemapb.Keyspace_FK_UNMANAGED, } return &vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ks: keyspaceSchema}, @@ -248,14 +409,14 @@ func makeTestSrvVSchema(ks string, sharded bool, tbls map[string]*vschemapb.Tabl } type fakeSchema struct { - t map[string][]vindexes.Column + t map[string]*vindexes.TableInfo } -func (f *fakeSchema) Tables(string) map[string][]vindexes.Column { +func (f *fakeSchema) Tables(string) map[string]*vindexes.TableInfo { return f.t } -func (f *fakeSchema) Views(ks string) map[string]sqlparser.SelectStatement { +func (f *fakeSchema) Views(string) map[string]sqlparser.SelectStatement { return nil } diff --git a/go/vt/vtgate/vschema_stats.go b/go/vt/vtgate/vschema_stats.go index ce234fdba9a..d4920d7486f 100644 --- a/go/vt/vtgate/vschema_stats.go +++ b/go/vt/vtgate/vschema_stats.go @@ -31,11 +31,12 @@ type VSchemaStats struct { // VSchemaKeyspaceStats contains a rollup of the VSchema stats for a keyspace. // It is used to display a table with the information in the status page. type VSchemaKeyspaceStats struct { - Keyspace string - Sharded bool - TableCount int - VindexCount int - Error string + Keyspace string + Sharded bool + TableCount int + VindexCount int + VindexUnknownParamsCount int + Error string } // NewVSchemaStats returns a new VSchemaStats from a VSchema. @@ -54,6 +55,11 @@ func NewVSchemaStats(vschema *vindexes.VSchema, errorMessage string) *VSchemaSta for _, t := range k.Tables { s.VindexCount += len(t.ColumnVindexes) + len(t.Ordered) + len(t.Owned) } + for _, vdx := range k.Vindexes { + if pv, ok := vdx.(vindexes.ParamValidating); ok { + s.VindexUnknownParamsCount += len(pv.UnknownParams()) + } + } } if k.Error != nil { s.Error = k.Error.Error() @@ -95,6 +101,7 @@ const ( + {{range $i, $ks := .Keyspaces}} @@ -102,6 +109,7 @@ const ( + {{end}}
- Alias: {{github_com_vitessio_vitess_vtctld_tablet .Tablet.AliasString}}
- Keyspace: {{github_com_vitessio_vitess_vtctld_keyspace .Tablet.Keyspace}} Shard: {{github_com_vitessio_vitess_vtctld_shard .Tablet.Keyspace .Tablet.Shard}} Tablet Type: {{.Tablet.Type}}
- SrvKeyspace: {{github_com_vitessio_vitess_vtctld_srv_keyspace .Tablet.Alias.Cell .Tablet.Keyspace}}
- Replication graph: {{github_com_vitessio_vitess_vtctld_replication .Tablet.Alias.Cell .Tablet.Keyspace .Tablet.Shard}}
+ Alias: {{.Tablet.AliasString}}
+ Keyspace: {{.Tablet.Keyspace}}
+ Shard: {{.Tablet.Shard}}
+ Tablet Type: {{.Tablet.Type}}
{{if .DeniedTables}} DeniedTables: {{range .DeniedTables}}{{.}} {{end}}
{{end}} diff --git a/go/cmd/vttablet/docgen/main.go b/go/cmd/vttablet/docgen/main.go new file mode 100644 index 00000000000..9915d641352 --- /dev/null +++ b/go/cmd/vttablet/docgen/main.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/internal/docgen" + "vitess.io/vitess/go/cmd/vttablet/cli" +) + +func main() { + var dir string + cmd := cobra.Command{ + Use: "docgen [-d ]", + RunE: func(cmd *cobra.Command, args []string) error { + return docgen.GenerateMarkdownTree(cli.Main, dir) + }, + } + + cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation") + _ = cmd.Execute() +} diff --git a/go/cmd/vttablet/index.go b/go/cmd/vttablet/index.go index be06ed6f10b..aec221b5339 100644 --- a/go/cmd/vttablet/index.go +++ b/go/cmd/vttablet/index.go @@ -18,6 +18,8 @@ package main import ( "net/http" + + "vitess.io/vitess/go/vt/servenv" ) // This is a separate file so it can be selectively included/excluded from @@ -25,7 +27,7 @@ import ( func init() { // Anything unrecognized gets redirected to the status page. - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + servenv.HTTPHandleFunc("/", func(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, "/debug/status", http.StatusFound) }) } diff --git a/go/cmd/vttablet/plugin_kubernetestopo.go b/go/cmd/vttablet/plugin_kubernetestopo.go deleted file mode 100644 index 671d0c8321f..00000000000 --- a/go/cmd/vttablet/plugin_kubernetestopo.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports k8stopo to register the kubernetes implementation of TopoServer. - -import ( - _ "vitess.io/vitess/go/vt/topo/k8stopo" -) diff --git a/go/cmd/vttablet/plugin_statsd.go b/go/cmd/vttablet/plugin_statsd.go deleted file mode 100644 index 51761e6c406..00000000000 --- a/go/cmd/vttablet/plugin_statsd.go +++ /dev/null @@ -1,7 +0,0 @@ -package main - -import "vitess.io/vitess/go/stats/statsd" - -func init() { - statsd.Init("vttablet") -} diff --git a/go/cmd/vttablet/vttablet.go b/go/cmd/vttablet/vttablet.go index c7ee81511d5..0f91f48b649 100644 --- a/go/cmd/vttablet/vttablet.go +++ b/go/cmd/vttablet/vttablet.go @@ -18,206 +18,12 @@ limitations under the License. package main import ( - "bytes" - "context" - "os" - "time" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/vt/binlog" - "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/cmd/vttablet/cli" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/mysqlctl" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/tableacl" - "vitess.io/vitess/go/vt/tableacl/simpleacl" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vttablet/onlineddl" - "vitess.io/vitess/go/vt/vttablet/tabletmanager" - "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" - "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" - "vitess.io/vitess/go/vt/vttablet/tabletserver" - "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" - "vitess.io/vitess/go/yaml2" - "vitess.io/vitess/resources" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -var ( - enforceTableACLConfig bool - tableACLConfig string - tableACLConfigReloadInterval time.Duration - tabletPath string - tabletConfig string - - tm *tabletmanager.TabletManager ) -func registerFlags(fs *pflag.FlagSet) { - fs.BoolVar(&enforceTableACLConfig, "enforce-tableacl-config", enforceTableACLConfig, "if this flag is true, vttablet will fail to start if a valid tableacl config does not exist") - fs.StringVar(&tableACLConfig, "table-acl-config", tableACLConfig, "path to table access checker config file; send SIGHUP to reload this file") - fs.DurationVar(&tableACLConfigReloadInterval, "table-acl-config-reload-interval", tableACLConfigReloadInterval, "Ticker to reload ACLs. Duration flag, format e.g.: 30s. Default: do not reload") - fs.StringVar(&tabletPath, "tablet-path", tabletPath, "tablet alias") - fs.StringVar(&tabletConfig, "tablet_config", tabletConfig, "YAML file config for tablet") - - acl.RegisterFlags(fs) -} - -func init() { - servenv.RegisterDefaultFlags() - servenv.RegisterFlags() - servenv.RegisterGRPCServerFlags() - servenv.RegisterGRPCServerAuthFlags() - servenv.RegisterServiceMapFlag() - servenv.OnParseFor("vttablet", registerFlags) -} - func main() { - dbconfigs.RegisterFlags(dbconfigs.All...) - mysqlctl.RegisterFlags() - - servenv.ParseFlags("vttablet") - servenv.Init() - - if tabletPath == "" { - log.Exit("--tablet-path required") - } - tabletAlias, err := topoproto.ParseTabletAlias(tabletPath) - if err != nil { - log.Exitf("failed to parse --tablet-path: %v", err) - } - - // config and mycnf initializations are intertwined. - config, mycnf := initConfig(tabletAlias) - - ts := topo.Open() - qsc := createTabletServer(config, ts, tabletAlias) - - mysqld := mysqlctl.NewMysqld(config.DB) - servenv.OnClose(mysqld.Close) - - if err := extractOnlineDDL(); err != nil { - log.Exitf("failed to extract online DDL binaries: %v", err) - } - - // Initialize and start tm. - gRPCPort := int32(0) - if servenv.GRPCPort() != 0 { - gRPCPort = int32(servenv.GRPCPort()) - } - tablet, err := tabletmanager.BuildTabletFromInput(tabletAlias, int32(servenv.Port()), gRPCPort, mysqld.GetVersionString(), config.DB) - if err != nil { - log.Exitf("failed to parse --tablet-path: %v", err) - } - tm = &tabletmanager.TabletManager{ - BatchCtx: context.Background(), - TopoServer: ts, - Cnf: mycnf, - MysqlDaemon: mysqld, - DBConfigs: config.DB.Clone(), - QueryServiceControl: qsc, - UpdateStream: binlog.NewUpdateStream(ts, tablet.Keyspace, tabletAlias.Cell, qsc.SchemaEngine()), - VREngine: vreplication.NewEngine(config, ts, tabletAlias.Cell, mysqld, qsc.LagThrottler()), - VDiffEngine: vdiff.NewEngine(config, ts, tablet), - } - if err := tm.Start(tablet, config.Healthcheck.IntervalSeconds.Get()); err != nil { - log.Exitf("failed to parse --tablet-path or initialize DB credentials: %v", err) - } - servenv.OnClose(func() { - // Close the tm so that our topo entry gets pruned properly and any - // background goroutines that use the topo connection are stopped. - tm.Close() - - // tm uses ts. So, it should be closed after tm. - ts.Close() - }) - - servenv.RunDefault() -} - -func initConfig(tabletAlias *topodatapb.TabletAlias) (*tabletenv.TabletConfig, *mysqlctl.Mycnf) { - tabletenv.Init() - // Load current config after tabletenv.Init, because it changes it. - config := tabletenv.NewCurrentConfig() - if err := config.Verify(); err != nil { - log.Exitf("invalid config: %v", err) - } - - if tabletConfig != "" { - bytes, err := os.ReadFile(tabletConfig) - if err != nil { - log.Exitf("error reading config file %s: %v", tabletConfig, err) - } - if err := yaml2.Unmarshal(bytes, config); err != nil { - log.Exitf("error parsing config file %s: %v", bytes, err) - } - } - gotBytes, _ := yaml2.Marshal(config) - log.Infof("Loaded config file %s successfully:\n%s", tabletConfig, gotBytes) - - var mycnf *mysqlctl.Mycnf - var socketFile string - // If no connection parameters were specified, load the mycnf file - // and use the socket from it. If connection parameters were specified, - // we assume that the mysql is not local, and we skip loading mycnf. - // This also means that backup and restore will not be allowed. - if !config.DB.HasGlobalSettings() { - var err error - if mycnf, err = mysqlctl.NewMycnfFromFlags(tabletAlias.Uid); err != nil { - log.Exitf("mycnf read failed: %v", err) - } - socketFile = mycnf.SocketFile - } else { - log.Info("connection parameters were specified. Not loading my.cnf.") - } - - // If connection parameters were specified, socketFile will be empty. - // Otherwise, the socketFile (read from mycnf) will be used to initialize - // dbconfigs. - config.DB.InitWithSocket(socketFile) - for _, cfg := range config.ExternalConnections { - cfg.InitWithSocket("") - } - return config, mycnf -} - -// extractOnlineDDL extracts the gh-ost binary from this executable. gh-ost is appended -// to vttablet executable by `make build` with a go:embed -func extractOnlineDDL() error { - if binaryFileName, isOverride := onlineddl.GhostBinaryFileName(); !isOverride { - if err := os.WriteFile(binaryFileName, resources.GhostBinary, 0755); err != nil { - // One possibility of failure is that gh-ost is up and running. In that case, - // let's pause and check if the running gh-ost is exact same binary as the one we wish to extract. - foundBytes, _ := os.ReadFile(binaryFileName) - if bytes.Equal(resources.GhostBinary, foundBytes) { - // OK, it's the same binary, there is no need to extract the file anyway - return nil - } - return err - } - } - - return nil -} - -func createTabletServer(config *tabletenv.TabletConfig, ts *topo.Server, tabletAlias *topodatapb.TabletAlias) *tabletserver.TabletServer { - if tableACLConfig != "" { - // To override default simpleacl, other ACL plugins must set themselves to be default ACL factory - tableacl.Register("simpleacl", &simpleacl.Factory{}) - } else if enforceTableACLConfig { - log.Exit("table acl config has to be specified with table-acl-config flag because enforce-tableacl-config is set.") + if err := cli.Main.Execute(); err != nil { + log.Exit(err) } - // creates and registers the query service - qsc := tabletserver.NewTabletServer("", config, ts, tabletAlias) - servenv.OnRun(func() { - qsc.Register() - addStatusParts(qsc) - }) - servenv.OnClose(qsc.StopService) - qsc.InitACL(tableACLConfig, enforceTableACLConfig, tableACLConfigReloadInterval) - return qsc } diff --git a/go/cmd/vttestserver/main.go b/go/cmd/vttestserver/main.go index a91005f841c..f5f9c6bf41c 100644 --- a/go/cmd/vttestserver/main.go +++ b/go/cmd/vttestserver/main.go @@ -93,9 +93,9 @@ func registerFlags(fs *pflag.FlagSet) { " vttestserver as a database container in local developer environments. Note"+ " that db migration files (--schema_dir option) and seeding of"+ " random data (--initialize_with_random_data option) will only run during"+ - " cluster startup if the data directory does not already exist. vschema"+ - " migrations are run every time the cluster starts, since persistence"+ - " for the topology server has not been implemented yet") + " cluster startup if the data directory does not already exist. "+ + " Changes to VSchema are persisted across cluster restarts using a simple"+ + " watcher if the --data_dir argument is specified.") fs.BoolVar(&doSeed, "initialize_with_random_data", false, "If this flag is each table-shard will be initialized"+ @@ -141,7 +141,7 @@ func registerFlags(fs *pflag.FlagSet) { fs.StringVar(&config.Charset, "charset", "utf8mb4", "MySQL charset") - fs.StringVar(&config.PlannerVersion, "planner-version", "", "Sets the default planner to use when the session has not changed it. Valid values are: V3, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the new gen4 planner and falls back to the V3 planner if the gen4 fails.") + fs.StringVar(&config.PlannerVersion, "planner-version", "", "Sets the default planner to use when the session has not changed it. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right") fs.StringVar(&config.SnapshotFile, "snapshot_file", "", "A MySQL DB snapshot file") @@ -268,6 +268,7 @@ func parseFlags() (env vttest.Environment, err error) { func main() { cluster, err := runCluster() + servenv.Init() if err != nil { log.Fatal(err) } diff --git a/go/cmd/vttestserver/vttestserver_test.go b/go/cmd/vttestserver/vttestserver_test.go index 0665d5f9c46..226d66305be 100644 --- a/go/cmd/vttestserver/vttestserver_test.go +++ b/go/cmd/vttestserver/vttestserver_test.go @@ -81,8 +81,14 @@ func TestPersistentMode(t *testing.T) { cluster, err := startPersistentCluster(dir) assert.NoError(t, err) - // basic sanity checks similar to TestRunsVschemaMigrations + // Add a new "ad-hoc" vindex via vtgate once the cluster is up, to later make sure it is persisted across teardowns + err = addColumnVindex(cluster, "test_keyspace", "alter vschema on persistence_test add vindex my_vdx(id)") + assert.NoError(t, err) + + // Basic sanity checks similar to TestRunsVschemaMigrations + // See go/cmd/vttestserver/data/schema/app_customer/* and go/cmd/vttestserver/data/schema/test_keyspace/* assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"}) + assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "persistence_test", vindex: "my_vdx", vindexType: "hash", column: "id"}) assertColumnVindex(t, cluster, columnVindex{keyspace: "app_customer", table: "customers", vindex: "hash", vindexType: "hash", column: "id"}) // insert some data to ensure persistence across teardowns @@ -108,11 +114,15 @@ func TestPersistentMode(t *testing.T) { // reboot the persistent cluster cluster.TearDown() cluster, err = startPersistentCluster(dir) - defer cluster.TearDown() + defer func() { + cluster.PersistentMode = false // Cleanup the tmpdir as we're done + cluster.TearDown() + }() assert.NoError(t, err) - // rerun our sanity checks to make sure vschema migrations are run during every startup + // rerun our sanity checks to make sure vschema is persisted correctly assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"}) + assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "persistence_test", vindex: "my_vdx", vindexType: "hash", column: "id"}) assertColumnVindex(t, cluster, columnVindex{keyspace: "app_customer", table: "customers", vindex: "hash", vindexType: "hash", column: "id"}) // ensure previous data was successfully persisted @@ -249,7 +259,10 @@ func TestMtlsAuth(t *testing.T) { fmt.Sprintf("--vtctld_grpc_ca=%s", caCert), fmt.Sprintf("--grpc_auth_mtls_allowed_substrings=%s", "CN=ClientApp")) assert.NoError(t, err) - defer cluster.TearDown() + defer func() { + cluster.PersistentMode = false // Cleanup the tmpdir as we're done + cluster.TearDown() + }() // startCluster will apply vschema migrations using vtctl grpc and the clientCert. assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"}) @@ -316,7 +329,8 @@ func startCluster(flags ...string) (vttest.LocalCluster, error) { keyspaceArg := "--keyspaces=" + strings.Join(clusterKeyspaces, ",") numShardsArg := "--num_shards=2,2" vschemaDDLAuthorizedUsers := "--vschema_ddl_authorized_users=%" - os.Args = append(os.Args, []string{schemaDirArg, keyspaceArg, numShardsArg, tabletHostname, vschemaDDLAuthorizedUsers}...) + alsoLogToStderr := "--alsologtostderr" // better debugging + os.Args = append(os.Args, []string{schemaDirArg, keyspaceArg, numShardsArg, tabletHostname, vschemaDDLAuthorizedUsers, alsoLogToStderr}...) os.Args = append(os.Args, flags...) return runCluster() } diff --git a/go/cmd/zk/zkcmd.go b/go/cmd/zk/zkcmd.go index 5cc736ea959..8d456f6b081 100644 --- a/go/cmd/zk/zkcmd.go +++ b/go/cmd/zk/zkcmd.go @@ -478,19 +478,15 @@ func cmdTouch(ctx context.Context, subFlags *pflag.FlagSet, args []string) error func cmdRm(ctx context.Context, subFlags *pflag.FlagSet, args []string) error { var ( - force bool - recursiveDelete bool - forceAndRecursive bool + force bool + recursiveDelete bool ) subFlags.BoolVarP(&force, "force", "f", false, "no warning on nonexistent node") subFlags.BoolVarP(&recursiveDelete, "recursivedelete", "r", false, "recursive delete") - subFlags.BoolVarP(&forceAndRecursive, "forceandrecursive", "rf", false, "shorthand for -r -f") if err := subFlags.Parse(args); err != nil { return err } - force = force || forceAndRecursive - recursiveDelete = recursiveDelete || forceAndRecursive if subFlags.NArg() == 0 { return fmt.Errorf("rm: no path specified") diff --git a/go/cmd/zkctl/zkctl.go b/go/cmd/zkctl/zkctl.go index fb1d167adcf..566631e8cc2 100644 --- a/go/cmd/zkctl/zkctl.go +++ b/go/cmd/zkctl/zkctl.go @@ -18,9 +18,6 @@ limitations under the License. package main import ( - "bufio" - "os" - "github.com/spf13/pflag" "vitess.io/vitess/go/exit" @@ -37,10 +34,9 @@ Commands: ` var ( - // Reason for nolint : Used in line 54 (stdin = bufio.NewReader(os.Stdin)) in the init function - stdin *bufio.Reader //nolint - zkCfg = "6@:3801:3802:3803" - myID uint + zkCfg = "6@:3801:3802:3803" + myID uint + zkExtra []string ) func registerZkctlFlags(fs *pflag.FlagSet) { @@ -48,11 +44,12 @@ func registerZkctlFlags(fs *pflag.FlagSet) { "zkid@server1:leaderPort1:electionPort1:clientPort1,...)") fs.UintVar(&myID, "zk.myid", myID, "which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname") - + fs.StringArrayVar(&zkExtra, "zk.extra", zkExtra, + "extra config line(s) to append verbatim to config (flag can be specified more than once)") } + func init() { servenv.OnParse(registerZkctlFlags) - stdin = bufio.NewReader(os.Stdin) } func main() { @@ -65,6 +62,7 @@ func main() { args := servenv.ParseFlagsWithArgs("zkctl") zkConfig := zkctl.MakeZkConfigFromString(zkCfg, uint32(myID)) + zkConfig.Extra = zkExtra zkd := zkctl.NewZkd(zkConfig) action := args[0] diff --git a/go/cmd/zkctld/zkctld.go b/go/cmd/zkctld/zkctld.go index 1e512382ab4..0d1ee413a66 100644 --- a/go/cmd/zkctld/zkctld.go +++ b/go/cmd/zkctld/zkctld.go @@ -35,8 +35,9 @@ import ( ) var ( - zkCfg = "6@:3801:3802:3803" - myID uint + zkCfg = "6@:3801:3802:3803" + myID uint + zkExtra []string ) func init() { @@ -48,7 +49,8 @@ func registerFlags(fs *pflag.FlagSet) { "zkid@server1:leaderPort1:electionPort1:clientPort1,...)") fs.UintVar(&myID, "zk.myid", myID, "which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname") - + fs.StringArrayVar(&zkExtra, "zk.extra", zkExtra, + "extra config line(s) to append verbatim to config (flag can be specified more than once)") acl.RegisterFlags(fs) } @@ -57,7 +59,9 @@ func main() { defer logutil.Flush() servenv.ParseFlags("zkctld") + servenv.Init() zkConfig := zkctl.MakeZkConfigFromString(zkCfg, uint32(myID)) + zkConfig.Extra = zkExtra zkd := zkctl.NewZkd(zkConfig) if zkd.Inited() { diff --git a/go/constants/sidecar/name.go b/go/constants/sidecar/name.go new file mode 100644 index 00000000000..063452782b7 --- /dev/null +++ b/go/constants/sidecar/name.go @@ -0,0 +1,42 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sidecar + +import ( + "sync/atomic" +) + +const ( + DefaultName = "_vt" +) + +var ( + // This should be accessed via GetName() + sidecarDBName atomic.Value +) + +func init() { + sidecarDBName.Store(DefaultName) +} + +func SetName(name string) { + sidecarDBName.Store(name) +} + +func GetName() string { + return sidecarDBName.Load().(string) +} diff --git a/go/constants/sidecar/queries.go b/go/constants/sidecar/queries.go new file mode 100644 index 00000000000..97fa30ebecc --- /dev/null +++ b/go/constants/sidecar/queries.go @@ -0,0 +1,55 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sidecar + +import "vitess.io/vitess/go/vt/sqlparser" + +// region unit-test-only +// This section uses helpers used in tests, but also in +// go/vt/vtexplain/vtexplain_vttablet.go. +// Hence, it is here and not in the _test.go file. +const ( + createDBQuery = "create database if not exists %s" + createTableRegexp = "(?i)CREATE TABLE .* `?\\_vt\\`?..*" + alterTableRegexp = "(?i)ALTER TABLE `?\\_vt\\`?..*" +) + +var ( + DBInitQueries = []string{ + "use %s", + createDBQuery, + } + // Query patterns to handle in mocks. + DBInitQueryPatterns = []string{ + createTableRegexp, + alterTableRegexp, + } +) + +// GetCreateQuery returns the CREATE DATABASE SQL statement +// used to create the sidecar database. +func GetCreateQuery() string { + return sqlparser.BuildParsedQuery(createDBQuery, GetIdentifier()).Query +} + +// GetIdentifier returns the sidecar database name as an SQL +// identifier string, most importantly this means that it will +// be properly escaped if/as needed. +func GetIdentifier() string { + ident := sqlparser.NewIdentifierCS(GetName()) + return sqlparser.String(ident) +} diff --git a/go/errors/errors.go b/go/errors/errors.go new file mode 100644 index 00000000000..d3349d320ed --- /dev/null +++ b/go/errors/errors.go @@ -0,0 +1,56 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +// Wrapped is used to unwrap an error created by errors.Join() in Go 1.20 +type Wrapped interface { + Unwrap() []error +} + +// Unwrap unwraps an error created by errors.Join() in Go 1.20, into its components +func Unwrap(err error) []error { + if err == nil { + return nil + } + if u, ok := err.(Wrapped); ok { + return u.Unwrap() + } + return nil +} + +// Unwrap unwraps an error created by errors.Join() in Go 1.20, into its components, recursively +func UnwrapAll(err error) (errs []error) { + if err == nil { + return nil + } + if u, ok := err.(Wrapped); ok { + for _, e := range u.Unwrap() { + errs = append(errs, UnwrapAll(e)...) + } + return errs + } + return []error{err} +} + +// Unwrap unwraps an error created by errors.Join() in Go 1.20, into its components, recursively, +// and returns one (the first) unwrapped error +func UnwrapFirst(err error) error { + if err == nil { + return nil + } + return UnwrapAll(err)[0] +} diff --git a/go/errors/errors_test.go b/go/errors/errors_test.go new file mode 100644 index 00000000000..32f8f916d6e --- /dev/null +++ b/go/errors/errors_test.go @@ -0,0 +1,99 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestUnwrap(t *testing.T) { + err1 := errors.New("err1") + err2 := errors.New("err2") + err3 := errors.New("err3") + err4 := errors.New("err4") + + tt := []struct { + name string + err error + expectUnwrap []error + expectUnwrapAll []error + expectUnwrapFirst error + }{ + { + name: "nil", + expectUnwrap: nil, + expectUnwrapAll: nil, + expectUnwrapFirst: nil, + }, + { + name: "single", + err: err1, + expectUnwrap: nil, + expectUnwrapAll: []error{err1}, + expectUnwrapFirst: err1, + }, + { + name: "wrapped nil", + err: errors.Join(nil), + expectUnwrap: nil, + expectUnwrapAll: nil, + expectUnwrapFirst: nil, + }, + { + name: "single wrapped", + err: errors.Join(err1), + expectUnwrap: []error{err1}, + expectUnwrapAll: []error{err1}, + expectUnwrapFirst: err1, + }, + { + name: "flat wrapped", + err: errors.Join(err1, err2, err3, err4), + expectUnwrap: []error{err1, err2, err3, err4}, + expectUnwrapAll: []error{err1, err2, err3, err4}, + expectUnwrapFirst: err1, + }, + { + name: "double wrapped", + err: errors.Join(errors.Join(err1)), + expectUnwrap: []error{errors.Join(err1)}, + expectUnwrapAll: []error{err1}, + expectUnwrapFirst: err1, + }, + { + name: "double nested wrapped", + err: errors.Join(errors.Join(err1, err2), errors.Join(err3, err4)), + expectUnwrap: []error{errors.Join(err1, err2), errors.Join(err3, err4)}, + expectUnwrapAll: []error{err1, err2, err3, err4}, + expectUnwrapFirst: err1, + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + unwrapped := Unwrap(tc.err) + unwrappedAll := UnwrapAll(tc.err) + unwrappedFirst := UnwrapFirst(tc.err) + + assert.Equal(t, tc.expectUnwrap, unwrapped) + assert.Equal(t, tc.expectUnwrapAll, unwrappedAll) + assert.Equal(t, tc.expectUnwrapFirst, unwrappedFirst) + }) + } +} diff --git a/go/event/syslogger/fake_logger.go b/go/event/syslogger/fake_logger.go new file mode 100644 index 00000000000..63c0942c069 --- /dev/null +++ b/go/event/syslogger/fake_logger.go @@ -0,0 +1,85 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package syslogger + +import ( + "fmt" + + "vitess.io/vitess/go/vt/log" +) + +type loggerMsg struct { + msg string + level string +} +type testLogger struct { + logs []loggerMsg + savedInfof func(format string, args ...any) + savedWarningf func(format string, args ...any) + savedErrorf func(format string, args ...any) +} + +func NewTestLogger() *testLogger { + tl := &testLogger{ + savedInfof: log.Infof, + savedWarningf: log.Warningf, + savedErrorf: log.Errorf, + } + log.Infof = tl.recordInfof + log.Warningf = tl.recordWarningf + log.Errorf = tl.recordErrorf + return tl +} + +func (tl *testLogger) Close() { + log.Infof = tl.savedInfof + log.Warningf = tl.savedWarningf + log.Errorf = tl.savedErrorf +} + +func (tl *testLogger) recordInfof(format string, args ...any) { + msg := fmt.Sprintf(format, args...) + tl.logs = append(tl.logs, loggerMsg{msg, "INFO"}) + tl.savedInfof(msg) +} + +func (tl *testLogger) recordWarningf(format string, args ...any) { + msg := fmt.Sprintf(format, args...) + tl.logs = append(tl.logs, loggerMsg{msg, "WARNING"}) + tl.savedWarningf(msg) +} + +func (tl *testLogger) recordErrorf(format string, args ...any) { + msg := fmt.Sprintf(format, args...) + tl.logs = append(tl.logs, loggerMsg{msg, "ERROR"}) + tl.savedErrorf(msg) +} + +func (tl *testLogger) getLog() loggerMsg { + if len(tl.logs) > 0 { + return tl.logs[len(tl.logs)-1] + } + return loggerMsg{"no logs!", "ERROR"} +} + +func (tl *testLogger) GetAllLogs() []string { + var logs []string + for _, l := range tl.logs { + logs = append(logs, l.level+":"+l.msg) + } + return logs +} diff --git a/go/event/syslogger/syslogger_test.go b/go/event/syslogger/syslogger_test.go index 6549e4ca8bb..4847fecac2a 100644 --- a/go/event/syslogger/syslogger_test.go +++ b/go/event/syslogger/syslogger_test.go @@ -23,7 +23,6 @@ import ( "testing" "vitess.io/vitess/go/event" - "vitess.io/vitess/go/vt/log" ) type TestEvent struct { @@ -63,60 +62,6 @@ func (fw *fakeWriter) Info(msg string) error { return fw.write(syslog.LOG_INF func (fw *fakeWriter) Notice(msg string) error { return fw.write(syslog.LOG_NOTICE, msg) } func (fw *fakeWriter) Warning(msg string) error { return fw.write(syslog.LOG_WARNING, msg) } -type loggerMsg struct { - msg string - level string -} -type testLogger struct { - logs []loggerMsg - savedInfof func(format string, args ...any) - savedWarningf func(format string, args ...any) - savedErrorf func(format string, args ...any) -} - -func newTestLogger() *testLogger { - tl := &testLogger{ - savedInfof: log.Infof, - savedWarningf: log.Warningf, - savedErrorf: log.Errorf, - } - log.Infof = tl.recordInfof - log.Warningf = tl.recordWarningf - log.Errorf = tl.recordErrorf - return tl -} - -func (tl *testLogger) Close() { - log.Infof = tl.savedInfof - log.Warningf = tl.savedWarningf - log.Errorf = tl.savedErrorf -} - -func (tl *testLogger) recordInfof(format string, args ...any) { - msg := fmt.Sprintf(format, args...) - tl.logs = append(tl.logs, loggerMsg{msg, "INFO"}) - tl.savedInfof(msg) -} - -func (tl *testLogger) recordWarningf(format string, args ...any) { - msg := fmt.Sprintf(format, args...) - tl.logs = append(tl.logs, loggerMsg{msg, "WARNING"}) - tl.savedWarningf(msg) -} - -func (tl *testLogger) recordErrorf(format string, args ...any) { - msg := fmt.Sprintf(format, args...) - tl.logs = append(tl.logs, loggerMsg{msg, "ERROR"}) - tl.savedErrorf(msg) -} - -func (tl *testLogger) getLog() loggerMsg { - if len(tl.logs) > 0 { - return tl.logs[len(tl.logs)-1] - } - return loggerMsg{"no logs!", "ERROR"} -} - // TestSyslog checks that our callback works. func TestSyslog(t *testing.T) { writer = &fakeWriter{} @@ -132,7 +77,7 @@ func TestSyslog(t *testing.T) { // TestBadWriter verifies we are still triggering (to normal logs) if // the syslog connection failed func TestBadWriter(t *testing.T) { - tl := newTestLogger() + tl := NewTestLogger() defer tl.Close() writer = nil diff --git a/go/flags/endtoend/flags_test.go b/go/flags/endtoend/flags_test.go index 1ad69c7fa84..ee24fd6a36d 100644 --- a/go/flags/endtoend/flags_test.go +++ b/go/flags/endtoend/flags_test.go @@ -22,13 +22,16 @@ package flags import ( "bytes" - _ "embed" + "os" "os/exec" "testing" + "text/template" - "vitess.io/vitess/go/test/utils" + _ "embed" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/utils" ) var ( @@ -47,9 +50,6 @@ var ( //go:embed vtgate.txt vtgateTxt string - //go:embed vtgr.txt - vtgrTxt string - //go:embed vttablet.txt vttabletTxt string @@ -89,7 +89,6 @@ var ( "vtaclcheck": vtaclcheckTxt, "vtexplain": vtexplainTxt, "vtgate": vtgateTxt, - "vtgr": vtgrTxt, "vttablet": vttabletTxt, "vttlstest": vttlstestTxt, "vtctld": vtctldTxt, @@ -105,16 +104,30 @@ var ( ) func TestHelpOutput(t *testing.T) { + wd, err := os.Getwd() + require.NoError(t, err) + args := []string{"--help"} for binary, helptext := range helpOutput { t.Run(binary, func(t *testing.T) { + tmpl, err := template.New(binary).Parse(helptext) + require.NoError(t, err) + + var buf bytes.Buffer + err = tmpl.Execute(&buf, struct { + Workdir string + }{ + Workdir: wd, + }) + require.NoError(t, err) + cmd := exec.Command(binary, args...) output := bytes.Buffer{} cmd.Stderr = &output cmd.Stdout = &output - err := cmd.Run() + err = cmd.Run() require.NoError(t, err) - utils.MustMatch(t, helptext, output.String()) + utils.MustMatch(t, buf.String(), output.String()) }) } } diff --git a/go/flags/endtoend/mysqlctl.txt b/go/flags/endtoend/mysqlctl.txt index cdd64593649..4af44804749 100644 --- a/go/flags/endtoend/mysqlctl.txt +++ b/go/flags/endtoend/mysqlctl.txt @@ -11,71 +11,77 @@ The commands are listed below. Use 'mysqlctl -- {-h, --help}' for comm position Global flags: - --alsologtostderr log to standard error as well as files - --app_idle_timeout duration Idle timeout for app connections (default 1m0s) - --app_pool_size int Size of the connection pool for app connections (default 40) - --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified - --db-credentials-file string db credentials file; send SIGHUP to reload this file - --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file") - --db-credentials-vault-addr string URL to Vault server - --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds - --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle") - --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable - --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable - --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s) - --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate - --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable - --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s) - --db_charset string Character set used for this tablet. (default "utf8mb4") - --db_conn_query_info enable parsing and processing of QUERY_OK info fields - --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout) - --db_dba_password string db dba password - --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true) - --db_dba_user string db dba user userKey (default "vt_dba") - --db_flags uint Flag values as defined by MySQL. - --db_flavor string Flavor overrid. Valid value is FilePos. - --db_host string The host name for the tcp connection. - --db_port int tcp port - --db_server_name string server name of the DB we are connecting to. - --db_socket string The unix socket to connect on. If this is specified, host and port will not be used. - --db_ssl_ca string connection ssl ca - --db_ssl_ca_path string connection ssl ca path - --db_ssl_cert string connection ssl certificate - --db_ssl_key string connection ssl key - --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity. - --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3. - --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s) - --dba_pool_size int Size of the connection pool for dba connections (default 20) - -h, --help display usage and exit - --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) - --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_err_stacks log stack traces for errors - --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) - --logtostderr log to standard error instead of files - --max-stack-size int configure the maximum stack size in bytes (default 67108864) - --mysql_port int MySQL port (default 3306) - --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess") - --mysql_socket string Path to the mysqld socket file - --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default "grpc") - --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init - --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions) - --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s) - --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s) - --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. - --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled) - --pprof strings enable profiling - --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) - --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s) - --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) - --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice - --socket_file string Local unix socket file to listen on - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) - --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class - --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid. - --tablet_uid uint32 Tablet UID (default 41983) - --v Level log level for V logs - -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --alsologtostderr log to standard error as well as files + --app_idle_timeout duration Idle timeout for app connections (default 1m0s) + --app_pool_size int Size of the connection pool for app connections (default 40) + --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. (default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). + --db-credentials-file string db credentials file; send SIGHUP to reload this file + --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file") + --db-credentials-vault-addr string URL to Vault server + --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds + --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle") + --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable + --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable + --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s) + --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate + --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable + --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s) + --db_charset string Character set used for this tablet. (default "utf8mb4") + --db_conn_query_info enable parsing and processing of QUERY_OK info fields + --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout) + --db_dba_password string db dba password + --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true) + --db_dba_user string db dba user userKey (default "vt_dba") + --db_flags uint Flag values as defined by MySQL. + --db_flavor string Flavor overrid. Valid value is FilePos. + --db_host string The host name for the tcp connection. + --db_port int tcp port + --db_server_name string server name of the DB we are connecting to. + --db_socket string The unix socket to connect on. If this is specified, host and port will not be used. + --db_ssl_ca string connection ssl ca + --db_ssl_ca_path string connection ssl ca path + --db_ssl_cert string connection ssl certificate + --db_ssl_key string connection ssl key + --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity. + --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3. + --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s) + --dba_pool_size int Size of the connection pool for dba connections (default 20) + -h, --help display usage and exit + --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) + --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) + --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --log_err_stacks log stack traces for errors + --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --logtostderr log to standard error instead of files + --max-stack-size int configure the maximum stack size in bytes (default 67108864) + --mysql_port int MySQL port (default 3306) + --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess") + --mysql_socket string Path to the mysqld socket file + --mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default "grpc") + --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init + --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions) + --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s) + --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s) + --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. + --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled) + --pprof strings enable profiling + --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) + --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s) + --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) + --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice + --socket_file string Local unix socket file to listen on + --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class + --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid. + --tablet_uid uint32 Tablet UID (default 41983) + --v Level log level for V logs + -v, --version print binary version + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging diff --git a/go/flags/endtoend/mysqlctld.txt b/go/flags/endtoend/mysqlctld.txt index 09e3c144620..6fbbd059492 100644 --- a/go/flags/endtoend/mysqlctld.txt +++ b/go/flags/endtoend/mysqlctld.txt @@ -3,6 +3,12 @@ Usage of mysqlctld: --app_idle_timeout duration Idle timeout for app connections (default 1m0s) --app_pool_size int Size of the connection pool for app connections (default 40) --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. (default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). --db-credentials-file string db credentials file; send SIGHUP to reload this file --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file") --db-credentials-vault-addr string URL to Vault server diff --git a/go/flags/endtoend/vtaclcheck.txt b/go/flags/endtoend/vtaclcheck.txt index 6e2c57db109..001d3a5b192 100644 --- a/go/flags/endtoend/vtaclcheck.txt +++ b/go/flags/endtoend/vtaclcheck.txt @@ -1,19 +1,25 @@ Usage of vtaclcheck: - --acl-file string The path of the JSON ACL file to check - --alsologtostderr log to standard error as well as files - -h, --help display usage and exit - --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) - --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_err_stacks log stack traces for errors - --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) - --logtostderr log to standard error instead of files - --pprof strings enable profiling - --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) - --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) - --static-auth-file string The path of the auth_server_static JSON file to check - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) - --v Level log level for V logs - -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --acl-file string The path of the JSON ACL file to check + --alsologtostderr log to standard error as well as files + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. (default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). + -h, --help display usage and exit + --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) + --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --log_err_stacks log stack traces for errors + --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --logtostderr log to standard error instead of files + --pprof strings enable profiling + --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) + --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) + --static-auth-file string The path of the auth_server_static JSON file to check + --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --v Level log level for V logs + -v, --version print binary version + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging diff --git a/go/flags/endtoend/vtbackup.txt b/go/flags/endtoend/vtbackup.txt index f91cb1d35e4..44cb2a08462 100644 --- a/go/flags/endtoend/vtbackup.txt +++ b/go/flags/endtoend/vtbackup.txt @@ -1,187 +1,198 @@ Usage of vtbackup: - --allow_first_backup Allow this job to take the first backup of an existing shard. - --alsologtostderr log to standard error as well as files - --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path). - --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used. - --azblob_backup_container_name string Azure Blob Container Name. - --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased). (default 1) - --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/'). - --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin") - --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000) - --backup_storage_compress if set, the backup files will be compressed. (default true) - --backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups. - --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, in parallel, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2) - --builtinbackup-file-read-buffer-size uint read files using an IO buffer of this many bytes. Golang defaults are used when set to 0. - --builtinbackup-file-write-buffer-size uint write files using an IO buffer of this many bytes. Golang defaults are used when set to 0. (default 2097152) - --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s) - --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s) - --ceph_backup_storage_config string Path to JSON config file for ceph backup storage. (default "ceph_backup_config.json") - --compression-engine-name string compressor engine used for compression. (default "pargzip") - --compression-level int what level to pass to the compressor. (default 1) - --concurrency int (init restore parameter) how many concurrent files to restore at once (default 4) - --consul_auth_static_file string JSON File to read the topos/tokens from. - --db-credentials-file string db credentials file; send SIGHUP to reload this file - --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file") - --db-credentials-vault-addr string URL to Vault server - --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds - --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle") - --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable - --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable - --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s) - --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate - --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable - --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s) - --db_allprivs_password string db allprivs password - --db_allprivs_use_ssl Set this flag to false to make the allprivs connection to not use ssl (default true) - --db_allprivs_user string db allprivs user userKey (default "vt_allprivs") - --db_app_password string db app password - --db_app_use_ssl Set this flag to false to make the app connection to not use ssl (default true) - --db_app_user string db app user userKey (default "vt_app") - --db_appdebug_password string db appdebug password - --db_appdebug_use_ssl Set this flag to false to make the appdebug connection to not use ssl (default true) - --db_appdebug_user string db appdebug user userKey (default "vt_appdebug") - --db_charset string Character set used for this tablet. (default "utf8mb4") - --db_conn_query_info enable parsing and processing of QUERY_OK info fields - --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout) - --db_dba_password string db dba password - --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true) - --db_dba_user string db dba user userKey (default "vt_dba") - --db_erepl_password string db erepl password - --db_erepl_use_ssl Set this flag to false to make the erepl connection to not use ssl (default true) - --db_erepl_user string db erepl user userKey (default "vt_erepl") - --db_filtered_password string db filtered password - --db_filtered_use_ssl Set this flag to false to make the filtered connection to not use ssl (default true) - --db_filtered_user string db filtered user userKey (default "vt_filtered") - --db_flags uint Flag values as defined by MySQL. - --db_flavor string Flavor overrid. Valid value is FilePos. - --db_host string The host name for the tcp connection. - --db_port int tcp port - --db_repl_password string db repl password - --db_repl_use_ssl Set this flag to false to make the repl connection to not use ssl (default true) - --db_repl_user string db repl user userKey (default "vt_repl") - --db_server_name string server name of the DB we are connecting to. - --db_socket string The unix socket to connect on. If this is specified, host and port will not be used. - --db_ssl_ca string connection ssl ca - --db_ssl_ca_path string connection ssl ca path - --db_ssl_cert string connection ssl certificate - --db_ssl_key string connection ssl key - --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity. - --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3. - --detach detached mode - run backups detached from the terminal - --disable-redo-log Disable InnoDB redo log during replication-from-primary phase of backup. - --emit_stats If set, emit stats to push-based monitoring and stats backends - --external-compressor string command with arguments to use when compressing a backup. - --external-compressor-extension string extension to use when using an external compressor. - --external-decompressor string command with arguments to use when decompressing a backup. - --file_backup_storage_root string Root directory for the file backup storage. - --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups. - --gcs_backup_storage_root string Root prefix for all backup-related object names. - --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. - --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy - --grpc_enable_tracing Enable gRPC tracing. - --grpc_initial_conn_window_size int gRPC initial connection window size - --grpc_initial_window_size int gRPC initial window size - --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s) - --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s) - --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216) - --grpc_prometheus Enable gRPC monitoring with Prometheus. - -h, --help display usage and exit - --incremental_from_pos string Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position - --init_db_name_override string (init parameter) override the name of the db used by vttablet - --init_db_sql_file string path to .sql file to run after mysql_install_db - --init_keyspace string (init parameter) keyspace to use for this tablet - --init_shard string (init parameter) shard to use for this tablet - --initial_backup Instead of restoring from backup, initialize an empty database with the provided init_db_sql_file and upload a backup of that for the shard, if the shard has no backups yet. This can be used to seed a brand new shard with an initial, empty backup. If any backups already exist for the shard, this will be considered a successful no-op. This can only be done before the shard exists in topology (i.e. before any tablets are deployed). - --keep-alive-timeout duration Wait until timeout elapses after a successful backup before shutting down. - --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) - --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_err_stacks log stack traces for errors - --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) - --logtostderr log to standard error instead of files - --min_backup_interval duration Only take a new backup if it's been at least this long since the most recent backup. - --min_retention_count int Always keep at least this many of the most recent backups in this backup storage location, even if some are older than the min_retention_time. This must be at least 1 since a backup must always exist to allow new backups to be made (default 1) - --min_retention_time duration Keep each old backup for at least this long before removing it. Set to 0 to disable pruning of old backups. - --mycnf-file string path to my.cnf, if reading all config params from there - --mycnf_bin_log_path string mysql binlog path - --mycnf_data_dir string data directory for mysql - --mycnf_error_log_path string mysql error log path - --mycnf_general_log_path string mysql general log path - --mycnf_innodb_data_home_dir string Innodb data home directory - --mycnf_innodb_log_group_home_dir string Innodb log group home directory - --mycnf_master_info_file string mysql master.info file - --mycnf_mysql_port int port mysql is listening on - --mycnf_pid_file string mysql pid file - --mycnf_relay_log_index_path string mysql relay log index path - --mycnf_relay_log_info_path string mysql relay log info path - --mycnf_relay_log_path string mysql relay log path - --mycnf_secure_file_priv string mysql path for loading secure files - --mycnf_server_id int mysql server id of the server (if specified, mycnf-file will be ignored) - --mycnf_slow_log_path string mysql slow query log path - --mycnf_socket_file string mysql socket file - --mycnf_tmp_dir string mysql tmp directory - --mysql_port int mysql port (default 3306) - --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess") - --mysql_socket string path to the mysql socket - --mysql_timeout duration how long to wait for mysqld startup (default 5m0s) - --port int port for the server - --pprof strings enable profiling - --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) - --remote_operation_timeout duration time to wait for a remote operation (default 15s) - --restart_before_backup Perform a mysqld clean/full restart after applying binlogs, but before taking the backup. Only makes sense to work around xtrabackup bugs. - --s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided). - --s3_backup_aws_region string AWS region to use. (default "us-east-1") - --s3_backup_aws_retries int AWS request retries. (default -1) - --s3_backup_force_path_style force the s3 path style. - --s3_backup_log_level string determine the S3 loglevel to use from LogOff, LogDebug, LogDebugWithSigning, LogDebugWithHTTPBody, LogDebugWithRequestRetries, LogDebugWithRequestErrors. (default "LogOff") - --s3_backup_server_side_encryption string server-side encryption algorithm (e.g., AES256, aws:kms, sse_c:/path/to/key/file). - --s3_backup_storage_bucket string S3 bucket to use for backups. - --s3_backup_storage_root string root prefix for all backup-related object names. - --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections. - --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) - --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited) - --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512) - --stats_backend string The name of the registered push-based monitoring/stats backend to use - --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars - --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2 - --stats_drop_variables string Variables to be dropped from the list of exported variables. - --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) - --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting - --tablet_manager_grpc_cert string the cert to use to connect - --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8) - --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100) - --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting - --tablet_manager_grpc_key string the key to use to connect - --tablet_manager_grpc_server_name string the server name to use to validate server certificate - --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc") - --topo_consul_lock_delay duration LockDelay for consul session. (default 15s) - --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth") - --topo_consul_lock_session_ttl string TTL for consul session. - --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s) - --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30) - --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server - --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS - --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS - --topo_global_root string the path of the global topology data in the global topology server - --topo_global_server_address string the address of the global topology server - --topo_implementation string the topology implementation to use - --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass - --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s) - --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64) - --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server - --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS - --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS - --v Level log level for V logs - -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging - --xbstream_restore_flags string Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt - --xtrabackup_backup_flags string Flags to pass to backup command. These should be space separated and will be added to the end of the command - --xtrabackup_prepare_flags string Flags to pass to prepare command. These should be space separated and will be added to the end of the command - --xtrabackup_root_path string Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin - --xtrabackup_stream_mode string Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0 (default "tar") - --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400) - --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression - --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation. + --allow_first_backup Allow this job to take the first backup of an existing shard. + --alsologtostderr log to standard error as well as files + --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path). + --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used. + --azblob_backup_buffer_size int The memory buffer size to use in bytes, per file or stripe, when streaming to Azure Blob Service. (default 104857600) + --azblob_backup_container_name string Azure Blob Container Name. + --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased -- a multiple of azblob_backup_buffer_size). (default 1) + --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/'). + --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin") + --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000) + --backup_storage_compress if set, the backup files will be compressed. (default true) + --backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups. + --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, in parallel, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2) + --builtinbackup-file-read-buffer-size uint read files using an IO buffer of this many bytes. Golang defaults are used when set to 0. + --builtinbackup-file-write-buffer-size uint write files using an IO buffer of this many bytes. Golang defaults are used when set to 0. (default 2097152) + --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s) + --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s) + --ceph_backup_storage_config string Path to JSON config file for ceph backup storage. (default "ceph_backup_config.json") + --compression-engine-name string compressor engine used for compression. (default "pargzip") + --compression-level int what level to pass to the compressor. (default 1) + --concurrency int (init restore parameter) how many concurrent files to restore at once (default 4) + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. (default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). + --consul_auth_static_file string JSON File to read the topos/tokens from. + --db-credentials-file string db credentials file; send SIGHUP to reload this file + --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file") + --db-credentials-vault-addr string URL to Vault server + --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds + --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle") + --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable + --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable + --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s) + --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate + --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable + --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s) + --db_allprivs_password string db allprivs password + --db_allprivs_use_ssl Set this flag to false to make the allprivs connection to not use ssl (default true) + --db_allprivs_user string db allprivs user userKey (default "vt_allprivs") + --db_app_password string db app password + --db_app_use_ssl Set this flag to false to make the app connection to not use ssl (default true) + --db_app_user string db app user userKey (default "vt_app") + --db_appdebug_password string db appdebug password + --db_appdebug_use_ssl Set this flag to false to make the appdebug connection to not use ssl (default true) + --db_appdebug_user string db appdebug user userKey (default "vt_appdebug") + --db_charset string Character set used for this tablet. (default "utf8mb4") + --db_conn_query_info enable parsing and processing of QUERY_OK info fields + --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout) + --db_dba_password string db dba password + --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true) + --db_dba_user string db dba user userKey (default "vt_dba") + --db_erepl_password string db erepl password + --db_erepl_use_ssl Set this flag to false to make the erepl connection to not use ssl (default true) + --db_erepl_user string db erepl user userKey (default "vt_erepl") + --db_filtered_password string db filtered password + --db_filtered_use_ssl Set this flag to false to make the filtered connection to not use ssl (default true) + --db_filtered_user string db filtered user userKey (default "vt_filtered") + --db_flags uint Flag values as defined by MySQL. + --db_flavor string Flavor overrid. Valid value is FilePos. + --db_host string The host name for the tcp connection. + --db_port int tcp port + --db_repl_password string db repl password + --db_repl_use_ssl Set this flag to false to make the repl connection to not use ssl (default true) + --db_repl_user string db repl user userKey (default "vt_repl") + --db_server_name string server name of the DB we are connecting to. + --db_socket string The unix socket to connect on. If this is specified, host and port will not be used. + --db_ssl_ca string connection ssl ca + --db_ssl_ca_path string connection ssl ca path + --db_ssl_cert string connection ssl certificate + --db_ssl_key string connection ssl key + --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity. + --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3. + --detach detached mode - run backups detached from the terminal + --disable-redo-log Disable InnoDB redo log during replication-from-primary phase of backup. + --emit_stats If set, emit stats to push-based monitoring and stats backends + --external-compressor string command with arguments to use when compressing a backup. + --external-compressor-extension string extension to use when using an external compressor. + --external-decompressor string command with arguments to use when decompressing a backup. + --file_backup_storage_root string Root directory for the file backup storage. + --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups. + --gcs_backup_storage_root string Root prefix for all backup-related object names. + --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. + --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy + --grpc_enable_tracing Enable gRPC tracing. + --grpc_initial_conn_window_size int gRPC initial connection window size + --grpc_initial_window_size int gRPC initial window size + --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s) + --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s) + --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216) + --grpc_prometheus Enable gRPC monitoring with Prometheus. + -h, --help display usage and exit + --incremental_from_pos string Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position + --init_db_name_override string (init parameter) override the name of the db used by vttablet + --init_db_sql_file string path to .sql file to run after mysql_install_db + --init_keyspace string (init parameter) keyspace to use for this tablet + --init_shard string (init parameter) shard to use for this tablet + --initial_backup Instead of restoring from backup, initialize an empty database with the provided init_db_sql_file and upload a backup of that for the shard, if the shard has no backups yet. This can be used to seed a brand new shard with an initial, empty backup. If any backups already exist for the shard, this will be considered a successful no-op. This can only be done before the shard exists in topology (i.e. before any tablets are deployed). + --keep-alive-timeout duration Wait until timeout elapses after a successful backup before shutting down. + --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) + --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) + --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s) + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --log_err_stacks log stack traces for errors + --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --logtostderr log to standard error instead of files + --manifest-external-decompressor string command with arguments to store in the backup manifest when compressing a backup with an external compression engine. + --min_backup_interval duration Only take a new backup if it's been at least this long since the most recent backup. + --min_retention_count int Always keep at least this many of the most recent backups in this backup storage location, even if some are older than the min_retention_time. This must be at least 1 since a backup must always exist to allow new backups to be made (default 1) + --min_retention_time duration Keep each old backup for at least this long before removing it. Set to 0 to disable pruning of old backups. + --mycnf-file string path to my.cnf, if reading all config params from there + --mycnf_bin_log_path string mysql binlog path + --mycnf_data_dir string data directory for mysql + --mycnf_error_log_path string mysql error log path + --mycnf_general_log_path string mysql general log path + --mycnf_innodb_data_home_dir string Innodb data home directory + --mycnf_innodb_log_group_home_dir string Innodb log group home directory + --mycnf_master_info_file string mysql master.info file + --mycnf_mysql_port int port mysql is listening on + --mycnf_pid_file string mysql pid file + --mycnf_relay_log_index_path string mysql relay log index path + --mycnf_relay_log_info_path string mysql relay log info path + --mycnf_relay_log_path string mysql relay log path + --mycnf_secure_file_priv string mysql path for loading secure files + --mycnf_server_id int mysql server id of the server (if specified, mycnf-file will be ignored) + --mycnf_slow_log_path string mysql slow query log path + --mycnf_socket_file string mysql socket file + --mycnf_tmp_dir string mysql tmp directory + --mysql_port int mysql port (default 3306) + --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess") + --mysql_socket string path to the mysql socket + --mysql_timeout duration how long to wait for mysqld startup (default 5m0s) + --port int port for the server + --pprof strings enable profiling + --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) + --remote_operation_timeout duration time to wait for a remote operation (default 15s) + --restart_before_backup Perform a mysqld clean/full restart after applying binlogs, but before taking the backup. Only makes sense to work around xtrabackup bugs. + --s2a_enable_appengine_dialer If true, opportunistically use AppEngine-specific dialer to call S2A. + --s2a_timeout duration Timeout enforced on the connection to the S2A service for handshake. (default 3s) + --s3_backup_aws_endpoint string endpoint of the S3 backend (region must be provided). + --s3_backup_aws_region string AWS region to use. (default "us-east-1") + --s3_backup_aws_retries int AWS request retries. (default -1) + --s3_backup_force_path_style force the s3 path style. + --s3_backup_log_level string determine the S3 loglevel to use from LogOff, LogDebug, LogDebugWithSigning, LogDebugWithHTTPBody, LogDebugWithRequestRetries, LogDebugWithRequestErrors. (default "LogOff") + --s3_backup_server_side_encryption string server-side encryption algorithm (e.g., AES256, aws:kms, sse_c:/path/to/key/file). + --s3_backup_storage_bucket string S3 bucket to use for backups. + --s3_backup_storage_root string root prefix for all backup-related object names. + --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections. + --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) + --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited) + --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512) + --stats_backend string The name of the registered push-based monitoring/stats backend to use + --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars + --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2 + --stats_drop_variables string Variables to be dropped from the list of exported variables. + --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s) + --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting + --tablet_manager_grpc_cert string the cert to use to connect + --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8) + --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100) + --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting + --tablet_manager_grpc_key string the key to use to connect + --tablet_manager_grpc_server_name string the server name to use to validate server certificate + --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc") + --topo_consul_lock_delay duration LockDelay for consul session. (default 15s) + --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth") + --topo_consul_lock_session_ttl string TTL for consul session. + --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s) + --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30) + --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server + --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS + --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS + --topo_global_root string the path of the global topology data in the global topology server + --topo_global_server_address string the address of the global topology server + --topo_implementation string the topology implementation to use + --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass + --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s) + --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64) + --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server + --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS + --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS + --upgrade-safe Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades. + --v Level log level for V logs + -v, --version print binary version + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --xbstream_restore_flags string Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt + --xtrabackup_backup_flags string Flags to pass to backup command. These should be space separated and will be added to the end of the command + --xtrabackup_prepare_flags string Flags to pass to prepare command. These should be space separated and will be added to the end of the command + --xtrabackup_root_path string Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin + --xtrabackup_stream_mode string Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0 (default "tar") + --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400) + --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression + --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation. diff --git a/go/flags/endtoend/vtctlclient.txt b/go/flags/endtoend/vtctlclient.txt index 207f31905f2..7fa186acbd0 100644 --- a/go/flags/endtoend/vtctlclient.txt +++ b/go/flags/endtoend/vtctlclient.txt @@ -1,41 +1,47 @@ Usage of vtctlclient: - --action_timeout duration timeout for the total command (default 1h0m0s) - --alsologtostderr log to standard error as well as files - --datadog-agent-host string host to send spans to. if empty, no tracing will be done - --datadog-agent-port string port to send spans to. if empty, no tracing will be done - --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. - --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy - --grpc_enable_tracing Enable gRPC tracing. - --grpc_initial_conn_window_size int gRPC initial connection window size - --grpc_initial_window_size int gRPC initial window size - --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s) - --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s) - --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216) - --grpc_prometheus Enable gRPC monitoring with Prometheus. - -h, --help display usage and exit - --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done - --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) - --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_err_stacks log stack traces for errors - --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) - --logtostderr log to standard error instead of files - --pprof strings enable profiling - --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) - --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) - --server string server to use for connection - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) - --tracer string tracing service to use (default "noop") - --tracing-enable-logging whether to enable logging in the tracing service - --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1) - --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const") - --v Level log level for V logs - -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging - --vtctl_client_protocol string Protocol to use to talk to the vtctl server. (default "grpc") - --vtctld_grpc_ca string the server ca to use to validate servers when connecting - --vtctld_grpc_cert string the cert to use to connect - --vtctld_grpc_crl string the server crl to use to validate server certificates when connecting - --vtctld_grpc_key string the key to use to connect - --vtctld_grpc_server_name string the server name to use to validate server certificate + --action_timeout duration timeout for the total command (default 1h0m0s) + --alsologtostderr log to standard error as well as files + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. (default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). + --datadog-agent-host string host to send spans to. if empty, no tracing will be done + --datadog-agent-port string port to send spans to. if empty, no tracing will be done + --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. + --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy + --grpc_enable_tracing Enable gRPC tracing. + --grpc_initial_conn_window_size int gRPC initial connection window size + --grpc_initial_window_size int gRPC initial window size + --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s) + --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s) + --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216) + --grpc_prometheus Enable gRPC monitoring with Prometheus. + -h, --help display usage and exit + --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done + --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) + --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --log_err_stacks log stack traces for errors + --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --logtostderr log to standard error instead of files + --pprof strings enable profiling + --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) + --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) + --server string server to use for connection + --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --tracer string tracing service to use (default "noop") + --tracing-enable-logging whether to enable logging in the tracing service + --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1) + --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const") + --v Level log level for V logs + -v, --version print binary version + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vtctl_client_protocol string Protocol to use to talk to the vtctl server. (default "grpc") + --vtctld_grpc_ca string the server ca to use to validate servers when connecting + --vtctld_grpc_cert string the cert to use to connect + --vtctld_grpc_crl string the server crl to use to validate server certificates when connecting + --vtctld_grpc_key string the key to use to connect + --vtctld_grpc_server_name string the server name to use to validate server certificate diff --git a/go/flags/endtoend/vtctld.txt b/go/flags/endtoend/vtctld.txt index 8b6b8fa0913..2cf009be350 100644 --- a/go/flags/endtoend/vtctld.txt +++ b/go/flags/endtoend/vtctld.txt @@ -1,10 +1,33 @@ -Usage of vtctld: +vtctld provides web and gRPC interfaces to manage a single Vitess cluster. +It is usually the first Vitess component to be started after a valid global topology service has been created. + +For the last several releases, vtctld has been transitioning to a newer gRPC service for well-typed cluster management requests. +This is **required** to use programs such as vtadmin and vtctldclient, and The old API and service are deprecated and will be removed in a future release. +To enable this newer service, include "grpc-vtctld" in the --service_map argument. +This is demonstrated in the example usage below. + +Usage: + vtctld [flags] + +Examples: +vtctld \ + --topo_implementation etcd2 \ + --topo_global_server_address localhost:2379 \ + --topo_global_root /vitess/ \ + --service_map 'grpc-vtctl,grpc-vtctld' \ + --backup_storage_implementation file \ + --file_backup_storage_root $VTDATAROOT/backups \ + --port 15000 \ + --grpc_port 15999 + +Flags: --action_timeout duration time to wait for an action before resorting to force (default 1m0s) --alsologtostderr log to standard error as well as files --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path). --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used. + --azblob_backup_buffer_size int The memory buffer size to use in bytes, per file or stripe, when streaming to Azure Blob Service. (default 104857600) --azblob_backup_container_name string Azure Blob Container Name. - --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased). (default 1) + --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased -- a multiple of azblob_backup_buffer_size). (default 1) --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/'). --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin") --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000) @@ -18,17 +41,17 @@ Usage of vtctld: --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified --cell string cell to use --ceph_backup_storage_config string Path to JSON config file for ceph backup storage. (default "ceph_backup_config.json") - --compression-engine-name string compressor engine used for compression. (default "pargzip") - --compression-level int what level to pass to the compressor. (default 1) + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. (default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). --consul_auth_static_file string JSON File to read the topos/tokens from. --datadog-agent-host string host to send spans to. if empty, no tracing will be done --datadog-agent-port string port to send spans to. if empty, no tracing will be done --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents. - --durability_policy string type of durability to enforce. Default is none. Other values are dictated by registered plugins (default "none") --emit_stats If set, emit stats to push-based monitoring and stats backends - --external-compressor string command with arguments to use when compressing a backup. - --external-compressor-extension string extension to use when using an external compressor. - --external-decompressor string command with arguments to use when decompressing a backup. --file_backup_storage_root string Root directory for the file backup storage. --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups. --gcs_backup_storage_root string Root prefix for all backup-related object names. @@ -57,7 +80,7 @@ Usage of vtctld: --grpc_server_initial_window_size int gRPC server initial window size --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s) --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs) - -h, --help display usage and exit + -h, --help help for vtctld --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) @@ -87,7 +110,7 @@ Usage of vtctld: --s3_backup_storage_bucket string S3 bucket to use for backups. --s3_backup_storage_root string root prefix for all backup-related object names. --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections. - --schema_change_check_interval duration How often the schema change dir is checked for schema changes (deprecated: if passed as a bare integer, the duration will be in seconds). (default 1m0s) + --schema_change_check_interval duration How often the schema change dir is checked for schema changes. This value must be positive; if zero or lower, the default of 1m is used. (default 1m0s) --schema_change_controller string Schema change controller is responsible for finding schema changes and responding to schema change events. --schema_change_dir string Directory containing schema changes for all keyspaces. Each keyspace has its own directory, and schema changes are expected to live in '$KEYSPACE/input' dir. (e.g. 'test_keyspace/input/*sql'). Each sql file represents a schema change. --schema_change_replicas_timeout duration How long to wait for replicas to receive a schema change. (default 10s) @@ -121,7 +144,7 @@ Usage of vtctld: --tablet_protocol string Protocol to use to make queryservice RPCs to vttablets. (default "grpc") --tablet_refresh_interval duration Tablet refresh interval. (default 1m0s) --tablet_refresh_known_tablets Whether to reload the tablet's address/port map from topo in case they change. (default true) - --tablet_url_template string Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this. (default "http://{{.GetTabletHostPort}}") + --tablet_url_template string Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this. (default "http://{{ "{{.GetTabletHostPort}}" }}") --topo_consul_lock_delay duration LockDelay for consul session. (default 15s) --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth") --topo_consul_lock_session_ttl string TTL for consul session. @@ -133,9 +156,6 @@ Usage of vtctld: --topo_global_root string the path of the global topology data in the global topology server --topo_global_server_address string the address of the global topology server --topo_implementation string the topology implementation to use - --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config - --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod - --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config --topo_read_concurrency int Concurrency of topo reads. (default 32) --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s) diff --git a/go/flags/endtoend/vtctldclient.txt b/go/flags/endtoend/vtctldclient.txt index 41c0b64076f..651a34af6bd 100644 --- a/go/flags/endtoend/vtctldclient.txt +++ b/go/flags/endtoend/vtctldclient.txt @@ -51,6 +51,8 @@ Available Commands: GetVSchema Prints a JSON representation of a keyspace's topo record. GetWorkflows Gets all vreplication workflows (Reshard, MoveTables, etc) in the given keyspace. LegacyVtctlCommand Invoke a legacy vtctlclient command. Flag parsing is best effort. + MoveTables Perform commands related to moving tables from a source keyspace to a target keyspace. + OnlineDDL Operates on online DDL (schema migrations). PingTablet Checks that the specified tablet is awake and responding to RPCs. This command can be blocked by other in-flight operations. PlannedReparentShard Reparents the shard to a new primary, or away from an old primary. Both the old and new primaries must be up and running. RebuildKeyspaceGraph Rebuilds the serving data for the keyspace(s). This command may trigger an update to all connected clients. @@ -64,6 +66,7 @@ Available Commands: RemoveKeyspaceCell Removes the specified cell from the Cells list for all shards in the specified keyspace (by calling RemoveShardCell on every shard). It also removes the SrvKeyspace for that keyspace in that cell. RemoveShardCell Remove the specified cell from the specified shard's Cells list. ReparentTablet Reparent a tablet to the current primary in the shard. + Reshard Perform commands related to resharding a keyspace. RestoreFromBackup Stops mysqld on the specified tablet and restores the data from either the latest backup or closest before `backup-timestamp`. RunHealthCheck Runs a healthcheck on the remote tablet. SetKeyspaceDurabilityPolicy Sets the durability-policy used by the specified keyspace. @@ -87,6 +90,7 @@ Available Commands: ValidateShard Validates that all nodes reachable from the specified shard are consistent. ValidateVersionKeyspace Validates that the version on the primary tablet of shard 0 matches all of the other tablets in the keyspace. ValidateVersionShard Validates that the version on the primary matches all of the replicas. + Workflow Administer VReplication workflows (Reshard, MoveTables, etc) in the given keyspace. completion Generate the autocompletion script for the specified shell help Help about any command diff --git a/go/flags/endtoend/vtexplain.txt b/go/flags/endtoend/vtexplain.txt index a70067a9bc8..39adec5467c 100644 --- a/go/flags/endtoend/vtexplain.txt +++ b/go/flags/endtoend/vtexplain.txt @@ -1,37 +1,43 @@ Usage of vtexplain: - --alsologtostderr log to standard error as well as files - --batch-interval duration Interval between logical time slots. (default 10ms) - --dbname string Optional database target to override normal routing - --default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. (default PRIMARY) - --execution-mode string The execution mode to simulate -- must be set to multi, legacy-autocommit, or twopc (default "multi") - -h, --help display usage and exit - --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) - --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --ks-shard-map string JSON map of keyspace name -> shard name -> ShardReference object. The inner map is the same as the output of FindAllShardsInKeyspace - --ks-shard-map-file string File containing json blob of keyspace name -> shard name -> ShardReference object - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_err_stacks log stack traces for errors - --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) - --logtostderr log to standard error instead of files - --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess") - --normalize Whether to enable vtgate normalization - --output-mode string Output in human-friendly text or json (default "text") - --planner-version string Sets the query planner version to use when generating the explain output. Valid values are V3 and Gen4. An empty value will use VTGate's default planner - --pprof strings enable profiling - --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) - --replication-mode string The replication mode to simulate -- must be set to either ROW or STATEMENT (default "ROW") - --schema string The SQL table schema - --schema-file string Identifies the file that contains the SQL table schema - --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) - --shards int Number of shards per keyspace. Passing --ks-shard-map/--ks-shard-map-file causes this flag to be ignored. (default 2) - --sql string A list of semicolon-delimited SQL commands to analyze - --sql-file string Identifies the file that contains the SQL commands to analyze - --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited) - --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) - --v Level log level for V logs - -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging - --vschema string Identifies the VTGate routing schema - --vschema-file string Identifies the VTGate routing schema file + --alsologtostderr log to standard error as well as files + --batch-interval duration Interval between logical time slots. (default 10ms) + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. (default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). + --dbname string Optional database target to override normal routing + --default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. (default PRIMARY) + --execution-mode string The execution mode to simulate -- must be set to multi, legacy-autocommit, or twopc (default "multi") + -h, --help display usage and exit + --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) + --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) + --ks-shard-map string JSON map of keyspace name -> shard name -> ShardReference object. The inner map is the same as the output of FindAllShardsInKeyspace + --ks-shard-map-file string File containing json blob of keyspace name -> shard name -> ShardReference object + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --log_err_stacks log stack traces for errors + --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --logtostderr log to standard error instead of files + --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess") + --normalize Whether to enable vtgate normalization + --output-mode string Output in human-friendly text or json (default "text") + --planner-version string Sets the default planner to use. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right + --pprof strings enable profiling + --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) + --replication-mode string The replication mode to simulate -- must be set to either ROW or STATEMENT (default "ROW") + --schema string The SQL table schema + --schema-file string Identifies the file that contains the SQL table schema + --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) + --shards int Number of shards per keyspace. Passing --ks-shard-map/--ks-shard-map-file causes this flag to be ignored. (default 2) + --sql string A list of semicolon-delimited SQL commands to analyze + --sql-file string Identifies the file that contains the SQL commands to analyze + --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited) + --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512) + --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --v Level log level for V logs + -v, --version print binary version + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vschema string Identifies the VTGate routing schema + --vschema-file string Identifies the VTGate routing schema file diff --git a/go/flags/endtoend/vtgate.txt b/go/flags/endtoend/vtgate.txt index 240a4b3aab5..89f6544ca8f 100644 --- a/go/flags/endtoend/vtgate.txt +++ b/go/flags/endtoend/vtgate.txt @@ -1,8 +1,34 @@ -Usage of vtgate: +VTGate is a stateless proxy responsible for accepting requests from applications and routing them to the appropriate tablet server(s) for query execution. It speaks both the MySQL Protocol and a gRPC protocol. + +### Key Options + +* `--srv_topo_cache_ttl`: There may be instances where you will need to increase the cached TTL from the default of 1 second to a higher number: + * You may want to increase this option if you see that your topo leader goes down and keeps your queries waiting for a few seconds. + +Usage: + vtgate [flags] + +Examples: +vtgate \ + --topo_implementation etcd2 \ + --topo_global_server_address localhost:2379 \ + --topo_global_root /vitess/global \ + --log_dir $VTDATAROOT/tmp \ + --port 15001 \ + --grpc_port 15991 \ + --mysql_server_port 15306 \ + --cell test \ + --cells_to_watch test \ + --tablet_types_to_wait PRIMARY,REPLICA \ + --service_map 'grpc-vtgateservice' \ + --pid_file $VTDATAROOT/tmp/vtgate.pid \ + --mysql_auth_server_impl none + +Flags: + --allow-kill-statement Allows the execution of kill statement --allowed_tablet_types strings Specifies the tablet types this vtgate is allowed to route queries to. Should be provided as a comma-separated set of tablet types. --alsologtostderr log to standard error as well as files --buffer_drain_concurrency int Maximum number of requests retried simultaneously. More concurrency will increase the load on the PRIMARY vttablet when draining the buffer. (default 1) - --buffer_implementation string Allowed values: healthcheck (legacy implementation), keyspace_events (default) (default "keyspace_events") --buffer_keyspace_shards string If not empty, limit buffering to these entries (comma separated). Entry format: keyspace or keyspace/shard. Requires --enable_buffer=true. --buffer_max_failover_duration duration Stop buffering completely if a failover takes longer than this duration. (default 20s) --buffer_min_time_between_failovers duration Minimum time between the end of a failover and the start of the next one (tracked per shard). Faster consecutive failovers will not trigger buffering. (default 1m0s) @@ -11,6 +37,12 @@ Usage of vtgate: --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified --cell string cell to use --cells_to_watch string comma-separated list of cells for watching tablets + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. (default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). --consul_auth_static_file string JSON File to read the topos/tokens from. --datadog-agent-host string host to send spans to. if empty, no tracing will be done --datadog-agent-port string port to send spans to. if empty, no tracing will be done @@ -29,11 +61,10 @@ Usage of vtgate: --enable_set_var This will enable the use of MySQL's SET_VAR query hint for certain system variables instead of using reserved connections (default true) --enable_system_settings This will enable the system settings to be changed per session at the database connection level (default true) --foreign_key_mode string This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow (default "allow") - --gate_query_cache_lfu gate server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true) --gate_query_cache_memory int gate server query cache size in bytes, maximum amount of memory to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432) - --gate_query_cache_size int gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a cache. This config controls the expected amount of unique entries in the cache. (default 5000) --gateway_initial_tablet_timeout duration At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type (default 30s) --grpc-use-effective-groups If set, and SSL is not used, will set the immediate caller's security groups from the effective caller id's groups. + --grpc-use-static-authentication-callerid If set, will set the immediate caller id to the username authenticated by the static auth plugin. --grpc_auth_mode string Which auth plugin implementation to use (eg: static) --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon). --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. @@ -62,7 +93,7 @@ Usage of vtgate: --grpc_use_effective_callerid If set, and SSL is not used, will set the immediate caller id from the effective caller id's principal. --healthcheck_retry_delay duration health check retry delay (default 2ms) --healthcheck_timeout duration the health check timeout period (default 1m0s) - -h, --help display usage and exit + -h, --help help for vtgate --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) @@ -82,6 +113,7 @@ Usage of vtgate: --max_payload_size int The threshold for query payloads in bytes. A payload greater than this threshold will result in a failure to handle the query. --message_stream_grace_period duration the amount of time to give for a vttablet to resume if it ends a message stream, usually because of a reparent. (default 30s) --min_number_serving_vttablets int The minimum number of vttablets for each replicating tablet_type (e.g. replica, rdonly) that will be continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving. (default 2) + --mysql-server-keepalive-period duration TCP period between keep-alives --mysql-server-pool-conn-read-buffers If set, the server will pool incoming connection read buffers --mysql_allow_clear_text_without_tls If set, the server will allow the use of a clear text password over non-SSL connections. --mysql_auth_server_impl string Which auth server implementation to use. Options: none, ldap, clientcert, static, vault. (default "static") @@ -125,7 +157,7 @@ Usage of vtgate: --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s) --opentsdb_uri string URI of opentsdb /api/put method --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. - --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: V3, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the gen4 planner and falls back to the V3 planner if the gen4 fails. + --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right --port int port for the server --pprof strings enable profiling --proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket @@ -139,7 +171,6 @@ Usage of vtgate: --remote_operation_timeout duration time to wait for a remote operation (default 15s) --retry-count int retry count (default 2) --schema_change_signal Enable the schema tracker; requires queryserver-config-schema-change-signal to be enabled on the underlying vttablets for this to work (default true) - --schema_change_signal_user string User to be used to send down query to vttablet to retrieve schema changes --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited) @@ -167,7 +198,7 @@ Usage of vtgate: --tablet_refresh_interval duration Tablet refresh interval. (default 1m0s) --tablet_refresh_known_tablets Whether to reload the tablet's address/port map from topo in case they change. (default true) --tablet_types_to_wait strings Wait till connected for specified tablet types during Gateway initialization. Should be provided as a comma-separated set of tablet types. - --tablet_url_template string Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this. (default "http://{{.GetTabletHostPort}}") + --tablet_url_template string Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this. (default "http://{{ "{{.GetTabletHostPort}}" }}") --topo_consul_lock_delay duration LockDelay for consul session. (default 15s) --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth") --topo_consul_lock_session_ttl string TTL for consul session. @@ -179,9 +210,6 @@ Usage of vtgate: --topo_global_root string the path of the global topology data in the global topology server --topo_global_server_address string the address of the global topology server --topo_implementation string the topology implementation to use - --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config - --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod - --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config --topo_read_concurrency int Concurrency of topo reads. (default 32) --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s) @@ -194,11 +222,11 @@ Usage of vtgate: --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1) --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const") --transaction_mode string SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit (default "MULTI") + --truncate-error-len int truncate errors sent to client if they are longer than this value (0 means do not truncate) --v Level log level for V logs -v, --version print binary version --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging --vschema_ddl_authorized_users string List of users authorized to execute vschema ddl operations, or '%' to allow all users. - --vtctld_addr string address of a vtctld instance --vtgate-config-terse-errors prevent bind vars from escaping in returned errors --warn_memory_rows int Warning threshold for in-memory results. A row count higher than this amount will cause the VtGateWarnings.ResultsExceeded counter to be incremented. (default 30000) --warn_payload_size int The warning threshold for query payloads in bytes. A payload greater than this threshold will cause the VtGateWarnings.WarnPayloadSizeExceeded counter to be incremented. diff --git a/go/flags/endtoend/vtgr.txt b/go/flags/endtoend/vtgr.txt deleted file mode 100644 index 9e0798f9fca..00000000000 --- a/go/flags/endtoend/vtgr.txt +++ /dev/null @@ -1,73 +0,0 @@ -Usage of vtgr: - --abort_rebootstrap Don't allow vtgr to rebootstrap an existing group. - --alsologtostderr log to standard error as well as files - --clusters_to_watch strings Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80" - --consul_auth_static_file string JSON File to read the topos/tokens from. - --db_config string Full path to db config file that will be used by VTGR. - --db_flavor string MySQL flavor override. (default "MySQL56") - --db_port int Local mysql port, set this to enable local fast check. - --emit_stats If set, emit stats to push-based monitoring and stats backends - --enable_heartbeat_check Enable heartbeat checking, set together with --group_heartbeat_threshold. - --gr_port int Port to bootstrap a MySQL group. (default 33061) - --group_heartbeat_threshold int VTGR will trigger backoff on inconsistent state if the group heartbeat staleness exceeds this threshold (in seconds). Should be used along with --enable_heartbeat_check. - --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. - --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy - --grpc_enable_tracing Enable gRPC tracing. - --grpc_initial_conn_window_size int gRPC initial connection window size - --grpc_initial_window_size int gRPC initial window size - --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s) - --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s) - --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216) - --grpc_prometheus Enable gRPC monitoring with Prometheus. - -h, --help display usage and exit - --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) - --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_err_stacks log stack traces for errors - --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) - --logtostderr log to standard error instead of files - --ping_tablet_timeout duration time to wait when we ping a tablet (default 2s) - --pprof strings enable profiling - --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) - --refresh_interval duration Refresh interval to load tablets. (default 10s) - --remote_operation_timeout duration time to wait for a remote operation (default 15s) - --scan_interval duration Scan interval to diagnose and repair. (default 3s) - --scan_repair_timeout duration Time to wait for a Diagnose and repair operation. (default 3s) - --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) - --stats_backend string The name of the registered push-based monitoring/stats backend to use - --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars - --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2 - --stats_drop_variables string Variables to be dropped from the list of exported variables. - --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) - --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting - --tablet_manager_grpc_cert string the cert to use to connect - --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8) - --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100) - --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting - --tablet_manager_grpc_key string the key to use to connect - --tablet_manager_grpc_server_name string the server name to use to validate server certificate - --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc") - --topo_consul_lock_delay duration LockDelay for consul session. (default 15s) - --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth") - --topo_consul_lock_session_ttl string TTL for consul session. - --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s) - --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30) - --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server - --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS - --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS - --topo_global_root string the path of the global topology data in the global topology server - --topo_global_server_address string the address of the global topology server - --topo_implementation string the topology implementation to use - --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass - --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s) - --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64) - --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server - --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS - --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS - --v Level log level for V logs - -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging - --vtgr_config string Config file for vtgr. diff --git a/go/flags/endtoend/vtorc.txt b/go/flags/endtoend/vtorc.txt index 60af343eca9..74690187ed0 100644 --- a/go/flags/endtoend/vtorc.txt +++ b/go/flags/endtoend/vtorc.txt @@ -1,87 +1,109 @@ -Usage of vtorc: - --alsologtostderr log to standard error as well as files - --audit-file-location string File location where the audit logs are to be stored - --audit-purge-duration duration Duration for which audit logs are held before being purged. Should be in multiples of days (default 168h0m0s) - --audit-to-backend Whether to store the audit log in the VTOrc database - --audit-to-syslog Whether to store the audit log in the syslog - --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified - --clusters_to_watch strings Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80" - --config string config file name - --consul_auth_static_file string JSON File to read the topos/tokens from. - --emit_stats If set, emit stats to push-based monitoring and stats backends - --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. - --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy - --grpc_enable_tracing Enable gRPC tracing. - --grpc_initial_conn_window_size int gRPC initial connection window size - --grpc_initial_window_size int gRPC initial window size - --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s) - --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s) - --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216) - --grpc_prometheus Enable gRPC monitoring with Prometheus. - -h, --help display usage and exit - --instance-poll-time duration Timer duration on which VTOrc refreshes MySQL information (default 5s) - --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) - --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) - --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_err_stacks log stack traces for errors - --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) - --logtostderr log to standard error instead of files - --max-stack-size int configure the maximum stack size in bytes (default 67108864) - --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s) - --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s) - --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. - --port int port for the server - --pprof strings enable profiling - --prevent-cross-cell-failover Prevent VTOrc from promoting a primary in a different cell than the current primary in case of a failover - --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) - --reasonable-replication-lag duration Maximum replication lag on replicas which is deemed to be acceptable (default 10s) - --recovery-period-block-duration duration Duration for which a new recovery is blocked on an instance after running a recovery (default 30s) - --recovery-poll-duration duration Timer duration on which VTOrc polls its database to run a recovery (default 1s) - --remote_operation_timeout duration time to wait for a remote operation (default 15s) - --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) - --shutdown_wait_time duration Maximum time to wait for VTOrc to release all the locks that it is holding before shutting down on SIGTERM (default 30s) - --snapshot-topology-interval duration Timer duration on which VTOrc takes a snapshot of the current MySQL information it has in the database. Should be in multiple of hours - --sqlite-data-file string SQLite Datafile to use as VTOrc's database (default "file::memory:?mode=memory&cache=shared") - --stats_backend string The name of the registered push-based monitoring/stats backend to use - --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars - --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2 - --stats_drop_variables string Variables to be dropped from the list of exported variables. - --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) - --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class - --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting - --tablet_manager_grpc_cert string the cert to use to connect - --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8) - --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100) - --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting - --tablet_manager_grpc_key string the key to use to connect - --tablet_manager_grpc_server_name string the server name to use to validate server certificate - --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc") - --topo-information-refresh-duration duration Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topology server (default 15s) - --topo_consul_lock_delay duration LockDelay for consul session. (default 15s) - --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth") - --topo_consul_lock_session_ttl string TTL for consul session. - --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s) - --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30) - --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server - --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS - --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS - --topo_global_root string the path of the global topology data in the global topology server - --topo_global_server_address string the address of the global topology server - --topo_implementation string the topology implementation to use - --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config - --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod - --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config - --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass - --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s) - --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64) - --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server - --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS - --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS - --v Level log level for V logs - -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging - --wait-replicas-timeout duration Duration for which to wait for replica's to respond when issuing RPCs (default 30s) +VTOrc is the automated fault detection and repair tool in Vitess. + +Usage: + vtorc [flags] + +Examples: +vtorc \ + --topo_implementation etcd2 \ + --topo_global_server_address localhost:2379 \ + --topo_global_root /vitess/global \ + --log_dir $VTDATAROOT/tmp \ + --port 15000 \ + --recovery-period-block-duration "10m" \ + --instance-poll-time "1s" \ + --topo-information-refresh-duration "30s" \ + --alsologtostderr + +Flags: + --allow-emergency-reparent Whether VTOrc should be allowed to run emergency reparent operation when it detects a dead primary (default true) + --alsologtostderr log to standard error as well as files + --audit-file-location string File location where the audit logs are to be stored + --audit-purge-duration duration Duration for which audit logs are held before being purged. Should be in multiples of days (default 168h0m0s) + --audit-to-backend Whether to store the audit log in the VTOrc database + --audit-to-syslog Whether to store the audit log in the syslog + --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified + --change-tablets-with-errant-gtid-to-drained Whether VTOrc should be changing the type of tablets with errant GTIDs to DRAINED + --clusters_to_watch strings Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80" + --config string config file name + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. (default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). + --consul_auth_static_file string JSON File to read the topos/tokens from. + --emit_stats If set, emit stats to push-based monitoring and stats backends + --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. + --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy + --grpc_enable_tracing Enable gRPC tracing. + --grpc_initial_conn_window_size int gRPC initial connection window size + --grpc_initial_window_size int gRPC initial window size + --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s) + --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s) + --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216) + --grpc_prometheus Enable gRPC monitoring with Prometheus. + -h, --help help for vtorc + --instance-poll-time duration Timer duration on which VTOrc refreshes MySQL information (default 5s) + --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) + --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) + --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) + --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s) + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --log_err_stacks log stack traces for errors + --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --logtostderr log to standard error instead of files + --max-stack-size int configure the maximum stack size in bytes (default 67108864) + --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s) + --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s) + --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. + --port int port for the server + --pprof strings enable profiling + --prevent-cross-cell-failover Prevent VTOrc from promoting a primary in a different cell than the current primary in case of a failover + --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) + --reasonable-replication-lag duration Maximum replication lag on replicas which is deemed to be acceptable (default 10s) + --recovery-period-block-duration duration Duration for which a new recovery is blocked on an instance after running a recovery (default 30s) + --recovery-poll-duration duration Timer duration on which VTOrc polls its database to run a recovery (default 1s) + --remote_operation_timeout duration time to wait for a remote operation (default 15s) + --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) + --shutdown_wait_time duration Maximum time to wait for VTOrc to release all the locks that it is holding before shutting down on SIGTERM (default 30s) + --snapshot-topology-interval duration Timer duration on which VTOrc takes a snapshot of the current MySQL information it has in the database. Should be in multiple of hours + --sqlite-data-file string SQLite Datafile to use as VTOrc's database (default "file::memory:?mode=memory&cache=shared") + --stats_backend string The name of the registered push-based monitoring/stats backend to use + --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars + --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2 + --stats_drop_variables string Variables to be dropped from the list of exported variables. + --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s) + --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class + --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting + --tablet_manager_grpc_cert string the cert to use to connect + --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8) + --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100) + --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting + --tablet_manager_grpc_key string the key to use to connect + --tablet_manager_grpc_server_name string the server name to use to validate server certificate + --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc") + --topo-information-refresh-duration duration Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topology server (default 15s) + --topo_consul_lock_delay duration LockDelay for consul session. (default 15s) + --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth") + --topo_consul_lock_session_ttl string TTL for consul session. + --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s) + --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30) + --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server + --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS + --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS + --topo_global_root string the path of the global topology data in the global topology server + --topo_global_server_address string the address of the global topology server + --topo_implementation string the topology implementation to use + --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass + --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s) + --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64) + --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server + --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS + --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS + --v Level log level for V logs + -v, --version print binary version + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --wait-replicas-timeout duration Duration for which to wait for replica's to respond when issuing RPCs (default 30s) diff --git a/go/flags/endtoend/vttablet.txt b/go/flags/endtoend/vttablet.txt index ec9cdb1a277..98ea41a5f8e 100644 --- a/go/flags/endtoend/vttablet.txt +++ b/go/flags/endtoend/vttablet.txt @@ -1,11 +1,55 @@ -Usage of vttablet: +The VTTablet server _controls_ a running MySQL server. VTTablet supports two primary types of deployments: + +* Managed MySQL (most common) +* External MySQL + +In addition to these deployment types, a partially managed VTTablet is also possible by setting `--disable_active_reparents`. + +### Managed MySQL + +In this mode, Vitess actively manages MySQL. + +### External MySQL. + +In this mode, an external MySQL can be used such as AWS RDS, AWS Aurora, Google CloudSQL; or just an existing (vanilla) MySQL installation. + +See "Unmanaged Tablet" for the full guide. + +Even if a MySQL is external, you can still make vttablet perform some management functions. They are as follows: + +* `--disable_active_reparents`: If this flag is set, then any reparent or replica commands will not be allowed. These are InitShardPrimary, PlannedReparentShard, EmergencyReparentShard, and ReparentTablet. In this mode, you should use the TabletExternallyReparented command to inform vitess of the current primary. +* `--replication_connect_retry`: This value is give to mysql when it connects a replica to the primary as the retry duration parameter. +* `--enable_replication_reporter`: If this flag is set, then vttablet will transmit replica lag related information to the vtgates, which will allow it to balance load better. Additionally, enabling this will also cause vttablet to restart replication if it was stopped. However, it will do this only if `--disable_active_reparents` was not turned on. +* `--heartbeat_enable` and `--heartbeat_interval duration`: cause vttablet to write heartbeats to the sidecar database. This information is also used by the replication reporter to assess replica lag. + +Usage: + vttablet [flags] + +Examples: + +vttablet \ + --topo_implementation etcd2 \ + --topo_global_server_address localhost:2379 \ + --topo_global_root /vitess/ \ + --tablet-path $alias \ + --init_keyspace $keyspace \ + --init_shard $shard \ + --init_tablet_type $tablet_type \ + --port $port \ + --grpc_port $grpc_port \ + --service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream' + +`$alias` needs to be of the form: `-id`, and the cell should match one of the local cells that was created in the topology. The id can be left padded with zeroes: `cell-100` and `cell-000000100` are synonymous. + +Flags: --alsologtostderr log to standard error as well as files --app_idle_timeout duration Idle timeout for app connections (default 1m0s) --app_pool_size int Size of the connection pool for app connections (default 40) --azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path). --azblob_backup_account_name string Azure Storage Account name for backups; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_NAME will be used. + --azblob_backup_buffer_size int The memory buffer size to use in bytes, per file or stripe, when streaming to Azure Blob Service. (default 104857600) --azblob_backup_container_name string Azure Blob Container Name. - --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased). (default 1) + --azblob_backup_parallelism int Azure Blob operation parallelism (requires extra memory when increased -- a multiple of azblob_backup_buffer_size). (default 1) --azblob_backup_storage_root string Root prefix for all backup-related Azure Blobs; this should exclude both initial and trailing '/' (e.g. just 'a/b' not '/a/b/'). --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin") --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000) @@ -34,6 +78,12 @@ Usage of vttablet: --ceph_backup_storage_config string Path to JSON config file for ceph backup storage. (default "ceph_backup_config.json") --compression-engine-name string compressor engine used for compression. (default "pargzip") --compression-level int what level to pass to the compressor. (default 1) + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. (default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). --consolidator-stream-query-size int Configure the stream consolidator query size in bytes. Setting to 0 disables the stream consolidator. (default 2097152) --consolidator-stream-total-size int Configure the stream consolidator total size in bytes. Setting to 0 disables the stream consolidator. (default 134217728) --consul_auth_static_file string JSON File to read the topos/tokens from. @@ -93,13 +143,12 @@ Usage of vttablet: --emit_stats If set, emit stats to push-based monitoring and stats backends --enable-consolidator Synonym to -enable_consolidator (default true) --enable-consolidator-replicas Synonym to -enable_consolidator_replicas - --enable-lag-throttler Synonym to -enable_lag_throttler + --enable-per-workload-table-metrics If true, query counts and query error metrics include a label that identifies the workload --enable-tx-throttler Synonym to -enable_tx_throttler --enable_consolidator This option enables the query consolidator. (default true) --enable_consolidator_replicas This option enables the query consolidator only on replicas. --enable_hot_row_protection If true, incoming transactions for the same row (range) will be queued and cannot consume all txpool slots. --enable_hot_row_protection_dry_run If true, hot row protection is not enforced but logs if transactions would have been queued. - --enable_lag_throttler If true, vttablet will run a throttler service, and will implicitly enable heartbeats --enable_replication_reporter Use polling to track replication lag. --enable_transaction_limit If true, limit on number of transactions open at the same time will be enforced for all users. User trying to open a new transaction after exhausting their limit will receive an error immediately, regardless of whether there are available slots or not. --enable_transaction_limit_dry_run If true, limit on number of transactions open at the same time will be tracked for all users, but not enforced. @@ -143,10 +192,10 @@ Usage of vttablet: --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s) --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs) --health_check_interval duration Interval between health checks (default 20s) - --heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the table _vt.heartbeat. The result is used to inform the serving state of the vttablet via healthchecks. + --heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the sidecar database's heartbeat table. The result is used to inform the serving state of the vttablet via healthchecks. --heartbeat_interval duration How frequently to read and write replication heartbeat. (default 1s) --heartbeat_on_demand_duration duration If non-zero, heartbeats are only written upon consumer request, and only run for up to given duration following the request. Frequent requests can keep the heartbeat running consistently; when requests are infrequent heartbeat may completely stop between requests - -h, --help display usage and exit + -h, --help help for vttablet --hot_row_protection_concurrent_transactions int Number of concurrent transactions let through to the txpool/MySQL for the same hot row. Should be > 1 to have enough 'ready' transactions in MySQL and benefit from a pipelining effect. (default 5) --hot_row_protection_max_global_queue_size int Global queue limit across all row (ranges). Useful to prevent that the queue can grow unbounded. (default 1000) --hot_row_protection_max_queue_size int Maximum number of BeginExecute RPCs which will be queued for the same row (range). (default 20) @@ -169,6 +218,7 @@ Usage of vttablet: --log_queries_to_file string Enable query logging to the specified file --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) --logtostderr log to standard error instead of files + --manifest-external-decompressor string command with arguments to store in the backup manifest when compressing a backup with an external compression engine. --max-stack-size int configure the maximum stack size in bytes (default 67108864) --max_concurrent_online_ddl int Maximum number of online DDL changes that may run concurrently (default 256) --migration_check_interval duration Interval between migration checks (default 1m0s) @@ -211,34 +261,32 @@ Usage of vttablet: --queryserver-config-acl-exempt-acl string an acl that exempt from table acl checking (this acl is free to access any vitess tables). --queryserver-config-annotate-queries prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type --queryserver-config-enable-table-acl-dry-run If this flag is enabled, tabletserver will emit monitoring metrics and let the request pass regardless of table acl check results - --queryserver-config-idle-timeout float query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 1800) + --queryserver-config-idle-timeout duration query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 30m0s) --queryserver-config-max-result-size int query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries. (default 10000) --queryserver-config-message-postpone-cap int query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem. (default 4) - --queryserver-config-olap-transaction-timeout float query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed (default 30) + --queryserver-config-olap-transaction-timeout duration query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed (default 30s) --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting - --queryserver-config-pool-conn-max-lifetime float query server connection max lifetime (in seconds), vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool. + --queryserver-config-pool-conn-max-lifetime duration query server connection max lifetime (in seconds), vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool. (default 0s) --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16) - --queryserver-config-query-cache-lfu query server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true) --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432) - --queryserver-config-query-cache-size int query server query cache size, maximum number of queries to be cached. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 5000) - --queryserver-config-query-pool-timeout float query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead. + --queryserver-config-query-pool-timeout duration query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead. (default 0s) --queryserver-config-query-pool-waiter-cap int query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection (default 5000) - --queryserver-config-query-timeout float query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30) + --queryserver-config-query-timeout duration query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30s) --queryserver-config-schema-change-signal query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work (default true) - --queryserver-config-schema-change-signal-interval float query server schema change signal interval defines at which interval the query server shall send schema updates to vtgate. (default 5) - --queryserver-config-schema-reload-time float query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 1800) + --queryserver-config-schema-reload-time duration query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 30m0s) --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768) --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200) - --queryserver-config-stream-pool-timeout float query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout. + --queryserver-config-stream-pool-timeout duration query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout. (default 0s) --queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection --queryserver-config-strict-table-acl only allow queries that pass table acl checks --queryserver-config-terse-errors prevent bind vars from escaping in client error messages --queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20) - --queryserver-config-transaction-timeout float query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value (default 30) - --queryserver-config-txpool-timeout float query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1) + --queryserver-config-transaction-timeout duration query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value (default 30s) + --queryserver-config-truncate-error-len int truncate errors sent to client if they are longer than this value (0 means do not truncate) + --queryserver-config-txpool-timeout duration query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1s) --queryserver-config-txpool-waiter-cap int query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection (default 5000) --queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this - --queryserver-enable-settings-pool Enable pooling of connections with modified system settings + --queryserver-enable-settings-pool Enable pooling of connections with modified system settings (default true) --queryserver-enable-views Enable views support in vttablet. --queryserver_enable_online_ddl Enable online DDL. (default true) --redact-debug-ui-queries redact full queries and bind variables from debug UI @@ -260,11 +308,13 @@ Usage of vttablet: --s3_backup_storage_root string root prefix for all backup-related object names. --s3_backup_tls_skip_verify_cert skip the 'certificate is valid' check for SSL connections. --sanitize_log_messages Remove potentially sensitive information in tablet INFO, WARNING, and ERROR log messages such as query parameters. + --schema-change-reload-timeout duration query server schema change reload timeout, this is how long to wait for the signaled schema reload operation to complete before giving up (default 30s) + --schema-version-max-age-seconds int max age of schema version records to kept in memory by the vreplication historian --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice --serving_state_grace_period duration how long to pause after broadcasting health to vtgate, before enforcing a new serving state --shard_sync_retry_delay duration delay between retries of updates to keep the tablet and its shard record in sync (default 30s) - --shutdown_grace_period float how long to wait (in seconds) for queries and transactions to complete during graceful shutdown. + --shutdown_grace_period duration how long to wait (in seconds) for queries and transactions to complete during graceful shutdown. (default 0s) --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited) --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512) --srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s) @@ -301,12 +351,7 @@ Usage of vttablet: --tablet_manager_grpc_server_name string the server name to use to validate server certificate --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc") --tablet_protocol string Protocol to use to make queryservice RPCs to vttablets. (default "grpc") - --throttle_check_as_check_self Should throttler/check return a throttler/check-self result (changes throttler behavior for writes) - --throttle_metrics_query SELECT Override default heartbeat/lag metric. Use either SELECT (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively. - --throttle_metrics_threshold float Override default throttle threshold, respective to -throttle_metrics_query (default 1.7976931348623157e+308) --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included (default "replica") - --throttle_threshold duration Replication lag threshold for default lag throttling (default 1s) - --throttler-config-via-topo When 'true', read config from topo service and ignore throttle_threshold, throttle_metrics_threshold, throttle_metrics_query, throttle_check_as_check_self --topo_consul_lock_delay duration LockDelay for consul session. (default 15s) --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth") --topo_consul_lock_session_ttl string TTL for consul session. @@ -318,9 +363,6 @@ Usage of vttablet: --topo_global_root string the path of the global topology data in the global topology server --topo_global_server_address string the address of the global topology server --topo_implementation string the topology implementation to use - --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config - --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod - --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s) --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64) @@ -343,12 +385,15 @@ Usage of vttablet: --twopc_abandon_age float time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved. --twopc_coordinator_address string address of the (VTGate) process(es) that will be used to notify of abandoned transactions. --twopc_enable if the flag is on, 2pc is enabled. Other 2pc flags must be supplied. - --tx-throttler-config string Synonym to -tx_throttler_config (default "target_replication_lag_sec: 2\nmax_replication_lag_sec: 10\ninitial_rate: 100\nmax_increase: 1\nemergency_decrease: 0.5\nmin_duration_between_increases_sec: 40\nmax_duration_between_increases_sec: 62\nmin_duration_between_decreases_sec: 20\nspread_backlog_across_sec: 20\nage_bad_rate_after_sec: 180\nbad_rate_increase: 0.1\nmax_rate_approach_threshold: 0.9\n") + --tx-throttler-config string Synonym to -tx_throttler_config (default "target_replication_lag_sec:2 max_replication_lag_sec:10 initial_rate:100 max_increase:1 emergency_decrease:0.5 min_duration_between_increases_sec:40 max_duration_between_increases_sec:62 min_duration_between_decreases_sec:20 spread_backlog_across_sec:20 age_bad_rate_after_sec:180 bad_rate_increase:0.1 max_rate_approach_threshold:0.9") + --tx-throttler-default-priority int Default priority assigned to queries that lack priority information (default 100) + --tx-throttler-dry-run If present, the transaction throttler only records metrics about requests received and throttled, but does not actually throttle any requests. --tx-throttler-healthcheck-cells strings Synonym to -tx_throttler_healthcheck_cells - --tx_throttler_config string The configuration of the transaction throttler as a text formatted throttlerdata.Configuration protocol buffer message (default "target_replication_lag_sec: 2\nmax_replication_lag_sec: 10\ninitial_rate: 100\nmax_increase: 1\nemergency_decrease: 0.5\nmin_duration_between_increases_sec: 40\nmax_duration_between_increases_sec: 62\nmin_duration_between_decreases_sec: 20\nspread_backlog_across_sec: 20\nage_bad_rate_after_sec: 180\nbad_rate_increase: 0.1\nmax_rate_approach_threshold: 0.9\n") + --tx-throttler-tablet-types strings A comma-separated list of tablet types. Only tablets of this type are monitored for replication lag by the transaction throttler. Supported types are replica and/or rdonly. (default replica) + --tx-throttler-topo-refresh-interval duration The rate that the transaction throttler will refresh the topology to find cells. (default 5m0s) + --tx_throttler_config string The configuration of the transaction throttler as a text-formatted throttlerdata.Configuration protocol buffer message. (default "target_replication_lag_sec:2 max_replication_lag_sec:10 initial_rate:100 max_increase:1 emergency_decrease:0.5 min_duration_between_increases_sec:40 max_duration_between_increases_sec:62 min_duration_between_decreases_sec:20 spread_backlog_across_sec:20 age_bad_rate_after_sec:180 bad_rate_increase:0.1 max_rate_approach_threshold:0.9") --tx_throttler_healthcheck_cells strings A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler. --unhealthy_threshold duration replication lag after which a replica is considered unhealthy (default 2h0m0s) - --use_super_read_only Set super_read_only flag when performing planned failover. --v Level log level for V logs -v, --version print binary version --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging @@ -356,7 +401,7 @@ Usage of vttablet: --vreplication_copy_phase_duration duration Duration for each copy phase loop (before running the next catchup: default 1h) (default 1h0m0s) --vreplication_copy_phase_max_innodb_history_list_length int The maximum InnoDB transaction history that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 1000000) --vreplication_copy_phase_max_mysql_replication_lag int The maximum MySQL replication lag (in seconds) that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 43200) - --vreplication_experimental_flags int (Bitmask) of experimental features in vreplication to enable (default 1) + --vreplication_experimental_flags int (Bitmask) of experimental features in vreplication to enable (default 3) --vreplication_healthcheck_retry_delay duration healthcheck retry delay (default 5s) --vreplication_healthcheck_timeout duration healthcheck retry delay (default 1m0s) --vreplication_healthcheck_topology_refresh duration refresh interval for re-reading the topology (default 30s) @@ -364,12 +409,11 @@ Usage of vttablet: --vreplication_max_time_to_retry_on_error duration stop automatically retrying when we've had consecutive failures with the same error for this long after the first occurrence --vreplication_replica_lag_tolerance duration Replica lag threshold duration: once lag is below this we switch from copy phase to the replication (streaming) phase (default 1m0s) --vreplication_retry_delay duration delay before retrying a failed workflow event in the replication phase (default 5s) - --vreplication_store_compressed_gtid Store compressed gtids in the pos column of _vt.vreplication + --vreplication_store_compressed_gtid Store compressed gtids in the pos column of the sidecar database's vreplication table --vreplication_tablet_type string comma separated list of tablet types used as a source (default "in_order:REPLICA,PRIMARY") --vstream-binlog-rotation-threshold int Byte size at which a VStreamer will attempt to rotate the source's open binary log before starting a GTID snapshot based stream (e.g. a ResultStreamer or RowStreamer) (default 67108864) --vstream_dynamic_packet_size Enable dynamic packet sizing for VReplication. This will adjust the packet size during replication to improve performance. (default true) --vstream_packet_size int Suggested packet size for VReplication streamer. This is used only as a recommendation. The actual packet size may be more or less than this amount. (default 250000) - --vtctld_addr string address of a vtctld instance --vtgate_protocol string how to talk to vtgate (default "grpc") --vttablet_skip_buildinfo_tags string comma-separated list of buildinfo tags to skip from merging with --init_tags. each tag is either an exact match or a regular expression of the form '/regexp/'. (default "/.*/") --wait_for_backup_interval duration (init restore parameter) if this is greater than 0, instead of starting up empty when no backups are found, keep checking at this interval for a backup to appear diff --git a/go/flags/endtoend/vttestserver.txt b/go/flags/endtoend/vttestserver.txt index 57c79b06bca..5849f0c1e81 100644 --- a/go/flags/endtoend/vttestserver.txt +++ b/go/flags/endtoend/vttestserver.txt @@ -15,6 +15,12 @@ Usage of vttestserver: --charset string MySQL charset (default "utf8mb4") --compression-engine-name string compressor engine used for compression. (default "pargzip") --compression-level int what level to pass to the compressor. (default 1) + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. (default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). --consul_auth_static_file string JSON File to read the topos/tokens from. --data_dir string Directory where the data files will be placed, defaults to a random directory under /vt/vtdataroot --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s) @@ -68,6 +74,7 @@ Usage of vttestserver: --log_err_stacks log stack traces for errors --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) --logtostderr log to standard error instead of files + --manifest-external-decompressor string command with arguments to store in the backup manifest when compressing a backup with an external compression engine. --max-stack-size int configure the maximum stack size in bytes (default 67108864) --max_table_shard_size int The maximum number of initial rows in a table shard. Ignored if--initialize_with_random_data is false. The actual number is chosen randomly (default 10000) --min_table_shard_size int The minimum number of initial rows in a table shard. Ignored if--initialize_with_random_data is false. The actual number is chosen randomly. (default 1000) @@ -80,9 +87,9 @@ Usage of vttestserver: --num_shards strings Comma separated shard count (one per keyspace) (default [2]) --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s) --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s) - --persistent_mode If this flag is set, the MySQL data directory is not cleaned up when LocalCluster.TearDown() is called. This is useful for running vttestserver as a database container in local developer environments. Note that db migration files (--schema_dir option) and seeding of random data (--initialize_with_random_data option) will only run during cluster startup if the data directory does not already exist. vschema migrations are run every time the cluster starts, since persistence for the topology server has not been implemented yet + --persistent_mode If this flag is set, the MySQL data directory is not cleaned up when LocalCluster.TearDown() is called. This is useful for running vttestserver as a database container in local developer environments. Note that db migration files (--schema_dir option) and seeding of random data (--initialize_with_random_data option) will only run during cluster startup if the data directory does not already exist. Changes to VSchema are persisted across cluster restarts using a simple watcher if the --data_dir argument is specified. --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. - --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: V3, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the new gen4 planner and falls back to the V3 planner if the gen4 fails. + --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled) --port int Port to use for vtcombo. If this is 0, a random port will be chosen. --pprof strings enable profiling diff --git a/go/flags/endtoend/zkctl.txt b/go/flags/endtoend/zkctl.txt index e7e41c4cb4d..6b0473d1cb2 100644 --- a/go/flags/endtoend/zkctl.txt +++ b/go/flags/endtoend/zkctl.txt @@ -1,18 +1,25 @@ Usage of zkctl: - --alsologtostderr log to standard error as well as files - -h, --help display usage and exit - --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) - --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_err_stacks log stack traces for errors - --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) - --logtostderr log to standard error instead of files - --pprof strings enable profiling - --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) - --v Level log level for V logs - -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging - --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803") - --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname + --alsologtostderr log to standard error as well as files + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. (default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). + -h, --help display usage and exit + --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) + --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --log_err_stacks log stack traces for errors + --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --logtostderr log to standard error instead of files + --pprof strings enable profiling + --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) + --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --v Level log level for V logs + -v, --version print binary version + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803") + --zk.extra stringArray extra config line(s) to append verbatim to config (flag can be specified more than once) + --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname diff --git a/go/flags/endtoend/zkctld.txt b/go/flags/endtoend/zkctld.txt index 6ec026be814..e957f7a3b3c 100644 --- a/go/flags/endtoend/zkctld.txt +++ b/go/flags/endtoend/zkctld.txt @@ -1,19 +1,26 @@ Usage of zkctld: - --alsologtostderr log to standard error as well as files - -h, --help display usage and exit - --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) - --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) - --log_dir string If non-empty, write log files in this directory - --log_err_stacks log stack traces for errors - --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) - --logtostderr log to standard error instead of files - --pprof strings enable profiling - --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) - --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) - --v Level log level for V logs - -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging - --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803") - --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname + --alsologtostderr log to standard error as well as files + --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored. + --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn) + --config-name string Name of the config file (without extension) to search for. (default "vtconfig") + --config-path strings Paths to search for config files in. (default [{{ .Workdir }}]) + --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s) + --config-type string Config file type (omit to infer config type from file extension). + -h, --help display usage and exit + --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) + --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) + --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_dir string If non-empty, write log files in this directory + --log_err_stacks log stack traces for errors + --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --logtostderr log to standard error instead of files + --pprof strings enable profiling + --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) + --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) + --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --v Level log level for V logs + -v, --version print binary version + --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803") + --zk.extra stringArray extra config line(s) to append verbatim to config (flag can be specified more than once) + --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname diff --git a/go/flagutil/deprecated_float64_seconds.go b/go/flagutil/deprecated_float64_seconds.go new file mode 100644 index 00000000000..d9afb11aaa2 --- /dev/null +++ b/go/flagutil/deprecated_float64_seconds.go @@ -0,0 +1,72 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flagutil + +import ( + "strconv" + "time" + + "vitess.io/vitess/go/vt/log" +) + +type DeprecatedFloat64Seconds struct { + name string + val time.Duration +} + +var _ Value[time.Duration] = (*DeprecatedFloat64Seconds)(nil) + +func NewDeprecatedFloat64Seconds(name string, defVal time.Duration) DeprecatedFloat64Seconds { + return DeprecatedFloat64Seconds{ + name: name, + val: defVal, + } +} + +func (f *DeprecatedFloat64Seconds) String() string { return f.val.String() } +func (f *DeprecatedFloat64Seconds) Type() string { return "duration" } + +func (f *DeprecatedFloat64Seconds) Set(arg string) error { + v, err := time.ParseDuration(arg) + if err != nil { + log.Warningf("failed to parse %s as duration (err: %v); falling back to parsing to %s as seconds. this is deprecated and will be removed in a future release", f.name, err, f.val) + + n, err := strconv.ParseFloat(arg, 64) + if err != nil { + return err + } + + v = time.Duration(n * float64(time.Second)) + } + + f.val = v + return nil +} + +func (f DeprecatedFloat64Seconds) Clone() DeprecatedFloat64Seconds { + return DeprecatedFloat64Seconds{ + name: f.name, + val: f.val, + } +} + +func (f DeprecatedFloat64Seconds) Name() string { return f.name } +func (f DeprecatedFloat64Seconds) Get() time.Duration { return f.val } + +func (f *DeprecatedFloat64Seconds) UnmarshalJSON(data []byte) error { + return f.Set(string(data)) +} diff --git a/go/flagutil/enum.go b/go/flagutil/enum.go new file mode 100644 index 00000000000..5bc279ee493 --- /dev/null +++ b/go/flagutil/enum.go @@ -0,0 +1,117 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreedto in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flagutil + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// StringEnum provides a string-like flag value that raises an error if given a +// value not in the set of allowed choices. +// +// This parse-time validation can be case-sensitive or not, depending on which +// constructor (NewStringEnum vs NewCaseInsensitiveStringEnum) was used. +type StringEnum struct { + name string + val string + + caseInsensitive bool + choices map[string]struct{} + choiceNames []string + choiceMapper func(string) string +} + +// ErrInvalidChoice is returned when parsing a value that is not a valid choice +// for the StringEnum flag. +var ErrInvalidChoice = errors.New("invalid choice for enum") + +// NewStringEnum returns a new string enum flag with the given name, default, +// and choices. +// +// Parse-time validation is case-sensitive. +func NewStringEnum(name string, initialValue string, choices []string) *StringEnum { + return newStringEnum(name, initialValue, choices, false) +} + +// NewCaseInsensitiveStringEnum returns a new string enum flag with the given +// name, default, and choices. +// +// Parse-time validation is case-insensitive. +func NewCaseInsensitiveStringEnum(name string, initialValue string, choices []string) *StringEnum { + return newStringEnum(name, initialValue, choices, true) +} + +func newStringEnum(name string, initialValue string, choices []string, caseInsensitive bool) *StringEnum { + choiceMapper := func(s string) string { return s } + choiceMap := map[string]struct{}{} + + if caseInsensitive { + choiceMapper = strings.ToLower + } + + for _, choice := range choices { + choiceMap[choiceMapper(choice)] = struct{}{} + } + + choiceNames := make([]string, 0, len(choiceMap)) + for choice := range choiceMap { + choiceNames = append(choiceNames, choice) + } + sort.Strings(choiceNames) + + if initialValue != "" { + if _, ok := choiceMap[choiceMapper(initialValue)]; !ok { + // This will panic if we've misconfigured something in the source code. + // It's not a user-error, so it had damn-well be better caught by a test + // somewhere. + panic(fmt.Errorf("%w (valid choices: %v)", ErrInvalidChoice, choiceNames)) + } + } + + return &StringEnum{ + name: name, + val: initialValue, + choices: choiceMap, + choiceNames: choiceNames, + choiceMapper: choiceMapper, + } +} + +// Set is part of the pflag.Value interface. +func (s *StringEnum) Set(arg string) error { + if _, ok := s.choices[s.choiceMapper(arg)]; !ok { + msg := "%w (valid choices: %v" + if s.caseInsensitive { + msg += " [case insensitive]" + } + msg += ")" + return fmt.Errorf(msg, ErrInvalidChoice, s.choiceNames) + } + + s.val = arg + + return nil +} + +// String is part of the pflag.Value interface. +func (s *StringEnum) String() string { return s.val } + +// Type is part of the pflag.Value interface. +func (s *StringEnum) Type() string { return "string" } diff --git a/go/flagutil/flagutil.go b/go/flagutil/flagutil.go index d010ea0bc4f..ebf4ccef485 100644 --- a/go/flagutil/flagutil.go +++ b/go/flagutil/flagutil.go @@ -22,13 +22,9 @@ import ( "errors" "fmt" "sort" - "strconv" "strings" - "time" "github.com/spf13/pflag" - - "vitess.io/vitess/go/vt/log" ) var ( @@ -193,62 +189,18 @@ func DualFormatBoolVar(fs *pflag.FlagSet, p *bool, name string, value bool, usag } } -// DurationOrIntVar implements pflag.Value for flags that have historically been -// of type IntVar (and then converted to seconds or some other unit) but are -// now transitioning to a proper DurationVar type. -// -// When parsing a command-line argument, it will first attempt to parse the -// argument using time.ParseDuration; if this fails, it will fallback to -// strconv.ParseInt and multiply that value by the `fallback` unit value to get -// a duration. If the initial ParseDuration fails, it will also log a -// deprecation warning. -type DurationOrIntVar struct { - name string - val time.Duration - fallback time.Duration -} - -// NewDurationOrIntVar returns a new DurationOrIntVar struct with the given name, -// default value, and fallback unit. -// -// The name is used only when issuing a deprecation warning (so the user knows -// which flag needs its argument format updated). -// -// The `fallback` argument is used when parsing an argument as an int (legacy behavior) as the multiplier -// to get a time.Duration value. As an example, if a flag used to be "the amount -// of time to wait in seconds" with a default of 60, you would do: -// -// myFlag := flagutil.NewDurationOrIntVar("my-flag", time.Minute /* 60 second default */, time.Second /* fallback unit to multiply by */) -func NewDurationOrIntVar(name string, val time.Duration, fallback time.Duration) *DurationOrIntVar { - return &DurationOrIntVar{name: name, val: val, fallback: fallback} -} - -// Set is part of the pflag.Value interface. -func (v *DurationOrIntVar) Set(s string) error { - d, derr := time.ParseDuration(s) - if derr != nil { - msg := &strings.Builder{} - fmt.Fprintf(msg, "non-duration value passed to %s (error: %s)", v.name, derr) - - i, ierr := strconv.ParseInt(s, 10, 64) - if ierr != nil { - log.Warningf("%s; attempted to parse as int in %s, which failed with %s", msg.String(), v.fallback, ierr) - return ierr - } +// DualFormatVar creates a flag which supports both dashes and underscores +func DualFormatVar(fs *pflag.FlagSet, val pflag.Value, name string, usage string) { + dashes := strings.Replace(name, "_", "-", -1) + underscores := strings.Replace(name, "-", "_", -1) - d = time.Duration(i) * v.fallback - log.Warningf("%s; parsed as int to %s, which is deprecated behavior", d) + fs.Var(val, underscores, usage) + if dashes != underscores { + fs.Var(val, dashes, fmt.Sprintf("Synonym to -%s", underscores)) } - - v.val = d - return nil } -// String is part of the pflag.Value interface. -func (v *DurationOrIntVar) String() string { return v.val.String() } - -// Type is part of the pflag.Type interface. -func (v *DurationOrIntVar) Type() string { return "duration" } - -// Value returns the underlying Duration value passed to the flag. -func (v *DurationOrIntVar) Value() time.Duration { return v.val } +type Value[T any] interface { + pflag.Value + Get() T +} diff --git a/go/flagutil/flagutil_test.go b/go/flagutil/flagutil_test.go index 4dbba7b832d..f95c46a53f7 100644 --- a/go/flagutil/flagutil_test.go +++ b/go/flagutil/flagutil_test.go @@ -19,11 +19,8 @@ package flagutil import ( "strings" "testing" - "time" "github.com/spf13/pflag" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestStringList(t *testing.T) { @@ -109,53 +106,3 @@ func TestStringMap(t *testing.T) { } } } - -func TestDurationOrIntVar(t *testing.T) { - getflag := func() *DurationOrIntVar { return NewDurationOrIntVar("test-flag", time.Minute, time.Second) } - - tests := []struct { - name string - arg string - want time.Duration - wantErr bool - }{ - { - name: "duration format", - arg: "1h", - want: time.Hour, - }, - { - name: "legacy format", - arg: "10", - want: 10 * time.Second, - }, - { - name: "invalid", - arg: "this is not a duration or an int", - want: 0, - wantErr: true, - }, - { - name: "default value", - arg: "", - want: time.Minute, - }, - } - - for _, tt := range tests { - flag := getflag() - if tt.arg == "" { - assert.Equal(t, tt.want, flag.Value()) - return - } - - err := flag.Set(tt.arg) - if tt.wantErr { - assert.Error(t, err) - return - } - - require.NoError(t, err) - assert.Equal(t, tt.want, flag.Value()) - } -} diff --git a/go/hack/hack.go b/go/hack/hack.go index 8b042950d1e..95bf11f5530 100644 --- a/go/hack/hack.go +++ b/go/hack/hack.go @@ -21,7 +21,6 @@ limitations under the License. package hack import ( - "reflect" "unsafe" ) @@ -37,10 +36,5 @@ func String(b []byte) (s string) { // StringBytes returns the underlying bytes for a string. Modifying this byte slice // will lead to undefined behavior. func StringBytes(s string) []byte { - var b []byte - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - hdr.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - hdr.Cap = len(s) - hdr.Len = len(s) - return b + return unsafe.Slice(unsafe.StringData(s), len(s)) } diff --git a/go/hack/runtime.go b/go/hack/runtime.go index d1ccb699460..c80ac1d38e5 100644 --- a/go/hack/runtime.go +++ b/go/hack/runtime.go @@ -19,7 +19,6 @@ limitations under the License. package hack import ( - "reflect" "unsafe" ) @@ -35,8 +34,7 @@ func strhash(p unsafe.Pointer, h uintptr) uintptr // This is an optimal hash function which takes an input seed and is potentially implemented in hardware // for most architectures. This is the same hash function that the language's `map` uses. func RuntimeMemhash(b []byte, seed uint64) uint64 { - pstring := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - return uint64(memhash(unsafe.Pointer(pstring.Data), uintptr(seed), uintptr(pstring.Len))) + return uint64(memhash(unsafe.Pointer(unsafe.SliceData(b)), uintptr(seed), uintptr(len(b)))) } // RuntimeStrhash provides access to the Go runtime's default hash function for strings. diff --git a/go/internal/flag/flag.go b/go/internal/flag/flag.go index 6f087143610..ade4907e573 100644 --- a/go/internal/flag/flag.go +++ b/go/internal/flag/flag.go @@ -42,7 +42,7 @@ import ( // // See VEP-4, phase 1 for details: https://github.com/vitessio/enhancements/blob/c766ea905e55409cddeb666d6073cd2ac4c9783e/veps/vep-4.md#phase-1-preparation func Parse(fs *flag.FlagSet) { - preventGlogVFlagFromClobberingVersionFlagShorthand(fs) + PreventGlogVFlagFromClobberingVersionFlagShorthand(fs) fs.AddGoFlagSet(goflag.CommandLine) if fs.Lookup("help") == nil { @@ -115,7 +115,7 @@ func TrickGlog() { // // IMPORTANT: This must be called prior to AddGoFlagSet in both Parse and // ParseFlagsForTest. -func preventGlogVFlagFromClobberingVersionFlagShorthand(fs *flag.FlagSet) { +func PreventGlogVFlagFromClobberingVersionFlagShorthand(fs *flag.FlagSet) { // N.B. we use goflag.Lookup instead of this package's Lookup, because we // explicitly want to check only the goflags. if f := goflag.Lookup("v"); f != nil { @@ -178,7 +178,7 @@ func ParseFlagsForTest() { } // parse remaining flags including the log-related ones like --alsologtostderr - preventGlogVFlagFromClobberingVersionFlagShorthand(flag.CommandLine) + PreventGlogVFlagFromClobberingVersionFlagShorthand(flag.CommandLine) flag.CommandLine.AddGoFlagSet(goflag.CommandLine) flag.Parse() } diff --git a/go/ioutil/timeout_closer.go b/go/ioutil/timeout_closer.go new file mode 100644 index 00000000000..1f025fbdb44 --- /dev/null +++ b/go/ioutil/timeout_closer.go @@ -0,0 +1,59 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ioutil + +import ( + "context" + "io" + "time" +) + +// TimeoutCloser is an io.Closer that has a timeout for executing the Close() function. +type TimeoutCloser struct { + ctx context.Context + closer io.Closer + timeout time.Duration +} + +func NewTimeoutCloser(ctx context.Context, closer io.Closer, timeout time.Duration) *TimeoutCloser { + return &TimeoutCloser{ + ctx: ctx, + closer: closer, + timeout: timeout, + } +} + +func (c *TimeoutCloser) Close() error { + done := make(chan error) + + ctx, cancel := context.WithTimeout(c.ctx, c.timeout) + defer cancel() + + go func() { + defer close(done) + select { + case done <- c.closer.Close(): + case <-ctx.Done(): + } + }() + select { + case err := <-done: + return err + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/go/ioutil/timeout_closer_test.go b/go/ioutil/timeout_closer_test.go new file mode 100644 index 00000000000..9aabe307c85 --- /dev/null +++ b/go/ioutil/timeout_closer_test.go @@ -0,0 +1,53 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ioutil + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type hangCloser struct { + hang bool +} + +func (c hangCloser) Close() error { + if c.hang { + ch := make(chan bool) + ch <- true // hang forever + } + return nil +} + +func TestTimeoutCloser(t *testing.T) { + ctx := context.Background() + { + closer := NewTimeoutCloser(ctx, &hangCloser{hang: false}, time.Second) + err := closer.Close() + require.NoError(t, err) + } + { + closer := NewTimeoutCloser(ctx, &hangCloser{hang: true}, time.Second) + err := closer.Close() + require.Error(t, err) + assert.ErrorIs(t, err, context.DeadlineExceeded) + } +} diff --git a/go/json2/unmarshal.go b/go/json2/unmarshal.go index 4f2def0473e..e382b8ad47a 100644 --- a/go/json2/unmarshal.go +++ b/go/json2/unmarshal.go @@ -33,7 +33,8 @@ var carriageReturn = []byte("\n") // efficient and should not be used for high QPS operations. func Unmarshal(data []byte, v any) error { if pb, ok := v.(proto.Message); ok { - return annotate(data, protojson.Unmarshal(data, pb)) + opts := protojson.UnmarshalOptions{DiscardUnknown: true} + return annotate(data, opts.Unmarshal(data, pb)) } return annotate(data, json.Unmarshal(data, v)) } diff --git a/go/maps2/maps.go b/go/maps2/maps.go new file mode 100644 index 00000000000..56191bea1a7 --- /dev/null +++ b/go/maps2/maps.go @@ -0,0 +1,37 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package maps2 + +// Keys returns the keys of the map m. +// The keys will be in an indeterminate order. +func Keys[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} + +// Values returns the values of the map m. +// The values will be in an indeterminate order. +func Values[M ~map[K]V, K comparable, V any](m M) []V { + r := make([]V, 0, len(m)) + for _, v := range m { + r = append(r, v) + } + return r +} diff --git a/go/mathutil/equivalence_relation.go b/go/mathutil/equivalence_relation.go new file mode 100644 index 00000000000..d820d943194 --- /dev/null +++ b/go/mathutil/equivalence_relation.go @@ -0,0 +1,145 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mathutil + +import ( + "fmt" + "sort" +) + +type UnknownElementError struct { + element string +} + +func (e *UnknownElementError) Error() string { + return fmt.Sprintf("unknown element %s", e.element) +} + +type UnknownClassError struct { + class int +} + +func (e *UnknownClassError) Error() string { + return fmt.Sprintf("unknown class %d", e.class) +} + +// EquivalenceRelation implements a mathematical equivalence relation. +// Elements in this set are named, ie identified by strings. +// Elements are potentially grouped together in an equivalence relation. Each element belongs in exactly one +// relation, and each relation has 1 or more elements. +// If a,b are in the same relation, and if b,c are in the same relation, it follows that a,c are in the same relation. +// therefore two different entity relations cannot have any shared elements. +// Functions of this struct are not thread safe. +type EquivalenceRelation struct { + elementClassMap map[string]int + classElementsMap map[int]([]string) + + classCounter int +} + +func NewEquivalenceRelation() *EquivalenceRelation { + return &EquivalenceRelation{ + elementClassMap: make(map[string]int), + classElementsMap: make(map[int][]string), + } +} + +// Add adds a single element to the set. The element is associated with its own unique class +func (r *EquivalenceRelation) Add(element string) { + if _, ok := r.elementClassMap[element]; ok { + // element already exists + return + } + r.elementClassMap[element] = r.classCounter + r.classElementsMap[r.classCounter] = []string{element} + r.classCounter++ // Set the grounds for next element +} + +// AddAll adds multiple elements to the set. Each element is associated with its own unique class +func (r *EquivalenceRelation) AddAll(elements []string) { + for _, element := range elements { + r.Add(element) + } +} + +// ElementClass returns the class id for the given element, or errors if the element is unknown +func (r *EquivalenceRelation) ElementClass(element string) (int, error) { + class, ok := r.elementClassMap[element] + if !ok { + return 0, &UnknownElementError{element: element} + } + return class, nil +} + +// Declare two elements to be associated in the same class. If they're already in the same class, nothing is done. +// Otherwise, this merges their two classes into one. Specifically, the classes are merged into the lower-valued +// class of the two, ie the "earlier" class of the two, and the "later" class is erased. +func (r *EquivalenceRelation) Relate(element1, element2 string) (int, error) { + class1, err := r.ElementClass(element1) + if err != nil { + return class1, err + } + class2, err := r.ElementClass(element2) + if err != nil { + return class1, err + } + if class1 == class2 { + // already associated + return class1, nil + } + // We deterministically merge into the class with the lower Id + if class1 > class2 { + class1, class2 = class2, class1 + } + r.classElementsMap[class1] = append(r.classElementsMap[class1], r.classElementsMap[class2]...) + for _, element := range r.classElementsMap[class2] { + r.elementClassMap[element] = class1 + } + delete(r.classElementsMap, class2) + + return class1, nil +} + +// Related returns true when both elements are in the same equivalence class. An error is returned if +// either element is unknown +func (r *EquivalenceRelation) Related(element1, element2 string) (bool, error) { + class1, err := r.ElementClass(element1) + if err != nil { + return false, err + } + class2, err := r.ElementClass(element2) + if err != nil { + return false, err + } + return class1 == class2, nil +} + +// OrderedClasses returns the list of classes, increasing +func (r *EquivalenceRelation) OrderedClasses() []int { + // The classes are the map's keys. + classes := make([]int, 0, len(r.classElementsMap)) + for class := range r.classElementsMap { + classes = append(classes, class) + } + sort.Ints(classes) + return classes +} + +// Map returns the complete map of classes to list of elements. +func (r *EquivalenceRelation) Map() map[int]([]string) { + return r.classElementsMap +} diff --git a/go/mathutil/equivalence_relation_test.go b/go/mathutil/equivalence_relation_test.go new file mode 100644 index 00000000000..3873562a080 --- /dev/null +++ b/go/mathutil/equivalence_relation_test.go @@ -0,0 +1,176 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mathutil + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEquivalenceRelation(t *testing.T) { + initialElements := []string{"a", "b", "c", "d", "e", "f"} + trivialExpect := map[int]([]string){ + 0: []string{"a"}, + 1: []string{"b"}, + 2: []string{"c"}, + 3: []string{"d"}, + 4: []string{"e"}, + 5: []string{"f"}, + } + trivialExpectClasses := []int{0, 1, 2, 3, 4, 5} + tt := []struct { + name string + relations []string + expect map[int]([]string) + classes []int + }{ + { + name: "empty", + expect: trivialExpect, + classes: trivialExpectClasses, + }, + { + name: "reflective", + relations: []string{"aa"}, + expect: trivialExpect, + classes: trivialExpectClasses, + }, + { + name: "reflective2", + relations: []string{"aa", "bb", "ff", "dd"}, + expect: trivialExpect, + classes: trivialExpectClasses, + }, + { + name: "relate", + relations: []string{"ab"}, + expect: map[int]([]string){ + 0: []string{"a", "b"}, + 2: []string{"c"}, + 3: []string{"d"}, + 4: []string{"e"}, + 5: []string{"f"}, + }, + classes: []int{0, 2, 3, 4, 5}, + }, + { + name: "relate ef", + relations: []string{"ef"}, + expect: map[int]([]string){ + 0: []string{"a"}, + 1: []string{"b"}, + 2: []string{"c"}, + 3: []string{"d"}, + 4: []string{"e", "f"}, + }, + classes: []int{0, 1, 2, 3, 4}, + }, + { + name: "relate, reverse", + relations: []string{"ba"}, + expect: map[int]([]string){ + 0: []string{"a", "b"}, + 2: []string{"c"}, + 3: []string{"d"}, + 4: []string{"e"}, + 5: []string{"f"}, + }, + classes: []int{0, 2, 3, 4, 5}, + }, + { + name: "relate, relate reverse, reflective", + relations: []string{"ba", "ab", "aa"}, + expect: map[int]([]string){ + 0: []string{"a", "b"}, + 2: []string{"c"}, + 3: []string{"d"}, + 4: []string{"e"}, + 5: []string{"f"}, + }, + classes: []int{0, 2, 3, 4, 5}, + }, + { + name: "relate, ab cd", + relations: []string{"ba", "cd"}, + expect: map[int]([]string){ + 0: []string{"a", "b"}, + 2: []string{"c", "d"}, + 4: []string{"e"}, + 5: []string{"f"}, + }, + classes: []int{0, 2, 4, 5}, + }, + { + name: "relate, multi", + relations: []string{"ba", "cd", "ef"}, + expect: map[int]([]string){ + 0: []string{"a", "b"}, + 2: []string{"c", "d"}, + 4: []string{"e", "f"}, + }, + classes: []int{0, 2, 4}, + }, + { + name: "relate, multi join", + relations: []string{"ba", "cb", "fc"}, + expect: map[int]([]string){ + 0: []string{"a", "b", "c", "f"}, + 3: []string{"d"}, + 4: []string{"e"}, + }, + classes: []int{0, 3, 4}, + }, + { + name: "relate, multi, join", + relations: []string{"ba", "cd", "ef", "eb"}, + expect: map[int]([]string){ + 0: []string{"a", "b", "e", "f"}, + 2: []string{"c", "d"}, + }, + classes: []int{0, 2}, + }, + { + name: "relate, multi, join all", + relations: []string{"ba", "cd", "ef", "eb", "fc"}, + expect: map[int]([]string){ + 0: []string{"a", "b", "e", "f", "c", "d"}, + }, + classes: []int{0}, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + r := NewEquivalenceRelation() + r.AddAll(initialElements) + trivialM := r.Map() + assert.Equal(t, trivialExpect, trivialM) + require.Equal(t, len(initialElements), len(trivialM)) + + for _, relation := range tc.relations { + require.Equal(t, 2, len(relation)) + _, err := r.Relate(relation[0:1], relation[1:2]) + assert.NoError(t, err) + } + m := r.Map() + assert.Equal(t, tc.expect, m) + assert.Equal(t, tc.classes, r.OrderedClasses()) + }) + } +} diff --git a/go/mysql/auth_server.go b/go/mysql/auth_server.go index 18fa2ac2785..f4bda2655a5 100644 --- a/go/mysql/auth_server.go +++ b/go/mysql/auth_server.go @@ -26,6 +26,7 @@ import ( "net" "sync" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -427,7 +428,7 @@ func (n *mysqlNativePasswordAuthMethod) AllowClearTextWithoutTLS() bool { func (n *mysqlNativePasswordAuthMethod) HandleAuthPluginData(conn *Conn, user string, serverAuthPluginData []byte, clientAuthPluginData []byte, remoteAddr net.Addr) (Getter, error) { if serverAuthPluginData[len(serverAuthPluginData)-1] != 0x00 { - return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } salt := serverAuthPluginData[:len(serverAuthPluginData)-1] @@ -519,7 +520,7 @@ func (n *mysqlCachingSha2AuthMethod) AllowClearTextWithoutTLS() bool { func (n *mysqlCachingSha2AuthMethod) HandleAuthPluginData(c *Conn, user string, serverAuthPluginData []byte, clientAuthPluginData []byte, remoteAddr net.Addr) (Getter, error) { if serverAuthPluginData[len(serverAuthPluginData)-1] != 0x00 { - return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } salt := serverAuthPluginData[:len(serverAuthPluginData)-1] @@ -531,7 +532,7 @@ func (n *mysqlCachingSha2AuthMethod) HandleAuthPluginData(c *Conn, user string, switch cacheState { case AuthRejected: - return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) case AuthAccepted: // We need to write a more data packet to indicate the // handshake completed properly. This will be followed @@ -546,7 +547,7 @@ func (n *mysqlCachingSha2AuthMethod) HandleAuthPluginData(c *Conn, user string, return result, nil case AuthNeedMoreData: if !c.TLSEnabled() && !c.IsUnixSocket() { - return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } data, pos := c.startEphemeralPacketWithHeader(2) @@ -562,7 +563,7 @@ func (n *mysqlCachingSha2AuthMethod) HandleAuthPluginData(c *Conn, user string, return n.storage.UserEntryWithPassword(c, user, password, remoteAddr) default: // Somehow someone returned an unknown state, let's error with access denied. - return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } } diff --git a/go/mysql/auth_server_clientcert_test.go b/go/mysql/auth_server_clientcert_test.go index 4528ee5dbf4..28ed19fd9c5 100644 --- a/go/mysql/auth_server_clientcert_test.go +++ b/go/mysql/auth_server_clientcert_test.go @@ -45,7 +45,7 @@ func TestValidCert(t *testing.T) { authServer := newAuthServerClientCert() // Create the listener, so we can get its host. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -114,7 +114,7 @@ func TestNoCert(t *testing.T) { authServer := newAuthServerClientCert() // Create the listener, so we can get its host. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() diff --git a/go/mysql/auth_server_static.go b/go/mysql/auth_server_static.go index d01bab23cdd..fae886039f0 100644 --- a/go/mysql/auth_server_static.go +++ b/go/mysql/auth_server_static.go @@ -29,6 +29,8 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" @@ -179,7 +181,7 @@ func (a *AuthServerStatic) UserEntryWithPassword(conn *Conn, user string, passwo a.mu.Unlock() if !ok { - return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } for _, entry := range entries { @@ -188,7 +190,7 @@ func (a *AuthServerStatic) UserEntryWithPassword(conn *Conn, user string, passwo return &StaticUserData{entry.UserData, entry.Groups}, nil } } - return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } // UserEntryWithHash implements password lookup based on a @@ -199,14 +201,14 @@ func (a *AuthServerStatic) UserEntryWithHash(conn *Conn, salt []byte, user strin a.mu.Unlock() if !ok { - return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } for _, entry := range entries { if entry.MysqlNativePassword != "" { hash, err := DecodeMysqlNativePasswordHex(entry.MysqlNativePassword) if err != nil { - return &StaticUserData{entry.UserData, entry.Groups}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{entry.UserData, entry.Groups}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } isPass := VerifyHashedMysqlNativePassword(authResponse, salt, hash) @@ -221,7 +223,7 @@ func (a *AuthServerStatic) UserEntryWithHash(conn *Conn, salt []byte, user strin } } } - return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } // UserEntryWithCacheHash implements password lookup based on a @@ -232,7 +234,7 @@ func (a *AuthServerStatic) UserEntryWithCacheHash(conn *Conn, salt []byte, user a.mu.Unlock() if !ok { - return &StaticUserData{}, AuthRejected, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{}, AuthRejected, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } for _, entry := range entries { @@ -243,7 +245,7 @@ func (a *AuthServerStatic) UserEntryWithCacheHash(conn *Conn, salt []byte, user return &StaticUserData{entry.UserData, entry.Groups}, AuthAccepted, nil } } - return &StaticUserData{}, AuthRejected, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user) + return &StaticUserData{}, AuthRejected, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user) } // AuthMethods returns the AuthMethod instances this auth server can handle. diff --git a/go/mysql/auth_server_static_flaky_test.go b/go/mysql/auth_server_static_flaky_test.go index 52e8fee8ab4..12ae74e0d60 100644 --- a/go/mysql/auth_server_static_flaky_test.go +++ b/go/mysql/auth_server_static_flaky_test.go @@ -126,9 +126,7 @@ func TestStaticConfigHUP(t *testing.T) { mu.Lock() defer mu.Unlock() // delete registered Auth server - for auth := range authServers { - delete(authServers, auth) - } + clear(authServers) } func TestStaticConfigHUPWithRotation(t *testing.T) { diff --git a/go/mysql/binlog_event_json.go b/go/mysql/binlog/binlog_json.go similarity index 50% rename from go/mysql/binlog_event_json.go rename to go/mysql/binlog/binlog_json.go index 82b53311c0f..2a0aba3163a 100644 --- a/go/mysql/binlog_event_json.go +++ b/go/mysql/binlog/binlog_json.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,26 +14,27 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package binlog import ( "encoding/binary" "fmt" "math" + "strconv" - "vitess.io/vitess/go/vt/log" - - "github.com/spyzhov/ajson" - + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/mysql/format" + "vitess.io/vitess/go/mysql/json" querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" ) /* - References: -* C source of mysql json data type implementation -https://fossies.org/linux/mysql/sql/json_binary.cc +* Docs for MySQL JSON binary format: +https://dev.mysql.com/doc/dev/mysql-server/latest/json__binary_8h.html * nice description of MySQL's json representation https://lafengnan.gitbooks.io/blog/content/mysql/chapter2.html @@ -41,127 +42,24 @@ https://lafengnan.gitbooks.io/blog/content/mysql/chapter2.html * java/python connector links: useful for test cases and reverse engineering https://github.com/shyiko/mysql-binlog-connector-java/pull/119/files https://github.com/noplay/python-mysql-replication/blob/175df28cc8b536a68522ff9b09dc5440adad6094/pymysqlreplication/packet.py - */ -// region debug-only -// TODO remove once the json refactor is tested live -var jsonDebug = false - -func jlog(tpl string, vals ...any) { - if !jsonDebug { - return - } - log.Infof("JSON:"+tpl+"\n", vals...) - _ = printASCIIBytes -} - -func printASCIIBytes(data []byte) { - if !jsonDebug { - return - } - s := "" - for _, c := range data { - if c < 127 && c > 32 { - s += fmt.Sprintf("%c ", c) - } else { - s += fmt.Sprintf("%02d ", c) - } - } - log.Infof("[%s]", s) -} - -// only used for logging/debugging -var jsonTypeToName = map[uint]string{ - jsonSmallObject: "sObject", - jsonLargeObject: "lObject", - jsonSmallArray: "sArray", - jsonLargeArray: "lArray", - jsonLiteral: "literal", - jsonInt16: "int16", - jsonUint16: "uint16", - jsonInt32: "int32", - jsonUint32: "uint32", - jsonInt64: "int64", - jsonUint64: "uint64", - jsonDouble: "double", //0x0b - jsonString: "string", //0x0c a utf8mb4 string - jsonOpaque: "opaque", //0x0f "custom" data -} - -func jsonDataTypeToString(typ uint) string { - sType, ok := jsonTypeToName[typ] - if !ok { - return "undefined" - } - return sType -} - -//endregion - -// provides the single API function, used to convert json from binary format used in binlogs to a string representation -func getJSONValue(data []byte) (string, error) { - var ast *ajson.Node +// ParseBinaryJSON provides the parsing function from the mysql binary json +// representation to a JSON value instance. +func ParseBinaryJSON(data []byte) (*json.Value, error) { var err error + var node *json.Value if len(data) == 0 { - ast = ajson.NullNode("") + node = json.ValueNull } else { - ast, err = binlogJSON.parse(data) + node, err = binparserNode(jsonDataType(data[0]), data, 1) if err != nil { - return "", err + return nil, err } } - bytes, err := ajson.Marshal(ast) - if err != nil { - return "", err - } - return string(bytes), nil -} - -var binlogJSON *BinlogJSON - -func init() { - binlogJSON = &BinlogJSON{ - plugins: make(map[jsonDataType]jsonPlugin), - } -} - -//region plugin manager - -// BinlogJSON contains the plugins for all json types and methods for parsing the -// binary json representation of a specific type from the binlog -type BinlogJSON struct { - plugins map[jsonDataType]jsonPlugin -} - -// parse decodes a value from the binlog -func (jh *BinlogJSON) parse(data []byte) (node *ajson.Node, err error) { - // pos keeps track of the offset of the current node being parsed - pos := 0 - typ := data[pos] - jlog("Top level object is type %s\n", jsonDataTypeToString(uint(typ))) - pos++ - return jh.getNode(jsonDataType(typ), data, pos) -} - -// each plugin registers itself in init()s -func (jh *BinlogJSON) register(typ jsonDataType, Plugin jsonPlugin) { - jh.plugins[typ] = Plugin -} - -// gets the node at this position -func (jh *BinlogJSON) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) { - Plugin := jh.plugins[typ] - if Plugin == nil { - return nil, fmt.Errorf("Plugin not found for type %d", typ) - } - return Plugin.getNode(typ, data, pos) + return node, nil } -//endregion - -//region enums - // jsonDataType has the values used in the mysql json binary representation to denote types. // We have string, literal(true/false/null), number, object or array types. // large object => doc size > 64K: you get pointers instead of inline values. @@ -195,10 +93,6 @@ const ( jsonFalseLiteral = '\x02' ) -//endregion - -//region util funcs - // in objects and arrays some values are inlined, other types have offsets into the raw data. // literals (true/false/null) and 16bit integers are always inlined. // for large documents 32bit integers are also inlined. @@ -235,9 +129,7 @@ func readInt(data []byte, pos int, large bool) (int, int) { } // readVariableLength implements the logic to decode the length -// of an arbitrarily long string as implemented by the mysql server -// https://github.com/mysql/mysql-server/blob/5.7/sql/json_binary.cc#L234 -// https://github.com/mysql/mysql-server/blob/8.0/sql/json_binary.cc#L283 +// of an arbitrarily long string. // readVariableLength also returns the new position (by advancing the position by the number of bytes read). func readVariableLength(data []byte, pos int) (int, int) { var bb byte @@ -257,15 +149,43 @@ func readVariableLength(data []byte, pos int) (int, int) { return length, pos } +var binparserFn [16]func(dataType jsonDataType, data []byte, pos int) (*json.Value, error) + +func init() { + binparserFn[jsonSmallObject] = binparserObject + binparserFn[jsonLargeObject] = binparserObject + binparserFn[jsonSmallArray] = binparserArray + binparserFn[jsonLargeArray] = binparserArray + binparserFn[jsonLiteral] = binparserLiteral + binparserFn[jsonInt16] = binparserInt + binparserFn[jsonUint16] = binparserInt + binparserFn[jsonInt32] = binparserInt + binparserFn[jsonUint32] = binparserInt + binparserFn[jsonInt64] = binparserInt + binparserFn[jsonUint64] = binparserInt + binparserFn[jsonDouble] = binparserInt + binparserFn[jsonString] = binparserString + binparserFn[jsonOpaque] = binparserOpaque +} + +func binparserNode(typ jsonDataType, data []byte, pos int) (node *json.Value, err error) { + if int(typ) < len(binparserFn) { + if p := binparserFn[typ]; p != nil { + return p(typ, data, pos) + } + } + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid json type: %d", typ) +} + // getElem returns the json value found inside json objects and arrays at the provided position -func getElem(data []byte, pos int, large bool) (*ajson.Node, int, error) { - var elem *ajson.Node +func binparserElement(data []byte, pos int, large bool) (*json.Value, int, error) { + var elem *json.Value var err error var offset int typ := jsonDataType(data[pos]) pos++ if isInline(typ, large) { - elem, err = binlogJSON.getNode(typ, data, pos) + elem, err = binparserNode(typ, data, pos) if err != nil { return nil, 0, err } @@ -277,12 +197,11 @@ func getElem(data []byte, pos int, large bool) (*ajson.Node, int, error) { } else { offset, pos = readInt(data, pos, large) if offset >= len(data) { // consistency check, should only come here is there is a bug in the code - log.Errorf("unable to decode element") return nil, 0, fmt.Errorf("unable to decode element: %+v", data) } newData := data[offset:] //newPos ignored because this is an offset into the "extra" section of the buffer - elem, err = binlogJSON.getNode(typ, newData, 1) + elem, err = binparserNode(typ, newData, 1) if err != nil { return nil, 0, err } @@ -292,142 +211,70 @@ func getElem(data []byte, pos int, large bool) (*ajson.Node, int, error) { //endregion -// json sub-type interface -// one plugin for each sub-type, plugins are stateless and initialized on load via individual init() functions -type jsonPlugin interface { - getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) +var binaryIntSizes = map[jsonDataType]int{ + jsonUint64: 8, + jsonInt64: 8, + jsonUint32: 4, + jsonInt32: 4, + jsonUint16: 2, + jsonInt16: 2, + jsonDouble: 8, } -type jsonPluginInfo struct { - name string - types []jsonDataType -} - -//region int plugin - -func init() { - newIntPlugin() -} - -type intPlugin struct { - info *jsonPluginInfo - sizes map[jsonDataType]int -} - -var _ jsonPlugin = (*intPlugin)(nil) - -func (ih intPlugin) getVal(typ jsonDataType, data []byte, pos int) (value float64) { +func binparserInt(typ jsonDataType, data []byte, pos int) (*json.Value, error) { var val uint64 - var val2 float64 - size := ih.sizes[typ] + size := binaryIntSizes[typ] for i := 0; i < size; i++ { val = val + uint64(data[pos+i])<<(8*i) } + var s string + var n json.NumberType switch typ { case jsonInt16: - val2 = float64(int16(val)) + s = strconv.FormatInt(int64(int16(val)), 10) + n = json.NumberTypeSigned case jsonUint16: - val2 = float64(uint16(val)) + s = strconv.FormatUint(uint64(uint16(val)), 10) + n = json.NumberTypeUnsigned case jsonInt32: - val2 = float64(int32(val)) + s = strconv.FormatInt(int64(int32(val)), 10) + n = json.NumberTypeSigned case jsonUint32: - val2 = float64(uint32(val)) + s = strconv.FormatUint(uint64(uint32(val)), 10) + n = json.NumberTypeUnsigned case jsonInt64: - val2 = float64(int64(val)) + s = strconv.FormatInt(int64(val), 10) + n = json.NumberTypeSigned case jsonUint64: - val2 = float64(val) + s = strconv.FormatUint(val, 10) + n = json.NumberTypeUnsigned case jsonDouble: - val2 = math.Float64frombits(val) - } - return val2 -} - -func (ih intPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) { - val := ih.getVal(typ, data, pos) - node = ajson.NumericNode("", val) - return node, nil -} - -func newIntPlugin() *intPlugin { - ih := &intPlugin{ - info: &jsonPluginInfo{ - name: "Int", - types: []jsonDataType{jsonInt64, jsonInt32, jsonInt16, jsonUint16, jsonUint32, jsonUint64, jsonDouble}, - }, - sizes: make(map[jsonDataType]int), - } - ih.sizes = map[jsonDataType]int{ - jsonUint64: 8, - jsonInt64: 8, - jsonUint32: 4, - jsonInt32: 4, - jsonUint16: 2, - jsonInt16: 2, - jsonDouble: 8, - } - for _, typ := range ih.info.types { - binlogJSON.register(typ, ih) + s = hack.String(format.FormatFloat(math.Float64frombits(val))) + n = json.NumberTypeFloat + default: + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid int type: %d", typ) } - return ih + return json.NewNumber(s, n), nil } -//endregion - -//region literal plugin - -func init() { - newLiteralPlugin() -} - -type literalPlugin struct { - info *jsonPluginInfo -} - -var _ jsonPlugin = (*literalPlugin)(nil) - -func (lh literalPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) { +func binparserLiteral(_ jsonDataType, data []byte, pos int) (node *json.Value, err error) { val := jsonDataLiteral(data[pos]) switch val { case jsonNullLiteral: - node = ajson.NullNode("") + node = json.ValueNull case jsonTrueLiteral: - node = ajson.BoolNode("", true) + node = json.ValueTrue case jsonFalseLiteral: - node = ajson.BoolNode("", false) + node = json.ValueFalse default: - return nil, fmt.Errorf("unknown literal value %v", val) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unknown literal value %v", val) } return node, nil } -func newLiteralPlugin() *literalPlugin { - lh := &literalPlugin{ - info: &jsonPluginInfo{ - name: "Literal", - types: []jsonDataType{jsonLiteral}, - }, - } - binlogJSON.register(jsonLiteral, lh) - return lh -} - -//endregion - -//region opaque plugin - -func init() { - newOpaquePlugin() -} - -type opaquePlugin struct { - info *jsonPluginInfo -} - -var _ jsonPlugin = (*opaquePlugin)(nil) - // other types are stored as catch-all opaque types: documentation on these is scarce. // we currently know about (and support) date/time/datetime/decimal. -func (oh opaquePlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) { +func binparserOpaque(_ jsonDataType, data []byte, pos int) (node *json.Value, err error) { dataType := data[pos] start := 3 // account for length of stored value end := start + 8 // all currently supported opaque data types are 8 bytes in size @@ -440,8 +287,8 @@ func (oh opaquePlugin) getNode(typ jsonDataType, data []byte, pos int) (node *aj month := yearMonth % 13 day := (value >> 17) & 0x1f // 5 bits starting at 17th dateString := fmt.Sprintf("%04d-%02d-%02d", year, month, day) - node = ajson.StringNode("", dateString) - case TypeTime: + node = json.NewDate(dateString) + case TypeTime2, TypeTime: raw := binary.LittleEndian.Uint64(data[start:end]) value := raw >> 24 hour := (value >> 12) & 0x03ff // 10 bits starting at 12th @@ -449,8 +296,8 @@ func (oh opaquePlugin) getNode(typ jsonDataType, data []byte, pos int) (node *aj second := value & 0x3f // 6 bits starting at 0th microSeconds := raw & 0xffffff // 24 lower bits timeString := fmt.Sprintf("%02d:%02d:%02d.%06d", hour, minute, second, microSeconds) - node = ajson.StringNode("", timeString) - case TypeDateTime: + node = json.NewTime(timeString) + case TypeDateTime2, TypeDateTime, TypeTimestamp2, TypeTimestamp: raw := binary.LittleEndian.Uint64(data[start:end]) value := raw >> 24 yearMonth := (value >> 22) & 0x01ffff // 17 bits starting at 22nd @@ -462,8 +309,8 @@ func (oh opaquePlugin) getNode(typ jsonDataType, data []byte, pos int) (node *aj second := value & 0x3f // 6 bits starting at 0th microSeconds := raw & 0xffffff // 24 lower bits timeString := fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d.%06d", year, month, day, hour, minute, second, microSeconds) - node = ajson.StringNode("", timeString) - case TypeNewDecimal: + node = json.NewDateTime(timeString) + case TypeDecimal, TypeNewDecimal: decimalData := data[start:end] precision := decimalData[0] scale := decimalData[1] @@ -472,138 +319,51 @@ func (oh opaquePlugin) getNode(typ jsonDataType, data []byte, pos int) (node *aj if err != nil { return nil, err } - float, err := val.ToFloat64() - if err != nil { - return nil, err - } - node = ajson.NumericNode("", float) + node = json.NewNumber(val.ToString(), json.NumberTypeDecimal) + case TypeVarchar, TypeVarString, TypeString, TypeBlob, TypeTinyBlob, TypeMediumBlob, TypeLongBlob: + node = json.NewBlob(string(data[pos+1:])) + case TypeBit: + node = json.NewBit(string(data[pos+1:])) default: - return nil, fmt.Errorf("opaque type %d is not supported yet, data %v", dataType, data[2:]) + node = json.NewOpaqueValue(string(data[pos+1:])) } return node, nil } -func newOpaquePlugin() *opaquePlugin { - oh := &opaquePlugin{ - info: &jsonPluginInfo{ - name: "Opaque", - types: []jsonDataType{jsonOpaque}, - }, - } - binlogJSON.register(jsonOpaque, oh) - return oh -} - -//endregion - -//region string plugin - -func init() { - newStringPlugin() -} - -type stringPlugin struct { - info *jsonPluginInfo -} - -var _ jsonPlugin = (*stringPlugin)(nil) - -func (sh stringPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) { +func binparserString(_ jsonDataType, data []byte, pos int) (node *json.Value, err error) { size, pos := readVariableLength(data, pos) - node = ajson.StringNode("", string(data[pos:pos+size])) - - return node, nil -} - -func newStringPlugin() *stringPlugin { - sh := &stringPlugin{ - info: &jsonPluginInfo{ - name: "String", - types: []jsonDataType{jsonString}, - }, - } - binlogJSON.register(jsonString, sh) - return sh -} - -//endregion - -//region array plugin - -func init() { - newArrayPlugin() -} - -type arrayPlugin struct { - info *jsonPluginInfo + return json.NewString(string(data[pos : pos+size])), nil } -var _ jsonPlugin = (*arrayPlugin)(nil) - // arrays are stored thus: // | type_identifier(one of [2,3]) | elem count | obj size | list of offsets+lengths of values | actual values | -func (ah arrayPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) { - jlog("JSON Array %s, len %d", jsonDataTypeToString(uint(typ)), len(data)) - var nodes []*ajson.Node - var elem *ajson.Node - var elementCount, size int +func binparserArray(typ jsonDataType, data []byte, pos int) (node *json.Value, err error) { + var nodes []*json.Value + var elem *json.Value + var elementCount int large := typ == jsonLargeArray elementCount, pos = readInt(data, pos, large) - jlog("Array(%t): elem count: %d\n", large, elementCount) - size, pos = readInt(data, pos, large) - jlog("Array(%t): elem count: %d, size:%d\n", large, elementCount, size) + _, pos = readInt(data, pos, large) for i := 0; i < elementCount; i++ { - elem, pos, err = getElem(data, pos, large) + elem, pos, err = binparserElement(data, pos, large) if err != nil { return nil, err } nodes = append(nodes, elem) - jlog("Index is %d:%s", i, jsonDataTypeToString(uint(typ))) } - node = ajson.ArrayNode("", nodes) + node = json.NewArray(nodes) return node, nil } -func newArrayPlugin() *arrayPlugin { - ah := &arrayPlugin{ - info: &jsonPluginInfo{ - name: "Array", - types: []jsonDataType{jsonSmallArray, jsonLargeArray}, - }, - } - binlogJSON.register(jsonSmallArray, ah) - binlogJSON.register(jsonLargeArray, ah) - return ah -} - -//endregion - -//region object plugin - -func init() { - newObjectPlugin() -} - -type objectPlugin struct { - info *jsonPluginInfo -} - -var _ jsonPlugin = (*objectPlugin)(nil) - // objects are stored thus: // | type_identifier(0/1) | elem count | obj size | list of offsets+lengths of keys | list of offsets+lengths of values | actual keys | actual values | -func (oh objectPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) { - jlog("JSON Type is %s, len %d", jsonDataTypeToString(uint(typ)), len(data)) - +func binparserObject(typ jsonDataType, data []byte, pos int) (node *json.Value, err error) { // "large" decides number of bytes used to specify element count and total object size: 4 bytes for large, 2 for small var large = typ == jsonLargeObject - var elementCount int // total number of elements (== keys) in this object map. (element can be another object: recursively handled) - var size int // total size of object elementCount, pos = readInt(data, pos, large) - size, pos = readInt(data, pos, large) - jlog("Object: elem count: %d, size %d\n", elementCount, size) + _, pos = readInt(data, pos, large) keys := make([]string, elementCount) // stores all the keys in this object for i := 0; i < elementCount; i++ { @@ -615,40 +375,22 @@ func (oh objectPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *aj keyOffsetStart := keyOffset + 1 // check that offsets are not out of bounds (can happen only if there is a bug in the parsing code) if keyOffsetStart >= len(data) || keyOffsetStart+keyLength > len(data) { - log.Errorf("unable to decode object elements") return nil, fmt.Errorf("unable to decode object elements: %v", data) } keys[i] = string(data[keyOffsetStart : keyOffsetStart+keyLength]) } - jlog("Object keys: %+v", keys) - object := make(map[string]*ajson.Node) - var elem *ajson.Node + var object json.Object + var elem *json.Value // get the value for each key for i := 0; i < elementCount; i++ { - elem, pos, err = getElem(data, pos, large) + elem, pos, err = binparserElement(data, pos, large) if err != nil { return nil, err } - object[keys[i]] = elem - jlog("Key is %s:%s", keys[i], jsonDataTypeToString(uint(typ))) + object.Add(keys[i], elem) } - node = ajson.ObjectNode("", object) - return node, nil -} - -func newObjectPlugin() *objectPlugin { - oh := &objectPlugin{ - info: &jsonPluginInfo{ - name: "Object", - types: []jsonDataType{jsonSmallObject, jsonLargeObject}, - }, - } - binlogJSON.register(jsonSmallObject, oh) - binlogJSON.register(jsonLargeObject, oh) - return oh + return json.NewObject(object), nil } - -//endregion diff --git a/go/mysql/binlog/binlog_json_test.go b/go/mysql/binlog/binlog_json_test.go new file mode 100644 index 00000000000..5652b58567e --- /dev/null +++ b/go/mysql/binlog/binlog_json_test.go @@ -0,0 +1,469 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package binlog + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/json" +) + +func jsonObject(entries map[string]*json.Value) *json.Value { + var obj json.Object + for k, v := range entries { + obj.Set(k, v, json.Set) + } + return json.NewObject(obj) +} + +func TestBinaryJSON(t *testing.T) { + testcases := []struct { + name string + data []byte + expected *json.Value + }{ + { + name: "null", + data: []byte{}, + expected: json.ValueNull, + }, + { + name: `object {"a": "b"}`, + data: []byte{0, 1, 0, 14, 0, 11, 0, 1, 0, 12, 12, 0, 97, 1, 98}, + // expected: &json.Value{t: json.TypeObject, o: json.Object{kvs: []json.kv{{k: "a", v: &json.Value{t: json.TypeString, s: "b"}}}}}, + expected: jsonObject(map[string]*json.Value{"a": json.NewString("b")}), + }, + { + name: `object {"a": 2}`, + data: []byte{0, 1, 0, 12, 0, 11, 0, 1, 0, 5, 2, 0, 97}, + expected: jsonObject(map[string]*json.Value{"a": json.NewNumber("2", json.NumberTypeSigned)}), + }, + { + name: `nested object {"asdf":{"foo":123}}`, + data: []byte{0, 1, 0, 29, 0, 11, 0, 4, 0, 0, 15, 0, 97, 115, 100, 102, 1, 0, 14, 0, 11, 0, 3, 0, 5, 123, 0, 102, 111, 111}, + expected: jsonObject(map[string]*json.Value{"asdf": jsonObject(map[string]*json.Value{"foo": json.NewNumber("123", json.NumberTypeSigned)})}), + }, + { + name: `array [1,2]`, + data: []byte{2, 2, 0, 10, 0, 5, 1, 0, 5, 2, 0}, + expected: json.NewArray([]*json.Value{json.NewNumber("1", json.NumberTypeSigned), json.NewNumber("2", json.NumberTypeSigned)}), + }, + { + name: `complex {"a":"b","c":"d","ab":"abc","bc":["x","y"]}`, + data: []byte{0, 4, 0, 60, 0, 32, 0, 1, 0, 33, 0, 1, 0, 34, 0, 2, 0, 36, 0, 2, 0, 12, 38, 0, 12, 40, 0, 12, 42, 0, 2, 46, 0, 97, 99, 97, 98, 98, 99, 1, 98, 1, 100, 3, 97, 98, 99, 2, 0, 14, 0, 12, 10, 0, 12, 12, 0, 1, 120, 1, 121}, + expected: jsonObject(map[string]*json.Value{ + "a": json.NewString("b"), + "c": json.NewString("d"), + "ab": json.NewString("abc"), + "bc": json.NewArray([]*json.Value{json.NewString("x"), json.NewString("y")}), + }), + }, + { + name: `array ["here"]`, + data: []byte{2, 1, 0, 37, 0, 12, 8, 0, 0, 4, 104, 101, 114, 101}, + expected: json.NewArray([]*json.Value{json.NewString("here")}), + }, + { + name: `array ["here",["I","am"],"!!!"]`, + data: []byte{2, 3, 0, 37, 0, 12, 13, 0, 2, 18, 0, 12, 33, 0, 4, 104, 101, 114, 101, 2, 0, 15, 0, 12, 10, 0, 12, 12, 0, 1, 73, 2, 97, 109, 3, 33, 33, 33}, + expected: json.NewArray([]*json.Value{ + json.NewString("here"), + json.NewArray([]*json.Value{json.NewString("I"), json.NewString("am")}), + json.NewString("!!!"), + }), + }, + { + name: `scalar "scalar string"`, + data: []byte{12, 13, 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103}, + expected: json.NewString("scalar string"), + }, + { + name: `object {"scopes":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAEAAAAAAEAAAAAA8AAABgAAAAAABAAAACAAAAAAAAA"}`, + data: []byte{0, 1, 0, 149, 0, 11, 0, 6, 0, 12, 17, 0, 115, 99, 111, 112, 101, 115, 130, 1, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 66, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 66, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 69, 65, 65, 65, 65, 65, 65, 69, 65, 65, 65, 65, 65, 65, 56, 65, 65, 65, 66, 103, 65, 65, 65, 65, 65, 65, 66, 65, 65, 65, 65, 67, 65, 65, 65, 65, 65, 65, 65, 65, 65, 84, 216, 142, 184}, + // expected: &json.Value{t: json.TypeObject, o: json.Object{kvs: []json.kv{{k: "scopes", v: &json.Value{t: json.TypeString, s: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAEAAAAAAEAAAAAA8AAABgAAAAAABAAAACAAAAAAAAA"}}}}}, + expected: jsonObject(map[string]*json.Value{ + "scopes": json.NewString("AAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAEAAAAAAEAAAAAA8AAABgAAAAAABAAAACAAAAAAAAA"), + }), + }, + { + name: `scalar "scalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar string"`, + data: []byte{12, 130, 1, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103}, + expected: json.NewString("scalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar string"), + }, + { + name: `true`, + data: []byte{4, 1}, + expected: json.ValueTrue, + }, + { + name: `false`, + data: []byte{4, 2}, + expected: json.ValueFalse, + }, + { + name: `null`, + data: []byte{4, 0}, + expected: json.ValueNull, + }, + { + name: `-1`, + data: []byte{5, 255, 255}, + expected: json.NewNumber("-1", json.NumberTypeSigned), + }, + { + name: `1`, + data: []byte{6, 1, 0}, + expected: json.NewNumber("1", json.NumberTypeUnsigned), + }, + { + name: `32767`, + data: []byte{5, 255, 127}, + expected: json.NewNumber("32767", json.NumberTypeSigned), + }, + { + name: `32768`, + data: []byte{7, 0, 128, 0, 0}, + expected: json.NewNumber("32768", json.NumberTypeSigned), + }, + { + name: `-32769`, + data: []byte{7, 255, 127, 255, 255}, + expected: json.NewNumber("-32769", json.NumberTypeSigned), + }, + { + name: `2147483647`, + data: []byte{7, 255, 255, 255, 127}, + expected: json.NewNumber("2147483647", json.NumberTypeSigned), + }, + { + name: `32768`, + data: []byte{8, 0, 128, 0, 0}, + expected: json.NewNumber("32768", json.NumberTypeUnsigned), + }, + { + name: `2147483648`, + data: []byte{9, 0, 0, 0, 128, 0, 0, 0, 0}, + expected: json.NewNumber("2147483648", json.NumberTypeSigned), + }, + { + name: `-2147483648`, + data: []byte{7, 0, 0, 0, 128}, + expected: json.NewNumber("-2147483648", json.NumberTypeSigned), + }, + { + name: `-2147483649`, + data: []byte{9, 255, 255, 255, 127, 255, 255, 255, 255}, + expected: json.NewNumber("-2147483649", json.NumberTypeSigned), + }, + { + name: `18446744073709551615`, + data: []byte{10, 255, 255, 255, 255, 255, 255, 255, 255}, + expected: json.NewNumber("18446744073709551615", json.NumberTypeUnsigned), + }, + { + name: `-9223372036854775808`, + data: []byte{9, 0, 0, 0, 0, 0, 0, 0, 128}, + expected: json.NewNumber("-9223372036854775808", json.NumberTypeSigned), + }, + { + name: `3.14159`, + data: []byte{11, 110, 134, 27, 240, 249, 33, 9, 64}, + expected: json.NewNumber("3.14159", json.NumberTypeFloat), + }, + { + name: `empty object {}`, + data: []byte{0, 0, 0, 4, 0}, + expected: json.NewObject(json.Object{}), + }, + { + name: `empty array []`, + data: []byte{2, 0, 0, 4, 0}, + expected: json.NewArray(nil), + }, + { + name: `datetime "2015-01-15 23:24:25.000000"`, + data: []byte{15, 12, 8, 0, 0, 0, 25, 118, 31, 149, 25}, + expected: json.NewDateTime("2015-01-15 23:24:25.000000"), + }, + { + name: `time "23:24:25.000000"`, + data: []byte{15, 11, 8, 0, 0, 0, 25, 118, 1, 0, 0}, + expected: json.NewTime("23:24:25.000000"), + }, + { + name: `time "23:24:25.120000"`, + data: []byte{15, 11, 8, 192, 212, 1, 25, 118, 1, 0, 0}, + expected: json.NewTime("23:24:25.120000"), + }, + { + name: `date "2015-01-15"`, + data: []byte{15, 10, 8, 0, 0, 0, 0, 0, 30, 149, 25}, + expected: json.NewDate("2015-01-15"), + }, + { + name: `decimal "123456789.1234"`, + data: []byte{15, 246, 8, 13, 4, 135, 91, 205, 21, 4, 210}, + expected: json.NewNumber("123456789.1234", json.NumberTypeDecimal), + }, + { + name: `bit literal [2 202 254]`, + data: []byte{15, 16, 2, 202, 254}, + expected: json.NewBit(string([]byte{2, 202, 254})), + }, + { + name: `opaque string [2 202 254]`, + data: []byte{15, 15, 2, 202, 254}, + expected: json.NewBlob(string([]byte{2, 202, 254})), + }, + { + name: `opaque blob [2 202 254]`, + data: []byte{15, 252, 2, 202, 254}, + expected: json.NewBlob(string([]byte{2, 202, 254})), + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + val, err := ParseBinaryJSON(tc.data) + require.NoError(t, err) + require.Equal(t, tc.expected, val) + }) + } +} + +func TestMarshalJSONToSQL(t *testing.T) { + testcases := []struct { + name string + data []byte + expected string + }{ + { + name: "null", + data: []byte{}, + expected: "CAST(_utf8mb4'null' as JSON)", + }, + { + name: `object {"a": "b"}`, + data: []byte{0, 1, 0, 14, 0, 11, 0, 1, 0, 12, 12, 0, 97, 1, 98}, + expected: `JSON_OBJECT(_utf8mb4'a', _utf8mb4'b')`, + }, + { + name: `object {"a": 2}`, + data: []byte{0, 1, 0, 12, 0, 11, 0, 1, 0, 5, 2, 0, 97}, + expected: "JSON_OBJECT(_utf8mb4'a', 2)", + }, + { + name: `nested object {"asdf":{"foo":123}}`, + data: []byte{0, 1, 0, 29, 0, 11, 0, 4, 0, 0, 15, 0, 97, 115, 100, 102, 1, 0, 14, 0, 11, 0, 3, 0, 5, 123, 0, 102, 111, 111}, + expected: `JSON_OBJECT(_utf8mb4'asdf', JSON_OBJECT(_utf8mb4'foo', 123))`, + }, + { + name: `array [1,2]`, + data: []byte{2, 2, 0, 10, 0, 5, 1, 0, 5, 2, 0}, + expected: "JSON_ARRAY(1, 2)", + }, + { + name: `complex {"a":"b","c":"d","ab":"abc","bc":["x","y"]}`, + data: []byte{0, 4, 0, 60, 0, 32, 0, 1, 0, 33, 0, 1, 0, 34, 0, 2, 0, 36, 0, 2, 0, 12, 38, 0, 12, 40, 0, 12, 42, 0, 2, 46, 0, 97, 99, 97, 98, 98, 99, 1, 98, 1, 100, 3, 97, 98, 99, 2, 0, 14, 0, 12, 10, 0, 12, 12, 0, 1, 120, 1, 121}, + expected: "JSON_OBJECT(_utf8mb4'a', _utf8mb4'b', _utf8mb4'ab', _utf8mb4'abc', _utf8mb4'bc', JSON_ARRAY(_utf8mb4'x', _utf8mb4'y'), _utf8mb4'c', _utf8mb4'd')", + }, + { + name: `array ["here"]`, + data: []byte{2, 1, 0, 37, 0, 12, 8, 0, 0, 4, 104, 101, 114, 101}, + expected: "JSON_ARRAY(_utf8mb4'here')", + }, + { + name: `array ["here",["I","am"],"!!!"]`, + data: []byte{2, 3, 0, 37, 0, 12, 13, 0, 2, 18, 0, 12, 33, 0, 4, 104, 101, 114, 101, 2, 0, 15, 0, 12, 10, 0, 12, 12, 0, 1, 73, 2, 97, 109, 3, 33, 33, 33}, + expected: "JSON_ARRAY(_utf8mb4'here', JSON_ARRAY(_utf8mb4'I', _utf8mb4'am'), _utf8mb4'!!!')", + }, + { + name: `scalar "scalar string"`, + data: []byte{12, 13, 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103}, + expected: `CAST(JSON_QUOTE(_utf8mb4'scalar string') as JSON)`, + }, + { + name: `object {"scopes":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAEAAAAAAEAAAAAA8AAABgAAAAAABAAAACAAAAAAAAA"}`, + data: []byte{0, 1, 0, 149, 0, 11, 0, 6, 0, 12, 17, 0, 115, 99, 111, 112, 101, 115, 130, 1, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 66, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 66, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 69, 65, 65, 65, 65, 65, 65, 69, 65, 65, 65, 65, 65, 65, 56, 65, 65, 65, 66, 103, 65, 65, 65, 65, 65, 65, 66, 65, 65, 65, 65, 67, 65, 65, 65, 65, 65, 65, 65, 65, 65, 84, 216, 142, 184}, + expected: "JSON_OBJECT(_utf8mb4'scopes', _utf8mb4'AAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAEAAAAAAEAAAAAA8AAABgAAAAAABAAAACAAAAAAAAA')", + }, + { + name: `scalar "scalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar string"`, + data: []byte{12, 130, 1, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, + 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103}, + expected: `CAST(JSON_QUOTE(_utf8mb4'scalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar string') as JSON)`, + }, + { + name: `true`, + data: []byte{4, 1}, + expected: `CAST(_utf8mb4'true' as JSON)`, + }, + { + name: `false`, + data: []byte{4, 2}, + expected: `CAST(_utf8mb4'false' as JSON)`, + }, + { + name: `null`, + data: []byte{4, 0}, + expected: `CAST(_utf8mb4'null' as JSON)`, + }, + { + name: `-1`, + data: []byte{5, 255, 255}, + expected: `CAST(-1 as JSON)`, + }, + { + name: `1`, + data: []byte{6, 1, 0}, + expected: `CAST(1 as JSON)`, + }, + { + name: `32767`, + data: []byte{5, 255, 127}, + expected: `CAST(32767 as JSON)`, + }, + { + name: `32768`, + data: []byte{7, 0, 128, 0, 0}, + expected: `CAST(32768 as JSON)`, + }, + { + name: `-32769`, + data: []byte{7, 255, 127, 255, 255}, + expected: `CAST(-32769 as JSON)`, + }, + { + name: `2147483647`, + data: []byte{7, 255, 255, 255, 127}, + expected: `CAST(2147483647 as JSON)`, + }, + { + name: `32768`, + data: []byte{8, 0, 128, 0, 0}, + expected: `CAST(32768 as JSON)`, + }, + { + name: `2147483648`, + data: []byte{9, 0, 0, 0, 128, 0, 0, 0, 0}, + expected: `CAST(2147483648 as JSON)`, + }, + { + name: `-2147483648`, + data: []byte{7, 0, 0, 0, 128}, + expected: `CAST(-2147483648 as JSON)`, + }, + { + name: `-2147483649`, + data: []byte{9, 255, 255, 255, 127, 255, 255, 255, 255}, + expected: `CAST(-2147483649 as JSON)`, + }, + { + name: `18446744073709551615`, + data: []byte{10, 255, 255, 255, 255, 255, 255, 255, 255}, + expected: `CAST(18446744073709551615 as JSON)`, + }, + { + name: `-9223372036854775808`, + data: []byte{9, 0, 0, 0, 0, 0, 0, 0, 128}, + expected: `CAST(-9223372036854775808 as JSON)`, + }, + { + name: `3.14159`, + data: []byte{11, 110, 134, 27, 240, 249, 33, 9, 64}, + expected: `CAST(3.14159 as JSON)`, + }, + { + name: `empty object {}`, + data: []byte{0, 0, 0, 4, 0}, + expected: `JSON_OBJECT()`, + }, + { + name: `empty array []`, + data: []byte{2, 0, 0, 4, 0}, + expected: `JSON_ARRAY()`, + }, + { + name: `datetime "2015-01-15 23:24:25.000000"`, + data: []byte{15, 12, 8, 0, 0, 0, 25, 118, 31, 149, 25}, + expected: `CAST(timestamp '2015-01-15 23:24:25.000000' as JSON)`, + }, + { + name: `time "23:24:25.000000"`, + data: []byte{15, 11, 8, 0, 0, 0, 25, 118, 1, 0, 0}, + expected: `CAST(time '23:24:25.000000' as JSON)`, + }, + { + name: `time "23:24:25.120000"`, + data: []byte{15, 11, 8, 192, 212, 1, 25, 118, 1, 0, 0}, + expected: `CAST(time '23:24:25.120000' as JSON)`, + }, + { + name: `date "2015-01-15"`, + data: []byte{15, 10, 8, 0, 0, 0, 0, 0, 30, 149, 25}, + expected: `CAST(date '2015-01-15' as JSON)`, + }, + { + name: `decimal "123456789.1234"`, + data: []byte{15, 246, 8, 13, 4, 135, 91, 205, 21, 4, 210}, + expected: `CAST(123456789.1234 as JSON)`, + }, + { + name: `bit literal [2 202 254]`, + data: []byte{15, 16, 2, 202, 254}, + expected: `CAST(b'101100101011111110' as JSON)`, + }, + { + name: `opaque string [2 202 254]`, + data: []byte{15, 15, 2, 202, 254}, + expected: `CAST(x'02CAFE' as JSON)`, + }, + { + name: `opaque blob [2 202 254]`, + data: []byte{15, 252, 2, 202, 254}, + expected: `CAST(x'02CAFE' as JSON)`, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + val, err := ParseBinaryJSON(tc.data) + buf := val.MarshalSQLTo(nil) + require.NoError(t, err) + require.Equal(t, tc.expected, string(buf)) + }) + } +} diff --git a/go/mysql/binlog/constants.go b/go/mysql/binlog/constants.go new file mode 100644 index 00000000000..d08889efde8 --- /dev/null +++ b/go/mysql/binlog/constants.go @@ -0,0 +1,114 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package binlog + +// This is the data type for a field. +// Values taken from include/mysql/mysql_com.h +const ( + // TypeDecimal is MYSQL_TYPE_DECIMAL. It is deprecated. + TypeDecimal = 0 + + // TypeTiny is MYSQL_TYPE_TINY + TypeTiny = 1 + + // TypeShort is MYSQL_TYPE_SHORT + TypeShort = 2 + + // TypeLong is MYSQL_TYPE_LONG + TypeLong = 3 + + // TypeFloat is MYSQL_TYPE_FLOAT + TypeFloat = 4 + + // TypeDouble is MYSQL_TYPE_DOUBLE + TypeDouble = 5 + + // TypeNull is MYSQL_TYPE_NULL + TypeNull = 6 + + // TypeTimestamp is MYSQL_TYPE_TIMESTAMP + TypeTimestamp = 7 + + // TypeLongLong is MYSQL_TYPE_LONGLONG + TypeLongLong = 8 + + // TypeInt24 is MYSQL_TYPE_INT24 + TypeInt24 = 9 + + // TypeDate is MYSQL_TYPE_DATE + TypeDate = 10 + + // TypeTime is MYSQL_TYPE_TIME + TypeTime = 11 + + // TypeDateTime is MYSQL_TYPE_DATETIME + TypeDateTime = 12 + + // TypeYear is MYSQL_TYPE_YEAR + TypeYear = 13 + + // TypeNewDate is MYSQL_TYPE_NEWDATE + TypeNewDate = 14 + + // TypeVarchar is MYSQL_TYPE_VARCHAR + TypeVarchar = 15 + + // TypeBit is MYSQL_TYPE_BIT + TypeBit = 16 + + // TypeTimestamp2 is MYSQL_TYPE_TIMESTAMP2 + TypeTimestamp2 = 17 + + // TypeDateTime2 is MYSQL_TYPE_DATETIME2 + TypeDateTime2 = 18 + + // TypeTime2 is MYSQL_TYPE_TIME2 + TypeTime2 = 19 + + // TypeJSON is MYSQL_TYPE_JSON + TypeJSON = 245 + + // TypeNewDecimal is MYSQL_TYPE_NEWDECIMAL + TypeNewDecimal = 246 + + // TypeEnum is MYSQL_TYPE_ENUM + TypeEnum = 247 + + // TypeSet is MYSQL_TYPE_SET + TypeSet = 248 + + // TypeTinyBlob is MYSQL_TYPE_TINY_BLOB + TypeTinyBlob = 249 + + // TypeMediumBlob is MYSQL_TYPE_MEDIUM_BLOB + TypeMediumBlob = 250 + + // TypeLongBlob is MYSQL_TYPE_LONG_BLOB + TypeLongBlob = 251 + + // TypeBlob is MYSQL_TYPE_BLOB + TypeBlob = 252 + + // TypeVarString is MYSQL_TYPE_VAR_STRING + TypeVarString = 253 + + // TypeString is MYSQL_TYPE_STRING + TypeString = 254 + + // TypeGeometry is MYSQL_TYPE_GEOMETRY + TypeGeometry = 255 +) diff --git a/go/mysql/binlog/rbr.go b/go/mysql/binlog/rbr.go new file mode 100644 index 00000000000..73c293a8750 --- /dev/null +++ b/go/mysql/binlog/rbr.go @@ -0,0 +1,788 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package binlog + +import ( + "bytes" + "encoding/binary" + "fmt" + "math" + "strconv" + "strings" + "time" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +// ZeroTimestamp is the special value 0 for a timestamp. +var ZeroTimestamp = []byte("0000-00-00 00:00:00") + +var dig2bytes = []int{0, 1, 1, 2, 2, 3, 3, 4, 4, 4} + +// CellLength returns the new position after the field with the given +// type is read. +func CellLength(data []byte, pos int, typ byte, metadata uint16) (int, error) { + switch typ { + case TypeNull: + return 0, nil + case TypeTiny, TypeYear: + return 1, nil + case TypeShort: + return 2, nil + case TypeInt24: + return 3, nil + case TypeLong, TypeFloat, TypeTimestamp: + return 4, nil + case TypeLongLong, TypeDouble: + return 8, nil + case TypeDate, TypeTime, TypeNewDate: + return 3, nil + case TypeDateTime: + return 8, nil + case TypeVarchar, TypeVarString: + // Length is encoded in 1 or 2 bytes. + if metadata > 255 { + l := int(uint64(data[pos]) | + uint64(data[pos+1])<<8) + return l + 2, nil + } + l := int(data[pos]) + return l + 1, nil + case TypeBit: + // bitmap length is in metadata, as: + // upper 8 bits: bytes length + // lower 8 bits: bit length + nbits := ((metadata >> 8) * 8) + (metadata & 0xFF) + return (int(nbits) + 7) / 8, nil + case TypeTimestamp2: + // metadata has number of decimals. One byte encodes + // two decimals. + return 4 + (int(metadata)+1)/2, nil + case TypeDateTime2: + // metadata has number of decimals. One byte encodes + // two decimals. + return 5 + (int(metadata)+1)/2, nil + case TypeTime2: + // metadata has number of decimals. One byte encodes + // two decimals. + return 3 + (int(metadata)+1)/2, nil + case TypeNewDecimal: + precision := int(metadata >> 8) + scale := int(metadata & 0xff) + // Example: + // NNNNNNNNNNNN.MMMMMM + // 12 bytes 6 bytes + // precision is 18 + // scale is 6 + // storage is done by groups of 9 digits: + // - 32 bits are used to store groups of 9 digits. + // - any leftover digit is stored in: + // - 1 byte for 1 and 2 digits + // - 2 bytes for 3 and 4 digits + // - 3 bytes for 5 and 6 digits + // - 4 bytes for 7 and 8 digits (would also work for 9) + // both sides of the dot are stored separately. + // In this example, we'd have: + // - 2 bytes to store the first 3 full digits. + // - 4 bytes to store the next 9 full digits. + // - 3 bytes to store the 6 fractional digits. + intg := precision - scale + intg0 := intg / 9 + frac0 := scale / 9 + intg0x := intg - intg0*9 + frac0x := scale - frac0*9 + return intg0*4 + dig2bytes[intg0x] + frac0*4 + dig2bytes[frac0x], nil + case TypeEnum, TypeSet: + return int(metadata & 0xff), nil + case TypeJSON, TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob, TypeGeometry: + // Of the Blobs, only TypeBlob is used in binary logs, + // but supports others just in case. + switch metadata { + case 1: + return 1 + int(uint32(data[pos])), nil + case 2: + return 2 + int(uint32(data[pos])| + uint32(data[pos+1])<<8), nil + case 3: + return 3 + int(uint32(data[pos])| + uint32(data[pos+1])<<8| + uint32(data[pos+2])<<16), nil + case 4: + return 4 + int(uint32(data[pos])| + uint32(data[pos+1])<<8| + uint32(data[pos+2])<<16| + uint32(data[pos+3])<<24), nil + default: + return 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unsupported blob/geometry metadata value %v (data: %v pos: %v)", metadata, data, pos) + } + case TypeString: + // This may do String, Enum, and Set. The type is in + // metadata. If it's a string, then there will be more bits. + // This will give us the maximum length of the field. + t := metadata >> 8 + if t == TypeEnum || t == TypeSet { + return int(metadata & 0xff), nil + } + max := int((((metadata >> 4) & 0x300) ^ 0x300) + (metadata & 0xff)) + // Length is encoded in 1 or 2 bytes. + if max > 255 { + l := int(uint64(data[pos]) | + uint64(data[pos+1])<<8) + return l + 2, nil + } + l := int(data[pos]) + return l + 1, nil + + default: + return 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unsupported type %v (data: %v pos: %v)", typ, data, pos) + } +} + +// printTimestamp is a helper method to append a timestamp into a bytes.Buffer, +// and return the Buffer. +func printTimestamp(v uint32) *bytes.Buffer { + if v == 0 { + return bytes.NewBuffer(ZeroTimestamp) + } + + t := time.Unix(int64(v), 0).UTC() + year, month, day := t.Date() + hour, minute, second := t.Clock() + + result := &bytes.Buffer{} + fmt.Fprintf(result, "%04d-%02d-%02d %02d:%02d:%02d", year, int(month), day, hour, minute, second) + return result +} + +// CellValue returns the data for a cell as a sqltypes.Value, and how +// many bytes it takes. It uses source type in querypb.Type and vitess type +// byte to determine general shared aspects of types and the querypb.Field to +// determine other info specifically about its underlying column (SQL column +// type, column length, charset, etc) +func CellValue(data []byte, pos int, typ byte, metadata uint16, field *querypb.Field) (sqltypes.Value, int, error) { + switch typ { + case TypeTiny: + if sqltypes.IsSigned(field.Type) { + return sqltypes.MakeTrusted(querypb.Type_INT8, + strconv.AppendInt(nil, int64(int8(data[pos])), 10)), 1, nil + } + return sqltypes.MakeTrusted(querypb.Type_UINT8, + strconv.AppendUint(nil, uint64(data[pos]), 10)), 1, nil + case TypeYear: + val := data[pos] + if val == 0 { + return sqltypes.MakeTrusted(querypb.Type_YEAR, + []byte{'0', '0', '0', '0'}), 1, nil + } + return sqltypes.MakeTrusted(querypb.Type_YEAR, + strconv.AppendUint(nil, uint64(data[pos])+1900, 10)), 1, nil + case TypeShort: + val := binary.LittleEndian.Uint16(data[pos : pos+2]) + if sqltypes.IsSigned(field.Type) { + return sqltypes.MakeTrusted(querypb.Type_INT16, + strconv.AppendInt(nil, int64(int16(val)), 10)), 2, nil + } + return sqltypes.MakeTrusted(querypb.Type_UINT16, + strconv.AppendUint(nil, uint64(val), 10)), 2, nil + case TypeInt24: + if sqltypes.IsSigned(field.Type) && data[pos+2]&128 > 0 { + // Negative number, have to extend the sign. + val := int32(uint32(data[pos]) + + uint32(data[pos+1])<<8 + + uint32(data[pos+2])<<16 + + uint32(255)<<24) + return sqltypes.MakeTrusted(querypb.Type_INT24, + strconv.AppendInt(nil, int64(val), 10)), 3, nil + } + // Positive number. + val := uint64(data[pos]) + + uint64(data[pos+1])<<8 + + uint64(data[pos+2])<<16 + return sqltypes.MakeTrusted(querypb.Type_UINT24, + strconv.AppendUint(nil, val, 10)), 3, nil + case TypeLong: + val := binary.LittleEndian.Uint32(data[pos : pos+4]) + if sqltypes.IsSigned(field.Type) { + return sqltypes.MakeTrusted(querypb.Type_INT32, + strconv.AppendInt(nil, int64(int32(val)), 10)), 4, nil + } + return sqltypes.MakeTrusted(querypb.Type_UINT32, + strconv.AppendUint(nil, uint64(val), 10)), 4, nil + case TypeFloat: + val := binary.LittleEndian.Uint32(data[pos : pos+4]) + fval := math.Float32frombits(val) + return sqltypes.MakeTrusted(querypb.Type_FLOAT32, + strconv.AppendFloat(nil, float64(fval), 'E', -1, 32)), 4, nil + case TypeDouble: + val := binary.LittleEndian.Uint64(data[pos : pos+8]) + fval := math.Float64frombits(val) + return sqltypes.MakeTrusted(querypb.Type_FLOAT64, + strconv.AppendFloat(nil, fval, 'E', -1, 64)), 8, nil + case TypeTimestamp: + val := binary.LittleEndian.Uint32(data[pos : pos+4]) + txt := printTimestamp(val) + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + txt.Bytes()), 4, nil + case TypeLongLong: + val := binary.LittleEndian.Uint64(data[pos : pos+8]) + if sqltypes.IsSigned(field.Type) { + return sqltypes.MakeTrusted(querypb.Type_INT64, + strconv.AppendInt(nil, int64(val), 10)), 8, nil + } + return sqltypes.MakeTrusted(querypb.Type_UINT64, + strconv.AppendUint(nil, val, 10)), 8, nil + case TypeDate, TypeNewDate: + val := uint32(data[pos]) + + uint32(data[pos+1])<<8 + + uint32(data[pos+2])<<16 + day := val & 31 + month := val >> 5 & 15 + year := val >> 9 + return sqltypes.MakeTrusted(querypb.Type_DATE, + []byte(fmt.Sprintf("%04d-%02d-%02d", year, month, day))), 3, nil + case TypeTime: + var hour, minute, second int32 + if data[pos+2]&128 > 0 { + // Negative number, have to extend the sign. + val := int32(uint32(data[pos]) + + uint32(data[pos+1])<<8 + + uint32(data[pos+2])<<16 + + uint32(255)<<24) + hour = val / 10000 + minute = -((val % 10000) / 100) + second = -(val % 100) + } else { + val := int32(data[pos]) + + int32(data[pos+1])<<8 + + int32(data[pos+2])<<16 + hour = val / 10000 + minute = (val % 10000) / 100 + second = val % 100 + } + return sqltypes.MakeTrusted(querypb.Type_TIME, + []byte(fmt.Sprintf("%02d:%02d:%02d", hour, minute, second))), 3, nil + case TypeDateTime: + val := binary.LittleEndian.Uint64(data[pos : pos+8]) + d := val / 1000000 + t := val % 1000000 + year := d / 10000 + month := (d % 10000) / 100 + day := d % 100 + hour := t / 10000 + minute := (t % 10000) / 100 + second := t % 100 + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + []byte(fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second))), 8, nil + case TypeVarchar, TypeVarString: + // We trust that typ is compatible with the field.Type + // Length is encoded in 1 or 2 bytes. + typeToUse := querypb.Type_VARCHAR + if field.Type == querypb.Type_VARBINARY || field.Type == querypb.Type_BINARY || field.Type == querypb.Type_BLOB { + typeToUse = field.Type + } + if metadata > 255 { + l := int(uint64(data[pos]) | + uint64(data[pos+1])<<8) + return sqltypes.MakeTrusted(typeToUse, + data[pos+2:pos+2+l]), l + 2, nil + } + l := int(data[pos]) + return sqltypes.MakeTrusted(typeToUse, + data[pos+1:pos+1+l]), l + 1, nil + case TypeBit: + // The contents is just the bytes, quoted. + nbits := ((metadata >> 8) * 8) + (metadata & 0xFF) + l := (int(nbits) + 7) / 8 + return sqltypes.MakeTrusted(querypb.Type_BIT, + data[pos:pos+l]), l, nil + case TypeTimestamp2: + second := binary.BigEndian.Uint32(data[pos : pos+4]) + txt := printTimestamp(second) + switch metadata { + case 1: + decimals := int(data[pos+4]) + fmt.Fprintf(txt, ".%01d", decimals/10) + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + txt.Bytes()), 5, nil + case 2: + decimals := int(data[pos+4]) + fmt.Fprintf(txt, ".%02d", decimals) + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + txt.Bytes()), 5, nil + case 3: + decimals := int(data[pos+4])<<8 + + int(data[pos+5]) + fmt.Fprintf(txt, ".%03d", decimals/10) + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + txt.Bytes()), 6, nil + case 4: + decimals := int(data[pos+4])<<8 + + int(data[pos+5]) + fmt.Fprintf(txt, ".%04d", decimals) + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + txt.Bytes()), 6, nil + case 5: + decimals := int(data[pos+4])<<16 + + int(data[pos+5])<<8 + + int(data[pos+6]) + fmt.Fprintf(txt, ".%05d", decimals/10) + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + txt.Bytes()), 7, nil + case 6: + decimals := int(data[pos+4])<<16 + + int(data[pos+5])<<8 + + int(data[pos+6]) + fmt.Fprintf(txt, ".%06d", decimals) + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + txt.Bytes()), 7, nil + } + return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, + txt.Bytes()), 4, nil + case TypeDateTime2: + ymdhms := (uint64(data[pos])<<32 | + uint64(data[pos+1])<<24 | + uint64(data[pos+2])<<16 | + uint64(data[pos+3])<<8 | + uint64(data[pos+4])) - uint64(0x8000000000) + ymd := ymdhms >> 17 + ym := ymd >> 5 + hms := ymdhms % (1 << 17) + + day := ymd % (1 << 5) + month := ym % 13 + year := ym / 13 + + second := hms % (1 << 6) + minute := (hms >> 6) % (1 << 6) + hour := hms >> 12 + + txt := &bytes.Buffer{} + fmt.Fprintf(txt, "%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second) + + switch metadata { + case 1: + decimals := int(data[pos+5]) + fmt.Fprintf(txt, ".%01d", decimals/10) + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + txt.Bytes()), 6, nil + case 2: + decimals := int(data[pos+5]) + fmt.Fprintf(txt, ".%02d", decimals) + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + txt.Bytes()), 6, nil + case 3: + decimals := int(data[pos+5])<<8 + + int(data[pos+6]) + fmt.Fprintf(txt, ".%03d", decimals/10) + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + txt.Bytes()), 7, nil + case 4: + decimals := int(data[pos+5])<<8 + + int(data[pos+6]) + fmt.Fprintf(txt, ".%04d", decimals) + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + txt.Bytes()), 7, nil + case 5: + decimals := int(data[pos+5])<<16 + + int(data[pos+6])<<8 + + int(data[pos+7]) + fmt.Fprintf(txt, ".%05d", decimals/10) + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + txt.Bytes()), 8, nil + case 6: + decimals := int(data[pos+5])<<16 + + int(data[pos+6])<<8 + + int(data[pos+7]) + fmt.Fprintf(txt, ".%06d", decimals) + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + txt.Bytes()), 8, nil + } + return sqltypes.MakeTrusted(querypb.Type_DATETIME, + txt.Bytes()), 5, nil + case TypeTime2: + hms := (int64(data[pos])<<16 | + int64(data[pos+1])<<8 | + int64(data[pos+2])) - 0x800000 + sign := "" + if hms < 0 { + hms = -hms + sign = "-" + } + + fracStr := "" + switch metadata { + case 1: + frac := int(data[pos+3]) + if sign == "-" && frac != 0 { + hms-- + frac = 0x100 - frac + } + fracStr = fmt.Sprintf(".%.1d", frac/10) + case 2: + frac := int(data[pos+3]) + if sign == "-" && frac != 0 { + hms-- + frac = 0x100 - frac + } + fracStr = fmt.Sprintf(".%.2d", frac) + case 3: + frac := int(data[pos+3])<<8 | + int(data[pos+4]) + if sign == "-" && frac != 0 { + hms-- + frac = 0x10000 - frac + } + fracStr = fmt.Sprintf(".%.3d", frac/10) + case 4: + frac := int(data[pos+3])<<8 | + int(data[pos+4]) + if sign == "-" && frac != 0 { + hms-- + frac = 0x10000 - frac + } + fracStr = fmt.Sprintf(".%.4d", frac) + case 5: + frac := int(data[pos+3])<<16 | + int(data[pos+4])<<8 | + int(data[pos+5]) + if sign == "-" && frac != 0 { + hms-- + frac = 0x1000000 - frac + } + fracStr = fmt.Sprintf(".%.5d", frac/10) + case 6: + frac := int(data[pos+3])<<16 | + int(data[pos+4])<<8 | + int(data[pos+5]) + if sign == "-" && frac != 0 { + hms-- + frac = 0x1000000 - frac + } + fracStr = fmt.Sprintf(".%.6d", frac) + } + + hour := (hms >> 12) % (1 << 10) + minute := (hms >> 6) % (1 << 6) + second := hms % (1 << 6) + return sqltypes.MakeTrusted(querypb.Type_TIME, + []byte(fmt.Sprintf("%v%02d:%02d:%02d%v", sign, hour, minute, second, fracStr))), 3 + (int(metadata)+1)/2, nil + + case TypeNewDecimal: + precision := int(metadata >> 8) // total digits number + scale := int(metadata & 0xff) // number of fractional digits + intg := precision - scale // number of full digits + intg0 := intg / 9 // number of 32-bits digits + intg0x := intg - intg0*9 // leftover full digits + frac0 := scale / 9 // number of 32 bits fractionals + frac0x := scale - frac0*9 // leftover fractionals + + l := intg0*4 + dig2bytes[intg0x] + frac0*4 + dig2bytes[frac0x] + + // Copy the data so we can change it. Otherwise + // decoding is just too hard. + d := make([]byte, l) + copy(d, data[pos:pos+l]) + + txt := &bytes.Buffer{} + + isNegative := (d[0] & 0x80) == 0 + d[0] ^= 0x80 // First bit is inverted. + if isNegative { + // Negative numbers are just inverted bytes. + txt.WriteByte('-') + for i := range d { + d[i] ^= 0xff + } + } + + // first we have the leftover full digits + var val uint32 + switch dig2bytes[intg0x] { + case 0: + // nothing to do + case 1: + // one byte, up to two digits + val = uint32(d[0]) + case 2: + // two bytes, up to 4 digits + val = uint32(d[0])<<8 + + uint32(d[1]) + case 3: + // 3 bytes, up to 6 digits + val = uint32(d[0])<<16 + + uint32(d[1])<<8 + + uint32(d[2]) + case 4: + // 4 bytes, up to 8 digits (9 digits would be a full) + val = uint32(d[0])<<24 + + uint32(d[1])<<16 + + uint32(d[2])<<8 + + uint32(d[3]) + } + pos = dig2bytes[intg0x] + if val > 0 { + txt.Write(strconv.AppendUint(nil, uint64(val), 10)) + } + + // now the full digits, 32 bits each, 9 digits + for i := 0; i < intg0; i++ { + val = binary.BigEndian.Uint32(d[pos : pos+4]) + fmt.Fprintf(txt, "%09d", val) + pos += 4 + } + + // now see if we have a fraction + if scale == 0 { + // When the field is a DECIMAL using a scale of 0, e.g. + // DECIMAL(5,0), a binlogged value of 0 is almost treated + // like the NULL byte and we get a 0 byte length value. + // In this case let's return the correct value of 0. + if txt.Len() == 0 { + txt.WriteRune('0') + } + + return sqltypes.MakeTrusted(querypb.Type_DECIMAL, + txt.Bytes()), l, nil + } + txt.WriteByte('.') + + // now the full fractional digits + for i := 0; i < frac0; i++ { + val = binary.BigEndian.Uint32(d[pos : pos+4]) + fmt.Fprintf(txt, "%09d", val) + pos += 4 + } + + // then the partial fractional digits + switch dig2bytes[frac0x] { + case 0: + // Nothing to do + return sqltypes.MakeTrusted(querypb.Type_DECIMAL, + txt.Bytes()), l, nil + case 1: + // one byte, 1 or 2 digits + val = uint32(d[pos]) + if frac0x == 1 { + fmt.Fprintf(txt, "%1d", val) + } else { + fmt.Fprintf(txt, "%02d", val) + } + case 2: + // two bytes, 3 or 4 digits + val = uint32(d[pos])<<8 + + uint32(d[pos+1]) + if frac0x == 3 { + fmt.Fprintf(txt, "%03d", val) + } else { + fmt.Fprintf(txt, "%04d", val) + } + case 3: + // 3 bytes, 5 or 6 digits + val = uint32(d[pos])<<16 + + uint32(d[pos+1])<<8 + + uint32(d[pos+2]) + if frac0x == 5 { + fmt.Fprintf(txt, "%05d", val) + } else { + fmt.Fprintf(txt, "%06d", val) + } + case 4: + // 4 bytes, 7 or 8 digits (9 digits would be a full) + val = uint32(d[pos])<<24 + + uint32(d[pos+1])<<16 + + uint32(d[pos+2])<<8 + + uint32(d[pos+3]) + if frac0x == 7 { + fmt.Fprintf(txt, "%07d", val) + } else { + fmt.Fprintf(txt, "%08d", val) + } + } + + // remove preceding 0s from the integral part, otherwise we get "000000000001.23" instead of "1.23" + trimPrecedingZeroes := func(b []byte) []byte { + s := string(b) + isNegative := false + if s[0] == '-' { + isNegative = true + s = s[1:] + } + s = strings.TrimLeft(s, "0") + if isNegative { + s = fmt.Sprintf("-%s", s) + } + return []byte(s) + } + return sqltypes.MakeTrusted(querypb.Type_DECIMAL, trimPrecedingZeroes(txt.Bytes())), l, nil + + case TypeEnum: + switch metadata & 0xff { + case 1: + // One byte storage. + return sqltypes.MakeTrusted(querypb.Type_ENUM, + strconv.AppendUint(nil, uint64(data[pos]), 10)), 1, nil + case 2: + // Two bytes storage. + val := binary.LittleEndian.Uint16(data[pos : pos+2]) + return sqltypes.MakeTrusted(querypb.Type_ENUM, + strconv.AppendUint(nil, uint64(val), 10)), 2, nil + default: + return sqltypes.NULL, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected enum size: %v", metadata&0xff) + } + + case TypeSet: + l := int(metadata & 0xff) + return sqltypes.MakeTrusted(querypb.Type_SET, + data[pos:pos+l]), l, nil + + case TypeJSON, TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob: + // Only TypeBlob is used in binary logs, + // but supports others just in case. + l := 0 + switch metadata { + case 1: + l = int(uint32(data[pos])) + case 2: + l = int(uint32(data[pos]) | + uint32(data[pos+1])<<8) + case 3: + l = int(uint32(data[pos]) | + uint32(data[pos+1])<<8 | + uint32(data[pos+2])<<16) + case 4: + l = int(uint32(data[pos]) | + uint32(data[pos+1])<<8 | + uint32(data[pos+2])<<16 | + uint32(data[pos+3])<<24) + default: + return sqltypes.NULL, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unsupported blob metadata value %v (data: %v pos: %v)", metadata, data, pos) + } + pos += int(metadata) + + // For JSON, we parse the data, and emit SQL. + if typ == TypeJSON { + var err error + jsonData := data[pos : pos+l] + jsonVal, err := ParseBinaryJSON(jsonData) + if err != nil { + panic(err) + } + d := jsonVal.MarshalTo(nil) + return sqltypes.MakeTrusted(sqltypes.Expression, + d), l + int(metadata), nil + } + + return sqltypes.MakeTrusted(querypb.Type_VARBINARY, + data[pos:pos+l]), l + int(metadata), nil + + case TypeString: + // This may do String, Enum, and Set. The type is in + // metadata. If it's a string, then there will be more bits. + t := metadata >> 8 + if t == TypeEnum { + // We don't know the string values. So just use the + // numbers. + switch metadata & 0xff { + case 1: + // One byte storage. + return sqltypes.MakeTrusted(querypb.Type_UINT8, + strconv.AppendUint(nil, uint64(data[pos]), 10)), 1, nil + case 2: + // Two bytes storage. + val := binary.LittleEndian.Uint16(data[pos : pos+2]) + return sqltypes.MakeTrusted(querypb.Type_UINT16, + strconv.AppendUint(nil, uint64(val), 10)), 2, nil + default: + return sqltypes.NULL, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected enum size: %v", metadata&0xff) + } + } + if t == TypeSet { + // We don't know the set values. So just use the + // numbers. + l := int(metadata & 0xff) + var val uint64 + for i := 0; i < l; i++ { + val += uint64(data[pos+i]) << (uint(i) * 8) + } + return sqltypes.MakeTrusted(querypb.Type_UINT64, + strconv.AppendUint(nil, uint64(val), 10)), l, nil + } + // This is a real string. The length is weird. + max := int((((metadata >> 4) & 0x300) ^ 0x300) + (metadata & 0xff)) + // Length is encoded in 1 or 2 bytes. + if max > 255 { + // This code path exists due to https://bugs.mysql.com/bug.php?id=37426. + // CHAR types need to allocate 3 bytes per char. So, the length for CHAR(255) + // cannot be represented in 1 byte. This also means that this rule does not + // apply to BINARY data. + l := int(uint64(data[pos]) | + uint64(data[pos+1])<<8) + return sqltypes.MakeTrusted(querypb.Type_VARCHAR, + data[pos+2:pos+2+l]), l + 2, nil + } + l := int(data[pos]) + mdata := data[pos+1 : pos+1+l] + if sqltypes.IsBinary(field.Type) { + // For binary(n) column types, mysql pads the data on the right with nulls. However the binlog event contains + // the data without this padding. This causes several issues: + // * if a binary(n) column is part of the sharding key, the keyspace_id() returned during the copy phase + // (where the value is the result of a mysql query) is different from the one during replication + // (where the value is the one from the binlogs) + // * mysql where clause comparisons do not do the right thing without padding + // So for fixed length BINARY columns we right-pad it with nulls if necessary to match what MySQL returns. + // Because CHAR columns with a binary collation (e.g. utf8mb4_bin) have the same metadata as a BINARY column + // in binlog events, we also need to check for this case based on the underlying column type. + if l < max && strings.HasPrefix(strings.ToLower(field.ColumnType), "binary") { + paddedData := make([]byte, max) + copy(paddedData[:l], mdata) + mdata = paddedData + } + return sqltypes.MakeTrusted(querypb.Type_BINARY, mdata), l + 1, nil + } + return sqltypes.MakeTrusted(querypb.Type_VARCHAR, mdata), l + 1, nil + + case TypeGeometry: + l := 0 + switch metadata { + case 1: + l = int(uint32(data[pos])) + case 2: + l = int(uint32(data[pos]) | + uint32(data[pos+1])<<8) + case 3: + l = int(uint32(data[pos]) | + uint32(data[pos+1])<<8 | + uint32(data[pos+2])<<16) + case 4: + l = int(uint32(data[pos]) | + uint32(data[pos+1])<<8 | + uint32(data[pos+2])<<16 | + uint32(data[pos+3])<<24) + default: + return sqltypes.NULL, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unsupported geometry metadata value %v (data: %v pos: %v)", metadata, data, pos) + } + pos += int(metadata) + return sqltypes.MakeTrusted(querypb.Type_GEOMETRY, + data[pos:pos+l]), l + int(metadata), nil + + default: + return sqltypes.NULL, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unsupported type %v", typ) + } +} diff --git a/go/mysql/binlog_event_rbr_test.go b/go/mysql/binlog/rbr_test.go similarity index 98% rename from go/mysql/binlog_event_rbr_test.go rename to go/mysql/binlog/rbr_test.go index 357ab88422e..260af2f3821 100644 --- a/go/mysql/binlog_event_rbr_test.go +++ b/go/mysql/binlog/rbr_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package mysql +package binlog import ( "bytes" @@ -406,14 +406,14 @@ func TestCellLengthAndData(t *testing.T) { data: []byte{0x0f, 0x00, 0, 1, 0, 14, 0, 11, 0, 1, 0, 12, 12, 0, 97, 1, 98}, out: sqltypes.MakeTrusted(sqltypes.Expression, - []byte(`{"a":"b"}`)), + []byte(`{"a": "b"}`)), }, { typ: TypeJSON, metadata: 4, data: []byte{0x0f, 0x00, 0x00, 0x00, 0, 1, 0, 14, 0, 11, 0, 1, 0, 12, 12, 0, 97, 1, 98}, out: sqltypes.MakeTrusted(sqltypes.Expression, - []byte(`{"a":"b"}`)), + []byte(`{"a": "b"}`)), }, { typ: TypeEnum, metadata: 1, @@ -543,7 +543,7 @@ func TestCellLengthAndData(t *testing.T) { copy(padded[1:], tcase.data) // Test cellLength. - l, err := cellLength(padded, 1, tcase.typ, tcase.metadata) + l, err := CellLength(padded, 1, tcase.typ, tcase.metadata) if err != nil || l != len(tcase.data) { t.Errorf("testcase cellLength(%v,%v) returned unexpected result: %v %v was expected %v ", tcase.typ, tcase.data, l, err, len(tcase.data)) @@ -552,8 +552,8 @@ func TestCellLengthAndData(t *testing.T) { // Test CellValue. out, l, err := CellValue(padded, 1, tcase.typ, tcase.metadata, &querypb.Field{Type: tcase.styp}) if err != nil || l != len(tcase.data) || out.Type() != tcase.out.Type() || !bytes.Equal(out.Raw(), tcase.out.Raw()) { - t.Errorf("testcase cellData(%v,%v) returned unexpected result: %v %v %v, was expecting %v %v ", - tcase.typ, tcase.data, out, l, err, tcase.out, len(tcase.data)) + t.Errorf("testcase cellData(%v,%v) returned unexpected result: %v %v %v, was expecting %v %v \nwant: %s\ngot: %s", + tcase.typ, tcase.data, out, l, err, tcase.out, len(tcase.data), tcase.out.Raw(), out.Raw()) } } } diff --git a/go/mysql/binlog_dump.go b/go/mysql/binlog_dump.go index 8383a590c5e..d6768056974 100644 --- a/go/mysql/binlog_dump.go +++ b/go/mysql/binlog_dump.go @@ -20,6 +20,7 @@ import ( "encoding/binary" "io" + "vitess.io/vitess/go/mysql/replication" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -51,7 +52,7 @@ func (c *Conn) parseComBinlogDump(data []byte) (logFile string, binlogPos uint32 return logFile, binlogPos, nil } -func (c *Conn) parseComBinlogDumpGTID(data []byte) (logFile string, logPos uint64, position Position, err error) { +func (c *Conn) parseComBinlogDumpGTID(data []byte) (logFile string, logPos uint64, position replication.Position, err error) { // see https://dev.mysql.com/doc/internals/en/com-binlog-dump-gtid.html pos := 1 @@ -80,7 +81,7 @@ func (c *Conn) parseComBinlogDumpGTID(data []byte) (logFile string, logPos uint6 return logFile, logPos, position, readPacketErr } if gtid := string(data[pos : pos+int(dataSize)]); gtid != "" { - position, err = DecodePosition(gtid) + position, err = replication.DecodePosition(gtid) if err != nil { return logFile, logPos, position, err } diff --git a/go/mysql/binlog_event.go b/go/mysql/binlog_event.go index 822d5c65447..e58cb9b254c 100644 --- a/go/mysql/binlog_event.go +++ b/go/mysql/binlog_event.go @@ -19,6 +19,7 @@ package mysql import ( "fmt" + "vitess.io/vitess/go/mysql/replication" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) @@ -95,7 +96,7 @@ type BinlogEvent interface { // GTID returns the GTID from the event, and if this event // also serves as a BEGIN statement. // This is only valid if IsGTID() returns true. - GTID(BinlogFormat) (GTID, bool, error) + GTID(BinlogFormat) (replication.GTID, bool, error) // Query returns a Query struct representing data from a QUERY_EVENT. // This is only valid if IsQuery() returns true. Query(BinlogFormat) (Query, error) @@ -107,7 +108,7 @@ type BinlogEvent interface { Rand(BinlogFormat) (uint64, uint64, error) // PreviousGTIDs returns the Position from the event. // This is only valid if IsPreviousGTIDs() returns true. - PreviousGTIDs(BinlogFormat) (Position, error) + PreviousGTIDs(BinlogFormat) (replication.Position, error) // TableID returns the table ID for a TableMap, UpdateRows, // WriteRows or DeleteRows event. @@ -121,6 +122,9 @@ type BinlogEvent interface { // IsWriteRows(), IsUpdateRows(), or IsDeleteRows() returns // true. Rows(BinlogFormat, *TableMap) (Rows, error) + // TransactionPayload returns a list of BinlogEvents contained + // within the compressed transaction. + TransactionPayload(BinlogFormat) ([]BinlogEvent, error) // NextLogFile returns the name of the next binary log file & pos. // This is only valid if IsRotate() returns true NextLogFile(BinlogFormat) (string, uint64, error) @@ -133,8 +137,9 @@ type BinlogEvent interface { // IsPseudo is for custom implementations of GTID. IsPseudo() bool - // IsCompressed returns true if a compressed event is found (binlog_transaction_compression=ON) - IsCompressed() bool + // IsTransactionPayload returns true if a compressed transaction + // payload event is found (binlog_transaction_compression=ON). + IsTransactionPayload() bool // Bytes returns the binary representation of the event Bytes() []byte @@ -283,6 +288,11 @@ func (b *Bitmap) Count() int { return b.count } +// Bits returns the underlying bitmap. +func (b *Bitmap) Bits() []byte { + return b.data[:] +} + // Bit returned the value of a given bit in the Bitmap. func (b *Bitmap) Bit(index int) bool { byteIndex := index / 8 diff --git a/go/mysql/binlog_event_common.go b/go/mysql/binlog_event_common.go index e481a69ceae..f95ed847e0a 100644 --- a/go/mysql/binlog_event_common.go +++ b/go/mysql/binlog_event_common.go @@ -50,10 +50,21 @@ import ( // http://dev.mysql.com/doc/internals/en/event-header-fields.html type binlogEvent []byte +const ( + // Default length of the fixed header for v4 events. + BinlogFixedHeaderLen = 19 + // The offset from 0 where the type is stored as 1 byte. + BinlogEventTypeOffset = 4 + // Offset from 0 where the 4 byte length is stored. + BinlogEventLenOffset = 9 + // Byte length of the checksum suffix when the CRC32 algorithm is used. + BinlogCRC32ChecksumLen = 4 +) + // dataBytes returns the event bytes without header prefix and without checksum suffix func (ev binlogEvent) dataBytes(f BinlogFormat) []byte { data := ev.Bytes()[f.HeaderLength:] - data = data[0 : len(data)-4] + data = data[0 : len(data)-BinlogCRC32ChecksumLen] return data } @@ -62,14 +73,14 @@ func (ev binlogEvent) IsValid() bool { bufLen := len(ev.Bytes()) // The buffer must be at least 19 bytes to contain a valid header. - if bufLen < 19 { + if bufLen < BinlogFixedHeaderLen { return false } // It's now safe to use methods that examine header fields. // Let's see if the event is right about its own size. evLen := ev.Length() - if evLen < 19 || evLen != uint32(bufLen) { + if evLen < BinlogFixedHeaderLen || evLen != uint32(bufLen) { return false } @@ -86,7 +97,7 @@ func (ev binlogEvent) Bytes() []byte { // Type returns the type_code field from the header. func (ev binlogEvent) Type() byte { - return ev.Bytes()[4] + return ev.Bytes()[BinlogEventTypeOffset] } // Flags returns the flags field from the header. @@ -190,11 +201,6 @@ func (ev binlogEvent) IsPseudo() bool { return false } -// IsCompressed returns true if a compressed event is found (binlog_transaction_compression=ON) -func (ev binlogEvent) IsCompressed() bool { - return ev.Type() == eCompressedEvent -} - // Format implements BinlogEvent.Format(). // // Expected format (L = total length of event data): @@ -211,7 +217,7 @@ func (ev binlogEvent) IsCompressed() bool { func (ev binlogEvent) Format() (f BinlogFormat, err error) { // FORMAT_DESCRIPTION_EVENT has a fixed header size of 19 // because we have to read it before we know the header_length. - data := ev.Bytes()[19:] + data := ev.Bytes()[BinlogFixedHeaderLen:] f.FormatVersion = binary.LittleEndian.Uint16(data[:2]) if f.FormatVersion != 4 { @@ -219,8 +225,8 @@ func (ev binlogEvent) Format() (f BinlogFormat, err error) { } f.ServerVersion = string(bytes.TrimRight(data[2:2+50], "\x00")) f.HeaderLength = data[2+50+4] - if f.HeaderLength < 19 { - return f, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "header length = %d, should be >= 19", f.HeaderLength) + if f.HeaderLength < BinlogFixedHeaderLen { + return f, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "header length = %d, should be >= %d", f.HeaderLength, BinlogFixedHeaderLen) } // MySQL/MariaDB 5.6.1+ always adds a 4-byte checksum to the end of a diff --git a/go/mysql/binlog_event_compression.go b/go/mysql/binlog_event_compression.go new file mode 100644 index 00000000000..325bfeb4827 --- /dev/null +++ b/go/mysql/binlog_event_compression.go @@ -0,0 +1,269 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mysql + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + + "github.com/klauspost/compress/zstd" + + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +// This file contains code related to handling compression related +// events. More specifically today, compressed transaction payloads: +// See: https://dev.mysql.com/doc/refman/en/binary-log-transaction-compression.html + +// Transaction Payload wire protocol fields: +// https://dev.mysql.com/doc/dev/mysql-server/latest/classbinary__log_1_1codecs_1_1binary_1_1Transaction__payload.html +const ( + payloadHeaderEndMark = iota + payloadSizeField + payloadCompressionTypeField + payloadUncompressedSizeField +) + +// Compression algorithms that are supported (only zstd today +// in MySQL 8.0): +// https://dev.mysql.com/doc/refman/8.0/en/binary-log-transaction-compression.html +const ( + TransactionPayloadCompressionZstd = 0 + TransactionPayloadCompressionNone = 255 +) + +var TransactionPayloadCompressionTypes = map[uint64]string{ + TransactionPayloadCompressionZstd: "ZSTD", + TransactionPayloadCompressionNone: "NONE", +} + +// Create a reader that caches decompressors. This is used for +// smaller events that we want to handle entirely using in-memory +// buffers. +var zstdDecoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) + +// At what size should we switch from the in-memory buffer +// decoding to streaming mode -- which is slower, but does not +// require everything be done in memory. +const zstdInMemoryDecompressorMaxSize = 128 << (10 * 2) // 128MiB + +type TransactionPayload struct { + Size uint64 + CompressionType uint64 + UncompressedSize uint64 + Payload []byte + Events []BinlogEvent +} + +// IsTransactionPayload returns true if a compressed transaction +// payload event is found (binlog_transaction_compression=ON). +func (ev binlogEvent) IsTransactionPayload() bool { + return ev.Type() == eTransactionPayloadEvent +} + +// TransactionPayload returns the BinlogEvents contained within +// the compressed transaction. +// The following event types are compressed as part of the +// transaction payload: +// +// QUERY_EVENT = 2 +// INTVAR_EVENT = 5 +// APPEND_BLOCK_EVENT = 9 +// DELETE_FILE_EVENT = 11 +// RAND_EVENT = 13 +// USER_VAR_EVENT = 14 +// XID_EVENT = 16 +// BEGIN_LOAD_QUERY_EVENT = 17 +// EXECUTE_LOAD_QUERY_EVENT = 18 +// TABLE_MAP_EVENT = 19 +// WRITE_ROWS_EVENT_V1 = 23 +// UPDATE_ROWS_EVENT_V1 = 24 +// DELETE_ROWS_EVENT_V1 = 25 +// IGNORABLE_LOG_EVENT = 28 +// ROWS_QUERY_LOG_EVENT = 29 +// WRITE_ROWS_EVENT = 30 +// UPDATE_ROWS_EVENT = 31 +// DELETE_ROWS_EVENT = 32 +// XA_PREPARE_LOG_EVENT = 38 +// PARTIAL_UPDATE_ROWS_EVENT = 39 +// +// When transaction compression is enabled, the GTID log event has +// the following fields: +// +-----------------------------------------+ +// | field_type (1-9 bytes) | +// +-----------------------------------------+ +// | field_size (1-9 bytes) | +// +-----------------------------------------+ +// | m_payload (1 to N bytes) | +// +-----------------------------------------+ +// | field_type (1-9 bytes) | +// +-----------------------------------------+ +// | field_size (1-9 bytes) | +// +-----------------------------------------+ +// | m_compression_type (1 to 9 bytes) | +// +-----------------------------------------+ +// | field_type (1-9 bytes) | +// +-----------------------------------------+ +// | field_size (1-9 bytes) | +// +-----------------------------------------+ +// | m_uncompressed_size size (0 to 9 bytes) | +// +-----------------------------------------+ +// +// We need to extract the compressed transaction payload from the GTID +// event, decompress it with zstd, and then process the internal events +// (e.g. Query and Row events) that make up the transaction. +func (ev binlogEvent) TransactionPayload(format BinlogFormat) ([]BinlogEvent, error) { + tp := &TransactionPayload{} + if err := tp.Decode(ev.Bytes()[format.HeaderLength:]); err != nil { + return nil, vterrors.Wrapf(err, "error decoding transaction payload event") + } + return tp.Events, nil +} + +// Decode decodes and decompresses the payload. +func (tp *TransactionPayload) Decode(data []byte) error { + if err := tp.read(data); err != nil { + return err + } + return tp.decode() +} + +// read unmarshalls the transaction payload event into the +// TransactionPayload struct. The compressed payload itself will still +// need to be decoded -- meaning decompressing it and extracting the +// internal events. +func (tp *TransactionPayload) read(data []byte) error { + pos := uint64(0) + + for { + fieldType, ok := readFixedLenUint64(data[pos : pos+1]) + if !ok { + return vterrors.New(vtrpcpb.Code_INTERNAL, "error reading field type") + } + pos++ + + if fieldType == payloadHeaderEndMark { + tp.Payload = data[pos:] + return nil // we're done + } + + fieldLen, ok := readFixedLenUint64(data[pos : pos+1]) + if !ok { + return vterrors.New(vtrpcpb.Code_INTERNAL, "error reading field length") + } + pos++ + + switch fieldType { + case payloadSizeField: + tp.Size, ok = readFixedLenUint64(data[pos : pos+fieldLen]) + if !ok { + return vterrors.New(vtrpcpb.Code_INTERNAL, "error reading payload size") + } + case payloadCompressionTypeField: + tp.CompressionType, ok = readFixedLenUint64(data[pos : pos+fieldLen]) + if !ok { + return vterrors.New(vtrpcpb.Code_INTERNAL, "error reading compression type") + } + case payloadUncompressedSizeField: + tp.UncompressedSize, ok = readFixedLenUint64(data[pos : pos+fieldLen]) + if !ok { + return vterrors.New(vtrpcpb.Code_INTERNAL, "error reading uncompressed payload size") + } + } + + pos += fieldLen + } +} + +// decode decompresses the payload and extracts the internal binlog +// events. +func (tp *TransactionPayload) decode() error { + if tp.CompressionType != TransactionPayloadCompressionZstd { + return vterrors.New(vtrpcpb.Code_INTERNAL, + fmt.Sprintf("TransactionPayload has unsupported compression type of %d", tp.CompressionType)) + } + + decompressedPayload, err := tp.decompress() + decompressedPayloadLen := uint64(len(decompressedPayload)) + if err != nil { + return vterrors.Wrapf(err, "error decompressing transaction payload") + } + + pos := uint64(0) + + for { + eventLenPosEnd := pos + BinlogEventLenOffset + 4 + if eventLenPosEnd > decompressedPayloadLen { // No more events in the payload + break + } + eventLen := uint64(binary.LittleEndian.Uint32(decompressedPayload[pos+BinlogEventLenOffset : eventLenPosEnd])) + if pos+eventLen > decompressedPayloadLen { + return vterrors.New(vtrpcpb.Code_INTERNAL, + fmt.Sprintf("[BUG] event length of %d at pos %d in decompressed transaction payload is beyond the expected payload length of %d", + eventLen, pos, decompressedPayloadLen)) + } + eventData := decompressedPayload[pos : pos+eventLen] + ble := NewMysql56BinlogEvent(eventData) + tp.Events = append(tp.Events, ble) + + pos += eventLen + } + + return nil +} + +// Decompress the payload. +func (tp *TransactionPayload) decompress() ([]byte, error) { + if len(tp.Payload) == 0 { + return []byte{}, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "cannot decompress empty payload") + } + var ( + decompressedBytes []byte + err error + ) + + // Switch to slower but less memory intensive stream mode for larger payloads. + if tp.UncompressedSize > zstdInMemoryDecompressorMaxSize { + in := bytes.NewReader(tp.Payload) + streamDecoder, err := zstd.NewReader(in) + if err != nil { + return nil, err + } + defer streamDecoder.Close() + out := io.Writer(&bytes.Buffer{}) + _, err = io.Copy(out, streamDecoder) + if err != nil { + return nil, err + } + decompressedBytes = out.(*bytes.Buffer).Bytes() + } else { // Process smaller payloads using in-memory buffers. + decompressedBytes, err = zstdDecoder.DecodeAll(tp.Payload, nil) + if err != nil { + return nil, err + } + } + + if uint64(len(decompressedBytes)) != tp.UncompressedSize { + return []byte{}, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, + fmt.Sprintf("decompressed size %d does not match expected size %d", len(decompressedBytes), tp.UncompressedSize)) + } + + return decompressedBytes, nil +} diff --git a/go/mysql/binlog_event_filepos.go b/go/mysql/binlog_event_filepos.go index c4b63ddaf50..4edc4bb91ff 100644 --- a/go/mysql/binlog_event_filepos.go +++ b/go/mysql/binlog_event_filepos.go @@ -19,6 +19,8 @@ package mysql import ( "encoding/binary" "fmt" + + "vitess.io/vitess/go/mysql/replication" ) // filePosBinlogEvent wraps a raw packet buffer and provides methods to examine @@ -38,7 +40,7 @@ func newFilePosBinlogEvent(buf []byte) *filePosBinlogEvent { return &filePosBinlogEvent{binlogEvent: binlogEvent(buf)} } -func (*filePosBinlogEvent) GTID(BinlogFormat) (GTID, bool, error) { +func (*filePosBinlogEvent) GTID(BinlogFormat) (replication.GTID, bool, error) { return nil, false, nil } @@ -51,8 +53,8 @@ func (*filePosBinlogEvent) IsGTID() bool { return false } -func (*filePosBinlogEvent) PreviousGTIDs(BinlogFormat) (Position, error) { - return Position{}, fmt.Errorf("filePos should not provide PREVIOUS_GTIDS_EVENT events") +func (*filePosBinlogEvent) PreviousGTIDs(BinlogFormat) (replication.Position, error) { + return replication.Position{}, fmt.Errorf("filePos should not provide PREVIOUS_GTIDS_EVENT events") } // StripChecksum implements BinlogEvent.StripChecksum(). @@ -213,7 +215,7 @@ func (ev filePosFakeEvent) Format() (BinlogFormat, error) { return BinlogFormat{}, nil } -func (ev filePosFakeEvent) GTID(BinlogFormat) (GTID, bool, error) { +func (ev filePosFakeEvent) GTID(BinlogFormat) (replication.GTID, bool, error) { return nil, false, nil } @@ -229,8 +231,8 @@ func (ev filePosFakeEvent) Rand(BinlogFormat) (uint64, uint64, error) { return 0, 0, nil } -func (ev filePosFakeEvent) PreviousGTIDs(BinlogFormat) (Position, error) { - return Position{}, nil +func (ev filePosFakeEvent) PreviousGTIDs(BinlogFormat) (replication.Position, error) { + return replication.Position{}, nil } func (ev filePosFakeEvent) TableID(BinlogFormat) uint64 { @@ -245,6 +247,10 @@ func (ev filePosFakeEvent) Rows(BinlogFormat, *TableMap) (Rows, error) { return Rows{}, nil } +func (ev filePosFakeEvent) TransactionPayload(BinlogFormat) ([]BinlogEvent, error) { + return []BinlogEvent{}, nil +} + func (ev filePosFakeEvent) NextLogFile(BinlogFormat) (string, uint64, error) { return "", 0, nil } @@ -253,7 +259,7 @@ func (ev filePosFakeEvent) IsPseudo() bool { return false } -func (ev filePosFakeEvent) IsCompressed() bool { +func (ev filePosFakeEvent) IsTransactionPayload() bool { return false } @@ -266,7 +272,7 @@ func (ev filePosFakeEvent) Bytes() []byte { // filePosGTIDEvent is a fake GTID event for filePos. type filePosGTIDEvent struct { filePosFakeEvent - gtid filePosGTID + gtid replication.FilePosGTID } func newFilePosGTIDEvent(file string, pos uint32, timestamp uint32) filePosGTIDEvent { @@ -274,9 +280,9 @@ func newFilePosGTIDEvent(file string, pos uint32, timestamp uint32) filePosGTIDE filePosFakeEvent: filePosFakeEvent{ timestamp: timestamp, }, - gtid: filePosGTID{ - file: file, - pos: pos, + gtid: replication.FilePosGTID{ + File: file, + Pos: pos, }, } } @@ -289,6 +295,6 @@ func (ev filePosGTIDEvent) StripChecksum(f BinlogFormat) (BinlogEvent, []byte, e return ev, nil, nil } -func (ev filePosGTIDEvent) GTID(BinlogFormat) (GTID, bool, error) { +func (ev filePosGTIDEvent) GTID(BinlogFormat) (replication.GTID, bool, error) { return ev.gtid, false, nil } diff --git a/go/mysql/binlog_event_json_test.go b/go/mysql/binlog_event_json_test.go deleted file mode 100644 index 711965386ed..00000000000 --- a/go/mysql/binlog_event_json_test.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mysql - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestJSONTypes(t *testing.T) { - testcases := []struct { - data []byte - expected string - isMap bool - }{{ - data: []byte{}, - expected: `null`, - }, { - data: []byte{0, 1, 0, 14, 0, 11, 0, 1, 0, 12, 12, 0, 97, 1, 98}, - expected: `{"a":"b"}`, - }, { - data: []byte{0, 1, 0, 12, 0, 11, 0, 1, 0, 5, 2, 0, 97}, - expected: `{"a":2}`, - }, { - data: []byte{0, 1, 0, 29, 0, 11, 0, 4, 0, 0, 15, 0, 97, 115, 100, 102, 1, 0, 14, 0, 11, 0, 3, 0, 5, 123, 0, 102, 111, 111}, - expected: `{"asdf":{"foo":123}}`, - }, { - data: []byte{2, 2, 0, 10, 0, 5, 1, 0, 5, 2, 0}, - expected: `[1,2]`, - }, { - data: []byte{0, 4, 0, 60, 0, 32, 0, 1, 0, 33, 0, 1, 0, 34, 0, 2, 0, 36, 0, 2, 0, 12, 38, 0, 12, 40, 0, 12, 42, 0, 2, 46, 0, 97, 99, 97, 98, 98, 99, 1, 98, 1, 100, 3, 97, 98, 99, 2, 0, 14, 0, 12, 10, 0, 12, 12, 0, 1, 120, 1, 121}, - expected: `{"a":"b","c":"d","ab":"abc","bc":["x","y"]}`, - isMap: true, - }, { - data: []byte{2, 1, 0, 37, 0, 12, 8, 0, 0, 4, 104, 101, 114, 101}, - expected: `["here"]`, - }, { - data: []byte{2, 3, 0, 37, 0, 12, 13, 0, 2, 18, 0, 12, 33, 0, 4, 104, 101, 114, 101, 2, 0, 15, 0, 12, 10, 0, 12, 12, 0, 1, 73, 2, 97, 109, 3, 33, 33, 33}, - expected: `["here",["I","am"],"!!!"]`, - }, { - data: []byte{12, 13, 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103}, - expected: `"scalar string"`, - }, { - data: []byte{0, 1, 0, 149, 0, 11, 0, 6, 0, 12, 17, 0, 115, 99, 111, 112, 101, 115, 130, 1, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 66, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 66, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 69, 65, 65, 65, 65, 65, 65, 69, 65, 65, 65, 65, 65, 65, 56, 65, 65, 65, 66, 103, 65, 65, 65, 65, 65, 65, 66, 65, 65, 65, 65, 67, 65, 65, 65, 65, 65, 65, 65, 65, 65, 84, 216, 142, 184}, - expected: `{"scopes":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAEAAAAAAEAAAAAA8AAABgAAAAAABAAAACAAAAAAAAA"}`, - }, { - // repeat the same string 10 times, to test the case where length of string - // requires 2 bytes to store - data: []byte{12, 130, 1, - 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, - 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, - 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, - 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, - 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, - 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, - 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, - 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, - 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103, - 115, 99, 97, 108, 97, 114, 32, 115, 116, 114, 105, 110, 103}, - expected: `"scalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar stringscalar string"`, - }, { - data: []byte{4, 1}, - expected: `true`, - }, { - data: []byte{4, 2}, - expected: `false`, - }, { - data: []byte{4, 0}, - expected: `null`, - }, { - data: []byte{5, 255, 255}, - expected: `-1`, - }, { - data: []byte{6, 1, 0}, - expected: `1`, - }, { - data: []byte{5, 255, 127}, - expected: `32767`, - }, { - data: []byte{7, 0, 128, 0, 0}, - expected: `32768`, - }, { - data: []byte{5, 0, 128}, - expected: `-32768`, - }, { - data: []byte{7, 255, 127, 255, 255}, - expected: `-32769`, - }, { - data: []byte{7, 255, 255, 255, 127}, - expected: `2.147483647e+09`, - }, { - data: []byte{8, 0, 128, 0, 0}, - expected: `32768`, - }, { - data: []byte{9, 0, 0, 0, 128, 0, 0, 0, 0}, - expected: `2.147483648e+09`, - }, { - data: []byte{7, 0, 0, 0, 128}, - expected: `-2.147483648e+09`, - }, { - data: []byte{9, 255, 255, 255, 127, 255, 255, 255, 255}, - expected: `-2.147483649e+09`, - }, { - data: []byte{10, 255, 255, 255, 255, 255, 255, 255, 255}, - expected: `1.8446744073709552e+19`, - }, { - data: []byte{9, 0, 0, 0, 0, 0, 0, 0, 128}, - expected: `-9.223372036854776e+18`, - }, { - data: []byte{11, 110, 134, 27, 240, 249, 33, 9, 64}, - expected: `3.14159`, - }, { - data: []byte{0, 0, 0, 4, 0}, - expected: `{}`, - }, { - data: []byte{2, 0, 0, 4, 0}, - expected: `[]`, - }, { - // opaque, datetime - data: []byte{15, 12, 8, 0, 0, 0, 25, 118, 31, 149, 25}, - expected: `"2015-01-15 23:24:25.000000"`, - }, { - // opaque, time - data: []byte{15, 11, 8, 0, 0, 0, 25, 118, 1, 0, 0}, - expected: `"23:24:25.000000"`, - }, { - // opaque, time - data: []byte{15, 11, 8, 192, 212, 1, 25, 118, 1, 0, 0}, - expected: `"23:24:25.120000"`, - }, { - // opaque, date - data: []byte{15, 10, 8, 0, 0, 0, 0, 0, 30, 149, 25}, - expected: `"2015-01-15"`, - }, { - // opaque, decimal - data: []byte{15, 246, 8, 13, 4, 135, 91, 205, 21, 4, 210}, - expected: `1.234567891234e+08`, - }, { - // opaque, bit field. Not yet implemented. - data: []byte{15, 16, 2, 202, 254}, - expected: `opaque type 16 is not supported yet, data [2 202 254]`, - }} - for _, tc := range testcases { - t.Run(tc.expected, func(t *testing.T) { - val, err := getJSONValue(tc.data) - if err != nil { - require.Equal(t, tc.expected, err.Error()) - return - } - if tc.isMap { // map keys sorting order is not guaranteed, so we convert back to golang maps and compare - var gotJSON, wantJSON map[string]any - err = json.Unmarshal([]byte(val), &gotJSON) - require.NoError(t, err) - err = json.Unmarshal([]byte(tc.expected), &wantJSON) - require.NoError(t, err) - require.EqualValues(t, wantJSON, gotJSON) - return - } - require.Equal(t, tc.expected, val) - }) - } -} diff --git a/go/mysql/binlog_event_make.go b/go/mysql/binlog_event_make.go index 0688fa9540b..52a8c453517 100644 --- a/go/mysql/binlog_event_make.go +++ b/go/mysql/binlog_event_make.go @@ -19,6 +19,8 @@ package mysql import ( "encoding/binary" "hash/crc32" + + "vitess.io/vitess/go/mysql/replication" ) const ( @@ -292,7 +294,7 @@ func NewIntVarEvent(f BinlogFormat, s *FakeBinlogStream, typ byte, value uint64) // NewMariaDBGTIDEvent returns a MariaDB specific GTID event. // It ignores the Server in the gtid, instead uses the FakeBinlogStream.ServerID. -func NewMariaDBGTIDEvent(f BinlogFormat, s *FakeBinlogStream, gtid MariadbGTID, hasBegin bool) BinlogEvent { +func NewMariaDBGTIDEvent(f BinlogFormat, s *FakeBinlogStream, gtid replication.MariadbGTID, hasBegin bool) BinlogEvent { length := 8 + // sequence 4 + // domain 1 // flags2 diff --git a/go/mysql/binlog_event_make_test.go b/go/mysql/binlog_event_make_test.go index df9bc9d2d3f..12d8a54ff97 100644 --- a/go/mysql/binlog_event_make_test.go +++ b/go/mysql/binlog_event_make_test.go @@ -23,6 +23,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + + "vitess.io/vitess/go/mysql/binlog" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) @@ -148,7 +151,7 @@ func TestMariadDBGTIDEVent(t *testing.T) { s.ServerID = 0x87654321 // With built-in begin. - event := NewMariaDBGTIDEvent(f, s, MariadbGTID{Domain: 0, Sequence: 0x123456789abcdef0}, true) + event := NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0x123456789abcdef0}, true) require.True(t, event.IsValid(), "NewMariaDBGTIDEvent().IsValid() is false") require.True(t, event.IsGTID(), "NewMariaDBGTIDEvent().IsGTID() if false") @@ -159,7 +162,7 @@ func TestMariadDBGTIDEVent(t *testing.T) { require.NoError(t, err, "NewMariaDBGTIDEvent().GTID() returned error: %v", err) require.True(t, hasBegin, "NewMariaDBGTIDEvent() didn't store hasBegin properly.") - mgtid, ok := gtid.(MariadbGTID) + mgtid, ok := gtid.(replication.MariadbGTID) require.True(t, ok, "NewMariaDBGTIDEvent().GTID() returned a non-MariaDBGTID GTID") if mgtid.Domain != 0 || mgtid.Server != 0x87654321 || mgtid.Sequence != 0x123456789abcdef0 { @@ -167,7 +170,7 @@ func TestMariadDBGTIDEVent(t *testing.T) { } // Without built-in begin. - event = NewMariaDBGTIDEvent(f, s, MariadbGTID{Domain: 0, Sequence: 0x123456789abcdef0}, false) + event = NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0x123456789abcdef0}, false) require.True(t, event.IsValid(), "NewMariaDBGTIDEvent().IsValid() is false") require.True(t, event.IsGTID(), "NewMariaDBGTIDEvent().IsGTID() if false") @@ -178,7 +181,7 @@ func TestMariadDBGTIDEVent(t *testing.T) { require.NoError(t, err, "NewMariaDBGTIDEvent().GTID() returned error: %v", err) require.False(t, hasBegin, "NewMariaDBGTIDEvent() didn't store hasBegin properly.") - mgtid, ok = gtid.(MariadbGTID) + mgtid, ok = gtid.(replication.MariadbGTID) require.True(t, ok, "NewMariaDBGTIDEvent().GTID() returned a non-MariaDBGTID GTID") if mgtid.Domain != 0 || mgtid.Server != 0x87654321 || mgtid.Sequence != 0x123456789abcdef0 { @@ -195,16 +198,16 @@ func TestTableMapEvent(t *testing.T) { Database: "my_database", Name: "my_table", Types: []byte{ - TypeLongLong, - TypeLongLong, - TypeLongLong, - TypeLongLong, - TypeLongLong, - TypeTime, - TypeLongLong, - TypeLongLong, - TypeLongLong, - TypeVarchar, + binlog.TypeLongLong, + binlog.TypeLongLong, + binlog.TypeLongLong, + binlog.TypeLongLong, + binlog.TypeLongLong, + binlog.TypeTime, + binlog.TypeLongLong, + binlog.TypeLongLong, + binlog.TypeLongLong, + binlog.TypeVarchar, }, CanBeNull: NewServerBitmap(10), Metadata: []uint16{ @@ -250,7 +253,7 @@ func TestLargeTableMapEvent(t *testing.T) { metadata := make([]uint16, 0, colLen) for i := 0; i < colLen; i++ { - types = append(types, TypeLongLong) + types = append(types, binlog.TypeLongLong) metadata = append(metadata, 0) } @@ -302,8 +305,8 @@ func TestRowsEvent(t *testing.T) { Database: "my_database", Name: "my_table", Types: []byte{ - TypeLong, - TypeVarchar, + binlog.TypeLong, + binlog.TypeVarchar, }, CanBeNull: NewServerBitmap(2), Metadata: []uint16{ @@ -424,7 +427,7 @@ func TestLargeRowsEvent(t *testing.T) { metadata := make([]uint16, 0, colLen) for i := 0; i < colLen; i++ { - types = append(types, TypeLong) + types = append(types, binlog.TypeLong) metadata = append(metadata, 0) } diff --git a/go/mysql/binlog_event_mariadb.go b/go/mysql/binlog_event_mariadb.go index d2cdd6ac18f..f2c0ec8f369 100644 --- a/go/mysql/binlog_event_mariadb.go +++ b/go/mysql/binlog_event_mariadb.go @@ -19,6 +19,7 @@ package mysql import ( "encoding/binary" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -59,13 +60,13 @@ func (ev mariadbBinlogEvent) IsGTID() bool { // 8 sequence number // 4 domain ID // 1 flags2 -func (ev mariadbBinlogEvent) GTID(f BinlogFormat) (GTID, bool, error) { +func (ev mariadbBinlogEvent) GTID(f BinlogFormat) (replication.GTID, bool, error) { const FLStandalone = 1 data := ev.Bytes()[f.HeaderLength:] flags2 := data[8+4] - return MariadbGTID{ + return replication.MariadbGTID{ Sequence: binary.LittleEndian.Uint64(data[:8]), Domain: binary.LittleEndian.Uint32(data[8 : 8+4]), Server: ev.ServerID(), @@ -73,8 +74,8 @@ func (ev mariadbBinlogEvent) GTID(f BinlogFormat) (GTID, bool, error) { } // PreviousGTIDs implements BinlogEvent.PreviousGTIDs(). -func (ev mariadbBinlogEvent) PreviousGTIDs(f BinlogFormat) (Position, error) { - return Position{}, vterrors.Errorf(vtrpc.Code_INTERNAL, "MariaDB should not provide PREVIOUS_GTIDS_EVENT events") +func (ev mariadbBinlogEvent) PreviousGTIDs(f BinlogFormat) (replication.Position, error) { + return replication.Position{}, vterrors.Errorf(vtrpc.Code_INTERNAL, "MariaDB should not provide PREVIOUS_GTIDS_EVENT events") } // StripChecksum implements BinlogEvent.StripChecksum(). @@ -87,8 +88,8 @@ func (ev mariadbBinlogEvent) StripChecksum(f BinlogFormat) (BinlogEvent, []byte, // Checksum is the last 4 bytes of the event buffer. data := ev.Bytes() length := len(data) - checksum := data[length-4:] - data = data[:length-4] + checksum := data[length-BinlogCRC32ChecksumLen:] + data = data[:length-BinlogCRC32ChecksumLen] return mariadbBinlogEvent{binlogEvent: binlogEvent(data)}, checksum, nil } } diff --git a/go/mysql/binlog_event_mariadb_test.go b/go/mysql/binlog_event_mariadb_test.go index 1464da0e573..c4eeac39c38 100644 --- a/go/mysql/binlog_event_mariadb_test.go +++ b/go/mysql/binlog_event_mariadb_test.go @@ -22,6 +22,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/replication" ) // sample event data @@ -99,7 +101,7 @@ func TestMariadbStandaloneBinlogEventGTID(t *testing.T) { } input := mariadbBinlogEvent{binlogEvent: binlogEvent(mariadbStandaloneGTIDEvent)} - want := MariadbGTID{Domain: 0, Server: 62344, Sequence: 9} + want := replication.MariadbGTID{Domain: 0, Server: 62344, Sequence: 9} got, hasBegin, err := input.GTID(f) assert.NoError(t, err, "unexpected error: %v", err) assert.False(t, hasBegin, "unexpected hasBegin") @@ -115,7 +117,7 @@ func TestMariadbBinlogEventGTID(t *testing.T) { } input := mariadbBinlogEvent{binlogEvent: binlogEvent(mariadbBeginGTIDEvent)} - want := MariadbGTID{Domain: 0, Server: 62344, Sequence: 10} + want := replication.MariadbGTID{Domain: 0, Server: 62344, Sequence: 10} got, hasBegin, err := input.GTID(f) assert.NoError(t, err, "unexpected error: %v", err) assert.True(t, hasBegin, "unexpected !hasBegin") diff --git a/go/mysql/binlog_event_mysql56.go b/go/mysql/binlog_event_mysql56.go index b4c4e3c0bca..3f931310ba9 100644 --- a/go/mysql/binlog_event_mysql56.go +++ b/go/mysql/binlog_event_mysql56.go @@ -19,6 +19,7 @@ package mysql import ( "encoding/binary" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -59,22 +60,22 @@ func (ev mysql56BinlogEvent) IsGTID() bool { // 1 flags // 16 SID (server UUID) // 8 GNO (sequence number, signed int) -func (ev mysql56BinlogEvent) GTID(f BinlogFormat) (GTID, bool, error) { +func (ev mysql56BinlogEvent) GTID(f BinlogFormat) (replication.GTID, bool, error) { data := ev.Bytes()[f.HeaderLength:] - var sid SID + var sid replication.SID copy(sid[:], data[1:1+16]) gno := int64(binary.LittleEndian.Uint64(data[1+16 : 1+16+8])) - return Mysql56GTID{Server: sid, Sequence: gno}, false /* hasBegin */, nil + return replication.Mysql56GTID{Server: sid, Sequence: gno}, false /* hasBegin */, nil } // PreviousGTIDs implements BinlogEvent.PreviousGTIDs(). -func (ev mysql56BinlogEvent) PreviousGTIDs(f BinlogFormat) (Position, error) { +func (ev mysql56BinlogEvent) PreviousGTIDs(f BinlogFormat) (replication.Position, error) { data := ev.Bytes()[f.HeaderLength:] - set, err := NewMysql56GTIDSetFromSIDBlock(data) + set, err := replication.NewMysql56GTIDSetFromSIDBlock(data) if err != nil { - return Position{}, err + return replication.Position{}, err } - return Position{ + return replication.Position{ GTIDSet: set, }, nil } @@ -89,8 +90,8 @@ func (ev mysql56BinlogEvent) StripChecksum(f BinlogFormat) (BinlogEvent, []byte, // Checksum is the last 4 bytes of the event buffer. data := ev.Bytes() length := len(data) - checksum := data[length-4:] - data = data[:length-4] + checksum := data[length-BinlogCRC32ChecksumLen:] + data = data[:length-BinlogCRC32ChecksumLen] return mysql56BinlogEvent{binlogEvent: binlogEvent(data)}, checksum, nil default: // MySQL 5.6 does not guarantee that future checksum algorithms will be diff --git a/go/mysql/binlog_event_mysql56_test.go b/go/mysql/binlog_event_mysql56_test.go index d1cb16499e7..e5fa3545278 100644 --- a/go/mysql/binlog_event_mysql56_test.go +++ b/go/mysql/binlog_event_mysql56_test.go @@ -17,17 +17,22 @@ limitations under the License. package mysql import ( + "fmt" "reflect" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/replication" ) // Sample event data for MySQL 5.6. var ( - mysql56FormatEvent = NewMysql56BinlogEvent([]byte{0x78, 0x4e, 0x49, 0x55, 0xf, 0x64, 0x0, 0x0, 0x0, 0x74, 0x0, 0x0, 0x0, 0x78, 0x0, 0x0, 0x0, 0x1, 0x0, 0x4, 0x0, 0x35, 0x2e, 0x36, 0x2e, 0x32, 0x34, 0x2d, 0x6c, 0x6f, 0x67, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x78, 0x4e, 0x49, 0x55, 0x13, 0x38, 0xd, 0x0, 0x8, 0x0, 0x12, 0x0, 0x4, 0x4, 0x4, 0x4, 0x12, 0x0, 0x0, 0x5c, 0x0, 0x4, 0x1a, 0x8, 0x0, 0x0, 0x0, 0x8, 0x8, 0x8, 0x2, 0x0, 0x0, 0x0, 0xa, 0xa, 0xa, 0x19, 0x19, 0x0, 0x1, 0x18, 0x4a, 0xf, 0xca}) - mysql56GTIDEvent = NewMysql56BinlogEvent([]byte{0xff, 0x4e, 0x49, 0x55, 0x21, 0x64, 0x0, 0x0, 0x0, 0x30, 0x0, 0x0, 0x0, 0xf5, 0x2, 0x0, 0x0, 0x0, 0x0, 0x1, 0x43, 0x91, 0x92, 0xbd, 0xf3, 0x7c, 0x11, 0xe4, 0xbb, 0xeb, 0x2, 0x42, 0xac, 0x11, 0x3, 0x5a, 0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x48, 0x45, 0x82, 0x27}) + mysql56FormatEvent = NewMysql56BinlogEvent([]byte{0x78, 0x4e, 0x49, 0x55, 0xf, 0x64, 0x0, 0x0, 0x0, 0x74, 0x0, 0x0, 0x0, 0x78, 0x0, 0x0, 0x0, 0x1, 0x0, 0x4, 0x0, 0x35, 0x2e, 0x36, 0x2e, 0x32, 0x34, 0x2d, 0x6c, 0x6f, 0x67, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x78, 0x4e, 0x49, 0x55, 0x13, 0x38, 0xd, 0x0, 0x8, 0x0, 0x12, 0x0, 0x4, 0x4, 0x4, 0x4, 0x12, 0x0, 0x0, 0x5c, 0x0, 0x4, 0x1a, 0x8, 0x0, 0x0, 0x0, 0x8, 0x8, 0x8, 0x2, 0x0, 0x0, 0x0, 0xa, 0xa, 0xa, 0x19, 0x19, 0x0, 0x1, 0x18, 0x4a, 0xf, 0xca}) + mysql56GTIDEvent = NewMysql56BinlogEvent([]byte{0xff, 0x4e, 0x49, 0x55, 0x21, 0x64, 0x0, 0x0, 0x0, 0x30, 0x0, 0x0, 0x0, 0xf5, 0x2, 0x0, 0x0, 0x0, 0x0, 0x1, 0x43, 0x91, 0x92, 0xbd, 0xf3, 0x7c, 0x11, 0xe4, 0xbb, 0xeb, 0x2, 0x42, 0xac, 0x11, 0x3, 0x5a, 0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x48, 0x45, 0x82, 0x27}) + // This is the result of: begin; insert into customer values (1, "mlord@planetscale.com"), (2, "sup@planetscale.com"); commit; + mysql56TransactionPayloadEvent = NewMysql56BinlogEvent([]byte{0xc7, 0xe1, 0x4b, 0x64, 0x28, 0x5b, 0xd2, 0xc7, 0x19, 0xdb, 0x00, 0x00, 0x00, 0x3a, 0x50, 0x00, 0x00, 0x00, 0x00, 0x02, 0x01, 0x00, 0x03, 0x03, 0xfc, 0xfe, 0x00, 0x01, 0x01, 0xb8, 0x00, 0x28, 0xb5, 0x2f, 0xfd, 0x00, 0x58, 0x64, 0x05, 0x00, 0xf2, 0x49, 0x23, 0x2a, 0xa0, 0x27, 0x69, 0x0c, 0xff, 0xe8, 0x06, 0xeb, 0xfe, 0xc3, 0xab, 0x8a, 0x7b, 0xc0, 0x36, 0x42, 0x5c, 0x6f, 0x1b, 0x2f, 0xfb, 0x6e, 0xc4, 0x9a, 0xe6, 0x6e, 0x6b, 0xda, 0x08, 0xf1, 0x37, 0x7e, 0xff, 0xb8, 0x6c, 0xbc, 0x27, 0x3c, 0xb7, 0x4f, 0xee, 0x14, 0xff, 0xaf, 0x09, 0x06, 0x69, 0xe3, 0x12, 0x68, 0x4a, 0x6e, 0xc3, 0xe1, 0x28, 0xaf, 0x3f, 0xc8, 0x14, 0x1c, 0xc3, 0x60, 0xce, 0xe3, 0x1e, 0x18, 0x4c, 0x63, 0xa1, 0x35, 0x90, 0x79, 0x04, 0xe8, 0xa9, 0xeb, 0x4a, 0x1b, 0xd7, 0x41, 0x53, 0x72, 0x17, 0xa4, 0x23, 0xa4, 0x47, 0x68, 0x00, 0xa2, 0x37, 0xee, 0xc1, 0xc7, 0x71, 0x30, 0x24, 0x19, 0xfd, 0x78, 0x49, 0x1b, 0x97, 0xd2, 0x94, 0xdc, 0x85, 0xa2, 0x21, 0xc1, 0xb0, 0x63, 0x8d, 0x7b, 0x0f, 0x32, 0x87, 0x07, 0xe2, 0x39, 0xf0, 0x7c, 0x3e, 0x01, 0xfe, 0x13, 0x8f, 0x11, 0xd0, 0x05, 0x9f, 0xbc, 0x18, 0x59, 0x91, 0x36, 0x2e, 0x6d, 0x4a, 0x6e, 0x0b, 0x00, 0x5e, 0x28, 0x10, 0xc0, 0x02, 0x50, 0x77, 0xe0, 0x64, 0x30, 0x02, 0x9e, 0x09, 0x54, 0xec, 0x80, 0x6d, 0x07, 0xa4, 0xc1, 0x7d, 0x60, 0xe4, 0x01, 0x78, 0x01, 0x01, 0x00, 0x00}) mysql56QueryEvent = NewMysql56BinlogEvent([]byte{0xff, 0x4e, 0x49, 0x55, 0x2, 0x64, 0x0, 0x0, 0x0, 0x77, 0x0, 0x0, 0x0, 0xdb, 0x3, 0x0, 0x0, 0x0, 0x0, 0x3d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4, 0x0, 0x0, 0x21, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6, 0x3, 0x73, 0x74, 0x64, 0x4, 0x8, 0x0, 0x8, 0x0, 0x21, 0x0, 0xc, 0x1, 0x74, 0x65, 0x73, 0x74, 0x0, 0x74, 0x65, 0x73, 0x74, 0x0, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x28, 0x6d, 0x73, 0x67, 0x29, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x20, 0x28, 0x27, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x27, 0x29, 0x92, 0x12, 0x79, 0xc3}) mysql56SemiSyncNoAckQueryEvent = NewMysql56BinlogEvent([]byte{0xef, 0x00, 0xff, 0x4e, 0x49, 0x55, 0x2, 0x64, 0x0, 0x0, 0x0, 0x77, 0x0, 0x0, 0x0, 0xdb, 0x3, 0x0, 0x0, 0x0, 0x0, 0x3d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4, 0x0, 0x0, 0x21, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6, 0x3, 0x73, 0x74, 0x64, 0x4, 0x8, 0x0, 0x8, 0x0, 0x21, 0x0, 0xc, 0x1, 0x74, 0x65, 0x73, 0x74, 0x0, 0x74, 0x65, 0x73, 0x74, 0x0, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x28, 0x6d, 0x73, 0x67, 0x29, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x20, 0x28, 0x27, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x27, 0x29, 0x92, 0x12, 0x79, 0xc3}) mysql56SemiSyncAckQueryEvent = NewMysql56BinlogEvent([]byte{0xef, 0x01, 0xff, 0x4e, 0x49, 0x55, 0x2, 0x64, 0x0, 0x0, 0x0, 0x77, 0x0, 0x0, 0x0, 0xdb, 0x3, 0x0, 0x0, 0x0, 0x0, 0x3d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4, 0x0, 0x0, 0x21, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6, 0x3, 0x73, 0x74, 0x64, 0x4, 0x8, 0x0, 0x8, 0x0, 0x21, 0x0, 0xc, 0x1, 0x74, 0x65, 0x73, 0x74, 0x0, 0x74, 0x65, 0x73, 0x74, 0x0, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x28, 0x6d, 0x73, 0x67, 0x29, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x20, 0x28, 0x27, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x27, 0x29, 0x92, 0x12, 0x79, 0xc3}) @@ -76,37 +81,71 @@ func TestMysql56GTID(t *testing.T) { require.NoError(t, err, "StripChecksum() error: %v", err) require.True(t, input.IsGTID(), "IsGTID() = false, want true") - want, _ := parseMysql56GTID("439192bd-f37c-11e4-bbeb-0242ac11035a:4") + want := replication.Mysql56GTID{ + Server: replication.SID{0x43, 0x91, 0x92, 0xbd, 0xf3, 0x7c, 0x11, 0xe4, 0xbb, 0xeb, 0x2, 0x42, 0xac, 0x11, 0x3, 0x5a}, + Sequence: 4, + } got, hasBegin, err := input.GTID(format) require.NoError(t, err, "GTID() error: %v", err) assert.False(t, hasBegin, "GTID() returned hasBegin") assert.Equal(t, want, got, "GTID() = %#v, want %#v", got, want) - } -func TestMysql56ParseGTID(t *testing.T) { - input := "00010203-0405-0607-0809-0A0B0C0D0E0F:56789" - want := Mysql56GTID{ - Server: SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - Sequence: 56789, +func TestMysql56DecodeTransactionPayload(t *testing.T) { + format := NewMySQL56BinlogFormat() + tableMap := &TableMap{} + require.True(t, mysql56TransactionPayloadEvent.IsTransactionPayload()) + + // The generated event is the result of the following SQL being executed in vtgate + // against the commerce keyspace: + // begin; insert into customer values (1, "mlord@planetscale.com"), (2, "sup@planetscale.com"); commit; + // All of these below internal events are encoded in the compressed transaction + // payload event. + want := []string{ + "BEGIN", // Query event + "vt_commerce.customer", // TableMap event + "[1 mlord@planetscale.com]", // WriteRows event + "[2 sup@planetscale.com]", // WriteRows event + "COMMIT", // XID event } - - got, err := parseMysql56GTID(input) - require.NoError(t, err, "unexpected error: %v", err) - assert.Equal(t, want, got, "(&mysql56{}).ParseGTID(%#v) = %#v, want %#v", input, got, want) - + internalEvents, err := mysql56TransactionPayloadEvent.TransactionPayload(format) + require.NoError(t, err) + eventStrs := []string{} + for _, ev := range internalEvents { + switch { + case ev.IsTableMap(): + tableMap, err = ev.TableMap(format) + require.NoError(t, err) + eventStrs = append(eventStrs, fmt.Sprintf("%s.%s", tableMap.Database, tableMap.Name)) + case ev.IsQuery(): + query, err := ev.Query(format) + require.NoError(t, err) + eventStrs = append(eventStrs, query.SQL) + case ev.IsWriteRows(): + rows, err := ev.Rows(format, tableMap) + require.NoError(t, err) + for i := range rows.Rows { + rowStr, err := rows.StringValuesForTests(tableMap, i) + require.NoError(t, err) + eventStrs = append(eventStrs, fmt.Sprintf("%v", rowStr)) + } + case ev.IsXID(): + eventStrs = append(eventStrs, "COMMIT") + } + } + require.Equal(t, want, eventStrs) } func TestMysql56ParsePosition(t *testing.T) { input := "00010203-0405-0607-0809-0a0b0c0d0e0f:1-2" - sid := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - var set GTIDSet = Mysql56GTIDSet{} - set = set.AddGTID(Mysql56GTID{Server: sid, Sequence: 1}) - set = set.AddGTID(Mysql56GTID{Server: sid, Sequence: 2}) - want := Position{GTIDSet: set} + sid := replication.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + var set replication.GTIDSet = replication.Mysql56GTIDSet{} + set = set.AddGTID(replication.Mysql56GTID{Server: sid, Sequence: 1}) + set = set.AddGTID(replication.Mysql56GTID{Server: sid, Sequence: 2}) + want := replication.Position{GTIDSet: set} - got, err := ParsePosition(Mysql56FlavorID, input) + got, err := replication.ParsePosition(replication.Mysql56FlavorID, input) assert.NoError(t, err, "unexpected error: %v", err) assert.True(t, got.Equal(want), "(&mysql56{}).ParsePosition(%#v) = %#v, want %#v", input, got, want) diff --git a/go/mysql/binlog_event_rbr.go b/go/mysql/binlog_event_rbr.go index 4c38f317a10..58777d4cfba 100644 --- a/go/mysql/binlog_event_rbr.go +++ b/go/mysql/binlog_event_rbr.go @@ -17,24 +17,15 @@ limitations under the License. package mysql import ( - "bytes" "encoding/binary" - "fmt" - "math" - "strconv" - "strings" - "time" - "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/mysql/binlog" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" querypb "vitess.io/vitess/go/vt/proto/query" ) -// ZeroTimestamp is the special value 0 for a timestamp. -var ZeroTimestamp = []byte("0000-00-00 00:00:00") - // TableMap implements BinlogEvent.TableMap(). // // Expected format (L = total length of event data): @@ -109,19 +100,19 @@ func (ev binlogEvent) TableMap(f BinlogFormat) (*TableMap, error) { // metadataLength returns how many bytes are used for metadata, based on a type. func metadataLength(typ byte) int { switch typ { - case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate: + case binlog.TypeDecimal, binlog.TypeTiny, binlog.TypeShort, binlog.TypeLong, binlog.TypeNull, binlog.TypeTimestamp, binlog.TypeLongLong, binlog.TypeInt24, binlog.TypeDate, binlog.TypeTime, binlog.TypeDateTime, binlog.TypeYear, binlog.TypeNewDate: // No data here. return 0 - case TypeFloat, TypeDouble, TypeTimestamp2, TypeDateTime2, TypeTime2, TypeJSON, TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob, TypeGeometry: + case binlog.TypeFloat, binlog.TypeDouble, binlog.TypeTimestamp2, binlog.TypeDateTime2, binlog.TypeTime2, binlog.TypeJSON, binlog.TypeTinyBlob, binlog.TypeMediumBlob, binlog.TypeLongBlob, binlog.TypeBlob, binlog.TypeGeometry: // One byte. return 1 - case TypeNewDecimal, TypeEnum, TypeSet, TypeString: + case binlog.TypeNewDecimal, binlog.TypeEnum, binlog.TypeSet, binlog.TypeString: // Two bytes, Big Endian because of crazy encoding. return 2 - case TypeVarchar, TypeBit, TypeVarString: + case binlog.TypeVarchar, binlog.TypeBit, binlog.TypeVarString: // Two bytes, Little Endian return 2 @@ -145,19 +136,19 @@ func metadataTotalLength(types []byte) int { func metadataRead(data []byte, pos int, typ byte) (uint16, int, error) { switch typ { - case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate: + case binlog.TypeDecimal, binlog.TypeTiny, binlog.TypeShort, binlog.TypeLong, binlog.TypeNull, binlog.TypeTimestamp, binlog.TypeLongLong, binlog.TypeInt24, binlog.TypeDate, binlog.TypeTime, binlog.TypeDateTime, binlog.TypeYear, binlog.TypeNewDate: // No data here. return 0, pos, nil - case TypeFloat, TypeDouble, TypeTimestamp2, TypeDateTime2, TypeTime2, TypeJSON, TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob, TypeGeometry: + case binlog.TypeFloat, binlog.TypeDouble, binlog.TypeTimestamp2, binlog.TypeDateTime2, binlog.TypeTime2, binlog.TypeJSON, binlog.TypeTinyBlob, binlog.TypeMediumBlob, binlog.TypeLongBlob, binlog.TypeBlob, binlog.TypeGeometry: // One byte. return uint16(data[pos]), pos + 1, nil - case TypeNewDecimal, TypeEnum, TypeSet, TypeString: + case binlog.TypeNewDecimal, binlog.TypeEnum, binlog.TypeSet, binlog.TypeString: // Two bytes, Big Endian because of crazy encoding. return uint16(data[pos])<<8 + uint16(data[pos+1]), pos + 2, nil - case TypeVarchar, TypeBit, TypeVarString: + case binlog.TypeVarchar, binlog.TypeBit, binlog.TypeVarString: // Two bytes, Little Endian return uint16(data[pos]) + uint16(data[pos+1])<<8, pos + 2, nil @@ -171,22 +162,22 @@ func metadataRead(data []byte, pos int, typ byte) (uint16, int, error) { func metadataWrite(data []byte, pos int, typ byte, value uint16) int { switch typ { - case TypeDecimal, TypeTiny, TypeShort, TypeLong, TypeNull, TypeTimestamp, TypeLongLong, TypeInt24, TypeDate, TypeTime, TypeDateTime, TypeYear, TypeNewDate: + case binlog.TypeDecimal, binlog.TypeTiny, binlog.TypeShort, binlog.TypeLong, binlog.TypeNull, binlog.TypeTimestamp, binlog.TypeLongLong, binlog.TypeInt24, binlog.TypeDate, binlog.TypeTime, binlog.TypeDateTime, binlog.TypeYear, binlog.TypeNewDate: // No data here. return pos - case TypeFloat, TypeDouble, TypeTimestamp2, TypeDateTime2, TypeTime2, TypeJSON, TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob, TypeGeometry: + case binlog.TypeFloat, binlog.TypeDouble, binlog.TypeTimestamp2, binlog.TypeDateTime2, binlog.TypeTime2, binlog.TypeJSON, binlog.TypeTinyBlob, binlog.TypeMediumBlob, binlog.TypeLongBlob, binlog.TypeBlob, binlog.TypeGeometry: // One byte. data[pos] = byte(value) return pos + 1 - case TypeNewDecimal, TypeEnum, TypeSet, TypeString: + case binlog.TypeNewDecimal, binlog.TypeEnum, binlog.TypeSet, binlog.TypeString: // Two bytes, Big Endian because of crazy encoding. data[pos] = byte(value >> 8) data[pos+1] = byte(value) return pos + 2 - case TypeVarchar, TypeBit, TypeVarString: + case binlog.TypeVarchar, binlog.TypeBit, binlog.TypeVarString: // Two bytes, Little Endian data[pos] = byte(value) data[pos+1] = byte(value >> 8) @@ -198,765 +189,6 @@ func metadataWrite(data []byte, pos int, typ byte, value uint16) int { } } -var dig2bytes = []int{0, 1, 1, 2, 2, 3, 3, 4, 4, 4} - -// cellLength returns the new position after the field with the given -// type is read. -func cellLength(data []byte, pos int, typ byte, metadata uint16) (int, error) { - switch typ { - case TypeNull: - return 0, nil - case TypeTiny, TypeYear: - return 1, nil - case TypeShort: - return 2, nil - case TypeInt24: - return 3, nil - case TypeLong, TypeFloat, TypeTimestamp: - return 4, nil - case TypeLongLong, TypeDouble: - return 8, nil - case TypeDate, TypeTime, TypeNewDate: - return 3, nil - case TypeDateTime: - return 8, nil - case TypeVarchar, TypeVarString: - // Length is encoded in 1 or 2 bytes. - if metadata > 255 { - l := int(uint64(data[pos]) | - uint64(data[pos+1])<<8) - return l + 2, nil - } - l := int(data[pos]) - return l + 1, nil - case TypeBit: - // bitmap length is in metadata, as: - // upper 8 bits: bytes length - // lower 8 bits: bit length - nbits := ((metadata >> 8) * 8) + (metadata & 0xFF) - return (int(nbits) + 7) / 8, nil - case TypeTimestamp2: - // metadata has number of decimals. One byte encodes - // two decimals. - return 4 + (int(metadata)+1)/2, nil - case TypeDateTime2: - // metadata has number of decimals. One byte encodes - // two decimals. - return 5 + (int(metadata)+1)/2, nil - case TypeTime2: - // metadata has number of decimals. One byte encodes - // two decimals. - return 3 + (int(metadata)+1)/2, nil - case TypeNewDecimal: - precision := int(metadata >> 8) - scale := int(metadata & 0xff) - // Example: - // NNNNNNNNNNNN.MMMMMM - // 12 bytes 6 bytes - // precision is 18 - // scale is 6 - // storage is done by groups of 9 digits: - // - 32 bits are used to store groups of 9 digits. - // - any leftover digit is stored in: - // - 1 byte for 1 and 2 digits - // - 2 bytes for 3 and 4 digits - // - 3 bytes for 5 and 6 digits - // - 4 bytes for 7 and 8 digits (would also work for 9) - // both sides of the dot are stored separately. - // In this example, we'd have: - // - 2 bytes to store the first 3 full digits. - // - 4 bytes to store the next 9 full digits. - // - 3 bytes to store the 6 fractional digits. - intg := precision - scale - intg0 := intg / 9 - frac0 := scale / 9 - intg0x := intg - intg0*9 - frac0x := scale - frac0*9 - return intg0*4 + dig2bytes[intg0x] + frac0*4 + dig2bytes[frac0x], nil - case TypeEnum, TypeSet: - return int(metadata & 0xff), nil - case TypeJSON, TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob, TypeGeometry: - // Of the Blobs, only TypeBlob is used in binary logs, - // but supports others just in case. - switch metadata { - case 1: - return 1 + int(uint32(data[pos])), nil - case 2: - return 2 + int(uint32(data[pos])| - uint32(data[pos+1])<<8), nil - case 3: - return 3 + int(uint32(data[pos])| - uint32(data[pos+1])<<8| - uint32(data[pos+2])<<16), nil - case 4: - return 4 + int(uint32(data[pos])| - uint32(data[pos+1])<<8| - uint32(data[pos+2])<<16| - uint32(data[pos+3])<<24), nil - default: - return 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unsupported blob/geometry metadata value %v (data: %v pos: %v)", metadata, data, pos) - } - case TypeString: - // This may do String, Enum, and Set. The type is in - // metadata. If it's a string, then there will be more bits. - // This will give us the maximum length of the field. - t := metadata >> 8 - if t == TypeEnum || t == TypeSet { - return int(metadata & 0xff), nil - } - max := int((((metadata >> 4) & 0x300) ^ 0x300) + (metadata & 0xff)) - // Length is encoded in 1 or 2 bytes. - if max > 255 { - l := int(uint64(data[pos]) | - uint64(data[pos+1])<<8) - return l + 2, nil - } - l := int(data[pos]) - return l + 1, nil - - default: - return 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unsupported type %v (data: %v pos: %v)", typ, data, pos) - } -} - -// printTimestamp is a helper method to append a timestamp into a bytes.Buffer, -// and return the Buffer. -func printTimestamp(v uint32) *bytes.Buffer { - if v == 0 { - return bytes.NewBuffer(ZeroTimestamp) - } - - t := time.Unix(int64(v), 0).UTC() - year, month, day := t.Date() - hour, minute, second := t.Clock() - - result := &bytes.Buffer{} - fmt.Fprintf(result, "%04d-%02d-%02d %02d:%02d:%02d", year, int(month), day, hour, minute, second) - return result -} - -// CellValue returns the data for a cell as a sqltypes.Value, and how -// many bytes it takes. It uses source type in querypb.Type and vitess type -// byte to determine general shared aspects of types and the querypb.Field to -// determine other info specifically about its underlying column (SQL column -// type, column length, charset, etc) -func CellValue(data []byte, pos int, typ byte, metadata uint16, field *querypb.Field) (sqltypes.Value, int, error) { - switch typ { - case TypeTiny: - if sqltypes.IsSigned(field.Type) { - return sqltypes.MakeTrusted(querypb.Type_INT8, - strconv.AppendInt(nil, int64(int8(data[pos])), 10)), 1, nil - } - return sqltypes.MakeTrusted(querypb.Type_UINT8, - strconv.AppendUint(nil, uint64(data[pos]), 10)), 1, nil - case TypeYear: - val := data[pos] - if val == 0 { - return sqltypes.MakeTrusted(querypb.Type_YEAR, - []byte{'0', '0', '0', '0'}), 1, nil - } - return sqltypes.MakeTrusted(querypb.Type_YEAR, - strconv.AppendUint(nil, uint64(data[pos])+1900, 10)), 1, nil - case TypeShort: - val := binary.LittleEndian.Uint16(data[pos : pos+2]) - if sqltypes.IsSigned(field.Type) { - return sqltypes.MakeTrusted(querypb.Type_INT16, - strconv.AppendInt(nil, int64(int16(val)), 10)), 2, nil - } - return sqltypes.MakeTrusted(querypb.Type_UINT16, - strconv.AppendUint(nil, uint64(val), 10)), 2, nil - case TypeInt24: - if sqltypes.IsSigned(field.Type) && data[pos+2]&128 > 0 { - // Negative number, have to extend the sign. - val := int32(uint32(data[pos]) + - uint32(data[pos+1])<<8 + - uint32(data[pos+2])<<16 + - uint32(255)<<24) - return sqltypes.MakeTrusted(querypb.Type_INT24, - strconv.AppendInt(nil, int64(val), 10)), 3, nil - } - // Positive number. - val := uint64(data[pos]) + - uint64(data[pos+1])<<8 + - uint64(data[pos+2])<<16 - return sqltypes.MakeTrusted(querypb.Type_UINT24, - strconv.AppendUint(nil, val, 10)), 3, nil - case TypeLong: - val := binary.LittleEndian.Uint32(data[pos : pos+4]) - if sqltypes.IsSigned(field.Type) { - return sqltypes.MakeTrusted(querypb.Type_INT32, - strconv.AppendInt(nil, int64(int32(val)), 10)), 4, nil - } - return sqltypes.MakeTrusted(querypb.Type_UINT32, - strconv.AppendUint(nil, uint64(val), 10)), 4, nil - case TypeFloat: - val := binary.LittleEndian.Uint32(data[pos : pos+4]) - fval := math.Float32frombits(val) - return sqltypes.MakeTrusted(querypb.Type_FLOAT32, - strconv.AppendFloat(nil, float64(fval), 'E', -1, 32)), 4, nil - case TypeDouble: - val := binary.LittleEndian.Uint64(data[pos : pos+8]) - fval := math.Float64frombits(val) - return sqltypes.MakeTrusted(querypb.Type_FLOAT64, - strconv.AppendFloat(nil, fval, 'E', -1, 64)), 8, nil - case TypeTimestamp: - val := binary.LittleEndian.Uint32(data[pos : pos+4]) - txt := printTimestamp(val) - return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - txt.Bytes()), 4, nil - case TypeLongLong: - val := binary.LittleEndian.Uint64(data[pos : pos+8]) - if sqltypes.IsSigned(field.Type) { - return sqltypes.MakeTrusted(querypb.Type_INT64, - strconv.AppendInt(nil, int64(val), 10)), 8, nil - } - return sqltypes.MakeTrusted(querypb.Type_UINT64, - strconv.AppendUint(nil, val, 10)), 8, nil - case TypeDate, TypeNewDate: - val := uint32(data[pos]) + - uint32(data[pos+1])<<8 + - uint32(data[pos+2])<<16 - day := val & 31 - month := val >> 5 & 15 - year := val >> 9 - return sqltypes.MakeTrusted(querypb.Type_DATE, - []byte(fmt.Sprintf("%04d-%02d-%02d", year, month, day))), 3, nil - case TypeTime: - var hour, minute, second int32 - if data[pos+2]&128 > 0 { - // Negative number, have to extend the sign. - val := int32(uint32(data[pos]) + - uint32(data[pos+1])<<8 + - uint32(data[pos+2])<<16 + - uint32(255)<<24) - hour = val / 10000 - minute = -((val % 10000) / 100) - second = -(val % 100) - } else { - val := int32(data[pos]) + - int32(data[pos+1])<<8 + - int32(data[pos+2])<<16 - hour = val / 10000 - minute = (val % 10000) / 100 - second = val % 100 - } - return sqltypes.MakeTrusted(querypb.Type_TIME, - []byte(fmt.Sprintf("%02d:%02d:%02d", hour, minute, second))), 3, nil - case TypeDateTime: - val := binary.LittleEndian.Uint64(data[pos : pos+8]) - d := val / 1000000 - t := val % 1000000 - year := d / 10000 - month := (d % 10000) / 100 - day := d % 100 - hour := t / 10000 - minute := (t % 10000) / 100 - second := t % 100 - return sqltypes.MakeTrusted(querypb.Type_DATETIME, - []byte(fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second))), 8, nil - case TypeVarchar, TypeVarString: - // We trust that typ is compatible with the field.Type - // Length is encoded in 1 or 2 bytes. - typeToUse := querypb.Type_VARCHAR - if field.Type == querypb.Type_VARBINARY || field.Type == querypb.Type_BINARY || field.Type == querypb.Type_BLOB { - typeToUse = field.Type - } - if metadata > 255 { - l := int(uint64(data[pos]) | - uint64(data[pos+1])<<8) - return sqltypes.MakeTrusted(typeToUse, - data[pos+2:pos+2+l]), l + 2, nil - } - l := int(data[pos]) - return sqltypes.MakeTrusted(typeToUse, - data[pos+1:pos+1+l]), l + 1, nil - case TypeBit: - // The contents is just the bytes, quoted. - nbits := ((metadata >> 8) * 8) + (metadata & 0xFF) - l := (int(nbits) + 7) / 8 - return sqltypes.MakeTrusted(querypb.Type_BIT, - data[pos:pos+l]), l, nil - case TypeTimestamp2: - second := binary.BigEndian.Uint32(data[pos : pos+4]) - txt := printTimestamp(second) - switch metadata { - case 1: - decimals := int(data[pos+4]) - fmt.Fprintf(txt, ".%01d", decimals/10) - return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - txt.Bytes()), 5, nil - case 2: - decimals := int(data[pos+4]) - fmt.Fprintf(txt, ".%02d", decimals) - return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - txt.Bytes()), 5, nil - case 3: - decimals := int(data[pos+4])<<8 + - int(data[pos+5]) - fmt.Fprintf(txt, ".%03d", decimals/10) - return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - txt.Bytes()), 6, nil - case 4: - decimals := int(data[pos+4])<<8 + - int(data[pos+5]) - fmt.Fprintf(txt, ".%04d", decimals) - return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - txt.Bytes()), 6, nil - case 5: - decimals := int(data[pos+4])<<16 + - int(data[pos+5])<<8 + - int(data[pos+6]) - fmt.Fprintf(txt, ".%05d", decimals/10) - return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - txt.Bytes()), 7, nil - case 6: - decimals := int(data[pos+4])<<16 + - int(data[pos+5])<<8 + - int(data[pos+6]) - fmt.Fprintf(txt, ".%06d", decimals) - return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - txt.Bytes()), 7, nil - } - return sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, - txt.Bytes()), 4, nil - case TypeDateTime2: - ymdhms := (uint64(data[pos])<<32 | - uint64(data[pos+1])<<24 | - uint64(data[pos+2])<<16 | - uint64(data[pos+3])<<8 | - uint64(data[pos+4])) - uint64(0x8000000000) - ymd := ymdhms >> 17 - ym := ymd >> 5 - hms := ymdhms % (1 << 17) - - day := ymd % (1 << 5) - month := ym % 13 - year := ym / 13 - - second := hms % (1 << 6) - minute := (hms >> 6) % (1 << 6) - hour := hms >> 12 - - txt := &bytes.Buffer{} - fmt.Fprintf(txt, "%04d-%02d-%02d %02d:%02d:%02d", year, month, day, hour, minute, second) - - switch metadata { - case 1: - decimals := int(data[pos+5]) - fmt.Fprintf(txt, ".%01d", decimals/10) - return sqltypes.MakeTrusted(querypb.Type_DATETIME, - txt.Bytes()), 6, nil - case 2: - decimals := int(data[pos+5]) - fmt.Fprintf(txt, ".%02d", decimals) - return sqltypes.MakeTrusted(querypb.Type_DATETIME, - txt.Bytes()), 6, nil - case 3: - decimals := int(data[pos+5])<<8 + - int(data[pos+6]) - fmt.Fprintf(txt, ".%03d", decimals/10) - return sqltypes.MakeTrusted(querypb.Type_DATETIME, - txt.Bytes()), 7, nil - case 4: - decimals := int(data[pos+5])<<8 + - int(data[pos+6]) - fmt.Fprintf(txt, ".%04d", decimals) - return sqltypes.MakeTrusted(querypb.Type_DATETIME, - txt.Bytes()), 7, nil - case 5: - decimals := int(data[pos+5])<<16 + - int(data[pos+6])<<8 + - int(data[pos+7]) - fmt.Fprintf(txt, ".%05d", decimals/10) - return sqltypes.MakeTrusted(querypb.Type_DATETIME, - txt.Bytes()), 8, nil - case 6: - decimals := int(data[pos+5])<<16 + - int(data[pos+6])<<8 + - int(data[pos+7]) - fmt.Fprintf(txt, ".%06d", decimals) - return sqltypes.MakeTrusted(querypb.Type_DATETIME, - txt.Bytes()), 8, nil - } - return sqltypes.MakeTrusted(querypb.Type_DATETIME, - txt.Bytes()), 5, nil - case TypeTime2: - hms := (int64(data[pos])<<16 | - int64(data[pos+1])<<8 | - int64(data[pos+2])) - 0x800000 - sign := "" - if hms < 0 { - hms = -hms - sign = "-" - } - - fracStr := "" - switch metadata { - case 1: - frac := int(data[pos+3]) - if sign == "-" && frac != 0 { - hms-- - frac = 0x100 - frac - } - fracStr = fmt.Sprintf(".%.1d", frac/10) - case 2: - frac := int(data[pos+3]) - if sign == "-" && frac != 0 { - hms-- - frac = 0x100 - frac - } - fracStr = fmt.Sprintf(".%.2d", frac) - case 3: - frac := int(data[pos+3])<<8 | - int(data[pos+4]) - if sign == "-" && frac != 0 { - hms-- - frac = 0x10000 - frac - } - fracStr = fmt.Sprintf(".%.3d", frac/10) - case 4: - frac := int(data[pos+3])<<8 | - int(data[pos+4]) - if sign == "-" && frac != 0 { - hms-- - frac = 0x10000 - frac - } - fracStr = fmt.Sprintf(".%.4d", frac) - case 5: - frac := int(data[pos+3])<<16 | - int(data[pos+4])<<8 | - int(data[pos+5]) - if sign == "-" && frac != 0 { - hms-- - frac = 0x1000000 - frac - } - fracStr = fmt.Sprintf(".%.5d", frac/10) - case 6: - frac := int(data[pos+3])<<16 | - int(data[pos+4])<<8 | - int(data[pos+5]) - if sign == "-" && frac != 0 { - hms-- - frac = 0x1000000 - frac - } - fracStr = fmt.Sprintf(".%.6d", frac) - } - - hour := (hms >> 12) % (1 << 10) - minute := (hms >> 6) % (1 << 6) - second := hms % (1 << 6) - return sqltypes.MakeTrusted(querypb.Type_TIME, - []byte(fmt.Sprintf("%v%02d:%02d:%02d%v", sign, hour, minute, second, fracStr))), 3 + (int(metadata)+1)/2, nil - - case TypeNewDecimal: - precision := int(metadata >> 8) // total digits number - scale := int(metadata & 0xff) // number of fractional digits - intg := precision - scale // number of full digits - intg0 := intg / 9 // number of 32-bits digits - intg0x := intg - intg0*9 // leftover full digits - frac0 := scale / 9 // number of 32 bits fractionals - frac0x := scale - frac0*9 // leftover fractionals - - l := intg0*4 + dig2bytes[intg0x] + frac0*4 + dig2bytes[frac0x] - - // Copy the data so we can change it. Otherwise - // decoding is just too hard. - d := make([]byte, l) - copy(d, data[pos:pos+l]) - - txt := &bytes.Buffer{} - - isNegative := (d[0] & 0x80) == 0 - d[0] ^= 0x80 // First bit is inverted. - if isNegative { - // Negative numbers are just inverted bytes. - txt.WriteByte('-') - for i := range d { - d[i] ^= 0xff - } - } - - // first we have the leftover full digits - var val uint32 - switch dig2bytes[intg0x] { - case 0: - // nothing to do - case 1: - // one byte, up to two digits - val = uint32(d[0]) - case 2: - // two bytes, up to 4 digits - val = uint32(d[0])<<8 + - uint32(d[1]) - case 3: - // 3 bytes, up to 6 digits - val = uint32(d[0])<<16 + - uint32(d[1])<<8 + - uint32(d[2]) - case 4: - // 4 bytes, up to 8 digits (9 digits would be a full) - val = uint32(d[0])<<24 + - uint32(d[1])<<16 + - uint32(d[2])<<8 + - uint32(d[3]) - } - pos = dig2bytes[intg0x] - if val > 0 { - txt.Write(strconv.AppendUint(nil, uint64(val), 10)) - } - - // now the full digits, 32 bits each, 9 digits - for i := 0; i < intg0; i++ { - val = binary.BigEndian.Uint32(d[pos : pos+4]) - fmt.Fprintf(txt, "%09d", val) - pos += 4 - } - - // now see if we have a fraction - if scale == 0 { - // When the field is a DECIMAL using a scale of 0, e.g. - // DECIMAL(5,0), a binlogged value of 0 is almost treated - // like the NULL byte and we get a 0 byte length value. - // In this case let's return the correct value of 0. - if txt.Len() == 0 { - txt.WriteRune('0') - } - - return sqltypes.MakeTrusted(querypb.Type_DECIMAL, - txt.Bytes()), l, nil - } - txt.WriteByte('.') - - // now the full fractional digits - for i := 0; i < frac0; i++ { - val = binary.BigEndian.Uint32(d[pos : pos+4]) - fmt.Fprintf(txt, "%09d", val) - pos += 4 - } - - // then the partial fractional digits - switch dig2bytes[frac0x] { - case 0: - // Nothing to do - return sqltypes.MakeTrusted(querypb.Type_DECIMAL, - txt.Bytes()), l, nil - case 1: - // one byte, 1 or 2 digits - val = uint32(d[pos]) - if frac0x == 1 { - fmt.Fprintf(txt, "%1d", val) - } else { - fmt.Fprintf(txt, "%02d", val) - } - case 2: - // two bytes, 3 or 4 digits - val = uint32(d[pos])<<8 + - uint32(d[pos+1]) - if frac0x == 3 { - fmt.Fprintf(txt, "%03d", val) - } else { - fmt.Fprintf(txt, "%04d", val) - } - case 3: - // 3 bytes, 5 or 6 digits - val = uint32(d[pos])<<16 + - uint32(d[pos+1])<<8 + - uint32(d[pos+2]) - if frac0x == 5 { - fmt.Fprintf(txt, "%05d", val) - } else { - fmt.Fprintf(txt, "%06d", val) - } - case 4: - // 4 bytes, 7 or 8 digits (9 digits would be a full) - val = uint32(d[pos])<<24 + - uint32(d[pos+1])<<16 + - uint32(d[pos+2])<<8 + - uint32(d[pos+3]) - if frac0x == 7 { - fmt.Fprintf(txt, "%07d", val) - } else { - fmt.Fprintf(txt, "%08d", val) - } - } - - // remove preceding 0s from the integral part, otherwise we get "000000000001.23" instead of "1.23" - trimPrecedingZeroes := func(b []byte) []byte { - s := string(b) - isNegative := false - if s[0] == '-' { - isNegative = true - s = s[1:] - } - s = strings.TrimLeft(s, "0") - if isNegative { - s = fmt.Sprintf("-%s", s) - } - return []byte(s) - } - return sqltypes.MakeTrusted(querypb.Type_DECIMAL, trimPrecedingZeroes(txt.Bytes())), l, nil - - case TypeEnum: - switch metadata & 0xff { - case 1: - // One byte storage. - return sqltypes.MakeTrusted(querypb.Type_ENUM, - strconv.AppendUint(nil, uint64(data[pos]), 10)), 1, nil - case 2: - // Two bytes storage. - val := binary.LittleEndian.Uint16(data[pos : pos+2]) - return sqltypes.MakeTrusted(querypb.Type_ENUM, - strconv.AppendUint(nil, uint64(val), 10)), 2, nil - default: - return sqltypes.NULL, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected enum size: %v", metadata&0xff) - } - - case TypeSet: - l := int(metadata & 0xff) - return sqltypes.MakeTrusted(querypb.Type_SET, - data[pos:pos+l]), l, nil - - case TypeJSON, TypeTinyBlob, TypeMediumBlob, TypeLongBlob, TypeBlob: - // Only TypeBlob is used in binary logs, - // but supports others just in case. - l := 0 - switch metadata { - case 1: - l = int(uint32(data[pos])) - case 2: - l = int(uint32(data[pos]) | - uint32(data[pos+1])<<8) - case 3: - l = int(uint32(data[pos]) | - uint32(data[pos+1])<<8 | - uint32(data[pos+2])<<16) - case 4: - l = int(uint32(data[pos]) | - uint32(data[pos+1])<<8 | - uint32(data[pos+2])<<16 | - uint32(data[pos+3])<<24) - default: - return sqltypes.NULL, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unsupported blob metadata value %v (data: %v pos: %v)", metadata, data, pos) - } - pos += int(metadata) - - var limitArray = func(data []byte, limit int) []byte { - if len(data) > limit { - return data[:limit] - } - return data - } - // For JSON, we parse the data, and emit SQL. - if typ == TypeJSON { - var err error - jsonData := data[pos : pos+l] - s, err := getJSONValue(jsonData) - if err != nil { - return sqltypes.NULL, 0, vterrors.Wrapf(err, "error stringifying JSON data %v", limitArray(jsonData, 100)) - } - d := []byte(s) - return sqltypes.MakeTrusted(sqltypes.Expression, - d), l + int(metadata), nil - } - - return sqltypes.MakeTrusted(querypb.Type_VARBINARY, - data[pos:pos+l]), l + int(metadata), nil - - case TypeString: - // This may do String, Enum, and Set. The type is in - // metadata. If it's a string, then there will be more bits. - t := metadata >> 8 - if t == TypeEnum { - // We don't know the string values. So just use the - // numbers. - switch metadata & 0xff { - case 1: - // One byte storage. - return sqltypes.MakeTrusted(querypb.Type_UINT8, - strconv.AppendUint(nil, uint64(data[pos]), 10)), 1, nil - case 2: - // Two bytes storage. - val := binary.LittleEndian.Uint16(data[pos : pos+2]) - return sqltypes.MakeTrusted(querypb.Type_UINT16, - strconv.AppendUint(nil, uint64(val), 10)), 2, nil - default: - return sqltypes.NULL, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected enum size: %v", metadata&0xff) - } - } - if t == TypeSet { - // We don't know the set values. So just use the - // numbers. - l := int(metadata & 0xff) - var val uint64 - for i := 0; i < l; i++ { - val += uint64(data[pos+i]) << (uint(i) * 8) - } - return sqltypes.MakeTrusted(querypb.Type_UINT64, - strconv.AppendUint(nil, uint64(val), 10)), l, nil - } - // This is a real string. The length is weird. - max := int((((metadata >> 4) & 0x300) ^ 0x300) + (metadata & 0xff)) - // Length is encoded in 1 or 2 bytes. - if max > 255 { - // This code path exists due to https://bugs.mysql.com/bug.php?id=37426. - // CHAR types need to allocate 3 bytes per char. So, the length for CHAR(255) - // cannot be represented in 1 byte. This also means that this rule does not - // apply to BINARY data. - l := int(uint64(data[pos]) | - uint64(data[pos+1])<<8) - return sqltypes.MakeTrusted(querypb.Type_VARCHAR, - data[pos+2:pos+2+l]), l + 2, nil - } - l := int(data[pos]) - mdata := data[pos+1 : pos+1+l] - if sqltypes.IsBinary(field.Type) { - // For binary(n) column types, mysql pads the data on the right with nulls. However the binlog event contains - // the data without this padding. This causes several issues: - // * if a binary(n) column is part of the sharding key, the keyspace_id() returned during the copy phase - // (where the value is the result of a mysql query) is different from the one during replication - // (where the value is the one from the binlogs) - // * mysql where clause comparisons do not do the right thing without padding - // So for fixed length BINARY columns we right-pad it with nulls if necessary to match what MySQL returns. - // Because CHAR columns with a binary collation (e.g. utf8mb4_bin) have the same metadata as a BINARY column - // in binlog events, we also need to check for this case based on the underlying column type. - if l < max && strings.HasPrefix(strings.ToLower(field.ColumnType), "binary") { - paddedData := make([]byte, max) - copy(paddedData[:l], mdata) - mdata = paddedData - } - return sqltypes.MakeTrusted(querypb.Type_BINARY, mdata), l + 1, nil - } - return sqltypes.MakeTrusted(querypb.Type_VARCHAR, mdata), l + 1, nil - - case TypeGeometry: - l := 0 - switch metadata { - case 1: - l = int(uint32(data[pos])) - case 2: - l = int(uint32(data[pos]) | - uint32(data[pos+1])<<8) - case 3: - l = int(uint32(data[pos]) | - uint32(data[pos+1])<<8 | - uint32(data[pos+2])<<16) - case 4: - l = int(uint32(data[pos]) | - uint32(data[pos+1])<<8 | - uint32(data[pos+2])<<16 | - uint32(data[pos+3])<<24) - default: - return sqltypes.NULL, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unsupported geometry metadata value %v (data: %v pos: %v)", metadata, data, pos) - } - pos += int(metadata) - return sqltypes.MakeTrusted(querypb.Type_GEOMETRY, - data[pos:pos+l]), l + int(metadata), nil - - default: - return sqltypes.NULL, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "unsupported type %v", typ) - } -} - // Rows implements BinlogEvent.TableMap(). // // Expected format (L = total length of event data): @@ -1046,7 +278,7 @@ func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) { } // This column is represented now. We need to skip its length. - l, err := cellLength(data, pos, tm.Types[c], tm.Metadata[c]) + l, err := binlog.CellLength(data, pos, tm.Types[c], tm.Metadata[c]) if err != nil { return result, err } @@ -1076,7 +308,7 @@ func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) { } // This column is represented now. We need to skip its length. - l, err := cellLength(data, pos, tm.Types[c], tm.Metadata[c]) + l, err := binlog.CellLength(data, pos, tm.Types[c], tm.Metadata[c]) if err != nil { return result, err } @@ -1115,7 +347,7 @@ func (rs *Rows) StringValuesForTests(tm *TableMap, rowIndex int) ([]string, erro } // We have real data - value, l, err := CellValue(data, pos, tm.Types[c], tm.Metadata[c], &querypb.Field{Type: querypb.Type_UINT64}) + value, l, err := binlog.CellValue(data, pos, tm.Types[c], tm.Metadata[c], &querypb.Field{Type: querypb.Type_UINT64}) if err != nil { return nil, err } @@ -1150,7 +382,7 @@ func (rs *Rows) StringIdentifiesForTests(tm *TableMap, rowIndex int) ([]string, } // We have real data - value, l, err := CellValue(data, pos, tm.Types[c], tm.Metadata[c], &querypb.Field{Type: querypb.Type_UINT64}) + value, l, err := binlog.CellValue(data, pos, tm.Types[c], tm.Metadata[c], &querypb.Field{Type: querypb.Type_UINT64}) if err != nil { return nil, err } diff --git a/go/mysql/client.go b/go/mysql/client.go index 2780ad0dfd9..c4dd87d95cc 100644 --- a/go/mysql/client.go +++ b/go/mysql/client.go @@ -27,6 +27,7 @@ import ( "time" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/sqlerror" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttls" @@ -94,11 +95,11 @@ func Connect(ctx context.Context, params *ConnParams) (*Conn, error) { // should return a 2003. if netProto == "tcp" { status <- connectResult{ - err: NewSQLError(CRConnHostError, SSUnknownSQLState, "net.Dial(%v) failed: %v", addr, err), + err: sqlerror.NewSQLError(sqlerror.CRConnHostError, sqlerror.SSUnknownSQLState, "net.Dial(%v) failed: %v", addr, err), } } else { status <- connectResult{ - err: NewSQLError(CRConnectionError, SSUnknownSQLState, "net.Dial(%v) to local server failed: %v", addr, err), + err: sqlerror.NewSQLError(sqlerror.CRConnectionError, sqlerror.SSUnknownSQLState, "net.Dial(%v) to local server failed: %v", addr, err), } } return @@ -178,11 +179,11 @@ func (c *Conn) Ping() error { data[pos] = ComPing if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err) } data, err := c.readEphemeralPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } defer c.recycleReadPacket() switch data[0] { @@ -207,7 +208,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error { // Wait for the server initial handshake packet, and parse it. data, err := c.readPacket() if err != nil { - return NewSQLError(CRServerLost, "", "initial packet read failed: %v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, "", "initial packet read failed: %v", err) } capabilities, salt, err := c.parseInitialHandshakePacket(data) if err != nil { @@ -218,7 +219,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error { // Sanity check. if capabilities&CapabilityClientProtocol41 == 0 { - return NewSQLError(CRVersionError, SSUnknownSQLState, "cannot connect to servers earlier than 4.1") + return sqlerror.NewSQLError(sqlerror.CRVersionError, sqlerror.SSUnknownSQLState, "cannot connect to servers earlier than 4.1") } // Remember a subset of the capabilities, so we can use them @@ -238,7 +239,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error { // If client asked for SSL, but server doesn't support it, // stop right here. if params.SslRequired() && capabilities&CapabilityClientSSL == 0 { - return NewSQLError(CRSSLConnectionError, SSUnknownSQLState, "server doesn't support SSL but client asked for it") + return sqlerror.NewSQLError(sqlerror.CRSSLConnectionError, sqlerror.SSUnknownSQLState, "server doesn't support SSL but client asked for it") } // The ServerName to verify depends on what the hostname is. @@ -259,13 +260,13 @@ func (c *Conn) clientHandshake(params *ConnParams) error { tlsVersion, err := vttls.TLSVersionToNumber(params.TLSMinVersion) if err != nil { - return NewSQLError(CRSSLConnectionError, SSUnknownSQLState, "error parsing minimal TLS version: %v", err) + return sqlerror.NewSQLError(sqlerror.CRSSLConnectionError, sqlerror.SSUnknownSQLState, "error parsing minimal TLS version: %v", err) } // Build the TLS config. clientConfig, err := vttls.ClientConfig(params.EffectiveSslMode(), params.SslCert, params.SslKey, params.SslCa, params.SslCrl, serverName, tlsVersion) if err != nil { - return NewSQLError(CRSSLConnectionError, SSUnknownSQLState, "error loading client cert and ca: %v", err) + return sqlerror.NewSQLError(sqlerror.CRSSLConnectionError, sqlerror.SSUnknownSQLState, "error loading client cert and ca: %v", err) } // Send the SSLRequest packet. @@ -296,7 +297,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error { } else if params.Flags&CapabilityClientSessionTrack == CapabilityClientSessionTrack { // If client asked for ClientSessionTrack, but server doesn't support it, // stop right here. - return NewSQLError(CRSSLConnectionError, SSUnknownSQLState, "server doesn't support ClientSessionTrack but client asked for it") + return sqlerror.NewSQLError(sqlerror.CRSSLConnectionError, sqlerror.SSUnknownSQLState, "server doesn't support ClientSessionTrack but client asked for it") } // Build and send our handshake response 41. @@ -321,7 +322,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error { // Wait for response, should be OK. response, err := c.readPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } switch response[0] { case OKPacket: @@ -331,7 +332,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error { return ParseErrorPacket(response) default: // FIXME(alainjobart) handle extra auth cases and so on. - return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "initial server response is asking for more information, not implemented yet: %v", response) + return sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "initial server response is asking for more information, not implemented yet: %v", response) } } @@ -346,7 +347,7 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error) // Protocol version. pver, pos, ok := readByte(data, pos) if !ok { - return 0, nil, NewSQLError(CRVersionError, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no protocol version") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRVersionError, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no protocol version") } // Server is allowed to immediately send ERR packet @@ -355,41 +356,41 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error) // Normally there would be a 1-byte sql_state_marker field and a 5-byte // sql_state field here, but docs say these will not be present in this case. errorMsg, _, _ := readEOFString(data, pos) - return 0, nil, NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "immediate error from server errorCode=%v errorMsg=%v", errorCode, errorMsg) + return 0, nil, sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "immediate error from server errorCode=%v errorMsg=%v", errorCode, errorMsg) } if pver != protocolVersion { - return 0, nil, NewSQLError(CRVersionError, SSUnknownSQLState, "bad protocol version: %v", pver) + return 0, nil, sqlerror.NewSQLError(sqlerror.CRVersionError, sqlerror.SSUnknownSQLState, "bad protocol version: %v", pver) } // Read the server version. c.ServerVersion, pos, ok = readNullString(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no server version") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no server version") } // Read the connection id. c.ConnectionID, pos, ok = readUint32(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no connection id") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no connection id") } // Read the first part of the auth-plugin-data authPluginData, pos, ok := readBytes(data, pos, 8) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no auth-plugin-data-part-1") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no auth-plugin-data-part-1") } // One byte filler, 0. We don't really care about the value. _, pos, ok = readByte(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no filler") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no filler") } // Lower 2 bytes of the capability flags. capLower, pos, ok := readUint16(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no capability flags (lower 2 bytes)") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no capability flags (lower 2 bytes)") } var capabilities = uint32(capLower) @@ -401,20 +402,20 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error) // Character set. characterSet, pos, ok := readByte(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no character set") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no character set") } c.CharacterSet = collations.ID(characterSet) // Status flags. Ignored. _, pos, ok = readUint16(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no status flags") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no status flags") } // Upper 2 bytes of the capability flags. capUpper, pos, ok := readUint16(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no capability flags (upper 2 bytes)") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no capability flags (upper 2 bytes)") } capabilities += uint32(capUpper) << 16 @@ -424,13 +425,13 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error) if capabilities&CapabilityClientPluginAuth != 0 { authPluginDataLength, pos, ok = readByte(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no length of auth-plugin-data") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no length of auth-plugin-data") } } else { // One byte filler, 0. We don't really care about the value. _, pos, ok = readByte(data, pos) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no length of auth-plugin-data filler") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no length of auth-plugin-data filler") } } @@ -447,12 +448,12 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error) var authPluginDataPart2 []byte authPluginDataPart2, pos, ok = readBytes(data, pos, l) if !ok { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no auth-plugin-data-part-2") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no auth-plugin-data-part-2") } // The last byte has to be 0, and is not part of the data. if authPluginDataPart2[l-1] != 0 { - return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: auth-plugin-data-part-2 is not 0 terminated") + return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: auth-plugin-data-part-2 is not 0 terminated") } authPluginData = append(authPluginData, authPluginDataPart2[0:l-1]...) } @@ -509,7 +510,7 @@ func (c *Conn) writeSSLRequest(capabilities uint32, characterSet uint8, params * // And send it as is. if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "cannot send SSLRequest: %v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "cannot send SSLRequest: %v", err) } return nil } @@ -606,11 +607,11 @@ func (c *Conn) writeHandshakeResponse41(capabilities uint32, scrambledPassword [ // Sanity-check the length. if pos != len(data) { - return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "writeHandshakeResponse41: only packed %v bytes, out of %v allocated", pos, len(data)) + return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "writeHandshakeResponse41: only packed %v bytes, out of %v allocated", pos, len(data)) } if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "cannot send HandshakeResponse41: %v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "cannot send HandshakeResponse41: %v", err) } return nil } @@ -620,7 +621,7 @@ func (c *Conn) writeHandshakeResponse41(capabilities uint32, scrambledPassword [ func (c *Conn) handleAuthResponse(params *ConnParams) error { response, err := c.readPacket() if err != nil { - return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } switch response[0] { @@ -640,7 +641,7 @@ func (c *Conn) handleAuthResponse(params *ConnParams) error { case ErrPacket: return ParseErrorPacket(response) default: - return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "initial server response cannot be parsed: %v", response) + return sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "initial server response cannot be parsed: %v", response) } return nil @@ -652,7 +653,7 @@ func (c *Conn) handleAuthSwitchPacket(params *ConnParams, response []byte) error var salt []byte c.authPluginName, salt, err = parseAuthSwitchRequest(response) if err != nil { - return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "cannot parse auth switch request: %v", err) + return sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "cannot parse auth switch request: %v", err) } if salt != nil { c.salt = salt @@ -673,7 +674,7 @@ func (c *Conn) handleAuthSwitchPacket(params *ConnParams, response []byte) error return err } default: - return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "server asked for unsupported auth method: %v", c.authPluginName) + return sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "server asked for unsupported auth method: %v", c.authPluginName) } // The response could be an OKPacket, AuthMoreDataPacket or ErrPacket @@ -715,7 +716,7 @@ func (c *Conn) handleAuthMoreDataPacket(data byte, params *ConnParams) error { // Next packet should either be an OKPacket or ErrPacket return c.handleAuthResponse(params) default: - return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "cannot parse AuthMoreDataPacket: %v", data) + return sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "cannot parse AuthMoreDataPacket: %v", data) } } @@ -745,7 +746,7 @@ func (c *Conn) requestPublicKey() (rsaKey *rsa.PublicKey, err error) { response, err := c.readPacket() if err != nil { - return nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } // Server should respond with a AuthMoreDataPacket containing the public key diff --git a/go/mysql/client_test.go b/go/mysql/client_test.go index f9db5cee523..c349cdcd531 100644 --- a/go/mysql/client_test.go +++ b/go/mysql/client_test.go @@ -32,16 +32,18 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/vt/tlstest" "vitess.io/vitess/go/vt/vttls" ) // assertSQLError makes sure we get the right error. -func assertSQLError(t *testing.T, err error, code ErrorCode, sqlState, subtext, query, pattern string) { +func assertSQLError(t *testing.T, err error, code sqlerror.ErrorCode, sqlState, subtext, query, pattern string) { t.Helper() require.Error(t, err, "was expecting SQLError %v / %v / %v but got no error.", code, sqlState, subtext) - serr, ok := err.(*SQLError) + serr, ok := err.(*sqlerror.SQLError) require.True(t, ok, "was expecting SQLError %v / %v / %v but got: %v", code, sqlState, subtext, err) require.Equal(t, code, serr.Num, "was expecting SQLError %v / %v / %v but got code %v", code, sqlState, subtext, serr.Num) require.Equal(t, sqlState, serr.State, "was expecting SQLError %v / %v / %v but got state %v", code, sqlState, subtext, serr.State) @@ -110,14 +112,14 @@ func TestConnectTimeout(t *testing.T) { }() ctx = context.Background() _, err = Connect(ctx, params) - assertSQLError(t, err, CRServerLost, SSUnknownSQLState, "initial packet read failed", "", "") + assertSQLError(t, err, sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "initial packet read failed", "", "") // Now close the listener. Connect should fail right away, // check the error. listener.Close() wg.Wait() _, err = Connect(ctx, params) - assertSQLError(t, err, CRConnHostError, SSUnknownSQLState, "connection refused", "", "") + assertSQLError(t, err, sqlerror.CRConnHostError, sqlerror.SSUnknownSQLState, "connection refused", "", "") // Tests a connection where Dial to a unix socket fails // properly returns the right error. To simulate exactly the @@ -131,7 +133,7 @@ func TestConnectTimeout(t *testing.T) { _, err = Connect(ctx, params) os.Remove(name) t.Log(err) - assertSQLError(t, err, CRConnectionError, SSUnknownSQLState, "connection refused", "", "net\\.Dial\\(([a-z0-9A-Z_\\/]*)\\) to local server failed:") + assertSQLError(t, err, sqlerror.CRConnectionError, sqlerror.SSUnknownSQLState, "connection refused", "", "net\\.Dial\\(([a-z0-9A-Z_\\/]*)\\) to local server failed:") } // TestTLSClientDisabled creates a Server with TLS support, then connects @@ -149,7 +151,7 @@ func TestTLSClientDisabled(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() @@ -221,7 +223,7 @@ func TestTLSClientPreferredDefault(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() @@ -294,7 +296,7 @@ func TestTLSClientRequired(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() @@ -341,7 +343,7 @@ func TestTLSClientVerifyCA(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() @@ -424,7 +426,7 @@ func TestTLSClientVerifyIdentity(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err) defer l.Close() diff --git a/go/mysql/collations/charset/convert.go b/go/mysql/collations/charset/convert.go index 8518964529c..bc51e9b8377 100644 --- a/go/mysql/collations/charset/convert.go +++ b/go/mysql/collations/charset/convert.go @@ -19,6 +19,8 @@ package charset import ( "fmt" "unicode/utf8" + + "vitess.io/vitess/go/hack" ) func failedConversionError(from, to Charset, input []byte) error { @@ -31,6 +33,7 @@ func convertFastFromUTF8(dst []byte, dstCharset Charset, src []byte) ([]byte, er if dst == nil { dst = make([]byte, len(src)*3) } else { + nDst = len(dst) dst = dst[:cap(dst)] } @@ -63,6 +66,7 @@ func convertSlow(dst []byte, dstCharset Charset, src []byte, srcCharset Charset) if dst == nil { dst = make([]byte, len(src)*3) } else { + nDst = len(dst) dst = dst[:cap(dst)] } @@ -108,6 +112,9 @@ type Convertible interface { // a new byte slice will be allocated to store the result. func Convert(dst []byte, dstCharset Charset, src []byte, srcCharset Charset) ([]byte, error) { if dstCharset.IsSuperset(srcCharset) { + if dst != nil { + return append(dst, src...), nil + } return src, nil } if trans, ok := dstCharset.(Convertible); ok { @@ -123,6 +130,79 @@ func Convert(dst []byte, dstCharset Charset, src []byte, srcCharset Charset) ([] } } +func Expand(dst []rune, src []byte, srcCharset Charset) []rune { + switch srcCharset := srcCharset.(type) { + case Charset_utf8mb3, Charset_utf8mb4: + if dst == nil { + return []rune(string(src)) + } + dst = make([]rune, 0, len(src)) + for _, cp := range string(src) { + dst = append(dst, cp) + } + return dst + case Charset_binary: + if dst == nil { + dst = make([]rune, 0, len(src)) + } + for _, c := range src { + dst = append(dst, rune(c)) + } + return dst + default: + if dst == nil { + dst = make([]rune, 0, len(src)) + } + for len(src) > 0 { + cp, width := srcCharset.DecodeRune(src) + src = src[width:] + dst = append(dst, cp) + } + return dst + } +} + +func Collapse(dst []byte, src []rune, dstCharset Charset) []byte { + switch dstCharset := dstCharset.(type) { + case Charset_utf8mb3, Charset_utf8mb4: + if dst == nil { + return hack.StringBytes(string(src)) + } + return append(dst, hack.StringBytes(string(src))...) + case Charset_binary: + if dst == nil { + dst = make([]byte, 0, len(src)) + } + for _, b := range src { + dst = append(dst, byte(b)) + } + return dst + default: + nDst := 0 + if dst == nil { + dst = make([]byte, len(src)*dstCharset.MaxWidth()) + } else { + nDst = len(dst) + dst = dst[:cap(dst)] + } + for _, c := range src { + if len(dst)-nDst < 4 { + newDst := make([]byte, len(dst)*2) + copy(newDst, dst[:nDst]) + dst = newDst + } + w := dstCharset.EncodeRune(dst[nDst:], c) + if w < 0 { + if w = dstCharset.EncodeRune(dst[nDst:], '?'); w < 0 { + break + } + } + nDst += w + } + return dst[:nDst] + } +} + func ConvertFromUTF8(dst []byte, dstCharset Charset, src []byte) ([]byte, error) { return Convert(dst, dstCharset, src, Charset_utf8mb4{}) } diff --git a/go/mysql/collations/charset/helpers.go b/go/mysql/collations/charset/helpers.go index 6dee09e77bc..851ce4bebf9 100644 --- a/go/mysql/collations/charset/helpers.go +++ b/go/mysql/collations/charset/helpers.go @@ -21,14 +21,18 @@ func Slice(charset Charset, input []byte, from, to int) []byte { return charset.Slice(input, from, to) } iter := input + start := 0 for i := 0; i < to; i++ { r, size := charset.DecodeRune(iter) if r == RuneError && size < 2 { break } + if i < from { + start += size + } iter = iter[size:] } - return input[:len(input)-len(iter)] + return input[start : len(input)-len(iter)] } func Validate(charset Charset, input []byte) bool { diff --git a/go/mysql/collations/charset/korean/tables.go b/go/mysql/collations/charset/korean/tables.go index 0480e85c4aa..7f7ad3e4264 100644 --- a/go/mysql/collations/charset/korean/tables.go +++ b/go/mysql/collations/charset/korean/tables.go @@ -17056,8 +17056,6 @@ var decode = [...]uint16{ 17629: 0x8A70, } -const numEncodeTables = 7 - // encodeX are the encoding tables from Unicode to EUC-KR code, // sorted by decreasing length. // encode0: 20893 entries for runes in [19968, 40861). diff --git a/go/mysql/collations/charset/simplifiedchinese/tables.go b/go/mysql/collations/charset/simplifiedchinese/tables.go index 415f52a1116..645127580f6 100644 --- a/go/mysql/collations/charset/simplifiedchinese/tables.go +++ b/go/mysql/collations/charset/simplifiedchinese/tables.go @@ -22091,8 +22091,6 @@ var decode = [...]uint16{ 23844: 0x4DAE, } -const numEncodeTables = 5 - // encodeX are the encoding tables from Unicode to GBK code, // sorted by decreasing length. // encode0: 28965 entries for runes in [11905, 40870). diff --git a/go/mysql/collations/coercion.go b/go/mysql/collations/coercion.go index 8e72ebf3c37..8b66c818cc0 100644 --- a/go/mysql/collations/coercion.go +++ b/go/mysql/collations/coercion.go @@ -19,8 +19,6 @@ package collations import ( "fmt" "unsafe" - - "vitess.io/vitess/go/mysql/collations/charset" ) func init() { @@ -95,11 +93,6 @@ const ( RepertoireUnicode ) -// Coercion is a function that will transform either the given argument -// arguments of the function into a specific character set. The `dst` argument -// will be used as the destination of the coerced argument, but it can be nil. -type Coercion func(dst, in []byte) ([]byte, error) - // TypedCollation is the Collation of a SQL expression, including its coercibility // and repertoire. type TypedCollation struct { @@ -112,208 +105,13 @@ func (tc TypedCollation) Valid() bool { return tc.Collation != Unknown } -func checkCompatibleCollations( - left Collation, leftCoercibility Coercibility, leftRepertoire Repertoire, - right Collation, rightCoercibility Coercibility, rightRepertoire Repertoire, -) bool { - leftCS := left.Charset() - rightCS := right.Charset() - - switch leftCS.(type) { - case charset.Charset_utf8mb4: - if leftCoercibility <= rightCoercibility { - return true - } - - case charset.Charset_utf32: - switch { - case leftCoercibility < rightCoercibility: - return true - case leftCoercibility == rightCoercibility: - if !charset.IsUnicode(rightCS) { - return true - } - if !left.IsBinary() { - return true - } - } - - case charset.Charset_utf8mb3, charset.Charset_ucs2, charset.Charset_utf16, charset.Charset_utf16le: - switch { - case leftCoercibility < rightCoercibility: - return true - case leftCoercibility == rightCoercibility: - if !charset.IsUnicode(rightCS) { - return true - } - } - } - - if rightRepertoire == RepertoireASCII { - switch { - case leftCoercibility < rightCoercibility: - return true - case leftCoercibility == rightCoercibility: - if leftRepertoire == RepertoireUnicode { - return true - } - } - } - - return false -} - -// CoercionOptions is used to configure how aggressive the algorithm can be -// when merging two different collations by transcoding them. -type CoercionOptions struct { - // ConvertToSuperset allows merging two different collations as long - // as the charset of one of them is a strict superset of the other. In - // order to operate on the two expressions, one of them will need to - // be transcoded. This transcoding will always be safe because the string - // with the smallest repertoire will be transcoded to its superset, which - // cannot fail. - ConvertToSuperset bool - - // ConvertWithCoercion allows merging two different collations by forcing - // a coercion as long as the coercibility of the two sides is lax enough. - // This will force a transcoding of one of the expressions even if their - // respective charsets are not a strict superset, so the resulting transcoding - // CAN fail depending on the content of their strings. - ConvertWithCoercion bool -} - -// MergeCollations returns a Coercion function for a pair of TypedCollation based -// on their coercibility. -// -// The function takes the typed collations for the two sides of a text operation -// (namely, a comparison or concatenation of two textual expressions). These typed -// collations includes the actual collation for the expression on each size, their -// coercibility values (see: Coercibility) and their respective repertoires, -// and returns the target collation (i.e. the collation into which the two expressions -// must be coerced, and a Coercion function. The Coercion function can be called repeatedly -// with the different values for the two expressions and will transcode either -// the left-hand or right-hand value to the appropriate charset so it can be -// collated against the other value. -// -// If the collations for both sides of the expressions are the same, the returned -// Coercion function will be a no-op. Likewise, if the two collations are not the same, -// but they are compatible and have the same charset, the Coercion function will also -// be a no-op. -// -// If the collations for both sides of the expression are not compatible, an error -// will be returned and the returned TypedCollation and Coercion will be nil. -func (env *Environment) MergeCollations(left, right TypedCollation, opt CoercionOptions) (TypedCollation, Coercion, Coercion, error) { - leftColl := left.Collation.Get() - rightColl := right.Collation.Get() - if leftColl == nil || rightColl == nil { - return TypedCollation{}, nil, nil, fmt.Errorf("unsupported TypeCollationID: %v / %v", left.Collation, right.Collation) - } - - leftCS := leftColl.Charset() - rightCS := rightColl.Charset() - - if left.Coercibility == CoerceExplicit && right.Coercibility == CoerceExplicit { - if left.Collation != right.Collation { - goto cannotCoerce - } - } - - if leftCS.Name() == rightCS.Name() { - switch { - case left.Coercibility < right.Coercibility: - left.Repertoire |= right.Repertoire - return left, nil, nil, nil - - case left.Coercibility > right.Coercibility: - right.Repertoire |= left.Repertoire - return right, nil, nil, nil - - case left.Collation == right.Collation: - left.Repertoire |= right.Repertoire - return left, nil, nil, nil - } - - if left.Coercibility == CoerceExplicit { - goto cannotCoerce - } - - leftCsBin := leftColl.IsBinary() - rightCsBin := rightColl.IsBinary() - - switch { - case leftCsBin && rightCsBin: - left.Coercibility = CoerceNone - return left, nil, nil, nil - - case leftCsBin: - return left, nil, nil, nil - - case rightCsBin: - return right, nil, nil, nil - } - - defaults := env.byCharset[leftCS.Name()] - return TypedCollation{ - Collation: defaults.Binary.ID(), - Coercibility: CoerceNone, - Repertoire: left.Repertoire | right.Repertoire, - }, nil, nil, nil - } - - if _, leftIsBinary := leftColl.(*Collation_binary); leftIsBinary { - if left.Coercibility <= right.Coercibility { - return left, nil, nil, nil - } - goto coerceToRight - } - if _, rightIsBinary := rightColl.(*Collation_binary); rightIsBinary { - if left.Coercibility >= right.Coercibility { - return right, nil, nil, nil - } - goto coerceToLeft - } - - if opt.ConvertToSuperset { - if checkCompatibleCollations(leftColl, left.Coercibility, left.Repertoire, rightColl, right.Coercibility, right.Repertoire) { - goto coerceToLeft - } - if checkCompatibleCollations(rightColl, right.Coercibility, right.Repertoire, leftColl, left.Coercibility, left.Repertoire) { - goto coerceToRight - } - } - - if opt.ConvertWithCoercion { - if left.Coercibility < right.Coercibility && right.Coercibility > CoerceImplicit { - goto coerceToLeft - } - if right.Coercibility < left.Coercibility && left.Coercibility > CoerceImplicit { - goto coerceToRight - } - } - -cannotCoerce: - return TypedCollation{}, nil, nil, fmt.Errorf("Illegal mix of collations (%s,%s) and (%s,%s)", - leftColl.Name(), left.Coercibility, rightColl.Name(), right.Coercibility) - -coerceToLeft: - return left, nil, - func(dst, in []byte) ([]byte, error) { - return charset.Convert(dst, leftCS, in, rightCS) - }, nil - -coerceToRight: - return right, - func(dst, in []byte) ([]byte, error) { - return charset.Convert(dst, rightCS, in, leftCS) - }, nil, nil -} - func (env *Environment) EnsureCollate(fromID, toID ID) error { // these two lookups should never fail - from := fromID.Get() - to := toID.Get() - if from.Charset().Name() != to.Charset().Name() { - return fmt.Errorf("COLLATION '%s' is not valid for CHARACTER SET '%s'", to.Name(), from.Charset().Name()) + fromCharsetName := env.LookupCharsetName(fromID) + toCharsetName := env.LookupCharsetName(toID) + if fromCharsetName != toCharsetName { + toCollName := env.LookupName(toID) + return fmt.Errorf("COLLATION '%s' is not valid for CHARACTER SET '%s'", toCollName, fromCharsetName) } return nil } diff --git a/go/mysql/collations/collation.go b/go/mysql/collations/collation.go index 172f5d4552f..aebc4dc9646 100644 --- a/go/mysql/collations/collation.go +++ b/go/mysql/collations/collation.go @@ -16,167 +16,10 @@ limitations under the License. package collations -import ( - "math" - - "vitess.io/vitess/go/mysql/collations/charset" - "vitess.io/vitess/go/vt/vthash" -) - //go:generate go run ./tools/makecolldata/ --embed=true -// CaseAwareCollation implements lowercase and uppercase conventions for collations. -type CaseAwareCollation interface { - Collation - ToUpper(dst []byte, src []byte) []byte - ToLower(dst []byte, src []byte) []byte -} - // ID is a numeric identifier for a collation. These identifiers are defined by MySQL, not by Vitess. type ID uint16 -// Get returns the Collation identified by this ID. If the ID is invalid, this returns nil -func (i ID) Get() Collation { - if int(i) < len(collationsById) { - return collationsById[i] - } - return nil -} - -// Valid returns whether this Collation ID is valid (i.e. identifies a valid collation) -func (i ID) Valid() bool { - return int(i) < len(collationsById) && collationsById[i] != nil -} - // Unknown is the default ID for an unknown collation. const Unknown ID = 0 - -// Collation implements a MySQL-compatible collation. It defines how to compare -// for sorting order and equality two strings with the same encoding. -type Collation interface { - // ID returns the numerical identifier for this collation. This is the same - // value that is returned by MySQL in a query's headers to identify the collation - // for a given column - ID() ID - - // Name is the full name of this collation, in the form of "ENCODING_LANG_SENSITIVITY" - Name() string - - // Collate compares two strings using this collation. `left` and `right` must be the - // two strings encoded in the proper encoding for this collation. If `isPrefix` is true, - // the function instead behaves equivalently to `strings.HasPrefix(left, right)`, but - // being collation-aware. - // It returns a numeric value like a normal comparison function: <0 if left < right, - // 0 if left == right, >0 if left > right - Collate(left, right []byte, isPrefix bool) int - - // WeightString returns a weight string for the given `src` string. A weight string - // is a binary representation of the weights for the given string, that can be - // compared byte-wise to return identical results to collating this string. - // - // This means: - // bytes.Compare(WeightString(left), WeightString(right)) == Collate(left, right) - // - // The semantics of this API have been carefully designed to match MySQL's behavior - // in its `strnxfrm` API. Most notably, the `numCodepoints` argument implies different - // behaviors depending on the collation's padding mode: - // - // - For collations that pad WITH SPACE (this is, all legacy collations in MySQL except - // for the newly introduced UCA v9.0.0 utf8mb4 collations in MySQL 8.0), `numCodepoints` - // can have the following values: - // - // - if `numCodepoints` is any integer greater than zero, this treats the `src` string - // as if it were in a `CHAR(numCodepoints)` column in MySQL, meaning that the resulting - // weight string will be padded with the weight for the SPACE character until it becomes - // wide enough to fill the `CHAR` column. This is necessary to perform weight comparisons - // in fixed-`CHAR` columns. If `numCodepoints` is smaller than the actual amount of - // codepoints stored in `src`, the result is unspecified. - // - // - if `numCodepoints` is zero, this is equivalent to `numCodepoints = RuneCount(src)`, - // meaning that the resulting weight string will have no padding at the end: it'll only have - // the weight values for the exact amount of codepoints contained in `src`. This is the - // behavior required to sort `VARCHAR` columns. - // - // - if `numCodepoints` is the special constant PadToMax, then the `dst` slice must be - // pre-allocated to a zero-length slice with enough capacity to hold the complete weight - // string, and any remaining capacity in `dst` will be filled by the weights for the - // padding character, repeatedly. This is a special flag used by MySQL when performing - // filesorts, where all the sorting keys must have identical sizes, even for `VARCHAR` - // columns. - // - // - For collations that have NO PAD (this is, the newly introduced UCA v9.0.0 utf8mb4 collations - // in MySQL 8.0), `numCodepoints` can only have the special constant `PadToMax`, which will make - // the weight string padding equivalent to a PAD SPACE collation (as explained in the previous - // section). All other values for `numCodepoints` are ignored, because NO PAD collations always - // return the weights for the codepoints in their strings, with no further padding at the end. - // - // The resulting weight string is written to `dst`, which can be pre-allocated to - // WeightStringLen() bytes to prevent growing the slice. `dst` can also be nil, in which - // case it will grow dynamically. If `numCodepoints` has the special PadToMax value explained - // earlier, `dst` MUST be pre-allocated to the target size or the function will return an - // empty slice. - WeightString(dst, src []byte, numCodepoints int) []byte - - // WeightStringLen returns a size (in bytes) that would fit any weight strings for a string - // with `numCodepoints` using this collation. Note that this is a higher bound for the size - // of the string, and in practice weight strings can be significantly smaller than the - // returned value. - WeightStringLen(numCodepoints int) int - - // Hash returns a 32 or 64 bit identifier (depending on the platform) that uniquely identifies - // the given string based on this collation. It is functionally equivalent to calling WeightString - // and then hashing the result. - // - // Consequently, if the hashes for two strings are different, then the two strings are considered - // different according to this collation. If the hashes for two strings are equal, the two strings - // may or may not be considered equal according to this collation, because hashes can collide unlike - // weight strings. - // - // The numCodepoints argument has the same behavior as in WeightString: if this collation uses PAD SPACE, - // the hash will interpret the source string as if it were stored in a `CHAR(n)` column. If the value of - // numCodepoints is 0, this is equivalent to setting `numCodepoints = RuneCount(src)`. - // For collations with NO PAD, the numCodepoint argument is ignored. - Hash(hasher *vthash.Hasher, src []byte, numCodepoints int) - - // Wildcard returns a matcher for the given wildcard pattern. The matcher can be used to repeatedly - // test different strings to check if they match the pattern. The pattern must be a traditional wildcard - // pattern, which may contain the provided special characters for matching one character or several characters. - // The provided `escape` character will be used as an escape sequence in front of the other special characters. - // - // This method is fully collation aware; the matching will be performed according to the underlying collation. - // I.e. if this is a case-insensitive collation, matching will be case-insensitive. - // - // The returned WildcardPattern is always valid, but if the provided special characters do not exist in this - // collation's repertoire, the returned pattern will not match any strings. Likewise, if the provided pattern - // has invalid syntax, the returned pattern will not match any strings. - // - // If the provided special characters are 0, the defaults to parse an SQL 'LIKE' statement will be used. - // This is, '_' for matching one character, '%' for matching many and '\\' for escape. - // - // This method can also be used for Shell-like matching with '?', '*' and '\\' as their respective special - // characters. - Wildcard(pat []byte, matchOne, matchMany, escape rune) WildcardPattern - - // Charset returns the Charset with which this collation is encoded - Charset() Charset - - // IsBinary returns whether this collation is a binary collation - IsBinary() bool -} - -// WildcardPattern is a matcher for a wildcard pattern, constructed from a given collation -type WildcardPattern interface { - // Match returns whether the given string matches this pattern - Match(in []byte) bool -} - -type Charset = charset.Charset - -const PadToMax = math.MaxInt32 - -func minInt(i1, i2 int) int { - if i1 < i2 { - return i1 - } - return i2 -} diff --git a/go/mysql/collations/8bit.go b/go/mysql/collations/colldata/8bit.go similarity index 92% rename from go/mysql/collations/8bit.go rename to go/mysql/collations/colldata/8bit.go index 7a22ed1d0e1..2355888bbab 100644 --- a/go/mysql/collations/8bit.go +++ b/go/mysql/collations/colldata/8bit.go @@ -14,9 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -package collations +package colldata import ( + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" "vitess.io/vitess/go/vt/vthash" ) @@ -42,7 +43,7 @@ type simpletables struct { } type Collation_8bit_bin struct { - id ID + id collations.ID name string simpletables charset charset.Charset @@ -52,7 +53,7 @@ func (c *Collation_8bit_bin) Name() string { return c.name } -func (c *Collation_8bit_bin) ID() ID { +func (c *Collation_8bit_bin) ID() collations.ID { return c.id } @@ -78,7 +79,7 @@ func (c *Collation_8bit_bin) WeightString(dst, src []byte, numCodepoints int) [] case PadToMax: padToMax = true default: - copyCodepoints = minInt(copyCodepoints, numCodepoints) + copyCodepoints = min(copyCodepoints, numCodepoints) } dst = append(dst, src[:copyCodepoints]...) @@ -92,7 +93,7 @@ func (c *Collation_8bit_bin) Hash(hasher *vthash.Hasher, src []byte, numCodepoin return } - tocopy := minInt(len(src), numCodepoints) + tocopy := min(len(src), numCodepoints) hasher.Write(src[:tocopy]) numCodepoints -= tocopy @@ -129,7 +130,7 @@ func (c *Collation_8bit_bin) ToUpper(dst, src []byte) []byte { } type Collation_8bit_simple_ci struct { - id ID + id collations.ID name string simpletables charset charset.Charset @@ -139,7 +140,7 @@ func (c *Collation_8bit_simple_ci) Name() string { return c.name } -func (c *Collation_8bit_simple_ci) ID() ID { +func (c *Collation_8bit_simple_ci) ID() collations.ID { return c.id } @@ -153,7 +154,7 @@ func (c *Collation_8bit_simple_ci) IsBinary() bool { func (c *Collation_8bit_simple_ci) Collate(left, right []byte, rightIsPrefix bool) int { sortOrder := c.sort - cmpLen := minInt(len(left), len(right)) + cmpLen := min(len(left), len(right)) for i := 0; i < cmpLen; i++ { sortL, sortR := sortOrder[left[i]], sortOrder[right[i]] @@ -178,7 +179,7 @@ func (c *Collation_8bit_simple_ci) WeightString(dst, src []byte, numCodepoints i case PadToMax: padToMax = true default: - copyCodepoints = minInt(copyCodepoints, numCodepoints) + copyCodepoints = min(copyCodepoints, numCodepoints) } for _, ch := range src[:copyCodepoints] { @@ -192,7 +193,7 @@ func (c *Collation_8bit_simple_ci) Hash(hasher *vthash.Hasher, src []byte, numCo var tocopy = len(src) if numCodepoints > 0 { - tocopy = minInt(tocopy, numCodepoints) + tocopy = min(tocopy, numCodepoints) } hasher.Write64(uint64(c.id)) @@ -251,8 +252,8 @@ func (c *Collation_8bit_simple_ci) ToUpper(dst, src []byte) []byte { type Collation_binary struct{} -func (c *Collation_binary) ID() ID { - return CollationBinaryID +func (c *Collation_binary) ID() collations.ID { + return collations.CollationBinaryID } func (c *Collation_binary) Name() string { @@ -280,7 +281,7 @@ func (c *Collation_binary) WeightString(dst, src []byte, numCodepoints int) []by case PadToMax: padToMax = true default: - copyCodepoints = minInt(copyCodepoints, numCodepoints) + copyCodepoints = min(copyCodepoints, numCodepoints) } dst = append(dst, src[:copyCodepoints]...) diff --git a/go/mysql/collations/cached_size.go b/go/mysql/collations/colldata/cached_size.go similarity index 98% rename from go/mysql/collations/cached_size.go rename to go/mysql/collations/colldata/cached_size.go index 6b5e901dffd..36167c69d6d 100644 --- a/go/mysql/collations/cached_size.go +++ b/go/mysql/collations/colldata/cached_size.go @@ -15,7 +15,7 @@ limitations under the License. */ // Code generated by Sizegen. DO NOT EDIT. -package collations +package colldata import hack "vitess.io/vitess/go/hack" diff --git a/go/mysql/collations/colldata/collation.go b/go/mysql/collations/colldata/collation.go new file mode 100644 index 00000000000..ec66fc09b58 --- /dev/null +++ b/go/mysql/collations/colldata/collation.go @@ -0,0 +1,374 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package colldata + +import ( + "fmt" + "math" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/vt/vthash" +) + +type Charset = charset.Charset + +// Collation implements a MySQL-compatible collation. It defines how to compare +// for sorting order and equality two strings with the same encoding. +type Collation interface { + // ID returns the numerical identifier for this collation. This is the same + // value that is returned by MySQL in a query's headers to identify the collation + // for a given column + ID() collations.ID + + // Name is the full name of this collation, in the form of "ENCODING_LANG_SENSITIVITY" + Name() string + + // Collate compares two strings using this collation. `left` and `right` must be the + // two strings encoded in the proper encoding for this collation. If `isPrefix` is true, + // the function instead behaves equivalently to `strings.HasPrefix(left, right)`, but + // being collation-aware. + // It returns a numeric value like a normal comparison function: <0 if left < right, + // 0 if left == right, >0 if left > right + Collate(left, right []byte, isPrefix bool) int + + // WeightString returns a weight string for the given `src` string. A weight string + // is a binary representation of the weights for the given string, that can be + // compared byte-wise to return identical results to collating this string. + // + // This means: + // bytes.Compare(WeightString(left), WeightString(right)) == Collate(left, right) + // + // The semantics of this API have been carefully designed to match MySQL's behavior + // in its `strnxfrm` API. Most notably, the `numCodepoints` argument implies different + // behaviors depending on the collation's padding mode: + // + // - For collations that pad WITH SPACE (this is, all legacy collations in MySQL except + // for the newly introduced UCA v9.0.0 utf8mb4 collations in MySQL 8.0), `numCodepoints` + // can have the following values: + // + // - if `numCodepoints` is any integer greater than zero, this treats the `src` string + // as if it were in a `CHAR(numCodepoints)` column in MySQL, meaning that the resulting + // weight string will be padded with the weight for the SPACE character until it becomes + // wide enough to fill the `CHAR` column. This is necessary to perform weight comparisons + // in fixed-`CHAR` columns. If `numCodepoints` is smaller than the actual amount of + // codepoints stored in `src`, the result is unspecified. + // + // - if `numCodepoints` is zero, this is equivalent to `numCodepoints = RuneCount(src)`, + // meaning that the resulting weight string will have no padding at the end: it'll only have + // the weight values for the exact amount of codepoints contained in `src`. This is the + // behavior required to sort `VARCHAR` columns. + // + // - if `numCodepoints` is the special constant PadToMax, then the `dst` slice must be + // pre-allocated to a zero-length slice with enough capacity to hold the complete weight + // string, and any remaining capacity in `dst` will be filled by the weights for the + // padding character, repeatedly. This is a special flag used by MySQL when performing + // filesorts, where all the sorting keys must have identical sizes, even for `VARCHAR` + // columns. + // + // - For collations that have NO PAD (this is, the newly introduced UCA v9.0.0 utf8mb4 collations + // in MySQL 8.0), `numCodepoints` can only have the special constant `PadToMax`, which will make + // the weight string padding equivalent to a PAD SPACE collation (as explained in the previous + // section). All other values for `numCodepoints` are ignored, because NO PAD collations always + // return the weights for the codepoints in their strings, with no further padding at the end. + // + // The resulting weight string is written to `dst`, which can be pre-allocated to + // WeightStringLen() bytes to prevent growing the slice. `dst` can also be nil, in which + // case it will grow dynamically. If `numCodepoints` has the special PadToMax value explained + // earlier, `dst` MUST be pre-allocated to the target size or the function will return an + // empty slice. + WeightString(dst, src []byte, numCodepoints int) []byte + + // WeightStringLen returns a size (in bytes) that would fit any weight strings for a string + // with `numCodepoints` using this collation. Note that this is a higher bound for the size + // of the string, and in practice weight strings can be significantly smaller than the + // returned value. + WeightStringLen(numCodepoints int) int + + // Hash returns a 32 or 64 bit identifier (depending on the platform) that uniquely identifies + // the given string based on this collation. It is functionally equivalent to calling WeightString + // and then hashing the result. + // + // Consequently, if the hashes for two strings are different, then the two strings are considered + // different according to this collation. If the hashes for two strings are equal, the two strings + // may or may not be considered equal according to this collation, because hashes can collide unlike + // weight strings. + // + // The numCodepoints argument has the same behavior as in WeightString: if this collation uses PAD SPACE, + // the hash will interpret the source string as if it were stored in a `CHAR(n)` column. If the value of + // numCodepoints is 0, this is equivalent to setting `numCodepoints = RuneCount(src)`. + // For collations with NO PAD, the numCodepoint argument is ignored. + Hash(hasher *vthash.Hasher, src []byte, numCodepoints int) + + // Wildcard returns a matcher for the given wildcard pattern. The matcher can be used to repeatedly + // test different strings to check if they match the pattern. The pattern must be a traditional wildcard + // pattern, which may contain the provided special characters for matching one character or several characters. + // The provided `escape` character will be used as an escape sequence in front of the other special characters. + // + // This method is fully collation aware; the matching will be performed according to the underlying collation. + // I.e. if this is a case-insensitive collation, matching will be case-insensitive. + // + // The returned WildcardPattern is always valid, but if the provided special characters do not exist in this + // collation's repertoire, the returned pattern will not match any strings. Likewise, if the provided pattern + // has invalid syntax, the returned pattern will not match any strings. + // + // If the provided special characters are 0, the defaults to parse an SQL 'LIKE' statement will be used. + // This is, '_' for matching one character, '%' for matching many and '\\' for escape. + // + // This method can also be used for Shell-like matching with '?', '*' and '\\' as their respective special + // characters. + Wildcard(pat []byte, matchOne, matchMany, escape rune) WildcardPattern + + // Charset returns the Charset with which this collation is encoded + Charset() Charset + + // IsBinary returns whether this collation is a binary collation + IsBinary() bool +} + +// WildcardPattern is a matcher for a wildcard pattern, constructed from a given collation +type WildcardPattern interface { + // Match returns whether the given string matches this pattern + Match(in []byte) bool +} + +const PadToMax = math.MaxInt32 + +// CaseAwareCollation implements lowercase and uppercase conventions for collations. +type CaseAwareCollation interface { + Collation + ToUpper(dst []byte, src []byte) []byte + ToLower(dst []byte, src []byte) []byte +} + +func Lookup(id collations.ID) Collation { + if int(id) >= len(collationsById) { + return nil + } + return collationsById[id] +} + +// All returns a slice with all known collations in Vitess. +func All(env *collations.Environment) []Collation { + allCols := env.AllCollationIDs() + all := make([]Collation, 0, len(allCols)) + for _, col := range allCols { + all = append(all, collationsById[col]) + } + return all +} + +func checkCompatibleCollations( + left Collation, leftCoercibility collations.Coercibility, leftRepertoire collations.Repertoire, + right Collation, rightCoercibility collations.Coercibility, rightRepertoire collations.Repertoire, +) bool { + leftCS := left.Charset() + rightCS := right.Charset() + + switch leftCS.(type) { + case charset.Charset_utf8mb4: + if leftCoercibility <= rightCoercibility { + return true + } + + case charset.Charset_utf32: + switch { + case leftCoercibility < rightCoercibility: + return true + case leftCoercibility == rightCoercibility: + if !charset.IsUnicode(rightCS) { + return true + } + if !left.IsBinary() { + return true + } + } + + case charset.Charset_utf8mb3, charset.Charset_ucs2, charset.Charset_utf16, charset.Charset_utf16le: + switch { + case leftCoercibility < rightCoercibility: + return true + case leftCoercibility == rightCoercibility: + if !charset.IsUnicode(rightCS) { + return true + } + } + } + + if rightRepertoire == collations.RepertoireASCII { + switch { + case leftCoercibility < rightCoercibility: + return true + case leftCoercibility == rightCoercibility: + if leftRepertoire == collations.RepertoireUnicode { + return true + } + } + } + + return false +} + +// CoercionOptions is used to configure how aggressive the algorithm can be +// when merging two different collations by transcoding them. +type CoercionOptions struct { + // ConvertToSuperset allows merging two different collations as long + // as the charset of one of them is a strict superset of the other. In + // order to operate on the two expressions, one of them will need to + // be transcoded. This transcoding will always be safe because the string + // with the smallest repertoire will be transcoded to its superset, which + // cannot fail. + ConvertToSuperset bool + + // ConvertWithCoercion allows merging two different collations by forcing + // a coercion as long as the coercibility of the two sides is lax enough. + // This will force a transcoding of one of the expressions even if their + // respective charsets are not a strict superset, so the resulting transcoding + // CAN fail depending on the content of their strings. + ConvertWithCoercion bool +} + +// Coercion is a function that will transform either the given argument +// arguments of the function into a specific character set. The `dst` argument +// will be used as the destination of the coerced argument, but it can be nil. +type Coercion func(dst, in []byte) ([]byte, error) + +// Merge returns a Coercion function for a pair of TypedCollation based +// on their coercibility. +// +// The function takes the typed collations for the two sides of a text operation +// (namely, a comparison or concatenation of two textual expressions). These typed +// collations includes the actual collation for the expression on each size, their +// coercibility values (see: Coercibility) and their respective repertoires, +// and returns the target collation (i.e. the collation into which the two expressions +// must be coerced, and a Coercion function. The Coercion function can be called repeatedly +// with the different values for the two expressions and will transcode either +// the left-hand or right-hand value to the appropriate charset so it can be +// collated against the other value. +// +// If the collations for both sides of the expressions are the same, the returned +// Coercion function will be a no-op. Likewise, if the two collations are not the same, +// but they are compatible and have the same charset, the Coercion function will also +// be a no-op. +// +// If the collations for both sides of the expression are not compatible, an error +// will be returned and the returned TypedCollation and Coercion will be nil. +func Merge(env *collations.Environment, left, right collations.TypedCollation, opt CoercionOptions) (collations.TypedCollation, Coercion, Coercion, error) { + leftColl := Lookup(left.Collation) + rightColl := Lookup(right.Collation) + if leftColl == nil || rightColl == nil { + return collations.TypedCollation{}, nil, nil, fmt.Errorf("unsupported TypeCollationID: %v / %v", left.Collation, right.Collation) + } + + leftCS := leftColl.Charset() + rightCS := rightColl.Charset() + + if left.Coercibility == collations.CoerceExplicit && right.Coercibility == collations.CoerceExplicit { + if left.Collation != right.Collation { + goto cannotCoerce + } + } + + if leftCS.Name() == rightCS.Name() { + switch { + case left.Coercibility < right.Coercibility: + left.Repertoire |= right.Repertoire + return left, nil, nil, nil + + case left.Coercibility > right.Coercibility: + right.Repertoire |= left.Repertoire + return right, nil, nil, nil + + case left.Collation == right.Collation: + left.Repertoire |= right.Repertoire + return left, nil, nil, nil + } + + if left.Coercibility == collations.CoerceExplicit { + goto cannotCoerce + } + + leftCsBin := leftColl.IsBinary() + rightCsBin := rightColl.IsBinary() + + switch { + case leftCsBin && rightCsBin: + left.Coercibility = collations.CoerceNone + return left, nil, nil, nil + + case leftCsBin: + return left, nil, nil, nil + + case rightCsBin: + return right, nil, nil, nil + } + + defaults := env.LookupByCharset(leftCS.Name()) + return collations.TypedCollation{ + Collation: defaults.Binary, + Coercibility: collations.CoerceNone, + Repertoire: left.Repertoire | right.Repertoire, + }, nil, nil, nil + } + + if _, leftIsBinary := leftColl.(*Collation_binary); leftIsBinary { + if left.Coercibility <= right.Coercibility { + return left, nil, nil, nil + } + goto coerceToRight + } + if _, rightIsBinary := rightColl.(*Collation_binary); rightIsBinary { + if left.Coercibility >= right.Coercibility { + return right, nil, nil, nil + } + goto coerceToLeft + } + + if opt.ConvertToSuperset { + if checkCompatibleCollations(leftColl, left.Coercibility, left.Repertoire, rightColl, right.Coercibility, right.Repertoire) { + goto coerceToLeft + } + if checkCompatibleCollations(rightColl, right.Coercibility, right.Repertoire, leftColl, left.Coercibility, left.Repertoire) { + goto coerceToRight + } + } + + if opt.ConvertWithCoercion { + if left.Coercibility < right.Coercibility && right.Coercibility > collations.CoerceImplicit { + goto coerceToLeft + } + if right.Coercibility < left.Coercibility && left.Coercibility > collations.CoerceImplicit { + goto coerceToRight + } + } + +cannotCoerce: + return collations.TypedCollation{}, nil, nil, fmt.Errorf("Illegal mix of collations (%s,%s) and (%s,%s)", + leftColl.Name(), left.Coercibility, rightColl.Name(), right.Coercibility) + +coerceToLeft: + return left, nil, + func(dst, in []byte) ([]byte, error) { + return charset.Convert(dst, leftCS, in, rightCS) + }, nil + +coerceToRight: + return right, + func(dst, in []byte) ([]byte, error) { + return charset.Convert(dst, rightCS, in, leftCS) + }, nil, nil +} diff --git a/go/mysql/collations/fuzz.go b/go/mysql/collations/colldata/fuzz.go similarity index 98% rename from go/mysql/collations/fuzz.go rename to go/mysql/collations/colldata/fuzz.go index e71eae3fbdc..c5ebf50698b 100644 --- a/go/mysql/collations/fuzz.go +++ b/go/mysql/collations/colldata/fuzz.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package collations +package colldata import ( fuzz "github.com/AdaLogics/go-fuzz-headers" diff --git a/go/mysql/collations/fuzz_test.go b/go/mysql/collations/colldata/fuzz_test.go similarity index 96% rename from go/mysql/collations/fuzz_test.go rename to go/mysql/collations/colldata/fuzz_test.go index 1f36fd34ff3..0c11116f580 100644 --- a/go/mysql/collations/fuzz_test.go +++ b/go/mysql/collations/colldata/fuzz_test.go @@ -18,9 +18,11 @@ limitations under the License. // The fuzzing tests for collations use the new Fuzz implementation in Go 1.18+ -package collations +package colldata -import "testing" +import ( + "testing" +) func FuzzUCACollate(f *testing.F) { for _, left := range AllTestStrings { diff --git a/go/mysql/collations/colldata/golden_test.go b/go/mysql/collations/colldata/golden_test.go new file mode 100644 index 00000000000..2b41ebcddc6 --- /dev/null +++ b/go/mysql/collations/colldata/golden_test.go @@ -0,0 +1,82 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package colldata + +import ( + "bytes" + "fmt" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/internal/testutil" +) + +func TestGoldenWeights(t *testing.T) { + gllGoldenTests, err := filepath.Glob("testdata/wiki_*.gob.gz") + if err != nil { + t.Fatal(err) + } + + for _, goldenPath := range gllGoldenTests { + golden := &testutil.GoldenTest{} + if err := golden.DecodeFromFile(goldenPath); err != nil { + t.Fatal(err) + } + + for _, goldenCase := range golden.Cases { + t.Run(fmt.Sprintf("%s (%s)", golden.Name, goldenCase.Lang), func(t *testing.T) { + for coll, expected := range goldenCase.Weights { + coll := testcollation(t, coll) + + input, err := charset.ConvertFromUTF8(nil, coll.Charset(), goldenCase.Text) + if err != nil { + t.Fatal(err) + } + + result := coll.WeightString(nil, input, 0) + assert.True(t, bytes.Equal(expected, result), "mismatch for collation=%s\noriginal: %s\ninput: %#v\nexpected: %v\nactual: %v", coll.Name(), string(goldenCase.Text), input, expected, result) + + } + }) + } + } +} + +func TestCollationsForLanguage(t *testing.T) { + allCollations := testall() + langCounts := make(map[testutil.Lang][]string) + + for lang := range testutil.KnownLanguages { + var matched []string + for _, coll := range allCollations { + name := coll.Name() + if lang.MatchesCollation(name) { + matched = append(matched, name) + } + } + langCounts[lang] = matched + } + + for lang := range testutil.KnownLanguages { + assert.NotEqual(t, 0, len(langCounts[lang]), "no collations found for %q", lang) + + t.Logf("%s: %v", lang, langCounts[lang]) + } +} diff --git a/go/mysql/collations/multibyte.go b/go/mysql/collations/colldata/multibyte.go similarity index 95% rename from go/mysql/collations/multibyte.go rename to go/mysql/collations/colldata/multibyte.go index f9d13df2d1f..cc123a25a1a 100644 --- a/go/mysql/collations/multibyte.go +++ b/go/mysql/collations/colldata/multibyte.go @@ -14,23 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. */ -package collations +package colldata import ( "math" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" "vitess.io/vitess/go/vt/vthash" ) type Collation_multibyte struct { - id ID + id collations.ID name string sort *[256]byte charset charset.Charset } -func (c *Collation_multibyte) ID() ID { +func (c *Collation_multibyte) ID() collations.ID { return c.id } @@ -51,7 +52,7 @@ func (c *Collation_multibyte) Collate(left, right []byte, isPrefix bool) int { return collationBinary(left, right, isPrefix) } - cmpLen := minInt(len(left), len(right)) + cmpLen := min(len(left), len(right)) cs := c.charset sortOrder := c.sort for i := 0; i < cmpLen; i++ { @@ -62,7 +63,7 @@ func (c *Collation_multibyte) Collate(left, right []byte, isPrefix bool) int { } _, widthL := cs.DecodeRune(left[i:]) _, widthR := cs.DecodeRune(right[i:]) - switch minInt(widthL, widthR) { + switch min(widthL, widthR) { case 4: i++ if left[i] != right[i] { diff --git a/go/mysql/collations/mysqldata.go b/go/mysql/collations/colldata/mysqldata.go similarity index 61% rename from go/mysql/collations/mysqldata.go rename to go/mysql/collations/colldata/mysqldata.go index e9a119b3a20..f626028cb95 100644 --- a/go/mysql/collations/mysqldata.go +++ b/go/mysql/collations/colldata/mysqldata.go @@ -1,6 +1,22 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + // Code generated by makecolldata DO NOT EDIT -package collations +package colldata import ( charset "vitess.io/vitess/go/mysql/collations/charset" @@ -119,7 +135,7 @@ var tounicode_dec8_swedish_ci = [...]uint16{ 0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x00ff, 0x0000, 0x0000, } -var fromunicode_dec8_swedish_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xff, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa8, 0xa5, 0x0, 0xa7, 0x0, 0xa9, 0xaa, 0xab, 0x0, 0x0, 0x0, 0x0, 0xb0, 0xb1, 0xb2, 0xb3, 0x0, 0xb5, 0xb6, 0xb7, 0x0, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0x0, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0x0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0x0, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0x0, 0x0, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0x0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0x0, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x0, 0x0, 0xfd}}, eightbit.UnicodeMapping{From: 0x152, To: 0x178, Range: []uint8{0xd7, 0xf7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdd}}} +var fromunicode_dec8_swedish_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xff, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa8, 0xa5, 0x0, 0xa7, 0x0, 0xa9, 0xaa, 0xab, 0x0, 0x0, 0x0, 0x0, 0xb0, 0xb1, 0xb2, 0xb3, 0x0, 0xb5, 0xb6, 0xb7, 0x0, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0x0, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0x0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0x0, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0x0, 0x0, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0x0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0x0, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x0, 0x0, 0xfd}}, {From: 0x152, To: 0x178, Range: []uint8{0xd7, 0xf7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdd}}} var ctype_cp850_general_ci = [...]uint8{ 0x00, 0x20, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x30, @@ -232,7 +248,7 @@ var tounicode_cp850_general_ci = [...]uint16{ 0x00b0, 0x00a8, 0x00b7, 0x00b9, 0x00b3, 0x00b2, 0x25a0, 0x00a0, } -var fromunicode_cp850_general_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xff, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xad, 0xbd, 0x9c, 0xcf, 0xbe, 0xdd, 0xf5, 0xf9, 0xb8, 0xa6, 0xae, 0xaa, 0xf0, 0xa9, 0xee, 0xf8, 0xf1, 0xfd, 0xfc, 0xef, 0xe6, 0xf4, 0xfa, 0xf7, 0xfb, 0xa7, 0xaf, 0xac, 0xab, 0xf3, 0xa8, 0xb7, 0xb5, 0xb6, 0xc7, 0x8e, 0x8f, 0x92, 0x80, 0xd4, 0x90, 0xd2, 0xd3, 0xde, 0xd6, 0xd7, 0xd8, 0xd1, 0xa5, 0xe3, 0xe0, 0xe2, 0xe5, 0x99, 0x9e, 0x9d, 0xeb, 0xe9, 0xea, 0x9a, 0xed, 0xe8, 0xe1, 0x85, 0xa0, 0x83, 0xc6, 0x84, 0x86, 0x91, 0x87, 0x8a, 0x82, 0x88, 0x89, 0x8d, 0xa1, 0x8c, 0x8b, 0xd0, 0xa4, 0x95, 0xa2, 0x93, 0xe4, 0x94, 0xf6, 0x9b, 0x97, 0xa3, 0x96, 0x81, 0xec, 0xe7, 0x98}}, eightbit.UnicodeMapping{From: 0x2500, To: 0x25a0, Range: []uint8{0xc4, 0x0, 0xb3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xda, 0x0, 0x0, 0x0, 0xbf, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0xd9, 0x0, 0x0, 0x0, 0xc3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcd, 0xba, 0x0, 0x0, 0xc9, 0x0, 0x0, 0xbb, 0x0, 0x0, 0xc8, 0x0, 0x0, 0xbc, 0x0, 0x0, 0xcc, 0x0, 0x0, 0xb9, 0x0, 0x0, 0xcb, 0x0, 0x0, 0xca, 0x0, 0x0, 0xce, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0xdc, 0x0, 0x0, 0x0, 0xdb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb0, 0xb1, 0xb2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xfe}}, eightbit.UnicodeMapping{From: 0x131, To: 0x192, Range: []uint8{0xd5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9f}}, eightbit.UnicodeMapping{From: 0x2017, To: 0x2017, Range: []uint8{0xf2}}} +var fromunicode_cp850_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xff, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xad, 0xbd, 0x9c, 0xcf, 0xbe, 0xdd, 0xf5, 0xf9, 0xb8, 0xa6, 0xae, 0xaa, 0xf0, 0xa9, 0xee, 0xf8, 0xf1, 0xfd, 0xfc, 0xef, 0xe6, 0xf4, 0xfa, 0xf7, 0xfb, 0xa7, 0xaf, 0xac, 0xab, 0xf3, 0xa8, 0xb7, 0xb5, 0xb6, 0xc7, 0x8e, 0x8f, 0x92, 0x80, 0xd4, 0x90, 0xd2, 0xd3, 0xde, 0xd6, 0xd7, 0xd8, 0xd1, 0xa5, 0xe3, 0xe0, 0xe2, 0xe5, 0x99, 0x9e, 0x9d, 0xeb, 0xe9, 0xea, 0x9a, 0xed, 0xe8, 0xe1, 0x85, 0xa0, 0x83, 0xc6, 0x84, 0x86, 0x91, 0x87, 0x8a, 0x82, 0x88, 0x89, 0x8d, 0xa1, 0x8c, 0x8b, 0xd0, 0xa4, 0x95, 0xa2, 0x93, 0xe4, 0x94, 0xf6, 0x9b, 0x97, 0xa3, 0x96, 0x81, 0xec, 0xe7, 0x98}}, {From: 0x2500, To: 0x25a0, Range: []uint8{0xc4, 0x0, 0xb3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xda, 0x0, 0x0, 0x0, 0xbf, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0xd9, 0x0, 0x0, 0x0, 0xc3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcd, 0xba, 0x0, 0x0, 0xc9, 0x0, 0x0, 0xbb, 0x0, 0x0, 0xc8, 0x0, 0x0, 0xbc, 0x0, 0x0, 0xcc, 0x0, 0x0, 0xb9, 0x0, 0x0, 0xcb, 0x0, 0x0, 0xca, 0x0, 0x0, 0xce, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0xdc, 0x0, 0x0, 0x0, 0xdb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb0, 0xb1, 0xb2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xfe}}, {From: 0x131, To: 0x192, Range: []uint8{0xd5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9f}}, {From: 0x2017, To: 0x2017, Range: []uint8{0xf2}}} var ctype_latin1_german1_ci = [...]uint8{ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20, @@ -383,7 +399,7 @@ var tounicode_hp8_english_ci = [...]uint16{ 0x00bd, 0x00aa, 0x00ba, 0x00ab, 0x25a0, 0x00bb, 0x00b1, 0x0000, } -var fromunicode_hp8_english_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xff, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xb8, 0xbf, 0xbb, 0xba, 0xbc, 0x0, 0xbd, 0xab, 0x0, 0xf9, 0xfb, 0x0, 0x0, 0x0, 0xb0, 0xb3, 0xfe, 0x0, 0x0, 0xa8, 0xf3, 0xf4, 0xf2, 0x0, 0x0, 0xfa, 0xfd, 0xf7, 0xf8, 0xf5, 0xb9, 0xa1, 0xe0, 0xa2, 0xe1, 0xd8, 0xd0, 0xd3, 0xb4, 0xa3, 0xdc, 0xa4, 0xa5, 0xe6, 0xe5, 0xa6, 0xa7, 0xe3, 0xb6, 0xe8, 0xe7, 0xdf, 0xe9, 0xda, 0x0, 0xd2, 0xad, 0xed, 0xae, 0xdb, 0xb1, 0xf0, 0xde, 0xc8, 0xc4, 0xc0, 0xe2, 0xcc, 0xd4, 0xd7, 0xb5, 0xc9, 0xc5, 0xc1, 0xcd, 0xd9, 0xd5, 0xd1, 0xdd, 0xe4, 0xb7, 0xca, 0xc6, 0xc2, 0xea, 0xce, 0x0, 0xd6, 0xcb, 0xc7, 0xc3, 0xcf, 0xb2, 0xf1, 0xef}}, eightbit.UnicodeMapping{From: 0x160, To: 0x192, Range: []uint8{0xeb, 0xec, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xee, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xbe}}, eightbit.UnicodeMapping{From: 0x2c6, To: 0x2dc, Range: []uint8{0xaa, 0x0, 0x0, 0x0, 0x0, 0xa9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xac}}, eightbit.UnicodeMapping{From: 0x2014, To: 0x20a4, Range: []uint8{0xf6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xaf}}, eightbit.UnicodeMapping{From: 0x25a0, To: 0x25a0, Range: []uint8{0xfc}}} +var fromunicode_hp8_english_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xff, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xb8, 0xbf, 0xbb, 0xba, 0xbc, 0x0, 0xbd, 0xab, 0x0, 0xf9, 0xfb, 0x0, 0x0, 0x0, 0xb0, 0xb3, 0xfe, 0x0, 0x0, 0xa8, 0xf3, 0xf4, 0xf2, 0x0, 0x0, 0xfa, 0xfd, 0xf7, 0xf8, 0xf5, 0xb9, 0xa1, 0xe0, 0xa2, 0xe1, 0xd8, 0xd0, 0xd3, 0xb4, 0xa3, 0xdc, 0xa4, 0xa5, 0xe6, 0xe5, 0xa6, 0xa7, 0xe3, 0xb6, 0xe8, 0xe7, 0xdf, 0xe9, 0xda, 0x0, 0xd2, 0xad, 0xed, 0xae, 0xdb, 0xb1, 0xf0, 0xde, 0xc8, 0xc4, 0xc0, 0xe2, 0xcc, 0xd4, 0xd7, 0xb5, 0xc9, 0xc5, 0xc1, 0xcd, 0xd9, 0xd5, 0xd1, 0xdd, 0xe4, 0xb7, 0xca, 0xc6, 0xc2, 0xea, 0xce, 0x0, 0xd6, 0xcb, 0xc7, 0xc3, 0xcf, 0xb2, 0xf1, 0xef}}, {From: 0x160, To: 0x192, Range: []uint8{0xeb, 0xec, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xee, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xbe}}, {From: 0x2c6, To: 0x2dc, Range: []uint8{0xaa, 0x0, 0x0, 0x0, 0x0, 0xa9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xac}}, {From: 0x2014, To: 0x20a4, Range: []uint8{0xf6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xaf}}, {From: 0x25a0, To: 0x25a0, Range: []uint8{0xfc}}} var ctype_koi8r_general_ci = [...]uint8{ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20, @@ -496,7 +512,7 @@ var tounicode_koi8r_general_ci = [...]uint16{ 0x042c, 0x042b, 0x0417, 0x0428, 0x042d, 0x0429, 0x0427, 0x042a, } -var fromunicode_koi8r_general_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xf7, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xbf, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9c, 0x0, 0x9d, 0x0, 0x0, 0x0, 0x0, 0x9e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9f}}, eightbit.UnicodeMapping{From: 0x401, To: 0x451, Range: []uint8{0xb3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe1, 0xe2, 0xf7, 0xe7, 0xe4, 0xe5, 0xf6, 0xfa, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf2, 0xf3, 0xf4, 0xf5, 0xe6, 0xe8, 0xe3, 0xfe, 0xfb, 0xfd, 0xff, 0xf9, 0xf8, 0xfc, 0xe0, 0xf1, 0xc1, 0xc2, 0xd7, 0xc7, 0xc4, 0xc5, 0xd6, 0xda, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd2, 0xd3, 0xd4, 0xd5, 0xc6, 0xc8, 0xc3, 0xde, 0xdb, 0xdd, 0xdf, 0xd9, 0xd8, 0xdc, 0xc0, 0xd1, 0x0, 0xa3}}, eightbit.UnicodeMapping{From: 0x2500, To: 0x25a0, Range: []uint8{0x80, 0x0, 0x81, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x82, 0x0, 0x0, 0x0, 0x83, 0x0, 0x0, 0x0, 0x84, 0x0, 0x0, 0x0, 0x85, 0x0, 0x0, 0x0, 0x86, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x87, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x88, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa0, 0xa1, 0xa2, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8b, 0x0, 0x0, 0x0, 0x8c, 0x0, 0x0, 0x0, 0x8d, 0x0, 0x0, 0x0, 0x8e, 0x0, 0x0, 0x0, 0x8f, 0x90, 0x91, 0x92, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x94}}, eightbit.UnicodeMapping{From: 0x2219, To: 0x2265, Range: []uint8{0x95, 0x96, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x98, 0x99}}, eightbit.UnicodeMapping{From: 0x2320, To: 0x2321, Range: []uint8{0x93, 0x9b}}} +var fromunicode_koi8r_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xf7, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xbf, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9c, 0x0, 0x9d, 0x0, 0x0, 0x0, 0x0, 0x9e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9f}}, {From: 0x401, To: 0x451, Range: []uint8{0xb3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe1, 0xe2, 0xf7, 0xe7, 0xe4, 0xe5, 0xf6, 0xfa, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf2, 0xf3, 0xf4, 0xf5, 0xe6, 0xe8, 0xe3, 0xfe, 0xfb, 0xfd, 0xff, 0xf9, 0xf8, 0xfc, 0xe0, 0xf1, 0xc1, 0xc2, 0xd7, 0xc7, 0xc4, 0xc5, 0xd6, 0xda, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd2, 0xd3, 0xd4, 0xd5, 0xc6, 0xc8, 0xc3, 0xde, 0xdb, 0xdd, 0xdf, 0xd9, 0xd8, 0xdc, 0xc0, 0xd1, 0x0, 0xa3}}, {From: 0x2500, To: 0x25a0, Range: []uint8{0x80, 0x0, 0x81, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x82, 0x0, 0x0, 0x0, 0x83, 0x0, 0x0, 0x0, 0x84, 0x0, 0x0, 0x0, 0x85, 0x0, 0x0, 0x0, 0x86, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x87, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x88, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa0, 0xa1, 0xa2, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8b, 0x0, 0x0, 0x0, 0x8c, 0x0, 0x0, 0x0, 0x8d, 0x0, 0x0, 0x0, 0x8e, 0x0, 0x0, 0x0, 0x8f, 0x90, 0x91, 0x92, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x94}}, {From: 0x2219, To: 0x2265, Range: []uint8{0x95, 0x96, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x98, 0x99}}, {From: 0x2320, To: 0x2321, Range: []uint8{0x93, 0x9b}}} var ctype_latin2_general_ci = [...]uint8{ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20, @@ -609,7 +625,7 @@ var tounicode_latin2_general_ci = [...]uint16{ 0x0159, 0x016f, 0x00fa, 0x0171, 0x00fc, 0x00fd, 0x0163, 0x02d9, } -var fromunicode_latin2_general_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xfd, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0x0, 0x0, 0x0, 0xa4, 0x0, 0x0, 0xa7, 0xa8, 0x0, 0x0, 0x0, 0x0, 0xad, 0x0, 0x0, 0xb0, 0x0, 0x0, 0x0, 0xb4, 0x0, 0x0, 0x0, 0xb8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc1, 0xc2, 0x0, 0xc4, 0x0, 0x0, 0xc7, 0x0, 0xc9, 0x0, 0xcb, 0x0, 0xcd, 0xce, 0x0, 0x0, 0x0, 0x0, 0xd3, 0xd4, 0x0, 0xd6, 0xd7, 0x0, 0x0, 0xda, 0x0, 0xdc, 0xdd, 0x0, 0xdf, 0x0, 0xe1, 0xe2, 0x0, 0xe4, 0x0, 0x0, 0xe7, 0x0, 0xe9, 0x0, 0xeb, 0x0, 0xed, 0xee, 0x0, 0x0, 0x0, 0x0, 0xf3, 0xf4, 0x0, 0xf6, 0xf7, 0x0, 0x0, 0xfa, 0x0, 0xfc, 0xfd}}, eightbit.UnicodeMapping{From: 0x102, To: 0x17e, Range: []uint8{0xc3, 0xe3, 0xa1, 0xb1, 0xc6, 0xe6, 0x0, 0x0, 0x0, 0x0, 0xc8, 0xe8, 0xcf, 0xef, 0xd0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xca, 0xea, 0xcc, 0xec, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0xe5, 0x0, 0x0, 0xa5, 0xb5, 0x0, 0x0, 0xa3, 0xb3, 0xd1, 0xf1, 0x0, 0x0, 0xd2, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd5, 0xf5, 0x0, 0x0, 0xc0, 0xe0, 0x0, 0x0, 0xd8, 0xf8, 0xa6, 0xb6, 0x0, 0x0, 0xaa, 0xba, 0xa9, 0xb9, 0xde, 0xfe, 0xab, 0xbb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd9, 0xf9, 0xdb, 0xfb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xac, 0xbc, 0xaf, 0xbf, 0xae, 0xbe}}, eightbit.UnicodeMapping{From: 0x2c7, To: 0x2dd, Range: []uint8{0xb7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa2, 0xff, 0x0, 0xb2, 0x0, 0xbd}}} +var fromunicode_latin2_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xfd, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0x0, 0x0, 0x0, 0xa4, 0x0, 0x0, 0xa7, 0xa8, 0x0, 0x0, 0x0, 0x0, 0xad, 0x0, 0x0, 0xb0, 0x0, 0x0, 0x0, 0xb4, 0x0, 0x0, 0x0, 0xb8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc1, 0xc2, 0x0, 0xc4, 0x0, 0x0, 0xc7, 0x0, 0xc9, 0x0, 0xcb, 0x0, 0xcd, 0xce, 0x0, 0x0, 0x0, 0x0, 0xd3, 0xd4, 0x0, 0xd6, 0xd7, 0x0, 0x0, 0xda, 0x0, 0xdc, 0xdd, 0x0, 0xdf, 0x0, 0xe1, 0xe2, 0x0, 0xe4, 0x0, 0x0, 0xe7, 0x0, 0xe9, 0x0, 0xeb, 0x0, 0xed, 0xee, 0x0, 0x0, 0x0, 0x0, 0xf3, 0xf4, 0x0, 0xf6, 0xf7, 0x0, 0x0, 0xfa, 0x0, 0xfc, 0xfd}}, {From: 0x102, To: 0x17e, Range: []uint8{0xc3, 0xe3, 0xa1, 0xb1, 0xc6, 0xe6, 0x0, 0x0, 0x0, 0x0, 0xc8, 0xe8, 0xcf, 0xef, 0xd0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xca, 0xea, 0xcc, 0xec, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0xe5, 0x0, 0x0, 0xa5, 0xb5, 0x0, 0x0, 0xa3, 0xb3, 0xd1, 0xf1, 0x0, 0x0, 0xd2, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd5, 0xf5, 0x0, 0x0, 0xc0, 0xe0, 0x0, 0x0, 0xd8, 0xf8, 0xa6, 0xb6, 0x0, 0x0, 0xaa, 0xba, 0xa9, 0xb9, 0xde, 0xfe, 0xab, 0xbb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd9, 0xf9, 0xdb, 0xfb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xac, 0xbc, 0xaf, 0xbf, 0xae, 0xbe}}, {From: 0x2c7, To: 0x2dd, Range: []uint8{0xb7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa2, 0xff, 0x0, 0xb2, 0x0, 0xbd}}} var ctype_swe7_swedish_ci = [...]uint8{ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20, @@ -722,7 +738,7 @@ var tounicode_swe7_swedish_ci = [...]uint16{ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, } -var fromunicode_swe7_swedish_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xfc, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x0, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x0, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5b, 0x5d, 0x0, 0x0, 0x0, 0x40, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5c, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7b, 0x7d, 0x0, 0x0, 0x0, 0x60, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7c, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7e}}} +var fromunicode_swe7_swedish_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xfc, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x0, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x0, 0x0, 0x0, 0x0, 0x5f, 0x0, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5b, 0x5d, 0x0, 0x0, 0x0, 0x40, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5c, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7b, 0x7d, 0x0, 0x0, 0x0, 0x60, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7c, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7e}}} var ctype_ascii_general_ci = [...]uint8{ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20, @@ -816,7 +832,7 @@ var tounicode_ascii_general_ci = [...]uint16{ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, } -var fromunicode_ascii_general_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0x7f, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f}}} +var fromunicode_ascii_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0x7f, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f}}} var ctype_cp1251_bulgarian_ci = [...]uint8{ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20, @@ -929,7 +945,7 @@ var tounicode_cp1251_bulgarian_ci = [...]uint16{ 0x0448, 0x0449, 0x044a, 0x044b, 0x044c, 0x044d, 0x044e, 0x044f, } -var fromunicode_cp1251_bulgarian_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xbb, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa0, 0x0, 0x0, 0x0, 0xa4, 0x0, 0xa6, 0xa7, 0x0, 0xa9, 0x0, 0xab, 0xac, 0xad, 0xae, 0x0, 0xb0, 0xb1, 0x0, 0x0, 0x0, 0xb5, 0xb6, 0xb7, 0x0, 0x0, 0x0, 0xbb}}, eightbit.UnicodeMapping{From: 0x401, To: 0x491, Range: []uint8{0xa8, 0x80, 0x81, 0xaa, 0xbd, 0xb2, 0xaf, 0xa3, 0x8a, 0x8c, 0x8e, 0x8d, 0x0, 0xa1, 0x8f, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, 0x0, 0xb8, 0x90, 0x83, 0xba, 0xbe, 0xb3, 0xbf, 0xbc, 0x9a, 0x9c, 0x9e, 0x9d, 0x0, 0xa2, 0x9f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa5, 0xb4}}, eightbit.UnicodeMapping{From: 0x2013, To: 0x20ac, Range: []uint8{0x96, 0x97, 0x0, 0x0, 0x0, 0x91, 0x92, 0x82, 0x0, 0x93, 0x94, 0x84, 0x0, 0x86, 0x87, 0x95, 0x0, 0x0, 0x0, 0x85, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8b, 0x9b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x88}}, eightbit.UnicodeMapping{From: 0x2116, To: 0x2122, Range: []uint8{0xb9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x99}}} +var fromunicode_cp1251_bulgarian_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xbb, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa0, 0x0, 0x0, 0x0, 0xa4, 0x0, 0xa6, 0xa7, 0x0, 0xa9, 0x0, 0xab, 0xac, 0xad, 0xae, 0x0, 0xb0, 0xb1, 0x0, 0x0, 0x0, 0xb5, 0xb6, 0xb7, 0x0, 0x0, 0x0, 0xbb}}, {From: 0x401, To: 0x491, Range: []uint8{0xa8, 0x80, 0x81, 0xaa, 0xbd, 0xb2, 0xaf, 0xa3, 0x8a, 0x8c, 0x8e, 0x8d, 0x0, 0xa1, 0x8f, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, 0x0, 0xb8, 0x90, 0x83, 0xba, 0xbe, 0xb3, 0xbf, 0xbc, 0x9a, 0x9c, 0x9e, 0x9d, 0x0, 0xa2, 0x9f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa5, 0xb4}}, {From: 0x2013, To: 0x20ac, Range: []uint8{0x96, 0x97, 0x0, 0x0, 0x0, 0x91, 0x92, 0x82, 0x0, 0x93, 0x94, 0x84, 0x0, 0x86, 0x87, 0x95, 0x0, 0x0, 0x0, 0x85, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8b, 0x9b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x88}}, {From: 0x2116, To: 0x2122, Range: []uint8{0xb9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x99}}} var sortorder_latin1_danish_ci = [...]uint8{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, @@ -1023,7 +1039,7 @@ var tounicode_hebrew_general_ci = [...]uint16{ 0x05e8, 0x05e9, 0x05ea, 0x0000, 0x0000, 0x200e, 0x200f, 0x0000, } -var fromunicode_hebrew_general_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xf7, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0x0, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0x0, 0xab, 0xac, 0xad, 0xae, 0x0, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0x0, 0xbb, 0xbc, 0xbd, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xaa, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xba}}, eightbit.UnicodeMapping{From: 0x5d0, To: 0x5ea, Range: []uint8{0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa}}, eightbit.UnicodeMapping{From: 0x200e, To: 0x203e, Range: []uint8{0xfd, 0xfe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xaf}}} +var fromunicode_hebrew_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xf7, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0x0, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0x0, 0xab, 0xac, 0xad, 0xae, 0x0, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0x0, 0xbb, 0xbc, 0xbd, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xaa, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xba}}, {From: 0x5d0, To: 0x5ea, Range: []uint8{0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa}}, {From: 0x200e, To: 0x203e, Range: []uint8{0xfd, 0xfe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xaf}}} var ctype_latin7_estonian_cs = [...]uint8{ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20, @@ -1136,7 +1152,7 @@ var tounicode_latin7_estonian_cs = [...]uint16{ 0x0173, 0x0142, 0x015b, 0x016b, 0x00fc, 0x017c, 0x017e, 0x2019, } -var fromunicode_latin7_estonian_cs = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xfc, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0x0, 0xa2, 0xa3, 0xa4, 0x0, 0xa6, 0xa7, 0x0, 0xa9, 0x0, 0xab, 0xac, 0xad, 0xae, 0x0, 0xb0, 0xb1, 0xb2, 0xb3, 0x0, 0xb5, 0xb6, 0xb7, 0x0, 0xb9, 0x0, 0xbb, 0xbc, 0xbd, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc4, 0xc5, 0xaf, 0x0, 0x0, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd3, 0x0, 0xd5, 0xd6, 0xd7, 0xa8, 0x0, 0x0, 0x0, 0xdc, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0x0, 0xe4, 0xe5, 0xbf, 0x0, 0x0, 0xe9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf3, 0x0, 0xf5, 0xf6, 0xf7, 0xb8, 0x0, 0x0, 0x0, 0xfc}}, eightbit.UnicodeMapping{From: 0x100, To: 0x17e, Range: []uint8{0xc2, 0xe2, 0x0, 0x0, 0xc0, 0xe0, 0xc3, 0xe3, 0x0, 0x0, 0x0, 0x0, 0xc8, 0xe8, 0x0, 0x0, 0x0, 0x0, 0xc7, 0xe7, 0x0, 0x0, 0xcb, 0xeb, 0xc6, 0xe6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcc, 0xec, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xce, 0xee, 0x0, 0x0, 0xc1, 0xe1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcd, 0xed, 0x0, 0x0, 0x0, 0xcf, 0xef, 0x0, 0x0, 0x0, 0x0, 0xd9, 0xf9, 0xd1, 0xf1, 0xd2, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd4, 0xf4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xaa, 0xba, 0x0, 0x0, 0xda, 0xfa, 0x0, 0x0, 0x0, 0x0, 0xd0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdb, 0xfb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd8, 0xf8, 0x0, 0x0, 0x0, 0x0, 0x0, 0xca, 0xea, 0xdd, 0xfd, 0xde, 0xfe}}, eightbit.UnicodeMapping{From: 0x2019, To: 0x201e, Range: []uint8{0xff, 0x0, 0x0, 0xb4, 0xa1, 0xa5}}} +var fromunicode_latin7_estonian_cs = []eightbit.UnicodeMapping{{From: 0x0, To: 0xfc, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0x0, 0xa2, 0xa3, 0xa4, 0x0, 0xa6, 0xa7, 0x0, 0xa9, 0x0, 0xab, 0xac, 0xad, 0xae, 0x0, 0xb0, 0xb1, 0xb2, 0xb3, 0x0, 0xb5, 0xb6, 0xb7, 0x0, 0xb9, 0x0, 0xbb, 0xbc, 0xbd, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc4, 0xc5, 0xaf, 0x0, 0x0, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd3, 0x0, 0xd5, 0xd6, 0xd7, 0xa8, 0x0, 0x0, 0x0, 0xdc, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0x0, 0xe4, 0xe5, 0xbf, 0x0, 0x0, 0xe9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf3, 0x0, 0xf5, 0xf6, 0xf7, 0xb8, 0x0, 0x0, 0x0, 0xfc}}, {From: 0x100, To: 0x17e, Range: []uint8{0xc2, 0xe2, 0x0, 0x0, 0xc0, 0xe0, 0xc3, 0xe3, 0x0, 0x0, 0x0, 0x0, 0xc8, 0xe8, 0x0, 0x0, 0x0, 0x0, 0xc7, 0xe7, 0x0, 0x0, 0xcb, 0xeb, 0xc6, 0xe6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcc, 0xec, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xce, 0xee, 0x0, 0x0, 0xc1, 0xe1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcd, 0xed, 0x0, 0x0, 0x0, 0xcf, 0xef, 0x0, 0x0, 0x0, 0x0, 0xd9, 0xf9, 0xd1, 0xf1, 0xd2, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd4, 0xf4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xaa, 0xba, 0x0, 0x0, 0xda, 0xfa, 0x0, 0x0, 0x0, 0x0, 0xd0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdb, 0xfb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd8, 0xf8, 0x0, 0x0, 0x0, 0x0, 0x0, 0xca, 0xea, 0xdd, 0xfd, 0xde, 0xfe}}, {From: 0x2019, To: 0x201e, Range: []uint8{0xff, 0x0, 0x0, 0xb4, 0xa1, 0xa5}}} var sortorder_latin2_hungarian_ci = [...]uint8{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, @@ -1268,7 +1284,7 @@ var tounicode_koi8u_general_ci = [...]uint16{ 0x042c, 0x042b, 0x0417, 0x0428, 0x042d, 0x0429, 0x0427, 0x042a, } -var fromunicode_koi8u_general_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xf7, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xbf, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9c, 0x0, 0x9d, 0x0, 0x0, 0x0, 0x0, 0x9e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9f}}, eightbit.UnicodeMapping{From: 0x401, To: 0x491, Range: []uint8{0xb3, 0x0, 0x0, 0xb4, 0x0, 0xb6, 0xb7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe1, 0xe2, 0xf7, 0xe7, 0xe4, 0xe5, 0xf6, 0xfa, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf2, 0xf3, 0xf4, 0xf5, 0xe6, 0xe8, 0xe3, 0xfe, 0xfb, 0xfd, 0xff, 0xf9, 0xf8, 0xfc, 0xe0, 0xf1, 0xc1, 0xc2, 0xd7, 0xc7, 0xc4, 0xc5, 0xd6, 0xda, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd2, 0xd3, 0xd4, 0xd5, 0xc6, 0xc8, 0xc3, 0xde, 0xdb, 0xdd, 0xdf, 0xd9, 0xd8, 0xdc, 0xc0, 0xd1, 0x0, 0xa3, 0x0, 0x0, 0xa4, 0x0, 0xa6, 0xa7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xbd, 0xad}}, eightbit.UnicodeMapping{From: 0x2500, To: 0x25a0, Range: []uint8{0x80, 0x0, 0x81, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x82, 0x0, 0x0, 0x0, 0x83, 0x0, 0x0, 0x0, 0x84, 0x0, 0x0, 0x0, 0x85, 0x0, 0x0, 0x0, 0x86, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x87, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x88, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa0, 0xa1, 0xa2, 0x0, 0xa5, 0x0, 0x0, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0x0, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0x0, 0xb5, 0x0, 0x0, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0x0, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8b, 0x0, 0x0, 0x0, 0x8c, 0x0, 0x0, 0x0, 0x8d, 0x0, 0x0, 0x0, 0x8e, 0x0, 0x0, 0x0, 0x8f, 0x90, 0x91, 0x92, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x94}}, eightbit.UnicodeMapping{From: 0x221a, To: 0x2265, Range: []uint8{0x96, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x98, 0x99}}, eightbit.UnicodeMapping{From: 0x2320, To: 0x2321, Range: []uint8{0x93, 0x9b}}, eightbit.UnicodeMapping{From: 0x2022, To: 0x2022, Range: []uint8{0x95}}} +var fromunicode_koi8u_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xf7, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xbf, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9c, 0x0, 0x9d, 0x0, 0x0, 0x0, 0x0, 0x9e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9f}}, {From: 0x401, To: 0x491, Range: []uint8{0xb3, 0x0, 0x0, 0xb4, 0x0, 0xb6, 0xb7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe1, 0xe2, 0xf7, 0xe7, 0xe4, 0xe5, 0xf6, 0xfa, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf2, 0xf3, 0xf4, 0xf5, 0xe6, 0xe8, 0xe3, 0xfe, 0xfb, 0xfd, 0xff, 0xf9, 0xf8, 0xfc, 0xe0, 0xf1, 0xc1, 0xc2, 0xd7, 0xc7, 0xc4, 0xc5, 0xd6, 0xda, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd2, 0xd3, 0xd4, 0xd5, 0xc6, 0xc8, 0xc3, 0xde, 0xdb, 0xdd, 0xdf, 0xd9, 0xd8, 0xdc, 0xc0, 0xd1, 0x0, 0xa3, 0x0, 0x0, 0xa4, 0x0, 0xa6, 0xa7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xbd, 0xad}}, {From: 0x2500, To: 0x25a0, Range: []uint8{0x80, 0x0, 0x81, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x82, 0x0, 0x0, 0x0, 0x83, 0x0, 0x0, 0x0, 0x84, 0x0, 0x0, 0x0, 0x85, 0x0, 0x0, 0x0, 0x86, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x87, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x88, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa0, 0xa1, 0xa2, 0x0, 0xa5, 0x0, 0x0, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0x0, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0x0, 0xb5, 0x0, 0x0, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0x0, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8b, 0x0, 0x0, 0x0, 0x8c, 0x0, 0x0, 0x0, 0x8d, 0x0, 0x0, 0x0, 0x8e, 0x0, 0x0, 0x0, 0x8f, 0x90, 0x91, 0x92, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x94}}, {From: 0x221a, To: 0x2265, Range: []uint8{0x96, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x97, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x98, 0x99}}, {From: 0x2320, To: 0x2321, Range: []uint8{0x93, 0x9b}}, {From: 0x2022, To: 0x2022, Range: []uint8{0x95}}} var sortorder_cp1251_ukrainian_ci = [...]uint8{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, @@ -1419,7 +1435,7 @@ var tounicode_greek_general_ci = [...]uint16{ 0x03c8, 0x03c9, 0x03ca, 0x03cb, 0x03cc, 0x03cd, 0x03ce, 0x0000, } -var fromunicode_greek_general_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xbd, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0x0, 0x0, 0xa3, 0x0, 0x0, 0xa6, 0xa7, 0xa8, 0xa9, 0x0, 0xab, 0xac, 0xad, 0x0, 0x0, 0xb0, 0xb1, 0xb2, 0xb3, 0x0, 0x0, 0x0, 0xb7, 0x0, 0x0, 0x0, 0xbb, 0x0, 0xbd}}, eightbit.UnicodeMapping{From: 0x384, To: 0x3ce, Range: []uint8{0xb4, 0xb5, 0xb6, 0x0, 0xb8, 0xb9, 0xba, 0x0, 0xbc, 0x0, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0x0, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe}}, eightbit.UnicodeMapping{From: 0x2bc, To: 0x2bd, Range: []uint8{0xa2, 0xa1}}, eightbit.UnicodeMapping{From: 0x2015, To: 0x2015, Range: []uint8{0xaf}}} +var fromunicode_greek_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xbd, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0x0, 0x0, 0xa3, 0x0, 0x0, 0xa6, 0xa7, 0xa8, 0xa9, 0x0, 0xab, 0xac, 0xad, 0x0, 0x0, 0xb0, 0xb1, 0xb2, 0xb3, 0x0, 0x0, 0x0, 0xb7, 0x0, 0x0, 0x0, 0xbb, 0x0, 0xbd}}, {From: 0x384, To: 0x3ce, Range: []uint8{0xb4, 0xb5, 0xb6, 0x0, 0xb8, 0xb9, 0xba, 0x0, 0xbc, 0x0, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0x0, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe}}, {From: 0x2bc, To: 0x2bd, Range: []uint8{0xa2, 0xa1}}, {From: 0x2015, To: 0x2015, Range: []uint8{0xaf}}} var ctype_cp1250_general_ci = [...]uint8{ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20, @@ -1532,7 +1548,7 @@ var tounicode_cp1250_general_ci = [...]uint16{ 0x0159, 0x016f, 0x00fa, 0x0171, 0x00fc, 0x00fd, 0x0163, 0x02d9, } -var fromunicode_cp1250_general_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xfd, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa0, 0x0, 0x0, 0x0, 0xa4, 0x0, 0xa6, 0xa7, 0xa8, 0xa9, 0x0, 0xab, 0xac, 0xad, 0xae, 0x0, 0xb0, 0xb1, 0x0, 0x0, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0x0, 0x0, 0xbb, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc1, 0xc2, 0x0, 0xc4, 0x0, 0x0, 0xc7, 0x0, 0xc9, 0x0, 0xcb, 0x0, 0xcd, 0xce, 0x0, 0x0, 0x0, 0x0, 0xd3, 0xd4, 0x0, 0xd6, 0xd7, 0x0, 0x0, 0xda, 0x0, 0xdc, 0xdd, 0x0, 0xdf, 0x0, 0xe1, 0xe2, 0x0, 0xe4, 0x0, 0x0, 0xe7, 0x0, 0xe9, 0x0, 0xeb, 0x0, 0xed, 0xee, 0x0, 0x0, 0x0, 0x0, 0xf3, 0xf4, 0x0, 0xf6, 0xf7, 0x0, 0x0, 0xfa, 0x0, 0xfc, 0xfd}}, eightbit.UnicodeMapping{From: 0x102, To: 0x17e, Range: []uint8{0xc3, 0xe3, 0xa5, 0xb9, 0xc6, 0xe6, 0x0, 0x0, 0x0, 0x0, 0xc8, 0xe8, 0xcf, 0xef, 0xd0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xca, 0xea, 0xcc, 0xec, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0xe5, 0x0, 0x0, 0xbc, 0xbe, 0x0, 0x0, 0xa3, 0xb3, 0xd1, 0xf1, 0x0, 0x0, 0xd2, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd5, 0xf5, 0x0, 0x0, 0xc0, 0xe0, 0x0, 0x0, 0xd8, 0xf8, 0x8c, 0x9c, 0x0, 0x0, 0xaa, 0xba, 0x8a, 0x9a, 0xde, 0xfe, 0x8d, 0x9d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd9, 0xf9, 0xdb, 0xfb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8f, 0x9f, 0xaf, 0xbf, 0x8e, 0x9e}}, eightbit.UnicodeMapping{From: 0x2013, To: 0x20ac, Range: []uint8{0x96, 0x97, 0x0, 0x0, 0x0, 0x91, 0x92, 0x82, 0x0, 0x93, 0x94, 0x84, 0x0, 0x86, 0x87, 0x95, 0x0, 0x0, 0x0, 0x85, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8b, 0x9b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80}}, eightbit.UnicodeMapping{From: 0x2c7, To: 0x2dd, Range: []uint8{0xa1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa2, 0xff, 0x0, 0xb2, 0x0, 0xbd}}, eightbit.UnicodeMapping{From: 0x2122, To: 0x2122, Range: []uint8{0x99}}} +var fromunicode_cp1250_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xfd, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa0, 0x0, 0x0, 0x0, 0xa4, 0x0, 0xa6, 0xa7, 0xa8, 0xa9, 0x0, 0xab, 0xac, 0xad, 0xae, 0x0, 0xb0, 0xb1, 0x0, 0x0, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0x0, 0x0, 0xbb, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc1, 0xc2, 0x0, 0xc4, 0x0, 0x0, 0xc7, 0x0, 0xc9, 0x0, 0xcb, 0x0, 0xcd, 0xce, 0x0, 0x0, 0x0, 0x0, 0xd3, 0xd4, 0x0, 0xd6, 0xd7, 0x0, 0x0, 0xda, 0x0, 0xdc, 0xdd, 0x0, 0xdf, 0x0, 0xe1, 0xe2, 0x0, 0xe4, 0x0, 0x0, 0xe7, 0x0, 0xe9, 0x0, 0xeb, 0x0, 0xed, 0xee, 0x0, 0x0, 0x0, 0x0, 0xf3, 0xf4, 0x0, 0xf6, 0xf7, 0x0, 0x0, 0xfa, 0x0, 0xfc, 0xfd}}, {From: 0x102, To: 0x17e, Range: []uint8{0xc3, 0xe3, 0xa5, 0xb9, 0xc6, 0xe6, 0x0, 0x0, 0x0, 0x0, 0xc8, 0xe8, 0xcf, 0xef, 0xd0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xca, 0xea, 0xcc, 0xec, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0xe5, 0x0, 0x0, 0xbc, 0xbe, 0x0, 0x0, 0xa3, 0xb3, 0xd1, 0xf1, 0x0, 0x0, 0xd2, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd5, 0xf5, 0x0, 0x0, 0xc0, 0xe0, 0x0, 0x0, 0xd8, 0xf8, 0x8c, 0x9c, 0x0, 0x0, 0xaa, 0xba, 0x8a, 0x9a, 0xde, 0xfe, 0x8d, 0x9d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd9, 0xf9, 0xdb, 0xfb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8f, 0x9f, 0xaf, 0xbf, 0x8e, 0x9e}}, {From: 0x2013, To: 0x20ac, Range: []uint8{0x96, 0x97, 0x0, 0x0, 0x0, 0x91, 0x92, 0x82, 0x0, 0x93, 0x94, 0x84, 0x0, 0x86, 0x87, 0x95, 0x0, 0x0, 0x0, 0x85, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8b, 0x9b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80}}, {From: 0x2c7, To: 0x2dd, Range: []uint8{0xa1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa2, 0xff, 0x0, 0xb2, 0x0, 0xbd}}, {From: 0x2122, To: 0x2122, Range: []uint8{0x99}}} var sortorder_latin2_croatian_ci = [...]uint8{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, @@ -1645,7 +1661,7 @@ var tounicode_cp1257_lithuanian_ci = [...]uint16{ 0x0173, 0x0142, 0x015b, 0x016b, 0x00fc, 0x017c, 0x017e, 0x02d9, } -var fromunicode_cp1257_lithuanian_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xfc, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa0, 0x0, 0xa2, 0xa3, 0xa4, 0x0, 0xa6, 0xa7, 0x8d, 0xa9, 0x0, 0xab, 0xac, 0xad, 0xae, 0x9d, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0x8f, 0xb9, 0x0, 0xbb, 0xbc, 0xbd, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc4, 0xc5, 0xaf, 0x0, 0x0, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd3, 0x0, 0xd5, 0xd6, 0xd7, 0xa8, 0x0, 0x0, 0x0, 0xdc, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0x0, 0xe4, 0xe5, 0xbf, 0x0, 0x0, 0xe9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf3, 0x0, 0xf5, 0xf6, 0xf7, 0xb8, 0x0, 0x0, 0x0, 0xfc}}, eightbit.UnicodeMapping{From: 0x100, To: 0x17e, Range: []uint8{0xc2, 0xe2, 0x0, 0x0, 0xc0, 0xe0, 0xc3, 0xe3, 0x0, 0x0, 0x0, 0x0, 0xc8, 0xe8, 0x0, 0x0, 0x0, 0x0, 0xc7, 0xe7, 0x0, 0x0, 0xcb, 0xeb, 0xc6, 0xe6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcc, 0xec, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xce, 0xee, 0x0, 0x0, 0xc1, 0xe1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcd, 0xed, 0x0, 0x0, 0x0, 0xcf, 0xef, 0x0, 0x0, 0x0, 0x0, 0xd9, 0xf9, 0xd1, 0xf1, 0xd2, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd4, 0xf4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xaa, 0xba, 0x0, 0x0, 0xda, 0xfa, 0x0, 0x0, 0x0, 0x0, 0xd0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdb, 0xfb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd8, 0xf8, 0x0, 0x0, 0x0, 0x0, 0x0, 0xca, 0xea, 0xdd, 0xfd, 0xde, 0xfe}}, eightbit.UnicodeMapping{From: 0x2013, To: 0x20ac, Range: []uint8{0x96, 0x97, 0x0, 0x0, 0x0, 0x91, 0x92, 0x82, 0x0, 0x93, 0x94, 0x84, 0x0, 0x86, 0x87, 0x95, 0x0, 0x0, 0x0, 0x85, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8b, 0x9b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80}}, eightbit.UnicodeMapping{From: 0x2c7, To: 0x2db, Range: []uint8{0x8e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x9e}}, eightbit.UnicodeMapping{From: 0x2122, To: 0x2122, Range: []uint8{0x99}}} +var fromunicode_cp1257_lithuanian_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xfc, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa0, 0x0, 0xa2, 0xa3, 0xa4, 0x0, 0xa6, 0xa7, 0x8d, 0xa9, 0x0, 0xab, 0xac, 0xad, 0xae, 0x9d, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0x8f, 0xb9, 0x0, 0xbb, 0xbc, 0xbd, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc4, 0xc5, 0xaf, 0x0, 0x0, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd3, 0x0, 0xd5, 0xd6, 0xd7, 0xa8, 0x0, 0x0, 0x0, 0xdc, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0x0, 0xe4, 0xe5, 0xbf, 0x0, 0x0, 0xe9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf3, 0x0, 0xf5, 0xf6, 0xf7, 0xb8, 0x0, 0x0, 0x0, 0xfc}}, {From: 0x100, To: 0x17e, Range: []uint8{0xc2, 0xe2, 0x0, 0x0, 0xc0, 0xe0, 0xc3, 0xe3, 0x0, 0x0, 0x0, 0x0, 0xc8, 0xe8, 0x0, 0x0, 0x0, 0x0, 0xc7, 0xe7, 0x0, 0x0, 0xcb, 0xeb, 0xc6, 0xe6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcc, 0xec, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xce, 0xee, 0x0, 0x0, 0xc1, 0xe1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcd, 0xed, 0x0, 0x0, 0x0, 0xcf, 0xef, 0x0, 0x0, 0x0, 0x0, 0xd9, 0xf9, 0xd1, 0xf1, 0xd2, 0xf2, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd4, 0xf4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xaa, 0xba, 0x0, 0x0, 0xda, 0xfa, 0x0, 0x0, 0x0, 0x0, 0xd0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdb, 0xfb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd8, 0xf8, 0x0, 0x0, 0x0, 0x0, 0x0, 0xca, 0xea, 0xdd, 0xfd, 0xde, 0xfe}}, {From: 0x2013, To: 0x20ac, Range: []uint8{0x96, 0x97, 0x0, 0x0, 0x0, 0x91, 0x92, 0x82, 0x0, 0x93, 0x94, 0x84, 0x0, 0x86, 0x87, 0x95, 0x0, 0x0, 0x0, 0x85, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8b, 0x9b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80}}, {From: 0x2c7, To: 0x2db, Range: []uint8{0x8e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x9e}}, {From: 0x2122, To: 0x2122, Range: []uint8{0x99}}} var tolower_latin5_turkish_ci = [...]uint8{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, @@ -1739,7 +1755,7 @@ var tounicode_latin5_turkish_ci = [...]uint16{ 0x00f8, 0x00f9, 0x00fa, 0x00fb, 0x00fc, 0x0131, 0x015f, 0x00ff, } -var fromunicode_latin5_turkish_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xff, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0x0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0x0, 0x0, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0x0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x0, 0x0, 0xff}}, eightbit.UnicodeMapping{From: 0x11e, To: 0x15f, Range: []uint8{0xd0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdd, 0xfd, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xde, 0xfe}}} +var fromunicode_latin5_turkish_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xff, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0x0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0x0, 0x0, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0x0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x0, 0x0, 0xff}}, {From: 0x11e, To: 0x15f, Range: []uint8{0xd0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdd, 0xfd, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xde, 0xfe}}} var ctype_armscii8_general_ci = [...]uint8{ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20, @@ -1833,7 +1849,7 @@ var tounicode_armscii8_general_ci = [...]uint16{ 0x0554, 0x0584, 0x0555, 0x0585, 0x0556, 0x0586, 0x2019, 0x0027, } -var fromunicode_armscii8_general_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xbb, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa2, 0x0, 0x0, 0x0, 0xa7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa6}}, eightbit.UnicodeMapping{From: 0x531, To: 0x589, Range: []uint8{0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe, 0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde, 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0x0, 0x0, 0x0, 0x0, 0xb0, 0xaf, 0xaa, 0xb1, 0xad, 0x0, 0xb3, 0xb5, 0xb7, 0xb9, 0xbb, 0xbd, 0xbf, 0xc1, 0xc3, 0xc5, 0xc7, 0xc9, 0xcb, 0xcd, 0xcf, 0xd1, 0xd3, 0xd5, 0xd7, 0xd9, 0xdb, 0xdd, 0xdf, 0xe1, 0xe3, 0xe5, 0xe7, 0xe9, 0xeb, 0xed, 0xef, 0xf1, 0xf3, 0xf5, 0xf7, 0xf9, 0xfb, 0xfd, 0x0, 0x0, 0xa3}}, eightbit.UnicodeMapping{From: 0x2014, To: 0x2026, Range: []uint8{0xa8, 0x0, 0x0, 0x0, 0x0, 0xfe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xae}}, eightbit.UnicodeMapping{From: 0x2741, To: 0x2741, Range: []uint8{0xa1}}} +var fromunicode_armscii8_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xbb, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa2, 0x0, 0x0, 0x0, 0xa7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa6}}, {From: 0x531, To: 0x589, Range: []uint8{0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe, 0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde, 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0x0, 0x0, 0x0, 0x0, 0xb0, 0xaf, 0xaa, 0xb1, 0xad, 0x0, 0xb3, 0xb5, 0xb7, 0xb9, 0xbb, 0xbd, 0xbf, 0xc1, 0xc3, 0xc5, 0xc7, 0xc9, 0xcb, 0xcd, 0xcf, 0xd1, 0xd3, 0xd5, 0xd7, 0xd9, 0xdb, 0xdd, 0xdf, 0xe1, 0xe3, 0xe5, 0xe7, 0xe9, 0xeb, 0xed, 0xef, 0xf1, 0xf3, 0xf5, 0xf7, 0xf9, 0xfb, 0xfd, 0x0, 0x0, 0xa3}}, {From: 0x2014, To: 0x2026, Range: []uint8{0xa8, 0x0, 0x0, 0x0, 0x0, 0xfe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xae}}, {From: 0x2741, To: 0x2741, Range: []uint8{0xa1}}} var ctype_cp866_general_ci = [...]uint8{ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20, @@ -1946,7 +1962,7 @@ var tounicode_cp866_general_ci = [...]uint16{ 0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0, } -var fromunicode_cp866_general_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xb7, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf8, 0x0, 0xfd, 0x0, 0x0, 0x0, 0x0, 0xfa}}, eightbit.UnicodeMapping{From: 0x401, To: 0x45e, Range: []uint8{0xf0, 0x0, 0x0, 0xf2, 0x0, 0x0, 0xf4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf6, 0x0, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0x0, 0xf1, 0x0, 0x0, 0xf3, 0x0, 0x0, 0xf5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf7}}, eightbit.UnicodeMapping{From: 0x2500, To: 0x25a0, Range: []uint8{0xc4, 0x0, 0xb3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xda, 0x0, 0x0, 0x0, 0xbf, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0xd9, 0x0, 0x0, 0x0, 0xc3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, 0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, 0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, 0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0xdc, 0x0, 0x0, 0x0, 0xdb, 0x0, 0x0, 0x0, 0xdd, 0x0, 0x0, 0x0, 0xde, 0xb0, 0xb1, 0xb2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xfe}}, eightbit.UnicodeMapping{From: 0x2219, To: 0x221a, Range: []uint8{0xf9, 0xfb}}, eightbit.UnicodeMapping{From: 0x207f, To: 0x207f, Range: []uint8{0xfc}}} +var fromunicode_cp866_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xb7, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf8, 0x0, 0xfd, 0x0, 0x0, 0x0, 0x0, 0xfa}}, {From: 0x401, To: 0x45e, Range: []uint8{0xf0, 0x0, 0x0, 0xf2, 0x0, 0x0, 0xf4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf6, 0x0, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0x0, 0xf1, 0x0, 0x0, 0xf3, 0x0, 0x0, 0xf5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf7}}, {From: 0x2500, To: 0x25a0, Range: []uint8{0xc4, 0x0, 0xb3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xda, 0x0, 0x0, 0x0, 0xbf, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0xd9, 0x0, 0x0, 0x0, 0xc3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, 0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, 0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, 0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0xdc, 0x0, 0x0, 0x0, 0xdb, 0x0, 0x0, 0x0, 0xdd, 0x0, 0x0, 0x0, 0xde, 0xb0, 0xb1, 0xb2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xfe}}, {From: 0x2219, To: 0x221a, Range: []uint8{0xf9, 0xfb}}, {From: 0x207f, To: 0x207f, Range: []uint8{0xfc}}} var ctype_keybcs2_general_ci = [...]uint8{ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20, @@ -2059,7 +2075,7 @@ var tounicode_keybcs2_general_ci = [...]uint16{ 0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0, } -var fromunicode_keybcs2_general_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xfd, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xad, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xae, 0x0, 0x0, 0x0, 0x0, 0xf8, 0xf1, 0xfd, 0x0, 0x0, 0xe6, 0x0, 0xfa, 0x0, 0x0, 0x0, 0xaf, 0xac, 0x0, 0x0, 0x0, 0x0, 0x8f, 0x0, 0x0, 0x8e, 0x0, 0x0, 0x0, 0x0, 0x90, 0x0, 0x0, 0x0, 0x8b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x95, 0xa7, 0x0, 0x99, 0x0, 0x0, 0x0, 0x97, 0x0, 0x9a, 0x9d, 0x0, 0xe1, 0x0, 0xa0, 0x0, 0x0, 0x84, 0x0, 0x0, 0x0, 0x0, 0x82, 0x0, 0x0, 0x0, 0xa1, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa2, 0x93, 0x0, 0x94, 0xf6, 0x0, 0x0, 0xa3, 0x0, 0x81, 0x98}}, eightbit.UnicodeMapping{From: 0x2500, To: 0x25a0, Range: []uint8{0xc4, 0x0, 0xb3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xda, 0x0, 0x0, 0x0, 0xbf, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0xd9, 0x0, 0x0, 0x0, 0xc3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, 0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, 0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, 0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0xdc, 0x0, 0x0, 0x0, 0xdb, 0x0, 0x0, 0x0, 0xdd, 0x0, 0x0, 0x0, 0xde, 0xb0, 0xb1, 0xb2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xfe}}, eightbit.UnicodeMapping{From: 0x10c, To: 0x17e, Range: []uint8{0x80, 0x87, 0x85, 0x83, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x88, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8a, 0x8d, 0x0, 0x0, 0x9c, 0x8c, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa5, 0xa4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xab, 0xaa, 0x0, 0x0, 0x9e, 0xa9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9b, 0xa8, 0x0, 0x0, 0x86, 0x9f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa6, 0x96, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x92, 0x91}}, eightbit.UnicodeMapping{From: 0x393, To: 0x3c6, Range: []uint8{0xe2, 0x0, 0x0, 0x0, 0x0, 0xe9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe4, 0x0, 0x0, 0xe8, 0x0, 0x0, 0xea, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe0, 0x0, 0x0, 0xeb, 0xee, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe3, 0x0, 0x0, 0xe5, 0xe7, 0x0, 0xed}}, eightbit.UnicodeMapping{From: 0x2219, To: 0x2265, Range: []uint8{0xf9, 0xfb, 0x0, 0x0, 0x0, 0xec, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xef, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0xf3, 0xf2}}, eightbit.UnicodeMapping{From: 0x2320, To: 0x2321, Range: []uint8{0xf4, 0xf5}}, eightbit.UnicodeMapping{From: 0x207f, To: 0x207f, Range: []uint8{0xfc}}} +var fromunicode_keybcs2_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xfd, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xad, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xae, 0x0, 0x0, 0x0, 0x0, 0xf8, 0xf1, 0xfd, 0x0, 0x0, 0xe6, 0x0, 0xfa, 0x0, 0x0, 0x0, 0xaf, 0xac, 0x0, 0x0, 0x0, 0x0, 0x8f, 0x0, 0x0, 0x8e, 0x0, 0x0, 0x0, 0x0, 0x90, 0x0, 0x0, 0x0, 0x8b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x95, 0xa7, 0x0, 0x99, 0x0, 0x0, 0x0, 0x97, 0x0, 0x9a, 0x9d, 0x0, 0xe1, 0x0, 0xa0, 0x0, 0x0, 0x84, 0x0, 0x0, 0x0, 0x0, 0x82, 0x0, 0x0, 0x0, 0xa1, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa2, 0x93, 0x0, 0x94, 0xf6, 0x0, 0x0, 0xa3, 0x0, 0x81, 0x98}}, {From: 0x2500, To: 0x25a0, Range: []uint8{0xc4, 0x0, 0xb3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xda, 0x0, 0x0, 0x0, 0xbf, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0xd9, 0x0, 0x0, 0x0, 0xc3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, 0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, 0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, 0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0xdc, 0x0, 0x0, 0x0, 0xdb, 0x0, 0x0, 0x0, 0xdd, 0x0, 0x0, 0x0, 0xde, 0xb0, 0xb1, 0xb2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xfe}}, {From: 0x10c, To: 0x17e, Range: []uint8{0x80, 0x87, 0x85, 0x83, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x88, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8a, 0x8d, 0x0, 0x0, 0x9c, 0x8c, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa5, 0xa4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xab, 0xaa, 0x0, 0x0, 0x9e, 0xa9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x9b, 0xa8, 0x0, 0x0, 0x86, 0x9f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa6, 0x96, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x92, 0x91}}, {From: 0x393, To: 0x3c6, Range: []uint8{0xe2, 0x0, 0x0, 0x0, 0x0, 0xe9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe4, 0x0, 0x0, 0xe8, 0x0, 0x0, 0xea, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe0, 0x0, 0x0, 0xeb, 0xee, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe3, 0x0, 0x0, 0xe5, 0xe7, 0x0, 0xed}}, {From: 0x2219, To: 0x2265, Range: []uint8{0xf9, 0xfb, 0x0, 0x0, 0x0, 0xec, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xef, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0xf3, 0xf2}}, {From: 0x2320, To: 0x2321, Range: []uint8{0xf4, 0xf5}}, {From: 0x207f, To: 0x207f, Range: []uint8{0xfc}}} var ctype_macce_general_ci = [...]uint8{ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20, @@ -2172,7 +2188,7 @@ var tounicode_macce_general_ci = [...]uint16{ 0x00dd, 0x00fd, 0x0137, 0x017b, 0x0141, 0x017c, 0x0122, 0x02c7, } -var fromunicode_macce_general_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xfd, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xca, 0x0, 0x0, 0xa3, 0x0, 0x0, 0x0, 0xa4, 0xac, 0xa9, 0x0, 0xc7, 0xc2, 0x0, 0xa8, 0x0, 0xa1, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa6, 0x0, 0x0, 0x0, 0x0, 0xc8, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe7, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x83, 0x0, 0x0, 0x0, 0xea, 0x0, 0x0, 0x0, 0x0, 0x0, 0xee, 0xef, 0xcd, 0x85, 0x0, 0x0, 0x0, 0xf2, 0x0, 0x86, 0xf8, 0x0, 0xa7, 0x0, 0x87, 0x0, 0x0, 0x8a, 0x0, 0x0, 0x0, 0x0, 0x8e, 0x0, 0x0, 0x0, 0x92, 0x0, 0x0, 0x0, 0x0, 0x0, 0x97, 0x99, 0x9b, 0x9a, 0xd6, 0x0, 0x0, 0x9c, 0x0, 0x9f, 0xf9}}, eightbit.UnicodeMapping{From: 0x100, To: 0x17e, Range: []uint8{0x81, 0x82, 0x0, 0x0, 0x84, 0x88, 0x8c, 0x8d, 0x0, 0x0, 0x0, 0x0, 0x89, 0x8b, 0x91, 0x93, 0x0, 0x0, 0x94, 0x95, 0x0, 0x0, 0x96, 0x98, 0xa2, 0xab, 0x9d, 0x9e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xfe, 0xae, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb1, 0xb4, 0x0, 0x0, 0xaf, 0xb0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb5, 0xfa, 0x0, 0xbd, 0xbe, 0xb9, 0xba, 0xbb, 0xbc, 0x0, 0x0, 0xfc, 0xb8, 0xc1, 0xc4, 0xbf, 0xc0, 0xc5, 0xcb, 0x0, 0x0, 0x0, 0xcf, 0xd8, 0x0, 0x0, 0xcc, 0xce, 0x0, 0x0, 0xd9, 0xda, 0xdf, 0xe0, 0xdb, 0xde, 0xe5, 0xe6, 0x0, 0x0, 0x0, 0x0, 0xe1, 0xe4, 0x0, 0x0, 0xe8, 0xe9, 0x0, 0x0, 0x0, 0x0, 0xed, 0xf0, 0x0, 0x0, 0xf1, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8f, 0x90, 0xfb, 0xfd, 0xeb, 0xec}}, eightbit.UnicodeMapping{From: 0x2013, To: 0x203a, Range: []uint8{0xd0, 0xd1, 0x0, 0x0, 0x0, 0xd4, 0xd5, 0xe2, 0x0, 0xd2, 0xd3, 0xe3, 0x0, 0xa0, 0x0, 0xa5, 0x0, 0x0, 0x0, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdc, 0xdd}}, eightbit.UnicodeMapping{From: 0x2202, To: 0x2265, Range: []uint8{0xb6, 0x0, 0x0, 0x0, 0xc6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xad, 0x0, 0x0, 0x0, 0xb2, 0xb3}}, eightbit.UnicodeMapping{From: 0x2c7, To: 0x2c7, Range: []uint8{0xff}}, eightbit.UnicodeMapping{From: 0x2122, To: 0x2122, Range: []uint8{0xaa}}, eightbit.UnicodeMapping{From: 0x25ca, To: 0x25ca, Range: []uint8{0xd7}}} +var fromunicode_macce_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xfd, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xca, 0x0, 0x0, 0xa3, 0x0, 0x0, 0x0, 0xa4, 0xac, 0xa9, 0x0, 0xc7, 0xc2, 0x0, 0xa8, 0x0, 0xa1, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa6, 0x0, 0x0, 0x0, 0x0, 0xc8, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe7, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x83, 0x0, 0x0, 0x0, 0xea, 0x0, 0x0, 0x0, 0x0, 0x0, 0xee, 0xef, 0xcd, 0x85, 0x0, 0x0, 0x0, 0xf2, 0x0, 0x86, 0xf8, 0x0, 0xa7, 0x0, 0x87, 0x0, 0x0, 0x8a, 0x0, 0x0, 0x0, 0x0, 0x8e, 0x0, 0x0, 0x0, 0x92, 0x0, 0x0, 0x0, 0x0, 0x0, 0x97, 0x99, 0x9b, 0x9a, 0xd6, 0x0, 0x0, 0x9c, 0x0, 0x9f, 0xf9}}, {From: 0x100, To: 0x17e, Range: []uint8{0x81, 0x82, 0x0, 0x0, 0x84, 0x88, 0x8c, 0x8d, 0x0, 0x0, 0x0, 0x0, 0x89, 0x8b, 0x91, 0x93, 0x0, 0x0, 0x94, 0x95, 0x0, 0x0, 0x96, 0x98, 0xa2, 0xab, 0x9d, 0x9e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xfe, 0xae, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb1, 0xb4, 0x0, 0x0, 0xaf, 0xb0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb5, 0xfa, 0x0, 0xbd, 0xbe, 0xb9, 0xba, 0xbb, 0xbc, 0x0, 0x0, 0xfc, 0xb8, 0xc1, 0xc4, 0xbf, 0xc0, 0xc5, 0xcb, 0x0, 0x0, 0x0, 0xcf, 0xd8, 0x0, 0x0, 0xcc, 0xce, 0x0, 0x0, 0xd9, 0xda, 0xdf, 0xe0, 0xdb, 0xde, 0xe5, 0xe6, 0x0, 0x0, 0x0, 0x0, 0xe1, 0xe4, 0x0, 0x0, 0xe8, 0xe9, 0x0, 0x0, 0x0, 0x0, 0xed, 0xf0, 0x0, 0x0, 0xf1, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8f, 0x90, 0xfb, 0xfd, 0xeb, 0xec}}, {From: 0x2013, To: 0x203a, Range: []uint8{0xd0, 0xd1, 0x0, 0x0, 0x0, 0xd4, 0xd5, 0xe2, 0x0, 0xd2, 0xd3, 0xe3, 0x0, 0xa0, 0x0, 0xa5, 0x0, 0x0, 0x0, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdc, 0xdd}}, {From: 0x2202, To: 0x2265, Range: []uint8{0xb6, 0x0, 0x0, 0x0, 0xc6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xad, 0x0, 0x0, 0x0, 0xb2, 0xb3}}, {From: 0x2c7, To: 0x2c7, Range: []uint8{0xff}}, {From: 0x2122, To: 0x2122, Range: []uint8{0xaa}}, {From: 0x25ca, To: 0x25ca, Range: []uint8{0xd7}}} var ctype_macroman_general_ci = [...]uint8{ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20, @@ -2285,7 +2301,7 @@ var tounicode_macroman_general_ci = [...]uint16{ 0x00af, 0x02d8, 0x02d9, 0x02da, 0x00b8, 0x02dd, 0x02db, 0x02c7, } -var fromunicode_macroman_general_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xff, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xca, 0xc1, 0xa2, 0xa3, 0x0, 0xb4, 0x0, 0xa4, 0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x0, 0xa8, 0xf8, 0xa1, 0xb1, 0x0, 0x0, 0xab, 0xb5, 0xa6, 0xe1, 0xfc, 0x0, 0xbc, 0xc8, 0x0, 0x0, 0x0, 0xc0, 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, 0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, 0x0, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x0, 0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x0, 0x0, 0xa7, 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, 0x0, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, 0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x0, 0x0, 0xd8}}, eightbit.UnicodeMapping{From: 0x2013, To: 0x20ac, Range: []uint8{0xd0, 0xd1, 0x0, 0x0, 0x0, 0xd4, 0xd5, 0xe2, 0x0, 0xd2, 0xd3, 0xe3, 0x0, 0xa0, 0xe0, 0xa5, 0x0, 0x0, 0x0, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdc, 0xdd, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xda, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdb}}, eightbit.UnicodeMapping{From: 0x2202, To: 0x2265, Range: []uint8{0xb6, 0x0, 0x0, 0x0, 0xc6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb8, 0x0, 0xb7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc3, 0x0, 0x0, 0x0, 0xb0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xba, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xad, 0x0, 0x0, 0x0, 0xb2, 0xb3}}, eightbit.UnicodeMapping{From: 0x2c6, To: 0x2dd, Range: []uint8{0xf6, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd}}, eightbit.UnicodeMapping{From: 0x131, To: 0x192, Range: []uint8{0xf5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xce, 0xcf, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc4}}, eightbit.UnicodeMapping{From: 0x3a9, To: 0x3c0, Range: []uint8{0xbd, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb9}}, eightbit.UnicodeMapping{From: 0xfb01, To: 0xfb02, Range: []uint8{0xde, 0xdf}}, eightbit.UnicodeMapping{From: 0x2122, To: 0x2122, Range: []uint8{0xaa}}, eightbit.UnicodeMapping{From: 0x25ca, To: 0x25ca, Range: []uint8{0xd7}}, eightbit.UnicodeMapping{From: 0xf8ff, To: 0xf8ff, Range: []uint8{0xf0}}} +var fromunicode_macroman_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xff, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xca, 0xc1, 0xa2, 0xa3, 0x0, 0xb4, 0x0, 0xa4, 0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x0, 0xa8, 0xf8, 0xa1, 0xb1, 0x0, 0x0, 0xab, 0xb5, 0xa6, 0xe1, 0xfc, 0x0, 0xbc, 0xc8, 0x0, 0x0, 0x0, 0xc0, 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, 0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, 0x0, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x0, 0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x0, 0x0, 0xa7, 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, 0x0, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, 0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x0, 0x0, 0xd8}}, {From: 0x2013, To: 0x20ac, Range: []uint8{0xd0, 0xd1, 0x0, 0x0, 0x0, 0xd4, 0xd5, 0xe2, 0x0, 0xd2, 0xd3, 0xe3, 0x0, 0xa0, 0xe0, 0xa5, 0x0, 0x0, 0x0, 0xc9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdc, 0xdd, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xda, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdb}}, {From: 0x2202, To: 0x2265, Range: []uint8{0xb6, 0x0, 0x0, 0x0, 0xc6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb8, 0x0, 0xb7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc3, 0x0, 0x0, 0x0, 0xb0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xba, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xad, 0x0, 0x0, 0x0, 0xb2, 0xb3}}, {From: 0x2c6, To: 0x2dd, Range: []uint8{0xf6, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd}}, {From: 0x131, To: 0x192, Range: []uint8{0xf5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xce, 0xcf, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc4}}, {From: 0x3a9, To: 0x3c0, Range: []uint8{0xbd, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb9}}, {From: 0xfb01, To: 0xfb02, Range: []uint8{0xde, 0xdf}}, {From: 0x2122, To: 0x2122, Range: []uint8{0xaa}}, {From: 0x25ca, To: 0x25ca, Range: []uint8{0xd7}}, {From: 0xf8ff, To: 0xf8ff, Range: []uint8{0xf0}}} var ctype_cp852_general_ci = [...]uint8{ 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x28, 0x28, 0x20, @@ -2398,7 +2414,7 @@ var tounicode_cp852_general_ci = [...]uint16{ 0x00b0, 0x00a8, 0x02d9, 0x0171, 0x0158, 0x0159, 0x25a0, 0x00a0, } -var fromunicode_cp852_general_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xfd, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0xcf, 0x0, 0x0, 0xf5, 0xf9, 0x0, 0x0, 0xae, 0xaa, 0xf0, 0x0, 0x0, 0xf8, 0x0, 0x0, 0x0, 0xef, 0x0, 0x0, 0x0, 0xf7, 0x0, 0x0, 0xaf, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb5, 0xb6, 0x0, 0x8e, 0x0, 0x0, 0x80, 0x0, 0x90, 0x0, 0xd3, 0x0, 0xd6, 0xd7, 0x0, 0x0, 0x0, 0x0, 0xe0, 0xe2, 0x0, 0x99, 0x9e, 0x0, 0x0, 0xe9, 0x0, 0x9a, 0xed, 0x0, 0xe1, 0x0, 0xa0, 0x83, 0x0, 0x84, 0x0, 0x0, 0x87, 0x0, 0x82, 0x0, 0x89, 0x0, 0xa1, 0x8c, 0x0, 0x0, 0x0, 0x0, 0xa2, 0x93, 0x0, 0x94, 0xf6, 0x0, 0x0, 0xa3, 0x0, 0x81, 0xec}}, eightbit.UnicodeMapping{From: 0x102, To: 0x17e, Range: []uint8{0xc6, 0xc7, 0xa4, 0xa5, 0x8f, 0x86, 0x0, 0x0, 0x0, 0x0, 0xac, 0x9f, 0xd2, 0xd4, 0xd1, 0xd0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa8, 0xa9, 0xb7, 0xd8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x91, 0x92, 0x0, 0x0, 0x95, 0x96, 0x0, 0x0, 0x9d, 0x88, 0xe3, 0xe4, 0x0, 0x0, 0xd5, 0xe5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8a, 0x8b, 0x0, 0x0, 0xe8, 0xea, 0x0, 0x0, 0xfc, 0xfd, 0x97, 0x98, 0x0, 0x0, 0xb8, 0xad, 0xe6, 0xe7, 0xdd, 0xee, 0x9b, 0x9c, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xde, 0x85, 0xeb, 0xfb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8d, 0xab, 0xbd, 0xbe, 0xa6, 0xa7}}, eightbit.UnicodeMapping{From: 0x2500, To: 0x25a0, Range: []uint8{0xc4, 0x0, 0xb3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xda, 0x0, 0x0, 0x0, 0xbf, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0xd9, 0x0, 0x0, 0x0, 0xc3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcd, 0xba, 0x0, 0x0, 0xc9, 0x0, 0x0, 0xbb, 0x0, 0x0, 0xc8, 0x0, 0x0, 0xbc, 0x0, 0x0, 0xcc, 0x0, 0x0, 0xb9, 0x0, 0x0, 0xcb, 0x0, 0x0, 0xca, 0x0, 0x0, 0xce, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0xdc, 0x0, 0x0, 0x0, 0xdb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb0, 0xb1, 0xb2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xfe}}, eightbit.UnicodeMapping{From: 0x2c7, To: 0x2dd, Range: []uint8{0xf3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf4, 0xfa, 0x0, 0xf2, 0x0, 0xf1}}} +var fromunicode_cp852_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xfd, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0xcf, 0x0, 0x0, 0xf5, 0xf9, 0x0, 0x0, 0xae, 0xaa, 0xf0, 0x0, 0x0, 0xf8, 0x0, 0x0, 0x0, 0xef, 0x0, 0x0, 0x0, 0xf7, 0x0, 0x0, 0xaf, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb5, 0xb6, 0x0, 0x8e, 0x0, 0x0, 0x80, 0x0, 0x90, 0x0, 0xd3, 0x0, 0xd6, 0xd7, 0x0, 0x0, 0x0, 0x0, 0xe0, 0xe2, 0x0, 0x99, 0x9e, 0x0, 0x0, 0xe9, 0x0, 0x9a, 0xed, 0x0, 0xe1, 0x0, 0xa0, 0x83, 0x0, 0x84, 0x0, 0x0, 0x87, 0x0, 0x82, 0x0, 0x89, 0x0, 0xa1, 0x8c, 0x0, 0x0, 0x0, 0x0, 0xa2, 0x93, 0x0, 0x94, 0xf6, 0x0, 0x0, 0xa3, 0x0, 0x81, 0xec}}, {From: 0x102, To: 0x17e, Range: []uint8{0xc6, 0xc7, 0xa4, 0xa5, 0x8f, 0x86, 0x0, 0x0, 0x0, 0x0, 0xac, 0x9f, 0xd2, 0xd4, 0xd1, 0xd0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa8, 0xa9, 0xb7, 0xd8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x91, 0x92, 0x0, 0x0, 0x95, 0x96, 0x0, 0x0, 0x9d, 0x88, 0xe3, 0xe4, 0x0, 0x0, 0xd5, 0xe5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8a, 0x8b, 0x0, 0x0, 0xe8, 0xea, 0x0, 0x0, 0xfc, 0xfd, 0x97, 0x98, 0x0, 0x0, 0xb8, 0xad, 0xe6, 0xe7, 0xdd, 0xee, 0x9b, 0x9c, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xde, 0x85, 0xeb, 0xfb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8d, 0xab, 0xbd, 0xbe, 0xa6, 0xa7}}, {From: 0x2500, To: 0x25a0, Range: []uint8{0xc4, 0x0, 0xb3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xda, 0x0, 0x0, 0x0, 0xbf, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0xd9, 0x0, 0x0, 0x0, 0xc3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xcd, 0xba, 0x0, 0x0, 0xc9, 0x0, 0x0, 0xbb, 0x0, 0x0, 0xc8, 0x0, 0x0, 0xbc, 0x0, 0x0, 0xcc, 0x0, 0x0, 0xb9, 0x0, 0x0, 0xcb, 0x0, 0x0, 0xca, 0x0, 0x0, 0xce, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdf, 0x0, 0x0, 0x0, 0xdc, 0x0, 0x0, 0x0, 0xdb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xb0, 0xb1, 0xb2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xfe}}, {From: 0x2c7, To: 0x2dd, Range: []uint8{0xf3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf4, 0xfa, 0x0, 0xf2, 0x0, 0xf1}}} var sortorder_latin7_general_ci = [...]uint8{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, @@ -2644,7 +2660,7 @@ var tounicode_cp1256_general_ci = [...]uint16{ 0x0651, 0x00f9, 0x0652, 0x00fb, 0x00fc, 0x200e, 0x200f, 0x0000, } -var fromunicode_cp1256_general_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xfc, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa0, 0x0, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0x0, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0x0, 0xbb, 0xbc, 0xbd, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe0, 0x0, 0xe2, 0x0, 0x0, 0x0, 0x0, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0x0, 0x0, 0xee, 0xef, 0x0, 0x0, 0x0, 0x0, 0xf4, 0x0, 0x0, 0xf7, 0x0, 0xf9, 0x0, 0xfb, 0xfc}}, eightbit.UnicodeMapping{From: 0x60c, To: 0x6af, Range: []uint8{0xa1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xba, 0x0, 0x0, 0x0, 0xbf, 0x0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd8, 0xd9, 0xda, 0xdb, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdc, 0xdd, 0xde, 0xdf, 0xe1, 0xe3, 0xe4, 0xe5, 0xe6, 0xec, 0xed, 0xf0, 0xf1, 0xf2, 0xf3, 0xf5, 0xf6, 0xf8, 0xfa, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x81, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x90}}, eightbit.UnicodeMapping{From: 0x200c, To: 0x20ac, Range: []uint8{0x9d, 0x9e, 0xfd, 0xfe, 0x0, 0x0, 0x0, 0x96, 0x97, 0x0, 0x0, 0x0, 0x91, 0x92, 0x82, 0x0, 0x93, 0x94, 0x84, 0x0, 0x86, 0x87, 0x95, 0x0, 0x0, 0x0, 0x85, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8b, 0x9b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80}}, eightbit.UnicodeMapping{From: 0x152, To: 0x192, Range: []uint8{0x8c, 0x9c, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x83}}, eightbit.UnicodeMapping{From: 0x2c6, To: 0x2c6, Range: []uint8{0x88}}, eightbit.UnicodeMapping{From: 0x2122, To: 0x2122, Range: []uint8{0x99}}} +var fromunicode_cp1256_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xfc, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa0, 0x0, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0x0, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0x0, 0xbb, 0xbc, 0xbd, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xe0, 0x0, 0xe2, 0x0, 0x0, 0x0, 0x0, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0x0, 0x0, 0xee, 0xef, 0x0, 0x0, 0x0, 0x0, 0xf4, 0x0, 0x0, 0xf7, 0x0, 0xf9, 0x0, 0xfb, 0xfc}}, {From: 0x60c, To: 0x6af, Range: []uint8{0xa1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xba, 0x0, 0x0, 0x0, 0xbf, 0x0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd8, 0xd9, 0xda, 0xdb, 0x0, 0x0, 0x0, 0x0, 0x0, 0xdc, 0xdd, 0xde, 0xdf, 0xe1, 0xe3, 0xe4, 0xe5, 0xe6, 0xec, 0xed, 0xf0, 0xf1, 0xf2, 0xf3, 0xf5, 0xf6, 0xf8, 0xfa, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x81, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8e, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x90}}, {From: 0x200c, To: 0x20ac, Range: []uint8{0x9d, 0x9e, 0xfd, 0xfe, 0x0, 0x0, 0x0, 0x96, 0x97, 0x0, 0x0, 0x0, 0x91, 0x92, 0x82, 0x0, 0x93, 0x94, 0x84, 0x0, 0x86, 0x87, 0x95, 0x0, 0x0, 0x0, 0x85, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8b, 0x9b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80}}, {From: 0x152, To: 0x192, Range: []uint8{0x8c, 0x9c, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x83}}, {From: 0x2c6, To: 0x2c6, Range: []uint8{0x88}}, {From: 0x2122, To: 0x2122, Range: []uint8{0x99}}} var sortorder_cp1257_general_ci = [...]uint8{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, @@ -2738,7 +2754,7 @@ var tounicode_geostd8_general_ci = [...]uint16{ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2116, 0x0000, 0x0000, } -var fromunicode_geostd8_general_ci = []eightbit.UnicodeMapping{eightbit.UnicodeMapping{From: 0x0, To: 0xbf, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf}}, eightbit.UnicodeMapping{From: 0x10d0, To: 0x10f5, Range: []uint8{0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe3, 0xe4, 0xc7, 0xce, 0xd5, 0xe2, 0xe5}}, eightbit.UnicodeMapping{From: 0x2013, To: 0x20ac, Range: []uint8{0x96, 0x97, 0x0, 0x0, 0x0, 0x91, 0x92, 0x82, 0x0, 0x93, 0x94, 0x84, 0x0, 0x86, 0x87, 0x95, 0x0, 0x0, 0x0, 0x85, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8b, 0x9b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80}}, eightbit.UnicodeMapping{From: 0x2116, To: 0x2116, Range: []uint8{0xfd}}} +var fromunicode_geostd8_general_ci = []eightbit.UnicodeMapping{{From: 0x0, To: 0xbf, Range: []uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf}}, {From: 0x10d0, To: 0x10f5, Range: []uint8{0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe3, 0xe4, 0xc7, 0xce, 0xd5, 0xe2, 0xe5}}, {From: 0x2013, To: 0x20ac, Range: []uint8{0x96, 0x97, 0x0, 0x0, 0x0, 0x91, 0x92, 0x82, 0x0, 0x93, 0x94, 0x84, 0x0, 0x86, 0x87, 0x95, 0x0, 0x0, 0x0, 0x85, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8b, 0x9b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80}}, {From: 0x2116, To: 0x2116, Range: []uint8{0xfd}}} var sortorder_latin1_spanish_ci = [...]uint8{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, @@ -2778,25 +2794,25 @@ var sortorder_cp1250_polish_ci = [...]uint8{ 0x48, 0x58, 0x57, 0x5a, 0x59, 0x59, 0x59, 0xc9, 0x5d, 0x64, 0x64, 0x64, 0x64, 0x69, 0x62, 0xff, } -var weightTailoring_utf16_icelandic_ci = []uca.Patch{uca.Patch{Codepoint: 193, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 196, Patch: []uint16{0x106c}}, uca.Patch{Codepoint: 197, Patch: []uint16{0x106e}}, uca.Patch{Codepoint: 198, Patch: []uint16{0x106c}}, uca.Patch{Codepoint: 201, Patch: []uint16{0xe8c}}, uca.Patch{Codepoint: 205, Patch: []uint16{0xefc}}, uca.Patch{Codepoint: 208, Patch: []uint16{0xe6e}}, uca.Patch{Codepoint: 211, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 214, Patch: []uint16{0x106d}}, uca.Patch{Codepoint: 216, Patch: []uint16{0x106d}}, uca.Patch{Codepoint: 218, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 221, Patch: []uint16{0x105f}}, uca.Patch{Codepoint: 222, Patch: []uint16{0x106b}}, uca.Patch{Codepoint: 225, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 228, Patch: []uint16{0x106c}}, uca.Patch{Codepoint: 229, Patch: []uint16{0x106e}}, uca.Patch{Codepoint: 230, Patch: []uint16{0x106c}}, uca.Patch{Codepoint: 233, Patch: []uint16{0xe8c}}, uca.Patch{Codepoint: 237, Patch: []uint16{0xefc}}, uca.Patch{Codepoint: 240, Patch: []uint16{0xe6e}}, uca.Patch{Codepoint: 243, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 246, Patch: []uint16{0x106d}}, uca.Patch{Codepoint: 248, Patch: []uint16{0x106d}}, uca.Patch{Codepoint: 250, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 253, Patch: []uint16{0x105f}}, uca.Patch{Codepoint: 254, Patch: []uint16{0x106b}}} +var weightTailoring_utf16_icelandic_ci = []uca.Patch{{Codepoint: 193, Patch: []uint16{0xe34}}, {Codepoint: 196, Patch: []uint16{0x106c}}, {Codepoint: 197, Patch: []uint16{0x106e}}, {Codepoint: 198, Patch: []uint16{0x106c}}, {Codepoint: 201, Patch: []uint16{0xe8c}}, {Codepoint: 205, Patch: []uint16{0xefc}}, {Codepoint: 208, Patch: []uint16{0xe6e}}, {Codepoint: 211, Patch: []uint16{0xf83}}, {Codepoint: 214, Patch: []uint16{0x106d}}, {Codepoint: 216, Patch: []uint16{0x106d}}, {Codepoint: 218, Patch: []uint16{0x1020}}, {Codepoint: 221, Patch: []uint16{0x105f}}, {Codepoint: 222, Patch: []uint16{0x106b}}, {Codepoint: 225, Patch: []uint16{0xe34}}, {Codepoint: 228, Patch: []uint16{0x106c}}, {Codepoint: 229, Patch: []uint16{0x106e}}, {Codepoint: 230, Patch: []uint16{0x106c}}, {Codepoint: 233, Patch: []uint16{0xe8c}}, {Codepoint: 237, Patch: []uint16{0xefc}}, {Codepoint: 240, Patch: []uint16{0xe6e}}, {Codepoint: 243, Patch: []uint16{0xf83}}, {Codepoint: 246, Patch: []uint16{0x106d}}, {Codepoint: 248, Patch: []uint16{0x106d}}, {Codepoint: 250, Patch: []uint16{0x1020}}, {Codepoint: 253, Patch: []uint16{0x105f}}, {Codepoint: 254, Patch: []uint16{0x106b}}} -var weightTailoring_utf16_latvian_ci = []uca.Patch{uca.Patch{Codepoint: 89, Patch: []uint16{0xefc}}, uca.Patch{Codepoint: 121, Patch: []uint16{0xefc}}, uca.Patch{Codepoint: 268, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 269, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 290, Patch: []uint16{0xec2}}, uca.Patch{Codepoint: 291, Patch: []uint16{0xec2}}, uca.Patch{Codepoint: 310, Patch: []uint16{0xf22}}, uca.Patch{Codepoint: 311, Patch: []uint16{0xf22}}, uca.Patch{Codepoint: 315, Patch: []uint16{0xf2f}}, uca.Patch{Codepoint: 316, Patch: []uint16{0xf2f}}, uca.Patch{Codepoint: 325, Patch: []uint16{0xf65}}, uca.Patch{Codepoint: 326, Patch: []uint16{0xf65}}, uca.Patch{Codepoint: 342, Patch: []uint16{0xfc1}}, uca.Patch{Codepoint: 343, Patch: []uint16{0xfc1}}, uca.Patch{Codepoint: 352, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 353, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 381, Patch: []uint16{0x106b}}, uca.Patch{Codepoint: 382, Patch: []uint16{0x106b}}} +var weightTailoring_utf16_latvian_ci = []uca.Patch{{Codepoint: 89, Patch: []uint16{0xefc}}, {Codepoint: 121, Patch: []uint16{0xefc}}, {Codepoint: 268, Patch: []uint16{0xe61}}, {Codepoint: 269, Patch: []uint16{0xe61}}, {Codepoint: 290, Patch: []uint16{0xec2}}, {Codepoint: 291, Patch: []uint16{0xec2}}, {Codepoint: 310, Patch: []uint16{0xf22}}, {Codepoint: 311, Patch: []uint16{0xf22}}, {Codepoint: 315, Patch: []uint16{0xf2f}}, {Codepoint: 316, Patch: []uint16{0xf2f}}, {Codepoint: 325, Patch: []uint16{0xf65}}, {Codepoint: 326, Patch: []uint16{0xf65}}, {Codepoint: 342, Patch: []uint16{0xfc1}}, {Codepoint: 343, Patch: []uint16{0xfc1}}, {Codepoint: 352, Patch: []uint16{0xfeb}}, {Codepoint: 353, Patch: []uint16{0xfeb}}, {Codepoint: 381, Patch: []uint16{0x106b}}, {Codepoint: 382, Patch: []uint16{0x106b}}} -var weightTailoring_utf16_romanian_ci = []uca.Patch{uca.Patch{Codepoint: 194, Patch: []uint16{0xe35}}, uca.Patch{Codepoint: 206, Patch: []uint16{0xefc}}, uca.Patch{Codepoint: 226, Patch: []uint16{0xe35}}, uca.Patch{Codepoint: 238, Patch: []uint16{0xefc}}, uca.Patch{Codepoint: 258, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 259, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 350, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 351, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 354, Patch: []uint16{0x1003}}, uca.Patch{Codepoint: 355, Patch: []uint16{0x1003}}, uca.Patch{Codepoint: 536, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 537, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 538, Patch: []uint16{0x1003}}, uca.Patch{Codepoint: 539, Patch: []uint16{0x1003}}} +var weightTailoring_utf16_romanian_ci = []uca.Patch{{Codepoint: 194, Patch: []uint16{0xe35}}, {Codepoint: 206, Patch: []uint16{0xefc}}, {Codepoint: 226, Patch: []uint16{0xe35}}, {Codepoint: 238, Patch: []uint16{0xefc}}, {Codepoint: 258, Patch: []uint16{0xe34}}, {Codepoint: 259, Patch: []uint16{0xe34}}, {Codepoint: 350, Patch: []uint16{0xfeb}}, {Codepoint: 351, Patch: []uint16{0xfeb}}, {Codepoint: 354, Patch: []uint16{0x1003}}, {Codepoint: 355, Patch: []uint16{0x1003}}, {Codepoint: 536, Patch: []uint16{0xfeb}}, {Codepoint: 537, Patch: []uint16{0xfeb}}, {Codepoint: 538, Patch: []uint16{0x1003}}, {Codepoint: 539, Patch: []uint16{0x1003}}} -var weightTailoring_utf16_slovenian_ci = []uca.Patch{uca.Patch{Codepoint: 268, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 269, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 352, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 353, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 381, Patch: []uint16{0x106b}}, uca.Patch{Codepoint: 382, Patch: []uint16{0x106b}}} +var weightTailoring_utf16_slovenian_ci = []uca.Patch{{Codepoint: 268, Patch: []uint16{0xe61}}, {Codepoint: 269, Patch: []uint16{0xe61}}, {Codepoint: 352, Patch: []uint16{0xfeb}}, {Codepoint: 353, Patch: []uint16{0xfeb}}, {Codepoint: 381, Patch: []uint16{0x106b}}, {Codepoint: 382, Patch: []uint16{0x106b}}} -var weightTailoring_utf16_polish_ci = []uca.Patch{uca.Patch{Codepoint: 211, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 243, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 260, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 261, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 262, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 263, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 280, Patch: []uint16{0xe8c}}, uca.Patch{Codepoint: 281, Patch: []uint16{0xe8c}}, uca.Patch{Codepoint: 321, Patch: []uint16{0xf2f}}, uca.Patch{Codepoint: 322, Patch: []uint16{0xf2f}}, uca.Patch{Codepoint: 323, Patch: []uint16{0xf65}}, uca.Patch{Codepoint: 324, Patch: []uint16{0xf65}}, uca.Patch{Codepoint: 346, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 347, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 377, Patch: []uint16{0x106b}}, uca.Patch{Codepoint: 378, Patch: []uint16{0x106b}}, uca.Patch{Codepoint: 379, Patch: []uint16{0x106c}}, uca.Patch{Codepoint: 380, Patch: []uint16{0x106c}}} +var weightTailoring_utf16_polish_ci = []uca.Patch{{Codepoint: 211, Patch: []uint16{0xf83}}, {Codepoint: 243, Patch: []uint16{0xf83}}, {Codepoint: 260, Patch: []uint16{0xe34}}, {Codepoint: 261, Patch: []uint16{0xe34}}, {Codepoint: 262, Patch: []uint16{0xe61}}, {Codepoint: 263, Patch: []uint16{0xe61}}, {Codepoint: 280, Patch: []uint16{0xe8c}}, {Codepoint: 281, Patch: []uint16{0xe8c}}, {Codepoint: 321, Patch: []uint16{0xf2f}}, {Codepoint: 322, Patch: []uint16{0xf2f}}, {Codepoint: 323, Patch: []uint16{0xf65}}, {Codepoint: 324, Patch: []uint16{0xf65}}, {Codepoint: 346, Patch: []uint16{0xfeb}}, {Codepoint: 347, Patch: []uint16{0xfeb}}, {Codepoint: 377, Patch: []uint16{0x106b}}, {Codepoint: 378, Patch: []uint16{0x106b}}, {Codepoint: 379, Patch: []uint16{0x106c}}, {Codepoint: 380, Patch: []uint16{0x106c}}} -var weightTailoring_utf16_estonian_ci = []uca.Patch{uca.Patch{Codepoint: 90, Patch: []uint16{0xfec}}, uca.Patch{Codepoint: 122, Patch: []uint16{0xfec}}, uca.Patch{Codepoint: 196, Patch: []uint16{0x1053}}, uca.Patch{Codepoint: 213, Patch: []uint16{0x1052}}, uca.Patch{Codepoint: 214, Patch: []uint16{0x1054}}, uca.Patch{Codepoint: 220, Patch: []uint16{0x1055}}, uca.Patch{Codepoint: 228, Patch: []uint16{0x1053}}, uca.Patch{Codepoint: 245, Patch: []uint16{0x1052}}, uca.Patch{Codepoint: 246, Patch: []uint16{0x1054}}, uca.Patch{Codepoint: 252, Patch: []uint16{0x1055}}, uca.Patch{Codepoint: 352, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 353, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 381, Patch: []uint16{0xfed}}, uca.Patch{Codepoint: 382, Patch: []uint16{0xfed}}} +var weightTailoring_utf16_estonian_ci = []uca.Patch{{Codepoint: 90, Patch: []uint16{0xfec}}, {Codepoint: 122, Patch: []uint16{0xfec}}, {Codepoint: 196, Patch: []uint16{0x1053}}, {Codepoint: 213, Patch: []uint16{0x1052}}, {Codepoint: 214, Patch: []uint16{0x1054}}, {Codepoint: 220, Patch: []uint16{0x1055}}, {Codepoint: 228, Patch: []uint16{0x1053}}, {Codepoint: 245, Patch: []uint16{0x1052}}, {Codepoint: 246, Patch: []uint16{0x1054}}, {Codepoint: 252, Patch: []uint16{0x1055}}, {Codepoint: 352, Patch: []uint16{0xfeb}}, {Codepoint: 353, Patch: []uint16{0xfeb}}, {Codepoint: 381, Patch: []uint16{0xfed}}, {Codepoint: 382, Patch: []uint16{0xfed}}} -var weightTailoring_utf16_spanish_ci = []uca.Patch{uca.Patch{Codepoint: 209, Patch: []uint16{0xf65}}, uca.Patch{Codepoint: 241, Patch: []uint16{0xf65}}} +var weightTailoring_utf16_spanish_ci = []uca.Patch{{Codepoint: 209, Patch: []uint16{0xf65}}, {Codepoint: 241, Patch: []uint16{0xf65}}} -var weightTailoring_utf16_swedish_ci = []uca.Patch{uca.Patch{Codepoint: 196, Patch: []uint16{0x106c}}, uca.Patch{Codepoint: 197, Patch: []uint16{0x106b}}, uca.Patch{Codepoint: 198, Patch: []uint16{0x106c}}, uca.Patch{Codepoint: 214, Patch: []uint16{0x106d}}, uca.Patch{Codepoint: 216, Patch: []uint16{0x106d}}, uca.Patch{Codepoint: 220, Patch: []uint16{0x105e}}, uca.Patch{Codepoint: 228, Patch: []uint16{0x106c}}, uca.Patch{Codepoint: 229, Patch: []uint16{0x106b}}, uca.Patch{Codepoint: 230, Patch: []uint16{0x106c}}, uca.Patch{Codepoint: 246, Patch: []uint16{0x106d}}, uca.Patch{Codepoint: 248, Patch: []uint16{0x106d}}, uca.Patch{Codepoint: 252, Patch: []uint16{0x105e}}} +var weightTailoring_utf16_swedish_ci = []uca.Patch{{Codepoint: 196, Patch: []uint16{0x106c}}, {Codepoint: 197, Patch: []uint16{0x106b}}, {Codepoint: 198, Patch: []uint16{0x106c}}, {Codepoint: 214, Patch: []uint16{0x106d}}, {Codepoint: 216, Patch: []uint16{0x106d}}, {Codepoint: 220, Patch: []uint16{0x105e}}, {Codepoint: 228, Patch: []uint16{0x106c}}, {Codepoint: 229, Patch: []uint16{0x106b}}, {Codepoint: 230, Patch: []uint16{0x106c}}, {Codepoint: 246, Patch: []uint16{0x106d}}, {Codepoint: 248, Patch: []uint16{0x106d}}, {Codepoint: 252, Patch: []uint16{0x105e}}} -var weightTailoring_utf16_turkish_ci = []uca.Patch{uca.Patch{Codepoint: 73, Patch: []uint16{0xee2}}, uca.Patch{Codepoint: 199, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 214, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 220, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 231, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 246, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 252, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 286, Patch: []uint16{0xec2}}, uca.Patch{Codepoint: 287, Patch: []uint16{0xec2}}, uca.Patch{Codepoint: 305, Patch: []uint16{0xee2}}, uca.Patch{Codepoint: 350, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 351, Patch: []uint16{0xfeb}}} +var weightTailoring_utf16_turkish_ci = []uca.Patch{{Codepoint: 73, Patch: []uint16{0xee2}}, {Codepoint: 199, Patch: []uint16{0xe61}}, {Codepoint: 214, Patch: []uint16{0xf83}}, {Codepoint: 220, Patch: []uint16{0x1020}}, {Codepoint: 231, Patch: []uint16{0xe61}}, {Codepoint: 246, Patch: []uint16{0xf83}}, {Codepoint: 252, Patch: []uint16{0x1020}}, {Codepoint: 286, Patch: []uint16{0xec2}}, {Codepoint: 287, Patch: []uint16{0xec2}}, {Codepoint: 305, Patch: []uint16{0xee2}}, {Codepoint: 350, Patch: []uint16{0xfeb}}, {Codepoint: 351, Patch: []uint16{0xfeb}}} -var weightTailoring_utf16_czech_ci = []uca.Patch{uca.Patch{Codepoint: 268, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 269, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 344, Patch: []uint16{0xfc1}}, uca.Patch{Codepoint: 345, Patch: []uint16{0xfc1}}, uca.Patch{Codepoint: 352, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 353, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 381, Patch: []uint16{0x106b}}, uca.Patch{Codepoint: 382, Patch: []uint16{0x106b}}} +var weightTailoring_utf16_czech_ci = []uca.Patch{{Codepoint: 268, Patch: []uint16{0xe61}}, {Codepoint: 269, Patch: []uint16{0xe61}}, {Codepoint: 344, Patch: []uint16{0xfc1}}, {Codepoint: 345, Patch: []uint16{0xfc1}}, {Codepoint: 352, Patch: []uint16{0xfeb}}, {Codepoint: 353, Patch: []uint16{0xfeb}}, {Codepoint: 381, Patch: []uint16{0x106b}}, {Codepoint: 382, Patch: []uint16{0x106b}}} type contractor_utf16_czech_ci struct{} @@ -2825,7 +2841,7 @@ func (contractor_utf16_czech_ci) FindContextual(cp1, cp0 rune) []uint16 { var contractor_utf16_czech_ci_weights = [...]uint16{0x0ee2, 0x0000, 0x0000, 0x0ee2, 0x0000, 0x0000, 0x0ee2, 0x0000, 0x0000} -var weightTailoring_utf16_danish_ci = []uca.Patch{uca.Patch{Codepoint: 196, Patch: []uint16{0x106b}}, uca.Patch{Codepoint: 197, Patch: []uint16{0x106d}}, uca.Patch{Codepoint: 198, Patch: []uint16{0x106b}}, uca.Patch{Codepoint: 214, Patch: []uint16{0x106c}}, uca.Patch{Codepoint: 216, Patch: []uint16{0x106c}}, uca.Patch{Codepoint: 220, Patch: []uint16{0x105e}}, uca.Patch{Codepoint: 228, Patch: []uint16{0x106b}}, uca.Patch{Codepoint: 229, Patch: []uint16{0x106d}}, uca.Patch{Codepoint: 230, Patch: []uint16{0x106b}}, uca.Patch{Codepoint: 246, Patch: []uint16{0x106c}}, uca.Patch{Codepoint: 248, Patch: []uint16{0x106c}}, uca.Patch{Codepoint: 252, Patch: []uint16{0x105e}}, uca.Patch{Codepoint: 336, Patch: []uint16{0x106c}}, uca.Patch{Codepoint: 337, Patch: []uint16{0x106c}}, uca.Patch{Codepoint: 368, Patch: []uint16{0x105e}}, uca.Patch{Codepoint: 369, Patch: []uint16{0x105e}}} +var weightTailoring_utf16_danish_ci = []uca.Patch{{Codepoint: 196, Patch: []uint16{0x106b}}, {Codepoint: 197, Patch: []uint16{0x106d}}, {Codepoint: 198, Patch: []uint16{0x106b}}, {Codepoint: 214, Patch: []uint16{0x106c}}, {Codepoint: 216, Patch: []uint16{0x106c}}, {Codepoint: 220, Patch: []uint16{0x105e}}, {Codepoint: 228, Patch: []uint16{0x106b}}, {Codepoint: 229, Patch: []uint16{0x106d}}, {Codepoint: 230, Patch: []uint16{0x106b}}, {Codepoint: 246, Patch: []uint16{0x106c}}, {Codepoint: 248, Patch: []uint16{0x106c}}, {Codepoint: 252, Patch: []uint16{0x105e}}, {Codepoint: 336, Patch: []uint16{0x106c}}, {Codepoint: 337, Patch: []uint16{0x106c}}, {Codepoint: 368, Patch: []uint16{0x105e}}, {Codepoint: 369, Patch: []uint16{0x105e}}} type contractor_utf16_danish_ci struct{} @@ -2854,7 +2870,7 @@ func (contractor_utf16_danish_ci) FindContextual(cp1, cp0 rune) []uint16 { var contractor_utf16_danish_ci_weights = [...]uint16{0x106d, 0x0000, 0x0000, 0x106d, 0x0000, 0x0000, 0x106d, 0x0000, 0x0000} -var weightTailoring_utf16_lithuanian_ci = []uca.Patch{uca.Patch{Codepoint: 89, Patch: []uint16{0xefb}}, uca.Patch{Codepoint: 121, Patch: []uint16{0xefb}}, uca.Patch{Codepoint: 268, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 269, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 352, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 353, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 381, Patch: []uint16{0x106b}}, uca.Patch{Codepoint: 382, Patch: []uint16{0x106b}}} +var weightTailoring_utf16_lithuanian_ci = []uca.Patch{{Codepoint: 89, Patch: []uint16{0xefb}}, {Codepoint: 121, Patch: []uint16{0xefb}}, {Codepoint: 268, Patch: []uint16{0xe61}}, {Codepoint: 269, Patch: []uint16{0xe61}}, {Codepoint: 352, Patch: []uint16{0xfeb}}, {Codepoint: 353, Patch: []uint16{0xfeb}}, {Codepoint: 381, Patch: []uint16{0x106b}}, {Codepoint: 382, Patch: []uint16{0x106b}}} type contractor_utf16_lithuanian_ci struct{} @@ -2883,7 +2899,7 @@ func (contractor_utf16_lithuanian_ci) FindContextual(cp1, cp0 rune) []uint16 { var contractor_utf16_lithuanian_ci_weights = [...]uint16{0x0e60, 0x0000, 0x0000, 0x0e60, 0x0000, 0x0000, 0x0e60, 0x0000, 0x0000} -var weightTailoring_utf16_slovak_ci = []uca.Patch{uca.Patch{Codepoint: 196, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 212, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 228, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 244, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 268, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 269, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 352, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 353, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 381, Patch: []uint16{0x106b}}, uca.Patch{Codepoint: 382, Patch: []uint16{0x106b}}} +var weightTailoring_utf16_slovak_ci = []uca.Patch{{Codepoint: 196, Patch: []uint16{0xe34}}, {Codepoint: 212, Patch: []uint16{0xf83}}, {Codepoint: 228, Patch: []uint16{0xe34}}, {Codepoint: 244, Patch: []uint16{0xf83}}, {Codepoint: 268, Patch: []uint16{0xe61}}, {Codepoint: 269, Patch: []uint16{0xe61}}, {Codepoint: 352, Patch: []uint16{0xfeb}}, {Codepoint: 353, Patch: []uint16{0xfeb}}, {Codepoint: 381, Patch: []uint16{0x106b}}, {Codepoint: 382, Patch: []uint16{0x106b}}} type contractor_utf16_spanish2_ci struct{} @@ -2926,19 +2942,19 @@ func (contractor_utf16_spanish2_ci) FindContextual(cp1, cp0 rune) []uint16 { var contractor_utf16_spanish2_ci_weights = [...]uint16{0x0e61, 0x0000, 0x0000, 0x0e61, 0x0000, 0x0000, 0x0f2f, 0x0000, 0x0000, 0x0f2f, 0x0000, 0x0000, 0x0e61, 0x0000, 0x0000, 0x0f2f, 0x0000, 0x0000} -var weightTailoring_utf16_roman_ci = []uca.Patch{uca.Patch{Codepoint: 74, Patch: []uint16{0xefb}}, uca.Patch{Codepoint: 85, Patch: []uint16{0x1044}}, uca.Patch{Codepoint: 106, Patch: []uint16{0xefb}}, uca.Patch{Codepoint: 117, Patch: []uint16{0x1044}}} +var weightTailoring_utf16_roman_ci = []uca.Patch{{Codepoint: 74, Patch: []uint16{0xefb}}, {Codepoint: 85, Patch: []uint16{0x1044}}, {Codepoint: 106, Patch: []uint16{0xefb}}, {Codepoint: 117, Patch: []uint16{0x1044}}} -var weightTailoring_utf16_persian_ci = []uca.Patch{uca.Patch{Codepoint: 1569, Patch: []uint16{0xe36}}, uca.Patch{Codepoint: 1570, Patch: []uint16{0xe33}}, uca.Patch{Codepoint: 1571, Patch: []uint16{0xe37}}, uca.Patch{Codepoint: 1572, Patch: []uint16{0xe39}}, uca.Patch{Codepoint: 1573, Patch: []uint16{0xe38}}, uca.Patch{Codepoint: 1574, Patch: []uint16{0xe3a}}, uca.Patch{Codepoint: 1575, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 1577, Patch: []uint16{0x13bf}}, uca.Patch{Codepoint: 1603, Patch: []uint16{0x139d}}, uca.Patch{Codepoint: 1607, Patch: []uint16{0x13be}}, uca.Patch{Codepoint: 1609, Patch: []uint16{0x13c2}}, uca.Patch{Codepoint: 1610, Patch: []uint16{0x13c3}}, uca.Patch{Codepoint: 1611, Patch: []uint16{0x2d5}}, uca.Patch{Codepoint: 1612, Patch: []uint16{0x2da}}, uca.Patch{Codepoint: 1613, Patch: []uint16{0x2d8}}, uca.Patch{Codepoint: 1614, Patch: []uint16{0x2cc}}, uca.Patch{Codepoint: 1615, Patch: []uint16{0x2d2}}, uca.Patch{Codepoint: 1616, Patch: []uint16{0x2cf}}, uca.Patch{Codepoint: 1619, Patch: []uint16{0x1}}, uca.Patch{Codepoint: 1620, Patch: []uint16{0x2}}, uca.Patch{Codepoint: 1621, Patch: []uint16{0x3}}, uca.Patch{Codepoint: 1648, Patch: []uint16{0x4}}, uca.Patch{Codepoint: 1649, Patch: []uint16{0xe35}}, uca.Patch{Codepoint: 1673, Patch: []uint16{0x1355}}, uca.Patch{Codepoint: 1674, Patch: []uint16{0x1356}}, uca.Patch{Codepoint: 1705, Patch: []uint16{0x139c}}, uca.Patch{Codepoint: 1728, Patch: []uint16{0x13c0}}, uca.Patch{Codepoint: 1740, Patch: []uint16{0x13c1}}, uca.Patch{Codepoint: 64336, Patch: []uint16{0x134c}}, uca.Patch{Codepoint: 64337, Patch: []uint16{0x134d}}, uca.Patch{Codepoint: 64398, Patch: []uint16{0x139c}}, uca.Patch{Codepoint: 64399, Patch: []uint16{0x139d}}, uca.Patch{Codepoint: 64400, Patch: []uint16{0x139e}}, uca.Patch{Codepoint: 64420, Patch: []uint16{0x13c4}}, uca.Patch{Codepoint: 64421, Patch: []uint16{0x13c5}}, uca.Patch{Codepoint: 64508, Patch: []uint16{0x13c6}}, uca.Patch{Codepoint: 64509, Patch: []uint16{0x13c7}}, uca.Patch{Codepoint: 64510, Patch: []uint16{0x13c8}}, uca.Patch{Codepoint: 65020, Patch: []uint16{0x1376}}, uca.Patch{Codepoint: 65136, Patch: []uint16{0x2d6}}, uca.Patch{Codepoint: 65137, Patch: []uint16{0x2d7}}, uca.Patch{Codepoint: 65138, Patch: []uint16{0x2db}}, uca.Patch{Codepoint: 65140, Patch: []uint16{0x2d9}}, uca.Patch{Codepoint: 65142, Patch: []uint16{0x2cd}}, uca.Patch{Codepoint: 65143, Patch: []uint16{0x2ce}}, uca.Patch{Codepoint: 65144, Patch: []uint16{0x2d3}}, uca.Patch{Codepoint: 65145, Patch: []uint16{0x2d4}}, uca.Patch{Codepoint: 65146, Patch: []uint16{0x2d0}}, uca.Patch{Codepoint: 65147, Patch: []uint16{0x2d1}}, uca.Patch{Codepoint: 65152, Patch: []uint16{0x134e}}, uca.Patch{Codepoint: 65154, Patch: []uint16{0x1349}}, uca.Patch{Codepoint: 65155, Patch: []uint16{0x134f}}, uca.Patch{Codepoint: 65156, Patch: []uint16{0x1350}}, uca.Patch{Codepoint: 65157, Patch: []uint16{0x1353}}, uca.Patch{Codepoint: 65158, Patch: []uint16{0x1354}}, uca.Patch{Codepoint: 65159, Patch: []uint16{0x1351}}, uca.Patch{Codepoint: 65160, Patch: []uint16{0x1352}}, uca.Patch{Codepoint: 65165, Patch: []uint16{0x134a}}, uca.Patch{Codepoint: 65166, Patch: []uint16{0x134b}}, uca.Patch{Codepoint: 65171, Patch: []uint16{0x13c2}}, uca.Patch{Codepoint: 65172, Patch: []uint16{0x13c3}}, uca.Patch{Codepoint: 65241, Patch: []uint16{0x13a0}}, uca.Patch{Codepoint: 65242, Patch: []uint16{0x13a1}}, uca.Patch{Codepoint: 65243, Patch: []uint16{0x13a2}}, uca.Patch{Codepoint: 65244, Patch: []uint16{0x13a3}}, uca.Patch{Codepoint: 65257, Patch: []uint16{0x13be}}, uca.Patch{Codepoint: 65258, Patch: []uint16{0x13bf}}, uca.Patch{Codepoint: 65259, Patch: []uint16{0x13c0}}, uca.Patch{Codepoint: 65260, Patch: []uint16{0x13c1}}, uca.Patch{Codepoint: 65263, Patch: []uint16{0x13ca}}, uca.Patch{Codepoint: 65264, Patch: []uint16{0x13cb}}, uca.Patch{Codepoint: 65265, Patch: []uint16{0x13cc}}, uca.Patch{Codepoint: 65266, Patch: []uint16{0x13cd}}, uca.Patch{Codepoint: 65267, Patch: []uint16{0x13ce}}, uca.Patch{Codepoint: 65268, Patch: []uint16{0x13cf}}, uca.Patch{Codepoint: 65269, Patch: []uint16{0x13d0}}, uca.Patch{Codepoint: 65270, Patch: []uint16{0x13d1}}, uca.Patch{Codepoint: 65271, Patch: []uint16{0x13d2}}, uca.Patch{Codepoint: 65272, Patch: []uint16{0x13d3}}, uca.Patch{Codepoint: 65273, Patch: []uint16{0x13d4}}, uca.Patch{Codepoint: 65274, Patch: []uint16{0x13d5}}, uca.Patch{Codepoint: 65275, Patch: []uint16{0x13d6}}, uca.Patch{Codepoint: 65276, Patch: []uint16{0x13d7}}} +var weightTailoring_utf16_persian_ci = []uca.Patch{{Codepoint: 1569, Patch: []uint16{0xe36}}, {Codepoint: 1570, Patch: []uint16{0xe33}}, {Codepoint: 1571, Patch: []uint16{0xe37}}, {Codepoint: 1572, Patch: []uint16{0xe39}}, {Codepoint: 1573, Patch: []uint16{0xe38}}, {Codepoint: 1574, Patch: []uint16{0xe3a}}, {Codepoint: 1575, Patch: []uint16{0xe34}}, {Codepoint: 1577, Patch: []uint16{0x13bf}}, {Codepoint: 1603, Patch: []uint16{0x139d}}, {Codepoint: 1607, Patch: []uint16{0x13be}}, {Codepoint: 1609, Patch: []uint16{0x13c2}}, {Codepoint: 1610, Patch: []uint16{0x13c3}}, {Codepoint: 1611, Patch: []uint16{0x2d5}}, {Codepoint: 1612, Patch: []uint16{0x2da}}, {Codepoint: 1613, Patch: []uint16{0x2d8}}, {Codepoint: 1614, Patch: []uint16{0x2cc}}, {Codepoint: 1615, Patch: []uint16{0x2d2}}, {Codepoint: 1616, Patch: []uint16{0x2cf}}, {Codepoint: 1619, Patch: []uint16{0x1}}, {Codepoint: 1620, Patch: []uint16{0x2}}, {Codepoint: 1621, Patch: []uint16{0x3}}, {Codepoint: 1648, Patch: []uint16{0x4}}, {Codepoint: 1649, Patch: []uint16{0xe35}}, {Codepoint: 1673, Patch: []uint16{0x1355}}, {Codepoint: 1674, Patch: []uint16{0x1356}}, {Codepoint: 1705, Patch: []uint16{0x139c}}, {Codepoint: 1728, Patch: []uint16{0x13c0}}, {Codepoint: 1740, Patch: []uint16{0x13c1}}, {Codepoint: 64336, Patch: []uint16{0x134c}}, {Codepoint: 64337, Patch: []uint16{0x134d}}, {Codepoint: 64398, Patch: []uint16{0x139c}}, {Codepoint: 64399, Patch: []uint16{0x139d}}, {Codepoint: 64400, Patch: []uint16{0x139e}}, {Codepoint: 64420, Patch: []uint16{0x13c4}}, {Codepoint: 64421, Patch: []uint16{0x13c5}}, {Codepoint: 64508, Patch: []uint16{0x13c6}}, {Codepoint: 64509, Patch: []uint16{0x13c7}}, {Codepoint: 64510, Patch: []uint16{0x13c8}}, {Codepoint: 65020, Patch: []uint16{0x1376}}, {Codepoint: 65136, Patch: []uint16{0x2d6}}, {Codepoint: 65137, Patch: []uint16{0x2d7}}, {Codepoint: 65138, Patch: []uint16{0x2db}}, {Codepoint: 65140, Patch: []uint16{0x2d9}}, {Codepoint: 65142, Patch: []uint16{0x2cd}}, {Codepoint: 65143, Patch: []uint16{0x2ce}}, {Codepoint: 65144, Patch: []uint16{0x2d3}}, {Codepoint: 65145, Patch: []uint16{0x2d4}}, {Codepoint: 65146, Patch: []uint16{0x2d0}}, {Codepoint: 65147, Patch: []uint16{0x2d1}}, {Codepoint: 65152, Patch: []uint16{0x134e}}, {Codepoint: 65154, Patch: []uint16{0x1349}}, {Codepoint: 65155, Patch: []uint16{0x134f}}, {Codepoint: 65156, Patch: []uint16{0x1350}}, {Codepoint: 65157, Patch: []uint16{0x1353}}, {Codepoint: 65158, Patch: []uint16{0x1354}}, {Codepoint: 65159, Patch: []uint16{0x1351}}, {Codepoint: 65160, Patch: []uint16{0x1352}}, {Codepoint: 65165, Patch: []uint16{0x134a}}, {Codepoint: 65166, Patch: []uint16{0x134b}}, {Codepoint: 65171, Patch: []uint16{0x13c2}}, {Codepoint: 65172, Patch: []uint16{0x13c3}}, {Codepoint: 65241, Patch: []uint16{0x13a0}}, {Codepoint: 65242, Patch: []uint16{0x13a1}}, {Codepoint: 65243, Patch: []uint16{0x13a2}}, {Codepoint: 65244, Patch: []uint16{0x13a3}}, {Codepoint: 65257, Patch: []uint16{0x13be}}, {Codepoint: 65258, Patch: []uint16{0x13bf}}, {Codepoint: 65259, Patch: []uint16{0x13c0}}, {Codepoint: 65260, Patch: []uint16{0x13c1}}, {Codepoint: 65263, Patch: []uint16{0x13ca}}, {Codepoint: 65264, Patch: []uint16{0x13cb}}, {Codepoint: 65265, Patch: []uint16{0x13cc}}, {Codepoint: 65266, Patch: []uint16{0x13cd}}, {Codepoint: 65267, Patch: []uint16{0x13ce}}, {Codepoint: 65268, Patch: []uint16{0x13cf}}, {Codepoint: 65269, Patch: []uint16{0x13d0}}, {Codepoint: 65270, Patch: []uint16{0x13d1}}, {Codepoint: 65271, Patch: []uint16{0x13d2}}, {Codepoint: 65272, Patch: []uint16{0x13d3}}, {Codepoint: 65273, Patch: []uint16{0x13d4}}, {Codepoint: 65274, Patch: []uint16{0x13d5}}, {Codepoint: 65275, Patch: []uint16{0x13d6}}, {Codepoint: 65276, Patch: []uint16{0x13d7}}} -var weightTailoring_utf16_esperanto_ci = []uca.Patch{uca.Patch{Codepoint: 264, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 265, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 284, Patch: []uint16{0xec2}}, uca.Patch{Codepoint: 285, Patch: []uint16{0xec2}}, uca.Patch{Codepoint: 292, Patch: []uint16{0xee2}}, uca.Patch{Codepoint: 293, Patch: []uint16{0xee2}}, uca.Patch{Codepoint: 308, Patch: []uint16{0xf11}}, uca.Patch{Codepoint: 309, Patch: []uint16{0xf11}}, uca.Patch{Codepoint: 348, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 349, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 364, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 365, Patch: []uint16{0x1020}}} +var weightTailoring_utf16_esperanto_ci = []uca.Patch{{Codepoint: 264, Patch: []uint16{0xe61}}, {Codepoint: 265, Patch: []uint16{0xe61}}, {Codepoint: 284, Patch: []uint16{0xec2}}, {Codepoint: 285, Patch: []uint16{0xec2}}, {Codepoint: 292, Patch: []uint16{0xee2}}, {Codepoint: 293, Patch: []uint16{0xee2}}, {Codepoint: 308, Patch: []uint16{0xf11}}, {Codepoint: 309, Patch: []uint16{0xf11}}, {Codepoint: 348, Patch: []uint16{0xfeb}}, {Codepoint: 349, Patch: []uint16{0xfeb}}, {Codepoint: 364, Patch: []uint16{0x1020}}, {Codepoint: 365, Patch: []uint16{0x1020}}} -var weightTailoring_utf16_hungarian_ci = []uca.Patch{uca.Patch{Codepoint: 214, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 220, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 246, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 252, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 336, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 337, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 368, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 369, Patch: []uint16{0x1020}}} +var weightTailoring_utf16_hungarian_ci = []uca.Patch{{Codepoint: 214, Patch: []uint16{0xf83}}, {Codepoint: 220, Patch: []uint16{0x1020}}, {Codepoint: 246, Patch: []uint16{0xf83}}, {Codepoint: 252, Patch: []uint16{0x1020}}, {Codepoint: 336, Patch: []uint16{0xf83}}, {Codepoint: 337, Patch: []uint16{0xf83}}, {Codepoint: 368, Patch: []uint16{0x1020}}, {Codepoint: 369, Patch: []uint16{0x1020}}} -var weightTailoring_utf16_sinhala_ci = []uca.Patch{uca.Patch{Codepoint: 3458, Patch: []uint16{0x17a9}}, uca.Patch{Codepoint: 3459, Patch: []uint16{0x17aa}}, uca.Patch{Codepoint: 3482, Patch: []uint16{0x17ab}}, uca.Patch{Codepoint: 3483, Patch: []uint16{0x17ac}}, uca.Patch{Codepoint: 3484, Patch: []uint16{0x17ad}}, uca.Patch{Codepoint: 3485, Patch: []uint16{0x17ae}}, uca.Patch{Codepoint: 3486, Patch: []uint16{0x17af}}, uca.Patch{Codepoint: 3487, Patch: []uint16{0x17b0}}, uca.Patch{Codepoint: 3488, Patch: []uint16{0x17b1}}, uca.Patch{Codepoint: 3489, Patch: []uint16{0x17b2}}, uca.Patch{Codepoint: 3490, Patch: []uint16{0x17b3}}, uca.Patch{Codepoint: 3491, Patch: []uint16{0x17b4}}, uca.Patch{Codepoint: 3492, Patch: []uint16{0x17b6}}, uca.Patch{Codepoint: 3493, Patch: []uint16{0x17b5}}, uca.Patch{Codepoint: 3494, Patch: []uint16{0x17b7}}, uca.Patch{Codepoint: 3495, Patch: []uint16{0x17b8}}, uca.Patch{Codepoint: 3496, Patch: []uint16{0x17b9}}, uca.Patch{Codepoint: 3497, Patch: []uint16{0x17ba}}, uca.Patch{Codepoint: 3498, Patch: []uint16{0x17bb}}, uca.Patch{Codepoint: 3499, Patch: []uint16{0x17bc}}, uca.Patch{Codepoint: 3500, Patch: []uint16{0x17bd}}, uca.Patch{Codepoint: 3501, Patch: []uint16{0x17be}}, uca.Patch{Codepoint: 3502, Patch: []uint16{0x17bf}}, uca.Patch{Codepoint: 3503, Patch: []uint16{0x17c0}}, uca.Patch{Codepoint: 3504, Patch: []uint16{0x17c1}}, uca.Patch{Codepoint: 3505, Patch: []uint16{0x17c2}}, uca.Patch{Codepoint: 3507, Patch: []uint16{0x17c3}}, uca.Patch{Codepoint: 3508, Patch: []uint16{0x17c4}}, uca.Patch{Codepoint: 3509, Patch: []uint16{0x17c5}}, uca.Patch{Codepoint: 3510, Patch: []uint16{0x17c6}}, uca.Patch{Codepoint: 3511, Patch: []uint16{0x17c7}}, uca.Patch{Codepoint: 3512, Patch: []uint16{0x17c8}}, uca.Patch{Codepoint: 3513, Patch: []uint16{0x17c9}}, uca.Patch{Codepoint: 3514, Patch: []uint16{0x17ca}}, uca.Patch{Codepoint: 3515, Patch: []uint16{0x17cb}}, uca.Patch{Codepoint: 3517, Patch: []uint16{0x17cc}}, uca.Patch{Codepoint: 3520, Patch: []uint16{0x17cd}}, uca.Patch{Codepoint: 3521, Patch: []uint16{0x17ce}}, uca.Patch{Codepoint: 3522, Patch: []uint16{0x17cf}}, uca.Patch{Codepoint: 3523, Patch: []uint16{0x17d0}}, uca.Patch{Codepoint: 3524, Patch: []uint16{0x17d1}}, uca.Patch{Codepoint: 3525, Patch: []uint16{0x17d2}}, uca.Patch{Codepoint: 3526, Patch: []uint16{0x17d3}}, uca.Patch{Codepoint: 3530, Patch: []uint16{0x17e5}}, uca.Patch{Codepoint: 3535, Patch: []uint16{0x17d4}}, uca.Patch{Codepoint: 3536, Patch: []uint16{0x17d5}}, uca.Patch{Codepoint: 3537, Patch: []uint16{0x17d6}}, uca.Patch{Codepoint: 3538, Patch: []uint16{0x17d7}}, uca.Patch{Codepoint: 3539, Patch: []uint16{0x17d8}}, uca.Patch{Codepoint: 3540, Patch: []uint16{0x17d9}}, uca.Patch{Codepoint: 3542, Patch: []uint16{0x17da}}, uca.Patch{Codepoint: 3544, Patch: []uint16{0x17db}}, uca.Patch{Codepoint: 3545, Patch: []uint16{0x17df}}, uca.Patch{Codepoint: 3546, Patch: []uint16{0x17e0}}, uca.Patch{Codepoint: 3547, Patch: []uint16{0x17e1}}, uca.Patch{Codepoint: 3548, Patch: []uint16{0x17e2}}, uca.Patch{Codepoint: 3549, Patch: []uint16{0x17e3}}, uca.Patch{Codepoint: 3550, Patch: []uint16{0x17e4}}, uca.Patch{Codepoint: 3551, Patch: []uint16{0x17dd}}, uca.Patch{Codepoint: 3570, Patch: []uint16{0x17dc}}, uca.Patch{Codepoint: 3571, Patch: []uint16{0x17de}}} +var weightTailoring_utf16_sinhala_ci = []uca.Patch{{Codepoint: 3458, Patch: []uint16{0x17a9}}, {Codepoint: 3459, Patch: []uint16{0x17aa}}, {Codepoint: 3482, Patch: []uint16{0x17ab}}, {Codepoint: 3483, Patch: []uint16{0x17ac}}, {Codepoint: 3484, Patch: []uint16{0x17ad}}, {Codepoint: 3485, Patch: []uint16{0x17ae}}, {Codepoint: 3486, Patch: []uint16{0x17af}}, {Codepoint: 3487, Patch: []uint16{0x17b0}}, {Codepoint: 3488, Patch: []uint16{0x17b1}}, {Codepoint: 3489, Patch: []uint16{0x17b2}}, {Codepoint: 3490, Patch: []uint16{0x17b3}}, {Codepoint: 3491, Patch: []uint16{0x17b4}}, {Codepoint: 3492, Patch: []uint16{0x17b6}}, {Codepoint: 3493, Patch: []uint16{0x17b5}}, {Codepoint: 3494, Patch: []uint16{0x17b7}}, {Codepoint: 3495, Patch: []uint16{0x17b8}}, {Codepoint: 3496, Patch: []uint16{0x17b9}}, {Codepoint: 3497, Patch: []uint16{0x17ba}}, {Codepoint: 3498, Patch: []uint16{0x17bb}}, {Codepoint: 3499, Patch: []uint16{0x17bc}}, {Codepoint: 3500, Patch: []uint16{0x17bd}}, {Codepoint: 3501, Patch: []uint16{0x17be}}, {Codepoint: 3502, Patch: []uint16{0x17bf}}, {Codepoint: 3503, Patch: []uint16{0x17c0}}, {Codepoint: 3504, Patch: []uint16{0x17c1}}, {Codepoint: 3505, Patch: []uint16{0x17c2}}, {Codepoint: 3507, Patch: []uint16{0x17c3}}, {Codepoint: 3508, Patch: []uint16{0x17c4}}, {Codepoint: 3509, Patch: []uint16{0x17c5}}, {Codepoint: 3510, Patch: []uint16{0x17c6}}, {Codepoint: 3511, Patch: []uint16{0x17c7}}, {Codepoint: 3512, Patch: []uint16{0x17c8}}, {Codepoint: 3513, Patch: []uint16{0x17c9}}, {Codepoint: 3514, Patch: []uint16{0x17ca}}, {Codepoint: 3515, Patch: []uint16{0x17cb}}, {Codepoint: 3517, Patch: []uint16{0x17cc}}, {Codepoint: 3520, Patch: []uint16{0x17cd}}, {Codepoint: 3521, Patch: []uint16{0x17ce}}, {Codepoint: 3522, Patch: []uint16{0x17cf}}, {Codepoint: 3523, Patch: []uint16{0x17d0}}, {Codepoint: 3524, Patch: []uint16{0x17d1}}, {Codepoint: 3525, Patch: []uint16{0x17d2}}, {Codepoint: 3526, Patch: []uint16{0x17d3}}, {Codepoint: 3530, Patch: []uint16{0x17e5}}, {Codepoint: 3535, Patch: []uint16{0x17d4}}, {Codepoint: 3536, Patch: []uint16{0x17d5}}, {Codepoint: 3537, Patch: []uint16{0x17d6}}, {Codepoint: 3538, Patch: []uint16{0x17d7}}, {Codepoint: 3539, Patch: []uint16{0x17d8}}, {Codepoint: 3540, Patch: []uint16{0x17d9}}, {Codepoint: 3542, Patch: []uint16{0x17da}}, {Codepoint: 3544, Patch: []uint16{0x17db}}, {Codepoint: 3545, Patch: []uint16{0x17df}}, {Codepoint: 3546, Patch: []uint16{0x17e0}}, {Codepoint: 3547, Patch: []uint16{0x17e1}}, {Codepoint: 3548, Patch: []uint16{0x17e2}}, {Codepoint: 3549, Patch: []uint16{0x17e3}}, {Codepoint: 3550, Patch: []uint16{0x17e4}}, {Codepoint: 3551, Patch: []uint16{0x17dd}}, {Codepoint: 3570, Patch: []uint16{0x17dc}}, {Codepoint: 3571, Patch: []uint16{0x17de}}} -var weightTailoring_utf16_german2_ci = []uca.Patch{uca.Patch{Codepoint: 196, Patch: []uint16{0xe33, 0xe8b}}, uca.Patch{Codepoint: 198, Patch: []uint16{0xe33, 0xe8b}}, uca.Patch{Codepoint: 214, Patch: []uint16{0xf82, 0xe8b}}, uca.Patch{Codepoint: 220, Patch: []uint16{0x101f, 0xe8b}}, uca.Patch{Codepoint: 228, Patch: []uint16{0xe33, 0xe8b}}, uca.Patch{Codepoint: 230, Patch: []uint16{0xe33, 0xe8b}}, uca.Patch{Codepoint: 246, Patch: []uint16{0xf82, 0xe8b}}, uca.Patch{Codepoint: 252, Patch: []uint16{0x101f, 0xe8b}}} +var weightTailoring_utf16_german2_ci = []uca.Patch{{Codepoint: 196, Patch: []uint16{0xe33, 0xe8b}}, {Codepoint: 198, Patch: []uint16{0xe33, 0xe8b}}, {Codepoint: 214, Patch: []uint16{0xf82, 0xe8b}}, {Codepoint: 220, Patch: []uint16{0x101f, 0xe8b}}, {Codepoint: 228, Patch: []uint16{0xe33, 0xe8b}}, {Codepoint: 230, Patch: []uint16{0xe33, 0xe8b}}, {Codepoint: 246, Patch: []uint16{0xf82, 0xe8b}}, {Codepoint: 252, Patch: []uint16{0x101f, 0xe8b}}} -var weightTailoring_utf16_croatian_ci = []uca.Patch{uca.Patch{Codepoint: 262, Patch: []uint16{0xe62}}, uca.Patch{Codepoint: 263, Patch: []uint16{0xe62}}, uca.Patch{Codepoint: 268, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 269, Patch: []uint16{0xe61}}, uca.Patch{Codepoint: 272, Patch: []uint16{0xe6f}}, uca.Patch{Codepoint: 273, Patch: []uint16{0xe6f}}, uca.Patch{Codepoint: 352, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 353, Patch: []uint16{0xfeb}}, uca.Patch{Codepoint: 381, Patch: []uint16{0x106b}}, uca.Patch{Codepoint: 382, Patch: []uint16{0x106b}}, uca.Patch{Codepoint: 452, Patch: []uint16{0xe6e}}, uca.Patch{Codepoint: 453, Patch: []uint16{0xe6e}}, uca.Patch{Codepoint: 454, Patch: []uint16{0xe6e}}, uca.Patch{Codepoint: 455, Patch: []uint16{0xf2f}}, uca.Patch{Codepoint: 456, Patch: []uint16{0xf2f}}, uca.Patch{Codepoint: 457, Patch: []uint16{0xf2f}}, uca.Patch{Codepoint: 458, Patch: []uint16{0xf65}}, uca.Patch{Codepoint: 459, Patch: []uint16{0xf65}}, uca.Patch{Codepoint: 460, Patch: []uint16{0xf65}}} +var weightTailoring_utf16_croatian_ci = []uca.Patch{{Codepoint: 262, Patch: []uint16{0xe62}}, {Codepoint: 263, Patch: []uint16{0xe62}}, {Codepoint: 268, Patch: []uint16{0xe61}}, {Codepoint: 269, Patch: []uint16{0xe61}}, {Codepoint: 272, Patch: []uint16{0xe6f}}, {Codepoint: 273, Patch: []uint16{0xe6f}}, {Codepoint: 352, Patch: []uint16{0xfeb}}, {Codepoint: 353, Patch: []uint16{0xfeb}}, {Codepoint: 381, Patch: []uint16{0x106b}}, {Codepoint: 382, Patch: []uint16{0x106b}}, {Codepoint: 452, Patch: []uint16{0xe6e}}, {Codepoint: 453, Patch: []uint16{0xe6e}}, {Codepoint: 454, Patch: []uint16{0xe6e}}, {Codepoint: 455, Patch: []uint16{0xf2f}}, {Codepoint: 456, Patch: []uint16{0xf2f}}, {Codepoint: 457, Patch: []uint16{0xf2f}}, {Codepoint: 458, Patch: []uint16{0xf65}}, {Codepoint: 459, Patch: []uint16{0xf65}}, {Codepoint: 460, Patch: []uint16{0xf65}}} type contractor_utf16_croatian_ci struct{} @@ -3001,29 +3017,29 @@ func (contractor_utf16_croatian_ci) FindContextual(cp1, cp0 rune) []uint16 { var contractor_utf16_croatian_ci_weights = [...]uint16{0x0e6e, 0x0000, 0x0000, 0x0e6e, 0x0000, 0x0000, 0x0f2f, 0x0000, 0x0000, 0x0f2f, 0x0000, 0x0000, 0x0f65, 0x0000, 0x0000, 0x0f65, 0x0000, 0x0000, 0x0e6e, 0x0000, 0x0000, 0x0e6e, 0x0000, 0x0000, 0x0f2f, 0x0000, 0x0000, 0x0f2f, 0x0000, 0x0000, 0x0f65, 0x0000, 0x0000, 0x0f65, 0x0000, 0x0000} -var weightTailoring_utf16_vietnamese_ci = []uca.Patch{uca.Patch{Codepoint: 194, Patch: []uint16{0xe35}}, uca.Patch{Codepoint: 202, Patch: []uint16{0xe8c}}, uca.Patch{Codepoint: 212, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 226, Patch: []uint16{0xe35}}, uca.Patch{Codepoint: 234, Patch: []uint16{0xe8c}}, uca.Patch{Codepoint: 244, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 258, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 259, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 272, Patch: []uint16{0xe6e}}, uca.Patch{Codepoint: 273, Patch: []uint16{0xe6e}}, uca.Patch{Codepoint: 416, Patch: []uint16{0xf84}}, uca.Patch{Codepoint: 417, Patch: []uint16{0xf84}}, uca.Patch{Codepoint: 431, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 432, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 7844, Patch: []uint16{0xe35}}, uca.Patch{Codepoint: 7845, Patch: []uint16{0xe35}}, uca.Patch{Codepoint: 7846, Patch: []uint16{0xe35}}, uca.Patch{Codepoint: 7847, Patch: []uint16{0xe35}}, uca.Patch{Codepoint: 7848, Patch: []uint16{0xe35}}, uca.Patch{Codepoint: 7849, Patch: []uint16{0xe35}}, uca.Patch{Codepoint: 7850, Patch: []uint16{0xe35}}, uca.Patch{Codepoint: 7851, Patch: []uint16{0xe35}}, uca.Patch{Codepoint: 7852, Patch: []uint16{0xe35}}, uca.Patch{Codepoint: 7853, Patch: []uint16{0xe35}}, uca.Patch{Codepoint: 7854, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 7855, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 7856, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 7857, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 7858, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 7859, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 7860, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 7861, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 7862, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 7863, Patch: []uint16{0xe34}}, uca.Patch{Codepoint: 7870, Patch: []uint16{0xe8c}}, uca.Patch{Codepoint: 7871, Patch: []uint16{0xe8c}}, uca.Patch{Codepoint: 7872, Patch: []uint16{0xe8c}}, uca.Patch{Codepoint: 7873, Patch: []uint16{0xe8c}}, uca.Patch{Codepoint: 7874, Patch: []uint16{0xe8c}}, uca.Patch{Codepoint: 7875, Patch: []uint16{0xe8c}}, uca.Patch{Codepoint: 7876, Patch: []uint16{0xe8c}}, uca.Patch{Codepoint: 7877, Patch: []uint16{0xe8c}}, uca.Patch{Codepoint: 7878, Patch: []uint16{0xe8c}}, uca.Patch{Codepoint: 7879, Patch: []uint16{0xe8c}}, uca.Patch{Codepoint: 7888, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 7889, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 7890, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 7891, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 7892, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 7893, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 7894, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 7895, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 7896, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 7897, Patch: []uint16{0xf83}}, uca.Patch{Codepoint: 7898, Patch: []uint16{0xf84}}, uca.Patch{Codepoint: 7899, Patch: []uint16{0xf84}}, uca.Patch{Codepoint: 7900, Patch: []uint16{0xf84}}, uca.Patch{Codepoint: 7901, Patch: []uint16{0xf84}}, uca.Patch{Codepoint: 7902, Patch: []uint16{0xf84}}, uca.Patch{Codepoint: 7903, Patch: []uint16{0xf84}}, uca.Patch{Codepoint: 7904, Patch: []uint16{0xf84}}, uca.Patch{Codepoint: 7905, Patch: []uint16{0xf84}}, uca.Patch{Codepoint: 7906, Patch: []uint16{0xf84}}, uca.Patch{Codepoint: 7907, Patch: []uint16{0xf84}}, uca.Patch{Codepoint: 7912, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 7913, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 7914, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 7915, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 7916, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 7917, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 7918, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 7919, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 7920, Patch: []uint16{0x1020}}, uca.Patch{Codepoint: 7921, Patch: []uint16{0x1020}}} +var weightTailoring_utf16_vietnamese_ci = []uca.Patch{{Codepoint: 194, Patch: []uint16{0xe35}}, {Codepoint: 202, Patch: []uint16{0xe8c}}, {Codepoint: 212, Patch: []uint16{0xf83}}, {Codepoint: 226, Patch: []uint16{0xe35}}, {Codepoint: 234, Patch: []uint16{0xe8c}}, {Codepoint: 244, Patch: []uint16{0xf83}}, {Codepoint: 258, Patch: []uint16{0xe34}}, {Codepoint: 259, Patch: []uint16{0xe34}}, {Codepoint: 272, Patch: []uint16{0xe6e}}, {Codepoint: 273, Patch: []uint16{0xe6e}}, {Codepoint: 416, Patch: []uint16{0xf84}}, {Codepoint: 417, Patch: []uint16{0xf84}}, {Codepoint: 431, Patch: []uint16{0x1020}}, {Codepoint: 432, Patch: []uint16{0x1020}}, {Codepoint: 7844, Patch: []uint16{0xe35}}, {Codepoint: 7845, Patch: []uint16{0xe35}}, {Codepoint: 7846, Patch: []uint16{0xe35}}, {Codepoint: 7847, Patch: []uint16{0xe35}}, {Codepoint: 7848, Patch: []uint16{0xe35}}, {Codepoint: 7849, Patch: []uint16{0xe35}}, {Codepoint: 7850, Patch: []uint16{0xe35}}, {Codepoint: 7851, Patch: []uint16{0xe35}}, {Codepoint: 7852, Patch: []uint16{0xe35}}, {Codepoint: 7853, Patch: []uint16{0xe35}}, {Codepoint: 7854, Patch: []uint16{0xe34}}, {Codepoint: 7855, Patch: []uint16{0xe34}}, {Codepoint: 7856, Patch: []uint16{0xe34}}, {Codepoint: 7857, Patch: []uint16{0xe34}}, {Codepoint: 7858, Patch: []uint16{0xe34}}, {Codepoint: 7859, Patch: []uint16{0xe34}}, {Codepoint: 7860, Patch: []uint16{0xe34}}, {Codepoint: 7861, Patch: []uint16{0xe34}}, {Codepoint: 7862, Patch: []uint16{0xe34}}, {Codepoint: 7863, Patch: []uint16{0xe34}}, {Codepoint: 7870, Patch: []uint16{0xe8c}}, {Codepoint: 7871, Patch: []uint16{0xe8c}}, {Codepoint: 7872, Patch: []uint16{0xe8c}}, {Codepoint: 7873, Patch: []uint16{0xe8c}}, {Codepoint: 7874, Patch: []uint16{0xe8c}}, {Codepoint: 7875, Patch: []uint16{0xe8c}}, {Codepoint: 7876, Patch: []uint16{0xe8c}}, {Codepoint: 7877, Patch: []uint16{0xe8c}}, {Codepoint: 7878, Patch: []uint16{0xe8c}}, {Codepoint: 7879, Patch: []uint16{0xe8c}}, {Codepoint: 7888, Patch: []uint16{0xf83}}, {Codepoint: 7889, Patch: []uint16{0xf83}}, {Codepoint: 7890, Patch: []uint16{0xf83}}, {Codepoint: 7891, Patch: []uint16{0xf83}}, {Codepoint: 7892, Patch: []uint16{0xf83}}, {Codepoint: 7893, Patch: []uint16{0xf83}}, {Codepoint: 7894, Patch: []uint16{0xf83}}, {Codepoint: 7895, Patch: []uint16{0xf83}}, {Codepoint: 7896, Patch: []uint16{0xf83}}, {Codepoint: 7897, Patch: []uint16{0xf83}}, {Codepoint: 7898, Patch: []uint16{0xf84}}, {Codepoint: 7899, Patch: []uint16{0xf84}}, {Codepoint: 7900, Patch: []uint16{0xf84}}, {Codepoint: 7901, Patch: []uint16{0xf84}}, {Codepoint: 7902, Patch: []uint16{0xf84}}, {Codepoint: 7903, Patch: []uint16{0xf84}}, {Codepoint: 7904, Patch: []uint16{0xf84}}, {Codepoint: 7905, Patch: []uint16{0xf84}}, {Codepoint: 7906, Patch: []uint16{0xf84}}, {Codepoint: 7907, Patch: []uint16{0xf84}}, {Codepoint: 7912, Patch: []uint16{0x1020}}, {Codepoint: 7913, Patch: []uint16{0x1020}}, {Codepoint: 7914, Patch: []uint16{0x1020}}, {Codepoint: 7915, Patch: []uint16{0x1020}}, {Codepoint: 7916, Patch: []uint16{0x1020}}, {Codepoint: 7917, Patch: []uint16{0x1020}}, {Codepoint: 7918, Patch: []uint16{0x1020}}, {Codepoint: 7919, Patch: []uint16{0x1020}}, {Codepoint: 7920, Patch: []uint16{0x1020}}, {Codepoint: 7921, Patch: []uint16{0x1020}}} -var weightTailoring_utf8mb4_de_pb_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 196, Patch: []uint16{0x1c47, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21}}, uca.Patch{Codepoint: 214, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21}}, uca.Patch{Codepoint: 220, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21}}, uca.Patch{Codepoint: 228, Patch: []uint16{0x1c47, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 246, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 252, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 469, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 470, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 471, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 472, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 473, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 474, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 475, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 476, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 478, Patch: []uint16{0x1c47, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 479, Patch: []uint16{0x1c47, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 554, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 555, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x32, 0x2}}} +var weightTailoring_utf8mb4_de_pb_0900_ai_ci = []uca.Patch{{Codepoint: 196, Patch: []uint16{0x1c47, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21}}, {Codepoint: 214, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21}}, {Codepoint: 220, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21}}, {Codepoint: 228, Patch: []uint16{0x1c47, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0}}, {Codepoint: 246, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0}}, {Codepoint: 252, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0}}, {Codepoint: 469, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 470, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 471, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 472, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 473, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x28, 0x2}}, {Codepoint: 474, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x28, 0x2}}, {Codepoint: 475, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 476, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 478, Patch: []uint16{0x1c47, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 479, Patch: []uint16{0x1c47, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 554, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 555, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x32, 0x2}}} -var weightTailoring_utf8mb4_is_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 193, Patch: []uint16{0x1c5f, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 196, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x116, 0x21}}, uca.Patch{Codepoint: 197, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x21}}, uca.Patch{Codepoint: 198, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 201, Patch: []uint16{0x1ce4, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 205, Patch: []uint16{0x1d4b, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 208, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 211, Patch: []uint16{0x1e0b, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 214, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x21}}, uca.Patch{Codepoint: 216, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x21}}, uca.Patch{Codepoint: 218, Patch: []uint16{0x1ee2, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 221, Patch: []uint16{0x1f20, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 225, Patch: []uint16{0x1c5f, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 228, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x116, 0x0}}, uca.Patch{Codepoint: 229, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x0}}, uca.Patch{Codepoint: 230, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 233, Patch: []uint16{0x1ce4, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 237, Patch: []uint16{0x1d4b, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 240, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 243, Patch: []uint16{0x1e0b, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 246, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x0}}, uca.Patch{Codepoint: 248, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x0}}, uca.Patch{Codepoint: 250, Patch: []uint16{0x1ee2, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 253, Patch: []uint16{0x1f20, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 272, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x0, 0x116, 0x21}}, uca.Patch{Codepoint: 273, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 478, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x116, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 479, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x116, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 482, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 483, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 506, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 507, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 508, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 509, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 510, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 511, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 554, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 555, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 7898, Patch: []uint16{0x1e0b, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x3f, 0x2}}, uca.Patch{Codepoint: 7899, Patch: []uint16{0x1e0b, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x3f, 0x2}}, uca.Patch{Codepoint: 7912, Patch: []uint16{0x1ee2, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x3f, 0x2}}, uca.Patch{Codepoint: 7913, Patch: []uint16{0x1ee2, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x3f, 0x2}}, uca.Patch{Codepoint: 8491, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x21}}} +var weightTailoring_utf8mb4_is_0900_ai_ci = []uca.Patch{{Codepoint: 193, Patch: []uint16{0x1c5f, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 196, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x116, 0x21}}, {Codepoint: 197, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x21}}, {Codepoint: 198, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 201, Patch: []uint16{0x1ce4, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 205, Patch: []uint16{0x1d4b, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 208, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 211, Patch: []uint16{0x1e0b, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 214, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x21}}, {Codepoint: 216, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x21}}, {Codepoint: 218, Patch: []uint16{0x1ee2, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 221, Patch: []uint16{0x1f20, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 225, Patch: []uint16{0x1c5f, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 228, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x116, 0x0}}, {Codepoint: 229, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x0}}, {Codepoint: 230, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 233, Patch: []uint16{0x1ce4, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 237, Patch: []uint16{0x1d4b, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 240, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 243, Patch: []uint16{0x1e0b, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 246, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x0}}, {Codepoint: 248, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x0}}, {Codepoint: 250, Patch: []uint16{0x1ee2, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 253, Patch: []uint16{0x1f20, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 272, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x0, 0x116, 0x21}}, {Codepoint: 273, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 478, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x116, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 479, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x116, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 482, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 483, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 506, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 507, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 508, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 509, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 510, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 511, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 554, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 555, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 7898, Patch: []uint16{0x1e0b, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x3f, 0x2}}, {Codepoint: 7899, Patch: []uint16{0x1e0b, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x3f, 0x2}}, {Codepoint: 7912, Patch: []uint16{0x1ee2, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x3f, 0x2}}, {Codepoint: 7913, Patch: []uint16{0x1ee2, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x3f, 0x2}}, {Codepoint: 8491, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x21}}} -var weightTailoring_utf8mb4_lv_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 89, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21}}, uca.Patch{Codepoint: 121, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 221, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 253, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 255, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2b, 0x2}}, uca.Patch{Codepoint: 268, Patch: []uint16{0x1c8e, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 269, Patch: []uint16{0x1c8e, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 290, Patch: []uint16{0x1d17, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 291, Patch: []uint16{0x1d17, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 310, Patch: []uint16{0x1d76, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 311, Patch: []uint16{0x1d76, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 315, Patch: []uint16{0x1da9, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 316, Patch: []uint16{0x1da9, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 325, Patch: []uint16{0x1ddc, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 326, Patch: []uint16{0x1ddc, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 342, Patch: []uint16{0x1e70, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 343, Patch: []uint16{0x1e70, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 352, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 353, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 374, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x27, 0x2}}, uca.Patch{Codepoint: 375, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x27, 0x2}}, uca.Patch{Codepoint: 376, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2b, 0x2}}, uca.Patch{Codepoint: 381, Patch: []uint16{0x1f3d, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 382, Patch: []uint16{0x1f3d, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 562, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 563, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 7782, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7783, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7822, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7823, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7833, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x29, 0x2}}, uca.Patch{Codepoint: 7922, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7923, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7924, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x42, 0x2}}, uca.Patch{Codepoint: 7925, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x42, 0x2}}, uca.Patch{Codepoint: 7926, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3b, 0x2}}, uca.Patch{Codepoint: 7927, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3b, 0x2}}, uca.Patch{Codepoint: 7928, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2d, 0x2}}, uca.Patch{Codepoint: 7929, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2d, 0x2}}} +var weightTailoring_utf8mb4_lv_0900_ai_ci = []uca.Patch{{Codepoint: 89, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21}}, {Codepoint: 121, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0}}, {Codepoint: 221, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 253, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 255, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2b, 0x2}}, {Codepoint: 268, Patch: []uint16{0x1c8e, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 269, Patch: []uint16{0x1c8e, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 290, Patch: []uint16{0x1d17, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 291, Patch: []uint16{0x1d17, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 310, Patch: []uint16{0x1d76, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 311, Patch: []uint16{0x1d76, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 315, Patch: []uint16{0x1da9, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 316, Patch: []uint16{0x1da9, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 325, Patch: []uint16{0x1ddc, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 326, Patch: []uint16{0x1ddc, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 342, Patch: []uint16{0x1e70, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 343, Patch: []uint16{0x1e70, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 352, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 353, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 374, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x27, 0x2}}, {Codepoint: 375, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x27, 0x2}}, {Codepoint: 376, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2b, 0x2}}, {Codepoint: 381, Patch: []uint16{0x1f3d, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 382, Patch: []uint16{0x1f3d, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 562, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 563, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 7782, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2e, 0x2}}, {Codepoint: 7783, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2e, 0x2}}, {Codepoint: 7822, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2e, 0x2}}, {Codepoint: 7823, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2e, 0x2}}, {Codepoint: 7833, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x29, 0x2}}, {Codepoint: 7922, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7923, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7924, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x42, 0x2}}, {Codepoint: 7925, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x42, 0x2}}, {Codepoint: 7926, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3b, 0x2}}, {Codepoint: 7927, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3b, 0x2}}, {Codepoint: 7928, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2d, 0x2}}, {Codepoint: 7929, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2d, 0x2}}} -var weightTailoring_utf8mb4_ro_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 194, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x21}}, uca.Patch{Codepoint: 206, Patch: []uint16{0x1d32, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 226, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x0}}, uca.Patch{Codepoint: 238, Patch: []uint16{0x1d32, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 258, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 259, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 350, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 351, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 354, Patch: []uint16{0x1e95, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 355, Patch: []uint16{0x1e95, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 536, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 537, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 538, Patch: []uint16{0x1e95, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 539, Patch: []uint16{0x1e95, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 7844, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 7845, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 7846, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7847, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7848, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x3b, 0x2}}, uca.Patch{Codepoint: 7849, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x3b, 0x2}}, uca.Patch{Codepoint: 7850, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x2d, 0x2}}, uca.Patch{Codepoint: 7851, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x2d, 0x2}}, uca.Patch{Codepoint: 7852, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x42, 0x2}}, uca.Patch{Codepoint: 7853, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x42, 0x2}}, uca.Patch{Codepoint: 7854, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 7855, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 7856, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7857, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7858, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x3b, 0x2}}, uca.Patch{Codepoint: 7859, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x3b, 0x2}}, uca.Patch{Codepoint: 7860, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2d, 0x2}}, uca.Patch{Codepoint: 7861, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2d, 0x2}}, uca.Patch{Codepoint: 7862, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x42, 0x2}}, uca.Patch{Codepoint: 7863, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x42, 0x2}}} +var weightTailoring_utf8mb4_ro_0900_ai_ci = []uca.Patch{{Codepoint: 194, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x21}}, {Codepoint: 206, Patch: []uint16{0x1d32, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 226, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x0}}, {Codepoint: 238, Patch: []uint16{0x1d32, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 258, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 259, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 350, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 351, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 354, Patch: []uint16{0x1e95, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 355, Patch: []uint16{0x1e95, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 536, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 537, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 538, Patch: []uint16{0x1e95, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 539, Patch: []uint16{0x1e95, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 7844, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 7845, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 7846, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7847, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7848, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x3b, 0x2}}, {Codepoint: 7849, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x3b, 0x2}}, {Codepoint: 7850, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x2d, 0x2}}, {Codepoint: 7851, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x2d, 0x2}}, {Codepoint: 7852, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x42, 0x2}}, {Codepoint: 7853, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x42, 0x2}}, {Codepoint: 7854, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 7855, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 7856, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7857, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7858, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x3b, 0x2}}, {Codepoint: 7859, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x3b, 0x2}}, {Codepoint: 7860, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2d, 0x2}}, {Codepoint: 7861, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2d, 0x2}}, {Codepoint: 7862, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x42, 0x2}}, {Codepoint: 7863, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x42, 0x2}}} -var weightTailoring_utf8mb4_sl_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 262, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a6, 0x0, 0x21}}, uca.Patch{Codepoint: 263, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a6, 0x0, 0x0}}, uca.Patch{Codepoint: 268, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 269, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 272, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 273, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 352, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 353, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 381, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 382, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 7688, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x30, 0x2}}, uca.Patch{Codepoint: 7689, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x30, 0x2}}, uca.Patch{Codepoint: 7782, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7783, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2e, 0x2}}} +var weightTailoring_utf8mb4_sl_0900_ai_ci = []uca.Patch{{Codepoint: 262, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a6, 0x0, 0x21}}, {Codepoint: 263, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a6, 0x0, 0x0}}, {Codepoint: 268, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 269, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 272, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 273, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 352, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 353, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 381, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 382, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 7688, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x30, 0x2}}, {Codepoint: 7689, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x30, 0x2}}, {Codepoint: 7782, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2e, 0x2}}, {Codepoint: 7783, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2e, 0x2}}} -var weightTailoring_utf8mb4_pl_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 211, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 243, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 260, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 261, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 262, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 263, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 280, Patch: []uint16{0x1caa, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 281, Patch: []uint16{0x1caa, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 321, Patch: []uint16{0x1d77, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 322, Patch: []uint16{0x1d77, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 323, Patch: []uint16{0x1db9, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 324, Patch: []uint16{0x1db9, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 346, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 347, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 377, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 378, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 379, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a6, 0x0, 0x21}}, uca.Patch{Codepoint: 380, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a6, 0x0, 0x0}}, uca.Patch{Codepoint: 7688, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x30, 0x2}}, uca.Patch{Codepoint: 7689, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x30, 0x2}}, uca.Patch{Codepoint: 7780, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7781, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7898, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x3f, 0x2}}, uca.Patch{Codepoint: 7899, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x3f, 0x2}}} +var weightTailoring_utf8mb4_pl_0900_ai_ci = []uca.Patch{{Codepoint: 211, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 243, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 260, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 261, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 262, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 263, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 280, Patch: []uint16{0x1caa, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 281, Patch: []uint16{0x1caa, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 321, Patch: []uint16{0x1d77, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 322, Patch: []uint16{0x1d77, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 323, Patch: []uint16{0x1db9, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 324, Patch: []uint16{0x1db9, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 346, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 347, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 377, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 378, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 379, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a6, 0x0, 0x21}}, {Codepoint: 380, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a6, 0x0, 0x0}}, {Codepoint: 7688, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x30, 0x2}}, {Codepoint: 7689, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x30, 0x2}}, {Codepoint: 7780, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2e, 0x2}}, {Codepoint: 7781, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2e, 0x2}}, {Codepoint: 7898, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x3f, 0x2}}, {Codepoint: 7899, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x3f, 0x2}}} -var weightTailoring_utf8mb4_et_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 90, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x21}}, uca.Patch{Codepoint: 122, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x0}}, uca.Patch{Codepoint: 196, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a6, 0x0, 0x21}}, uca.Patch{Codepoint: 213, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 214, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a7, 0x0, 0x21}}, uca.Patch{Codepoint: 220, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x21}}, uca.Patch{Codepoint: 228, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a6, 0x0, 0x0}}, uca.Patch{Codepoint: 245, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 246, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a7, 0x0, 0x0}}, uca.Patch{Codepoint: 252, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x0}}, uca.Patch{Codepoint: 352, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 353, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 377, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 378, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 379, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 380, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 381, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a7, 0x0, 0x21}}, uca.Patch{Codepoint: 382, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a7, 0x0, 0x0}}, uca.Patch{Codepoint: 469, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 470, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 471, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 472, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 473, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x21, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 474, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x0, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 475, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 476, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 478, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 479, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 554, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a7, 0x0, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 555, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a7, 0x0, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 556, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 557, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 7756, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 7757, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 7758, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2b, 0x2}}, uca.Patch{Codepoint: 7759, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2b, 0x2}}, uca.Patch{Codepoint: 7782, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7783, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7824, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x27, 0x2}}, uca.Patch{Codepoint: 7825, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x27, 0x2}}, uca.Patch{Codepoint: 7826, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x42, 0x2}}, uca.Patch{Codepoint: 7827, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x42, 0x2}}, uca.Patch{Codepoint: 7828, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x49, 0x2}}, uca.Patch{Codepoint: 7829, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x49, 0x2}}, uca.Patch{Codepoint: 7904, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x3f, 0x2}}, uca.Patch{Codepoint: 7905, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x3f, 0x2}}} +var weightTailoring_utf8mb4_et_0900_ai_ci = []uca.Patch{{Codepoint: 90, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x21}}, {Codepoint: 122, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x0}}, {Codepoint: 196, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a6, 0x0, 0x21}}, {Codepoint: 213, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 214, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a7, 0x0, 0x21}}, {Codepoint: 220, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x21}}, {Codepoint: 228, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a6, 0x0, 0x0}}, {Codepoint: 245, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 246, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a7, 0x0, 0x0}}, {Codepoint: 252, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x0}}, {Codepoint: 352, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 353, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 377, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 378, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 379, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x2e, 0x2}}, {Codepoint: 380, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x2e, 0x2}}, {Codepoint: 381, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a7, 0x0, 0x21}}, {Codepoint: 382, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a7, 0x0, 0x0}}, {Codepoint: 469, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 470, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 471, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 472, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 473, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x21, 0x0, 0x28, 0x2}}, {Codepoint: 474, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x0, 0x0, 0x28, 0x2}}, {Codepoint: 475, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 476, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a8, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 478, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 479, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 554, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a7, 0x0, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 555, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a7, 0x0, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 556, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 557, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 7756, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 7757, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 7758, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2b, 0x2}}, {Codepoint: 7759, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2b, 0x2}}, {Codepoint: 7782, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2e, 0x2}}, {Codepoint: 7783, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2e, 0x2}}, {Codepoint: 7824, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x27, 0x2}}, {Codepoint: 7825, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x27, 0x2}}, {Codepoint: 7826, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x42, 0x2}}, {Codepoint: 7827, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x42, 0x2}}, {Codepoint: 7828, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x49, 0x2}}, {Codepoint: 7829, Patch: []uint16{0x1e94, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x49, 0x2}}, {Codepoint: 7904, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x3f, 0x2}}, {Codepoint: 7905, Patch: []uint16{0x1efe, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x3f, 0x2}}} -var weightTailoring_utf8mb4_es_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 209, Patch: []uint16{0x1db9, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 241, Patch: []uint16{0x1db9, 0x20, 0x8, 0x54a5, 0x0, 0x0}}} +var weightTailoring_utf8mb4_es_0900_ai_ci = []uca.Patch{{Codepoint: 209, Patch: []uint16{0x1db9, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 241, Patch: []uint16{0x1db9, 0x20, 0x8, 0x54a5, 0x0, 0x0}}} -var weightTailoring_utf8mb4_sv_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 196, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x21}}, uca.Patch{Codepoint: 197, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 198, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x21}}, uca.Patch{Codepoint: 208, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x117, 0x21}}, uca.Patch{Codepoint: 212, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x21}}, uca.Patch{Codepoint: 214, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x21}}, uca.Patch{Codepoint: 216, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x116, 0x21}}, uca.Patch{Codepoint: 220, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21}}, uca.Patch{Codepoint: 222, Patch: []uint16{0x1e95, 0x20, 0x8, 0x1d18, 0x20, 0x8, 0x0, 0x0, 0x21}}, uca.Patch{Codepoint: 228, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x0}}, uca.Patch{Codepoint: 229, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 230, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x0}}, uca.Patch{Codepoint: 240, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 244, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x0}}, uca.Patch{Codepoint: 246, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x0}}, uca.Patch{Codepoint: 248, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x116, 0x0}}, uca.Patch{Codepoint: 252, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 254, Patch: []uint16{0x1e95, 0x20, 0x2, 0x1d18, 0x20, 0x2, 0x0, 0x0, 0x21}}, uca.Patch{Codepoint: 272, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x116, 0x21}}, uca.Patch{Codepoint: 273, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 280, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x117, 0x21}}, uca.Patch{Codepoint: 281, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x117, 0x0}}, uca.Patch{Codepoint: 336, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x117, 0x21}}, uca.Patch{Codepoint: 337, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x117, 0x0}}, uca.Patch{Codepoint: 338, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x118, 0x21}}, uca.Patch{Codepoint: 339, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x118, 0x0}}, uca.Patch{Codepoint: 368, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x117, 0x21}}, uca.Patch{Codepoint: 369, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 469, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 470, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 471, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 472, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 473, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 474, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 475, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 476, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 478, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 479, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 482, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 483, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 506, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 507, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 508, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 509, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 510, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x116, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 511, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x116, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 554, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 555, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 7888, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 7889, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 7890, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7891, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7892, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x21, 0x0, 0x3b, 0x2}}, uca.Patch{Codepoint: 7893, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x0, 0x0, 0x3b, 0x2}}, uca.Patch{Codepoint: 7894, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x21, 0x0, 0x2d, 0x2}}, uca.Patch{Codepoint: 7895, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x0, 0x0, 0x2d, 0x2}}, uca.Patch{Codepoint: 7896, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x21, 0x0, 0x42, 0x2}}, uca.Patch{Codepoint: 7897, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x0, 0x0, 0x42, 0x2}}, uca.Patch{Codepoint: 8491, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21}}} +var weightTailoring_utf8mb4_sv_0900_ai_ci = []uca.Patch{{Codepoint: 196, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x21}}, {Codepoint: 197, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 198, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x21}}, {Codepoint: 208, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x117, 0x21}}, {Codepoint: 212, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x21}}, {Codepoint: 214, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x21}}, {Codepoint: 216, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x116, 0x21}}, {Codepoint: 220, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21}}, {Codepoint: 222, Patch: []uint16{0x1e95, 0x20, 0x8, 0x1d18, 0x20, 0x8, 0x0, 0x0, 0x21}}, {Codepoint: 228, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x0}}, {Codepoint: 229, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 230, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x0}}, {Codepoint: 240, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x117, 0x0}}, {Codepoint: 244, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x0}}, {Codepoint: 246, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x0}}, {Codepoint: 248, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x116, 0x0}}, {Codepoint: 252, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0}}, {Codepoint: 254, Patch: []uint16{0x1e95, 0x20, 0x2, 0x1d18, 0x20, 0x2, 0x0, 0x0, 0x21}}, {Codepoint: 272, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x116, 0x21}}, {Codepoint: 273, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x116, 0x0}}, {Codepoint: 280, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x117, 0x21}}, {Codepoint: 281, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x117, 0x0}}, {Codepoint: 336, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x117, 0x21}}, {Codepoint: 337, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x117, 0x0}}, {Codepoint: 338, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x118, 0x21}}, {Codepoint: 339, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x118, 0x0}}, {Codepoint: 368, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x117, 0x21}}, {Codepoint: 369, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x117, 0x0}}, {Codepoint: 469, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 470, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 471, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 472, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 473, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x28, 0x2}}, {Codepoint: 474, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x28, 0x2}}, {Codepoint: 475, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 476, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 478, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 479, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 482, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 483, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 506, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 507, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 508, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 509, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 510, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x116, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 511, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x116, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 554, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 555, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 7888, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 7889, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 7890, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7891, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7892, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x21, 0x0, 0x3b, 0x2}}, {Codepoint: 7893, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x0, 0x0, 0x3b, 0x2}}, {Codepoint: 7894, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x21, 0x0, 0x2d, 0x2}}, {Codepoint: 7895, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x0, 0x0, 0x2d, 0x2}}, {Codepoint: 7896, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x21, 0x0, 0x42, 0x2}}, {Codepoint: 7897, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x119, 0x0, 0x0, 0x42, 0x2}}, {Codepoint: 8491, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21}}} -var weightTailoring_utf8mb4_tr_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 73, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 199, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 204, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 205, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 206, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x27, 0x2}}, uca.Patch{Codepoint: 207, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x2b, 0x2}}, uca.Patch{Codepoint: 214, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 220, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 231, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 246, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 252, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 286, Patch: []uint16{0x1cf4, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 287, Patch: []uint16{0x1cf4, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 296, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x2d, 0x2}}, uca.Patch{Codepoint: 298, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 300, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x26, 0x2}}, uca.Patch{Codepoint: 302, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x31, 0x2}}, uca.Patch{Codepoint: 304, Patch: []uint16{0x1d32, 0x20, 0x2, 0x0, 0x0, 0x21}}, uca.Patch{Codepoint: 305, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 350, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 351, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 463, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 469, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 470, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 471, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 472, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 473, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 474, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 475, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 476, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 520, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x3c, 0x2}}, uca.Patch{Codepoint: 522, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x3e, 0x2}}, uca.Patch{Codepoint: 554, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 555, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 7688, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 7689, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 7724, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x48, 0x2}}, uca.Patch{Codepoint: 7726, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x2b, 0x2, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 7880, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x3b, 0x2}}, uca.Patch{Codepoint: 7882, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x42, 0x2}}} +var weightTailoring_utf8mb4_tr_0900_ai_ci = []uca.Patch{{Codepoint: 73, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 199, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 204, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 205, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 206, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x27, 0x2}}, {Codepoint: 207, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x2b, 0x2}}, {Codepoint: 214, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 220, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 231, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 246, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 252, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 286, Patch: []uint16{0x1cf4, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 287, Patch: []uint16{0x1cf4, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 296, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x2d, 0x2}}, {Codepoint: 298, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 300, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x26, 0x2}}, {Codepoint: 302, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x31, 0x2}}, {Codepoint: 304, Patch: []uint16{0x1d32, 0x20, 0x2, 0x0, 0x0, 0x21}}, {Codepoint: 305, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 350, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 351, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 463, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x28, 0x2}}, {Codepoint: 469, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 470, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 471, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 472, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 473, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x28, 0x2}}, {Codepoint: 474, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x28, 0x2}}, {Codepoint: 475, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 476, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 520, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x3c, 0x2}}, {Codepoint: 522, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x3e, 0x2}}, {Codepoint: 554, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 555, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 7688, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 7689, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 7724, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x48, 0x2}}, {Codepoint: 7726, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x2b, 0x2, 0x0, 0x24, 0x2}}, {Codepoint: 7880, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x3b, 0x2}}, {Codepoint: 7882, Patch: []uint16{0x1d31, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x42, 0x2}}} -var weightTailoring_utf8mb4_cs_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 268, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 269, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 344, Patch: []uint16{0x1e33, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 345, Patch: []uint16{0x1e33, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 352, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 353, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 381, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 382, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 7782, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7783, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2e, 0x2}}} +var weightTailoring_utf8mb4_cs_0900_ai_ci = []uca.Patch{{Codepoint: 268, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 269, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 344, Patch: []uint16{0x1e33, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 345, Patch: []uint16{0x1e33, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 352, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 353, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 381, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 382, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 7782, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2e, 0x2}}, {Codepoint: 7783, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2e, 0x2}}} type contractor_utf8mb4_cs_0900_ai_ci struct{} @@ -3058,7 +3074,7 @@ var contractor_utf8mb4_cs_0900_ai_ci_weights = [...]uint16{ 0x0000, 0x0021, 0x1d18, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, } -var weightTailoring_utf8mb4_da_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 196, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x116, 0x21}}, uca.Patch{Codepoint: 197, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x21}}, uca.Patch{Codepoint: 198, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 208, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x117, 0x21}}, uca.Patch{Codepoint: 214, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x21}}, uca.Patch{Codepoint: 216, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x21}}, uca.Patch{Codepoint: 220, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21}}, uca.Patch{Codepoint: 222, Patch: []uint16{0x1e95, 0x20, 0x8, 0x1d18, 0x20, 0x8, 0x0, 0x0, 0x21}}, uca.Patch{Codepoint: 228, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x116, 0x0}}, uca.Patch{Codepoint: 229, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x0}}, uca.Patch{Codepoint: 230, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 240, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 246, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x0}}, uca.Patch{Codepoint: 248, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x0}}, uca.Patch{Codepoint: 252, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 254, Patch: []uint16{0x1e95, 0x20, 0x2, 0x1d18, 0x20, 0x2, 0x0, 0x0, 0x21}}, uca.Patch{Codepoint: 272, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x116, 0x21}}, uca.Patch{Codepoint: 273, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 336, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x117, 0x21}}, uca.Patch{Codepoint: 337, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x117, 0x0}}, uca.Patch{Codepoint: 338, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x118, 0x21}}, uca.Patch{Codepoint: 339, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x118, 0x0}}, uca.Patch{Codepoint: 368, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x117, 0x21}}, uca.Patch{Codepoint: 369, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 469, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 470, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 471, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 472, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 473, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 474, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 475, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 476, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 478, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x116, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 479, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x116, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 482, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 483, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 506, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 507, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 508, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 509, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 510, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 511, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 554, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 555, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 8491, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x21}}} +var weightTailoring_utf8mb4_da_0900_ai_ci = []uca.Patch{{Codepoint: 196, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x116, 0x21}}, {Codepoint: 197, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x21}}, {Codepoint: 198, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 208, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x117, 0x21}}, {Codepoint: 214, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x21}}, {Codepoint: 216, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x21}}, {Codepoint: 220, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21}}, {Codepoint: 222, Patch: []uint16{0x1e95, 0x20, 0x8, 0x1d18, 0x20, 0x8, 0x0, 0x0, 0x21}}, {Codepoint: 228, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x116, 0x0}}, {Codepoint: 229, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x0}}, {Codepoint: 230, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 240, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x117, 0x0}}, {Codepoint: 246, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x0}}, {Codepoint: 248, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x0}}, {Codepoint: 252, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0}}, {Codepoint: 254, Patch: []uint16{0x1e95, 0x20, 0x2, 0x1d18, 0x20, 0x2, 0x0, 0x0, 0x21}}, {Codepoint: 272, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x116, 0x21}}, {Codepoint: 273, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x116, 0x0}}, {Codepoint: 336, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x117, 0x21}}, {Codepoint: 337, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x117, 0x0}}, {Codepoint: 338, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x118, 0x21}}, {Codepoint: 339, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x118, 0x0}}, {Codepoint: 368, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x117, 0x21}}, {Codepoint: 369, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x117, 0x0}}, {Codepoint: 469, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 470, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 471, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 472, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 473, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x28, 0x2}}, {Codepoint: 474, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x28, 0x2}}, {Codepoint: 475, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 476, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 478, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x116, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 479, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x116, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 482, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 483, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 506, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 507, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 508, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 509, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 510, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 511, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 554, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 555, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a6, 0x116, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 8491, Patch: []uint16{0x1f98, 0x20, 0x2, 0x54a7, 0x0, 0x21}}} type contractor_utf8mb4_da_0900_ai_ci struct{} @@ -3087,7 +3103,7 @@ func (contractor_utf8mb4_da_0900_ai_ci) FindContextual(cp1, cp0 rune) []uint16 { var contractor_utf8mb4_da_0900_ai_ci_weights = [...]uint16{0x1f98, 0x0020, 0x0002, 0x54a7, 0x0000, 0x0024, 0x1f98, 0x0020, 0x0002, 0x54a7, 0x0000, 0x0023, 0x1f98, 0x0020, 0x0002, 0x54a7, 0x0000, 0x0022} -var weightTailoring_utf8mb4_lt_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 89, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21}}, uca.Patch{Codepoint: 121, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 221, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 253, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 255, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x2b, 0x2}}, uca.Patch{Codepoint: 260, Patch: []uint16{0x1c47, 0x20, 0x8, 0x0, 0x116, 0x21}}, uca.Patch{Codepoint: 261, Patch: []uint16{0x1c47, 0x20, 0x8, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 268, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 269, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 278, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x117, 0x21}}, uca.Patch{Codepoint: 279, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 280, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21}}, uca.Patch{Codepoint: 281, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 302, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21}}, uca.Patch{Codepoint: 303, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 352, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 353, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 362, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x117, 0x21}}, uca.Patch{Codepoint: 363, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 370, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x116, 0x21}}, uca.Patch{Codepoint: 371, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 374, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x27, 0x2}}, uca.Patch{Codepoint: 375, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x27, 0x2}}, uca.Patch{Codepoint: 376, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x2b, 0x2}}, uca.Patch{Codepoint: 381, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 382, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 562, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 563, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 7782, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7783, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7802, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x2b, 0x2}}, uca.Patch{Codepoint: 7803, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x2b, 0x2}}, uca.Patch{Codepoint: 7822, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7823, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7833, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x29, 0x2}}, uca.Patch{Codepoint: 7922, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7923, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7924, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x42, 0x2}}, uca.Patch{Codepoint: 7925, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x42, 0x2}}, uca.Patch{Codepoint: 7926, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x3b, 0x2}}, uca.Patch{Codepoint: 7927, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x3b, 0x2}}, uca.Patch{Codepoint: 7928, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x2d, 0x2}}, uca.Patch{Codepoint: 7929, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x2d, 0x2}}} +var weightTailoring_utf8mb4_lt_0900_ai_ci = []uca.Patch{{Codepoint: 89, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21}}, {Codepoint: 121, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0}}, {Codepoint: 221, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 253, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 255, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x2b, 0x2}}, {Codepoint: 260, Patch: []uint16{0x1c47, 0x20, 0x8, 0x0, 0x116, 0x21}}, {Codepoint: 261, Patch: []uint16{0x1c47, 0x20, 0x8, 0x0, 0x116, 0x0}}, {Codepoint: 268, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 269, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 278, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x117, 0x21}}, {Codepoint: 279, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x117, 0x0}}, {Codepoint: 280, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x116, 0x21}}, {Codepoint: 281, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x116, 0x0}}, {Codepoint: 302, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21}}, {Codepoint: 303, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0}}, {Codepoint: 352, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 353, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 362, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x117, 0x21}}, {Codepoint: 363, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x117, 0x0}}, {Codepoint: 370, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x116, 0x21}}, {Codepoint: 371, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x116, 0x0}}, {Codepoint: 374, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x27, 0x2}}, {Codepoint: 375, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x27, 0x2}}, {Codepoint: 376, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x2b, 0x2}}, {Codepoint: 381, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 382, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 562, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 563, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 7782, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2e, 0x2}}, {Codepoint: 7783, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2e, 0x2}}, {Codepoint: 7802, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x2b, 0x2}}, {Codepoint: 7803, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x2b, 0x2}}, {Codepoint: 7822, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x2e, 0x2}}, {Codepoint: 7823, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x2e, 0x2}}, {Codepoint: 7833, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x29, 0x2}}, {Codepoint: 7922, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7923, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7924, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x42, 0x2}}, {Codepoint: 7925, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x42, 0x2}}, {Codepoint: 7926, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x3b, 0x2}}, {Codepoint: 7927, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x3b, 0x2}}, {Codepoint: 7928, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x21, 0x0, 0x2d, 0x2}}, {Codepoint: 7929, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x117, 0x0, 0x0, 0x2d, 0x2}}} type contractor_utf8mb4_lt_0900_ai_ci struct{} @@ -3112,7 +3128,7 @@ func (contractor_utf8mb4_lt_0900_ai_ci) FindContextual(cp1, cp0 rune) []uint16 { var contractor_utf8mb4_lt_0900_ai_ci_weights = [...]uint16{0x0000, 0x0025, 0x0002, 0x0000, 0x0024, 0x0002, 0x0000, 0x002d, 0x0002} -var weightTailoring_utf8mb4_sk_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 196, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 212, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 228, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 244, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 268, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 269, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 344, Patch: []uint16{0x1e33, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 345, Patch: []uint16{0x1e33, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 352, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 353, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 381, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 382, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 478, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 479, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 7782, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7783, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7888, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 7889, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 7890, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7891, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7892, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x3b, 0x2}}, uca.Patch{Codepoint: 7893, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x3b, 0x2}}, uca.Patch{Codepoint: 7894, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2d, 0x2}}, uca.Patch{Codepoint: 7895, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2d, 0x2}}, uca.Patch{Codepoint: 7896, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x42, 0x2}}, uca.Patch{Codepoint: 7897, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x42, 0x2}}} +var weightTailoring_utf8mb4_sk_0900_ai_ci = []uca.Patch{{Codepoint: 196, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 212, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 228, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 244, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 268, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 269, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 344, Patch: []uint16{0x1e33, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 345, Patch: []uint16{0x1e33, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 352, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 353, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 381, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 382, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 478, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 479, Patch: []uint16{0x1c47, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 7782, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2e, 0x2}}, {Codepoint: 7783, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2e, 0x2}}, {Codepoint: 7888, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 7889, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 7890, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7891, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7892, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x3b, 0x2}}, {Codepoint: 7893, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x3b, 0x2}}, {Codepoint: 7894, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2d, 0x2}}, {Codepoint: 7895, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2d, 0x2}}, {Codepoint: 7896, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x42, 0x2}}, {Codepoint: 7897, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x42, 0x2}}} type contractor_utf8mb4_es_trad_0900_ai_ci struct{} @@ -3155,11 +3171,11 @@ func (contractor_utf8mb4_es_trad_0900_ai_ci) FindContextual(cp1, cp0 rune) []uin var contractor_utf8mb4_es_trad_0900_ai_ci_weights = [...]uint16{0x1c7a, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1c7a, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0021, 0x1d77, 0x0020, 0x0002, 0x54a5, 0x0000, 0x0022, 0x1d77, 0x0020, 0x0002, 0x54a5, 0x0000, 0x0021, 0x1c7a, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1d77, 0x0020, 0x0002, 0x54a5, 0x0000, 0x0000} -var weightTailoring_utf8mb4_la_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 74, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21}}, uca.Patch{Codepoint: 85, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21}}, uca.Patch{Codepoint: 106, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 117, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 217, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 218, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 219, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x27, 0x2}}, uca.Patch{Codepoint: 220, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2b, 0x2}}, uca.Patch{Codepoint: 249, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 250, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 251, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x27, 0x2}}, uca.Patch{Codepoint: 252, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2b, 0x2}}, uca.Patch{Codepoint: 308, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x27, 0x2}}, uca.Patch{Codepoint: 309, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x27, 0x2}}, uca.Patch{Codepoint: 360, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2d, 0x2}}, uca.Patch{Codepoint: 361, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2d, 0x2}}, uca.Patch{Codepoint: 362, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 363, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 364, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x26, 0x2}}, uca.Patch{Codepoint: 365, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x26, 0x2}}, uca.Patch{Codepoint: 366, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x29, 0x2}}, uca.Patch{Codepoint: 367, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x29, 0x2}}, uca.Patch{Codepoint: 368, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2c, 0x2}}, uca.Patch{Codepoint: 369, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2c, 0x2}}, uca.Patch{Codepoint: 370, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x31, 0x2}}, uca.Patch{Codepoint: 371, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x31, 0x2}}, uca.Patch{Codepoint: 431, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3f, 0x2}}, uca.Patch{Codepoint: 432, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3f, 0x2}}, uca.Patch{Codepoint: 467, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 468, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 469, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2b, 0x2, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 470, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2b, 0x2, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 471, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2b, 0x2, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 472, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2b, 0x2, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 473, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2b, 0x2, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 474, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2b, 0x2, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 475, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 476, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 496, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 532, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3c, 0x2}}, uca.Patch{Codepoint: 533, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3c, 0x2}}, uca.Patch{Codepoint: 534, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3e, 0x2}}, uca.Patch{Codepoint: 535, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3e, 0x2}}, uca.Patch{Codepoint: 7794, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x43, 0x2}}, uca.Patch{Codepoint: 7795, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x43, 0x2}}, uca.Patch{Codepoint: 7796, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x48, 0x2}}, uca.Patch{Codepoint: 7797, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x48, 0x2}}, uca.Patch{Codepoint: 7798, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x46, 0x2}}, uca.Patch{Codepoint: 7799, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x46, 0x2}}, uca.Patch{Codepoint: 7800, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2d, 0x2, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 7801, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2d, 0x2, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 7802, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x32, 0x2, 0x0, 0x2b, 0x2}}, uca.Patch{Codepoint: 7803, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x32, 0x2, 0x0, 0x2b, 0x2}}, uca.Patch{Codepoint: 7908, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x42, 0x2}}, uca.Patch{Codepoint: 7909, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x42, 0x2}}, uca.Patch{Codepoint: 7910, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3b, 0x2}}, uca.Patch{Codepoint: 7911, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3b, 0x2}}, uca.Patch{Codepoint: 7912, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3f, 0x2, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 7913, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3f, 0x2, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 7914, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3f, 0x2, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7915, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3f, 0x2, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7916, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3f, 0x2, 0x0, 0x3b, 0x2}}, uca.Patch{Codepoint: 7917, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3f, 0x2, 0x0, 0x3b, 0x2}}, uca.Patch{Codepoint: 7918, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3f, 0x2, 0x0, 0x2d, 0x2}}, uca.Patch{Codepoint: 7919, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3f, 0x2, 0x0, 0x2d, 0x2}}, uca.Patch{Codepoint: 7920, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3f, 0x2, 0x0, 0x42, 0x2}}, uca.Patch{Codepoint: 7921, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3f, 0x2, 0x0, 0x42, 0x2}}} +var weightTailoring_utf8mb4_la_0900_ai_ci = []uca.Patch{{Codepoint: 74, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21}}, {Codepoint: 85, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21}}, {Codepoint: 106, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0}}, {Codepoint: 117, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0}}, {Codepoint: 217, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 218, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 219, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x27, 0x2}}, {Codepoint: 220, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2b, 0x2}}, {Codepoint: 249, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 250, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 251, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x27, 0x2}}, {Codepoint: 252, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2b, 0x2}}, {Codepoint: 308, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x27, 0x2}}, {Codepoint: 309, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x27, 0x2}}, {Codepoint: 360, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2d, 0x2}}, {Codepoint: 361, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2d, 0x2}}, {Codepoint: 362, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 363, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 364, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x26, 0x2}}, {Codepoint: 365, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x26, 0x2}}, {Codepoint: 366, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x29, 0x2}}, {Codepoint: 367, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x29, 0x2}}, {Codepoint: 368, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2c, 0x2}}, {Codepoint: 369, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2c, 0x2}}, {Codepoint: 370, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x31, 0x2}}, {Codepoint: 371, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x31, 0x2}}, {Codepoint: 431, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3f, 0x2}}, {Codepoint: 432, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3f, 0x2}}, {Codepoint: 467, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x28, 0x2}}, {Codepoint: 468, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x28, 0x2}}, {Codepoint: 469, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2b, 0x2, 0x0, 0x32, 0x2}}, {Codepoint: 470, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2b, 0x2, 0x0, 0x32, 0x2}}, {Codepoint: 471, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2b, 0x2, 0x0, 0x24, 0x2}}, {Codepoint: 472, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2b, 0x2, 0x0, 0x24, 0x2}}, {Codepoint: 473, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2b, 0x2, 0x0, 0x28, 0x2}}, {Codepoint: 474, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2b, 0x2, 0x0, 0x28, 0x2}}, {Codepoint: 475, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2}}, {Codepoint: 476, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2}}, {Codepoint: 496, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x28, 0x2}}, {Codepoint: 532, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3c, 0x2}}, {Codepoint: 533, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3c, 0x2}}, {Codepoint: 534, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3e, 0x2}}, {Codepoint: 535, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3e, 0x2}}, {Codepoint: 7794, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x43, 0x2}}, {Codepoint: 7795, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x43, 0x2}}, {Codepoint: 7796, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x48, 0x2}}, {Codepoint: 7797, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x48, 0x2}}, {Codepoint: 7798, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x46, 0x2}}, {Codepoint: 7799, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x46, 0x2}}, {Codepoint: 7800, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x2d, 0x2, 0x0, 0x24, 0x2}}, {Codepoint: 7801, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x2d, 0x2, 0x0, 0x24, 0x2}}, {Codepoint: 7802, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x32, 0x2, 0x0, 0x2b, 0x2}}, {Codepoint: 7803, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x32, 0x2, 0x0, 0x2b, 0x2}}, {Codepoint: 7908, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x42, 0x2}}, {Codepoint: 7909, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x42, 0x2}}, {Codepoint: 7910, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3b, 0x2}}, {Codepoint: 7911, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3b, 0x2}}, {Codepoint: 7912, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3f, 0x2, 0x0, 0x24, 0x2}}, {Codepoint: 7913, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3f, 0x2, 0x0, 0x24, 0x2}}, {Codepoint: 7914, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3f, 0x2, 0x0, 0x25, 0x2}}, {Codepoint: 7915, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3f, 0x2, 0x0, 0x25, 0x2}}, {Codepoint: 7916, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3f, 0x2, 0x0, 0x3b, 0x2}}, {Codepoint: 7917, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3f, 0x2, 0x0, 0x3b, 0x2}}, {Codepoint: 7918, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3f, 0x2, 0x0, 0x2d, 0x2}}, {Codepoint: 7919, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3f, 0x2, 0x0, 0x2d, 0x2}}, {Codepoint: 7920, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x21, 0x0, 0x3f, 0x2, 0x0, 0x42, 0x2}}, {Codepoint: 7921, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x116, 0x0, 0x0, 0x3f, 0x2, 0x0, 0x42, 0x2}}} -var weightTailoring_utf8mb4_eo_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 264, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 265, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 284, Patch: []uint16{0x1cf4, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 285, Patch: []uint16{0x1cf4, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 292, Patch: []uint16{0x1d18, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 293, Patch: []uint16{0x1d18, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 308, Patch: []uint16{0x1d4c, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 309, Patch: []uint16{0x1d4c, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 348, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 349, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 364, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 365, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0}}} +var weightTailoring_utf8mb4_eo_0900_ai_ci = []uca.Patch{{Codepoint: 264, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 265, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 284, Patch: []uint16{0x1cf4, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 285, Patch: []uint16{0x1cf4, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 292, Patch: []uint16{0x1d18, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 293, Patch: []uint16{0x1d18, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 308, Patch: []uint16{0x1d4c, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 309, Patch: []uint16{0x1d4c, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 348, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 349, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 364, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 365, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0}}} -var weightTailoring_utf8mb4_hu_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 214, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 220, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 246, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 252, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 336, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x116, 0x21}}, uca.Patch{Codepoint: 337, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x116, 0x0}}, uca.Patch{Codepoint: 368, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x116, 0x21}}, uca.Patch{Codepoint: 369, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x116, 0x0}}, uca.Patch{Codepoint: 469, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 470, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 471, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 472, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, uca.Patch{Codepoint: 473, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 474, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x28, 0x2}}, uca.Patch{Codepoint: 475, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 476, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 554, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 555, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x2}}} +var weightTailoring_utf8mb4_hu_0900_ai_ci = []uca.Patch{{Codepoint: 214, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 220, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 246, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 252, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 336, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x116, 0x21}}, {Codepoint: 337, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x116, 0x0}}, {Codepoint: 368, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x116, 0x21}}, {Codepoint: 369, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x116, 0x0}}, {Codepoint: 469, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 470, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 471, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x24, 0x2}}, {Codepoint: 472, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x2}}, {Codepoint: 473, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x28, 0x2}}, {Codepoint: 474, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x28, 0x2}}, {Codepoint: 475, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 476, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 554, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x32, 0x2}}, {Codepoint: 555, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x2}}} type contractor_utf8mb4_hu_0900_ai_ci struct{} @@ -3490,7 +3506,7 @@ func (contractor_utf8mb4_hu_0900_ai_ci) FindContextual(cp1, cp0 rune) []uint16 { var contractor_utf8mb4_hu_0900_ai_ci_weights = [...]uint16{0x1c7a, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1c7a, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x0000, 0x0000, 0x0021, 0x1c7a, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1c7a, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0021, 0x1c7a, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021, 0x1c7a, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0021, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x54a5, 0x0000, 0x0022, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x54a5, 0x0000, 0x0022, 0x0000, 0x0000, 0x0021, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x0000, 0x0000, 0x0021, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x54a5, 0x0000, 0x0022, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x54a5, 0x0000, 0x0021, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0021, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x54a5, 0x0000, 0x0021, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0021, 0x1cf4, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1cf4, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x0000, 0x0000, 0x0021, 0x1cf4, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1cf4, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0021, 0x1cf4, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021, 0x1cf4, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0021, 0x1d77, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1d77, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x0000, 0x0000, 0x0021, 0x1d77, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1d77, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0021, 0x1d77, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021, 0x1d77, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0021, 0x1db9, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1db9, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x0000, 0x0000, 0x0021, 0x1db9, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1db9, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0021, 0x1db9, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021, 0x1db9, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0021, 0x1e71, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1e71, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x0000, 0x0000, 0x0021, 0x1e71, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1e71, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0021, 0x1e71, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021, 0x1e71, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0021, 0x1e95, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1e95, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x0000, 0x0000, 0x0021, 0x1e95, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1e95, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0021, 0x1e95, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021, 0x1e95, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0021, 0x1f21, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1f21, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1f21, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x0000, 0x0000, 0x0021, 0x1f21, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0021, 0x1f21, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0021, 0x1f21, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021, 0x1c7a, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1c7a, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021, 0x1c7a, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x54a5, 0x0000, 0x0000, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x54a5, 0x0000, 0x0000, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1cf4, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1cf4, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021, 0x1cf4, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1d77, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1d77, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021, 0x1d77, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1db9, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1db9, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021, 0x1db9, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1e71, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1e71, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021, 0x1e71, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1e95, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1e95, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021, 0x1e95, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1f21, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1f21, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1f21, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x0000, 0x0000, 0x0021} -var weightTailoring_utf8mb4_hr_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 262, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a6, 0x0, 0x21}}, uca.Patch{Codepoint: 263, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a6, 0x0, 0x0}}, uca.Patch{Codepoint: 268, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 269, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 272, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x54a6, 0x0, 0x21}}, uca.Patch{Codepoint: 273, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x54a6, 0x0, 0x0}}, uca.Patch{Codepoint: 352, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 353, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 381, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 382, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 452, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x54a5, 0x0, 0x25}}, uca.Patch{Codepoint: 453, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x54a5, 0x0, 0x23}}, uca.Patch{Codepoint: 454, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 455, Patch: []uint16{0x1d77, 0x20, 0x8, 0x54a5, 0x0, 0x25}}, uca.Patch{Codepoint: 456, Patch: []uint16{0x1d77, 0x20, 0x8, 0x54a5, 0x0, 0x23}}, uca.Patch{Codepoint: 457, Patch: []uint16{0x1d77, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 458, Patch: []uint16{0x1db9, 0x20, 0x8, 0x54a5, 0x0, 0x25}}, uca.Patch{Codepoint: 459, Patch: []uint16{0x1db9, 0x20, 0x8, 0x54a5, 0x0, 0x23}}, uca.Patch{Codepoint: 460, Patch: []uint16{0x1db9, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 7688, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x30, 0x2}}, uca.Patch{Codepoint: 7689, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x30, 0x2}}, uca.Patch{Codepoint: 7782, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7783, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2e, 0x2}}} +var weightTailoring_utf8mb4_hr_0900_ai_ci = []uca.Patch{{Codepoint: 262, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a6, 0x0, 0x21}}, {Codepoint: 263, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a6, 0x0, 0x0}}, {Codepoint: 268, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 269, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 272, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x54a6, 0x0, 0x21}}, {Codepoint: 273, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x54a6, 0x0, 0x0}}, {Codepoint: 352, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 353, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 381, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 382, Patch: []uint16{0x1f21, 0x20, 0x8, 0x54a5, 0x0, 0x0}}, {Codepoint: 452, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x54a5, 0x0, 0x25}}, {Codepoint: 453, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x54a5, 0x0, 0x23}}, {Codepoint: 454, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 455, Patch: []uint16{0x1d77, 0x20, 0x8, 0x54a5, 0x0, 0x25}}, {Codepoint: 456, Patch: []uint16{0x1d77, 0x20, 0x8, 0x54a5, 0x0, 0x23}}, {Codepoint: 457, Patch: []uint16{0x1d77, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 458, Patch: []uint16{0x1db9, 0x20, 0x8, 0x54a5, 0x0, 0x25}}, {Codepoint: 459, Patch: []uint16{0x1db9, 0x20, 0x8, 0x54a5, 0x0, 0x23}}, {Codepoint: 460, Patch: []uint16{0x1db9, 0x20, 0x8, 0x54a5, 0x0, 0x21}}, {Codepoint: 7688, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a6, 0x0, 0x21, 0x0, 0x30, 0x2}}, {Codepoint: 7689, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x54a6, 0x0, 0x0, 0x0, 0x30, 0x2}}, {Codepoint: 7782, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x21, 0x0, 0x2e, 0x2}}, {Codepoint: 7783, Patch: []uint16{0x1e71, 0x20, 0x8, 0x54a5, 0x0, 0x0, 0x0, 0x2e, 0x2}}} type contractor_utf8mb4_hr_0900_ai_ci struct{} @@ -3547,11 +3563,11 @@ func (contractor_utf8mb4_hr_0900_ai_ci) FindContextual(cp1, cp0 rune) []uint16 { var contractor_utf8mb4_hr_0900_ai_ci_weights = [...]uint16{0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0024, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1d77, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0024, 0x1d77, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1db9, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0024, 0x1db9, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0022, 0x1c8f, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1d77, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000, 0x1db9, 0x0020, 0x0008, 0x54a5, 0x0000, 0x0000} -var reorder_utf8mb4_hr_0900_ai_ci = []uca.Reorder{uca.Reorder{FromMin: 0x1c47, FromMax: 0x1fb5, ToMin: 0x1c47, ToMax: 0x1fb5}, uca.Reorder{FromMin: 0x2022, FromMax: 0x21e1, ToMin: 0x1fb6, ToMax: 0x2175}, uca.Reorder{FromMin: 0x1fb6, FromMax: 0x2021, ToMin: 0x2176, ToMax: 0x21e1}} +var reorder_utf8mb4_hr_0900_ai_ci = []uca.Reorder{{FromMin: 0x1c47, FromMax: 0x1fb5, ToMin: 0x1c47, ToMax: 0x1fb5}, {FromMin: 0x2022, FromMax: 0x21e1, ToMin: 0x1fb6, ToMax: 0x2175}, {FromMin: 0x1fb6, FromMax: 0x2021, ToMin: 0x2176, ToMax: 0x21e1}} -var weightTailoring_utf8mb4_vi_0900_ai_ci = []uca.Patch{uca.Patch{Codepoint: 194, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21}}, uca.Patch{Codepoint: 202, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 212, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 226, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0}}, uca.Patch{Codepoint: 234, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 244, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 258, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 259, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 272, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 273, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 416, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21}}, uca.Patch{Codepoint: 417, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0}}, uca.Patch{Codepoint: 431, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 432, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 769, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 771, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 777, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 803, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 833, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7844, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7845, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7846, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7847, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7848, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7849, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7850, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7851, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7852, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7853, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7854, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7855, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7856, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7857, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7858, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7859, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7860, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7861, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7862, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7863, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7870, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7871, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7872, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7873, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7874, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7875, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7876, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7877, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7878, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7879, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7888, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7889, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7890, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7891, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7892, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7893, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7894, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7895, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7896, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7897, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7898, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7899, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7900, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7901, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7902, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7903, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7904, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7905, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7906, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7907, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7912, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7913, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7914, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7915, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7916, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7917, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7918, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7919, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7920, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7921, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}} +var weightTailoring_utf8mb4_vi_0900_ai_ci = []uca.Patch{{Codepoint: 194, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21}}, {Codepoint: 202, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 212, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 226, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0}}, {Codepoint: 234, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 244, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 258, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 259, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 272, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 273, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 416, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21}}, {Codepoint: 417, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0}}, {Codepoint: 431, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 432, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 769, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 771, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 777, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 803, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 833, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7844, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7845, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7846, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7847, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7848, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7849, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7850, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7851, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7852, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7853, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7854, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7855, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7856, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7857, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7858, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7859, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7860, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7861, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7862, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7863, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7870, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7871, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7872, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7873, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7874, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7875, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7876, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7877, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7878, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7879, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7888, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7889, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7890, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7891, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7892, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7893, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7894, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7895, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7896, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7897, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7898, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7899, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7900, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7901, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7902, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7903, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7904, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7905, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7906, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7907, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7912, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7913, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7914, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7915, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7916, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7917, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7918, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7919, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7920, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7921, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}} -var weightTailoring_utf8mb4_da_0900_as_cs = []uca.Patch{uca.Patch{Codepoint: 196, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a5, 0x116, 0x121}}, uca.Patch{Codepoint: 197, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a7, 0x0, 0x121}}, uca.Patch{Codepoint: 198, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a5, 0x0, 0x121}}, uca.Patch{Codepoint: 208, Patch: []uint16{0x1c8f, 0x20, 0x108, 0x0, 0x117, 0x121}}, uca.Patch{Codepoint: 214, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a6, 0x116, 0x121}}, uca.Patch{Codepoint: 216, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a6, 0x0, 0x121}}, uca.Patch{Codepoint: 220, Patch: []uint16{0x1f0b, 0x20, 0x108, 0x0, 0x116, 0x121}}, uca.Patch{Codepoint: 222, Patch: []uint16{0x1e95, 0x20, 0x108, 0x1d18, 0x20, 0x0, 0x0, 0x0, 0x121}}, uca.Patch{Codepoint: 228, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a5, 0x116, 0x0}}, uca.Patch{Codepoint: 229, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a7, 0x0, 0x0}}, uca.Patch{Codepoint: 230, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 240, Patch: []uint16{0x1c8f, 0x20, 0x308, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 246, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a6, 0x116, 0x0}}, uca.Patch{Codepoint: 248, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a6, 0x0, 0x0}}, uca.Patch{Codepoint: 252, Patch: []uint16{0x1f0b, 0x20, 0x308, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 254, Patch: []uint16{0x1e95, 0x20, 0x302, 0x1d18, 0x20, 0x0, 0x0, 0x0, 0x321}}, uca.Patch{Codepoint: 272, Patch: []uint16{0x1c8f, 0x20, 0x108, 0x0, 0x116, 0x121}}, uca.Patch{Codepoint: 273, Patch: []uint16{0x1c8f, 0x20, 0x308, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 336, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a6, 0x117, 0x121}}, uca.Patch{Codepoint: 337, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a6, 0x117, 0x0}}, uca.Patch{Codepoint: 338, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a6, 0x118, 0x121}}, uca.Patch{Codepoint: 339, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a6, 0x118, 0x0}}, uca.Patch{Codepoint: 368, Patch: []uint16{0x1f0b, 0x20, 0x108, 0x0, 0x117, 0x121}}, uca.Patch{Codepoint: 369, Patch: []uint16{0x1f0b, 0x20, 0x308, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 469, Patch: []uint16{0x1f0b, 0x20, 0x108, 0x0, 0x116, 0x121, 0x0, 0x32, 0x102}}, uca.Patch{Codepoint: 470, Patch: []uint16{0x1f0b, 0x20, 0x308, 0x0, 0x116, 0x0, 0x0, 0x32, 0x302}}, uca.Patch{Codepoint: 471, Patch: []uint16{0x1f0b, 0x20, 0x108, 0x0, 0x116, 0x121, 0x0, 0x24, 0x102}}, uca.Patch{Codepoint: 472, Patch: []uint16{0x1f0b, 0x20, 0x308, 0x0, 0x116, 0x0, 0x0, 0x24, 0x302}}, uca.Patch{Codepoint: 473, Patch: []uint16{0x1f0b, 0x20, 0x108, 0x0, 0x116, 0x121, 0x0, 0x28, 0x102}}, uca.Patch{Codepoint: 474, Patch: []uint16{0x1f0b, 0x20, 0x308, 0x0, 0x116, 0x0, 0x0, 0x28, 0x302}}, uca.Patch{Codepoint: 475, Patch: []uint16{0x1f0b, 0x20, 0x108, 0x0, 0x116, 0x121, 0x0, 0x25, 0x102}}, uca.Patch{Codepoint: 476, Patch: []uint16{0x1f0b, 0x20, 0x308, 0x0, 0x116, 0x0, 0x0, 0x25, 0x302}}, uca.Patch{Codepoint: 478, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a5, 0x116, 0x121, 0x0, 0x32, 0x102}}, uca.Patch{Codepoint: 479, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a5, 0x116, 0x0, 0x0, 0x32, 0x302}}, uca.Patch{Codepoint: 482, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a5, 0x0, 0x121, 0x0, 0x32, 0x102}}, uca.Patch{Codepoint: 483, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x302}}, uca.Patch{Codepoint: 506, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a7, 0x0, 0x121, 0x0, 0x24, 0x102}}, uca.Patch{Codepoint: 507, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a7, 0x0, 0x0, 0x0, 0x24, 0x302}}, uca.Patch{Codepoint: 508, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a5, 0x0, 0x121, 0x0, 0x24, 0x102}}, uca.Patch{Codepoint: 509, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x302}}, uca.Patch{Codepoint: 510, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a6, 0x0, 0x121, 0x0, 0x24, 0x102}}, uca.Patch{Codepoint: 511, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a6, 0x0, 0x0, 0x0, 0x24, 0x302}}, uca.Patch{Codepoint: 554, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a6, 0x116, 0x121, 0x0, 0x32, 0x102}}, uca.Patch{Codepoint: 555, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a6, 0x116, 0x0, 0x0, 0x32, 0x302}}, uca.Patch{Codepoint: 8491, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a7, 0x0, 0x121}}} +var weightTailoring_utf8mb4_da_0900_as_cs = []uca.Patch{{Codepoint: 196, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a5, 0x116, 0x121}}, {Codepoint: 197, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a7, 0x0, 0x121}}, {Codepoint: 198, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a5, 0x0, 0x121}}, {Codepoint: 208, Patch: []uint16{0x1c8f, 0x20, 0x108, 0x0, 0x117, 0x121}}, {Codepoint: 214, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a6, 0x116, 0x121}}, {Codepoint: 216, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a6, 0x0, 0x121}}, {Codepoint: 220, Patch: []uint16{0x1f0b, 0x20, 0x108, 0x0, 0x116, 0x121}}, {Codepoint: 222, Patch: []uint16{0x1e95, 0x20, 0x108, 0x1d18, 0x20, 0x0, 0x0, 0x0, 0x121}}, {Codepoint: 228, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a5, 0x116, 0x0}}, {Codepoint: 229, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a7, 0x0, 0x0}}, {Codepoint: 230, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a5, 0x0, 0x0}}, {Codepoint: 240, Patch: []uint16{0x1c8f, 0x20, 0x308, 0x0, 0x117, 0x0}}, {Codepoint: 246, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a6, 0x116, 0x0}}, {Codepoint: 248, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a6, 0x0, 0x0}}, {Codepoint: 252, Patch: []uint16{0x1f0b, 0x20, 0x308, 0x0, 0x116, 0x0}}, {Codepoint: 254, Patch: []uint16{0x1e95, 0x20, 0x302, 0x1d18, 0x20, 0x0, 0x0, 0x0, 0x321}}, {Codepoint: 272, Patch: []uint16{0x1c8f, 0x20, 0x108, 0x0, 0x116, 0x121}}, {Codepoint: 273, Patch: []uint16{0x1c8f, 0x20, 0x308, 0x0, 0x116, 0x0}}, {Codepoint: 336, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a6, 0x117, 0x121}}, {Codepoint: 337, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a6, 0x117, 0x0}}, {Codepoint: 338, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a6, 0x118, 0x121}}, {Codepoint: 339, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a6, 0x118, 0x0}}, {Codepoint: 368, Patch: []uint16{0x1f0b, 0x20, 0x108, 0x0, 0x117, 0x121}}, {Codepoint: 369, Patch: []uint16{0x1f0b, 0x20, 0x308, 0x0, 0x117, 0x0}}, {Codepoint: 469, Patch: []uint16{0x1f0b, 0x20, 0x108, 0x0, 0x116, 0x121, 0x0, 0x32, 0x102}}, {Codepoint: 470, Patch: []uint16{0x1f0b, 0x20, 0x308, 0x0, 0x116, 0x0, 0x0, 0x32, 0x302}}, {Codepoint: 471, Patch: []uint16{0x1f0b, 0x20, 0x108, 0x0, 0x116, 0x121, 0x0, 0x24, 0x102}}, {Codepoint: 472, Patch: []uint16{0x1f0b, 0x20, 0x308, 0x0, 0x116, 0x0, 0x0, 0x24, 0x302}}, {Codepoint: 473, Patch: []uint16{0x1f0b, 0x20, 0x108, 0x0, 0x116, 0x121, 0x0, 0x28, 0x102}}, {Codepoint: 474, Patch: []uint16{0x1f0b, 0x20, 0x308, 0x0, 0x116, 0x0, 0x0, 0x28, 0x302}}, {Codepoint: 475, Patch: []uint16{0x1f0b, 0x20, 0x108, 0x0, 0x116, 0x121, 0x0, 0x25, 0x102}}, {Codepoint: 476, Patch: []uint16{0x1f0b, 0x20, 0x308, 0x0, 0x116, 0x0, 0x0, 0x25, 0x302}}, {Codepoint: 478, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a5, 0x116, 0x121, 0x0, 0x32, 0x102}}, {Codepoint: 479, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a5, 0x116, 0x0, 0x0, 0x32, 0x302}}, {Codepoint: 482, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a5, 0x0, 0x121, 0x0, 0x32, 0x102}}, {Codepoint: 483, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a5, 0x0, 0x0, 0x0, 0x32, 0x302}}, {Codepoint: 506, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a7, 0x0, 0x121, 0x0, 0x24, 0x102}}, {Codepoint: 507, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a7, 0x0, 0x0, 0x0, 0x24, 0x302}}, {Codepoint: 508, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a5, 0x0, 0x121, 0x0, 0x24, 0x102}}, {Codepoint: 509, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a5, 0x0, 0x0, 0x0, 0x24, 0x302}}, {Codepoint: 510, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a6, 0x0, 0x121, 0x0, 0x24, 0x102}}, {Codepoint: 511, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a6, 0x0, 0x0, 0x0, 0x24, 0x302}}, {Codepoint: 554, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a6, 0x116, 0x121, 0x0, 0x32, 0x102}}, {Codepoint: 555, Patch: []uint16{0x1f98, 0x20, 0x302, 0x54a6, 0x116, 0x0, 0x0, 0x32, 0x302}}, {Codepoint: 8491, Patch: []uint16{0x1f98, 0x20, 0x102, 0x54a7, 0x0, 0x121}}} type contractor_utf8mb4_da_0900_as_cs struct{} @@ -3580,7 +3596,7 @@ func (contractor_utf8mb4_da_0900_as_cs) FindContextual(cp1, cp0 rune) []uint16 { var contractor_utf8mb4_da_0900_as_cs_weights = [...]uint16{0x1f98, 0x0020, 0x0102, 0x54a7, 0x0000, 0x0124, 0x1f98, 0x0020, 0x0202, 0x54a7, 0x0000, 0x0223, 0x1f98, 0x0020, 0x0302, 0x54a7, 0x0000, 0x0322} -var weightTailoring_utf8mb4_vi_0900_as_cs = []uca.Patch{uca.Patch{Codepoint: 193, Patch: []uint16{0x1c47, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 194, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21}}, uca.Patch{Codepoint: 195, Patch: []uint16{0x1c47, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 201, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 202, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 205, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 209, Patch: []uint16{0x1db9, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 211, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 212, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 213, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 218, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 221, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 225, Patch: []uint16{0x1c47, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 226, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0}}, uca.Patch{Codepoint: 227, Patch: []uint16{0x1c47, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 233, Patch: []uint16{0x1caa, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 234, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 237, Patch: []uint16{0x1d32, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 241, Patch: []uint16{0x1db9, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 243, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 244, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 245, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 250, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 253, Patch: []uint16{0x1f0b, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 258, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 259, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 262, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 263, Patch: []uint16{0x1c7a, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 272, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 273, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 296, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 297, Patch: []uint16{0x1d32, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 313, Patch: []uint16{0x1d77, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 314, Patch: []uint16{0x1d77, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 323, Patch: []uint16{0x1db9, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 324, Patch: []uint16{0x1db9, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 340, Patch: []uint16{0x1e33, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 341, Patch: []uint16{0x1e33, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 346, Patch: []uint16{0x1e71, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 347, Patch: []uint16{0x1e71, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 360, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 361, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 377, Patch: []uint16{0x1f21, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 378, Patch: []uint16{0x1f21, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 416, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21}}, uca.Patch{Codepoint: 417, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0}}, uca.Patch{Codepoint: 431, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, uca.Patch{Codepoint: 432, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, uca.Patch{Codepoint: 471, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 472, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 500, Patch: []uint16{0x1cf4, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 501, Patch: []uint16{0x1cf4, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 506, Patch: []uint16{0x1c47, 0x20, 0x8, 0x0, 0x29, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 507, Patch: []uint16{0x1c47, 0x20, 0x2, 0x0, 0x29, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 508, Patch: []uint16{0x1c47, 0x20, 0xa, 0x0, 0x110, 0x4, 0x1caa, 0x20, 0xa, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 509, Patch: []uint16{0x1c47, 0x20, 0x4, 0x0, 0x110, 0x4, 0x1caa, 0x20, 0x4, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 510, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x2f, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 511, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x2f, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 556, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 557, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 769, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 771, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 777, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 803, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 833, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 836, Patch: []uint16{0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 901, Patch: []uint16{0x489, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 902, Patch: []uint16{0x1fb9, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 904, Patch: []uint16{0x1fbe, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 905, Patch: []uint16{0x1fc4, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 906, Patch: []uint16{0x1fc6, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 908, Patch: []uint16{0x1fce, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 910, Patch: []uint16{0x1fdc, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 911, Patch: []uint16{0x1fe1, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 912, Patch: []uint16{0x1fc6, 0x20, 0x2, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 940, Patch: []uint16{0x1fb9, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 941, Patch: []uint16{0x1fbe, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 942, Patch: []uint16{0x1fc4, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 943, Patch: []uint16{0x1fc6, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 944, Patch: []uint16{0x1fdc, 0x20, 0x2, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 972, Patch: []uint16{0x1fce, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 973, Patch: []uint16{0x1fdc, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 974, Patch: []uint16{0x1fe1, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 979, Patch: []uint16{0x1fdc, 0x20, 0xa, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 1027, Patch: []uint16{0x2036, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 1036, Patch: []uint16{0x2096, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 1107, Patch: []uint16{0x2036, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 1116, Patch: []uint16{0x2096, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7684, Patch: []uint16{0x1c60, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7685, Patch: []uint16{0x1c60, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7688, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x0, 0x30, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7689, Patch: []uint16{0x1c7a, 0x20, 0x2, 0x0, 0x30, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7692, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7693, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7702, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x32, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7703, Patch: []uint16{0x1caa, 0x20, 0x2, 0x0, 0x32, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7716, Patch: []uint16{0x1d18, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7717, Patch: []uint16{0x1d18, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7726, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7727, Patch: []uint16{0x1d32, 0x20, 0x2, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7728, Patch: []uint16{0x1d65, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7729, Patch: []uint16{0x1d65, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7730, Patch: []uint16{0x1d65, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7731, Patch: []uint16{0x1d65, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7734, Patch: []uint16{0x1d77, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7735, Patch: []uint16{0x1d77, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7736, Patch: []uint16{0x1d77, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 7737, Patch: []uint16{0x1d77, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 7742, Patch: []uint16{0x1daa, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7743, Patch: []uint16{0x1daa, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7746, Patch: []uint16{0x1daa, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7747, Patch: []uint16{0x1daa, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7750, Patch: []uint16{0x1db9, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7751, Patch: []uint16{0x1db9, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7756, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7757, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7758, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0, 0x0, 0x2b, 0x2}}, uca.Patch{Codepoint: 7759, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0, 0x0, 0x2b, 0x2}}, uca.Patch{Codepoint: 7762, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x32, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7763, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x32, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7764, Patch: []uint16{0x1e0c, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7765, Patch: []uint16{0x1e0c, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7770, Patch: []uint16{0x1e33, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7771, Patch: []uint16{0x1e33, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7772, Patch: []uint16{0x1e33, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 7773, Patch: []uint16{0x1e33, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0, 0x0, 0x32, 0x2}}, uca.Patch{Codepoint: 7778, Patch: []uint16{0x1e71, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7779, Patch: []uint16{0x1e71, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7780, Patch: []uint16{0x1e71, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7781, Patch: []uint16{0x1e71, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7784, Patch: []uint16{0x1e71, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7785, Patch: []uint16{0x1e71, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0, 0x0, 0x2e, 0x2}}, uca.Patch{Codepoint: 7788, Patch: []uint16{0x1e95, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7789, Patch: []uint16{0x1e95, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7800, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7801, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7804, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7805, Patch: []uint16{0x1ee3, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7806, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7807, Patch: []uint16{0x1ee3, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7810, Patch: []uint16{0x1ef5, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7811, Patch: []uint16{0x1ef5, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7816, Patch: []uint16{0x1ef5, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7817, Patch: []uint16{0x1ef5, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7826, Patch: []uint16{0x1f21, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7827, Patch: []uint16{0x1f21, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7840, Patch: []uint16{0x1c47, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7841, Patch: []uint16{0x1c47, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7842, Patch: []uint16{0x1c47, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7843, Patch: []uint16{0x1c47, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7844, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7845, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7846, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7847, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7848, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7849, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7850, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7851, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7852, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7853, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7854, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7855, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7856, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7857, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7858, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7859, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7860, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7861, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7862, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7863, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7864, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7865, Patch: []uint16{0x1caa, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7866, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7867, Patch: []uint16{0x1caa, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7868, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7869, Patch: []uint16{0x1caa, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7870, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7871, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7872, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7873, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7874, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7875, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7876, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7877, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7878, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7879, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7880, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7881, Patch: []uint16{0x1d32, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7882, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7883, Patch: []uint16{0x1d32, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7884, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7885, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7886, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7887, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7888, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7889, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7890, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7891, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7892, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7893, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7894, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7895, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7896, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7897, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7898, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7899, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7900, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7901, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7902, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7903, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7904, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7905, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7906, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7907, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7908, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7909, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7910, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7911, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7912, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7913, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7914, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7915, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, uca.Patch{Codepoint: 7916, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7917, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7918, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7919, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7920, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7921, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7924, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7925, Patch: []uint16{0x1f0b, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, uca.Patch{Codepoint: 7926, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7927, Patch: []uint16{0x1f0b, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, uca.Patch{Codepoint: 7928, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7929, Patch: []uint16{0x1f0b, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, uca.Patch{Codepoint: 7940, Patch: []uint16{0x1fb9, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7941, Patch: []uint16{0x1fb9, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7948, Patch: []uint16{0x1fb9, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7949, Patch: []uint16{0x1fb9, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7956, Patch: []uint16{0x1fbe, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7957, Patch: []uint16{0x1fbe, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7964, Patch: []uint16{0x1fbe, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7965, Patch: []uint16{0x1fbe, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7972, Patch: []uint16{0x1fc4, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7973, Patch: []uint16{0x1fc4, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7980, Patch: []uint16{0x1fc4, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7981, Patch: []uint16{0x1fc4, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7988, Patch: []uint16{0x1fc6, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7989, Patch: []uint16{0x1fc6, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7996, Patch: []uint16{0x1fc6, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 7997, Patch: []uint16{0x1fc6, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8004, Patch: []uint16{0x1fce, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8005, Patch: []uint16{0x1fce, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8012, Patch: []uint16{0x1fce, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8013, Patch: []uint16{0x1fce, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8020, Patch: []uint16{0x1fdc, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8021, Patch: []uint16{0x1fdc, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8029, Patch: []uint16{0x1fdc, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8036, Patch: []uint16{0x1fe1, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8037, Patch: []uint16{0x1fe1, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8044, Patch: []uint16{0x1fe1, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8045, Patch: []uint16{0x1fe1, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8049, Patch: []uint16{0x1fb9, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8051, Patch: []uint16{0x1fbe, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8053, Patch: []uint16{0x1fc4, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8055, Patch: []uint16{0x1fc6, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8057, Patch: []uint16{0x1fce, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8059, Patch: []uint16{0x1fdc, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8061, Patch: []uint16{0x1fe1, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8068, Patch: []uint16{0x1fb9, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, uca.Patch{Codepoint: 8069, Patch: []uint16{0x1fb9, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, uca.Patch{Codepoint: 8076, Patch: []uint16{0x1fb9, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, uca.Patch{Codepoint: 8077, Patch: []uint16{0x1fb9, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, uca.Patch{Codepoint: 8084, Patch: []uint16{0x1fc4, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, uca.Patch{Codepoint: 8085, Patch: []uint16{0x1fc4, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, uca.Patch{Codepoint: 8092, Patch: []uint16{0x1fc4, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, uca.Patch{Codepoint: 8093, Patch: []uint16{0x1fc4, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, uca.Patch{Codepoint: 8100, Patch: []uint16{0x1fe1, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, uca.Patch{Codepoint: 8101, Patch: []uint16{0x1fe1, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, uca.Patch{Codepoint: 8108, Patch: []uint16{0x1fe1, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, uca.Patch{Codepoint: 8109, Patch: []uint16{0x1fe1, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, uca.Patch{Codepoint: 8116, Patch: []uint16{0x1fb9, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, uca.Patch{Codepoint: 8123, Patch: []uint16{0x1fb9, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8132, Patch: []uint16{0x1fc4, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, uca.Patch{Codepoint: 8137, Patch: []uint16{0x1fbe, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8139, Patch: []uint16{0x1fc4, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8142, Patch: []uint16{0x48e, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8147, Patch: []uint16{0x1fc6, 0x20, 0x2, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8155, Patch: []uint16{0x1fc6, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8158, Patch: []uint16{0x48f, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8163, Patch: []uint16{0x1fdc, 0x20, 0x2, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8171, Patch: []uint16{0x1fdc, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8174, Patch: []uint16{0x489, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8180, Patch: []uint16{0x1fe1, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, uca.Patch{Codepoint: 8185, Patch: []uint16{0x1fce, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, uca.Patch{Codepoint: 8187, Patch: []uint16{0x1fe1, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}} +var weightTailoring_utf8mb4_vi_0900_as_cs = []uca.Patch{{Codepoint: 193, Patch: []uint16{0x1c47, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 194, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21}}, {Codepoint: 195, Patch: []uint16{0x1c47, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 201, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 202, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 205, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 209, Patch: []uint16{0x1db9, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 211, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 212, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 213, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 218, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 221, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 225, Patch: []uint16{0x1c47, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 226, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0}}, {Codepoint: 227, Patch: []uint16{0x1c47, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 233, Patch: []uint16{0x1caa, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 234, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 237, Patch: []uint16{0x1d32, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 241, Patch: []uint16{0x1db9, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 243, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 244, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 245, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 250, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 253, Patch: []uint16{0x1f0b, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 258, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 259, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 262, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 263, Patch: []uint16{0x1c7a, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 272, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 273, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 296, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 297, Patch: []uint16{0x1d32, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 313, Patch: []uint16{0x1d77, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 314, Patch: []uint16{0x1d77, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 323, Patch: []uint16{0x1db9, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 324, Patch: []uint16{0x1db9, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 340, Patch: []uint16{0x1e33, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 341, Patch: []uint16{0x1e33, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 346, Patch: []uint16{0x1e71, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 347, Patch: []uint16{0x1e71, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 360, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 361, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 377, Patch: []uint16{0x1f21, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 378, Patch: []uint16{0x1f21, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 416, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21}}, {Codepoint: 417, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0}}, {Codepoint: 431, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21}}, {Codepoint: 432, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0}}, {Codepoint: 471, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 472, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 500, Patch: []uint16{0x1cf4, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 501, Patch: []uint16{0x1cf4, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 506, Patch: []uint16{0x1c47, 0x20, 0x8, 0x0, 0x29, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 507, Patch: []uint16{0x1c47, 0x20, 0x2, 0x0, 0x29, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 508, Patch: []uint16{0x1c47, 0x20, 0xa, 0x0, 0x110, 0x4, 0x1caa, 0x20, 0xa, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 509, Patch: []uint16{0x1c47, 0x20, 0x4, 0x0, 0x110, 0x4, 0x1caa, 0x20, 0x4, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 510, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x2f, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 511, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x2f, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 556, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 557, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 769, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 771, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 777, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 803, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 833, Patch: []uint16{0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 836, Patch: []uint16{0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 901, Patch: []uint16{0x489, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 902, Patch: []uint16{0x1fb9, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 904, Patch: []uint16{0x1fbe, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 905, Patch: []uint16{0x1fc4, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 906, Patch: []uint16{0x1fc6, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 908, Patch: []uint16{0x1fce, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 910, Patch: []uint16{0x1fdc, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 911, Patch: []uint16{0x1fe1, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 912, Patch: []uint16{0x1fc6, 0x20, 0x2, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 940, Patch: []uint16{0x1fb9, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 941, Patch: []uint16{0x1fbe, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 942, Patch: []uint16{0x1fc4, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 943, Patch: []uint16{0x1fc6, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 944, Patch: []uint16{0x1fdc, 0x20, 0x2, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 972, Patch: []uint16{0x1fce, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 973, Patch: []uint16{0x1fdc, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 974, Patch: []uint16{0x1fe1, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 979, Patch: []uint16{0x1fdc, 0x20, 0xa, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 1027, Patch: []uint16{0x2036, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 1036, Patch: []uint16{0x2096, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 1107, Patch: []uint16{0x2036, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 1116, Patch: []uint16{0x2096, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7684, Patch: []uint16{0x1c60, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7685, Patch: []uint16{0x1c60, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7688, Patch: []uint16{0x1c7a, 0x20, 0x8, 0x0, 0x30, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7689, Patch: []uint16{0x1c7a, 0x20, 0x2, 0x0, 0x30, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7692, Patch: []uint16{0x1c8f, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7693, Patch: []uint16{0x1c8f, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7702, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x32, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7703, Patch: []uint16{0x1caa, 0x20, 0x2, 0x0, 0x32, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7716, Patch: []uint16{0x1d18, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7717, Patch: []uint16{0x1d18, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7726, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7727, Patch: []uint16{0x1d32, 0x20, 0x2, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7728, Patch: []uint16{0x1d65, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7729, Patch: []uint16{0x1d65, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7730, Patch: []uint16{0x1d65, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7731, Patch: []uint16{0x1d65, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7734, Patch: []uint16{0x1d77, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7735, Patch: []uint16{0x1d77, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7736, Patch: []uint16{0x1d77, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 7737, Patch: []uint16{0x1d77, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 7742, Patch: []uint16{0x1daa, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7743, Patch: []uint16{0x1daa, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7746, Patch: []uint16{0x1daa, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7747, Patch: []uint16{0x1daa, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7750, Patch: []uint16{0x1db9, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7751, Patch: []uint16{0x1db9, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7756, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7757, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7758, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0, 0x0, 0x2b, 0x2}}, {Codepoint: 7759, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0, 0x0, 0x2b, 0x2}}, {Codepoint: 7762, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x32, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7763, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x32, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7764, Patch: []uint16{0x1e0c, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7765, Patch: []uint16{0x1e0c, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7770, Patch: []uint16{0x1e33, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7771, Patch: []uint16{0x1e33, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7772, Patch: []uint16{0x1e33, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 7773, Patch: []uint16{0x1e33, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0, 0x0, 0x32, 0x2}}, {Codepoint: 7778, Patch: []uint16{0x1e71, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7779, Patch: []uint16{0x1e71, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7780, Patch: []uint16{0x1e71, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x2e, 0x2}}, {Codepoint: 7781, Patch: []uint16{0x1e71, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x2e, 0x2}}, {Codepoint: 7784, Patch: []uint16{0x1e71, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0, 0x0, 0x2e, 0x2}}, {Codepoint: 7785, Patch: []uint16{0x1e71, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0, 0x0, 0x2e, 0x2}}, {Codepoint: 7788, Patch: []uint16{0x1e95, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7789, Patch: []uint16{0x1e95, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7800, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7801, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7804, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7805, Patch: []uint16{0x1ee3, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7806, Patch: []uint16{0x1ee3, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7807, Patch: []uint16{0x1ee3, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7810, Patch: []uint16{0x1ef5, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7811, Patch: []uint16{0x1ef5, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7816, Patch: []uint16{0x1ef5, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7817, Patch: []uint16{0x1ef5, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7826, Patch: []uint16{0x1f21, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7827, Patch: []uint16{0x1f21, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7840, Patch: []uint16{0x1c47, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7841, Patch: []uint16{0x1c47, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7842, Patch: []uint16{0x1c47, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7843, Patch: []uint16{0x1c47, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7844, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7845, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7846, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7847, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7848, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7849, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7850, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7851, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7852, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7853, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7854, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7855, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7856, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7857, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7858, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7859, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7860, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7861, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7862, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7863, Patch: []uint16{0x1c47, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7864, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7865, Patch: []uint16{0x1caa, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7866, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7867, Patch: []uint16{0x1caa, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7868, Patch: []uint16{0x1caa, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7869, Patch: []uint16{0x1caa, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7870, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7871, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7872, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7873, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7874, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7875, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7876, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7877, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7878, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7879, Patch: []uint16{0x1caa, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7880, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7881, Patch: []uint16{0x1d32, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7882, Patch: []uint16{0x1d32, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7883, Patch: []uint16{0x1d32, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7884, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7885, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7886, Patch: []uint16{0x1ddd, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7887, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7888, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7889, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7890, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7891, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7892, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7893, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7894, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7895, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7896, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7897, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7898, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7899, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7900, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7901, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7902, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7903, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7904, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7905, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7906, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7907, Patch: []uint16{0x1ddd, 0x20, 0x2, 0x54a6, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7908, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7909, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7910, Patch: []uint16{0x1eb5, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7911, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7912, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7913, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7914, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2}}, {Codepoint: 7915, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2}}, {Codepoint: 7916, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7917, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7918, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7919, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7920, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x21, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7921, Patch: []uint16{0x1eb5, 0x20, 0x2, 0x54a5, 0x0, 0x0, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7924, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7925, Patch: []uint16{0x1f0b, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x119, 0x0}}, {Codepoint: 7926, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7927, Patch: []uint16{0x1f0b, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x116, 0x0}}, {Codepoint: 7928, Patch: []uint16{0x1f0b, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7929, Patch: []uint16{0x1f0b, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x117, 0x0}}, {Codepoint: 7940, Patch: []uint16{0x1fb9, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7941, Patch: []uint16{0x1fb9, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7948, Patch: []uint16{0x1fb9, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7949, Patch: []uint16{0x1fb9, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7956, Patch: []uint16{0x1fbe, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7957, Patch: []uint16{0x1fbe, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7964, Patch: []uint16{0x1fbe, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7965, Patch: []uint16{0x1fbe, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7972, Patch: []uint16{0x1fc4, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7973, Patch: []uint16{0x1fc4, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7980, Patch: []uint16{0x1fc4, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7981, Patch: []uint16{0x1fc4, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7988, Patch: []uint16{0x1fc6, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7989, Patch: []uint16{0x1fc6, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7996, Patch: []uint16{0x1fc6, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 7997, Patch: []uint16{0x1fc6, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8004, Patch: []uint16{0x1fce, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8005, Patch: []uint16{0x1fce, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8012, Patch: []uint16{0x1fce, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8013, Patch: []uint16{0x1fce, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8020, Patch: []uint16{0x1fdc, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8021, Patch: []uint16{0x1fdc, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8029, Patch: []uint16{0x1fdc, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8036, Patch: []uint16{0x1fe1, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8037, Patch: []uint16{0x1fe1, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8044, Patch: []uint16{0x1fe1, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8045, Patch: []uint16{0x1fe1, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8049, Patch: []uint16{0x1fb9, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8051, Patch: []uint16{0x1fbe, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8053, Patch: []uint16{0x1fc4, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8055, Patch: []uint16{0x1fc6, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8057, Patch: []uint16{0x1fce, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8059, Patch: []uint16{0x1fdc, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8061, Patch: []uint16{0x1fe1, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8068, Patch: []uint16{0x1fb9, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, {Codepoint: 8069, Patch: []uint16{0x1fb9, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, {Codepoint: 8076, Patch: []uint16{0x1fb9, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, {Codepoint: 8077, Patch: []uint16{0x1fb9, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, {Codepoint: 8084, Patch: []uint16{0x1fc4, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, {Codepoint: 8085, Patch: []uint16{0x1fc4, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, {Codepoint: 8092, Patch: []uint16{0x1fc4, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, {Codepoint: 8093, Patch: []uint16{0x1fc4, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, {Codepoint: 8100, Patch: []uint16{0x1fe1, 0x20, 0x2, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, {Codepoint: 8101, Patch: []uint16{0x1fe1, 0x20, 0x2, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, {Codepoint: 8108, Patch: []uint16{0x1fe1, 0x20, 0x8, 0x0, 0x22, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, {Codepoint: 8109, Patch: []uint16{0x1fe1, 0x20, 0x8, 0x0, 0x23, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, {Codepoint: 8116, Patch: []uint16{0x1fb9, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, {Codepoint: 8123, Patch: []uint16{0x1fb9, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8132, Patch: []uint16{0x1fc4, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, {Codepoint: 8137, Patch: []uint16{0x1fbe, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8139, Patch: []uint16{0x1fc4, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8142, Patch: []uint16{0x48e, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8147, Patch: []uint16{0x1fc6, 0x20, 0x2, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8155, Patch: []uint16{0x1fc6, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8158, Patch: []uint16{0x48f, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8163, Patch: []uint16{0x1fdc, 0x20, 0x2, 0x0, 0x2b, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8171, Patch: []uint16{0x1fdc, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8174, Patch: []uint16{0x489, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8180, Patch: []uint16{0x1fe1, 0x20, 0x2, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0, 0x0, 0x4c, 0x2}}, {Codepoint: 8185, Patch: []uint16{0x1fce, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}, {Codepoint: 8187, Patch: []uint16{0x1fe1, 0x20, 0x8, 0x0, 0x25, 0x2, 0x0, 0x118, 0x0}}} type contractor_utf8mb4_ja_0900_as_cs struct{} @@ -3588,7 +3604,7 @@ func (contractor_utf8mb4_ja_0900_as_cs) Find(charset.Charset, rune, []byte) ([]u return nil, nil, 0 } -var contractor_utf8mb4_ja_0900_as_cs_weights = map[uint32][]uint16{0x309d3041: []uint16{0x3d5a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3042: []uint16{0x3d5a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3043: []uint16{0x3d5b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3044: []uint16{0x3d5b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3045: []uint16{0x3d5c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3046: []uint16{0x3d5c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3047: []uint16{0x3d5e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3048: []uint16{0x3d5e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3049: []uint16{0x3d5f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d304a: []uint16{0x3d5f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d304b: []uint16{0x3d60, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d304c: []uint16{0x3d60, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309d304d: []uint16{0x3d61, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d304e: []uint16{0x3d61, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d304f: []uint16{0x3d62, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3050: []uint16{0x3d62, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3051: []uint16{0x3d63, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3052: []uint16{0x3d63, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309d3053: []uint16{0x3d64, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3054: []uint16{0x3d64, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3055: []uint16{0x3d65, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3056: []uint16{0x3d65, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3057: []uint16{0x3d66, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3058: []uint16{0x3d66, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3059: []uint16{0x3d67, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d305a: []uint16{0x3d67, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d305b: []uint16{0x3d68, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d305c: []uint16{0x3d68, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d305d: []uint16{0x3d69, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d305e: []uint16{0x3d69, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d305f: []uint16{0x3d6a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3060: []uint16{0x3d6a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3061: []uint16{0x3d6b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3062: []uint16{0x3d6b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3063: []uint16{0x3d6c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3064: []uint16{0x3d6c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3065: []uint16{0x3d6c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3066: []uint16{0x3d6d, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3067: []uint16{0x3d6d, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3068: []uint16{0x3d6e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3069: []uint16{0x3d6e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d306a: []uint16{0x3d6f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d306b: []uint16{0x3d70, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d306c: []uint16{0x3d71, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d306d: []uint16{0x3d72, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d306e: []uint16{0x3d73, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d306f: []uint16{0x3d74, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3070: []uint16{0x3d74, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3071: []uint16{0x3d74, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3072: []uint16{0x3d75, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3073: []uint16{0x3d75, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3074: []uint16{0x3d75, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3075: []uint16{0x3d76, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3076: []uint16{0x3d76, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3077: []uint16{0x3d76, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3078: []uint16{0x3d77, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3079: []uint16{0x3d77, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d307a: []uint16{0x3d77, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d307b: []uint16{0x3d78, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d307c: []uint16{0x3d78, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d307d: []uint16{0x3d78, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d307e: []uint16{0x3d79, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d307f: []uint16{0x3d7a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3080: []uint16{0x3d7b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3081: []uint16{0x3d7c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3082: []uint16{0x3d7d, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3083: []uint16{0x3d7e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3084: []uint16{0x3d7e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3085: []uint16{0x3d7f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3086: []uint16{0x3d7f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3087: []uint16{0x3d81, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3088: []uint16{0x3d81, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3089: []uint16{0x3d82, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d308a: []uint16{0x3d83, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d308b: []uint16{0x3d84, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d308c: []uint16{0x3d85, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d308d: []uint16{0x3d86, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d308e: []uint16{0x3d87, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d308f: []uint16{0x3d87, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3090: []uint16{0x3d88, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3091: []uint16{0x3d89, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3092: []uint16{0x3d8a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3093: []uint16{0x3d8b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3094: []uint16{0x3d5c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3095: []uint16{0x3d60, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3096: []uint16{0x3d63, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309e3045: []uint16{0x3d5c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3046: []uint16{0x3d5c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e304d: []uint16{0x3d61, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e304e: []uint16{0x3d61, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e304f: []uint16{0x3d62, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3050: []uint16{0x3d62, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3053: []uint16{0x3d64, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3054: []uint16{0x3d64, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3055: []uint16{0x3d65, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3056: []uint16{0x3d65, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3057: []uint16{0x3d66, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3058: []uint16{0x3d66, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3059: []uint16{0x3d67, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e305a: []uint16{0x3d67, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e305b: []uint16{0x3d68, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e305c: []uint16{0x3d68, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e305d: []uint16{0x3d69, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e305e: []uint16{0x3d69, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e305f: []uint16{0x3d6a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3060: []uint16{0x3d6a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3061: []uint16{0x3d6b, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3062: []uint16{0x3d6b, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3063: []uint16{0x3d6c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3064: []uint16{0x3d6c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3065: []uint16{0x3d6c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3066: []uint16{0x3d6d, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3067: []uint16{0x3d6d, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3068: []uint16{0x3d6e, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3069: []uint16{0x3d6e, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e306f: []uint16{0x3d74, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3070: []uint16{0x3d74, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3071: []uint16{0x3d74, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3072: []uint16{0x3d75, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3073: []uint16{0x3d75, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3074: []uint16{0x3d75, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3075: []uint16{0x3d76, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3076: []uint16{0x3d76, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3077: []uint16{0x3d76, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3078: []uint16{0x3d77, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3079: []uint16{0x3d77, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e307a: []uint16{0x3d77, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e307b: []uint16{0x3d78, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e307c: []uint16{0x3d78, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e307d: []uint16{0x3d78, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e308e: []uint16{0x3d87, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e308f: []uint16{0x3d87, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3090: []uint16{0x3d88, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3091: []uint16{0x3d89, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3092: []uint16{0x3d8a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3094: []uint16{0x3d5c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fc3041: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3042: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3043: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3044: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3045: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3046: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3047: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3048: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3049: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc304a: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc304b: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc304c: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc304d: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc304e: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc304f: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3050: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3051: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3052: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3053: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3054: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3055: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3056: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3057: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3058: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3059: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc305a: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc305b: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc305c: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc305d: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc305e: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc305f: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3060: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3061: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3062: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3063: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3064: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3065: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3066: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3067: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3068: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3069: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc306a: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc306b: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc306c: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc306d: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc306e: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc306f: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3070: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3071: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3072: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3073: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3074: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3075: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3076: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3077: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3078: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3079: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc307a: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc307b: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc307c: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc307d: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc307e: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc307f: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3080: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3081: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3082: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3083: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3084: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3085: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3086: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3087: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3088: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3089: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc308a: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc308b: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc308c: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc308d: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc308e: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc308f: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3090: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3091: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3092: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3094: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3095: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3096: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a1: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a2: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a3: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a4: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a5: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a6: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a7: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a8: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a9: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30aa: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ab: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ac: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ad: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ae: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30af: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b0: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b1: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b2: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b3: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b4: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b5: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b6: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b7: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b8: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b9: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ba: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30bb: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30bc: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30bd: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30be: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30bf: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c0: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c1: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c2: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c3: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c4: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c5: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c6: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c7: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c8: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c9: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ca: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30cb: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30cc: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30cd: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ce: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30cf: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d0: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d1: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d2: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d3: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d4: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d5: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d6: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d7: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d8: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d9: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30da: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30db: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30dc: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30dd: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30de: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30df: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e0: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e1: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e2: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e3: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e4: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e5: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e6: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e7: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e8: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e9: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ea: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30eb: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ec: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ed: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ee: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ef: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f0: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f1: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f2: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f4: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f5: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f6: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f7: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f8: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f9: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30fa: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f0: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f1: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f2: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f3: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f4: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f5: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f6: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f7: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f8: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f9: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31fa: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31fb: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31fc: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31fd: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31fe: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31ff: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff66: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff67: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff68: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff69: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff6a: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff6b: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff6c: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff6d: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff6e: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff6f: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff71: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff72: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff73: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff74: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff75: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff76: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff77: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff78: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff79: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff7a: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff7b: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff7c: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff7d: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff7e: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff7f: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff80: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff81: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff82: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff83: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff84: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff85: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff86: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff87: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff88: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff89: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff8a: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff8b: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff8c: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff8d: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff8e: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff8f: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff90: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff91: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff92: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff93: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff94: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff95: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff96: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff97: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff98: []uint16{0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff99: []uint16{0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff9a: []uint16{0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff9b: []uint16{0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff9c: []uint16{0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fd30a1: []uint16{0x3d5a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30a2: []uint16{0x3d5a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30a3: []uint16{0x3d5b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30a4: []uint16{0x3d5b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30a5: []uint16{0x3d5c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30a6: []uint16{0x3d5c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30a7: []uint16{0x3d5e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30a8: []uint16{0x3d5e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30a9: []uint16{0x3d5f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30aa: []uint16{0x3d5f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ab: []uint16{0x3d60, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ac: []uint16{0x3d60, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fd30ad: []uint16{0x3d61, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ae: []uint16{0x3d61, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30af: []uint16{0x3d62, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b0: []uint16{0x3d62, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b1: []uint16{0x3d63, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b2: []uint16{0x3d63, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fd30b3: []uint16{0x3d64, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b4: []uint16{0x3d64, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b5: []uint16{0x3d65, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b6: []uint16{0x3d65, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b7: []uint16{0x3d66, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b8: []uint16{0x3d66, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b9: []uint16{0x3d67, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ba: []uint16{0x3d67, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30bb: []uint16{0x3d68, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30bc: []uint16{0x3d68, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30bd: []uint16{0x3d69, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30be: []uint16{0x3d69, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30bf: []uint16{0x3d6a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c0: []uint16{0x3d6a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c1: []uint16{0x3d6b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c2: []uint16{0x3d6b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c3: []uint16{0x3d6c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c4: []uint16{0x3d6c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c5: []uint16{0x3d6c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c6: []uint16{0x3d6d, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c7: []uint16{0x3d6d, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c8: []uint16{0x3d6e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c9: []uint16{0x3d6e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ca: []uint16{0x3d6f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30cb: []uint16{0x3d70, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30cc: []uint16{0x3d71, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30cd: []uint16{0x3d72, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ce: []uint16{0x3d73, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30cf: []uint16{0x3d74, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d0: []uint16{0x3d74, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d1: []uint16{0x3d74, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d2: []uint16{0x3d75, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d3: []uint16{0x3d75, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d4: []uint16{0x3d75, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d5: []uint16{0x3d76, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d6: []uint16{0x3d76, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d7: []uint16{0x3d76, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d8: []uint16{0x3d77, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d9: []uint16{0x3d77, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30da: []uint16{0x3d77, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30db: []uint16{0x3d78, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30dc: []uint16{0x3d78, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30dd: []uint16{0x3d78, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30de: []uint16{0x3d79, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30df: []uint16{0x3d7a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e0: []uint16{0x3d7b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e1: []uint16{0x3d7c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e2: []uint16{0x3d7d, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e3: []uint16{0x3d7e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e4: []uint16{0x3d7e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e5: []uint16{0x3d7f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e6: []uint16{0x3d7f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e7: []uint16{0x3d81, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e8: []uint16{0x3d81, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e9: []uint16{0x3d82, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ea: []uint16{0x3d83, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30eb: []uint16{0x3d84, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ec: []uint16{0x3d85, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ed: []uint16{0x3d86, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ee: []uint16{0x3d87, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ef: []uint16{0x3d87, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f0: []uint16{0x3d88, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f1: []uint16{0x3d89, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f2: []uint16{0x3d8a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f3: []uint16{0x3d8b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f4: []uint16{0x3d5c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f5: []uint16{0x3d60, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f6: []uint16{0x3d63, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f7: []uint16{0x3d87, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f8: []uint16{0x3d88, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f9: []uint16{0x3d89, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30fa: []uint16{0x3d8a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f0: []uint16{0x3d62, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f1: []uint16{0x3d66, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f2: []uint16{0x3d67, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f3: []uint16{0x3d6e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f4: []uint16{0x3d71, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f5: []uint16{0x3d74, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f6: []uint16{0x3d75, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f7: []uint16{0x3d76, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f8: []uint16{0x3d77, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f9: []uint16{0x3d78, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31fa: []uint16{0x3d7b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31fb: []uint16{0x3d82, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31fc: []uint16{0x3d83, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31fd: []uint16{0x3d84, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31fe: []uint16{0x3d85, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31ff: []uint16{0x3d86, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff66: []uint16{0x3d8a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff67: []uint16{0x3d5a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff68: []uint16{0x3d5b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff69: []uint16{0x3d5c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff6a: []uint16{0x3d5e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff6b: []uint16{0x3d5f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff6c: []uint16{0x3d7e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff6d: []uint16{0x3d7f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff6e: []uint16{0x3d81, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff6f: []uint16{0x3d6c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff71: []uint16{0x3d5a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff72: []uint16{0x3d5b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff73: []uint16{0x3d5c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff74: []uint16{0x3d5e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff75: []uint16{0x3d5f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff76: []uint16{0x3d60, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff77: []uint16{0x3d61, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff78: []uint16{0x3d62, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff79: []uint16{0x3d63, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff7a: []uint16{0x3d64, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff7b: []uint16{0x3d65, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff7c: []uint16{0x3d66, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff7d: []uint16{0x3d67, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff7e: []uint16{0x3d68, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff7f: []uint16{0x3d69, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff80: []uint16{0x3d6a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff81: []uint16{0x3d6b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff82: []uint16{0x3d6c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff83: []uint16{0x3d6d, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff84: []uint16{0x3d6e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff85: []uint16{0x3d6f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff86: []uint16{0x3d70, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff87: []uint16{0x3d71, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff88: []uint16{0x3d72, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff89: []uint16{0x3d73, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff8a: []uint16{0x3d74, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff8b: []uint16{0x3d75, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff8c: []uint16{0x3d76, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff8d: []uint16{0x3d77, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff8e: []uint16{0x3d78, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff8f: []uint16{0x3d79, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff90: []uint16{0x3d7a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff91: []uint16{0x3d7b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff92: []uint16{0x3d7c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff93: []uint16{0x3d7d, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff94: []uint16{0x3d7e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff95: []uint16{0x3d7f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff96: []uint16{0x3d81, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff97: []uint16{0x3d82, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff98: []uint16{0x3d83, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff99: []uint16{0x3d84, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff9a: []uint16{0x3d85, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff9b: []uint16{0x3d86, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff9c: []uint16{0x3d87, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff9d: []uint16{0x3d8b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fe30a5: []uint16{0x3d5c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30a6: []uint16{0x3d5c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30ad: []uint16{0x3d61, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30ae: []uint16{0x3d61, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30af: []uint16{0x3d62, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30b0: []uint16{0x3d62, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30b3: []uint16{0x3d64, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30b4: []uint16{0x3d64, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30b5: []uint16{0x3d65, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30b6: []uint16{0x3d65, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30b7: []uint16{0x3d66, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30b8: []uint16{0x3d66, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30b9: []uint16{0x3d67, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30ba: []uint16{0x3d67, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30bb: []uint16{0x3d68, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30bc: []uint16{0x3d68, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30bd: []uint16{0x3d69, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30be: []uint16{0x3d69, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30bf: []uint16{0x3d6a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c0: []uint16{0x3d6a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c1: []uint16{0x3d6b, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c2: []uint16{0x3d6b, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c3: []uint16{0x3d6c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c4: []uint16{0x3d6c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c5: []uint16{0x3d6c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c6: []uint16{0x3d6d, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c7: []uint16{0x3d6d, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c8: []uint16{0x3d6e, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c9: []uint16{0x3d6e, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30cf: []uint16{0x3d74, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d0: []uint16{0x3d74, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d1: []uint16{0x3d74, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d2: []uint16{0x3d75, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d3: []uint16{0x3d75, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d4: []uint16{0x3d75, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d5: []uint16{0x3d76, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d6: []uint16{0x3d76, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d7: []uint16{0x3d76, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d8: []uint16{0x3d77, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d9: []uint16{0x3d77, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30da: []uint16{0x3d77, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30db: []uint16{0x3d78, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30dc: []uint16{0x3d78, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30dd: []uint16{0x3d78, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30ee: []uint16{0x3d87, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30ef: []uint16{0x3d87, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30f0: []uint16{0x3d88, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30f1: []uint16{0x3d89, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30f2: []uint16{0x3d8a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30f4: []uint16{0x3d5c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30f7: []uint16{0x3d87, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30f8: []uint16{0x3d88, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30f9: []uint16{0x3d89, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30fa: []uint16{0x3d8a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f0: []uint16{0x3d62, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f1: []uint16{0x3d66, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f2: []uint16{0x3d67, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f3: []uint16{0x3d6e, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f5: []uint16{0x3d74, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f6: []uint16{0x3d75, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f7: []uint16{0x3d76, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f8: []uint16{0x3d77, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f9: []uint16{0x3d78, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff66: []uint16{0x3d8a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff69: []uint16{0x3d5c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff6f: []uint16{0x3d6c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff73: []uint16{0x3d5c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff77: []uint16{0x3d61, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff78: []uint16{0x3d62, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff7a: []uint16{0x3d64, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff7b: []uint16{0x3d65, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff7c: []uint16{0x3d66, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff7d: []uint16{0x3d67, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff7e: []uint16{0x3d68, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff7f: []uint16{0x3d69, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff80: []uint16{0x3d6a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff81: []uint16{0x3d6b, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff82: []uint16{0x3d6c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff83: []uint16{0x3d6d, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff84: []uint16{0x3d6e, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff8a: []uint16{0x3d74, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff8b: []uint16{0x3d75, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff8c: []uint16{0x3d76, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff8d: []uint16{0x3d77, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff8e: []uint16{0x3d78, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff9c: []uint16{0x3d87, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}} +var contractor_utf8mb4_ja_0900_as_cs_weights = map[uint32][]uint16{0x309d3041: {0x3d5a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3042: {0x3d5a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3043: {0x3d5b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3044: {0x3d5b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3045: {0x3d5c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3046: {0x3d5c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3047: {0x3d5e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3048: {0x3d5e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3049: {0x3d5f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d304a: {0x3d5f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d304b: {0x3d60, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d304c: {0x3d60, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309d304d: {0x3d61, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d304e: {0x3d61, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d304f: {0x3d62, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3050: {0x3d62, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3051: {0x3d63, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3052: {0x3d63, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309d3053: {0x3d64, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3054: {0x3d64, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3055: {0x3d65, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3056: {0x3d65, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3057: {0x3d66, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3058: {0x3d66, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3059: {0x3d67, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d305a: {0x3d67, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d305b: {0x3d68, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d305c: {0x3d68, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d305d: {0x3d69, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d305e: {0x3d69, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d305f: {0x3d6a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3060: {0x3d6a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3061: {0x3d6b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3062: {0x3d6b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3063: {0x3d6c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3064: {0x3d6c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3065: {0x3d6c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3066: {0x3d6d, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3067: {0x3d6d, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3068: {0x3d6e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3069: {0x3d6e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d306a: {0x3d6f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d306b: {0x3d70, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d306c: {0x3d71, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d306d: {0x3d72, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d306e: {0x3d73, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d306f: {0x3d74, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3070: {0x3d74, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3071: {0x3d74, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3072: {0x3d75, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3073: {0x3d75, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3074: {0x3d75, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3075: {0x3d76, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3076: {0x3d76, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3077: {0x3d76, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3078: {0x3d77, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3079: {0x3d77, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d307a: {0x3d77, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d307b: {0x3d78, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d307c: {0x3d78, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d307d: {0x3d78, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d307e: {0x3d79, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d307f: {0x3d7a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3080: {0x3d7b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3081: {0x3d7c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3082: {0x3d7d, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3083: {0x3d7e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3084: {0x3d7e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3085: {0x3d7f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3086: {0x3d7f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3087: {0x3d81, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3088: {0x3d81, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3089: {0x3d82, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d308a: {0x3d83, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d308b: {0x3d84, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d308c: {0x3d85, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d308d: {0x3d86, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d308e: {0x3d87, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d308f: {0x3d87, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3090: {0x3d88, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3091: {0x3d89, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3092: {0x3d8a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3093: {0x3d8b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3094: {0x3d5c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3095: {0x3d60, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309d3096: {0x3d63, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x309e3045: {0x3d5c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3046: {0x3d5c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e304d: {0x3d61, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e304e: {0x3d61, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e304f: {0x3d62, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3050: {0x3d62, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3053: {0x3d64, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3054: {0x3d64, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3055: {0x3d65, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3056: {0x3d65, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3057: {0x3d66, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3058: {0x3d66, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3059: {0x3d67, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e305a: {0x3d67, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e305b: {0x3d68, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e305c: {0x3d68, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e305d: {0x3d69, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e305e: {0x3d69, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e305f: {0x3d6a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3060: {0x3d6a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3061: {0x3d6b, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3062: {0x3d6b, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3063: {0x3d6c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3064: {0x3d6c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3065: {0x3d6c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3066: {0x3d6d, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3067: {0x3d6d, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3068: {0x3d6e, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3069: {0x3d6e, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e306f: {0x3d74, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3070: {0x3d74, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3071: {0x3d74, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3072: {0x3d75, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3073: {0x3d75, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3074: {0x3d75, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3075: {0x3d76, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3076: {0x3d76, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3077: {0x3d76, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3078: {0x3d77, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3079: {0x3d77, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e307a: {0x3d77, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e307b: {0x3d78, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e307c: {0x3d78, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e307d: {0x3d78, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e308e: {0x3d87, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e308f: {0x3d87, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3090: {0x3d88, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3091: {0x3d89, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3092: {0x3d8a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x309e3094: {0x3d5c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fc3041: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3042: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3043: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3044: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3045: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3046: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3047: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3048: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3049: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc304a: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc304b: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc304c: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc304d: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc304e: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc304f: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3050: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3051: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3052: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3053: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3054: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3055: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3056: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3057: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3058: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3059: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc305a: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc305b: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc305c: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc305d: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc305e: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc305f: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3060: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3061: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3062: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3063: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3064: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3065: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3066: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3067: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3068: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3069: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc306a: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc306b: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc306c: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc306d: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc306e: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc306f: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3070: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3071: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3072: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3073: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3074: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3075: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3076: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3077: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3078: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3079: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc307a: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc307b: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc307c: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc307d: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc307e: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc307f: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3080: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3081: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3082: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3083: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3084: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3085: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3086: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3087: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3088: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3089: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc308a: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc308b: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc308c: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc308d: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc308e: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc308f: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3090: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3091: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3092: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3094: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3095: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc3096: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a1: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a2: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a3: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a4: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a5: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a6: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a7: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a8: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30a9: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30aa: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ab: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ac: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ad: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ae: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30af: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b0: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b1: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b2: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b3: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b4: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b5: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b6: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b7: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b8: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30b9: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ba: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30bb: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30bc: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30bd: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30be: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30bf: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c0: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c1: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c2: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c3: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c4: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c5: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c6: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c7: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c8: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30c9: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ca: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30cb: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30cc: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30cd: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ce: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30cf: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d0: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d1: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d2: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d3: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d4: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d5: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d6: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d7: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d8: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30d9: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30da: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30db: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30dc: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30dd: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30de: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30df: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e0: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e1: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e2: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e3: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e4: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e5: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e6: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e7: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e8: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30e9: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ea: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30eb: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ec: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ed: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ee: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30ef: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f0: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f1: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f2: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f4: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f5: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f6: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f7: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f8: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30f9: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc30fa: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f0: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f1: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f2: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f3: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f4: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f5: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f6: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f7: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f8: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31f9: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31fa: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31fb: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31fc: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31fd: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31fe: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fc31ff: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff66: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff67: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff68: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff69: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff6a: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff6b: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff6c: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff6d: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff6e: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff6f: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff71: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff72: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff73: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff74: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff75: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff76: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff77: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff78: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff79: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff7a: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff7b: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff7c: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff7d: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff7e: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff7f: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff80: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff81: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff82: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff83: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff84: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff85: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff86: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff87: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff88: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff89: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff8a: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff8b: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff8c: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff8d: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff8e: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff8f: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff90: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff91: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff92: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff93: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff94: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff95: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff96: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff97: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff98: {0x3d5b, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff99: {0x3d5c, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff9a: {0x3d5e, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff9b: {0x3d5f, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fcff9c: {0x3d5a, 0x20, 0xc, 0x0, 0x0, 0x21}, 0x30fd30a1: {0x3d5a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30a2: {0x3d5a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30a3: {0x3d5b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30a4: {0x3d5b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30a5: {0x3d5c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30a6: {0x3d5c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30a7: {0x3d5e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30a8: {0x3d5e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30a9: {0x3d5f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30aa: {0x3d5f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ab: {0x3d60, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ac: {0x3d60, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fd30ad: {0x3d61, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ae: {0x3d61, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30af: {0x3d62, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b0: {0x3d62, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b1: {0x3d63, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b2: {0x3d63, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fd30b3: {0x3d64, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b4: {0x3d64, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b5: {0x3d65, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b6: {0x3d65, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b7: {0x3d66, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b8: {0x3d66, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30b9: {0x3d67, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ba: {0x3d67, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30bb: {0x3d68, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30bc: {0x3d68, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30bd: {0x3d69, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30be: {0x3d69, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30bf: {0x3d6a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c0: {0x3d6a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c1: {0x3d6b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c2: {0x3d6b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c3: {0x3d6c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c4: {0x3d6c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c5: {0x3d6c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c6: {0x3d6d, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c7: {0x3d6d, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c8: {0x3d6e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30c9: {0x3d6e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ca: {0x3d6f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30cb: {0x3d70, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30cc: {0x3d71, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30cd: {0x3d72, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ce: {0x3d73, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30cf: {0x3d74, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d0: {0x3d74, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d1: {0x3d74, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d2: {0x3d75, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d3: {0x3d75, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d4: {0x3d75, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d5: {0x3d76, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d6: {0x3d76, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d7: {0x3d76, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d8: {0x3d77, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30d9: {0x3d77, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30da: {0x3d77, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30db: {0x3d78, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30dc: {0x3d78, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30dd: {0x3d78, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30de: {0x3d79, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30df: {0x3d7a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e0: {0x3d7b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e1: {0x3d7c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e2: {0x3d7d, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e3: {0x3d7e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e4: {0x3d7e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e5: {0x3d7f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e6: {0x3d7f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e7: {0x3d81, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e8: {0x3d81, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30e9: {0x3d82, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ea: {0x3d83, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30eb: {0x3d84, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ec: {0x3d85, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ed: {0x3d86, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ee: {0x3d87, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30ef: {0x3d87, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f0: {0x3d88, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f1: {0x3d89, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f2: {0x3d8a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f3: {0x3d8b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f4: {0x3d5c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f5: {0x3d60, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f6: {0x3d63, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f7: {0x3d87, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f8: {0x3d88, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30f9: {0x3d89, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd30fa: {0x3d8a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f0: {0x3d62, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f1: {0x3d66, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f2: {0x3d67, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f3: {0x3d6e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f4: {0x3d71, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f5: {0x3d74, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f6: {0x3d75, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f7: {0x3d76, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f8: {0x3d77, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31f9: {0x3d78, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31fa: {0x3d7b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31fb: {0x3d82, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31fc: {0x3d83, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31fd: {0x3d84, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31fe: {0x3d85, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fd31ff: {0x3d86, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff66: {0x3d8a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff67: {0x3d5a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff68: {0x3d5b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff69: {0x3d5c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff6a: {0x3d5e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff6b: {0x3d5f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff6c: {0x3d7e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff6d: {0x3d7f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff6e: {0x3d81, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff6f: {0x3d6c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff71: {0x3d5a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff72: {0x3d5b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff73: {0x3d5c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff74: {0x3d5e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff75: {0x3d5f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff76: {0x3d60, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff77: {0x3d61, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff78: {0x3d62, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff79: {0x3d63, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff7a: {0x3d64, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff7b: {0x3d65, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff7c: {0x3d66, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff7d: {0x3d67, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff7e: {0x3d68, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff7f: {0x3d69, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff80: {0x3d6a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff81: {0x3d6b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff82: {0x3d6c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff83: {0x3d6d, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff84: {0x3d6e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff85: {0x3d6f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff86: {0x3d70, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff87: {0x3d71, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff88: {0x3d72, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff89: {0x3d73, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff8a: {0x3d74, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff8b: {0x3d75, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff8c: {0x3d76, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff8d: {0x3d77, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff8e: {0x3d78, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff8f: {0x3d79, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff90: {0x3d7a, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff91: {0x3d7b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff92: {0x3d7c, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff93: {0x3d7d, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff94: {0x3d7e, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff95: {0x3d7f, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff96: {0x3d81, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff97: {0x3d82, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff98: {0x3d83, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff99: {0x3d84, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff9a: {0x3d85, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff9b: {0x3d86, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff9c: {0x3d87, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fdff9d: {0x3d8b, 0x20, 0xd, 0x0, 0x0, 0x21}, 0x30fe30a5: {0x3d5c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30a6: {0x3d5c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30ad: {0x3d61, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30ae: {0x3d61, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30af: {0x3d62, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30b0: {0x3d62, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30b3: {0x3d64, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30b4: {0x3d64, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30b5: {0x3d65, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30b6: {0x3d65, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30b7: {0x3d66, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30b8: {0x3d66, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30b9: {0x3d67, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30ba: {0x3d67, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30bb: {0x3d68, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30bc: {0x3d68, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30bd: {0x3d69, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30be: {0x3d69, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30bf: {0x3d6a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c0: {0x3d6a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c1: {0x3d6b, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c2: {0x3d6b, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c3: {0x3d6c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c4: {0x3d6c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c5: {0x3d6c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c6: {0x3d6d, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c7: {0x3d6d, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c8: {0x3d6e, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30c9: {0x3d6e, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30cf: {0x3d74, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d0: {0x3d74, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d1: {0x3d74, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d2: {0x3d75, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d3: {0x3d75, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d4: {0x3d75, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d5: {0x3d76, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d6: {0x3d76, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d7: {0x3d76, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d8: {0x3d77, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30d9: {0x3d77, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30da: {0x3d77, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30db: {0x3d78, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30dc: {0x3d78, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30dd: {0x3d78, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30ee: {0x3d87, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30ef: {0x3d87, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30f0: {0x3d88, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30f1: {0x3d89, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30f2: {0x3d8a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30f4: {0x3d5c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30f7: {0x3d87, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30f8: {0x3d88, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30f9: {0x3d89, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe30fa: {0x3d8a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f0: {0x3d62, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f1: {0x3d66, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f2: {0x3d67, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f3: {0x3d6e, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f5: {0x3d74, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f6: {0x3d75, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f7: {0x3d76, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f8: {0x3d77, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30fe31f9: {0x3d78, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff66: {0x3d8a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff69: {0x3d5c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff6f: {0x3d6c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff73: {0x3d5c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff77: {0x3d61, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff78: {0x3d62, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff7a: {0x3d64, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff7b: {0x3d65, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff7c: {0x3d66, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff7d: {0x3d67, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff7e: {0x3d68, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff7f: {0x3d69, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff80: {0x3d6a, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff81: {0x3d6b, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff82: {0x3d6c, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff83: {0x3d6d, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff84: {0x3d6e, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff8a: {0x3d74, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff8b: {0x3d75, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff8c: {0x3d76, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff8d: {0x3d77, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff8e: {0x3d78, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}, 0x30feff9c: {0x3d87, 0x20, 0xe, 0x0, 0x37, 0x1, 0x0, 0x0, 0x21}} func (contractor_utf8mb4_ja_0900_as_cs) FindContextual(cp1, cp0 rune) []uint16 { if cp0 < 12353 || cp1 < 12445 || cp0 > 0xFFFF || cp1 > 0xFFFF { @@ -3597,9 +3613,9 @@ func (contractor_utf8mb4_ja_0900_as_cs) FindContextual(cp1, cp0 rune) []uint16 { return contractor_utf8mb4_ja_0900_as_cs_weights[uint32(cp1)<<16|uint32(cp0)] } -var reorder_utf8mb4_ja_0900_as_cs = []uca.Reorder{uca.Reorder{FromMin: 0x1c47, FromMax: 0x1fb5, ToMin: 0x1c47, ToMax: 0x1fb5}, uca.Reorder{FromMin: 0x3d5a, FromMax: 0x3d8b, ToMin: 0x1fb6, ToMax: 0x1fe7}, uca.Reorder{FromMin: 0x1fb6, FromMax: 0x3d59, ToMin: 0x0, ToMax: 0x0}, uca.Reorder{FromMin: 0x3d8c, FromMax: 0x54a3, ToMin: 0x0, ToMax: 0x0}} +var reorder_utf8mb4_ja_0900_as_cs = []uca.Reorder{{FromMin: 0x1c47, FromMax: 0x1fb5, ToMin: 0x1c47, ToMax: 0x1fb5}, {FromMin: 0x3d5a, FromMax: 0x3d8b, ToMin: 0x1fb6, ToMax: 0x1fe7}, {FromMin: 0x1fb6, FromMax: 0x3d59, ToMin: 0x0, ToMax: 0x0}, {FromMin: 0x3d8c, FromMax: 0x54a3, ToMin: 0x0, ToMax: 0x0}} -var reorder_utf8mb4_ru_0900_ai_ci = []uca.Reorder{uca.Reorder{FromMin: 0x2022, FromMax: 0x21e1, ToMin: 0x1c47, ToMax: 0x1e06}, uca.Reorder{FromMin: 0x1c47, FromMax: 0x2021, ToMin: 0x1e07, ToMax: 0x21e1}} +var reorder_utf8mb4_ru_0900_ai_ci = []uca.Reorder{{FromMin: 0x2022, FromMax: 0x21e1, ToMin: 0x1c47, ToMax: 0x1e06}, {FromMin: 0x1c47, FromMax: 0x2021, ToMin: 0x1e07, ToMax: 0x21e1}} type contractor_utf8mb4_zh_0900_as_cs struct{} diff --git a/go/mysql/collations/mysqlucadata.bin b/go/mysql/collations/colldata/mysqlucadata.bin similarity index 100% rename from go/mysql/collations/mysqlucadata.bin rename to go/mysql/collations/colldata/mysqlucadata.bin diff --git a/go/mysql/collations/mysqlucadata.go b/go/mysql/collations/colldata/mysqlucadata.go similarity index 99% rename from go/mysql/collations/mysqlucadata.go rename to go/mysql/collations/colldata/mysqlucadata.go index ae8e2d48642..0affc45d11f 100644 --- a/go/mysql/collations/mysqlucadata.go +++ b/go/mysql/collations/colldata/mysqlucadata.go @@ -1,10 +1,25 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + // Code generated by makecolldata DO NOT EDIT -package collations +package colldata import ( _ "embed" - reflect "reflect" unsafe "unsafe" ) @@ -1402,5 +1417,5 @@ var weightTable_uca520 = []*[]uint16{ var weightsUCA_embed_data string func weightsUCA_embed(pos, length int) []uint16 { - return (*[0x7fff0000]uint16)(unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&weightsUCA_embed_data)).Data))[pos : pos+length] + return (*[0x7fff0000]uint16)(unsafe.Pointer(unsafe.StringData(weightsUCA_embed_data)))[pos : pos+length] } diff --git a/go/mysql/collations/uca.go b/go/mysql/collations/colldata/uca.go similarity index 96% rename from go/mysql/collations/uca.go rename to go/mysql/collations/colldata/uca.go index 444fd3c295c..4b7272bfbc3 100644 --- a/go/mysql/collations/uca.go +++ b/go/mysql/collations/colldata/uca.go @@ -14,12 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package collations +package colldata import ( "bytes" "math/bits" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" "vitess.io/vitess/go/mysql/collations/internal/uca" "vitess.io/vitess/go/vt/vthash" @@ -27,7 +28,7 @@ import ( type Collation_utf8mb4_uca_0900 struct { name string - id ID + id collations.ID uca *uca.Collation900 } @@ -35,7 +36,7 @@ func (c *Collation_utf8mb4_uca_0900) Name() string { return c.name } -func (c *Collation_utf8mb4_uca_0900) ID() ID { +func (c *Collation_utf8mb4_uca_0900) ID() collations.ID { return c.id } @@ -213,7 +214,7 @@ func (c *Collation_utf8mb4_uca_0900) ToUpper(dst, src []byte) []byte { type Collation_utf8mb4_0900_bin struct{} -func (c *Collation_utf8mb4_0900_bin) ID() ID { +func (c *Collation_utf8mb4_0900_bin) ID() collations.ID { return 309 } @@ -271,11 +272,11 @@ func (c *Collation_utf8mb4_0900_bin) ToUpper(dst, src []byte) []byte { type Collation_uca_legacy struct { name string - id ID + id collations.ID uca *uca.CollationLegacy } -func (c *Collation_uca_legacy) ID() ID { +func (c *Collation_uca_legacy) ID() collations.ID { return c.id } diff --git a/go/mysql/collations/uca_contraction_test.go b/go/mysql/collations/colldata/uca_contraction_test.go similarity index 99% rename from go/mysql/collations/uca_contraction_test.go rename to go/mysql/collations/colldata/uca_contraction_test.go index 7d59b6fa4a8..d17ff21e255 100644 --- a/go/mysql/collations/uca_contraction_test.go +++ b/go/mysql/collations/colldata/uca_contraction_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package collations +package colldata import ( "encoding/json" diff --git a/go/mysql/collations/uca_tables_test.go b/go/mysql/collations/colldata/uca_tables_test.go similarity index 95% rename from go/mysql/collations/uca_tables_test.go rename to go/mysql/collations/colldata/uca_tables_test.go index 011095e1cf6..40c2f3bbed3 100644 --- a/go/mysql/collations/uca_tables_test.go +++ b/go/mysql/collations/colldata/uca_tables_test.go @@ -14,13 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package collations +package colldata import ( "encoding/json" "fmt" "os" - "reflect" "strconv" "testing" "unsafe" @@ -95,12 +94,12 @@ func TestWeightsForAllCodepoints(t *testing.T) { } func TestWeightTablesAreDeduplicated(t *testing.T) { - sliceptr := func(table uca.Weights) uintptr { - hdr := (*reflect.SliceHeader)(unsafe.Pointer(&table)) - return hdr.Data + sliceptr := func(table uca.Weights) unsafe.Pointer { + data := unsafe.SliceData(table) + return unsafe.Pointer(data) } - uniqueTables := make(map[uintptr]int) + uniqueTables := make(map[unsafe.Pointer]int) for _, col := range testall() { var weights uca.Weights switch col := col.(type) { diff --git a/go/mysql/collations/uca_test.go b/go/mysql/collations/colldata/uca_test.go similarity index 99% rename from go/mysql/collations/uca_test.go rename to go/mysql/collations/colldata/uca_test.go index 5e3f22929c8..70c9312636e 100644 --- a/go/mysql/collations/uca_test.go +++ b/go/mysql/collations/colldata/uca_test.go @@ -14,12 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. */ -package collations +package colldata import ( "bytes" "fmt" "math/rand" + "slices" "sort" "strings" "sync" @@ -28,7 +29,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" "vitess.io/vitess/go/mysql/collations/charset" "vitess.io/vitess/go/vt/vthash" @@ -949,8 +949,8 @@ func TestUCACollationOrder(t *testing.T) { j := rand.Intn(i + 1) ary[i], ary[j] = ary[j], ary[i] } - slices.SortFunc(ary, func(a, b string) bool { - return col.Collate([]byte(a), []byte(b), false) < 0 + slices.SortFunc(ary, func(a, b string) int { + return col.Collate([]byte(a), []byte(b), false) }) require.Equal(t, sorted, ary) } diff --git a/go/mysql/collations/unicase.go b/go/mysql/collations/colldata/unicase.go similarity index 99% rename from go/mysql/collations/unicase.go rename to go/mysql/collations/colldata/unicase.go index c669c2368ad..964d48d7107 100644 --- a/go/mysql/collations/unicase.go +++ b/go/mysql/collations/colldata/unicase.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package collations +package colldata import ( "vitess.io/vitess/go/mysql/collations/charset" diff --git a/go/mysql/collations/unicode.go b/go/mysql/collations/colldata/unicode.go similarity index 96% rename from go/mysql/collations/unicode.go rename to go/mysql/collations/colldata/unicode.go index 8168595cd34..c0495b0474f 100644 --- a/go/mysql/collations/unicode.go +++ b/go/mysql/collations/colldata/unicode.go @@ -14,25 +14,26 @@ See the License for the specific language governing permissions and limitations under the License. */ -package collations +package colldata import ( "bytes" "math" "math/bits" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" "vitess.io/vitess/go/vt/vthash" ) type Collation_unicode_general_ci struct { - id ID + id collations.ID name string unicase *UnicaseInfo charset charset.Charset } -func (c *Collation_unicode_general_ci) ID() ID { +func (c *Collation_unicode_general_ci) ID() collations.ID { return c.id } @@ -164,12 +165,12 @@ func (c *Collation_unicode_general_ci) Wildcard(pat []byte, matchOne rune, match } type Collation_unicode_bin struct { - id ID + id collations.ID name string charset charset.Charset } -func (c *Collation_unicode_bin) ID() ID { +func (c *Collation_unicode_bin) ID() collations.ID { return c.id } @@ -352,7 +353,7 @@ func (c *Collation_unicode_bin) Wildcard(pat []byte, matchOne rune, matchMany ru } func collationBinary(left, right []byte, rightPrefix bool) int { - minLen := minInt(len(left), len(right)) + minLen := min(len(left), len(right)) if diff := bytes.Compare(left[:minLen], right[:minLen]); diff != 0 { return diff } diff --git a/go/mysql/collations/wildcard.go b/go/mysql/collations/colldata/wildcard.go similarity index 99% rename from go/mysql/collations/wildcard.go rename to go/mysql/collations/colldata/wildcard.go index 5d8fd012375..01f4807b7df 100644 --- a/go/mysql/collations/wildcard.go +++ b/go/mysql/collations/colldata/wildcard.go @@ -38,7 +38,7 @@ limitations under the License. // // Because of this, we intend to enable the recursive algorithm by default. -package collations +package colldata import ( "unicode/utf8" diff --git a/go/mysql/collations/wildcard_test.go b/go/mysql/collations/colldata/wildcard_test.go similarity index 99% rename from go/mysql/collations/wildcard_test.go rename to go/mysql/collations/colldata/wildcard_test.go index dc6a44c644c..fff08f35c22 100644 --- a/go/mysql/collations/wildcard_test.go +++ b/go/mysql/collations/colldata/wildcard_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package collations +package colldata import ( "testing" diff --git a/go/mysql/collations/env.go b/go/mysql/collations/env.go index c6e86375027..91fc2a8bd8c 100644 --- a/go/mysql/collations/env.go +++ b/go/mysql/collations/env.go @@ -18,26 +18,29 @@ package collations import ( "fmt" + "slices" "strings" "sync" ) type colldefaults struct { - Default Collation - Binary Collation + Default ID + Binary ID } // Environment is a collation environment for a MySQL version, which contains // a database of collations and defaults for that specific version. type Environment struct { - version collver - byName map[string]Collation - byCharset map[string]*colldefaults - unsupported map[string]ID + version collver + byName map[string]ID + byCharset map[string]*colldefaults + byCharsetName map[ID]string + unsupported map[string]ID + byID map[ID]string } // LookupByName returns the collation with the given name. -func (env *Environment) LookupByName(name string) Collation { +func (env *Environment) LookupByName(name string) ID { return env.byName[name] } @@ -45,37 +48,34 @@ func (env *Environment) LookupByName(name string) Collation { // the collation is supported by this package. func (env *Environment) LookupID(name string) (ID, bool) { if supported, ok := env.byName[name]; ok { - return supported.ID(), true + return supported, true } - if unsupported, ok := env.unsupported[name]; ok { - return unsupported, false + if unsup, ok := env.unsupported[name]; ok { + return unsup, false } return Unknown, false } +// LookupName returns the collation name for the given ID and whether +// the collation is supported by this package. +func (env *Environment) LookupName(id ID) string { + return env.byID[id] +} + // DefaultCollationForCharset returns the default collation for a charset -func (env *Environment) DefaultCollationForCharset(charset string) Collation { +func (env *Environment) DefaultCollationForCharset(charset string) ID { if defaults, ok := env.byCharset[charset]; ok { return defaults.Default } - return nil + return Unknown } // BinaryCollationForCharset returns the default binary collation for a charset -func (env *Environment) BinaryCollationForCharset(charset string) Collation { +func (env *Environment) BinaryCollationForCharset(charset string) ID { if defaults, ok := env.byCharset[charset]; ok { return defaults.Binary } - return nil -} - -// AllCollations returns a slice with all known collations in Vitess. -func (env *Environment) AllCollations() (all []Collation) { - all = make([]Collation, 0, len(env.byName)) - for _, col := range env.byName { - all = append(all, col) - } - return + return Unknown } var globalEnvironments = make(map[collver]*Environment) @@ -109,7 +109,7 @@ func NewEnvironment(serverVersion string) *Environment { case strings.HasSuffix(serverVersion, "-ripple"): // the ripple binlog server can mask the actual version of mysqld; // assume we have the highest - version = collverMySQL80 + version = collverMySQL8 case strings.Contains(serverVersion, "mariadb"): switch { case strings.Contains(serverVersion, "10.0."): @@ -125,66 +125,62 @@ func NewEnvironment(serverVersion string) *Environment { version = collverMySQL56 case strings.HasPrefix(serverVersion, "5.7."): version = collverMySQL57 - case strings.HasPrefix(serverVersion, "8.0."): - version = collverMySQL80 + case strings.HasPrefix(serverVersion, "8."): + version = collverMySQL8 } return fetchCacheEnvironment(version) } func makeEnv(version collver) *Environment { env := &Environment{ - version: version, - byName: make(map[string]Collation), - byCharset: make(map[string]*colldefaults), - unsupported: make(map[string]ID), + version: version, + byName: make(map[string]ID), + byCharset: make(map[string]*colldefaults), + byCharsetName: make(map[ID]string), + byID: make(map[ID]string), + unsupported: make(map[string]ID), } for collid, vi := range globalVersionInfo { var ournames []string + var ourcharsets []string for _, alias := range vi.alias { if alias.mask&version != 0 { ournames = append(ournames, alias.name) + ourcharsets = append(ourcharsets, alias.charset) } } if len(ournames) == 0 { continue } - var collation Collation - if int(collid) < len(collationsById) { - collation = collationsById[collid] - } - if collation == nil { + if int(collid) >= len(supported) || supported[collid] == "" { for _, name := range ournames { env.unsupported[name] = collid } continue } - for _, name := range ournames { - env.byName[name] = collation - } - - csname := collation.Charset().Name() - if _, ok := env.byCharset[csname]; !ok { - env.byCharset[csname] = &colldefaults{} - } - defaults := env.byCharset[csname] - if vi.isdefault&version != 0 { - defaults.Default = collation - } - if collation.IsBinary() { - if defaults.Binary != nil && defaults.Binary.ID() > collation.ID() { - // If there's more than one binary collation, the one with the - // highest ID (i.e. the newest one) takes precedence. This applies - // to utf8mb4_bin vs utf8mb4_0900_bin - continue + for i, name := range ournames { + cs := ourcharsets[i] + env.byName[name] = collid + env.byID[collid] = name + env.byCharsetName[collid] = cs + defaults := env.byCharset[cs] + if defaults == nil { + defaults = &colldefaults{} + env.byCharset[cs] = defaults + } + if vi.isdefault&version != 0 { + defaults.Default = collid + } + if strings.HasSuffix(name, "_bin") && defaults.Binary < collid { + defaults.Binary = collid } - defaults.Binary = collation } } - for from, to := range version.charsetAliases() { + for from, to := range charsetAliases() { env.byCharset[from] = env.byCharset[to] } @@ -194,20 +190,29 @@ func makeEnv(version collver) *Environment { // A few interesting character set values. // See http://dev.mysql.com/doc/internals/en/character-set.html#packet-Protocol::CharacterSet const ( - CollationUtf8ID = 33 - CollationUtf8mb4ID = 255 - CollationBinaryID = 63 + CollationUtf8mb3ID = 33 + CollationUtf8mb4ID = 255 + CollationBinaryID = 63 + CollationUtf8mb4BinID = 46 + CollationLatin1Swedish = 8 ) -// Binary is the default Binary collation -var Binary = ID(CollationBinaryID).Get() +// SystemCollation is the default collation for the system tables +// such as the information schema. This is still utf8mb3 to match +// MySQLs behavior. This means that you can't use utf8mb4 in table +// names, column names, without running into significant issues. +var SystemCollation = TypedCollation{ + Collation: CollationUtf8mb3ID, + Coercibility: CoerceCoercible, + Repertoire: RepertoireUnicode, +} // CharsetAlias returns the internal charset name for the given charset. // For now, this only maps `utf8` to `utf8mb3`; in future versions of MySQL, // this mapping will change, so it's important to use this helper so that // Vitess code has a consistent mapping for the active collations environment. func (env *Environment) CharsetAlias(charset string) (alias string, ok bool) { - alias, ok = env.version.charsetAliases()[charset] + alias, ok = charsetAliases()[charset] return } @@ -217,10 +222,10 @@ func (env *Environment) CharsetAlias(charset string) (alias string, ok bool) { // Vitess code has a consistent mapping for the active collations environment. func (env *Environment) CollationAlias(collation string) (string, bool) { col := env.LookupByName(collation) - if col == nil { + if col == Unknown { return collation, false } - allCols, ok := globalVersionInfo[col.ID()] + allCols, ok := globalVersionInfo[col] if !ok { return collation, false } @@ -228,7 +233,7 @@ func (env *Environment) CollationAlias(collation string) (string, bool) { return collation, false } for _, alias := range allCols.alias { - for source, dest := range env.version.charsetAliases() { + for source, dest := range charsetAliases() { if strings.HasPrefix(collation, fmt.Sprintf("%s_", source)) && strings.HasPrefix(alias.name, fmt.Sprintf("%s_", dest)) { return alias.name, true @@ -245,7 +250,7 @@ func (env *Environment) CollationAlias(collation string) (string, bool) { // For older MySQL environments, the default charset is `utf8mb4_general_ci`. func (env *Environment) DefaultConnectionCharset() uint8 { switch env.version { - case collverMySQL80: + case collverMySQL8: return uint8(CollationUtf8mb4ID) default: return 45 @@ -270,12 +275,29 @@ func (env *Environment) ParseConnectionCharset(csname string) (uint8, error) { var collid ID = 0 csname = strings.ToLower(csname) if defaults, ok := env.byCharset[csname]; ok { - collid = defaults.Default.ID() + collid = defaults.Default } else if coll, ok := env.byName[csname]; ok { - collid = coll.ID() + collid = coll } if collid == 0 || collid > 255 { return 0, fmt.Errorf("unsupported connection charset: %q", csname) } return uint8(collid), nil } + +func (env *Environment) AllCollationIDs() []ID { + all := make([]ID, 0, len(env.byID)) + for v := range env.byID { + all = append(all, v) + } + slices.Sort(all) + return all +} + +func (env *Environment) LookupByCharset(name string) *colldefaults { + return env.byCharset[name] +} + +func (env *Environment) LookupCharsetName(coll ID) string { + return env.byCharsetName[coll] +} diff --git a/go/mysql/collations/golden_test.go b/go/mysql/collations/golden_test.go index 32b9e90394f..099f77268b7 100644 --- a/go/mysql/collations/golden_test.go +++ b/go/mysql/collations/golden_test.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,71 +17,58 @@ limitations under the License. package collations import ( - "bytes" "fmt" "os" - "path/filepath" "sort" "strings" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/mysql/collations/charset" - "vitess.io/vitess/go/mysql/collations/internal/testutil" ) -func TestGoldenWeights(t *testing.T) { - gllGoldenTests, err := filepath.Glob("testdata/wiki_*.gob.gz") - if err != nil { - t.Fatal(err) +func TestAllCollationsByCharset(t *testing.T) { + var defaults1 = map[string][2]string{ + "utf8mb4": {"utf8mb4_general_ci", "utf8mb4_bin"}, } - - for _, goldenPath := range gllGoldenTests { - golden := &testutil.GoldenTest{} - if err := golden.DecodeFromFile(goldenPath); err != nil { - t.Fatal(err) - } - - for _, goldenCase := range golden.Cases { - t.Run(fmt.Sprintf("%s (%s)", golden.Name, goldenCase.Lang), func(t *testing.T) { - for coll, expected := range goldenCase.Weights { - coll := testcollation(t, coll) - - input, err := charset.ConvertFromUTF8(nil, coll.Charset(), goldenCase.Text) - if err != nil { - t.Fatal(err) - } - - result := coll.WeightString(nil, input, 0) - assert.True(t, bytes.Equal(expected, result), "mismatch for collation=%s\noriginal: %s\ninput: %#v\nexpected: %v\nactual: %v", coll.Name(), string(goldenCase.Text), input, expected, result) - - } - }) - } + var defaults2 = map[string][2]string{ + "utf8mb4": {"utf8mb4_0900_ai_ci", "utf8mb4_0900_bin"}, } -} -func TestCollationsForLanguage(t *testing.T) { - allCollations := testall() - langCounts := make(map[testutil.Lang][]string) + for _, tc := range []struct { + version collver + defaults map[string][2]string + }{ + {collverMariaDB100, defaults1}, + {collverMariaDB101, defaults1}, + {collverMariaDB102, defaults1}, + {collverMariaDB103, defaults1}, + {collverMySQL56, defaults1}, + {collverMySQL57, defaults1}, + {collverMySQL8, defaults2}, + } { + t.Run(tc.version.String(), func(t *testing.T) { + env := makeEnv(tc.version) + for csname, cset := range env.byCharset { + switch csname { + case "gb18030": + // this doesn't work yet + continue + } + require.NotNil(t, cset.Default, "charset %s has no default", csname) + require.NotNil(t, cset.Binary, "charset %s has no binary", csname) - for lang := range testutil.KnownLanguages { - var matched []string - for _, coll := range allCollations { - name := coll.Name() - if lang.MatchesCollation(name) { - matched = append(matched, name) } - } - langCounts[lang] = matched - } - for lang := range testutil.KnownLanguages { - assert.NotEqual(t, 0, len(langCounts[lang]), "no collations found for %q", lang) - - t.Logf("%s: %v", lang, langCounts[lang]) + for charset, expected := range tc.defaults { + expectedDefault, expectedBinary := expected[0], expected[1] + if def := env.DefaultCollationForCharset(charset); env.LookupName(def) != expectedDefault { + t.Fatalf("bad default for utf8mb4: %s (expected %s)", env.LookupName(def), expectedDefault) + } + if def := env.BinaryCollationForCharset(charset); env.LookupName(def) != expectedBinary { + t.Fatalf("bad binary for utf8mb4: %s (expected %s)", env.LookupName(def), expectedBinary) + } + } + }) } } @@ -89,7 +76,7 @@ func TestCollationsForLanguage(t *testing.T) { // table with Collation support information for the current build of Vitess. func XTestSupportTables(t *testing.T) { var versions = []collver{ - collverMySQL80, + collverMySQL8, collverMySQL57, collverMySQL56, collverMariaDB103, @@ -120,8 +107,8 @@ func XTestSupportTables(t *testing.T) { fmt.Fprintf(out, " |\n|%s\n", strings.Repeat("---|", len(envs)+2)) for _, id := range all { - coll := collationsById[id] - if coll == nil { + name := envs[0].LookupName(id) + if name == "" { vdata := globalVersionInfo[id] var collnames []string @@ -148,9 +135,9 @@ func XTestSupportTables(t *testing.T) { } } } else { - fmt.Fprintf(out, "| %s | %s", coll.Name(), coll.Charset().Name()) + fmt.Fprintf(out, "| %s | %s", name, envs[0].LookupCharsetName(id)) for _, env := range envs { - _, supported := env.byName[coll.Name()] + _, supported := env.LookupID(name) if supported { fmt.Fprintf(out, " | ✅") } else { @@ -162,49 +149,3 @@ func XTestSupportTables(t *testing.T) { fmt.Fprintf(out, " |\n") } } - -func TestAllCollationsByCharset(t *testing.T) { - var defaults1 = map[string][2]string{ - "utf8mb4": {"utf8mb4_general_ci", "utf8mb4_bin"}, - } - var defaults2 = map[string][2]string{ - "utf8mb4": {"utf8mb4_0900_ai_ci", "utf8mb4_0900_bin"}, - } - - for _, tc := range []struct { - version collver - defaults map[string][2]string - }{ - {collverMariaDB100, defaults1}, - {collverMariaDB101, defaults1}, - {collverMariaDB102, defaults1}, - {collverMariaDB103, defaults1}, - {collverMySQL56, defaults1}, - {collverMySQL57, defaults1}, - {collverMySQL80, defaults2}, - } { - t.Run(tc.version.String(), func(t *testing.T) { - env := makeEnv(tc.version) - for csname, cset := range env.byCharset { - switch csname { - case "gb18030": - // this doesn't work yet - continue - } - require.NotNil(t, cset.Default, "charset %s has no default", csname) - require.NotNil(t, cset.Binary, "charset %s has no binary", csname) - - } - - for charset, expected := range tc.defaults { - expectedDefault, expectedBinary := expected[0], expected[1] - if def := env.DefaultCollationForCharset(charset); def.Name() != expectedDefault { - t.Fatalf("bad default for utf8mb4: %s (expected %s)", def.Name(), expectedDefault) - } - if def := env.BinaryCollationForCharset(charset); def.Name() != expectedBinary { - t.Fatalf("bad binary for utf8mb4: %s (expected %s)", def.Name(), expectedBinary) - } - } - }) - } -} diff --git a/go/mysql/collations/integration/charset_test.go b/go/mysql/collations/integration/charset_test.go index 2705dc29f5d..8a4d12a0e4d 100644 --- a/go/mysql/collations/integration/charset_test.go +++ b/go/mysql/collations/integration/charset_test.go @@ -23,6 +23,8 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" "vitess.io/vitess/go/mysql/collations/remote" @@ -45,7 +47,7 @@ func TestLocalEncodings(t *testing.T) { for _, tc := range cases { local := collations.Local().LookupByName(tc.collation) remote := remote.NewCollation(conn, tc.collation) - verifyTranscoding(t, local, remote, tc.input) + verifyTranscoding(t, colldata.Lookup(local), remote, tc.input) } } diff --git a/go/mysql/collations/integration/coercion_test.go b/go/mysql/collations/integration/coercion_test.go index 7ad31f78852..dad55bcafad 100644 --- a/go/mysql/collations/integration/coercion_test.go +++ b/go/mysql/collations/integration/coercion_test.go @@ -26,6 +26,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/remote" "vitess.io/vitess/go/sqltypes" @@ -33,18 +35,18 @@ import ( type TextWithCollation struct { Text []byte - Collation collations.Collation + Collation collations.ID } type RemoteCoercionResult struct { Expr sqltypes.Value - Collation collations.Collation + Collation collations.ID Coercibility collations.Coercibility } type RemoteCoercionTest interface { Expression() string - Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coerce1, coerce2 collations.Coercion) + Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coerce1, coerce2 colldata.Coercion) } type testConcat struct { @@ -52,15 +54,17 @@ type testConcat struct { } func (tc *testConcat) Expression() string { + env := collations.Local() return fmt.Sprintf("CONCAT((_%s X'%x' COLLATE %q), (_%s X'%x' COLLATE %q))", - tc.left.Collation.Charset().Name(), tc.left.Text, tc.left.Collation.Name(), - tc.right.Collation.Charset().Name(), tc.right.Text, tc.right.Collation.Name(), + colldata.Lookup(tc.left.Collation).Charset().Name(), tc.left.Text, env.LookupName(tc.left.Collation), + colldata.Lookup(tc.right.Collation).Charset().Name(), tc.right.Text, env.LookupName(tc.right.Collation), ) } -func (tc *testConcat) Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coercion1, coercion2 collations.Coercion) { - localCollation := local.Collation.Get() - assert.Equal(t, remote.Collation.Name(), localCollation.Name(), "bad collation resolved: local is %s, remote is %s", localCollation.Name(), remote.Collation.Name()) +func (tc *testConcat) Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coercion1, coercion2 colldata.Coercion) { + localCollation := colldata.Lookup(local.Collation) + remoteName := collations.Local().LookupName(remote.Collation) + assert.Equal(t, remoteName, localCollation.Name(), "bad collation resolved: local is %s, remote is %s", localCollation.Name(), remoteName) assert.Equal(t, remote.Coercibility, local.Coercibility, "bad coercibility resolved: local is %d, remote is %d", local.Coercibility, remote.Coercibility) leftText, err := coercion1(nil, tc.left.Text) @@ -81,8 +85,8 @@ func (tc *testConcat) Test(t *testing.T, remote *RemoteCoercionResult, local col rEBytes, err := remote.Expr.ToBytes() require.NoError(t, err) - assert.True(t, bytes.Equal(concat.Bytes(), rEBytes), "failed to concatenate text;\n\tCONCAT(%v COLLATE %s, %v COLLATE %s) = \n\tCONCAT(%v, %v) COLLATE %s = \n\t\t%v\n\n\texpected: %v", tc.left.Text, tc.left.Collation.Name(), - tc.right.Text, tc.right.Collation.Name(), leftText, rightText, localCollation.Name(), + assert.True(t, bytes.Equal(concat.Bytes(), rEBytes), "failed to concatenate text;\n\tCONCAT(%v COLLATE %s, %v COLLATE %s) = \n\tCONCAT(%v, %v) COLLATE %s = \n\t\t%v\n\n\texpected: %v", tc.left.Text, collations.Local().LookupName(tc.left.Collation), + tc.right.Text, collations.Local().LookupName(tc.right.Collation), leftText, rightText, localCollation.Name(), concat.Bytes(), rEBytes) } @@ -92,14 +96,15 @@ type testComparison struct { } func (tc *testComparison) Expression() string { + env := collations.Local() return fmt.Sprintf("(_%s X'%x' COLLATE %q) = (_%s X'%x' COLLATE %q)", - tc.left.Collation.Charset().Name(), tc.left.Text, tc.left.Collation.Name(), - tc.right.Collation.Charset().Name(), tc.right.Text, tc.right.Collation.Name(), + env.LookupCharsetName(tc.left.Collation), tc.left.Text, env.LookupName(tc.left.Collation), + env.LookupCharsetName(tc.right.Collation), tc.right.Text, env.LookupName(tc.right.Collation), ) } -func (tc *testComparison) Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coerce1, coerce2 collations.Coercion) { - localCollation := local.Collation.Get() +func (tc *testComparison) Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coerce1, coerce2 colldata.Coercion) { + localCollation := colldata.Lookup(local.Collation) leftText, err := coerce1(nil, tc.left.Text) if err != nil { t.Errorf("failed to transcode left: %v", err) @@ -130,12 +135,12 @@ func TestComparisonSemantics(t *testing.T) { t.Skipf("The behavior of Coercion Semantics is not correct before 8.0.31") } - for _, coll := range collations.Local().AllCollations() { + for _, coll := range colldata.All(collations.Local()) { text := verifyTranscoding(t, coll, remote.NewCollation(conn, coll.Name()), []byte(BaseString)) - testInputs = append(testInputs, &TextWithCollation{Text: text, Collation: coll}) + testInputs = append(testInputs, &TextWithCollation{Text: text, Collation: coll.ID()}) } sort.Slice(testInputs, func(i, j int) bool { - return testInputs[i].Collation.ID() < testInputs[j].Collation.ID() + return testInputs[i].Collation < testInputs[j].Collation }) var testCases = []struct { @@ -161,17 +166,17 @@ func TestComparisonSemantics(t *testing.T) { for _, collA := range testInputs { for _, collB := range testInputs { left := collations.TypedCollation{ - Collation: collA.Collation.ID(), + Collation: collA.Collation, Coercibility: 0, Repertoire: collations.RepertoireASCII, } right := collations.TypedCollation{ - Collation: collB.Collation.ID(), + Collation: collB.Collation, Coercibility: 0, Repertoire: collations.RepertoireASCII, } - resultLocal, coercionLocal1, coercionLocal2, errLocal := collations.Local().MergeCollations(left, right, - collations.CoercionOptions{ + resultLocal, coercionLocal1, coercionLocal2, errLocal := colldata.Merge(collations.Local(), left, right, + colldata.CoercionOptions{ ConvertToSuperset: true, ConvertWithCoercion: true, }) @@ -189,11 +194,12 @@ func TestComparisonSemantics(t *testing.T) { query := fmt.Sprintf("SELECT CAST((%s) AS BINARY), COLLATION(%s), COERCIBILITY(%s)", expr, expr, expr) resultRemote, errRemote := conn.ExecuteFetch(query, 1, false) + env := collations.Local() if errRemote != nil { require.True(t, strings.Contains(errRemote.Error(), "Illegal mix of collations"), "query %s failed: %v", query, errRemote) if errLocal == nil { - t.Errorf("expected %s vs %s to fail coercion: %v", collA.Collation.Name(), collB.Collation.Name(), errRemote) + t.Errorf("expected %s vs %s to fail coercion: %v", env.LookupName(collA.Collation), env.LookupName(collB.Collation), errRemote) continue } require.True(t, strings.HasPrefix(normalizeCollationInError(errRemote.Error()), normalizeCollationInError(errLocal.Error())), "bad error message: expected %q, got %q", errRemote, errLocal) @@ -202,7 +208,7 @@ func TestComparisonSemantics(t *testing.T) { } if errLocal != nil { - t.Errorf("expected %s vs %s to coerce, but they failed: %v", collA.Collation.Name(), collB.Collation.Name(), errLocal) + t.Errorf("expected %s vs %s to coerce, but they failed: %v", env.LookupName(collA.Collation), env.LookupName(collB.Collation), errLocal) continue } diff --git a/go/mysql/collations/integration/collations_test.go b/go/mysql/collations/integration/collations_test.go index 32ffb81a498..3b33e23e2d3 100644 --- a/go/mysql/collations/integration/collations_test.go +++ b/go/mysql/collations/integration/collations_test.go @@ -31,6 +31,8 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/text/encoding/unicode/utf32" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/remote" @@ -140,7 +142,7 @@ func (u *uca900CollationTest) Test(t *testing.T, result *sqltypes.Result) { continue } - weightString := coll.WeightString(make([]byte, 0, 128), utf8Input, 0) + weightString := colldata.Lookup(coll).WeightString(make([]byte, 0, 128), utf8Input, 0) if !bytes.Equal(weightString, expectedWeightString) { t.Errorf("[%s] mismatch for %s (%v): \n\twant: %v\n\tgot: %v", u.collation, row[2].ToString(), utf8Input, expectedWeightString, weightString) errors++ @@ -227,7 +229,7 @@ func TestCollationWithSpace(t *testing.T) { remote := remote.NewCollation(conn, collName) for _, size := range []int{0, codepoints, codepoints + 1, codepoints + 2, 20, 32} { - localWeight := local.WeightString(nil, []byte(ExampleString), size) + localWeight := colldata.Lookup(local).WeightString(nil, []byte(ExampleString), size) remoteWeight := remote.WeightString(nil, []byte(ExampleString), size) require.True(t, bytes.Equal(localWeight, remoteWeight), "mismatch at len=%d\ninput: %#v\nexpected: %#v\nactual: %#v", size, []byte(ExampleString), remoteWeight, localWeight) diff --git a/go/mysql/collations/integration/helpers_test.go b/go/mysql/collations/integration/helpers_test.go index 95410fbb74a..d436280f04b 100644 --- a/go/mysql/collations/integration/helpers_test.go +++ b/go/mysql/collations/integration/helpers_test.go @@ -27,6 +27,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" @@ -52,7 +54,7 @@ func testRemoteWeights(t *testing.T, golden io.Writer, cases []testweight) { t.Run(tc.collation, func(t *testing.T) { local := collations.Local().LookupByName(tc.collation) remote := remote.NewCollation(conn, tc.collation) - localResult := local.WeightString(nil, tc.input, 0) + localResult := colldata.Lookup(local).WeightString(nil, tc.input, 0) remoteResult := remote.WeightString(nil, tc.input, 0) if err := remote.LastError(); err != nil { @@ -85,7 +87,7 @@ func testRemoteComparison(t *testing.T, golden io.Writer, cases []testcmp) { t.Run(tc.collation, func(t *testing.T) { local := collations.Local().LookupByName(tc.collation) remote := remote.NewCollation(conn, tc.collation) - localResult := normalizecmp(local.Collate(tc.left, tc.right, false)) + localResult := normalizecmp(colldata.Lookup(local).Collate(tc.left, tc.right, false)) remoteResult := remote.Collate(tc.left, tc.right, false) if err := remote.LastError(); err != nil { @@ -101,7 +103,7 @@ func testRemoteComparison(t *testing.T, golden io.Writer, cases []testcmp) { } } -func verifyTranscoding(t *testing.T, local collations.Collation, remote *remote.Collation, text []byte) []byte { +func verifyTranscoding(t *testing.T, local colldata.Collation, remote *remote.Collation, text []byte) []byte { transRemote, err := charset.ConvertFromUTF8(nil, remote.Charset(), text) require.NoError(t, err, "remote transcoding failed: %v", err) @@ -112,7 +114,7 @@ func verifyTranscoding(t *testing.T, local collations.Collation, remote *remote. return transLocal } -func verifyWeightString(t *testing.T, local collations.Collation, remote *remote.Collation, text []byte) { +func verifyWeightString(t *testing.T, local colldata.Collation, remote *remote.Collation, text []byte) { localResult := local.WeightString(nil, text, 0) remoteResult := remote.WeightString(nil, text, 0) diff --git a/go/mysql/collations/integration/weight_string_test.go b/go/mysql/collations/integration/weight_string_test.go index c93a9ed586e..170da4f5987 100644 --- a/go/mysql/collations/integration/weight_string_test.go +++ b/go/mysql/collations/integration/weight_string_test.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/mysql/collations/internal/testutil" "vitess.io/vitess/go/mysql/collations/remote" ) @@ -46,7 +47,7 @@ func TestFastIterators(t *testing.T) { func TestWeightStringsComprehensive(t *testing.T) { type collationsForCharset struct { charset charset.Charset - locals []collations.Collation + locals []colldata.Collation remotes []*remote.Collation } var charsetMap = make(map[string]*collationsForCharset) @@ -59,7 +60,7 @@ func TestWeightStringsComprehensive(t *testing.T) { conn := mysqlconn(t) defer conn.Close() - allCollations := collations.Local().AllCollations() + allCollations := colldata.All(collations.Local()) sort.Slice(allCollations, func(i, j int) bool { return allCollations[i].ID() < allCollations[j].ID() }) @@ -103,16 +104,16 @@ func TestCJKWeightStrings(t *testing.T) { conn := mysqlconn(t) defer conn.Close() - allCollations := collations.Local().AllCollations() + allCollations := colldata.All(collations.Local()) testdata, _ := filepath.Glob("../internal/charset/testdata/*.txt") for _, testfile := range testdata { - charset := filepath.Base(testfile) - charset = strings.TrimSuffix(charset, ".txt") - charset = charset[strings.LastIndexByte(charset, '-')+1:] + cs := filepath.Base(testfile) + cs = strings.TrimSuffix(cs, ".txt") + cs = cs[strings.LastIndexByte(cs, '-')+1:] - var valid []collations.Collation + var valid []colldata.Collation for _, coll := range allCollations { - if coll.Charset().Name() == charset { + if coll.Charset().Name() == cs { valid = append(valid, coll) t.Logf("%s -> %s", testfile, coll.Name()) } diff --git a/go/mysql/collations/integration/wildcard_test.go b/go/mysql/collations/integration/wildcard_test.go index a848e5b7867..6475a35dd21 100644 --- a/go/mysql/collations/integration/wildcard_test.go +++ b/go/mysql/collations/integration/wildcard_test.go @@ -22,6 +22,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/mysql/collations/remote" ) @@ -78,7 +79,7 @@ func TestRemoteWildcardMatches(t *testing.T) { {"Ǎḅeçd", "a%bd"}, } - for _, local := range collations.Local().AllCollations() { + for _, local := range colldata.All(collations.Local()) { t.Run(local.Name(), func(t *testing.T) { var remote = remote.NewCollation(conn, local.Name()) var err error diff --git a/go/mysql/collations/internal/uca/contractions.go b/go/mysql/collations/internal/uca/contractions.go index c4ff99d42e2..d894b0e206e 100644 --- a/go/mysql/collations/internal/uca/contractions.go +++ b/go/mysql/collations/internal/uca/contractions.go @@ -18,7 +18,6 @@ package uca import ( "fmt" - "unicode/utf8" "vitess.io/vitess/go/mysql/collations/charset" ) @@ -28,19 +27,6 @@ type trie struct { weights []uint16 } -func (t *trie) walkUTF8(remainder []byte) ([]uint16, []byte) { - if len(remainder) > 0 { - cp, width := utf8.DecodeRune(remainder) - if cp == utf8.RuneError && width < 3 { - return nil, nil - } - if ch := t.children[cp]; ch != nil { - return ch.walkUTF8(remainder[width:]) - } - } - return t.weights, remainder -} - func (t *trie) walkCharset(cs charset.Charset, remainder []byte, depth int) ([]uint16, []byte, int) { if len(remainder) > 0 { cp, width := cs.DecodeRune(remainder) diff --git a/go/mysql/collations/internal/uca/fasttables.go b/go/mysql/collations/internal/uca/fasttables.go index 1995a78a664..40f3718babe 100644 --- a/go/mysql/collations/internal/uca/fasttables.go +++ b/go/mysql/collations/internal/uca/fasttables.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + // Code generated by makecolldata DO NOT EDIT package uca diff --git a/go/mysql/collations/internal/uca/layout.go b/go/mysql/collations/internal/uca/layout.go index a5ee45a0ece..35a2749eb21 100644 --- a/go/mysql/collations/internal/uca/layout.go +++ b/go/mysql/collations/internal/uca/layout.go @@ -17,7 +17,6 @@ limitations under the License. package uca import ( - "reflect" "sync" "unsafe" ) @@ -287,29 +286,29 @@ func (Layout_uca_legacy) applyPatches(page []uint16, offset int, weights []uint1 } type tableWithPatch struct { - tableptr uintptr - patchptr uintptr + tableptr unsafe.Pointer + patchptr unsafe.Pointer } var cachedTables = make(map[tableWithPatch]Weights) var cachedTablesMu sync.Mutex func lookupCachedTable(table Weights, patch []Patch) (Weights, bool) { - hdr1 := (*reflect.SliceHeader)(unsafe.Pointer(&table)) - hdr2 := (*reflect.SliceHeader)(unsafe.Pointer(&patch)) + data1 := unsafe.Pointer(unsafe.SliceData(table)) + data2 := unsafe.Pointer(unsafe.SliceData(patch)) cachedTablesMu.Lock() defer cachedTablesMu.Unlock() - tbl, ok := cachedTables[tableWithPatch{hdr1.Data, hdr2.Data}] + tbl, ok := cachedTables[tableWithPatch{tableptr: data1, patchptr: data2}] return tbl, ok } func storeCachedTable(table Weights, patch []Patch, result Weights) { - hdr1 := (*reflect.SliceHeader)(unsafe.Pointer(&table)) - hdr2 := (*reflect.SliceHeader)(unsafe.Pointer(&patch)) + data1 := unsafe.Pointer(unsafe.SliceData(table)) + data2 := unsafe.Pointer(unsafe.SliceData(patch)) cachedTablesMu.Lock() - cachedTables[tableWithPatch{hdr1.Data, hdr2.Data}] = result + cachedTables[tableWithPatch{tableptr: data1, patchptr: data2}] = result cachedTablesMu.Unlock() } diff --git a/go/mysql/collations/local.go b/go/mysql/collations/local.go index c0d3c10da09..4bbe9a35a9c 100644 --- a/go/mysql/collations/local.go +++ b/go/mysql/collations/local.go @@ -22,6 +22,7 @@ import ( "sync" "vitess.io/vitess/go/internal/flag" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/servenv" ) @@ -46,3 +47,14 @@ func Local() *Environment { func Default() ID { return ID(Local().DefaultConnectionCharset()) } + +func DefaultCollationForType(t sqltypes.Type) ID { + switch { + case sqltypes.IsText(t): + return Default() + case t == sqltypes.TypeJSON: + return CollationUtf8mb4ID + default: + return CollationBinaryID + } +} diff --git a/go/mysql/collations/mysqlversion.go b/go/mysql/collations/mysqlversion.go index 2a1409fbb7e..93d1add9b6a 100644 --- a/go/mysql/collations/mysqlversion.go +++ b/go/mysql/collations/mysqlversion.go @@ -1,11 +1,28 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + // Code generated by makecolldata DO NOT EDIT package collations type collver byte type collalias struct { - mask collver - name string + mask collver + name string + charset string } const ( @@ -16,7 +33,7 @@ const ( collverMariaDB103 collver = 1 << 3 collverMySQL56 collver = 1 << 4 collverMySQL57 collver = 1 << 5 - collverMySQL80 collver = 1 << 6 + collverMySQL8 collver = 1 << 6 ) func (v collver) String() string { @@ -35,405 +52,405 @@ func (v collver) String() string { return "MySQL 5.6" case collverMySQL57: return "MySQL 5.7" - case collverMySQL80: - return "MySQL 8.0" + case collverMySQL8: + return "MySQL 0.8" default: panic("invalid version identifier") } } -func (v collver) charsetAliases() map[string]string { return map[string]string{"utf8": "utf8mb3"} } +func charsetAliases() map[string]string { return map[string]string{"utf8": "utf8mb3"} } var globalVersionInfo = map[ID]struct { alias []collalias isdefault collver }{ - 1: {alias: []collalias{{0b01111111, "big5_chinese_ci"}}, isdefault: 0b01111111}, - 2: {alias: []collalias{{0b01111111, "latin2_czech_cs"}}, isdefault: 0b00000000}, - 3: {alias: []collalias{{0b01111111, "dec8_swedish_ci"}}, isdefault: 0b01111111}, - 4: {alias: []collalias{{0b01111111, "cp850_general_ci"}}, isdefault: 0b01111111}, - 5: {alias: []collalias{{0b01111111, "latin1_german1_ci"}}, isdefault: 0b00000000}, - 6: {alias: []collalias{{0b01111111, "hp8_english_ci"}}, isdefault: 0b01111111}, - 7: {alias: []collalias{{0b01111111, "koi8r_general_ci"}}, isdefault: 0b01111111}, - 8: {alias: []collalias{{0b01111111, "latin1_swedish_ci"}}, isdefault: 0b01111111}, - 9: {alias: []collalias{{0b01111111, "latin2_general_ci"}}, isdefault: 0b01111111}, - 10: {alias: []collalias{{0b01111111, "swe7_swedish_ci"}}, isdefault: 0b01111111}, - 11: {alias: []collalias{{0b01111111, "ascii_general_ci"}}, isdefault: 0b01111111}, - 12: {alias: []collalias{{0b01111111, "ujis_japanese_ci"}}, isdefault: 0b01111111}, - 13: {alias: []collalias{{0b01111111, "sjis_japanese_ci"}}, isdefault: 0b01111111}, - 14: {alias: []collalias{{0b01111111, "cp1251_bulgarian_ci"}}, isdefault: 0b00000000}, - 15: {alias: []collalias{{0b01111111, "latin1_danish_ci"}}, isdefault: 0b00000000}, - 16: {alias: []collalias{{0b01111111, "hebrew_general_ci"}}, isdefault: 0b01111111}, - 18: {alias: []collalias{{0b01111111, "tis620_thai_ci"}}, isdefault: 0b01111111}, - 19: {alias: []collalias{{0b01111111, "euckr_korean_ci"}}, isdefault: 0b01111111}, - 20: {alias: []collalias{{0b01111111, "latin7_estonian_cs"}}, isdefault: 0b00000000}, - 21: {alias: []collalias{{0b01111111, "latin2_hungarian_ci"}}, isdefault: 0b00000000}, - 22: {alias: []collalias{{0b01111111, "koi8u_general_ci"}}, isdefault: 0b01111111}, - 23: {alias: []collalias{{0b01111111, "cp1251_ukrainian_ci"}}, isdefault: 0b00000000}, - 24: {alias: []collalias{{0b01111111, "gb2312_chinese_ci"}}, isdefault: 0b01111111}, - 25: {alias: []collalias{{0b01111111, "greek_general_ci"}}, isdefault: 0b01111111}, - 26: {alias: []collalias{{0b01111111, "cp1250_general_ci"}}, isdefault: 0b01111111}, - 27: {alias: []collalias{{0b01111111, "latin2_croatian_ci"}}, isdefault: 0b00000000}, - 28: {alias: []collalias{{0b01111111, "gbk_chinese_ci"}}, isdefault: 0b01111111}, - 29: {alias: []collalias{{0b01111111, "cp1257_lithuanian_ci"}}, isdefault: 0b00000000}, - 30: {alias: []collalias{{0b01111111, "latin5_turkish_ci"}}, isdefault: 0b01111111}, - 31: {alias: []collalias{{0b01111111, "latin1_german2_ci"}}, isdefault: 0b00000000}, - 32: {alias: []collalias{{0b01111111, "armscii8_general_ci"}}, isdefault: 0b01111111}, - 33: {alias: []collalias{{0b01111111, "utf8_general_ci"}, {0b01111111, "utf8mb3_general_ci"}}, isdefault: 0b01111111}, - 34: {alias: []collalias{{0b01111111, "cp1250_czech_cs"}}, isdefault: 0b00000000}, - 35: {alias: []collalias{{0b01111111, "ucs2_general_ci"}}, isdefault: 0b01111111}, - 36: {alias: []collalias{{0b01111111, "cp866_general_ci"}}, isdefault: 0b01111111}, - 37: {alias: []collalias{{0b01111111, "keybcs2_general_ci"}}, isdefault: 0b01111111}, - 38: {alias: []collalias{{0b01111111, "macce_general_ci"}}, isdefault: 0b01111111}, - 39: {alias: []collalias{{0b01111111, "macroman_general_ci"}}, isdefault: 0b01111111}, - 40: {alias: []collalias{{0b01111111, "cp852_general_ci"}}, isdefault: 0b01111111}, - 41: {alias: []collalias{{0b01111111, "latin7_general_ci"}}, isdefault: 0b01111111}, - 42: {alias: []collalias{{0b01111111, "latin7_general_cs"}}, isdefault: 0b00000000}, - 43: {alias: []collalias{{0b01111111, "macce_bin"}}, isdefault: 0b00000000}, - 44: {alias: []collalias{{0b01111111, "cp1250_croatian_ci"}}, isdefault: 0b00000000}, - 45: {alias: []collalias{{0b01111111, "utf8mb4_general_ci"}}, isdefault: 0b00111111}, - 46: {alias: []collalias{{0b01111111, "utf8mb4_bin"}}, isdefault: 0b00000000}, - 47: {alias: []collalias{{0b01111111, "latin1_bin"}}, isdefault: 0b00000000}, - 48: {alias: []collalias{{0b01111111, "latin1_general_ci"}}, isdefault: 0b00000000}, - 49: {alias: []collalias{{0b01111111, "latin1_general_cs"}}, isdefault: 0b00000000}, - 50: {alias: []collalias{{0b01111111, "cp1251_bin"}}, isdefault: 0b00000000}, - 51: {alias: []collalias{{0b01111111, "cp1251_general_ci"}}, isdefault: 0b01111111}, - 52: {alias: []collalias{{0b01111111, "cp1251_general_cs"}}, isdefault: 0b00000000}, - 53: {alias: []collalias{{0b01111111, "macroman_bin"}}, isdefault: 0b00000000}, - 54: {alias: []collalias{{0b01111111, "utf16_general_ci"}}, isdefault: 0b01111111}, - 55: {alias: []collalias{{0b01111111, "utf16_bin"}}, isdefault: 0b00000000}, - 56: {alias: []collalias{{0b01111111, "utf16le_general_ci"}}, isdefault: 0b01111111}, - 57: {alias: []collalias{{0b01111111, "cp1256_general_ci"}}, isdefault: 0b01111111}, - 58: {alias: []collalias{{0b01111111, "cp1257_bin"}}, isdefault: 0b00000000}, - 59: {alias: []collalias{{0b01111111, "cp1257_general_ci"}}, isdefault: 0b01111111}, - 60: {alias: []collalias{{0b01111111, "utf32_general_ci"}}, isdefault: 0b01111111}, - 61: {alias: []collalias{{0b01111111, "utf32_bin"}}, isdefault: 0b00000000}, - 62: {alias: []collalias{{0b01111111, "utf16le_bin"}}, isdefault: 0b00000000}, - 63: {alias: []collalias{{0b01111111, "binary"}}, isdefault: 0b01111111}, - 64: {alias: []collalias{{0b01111111, "armscii8_bin"}}, isdefault: 0b00000000}, - 65: {alias: []collalias{{0b01111111, "ascii_bin"}}, isdefault: 0b00000000}, - 66: {alias: []collalias{{0b01111111, "cp1250_bin"}}, isdefault: 0b00000000}, - 67: {alias: []collalias{{0b01111111, "cp1256_bin"}}, isdefault: 0b00000000}, - 68: {alias: []collalias{{0b01111111, "cp866_bin"}}, isdefault: 0b00000000}, - 69: {alias: []collalias{{0b01111111, "dec8_bin"}}, isdefault: 0b00000000}, - 70: {alias: []collalias{{0b01111111, "greek_bin"}}, isdefault: 0b00000000}, - 71: {alias: []collalias{{0b01111111, "hebrew_bin"}}, isdefault: 0b00000000}, - 72: {alias: []collalias{{0b01111111, "hp8_bin"}}, isdefault: 0b00000000}, - 73: {alias: []collalias{{0b01111111, "keybcs2_bin"}}, isdefault: 0b00000000}, - 74: {alias: []collalias{{0b01111111, "koi8r_bin"}}, isdefault: 0b00000000}, - 75: {alias: []collalias{{0b01111111, "koi8u_bin"}}, isdefault: 0b00000000}, - 76: {alias: []collalias{{0b01000000, "utf8_tolower_ci"}, {0b01000000, "utf8mb3_tolower_ci"}}, isdefault: 0b00000000}, - 77: {alias: []collalias{{0b01111111, "latin2_bin"}}, isdefault: 0b00000000}, - 78: {alias: []collalias{{0b01111111, "latin5_bin"}}, isdefault: 0b00000000}, - 79: {alias: []collalias{{0b01111111, "latin7_bin"}}, isdefault: 0b00000000}, - 80: {alias: []collalias{{0b01111111, "cp850_bin"}}, isdefault: 0b00000000}, - 81: {alias: []collalias{{0b01111111, "cp852_bin"}}, isdefault: 0b00000000}, - 82: {alias: []collalias{{0b01111111, "swe7_bin"}}, isdefault: 0b00000000}, - 83: {alias: []collalias{{0b01111111, "utf8_bin"}, {0b01111111, "utf8mb3_bin"}}, isdefault: 0b00000000}, - 84: {alias: []collalias{{0b01111111, "big5_bin"}}, isdefault: 0b00000000}, - 85: {alias: []collalias{{0b01111111, "euckr_bin"}}, isdefault: 0b00000000}, - 86: {alias: []collalias{{0b01111111, "gb2312_bin"}}, isdefault: 0b00000000}, - 87: {alias: []collalias{{0b01111111, "gbk_bin"}}, isdefault: 0b00000000}, - 88: {alias: []collalias{{0b01111111, "sjis_bin"}}, isdefault: 0b00000000}, - 89: {alias: []collalias{{0b01111111, "tis620_bin"}}, isdefault: 0b00000000}, - 90: {alias: []collalias{{0b01111111, "ucs2_bin"}}, isdefault: 0b00000000}, - 91: {alias: []collalias{{0b01111111, "ujis_bin"}}, isdefault: 0b00000000}, - 92: {alias: []collalias{{0b01111111, "geostd8_general_ci"}}, isdefault: 0b01111111}, - 93: {alias: []collalias{{0b01111111, "geostd8_bin"}}, isdefault: 0b00000000}, - 94: {alias: []collalias{{0b01111111, "latin1_spanish_ci"}}, isdefault: 0b00000000}, - 95: {alias: []collalias{{0b01111111, "cp932_japanese_ci"}}, isdefault: 0b01111111}, - 96: {alias: []collalias{{0b01111111, "cp932_bin"}}, isdefault: 0b00000000}, - 97: {alias: []collalias{{0b01111111, "eucjpms_japanese_ci"}}, isdefault: 0b01111111}, - 98: {alias: []collalias{{0b01111111, "eucjpms_bin"}}, isdefault: 0b00000000}, - 99: {alias: []collalias{{0b01111111, "cp1250_polish_ci"}}, isdefault: 0b00000000}, - 101: {alias: []collalias{{0b01111111, "utf16_unicode_ci"}}, isdefault: 0b00000000}, - 102: {alias: []collalias{{0b01111111, "utf16_icelandic_ci"}}, isdefault: 0b00000000}, - 103: {alias: []collalias{{0b01111111, "utf16_latvian_ci"}}, isdefault: 0b00000000}, - 104: {alias: []collalias{{0b01111111, "utf16_romanian_ci"}}, isdefault: 0b00000000}, - 105: {alias: []collalias{{0b01111111, "utf16_slovenian_ci"}}, isdefault: 0b00000000}, - 106: {alias: []collalias{{0b01111111, "utf16_polish_ci"}}, isdefault: 0b00000000}, - 107: {alias: []collalias{{0b01111111, "utf16_estonian_ci"}}, isdefault: 0b00000000}, - 108: {alias: []collalias{{0b01111111, "utf16_spanish_ci"}}, isdefault: 0b00000000}, - 109: {alias: []collalias{{0b01111111, "utf16_swedish_ci"}}, isdefault: 0b00000000}, - 110: {alias: []collalias{{0b01111111, "utf16_turkish_ci"}}, isdefault: 0b00000000}, - 111: {alias: []collalias{{0b01111111, "utf16_czech_ci"}}, isdefault: 0b00000000}, - 112: {alias: []collalias{{0b01111111, "utf16_danish_ci"}}, isdefault: 0b00000000}, - 113: {alias: []collalias{{0b01111111, "utf16_lithuanian_ci"}}, isdefault: 0b00000000}, - 114: {alias: []collalias{{0b01111111, "utf16_slovak_ci"}}, isdefault: 0b00000000}, - 115: {alias: []collalias{{0b01111111, "utf16_spanish2_ci"}}, isdefault: 0b00000000}, - 116: {alias: []collalias{{0b01111111, "utf16_roman_ci"}}, isdefault: 0b00000000}, - 117: {alias: []collalias{{0b01111111, "utf16_persian_ci"}}, isdefault: 0b00000000}, - 118: {alias: []collalias{{0b01111111, "utf16_esperanto_ci"}}, isdefault: 0b00000000}, - 119: {alias: []collalias{{0b01111111, "utf16_hungarian_ci"}}, isdefault: 0b00000000}, - 120: {alias: []collalias{{0b01111111, "utf16_sinhala_ci"}}, isdefault: 0b00000000}, - 121: {alias: []collalias{{0b01111111, "utf16_german2_ci"}}, isdefault: 0b00000000}, - 122: {alias: []collalias{{0b01110000, "utf16_croatian_ci"}, {0b00001111, "utf16_croatian_mysql561_ci"}}, isdefault: 0b00000000}, - 123: {alias: []collalias{{0b01111111, "utf16_unicode_520_ci"}}, isdefault: 0b00000000}, - 124: {alias: []collalias{{0b01111111, "utf16_vietnamese_ci"}}, isdefault: 0b00000000}, - 128: {alias: []collalias{{0b01111111, "ucs2_unicode_ci"}}, isdefault: 0b00000000}, - 129: {alias: []collalias{{0b01111111, "ucs2_icelandic_ci"}}, isdefault: 0b00000000}, - 130: {alias: []collalias{{0b01111111, "ucs2_latvian_ci"}}, isdefault: 0b00000000}, - 131: {alias: []collalias{{0b01111111, "ucs2_romanian_ci"}}, isdefault: 0b00000000}, - 132: {alias: []collalias{{0b01111111, "ucs2_slovenian_ci"}}, isdefault: 0b00000000}, - 133: {alias: []collalias{{0b01111111, "ucs2_polish_ci"}}, isdefault: 0b00000000}, - 134: {alias: []collalias{{0b01111111, "ucs2_estonian_ci"}}, isdefault: 0b00000000}, - 135: {alias: []collalias{{0b01111111, "ucs2_spanish_ci"}}, isdefault: 0b00000000}, - 136: {alias: []collalias{{0b01111111, "ucs2_swedish_ci"}}, isdefault: 0b00000000}, - 137: {alias: []collalias{{0b01111111, "ucs2_turkish_ci"}}, isdefault: 0b00000000}, - 138: {alias: []collalias{{0b01111111, "ucs2_czech_ci"}}, isdefault: 0b00000000}, - 139: {alias: []collalias{{0b01111111, "ucs2_danish_ci"}}, isdefault: 0b00000000}, - 140: {alias: []collalias{{0b01111111, "ucs2_lithuanian_ci"}}, isdefault: 0b00000000}, - 141: {alias: []collalias{{0b01111111, "ucs2_slovak_ci"}}, isdefault: 0b00000000}, - 142: {alias: []collalias{{0b01111111, "ucs2_spanish2_ci"}}, isdefault: 0b00000000}, - 143: {alias: []collalias{{0b01111111, "ucs2_roman_ci"}}, isdefault: 0b00000000}, - 144: {alias: []collalias{{0b01111111, "ucs2_persian_ci"}}, isdefault: 0b00000000}, - 145: {alias: []collalias{{0b01111111, "ucs2_esperanto_ci"}}, isdefault: 0b00000000}, - 146: {alias: []collalias{{0b01111111, "ucs2_hungarian_ci"}}, isdefault: 0b00000000}, - 147: {alias: []collalias{{0b01111111, "ucs2_sinhala_ci"}}, isdefault: 0b00000000}, - 148: {alias: []collalias{{0b01111111, "ucs2_german2_ci"}}, isdefault: 0b00000000}, - 149: {alias: []collalias{{0b01110000, "ucs2_croatian_ci"}, {0b00001111, "ucs2_croatian_mysql561_ci"}}, isdefault: 0b00000000}, - 150: {alias: []collalias{{0b01111111, "ucs2_unicode_520_ci"}}, isdefault: 0b00000000}, - 151: {alias: []collalias{{0b01111111, "ucs2_vietnamese_ci"}}, isdefault: 0b00000000}, - 159: {alias: []collalias{{0b01111111, "ucs2_general_mysql500_ci"}}, isdefault: 0b00000000}, - 160: {alias: []collalias{{0b01111111, "utf32_unicode_ci"}}, isdefault: 0b00000000}, - 161: {alias: []collalias{{0b01111111, "utf32_icelandic_ci"}}, isdefault: 0b00000000}, - 162: {alias: []collalias{{0b01111111, "utf32_latvian_ci"}}, isdefault: 0b00000000}, - 163: {alias: []collalias{{0b01111111, "utf32_romanian_ci"}}, isdefault: 0b00000000}, - 164: {alias: []collalias{{0b01111111, "utf32_slovenian_ci"}}, isdefault: 0b00000000}, - 165: {alias: []collalias{{0b01111111, "utf32_polish_ci"}}, isdefault: 0b00000000}, - 166: {alias: []collalias{{0b01111111, "utf32_estonian_ci"}}, isdefault: 0b00000000}, - 167: {alias: []collalias{{0b01111111, "utf32_spanish_ci"}}, isdefault: 0b00000000}, - 168: {alias: []collalias{{0b01111111, "utf32_swedish_ci"}}, isdefault: 0b00000000}, - 169: {alias: []collalias{{0b01111111, "utf32_turkish_ci"}}, isdefault: 0b00000000}, - 170: {alias: []collalias{{0b01111111, "utf32_czech_ci"}}, isdefault: 0b00000000}, - 171: {alias: []collalias{{0b01111111, "utf32_danish_ci"}}, isdefault: 0b00000000}, - 172: {alias: []collalias{{0b01111111, "utf32_lithuanian_ci"}}, isdefault: 0b00000000}, - 173: {alias: []collalias{{0b01111111, "utf32_slovak_ci"}}, isdefault: 0b00000000}, - 174: {alias: []collalias{{0b01111111, "utf32_spanish2_ci"}}, isdefault: 0b00000000}, - 175: {alias: []collalias{{0b01111111, "utf32_roman_ci"}}, isdefault: 0b00000000}, - 176: {alias: []collalias{{0b01111111, "utf32_persian_ci"}}, isdefault: 0b00000000}, - 177: {alias: []collalias{{0b01111111, "utf32_esperanto_ci"}}, isdefault: 0b00000000}, - 178: {alias: []collalias{{0b01111111, "utf32_hungarian_ci"}}, isdefault: 0b00000000}, - 179: {alias: []collalias{{0b01111111, "utf32_sinhala_ci"}}, isdefault: 0b00000000}, - 180: {alias: []collalias{{0b01111111, "utf32_german2_ci"}}, isdefault: 0b00000000}, - 181: {alias: []collalias{{0b01110000, "utf32_croatian_ci"}, {0b00001111, "utf32_croatian_mysql561_ci"}}, isdefault: 0b00000000}, - 182: {alias: []collalias{{0b01111111, "utf32_unicode_520_ci"}}, isdefault: 0b00000000}, - 183: {alias: []collalias{{0b01111111, "utf32_vietnamese_ci"}}, isdefault: 0b00000000}, - 192: {alias: []collalias{{0b01111111, "utf8_unicode_ci"}, {0b01111111, "utf8mb3_unicode_ci"}}, isdefault: 0b00000000}, - 193: {alias: []collalias{{0b01111111, "utf8_icelandic_ci"}, {0b01111111, "utf8mb3_icelandic_ci"}}, isdefault: 0b00000000}, - 194: {alias: []collalias{{0b01111111, "utf8_latvian_ci"}, {0b01111111, "utf8mb3_latvian_ci"}}, isdefault: 0b00000000}, - 195: {alias: []collalias{{0b01111111, "utf8_romanian_ci"}, {0b01111111, "utf8mb3_romanian_ci"}}, isdefault: 0b00000000}, - 196: {alias: []collalias{{0b01111111, "utf8_slovenian_ci"}, {0b01111111, "utf8mb3_slovenian_ci"}}, isdefault: 0b00000000}, - 197: {alias: []collalias{{0b01111111, "utf8_polish_ci"}, {0b01111111, "utf8mb3_polish_ci"}}, isdefault: 0b00000000}, - 198: {alias: []collalias{{0b01111111, "utf8_estonian_ci"}, {0b01111111, "utf8mb3_estonian_ci"}}, isdefault: 0b00000000}, - 199: {alias: []collalias{{0b01111111, "utf8_spanish_ci"}, {0b01111111, "utf8mb3_spanish_ci"}}, isdefault: 0b00000000}, - 200: {alias: []collalias{{0b01111111, "utf8_swedish_ci"}, {0b01111111, "utf8mb3_swedish_ci"}}, isdefault: 0b00000000}, - 201: {alias: []collalias{{0b01111111, "utf8_turkish_ci"}, {0b01111111, "utf8mb3_turkish_ci"}}, isdefault: 0b00000000}, - 202: {alias: []collalias{{0b01111111, "utf8_czech_ci"}, {0b01111111, "utf8mb3_czech_ci"}}, isdefault: 0b00000000}, - 203: {alias: []collalias{{0b01111111, "utf8_danish_ci"}, {0b01111111, "utf8mb3_danish_ci"}}, isdefault: 0b00000000}, - 204: {alias: []collalias{{0b01111111, "utf8_lithuanian_ci"}, {0b01111111, "utf8mb3_lithuanian_ci"}}, isdefault: 0b00000000}, - 205: {alias: []collalias{{0b01111111, "utf8_slovak_ci"}, {0b01111111, "utf8mb3_slovak_ci"}}, isdefault: 0b00000000}, - 206: {alias: []collalias{{0b01111111, "utf8_spanish2_ci"}, {0b01111111, "utf8mb3_spanish2_ci"}}, isdefault: 0b00000000}, - 207: {alias: []collalias{{0b01111111, "utf8_roman_ci"}, {0b01111111, "utf8mb3_roman_ci"}}, isdefault: 0b00000000}, - 208: {alias: []collalias{{0b01111111, "utf8_persian_ci"}, {0b01111111, "utf8mb3_persian_ci"}}, isdefault: 0b00000000}, - 209: {alias: []collalias{{0b01111111, "utf8_esperanto_ci"}, {0b01111111, "utf8mb3_esperanto_ci"}}, isdefault: 0b00000000}, - 210: {alias: []collalias{{0b01111111, "utf8_hungarian_ci"}, {0b01111111, "utf8mb3_hungarian_ci"}}, isdefault: 0b00000000}, - 211: {alias: []collalias{{0b01111111, "utf8_sinhala_ci"}, {0b01111111, "utf8mb3_sinhala_ci"}}, isdefault: 0b00000000}, - 212: {alias: []collalias{{0b01111111, "utf8_german2_ci"}, {0b01111111, "utf8mb3_german2_ci"}}, isdefault: 0b00000000}, - 213: {alias: []collalias{{0b01110000, "utf8_croatian_ci"}, {0b00001111, "utf8_croatian_mysql561_ci"}, {0b01110000, "utf8mb3_croatian_ci"}, {0b00001111, "utf8mb3_croatian_mysql561_ci"}}, isdefault: 0b00000000}, - 214: {alias: []collalias{{0b01111111, "utf8_unicode_520_ci"}, {0b01111111, "utf8mb3_unicode_520_ci"}}, isdefault: 0b00000000}, - 215: {alias: []collalias{{0b01111111, "utf8_vietnamese_ci"}, {0b01111111, "utf8mb3_vietnamese_ci"}}, isdefault: 0b00000000}, - 223: {alias: []collalias{{0b01111111, "utf8_general_mysql500_ci"}, {0b01111111, "utf8mb3_general_mysql500_ci"}}, isdefault: 0b00000000}, - 224: {alias: []collalias{{0b01111111, "utf8mb4_unicode_ci"}}, isdefault: 0b00000000}, - 225: {alias: []collalias{{0b01111111, "utf8mb4_icelandic_ci"}}, isdefault: 0b00000000}, - 226: {alias: []collalias{{0b01111111, "utf8mb4_latvian_ci"}}, isdefault: 0b00000000}, - 227: {alias: []collalias{{0b01111111, "utf8mb4_romanian_ci"}}, isdefault: 0b00000000}, - 228: {alias: []collalias{{0b01111111, "utf8mb4_slovenian_ci"}}, isdefault: 0b00000000}, - 229: {alias: []collalias{{0b01111111, "utf8mb4_polish_ci"}}, isdefault: 0b00000000}, - 230: {alias: []collalias{{0b01111111, "utf8mb4_estonian_ci"}}, isdefault: 0b00000000}, - 231: {alias: []collalias{{0b01111111, "utf8mb4_spanish_ci"}}, isdefault: 0b00000000}, - 232: {alias: []collalias{{0b01111111, "utf8mb4_swedish_ci"}}, isdefault: 0b00000000}, - 233: {alias: []collalias{{0b01111111, "utf8mb4_turkish_ci"}}, isdefault: 0b00000000}, - 234: {alias: []collalias{{0b01111111, "utf8mb4_czech_ci"}}, isdefault: 0b00000000}, - 235: {alias: []collalias{{0b01111111, "utf8mb4_danish_ci"}}, isdefault: 0b00000000}, - 236: {alias: []collalias{{0b01111111, "utf8mb4_lithuanian_ci"}}, isdefault: 0b00000000}, - 237: {alias: []collalias{{0b01111111, "utf8mb4_slovak_ci"}}, isdefault: 0b00000000}, - 238: {alias: []collalias{{0b01111111, "utf8mb4_spanish2_ci"}}, isdefault: 0b00000000}, - 239: {alias: []collalias{{0b01111111, "utf8mb4_roman_ci"}}, isdefault: 0b00000000}, - 240: {alias: []collalias{{0b01111111, "utf8mb4_persian_ci"}}, isdefault: 0b00000000}, - 241: {alias: []collalias{{0b01111111, "utf8mb4_esperanto_ci"}}, isdefault: 0b00000000}, - 242: {alias: []collalias{{0b01111111, "utf8mb4_hungarian_ci"}}, isdefault: 0b00000000}, - 243: {alias: []collalias{{0b01111111, "utf8mb4_sinhala_ci"}}, isdefault: 0b00000000}, - 244: {alias: []collalias{{0b01111111, "utf8mb4_german2_ci"}}, isdefault: 0b00000000}, - 245: {alias: []collalias{{0b01110000, "utf8mb4_croatian_ci"}, {0b00001111, "utf8mb4_croatian_mysql561_ci"}}, isdefault: 0b00000000}, - 246: {alias: []collalias{{0b01111111, "utf8mb4_unicode_520_ci"}}, isdefault: 0b00000000}, - 247: {alias: []collalias{{0b01111111, "utf8mb4_vietnamese_ci"}}, isdefault: 0b00000000}, - 248: {alias: []collalias{{0b01100000, "gb18030_chinese_ci"}}, isdefault: 0b01100000}, - 249: {alias: []collalias{{0b01100000, "gb18030_bin"}}, isdefault: 0b00000000}, - 250: {alias: []collalias{{0b01100000, "gb18030_unicode_520_ci"}}, isdefault: 0b00000000}, - 255: {alias: []collalias{{0b01000000, "utf8mb4_0900_ai_ci"}}, isdefault: 0b01000000}, - 256: {alias: []collalias{{0b01000000, "utf8mb4_de_pb_0900_ai_ci"}}, isdefault: 0b00000000}, - 257: {alias: []collalias{{0b01000000, "utf8mb4_is_0900_ai_ci"}}, isdefault: 0b00000000}, - 258: {alias: []collalias{{0b01000000, "utf8mb4_lv_0900_ai_ci"}}, isdefault: 0b00000000}, - 259: {alias: []collalias{{0b01000000, "utf8mb4_ro_0900_ai_ci"}}, isdefault: 0b00000000}, - 260: {alias: []collalias{{0b01000000, "utf8mb4_sl_0900_ai_ci"}}, isdefault: 0b00000000}, - 261: {alias: []collalias{{0b01000000, "utf8mb4_pl_0900_ai_ci"}}, isdefault: 0b00000000}, - 262: {alias: []collalias{{0b01000000, "utf8mb4_et_0900_ai_ci"}}, isdefault: 0b00000000}, - 263: {alias: []collalias{{0b01000000, "utf8mb4_es_0900_ai_ci"}}, isdefault: 0b00000000}, - 264: {alias: []collalias{{0b01000000, "utf8mb4_sv_0900_ai_ci"}}, isdefault: 0b00000000}, - 265: {alias: []collalias{{0b01000000, "utf8mb4_tr_0900_ai_ci"}}, isdefault: 0b00000000}, - 266: {alias: []collalias{{0b01000000, "utf8mb4_cs_0900_ai_ci"}}, isdefault: 0b00000000}, - 267: {alias: []collalias{{0b01000000, "utf8mb4_da_0900_ai_ci"}}, isdefault: 0b00000000}, - 268: {alias: []collalias{{0b01000000, "utf8mb4_lt_0900_ai_ci"}}, isdefault: 0b00000000}, - 269: {alias: []collalias{{0b01000000, "utf8mb4_sk_0900_ai_ci"}}, isdefault: 0b00000000}, - 270: {alias: []collalias{{0b01000000, "utf8mb4_es_trad_0900_ai_ci"}}, isdefault: 0b00000000}, - 271: {alias: []collalias{{0b01000000, "utf8mb4_la_0900_ai_ci"}}, isdefault: 0b00000000}, - 273: {alias: []collalias{{0b01000000, "utf8mb4_eo_0900_ai_ci"}}, isdefault: 0b00000000}, - 274: {alias: []collalias{{0b01000000, "utf8mb4_hu_0900_ai_ci"}}, isdefault: 0b00000000}, - 275: {alias: []collalias{{0b01000000, "utf8mb4_hr_0900_ai_ci"}}, isdefault: 0b00000000}, - 277: {alias: []collalias{{0b01000000, "utf8mb4_vi_0900_ai_ci"}}, isdefault: 0b00000000}, - 278: {alias: []collalias{{0b01000000, "utf8mb4_0900_as_cs"}}, isdefault: 0b00000000}, - 279: {alias: []collalias{{0b01000000, "utf8mb4_de_pb_0900_as_cs"}}, isdefault: 0b00000000}, - 280: {alias: []collalias{{0b01000000, "utf8mb4_is_0900_as_cs"}}, isdefault: 0b00000000}, - 281: {alias: []collalias{{0b01000000, "utf8mb4_lv_0900_as_cs"}}, isdefault: 0b00000000}, - 282: {alias: []collalias{{0b01000000, "utf8mb4_ro_0900_as_cs"}}, isdefault: 0b00000000}, - 283: {alias: []collalias{{0b01000000, "utf8mb4_sl_0900_as_cs"}}, isdefault: 0b00000000}, - 284: {alias: []collalias{{0b01000000, "utf8mb4_pl_0900_as_cs"}}, isdefault: 0b00000000}, - 285: {alias: []collalias{{0b01000000, "utf8mb4_et_0900_as_cs"}}, isdefault: 0b00000000}, - 286: {alias: []collalias{{0b01000000, "utf8mb4_es_0900_as_cs"}}, isdefault: 0b00000000}, - 287: {alias: []collalias{{0b01000000, "utf8mb4_sv_0900_as_cs"}}, isdefault: 0b00000000}, - 288: {alias: []collalias{{0b01000000, "utf8mb4_tr_0900_as_cs"}}, isdefault: 0b00000000}, - 289: {alias: []collalias{{0b01000000, "utf8mb4_cs_0900_as_cs"}}, isdefault: 0b00000000}, - 290: {alias: []collalias{{0b01000000, "utf8mb4_da_0900_as_cs"}}, isdefault: 0b00000000}, - 291: {alias: []collalias{{0b01000000, "utf8mb4_lt_0900_as_cs"}}, isdefault: 0b00000000}, - 292: {alias: []collalias{{0b01000000, "utf8mb4_sk_0900_as_cs"}}, isdefault: 0b00000000}, - 293: {alias: []collalias{{0b01000000, "utf8mb4_es_trad_0900_as_cs"}}, isdefault: 0b00000000}, - 294: {alias: []collalias{{0b01000000, "utf8mb4_la_0900_as_cs"}}, isdefault: 0b00000000}, - 296: {alias: []collalias{{0b01000000, "utf8mb4_eo_0900_as_cs"}}, isdefault: 0b00000000}, - 297: {alias: []collalias{{0b01000000, "utf8mb4_hu_0900_as_cs"}}, isdefault: 0b00000000}, - 298: {alias: []collalias{{0b01000000, "utf8mb4_hr_0900_as_cs"}}, isdefault: 0b00000000}, - 300: {alias: []collalias{{0b01000000, "utf8mb4_vi_0900_as_cs"}}, isdefault: 0b00000000}, - 303: {alias: []collalias{{0b01000000, "utf8mb4_ja_0900_as_cs"}}, isdefault: 0b00000000}, - 304: {alias: []collalias{{0b01000000, "utf8mb4_ja_0900_as_cs_ks"}}, isdefault: 0b00000000}, - 305: {alias: []collalias{{0b01000000, "utf8mb4_0900_as_ci"}}, isdefault: 0b00000000}, - 306: {alias: []collalias{{0b01000000, "utf8mb4_ru_0900_ai_ci"}}, isdefault: 0b00000000}, - 307: {alias: []collalias{{0b01000000, "utf8mb4_ru_0900_as_cs"}}, isdefault: 0b00000000}, - 308: {alias: []collalias{{0b01000000, "utf8mb4_zh_0900_as_cs"}}, isdefault: 0b00000000}, - 309: {alias: []collalias{{0b01000000, "utf8mb4_0900_bin"}}, isdefault: 0b00000000}, - 310: {alias: []collalias{{0b01000000, "utf8mb4_nb_0900_ai_ci"}}, isdefault: 0b00000000}, - 311: {alias: []collalias{{0b01000000, "utf8mb4_nb_0900_as_cs"}}, isdefault: 0b00000000}, - 312: {alias: []collalias{{0b01000000, "utf8mb4_nn_0900_ai_ci"}}, isdefault: 0b00000000}, - 313: {alias: []collalias{{0b01000000, "utf8mb4_nn_0900_as_cs"}}, isdefault: 0b00000000}, - 314: {alias: []collalias{{0b01000000, "utf8mb4_sr_latn_0900_ai_ci"}}, isdefault: 0b00000000}, - 315: {alias: []collalias{{0b01000000, "utf8mb4_sr_latn_0900_as_cs"}}, isdefault: 0b00000000}, - 316: {alias: []collalias{{0b01000000, "utf8mb4_bs_0900_ai_ci"}}, isdefault: 0b00000000}, - 317: {alias: []collalias{{0b01000000, "utf8mb4_bs_0900_as_cs"}}, isdefault: 0b00000000}, - 318: {alias: []collalias{{0b01000000, "utf8mb4_bg_0900_ai_ci"}}, isdefault: 0b00000000}, - 319: {alias: []collalias{{0b01000000, "utf8mb4_bg_0900_as_cs"}}, isdefault: 0b00000000}, - 320: {alias: []collalias{{0b01000000, "utf8mb4_gl_0900_ai_ci"}}, isdefault: 0b00000000}, - 321: {alias: []collalias{{0b01000000, "utf8mb4_gl_0900_as_cs"}}, isdefault: 0b00000000}, - 322: {alias: []collalias{{0b01000000, "utf8mb4_mn_cyrl_0900_ai_ci"}}, isdefault: 0b00000000}, - 323: {alias: []collalias{{0b01000000, "utf8mb4_mn_cyrl_0900_as_cs"}}, isdefault: 0b00000000}, - 576: {alias: []collalias{{0b00001111, "utf8_croatian_ci"}, {0b00001111, "utf8mb3_croatian_ci"}}, isdefault: 0b00000000}, - 577: {alias: []collalias{{0b00001111, "utf8_myanmar_ci"}, {0b00001111, "utf8mb3_myanmar_ci"}}, isdefault: 0b00000000}, - 578: {alias: []collalias{{0b00001110, "utf8_thai_520_w2"}, {0b00001110, "utf8mb3_thai_520_w2"}}, isdefault: 0b00000000}, - 608: {alias: []collalias{{0b00001111, "utf8mb4_croatian_ci"}}, isdefault: 0b00000000}, - 609: {alias: []collalias{{0b00001111, "utf8mb4_myanmar_ci"}}, isdefault: 0b00000000}, - 610: {alias: []collalias{{0b00001110, "utf8mb4_thai_520_w2"}}, isdefault: 0b00000000}, - 640: {alias: []collalias{{0b00001111, "ucs2_croatian_ci"}}, isdefault: 0b00000000}, - 641: {alias: []collalias{{0b00001111, "ucs2_myanmar_ci"}}, isdefault: 0b00000000}, - 642: {alias: []collalias{{0b00001110, "ucs2_thai_520_w2"}}, isdefault: 0b00000000}, - 672: {alias: []collalias{{0b00001111, "utf16_croatian_ci"}}, isdefault: 0b00000000}, - 673: {alias: []collalias{{0b00001111, "utf16_myanmar_ci"}}, isdefault: 0b00000000}, - 674: {alias: []collalias{{0b00001110, "utf16_thai_520_w2"}}, isdefault: 0b00000000}, - 736: {alias: []collalias{{0b00001111, "utf32_croatian_ci"}}, isdefault: 0b00000000}, - 737: {alias: []collalias{{0b00001111, "utf32_myanmar_ci"}}, isdefault: 0b00000000}, - 738: {alias: []collalias{{0b00001110, "utf32_thai_520_w2"}}, isdefault: 0b00000000}, - 1025: {alias: []collalias{{0b00001100, "big5_chinese_nopad_ci"}}, isdefault: 0b00000000}, - 1027: {alias: []collalias{{0b00001100, "dec8_swedish_nopad_ci"}}, isdefault: 0b00000000}, - 1028: {alias: []collalias{{0b00001100, "cp850_general_nopad_ci"}}, isdefault: 0b00000000}, - 1030: {alias: []collalias{{0b00001100, "hp8_english_nopad_ci"}}, isdefault: 0b00000000}, - 1031: {alias: []collalias{{0b00001100, "koi8r_general_nopad_ci"}}, isdefault: 0b00000000}, - 1032: {alias: []collalias{{0b00001100, "latin1_swedish_nopad_ci"}}, isdefault: 0b00000000}, - 1033: {alias: []collalias{{0b00001100, "latin2_general_nopad_ci"}}, isdefault: 0b00000000}, - 1034: {alias: []collalias{{0b00001100, "swe7_swedish_nopad_ci"}}, isdefault: 0b00000000}, - 1035: {alias: []collalias{{0b00001100, "ascii_general_nopad_ci"}}, isdefault: 0b00000000}, - 1036: {alias: []collalias{{0b00001100, "ujis_japanese_nopad_ci"}}, isdefault: 0b00000000}, - 1037: {alias: []collalias{{0b00001100, "sjis_japanese_nopad_ci"}}, isdefault: 0b00000000}, - 1040: {alias: []collalias{{0b00001100, "hebrew_general_nopad_ci"}}, isdefault: 0b00000000}, - 1042: {alias: []collalias{{0b00001100, "tis620_thai_nopad_ci"}}, isdefault: 0b00000000}, - 1043: {alias: []collalias{{0b00001100, "euckr_korean_nopad_ci"}}, isdefault: 0b00000000}, - 1046: {alias: []collalias{{0b00001100, "koi8u_general_nopad_ci"}}, isdefault: 0b00000000}, - 1048: {alias: []collalias{{0b00001100, "gb2312_chinese_nopad_ci"}}, isdefault: 0b00000000}, - 1049: {alias: []collalias{{0b00001100, "greek_general_nopad_ci"}}, isdefault: 0b00000000}, - 1050: {alias: []collalias{{0b00001100, "cp1250_general_nopad_ci"}}, isdefault: 0b00000000}, - 1052: {alias: []collalias{{0b00001100, "gbk_chinese_nopad_ci"}}, isdefault: 0b00000000}, - 1054: {alias: []collalias{{0b00001100, "latin5_turkish_nopad_ci"}}, isdefault: 0b00000000}, - 1056: {alias: []collalias{{0b00001100, "armscii8_general_nopad_ci"}}, isdefault: 0b00000000}, - 1057: {alias: []collalias{{0b00001100, "utf8_general_nopad_ci"}, {0b00001100, "utf8mb3_general_nopad_ci"}}, isdefault: 0b00000000}, - 1059: {alias: []collalias{{0b00001100, "ucs2_general_nopad_ci"}}, isdefault: 0b00000000}, - 1060: {alias: []collalias{{0b00001100, "cp866_general_nopad_ci"}}, isdefault: 0b00000000}, - 1061: {alias: []collalias{{0b00001100, "keybcs2_general_nopad_ci"}}, isdefault: 0b00000000}, - 1062: {alias: []collalias{{0b00001100, "macce_general_nopad_ci"}}, isdefault: 0b00000000}, - 1063: {alias: []collalias{{0b00001100, "macroman_general_nopad_ci"}}, isdefault: 0b00000000}, - 1064: {alias: []collalias{{0b00001100, "cp852_general_nopad_ci"}}, isdefault: 0b00000000}, - 1065: {alias: []collalias{{0b00001100, "latin7_general_nopad_ci"}}, isdefault: 0b00000000}, - 1067: {alias: []collalias{{0b00001100, "macce_nopad_bin"}}, isdefault: 0b00000000}, - 1069: {alias: []collalias{{0b00001100, "utf8mb4_general_nopad_ci"}}, isdefault: 0b00000000}, - 1070: {alias: []collalias{{0b00001100, "utf8mb4_nopad_bin"}}, isdefault: 0b00000000}, - 1071: {alias: []collalias{{0b00001100, "latin1_nopad_bin"}}, isdefault: 0b00000000}, - 1074: {alias: []collalias{{0b00001100, "cp1251_nopad_bin"}}, isdefault: 0b00000000}, - 1075: {alias: []collalias{{0b00001100, "cp1251_general_nopad_ci"}}, isdefault: 0b00000000}, - 1077: {alias: []collalias{{0b00001100, "macroman_nopad_bin"}}, isdefault: 0b00000000}, - 1078: {alias: []collalias{{0b00001100, "utf16_general_nopad_ci"}}, isdefault: 0b00000000}, - 1079: {alias: []collalias{{0b00001100, "utf16_nopad_bin"}}, isdefault: 0b00000000}, - 1080: {alias: []collalias{{0b00001100, "utf16le_general_nopad_ci"}}, isdefault: 0b00000000}, - 1081: {alias: []collalias{{0b00001100, "cp1256_general_nopad_ci"}}, isdefault: 0b00000000}, - 1082: {alias: []collalias{{0b00001100, "cp1257_nopad_bin"}}, isdefault: 0b00000000}, - 1083: {alias: []collalias{{0b00001100, "cp1257_general_nopad_ci"}}, isdefault: 0b00000000}, - 1084: {alias: []collalias{{0b00001100, "utf32_general_nopad_ci"}}, isdefault: 0b00000000}, - 1085: {alias: []collalias{{0b00001100, "utf32_nopad_bin"}}, isdefault: 0b00000000}, - 1086: {alias: []collalias{{0b00001100, "utf16le_nopad_bin"}}, isdefault: 0b00000000}, - 1088: {alias: []collalias{{0b00001100, "armscii8_nopad_bin"}}, isdefault: 0b00000000}, - 1089: {alias: []collalias{{0b00001100, "ascii_nopad_bin"}}, isdefault: 0b00000000}, - 1090: {alias: []collalias{{0b00001100, "cp1250_nopad_bin"}}, isdefault: 0b00000000}, - 1091: {alias: []collalias{{0b00001100, "cp1256_nopad_bin"}}, isdefault: 0b00000000}, - 1092: {alias: []collalias{{0b00001100, "cp866_nopad_bin"}}, isdefault: 0b00000000}, - 1093: {alias: []collalias{{0b00001100, "dec8_nopad_bin"}}, isdefault: 0b00000000}, - 1094: {alias: []collalias{{0b00001100, "greek_nopad_bin"}}, isdefault: 0b00000000}, - 1095: {alias: []collalias{{0b00001100, "hebrew_nopad_bin"}}, isdefault: 0b00000000}, - 1096: {alias: []collalias{{0b00001100, "hp8_nopad_bin"}}, isdefault: 0b00000000}, - 1097: {alias: []collalias{{0b00001100, "keybcs2_nopad_bin"}}, isdefault: 0b00000000}, - 1098: {alias: []collalias{{0b00001100, "koi8r_nopad_bin"}}, isdefault: 0b00000000}, - 1099: {alias: []collalias{{0b00001100, "koi8u_nopad_bin"}}, isdefault: 0b00000000}, - 1101: {alias: []collalias{{0b00001100, "latin2_nopad_bin"}}, isdefault: 0b00000000}, - 1102: {alias: []collalias{{0b00001100, "latin5_nopad_bin"}}, isdefault: 0b00000000}, - 1103: {alias: []collalias{{0b00001100, "latin7_nopad_bin"}}, isdefault: 0b00000000}, - 1104: {alias: []collalias{{0b00001100, "cp850_nopad_bin"}}, isdefault: 0b00000000}, - 1105: {alias: []collalias{{0b00001100, "cp852_nopad_bin"}}, isdefault: 0b00000000}, - 1106: {alias: []collalias{{0b00001100, "swe7_nopad_bin"}}, isdefault: 0b00000000}, - 1107: {alias: []collalias{{0b00001100, "utf8_nopad_bin"}, {0b00001100, "utf8mb3_nopad_bin"}}, isdefault: 0b00000000}, - 1108: {alias: []collalias{{0b00001100, "big5_nopad_bin"}}, isdefault: 0b00000000}, - 1109: {alias: []collalias{{0b00001100, "euckr_nopad_bin"}}, isdefault: 0b00000000}, - 1110: {alias: []collalias{{0b00001100, "gb2312_nopad_bin"}}, isdefault: 0b00000000}, - 1111: {alias: []collalias{{0b00001100, "gbk_nopad_bin"}}, isdefault: 0b00000000}, - 1112: {alias: []collalias{{0b00001100, "sjis_nopad_bin"}}, isdefault: 0b00000000}, - 1113: {alias: []collalias{{0b00001100, "tis620_nopad_bin"}}, isdefault: 0b00000000}, - 1114: {alias: []collalias{{0b00001100, "ucs2_nopad_bin"}}, isdefault: 0b00000000}, - 1115: {alias: []collalias{{0b00001100, "ujis_nopad_bin"}}, isdefault: 0b00000000}, - 1116: {alias: []collalias{{0b00001100, "geostd8_general_nopad_ci"}}, isdefault: 0b00000000}, - 1117: {alias: []collalias{{0b00001100, "geostd8_nopad_bin"}}, isdefault: 0b00000000}, - 1119: {alias: []collalias{{0b00001100, "cp932_japanese_nopad_ci"}}, isdefault: 0b00000000}, - 1120: {alias: []collalias{{0b00001100, "cp932_nopad_bin"}}, isdefault: 0b00000000}, - 1121: {alias: []collalias{{0b00001100, "eucjpms_japanese_nopad_ci"}}, isdefault: 0b00000000}, - 1122: {alias: []collalias{{0b00001100, "eucjpms_nopad_bin"}}, isdefault: 0b00000000}, - 1125: {alias: []collalias{{0b00001100, "utf16_unicode_nopad_ci"}}, isdefault: 0b00000000}, - 1147: {alias: []collalias{{0b00001100, "utf16_unicode_520_nopad_ci"}}, isdefault: 0b00000000}, - 1152: {alias: []collalias{{0b00001100, "ucs2_unicode_nopad_ci"}}, isdefault: 0b00000000}, - 1174: {alias: []collalias{{0b00001100, "ucs2_unicode_520_nopad_ci"}}, isdefault: 0b00000000}, - 1184: {alias: []collalias{{0b00001100, "utf32_unicode_nopad_ci"}}, isdefault: 0b00000000}, - 1206: {alias: []collalias{{0b00001100, "utf32_unicode_520_nopad_ci"}}, isdefault: 0b00000000}, - 1216: {alias: []collalias{{0b00001100, "utf8_unicode_nopad_ci"}, {0b00001100, "utf8mb3_unicode_nopad_ci"}}, isdefault: 0b00000000}, - 1238: {alias: []collalias{{0b00001100, "utf8_unicode_520_nopad_ci"}, {0b00001100, "utf8mb3_unicode_520_nopad_ci"}}, isdefault: 0b00000000}, - 1248: {alias: []collalias{{0b00001100, "utf8mb4_unicode_nopad_ci"}}, isdefault: 0b00000000}, - 1270: {alias: []collalias{{0b00001100, "utf8mb4_unicode_520_nopad_ci"}}, isdefault: 0b00000000}, + 1: {alias: []collalias{{0b01111111, "big5_chinese_ci", "big5"}}, isdefault: 0b01111111}, + 2: {alias: []collalias{{0b01111111, "latin2_czech_cs", "latin2"}}, isdefault: 0b00000000}, + 3: {alias: []collalias{{0b01111111, "dec8_swedish_ci", "dec8"}}, isdefault: 0b01111111}, + 4: {alias: []collalias{{0b01111111, "cp850_general_ci", "cp850"}}, isdefault: 0b01111111}, + 5: {alias: []collalias{{0b01111111, "latin1_german1_ci", "latin1"}}, isdefault: 0b00000000}, + 6: {alias: []collalias{{0b01111111, "hp8_english_ci", "hp8"}}, isdefault: 0b01111111}, + 7: {alias: []collalias{{0b01111111, "koi8r_general_ci", "koi8r"}}, isdefault: 0b01111111}, + 8: {alias: []collalias{{0b01111111, "latin1_swedish_ci", "latin1"}}, isdefault: 0b01111111}, + 9: {alias: []collalias{{0b01111111, "latin2_general_ci", "latin2"}}, isdefault: 0b01111111}, + 10: {alias: []collalias{{0b01111111, "swe7_swedish_ci", "swe7"}}, isdefault: 0b01111111}, + 11: {alias: []collalias{{0b01111111, "ascii_general_ci", "ascii"}}, isdefault: 0b01111111}, + 12: {alias: []collalias{{0b01111111, "ujis_japanese_ci", "ujis"}}, isdefault: 0b01111111}, + 13: {alias: []collalias{{0b01111111, "sjis_japanese_ci", "sjis"}}, isdefault: 0b01111111}, + 14: {alias: []collalias{{0b01111111, "cp1251_bulgarian_ci", "cp1251"}}, isdefault: 0b00000000}, + 15: {alias: []collalias{{0b01111111, "latin1_danish_ci", "latin1"}}, isdefault: 0b00000000}, + 16: {alias: []collalias{{0b01111111, "hebrew_general_ci", "hebrew"}}, isdefault: 0b01111111}, + 18: {alias: []collalias{{0b01111111, "tis620_thai_ci", "tis620"}}, isdefault: 0b01111111}, + 19: {alias: []collalias{{0b01111111, "euckr_korean_ci", "euckr"}}, isdefault: 0b01111111}, + 20: {alias: []collalias{{0b01111111, "latin7_estonian_cs", "latin7"}}, isdefault: 0b00000000}, + 21: {alias: []collalias{{0b01111111, "latin2_hungarian_ci", "latin2"}}, isdefault: 0b00000000}, + 22: {alias: []collalias{{0b01111111, "koi8u_general_ci", "koi8u"}}, isdefault: 0b01111111}, + 23: {alias: []collalias{{0b01111111, "cp1251_ukrainian_ci", "cp1251"}}, isdefault: 0b00000000}, + 24: {alias: []collalias{{0b01111111, "gb2312_chinese_ci", "gb2312"}}, isdefault: 0b01111111}, + 25: {alias: []collalias{{0b01111111, "greek_general_ci", "greek"}}, isdefault: 0b01111111}, + 26: {alias: []collalias{{0b01111111, "cp1250_general_ci", "cp1250"}}, isdefault: 0b01111111}, + 27: {alias: []collalias{{0b01111111, "latin2_croatian_ci", "latin2"}}, isdefault: 0b00000000}, + 28: {alias: []collalias{{0b01111111, "gbk_chinese_ci", "gbk"}}, isdefault: 0b01111111}, + 29: {alias: []collalias{{0b01111111, "cp1257_lithuanian_ci", "cp1257"}}, isdefault: 0b00000000}, + 30: {alias: []collalias{{0b01111111, "latin5_turkish_ci", "latin5"}}, isdefault: 0b01111111}, + 31: {alias: []collalias{{0b01111111, "latin1_german2_ci", "latin1"}}, isdefault: 0b00000000}, + 32: {alias: []collalias{{0b01111111, "armscii8_general_ci", "armscii8"}}, isdefault: 0b01111111}, + 33: {alias: []collalias{{0b01111111, "utf8_general_ci", "utf8"}, {0b01111111, "utf8mb3_general_ci", "utf8mb3"}}, isdefault: 0b01111111}, + 34: {alias: []collalias{{0b01111111, "cp1250_czech_cs", "cp1250"}}, isdefault: 0b00000000}, + 35: {alias: []collalias{{0b01111111, "ucs2_general_ci", "ucs2"}}, isdefault: 0b01111111}, + 36: {alias: []collalias{{0b01111111, "cp866_general_ci", "cp866"}}, isdefault: 0b01111111}, + 37: {alias: []collalias{{0b01111111, "keybcs2_general_ci", "keybcs2"}}, isdefault: 0b01111111}, + 38: {alias: []collalias{{0b01111111, "macce_general_ci", "macce"}}, isdefault: 0b01111111}, + 39: {alias: []collalias{{0b01111111, "macroman_general_ci", "macroman"}}, isdefault: 0b01111111}, + 40: {alias: []collalias{{0b01111111, "cp852_general_ci", "cp852"}}, isdefault: 0b01111111}, + 41: {alias: []collalias{{0b01111111, "latin7_general_ci", "latin7"}}, isdefault: 0b01111111}, + 42: {alias: []collalias{{0b01111111, "latin7_general_cs", "latin7"}}, isdefault: 0b00000000}, + 43: {alias: []collalias{{0b01111111, "macce_bin", "macce"}}, isdefault: 0b00000000}, + 44: {alias: []collalias{{0b01111111, "cp1250_croatian_ci", "cp1250"}}, isdefault: 0b00000000}, + 45: {alias: []collalias{{0b01111111, "utf8mb4_general_ci", "utf8mb4"}}, isdefault: 0b00111111}, + 46: {alias: []collalias{{0b01111111, "utf8mb4_bin", "utf8mb4"}}, isdefault: 0b00000000}, + 47: {alias: []collalias{{0b01111111, "latin1_bin", "latin1"}}, isdefault: 0b00000000}, + 48: {alias: []collalias{{0b01111111, "latin1_general_ci", "latin1"}}, isdefault: 0b00000000}, + 49: {alias: []collalias{{0b01111111, "latin1_general_cs", "latin1"}}, isdefault: 0b00000000}, + 50: {alias: []collalias{{0b01111111, "cp1251_bin", "cp1251"}}, isdefault: 0b00000000}, + 51: {alias: []collalias{{0b01111111, "cp1251_general_ci", "cp1251"}}, isdefault: 0b01111111}, + 52: {alias: []collalias{{0b01111111, "cp1251_general_cs", "cp1251"}}, isdefault: 0b00000000}, + 53: {alias: []collalias{{0b01111111, "macroman_bin", "macroman"}}, isdefault: 0b00000000}, + 54: {alias: []collalias{{0b01111111, "utf16_general_ci", "utf16"}}, isdefault: 0b01111111}, + 55: {alias: []collalias{{0b01111111, "utf16_bin", "utf16"}}, isdefault: 0b00000000}, + 56: {alias: []collalias{{0b01111111, "utf16le_general_ci", "utf16le"}}, isdefault: 0b01111111}, + 57: {alias: []collalias{{0b01111111, "cp1256_general_ci", "cp1256"}}, isdefault: 0b01111111}, + 58: {alias: []collalias{{0b01111111, "cp1257_bin", "cp1257"}}, isdefault: 0b00000000}, + 59: {alias: []collalias{{0b01111111, "cp1257_general_ci", "cp1257"}}, isdefault: 0b01111111}, + 60: {alias: []collalias{{0b01111111, "utf32_general_ci", "utf32"}}, isdefault: 0b01111111}, + 61: {alias: []collalias{{0b01111111, "utf32_bin", "utf32"}}, isdefault: 0b00000000}, + 62: {alias: []collalias{{0b01111111, "utf16le_bin", "utf16le"}}, isdefault: 0b00000000}, + 63: {alias: []collalias{{0b01111111, "binary", "binary"}}, isdefault: 0b01111111}, + 64: {alias: []collalias{{0b01111111, "armscii8_bin", "armscii8"}}, isdefault: 0b00000000}, + 65: {alias: []collalias{{0b01111111, "ascii_bin", "ascii"}}, isdefault: 0b00000000}, + 66: {alias: []collalias{{0b01111111, "cp1250_bin", "cp1250"}}, isdefault: 0b00000000}, + 67: {alias: []collalias{{0b01111111, "cp1256_bin", "cp1256"}}, isdefault: 0b00000000}, + 68: {alias: []collalias{{0b01111111, "cp866_bin", "cp866"}}, isdefault: 0b00000000}, + 69: {alias: []collalias{{0b01111111, "dec8_bin", "dec8"}}, isdefault: 0b00000000}, + 70: {alias: []collalias{{0b01111111, "greek_bin", "greek"}}, isdefault: 0b00000000}, + 71: {alias: []collalias{{0b01111111, "hebrew_bin", "hebrew"}}, isdefault: 0b00000000}, + 72: {alias: []collalias{{0b01111111, "hp8_bin", "hp8"}}, isdefault: 0b00000000}, + 73: {alias: []collalias{{0b01111111, "keybcs2_bin", "keybcs2"}}, isdefault: 0b00000000}, + 74: {alias: []collalias{{0b01111111, "koi8r_bin", "koi8r"}}, isdefault: 0b00000000}, + 75: {alias: []collalias{{0b01111111, "koi8u_bin", "koi8u"}}, isdefault: 0b00000000}, + 76: {alias: []collalias{{0b01000000, "utf8_tolower_ci", "utf8"}, {0b01000000, "utf8mb3_tolower_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 77: {alias: []collalias{{0b01111111, "latin2_bin", "latin2"}}, isdefault: 0b00000000}, + 78: {alias: []collalias{{0b01111111, "latin5_bin", "latin5"}}, isdefault: 0b00000000}, + 79: {alias: []collalias{{0b01111111, "latin7_bin", "latin7"}}, isdefault: 0b00000000}, + 80: {alias: []collalias{{0b01111111, "cp850_bin", "cp850"}}, isdefault: 0b00000000}, + 81: {alias: []collalias{{0b01111111, "cp852_bin", "cp852"}}, isdefault: 0b00000000}, + 82: {alias: []collalias{{0b01111111, "swe7_bin", "swe7"}}, isdefault: 0b00000000}, + 83: {alias: []collalias{{0b01111111, "utf8_bin", "utf8"}, {0b01111111, "utf8mb3_bin", "utf8mb3"}}, isdefault: 0b00000000}, + 84: {alias: []collalias{{0b01111111, "big5_bin", "big5"}}, isdefault: 0b00000000}, + 85: {alias: []collalias{{0b01111111, "euckr_bin", "euckr"}}, isdefault: 0b00000000}, + 86: {alias: []collalias{{0b01111111, "gb2312_bin", "gb2312"}}, isdefault: 0b00000000}, + 87: {alias: []collalias{{0b01111111, "gbk_bin", "gbk"}}, isdefault: 0b00000000}, + 88: {alias: []collalias{{0b01111111, "sjis_bin", "sjis"}}, isdefault: 0b00000000}, + 89: {alias: []collalias{{0b01111111, "tis620_bin", "tis620"}}, isdefault: 0b00000000}, + 90: {alias: []collalias{{0b01111111, "ucs2_bin", "ucs2"}}, isdefault: 0b00000000}, + 91: {alias: []collalias{{0b01111111, "ujis_bin", "ujis"}}, isdefault: 0b00000000}, + 92: {alias: []collalias{{0b01111111, "geostd8_general_ci", "geostd8"}}, isdefault: 0b01111111}, + 93: {alias: []collalias{{0b01111111, "geostd8_bin", "geostd8"}}, isdefault: 0b00000000}, + 94: {alias: []collalias{{0b01111111, "latin1_spanish_ci", "latin1"}}, isdefault: 0b00000000}, + 95: {alias: []collalias{{0b01111111, "cp932_japanese_ci", "cp932"}}, isdefault: 0b01111111}, + 96: {alias: []collalias{{0b01111111, "cp932_bin", "cp932"}}, isdefault: 0b00000000}, + 97: {alias: []collalias{{0b01111111, "eucjpms_japanese_ci", "eucjpms"}}, isdefault: 0b01111111}, + 98: {alias: []collalias{{0b01111111, "eucjpms_bin", "eucjpms"}}, isdefault: 0b00000000}, + 99: {alias: []collalias{{0b01111111, "cp1250_polish_ci", "cp1250"}}, isdefault: 0b00000000}, + 101: {alias: []collalias{{0b01111111, "utf16_unicode_ci", "utf16"}}, isdefault: 0b00000000}, + 102: {alias: []collalias{{0b01111111, "utf16_icelandic_ci", "utf16"}}, isdefault: 0b00000000}, + 103: {alias: []collalias{{0b01111111, "utf16_latvian_ci", "utf16"}}, isdefault: 0b00000000}, + 104: {alias: []collalias{{0b01111111, "utf16_romanian_ci", "utf16"}}, isdefault: 0b00000000}, + 105: {alias: []collalias{{0b01111111, "utf16_slovenian_ci", "utf16"}}, isdefault: 0b00000000}, + 106: {alias: []collalias{{0b01111111, "utf16_polish_ci", "utf16"}}, isdefault: 0b00000000}, + 107: {alias: []collalias{{0b01111111, "utf16_estonian_ci", "utf16"}}, isdefault: 0b00000000}, + 108: {alias: []collalias{{0b01111111, "utf16_spanish_ci", "utf16"}}, isdefault: 0b00000000}, + 109: {alias: []collalias{{0b01111111, "utf16_swedish_ci", "utf16"}}, isdefault: 0b00000000}, + 110: {alias: []collalias{{0b01111111, "utf16_turkish_ci", "utf16"}}, isdefault: 0b00000000}, + 111: {alias: []collalias{{0b01111111, "utf16_czech_ci", "utf16"}}, isdefault: 0b00000000}, + 112: {alias: []collalias{{0b01111111, "utf16_danish_ci", "utf16"}}, isdefault: 0b00000000}, + 113: {alias: []collalias{{0b01111111, "utf16_lithuanian_ci", "utf16"}}, isdefault: 0b00000000}, + 114: {alias: []collalias{{0b01111111, "utf16_slovak_ci", "utf16"}}, isdefault: 0b00000000}, + 115: {alias: []collalias{{0b01111111, "utf16_spanish2_ci", "utf16"}}, isdefault: 0b00000000}, + 116: {alias: []collalias{{0b01111111, "utf16_roman_ci", "utf16"}}, isdefault: 0b00000000}, + 117: {alias: []collalias{{0b01111111, "utf16_persian_ci", "utf16"}}, isdefault: 0b00000000}, + 118: {alias: []collalias{{0b01111111, "utf16_esperanto_ci", "utf16"}}, isdefault: 0b00000000}, + 119: {alias: []collalias{{0b01111111, "utf16_hungarian_ci", "utf16"}}, isdefault: 0b00000000}, + 120: {alias: []collalias{{0b01111111, "utf16_sinhala_ci", "utf16"}}, isdefault: 0b00000000}, + 121: {alias: []collalias{{0b01111111, "utf16_german2_ci", "utf16"}}, isdefault: 0b00000000}, + 122: {alias: []collalias{{0b01110000, "utf16_croatian_ci", "utf16"}, {0b00001111, "utf16_croatian_mysql561_ci", "utf16"}}, isdefault: 0b00000000}, + 123: {alias: []collalias{{0b01111111, "utf16_unicode_520_ci", "utf16"}}, isdefault: 0b00000000}, + 124: {alias: []collalias{{0b01111111, "utf16_vietnamese_ci", "utf16"}}, isdefault: 0b00000000}, + 128: {alias: []collalias{{0b01111111, "ucs2_unicode_ci", "ucs2"}}, isdefault: 0b00000000}, + 129: {alias: []collalias{{0b01111111, "ucs2_icelandic_ci", "ucs2"}}, isdefault: 0b00000000}, + 130: {alias: []collalias{{0b01111111, "ucs2_latvian_ci", "ucs2"}}, isdefault: 0b00000000}, + 131: {alias: []collalias{{0b01111111, "ucs2_romanian_ci", "ucs2"}}, isdefault: 0b00000000}, + 132: {alias: []collalias{{0b01111111, "ucs2_slovenian_ci", "ucs2"}}, isdefault: 0b00000000}, + 133: {alias: []collalias{{0b01111111, "ucs2_polish_ci", "ucs2"}}, isdefault: 0b00000000}, + 134: {alias: []collalias{{0b01111111, "ucs2_estonian_ci", "ucs2"}}, isdefault: 0b00000000}, + 135: {alias: []collalias{{0b01111111, "ucs2_spanish_ci", "ucs2"}}, isdefault: 0b00000000}, + 136: {alias: []collalias{{0b01111111, "ucs2_swedish_ci", "ucs2"}}, isdefault: 0b00000000}, + 137: {alias: []collalias{{0b01111111, "ucs2_turkish_ci", "ucs2"}}, isdefault: 0b00000000}, + 138: {alias: []collalias{{0b01111111, "ucs2_czech_ci", "ucs2"}}, isdefault: 0b00000000}, + 139: {alias: []collalias{{0b01111111, "ucs2_danish_ci", "ucs2"}}, isdefault: 0b00000000}, + 140: {alias: []collalias{{0b01111111, "ucs2_lithuanian_ci", "ucs2"}}, isdefault: 0b00000000}, + 141: {alias: []collalias{{0b01111111, "ucs2_slovak_ci", "ucs2"}}, isdefault: 0b00000000}, + 142: {alias: []collalias{{0b01111111, "ucs2_spanish2_ci", "ucs2"}}, isdefault: 0b00000000}, + 143: {alias: []collalias{{0b01111111, "ucs2_roman_ci", "ucs2"}}, isdefault: 0b00000000}, + 144: {alias: []collalias{{0b01111111, "ucs2_persian_ci", "ucs2"}}, isdefault: 0b00000000}, + 145: {alias: []collalias{{0b01111111, "ucs2_esperanto_ci", "ucs2"}}, isdefault: 0b00000000}, + 146: {alias: []collalias{{0b01111111, "ucs2_hungarian_ci", "ucs2"}}, isdefault: 0b00000000}, + 147: {alias: []collalias{{0b01111111, "ucs2_sinhala_ci", "ucs2"}}, isdefault: 0b00000000}, + 148: {alias: []collalias{{0b01111111, "ucs2_german2_ci", "ucs2"}}, isdefault: 0b00000000}, + 149: {alias: []collalias{{0b01110000, "ucs2_croatian_ci", "ucs2"}, {0b00001111, "ucs2_croatian_mysql561_ci", "ucs2"}}, isdefault: 0b00000000}, + 150: {alias: []collalias{{0b01111111, "ucs2_unicode_520_ci", "ucs2"}}, isdefault: 0b00000000}, + 151: {alias: []collalias{{0b01111111, "ucs2_vietnamese_ci", "ucs2"}}, isdefault: 0b00000000}, + 159: {alias: []collalias{{0b01111111, "ucs2_general_mysql500_ci", "ucs2"}}, isdefault: 0b00000000}, + 160: {alias: []collalias{{0b01111111, "utf32_unicode_ci", "utf32"}}, isdefault: 0b00000000}, + 161: {alias: []collalias{{0b01111111, "utf32_icelandic_ci", "utf32"}}, isdefault: 0b00000000}, + 162: {alias: []collalias{{0b01111111, "utf32_latvian_ci", "utf32"}}, isdefault: 0b00000000}, + 163: {alias: []collalias{{0b01111111, "utf32_romanian_ci", "utf32"}}, isdefault: 0b00000000}, + 164: {alias: []collalias{{0b01111111, "utf32_slovenian_ci", "utf32"}}, isdefault: 0b00000000}, + 165: {alias: []collalias{{0b01111111, "utf32_polish_ci", "utf32"}}, isdefault: 0b00000000}, + 166: {alias: []collalias{{0b01111111, "utf32_estonian_ci", "utf32"}}, isdefault: 0b00000000}, + 167: {alias: []collalias{{0b01111111, "utf32_spanish_ci", "utf32"}}, isdefault: 0b00000000}, + 168: {alias: []collalias{{0b01111111, "utf32_swedish_ci", "utf32"}}, isdefault: 0b00000000}, + 169: {alias: []collalias{{0b01111111, "utf32_turkish_ci", "utf32"}}, isdefault: 0b00000000}, + 170: {alias: []collalias{{0b01111111, "utf32_czech_ci", "utf32"}}, isdefault: 0b00000000}, + 171: {alias: []collalias{{0b01111111, "utf32_danish_ci", "utf32"}}, isdefault: 0b00000000}, + 172: {alias: []collalias{{0b01111111, "utf32_lithuanian_ci", "utf32"}}, isdefault: 0b00000000}, + 173: {alias: []collalias{{0b01111111, "utf32_slovak_ci", "utf32"}}, isdefault: 0b00000000}, + 174: {alias: []collalias{{0b01111111, "utf32_spanish2_ci", "utf32"}}, isdefault: 0b00000000}, + 175: {alias: []collalias{{0b01111111, "utf32_roman_ci", "utf32"}}, isdefault: 0b00000000}, + 176: {alias: []collalias{{0b01111111, "utf32_persian_ci", "utf32"}}, isdefault: 0b00000000}, + 177: {alias: []collalias{{0b01111111, "utf32_esperanto_ci", "utf32"}}, isdefault: 0b00000000}, + 178: {alias: []collalias{{0b01111111, "utf32_hungarian_ci", "utf32"}}, isdefault: 0b00000000}, + 179: {alias: []collalias{{0b01111111, "utf32_sinhala_ci", "utf32"}}, isdefault: 0b00000000}, + 180: {alias: []collalias{{0b01111111, "utf32_german2_ci", "utf32"}}, isdefault: 0b00000000}, + 181: {alias: []collalias{{0b01110000, "utf32_croatian_ci", "utf32"}, {0b00001111, "utf32_croatian_mysql561_ci", "utf32"}}, isdefault: 0b00000000}, + 182: {alias: []collalias{{0b01111111, "utf32_unicode_520_ci", "utf32"}}, isdefault: 0b00000000}, + 183: {alias: []collalias{{0b01111111, "utf32_vietnamese_ci", "utf32"}}, isdefault: 0b00000000}, + 192: {alias: []collalias{{0b01111111, "utf8_unicode_ci", "utf8"}, {0b01111111, "utf8mb3_unicode_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 193: {alias: []collalias{{0b01111111, "utf8_icelandic_ci", "utf8"}, {0b01111111, "utf8mb3_icelandic_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 194: {alias: []collalias{{0b01111111, "utf8_latvian_ci", "utf8"}, {0b01111111, "utf8mb3_latvian_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 195: {alias: []collalias{{0b01111111, "utf8_romanian_ci", "utf8"}, {0b01111111, "utf8mb3_romanian_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 196: {alias: []collalias{{0b01111111, "utf8_slovenian_ci", "utf8"}, {0b01111111, "utf8mb3_slovenian_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 197: {alias: []collalias{{0b01111111, "utf8_polish_ci", "utf8"}, {0b01111111, "utf8mb3_polish_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 198: {alias: []collalias{{0b01111111, "utf8_estonian_ci", "utf8"}, {0b01111111, "utf8mb3_estonian_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 199: {alias: []collalias{{0b01111111, "utf8_spanish_ci", "utf8"}, {0b01111111, "utf8mb3_spanish_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 200: {alias: []collalias{{0b01111111, "utf8_swedish_ci", "utf8"}, {0b01111111, "utf8mb3_swedish_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 201: {alias: []collalias{{0b01111111, "utf8_turkish_ci", "utf8"}, {0b01111111, "utf8mb3_turkish_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 202: {alias: []collalias{{0b01111111, "utf8_czech_ci", "utf8"}, {0b01111111, "utf8mb3_czech_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 203: {alias: []collalias{{0b01111111, "utf8_danish_ci", "utf8"}, {0b01111111, "utf8mb3_danish_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 204: {alias: []collalias{{0b01111111, "utf8_lithuanian_ci", "utf8"}, {0b01111111, "utf8mb3_lithuanian_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 205: {alias: []collalias{{0b01111111, "utf8_slovak_ci", "utf8"}, {0b01111111, "utf8mb3_slovak_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 206: {alias: []collalias{{0b01111111, "utf8_spanish2_ci", "utf8"}, {0b01111111, "utf8mb3_spanish2_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 207: {alias: []collalias{{0b01111111, "utf8_roman_ci", "utf8"}, {0b01111111, "utf8mb3_roman_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 208: {alias: []collalias{{0b01111111, "utf8_persian_ci", "utf8"}, {0b01111111, "utf8mb3_persian_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 209: {alias: []collalias{{0b01111111, "utf8_esperanto_ci", "utf8"}, {0b01111111, "utf8mb3_esperanto_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 210: {alias: []collalias{{0b01111111, "utf8_hungarian_ci", "utf8"}, {0b01111111, "utf8mb3_hungarian_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 211: {alias: []collalias{{0b01111111, "utf8_sinhala_ci", "utf8"}, {0b01111111, "utf8mb3_sinhala_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 212: {alias: []collalias{{0b01111111, "utf8_german2_ci", "utf8"}, {0b01111111, "utf8mb3_german2_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 213: {alias: []collalias{{0b01110000, "utf8_croatian_ci", "utf8"}, {0b00001111, "utf8_croatian_mysql561_ci", "utf8"}, {0b01110000, "utf8mb3_croatian_ci", "utf8mb3"}, {0b00001111, "utf8mb3_croatian_mysql561_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 214: {alias: []collalias{{0b01111111, "utf8_unicode_520_ci", "utf8"}, {0b01111111, "utf8mb3_unicode_520_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 215: {alias: []collalias{{0b01111111, "utf8_vietnamese_ci", "utf8"}, {0b01111111, "utf8mb3_vietnamese_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 223: {alias: []collalias{{0b01111111, "utf8_general_mysql500_ci", "utf8"}, {0b01111111, "utf8mb3_general_mysql500_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 224: {alias: []collalias{{0b01111111, "utf8mb4_unicode_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 225: {alias: []collalias{{0b01111111, "utf8mb4_icelandic_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 226: {alias: []collalias{{0b01111111, "utf8mb4_latvian_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 227: {alias: []collalias{{0b01111111, "utf8mb4_romanian_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 228: {alias: []collalias{{0b01111111, "utf8mb4_slovenian_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 229: {alias: []collalias{{0b01111111, "utf8mb4_polish_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 230: {alias: []collalias{{0b01111111, "utf8mb4_estonian_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 231: {alias: []collalias{{0b01111111, "utf8mb4_spanish_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 232: {alias: []collalias{{0b01111111, "utf8mb4_swedish_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 233: {alias: []collalias{{0b01111111, "utf8mb4_turkish_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 234: {alias: []collalias{{0b01111111, "utf8mb4_czech_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 235: {alias: []collalias{{0b01111111, "utf8mb4_danish_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 236: {alias: []collalias{{0b01111111, "utf8mb4_lithuanian_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 237: {alias: []collalias{{0b01111111, "utf8mb4_slovak_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 238: {alias: []collalias{{0b01111111, "utf8mb4_spanish2_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 239: {alias: []collalias{{0b01111111, "utf8mb4_roman_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 240: {alias: []collalias{{0b01111111, "utf8mb4_persian_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 241: {alias: []collalias{{0b01111111, "utf8mb4_esperanto_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 242: {alias: []collalias{{0b01111111, "utf8mb4_hungarian_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 243: {alias: []collalias{{0b01111111, "utf8mb4_sinhala_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 244: {alias: []collalias{{0b01111111, "utf8mb4_german2_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 245: {alias: []collalias{{0b01110000, "utf8mb4_croatian_ci", "utf8mb4"}, {0b00001111, "utf8mb4_croatian_mysql561_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 246: {alias: []collalias{{0b01111111, "utf8mb4_unicode_520_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 247: {alias: []collalias{{0b01111111, "utf8mb4_vietnamese_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 248: {alias: []collalias{{0b01100000, "gb18030_chinese_ci", "gb18030"}}, isdefault: 0b01100000}, + 249: {alias: []collalias{{0b01100000, "gb18030_bin", "gb18030"}}, isdefault: 0b00000000}, + 250: {alias: []collalias{{0b01100000, "gb18030_unicode_520_ci", "gb18030"}}, isdefault: 0b00000000}, + 255: {alias: []collalias{{0b01000000, "utf8mb4_0900_ai_ci", "utf8mb4"}}, isdefault: 0b01000000}, + 256: {alias: []collalias{{0b01000000, "utf8mb4_de_pb_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 257: {alias: []collalias{{0b01000000, "utf8mb4_is_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 258: {alias: []collalias{{0b01000000, "utf8mb4_lv_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 259: {alias: []collalias{{0b01000000, "utf8mb4_ro_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 260: {alias: []collalias{{0b01000000, "utf8mb4_sl_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 261: {alias: []collalias{{0b01000000, "utf8mb4_pl_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 262: {alias: []collalias{{0b01000000, "utf8mb4_et_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 263: {alias: []collalias{{0b01000000, "utf8mb4_es_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 264: {alias: []collalias{{0b01000000, "utf8mb4_sv_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 265: {alias: []collalias{{0b01000000, "utf8mb4_tr_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 266: {alias: []collalias{{0b01000000, "utf8mb4_cs_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 267: {alias: []collalias{{0b01000000, "utf8mb4_da_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 268: {alias: []collalias{{0b01000000, "utf8mb4_lt_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 269: {alias: []collalias{{0b01000000, "utf8mb4_sk_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 270: {alias: []collalias{{0b01000000, "utf8mb4_es_trad_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 271: {alias: []collalias{{0b01000000, "utf8mb4_la_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 273: {alias: []collalias{{0b01000000, "utf8mb4_eo_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 274: {alias: []collalias{{0b01000000, "utf8mb4_hu_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 275: {alias: []collalias{{0b01000000, "utf8mb4_hr_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 277: {alias: []collalias{{0b01000000, "utf8mb4_vi_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 278: {alias: []collalias{{0b01000000, "utf8mb4_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 279: {alias: []collalias{{0b01000000, "utf8mb4_de_pb_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 280: {alias: []collalias{{0b01000000, "utf8mb4_is_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 281: {alias: []collalias{{0b01000000, "utf8mb4_lv_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 282: {alias: []collalias{{0b01000000, "utf8mb4_ro_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 283: {alias: []collalias{{0b01000000, "utf8mb4_sl_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 284: {alias: []collalias{{0b01000000, "utf8mb4_pl_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 285: {alias: []collalias{{0b01000000, "utf8mb4_et_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 286: {alias: []collalias{{0b01000000, "utf8mb4_es_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 287: {alias: []collalias{{0b01000000, "utf8mb4_sv_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 288: {alias: []collalias{{0b01000000, "utf8mb4_tr_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 289: {alias: []collalias{{0b01000000, "utf8mb4_cs_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 290: {alias: []collalias{{0b01000000, "utf8mb4_da_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 291: {alias: []collalias{{0b01000000, "utf8mb4_lt_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 292: {alias: []collalias{{0b01000000, "utf8mb4_sk_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 293: {alias: []collalias{{0b01000000, "utf8mb4_es_trad_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 294: {alias: []collalias{{0b01000000, "utf8mb4_la_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 296: {alias: []collalias{{0b01000000, "utf8mb4_eo_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 297: {alias: []collalias{{0b01000000, "utf8mb4_hu_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 298: {alias: []collalias{{0b01000000, "utf8mb4_hr_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 300: {alias: []collalias{{0b01000000, "utf8mb4_vi_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 303: {alias: []collalias{{0b01000000, "utf8mb4_ja_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 304: {alias: []collalias{{0b01000000, "utf8mb4_ja_0900_as_cs_ks", "utf8mb4"}}, isdefault: 0b00000000}, + 305: {alias: []collalias{{0b01000000, "utf8mb4_0900_as_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 306: {alias: []collalias{{0b01000000, "utf8mb4_ru_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 307: {alias: []collalias{{0b01000000, "utf8mb4_ru_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 308: {alias: []collalias{{0b01000000, "utf8mb4_zh_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 309: {alias: []collalias{{0b01000000, "utf8mb4_0900_bin", "utf8mb4"}}, isdefault: 0b00000000}, + 310: {alias: []collalias{{0b01000000, "utf8mb4_nb_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 311: {alias: []collalias{{0b01000000, "utf8mb4_nb_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 312: {alias: []collalias{{0b01000000, "utf8mb4_nn_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 313: {alias: []collalias{{0b01000000, "utf8mb4_nn_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 314: {alias: []collalias{{0b01000000, "utf8mb4_sr_latn_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 315: {alias: []collalias{{0b01000000, "utf8mb4_sr_latn_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 316: {alias: []collalias{{0b01000000, "utf8mb4_bs_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 317: {alias: []collalias{{0b01000000, "utf8mb4_bs_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 318: {alias: []collalias{{0b01000000, "utf8mb4_bg_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 319: {alias: []collalias{{0b01000000, "utf8mb4_bg_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 320: {alias: []collalias{{0b01000000, "utf8mb4_gl_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 321: {alias: []collalias{{0b01000000, "utf8mb4_gl_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 322: {alias: []collalias{{0b01000000, "utf8mb4_mn_cyrl_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 323: {alias: []collalias{{0b01000000, "utf8mb4_mn_cyrl_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000}, + 576: {alias: []collalias{{0b00001111, "utf8_croatian_ci", "utf8"}, {0b00001111, "utf8mb3_croatian_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 577: {alias: []collalias{{0b00001111, "utf8_myanmar_ci", "utf8"}, {0b00001111, "utf8mb3_myanmar_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 578: {alias: []collalias{{0b00001110, "utf8_thai_520_w2", "utf8"}, {0b00001110, "utf8mb3_thai_520_w2", "utf8mb3"}}, isdefault: 0b00000000}, + 608: {alias: []collalias{{0b00001111, "utf8mb4_croatian_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 609: {alias: []collalias{{0b00001111, "utf8mb4_myanmar_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 610: {alias: []collalias{{0b00001110, "utf8mb4_thai_520_w2", "utf8mb4"}}, isdefault: 0b00000000}, + 640: {alias: []collalias{{0b00001111, "ucs2_croatian_ci", "ucs2"}}, isdefault: 0b00000000}, + 641: {alias: []collalias{{0b00001111, "ucs2_myanmar_ci", "ucs2"}}, isdefault: 0b00000000}, + 642: {alias: []collalias{{0b00001110, "ucs2_thai_520_w2", "ucs2"}}, isdefault: 0b00000000}, + 672: {alias: []collalias{{0b00001111, "utf16_croatian_ci", "utf16"}}, isdefault: 0b00000000}, + 673: {alias: []collalias{{0b00001111, "utf16_myanmar_ci", "utf16"}}, isdefault: 0b00000000}, + 674: {alias: []collalias{{0b00001110, "utf16_thai_520_w2", "utf16"}}, isdefault: 0b00000000}, + 736: {alias: []collalias{{0b00001111, "utf32_croatian_ci", "utf32"}}, isdefault: 0b00000000}, + 737: {alias: []collalias{{0b00001111, "utf32_myanmar_ci", "utf32"}}, isdefault: 0b00000000}, + 738: {alias: []collalias{{0b00001110, "utf32_thai_520_w2", "utf32"}}, isdefault: 0b00000000}, + 1025: {alias: []collalias{{0b00001100, "big5_chinese_nopad_ci", "big5"}}, isdefault: 0b00000000}, + 1027: {alias: []collalias{{0b00001100, "dec8_swedish_nopad_ci", "dec8"}}, isdefault: 0b00000000}, + 1028: {alias: []collalias{{0b00001100, "cp850_general_nopad_ci", "cp850"}}, isdefault: 0b00000000}, + 1030: {alias: []collalias{{0b00001100, "hp8_english_nopad_ci", "hp8"}}, isdefault: 0b00000000}, + 1031: {alias: []collalias{{0b00001100, "koi8r_general_nopad_ci", "koi8r"}}, isdefault: 0b00000000}, + 1032: {alias: []collalias{{0b00001100, "latin1_swedish_nopad_ci", "latin1"}}, isdefault: 0b00000000}, + 1033: {alias: []collalias{{0b00001100, "latin2_general_nopad_ci", "latin2"}}, isdefault: 0b00000000}, + 1034: {alias: []collalias{{0b00001100, "swe7_swedish_nopad_ci", "swe7"}}, isdefault: 0b00000000}, + 1035: {alias: []collalias{{0b00001100, "ascii_general_nopad_ci", "ascii"}}, isdefault: 0b00000000}, + 1036: {alias: []collalias{{0b00001100, "ujis_japanese_nopad_ci", "ujis"}}, isdefault: 0b00000000}, + 1037: {alias: []collalias{{0b00001100, "sjis_japanese_nopad_ci", "sjis"}}, isdefault: 0b00000000}, + 1040: {alias: []collalias{{0b00001100, "hebrew_general_nopad_ci", "hebrew"}}, isdefault: 0b00000000}, + 1042: {alias: []collalias{{0b00001100, "tis620_thai_nopad_ci", "tis620"}}, isdefault: 0b00000000}, + 1043: {alias: []collalias{{0b00001100, "euckr_korean_nopad_ci", "euckr"}}, isdefault: 0b00000000}, + 1046: {alias: []collalias{{0b00001100, "koi8u_general_nopad_ci", "koi8u"}}, isdefault: 0b00000000}, + 1048: {alias: []collalias{{0b00001100, "gb2312_chinese_nopad_ci", "gb2312"}}, isdefault: 0b00000000}, + 1049: {alias: []collalias{{0b00001100, "greek_general_nopad_ci", "greek"}}, isdefault: 0b00000000}, + 1050: {alias: []collalias{{0b00001100, "cp1250_general_nopad_ci", "cp1250"}}, isdefault: 0b00000000}, + 1052: {alias: []collalias{{0b00001100, "gbk_chinese_nopad_ci", "gbk"}}, isdefault: 0b00000000}, + 1054: {alias: []collalias{{0b00001100, "latin5_turkish_nopad_ci", "latin5"}}, isdefault: 0b00000000}, + 1056: {alias: []collalias{{0b00001100, "armscii8_general_nopad_ci", "armscii8"}}, isdefault: 0b00000000}, + 1057: {alias: []collalias{{0b00001100, "utf8_general_nopad_ci", "utf8"}, {0b00001100, "utf8mb3_general_nopad_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 1059: {alias: []collalias{{0b00001100, "ucs2_general_nopad_ci", "ucs2"}}, isdefault: 0b00000000}, + 1060: {alias: []collalias{{0b00001100, "cp866_general_nopad_ci", "cp866"}}, isdefault: 0b00000000}, + 1061: {alias: []collalias{{0b00001100, "keybcs2_general_nopad_ci", "keybcs2"}}, isdefault: 0b00000000}, + 1062: {alias: []collalias{{0b00001100, "macce_general_nopad_ci", "macce"}}, isdefault: 0b00000000}, + 1063: {alias: []collalias{{0b00001100, "macroman_general_nopad_ci", "macroman"}}, isdefault: 0b00000000}, + 1064: {alias: []collalias{{0b00001100, "cp852_general_nopad_ci", "cp852"}}, isdefault: 0b00000000}, + 1065: {alias: []collalias{{0b00001100, "latin7_general_nopad_ci", "latin7"}}, isdefault: 0b00000000}, + 1067: {alias: []collalias{{0b00001100, "macce_nopad_bin", "macce"}}, isdefault: 0b00000000}, + 1069: {alias: []collalias{{0b00001100, "utf8mb4_general_nopad_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 1070: {alias: []collalias{{0b00001100, "utf8mb4_nopad_bin", "utf8mb4"}}, isdefault: 0b00000000}, + 1071: {alias: []collalias{{0b00001100, "latin1_nopad_bin", "latin1"}}, isdefault: 0b00000000}, + 1074: {alias: []collalias{{0b00001100, "cp1251_nopad_bin", "cp1251"}}, isdefault: 0b00000000}, + 1075: {alias: []collalias{{0b00001100, "cp1251_general_nopad_ci", "cp1251"}}, isdefault: 0b00000000}, + 1077: {alias: []collalias{{0b00001100, "macroman_nopad_bin", "macroman"}}, isdefault: 0b00000000}, + 1078: {alias: []collalias{{0b00001100, "utf16_general_nopad_ci", "utf16"}}, isdefault: 0b00000000}, + 1079: {alias: []collalias{{0b00001100, "utf16_nopad_bin", "utf16"}}, isdefault: 0b00000000}, + 1080: {alias: []collalias{{0b00001100, "utf16le_general_nopad_ci", "utf16le"}}, isdefault: 0b00000000}, + 1081: {alias: []collalias{{0b00001100, "cp1256_general_nopad_ci", "cp1256"}}, isdefault: 0b00000000}, + 1082: {alias: []collalias{{0b00001100, "cp1257_nopad_bin", "cp1257"}}, isdefault: 0b00000000}, + 1083: {alias: []collalias{{0b00001100, "cp1257_general_nopad_ci", "cp1257"}}, isdefault: 0b00000000}, + 1084: {alias: []collalias{{0b00001100, "utf32_general_nopad_ci", "utf32"}}, isdefault: 0b00000000}, + 1085: {alias: []collalias{{0b00001100, "utf32_nopad_bin", "utf32"}}, isdefault: 0b00000000}, + 1086: {alias: []collalias{{0b00001100, "utf16le_nopad_bin", "utf16le"}}, isdefault: 0b00000000}, + 1088: {alias: []collalias{{0b00001100, "armscii8_nopad_bin", "armscii8"}}, isdefault: 0b00000000}, + 1089: {alias: []collalias{{0b00001100, "ascii_nopad_bin", "ascii"}}, isdefault: 0b00000000}, + 1090: {alias: []collalias{{0b00001100, "cp1250_nopad_bin", "cp1250"}}, isdefault: 0b00000000}, + 1091: {alias: []collalias{{0b00001100, "cp1256_nopad_bin", "cp1256"}}, isdefault: 0b00000000}, + 1092: {alias: []collalias{{0b00001100, "cp866_nopad_bin", "cp866"}}, isdefault: 0b00000000}, + 1093: {alias: []collalias{{0b00001100, "dec8_nopad_bin", "dec8"}}, isdefault: 0b00000000}, + 1094: {alias: []collalias{{0b00001100, "greek_nopad_bin", "greek"}}, isdefault: 0b00000000}, + 1095: {alias: []collalias{{0b00001100, "hebrew_nopad_bin", "hebrew"}}, isdefault: 0b00000000}, + 1096: {alias: []collalias{{0b00001100, "hp8_nopad_bin", "hp8"}}, isdefault: 0b00000000}, + 1097: {alias: []collalias{{0b00001100, "keybcs2_nopad_bin", "keybcs2"}}, isdefault: 0b00000000}, + 1098: {alias: []collalias{{0b00001100, "koi8r_nopad_bin", "koi8r"}}, isdefault: 0b00000000}, + 1099: {alias: []collalias{{0b00001100, "koi8u_nopad_bin", "koi8u"}}, isdefault: 0b00000000}, + 1101: {alias: []collalias{{0b00001100, "latin2_nopad_bin", "latin2"}}, isdefault: 0b00000000}, + 1102: {alias: []collalias{{0b00001100, "latin5_nopad_bin", "latin5"}}, isdefault: 0b00000000}, + 1103: {alias: []collalias{{0b00001100, "latin7_nopad_bin", "latin7"}}, isdefault: 0b00000000}, + 1104: {alias: []collalias{{0b00001100, "cp850_nopad_bin", "cp850"}}, isdefault: 0b00000000}, + 1105: {alias: []collalias{{0b00001100, "cp852_nopad_bin", "cp852"}}, isdefault: 0b00000000}, + 1106: {alias: []collalias{{0b00001100, "swe7_nopad_bin", "swe7"}}, isdefault: 0b00000000}, + 1107: {alias: []collalias{{0b00001100, "utf8_nopad_bin", "utf8"}, {0b00001100, "utf8mb3_nopad_bin", "utf8mb3"}}, isdefault: 0b00000000}, + 1108: {alias: []collalias{{0b00001100, "big5_nopad_bin", "big5"}}, isdefault: 0b00000000}, + 1109: {alias: []collalias{{0b00001100, "euckr_nopad_bin", "euckr"}}, isdefault: 0b00000000}, + 1110: {alias: []collalias{{0b00001100, "gb2312_nopad_bin", "gb2312"}}, isdefault: 0b00000000}, + 1111: {alias: []collalias{{0b00001100, "gbk_nopad_bin", "gbk"}}, isdefault: 0b00000000}, + 1112: {alias: []collalias{{0b00001100, "sjis_nopad_bin", "sjis"}}, isdefault: 0b00000000}, + 1113: {alias: []collalias{{0b00001100, "tis620_nopad_bin", "tis620"}}, isdefault: 0b00000000}, + 1114: {alias: []collalias{{0b00001100, "ucs2_nopad_bin", "ucs2"}}, isdefault: 0b00000000}, + 1115: {alias: []collalias{{0b00001100, "ujis_nopad_bin", "ujis"}}, isdefault: 0b00000000}, + 1116: {alias: []collalias{{0b00001100, "geostd8_general_nopad_ci", "geostd8"}}, isdefault: 0b00000000}, + 1117: {alias: []collalias{{0b00001100, "geostd8_nopad_bin", "geostd8"}}, isdefault: 0b00000000}, + 1119: {alias: []collalias{{0b00001100, "cp932_japanese_nopad_ci", "cp932"}}, isdefault: 0b00000000}, + 1120: {alias: []collalias{{0b00001100, "cp932_nopad_bin", "cp932"}}, isdefault: 0b00000000}, + 1121: {alias: []collalias{{0b00001100, "eucjpms_japanese_nopad_ci", "eucjpms"}}, isdefault: 0b00000000}, + 1122: {alias: []collalias{{0b00001100, "eucjpms_nopad_bin", "eucjpms"}}, isdefault: 0b00000000}, + 1125: {alias: []collalias{{0b00001100, "utf16_unicode_nopad_ci", "utf16"}}, isdefault: 0b00000000}, + 1147: {alias: []collalias{{0b00001100, "utf16_unicode_520_nopad_ci", "utf16"}}, isdefault: 0b00000000}, + 1152: {alias: []collalias{{0b00001100, "ucs2_unicode_nopad_ci", "ucs2"}}, isdefault: 0b00000000}, + 1174: {alias: []collalias{{0b00001100, "ucs2_unicode_520_nopad_ci", "ucs2"}}, isdefault: 0b00000000}, + 1184: {alias: []collalias{{0b00001100, "utf32_unicode_nopad_ci", "utf32"}}, isdefault: 0b00000000}, + 1206: {alias: []collalias{{0b00001100, "utf32_unicode_520_nopad_ci", "utf32"}}, isdefault: 0b00000000}, + 1216: {alias: []collalias{{0b00001100, "utf8_unicode_nopad_ci", "utf8"}, {0b00001100, "utf8mb3_unicode_nopad_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 1238: {alias: []collalias{{0b00001100, "utf8_unicode_520_nopad_ci", "utf8"}, {0b00001100, "utf8mb3_unicode_520_nopad_ci", "utf8mb3"}}, isdefault: 0b00000000}, + 1248: {alias: []collalias{{0b00001100, "utf8mb4_unicode_nopad_ci", "utf8mb4"}}, isdefault: 0b00000000}, + 1270: {alias: []collalias{{0b00001100, "utf8mb4_unicode_520_nopad_ci", "utf8mb4"}}, isdefault: 0b00000000}, } diff --git a/go/mysql/collations/remote/collation.go b/go/mysql/collations/remote/collation.go index 1e81c429794..dcc2acfee61 100644 --- a/go/mysql/collations/remote/collation.go +++ b/go/mysql/collations/remote/collation.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/vthash" ) @@ -53,22 +54,22 @@ type Collation struct { err error } -var _ collations.Collation = (*Collation)(nil) +var _ colldata.Collation = (*Collation)(nil) func makeRemoteCollation(conn *mysql.Conn, collid collations.ID, collname string) *Collation { - charset := collname + cs := collname if idx := strings.IndexByte(collname, '_'); idx >= 0 { - charset = collname[:idx] + cs = collname[:idx] } coll := &Collation{ name: collname, id: collid, conn: conn, - charset: charset, + charset: cs, } - coll.prefix = fmt.Sprintf("_%s X'", charset) + coll.prefix = fmt.Sprintf("_%s X'", cs) coll.suffix = fmt.Sprintf("' COLLATE %q", collname) coll.hex = hex.NewEncoder(&coll.sql) return coll @@ -204,7 +205,7 @@ func (rp *remotePattern) Match(in []byte) bool { return match } -func (c *Collation) Wildcard(pat []byte, _ rune, _ rune, escape rune) collations.WildcardPattern { +func (c *Collation) Wildcard(pat []byte, _ rune, _ rune, escape rune) colldata.WildcardPattern { return &remotePattern{ pattern: fmt.Sprintf("_%s X'%x'", c.charset, pat), remote: c, diff --git a/go/mysql/collations/supported.go b/go/mysql/collations/supported.go new file mode 100644 index 00000000000..4404af2d4fb --- /dev/null +++ b/go/mysql/collations/supported.go @@ -0,0 +1,294 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by makecolldata DO NOT EDIT + +package collations + +var supported = [...]string{ + 0x3: "dec8_swedish_ci", + 0x4: "cp850_general_ci", + 0x5: "latin1_german1_ci", + 0x6: "hp8_english_ci", + 0x7: "koi8r_general_ci", + 0x8: "latin1_swedish_ci", + 0x9: "latin2_general_ci", + 0xa: "swe7_swedish_ci", + 0xb: "ascii_general_ci", + 0xc: "ujis_japanese_ci", + 0xd: "sjis_japanese_ci", + 0xe: "cp1251_bulgarian_ci", + 0xf: "latin1_danish_ci", + 0x10: "hebrew_general_ci", + 0x13: "euckr_korean_ci", + 0x14: "latin7_estonian_cs", + 0x15: "latin2_hungarian_ci", + 0x16: "koi8u_general_ci", + 0x17: "cp1251_ukrainian_ci", + 0x18: "gb2312_chinese_ci", + 0x19: "greek_general_ci", + 0x1a: "cp1250_general_ci", + 0x1b: "latin2_croatian_ci", + 0x1d: "cp1257_lithuanian_ci", + 0x1e: "latin5_turkish_ci", + 0x20: "armscii8_general_ci", + 0x21: "utf8mb3_general_ci", + 0x23: "ucs2_general_ci", + 0x24: "cp866_general_ci", + 0x25: "keybcs2_general_ci", + 0x26: "macce_general_ci", + 0x27: "macroman_general_ci", + 0x28: "cp852_general_ci", + 0x29: "latin7_general_ci", + 0x2a: "latin7_general_cs", + 0x2b: "macce_bin", + 0x2c: "cp1250_croatian_ci", + 0x2d: "utf8mb4_general_ci", + 0x2e: "utf8mb4_bin", + 0x2f: "latin1_bin", + 0x30: "latin1_general_ci", + 0x31: "latin1_general_cs", + 0x32: "cp1251_bin", + 0x33: "cp1251_general_ci", + 0x34: "cp1251_general_cs", + 0x35: "macroman_bin", + 0x36: "utf16_general_ci", + 0x37: "utf16_bin", + 0x38: "utf16le_general_ci", + 0x39: "cp1256_general_ci", + 0x3a: "cp1257_bin", + 0x3b: "cp1257_general_ci", + 0x3c: "utf32_general_ci", + 0x3d: "utf32_bin", + 0x3e: "utf16le_bin", + 0x3f: "binary", + 0x40: "armscii8_bin", + 0x41: "ascii_bin", + 0x42: "cp1250_bin", + 0x43: "cp1256_bin", + 0x44: "cp866_bin", + 0x45: "dec8_bin", + 0x46: "greek_bin", + 0x47: "hebrew_bin", + 0x48: "hp8_bin", + 0x49: "keybcs2_bin", + 0x4a: "koi8r_bin", + 0x4b: "koi8u_bin", + 0x4d: "latin2_bin", + 0x4e: "latin5_bin", + 0x4f: "latin7_bin", + 0x50: "cp850_bin", + 0x51: "cp852_bin", + 0x52: "swe7_bin", + 0x53: "utf8mb3_bin", + 0x55: "euckr_bin", + 0x56: "gb2312_bin", + 0x58: "sjis_bin", + 0x5a: "ucs2_bin", + 0x5b: "ujis_bin", + 0x5c: "geostd8_general_ci", + 0x5d: "geostd8_bin", + 0x5e: "latin1_spanish_ci", + 0x5f: "cp932_japanese_ci", + 0x60: "cp932_bin", + 0x61: "eucjpms_japanese_ci", + 0x62: "eucjpms_bin", + 0x63: "cp1250_polish_ci", + 0x65: "utf16_unicode_ci", + 0x66: "utf16_icelandic_ci", + 0x67: "utf16_latvian_ci", + 0x68: "utf16_romanian_ci", + 0x69: "utf16_slovenian_ci", + 0x6a: "utf16_polish_ci", + 0x6b: "utf16_estonian_ci", + 0x6c: "utf16_spanish_ci", + 0x6d: "utf16_swedish_ci", + 0x6e: "utf16_turkish_ci", + 0x6f: "utf16_czech_ci", + 0x70: "utf16_danish_ci", + 0x71: "utf16_lithuanian_ci", + 0x72: "utf16_slovak_ci", + 0x73: "utf16_spanish2_ci", + 0x74: "utf16_roman_ci", + 0x75: "utf16_persian_ci", + 0x76: "utf16_esperanto_ci", + 0x77: "utf16_hungarian_ci", + 0x78: "utf16_sinhala_ci", + 0x79: "utf16_german2_ci", + 0x7a: "utf16_croatian_ci", + 0x7b: "utf16_unicode_520_ci", + 0x7c: "utf16_vietnamese_ci", + 0x80: "ucs2_unicode_ci", + 0x81: "ucs2_icelandic_ci", + 0x82: "ucs2_latvian_ci", + 0x83: "ucs2_romanian_ci", + 0x84: "ucs2_slovenian_ci", + 0x85: "ucs2_polish_ci", + 0x86: "ucs2_estonian_ci", + 0x87: "ucs2_spanish_ci", + 0x88: "ucs2_swedish_ci", + 0x89: "ucs2_turkish_ci", + 0x8a: "ucs2_czech_ci", + 0x8b: "ucs2_danish_ci", + 0x8c: "ucs2_lithuanian_ci", + 0x8d: "ucs2_slovak_ci", + 0x8e: "ucs2_spanish2_ci", + 0x8f: "ucs2_roman_ci", + 0x90: "ucs2_persian_ci", + 0x91: "ucs2_esperanto_ci", + 0x92: "ucs2_hungarian_ci", + 0x93: "ucs2_sinhala_ci", + 0x94: "ucs2_german2_ci", + 0x95: "ucs2_croatian_ci", + 0x96: "ucs2_unicode_520_ci", + 0x97: "ucs2_vietnamese_ci", + 0xa0: "utf32_unicode_ci", + 0xa1: "utf32_icelandic_ci", + 0xa2: "utf32_latvian_ci", + 0xa3: "utf32_romanian_ci", + 0xa4: "utf32_slovenian_ci", + 0xa5: "utf32_polish_ci", + 0xa6: "utf32_estonian_ci", + 0xa7: "utf32_spanish_ci", + 0xa8: "utf32_swedish_ci", + 0xa9: "utf32_turkish_ci", + 0xaa: "utf32_czech_ci", + 0xab: "utf32_danish_ci", + 0xac: "utf32_lithuanian_ci", + 0xad: "utf32_slovak_ci", + 0xae: "utf32_spanish2_ci", + 0xaf: "utf32_roman_ci", + 0xb0: "utf32_persian_ci", + 0xb1: "utf32_esperanto_ci", + 0xb2: "utf32_hungarian_ci", + 0xb3: "utf32_sinhala_ci", + 0xb4: "utf32_german2_ci", + 0xb5: "utf32_croatian_ci", + 0xb6: "utf32_unicode_520_ci", + 0xb7: "utf32_vietnamese_ci", + 0xc0: "utf8mb3_unicode_ci", + 0xc1: "utf8mb3_icelandic_ci", + 0xc2: "utf8mb3_latvian_ci", + 0xc3: "utf8mb3_romanian_ci", + 0xc4: "utf8mb3_slovenian_ci", + 0xc5: "utf8mb3_polish_ci", + 0xc6: "utf8mb3_estonian_ci", + 0xc7: "utf8mb3_spanish_ci", + 0xc8: "utf8mb3_swedish_ci", + 0xc9: "utf8mb3_turkish_ci", + 0xca: "utf8mb3_czech_ci", + 0xcb: "utf8mb3_danish_ci", + 0xcc: "utf8mb3_lithuanian_ci", + 0xcd: "utf8mb3_slovak_ci", + 0xce: "utf8mb3_spanish2_ci", + 0xcf: "utf8mb3_roman_ci", + 0xd0: "utf8mb3_persian_ci", + 0xd1: "utf8mb3_esperanto_ci", + 0xd2: "utf8mb3_hungarian_ci", + 0xd3: "utf8mb3_sinhala_ci", + 0xd4: "utf8mb3_german2_ci", + 0xd5: "utf8mb3_croatian_ci", + 0xd6: "utf8mb3_unicode_520_ci", + 0xd7: "utf8mb3_vietnamese_ci", + 0xe0: "utf8mb4_unicode_ci", + 0xe1: "utf8mb4_icelandic_ci", + 0xe2: "utf8mb4_latvian_ci", + 0xe3: "utf8mb4_romanian_ci", + 0xe4: "utf8mb4_slovenian_ci", + 0xe5: "utf8mb4_polish_ci", + 0xe6: "utf8mb4_estonian_ci", + 0xe7: "utf8mb4_spanish_ci", + 0xe8: "utf8mb4_swedish_ci", + 0xe9: "utf8mb4_turkish_ci", + 0xea: "utf8mb4_czech_ci", + 0xeb: "utf8mb4_danish_ci", + 0xec: "utf8mb4_lithuanian_ci", + 0xed: "utf8mb4_slovak_ci", + 0xee: "utf8mb4_spanish2_ci", + 0xef: "utf8mb4_roman_ci", + 0xf0: "utf8mb4_persian_ci", + 0xf1: "utf8mb4_esperanto_ci", + 0xf2: "utf8mb4_hungarian_ci", + 0xf3: "utf8mb4_sinhala_ci", + 0xf4: "utf8mb4_german2_ci", + 0xf5: "utf8mb4_croatian_ci", + 0xf6: "utf8mb4_unicode_520_ci", + 0xf7: "utf8mb4_vietnamese_ci", + 0xfa: "gb18030_unicode_520_ci", + 0xff: "utf8mb4_0900_ai_ci", + 0x100: "utf8mb4_de_pb_0900_ai_ci", + 0x101: "utf8mb4_is_0900_ai_ci", + 0x102: "utf8mb4_lv_0900_ai_ci", + 0x103: "utf8mb4_ro_0900_ai_ci", + 0x104: "utf8mb4_sl_0900_ai_ci", + 0x105: "utf8mb4_pl_0900_ai_ci", + 0x106: "utf8mb4_et_0900_ai_ci", + 0x107: "utf8mb4_es_0900_ai_ci", + 0x108: "utf8mb4_sv_0900_ai_ci", + 0x109: "utf8mb4_tr_0900_ai_ci", + 0x10a: "utf8mb4_cs_0900_ai_ci", + 0x10b: "utf8mb4_da_0900_ai_ci", + 0x10c: "utf8mb4_lt_0900_ai_ci", + 0x10d: "utf8mb4_sk_0900_ai_ci", + 0x10e: "utf8mb4_es_trad_0900_ai_ci", + 0x10f: "utf8mb4_la_0900_ai_ci", + 0x111: "utf8mb4_eo_0900_ai_ci", + 0x112: "utf8mb4_hu_0900_ai_ci", + 0x113: "utf8mb4_hr_0900_ai_ci", + 0x115: "utf8mb4_vi_0900_ai_ci", + 0x116: "utf8mb4_0900_as_cs", + 0x117: "utf8mb4_de_pb_0900_as_cs", + 0x118: "utf8mb4_is_0900_as_cs", + 0x119: "utf8mb4_lv_0900_as_cs", + 0x11a: "utf8mb4_ro_0900_as_cs", + 0x11b: "utf8mb4_sl_0900_as_cs", + 0x11c: "utf8mb4_pl_0900_as_cs", + 0x11d: "utf8mb4_et_0900_as_cs", + 0x11e: "utf8mb4_es_0900_as_cs", + 0x11f: "utf8mb4_sv_0900_as_cs", + 0x120: "utf8mb4_tr_0900_as_cs", + 0x121: "utf8mb4_cs_0900_as_cs", + 0x122: "utf8mb4_da_0900_as_cs", + 0x123: "utf8mb4_lt_0900_as_cs", + 0x124: "utf8mb4_sk_0900_as_cs", + 0x125: "utf8mb4_es_trad_0900_as_cs", + 0x126: "utf8mb4_la_0900_as_cs", + 0x128: "utf8mb4_eo_0900_as_cs", + 0x129: "utf8mb4_hu_0900_as_cs", + 0x12a: "utf8mb4_hr_0900_as_cs", + 0x12c: "utf8mb4_vi_0900_as_cs", + 0x12f: "utf8mb4_ja_0900_as_cs", + 0x130: "utf8mb4_ja_0900_as_cs_ks", + 0x131: "utf8mb4_0900_as_ci", + 0x132: "utf8mb4_ru_0900_ai_ci", + 0x133: "utf8mb4_ru_0900_as_cs", + 0x134: "utf8mb4_zh_0900_as_cs", + 0x135: "utf8mb4_0900_bin", + 0x136: "utf8mb4_nb_0900_ai_ci", + 0x137: "utf8mb4_nb_0900_as_cs", + 0x138: "utf8mb4_nn_0900_ai_ci", + 0x139: "utf8mb4_nn_0900_as_cs", + 0x13a: "utf8mb4_sr_latn_0900_ai_ci", + 0x13b: "utf8mb4_sr_latn_0900_as_cs", + 0x13c: "utf8mb4_bs_0900_ai_ci", + 0x13d: "utf8mb4_bs_0900_as_cs", + 0x13e: "utf8mb4_bg_0900_ai_ci", + 0x13f: "utf8mb4_bg_0900_as_cs", + 0x140: "utf8mb4_gl_0900_ai_ci", + 0x141: "utf8mb4_gl_0900_as_cs", + 0x142: "utf8mb4_mn_cyrl_0900_ai_ci", + 0x143: "utf8mb4_mn_cyrl_0900_as_cs", +} diff --git a/go/mysql/collations/testdata/versions/collations_MySQL80.csv b/go/mysql/collations/testdata/versions/collations_MySQL8.csv similarity index 100% rename from go/mysql/collations/testdata/versions/collations_MySQL80.csv rename to go/mysql/collations/testdata/versions/collations_MySQL8.csv diff --git a/go/mysql/collations/tools/colldump/Dockerfile b/go/mysql/collations/tools/colldump/Dockerfile new file mode 100644 index 00000000000..3e5acf4d9a6 --- /dev/null +++ b/go/mysql/collations/tools/colldump/Dockerfile @@ -0,0 +1,20 @@ +FROM debian:latest + +ARG MYSQL_VERSION=8.0.34 + +RUN apt-get update && apt-get -y install curl cmake build-essential libssl-dev libncurses5-dev pkg-config rapidjson-dev + +RUN cd /tmp && \ + curl -OL https://dev.mysql.com/get/Downloads/MySQL-8.0/mysql-${MYSQL_VERSION}.tar.gz && \ + tar zxvf mysql-${MYSQL_VERSION}.tar.gz + +ADD colldump.cc /tmp/mysql-${MYSQL_VERSION}/strings/colldump.cc +RUN echo "MYSQL_ADD_EXECUTABLE(colldump colldump.cc SKIP_INSTALL)\nTARGET_LINK_LIBRARIES(colldump strings)\n" >> /tmp/mysql-${MYSQL_VERSION}/strings/CMakeLists.txt + +RUN cd /tmp/mysql-${MYSQL_VERSION} && \ + mkdir build && \ + cd build && \ + cmake -DDOWNLOAD_BOOST=1 -DWITH_BOOST=dist/boost .. && \ + make colldump + +RUN mkdir /mysql-collations && /tmp/mysql-${MYSQL_VERSION}/build/runtime_output_directory/colldump /mysql-collations diff --git a/go/mysql/collations/tools/colldump/colldump.cc b/go/mysql/collations/tools/colldump/colldump.cc new file mode 100644 index 00000000000..7668ae1dc70 --- /dev/null +++ b/go/mysql/collations/tools/colldump/colldump.cc @@ -0,0 +1,418 @@ +/* Copyright (c) 2023, The Vitess Authors + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License, version 2.0, + as published by the Free Software Foundation. + + This program is also distributed with certain software (including + but not limited to OpenSSL) that is licensed under separate terms, + as designated in a particular file or component or in included license + documentation. The authors of MySQL hereby grant you an additional + permission to link the program and your derivative works with the + separately licensed software that they have included with MySQL. + + Without limiting anything contained in the foregoing, this file, + which is part of C Driver for MySQL (Connector/C), is also subject to the + Universal FOSS Exception, version 1.0, a copy of which can be found at + http://oss.oracle.com/licenses/universal-foss-exception. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License, version 2.0, for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ + +#include +#include +#include +#include +#include +#include + +#include "m_ctype.h" + +#ifdef HAVE_UNISTD_H +#include +#endif + +#include "my_sys.h" +#include "my_config.h" +#include "my_compiler.h" +#include "my_inttypes.h" +#include "my_io.h" +#include "my_loglevel.h" +#include "my_macros.h" +#include "str_uca_type.h" + +#include "rapidjson/rapidjson.h" +#include "rapidjson/filewritestream.h" +#include "rapidjson/writer.h" + +template +static void print_contractions_1(J &json, my_wc_t *path, size_t depth, bool contextual, const MY_CONTRACTION &contraction) +{ + path[depth] = contraction.ch; + + if (contraction.is_contraction_tail) + { + json.StartObject(); + + json.Key("Path"); + json.StartArray(); + for (size_t i = 0; i <= depth; i++) + { + json.Uint((unsigned int)path[i]); + } + json.EndArray(); + + json.Key("Weights"); + json.StartArray(); + for (size_t i = 0; i < MY_UCA_MAX_WEIGHT_SIZE; i++) + { + json.Uint(contraction.weight[i]); + } + json.EndArray(); + + if (contextual) + { + json.Key("Contextual"); + json.Bool(true); + } + + json.EndObject(); + } + + for (const MY_CONTRACTION &ctr : contraction.child_nodes) + { + print_contractions_1(json, path, depth + 1, false, ctr); + } + for (const MY_CONTRACTION &ctr : contraction.child_nodes_context) + { + print_contractions_1(json, path, depth + 1, true, ctr); + } +} + +template +static void print_contractions(J &json, std::vector *contractions) +{ + my_wc_t path[256]; + json.StartArray(); + for (const MY_CONTRACTION &ctr : *contractions) + { + print_contractions_1(json, path, 0, false, ctr); + } + json.EndArray(); +} + +template +static void print_reorder_params(J &json, struct Reorder_param *reorder) +{ + json.StartArray(); + for (int i = 0; i < reorder->wt_rec_num; i++) + { + struct Reorder_wt_rec &r = reorder->wt_rec[i]; + json.StartArray(); + json.Uint(r.old_wt_bdy.begin); + json.Uint(r.old_wt_bdy.end); + json.Uint(r.new_wt_bdy.begin); + json.Uint(r.new_wt_bdy.end); + json.EndArray(); + } + json.EndArray(); +} + +template +static void print_unipages(J &json, const MY_UNI_IDX *unicodeidx) +{ + json.StartArray(); + for (const MY_UNI_IDX *idx = unicodeidx; idx->tab != NULL; idx++) + { + json.StartObject(); + json.Key("From"); + json.Uint(idx->from); + json.Key("To"); + json.Uint(idx->to); + json.Key("Tab"); + json.StartArray(); + const size_t entries = idx->to - idx->from; + for (size_t i = 0; i <= entries; i++) + { + json.Uint(idx->tab[i]); + } + json.EndArray(); + json.EndObject(); + } + json.EndArray(); +} + +template +static void print_uca_weights_900(J &json, int codepoint, uint16 **weights) +{ + uint16 *page = weights[codepoint >> 8]; + if (page == NULL) + return; + + int offset = codepoint & 0xFF; + int cecount = page[offset]; + char key[32]; + snprintf(key, sizeof(key), "U+%04X", codepoint); + + json.Key(key); + json.StartArray(); + for (int ce = 0; ce < cecount; ce++) + { + json.Uint(page[256 + (ce * 3 + 0) * 256 + offset]); + json.Uint(page[256 + (ce * 3 + 1) * 256 + offset]); + json.Uint(page[256 + (ce * 3 + 2) * 256 + offset]); + } + json.EndArray(); +} + +template +static void print_uca_weights_legacy(J &json, int codepoint, uint16 **weights, uchar *lengths) +{ + uint16 *page = weights[codepoint >> 8]; + if (page == NULL) + return; + + int offset = codepoint & 0xFF; + uint16 *w = page + offset * lengths[codepoint >> 8]; + if (!w[0]) + return; + + char key[32]; + snprintf(key, sizeof(key), "U+%04X", codepoint); + + json.Key(key); + json.StartArray(); + for (; w[0]; w++) + { + json.Uint(w[0]); + } + json.EndArray(); +} + +template +static void print_array_uchar(J &json, const uchar *arr, size_t len) +{ + json.StartArray(); + for (size_t i = 0; i < len; ++i) + { + json.Uint(arr[i]); + } + json.EndArray(); +} + +template +static void print_array_uint16(J &json, const uint16 *arr, size_t len) +{ + json.StartArray(); + for (size_t i = 0; i < len; ++i) + { + json.Uint(arr[i]); + } + json.EndArray(); +} + +static CHARSET_INFO *init_collation(const char *name) +{ + MY_CHARSET_LOADER loader; + return my_collation_get_by_name(&loader, name, MYF(0)); +} + +#define MY_UCA_MAXCHAR (0x10FFFF + 1) +#define MY_UCA_CHARS_PER_PAGE 256 + +extern MY_COLLATION_HANDLER my_collation_uca_900_handler; +extern MY_COLLATION_HANDLER my_collation_any_uca_handler; +extern MY_COLLATION_HANDLER my_collation_utf16_uca_handler; +extern MY_COLLATION_HANDLER my_collation_utf32_uca_handler; +extern MY_COLLATION_HANDLER my_collation_ucs2_uca_handler; + +struct KNOWN_HANDLER +{ + const char *name; + const MY_COLLATION_HANDLER *h; +}; + +static KNOWN_HANDLER known_handlers[] = { + {"8bit_bin", &my_collation_8bit_bin_handler}, + {"8bit_simple_ci", &my_collation_8bit_simple_ci_handler}, + {"any_uca", &my_collation_any_uca_handler}, + {"uca_900", &my_collation_uca_900_handler}, + {"utf16_uca", &my_collation_utf16_uca_handler}, + {"utf32_uca", &my_collation_utf32_uca_handler}, + {"ucs2_uca", &my_collation_ucs2_uca_handler}, +}; + +static int dumpall(const char *dumppath) +{ + char pathbuf[4096]; + char jsonbuf[4096 * 4]; + + // bootstrap the `all_charsets` collation array + init_collation("utf8mb4_0900_ai_ci"); + + for (const CHARSET_INFO *charset : all_charsets) + { + if (!charset || (charset->state & MY_CS_AVAILABLE) == 0) + continue; + + charset = init_collation(charset->m_coll_name); + snprintf(pathbuf, sizeof(pathbuf), "%s/%s.json", dumppath, charset->m_coll_name); + + FILE *jsonfile = fopen(pathbuf, "w"); + if (jsonfile == NULL) + { + fprintf(stderr, "failed to create '%s'\n", pathbuf); + return 1; + } + + rapidjson::FileWriteStream os(jsonfile, jsonbuf, sizeof(jsonbuf)); + rapidjson::Writer, rapidjson::ASCII<>> json(os); + + json.StartObject(); + json.Key("Name"); + json.String(charset->m_coll_name); + json.Key("Charset"); + json.String(charset->csname); + json.Key("Number"); + json.Uint(charset->number); + + json.Key("Flags"); + json.StartObject(); + + json.Key("Binary"); + json.Bool((charset->state & MY_CS_BINSORT) != 0); + json.Key("ASCII"); + json.Bool((charset->state & MY_CS_PUREASCII) != 0); + json.Key("Default"); + json.Bool((charset->state & MY_CS_PRIMARY) != 0); + + json.EndObject(); + + for (const KNOWN_HANDLER &handler : known_handlers) + { + if (charset->coll == handler.h) + { + json.Key("CollationImpl"); + json.String(handler.name); + break; + } + } + + if (charset->ctype != NULL) + { + json.Key("CType"); + print_array_uchar(json, charset->ctype, 256); + } + + if (charset->to_lower != NULL) + { + json.Key("ToLower"); + print_array_uchar(json, charset->to_lower, 256); + } + + if (charset->to_upper != NULL) + { + json.Key("ToUpper"); + print_array_uchar(json, charset->to_upper, 256); + } + + if (charset->tab_to_uni != NULL) + { + json.Key("TabToUni"); + print_array_uint16(json, charset->tab_to_uni, 256); + } + + if (charset->tab_from_uni != NULL) + { + json.Key("TabFromUni"); + print_unipages(json, charset->tab_from_uni); + } + + if (charset->sort_order != NULL) + { + json.Key("SortOrder"); + print_array_uchar(json, charset->sort_order, 256); + } + + if (charset->uca != NULL) + { + MY_UCA_INFO *uca = charset->uca; + + json.Key("UCAVersion"); + + switch (uca->version) + { + case UCA_V400: + json.Uint(400); + break; + case UCA_V520: + json.Uint(520); + break; + case UCA_V900: + json.Uint(900); + break; + default: + json.Uint(0); + break; + } + + json.Key("Weights"); + json.StartObject(); + if (uca->version == UCA_V900) + { + for (my_wc_t cp = 0; cp < MY_UCA_MAXCHAR; cp++) + { + print_uca_weights_900(json, cp, uca->weights); + } + } + else + { + for (my_wc_t cp = 0; cp < uca->maxchar; cp++) + { + print_uca_weights_legacy(json, cp, uca->weights, uca->lengths); + } + } + json.EndObject(); + + if (uca->have_contractions) + { + json.Key("Contractions"); + print_contractions(json, uca->contraction_nodes); + } + } + + if (charset->coll_param != NULL) + { + json.Key("UppercaseFirst"); + json.Bool(charset->coll_param->case_first == CASE_FIRST_UPPER); + + if (charset->coll_param->reorder_param != NULL) + { + json.Key("Reorder"); + print_reorder_params(json, charset->coll_param->reorder_param); + } + } + + json.EndObject(); + os.Flush(); + fclose(jsonfile); + } + return 0; +} + +int main(int argc, char **argv) +{ + if (argc < 2) + { + fprintf(stderr, "usage: %s \n", argv[0]); + return 1; + } + + return dumpall(argv[1]); +} \ No newline at end of file diff --git a/go/mysql/collations/tools/colldump/colldump.sh b/go/mysql/collations/tools/colldump/colldump.sh new file mode 100755 index 00000000000..fe6d1d9d7d2 --- /dev/null +++ b/go/mysql/collations/tools/colldump/colldump.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +cd "$(dirname "$0")" +docker build --tag mysql-collation-data . + +imgid=$(docker create mysql-collation-data) +docker cp $imgid:/mysql-collations/. ../../testdata/mysqldata +docker rm -v $imgid \ No newline at end of file diff --git a/go/mysql/collations/tools/makecolldata/codegen/codegen.go b/go/mysql/collations/tools/makecolldata/codegen/codegen.go index cc2d5ad3a90..4fa98f2afd1 100644 --- a/go/mysql/collations/tools/makecolldata/codegen/codegen.go +++ b/go/mysql/collations/tools/makecolldata/codegen/codegen.go @@ -24,6 +24,7 @@ import ( "path" "reflect" "sort" + "time" "vitess.io/vitess/go/tools/codegen" ) @@ -64,10 +65,29 @@ func Merge(gens ...*Generator) *Generator { return result } +const licenseFileHeader = `/* +Copyright %d The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +` + func (g *Generator) WriteToFile(out string) { var file, fmtfile bytes.Buffer file.Grow(g.Buffer.Len() + 1024) + fmt.Fprintf(&file, licenseFileHeader, time.Now().Year()) fmt.Fprintf(&file, "// Code generated by %s DO NOT EDIT\n\n", path.Base(os.Args[0])) fmt.Fprintf(&file, "package %s\n\n", g.local.Name()) fmt.Fprintf(&file, "import (\n") diff --git a/go/mysql/collations/tools/makecolldata/codegen/tablegen.go b/go/mysql/collations/tools/makecolldata/codegen/tablegen.go index 787d41293be..b12d32f59d7 100644 --- a/go/mysql/collations/tools/makecolldata/codegen/tablegen.go +++ b/go/mysql/collations/tools/makecolldata/codegen/tablegen.go @@ -24,7 +24,6 @@ import ( "log" "math/bits" "os" - "reflect" "vitess.io/vitess/go/mysql/collations/internal/uca" ) @@ -91,7 +90,6 @@ func (pg *EmbedPageGenerator) WritePage16(g *Generator, varname string, values [ func (pg *EmbedPageGenerator) WriteTrailer(g *Generator, embedfile string) { unsafe := Package("unsafe") - reflect := Package("reflect") g.UsePackage("embed") g.P() @@ -99,7 +97,7 @@ func (pg *EmbedPageGenerator) WriteTrailer(g *Generator, embedfile string) { g.P("var weightsUCA_embed_data string") g.P() g.P("func weightsUCA_embed(pos, length int) []uint16 {") - g.P("return (*[0x7fff0000]uint16)(", unsafe, ".Pointer((*", reflect, ".StringHeader)(", unsafe, ".Pointer(&weightsUCA_embed_data)).Data))[pos:pos+length]") + g.P("return (*[0x7fff0000]uint16)(", unsafe, ".Pointer(", unsafe, ".StringData(weightsUCA_embed_data)))[pos:pos+length]") g.P("}") } @@ -126,23 +124,12 @@ type entry struct { weights []uint16 } -func (e *entry) adjustHangulWeights(tb *TableGenerator, jamos []rune) { - for _, jamo := range jamos { - _, entry := tb.entryForCodepoint(jamo) - e.weights = append(e.weights, entry.weights[0], entry.weights[1], entry.weights[2]+1) - } -} - type page struct { n int entryCount int entries [uca.CodepointsPerPage]entry } -func (p *page) equals(other *page) bool { - return reflect.DeepEqual(p, other) -} - func (p *page) name(uca string) string { if p.entryCount == 0 { panic("cannot name empty page") diff --git a/go/mysql/collations/tools/makecolldata/main.go b/go/mysql/collations/tools/makecolldata/main.go index 0bcbd1ecb2b..ee559a886b5 100644 --- a/go/mysql/collations/tools/makecolldata/main.go +++ b/go/mysql/collations/tools/makecolldata/main.go @@ -106,7 +106,7 @@ func (all AllMetadata) get(name string) *CollationMetadata { return nil } -const PkgCollations codegen.Package = "vitess.io/vitess/go/mysql/collations" +const PkgCollationsData codegen.Package = "vitess.io/vitess/go/mysql/collations/colldata" const PkgCharset codegen.Package = "vitess.io/vitess/go/mysql/collations/charset" func main() { @@ -114,5 +114,5 @@ func main() { metadata := loadMysqlMetadata() maketables(*Embed, ".", metadata) makeversions(".") - makemysqldata(".", metadata) + makemysqldata("colldata", ".", metadata) } diff --git a/go/mysql/collations/tools/makecolldata/maketables.go b/go/mysql/collations/tools/makecolldata/maketables.go index 8ac2f9049ce..055162401bb 100644 --- a/go/mysql/collations/tools/makecolldata/maketables.go +++ b/go/mysql/collations/tools/makecolldata/maketables.go @@ -39,7 +39,7 @@ func maketable(g *codegen.Generator, table string, collation *CollationMetadata, func maketables(embed bool, output string, metadata AllMetadata) { var pages = codegen.NewPageGenerator(embed) - var g = codegen.NewGenerator("vitess.io/vitess/go/mysql/collations") + var g = codegen.NewGenerator("vitess.io/vitess/go/mysql/collations/colldata") var fastg = codegen.NewGenerator("vitess.io/vitess/go/mysql/collations/internal/uca") tablegen := maketable(g, "uca900", metadata.get("utf8mb4_0900_ai_ci"), pages, uca.Layout_uca900{}) @@ -53,9 +53,9 @@ func maketables(embed bool, output string, metadata AllMetadata) { if pages, ok := pages.(*codegen.EmbedPageGenerator); ok { pages.WriteTrailer(g, "mysqlucadata.bin") - pages.WriteToFile(path.Join(output, "mysqlucadata.bin")) + pages.WriteToFile(path.Join(output, "colldata/mysqlucadata.bin")) } - g.WriteToFile(path.Join(output, "mysqlucadata.go")) + g.WriteToFile(path.Join(output, "colldata/mysqlucadata.go")) fastg.WriteToFile(path.Join(output, "internal/uca/fasttables.go")) } diff --git a/go/mysql/collations/tools/makecolldata/mysqldata.go b/go/mysql/collations/tools/makecolldata/mysqldata.go index 351e578d2af..567f04362de 100644 --- a/go/mysql/collations/tools/makecolldata/mysqldata.go +++ b/go/mysql/collations/tools/makecolldata/mysqldata.go @@ -353,12 +353,12 @@ func (g *Generator) printCollationMultibyte(meta *CollationMetadata) { g.P("},") } -func makemysqldata(output string, metadata AllMetadata) { +func makemysqldata(output string, supportedOutput string, metadata AllMetadata) { var unsupportedByCharset = make(map[string][]string) var g = Generator{ - Generator: codegen.NewGenerator(PkgCollations), + Generator: codegen.NewGenerator(PkgCollationsData), Tables: TableGenerator{ - Generator: codegen.NewGenerator(PkgCollations), + Generator: codegen.NewGenerator(PkgCollationsData), dedup: make(map[string]string), baseWeightsUca400: metadata.get("utf8mb4_unicode_ci").Weights, baseWeightsUca520: metadata.get("utf8mb4_unicode_520_ci").Weights, @@ -366,15 +366,22 @@ func makemysqldata(output string, metadata AllMetadata) { }, } + var h = Generator{ + Generator: codegen.NewGenerator("vitess.io/vitess/go/mysql/collations"), + } + g.P("var collationsById = [...]Collation{") + h.P("var supported = [...]string{") for _, meta := range metadata { switch { case meta.Name == "utf8mb4_0900_bin": g.P(uint(309), ": &Collation_utf8mb4_0900_bin{},") + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") case meta.Name == "binary": g.P(uint(63), ": &Collation_binary{},") + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") case meta.Name == "tis620_bin": // explicitly unsupported for now because of not accurate results @@ -384,24 +391,31 @@ func makemysqldata(output string, metadata AllMetadata) { meta.CollationImpl == "utf32_uca" || meta.CollationImpl == "ucs2_uca": g.printCollationUcaLegacy(meta) + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") case meta.CollationImpl == "uca_900": g.printCollationUca900(meta) + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") case meta.CollationImpl == "8bit_bin" || meta.CollationImpl == "8bit_simple_ci": g.printCollation8bit(meta) + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") case meta.Name == "gb18030_unicode_520_ci": g.printCollationUcaLegacy(meta) + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") case charset.IsMultibyteByName(meta.Charset): g.printCollationMultibyte(meta) + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") case strings.HasSuffix(meta.Name, "_bin") && charset.IsUnicodeByName(meta.Charset): g.printCollationUnicode(meta) + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") case strings.HasSuffix(meta.Name, "_general_ci"): g.printCollationUnicode(meta) + h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",") default: unsupportedByCharset[meta.Charset] = append(unsupportedByCharset[meta.Charset], meta.Name) @@ -409,7 +423,9 @@ func makemysqldata(output string, metadata AllMetadata) { } g.P("}") + h.P("}") codegen.Merge(g.Tables.Generator, g.Generator).WriteToFile(path.Join(output, "mysqldata.go")) + h.WriteToFile(path.Join(supportedOutput, "supported.go")) var unhandledCount int for impl, collations := range unsupportedByCharset { diff --git a/go/mysql/collations/tools/makecolldata/mysqlversions.go b/go/mysql/collations/tools/makecolldata/mysqlversions.go index 5bdd3165e53..f0578ecd95b 100644 --- a/go/mysql/collations/tools/makecolldata/mysqlversions.go +++ b/go/mysql/collations/tools/makecolldata/mysqlversions.go @@ -60,6 +60,7 @@ func makeversions(output string) { } sort.Strings(versionfiles) + charsets := make(map[string]string) versioninfo := make(map[uint]*versionInfo) for v, versionCsv := range versionfiles { f, err := os.Open(versionCsv) @@ -89,14 +90,17 @@ func makeversions(output string) { collname := cols[0] vi.alias[collname] |= 1 << v + charsets[collname] = cols[1] for from, to := range CharsetAliases { if strings.HasPrefix(collname, from+"_") { aliased := strings.Replace(collname, from+"_", to+"_", 1) + charsets[aliased] = to vi.alias[aliased] |= 1 << v } if strings.HasPrefix(collname, to+"_") { aliased := strings.Replace(collname, to+"_", from+"_", 1) + charsets[aliased] = from vi.alias[aliased] |= 1 << v } } @@ -123,7 +127,7 @@ func makeversions(output string) { var g = codegen.NewGenerator("vitess.io/vitess/go/mysql/collations") g.P("type collver byte") - g.P("type collalias struct { mask collver; name string }") + g.P("type collalias struct { mask collver; name string; charset string }") g.P("const (") g.P("collverInvalid collver = 0") for n, version := range versions { @@ -150,7 +154,7 @@ func makeversions(output string) { // all MySQL versions, but this is implemented as a method on `collver` so when // MySQL maps utf8 to utfmb4, we can perform the mapping only for the specific // MySQL version onwards. - g.P("func (v collver) charsetAliases() map[string]string { return ", fmt.Sprintf("%#v", CharsetAliases), "}") + g.P("func charsetAliases() map[string]string { return ", fmt.Sprintf("%#v", CharsetAliases), "}") g.P() g.P("var globalVersionInfo = map[ID]struct{alias []collalias; isdefault collver}{") @@ -164,14 +168,14 @@ func makeversions(output string) { for _, vi := range sorted { var reverse []alias for a, m := range vi.alias { - reverse = append(reverse, alias{m, a}) + reverse = append(reverse, alias{mask: m, name: a}) } sort.Slice(reverse, func(i, j int) bool { return reverse[i].name < reverse[j].name }) fmt.Fprintf(g, "%d: {alias: []collalias{", vi.id) for _, a := range reverse { - fmt.Fprintf(g, "{0b%08b, %q},", a.mask, a.name) + fmt.Fprintf(g, "{0b%08b, %q, %q},", a.mask, a.name, charsets[a.name]) } fmt.Fprintf(g, "}, isdefault: 0b%08b},\n", vi.isdefault) } diff --git a/go/mysql/collations/tools/maketestdata/maketestdata.go b/go/mysql/collations/tools/maketestdata/maketestdata.go index e8cb0daee5d..67d5a4739f6 100644 --- a/go/mysql/collations/tools/maketestdata/maketestdata.go +++ b/go/mysql/collations/tools/maketestdata/maketestdata.go @@ -30,6 +30,8 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" @@ -166,17 +168,17 @@ func main() { flag.Parse(fs) var defaults = collations.Local() - var collationsForLanguage = make(map[testutil.Lang][]collations.Collation) - var allcollations = defaults.AllCollations() + var collationsForLanguage = make(map[testutil.Lang][]collations.ID) + var allcollations = colldata.All(defaults) for lang := range testutil.KnownLanguages { for _, coll := range allcollations { if lang.MatchesCollation(coll.Name()) { - collationsForLanguage[lang] = append(collationsForLanguage[lang], coll) + collationsForLanguage[lang] = append(collationsForLanguage[lang], coll.ID()) } } } - var rootCollations = []collations.Collation{ + var rootCollations = []collations.ID{ defaults.LookupByName("utf8mb4_0900_as_cs"), defaults.LookupByName("utf8mb4_0900_as_ci"), defaults.LookupByName("utf8mb4_0900_ai_ci"), @@ -211,21 +213,22 @@ func main() { var total int var collationNames []string - var interestingCollations []collations.Collation + var interestingCollations []collations.ID interestingCollations = append(interestingCollations, rootCollations...) interestingCollations = append(interestingCollations, collationsForLanguage[lang]...) for _, collation := range interestingCollations { - transcoded, err := charset.ConvertFromUTF8(nil, collation.Charset(), []byte(snippet)) + transcoded, err := charset.ConvertFromUTF8(nil, colldata.Lookup(collation).Charset(), []byte(snippet)) if err != nil { - log.Printf("[%s] skip collation %s", lang, collation.Name()) + log.Printf("[%s] skip collation %s", lang, defaults.LookupName(collation)) continue } - weights := colldump(collation.Name(), transcoded) - gcase.Weights[collation.Name()] = weights + colName := defaults.LookupName(collation) + weights := colldump(colName, transcoded) + gcase.Weights[colName] = weights total += len(weights) - collationNames = append(collationNames, collation.Name()) + collationNames = append(collationNames, colName) } log.Printf("[%s] written samples for %d collations (%.02fkb): %s", diff --git a/go/mysql/conn.go b/go/mysql/conn.go index 74c70da7728..6f3643ebc7f 100644 --- a/go/mysql/conn.go +++ b/go/mysql/conn.go @@ -18,6 +18,7 @@ package mysql import ( "bufio" + "context" "crypto/tls" "crypto/x509" "errors" @@ -29,11 +30,10 @@ import ( "sync/atomic" "time" + "vitess.io/vitess/go/bucketpool" "vitess.io/vitess/go/mysql/collations" - + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqlescape" - - "vitess.io/vitess/go/bucketpool" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" @@ -199,6 +199,19 @@ type Conn struct { // enableQueryInfo controls whether we parse the INFO field in QUERY_OK packets // See: ConnParams.EnableQueryInfo enableQueryInfo bool + + // keepAliveOn marks when keep alive is active on the connection. + // This is currently used for testing. + keepAliveOn bool + + // mu protects the fields below + mu sync.Mutex + // cancel keep the cancel function for the current executing query. + // this is used by `kill [query|connection] ID` command from other connection. + cancel context.CancelFunc + // this is used to mark the connection to be closed so that the command phase for the connection can be stopped and + // the connection gets closed. + closing bool } // splitStatementFunciton is the function that is used to split the statement in case of a multi-statement query. @@ -246,10 +259,21 @@ func newConn(conn net.Conn) *Conn { // the server is shutting down, and has the ability to control buffer // size for reads. func newServerConn(conn net.Conn, listener *Listener) *Conn { + // Enable KeepAlive on TCP connections and change keep-alive period if provided. + enabledKeepAlive := false + if tcpConn, ok := conn.(*net.TCPConn); ok { + if err := setTcpConnProperties(tcpConn, listener.connKeepAlivePeriod); err != nil { + log.Errorf("error in setting tcp properties: %v", err) + } else { + enabledKeepAlive = true + } + } + c := &Conn{ conn: conn, listener: listener, PrepareData: make(map[uint32]*PrepareData), + keepAliveOn: enabledKeepAlive, } if listener.connReadBufferSize > 0 { @@ -267,6 +291,22 @@ func newServerConn(conn net.Conn, listener *Listener) *Conn { return c } +func setTcpConnProperties(conn *net.TCPConn, keepAlivePeriod time.Duration) error { + if err := conn.SetKeepAlive(true); err != nil { + return vterrors.Wrapf(err, "unable to enable keepalive on tcp connection") + } + + if keepAlivePeriod <= 0 { + return nil + } + + if err := conn.SetKeepAlivePeriod(keepAlivePeriod); err != nil { + return vterrors.Wrapf(err, "unable to set keepalive period on tcp connection") + } + + return nil +} + // startWriterBuffering starts using buffered writes. This should // be terminated by a call to endWriteBuffering. func (c *Conn) startWriterBuffering() { @@ -559,7 +599,7 @@ func (c *Conn) readPacket() ([]byte, error) { func (c *Conn) ReadPacket() ([]byte, error) { result, err := c.readPacket() if err != nil { - return nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err) + return nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } return result, err } @@ -683,7 +723,7 @@ func (c *Conn) writeComQuit() error { data, pos := c.startEphemeralPacketWithHeader(1) data[pos] = ComQuit if err := c.writeEphemeralPacket(); err != nil { - return NewSQLError(CRServerGone, SSUnknownSQLState, err.Error()) + return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, err.Error()) } return nil } @@ -738,7 +778,7 @@ func (c *Conn) writeOKPacketWithEOFHeader(packetOk *PacketOK) error { return c.writeOKPacketWithHeader(packetOk, EOFPacket) } -// writeOKPacketWithEOFHeader writes an OK packet with an EOF header. +// writeOKPacketWithHeader writes an OK packet with an EOF header. // This is used at the end of a result set if // CapabilityClientDeprecateEOF is set. // Server -> Client. @@ -767,7 +807,7 @@ func (c *Conn) writeOKPacketWithHeader(packetOk *PacketOK, headerType byte) erro bytes, pos := c.startEphemeralPacketWithHeader(length) data := &coder{data: bytes, pos: pos} - data.writeByte(headerType) //header - OK or EOF + data.writeByte(headerType) // header - OK or EOF data.writeLenEncInt(packetOk.affectedRows) data.writeLenEncInt(packetOk.lastInsertID) data.writeUint16(packetOk.statusFlags) @@ -817,10 +857,10 @@ func getLenEncInt(i uint64) []byte { } func (c *Conn) WriteErrorAndLog(format string, args ...interface{}) bool { - return c.writeErrorAndLog(ERUnknownComError, SSNetError, format, args...) + return c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, format, args...) } -func (c *Conn) writeErrorAndLog(errorCode ErrorCode, sqlState string, format string, args ...any) bool { +func (c *Conn) writeErrorAndLog(errorCode sqlerror.ErrorCode, sqlState string, format string, args ...any) bool { if err := c.writeErrorPacket(errorCode, sqlState, format, args...); err != nil { log.Errorf("Error writing error to %s: %v", c, err) return false @@ -840,7 +880,7 @@ func (c *Conn) writeErrorPacketFromErrorAndLog(err error) bool { // writeErrorPacket writes an error packet. // Server -> Client. // This method returns a generic error, not a SQLError. -func (c *Conn) writeErrorPacket(errorCode ErrorCode, sqlState string, format string, args ...any) error { +func (c *Conn) writeErrorPacket(errorCode sqlerror.ErrorCode, sqlState string, format string, args ...any) error { errorMessage := fmt.Sprintf(format, args...) length := 1 + 2 + 1 + 5 + len(errorMessage) data, pos := c.startEphemeralPacketWithHeader(length) @@ -848,7 +888,7 @@ func (c *Conn) writeErrorPacket(errorCode ErrorCode, sqlState string, format str pos = writeUint16(data, pos, uint16(errorCode)) pos = writeByte(data, pos, '#') if sqlState == "" { - sqlState = SSUnknownSQLState + sqlState = sqlerror.SSUnknownSQLState } if len(sqlState) != 5 { panic("sqlState has to be 5 characters long") @@ -862,11 +902,11 @@ func (c *Conn) writeErrorPacket(errorCode ErrorCode, sqlState string, format str // writeErrorPacketFromError writes an error packet, from a regular error. // See writeErrorPacket for other info. func (c *Conn) writeErrorPacketFromError(err error) error { - if se, ok := err.(*SQLError); ok { + if se, ok := err.(*sqlerror.SQLError); ok { return c.writeErrorPacket(se.Num, se.State, "%v", se.Message) } - return c.writeErrorPacket(ERUnknownError, SSUnknownSQLState, "unknown error: %v", err) + return c.writeErrorPacket(sqlerror.ERUnknownError, sqlerror.SSUnknownSQLState, "unknown error: %v", err) } // writeEOFPacket writes an EOF packet, through the buffer, and @@ -896,6 +936,10 @@ func (c *Conn) handleNextCommand(handler Handler) bool { if len(data) == 0 { return false } + // before continue to process the packet, check if the connection should be closed or not. + if c.IsMarkedForClose() { + return false + } switch data[0] { case ComQuit: @@ -931,7 +975,7 @@ func (c *Conn) handleNextCommand(handler Handler) bool { return true case ComFieldList: c.recycleReadPacket() - if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "command handling not implemented yet: %v", data[0]) { + if !c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, "command handling not implemented yet: %v", data[0]) { return false } case ComBinlogDump: @@ -943,7 +987,7 @@ func (c *Conn) handleNextCommand(handler Handler) bool { default: log.Errorf("Got unhandled packet (default) from %s, returning error: %v", c, data) c.recycleReadPacket() - if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "command handling not implemented yet: %v", data[0]) { + if !c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, "command handling not implemented yet: %v", data[0]) { return false } } @@ -1034,7 +1078,7 @@ func (c *Conn) handleComStmtReset(data []byte) bool { c.recycleReadPacket() if !ok { log.Error("Got unhandled packet from client %v, returning error: %v", c.ConnectionID, data) - if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "error handling packet: %v", data) { + if !c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, "error handling packet: %v", data) { return false } } @@ -1042,7 +1086,7 @@ func (c *Conn) handleComStmtReset(data []byte) bool { prepare, ok := c.PrepareData[stmtID] if !ok { log.Error("Commands were executed in an improper order from client %v, packet: %v", c.ConnectionID, data) - if !c.writeErrorAndLog(CRCommandsOutOfSync, SSNetError, "commands were executed in an improper order: %v", data) { + if !c.writeErrorAndLog(sqlerror.CRCommandsOutOfSync, sqlerror.SSNetError, "commands were executed in an improper order: %v", data) { return false } } @@ -1152,7 +1196,7 @@ func (c *Conn) handleComStmtExecute(handler Handler, data []byte) (kontinue bool if !fieldSent { // This is just a failsafe. Should never happen. if err == nil || err == io.EOF { - err = NewSQLErrorFromError(errors.New("unexpected: query ended without no results and no error")) + err = sqlerror.NewSQLErrorFromError(errors.New("unexpected: query ended without no results and no error")) } if !c.writeErrorPacketFromErrorAndLog(err) { return false @@ -1226,8 +1270,8 @@ func (c *Conn) handleComPrepare(handler Handler, data []byte) (kontinue bool) { paramsCount := uint16(0) _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { switch node := node.(type) { - case sqlparser.Argument: - if strings.HasPrefix(string(node), "v") { + case *sqlparser.Argument: + if strings.HasPrefix(node.Name, "v") { paramsCount++ } } @@ -1272,7 +1316,7 @@ func (c *Conn) handleComSetOption(data []byte) bool { c.Capabilities &^= CapabilityClientMultiStatements default: log.Errorf("Got unhandled packet (ComSetOption default) from client %v, returning error: %v", c.ConnectionID, data) - if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "error handling packet: %v", data) { + if !c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, "error handling packet: %v", data) { return false } } @@ -1282,7 +1326,7 @@ func (c *Conn) handleComSetOption(data []byte) bool { } } else { log.Errorf("Got unhandled packet (ComSetOption else) from client %v, returning error: %v", c.ConnectionID, data) - if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "error handling packet: %v", data) { + if !c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, "error handling packet: %v", data) { return false } } @@ -1293,7 +1337,7 @@ func (c *Conn) handleComPing() bool { c.recycleReadPacket() // Return error if listener was shut down and OK otherwise if c.listener.shutdown.Load() { - if !c.writeErrorAndLog(ERServerShutdown, SSNetError, "Server shutdown in progress") { + if !c.writeErrorAndLog(sqlerror.ERServerShutdown, sqlerror.SSNetError, "Server shutdown in progress") { return false } } else { @@ -1305,7 +1349,7 @@ func (c *Conn) handleComPing() bool { return true } -var errEmptyStatement = NewSQLError(EREmptyQuery, SSClientError, "Query was empty") +var errEmptyStatement = sqlerror.NewSQLError(sqlerror.EREmptyQuery, sqlerror.SSClientError, "Query was empty") func (c *Conn) handleComQuery(handler Handler, data []byte) (kontinue bool) { c.startWriterBuffering() @@ -1400,7 +1444,7 @@ func (c *Conn) execQuery(query string, handler Handler, more bool) execResult { if !callbackCalled { // This is just a failsafe. Should never happen. if err == nil || err == io.EOF { - err = NewSQLErrorFromError(errors.New("unexpected: query ended without no results and no error")) + err = sqlerror.NewSQLErrorFromError(errors.New("unexpected: query ended without no results and no error")) } if !c.writeErrorPacketFromErrorAndLog(err) { return connErr @@ -1591,7 +1635,7 @@ func ParseErrorPacket(data []byte) error { // Error code is 2 bytes. code, pos, ok := readUint16(data, pos) if !ok { - return NewSQLError(CRUnknownError, SSUnknownSQLState, "invalid error packet code: %v", data) + return sqlerror.NewSQLError(sqlerror.CRUnknownError, sqlerror.SSUnknownSQLState, "invalid error packet code: %v", data) } // '#' marker of the SQL state is 1 byte. Ignored. @@ -1600,13 +1644,13 @@ func ParseErrorPacket(data []byte) error { // SQL state is 5 bytes sqlState, pos, ok := readBytes(data, pos, 5) if !ok { - return NewSQLError(CRUnknownError, SSUnknownSQLState, "invalid error packet sqlState: %v", data) + return sqlerror.NewSQLError(sqlerror.CRUnknownError, sqlerror.SSUnknownSQLState, "invalid error packet sqlState: %v", data) } // Human readable error message is the rest. msg := string(data[pos:]) - return NewSQLError(ErrorCode(code), string(sqlState), "%v", msg) + return sqlerror.NewSQLError(sqlerror.ErrorCode(code), string(sqlState), "%v", msg) } // GetTLSClientCerts gets TLS certificates. @@ -1632,3 +1676,38 @@ func (c *Conn) IsUnixSocket() bool { func (c *Conn) GetRawConn() net.Conn { return c.conn } + +// CancelCtx aborts an existing running query +func (c *Conn) CancelCtx() { + c.mu.Lock() + defer c.mu.Unlock() + if c.cancel != nil { + c.cancel() + } +} + +// UpdateCancelCtx updates the cancel function on the connection. +func (c *Conn) UpdateCancelCtx(cancel context.CancelFunc) { + c.mu.Lock() + defer c.mu.Unlock() + c.cancel = cancel +} + +// MarkForClose marks the connection for close. +func (c *Conn) MarkForClose() { + c.mu.Lock() + defer c.mu.Unlock() + c.closing = true +} + +// IsMarkedForClose return true if the connection should be closed. +func (c *Conn) IsMarkedForClose() bool { + c.mu.Lock() + defer c.mu.Unlock() + return c.closing +} + +// GetTestConn returns a conn for testing purpose only. +func GetTestConn() *Conn { + return newConn(testConn{}) +} diff --git a/go/mysql/conn_fake.go b/go/mysql/conn_fake.go new file mode 100644 index 00000000000..72d944c2f3b --- /dev/null +++ b/go/mysql/conn_fake.go @@ -0,0 +1,83 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mysql + +import ( + "fmt" + "net" + "time" +) + +// testConn to be used for testing only as net.Conn interface implementation. +type testConn struct { + writeToPass []bool + pos int + queryPacket []byte +} + +func (t testConn) Read(b []byte) (n int, err error) { + copy(b, t.queryPacket) + return len(b), nil +} + +func (t testConn) Write(b []byte) (n int, err error) { + t.pos = t.pos + 1 + if t.writeToPass[t.pos] { + return 0, nil + } + return 0, fmt.Errorf("error in writing to connection") +} + +func (t testConn) Close() error { + return nil +} + +func (t testConn) LocalAddr() net.Addr { + panic("implement me") +} + +func (t testConn) RemoteAddr() net.Addr { + return mockAddress{s: "a"} +} + +func (t testConn) SetDeadline(t1 time.Time) error { + panic("implement me") +} + +func (t testConn) SetReadDeadline(t1 time.Time) error { + panic("implement me") +} + +func (t testConn) SetWriteDeadline(t1 time.Time) error { + panic("implement me") +} + +var _ net.Conn = (*testConn)(nil) + +type mockAddress struct { + s string +} + +func (m mockAddress) Network() string { + return m.s +} + +func (m mockAddress) String() string { + return m.s +} + +var _ net.Addr = (*mockAddress)(nil) diff --git a/go/mysql/conn_flaky_test.go b/go/mysql/conn_flaky_test.go index e73e566dad3..9df52a47589 100644 --- a/go/mysql/conn_flaky_test.go +++ b/go/mysql/conn_flaky_test.go @@ -31,6 +31,8 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/sqlparser" "github.com/stretchr/testify/assert" @@ -304,7 +306,7 @@ func TestBasicPackets(t *testing.T) { assert.EqualValues(78, packetOk.warnings) // Write error packet, read it, compare. - err = sConn.writeErrorPacket(ERAccessDeniedError, SSAccessDeniedError, "access denied: %v", "reason") + err = sConn.writeErrorPacket(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "access denied: %v", "reason") require.NoError(err) data, err = cConn.ReadPacket() require.NoError(err) @@ -312,10 +314,10 @@ func TestBasicPackets(t *testing.T) { assert.EqualValues(data[0], ErrPacket, "ErrPacket") err = ParseErrorPacket(data) - utils.MustMatch(t, err, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "access denied: reason"), "") + utils.MustMatch(t, err, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "access denied: reason"), "") // Write error packet from error, read it, compare. - err = sConn.writeErrorPacketFromError(NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "access denied")) + err = sConn.writeErrorPacketFromError(sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "access denied")) require.NoError(err) data, err = cConn.ReadPacket() @@ -324,7 +326,7 @@ func TestBasicPackets(t *testing.T) { assert.EqualValues(data[0], ErrPacket, "ErrPacket") err = ParseErrorPacket(data) - utils.MustMatch(t, err, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "access denied"), "") + utils.MustMatch(t, err, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "access denied"), "") // Write EOF packet, read it, compare first byte. Payload is always ignored. err = sConn.writeEOFPacket(0x8912, 0xabba) @@ -840,9 +842,9 @@ func TestMultiStatement(t *testing.T) { // this handler will return results according to the query. In case the query contains "error" it will return an error // panic if the query contains "panic" and it will return selectRowsResult in case of any other query - handler := &testRun{t: t, err: NewSQLError(CRMalformedPacket, SSUnknownSQLState, "cannot get column number")} + handler := &testRun{t: t, err: sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "cannot get column number")} res := sConn.handleNextCommand(handler) - //The queries run will be select 1; and select 2; These queries do not return any errors, so the connection should still be open + // The queries run will be select 1; and select 2; These queries do not return any errors, so the connection should still be open require.True(t, res, "we should not break the connection in case of no errors") // Read the result of the query and assert that it is indeed what we want. This will contain the result of the first query. data, more, _, err := cConn.ReadQueryResult(100, true) @@ -992,67 +994,6 @@ func TestConnectionErrorWhileWritingComStmtExecute(t *testing.T) { require.False(t, res, "we should beak the connection in case of error writing error packet") } -var _ Handler = (*testRun)(nil) - -type testConn struct { - writeToPass []bool - pos int - queryPacket []byte -} - -func (t testConn) Read(b []byte) (n int, err error) { - copy(b, t.queryPacket) - return len(b), nil -} - -func (t testConn) Write(b []byte) (n int, err error) { - t.pos = t.pos + 1 - if t.writeToPass[t.pos] { - return 0, nil - } - return 0, fmt.Errorf("error in writing to connection") -} - -func (t testConn) Close() error { - panic("implement me") -} - -func (t testConn) LocalAddr() net.Addr { - panic("implement me") -} - -func (t testConn) RemoteAddr() net.Addr { - return mockAddress{s: "a"} -} - -func (t testConn) SetDeadline(t1 time.Time) error { - panic("implement me") -} - -func (t testConn) SetReadDeadline(t1 time.Time) error { - panic("implement me") -} - -func (t testConn) SetWriteDeadline(t1 time.Time) error { - panic("implement me") -} - -var _ net.Conn = (*testConn)(nil) - -type mockAddress struct { - s string -} - -func (m mockAddress) Network() string { - return m.s -} - -func (m mockAddress) String() string { - return m.s -} - -var _ net.Addr = (*mockAddress)(nil) - var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") func randSeq(n int) string { @@ -1169,7 +1110,7 @@ func (t testRun) ComBinlogDump(c *Conn, logFile string, binlogPos uint32) error panic("implement me") } -func (t testRun) ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet GTIDSet) error { +func (t testRun) ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error { panic("implement me") } diff --git a/go/mysql/constants.go b/go/mysql/constants.go index 2a1ecaabbd3..194ed568b39 100644 --- a/go/mysql/constants.go +++ b/go/mysql/constants.go @@ -17,12 +17,7 @@ limitations under the License. package mysql import ( - "strconv" - "strings" - - "golang.org/x/text/encoding" - "golang.org/x/text/encoding/charmap" - "golang.org/x/text/encoding/simplifiedchinese" + "vitess.io/vitess/go/mysql/binlog" ) const ( @@ -33,6 +28,9 @@ const ( // protocolVersion is the current version of the protocol. // Always 10. protocolVersion = 10 + + // https://dev.mysql.com/doc/refman/en/identifier-length.html + MaxIdentifierLength = 64 ) // AuthMethodDescription is the type for different supported and @@ -276,514 +274,10 @@ const ( AuthSwitchRequestPacket = 0xfe ) -// Error codes for client-side errors. -// Originally found in include/mysql/errmsg.h and -// https://dev.mysql.com/doc/mysql-errors/en/client-error-reference.html -const ( - // CRUnknownError is CR_UNKNOWN_ERROR - CRUnknownError = ErrorCode(2000) - - // CRConnectionError is CR_CONNECTION_ERROR - // This is returned if a connection via a Unix socket fails. - CRConnectionError = ErrorCode(2002) - - // CRConnHostError is CR_CONN_HOST_ERROR - // This is returned if a connection via a TCP socket fails. - CRConnHostError = ErrorCode(2003) - - // CRUnknownHost is CR_UNKNOWN_HOST - // This is returned if the host name cannot be resolved. - CRUnknownHost = ErrorCode(2005) - - // CRServerGone is CR_SERVER_GONE_ERROR. - // This is returned if the client tries to send a command but it fails. - CRServerGone = ErrorCode(2006) - - // CRVersionError is CR_VERSION_ERROR - // This is returned if the server versions don't match what we support. - CRVersionError = ErrorCode(2007) - - // CRServerHandshakeErr is CR_SERVER_HANDSHAKE_ERR - CRServerHandshakeErr = ErrorCode(2012) - - // CRServerLost is CR_SERVER_LOST. - // Used when: - // - the client cannot write an initial auth packet. - // - the client cannot read an initial auth packet. - // - the client cannot read a response from the server. - // This happens when a running query is killed. - CRServerLost = ErrorCode(2013) - - // CRCommandsOutOfSync is CR_COMMANDS_OUT_OF_SYNC - // Sent when the streaming calls are not done in the right order. - CRCommandsOutOfSync = ErrorCode(2014) - - // CRNamedPipeStateError is CR_NAMEDPIPESETSTATE_ERROR. - // This is the highest possible number for a connection error. - CRNamedPipeStateError = ErrorCode(2018) - - // CRCantReadCharset is CR_CANT_READ_CHARSET - CRCantReadCharset = ErrorCode(2019) - - // CRSSLConnectionError is CR_SSL_CONNECTION_ERROR - CRSSLConnectionError = ErrorCode(2026) - - // CRMalformedPacket is CR_MALFORMED_PACKET - CRMalformedPacket = ErrorCode(2027) -) - -type ErrorCode uint16 - -func (e ErrorCode) ToString() string { - return strconv.FormatUint(uint64(e), 10) -} - -// Error codes for server-side errors. -// Originally found in include/mysql/mysqld_error.h and -// https://dev.mysql.com/doc/mysql-errors/en/server-error-reference.html -// The below are in sorted order by value, grouped by vterror code they should be bucketed into. -// See above reference for more information on each code. -const ( - // Vitess specific errors, (100-999) - ERNotReplica = ErrorCode(100) - - // unknown - ERUnknownError = ErrorCode(1105) - - // internal - ERInternalError = ErrorCode(1815) - - // unimplemented - ERNotSupportedYet = ErrorCode(1235) - ERUnsupportedPS = ErrorCode(1295) - - // resource exhausted - ERDiskFull = ErrorCode(1021) - EROutOfMemory = ErrorCode(1037) - EROutOfSortMemory = ErrorCode(1038) - ERConCount = ErrorCode(1040) - EROutOfResources = ErrorCode(1041) - ERRecordFileFull = ErrorCode(1114) - ERHostIsBlocked = ErrorCode(1129) - ERCantCreateThread = ErrorCode(1135) - ERTooManyDelayedThreads = ErrorCode(1151) - ERNetPacketTooLarge = ErrorCode(1153) - ERTooManyUserConnections = ErrorCode(1203) - ERLockTableFull = ErrorCode(1206) - ERUserLimitReached = ErrorCode(1226) - - // deadline exceeded - ERLockWaitTimeout = ErrorCode(1205) - - // unavailable - ERServerShutdown = ErrorCode(1053) - - // not found - ERDbDropExists = ErrorCode(1008) - ERCantFindFile = ErrorCode(1017) - ERFormNotFound = ErrorCode(1029) - ERKeyNotFound = ErrorCode(1032) - ERBadFieldError = ErrorCode(1054) - ERNoSuchThread = ErrorCode(1094) - ERUnknownTable = ErrorCode(1109) - ERCantFindUDF = ErrorCode(1122) - ERNonExistingGrant = ErrorCode(1141) - ERNoSuchTable = ErrorCode(1146) - ERNonExistingTableGrant = ErrorCode(1147) - ERKeyDoesNotExist = ErrorCode(1176) - - // permissions - ERDBAccessDenied = ErrorCode(1044) - ERAccessDeniedError = ErrorCode(1045) - ERKillDenied = ErrorCode(1095) - ERNoPermissionToCreateUsers = ErrorCode(1211) - ERSpecifiedAccessDenied = ErrorCode(1227) - - // failed precondition - ERNoDb = ErrorCode(1046) - ERNoSuchIndex = ErrorCode(1082) - ERCantDropFieldOrKey = ErrorCode(1091) - ERTableNotLockedForWrite = ErrorCode(1099) - ERTableNotLocked = ErrorCode(1100) - ERTooBigSelect = ErrorCode(1104) - ERNotAllowedCommand = ErrorCode(1148) - ERTooLongString = ErrorCode(1162) - ERDelayedInsertTableLocked = ErrorCode(1165) - ERDupUnique = ErrorCode(1169) - ERRequiresPrimaryKey = ErrorCode(1173) - ERCantDoThisDuringAnTransaction = ErrorCode(1179) - ERReadOnlyTransaction = ErrorCode(1207) - ERCannotAddForeign = ErrorCode(1215) - ERNoReferencedRow = ErrorCode(1216) - ERRowIsReferenced = ErrorCode(1217) - ERCantUpdateWithReadLock = ErrorCode(1223) - ERNoDefault = ErrorCode(1230) - ERMasterFatalReadingBinlog = ErrorCode(1236) - EROperandColumns = ErrorCode(1241) - ERSubqueryNo1Row = ErrorCode(1242) - ERWarnDataOutOfRange = ErrorCode(1264) - ERNonUpdateableTable = ErrorCode(1288) - ERFeatureDisabled = ErrorCode(1289) - EROptionPreventsStatement = ErrorCode(1290) - ERDuplicatedValueInType = ErrorCode(1291) - ERSPDoesNotExist = ErrorCode(1305) - ERNoDefaultForField = ErrorCode(1364) - ErSPNotVarArg = ErrorCode(1414) - ERRowIsReferenced2 = ErrorCode(1451) - ErNoReferencedRow2 = ErrorCode(1452) - ERDupIndex = ErrorCode(1831) - ERInnodbReadOnly = ErrorCode(1874) - - // already exists - ERDbCreateExists = ErrorCode(1007) - ERTableExists = ErrorCode(1050) - ERDupEntry = ErrorCode(1062) - ERFileExists = ErrorCode(1086) - ERUDFExists = ErrorCode(1125) - - // aborted - ERGotSignal = ErrorCode(1078) - ERForcingClose = ErrorCode(1080) - ERAbortingConnection = ErrorCode(1152) - ERLockDeadlock = ErrorCode(1213) - - // invalid arg - ERUnknownComError = ErrorCode(1047) - ERBadNullError = ErrorCode(1048) - ERBadDb = ErrorCode(1049) - ERBadTable = ErrorCode(1051) - ERNonUniq = ErrorCode(1052) - ERWrongFieldWithGroup = ErrorCode(1055) - ERWrongGroupField = ErrorCode(1056) - ERWrongSumSelect = ErrorCode(1057) - ERWrongValueCount = ErrorCode(1058) - ERTooLongIdent = ErrorCode(1059) - ERDupFieldName = ErrorCode(1060) - ERDupKeyName = ErrorCode(1061) - ERWrongFieldSpec = ErrorCode(1063) - ERParseError = ErrorCode(1064) - EREmptyQuery = ErrorCode(1065) - ERNonUniqTable = ErrorCode(1066) - ERInvalidDefault = ErrorCode(1067) - ERMultiplePriKey = ErrorCode(1068) - ERTooManyKeys = ErrorCode(1069) - ERTooManyKeyParts = ErrorCode(1070) - ERTooLongKey = ErrorCode(1071) - ERKeyColumnDoesNotExist = ErrorCode(1072) - ERBlobUsedAsKey = ErrorCode(1073) - ERTooBigFieldLength = ErrorCode(1074) - ERWrongAutoKey = ErrorCode(1075) - ERWrongFieldTerminators = ErrorCode(1083) - ERBlobsAndNoTerminated = ErrorCode(1084) - ERTextFileNotReadable = ErrorCode(1085) - ERWrongSubKey = ErrorCode(1089) - ERCantRemoveAllFields = ErrorCode(1090) - ERUpdateTableUsed = ErrorCode(1093) - ERNoTablesUsed = ErrorCode(1096) - ERTooBigSet = ErrorCode(1097) - ERBlobCantHaveDefault = ErrorCode(1101) - ERWrongDbName = ErrorCode(1102) - ERWrongTableName = ErrorCode(1103) - ERUnknownProcedure = ErrorCode(1106) - ERWrongParamCountToProcedure = ErrorCode(1107) - ERWrongParametersToProcedure = ErrorCode(1108) - ERFieldSpecifiedTwice = ErrorCode(1110) - ERInvalidGroupFuncUse = ErrorCode(1111) - ERTableMustHaveColumns = ErrorCode(1113) - ERUnknownCharacterSet = ErrorCode(1115) - ERTooManyTables = ErrorCode(1116) - ERTooManyFields = ErrorCode(1117) - ERTooBigRowSize = ErrorCode(1118) - ERWrongOuterJoin = ErrorCode(1120) - ERNullColumnInIndex = ErrorCode(1121) - ERFunctionNotDefined = ErrorCode(1128) - ERWrongValueCountOnRow = ErrorCode(1136) - ERInvalidUseOfNull = ErrorCode(1138) - ERRegexpError = ErrorCode(1139) - ERMixOfGroupFuncAndFields = ErrorCode(1140) - ERIllegalGrantForTable = ErrorCode(1144) - ERSyntaxError = ErrorCode(1149) - ERWrongColumnName = ErrorCode(1166) - ERWrongKeyColumn = ErrorCode(1167) - ERBlobKeyWithoutLength = ErrorCode(1170) - ERPrimaryCantHaveNull = ErrorCode(1171) - ERTooManyRows = ErrorCode(1172) - ERLockOrActiveTransaction = ErrorCode(1192) - ERUnknownSystemVariable = ErrorCode(1193) - ERSetConstantsOnly = ErrorCode(1204) - ERWrongArguments = ErrorCode(1210) - ERWrongUsage = ErrorCode(1221) - ERWrongNumberOfColumnsInSelect = ErrorCode(1222) - ERDupArgument = ErrorCode(1225) - ERLocalVariable = ErrorCode(1228) - ERGlobalVariable = ErrorCode(1229) - ERWrongValueForVar = ErrorCode(1231) - ERWrongTypeForVar = ErrorCode(1232) - ERVarCantBeRead = ErrorCode(1233) - ERCantUseOptionHere = ErrorCode(1234) - ERIncorrectGlobalLocalVar = ErrorCode(1238) - ERWrongFKDef = ErrorCode(1239) - ERKeyRefDoNotMatchTableRef = ErrorCode(1240) - ERCyclicReference = ErrorCode(1245) - ERIllegalReference = ErrorCode(1247) - ERDerivedMustHaveAlias = ErrorCode(1248) - ERTableNameNotAllowedHere = ErrorCode(1250) - ERCollationCharsetMismatch = ErrorCode(1253) - ERWarnDataTruncated = ErrorCode(1265) - ERCantAggregate2Collations = ErrorCode(1267) - ERCantAggregate3Collations = ErrorCode(1270) - ERCantAggregateNCollations = ErrorCode(1271) - ERVariableIsNotStruct = ErrorCode(1272) - ERUnknownCollation = ErrorCode(1273) - ERWrongNameForIndex = ErrorCode(1280) - ERWrongNameForCatalog = ErrorCode(1281) - ERBadFTColumn = ErrorCode(1283) - ERTruncatedWrongValue = ErrorCode(1292) - ERTooMuchAutoTimestampCols = ErrorCode(1293) - ERInvalidOnUpdate = ErrorCode(1294) - ERUnknownTimeZone = ErrorCode(1298) - ERInvalidCharacterString = ErrorCode(1300) - ERQueryInterrupted = ErrorCode(1317) - ERTruncatedWrongValueForField = ErrorCode(1366) - ERIllegalValueForType = ErrorCode(1367) - ERDataTooLong = ErrorCode(1406) - ErrWrongValueForType = ErrorCode(1411) - ERForbidSchemaChange = ErrorCode(1450) - ERWrongValue = ErrorCode(1525) - ERDataOutOfRange = ErrorCode(1690) - ERInvalidJSONText = ErrorCode(3140) - ERInvalidJSONTextInParams = ErrorCode(3141) - ERInvalidJSONBinaryData = ErrorCode(3142) - ERInvalidJSONCharset = ErrorCode(3144) - ERInvalidCastToJSON = ErrorCode(3147) - ERJSONValueTooBig = ErrorCode(3150) - ERJSONDocumentTooDeep = ErrorCode(3157) - - // max execution time exceeded - ERQueryTimeout = ErrorCode(3024) - - ErrCantCreateGeometryObject = ErrorCode(1416) - ErrGISDataWrongEndianess = ErrorCode(3055) - ErrNotImplementedForCartesianSRS = ErrorCode(3704) - ErrNotImplementedForProjectedSRS = ErrorCode(3705) - ErrNonPositiveRadius = ErrorCode(3706) - - // server not available - ERServerIsntAvailable = ErrorCode(3168) -) - -// Sql states for errors. -// Originally found in include/mysql/sql_state.h -const ( - // SSUnknownSqlstate is ER_SIGNAL_EXCEPTION in - // include/mysql/sql_state.h, but: - // const char *unknown_sqlstate= "HY000" - // in client.c. So using that one. - SSUnknownSQLState = "HY000" - - // SSNetError is network related error - SSNetError = "08S01" - - // SSWrongNumberOfColumns is related to columns error - SSWrongNumberOfColumns = "21000" - - // SSWrongValueCountOnRow is related to columns count mismatch error - SSWrongValueCountOnRow = "21S01" - - // SSDataTooLong is ER_DATA_TOO_LONG - SSDataTooLong = "22001" - - // SSDataOutOfRange is ER_DATA_OUT_OF_RANGE - SSDataOutOfRange = "22003" - - // SSConstraintViolation is constraint violation - SSConstraintViolation = "23000" - - // SSCantDoThisDuringAnTransaction is - // ER_CANT_DO_THIS_DURING_AN_TRANSACTION - SSCantDoThisDuringAnTransaction = "25000" - - // SSAccessDeniedError is ER_ACCESS_DENIED_ERROR - SSAccessDeniedError = "28000" - - // SSNoDB is ER_NO_DB_ERROR - SSNoDB = "3D000" - - // SSLockDeadlock is ER_LOCK_DEADLOCK - SSLockDeadlock = "40001" - - // SSClientError is the state on client errors - SSClientError = "42000" - - // SSDupFieldName is ER_DUP_FIELD_NAME - SSDupFieldName = "42S21" - - // SSBadFieldError is ER_BAD_FIELD_ERROR - SSBadFieldError = "42S22" - - // SSUnknownTable is ER_UNKNOWN_TABLE - SSUnknownTable = "42S02" - - // SSQueryInterrupted is ER_QUERY_INTERRUPTED; - SSQueryInterrupted = "70100" -) - -// CharacterSetEncoding maps a charset name to a golang encoder. -// golang does not support encoders for all MySQL charsets. -// A charset not in this map is unsupported. -// A trivial encoding (e.g. utf8) has a `nil` encoder -var CharacterSetEncoding = map[string]encoding.Encoding{ - "cp850": charmap.CodePage850, - "koi8r": charmap.KOI8R, - "latin1": charmap.Windows1252, - "latin2": charmap.ISO8859_2, - "ascii": nil, - "hebrew": charmap.ISO8859_8, - "greek": charmap.ISO8859_7, - "cp1250": charmap.Windows1250, - "gbk": simplifiedchinese.GBK, - "latin5": charmap.ISO8859_9, - "utf8": nil, - "utf8mb3": nil, - "cp866": charmap.CodePage866, - "cp852": charmap.CodePage852, - "latin7": charmap.ISO8859_13, - "utf8mb4": nil, - "cp1251": charmap.Windows1251, - "cp1256": charmap.Windows1256, - "cp1257": charmap.Windows1257, - "binary": nil, -} - // IsNum returns true if a MySQL type is a numeric value. // It is the same as IS_NUM defined in mysql.h. func IsNum(typ uint8) bool { - return (typ <= TypeInt24 && typ != TypeTimestamp) || - typ == TypeYear || - typ == TypeNewDecimal -} - -// IsConnErr returns true if the error is a connection error. -func IsConnErr(err error) bool { - if IsTooManyConnectionsErr(err) { - return false - } - if sqlErr, ok := err.(*SQLError); ok { - num := sqlErr.Number() - return (num >= CRUnknownError && num <= CRNamedPipeStateError) || num == ERQueryInterrupted - } - return false -} - -// IsConnLostDuringQuery returns true if the error is a CRServerLost error. -// Happens most commonly when a query is killed MySQL server-side. -func IsConnLostDuringQuery(err error) bool { - if sqlErr, ok := err.(*SQLError); ok { - num := sqlErr.Number() - return (num == CRServerLost) - } - return false -} - -// IsEphemeralError returns true if the error is ephemeral and the caller should -// retry if possible. Note: non-SQL errors are always treated as ephemeral. -func IsEphemeralError(err error) bool { - if sqlErr, ok := err.(*SQLError); ok { - en := sqlErr.Number() - switch en { - case - CRConnectionError, - CRConnHostError, - CRMalformedPacket, - CRNamedPipeStateError, - CRServerHandshakeErr, - CRServerGone, - CRServerLost, - CRSSLConnectionError, - CRUnknownError, - CRUnknownHost, - ERCantCreateThread, - ERDiskFull, - ERForcingClose, - ERGotSignal, - ERHostIsBlocked, - ERLockTableFull, - ERInnodbReadOnly, - ERInternalError, - ERLockDeadlock, - ERLockWaitTimeout, - ERQueryTimeout, - EROutOfMemory, - EROutOfResources, - EROutOfSortMemory, - ERQueryInterrupted, - ERServerIsntAvailable, - ERServerShutdown, - ERTooManyUserConnections, - ERUnknownError, - ERUserLimitReached: - return true - default: - return false - } - } - // If it's not an sqlError then we assume it's ephemeral - return true -} - -// IsTooManyConnectionsErr returns true if the error is due to too many connections. -func IsTooManyConnectionsErr(err error) bool { - if sqlErr, ok := err.(*SQLError); ok { - if sqlErr.Number() == CRServerHandshakeErr && strings.Contains(sqlErr.Message, "Too many connections") { - return true - } - } - return false -} - -// IsSchemaApplyError returns true when given error is a MySQL error applying schema change -func IsSchemaApplyError(err error) bool { - merr, isSQLErr := err.(*SQLError) - if !isSQLErr { - return false - } - switch merr.Num { - case - ERDupKeyName, - ERCantDropFieldOrKey, - ERTableExists, - ERDupFieldName: - return true - } - return false -} - -type ReplicationState int32 - -const ( - ReplicationStateUnknown ReplicationState = iota - ReplicationStateStopped - ReplicationStateConnecting - ReplicationStateRunning -) - -// ReplicationStatusToState converts a value you have for the IO thread(s) or SQL -// thread(s) or Group Replication applier thread(s) from MySQL or intermediate -// layers to a mysql.ReplicationState. -// on,yes,true == ReplicationStateRunning -// off,no,false == ReplicationStateStopped -// connecting == ReplicationStateConnecting -// anything else == ReplicationStateUnknown -func ReplicationStatusToState(s string) ReplicationState { - // Group Replication uses ON instead of Yes - switch strings.ToLower(s) { - case "yes", "on", "true": - return ReplicationStateRunning - case "no", "off", "false": - return ReplicationStateStopped - case "connecting": - return ReplicationStateConnecting - default: - return ReplicationStateUnknown - } + return (typ <= binlog.TypeInt24 && typ != binlog.TypeTimestamp) || + typ == binlog.TypeYear || + typ == binlog.TypeNewDecimal } diff --git a/go/mysql/constants_test.go b/go/mysql/constants_test.go index 34d8c09ca54..1a54aad4c02 100644 --- a/go/mysql/constants_test.go +++ b/go/mysql/constants_test.go @@ -21,6 +21,8 @@ import ( "testing" "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/mysql/sqlerror" ) func TestIsConnErr(t *testing.T) { @@ -31,23 +33,23 @@ func TestIsConnErr(t *testing.T) { in: errors.New("t"), want: false, }, { - in: NewSQLError(5, "", ""), + in: sqlerror.NewSQLError(5, "", ""), want: false, }, { - in: NewSQLError(CRServerGone, "", ""), + in: sqlerror.NewSQLError(sqlerror.CRServerGone, "", ""), want: true, }, { - in: NewSQLError(CRServerLost, "", ""), + in: sqlerror.NewSQLError(sqlerror.CRServerLost, "", ""), want: true, }, { - in: NewSQLError(ERQueryInterrupted, "", ""), + in: sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, "", ""), want: true, }, { - in: NewSQLError(CRCantReadCharset, "", ""), + in: sqlerror.NewSQLError(sqlerror.CRCantReadCharset, "", ""), want: false, }} for _, tcase := range testcases { - got := IsConnErr(tcase.in) + got := sqlerror.IsConnErr(tcase.in) assert.Equal(t, tcase.want, got, "IsConnErr(%#v): %v, want %v", tcase.in, got, tcase.want) } @@ -61,23 +63,23 @@ func TestIsConnLostDuringQuery(t *testing.T) { in: errors.New("t"), want: false, }, { - in: NewSQLError(5, "", ""), + in: sqlerror.NewSQLError(5, "", ""), want: false, }, { - in: NewSQLError(CRServerGone, "", ""), + in: sqlerror.NewSQLError(sqlerror.CRServerGone, "", ""), want: false, }, { - in: NewSQLError(CRServerLost, "", ""), + in: sqlerror.NewSQLError(sqlerror.CRServerLost, "", ""), want: true, }, { - in: NewSQLError(ERQueryInterrupted, "", ""), + in: sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, "", ""), want: false, }, { - in: NewSQLError(CRCantReadCharset, "", ""), + in: sqlerror.NewSQLError(sqlerror.CRCantReadCharset, "", ""), want: false, }} for _, tcase := range testcases { - got := IsConnLostDuringQuery(tcase.in) + got := sqlerror.IsConnLostDuringQuery(tcase.in) assert.Equal(t, tcase.want, got, "IsConnLostDuringQuery(%#v): %v, want %v", tcase.in, got, tcase.want) } diff --git a/go/mysql/datetime/LICENSE b/go/mysql/datetime/LICENSE new file mode 100644 index 00000000000..eed693818e7 --- /dev/null +++ b/go/mysql/datetime/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 lestrrat + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/go/mysql/datetime/datetime.go b/go/mysql/datetime/datetime.go new file mode 100644 index 00000000000..162ac970e67 --- /dev/null +++ b/go/mysql/datetime/datetime.go @@ -0,0 +1,684 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datetime + +import ( + "encoding/binary" + "time" + + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/vt/vthash" +) + +const negMask = uint16(1 << 15) + +type Time struct { + hour uint16 + minute uint8 + second uint8 + nanosecond uint32 +} + +type Date struct { + year uint16 + month uint8 + day uint8 +} + +type DateTime struct { + Date Date + Time Time +} + +const DefaultPrecision = 6 + +func (t Time) AppendFormat(b []byte, prec uint8) []byte { + if t.Neg() { + b = append(b, '-') + } + + b = appendInt(b, t.Hour(), 2) + b = append(b, ':') + b = appendInt(b, t.Minute(), 2) + b = append(b, ':') + b = appendInt(b, t.Second(), 2) + if prec > 0 { + b = append(b, '.') + b = appendNsec(b, t.Nanosecond(), int(prec)) + } + return b +} + +func (t Time) Format(prec uint8) []byte { + return t.AppendFormat(make([]byte, 0, 16), prec) +} + +func (t Time) FormatInt64() (n int64) { + tr := t.Round(0) + v := int64(tr.Hour())*10000 + int64(tr.Minute())*100 + int64(tr.Second()) + if t.Neg() { + return -v + } + return v +} + +func (t Time) FormatFloat64() (n float64) { + v := float64(t.Hour())*10000 + float64(t.Minute())*100 + float64(t.Second()) + float64(t.Nanosecond())/1e9 + if t.Neg() { + return -v + } + return v +} + +func (t Time) FormatDecimal() decimal.Decimal { + v := int64(t.Hour())*10000 + int64(t.Minute())*100 + int64(t.Second()) + dec := decimal.NewFromInt(v) + dec = dec.Add(decimal.New(int64(t.Nanosecond()), -9)) + if t.Neg() { + dec = dec.Neg() + } + return dec +} + +func (t Time) ToDateTime() (out DateTime) { + return NewDateTimeFromStd(t.ToStdTime(time.Local)) +} + +func (t Time) IsZero() bool { + return t.Hour() == 0 && t.Minute() == 0 && t.Second() == 0 && t.Nanosecond() == 0 +} + +// RoundForJSON rounds the time to the nearest 32nd hour. This is some really +// weird behavior that MySQL does when it casts a JSON time back to a MySQL +// TIME value. We just mimic the behavior here. +func (t Time) RoundForJSON() Time { + if t.Hour() < 32 { + return t + } + res := t + res.hour = uint16(t.Hour() % 32) + if t.Neg() { + res.hour |= negMask + } + return res +} + +func (t Time) Hour() int { + return int(t.hour & ^negMask) +} + +func (t Time) Minute() int { + return int(t.minute) +} + +func (t Time) Second() int { + return int(t.second) +} + +func (t Time) Nanosecond() int { + return int(t.nanosecond) +} + +func (t Time) Neg() bool { + return t.hour&negMask != 0 +} + +func (t Time) Hash(h *vthash.Hasher) { + h.Write16(t.hour) + h.Write8(t.minute) + h.Write8(t.second) + h.Write32(t.nanosecond) +} + +func (t Time) Compare(t2 Time) int { + if t.Neg() != t2.Neg() { + if t.Neg() { + return -1 + } + return 1 + } + // Need to swap if both are negative. + if t.Neg() { + t, t2 = t2, t + } + + h1, h2 := t.Hour(), t2.Hour() + if h1 < h2 { + return -1 + } + if h1 > h2 { + return 1 + } + m1, m2 := t.Minute(), t2.Minute() + if m1 < m2 { + return -1 + } + if m1 > m2 { + return 1 + } + s1, s2 := t.Second(), t2.Second() + if s1 < s2 { + return -1 + } + if s1 > s2 { + return 1 + } + ns1, ns2 := t.Nanosecond(), t2.Nanosecond() + if ns1 < ns2 { + return -1 + } + if ns1 > ns2 { + return 1 + } + return 0 +} + +var precs = []int{1e9, 1e8, 1e7, 1e6, 1e5, 1e4, 1e3, 1e2, 1e1, 1e0} + +func (t Time) Round(p int) (r Time) { + if t.nanosecond == 0 { + return t + } + + n := int(t.nanosecond) + prec := precs[p] + s := (n / prec) * prec + l := s + prec + + if n-s >= l-n { + n = l + } else { + n = s + } + + r = t + if n == 1e9 { + r.second++ + n = 0 + if r.second == 60 { + r.minute++ + r.second = 0 + if r.minute == 60 { + r.hour++ + r.minute = 0 + } + } + } + r.nanosecond = uint32(n) + return r +} + +func (d Date) IsZero() bool { + return d.Year() == 0 && d.Month() == 0 && d.Day() == 0 +} + +func (d Date) Year() int { + return int(d.year) +} + +func (d Date) Month() int { + return int(d.month) +} + +func (d Date) Day() int { + return int(d.day) +} + +func (d Date) Hash(h *vthash.Hasher) { + h.Write16(d.year) + h.Write8(d.month) + h.Write8(d.day) +} + +func (dt Date) Weekday() time.Weekday { + return dt.ToStdTime(time.Local).Weekday() +} + +func (dt Date) Yearday() int { + return dt.ToStdTime(time.Local).YearDay() +} + +func (d Date) ISOWeek() (int, int) { + return d.ToStdTime(time.Local).ISOWeek() +} + +// SundayWeek returns the year and week number of the current +// date, when week numbers are defined by starting on the first +// Sunday of the year. +func (d Date) SundayWeek() (int, int) { + t := d.ToStdTime(time.Local) + // Since the week numbers always start on a Sunday, we can look + // at the week number of Sunday itself. So we shift back to last + // Sunday we saw and compute the week number based on that. + sun := t.AddDate(0, 0, -int(t.Weekday())) + return sun.Year(), (sun.YearDay()-1)/7 + 1 +} + +// MondayWeek returns the year and week number of the current +// date, when week numbers are defined by starting on the first +// Monday of the year. +func (d Date) MondayWeek() (int, int) { + t := d.ToStdTime(time.Local) + // Since the week numbers always start on a Monday, we can look + // at the week number of Monday itself. So we shift back to last + // Monday we saw and compute the week number based on that. + wd := (t.Weekday() + 6) % 7 + mon := t.AddDate(0, 0, -int(wd)) + return mon.Year(), (mon.YearDay()-1)/7 + 1 +} + +// Sunday4DayWeek returns the year and week number of the current +// date, when week numbers are defined by starting on the Sunday +// where week 1 is defined as having at least 4 days in the new +// year. +func (d Date) Sunday4DayWeek() (int, int) { + t := d.ToStdTime(time.Local) + + // In this format, the first Wednesday of the year is always + // in the first week. So we can look at the week number of + // Wednesday in the same week. On days before Wednesday, we need + // to move the time forward to Wednesday, on days after we need to + // move it back to Wednesday. + var wed time.Time + + switch wd := t.Weekday(); { + case wd == 3: + wed = t + case wd < 3: + wed = t.AddDate(0, 0, int(3-t.Weekday())) + case wd > 3: + wed = t.AddDate(0, 0, -int(t.Weekday()-3)) + } + + return wed.Year(), (wed.YearDay()-1)/7 + 1 +} + +const DefaultWeekMode = 0 + +func (d Date) Week(mode int) int { + switch mode & 7 { + case 0: + year, week := d.SundayWeek() + if year < d.Year() { + return 0 + } + return week + case 1: + year, week := d.ISOWeek() + if year < d.Year() { + return 0 + } + return week + case 2: + _, week := d.SundayWeek() + return week + case 3: + _, week := d.ISOWeek() + return week + case 4: + year, week := d.Sunday4DayWeek() + if year < d.Year() { + return 0 + } + return week + case 5: + year, week := d.MondayWeek() + if year < d.Year() { + return 0 + } + return week + case 6: + _, week := d.Sunday4DayWeek() + return week + case 7: + _, week := d.MondayWeek() + return week + default: + return d.Week(DefaultWeekMode) + } +} + +func (d Date) YearWeek(mode int) int { + switch mode { + case 0, 2: + year, week := d.SundayWeek() + return year*100 + week + case 1, 3: + year, week := d.ISOWeek() + return year*100 + week + case 4, 5, 6, 7: + // TODO + return 0 + default: + return d.YearWeek(DefaultWeekMode) + } +} + +func (d Date) Quarter() int { + switch d.Month() { + case 0: + return 0 + case 1, 2, 3: + return 1 + case 4, 5, 6: + return 2 + case 7, 8, 9: + return 3 + case 10, 11, 12: + return 4 + default: + panic("unreachable") + } +} + +func (dt DateTime) IsZero() bool { + return dt.Date.IsZero() && dt.Time.IsZero() +} + +func (dt DateTime) Hash(h *vthash.Hasher) { + dt.Date.Hash(h) + dt.Time.Hash(h) +} + +func (t Time) ToDuration() time.Duration { + duration := time.Duration(t.Hour())*time.Hour + + time.Duration(t.Minute())*time.Minute + + time.Duration(t.Second())*time.Second + + time.Duration(t.Nanosecond())*time.Nanosecond + if t.Neg() { + return -duration + } + return duration +} + +func (t Time) toStdTime(year int, month time.Month, day int, loc *time.Location) (out time.Time) { + return time.Date(year, month, day, 0, 0, 0, 0, loc).Add(t.ToDuration()) +} + +func (t Time) ToStdTime(loc *time.Location) (out time.Time) { + year, month, day := time.Now().Date() + return t.toStdTime(year, month, day, loc) +} + +func (t Time) AddInterval(itv *Interval, stradd bool) (Time, uint8, bool) { + dt := DateTime{Time: t} + ok := dt.addInterval(itv) + return dt.Time, itv.precision(stradd), ok +} + +func (t Time) toSeconds() int { + tsecs := t.Hour()*secondsPerHour + t.Minute()*secondsPerMinute + t.Second() + if t.Neg() { + return -tsecs + } + return tsecs +} + +func (d Date) ToStdTime(loc *time.Location) (out time.Time) { + return time.Date(d.Year(), time.Month(d.Month()), d.Day(), 0, 0, 0, 0, loc) +} + +func (dt DateTime) ToStdTime(loc *time.Location) time.Time { + zerodate := dt.Date.IsZero() + zerotime := dt.Time.IsZero() + + switch { + case zerodate && zerotime: + return time.Time{} + case zerodate: + return dt.Time.ToStdTime(loc) + case zerotime: + return dt.Date.ToStdTime(loc) + default: + year, month, day := dt.Date.Year(), time.Month(dt.Date.Month()), dt.Date.Day() + return dt.Time.toStdTime(year, month, day, loc) + } +} + +func (dt DateTime) Format(prec uint8) []byte { + return DateTime_YYYY_MM_DD_hh_mm_ss.Format(dt, prec) +} + +func (d Date) Format() []byte { + return Date_YYYY_MM_DD.Format(DateTime{Date: d}, 0) +} + +func (d Date) FormatInt64() int64 { + return int64(d.Year())*10000 + int64(d.Month())*100 + int64(d.Day()) +} + +func (d Date) Compare(d2 Date) int { + y1, y2 := d.Year(), d2.Year() + if y1 < y2 { + return -1 + } + if y1 > y2 { + return 1 + } + m1, m2 := d.Month(), d2.Month() + if m1 < m2 { + return -1 + } + if m1 > m2 { + return 1 + } + day1, day2 := d.Day(), d2.Day() + if day1 < day2 { + return -1 + } + if day1 > day2 { + return 1 + } + return 0 +} + +func (d Date) AddInterval(itv *Interval) (Date, bool) { + dt := DateTime{Date: d} + ok := dt.addInterval(itv) + return dt.Date, ok +} + +func (dt DateTime) FormatInt64() int64 { + d := dt.Round(0) + return d.Date.FormatInt64()*1000000 + d.Time.FormatInt64() +} + +func (dt DateTime) FormatFloat64() float64 { + return float64(dt.Date.FormatInt64()*1000000) + dt.Time.FormatFloat64() +} + +func (dt DateTime) FormatDecimal() decimal.Decimal { + return decimal.New(dt.Date.FormatInt64(), 6).Add(dt.Time.FormatDecimal()) +} + +func (dt DateTime) Compare(dt2 DateTime) int { + zerodate1, zerodate2 := dt.Date.IsZero(), dt2.Date.IsZero() + + switch { + case zerodate1 && zerodate2: + return dt.Time.Compare(dt2.Time) + case zerodate1 || zerodate2: + // if we're comparing a time to a datetime, we need to normalize them + // both into datetimes; this normalization is not trivial because negative + // times result in a date change, so let the standard library handle this + return dt.ToStdTime(time.Local).Compare(dt2.ToStdTime(time.Local)) + } + if cmp := dt.Date.Compare(dt2.Date); cmp != 0 { + return cmp + } + return dt.Time.Compare(dt2.Time) +} + +func (dt DateTime) AddInterval(itv *Interval, stradd bool) (DateTime, uint8, bool) { + ok := dt.addInterval(itv) + return dt, itv.precision(stradd), ok +} + +func (dt DateTime) Round(p int) (r DateTime) { + if dt.Time.nanosecond == 0 { + return dt + } + + n := dt.Time.Nanosecond() + prec := precs[p] + s := (n / prec) * prec + l := s + prec + + if n-s >= l-n { + n = l + } else { + n = s + } + + r = dt + if n == 1e9 { + r.Time.nanosecond = 0 + return NewDateTimeFromStd(r.ToStdTime(time.Local).Add(time.Second)) + } + r.Time.nanosecond = uint32(n) + return r +} + +func (dt DateTime) toSeconds() int { + return (dt.Date.Day()-1)*secondsPerDay + dt.Time.toSeconds() +} + +func (dt *DateTime) addInterval(itv *Interval) bool { + switch { + case itv.unit.HasTimeParts(): + if !itv.inRange() { + return false + } + + nsec := dt.Time.Nanosecond() + itv.nsec + sec := dt.toSeconds() + itv.toSeconds() + (nsec / int(time.Second)) + nsec = nsec % int(time.Second) + + if nsec < 0 { + nsec += int(time.Second) + sec-- + } + + days := sec / secondsPerDay + sec -= days * secondsPerDay + + if sec < 0 { + sec += secondsPerDay + days-- + } + + dt.Time.nanosecond = uint32(nsec) + dt.Time.second = uint8(sec % secondsPerMinute) + dt.Time.minute = uint8((sec / secondsPerMinute) % secondsPerMinute) + dt.Time.hour = uint16(sec / secondsPerHour) + + daynum := mysqlDayNumber(dt.Date.Year(), dt.Date.Month(), 1) + days + if daynum < 0 || daynum > maxDay { + return false + } + + dt.Date.year, dt.Date.month, dt.Date.day = mysqlDateFromDayNumber(daynum) + return true + + case itv.unit.HasDayParts(): + daynum := mysqlDayNumber(dt.Date.Year(), dt.Date.Month(), dt.Date.Day()) + daynum += itv.day + dt.Date.year, dt.Date.month, dt.Date.day = mysqlDateFromDayNumber(daynum) + return true + + case itv.unit.HasMonthParts(): + months := dt.Date.Year()*12 + itv.year*12 + (dt.Date.Month() - 1) + itv.month + if months < 0 || months >= 120000 { + return false + } + + year := months / 12 + month := (months % 12) + 1 + + dt.Date.year = uint16(year) + dt.Date.month = uint8(month) + + // MySQL quirk: if the original date was in a day that the new month + // doesn't have, the date is offset backwards to the last day of + // the new month. This is the opposite to normal date handling where + // we'd offset days into the next month. + if dim := daysIn(time.Month(month), year); dt.Date.Day() > dim { + dt.Date.day = uint8(dim) + } + return true + + case itv.unit == IntervalYear: + if itv.year > 10000 { + return false + } + + year := dt.Date.Year() + itv.year + dt.Date.year = uint16(year) + + // MySQL quirk: if the original date was Feb 29th on a leap year, and + // the resulting year is not a leap year, the date is offset backwards. + // This is the opposite to what normal date handling does. + if dt.Date.Month() == 2 && dt.Date.Day() == 29 && !isLeap(year) { + dt.Date.day = 28 + } + return true + + default: + panic("unexpected IntervalType") + } +} + +func (dt DateTime) WeightString(dst []byte) []byte { + // This logic does the inverse of what we do in the binlog parser for the datetime2 type. + year, month, day := dt.Date.Year(), dt.Date.Month(), dt.Date.Day() + ymd := uint64(year*13+month)<<5 | uint64(day) + hms := uint64(dt.Time.Hour())<<12 | uint64(dt.Time.Minute())<<6 | uint64(dt.Time.Second()) + raw := (ymd<<17|hms)<<24 + uint64(dt.Time.Nanosecond()/1000) + if dt.Time.Neg() { + raw = -raw + } + + raw = raw ^ (1 << 63) + return binary.BigEndian.AppendUint64(dst, raw) +} + +func NewDateFromStd(t time.Time) Date { + year, month, day := t.Date() + return Date{ + year: uint16(year), + month: uint8(month), + day: uint8(day), + } +} + +func NewTimeFromStd(t time.Time) Time { + hour, min, sec := t.Clock() + nsec := t.Nanosecond() + return Time{ + hour: uint16(hour), + minute: uint8(min), + second: uint8(sec), + nanosecond: uint32(nsec), + } +} + +func NewDateTimeFromStd(t time.Time) DateTime { + return DateTime{ + Date: NewDateFromStd(t), + Time: NewTimeFromStd(t), + } +} diff --git a/go/mysql/datetime/format_test.go b/go/mysql/datetime/format_test.go new file mode 100644 index 00000000000..b0db59e49f9 --- /dev/null +++ b/go/mysql/datetime/format_test.go @@ -0,0 +1,57 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datetime + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestFormattingFromMySQL(t *testing.T) { + const FormatString = `%a %b %c %D %d %e %f %H %h %I %i %j %k %l %M %m %p %r %S %s %T %U %u %V %v %W %w %X %x %Y %y %%` + + var cases = []struct { + timestamp string + output string + }{ + { + `1999-12-31 23:59:58.999`, + `Fri Dec 12 31st 31 31 999000 23 11 11 59 365 23 11 December 12 PM 11:59:58 PM 58 58 23:59:58 52 52 52 52 Friday 5 1999 1999 1999 99 %`, + }, + { + `2000-01-02 03:04:05`, + `Sun Jan 1 2nd 02 2 000000 03 03 03 04 002 3 3 January 01 AM 03:04:05 AM 05 05 03:04:05 01 00 01 52 Sunday 0 2000 1999 2000 00 %`, + }, + { + `2001-01-01 01:04:05`, + `Mon Jan 1 1st 01 1 000000 01 01 01 04 001 1 1 January 01 AM 01:04:05 AM 05 05 01:04:05 00 01 53 01 Monday 1 2000 2001 2001 01 %`, + }, + } + + for _, tc := range cases { + t.Run(tc.timestamp, func(t *testing.T) { + dt, _, ok := ParseDateTime(tc.timestamp, -1) + require.True(t, ok) + + eval, err := Format(FormatString, dt, 6) + require.NoError(t, err) + + require.Equal(t, tc.output, string(eval)) + }) + } +} diff --git a/go/mysql/datetime/formats.go b/go/mysql/datetime/formats.go new file mode 100644 index 00000000000..adec39919ae --- /dev/null +++ b/go/mysql/datetime/formats.go @@ -0,0 +1,197 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datetime + +var DefaultMySQLStrftime = map[byte]Spec{ + 'a': fmtWeekdayNameShort{}, + 'b': fmtMonthNameShort{}, + 'c': fmtMonth{false}, + 'D': fmtMonthDaySuffix{}, + 'd': fmtDay{true}, + 'e': fmtDay{false}, + 'f': fmtMicroseconds{}, + 'H': fmtHour24{true}, + 'h': fmtHour12{true}, + 'I': fmtHour12{true}, + 'i': fmtMin{true}, + 'j': fmtZeroYearDay{}, + 'k': fmtHour24{false}, + 'l': fmtHour12{false}, + 'M': fmtMonthName{}, + 'm': fmtMonth{true}, + 'p': fmtAMorPM{}, + 'r': fmtFullTime12{}, + 'S': fmtSecond{true, false}, + 's': fmtSecond{true, false}, + 'T': fmtFullTime24{}, + 'U': fmtWeek0{}, + 'u': fmtWeek1{}, + 'V': fmtWeek2{}, + 'v': fmtWeek3{}, + 'W': fmtWeekdayName{}, + 'w': fmtWeekday{}, + 'X': fmtYearForWeek2{}, + 'x': fmtYearForWeek3{}, + 'Y': fmtYearLong{}, + 'y': fmtYearShort{}, +} + +var Date_YYYY_MM_DD = &Strftime{ + pattern: "YYYY-MM-DD", + compiled: []Spec{ + fmtYearLong{}, + fmtSeparator('-'), + fmtMonth{true}, + fmtSeparator('-'), + fmtDay{true}, + }, +} + +var Date_YYYY_M_D = &Strftime{ + pattern: "YYYY-M-D", + compiled: []Spec{ + fmtYearLong{}, + fmtSeparator('-'), + fmtMonth{false}, + fmtSeparator('-'), + fmtDay{false}, + }, +} + +var Date_YY_M_D = &Strftime{ + pattern: "YY-M-D", + compiled: []Spec{ + fmtYearShort{}, + fmtSeparator('-'), + fmtMonth{false}, + fmtSeparator('-'), + fmtDay{false}, + }, +} + +var Date_YYYYMMDD = &Strftime{ + pattern: "YYYYMMDD", + compiled: []Spec{ + fmtYearLong{}, + fmtMonth{true}, + fmtDay{true}, + }, +} + +var Date_YYMMDD = &Strftime{ + pattern: "YYMMDD", + compiled: []Spec{ + fmtYearShort{}, + fmtMonth{true}, + fmtDay{true}, + }, +} + +var DateTime_YYYY_MM_DD_hh_mm_ss = &Strftime{ + pattern: "YYYY-MM-DD hh:mm:ss", + compiled: []Spec{ + fmtYearLong{}, + fmtSeparator('-'), + fmtMonth{true}, + fmtSeparator('-'), + fmtDay{true}, + fmtTimeSeparator{}, + fmtHour24{true}, + fmtSeparator(':'), + fmtMin{true}, + fmtSeparator(':'), + fmtSecond{true, true}, + }, +} + +var DateTime_YYYY_M_D_h_m_s = &Strftime{ + pattern: "YYYY-M-D h:m:s", + compiled: []Spec{ + fmtYearLong{}, + fmtSeparator('-'), + fmtMonth{false}, + fmtSeparator('-'), + fmtDay{false}, + fmtTimeSeparator{}, + fmtHour24{false}, + fmtSeparator(':'), + fmtMin{false}, + fmtSeparator(':'), + fmtSecond{false, true}, + }, +} + +var DateTime_YY_M_D_h_m_s = &Strftime{ + pattern: "YY-M-D h:m:s", + compiled: []Spec{ + fmtYearShort{}, + fmtSeparator('-'), + fmtMonth{false}, + fmtSeparator('-'), + fmtDay{false}, + fmtTimeSeparator{}, + fmtHour24{false}, + fmtSeparator(':'), + fmtMin{false}, + fmtSeparator(':'), + fmtSecond{false, true}, + }, +} + +var DateTime_YYYYMMDDhhmmss = &Strftime{ + pattern: "YYYYMMDDhhmmss", + compiled: []Spec{ + fmtYearLong{}, + fmtMonth{true}, + fmtDay{true}, + fmtHour24{true}, + fmtMin{true}, + fmtSecond{true, true}, + }, +} + +var DateTime_YYMMDDhhmmss = &Strftime{ + pattern: "YYMMDDhhmmss", + compiled: []Spec{ + fmtYearShort{}, + fmtMonth{true}, + fmtDay{}, + fmtHour24{true}, + fmtMin{true}, + fmtSecond{true, true}, + }, +} + +var Time_hh_mm_ss = &Strftime{ + pattern: "hh:mm:ss", + compiled: []Spec{ + fmtHour24{true}, + fmtSeparator(':'), + fmtMin{true}, + fmtSeparator(':'), + fmtSecond{true, true}, + }, +} + +var Time_hhmmss = &Strftime{ + pattern: "hhmmss", + compiled: []Spec{ + fmtHour24{true}, + fmtMin{true}, + fmtSecond{true, true}, + }, +} diff --git a/go/mysql/datetime/helpers.go b/go/mysql/datetime/helpers.go new file mode 100644 index 00000000000..33d673782fc --- /dev/null +++ b/go/mysql/datetime/helpers.go @@ -0,0 +1,291 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datetime + +import ( + "time" +) + +// appendInt appends the decimal form of x to b and returns the result. +// If the decimal form (excluding sign) is shorter than width, the result is padded with leading 0's. +// Duplicates functionality in strconv, but avoids dependency. +func appendInt(b []byte, x int, width int) []byte { + u := uint(x) + if x < 0 { + b = append(b, '-') + u = uint(-x) + } + + // 2-digit and 4-digit fields are the most common in time formats. + utod := func(u uint) byte { return '0' + byte(u) } + switch { + case width == 2 && u < 1e2: + return append(b, utod(u/1e1), utod(u%1e1)) + case width == 4 && u < 1e4: + return append(b, utod(u/1e3), utod(u/1e2%1e1), utod(u/1e1%1e1), utod(u%1e1)) + } + + // Compute the number of decimal digits. + var n int + if u == 0 { + n = 1 + } + for u2 := u; u2 > 0; u2 /= 10 { + n++ + } + + // Add 0-padding. + for pad := width - n; pad > 0; pad-- { + b = append(b, '0') + } + + // Ensure capacity. + if len(b)+n <= cap(b) { + b = b[:len(b)+n] + } else { + b = append(b, make([]byte, n)...) + } + + // Assemble decimal in reverse order. + i := len(b) - 1 + for u >= 10 && i > 0 { + q := u / 10 + b[i] = utod(u - q*10) + u = q + i-- + } + b[i] = utod(u) + return b +} + +// match reports whether s1 and s2 match ignoring case. +// It is assumed s1 and s2 are the same length. +func match(s1, s2 string) bool { + for i := 0; i < len(s1); i++ { + c1 := s1[i] + c2 := s2[i] + if c1 != c2 { + // Switch to lower-case; 'a'-'A' is known to be a single bit. + c1 |= 'a' - 'A' + c2 |= 'a' - 'A' + if c1 != c2 || c1 < 'a' || c1 > 'z' { + return false + } + } + } + return true +} + +func lookup(tab []string, val string) (int, string, bool) { + for i, v := range tab { + if len(val) >= len(v) && match(val[0:len(v)], v) { + return i, val[len(v):], true + } + } + return -1, val, false +} + +func leadingInt[bytes []byte | string](s bytes) (x uint64, rem bytes, ok bool) { + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + break + } + if x > 1<<63/10 { + // overflow + return 0, rem, false + } + x = x*10 + uint64(c) - '0' + if x > 1<<63 { + // overflow + return 0, rem, false + } + } + return x, s[i:], true +} + +func atoi[bytes []byte | string](s bytes) (x int, ok bool) { + neg := false + if len(s) > 0 && (s[0] == '-' || s[0] == '+') { + neg = s[0] == '-' + s = s[1:] + } + q, rem, ok := leadingInt(s) + x = int(q) + if !ok || len(rem) > 0 { + return 0, false + } + if neg { + x = -x + } + return x, true +} + +// isDigit reports whether s[i] is in range and is a decimal digit. +func isDigit[bytes []byte | string](s bytes, i int) bool { + if len(s) <= i { + return false + } + c := s[i] + return '0' <= c && c <= '9' +} + +// isNumber reports whether s is an integer or decimal number. +// Returns the length of the integral part of the number if it's +// a valid number. +// It accepts trailing garbage but only after a dot. +func isNumber[bytes []byte | string](s bytes) (int, bool) { + var dot bool + pos := -1 + for i := 0; i < len(s); i++ { + if !isDigit(s, i) { + if dot { + return i, true + } + if s[i] == '.' { + dot = true + continue + } + return 0, false + } + if !dot { + pos = i + 1 + } + } + return pos, true +} + +// getnum parses s[0:1] or s[0:2] (fixed forces s[0:2]) +// as a decimal integer and returns the integer and the +// remainder of the string. +func getnum(s string, fixed bool) (int, string, bool) { + if !isDigit(s, 0) { + return 0, s, false + } + if !isDigit(s, 1) { + if fixed { + return 0, s, false + } + return int(s[0] - '0'), s[1:], true + } + return int(s[0]-'0')*10 + int(s[1]-'0'), s[2:], true +} + +func getnuml(s string, l int) (int, string, bool) { + var res int + for i := 0; i < l; i++ { + if !isDigit(s, i) { + return 0, s, false + } + res = res*10 + int(s[i]-'0') + } + return res, s[l:], true +} + +func getnumn(s string) (int, string, bool) { + if len(s) == 0 || !('0' <= s[0] && s[0] <= '9') { + return 0, s, false + } + + var n int + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + n = n*10 + int(s[0]-'0') + s = s[1:] + } + return n, s, true +} + +// daysBefore[m] counts the number of days in a non-leap year +// before month m begins. There is an entry for m=12, counting +// the number of days before January of next year (365). +var daysBefore = [...]int32{ + 0, + 31, + 31 + 28, + 31 + 28 + 31, + 31 + 28 + 31 + 30, + 31 + 28 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31, +} + +var daysInMonth = [...]int{ + 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31, +} + +var daysInMonthLeap = [...]int{ + 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31, +} + +func daysIn(m time.Month, year int) int { + if m == time.February && isLeap(year) { + return 29 + } + return int(daysBefore[m] - daysBefore[m-1]) +} + +func isLeap(year int) bool { + return year%4 == 0 && (year%100 != 0 || year%400 == 0) +} + +func daysInYear(year int) int { + if isLeap(year) { + return 366 + } + return 365 +} + +func parseNanoseconds[bytes []byte | string](value bytes, nbytes int) (ns int, l int, ok bool) { + if value[0] != '.' { + return 0, 0, false + } + if nbytes > 10 { + value = value[:10] + nbytes = 10 + } + if ns, ok = atoi(value[1:nbytes]); !ok { + return 0, 0, false + } + if ns < 0 { + return 0, 0, false + } + // We need nanoseconds, which means scaling by the number + // of missing digits in the format, maximum length 10. + scaleDigits := 10 - nbytes + for i := 0; i < scaleDigits; i++ { + ns *= 10 + } + + l = nbytes - 1 + if l > 6 { + l = 6 + } + + return +} + +const ( + secondsPerMinute = 60 + secondsPerHour = 60 * secondsPerMinute + secondsPerDay = 24 * secondsPerHour +) diff --git a/go/mysql/datetime/interval.go b/go/mysql/datetime/interval.go new file mode 100644 index 00000000000..21395f2174d --- /dev/null +++ b/go/mysql/datetime/interval.go @@ -0,0 +1,425 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datetime + +import ( + "math" + "math/bits" + "strconv" + "strings" + + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/mysql/decimal" +) + +// IntervalType represents the temporal elements contained in an Interval. +// Intervals in MySQL can contain more than one temporal element. We define their types as +// a bitset to let us efficiently query the temporal elements that form each interval. +// There are two kinds of IntervalTypes: unary and compound. Unary interval types contain +// a single temporal element (e.g. SECONDS, or DAYS) and hence contain only one bit set. +// Compount interval types are the logical combination of several unary interval types. +type IntervalType uint8 + +// IntervalType constants. +const ( + // Unary interval types + IntervalNone IntervalType = 0 + IntervalMicrosecond IntervalType = 1 << 0 + IntervalSecond IntervalType = 1 << 1 + IntervalMinute IntervalType = 1 << 2 + IntervalHour IntervalType = 1 << 3 + IntervalDay IntervalType = 1 << 4 + IntervalMonth IntervalType = 1 << 5 + IntervalYear IntervalType = 1 << 6 + intervalMulti IntervalType = 1 << 7 + + // IntervalWeek and IntervalQuarter are an exception for unary interval types, + // which are not unique temporal elements but instead a modifier on a unary element + // - WEEK is just a count of DAYS multiplied by 7 + // - QUARTER is just a count of MONTHS multiplied by 3 + IntervalWeek = IntervalDay | intervalMulti + IntervalQuarter = IntervalMonth | intervalMulti + + // Compound interval types + IntervalSecondMicrosecond = IntervalSecond | IntervalMicrosecond + IntervalMinuteMicrosecond = IntervalMinute | IntervalSecond | IntervalMicrosecond + IntervalMinuteSecond = IntervalMinute | IntervalSecond + IntervalHourMicrosecond = IntervalHour | IntervalMinute | IntervalSecond | IntervalMicrosecond + IntervalHourSecond = IntervalHour | IntervalMinute | IntervalSecond + IntervalHourMinute = IntervalHour | IntervalMinute + IntervalDayMicrosecond = IntervalDay | IntervalHour | IntervalMinute | IntervalSecond | IntervalMicrosecond + IntervalDaySecond = IntervalDay | IntervalHour | IntervalMinute | IntervalSecond + IntervalDayMinute = IntervalDay | IntervalHour | IntervalMinute + IntervalDayHour = IntervalDay | IntervalHour + IntervalYearMonth = IntervalYear | IntervalMonth +) + +type intervalSetter func(tp *Interval, val int) + +var intervalSet = [...]intervalSetter{ + intervalSetMicrosecond, + intervalSetSecond, + intervalSetMinute, + intervalSetHour, + intervalSetDay, + intervalSetMonth, + intervalSetYear, +} + +// setter returns the setter method for this interval's type. +// If this is a unary interval, it'll return the setter for the interval's unary type. +// If this is a compound interval, it'll return the setter for the smallest unary type +// in the interval. +func (itv IntervalType) setter() intervalSetter { + // find the lowest bit set in the interval, this is the smallest unary type + unary := itv & -itv + + // map from an unary interval type to its offset by counting the trailing + // zeroes. e.g. for HOUR(1 << 3), this will return 3, which the position + // for the HOUR setter in intervalSet + return intervalSet[bits.TrailingZeros8(uint8(unary))] +} + +func (itv IntervalType) PartCount() int { + return bits.OnesCount8(uint8(itv & ^intervalMulti)) +} + +func (itv IntervalType) HasTimeParts() bool { + return itv&(IntervalHour|IntervalMinute|IntervalSecond|IntervalMicrosecond) != 0 +} + +func (itv IntervalType) HasDateParts() bool { + return itv&(IntervalYear|IntervalMonth|IntervalDay) != 0 +} + +func (itv IntervalType) HasDayParts() bool { + return (itv & IntervalDay) != 0 +} + +func (itv IntervalType) HasMonthParts() bool { + return (itv & IntervalMonth) != 0 +} + +func (itv IntervalType) NeedsPrecision() bool { + return itv&IntervalMicrosecond != 0 +} + +// ToString returns the type as a string +func (itv IntervalType) ToString() string { + switch itv { + case IntervalYear: + return "year" + case IntervalQuarter: + return "quarter" + case IntervalMonth: + return "month" + case IntervalWeek: + return "week" + case IntervalDay: + return "day" + case IntervalHour: + return "hour" + case IntervalMinute: + return "minute" + case IntervalSecond: + return "second" + case IntervalMicrosecond: + return "microsecond" + case IntervalYearMonth: + return "year_month" + case IntervalDayHour: + return "day_hour" + case IntervalDayMinute: + return "day_minute" + case IntervalDaySecond: + return "day_second" + case IntervalHourMinute: + return "hour_minute" + case IntervalHourSecond: + return "hour_second" + case IntervalMinuteSecond: + return "minute_second" + case IntervalDayMicrosecond: + return "day_microsecond" + case IntervalHourMicrosecond: + return "hour_microsecond" + case IntervalMinuteMicrosecond: + return "minute_microsecond" + case IntervalSecondMicrosecond: + return "second_microsecond" + default: + return "[unknown IntervalType]" + } +} + +func intervalSetYear(tp *Interval, val int) { + tp.year = val +} + +func intervalSetMonth(tp *Interval, val int) { + // if the intervalMulti flag is set, this interval expects QUARTERS instead of months + if tp.unit&intervalMulti != 0 { + val = val * 3 + } + tp.month = val +} + +func intervalSetDay(tp *Interval, val int) { + // if the intervalMulti flag is set, this interval expects WEEKS instead of days + if tp.unit&intervalMulti != 0 { + val = val * 7 + } + tp.day = val +} + +func intervalSetHour(tp *Interval, val int) { + tp.hour = val +} + +func intervalSetMinute(tp *Interval, val int) { + tp.min = val +} + +func intervalSetSecond(tp *Interval, val int) { + tp.sec = val +} + +func intervalSetMicrosecond(tp *Interval, val int) { + // if we are setting the Microseconds in this interval, but the + // interval's type isn't explicitly MICROSECOND (i.e. it's an interval + // with several values besides MICROSECOND), the value being passed + // here won't be a fixed number of microseconds, but a fractional part. + // We need to scale it into microseconds. + // E.g. when parsing a SECOND:MICROSECOND value of '1.5', the input + // to this setter will be 5, but the interval doesn't contain 5 microseconds, + // it contains 500000. We perform the scaling into 6 digits using base10 log. + if tp.unit != IntervalMicrosecond { + digits := int(math.Log10(float64(val)) + 1) + val = val * int(math.Pow10(6-digits)) + } + // we store nsec internally, so convert from microseconds to nanoseconds + tp.nsec = val * 1000 +} + +// parseIntervalFields parses a internal string into separate numeric fields. +// The parsing is extremely lax according to MySQL. Any contiguous run of numbers +// is considered a field, and any non-numeric character is ignored. +func parseIntervalFields(itv string, negate *bool) (fields []int) { + if len(itv) > 0 && itv[0] == '-' { + *negate = !*negate + itv = itv[1:] + } + + for { + for len(itv) > 0 && !('0' <= itv[0] && itv[0] <= '9') { + itv = itv[1:] + } + if len(itv) == 0 { + break + } + + var n int + for len(itv) > 0 && '0' <= itv[0] && itv[0] <= '9' { + n = n*10 + int(itv[0]-'0') + itv = itv[1:] + } + + fields = append(fields, n) + } + return +} + +type Interval struct { + timeparts + unit IntervalType +} + +func (itv *Interval) Unit() IntervalType { + return itv.unit +} + +const maxDay = 3652424 + +func (itv *Interval) inRange() bool { + if itv.day > maxDay { + return false + } + if itv.hour > maxDay*24 { + return false + } + if itv.min > maxDay*24*60 { + return false + } + if itv.sec > maxDay*24*60*60 { + return false + } + return true +} + +// setFromFields sets the duration of interval from a slice of fields and +// the given interval type. +// This follow's MySQL's behavior: if there are fewer fields than the ones +// we'd expect to see in the interval's type, we pick the RIGHTMOST as +// the values for the interval. +// E.g. if our interval type wants HOUR:MINUTE:SECOND and we have [1, 1] +// as input fields, the resulting interval is '1min1sec' +func (itv *Interval) setFromFields(fields []int, unit IntervalType) bool { + parts := unit.PartCount() + if parts == 1 { + unit.setter()(itv, fields[0]) + return true + } + if len(fields) > 3 && parts < 4 { + return false + } + + for f, set := range intervalSet { + if len(fields) == 0 { + break + } + if unit&(1<= 3652500 { + return 0, 0, 0 + } + + year := daynr * 100 / 36525 + leapAdjust := (((year-1)/100 + 1) * 3) / 4 + yday := (daynr - year*365) - (year-1)/4 + leapAdjust + + if diy := daysInYear(year); yday > diy { + yday -= diy + year++ + } + + daycount := daysInMonth + if isLeap(year) { + daycount = daysInMonthLeap + } + for month, dim := range daycount { + if yday <= dim { + return uint16(year), uint8(month + 1), uint8(yday) + } + yday -= dim + } + + panic("unreachable: yday is too large?") +} diff --git a/go/mysql/datetime/mydate_test.go b/go/mysql/datetime/mydate_test.go new file mode 100644 index 00000000000..29ecd2df9d2 --- /dev/null +++ b/go/mysql/datetime/mydate_test.go @@ -0,0 +1,59 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datetime + +import ( + "encoding/json" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDayNumber(t *testing.T) { + td, err := os.Open("testdata/year_to_daynr.json") + require.NoError(t, err) + defer td.Close() + + var expected []int + err = json.NewDecoder(td).Decode(&expected) + require.NoError(t, err) + + for year, daynr := range expected { + assert.Equal(t, daynr, mysqlDayNumber(year, 1, 1)) + } +} + +func TestDayNumberFields(t *testing.T) { + td, err := os.Open("testdata/daynr_to_date.json") + require.NoError(t, err) + defer td.Close() + + var expected [][4]int + err = json.NewDecoder(td).Decode(&expected) + require.NoError(t, err) + + for _, tc := range expected { + y, m, d := mysqlDateFromDayNumber(tc[0]) + assert.Equal(t, tc[1], int(y)) + assert.Equal(t, tc[2], int(m)) + assert.Equal(t, tc[3], int(d)) + + assert.Equalf(t, tc[0], mysqlDayNumber(tc[1], tc[2], tc[3]), "date %d-%d-%d", tc[1], tc[2], tc[3]) + } +} diff --git a/go/mysql/datetime/parse.go b/go/mysql/datetime/parse.go new file mode 100644 index 00000000000..e8f17191f4c --- /dev/null +++ b/go/mysql/datetime/parse.go @@ -0,0 +1,408 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datetime + +import ( + "math" + "strings" + + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/mysql/fastparse" +) + +func parsetimeHours(tp *timeparts, in string) (out string, ok bool) { + if tp.hour, in, ok = getnumn(in); ok { + tp.day = tp.day + tp.hour/24 + tp.hour = tp.hour % 24 + + switch { + case len(in) == 0: + return "", true + case in[0] == ':': + return parsetimeMinutes(tp, in[1:]) + } + } + return "", false +} + +func parsetimeMinutes(tp *timeparts, in string) (out string, ok bool) { + if tp.min, in, ok = getnum(in, false); ok { + switch { + case tp.min > 59: + return "", false + case len(in) == 0: + return "", true + case in[0] == ':': + return parsetimeSeconds(tp, in[1:]) + } + } + return "", false +} + +func parsetimeSeconds(tp *timeparts, in string) (out string, ok bool) { + if tp.sec, in, ok = getnum(in, false); ok { + switch { + case tp.sec > 59: + return "", false + case len(in) == 0: + return "", true + case len(in) > 1 && in[0] == '.': + n := 1 + for ; n < len(in) && isDigit(in, n); n++ { + } + var l int + tp.nsec, l, ok = parseNanoseconds(in, n) + tp.prec = uint8(l) + return "", ok && len(in) == n + } + } + return "", false +} + +func parsetimeAny(tp *timeparts, in string) (out string, ok bool) { + orig := in + for i := 0; i < len(in); i++ { + switch r := in[i]; { + case isSpace(r): + tp.day, in, ok = getnum(in, false) + if !ok || !isSpace(in[0]) { + tp.day = 0 + return parsetimeNoDelimiters(tp, orig) + } + for len(in) > 0 && isSpace(in[0]) { + in = in[1:] + } + if !isDigit(in, 0) { + tp.day = 0 + return parsetimeNoDelimiters(tp, orig) + } + if tp.day > 34 { + return "", clampTimeparts(tp) + } + return parsetimeHours(tp, in) + case r == ':': + return parsetimeHours(tp, in) + } + } + return parsetimeNoDelimiters(tp, in) +} + +func parsetimeNoDelimiters(tp *timeparts, in string) (out string, ok bool) { + var integral int + for ; integral < len(in); integral++ { + if in[integral] == '.' || !isDigit(in, integral) { + break + } + } + + switch integral { + default: + // MySQL limits this to a numeric value that fits in a 32-bit unsigned integer. + i, _ := fastparse.ParseInt64(in[:integral], 10) + if i > math.MaxUint32 { + return "", false + } + if i < -math.MaxUint32 { + return "", false + } + + tp.hour, in, ok = getnuml(in, integral-4) + if !ok { + return + } + tp.day = tp.day + tp.hour/24 + tp.hour = tp.hour % 24 + integral = 4 + fallthrough + + case 3, 4: + tp.min, in, ok = getnuml(in, integral-2) + if !ok || tp.min > 59 { + return "", false + } + integral = 2 + fallthrough + + case 1, 2: + tp.sec, in, ok = getnuml(in, integral) + if !ok || tp.sec > 59 { + return "", false + } + case 0: + return "", false + } + + if len(in) > 1 && in[0] == '.' && isDigit(in, 1) { + n := 1 + for ; n < len(in) && isDigit(in, n); n++ { + } + var l int + tp.nsec, l, ok = parseNanoseconds(in, n) + tp.prec = uint8(l) + in = in[n:] + } + + return in, clampTimeparts(tp) && ok +} + +func clampTimeparts(tp *timeparts) bool { + // Maximum time is 838:59:59, so we have to clamp + // it to that value here if we otherwise successfully + // parser the time. + if tp.day > 34 || tp.day == 34 && tp.hour > 22 { + tp.day = 34 + tp.hour = 22 + tp.min = 59 + tp.sec = 59 + return false + } + return true +} + +func ParseTime(in string, prec int) (t Time, l int, ok bool) { + in = strings.Trim(in, " \t\r\n") + if len(in) == 0 { + return Time{}, 0, false + } + var neg bool + if in[0] == '-' { + neg = true + in = in[1:] + } + + var tp timeparts + in, ok = parsetimeAny(&tp, in) + ok = clampTimeparts(&tp) && ok + + hours := uint16(24*tp.day + tp.hour) + if !tp.isZero() && neg { + hours |= negMask + } + + t = Time{ + hour: hours, + minute: uint8(tp.min), + second: uint8(tp.sec), + nanosecond: uint32(tp.nsec), + } + + if prec < 0 { + prec = int(tp.prec) + } else { + t = t.Round(prec) + } + + return t, prec, ok && len(in) == 0 +} + +func ParseDate(s string) (Date, bool) { + if _, ok := isNumber(s); ok { + if len(s) >= 8 { + dt, _, ok := Date_YYYYMMDD.Parse(s, 0) + return dt.Date, ok + } + dt, _, ok := Date_YYMMDD.Parse(s, 0) + return dt.Date, ok + } + + if len(s) >= 8 { + if t, _, ok := Date_YYYY_M_D.Parse(s, 0); ok { + return t.Date, true + } + } + if len(s) >= 6 { + if t, _, ok := Date_YY_M_D.Parse(s, 0); ok { + return t.Date, true + } + } + return Date{}, false +} + +func ParseDateTime(s string, l int) (DateTime, int, bool) { + if sl, ok := isNumber(s); ok { + if sl >= 14 { + return DateTime_YYYYMMDDhhmmss.Parse(s, l) + } + return DateTime_YYMMDDhhmmss.Parse(s, l) + } + if t, l, ok := DateTime_YYYY_M_D_h_m_s.Parse(s, l); ok { + return t, l, true + } + if t, l, ok := DateTime_YY_M_D_h_m_s.Parse(s, l); ok { + return t, l, true + } + return DateTime{}, 0, false +} + +func ParseDateInt64(i int64) (d Date, ok bool) { + if i == 0 { + return d, true + } + + d.day = uint8(i % 100) + i /= 100 + if d.day == 0 || d.day > 31 { + return d, false + } + + d.month = uint8(i % 100) + i /= 100 + if d.month == 0 || d.month > 12 { + return d, false + } + + d.year = uint16(i) + if d.year == 0 { + return d, false + } + if d.year < 100 { + if d.year < 70 { + d.year += 2000 + } else { + d.year += 1900 + } + } + if d.year < 1000 || d.year > 9999 { + return d, false + } + return d, true +} + +func ParseTimeInt64(i int64) (t Time, ok bool) { + if i == 0 { + return t, true + } + neg := false + if i < 0 { + i = -i + neg = true + } + + t.second = uint8(i % 100) + i /= 100 + if t.second > 59 { + return t, false + } + + t.minute = uint8(i % 100) + i /= 100 + if t.minute > 59 { + return t, false + } + + if i > 838 { + return t, false + } + t.hour = uint16(i) + if neg { + t.hour |= negMask + } + return t, true +} + +func ParseDateTimeInt64(i int64) (dt DateTime, ok bool) { + t := i % 1000000 + d := i / 1000000 + + if i == 0 { + return dt, true + } + if d == 0 { + return dt, false + } + dt.Time, ok = ParseTimeInt64(t) + if !ok { + return dt, false + } + dt.Date, ok = ParseDateInt64(d) + return dt, ok +} + +func ParseDateTimeFloat(f float64, prec int) (DateTime, int, bool) { + i, frac := math.Modf(f) + dt, ok := ParseDateTimeInt64(int64(i)) + nsec := int(frac * 1e9) + dt.Time.nanosecond = uint32(nsec) + if prec < 0 { + prec = DefaultPrecision + } else { + dt = dt.Round(prec) + } + return dt, prec, ok +} + +func ParseDateTimeDecimal(d decimal.Decimal, l int32, prec int) (DateTime, int, bool) { + id, frac := d.QuoRem(decimal.New(1, 0), 0) + i, _ := id.Int64() + dt, ok := ParseDateTimeInt64(i) + + rem, _ := frac.Mul(decimal.New(1, 9)).Int64() + dt.Time.nanosecond = uint32(rem) + + if prec < 0 { + prec = int(l) + } else { + dt = dt.Round(prec) + } + return dt, prec, ok +} + +func ParseDateFloat(f float64) (Date, bool) { + i, _ := math.Modf(f) + return ParseDateInt64(int64(i)) +} + +func ParseDateDecimal(d decimal.Decimal) (Date, bool) { + id, _ := d.QuoRem(decimal.New(1, 0), 0) + i, _ := id.Int64() + return ParseDateInt64(i) +} + +func ParseTimeFloat(f float64, prec int) (Time, int, bool) { + i, frac := math.Modf(f) + t, ok := ParseTimeInt64(int64(i)) + ns := int(math.Abs(frac * 1e9)) + t.nanosecond = uint32(ns) + + if prec < 0 { + prec = DefaultPrecision + } else { + t = t.Round(prec) + } + return t, prec, ok +} + +func ParseTimeDecimal(d decimal.Decimal, l int32, prec int) (Time, int, bool) { + id, frac := d.QuoRem(decimal.New(1, 0), 0) + i, _ := id.Int64() + + t, ok := ParseTimeInt64(i) + rem, _ := frac.Abs().Mul(decimal.New(1e9, 0)).Int64() + t.nanosecond = uint32(rem) + + if prec < 0 { + prec = int(l) + } else { + t = t.Round(prec) + } + // We only support a maximum of nanosecond precision, + // so if the decimal has any larger precision we truncate it. + if prec > 9 { + prec = 9 + } + return t, prec, ok +} diff --git a/go/mysql/datetime/parse_test.go b/go/mysql/datetime/parse_test.go new file mode 100644 index 00000000000..6ed342edfb3 --- /dev/null +++ b/go/mysql/datetime/parse_test.go @@ -0,0 +1,344 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datetime + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParseDate(t *testing.T) { + type date struct { + year int + month int + day int + } + tests := []struct { + input string + output date + err bool + }{ + {input: "0000-00-00", output: date{}}, + {input: "2022-10-12", output: date{2022, 10, 12}}, + {input: "22-10-12", output: date{2022, 10, 12}}, + {input: "20221012", output: date{2022, 10, 12}}, + {input: "221012", output: date{2022, 10, 12}}, + {input: "2012-12-31", output: date{2012, 12, 31}}, + {input: "2012-1-1", output: date{2012, 1, 1}}, + {input: "2012-12-1", output: date{2012, 12, 1}}, + {input: "2012-1-11", output: date{2012, 1, 11}}, + {input: "12-12-31", output: date{2012, 12, 31}}, + {input: "12-1-1", output: date{2012, 1, 1}}, + {input: "12-12-1", output: date{2012, 12, 1}}, + {input: "12-1-11", output: date{2012, 1, 11}}, + {input: "2012/12/31", output: date{2012, 12, 31}}, + {input: "2012^12^31", output: date{2012, 12, 31}}, + {input: "2012@12@31", output: date{2012, 12, 31}}, + {input: "20070523", output: date{2007, 5, 23}}, + {input: "20070523111111", output: date{2007, 5, 23}, err: true}, + // Go can't represent a zero month or day. + {input: "99000000", output: date{9900, 1, 1}, err: true}, + {input: "9900000", output: date{1999, 1, 1}, err: true}, + {input: "990000", output: date{1999, 1, 1}, err: true}, + {input: "070523", output: date{2007, 5, 23}}, + {input: "071332", err: true}, + {input: "2022", err: true}, + } + + for _, test := range tests { + t.Run(test.input, func(t *testing.T) { + got, ok := ParseDate(test.input) + if test.err { + if !got.IsZero() { + assert.Equal(t, test.output.year, got.Year()) + assert.Equal(t, test.output.month, got.Month()) + assert.Equal(t, test.output.day, got.Day()) + } + assert.Falsef(t, ok, "expected '%s' to fail to parse", test.input) + return + } + + require.True(t, ok) + assert.Equal(t, test.output.year, got.Year()) + assert.Equal(t, test.output.month, got.Month()) + assert.Equal(t, test.output.day, got.Day()) + }) + } +} + +func TestParseTime(t *testing.T) { + type testTime struct { + hour int + minute int + second int + nanosecond int + negative bool + } + tests := []struct { + input string + output testTime + norm string + l int + err bool + }{ + {input: "00:00:00", norm: "00:00:00.000000", output: testTime{}}, + {input: "00:00:00foo", norm: "00:00:00.000000", output: testTime{}, err: true}, + {input: "11:12:13", norm: "11:12:13.000000", output: testTime{11, 12, 13, 0, false}}, + {input: "11:12:13foo", norm: "11:12:13.000000", output: testTime{11, 12, 13, 0, false}, err: true}, + {input: "11:12:13.1", norm: "11:12:13.100000", output: testTime{11, 12, 13, 100000000, false}, l: 1}, + {input: "11:12:13.foo", norm: "11:12:13.000000", output: testTime{11, 12, 13, 0, false}, err: true}, + {input: "11:12:13.1foo", norm: "11:12:13.100000", output: testTime{11, 12, 13, 100000000, false}, l: 1, err: true}, + {input: "11:12:13.123456", norm: "11:12:13.123456", output: testTime{11, 12, 13, 123456000, false}, l: 6}, + {input: "11:12:13.000001", norm: "11:12:13.000001", output: testTime{11, 12, 13, 1000, false}, l: 6}, + {input: "11:12:13.000000", norm: "11:12:13.000000", output: testTime{11, 12, 13, 0, false}, l: 6}, + {input: "11:12:13.123456foo", norm: "11:12:13.123456", output: testTime{11, 12, 13, 123456000, false}, l: 6, err: true}, + {input: "3 11:12:13", norm: "83:12:13.000000", output: testTime{3*24 + 11, 12, 13, 0, false}}, + {input: "3 11:12:13foo", norm: "83:12:13.000000", output: testTime{3*24 + 11, 12, 13, 0, false}, err: true}, + {input: "3 41:12:13", norm: "113:12:13.000000", output: testTime{3*24 + 41, 12, 13, 0, false}}, + {input: "3 41:12:13foo", norm: "113:12:13.000000", output: testTime{3*24 + 41, 12, 13, 0, false}, err: true}, + {input: "34 23:12:13", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, err: true}, + {input: "35 11:12:13", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, err: true}, + {input: "11:12", norm: "11:12:00.000000", output: testTime{11, 12, 0, 0, false}}, + {input: "5 11:12", norm: "131:12:00.000000", output: testTime{5*24 + 11, 12, 0, 0, false}}, + {input: "-2 11:12", norm: "-59:12:00.000000", output: testTime{2*24 + 11, 12, 0, 0, true}}, + {input: "--2 11:12", norm: "00:00:00.000000", err: true}, + {input: "nonsense", norm: "00:00:00.000000", err: true}, + {input: "2 11", norm: "59:00:00.000000", output: testTime{2*24 + 11, 0, 0, 0, false}}, + {input: "2 -11", norm: "00:00:02.000000", output: testTime{0, 0, 2, 0, false}, err: true}, + {input: "13", norm: "00:00:13.000000", output: testTime{0, 0, 13, 0, false}}, + {input: "111213", norm: "11:12:13.000000", output: testTime{11, 12, 13, 0, false}}, + {input: "111213.123456", norm: "11:12:13.123456", output: testTime{11, 12, 13, 123456000, false}, l: 6}, + {input: "-111213", norm: "-11:12:13.000000", output: testTime{11, 12, 13, 0, true}}, + {input: "1213", norm: "00:12:13.000000", output: testTime{0, 12, 13, 0, false}}, + {input: "25:12:13", norm: "25:12:13.000000", output: testTime{25, 12, 13, 0, false}}, + {input: "32:35", norm: "32:35:00.000000", output: testTime{32, 35, 0, 0, false}}, + {input: "101:34:58", norm: "101:34:58.000000", output: testTime{101, 34, 58, 0, false}}, + {input: "1", norm: "00:00:01.000000", output: testTime{0, 0, 1, 0, false}}, + {input: "11", norm: "00:00:11.000000", output: testTime{0, 0, 11, 0, false}}, + {input: "111", norm: "00:01:11.000000", output: testTime{0, 1, 11, 0, false}}, + {input: "1111", norm: "00:11:11.000000", output: testTime{0, 11, 11, 0, false}}, + {input: "11111", norm: "01:11:11.000000", output: testTime{1, 11, 11, 0, false}}, + {input: "111111", norm: "11:11:11.000000", output: testTime{11, 11, 11, 0, false}}, + {input: "1foo", norm: "00:00:01.000000", output: testTime{0, 0, 1, 0, false}, err: true}, + {input: "11foo", norm: "00:00:11.000000", output: testTime{0, 0, 11, 0, false}, err: true}, + {input: "111foo", norm: "00:01:11.000000", output: testTime{0, 1, 11, 0, false}, err: true}, + {input: "1111foo", norm: "00:11:11.000000", output: testTime{0, 11, 11, 0, false}, err: true}, + {input: "11111foo", norm: "01:11:11.000000", output: testTime{1, 11, 11, 0, false}, err: true}, + {input: "111111foo", norm: "11:11:11.000000", output: testTime{11, 11, 11, 0, false}, err: true}, + {input: "1111111foo", norm: "111:11:11.000000", output: testTime{111, 11, 11, 0, false}, err: true}, + {input: "-1", norm: "-00:00:01.000000", output: testTime{0, 0, 1, 0, true}}, + {input: "-11", norm: "-00:00:11.000000", output: testTime{0, 0, 11, 0, true}}, + {input: "-111", norm: "-00:01:11.000000", output: testTime{0, 1, 11, 0, true}}, + {input: "-1111", norm: "-00:11:11.000000", output: testTime{0, 11, 11, 0, true}}, + {input: "-11111", norm: "-01:11:11.000000", output: testTime{1, 11, 11, 0, true}}, + {input: "-111111", norm: "-11:11:11.000000", output: testTime{11, 11, 11, 0, true}}, + {input: "-1111111", norm: "-111:11:11.000000", output: testTime{111, 11, 11, 0, true}}, + {input: "0", norm: "00:00:00.000000", output: testTime{0, 0, 0, 0, false}}, + {input: "1", norm: "00:00:01.000000", output: testTime{0, 0, 1, 0, false}}, + {input: "11", norm: "00:00:11.000000", output: testTime{0, 0, 11, 0, false}}, + {input: "111", norm: "00:01:11.000000", output: testTime{0, 1, 11, 0, false}}, + {input: "1111", norm: "00:11:11.000000", output: testTime{0, 11, 11, 0, false}}, + {input: "11111", norm: "01:11:11.000000", output: testTime{1, 11, 11, 0, false}}, + {input: "111111", norm: "11:11:11.000000", output: testTime{11, 11, 11, 0, false}}, + {input: "1111111", norm: "111:11:11.000000", output: testTime{111, 11, 11, 0, false}}, + {input: "-1.1", norm: "-00:00:01.100000", output: testTime{0, 0, 1, 100000000, true}, l: 1}, + {input: "-11.1", norm: "-00:00:11.100000", output: testTime{0, 0, 11, 100000000, true}, l: 1}, + {input: "-111.1", norm: "-00:01:11.100000", output: testTime{0, 1, 11, 100000000, true}, l: 1}, + {input: "-1111.1", norm: "-00:11:11.100000", output: testTime{0, 11, 11, 100000000, true}, l: 1}, + {input: "-11111.1", norm: "-01:11:11.100000", output: testTime{1, 11, 11, 100000000, true}, l: 1}, + {input: "-111111.1", norm: "-11:11:11.100000", output: testTime{11, 11, 11, 100000000, true}, l: 1}, + {input: "-1111111.1", norm: "-111:11:11.100000", output: testTime{111, 11, 11, 100000000, true}, l: 1}, + {input: "1.1", norm: "00:00:01.100000", output: testTime{0, 0, 1, 100000000, false}, l: 1}, + {input: "11.1", norm: "00:00:11.100000", output: testTime{0, 0, 11, 100000000, false}, l: 1}, + {input: "111.1", norm: "00:01:11.100000", output: testTime{0, 1, 11, 100000000, false}, l: 1}, + {input: "1111.1", norm: "00:11:11.100000", output: testTime{0, 11, 11, 100000000, false}, l: 1}, + {input: "11111.1", norm: "01:11:11.100000", output: testTime{1, 11, 11, 100000000, false}, l: 1}, + {input: "111111.1", norm: "11:11:11.100000", output: testTime{11, 11, 11, 100000000, false}, l: 1}, + {input: "1111111.1", norm: "111:11:11.100000", output: testTime{111, 11, 11, 100000000, false}, l: 1}, + {input: "20000101", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, err: true}, + {input: "-20000101", norm: "-838:59:59.000000", output: testTime{838, 59, 59, 0, true}, err: true}, + {input: "999995959", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, err: true}, + {input: "-999995959", norm: "-838:59:59.000000", output: testTime{838, 59, 59, 0, true}, err: true}, + {input: "4294965959", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, err: true}, + {input: "-4294965959", norm: "-838:59:59.000000", output: testTime{838, 59, 59, 0, true}, err: true}, + {input: "4294975959", norm: "00:00:00.000000", err: true}, + {input: "-4294975959", norm: "00:00:00.000000", err: true}, + {input: "\t34 foo\t", norm: "00:00:34.000000", output: testTime{0, 0, 34, 0, false}, err: true}, + {input: "\t34 1foo\t", norm: "817:00:00.000000", output: testTime{817, 0, 0, 0, false}, err: true}, + {input: "\t34 23foo\t", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, err: true}, + {input: "\t35 foo\t", norm: "00:00:35.000000", output: testTime{0, 0, 35, 0, false}, err: true}, + {input: "\t35 1foo\t", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, err: true}, + {input: " 255 foo", norm: "00:02:55.000000", output: testTime{0, 2, 55, 0, false}, err: true}, + {input: "255", norm: "00:02:55.000000", output: testTime{0, 2, 55, 0, false}}, + } + + for _, test := range tests { + t.Run(test.input, func(t *testing.T) { + got, l, ok := ParseTime(test.input, -1) + if test.err { + assert.Equal(t, test.output.hour, got.Hour()) + assert.Equal(t, test.output.minute, got.Minute()) + assert.Equal(t, test.output.second, got.Second()) + assert.Equal(t, test.output.nanosecond, got.Nanosecond()) + assert.Equal(t, test.norm, string(got.AppendFormat(nil, 6))) + assert.Equal(t, test.l, l) + assert.Falsef(t, ok, "did not fail to parse %s", test.input) + return + } + + require.True(t, ok) + assert.Equal(t, test.output.hour, got.Hour()) + assert.Equal(t, test.output.minute, got.Minute()) + assert.Equal(t, test.output.second, got.Second()) + assert.Equal(t, test.output.nanosecond, got.Nanosecond()) + assert.Equal(t, test.l, l) + assert.Equal(t, test.norm, string(got.AppendFormat(nil, 6))) + }) + } +} + +func TestParseDateTime(t *testing.T) { + type datetime struct { + year int + month int + day int + hour int + minute int + second int + nanosecond int + } + tests := []struct { + input string + output datetime + l int + err bool + }{ + {input: "0000-00-00 00:00:00", output: datetime{}}, + {input: "2022-10-12 11:12:13", output: datetime{2022, 10, 12, 11, 12, 13, 0}}, + {input: "2022-10-12 11:12:13.123456", output: datetime{2022, 10, 12, 11, 12, 13, 123456000}, l: 6}, + {input: "20221012111213.123456", output: datetime{2022, 10, 12, 11, 12, 13, 123456000}, l: 6}, + {input: "221012111213.123456", output: datetime{2022, 10, 12, 11, 12, 13, 123456000}, l: 6}, + {input: "2022101211121321321312", output: datetime{2022, 10, 12, 11, 12, 13, 0}, err: true}, + {input: "3284004416225113510", output: datetime{}, err: true}, + {input: "2012-12-31 11:30:45", output: datetime{2012, 12, 31, 11, 30, 45, 0}}, + {input: "2012^12^31 11+30+45", output: datetime{2012, 12, 31, 11, 30, 45, 0}}, + {input: "2012/12/31 11*30*45", output: datetime{2012, 12, 31, 11, 30, 45, 0}}, + {input: "2012@12@31 11^30^45", output: datetime{2012, 12, 31, 11, 30, 45, 0}}, + {input: "2012-12-31T11:30:45", output: datetime{2012, 12, 31, 11, 30, 45, 0}}, + {input: "20070523091528", output: datetime{2007, 5, 23, 9, 15, 28, 0}}, + {input: "070523091528", output: datetime{2007, 5, 23, 9, 15, 28, 0}}, + + {input: "2042-07-07 07:07:07", output: datetime{2042, 7, 7, 7, 7, 7, 0}}, + {input: "2042-7-07 07:07:07", output: datetime{2042, 7, 7, 7, 7, 7, 0}}, + {input: "2042-07-7 07:07:07", output: datetime{2042, 7, 7, 7, 7, 7, 0}}, + {input: "2042-07-07 7:07:07", output: datetime{2042, 7, 7, 7, 7, 7, 0}}, + {input: "2042-07-07 07:7:07", output: datetime{2042, 7, 7, 7, 7, 7, 0}}, + {input: "2042-07-07 07:07:7", output: datetime{2042, 7, 7, 7, 7, 7, 0}}, + {input: "2042-7-7 7:7:7", output: datetime{2042, 7, 7, 7, 7, 7, 0}}, + + {input: "42-07-07 07:07:07", output: datetime{2042, 7, 7, 7, 7, 7, 0}}, + {input: "42-7-07 07:07:07", output: datetime{2042, 7, 7, 7, 7, 7, 0}}, + {input: "42-07-7 07:07:07", output: datetime{2042, 7, 7, 7, 7, 7, 0}}, + {input: "42-07-07 7:07:07", output: datetime{2042, 7, 7, 7, 7, 7, 0}}, + {input: "42-07-07 07:7:07", output: datetime{2042, 7, 7, 7, 7, 7, 0}}, + {input: "42-07-07 07:07:7", output: datetime{2042, 7, 7, 7, 7, 7, 0}}, + {input: "42-7-7 7:7:7", output: datetime{2042, 7, 7, 7, 7, 7, 0}}, + } + + for _, test := range tests { + t.Run(test.input, func(t *testing.T) { + got, l, ok := ParseDateTime(test.input, -1) + if test.err { + if !got.IsZero() { + assert.Equal(t, test.output.year, got.Date.Year()) + assert.Equal(t, test.output.month, got.Date.Month()) + assert.Equal(t, test.output.day, got.Date.Day()) + assert.Equal(t, test.output.hour, got.Time.Hour()) + assert.Equal(t, test.output.minute, got.Time.Minute()) + assert.Equal(t, test.output.second, got.Time.Second()) + assert.Equal(t, test.output.nanosecond, got.Time.Nanosecond()) + assert.Equal(t, test.l, l) + } + assert.Falsef(t, ok, "did not fail to parse %s", test.input) + return + } + + require.True(t, ok) + assert.Equal(t, test.output.year, got.Date.Year()) + assert.Equal(t, test.output.month, got.Date.Month()) + assert.Equal(t, test.output.day, got.Date.Day()) + assert.Equal(t, test.output.hour, got.Time.Hour()) + assert.Equal(t, test.output.minute, got.Time.Minute()) + assert.Equal(t, test.output.second, got.Time.Second()) + assert.Equal(t, test.output.nanosecond, got.Time.Nanosecond()) + assert.Equal(t, test.l, l) + }) + } +} + +func TestParseDateTimeInt64(t *testing.T) { + type datetime struct { + year int + month int + day int + hour int + minute int + second int + nanosecond int + } + tests := []struct { + input int64 + output datetime + l int + err bool + }{ + {input: 1, output: datetime{}, err: true}, + {input: 20221012000000, output: datetime{2022, 10, 12, 0, 0, 0, 0}}, + {input: 20221012112233, output: datetime{2022, 10, 12, 11, 22, 33, 0}}, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%d", test.input), func(t *testing.T) { + got, ok := ParseDateTimeInt64(test.input) + if test.err { + if !got.IsZero() { + assert.Equal(t, test.output.year, got.Date.Year()) + assert.Equal(t, test.output.month, got.Date.Month()) + assert.Equal(t, test.output.day, got.Date.Day()) + assert.Equal(t, test.output.hour, got.Time.Hour()) + assert.Equal(t, test.output.minute, got.Time.Minute()) + assert.Equal(t, test.output.second, got.Time.Second()) + assert.Equal(t, test.output.nanosecond, got.Time.Nanosecond()) + } + assert.Falsef(t, ok, "did not fail to parse %s", test.input) + return + } + + require.True(t, ok) + assert.Equal(t, test.output.year, got.Date.Year()) + assert.Equal(t, test.output.month, got.Date.Month()) + assert.Equal(t, test.output.day, got.Date.Day()) + assert.Equal(t, test.output.hour, got.Time.Hour()) + assert.Equal(t, test.output.minute, got.Time.Minute()) + assert.Equal(t, test.output.second, got.Time.Second()) + assert.Equal(t, test.output.nanosecond, got.Time.Nanosecond()) + }) + } +} diff --git a/go/mysql/datetime/spec.go b/go/mysql/datetime/spec.go new file mode 100644 index 00000000000..ce19126ce55 --- /dev/null +++ b/go/mysql/datetime/spec.go @@ -0,0 +1,565 @@ +/* +Copyright 2016 lestrrat +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datetime + +import ( + "time" +) + +type Spec interface { + parser + formatter +} + +type formatter interface { + format(dst []byte, t DateTime, prec uint8) []byte +} + +type parser interface { + parse(*timeparts, string) (string, bool) +} + +type numeric interface { + numeric(t DateTime) (int, int) +} + +var shortDayNames = []string{ + "Sun", + "Mon", + "Tue", + "Wed", + "Thu", + "Fri", + "Sat", +} + +var shortMonthNames = []string{ + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec", +} + +type fmtWeekdayNameShort struct{} + +func (fmtWeekdayNameShort) format(dst []byte, t DateTime, prec uint8) []byte { + return append(dst, t.Date.Weekday().String()[:3]...) +} + +func (fmtWeekdayNameShort) parse(_ *timeparts, b string) (out string, ok bool) { + _, out, ok = lookup(shortDayNames, b) + return +} + +type fmtMonthNameShort struct{} + +func (fmtMonthNameShort) format(dst []byte, t DateTime, prec uint8) []byte { + return append(dst, time.Month(t.Date.Month()).String()[:3]...) +} + +func (fmtMonthNameShort) parse(tp *timeparts, b string) (out string, ok bool) { + tp.month, out, ok = lookup(shortMonthNames, b) + return +} + +type fmtMonth struct { + zero bool +} + +func (s fmtMonth) format(dst []byte, t DateTime, prec uint8) []byte { + if s.zero { + return appendInt(dst, t.Date.Month(), 2) + } + return appendInt(dst, t.Date.Month(), 0) +} + +func (s fmtMonth) parse(tp *timeparts, b string) (out string, ok bool) { + tp.month, out, ok = getnum(b, s.zero) + if ok && (tp.month < 0 || tp.month > 12) { + ok = false + } + return +} + +func (s fmtMonth) numeric(t DateTime) (int, int) { + return t.Date.Month(), 100 +} + +type fmtMonthDaySuffix struct{} + +func (fmtMonthDaySuffix) format(dst []byte, t DateTime, prec uint8) []byte { + d := t.Date.Day() + dst = appendInt(dst, d, 0) + + switch { + case d >= 11 && d < 20: + return append(dst, "th"...) + case d%10 == 1: + return append(dst, "st"...) + case d%10 == 2: + return append(dst, "nd"...) + case d%10 == 3: + return append(dst, "rd"...) + default: + return append(dst, "th"...) + } +} + +func (d fmtMonthDaySuffix) parse(t *timeparts, bytes string) (string, bool) { + //TODO implement me + panic("implement me") +} + +type fmtDay struct { + zero bool +} + +func (s fmtDay) format(dst []byte, t DateTime, prec uint8) []byte { + if s.zero { + return appendInt(dst, t.Date.Day(), 2) + } + return appendInt(dst, t.Date.Day(), 0) +} + +func (s fmtDay) parse(tp *timeparts, b string) (out string, ok bool) { + tp.day, out, ok = getnum(b, s.zero) + return +} + +func (fmtDay) numeric(t DateTime) (int, int) { + return t.Date.Day(), 100 +} + +type fmtMicroseconds struct{} + +func appendNsec(b []byte, nsec int, prec int) []byte { + f := nsec / 1000 + l := len(b) + prec + b = appendInt(b, f, 6) + for len(b) < l { + b = append(b, '0') + } + return b[:l] +} + +func (fmtMicroseconds) format(dst []byte, t DateTime, prec uint8) []byte { + return appendNsec(dst, t.Time.Nanosecond(), 6) +} + +func (f fmtMicroseconds) parse(t *timeparts, bytes string) (string, bool) { + //TODO implement me + panic("implement me") +} + +type fmtHour24 struct { + zero bool +} + +func (s fmtHour24) format(dst []byte, t DateTime, prec uint8) []byte { + if s.zero { + return appendInt(dst, t.Time.Hour(), 2) + } + return appendInt(dst, t.Time.Hour(), 0) +} + +func (s fmtHour24) parse(tp *timeparts, b string) (out string, ok bool) { + tp.hour, out, ok = getnum(b, s.zero) + if tp.hour < 0 || 24 <= tp.hour { + ok = false + } + return +} + +func (fmtHour24) numeric(t DateTime) (int, int) { + return t.Time.Hour(), 100 +} + +type fmtHour12 struct { + zero bool +} + +func (f fmtHour12) format(dst []byte, t DateTime, prec uint8) []byte { + hr, _ := f.numeric(t) + if f.zero { + return appendInt(dst, hr, 2) + } + return appendInt(dst, hr, 0) +} + +func (f fmtHour12) parse(tp *timeparts, b string) (out string, ok bool) { + tp.hour, out, ok = getnum(b, f.zero) + if tp.hour < 0 || 12 < tp.hour { + ok = false + } + return +} + +func (f fmtHour12) numeric(t DateTime) (int, int) { + hr := t.Time.Hour() % 12 + if hr == 0 { + hr = 12 + } + return hr, 100 +} + +type fmtMin struct { + zero bool +} + +func (s fmtMin) format(dst []byte, t DateTime, prec uint8) []byte { + if s.zero { + return appendInt(dst, t.Time.Minute(), 2) + } + return appendInt(dst, t.Time.Minute(), 0) +} + +func (s fmtMin) parse(tp *timeparts, b string) (out string, ok bool) { + tp.min, out, ok = getnum(b, s.zero) + if tp.min < 0 || 60 <= tp.min { + ok = false + } + return +} + +func (s fmtMin) numeric(t DateTime) (int, int) { + return t.Time.Minute(), 100 +} + +type fmtZeroYearDay struct{} + +func (fmtZeroYearDay) format(dst []byte, t DateTime, prec uint8) []byte { + return appendInt(dst, t.Date.Yearday(), 3) +} +func (j fmtZeroYearDay) parse(t *timeparts, bytes string) (string, bool) { + //TODO implement me + panic("implement me") +} + +type fmtMonthName struct{} + +func (fmtMonthName) format(dst []byte, t DateTime, prec uint8) []byte { + return append(dst, time.Month(t.Date.Month()).String()...) +} + +func (m fmtMonthName) parse(t *timeparts, bytes string) (string, bool) { + //TODO implement me + panic("implement me") +} + +type fmtAMorPM struct{} + +func (fmtAMorPM) format(dst []byte, t DateTime, prec uint8) []byte { + if t.Time.Hour() < 12 { + return append(dst, "AM"...) + } + return append(dst, "PM"...) +} + +func (p fmtAMorPM) parse(t *timeparts, bytes string) (string, bool) { + //TODO implement me + panic("implement me") +} + +type fmtFullTime12 struct{} + +func (fmtFullTime12) format(dst []byte, t DateTime, prec uint8) []byte { + dst = (fmtHour12{true}).format(dst, t, prec) + dst = append(dst, ':') + dst = (fmtMin{true}).format(dst, t, prec) + dst = append(dst, ':') + dst = (fmtSecond{true, false}).format(dst, t, prec) + dst = append(dst, ' ') + dst = (fmtAMorPM{}).format(dst, t, prec) + return dst +} + +func (r fmtFullTime12) parse(t *timeparts, bytes string) (string, bool) { + //TODO implement me + panic("implement me") +} + +type fmtSecond struct { + zero bool + nsec bool +} + +func (s fmtSecond) format(dst []byte, t DateTime, prec uint8) []byte { + if s.zero { + dst = appendInt(dst, t.Time.Second(), 2) + } else { + dst = appendInt(dst, t.Time.Second(), 0) + } + if s.nsec && prec > 0 { + dst = append(dst, '.') + dst = appendNsec(dst, t.Time.Nanosecond(), int(prec)) + } + return dst +} + +func (s fmtSecond) parse(tp *timeparts, b string) (out string, ok bool) { + tp.sec, out, ok = getnum(b, s.zero) + if tp.sec < 0 || 60 <= tp.sec { + return "", false + } + if s.nsec && len(out) >= 2 && out[0] == '.' && isDigit(out, 1) { + n := 2 + for ; n < len(out) && isDigit(out, n); n++ { + } + var l int + tp.nsec, l, ok = parseNanoseconds(out, n) + tp.prec = uint8(l) + out = out[n:] + } + return +} + +func (s fmtSecond) numeric(t DateTime) (int, int) { + return t.Time.Second(), 100 +} + +type fmtFullTime24 struct{} + +func (fmtFullTime24) format(dst []byte, t DateTime, prec uint8) []byte { + dst = (fmtHour24{true}).format(dst, t, prec) + dst = append(dst, ':') + dst = (fmtMin{true}).format(dst, t, prec) + dst = append(dst, ':') + dst = (fmtSecond{true, false}).format(dst, t, prec) + return dst +} + +func (t2 fmtFullTime24) parse(t *timeparts, bytes string) (string, bool) { + //TODO implement me + panic("implement me") +} + +type fmtWeek0 struct{} + +func (fmtWeek0) format(dst []byte, t DateTime, prec uint8) []byte { + year, week := t.Date.SundayWeek() + if year < t.Date.Year() { + week = 0 + } + return appendInt(dst, week, 2) +} + +func (u fmtWeek0) parse(t *timeparts, bytes string) (string, bool) { + //TODO implement me + panic("implement me") +} + +type fmtWeek1 struct{} + +func (fmtWeek1) format(dst []byte, t DateTime, prec uint8) []byte { + year, week := t.Date.ISOWeek() + if year < t.Date.Year() { + week = 0 + } + return appendInt(dst, week, 2) +} + +func (u fmtWeek1) parse(t *timeparts, bytes string) (string, bool) { + //TODO implement me + panic("implement me") +} + +type fmtWeek2 struct{} + +func (fmtWeek2) format(dst []byte, t DateTime, prec uint8) []byte { + _, week := t.Date.SundayWeek() + return appendInt(dst, week, 2) +} + +func (v fmtWeek2) parse(t *timeparts, bytes string) (string, bool) { + //TODO implement me + panic("implement me") +} + +type fmtWeek3 struct{} + +func (fmtWeek3) format(dst []byte, t DateTime, prec uint8) []byte { + _, week := t.Date.ISOWeek() + return appendInt(dst, week, 2) +} + +func (v fmtWeek3) parse(t *timeparts, bytes string) (string, bool) { + //TODO implement me + panic("implement me") +} + +type fmtWeekdayName struct{} + +func (fmtWeekdayName) format(dst []byte, t DateTime, prec uint8) []byte { + return append(dst, t.Date.Weekday().String()...) +} +func (w fmtWeekdayName) parse(t *timeparts, bytes string) (string, bool) { + //TODO implement me + panic("implement me") +} + +type fmtWeekday struct{} + +func (fmtWeekday) format(dst []byte, t DateTime, prec uint8) []byte { + return appendInt(dst, int(t.Date.Weekday()), 0) +} +func (w fmtWeekday) parse(t *timeparts, bytes string) (string, bool) { + //TODO implement me + panic("implement me") +} + +type fmtYearForWeek2 struct{} + +func (fmtYearForWeek2) format(dst []byte, t DateTime, prec uint8) []byte { + year, _ := t.Date.SundayWeek() + return appendInt(dst, year, 4) +} +func (x fmtYearForWeek2) parse(t *timeparts, bytes string) (string, bool) { + //TODO implement me + panic("implement me") +} + +type fmtYearForWeek3 struct{} + +func (fmtYearForWeek3) format(dst []byte, t DateTime, prec uint8) []byte { + year, _ := t.Date.ISOWeek() + return appendInt(dst, year, 4) +} +func (x fmtYearForWeek3) parse(t *timeparts, bytes string) (string, bool) { + //TODO implement me + panic("implement me") +} + +type fmtYearLong struct{} + +func (fmtYearLong) format(dst []byte, t DateTime, prec uint8) []byte { + return appendInt(dst, t.Date.Year(), 4) +} + +func (y fmtYearLong) parse(tp *timeparts, b string) (out string, ok bool) { + if len(b) >= 4 { + b, out = b[0:4], b[4:] + tp.year, ok = atoi(b) + } + return +} + +func (y fmtYearLong) numeric(t DateTime) (int, int) { + return t.Date.Year(), 10000 +} + +type fmtYearShort struct{} + +func (f fmtYearShort) format(dst []byte, t DateTime, prec uint8) []byte { + y, _ := f.numeric(t) + return appendInt(dst, y, 2) +} + +func (fmtYearShort) parse(tp *timeparts, b string) (out string, ok bool) { + if len(b) >= 2 { + b, out = b[0:2], b[2:] + if tp.year, ok = atoi(b); ok { + if tp.year >= 70 { + tp.year += 1900 + } else { + tp.year += 2000 + } + } + } + return +} + +func (fmtYearShort) numeric(t DateTime) (int, int) { + y := t.Date.Year() + if y < 0 { + y = -y + } + return y % 100, 2 +} + +type fmtVerbatim struct { + s string +} + +func (v *fmtVerbatim) parse(t *timeparts, bytes string) (string, bool) { + //TODO implement me + panic("implement me") +} + +func (v *fmtVerbatim) format(dst []byte, t DateTime, prec uint8) []byte { + return append(dst, v.s...) +} + +type fmtSeparator byte + +func (s fmtSeparator) format(dst []byte, t DateTime, prec uint8) []byte { + return append(dst, byte(s)) +} + +func (s fmtSeparator) parse(_ *timeparts, b string) (string, bool) { + if len(b) > 0 { + return b[1:], isSeparator(b[0]) + } + return "", false +} + +func isSeparator(b byte) bool { + switch b { + case '!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', '_', '`', '{', '|', '}', '~': + return true + default: + return false + } +} + +type fmtTimeSeparator struct{} + +func (s fmtTimeSeparator) format(dst []byte, t DateTime, prec uint8) []byte { + return append(dst, byte(' ')) +} + +func (s fmtTimeSeparator) parse(_ *timeparts, b string) (string, bool) { + if len(b) > 0 { + if b[0] == 'T' { + return b[1:], true + } + if isSpace(b[0]) { + for len(b) > 0 && isSpace(b[0]) { + b = b[1:] + } + return b, true + } + } + return "", false +} + +func isSpace(b byte) bool { + switch b { + case ' ', '\t', '\n', '\v', '\r': + return true + default: + return false + } +} diff --git a/go/mysql/datetime/strftime.go b/go/mysql/datetime/strftime.go new file mode 100644 index 00000000000..542142d3fcb --- /dev/null +++ b/go/mysql/datetime/strftime.go @@ -0,0 +1,144 @@ +/* +Copyright 2016 lestrrat +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datetime + +import ( + "fmt" + "strings" +) + +func compile(ds map[byte]Spec, p string, exec func(Spec)) error { + for l := len(p); l > 0; l = len(p) { + i := strings.IndexByte(p, '%') + if i < 0 { + exec(&fmtVerbatim{s: p}) + break + } + if i == l-1 { + return fmt.Errorf(`stray %% at the end of pattern`) + } + + // we found a '%'. we need the next byte to decide what to do next + // we already know that i < l - 1 + // everything up to the i is verbatim + if i > 0 { + exec(&fmtVerbatim{s: p[:i]}) + p = p[i:] + } + + if spec, ok := ds[p[1]]; ok { + if spec == nil { + return fmt.Errorf(`unsupported format specifier: %%%c`, p[1]) + } + exec(spec) + } else { + exec(&fmtVerbatim{s: p[1:2]}) + } + p = p[2:] + } + return nil +} + +// Format takes the format `p` and the time `t` to produce the +// format date/time. Note that this function re-compiles the +// pattern every time it is called. +// +// If you know beforehand that you will be reusing the pattern +// within your application, consider creating a `Strftime` object +// and reusing it. +func Format(p string, t DateTime, prec uint8) ([]byte, error) { + var dst []byte + err := compile(DefaultMySQLStrftime, p, func(a Spec) { + dst = a.format(dst, t, prec) + }) + return dst, err +} + +// Strftime is the object that represents a compiled strftime pattern +type Strftime struct { + pattern string + compiled []Spec +} + +// New creates a new Strftime object. If the compilation fails, then +// an error is returned in the second argument. +func New(p string) (*Strftime, error) { + var list []Spec + err := compile(DefaultMySQLStrftime, p, func(a Spec) { + list = append(list, a) + }) + if err != nil { + return nil, err + } + return &Strftime{pattern: p, compiled: list}, nil +} + +// Pattern returns the original pattern string +func (f *Strftime) Pattern() string { + return f.pattern +} + +func (f *Strftime) Format(dt DateTime, prec uint8) []byte { + return f.format(make([]byte, 0, len(f.pattern)+10), dt, prec) +} + +func (f *Strftime) AppendFormat(dst []byte, t DateTime, prec uint8) []byte { + return f.format(dst, t, prec) +} + +func (f *Strftime) format(dst []byte, t DateTime, prec uint8) []byte { + for _, w := range f.compiled { + dst = w.format(dst, t, prec) + } + return dst +} + +func (f *Strftime) FormatString(t DateTime, prec uint8) string { + return string(f.Format(t, prec)) +} + +func (f *Strftime) FormatNumeric(t DateTime) (n int64) { + for _, w := range f.compiled { + w := w.(numeric) + x, width := w.numeric(t) + n = n*int64(width) + int64(x) + } + return n +} + +func (f *Strftime) parse(s string, prec int) (DateTime, string, int, bool) { + var tp timeparts + tp.month = -1 + tp.day = -1 + tp.yday = -1 + + var ok bool + for _, w := range f.compiled { + s, ok = w.parse(&tp, s) + if !ok { + return DateTime{}, "", 0, false + } + } + t, l, ok := tp.toDateTime(prec) + return t, s, l, ok +} + +func (f *Strftime) Parse(s string, prec int) (DateTime, int, bool) { + t, s, l, ok := f.parse(s, prec) + return t, l, ok && len(s) == 0 +} diff --git a/go/mysql/datetime/testdata/daynr_to_date.json b/go/mysql/datetime/testdata/daynr_to_date.json new file mode 100644 index 00000000000..3bb175d97e2 --- /dev/null +++ b/go/mysql/datetime/testdata/daynr_to_date.json @@ -0,0 +1,8188 @@ +[[456, 1, 4, 1], +[559, 1, 7, 13], +[572, 1, 7, 26], +[618, 1, 9, 10], +[785, 2, 2, 24], +[911, 2, 6, 30], +[1067, 2, 12, 3], +[1173, 3, 3, 19], +[1214, 3, 4, 29], +[1402, 3, 11, 3], +[1518, 4, 2, 27], +[1680, 4, 8, 7], +[1706, 4, 9, 2], +[1805, 4, 12, 10], +[1829, 5, 1, 3], +[1861, 5, 2, 4], +[1891, 5, 3, 6], +[1983, 5, 6, 6], +[2043, 5, 8, 5], +[2223, 6, 2, 1], +[2260, 6, 3, 10], +[2367, 6, 6, 25], +[2449, 6, 9, 15], +[2533, 6, 12, 8], +[2682, 7, 5, 6], +[2769, 7, 8, 1], +[2881, 7, 11, 21], +[2962, 8, 2, 10], +[3014, 8, 4, 2], +[3206, 8, 10, 11], +[3253, 8, 11, 27], +[3416, 9, 5, 9], +[3422, 9, 5, 15], +[3601, 9, 11, 10], +[3637, 9, 12, 16], +[3794, 10, 5, 22], +[3885, 10, 8, 21], +[3966, 10, 11, 10], +[3978, 10, 11, 22], +[4012, 10, 12, 26], +[4087, 11, 3, 11], +[4101, 11, 3, 25], +[4138, 11, 5, 1], +[4317, 11, 10, 27], +[4421, 12, 2, 8], +[4440, 12, 2, 27], +[4450, 12, 3, 8], +[4615, 12, 8, 20], +[4671, 12, 10, 15], +[4855, 13, 4, 17], +[4879, 13, 5, 11], +[5057, 13, 11, 5], +[5257, 14, 5, 24], +[5272, 14, 6, 8], +[5313, 14, 7, 19], +[5323, 14, 7, 29], +[5409, 14, 10, 23], +[5525, 15, 2, 16], +[5656, 15, 6, 27], +[5829, 15, 12, 17], +[5914, 16, 3, 11], +[6010, 16, 6, 15], +[6104, 16, 9, 17], +[6241, 17, 2, 1], +[6303, 17, 4, 4], +[6397, 17, 7, 7], +[6516, 17, 11, 3], +[6695, 18, 5, 1], +[6833, 18, 9, 16], +[6994, 19, 2, 24], +[7061, 19, 5, 2], +[7110, 19, 6, 20], +[7229, 19, 10, 17], +[7314, 20, 1, 10], +[7374, 20, 3, 10], +[7462, 20, 6, 6], +[7555, 20, 9, 7], +[7736, 21, 3, 7], +[7862, 21, 7, 11], +[7926, 21, 9, 13], +[7938, 21, 9, 25], +[8021, 21, 12, 17], +[8087, 22, 2, 21], +[8159, 22, 5, 4], +[8218, 22, 7, 2], +[8233, 22, 7, 17], +[8337, 22, 10, 29], +[8488, 23, 3, 29], +[8612, 23, 7, 31], +[8677, 23, 10, 4], +[8705, 23, 11, 1], +[8783, 24, 1, 18], +[8815, 24, 2, 19], +[8944, 24, 6, 27], +[9077, 24, 11, 7], +[9218, 25, 3, 28], +[9376, 25, 9, 2], +[9512, 26, 1, 16], +[9628, 26, 5, 12], +[9764, 26, 9, 25], +[9862, 27, 1, 1], +[10027, 27, 6, 15], +[10161, 27, 10, 27], +[10273, 28, 2, 16], +[10373, 28, 5, 26], +[10542, 28, 11, 11], +[10700, 29, 4, 18], +[10875, 29, 10, 10], +[10995, 30, 2, 7], +[11121, 30, 6, 13], +[11157, 30, 7, 19], +[11314, 30, 12, 23], +[11498, 31, 6, 25], +[11603, 31, 10, 8], +[11779, 32, 4, 1], +[11931, 32, 8, 31], +[12026, 32, 12, 4], +[12063, 33, 1, 10], +[12127, 33, 3, 15], +[12306, 33, 9, 10], +[12337, 33, 10, 11], +[12491, 34, 3, 14], +[12657, 34, 8, 27], +[12832, 35, 2, 18], +[12877, 35, 4, 4], +[13005, 35, 8, 10], +[13038, 35, 9, 12], +[13198, 36, 2, 19], +[13346, 36, 7, 16], +[13532, 37, 1, 18], +[13701, 37, 7, 6], +[13727, 37, 8, 1], +[13829, 37, 11, 11], +[13849, 37, 12, 1], +[13969, 38, 3, 31], +[14112, 38, 8, 21], +[14173, 38, 10, 21], +[14177, 38, 10, 25], +[14363, 39, 4, 29], +[14513, 39, 9, 26], +[14678, 40, 3, 9], +[14846, 40, 8, 24], +[15036, 41, 3, 2], +[15159, 41, 7, 3], +[15266, 41, 10, 18], +[15450, 42, 4, 20], +[15618, 42, 10, 5], +[15683, 42, 12, 9], +[15754, 43, 2, 18], +[15883, 43, 6, 27], +[16058, 43, 12, 19], +[16082, 44, 1, 12], +[16198, 44, 5, 7], +[16375, 44, 10, 31], +[16467, 45, 1, 31], +[16486, 45, 2, 19], +[16511, 45, 3, 16], +[16642, 45, 7, 25], +[16751, 45, 11, 11], +[16949, 46, 5, 28], +[17077, 46, 10, 3], +[17116, 46, 11, 11], +[17206, 47, 2, 9], +[17388, 47, 8, 10], +[17562, 48, 1, 31], +[17741, 48, 7, 28], +[17873, 48, 12, 7], +[17963, 49, 3, 7], +[17983, 49, 3, 27], +[18069, 49, 6, 21], +[18144, 49, 9, 4], +[18343, 50, 3, 22], +[18387, 50, 5, 5], +[18519, 50, 9, 14], +[18645, 51, 1, 18], +[18729, 51, 4, 12], +[18742, 51, 4, 25], +[18839, 51, 7, 31], +[18890, 51, 9, 20], +[19027, 52, 2, 4], +[19042, 52, 2, 19], +[19128, 52, 5, 15], +[19296, 52, 10, 30], +[19416, 53, 2, 27], +[19566, 53, 7, 27], +[19624, 53, 9, 23], +[19685, 53, 11, 23], +[19821, 54, 4, 8], +[19909, 54, 7, 5], +[20006, 54, 10, 10], +[20139, 55, 2, 20], +[20336, 55, 9, 5], +[20505, 56, 2, 21], +[20655, 56, 7, 20], +[20841, 57, 1, 22], +[20906, 57, 3, 28], +[20931, 57, 4, 22], +[21114, 57, 10, 22], +[21158, 57, 12, 5], +[21246, 58, 3, 3], +[21414, 58, 8, 18], +[21528, 58, 12, 10], +[21550, 59, 1, 1], +[21582, 59, 2, 2], +[21731, 59, 7, 1], +[21903, 59, 12, 20], +[22062, 60, 5, 27], +[22128, 60, 8, 1], +[22212, 60, 10, 24], +[22411, 61, 5, 11], +[22568, 61, 10, 15], +[22591, 61, 11, 7], +[22647, 62, 1, 2], +[22710, 62, 3, 6], +[22840, 62, 7, 14], +[22850, 62, 7, 24], +[23046, 63, 2, 5], +[23231, 63, 8, 9], +[23248, 63, 8, 26], +[23273, 63, 9, 20], +[23347, 63, 12, 3], +[23444, 64, 3, 9], +[23482, 64, 4, 16], +[23682, 64, 11, 2], +[23806, 65, 3, 6], +[23957, 65, 8, 4], +[24061, 65, 11, 16], +[24120, 66, 1, 14], +[24160, 66, 2, 23], +[24298, 66, 7, 11], +[24450, 66, 12, 10], +[24567, 67, 4, 6], +[24704, 67, 8, 21], +[24773, 67, 10, 29], +[24973, 68, 5, 16], +[25079, 68, 8, 30], +[25170, 68, 11, 29], +[25350, 69, 5, 28], +[25496, 69, 10, 21], +[25611, 70, 2, 13], +[25732, 70, 6, 14], +[25782, 70, 8, 3], +[25937, 71, 1, 5], +[26109, 71, 6, 26], +[26300, 72, 1, 3], +[26319, 72, 1, 22], +[26325, 72, 1, 28], +[26448, 72, 5, 30], +[26627, 72, 11, 25], +[26752, 73, 3, 30], +[26775, 73, 4, 22], +[26836, 73, 6, 22], +[26870, 73, 7, 26], +[26967, 73, 10, 31], +[27013, 73, 12, 16], +[27028, 73, 12, 31], +[27164, 74, 5, 16], +[27350, 74, 11, 18], +[27412, 75, 1, 19], +[27503, 75, 4, 20], +[27692, 75, 10, 26], +[27700, 75, 11, 3], +[27875, 76, 4, 26], +[27937, 76, 6, 27], +[27984, 76, 8, 13], +[28058, 76, 10, 26], +[28217, 77, 4, 3], +[28264, 77, 5, 20], +[28436, 77, 11, 8], +[28620, 78, 5, 11], +[28741, 78, 9, 9], +[28878, 79, 1, 24], +[28916, 79, 3, 3], +[29032, 79, 6, 27], +[29084, 79, 8, 18], +[29096, 79, 8, 30], +[29136, 79, 10, 9], +[29143, 79, 10, 16], +[29218, 79, 12, 30], +[29335, 80, 4, 25], +[29361, 80, 5, 21], +[29388, 80, 6, 17], +[29468, 80, 9, 5], +[29482, 80, 9, 19], +[29665, 81, 3, 21], +[29666, 81, 3, 22], +[29672, 81, 3, 28], +[29759, 81, 6, 23], +[29861, 81, 10, 3], +[30050, 82, 4, 10], +[30101, 82, 5, 31], +[30293, 82, 12, 9], +[30338, 83, 1, 23], +[30513, 83, 7, 17], +[30667, 83, 12, 18], +[30859, 84, 6, 27], +[30925, 84, 9, 1], +[31078, 85, 2, 1], +[31101, 85, 2, 24], +[31134, 85, 3, 29], +[31284, 85, 8, 26], +[31400, 85, 12, 20], +[31470, 86, 2, 28], +[31628, 86, 8, 5], +[31803, 87, 1, 27], +[31850, 87, 3, 15], +[32031, 87, 9, 12], +[32093, 87, 11, 13], +[32293, 88, 5, 31], +[32411, 88, 9, 26], +[32565, 89, 2, 27], +[32710, 89, 7, 22], +[32889, 90, 1, 17], +[33066, 90, 7, 13], +[33243, 91, 1, 6], +[33370, 91, 5, 13], +[33462, 91, 8, 13], +[33619, 92, 1, 17], +[33730, 92, 5, 7], +[33874, 92, 9, 28], +[33925, 92, 11, 18], +[34037, 93, 3, 10], +[34229, 93, 9, 18], +[34411, 94, 3, 19], +[34448, 94, 4, 25], +[34602, 94, 9, 26], +[34747, 95, 2, 18], +[34914, 95, 8, 4], +[35003, 95, 11, 1], +[35106, 96, 2, 12], +[35159, 96, 4, 5], +[35182, 96, 4, 28], +[35382, 96, 11, 14], +[35387, 96, 11, 19], +[35555, 97, 5, 6], +[35636, 97, 7, 26], +[35710, 97, 10, 8], +[35876, 98, 3, 23], +[36055, 98, 9, 18], +[36182, 99, 1, 23], +[36336, 99, 6, 26], +[36510, 99, 12, 17], +[36523, 99, 12, 30], +[36646, 100, 5, 2], +[36803, 100, 10, 6], +[36881, 100, 12, 23], +[37053, 101, 6, 13], +[37159, 101, 9, 27], +[37316, 102, 3, 3], +[37388, 102, 5, 14], +[37545, 102, 10, 18], +[37624, 103, 1, 5], +[37666, 103, 2, 16], +[37705, 103, 3, 27], +[37809, 103, 7, 9], +[37836, 103, 8, 5], +[37868, 103, 9, 6], +[38031, 104, 2, 16], +[38175, 104, 7, 9], +[38269, 104, 10, 11], +[38361, 105, 1, 11], +[38483, 105, 5, 13], +[38642, 105, 10, 19], +[38714, 105, 12, 30], +[38795, 106, 3, 21], +[38893, 106, 6, 27], +[38983, 106, 9, 25], +[39116, 107, 2, 5], +[39262, 107, 7, 1], +[39336, 107, 9, 13], +[39456, 108, 1, 11], +[39521, 108, 3, 16], +[39529, 108, 3, 24], +[39719, 108, 9, 30], +[39888, 109, 3, 18], +[39988, 109, 6, 26], +[40092, 109, 10, 8], +[40152, 109, 12, 7], +[40244, 110, 3, 9], +[40410, 110, 8, 22], +[40480, 110, 10, 31], +[40508, 110, 11, 28], +[40514, 110, 12, 4], +[40662, 111, 5, 1], +[40850, 111, 11, 5], +[40854, 111, 11, 9], +[40951, 112, 2, 14], +[41039, 112, 5, 12], +[41166, 112, 9, 16], +[41269, 112, 12, 28], +[41427, 113, 6, 4], +[41575, 113, 10, 30], +[41633, 113, 12, 27], +[41641, 114, 1, 4], +[41682, 114, 2, 14], +[41694, 114, 2, 26], +[41774, 114, 5, 17], +[41890, 114, 9, 10], +[41893, 114, 9, 13], +[41933, 114, 10, 23], +[41963, 114, 11, 22], +[42012, 115, 1, 10], +[42013, 115, 1, 11], +[42117, 115, 4, 25], +[42164, 115, 6, 11], +[42331, 115, 11, 25], +[42481, 116, 4, 23], +[42521, 116, 6, 2], +[42525, 116, 6, 6], +[42557, 116, 7, 8], +[42604, 116, 8, 24], +[42612, 116, 9, 1], +[42744, 117, 1, 11], +[42898, 117, 6, 14], +[42914, 117, 6, 30], +[42961, 117, 8, 16], +[43134, 118, 2, 5], +[43222, 118, 5, 4], +[43346, 118, 9, 5], +[43386, 118, 10, 15], +[43495, 119, 2, 1], +[43683, 119, 8, 8], +[43812, 119, 12, 15], +[43950, 120, 5, 1], +[44027, 120, 7, 17], +[44156, 120, 11, 23], +[44242, 121, 2, 17], +[44379, 121, 7, 4], +[44392, 121, 7, 17], +[44588, 122, 1, 29], +[44763, 122, 7, 23], +[44828, 122, 9, 26], +[44837, 122, 10, 5], +[44905, 122, 12, 12], +[45032, 123, 4, 18], +[45066, 123, 5, 22], +[45114, 123, 7, 9], +[45243, 123, 11, 15], +[45268, 123, 12, 10], +[45286, 123, 12, 28], +[45436, 124, 5, 26], +[45571, 124, 10, 8], +[45572, 124, 10, 9], +[45713, 125, 2, 27], +[45771, 125, 4, 26], +[45919, 125, 9, 21], +[46098, 126, 3, 19], +[46221, 126, 7, 20], +[46403, 127, 1, 18], +[46492, 127, 4, 17], +[46599, 127, 8, 2], +[46604, 127, 8, 7], +[46613, 127, 8, 16], +[46672, 127, 10, 14], +[46773, 128, 1, 23], +[46825, 128, 3, 15], +[46848, 128, 4, 7], +[47037, 128, 10, 13], +[47075, 128, 11, 20], +[47225, 129, 4, 19], +[47235, 129, 4, 29], +[47401, 129, 10, 12], +[47567, 130, 3, 27], +[47735, 130, 9, 11], +[47768, 130, 10, 14], +[47873, 131, 1, 27], +[47977, 131, 5, 11], +[48124, 131, 10, 5], +[48274, 132, 3, 3], +[48351, 132, 5, 19], +[48373, 132, 6, 10], +[48474, 132, 9, 19], +[48497, 132, 10, 12], +[48619, 133, 2, 11], +[48631, 133, 2, 23], +[48745, 133, 6, 17], +[48793, 133, 8, 4], +[48935, 133, 12, 24], +[49011, 134, 3, 10], +[49058, 134, 4, 26], +[49108, 134, 6, 15], +[49174, 134, 8, 20], +[49242, 134, 10, 27], +[49278, 134, 12, 2], +[49366, 135, 2, 28], +[49435, 135, 5, 8], +[49606, 135, 10, 26], +[49786, 136, 4, 23], +[49931, 136, 9, 15], +[50044, 137, 1, 6], +[50127, 137, 3, 30], +[50258, 137, 8, 8], +[50315, 137, 10, 4], +[50438, 138, 2, 4], +[50572, 138, 6, 18], +[50630, 138, 8, 15], +[50633, 138, 8, 18], +[50799, 139, 1, 31], +[50981, 139, 8, 1], +[51027, 139, 9, 16], +[51084, 139, 11, 12], +[51200, 140, 3, 7], +[51229, 140, 4, 5], +[51389, 140, 9, 12], +[51464, 140, 11, 26], +[51654, 141, 6, 4], +[51801, 141, 10, 29], +[51925, 142, 3, 2], +[52074, 142, 7, 29], +[52191, 142, 11, 23], +[52284, 143, 2, 24], +[52431, 143, 7, 21], +[52504, 143, 10, 2], +[52587, 143, 12, 24], +[52661, 144, 3, 7], +[52728, 144, 5, 13], +[52771, 144, 6, 25], +[52914, 144, 11, 15], +[52978, 145, 1, 18], +[53001, 145, 2, 10], +[53085, 145, 5, 5], +[53244, 145, 10, 11], +[53296, 145, 12, 2], +[53330, 146, 1, 5], +[53372, 146, 2, 16], +[53385, 146, 3, 1], +[53401, 146, 3, 17], +[53502, 146, 6, 26], +[53516, 146, 7, 10], +[53574, 146, 9, 6], +[53654, 146, 11, 25], +[53702, 147, 1, 12], +[53846, 147, 6, 5], +[53892, 147, 7, 21], +[54067, 148, 1, 12], +[54105, 148, 2, 19], +[54304, 148, 9, 5], +[54374, 148, 11, 14], +[54559, 149, 5, 18], +[54586, 149, 6, 14], +[54745, 149, 11, 20], +[54843, 150, 2, 26], +[54932, 150, 5, 26], +[54937, 150, 5, 31], +[54972, 150, 7, 5], +[54981, 150, 7, 14], +[54991, 150, 7, 24], +[55008, 150, 8, 10], +[55063, 150, 10, 4], +[55095, 150, 11, 5], +[55279, 151, 5, 8], +[55308, 151, 6, 6], +[55312, 151, 6, 10], +[55406, 151, 9, 12], +[55441, 151, 10, 17], +[55491, 151, 12, 6], +[55590, 152, 3, 14], +[55756, 152, 8, 27], +[55776, 152, 9, 16], +[55834, 152, 11, 13], +[55935, 153, 2, 22], +[55986, 153, 4, 14], +[56105, 153, 8, 11], +[56139, 153, 9, 14], +[56315, 154, 3, 9], +[56343, 154, 4, 6], +[56406, 154, 6, 8], +[56550, 154, 10, 30], +[56706, 155, 4, 4], +[56906, 155, 10, 21], +[56964, 155, 12, 18], +[57118, 156, 5, 20], +[57256, 156, 10, 5], +[57419, 157, 3, 17], +[57474, 157, 5, 11], +[57608, 157, 9, 22], +[57719, 158, 1, 11], +[57725, 158, 1, 17], +[57814, 158, 4, 16], +[57878, 158, 6, 19], +[57881, 158, 6, 22], +[58019, 158, 11, 7], +[58026, 158, 11, 14], +[58084, 159, 1, 11], +[58105, 159, 2, 1], +[58135, 159, 3, 3], +[58292, 159, 8, 7], +[58483, 160, 2, 14], +[58557, 160, 4, 28], +[58639, 160, 7, 19], +[58665, 160, 8, 14], +[58812, 161, 1, 8], +[58822, 161, 1, 18], +[58961, 161, 6, 6], +[59055, 161, 9, 8], +[59235, 162, 3, 7], +[59304, 162, 5, 15], +[59372, 162, 7, 22], +[59407, 162, 8, 26], +[59488, 162, 11, 15], +[59627, 163, 4, 3], +[59690, 163, 6, 5], +[59870, 163, 12, 2], +[59876, 163, 12, 8], +[59954, 164, 2, 24], +[60106, 164, 7, 25], +[60153, 164, 9, 10], +[60179, 164, 10, 6], +[60315, 165, 2, 19], +[60353, 165, 3, 29], +[60517, 165, 9, 9], +[60615, 165, 12, 16], +[60668, 166, 2, 7], +[60729, 166, 4, 9], +[60760, 166, 5, 10], +[60766, 166, 5, 16], +[60876, 166, 9, 3], +[60948, 166, 11, 14], +[60987, 166, 12, 23], +[61185, 167, 7, 9], +[61341, 167, 12, 12], +[61521, 168, 6, 9], +[61576, 168, 8, 3], +[61714, 168, 12, 19], +[61836, 169, 4, 20], +[61890, 169, 6, 13], +[62060, 169, 11, 30], +[62153, 170, 3, 3], +[62239, 170, 5, 28], +[62305, 170, 8, 2], +[62352, 170, 9, 18], +[62444, 170, 12, 19], +[62625, 171, 6, 18], +[62628, 171, 6, 21], +[62782, 171, 11, 22], +[62793, 171, 12, 3], +[62808, 171, 12, 18], +[62888, 172, 3, 7], +[62901, 172, 3, 20], +[62948, 172, 5, 6], +[63060, 172, 8, 26], +[63242, 173, 2, 24], +[63425, 173, 8, 26], +[63587, 174, 2, 4], +[63733, 174, 6, 30], +[63752, 174, 7, 19], +[63927, 175, 1, 10], +[63970, 175, 2, 22], +[64083, 175, 6, 15], +[64176, 175, 9, 16], +[64214, 175, 10, 24], +[64361, 176, 3, 19], +[64497, 176, 8, 2], +[64528, 176, 9, 2], +[64721, 177, 3, 14], +[64783, 177, 5, 15], +[64914, 177, 9, 23], +[64926, 177, 10, 5], +[65059, 178, 2, 15], +[65107, 178, 4, 4], +[65209, 178, 7, 15], +[65377, 178, 12, 30], +[65489, 179, 4, 21], +[65532, 179, 6, 3], +[65596, 179, 8, 6], +[65784, 180, 2, 10], +[65917, 180, 6, 22], +[65995, 180, 9, 8], +[66102, 180, 12, 24], +[66228, 181, 4, 29], +[66232, 181, 5, 3], +[66296, 181, 7, 6], +[66429, 181, 11, 16], +[66529, 182, 2, 24], +[66708, 182, 8, 22], +[66846, 183, 1, 7], +[66911, 183, 3, 13], +[66977, 183, 5, 18], +[67144, 183, 11, 1], +[67165, 183, 11, 22], +[67289, 184, 3, 25], +[67305, 184, 4, 10], +[67425, 184, 8, 8], +[67517, 184, 11, 8], +[67706, 185, 5, 16], +[67715, 185, 5, 25], +[67885, 185, 11, 11], +[68064, 186, 5, 9], +[68194, 186, 9, 16], +[68385, 187, 3, 26], +[68545, 187, 9, 2], +[68680, 188, 1, 15], +[68687, 188, 1, 22], +[68852, 188, 7, 5], +[68943, 188, 10, 4], +[68948, 188, 10, 9], +[69103, 189, 3, 13], +[69160, 189, 5, 9], +[69167, 189, 5, 16], +[69236, 189, 7, 24], +[69254, 189, 8, 11], +[69400, 190, 1, 4], +[69489, 190, 4, 3], +[69573, 190, 6, 26], +[69726, 190, 11, 26], +[69803, 191, 2, 11], +[69806, 191, 2, 14], +[69830, 191, 3, 10], +[70029, 191, 9, 25], +[70211, 192, 3, 25], +[70404, 192, 10, 4], +[70529, 193, 2, 6], +[70715, 193, 8, 11], +[70774, 193, 10, 9], +[70883, 194, 1, 26], +[71004, 194, 5, 27], +[71022, 194, 6, 14], +[71067, 194, 7, 29], +[71172, 194, 11, 11], +[71286, 195, 3, 5], +[71466, 195, 9, 1], +[71575, 195, 12, 19], +[71616, 196, 1, 29], +[71618, 196, 1, 31], +[71812, 196, 8, 12], +[71836, 196, 9, 5], +[72026, 197, 3, 14], +[72157, 197, 7, 23], +[72163, 197, 7, 29], +[72351, 198, 2, 2], +[72466, 198, 5, 28], +[72549, 198, 8, 19], +[72578, 198, 9, 17], +[72620, 198, 10, 29], +[72745, 199, 3, 3], +[72859, 199, 6, 25], +[72964, 199, 10, 8], +[73117, 200, 3, 10], +[73247, 200, 7, 18], +[73252, 200, 7, 23], +[73418, 201, 1, 5], +[73440, 201, 1, 27], +[73460, 201, 2, 16], +[73543, 201, 5, 10], +[73599, 201, 7, 5], +[73759, 201, 12, 12], +[73783, 202, 1, 5], +[73959, 202, 6, 30], +[74041, 202, 9, 20], +[74079, 202, 10, 28], +[74095, 202, 11, 13], +[74277, 203, 5, 14], +[74459, 203, 11, 12], +[74476, 203, 11, 29], +[74559, 204, 2, 20], +[74650, 204, 5, 21], +[74815, 204, 11, 2], +[74829, 204, 11, 16], +[74922, 205, 2, 17], +[75008, 205, 5, 14], +[75164, 205, 10, 17], +[75352, 206, 4, 23], +[75484, 206, 9, 2], +[75592, 206, 12, 19], +[75605, 207, 1, 1], +[75771, 207, 6, 16], +[75843, 207, 8, 27], +[75857, 207, 9, 10], +[75905, 207, 10, 28], +[75950, 207, 12, 12], +[76098, 208, 5, 8], +[76206, 208, 8, 24], +[76383, 209, 2, 17], +[76523, 209, 7, 7], +[76680, 209, 12, 11], +[76728, 210, 1, 28], +[76925, 210, 8, 13], +[77075, 211, 1, 10], +[77263, 211, 7, 17], +[77453, 212, 1, 23], +[77460, 212, 1, 30], +[77486, 212, 2, 25], +[77487, 212, 2, 26], +[77544, 212, 4, 23], +[77587, 212, 6, 5], +[77711, 212, 10, 7], +[77730, 212, 10, 26], +[77771, 212, 12, 6], +[77875, 213, 3, 20], +[77885, 213, 3, 30], +[77947, 213, 5, 31], +[78112, 213, 11, 12], +[78192, 214, 1, 31], +[78288, 214, 5, 7], +[78382, 214, 8, 9], +[78522, 214, 12, 27], +[78604, 215, 3, 19], +[78778, 215, 9, 9], +[78787, 215, 9, 18], +[78972, 216, 3, 21], +[78975, 216, 3, 24], +[79175, 216, 10, 10], +[79249, 216, 12, 23], +[79306, 217, 2, 18], +[79489, 217, 8, 20], +[79676, 218, 2, 23], +[79762, 218, 5, 20], +[79857, 218, 8, 23], +[79961, 218, 12, 5], +[80134, 219, 5, 27], +[80236, 219, 9, 6], +[80321, 219, 11, 30], +[80472, 220, 4, 29], +[80541, 220, 7, 7], +[80657, 220, 10, 31], +[80830, 221, 4, 22], +[80985, 221, 9, 24], +[81176, 222, 4, 3], +[81360, 222, 10, 4], +[81378, 222, 10, 22], +[81409, 222, 11, 22], +[81593, 223, 5, 25], +[81786, 223, 12, 4], +[81965, 224, 5, 31], +[81979, 224, 6, 14], +[81999, 224, 7, 4], +[82070, 224, 9, 13], +[82130, 224, 11, 12], +[82276, 225, 4, 7], +[82413, 225, 8, 22], +[82578, 226, 2, 3], +[82722, 226, 6, 27], +[82730, 226, 7, 5], +[82734, 226, 7, 9], +[82844, 226, 10, 27], +[82955, 227, 2, 15], +[83127, 227, 8, 6], +[83254, 227, 12, 11], +[83351, 228, 3, 17], +[83503, 228, 8, 16], +[83667, 229, 1, 27], +[83822, 229, 7, 1], +[83927, 229, 10, 14], +[84028, 230, 1, 23], +[84114, 230, 4, 19], +[84149, 230, 5, 24], +[84226, 230, 8, 9], +[84354, 230, 12, 15], +[84489, 231, 4, 29], +[84507, 231, 5, 17], +[84684, 231, 11, 10], +[84763, 232, 1, 28], +[84845, 232, 4, 19], +[85006, 232, 9, 27], +[85018, 232, 10, 9], +[85155, 233, 2, 23], +[85290, 233, 7, 8], +[85486, 234, 1, 20], +[85528, 234, 3, 3], +[85670, 234, 7, 23], +[85710, 234, 9, 1], +[85782, 234, 11, 12], +[85830, 234, 12, 30], +[85992, 235, 6, 10], +[86076, 235, 9, 2], +[86099, 235, 9, 25], +[86281, 236, 3, 25], +[86316, 236, 4, 29], +[86456, 236, 9, 16], +[86500, 236, 10, 30], +[86629, 237, 3, 8], +[86711, 237, 5, 29], +[86818, 237, 9, 13], +[86915, 237, 12, 19], +[86977, 238, 2, 19], +[87128, 238, 7, 20], +[87270, 238, 12, 9], +[87469, 239, 6, 26], +[87557, 239, 9, 22], +[87642, 239, 12, 16], +[87783, 240, 5, 5], +[87970, 240, 11, 8], +[88020, 240, 12, 28], +[88024, 241, 1, 1], +[88128, 241, 4, 15], +[88207, 241, 7, 3], +[88336, 241, 11, 9], +[88345, 241, 11, 18], +[88378, 241, 12, 21], +[88519, 242, 5, 11], +[88556, 242, 6, 17], +[88730, 242, 12, 8], +[88880, 243, 5, 7], +[89041, 243, 10, 15], +[89059, 243, 11, 2], +[89167, 244, 2, 18], +[89245, 244, 5, 6], +[89417, 244, 10, 25], +[89614, 245, 5, 10], +[89628, 245, 5, 24], +[89696, 245, 7, 31], +[89735, 245, 9, 8], +[89793, 245, 11, 5], +[89858, 246, 1, 9], +[90055, 246, 7, 25], +[90210, 246, 12, 27], +[90246, 247, 2, 1], +[90301, 247, 3, 28], +[90379, 247, 6, 14], +[90464, 247, 9, 7], +[90653, 248, 3, 14], +[90792, 248, 7, 31], +[90886, 248, 11, 2], +[90930, 248, 12, 16], +[91126, 249, 6, 30], +[91260, 249, 11, 11], +[91340, 250, 1, 30], +[91392, 250, 3, 23], +[91507, 250, 7, 16], +[91661, 250, 12, 17], +[91680, 251, 1, 5], +[91722, 251, 2, 16], +[91893, 251, 8, 6], +[92022, 251, 12, 13], +[92078, 252, 2, 7], +[92277, 252, 8, 24], +[92404, 252, 12, 29], +[92448, 253, 2, 11], +[92621, 253, 8, 3], +[92696, 253, 10, 17], +[92889, 254, 4, 28], +[93011, 254, 8, 28], +[93165, 255, 1, 29], +[93364, 255, 8, 16], +[93451, 255, 11, 11], +[93651, 256, 5, 29], +[93749, 256, 9, 4], +[93817, 256, 11, 11], +[93980, 257, 4, 23], +[94112, 257, 9, 2], +[94146, 257, 10, 6], +[94172, 257, 11, 1], +[94178, 257, 11, 7], +[94255, 258, 1, 23], +[94437, 258, 7, 24], +[94501, 258, 9, 26], +[94528, 258, 10, 23], +[94661, 259, 3, 5], +[94725, 259, 5, 8], +[94802, 259, 7, 24], +[94990, 260, 1, 28], +[95114, 260, 5, 31], +[95164, 260, 7, 20], +[95211, 260, 9, 5], +[95333, 261, 1, 5], +[95409, 261, 3, 22], +[95572, 261, 9, 1], +[95669, 261, 12, 7], +[95757, 262, 3, 5], +[95768, 262, 3, 16], +[95938, 262, 9, 2], +[96123, 263, 3, 6], +[96141, 263, 3, 24], +[96285, 263, 8, 15], +[96383, 263, 11, 21], +[96486, 264, 3, 3], +[96544, 264, 4, 30], +[96727, 264, 10, 30], +[96805, 265, 1, 16], +[96870, 265, 3, 22], +[96950, 265, 6, 10], +[96989, 265, 7, 19], +[97108, 265, 11, 15], +[97167, 266, 1, 13], +[97246, 266, 4, 2], +[97281, 266, 5, 7], +[97391, 266, 8, 25], +[97415, 266, 9, 18], +[97508, 266, 12, 20], +[97670, 267, 5, 31], +[97835, 267, 11, 12], +[98002, 268, 4, 27], +[98083, 268, 7, 17], +[98180, 268, 10, 22], +[98364, 269, 4, 24], +[98394, 269, 5, 24], +[98418, 269, 6, 17], +[98549, 269, 10, 26], +[98697, 270, 3, 23], +[98720, 270, 4, 15], +[98912, 270, 10, 24], +[99005, 271, 1, 25], +[99074, 271, 4, 4], +[99150, 271, 6, 19], +[99346, 272, 1, 1], +[99513, 272, 6, 16], +[99569, 272, 8, 11], +[99672, 272, 11, 22], +[99844, 273, 5, 13], +[99891, 273, 6, 29], +[99982, 273, 9, 28], +[100180, 274, 4, 14], +[100331, 274, 9, 12], +[100477, 275, 2, 5], +[100627, 275, 7, 5], +[100659, 275, 8, 6], +[100741, 275, 10, 27], +[100847, 276, 2, 10], +[101009, 276, 7, 21], +[101066, 276, 9, 16], +[101123, 276, 11, 12], +[101252, 277, 3, 21], +[101375, 277, 7, 22], +[101443, 277, 9, 28], +[101504, 277, 11, 28], +[101680, 278, 5, 23], +[101746, 278, 7, 28], +[101849, 278, 11, 8], +[101969, 279, 3, 8], +[102076, 279, 6, 23], +[102157, 279, 9, 12], +[102206, 279, 10, 31], +[102291, 280, 1, 24], +[102432, 280, 6, 13], +[102502, 280, 8, 22], +[102608, 280, 12, 6], +[102617, 280, 12, 15], +[102808, 281, 6, 24], +[102839, 281, 7, 25], +[102884, 281, 9, 8], +[102988, 281, 12, 21], +[103109, 282, 4, 21], +[103276, 282, 10, 5], +[103470, 283, 4, 17], +[103595, 283, 8, 20], +[103739, 284, 1, 11], +[103795, 284, 3, 7], +[103935, 284, 7, 25], +[104118, 285, 1, 24], +[104198, 285, 4, 14], +[104280, 285, 7, 5], +[104454, 285, 12, 26], +[104532, 286, 3, 14], +[104679, 286, 8, 8], +[104716, 286, 9, 14], +[104718, 286, 9, 16], +[104876, 287, 2, 21], +[104934, 287, 4, 20], +[105117, 287, 10, 20], +[105315, 288, 5, 5], +[105405, 288, 8, 3], +[105602, 289, 2, 16], +[105692, 289, 5, 17], +[105877, 289, 11, 18], +[106025, 290, 4, 15], +[106159, 290, 8, 27], +[106305, 291, 1, 20], +[106455, 291, 6, 19], +[106536, 291, 9, 8], +[106716, 292, 3, 6], +[106816, 292, 6, 14], +[106820, 292, 6, 18], +[106834, 292, 7, 2], +[106918, 292, 9, 24], +[107071, 293, 2, 24], +[107141, 293, 5, 5], +[107187, 293, 6, 20], +[107242, 293, 8, 14], +[107299, 293, 10, 10], +[107499, 294, 4, 28], +[107640, 294, 9, 16], +[107833, 295, 3, 28], +[107874, 295, 5, 8], +[107980, 295, 8, 22], +[108078, 295, 11, 28], +[108102, 295, 12, 22], +[108188, 296, 3, 17], +[108193, 296, 3, 22], +[108232, 296, 4, 30], +[108292, 296, 6, 29], +[108308, 296, 7, 15], +[108423, 296, 11, 7], +[108509, 297, 2, 1], +[108658, 297, 6, 30], +[108837, 297, 12, 26], +[108863, 298, 1, 21], +[108978, 298, 5, 16], +[109095, 298, 9, 10], +[109286, 299, 3, 20], +[109461, 299, 9, 11], +[109488, 299, 10, 8], +[109511, 299, 10, 31], +[109598, 300, 1, 26], +[109599, 300, 1, 27], +[109763, 300, 7, 10], +[109852, 300, 10, 7], +[109896, 300, 11, 20], +[109944, 301, 1, 7], +[110047, 301, 4, 20], +[110174, 301, 8, 25], +[110308, 302, 1, 6], +[110340, 302, 2, 7], +[110486, 302, 7, 3], +[110606, 302, 10, 31], +[110667, 302, 12, 31], +[110809, 303, 5, 22], +[110811, 303, 5, 24], +[110929, 303, 9, 19], +[111107, 304, 3, 15], +[111259, 304, 8, 14], +[111298, 304, 9, 22], +[111469, 305, 3, 12], +[111610, 305, 7, 31], +[111720, 305, 11, 18], +[111751, 305, 12, 19], +[111804, 306, 2, 10], +[111822, 306, 2, 28], +[111953, 306, 7, 9], +[112135, 307, 1, 7], +[112285, 307, 6, 6], +[112296, 307, 6, 17], +[112457, 307, 11, 25], +[112493, 307, 12, 31], +[112665, 308, 6, 20], +[112686, 308, 7, 11], +[112783, 308, 10, 16], +[112967, 309, 4, 18], +[113063, 309, 7, 23], +[113158, 309, 10, 26], +[113344, 310, 4, 30], +[113374, 310, 5, 30], +[113457, 310, 8, 21], +[113612, 311, 1, 23], +[113667, 311, 3, 19], +[113840, 311, 9, 8], +[113902, 311, 11, 9], +[114074, 312, 4, 29], +[114190, 312, 8, 23], +[114261, 312, 11, 2], +[114386, 313, 3, 7], +[114467, 313, 5, 27], +[114581, 313, 9, 18], +[114663, 313, 12, 9], +[114790, 314, 4, 15], +[114894, 314, 7, 28], +[114986, 314, 10, 28], +[115062, 315, 1, 12], +[115082, 315, 2, 1], +[115083, 315, 2, 2], +[115113, 315, 3, 4], +[115268, 315, 8, 6], +[115390, 315, 12, 6], +[115484, 316, 3, 9], +[115635, 316, 8, 7], +[115791, 317, 1, 10], +[115848, 317, 3, 8], +[116035, 317, 9, 11], +[116122, 317, 12, 7], +[116230, 318, 3, 25], +[116362, 318, 8, 4], +[116416, 318, 9, 27], +[116435, 318, 10, 16], +[116626, 319, 4, 25], +[116761, 319, 9, 7], +[116900, 320, 1, 24], +[117069, 320, 7, 11], +[117088, 320, 7, 30], +[117206, 320, 11, 25], +[117365, 321, 5, 3], +[117514, 321, 9, 29], +[117520, 321, 10, 5], +[117692, 322, 3, 26], +[117886, 322, 10, 6], +[117968, 322, 12, 27], +[118103, 323, 5, 11], +[118268, 323, 10, 23], +[118333, 323, 12, 27], +[118339, 324, 1, 2], +[118448, 324, 4, 20], +[118573, 324, 8, 23], +[118591, 324, 9, 10], +[118724, 325, 1, 21], +[118894, 325, 7, 10], +[118946, 325, 8, 31], +[119091, 326, 1, 23], +[119273, 326, 7, 24], +[119380, 326, 11, 8], +[119579, 327, 5, 26], +[119602, 327, 6, 18], +[119640, 327, 7, 26], +[119702, 327, 9, 26], +[119790, 327, 12, 23], +[119926, 328, 5, 7], +[120054, 328, 9, 12], +[120239, 329, 3, 16], +[120436, 329, 9, 29], +[120598, 330, 3, 10], +[120679, 330, 5, 30], +[120824, 330, 10, 22], +[120961, 331, 3, 8], +[121143, 331, 9, 6], +[121162, 331, 9, 25], +[121216, 331, 11, 18], +[121230, 331, 12, 2], +[121419, 332, 6, 8], +[121608, 332, 12, 14], +[121639, 333, 1, 14], +[121664, 333, 2, 8], +[121679, 333, 2, 23], +[121709, 333, 3, 25], +[121783, 333, 6, 7], +[121823, 333, 7, 17], +[121858, 333, 8, 21], +[121939, 333, 11, 10], +[121991, 334, 1, 1], +[122133, 334, 5, 23], +[122200, 334, 7, 29], +[122345, 334, 12, 21], +[122507, 335, 6, 1], +[122539, 335, 7, 3], +[122684, 335, 11, 25], +[122730, 336, 1, 10], +[122813, 336, 4, 2], +[122862, 336, 5, 21], +[123009, 336, 10, 15], +[123097, 337, 1, 11], +[123293, 337, 7, 26], +[123323, 337, 8, 25], +[123330, 337, 9, 1], +[123500, 338, 2, 18], +[123535, 338, 3, 25], +[123696, 338, 9, 2], +[123713, 338, 9, 19], +[123852, 339, 2, 5], +[123930, 339, 4, 24], +[123985, 339, 6, 18], +[123994, 339, 6, 27], +[124090, 339, 10, 1], +[124237, 340, 2, 25], +[124427, 340, 9, 2], +[124613, 341, 3, 7], +[124644, 341, 4, 7], +[124671, 341, 5, 4], +[124766, 341, 8, 7], +[124837, 341, 10, 17], +[124969, 342, 2, 26], +[125075, 342, 6, 12], +[125217, 342, 11, 1], +[125385, 343, 4, 18], +[125477, 343, 7, 19], +[125663, 344, 1, 21], +[125854, 344, 7, 30], +[125987, 344, 12, 10], +[126079, 345, 3, 12], +[126241, 345, 8, 21], +[126386, 346, 1, 13], +[126528, 346, 6, 4], +[126701, 346, 11, 24], +[126878, 347, 5, 20], +[126990, 347, 9, 9], +[127151, 348, 2, 17], +[127292, 348, 7, 7], +[127376, 348, 9, 29], +[127451, 348, 12, 13], +[127507, 349, 2, 7], +[127661, 349, 7, 11], +[127737, 349, 9, 25], +[127787, 349, 11, 14], +[127874, 350, 2, 9], +[128042, 350, 7, 27], +[128140, 350, 11, 2], +[128327, 351, 5, 8], +[128362, 351, 6, 12], +[128537, 351, 12, 4], +[128613, 352, 2, 18], +[128623, 352, 2, 28], +[128694, 352, 5, 9], +[128799, 352, 8, 22], +[128895, 352, 11, 26], +[129061, 353, 5, 11], +[129067, 353, 5, 17], +[129208, 353, 10, 5], +[129403, 354, 4, 18], +[129524, 354, 8, 17], +[129719, 355, 2, 28], +[129809, 355, 5, 29], +[129849, 355, 7, 8], +[129985, 355, 11, 21], +[130177, 356, 5, 31], +[130363, 356, 12, 3], +[130558, 357, 6, 16], +[130666, 357, 10, 2], +[130782, 358, 1, 26], +[130833, 358, 3, 18], +[130861, 358, 4, 15], +[131027, 358, 9, 28], +[131159, 359, 2, 7], +[131340, 359, 8, 7], +[131380, 359, 9, 16], +[131548, 360, 3, 2], +[131655, 360, 6, 17], +[131776, 360, 10, 16], +[131825, 360, 12, 4], +[131883, 361, 1, 31], +[132061, 361, 7, 28], +[132186, 361, 11, 30], +[132201, 361, 12, 15], +[132295, 362, 3, 19], +[132337, 362, 4, 30], +[132481, 362, 9, 21], +[132504, 362, 10, 14], +[132639, 363, 2, 26], +[132747, 363, 6, 14], +[132784, 363, 7, 21], +[132933, 363, 12, 17], +[132962, 364, 1, 15], +[133090, 364, 5, 22], +[133119, 364, 6, 20], +[133197, 364, 9, 6], +[133292, 364, 12, 10], +[133409, 365, 4, 6], +[133453, 365, 5, 20], +[133571, 365, 9, 15], +[133679, 366, 1, 1], +[133720, 366, 2, 11], +[133914, 366, 8, 24], +[133964, 366, 10, 13], +[134091, 367, 2, 17], +[134286, 367, 8, 31], +[134424, 368, 1, 16], +[134527, 368, 4, 28], +[134553, 368, 5, 24], +[134709, 368, 10, 27], +[134798, 369, 1, 24], +[134885, 369, 4, 21], +[134904, 369, 5, 10], +[134927, 369, 6, 2], +[134994, 369, 8, 8], +[135098, 369, 11, 20], +[135172, 370, 2, 2], +[135220, 370, 3, 22], +[135353, 370, 8, 2], +[135467, 370, 11, 24], +[135665, 371, 6, 10], +[135811, 371, 11, 3], +[135934, 372, 3, 5], +[136045, 372, 6, 24], +[136061, 372, 7, 10], +[136106, 372, 8, 24], +[136163, 372, 10, 20], +[136202, 372, 11, 28], +[136297, 373, 3, 3], +[136317, 373, 3, 23], +[136509, 373, 10, 1], +[136552, 373, 11, 13], +[136671, 374, 3, 12], +[136809, 374, 7, 28], +[137003, 375, 2, 7], +[137163, 375, 7, 17], +[137259, 375, 10, 21], +[137345, 376, 1, 15], +[137418, 376, 3, 28], +[137484, 376, 6, 2], +[137627, 376, 10, 23], +[137664, 376, 11, 29], +[137795, 377, 4, 9], +[137834, 377, 5, 18], +[137906, 377, 7, 29], +[137983, 377, 10, 14], +[138110, 378, 2, 18], +[138265, 378, 7, 23], +[138332, 378, 9, 28], +[138377, 378, 11, 12], +[138382, 378, 11, 17], +[138580, 379, 6, 3], +[138774, 379, 12, 14], +[138938, 380, 5, 26], +[138947, 380, 6, 4], +[138997, 380, 7, 24], +[139176, 381, 1, 19], +[139234, 381, 3, 18], +[139321, 381, 6, 13], +[139521, 381, 12, 30], +[139708, 382, 7, 5], +[139828, 382, 11, 2], +[139908, 383, 1, 21], +[139960, 383, 3, 14], +[139997, 383, 4, 20], +[140028, 383, 5, 21], +[140046, 383, 6, 8], +[140233, 383, 12, 12], +[140257, 384, 1, 5], +[140282, 384, 1, 30], +[140463, 384, 7, 29], +[140464, 384, 7, 30], +[140604, 384, 12, 17], +[140738, 385, 4, 30], +[140773, 385, 6, 4], +[140835, 385, 8, 5], +[140850, 385, 8, 20], +[141042, 386, 2, 28], +[141183, 386, 7, 19], +[141260, 386, 10, 4], +[141324, 386, 12, 7], +[141333, 386, 12, 16], +[141448, 387, 4, 10], +[141639, 387, 10, 18], +[141767, 388, 2, 23], +[141781, 388, 3, 8], +[141826, 388, 4, 22], +[141951, 388, 8, 25], +[142005, 388, 10, 18], +[142068, 388, 12, 20], +[142186, 389, 4, 17], +[142195, 389, 4, 26], +[142380, 389, 10, 28], +[142479, 390, 2, 4], +[142484, 390, 2, 9], +[142660, 390, 8, 4], +[142838, 391, 1, 29], +[142926, 391, 4, 27], +[142994, 391, 7, 4], +[142996, 391, 7, 6], +[143058, 391, 9, 6], +[143123, 391, 11, 10], +[143152, 391, 12, 9], +[143320, 392, 5, 25], +[143507, 392, 11, 28], +[143547, 393, 1, 7], +[143726, 393, 7, 5], +[143744, 393, 7, 23], +[143817, 393, 10, 4], +[143921, 394, 1, 16], +[144046, 394, 5, 21], +[144077, 394, 6, 21], +[144166, 394, 9, 18], +[144190, 394, 10, 12], +[144245, 394, 12, 6], +[144309, 395, 2, 8], +[144488, 395, 8, 6], +[144610, 395, 12, 6], +[144630, 395, 12, 26], +[144690, 396, 2, 24], +[144820, 396, 7, 3], +[144871, 396, 8, 23], +[144961, 396, 11, 21], +[144995, 396, 12, 25], +[145093, 397, 4, 2], +[145165, 397, 6, 13], +[145286, 397, 10, 12], +[145393, 398, 1, 27], +[145540, 398, 6, 23], +[145700, 398, 11, 30], +[145795, 399, 3, 5], +[145808, 399, 3, 18], +[145913, 399, 7, 1], +[145983, 399, 9, 9], +[146105, 400, 1, 9], +[146118, 400, 1, 22], +[146307, 400, 7, 29], +[146418, 400, 11, 17], +[146453, 400, 12, 22], +[146628, 401, 6, 15], +[146824, 401, 12, 28], +[146907, 402, 3, 21], +[147037, 402, 7, 29], +[147106, 402, 10, 6], +[147130, 402, 10, 30], +[147199, 403, 1, 7], +[147209, 403, 1, 17], +[147404, 403, 7, 31], +[147585, 404, 1, 28], +[147697, 404, 5, 19], +[147812, 404, 9, 11], +[147817, 404, 9, 16], +[147962, 405, 2, 8], +[148019, 405, 4, 6], +[148136, 405, 8, 1], +[148159, 405, 8, 24], +[148297, 406, 1, 9], +[148371, 406, 3, 24], +[148447, 406, 6, 8], +[148580, 406, 10, 19], +[148747, 407, 4, 4], +[148938, 407, 10, 12], +[149061, 408, 2, 12], +[149227, 408, 7, 27], +[149371, 408, 12, 18], +[149452, 409, 3, 9], +[149521, 409, 5, 17], +[149621, 409, 8, 25], +[149686, 409, 10, 29], +[149749, 409, 12, 31], +[149823, 410, 3, 15], +[149877, 410, 5, 8], +[149944, 410, 7, 14], +[150134, 411, 1, 20], +[150318, 411, 7, 23], +[150380, 411, 9, 23], +[150525, 412, 2, 15], +[150716, 412, 8, 24], +[150741, 412, 9, 18], +[150819, 412, 12, 5], +[150884, 413, 2, 8], +[151017, 413, 6, 21], +[151030, 413, 7, 4], +[151183, 413, 12, 4], +[151280, 414, 3, 11], +[151374, 414, 6, 13], +[151393, 414, 7, 2], +[151506, 414, 10, 23], +[151601, 415, 1, 26], +[151746, 415, 6, 20], +[151767, 415, 7, 11], +[151853, 415, 10, 5], +[151958, 416, 1, 18], +[152090, 416, 5, 29], +[152149, 416, 7, 27], +[152219, 416, 10, 5], +[152370, 417, 3, 5], +[152555, 417, 9, 6], +[152670, 417, 12, 30], +[152695, 418, 1, 24], +[152760, 418, 3, 30], +[152802, 418, 5, 11], +[153002, 418, 11, 27], +[153200, 419, 6, 13], +[153380, 419, 12, 10], +[153567, 420, 6, 14], +[153680, 420, 10, 5], +[153782, 421, 1, 15], +[153814, 421, 2, 16], +[153924, 421, 6, 6], +[153964, 421, 7, 16], +[154100, 421, 11, 29], +[154109, 421, 12, 8], +[154258, 422, 5, 6], +[154307, 422, 6, 24], +[154382, 422, 9, 7], +[154412, 422, 10, 7], +[154612, 423, 4, 25], +[154737, 423, 8, 28], +[154904, 424, 2, 11], +[155052, 424, 7, 8], +[155106, 424, 8, 31], +[155255, 425, 1, 27], +[155362, 425, 5, 14], +[155471, 425, 8, 31], +[155506, 425, 10, 5], +[155535, 425, 11, 3], +[155717, 426, 5, 4], +[155746, 426, 6, 2], +[155776, 426, 7, 2], +[155961, 427, 1, 3], +[155986, 427, 1, 28], +[155996, 427, 2, 7], +[156146, 427, 7, 7], +[156280, 427, 11, 18], +[156446, 428, 5, 2], +[156535, 428, 7, 30], +[156669, 428, 12, 11], +[156734, 429, 2, 14], +[156747, 429, 2, 27], +[156857, 429, 6, 17], +[157014, 429, 11, 21], +[157169, 430, 4, 25], +[157183, 430, 5, 9], +[157380, 430, 11, 22], +[157531, 431, 4, 22], +[157680, 431, 9, 18], +[157805, 432, 1, 21], +[157894, 432, 4, 19], +[157916, 432, 5, 11], +[158081, 432, 10, 23], +[158137, 432, 12, 18], +[158263, 433, 4, 23], +[158385, 433, 8, 23], +[158443, 433, 10, 20], +[158606, 434, 4, 1], +[158739, 434, 8, 12], +[158892, 435, 1, 12], +[159012, 435, 5, 12], +[159142, 435, 9, 19], +[159274, 436, 1, 29], +[159384, 436, 5, 18], +[159385, 436, 5, 19], +[159583, 436, 12, 3], +[159697, 437, 3, 27], +[159844, 437, 8, 21], +[160005, 438, 1, 29], +[160083, 438, 4, 17], +[160096, 438, 4, 30], +[160221, 438, 9, 2], +[160362, 439, 1, 21], +[160488, 439, 5, 27], +[160506, 439, 6, 14], +[160589, 439, 9, 5], +[160774, 440, 3, 8], +[160812, 440, 4, 15], +[160931, 440, 8, 12], +[161086, 441, 1, 14], +[161277, 441, 7, 24], +[161334, 441, 9, 19], +[161493, 442, 2, 25], +[161574, 442, 5, 17], +[161701, 442, 9, 21], +[161836, 443, 2, 3], +[162014, 443, 7, 31], +[162031, 443, 8, 17], +[162205, 444, 2, 7], +[162370, 444, 7, 21], +[162375, 444, 7, 26], +[162432, 444, 9, 21], +[162513, 444, 12, 11], +[162552, 445, 1, 19], +[162579, 445, 2, 15], +[162633, 445, 4, 10], +[162636, 445, 4, 13], +[162688, 445, 6, 4], +[162874, 445, 12, 7], +[162909, 446, 1, 11], +[162967, 446, 3, 10], +[162999, 446, 4, 11], +[163056, 446, 6, 7], +[163253, 446, 12, 21], +[163392, 447, 5, 9], +[163490, 447, 8, 15], +[163614, 447, 12, 17], +[163782, 448, 6, 2], +[163956, 448, 11, 23], +[164091, 449, 4, 7], +[164272, 449, 10, 5], +[164426, 450, 3, 8], +[164472, 450, 4, 23], +[164488, 450, 5, 9], +[164536, 450, 6, 26], +[164723, 450, 12, 30], +[164863, 451, 5, 19], +[164915, 451, 7, 10], +[164920, 451, 7, 15], +[164937, 451, 8, 1], +[165090, 452, 1, 1], +[165113, 452, 1, 24], +[165123, 452, 2, 3], +[165130, 452, 2, 10], +[165157, 452, 3, 8], +[165194, 452, 4, 14], +[165273, 452, 7, 2], +[165440, 452, 12, 16], +[165634, 453, 6, 28], +[165790, 453, 12, 1], +[165828, 454, 1, 8], +[166008, 454, 7, 7], +[166175, 454, 12, 21], +[166320, 455, 5, 15], +[166455, 455, 9, 27], +[166640, 456, 3, 30], +[166801, 456, 9, 7], +[166877, 456, 11, 22], +[167018, 457, 4, 12], +[167191, 457, 10, 2], +[167369, 458, 3, 29], +[167473, 458, 7, 11], +[167558, 458, 10, 4], +[167684, 459, 2, 7], +[167741, 459, 4, 5], +[167854, 459, 7, 27], +[167906, 459, 9, 17], +[168002, 459, 12, 22], +[168149, 460, 5, 17], +[168267, 460, 9, 12], +[168433, 461, 2, 25], +[168469, 461, 4, 2], +[168658, 461, 10, 8], +[168805, 462, 3, 4], +[168939, 462, 7, 16], +[169003, 462, 9, 18], +[169104, 462, 12, 28], +[169164, 463, 2, 26], +[169196, 463, 3, 30], +[169229, 463, 5, 2], +[169341, 463, 8, 22], +[169362, 463, 9, 12], +[169372, 463, 9, 22], +[169382, 463, 10, 2], +[169483, 464, 1, 11], +[169660, 464, 7, 6], +[169837, 464, 12, 30], +[169937, 465, 4, 9], +[170074, 465, 8, 24], +[170180, 465, 12, 8], +[170334, 466, 5, 11], +[170490, 466, 10, 14], +[170645, 467, 3, 18], +[170829, 467, 9, 18], +[171022, 468, 3, 29], +[171059, 468, 5, 5], +[171150, 468, 8, 4], +[171202, 468, 9, 25], +[171208, 468, 10, 1], +[171347, 469, 2, 17], +[171351, 469, 2, 21], +[171419, 469, 4, 30], +[171433, 469, 5, 14], +[171553, 469, 9, 11], +[171559, 469, 9, 17], +[171562, 469, 9, 20], +[171678, 470, 1, 14], +[171798, 470, 5, 14], +[171967, 470, 10, 30], +[172141, 471, 4, 22], +[172266, 471, 8, 25], +[172386, 471, 12, 23], +[172462, 472, 3, 8], +[172600, 472, 7, 24], +[172789, 473, 1, 29], +[172870, 473, 4, 20], +[172911, 473, 5, 31], +[172972, 473, 7, 31], +[173098, 473, 12, 4], +[173258, 474, 5, 13], +[173360, 474, 8, 23], +[173486, 474, 12, 27], +[173610, 475, 4, 30], +[173687, 475, 7, 16], +[173828, 475, 12, 4], +[174024, 476, 6, 17], +[174047, 476, 7, 10], +[174064, 476, 7, 27], +[174169, 476, 11, 9], +[174214, 476, 12, 24], +[174400, 477, 6, 28], +[174437, 477, 8, 4], +[174483, 477, 9, 19], +[174613, 478, 1, 27], +[174634, 478, 2, 17], +[174816, 478, 8, 18], +[174881, 478, 10, 22], +[175045, 479, 4, 4], +[175221, 479, 9, 27], +[175252, 479, 10, 28], +[175445, 480, 5, 8], +[175517, 480, 7, 19], +[175683, 481, 1, 1], +[175780, 481, 4, 8], +[175962, 481, 10, 7], +[176002, 481, 11, 16], +[176087, 482, 2, 9], +[176146, 482, 4, 9], +[176285, 482, 8, 26], +[176481, 483, 3, 10], +[176607, 483, 7, 14], +[176636, 483, 8, 12], +[176785, 484, 1, 8], +[176880, 484, 4, 12], +[177013, 484, 8, 23], +[177210, 485, 3, 8], +[177308, 485, 6, 14], +[177504, 485, 12, 27], +[177515, 486, 1, 7], +[177562, 486, 2, 23], +[177598, 486, 3, 31], +[177723, 486, 8, 3], +[177809, 486, 10, 28], +[177961, 487, 3, 29], +[178083, 487, 7, 29], +[178241, 488, 1, 3], +[178349, 488, 4, 20], +[178387, 488, 5, 28], +[178520, 488, 10, 8], +[178591, 488, 12, 18], +[178791, 489, 7, 6], +[178857, 489, 9, 10], +[179018, 490, 2, 18], +[179113, 490, 5, 24], +[179149, 490, 6, 29], +[179163, 490, 7, 13], +[179253, 490, 10, 11], +[179390, 491, 2, 25], +[179537, 491, 7, 22], +[179716, 492, 1, 17], +[179896, 492, 7, 15], +[180079, 493, 1, 14], +[180257, 493, 7, 11], +[180358, 493, 10, 20], +[180363, 493, 10, 25], +[180509, 494, 3, 20], +[180564, 494, 5, 14], +[180753, 494, 11, 19], +[180854, 495, 2, 28], +[180965, 495, 6, 19], +[181131, 495, 12, 2], +[181264, 496, 4, 13], +[181356, 496, 7, 14], +[181469, 496, 11, 4], +[181516, 496, 12, 21], +[181570, 497, 2, 13], +[181674, 497, 5, 28], +[181761, 497, 8, 23], +[181846, 497, 11, 16], +[181905, 498, 1, 14], +[182076, 498, 7, 4], +[182185, 498, 10, 21], +[182248, 498, 12, 23], +[182313, 499, 2, 26], +[182320, 499, 3, 5], +[182496, 499, 8, 28], +[182566, 499, 11, 6], +[182745, 500, 5, 4], +[182900, 500, 10, 6], +[182914, 500, 10, 20], +[182978, 500, 12, 23], +[183149, 501, 6, 12], +[183332, 501, 12, 12], +[183482, 502, 5, 11], +[183616, 502, 9, 22], +[183793, 503, 3, 18], +[183873, 503, 6, 6], +[184014, 503, 10, 25], +[184108, 504, 1, 27], +[184250, 504, 6, 17], +[184288, 504, 7, 25], +[184411, 504, 11, 25], +[184611, 505, 6, 13], +[184737, 505, 10, 17], +[184928, 506, 4, 26], +[185097, 506, 10, 12], +[185267, 507, 3, 31], +[185323, 507, 5, 26], +[185356, 507, 6, 28], +[185539, 507, 12, 28], +[185652, 508, 4, 19], +[185781, 508, 8, 26], +[185911, 509, 1, 3], +[186005, 509, 4, 7], +[186177, 509, 9, 26], +[186256, 509, 12, 14], +[186447, 510, 6, 23], +[186563, 510, 10, 17], +[186593, 510, 11, 16], +[186729, 511, 4, 1], +[186757, 511, 4, 29], +[186913, 511, 10, 2], +[187047, 512, 2, 13], +[187184, 512, 6, 29], +[187353, 512, 12, 15], +[187460, 513, 4, 1], +[187501, 513, 5, 12], +[187610, 513, 8, 29], +[187759, 514, 1, 25], +[187911, 514, 6, 26], +[187944, 514, 7, 29], +[187960, 514, 8, 14], +[188019, 514, 10, 12], +[188080, 514, 12, 12], +[188130, 515, 1, 31], +[188153, 515, 2, 23], +[188248, 515, 5, 29], +[188439, 515, 12, 6], +[188522, 516, 2, 27], +[188528, 516, 3, 4], +[188636, 516, 6, 20], +[188694, 516, 8, 17], +[188713, 516, 9, 5], +[188899, 517, 3, 10], +[188952, 517, 5, 2], +[188957, 517, 5, 7], +[188996, 517, 6, 15], +[189106, 517, 10, 3], +[189225, 518, 1, 30], +[189284, 518, 3, 30], +[189330, 518, 5, 15], +[189402, 518, 7, 26], +[189433, 518, 8, 26], +[189625, 519, 3, 6], +[189721, 519, 6, 10], +[189847, 519, 10, 14], +[190026, 520, 4, 10], +[190091, 520, 6, 14], +[190213, 520, 10, 14], +[190318, 521, 1, 27], +[190362, 521, 3, 12], +[190545, 521, 9, 11], +[190581, 521, 10, 17], +[190690, 522, 2, 3], +[190842, 522, 7, 5], +[190889, 522, 8, 21], +[191086, 523, 3, 6], +[191206, 523, 7, 4], +[191207, 523, 7, 5], +[191283, 523, 9, 19], +[191329, 523, 11, 4], +[191404, 524, 1, 18], +[191479, 524, 4, 2], +[191624, 524, 8, 25], +[191800, 525, 2, 17], +[191842, 525, 3, 31], +[191985, 525, 8, 21], +[192184, 526, 3, 8], +[192197, 526, 3, 21], +[192371, 526, 9, 11], +[192567, 527, 3, 26], +[192707, 527, 8, 13], +[192773, 527, 10, 18], +[192935, 528, 3, 28], +[193080, 528, 8, 20], +[193093, 528, 9, 2], +[193216, 529, 1, 3], +[193385, 529, 6, 21], +[193573, 529, 12, 26], +[193722, 530, 5, 24], +[193751, 530, 6, 22], +[193880, 530, 10, 29], +[194063, 531, 4, 30], +[194110, 531, 6, 16], +[194174, 531, 8, 19], +[194280, 531, 12, 3], +[194461, 532, 6, 1], +[194574, 532, 9, 22], +[194670, 532, 12, 27], +[194737, 533, 3, 4], +[194853, 533, 6, 28], +[194875, 533, 7, 20], +[194911, 533, 8, 25], +[194978, 533, 10, 31], +[195036, 533, 12, 28], +[195098, 534, 2, 28], +[195112, 534, 3, 14], +[195242, 534, 7, 22], +[195296, 534, 9, 14], +[195365, 534, 11, 22], +[195434, 535, 1, 30], +[195521, 535, 4, 27], +[195544, 535, 5, 20], +[195601, 535, 7, 16], +[195699, 535, 10, 22], +[195721, 535, 11, 13], +[195750, 535, 12, 12], +[195785, 536, 1, 16], +[195853, 536, 3, 24], +[195994, 536, 8, 12], +[196176, 537, 2, 10], +[196294, 537, 6, 8], +[196435, 537, 10, 27], +[196620, 538, 4, 30], +[196759, 538, 9, 16], +[196774, 538, 10, 1], +[196969, 539, 4, 14], +[197036, 539, 6, 20], +[197165, 539, 10, 27], +[197263, 540, 2, 2], +[197421, 540, 7, 9], +[197527, 540, 10, 23], +[197623, 541, 1, 27], +[197750, 541, 6, 3], +[197767, 541, 6, 20], +[197786, 541, 7, 9], +[197986, 542, 1, 25], +[198133, 542, 6, 21], +[198281, 542, 11, 16], +[198449, 543, 5, 3], +[198543, 543, 8, 5], +[198599, 543, 9, 30], +[198643, 543, 11, 13], +[198791, 544, 4, 9], +[198906, 544, 8, 2], +[198957, 544, 9, 22], +[198978, 544, 10, 13], +[198995, 544, 10, 30], +[199049, 544, 12, 23], +[199082, 545, 1, 25], +[199170, 545, 4, 23], +[199307, 545, 9, 7], +[199485, 546, 3, 4], +[199512, 546, 3, 31], +[199608, 546, 7, 5], +[199748, 546, 11, 22], +[199775, 546, 12, 19], +[199848, 547, 3, 2], +[199896, 547, 4, 19], +[199969, 547, 7, 1], +[200087, 547, 10, 27], +[200201, 548, 2, 18], +[200291, 548, 5, 18], +[200425, 548, 9, 29], +[200547, 549, 1, 29], +[200601, 549, 3, 24], +[200748, 549, 8, 18], +[200776, 549, 9, 15], +[200809, 549, 10, 18], +[200837, 549, 11, 15], +[201017, 550, 5, 14], +[201023, 550, 5, 20], +[201187, 550, 10, 31], +[201277, 551, 1, 29], +[201433, 551, 7, 4], +[201526, 551, 10, 5], +[201541, 551, 10, 20], +[201658, 552, 2, 14], +[201830, 552, 8, 4], +[201986, 553, 1, 7], +[202156, 553, 6, 26], +[202352, 554, 1, 8], +[202530, 554, 7, 5], +[202550, 554, 7, 25], +[202601, 554, 9, 14], +[202662, 554, 11, 14], +[202736, 555, 1, 27], +[202898, 555, 7, 8], +[202909, 555, 7, 19], +[202989, 555, 10, 7], +[203162, 556, 3, 28], +[203204, 556, 5, 9], +[203226, 556, 5, 31], +[203346, 556, 9, 28], +[203431, 556, 12, 22], +[203594, 557, 6, 3], +[203615, 557, 6, 24], +[203803, 557, 12, 29], +[203857, 558, 2, 21], +[204012, 558, 7, 26], +[204032, 558, 8, 15], +[204107, 558, 10, 29], +[204153, 558, 12, 14], +[204236, 559, 3, 7], +[204241, 559, 3, 12], +[204367, 559, 7, 16], +[204502, 559, 11, 28], +[204503, 559, 11, 29], +[204654, 560, 4, 28], +[204813, 560, 10, 4], +[204874, 560, 12, 4], +[204913, 561, 1, 12], +[204927, 561, 1, 26], +[205101, 561, 7, 19], +[205266, 561, 12, 31], +[205283, 562, 1, 17], +[205404, 562, 5, 18], +[205550, 562, 10, 11], +[205611, 562, 12, 11], +[205795, 563, 6, 13], +[205863, 563, 8, 20], +[205884, 563, 9, 10], +[205930, 563, 10, 26], +[205936, 563, 11, 1], +[206066, 564, 3, 10], +[206205, 564, 7, 27], +[206222, 564, 8, 13], +[206277, 564, 10, 7], +[206350, 564, 12, 19], +[206521, 565, 6, 8], +[206709, 565, 12, 13], +[206898, 566, 6, 20], +[207062, 566, 12, 1], +[207092, 566, 12, 31], +[207147, 567, 2, 24], +[207197, 567, 4, 15], +[207204, 567, 4, 22], +[207355, 567, 9, 20], +[207413, 567, 11, 17], +[207515, 568, 2, 27], +[207517, 568, 2, 29], +[207674, 568, 8, 4], +[207806, 568, 12, 14], +[207846, 569, 1, 23], +[207943, 569, 4, 30], +[207975, 569, 6, 1], +[208151, 569, 11, 24], +[208233, 570, 2, 14], +[208261, 570, 3, 14], +[208360, 570, 6, 21], +[208482, 570, 10, 21], +[208496, 570, 11, 4], +[208624, 571, 3, 12], +[208771, 571, 8, 6], +[208901, 571, 12, 14], +[208926, 572, 1, 8], +[208985, 572, 3, 7], +[209172, 572, 9, 10], +[209211, 572, 10, 19], +[209396, 573, 4, 22], +[209580, 573, 10, 23], +[209680, 574, 1, 31], +[209751, 574, 4, 12], +[209884, 574, 8, 23], +[210029, 575, 1, 15], +[210150, 575, 5, 16], +[210173, 575, 6, 8], +[210182, 575, 6, 17], +[210291, 575, 10, 4], +[210337, 575, 11, 19], +[210469, 576, 3, 30], +[210637, 576, 9, 14], +[210696, 576, 11, 12], +[210878, 577, 5, 13], +[210881, 577, 5, 16], +[210950, 577, 7, 24], +[210975, 577, 8, 18], +[211030, 577, 10, 12], +[211061, 577, 11, 12], +[211256, 578, 5, 26], +[211318, 578, 7, 27], +[211369, 578, 9, 16], +[211542, 579, 3, 8], +[211590, 579, 4, 25], +[211732, 579, 9, 14], +[211758, 579, 10, 10], +[211843, 580, 1, 3], +[211992, 580, 5, 31], +[212100, 580, 9, 16], +[212155, 580, 11, 10], +[212203, 580, 12, 28], +[212397, 581, 7, 10], +[212438, 581, 8, 20], +[212562, 581, 12, 22], +[212611, 582, 2, 9], +[212715, 582, 5, 24], +[212765, 582, 7, 13], +[212828, 582, 9, 14], +[212880, 582, 11, 5], +[212894, 582, 11, 19], +[213041, 583, 4, 15], +[213047, 583, 4, 21], +[213082, 583, 5, 26], +[213126, 583, 7, 9], +[213164, 583, 8, 16], +[213174, 583, 8, 26], +[213372, 584, 3, 11], +[213537, 584, 8, 23], +[213737, 585, 3, 11], +[213848, 585, 6, 30], +[214033, 586, 1, 1], +[214115, 586, 3, 24], +[214118, 586, 3, 27], +[214158, 586, 5, 6], +[214202, 586, 6, 19], +[214285, 586, 9, 10], +[214324, 586, 10, 19], +[214360, 586, 11, 24], +[214474, 587, 3, 18], +[214552, 587, 6, 4], +[214750, 587, 12, 19], +[214877, 588, 4, 24], +[215036, 588, 9, 30], +[215082, 588, 11, 15], +[215229, 589, 4, 11], +[215241, 589, 4, 23], +[215433, 589, 11, 1], +[215454, 589, 11, 22], +[215499, 590, 1, 6], +[215625, 590, 5, 12], +[215744, 590, 9, 8], +[215815, 590, 11, 18], +[215979, 591, 5, 1], +[216083, 591, 8, 13], +[216252, 592, 1, 29], +[216316, 592, 4, 2], +[216358, 592, 5, 14], +[216491, 592, 9, 24], +[216568, 592, 12, 10], +[216702, 593, 4, 23], +[216847, 593, 9, 15], +[216858, 593, 9, 26], +[216884, 593, 10, 22], +[217064, 594, 4, 20], +[217104, 594, 5, 30], +[217220, 594, 9, 23], +[217299, 594, 12, 11], +[217491, 595, 6, 21], +[217498, 595, 6, 28], +[217502, 595, 7, 2], +[217657, 595, 12, 4], +[217825, 596, 5, 20], +[218012, 596, 11, 23], +[218157, 597, 4, 17], +[218199, 597, 5, 29], +[218366, 597, 11, 12], +[218405, 597, 12, 21], +[218439, 598, 1, 24], +[218474, 598, 2, 28], +[218514, 598, 4, 9], +[218538, 598, 5, 3], +[218603, 598, 7, 7], +[218625, 598, 7, 29], +[218711, 598, 10, 23], +[218803, 599, 1, 23], +[218871, 599, 4, 1], +[219071, 599, 10, 18], +[219207, 600, 3, 3], +[219243, 600, 4, 8], +[219356, 600, 7, 30], +[219379, 600, 8, 22], +[219476, 600, 11, 27], +[219493, 600, 12, 14], +[219675, 601, 6, 14], +[219844, 601, 11, 30], +[220040, 602, 6, 14], +[220136, 602, 9, 18], +[220158, 602, 10, 10], +[220296, 603, 2, 25], +[220450, 603, 7, 29], +[220506, 603, 9, 23], +[220530, 603, 10, 17], +[220633, 604, 1, 28], +[220638, 604, 2, 2], +[220715, 604, 4, 19], +[220808, 604, 7, 21], +[220820, 604, 8, 2], +[220860, 604, 9, 11], +[220891, 604, 10, 12], +[221030, 605, 2, 28], +[221145, 605, 6, 23], +[221339, 606, 1, 3], +[221366, 606, 1, 30], +[221478, 606, 5, 22], +[221612, 606, 10, 3], +[221726, 607, 1, 25], +[221876, 607, 6, 24], +[222020, 607, 11, 15], +[222091, 608, 1, 25], +[222167, 608, 4, 10], +[222224, 608, 6, 6], +[222380, 608, 11, 9], +[222484, 609, 2, 21], +[222644, 609, 7, 31], +[222802, 610, 1, 5], +[222883, 610, 3, 27], +[223045, 610, 9, 5], +[223120, 610, 11, 19], +[223171, 611, 1, 9], +[223228, 611, 3, 7], +[223324, 611, 6, 11], +[223362, 611, 7, 19], +[223427, 611, 9, 22], +[223444, 611, 10, 9], +[223619, 612, 4, 1], +[223637, 612, 4, 19], +[223672, 612, 5, 24], +[223720, 612, 7, 11], +[223876, 612, 12, 14], +[223943, 613, 2, 19], +[223975, 613, 3, 23], +[224077, 613, 7, 3], +[224248, 613, 12, 21], +[224427, 614, 6, 18], +[224615, 614, 12, 23], +[224797, 615, 6, 23], +[224841, 615, 8, 6], +[224890, 615, 9, 24], +[225053, 616, 3, 5], +[225242, 616, 9, 10], +[225273, 616, 10, 11], +[225299, 616, 11, 6], +[225409, 617, 2, 24], +[225557, 617, 7, 22], +[225590, 617, 8, 24], +[225625, 617, 9, 28], +[225666, 617, 11, 8], +[225825, 618, 4, 16], +[225859, 618, 5, 20], +[225973, 618, 9, 11], +[226097, 619, 1, 13], +[226216, 619, 5, 12], +[226380, 619, 10, 23], +[226473, 620, 1, 24], +[226506, 620, 2, 26], +[226562, 620, 4, 22], +[226577, 620, 5, 7], +[226663, 620, 8, 1], +[226859, 621, 2, 13], +[226959, 621, 5, 24], +[227154, 621, 12, 5], +[227183, 622, 1, 3], +[227251, 622, 3, 12], +[227273, 622, 4, 3], +[227364, 622, 7, 3], +[227488, 622, 11, 4], +[227578, 623, 2, 2], +[227594, 623, 2, 18], +[227691, 623, 5, 26], +[227705, 623, 6, 9], +[227813, 623, 9, 25], +[227957, 624, 2, 16], +[228052, 624, 5, 21], +[228125, 624, 8, 2], +[228226, 624, 11, 11], +[228231, 624, 11, 16], +[228384, 625, 4, 18], +[228532, 625, 9, 13], +[228715, 626, 3, 15], +[228898, 626, 9, 14], +[229047, 627, 2, 10], +[229153, 627, 5, 27], +[229284, 627, 10, 5], +[229432, 628, 3, 1], +[229559, 628, 7, 6], +[229742, 629, 1, 5], +[229930, 629, 7, 12], +[230041, 629, 10, 31], +[230074, 629, 12, 3], +[230163, 630, 3, 2], +[230299, 630, 7, 16], +[230394, 630, 10, 19], +[230590, 631, 5, 3], +[230693, 631, 8, 14], +[230736, 631, 9, 26], +[230908, 632, 3, 16], +[231021, 632, 7, 7], +[231141, 632, 11, 4], +[231178, 632, 12, 11], +[231312, 633, 4, 24], +[231330, 633, 5, 12], +[231349, 633, 5, 31], +[231536, 633, 12, 4], +[231672, 634, 4, 19], +[231813, 634, 9, 7], +[231980, 635, 2, 21], +[232112, 635, 7, 3], +[232119, 635, 7, 10], +[232130, 635, 7, 21], +[232175, 635, 9, 4], +[232320, 636, 1, 27], +[232334, 636, 2, 10], +[232338, 636, 2, 14], +[232518, 636, 8, 12], +[232567, 636, 9, 30], +[232656, 636, 12, 28], +[232798, 637, 5, 19], +[232906, 637, 9, 4], +[233081, 638, 2, 26], +[233211, 638, 7, 6], +[233391, 639, 1, 2], +[233542, 639, 6, 2], +[233639, 639, 9, 7], +[233815, 640, 3, 1], +[233941, 640, 7, 5], +[234130, 641, 1, 10], +[234214, 641, 4, 4], +[234249, 641, 5, 9], +[234270, 641, 5, 30], +[234291, 641, 6, 20], +[234455, 641, 12, 1], +[234504, 642, 1, 19], +[234536, 642, 2, 20], +[234674, 642, 7, 8], +[234852, 643, 1, 2], +[234955, 643, 4, 15], +[235132, 643, 10, 9], +[235206, 643, 12, 22], +[235302, 644, 3, 27], +[235479, 644, 9, 20], +[235563, 644, 12, 13], +[235584, 645, 1, 3], +[235760, 645, 6, 28], +[235781, 645, 7, 19], +[235891, 645, 11, 6], +[235900, 645, 11, 15], +[236028, 646, 3, 23], +[236050, 646, 4, 14], +[236152, 646, 7, 25], +[236275, 646, 11, 25], +[236331, 647, 1, 20], +[236373, 647, 3, 3], +[236567, 647, 9, 13], +[236596, 647, 10, 12], +[236760, 648, 3, 24], +[236829, 648, 6, 1], +[236857, 648, 6, 29], +[237048, 649, 1, 6], +[237241, 649, 7, 18], +[237304, 649, 9, 19], +[237463, 650, 2, 25], +[237615, 650, 7, 27], +[237768, 650, 12, 27], +[237889, 651, 4, 27], +[237977, 651, 7, 24], +[238082, 651, 11, 6], +[238153, 652, 1, 16], +[238295, 652, 6, 6], +[238338, 652, 7, 19], +[238535, 653, 2, 1], +[238578, 653, 3, 16], +[238673, 653, 6, 19], +[238694, 653, 7, 10], +[238784, 653, 10, 8], +[238915, 654, 2, 16], +[239102, 654, 8, 22], +[239157, 654, 10, 16], +[239338, 655, 4, 15], +[239425, 655, 7, 11], +[239604, 656, 1, 6], +[239768, 656, 6, 18], +[239776, 656, 6, 26], +[239888, 656, 10, 16], +[239890, 656, 10, 18], +[240084, 657, 4, 30], +[240220, 657, 9, 13], +[240375, 658, 2, 15], +[240379, 658, 2, 19], +[240473, 658, 5, 24], +[240562, 658, 8, 21], +[240591, 658, 9, 19], +[240638, 658, 11, 5], +[240803, 659, 4, 19], +[240891, 659, 7, 16], +[241060, 660, 1, 1], +[241100, 660, 2, 10], +[241199, 660, 5, 19], +[241366, 660, 11, 2], +[241510, 661, 3, 26], +[241563, 661, 5, 18], +[241663, 661, 8, 26], +[241784, 661, 12, 25], +[241790, 661, 12, 31], +[241857, 662, 3, 8], +[241915, 662, 5, 5], +[242028, 662, 8, 26], +[242087, 662, 10, 24], +[242249, 663, 4, 4], +[242431, 663, 10, 3], +[242605, 664, 3, 25], +[242775, 664, 9, 11], +[242953, 665, 3, 8], +[243056, 665, 6, 19], +[243206, 665, 11, 16], +[243218, 665, 11, 28], +[243275, 666, 1, 24], +[243321, 666, 3, 11], +[243480, 666, 8, 17], +[243666, 667, 2, 19], +[243708, 667, 4, 2], +[243766, 667, 5, 30], +[243785, 667, 6, 18], +[243887, 667, 9, 28], +[243953, 667, 12, 3], +[243971, 667, 12, 21], +[243981, 667, 12, 31], +[244144, 668, 6, 11], +[244249, 668, 9, 24], +[244445, 669, 4, 8], +[244605, 669, 9, 15], +[244691, 669, 12, 10], +[244869, 670, 6, 6], +[244904, 670, 7, 11], +[245001, 670, 10, 16], +[245084, 671, 1, 7], +[245252, 671, 6, 24], +[245332, 671, 9, 12], +[245353, 671, 10, 3], +[245475, 672, 2, 2], +[245599, 672, 6, 5], +[245769, 672, 11, 22], +[245924, 673, 4, 26], +[246070, 673, 9, 19], +[246086, 673, 10, 5], +[246260, 674, 3, 28], +[246383, 674, 7, 29], +[246573, 675, 2, 4], +[246650, 675, 4, 22], +[246733, 675, 7, 14], +[246743, 675, 7, 24], +[246891, 675, 12, 19], +[246929, 676, 1, 26], +[247016, 676, 4, 22], +[247086, 676, 7, 1], +[247126, 676, 8, 10], +[247225, 676, 11, 17], +[247364, 677, 4, 5], +[247393, 677, 5, 4], +[247446, 677, 6, 26], +[247513, 677, 9, 1], +[247520, 677, 9, 8], +[247711, 678, 3, 18], +[247822, 678, 7, 7], +[247916, 678, 10, 9], +[248050, 679, 2, 20], +[248072, 679, 3, 14], +[248087, 679, 3, 29], +[248209, 679, 7, 29], +[248373, 680, 1, 9], +[248567, 680, 7, 21], +[248599, 680, 8, 22], +[248725, 680, 12, 26], +[248789, 681, 2, 28], +[248834, 681, 4, 14], +[248845, 681, 4, 25], +[248994, 681, 9, 21], +[249010, 681, 10, 7], +[249139, 682, 2, 13], +[249187, 682, 4, 2], +[249372, 682, 10, 4], +[249376, 682, 10, 8], +[249551, 683, 4, 1], +[249674, 683, 8, 2], +[249680, 683, 8, 8], +[249707, 683, 9, 4], +[249812, 683, 12, 18], +[249999, 684, 6, 22], +[250155, 684, 11, 25], +[250311, 685, 4, 30], +[250499, 685, 11, 4], +[250670, 686, 4, 24], +[250848, 686, 10, 19], +[250898, 686, 12, 8], +[250937, 687, 1, 16], +[250973, 687, 2, 21], +[251003, 687, 3, 23], +[251193, 687, 9, 29], +[251364, 688, 3, 18], +[251473, 688, 7, 5], +[251525, 688, 8, 26], +[251535, 688, 9, 5], +[251636, 688, 12, 15], +[251667, 689, 1, 15], +[251822, 689, 6, 19], +[251844, 689, 7, 11], +[251954, 689, 10, 29], +[252034, 690, 1, 17], +[252051, 690, 2, 3], +[252162, 690, 5, 25], +[252189, 690, 6, 21], +[252236, 690, 8, 7], +[252414, 691, 2, 1], +[252509, 691, 5, 7], +[252520, 691, 5, 18], +[252658, 691, 10, 3], +[252664, 691, 10, 9], +[252679, 691, 10, 24], +[252780, 692, 2, 2], +[252836, 692, 3, 29], +[252912, 692, 6, 13], +[253089, 692, 12, 7], +[253132, 693, 1, 19], +[253308, 693, 7, 14], +[253445, 693, 11, 28], +[253446, 693, 11, 29], +[253464, 693, 12, 17], +[253577, 694, 4, 9], +[253631, 694, 6, 2], +[253774, 694, 10, 23], +[253963, 695, 4, 30], +[254105, 695, 9, 19], +[254151, 695, 11, 4], +[254224, 696, 1, 16], +[254247, 696, 2, 8], +[254310, 696, 4, 11], +[254445, 696, 8, 24], +[254607, 697, 2, 2], +[254632, 697, 2, 27], +[254826, 697, 9, 9], +[254857, 697, 10, 10], +[255010, 698, 3, 12], +[255198, 698, 9, 16], +[255226, 698, 10, 14], +[255281, 698, 12, 8], +[255443, 699, 5, 19], +[255466, 699, 6, 11], +[255589, 699, 10, 12], +[255647, 699, 12, 9], +[255758, 700, 3, 30], +[255958, 700, 10, 16], +[255985, 700, 11, 12], +[256185, 701, 5, 31], +[256186, 701, 6, 1], +[256335, 701, 10, 28], +[256388, 701, 12, 20], +[256466, 702, 3, 8], +[256581, 702, 7, 1], +[256601, 702, 7, 21], +[256791, 703, 1, 27], +[256975, 703, 7, 30], +[256985, 703, 8, 9], +[257133, 704, 1, 4], +[257224, 704, 4, 4], +[257381, 704, 9, 8], +[257492, 704, 12, 28], +[257541, 705, 2, 15], +[257628, 705, 5, 13], +[257711, 705, 8, 4], +[257819, 705, 11, 20], +[257910, 706, 2, 19], +[258056, 706, 7, 15], +[258188, 706, 11, 24], +[258262, 707, 2, 6], +[258306, 707, 3, 22], +[258349, 707, 5, 4], +[258535, 707, 11, 6], +[258544, 707, 11, 15], +[258554, 707, 11, 25], +[258635, 708, 2, 14], +[258656, 708, 3, 6], +[258748, 708, 6, 6], +[258880, 708, 10, 16], +[258979, 709, 1, 23], +[259071, 709, 4, 25], +[259112, 709, 6, 5], +[259301, 709, 12, 11], +[259309, 709, 12, 19], +[259490, 710, 6, 18], +[259584, 710, 9, 20], +[259689, 711, 1, 3], +[259887, 711, 7, 20], +[259970, 711, 10, 11], +[260145, 712, 4, 3], +[260340, 712, 10, 15], +[260408, 712, 12, 22], +[260477, 713, 3, 1], +[260608, 713, 7, 10], +[260703, 713, 10, 13], +[260888, 714, 4, 16], +[260949, 714, 6, 16], +[260956, 714, 6, 23], +[261027, 714, 9, 2], +[261108, 714, 11, 22], +[261297, 715, 5, 30], +[261460, 715, 11, 9], +[261654, 716, 5, 21], +[261672, 716, 6, 8], +[261774, 716, 9, 18], +[261919, 717, 2, 10], +[262069, 717, 7, 10], +[262263, 718, 1, 20], +[262395, 718, 6, 1], +[262534, 718, 10, 18], +[262590, 718, 12, 13], +[262750, 719, 5, 22], +[262779, 719, 6, 20], +[262954, 719, 12, 12], +[263036, 720, 3, 3], +[263072, 720, 4, 8], +[263198, 720, 8, 12], +[263303, 720, 11, 25], +[263361, 721, 1, 22], +[263362, 721, 1, 23], +[263552, 721, 8, 1], +[263746, 722, 2, 11], +[263890, 722, 7, 5], +[264078, 723, 1, 9], +[264254, 723, 7, 4], +[264314, 723, 9, 2], +[264508, 724, 3, 14], +[264673, 724, 8, 26], +[264830, 725, 1, 30], +[264910, 725, 4, 20], +[264941, 725, 5, 21], +[265038, 725, 8, 26], +[265203, 726, 2, 7], +[265308, 726, 5, 23], +[265416, 726, 9, 8], +[265542, 727, 1, 12], +[265659, 727, 5, 9], +[265759, 727, 8, 17], +[265883, 727, 12, 19], +[266018, 728, 5, 2], +[266030, 728, 5, 14], +[266132, 728, 8, 24], +[266177, 728, 10, 8], +[266237, 728, 12, 7], +[266307, 729, 2, 15], +[266483, 729, 8, 10], +[266501, 729, 8, 28], +[266512, 729, 9, 8], +[266605, 729, 12, 10], +[266634, 730, 1, 8], +[266756, 730, 5, 10], +[266867, 730, 8, 29], +[267036, 731, 2, 14], +[267139, 731, 5, 28], +[267287, 731, 10, 23], +[267332, 731, 12, 7], +[267418, 732, 3, 2], +[267613, 732, 9, 13], +[267756, 733, 2, 3], +[267829, 733, 4, 17], +[267834, 733, 4, 22], +[267914, 733, 7, 11], +[268059, 733, 12, 3], +[268198, 734, 4, 21], +[268240, 734, 6, 2], +[268293, 734, 7, 25], +[268320, 734, 8, 21], +[268433, 734, 12, 12], +[268459, 735, 1, 7], +[268537, 735, 3, 26], +[268648, 735, 7, 15], +[268756, 735, 10, 31], +[268801, 735, 12, 15], +[268805, 735, 12, 19], +[268998, 736, 6, 29], +[269162, 736, 12, 10], +[269292, 737, 4, 19], +[269387, 737, 7, 23], +[269466, 737, 10, 10], +[269513, 737, 11, 26], +[269657, 738, 4, 19], +[269796, 738, 9, 5], +[269960, 739, 2, 16], +[270156, 739, 8, 31], +[270251, 739, 12, 4], +[270276, 739, 12, 29], +[270380, 740, 4, 11], +[270473, 740, 7, 13], +[270614, 740, 12, 1], +[270724, 741, 3, 21], +[270807, 741, 6, 12], +[270881, 741, 8, 25], +[271014, 742, 1, 5], +[271027, 742, 1, 18], +[271058, 742, 2, 18], +[271119, 742, 4, 20], +[271206, 742, 7, 16], +[271358, 742, 12, 15], +[271496, 743, 5, 2], +[271681, 743, 11, 3], +[271803, 744, 3, 4], +[271929, 744, 7, 8], +[272071, 744, 11, 27], +[272175, 745, 3, 11], +[272257, 745, 6, 1], +[272419, 745, 11, 10], +[272491, 746, 1, 21], +[272588, 746, 4, 28], +[272711, 746, 8, 29], +[272738, 746, 9, 25], +[272758, 746, 10, 15], +[272927, 747, 4, 2], +[273076, 747, 8, 29], +[273258, 748, 2, 27], +[273379, 748, 6, 27], +[273459, 748, 9, 15], +[273636, 749, 3, 11], +[273756, 749, 7, 9], +[273829, 749, 9, 20], +[274000, 750, 3, 10], +[274146, 750, 8, 3], +[274148, 750, 8, 5], +[274235, 750, 10, 31], +[274368, 751, 3, 13], +[274393, 751, 4, 7], +[274574, 751, 10, 5], +[274667, 752, 1, 6], +[274736, 752, 3, 15], +[274784, 752, 5, 2], +[274934, 752, 9, 29], +[274980, 752, 11, 14], +[275006, 752, 12, 10], +[275030, 753, 1, 3], +[275202, 753, 6, 24], +[275333, 753, 11, 2], +[275433, 754, 2, 10], +[275607, 754, 8, 3], +[275774, 755, 1, 17], +[275837, 755, 3, 21], +[275843, 755, 3, 27], +[276018, 755, 9, 18], +[276165, 756, 2, 12], +[276226, 756, 4, 13], +[276397, 756, 10, 1], +[276526, 757, 2, 7], +[276698, 757, 7, 29], +[276878, 758, 1, 25], +[276911, 758, 2, 27], +[277080, 758, 8, 15], +[277280, 759, 3, 3], +[277325, 759, 4, 17], +[277432, 759, 8, 2], +[277583, 759, 12, 31], +[277723, 760, 5, 19], +[277895, 760, 11, 7], +[277962, 761, 1, 13], +[277974, 761, 1, 25], +[278109, 761, 6, 9], +[278257, 761, 11, 4], +[278313, 761, 12, 30], +[278363, 762, 2, 18], +[278533, 762, 8, 7], +[278590, 762, 10, 3], +[278784, 763, 4, 15], +[278927, 763, 9, 5], +[279125, 764, 3, 21], +[279254, 764, 7, 28], +[279321, 764, 10, 3], +[279471, 765, 3, 2], +[279641, 765, 8, 19], +[279841, 766, 3, 7], +[279975, 766, 7, 19], +[279992, 766, 8, 5], +[280138, 766, 12, 29], +[280183, 767, 2, 12], +[280358, 767, 8, 6], +[280412, 767, 9, 29], +[280467, 767, 11, 23], +[280622, 768, 4, 26], +[280716, 768, 7, 29], +[280914, 769, 2, 12], +[281027, 769, 6, 5], +[281110, 769, 8, 27], +[281186, 769, 11, 11], +[281299, 770, 3, 4], +[281353, 770, 4, 27], +[281384, 770, 5, 28], +[281466, 770, 8, 18], +[281643, 771, 2, 11], +[281666, 771, 3, 6], +[281739, 771, 5, 18], +[281756, 771, 6, 4], +[281822, 771, 8, 9], +[281865, 771, 9, 21], +[281873, 771, 9, 29], +[281915, 771, 11, 10], +[281931, 771, 11, 26], +[281989, 772, 1, 23], +[282160, 772, 7, 12], +[282242, 772, 10, 2], +[282396, 773, 3, 5], +[282481, 773, 5, 29], +[282585, 773, 9, 10], +[282746, 774, 2, 18], +[282924, 774, 8, 15], +[283005, 774, 11, 4], +[283146, 775, 3, 25], +[283235, 775, 6, 22], +[283363, 775, 10, 28], +[283460, 776, 2, 2], +[283562, 776, 5, 14], +[283645, 776, 8, 5], +[283696, 776, 9, 25], +[283827, 777, 2, 3], +[283998, 777, 7, 24], +[284129, 777, 12, 2], +[284156, 777, 12, 29], +[284326, 778, 6, 17], +[284394, 778, 8, 24], +[284474, 778, 11, 12], +[284615, 779, 4, 2], +[284641, 779, 4, 28], +[284644, 779, 5, 1], +[284801, 779, 10, 5], +[284949, 780, 3, 1], +[285065, 780, 6, 25], +[285197, 780, 11, 4], +[285234, 780, 12, 11], +[285399, 781, 5, 25], +[285400, 781, 5, 26], +[285444, 781, 7, 9], +[285640, 782, 1, 21], +[285686, 782, 3, 8], +[285862, 782, 8, 31], +[286005, 783, 1, 21], +[286107, 783, 5, 3], +[286117, 783, 5, 13], +[286130, 783, 5, 26], +[286226, 783, 8, 30], +[286250, 783, 9, 23], +[286392, 784, 2, 12], +[286525, 784, 6, 24], +[286713, 784, 12, 29], +[286746, 785, 1, 31], +[286819, 785, 4, 14], +[286830, 785, 4, 25], +[286948, 785, 8, 21], +[287106, 786, 1, 26], +[287219, 786, 5, 19], +[287227, 786, 5, 27], +[287359, 786, 10, 6], +[287401, 786, 11, 17], +[287485, 787, 2, 9], +[287643, 787, 7, 17], +[287759, 787, 11, 10], +[287819, 788, 1, 9], +[287991, 788, 6, 29], +[288064, 788, 9, 10], +[288191, 789, 1, 15], +[288352, 789, 6, 25], +[288517, 789, 12, 7], +[288685, 790, 5, 24], +[288808, 790, 9, 24], +[288854, 790, 11, 9], +[288868, 790, 11, 23], +[288965, 791, 2, 28], +[289163, 791, 9, 14], +[289279, 792, 1, 8], +[289307, 792, 2, 5], +[289444, 792, 6, 21], +[289540, 792, 9, 25], +[289579, 792, 11, 3], +[289708, 793, 3, 12], +[289711, 793, 3, 15], +[289733, 793, 4, 6], +[289870, 793, 8, 21], +[289983, 793, 12, 12], +[290158, 794, 6, 5], +[290356, 794, 12, 20], +[290511, 795, 5, 24], +[290609, 795, 8, 30], +[290641, 795, 10, 1], +[290715, 795, 12, 14], +[290905, 796, 6, 21], +[291014, 796, 10, 8], +[291101, 797, 1, 3], +[291158, 797, 3, 1], +[291187, 797, 3, 30], +[291349, 797, 9, 8], +[291410, 797, 11, 8], +[291455, 797, 12, 23], +[291623, 798, 6, 9], +[291657, 798, 7, 13], +[291687, 798, 8, 12], +[291769, 798, 11, 2], +[291808, 798, 12, 11], +[291943, 799, 4, 25], +[291974, 799, 5, 26], +[292076, 799, 9, 5], +[292242, 800, 2, 18], +[292272, 800, 3, 19], +[292348, 800, 6, 3], +[292416, 800, 8, 10], +[292581, 801, 1, 22], +[292647, 801, 3, 29], +[292782, 801, 8, 11], +[292825, 801, 9, 23], +[292868, 801, 11, 5], +[292887, 801, 11, 24], +[292970, 802, 2, 15], +[293001, 802, 3, 18], +[293131, 802, 7, 26], +[293229, 802, 11, 1], +[293285, 802, 12, 27], +[293332, 803, 2, 12], +[293391, 803, 4, 12], +[293407, 803, 4, 28], +[293457, 803, 6, 17], +[293633, 803, 12, 10], +[293740, 804, 3, 26], +[293892, 804, 8, 25], +[293893, 804, 8, 26], +[293896, 804, 8, 29], +[293948, 804, 10, 20], +[294038, 805, 1, 18], +[294158, 805, 5, 18], +[294310, 805, 10, 17], +[294433, 806, 2, 17], +[294603, 806, 8, 6], +[294678, 806, 10, 20], +[294756, 807, 1, 6], +[294885, 807, 5, 15], +[294978, 807, 8, 16], +[295003, 807, 9, 10], +[295198, 808, 3, 23], +[295344, 808, 8, 16], +[295466, 808, 12, 16], +[295646, 809, 6, 14], +[295829, 809, 12, 14], +[295911, 810, 3, 6], +[295953, 810, 4, 17], +[296052, 810, 7, 25], +[296225, 811, 1, 14], +[296312, 811, 4, 11], +[296455, 811, 9, 1], +[296521, 811, 11, 6], +[296700, 812, 5, 3], +[296866, 812, 10, 16], +[296892, 812, 11, 11], +[296983, 813, 2, 10], +[297158, 813, 8, 4], +[297259, 813, 11, 13], +[297407, 814, 4, 10], +[297426, 814, 4, 29], +[297620, 814, 11, 9], +[297625, 814, 11, 14], +[297814, 815, 5, 22], +[297938, 815, 9, 23], +[298079, 816, 2, 11], +[298204, 816, 6, 15], +[298277, 816, 8, 27], +[298408, 817, 1, 5], +[298510, 817, 4, 17], +[298656, 817, 9, 10], +[298840, 818, 3, 13], +[298876, 818, 4, 18], +[298993, 818, 8, 13], +[299145, 819, 1, 12], +[299245, 819, 4, 22], +[299389, 819, 9, 13], +[299564, 820, 3, 6], +[299595, 820, 4, 6], +[299623, 820, 5, 4], +[299742, 820, 8, 31], +[299911, 821, 2, 16], +[300100, 821, 8, 24], +[300224, 821, 12, 26], +[300395, 822, 6, 15], +[300487, 822, 9, 15], +[300546, 822, 11, 13], +[300697, 823, 4, 13], +[300753, 823, 6, 8], +[300819, 823, 8, 13], +[301017, 824, 2, 27], +[301102, 824, 5, 22], +[301259, 824, 10, 26], +[301352, 825, 1, 27], +[301426, 825, 4, 11], +[301500, 825, 6, 24], +[301612, 825, 10, 14], +[301639, 825, 11, 10], +[301667, 825, 12, 8], +[301683, 825, 12, 24], +[301870, 826, 6, 29], +[301901, 826, 7, 30], +[301986, 826, 10, 23], +[302035, 826, 12, 11], +[302100, 827, 2, 14], +[302201, 827, 5, 26], +[302333, 827, 10, 5], +[302500, 828, 3, 20], +[302666, 828, 9, 2], +[302712, 828, 10, 18], +[302811, 829, 1, 25], +[302889, 829, 4, 13], +[302986, 829, 7, 19], +[303163, 830, 1, 12], +[303313, 830, 6, 11], +[303471, 830, 11, 16], +[303510, 830, 12, 25], +[303536, 831, 1, 20], +[303712, 831, 7, 15], +[303852, 831, 12, 2], +[303953, 832, 3, 12], +[304021, 832, 5, 19], +[304060, 832, 6, 27], +[304085, 832, 7, 22], +[304164, 832, 10, 9], +[304299, 833, 2, 21], +[304336, 833, 3, 30], +[304367, 833, 4, 30], +[304447, 833, 7, 19], +[304585, 833, 12, 4], +[304624, 834, 1, 12], +[304789, 834, 6, 26], +[304959, 834, 12, 13], +[305001, 835, 1, 24], +[305191, 835, 8, 2], +[305228, 835, 9, 8], +[305402, 836, 2, 29], +[305451, 836, 4, 18], +[305503, 836, 6, 9], +[305554, 836, 7, 30], +[305563, 836, 8, 8], +[305618, 836, 10, 2], +[305652, 836, 11, 5], +[305680, 836, 12, 3], +[305719, 837, 1, 11], +[305751, 837, 2, 12], +[305799, 837, 4, 1], +[305898, 837, 7, 9], +[306069, 837, 12, 27], +[306113, 838, 2, 9], +[306236, 838, 6, 12], +[306292, 838, 8, 7], +[306464, 839, 1, 26], +[306555, 839, 4, 27], +[306568, 839, 5, 10], +[306669, 839, 8, 19], +[306845, 840, 2, 11], +[307042, 840, 8, 26], +[307225, 841, 2, 25], +[307354, 841, 7, 4], +[307361, 841, 7, 11], +[307422, 841, 9, 10], +[307542, 842, 1, 8], +[307705, 842, 6, 20], +[307887, 842, 12, 19], +[307933, 843, 2, 3], +[308063, 843, 6, 13], +[308235, 843, 12, 2], +[308392, 844, 5, 7], +[308419, 844, 6, 3], +[308559, 844, 10, 21], +[308583, 844, 11, 14], +[308639, 845, 1, 9], +[308792, 845, 6, 11], +[308893, 845, 9, 20], +[309020, 846, 1, 25], +[309057, 846, 3, 3], +[309130, 846, 5, 15], +[309175, 846, 6, 29], +[309373, 847, 1, 13], +[309472, 847, 4, 22], +[309541, 847, 6, 30], +[309571, 847, 7, 30], +[309748, 848, 1, 23], +[309923, 848, 7, 16], +[310015, 848, 10, 16], +[310104, 849, 1, 13], +[310209, 849, 4, 28], +[310218, 849, 5, 7], +[310314, 849, 8, 11], +[310352, 849, 9, 18], +[310438, 849, 12, 13], +[310463, 850, 1, 7], +[310468, 850, 1, 12], +[310597, 850, 5, 21], +[310754, 850, 10, 25], +[310837, 851, 1, 16], +[310994, 851, 6, 22], +[311169, 851, 12, 14], +[311357, 852, 6, 19], +[311438, 852, 9, 8], +[311635, 853, 3, 24], +[311816, 853, 9, 21], +[311823, 853, 9, 28], +[311961, 854, 2, 13], +[312065, 854, 5, 28], +[312227, 854, 11, 6], +[312406, 855, 5, 4], +[312493, 855, 7, 30], +[312554, 855, 9, 29], +[312602, 855, 11, 16], +[312759, 856, 4, 21], +[312906, 856, 9, 15], +[312912, 856, 9, 21], +[312962, 856, 11, 10], +[313086, 857, 3, 14], +[313206, 857, 7, 12], +[313298, 857, 10, 12], +[313362, 857, 12, 15], +[313497, 858, 4, 29], +[313617, 858, 8, 27], +[313796, 859, 2, 22], +[313962, 859, 8, 7], +[314041, 859, 10, 25], +[314077, 859, 11, 30], +[314178, 860, 3, 10], +[314253, 860, 5, 24], +[314377, 860, 9, 25], +[314391, 860, 10, 9], +[314455, 860, 12, 12], +[314614, 861, 5, 20], +[314785, 861, 11, 7], +[314863, 862, 1, 24], +[314996, 862, 6, 6], +[315049, 862, 7, 29], +[315109, 862, 9, 27], +[315251, 863, 2, 16], +[315437, 863, 8, 21], +[315569, 863, 12, 31], +[315624, 864, 2, 24], +[315778, 864, 7, 27], +[315887, 864, 11, 13], +[316055, 865, 4, 30], +[316174, 865, 8, 27], +[316210, 865, 10, 2], +[316322, 866, 1, 22], +[316455, 866, 6, 4], +[316485, 866, 7, 4], +[316538, 866, 8, 26], +[316621, 866, 11, 17], +[316748, 867, 3, 24], +[316939, 867, 10, 1], +[317056, 868, 1, 26], +[317239, 868, 7, 27], +[317316, 868, 10, 12], +[317367, 868, 12, 2], +[317454, 869, 2, 27], +[317504, 869, 4, 18], +[317560, 869, 6, 13], +[317577, 869, 6, 30], +[317675, 869, 10, 6], +[317733, 869, 12, 3], +[317759, 869, 12, 29], +[317950, 870, 7, 8], +[317959, 870, 7, 17], +[318098, 870, 12, 3], +[318233, 871, 4, 17], +[318329, 871, 7, 22], +[318511, 872, 1, 20], +[318561, 872, 3, 10], +[318589, 872, 4, 7], +[318767, 872, 10, 2], +[318900, 873, 2, 12], +[318973, 873, 4, 26], +[319011, 873, 6, 3], +[319113, 873, 9, 13], +[319249, 874, 1, 27], +[319367, 874, 5, 25], +[319503, 874, 10, 8], +[319528, 874, 11, 2], +[319648, 875, 3, 2], +[319670, 875, 3, 24], +[319822, 875, 8, 23], +[320012, 876, 2, 29], +[320122, 876, 6, 18], +[320300, 876, 12, 13], +[320481, 877, 6, 12], +[320532, 877, 8, 2], +[320712, 878, 1, 29], +[320876, 878, 7, 12], +[320880, 878, 7, 16], +[320901, 878, 8, 6], +[321082, 879, 2, 3], +[321236, 879, 7, 7], +[321326, 879, 10, 5], +[321377, 879, 11, 25], +[321381, 879, 11, 29], +[321390, 879, 12, 8], +[321498, 880, 3, 25], +[321650, 880, 8, 24], +[321659, 880, 9, 2], +[321809, 881, 1, 30], +[321894, 881, 4, 25], +[322072, 881, 10, 20], +[322109, 881, 11, 26], +[322248, 882, 4, 14], +[322268, 882, 5, 4], +[322321, 882, 6, 26], +[322386, 882, 8, 30], +[322397, 882, 9, 10], +[322488, 882, 12, 10], +[322623, 883, 4, 24], +[322786, 883, 10, 4], +[322844, 883, 12, 1], +[323028, 884, 6, 2], +[323088, 884, 8, 1], +[323204, 884, 11, 25], +[323402, 885, 6, 11], +[323487, 885, 9, 4], +[323665, 886, 3, 1], +[323856, 886, 9, 8], +[323900, 886, 10, 22], +[323957, 886, 12, 18], +[324135, 887, 6, 14], +[324333, 887, 12, 29], +[324511, 888, 6, 24], +[324671, 888, 12, 1], +[324763, 889, 3, 3], +[324870, 889, 6, 18], +[324996, 889, 10, 22], +[325049, 889, 12, 14], +[325054, 889, 12, 19], +[325059, 889, 12, 24], +[325079, 890, 1, 13], +[325214, 890, 5, 28], +[325364, 890, 10, 25], +[325449, 891, 1, 18], +[325512, 891, 3, 22], +[325581, 891, 5, 30], +[325717, 891, 10, 13], +[325784, 891, 12, 19], +[325832, 892, 2, 5], +[325843, 892, 2, 16], +[326021, 892, 8, 12], +[326188, 893, 1, 26], +[326273, 893, 4, 21], +[326355, 893, 7, 12], +[326363, 893, 7, 20], +[326523, 893, 12, 27], +[326545, 894, 1, 18], +[326636, 894, 4, 19], +[326766, 894, 8, 27], +[326918, 895, 1, 26], +[326924, 895, 2, 1], +[327104, 895, 7, 31], +[327195, 895, 10, 30], +[327364, 896, 4, 16], +[327547, 896, 10, 16], +[327708, 897, 3, 26], +[327894, 897, 9, 28], +[328063, 898, 3, 16], +[328129, 898, 5, 21], +[328287, 898, 10, 26], +[328465, 899, 4, 22], +[328471, 899, 4, 28], +[328539, 899, 7, 5], +[328601, 899, 9, 5], +[328730, 900, 1, 12], +[328903, 900, 7, 4], +[329011, 900, 10, 20], +[329075, 900, 12, 23], +[329097, 901, 1, 14], +[329256, 901, 6, 22], +[329315, 901, 8, 20], +[329502, 902, 2, 23], +[329618, 902, 6, 19], +[329812, 902, 12, 30], +[329815, 903, 1, 2], +[329958, 903, 5, 25], +[330143, 903, 11, 26], +[330288, 904, 4, 19], +[330367, 904, 7, 7], +[330438, 904, 9, 16], +[330566, 905, 1, 22], +[330755, 905, 7, 30], +[330759, 905, 8, 3], +[330781, 905, 8, 25], +[330934, 906, 1, 25], +[331037, 906, 5, 8], +[331040, 906, 5, 11], +[331196, 906, 10, 14], +[331219, 906, 11, 6], +[331237, 906, 11, 24], +[331384, 907, 4, 20], +[331548, 907, 10, 1], +[331677, 908, 2, 7], +[331817, 908, 6, 26], +[331843, 908, 7, 22], +[331873, 908, 8, 21], +[331886, 908, 9, 3], +[331972, 908, 11, 28], +[332145, 909, 5, 20], +[332223, 909, 8, 6], +[332371, 910, 1, 1], +[332570, 910, 7, 19], +[332766, 911, 1, 31], +[332892, 911, 6, 6], +[333046, 911, 11, 7], +[333066, 911, 11, 27], +[333100, 911, 12, 31], +[333188, 912, 3, 28], +[333343, 912, 8, 30], +[333484, 913, 1, 18], +[333630, 913, 6, 13], +[333759, 913, 10, 20], +[333871, 914, 2, 9], +[333993, 914, 6, 11], +[334088, 914, 9, 14], +[334202, 915, 1, 6], +[334211, 915, 1, 15], +[334382, 915, 7, 5], +[334450, 915, 9, 11], +[334568, 916, 1, 7], +[334670, 916, 4, 18], +[334697, 916, 5, 15], +[334790, 916, 8, 16], +[334982, 917, 2, 24], +[335177, 917, 9, 7], +[335198, 917, 9, 28], +[335254, 917, 11, 23], +[335427, 918, 5, 15], +[335530, 918, 8, 26], +[335683, 919, 1, 26], +[335813, 919, 6, 5], +[335972, 919, 11, 11], +[336045, 920, 1, 23], +[336046, 920, 1, 24], +[336160, 920, 5, 17], +[336230, 920, 7, 26], +[336292, 920, 9, 26], +[336357, 920, 11, 30], +[336382, 920, 12, 25], +[336409, 921, 1, 21], +[336451, 921, 3, 4], +[336472, 921, 3, 25], +[336505, 921, 4, 27], +[336682, 921, 10, 21], +[336707, 921, 11, 15], +[336764, 922, 1, 11], +[336901, 922, 5, 28], +[336948, 922, 7, 14], +[336960, 922, 7, 26], +[336972, 922, 8, 7], +[337029, 922, 10, 3], +[337072, 922, 11, 15], +[337200, 923, 3, 23], +[337389, 923, 9, 28], +[337534, 924, 2, 20], +[337707, 924, 8, 11], +[337719, 924, 8, 23], +[337755, 924, 9, 28], +[337796, 924, 11, 8], +[337861, 925, 1, 12], +[338051, 925, 7, 21], +[338134, 925, 10, 12], +[338218, 926, 1, 4], +[338325, 926, 4, 21], +[338345, 926, 5, 11], +[338425, 926, 7, 30], +[338575, 926, 12, 27], +[338696, 927, 4, 27], +[338758, 927, 6, 28], +[338893, 927, 11, 10], +[338981, 928, 2, 6], +[339179, 928, 8, 22], +[339281, 928, 12, 2], +[339344, 929, 2, 3], +[339476, 929, 6, 15], +[339522, 929, 7, 31], +[339633, 929, 11, 19], +[339692, 930, 1, 17], +[339846, 930, 6, 20], +[339857, 930, 7, 1], +[340027, 930, 12, 18], +[340135, 931, 4, 5], +[340167, 931, 5, 7], +[340190, 931, 5, 30], +[340385, 931, 12, 11], +[340506, 932, 4, 10], +[340553, 932, 5, 27], +[340699, 932, 10, 20], +[340770, 932, 12, 30], +[340811, 933, 2, 9], +[340976, 933, 7, 24], +[341153, 934, 1, 17], +[341232, 934, 4, 6], +[341345, 934, 7, 28], +[341456, 934, 11, 16], +[341469, 934, 11, 29], +[341549, 935, 2, 17], +[341656, 935, 6, 4], +[341703, 935, 7, 21], +[341895, 936, 1, 29], +[342028, 936, 6, 10], +[342072, 936, 7, 24], +[342167, 936, 10, 27], +[342317, 937, 3, 26], +[342412, 937, 6, 29], +[342480, 937, 9, 5], +[342663, 938, 3, 7], +[342664, 938, 3, 8], +[342854, 938, 9, 14], +[343032, 939, 3, 11], +[343067, 939, 4, 15], +[343082, 939, 4, 30], +[343135, 939, 6, 22], +[343157, 939, 7, 14], +[343305, 939, 12, 9], +[343346, 940, 1, 19], +[343512, 940, 7, 3], +[343682, 940, 12, 20], +[343775, 941, 3, 23], +[343785, 941, 4, 2], +[343960, 941, 9, 24], +[344005, 941, 11, 8], +[344156, 942, 4, 8], +[344189, 942, 5, 11], +[344348, 942, 10, 17], +[344521, 943, 4, 8], +[344538, 943, 4, 25], +[344614, 943, 7, 10], +[344791, 944, 1, 3], +[344827, 944, 2, 8], +[344957, 944, 6, 17], +[345107, 944, 11, 14], +[345158, 945, 1, 4], +[345303, 945, 5, 29], +[345469, 945, 11, 11], +[345556, 946, 2, 6], +[345558, 946, 2, 8], +[345737, 946, 8, 6], +[345756, 946, 8, 25], +[345770, 946, 9, 8], +[345798, 946, 10, 6], +[345972, 947, 3, 29], +[346053, 947, 6, 18], +[346171, 947, 10, 14], +[346336, 948, 3, 27], +[346533, 948, 10, 10], +[346570, 948, 11, 16], +[346649, 949, 2, 3], +[346798, 949, 7, 2], +[346919, 949, 10, 31], +[347015, 950, 2, 4], +[347130, 950, 5, 30], +[347214, 950, 8, 22], +[347344, 950, 12, 30], +[347439, 951, 4, 4], +[347442, 951, 4, 7], +[347633, 951, 10, 15], +[347753, 952, 2, 12], +[347843, 952, 5, 12], +[347872, 952, 6, 10], +[347884, 952, 6, 22], +[348036, 952, 11, 21], +[348059, 952, 12, 14], +[348096, 953, 1, 20], +[348143, 953, 3, 8], +[348310, 953, 8, 22], +[348374, 953, 10, 25], +[348506, 954, 3, 6], +[348639, 954, 7, 17], +[348670, 954, 8, 17], +[348839, 955, 2, 2], +[348954, 955, 5, 28], +[348959, 955, 6, 2], +[349059, 955, 9, 10], +[349141, 955, 12, 1], +[349293, 956, 5, 1], +[349361, 956, 7, 8], +[349412, 956, 8, 28], +[349593, 957, 2, 25], +[349631, 957, 4, 4], +[349810, 957, 9, 30], +[349841, 957, 10, 31], +[349994, 958, 4, 2], +[350133, 958, 8, 19], +[350271, 959, 1, 4], +[350353, 959, 3, 27], +[350358, 959, 4, 1], +[350420, 959, 6, 2], +[350431, 959, 6, 13], +[350607, 959, 12, 6], +[350752, 960, 4, 29], +[350894, 960, 9, 18], +[350934, 960, 10, 28], +[350937, 960, 10, 31], +[351125, 961, 5, 7], +[351211, 961, 8, 1], +[351257, 961, 9, 16], +[351405, 962, 2, 11], +[351541, 962, 6, 27], +[351629, 962, 9, 23], +[351742, 963, 1, 14], +[351791, 963, 3, 4], +[351916, 963, 7, 7], +[351921, 963, 7, 12], +[352086, 963, 12, 24], +[352117, 964, 1, 24], +[352193, 964, 4, 9], +[352343, 964, 9, 6], +[352442, 964, 12, 14], +[352632, 965, 6, 22], +[352816, 965, 12, 23], +[352893, 966, 3, 10], +[352979, 966, 6, 4], +[353176, 966, 12, 18], +[353259, 967, 3, 11], +[353410, 967, 8, 9], +[353427, 967, 8, 26], +[353430, 967, 8, 29], +[353432, 967, 8, 31], +[353472, 967, 10, 10], +[353646, 968, 4, 1], +[353807, 968, 9, 9], +[353841, 968, 10, 13], +[353944, 969, 1, 24], +[354131, 969, 7, 30], +[354220, 969, 10, 27], +[354399, 970, 4, 24], +[354598, 970, 11, 9], +[354760, 971, 4, 20], +[354874, 971, 8, 12], +[354901, 971, 9, 8], +[355070, 972, 2, 24], +[355228, 972, 7, 31], +[355361, 972, 12, 11], +[355371, 972, 12, 21], +[355481, 973, 4, 10], +[355614, 973, 8, 21], +[355694, 973, 11, 9], +[355789, 974, 2, 12], +[355867, 974, 5, 1], +[355957, 974, 7, 30], +[356009, 974, 9, 20], +[356096, 974, 12, 16], +[356247, 975, 5, 16], +[356259, 975, 5, 28], +[356370, 975, 9, 16], +[356461, 975, 12, 16], +[356586, 976, 4, 19], +[356660, 976, 7, 2], +[356779, 976, 10, 29], +[356957, 977, 4, 25], +[357029, 977, 7, 6], +[357151, 977, 11, 5], +[357203, 977, 12, 27], +[357230, 978, 1, 23], +[357328, 978, 5, 1], +[357367, 978, 6, 9], +[357499, 978, 10, 19], +[357567, 978, 12, 26], +[357748, 979, 6, 25], +[357946, 980, 1, 9], +[358095, 980, 6, 6], +[358214, 980, 10, 3], +[358260, 980, 11, 18], +[358442, 981, 5, 19], +[358565, 981, 9, 19], +[358710, 982, 2, 11], +[358877, 982, 7, 28], +[358982, 982, 11, 10], +[359136, 983, 4, 13], +[359298, 983, 9, 22], +[359343, 983, 11, 6], +[359504, 984, 4, 15], +[359506, 984, 4, 17], +[359603, 984, 7, 23], +[359735, 984, 12, 2], +[359848, 985, 3, 25], +[359919, 985, 6, 4], +[360090, 985, 11, 22], +[360176, 986, 2, 16], +[360208, 986, 3, 20], +[360338, 986, 7, 28], +[360510, 987, 1, 16], +[360684, 987, 7, 9], +[360732, 987, 8, 26], +[360765, 987, 9, 28], +[360876, 988, 1, 17], +[361047, 988, 7, 6], +[361084, 988, 8, 12], +[361136, 988, 10, 3], +[361317, 989, 4, 2], +[361488, 989, 9, 20], +[361661, 990, 3, 12], +[361828, 990, 8, 26], +[362005, 991, 2, 19], +[362182, 991, 8, 15], +[362331, 992, 1, 11], +[362370, 992, 2, 19], +[362416, 992, 4, 5], +[362497, 992, 6, 25], +[362534, 992, 8, 1], +[362615, 992, 10, 21], +[362795, 993, 4, 19], +[362941, 993, 9, 12], +[363086, 994, 2, 4], +[363190, 994, 5, 19], +[363227, 994, 6, 25], +[363390, 994, 12, 5], +[363524, 995, 4, 18], +[363686, 995, 9, 27], +[363882, 996, 4, 10], +[364026, 996, 9, 1], +[364218, 997, 3, 12], +[364257, 997, 4, 20], +[364328, 997, 6, 30], +[364391, 997, 9, 1], +[364416, 997, 9, 26], +[364569, 998, 2, 26], +[364576, 998, 3, 5], +[364670, 998, 6, 7], +[364708, 998, 7, 15], +[364898, 999, 1, 21], +[365086, 999, 7, 28], +[365282, 1000, 2, 9], +[365425, 1000, 7, 2], +[365572, 1000, 11, 26], +[365751, 1001, 5, 24], +[365873, 1001, 9, 23], +[365876, 1001, 9, 26], +[366023, 1002, 2, 20], +[366047, 1002, 3, 16], +[366071, 1002, 4, 9], +[366215, 1002, 8, 31], +[366342, 1003, 1, 5], +[366463, 1003, 5, 6], +[366663, 1003, 11, 22], +[366740, 1004, 2, 7], +[366801, 1004, 4, 8], +[366823, 1004, 4, 30], +[366961, 1004, 9, 15], +[367055, 1004, 12, 18], +[367166, 1005, 4, 8], +[367304, 1005, 8, 24], +[367395, 1005, 11, 23], +[367402, 1005, 11, 30], +[367511, 1006, 3, 19], +[367559, 1006, 5, 6], +[367671, 1006, 8, 26], +[367708, 1006, 10, 2], +[367803, 1007, 1, 5], +[367910, 1007, 4, 22], +[367962, 1007, 6, 13], +[368095, 1007, 10, 24], +[368108, 1007, 11, 6], +[368139, 1007, 12, 7], +[368178, 1008, 1, 15], +[368324, 1008, 6, 9], +[368362, 1008, 7, 17], +[368495, 1008, 11, 27], +[368642, 1009, 4, 23], +[368715, 1009, 7, 5], +[368734, 1009, 7, 24], +[368757, 1009, 8, 16], +[368850, 1009, 11, 17], +[368986, 1010, 4, 2], +[369078, 1010, 7, 3], +[369263, 1011, 1, 4], +[369288, 1011, 1, 29], +[369435, 1011, 6, 25], +[369560, 1011, 10, 28], +[369748, 1012, 5, 3], +[369767, 1012, 5, 22], +[369915, 1012, 10, 17], +[370074, 1013, 3, 25], +[370209, 1013, 8, 7], +[370279, 1013, 10, 16], +[370284, 1013, 10, 21], +[370331, 1013, 12, 7], +[370452, 1014, 4, 7], +[370561, 1014, 7, 25], +[370751, 1015, 1, 31], +[370766, 1015, 2, 15], +[370788, 1015, 3, 9], +[370972, 1015, 9, 9], +[371037, 1015, 11, 13], +[371158, 1016, 3, 13], +[371162, 1016, 3, 17], +[371238, 1016, 6, 1], +[371253, 1016, 6, 16], +[371310, 1016, 8, 12], +[371368, 1016, 10, 9], +[371517, 1017, 3, 7], +[371535, 1017, 3, 25], +[371605, 1017, 6, 3], +[371640, 1017, 7, 8], +[371676, 1017, 8, 13], +[371686, 1017, 8, 23], +[371801, 1017, 12, 16], +[371903, 1018, 3, 28], +[372077, 1018, 9, 18], +[372236, 1019, 2, 24], +[372322, 1019, 5, 21], +[372333, 1019, 6, 1], +[372450, 1019, 9, 26], +[372480, 1019, 10, 26], +[372680, 1020, 5, 13], +[372757, 1020, 7, 29], +[372881, 1020, 11, 30], +[373058, 1021, 5, 26], +[373163, 1021, 9, 8], +[373256, 1021, 12, 10], +[373405, 1022, 5, 8], +[373457, 1022, 6, 29], +[373498, 1022, 8, 9], +[373519, 1022, 8, 30], +[373708, 1023, 3, 7], +[373724, 1023, 3, 23], +[373895, 1023, 9, 10], +[373941, 1023, 10, 26], +[374102, 1024, 4, 4], +[374301, 1024, 10, 20], +[374342, 1024, 11, 30], +[374479, 1025, 4, 16], +[374661, 1025, 10, 15], +[374696, 1025, 11, 19], +[374711, 1025, 12, 4], +[374806, 1026, 3, 9], +[374931, 1026, 7, 12], +[375121, 1027, 1, 18], +[375213, 1027, 4, 20], +[375360, 1027, 9, 14], +[375373, 1027, 9, 27], +[375567, 1028, 4, 8], +[375642, 1028, 6, 22], +[375705, 1028, 8, 24], +[375898, 1029, 3, 5], +[376013, 1029, 6, 28], +[376144, 1029, 11, 6], +[376164, 1029, 11, 26], +[376239, 1030, 2, 9], +[376312, 1030, 4, 23], +[376430, 1030, 8, 19], +[376593, 1031, 1, 29], +[376769, 1031, 7, 24], +[376837, 1031, 9, 30], +[376902, 1031, 12, 4], +[376970, 1032, 2, 10], +[377122, 1032, 7, 11], +[377261, 1032, 11, 27], +[377392, 1033, 4, 7], +[377528, 1033, 8, 21], +[377690, 1034, 1, 30], +[377732, 1034, 3, 13], +[377793, 1034, 5, 13], +[377902, 1034, 8, 30], +[377998, 1034, 12, 4], +[378005, 1034, 12, 11], +[378125, 1035, 4, 10], +[378283, 1035, 9, 15], +[378339, 1035, 11, 10], +[378487, 1036, 4, 6], +[378547, 1036, 6, 5], +[378730, 1036, 12, 5], +[378892, 1037, 5, 16], +[379084, 1037, 11, 24], +[379182, 1038, 3, 2], +[379210, 1038, 3, 30], +[379340, 1038, 8, 7], +[379387, 1038, 9, 23], +[379572, 1039, 3, 27], +[379606, 1039, 4, 30], +[379789, 1039, 10, 30], +[379858, 1040, 1, 7], +[380020, 1040, 6, 17], +[380096, 1040, 9, 1], +[380199, 1040, 12, 13], +[380337, 1041, 4, 30], +[380374, 1041, 6, 6], +[380395, 1041, 6, 27], +[380572, 1041, 12, 21], +[380657, 1042, 3, 16], +[380826, 1042, 9, 1], +[380952, 1043, 1, 5], +[380961, 1043, 1, 14], +[381110, 1043, 6, 12], +[381170, 1043, 8, 11], +[381319, 1044, 1, 7], +[381344, 1044, 2, 1], +[381356, 1044, 2, 13], +[381502, 1044, 7, 8], +[381685, 1045, 1, 7], +[381690, 1045, 1, 12], +[381862, 1045, 7, 3], +[381951, 1045, 9, 30], +[382116, 1046, 3, 14], +[382170, 1046, 5, 7], +[382253, 1046, 7, 29], +[382393, 1046, 12, 16], +[382572, 1047, 6, 13], +[382602, 1047, 7, 13], +[382605, 1047, 7, 16], +[382776, 1048, 1, 3], +[382797, 1048, 1, 24], +[382976, 1048, 7, 21], +[383153, 1049, 1, 14], +[383213, 1049, 3, 15], +[383383, 1049, 9, 1], +[383571, 1050, 3, 8], +[383717, 1050, 8, 1], +[383741, 1050, 8, 25], +[383792, 1050, 10, 15], +[383892, 1051, 1, 23], +[383987, 1051, 4, 28], +[384088, 1051, 8, 7], +[384230, 1051, 12, 27], +[384404, 1052, 6, 18], +[384470, 1052, 8, 23], +[384491, 1052, 9, 13], +[384537, 1052, 10, 29], +[384688, 1053, 3, 29], +[384729, 1053, 5, 9], +[384800, 1053, 7, 19], +[384803, 1053, 7, 22], +[384959, 1053, 12, 25], +[385021, 1054, 2, 25], +[385187, 1054, 8, 10], +[385295, 1054, 11, 26], +[385451, 1055, 5, 1], +[385499, 1055, 6, 18], +[385667, 1055, 12, 3], +[385758, 1056, 3, 3], +[385917, 1056, 8, 9], +[386005, 1056, 11, 5], +[386129, 1057, 3, 9], +[386130, 1057, 3, 10], +[386205, 1057, 5, 24], +[386208, 1057, 5, 27], +[386361, 1057, 10, 27], +[386418, 1057, 12, 23], +[386464, 1058, 2, 7], +[386468, 1058, 2, 11], +[386636, 1058, 7, 29], +[386811, 1059, 1, 20], +[386909, 1059, 4, 28], +[386973, 1059, 7, 1], +[387133, 1059, 12, 8], +[387145, 1059, 12, 20], +[387171, 1060, 1, 15], +[387269, 1060, 4, 22], +[387467, 1060, 11, 6], +[387572, 1061, 2, 19], +[387702, 1061, 6, 29], +[387747, 1061, 8, 13], +[387800, 1061, 10, 5], +[387972, 1062, 3, 26], +[388073, 1062, 7, 5], +[388150, 1062, 9, 20], +[388155, 1062, 9, 25], +[388319, 1063, 3, 8], +[388472, 1063, 8, 8], +[388611, 1063, 12, 25], +[388631, 1064, 1, 14], +[388796, 1064, 6, 27], +[388962, 1064, 12, 10], +[389101, 1065, 4, 28], +[389292, 1065, 11, 5], +[389417, 1066, 3, 10], +[389571, 1066, 8, 11], +[389754, 1067, 2, 10], +[389922, 1067, 7, 28], +[390023, 1067, 11, 6], +[390197, 1068, 4, 28], +[390203, 1068, 5, 4], +[390348, 1068, 9, 26], +[390493, 1069, 2, 18], +[390647, 1069, 7, 22], +[390703, 1069, 9, 16], +[390706, 1069, 9, 19], +[390748, 1069, 10, 31], +[390853, 1070, 2, 13], +[390961, 1070, 6, 1], +[391050, 1070, 8, 29], +[391106, 1070, 10, 24], +[391239, 1071, 3, 6], +[391279, 1071, 4, 15], +[391302, 1071, 5, 8], +[391364, 1071, 7, 9], +[391562, 1072, 1, 23], +[391565, 1072, 1, 26], +[391745, 1072, 7, 24], +[391871, 1072, 11, 27], +[391983, 1073, 3, 19], +[392131, 1073, 8, 14], +[392323, 1074, 2, 22], +[392420, 1074, 5, 30], +[392430, 1074, 6, 9], +[392596, 1074, 11, 22], +[392637, 1075, 1, 2], +[392748, 1075, 4, 23], +[392856, 1075, 8, 9], +[392918, 1075, 10, 10], +[392947, 1075, 11, 8], +[393123, 1076, 5, 2], +[393312, 1076, 11, 7], +[393373, 1077, 1, 7], +[393442, 1077, 3, 17], +[393599, 1077, 8, 21], +[393619, 1077, 9, 10], +[393770, 1078, 2, 8], +[393794, 1078, 3, 4], +[393932, 1078, 7, 20], +[394107, 1079, 1, 11], +[394265, 1079, 6, 18], +[394345, 1079, 9, 6], +[394496, 1080, 2, 4], +[394589, 1080, 5, 7], +[394620, 1080, 6, 7], +[394773, 1080, 11, 7], +[394811, 1080, 12, 15], +[394923, 1081, 4, 6], +[395109, 1081, 10, 9], +[395192, 1081, 12, 31], +[395200, 1082, 1, 8], +[395315, 1082, 5, 3], +[395337, 1082, 5, 25], +[395490, 1082, 10, 25], +[395573, 1083, 1, 16], +[395657, 1083, 4, 10], +[395722, 1083, 6, 14], +[395760, 1083, 7, 22], +[395790, 1083, 8, 21], +[395869, 1083, 11, 8], +[395989, 1084, 3, 7], +[396070, 1084, 5, 27], +[396262, 1084, 12, 5], +[396340, 1085, 2, 21], +[396385, 1085, 4, 7], +[396450, 1085, 6, 11], +[396500, 1085, 7, 31], +[396557, 1085, 9, 26], +[396735, 1086, 3, 23], +[396747, 1086, 4, 4], +[396894, 1086, 8, 29], +[396943, 1086, 10, 17], +[396978, 1086, 11, 21], +[397103, 1087, 3, 26], +[397241, 1087, 8, 11], +[397370, 1087, 12, 18], +[397460, 1088, 3, 17], +[397650, 1088, 9, 23], +[397825, 1089, 3, 17], +[397970, 1089, 8, 9], +[398088, 1089, 12, 5], +[398214, 1090, 4, 10], +[398228, 1090, 4, 24], +[398262, 1090, 5, 28], +[398457, 1090, 12, 9], +[398579, 1091, 4, 10], +[398695, 1091, 8, 4], +[398779, 1091, 10, 27], +[398844, 1091, 12, 31], +[398955, 1092, 4, 20], +[398984, 1092, 5, 19], +[399054, 1092, 7, 28], +[399239, 1093, 1, 29], +[399250, 1093, 2, 9], +[399385, 1093, 6, 24], +[399409, 1093, 7, 18], +[399480, 1093, 9, 27], +[399574, 1093, 12, 30], +[399596, 1094, 1, 21], +[399717, 1094, 5, 22], +[399723, 1094, 5, 28], +[399783, 1094, 7, 27], +[399789, 1094, 8, 2], +[399861, 1094, 10, 13], +[399912, 1094, 12, 3], +[400030, 1095, 3, 31], +[400228, 1095, 10, 15], +[400406, 1096, 4, 10], +[400544, 1096, 8, 26], +[400653, 1096, 12, 13], +[400756, 1097, 3, 26], +[400911, 1097, 8, 28], +[400919, 1097, 9, 5], +[400924, 1097, 9, 10], +[400975, 1097, 10, 31], +[401166, 1098, 5, 10], +[401263, 1098, 8, 15], +[401463, 1099, 3, 3], +[401609, 1099, 7, 27], +[401802, 1100, 2, 5], +[401835, 1100, 3, 10], +[401880, 1100, 4, 24], +[402018, 1100, 9, 9], +[402177, 1101, 2, 15], +[402200, 1101, 3, 10], +[402399, 1101, 9, 25], +[402542, 1102, 2, 15], +[402668, 1102, 6, 21], +[402706, 1102, 7, 29], +[402807, 1102, 11, 7], +[402864, 1103, 1, 3], +[403043, 1103, 7, 1], +[403162, 1103, 10, 28], +[403203, 1103, 12, 8], +[403221, 1103, 12, 26], +[403416, 1104, 7, 8], +[403605, 1105, 1, 13], +[403766, 1105, 6, 23], +[403847, 1105, 9, 12], +[403913, 1105, 11, 17], +[404056, 1106, 4, 9], +[404064, 1106, 4, 17], +[404135, 1106, 6, 27], +[404239, 1106, 10, 9], +[404392, 1107, 3, 11], +[404447, 1107, 5, 5], +[404527, 1107, 7, 24], +[404600, 1107, 10, 5], +[404774, 1108, 3, 27], +[404813, 1108, 5, 5], +[404985, 1108, 10, 24], +[405086, 1109, 2, 2], +[405217, 1109, 6, 13], +[405351, 1109, 10, 25], +[405373, 1109, 11, 16], +[405456, 1110, 2, 7], +[405597, 1110, 6, 28], +[405754, 1110, 12, 2], +[405859, 1111, 3, 17], +[405928, 1111, 5, 25], +[406109, 1111, 11, 22], +[406274, 1112, 5, 5], +[406362, 1112, 8, 1], +[406474, 1112, 11, 21], +[406624, 1113, 4, 20], +[406697, 1113, 7, 2], +[406745, 1113, 8, 19], +[406824, 1113, 11, 6], +[406851, 1113, 12, 3], +[406863, 1113, 12, 15], +[406886, 1114, 1, 7], +[406922, 1114, 2, 12], +[407084, 1114, 7, 24], +[407124, 1114, 9, 2], +[407251, 1115, 1, 7], +[407307, 1115, 3, 4], +[407383, 1115, 5, 19], +[407401, 1115, 6, 6], +[407508, 1115, 9, 21], +[407694, 1116, 3, 25], +[407867, 1116, 9, 14], +[407941, 1116, 11, 27], +[407989, 1117, 1, 14], +[408064, 1117, 3, 30], +[408145, 1117, 6, 19], +[408189, 1117, 8, 2], +[408316, 1117, 12, 7], +[408501, 1118, 6, 10], +[408655, 1118, 11, 11], +[408713, 1119, 1, 8], +[408813, 1119, 4, 18], +[408938, 1119, 8, 21], +[409015, 1119, 11, 6], +[409076, 1120, 1, 6], +[409193, 1120, 5, 2], +[409369, 1120, 10, 25], +[409527, 1121, 4, 1], +[409584, 1121, 5, 28], +[409713, 1121, 10, 4], +[409890, 1122, 3, 30], +[410038, 1122, 8, 25], +[410056, 1122, 9, 12], +[410110, 1122, 11, 5], +[410263, 1123, 4, 7], +[410272, 1123, 4, 16], +[410370, 1123, 7, 23], +[410544, 1124, 1, 13], +[410718, 1124, 7, 5], +[410806, 1124, 10, 1], +[410833, 1124, 10, 28], +[410960, 1125, 3, 4], +[411031, 1125, 5, 14], +[411134, 1125, 8, 25], +[411209, 1125, 11, 8], +[411300, 1126, 2, 7], +[411373, 1126, 4, 21], +[411378, 1126, 4, 26], +[411397, 1126, 5, 15], +[411556, 1126, 10, 21], +[411739, 1127, 4, 22], +[411923, 1127, 10, 23], +[411968, 1127, 12, 7], +[412052, 1128, 2, 29], +[412066, 1128, 3, 14], +[412149, 1128, 6, 5], +[412308, 1128, 11, 11], +[412367, 1129, 1, 9], +[412515, 1129, 6, 6], +[412674, 1129, 11, 12], +[412861, 1130, 5, 18], +[412867, 1130, 5, 24], +[412956, 1130, 8, 21], +[413015, 1130, 10, 19], +[413190, 1131, 4, 12], +[413353, 1131, 9, 22], +[413466, 1132, 1, 13], +[413542, 1132, 3, 29], +[413670, 1132, 8, 4], +[413828, 1133, 1, 9], +[414001, 1133, 7, 1], +[414030, 1133, 7, 30], +[414109, 1133, 10, 17], +[414227, 1134, 2, 12], +[414301, 1134, 4, 27], +[414341, 1134, 6, 6], +[414540, 1134, 12, 22], +[414577, 1135, 1, 28], +[414626, 1135, 3, 18], +[414648, 1135, 4, 9], +[414829, 1135, 10, 7], +[414929, 1136, 1, 15], +[415050, 1136, 5, 15], +[415134, 1136, 8, 7], +[415333, 1137, 2, 22], +[415377, 1137, 4, 7], +[415474, 1137, 7, 13], +[415536, 1137, 9, 13], +[415646, 1138, 1, 1], +[415753, 1138, 4, 18], +[415904, 1138, 9, 16], +[415946, 1138, 10, 28], +[416016, 1139, 1, 6], +[416034, 1139, 1, 24], +[416160, 1139, 5, 30], +[416191, 1139, 6, 30], +[416308, 1139, 10, 25], +[416419, 1140, 2, 13], +[416453, 1140, 3, 18], +[416540, 1140, 6, 13], +[416732, 1140, 12, 22], +[416810, 1141, 3, 10], +[416995, 1141, 9, 11], +[417001, 1141, 9, 17], +[417102, 1141, 12, 27], +[417277, 1142, 6, 20], +[417312, 1142, 7, 25], +[417317, 1142, 7, 30], +[417412, 1142, 11, 2], +[417518, 1143, 2, 16], +[417523, 1143, 2, 21], +[417690, 1143, 8, 7], +[417841, 1144, 1, 5], +[417854, 1144, 1, 18], +[417984, 1144, 5, 27], +[418036, 1144, 7, 18], +[418054, 1144, 8, 5], +[418148, 1144, 11, 7], +[418331, 1145, 5, 9], +[418381, 1145, 6, 28], +[418563, 1145, 12, 27], +[418641, 1146, 3, 15], +[418806, 1146, 8, 27], +[418849, 1146, 10, 9], +[418864, 1146, 10, 24], +[419037, 1147, 4, 15], +[419132, 1147, 7, 19], +[419290, 1147, 12, 24], +[419374, 1148, 3, 17], +[419413, 1148, 4, 25], +[419457, 1148, 6, 8], +[419487, 1148, 7, 8], +[419645, 1148, 12, 13], +[419792, 1149, 5, 9], +[419873, 1149, 7, 29], +[419968, 1149, 11, 1], +[419991, 1149, 11, 24], +[420104, 1150, 3, 17], +[420173, 1150, 5, 25], +[420347, 1150, 11, 15], +[420394, 1151, 1, 1], +[420549, 1151, 6, 5], +[420600, 1151, 7, 26], +[420646, 1151, 9, 10], +[420726, 1151, 11, 29], +[420891, 1152, 5, 12], +[421025, 1152, 9, 23], +[421199, 1153, 3, 16], +[421201, 1153, 3, 18], +[421279, 1153, 6, 4], +[421316, 1153, 7, 11], +[421460, 1153, 12, 2], +[421508, 1154, 1, 19], +[421562, 1154, 3, 14], +[421607, 1154, 4, 28], +[421774, 1154, 10, 12], +[421776, 1154, 10, 14], +[421899, 1155, 2, 14], +[421955, 1155, 4, 11], +[422006, 1155, 6, 1], +[422200, 1155, 12, 12], +[422215, 1155, 12, 27], +[422345, 1156, 5, 5], +[422463, 1156, 8, 31], +[422628, 1157, 2, 12], +[422741, 1157, 6, 5], +[422921, 1157, 12, 2], +[423079, 1158, 5, 9], +[423256, 1158, 11, 2], +[423418, 1159, 4, 13], +[423565, 1159, 9, 7], +[423677, 1159, 12, 28], +[423691, 1160, 1, 11], +[423818, 1160, 5, 17], +[423850, 1160, 6, 18], +[424013, 1160, 11, 28], +[424044, 1160, 12, 29], +[424064, 1161, 1, 18], +[424194, 1161, 5, 28], +[424314, 1161, 9, 25], +[424427, 1162, 1, 16], +[424431, 1162, 1, 20], +[424631, 1162, 8, 8], +[424681, 1162, 9, 27], +[424757, 1162, 12, 12], +[424929, 1163, 6, 2], +[424980, 1163, 7, 23], +[425155, 1164, 1, 14], +[425337, 1164, 7, 14], +[425454, 1164, 11, 8], +[425464, 1164, 11, 18], +[425494, 1164, 12, 18], +[425690, 1165, 7, 2], +[425875, 1166, 1, 3], +[426058, 1166, 7, 5], +[426226, 1166, 12, 20], +[426386, 1167, 5, 29], +[426572, 1167, 12, 1], +[426603, 1168, 1, 1], +[426706, 1168, 4, 13], +[426849, 1168, 9, 3], +[426942, 1168, 12, 5], +[426992, 1169, 1, 24], +[427125, 1169, 6, 6], +[427152, 1169, 7, 3], +[427350, 1170, 1, 17], +[427402, 1170, 3, 10], +[427537, 1170, 7, 23], +[427678, 1170, 12, 11], +[427686, 1170, 12, 19], +[427842, 1171, 5, 24], +[427929, 1171, 8, 19], +[428115, 1172, 2, 21], +[428287, 1172, 8, 11], +[428378, 1172, 11, 10], +[428427, 1172, 12, 29], +[428542, 1173, 4, 23], +[428551, 1173, 5, 2], +[428687, 1173, 9, 15], +[428843, 1174, 2, 18], +[428922, 1174, 5, 8], +[428960, 1174, 6, 15], +[429052, 1174, 9, 15], +[429082, 1174, 10, 15], +[429247, 1175, 3, 29], +[429391, 1175, 8, 20], +[429554, 1176, 1, 30], +[429732, 1176, 7, 26], +[429930, 1177, 2, 9], +[429974, 1177, 3, 25], +[430004, 1177, 4, 24], +[430022, 1177, 5, 12], +[430044, 1177, 6, 3], +[430122, 1177, 8, 20], +[430277, 1178, 1, 22], +[430333, 1178, 3, 19], +[430392, 1178, 5, 17], +[430403, 1178, 5, 28], +[430521, 1178, 9, 23], +[430627, 1179, 1, 7], +[430703, 1179, 3, 24], +[430734, 1179, 4, 24], +[430771, 1179, 5, 31], +[430884, 1179, 9, 21], +[430947, 1179, 11, 23], +[431015, 1180, 1, 30], +[431075, 1180, 3, 30], +[431194, 1180, 7, 27], +[431369, 1181, 1, 18], +[431407, 1181, 2, 25], +[431503, 1181, 6, 1], +[431649, 1181, 10, 25], +[431778, 1182, 3, 3], +[431845, 1182, 5, 9], +[431983, 1182, 9, 24], +[432093, 1183, 1, 12], +[432234, 1183, 6, 2], +[432269, 1183, 7, 7], +[432461, 1184, 1, 15], +[432614, 1184, 6, 16], +[432741, 1184, 10, 21], +[432934, 1185, 5, 2], +[433049, 1185, 8, 25], +[433233, 1186, 2, 25], +[433267, 1186, 3, 31], +[433358, 1186, 6, 30], +[433557, 1187, 1, 15], +[433754, 1187, 7, 31], +[433914, 1188, 1, 7], +[434104, 1188, 7, 15], +[434134, 1188, 8, 14], +[434164, 1188, 9, 13], +[434267, 1188, 12, 25], +[434402, 1189, 5, 9], +[434601, 1189, 11, 24], +[434694, 1190, 2, 25], +[434700, 1190, 3, 3], +[434841, 1190, 7, 22], +[435020, 1191, 1, 17], +[435117, 1191, 4, 24], +[435166, 1191, 6, 12], +[435325, 1191, 11, 18], +[435400, 1192, 2, 1], +[435509, 1192, 5, 20], +[435679, 1192, 11, 6], +[435865, 1193, 5, 11], +[436063, 1193, 11, 25], +[436131, 1194, 2, 1], +[436300, 1194, 7, 20], +[436326, 1194, 8, 15], +[436487, 1195, 1, 23], +[436687, 1195, 8, 11], +[436736, 1195, 9, 29], +[436919, 1196, 3, 30], +[437009, 1196, 6, 28], +[437178, 1196, 12, 14], +[437353, 1197, 6, 7], +[437379, 1197, 7, 3], +[437489, 1197, 10, 21], +[437492, 1197, 10, 24], +[437512, 1197, 11, 13], +[437562, 1198, 1, 2], +[437758, 1198, 7, 17], +[437866, 1198, 11, 2], +[437985, 1199, 3, 1], +[438078, 1199, 6, 2], +[438157, 1199, 8, 20], +[438180, 1199, 9, 12], +[438351, 1200, 3, 1], +[438424, 1200, 5, 13], +[438618, 1200, 11, 23], +[438657, 1201, 1, 1], +[438812, 1201, 6, 5], +[438991, 1201, 12, 1], +[439057, 1202, 2, 5], +[439059, 1202, 2, 7], +[439098, 1202, 3, 18], +[439286, 1202, 9, 22], +[439370, 1202, 12, 15], +[439504, 1203, 4, 28], +[439569, 1203, 7, 2], +[439684, 1203, 10, 25], +[439801, 1204, 2, 19], +[439856, 1204, 4, 14], +[439950, 1204, 7, 17], +[440002, 1204, 9, 7], +[440034, 1204, 10, 9], +[440060, 1204, 11, 4], +[440078, 1204, 11, 22], +[440150, 1205, 2, 2], +[440153, 1205, 2, 5], +[440350, 1205, 8, 21], +[440488, 1206, 1, 6], +[440605, 1206, 5, 3], +[440658, 1206, 6, 25], +[440683, 1206, 7, 20], +[440776, 1206, 10, 21], +[440934, 1207, 3, 28], +[441118, 1207, 9, 28], +[441145, 1207, 10, 25], +[441297, 1208, 3, 25], +[441479, 1208, 9, 23], +[441527, 1208, 11, 10], +[441689, 1209, 4, 21], +[441738, 1209, 6, 9], +[441746, 1209, 6, 17], +[441900, 1209, 11, 18], +[442094, 1210, 5, 31], +[442259, 1210, 11, 12], +[442418, 1211, 4, 20], +[442555, 1211, 9, 4], +[442616, 1211, 11, 4], +[442787, 1212, 4, 23], +[442792, 1212, 4, 28], +[442903, 1212, 8, 17], +[442921, 1212, 9, 4], +[442988, 1212, 11, 10], +[443143, 1213, 4, 14], +[443158, 1213, 4, 29], +[443343, 1213, 10, 31], +[443521, 1214, 4, 27], +[443637, 1214, 8, 21], +[443827, 1215, 2, 27], +[443918, 1215, 5, 29], +[443938, 1215, 6, 18], +[443989, 1215, 8, 8], +[444161, 1216, 1, 27], +[444209, 1216, 3, 15], +[444213, 1216, 3, 19], +[444308, 1216, 6, 22], +[444348, 1216, 8, 1], +[444442, 1216, 11, 3], +[444498, 1216, 12, 29], +[444677, 1217, 6, 26], +[444852, 1217, 12, 18], +[445012, 1218, 5, 27], +[445020, 1218, 6, 4], +[445173, 1218, 11, 4], +[445242, 1219, 1, 12], +[445377, 1219, 5, 27], +[445476, 1219, 9, 3], +[445545, 1219, 11, 11], +[445669, 1220, 3, 14], +[445695, 1220, 4, 9], +[445708, 1220, 4, 22], +[445809, 1220, 8, 1], +[445814, 1220, 8, 6], +[445967, 1221, 1, 6], +[445973, 1221, 1, 12], +[446099, 1221, 5, 18], +[446254, 1221, 10, 20], +[446355, 1222, 1, 29], +[446464, 1222, 5, 18], +[446602, 1222, 10, 3], +[446727, 1223, 2, 5], +[446812, 1223, 5, 1], +[446898, 1223, 7, 26], +[447024, 1223, 11, 29], +[447084, 1224, 1, 28], +[447254, 1224, 7, 16], +[447365, 1224, 11, 4], +[447475, 1225, 2, 22], +[447668, 1225, 9, 3], +[447865, 1226, 3, 19], +[447904, 1226, 4, 27], +[448059, 1226, 9, 29], +[448210, 1227, 2, 27], +[448211, 1227, 2, 28], +[448280, 1227, 5, 8], +[448353, 1227, 7, 20], +[448529, 1228, 1, 12], +[448558, 1228, 2, 10], +[448695, 1228, 6, 26], +[448719, 1228, 7, 20], +[448772, 1228, 9, 11], +[448773, 1228, 9, 12], +[448923, 1229, 2, 9], +[449085, 1229, 7, 21], +[449247, 1229, 12, 30], +[449386, 1230, 5, 18], +[449483, 1230, 8, 23], +[449538, 1230, 10, 17], +[449645, 1231, 2, 1], +[449686, 1231, 3, 14], +[449794, 1231, 6, 30], +[449994, 1232, 1, 16], +[450139, 1232, 6, 9], +[450155, 1232, 6, 25], +[450286, 1232, 11, 3], +[450406, 1233, 3, 3], +[450419, 1233, 3, 16], +[450617, 1233, 9, 30], +[450744, 1234, 2, 4], +[450891, 1234, 7, 1], +[451088, 1235, 1, 14], +[451186, 1235, 4, 22], +[451356, 1235, 10, 9], +[451447, 1236, 1, 8], +[451566, 1236, 5, 6], +[451642, 1236, 7, 21], +[451744, 1236, 10, 31], +[451874, 1237, 3, 10], +[451892, 1237, 3, 28], +[451994, 1237, 7, 8], +[452148, 1237, 12, 9], +[452219, 1238, 2, 18], +[452254, 1238, 3, 25], +[452374, 1238, 7, 23], +[452393, 1238, 8, 11], +[452509, 1238, 12, 5], +[452569, 1239, 2, 3], +[452721, 1239, 7, 5], +[452902, 1240, 1, 2], +[452964, 1240, 3, 4], +[452971, 1240, 3, 11], +[453150, 1240, 9, 6], +[453326, 1241, 3, 1], +[453514, 1241, 9, 5], +[453634, 1242, 1, 3], +[453829, 1242, 7, 17], +[453900, 1242, 9, 26], +[454044, 1243, 2, 17], +[454126, 1243, 5, 10], +[454129, 1243, 5, 13], +[454222, 1243, 8, 14], +[454326, 1243, 11, 26], +[454524, 1244, 6, 11], +[454536, 1244, 6, 23], +[454680, 1244, 11, 14], +[454829, 1245, 4, 12], +[454875, 1245, 5, 28], +[454974, 1245, 9, 4], +[454985, 1245, 9, 15], +[455148, 1246, 2, 25], +[455192, 1246, 4, 10], +[455266, 1246, 6, 23], +[455289, 1246, 7, 16], +[455291, 1246, 7, 18], +[455398, 1246, 11, 2], +[455529, 1247, 3, 13], +[455614, 1247, 6, 6], +[455647, 1247, 7, 9], +[455685, 1247, 8, 16], +[455794, 1247, 12, 3], +[455870, 1248, 2, 17], +[455919, 1248, 4, 6], +[456077, 1248, 9, 11], +[456251, 1249, 3, 4], +[456308, 1249, 4, 30], +[456489, 1249, 10, 28], +[456519, 1249, 11, 27], +[456608, 1250, 2, 24], +[456647, 1250, 4, 4], +[456810, 1250, 9, 14], +[456865, 1250, 11, 8], +[456995, 1251, 3, 18], +[457006, 1251, 3, 29], +[457202, 1251, 10, 11], +[457265, 1251, 12, 13], +[457308, 1252, 1, 25], +[457477, 1252, 7, 12], +[457504, 1252, 8, 8], +[457593, 1252, 11, 5], +[457688, 1253, 2, 8], +[457707, 1253, 2, 27], +[457738, 1253, 3, 30], +[457793, 1253, 5, 24], +[457960, 1253, 11, 7], +[457962, 1253, 11, 9], +[458069, 1254, 2, 24], +[458115, 1254, 4, 11], +[458158, 1254, 5, 24], +[458201, 1254, 7, 6], +[458205, 1254, 7, 10], +[458325, 1254, 11, 7], +[458503, 1255, 5, 4], +[458543, 1255, 6, 13], +[458703, 1255, 11, 20], +[458841, 1256, 4, 6], +[459027, 1256, 10, 9], +[459111, 1257, 1, 1], +[459271, 1257, 6, 10], +[459456, 1257, 12, 12], +[459473, 1257, 12, 29], +[459534, 1258, 2, 28], +[459698, 1258, 8, 11], +[459782, 1258, 11, 3], +[459831, 1258, 12, 22], +[459921, 1259, 3, 22], +[460083, 1259, 8, 31], +[460102, 1259, 9, 19], +[460256, 1260, 2, 20], +[460295, 1260, 3, 30], +[460446, 1260, 8, 28], +[460461, 1260, 9, 12], +[460469, 1260, 9, 20], +[460474, 1260, 9, 25], +[460628, 1261, 2, 26], +[460677, 1261, 4, 16], +[460687, 1261, 4, 26], +[460690, 1261, 4, 29], +[460722, 1261, 5, 31], +[460915, 1261, 12, 10], +[461062, 1262, 5, 6], +[461090, 1262, 6, 3], +[461171, 1262, 8, 23], +[461196, 1262, 9, 17], +[461255, 1262, 11, 15], +[461402, 1263, 4, 11], +[461563, 1263, 9, 19], +[461711, 1264, 2, 14], +[461846, 1264, 6, 28], +[461945, 1264, 10, 5], +[462137, 1265, 4, 15], +[462192, 1265, 6, 9], +[462266, 1265, 8, 22], +[462320, 1265, 10, 15], +[462482, 1266, 3, 26], +[462500, 1266, 4, 13], +[462695, 1266, 10, 25], +[462853, 1267, 4, 1], +[462981, 1267, 8, 7], +[463181, 1268, 2, 23], +[463340, 1268, 7, 31], +[463434, 1268, 11, 2], +[463561, 1269, 3, 9], +[463734, 1269, 8, 29], +[463925, 1270, 3, 8], +[463951, 1270, 4, 3], +[464104, 1270, 9, 3], +[464201, 1270, 12, 9], +[464303, 1271, 3, 21], +[464476, 1271, 9, 10], +[464643, 1272, 2, 24], +[464834, 1272, 9, 2], +[464990, 1273, 2, 5], +[465130, 1273, 6, 25], +[465315, 1273, 12, 27], +[465350, 1274, 1, 31], +[465437, 1274, 4, 28], +[465482, 1274, 6, 12], +[465557, 1274, 8, 26], +[465735, 1275, 2, 20], +[465935, 1275, 9, 8], +[466005, 1275, 11, 17], +[466185, 1276, 5, 15], +[466289, 1276, 8, 27], +[466345, 1276, 10, 22], +[466470, 1277, 2, 24], +[466561, 1277, 5, 26], +[466680, 1277, 9, 22], +[466850, 1278, 3, 11], +[466958, 1278, 6, 27], +[467106, 1278, 11, 22], +[467113, 1278, 11, 29], +[467187, 1279, 2, 11], +[467346, 1279, 7, 20], +[467508, 1279, 12, 29], +[467540, 1280, 1, 30], +[467689, 1280, 6, 27], +[467803, 1280, 10, 19], +[467932, 1281, 2, 25], +[467953, 1281, 3, 18], +[468028, 1281, 6, 1], +[468224, 1281, 12, 14], +[468393, 1282, 6, 1], +[468488, 1282, 9, 4], +[468504, 1282, 9, 20], +[468551, 1282, 11, 6], +[468631, 1283, 1, 25], +[468702, 1283, 4, 6], +[468877, 1283, 9, 28], +[468982, 1284, 1, 11], +[469141, 1284, 6, 18], +[469150, 1284, 6, 27], +[469216, 1284, 9, 1], +[469284, 1284, 11, 8], +[469471, 1285, 5, 14], +[469603, 1285, 9, 23], +[469630, 1285, 10, 20], +[469671, 1285, 11, 30], +[469863, 1286, 6, 10], +[469903, 1286, 7, 20], +[470035, 1286, 11, 29], +[470222, 1287, 6, 4], +[470330, 1287, 9, 20], +[470488, 1288, 2, 25], +[470657, 1288, 8, 12], +[470775, 1288, 12, 8], +[470919, 1289, 5, 1], +[470950, 1289, 6, 1], +[470978, 1289, 6, 29], +[471032, 1289, 8, 22], +[471186, 1290, 1, 23], +[471260, 1290, 4, 7], +[471459, 1290, 10, 23], +[471630, 1291, 4, 12], +[471778, 1291, 9, 7], +[471977, 1292, 3, 24], +[472167, 1292, 9, 30], +[472269, 1293, 1, 10], +[472378, 1293, 4, 29], +[472482, 1293, 8, 11], +[472620, 1293, 12, 27], +[472640, 1294, 1, 16], +[472822, 1294, 7, 17], +[472840, 1294, 8, 4], +[472994, 1295, 1, 5], +[473081, 1295, 4, 2], +[473159, 1295, 6, 19], +[473214, 1295, 8, 13], +[473309, 1295, 11, 16], +[473486, 1296, 5, 11], +[473657, 1296, 10, 29], +[473682, 1296, 11, 23], +[473825, 1297, 4, 15], +[473961, 1297, 8, 29], +[474108, 1298, 1, 23], +[474193, 1298, 4, 18], +[474243, 1298, 6, 7], +[474279, 1298, 7, 13], +[474299, 1298, 8, 2], +[474459, 1299, 1, 9], +[474525, 1299, 3, 16], +[474685, 1299, 8, 23], +[474694, 1299, 9, 1], +[474742, 1299, 10, 19], +[474854, 1300, 2, 8], +[474944, 1300, 5, 9], +[475070, 1300, 9, 12], +[475254, 1301, 3, 15], +[475441, 1301, 9, 18], +[475573, 1302, 1, 28], +[475749, 1302, 7, 23], +[475935, 1303, 1, 25], +[476007, 1303, 4, 7], +[476162, 1303, 9, 9], +[476174, 1303, 9, 21], +[476357, 1304, 3, 22], +[476539, 1304, 9, 20], +[476609, 1304, 11, 29], +[476703, 1305, 3, 3], +[476815, 1305, 6, 23], +[476990, 1305, 12, 15], +[477007, 1306, 1, 1], +[477144, 1306, 5, 18], +[477190, 1306, 7, 3], +[477375, 1307, 1, 4], +[477475, 1307, 4, 14], +[477498, 1307, 5, 7], +[477544, 1307, 6, 22], +[477656, 1307, 10, 12], +[477838, 1308, 4, 11], +[478032, 1308, 10, 22], +[478148, 1309, 2, 15], +[478316, 1309, 8, 2], +[478416, 1309, 11, 10], +[478474, 1310, 1, 7], +[478667, 1310, 7, 19], +[478681, 1310, 8, 2], +[478714, 1310, 9, 4], +[478723, 1310, 9, 13], +[478826, 1310, 12, 25], +[478956, 1311, 5, 4], +[479065, 1311, 8, 21], +[479226, 1312, 1, 29], +[479412, 1312, 8, 2], +[479552, 1312, 12, 20], +[479679, 1313, 4, 26], +[479786, 1313, 8, 11], +[479834, 1313, 9, 28], +[480005, 1314, 3, 18], +[480055, 1314, 5, 7], +[480143, 1314, 8, 3], +[480278, 1314, 12, 16], +[480391, 1315, 4, 8], +[480399, 1315, 4, 16], +[480558, 1315, 9, 22], +[480595, 1315, 10, 29], +[480780, 1316, 5, 1], +[480865, 1316, 7, 25], +[480965, 1316, 11, 2], +[481132, 1317, 4, 18], +[481209, 1317, 7, 4], +[481406, 1318, 1, 17], +[481522, 1318, 5, 13], +[481633, 1318, 9, 1], +[481821, 1319, 3, 8], +[481830, 1319, 3, 17], +[481922, 1319, 6, 17], +[482104, 1319, 12, 16], +[482238, 1320, 4, 28], +[482405, 1320, 10, 12], +[482522, 1321, 2, 6], +[482575, 1321, 3, 31], +[482693, 1321, 7, 27], +[482734, 1321, 9, 6], +[482870, 1322, 1, 20], +[482956, 1322, 4, 16], +[483063, 1322, 8, 1], +[483174, 1322, 11, 20], +[483351, 1323, 5, 16], +[483394, 1323, 6, 28], +[483415, 1323, 7, 19], +[483579, 1323, 12, 30], +[483611, 1324, 1, 31], +[483699, 1324, 4, 28], +[483741, 1324, 6, 9], +[483922, 1324, 12, 7], +[483996, 1325, 2, 19], +[484196, 1325, 9, 7], +[484383, 1326, 3, 13], +[484430, 1326, 4, 29], +[484448, 1326, 5, 17], +[484607, 1326, 10, 23], +[484799, 1327, 5, 3], +[484937, 1327, 9, 18], +[485001, 1327, 11, 21], +[485194, 1328, 6, 1], +[485199, 1328, 6, 6], +[485226, 1328, 7, 3], +[485279, 1328, 8, 25], +[485427, 1329, 1, 20], +[485611, 1329, 7, 23], +[485622, 1329, 8, 3], +[485668, 1329, 9, 18], +[485681, 1329, 10, 1], +[485729, 1329, 11, 18], +[485873, 1330, 4, 11], +[486014, 1330, 8, 30], +[486127, 1330, 12, 21], +[486307, 1331, 6, 19], +[486415, 1331, 10, 5], +[486515, 1332, 1, 13], +[486700, 1332, 7, 16], +[486717, 1332, 8, 2], +[486726, 1332, 8, 11], +[486891, 1333, 1, 23], +[487034, 1333, 6, 15], +[487055, 1333, 7, 6], +[487148, 1333, 10, 7], +[487334, 1334, 4, 11], +[487404, 1334, 6, 20], +[487432, 1334, 7, 18], +[487446, 1334, 8, 1], +[487618, 1335, 1, 20], +[487741, 1335, 5, 23], +[487925, 1335, 11, 23], +[488107, 1336, 5, 23], +[488298, 1336, 11, 30], +[488307, 1336, 12, 9], +[488321, 1336, 12, 23], +[488430, 1337, 4, 11], +[488517, 1337, 7, 7], +[488651, 1337, 11, 18], +[488770, 1338, 3, 17], +[488904, 1338, 7, 29], +[488927, 1338, 8, 21], +[489121, 1339, 3, 3], +[489200, 1339, 5, 21], +[489233, 1339, 6, 23], +[489306, 1339, 9, 4], +[489436, 1340, 1, 12], +[489567, 1340, 5, 22], +[489706, 1340, 10, 8], +[489728, 1340, 10, 30], +[489733, 1340, 11, 4], +[489891, 1341, 4, 11], +[489944, 1341, 6, 3], +[489951, 1341, 6, 10], +[489990, 1341, 7, 19], +[490066, 1341, 10, 3], +[490226, 1342, 3, 12], +[490232, 1342, 3, 18], +[490398, 1342, 8, 31], +[490531, 1343, 1, 11], +[490685, 1343, 6, 14], +[490869, 1343, 12, 15], +[490988, 1344, 4, 12], +[491150, 1344, 9, 21], +[491181, 1344, 10, 22], +[491218, 1344, 11, 28], +[491228, 1344, 12, 8], +[491242, 1344, 12, 22], +[491386, 1345, 5, 15], +[491421, 1345, 6, 19], +[491520, 1345, 9, 26], +[491653, 1346, 2, 6], +[491765, 1346, 5, 29], +[491911, 1346, 10, 22], +[492055, 1347, 3, 15], +[492237, 1347, 9, 13], +[492376, 1348, 1, 30], +[492496, 1348, 5, 29], +[492601, 1348, 9, 11], +[492799, 1349, 3, 28], +[492802, 1349, 3, 31], +[492926, 1349, 8, 2], +[493022, 1349, 11, 6], +[493169, 1350, 4, 2], +[493237, 1350, 6, 9], +[493417, 1350, 12, 6], +[493425, 1350, 12, 14], +[493580, 1351, 5, 18], +[493693, 1351, 9, 8], +[493783, 1351, 12, 7], +[493856, 1352, 2, 18], +[493922, 1352, 4, 24], +[494108, 1352, 10, 27], +[494284, 1353, 4, 21], +[494381, 1353, 7, 27], +[494430, 1353, 9, 14], +[494536, 1353, 12, 29], +[494640, 1354, 4, 12], +[494785, 1354, 9, 4], +[494938, 1355, 2, 4], +[494976, 1355, 3, 14], +[495142, 1355, 8, 27], +[495267, 1355, 12, 30], +[495452, 1356, 7, 2], +[495575, 1356, 11, 2], +[495637, 1357, 1, 3], +[495789, 1357, 6, 4], +[495848, 1357, 8, 2], +[495853, 1357, 8, 7], +[495952, 1357, 11, 14], +[496004, 1358, 1, 5], +[496137, 1358, 5, 18], +[496155, 1358, 6, 5], +[496228, 1358, 8, 17], +[496373, 1359, 1, 9], +[496438, 1359, 3, 15], +[496630, 1359, 9, 23], +[496694, 1359, 11, 26], +[496820, 1360, 3, 31], +[497001, 1360, 9, 28], +[497065, 1360, 12, 1], +[497242, 1361, 5, 27], +[497441, 1361, 12, 12], +[497639, 1362, 6, 28], +[497742, 1362, 10, 9], +[497788, 1362, 11, 24], +[497960, 1363, 5, 15], +[498037, 1363, 7, 31], +[498152, 1363, 11, 23], +[498187, 1363, 12, 28], +[498195, 1364, 1, 5], +[498205, 1364, 1, 15], +[498229, 1364, 2, 8], +[498371, 1364, 6, 29], +[498466, 1364, 10, 2], +[498568, 1365, 1, 12], +[498580, 1365, 1, 24], +[498771, 1365, 8, 3], +[498782, 1365, 8, 14], +[498942, 1366, 1, 21], +[499100, 1366, 6, 28], +[499199, 1366, 10, 5], +[499258, 1366, 12, 3], +[499417, 1367, 5, 11], +[499521, 1367, 8, 23], +[499528, 1367, 8, 30], +[499640, 1367, 12, 20], +[499645, 1367, 12, 25], +[499698, 1368, 2, 16], +[499814, 1368, 6, 11], +[499970, 1368, 11, 14], +[500016, 1368, 12, 30], +[500065, 1369, 2, 17], +[500231, 1369, 8, 2], +[500286, 1369, 9, 26], +[500404, 1370, 1, 22], +[500486, 1370, 4, 14], +[500667, 1370, 10, 12], +[500798, 1371, 2, 20], +[500824, 1371, 3, 18], +[500986, 1371, 8, 27], +[501151, 1372, 2, 8], +[501323, 1372, 7, 29], +[501496, 1373, 1, 18], +[501580, 1373, 4, 12], +[501684, 1373, 7, 25], +[501764, 1373, 10, 13], +[501810, 1373, 11, 28], +[501893, 1374, 2, 19], +[501954, 1374, 4, 21], +[502011, 1374, 6, 17], +[502101, 1374, 9, 15], +[502110, 1374, 9, 24], +[502163, 1374, 11, 16], +[502317, 1375, 4, 19], +[502496, 1375, 10, 15], +[502550, 1375, 12, 8], +[502570, 1375, 12, 28], +[502767, 1376, 7, 12], +[502944, 1377, 1, 5], +[503082, 1377, 5, 23], +[503244, 1377, 11, 1], +[503401, 1378, 4, 7], +[503587, 1378, 10, 10], +[503767, 1379, 4, 8], +[503909, 1379, 8, 28], +[504060, 1380, 1, 26], +[504136, 1380, 4, 11], +[504280, 1380, 9, 2], +[504347, 1380, 11, 8], +[504501, 1381, 4, 11], +[504590, 1381, 7, 9], +[504643, 1381, 8, 31], +[504645, 1381, 9, 2], +[504770, 1382, 1, 5], +[504954, 1382, 7, 8], +[505143, 1383, 1, 13], +[505166, 1383, 2, 5], +[505253, 1383, 5, 3], +[505282, 1383, 6, 1], +[505415, 1383, 10, 12], +[505521, 1384, 1, 26], +[505719, 1384, 8, 11], +[505888, 1385, 1, 27], +[506078, 1385, 8, 5], +[506089, 1385, 8, 16], +[506282, 1386, 2, 25], +[506346, 1386, 4, 30], +[506413, 1386, 7, 6], +[506562, 1386, 12, 2], +[506741, 1387, 5, 30], +[506835, 1387, 9, 1], +[506919, 1387, 11, 24], +[506960, 1388, 1, 4], +[507060, 1388, 4, 13], +[507225, 1388, 9, 25], +[507289, 1388, 11, 28], +[507446, 1389, 5, 4], +[507503, 1389, 6, 30], +[507609, 1389, 10, 14], +[507634, 1389, 11, 8], +[507783, 1390, 4, 6], +[507789, 1390, 4, 12], +[507982, 1390, 10, 22], +[508101, 1391, 2, 18], +[508203, 1391, 5, 31], +[508270, 1391, 8, 6], +[508326, 1391, 10, 1], +[508449, 1392, 2, 1], +[508520, 1392, 4, 12], +[508695, 1392, 10, 4], +[508728, 1392, 11, 6], +[508909, 1393, 5, 6], +[509040, 1393, 9, 14], +[509176, 1394, 1, 28], +[509178, 1394, 1, 30], +[509216, 1394, 3, 9], +[509306, 1394, 6, 7], +[509310, 1394, 6, 11], +[509482, 1394, 11, 30], +[509636, 1395, 5, 3], +[509788, 1395, 10, 2], +[509804, 1395, 10, 18], +[509976, 1396, 4, 7], +[510054, 1396, 6, 24], +[510139, 1396, 9, 17], +[510316, 1397, 3, 13], +[510336, 1397, 4, 2], +[510348, 1397, 4, 14], +[510527, 1397, 10, 10], +[510613, 1398, 1, 4], +[510777, 1398, 6, 17], +[510956, 1398, 12, 13], +[510990, 1399, 1, 16], +[511123, 1399, 5, 29], +[511247, 1399, 9, 30], +[511308, 1399, 11, 30], +[511369, 1400, 1, 30], +[511546, 1400, 7, 26], +[511609, 1400, 9, 27], +[511702, 1400, 12, 29], +[511793, 1401, 3, 30], +[511854, 1401, 5, 30], +[511878, 1401, 6, 23], +[511944, 1401, 8, 28], +[512130, 1402, 3, 2], +[512205, 1402, 5, 16], +[512354, 1402, 10, 12], +[512502, 1403, 3, 9], +[512663, 1403, 8, 17], +[512819, 1404, 1, 20], +[512875, 1404, 3, 16], +[512921, 1404, 5, 1], +[513118, 1404, 11, 14], +[513175, 1405, 1, 10], +[513227, 1405, 3, 3], +[513384, 1405, 8, 7], +[513555, 1406, 1, 25], +[513623, 1406, 4, 3], +[513653, 1406, 5, 3], +[513709, 1406, 6, 28], +[513885, 1406, 12, 21], +[514081, 1407, 7, 5], +[514173, 1407, 10, 5], +[514320, 1408, 2, 29], +[514413, 1408, 6, 1], +[514603, 1408, 12, 8], +[514638, 1409, 1, 12], +[514709, 1409, 3, 24], +[514735, 1409, 4, 19], +[514863, 1409, 8, 25], +[514901, 1409, 10, 2], +[515000, 1410, 1, 9], +[515026, 1410, 2, 4], +[515092, 1410, 4, 11], +[515140, 1410, 5, 29], +[515231, 1410, 8, 28], +[515311, 1410, 11, 16], +[515368, 1411, 1, 12], +[515408, 1411, 2, 21], +[515551, 1411, 7, 14], +[515624, 1411, 9, 25], +[515700, 1411, 12, 10], +[515756, 1412, 2, 4], +[515802, 1412, 3, 21], +[515943, 1412, 8, 9], +[516065, 1412, 12, 9], +[516249, 1413, 6, 11], +[516402, 1413, 11, 11], +[516430, 1413, 12, 9], +[516511, 1414, 2, 28], +[516668, 1414, 8, 4], +[516682, 1414, 8, 18], +[516850, 1415, 2, 2], +[516875, 1415, 2, 27], +[516913, 1415, 4, 6], +[517096, 1415, 10, 6], +[517278, 1416, 4, 5], +[517314, 1416, 5, 11], +[517388, 1416, 7, 24], +[517419, 1416, 8, 24], +[517556, 1417, 1, 8], +[517676, 1417, 5, 8], +[517844, 1417, 10, 23], +[517917, 1418, 1, 4], +[518013, 1418, 4, 10], +[518094, 1418, 6, 30], +[518156, 1418, 8, 31], +[518185, 1418, 9, 29], +[518252, 1418, 12, 5], +[518388, 1419, 4, 20], +[518390, 1419, 4, 22], +[518508, 1419, 8, 18], +[518651, 1420, 1, 8], +[518695, 1420, 2, 21], +[518841, 1420, 7, 16], +[518852, 1420, 7, 27], +[519041, 1421, 2, 1], +[519052, 1421, 2, 12], +[519118, 1421, 4, 19], +[519313, 1421, 10, 31], +[519438, 1422, 3, 5], +[519513, 1422, 5, 19], +[519602, 1422, 8, 16], +[519650, 1422, 10, 3], +[519817, 1423, 3, 19], +[519892, 1423, 6, 2], +[520047, 1423, 11, 4], +[520177, 1424, 3, 13], +[520178, 1424, 3, 14], +[520293, 1424, 7, 7], +[520318, 1424, 8, 1], +[520342, 1424, 8, 25], +[520385, 1424, 10, 7], +[520555, 1425, 3, 26], +[520669, 1425, 7, 18], +[520846, 1426, 1, 11], +[520921, 1426, 3, 27], +[521020, 1426, 7, 4], +[521182, 1426, 12, 13], +[521244, 1427, 2, 13], +[521354, 1427, 6, 3], +[521439, 1427, 8, 27], +[521506, 1427, 11, 2], +[521527, 1427, 11, 23], +[521585, 1428, 1, 20], +[521691, 1428, 5, 5], +[521780, 1428, 8, 2], +[521941, 1429, 1, 10], +[521984, 1429, 2, 22], +[522025, 1429, 4, 4], +[522054, 1429, 5, 3], +[522119, 1429, 7, 7], +[522294, 1429, 12, 29], +[522477, 1430, 6, 30], +[522614, 1430, 11, 14], +[522763, 1431, 4, 12], +[522921, 1431, 9, 17], +[523032, 1432, 1, 6], +[523074, 1432, 2, 17], +[523247, 1432, 8, 8], +[523422, 1433, 1, 30], +[523474, 1433, 3, 23], +[523565, 1433, 6, 22], +[523600, 1433, 7, 27], +[523633, 1433, 8, 29], +[523666, 1433, 10, 1], +[523768, 1434, 1, 11], +[523939, 1434, 7, 1], +[523979, 1434, 8, 10], +[524053, 1434, 10, 23], +[524133, 1435, 1, 11], +[524297, 1435, 6, 24], +[524354, 1435, 8, 20], +[524478, 1435, 12, 22], +[524504, 1436, 1, 17], +[524534, 1436, 2, 16], +[524661, 1436, 6, 22], +[524718, 1436, 8, 18], +[524837, 1436, 12, 15], +[524874, 1437, 1, 21], +[524889, 1437, 2, 5], +[525011, 1437, 6, 7], +[525069, 1437, 8, 4], +[525222, 1438, 1, 4], +[525252, 1438, 2, 3], +[525420, 1438, 7, 21], +[525569, 1438, 12, 17], +[525585, 1439, 1, 2], +[525614, 1439, 1, 31], +[525799, 1439, 8, 4], +[525920, 1439, 12, 3], +[526008, 1440, 2, 29], +[526117, 1440, 6, 17], +[526175, 1440, 8, 14], +[526328, 1441, 1, 14], +[526365, 1441, 2, 20], +[526441, 1441, 5, 7], +[526590, 1441, 10, 3], +[526679, 1441, 12, 31], +[526789, 1442, 4, 20], +[526938, 1442, 9, 16], +[526956, 1442, 10, 4], +[527084, 1443, 2, 9], +[527096, 1443, 2, 21], +[527135, 1443, 4, 1], +[527257, 1443, 8, 1], +[527452, 1444, 2, 12], +[527506, 1444, 4, 6], +[527574, 1444, 6, 13], +[527593, 1444, 7, 2], +[527768, 1444, 12, 24], +[527869, 1445, 4, 4], +[527961, 1445, 7, 5], +[528126, 1445, 12, 17], +[528168, 1446, 1, 28], +[528272, 1446, 5, 12], +[528412, 1446, 9, 29], +[528572, 1447, 3, 8], +[528576, 1447, 3, 12], +[528712, 1447, 7, 26], +[528866, 1447, 12, 27], +[528896, 1448, 1, 26], +[529060, 1448, 7, 8], +[529249, 1449, 1, 13], +[529265, 1449, 1, 29], +[529391, 1449, 6, 4], +[529504, 1449, 9, 25], +[529596, 1449, 12, 26], +[529682, 1450, 3, 22], +[529850, 1450, 9, 6], +[529913, 1450, 11, 8], +[530019, 1451, 2, 22], +[530177, 1451, 7, 30], +[530213, 1451, 9, 4], +[530318, 1451, 12, 18], +[530424, 1452, 4, 2], +[530498, 1452, 6, 15], +[530656, 1452, 11, 20], +[530854, 1453, 6, 6], +[531009, 1453, 11, 8], +[531176, 1454, 4, 24], +[531217, 1454, 6, 4], +[531275, 1454, 8, 1], +[531323, 1454, 9, 18], +[531337, 1454, 10, 2], +[531356, 1454, 10, 21], +[531501, 1455, 3, 15], +[531671, 1455, 9, 1], +[531791, 1455, 12, 30], +[531793, 1456, 1, 1], +[531873, 1456, 3, 21], +[531894, 1456, 4, 11], +[532018, 1456, 8, 13], +[532056, 1456, 9, 20], +[532192, 1457, 2, 3], +[532220, 1457, 3, 3], +[532319, 1457, 6, 10], +[532450, 1457, 10, 19], +[532560, 1458, 2, 6], +[532567, 1458, 2, 13], +[532616, 1458, 4, 3], +[532744, 1458, 8, 9], +[532928, 1459, 2, 9], +[533128, 1459, 8, 28], +[533285, 1460, 2, 1], +[533325, 1460, 3, 12], +[533396, 1460, 5, 22], +[533508, 1460, 9, 11], +[533522, 1460, 9, 25], +[533668, 1461, 2, 18], +[533778, 1461, 6, 8], +[533793, 1461, 6, 23], +[533874, 1461, 9, 12], +[533913, 1461, 10, 21], +[534090, 1462, 4, 16], +[534217, 1462, 8, 21], +[534354, 1463, 1, 5], +[534409, 1463, 3, 1], +[534563, 1463, 8, 2], +[534697, 1463, 12, 14], +[534875, 1464, 6, 9], +[534993, 1464, 10, 5], +[535144, 1465, 3, 5], +[535300, 1465, 8, 8], +[535457, 1466, 1, 12], +[535483, 1466, 2, 7], +[535554, 1466, 4, 19], +[535655, 1466, 7, 29], +[535730, 1466, 10, 12], +[535821, 1467, 1, 11], +[536013, 1467, 7, 22], +[536157, 1467, 12, 13], +[536271, 1468, 4, 5], +[536440, 1468, 9, 21], +[536567, 1469, 1, 26], +[536748, 1469, 7, 26], +[536825, 1469, 10, 11], +[536973, 1470, 3, 8], +[537039, 1470, 5, 13], +[537185, 1470, 10, 6], +[537380, 1471, 4, 19], +[537545, 1471, 10, 1], +[537715, 1472, 3, 19], +[537854, 1472, 8, 5], +[538019, 1473, 1, 17], +[538077, 1473, 3, 16], +[538117, 1473, 4, 25], +[538205, 1473, 7, 22], +[538401, 1474, 2, 3], +[538595, 1474, 8, 16], +[538794, 1475, 3, 3], +[538951, 1475, 8, 7], +[538956, 1475, 8, 12], +[539022, 1475, 10, 17], +[539052, 1475, 11, 16], +[539213, 1476, 4, 25], +[539276, 1476, 6, 27], +[539446, 1476, 12, 14], +[539572, 1477, 4, 19], +[539695, 1477, 8, 20], +[539841, 1478, 1, 13], +[539913, 1478, 3, 26], +[540003, 1478, 6, 24], +[540052, 1478, 8, 12], +[540214, 1479, 1, 21], +[540378, 1479, 7, 4], +[540534, 1479, 12, 7], +[540595, 1480, 2, 6], +[540745, 1480, 7, 5], +[540929, 1481, 1, 5], +[540982, 1481, 2, 27], +[541021, 1481, 4, 7], +[541180, 1481, 9, 13], +[541286, 1481, 12, 28], +[541391, 1482, 4, 12], +[541395, 1482, 4, 16], +[541527, 1482, 8, 26], +[541559, 1482, 9, 27], +[541628, 1482, 12, 5], +[541769, 1483, 4, 25], +[541840, 1483, 7, 5], +[542037, 1484, 1, 18], +[542167, 1484, 5, 27], +[542293, 1484, 9, 30], +[542326, 1484, 11, 2], +[542464, 1485, 3, 20], +[542489, 1485, 4, 14], +[542648, 1485, 9, 20], +[542728, 1485, 12, 9], +[542744, 1485, 12, 25], +[542886, 1486, 5, 16], +[542978, 1486, 8, 16], +[543039, 1486, 10, 16], +[543141, 1487, 1, 26], +[543213, 1487, 4, 8], +[543336, 1487, 8, 9], +[543445, 1487, 11, 26], +[543526, 1488, 2, 15], +[543656, 1488, 6, 24], +[543684, 1488, 7, 22], +[543819, 1488, 12, 4], +[543933, 1489, 3, 28], +[543981, 1489, 5, 15], +[544007, 1489, 6, 10], +[544074, 1489, 8, 16], +[544111, 1489, 9, 22], +[544129, 1489, 10, 10], +[544303, 1490, 4, 2], +[544371, 1490, 6, 9], +[544460, 1490, 9, 6], +[544606, 1491, 1, 30], +[544608, 1491, 2, 1], +[544633, 1491, 2, 26], +[544790, 1491, 8, 2], +[544825, 1491, 9, 6], +[545025, 1492, 3, 24], +[545186, 1492, 9, 1], +[545275, 1492, 11, 29], +[545336, 1493, 1, 29], +[545424, 1493, 4, 27], +[545452, 1493, 5, 25], +[545505, 1493, 7, 17], +[545640, 1493, 11, 29], +[545660, 1493, 12, 19], +[545736, 1494, 3, 5], +[545871, 1494, 7, 18], +[546005, 1494, 11, 29], +[546015, 1494, 12, 9], +[546171, 1495, 5, 14], +[546316, 1495, 10, 6], +[546505, 1496, 4, 12], +[546576, 1496, 6, 22], +[546671, 1496, 9, 25], +[546780, 1497, 1, 12], +[546818, 1497, 2, 19], +[546905, 1497, 5, 17], +[546918, 1497, 5, 30], +[546933, 1497, 6, 14], +[547104, 1497, 12, 2], +[547151, 1498, 1, 18], +[547194, 1498, 3, 2], +[547375, 1498, 8, 30], +[547398, 1498, 9, 22], +[547475, 1498, 12, 8], +[547636, 1499, 5, 18], +[547647, 1499, 5, 29], +[547826, 1499, 11, 24], +[547857, 1499, 12, 25], +[547929, 1500, 3, 7], +[548025, 1500, 6, 11], +[548169, 1500, 11, 2], +[548316, 1501, 3, 29], +[548399, 1501, 6, 20], +[548536, 1501, 11, 4], +[548634, 1502, 2, 10], +[548825, 1502, 8, 20], +[548921, 1502, 11, 24], +[548963, 1503, 1, 5], +[549051, 1503, 4, 3], +[549130, 1503, 6, 21], +[549251, 1503, 10, 20], +[549259, 1503, 10, 28], +[549409, 1504, 3, 26], +[549524, 1504, 7, 19], +[549723, 1505, 2, 3], +[549817, 1505, 5, 8], +[549885, 1505, 7, 15], +[550044, 1505, 12, 21], +[550139, 1506, 3, 26], +[550176, 1506, 5, 2], +[550271, 1506, 8, 5], +[550385, 1506, 11, 27], +[550488, 1507, 3, 10], +[550594, 1507, 6, 24], +[550736, 1507, 11, 13], +[550848, 1508, 3, 4], +[550888, 1508, 4, 13], +[551079, 1508, 10, 21], +[551164, 1509, 1, 14], +[551272, 1509, 5, 2], +[551433, 1509, 10, 10], +[551548, 1510, 2, 2], +[551581, 1510, 3, 7], +[551667, 1510, 6, 1], +[551797, 1510, 10, 9], +[551938, 1511, 2, 27], +[551967, 1511, 3, 28], +[552128, 1511, 9, 5], +[552229, 1511, 12, 15], +[552318, 1512, 3, 13], +[552406, 1512, 6, 9], +[552513, 1512, 9, 24], +[552560, 1512, 11, 10], +[552589, 1512, 12, 9], +[552654, 1513, 2, 12], +[552699, 1513, 3, 29], +[552750, 1513, 5, 19], +[552865, 1513, 9, 11], +[552944, 1513, 11, 29], +[552990, 1514, 1, 14], +[553149, 1514, 6, 22], +[553312, 1514, 12, 2], +[553436, 1515, 4, 5], +[553476, 1515, 5, 15], +[553620, 1515, 10, 6], +[553679, 1515, 12, 4], +[553691, 1515, 12, 16], +[553720, 1516, 1, 14], +[553852, 1516, 5, 25], +[553874, 1516, 6, 16], +[553891, 1516, 7, 3], +[553912, 1516, 7, 24], +[554029, 1516, 11, 18], +[554132, 1517, 3, 1], +[554214, 1517, 5, 22], +[554384, 1517, 11, 8], +[554420, 1517, 12, 14], +[554476, 1518, 2, 8], +[554536, 1518, 4, 9], +[554659, 1518, 8, 10], +[554810, 1519, 1, 8], +[554879, 1519, 3, 18], +[555004, 1519, 7, 21], +[555035, 1519, 8, 21], +[555232, 1520, 3, 5], +[555276, 1520, 4, 18], +[555430, 1520, 9, 19], +[555589, 1521, 2, 25], +[555769, 1521, 8, 24], +[555893, 1521, 12, 26], +[555928, 1522, 1, 30], +[555994, 1522, 4, 6], +[556034, 1522, 5, 16], +[556046, 1522, 5, 28], +[556081, 1522, 7, 2], +[556144, 1522, 9, 3], +[556184, 1522, 10, 13], +[556285, 1523, 1, 22], +[556429, 1523, 6, 15], +[556567, 1523, 10, 31], +[556604, 1523, 12, 7], +[556707, 1524, 3, 19], +[556866, 1524, 8, 25], +[556992, 1524, 12, 29], +[557175, 1525, 6, 30], +[557265, 1525, 9, 28], +[557317, 1525, 11, 19], +[557399, 1526, 2, 9], +[557504, 1526, 5, 25], +[557527, 1526, 6, 17], +[557587, 1526, 8, 16], +[557783, 1527, 2, 28], +[557862, 1527, 5, 18], +[557906, 1527, 7, 1], +[558062, 1527, 12, 4], +[558107, 1528, 1, 18], +[558114, 1528, 1, 25], +[558138, 1528, 2, 18], +[558332, 1528, 8, 30], +[558418, 1528, 11, 24], +[558483, 1529, 1, 28], +[558657, 1529, 7, 21], +[558822, 1530, 1, 2], +[558892, 1530, 3, 13], +[559075, 1530, 9, 12], +[559111, 1530, 10, 18], +[559258, 1531, 3, 14], +[559293, 1531, 4, 18], +[559380, 1531, 7, 14], +[559390, 1531, 7, 24], +[559425, 1531, 8, 28], +[559542, 1531, 12, 23], +[559661, 1532, 4, 20], +[559667, 1532, 4, 26], +[559847, 1532, 10, 23], +[559946, 1533, 1, 30], +[560102, 1533, 7, 5], +[560104, 1533, 7, 7], +[560144, 1533, 8, 16], +[560332, 1534, 2, 20], +[560499, 1534, 8, 6], +[560631, 1534, 12, 16], +[560810, 1535, 6, 13], +[560853, 1535, 7, 26], +[560951, 1535, 11, 1], +[561057, 1536, 2, 15], +[561210, 1536, 7, 17], +[561352, 1536, 12, 6], +[561450, 1537, 3, 14], +[561511, 1537, 5, 14], +[561650, 1537, 9, 30], +[561659, 1537, 10, 9], +[561824, 1538, 3, 23], +[561847, 1538, 4, 15], +[562047, 1538, 11, 1], +[562058, 1538, 11, 12], +[562070, 1538, 11, 24], +[562116, 1539, 1, 9], +[562302, 1539, 7, 14], +[562378, 1539, 9, 28], +[562432, 1539, 11, 21], +[562437, 1539, 11, 26], +[562582, 1540, 4, 19], +[562593, 1540, 4, 30], +[562686, 1540, 8, 1], +[562777, 1540, 10, 31], +[562883, 1541, 2, 14], +[562971, 1541, 5, 13], +[563061, 1541, 8, 11], +[563076, 1541, 8, 26], +[563155, 1541, 11, 13], +[563315, 1542, 4, 22], +[563411, 1542, 7, 27], +[563434, 1542, 8, 19], +[563543, 1542, 12, 6], +[563579, 1543, 1, 11], +[563623, 1543, 2, 24], +[563676, 1543, 4, 18], +[563787, 1543, 8, 7], +[563828, 1543, 9, 17], +[563840, 1543, 9, 29], +[564020, 1544, 3, 27], +[564203, 1544, 9, 26], +[564377, 1545, 3, 19], +[564421, 1545, 5, 2], +[564455, 1545, 6, 5], +[564605, 1545, 11, 2], +[564740, 1546, 3, 17], +[564850, 1546, 7, 5], +[565033, 1547, 1, 4], +[565145, 1547, 4, 26], +[565233, 1547, 7, 23], +[565349, 1547, 11, 16], +[565359, 1547, 11, 26], +[565449, 1548, 2, 24], +[565640, 1548, 9, 2], +[565819, 1549, 2, 28], +[565827, 1549, 3, 8], +[565874, 1549, 4, 24], +[566023, 1549, 9, 20], +[566136, 1550, 1, 11], +[566250, 1550, 5, 5], +[566450, 1550, 11, 21], +[566459, 1550, 11, 30], +[566506, 1551, 1, 16], +[566523, 1551, 2, 2], +[566606, 1551, 4, 26], +[566612, 1551, 5, 2], +[566622, 1551, 5, 12], +[566699, 1551, 7, 28], +[566850, 1551, 12, 26], +[567020, 1552, 6, 13], +[567028, 1552, 6, 21], +[567154, 1552, 10, 25], +[567274, 1553, 2, 22], +[567474, 1553, 9, 10], +[567574, 1553, 12, 19], +[567592, 1554, 1, 6], +[567662, 1554, 3, 17], +[567770, 1554, 7, 3], +[567864, 1554, 10, 5], +[567945, 1554, 12, 25], +[567999, 1555, 2, 17], +[568182, 1555, 8, 19], +[568277, 1555, 11, 22], +[568462, 1556, 5, 25], +[568518, 1556, 7, 20], +[568693, 1557, 1, 11], +[568832, 1557, 5, 30], +[568913, 1557, 8, 19], +[569071, 1558, 1, 24], +[569085, 1558, 2, 7], +[569089, 1558, 2, 11], +[569195, 1558, 5, 28], +[569320, 1558, 9, 30], +[569346, 1558, 10, 26], +[569409, 1558, 12, 28], +[569439, 1559, 1, 27], +[569455, 1559, 2, 12], +[569459, 1559, 2, 16], +[569518, 1559, 4, 16], +[569662, 1559, 9, 7], +[569811, 1560, 2, 3], +[569970, 1560, 7, 11], +[570148, 1561, 1, 5], +[570236, 1561, 4, 3], +[570286, 1561, 5, 23], +[570412, 1561, 9, 26], +[570424, 1561, 10, 8], +[570609, 1562, 4, 11], +[570763, 1562, 9, 12], +[570809, 1562, 10, 28], +[570923, 1563, 2, 19], +[571052, 1563, 6, 28], +[571205, 1563, 11, 28], +[571252, 1564, 1, 14], +[571380, 1564, 5, 21], +[571464, 1564, 8, 13], +[571645, 1565, 2, 10], +[571775, 1565, 6, 20], +[571841, 1565, 8, 25], +[571848, 1565, 9, 1], +[571902, 1565, 10, 25], +[572014, 1566, 2, 14], +[572104, 1566, 5, 15], +[572228, 1566, 9, 16], +[572345, 1567, 1, 11], +[572358, 1567, 1, 24], +[572441, 1567, 4, 17], +[572545, 1567, 7, 30], +[572706, 1568, 1, 7], +[572769, 1568, 3, 10], +[572806, 1568, 4, 16], +[572883, 1568, 7, 2], +[572886, 1568, 7, 5], +[573086, 1569, 1, 21], +[573091, 1569, 1, 26], +[573242, 1569, 6, 26], +[573352, 1569, 10, 14], +[573508, 1570, 3, 19], +[573557, 1570, 5, 7], +[573677, 1570, 9, 4], +[573864, 1571, 3, 10], +[573887, 1571, 4, 2], +[573891, 1571, 4, 6], +[574013, 1571, 8, 6], +[574093, 1571, 10, 25], +[574264, 1572, 4, 13], +[574331, 1572, 6, 19], +[574482, 1572, 11, 17], +[574569, 1573, 2, 12], +[574750, 1573, 8, 12], +[574751, 1573, 8, 13], +[574807, 1573, 10, 8], +[574968, 1574, 3, 18], +[575146, 1574, 9, 12], +[575293, 1575, 2, 6], +[575400, 1575, 5, 24], +[575459, 1575, 7, 22], +[575647, 1576, 1, 26], +[575783, 1576, 6, 10], +[575916, 1576, 10, 21], +[575935, 1576, 11, 9], +[576012, 1577, 1, 25], +[576114, 1577, 5, 7], +[576242, 1577, 9, 12], +[576378, 1578, 1, 26], +[576408, 1578, 2, 25], +[576591, 1578, 8, 27], +[576599, 1578, 9, 4], +[576615, 1578, 9, 20], +[576748, 1579, 1, 31], +[576937, 1579, 8, 8], +[577112, 1580, 1, 30], +[577256, 1580, 6, 22], +[577356, 1580, 9, 30], +[577529, 1581, 3, 22], +[577661, 1581, 8, 1], +[577720, 1581, 9, 29], +[577892, 1582, 3, 20], +[578049, 1582, 8, 24], +[578096, 1582, 10, 10], +[578195, 1583, 1, 17], +[578223, 1583, 2, 14], +[578293, 1583, 4, 25], +[578357, 1583, 6, 28], +[578380, 1583, 7, 21], +[578570, 1584, 1, 27], +[578690, 1584, 5, 26], +[578830, 1584, 10, 13], +[578992, 1585, 3, 24], +[579190, 1585, 10, 8], +[579310, 1586, 2, 5], +[579376, 1586, 4, 12], +[579420, 1586, 5, 26], +[579496, 1586, 8, 10], +[579603, 1586, 11, 25], +[579724, 1587, 3, 26], +[579807, 1587, 6, 17], +[579813, 1587, 6, 23], +[580012, 1588, 1, 8], +[580209, 1588, 7, 23], +[580360, 1588, 12, 21], +[580452, 1589, 3, 23], +[580572, 1589, 7, 21], +[580650, 1589, 10, 7], +[580766, 1590, 1, 31], +[580957, 1590, 8, 10], +[581011, 1590, 10, 3], +[581114, 1591, 1, 14], +[581191, 1591, 4, 1], +[581308, 1591, 7, 27], +[581489, 1592, 1, 24], +[581681, 1592, 8, 3], +[581862, 1593, 1, 31], +[581982, 1593, 5, 31], +[582006, 1593, 6, 24], +[582184, 1593, 12, 19], +[582293, 1594, 4, 7], +[582421, 1594, 8, 13], +[582562, 1595, 1, 1], +[582679, 1595, 4, 28], +[582704, 1595, 5, 23], +[582896, 1595, 12, 1], +[583024, 1596, 4, 7], +[583106, 1596, 6, 28], +[583291, 1596, 12, 30], +[583453, 1597, 6, 10], +[583519, 1597, 8, 15], +[583649, 1597, 12, 23], +[583785, 1598, 5, 8], +[583914, 1598, 9, 14], +[584084, 1599, 3, 3], +[584222, 1599, 7, 19], +[584247, 1599, 8, 13], +[584446, 1600, 2, 28], +[584597, 1600, 7, 28], +[584612, 1600, 8, 12], +[584666, 1600, 10, 5], +[584774, 1601, 1, 21], +[584779, 1601, 1, 26], +[584861, 1601, 4, 18], +[584914, 1601, 6, 10], +[585101, 1601, 12, 14], +[585153, 1602, 2, 4], +[585302, 1602, 7, 3], +[585335, 1602, 8, 5], +[585505, 1603, 1, 22], +[585671, 1603, 7, 7], +[585736, 1603, 9, 10], +[585913, 1604, 3, 5], +[585937, 1604, 3, 29], +[585943, 1604, 4, 4], +[586108, 1604, 9, 16], +[586249, 1605, 2, 4], +[586443, 1605, 8, 17], +[586498, 1605, 10, 11], +[586601, 1606, 1, 22], +[586677, 1606, 4, 8], +[586733, 1606, 6, 3], +[586777, 1606, 7, 17], +[586817, 1606, 8, 26], +[586863, 1606, 10, 11], +[586925, 1606, 12, 12], +[586991, 1607, 2, 16], +[587150, 1607, 7, 25], +[587265, 1607, 11, 17], +[587346, 1608, 2, 6], +[587415, 1608, 4, 15], +[587450, 1608, 5, 20], +[587615, 1608, 11, 1], +[587645, 1608, 12, 1], +[587755, 1609, 3, 21], +[587869, 1609, 7, 13], +[588007, 1609, 11, 28], +[588053, 1610, 1, 13], +[588067, 1610, 1, 27], +[588092, 1610, 2, 21], +[588250, 1610, 7, 29], +[588294, 1610, 9, 11], +[588400, 1610, 12, 26], +[588540, 1611, 5, 15], +[588657, 1611, 9, 9], +[588735, 1611, 11, 26], +[588856, 1612, 3, 26], +[588976, 1612, 7, 24], +[589087, 1612, 11, 12], +[589240, 1613, 4, 14], +[589433, 1613, 10, 24], +[589609, 1614, 4, 18], +[589643, 1614, 5, 22], +[589796, 1614, 10, 22], +[589950, 1615, 3, 25], +[590068, 1615, 7, 21], +[590164, 1615, 10, 25], +[590346, 1616, 4, 24], +[590525, 1616, 10, 20], +[590680, 1617, 3, 24], +[590828, 1617, 8, 19], +[590844, 1617, 9, 4], +[590920, 1617, 11, 19], +[591114, 1618, 6, 1], +[591234, 1618, 9, 29], +[591339, 1619, 1, 12], +[591457, 1619, 5, 10], +[591502, 1619, 6, 24], +[591680, 1619, 12, 19], +[591708, 1620, 1, 16], +[591785, 1620, 4, 2], +[591838, 1620, 5, 25], +[591890, 1620, 7, 16], +[592015, 1620, 11, 18], +[592027, 1620, 11, 30], +[592183, 1621, 5, 5], +[592333, 1621, 10, 2], +[592387, 1621, 11, 25], +[592562, 1622, 5, 19], +[592685, 1622, 9, 19], +[592691, 1622, 9, 25], +[592717, 1622, 10, 21], +[592841, 1623, 2, 22], +[592887, 1623, 4, 9], +[592898, 1623, 4, 20], +[592908, 1623, 4, 30], +[592927, 1623, 5, 19], +[593104, 1623, 11, 12], +[593272, 1624, 4, 28], +[593411, 1624, 9, 14], +[593529, 1625, 1, 10], +[593692, 1625, 6, 22], +[593700, 1625, 6, 30], +[593855, 1625, 12, 2], +[594044, 1626, 6, 9], +[594227, 1626, 12, 9], +[594253, 1627, 1, 4], +[594267, 1627, 1, 18], +[594374, 1627, 5, 5], +[594483, 1627, 8, 22], +[594514, 1627, 9, 22], +[594677, 1628, 3, 3], +[594873, 1628, 9, 15], +[595012, 1629, 2, 1], +[595015, 1629, 2, 4], +[595054, 1629, 3, 15], +[595230, 1629, 9, 7], +[595343, 1629, 12, 29], +[595355, 1630, 1, 10], +[595405, 1630, 3, 1], +[595522, 1630, 6, 26], +[595525, 1630, 6, 29], +[595650, 1630, 11, 1], +[595677, 1630, 11, 28], +[595762, 1631, 2, 21], +[595879, 1631, 6, 18], +[596060, 1631, 12, 16], +[596140, 1632, 3, 5], +[596214, 1632, 5, 18], +[596301, 1632, 8, 13], +[596381, 1632, 11, 1], +[596550, 1633, 4, 19], +[596694, 1633, 9, 10], +[596759, 1633, 11, 14], +[596820, 1634, 1, 14], +[597007, 1634, 7, 20], +[597169, 1634, 12, 29], +[597190, 1635, 1, 19], +[597241, 1635, 3, 11], +[597313, 1635, 5, 22], +[597504, 1635, 11, 29], +[597536, 1635, 12, 31], +[597656, 1636, 4, 29], +[597836, 1636, 10, 26], +[597897, 1636, 12, 26], +[597995, 1637, 4, 3], +[598026, 1637, 5, 4], +[598171, 1637, 9, 26], +[598324, 1638, 2, 26], +[598453, 1638, 7, 5], +[598497, 1638, 8, 18], +[598504, 1638, 8, 25], +[598647, 1639, 1, 15], +[598689, 1639, 2, 26], +[598787, 1639, 6, 4], +[598956, 1639, 11, 20], +[599124, 1640, 5, 6], +[599163, 1640, 6, 14], +[599167, 1640, 6, 18], +[599266, 1640, 9, 25], +[599385, 1641, 1, 22], +[599555, 1641, 7, 11], +[599639, 1641, 10, 3], +[599734, 1642, 1, 6], +[599828, 1642, 4, 10], +[600002, 1642, 10, 1], +[600022, 1642, 10, 21], +[600026, 1642, 10, 25], +[600074, 1642, 12, 12], +[600169, 1643, 3, 17], +[600284, 1643, 7, 10], +[600359, 1643, 9, 23], +[600429, 1643, 12, 2], +[600569, 1644, 4, 20], +[600677, 1644, 8, 6], +[600722, 1644, 9, 20], +[600817, 1644, 12, 24], +[600922, 1645, 4, 8], +[600940, 1645, 4, 26], +[600966, 1645, 5, 22], +[601163, 1645, 12, 5], +[601297, 1646, 4, 18], +[601373, 1646, 7, 3], +[601467, 1646, 10, 5], +[601560, 1647, 1, 6], +[601586, 1647, 2, 1], +[601664, 1647, 4, 20], +[601731, 1647, 6, 26], +[601770, 1647, 8, 4], +[601867, 1647, 11, 9], +[602033, 1648, 4, 23], +[602174, 1648, 9, 11], +[602199, 1648, 10, 6], +[602289, 1649, 1, 4], +[602363, 1649, 3, 19], +[602511, 1649, 8, 14], +[602572, 1649, 10, 14], +[602702, 1650, 2, 21], +[602785, 1650, 5, 15], +[602832, 1650, 7, 1], +[602875, 1650, 8, 13], +[603070, 1651, 2, 24], +[603202, 1651, 7, 6], +[603328, 1651, 11, 9], +[603352, 1651, 12, 3], +[603506, 1652, 5, 5], +[603564, 1652, 7, 2], +[603603, 1652, 8, 10], +[603768, 1653, 1, 22], +[603816, 1653, 3, 11], +[603945, 1653, 7, 18], +[604106, 1653, 12, 26], +[604293, 1654, 7, 1], +[604311, 1654, 7, 19], +[604470, 1654, 12, 25], +[604646, 1655, 6, 19], +[604667, 1655, 7, 10], +[604669, 1655, 7, 12], +[604731, 1655, 9, 12], +[604855, 1656, 1, 14], +[604960, 1656, 4, 28], +[605020, 1656, 6, 27], +[605124, 1656, 10, 9], +[605211, 1657, 1, 4], +[605317, 1657, 4, 20], +[605407, 1657, 7, 19], +[605463, 1657, 9, 13], +[605517, 1657, 11, 6], +[605690, 1658, 4, 28], +[605701, 1658, 5, 9], +[605787, 1658, 8, 3], +[605905, 1658, 11, 29], +[605956, 1659, 1, 19], +[606113, 1659, 6, 25], +[606291, 1659, 12, 20], +[606466, 1660, 6, 12], +[606630, 1660, 11, 23], +[606672, 1661, 1, 4], +[606784, 1661, 4, 26], +[606870, 1661, 7, 21], +[606879, 1661, 7, 30], +[606960, 1661, 10, 19], +[607158, 1662, 5, 5], +[607353, 1662, 11, 16], +[607370, 1662, 12, 3], +[607499, 1663, 4, 11], +[607527, 1663, 5, 9], +[607549, 1663, 5, 31], +[607689, 1663, 10, 18], +[607752, 1663, 12, 20], +[607928, 1664, 6, 13], +[608127, 1664, 12, 29], +[608207, 1665, 3, 19], +[608311, 1665, 7, 1], +[608357, 1665, 8, 16], +[608530, 1666, 2, 5], +[608641, 1666, 5, 27], +[608709, 1666, 8, 3], +[608779, 1666, 10, 12], +[608880, 1667, 1, 21], +[609008, 1667, 5, 29], +[609182, 1667, 11, 19], +[609226, 1668, 1, 2], +[609285, 1668, 3, 1], +[609458, 1668, 8, 21], +[609619, 1669, 1, 29], +[609647, 1669, 2, 26], +[609718, 1669, 5, 8], +[609898, 1669, 11, 4], +[609921, 1669, 11, 27], +[610069, 1670, 4, 24], +[610108, 1670, 6, 2], +[610175, 1670, 8, 8], +[610335, 1671, 1, 15], +[610528, 1671, 7, 27], +[610655, 1671, 12, 1], +[610727, 1672, 2, 11], +[610734, 1672, 2, 18], +[610780, 1672, 4, 4], +[610966, 1672, 10, 7], +[611032, 1672, 12, 12], +[611168, 1673, 4, 27], +[611268, 1673, 8, 5], +[611404, 1673, 12, 19], +[611405, 1673, 12, 20], +[611438, 1674, 1, 22], +[611633, 1674, 8, 5], +[611787, 1675, 1, 6], +[611815, 1675, 2, 3], +[611907, 1675, 5, 6], +[611963, 1675, 7, 1], +[611994, 1675, 8, 1], +[612192, 1676, 2, 15], +[612374, 1676, 8, 15], +[612484, 1676, 12, 3], +[612664, 1677, 6, 1], +[612754, 1677, 8, 30], +[612839, 1677, 11, 23], +[612849, 1677, 12, 3], +[612949, 1678, 3, 13], +[613057, 1678, 6, 29], +[613187, 1678, 11, 6], +[613282, 1679, 2, 9], +[613301, 1679, 2, 28], +[613415, 1679, 6, 22], +[613471, 1679, 8, 17], +[613539, 1679, 10, 24], +[613716, 1680, 4, 18], +[613752, 1680, 5, 24], +[613787, 1680, 6, 28], +[613967, 1680, 12, 25], +[613999, 1681, 1, 26], +[614135, 1681, 6, 11], +[614285, 1681, 11, 8], +[614290, 1681, 11, 13], +[614443, 1682, 4, 15], +[614529, 1682, 7, 10], +[614650, 1682, 11, 8], +[614838, 1683, 5, 15], +[614879, 1683, 6, 25], +[614946, 1683, 8, 31], +[615036, 1683, 11, 29], +[615091, 1684, 1, 23], +[615246, 1684, 6, 26], +[615286, 1684, 8, 5], +[615345, 1684, 10, 3], +[615443, 1685, 1, 9], +[615540, 1685, 4, 16], +[615694, 1685, 9, 17], +[615849, 1686, 2, 19], +[615987, 1686, 7, 7], +[616138, 1686, 12, 5], +[616255, 1687, 4, 1], +[616434, 1687, 9, 27], +[616624, 1688, 4, 4], +[616650, 1688, 4, 30], +[616703, 1688, 6, 22], +[616752, 1688, 8, 10], +[616759, 1688, 8, 17], +[616844, 1688, 11, 10], +[616929, 1689, 2, 3], +[617067, 1689, 6, 21], +[617099, 1689, 7, 23], +[617182, 1689, 10, 14], +[617240, 1689, 12, 11], +[617368, 1690, 4, 18], +[617444, 1690, 7, 3], +[617469, 1690, 7, 28], +[617596, 1690, 12, 2], +[617726, 1691, 4, 11], +[617918, 1691, 10, 20], +[617974, 1691, 12, 15], +[617998, 1692, 1, 8], +[618182, 1692, 7, 10], +[618279, 1692, 10, 15], +[618337, 1692, 12, 12], +[618437, 1693, 3, 22], +[618595, 1693, 8, 27], +[618789, 1694, 3, 9], +[618955, 1694, 8, 22], +[619135, 1695, 2, 18], +[619274, 1695, 7, 7], +[619419, 1695, 11, 29], +[619608, 1696, 6, 5], +[619670, 1696, 8, 6], +[619740, 1696, 10, 15], +[619820, 1697, 1, 3], +[619917, 1697, 4, 10], +[619936, 1697, 4, 29], +[619942, 1697, 5, 5], +[619987, 1697, 6, 19], +[620073, 1697, 9, 13], +[620218, 1698, 2, 5], +[620316, 1698, 5, 14], +[620378, 1698, 7, 15], +[620493, 1698, 11, 7], +[620602, 1699, 2, 24], +[620630, 1699, 3, 24], +[620753, 1699, 7, 25], +[620898, 1699, 12, 17], +[620995, 1700, 3, 24], +[621080, 1700, 6, 17], +[621232, 1700, 11, 16], +[621312, 1701, 2, 4], +[621381, 1701, 4, 14], +[621481, 1701, 7, 23], +[621513, 1701, 8, 24], +[621614, 1701, 12, 3], +[621666, 1702, 1, 24], +[621732, 1702, 3, 31], +[621854, 1702, 7, 31], +[622030, 1703, 1, 23], +[622055, 1703, 2, 17], +[622124, 1703, 4, 27], +[622290, 1703, 10, 10], +[622426, 1704, 2, 23], +[622451, 1704, 3, 19], +[622522, 1704, 5, 29], +[622714, 1704, 12, 7], +[622901, 1705, 6, 12], +[622938, 1705, 7, 19], +[622956, 1705, 8, 6], +[623054, 1705, 11, 12], +[623227, 1706, 5, 4], +[623239, 1706, 5, 16], +[623241, 1706, 5, 18], +[623346, 1706, 8, 31], +[623496, 1707, 1, 28], +[623497, 1707, 1, 29], +[623690, 1707, 8, 10], +[623871, 1708, 2, 7], +[623970, 1708, 5, 16], +[624064, 1708, 8, 18], +[624116, 1708, 10, 9], +[624136, 1708, 10, 29], +[624252, 1709, 2, 22], +[624436, 1709, 8, 25], +[624608, 1710, 2, 13], +[624699, 1710, 5, 15], +[624741, 1710, 6, 26], +[624865, 1710, 10, 28], +[624952, 1711, 1, 23], +[625054, 1711, 5, 5], +[625141, 1711, 7, 31], +[625333, 1712, 2, 8], +[625378, 1712, 3, 24], +[625506, 1712, 7, 30], +[625514, 1712, 8, 7], +[625666, 1713, 1, 6], +[625745, 1713, 3, 26], +[625872, 1713, 7, 31], +[625991, 1713, 11, 27], +[626099, 1714, 3, 15], +[626108, 1714, 3, 24], +[626268, 1714, 8, 31], +[626283, 1714, 9, 15], +[626385, 1714, 12, 26], +[626525, 1715, 5, 15], +[626647, 1715, 9, 14], +[626779, 1716, 1, 24], +[626849, 1716, 4, 3], +[626897, 1716, 5, 21], +[626952, 1716, 7, 15], +[627065, 1716, 11, 5], +[627156, 1717, 2, 4], +[627308, 1717, 7, 6], +[627405, 1717, 10, 11], +[627474, 1717, 12, 19], +[627548, 1718, 3, 3], +[627745, 1718, 9, 16], +[627771, 1718, 10, 12], +[627948, 1719, 4, 7], +[628099, 1719, 9, 5], +[628168, 1719, 11, 13], +[628254, 1720, 2, 7], +[628382, 1720, 6, 14], +[628445, 1720, 8, 16], +[628560, 1720, 12, 9], +[628645, 1721, 3, 4], +[628768, 1721, 7, 5], +[628868, 1721, 10, 13], +[628913, 1721, 11, 27], +[628993, 1722, 2, 15], +[629006, 1722, 2, 28], +[629084, 1722, 5, 17], +[629164, 1722, 8, 5], +[629229, 1722, 10, 9], +[629393, 1723, 3, 22], +[629421, 1723, 4, 19], +[629592, 1723, 10, 7], +[629688, 1724, 1, 11], +[629774, 1724, 4, 6], +[629926, 1724, 9, 5], +[630094, 1725, 2, 20], +[630266, 1725, 8, 11], +[630461, 1726, 2, 22], +[630569, 1726, 6, 10], +[630761, 1726, 12, 19], +[630918, 1727, 5, 25], +[631035, 1727, 9, 19], +[631217, 1728, 3, 19], +[631283, 1728, 5, 24], +[631442, 1728, 10, 30], +[631501, 1728, 12, 28], +[631549, 1729, 2, 14], +[631738, 1729, 8, 22], +[631833, 1729, 11, 25], +[632004, 1730, 5, 15], +[632117, 1730, 9, 5], +[632244, 1731, 1, 10], +[632443, 1731, 7, 28], +[632570, 1731, 12, 2], +[632622, 1732, 1, 23], +[632679, 1732, 3, 20], +[632786, 1732, 7, 5], +[632969, 1733, 1, 4], +[633139, 1733, 6, 23], +[633178, 1733, 8, 1], +[633350, 1734, 1, 20], +[633424, 1734, 4, 4], +[633437, 1734, 4, 17], +[633615, 1734, 10, 12], +[633737, 1735, 2, 11], +[633913, 1735, 8, 6], +[634107, 1736, 2, 16], +[634226, 1736, 6, 14], +[634348, 1736, 10, 14], +[634409, 1736, 12, 14], +[634440, 1737, 1, 14], +[634516, 1737, 3, 31], +[634621, 1737, 7, 14], +[634722, 1737, 10, 23], +[634837, 1738, 2, 15], +[635001, 1738, 7, 29], +[635082, 1738, 10, 18], +[635084, 1738, 10, 20], +[635229, 1739, 3, 14], +[635307, 1739, 5, 31], +[635391, 1739, 8, 23], +[635419, 1739, 9, 20], +[635512, 1739, 12, 22], +[635590, 1740, 3, 9], +[635628, 1740, 4, 16], +[635808, 1740, 10, 13], +[635880, 1740, 12, 24], +[636040, 1741, 6, 2], +[636066, 1741, 6, 28], +[636178, 1741, 10, 18], +[636302, 1742, 2, 19], +[636451, 1742, 7, 18], +[636500, 1742, 9, 5], +[636570, 1742, 11, 14], +[636727, 1743, 4, 20], +[636767, 1743, 5, 30], +[636870, 1743, 9, 10], +[637061, 1744, 3, 19], +[637226, 1744, 8, 31], +[637258, 1744, 10, 2], +[637407, 1745, 2, 28], +[637570, 1745, 8, 10], +[637725, 1746, 1, 12], +[637876, 1746, 6, 12], +[637928, 1746, 8, 3], +[637977, 1746, 9, 21], +[638143, 1747, 3, 6], +[638297, 1747, 8, 7], +[638486, 1748, 2, 12], +[638614, 1748, 6, 19], +[638700, 1748, 9, 13], +[638787, 1748, 12, 9], +[638926, 1749, 4, 27], +[639112, 1749, 10, 30], +[639272, 1750, 4, 8], +[639433, 1750, 9, 16], +[639620, 1751, 3, 22], +[639645, 1751, 4, 16], +[639665, 1751, 5, 6], +[639724, 1751, 7, 4], +[639881, 1751, 12, 8], +[640032, 1752, 5, 7], +[640184, 1752, 10, 6], +[640310, 1753, 2, 9], +[640430, 1753, 6, 9], +[640507, 1753, 8, 25], +[640602, 1753, 11, 28], +[640775, 1754, 5, 20], +[640898, 1754, 9, 20], +[641079, 1755, 3, 20], +[641150, 1755, 5, 30], +[641202, 1755, 7, 21], +[641236, 1755, 8, 24], +[641318, 1755, 11, 14], +[641435, 1756, 3, 10], +[641587, 1756, 8, 9], +[641745, 1757, 1, 14], +[641747, 1757, 1, 16], +[641887, 1757, 6, 5], +[642035, 1757, 10, 31], +[642049, 1757, 11, 14], +[642067, 1757, 12, 2], +[642241, 1758, 5, 25], +[642427, 1758, 11, 27], +[642505, 1759, 2, 13], +[642604, 1759, 5, 23], +[642666, 1759, 7, 24], +[642717, 1759, 9, 13], +[642810, 1759, 12, 15], +[642821, 1759, 12, 26], +[642877, 1760, 2, 20], +[643039, 1760, 7, 31], +[643229, 1761, 2, 6], +[643397, 1761, 7, 24], +[643429, 1761, 8, 25], +[643476, 1761, 10, 11], +[643486, 1761, 10, 21], +[643565, 1762, 1, 8], +[643650, 1762, 4, 3], +[643750, 1762, 7, 12], +[643889, 1762, 11, 28], +[644080, 1763, 6, 7], +[644093, 1763, 6, 20], +[644223, 1763, 10, 28], +[644322, 1764, 2, 4], +[644388, 1764, 4, 10], +[644572, 1764, 10, 11], +[644626, 1764, 12, 4], +[644766, 1765, 4, 23], +[644773, 1765, 4, 30], +[644928, 1765, 10, 2], +[645069, 1766, 2, 20], +[645247, 1766, 8, 17], +[645376, 1766, 12, 24], +[645399, 1767, 1, 16], +[645508, 1767, 5, 5], +[645596, 1767, 8, 1], +[645778, 1768, 1, 30], +[645876, 1768, 5, 7], +[645988, 1768, 8, 27], +[646175, 1769, 3, 2], +[646255, 1769, 5, 21], +[646313, 1769, 7, 18], +[646445, 1769, 11, 27], +[646514, 1770, 2, 4], +[646558, 1770, 3, 20], +[646715, 1770, 8, 24], +[646771, 1770, 10, 19], +[646925, 1771, 3, 22], +[646940, 1771, 4, 6], +[647015, 1771, 6, 20], +[647043, 1771, 7, 18], +[647225, 1772, 1, 16], +[647425, 1772, 8, 3], +[647508, 1772, 10, 25], +[647628, 1773, 2, 22], +[647712, 1773, 5, 17], +[647911, 1773, 12, 2], +[648003, 1774, 3, 4], +[648140, 1774, 7, 19], +[648217, 1774, 10, 4], +[648293, 1774, 12, 19], +[648381, 1775, 3, 17], +[648398, 1775, 4, 3], +[648417, 1775, 4, 22], +[648480, 1775, 6, 24], +[648677, 1776, 1, 7], +[648688, 1776, 1, 18], +[648819, 1776, 5, 28], +[648901, 1776, 8, 18], +[649002, 1776, 11, 27], +[649075, 1777, 2, 8], +[649133, 1777, 4, 7], +[649165, 1777, 5, 9], +[649175, 1777, 5, 19], +[649209, 1777, 6, 22], +[649292, 1777, 9, 13], +[649409, 1778, 1, 8], +[649513, 1778, 4, 22], +[649692, 1778, 10, 18], +[649836, 1779, 3, 11], +[649974, 1779, 7, 27], +[650166, 1780, 2, 4], +[650334, 1780, 7, 21], +[650478, 1780, 12, 12], +[650521, 1781, 1, 24], +[650569, 1781, 3, 13], +[650657, 1781, 6, 9], +[650679, 1781, 7, 1], +[650837, 1781, 12, 6], +[650900, 1782, 2, 7], +[650911, 1782, 2, 18], +[651087, 1782, 8, 13], +[651232, 1783, 1, 5], +[651288, 1783, 3, 2], +[651421, 1783, 7, 13], +[651621, 1784, 1, 29], +[651649, 1784, 2, 26], +[651776, 1784, 7, 2], +[651935, 1784, 12, 8], +[651952, 1784, 12, 25], +[652132, 1785, 6, 23], +[652228, 1785, 9, 27], +[652301, 1785, 12, 9], +[652398, 1786, 3, 16], +[652449, 1786, 5, 6], +[652545, 1786, 8, 10], +[652616, 1786, 10, 20], +[652696, 1787, 1, 8], +[652745, 1787, 2, 26], +[652913, 1787, 8, 13], +[652949, 1787, 9, 18], +[652997, 1787, 11, 5], +[653051, 1787, 12, 29], +[653249, 1788, 7, 14], +[653275, 1788, 8, 9], +[653368, 1788, 11, 10], +[653444, 1789, 1, 25], +[653606, 1789, 7, 6], +[653803, 1790, 1, 19], +[653874, 1790, 3, 31], +[653926, 1790, 5, 22], +[653979, 1790, 7, 14], +[654093, 1790, 11, 5], +[654134, 1790, 12, 16], +[654232, 1791, 3, 24], +[654280, 1791, 5, 11], +[654355, 1791, 7, 25], +[654455, 1791, 11, 2], +[654485, 1791, 12, 2], +[654662, 1792, 5, 27], +[654723, 1792, 7, 27], +[654818, 1792, 10, 30], +[654928, 1793, 2, 17], +[654995, 1793, 4, 25], +[655042, 1793, 6, 11], +[655103, 1793, 8, 11], +[655264, 1794, 1, 19], +[655286, 1794, 2, 10], +[655359, 1794, 4, 24], +[655426, 1794, 6, 30], +[655519, 1794, 10, 1], +[655679, 1795, 3, 10], +[655755, 1795, 5, 25], +[655943, 1795, 11, 29], +[655945, 1795, 12, 1], +[656034, 1796, 2, 28], +[656148, 1796, 6, 21], +[656218, 1796, 8, 30], +[656221, 1796, 9, 2], +[656352, 1797, 1, 11], +[656540, 1797, 7, 18], +[656634, 1797, 10, 20], +[656804, 1798, 4, 8], +[656843, 1798, 5, 17], +[656875, 1798, 6, 18], +[656952, 1798, 9, 3], +[657057, 1798, 12, 17], +[657156, 1799, 3, 26], +[657314, 1799, 8, 31], +[657315, 1799, 9, 1], +[657476, 1800, 2, 9], +[657505, 1800, 3, 10], +[657546, 1800, 4, 20], +[657703, 1800, 9, 24], +[657712, 1800, 10, 3], +[657771, 1800, 12, 1], +[657926, 1801, 5, 5], +[657966, 1801, 6, 14], +[658080, 1801, 10, 6], +[658259, 1802, 4, 3], +[658439, 1802, 9, 30], +[658610, 1803, 3, 20], +[658628, 1803, 4, 7], +[658794, 1803, 9, 20], +[658806, 1803, 10, 2], +[658969, 1804, 3, 13], +[659032, 1804, 5, 15], +[659219, 1804, 11, 18], +[659262, 1804, 12, 31], +[659457, 1805, 7, 14], +[659495, 1805, 8, 21], +[659659, 1806, 2, 1], +[659669, 1806, 2, 11], +[659765, 1806, 5, 18], +[659932, 1806, 11, 1], +[660098, 1807, 4, 16], +[660154, 1807, 6, 11], +[660262, 1807, 9, 27], +[660439, 1808, 3, 22], +[660543, 1808, 7, 4], +[660548, 1808, 7, 9], +[660681, 1808, 11, 19], +[660746, 1809, 1, 23], +[660793, 1809, 3, 11], +[660825, 1809, 4, 12], +[661007, 1809, 10, 11], +[661177, 1810, 3, 30], +[661199, 1810, 4, 21], +[661259, 1810, 6, 20], +[661452, 1810, 12, 30], +[661644, 1811, 7, 10], +[661844, 1812, 1, 26], +[661934, 1812, 4, 25], +[662055, 1812, 8, 24], +[662206, 1813, 1, 22], +[662363, 1813, 6, 28], +[662447, 1813, 9, 20], +[662636, 1814, 3, 28], +[662666, 1814, 4, 27], +[662802, 1814, 9, 10], +[662974, 1815, 3, 1], +[663146, 1815, 8, 20], +[663275, 1815, 12, 27], +[663328, 1816, 2, 18], +[663451, 1816, 6, 20], +[663547, 1816, 9, 24], +[663576, 1816, 10, 23], +[663604, 1816, 11, 20], +[663794, 1817, 5, 29], +[663846, 1817, 7, 20], +[663897, 1817, 9, 9], +[663899, 1817, 9, 11], +[664048, 1818, 2, 7], +[664145, 1818, 5, 15], +[664206, 1818, 7, 15], +[664358, 1818, 12, 14], +[664550, 1819, 6, 24], +[664636, 1819, 9, 18], +[664782, 1820, 2, 11], +[664919, 1820, 6, 27], +[664968, 1820, 8, 15], +[665126, 1821, 1, 20], +[665298, 1821, 7, 11], +[665415, 1821, 11, 5], +[665428, 1821, 11, 18], +[665617, 1822, 5, 26], +[665634, 1822, 6, 12], +[665683, 1822, 7, 31], +[665729, 1822, 9, 15], +[665796, 1822, 11, 21], +[665972, 1823, 5, 16], +[666069, 1823, 8, 21], +[666114, 1823, 10, 5], +[666177, 1823, 12, 7], +[666337, 1824, 5, 15], +[666524, 1824, 11, 18], +[666697, 1825, 5, 10], +[666782, 1825, 8, 3], +[666873, 1825, 11, 2], +[666957, 1826, 1, 25], +[667032, 1826, 4, 10], +[667178, 1826, 9, 3], +[667193, 1826, 9, 18], +[667386, 1827, 3, 30], +[667546, 1827, 9, 6], +[667678, 1828, 1, 16], +[667722, 1828, 2, 29], +[667809, 1828, 5, 26], +[667941, 1828, 10, 5], +[667983, 1828, 11, 16], +[668108, 1829, 3, 21], +[668198, 1829, 6, 19], +[668247, 1829, 8, 7], +[668425, 1830, 2, 1], +[668622, 1830, 8, 17], +[668746, 1830, 12, 19], +[668927, 1831, 6, 18], +[669116, 1831, 12, 24], +[669234, 1832, 4, 20], +[669268, 1832, 5, 24], +[669405, 1832, 10, 8], +[669499, 1833, 1, 10], +[669681, 1833, 7, 11], +[669780, 1833, 10, 18], +[669933, 1834, 3, 20], +[670093, 1834, 8, 27], +[670111, 1834, 9, 14], +[670225, 1835, 1, 6], +[670263, 1835, 2, 13], +[670264, 1835, 2, 14], +[670436, 1835, 8, 5], +[670550, 1835, 11, 27], +[670603, 1836, 1, 19], +[670737, 1836, 6, 1], +[670837, 1836, 9, 9], +[671025, 1837, 3, 16], +[671127, 1837, 6, 26], +[671275, 1837, 11, 21], +[671412, 1838, 4, 7], +[671416, 1838, 4, 11], +[671544, 1838, 8, 17], +[671702, 1839, 1, 22], +[671814, 1839, 5, 14], +[671966, 1839, 10, 13], +[672158, 1840, 4, 22], +[672358, 1840, 11, 8], +[672437, 1841, 1, 26], +[672478, 1841, 3, 8], +[672561, 1841, 5, 30], +[672653, 1841, 8, 30], +[672811, 1842, 2, 4], +[672977, 1842, 7, 20], +[673077, 1842, 10, 28], +[673144, 1843, 1, 3], +[673193, 1843, 2, 21], +[673328, 1843, 7, 6], +[673348, 1843, 7, 26], +[673395, 1843, 9, 11], +[673548, 1844, 2, 11], +[673665, 1844, 6, 7], +[673863, 1844, 12, 22], +[674062, 1845, 7, 9], +[674250, 1846, 1, 13], +[674315, 1846, 3, 19], +[674368, 1846, 5, 11], +[674453, 1846, 8, 4], +[674549, 1846, 11, 8], +[674685, 1847, 3, 24], +[674697, 1847, 4, 5], +[674784, 1847, 7, 1], +[674887, 1847, 10, 12], +[675083, 1848, 4, 25], +[675267, 1848, 10, 26], +[675274, 1848, 11, 2], +[675297, 1848, 11, 25], +[675333, 1848, 12, 31], +[675378, 1849, 2, 14], +[675550, 1849, 8, 5], +[675694, 1849, 12, 27], +[675775, 1850, 3, 18], +[675961, 1850, 9, 20], +[676069, 1851, 1, 6], +[676212, 1851, 5, 29], +[676295, 1851, 8, 20], +[676451, 1852, 1, 23], +[676644, 1852, 8, 3], +[676698, 1852, 9, 26], +[676795, 1853, 1, 1], +[676971, 1853, 6, 26], +[677014, 1853, 8, 8], +[677080, 1853, 10, 13], +[677111, 1853, 11, 13], +[677169, 1854, 1, 10], +[677195, 1854, 2, 5], +[677338, 1854, 6, 28], +[677443, 1854, 10, 11], +[677640, 1855, 4, 26], +[677715, 1855, 7, 10], +[677816, 1855, 10, 19], +[677823, 1855, 10, 26], +[677934, 1856, 2, 14], +[678019, 1856, 5, 9], +[678154, 1856, 9, 21], +[678303, 1857, 2, 17], +[678345, 1857, 3, 31], +[678410, 1857, 6, 4], +[678459, 1857, 7, 23], +[678608, 1857, 12, 19], +[678793, 1858, 6, 22], +[678983, 1858, 12, 29], +[679074, 1859, 3, 30], +[679163, 1859, 6, 27], +[679215, 1859, 8, 18], +[679251, 1859, 9, 23], +[679280, 1859, 10, 22], +[679426, 1860, 3, 16], +[679441, 1860, 3, 31], +[679610, 1860, 9, 16], +[679646, 1860, 10, 22], +[679763, 1861, 2, 16], +[679873, 1861, 6, 6], +[680067, 1861, 12, 17], +[680142, 1862, 3, 2], +[680249, 1862, 6, 17], +[680420, 1862, 12, 5], +[680517, 1863, 3, 12], +[680541, 1863, 4, 5], +[680721, 1863, 10, 2], +[680809, 1863, 12, 29], +[680888, 1864, 3, 17], +[680943, 1864, 5, 11], +[680975, 1864, 6, 12], +[681040, 1864, 8, 16], +[681086, 1864, 10, 1], +[681186, 1865, 1, 9], +[681198, 1865, 1, 21], +[681390, 1865, 8, 1], +[681581, 1866, 2, 8], +[681653, 1866, 4, 21], +[681820, 1866, 10, 5], +[681926, 1867, 1, 19], +[682064, 1867, 6, 6], +[682149, 1867, 8, 30], +[682222, 1867, 11, 11], +[682307, 1868, 2, 4], +[682318, 1868, 2, 15], +[682330, 1868, 2, 27], +[682385, 1868, 4, 22], +[682478, 1868, 7, 24], +[682591, 1868, 11, 14], +[682787, 1869, 5, 29], +[682866, 1869, 8, 16], +[683056, 1870, 2, 22], +[683183, 1870, 6, 29], +[683338, 1870, 12, 1], +[683428, 1871, 3, 1], +[683617, 1871, 9, 6], +[683808, 1872, 3, 15], +[683918, 1872, 7, 3], +[684116, 1873, 1, 17], +[684241, 1873, 5, 22], +[684394, 1873, 10, 22], +[684529, 1874, 3, 6], +[684674, 1874, 7, 29], +[684865, 1875, 2, 5], +[684994, 1875, 6, 14], +[685034, 1875, 7, 24], +[685101, 1875, 9, 29], +[685111, 1875, 10, 9], +[685153, 1875, 11, 20], +[685199, 1876, 1, 5], +[685271, 1876, 3, 17], +[685367, 1876, 6, 21], +[685491, 1876, 10, 23], +[685561, 1877, 1, 1], +[685753, 1877, 7, 12], +[685936, 1878, 1, 11], +[686107, 1878, 7, 1], +[686220, 1878, 10, 22], +[686343, 1879, 2, 22], +[686491, 1879, 7, 20], +[686621, 1879, 11, 27], +[686633, 1879, 12, 9], +[686795, 1880, 5, 19], +[686928, 1880, 9, 29], +[687071, 1881, 2, 19], +[687180, 1881, 6, 8], +[687181, 1881, 6, 9], +[687221, 1881, 7, 19], +[687227, 1881, 7, 25], +[687312, 1881, 10, 18], +[687478, 1882, 4, 2], +[687677, 1882, 10, 18], +[687772, 1883, 1, 21], +[687879, 1883, 5, 8], +[688020, 1883, 9, 26], +[688028, 1883, 10, 4], +[688033, 1883, 10, 9], +[688119, 1884, 1, 3], +[688269, 1884, 6, 1], +[688357, 1884, 8, 28], +[688439, 1884, 11, 18], +[688578, 1885, 4, 6], +[688660, 1885, 6, 27], +[688752, 1885, 9, 27], +[688769, 1885, 10, 14], +[688968, 1886, 5, 1], +[689166, 1886, 11, 15], +[689211, 1886, 12, 30], +[689337, 1887, 5, 5], +[689352, 1887, 5, 20], +[689490, 1887, 10, 5], +[689554, 1887, 12, 8], +[689574, 1887, 12, 28], +[689760, 1888, 7, 1], +[689917, 1888, 12, 5], +[690038, 1889, 4, 5], +[690202, 1889, 9, 16], +[690205, 1889, 9, 19], +[690222, 1889, 10, 6], +[690299, 1889, 12, 22], +[690472, 1890, 6, 13], +[690508, 1890, 7, 19], +[690604, 1890, 10, 23], +[690701, 1891, 1, 28], +[690860, 1891, 7, 6], +[691054, 1892, 1, 16], +[691154, 1892, 4, 25], +[691288, 1892, 9, 6], +[691348, 1892, 11, 5], +[691440, 1893, 2, 5], +[691538, 1893, 5, 14], +[691560, 1893, 6, 5], +[691660, 1893, 9, 13], +[691694, 1893, 10, 17], +[691890, 1894, 5, 1], +[692028, 1894, 9, 16], +[692073, 1894, 10, 31], +[692268, 1895, 5, 14], +[692341, 1895, 7, 26], +[692448, 1895, 11, 10], +[692450, 1895, 11, 12], +[692580, 1896, 3, 21], +[692662, 1896, 6, 11], +[692680, 1896, 6, 29], +[692793, 1896, 10, 20], +[692917, 1897, 2, 21], +[692948, 1897, 3, 24], +[692995, 1897, 5, 10], +[693024, 1897, 6, 8], +[693214, 1897, 12, 15], +[693279, 1898, 2, 18], +[693388, 1898, 6, 7], +[693432, 1898, 7, 21], +[693449, 1898, 8, 7], +[693613, 1899, 1, 18], +[693686, 1899, 4, 1], +[693767, 1899, 6, 21], +[693890, 1899, 10, 22], +[693924, 1899, 11, 25], +[694083, 1900, 5, 3], +[694139, 1900, 6, 28], +[694253, 1900, 10, 20], +[694369, 1901, 2, 13], +[694561, 1901, 8, 24], +[694728, 1902, 2, 7], +[694855, 1902, 6, 14], +[694942, 1902, 9, 9], +[695080, 1903, 1, 25], +[695180, 1903, 5, 5], +[695269, 1903, 8, 2], +[695369, 1903, 11, 10], +[695560, 1904, 5, 19], +[695570, 1904, 5, 29], +[695637, 1904, 8, 4], +[695690, 1904, 9, 26], +[695854, 1905, 3, 9], +[695888, 1905, 4, 12], +[695985, 1905, 7, 18], +[696007, 1905, 8, 9], +[696016, 1905, 8, 18], +[696124, 1905, 12, 4], +[696159, 1906, 1, 8], +[696203, 1906, 2, 21], +[696214, 1906, 3, 4], +[696323, 1906, 6, 21], +[696332, 1906, 6, 30], +[696405, 1906, 9, 11], +[696585, 1907, 3, 10], +[696667, 1907, 5, 31], +[696723, 1907, 7, 26], +[696804, 1907, 10, 15], +[696919, 1908, 2, 7], +[697010, 1908, 5, 8], +[697048, 1908, 6, 15], +[697107, 1908, 8, 13], +[697290, 1909, 2, 12], +[697471, 1909, 8, 12], +[697524, 1909, 10, 4], +[697571, 1909, 11, 20], +[697723, 1910, 4, 21], +[697892, 1910, 10, 7], +[697917, 1910, 11, 1], +[698001, 1911, 1, 24], +[698093, 1911, 4, 26], +[698258, 1911, 10, 8], +[698268, 1911, 10, 18], +[698269, 1911, 10, 19], +[698457, 1912, 4, 24], +[698628, 1912, 10, 12], +[698684, 1912, 12, 7], +[698860, 1913, 6, 1], +[698919, 1913, 7, 30], +[698990, 1913, 10, 9], +[699065, 1913, 12, 23], +[699161, 1914, 3, 29], +[699213, 1914, 5, 20], +[699368, 1914, 10, 22], +[699523, 1915, 3, 26], +[699705, 1915, 9, 24], +[699797, 1915, 12, 25], +[699881, 1916, 3, 18], +[699891, 1916, 3, 28], +[700003, 1916, 7, 18], +[700158, 1916, 12, 20], +[700185, 1917, 1, 16], +[700338, 1917, 6, 18], +[700426, 1917, 9, 14], +[700499, 1917, 11, 26], +[700505, 1917, 12, 2], +[700663, 1918, 5, 9], +[700729, 1918, 7, 14], +[700785, 1918, 9, 8], +[700807, 1918, 9, 30], +[700825, 1918, 10, 18], +[700872, 1918, 12, 4], +[701045, 1919, 5, 26], +[701200, 1919, 10, 28], +[701237, 1919, 12, 4], +[701411, 1920, 5, 26], +[701418, 1920, 6, 2], +[701459, 1920, 7, 13], +[701592, 1920, 11, 23], +[701620, 1920, 12, 21], +[701691, 1921, 3, 2], +[701807, 1921, 6, 26], +[701885, 1921, 9, 12], +[701960, 1921, 11, 26], +[702029, 1922, 2, 3], +[702155, 1922, 6, 9], +[702244, 1922, 9, 6], +[702291, 1922, 10, 23], +[702481, 1923, 5, 1], +[702621, 1923, 9, 18], +[702821, 1924, 4, 5], +[702911, 1924, 7, 4], +[703008, 1924, 10, 9], +[703133, 1925, 2, 11], +[703251, 1925, 6, 9], +[703291, 1925, 7, 19], +[703418, 1925, 11, 23], +[703445, 1925, 12, 20], +[703501, 1926, 2, 14], +[703569, 1926, 4, 23], +[703644, 1926, 7, 7], +[703664, 1926, 7, 27], +[703697, 1926, 8, 29], +[703754, 1926, 10, 25], +[703848, 1927, 1, 27], +[703965, 1927, 5, 24], +[704071, 1927, 9, 7], +[704219, 1928, 2, 2], +[704235, 1928, 2, 18], +[704377, 1928, 7, 9], +[704546, 1928, 12, 25], +[704581, 1929, 1, 29], +[704620, 1929, 3, 9], +[704804, 1929, 9, 9], +[704833, 1929, 10, 8], +[705011, 1930, 4, 4], +[705059, 1930, 5, 22], +[705062, 1930, 5, 25], +[705188, 1930, 9, 28], +[705332, 1931, 2, 19], +[705513, 1931, 8, 19], +[705572, 1931, 10, 17], +[705715, 1932, 3, 8], +[705836, 1932, 7, 7], +[705959, 1932, 11, 7], +[706103, 1933, 3, 31], +[706246, 1933, 8, 21], +[706342, 1933, 11, 25], +[706347, 1933, 11, 30], +[706439, 1934, 3, 2], +[706531, 1934, 6, 2], +[706669, 1934, 10, 18], +[706763, 1935, 1, 20], +[706765, 1935, 1, 22], +[706872, 1935, 5, 9], +[707027, 1935, 10, 11], +[707076, 1935, 11, 29], +[707208, 1936, 4, 9], +[707244, 1936, 5, 15], +[707355, 1936, 9, 3], +[707493, 1937, 1, 19], +[707524, 1937, 2, 19], +[707644, 1937, 6, 19], +[707771, 1937, 10, 24], +[707916, 1938, 3, 18], +[708059, 1938, 8, 8], +[708085, 1938, 9, 3], +[708229, 1939, 1, 25], +[708232, 1939, 1, 28], +[708288, 1939, 3, 25], +[708469, 1939, 9, 22], +[708643, 1940, 3, 14], +[708793, 1940, 8, 11], +[708954, 1941, 1, 19], +[709052, 1941, 4, 27], +[709080, 1941, 5, 25], +[709154, 1941, 8, 7], +[709309, 1942, 1, 9], +[709419, 1942, 4, 29], +[709426, 1942, 5, 6], +[709435, 1942, 5, 15], +[709507, 1942, 7, 26], +[709535, 1942, 8, 23], +[709563, 1942, 9, 20], +[709662, 1942, 12, 28], +[709835, 1943, 6, 19], +[709987, 1943, 11, 18], +[710131, 1944, 4, 10], +[710164, 1944, 5, 13], +[710205, 1944, 6, 23], +[710214, 1944, 7, 2], +[710279, 1944, 9, 5], +[710319, 1944, 10, 15], +[710400, 1945, 1, 4], +[710521, 1945, 5, 5], +[710529, 1945, 5, 13], +[710691, 1945, 10, 22], +[710743, 1945, 12, 13], +[710832, 1946, 3, 12], +[710966, 1946, 7, 24], +[711012, 1946, 9, 8], +[711018, 1946, 9, 14], +[711204, 1947, 3, 19], +[711285, 1947, 6, 8], +[711452, 1947, 11, 22], +[711531, 1948, 2, 9], +[711710, 1948, 8, 6], +[711907, 1949, 2, 19], +[711981, 1949, 5, 4], +[712109, 1949, 9, 9], +[712117, 1949, 9, 17], +[712240, 1950, 1, 18], +[712371, 1950, 5, 29], +[712436, 1950, 8, 2], +[712474, 1950, 9, 9], +[712656, 1951, 3, 10], +[712754, 1951, 6, 16], +[712845, 1951, 9, 15], +[712946, 1951, 12, 25], +[713102, 1952, 5, 29], +[713299, 1952, 12, 12], +[713498, 1953, 6, 29], +[713561, 1953, 8, 31], +[713656, 1953, 12, 4], +[713845, 1954, 6, 11], +[713907, 1954, 8, 12], +[713910, 1954, 8, 15], +[713974, 1954, 10, 18], +[714017, 1954, 11, 30], +[714132, 1955, 3, 25], +[714175, 1955, 5, 7], +[714311, 1955, 9, 20], +[714416, 1956, 1, 3], +[714511, 1956, 4, 7], +[714581, 1956, 6, 16], +[714693, 1956, 10, 6], +[714732, 1956, 11, 14], +[714875, 1957, 4, 6], +[715048, 1957, 9, 26], +[715090, 1957, 11, 7], +[715127, 1957, 12, 14], +[715220, 1958, 3, 17], +[715368, 1958, 8, 12], +[715415, 1958, 9, 28], +[715419, 1958, 10, 2], +[715450, 1958, 11, 2], +[715633, 1959, 5, 4], +[715682, 1959, 6, 22], +[715712, 1959, 7, 22], +[715750, 1959, 8, 29], +[715896, 1960, 1, 22], +[715957, 1960, 3, 23], +[716119, 1960, 9, 1], +[716260, 1961, 1, 20], +[716378, 1961, 5, 18], +[716400, 1961, 6, 9], +[716487, 1961, 9, 4], +[716575, 1961, 12, 1], +[716676, 1962, 3, 12], +[716682, 1962, 3, 18], +[716801, 1962, 7, 15], +[716978, 1963, 1, 8], +[717008, 1963, 2, 7], +[717030, 1963, 3, 1], +[717120, 1963, 5, 30], +[717179, 1963, 7, 28], +[717215, 1963, 9, 2], +[717394, 1964, 2, 28], +[717495, 1964, 6, 8], +[717507, 1964, 6, 20], +[717559, 1964, 8, 11], +[717598, 1964, 9, 19], +[717680, 1964, 12, 10], +[717859, 1965, 6, 7], +[717953, 1965, 9, 9], +[718070, 1966, 1, 4], +[718210, 1966, 5, 24], +[718265, 1966, 7, 18], +[718353, 1966, 10, 14], +[718450, 1967, 1, 19], +[718506, 1967, 3, 16], +[718529, 1967, 4, 8], +[718703, 1967, 9, 29], +[718900, 1968, 4, 13], +[718986, 1968, 7, 8], +[719029, 1968, 8, 20], +[719228, 1969, 3, 7], +[719362, 1969, 7, 19], +[719558, 1970, 1, 31], +[719580, 1970, 2, 22], +[719623, 1970, 4, 6], +[719732, 1970, 7, 24], +[719795, 1970, 9, 25], +[719824, 1970, 10, 24], +[719955, 1971, 3, 4], +[720091, 1971, 7, 18], +[720225, 1971, 11, 29], +[720282, 1972, 1, 25], +[720299, 1972, 2, 11], +[720395, 1972, 5, 17], +[720402, 1972, 5, 24], +[720570, 1972, 11, 8], +[720684, 1973, 3, 2], +[720756, 1973, 5, 13], +[720941, 1973, 11, 14], +[721122, 1974, 5, 14], +[721154, 1974, 6, 15], +[721283, 1974, 10, 22], +[721438, 1975, 3, 26], +[721616, 1975, 9, 20], +[721745, 1976, 1, 27], +[721792, 1976, 3, 14], +[721829, 1976, 4, 20], +[721984, 1976, 9, 22], +[722045, 1976, 11, 22], +[722143, 1977, 2, 28], +[722288, 1977, 7, 23], +[722371, 1977, 10, 14], +[722561, 1978, 4, 22], +[722700, 1978, 9, 8], +[722722, 1978, 9, 30], +[722866, 1979, 2, 21], +[723044, 1979, 8, 18], +[723054, 1979, 8, 28], +[723084, 1979, 9, 27], +[723148, 1979, 11, 30], +[723334, 1980, 6, 3], +[723509, 1980, 11, 25], +[723652, 1981, 4, 17], +[723832, 1981, 10, 14], +[724028, 1982, 4, 28], +[724042, 1982, 5, 12], +[724178, 1982, 9, 25], +[724320, 1983, 2, 14], +[724438, 1983, 6, 12], +[724596, 1983, 11, 17], +[724693, 1984, 2, 22], +[724742, 1984, 4, 11], +[724865, 1984, 8, 12], +[724912, 1984, 9, 28], +[724926, 1984, 10, 12], +[724930, 1984, 10, 16], +[724938, 1984, 10, 24], +[725062, 1985, 2, 25], +[725067, 1985, 3, 2], +[725242, 1985, 8, 24], +[725265, 1985, 9, 16], +[725385, 1986, 1, 14], +[725555, 1986, 7, 3], +[725615, 1986, 9, 1], +[725747, 1987, 1, 11], +[725754, 1987, 1, 18], +[725932, 1987, 7, 15], +[726014, 1987, 10, 5], +[726138, 1988, 2, 6], +[726288, 1988, 7, 5], +[726390, 1988, 10, 15], +[726574, 1989, 4, 17], +[726719, 1989, 9, 9], +[726802, 1989, 12, 1], +[726953, 1990, 5, 1], +[727099, 1990, 9, 24], +[727157, 1990, 11, 21], +[727250, 1991, 2, 22], +[727394, 1991, 7, 16], +[727581, 1992, 1, 19], +[727729, 1992, 6, 15], +[727926, 1992, 12, 29], +[728032, 1993, 4, 14], +[728082, 1993, 6, 3], +[728210, 1993, 10, 9], +[728274, 1993, 12, 12], +[728344, 1994, 2, 20], +[728540, 1994, 9, 4], +[728546, 1994, 9, 10], +[728624, 1994, 11, 27], +[728629, 1994, 12, 2], +[728647, 1994, 12, 20], +[728649, 1994, 12, 22], +[728671, 1995, 1, 13], +[728859, 1995, 7, 20], +[728967, 1995, 11, 5], +[729141, 1996, 4, 27], +[729278, 1996, 9, 11], +[729461, 1997, 3, 13], +[729539, 1997, 5, 30], +[729563, 1997, 6, 23], +[729634, 1997, 9, 2], +[729786, 1998, 2, 1], +[729882, 1998, 5, 8], +[730022, 1998, 9, 25], +[730115, 1998, 12, 27], +[730184, 1999, 3, 6], +[730383, 1999, 9, 21], +[730469, 1999, 12, 16], +[730554, 2000, 3, 10], +[730745, 2000, 9, 17], +[730891, 2001, 2, 10], +[730995, 2001, 5, 25], +[731002, 2001, 6, 1], +[731050, 2001, 7, 19], +[731154, 2001, 10, 31], +[731299, 2002, 3, 25], +[731348, 2002, 5, 13], +[731541, 2002, 11, 22], +[731692, 2003, 4, 22], +[731779, 2003, 7, 18], +[731800, 2003, 8, 8], +[731978, 2004, 2, 2], +[732056, 2004, 4, 20], +[732099, 2004, 6, 2], +[732108, 2004, 6, 11], +[732296, 2004, 12, 16], +[732435, 2005, 5, 4], +[732612, 2005, 10, 28], +[732654, 2005, 12, 9], +[732841, 2006, 6, 14], +[732965, 2006, 10, 16], +[733043, 2007, 1, 2], +[733206, 2007, 6, 14], +[733349, 2007, 11, 4], +[733459, 2008, 2, 22], +[733620, 2008, 8, 1], +[733661, 2008, 9, 11], +[733798, 2009, 1, 26], +[733832, 2009, 3, 1], +[733851, 2009, 3, 20], +[734010, 2009, 8, 26], +[734202, 2010, 3, 6], +[734298, 2010, 6, 10], +[734317, 2010, 6, 29], +[734516, 2011, 1, 14], +[734665, 2011, 6, 12], +[734857, 2011, 12, 21], +[734884, 2012, 1, 17], +[734939, 2012, 3, 12], +[735073, 2012, 7, 24], +[735241, 2013, 1, 8], +[735419, 2013, 7, 5], +[735489, 2013, 9, 13], +[735604, 2014, 1, 6], +[735750, 2014, 6, 1], +[735839, 2014, 8, 29], +[736006, 2015, 2, 12], +[736040, 2015, 3, 18], +[736132, 2015, 6, 18], +[736176, 2015, 8, 1], +[736181, 2015, 8, 6], +[736354, 2016, 1, 26], +[736482, 2016, 6, 2], +[736485, 2016, 6, 5], +[736522, 2016, 7, 12], +[736523, 2016, 7, 13], +[736549, 2016, 8, 8], +[736603, 2016, 10, 1], +[736641, 2016, 11, 8], +[736647, 2016, 11, 14], +[736688, 2016, 12, 25], +[736765, 2017, 3, 12], +[736914, 2017, 8, 8], +[736932, 2017, 8, 26], +[737066, 2018, 1, 7], +[737113, 2018, 2, 23], +[737233, 2018, 6, 23], +[737382, 2018, 11, 19], +[737557, 2019, 5, 13], +[737586, 2019, 6, 11], +[737700, 2019, 10, 3], +[737724, 2019, 10, 27], +[737735, 2019, 11, 7], +[737736, 2019, 11, 8], +[737810, 2020, 1, 21], +[737885, 2020, 4, 5], +[738021, 2020, 8, 19], +[738116, 2020, 11, 22], +[738306, 2021, 5, 31], +[738374, 2021, 8, 7], +[738521, 2022, 1, 1], +[738546, 2022, 1, 26], +[738739, 2022, 8, 7], +[738904, 2023, 1, 19], +[738965, 2023, 3, 21], +[739009, 2023, 5, 4], +[739127, 2023, 8, 30], +[739243, 2023, 12, 24], +[739401, 2024, 5, 30], +[739573, 2024, 11, 18], +[739581, 2024, 11, 26], +[739611, 2024, 12, 26], +[739684, 2025, 3, 9], +[739755, 2025, 5, 19], +[739896, 2025, 10, 7], +[740083, 2026, 4, 12], +[740134, 2026, 6, 2], +[740317, 2026, 12, 2], +[740396, 2027, 2, 19], +[740536, 2027, 7, 9], +[740576, 2027, 8, 18], +[740650, 2027, 10, 31], +[740796, 2028, 3, 25], +[740850, 2028, 5, 18], +[740965, 2028, 9, 10], +[740999, 2028, 10, 14], +[741100, 2029, 1, 23], +[741125, 2029, 2, 17], +[741266, 2029, 7, 8], +[741434, 2029, 12, 23], +[741541, 2030, 4, 9], +[741615, 2030, 6, 22], +[741666, 2030, 8, 12], +[741863, 2031, 2, 25], +[741880, 2031, 3, 14], +[741987, 2031, 6, 29], +[742020, 2031, 8, 1], +[742143, 2031, 12, 2], +[742233, 2032, 3, 1], +[742359, 2032, 7, 5], +[742518, 2032, 12, 11], +[742590, 2033, 2, 21], +[742761, 2033, 8, 11], +[742953, 2034, 2, 19], +[743092, 2034, 7, 8], +[743279, 2035, 1, 11], +[743302, 2035, 2, 3], +[743467, 2035, 7, 18], +[743515, 2035, 9, 4], +[743552, 2035, 10, 11], +[743661, 2036, 1, 28], +[743812, 2036, 6, 27], +[743891, 2036, 9, 14], +[743997, 2036, 12, 29], +[744108, 2037, 4, 19], +[744155, 2037, 6, 5], +[744320, 2037, 11, 17], +[744520, 2038, 6, 5], +[744598, 2038, 8, 22], +[744695, 2038, 11, 27], +[744854, 2039, 5, 5], +[744904, 2039, 6, 24], +[744923, 2039, 7, 13], +[745072, 2039, 12, 9], +[745085, 2039, 12, 22], +[745171, 2040, 3, 17], +[745371, 2040, 10, 3], +[745539, 2041, 3, 20], +[745585, 2041, 5, 5], +[745678, 2041, 8, 6], +[745856, 2042, 1, 31], +[745915, 2042, 3, 31], +[745964, 2042, 5, 19], +[746020, 2042, 7, 14], +[746148, 2042, 11, 19], +[746202, 2043, 1, 12], +[746343, 2043, 6, 2], +[746483, 2043, 10, 20], +[746608, 2044, 2, 22], +[746699, 2044, 5, 23], +[746844, 2044, 10, 15], +[747028, 2045, 4, 17], +[747035, 2045, 4, 24], +[747174, 2045, 9, 10], +[747256, 2045, 12, 1], +[747428, 2046, 5, 22], +[747510, 2046, 8, 12], +[747701, 2047, 2, 19], +[747703, 2047, 2, 21], +[747766, 2047, 4, 25], +[747940, 2047, 10, 16], +[748093, 2048, 3, 17], +[748225, 2048, 7, 27], +[748280, 2048, 9, 20], +[748293, 2048, 10, 3], +[748467, 2049, 3, 26], +[748641, 2049, 9, 16], +[748698, 2049, 11, 12], +[748827, 2050, 3, 21], +[748870, 2050, 5, 3], +[749041, 2050, 10, 21], +[749130, 2051, 1, 18], +[749283, 2051, 6, 20], +[749328, 2051, 8, 4], +[749486, 2052, 1, 9], +[749633, 2052, 6, 4], +[749791, 2052, 11, 9], +[749810, 2052, 11, 28], +[749834, 2052, 12, 22], +[749884, 2053, 2, 10], +[749993, 2053, 5, 30], +[750002, 2053, 6, 8], +[750093, 2053, 9, 7], +[750275, 2054, 3, 8], +[750399, 2054, 7, 10], +[750550, 2054, 12, 8], +[750663, 2055, 3, 31], +[750856, 2055, 10, 10], +[751008, 2056, 3, 10], +[751118, 2056, 6, 28], +[751134, 2056, 7, 14], +[751193, 2056, 9, 11], +[751268, 2056, 11, 25], +[751440, 2057, 5, 16], +[751530, 2057, 8, 14], +[751534, 2057, 8, 18], +[751637, 2057, 11, 29], +[751652, 2057, 12, 14], +[751669, 2057, 12, 31], +[751692, 2058, 1, 23], +[751780, 2058, 4, 21], +[751803, 2058, 5, 14], +[751976, 2058, 11, 3], +[752056, 2059, 1, 22], +[752177, 2059, 5, 23], +[752253, 2059, 8, 7], +[752256, 2059, 8, 10], +[752276, 2059, 8, 30], +[752345, 2059, 11, 7], +[752465, 2060, 3, 6], +[752487, 2060, 3, 28], +[752578, 2060, 6, 27], +[752626, 2060, 8, 14], +[752778, 2061, 1, 13], +[752870, 2061, 4, 15], +[752964, 2061, 7, 18], +[753007, 2061, 8, 30], +[753070, 2061, 11, 1], +[753114, 2061, 12, 15], +[753264, 2062, 5, 14], +[753379, 2062, 9, 6], +[753495, 2062, 12, 31], +[753523, 2063, 1, 28], +[753593, 2063, 4, 8], +[753628, 2063, 5, 13], +[753810, 2063, 11, 11], +[754001, 2064, 5, 20], +[754199, 2064, 12, 4], +[754248, 2065, 1, 22], +[754302, 2065, 3, 17], +[754312, 2065, 3, 27], +[754503, 2065, 10, 4], +[754566, 2065, 12, 6], +[754748, 2066, 6, 6], +[754750, 2066, 6, 8], +[754774, 2066, 7, 2], +[754862, 2066, 9, 28], +[754986, 2067, 1, 30], +[755042, 2067, 3, 27], +[755082, 2067, 5, 6], +[755250, 2067, 10, 21], +[755437, 2068, 4, 25], +[755602, 2068, 10, 7], +[755692, 2069, 1, 5], +[755825, 2069, 5, 18], +[755928, 2069, 8, 29], +[755971, 2069, 10, 11], +[756112, 2070, 3, 1], +[756152, 2070, 4, 10], +[756331, 2070, 10, 6], +[756504, 2071, 3, 28], +[756593, 2071, 6, 25], +[756751, 2071, 11, 30], +[756755, 2071, 12, 4], +[756759, 2071, 12, 8], +[756902, 2072, 4, 29], +[756945, 2072, 6, 11], +[757006, 2072, 8, 11], +[757052, 2072, 9, 26], +[757135, 2072, 12, 18], +[757312, 2073, 6, 13], +[757314, 2073, 6, 15], +[757466, 2073, 11, 14], +[757612, 2074, 4, 9], +[757704, 2074, 7, 10], +[757834, 2074, 11, 17], +[757889, 2075, 1, 11], +[757921, 2075, 2, 12], +[757925, 2075, 2, 16], +[758045, 2075, 6, 16], +[758065, 2075, 7, 6], +[758263, 2076, 1, 20], +[758402, 2076, 6, 7], +[758530, 2076, 10, 13], +[758615, 2077, 1, 6], +[758674, 2077, 3, 6], +[758761, 2077, 6, 1], +[758853, 2077, 9, 1], +[759002, 2078, 1, 28], +[759004, 2078, 1, 30], +[759135, 2078, 6, 10], +[759156, 2078, 7, 1], +[759248, 2078, 10, 1], +[759412, 2079, 3, 14], +[759515, 2079, 6, 25], +[759636, 2079, 10, 24], +[759736, 2080, 2, 1], +[759912, 2080, 7, 26], +[760080, 2081, 1, 10], +[760143, 2081, 3, 14], +[760250, 2081, 6, 29], +[760282, 2081, 7, 31], +[760473, 2082, 2, 7], +[760586, 2082, 5, 31], +[760767, 2082, 11, 28], +[760836, 2083, 2, 5], +[761013, 2083, 8, 1], +[761053, 2083, 9, 10], +[761134, 2083, 11, 30], +[761154, 2083, 12, 20], +[761252, 2084, 3, 27], +[761423, 2084, 9, 14], +[761586, 2085, 2, 24], +[761780, 2085, 9, 6], +[761979, 2086, 3, 24], +[762126, 2086, 8, 18], +[762282, 2087, 1, 21], +[762427, 2087, 6, 15], +[762506, 2087, 9, 2], +[762564, 2087, 10, 30], +[762597, 2087, 12, 2], +[762731, 2088, 4, 14], +[762823, 2088, 7, 15], +[762905, 2088, 10, 5], +[762996, 2089, 1, 4], +[763115, 2089, 5, 3], +[763244, 2089, 9, 9], +[763308, 2089, 11, 12], +[763364, 2090, 1, 7], +[763458, 2090, 4, 11], +[763539, 2090, 7, 1], +[763596, 2090, 8, 27], +[763634, 2090, 10, 4], +[763683, 2090, 11, 22], +[763854, 2091, 5, 12], +[763871, 2091, 5, 29], +[763946, 2091, 8, 12], +[764027, 2091, 11, 1], +[764041, 2091, 11, 15], +[764102, 2092, 1, 15], +[764172, 2092, 3, 25], +[764182, 2092, 4, 4], +[764250, 2092, 6, 11], +[764348, 2092, 9, 17], +[764401, 2092, 11, 9], +[764542, 2093, 3, 30], +[764543, 2093, 3, 31], +[764571, 2093, 4, 28], +[764572, 2093, 4, 29], +[764604, 2093, 5, 31], +[764631, 2093, 6, 27], +[764680, 2093, 8, 15], +[764690, 2093, 8, 25], +[764757, 2093, 10, 31], +[764857, 2094, 2, 8], +[764993, 2094, 6, 24], +[765101, 2094, 10, 10], +[765258, 2095, 3, 16], +[765307, 2095, 5, 4], +[765469, 2095, 10, 13], +[765629, 2096, 3, 21], +[765725, 2096, 6, 25], +[765742, 2096, 7, 12], +[765752, 2096, 7, 22], +[765888, 2096, 12, 5], +[766068, 2097, 6, 3], +[766139, 2097, 8, 13], +[766211, 2097, 10, 24], +[766233, 2097, 11, 15], +[766346, 2098, 3, 8], +[766418, 2098, 5, 19], +[766528, 2098, 9, 6], +[766588, 2098, 11, 5], +[766755, 2099, 4, 21], +[766774, 2099, 5, 10], +[766863, 2099, 8, 7], +[766943, 2099, 10, 26], +[766953, 2099, 11, 5], +[766989, 2099, 12, 11], +[767145, 2100, 5, 16], +[767151, 2100, 5, 22], +[767217, 2100, 7, 27], +[767286, 2100, 10, 4], +[767305, 2100, 10, 23], +[767429, 2101, 2, 24], +[767508, 2101, 5, 14], +[767579, 2101, 7, 24], +[767751, 2102, 1, 12], +[767919, 2102, 6, 29], +[767958, 2102, 8, 7], +[768090, 2102, 12, 17], +[768251, 2103, 5, 27], +[768405, 2103, 10, 28], +[768543, 2104, 3, 14], +[768714, 2104, 9, 1], +[768857, 2105, 1, 22], +[769001, 2105, 6, 15], +[769084, 2105, 9, 6], +[769104, 2105, 9, 26], +[769167, 2105, 11, 28], +[769340, 2106, 5, 20], +[769452, 2106, 9, 9], +[769529, 2106, 11, 25], +[769718, 2107, 6, 2], +[769741, 2107, 6, 25], +[769779, 2107, 8, 2], +[769847, 2107, 10, 9], +[769961, 2108, 1, 31], +[770048, 2108, 4, 27], +[770098, 2108, 6, 16], +[770295, 2108, 12, 30], +[770378, 2109, 3, 23], +[770461, 2109, 6, 14], +[770653, 2109, 12, 23], +[770822, 2110, 6, 10], +[770919, 2110, 9, 15], +[771047, 2111, 1, 21], +[771208, 2111, 7, 1], +[771319, 2111, 10, 20], +[771446, 2112, 2, 24], +[771574, 2112, 7, 1], +[771742, 2112, 12, 16], +[771765, 2113, 1, 8], +[771808, 2113, 2, 20], +[771904, 2113, 5, 27], +[771934, 2113, 6, 26], +[772040, 2113, 10, 10], +[772058, 2113, 10, 28], +[772212, 2114, 3, 31], +[772261, 2114, 5, 19], +[772349, 2114, 8, 15], +[772472, 2114, 12, 16], +[772578, 2115, 4, 1], +[772617, 2115, 5, 10], +[772741, 2115, 9, 11], +[772761, 2115, 10, 1], +[772854, 2116, 1, 2], +[772951, 2116, 4, 8], +[773117, 2116, 9, 21], +[773266, 2117, 2, 17], +[773299, 2117, 3, 22], +[773388, 2117, 6, 19], +[773507, 2117, 10, 16], +[773575, 2117, 12, 23], +[773750, 2118, 6, 16], +[773946, 2118, 12, 29], +[774040, 2119, 4, 2], +[774195, 2119, 9, 4], +[774342, 2120, 1, 29], +[774509, 2120, 7, 14], +[774603, 2120, 10, 16], +[774624, 2120, 11, 6], +[774815, 2121, 5, 16], +[774984, 2121, 11, 1], +[775029, 2121, 12, 16], +[775164, 2122, 4, 30], +[775191, 2122, 5, 27], +[775349, 2122, 11, 1], +[775526, 2123, 4, 27], +[775673, 2123, 9, 21], +[775789, 2124, 1, 15], +[775873, 2124, 4, 8], +[775884, 2124, 4, 19], +[775885, 2124, 4, 20], +[775964, 2124, 7, 8], +[776076, 2124, 10, 28], +[776096, 2124, 11, 17], +[776107, 2124, 11, 28], +[776110, 2124, 12, 1], +[776228, 2125, 3, 29], +[776292, 2125, 6, 1], +[776420, 2125, 10, 7], +[776511, 2126, 1, 6], +[776548, 2126, 2, 12], +[776648, 2126, 5, 23], +[776733, 2126, 8, 16], +[776741, 2126, 8, 24], +[776810, 2126, 11, 1], +[776915, 2127, 2, 14], +[776982, 2127, 4, 22], +[777049, 2127, 6, 28], +[777104, 2127, 8, 22], +[777209, 2127, 12, 5], +[777227, 2127, 12, 23], +[777295, 2128, 2, 29], +[777384, 2128, 5, 28], +[777555, 2128, 11, 15], +[777731, 2129, 5, 10], +[777847, 2129, 9, 3], +[777898, 2129, 10, 24], +[777926, 2129, 11, 21], +[778017, 2130, 2, 20], +[778063, 2130, 4, 7], +[778233, 2130, 9, 24], +[778361, 2131, 1, 30], +[778452, 2131, 5, 1], +[778555, 2131, 8, 12], +[778733, 2132, 2, 6], +[778823, 2132, 5, 6], +[778888, 2132, 7, 10], +[778945, 2132, 9, 5], +[779049, 2132, 12, 18], +[779062, 2132, 12, 31], +[779157, 2133, 4, 5], +[779356, 2133, 10, 21], +[779520, 2134, 4, 3], +[779676, 2134, 9, 6], +[779768, 2134, 12, 7], +[779918, 2135, 5, 6], +[780004, 2135, 7, 31], +[780161, 2136, 1, 4], +[780329, 2136, 6, 20], +[780496, 2136, 12, 4], +[780530, 2137, 1, 7], +[780706, 2137, 7, 2], +[780750, 2137, 8, 15], +[780764, 2137, 8, 29], +[780846, 2137, 11, 19], +[781025, 2138, 5, 17], +[781091, 2138, 7, 22], +[781096, 2138, 7, 27], +[781198, 2138, 11, 6], +[781226, 2138, 12, 4], +[781348, 2139, 4, 5], +[781547, 2139, 10, 21], +[781562, 2139, 11, 5], +[781597, 2139, 12, 10], +[781764, 2140, 5, 25], +[781808, 2140, 7, 8], +[781941, 2140, 11, 18], +[782103, 2141, 4, 29], +[782239, 2141, 9, 12], +[782396, 2142, 2, 16], +[782579, 2142, 8, 18], +[782698, 2142, 12, 15], +[782719, 2143, 1, 5], +[782860, 2143, 5, 26], +[782990, 2143, 10, 3], +[783027, 2143, 11, 9], +[783202, 2144, 5, 2], +[783259, 2144, 6, 28], +[783319, 2144, 8, 27], +[783489, 2145, 2, 13], +[783608, 2145, 6, 12], +[783679, 2145, 8, 22], +[783741, 2145, 10, 23], +[783936, 2146, 5, 6], +[784029, 2146, 8, 7], +[784033, 2146, 8, 11], +[784135, 2146, 11, 21], +[784181, 2147, 1, 6], +[784340, 2147, 6, 14], +[784420, 2147, 9, 2], +[784516, 2147, 12, 7], +[784518, 2147, 12, 9], +[784632, 2148, 4, 1], +[784783, 2148, 8, 30], +[784787, 2148, 9, 3], +[784968, 2149, 3, 3], +[785067, 2149, 6, 10], +[785243, 2149, 12, 3], +[785399, 2150, 5, 8], +[785531, 2150, 9, 17], +[785696, 2151, 3, 1], +[785840, 2151, 7, 23], +[786033, 2152, 2, 1], +[786098, 2152, 4, 6], +[786184, 2152, 7, 1], +[786202, 2152, 7, 19], +[786385, 2153, 1, 18], +[786463, 2153, 4, 6], +[786577, 2153, 7, 29], +[786697, 2153, 11, 26], +[786848, 2154, 4, 26], +[787023, 2154, 10, 18], +[787153, 2155, 2, 25], +[787166, 2155, 3, 10], +[787295, 2155, 7, 17], +[787421, 2155, 11, 20], +[787448, 2155, 12, 17], +[787615, 2156, 6, 1], +[787759, 2156, 10, 23], +[787800, 2156, 12, 3], +[787846, 2157, 1, 18], +[788022, 2157, 7, 13], +[788063, 2157, 8, 23], +[788261, 2158, 3, 9], +[788277, 2158, 3, 25], +[788425, 2158, 8, 20], +[788602, 2159, 2, 13], +[788734, 2159, 6, 25], +[788872, 2159, 11, 10], +[788903, 2159, 12, 11], +[789025, 2160, 4, 11], +[789094, 2160, 6, 19], +[789215, 2160, 10, 18], +[789320, 2161, 1, 31], +[789433, 2161, 5, 24], +[789504, 2161, 8, 3], +[789681, 2162, 1, 27], +[789685, 2162, 1, 31], +[789786, 2162, 5, 12], +[789901, 2162, 9, 4], +[789981, 2162, 11, 23], +[790123, 2163, 4, 14], +[790198, 2163, 6, 28], +[790237, 2163, 8, 6], +[790353, 2163, 11, 30], +[790474, 2164, 3, 30], +[790508, 2164, 5, 3], +[790589, 2164, 7, 23], +[790707, 2164, 11, 18], +[790865, 2165, 4, 25], +[790984, 2165, 8, 22], +[791138, 2166, 1, 23], +[791308, 2166, 7, 12], +[791493, 2167, 1, 13], +[791518, 2167, 2, 7], +[791636, 2167, 6, 5], +[791666, 2167, 7, 5], +[791737, 2167, 9, 14], +[791898, 2168, 2, 22], +[792069, 2168, 8, 11], +[792234, 2169, 1, 23], +[792259, 2169, 2, 17], +[792263, 2169, 2, 21], +[792317, 2169, 4, 16], +[792492, 2169, 10, 8], +[792658, 2170, 3, 23], +[792681, 2170, 4, 15], +[792780, 2170, 7, 23], +[792890, 2170, 11, 10], +[792965, 2171, 1, 24], +[793165, 2171, 8, 12], +[793349, 2172, 2, 12], +[793351, 2172, 2, 14], +[793531, 2172, 8, 12], +[793577, 2172, 9, 27], +[793749, 2173, 3, 18], +[793867, 2173, 7, 14], +[793909, 2173, 8, 25], +[794082, 2174, 2, 14], +[794143, 2174, 4, 16], +[794207, 2174, 6, 19], +[794296, 2174, 9, 16], +[794362, 2174, 11, 21], +[794414, 2175, 1, 12], +[794552, 2175, 5, 30], +[794571, 2175, 6, 18], +[794672, 2175, 9, 27], +[794797, 2176, 1, 30], +[794930, 2176, 6, 11], +[795127, 2176, 12, 25], +[795251, 2177, 4, 28], +[795350, 2177, 8, 5], +[795463, 2177, 11, 26], +[795477, 2177, 12, 10], +[795558, 2178, 3, 1], +[795609, 2178, 4, 21], +[795759, 2178, 9, 18], +[795933, 2179, 3, 11], +[795938, 2179, 3, 16], +[796130, 2179, 9, 24], +[796288, 2180, 2, 29], +[796455, 2180, 8, 14], +[796598, 2181, 1, 4], +[796742, 2181, 5, 28], +[796905, 2181, 11, 7], +[796925, 2181, 11, 27], +[796970, 2182, 1, 11], +[797022, 2182, 3, 4], +[797175, 2182, 8, 4], +[797237, 2182, 10, 5], +[797282, 2182, 11, 19], +[797343, 2183, 1, 19], +[797394, 2183, 3, 11], +[797564, 2183, 8, 28], +[797701, 2184, 1, 12], +[797714, 2184, 1, 25], +[797744, 2184, 2, 24], +[797811, 2184, 5, 1], +[797972, 2184, 10, 9], +[798049, 2184, 12, 25], +[798182, 2185, 5, 7], +[798312, 2185, 9, 14], +[798337, 2185, 10, 9], +[798396, 2185, 12, 7], +[798511, 2186, 4, 1], +[798585, 2186, 6, 14], +[798705, 2186, 10, 12], +[798831, 2187, 2, 15], +[799003, 2187, 8, 6], +[799126, 2187, 12, 7], +[799323, 2188, 6, 21], +[799359, 2188, 7, 27], +[799540, 2189, 1, 24], +[799706, 2189, 7, 9], +[799762, 2189, 9, 3], +[799773, 2189, 9, 14], +[799951, 2190, 3, 11], +[799991, 2190, 4, 20], +[800085, 2190, 7, 23], +[800121, 2190, 8, 28], +[800259, 2191, 1, 13], +[800364, 2191, 4, 28], +[800549, 2191, 10, 30], +[800728, 2192, 4, 26], +[800892, 2192, 10, 7], +[800938, 2192, 11, 22], +[801129, 2193, 6, 1], +[801232, 2193, 9, 12], +[801265, 2193, 10, 15], +[801447, 2194, 4, 15], +[801532, 2194, 7, 9], +[801646, 2194, 10, 31], +[801705, 2194, 12, 29], +[801892, 2195, 7, 4], +[801973, 2195, 9, 23], +[801995, 2195, 10, 15], +[802139, 2196, 3, 7], +[802243, 2196, 6, 19], +[802406, 2196, 11, 29], +[802480, 2197, 2, 11], +[802559, 2197, 5, 1], +[802655, 2197, 8, 5], +[802735, 2197, 10, 24], +[802830, 2198, 1, 27], +[802833, 2198, 1, 30], +[802839, 2198, 2, 5], +[803037, 2198, 8, 22], +[803139, 2198, 12, 2], +[803207, 2199, 2, 8], +[803341, 2199, 6, 22], +[803479, 2199, 11, 7], +[803679, 2200, 5, 26], +[803737, 2200, 7, 23], +[803775, 2200, 8, 30], +[803914, 2201, 1, 16], +[803976, 2201, 3, 19], +[804027, 2201, 5, 9], +[804144, 2201, 9, 3], +[804257, 2201, 12, 25], +[804373, 2202, 4, 20], +[804402, 2202, 5, 19], +[804482, 2202, 8, 7], +[804603, 2202, 12, 6], +[804736, 2203, 4, 18], +[804747, 2203, 4, 29], +[804881, 2203, 9, 10], +[805059, 2204, 3, 6], +[805077, 2204, 3, 24], +[805146, 2204, 6, 1], +[805250, 2204, 9, 13], +[805268, 2204, 10, 1], +[805442, 2205, 3, 24], +[805592, 2205, 8, 21], +[805702, 2205, 12, 9], +[805748, 2206, 1, 24], +[805848, 2206, 5, 4], +[805868, 2206, 5, 24], +[806052, 2206, 11, 24], +[806095, 2207, 1, 6], +[806200, 2207, 4, 21], +[806321, 2207, 8, 20], +[806503, 2208, 2, 18], +[806673, 2208, 8, 6], +[806686, 2208, 8, 19], +[806759, 2208, 10, 31], +[806785, 2208, 11, 26], +[806846, 2209, 1, 26], +[806914, 2209, 4, 4], +[807038, 2209, 8, 6], +[807226, 2210, 2, 10], +[807365, 2210, 6, 29], +[807460, 2210, 10, 2], +[807474, 2210, 10, 16], +[807584, 2211, 2, 3], +[807756, 2211, 7, 25], +[807825, 2211, 10, 2], +[807913, 2211, 12, 29], +[808060, 2212, 5, 24], +[808223, 2212, 11, 3], +[808282, 2213, 1, 1], +[808412, 2213, 5, 11], +[808437, 2213, 6, 5], +[808532, 2213, 9, 8], +[808560, 2213, 10, 6], +[808658, 2214, 1, 12], +[808742, 2214, 4, 6], +[808860, 2214, 8, 2], +[808939, 2214, 10, 20], +[809027, 2215, 1, 16], +[809192, 2215, 6, 30], +[809354, 2215, 12, 9], +[809474, 2216, 4, 7], +[809525, 2216, 5, 28], +[809649, 2216, 9, 29], +[809757, 2217, 1, 15], +[809780, 2217, 2, 7], +[809857, 2217, 4, 25], +[809958, 2217, 8, 4], +[810126, 2218, 1, 19], +[810162, 2218, 2, 24], +[810188, 2218, 3, 22], +[810269, 2218, 6, 11], +[810378, 2218, 9, 28], +[810422, 2218, 11, 11], +[810508, 2219, 2, 5], +[810540, 2219, 3, 9], +[810707, 2219, 8, 23], +[810761, 2219, 10, 16], +[810888, 2220, 2, 20], +[811066, 2220, 8, 16], +[811178, 2220, 12, 6], +[811205, 2221, 1, 2], +[811391, 2221, 7, 7], +[811533, 2221, 11, 26], +[811691, 2222, 5, 3], +[811775, 2222, 7, 26], +[811895, 2222, 11, 23], +[812019, 2223, 3, 27], +[812144, 2223, 7, 30], +[812274, 2223, 12, 7], +[812275, 2223, 12, 8], +[812406, 2224, 4, 17], +[812554, 2224, 9, 12], +[812721, 2225, 2, 26], +[812897, 2225, 8, 21], +[813053, 2226, 1, 24], +[813252, 2226, 8, 11], +[813360, 2226, 11, 27], +[813385, 2226, 12, 22], +[813529, 2227, 5, 15], +[813548, 2227, 6, 3], +[813566, 2227, 6, 21], +[813693, 2227, 10, 26], +[813808, 2228, 2, 18], +[813818, 2228, 2, 28], +[813879, 2228, 4, 29], +[813972, 2228, 7, 31], +[814080, 2228, 11, 16], +[814132, 2229, 1, 7], +[814248, 2229, 5, 3], +[814268, 2229, 5, 23], +[814331, 2229, 7, 25], +[814451, 2229, 11, 22], +[814455, 2229, 11, 26], +[814592, 2230, 4, 12], +[814713, 2230, 8, 11], +[814788, 2230, 10, 25], +[814899, 2231, 2, 13], +[814978, 2231, 5, 3], +[815028, 2231, 6, 22], +[815083, 2231, 8, 16], +[815151, 2231, 10, 23], +[815248, 2232, 1, 28], +[815333, 2232, 4, 22], +[815429, 2232, 7, 27], +[815609, 2233, 1, 23], +[815772, 2233, 7, 5], +[815878, 2233, 10, 19], +[815911, 2233, 11, 21], +[815942, 2233, 12, 22], +[816116, 2234, 6, 14], +[816195, 2234, 9, 1], +[816218, 2234, 9, 24], +[816318, 2235, 1, 2], +[816511, 2235, 7, 14], +[816521, 2235, 7, 24], +[816536, 2235, 8, 8], +[816605, 2235, 10, 16], +[816631, 2235, 11, 11], +[816702, 2236, 1, 21], +[816900, 2236, 8, 6], +[817062, 2237, 1, 15], +[817223, 2237, 6, 25], +[817385, 2237, 12, 4], +[817504, 2238, 4, 2], +[817532, 2238, 4, 30], +[817675, 2238, 9, 20], +[817689, 2238, 10, 4], +[817854, 2239, 3, 18], +[818017, 2239, 8, 28], +[818153, 2240, 1, 11], +[818255, 2240, 4, 22], +[818416, 2240, 9, 30], +[818607, 2241, 4, 9], +[818781, 2241, 9, 30], +[818900, 2242, 1, 27], +[818975, 2242, 4, 12], +[819127, 2242, 9, 11], +[819130, 2242, 9, 14], +[819171, 2242, 10, 25], +[819280, 2243, 2, 11], +[819333, 2243, 4, 5], +[819452, 2243, 8, 2], +[819571, 2243, 11, 29], +[819678, 2244, 3, 15], +[819702, 2244, 4, 8], +[819799, 2244, 7, 14], +[819937, 2244, 11, 29], +[820005, 2245, 2, 5], +[820148, 2245, 6, 28], +[820299, 2245, 11, 26], +[820337, 2246, 1, 3], +[820379, 2246, 2, 14], +[820441, 2246, 4, 17], +[820531, 2246, 7, 16], +[820565, 2246, 8, 19], +[820736, 2247, 2, 6], +[820895, 2247, 7, 15], +[820999, 2247, 10, 27], +[821186, 2248, 5, 1], +[821205, 2248, 5, 20], +[821217, 2248, 6, 1], +[821358, 2248, 10, 20], +[821393, 2248, 11, 24], +[821532, 2249, 4, 12], +[821568, 2249, 5, 18], +[821735, 2249, 11, 1], +[821867, 2250, 3, 13], +[821884, 2250, 3, 30]] diff --git a/go/mysql/datetime/testdata/year_to_daynr.json b/go/mysql/datetime/testdata/year_to_daynr.json new file mode 100644 index 00000000000..43914806d21 --- /dev/null +++ b/go/mysql/datetime/testdata/year_to_daynr.json @@ -0,0 +1 @@ +[1, 366, 731, 1096, 1461, 1827, 2192, 2557, 2922, 3288, 3653, 4018, 4383, 4749, 5114, 5479, 5844, 6210, 6575, 6940, 7305, 7671, 8036, 8401, 8766, 9132, 9497, 9862, 10227, 10593, 10958, 11323, 11688, 12054, 12419, 12784, 13149, 13515, 13880, 14245, 14610, 14976, 15341, 15706, 16071, 16437, 16802, 17167, 17532, 17898, 18263, 18628, 18993, 19359, 19724, 20089, 20454, 20820, 21185, 21550, 21915, 22281, 22646, 23011, 23376, 23742, 24107, 24472, 24837, 25203, 25568, 25933, 26298, 26664, 27029, 27394, 27759, 28125, 28490, 28855, 29220, 29586, 29951, 30316, 30681, 31047, 31412, 31777, 32142, 32508, 32873, 33238, 33603, 33969, 34334, 34699, 35064, 35430, 35795, 36160, 36525, 36890, 37255, 37620, 37985, 38351, 38716, 39081, 39446, 39812, 40177, 40542, 40907, 41273, 41638, 42003, 42368, 42734, 43099, 43464, 43829, 44195, 44560, 44925, 45290, 45656, 46021, 46386, 46751, 47117, 47482, 47847, 48212, 48578, 48943, 49308, 49673, 50039, 50404, 50769, 51134, 51500, 51865, 52230, 52595, 52961, 53326, 53691, 54056, 54422, 54787, 55152, 55517, 55883, 56248, 56613, 56978, 57344, 57709, 58074, 58439, 58805, 59170, 59535, 59900, 60266, 60631, 60996, 61361, 61727, 62092, 62457, 62822, 63188, 63553, 63918, 64283, 64649, 65014, 65379, 65744, 66110, 66475, 66840, 67205, 67571, 67936, 68301, 68666, 69032, 69397, 69762, 70127, 70493, 70858, 71223, 71588, 71954, 72319, 72684, 73049, 73414, 73779, 74144, 74509, 74875, 75240, 75605, 75970, 76336, 76701, 77066, 77431, 77797, 78162, 78527, 78892, 79258, 79623, 79988, 80353, 80719, 81084, 81449, 81814, 82180, 82545, 82910, 83275, 83641, 84006, 84371, 84736, 85102, 85467, 85832, 86197, 86563, 86928, 87293, 87658, 88024, 88389, 88754, 89119, 89485, 89850, 90215, 90580, 90946, 91311, 91676, 92041, 92407, 92772, 93137, 93502, 93868, 94233, 94598, 94963, 95329, 95694, 96059, 96424, 96790, 97155, 97520, 97885, 98251, 98616, 98981, 99346, 99712, 100077, 100442, 100807, 101173, 101538, 101903, 102268, 102634, 102999, 103364, 103729, 104095, 104460, 104825, 105190, 105556, 105921, 106286, 106651, 107017, 107382, 107747, 108112, 108478, 108843, 109208, 109573, 109938, 110303, 110668, 111033, 111399, 111764, 112129, 112494, 112860, 113225, 113590, 113955, 114321, 114686, 115051, 115416, 115782, 116147, 116512, 116877, 117243, 117608, 117973, 118338, 118704, 119069, 119434, 119799, 120165, 120530, 120895, 121260, 121626, 121991, 122356, 122721, 123087, 123452, 123817, 124182, 124548, 124913, 125278, 125643, 126009, 126374, 126739, 127104, 127470, 127835, 128200, 128565, 128931, 129296, 129661, 130026, 130392, 130757, 131122, 131487, 131853, 132218, 132583, 132948, 133314, 133679, 134044, 134409, 134775, 135140, 135505, 135870, 136236, 136601, 136966, 137331, 137697, 138062, 138427, 138792, 139158, 139523, 139888, 140253, 140619, 140984, 141349, 141714, 142080, 142445, 142810, 143175, 143541, 143906, 144271, 144636, 145002, 145367, 145732, 146097, 146463, 146828, 147193, 147558, 147924, 148289, 148654, 149019, 149385, 149750, 150115, 150480, 150846, 151211, 151576, 151941, 152307, 152672, 153037, 153402, 153768, 154133, 154498, 154863, 155229, 155594, 155959, 156324, 156690, 157055, 157420, 157785, 158151, 158516, 158881, 159246, 159612, 159977, 160342, 160707, 161073, 161438, 161803, 162168, 162534, 162899, 163264, 163629, 163995, 164360, 164725, 165090, 165456, 165821, 166186, 166551, 166917, 167282, 167647, 168012, 168378, 168743, 169108, 169473, 169839, 170204, 170569, 170934, 171300, 171665, 172030, 172395, 172761, 173126, 173491, 173856, 174222, 174587, 174952, 175317, 175683, 176048, 176413, 176778, 177144, 177509, 177874, 178239, 178605, 178970, 179335, 179700, 180066, 180431, 180796, 181161, 181527, 181892, 182257, 182622, 182987, 183352, 183717, 184082, 184448, 184813, 185178, 185543, 185909, 186274, 186639, 187004, 187370, 187735, 188100, 188465, 188831, 189196, 189561, 189926, 190292, 190657, 191022, 191387, 191753, 192118, 192483, 192848, 193214, 193579, 193944, 194309, 194675, 195040, 195405, 195770, 196136, 196501, 196866, 197231, 197597, 197962, 198327, 198692, 199058, 199423, 199788, 200153, 200519, 200884, 201249, 201614, 201980, 202345, 202710, 203075, 203441, 203806, 204171, 204536, 204902, 205267, 205632, 205997, 206363, 206728, 207093, 207458, 207824, 208189, 208554, 208919, 209285, 209650, 210015, 210380, 210746, 211111, 211476, 211841, 212207, 212572, 212937, 213302, 213668, 214033, 214398, 214763, 215129, 215494, 215859, 216224, 216590, 216955, 217320, 217685, 218051, 218416, 218781, 219146, 219511, 219876, 220241, 220606, 220972, 221337, 221702, 222067, 222433, 222798, 223163, 223528, 223894, 224259, 224624, 224989, 225355, 225720, 226085, 226450, 226816, 227181, 227546, 227911, 228277, 228642, 229007, 229372, 229738, 230103, 230468, 230833, 231199, 231564, 231929, 232294, 232660, 233025, 233390, 233755, 234121, 234486, 234851, 235216, 235582, 235947, 236312, 236677, 237043, 237408, 237773, 238138, 238504, 238869, 239234, 239599, 239965, 240330, 240695, 241060, 241426, 241791, 242156, 242521, 242887, 243252, 243617, 243982, 244348, 244713, 245078, 245443, 245809, 246174, 246539, 246904, 247270, 247635, 248000, 248365, 248731, 249096, 249461, 249826, 250192, 250557, 250922, 251287, 251653, 252018, 252383, 252748, 253114, 253479, 253844, 254209, 254575, 254940, 255305, 255670, 256035, 256400, 256765, 257130, 257496, 257861, 258226, 258591, 258957, 259322, 259687, 260052, 260418, 260783, 261148, 261513, 261879, 262244, 262609, 262974, 263340, 263705, 264070, 264435, 264801, 265166, 265531, 265896, 266262, 266627, 266992, 267357, 267723, 268088, 268453, 268818, 269184, 269549, 269914, 270279, 270645, 271010, 271375, 271740, 272106, 272471, 272836, 273201, 273567, 273932, 274297, 274662, 275028, 275393, 275758, 276123, 276489, 276854, 277219, 277584, 277950, 278315, 278680, 279045, 279411, 279776, 280141, 280506, 280872, 281237, 281602, 281967, 282333, 282698, 283063, 283428, 283794, 284159, 284524, 284889, 285255, 285620, 285985, 286350, 286716, 287081, 287446, 287811, 288177, 288542, 288907, 289272, 289638, 290003, 290368, 290733, 291099, 291464, 291829, 292194, 292560, 292925, 293290, 293655, 294021, 294386, 294751, 295116, 295482, 295847, 296212, 296577, 296943, 297308, 297673, 298038, 298404, 298769, 299134, 299499, 299865, 300230, 300595, 300960, 301326, 301691, 302056, 302421, 302787, 303152, 303517, 303882, 304248, 304613, 304978, 305343, 305709, 306074, 306439, 306804, 307170, 307535, 307900, 308265, 308631, 308996, 309361, 309726, 310092, 310457, 310822, 311187, 311553, 311918, 312283, 312648, 313014, 313379, 313744, 314109, 314475, 314840, 315205, 315570, 315936, 316301, 316666, 317031, 317397, 317762, 318127, 318492, 318858, 319223, 319588, 319953, 320319, 320684, 321049, 321414, 321780, 322145, 322510, 322875, 323241, 323606, 323971, 324336, 324702, 325067, 325432, 325797, 326163, 326528, 326893, 327258, 327624, 327989, 328354, 328719, 329084, 329449, 329814, 330179, 330545, 330910, 331275, 331640, 332006, 332371, 332736, 333101, 333467, 333832, 334197, 334562, 334928, 335293, 335658, 336023, 336389, 336754, 337119, 337484, 337850, 338215, 338580, 338945, 339311, 339676, 340041, 340406, 340772, 341137, 341502, 341867, 342233, 342598, 342963, 343328, 343694, 344059, 344424, 344789, 345155, 345520, 345885, 346250, 346616, 346981, 347346, 347711, 348077, 348442, 348807, 349172, 349538, 349903, 350268, 350633, 350999, 351364, 351729, 352094, 352460, 352825, 353190, 353555, 353921, 354286, 354651, 355016, 355382, 355747, 356112, 356477, 356843, 357208, 357573, 357938, 358304, 358669, 359034, 359399, 359765, 360130, 360495, 360860, 361226, 361591, 361956, 362321, 362687, 363052, 363417, 363782, 364148, 364513, 364878, 365243, 365608, 365973, 366338, 366703, 367069, 367434, 367799, 368164, 368530, 368895, 369260, 369625, 369991, 370356, 370721, 371086, 371452, 371817, 372182, 372547, 372913, 373278, 373643, 374008, 374374, 374739, 375104, 375469, 375835, 376200, 376565, 376930, 377296, 377661, 378026, 378391, 378757, 379122, 379487, 379852, 380218, 380583, 380948, 381313, 381679, 382044, 382409, 382774, 383140, 383505, 383870, 384235, 384601, 384966, 385331, 385696, 386062, 386427, 386792, 387157, 387523, 387888, 388253, 388618, 388984, 389349, 389714, 390079, 390445, 390810, 391175, 391540, 391906, 392271, 392636, 393001, 393367, 393732, 394097, 394462, 394828, 395193, 395558, 395923, 396289, 396654, 397019, 397384, 397750, 398115, 398480, 398845, 399211, 399576, 399941, 400306, 400672, 401037, 401402, 401767, 402132, 402497, 402862, 403227, 403593, 403958, 404323, 404688, 405054, 405419, 405784, 406149, 406515, 406880, 407245, 407610, 407976, 408341, 408706, 409071, 409437, 409802, 410167, 410532, 410898, 411263, 411628, 411993, 412359, 412724, 413089, 413454, 413820, 414185, 414550, 414915, 415281, 415646, 416011, 416376, 416742, 417107, 417472, 417837, 418203, 418568, 418933, 419298, 419664, 420029, 420394, 420759, 421125, 421490, 421855, 422220, 422586, 422951, 423316, 423681, 424047, 424412, 424777, 425142, 425508, 425873, 426238, 426603, 426969, 427334, 427699, 428064, 428430, 428795, 429160, 429525, 429891, 430256, 430621, 430986, 431352, 431717, 432082, 432447, 432813, 433178, 433543, 433908, 434274, 434639, 435004, 435369, 435735, 436100, 436465, 436830, 437196, 437561, 437926, 438291, 438657, 439022, 439387, 439752, 440118, 440483, 440848, 441213, 441579, 441944, 442309, 442674, 443040, 443405, 443770, 444135, 444501, 444866, 445231, 445596, 445962, 446327, 446692, 447057, 447423, 447788, 448153, 448518, 448884, 449249, 449614, 449979, 450345, 450710, 451075, 451440, 451806, 452171, 452536, 452901, 453267, 453632, 453997, 454362, 454728, 455093, 455458, 455823, 456189, 456554, 456919, 457284, 457650, 458015, 458380, 458745, 459111, 459476, 459841, 460206, 460572, 460937, 461302, 461667, 462033, 462398, 462763, 463128, 463494, 463859, 464224, 464589, 464955, 465320, 465685, 466050, 466416, 466781, 467146, 467511, 467877, 468242, 468607, 468972, 469338, 469703, 470068, 470433, 470799, 471164, 471529, 471894, 472260, 472625, 472990, 473355, 473721, 474086, 474451, 474816, 475181, 475546, 475911, 476276, 476642, 477007, 477372, 477737, 478103, 478468, 478833, 479198, 479564, 479929, 480294, 480659, 481025, 481390, 481755, 482120, 482486, 482851, 483216, 483581, 483947, 484312, 484677, 485042, 485408, 485773, 486138, 486503, 486869, 487234, 487599, 487964, 488330, 488695, 489060, 489425, 489791, 490156, 490521, 490886, 491252, 491617, 491982, 492347, 492713, 493078, 493443, 493808, 494174, 494539, 494904, 495269, 495635, 496000, 496365, 496730, 497096, 497461, 497826, 498191, 498557, 498922, 499287, 499652, 500018, 500383, 500748, 501113, 501479, 501844, 502209, 502574, 502940, 503305, 503670, 504035, 504401, 504766, 505131, 505496, 505862, 506227, 506592, 506957, 507323, 507688, 508053, 508418, 508784, 509149, 509514, 509879, 510245, 510610, 510975, 511340, 511705, 512070, 512435, 512800, 513166, 513531, 513896, 514261, 514627, 514992, 515357, 515722, 516088, 516453, 516818, 517183, 517549, 517914, 518279, 518644, 519010, 519375, 519740, 520105, 520471, 520836, 521201, 521566, 521932, 522297, 522662, 523027, 523393, 523758, 524123, 524488, 524854, 525219, 525584, 525949, 526315, 526680, 527045, 527410, 527776, 528141, 528506, 528871, 529237, 529602, 529967, 530332, 530698, 531063, 531428, 531793, 532159, 532524, 532889, 533254, 533620, 533985, 534350, 534715, 535081, 535446, 535811, 536176, 536542, 536907, 537272, 537637, 538003, 538368, 538733, 539098, 539464, 539829, 540194, 540559, 540925, 541290, 541655, 542020, 542386, 542751, 543116, 543481, 543847, 544212, 544577, 544942, 545308, 545673, 546038, 546403, 546769, 547134, 547499, 547864, 548229, 548594, 548959, 549324, 549690, 550055, 550420, 550785, 551151, 551516, 551881, 552246, 552612, 552977, 553342, 553707, 554073, 554438, 554803, 555168, 555534, 555899, 556264, 556629, 556995, 557360, 557725, 558090, 558456, 558821, 559186, 559551, 559917, 560282, 560647, 561012, 561378, 561743, 562108, 562473, 562839, 563204, 563569, 563934, 564300, 564665, 565030, 565395, 565761, 566126, 566491, 566856, 567222, 567587, 567952, 568317, 568683, 569048, 569413, 569778, 570144, 570509, 570874, 571239, 571605, 571970, 572335, 572700, 573066, 573431, 573796, 574161, 574527, 574892, 575257, 575622, 575988, 576353, 576718, 577083, 577449, 577814, 578179, 578544, 578910, 579275, 579640, 580005, 580371, 580736, 581101, 581466, 581832, 582197, 582562, 582927, 583293, 583658, 584023, 584388, 584754, 585119, 585484, 585849, 586215, 586580, 586945, 587310, 587676, 588041, 588406, 588771, 589137, 589502, 589867, 590232, 590598, 590963, 591328, 591693, 592059, 592424, 592789, 593154, 593520, 593885, 594250, 594615, 594981, 595346, 595711, 596076, 596442, 596807, 597172, 597537, 597903, 598268, 598633, 598998, 599364, 599729, 600094, 600459, 600825, 601190, 601555, 601920, 602286, 602651, 603016, 603381, 603747, 604112, 604477, 604842, 605208, 605573, 605938, 606303, 606669, 607034, 607399, 607764, 608130, 608495, 608860, 609225, 609591, 609956, 610321, 610686, 611052, 611417, 611782, 612147, 612513, 612878, 613243, 613608, 613974, 614339, 614704, 615069, 615435, 615800, 616165, 616530, 616896, 617261, 617626, 617991, 618357, 618722, 619087, 619452, 619818, 620183, 620548, 620913, 621278, 621643, 622008, 622373, 622739, 623104, 623469, 623834, 624200, 624565, 624930, 625295, 625661, 626026, 626391, 626756, 627122, 627487, 627852, 628217, 628583, 628948, 629313, 629678, 630044, 630409, 630774, 631139, 631505, 631870, 632235, 632600, 632966, 633331, 633696, 634061, 634427, 634792, 635157, 635522, 635888, 636253, 636618, 636983, 637349, 637714, 638079, 638444, 638810, 639175, 639540, 639905, 640271, 640636, 641001, 641366, 641732, 642097, 642462, 642827, 643193, 643558, 643923, 644288, 644654, 645019, 645384, 645749, 646115, 646480, 646845, 647210, 647576, 647941, 648306, 648671, 649037, 649402, 649767, 650132, 650498, 650863, 651228, 651593, 651959, 652324, 652689, 653054, 653420, 653785, 654150, 654515, 654881, 655246, 655611, 655976, 656342, 656707, 657072, 657437, 657802, 658167, 658532, 658897, 659263, 659628, 659993, 660358, 660724, 661089, 661454, 661819, 662185, 662550, 662915, 663280, 663646, 664011, 664376, 664741, 665107, 665472, 665837, 666202, 666568, 666933, 667298, 667663, 668029, 668394, 668759, 669124, 669490, 669855, 670220, 670585, 670951, 671316, 671681, 672046, 672412, 672777, 673142, 673507, 673873, 674238, 674603, 674968, 675334, 675699, 676064, 676429, 676795, 677160, 677525, 677890, 678256, 678621, 678986, 679351, 679717, 680082, 680447, 680812, 681178, 681543, 681908, 682273, 682639, 683004, 683369, 683734, 684100, 684465, 684830, 685195, 685561, 685926, 686291, 686656, 687022, 687387, 687752, 688117, 688483, 688848, 689213, 689578, 689944, 690309, 690674, 691039, 691405, 691770, 692135, 692500, 692866, 693231, 693596, 693961, 694326, 694691, 695056, 695421, 695787, 696152, 696517, 696882, 697248, 697613, 697978, 698343, 698709, 699074, 699439, 699804, 700170, 700535, 700900, 701265, 701631, 701996, 702361, 702726, 703092, 703457, 703822, 704187, 704553, 704918, 705283, 705648, 706014, 706379, 706744, 707109, 707475, 707840, 708205, 708570, 708936, 709301, 709666, 710031, 710397, 710762, 711127, 711492, 711858, 712223, 712588, 712953, 713319, 713684, 714049, 714414, 714780, 715145, 715510, 715875, 716241, 716606, 716971, 717336, 717702, 718067, 718432, 718797, 719163, 719528, 719893, 720258, 720624, 720989, 721354, 721719, 722085, 722450, 722815, 723180, 723546, 723911, 724276, 724641, 725007, 725372, 725737, 726102, 726468, 726833, 727198, 727563, 727929, 728294, 728659, 729024, 729390, 729755, 730120, 730485, 730851, 731216, 731581, 731946, 732312, 732677, 733042, 733407, 733773, 734138, 734503, 734868, 735234, 735599, 735964, 736329, 736695, 737060, 737425, 737790, 738156, 738521, 738886, 739251, 739617, 739982, 740347, 740712, 741078, 741443, 741808, 742173, 742539, 742904, 743269, 743634, 744000, 744365, 744730, 745095, 745461, 745826, 746191, 746556, 746922, 747287, 747652, 748017, 748383, 748748, 749113, 749478, 749844, 750209, 750574, 750939, 751305, 751670, 752035, 752400, 752766, 753131, 753496, 753861, 754227, 754592, 754957, 755322, 755688, 756053, 756418, 756783, 757149, 757514, 757879, 758244, 758610, 758975, 759340, 759705, 760071, 760436, 760801, 761166, 761532, 761897, 762262, 762627, 762993, 763358, 763723, 764088, 764454, 764819, 765184, 765549, 765915, 766280, 766645] \ No newline at end of file diff --git a/go/mysql/datetime/time_zone.go b/go/mysql/datetime/time_zone.go new file mode 100644 index 00000000000..046e06ed240 --- /dev/null +++ b/go/mysql/datetime/time_zone.go @@ -0,0 +1,79 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datetime + +import ( + "fmt" + "strconv" + "time" + + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +func unknownTimeZone(tz string) error { + return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.UnknownTimeZone, "Unknown or incorrect time zone: '%s'", tz) +} + +func ParseTimeZone(tz string) (*time.Location, error) { + // Needs to be checked first since time.LoadLocation("") returns UTC. + if tz == "" { + return nil, unknownTimeZone(tz) + } + loc, err := time.LoadLocation(tz) + if err == nil { + return loc, nil + } + + // MySQL also handles timezone formats in the form of the + // offset from UTC, so we'll try that if the above fails. + // This format is always something in the form of +HH:MM or -HH:MM. + if len(tz) != 6 { + return nil, unknownTimeZone(tz) + } + if tz[0] != '+' && tz[0] != '-' { + return nil, unknownTimeZone(tz) + } + if tz[3] != ':' { + return nil, unknownTimeZone(tz) + } + neg := tz[0] == '-' + hours, err := strconv.ParseUint(tz[1:3], 10, 4) + if err != nil { + return nil, unknownTimeZone(tz) + } + minutes, err := strconv.ParseUint(tz[4:], 10, 6) + if err != nil { + return nil, unknownTimeZone(tz) + } + if minutes > 59 { + return nil, unknownTimeZone(tz) + } + + // MySQL only supports timezones in the range of -13:59 to +14:00. + if neg && hours > 13 { + return nil, unknownTimeZone(tz) + } + if !neg && (hours > 14 || hours == 14 && minutes > 0) { + return nil, unknownTimeZone(tz) + } + offset := int(hours)*60*60 + int(minutes)*60 + if neg { + offset = -offset + } + return time.FixedZone(fmt.Sprintf("UTC%s", tz), offset), nil +} diff --git a/go/mysql/datetime/time_zone_test.go b/go/mysql/datetime/time_zone_test.go new file mode 100644 index 00000000000..94745d0c71e --- /dev/null +++ b/go/mysql/datetime/time_zone_test.go @@ -0,0 +1,77 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datetime + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseTimeZone(t *testing.T) { + testCases := []struct { + tz string + want string + }{ + { + tz: "Europe/Amsterdam", + want: "Europe/Amsterdam", + }, + { + tz: "", + want: "Unknown or incorrect time zone: ''", + }, + { + tz: "+02:00", + want: "UTC+02:00", + }, + { + tz: "+14:00", + want: "UTC+14:00", + }, + { + tz: "+14:01", + want: "Unknown or incorrect time zone: '+14:01'", + }, + { + tz: "-13:59", + want: "UTC-13:59", + }, + { + tz: "-14:00", + want: "Unknown or incorrect time zone: '-14:00'", + }, + { + tz: "-15:00", + want: "Unknown or incorrect time zone: '-15:00'", + }, + { + tz: "foo", + want: "Unknown or incorrect time zone: 'foo'", + }, + } + + for _, tc := range testCases { + + zone, err := ParseTimeZone(tc.tz) + if err != nil { + assert.Equal(t, tc.want, err.Error()) + } else { + assert.Equal(t, tc.want, zone.String()) + } + } +} diff --git a/go/mysql/datetime/timeparts.go b/go/mysql/datetime/timeparts.go new file mode 100644 index 00000000000..a774099a93a --- /dev/null +++ b/go/mysql/datetime/timeparts.go @@ -0,0 +1,92 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datetime + +import "time" + +type timeparts struct { + year int + month int + day int + yday int + hour int + min int + sec int + nsec int + + pmset bool + amset bool + + prec uint8 +} + +func (tp *timeparts) toDateTime(prec int) (DateTime, int, bool) { + if tp.isZero() { + // zero date + return DateTime{}, 0, true + } + + if tp.pmset && tp.hour < 12 { + tp.hour += 12 + } else if tp.amset && tp.hour == 12 { + tp.hour = 0 + } + if tp.yday > 0 { + return DateTime{}, 0, false + } else { + if tp.month < 1 { + tp.month = int(time.January) + } + if tp.day < 0 { + tp.day = 1 + } + } + if tp.day < 1 || tp.day > daysIn(time.Month(tp.month), tp.year) { + return DateTime{}, 0, false + } + + dt := DateTime{ + Date: Date{ + year: uint16(tp.year), + month: uint8(tp.month), + day: uint8(tp.day), + }, + Time: Time{ + hour: uint16(tp.hour), + minute: uint8(tp.min), + second: uint8(tp.sec), + nanosecond: uint32(tp.nsec), + }, + } + + l := prec + if prec < 0 { + l = int(tp.prec) + } else { + dt = dt.Round(prec) + } + + return dt, l, true +} + +func (tp *timeparts) isZero() bool { + return tp.year == 0 && tp.month == 0 && tp.day == 0 && tp.hour == 0 && tp.min == 0 && tp.sec == 0 && tp.nsec == 0 +} + +func (tp *timeparts) toSeconds() int { + return tp.day*secondsPerDay + tp.hour*3600 + tp.min*60 + tp.sec +} diff --git a/go/vt/vtgate/evalengine/internal/decimal/LICENSE b/go/mysql/decimal/LICENSE similarity index 100% rename from go/vt/vtgate/evalengine/internal/decimal/LICENSE rename to go/mysql/decimal/LICENSE diff --git a/go/vt/vtgate/evalengine/internal/decimal/cached_size.go b/go/mysql/decimal/cached_size.go similarity index 100% rename from go/vt/vtgate/evalengine/internal/decimal/cached_size.go rename to go/mysql/decimal/cached_size.go diff --git a/go/vt/vtgate/evalengine/internal/decimal/decimal.go b/go/mysql/decimal/decimal.go similarity index 95% rename from go/vt/vtgate/evalengine/internal/decimal/decimal.go rename to go/mysql/decimal/decimal.go index 5eb9aef5f9f..a2b505a1232 100644 --- a/go/vt/vtgate/evalengine/internal/decimal/decimal.go +++ b/go/mysql/decimal/decimal.go @@ -263,7 +263,7 @@ func (d Decimal) rescale(exp int32) Decimal { } // abs returns the absolute value of the decimal. -func (d Decimal) abs() Decimal { +func (d Decimal) Abs() Decimal { if d.Sign() >= 0 { return d } @@ -362,7 +362,7 @@ func (d Decimal) Div(d2 Decimal, scaleIncr int32) Decimal { scaleIncr = 0 } scale := myBigDigits(fracLeft+fracRight+scaleIncr) * 9 - q, _ := d.quoRem(d2, scale) + q, _ := d.QuoRem(d2, scale) return q } @@ -372,15 +372,15 @@ func (d Decimal) div(d2 Decimal) Decimal { return d.divRound(d2, int32(divisionPrecision)) } -// quoRem does division with remainder -// d.quoRem(d2,precision) returns quotient q and remainder r such that +// QuoRem does division with remainder +// d.QuoRem(d2,precision) returns quotient q and remainder r such that // // d = d2 * q + r, q an integer multiple of 10^(-precision) // 0 <= r < abs(d2) * 10 ^(-precision) if d>=0 // 0 >= r > -abs(d2) * 10 ^(-precision) if d<0 // // Note that precision<0 is allowed as input. -func (d Decimal) quoRem(d2 Decimal, precision int32) (Decimal, Decimal) { +func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) { d.ensureInitialized() d2.ensureInitialized() if d2.value.Sign() == 0 { @@ -389,7 +389,7 @@ func (d Decimal) quoRem(d2 Decimal, precision int32) (Decimal, Decimal) { scale := -precision e := int64(d.exp - d2.exp - scale) if e > math.MaxInt32 || e < math.MinInt32 { - panic("overflow in decimal quoRem") + panic("overflow in decimal QuoRem") } var aa, bb, expo big.Int var scalerest int32 @@ -428,7 +428,7 @@ func (d Decimal) quoRem(d2 Decimal, precision int32) (Decimal, Decimal) { // Note that precision<0 is allowed as input. func (d Decimal) divRound(d2 Decimal, precision int32) Decimal { // quoRem already checks initialization - q, r := d.quoRem(d2, precision) + q, r := d.QuoRem(d2, precision) // the actual rounding decision is based on comparing r*10^precision and d2/2 // instead compare 2 r 10 ^precision and d2 @@ -438,7 +438,7 @@ func (d Decimal) divRound(d2 Decimal, precision int32) Decimal { // now rv2 = abs(r.value) * 2 r2 := Decimal{value: &rv2, exp: r.exp + precision} // r2 is now 2 * r * 10 ^ precision - var c = r2.Cmp(d2.abs()) + var c = r2.Cmp(d2.Abs()) if c < 0 { return q @@ -453,7 +453,7 @@ func (d Decimal) divRound(d2 Decimal, precision int32) Decimal { // mod returns d % d2. func (d Decimal) mod(d2 Decimal) Decimal { - quo := d.divRound(d2, -d.exp+1).truncate(0) + quo := d.divRound(d2, -d.exp+1).Truncate(0) return d.sub(d2.mul(quo)) } @@ -474,9 +474,23 @@ func (d Decimal) Ceil() Decimal { return Decimal{value: z, exp: 0} } -func (d Decimal) truncate(precision int32) Decimal { +func (d Decimal) Floor() Decimal { + if d.isInteger() { + return d + } + + exp := big.NewInt(10) + + // NOTE(vadim): must negate after casting to prevent int32 overflow + exp.Exp(exp, big.NewInt(-int64(d.exp)), nil) + + z, _ := new(big.Int).DivMod(d.value, exp, new(big.Int)) + return Decimal{value: z, exp: 0} +} + +func (d Decimal) Truncate(precision int32) Decimal { d.ensureInitialized() - if precision >= 0 && -precision > d.exp { + if -precision > d.exp { return d.rescale(-precision) } return d @@ -637,7 +651,7 @@ func (d Decimal) Round(places int32) Decimal { if d.exp == -places { return d } - // truncate to places + 1 + // Truncate to places + 1 ret := d.rescale(-places - 1) // add sign(d) * 0.5 @@ -663,6 +677,10 @@ func (d *Decimal) ensureInitialized() { } } +func (d Decimal) IsInitialized() bool { + return d.value != nil +} + // RescalePair rescales two decimals to common exponential value (minimal exp of both decimals) func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) { d1.ensureInitialized() @@ -679,13 +697,6 @@ func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) { return d1, d2.rescale(baseScale) } -func min(x, y int32) int32 { - if x >= y { - return y - } - return x -} - // largestForm returns the largest decimal that can be represented // with the given amount of integral and fractional digits // Example: diff --git a/go/vt/vtgate/evalengine/internal/decimal/decimal_test.go b/go/mysql/decimal/decimal_test.go similarity index 88% rename from go/vt/vtgate/evalengine/internal/decimal/decimal_test.go rename to go/mysql/decimal/decimal_test.go index 88edef68ff7..09819ddcebb 100644 --- a/go/vt/vtgate/evalengine/internal/decimal/decimal_test.go +++ b/go/mysql/decimal/decimal_test.go @@ -277,50 +277,54 @@ func TestFloat64(t *testing.T) { } func TestNewFromStringErrs(t *testing.T) { - tests := []string{ - "", - "qwert", - "-", - ".", - "-.", - ".-", - "234-.56", - "234-56", - "2-", - "..", - "2..", - "..2", - ".5.2", - "8..2", - "8.1.", - "1e", - "1-e", - "1e9e", - "1ee9", - "1ee", - "1eE", - "1e-", - "1e-.", - "1e1.2", - "123.456e1.3", - "1e-1.2", - "123.456e-1.3", - "123.456Easdf", - "123.456e" + strconv.FormatInt(math.MinInt64, 10), - "123.456e" + strconv.FormatInt(math.MinInt32, 10), - "512.99 USD", - "$99.99", - "51,850.00", - "20_000_000.00", - "$20_000_000.00", - } - - for _, s := range tests { - _, err := NewFromString(s) + tests := map[string]string{ + "": "0", + "qwert": "0", + "-": "0", + ".": "0", + "-.": "0", + ".-": "0", + "234-.56": "234", + "234-56": "234", + "2-": "2", + "..": "0", + "2..": "2", + "..2": "0", + ".5.2": "0.5", + "8..2": "8", + "8.1.": "8.1", + "1e": "1", + "1-e": "1", + "1e9e": "1000000000", + "1ee9": "1", + "1ee": "1", + "1eE": "1", + "1e-": "1", + "1e-.": "1", + "1e1.2": "10", + "123.456e1.3": "1234.56", + "1e-1.2": "0.1", + "123.456e-1.3": "12.3456", + "123.456Easdf": "123.456", + "123.456e" + strconv.FormatInt(math.MinInt64, 10): "0.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000123456", + "123.456e" + strconv.FormatInt(math.MinInt32, 10): "0.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000123456", + "512.99 USD": "512.99", + "$99.99": "0", + "51,850.00": "51", + "20_000_000.00": "20", + "$20_000_000.00": "0", + } + + for s, o := range tests { + out, err := NewFromString(s) if err == nil { t.Errorf("error expected when parsing %s", s) } + + if out.String() != o { + t.Errorf("expected %s, got %s", o, out.String()) + } } } @@ -336,6 +340,10 @@ func TestNewFromStringDeepEquals(t *testing.T) { {"10", "10.0", false}, {"1.1", "1.10", false}, {"1.001", "1.01", false}, + {" 0 ", "0", true}, + {" 0.0 ", "0.0", true}, + {" 1 ", "1", true}, + {" 0.1 ", "0.1", true}, } for _, cmp := range tests { @@ -746,22 +754,22 @@ func TestDecimal_QuoRem(t *testing.T) { d, _ := NewFromString(inp4.d) d2, _ := NewFromString(inp4.d2) prec := inp4.exp - q, r := d.quoRem(d2, prec) + q, r := d.QuoRem(d2, prec) expectedQ, _ := NewFromString(inp4.q) expectedR, _ := NewFromString(inp4.r) if !q.Equal(expectedQ) || !r.Equal(expectedR) { - t.Errorf("bad quoRem division %s , %s , %d got %v, %v expected %s , %s", + t.Errorf("bad QuoRem division %s , %s , %d got %v, %v expected %s , %s", inp4.d, inp4.d2, prec, q, r, inp4.q, inp4.r) } if !d.Equal(d2.mul(q).Add(r)) { t.Errorf("not fitting: d=%v, d2= %v, prec=%d, q=%v, r=%v", d, d2, prec, q, r) } - if !q.Equal(q.truncate(prec)) { + if !q.Equal(q.Truncate(prec)) { t.Errorf("quotient wrong precision: d=%v, d2= %v, prec=%d, q=%v, r=%v", d, d2, prec, q, r) } - if r.abs().Cmp(d2.abs().mul(New(1, -prec))) >= 0 { + if r.Abs().Cmp(d2.Abs().mul(New(1, -prec))) >= 0 { t.Errorf("remainder too large: d=%v, d2= %v, prec=%d, q=%v, r=%v", d, d2, prec, q, r) } @@ -813,19 +821,19 @@ func TestDecimal_QuoRem2(t *testing.T) { } d2 := tc.d2 prec := tc.prec - q, r := d.quoRem(d2, prec) + q, r := d.QuoRem(d2, prec) // rule 1: d = d2*q +r if !d.Equal(d2.mul(q).Add(r)) { t.Errorf("not fitting, d=%v, d2=%v, prec=%d, q=%v, r=%v", d, d2, prec, q, r) } // rule 2: q is integral multiple of 10^(-prec) - if !q.Equal(q.truncate(prec)) { + if !q.Equal(q.Truncate(prec)) { t.Errorf("quotient wrong precision, d=%v, d2=%v, prec=%d, q=%v, r=%v", d, d2, prec, q, r) } // rule 3: abs(r)= 0 { + if r.Abs().Cmp(d2.Abs().mul(New(1, -prec))) >= 0 { t.Errorf("remainder too large, d=%v, d2=%v, prec=%d, q=%v, r=%v", d, d2, prec, q, r) } @@ -879,11 +887,11 @@ func TestDecimal_DivRound(t *testing.T) { if sign(q)*sign(d)*sign(d2) < 0 { t.Errorf("sign of quotient wrong, got: %v/%v is about %v", d, d2, q) } - x := q.mul(d2).abs().sub(d.abs()).mul(New(2, 0)) - if x.Cmp(d2.abs().mul(New(1, -prec))) > 0 { + x := q.mul(d2).Abs().sub(d.Abs()).mul(New(2, 0)) + if x.Cmp(d2.Abs().mul(New(1, -prec))) > 0 { t.Errorf("wrong rounding, got: %v/%v prec=%d is about %v", d, d2, prec, q) } - if x.Cmp(d2.abs().mul(New(-1, -prec))) <= 0 { + if x.Cmp(d2.Abs().mul(New(-1, -prec))) <= 0 { t.Errorf("wrong rounding, got: %v/%v prec=%d is about %v", d, d2, prec, q) } if !q.Equal(result) { @@ -904,11 +912,11 @@ func TestDecimal_DivRound2(t *testing.T) { if sign(q)*sign(d)*sign(d2) < 0 { t.Errorf("sign of quotient wrong, got: %v/%v is about %v", d, d2, q) } - x := q.mul(d2).abs().sub(d.abs()).mul(New(2, 0)) - if x.Cmp(d2.abs().mul(New(1, -prec))) > 0 { + x := q.mul(d2).Abs().sub(d.Abs()).mul(New(2, 0)) + if x.Cmp(d2.Abs().mul(New(1, -prec))) > 0 { t.Errorf("wrong rounding, got: %v/%v prec=%d is about %v", d, d2, prec, q) } - if x.Cmp(d2.abs().mul(New(-1, -prec))) <= 0 { + if x.Cmp(d2.Abs().mul(New(-1, -prec))) <= 0 { t.Errorf("wrong rounding, got: %v/%v prec=%d is about %v", d, d2, prec, q) } } @@ -973,7 +981,7 @@ func TestDecimal_Abs1(t *testing.T) { a := New(-1234, -4) b := New(1234, -4) - c := a.abs() + c := a.Abs() if c.Cmp(b) != 0 { t.Errorf("error") } @@ -983,7 +991,7 @@ func TestDecimal_Abs2(t *testing.T) { a := New(-1234, -4) b := New(1234, -4) - c := b.abs() + c := b.Abs() if c.Cmp(a) == 0 { t.Errorf("error") } diff --git a/go/vt/vtgate/evalengine/internal/decimal/format.go b/go/mysql/decimal/format.go similarity index 93% rename from go/vt/vtgate/evalengine/internal/decimal/format.go rename to go/mysql/decimal/format.go index 4a39dfb59c9..27ed2f83b4a 100644 --- a/go/vt/vtgate/evalengine/internal/decimal/format.go +++ b/go/mysql/decimal/format.go @@ -86,6 +86,7 @@ func (d *Decimal) formatSlow(trim bool) []byte { } var zeroByte = []byte{'0'} +var oneByte = []byte{'1'} const smallsString = "00010203040506070809" + "10111213141516171819" + @@ -170,28 +171,41 @@ func (d *Decimal) formatFast(prec int, round bool, trim bool) []byte { // Let's adjust prec accordingly based on the exponent for the number // and iprec, which is the precision of our mantissa iprec := len(integral) + adj := int(d.exp) + iprec if d.exp > 0 { - prec += int(d.exp) + iprec + prec += adj } else { - if adj := int(d.exp) + iprec; adj > -prec { + if adj > -prec { prec += adj } else { prec = -prec } } - if prec > 0 { + switch { + case prec > 0: var ovf int // if prec > 0, perform string-based rounding on the integral to integral, ovf = roundString(integral, prec) exp = int(d.exp) + iprec - len(integral) + ovf sign = d.value.Sign() - } else if prec < 0 { - integral = nil + + case prec < 0: + // do not truncate to 0 if the precision is exactly equal to the adjustment. + // instead, we need to round based on the first digit of the integral + // part, which will be the last digit in the decimal + if adj == prec && integral[0] >= '5' { + integral = oneByte + sign = d.value.Sign() + } else { + integral = nil + } prec = -prec exp = -prec - } else { + + default: integral = zeroByte } + } else { exp = int(d.exp) sign = d.value.Sign() diff --git a/go/vt/vtgate/evalengine/internal/decimal/helpers.go b/go/mysql/decimal/helpers.go similarity index 100% rename from go/vt/vtgate/evalengine/internal/decimal/helpers.go rename to go/mysql/decimal/helpers.go diff --git a/go/vt/vtgate/evalengine/internal/decimal/mysql_test.go b/go/mysql/decimal/mysql_test.go similarity index 96% rename from go/vt/vtgate/evalengine/internal/decimal/mysql_test.go rename to go/mysql/decimal/mysql_test.go index bc747f79c4b..1668c4377db 100644 --- a/go/vt/vtgate/evalengine/internal/decimal/mysql_test.go +++ b/go/mysql/decimal/mysql_test.go @@ -428,6 +428,7 @@ func BenchmarkFormatting(b *testing.B) { var bigBases = []uint64{ 3141592653589793238, + 6283185307179586476, math.MaxUint64, 1, 1000000000000000000, @@ -456,19 +457,21 @@ func TestFormatFast(t *testing.T) { func TestFormatAndRound(t *testing.T) { for _, neg := range []bool{false, true} { - b := new(big.Int).SetUint64(bigBases[0]) - if neg { - b = b.Neg(b) - } - for prec := int32(1); prec < 32; prec++ { - for exp := -100; exp <= 100; exp++ { - var d = Decimal{value: b, exp: int32(exp)} + for _, base := range bigBases { + b := new(big.Int).SetUint64(base) + if neg { + b = b.Neg(b) + } + for prec := int32(1); prec < 32; prec++ { + for exp := -100; exp <= 100; exp++ { + var d = Decimal{value: b, exp: int32(exp)} - expect := d.StringFixed(prec) - got := string(d.formatFast(int(prec), true, false)) + expect := d.StringFixed(prec) + got := string(d.formatFast(int(prec), true, false)) - if expect != got { - t.Errorf("base: %de%d prec %d\nwant: %q\ngot: %q", bigBases[0], exp, prec, expect, got) + if expect != got { + t.Errorf("base: %de%d prec %d\nwant: %q\ngot: %q", b, exp, prec, expect, got) + } } } } diff --git a/go/vt/vtgate/evalengine/internal/decimal/scan.go b/go/mysql/decimal/scan.go similarity index 73% rename from go/vt/vtgate/evalengine/internal/decimal/scan.go rename to go/mysql/decimal/scan.go index 27144298972..761eea5cdcf 100644 --- a/go/vt/vtgate/evalengine/internal/decimal/scan.go +++ b/go/mysql/decimal/scan.go @@ -23,8 +23,8 @@ import ( "math" "math/big" "math/bits" - "strconv" - "strings" + + "vitess.io/vitess/go/mysql/fastparse" ) var errOverflow = errors.New("overflow") @@ -138,83 +138,118 @@ func NewFromMySQL(s []byte) (Decimal, error) { return Decimal{value: value, exp: -int32(len(fractional))}, nil } +const ExponentLimit = 1024 + // NewFromString returns a new Decimal from a string representation. // Trailing zeroes are not trimmed. +// In case of an error, we still return the parsed value up to that +// point. // // Example: // // d, err := NewFromString("-123.45") // d2, err := NewFromString(".0001") // d3, err := NewFromString("1.47000") -func NewFromString(value string) (Decimal, error) { - originalInput := value - var intString string +func NewFromString(s string) (d Decimal, err error) { + maxLen := len(s) + if maxLen > math.MaxInt32 { + maxLen = math.MaxInt32 + } + + dotPos := -1 + expPos := -1 + i := 0 + var num bool var exp int64 - // Check if number is using scientific notation - eIndex := strings.IndexAny(value, "Ee") - if eIndex != -1 { - expInt, err := strconv.ParseInt(value[eIndex+1:], 10, 32) - if err != nil { - if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { - return Decimal{}, fmt.Errorf("can't convert %s to decimal: fractional part too long", value) - } - return Decimal{}, fmt.Errorf("can't convert %s to decimal: exponent is not numeric", value) + for i < maxLen { + if !isSpace(s[i]) { + break } - value = value[:eIndex] - exp = expInt + i++ } - - pIndex := -1 - vLen := len(value) - for i := 0; i < vLen; i++ { - if value[i] == '.' { - if pIndex > -1 { - return Decimal{}, fmt.Errorf("can't convert %s to decimal: too many .s", value) +next: + for i < maxLen { + switch { + case s[i] == '-': + // Negative sign is allowed at the start and at the start + // of the exponent. + if i != 0 && expPos == -1 && i != expPos+1 { + break next } - pIndex = i + case s[i] >= '0' && s[i] <= '9': + num = true + case s[i] == '.': + if dotPos == -1 && expPos == -1 { + dotPos = i + } else { + break next + } + case s[i] == 'e' || s[i] == 'E': + if expPos == -1 { + expPos = i + num = false + } else { + break next + } + default: + break next } + i++ } - if pIndex == -1 { - // There is no decimal point, we can just parse the original string as - // an int - intString = value - } else { - if pIndex+1 < vLen { - intString = value[:pIndex] + value[pIndex+1:] - } else { - intString = value[:pIndex] - } - expInt := -len(value[pIndex+1:]) - exp += int64(expInt) + // If we have a small total string or until the first dot, + // we can fast parse it as an integer. + var si string + switch { + case dotPos == -1 && expPos == -1: + si = s[:i] + case expPos == -1: + si = s[:dotPos] + s[dotPos+1:i] + exp -= int64(i - dotPos - 1) + case dotPos == -1: + si = s[:expPos] + default: + si = s[:dotPos] + s[dotPos+1:expPos] + exp -= int64(expPos - dotPos - 1) } - var dValue *big.Int - // strconv.ParseInt is faster than new(big.Int).SetString so this is just a shortcut for strings we know won't overflow - if len(intString) <= 18 { - parsed64, err := strconv.ParseInt(intString, 10, 64) - if err != nil { - return Decimal{}, fmt.Errorf("can't convert %s to decimal", value) - } - dValue = big.NewInt(parsed64) + if len(si) <= 18 { + var v int64 + v, err = fastparse.ParseInt64(si, 10) + d.value = big.NewInt(v) } else { - dValue = new(big.Int) - _, ok := dValue.SetString(intString, 10) - if !ok { - return Decimal{}, fmt.Errorf("can't convert %s to decimal", value) + d.value = new(big.Int) + d.value.SetString(si, 10) + } + + var expOverflow bool + if expPos != -1 { + e, _ := fastparse.ParseInt64(s[expPos+1:i], 10) + switch { + case e > ExponentLimit: + e = ExponentLimit + expOverflow = true + case e < -ExponentLimit: + e = -ExponentLimit + expOverflow = true } + exp += e } - if exp < math.MinInt32 || exp > math.MaxInt32 { - // NOTE(vadim): I doubt a string could realistically be this long - return Decimal{}, fmt.Errorf("can't convert %s to decimal: fractional part too long", originalInput) + d.exp = int32(exp) + + for i < maxLen { + if !isSpace(s[i]) { + break + } + i++ } - return Decimal{ - value: dValue, - exp: int32(exp), - }, nil + if !num || i < maxLen || expOverflow { + err = fmt.Errorf("invalid decimal string: %q", s) + } + return d, err } // RequireFromString returns a new Decimal from a string representation @@ -326,3 +361,12 @@ func parseLargeDecimal(integral, fractional []byte) (*big.Int, error) { } return new(big.Int).SetBits(z), nil } + +func isSpace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + default: + return false + } +} diff --git a/go/vt/vtgate/evalengine/internal/decimal/testdata/large_pi_decimals.json b/go/mysql/decimal/testdata/large_pi_decimals.json similarity index 100% rename from go/vt/vtgate/evalengine/internal/decimal/testdata/large_pi_decimals.json rename to go/mysql/decimal/testdata/large_pi_decimals.json diff --git a/go/mysql/decimal/weights.go b/go/mysql/decimal/weights.go new file mode 100644 index 00000000000..9b8f43a0c65 --- /dev/null +++ b/go/mysql/decimal/weights.go @@ -0,0 +1,56 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package decimal + +// Our weight string format is normalizing the weight string to a fixed length, +// so it becomes byte-ordered. The byte lengths are pre-computed based on +// https://dev.mysql.com/doc/refman/8.0/en/fixed-point-types.html +// and generated empirically with a manual loop: +// +// for i := 1; i <= 65; i++ { +// dec, err := NewFromMySQL(bytes.Repeat([]byte("9"), i)) +// if err != nil { +// t.Fatal(err) +// } +// +// byteLengths = append(byteLengths, len(dec.value.Bytes())) +// } +var weightStringLengths = []int{ + 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7, 7, 8, 8, 8, + 9, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13, 13, 14, 14, 15, 15, 15, + 16, 16, 17, 17, 18, 18, 18, 19, 19, 20, 20, 20, 21, 21, 22, 22, + 23, 23, 23, 24, 24, 25, 25, 25, 26, 26, 27, 27, 27, +} + +func (d Decimal) WeightString(dst []byte, length, precision int32) []byte { + dec := d.rescale(-precision) + dec = dec.Clamp(length-precision, precision) + + buf := make([]byte, weightStringLengths[length]+1) + dec.value.FillBytes(buf[:]) + + if dec.value.Sign() < 0 { + for i := range buf { + buf[i] ^= 0xff + } + } + // Use the same trick as used for signed numbers on the first byte. + buf[0] ^= 0x80 + + dst = append(dst, buf[:]...) + return dst +} diff --git a/go/mysql/encoding.go b/go/mysql/encoding.go index 9ebf301d95b..c79580acb39 100644 --- a/go/mysql/encoding.go +++ b/go/mysql/encoding.go @@ -214,6 +214,30 @@ func readUint64(data []byte, pos int) (uint64, int, bool) { return binary.LittleEndian.Uint64(data[pos : pos+8]), pos + 8, true } +// readFixedLenUint64 reads a uint64 from a fixed-length slice +// of bytes in little endian format. +// This is used for variable length fields in MySQL packets that +// are always stored in 1, 3, 4, or 9 bytes -- with the first +// byte skipped when the length is > 1 byte. +// It returns the read value and a boolean indicating if the +// read failed. +func readFixedLenUint64(data []byte) (uint64, bool) { + switch len(data) { + case 1: // 1 byte + return uint64(uint8(data[0])), true + case 3: // 2 bytes + return uint64(binary.LittleEndian.Uint16(data[1:])), true + case 4: // 3 bytes + return uint64(data[1]) | + uint64(data[2])<<8 | + uint64(data[3])<<16, true + case 9: // 8 bytes + return binary.LittleEndian.Uint64(data[1:]), true + default: + return uint64(0), false + } +} + func readLenEncInt(data []byte, pos int) (uint64, int, bool) { if pos >= len(data) { return 0, 0, false diff --git a/go/mysql/endtoend/client_test.go b/go/mysql/endtoend/client_test.go index a48c9629d51..6591c454e8a 100644 --- a/go/mysql/endtoend/client_test.go +++ b/go/mysql/endtoend/client_test.go @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/mysql/sqlerror" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" @@ -73,9 +75,9 @@ func TestKill(t *testing.T) { // will differ. err = <-errChan if strings.Contains(err.Error(), "EOF") { - assertSQLError(t, err, mysql.CRServerLost, mysql.SSUnknownSQLState, "EOF", "select sleep(10) from dual") + assertSQLError(t, err, sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "EOF", "select sleep(10) from dual") } else { - assertSQLError(t, err, mysql.CRServerLost, mysql.SSUnknownSQLState, "", "connection reset by peer") + assertSQLError(t, err, sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "", "connection reset by peer") } } @@ -104,7 +106,7 @@ func TestKill2006(t *testing.T) { // unix socket, we will get a broken pipe when the server // closes the connection and we are trying to write the command. _, err = conn.ExecuteFetch("select sleep(10) from dual", 1000, false) - assertSQLError(t, err, mysql.CRServerGone, mysql.SSUnknownSQLState, "broken pipe", "select sleep(10) from dual") + assertSQLError(t, err, sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "broken pipe", "select sleep(10) from dual") } // TestDupEntry tests a duplicate key is properly raised. @@ -123,7 +125,7 @@ func TestDupEntry(t *testing.T) { t.Fatalf("first insert failed: %v", err) } _, err = conn.ExecuteFetch("insert into dup_entry(id, name) values(2, 10)", 0, false) - assertSQLError(t, err, mysql.ERDupEntry, mysql.SSConstraintViolation, "Duplicate entry", "insert into dup_entry(id, name) values(2, 10)") + assertSQLError(t, err, sqlerror.ERDupEntry, sqlerror.SSConstraintViolation, "Duplicate entry", "insert into dup_entry(id, name) values(2, 10)") } // TestClientFoundRows tests if the CLIENT_FOUND_ROWS flag works. diff --git a/go/mysql/endtoend/main_test.go b/go/mysql/endtoend/main_test.go index 9079ff5ef6d..466735c02e4 100644 --- a/go/mysql/endtoend/main_test.go +++ b/go/mysql/endtoend/main_test.go @@ -27,6 +27,8 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql" vtenv "vitess.io/vitess/go/vt/env" "vitess.io/vitess/go/vt/mysqlctl" @@ -41,11 +43,11 @@ var ( ) // assertSQLError makes sure we get the right error. -func assertSQLError(t *testing.T, err error, code mysql.ErrorCode, sqlState string, subtext string, query string) { +func assertSQLError(t *testing.T, err error, code sqlerror.ErrorCode, sqlState string, subtext string, query string) { t.Helper() require.Error(t, err, "was expecting SQLError %v / %v / %v but got no error.", code, sqlState, subtext) - serr, ok := err.(*mysql.SQLError) + serr, ok := err.(*sqlerror.SQLError) require.True(t, ok, "was expecting SQLError %v / %v / %v but got: %v", code, sqlState, subtext, err) require.Equal(t, code, serr.Num, "was expecting SQLError %v / %v / %v but got code %v", code, sqlState, subtext, serr.Num) require.Equal(t, sqlState, serr.State, "was expecting SQLError %v / %v / %v but got state %v", code, sqlState, subtext, serr.State) @@ -66,27 +68,13 @@ func runMysql(t *testing.T, params *mysql.ConnParams, command string) (string, b // In particular, it has the message: // Query OK, 1 row affected (0.00 sec) - version, getErr := mysqlctl.GetVersionString() + version, err := mysqlctl.GetVersionString() + if err != nil { + failVersionDetection(err) + } f, v, err := mysqlctl.ParseVersionString(version) - - if getErr != nil || err != nil { - f, v, err = mysqlctl.GetVersionFromEnv() - if err != nil { - vtenvMysqlRoot, _ := vtenv.VtMysqlRoot() - message := fmt.Sprintf(`could not auto-detect MySQL version. You may need to set your PATH so a mysqld binary can be found, or set the environment variable MYSQL_FLAVOR if mysqld is not available locally: - PATH: %s - VT_MYSQL_ROOT: %s - VTROOT: %s - vtenv.VtMysqlRoot(): %s - MYSQL_FLAVOR: %s - `, - os.Getenv("PATH"), - os.Getenv("VT_MYSQL_ROOT"), - os.Getenv("VTROOT"), - vtenvMysqlRoot, - os.Getenv("MYSQL_FLAVOR")) - panic(message) - } + if err != nil { + failVersionDetection(err) } t.Logf("Using flavor: %v, version: %v", f, v) @@ -237,3 +225,20 @@ ssl-key=%v/server-key.pem }() os.Exit(exitCode) } + +func failVersionDetection(err error) { + vtenvMysqlRoot, _ := vtenv.VtMysqlRoot() + message := fmt.Sprintf(`could not auto-detect MySQL version: %v +You may need to set your PATH so a mysqld binary can be found: + PATH: %s + VT_MYSQL_ROOT: %s + VTROOT: %s + vtenv.VtMysqlRoot(): %s + `, + err, + os.Getenv("PATH"), + os.Getenv("VT_MYSQL_ROOT"), + os.Getenv("VTROOT"), + vtenvMysqlRoot) + panic(message) +} diff --git a/go/mysql/endtoend/query_test.go b/go/mysql/endtoend/query_test.go index 7565c2913e9..576960f2acb 100644 --- a/go/mysql/endtoend/query_test.go +++ b/go/mysql/endtoend/query_test.go @@ -26,6 +26,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" @@ -39,7 +41,7 @@ const ( func columnSize(cs collations.ID, size uint32) uint32 { // utf8_general_ci results in smaller max column sizes because MySQL 5.7 is silly - if cs.Get().Charset().Name() == "utf8mb3" { + if colldata.Lookup(cs).Charset().Name() == "utf8mb3" { return size * 3 / 4 } return size @@ -321,6 +323,5 @@ func TestSysInfo(t *testing.T) { func getDefaultCollationID() collations.ID { collationHandler := collations.Local() - collation := collationHandler.DefaultCollationForCharset(charsetName) - return collation.ID() + return collationHandler.DefaultCollationForCharset(charsetName) } diff --git a/go/mysql/endtoend/replication_test.go b/go/mysql/endtoend/replication_test.go index 9664d7a31ec..0c1fa006347 100644 --- a/go/mysql/endtoend/replication_test.go +++ b/go/mysql/endtoend/replication_test.go @@ -18,6 +18,7 @@ package endtoend import ( "bytes" + "context" "fmt" "reflect" "strings" @@ -28,13 +29,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/vt/vtgate/evalengine" - - "context" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/binlog" "vitess.io/vitess/go/sqltypes" - querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -72,7 +71,7 @@ func connectForReplication(t *testing.T, rbr bool) (*mysql.Conn, mysql.BinlogFor t.Fatalf("SHOW MASTER STATUS returned unexpected result: %v", result) } file := result.Rows[0][0].ToString() - position, err := evalengine.ToUint64(result.Rows[0][1]) + position, err := result.Rows[0][1].ToCastUint64() require.NoError(t, err, "SHOW MASTER STATUS returned invalid position: %v", result.Rows[0][1]) // Tell the server that we understand the format of events @@ -128,9 +127,9 @@ func TestReplicationConnectionClosing(t *testing.T) { for { data, err := conn.ReadPacket() if err != nil { - serr, ok := err.(*mysql.SQLError) - assert.True(t, ok, "Got a non mysql.SQLError error: %v", err) - assert.Equal(t, mysql.CRServerLost, serr.Num, "Got an unexpected mysql.SQLError error: %v", serr) + serr, ok := err.(*sqlerror.SQLError) + assert.True(t, ok, "Got a non sqlerror.SQLError error: %v", err) + assert.Equal(t, sqlerror.CRServerLost, serr.Num, "Got an unexpected sqlerror.SQLError error: %v", serr) // we got the right error, all good. return @@ -846,11 +845,9 @@ func TestRowReplicationTypes(t *testing.T) { createType: "JSON", createValue: "'-2147483649'", }, { - name: "json19", - createType: "JSON", - // FIXME: was "'18446744073709551615'", unsigned int representation differs from MySQL's which saves this as select 1.8446744073709552e19 - // probably need to replace the json library: "github.com/spyzhov/ajson" - createValue: "'18446744073709551616'", + name: "json19", + createType: "JSON", + createValue: "'18446744073709551615'", }, { name: "json20", createType: "JSON", @@ -1007,7 +1004,7 @@ func TestRowReplicationTypes(t *testing.T) { if values[i+1].Type() != querypb.Type_EXPRESSION { require.NoError(t, err) } - if values[i+1].Type() == querypb.Type_TIMESTAMP && !bytes.HasPrefix(valueBytes, mysql.ZeroTimestamp) { + if values[i+1].Type() == querypb.Type_TIMESTAMP && !bytes.HasPrefix(valueBytes, binlog.ZeroTimestamp) { // Values in the binary log are UTC. Let's convert them // to whatever timezone the connection is using, // so MySQL properly converts them back to UTC. @@ -1075,7 +1072,7 @@ func valuesForTests(t *testing.T, rs *mysql.Rows, tm *mysql.TableMap, rowIndex i } // We have real data - value, l, err := mysql.CellValue(data, pos, tm.Types[c], tm.Metadata[c], &querypb.Field{Type: querypb.Type_UINT64}) + value, l, err := binlog.CellValue(data, pos, tm.Types[c], tm.Metadata[c], &querypb.Field{Type: querypb.Type_UINT64}) if err != nil { return nil, err } diff --git a/go/mysql/endtoend/schema_change_test.go b/go/mysql/endtoend/schema_change_test.go index 7e58852d176..a9e72aaef5b 100644 --- a/go/mysql/endtoend/schema_change_test.go +++ b/go/mysql/endtoend/schema_change_test.go @@ -22,6 +22,7 @@ import ( "strings" "testing" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/vt/sqlparser" "github.com/stretchr/testify/require" @@ -41,6 +42,10 @@ func TestChangeSchemaIsNoticed(t *testing.T) { require.NoError(t, err) defer conn.Close() + clearQuery := sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecar.GetIdentifier()).Query + insertQuery := sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecar.GetIdentifier()).Query + detectQuery := sqlparser.BuildParsedQuery(mysql.DetectSchemaChange, sidecar.GetIdentifier()).Query + tests := []struct { name string changeQ string @@ -85,18 +90,18 @@ func TestChangeSchemaIsNoticed(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { // reset schemacopy - _, err := conn.ExecuteFetch(mysql.ClearSchemaCopy, 1000, true) + _, err := conn.ExecuteFetch(clearQuery, 1000, true) require.NoError(t, err) _, err = conn.ExecuteFetch(dropTestTable, 1000, true) require.NoError(t, err) _, err = conn.ExecuteFetch(createUserTable, 1000, true) require.NoError(t, err) - rs, err := conn.ExecuteFetch(mysql.InsertIntoSchemaCopy, 1000, true) + rs, err := conn.ExecuteFetch(insertQuery, 1000, true) require.NoError(t, err) require.NotZero(t, rs.RowsAffected) // make sure no changes are detected - rs, err = conn.ExecuteFetch(mysql.DetectSchemaChange, 1000, true) + rs, err = conn.ExecuteFetch(detectQuery, 1000, true) require.NoError(t, err) require.Empty(t, rs.Rows) @@ -107,7 +112,7 @@ func TestChangeSchemaIsNoticed(t *testing.T) { } // make sure the change is detected - rs, err = conn.ExecuteFetch(mysql.DetectSchemaChange, 1000, true) + rs, err = conn.ExecuteFetch(detectQuery, 1000, true) require.NoError(t, err) require.NotEmpty(t, rs.Rows) @@ -117,8 +122,8 @@ func TestChangeSchemaIsNoticed(t *testing.T) { tables = append(tables, "table_name = "+sqlparser.String(apa)) } tableNamePredicates := strings.Join(tables, " OR ") - del := fmt.Sprintf("%s AND %s", mysql.ClearSchemaCopy, tableNamePredicates) - upd := fmt.Sprintf("%s AND %s", mysql.InsertIntoSchemaCopy, tableNamePredicates) + del := fmt.Sprintf("%s AND %s", clearQuery, tableNamePredicates) + upd := fmt.Sprintf("%s AND %s", insertQuery, tableNamePredicates) _, err = conn.ExecuteFetch(del, 1000, true) require.NoError(t, err) @@ -126,7 +131,7 @@ func TestChangeSchemaIsNoticed(t *testing.T) { require.NoError(t, err) // make sure the change is detected - rs, err = conn.ExecuteFetch(mysql.DetectSchemaChange, 1000, true) + rs, err = conn.ExecuteFetch(detectQuery, 1000, true) require.NoError(t, err) require.Empty(t, rs.Rows) }) diff --git a/go/mysql/fakesqldb/server.go b/go/mysql/fakesqldb/server.go index 5b00b2c2e01..cb3d20ae04b 100644 --- a/go/mysql/fakesqldb/server.go +++ b/go/mysql/fakesqldb/server.go @@ -25,9 +25,11 @@ import ( "regexp" "strings" "sync" + "sync/atomic" "testing" "time" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/log" @@ -67,7 +69,7 @@ type DB struct { acceptWG sync.WaitGroup // orderMatters is set when the query order matters. - orderMatters bool + orderMatters atomic.Bool // Fields set at runtime. @@ -77,16 +79,16 @@ type DB struct { // Use SetName() to change. name string // isConnFail trigger a panic in the connection handler. - isConnFail bool + isConnFail atomic.Bool // connDelay causes a sleep in the connection handler connDelay time.Duration // shouldClose, if true, tells ComQuery() to close the connection when // processing the next query. This will trigger a MySQL client error with // errno 2013 ("server lost"). - shouldClose bool - // AllowAll: if set to true, ComQuery returns an empty result + shouldClose atomic.Bool + // allowAll: if set to true, ComQuery returns an empty result // for all queries. This flag is used for benchmarking. - AllowAll bool + allowAll atomic.Bool // Handler: interface that allows a caller to override the query handling // implementation. By default it points to the DB itself @@ -122,7 +124,11 @@ type DB struct { // if fakesqldb is asked to serve queries or query patterns that it has not been explicitly told about it will // error out by default. However if you set this flag then any unmatched query results in an empty result - neverFail bool + neverFail atomic.Bool + + // lastError stores the last error in returning a query result. + lastErrorMu sync.Mutex + lastError error } // QueryHandler is the interface used by the DB to simulate executed queries @@ -175,6 +181,7 @@ func New(t testing.TB) *DB { connections: make(map[uint32]*mysql.Conn), queryPatternUserCallback: make(map[*regexp.Regexp]func(string)), patternData: make(map[string]exprResult), + lastErrorMu: sync.Mutex{}, } db.Handler = db @@ -182,7 +189,7 @@ func New(t testing.TB) *DB { authServer := mysql.NewAuthServerNone() // Start listening. - db.listener, err = mysql.NewListener("unix", socketFile, authServer, db, 0, 0, false, false) + db.listener, err = mysql.NewListener("unix", socketFile, authServer, db, 0, 0, false, false, 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } @@ -216,12 +223,8 @@ func (db *DB) SetName(name string) *DB { } // OrderMatters sets the orderMatters flag. -func (db *DB) OrderMatters() *DB { - db.mu.Lock() - defer db.mu.Unlock() - - db.orderMatters = true - return db +func (db *DB) OrderMatters() { + db.orderMatters.Store(true) } // Close closes the Listener and waits for it to stop accepting. @@ -248,6 +251,13 @@ func (db *DB) CloseAllConnections() { } } +// LastError gives the last error the DB ran into +func (db *DB) LastError() error { + db.lastErrorMu.Lock() + defer db.lastErrorMu.Unlock() + return db.lastError +} + // WaitForClose should be used after CloseAllConnections() is closed and // you want to provoke a MySQL client error with errno 2006. // @@ -309,7 +319,7 @@ func (db *DB) NewConnection(c *mysql.Conn) { db.mu.Lock() defer db.mu.Unlock() - if db.isConnFail { + if db.isConnFail.Load() { panic(fmt.Errorf("simulating a connection failure")) } @@ -345,12 +355,19 @@ func (db *DB) WarningCount(c *mysql.Conn) uint16 { } // HandleQuery is the default implementation of the QueryHandler interface -func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.Result) error) error { - if db.AllowAll { +func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.Result) error) (err error) { + defer func() { + if err != nil { + db.lastErrorMu.Lock() + db.lastError = err + db.lastErrorMu.Unlock() + } + }() + if db.allowAll.Load() { return callback(&sqltypes.Result{}) } - if db.orderMatters { + if db.orderMatters.Load() { result, err := db.comQueryOrdered(query) if err != nil { return err @@ -363,10 +380,10 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R db.queryCalled[key]++ db.querylog = append(db.querylog, key) // Check if we should close the connection and provoke errno 2013. - if db.shouldClose { + if db.shouldClose.Load() { c.Close() - //log error + // log error if err := callback(&sqltypes.Result{}); err != nil { log.Errorf("callback failed : %v", err) } @@ -377,7 +394,7 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R // The driver may send this at connection time, and we don't want it to // interfere. if key == "set names utf8" || strings.HasPrefix(key, "set collation_connection = ") { - //log error + // log error if err := callback(&sqltypes.Result{}); err != nil { log.Errorf("callback failed : %v", err) } @@ -412,11 +429,11 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R } } - if db.neverFail { + if db.neverFail.Load() { return callback(&sqltypes.Result{}) } // Nothing matched. - err := fmt.Errorf("fakesqldb:: query: '%s' is not supported on %v", + err = fmt.Errorf("fakesqldb:: query: '%s' is not supported on %v", sqlparser.TruncateForUI(query), db.name) log.Errorf("Query not found: %s", sqlparser.TruncateForUI(query)) @@ -450,10 +467,10 @@ func (db *DB) comQueryOrdered(query string) (*sqltypes.Result, error) { index := db.expectedExecuteFetchIndex if index >= len(db.expectedExecuteFetch) { - if db.neverFail { + if db.neverFail.Load() { return &sqltypes.Result{}, nil } - db.t.Errorf("%v: got unexpected out of bound fetch: %v >= %v", db.name, index, len(db.expectedExecuteFetch)) + db.t.Errorf("%v: got unexpected out of bound fetch: %v >= %v (%s)", db.name, index, len(db.expectedExecuteFetch), query) return nil, errors.New("unexpected out of bound fetch") } @@ -465,7 +482,7 @@ func (db *DB) comQueryOrdered(query string) (*sqltypes.Result, error) { if strings.HasSuffix(expected, "*") { if !strings.HasPrefix(query, expected[0:len(expected)-1]) { - if db.neverFail { + if db.neverFail.Load() { return &sqltypes.Result{}, nil } db.t.Errorf("%v: got unexpected query start (index=%v): %v != %v", db.name, index, query, expected) @@ -473,7 +490,7 @@ func (db *DB) comQueryOrdered(query string) (*sqltypes.Result, error) { } } else { if query != expected { - if db.neverFail { + if db.neverFail.Load() { return &sqltypes.Result{}, nil } db.t.Errorf("%v: got unexpected query (index=%v): %v != %v", db.name, index, query, expected) @@ -511,7 +528,7 @@ func (db *DB) ComBinlogDump(c *mysql.Conn, logFile string, binlogPos uint32) err } // ComBinlogDumpGTID is part of the mysql.Handler interface. -func (db *DB) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet mysql.GTIDSet) error { +func (db *DB) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error { return nil } @@ -619,26 +636,26 @@ func (db *DB) GetQueryCalledNum(query string) int { // QueryLog returns the query log in a semicomma separated string func (db *DB) QueryLog() string { + db.mu.Lock() + defer db.mu.Unlock() return strings.Join(db.querylog, ";") } // ResetQueryLog resets the query log func (db *DB) ResetQueryLog() { + db.mu.Lock() + defer db.mu.Unlock() db.querylog = nil } // EnableConnFail makes connection to this fake DB fail. func (db *DB) EnableConnFail() { - db.mu.Lock() - defer db.mu.Unlock() - db.isConnFail = true + db.isConnFail.Store(true) } // DisableConnFail makes connection to this fake DB success. func (db *DB) DisableConnFail() { - db.mu.Lock() - defer db.mu.Unlock() - db.isConnFail = false + db.isConnFail.Store(false) } // SetConnDelay delays connections to this fake DB for the given duration @@ -650,9 +667,7 @@ func (db *DB) SetConnDelay(d time.Duration) { // EnableShouldClose closes the connection when processing the next query. func (db *DB) EnableShouldClose() { - db.mu.Lock() - defer db.mu.Unlock() - db.shouldClose = true + db.shouldClose.Store(true) } // @@ -757,8 +772,12 @@ func (db *DB) VerifyAllExecutedOrFail() { } } +func (db *DB) SetAllowAll(allowAll bool) { + db.allowAll.Store(allowAll) +} + func (db *DB) SetNeverFail(neverFail bool) { - db.neverFail = neverFail + db.neverFail.Store(neverFail) } func (db *DB) MockQueriesForTable(table string, result *sqltypes.Result) { @@ -783,3 +802,40 @@ func (db *DB) MockQueriesForTable(table string, result *sqltypes.Result) { cols..., )) } + +// GetRejectedQueryResult checks if we should reject the query. +func (db *DB) GetRejectedQueryResult(key string) error { + if err, ok := db.rejectedData[key]; ok { + return err + } + + return nil +} + +// GetQueryResult checks for explicit queries add through AddQuery(). +func (db *DB) GetQueryResult(key string) *ExpectedResult { + result, ok := db.data[key] + if ok { + return result + } + return nil +} + +// GetQueryPatternResult checks if a query matches any pattern previously added using AddQueryPattern(). +func (db *DB) GetQueryPatternResult(key string) (func(string), ExpectedResult, bool, error) { + for _, pat := range db.patternData { + if pat.expr.MatchString(key) { + userCallback, ok := db.queryPatternUserCallback[pat.expr] + if ok { + if pat.err != "" { + return userCallback, ExpectedResult{pat.result, nil}, true, fmt.Errorf(pat.err) + } + return userCallback, ExpectedResult{pat.result, nil}, true, nil + } + + return nil, ExpectedResult{nil, nil}, false, nil + } + } + + return nil, ExpectedResult{nil, nil}, false, nil +} diff --git a/go/mysql/fastparse/fastparse.go b/go/mysql/fastparse/fastparse.go new file mode 100644 index 00000000000..33aa16105c2 --- /dev/null +++ b/go/mysql/fastparse/fastparse.go @@ -0,0 +1,266 @@ +/* +Copyright 2018 Aliaksandr Valialkin +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fastparse + +import ( + "errors" + "fmt" + "math" + "strconv" + + "vitess.io/vitess/go/hack" +) + +// ParseUint64 parses uint64 from s. +// +// It is equivalent to strconv.ParseUint(s, base, 64) in case it succeeds, +// but on error it will return the best effort value of what it has parsed so far. +func ParseUint64(s string, base int) (uint64, error) { + if len(s) == 0 { + return 0, fmt.Errorf("cannot parse uint64 from empty string") + } + if base < 2 || base > 36 { + return 0, fmt.Errorf("invalid base %d; must be in [2, 36]", base) + } + i := uint(0) + for i < uint(len(s)) { + if !isSpace(s[i]) { + break + } + i++ + } + + d := uint64(0) + j := i +next: + for i < uint(len(s)) { + var b byte + switch { + case s[i] >= '0' && s[i] <= '9': + b = s[i] - '0' + case s[i] >= 'a' && s[i] <= 'z': + b = s[i] - 'a' + 10 + case s[i] >= 'A' && s[i] <= 'Z': + b = s[i] - 'A' + 10 + default: + break next + } + + if b >= byte(base) { + break next + } + + var cutoff uint64 + switch base { + case 10: + cutoff = math.MaxUint64/10 + 1 + case 16: + cutoff = math.MaxUint64/16 + 1 + default: + cutoff = math.MaxUint64/uint64(base) + 1 + } + if d >= cutoff { + return math.MaxUint64, fmt.Errorf("cannot parse uint64 from %q: %w", s, ErrOverflow) + } + v := d*uint64(base) + uint64(b) + if v < d { + return math.MaxUint64, fmt.Errorf("cannot parse uint64 from %q: %w", s, ErrOverflow) + } + d = v + i++ + } + if i <= j { + return d, fmt.Errorf("cannot parse uint64 from %q", s) + } + + for i < uint(len(s)) { + if !isSpace(s[i]) { + break + } + i++ + } + + if i < uint(len(s)) { + // Unparsed tail left. + return d, fmt.Errorf("unparsed tail left after parsing uint64 from %q: %q", s, s[i:]) + } + return d, nil +} + +var ErrOverflow = errors.New("overflow") + +// ParseInt64 parses int64 number s. +// +// It is equivalent to strconv.ParseInt(s, base, 64) in case it succeeds, +// but on error it will return the best effort value of what it has parsed so far. +func ParseInt64(s string, base int) (int64, error) { + if len(s) == 0 { + return 0, fmt.Errorf("cannot parse int64 from empty string") + } + if base < 2 || base > 36 { + return 0, fmt.Errorf("invalid base %d; must be in [2, 36]", base) + } + i := uint(0) + for i < uint(len(s)) { + if !isSpace(s[i]) { + break + } + i++ + } + + minus := s[i] == '-' + if minus { + i++ + if i >= uint(len(s)) { + return 0, fmt.Errorf("cannot parse int64 from %q", s) + } + } + + d := uint64(0) + j := i +next: + for i < uint(len(s)) { + var b byte + switch { + case s[i] >= '0' && s[i] <= '9': + b = s[i] - '0' + case s[i] >= 'a' && s[i] <= 'z': + b = s[i] - 'a' + 10 + case s[i] >= 'A' && s[i] <= 'Z': + b = s[i] - 'A' + 10 + default: + break next + } + + if b >= byte(base) { + break next + } + + var cutoff uint64 + switch base { + case 10: + cutoff = math.MaxInt64/10 + 1 + case 16: + cutoff = math.MaxInt64/16 + 1 + default: + cutoff = math.MaxInt64/uint64(base) + 1 + } + if d >= cutoff { + if minus { + return math.MinInt64, fmt.Errorf("cannot parse int64 from %q: %w", s, ErrOverflow) + } + return math.MaxInt64, fmt.Errorf("cannot parse int64 from %q: %w", s, ErrOverflow) + } + + v := d*uint64(base) + uint64(b) + if v < d { + if minus { + return math.MinInt64, fmt.Errorf("cannot parse int64 from %q: %w", s, ErrOverflow) + } + return math.MaxInt64, fmt.Errorf("cannot parse int64 from %q: %w", s, ErrOverflow) + } + d = v + i++ + } + + v := int64(d) + if d > math.MaxInt64 && !minus { + return math.MaxInt64, fmt.Errorf("cannot parse int64 from %q: %w", s, ErrOverflow) + } else if d > math.MaxInt64+1 && minus { + return math.MinInt64, fmt.Errorf("cannot parse int64 from %q: %w", s, ErrOverflow) + } + + if minus { + v = -v + if d == math.MaxInt64+1 { + v = math.MinInt64 + } + } + + if i <= j { + return v, fmt.Errorf("cannot parse int64 from %q", s) + } + + for i < uint(len(s)) { + if !isSpace(s[i]) { + break + } + i++ + } + + if i < uint(len(s)) { + // Unparsed tail left. + return v, fmt.Errorf("unparsed tail left after parsing int64 from %q: %q", s, s[i:]) + } + if d == math.MaxInt64+1 && minus { + v = math.MinInt64 + } + + return v, nil +} + +// ParseFloat64 parses floating-point number s. +// +// It is equivalent to strconv.ParseFloat(s, 64) in case it succeeds, +// but on error it will return the best effort value of what it has parsed so far. +func ParseFloat64(s string) (float64, error) { + if len(s) == 0 { + return 0.0, fmt.Errorf("cannot parse float64 from empty string") + } + i := uint(0) + for i < uint(len(s)) { + if !isSpace(s[i]) { + break + } + i++ + } + ws := i + + // We only care to parse as many of the initial float characters of the + // string as possible. This functionality is implemented in the `strconv` package + // of the standard library, but not exposed, so we hook into it. + val, l, err := hack.ParseFloatPrefix(s[ws:], 64) + for l < len(s[ws:]) { + if !isSpace(s[ws+uint(l)]) { + break + } + l++ + } + + if l < len(s[ws:]) { + return val, fmt.Errorf("unparsed tail left after parsing float64 from %q: %q", s, s[ws+uint(l):]) + } + if errors.Is(err, strconv.ErrRange) { + if val < 0 { + val = -math.MaxFloat64 + } else { + val = math.MaxFloat64 + } + } + + return val, err +} + +func isSpace(c byte) bool { + switch c { + case ' ', '\t': + return true + default: + return false + } +} diff --git a/go/mysql/fastparse/fastparse_test.go b/go/mysql/fastparse/fastparse_test.go new file mode 100644 index 00000000000..bec312b0bb5 --- /dev/null +++ b/go/mysql/fastparse/fastparse_test.go @@ -0,0 +1,553 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package fastparse + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseInt64(t *testing.T) { + testcases := []struct { + input string + base int + expected int64 + err string + }{ + { + input: "0", + base: 10, + expected: 0, + }, + { + input: "1", + base: 10, + expected: 1, + }, + { + input: "1", + base: 2, + expected: 1, + }, + { + input: "10", + base: 2, + expected: 2, + }, + { + input: " 10", + base: 10, + expected: 10, + }, + { + input: " 10 ", + base: 10, + expected: 10, + }, + { + input: " 10 1", + base: 10, + expected: 10, + err: `unparsed tail left after parsing int64 from " 10 1": "1"`, + }, + { + input: " -10 ", + base: 10, + expected: -10, + }, + { + input: " -10 1", + base: 10, + expected: -10, + err: `unparsed tail left after parsing int64 from " -10 1": "1"`, + }, + { + input: "9223372036854775807", + base: 10, + expected: 9223372036854775807, + }, + { + input: "7fffffffffffffff", + base: 16, + expected: 9223372036854775807, + }, + { + input: "7FFFFFFFFFFFFFFF", + base: 16, + expected: 9223372036854775807, + }, + { + input: "8000000000000000", + base: 16, + expected: 9223372036854775807, + err: `cannot parse int64 from "8000000000000000": overflow`, + }, + { + input: "80.1", + base: 16, + expected: 128, + err: `unparsed tail left after parsing int64 from "80.1": ".1"`, + }, + { + input: "9223372036854775807trailing", + base: 10, + expected: 9223372036854775807, + err: `unparsed tail left after parsing int64 from "9223372036854775807trailing": "trailing"`, + }, + { + input: "9223372036854775808", + base: 10, + expected: 9223372036854775807, + err: `cannot parse int64 from "9223372036854775808": overflow`, + }, + { + input: "9223372036854775808trailing", + base: 10, + expected: 9223372036854775807, + err: `cannot parse int64 from "9223372036854775808trailing": overflow`, + }, + { + input: "-9223372036854775807", + base: 10, + expected: -9223372036854775807, + }, + { + input: "-9223372036854775807.1", + base: 10, + expected: -9223372036854775807, + err: `unparsed tail left after parsing int64 from "-9223372036854775807.1": ".1"`, + }, + { + input: "-9223372036854775808", + base: 10, + expected: -9223372036854775808, + }, + { + input: "-9223372036854775808.1", + base: 10, + expected: -9223372036854775808, + err: `unparsed tail left after parsing int64 from "-9223372036854775808.1": ".1"`, + }, + { + input: "-9223372036854775809", + base: 10, + expected: -9223372036854775808, + err: `cannot parse int64 from "-9223372036854775809": overflow`, + }, + { + input: "18446744073709551615", + base: 10, + expected: 9223372036854775807, + err: `cannot parse int64 from "18446744073709551615": overflow`, + }, + { + input: "18446744073709551616", + base: 10, + expected: 9223372036854775807, + err: `cannot parse int64 from "18446744073709551616": overflow`, + }, + { + input: "31415926535897932384", + base: 10, + expected: 9223372036854775807, + err: `cannot parse int64 from "31415926535897932384": overflow`, + }, + { + input: "1.1", + base: 10, + expected: 1, + err: `unparsed tail left after parsing int64 from "1.1": ".1"`, + }, + { + input: "-1.1", + base: 10, + expected: -1, + err: `unparsed tail left after parsing int64 from "-1.1": ".1"`, + }, + { + input: "\t 42 \t", + base: 10, + expected: 42, + }, + { + input: "\t 42 \n", + base: 10, + expected: 42, + err: `unparsed tail left after parsing int64 from "\t 42 \n": "\n"`, + }, + } + for _, tc := range testcases { + t.Run(tc.input, func(t *testing.T) { + val, err := ParseInt64(tc.input, tc.base) + if tc.err == "" { + require.NoError(t, err) + require.Equal(t, tc.expected, val) + } else { + require.Equal(t, tc.expected, val) + require.EqualError(t, err, tc.err) + } + }) + } +} + +func TestParseUint64(t *testing.T) { + testcases := []struct { + input string + base int + expected uint64 + err string + }{ + { + input: "0", + base: 10, + expected: 0, + }, + { + input: "1", + base: 10, + expected: 1, + }, + { + input: "1", + base: 2, + expected: 1, + }, + { + input: "10", + base: 2, + expected: 2, + }, + { + input: " 10", + base: 10, + expected: 10, + }, + { + input: " 10 ", + base: 10, + expected: 10, + }, + { + input: " 10 1", + base: 10, + expected: 10, + err: `unparsed tail left after parsing uint64 from " 10 1": "1"`, + }, + { + input: "9223372036854775807", + base: 10, + expected: 9223372036854775807, + }, + { + input: "9223372036854775807trailing", + base: 10, + expected: 9223372036854775807, + err: `unparsed tail left after parsing uint64 from "9223372036854775807trailing": "trailing"`, + }, + { + input: "9223372036854775808", + base: 10, + expected: 9223372036854775808, + }, + { + input: "9223372036854775808trailing", + base: 10, + expected: 9223372036854775808, + err: `unparsed tail left after parsing uint64 from "9223372036854775808trailing": "trailing"`, + }, + { + input: "18446744073709551615", + base: 10, + expected: 18446744073709551615, + }, + { + input: "ffffffffffffffff", + base: 16, + expected: 18446744073709551615, + }, + { + input: "FFFFFFFFFFFFFFFF", + base: 16, + expected: 18446744073709551615, + }, + { + input: "18446744073709551615.1", + base: 10, + expected: 18446744073709551615, + err: `unparsed tail left after parsing uint64 from "18446744073709551615.1": ".1"`, + }, + { + input: "ff.1", + base: 16, + expected: 255, + err: `unparsed tail left after parsing uint64 from "ff.1": ".1"`, + }, + { + input: "18446744073709551616", + base: 10, + expected: 18446744073709551615, + err: `cannot parse uint64 from "18446744073709551616": overflow`, + }, + { + input: "31415926535897932384", + base: 10, + expected: 18446744073709551615, + err: `cannot parse uint64 from "31415926535897932384": overflow`, + }, + { + input: "1.1", + base: 10, + expected: 1, + err: `unparsed tail left after parsing uint64 from "1.1": ".1"`, + }, + { + input: "\t 42 \t", + base: 10, + expected: 42, + }, + { + input: "\t 42 \n", + base: 10, + expected: 42, + err: `unparsed tail left after parsing uint64 from "\t 42 \n": "\n"`, + }, + } + for _, tc := range testcases { + t.Run(tc.input, func(t *testing.T) { + val, err := ParseUint64(tc.input, tc.base) + if tc.err == "" { + require.NoError(t, err) + require.Equal(t, tc.expected, val) + } else { + require.Equal(t, tc.expected, val) + require.EqualError(t, err, tc.err) + } + }) + } +} + +func TestParseFloat64(t *testing.T) { + testcases := []struct { + input string + expected float64 + err string + }{ + { + input: "0", + expected: 0, + }, + { + input: "1", + expected: 1, + }, + { + input: "1", + expected: 1, + }, + { + input: "10", + expected: 10, + }, + { + input: "-", + expected: 0.0, + err: `strconv.ParseFloat: parsing "-": invalid syntax`, + }, + { + input: " 10", + expected: 10, + }, + { + input: " 10 ", + expected: 10, + }, + { + input: " 10 1", + expected: 10, + err: `unparsed tail left after parsing float64 from " 10 1": "1"`, + }, + { + input: "9223372036854775807", + expected: 9223372036854775807, + }, + { + input: "80.1", + expected: 80.1, + }, + { + input: "9223372036854775807trailing", + expected: 9223372036854775807, + err: `unparsed tail left after parsing float64 from "9223372036854775807trailing": "trailing"`, + }, + { + input: " 9223372036854775807trailing", + expected: 9223372036854775807, + err: `unparsed tail left after parsing float64 from " 9223372036854775807trailing": "trailing"`, + }, + { + input: " 9223372036854775807", + expected: 9223372036854775807, + }, + { + input: "9223372036854775808", + expected: 9223372036854775808, + }, + { + input: "9223372036854775808trailing", + expected: 9223372036854775808, + err: `unparsed tail left after parsing float64 from "9223372036854775808trailing": "trailing"`, + }, + { + input: "-9223372036854775807", + expected: -9223372036854775807, + }, + { + input: "-9223372036854775807.1", + expected: -9223372036854775807.1, + }, + { + input: "-9223372036854775808", + expected: -9223372036854775808, + }, + { + input: "-9223372036854775808.1", + expected: -9223372036854775808.1, + }, + { + input: "-9223372036854775809", + expected: -9223372036854775809, + }, + { + input: "18446744073709551615", + expected: 18446744073709551615, + }, + { + input: "18446744073709551616", + expected: 18446744073709551616, + }, + { + input: "1.1", + expected: 1.1, + }, + { + input: "-1.1", + expected: -1.1, + }, + { + input: "1e100", + expected: 1e+100, + }, + { + input: "1e+100", + expected: 1e+100, + }, + { + input: "1e22", + expected: 1e22, + }, + { + input: "1e-22", + expected: 1e-22, + }, + { + input: "1e-100", + expected: 1e-100, + }, + { + input: "1e308", + expected: 1e308, + }, + { + input: "-1e308", + expected: -1e308, + }, + { + input: "1e408", + expected: math.MaxFloat64, + err: `strconv.ParseFloat: parsing "1e408": value out of range`, + }, + { + input: "-1e408", + expected: -math.MaxFloat64, + err: `strconv.ParseFloat: parsing "-1e408": value out of range`, + }, + { + input: "1e-308", + expected: 1e-308, + }, + { + input: "0.1.99", + expected: 0.1, + err: `unparsed tail left after parsing float64 from "0.1.99": ".99"`, + }, + { + input: "\t 42.10 \t", + expected: 42.10, + }, + { + input: "\t 42.10 \n", + expected: 42.10, + err: `unparsed tail left after parsing float64 from "\t 42.10 \n": "\n"`, + }, + } + for _, tc := range testcases { + t.Run(tc.input, func(t *testing.T) { + val, err := ParseFloat64(tc.input) + if tc.err == "" { + require.NoError(t, err) + require.Equal(t, tc.expected, val) + } else { + require.Equal(t, tc.expected, val) + require.EqualError(t, err, tc.err) + } + }) + } +} + +func TestParseStringToFloat(t *testing.T) { + tcs := []struct { + str string + val float64 + }{ + {str: ""}, + {str: " "}, + {str: "1", val: 1}, + {str: "1.10", val: 1.10}, + {str: " 6.87", val: 6.87}, + {str: "93.66 ", val: 93.66}, + {str: "\t 42.10 \n ", val: 42.10}, + {str: "1.10aa", val: 1.10}, + {str: ".", val: 0.00}, + {str: ".99", val: 0.99}, + {str: "..99", val: 0}, + {str: "1.", val: 1}, + {str: "0.1.99", val: 0.1}, + {str: "0.", val: 0}, + {str: "8794354", val: 8794354}, + {str: " 10 ", val: 10}, + {str: "2266951196291479516", val: 2266951196291479516}, + {str: "abcd123", val: 0}, + } + + for _, tc := range tcs { + t.Run(tc.str, func(t *testing.T) { + got, _ := ParseFloat64(tc.str) + require.EqualValues(t, tc.val, got) + }) + } +} diff --git a/go/mysql/flavor.go b/go/mysql/flavor.go index eeb8b14b655..edb64913c31 100644 --- a/go/mysql/flavor.go +++ b/go/mysql/flavor.go @@ -23,6 +23,8 @@ import ( "strconv" "strings" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -31,7 +33,7 @@ import ( var ( // ErrNotReplica means there is no replication status. // Returned by ShowReplicationStatus(). - ErrNotReplica = NewSQLError(ERNotReplica, SSUnknownSQLState, "no replication status") + ErrNotReplica = sqlerror.NewSQLError(sqlerror.ERNotReplica, sqlerror.SSUnknownSQLState, "no replication status") // ErrNoPrimaryStatus means no status was returned by ShowPrimaryStatus(). ErrNoPrimaryStatus = errors.New("no master status") @@ -75,10 +77,10 @@ const ( // 2. MariaDB 10.X type flavor interface { // primaryGTIDSet returns the current GTIDSet of a server. - primaryGTIDSet(c *Conn) (GTIDSet, error) + primaryGTIDSet(c *Conn) (replication.GTIDSet, error) // purgedGTIDSet returns the purged GTIDSet of a server. - purgedGTIDSet(c *Conn) (GTIDSet, error) + purgedGTIDSet(c *Conn) (replication.GTIDSet, error) // gtidMode returns the gtid mode of a server. gtidMode(c *Conn) (string, error) @@ -94,11 +96,11 @@ type flavor interface { // startReplicationUntilAfter will start replication, but only allow it // to run until `pos` is reached. After reaching pos, replication will be stopped again - startReplicationUntilAfter(pos Position) string + startReplicationUntilAfter(pos replication.Position) string // startSQLThreadUntilAfter will start replication's sql thread(s), but only allow it // to run until `pos` is reached. After reaching pos, it will be stopped again - startSQLThreadUntilAfter(pos Position) string + startSQLThreadUntilAfter(pos replication.Position) string // stopReplicationCommand returns the command to stop the replication. stopReplicationCommand() string @@ -114,7 +116,7 @@ type flavor interface { // sendBinlogDumpCommand sends the packet required to start // dumping binlogs from the specified location. - sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos Position) error + sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error // readBinlogEvent reads the next BinlogEvent from the connection. readBinlogEvent(c *Conn) (BinlogEvent, error) @@ -129,7 +131,7 @@ type flavor interface { // setReplicationPositionCommands returns the commands to set the // replication position at which the replica will resume. - setReplicationPositionCommands(pos Position) []string + setReplicationPositionCommands(pos replication.Position) []string // changeReplicationSourceArg returns the specific parameter to add to // a "change primary" command. @@ -137,24 +139,17 @@ type flavor interface { // status returns the result of the appropriate status command, // with parsed replication position. - status(c *Conn) (ReplicationStatus, error) + status(c *Conn) (replication.ReplicationStatus, error) // primaryStatus returns the result of 'SHOW MASTER STATUS', // with parsed executed position. - primaryStatus(c *Conn) (PrimaryStatus, error) + primaryStatus(c *Conn) (replication.PrimaryStatus, error) // waitUntilPositionCommand returns the SQL command to issue // to wait until the given position, until the context // expires. The command returns -1 if it times out. It // returns NULL if GTIDs are not enabled. - waitUntilPositionCommand(ctx context.Context, pos Position) (string, error) - - // enableBinlogPlaybackCommand and disableBinlogPlaybackCommand return an - // optional command to run to enable or disable binlog - // playback. This is used internally in Google, as the - // timestamp cannot be set by regular clients. - enableBinlogPlaybackCommand() string - disableBinlogPlaybackCommand() string + waitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) baseShowTables() string baseShowTablesWithSizes() string @@ -272,23 +267,23 @@ func (c *Conn) IsMariaDB() bool { } // PrimaryPosition returns the current primary's replication position. -func (c *Conn) PrimaryPosition() (Position, error) { +func (c *Conn) PrimaryPosition() (replication.Position, error) { gtidSet, err := c.flavor.primaryGTIDSet(c) if err != nil { - return Position{}, err + return replication.Position{}, err } - return Position{ + return replication.Position{ GTIDSet: gtidSet, }, nil } // GetGTIDPurged returns the tablet's GTIDs which are purged. -func (c *Conn) GetGTIDPurged() (Position, error) { +func (c *Conn) GetGTIDPurged() (replication.Position, error) { gtidSet, err := c.flavor.purgedGTIDSet(c) if err != nil { - return Position{}, err + return replication.Position{}, err } - return Position{ + return replication.Position{ GTIDSet: gtidSet, }, nil } @@ -304,13 +299,13 @@ func (c *Conn) GetServerUUID() (string, error) { } // PrimaryFilePosition returns the current primary's file based replication position. -func (c *Conn) PrimaryFilePosition() (Position, error) { +func (c *Conn) PrimaryFilePosition() (replication.Position, error) { filePosFlavor := filePosFlavor{} gtidSet, err := filePosFlavor.primaryGTIDSet(c) if err != nil { - return Position{}, err + return replication.Position{}, err } - return Position{ + return replication.Position{ GTIDSet: gtidSet, }, nil } @@ -326,14 +321,14 @@ func (c *Conn) RestartReplicationCommands() []string { } // StartReplicationUntilAfterCommand returns the command to start replication. -func (c *Conn) StartReplicationUntilAfterCommand(pos Position) string { +func (c *Conn) StartReplicationUntilAfterCommand(pos replication.Position) string { return c.flavor.startReplicationUntilAfter(pos) } // StartSQLThreadUntilAfterCommand returns the command to start the replica's SQL // thread(s) and have it run until it has reached the given position, at which point // it will stop. -func (c *Conn) StartSQLThreadUntilAfterCommand(pos Position) string { +func (c *Conn) StartSQLThreadUntilAfterCommand(pos replication.Position) string { return c.flavor.startSQLThreadUntilAfter(pos) } @@ -360,7 +355,7 @@ func (c *Conn) StartSQLThreadCommand() string { // SendBinlogDumpCommand sends the flavor-specific version of // the COM_BINLOG_DUMP command to start dumping raw binlog // events over a server connection, starting at a given GTID. -func (c *Conn) SendBinlogDumpCommand(serverID uint32, binlogFilename string, startPos Position) error { +func (c *Conn) SendBinlogDumpCommand(serverID uint32, binlogFilename string, startPos replication.Position) error { return c.flavor.sendBinlogDumpCommand(c, serverID, binlogFilename, startPos) } @@ -385,7 +380,7 @@ func (c *Conn) ResetReplicationParametersCommands() []string { // SetReplicationPositionCommands returns the commands to set the // replication position at which the replica will resume // when it is later reparented with SetReplicationSourceCommand. -func (c *Conn) SetReplicationPositionCommands(pos Position) []string { +func (c *Conn) SetReplicationPositionCommands(pos replication.Position) []string { return c.flavor.setReplicationPositionCommands(pos) } @@ -440,107 +435,15 @@ func resultToMap(qr *sqltypes.Result) (map[string]string, error) { return result, nil } -// parseReplicationStatus parses the common (non-flavor-specific) fields of ReplicationStatus -func parseReplicationStatus(fields map[string]string) ReplicationStatus { - // The field names in the map are identical to what we receive from the database - // Hence the names still contain Master - status := ReplicationStatus{ - SourceHost: fields["Master_Host"], - SourceUser: fields["Master_User"], - SSLAllowed: fields["Master_SSL_Allowed"] == "Yes", - AutoPosition: fields["Auto_Position"] == "1", - UsingGTID: fields["Using_Gtid"] != "No" && fields["Using_Gtid"] != "", - HasReplicationFilters: (fields["Replicate_Do_DB"] != "") || (fields["Replicate_Ignore_DB"] != "") || (fields["Replicate_Do_Table"] != "") || (fields["Replicate_Ignore_Table"] != "") || (fields["Replicate_Wild_Do_Table"] != "") || (fields["Replicate_Wild_Ignore_Table"] != ""), - // These fields are returned from the underlying DB and cannot be renamed - IOState: ReplicationStatusToState(fields["Slave_IO_Running"]), - LastIOError: fields["Last_IO_Error"], - SQLState: ReplicationStatusToState(fields["Slave_SQL_Running"]), - LastSQLError: fields["Last_SQL_Error"], - } - parseInt, _ := strconv.ParseInt(fields["Master_Port"], 10, 32) - status.SourcePort = int32(parseInt) - parseInt, _ = strconv.ParseInt(fields["Connect_Retry"], 10, 32) - status.ConnectRetry = int32(parseInt) - parseUint, err := strconv.ParseUint(fields["Seconds_Behind_Master"], 10, 32) - if err != nil { - // we could not parse the value into a valid uint32 -- most commonly because the value is NULL from the - // database -- so let's reflect that the underlying value was unknown on our last check - status.ReplicationLagUnknown = true - } else { - status.ReplicationLagUnknown = false - status.ReplicationLagSeconds = uint32(parseUint) - } - parseUint, _ = strconv.ParseUint(fields["Master_Server_Id"], 10, 32) - status.SourceServerID = uint32(parseUint) - parseUint, _ = strconv.ParseUint(fields["SQL_Delay"], 10, 32) - status.SQLDelay = uint32(parseUint) - - executedPosStr := fields["Exec_Master_Log_Pos"] - file := fields["Relay_Master_Log_File"] - if file != "" && executedPosStr != "" { - filePos, err := strconv.ParseUint(executedPosStr, 10, 32) - if err == nil { - status.FilePosition.GTIDSet = filePosGTID{ - file: file, - pos: uint32(filePos), - } - } - } - - readPosStr := fields["Read_Master_Log_Pos"] - file = fields["Master_Log_File"] - if file != "" && readPosStr != "" { - fileRelayPos, err := strconv.ParseUint(readPosStr, 10, 32) - if err == nil { - status.RelayLogSourceBinlogEquivalentPosition.GTIDSet = filePosGTID{ - file: file, - pos: uint32(fileRelayPos), - } - } - } - - relayPosStr := fields["Relay_Log_Pos"] - file = fields["Relay_Log_File"] - if file != "" && relayPosStr != "" { - relayFilePos, err := strconv.ParseUint(relayPosStr, 10, 32) - if err == nil { - status.RelayLogFilePosition.GTIDSet = filePosGTID{ - file: file, - pos: uint32(relayFilePos), - } - } - } - return status -} - // ShowReplicationStatus executes the right command to fetch replication status, // and returns a parsed Position with other fields. -func (c *Conn) ShowReplicationStatus() (ReplicationStatus, error) { +func (c *Conn) ShowReplicationStatus() (replication.ReplicationStatus, error) { return c.flavor.status(c) } -// parsePrimaryStatus parses the common fields of SHOW MASTER STATUS. -func parsePrimaryStatus(fields map[string]string) PrimaryStatus { - status := PrimaryStatus{} - - fileExecPosStr := fields["Position"] - file := fields["File"] - if file != "" && fileExecPosStr != "" { - filePos, err := strconv.ParseUint(fileExecPosStr, 10, 32) - if err == nil { - status.FilePosition.GTIDSet = filePosGTID{ - file: file, - pos: uint32(filePos), - } - } - } - - return status -} - // ShowPrimaryStatus executes the right SHOW MASTER STATUS command, // and returns a parsed executed Position, as well as file based Position. -func (c *Conn) ShowPrimaryStatus() (PrimaryStatus, error) { +func (c *Conn) ShowPrimaryStatus() (replication.PrimaryStatus, error) { return c.flavor.primaryStatus(c) } @@ -548,7 +451,7 @@ func (c *Conn) ShowPrimaryStatus() (PrimaryStatus, error) { // to wait until the given position, until the context // expires. The command returns -1 if it times out. It // returns NULL if GTIDs are not enabled. -func (c *Conn) WaitUntilPositionCommand(ctx context.Context, pos Position) (string, error) { +func (c *Conn) WaitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) { return c.flavor.waitUntilPositionCommand(ctx, pos) } @@ -556,23 +459,11 @@ func (c *Conn) WaitUntilPositionCommand(ctx context.Context, pos Position) (stri // to wait until the given position, until the context // expires for the file position flavor. The command returns -1 if it times out. It // returns NULL if GTIDs are not enabled. -func (c *Conn) WaitUntilFilePositionCommand(ctx context.Context, pos Position) (string, error) { +func (c *Conn) WaitUntilFilePositionCommand(ctx context.Context, pos replication.Position) (string, error) { filePosFlavor := filePosFlavor{} return filePosFlavor.waitUntilPositionCommand(ctx, pos) } -// EnableBinlogPlaybackCommand returns a command to run to enable -// binlog playback. -func (c *Conn) EnableBinlogPlaybackCommand() string { - return c.flavor.enableBinlogPlaybackCommand() -} - -// DisableBinlogPlaybackCommand returns a command to run to disable -// binlog playback. -func (c *Conn) DisableBinlogPlaybackCommand() string { - return c.flavor.disableBinlogPlaybackCommand() -} - // BaseShowTables returns a query that shows tables func (c *Conn) BaseShowTables() string { return c.flavor.baseShowTables() @@ -587,3 +478,7 @@ func (c *Conn) BaseShowTablesWithSizes() string { func (c *Conn) SupportsCapability(capability FlavorCapability) (bool, error) { return c.flavor.supportsCapability(c.ServerVersion, capability) } + +func init() { + flavors[replication.FilePosFlavorID] = newFilePosFlavor +} diff --git a/go/mysql/flavor_filepos.go b/go/mysql/flavor_filepos.go index de9307f126a..bf4076b85b1 100644 --- a/go/mysql/flavor_filepos.go +++ b/go/mysql/flavor_filepos.go @@ -20,10 +20,11 @@ import ( "context" "fmt" "io" - "strconv" "strings" "time" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -40,7 +41,7 @@ func newFilePosFlavor() flavor { } // primaryGTIDSet is part of the Flavor interface. -func (flv *filePosFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { +func (flv *filePosFlavor) primaryGTIDSet(c *Conn) (replication.GTIDSet, error) { qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) if err != nil { return nil, err @@ -53,19 +54,11 @@ func (flv *filePosFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { if err != nil { return nil, err } - pos, err := strconv.ParseUint(resultMap["Position"], 0, 32) - if err != nil { - return nil, fmt.Errorf("invalid FilePos GTID (%v): expecting pos to be an integer", resultMap["Position"]) - } - - return filePosGTID{ - file: resultMap["File"], - pos: uint32(pos), - }, nil + return replication.ParseFilePosGTIDSet(fmt.Sprintf("%s:%s", resultMap["File"], resultMap["Position"])) } // purgedGTIDSet is part of the Flavor interface. -func (flv *filePosFlavor) purgedGTIDSet(c *Conn) (GTIDSet, error) { +func (flv *filePosFlavor) purgedGTIDSet(c *Conn) (replication.GTIDSet, error) { return nil, nil } @@ -119,14 +112,14 @@ func (flv *filePosFlavor) startSQLThreadCommand() string { } // sendBinlogDumpCommand is part of the Flavor interface. -func (flv *filePosFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos Position) error { - rpos, ok := startPos.GTIDSet.(filePosGTID) +func (flv *filePosFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error { + rpos, ok := startPos.GTIDSet.(replication.FilePosGTID) if !ok { return fmt.Errorf("startPos.GTIDSet is wrong type - expected filePosGTID, got: %#v", startPos.GTIDSet) } - flv.file = rpos.file - return c.WriteComBinlogDump(serverID, rpos.file, rpos.pos, 0) + flv.file = rpos.File + return c.WriteComBinlogDump(serverID, rpos.File, rpos.Pos, 0) } // readBinlogEvent is part of the Flavor interface. @@ -143,7 +136,7 @@ func (flv *filePosFlavor) readBinlogEvent(c *Conn) (BinlogEvent, error) { } switch result[0] { case EOFPacket: - return nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", io.EOF) + return nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", io.EOF) case ErrPacket: return nil, ParseErrorPacket(result) } @@ -223,7 +216,7 @@ func (flv *filePosFlavor) resetReplicationParametersCommands(c *Conn) []string { } // setReplicationPositionCommands is part of the Flavor interface. -func (flv *filePosFlavor) setReplicationPositionCommands(pos Position) []string { +func (flv *filePosFlavor) setReplicationPositionCommands(pos replication.Position) []string { return []string{ "unsupported", } @@ -235,64 +228,47 @@ func (flv *filePosFlavor) changeReplicationSourceArg() string { } // status is part of the Flavor interface. -func (flv *filePosFlavor) status(c *Conn) (ReplicationStatus, error) { +func (flv *filePosFlavor) status(c *Conn) (replication.ReplicationStatus, error) { qr, err := c.ExecuteFetch("SHOW SLAVE STATUS", 100, true /* wantfields */) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data, meaning the server // is not configured as a replica. - return ReplicationStatus{}, ErrNotReplica + return replication.ReplicationStatus{}, ErrNotReplica } resultMap, err := resultToMap(qr) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } - return parseFilePosReplicationStatus(resultMap) -} - -func parseFilePosReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) { - status := parseReplicationStatus(resultMap) - - status.Position = status.FilePosition - status.RelayLogPosition = status.RelayLogSourceBinlogEquivalentPosition - - return status, nil + return replication.ParseFilePosReplicationStatus(resultMap) } // primaryStatus is part of the Flavor interface. -func (flv *filePosFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) { +func (flv *filePosFlavor) primaryStatus(c *Conn) (replication.PrimaryStatus, error) { qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) if err != nil { - return PrimaryStatus{}, err + return replication.PrimaryStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data. We don't know how this could happen. - return PrimaryStatus{}, ErrNoPrimaryStatus + return replication.PrimaryStatus{}, ErrNoPrimaryStatus } resultMap, err := resultToMap(qr) if err != nil { - return PrimaryStatus{}, err + return replication.PrimaryStatus{}, err } - return parseFilePosPrimaryStatus(resultMap) -} - -func parseFilePosPrimaryStatus(resultMap map[string]string) (PrimaryStatus, error) { - status := parsePrimaryStatus(resultMap) - - status.Position = status.FilePosition - - return status, nil + return replication.ParseFilePosPrimaryStatus(resultMap) } // waitUntilPositionCommand is part of the Flavor interface. -func (flv *filePosFlavor) waitUntilPositionCommand(ctx context.Context, pos Position) (string, error) { - filePosPos, ok := pos.GTIDSet.(filePosGTID) +func (flv *filePosFlavor) waitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) { + filePosPos, ok := pos.GTIDSet.(replication.FilePosGTID) if !ok { return "", fmt.Errorf("Position is not filePos compatible: %#v", pos.GTIDSet) } @@ -302,30 +278,20 @@ func (flv *filePosFlavor) waitUntilPositionCommand(ctx context.Context, pos Posi if timeout <= 0 { return "", fmt.Errorf("timed out waiting for position %v", pos) } - return fmt.Sprintf("SELECT MASTER_POS_WAIT('%s', %d, %.6f)", filePosPos.file, filePosPos.pos, timeout.Seconds()), nil + return fmt.Sprintf("SELECT MASTER_POS_WAIT('%s', %d, %.6f)", filePosPos.File, filePosPos.Pos, timeout.Seconds()), nil } - return fmt.Sprintf("SELECT MASTER_POS_WAIT('%s', %d)", filePosPos.file, filePosPos.pos), nil + return fmt.Sprintf("SELECT MASTER_POS_WAIT('%s', %d)", filePosPos.File, filePosPos.Pos), nil } -func (*filePosFlavor) startReplicationUntilAfter(pos Position) string { +func (*filePosFlavor) startReplicationUntilAfter(pos replication.Position) string { return "unsupported" } -func (*filePosFlavor) startSQLThreadUntilAfter(pos Position) string { +func (*filePosFlavor) startSQLThreadUntilAfter(pos replication.Position) string { return "unsupported" } -// enableBinlogPlaybackCommand is part of the Flavor interface. -func (*filePosFlavor) enableBinlogPlaybackCommand() string { - return "" -} - -// disableBinlogPlaybackCommand is part of the Flavor interface. -func (*filePosFlavor) disableBinlogPlaybackCommand() string { - return "" -} - // baseShowTables is part of the Flavor interface. func (*filePosFlavor) baseShowTables() string { return mysqlFlavor{}.baseShowTables() diff --git a/go/mysql/flavor_filepos_test.go b/go/mysql/flavor_filepos_test.go deleted file mode 100644 index be60f6a95a6..00000000000 --- a/go/mysql/flavor_filepos_test.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mysql - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestFilePosRetrieveSourceServerId(t *testing.T) { - resultMap := map[string]string{ - "Master_Server_Id": "1", - } - - want := ReplicationStatus{SourceServerID: 1} - got, err := parseFilePosReplicationStatus(resultMap) - require.NoError(t, err) - assert.Equalf(t, got.SourceServerID, want.SourceServerID, "got SourceServerID: %v; want SourceServerID: %v", got.SourceServerID, want.SourceServerID) -} - -func TestFilePosRetrieveExecutedPosition(t *testing.T) { - resultMap := map[string]string{ - "Exec_Master_Log_Pos": "1307", - "Relay_Master_Log_File": "master-bin.000002", - "Read_Master_Log_Pos": "1308", - "Master_Log_File": "master-bin.000003", - "Relay_Log_Pos": "1309", - "Relay_Log_File": "relay-bin.000004", - } - - want := ReplicationStatus{ - Position: Position{GTIDSet: filePosGTID{file: "master-bin.000002", pos: 1307}}, - RelayLogPosition: Position{GTIDSet: filePosGTID{file: "master-bin.000003", pos: 1308}}, - FilePosition: Position{GTIDSet: filePosGTID{file: "master-bin.000002", pos: 1307}}, - RelayLogSourceBinlogEquivalentPosition: Position{GTIDSet: filePosGTID{file: "master-bin.000003", pos: 1308}}, - RelayLogFilePosition: Position{GTIDSet: filePosGTID{file: "relay-bin.000004", pos: 1309}}, - } - got, err := parseFilePosReplicationStatus(resultMap) - require.NoError(t, err) - assert.Equalf(t, got.Position.GTIDSet, want.Position.GTIDSet, "got Position: %v; want Position: %v", got.Position.GTIDSet, want.Position.GTIDSet) - assert.Equalf(t, got.RelayLogPosition.GTIDSet, want.RelayLogPosition.GTIDSet, "got RelayLogPosition: %v; want RelayLogPosition: %v", got.RelayLogPosition.GTIDSet, want.RelayLogPosition.GTIDSet) - assert.Equalf(t, got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet, "got RelayLogFilePosition: %v; want RelayLogFilePosition: %v", got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet) - assert.Equalf(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) - assert.Equalf(t, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet, "got RelayLogSourceBinlogEquivalentPosition: %v; want RelayLogSourceBinlogEquivalentPosition: %v", got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet) - assert.Equalf(t, got.Position.GTIDSet, got.FilePosition.GTIDSet, "FilePosition and Position don't match when they should for the FilePos flavor") - assert.Equalf(t, got.RelayLogPosition.GTIDSet, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, "RelayLogPosition and RelayLogSourceBinlogEquivalentPosition don't match when they should for the FilePos flavor") -} - -func TestFilePosShouldGetPosition(t *testing.T) { - resultMap := map[string]string{ - "Position": "1307", - "File": "source-bin.000003", - } - - want := PrimaryStatus{ - Position: Position{GTIDSet: filePosGTID{file: "source-bin.000003", pos: 1307}}, - FilePosition: Position{GTIDSet: filePosGTID{file: "source-bin.000003", pos: 1307}}, - } - got, err := parseFilePosPrimaryStatus(resultMap) - require.NoError(t, err) - assert.Equalf(t, got.Position.GTIDSet, want.Position.GTIDSet, "got Position: %v; want Position: %v", got.Position.GTIDSet, want.Position.GTIDSet) - assert.Equalf(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) - assert.Equalf(t, got.Position.GTIDSet, got.FilePosition.GTIDSet, "FilePosition and Position don't match when they should for the FilePos flavor") -} diff --git a/go/mysql/flavor_mariadb.go b/go/mysql/flavor_mariadb.go index 377ede1ecc8..15718542b45 100644 --- a/go/mysql/flavor_mariadb.go +++ b/go/mysql/flavor_mariadb.go @@ -18,12 +18,13 @@ limitations under the License. package mysql import ( + "context" "fmt" "io" "time" - "context" - + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -41,7 +42,7 @@ var _ flavor = (*mariadbFlavor101)(nil) var _ flavor = (*mariadbFlavor102)(nil) // primaryGTIDSet is part of the Flavor interface. -func (mariadbFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { +func (mariadbFlavor) primaryGTIDSet(c *Conn) (replication.GTIDSet, error) { qr, err := c.ExecuteFetch("SELECT @@GLOBAL.gtid_binlog_pos", 1, false) if err != nil { return nil, err @@ -50,11 +51,11 @@ func (mariadbFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected result format for gtid_binlog_pos: %#v", qr) } - return parseMariadbGTIDSet(qr.Rows[0][0].ToString()) + return replication.ParseMariadbGTIDSet(qr.Rows[0][0].ToString()) } // purgedGTIDSet is part of the Flavor interface. -func (mariadbFlavor) purgedGTIDSet(c *Conn) (GTIDSet, error) { +func (mariadbFlavor) purgedGTIDSet(c *Conn) (replication.GTIDSet, error) { return nil, nil } @@ -68,11 +69,11 @@ func (mariadbFlavor) gtidMode(c *Conn) (string, error) { return "", nil } -func (mariadbFlavor) startReplicationUntilAfter(pos Position) string { +func (mariadbFlavor) startReplicationUntilAfter(pos replication.Position) string { return fmt.Sprintf("START SLAVE UNTIL master_gtid_pos = \"%s\"", pos) } -func (mariadbFlavor) startSQLThreadUntilAfter(pos Position) string { +func (mariadbFlavor) startSQLThreadUntilAfter(pos replication.Position) string { return fmt.Sprintf("START SLAVE SQL_THREAD UNTIL master_gtid_pos = \"%s\"", pos) } @@ -105,7 +106,7 @@ func (mariadbFlavor) startSQLThreadCommand() string { } // sendBinlogDumpCommand is part of the Flavor interface. -func (mariadbFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos Position) error { +func (mariadbFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error { // Tell the server that we understand GTIDs by setting // mariadb_slave_capability to MARIA_SLAVE_CAPABILITY_GTID = 4 (MariaDB >= 10.0.1). if _, err := c.ExecuteFetch("SET @mariadb_slave_capability=4", 0, false); err != nil { @@ -154,7 +155,7 @@ func (mariadbFlavor) resetReplicationParametersCommands(c *Conn) []string { } // setReplicationPositionCommands is part of the Flavor interface. -func (mariadbFlavor) setReplicationPositionCommands(pos Position) []string { +func (mariadbFlavor) setReplicationPositionCommands(pos replication.Position) []string { return []string{ // RESET MASTER will clear out gtid_binlog_pos, // which then guarantees that gtid_current_pos = gtid_slave_pos, @@ -182,54 +183,42 @@ func (mariadbFlavor) changeReplicationSourceArg() string { } // status is part of the Flavor interface. -func (mariadbFlavor) status(c *Conn) (ReplicationStatus, error) { +func (mariadbFlavor) status(c *Conn) (replication.ReplicationStatus, error) { qr, err := c.ExecuteFetch("SHOW ALL SLAVES STATUS", 100, true /* wantfields */) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data, meaning the server // is not configured as a replica. - return ReplicationStatus{}, ErrNotReplica + return replication.ReplicationStatus{}, ErrNotReplica } resultMap, err := resultToMap(qr) if err != nil { - return ReplicationStatus{}, err - } - - return parseMariadbReplicationStatus(resultMap) -} - -func parseMariadbReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) { - status := parseReplicationStatus(resultMap) - - var err error - status.Position.GTIDSet, err = parseMariadbGTIDSet(resultMap["Gtid_Slave_Pos"]) - if err != nil { - return ReplicationStatus{}, vterrors.Wrapf(err, "ReplicationStatus can't parse MariaDB GTID (Gtid_Slave_Pos: %#v)", resultMap["Gtid_Slave_Pos"]) + return replication.ReplicationStatus{}, err } - return status, nil + return replication.ParseMariadbReplicationStatus(resultMap) } // primaryStatus is part of the Flavor interface. -func (m mariadbFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) { +func (m mariadbFlavor) primaryStatus(c *Conn) (replication.PrimaryStatus, error) { qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) if err != nil { - return PrimaryStatus{}, err + return replication.PrimaryStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data. We don't know how this could happen. - return PrimaryStatus{}, ErrNoPrimaryStatus + return replication.PrimaryStatus{}, ErrNoPrimaryStatus } resultMap, err := resultToMap(qr) if err != nil { - return PrimaryStatus{}, err + return replication.PrimaryStatus{}, err } - status := parsePrimaryStatus(resultMap) + status := replication.ParsePrimaryStatus(resultMap) status.Position.GTIDSet, err = m.primaryGTIDSet(c) return status, err } @@ -238,7 +227,7 @@ func (m mariadbFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) { // // Note: Unlike MASTER_POS_WAIT(), MASTER_GTID_WAIT() will continue waiting even // if the sql thread stops. If that is a problem, we'll have to change this. -func (mariadbFlavor) waitUntilPositionCommand(ctx context.Context, pos Position) (string, error) { +func (mariadbFlavor) waitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) { if deadline, ok := ctx.Deadline(); ok { timeout := time.Until(deadline) if timeout <= 0 { @@ -260,7 +249,7 @@ func (mariadbFlavor) readBinlogEvent(c *Conn) (BinlogEvent, error) { } switch result[0] { case EOFPacket: - return nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", io.EOF) + return nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", io.EOF) case ErrPacket: return nil, ParseErrorPacket(result) } diff --git a/go/mysql/flavor_mariadb_binlog_playback.go b/go/mysql/flavor_mariadb_binlog_playback.go index f8ce0053b56..b7fa18e434f 100644 --- a/go/mysql/flavor_mariadb_binlog_playback.go +++ b/go/mysql/flavor_mariadb_binlog_playback.go @@ -17,19 +17,6 @@ limitations under the License. package mysql -// These two methods are isolated here so they can be easily changed -// in other trees. - -// enableBinlogPlaybackCommand is part of the Flavor interface. -func (mariadbFlavor) enableBinlogPlaybackCommand() string { - return "" -} - -// disableBinlogPlaybackCommand is part of the Flavor interface. -func (mariadbFlavor) disableBinlogPlaybackCommand() string { - return "" -} - // baseShowTables is part of the Flavor interface. func (mariadbFlavor) baseShowTables() string { return mysqlFlavor{}.baseShowTables() diff --git a/go/mysql/flavor_mariadb_test.go b/go/mysql/flavor_mariadb_test.go index a2741c27148..250d664e4af 100644 --- a/go/mysql/flavor_mariadb_test.go +++ b/go/mysql/flavor_mariadb_test.go @@ -17,11 +17,9 @@ limitations under the License. package mysql import ( - "fmt" "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestMariadbSetReplicationSourceCommand(t *testing.T) { @@ -77,51 +75,3 @@ func TestMariadbSetReplicationSourceCommandSSL(t *testing.T) { assert.Equal(t, want, got, "mariadbFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want) } - -func TestMariadbRetrieveSourceServerId(t *testing.T) { - resultMap := map[string]string{ - "Master_Server_Id": "1", - "Gtid_Slave_Pos": "0-101-2320", - } - - want := ReplicationStatus{SourceServerID: 1} - got, err := parseMariadbReplicationStatus(resultMap) - require.NoError(t, err) - assert.Equal(t, got.SourceServerID, want.SourceServerID, fmt.Sprintf("got SourceServerID: %v; want SourceServerID: %v", got.SourceServerID, want.SourceServerID)) -} - -func TestMariadbRetrieveFileBasedPositions(t *testing.T) { - resultMap := map[string]string{ - "Exec_Master_Log_Pos": "1307", - "Relay_Master_Log_File": "master-bin.000002", - "Read_Master_Log_Pos": "1308", - "Master_Log_File": "master-bin.000003", - "Gtid_Slave_Pos": "0-101-2320", - "Relay_Log_Pos": "1309", - "Relay_Log_File": "relay-bin.000004", - } - - want := ReplicationStatus{ - FilePosition: Position{GTIDSet: filePosGTID{file: "master-bin.000002", pos: 1307}}, - RelayLogSourceBinlogEquivalentPosition: Position{GTIDSet: filePosGTID{file: "master-bin.000003", pos: 1308}}, - RelayLogFilePosition: Position{GTIDSet: filePosGTID{file: "relay-bin.000004", pos: 1309}}, - } - got, err := parseMariadbReplicationStatus(resultMap) - require.NoError(t, err) - assert.Equalf(t, got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet, "got RelayLogFilePosition: %v; want RelayLogFilePosition: %v", got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet) - assert.Equal(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, fmt.Sprintf("got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet)) - assert.Equal(t, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet, fmt.Sprintf("got RelayLogSourceBinlogEquivalentPosition: %v; want RelayLogSourceBinlogEquivalentPosition: %v", got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet)) -} - -func TestMariadbShouldGetNilRelayLogPosition(t *testing.T) { - resultMap := map[string]string{ - "Exec_Master_Log_Pos": "1307", - "Relay_Master_Log_File": "master-bin.000002", - "Read_Master_Log_Pos": "1308", - "Master_Log_File": "master-bin.000003", - "Gtid_Slave_Pos": "0-101-2320", - } - got, err := parseMariadbReplicationStatus(resultMap) - require.NoError(t, err) - assert.Truef(t, got.RelayLogPosition.IsZero(), "Got a filled in RelayLogPosition. For MariaDB we should get back nil, because MariaDB does not return the retrieved GTIDSet. got: %#v", got.RelayLogPosition) -} diff --git a/go/mysql/flavor_mysql.go b/go/mysql/flavor_mysql.go index ad23854d374..bc5f31006e5 100644 --- a/go/mysql/flavor_mysql.go +++ b/go/mysql/flavor_mysql.go @@ -17,12 +17,13 @@ limitations under the License. package mysql import ( + "context" "fmt" "io" "time" - "context" - + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -44,7 +45,7 @@ var _ flavor = (*mysqlFlavor57)(nil) var _ flavor = (*mysqlFlavor80)(nil) // primaryGTIDSet is part of the Flavor interface. -func (mysqlFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { +func (mysqlFlavor) primaryGTIDSet(c *Conn) (replication.GTIDSet, error) { // keep @@global as lowercase, as some servers like the Ripple binlog server only honors a lowercase `global` value qr, err := c.ExecuteFetch("SELECT @@global.gtid_executed", 1, false) if err != nil { @@ -53,11 +54,11 @@ func (mysqlFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 { return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected result format for gtid_executed: %#v", qr) } - return ParseMysql56GTIDSet(qr.Rows[0][0].ToString()) + return replication.ParseMysql56GTIDSet(qr.Rows[0][0].ToString()) } // purgedGTIDSet is part of the Flavor interface. -func (mysqlFlavor) purgedGTIDSet(c *Conn) (GTIDSet, error) { +func (mysqlFlavor) purgedGTIDSet(c *Conn) (replication.GTIDSet, error) { // keep @@global as lowercase, as some servers like the Ripple binlog server only honors a lowercase `global` value qr, err := c.ExecuteFetch("SELECT @@global.gtid_purged", 1, false) if err != nil { @@ -66,7 +67,7 @@ func (mysqlFlavor) purgedGTIDSet(c *Conn) (GTIDSet, error) { if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 { return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected result format for gtid_purged: %#v", qr) } - return ParseMysql56GTIDSet(qr.Rows[0][0].ToString()) + return replication.ParseMysql56GTIDSet(qr.Rows[0][0].ToString()) } // serverUUID is part of the Flavor interface. @@ -106,11 +107,11 @@ func (mysqlFlavor) restartReplicationCommands() []string { } } -func (mysqlFlavor) startReplicationUntilAfter(pos Position) string { +func (mysqlFlavor) startReplicationUntilAfter(pos replication.Position) string { return fmt.Sprintf("START SLAVE UNTIL SQL_AFTER_GTIDS = '%s'", pos) } -func (mysqlFlavor) startSQLThreadUntilAfter(pos Position) string { +func (mysqlFlavor) startSQLThreadUntilAfter(pos replication.Position) string { return fmt.Sprintf("START SLAVE SQL_THREAD UNTIL SQL_AFTER_GTIDS = '%s'", pos) } @@ -131,8 +132,8 @@ func (mysqlFlavor) startSQLThreadCommand() string { } // sendBinlogDumpCommand is part of the Flavor interface. -func (mysqlFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos Position) error { - gtidSet, ok := startPos.GTIDSet.(Mysql56GTIDSet) +func (mysqlFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error { + gtidSet, ok := startPos.GTIDSet.(replication.Mysql56GTIDSet) if !ok { return vterrors.Errorf(vtrpc.Code_INTERNAL, "startPos.GTIDSet is wrong type - expected Mysql56GTIDSet, got: %#v", startPos.GTIDSet) } @@ -164,7 +165,7 @@ func (mysqlFlavor) resetReplicationParametersCommands(c *Conn) []string { } // setReplicationPositionCommands is part of the Flavor interface. -func (mysqlFlavor) setReplicationPositionCommands(pos Position) []string { +func (mysqlFlavor) setReplicationPositionCommands(pos replication.Position) []string { return []string{ "RESET MASTER", // We must clear gtid_executed before setting gtid_purged. fmt.Sprintf("SET GLOBAL gtid_purged = '%s'", pos), @@ -177,88 +178,46 @@ func (mysqlFlavor) changeReplicationSourceArg() string { } // status is part of the Flavor interface. -func (mysqlFlavor) status(c *Conn) (ReplicationStatus, error) { +func (mysqlFlavor) status(c *Conn) (replication.ReplicationStatus, error) { qr, err := c.ExecuteFetch("SHOW SLAVE STATUS", 100, true /* wantfields */) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data, meaning the server // is not configured as a replica. - return ReplicationStatus{}, ErrNotReplica + return replication.ReplicationStatus{}, ErrNotReplica } resultMap, err := resultToMap(qr) if err != nil { - return ReplicationStatus{}, err - } - - return parseMysqlReplicationStatus(resultMap) -} - -func parseMysqlReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) { - status := parseReplicationStatus(resultMap) - uuidString := resultMap["Master_UUID"] - if uuidString != "" { - sid, err := ParseSID(uuidString) - if err != nil { - return ReplicationStatus{}, vterrors.Wrapf(err, "cannot decode SourceUUID") - } - status.SourceUUID = sid - } - - var err error - status.Position.GTIDSet, err = ParseMysql56GTIDSet(resultMap["Executed_Gtid_Set"]) - if err != nil { - return ReplicationStatus{}, vterrors.Wrapf(err, "ReplicationStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v)", resultMap["Executed_Gtid_Set"]) - } - relayLogGTIDSet, err := ParseMysql56GTIDSet(resultMap["Retrieved_Gtid_Set"]) - if err != nil { - return ReplicationStatus{}, vterrors.Wrapf(err, "ReplicationStatus can't parse MySQL 5.6 GTID (Retrieved_Gtid_Set: %#v)", resultMap["Retrieved_Gtid_Set"]) + return replication.ReplicationStatus{}, err } - // We take the union of the executed and retrieved gtidset, because the retrieved gtidset only represents GTIDs since - // the relay log has been reset. To get the full Position, we need to take a union of executed GTIDSets, since these would - // have been in the relay log's GTIDSet in the past, prior to a reset. - status.RelayLogPosition.GTIDSet = status.Position.GTIDSet.Union(relayLogGTIDSet) - return status, nil + return replication.ParseMysqlReplicationStatus(resultMap) } // primaryStatus is part of the Flavor interface. -func (mysqlFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) { +func (mysqlFlavor) primaryStatus(c *Conn) (replication.PrimaryStatus, error) { qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) if err != nil { - return PrimaryStatus{}, err + return replication.PrimaryStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data. We don't know how this could happen. - return PrimaryStatus{}, ErrNoPrimaryStatus + return replication.PrimaryStatus{}, ErrNoPrimaryStatus } resultMap, err := resultToMap(qr) if err != nil { - return PrimaryStatus{}, err + return replication.PrimaryStatus{}, err } - return parseMysqlPrimaryStatus(resultMap) + return replication.ParseMysqlPrimaryStatus(resultMap) } -func parseMysqlPrimaryStatus(resultMap map[string]string) (PrimaryStatus, error) { - status := parsePrimaryStatus(resultMap) - - var err error - status.Position.GTIDSet, err = ParseMysql56GTIDSet(resultMap["Executed_Gtid_Set"]) - if err != nil { - return PrimaryStatus{}, vterrors.Wrapf(err, "PrimaryStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v)", resultMap["Executed_Gtid_Set"]) - } - - return status, nil -} - -// waitUntilPositionCommand is part of the Flavor interface. - // waitUntilPositionCommand is part of the Flavor interface. -func (mysqlFlavor) waitUntilPositionCommand(ctx context.Context, pos Position) (string, error) { +func (mysqlFlavor) waitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) { // A timeout of 0 means wait indefinitely. timeoutSeconds := 0 if deadline, ok := ctx.Deadline(); ok { @@ -286,7 +245,7 @@ func (mysqlFlavor) readBinlogEvent(c *Conn) (BinlogEvent, error) { } switch result[0] { case EOFPacket: - return nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", io.EOF) + return nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", io.EOF) case ErrPacket: return nil, ParseErrorPacket(result) } @@ -298,16 +257,6 @@ func (mysqlFlavor) readBinlogEvent(c *Conn) (BinlogEvent, error) { return ev, nil } -// enableBinlogPlaybackCommand is part of the Flavor interface. -func (mysqlFlavor) enableBinlogPlaybackCommand() string { - return "" -} - -// disableBinlogPlaybackCommand is part of the Flavor interface. -func (mysqlFlavor) disableBinlogPlaybackCommand() string { - return "" -} - // baseShowTables is part of the Flavor interface. func (mysqlFlavor) baseShowTables() string { return "SELECT table_name, table_type, unix_timestamp(create_time), table_comment FROM information_schema.tables WHERE table_schema = database()" @@ -356,17 +305,41 @@ GROUP BY t.table_name, t.table_type, t.create_time, t.table_comment` // We join with a subquery that materializes the data from `information_schema.innodb_sys_tablespaces` // early for performance reasons. This effectively causes only a single read of `information_schema.innodb_tablespaces` // per query. +// Note the following: +// - We use UNION ALL to deal differently with partitioned tables vs. non-partitioned tables. +// Originally, the query handled both, but that introduced "WHERE ... OR" conditions that led to poor query +// optimization. By separating to UNION ALL we remove all "OR" conditions. +// - We utilize `INFORMATION_SCHEMA`.`TABLES`.`CREATE_OPTIONS` column to do early pruning before the JOIN. +// - `TABLES`.`TABLE_NAME` has `utf8mb4_0900_ai_ci` collation. `INNODB_TABLESPACES`.`NAME` has `utf8mb3_general_ci`. +// We normalize the collation to get better query performance (we force the casting at the time of our choosing) +// - `create_options` is NULL for views, and therefore we need an additional UNION ALL to include views const TablesWithSize80 = `SELECT t.table_name, - t.table_type, - UNIX_TIMESTAMP(t.create_time), - t.table_comment, - SUM(i.file_size), - SUM(i.allocated_size) -FROM information_schema.tables t -INNER JOIN information_schema.innodb_tablespaces i - ON i.name LIKE CONCAT(database(), '/%') AND (i.name = CONCAT(t.table_schema, '/', t.table_name) OR i.name LIKE CONCAT(t.table_schema, '/', t.table_name, '#p#%')) -WHERE t.table_schema = database() -GROUP BY t.table_name, t.table_type, t.create_time, t.table_comment` + t.table_type, + UNIX_TIMESTAMP(t.create_time), + t.table_comment, + i.file_size, + i.allocated_size + FROM information_schema.tables t + LEFT JOIN information_schema.innodb_tablespaces i + ON i.name = CONCAT(t.table_schema, '/', t.table_name) COLLATE utf8mb3_general_ci + WHERE + t.table_schema = database() AND not t.create_options <=> 'partitioned' +UNION ALL + SELECT + t.table_name, + t.table_type, + UNIX_TIMESTAMP(t.create_time), + t.table_comment, + SUM(i.file_size), + SUM(i.allocated_size) + FROM information_schema.tables t + LEFT JOIN information_schema.innodb_tablespaces i + ON i.name LIKE (CONCAT(t.table_schema, '/', t.table_name, '#p#%') COLLATE utf8mb3_general_ci ) + WHERE + t.table_schema = database() AND t.create_options <=> 'partitioned' + GROUP BY + t.table_schema, t.table_name, t.table_type, t.create_time, t.table_comment +` // baseShowTablesWithSizes is part of the Flavor interface. func (mysqlFlavor56) baseShowTablesWithSizes() string { diff --git a/go/mysql/flavor_mysql_test.go b/go/mysql/flavor_mysql_test.go index 75d6a3ebc65..0e1b749633a 100644 --- a/go/mysql/flavor_mysql_test.go +++ b/go/mysql/flavor_mysql_test.go @@ -20,7 +20,6 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestMysql56SetReplicationSourceCommand(t *testing.T) { @@ -76,74 +75,3 @@ func TestMysql56SetReplicationSourceCommandSSL(t *testing.T) { assert.Equal(t, want, got, "mysqlFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want) } - -func TestMysqlRetrieveSourceServerId(t *testing.T) { - resultMap := map[string]string{ - "Master_Server_Id": "1", - } - - want := ReplicationStatus{SourceServerID: 1} - got, err := parseMysqlReplicationStatus(resultMap) - require.NoError(t, err) - assert.Equalf(t, got.SourceServerID, want.SourceServerID, "got SourceServerID: %v; want SourceServerID: %v", got.SourceServerID, want.SourceServerID) -} - -func TestMysqlRetrieveFileBasedPositions(t *testing.T) { - resultMap := map[string]string{ - "Exec_Master_Log_Pos": "1307", - "Relay_Master_Log_File": "master-bin.000002", - "Read_Master_Log_Pos": "1308", - "Master_Log_File": "master-bin.000003", - "Relay_Log_Pos": "1309", - "Relay_Log_File": "relay-bin.000004", - } - - want := ReplicationStatus{ - FilePosition: Position{GTIDSet: filePosGTID{file: "master-bin.000002", pos: 1307}}, - RelayLogSourceBinlogEquivalentPosition: Position{GTIDSet: filePosGTID{file: "master-bin.000003", pos: 1308}}, - RelayLogFilePosition: Position{GTIDSet: filePosGTID{file: "relay-bin.000004", pos: 1309}}, - } - got, err := parseMysqlReplicationStatus(resultMap) - require.NoError(t, err) - assert.Equalf(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) - assert.Equalf(t, got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet, "got RelayLogFilePosition: %v; want RelayLogFilePosition: %v", got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet) - assert.Equalf(t, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet, "got RelayLogSourceBinlogEquivalentPosition: %v; want RelayLogSourceBinlogEquivalentPosition: %v", got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet) -} - -func TestMysqlShouldGetRelayLogPosition(t *testing.T) { - resultMap := map[string]string{ - "Executed_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", - "Retrieved_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:6-9", - "Exec_Master_Log_Pos": "1307", - "Relay_Master_Log_File": "master-bin.000002", - "Read_Master_Log_Pos": "1308", - "Master_Log_File": "master-bin.000003", - } - - sid, _ := ParseSID("3e11fa47-71ca-11e1-9e33-c80aa9429562") - want := ReplicationStatus{ - Position: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 5}}}}, - RelayLogPosition: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 9}}}}, - } - got, err := parseMysqlReplicationStatus(resultMap) - require.NoError(t, err) - assert.Equalf(t, got.RelayLogPosition.GTIDSet.String(), want.RelayLogPosition.GTIDSet.String(), "got RelayLogPosition: %v; want RelayLogPosition: %v", got.RelayLogPosition.GTIDSet, want.RelayLogPosition.GTIDSet) -} - -func TestMysqlShouldGetPosition(t *testing.T) { - resultMap := map[string]string{ - "Executed_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", - "Position": "1307", - "File": "source-bin.000003", - } - - sid, _ := ParseSID("3e11fa47-71ca-11e1-9e33-c80aa9429562") - want := PrimaryStatus{ - Position: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 5}}}}, - FilePosition: Position{GTIDSet: filePosGTID{file: "source-bin.000003", pos: 1307}}, - } - got, err := parseMysqlPrimaryStatus(resultMap) - require.NoError(t, err) - assert.Equalf(t, got.Position.GTIDSet.String(), want.Position.GTIDSet.String(), "got Position: %v; want Position: %v", got.Position.GTIDSet, want.Position.GTIDSet) - assert.Equalf(t, got.FilePosition.GTIDSet.String(), want.FilePosition.GTIDSet.String(), "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) -} diff --git a/go/mysql/flavor_mysqlgr.go b/go/mysql/flavor_mysqlgr.go index 33bd1e6e3e1..e96a6433f73 100644 --- a/go/mysql/flavor_mysqlgr.go +++ b/go/mysql/flavor_mysqlgr.go @@ -21,6 +21,7 @@ import ( "fmt" "math" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -57,12 +58,12 @@ func (mysqlGRFlavor) restartReplicationCommands() []string { } // startReplicationUntilAfter is disabled in mysqlGRFlavor -func (mysqlGRFlavor) startReplicationUntilAfter(pos Position) string { +func (mysqlGRFlavor) startReplicationUntilAfter(pos replication.Position) string { return "" } // startSQLThreadUntilAfter is disabled in mysqlGRFlavor -func (mysqlGRFlavor) startSQLThreadUntilAfter(pos Position) string { +func (mysqlGRFlavor) startSQLThreadUntilAfter(pos replication.Position) string { return "" } @@ -99,7 +100,7 @@ func (mysqlGRFlavor) resetReplicationParametersCommands(c *Conn) []string { } // setReplicationPositionCommands is disabled in mysqlGRFlavor -func (mysqlGRFlavor) setReplicationPositionCommands(pos Position) []string { +func (mysqlGRFlavor) setReplicationPositionCommands(pos replication.Position) []string { return []string{} } @@ -110,8 +111,8 @@ func (mysqlGRFlavor) setReplicationPositionCommands(pos Position) []string { // TODO: Right now the GR's lag is defined as the lag between a node processing a txn // and the time the txn was committed. We should consider reporting lag between current queueing txn timestamp // from replication_connection_status and the current processing txn's commit timestamp -func (mysqlGRFlavor) status(c *Conn) (ReplicationStatus, error) { - res := ReplicationStatus{} +func (mysqlGRFlavor) status(c *Conn) (replication.ReplicationStatus, error) { + res := replication.ReplicationStatus{} // Get primary node information query := `SELECT MEMBER_HOST, @@ -125,7 +126,7 @@ func (mysqlGRFlavor) status(c *Conn) (ReplicationStatus, error) { return nil }) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } query = `SELECT @@ -148,7 +149,7 @@ func (mysqlGRFlavor) status(c *Conn) (ReplicationStatus, error) { return nil }) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } // if chanel is not set, it means the state is not ONLINE or RECOVERING // return partial result early @@ -160,26 +161,26 @@ func (mysqlGRFlavor) status(c *Conn) (ReplicationStatus, error) { query = fmt.Sprintf(`SELECT SERVICE_STATE FROM performance_schema.replication_connection_status WHERE CHANNEL_NAME='%s'`, chanel) - var connectionState ReplicationState + var connectionState replication.ReplicationState err = fetchStatusForGroupReplication(c, query, func(values []sqltypes.Value) error { - connectionState = ReplicationStatusToState(values[0].ToString()) + connectionState = replication.ReplicationStatusToState(values[0].ToString()) return nil }) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } res.IOState = connectionState // Populate SQLState from replication_connection_status - var applierState ReplicationState + var applierState replication.ReplicationState query = fmt.Sprintf(`SELECT SERVICE_STATE FROM performance_schema.replication_applier_status_by_coordinator WHERE CHANNEL_NAME='%s'`, chanel) err = fetchStatusForGroupReplication(c, query, func(values []sqltypes.Value) error { - applierState = ReplicationStatusToState(values[0].ToString()) + applierState = replication.ReplicationStatusToState(values[0].ToString()) return nil }) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } res.SQLState = applierState @@ -197,17 +198,17 @@ func (mysqlGRFlavor) status(c *Conn) (ReplicationStatus, error) { return nil }) if err != nil { - return ReplicationStatus{}, err + return replication.ReplicationStatus{}, err } return res, nil } -func parsePrimaryGroupMember(res *ReplicationStatus, row []sqltypes.Value) { +func parsePrimaryGroupMember(res *replication.ReplicationStatus, row []sqltypes.Value) { res.SourceHost = row[0].ToString() /* MEMBER_HOST */ res.SourcePort, _ = row[1].ToInt32() /* MEMBER_PORT */ } -func parseReplicationApplierLag(res *ReplicationStatus, row []sqltypes.Value) { +func parseReplicationApplierLag(res *replication.ReplicationStatus, row []sqltypes.Value) { lagSec, err := row[0].ToUint32() // if the error is not nil, ReplicationLagSeconds will remain to be MaxUint32 if err == nil { @@ -234,7 +235,7 @@ func fetchStatusForGroupReplication(c *Conn, query string, onResult func([]sqlty // primaryStatus returns the result of 'SHOW MASTER STATUS', // with parsed executed position. -func (mysqlGRFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) { +func (mysqlGRFlavor) primaryStatus(c *Conn) (replication.PrimaryStatus, error) { return mysqlFlavor{}.primaryStatus(c) } diff --git a/go/mysql/flavor_mysqlgr_test.go b/go/mysql/flavor_mysqlgr_test.go index 6b15ee5048e..df7876eca1c 100644 --- a/go/mysql/flavor_mysqlgr_test.go +++ b/go/mysql/flavor_mysqlgr_test.go @@ -20,12 +20,14 @@ import ( "gotest.tools/assert" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" ) func TestMysqlGRParsePrimaryGroupMember(t *testing.T) { - res := ReplicationStatus{} + res := replication.ReplicationStatus{} rows := []sqltypes.Value{ sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("host1")), sqltypes.MakeTrusted(querypb.Type_INT32, []byte("10")), @@ -33,12 +35,12 @@ func TestMysqlGRParsePrimaryGroupMember(t *testing.T) { parsePrimaryGroupMember(&res, rows) assert.Equal(t, "host1", res.SourceHost) assert.Equal(t, int32(10), res.SourcePort) - assert.Equal(t, ReplicationStateUnknown, res.IOState) - assert.Equal(t, ReplicationStateUnknown, res.SQLState) + assert.Equal(t, replication.ReplicationStateUnknown, res.IOState) + assert.Equal(t, replication.ReplicationStateUnknown, res.SQLState) } func TestMysqlGRReplicationApplierLagParse(t *testing.T) { - res := ReplicationStatus{} + res := replication.ReplicationStatus{} row := []sqltypes.Value{ sqltypes.MakeTrusted(querypb.Type_INT32, []byte("NULL")), } diff --git a/go/mysql/format/float.go b/go/mysql/format/float.go new file mode 100644 index 00000000000..d9655281e1c --- /dev/null +++ b/go/mysql/format/float.go @@ -0,0 +1,50 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package format + +import ( + "bytes" + "strconv" +) + +const expUpperThreshold = 1000000000000000.0 +const expLowerThreshold = 0.000000000000001 + +// FormatFloat formats a float64 as a byte string in a similar way to what MySQL does +func FormatFloat(v float64) []byte { + return AppendFloat(nil, v) +} + +func AppendFloat(buf []byte, f float64) []byte { + format := byte('f') + if f >= expUpperThreshold || f <= -expUpperThreshold || (f < expLowerThreshold && f > -expLowerThreshold) { + format = 'g' + } + // the float printer in MySQL does not add a positive sign before + // the exponent for positive exponents, but the Golang printer does + // do that, and there's no way to customize it, so we must strip the + // redundant positive sign manually + // e.g. 1.234E+56789 -> 1.234E56789 + fstr := strconv.AppendFloat(buf, f, format, -1, 64) + if idx := bytes.IndexByte(fstr, 'e'); idx >= 0 { + if fstr[idx+1] == '+' { + fstr = append(fstr[:idx+1], fstr[idx+2:]...) + } + } + + return fstr +} diff --git a/go/mysql/handshake_test.go b/go/mysql/handshake_test.go index b6532f830b3..c2b27d6f6d4 100644 --- a/go/mysql/handshake_test.go +++ b/go/mysql/handshake_test.go @@ -45,7 +45,7 @@ func TestClearTextClientAuth(t *testing.T) { defer authServer.close() // Create the listener. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -99,7 +99,7 @@ func TestSSLConnection(t *testing.T) { defer authServer.close() // Create the listener, so we can get its host. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() diff --git a/go/mysql/hex/hex.go b/go/mysql/hex/hex.go new file mode 100644 index 00000000000..d2aa00d592e --- /dev/null +++ b/go/mysql/hex/hex.go @@ -0,0 +1,80 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hex + +import ( + "encoding/hex" + "math/bits" +) + +const hextable = "0123456789ABCDEF" + +func EncodeBytes(src []byte) []byte { + j := 0 + dst := make([]byte, len(src)*2) + for _, v := range src { + dst[j] = hextable[v>>4] + dst[j+1] = hextable[v&0x0f] + j += 2 + } + return dst +} + +func EncodeUint(u uint64) []byte { + var a [16 + 1]byte + i := len(a) + shift := uint(bits.TrailingZeros(uint(16))) & 7 + b := uint64(16) + m := uint(16) - 1 // == 1<= b { + i-- + a[i] = hextable[uint(u)&m] + u >>= shift + } + + // u < base + i-- + a[i] = hextable[uint(u)] + return a[i:] +} + +func DecodeUint(u uint64) []byte { + if u == 0 { + return []byte{0} + } + var decoded []byte + for u > 0 { + c1 := u % 10 + c2 := u % 100 / 10 + decoded = append([]byte{byte(c1 + c2<<4)}, decoded...) + u /= 100 + } + return decoded +} + +func DecodedLen(src []byte) int { + return (len(src) + 1) / 2 +} + +func DecodeBytes(dst, src []byte) error { + if len(src)&1 == 1 { + src = append([]byte{'0'}, src...) + } + _, err := hex.Decode(dst, src) + return err +} diff --git a/go/mysql/icuregex/compiler.go b/go/mysql/icuregex/compiler.go new file mode 100644 index 00000000000..971cd439fb3 --- /dev/null +++ b/go/mysql/icuregex/compiler.go @@ -0,0 +1,3646 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +import ( + "math" + "slices" + "strings" + "unicode/utf8" + + "vitess.io/vitess/go/mysql/icuregex/internal/pattern" + "vitess.io/vitess/go/mysql/icuregex/internal/ucase" + "vitess.io/vitess/go/mysql/icuregex/internal/uchar" + "vitess.io/vitess/go/mysql/icuregex/internal/unames" + "vitess.io/vitess/go/mysql/icuregex/internal/uprops" + "vitess.io/vitess/go/mysql/icuregex/internal/uset" + "vitess.io/vitess/go/mysql/icuregex/internal/utf16" +) + +const BreakIteration = false +const stackSize = 100 + +type reChar struct { + char rune + quoted bool +} + +const ( + parenPlain = -1 + parenCapturing = -2 + parenAtomic = -3 + parenLookahead = -4 + parenNegLookahead = -5 + parenFlags = -6 + parenLookBehind = -7 + parenLookBehindN = -8 +) + +type setOperation uint32 + +const ( + setStart setOperation = 0<<16 | 1 + setEnd setOperation = 1<<16 | 2 + setNegation setOperation = 2<<16 | 3 + setCaseClose setOperation = 2<<16 | 9 + setDifference2 setOperation = 3<<16 | 4 // '--' set difference operator + setIntersection2 setOperation = 3<<16 | 5 // '&&' set intersection operator + setUnion setOperation = 4<<16 | 6 // implicit union of adjacent items + setDifference1 setOperation = 4<<16 | 7 // '-', single dash difference op, for compatibility with old UnicodeSet. + setIntersection1 setOperation = 4<<16 | 8 // '&', single amp intersection op, for compatibility with old UnicodeSet. +) + +type compiler struct { + err error + out *Pattern + p []rune + + scanIndex int + quoteMode bool + inBackslashQuote bool + eolComments bool + + lineNum int + charNum int + lastChar rune + peekChar rune + + c reChar + stack [stackSize]uint16 + stackPtr int + + modeFlags RegexpFlag + newModeFlags RegexpFlag + setModeFlag bool + + literalChars []rune + + parenStack []int + matchOpenParen int + matchCloseParen int + + intervalLow int + intervalUpper int + + setStack []*uset.UnicodeSet + setOpStack []setOperation + + lastSetLiteral rune + captureName *strings.Builder +} + +func newCompiler(pat *Pattern) *compiler { + return &compiler{ + out: pat, + scanIndex: 0, + eolComments: true, + lineNum: 1, + charNum: 0, + lastChar: -1, + peekChar: -1, + modeFlags: RegexpFlag(uint32(pat.flags) | 0x80000000), + matchOpenParen: -1, + matchCloseParen: -1, + lastSetLiteral: -1, + } +} + +func (c *compiler) nextCharLL() (ch rune) { + if c.peekChar != -1 { + ch, c.peekChar = c.peekChar, -1 + return + } + if len(c.p) == 0 { + return -1 + } + + ch = c.p[0] + c.p = c.p[1:] + if ch == utf8.RuneError { + return -1 + } + + if ch == chCR || ch == chNEL || ch == chLS || (ch == chLF && c.lastChar != chCR) { + c.lineNum++ + c.charNum = 0 + } else { + if ch != chLF { + c.charNum++ + } + } + c.lastChar = ch + return +} + +func (c *compiler) peekCharLL() rune { + if c.peekChar == -1 { + c.peekChar = c.nextCharLL() + } + return c.peekChar +} + +func (c *compiler) nextChar(ch *reChar) { + c.scanIndex++ + ch.char = c.nextCharLL() + ch.quoted = false + + if c.quoteMode { + ch.quoted = true + if (ch.char == chBackSlash && c.peekCharLL() == chE && ((c.modeFlags & Literal) == 0)) || + ch.char == -1 { + c.quoteMode = false // Exit quote mode, + c.nextCharLL() // discard the E + c.nextChar(ch) + return + } + } else if c.inBackslashQuote { + // The current character immediately follows a '\' + // Don't check for any further escapes, just return it as-is. + // Don't set c.fQuoted, because that would prevent the state machine from + // dispatching on the character. + c.inBackslashQuote = false + } else { + // We are not in a \Q quoted region \E of the source. + // + if (c.modeFlags & Comments) != 0 { + // + // We are in free-spacing and comments mode. + // Scan through any white space and comments, until we + // reach a significant character or the end of input. + for { + if ch.char == -1 { + break // End of Input + } + if ch.char == chPound && c.eolComments { + // Start of a comment. Consume the rest of it, until EOF or a new line + for { + ch.char = c.nextCharLL() + if ch.char == -1 || // EOF + ch.char == chCR || + ch.char == chLF || + ch.char == chNEL || + ch.char == chLS { + break + } + } + } + // TODO: check what Java & Perl do with non-ASCII white spaces. Ticket 6061. + if !pattern.IsWhitespace(ch.char) { + break + } + ch.char = c.nextCharLL() + } + } + + // + // check for backslash escaped characters. + // + if ch.char == chBackSlash { + beforeEscape := c.p + if staticSetUnescape.ContainsRune(c.peekCharLL()) { + // + // A '\' sequence that is handled by ICU's standard unescapeAt function. + // Includes \uxxxx, \n, \r, many others. + // Return the single equivalent character. + // + c.nextCharLL() // get & discard the peeked char. + ch.quoted = true + + ch.char, c.p = pattern.UnescapeAtRunes(beforeEscape) + if ch.char < 0 { + c.error(BadEscapeSequence) + } + c.charNum += len(beforeEscape) - len(c.p) + } else if c.peekCharLL() == chDigit0 { + // Octal Escape, using Java Regexp Conventions + // which are \0 followed by 1-3 octal digits. + // Different from ICU Unescape handling of Octal, which does not + // require the leading 0. + // Java also has the convention of only consuming 2 octal digits if + // the three digit number would be > 0xff + // + ch.char = 0 + c.nextCharLL() // Consume the initial 0. + for index := 0; index < 3; index++ { + ch2 := c.peekCharLL() + if ch2 < chDigit0 || ch2 > chDigit7 { + if index == 0 { + // \0 is not followed by any octal digits. + c.error(BadEscapeSequence) + } + break + } + ch.char <<= 3 + ch.char += ch2 & 7 + if ch.char <= 255 { + c.nextCharLL() + } else { + // The last digit made the number too big. Forget we saw it. + ch.char >>= 3 + } + } + ch.quoted = true + } else if c.peekCharLL() == chQ { + // "\Q" enter quote mode, which will continue until "\E" + c.quoteMode = true + c.nextCharLL() // discard the 'Q'. + c.nextChar(ch) // recurse to get the real next char. + return + } else { + // We are in a '\' escape that will be handled by the state table scanner. + // Just return the backslash, but remember that the following char is to + // be taken literally. + c.inBackslashQuote = true + } + } + } + + // re-enable # to end-of-line comments, in case they were disabled. + // They are disabled by the parser upon seeing '(?', but this lasts for + // the fetching of the next character only. + c.eolComments = true +} + +const ( + chCR = 0x0d // New lines, for terminating comments. + chLF = 0x0a // Line Feed + chPound = 0x23 // '#', introduces a comment. + chDigit0 = 0x30 // '0' + chDigit7 = 0x37 // '9' + chColon = 0x3A // ':' + chE = 0x45 // 'E' + chQ = 0x51 // 'Q' + chN = 0x4E // 'N' + chP = 0x50 // 'P' + chBackSlash = 0x5c // '\' introduces a char escape + chLBracket = 0x5b // '[' + chRBracket = 0x5d // ']' + chUp = 0x5e // '^' + chLowerP = 0x70 + chLBrace = 0x7b // '{' + chRBrace = 0x7d // '}' + chNEL = 0x85 // NEL newline variant + chLS = 0x2028 // Unicode Line Separator + chAmp = 0x26 // '&' + chDash = 0x2d // '-' +) + +func (c *compiler) compile(pat []rune) error { + if c.err != nil { + return c.err + } + if c.out.pattern != "" { + panic("cannot reuse pattern") + } + + c.out.pattern = string(pat) + c.p = pat + + var state uint16 = 1 + var table []regexTableEl + + // UREGEX_LITERAL force entire pattern to be treated as a literal string. + if c.modeFlags&Literal != 0 { + c.quoteMode = true + } + + c.nextChar(&c.c) + + // Main loop for the regex pattern parsing state machine. + // Runs once per state transition. + // Each time through optionally performs, depending on the state table, + // - an advance to the the next pattern char + // - an action to be performed. + // - pushing or popping a state to/from the local state return stack. + // file regexcst.txt is the source for the state table. The logic behind + // recongizing the pattern syntax is there, not here. + for { + if c.err != nil { + break + } + + if state == 0 { + panic("bad state?") + } + + table = parseStateTable[state:] + for len(table) > 0 { + if table[0].charClass < 127 && !c.c.quoted && rune(table[0].charClass) == c.c.char { + break + } + if table[0].charClass == 255 { + break + } + if table[0].charClass == 254 && c.c.quoted { + break + } + if table[0].charClass == 253 && c.c.char == -1 { + break + } + if table[0].charClass >= 128 && table[0].charClass < 240 && !c.c.quoted && c.c.char != -1 { + if staticRuleSet[table[0].charClass-128].ContainsRune(c.c.char) { + break + } + } + + table = table[1:] + } + + if !c.doParseActions(table[0].action) { + break + } + + if table[0].pushState != 0 { + c.stackPtr++ + if c.stackPtr >= stackSize { + c.error(InternalError) + c.stackPtr-- + } + c.stack[c.stackPtr] = uint16(table[0].pushState) + } + + if table[0].nextChar { + c.nextChar(&c.c) + } + + if table[0].nextState != 255 { + state = uint16(table[0].nextState) + } else { + state = c.stack[c.stackPtr] + c.stackPtr-- + if c.stackPtr < 0 { + c.stackPtr++ + c.error(MismatchedParen) + } + } + } + + if c.err != nil { + return c.err + } + + c.allocateStackData(restackframeHdrCount) + c.stripNOPs() + + c.out.minMatchLen = c.minMatchLength(3, len(c.out.compiledPat)-1) + + c.matchStartType() + return c.err +} + +func (c *compiler) doParseActions(action patternParseAction) bool { + switch action { + case doPatStart: + // Start of pattern compiles to: + //0 SAVE 2 Fall back to position of FAIL + //1 jmp 3 + //2 FAIL Stop if we ever reach here. + //3 NOP Dummy, so start of pattern looks the same as + // the start of an ( grouping. + //4 NOP Resreved, will be replaced by a save if there are + // OR | operators at the top level + c.appendOp(urxStateSave, 2) + c.appendOp(urxJmp, 3) + c.appendOp(urxFail, 0) + + // Standard open nonCapture paren action emits the two NOPs and + // sets up the paren stack frame. + c.doParseActions(doOpenNonCaptureParen) + + case doPatFinish: + // We've scanned to the end of the pattern + // The end of pattern compiles to: + // URX_END + // which will stop the runtime match engine. + // Encountering end of pattern also behaves like a close paren, + // and forces fixups of the State Save at the beginning of the compiled pattern + // and of any OR operations at the top level. + // + c.handleCloseParen() + if len(c.parenStack) > 0 { + // Missing close paren in pattern. + c.error(MismatchedParen) + } + + // add the END operation to the compiled pattern. + c.appendOp(urxEnd, 0) + + // Terminate the pattern compilation state machine. + return false + + case doOrOperator: + // Scanning a '|', as in (A|B) + // Generate code for any pending literals preceding the '|' + c.fixLiterals(false) + + // Insert a SAVE operation at the start of the pattern section preceding + // this OR at this level. This SAVE will branch the match forward + // to the right hand side of the OR in the event that the left hand + // side fails to match and backtracks. Locate the position for the + // save from the location on the top of the parentheses stack. + var savePosition int + savePosition, c.parenStack = stackPop(c.parenStack) + op := c.out.compiledPat[savePosition] + + if op.typ() != urxNop { + panic("expected a NOP placeholder") + } + + op = c.buildOp(urxStateSave, len(c.out.compiledPat)+1) + c.out.compiledPat[savePosition] = op + + // Append an JMP operation into the compiled pattern. The operand for + // the JMP will eventually be the location following the ')' for the + // group. This will be patched in later, when the ')' is encountered. + c.appendOp(urxJmp, 0) + + // Push the position of the newly added JMP op onto the parentheses stack. + // This registers if for fixup when this block's close paren is encountered. + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1) + + // Append a NOP to the compiled pattern. This is the slot reserved + // for a SAVE in the event that there is yet another '|' following + // this one. + c.appendOp(urxNop, 0) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1) + + case doBeginNamedCapture: + // Scanning (? + // Compile to a + // - NOP, which later may be replaced if the parenthesized group + // has a quantifier, followed by + // - STO_SP save state stack position, so it can be restored at the ")" + // - NOP, which may later be replaced by a save-state if there + // is an '|' alternation within the parens. + c.fixLiterals(false) + c.appendOp(urxNop, 0) + varLoc := c.allocateData(1) // Reserve a data location for saving the state stack ptr. + c.appendOp(urxStoSp, varLoc) + c.appendOp(urxNop, 0) + + // On the Parentheses stack, start a new frame and add the postions + // of the two NOPs. Depending on what follows in the pattern, the + // NOPs may be changed to SAVE_STATE or JMP ops, with a target + // address of the end of the parenthesized group. + c.parenStack = append(c.parenStack, int(c.modeFlags)) + c.parenStack = append(c.parenStack, parenAtomic) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-3) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1) + + case doOpenLookAhead: + // Positive Look-ahead (?= stuff ) + // + // Note: Addition of transparent input regions, with the need to + // restore the original regions when failing out of a lookahead + // block, complicated this sequence. Some combined opcodes + // might make sense - or might not, lookahead aren't that common. + // + // Caution: min match length optimization knows about this + // sequence; don't change without making updates there too. + // + // Compiles to + // 1 LA_START dataLoc Saves SP, Input Pos, Active input region. + // 2. STATE_SAVE 4 on failure of lookahead, goto 4 + // 3 JMP 6 continue ... + // + // 4. LA_END Look Ahead failed. Restore regions. + // 5. BACKTRACK and back track again. + // + // 6. NOP reserved for use by quantifiers on the block. + // Look-ahead can't have quantifiers, but paren stack + // compile time conventions require the slot anyhow. + // 7. NOP may be replaced if there is are '|' ops in the block. + // 8. code for parenthesized stuff. + // 9. LA_END + // + // Four data slots are reserved, for saving state on entry to the look-around + // 0: stack pointer on entry. + // 1: input position on entry. + // 2: fActiveStart, the active bounds start on entry. + // 3: fActiveLimit, the active bounds limit on entry. + c.fixLiterals(false) + dataLoc := c.allocateData(4) + c.appendOp(urxLaStart, dataLoc) + c.appendOp(urxStateSave, len(c.out.compiledPat)+2) + c.appendOp(urxJmp, len(c.out.compiledPat)+3) + c.appendOp(urxLaEnd, dataLoc) + c.appendOp(urxBacktrack, 0) + c.appendOp(urxNop, 0) + c.appendOp(urxNop, 0) + + // On the Parentheses stack, start a new frame and add the postions + // of the NOPs. + c.parenStack = append(c.parenStack, int(c.modeFlags)) + c.parenStack = append(c.parenStack, parenLookahead) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-2) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1) + + case doOpenLookAheadNeg: + // Negated Lookahead. (?! stuff ) + // Compiles to + // 1. LA_START dataloc + // 2. SAVE_STATE 7 // Fail within look-ahead block restores to this state, + // // which continues with the match. + // 3. NOP // Std. Open Paren sequence, for possible '|' + // 4. code for parenthesized stuff. + // 5. LA_END // Cut back stack, remove saved state from step 2. + // 6. BACKTRACK // code in block succeeded, so neg. lookahead fails. + // 7. END_LA // Restore match region, in case look-ahead was using + // an alternate (transparent) region. + // Four data slots are reserved, for saving state on entry to the look-around + // 0: stack pointer on entry. + // 1: input position on entry. + // 2: fActiveStart, the active bounds start on entry. + // 3: fActiveLimit, the active bounds limit on entry. + c.fixLiterals(false) + dataLoc := c.allocateData(4) + c.appendOp(urxLaStart, dataLoc) + c.appendOp(urxStateSave, 0) // dest address will be patched later. + c.appendOp(urxNop, 0) + + // On the Parentheses stack, start a new frame and add the postions + // of the StateSave and NOP. + c.parenStack = append(c.parenStack, int(c.modeFlags)) + c.parenStack = append(c.parenStack, parenNegLookahead) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-2) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1) + + // Instructions #5 - #7 will be added when the ')' is encountered. + + case doOpenLookBehind: + // Compile a (?<= look-behind open paren. + // + // Compiles to + // 0 URX_LB_START dataLoc + // 1 URX_LB_CONT dataLoc + // 2 MinMatchLen + // 3 MaxMatchLen + // 4 URX_NOP Standard '(' boilerplate. + // 5 URX_NOP Reserved slot for use with '|' ops within (block). + // 6 + // 7 URX_LB_END dataLoc # Check match len, restore input len + // 8 URX_LA_END dataLoc # Restore stack, input pos + // + // Allocate a block of matcher data, to contain (when running a match) + // 0: Stack ptr on entry + // 1: Input Index on entry + // 2: fActiveStart, the active bounds start on entry. + // 3: fActiveLimit, the active bounds limit on entry. + // 4: Start index of match current match attempt. + // The first four items must match the layout of data for LA_START / LA_END + + // Generate match code for any pending literals. + c.fixLiterals(false) + + // Allocate data space + dataLoc := c.allocateData(5) + + // Emit URX_LB_START + c.appendOp(urxLbStart, dataLoc) + + // Emit URX_LB_CONT + c.appendOp(urxLbCont, dataLoc) + c.appendOp(urxReservedOp, 0) // MinMatchLength. To be filled later. + c.appendOp(urxReservedOp, 0) // MaxMatchLength. To be filled later. + + // Emit the NOPs + c.appendOp(urxNop, 0) + c.appendOp(urxNop, 0) + + // On the Parentheses stack, start a new frame and add the postions + // of the URX_LB_CONT and the NOP. + c.parenStack = append(c.parenStack, int(c.modeFlags)) + c.parenStack = append(c.parenStack, parenLookBehind) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-2) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1) + + // The final two instructions will be added when the ')' is encountered. + + case doOpenLookBehindNeg: + // Compile a (? + // 8 URX_LBN_END dataLoc # Check match len, cause a FAIL + // 9 ... + // + // Allocate a block of matcher data, to contain (when running a match) + // 0: Stack ptr on entry + // 1: Input Index on entry + // 2: fActiveStart, the active bounds start on entry. + // 3: fActiveLimit, the active bounds limit on entry. + // 4: Start index of match current match attempt. + // The first four items must match the layout of data for LA_START / LA_END + + // Generate match code for any pending literals. + c.fixLiterals(false) + + // Allocate data space + dataLoc := c.allocateData(5) + + // Emit URX_LB_START + c.appendOp(urxLbStart, dataLoc) + + // Emit URX_LBN_CONT + c.appendOp(urxLbnCount, dataLoc) + c.appendOp(urxReservedOp, 0) // MinMatchLength. To be filled later. + c.appendOp(urxReservedOp, 0) // MaxMatchLength. To be filled later. + c.appendOp(urxReservedOp, 0) // Continue Loc. To be filled later. + + // Emit the NOPs + c.appendOp(urxNop, 0) + c.appendOp(urxNop, 0) + + // On the Parentheses stack, start a new frame and add the postions + // of the URX_LB_CONT and the NOP. + c.parenStack = append(c.parenStack, int(c.modeFlags)) + c.parenStack = append(c.parenStack, parenLookBehindN) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-2) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1) + + // The final two instructions will be added when the ')' is encountered. + + case doConditionalExpr, doPerlInline: + // Conditionals such as (?(1)a:b) + // Perl inline-condtionals. (?{perl code}a|b) We're not perl, no way to do them. + c.error(Unimplemented) + + case doCloseParen: + c.handleCloseParen() + if len(c.parenStack) == 0 { + // Extra close paren, or missing open paren. + c.error(MismatchedParen) + } + + case doNOP: + + case doBadOpenParenType, doRuleError: + c.error(RuleSyntax) + + case doMismatchedParenErr: + c.error(MismatchedParen) + + case doPlus: + // Normal '+' compiles to + // 1. stuff to be repeated (already built) + // 2. jmp-sav 1 + // 3. ... + // + // Or, if the item to be repeated can match a zero length string, + // 1. STO_INP_LOC data-loc + // 2. body of stuff to be repeated + // 3. JMP_SAV_X 2 + // 4. ... + + // + // Or, if the item to be repeated is simple + // 1. Item to be repeated. + // 2. LOOP_SR_I set number (assuming repeated item is a set ref) + // 3. LOOP_C stack location + topLoc := c.blockTopLoc(false) // location of item #1 + + // Check for simple constructs, which may get special optimized code. + if topLoc == len(c.out.compiledPat)-1 { + repeatedOp := c.out.compiledPat[topLoc] + + if repeatedOp.typ() == urxSetref { + // Emit optimized code for [char set]+ + c.appendOp(urxLoopSrI, repeatedOp.value()) + frameLoc := c.allocateStackData(1) + c.appendOp(urxLoopC, frameLoc) + break + } + + if repeatedOp.typ() == urxDotany || repeatedOp.typ() == urxDotanyAll || repeatedOp.typ() == urxDotanyUnix { + // Emit Optimized code for .+ operations. + loopOpI := c.buildOp(urxLoopDotI, 0) + if repeatedOp.typ() == urxDotanyAll { + // URX_LOOP_DOT_I operand is a flag indicating ". matches any" mode. + loopOpI |= 1 + } + if c.modeFlags&UnixLines != 0 { + loopOpI |= 2 + } + c.appendIns(loopOpI) + frameLoc := c.allocateStackData(1) + c.appendOp(urxLoopC, frameLoc) + break + } + } + + // General case. + + // Check for minimum match length of zero, which requires + // extra loop-breaking code. + if c.minMatchLength(topLoc, len(c.out.compiledPat)-1) == 0 { + // Zero length match is possible. + // Emit the code sequence that can handle it. + c.insertOp(topLoc) + frameLoc := c.allocateStackData(1) + op := c.buildOp(urxStoInpLoc, frameLoc) + c.out.compiledPat[topLoc] = op + + c.appendOp(urxJmpSavX, topLoc+1) + } else { + // Simpler code when the repeated body must match something non-empty + c.appendOp(urxJmpSav, topLoc) + } + + case doNGPlus: + // Non-greedy '+?' compiles to + // 1. stuff to be repeated (already built) + // 2. state-save 1 + // 3. ... + topLoc := c.blockTopLoc(false) + c.appendOp(urxStateSave, topLoc) + + case doOpt: + // Normal (greedy) ? quantifier. + // Compiles to + // 1. state save 3 + // 2. body of optional block + // 3. ... + // Insert the state save into the compiled pattern, and we're done. + saveStateLoc := c.blockTopLoc(true) + saveStateOp := c.buildOp(urxStateSave, len(c.out.compiledPat)) + c.out.compiledPat[saveStateLoc] = saveStateOp + + case doNGOpt: + // Non-greedy ?? quantifier + // compiles to + // 1. jmp 4 + // 2. body of optional block + // 3 jmp 5 + // 4. state save 2 + // 5 ... + // This code is less than ideal, with two jmps instead of one, because we can only + // insert one instruction at the top of the block being iterated. + jmp1Loc := c.blockTopLoc(true) + jmp2Loc := len(c.out.compiledPat) + + jmp1Op := c.buildOp(urxJmp, jmp2Loc+1) + c.out.compiledPat[jmp1Loc] = jmp1Op + + c.appendOp(urxJmp, jmp2Loc+2) + c.appendOp(urxStateSave, jmp1Loc+1) + + case doStar: + // Normal (greedy) * quantifier. + // Compiles to + // 1. STATE_SAVE 4 + // 2. body of stuff being iterated over + // 3. JMP_SAV 2 + // 4. ... + // + // Or, if the body is a simple [Set], + // 1. LOOP_SR_I set number + // 2. LOOP_C stack location + // ... + // + // Or if this is a .* + // 1. LOOP_DOT_I (. matches all mode flag) + // 2. LOOP_C stack location + // + // Or, if the body can match a zero-length string, to inhibit infinite loops, + // 1. STATE_SAVE 5 + // 2. STO_INP_LOC data-loc + // 3. body of stuff + // 4. JMP_SAV_X 2 + // 5. ... + // location of item #1, the STATE_SAVE + topLoc := c.blockTopLoc(false) + + // Check for simple *, where the construct being repeated + // compiled to single opcode, and might be optimizable. + if topLoc == len(c.out.compiledPat)-1 { + repeatedOp := c.out.compiledPat[topLoc] + + if repeatedOp.typ() == urxSetref { + // Emit optimized code for a [char set]* + loopOpI := c.buildOp(urxLoopSrI, repeatedOp.value()) + c.out.compiledPat[topLoc] = loopOpI + dataLoc := c.allocateStackData(1) + c.appendOp(urxLoopC, dataLoc) + break + } + + if repeatedOp.typ() == urxDotany || repeatedOp.typ() == urxDotanyAll || repeatedOp.typ() == urxDotanyUnix { + // Emit Optimized code for .* operations. + loopOpI := c.buildOp(urxLoopDotI, 0) + if repeatedOp.typ() == urxDotanyAll { + // URX_LOOP_DOT_I operand is a flag indicating . matches any mode. + loopOpI |= 1 + } + if (c.modeFlags & UnixLines) != 0 { + loopOpI |= 2 + } + c.out.compiledPat[topLoc] = loopOpI + dataLoc := c.allocateStackData(1) + c.appendOp(urxLoopC, dataLoc) + break + } + } + + // Emit general case code for this * + // The optimizations did not apply. + + saveStateLoc := c.blockTopLoc(true) + jmpOp := c.buildOp(urxJmpSav, saveStateLoc+1) + + // Check for minimum match length of zero, which requires + // extra loop-breaking code. + if c.minMatchLength(saveStateLoc, len(c.out.compiledPat)-1) == 0 { + c.insertOp(saveStateLoc) + dataLoc := c.allocateStackData(1) + + op := c.buildOp(urxStoInpLoc, dataLoc) + c.out.compiledPat[saveStateLoc+1] = op + jmpOp = c.buildOp(urxJmpSavX, saveStateLoc+2) + } + + // Locate the position in the compiled pattern where the match will continue + // after completing the *. (4 or 5 in the comment above) + continueLoc := len(c.out.compiledPat) + 1 + + // Put together the save state op and store it into the compiled code. + saveStateOp := c.buildOp(urxStateSave, continueLoc) + c.out.compiledPat[saveStateLoc] = saveStateOp + + // Append the URX_JMP_SAV or URX_JMPX operation to the compiled pattern. + c.appendIns(jmpOp) + + case doNGStar: + // Non-greedy *? quantifier + // compiles to + // 1. JMP 3 + // 2. body of stuff being iterated over + // 3. STATE_SAVE 2 + // 4 ... + jmpLoc := c.blockTopLoc(true) // loc 1. + saveLoc := len(c.out.compiledPat) // loc 3. + jmpOp := c.buildOp(urxJmp, saveLoc) + c.out.compiledPat[jmpLoc] = jmpOp + c.appendOp(urxStateSave, jmpLoc+1) + + case doIntervalInit: + // The '{' opening an interval quantifier was just scanned. + // Init the counter varaiables that will accumulate the values as the digits + // are scanned. + c.intervalLow = 0 + c.intervalUpper = -1 + + case doIntevalLowerDigit: + // Scanned a digit from the lower value of an {lower,upper} interval + digitValue := uCharDigitValue(c.c.char) + val := int64(c.intervalLow)*10 + digitValue + if val > math.MaxInt32 { + c.error(NumberTooBig) + } else { + c.intervalLow = int(val) + } + + case doIntervalUpperDigit: + // Scanned a digit from the upper value of an {lower,upper} interval + if c.intervalUpper < 0 { + c.intervalUpper = 0 + } + digitValue := uCharDigitValue(c.c.char) + val := int64(c.intervalUpper)*10 + digitValue + if val > math.MaxInt32 { + c.error(NumberTooBig) + } else { + c.intervalUpper = int(val) + } + + case doIntervalSame: + // Scanned a single value interval like {27}. Upper = Lower. + c.intervalUpper = c.intervalLow + + case doInterval: + // Finished scanning a normal {lower,upper} interval. Generate the code for it. + if !c.compileInlineInterval() { + c.compileInterval(urxCtrInit, utxCtrLoop) + } + + case doPossessiveInterval: + // Finished scanning a Possessive {lower,upper}+ interval. Generate the code for it. + + // Remember the loc for the top of the block being looped over. + // (Can not reserve a slot in the compiled pattern at this time, because + // compileInterval needs to reserve also, and blockTopLoc can only reserve + // once per block.) + topLoc := c.blockTopLoc(false) + + // Produce normal looping code. + c.compileInterval(urxCtrInit, utxCtrLoop) + + // Surround the just-emitted normal looping code with a STO_SP ... LD_SP + // just as if the loop was inclosed in atomic parentheses. + + // First the STO_SP before the start of the loop + c.insertOp(topLoc) + + varLoc := c.allocateData(1) // Reserve a data location for saving the + op := c.buildOp(urxStoSp, varLoc) + c.out.compiledPat[topLoc] = op + + var loopOp instruction + loopOp, c.out.compiledPat = stackPop(c.out.compiledPat) + if loopOp.typ() != utxCtrLoop || loopOp.value() != topLoc { + panic("bad instruction at the end of compiled pattern") + } + + loopOp++ // point LoopOp after the just-inserted STO_SP + c.appendIns(loopOp) + + // Then the LD_SP after the end of the loop + c.appendOp(urxLdSp, varLoc) + + case doNGInterval: + // Finished scanning a non-greedy {lower,upper}? interval. Generate the code for it. + c.compileInterval(urxCtrInitNg, urxCtrLoopNg) + + case doIntervalError: + c.error(BadInterval) + + case doLiteralChar: + // We've just scanned a "normal" character from the pattern, + c.literalChar(c.c.char) + + case doEscapedLiteralChar: + // We've just scanned an backslashed escaped character with no + // special meaning. It represents itself. + if (c.modeFlags&ErrorOnUnknownEscapes) != 0 && ((c.c.char >= 0x41 && c.c.char <= 0x5A) || /* in [A-Z] */ (c.c.char >= 0x61 && c.c.char <= 0x7a)) { // in [a-z] + c.error(BadEscapeSequence) + } + c.literalChar(c.c.char) + + case doDotAny: + // scanned a ".", match any single character. + c.fixLiterals(false) + if (c.modeFlags & DotAll) != 0 { + c.appendOp(urxDotanyAll, 0) + } else if (c.modeFlags & UnixLines) != 0 { + c.appendOp(urxDotanyUnix, 0) + } else { + c.appendOp(urxDotany, 0) + } + + case doCaret: + c.fixLiterals(false) + if (c.modeFlags&Multiline) == 0 && (c.modeFlags&UnixLines) == 0 { + c.appendOp(urxCaret, 0) + } else if (c.modeFlags&Multiline) != 0 && (c.modeFlags&UnixLines) == 0 { + c.appendOp(urxCaretM, 0) + } else if (c.modeFlags&Multiline) == 0 && (c.modeFlags&UnixLines) != 0 { + c.appendOp(urxCaret, 0) // Only testing true start of input. + } else if (c.modeFlags&Multiline) != 0 && (c.modeFlags&UnixLines) != 0 { + c.appendOp(urxCaretMUnix, 0) + } + + case doDollar: + c.fixLiterals(false) + if (c.modeFlags&Multiline) == 0 && (c.modeFlags&UnixLines) == 0 { + c.appendOp(urxDollar, 0) + } else if (c.modeFlags&Multiline) != 0 && (c.modeFlags&UnixLines) == 0 { + c.appendOp(urxDollarM, 0) + } else if (c.modeFlags&Multiline) == 0 && (c.modeFlags&UnixLines) != 0 { + c.appendOp(urxDollarD, 0) + } else if (c.modeFlags&Multiline) != 0 && (c.modeFlags&UnixLines) != 0 { + c.appendOp(urxDollarMd, 0) + } + + case doBackslashA: + c.fixLiterals(false) + c.appendOp(urxCaret, 0) + + case doBackslashB: + if !BreakIteration { + if (c.modeFlags & UWord) != 0 { + c.error(Unimplemented) + } + } + c.fixLiterals(false) + if c.modeFlags&UWord != 0 { + c.appendOp(urxBackslashBu, 1) + } else { + c.appendOp(urxBackslashB, 1) + } + + case doBackslashb: + if !BreakIteration { + if (c.modeFlags & UWord) != 0 { + c.error(Unimplemented) + } + } + c.fixLiterals(false) + if c.modeFlags&UWord != 0 { + c.appendOp(urxBackslashBu, 0) + } else { + c.appendOp(urxBackslashB, 0) + } + + case doBackslashD: + c.fixLiterals(false) + c.appendOp(urxBackslashD, 1) + + case doBackslashd: + c.fixLiterals(false) + c.appendOp(urxBackslashD, 0) + + case doBackslashG: + c.fixLiterals(false) + c.appendOp(urxBackslashG, 0) + + case doBackslashH: + c.fixLiterals(false) + c.appendOp(urxBackslashH, 1) + + case doBackslashh: + c.fixLiterals(false) + c.appendOp(urxBackslashH, 0) + + case doBackslashR: + c.fixLiterals(false) + c.appendOp(urxBackslashR, 0) + + case doBackslashS: + c.fixLiterals(false) + c.appendOp(urxStatSetrefN, urxIsspaceSet) + + case doBackslashs: + c.fixLiterals(false) + c.appendOp(urxStaticSetref, urxIsspaceSet) + + case doBackslashV: + c.fixLiterals(false) + c.appendOp(urxBackslashV, 1) + + case doBackslashv: + c.fixLiterals(false) + c.appendOp(urxBackslashV, 0) + + case doBackslashW: + c.fixLiterals(false) + c.appendOp(urxStatSetrefN, urxIswordSet) + + case doBackslashw: + c.fixLiterals(false) + c.appendOp(urxStaticSetref, urxIswordSet) + + case doBackslashX: + if !BreakIteration { + // Grapheme Cluster Boundary requires ICU break iteration. + c.error(Unimplemented) + } + c.fixLiterals(false) + c.appendOp(urxBackslashX, 0) + + case doBackslashZ: + c.fixLiterals(false) + c.appendOp(urxDollar, 0) + + case doBackslashz: + c.fixLiterals(false) + c.appendOp(urxBackslashZ, 0) + + case doEscapeError: + c.error(BadEscapeSequence) + + case doExit: + c.fixLiterals(false) + return false + + case doProperty: + c.fixLiterals(false) + theSet := c.scanProp() + c.compileSet(theSet) + + case doNamedChar: + ch := c.scanNamedChar() + c.literalChar(ch) + + case doBackRef: + // BackReference. Somewhat unusual in that the front-end can not completely parse + // the regular expression, because the number of digits to be consumed + // depends on the number of capture groups that have been defined. So + // we have to do it here instead. + numCaptureGroups := len(c.out.groupMap) + groupNum := int64(0) + ch := c.c.char + + for { + // Loop once per digit, for max allowed number of digits in a back reference. + digit := uCharDigitValue(ch) + groupNum = groupNum*10 + digit + if groupNum >= int64(numCaptureGroups) { + break + } + ch = c.peekCharLL() + if !staticRuleSet[ruleSetDigitChar-128].ContainsRune(ch) { + break + } + c.nextCharLL() + } + + // Scan of the back reference in the source regexp is complete. Now generate + // the compiled code for it. + // Because capture groups can be forward-referenced by back-references, + // we fill the operand with the capture group number. At the end + // of compilation, it will be changed to the variable's location. + if groupNum == 0 { + panic("\\0 begins an octal escape sequence, and shouldn't enter this code path at all") + } + c.fixLiterals(false) + if (c.modeFlags & CaseInsensitive) != 0 { + c.appendOp(urxBackrefI, int(groupNum)) + } else { + c.appendOp(urxBackref, int(groupNum)) + } + + case doBeginNamedBackRef: + if c.captureName != nil { + panic("should not replace capture name") + } + c.captureName = &strings.Builder{} + + case doContinueNamedBackRef: + c.captureName.WriteRune(c.c.char) + + case doCompleteNamedBackRef: + { + groupNumber := c.out.namedCaptureMap[c.captureName.String()] + if groupNumber == 0 { + // Group name has not been defined. + // Could be a forward reference. If we choose to support them at some + // future time, extra mechanism will be required at this point. + c.error(InvalidCaptureGroupName) + } else { + // Given the number, handle identically to a \n numbered back reference. + // See comments above, under doBackRef + c.fixLiterals(false) + if (c.modeFlags & CaseInsensitive) != 0 { + c.appendOp(urxBackrefI, groupNumber) + } else { + c.appendOp(urxBackref, groupNumber) + } + } + c.captureName = nil + } + + case doPossessivePlus: + // Possessive ++ quantifier. + // Compiles to + // 1. STO_SP + // 2. body of stuff being iterated over + // 3. STATE_SAVE 5 + // 4. JMP 2 + // 5. LD_SP + // 6. ... + // + // Note: TODO: This is pretty inefficient. A mass of saved state is built up + // then unconditionally discarded. Perhaps introduce a new opcode. Ticket 6056 + // + // Emit the STO_SP + topLoc := c.blockTopLoc(true) + stoLoc := c.allocateData(1) // Reserve the data location for storing save stack ptr. + op := c.buildOp(urxStoSp, stoLoc) + c.out.compiledPat[topLoc] = op + + // Emit the STATE_SAVE + c.appendOp(urxStateSave, len(c.out.compiledPat)+2) + + // Emit the JMP + c.appendOp(urxJmp, topLoc+1) + + // Emit the LD_SP + c.appendOp(urxLdSp, stoLoc) + + case doPossessiveStar: + // Possessive *+ quantifier. + // Compiles to + // 1. STO_SP loc + // 2. STATE_SAVE 5 + // 3. body of stuff being iterated over + // 4. JMP 2 + // 5. LD_SP loc + // 6 ... + // TODO: do something to cut back the state stack each time through the loop. + // Reserve two slots at the top of the block. + topLoc := c.blockTopLoc(true) + c.insertOp(topLoc) + + // emit STO_SP loc + stoLoc := c.allocateData(1) // Reserve the data location for storing save stack ptr. + op := c.buildOp(urxStoSp, stoLoc) + c.out.compiledPat[topLoc] = op + + // Emit the SAVE_STATE 5 + L7 := len(c.out.compiledPat) + 1 + op = c.buildOp(urxStateSave, L7) + c.out.compiledPat[topLoc+1] = op + + // Append the JMP operation. + c.appendOp(urxJmp, topLoc+1) + + // Emit the LD_SP loc + c.appendOp(urxLdSp, stoLoc) + + case doPossessiveOpt: + // Possessive ?+ quantifier. + // Compiles to + // 1. STO_SP loc + // 2. SAVE_STATE 5 + // 3. body of optional block + // 4. LD_SP loc + // 5. ... + // + // Reserve two slots at the top of the block. + topLoc := c.blockTopLoc(true) + c.insertOp(topLoc) + + // Emit the STO_SP + stoLoc := c.allocateData(1) // Reserve the data location for storing save stack ptr. + op := c.buildOp(urxStoSp, stoLoc) + c.out.compiledPat[topLoc] = op + + // Emit the SAVE_STATE + continueLoc := len(c.out.compiledPat) + 1 + op = c.buildOp(urxStateSave, continueLoc) + c.out.compiledPat[topLoc+1] = op + + // Emit the LD_SP + c.appendOp(urxLdSp, stoLoc) + + case doBeginMatchMode: + c.newModeFlags = c.modeFlags + c.setModeFlag = true + case doMatchMode: // (?i) and similar + var bit RegexpFlag + switch c.c.char { + case 0x69: /* 'i' */ + bit = CaseInsensitive + case 0x64: /* 'd' */ + bit = UnixLines + case 0x6d: /* 'm' */ + bit = Multiline + case 0x73: /* 's' */ + bit = DotAll + case 0x75: /* 'u' */ + bit = 0 /* Unicode casing */ + case 0x77: /* 'w' */ + bit = UWord + case 0x78: /* 'x' */ + bit = Comments + case 0x2d: /* '-' */ + c.setModeFlag = false + default: + // Should never happen. Other chars are filtered out by the scanner. + panic("unreachable") + } + if c.setModeFlag { + c.newModeFlags |= bit + } else { + c.newModeFlags &= ^bit + } + + case doSetMatchMode: + // Emit code to match any pending literals, using the not-yet changed match mode. + c.fixLiterals(false) + + // We've got a (?i) or similar. The match mode is being changed, but + // the change is not scoped to a parenthesized block. + if c.newModeFlags >= 0 { + panic("cNewModeFlags not properly initialized") + } + c.modeFlags = c.newModeFlags + + case doMatchModeParen: + // We've got a (?i: or similar. Begin a parenthesized block, save old + // mode flags so they can be restored at the close of the block. + // + // Compile to a + // - NOP, which later may be replaced by a save-state if the + // parenthesized group gets a * quantifier, followed by + // - NOP, which may later be replaced by a save-state if there + // is an '|' alternation within the parens. + c.fixLiterals(false) + c.appendOp(urxNop, 0) + c.appendOp(urxNop, 0) + + // On the Parentheses stack, start a new frame and add the postions + // of the two NOPs (a normal non-capturing () frame, except for the + // saving of the orignal mode flags.) + c.parenStack = append(c.parenStack, int(c.modeFlags)) + c.parenStack = append(c.parenStack, parenFlags) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-2) + c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1) + + // Set the current mode flags to the new values. + if c.newModeFlags >= 0 { + panic("cNewModeFlags not properly initialized") + } + c.modeFlags = c.newModeFlags + + case doBadModeFlag: + c.error(InvalidFlag) + + case doSuppressComments: + // We have just scanned a '(?'. We now need to prevent the character scanner from + // treating a '#' as a to-the-end-of-line comment. + // (This Perl compatibility just gets uglier and uglier to do...) + c.eolComments = false + + case doSetAddAmp: + set := c.setStack[len(c.setStack)-1] + set.AddRune(chAmp) + + case doSetAddDash: + set := c.setStack[len(c.setStack)-1] + set.AddRune(chDash) + + case doSetBackslashs: + set := c.setStack[len(c.setStack)-1] + set.AddAll(staticPropertySets[urxIsspaceSet]) + + case doSetBackslashS: + sset := uset.New() + sset.AddAll(staticPropertySets[urxIsspaceSet]) // TODO: add latin1 spaces + sset.Complement() + + set := c.setStack[len(c.setStack)-1] + set.AddAll(sset) + + case doSetBackslashd: + set := c.setStack[len(c.setStack)-1] + c.err = uprops.AddCategory(set, uchar.GcNdMask) + + case doSetBackslashD: + digits := uset.New() + c.err = uprops.ApplyIntPropertyValue(digits, uprops.UCharGeneralCategoryMask, int32(uchar.GcNdMask)) + digits.Complement() + set := c.setStack[len(c.setStack)-1] + set.AddAll(digits) + + case doSetBackslashh: + h := uset.New() + c.err = uprops.ApplyIntPropertyValue(h, uprops.UCharGeneralCategoryMask, int32(uchar.GcZsMask)) + h.AddRune(9) // Tab + + set := c.setStack[len(c.setStack)-1] + set.AddAll(h) + + case doSetBackslashH: + h := uset.New() + c.err = uprops.ApplyIntPropertyValue(h, uprops.UCharGeneralCategoryMask, int32(uchar.GcZsMask)) + h.AddRune(9) // Tab + h.Complement() + + set := c.setStack[len(c.setStack)-1] + set.AddAll(h) + + case doSetBackslashv: + set := c.setStack[len(c.setStack)-1] + set.AddRuneRange(0x0a, 0x0d) // add range + set.AddRune(0x85) + set.AddRuneRange(0x2028, 0x2029) + + case doSetBackslashV: + v := uset.New() + v.AddRuneRange(0x0a, 0x0d) // add range + v.AddRune(0x85) + v.AddRuneRange(0x2028, 0x2029) + v.Complement() + + set := c.setStack[len(c.setStack)-1] + set.AddAll(v) + + case doSetBackslashw: + set := c.setStack[len(c.setStack)-1] + set.AddAll(staticPropertySets[urxIswordSet]) + + case doSetBackslashW: + sset := uset.New() + sset.AddAll(staticPropertySets[urxIswordSet]) + sset.Complement() + + set := c.setStack[len(c.setStack)-1] + set.AddAll(sset) + + case doSetBegin: + c.fixLiterals(false) + c.setStack = append(c.setStack, uset.New()) + c.setOpStack = append(c.setOpStack, setStart) + if (c.modeFlags & CaseInsensitive) != 0 { + c.setOpStack = append(c.setOpStack, setCaseClose) + } + + case doSetBeginDifference1: + // We have scanned something like [[abc]-[ + // Set up a new UnicodeSet for the set beginning with the just-scanned '[' + // Push a Difference operator, which will cause the new set to be subtracted from what + // went before once it is created. + c.setPushOp(setDifference1) + c.setOpStack = append(c.setOpStack, setStart) + if (c.modeFlags & CaseInsensitive) != 0 { + c.setOpStack = append(c.setOpStack, setCaseClose) + } + + case doSetBeginIntersection1: + // We have scanned something like [[abc]&[ + // Need both the '&' operator and the open '[' operator. + c.setPushOp(setIntersection1) + c.setOpStack = append(c.setOpStack, setStart) + if (c.modeFlags & CaseInsensitive) != 0 { + c.setOpStack = append(c.setOpStack, setCaseClose) + } + + case doSetBeginUnion: + // We have scanned something like [[abc][ + // Need to handle the union operation explicitly [[abc] | [ + c.setPushOp(setUnion) + c.setOpStack = append(c.setOpStack, setStart) + if (c.modeFlags & CaseInsensitive) != 0 { + c.setOpStack = append(c.setOpStack, setCaseClose) + } + + case doSetDifference2: + // We have scanned something like [abc-- + // Consider this to unambiguously be a set difference operator. + c.setPushOp(setDifference2) + + case doSetEnd: + // Have encountered the ']' that closes a set. + // Force the evaluation of any pending operations within this set, + // leave the completed set on the top of the set stack. + c.setEval(setEnd) + var start setOperation + start, c.setOpStack = stackPop(c.setOpStack) + if start != setStart { + panic("bad set operation in stack") + } + + case doSetFinish: + // Finished a complete set expression, including all nested sets. + // The close bracket has already triggered clearing out pending set operators, + // the operator stack should be empty and the operand stack should have just + // one entry, the result set. + if len(c.setOpStack) > 0 { + panic("expected setOpStack to be empty") + } + var set *uset.UnicodeSet + set, c.setStack = stackPop(c.setStack) + c.compileSet(set) + + case doSetIntersection2: + // Have scanned something like [abc&& + c.setPushOp(setIntersection2) + + case doSetLiteral: + // Union the just-scanned literal character into the set being built. + // This operation is the highest precedence set operation, so we can always do + // it immediately, without waiting to see what follows. It is necessary to perform + // any pending '-' or '&' operation first, because these have the same precedence + // as union-ing in a literal' + c.setEval(setUnion) + set := c.setStack[len(c.setStack)-1] + set.AddRune(c.c.char) + c.lastSetLiteral = c.c.char + + case doSetLiteralEscaped: + // A back-slash escaped literal character was encountered. + // Processing is the same as with setLiteral, above, with the addition of + // the optional check for errors on escaped ASCII letters. + if (c.modeFlags&ErrorOnUnknownEscapes) != 0 && + ((c.c.char >= 0x41 && c.c.char <= 0x5A) || // in [A-Z] + (c.c.char >= 0x61 && c.c.char <= 0x7a)) { // in [a-z] + c.error(BadEscapeSequence) + } + c.setEval(setUnion) + set := c.setStack[len(c.setStack)-1] + set.AddRune(c.c.char) + c.lastSetLiteral = c.c.char + + case doSetNamedChar: + // Scanning a \N{UNICODE CHARACTER NAME} + // Aside from the source of the character, the processing is identical to doSetLiteral, + // above. + ch := c.scanNamedChar() + c.setEval(setUnion) + set := c.setStack[len(c.setStack)-1] + set.AddRune(ch) + c.lastSetLiteral = ch + + case doSetNamedRange: + // We have scanned literal-\N{CHAR NAME}. Add the range to the set. + // The left character is already in the set, and is saved in fLastSetLiteral. + // The right side needs to be picked up, the scan is at the 'N'. + // Lower Limit > Upper limit being an error matches both Java + // and ICU UnicodeSet behavior. + ch := c.scanNamedChar() + if c.err == nil && (c.lastSetLiteral == -1 || c.lastSetLiteral > ch) { + c.error(InvalidRange) + } + set := c.setStack[len(c.setStack)-1] + set.AddRuneRange(c.lastSetLiteral, ch) + c.lastSetLiteral = ch + + case doSetNegate: + // Scanned a '^' at the start of a set. + // Push the negation operator onto the set op stack. + // A twist for case-insensitive matching: + // the case closure operation must happen _before_ negation. + // But the case closure operation will already be on the stack if it's required. + // This requires checking for case closure, and swapping the stack order + // if it is present. + tosOp := c.setOpStack[len(c.setOpStack)-1] + if tosOp == setCaseClose { + _, c.setOpStack = stackPop(c.setOpStack) + c.setOpStack = append(c.setOpStack, setNegation) + c.setOpStack = append(c.setOpStack, setCaseClose) + } else { + c.setOpStack = append(c.setOpStack, setNegation) + } + + case doSetNoCloseError: + c.error(MissingCloseBracket) + + case doSetOpError: + c.error(RuleSyntax) // -- or && at the end of a set. Illegal. + + case doSetPosixProp: + if set := c.scanPosixProp(); set != nil { + c.setStack[len(c.setStack)-1].AddAll(set) + } + + case doSetProp: + // Scanned a \p \P within [brackets]. + if set := c.scanProp(); set != nil { + c.setStack[len(c.setStack)-1].AddAll(set) + } + + case doSetRange: + // We have scanned literal-literal. Add the range to the set. + // The left character is already in the set, and is saved in fLastSetLiteral. + // The right side is the current character. + // Lower Limit > Upper limit being an error matches both Java + // and ICU UnicodeSet behavior. + + if c.lastSetLiteral == -1 || c.lastSetLiteral > c.c.char { + c.error(InvalidRange) + } + c.setStack[len(c.setStack)-1].AddRuneRange(c.lastSetLiteral, c.c.char) + + default: + panic("unexpected OP in parser") + } + + return c.err == nil +} + +func uCharDigitValue(char rune) int64 { + if char >= '0' && char <= '9' { + return int64(char - '0') + } + return -1 +} + +func stackPop[T any](stack []T) (T, []T) { + var out T + if len(stack) > 0 { + out = stack[len(stack)-1] + stack = stack[:len(stack)-1] + } + return out, stack +} + +func (c *compiler) error(e CompileErrorCode) { + c.err = &CompileError{ + Code: e, + Line: c.lineNum, + Offset: c.charNum, + Context: c.out.pattern, + } +} + +func (c *compiler) stripNOPs() { + if c.err != nil { + return + } + + end := len(c.out.compiledPat) + deltas := make([]int, 0, end) + + // Make a first pass over the code, computing the amount that things + // will be offset at each location in the original code. + var loc, d int + for loc = 0; loc < end; loc++ { + deltas = append(deltas, d) + op := c.out.compiledPat[loc] + if op.typ() == urxNop { + d++ + } + } + + // Make a second pass over the code, removing the NOPs by moving following + // code up, and patching operands that refer to code locations that + // are being moved. The array of offsets from the first step is used + // to compute the new operand values. + var src, dst int + for src = 0; src < end; src++ { + op := c.out.compiledPat[src] + opType := op.typ() + + switch opType { + case urxNop: + // skip + + case urxStateSave, + urxJmp, + utxCtrLoop, + urxCtrLoopNg, + urxRelocOprnd, + urxJmpx, + urxJmpSav, + urxJmpSavX: + // These are instructions with operands that refer to code locations. + operandAddress := op.value() + fixedOperandAddress := operandAddress - deltas[operandAddress] + op = c.buildOp(opType, fixedOperandAddress) + c.out.compiledPat[dst] = op + dst++ + + case urxBackref, urxBackrefI: + where := op.value() + if where > len(c.out.groupMap) { + c.error(InvalidBackRef) + break + } + + where = int(c.out.groupMap[where-1]) + op = c.buildOp(opType, where) + c.out.compiledPat[dst] = op + dst++ + c.out.needsAltInput = true + + case urxReservedOp, + urxReservedOpN, + urxBacktrack, + urxEnd, + urxOnechar, + urxString, + urxStringLen, + urxStartCapture, + urxEndCapture, + urxStaticSetref, + urxStatSetrefN, + urxSetref, + urxDotany, + urxFail, + urxBackslashB, + urxBackslashBu, + urxBackslashG, + urxBackslashX, + urxBackslashZ, + urxDotanyAll, + urxBackslashD, + urxCaret, + urxDollar, + urxCtrInit, + urxCtrInitNg, + urxDotanyUnix, + urxStoSp, + urxLdSp, + urxStoInpLoc, + urxLaStart, + urxLaEnd, + urcOnecharI, + urxStringI, + urxDollarM, + urxCaretM, + urxCaretMUnix, + urxLbStart, + urxLbCont, + urxLbEnd, + urxLbnCount, + urxLbnEnd, + urxLoopSrI, + urxLoopDotI, + urxLoopC, + urxDollarD, + urxDollarMd, + urxBackslashH, + urxBackslashR, + urxBackslashV: + // These instructions are unaltered by the relocation. + c.out.compiledPat[dst] = op + dst++ + + default: + // Some op is unaccounted for. + panic("unreachable") + } + } + + c.out.compiledPat = c.out.compiledPat[:dst] +} + +func (c *compiler) matchStartType() { + var loc int // Location in the pattern of the current op being processed. + var currentLen int32 // Minimum length of a match to this point (loc) in the pattern + var numInitialStrings int // Number of strings encountered that could match at start. + var atStart = true // True if no part of the pattern yet encountered + // could have advanced the position in a match. + // (Maximum match length so far == 0) + + // forwardedLength is a vector holding minimum-match-length values that + // are propagated forward in the pattern by JMP or STATE_SAVE operations. + // It must be one longer than the pattern being checked because some ops + // will jmp to a end-of-block+1 location from within a block, and we must + // count those when checking the block. + end := len(c.out.compiledPat) + forwardedLength := make([]int32, end+1) + + for loc = 3; loc < end; loc++ { + forwardedLength[loc] = math.MaxInt32 + } + + for loc = 3; loc < end; loc++ { + op := c.out.compiledPat[loc] + opType := op.typ() + + // The loop is advancing linearly through the pattern. + // If the op we are now at was the destination of a branch in the pattern, + // and that path has a shorter minimum length than the current accumulated value, + // replace the current accumulated value. + if forwardedLength[loc] < currentLen { + currentLen = forwardedLength[loc] + } + + switch opType { + // Ops that don't change the total length matched + case urxReservedOp, + urxEnd, + urxFail, + urxStringLen, + urxNop, + urxStartCapture, + urxEndCapture, + urxBackslashB, + urxBackslashBu, + urxBackslashG, + urxBackslashZ, + urxDollar, + urxDollarM, + urxDollarD, + urxDollarMd, + urxRelocOprnd, + urxStoInpLoc, + urxBackref, // BackRef. Must assume that it might be a zero length match + urxBackrefI, + urxStoSp, // Setup for atomic or possessive blocks. Doesn't change what can match. + urxLdSp: + // skip + + case urxCaret: + if atStart { + c.out.startType = startStart + } + + case urxCaretM, urxCaretMUnix: + if atStart { + c.out.startType = startLine + } + + case urxOnechar: + if currentLen == 0 { + // This character could appear at the start of a match. + // Add it to the set of possible starting characters. + c.out.initialChars.AddRune(op.value32()) + numInitialStrings += 2 + } + currentLen = safeIncrement(currentLen, 1) + atStart = false + + case urxSetref: + if currentLen == 0 { + sn := op.value() + set := c.out.sets[sn] + c.out.initialChars.AddAll(set) + numInitialStrings += 2 + } + currentLen = safeIncrement(currentLen, 1) + atStart = false + + case urxLoopSrI: + // [Set]*, like a SETREF, above, in what it can match, + // but may not match at all, so currentLen is not incremented. + if currentLen == 0 { + sn := op.value() + set := c.out.sets[sn] + c.out.initialChars.AddAll(set) + numInitialStrings += 2 + } + atStart = false + + case urxLoopDotI: + if currentLen == 0 { + // .* at the start of a pattern. + // Any character can begin the match. + c.out.initialChars.Clear() + c.out.initialChars.Complement() + numInitialStrings += 2 + } + atStart = false + + case urxStaticSetref: + if currentLen == 0 { + sn := op.value() + c.out.initialChars.AddAll(staticPropertySets[sn]) + numInitialStrings += 2 + } + currentLen = safeIncrement(currentLen, 1) + atStart = false + + case urxStatSetrefN: + if currentLen == 0 { + sn := op.value() + sc := uset.New() + sc.AddAll(staticPropertySets[sn]) + sc.Complement() + + c.out.initialChars.AddAll(sc) + numInitialStrings += 2 + } + currentLen = safeIncrement(currentLen, 1) + atStart = false + + case urxBackslashD: + // Digit Char + if currentLen == 0 { + s := uset.New() + c.err = uprops.ApplyIntPropertyValue(s, uprops.UCharGeneralCategoryMask, int32(uchar.GcNdMask)) + if op.value() != 0 { + s.Complement() + } + c.out.initialChars.AddAll(s) + numInitialStrings += 2 + } + currentLen = safeIncrement(currentLen, 1) + atStart = false + + case urxBackslashH: + // Horiz white space + if currentLen == 0 { + s := uset.New() + c.err = uprops.ApplyIntPropertyValue(s, uprops.UCharGeneralCategoryMask, int32(uchar.GcZsMask)) + s.AddRune(9) // Tab + if op.value() != 0 { + s.Complement() + } + c.out.initialChars.AddAll(s) + numInitialStrings += 2 + } + currentLen = safeIncrement(currentLen, 1) + atStart = false + + case urxBackslashR, // Any line ending sequence + urxBackslashV: // Any line ending code point, with optional negation + if currentLen == 0 { + s := uset.New() + s.AddRuneRange(0x0a, 0x0d) // add range + s.AddRune(0x85) + s.AddRuneRange(0x2028, 0x2029) + if op.value() != 0 { + // Complement option applies to URX_BACKSLASH_V only. + s.Complement() + } + c.out.initialChars.AddAll(s) + numInitialStrings += 2 + } + currentLen = safeIncrement(currentLen, 1) + atStart = false + + case urcOnecharI: + // Case Insensitive Single Character. + if currentLen == 0 { + ch := op.value32() + if uprops.HasBinaryProperty(ch, uprops.UCharCaseSensitive) { + starters := uset.New() + starters.AddRuneRange(ch, ch) + starters.CloseOver(uset.CaseInsensitive) + // findCaseInsensitiveStarters(c, &starters); + // For ONECHAR_I, no need to worry about text chars that expand on folding into + // strings. The expanded folding can't match the pattern. + c.out.initialChars.AddAll(starters) + } else { + // Char has no case variants. Just add it as-is to the + // set of possible starting chars. + c.out.initialChars.AddRune(ch) + } + numInitialStrings += 2 + } + currentLen = safeIncrement(currentLen, 1) + atStart = false + + case urxBackslashX, // Grapheme Cluster. Minimum is 1, max unbounded. + urxDotanyAll, // . matches one or two. + urxDotany, + urxDotanyUnix: + if currentLen == 0 { + // These constructs are all bad news when they appear at the start + // of a match. Any character can begin the match. + c.out.initialChars.Clear() + c.out.initialChars.Complement() + numInitialStrings += 2 + } + currentLen = safeIncrement(currentLen, 1) + atStart = false + + case urxJmpx: + loc++ // Except for extra operand on URX_JMPX, same as URX_JMP. + fallthrough + + case urxJmp: + jmpDest := op.value() + if jmpDest < loc { + // Loop of some kind. Can safely ignore, the worst that will happen + // is that we understate the true minimum length + currentLen = forwardedLength[loc+1] + } else { + // Forward jump. Propagate the current min length to the target loc of the jump. + if forwardedLength[jmpDest] > currentLen { + forwardedLength[jmpDest] = currentLen + } + } + atStart = false + + case urxJmpSav, + urxJmpSavX: + // Combo of state save to the next loc, + jmp backwards. + // Net effect on min. length computation is nothing. + atStart = false + + case urxBacktrack: + // Fails are kind of like a branch, except that the min length was + // propagated already, by the state save. + currentLen = forwardedLength[loc+1] + atStart = false + + case urxStateSave: + // State Save, for forward jumps, propagate the current minimum. + // of the state save. + jmpDest := op.value() + if jmpDest > loc { + if currentLen < forwardedLength[jmpDest] { + forwardedLength[jmpDest] = (currentLen) + } + } + atStart = false + + case urxString: + loc++ + stringLenOp := c.out.compiledPat[loc] + stringLen := stringLenOp.value() + if currentLen == 0 { + // Add the starting character of this string to the set of possible starting + // characters for this pattern. + stringStartIdx := op.value() + ch := c.out.literalText[stringStartIdx] + c.out.initialChars.AddRune(ch) + + // Remember this string. After the entire pattern has been checked, + // if nothing else is identified that can start a match, we'll use it. + numInitialStrings++ + c.out.initialStringIdx = stringStartIdx + c.out.initialStringLen = stringLen + } + + currentLen = safeIncrement(currentLen, stringLen) + atStart = false + + case urxStringI: + // Case-insensitive string. Unlike exact-match strings, we won't + // attempt a string search for possible match positions. But we + // do update the set of possible starting characters. + loc++ + stringLenOp := c.out.compiledPat[loc] + stringLen := stringLenOp.value() + if currentLen == 0 { + // Add the starting character of this string to the set of possible starting + // characters for this pattern. + stringStartIdx := op.value() + ch := c.out.literalText[stringStartIdx] + s := uset.New() + c.findCaseInsensitiveStarters(ch, s) + c.out.initialChars.AddAll(s) + numInitialStrings += 2 // Matching on an initial string not possible. + } + currentLen = safeIncrement(currentLen, stringLen) + atStart = false + + case urxCtrInit, + urxCtrInitNg: + // Loop Init Ops. These don't change the min length, but they are 4 word ops + // so location must be updated accordingly. + // Loop Init Ops. + // If the min loop count == 0 + // move loc forwards to the end of the loop, skipping over the body. + // If the min count is > 0, + // continue normal processing of the body of the loop. + loopEndLoc := c.out.compiledPat[loc+1].value() + minLoopCount := int(c.out.compiledPat[loc+2]) + if minLoopCount == 0 { + // Min Loop Count of 0, treat like a forward branch and + // move the current minimum length up to the target + // (end of loop) location. + if forwardedLength[loopEndLoc] > currentLen { + forwardedLength[loopEndLoc] = currentLen + } + } + loc += 3 // Skips over operands of CTR_INIT + atStart = false + + case utxCtrLoop, + urxCtrLoopNg: + // Loop ops. + // The jump is conditional, backwards only. + atStart = false + + case urxLoopC: + // More loop ops. These state-save to themselves. + // don't change the minimum match + atStart = false + + case urxLaStart, + urxLbStart: + // Look-around. Scan forward until the matching look-ahead end, + // without processing the look-around block. This is overly pessimistic. + + // Keep track of the nesting depth of look-around blocks. Boilerplate code for + // lookahead contains two LA_END instructions, so count goes up by two + // for each LA_START. + var depth int + if opType == urxLaStart { + depth = 2 + } else { + depth = 1 + } + for { + loc++ + op = c.out.compiledPat[loc] + if op.typ() == urxLaStart { + depth += 2 + } + if op.typ() == urxLbStart { + depth++ + } + if op.typ() == urxLaEnd || op.typ() == urxLbnEnd { + depth-- + if depth == 0 { + break + } + } + if op.typ() == urxStateSave { + // Need this because neg lookahead blocks will FAIL to outside + // of the block. + jmpDest := op.value() + if jmpDest > loc { + if currentLen < forwardedLength[jmpDest] { + forwardedLength[jmpDest] = (currentLen) + } + } + } + } + + case urxLaEnd, + urxLbCont, + urxLbEnd, + urxLbnCount, + urxLbnEnd: + panic("should be consumed in URX_LA_START") + + default: + panic("unreachable") + } + } + + // Sort out what we should check for when looking for candidate match start positions. + // In order of preference, + // 1. Start of input text buffer. + // 2. A literal string. + // 3. Start of line in multi-line mode. + // 4. A single literal character. + // 5. A character from a set of characters. + // + if c.out.startType == startStart { + // Match only at the start of an input text string. + // start type is already set. We're done. + } else if numInitialStrings == 1 && c.out.minMatchLen > 0 { + // Match beginning only with a literal string. + ch := c.out.literalText[c.out.initialStringIdx] + c.out.startType = startString + c.out.initialChar = ch + } else if c.out.startType == startLine { + // Match at start of line in Multi-Line mode. + // Nothing to do here; everything is already set. + } else if c.out.minMatchLen == 0 { + // Zero length match possible. We could start anywhere. + c.out.startType = startNoInfo + } else if c.out.initialChars.Len() == 1 { + // All matches begin with the same char. + c.out.startType = startChar + c.out.initialChar = c.out.initialChars.RuneAt(0) + } else if !c.out.initialChars.ContainsRuneRange(0, 0x10ffff) && c.out.minMatchLen > 0 { + // Matches start with a set of character smaller than the set of all chars. + c.out.startType = startSet + } else { + // Matches can start with anything + c.out.startType = startNoInfo + } +} + +func (c *compiler) appendOp(typ opcode, arg int) { + c.appendIns(c.buildOp(typ, arg)) +} + +func (c *compiler) appendIns(ins instruction) { + if c.err != nil { + return + } + c.out.compiledPat = append(c.out.compiledPat, ins) +} + +func (c *compiler) buildOp(typ opcode, val int) instruction { + if c.err != nil { + return 0 + } + if val > 0x00ffffff { + panic("bad argument to buildOp") + } + if val < 0 { + if !(typ == urxReservedOpN || typ == urxReservedOp) { + panic("bad value to buildOp") + } + typ = urxReservedOpN + } + return instruction(int32(typ)<<24 | int32(val)) +} + +func (c *compiler) handleCloseParen() { + if len(c.parenStack) == 0 { + c.error(MismatchedParen) + return + } + + c.fixLiterals(false) + + var patIdx int + var patOp instruction + + for { + patIdx, c.parenStack = stackPop(c.parenStack) + if patIdx < 0 { + break + } + + patOp = c.out.compiledPat[patIdx] + if patOp.value() != 0 { + panic("branch target for JMP should not be set") + } + patOp |= instruction(len(c.out.compiledPat)) + c.out.compiledPat[patIdx] = patOp + c.matchOpenParen = patIdx + } + + var modeFlags int + modeFlags, c.parenStack = stackPop(c.parenStack) + if modeFlags >= 0 { + panic("modeFlags in paren stack was not negated") + } + + c.modeFlags = RegexpFlag(modeFlags) + + switch patIdx { + case parenPlain, parenFlags: + // No additional fixups required. + // (Grouping-only parentheses) + case parenCapturing: + // Capturing Parentheses. + // Insert a End Capture op into the pattern. + // The frame offset of the variables for this cg is obtained from the + // start capture op and put it into the end-capture op. + + captureOp := c.out.compiledPat[c.matchOpenParen+1] + if captureOp.typ() != urxStartCapture { + panic("bad type in capture op (expected URX_START_CAPTURE)") + } + frameVarLocation := captureOp.value() + c.appendOp(urxEndCapture, frameVarLocation) + + case parenAtomic: + // Atomic Parenthesis. + // Insert a LD_SP operation to restore the state stack to the position + // it was when the atomic parens were entered. + stoOp := c.out.compiledPat[c.matchOpenParen+1] + if stoOp.typ() != urxStoSp { + panic("bad type in capture op (expected URX_STO_SP)") + } + stoLoc := stoOp.value() + c.appendOp(urxLdSp, stoLoc) + + case parenLookahead: + startOp := c.out.compiledPat[c.matchOpenParen-5] + if startOp.typ() != urxLaStart { + panic("bad type in capture op (expected URX_LA_START)") + } + dataLoc := startOp.value() + c.appendOp(urxLaEnd, dataLoc) + + case parenNegLookahead: + startOp := c.out.compiledPat[c.matchOpenParen-1] + if startOp.typ() != urxLaStart { + panic("bad type in capture op (expected URX_LA_START)") + } + dataLoc := startOp.value() + c.appendOp(urxLaEnd, dataLoc) + c.appendOp(urxBacktrack, 0) + c.appendOp(urxLaEnd, dataLoc) + + // Patch the URX_SAVE near the top of the block. + // The destination of the SAVE is the final LA_END that was just added. + saveOp := c.out.compiledPat[c.matchOpenParen] + if saveOp.typ() != urxStateSave { + panic("bad type in capture op (expected URX_STATE_SAVE)") + } + saveOp = c.buildOp(urxStateSave, len(c.out.compiledPat)-1) + c.out.compiledPat[c.matchOpenParen] = saveOp + + case parenLookBehind: + startOp := c.out.compiledPat[c.matchOpenParen-4] + if startOp.typ() != urxLbStart { + panic("bad type in capture op (expected URX_LB_START)") + } + dataLoc := startOp.value() + c.appendOp(urxLbEnd, dataLoc) + c.appendOp(urxLaEnd, dataLoc) + + // Determine the min and max bounds for the length of the + // string that the pattern can match. + // An unbounded upper limit is an error. + patEnd := len(c.out.compiledPat) - 1 + minML := c.minMatchLength(c.matchOpenParen, patEnd) + maxML := c.maxMatchLength(c.matchOpenParen, patEnd) + + if maxML == math.MaxInt32 { + c.error(LookBehindLimit) + break + } + if minML == math.MaxInt32 { + // This condition happens when no match is possible, such as with a + // [set] expression containing no elements. + // In principle, the generated code to evaluate the expression could be deleted, + // but it's probably not worth the complication. + minML = 0 + } + + c.out.compiledPat[c.matchOpenParen-2] = instruction(minML) + c.out.compiledPat[c.matchOpenParen-1] = instruction(maxML) + + case parenLookBehindN: + startOp := c.out.compiledPat[c.matchOpenParen-5] + if startOp.typ() != urxLbStart { + panic("bad type in capture op (expected URX_LB_START)") + } + dataLoc := startOp.value() + c.appendOp(urxLbnEnd, dataLoc) + + // Determine the min and max bounds for the length of the + // string that the pattern can match. + // An unbounded upper limit is an error. + patEnd := len(c.out.compiledPat) - 1 + minML := c.minMatchLength(c.matchOpenParen, patEnd) + maxML := c.maxMatchLength(c.matchOpenParen, patEnd) + + if instruction(maxML).typ() != 0 { + c.error(LookBehindLimit) + break + } + if maxML == math.MaxInt32 { + c.error(LookBehindLimit) + break + } + if minML == math.MaxInt32 { + // This condition happens when no match is possible, such as with a + // [set] expression containing no elements. + // In principle, the generated code to evaluate the expression could be deleted, + // but it's probably not worth the complication. + minML = 0 + } + + c.out.compiledPat[c.matchOpenParen-3] = instruction(minML) + c.out.compiledPat[c.matchOpenParen-2] = instruction(maxML) + + op := c.buildOp(urxRelocOprnd, len(c.out.compiledPat)) + c.out.compiledPat[c.matchOpenParen-1] = op + + default: + panic("unexpected opcode in parenStack") + } + + c.matchCloseParen = len(c.out.compiledPat) +} + +func (c *compiler) fixLiterals(split bool) { + if len(c.literalChars) == 0 { + return + } + + lastCodePoint := c.literalChars[len(c.literalChars)-1] + + // Split: We need to ensure that the last item in the compiled pattern + // refers only to the last literal scanned in the pattern, so that + // quantifiers (*, +, etc.) affect only it, and not a longer string. + // Split before case folding for case insensitive matches. + if split { + c.literalChars = c.literalChars[:len(c.literalChars)-1] + c.fixLiterals(false) + + c.literalChar(lastCodePoint) + c.fixLiterals(false) + return + } + + if c.modeFlags&CaseInsensitive != 0 { + c.literalChars = ucase.FoldRunes(c.literalChars) + lastCodePoint = c.literalChars[len(c.literalChars)-1] + } + + if len(c.literalChars) == 1 { + if c.modeFlags&CaseInsensitive != 0 && uprops.HasBinaryProperty(lastCodePoint, uprops.UCharCaseSensitive) { + c.appendOp(urcOnecharI, int(lastCodePoint)) + } else { + c.appendOp(urxOnechar, int(lastCodePoint)) + } + } else { + if len(c.literalChars) > 0x00ffffff || len(c.out.literalText) > 0x00ffffff { + c.error(PatternTooBig) + } + if c.modeFlags&CaseInsensitive != 0 { + c.appendOp(urxStringI, len(c.out.literalText)) + } else { + c.appendOp(urxString, len(c.out.literalText)) + } + c.appendOp(urxStringLen, len(c.literalChars)) + c.out.literalText = append(c.out.literalText, c.literalChars...) + } + + c.literalChars = c.literalChars[:0] +} + +func (c *compiler) literalChar(point rune) { + c.literalChars = append(c.literalChars, point) +} + +func (c *compiler) allocateData(size int) int { + if c.err != nil { + return 0 + } + if size <= 0 || size > 0x100 || c.out.dataSize < 0 { + c.error(InternalError) + return 0 + } + + dataIndex := c.out.dataSize + c.out.dataSize += size + if c.out.dataSize >= 0x00fffff0 { + c.error(InternalError) + } + return dataIndex +} + +func (c *compiler) allocateStackData(size int) int { + if c.err != nil { + return 0 + } + if size <= 0 || size > 0x100 || c.out.frameSize < 0 { + c.error(InternalError) + return 0 + } + dataIndex := c.out.frameSize + c.out.frameSize += size + if c.out.frameSize >= 0x00fffff0 { + c.error(InternalError) + } + return dataIndex +} + +func (c *compiler) insertOp(where int) { + if where < 0 || where >= len(c.out.compiledPat) { + panic("insertOp: out of bounds") + } + + nop := c.buildOp(urxNop, 0) + c.out.compiledPat = slices.Insert(c.out.compiledPat, where, nop) + + // Walk through the pattern, looking for any ops with targets that + // were moved down by the insert. Fix them. + for loc, op := range c.out.compiledPat { + switch op.typ() { + case urxJmp, urxJmpx, urxStateSave, utxCtrLoop, urxCtrLoopNg, urxJmpSav, urxJmpSavX, urxRelocOprnd: + if op.value() > where { + op = c.buildOp(op.typ(), op.value()+1) + c.out.compiledPat[loc] = op + } + } + } + + // Now fix up the parentheses stack. All positive values in it are locations in + // the compiled pattern. (Negative values are frame boundaries, and don't need fixing.) + for loc, x := range c.parenStack { + if x > where { + c.parenStack[loc] = x + 1 + } + } + + if c.matchCloseParen > where { + c.matchCloseParen++ + } + if c.matchOpenParen > where { + c.matchOpenParen++ + } +} + +func (c *compiler) blockTopLoc(reserve bool) int { + var loc int + c.fixLiterals(true) + + if len(c.out.compiledPat) == c.matchCloseParen { + // The item just processed is a parenthesized block. + loc = c.matchOpenParen + } else { + // Item just compiled is a single thing, a ".", or a single char, a string or a set reference. + // No slot for STATE_SAVE was pre-reserved in the compiled code. + // We need to make space now. + loc = len(c.out.compiledPat) - 1 + op := c.out.compiledPat[loc] + if op.typ() == urxStringLen { + // Strings take two opcode, we want the position of the first one. + // We can have a string at this point if a single character case-folded to two. + loc-- + } + if reserve { + nop := c.buildOp(urxNop, 0) + c.out.compiledPat = slices.Insert(c.out.compiledPat, loc, nop) + } + } + return loc +} + +func (c *compiler) compileInlineInterval() bool { + if c.intervalUpper > 10 || c.intervalUpper < c.intervalLow { + return false + } + + topOfBlock := c.blockTopLoc(false) + if c.intervalUpper == 0 { + // Pathological case. Attempt no matches, as if the block doesn't exist. + // Discard the generated code for the block. + // If the block included parens, discard the info pertaining to them as well. + c.out.compiledPat = c.out.compiledPat[:topOfBlock] + if c.matchOpenParen >= topOfBlock { + c.matchOpenParen = -1 + } + if c.matchCloseParen >= topOfBlock { + c.matchCloseParen = -1 + } + return true + } + + if topOfBlock != len(c.out.compiledPat)-1 && c.intervalUpper != 1 { + // The thing being repeated is not a single op, but some + // more complex block. Do it as a loop, not inlines. + // Note that things "repeated" a max of once are handled as inline, because + // the one copy of the code already generated is just fine. + return false + } + + // Pick up the opcode that is to be repeated + // + op := c.out.compiledPat[topOfBlock] + + // Compute the pattern location where the inline sequence + // will end, and set up the state save op that will be needed. + // + endOfSequenceLoc := len(c.out.compiledPat) - 1 + c.intervalUpper + (c.intervalUpper - c.intervalLow) + + saveOp := c.buildOp(urxStateSave, endOfSequenceLoc) + if c.intervalLow == 0 { + c.insertOp(topOfBlock) + c.out.compiledPat[topOfBlock] = saveOp + } + + // Loop, emitting the op for the thing being repeated each time. + // Loop starts at 1 because one instance of the op already exists in the pattern, + // it was put there when it was originally encountered. + for i := 1; i < c.intervalUpper; i++ { + if i >= c.intervalLow { + c.appendIns(saveOp) + } + c.appendIns(op) + } + return true +} + +func (c *compiler) compileInterval(init opcode, loop opcode) { + // The CTR_INIT op at the top of the block with the {n,m} quantifier takes + // four slots in the compiled code. Reserve them. + topOfBlock := c.blockTopLoc(true) + c.insertOp(topOfBlock) + c.insertOp(topOfBlock) + c.insertOp(topOfBlock) + + // The operands for the CTR_INIT opcode include the index in the matcher data + // of the counter. Allocate it now. There are two data items + // counterLoc --> Loop counter + // +1 --> Input index (for breaking non-progressing loops) + // (Only present if unbounded upper limit on loop) + var dataSize int + if c.intervalUpper < 0 { + dataSize = 2 + } else { + dataSize = 1 + } + counterLoc := c.allocateStackData(dataSize) + + op := c.buildOp(init, counterLoc) + c.out.compiledPat[topOfBlock] = op + + // The second operand of CTR_INIT is the location following the end of the loop. + // Must put in as a URX_RELOC_OPRND so that the value will be adjusted if the + // compilation of something later on causes the code to grow and the target + // position to move. + loopEnd := len(c.out.compiledPat) + op = c.buildOp(urxRelocOprnd, loopEnd) + c.out.compiledPat[topOfBlock+1] = op + + // Followed by the min and max counts. + c.out.compiledPat[topOfBlock+2] = instruction(c.intervalLow) + c.out.compiledPat[topOfBlock+3] = instruction(c.intervalUpper) + + // Append the CTR_LOOP op. The operand is the location of the CTR_INIT op. + // Goes at end of the block being looped over, so just append to the code so far. + c.appendOp(loop, topOfBlock) + + if (c.intervalLow&0xff000000) != 0 || (c.intervalUpper > 0 && (c.intervalUpper&0xff000000) != 0) { + c.error(NumberTooBig) + } + + if c.intervalLow > c.intervalUpper && c.intervalUpper != -1 { + c.error(MaxLtMin) + } +} + +func (c *compiler) scanNamedChar() rune { + c.nextChar(&c.c) + if c.c.char != chLBrace { + c.error(PropertySyntax) + return 0 + } + + var charName []rune + for { + c.nextChar(&c.c) + if c.c.char == chRBrace { + break + } + if c.c.char == -1 { + c.error(PropertySyntax) + return 0 + } + charName = append(charName, c.c.char) + } + + if !isInvariantUString(charName) { + // All Unicode character names have only invariant characters. + // The API to get a character, given a name, accepts only char *, forcing us to convert, + // which requires this error check + c.error(PropertySyntax) + return 0 + } + + theChar := unames.CharForName(unames.UnicodeCharName, string(charName)) + if c.err != nil { + c.error(PropertySyntax) + } + + c.nextChar(&c.c) // Continue overall regex pattern processing with char after the '}' + return theChar +} + +func isInvariantUString(name []rune) bool { + for _, c := range name { + /* + * no assertions here because these functions are legitimately called + * for strings with variant characters + */ + if !ucharIsInvariant(c) { + return false /* found a variant char */ + } + } + return true +} + +var invariantChars = [...]uint32{ + 0xfffffbff, /* 00..1f but not 0a */ + 0xffffffe5, /* 20..3f but not 21 23 24 */ + 0x87fffffe, /* 40..5f but not 40 5b..5e */ + 0x87fffffe, /* 60..7f but not 60 7b..7e */ +} + +func ucharIsInvariant(c rune) bool { + return c <= 0x7f && (invariantChars[(c)>>5]&(uint32(1)<<(c&0x1f))) != 0 +} + +func (c *compiler) setPushOp(op setOperation) { + c.setEval(op) + c.setOpStack = append(c.setOpStack, op) + c.setStack = append(c.setStack, uset.New()) +} + +func (c *compiler) setEval(nextOp setOperation) { + var rightOperand *uset.UnicodeSet + var leftOperand *uset.UnicodeSet + + for { + pendingSetOp := c.setOpStack[len(c.setOpStack)-1] + if (pendingSetOp & 0xffff0000) < (nextOp & 0xffff0000) { + break + } + + c.setOpStack = c.setOpStack[:len(c.setOpStack)-1] + rightOperand = c.setStack[len(c.setStack)-1] + + switch pendingSetOp { + case setNegation: + rightOperand.Complement() + + case setCaseClose: + rightOperand.CloseOver(uset.CaseInsensitive) + + case setDifference1, setDifference2: + c.setStack = c.setStack[:len(c.setStack)-1] + leftOperand = c.setStack[len(c.setStack)-1] + leftOperand.RemoveAll(rightOperand) + + case setIntersection1, setIntersection2: + c.setStack = c.setStack[:len(c.setStack)-1] + leftOperand = c.setStack[len(c.setStack)-1] + leftOperand.RetainAll(rightOperand) + + case setUnion: + c.setStack = c.setStack[:len(c.setStack)-1] + leftOperand = c.setStack[len(c.setStack)-1] + leftOperand.AddAll(rightOperand) + + default: + panic("unreachable") + } + } +} + +func safeIncrement(val int32, delta int) int32 { + if delta <= math.MaxInt32 && math.MaxInt32-val > int32(delta) { + return val + int32(delta) + } + return math.MaxInt32 +} + +func (c *compiler) minMatchLength(start, end int) int32 { + if c.err != nil { + return 0 + } + + var loc int + var currentLen int32 + + // forwardedLength is a vector holding minimum-match-length values that + // are propagated forward in the pattern by JMP or STATE_SAVE operations. + // It must be one longer than the pattern being checked because some ops + // will jmp to a end-of-block+1 location from within a block, and we must + // count those when checking the block. + forwardedLength := make([]int32, end+2) + for i := range forwardedLength { + forwardedLength[i] = math.MaxInt32 + } + + for loc = start; loc <= end; loc++ { + op := c.out.compiledPat[loc] + opType := op.typ() + + // The loop is advancing linearly through the pattern. + // If the op we are now at was the destination of a branch in the pattern, + // and that path has a shorter minimum length than the current accumulated value, + // replace the current accumulated value. + // no-match-possible cases. + if forwardedLength[loc] < currentLen { + currentLen = forwardedLength[loc] + } + + switch opType { + // Ops that don't change the total length matched + case urxReservedOp, + urxEnd, + urxStringLen, + urxNop, + urxStartCapture, + urxEndCapture, + urxBackslashB, + urxBackslashBu, + urxBackslashG, + urxBackslashZ, + urxCaret, + urxDollar, + urxDollarM, + urxDollarD, + urxDollarMd, + urxRelocOprnd, + urxStoInpLoc, + urxCaretM, + urxCaretMUnix, + urxBackref, // BackRef. Must assume that it might be a zero length match + urxBackrefI, + urxStoSp, // Setup for atomic or possessive blocks. Doesn't change what can match. + urxLdSp, + urxJmpSav, + urxJmpSavX: + // no-op + + // Ops that match a minimum of one character (one or two 16 bit code units.) + // + case urxOnechar, + urxStaticSetref, + urxStatSetrefN, + urxSetref, + urxBackslashD, + urxBackslashH, + urxBackslashR, + urxBackslashV, + urcOnecharI, + urxBackslashX, // Grapheme Cluster. Minimum is 1, max unbounded. + urxDotanyAll, // . matches one or two. + urxDotany, + urxDotanyUnix: + currentLen = safeIncrement(currentLen, 1) + + case urxJmpx: + loc++ // URX_JMPX has an extra operand, ignored here, otherwise processed identically to URX_JMP. + fallthrough + + case urxJmp: + jmpDest := op.value() + if jmpDest < loc { + // Loop of some kind. Can safely ignore, the worst that will happen + // is that we understate the true minimum length + currentLen = forwardedLength[loc+1] + } else { + // Forward jump. Propagate the current min length to the target loc of the jump. + if forwardedLength[jmpDest] > currentLen { + forwardedLength[jmpDest] = currentLen + } + } + + case urxBacktrack: + // Back-tracks are kind of like a branch, except that the min length was + // propagated already, by the state save. + currentLen = forwardedLength[loc+1] + + case urxStateSave: + // State Save, for forward jumps, propagate the current minimum. + // of the state save. + jmpDest := op.value() + if jmpDest > loc { + if currentLen < forwardedLength[jmpDest] { + forwardedLength[jmpDest] = currentLen + } + } + + case urxString: + loc++ + stringLenOp := c.out.compiledPat[loc] + currentLen = safeIncrement(currentLen, stringLenOp.value()) + + case urxStringI: + loc++ + // TODO: with full case folding, matching input text may be shorter than + // the string we have here. More smarts could put some bounds on it. + // Assume a min length of one for now. A min length of zero causes + // optimization failures for a pattern like "string"+ + // currentLen += URX_VAL(stringLenOp); + currentLen = safeIncrement(currentLen, 1) + + case urxCtrInit, urxCtrInitNg: + // Loop Init Ops. + // If the min loop count == 0 + // move loc forwards to the end of the loop, skipping over the body. + // If the min count is > 0, + // continue normal processing of the body of the loop. + loopEndOp := c.out.compiledPat[loc+1] + loopEndLoc := loopEndOp.value() + minLoopCount := c.out.compiledPat[loc+2] + if minLoopCount == 0 { + loc = loopEndLoc + } else { + loc += 3 // Skips over operands of CTR_INIT + } + + case utxCtrLoop, urxCtrLoopNg: + // Loop ops. The jump is conditional, backwards only. + + case urxLoopSrI, urxLoopDotI, urxLoopC: + // More loop ops. These state-save to themselves. don't change the minimum match - could match nothing at all. + + case urxLaStart, urxLbStart: + // Look-around. Scan forward until the matching look-ahead end, + // without processing the look-around block. This is overly pessimistic for look-ahead, + // it assumes that the look-ahead match might be zero-length. + // TODO: Positive lookahead could recursively do the block, then continue + // with the longer of the block or the value coming in. Ticket 6060 + var depth int32 + if opType == urxLaStart { + depth = 2 + } else { + depth = 1 + } + + for { + loc++ + op = c.out.compiledPat[loc] + if op.typ() == urxLaStart { + // The boilerplate for look-ahead includes two LA_END instructions, + // Depth will be decremented by each one when it is seen. + depth += 2 + } + if op.typ() == urxLbStart { + depth++ + } + if op.typ() == urxLaEnd { + depth-- + if depth == 0 { + break + } + } + if op.typ() == urxLbnEnd { + depth-- + if depth == 0 { + break + } + } + if op.typ() == urxStateSave { + // Need this because neg lookahead blocks will FAIL to outside of the block. + jmpDest := op.value() + if jmpDest > loc { + if currentLen < forwardedLength[jmpDest] { + forwardedLength[jmpDest] = currentLen + } + } + } + } + + case urxLaEnd, urxLbCont, urxLbEnd, urxLbnCount, urxLbnEnd: + // Only come here if the matching URX_LA_START or URX_LB_START was not in the + // range being sized, which happens when measuring size of look-behind blocks. + + default: + panic("unreachable") + } + } + + // We have finished walking through the ops. Check whether some forward jump + // propagated a shorter length to location end+1. + if forwardedLength[end+1] < currentLen { + currentLen = forwardedLength[end+1] + } + + return currentLen +} + +func (c *compiler) maxMatchLength(start, end int) int32 { + if c.err != nil { + return 0 + } + var loc int + var currentLen int32 + + forwardedLength := make([]int32, end+1) + + for loc = start; loc <= end; loc++ { + op := c.out.compiledPat[loc] + opType := op.typ() + + // The loop is advancing linearly through the pattern. + // If the op we are now at was the destination of a branch in the pattern, + // and that path has a longer maximum length than the current accumulated value, + // replace the current accumulated value. + if forwardedLength[loc] > currentLen { + currentLen = forwardedLength[loc] + } + + switch opType { + // Ops that don't change the total length matched + case urxReservedOp, + urxEnd, + urxStringLen, + urxNop, + urxStartCapture, + urxEndCapture, + urxBackslashB, + urxBackslashBu, + urxBackslashG, + urxBackslashZ, + urxCaret, + urxDollar, + urxDollarM, + urxDollarD, + urxDollarMd, + urxRelocOprnd, + urxStoInpLoc, + urxCaretM, + urxCaretMUnix, + urxStoSp, // Setup for atomic or possessive blocks. Doesn't change what can match. + urxLdSp, + urxLbEnd, + urxLbCont, + urxLbnCount, + urxLbnEnd: + // no-op + + // Ops that increase that cause an unbounded increase in the length + // of a matched string, or that increase it a hard to characterize way. + // Call the max length unbounded, and stop further checking. + case urxBackref, // BackRef. Must assume that it might be a zero length match + urxBackrefI, + urxBackslashX: // Grapheme Cluster. Minimum is 1, max unbounded. + currentLen = math.MaxInt32 + + // Ops that match a max of one character (possibly two 16 bit code units.) + // + case urxStaticSetref, + urxStatSetrefN, + urxSetref, + urxBackslashD, + urxBackslashH, + urxBackslashR, + urxBackslashV, + urcOnecharI, + urxDotanyAll, + urxDotany, + urxDotanyUnix: + currentLen = safeIncrement(currentLen, 2) + + // Single literal character. Increase current max length by one or two, + // depending on whether the char is in the supplementary range. + case urxOnechar: + currentLen = safeIncrement(currentLen, 1) + if op.value() > 0x10000 { + currentLen = safeIncrement(currentLen, 1) + } + + // Jumps. + // + case urxJmp, urxJmpx, urxJmpSav, urxJmpSavX: + jmpDest := op.value() + if jmpDest < loc { + // Loop of some kind. Max match length is unbounded. + currentLen = math.MaxInt32 + } else { + // Forward jump. Propagate the current min length to the target loc of the jump. + if forwardedLength[jmpDest] < currentLen { + forwardedLength[jmpDest] = currentLen + } + currentLen = 0 + } + + case urxBacktrack: + // back-tracks are kind of like a branch, except that the max length was + // propagated already, by the state save. + currentLen = forwardedLength[loc+1] + + case urxStateSave: + // State Save, for forward jumps, propagate the current minimum. + // of the state save. + // For backwards jumps, they create a loop, maximum + // match length is unbounded. + jmpDest := op.value() + if jmpDest > loc { + if currentLen > forwardedLength[jmpDest] { + forwardedLength[jmpDest] = currentLen + } + } else { + currentLen = math.MaxInt32 + } + + case urxString: + loc++ + stringLenOp := c.out.compiledPat[loc] + currentLen = safeIncrement(currentLen, stringLenOp.value()) + + case urxStringI: + // TODO: This code assumes that any user string that matches will be no longer + // than our compiled string, with case insensitive matching. + // Our compiled string has been case-folded already. + // + // Any matching user string will have no more code points than our + // compiled (folded) string. Folding may add code points, but + // not remove them. + // + // There is a potential problem if a supplemental code point + // case-folds to a BMP code point. In this case our compiled string + // could be shorter (in code units) than a matching user string. + // + // At this time (Unicode 6.1) there are no such characters, and this case + // is not being handled. A test, intltest regex/Bug9283, will fail if + // any problematic characters are added to Unicode. + // + // If this happens, we can make a set of the BMP chars that the + // troublesome supplementals fold to, scan our string, and bump the + // currentLen one extra for each that is found. + // + loc++ + stringLenOp := c.out.compiledPat[loc] + currentLen = safeIncrement(currentLen, stringLenOp.value()) + + case urxCtrInit, urxCtrInitNg: + // For Loops, recursively call this function on the pattern for the loop body, + // then multiply the result by the maximum loop count. + loopEndLoc := c.out.compiledPat[loc+1].value() + if loopEndLoc == loc+4 { + // Loop has an empty body. No affect on max match length. + // Continue processing with code after the loop end. + loc = loopEndLoc + break + } + + maxLoopCount := int(c.out.compiledPat[loc+3]) + if maxLoopCount == -1 { + // Unbounded Loop. No upper bound on match length. + currentLen = math.MaxInt32 + break + } + + blockLen := c.maxMatchLength(loc+4, loopEndLoc-1) // Recursive call. + updatedLen := int(currentLen) + int(blockLen)*maxLoopCount + if updatedLen >= math.MaxInt32 { + currentLen = math.MaxInt32 + break + } + currentLen = int32(updatedLen) + loc = loopEndLoc + + case utxCtrLoop, urxCtrLoopNg: + panic("should not encounter this opcode") + + case urxLoopSrI, urxLoopDotI, urxLoopC: + // For anything to do with loops, make the match length unbounded. + currentLen = math.MaxInt32 + + case urxLaStart, urxLaEnd: + // Look-ahead. Just ignore, treat the look-ahead block as if + // it were normal pattern. Gives a too-long match length, + // but good enough for now. + + case urxLbStart: + // Look-behind. Scan forward until the matching look-around end, + // without processing the look-behind block. + dataLoc := op.value() + for loc = loc + 1; loc <= end; loc++ { + op = c.out.compiledPat[loc] + if (op.typ() == urxLaEnd || op.typ() == urxLbnEnd) && (op.value() == dataLoc) { + break + } + } + + default: + panic("unreachable") + } + + if currentLen == math.MaxInt32 { + // The maximum length is unbounded. + // Stop further processing of the pattern. + break + } + } + + return currentLen +} + +// Machine Generated below. +// It may need updating with new versions of Unicode. +// Intltest test RegexTest::TestCaseInsensitiveStarters will fail if an update is needed. +// The update tool is here: +// svn+ssh://source.icu-project.org/repos/icu/tools/trunk/unicode/c/genregexcasing + +// Machine Generated Data. Do not hand edit. +var reCaseFixCodePoints = [...]rune{ + 0x61, 0x66, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x77, 0x79, 0x2bc, + 0x3ac, 0x3ae, 0x3b1, 0x3b7, 0x3b9, 0x3c1, 0x3c5, 0x3c9, 0x3ce, 0x565, + 0x574, 0x57e, 0x1f00, 0x1f01, 0x1f02, 0x1f03, 0x1f04, 0x1f05, 0x1f06, 0x1f07, + 0x1f20, 0x1f21, 0x1f22, 0x1f23, 0x1f24, 0x1f25, 0x1f26, 0x1f27, 0x1f60, 0x1f61, + 0x1f62, 0x1f63, 0x1f64, 0x1f65, 0x1f66, 0x1f67, 0x1f70, 0x1f74, 0x1f7c, 0x110000} + +var reCaseFixStringOffsets = [...]int16{ + 0x0, 0x1, 0x6, 0x7, 0x8, 0x9, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13, + 0x17, 0x1b, 0x20, 0x21, 0x2a, 0x2e, 0x2f, 0x30, 0x34, 0x35, 0x37, 0x39, 0x3b, + 0x3d, 0x3f, 0x41, 0x43, 0x45, 0x47, 0x49, 0x4b, 0x4d, 0x4f, 0x51, 0x53, 0x55, + 0x57, 0x59, 0x5b, 0x5d, 0x5f, 0x61, 0x63, 0x65, 0x66, 0x67, 0} + +var reCaseFixCounts = [...]int16{ + 0x1, 0x5, 0x1, 0x1, 0x1, 0x4, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x4, 0x4, 0x5, 0x1, 0x9, + 0x4, 0x1, 0x1, 0x4, 0x1, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, + 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x1, 0x1, 0x1, 0} + +var reCaseFixData = [...]uint16{ + 0x1e9a, 0xfb00, 0xfb01, 0xfb02, 0xfb03, 0xfb04, 0x1e96, 0x130, 0x1f0, 0xdf, 0x1e9e, 0xfb05, + 0xfb06, 0x1e97, 0x1e98, 0x1e99, 0x149, 0x1fb4, 0x1fc4, 0x1fb3, 0x1fb6, 0x1fb7, 0x1fbc, 0x1fc3, + 0x1fc6, 0x1fc7, 0x1fcc, 0x390, 0x1fd2, 0x1fd3, 0x1fd6, 0x1fd7, 0x1fe4, 0x3b0, 0x1f50, 0x1f52, + 0x1f54, 0x1f56, 0x1fe2, 0x1fe3, 0x1fe6, 0x1fe7, 0x1ff3, 0x1ff6, 0x1ff7, 0x1ffc, 0x1ff4, 0x587, + 0xfb13, 0xfb14, 0xfb15, 0xfb17, 0xfb16, 0x1f80, 0x1f88, 0x1f81, 0x1f89, 0x1f82, 0x1f8a, 0x1f83, + 0x1f8b, 0x1f84, 0x1f8c, 0x1f85, 0x1f8d, 0x1f86, 0x1f8e, 0x1f87, 0x1f8f, 0x1f90, 0x1f98, 0x1f91, + 0x1f99, 0x1f92, 0x1f9a, 0x1f93, 0x1f9b, 0x1f94, 0x1f9c, 0x1f95, 0x1f9d, 0x1f96, 0x1f9e, 0x1f97, + 0x1f9f, 0x1fa0, 0x1fa8, 0x1fa1, 0x1fa9, 0x1fa2, 0x1faa, 0x1fa3, 0x1fab, 0x1fa4, 0x1fac, 0x1fa5, + 0x1fad, 0x1fa6, 0x1fae, 0x1fa7, 0x1faf, 0x1fb2, 0x1fc2, 0x1ff2, 0} + +func (c *compiler) findCaseInsensitiveStarters(ch rune, starterChars *uset.UnicodeSet) { + if uprops.HasBinaryProperty(ch, uprops.UCharCaseSensitive) { + caseFoldedC := ucase.Fold(ch) + starterChars.Clear() + starterChars.AddRune(caseFoldedC) + + var i int + for i = 0; reCaseFixCodePoints[i] < ch; i++ { + // Simple linear search through the sorted list of interesting code points. + } + + if reCaseFixCodePoints[i] == ch { + data := reCaseFixData[reCaseFixStringOffsets[i]:] + numCharsToAdd := reCaseFixCounts[i] + for j := int16(0); j < numCharsToAdd; j++ { + var cpToAdd rune + cpToAdd, data = utf16.NextUnsafe(data) + starterChars.AddRune(cpToAdd) + } + } + + starterChars.CloseOver(uset.CaseInsensitive) + } else { + // Not a cased character. Just return it alone. + starterChars.Clear() + starterChars.AddRune(ch) + } +} + +func (c *compiler) scanProp() *uset.UnicodeSet { + if c.err != nil { + return nil + } + negated := c.c.char == chP + + c.nextChar(&c.c) + if c.c.char != chLBrace { + c.error(PropertySyntax) + return nil + } + + var propertyName strings.Builder + for { + c.nextChar(&c.c) + if c.c.char == chRBrace { + break + } + if c.c.char == -1 { + c.error(PropertySyntax) + return nil + } + propertyName.WriteRune(c.c.char) + } + + ss := c.createSetForProperty(propertyName.String(), negated) + c.nextChar(&c.c) + return ss +} + +func (c *compiler) createSetForProperty(propName string, negated bool) *uset.UnicodeSet { + if c.err != nil { + return nil + } + + var set *uset.UnicodeSet + + var usetFlags uset.USet + if c.modeFlags&CaseInsensitive != 0 { + usetFlags |= uset.CaseInsensitive + } + + var err error + set, err = uprops.NewUnicodeSetFomPattern("\\p{"+propName+"}", usetFlags) + if err == nil { + goto done + } + + // + // The incoming property wasn't directly recognized by ICU. + + // Check [:word:] and [:all:]. These are not recognized as a properties by ICU UnicodeSet. + // Java accepts 'word' with mixed case. + // Java accepts 'all' only in all lower case. + if strings.EqualFold(propName, "word") { + set = staticPropertySets[urxIswordSet].Clone() + goto done + } + if propName == "all" { + set = uset.New() + set.AddRuneRange(0, 0x10ffff) + goto done + } + + // Do Java InBlock expressions + // + if strings.HasPrefix(propName, "In") && len(propName) >= 3 { + set = uset.New() + if uprops.ApplyPropertyAlias(set, "Block", propName[2:]) != nil { + c.error(PropertySyntax) + } + goto done + } + + // Check for the Java form "IsBooleanPropertyValue", which we will recast + // as "BooleanPropertyValue". The property value can be either a + // a General Category or a Script Name. + if strings.HasPrefix(propName, "Is") && len(propName) >= 3 { + mPropName := propName[2:] + if strings.IndexByte(mPropName, '=') >= 0 { + c.error(PropertySyntax) + goto done + } + + if strings.EqualFold(mPropName, "assigned") { + mPropName = "unassigned" + negated = !negated + } else if strings.EqualFold(mPropName, "TitleCase") { + mPropName = "Titlecase_Letter" + } + + set, err = uprops.NewUnicodeSetFomPattern("\\p{"+mPropName+"}", 0) + if err != nil { + c.error(PropertySyntax) + } else if !set.IsEmpty() && (usetFlags&uset.CaseInsensitive) != 0 { + set.CloseOver(uset.CaseInsensitive) + } + goto done + } + + if strings.HasPrefix(propName, "java") { + set = uset.New() + + // + // Try the various Java specific properties. + // These all begin with "java" + // + if propName == "javaDefined" { + c.err = uprops.AddCategory(set, uchar.GcCnMask) + set.Complement() + } else if propName == "javaDigit" { + c.err = uprops.AddCategory(set, uchar.GcNdMask) + } else if propName == "javaIdentifierIgnorable" { + c.err = addIdentifierIgnorable(set) + } else if propName == "javaISOControl" { + set.AddRuneRange(0, 0x1F) + set.AddRuneRange(0x7F, 0x9F) + } else if propName == "javaJavaIdentifierPart" { + c.err = uprops.AddCategory(set, uchar.GcLMask) + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcScMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcPcMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcNdMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcNlMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcMcMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcMnMask) + } + if c.err == nil { + c.err = addIdentifierIgnorable(set) + } + } else if propName == "javaJavaIdentifierStart" { + c.err = uprops.AddCategory(set, uchar.GcLMask) + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcNlMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcScMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcPcMask) + } + } else if propName == "javaLetter" { + c.err = uprops.AddCategory(set, uchar.GcLMask) + } else if propName == "javaLetterOrDigit" { + c.err = uprops.AddCategory(set, uchar.GcLMask) + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcNdMask) + } + } else if propName == "javaLowerCase" { + c.err = uprops.AddCategory(set, uchar.GcLlMask) + } else if propName == "javaMirrored" { + c.err = uprops.ApplyIntPropertyValue(set, uprops.UCharBidiMirrored, 1) + } else if propName == "javaSpaceChar" { + c.err = uprops.AddCategory(set, uchar.GcZMask) + } else if propName == "javaSupplementaryCodePoint" { + set.AddRuneRange(0x10000, uset.MaxValue) + } else if propName == "javaTitleCase" { + c.err = uprops.AddCategory(set, uchar.GcLtMask) + } else if propName == "javaUnicodeIdentifierStart" { + c.err = uprops.AddCategory(set, uchar.GcLMask) + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcNlMask) + } + } else if propName == "javaUnicodeIdentifierPart" { + c.err = uprops.AddCategory(set, uchar.GcLMask) + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcPcMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcNdMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcNlMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcMcMask) + } + if c.err == nil { + c.err = uprops.AddCategory(set, uchar.GcMnMask) + } + if c.err == nil { + c.err = addIdentifierIgnorable(set) + } + } else if propName == "javaUpperCase" { + c.err = uprops.AddCategory(set, uchar.GcLuMask) + } else if propName == "javaValidCodePoint" { + set.AddRuneRange(0, uset.MaxValue) + } else if propName == "javaWhitespace" { + c.err = uprops.AddCategory(set, uchar.GcZMask) + excl := uset.New() + excl.AddRune(0x0a) + excl.AddRune(0x2007) + excl.AddRune(0x202f) + set.RemoveAll(excl) + set.AddRuneRange(9, 0x0d) + set.AddRuneRange(0x1c, 0x1f) + } else { + c.error(PropertySyntax) + } + + if c.err == nil && !set.IsEmpty() && (usetFlags&uset.CaseInsensitive) != 0 { + set.CloseOver(uset.CaseInsensitive) + } + goto done + } + + // Unrecognized property. ICU didn't like it as it was, and none of the Java compatibility + // extensions matched it. + c.error(PropertySyntax) + +done: + if c.err != nil { + return nil + } + if negated { + set.Complement() + } + return set +} + +func addIdentifierIgnorable(set *uset.UnicodeSet) error { + set.AddRuneRange(0, 8) + set.AddRuneRange(0x0e, 0x1b) + set.AddRuneRange(0x7f, 0x9f) + + return uprops.AddCategory(set, uchar.GcCfMask) +} + +func (c *compiler) scanPosixProp() *uset.UnicodeSet { + var set *uset.UnicodeSet + + if !(c.c.char == chColon) { + panic("assertion failed: c.lastChar == ':'") + } + + savedScanIndex := c.scanIndex + savedScanPattern := c.p + savedQuoteMode := c.quoteMode + savedInBackslashQuote := c.inBackslashQuote + savedEOLComments := c.eolComments + savedLineNum := c.lineNum + savedCharNum := c.charNum + savedLastChar := c.lastChar + savedPeekChar := c.peekChar + savedC := c.c + + // Scan for a closing ]. A little tricky because there are some perverse + // edge cases possible. "[:abc\Qdef:] \E]" is a valid non-property expression, + // ending on the second closing ]. + var propName []rune + negated := false + + // Check for and consume the '^' in a negated POSIX property, e.g. [:^Letter:] + c.nextChar(&c.c) + if c.c.char == chUp { + negated = true + c.nextChar(&c.c) + } + + // Scan for the closing ":]", collecting the property name along the way. + sawPropSetTerminator := false + for { + propName = append(propName, c.c.char) + c.nextChar(&c.c) + if c.c.quoted || c.c.char == -1 { + // Escaped characters or end of input - either says this isn't a [:Property:] + break + } + if c.c.char == chColon { + c.nextChar(&c.c) + if c.c.char == chRBracket { + sawPropSetTerminator = true + break + } + } + } + + if sawPropSetTerminator { + set = c.createSetForProperty(string(propName), negated) + } else { + // No closing ']' - not a [:Property:] + // Restore the original scan position. + // The main scanner will retry the input as a normal set expression, + // not a [:Property:] expression. + c.scanIndex = savedScanIndex + c.p = savedScanPattern + c.quoteMode = savedQuoteMode + c.inBackslashQuote = savedInBackslashQuote + c.eolComments = savedEOLComments + c.lineNum = savedLineNum + c.charNum = savedCharNum + c.lastChar = savedLastChar + c.peekChar = savedPeekChar + c.c = savedC + } + + return set +} + +func (c *compiler) compileSet(set *uset.UnicodeSet) { + if set == nil { + return + } + // Remove any strings from the set. + // There shoudn't be any, but just in case. + // (Case Closure can add them; if we had a simple case closure available that + // ignored strings, that would be better.) + setSize := set.Len() + + switch setSize { + case 0: + // Set of no elements. Always fails to match. + c.appendOp(urxBacktrack, 0) + + case 1: + // The set contains only a single code point. Put it into + // the compiled pattern as a single char operation rather + // than a set, and discard the set itself. + c.literalChar(set.RuneAt(0)) + + default: + // The set contains two or more chars. (the normal case) + // Put it into the compiled pattern as a set. + // theSet->freeze(); + setNumber := len(c.out.sets) + c.out.sets = append(c.out.sets, set) + c.appendOp(urxSetref, setNumber) + } +} diff --git a/go/mysql/icuregex/compiler_table.go b/go/mysql/icuregex/compiler_table.go new file mode 100644 index 00000000000..e8cfe0d5e55 --- /dev/null +++ b/go/mysql/icuregex/compiler_table.go @@ -0,0 +1,357 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +type patternParseAction uint8 + +const ( + doSetBackslashD patternParseAction = iota + doBackslashh + doBackslashH + doSetLiteralEscaped + doOpenLookAheadNeg + doCompleteNamedBackRef + doPatStart + doBackslashS + doBackslashD + doNGStar + doNOP + doBackslashX + doSetLiteral + doContinueNamedCapture + doBackslashG + doBackslashR + doSetBegin + doSetBackslashv + doPossessivePlus + doPerlInline + doBackslashZ + doSetAddAmp + doSetBeginDifference1 + doIntervalError + doSetNegate + doIntervalInit + doSetIntersection2 + doPossessiveInterval + doRuleError + doBackslashW + doContinueNamedBackRef + doOpenNonCaptureParen + doExit + doSetNamedChar + doSetBackslashV + doConditionalExpr + doEscapeError + doBadOpenParenType + doPossessiveStar + doSetAddDash + doEscapedLiteralChar + doSetBackslashw + doIntervalUpperDigit + doBackslashv + doSetBackslashS + doSetNoCloseError + doSetProp + doBackslashB + doSetEnd + doSetRange + doMatchModeParen + doPlus + doBackslashV + doSetMatchMode + doBackslashz + doSetNamedRange + doOpenLookBehindNeg + doInterval + doBadNamedCapture + doBeginMatchMode + doBackslashd + doPatFinish + doNamedChar + doNGPlus + doSetDifference2 + doSetBackslashH + doCloseParen + doDotAny + doOpenCaptureParen + doEnterQuoteMode + doOpenAtomicParen + doBadModeFlag + doSetBackslashd + doSetFinish + doProperty + doBeginNamedBackRef + doBackRef + doOpt + doDollar + doBeginNamedCapture + doNGInterval + doSetOpError + doSetPosixProp + doSetBeginIntersection1 + doBackslashb + doSetBeginUnion + doIntevalLowerDigit + doSetBackslashh + doStar + doMatchMode + doBackslashA + doOpenLookBehind + doPossessiveOpt + doOrOperator + doBackslashw + doBackslashs + doLiteralChar + doSuppressComments + doCaret + doIntervalSame + doNGOpt + doOpenLookAhead + doSetBackslashW + doMismatchedParenErr + doSetBackslashs + rbbiLastAction +) + +// ------------------------------------------------------------------------------- +// +// RegexTableEl represents the structure of a row in the transition table +// for the pattern parser state machine. +// +// ------------------------------------------------------------------------------- +type regexTableEl struct { + action patternParseAction + charClass uint8 + nextState uint8 + pushState uint8 + nextChar bool +} + +var parseStateTable = []regexTableEl{ + {doNOP, 0, 0, 0, true}, + {doPatStart, 255, 2, 0, false}, // 1 start + {doLiteralChar, 254, 14, 0, true}, // 2 term + {doLiteralChar, 130, 14, 0, true}, // 3 + {doSetBegin, 91 /* [ */, 123, 205, true}, // 4 + {doNOP, 40 /* ( */, 27, 0, true}, // 5 + {doDotAny, 46 /* . */, 14, 0, true}, // 6 + {doCaret, 94 /* ^ */, 14, 0, true}, // 7 + {doDollar, 36 /* $ */, 14, 0, true}, // 8 + {doNOP, 92 /* \ */, 89, 0, true}, // 9 + {doOrOperator, 124 /* | */, 2, 0, true}, // 10 + {doCloseParen, 41 /* ) */, 255, 0, true}, // 11 + {doPatFinish, 253, 2, 0, false}, // 12 + {doRuleError, 255, 206, 0, false}, // 13 + {doNOP, 42 /* * */, 68, 0, true}, // 14 expr-quant + {doNOP, 43 /* + */, 71, 0, true}, // 15 + {doNOP, 63 /* ? */, 74, 0, true}, // 16 + {doIntervalInit, 123 /* { */, 77, 0, true}, // 17 + {doNOP, 40 /* ( */, 23, 0, true}, // 18 + {doNOP, 255, 20, 0, false}, // 19 + {doOrOperator, 124 /* | */, 2, 0, true}, // 20 expr-cont + {doCloseParen, 41 /* ) */, 255, 0, true}, // 21 + {doNOP, 255, 2, 0, false}, // 22 + {doSuppressComments, 63 /* ? */, 25, 0, true}, // 23 open-paren-quant + {doNOP, 255, 27, 0, false}, // 24 + {doNOP, 35 /* # */, 50, 14, true}, // 25 open-paren-quant2 + {doNOP, 255, 29, 0, false}, // 26 + {doSuppressComments, 63 /* ? */, 29, 0, true}, // 27 open-paren + {doOpenCaptureParen, 255, 2, 14, false}, // 28 + {doOpenNonCaptureParen, 58 /* : */, 2, 14, true}, // 29 open-paren-extended + {doOpenAtomicParen, 62 /* > */, 2, 14, true}, // 30 + {doOpenLookAhead, 61 /* = */, 2, 20, true}, // 31 + {doOpenLookAheadNeg, 33 /* ! */, 2, 20, true}, // 32 + {doNOP, 60 /* < */, 46, 0, true}, // 33 + {doNOP, 35 /* # */, 50, 2, true}, // 34 + {doBeginMatchMode, 105 /* i */, 53, 0, false}, // 35 + {doBeginMatchMode, 100 /* d */, 53, 0, false}, // 36 + {doBeginMatchMode, 109 /* m */, 53, 0, false}, // 37 + {doBeginMatchMode, 115 /* s */, 53, 0, false}, // 38 + {doBeginMatchMode, 117 /* u */, 53, 0, false}, // 39 + {doBeginMatchMode, 119 /* w */, 53, 0, false}, // 40 + {doBeginMatchMode, 120 /* x */, 53, 0, false}, // 41 + {doBeginMatchMode, 45 /* - */, 53, 0, false}, // 42 + {doConditionalExpr, 40 /* ( */, 206, 0, true}, // 43 + {doPerlInline, 123 /* { */, 206, 0, true}, // 44 + {doBadOpenParenType, 255, 206, 0, false}, // 45 + {doOpenLookBehind, 61 /* = */, 2, 20, true}, // 46 open-paren-lookbehind + {doOpenLookBehindNeg, 33 /* ! */, 2, 20, true}, // 47 + {doBeginNamedCapture, 129, 64, 0, false}, // 48 + {doBadOpenParenType, 255, 206, 0, false}, // 49 + {doNOP, 41 /* ) */, 255, 0, true}, // 50 paren-comment + {doMismatchedParenErr, 253, 206, 0, false}, // 51 + {doNOP, 255, 50, 0, true}, // 52 + {doMatchMode, 105 /* i */, 53, 0, true}, // 53 paren-flag + {doMatchMode, 100 /* d */, 53, 0, true}, // 54 + {doMatchMode, 109 /* m */, 53, 0, true}, // 55 + {doMatchMode, 115 /* s */, 53, 0, true}, // 56 + {doMatchMode, 117 /* u */, 53, 0, true}, // 57 + {doMatchMode, 119 /* w */, 53, 0, true}, // 58 + {doMatchMode, 120 /* x */, 53, 0, true}, // 59 + {doMatchMode, 45 /* - */, 53, 0, true}, // 60 + {doSetMatchMode, 41 /* ) */, 2, 0, true}, // 61 + {doMatchModeParen, 58 /* : */, 2, 14, true}, // 62 + {doBadModeFlag, 255, 206, 0, false}, // 63 + {doContinueNamedCapture, 129, 64, 0, true}, // 64 named-capture + {doContinueNamedCapture, 128, 64, 0, true}, // 65 + {doOpenCaptureParen, 62 /* > */, 2, 14, true}, // 66 + {doBadNamedCapture, 255, 206, 0, false}, // 67 + {doNGStar, 63 /* ? */, 20, 0, true}, // 68 quant-star + {doPossessiveStar, 43 /* + */, 20, 0, true}, // 69 + {doStar, 255, 20, 0, false}, // 70 + {doNGPlus, 63 /* ? */, 20, 0, true}, // 71 quant-plus + {doPossessivePlus, 43 /* + */, 20, 0, true}, // 72 + {doPlus, 255, 20, 0, false}, // 73 + {doNGOpt, 63 /* ? */, 20, 0, true}, // 74 quant-opt + {doPossessiveOpt, 43 /* + */, 20, 0, true}, // 75 + {doOpt, 255, 20, 0, false}, // 76 + {doNOP, 128, 79, 0, false}, // 77 interval-open + {doIntervalError, 255, 206, 0, false}, // 78 + {doIntevalLowerDigit, 128, 79, 0, true}, // 79 interval-lower + {doNOP, 44 /* , */, 83, 0, true}, // 80 + {doIntervalSame, 125 /* } */, 86, 0, true}, // 81 + {doIntervalError, 255, 206, 0, false}, // 82 + {doIntervalUpperDigit, 128, 83, 0, true}, // 83 interval-upper + {doNOP, 125 /* } */, 86, 0, true}, // 84 + {doIntervalError, 255, 206, 0, false}, // 85 + {doNGInterval, 63 /* ? */, 20, 0, true}, // 86 interval-type + {doPossessiveInterval, 43 /* + */, 20, 0, true}, // 87 + {doInterval, 255, 20, 0, false}, // 88 + {doBackslashA, 65 /* A */, 2, 0, true}, // 89 backslash + {doBackslashB, 66 /* B */, 2, 0, true}, // 90 + {doBackslashb, 98 /* b */, 2, 0, true}, // 91 + {doBackslashd, 100 /* d */, 14, 0, true}, // 92 + {doBackslashD, 68 /* D */, 14, 0, true}, // 93 + {doBackslashG, 71 /* G */, 2, 0, true}, // 94 + {doBackslashh, 104 /* h */, 14, 0, true}, // 95 + {doBackslashH, 72 /* H */, 14, 0, true}, // 96 + {doNOP, 107 /* k */, 115, 0, true}, // 97 + {doNamedChar, 78 /* N */, 14, 0, false}, // 98 + {doProperty, 112 /* p */, 14, 0, false}, // 99 + {doProperty, 80 /* P */, 14, 0, false}, // 100 + {doBackslashR, 82 /* R */, 14, 0, true}, // 101 + {doEnterQuoteMode, 81 /* Q */, 2, 0, true}, // 102 + {doBackslashS, 83 /* S */, 14, 0, true}, // 103 + {doBackslashs, 115 /* s */, 14, 0, true}, // 104 + {doBackslashv, 118 /* v */, 14, 0, true}, // 105 + {doBackslashV, 86 /* V */, 14, 0, true}, // 106 + {doBackslashW, 87 /* W */, 14, 0, true}, // 107 + {doBackslashw, 119 /* w */, 14, 0, true}, // 108 + {doBackslashX, 88 /* X */, 14, 0, true}, // 109 + {doBackslashZ, 90 /* Z */, 2, 0, true}, // 110 + {doBackslashz, 122 /* z */, 2, 0, true}, // 111 + {doBackRef, 128, 14, 0, true}, // 112 + {doEscapeError, 253, 206, 0, false}, // 113 + {doEscapedLiteralChar, 255, 14, 0, true}, // 114 + {doBeginNamedBackRef, 60 /* < */, 117, 0, true}, // 115 named-backref + {doBadNamedCapture, 255, 206, 0, false}, // 116 + {doContinueNamedBackRef, 129, 119, 0, true}, // 117 named-backref-2 + {doBadNamedCapture, 255, 206, 0, false}, // 118 + {doContinueNamedBackRef, 129, 119, 0, true}, // 119 named-backref-3 + {doContinueNamedBackRef, 128, 119, 0, true}, // 120 + {doCompleteNamedBackRef, 62 /* > */, 14, 0, true}, // 121 + {doBadNamedCapture, 255, 206, 0, false}, // 122 + {doSetNegate, 94 /* ^ */, 126, 0, true}, // 123 set-open + {doSetPosixProp, 58 /* : */, 128, 0, false}, // 124 + {doNOP, 255, 126, 0, false}, // 125 + {doSetLiteral, 93 /* ] */, 141, 0, true}, // 126 set-open2 + {doNOP, 255, 131, 0, false}, // 127 + {doSetEnd, 93 /* ] */, 255, 0, true}, // 128 set-posix + {doNOP, 58 /* : */, 131, 0, false}, // 129 + {doRuleError, 255, 206, 0, false}, // 130 + {doSetEnd, 93 /* ] */, 255, 0, true}, // 131 set-start + {doSetBeginUnion, 91 /* [ */, 123, 148, true}, // 132 + {doNOP, 92 /* \ */, 191, 0, true}, // 133 + {doNOP, 45 /* - */, 137, 0, true}, // 134 + {doNOP, 38 /* & */, 139, 0, true}, // 135 + {doSetLiteral, 255, 141, 0, true}, // 136 + {doRuleError, 45 /* - */, 206, 0, false}, // 137 set-start-dash + {doSetAddDash, 255, 141, 0, false}, // 138 + {doRuleError, 38 /* & */, 206, 0, false}, // 139 set-start-amp + {doSetAddAmp, 255, 141, 0, false}, // 140 + {doSetEnd, 93 /* ] */, 255, 0, true}, // 141 set-after-lit + {doSetBeginUnion, 91 /* [ */, 123, 148, true}, // 142 + {doNOP, 45 /* - */, 178, 0, true}, // 143 + {doNOP, 38 /* & */, 169, 0, true}, // 144 + {doNOP, 92 /* \ */, 191, 0, true}, // 145 + {doSetNoCloseError, 253, 206, 0, false}, // 146 + {doSetLiteral, 255, 141, 0, true}, // 147 + {doSetEnd, 93 /* ] */, 255, 0, true}, // 148 set-after-set + {doSetBeginUnion, 91 /* [ */, 123, 148, true}, // 149 + {doNOP, 45 /* - */, 171, 0, true}, // 150 + {doNOP, 38 /* & */, 166, 0, true}, // 151 + {doNOP, 92 /* \ */, 191, 0, true}, // 152 + {doSetNoCloseError, 253, 206, 0, false}, // 153 + {doSetLiteral, 255, 141, 0, true}, // 154 + {doSetEnd, 93 /* ] */, 255, 0, true}, // 155 set-after-range + {doSetBeginUnion, 91 /* [ */, 123, 148, true}, // 156 + {doNOP, 45 /* - */, 174, 0, true}, // 157 + {doNOP, 38 /* & */, 176, 0, true}, // 158 + {doNOP, 92 /* \ */, 191, 0, true}, // 159 + {doSetNoCloseError, 253, 206, 0, false}, // 160 + {doSetLiteral, 255, 141, 0, true}, // 161 + {doSetBeginUnion, 91 /* [ */, 123, 148, true}, // 162 set-after-op + {doSetOpError, 93 /* ] */, 206, 0, false}, // 163 + {doNOP, 92 /* \ */, 191, 0, true}, // 164 + {doSetLiteral, 255, 141, 0, true}, // 165 + {doSetBeginIntersection1, 91 /* [ */, 123, 148, true}, // 166 set-set-amp + {doSetIntersection2, 38 /* & */, 162, 0, true}, // 167 + {doSetAddAmp, 255, 141, 0, false}, // 168 + {doSetIntersection2, 38 /* & */, 162, 0, true}, // 169 set-lit-amp + {doSetAddAmp, 255, 141, 0, false}, // 170 + {doSetBeginDifference1, 91 /* [ */, 123, 148, true}, // 171 set-set-dash + {doSetDifference2, 45 /* - */, 162, 0, true}, // 172 + {doSetAddDash, 255, 141, 0, false}, // 173 + {doSetDifference2, 45 /* - */, 162, 0, true}, // 174 set-range-dash + {doSetAddDash, 255, 141, 0, false}, // 175 + {doSetIntersection2, 38 /* & */, 162, 0, true}, // 176 set-range-amp + {doSetAddAmp, 255, 141, 0, false}, // 177 + {doSetDifference2, 45 /* - */, 162, 0, true}, // 178 set-lit-dash + {doSetAddDash, 91 /* [ */, 141, 0, false}, // 179 + {doSetAddDash, 93 /* ] */, 141, 0, false}, // 180 + {doNOP, 92 /* \ */, 183, 0, true}, // 181 + {doSetRange, 255, 155, 0, true}, // 182 + {doSetOpError, 115 /* s */, 206, 0, false}, // 183 set-lit-dash-escape + {doSetOpError, 83 /* S */, 206, 0, false}, // 184 + {doSetOpError, 119 /* w */, 206, 0, false}, // 185 + {doSetOpError, 87 /* W */, 206, 0, false}, // 186 + {doSetOpError, 100 /* d */, 206, 0, false}, // 187 + {doSetOpError, 68 /* D */, 206, 0, false}, // 188 + {doSetNamedRange, 78 /* N */, 155, 0, false}, // 189 + {doSetRange, 255, 155, 0, true}, // 190 + {doSetProp, 112 /* p */, 148, 0, false}, // 191 set-escape + {doSetProp, 80 /* P */, 148, 0, false}, // 192 + {doSetNamedChar, 78 /* N */, 141, 0, false}, // 193 + {doSetBackslashs, 115 /* s */, 155, 0, true}, // 194 + {doSetBackslashS, 83 /* S */, 155, 0, true}, // 195 + {doSetBackslashw, 119 /* w */, 155, 0, true}, // 196 + {doSetBackslashW, 87 /* W */, 155, 0, true}, // 197 + {doSetBackslashd, 100 /* d */, 155, 0, true}, // 198 + {doSetBackslashD, 68 /* D */, 155, 0, true}, // 199 + {doSetBackslashh, 104 /* h */, 155, 0, true}, // 200 + {doSetBackslashH, 72 /* H */, 155, 0, true}, // 201 + {doSetBackslashv, 118 /* v */, 155, 0, true}, // 202 + {doSetBackslashV, 86 /* V */, 155, 0, true}, // 203 + {doSetLiteralEscaped, 255, 141, 0, true}, // 204 + {doSetFinish, 255, 14, 0, false}, // 205 set-finish + {doExit, 255, 206, 0, true}, // 206 errorDeath +} diff --git a/go/mysql/icuregex/debug.go b/go/mysql/icuregex/debug.go new file mode 100644 index 00000000000..92c43e704d7 --- /dev/null +++ b/go/mysql/icuregex/debug.go @@ -0,0 +1,151 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +import ( + "fmt" + "io" +) + +func (pat *Pattern) Dump(w io.Writer) { + fmt.Fprintf(w, "Original Pattern: \"%s\"\n", pat.pattern) + fmt.Fprintf(w, " Min Match Length: %d\n", pat.minMatchLen) + fmt.Fprintf(w, " Match Start Type: %v\n", pat.startType) + if pat.startType == startString { + fmt.Fprintf(w, " Initial match string: \"%s\"\n", string(pat.literalText[pat.initialStringIdx:pat.initialStringIdx+pat.initialStringLen])) + } else if pat.startType == startSet { + fmt.Fprintf(w, " Match First Chars: %s\n", pat.initialChars.String()) + } else if pat.startType == startChar { + fmt.Fprintf(w, " First char of Match: ") + if pat.initialChar > 0x20 { + fmt.Fprintf(w, "'%c'\n", pat.initialChar) + } else { + fmt.Fprintf(w, "%#x\n", pat.initialChar) + } + } + + fmt.Fprintf(w, "Named Capture Groups:\n") + if len(pat.namedCaptureMap) == 0 { + fmt.Fprintf(w, " None\n") + } else { + for name, number := range pat.namedCaptureMap { + fmt.Fprintf(w, " %d\t%s\n", number, name) + } + } + + fmt.Fprintf(w, "\nIndex Binary Type Operand\n-------------------------------------------\n") + for idx := range pat.compiledPat { + pat.dumpOp(w, idx) + } + fmt.Fprintf(w, "\n\n") +} + +func (pat *Pattern) dumpOp(w io.Writer, index int) { + op := pat.compiledPat[index] + val := op.value() + opType := op.typ() + pinnedType := opType + if int(pinnedType) >= len(urxOpcodeNames) { + pinnedType = 0 + } + + fmt.Fprintf(w, "%4d %08x %-15s ", index, op, urxOpcodeNames[pinnedType]) + + switch opType { + case urxNop, + urxDotany, + urxDotanyAll, + urxFail, + urxCaret, + urxDollar, + urxBackslashG, + urxBackslashX, + urxEnd, + urxDollarM, + urxCaretM: + // Types with no operand field of interest. + + case urxReservedOp, + urxStartCapture, + urxEndCapture, + urxStateSave, + urxJmp, + urxJmpSav, + urxJmpSavX, + urxBackslashB, + urxBackslashBu, + urxBackslashD, + urxBackslashZ, + urxStringLen, + urxCtrInit, + urxCtrInitNg, + utxCtrLoop, + urxCtrLoopNg, + urxRelocOprnd, + urxStoSp, + urxLdSp, + urxBackref, + urxStoInpLoc, + urxJmpx, + urxLaStart, + urxLaEnd, + urxBackrefI, + urxLbStart, + urxLbCont, + urxLbEnd, + urxLbnCount, + urxLbnEnd, + urxLoopC, + urxLoopDotI, + urxBackslashH, + urxBackslashR, + urxBackslashV: + // types with an integer operand field. + fmt.Fprintf(w, "%d", val) + + case urxOnechar, urcOnecharI: + if val < 0x20 { + fmt.Fprintf(w, "%#x", val) + } else { + fmt.Fprintf(w, "'%c'", rune(val)) + } + + case urxString, urxStringI: + lengthOp := pat.compiledPat[index+1] + length := lengthOp.value() + fmt.Fprintf(w, "%q", string(pat.literalText[val:val+length])) + + case urxSetref, urxLoopSrI: + fmt.Fprintf(w, "%s", pat.sets[val].String()) + + case urxStaticSetref, urxStatSetrefN: + if (val & urxNegSet) != 0 { + fmt.Fprintf(w, "NOT ") + val &= ^urxNegSet + } + fmt.Fprintf(w, "%s", staticPropertySets[val].String()) + + default: + fmt.Fprintf(w, "??????") + } + fmt.Fprintf(w, "\n") +} diff --git a/go/mysql/icuregex/error.go b/go/mysql/icuregex/error.go new file mode 100644 index 00000000000..39c92399aa9 --- /dev/null +++ b/go/mysql/icuregex/error.go @@ -0,0 +1,152 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +import ( + "fmt" + "strings" +) + +type CompileError struct { + Code CompileErrorCode + Line int + Offset int + Context string +} + +func (e *CompileError) Error() string { + var out strings.Builder + switch e.Code { + case InternalError: + out.WriteString("Internal error") + case RuleSyntax: + out.WriteString("Syntax error") + case BadEscapeSequence: + out.WriteString("Bad escape sequence") + case PropertySyntax: + out.WriteString("Property syntax error") + case Unimplemented: + out.WriteString("Unimplemented") + case MismatchedParen: + out.WriteString("Mismatched parentheses") + case NumberTooBig: + out.WriteString("Number too big") + case BadInterval: + out.WriteString("Bad interval") + case MaxLtMin: + out.WriteString("Max less than min") + case InvalidBackRef: + out.WriteString("Invalid back reference") + case InvalidFlag: + out.WriteString("Invalid flag") + case LookBehindLimit: + out.WriteString("Look behind limit") + case MissingCloseBracket: + out.WriteString("Missing closing ]") + case InvalidRange: + out.WriteString("Invalid range") + case PatternTooBig: + out.WriteString("Pattern too big") + case InvalidCaptureGroupName: + out.WriteString("Invalid capture group name") + } + _, _ = fmt.Fprintf(&out, " in regular expression on line %d, character %d: `%s`", e.Line, e.Offset, e.Context) + + return out.String() +} + +type MatchError struct { + Code MatchErrorCode + Pattern string + Position int + Input []rune +} + +const maxMatchInputLength = 20 + +func (e *MatchError) Error() string { + var out strings.Builder + switch e.Code { + case StackOverflow: + out.WriteString("Stack overflow") + case TimeOut: + out.WriteString("Timeout") + case InternalMatchError: + out.WriteString("Internal error") + } + + input := e.Input + if len(input) > maxMatchInputLength { + var b []rune + start := e.Position - maxMatchInputLength/2 + if start < 0 { + start = 0 + } else { + b = append(b, '.', '.', '.') + } + end := start + maxMatchInputLength + trailing := true + if end > len(input) { + end = len(input) + trailing = false + } + b = append(b, input[start:end]...) + if trailing { + b = append(b, '.', '.', '.') + } + input = b + } + _, _ = fmt.Fprintf(&out, " for expression `%s` at position %d in: %q", e.Pattern, e.Position, string(input)) + + return out.String() +} + +type Code int32 + +type CompileErrorCode int32 + +const ( + InternalError CompileErrorCode = iota + 1 /**< An internal error (bug) was detected. */ + RuleSyntax /**< Syntax error in regexp pattern. */ + BadEscapeSequence /**< Unrecognized backslash escape sequence in pattern */ + PropertySyntax /**< Incorrect Unicode property */ + Unimplemented /**< Use of regexp feature that is not yet implemented. */ + MismatchedParen /**< Incorrectly nested parentheses in regexp pattern. */ + NumberTooBig /**< Decimal number is too large. */ + BadInterval /**< Error in {min,max} interval */ + MaxLtMin /**< In {min,max}, max is less than min. */ + InvalidBackRef /**< Back-reference to a non-existent capture group. */ + InvalidFlag /**< Invalid value for match mode flags. */ + LookBehindLimit /**< Look-Behind pattern matches must have a bounded maximum length. */ + MissingCloseBracket /**< Missing closing bracket on a bracket expression. */ + InvalidRange /**< In a character range [x-y], x is greater than y. */ + PatternTooBig /**< Pattern exceeds limits on size or complexity. @stable ICU 55 */ + InvalidCaptureGroupName /**< Invalid capture group name. @stable ICU 55 */ +) + +type MatchErrorCode int32 + +const ( + StackOverflow MatchErrorCode = iota /**< Regular expression backtrack stack overflow. */ + TimeOut /**< Maximum allowed match time exceeded */ + InternalMatchError /**< Internal error (bug) was detected. */ +) diff --git a/go/vt/vtgr/controller/error.go b/go/mysql/icuregex/errors/error.go similarity index 55% rename from go/vt/vtgr/controller/error.go rename to go/mysql/icuregex/errors/error.go index 5613c802524..f03a5157acf 100644 --- a/go/vt/vtgr/controller/error.go +++ b/go/mysql/icuregex/errors/error.go @@ -1,5 +1,10 @@ /* -Copyright 2021 The Vitess Authors. +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +19,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controller +package errors import "errors" -var ( - errMissingPrimaryTablet = errors.New("no primary tablet available") - errMissingGroup = errors.New("no mysql group") - errForceAbortBootstrap = errors.New("force abort bootstrap") -) +var ErrIllegalArgument = errors.New("illegal argument") +var ErrUnsupported = errors.New("unsupported") diff --git a/go/mysql/icuregex/icu_test.go b/go/mysql/icuregex/icu_test.go new file mode 100644 index 00000000000..9e9be505df7 --- /dev/null +++ b/go/mysql/icuregex/icu_test.go @@ -0,0 +1,415 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex_test + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "regexp" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/icuregex" + "vitess.io/vitess/go/mysql/icuregex/internal/pattern" +) + +var ErrSkip = errors.New("ignored test") + +type Matcher int8 + +const ( + FuncFind Matcher = iota + FuncMatches + FuncLookingAt +) + +type Expectation int8 + +const ( + Unknown Expectation = iota + Expected + NotExpected +) + +type TestPattern struct { + Line string + Lineno int + + Pattern string + Flags icuregex.RegexpFlag + Options struct { + MatchFunc Matcher + FindCount int + MatchOnly bool + MustError bool + Dump bool + HitEnd Expectation + RequireEnd Expectation + } + Input string + Groups []TestGroup +} + +type TestGroup struct { + Start, End int +} + +var parsePattern = regexp.MustCompile(`<(/?)(r|[0-9]+)>`) + +func (tp *TestPattern) parseFlags(line string) (string, error) { + for len(line) > 0 { + switch line[0] { + case '"', '\'', '/': + return line, nil + case ' ', '\t': + case 'i': + tp.Flags |= icuregex.CaseInsensitive + case 'x': + tp.Flags |= icuregex.Comments + case 's': + tp.Flags |= icuregex.DotAll + case 'm': + tp.Flags |= icuregex.Multiline + case 'e': + tp.Flags |= icuregex.ErrorOnUnknownEscapes + case 'D': + tp.Flags |= icuregex.UnixLines + case 'Q': + tp.Flags |= icuregex.Literal + case '2', '3', '4', '5', '6', '7', '8', '9': + tp.Options.FindCount = int(line[0] - '0') + case 'G': + tp.Options.MatchOnly = true + case 'E': + tp.Options.MustError = true + case 'd': + tp.Options.Dump = true + case 'L': + tp.Options.MatchFunc = FuncLookingAt + case 'M': + tp.Options.MatchFunc = FuncMatches + case 'v': + tp.Options.MustError = !icuregex.BreakIteration + case 'a', 'b': + return "", ErrSkip + case 'z': + tp.Options.HitEnd = Expected + case 'Z': + tp.Options.HitEnd = NotExpected + case 'y': + tp.Options.RequireEnd = Expected + case 'Y': + tp.Options.RequireEnd = NotExpected + default: + return "", fmt.Errorf("unexpected modifier '%c'", line[0]) + } + line = line[1:] + } + return "", io.ErrUnexpectedEOF +} + +func (tp *TestPattern) parseMatch(orig string) error { + input, ok := pattern.Unescape(orig) + if !ok { + return fmt.Errorf("failed to unquote input: %s", orig) + } + + var detagged []rune + var last int + + m := parsePattern.FindAllStringSubmatchIndex(input, -1) + for _, g := range m { + detagged = append(detagged, []rune(input[last:g[0]])...) + last = g[1] + + closing := input[g[2]:g[3]] == "/" + groupNum := input[g[4]:g[5]] + if groupNum == "r" { + return ErrSkip + } + num, err := strconv.Atoi(groupNum) + if err != nil { + return fmt.Errorf("bad group number %q: %w", groupNum, err) + } + + if num >= len(tp.Groups) { + grp := make([]TestGroup, num+1) + for i := range grp { + grp[i].Start = -1 + grp[i].End = -1 + } + copy(grp, tp.Groups) + tp.Groups = grp + } + + if closing { + tp.Groups[num].End = len(detagged) + } else { + tp.Groups[num].Start = len(detagged) + } + } + + detagged = append(detagged, []rune(input[last:])...) + tp.Input = string(detagged) + return nil +} + +func ParseTestFile(t testing.TB, filename string) []TestPattern { + f, err := os.Open(filename) + require.NoError(t, err) + + defer f.Close() + scanner := bufio.NewScanner(f) + var lineno int + var patterns []TestPattern + + errFunc := func(err error) { + if err == ErrSkip { + return + } + t.Errorf("Parse error: %v\n%03d: %s", err, lineno, scanner.Text()) + } + + for scanner.Scan() { + lineno++ + line := scanner.Text() + line = strings.TrimSpace(line) + + if len(line) == 0 || line[0] == '#' { + continue + } + + var tp TestPattern + tp.Line = line + tp.Lineno = lineno + + idx := strings.IndexByte(line[1:], line[0]) + + tp.Pattern = line[1 : idx+1] + line, err = tp.parseFlags(line[idx+2:]) + if err != nil { + errFunc(err) + continue + } + + idx = strings.IndexByte(line[1:], line[0]) + err = tp.parseMatch(line[1 : idx+1]) + if err != nil { + errFunc(err) + continue + } + + patterns = append(patterns, tp) + } + + err = scanner.Err() + require.NoError(t, err) + return patterns +} + +func (tp *TestPattern) fail(t testing.TB, msg string, args ...any) bool { + t.Helper() + msg = fmt.Sprintf(msg, args...) + t.Errorf("%s (in line %d)\nregexp: %s\ninput: %q\noriginal: %s", msg, tp.Lineno, tp.Pattern, tp.Input, tp.Line) + return false +} + +func (tp *TestPattern) Test(t testing.TB) bool { + re, err := func() (re *icuregex.Pattern, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("PANIC: %v", r) + } + }() + re, err = icuregex.CompileString(tp.Pattern, tp.Flags) + return + }() + if err != nil { + if tp.Options.MustError { + return true + } + + return tp.fail(t, "unexpected parser failure: %v", err) + } + if tp.Options.MustError { + return tp.fail(t, "parse failure expected") + } + + matcher := re.Match(tp.Input) + var isMatch bool + var findCount = tp.Options.FindCount + if findCount == 0 { + findCount = 1 + } + + for i := 0; i < findCount; i++ { + isMatch, err = func() (bool, error) { + defer func() { + if r := recover(); r != nil { + tp.fail(t, "unexpected match failure: %v", r) + } + }() + switch tp.Options.MatchFunc { + case FuncMatches: + return matcher.Matches() + case FuncLookingAt: + return matcher.LookingAt() + case FuncFind: + return matcher.Find() + default: + panic("invalid MatchFunc") + } + }() + } + + require.NoError(t, err) + + if !isMatch && len(tp.Groups) > 0 { + return tp.fail(t, "Match expected, but none found.") + } + if isMatch && len(tp.Groups) == 0 { + return tp.fail(t, "No match expected, but found one at position %d", matcher.Start()) + } + if tp.Options.MatchOnly { + return true + } + + for i := 0; i < matcher.GroupCount(); i++ { + expectedStart := -1 + expectedEnd := -1 + + if i < len(tp.Groups) { + expectedStart = tp.Groups[i].Start + expectedEnd = tp.Groups[i].End + } + if gotStart := matcher.StartForGroup(i); gotStart != expectedStart { + return tp.fail(t, "Incorrect start position for group %d. Expected %d, got %d", i, expectedStart, gotStart) + } + if gotEnd := matcher.EndForGroup(i); gotEnd != expectedEnd { + return tp.fail(t, "Incorrect end position for group %d. Expected %d, got %d", i, expectedEnd, gotEnd) + } + } + + if matcher.GroupCount()+1 < len(tp.Groups) { + return tp.fail(t, "Expected %d capture groups, found %d", len(tp.Groups)-1, matcher.GroupCount()) + } + + if tp.Options.HitEnd == Expected && !matcher.HitEnd() { + return tp.fail(t, "HitEnd() returned false. Expected true") + } + if tp.Options.HitEnd == NotExpected && matcher.HitEnd() { + return tp.fail(t, "HitEnd() returned true. Expected false") + } + + if tp.Options.RequireEnd == Expected && !matcher.RequireEnd() { + return tp.fail(t, "RequireEnd() returned false. Expected true") + } + if tp.Options.RequireEnd == NotExpected && matcher.RequireEnd() { + return tp.fail(t, "RequireEnd() returned true. Expected false") + } + + return true +} + +func TestICU(t *testing.T) { + pats := ParseTestFile(t, "testdata/regextst.txt") + + var valid int + + for _, p := range pats { + if p.Test(t) { + valid++ + } + } + + t.Logf("%d/%d (%.02f)", valid, len(pats), float64(valid)/float64(len(pats))) +} + +func TestICUExtended(t *testing.T) { + // This tests additional cases that aren't covered in the + // copied ICU test suite. + pats := ParseTestFile(t, "testdata/regextst_extended.txt") + + var valid int + + for _, p := range pats { + if p.Test(t) { + valid++ + } + } + + t.Logf("%d/%d (%.02f)", valid, len(pats), float64(valid)/float64(len(pats))) +} + +func TestCornerCases(t *testing.T) { + var cases = []struct { + Pattern string + Input string + Flags icuregex.RegexpFlag + Match bool + }{ + {`xyz$`, "xyz\n", 0, true}, + {`a*+`, "abbxx", 0, true}, + {`(ABC){1,2}+ABC`, "ABCABCABC", 0, true}, + {`(ABC){2,3}+ABC`, "ABCABCABC", 0, false}, + {`(abc)*+a`, "abcabcabc", 0, false}, + {`(abc)*+a`, "abcabcab", 0, true}, + {`a\N{LATIN SMALL LETTER B}c`, "abc", 0, true}, + {`a.b`, "a\rb", icuregex.UnixLines, true}, + {`a.b`, "a\rb", 0, false}, + {`(?d)abc$`, "abc\r", 0, false}, + {`[ \b]`, "b", 0, true}, + {`[abcd-\N{LATIN SMALL LETTER G}]+`, "xyz-abcdefghij-", 0, true}, + {`[[abcd]&&[ac]]+`, "bacacd", 0, true}, + } + + for _, tc := range cases { + t.Run(tc.Pattern, func(t *testing.T) { + _, err := icuregex.CompileString(tc.Pattern, tc.Flags) + require.NoError(t, err) + }) + } +} + +func TestOne(t *testing.T) { + const Pattern = `\p{CaseIgnorable}` + const Input = "foo.bar" + const Flags = 0 + + re, err := icuregex.CompileString(Pattern, Flags) + require.NoError(t, err) + + re.Dump(os.Stderr) + + m := icuregex.NewMatcher(re) + m.Dumper(os.Stderr) + m.ResetString(Input) + found, err := m.Find() + require.NoError(t, err) + t.Logf("match = %v", found) +} diff --git a/go/mysql/icuregex/internal/bytestrie/bytes_trie.go b/go/mysql/icuregex/internal/bytestrie/bytes_trie.go new file mode 100644 index 00000000000..aff80dc3e69 --- /dev/null +++ b/go/mysql/icuregex/internal/bytestrie/bytes_trie.go @@ -0,0 +1,354 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bytestrie + +type BytesTrie struct { + pos []byte + original []byte + remainingMatchLength int32 +} + +func New(pos []byte) BytesTrie { + return BytesTrie{pos: pos, original: pos, remainingMatchLength: -1} +} + +type result int32 + +const ( /** + * The input unit(s) did not continue a matching string. + * Once current()/next() return NO_MATCH, + * all further calls to current()/next() will also return NO_MATCH, + * until the trie is reset to its original state or to a saved state. + * @stable ICU 4.8 + */ + noMatch result = iota + /** + * The input unit(s) continued a matching string + * but there is no value for the string so far. + * (It is a prefix of a longer string.) + * @stable ICU 4.8 + */ + noValue + /** + * The input unit(s) continued a matching string + * and there is a value for the string so far. + * This value will be returned by getValue(). + * No further input byte/unit can continue a matching string. + * @stable ICU 4.8 + */ + finalValue + /** + * The input unit(s) continued a matching string + * and there is a value for the string so far. + * This value will be returned by getValue(). + * Another input byte/unit can continue a matching string. + * @stable ICU 4.8 + */ + intermediateValue +) + +const ( + maxBranchLinearSubNodeLength = 5 + + // 10..1f: Linear-match node, match 1..16 bytes and continue reading the next node. + minLinearMatch = 0x10 + maxLinearMatchLength = 0x10 + + // 20..ff: Variable-length value node. + // If odd, the value is final. (Otherwise, intermediate value or jump delta.) + // Then shift-right by 1 bit. + // The remaining lead byte value indicates the number of following bytes (0..4) + // and contains the value's top bits. + minValueLead = minLinearMatch + maxLinearMatchLength // 0x20 + // It is a final value if bit 0 is set. + valueIsFinal = 1 + + // Compact value: After testing bit 0, shift right by 1 and then use the following thresholds. + minOneByteValueLead = minValueLead / 2 // 0x10 + maxOneByteValue = 0x40 // At least 6 bits in the first byte. + + minTwoByteValueLead = minOneByteValueLead + maxOneByteValue + 1 // 0x51 + maxTwoByteValue = 0x1aff + minThreeByteValueLead = minTwoByteValueLead + (maxTwoByteValue >> 8) + 1 // 0x6c + fourByteValueLead = 0x7e + + // Compact delta integers. + maxOneByteDelta = 0xbf + minTwoByteDeltaLead = maxOneByteDelta + 1 // 0xc0 + minThreeByteDeltaLead = 0xf0 + fourByteDeltaLead = 0xfe +) + +func (bt *BytesTrie) ContainsName(name string) bool { + result := noValue + for _, c := range []byte(name) { + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + if c == 0x2d || c == 0x5f || c == 0x20 || (0x09 <= c && c <= 0x0d) { + continue + } + if result&1 == 0 { + return false + } + result = bt.next(int32(c)) + } + return result >= finalValue +} + +func (bt *BytesTrie) next(inByte int32) result { + pos := bt.pos + if pos == nil { + return noMatch + } + if inByte < 0 { + inByte += 0x100 + } + length := bt.remainingMatchLength // Actual remaining match length minus 1. + if length >= 0 { + match := inByte == int32(pos[0]) + pos = pos[1:] + // Remaining part of a linear-match node. + if match { + length = length - 1 + bt.remainingMatchLength = length + bt.pos = pos + if length < 0 { + node := int32(pos[0]) + if node >= minValueLead { + return bt.valueResult(node) + } + } + return noValue + } + bt.stop() + return noMatch + } + return bt.nextImpl(pos, inByte) +} + +func (bt *BytesTrie) nextImpl(pos []byte, inByte int32) result { + for { + node := int32(pos[0]) + pos = pos[1:] + if node < minLinearMatch { + return bt.branchNext(pos, node, inByte) + } else if node < minValueLead { + // Match the first of length+1 bytes. + length := node - minLinearMatch // Actual match length minus 1. + match := inByte == int32(pos[0]) + pos = pos[1:] + if match { + length = length - 1 + bt.remainingMatchLength = length + bt.pos = pos + if length < 0 { + node = int32(pos[0]) + if node >= minValueLead { + return bt.valueResult(node) + } + } + return noValue + } + // No match. + break + } else if (node & valueIsFinal) != 0 { + // No further matching bytes. + break + } else { + // Skip intermediate value. + pos = bt.skipValue2(pos, node) + // The next node must not also be a value node. + } + } + bt.stop() + return noMatch +} + +func (bt *BytesTrie) stop() { + bt.pos = nil +} + +func (bt *BytesTrie) valueResult(node int32) result { + return intermediateValue - result(node&valueIsFinal) +} + +func (bt *BytesTrie) branchNext(pos []byte, length int32, inByte int32) result { + // Branch according to the current unit. + if length == 0 { + length = int32(pos[0]) + pos = pos[1:] + } + length++ + // The length of the branch is the number of units to select from. + // The data structure encodes a binary search. + for length > maxBranchLinearSubNodeLength { + p := int32(pos[0]) + pos = pos[1:] + if inByte < p { + length >>= 1 + pos = bt.jumpByDelta(pos) + } else { + length = length - (length >> 1) + pos = bt.skipDelta(pos) + } + } + // Drop down to linear search for the last few bytes. + // length>=2 because the loop body above sees length>kMaxBranchLinearSubNodeLength>=3 + // and divides length by 2. + for { + p := int32(pos[0]) + pos = pos[1:] + if inByte == p { + var result result + node := int32(pos[0]) + if (node & valueIsFinal) != 0 { + // Leave the final value for getValue() to read. + result = finalValue + } else { + // Use the non-final value as the jump delta. + pos = pos[1:] + // int32_t delta=readValue(pos, node>>1); + node >>= 1 + var delta int32 + if node < minTwoByteValueLead { + delta = node - minOneByteValueLead + } else if node < minThreeByteValueLead { + delta = ((node - minTwoByteValueLead) << 8) | int32(pos[0]) + pos = pos[1:] + } else if node < fourByteValueLead { + delta = ((node - minThreeByteValueLead) << 16) | (int32(pos[0]) << 8) | int32(pos[1]) + pos = pos[2:] + } else if node == fourByteValueLead { + delta = (int32(pos[0]) << 16) | (int32(pos[1]) << 8) | int32(pos[2]) + pos = pos[3:] + } else { + delta = (int32(pos[0]) << 24) | (int32(pos[1]) << 16) | (int32(pos[2]) << 8) | int32(pos[3]) + pos = pos[4:] + } + // end readValue() + pos = pos[delta:] + node = int32(pos[0]) + if node >= minValueLead { + result = bt.valueResult(node) + } else { + result = noValue + } + } + bt.pos = pos + return result + } + length-- + pos = bt.skipValue1(pos) + if length <= 1 { + break + } + } + p := int32(pos[0]) + pos = pos[1:] + if inByte == p { + bt.pos = pos + node := int32(pos[0]) + if node >= minValueLead { + return bt.valueResult(node) + } + return noValue + } + bt.stop() + return noMatch +} + +func (bt *BytesTrie) skipValue1(pos []byte) []byte { + leadByte := int32(pos[0]) + return bt.skipValue2(pos[1:], leadByte) +} + +func (bt *BytesTrie) skipValue2(pos []byte, leadByte int32) []byte { + if leadByte >= (minTwoByteValueLead << 1) { + if leadByte < (minThreeByteValueLead << 1) { + pos = pos[1:] + } else if leadByte < (fourByteValueLead << 1) { + pos = pos[2:] + } else { + pos = pos[3+((leadByte>>1)&1):] + } + } + return pos +} + +func (bt *BytesTrie) skipDelta(pos []byte) []byte { + delta := int32(pos[0]) + pos = pos[1:] + if delta >= minTwoByteDeltaLead { + if delta < minThreeByteDeltaLead { + pos = pos[1:] + } else if delta < fourByteDeltaLead { + pos = pos[2:] + } else { + pos = pos[3+(delta&1):] + } + } + return pos +} + +func (bt *BytesTrie) jumpByDelta(pos []byte) []byte { + delta := int32(pos[0]) + pos = pos[1:] + if delta < minTwoByteDeltaLead { + // nothing to do + } else if delta < minThreeByteDeltaLead { + delta = ((delta - minTwoByteDeltaLead) << 8) | int32(pos[0]) + pos = pos[1:] + } else if delta < fourByteDeltaLead { + delta = ((delta - minThreeByteDeltaLead) << 16) | (int32(pos[0]) << 8) | int32(pos[1]) + pos = pos[2:] + } else if delta == fourByteDeltaLead { + delta = (int32(pos[0]) << 16) | (int32(pos[1]) << 8) | int32(pos[2]) + pos = pos[3:] + } else { + delta = (int32(pos[0]) << 24) | (int32(pos[1]) << 16) | (int32(pos[2]) << 8) | int32(pos[3]) + pos = pos[4:] + } + return pos[delta:] +} + +func (bt *BytesTrie) GetValue() int32 { + pos := bt.pos + leadByte := int32(pos[0]) + return bt.readValue(pos[1:], leadByte>>1) +} + +func (bt *BytesTrie) readValue(pos []byte, leadByte int32) int32 { + var value int32 + if leadByte < minTwoByteValueLead { + value = leadByte - minOneByteValueLead + } else if leadByte < minThreeByteValueLead { + value = ((leadByte - minTwoByteValueLead) << 8) | int32(pos[0]) + } else if leadByte < fourByteValueLead { + value = ((leadByte - minThreeByteValueLead) << 16) | (int32(pos[0]) << 8) | int32(pos[1]) + } else if leadByte == fourByteValueLead { + value = (int32(pos[0]) << 16) | (int32(pos[1]) << 8) | int32(pos[2]) + } else { + value = (int32(pos[0]) << 24) | (int32(pos[1]) << 16) | (int32(pos[2]) << 8) | int32(pos[3]) + } + return value +} diff --git a/go/mysql/icuregex/internal/icudata/README.md b/go/mysql/icuregex/internal/icudata/README.md new file mode 100644 index 00000000000..070633b555e --- /dev/null +++ b/go/mysql/icuregex/internal/icudata/README.md @@ -0,0 +1,46 @@ +# ICU data files + +These are files copied from the ICU project that contain various types +of data, like character properties. + +## How to update + +Not all data files are immediately available in the source code, but +need to be built first. This applies to the character / word break +tables. + +### Copy from source data + +The `icu4c/source/data/in` directory in the source distribution contains +the following ICU data files we use: + +``` +pnames.icu +ubidi.icu +ucase.icu +unames.icu +ulayout.icu +uprops.icu +nfc.nrm +nfkc.nrm +nfkc_cf.nrm +``` + +The character and word break table need to be compiled before they can +be copied. + +In `icu4c/source` run: + +```bash +./configure --with-data-packaging=files +make +``` + +This will compile the character and word break data into a binary file +that we can use. Once built, the following files we use are available in +`icu4c/source/data/out/build/icudtl/brkitr`: + +``` +char.brk +word.brk +``` diff --git a/go/mysql/icuregex/internal/icudata/char.brk b/go/mysql/icuregex/internal/icudata/char.brk new file mode 100644 index 00000000000..a243ae6580a Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/char.brk differ diff --git a/go/mysql/icuregex/internal/icudata/embed.go b/go/mysql/icuregex/internal/icudata/embed.go new file mode 100644 index 00000000000..12dbd5d0322 --- /dev/null +++ b/go/mysql/icuregex/internal/icudata/embed.go @@ -0,0 +1,101 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icudata + +import _ "embed" + +// PNames is the list of property names. It is used +// for example by usage of Unicode propery name aliases +// in regular expressions. +// +//go:embed pnames.icu +var PNames []byte + +// UBidi is the list of bidi properties. These are used +// by Bidi class aliases in regular expressions. +// +//go:embed ubidi.icu +var UBidi []byte + +// UCase is the list of case properties. These are used +// for case folding internally for case insensitive matching. +// +//go:embed ucase.icu +var UCase []byte + +// UEmoji is the list of Emoji properties. +// +//go:embed uemoji.icu +var UEmoji []byte + +// ULayout is used for property checks agains the InPC, InSC +// and VO properties. +// +//go:embed ulayout.icu +var ULayout []byte + +// UNames is used for named character references in regular +// expressions. +// +//go:embed unames.icu +var UNames []byte + +// UProps is used for all the character properties. These +// are used to retrieve properties of characters for character +// classes, like letters, whitespace, digits etc. +// +//go:embed uprops.icu +var UProps []byte + +// Nfc is the table for character normalization where canonical +// decomposition is done followed by canonical composition. +// This is used for property checks of characters about composition. +// +//go:embed nfc.nrm +var Nfc []byte + +// Nfkc is the table for character normalization where compatibility +// decomposition is done followed by canonical composition. +// This is used for property checks of characters about composition. +// +//go:embed nfkc.nrm +var Nfkc []byte + +// NfkcCf is the table for character normalization where compatibility +// decomposition is done followed by canonical composition with +// case folding. +// This is used for property checks of characters about composition. +// +//Unused: go:embed nfkc_cf.nrm +//var NfkcCf []byte + +// BrkChar is used for matching against character break +// characters in regular expressions. +// +//Unused: go:embed char.brk +//var BrkChar []byte + +// BrkWord is used for matching against word break +// characters in regular expressions. +// +//Unused: go:embed word.brk +///var BrkWord []byte diff --git a/go/mysql/icuregex/internal/icudata/nfc.nrm b/go/mysql/icuregex/internal/icudata/nfc.nrm new file mode 100644 index 00000000000..2b0e972807e Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/nfc.nrm differ diff --git a/go/mysql/icuregex/internal/icudata/nfkc.nrm b/go/mysql/icuregex/internal/icudata/nfkc.nrm new file mode 100644 index 00000000000..deffa3daa81 Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/nfkc.nrm differ diff --git a/go/mysql/icuregex/internal/icudata/nfkc_cf.nrm b/go/mysql/icuregex/internal/icudata/nfkc_cf.nrm new file mode 100644 index 00000000000..3f8d756a0f4 Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/nfkc_cf.nrm differ diff --git a/go/mysql/icuregex/internal/icudata/pnames.icu b/go/mysql/icuregex/internal/icudata/pnames.icu new file mode 100644 index 00000000000..c960dc00b49 Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/pnames.icu differ diff --git a/go/mysql/icuregex/internal/icudata/ubidi.icu b/go/mysql/icuregex/internal/icudata/ubidi.icu new file mode 100644 index 00000000000..cfde07406cc Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/ubidi.icu differ diff --git a/go/mysql/icuregex/internal/icudata/ucase.icu b/go/mysql/icuregex/internal/icudata/ucase.icu new file mode 100644 index 00000000000..670b0827d55 Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/ucase.icu differ diff --git a/go/mysql/icuregex/internal/icudata/uemoji.icu b/go/mysql/icuregex/internal/icudata/uemoji.icu new file mode 100644 index 00000000000..11fdf50ff18 Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/uemoji.icu differ diff --git a/go/mysql/icuregex/internal/icudata/ulayout.icu b/go/mysql/icuregex/internal/icudata/ulayout.icu new file mode 100644 index 00000000000..ca6d0013c08 Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/ulayout.icu differ diff --git a/go/mysql/icuregex/internal/icudata/unames.icu b/go/mysql/icuregex/internal/icudata/unames.icu new file mode 100644 index 00000000000..e271e78619f Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/unames.icu differ diff --git a/go/mysql/icuregex/internal/icudata/uprops.icu b/go/mysql/icuregex/internal/icudata/uprops.icu new file mode 100644 index 00000000000..0cdd8dea636 Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/uprops.icu differ diff --git a/go/mysql/icuregex/internal/icudata/word.brk b/go/mysql/icuregex/internal/icudata/word.brk new file mode 100644 index 00000000000..80460c60128 Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/word.brk differ diff --git a/go/mysql/icuregex/internal/normalizer/constants.go b/go/mysql/icuregex/internal/normalizer/constants.go new file mode 100644 index 00000000000..3c2de588952 --- /dev/null +++ b/go/mysql/icuregex/internal/normalizer/constants.go @@ -0,0 +1,122 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package normalizer + +const ( + // Fixed norm16 values. + minYesYesWithCC = 0xfe02 + jamoVt = 0xfe00 + minNormalMaybeYes = 0xfc00 + jamoL = 2 // offset=1 hasCompBoundaryAfter=false + inert = 1 // offset=0 hasCompBoundaryAfter=true + + // norm16 bit 0 is comp-boundary-after. + hasCompBoundaryAfter = 1 + offsetShift = 1 + + // For algorithmic one-way mappings, norm16 bits 2..1 indicate the + // tccc (0, 1, >1) for quick FCC boundary-after tests. + deltaTccc0 = 0 + deltaTccc1 = 2 + deltaTcccGt1 = 4 + deltaTcccMask = 6 + deltaShift = 3 + + maxDelta = 0x40 +) + +const ( + jamoLBase rune = 0x1100 /* "lead" jamo */ + jamoLEnd rune = 0x1112 + jamoVBase rune = 0x1161 /* "vowel" jamo */ + jamoVEnd rune = 0x1175 + jamoTBase rune = 0x11a7 /* "trail" jamo */ + jamoTEnd rune = 0x11c2 + + hangulBase rune = 0xac00 + hangulEnd rune = 0xd7a3 + + jamoLCount rune = 19 + jamoVCount rune = 21 + jamoTCount rune = 28 + + hangulCount = jamoLCount * jamoVCount * jamoTCount + hangulLimit = hangulBase + hangulCount +) + +const ( + mappingHasCccLcccWord = 0x80 + mappingHasRawMapping = 0x40 + // unused bit 0x20, + mappingLengthMask = 0x1f +) + +/** + * Constants for normalization modes. + * @deprecated ICU 56 Use unorm2.h instead. + */ +type Mode int32 + +const ( + /** No decomposition/composition. @deprecated ICU 56 Use unorm2.h instead. */ + NormNone Mode = 1 + /** Canonical decomposition. @deprecated ICU 56 Use unorm2.h instead. */ + NormNfd Mode = 2 + /** Compatibility decomposition. @deprecated ICU 56 Use unorm2.h instead. */ + NormNfkd Mode = 3 + /** Canonical decomposition followed by canonical composition. @deprecated ICU 56 Use unorm2.h instead. */ + NormNfc Mode = 4 + /** Default normalization. @deprecated ICU 56 Use unorm2.h instead. */ + NormDefault Mode = NormNfc + /** Compatibility decomposition followed by canonical composition. @deprecated ICU 56 Use unorm2.h instead. */ + NormNfkc Mode = 5 + /** "Fast C or D" form. @deprecated ICU 56 Use unorm2.h instead. */ + NormFcd Mode = 6 +) + +/** + * Result values for normalization quick check functions. + * For details see http://www.unicode.org/reports/tr15/#Detecting_Normalization_Forms + * @stable ICU 2.0 + */ +type CheckResult int + +const ( + /** + * The input string is not in the normalization form. + * @stable ICU 2.0 + */ + No CheckResult = iota + /** + * The input string is in the normalization form. + * @stable ICU 2.0 + */ + Yes + /** + * The input string may or may not be in the normalization form. + * This value is only returned for composition forms like NFC and FCC, + * when a backward-combining character is found for which the surrounding text + * would have to be analyzed further. + * @stable ICU 2.0 + */ + Maybe +) diff --git a/go/mysql/icuregex/internal/normalizer/normalizer.go b/go/mysql/icuregex/internal/normalizer/normalizer.go new file mode 100644 index 00000000000..c13a4878deb --- /dev/null +++ b/go/mysql/icuregex/internal/normalizer/normalizer.go @@ -0,0 +1,482 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package normalizer + +import ( + "errors" + "sync" + + "vitess.io/vitess/go/mysql/icuregex/internal/icudata" + "vitess.io/vitess/go/mysql/icuregex/internal/udata" + "vitess.io/vitess/go/mysql/icuregex/internal/uset" + "vitess.io/vitess/go/mysql/icuregex/internal/utf16" + "vitess.io/vitess/go/mysql/icuregex/internal/utrie" +) + +type Normalizer struct { + minDecompNoCP rune + minCompNoMaybeCP rune + minLcccCP rune + + // Norm16 value thresholds for quick check combinations and types of extra data. + minYesNo uint16 + minYesNoMappingsOnly uint16 + minNoNo uint16 + minNoNoCompBoundaryBefore uint16 + minNoNoCompNoMaybeCC uint16 + minNoNoEmpty uint16 + limitNoNo uint16 + centerNoNoDelta uint16 + minMaybeYes uint16 + + normTrie *utrie.UcpTrie + + maybeYesCompositions []uint16 + extraData []uint16 // mappings and/or compositions for yesYes, yesNo & noNo characters + smallFCD []uint8 // [0x100] one bit per 32 BMP code points, set if any FCD!=0 +} + +var nfc *Normalizer +var nfkc *Normalizer + +var normalizerOnce sync.Once + +func loadNormalizer() { + normalizerOnce.Do(func() { + nfc = &Normalizer{} + if err := nfc.load(icudata.Nfc); err != nil { + panic(err) + } + + nfkc = &Normalizer{} + if err := nfkc.load(icudata.Nfkc); err != nil { + panic(err) + } + }) +} + +const ixNormTrieOffset = 0 +const ixExtraDataOffset = 1 +const ixSmallFcdOffset = 2 +const ixReserved3Offset = 3 +const ixTotalSize = 7 + +const ixMinDecompNoCp = 8 +const ixMinCompNoMaybeCp = 9 + +/** Mappings & compositions in [minYesNo..minYesNoMappingsOnly[. */ +const ixMinYesNo = 10 + +/** Mappings are comp-normalized. */ +const ixMinNoNo = 11 +const ixLimitNoNo = 12 +const ixMinMaybeYes = 13 + +/** Mappings only in [minYesNoMappingsOnly..minNoNo[. */ +const ixMinYesNoMappingsOnly = 14 + +/** Mappings are not comp-normalized but have a comp boundary before. */ +const ixMinNoNoCompBoundaryBefore = 15 + +/** Mappings do not have a comp boundary before. */ +const ixMinNoNoCompNoMaybeCc = 16 + +/** Mappings to the empty string. */ +const ixMinNoNoEmpty = 17 + +const ixMinLcccCp = 18 +const ixCount = 20 + +func (n *Normalizer) load(data []byte) error { + bytes := udata.NewBytes(data) + + err := bytes.ReadHeader(func(info *udata.DataInfo) bool { + return info.Size >= 20 && + info.IsBigEndian == 0 && + info.CharsetFamily == 0 && + info.DataFormat[0] == 0x4e && /* dataFormat="unam" */ + info.DataFormat[1] == 0x72 && + info.DataFormat[2] == 0x6d && + info.DataFormat[3] == 0x32 && + info.FormatVersion[0] == 4 + }) + if err != nil { + return err + } + + indexesLength := int32(bytes.Uint32()) / 4 + if indexesLength <= ixMinLcccCp { + return errors.New("normalizer2 data: not enough indexes") + } + indexes := make([]int32, indexesLength) + indexes[0] = indexesLength * 4 + for i := int32(1); i < indexesLength; i++ { + indexes[i] = bytes.Int32() + } + + n.minDecompNoCP = indexes[ixMinDecompNoCp] + n.minCompNoMaybeCP = indexes[ixMinCompNoMaybeCp] + n.minLcccCP = indexes[ixMinLcccCp] + + n.minYesNo = uint16(indexes[ixMinYesNo]) + n.minYesNoMappingsOnly = uint16(indexes[ixMinYesNoMappingsOnly]) + n.minNoNo = uint16(indexes[ixMinNoNo]) + n.minNoNoCompBoundaryBefore = uint16(indexes[ixMinNoNoCompBoundaryBefore]) + n.minNoNoCompNoMaybeCC = uint16(indexes[ixMinNoNoCompNoMaybeCc]) + n.minNoNoEmpty = uint16(indexes[ixMinNoNoEmpty]) + n.limitNoNo = uint16(indexes[ixLimitNoNo]) + n.minMaybeYes = uint16(indexes[ixMinMaybeYes]) + + n.centerNoNoDelta = uint16(indexes[ixMinMaybeYes]>>deltaShift) - maxDelta - 1 + + offset := indexes[ixNormTrieOffset] + nextOffset := indexes[ixExtraDataOffset] + triePosition := bytes.Position() + + n.normTrie, err = utrie.UcpTrieFromBytes(bytes) + if err != nil { + return err + } + + trieLength := bytes.Position() - triePosition + if trieLength > nextOffset-offset { + return errors.New("normalizer2 data: not enough bytes for normTrie") + } + bytes.Skip((nextOffset - offset) - trieLength) // skip padding after trie bytes + + // Read the composition and mapping data. + offset = nextOffset + nextOffset = indexes[ixSmallFcdOffset] + numChars := (nextOffset - offset) / 2 + if numChars != 0 { + n.maybeYesCompositions = bytes.Uint16Slice(numChars) + n.extraData = n.maybeYesCompositions[((minNormalMaybeYes - n.minMaybeYes) >> offsetShift):] + } + + // smallFCD: new in formatVersion 2 + n.smallFCD = bytes.Uint8Slice(0x100) + return nil +} + +func Nfc() *Normalizer { + loadNormalizer() + return nfc +} + +func Nfkc() *Normalizer { + loadNormalizer() + return nfkc +} + +func (n *Normalizer) AddPropertyStarts(u *uset.UnicodeSet) { + var start, end rune + var value uint32 + for { + end, value = nfc.normTrie.GetRange(start, utrie.UcpMapRangeFixedLeadSurrogates, inert, nil) + if end < 0 { + break + } + u.AddRune(start) + if start != end && n.isAlgorithmicNoNo(uint16(value)) && (value&deltaTcccMask) > deltaTccc1 { + // Range of code points with same-norm16-value algorithmic decompositions. + // They might have different non-zero FCD16 values. + prevFCD16 := n.GetFCD16(start) + for { + start++ + if start > end { + break + } + fcd16 := n.GetFCD16(start) + if fcd16 != prevFCD16 { + u.AddRune(start) + prevFCD16 = fcd16 + } + } + } + start = end + 1 + } + + // add Hangul LV syllables and LV+1 because of skippables + for c := hangulBase; c < hangulLimit; c += jamoTCount { + u.AddRune(c) + u.AddRune(c + 1) + } + u.AddRune(hangulLimit) +} + +func (n *Normalizer) isAlgorithmicNoNo(norm16 uint16) bool { + return n.limitNoNo <= norm16 && norm16 < n.minMaybeYes +} + +func (n *Normalizer) GetFCD16(c rune) uint16 { + if c < n.minDecompNoCP { + return 0 + } else if c <= 0xffff { + if !n.singleLeadMightHaveNonZeroFCD16(c) { + return 0 + } + } + return n.getFCD16FromNormData(c) +} + +func (n *Normalizer) singleLeadMightHaveNonZeroFCD16(lead rune) bool { + // 0<=lead<=0xffff + bits := n.smallFCD[lead>>8] + if bits == 0 { + return false + } + return ((bits >> ((lead >> 5) & 7)) & 1) != 0 +} + +func (n *Normalizer) getFCD16FromNormData(c rune) uint16 { + norm16 := n.getNorm16(c) + if norm16 >= n.limitNoNo { + if norm16 >= minNormalMaybeYes { + // combining mark + norm16 = uint16(n.getCCFromNormalYesOrMaybe(norm16)) + return norm16 | (norm16 << 8) + } else if norm16 >= n.minMaybeYes { + return 0 + } else { // isDecompNoAlgorithmic(norm16) + deltaTrailCC := norm16 & deltaTcccMask + if deltaTrailCC <= deltaTccc1 { + return deltaTrailCC >> offsetShift + } + // Maps to an isCompYesAndZeroCC. + c = n.mapAlgorithmic(c, norm16) + norm16 = n.getRawNorm16(c) + } + } + + if norm16 <= n.minYesNo || n.isHangulLVT(norm16) { + // no decomposition or Hangul syllable, all zeros + return 0 + } + // c decomposes, get everything from the variable-length extra data + mapping := n.getMapping(norm16) + firstUnit := mapping[1] + if firstUnit&mappingHasCccLcccWord != 0 { + norm16 |= mapping[0] & 0xff00 + } + return norm16 +} + +func (n *Normalizer) getMapping(norm16 uint16) []uint16 { + return n.extraData[(norm16>>offsetShift)-1:] +} + +func (n *Normalizer) getNorm16(c rune) uint16 { + if utf16.IsLead(c) { + return inert + } + return n.getRawNorm16(c) +} + +func (n *Normalizer) getRawNorm16(c rune) uint16 { + return uint16(n.normTrie.Get(c)) +} + +func (n *Normalizer) getCCFromNormalYesOrMaybe(norm16 uint16) uint8 { + return uint8(norm16 >> offsetShift) +} + +func (n *Normalizer) mapAlgorithmic(c rune, norm16 uint16) rune { + return c + rune(norm16>>deltaShift) - rune(n.centerNoNoDelta) +} + +func (n *Normalizer) isHangulLV(norm16 uint16) bool { + return norm16 == n.minYesNo +} + +func (n *Normalizer) isHangulLVT(norm16 uint16) bool { + return norm16 == n.hangulLVT() +} + +func (n *Normalizer) hangulLVT() uint16 { + return n.minYesNoMappingsOnly | hasCompBoundaryAfter +} + +func (n *Normalizer) getComposeQuickCheck(c rune) CheckResult { + return n.getCompQuickCheck(n.getNorm16(c)) +} + +func (n *Normalizer) getDecomposeQuickCheck(c rune) CheckResult { + if n.isDecompYes(n.getNorm16(c)) { + return Yes + } + return No +} + +func QuickCheck(c rune, mode Mode) CheckResult { + if mode <= NormNone || NormFcd <= mode { + return Yes + } + switch mode { + case NormNfc: + return Nfc().getComposeQuickCheck(c) + case NormNfd: + return Nfc().getDecomposeQuickCheck(c) + case NormNfkc: + return Nfkc().getComposeQuickCheck(c) + case NormNfkd: + return Nfkc().getDecomposeQuickCheck(c) + default: + return Maybe + } +} + +func IsInert(c rune, mode Mode) bool { + switch mode { + case NormNfc: + return Nfc().isCompInert(c) + case NormNfd: + return Nfc().isDecompInert(c) + case NormNfkc: + return Nfkc().isCompInert(c) + case NormNfkd: + return Nfkc().isDecompInert(c) + default: + return true + } +} + +func (n *Normalizer) isDecompYes(norm16 uint16) bool { + return norm16 < n.minYesNo || n.minMaybeYes <= norm16 +} + +func (n *Normalizer) getCompQuickCheck(norm16 uint16) CheckResult { + if norm16 < n.minNoNo || minYesYesWithCC <= norm16 { + return Yes + } else if n.minMaybeYes <= norm16 { + return Maybe + } else { + return No + } +} + +func (n *Normalizer) isMaybeOrNonZeroCC(norm16 uint16) bool { + return norm16 >= n.minMaybeYes +} + +func (n *Normalizer) isDecompNoAlgorithmic(norm16 uint16) bool { + return norm16 >= n.limitNoNo +} + +func (n *Normalizer) IsCompNo(c rune) bool { + norm16 := n.getNorm16(c) + return n.minNoNo <= norm16 && norm16 < n.minMaybeYes +} + +func (n *Normalizer) Decompose(c rune) []rune { + norm16 := n.getNorm16(c) + if c < n.minDecompNoCP || n.isMaybeOrNonZeroCC(norm16) { + // c does not decompose + return nil + } + var decomp []rune + + if n.isDecompNoAlgorithmic(norm16) { + // Maps to an isCompYesAndZeroCC. + c = n.mapAlgorithmic(c, norm16) + decomp = append(decomp, c) + // The mapping might decompose further. + norm16 = n.getRawNorm16(c) + } + if norm16 < n.minYesNo { + return decomp + } else if n.isHangulLV(norm16) || n.isHangulLVT(norm16) { + // Hangul syllable: decompose algorithmically + parts := hangulDecompose(c) + for len(parts) > 0 { + c = rune(parts[0]) + decomp = append(decomp, c) + parts = parts[1:] + } + return decomp + } + // c decomposes, get everything from the variable-length extra data + mapping := n.getMapping(norm16) + length := mapping[1] & mappingLengthMask + mapping = mapping[2 : 2+length] + + for len(mapping) > 0 { + c, mapping = utf16.NextUnsafe(mapping) + decomp = append(decomp, c) + } + + return decomp +} + +func hangulDecompose(c rune) []uint16 { + c -= hangulBase + c2 := c % jamoTCount + c /= jamoTCount + var buffer []uint16 + buffer = append(buffer, uint16(jamoLBase+c/jamoVCount)) + buffer = append(buffer, uint16(jamoVBase+c%jamoVCount)) + if c2 != 0 { + buffer = append(buffer, uint16(jamoTBase+c2)) + } + return buffer +} + +func (n *Normalizer) isCompInert(c rune) bool { + norm16 := n.getNorm16(c) + return n.isCompYesAndZeroCC(norm16) && (norm16&hasCompBoundaryAfter) != 0 +} + +func (n *Normalizer) isDecompInert(c rune) bool { + return n.isDecompYesAndZeroCC(n.getNorm16(c)) +} + +func (n *Normalizer) isCompYesAndZeroCC(norm16 uint16) bool { + return norm16 < n.minNoNo +} + +func (n *Normalizer) isDecompYesAndZeroCC(norm16 uint16) bool { + return norm16 < n.minYesNo || + norm16 == jamoVt || + (n.minMaybeYes <= norm16 && norm16 <= minNormalMaybeYes) +} + +func (n *Normalizer) CombiningClass(c rune) uint8 { + return n.getCC(n.getNorm16(c)) +} + +func (n *Normalizer) getCC(norm16 uint16) uint8 { + if norm16 >= minNormalMaybeYes { + return n.getCCFromNormalYesOrMaybe(norm16) + } + if norm16 < n.minNoNo || n.limitNoNo <= norm16 { + return 0 + } + return n.getCCFromNoNo(norm16) + +} + +func (n *Normalizer) getCCFromNoNo(norm16 uint16) uint8 { + mapping := n.getMapping(norm16) + if mapping[1]&mappingHasCccLcccWord != 0 { + return uint8(mapping[0]) + } + return 0 +} diff --git a/go/mysql/icuregex/internal/pattern/unescape.go b/go/mysql/icuregex/internal/pattern/unescape.go new file mode 100644 index 00000000000..e4a554ff612 --- /dev/null +++ b/go/mysql/icuregex/internal/pattern/unescape.go @@ -0,0 +1,314 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pattern + +import ( + "strings" + "unicode/utf8" + + "vitess.io/vitess/go/mysql/icuregex/internal/utf16" +) + +/* Convert one octal digit to a numeric value 0..7, or -1 on failure */ +func _digit8(c rune) rune { + if c >= 0x0030 && c <= 0x0037 { + return (c - 0x0030) + } + return -1 +} + +/* Convert one hex digit to a numeric value 0..F, or -1 on failure */ +func _digit16(c rune) rune { + if c >= 0x0030 && c <= 0x0039 { + return (c - 0x0030) + } + if c >= 0x0041 && c <= 0x0046 { + return (c - (0x0041 - 10)) + } + if c >= 0x0061 && c <= 0x0066 { + return (c - (0x0061 - 10)) + } + return -1 +} + +var unscapeMap = []byte{ + /*" 0x22, 0x22 */ + /*' 0x27, 0x27 */ + /*? 0x3F, 0x3F */ + /*\ 0x5C, 0x5C */ + /*a*/ 0x61, 0x07, + /*b*/ 0x62, 0x08, + /*e*/ 0x65, 0x1b, + /*f*/ 0x66, 0x0c, + /*n*/ 0x6E, 0x0a, + /*r*/ 0x72, 0x0d, + /*t*/ 0x74, 0x09, + /*v*/ 0x76, 0x0b, +} + +func Unescape(str string) (string, bool) { + var idx int + if idx = strings.IndexByte(str, '\\'); idx < 0 { + return str, true + } + + var result strings.Builder + result.WriteString(str[:idx]) + str = str[idx:] + + for len(str) > 0 { + if str[0] == '\\' { + var r rune + r, str = UnescapeAt(str[1:]) + if r < 0 { + return "", false + } + result.WriteRune(r) + } else { + result.WriteByte(str[0]) + str = str[1:] + } + } + return result.String(), true +} + +func UnescapeAt(str string) (rune, string) { + c, w := utf8.DecodeRuneInString(str) + str = str[w:] + if c == utf8.RuneError && (w == 0 || w == 1) { + return -1, str + } + + var minDig, maxDig, n int + var braces bool + var bitsPerDigit = 4 + var result rune + + switch c { + case 'u': + minDig = 4 + maxDig = 4 + case 'U': + minDig = 8 + maxDig = 8 + case 'x': + minDig = 1 + if len(str) > 0 && str[0] == '{' { + str = str[1:] + braces = true + maxDig = 8 + } else { + maxDig = 2 + } + default: + if dig := _digit8(c); dig >= 0 { + minDig = 1 + maxDig = 4 + n = 1 + bitsPerDigit = 3 + result = dig + } + } + + if minDig != 0 { + for n < maxDig && len(str) > 0 { + c, w = utf8.DecodeRuneInString(str) + if c == utf8.RuneError && w == 1 { + return -1, str + } + + var dig rune + if bitsPerDigit == 3 { + dig = _digit8(c) + } else { + dig = _digit16(c) + } + if dig < 0 { + break + } + result = (result << bitsPerDigit) | dig + str = str[w:] + n++ + } + if n < minDig { + return -1, str + } + if braces { + if c != '}' { + return -1, str + } + str = str[1:] + } + if result < 0 || result > utf8.MaxRune { + return -1, str + } + if len(str) > 0 && utf16.IsLead(result) { + c, w = utf8.DecodeRuneInString(str) + if c == utf8.RuneError && (w == 0 || w == 1) { + return -1, str + } + if c == '\\' { + var str2 string + c, str2 = UnescapeAt(str[1:]) + if utf16.IsTrail(c) { + result = utf16.DecodeRune(result, c) + str = str2 + } + } + } + return result, str + } + + if c < utf8.RuneSelf { + for i := 0; i < len(unscapeMap); i += 2 { + if byte(c) == unscapeMap[i] { + return rune(unscapeMap[i+1]), str + } + if byte(c) < unscapeMap[i] { + break + } + } + } + + if c == 'c' && len(str) > 0 { + c, w = utf8.DecodeRuneInString(str) + if c == utf8.RuneError && (w == 0 || w == 1) { + return -1, str + } + return 0x1f & c, str[w:] + } + + return c, str +} + +func UnescapeAtRunes(str []rune) (rune, []rune) { + if len(str) == 0 { + return -1, str + } + + c := str[0] + str = str[1:] + if c == utf8.RuneError { + return -1, str + } + + var minDig, maxDig, n int + var braces bool + var bitsPerDigit = 4 + var result rune + + switch c { + case 'u': + minDig = 4 + maxDig = 4 + case 'U': + minDig = 8 + maxDig = 8 + case 'x': + minDig = 1 + if len(str) > 0 && str[0] == '{' { + str = str[1:] + braces = true + maxDig = 8 + } else { + maxDig = 2 + } + default: + if dig := _digit8(c); dig >= 0 { + minDig = 1 + maxDig = 4 + n = 1 + bitsPerDigit = 3 + result = dig + } + } + + if minDig != 0 { + for n < maxDig && len(str) > 0 { + c = str[0] + if c == utf8.RuneError { + return -1, str + } + + var dig rune + if bitsPerDigit == 3 { + dig = _digit8(c) + } else { + dig = _digit16(c) + } + if dig < 0 { + break + } + result = (result << bitsPerDigit) | dig + str = str[1:] + n++ + } + if n < minDig { + return -1, str + } + if braces { + if c != '}' { + return -1, str + } + str = str[1:] + } + if result < 0 || result > utf8.MaxRune { + return -1, str + } + if len(str) > 0 && utf16.IsLead(result) { + c = str[0] + if c == utf8.RuneError { + return -1, str + } + if c == '\\' { + var str2 []rune + c, str2 = UnescapeAtRunes(str[1:]) + if utf16.IsTrail(c) { + result = utf16.DecodeRune(result, c) + str = str2 + } + } + } + return result, str + } + + if c < utf8.RuneSelf { + for i := 0; i < len(unscapeMap); i += 2 { + if byte(c) == unscapeMap[i] { + return rune(unscapeMap[i+1]), str + } + if byte(c) < unscapeMap[i] { + break + } + } + } + + if c == 'c' && len(str) > 0 { + c = str[0] + if c == utf8.RuneError { + return -1, str + } + return 0x1f & c, str[1:] + } + + return c, str +} diff --git a/go/mysql/icuregex/internal/pattern/unescape_test.go b/go/mysql/icuregex/internal/pattern/unescape_test.go new file mode 100644 index 00000000000..0bb76c2bfdb --- /dev/null +++ b/go/mysql/icuregex/internal/pattern/unescape_test.go @@ -0,0 +1,48 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pattern + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestUnescapeAt(t *testing.T) { + r, str := UnescapeAt("ud800\\ud800\\udc00") + assert.Equal(t, rune(0xd800), r) + assert.Equal(t, "\\ud800\\udc00", str) + + r, str = UnescapeAt(str[1:]) + assert.Equal(t, rune(0x00010000), r) + assert.Equal(t, "", str) +} + +func TestUnescapeAtRunes(t *testing.T) { + r, str := UnescapeAtRunes([]rune("ud800\\ud800\\udc00")) + assert.Equal(t, rune(0xd800), r) + assert.Equal(t, []rune("\\ud800\\udc00"), str) + + r, str = UnescapeAtRunes(str[1:]) + assert.Equal(t, rune(0x00010000), r) + assert.Equal(t, []rune(""), str) +} diff --git a/go/mysql/icuregex/internal/pattern/utils.go b/go/mysql/icuregex/internal/pattern/utils.go new file mode 100644 index 00000000000..4dcf55e9f42 --- /dev/null +++ b/go/mysql/icuregex/internal/pattern/utils.go @@ -0,0 +1,111 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pattern + +import ( + "strings" + "unicode/utf8" +) + +var patternPropsLatin1 = [256]uint8{ + // WS: 9..D + 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + // WS: 20 Syntax: 21..2F + 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + // Syntax: 3A..40 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3, + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + // Syntax: 5B..5E + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, + // Syntax: 60 + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + // Syntax: 7B..7E + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, + // WS: 85 + 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + // Syntax: A1..A7, A9, AB, AC, AE + 0, 3, 3, 3, 3, 3, 3, 3, 0, 3, 0, 3, 3, 0, 3, 0, + // Syntax: B0, B1, B6, BB, BF + 3, 3, 0, 0, 0, 0, 3, 0, 0, 0, 0, 3, 0, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + // Syntax: D7 + 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + // Syntax: F7 + 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, +} + +func IsWhitespace(c rune) bool { + if c < 0 { + return false + } else if c <= 0xff { + return (patternPropsLatin1[c]>>2)&1 != 0 + } else if 0x200e <= c && c <= 0x2029 { + return c <= 0x200f || 0x2028 <= c + } else { + return false + } +} + +func SkipWhitespace(str string) string { + for { + r, w := utf8.DecodeRuneInString(str) + if r == utf8.RuneError && (w == 0 || w == 1) { + return str[w:] + } + if !IsWhitespace(r) { + return str + } + str = str[w:] + } +} + +func IsUnprintable(c rune) bool { + return !(c >= 0x20 && c <= 0x7E) +} + +// "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" +var digits = [...]byte{ + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, +} + +func EscapeUnprintable(w *strings.Builder, c rune) { + w.WriteByte('\\') + if (c & ^0xFFFF) != 0 { + w.WriteByte('U') + w.WriteByte(digits[0xF&(c>>28)]) + w.WriteByte(digits[0xF&(c>>24)]) + w.WriteByte(digits[0xF&(c>>20)]) + w.WriteByte(digits[0xF&(c>>16)]) + } else { + w.WriteByte('u') + } + w.WriteByte(digits[0xF&(c>>12)]) + w.WriteByte(digits[0xF&(c>>8)]) + w.WriteByte(digits[0xF&(c>>4)]) + w.WriteByte(digits[0xF&c]) +} diff --git a/go/mysql/icuregex/internal/ubidi/loader.go b/go/mysql/icuregex/internal/ubidi/loader.go new file mode 100644 index 00000000000..e30ca402f81 --- /dev/null +++ b/go/mysql/icuregex/internal/ubidi/loader.go @@ -0,0 +1,125 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ubidi + +import ( + "errors" + "sync" + + "vitess.io/vitess/go/mysql/icuregex/internal/icudata" + "vitess.io/vitess/go/mysql/icuregex/internal/udata" + "vitess.io/vitess/go/mysql/icuregex/internal/utrie" +) + +var ubidiOnce sync.Once +var ubidi struct { + indexes []int32 + trie *utrie.UTrie2 + mirrors []uint32 + jg []uint8 + jg2 []uint8 +} + +func indexes() []int32 { + loadUBidi() + return ubidi.indexes +} + +func trie() *utrie.UTrie2 { + loadUBidi() + return ubidi.trie +} + +func mirrors() []uint32 { + loadUBidi() + return ubidi.mirrors +} + +func jg() []uint8 { + loadUBidi() + return ubidi.jg +} + +func jg2() []uint8 { + loadUBidi() + return ubidi.jg2 +} + +func loadUBidi() { + ubidiOnce.Do(func() { + b := udata.NewBytes(icudata.UBidi) + if err := readData(b); err != nil { + panic(err) + } + }) +} + +func readData(bytes *udata.Bytes) error { + err := bytes.ReadHeader(func(info *udata.DataInfo) bool { + return info.DataFormat[0] == 0x42 && + info.DataFormat[1] == 0x69 && + info.DataFormat[2] == 0x44 && + info.DataFormat[3] == 0x69 && + info.FormatVersion[0] == 2 + }) + if err != nil { + return err + } + + count := int32(bytes.Uint32()) + if count < ixTop { + return errors.New("indexes[0] too small in ucase.icu") + } + + ubidi.indexes = make([]int32, count) + ubidi.indexes[0] = count + + for i := int32(1); i < count; i++ { + ubidi.indexes[i] = int32(bytes.Uint32()) + } + + ubidi.trie, err = utrie.UTrie2FromBytes(bytes) + if err != nil { + return err + } + + expectedTrieLength := ubidi.indexes[ixTrieSize] + trieLength := ubidi.trie.SerializedLength() + + if trieLength > expectedTrieLength { + return errors.New("ucase.icu: not enough bytes for the trie") + } + + bytes.Skip(expectedTrieLength - trieLength) + + if n := ubidi.indexes[ixMirrorLength]; n > 0 { + ubidi.mirrors = bytes.Uint32Slice(n) + } + if n := ubidi.indexes[ixJgLimit] - ubidi.indexes[ixJgStart]; n > 0 { + ubidi.jg = bytes.Uint8Slice(n) + } + if n := ubidi.indexes[ixJgLimit2] - ubidi.indexes[ixJgStart2]; n > 0 { + ubidi.jg2 = bytes.Uint8Slice(n) + } + + return nil +} diff --git a/go/mysql/icuregex/internal/ubidi/ubidi.go b/go/mysql/icuregex/internal/ubidi/ubidi.go new file mode 100644 index 00000000000..79482dfbc8d --- /dev/null +++ b/go/mysql/icuregex/internal/ubidi/ubidi.go @@ -0,0 +1,390 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ubidi + +const ( + ixIndexTop = iota + ixLength + ixTrieSize + ixMirrorLength + + ixJgStart + ixJgLimit + ixJgStart2 /* new in format version 2.2, ICU 54 */ + ixJgLimit2 + + maxValuesIndex + ixTop +) + +const ( + /* UBIDI_CLASS_SHIFT=0, */ /* bidi class: 5 bits (4..0) */ + jtShift = 5 /* joining type: 3 bits (7..5) */ + + bptShift = 8 /* Bidi_Paired_Bracket_Type(bpt): 2 bits (9..8) */ + + joinControlShift = 10 + bidiControlShift = 11 + + isMirroredShift = 12 /* 'is mirrored' */ +) + +/** + * Bidi Paired Bracket Type constants. + * + * @see UCHAR_BIDI_PAIRED_BRACKET_TYPE + * @stable ICU 52 + */ +type UPairedBracketType int32 + +/* + * Note: UBidiPairedBracketType constants are parsed by preparseucd.py. + * It matches lines like + * U_BPT_ + */ +const ( + /** Not a paired bracket. @stable ICU 52 */ + BptNone UPairedBracketType = iota + /** Open paired bracket. @stable ICU 52 */ + BptOpen + /** Close paired bracket. @stable ICU 52 */ + BptClose +) + +const classMask = 0x0000001f +const jtMask = 0x000000e0 +const bptMask = 0x00000300 + +/** + * Joining Type constants. + * + * @see UCHAR_JOINING_TYPE + * @stable ICU 2.2 + */ +type JoiningType int32 + +/* + * Note: UJoiningType constants are parsed by preparseucd.py. + * It matches lines like + * U_JT_ + */ +const ( + JtNonJoining JoiningType = iota /*[U]*/ + JtJoinCausing /*[C]*/ + JtDualJoining /*[D]*/ + JtLeftJoining /*[L]*/ + JtRightJoining /*[R]*/ + JtTransparent /*[T]*/ +) + +/** + * Joining Group constants. + * + * @see UCHAR_JOINING_GROUP + * @stable ICU 2.2 + */ +type JoiningGroup int32 + +/* + * Note: UJoiningGroup constants are parsed by preparseucd.py. + * It matches lines like + * U_JG_ + */ +const ( + JgNoJoiningGroup JoiningGroup = iota + JgAin + JgAlaph + JgAlef + JgBeh + JgBeth + JgDal + JgDalathRish + JgE + JgFeh + JgFinalSemkath + JgGaf + JgGamal + JgHah + JgTehMarbutaGoal /**< @stable ICU 4.6 */ + JgHe + JgHeh + JgHehGoal + JgHeth + JgKaf + JgKaph + JgKnottedHeh + JgLam + JgLamadh + JgMeem + JgMim + JgNoon + JgNun + JgPe + JgQaf + JgQaph + JgReh + JgReversedPe + JgSad + JgSadhe + JgSeen + JgSemkath + JgShin + JgSwashKaf + JgSyriacWaw + JgTah + JgTaw + JgTehMarbuta + JgTeth + JgWaw + JgYeh + JgYehBarree + JgYehWithTail + JgYudh + JgYudhHe + JgZain + JgFe /**< @stable ICU 2.6 */ + JgKhaph /**< @stable ICU 2.6 */ + JgZhain /**< @stable ICU 2.6 */ + JgBurushashkiYehBarree /**< @stable ICU 4.0 */ + JgFarsiYeh /**< @stable ICU 4.4 */ + JgNya /**< @stable ICU 4.4 */ + JgRohingyaYeh /**< @stable ICU 49 */ + JgManichaeanAleph /**< @stable ICU 54 */ + JgManichaeanAyin /**< @stable ICU 54 */ + JgManichaeanBeth /**< @stable ICU 54 */ + JgManichaeanDaleth /**< @stable ICU 54 */ + JgManichaeanDhamedh /**< @stable ICU 54 */ + JgManichaeanFive /**< @stable ICU 54 */ + JgManichaeanGimel /**< @stable ICU 54 */ + JgManichaeanHeth /**< @stable ICU 54 */ + JgManichaeanHundred /**< @stable ICU 54 */ + JgManichaeanKaph /**< @stable ICU 54 */ + JgManichaeanLamedh /**< @stable ICU 54 */ + JgManichaeanMem /**< @stable ICU 54 */ + JgManichaeanNun /**< @stable ICU 54 */ + JgManichaeanOne /**< @stable ICU 54 */ + JgManichaeanPe /**< @stable ICU 54 */ + JgManichaeanQoph /**< @stable ICU 54 */ + JgManichaeanResh /**< @stable ICU 54 */ + JgManichaeanSadhe /**< @stable ICU 54 */ + JgManichaeanSamekh /**< @stable ICU 54 */ + JgManichaeanTaw /**< @stable ICU 54 */ + JgManichaeanTen /**< @stable ICU 54 */ + JgManichaeanTeth /**< @stable ICU 54 */ + JgManichaeanThamedh /**< @stable ICU 54 */ + JgManichaeanTwenty /**< @stable ICU 54 */ + JgManichaeanWaw /**< @stable ICU 54 */ + JgManichaeanYodh /**< @stable ICU 54 */ + JgManichaeanZayin /**< @stable ICU 54 */ + JgStraightWaw /**< @stable ICU 54 */ + JgAfricanFeh /**< @stable ICU 58 */ + JgAfricanNoon /**< @stable ICU 58 */ + JgAfricanQaf /**< @stable ICU 58 */ + + JgMalayalamBha /**< @stable ICU 60 */ + JgMalayalamJa /**< @stable ICU 60 */ + JgMalayalamLla /**< @stable ICU 60 */ + JgMalayalamLlla /**< @stable ICU 60 */ + JgMalayalamNga /**< @stable ICU 60 */ + JgMalayalamNna /**< @stable ICU 60 */ + JgMalayalamNnna /**< @stable ICU 60 */ + JgMalayalamNya /**< @stable ICU 60 */ + JgMalayalamRa /**< @stable ICU 60 */ + JgMalayalamSsa /**< @stable ICU 60 */ + JgMalayalamTta /**< @stable ICU 60 */ + + JgHanafiRohingyaKinnaYa /**< @stable ICU 62 */ + JgHanafiRohingyaPa /**< @stable ICU 62 */ + + JgThinYeh /**< @stable ICU 70 */ + JgVerticalTail /**< @stable ICU 70 */ +) + +/** + * This specifies the language directional property of a character set. + * @stable ICU 2.0 + */ +type CharDirection int32 + +/* + * Note: UCharDirection constants and their API comments are parsed by preparseucd.py. + * It matches pairs of lines like + * / ** comment... * / + * U_<[A-Z_]+> = , + */ + +const ( + /** L @stable ICU 2.0 */ + LeftToRight CharDirection = 0 + /** R @stable ICU 2.0 */ + RightToLeft CharDirection = 1 + /** EN @stable ICU 2.0 */ + EuropeanNumber CharDirection = 2 + /** ES @stable ICU 2.0 */ + EuropeanNumberSeparator CharDirection = 3 + /** ET @stable ICU 2.0 */ + EuropeanNumberTerminator CharDirection = 4 + /** AN @stable ICU 2.0 */ + ArabicNumber CharDirection = 5 + /** CS @stable ICU 2.0 */ + CommonNumberSeparator CharDirection = 6 + /** B @stable ICU 2.0 */ + BlockSeparator CharDirection = 7 + /** S @stable ICU 2.0 */ + SegmentSeparator CharDirection = 8 + /** WS @stable ICU 2.0 */ + WhiteSpaceNeutral CharDirection = 9 + /** ON @stable ICU 2.0 */ + OtherNeutral CharDirection = 10 + /** LRE @stable ICU 2.0 */ + LeftToRightEmbedding CharDirection = 11 + /** LRO @stable ICU 2.0 */ + LeftToRightOverride CharDirection = 12 + /** AL @stable ICU 2.0 */ + RightToLeftArabic CharDirection = 13 + /** RLE @stable ICU 2.0 */ + RightToLeftEmbedding CharDirection = 14 + /** RLO @stable ICU 2.0 */ + RightToLeftOverride CharDirection = 15 + /** PDF @stable ICU 2.0 */ + PopDirectionalFormat CharDirection = 16 + /** NSM @stable ICU 2.0 */ + DirNonSpacingMark CharDirection = 17 + /** BN @stable ICU 2.0 */ + BoundaryNeutral CharDirection = 18 + /** FSI @stable ICU 52 */ + StrongIsolate CharDirection = 19 + /** LRI @stable ICU 52 */ + LeftToRightIsolate CharDirection = 20 + /** RLI @stable ICU 52 */ + RightToLeftIsolate CharDirection = 21 + /** PDI @stable ICU 52 */ + PopDirectionalIsolate CharDirection = 22 +) + +type propertySet interface { + AddRune(ch rune) + AddRuneRange(from rune, to rune) +} + +func AddPropertyStarts(sa propertySet) { + /* add the start code point of each same-value range of the trie */ + trie().Enum(nil, func(start, _ rune, _ uint32) bool { + sa.AddRune(start) + return true + }) + + idxs := indexes() + mrs := mirrors() + /* add the code points from the bidi mirroring table */ + length := idxs[ixMirrorLength] + for i := int32(0); i < length; i++ { + c := mirrorCodePoint(rune(mrs[i])) + sa.AddRuneRange(c, c+1) + } + + /* add the code points from the Joining_Group array where the value changes */ + start := idxs[ixJgStart] + limit := idxs[ixJgLimit] + jgArray := jg() + for { + prev := uint8(0) + for start < limit { + jg := jgArray[0] + jgArray = jgArray[1:] + if jg != prev { + sa.AddRune(start) + prev = jg + } + start++ + } + if prev != 0 { + /* add the limit code point if the last value was not 0 (it is now start==limit) */ + sa.AddRune(limit) + } + if limit == idxs[ixJgLimit] { + /* switch to the second Joining_Group range */ + start = idxs[ixJgStart2] + limit = idxs[ixJgLimit2] + jgArray = jg2() + } else { + break + } + } + + /* add code points with hardcoded properties, plus the ones following them */ + + /* (none right now) */ +} + +func HasFlag(props uint16, shift int) bool { + return ((props >> shift) & 1) != 0 +} + +func mirrorCodePoint(m rune) rune { + return m & 0x1fffff +} + +func IsJoinControl(c rune) bool { + props := trie().Get16(c) + return HasFlag(props, joinControlShift) +} + +func JoinType(c rune) JoiningType { + props := trie().Get16(c) + return JoiningType((props & jtMask) >> jtShift) +} + +func JoinGroup(c rune) JoiningGroup { + idxs := indexes() + start := idxs[ixJgStart] + limit := idxs[ixJgLimit] + if start <= c && c < limit { + return JoiningGroup(jg()[c-start]) + } + start = idxs[ixJgStart2] + limit = idxs[ixJgLimit2] + if start <= c && c < limit { + return JoiningGroup(jg2()[c-start]) + } + return JgNoJoiningGroup +} + +func IsMirrored(c rune) bool { + props := trie().Get16(c) + return HasFlag(props, isMirroredShift) +} + +func IsBidiControl(c rune) bool { + props := trie().Get16(c) + return HasFlag(props, bidiControlShift) +} + +func PairedBracketType(c rune) UPairedBracketType { + props := trie().Get16(c) + return UPairedBracketType((props & bptMask) >> bptShift) +} + +func Class(c rune) CharDirection { + props := trie().Get16(c) + return CharDirection(props & classMask) +} diff --git a/go/mysql/icuregex/internal/ucase/fold.go b/go/mysql/icuregex/internal/ucase/fold.go new file mode 100644 index 00000000000..728142042ba --- /dev/null +++ b/go/mysql/icuregex/internal/ucase/fold.go @@ -0,0 +1,243 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ucase + +import ( + "math/bits" + + "vitess.io/vitess/go/mysql/icuregex/internal/utf16" +) + +func FoldRunes(str []rune) []rune { + out := make([]rune, 0, len(str)) + for _, c := range str { + r, exp := FullFolding(c) + if exp == nil { + out = append(out, r) + continue + } + + for len(exp) > 0 { + r, exp = utf16.NextUnsafe(exp) + out = append(out, r) + } + } + return out +} + +/* + - Case folding is similar to lowercasing. + - The result may be a simple mapping, i.e., a single code point, or + - a full mapping, i.e., a string. + - If the case folding for a code point is the same as its simple (1:1) lowercase mapping, + - then only the lowercase mapping is stored. + * + - Some special cases are hardcoded because their conditions cannot be + - parsed and processed from CaseFolding.txt. + * + - Unicode 3.2 CaseFolding.txt specifies for its status field: + +# C: common case folding, common mappings shared by both simple and full mappings. +# F: full case folding, mappings that cause strings to grow in length. Multiple characters are separated by spaces. +# S: simple case folding, mappings to single characters where different from F. +# T: special case for uppercase I and dotted uppercase I +# - For non-Turkic languages, this mapping is normally not used. +# - For Turkic languages (tr, az), this mapping can be used instead of the normal mapping for these characters. +# +# Usage: +# A. To do a simple case folding, use the mappings with status C + S. +# B. To do a full case folding, use the mappings with status C + F. +# +# The mappings with status T can be used or omitted depending on the desired case-folding +# behavior. (The default option is to exclude them.) + + - Unicode 3.2 has 'T' mappings as follows: + +0049; T; 0131; # LATIN CAPITAL LETTER I +0130; T; 0069; # LATIN CAPITAL LETTER I WITH DOT ABOVE + + - while the default mappings for these code points are: + +0049; C; 0069; # LATIN CAPITAL LETTER I +0130; F; 0069 0307; # LATIN CAPITAL LETTER I WITH DOT ABOVE + + - U+0130 has no simple case folding (simple-case-folds to itself). +*/ +func Fold(c rune) rune { + props := trie().Get16(c) + if !hasException(props) { + if isUpperOrTitle(props) { + c += getDelta(props) + } + } else { + pe := getExceptions(props) + excWord := pe[0] + pe = pe[1:] + if (excWord & excConditionalFold) != 0 { + /* special case folding mappings, hardcoded */ + /* default mappings */ + if c == 0x49 { + /* 0049; C; 0069; # LATIN CAPITAL LETTER I */ + return 0x69 + } else if c == 0x130 { + /* no simple case folding for U+0130 */ + return c + } + } + if (excWord & excNoSimpleCaseFolding) != 0 { + return c + } + if hasSlot(excWord, excDelta) && isUpperOrTitle(props) { + var delta int32 + delta, _ = getSlotValue(excWord, excDelta, pe) + if excWord&excDeltaIsNegative == 0 { + return c + delta + } + return c - delta + } + + var idx int32 + if hasSlot(excWord, excFold) { + idx = excFold + } else if hasSlot(excWord, excLower) { + idx = excLower + } else { + return c + } + c, _ = getSlotValue(excWord, idx, pe) + } + return c +} + +func FullFolding(c rune) (rune, []uint16) { + result := c + props := trie().Get16(c) + + if !hasException(props) { + if isUpperOrTitle(props) { + result = c + getDelta(props) + } + return result, nil + } + + pe := getExceptions(props) + excWord := pe[0] + pe = pe[1:] + var idx int32 + + if excWord&excConditionalFold != 0 { + /* use hardcoded conditions and mappings */ + /* default mappings */ + if c == 0x49 { + /* 0049; C; 0069; # LATIN CAPITAL LETTER I */ + return 0x69, nil + } else if c == 0x130 { + /* 0130; F; 0069 0307; # LATIN CAPITAL LETTER I WITH DOT ABOVE */ + return -1, []uint16{0x69, 0x307} + } + } else if hasSlot(excWord, excFullMappings) { + full, pe := getSlotValue(excWord, excFullMappings, pe) + + /* start of full case mapping strings */ + pe = pe[1:] + + /* skip the lowercase result string */ + pe = pe[full&fullLower:] + full = (full >> 4) & 0xf + + if full != 0 { + /* set the output pointer to the result string */ + return -1, pe[:full] + } + } + + if excWord&excNoSimpleCaseFolding != 0 { + return result, nil + } + if hasSlot(excWord, excDelta) && isUpperOrTitle(props) { + delta, _ := getSlotValue(excWord, excDelta, pe) + if excWord&excDeltaIsNegative == 0 { + return c + delta, nil + } + return c - delta, nil + } + if hasSlot(excWord, excFold) { + idx = excFold + } else if hasSlot(excWord, excLower) { + idx = excLower + } else { + return c, nil + } + result, _ = getSlotValue(excWord, idx, pe) + return result, nil +} + +const ( + excLower = iota + excFold + excUpper + excTitle + excDelta + exc5 /* reserved */ + excClosure + excFullMappings +) + +const ( + /* complex/conditional mappings */ + excConditionalSpecial = 0x4000 + excConditionalFold = 0x8000 + excNoSimpleCaseFolding = 0x200 + excDeltaIsNegative = 0x400 + excSensitive = 0x800 + + excDoubleSlots = 0x100 +) + +func isUpperOrTitle(props uint16) bool { + return props&2 != 0 +} + +func getDelta(props uint16) rune { + return rune(int16(props) >> 7) +} + +func getExceptions(props uint16) []uint16 { + return exceptions()[props>>4:] +} + +func hasSlot(flags uint16, idx int32) bool { + return (flags & (1 << idx)) != 0 +} + +func slotOffset(flags uint16, idx int32) int { + return bits.OnesCount8(uint8(flags & ((1 << idx) - 1))) +} + +func getSlotValue(excWord uint16, idx int32, pExc16 []uint16) (int32, []uint16) { + if excWord&excDoubleSlots == 0 { + pExc16 = pExc16[slotOffset(excWord, idx):] + return int32(pExc16[0]), pExc16 + } + pExc16 = pExc16[2*slotOffset(excWord, idx):] + return (int32(pExc16[0]) << 16) | int32(pExc16[1]), pExc16[1:] +} diff --git a/go/mysql/icuregex/internal/ucase/loader.go b/go/mysql/icuregex/internal/ucase/loader.go new file mode 100644 index 00000000000..2ac25cc0f6f --- /dev/null +++ b/go/mysql/icuregex/internal/ucase/loader.go @@ -0,0 +1,101 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ucase + +import ( + "errors" + "sync" + + "vitess.io/vitess/go/mysql/icuregex/internal/icudata" + "vitess.io/vitess/go/mysql/icuregex/internal/udata" + "vitess.io/vitess/go/mysql/icuregex/internal/utrie" +) + +var ucaseOnce sync.Once +var ucase struct { + trie *utrie.UTrie2 + exceptions []uint16 +} + +func trie() *utrie.UTrie2 { + loadUCase() + return ucase.trie +} + +func exceptions() []uint16 { + loadUCase() + return ucase.exceptions +} + +func loadUCase() { + ucaseOnce.Do(func() { + b := udata.NewBytes(icudata.UCase) + if err := readData(b); err != nil { + panic(err) + } + }) +} + +func readData(bytes *udata.Bytes) error { + err := bytes.ReadHeader(func(info *udata.DataInfo) bool { + return info.DataFormat[0] == 0x63 && + info.DataFormat[1] == 0x41 && + info.DataFormat[2] == 0x53 && + info.DataFormat[3] == 0x45 && + info.FormatVersion[0] == 4 + }) + if err != nil { + return err + } + + count := int32(bytes.Uint32()) + if count < ixTop { + return errors.New("indexes[0] too small in ucase.icu") + } + + indexes := make([]int32, count) + indexes[0] = count + + for i := int32(1); i < count; i++ { + indexes[i] = int32(bytes.Uint32()) + } + + ucase.trie, err = utrie.UTrie2FromBytes(bytes) + if err != nil { + return err + } + + expectedTrieLength := indexes[ixTrieSize] + trieLength := ucase.trie.SerializedLength() + + if trieLength > expectedTrieLength { + return errors.New("ucase.icu: not enough bytes for the trie") + } + + bytes.Skip(expectedTrieLength - trieLength) + + if n := indexes[ixExcLength]; n > 0 { + ucase.exceptions = bytes.Uint16Slice(n) + } + + return nil +} diff --git a/go/mysql/icuregex/internal/ucase/ucase.go b/go/mysql/icuregex/internal/ucase/ucase.go new file mode 100644 index 00000000000..33fac0a5cce --- /dev/null +++ b/go/mysql/icuregex/internal/ucase/ucase.go @@ -0,0 +1,359 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ucase + +import ( + "vitess.io/vitess/go/mysql/icuregex/internal/utf16" +) + +const ( + ixIndexTop = 0 + ixLength = 1 + ixTrieSize = 2 + ixExcLength = 3 + ixUnfoldLength = 4 + ixMaxFullLength = 15 + ixTop = 16 +) + +type propertySet interface { + AddRune(ch rune) +} + +func AddPropertyStarts(sa propertySet) { + /* add the start code point of each same-value range of the trie */ + trie().Enum(nil, func(start, _ rune, _ uint32) bool { + sa.AddRune(start) + return true + }) + + /* add code points with hardcoded properties, plus the ones following them */ + + /* (none right now, see comment below) */ + + /* + * Omit code points with hardcoded specialcasing properties + * because we do not build property UnicodeSets for them right now. + */ +} + +const ( + fullMappingsMaxLength = (4 * 0xf) + closureMaxLength = 0xf + + fullLower = 0xf + fullFolding = 0xf0 + fullUpper = 0xf00 + fullTitle = 0xf000 +) + +func AddCaseClosure(c rune, sa propertySet) { + /* + * Hardcode the case closure of i and its relatives and ignore the + * data file data for these characters. + * The Turkic dotless i and dotted I with their case mapping conditions + * and case folding option make the related characters behave specially. + * This code matches their closure behavior to their case folding behavior. + */ + + switch c { + case 0x49: + /* regular i and I are in one equivalence class */ + sa.AddRune(0x69) + return + case 0x69: + sa.AddRune(0x49) + return + case 0x130: + /* dotted I is in a class with <0069 0307> (for canonical equivalence with <0049 0307>) */ + // the Regex engine calls removeAllStrings() on all UnicodeSets, so we don't need to insert them + // sa->addString(sa->set, iDot, 2); + return + case 0x131: + /* dotless i is in a class by itself */ + return + default: + /* otherwise use the data file data */ + break + } + + props := trie().Get16(c) + if !hasException(props) { + if getPropsType(props) != None { + /* add the one simple case mapping, no matter what type it is */ + delta := getDelta(props) + if delta != 0 { + sa.AddRune(c + delta) + } + } + } else { + /* + * c has exceptions, so there may be multiple simple and/or + * full case mappings. Add them all. + */ + pe := getExceptions(props) + excWord := pe[0] + pe = pe[1:] + var idx int32 + var closure []uint16 + + /* add all simple case mappings */ + for idx = excLower; idx <= excTitle; idx++ { + if hasSlot(excWord, idx) { + c, _ = getSlotValue(excWord, idx, pe) + sa.AddRune(c) + } + } + if hasSlot(excWord, excDelta) { + delta, _ := getSlotValue(excWord, excDelta, pe) + if excWord&excDeltaIsNegative == 0 { + sa.AddRune(c + delta) + } else { + sa.AddRune(c - delta) + } + } + + /* get the closure string pointer & length */ + if hasSlot(excWord, excClosure) { + closureLength, pe1 := getSlotValue(excWord, excClosure, pe) + closureLength &= closureMaxLength /* higher bits are reserved */ + closure = pe1[1 : 1+closureLength] /* behind this slot, unless there are full case mappings */ + } + + /* add the full case folding */ + if hasSlot(excWord, excFullMappings) { + fullLength, pe1 := getSlotValue(excWord, excFullMappings, pe) + + /* start of full case mapping strings */ + pe1 = pe1[1:] + + fullLength &= 0xffff /* bits 16 and higher are reserved */ + + /* skip the lowercase result string */ + pe1 = pe1[fullLength&fullLower:] + fullLength >>= 4 + + /* skip adding the case folding strings */ + length := fullLength & 0xf + pe1 = pe1[length:] + + /* skip the uppercase and titlecase strings */ + fullLength >>= 4 + pe1 = pe1[fullLength&0xf:] + fullLength >>= 4 + pe1 = pe1[fullLength:] + + closure = pe1[:len(closure)] + } + + /* add each code point in the closure string */ + for len(closure) > 0 { + c, closure = utf16.NextUnsafe(closure) + sa.AddRune(c) + } + } +} + +const dotMask = 0x60 + +const ( + noDot = 0 /* normal characters with cc=0 */ + softDotted = 0x20 /* soft-dotted characters with cc=0 */ + above = 0x40 /* "above" accents with cc=230 */ + otherAccent = 0x60 /* other accent character (0> excDotShift) & dotMask) +} + +func IsCaseSensitive(c rune) bool { + props := trie().Get16(c) + if !hasException(props) { + return (props & sensitive) != 0 + } + pe := getExceptions(props) + return (pe[0] & excSensitive) != 0 +} + +func ToFullLower(c rune) rune { + // The sign of the result has meaning, input must be non-negative so that it can be returned as is. + result := c + props := trie().Get16(c) + if !hasException(props) { + if isUpperOrTitle(props) { + result = c + getDelta(props) + } + } else { + pe := getExceptions(props) + excWord := pe[0] + pe = pe[1:] + + if excWord&excConditionalSpecial != 0 { + /* use hardcoded conditions and mappings */ + if c == 0x130 { + return 2 + } + /* no known conditional special case mapping, use a normal mapping */ + } else if hasSlot(excWord, excFullMappings) { + full, _ := getSlotValue(excWord, excFullMappings, pe) + full = full & fullLower + if full != 0 { + /* return the string length */ + return full + } + } + + if hasSlot(excWord, excDelta) && isUpperOrTitle(props) { + delta, _ := getSlotValue(excWord, excDelta, pe) + if (excWord & excDeltaIsNegative) == 0 { + return c + delta + } + return c - delta + } + if hasSlot(excWord, excLower) { + result, _ = getSlotValue(excWord, excLower, pe) + } + } + + if result == c { + return ^result + } + return result +} + +func ToFullUpper(c rune) rune { + return toUpperOrTitle(c, true) +} + +func ToFullTitle(c rune) rune { + return toUpperOrTitle(c, false) +} + +func toUpperOrTitle(c rune, upperNotTitle bool) rune { + result := c + props := trie().Get16(c) + if !hasException(props) { + if getPropsType(props) == Lower { + result = c + getDelta(props) + } + } else { + pe := getExceptions(props) + excWord := pe[0] + pe = pe[1:] + + if excWord&excConditionalSpecial != 0 { + if c == 0x0587 { + return 2 + } + /* no known conditional special case mapping, use a normal mapping */ + } else if hasSlot(excWord, excFullMappings) { + full, _ := getSlotValue(excWord, excFullMappings, pe) + + /* skip the lowercase and case-folding result strings */ + full >>= 8 + + if upperNotTitle { + full &= 0xf + } else { + /* skip the uppercase result string */ + full = (full >> 4) & 0xf + } + + if full != 0 { + /* return the string length */ + return full + } + } + + if hasSlot(excWord, excDelta) && getPropsType(props) == Lower { + delta, _ := getSlotValue(excWord, excDelta, pe) + if (excWord & excDeltaIsNegative) == 0 { + return c + delta + } + return c - delta + } + var idx int32 + if !upperNotTitle && hasSlot(excWord, excTitle) { + idx = excTitle + } else if hasSlot(excWord, excUpper) { + /* here, titlecase is same as uppercase */ + idx = excUpper + } else { + return ^c + } + result, _ = getSlotValue(excWord, idx, pe) + } + + if result == c { + return ^result + } + return result +} + +func GetTypeOrIgnorable(c rune) int32 { + props := trie().Get16(c) + return int32(props & 7) +} + +type Type int32 + +const ( + None Type = iota + Lower + Upper + Title +) + +const typeMask = 3 + +func GetType(c rune) Type { + props := trie().Get16(c) + return getPropsType(props) +} + +func getPropsType(props uint16) Type { + return Type(props & typeMask) +} diff --git a/go/mysql/icuregex/internal/uchar/constants.go b/go/mysql/icuregex/internal/uchar/constants.go new file mode 100644 index 00000000000..60899393397 --- /dev/null +++ b/go/mysql/icuregex/internal/uchar/constants.go @@ -0,0 +1,238 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uchar + +func Mask[T ~int | ~int8](x T) uint32 { + return 1 << x +} + +type Category int8 + +const ( + /* + * Note: UCharCategory constants and their API comments are parsed by preparseucd.py. + * It matches pairs of lines like + * / ** comment... * / + * U_<[A-Z_]+> = , + */ + + /** Non-category for unassigned and non-character code points. @stable ICU 2.0 */ + Unassigned Category = 0 + /** Cn "Other, Not Assigned (no characters in [UnicodeData.txt] have this property)" (same as U_UNASSIGNED!) @stable ICU 2.0 */ + GeneralOtherTypes Category = iota - 1 + /** Lu @stable ICU 2.0 */ + UppercaseLetter + /** Ll @stable ICU 2.0 */ + LowercaseLetter + /** Lt @stable ICU 2.0 */ + TitlecaseLetter + /** Lm @stable ICU 2.0 */ + ModifierLetter + /** Lo @stable ICU 2.0 */ + OtherLetter + /** Mn @stable ICU 2.0 */ + NonSpacingMask + /** Me @stable ICU 2.0 */ + EnclosingMark + /** Mc @stable ICU 2.0 */ + CombiningSpacingMask + /** Nd @stable ICU 2.0 */ + DecimalDigitNumber + /** Nl @stable ICU 2.0 */ + LetterNumber + /** No @stable ICU 2.0 */ + OtherNumber + /** Zs @stable ICU 2.0 */ + SpaceSeparator + /** Zl @stable ICU 2.0 */ + LineSeparator + /** Zp @stable ICU 2.0 */ + ParagraphSeparator + /** Cc @stable ICU 2.0 */ + ControlChar + /** Cf @stable ICU 2.0 */ + FormatChar + /** Co @stable ICU 2.0 */ + PrivateUseChar + /** Cs @stable ICU 2.0 */ + Surrogate + /** Pd @stable ICU 2.0 */ + DashPunctuation + /** Ps @stable ICU 2.0 */ + StartPunctuation + /** Pe @stable ICU 2.0 */ + EndPunctuation + /** Pc @stable ICU 2.0 */ + ConnectorPunctuation + /** Po @stable ICU 2.0 */ + OtherPunctuation + /** Sm @stable ICU 2.0 */ + MathSymbol + /** Sc @stable ICU 2.0 */ + CurrencySymbol + /** Sk @stable ICU 2.0 */ + ModifierSymbol + /** So @stable ICU 2.0 */ + OtherSymbol + /** Pi @stable ICU 2.0 */ + InitialPunctuation + /** Pf @stable ICU 2.0 */ + FinalPunctuation + /** + * One higher than the last enum UCharCategory constant. + * This numeric value is stable (will not change), see + * http://www.unicode.org/policies/stability_policy.html#Property_Value + * + * @stable ICU 2.0 + */ + CharCategoryCount +) + +var ( + GcCnMask = Mask(GeneralOtherTypes) + + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcLuMask = Mask(UppercaseLetter) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcLlMask = Mask(LowercaseLetter) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcLtMask = Mask(TitlecaseLetter) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcLmMask = Mask(ModifierLetter) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcLoMask = Mask(OtherLetter) + + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcMnMask = Mask(NonSpacingMask) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcMeMask = Mask(EnclosingMark) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcMcMask = Mask(CombiningSpacingMask) + + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcNdMask = Mask(DecimalDigitNumber) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcNlMask = Mask(LetterNumber) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcNoMask = Mask(OtherNumber) + + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcZsMask = Mask(SpaceSeparator) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcZlMask = Mask(LineSeparator) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcZpMask = Mask(ParagraphSeparator) + + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcCcMask = Mask(ControlChar) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcCfMask = Mask(FormatChar) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcCoMask = Mask(PrivateUseChar) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcCsMask = Mask(Surrogate) + + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcPdMask = Mask(DashPunctuation) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcPsMask = Mask(StartPunctuation) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcPeMask = Mask(EndPunctuation) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcPcMask = Mask(ConnectorPunctuation) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcPoMask = Mask(OtherPunctuation) + + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcSmMask = Mask(MathSymbol) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcScMask = Mask(CurrencySymbol) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcSkMask = Mask(ModifierSymbol) + /** Mask constant for a UCharCategory. @stable ICU 2.1 */ + GcSoMask = Mask(OtherSymbol) + + /** Mask constant for multiple UCharCategory bits (L Letters). @stable ICU 2.1 */ + GcLMask = (GcLuMask | GcLlMask | GcLtMask | GcLmMask | GcLoMask) + + /** Mask constant for multiple UCharCategory bits (LC Cased Letters). @stable ICU 2.1 */ + GcLcMask = (GcLuMask | GcLlMask | GcLtMask) + + /** Mask constant for multiple UCharCategory bits (M Marks). @stable ICU 2.1 */ + GcMMask = (GcMnMask | GcMeMask | GcMcMask) + + /** Mask constant for multiple UCharCategory bits (N Numbers). @stable ICU 2.1 */ + GcNMask = (GcNdMask | GcNlMask | GcNoMask) + + /** Mask constant for multiple UCharCategory bits (Z Separators). @stable ICU 2.1 */ + GcZMask = (GcZsMask | GcZlMask | GcZpMask) +) + +const upropsAgeShift = 24 +const maxVersionLength = 4 +const versionDelimiter = '.' + +type UVersionInfo [maxVersionLength]uint8 + +const ( + /** No numeric value. */ + UPropsNtvNone = 0 + /** Decimal digits: nv=0..9 */ + UPropsNtvDecimalStart = 1 + /** Other digits: nv=0..9 */ + UPropsNtvDigitStart = 11 + /** Small integers: nv=0..154 */ + UPropsNtvNumericStart = 21 + /** Fractions: ((ntv>>4)-12) / ((ntv&0xf)+1) = -1..17 / 1..16 */ + UPropsNtvFractionStart = 0xb0 + /** + * Large integers: + * ((ntv>>5)-14) * 10^((ntv&0x1f)+2) = (1..9)*(10^2..10^33) + * (only one significant decimal digit) + */ + UPropsNtvLargeStart = 0x1e0 + /** + * Sexagesimal numbers: + * ((ntv>>2)-0xbf) * 60^((ntv&3)+1) = (1..9)*(60^1..60^4) + */ + UPropsNtvBase60Start = 0x300 + /** + * Fraction-20 values: + * frac20 = ntv-0x324 = 0..0x17 -> 1|3|5|7 / 20|40|80|160|320|640 + * numerator: num = 2*(frac20&3)+1 + * denominator: den = 20<<(frac20>>2) + */ + UPropsNtvFraction20Start = UPropsNtvBase60Start + 36 // 0x300+9*4=0x324 + /** + * Fraction-32 values: + * frac32 = ntv-0x34c = 0..15 -> 1|3|5|7 / 32|64|128|256 + * numerator: num = 2*(frac32&3)+1 + * denominator: den = 32<<(frac32>>2) + */ + UPropsNtvFraction32Start = UPropsNtvFraction20Start + 24 // 0x324+6*4=0x34c + /** No numeric value (yet). */ + UPropsNtvReservedStart = UPropsNtvFraction32Start + 16 // 0x34c+4*4=0x35c + + UPropsNtvMaxSmallInt = UPropsNtvFractionStart - UPropsNtvNumericStart - 1 +) + +const noNumericValue = -123456789.0 diff --git a/go/mysql/icuregex/internal/uchar/loader.go b/go/mysql/icuregex/internal/uchar/loader.go new file mode 100644 index 00000000000..fab54f85e0a --- /dev/null +++ b/go/mysql/icuregex/internal/uchar/loader.go @@ -0,0 +1,139 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uchar + +import ( + "errors" + "sync" + + "vitess.io/vitess/go/mysql/icuregex/internal/icudata" + "vitess.io/vitess/go/mysql/icuregex/internal/udata" + "vitess.io/vitess/go/mysql/icuregex/internal/utrie" +) + +var upropsOnce sync.Once +var uprops struct { + trie *utrie.UTrie2 + trie2 *utrie.UTrie2 + vectorsColumns int32 + vectors []uint32 + scriptExtensions []uint16 +} + +func trie() *utrie.UTrie2 { + loadUProps() + return uprops.trie +} + +func trie2() *utrie.UTrie2 { + loadUProps() + return uprops.trie2 +} + +func vectorsColumns() int32 { + loadUProps() + return uprops.vectorsColumns +} + +func vectors() []uint32 { + loadUProps() + return uprops.vectors +} + +func scriptExtensions() []uint16 { + loadUProps() + return uprops.scriptExtensions +} + +func loadUProps() { + upropsOnce.Do(func() { + b := udata.NewBytes(icudata.UProps) + if err := readData(b); err != nil { + panic(err) + } + }) +} + +func readData(bytes *udata.Bytes) error { + err := bytes.ReadHeader(func(info *udata.DataInfo) bool { + return info.DataFormat[0] == 0x55 && + info.DataFormat[1] == 0x50 && + info.DataFormat[2] == 0x72 && + info.DataFormat[3] == 0x6f && + info.FormatVersion[0] == 7 + }) + if err != nil { + return err + } + + propertyOffset := bytes.Int32() + /* exceptionOffset = */ bytes.Int32() + /* caseOffset = */ bytes.Int32() + additionalOffset := bytes.Int32() + additionalVectorsOffset := bytes.Int32() + uprops.vectorsColumns = bytes.Int32() + scriptExtensionsOffset := bytes.Int32() + reservedOffset7 := bytes.Int32() + /* reservedOffset8 = */ bytes.Int32() + /* dataTopOffset = */ bytes.Int32() + _ = bytes.Int32() + _ = bytes.Int32() + bytes.Skip((16 - 12) << 2) + + uprops.trie, err = utrie.UTrie2FromBytes(bytes) + if err != nil { + return err + } + + expectedTrieLength := (propertyOffset - 16) * 4 + trieLength := uprops.trie.SerializedLength() + + if trieLength > expectedTrieLength { + return errors.New("ucase.icu: not enough bytes for the trie") + } + + bytes.Skip(expectedTrieLength - trieLength) + bytes.Skip((additionalOffset - propertyOffset) * 4) + + if uprops.vectorsColumns > 0 { + uprops.trie2, err = utrie.UTrie2FromBytes(bytes) + if err != nil { + return err + } + + expectedTrieLength = (additionalVectorsOffset - additionalOffset) * 4 + trieLength = uprops.trie2.SerializedLength() + + if trieLength > expectedTrieLength { + return errors.New("ucase.icu: not enough bytes for the trie") + } + + bytes.Skip(expectedTrieLength - trieLength) + uprops.vectors = bytes.Uint32Slice(scriptExtensionsOffset - additionalVectorsOffset) + } + + if n := (reservedOffset7 - scriptExtensionsOffset) * 2; n > 0 { + uprops.scriptExtensions = bytes.Uint16Slice(n) + } + + return nil +} diff --git a/go/mysql/icuregex/internal/uchar/uchar.go b/go/mysql/icuregex/internal/uchar/uchar.go new file mode 100644 index 00000000000..e93b51d9bb4 --- /dev/null +++ b/go/mysql/icuregex/internal/uchar/uchar.go @@ -0,0 +1,316 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uchar + +import ( + "strconv" +) + +type PropertySet interface { + AddRune(ch rune) +} + +func VecAddPropertyStarts(sa PropertySet) { + trie2().Enum(nil, func(start, _ rune, _ uint32) bool { + sa.AddRune(start) + return true + }) +} + +const ( + tab = 0x0009 + lf = 0x000a + ff = 0x000c + cr = 0x000d + nbsp = 0x00a0 + cgj = 0x034f + figuresp = 0x2007 + hairsp = 0x200a + zwnj = 0x200c + zwj = 0x200d + rlm = 0x200f + nnbsp = 0x202f + zwnbsp = 0xfef +) + +func AddPropertyStarts(sa PropertySet) { + /* add the start code point of each same-value range of the main trie */ + trie().Enum(nil, func(start, _ rune, _ uint32) bool { + sa.AddRune(start) + return true + }) + + /* add code points with hardcoded properties, plus the ones following them */ + + /* add for u_isblank() */ + sa.AddRune(tab) + sa.AddRune(tab + 1) + + /* add for IS_THAT_CONTROL_SPACE() */ + sa.AddRune(cr + 1) /* range TAB..CR */ + sa.AddRune(0x1c) + sa.AddRune(0x1f + 1) + sa.AddRune(0x85) // NEXT LINE (NEL) + sa.AddRune(0x85 + 1) + + /* add for u_isIDIgnorable() what was not added above */ + sa.AddRune(0x7f) /* range DEL..NBSP-1, NBSP added below */ + sa.AddRune(hairsp) + sa.AddRune(rlm + 1) + sa.AddRune(0x206a) // INHIBIT SYMMETRIC SWAPPING + sa.AddRune(0x206f + 1) // NOMINAL DIGIT SHAPES + sa.AddRune(zwnbsp) + sa.AddRune(zwnbsp + 1) + + /* add no-break spaces for u_isWhitespace() what was not added above */ + sa.AddRune(nbsp) + sa.AddRune(nbsp + 1) + sa.AddRune(figuresp) + sa.AddRune(figuresp + 1) + sa.AddRune(nnbsp) + sa.AddRune(nnbsp + 1) + + /* add for u_digit() */ + sa.AddRune('a') + sa.AddRune('z' + 1) + sa.AddRune('A') + sa.AddRune('Z' + 1) + // fullwidth + sa.AddRune('a') + sa.AddRune('z' + 1) + sa.AddRune('A') + sa.AddRune('Z' + 1) + + /* add for u_isxdigit() */ + sa.AddRune('f' + 1) + sa.AddRune('F' + 1) + // fullwidth + sa.AddRune('f' + 1) + sa.AddRune('F' + 1) + + /* add for UCHAR_DEFAULT_IGNORABLE_CODE_POINT what was not added above */ + sa.AddRune(0x2060) /* range 2060..206f */ + sa.AddRune(0xfff0) + sa.AddRune(0xfffb + 1) + sa.AddRune(0xe0000) + sa.AddRune(0xe0fff + 1) + + /* add for UCHAR_GRAPHEME_BASE and others */ + sa.AddRune(cgj) + sa.AddRune(cgj + 1) +} + +func CharType(c rune) Category { + props := trie().Get16(c) + return getCategory(props) +} + +func getCategory(props uint16) Category { + return Category(props & 0x1f) +} + +func GetUnicodeProperties(c rune, column int) uint32 { + if column >= int(vectorsColumns()) { + return 0 + } + vecIndex := trie2().Get16(c) + return vectors()[int(vecIndex)+column] +} + +func ScriptExtension(idx uint32) uint16 { + return scriptExtensions()[idx] +} + +func ScriptExtensions(idx uint32) []uint16 { + return scriptExtensions()[idx:] +} + +func IsDigit(c rune) bool { + return CharType(c) == DecimalDigitNumber +} + +func IsPOSIXPrint(c rune) bool { + return CharType(c) == SpaceSeparator || IsGraphPOSIX(c) +} + +func IsGraphPOSIX(c rune) bool { + props := trie().Get16(c) + /* \p{space}\p{gc=Control} == \p{gc=Z}\p{Control} */ + /* comparing ==0 returns FALSE for the categories mentioned */ + return Mask(getCategory(props))&(GcCcMask|GcCsMask|GcCnMask|GcZMask) == 0 +} + +func IsXDigit(c rune) bool { + /* check ASCII and Fullwidth ASCII a-fA-F */ + if (c <= 0x66 && c >= 0x41 && (c <= 0x46 || c >= 0x61)) || + (c >= 0xff21 && c <= 0xff46 && (c <= 0xff26 || c >= 0xff41)) { + return true + } + return IsDigit(c) +} + +func IsBlank(c rune) bool { + if c <= 0x9f { + return c == 9 || c == 0x20 /* TAB or SPACE */ + } + /* Zs */ + return CharType(c) == SpaceSeparator +} + +func CharAge(c rune) UVersionInfo { + version := GetUnicodeProperties(c, 0) >> upropsAgeShift + return UVersionInfo{uint8(version >> 4), uint8(version & 0xf), 0, 0} +} + +func VersionFromString(str string) (version UVersionInfo) { + part := 0 + for len(str) > 0 && part < maxVersionLength { + if str[0] == versionDelimiter { + str = str[1:] + } + str, version[part] = parseInt(str) + part++ + } + return +} + +// parseInt is simplified but aims to mimic strtoul usage +// as it is used for ICU version parsing. +func parseInt(str string) (string, uint8) { + if str == "" { + return str, 0 + } + + start := 0 + end := 0 +whitespace: + for i := 0; i < len(str); i++ { + switch str[i] { + case ' ', '\f', '\n', '\r', '\t', '\v': + start++ + continue + default: + break whitespace + } + } + str = str[start:] + + for i := 0; i < len(str); i++ { + if str[i] < '0' || str[i] > '9' { + end = i + break + } + end++ + } + + val, err := strconv.ParseUint(str[start:end], 10, 8) + if err != nil { + return str[end:], 0 + } + return str[end:], uint8(val) +} + +const upropsNumericTypeValueShift = 6 + +func NumericTypeValue(c rune) uint16 { + props := trie().Get16(c) + return props >> upropsNumericTypeValueShift +} + +func NumericValue(c rune) float64 { + ntv := int32(NumericTypeValue(c)) + + if ntv == UPropsNtvNone { + return noNumericValue + } else if ntv < UPropsNtvDigitStart { + /* decimal digit */ + return float64(ntv - UPropsNtvDecimalStart) + } else if ntv < UPropsNtvNumericStart { + /* other digit */ + return float64(ntv - UPropsNtvDigitStart) + } else if ntv < UPropsNtvFractionStart { + /* small integer */ + return float64(ntv - UPropsNtvNumericStart) + } else if ntv < UPropsNtvLargeStart { + /* fraction */ + numerator := (ntv >> 4) - 12 + denominator := (ntv & 0xf) + 1 + return float64(numerator) / float64(denominator) + } else if ntv < UPropsNtvBase60Start { + /* large, single-significant-digit integer */ + mant := (ntv >> 5) - 14 + exp := (ntv & 0x1f) + 2 + numValue := float64(mant) + + /* multiply by 10^exp without math.h */ + for exp >= 4 { + numValue *= 10000. + exp -= 4 + } + switch exp { + case 3: + numValue *= 1000.0 + case 2: + numValue *= 100.0 + case 1: + numValue *= 10.0 + case 0: + default: + } + + return numValue + } else if ntv < UPropsNtvFraction20Start { + /* sexagesimal (base 60) integer */ + numValue := (ntv >> 2) - 0xbf + exp := (ntv & 3) + 1 + + switch exp { + case 4: + numValue *= 60 * 60 * 60 * 60 + case 3: + numValue *= 60 * 60 * 60 + case 2: + numValue *= 60 * 60 + case 1: + numValue *= 60 + case 0: + default: + } + + return float64(numValue) + } else if ntv < UPropsNtvFraction32Start { + // fraction-20 e.g. 3/80 + frac20 := ntv - UPropsNtvFraction20Start // 0..0x17 + numerator := 2*(frac20&3) + 1 + denominator := 20 << (frac20 >> 2) + return float64(numerator) / float64(denominator) + } else if ntv < UPropsNtvReservedStart { + // fraction-32 e.g. 3/64 + frac32 := ntv - UPropsNtvFraction32Start // 0..15 + numerator := 2*(frac32&3) + 1 + denominator := 32 << (frac32 >> 2) + return float64(numerator) / float64(denominator) + } else { + /* reserved */ + return noNumericValue + } +} diff --git a/go/mysql/icuregex/internal/udata/udata.go b/go/mysql/icuregex/internal/udata/udata.go new file mode 100644 index 00000000000..f20f8be1efa --- /dev/null +++ b/go/mysql/icuregex/internal/udata/udata.go @@ -0,0 +1,155 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package udata + +import ( + "encoding/binary" + "errors" + "unsafe" +) + +type DataInfo struct { + /** sizeof(UDataInfo) + * @stable ICU 2.0 */ + Size uint16 + + /** unused, set to 0 + * @stable ICU 2.0*/ + ReservedWord uint16 + + /* platform data properties */ + /** 0 for little-endian machine, 1 for big-endian + * @stable ICU 2.0 */ + IsBigEndian uint8 + + /** see U_CHARSET_FAMILY values in utypes.h + * @stable ICU 2.0*/ + CharsetFamily uint8 + + /** sizeof(UChar), one of { 1, 2, 4 } + * @stable ICU 2.0*/ + SizeofUChar uint8 + + /** unused, set to 0 + * @stable ICU 2.0*/ + ReservedByte uint8 + + /** data format identifier + * @stable ICU 2.0*/ + DataFormat [4]uint8 + + /** versions: [0] major [1] minor [2] milli [3] micro + * @stable ICU 2.0*/ + FormatVersion [4]uint8 + + /** versions: [0] major [1] minor [2] milli [3] micro + * @stable ICU 2.0*/ + DataVersion [4]uint8 +} + +type Bytes struct { + buf []byte + orig []byte + enc binary.ByteOrder +} + +func NewBytes(b []byte) *Bytes { + return &Bytes{buf: b, orig: b, enc: binary.LittleEndian} +} + +func (b *Bytes) ReadHeader(isValid func(info *DataInfo) bool) error { + type MappedData struct { + headerSize uint16 + magic1 uint8 + magic2 uint8 + } + + type DataHeader struct { + dataHeader MappedData + info DataInfo + } + + data := unsafe.SliceData(b.buf) + header := (*DataHeader)(unsafe.Pointer(data)) + + if header.dataHeader.magic1 != 0xda || header.dataHeader.magic2 != 0x27 { + return errors.New("invalid magic number") + } + + if header.info.IsBigEndian != 0 { + return errors.New("unsupported: BigEndian data source") + } + + if !isValid(&header.info) { + return errors.New("failed to validate data header") + } + + b.buf = b.buf[header.dataHeader.headerSize:] + return nil +} + +func (b *Bytes) Uint8() uint8 { + u := b.buf[0] + b.buf = b.buf[1:] + return u +} +func (b *Bytes) Uint16() uint16 { + u := b.enc.Uint16(b.buf) + b.buf = b.buf[2:] + return u +} + +func (b *Bytes) Uint16Slice(size int32) []uint16 { + s := unsafe.Slice((*uint16)(unsafe.Pointer(unsafe.SliceData(b.buf))), size) + b.buf = b.buf[2*size:] + return s +} + +func (b *Bytes) Uint32Slice(size int32) []uint32 { + s := unsafe.Slice((*uint32)(unsafe.Pointer(unsafe.SliceData(b.buf))), size) + b.buf = b.buf[4*size:] + return s +} + +func (b *Bytes) Uint32() uint32 { + u := b.enc.Uint32(b.buf) + b.buf = b.buf[4:] + return u +} + +func (b *Bytes) Int32() int32 { + return int32(b.Uint32()) +} + +func (b *Bytes) Skip(size int32) { + b.buf = b.buf[size:] +} + +func (b *Bytes) Uint8Slice(n int32) []uint8 { + s := b.buf[:n] + b.buf = b.buf[n:] + return s +} + +func (b *Bytes) Position() int32 { + return int32(len(b.orig) - len(b.buf)) +} diff --git a/go/mysql/icuregex/internal/uemoji/loader.go b/go/mysql/icuregex/internal/uemoji/loader.go new file mode 100644 index 00000000000..7015491d069 --- /dev/null +++ b/go/mysql/icuregex/internal/uemoji/loader.go @@ -0,0 +1,69 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uemoji + +import ( + "sync" + + "vitess.io/vitess/go/mysql/icuregex/internal/icudata" + "vitess.io/vitess/go/mysql/icuregex/internal/udata" + "vitess.io/vitess/go/mysql/icuregex/internal/utrie" +) + +var uemojiOnce sync.Once +var uemoji struct { + trie *utrie.UcpTrie +} + +func loadUEmoji() { + uemojiOnce.Do(func() { + b := udata.NewBytes(icudata.UEmoji) + if err := readData(b); err != nil { + panic(err) + } + }) +} + +func trie() *utrie.UcpTrie { + loadUEmoji() + return uemoji.trie +} + +func readData(bytes *udata.Bytes) error { + err := bytes.ReadHeader(func(info *udata.DataInfo) bool { + return info.DataFormat[0] == 0x45 && + info.DataFormat[1] == 0x6d && + info.DataFormat[2] == 0x6f && + info.DataFormat[3] == 0x6a && + info.FormatVersion[0] == 1 + }) + if err != nil { + return err + } + + bytes.Skip(bytes.Int32() - 4) + uemoji.trie, err = utrie.UcpTrieFromBytes(bytes) + if err != nil { + return err + } + return nil +} diff --git a/go/mysql/icuregex/internal/uemoji/uemoji.go b/go/mysql/icuregex/internal/uemoji/uemoji.go new file mode 100644 index 00000000000..5cc89acd69a --- /dev/null +++ b/go/mysql/icuregex/internal/uemoji/uemoji.go @@ -0,0 +1,82 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uemoji + +import ( + "vitess.io/vitess/go/mysql/icuregex/internal/utrie" +) + +type propertySet interface { + AddRune(ch rune) + AddRuneRange(from rune, to rune) +} + +func AddPropertyStarts(sa propertySet) { + // Add the start code point of each same-value range of the trie. + var start, end rune + for { + end, _ = trie().GetRange(start, utrie.UcpMapRangeNormal, 0, nil) + if end < 0 { + break + } + sa.AddRune(start) + start = end + 1 + } +} + +const ( + bitEmoji = 0 + bitEmojiPresentation = 1 + bitEmojiModifier = 2 + bitEmojiModifierBase = 3 + bitEmojiComponent = 4 + bitExtendedPictographic = 5 + bitBasicEmoji = 6 +) + +// Note: REGIONAL_INDICATOR is a single, hardcoded range implemented elsewhere. +var bitFlags = []int8{ + bitEmoji, + bitEmojiPresentation, + bitEmojiModifier, + bitEmojiModifierBase, + bitEmojiComponent, + -1, + -1, + bitExtendedPictographic, + bitBasicEmoji, + -1, + -1, + -1, + -1, + -1, + bitBasicEmoji, +} + +func HasBinaryProperty(c rune, which int) bool { + bit := bitFlags[which] + if bit < 0 { + return false // not a property that we support in this function + } + bits := trie().Get(c) + return ((bits >> bit) & 1) != 0 +} diff --git a/go/mysql/icuregex/internal/ulayout/ulayout.go b/go/mysql/icuregex/internal/ulayout/ulayout.go new file mode 100644 index 00000000000..dbf21d9460b --- /dev/null +++ b/go/mysql/icuregex/internal/ulayout/ulayout.go @@ -0,0 +1,128 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ulayout + +import ( + "errors" + "sync" + + "vitess.io/vitess/go/mysql/icuregex/internal/icudata" + "vitess.io/vitess/go/mysql/icuregex/internal/udata" + "vitess.io/vitess/go/mysql/icuregex/internal/utrie" +) + +var inpcTrie *utrie.UcpTrie +var inscTrie *utrie.UcpTrie +var voTrie *utrie.UcpTrie + +const ( + ixInpcTrieTop = 1 + ixInscTrieTop = 2 + ixVoTrieTop = 3 + + ixCount = 12 +) + +func InpcTrie() *utrie.UcpTrie { + loadLayouts() + return inpcTrie +} + +func InscTrie() *utrie.UcpTrie { + loadLayouts() + return inscTrie +} + +func VoTrie() *utrie.UcpTrie { + loadLayouts() + return voTrie +} + +var layoutsOnce sync.Once + +func loadLayouts() { + layoutsOnce.Do(func() { + b := udata.NewBytes(icudata.ULayout) + if err := readData(b); err != nil { + panic(err) + } + }) +} + +func readData(bytes *udata.Bytes) error { + err := bytes.ReadHeader(func(info *udata.DataInfo) bool { + return info.DataFormat[0] == 0x4c && + info.DataFormat[1] == 0x61 && + info.DataFormat[2] == 0x79 && + info.DataFormat[3] == 0x6f && + info.FormatVersion[0] == 1 + }) + if err != nil { + return err + } + + startPos := bytes.Position() + indexesLength := int32(bytes.Uint32()) // inIndexes[IX_INDEXES_LENGTH] + if indexesLength < ixCount { + return errors.New("text layout properties data: not enough indexes") + } + index := make([]int32, indexesLength) + index[0] = indexesLength + for i := int32(1); i < indexesLength; i++ { + index[i] = int32(bytes.Uint32()) + } + + offset := indexesLength * 4 + top := index[ixInpcTrieTop] + trieSize := top - offset + if trieSize >= 16 { + inpcTrie, err = utrie.UcpTrieFromBytes(bytes) + if err != nil { + return err + } + } + + pos := bytes.Position() - startPos + bytes.Skip(top - pos) + offset = top + top = index[ixInscTrieTop] + trieSize = top - offset + if trieSize >= 16 { + inscTrie, err = utrie.UcpTrieFromBytes(bytes) + if err != nil { + return err + } + } + + pos = bytes.Position() - startPos + bytes.Skip(top - pos) + offset = top + top = index[ixVoTrieTop] + trieSize = top - offset + if trieSize >= 16 { + voTrie, err = utrie.UcpTrieFromBytes(bytes) + if err != nil { + return err + } + } + return nil +} diff --git a/go/mysql/icuregex/internal/unames/loader.go b/go/mysql/icuregex/internal/unames/loader.go new file mode 100644 index 00000000000..296670b1c66 --- /dev/null +++ b/go/mysql/icuregex/internal/unames/loader.go @@ -0,0 +1,90 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unames + +import ( + "sync" + + "vitess.io/vitess/go/mysql/icuregex/internal/icudata" + "vitess.io/vitess/go/mysql/icuregex/internal/udata" +) + +var charNamesOnce sync.Once +var charNames *unames + +type unames struct { + tokens []uint16 + tokenStrings []uint8 + groups []uint16 + groupNames []uint8 + algNames []algorithmicRange +} + +func loadCharNames() { + charNamesOnce.Do(func() { + b := udata.NewBytes(icudata.UNames) + if err := b.ReadHeader(func(info *udata.DataInfo) bool { + return info.Size >= 20 && + info.IsBigEndian == 0 && + info.CharsetFamily == 0 && + info.DataFormat[0] == 0x75 && /* dataFormat="unam" */ + info.DataFormat[1] == 0x6e && + info.DataFormat[2] == 0x61 && + info.DataFormat[3] == 0x6d && + info.FormatVersion[0] == 1 + }); err != nil { + panic(err) + } + + tokenStringOffset := int32(b.Uint32() - 16) + groupsOffset := int32(b.Uint32() - 16) + groupStringOffset := int32(b.Uint32() - 16) + algNamesOffset := int32(b.Uint32() - 16) + charNames = &unames{ + tokens: b.Uint16Slice(tokenStringOffset / 2), + tokenStrings: b.Uint8Slice(groupsOffset - tokenStringOffset), + groups: b.Uint16Slice((groupStringOffset - groupsOffset) / 2), + groupNames: b.Uint8Slice(algNamesOffset - groupStringOffset), + } + + algCount := b.Uint32() + charNames.algNames = make([]algorithmicRange, 0, algCount) + + for i := uint32(0); i < algCount; i++ { + ar := algorithmicRange{ + start: b.Uint32(), + end: b.Uint32(), + typ: b.Uint8(), + variant: b.Uint8(), + } + size := b.Uint16() + switch ar.typ { + case 0: + ar.s = b.Uint8Slice(int32(size) - 12) + case 1: + ar.factors = b.Uint16Slice(int32(ar.variant)) + ar.s = b.Uint8Slice(int32(size) - 12 - int32(ar.variant)*2) + } + charNames.algNames = append(charNames.algNames, ar) + } + }) +} diff --git a/go/mysql/icuregex/internal/unames/unames.go b/go/mysql/icuregex/internal/unames/unames.go new file mode 100644 index 00000000000..66e8ba15615 --- /dev/null +++ b/go/mysql/icuregex/internal/unames/unames.go @@ -0,0 +1,406 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unames + +import ( + "bytes" + "strconv" + "strings" +) + +func (names *unames) getGroupName(group []uint16) []uint8 { + return names.groupNames[names.getGroupOffset(group):] +} + +type NameChoice int32 + +const ( + UnicodeCharName NameChoice = iota + /** + * The Unicode_1_Name property value which is of little practical value. + * Beginning with ICU 49, ICU APIs return an empty string for this name choice. + * @deprecated ICU 49 + */ + Unicode10CharName + /** Standard or synthetic character name. @stable ICU 2.0 */ + ExtendedCharName + /** Corrected name from NameAliases.txt. @stable ICU 4.4 */ + CharNameAlias +) + +type algorithmicRange struct { + start, end uint32 + typ, variant uint8 + factors []uint16 + s []uint8 +} + +func (ar *algorithmicRange) findAlgName(otherName string) rune { + switch ar.typ { + case 0: + s := ar.s + + for s[0] != 0 && len(otherName) > 0 { + if s[0] != otherName[0] { + return -1 + } + s = s[1:] + otherName = otherName[1:] + } + + var code rune + count := int(ar.variant) + for i := 0; i < count && len(otherName) > 0; i++ { + c := rune(otherName[0]) + otherName = otherName[1:] + if '0' <= c && c <= '9' { + code = (code << 4) | (c - '0') + } else if 'A' <= c && c <= 'F' { + code = (code << 4) | (c - 'A' + 10) + } else { + return -1 + } + } + + if len(otherName) == 0 && ar.start <= uint32(code) && uint32(code) <= ar.end { + return code + } + case 1: + factors := ar.factors + s := ar.s + + for s[0] != 0 && len(otherName) > 0 { + if s[0] != otherName[0] { + return -1 + } + s = s[1:] + otherName = otherName[1:] + } + s = s[1:] + + start := rune(ar.start) + limit := rune(ar.end + 1) + + var indexes [8]uint16 + var buf strings.Builder + var elements [8][]byte + var elementBases [8][]byte + + ar.writeFactorSuffix0(factors, s, &buf, &elements, &elementBases) + if buf.String() == otherName { + return start + } + + for start+1 < limit { + start++ + i := len(factors) + + for { + i-- + idx := indexes[i] + 1 + if idx < factors[i] { + indexes[i] = idx + s = elements[i] + s = s[bytes.IndexByte(s, 0)+1:] + elements[i] = s + break + } + + indexes[i] = 0 + elements[i] = elementBases[i] + } + + t := otherName + for i = 0; i < len(factors); i++ { + s = elements[i] + + for s[0] != 0 && len(t) > 0 { + if s[0] != t[0] { + s = nil + i = 99 + break + } + s = s[1:] + t = t[1:] + } + } + if i < 99 && len(t) == 0 { + return start + } + } + } + return -1 +} + +func (ar *algorithmicRange) writeFactorSuffix0(factors []uint16, s []uint8, buf *strings.Builder, elements, elementBases *[8][]byte) { + /* write each element */ + for i := 0; i < len(factors); i++ { + (*elements)[i] = s + (*elementBases)[i] = s + + nul := bytes.IndexByte(s, 0) + buf.Write(s[:nul]) + s = s[nul+1:] + + factor := int(factors[i] - 1) + for factor > 0 { + s = s[bytes.IndexByte(s, 0)+1:] + factor-- + } + } +} + +func CharForName(nameChoice NameChoice, name string) rune { + loadCharNames() + + lower := strings.ToLower(name) + upper := strings.ToUpper(name) + + if lower[0] == '<' { + if nameChoice == ExtendedCharName && lower[len(lower)-1] == '>' { + if limit := strings.LastIndexByte(lower, '-'); limit >= 2 { + cp, err := strconv.ParseUint(lower[limit+1:len(lower)-1], 16, 32) + if err != nil || cp > 0x10ffff { + return -1 + } + return rune(cp) + } + } + return -1 + } + + for _, ar := range charNames.algNames { + if cp := ar.findAlgName(upper); cp != -1 { + return cp + } + } + + return charNames.enumNames(0, 0x10ffff+1, upper, nameChoice) +} + +const groupShift = 5 +const linesPerGroup = 1 << groupShift +const groupMask = linesPerGroup - 1 + +const ( + groupMsb = iota + groupOffsetHigh + groupOffsetLow + groupLength +) + +func (names *unames) enumNames(start, limit rune, otherName string, nameChoice NameChoice) rune { + startGroupMSB := uint16(start >> groupShift) + endGroupMSB := uint16((limit - 1) >> groupShift) + + group := names.getGroup(start) + + if startGroupMSB < group[groupMsb] && nameChoice == ExtendedCharName { + extLimit := rune(group[groupMsb]) << groupShift + if extLimit > limit { + extLimit = limit + } + start = extLimit + } + + if startGroupMSB == endGroupMSB { + if startGroupMSB == group[groupMsb] { + return names.enumGroupNames(group, start, limit-1, otherName, nameChoice) + } + } else { + if startGroupMSB == group[groupMsb] { + if start&groupMask != 0 { + if cp := names.enumGroupNames(group, start, (rune(startGroupMSB)< group[groupMsb] { + group = group[groupLength:] + } + + for len(group) > 0 && group[groupMsb] < endGroupMSB { + start = rune(group[groupMsb]) << groupShift + if cp := names.enumGroupNames(group, start, start+linesPerGroup-1, otherName, nameChoice); cp != -1 { + return cp + } + group = group[groupLength:] + } + + if len(group) > 0 && group[groupMsb] == endGroupMSB { + return names.enumGroupNames(group, (limit-1)&^groupMask, limit-1, otherName, nameChoice) + } + } + + return -1 +} + +func (names *unames) getGroup(code rune) []uint16 { + groups := names.groups + groupMSB := uint16(code >> groupShift) + + start := 0 + groupCount := int(groups[0]) + limit := groupCount + groups = groups[1:] + + for start < limit-1 { + number := (start + limit) / 2 + if groupMSB < groups[number*groupLength+groupMsb] { + limit = number + } else { + start = number + } + } + + return groups[start*groupLength : (groupCount-start)*groupLength] +} + +func (names *unames) getGroupOffset(group []uint16) uint32 { + return (uint32(group[groupOffsetHigh]) << 16) | uint32(group[groupOffsetLow]) +} + +func (names *unames) enumGroupNames(group []uint16, start, end rune, otherName string, choice NameChoice) rune { + var offsets [linesPerGroup + 2]uint16 + var lengths [linesPerGroup + 2]uint16 + + s := names.getGroupName(group) + s = expandGroupLengths(s, offsets[:0], lengths[:0]) + + for start < end { + name := s[offsets[start&groupMask]:] + nameLen := lengths[start&groupMask] + if names.compareName(name[:nameLen], choice, otherName) { + return start + } + start++ + } + return -1 +} + +func expandGroupLengths(s []uint8, offsets []uint16, lengths []uint16) []uint8 { + /* read the lengths of the 32 strings in this group and get each string's offset */ + var i, offset, length uint16 + var lengthByte uint8 + + /* all 32 lengths must be read to get the offset of the first group string */ + for i < linesPerGroup { + lengthByte = s[0] + s = s[1:] + + /* read even nibble - MSBs of lengthByte */ + if length >= 12 { + /* double-nibble length spread across two bytes */ + length = ((length&0x3)<<4 | uint16(lengthByte)>>4) + 12 + lengthByte &= 0xf + } else if (lengthByte /* &0xf0 */) >= 0xc0 { + /* double-nibble length spread across this one byte */ + length = (uint16(lengthByte) & 0x3f) + 12 + } else { + /* single-nibble length in MSBs */ + length = uint16(lengthByte) >> 4 + lengthByte &= 0xf + } + + offsets = append(offsets, offset) + lengths = append(lengths, length) + + offset += length + i++ + + /* read odd nibble - LSBs of lengthByte */ + if (lengthByte & 0xf0) == 0 { + /* this nibble was not consumed for a double-nibble length above */ + length = uint16(lengthByte) + if length < 12 { + /* single-nibble length in LSBs */ + offsets = append(offsets, offset) + lengths = append(lengths, length) + + offset += length + i++ + } + } else { + length = 0 /* prevent double-nibble detection in the next iteration */ + } + } + + /* now, s is at the first group string */ + return s +} + +func (names *unames) compareName(name []byte, choice NameChoice, otherName string) bool { + tokens := names.tokens + + tokenCount := tokens[0] + tokens = tokens[1:] + + otherNameLen := len(otherName) + + for len(name) > 0 && len(otherName) > 0 { + c := name[0] + name = name[1:] + + if uint16(c) >= tokenCount { + if c != ';' { + if c != otherName[0] { + return false + } + otherName = otherName[1:] + } else { + break + } + } else { + token := tokens[c] + if int16(token) == -2 { + token = tokens[int(c)<<8|int(name[0])] + name = name[1:] + } + if int16(token) == -1 { + if c != ';' { + if c != otherName[0] { + return false + } + otherName = otherName[1:] + } else { + if len(otherName) == otherNameLen && choice == ExtendedCharName { + if ';' >= tokenCount || int16(tokens[';']) == -1 { + continue + } + } + break + } + } else { + tokenString := names.tokenStrings[token:] + for tokenString[0] != 0 && len(otherName) > 0 { + if tokenString[0] != otherName[0] { + return false + } + tokenString = tokenString[1:] + otherName = otherName[1:] + } + } + } + } + + return len(otherName) == 0 +} diff --git a/go/mysql/icuregex/internal/unames/unames_test.go b/go/mysql/icuregex/internal/unames/unames_test.go new file mode 100644 index 00000000000..f15353eef8d --- /dev/null +++ b/go/mysql/icuregex/internal/unames/unames_test.go @@ -0,0 +1,64 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unames + +import ( + "testing" +) + +func TestCharForName(t *testing.T) { + var TestNames = []struct { + code rune + name, oldName, extName string + }{ + {0x0061, "LATIN SMALL LETTER A", "", "LATIN SMALL LETTER A"}, + {0x01a2, "LATIN CAPITAL LETTER OI", "", "LATIN CAPITAL LETTER OI"}, + {0x0284, "LATIN SMALL LETTER DOTLESS J WITH STROKE AND HOOK", "", "LATIN SMALL LETTER DOTLESS J WITH STROKE AND HOOK"}, + {0x0fd0, "TIBETAN MARK BSKA- SHOG GI MGO RGYAN", "", "TIBETAN MARK BSKA- SHOG GI MGO RGYAN"}, + {0x3401, "CJK UNIFIED IDEOGRAPH-3401", "", "CJK UNIFIED IDEOGRAPH-3401"}, + {0x7fed, "CJK UNIFIED IDEOGRAPH-7FED", "", "CJK UNIFIED IDEOGRAPH-7FED"}, + {0xac00, "HANGUL SYLLABLE GA", "", "HANGUL SYLLABLE GA"}, + {0xd7a3, "HANGUL SYLLABLE HIH", "", "HANGUL SYLLABLE HIH"}, + {0xd800, "", "", ""}, + {0xdc00, "", "", ""}, + {0xff08, "FULLWIDTH LEFT PARENTHESIS", "", "FULLWIDTH LEFT PARENTHESIS"}, + {0xffe5, "FULLWIDTH YEN SIGN", "", "FULLWIDTH YEN SIGN"}, + {0xffff, "", "", ""}, + {0x1d0c5, "BYZANTINE MUSICAL SYMBOL FHTORA SKLIRON CHROMA VASIS", "", "BYZANTINE MUSICAL SYMBOL FHTORA SKLIRON CHROMA VASIS"}, + {0x23456, "CJK UNIFIED IDEOGRAPH-23456", "", "CJK UNIFIED IDEOGRAPH-23456"}, + } + + for _, tn := range TestNames { + if tn.name != "" { + r := CharForName(UnicodeCharName, tn.name) + if r != tn.code { + t.Errorf("CharFromName(U_UNICODE_CHAR_NAME, %q) = '%c' (U+%d), expected %c (U+%d)", tn.name, r, r, tn.code, tn.code) + } + } + if tn.extName != "" { + r := CharForName(ExtendedCharName, tn.extName) + if r != tn.code { + t.Errorf("CharFromName(U_EXTENDED_CHAR_NAME, %q) = '%c' (U+%d), expected %c (U+%d)", tn.extName, r, r, tn.code, tn.code) + } + } + } +} diff --git a/go/mysql/icuregex/internal/uprops/constants.go b/go/mysql/icuregex/internal/uprops/constants.go new file mode 100644 index 00000000000..4cdf1ef8a0b --- /dev/null +++ b/go/mysql/icuregex/internal/uprops/constants.go @@ -0,0 +1,664 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uprops + +type Property int32 + +const ( + /* + * Note: UProperty constants are parsed by preparseucd.py. + * It matches lines like + * UCHAR_=, + */ + + /* Note: Place UCHAR_ALPHABETIC before UCHAR_BINARY_START so that + debuggers display UCHAR_ALPHABETIC as the symbolic name for 0, + rather than UCHAR_BINARY_START. Likewise for other *_START + identifiers. */ + + /** Binary property Alphabetic. Same as u_isUAlphabetic, different from u_isalpha. + Lu+Ll+Lt+Lm+Lo+Nl+Other_Alphabetic @stable ICU 2.1 */ + UCharAlphabetic Property = 0 + /** First constant for binary Unicode properties. @stable ICU 2.1 */ + UCharBinaryStart = UCharAlphabetic + /** Binary property ASCII_Hex_Digit. 0-9 A-F a-f @stable ICU 2.1 */ + UCharASCIIHexDigit Property = 1 + /** Binary property Bidi_Control. + Format controls which have specific functions + in the Bidi Algorithm. @stable ICU 2.1 */ + UCharBidiControl Property = 2 + /** Binary property Bidi_Mirrored. + Characters that may change display in RTL text. + Same as u_isMirrored. + See Bidi Algorithm, UTR 9. @stable ICU 2.1 */ + UCharBidiMirrored Property = 3 + /** Binary property Dash. Variations of dashes. @stable ICU 2.1 */ + UCharDash Property = 4 + /** Binary property Default_Ignorable_Code_Point (new in Unicode 3.2). + Ignorable in most processing. + <2060..206F, FFF0..FFFB, E0000..E0FFF>+Other_Default_Ignorable_Code_Point+(Cf+Cc+Cs-White_Space) @stable ICU 2.1 */ + UCharDefaultIgnorableCodePoint Property = 5 + /** Binary property Deprecated (new in Unicode 3.2). + The usage of deprecated characters is strongly discouraged. @stable ICU 2.1 */ + UCharDeprecated Property = 6 + /** Binary property Diacritic. Characters that linguistically modify + the meaning of another character to which they apply. @stable ICU 2.1 */ + UCharDiacritic Property = 7 + /** Binary property Extender. + Extend the value or shape of a preceding alphabetic character, + e.g., length and iteration marks. @stable ICU 2.1 */ + UCharExtender Property = 8 + /** Binary property Full_Composition_Exclusion. + CompositionExclusions.txt+Singleton Decompositions+ + Non-Starter Decompositions. @stable ICU 2.1 */ + UCharFullCompositionExclusion Property = 9 + /** Binary property Grapheme_Base (new in Unicode 3.2). + For programmatic determination of grapheme cluster boundaries. + [0..10FFFF]-Cc-Cf-Cs-Co-Cn-Zl-Zp-Grapheme_Link-Grapheme_Extend-CGJ @stable ICU 2.1 */ + UCharGraphemeBase Property = 10 + /** Binary property Grapheme_Extend (new in Unicode 3.2). + For programmatic determination of grapheme cluster boundaries. + Me+Mn+Mc+Other_Grapheme_Extend-Grapheme_Link-CGJ @stable ICU 2.1 */ + UCharGraphemeExtend Property = 11 + /** Binary property Grapheme_Link (new in Unicode 3.2). + For programmatic determination of grapheme cluster boundaries. @stable ICU 2.1 */ + UCharGraphemeLink Property = 12 + /** Binary property Hex_Digit. + Characters commonly used for hexadecimal numbers. @stable ICU 2.1 */ + UCharHexDigit Property = 13 + /** Binary property Hyphen. Dashes used to mark connections + between pieces of words, plus the Katakana middle dot. @stable ICU 2.1 */ + UCharHyphen Property = 14 + /** Binary property ID_Continue. + Characters that can continue an identifier. + DerivedCoreProperties.txt also says "NOTE: Cf characters should be filtered out." + ID_Start+Mn+Mc+Nd+Pc @stable ICU 2.1 */ + UCharIDContinue Property = 15 + /** Binary property ID_Start. + Characters that can start an identifier. + Lu+Ll+Lt+Lm+Lo+Nl @stable ICU 2.1 */ + UCharIDStart Property = 16 + /** Binary property Ideographic. + CJKV ideographs. @stable ICU 2.1 */ + UCharIdeographic Property = 17 + /** Binary property IDS_Binary_Operator (new in Unicode 3.2). + For programmatic determination of + Ideographic Description Sequences. @stable ICU 2.1 */ + UCharIdsBinaryOperator Property = 18 + /** Binary property IDS_Trinary_Operator (new in Unicode 3.2). + For programmatic determination of + Ideographic Description Sequences. @stable ICU 2.1 */ + UCharIdsTrinaryOperator Property = 19 + /** Binary property Join_Control. + Format controls for cursive joining and ligation. @stable ICU 2.1 */ + UCharJoinControl Property = 20 + /** Binary property Logical_Order_Exception (new in Unicode 3.2). + Characters that do not use logical order and + require special handling in most processing. @stable ICU 2.1 */ + UCharLogicalOrderException Property = 21 + /** Binary property Lowercase. Same as u_isULowercase, different from u_islower. + Ll+Other_Lowercase @stable ICU 2.1 */ + UCharLowercase Property = 22 + /** Binary property Math. Sm+Other_Math @stable ICU 2.1 */ + UCharMath Property = 23 + /** Binary property Noncharacter_Code_Point. + Code points that are explicitly defined as illegal + for the encoding of characters. @stable ICU 2.1 */ + UCharNoncharacterCodePoint Property = 24 + /** Binary property Quotation_Mark. @stable ICU 2.1 */ + UCharQuotationMark Property = 25 + /** Binary property Radical (new in Unicode 3.2). + For programmatic determination of + Ideographic Description Sequences. @stable ICU 2.1 */ + UCharRadical Property = 26 + /** Binary property Soft_Dotted (new in Unicode 3.2). + Characters with a "soft dot", like i or j. + An accent placed on these characters causes + the dot to disappear. @stable ICU 2.1 */ + UCharSoftDotted Property = 27 + /** Binary property Terminal_Punctuation. + Punctuation characters that generally mark + the end of textual units. @stable ICU 2.1 */ + UCharTerminalPunctuation Property = 28 + /** Binary property Unified_Ideograph (new in Unicode 3.2). + For programmatic determination of + Ideographic Description Sequences. @stable ICU 2.1 */ + UCharUnifiedIdeograph Property = 29 + /** Binary property Uppercase. Same as u_isUUppercase, different from u_isupper. + Lu+Other_Uppercase @stable ICU 2.1 */ + UCharUppercase Property = 30 + /** Binary property White_Space. + Same as u_isUWhiteSpace, different from u_isspace and u_isWhitespace. + Space characters+TAB+CR+LF-ZWSP-ZWNBSP @stable ICU 2.1 */ + UCharWhiteSpace Property = 31 + /** Binary property XID_Continue. + ID_Continue modified to allow closure under + normalization forms NFKC and NFKD. @stable ICU 2.1 */ + UCharXidContinue Property = 32 + /** Binary property XID_Start. ID_Start modified to allow + closure under normalization forms NFKC and NFKD. @stable ICU 2.1 */ + UCharXidStart Property = 33 + /** Binary property Case_Sensitive. Either the source of a case + mapping or _in_ the target of a case mapping. Not the same as + the general category Cased_Letter. @stable ICU 2.6 */ + UCharCaseSensitive Property = 34 + /** Binary property STerm (new in Unicode 4.0.1). + Sentence Terminal. Used in UAX #29: Text Boundaries + (http://www.unicode.org/reports/tr29/) + @stable ICU 3.0 */ + UCharSTerm Property = 35 + /** Binary property Variation_Selector (new in Unicode 4.0.1). + Indicates all those characters that qualify as Variation Selectors. + For details on the behavior of these characters, + see StandardizedVariants.html and 15.6 Variation Selectors. + @stable ICU 3.0 */ + UCharVariationSelector Property = 36 + /** Binary property NFD_Inert. + ICU-specific property for characters that are inert under NFD, + i.e., they do not interact with adjacent characters. + See the documentation for the Normalizer2 class and the + Normalizer2::isInert() method. + @stable ICU 3.0 */ + UCharNfdInert Property = 37 + /** Binary property NFKD_Inert. + ICU-specific property for characters that are inert under NFKD, + i.e., they do not interact with adjacent characters. + See the documentation for the Normalizer2 class and the + Normalizer2::isInert() method. + @stable ICU 3.0 */ + UCharNfkdInert Property = 38 + /** Binary property NFC_Inert. + ICU-specific property for characters that are inert under NFC, + i.e., they do not interact with adjacent characters. + See the documentation for the Normalizer2 class and the + Normalizer2::isInert() method. + @stable ICU 3.0 */ + UCharNfcInert Property = 39 + /** Binary property NFKC_Inert. + ICU-specific property for characters that are inert under NFKC, + i.e., they do not interact with adjacent characters. + See the documentation for the Normalizer2 class and the + Normalizer2::isInert() method. + @stable ICU 3.0 */ + UCharNfkcInert Property = 40 + /** Binary Property Segment_Starter. + ICU-specific property for characters that are starters in terms of + Unicode normalization and combining character sequences. + They have ccc=0 and do not occur in non-initial position of the + canonical decomposition of any character + (like a-umlaut in NFD and a Jamo T in an NFD(Hangul LVT)). + ICU uses this property for segmenting a string for generating a set of + canonically equivalent strings, e.g. for canonical closure while + processing collation tailoring rules. + @stable ICU 3.0 */ + UCharSegmentStarter Property = 41 + /** Binary property Pattern_Syntax (new in Unicode 4.1). + See UAX #31 Identifier and Pattern Syntax + (http://www.unicode.org/reports/tr31/) + @stable ICU 3.4 */ + UCharPatternSyntax Property = 42 + /** Binary property Pattern_White_Space (new in Unicode 4.1). + See UAX #31 Identifier and Pattern Syntax + (http://www.unicode.org/reports/tr31/) + @stable ICU 3.4 */ + UCharPatternWhiteSpace Property = 43 + /** Binary property alnum (a C/POSIX character class). + Implemented according to the UTS #18 Annex C Standard Recommendation. + See the uchar.h file documentation. + @stable ICU 3.4 */ + UCharPosixAlnum Property = 44 + /** Binary property blank (a C/POSIX character class). + Implemented according to the UTS #18 Annex C Standard Recommendation. + See the uchar.h file documentation. + @stable ICU 3.4 */ + UCharPosixBlank Property = 45 + /** Binary property graph (a C/POSIX character class). + Implemented according to the UTS #18 Annex C Standard Recommendation. + See the uchar.h file documentation. + @stable ICU 3.4 */ + UCharPosixGraph Property = 46 + /** Binary property print (a C/POSIX character class). + Implemented according to the UTS #18 Annex C Standard Recommendation. + See the uchar.h file documentation. + @stable ICU 3.4 */ + UCharPosixPrint Property = 47 + /** Binary property xdigit (a C/POSIX character class). + Implemented according to the UTS #18 Annex C Standard Recommendation. + See the uchar.h file documentation. + @stable ICU 3.4 */ + UCharPosixXdigit Property = 48 + /** Binary property Cased. For Lowercase, Uppercase and Titlecase characters. @stable ICU 4.4 */ + UCharCased Property = 49 + /** Binary property Case_Ignorable. Used in context-sensitive case mappings. @stable ICU 4.4 */ + UCharCaseIgnorable Property = 50 + /** Binary property Changes_When_Lowercased. @stable ICU 4.4 */ + UCharChangesWhenLowercased Property = 51 + /** Binary property Changes_When_Uppercased. @stable ICU 4.4 */ + UCharChangesWhenUppercased Property = 52 + /** Binary property Changes_When_Titlecased. @stable ICU 4.4 */ + UCharChangesWhenTitlecased Property = 53 + /** Binary property Changes_When_Casefolded. @stable ICU 4.4 */ + UCharChangesWhenCasefolded Property = 54 + /** Binary property Changes_When_Casemapped. @stable ICU 4.4 */ + UCharChangesWhenCasemapped Property = 55 + /** Binary property Changes_When_NFKC_Casefolded. @stable ICU 4.4 */ + UCharChangesWhenNfkcCasefolded Property = 56 + /** + * Binary property Emoji. + * See http://www.unicode.org/reports/tr51/#Emoji_Properties + * + * @stable ICU 57 + */ + UCharEmoji Property = 57 + /** + * Binary property Emoji_Presentation. + * See http://www.unicode.org/reports/tr51/#Emoji_Properties + * + * @stable ICU 57 + */ + UCharEmojiPresentation Property = 58 + /** + * Binary property Emoji_Modifier. + * See http://www.unicode.org/reports/tr51/#Emoji_Properties + * + * @stable ICU 57 + */ + UCharEmojiModifier Property = 59 + /** + * Binary property Emoji_Modifier_Base. + * See http://www.unicode.org/reports/tr51/#Emoji_Properties + * + * @stable ICU 57 + */ + UCharEmojiModifierBase Property = 60 + /** + * Binary property Emoji_Component. + * See http://www.unicode.org/reports/tr51/#Emoji_Properties + * + * @stable ICU 60 + */ + UCharEmojiComponent Property = 61 + /** + * Binary property Regional_Indicator. + * @stable ICU 60 + */ + UCharRegionalIndicator Property = 62 + /** + * Binary property Prepended_Concatenation_Mark. + * @stable ICU 60 + */ + UCharPrependedConcatenationMark Property = 63 + /** + * Binary property Extended_Pictographic. + * See http://www.unicode.org/reports/tr51/#Emoji_Properties + * + * @stable ICU 62 + */ + UCharExtendedPictographic Property = 64 + + /** + * Binary property of strings Basic_Emoji. + * See https://www.unicode.org/reports/tr51/#Emoji_Sets + * + * @stable ICU 70 + */ + UCharBasicEmoji Property = 65 + /** + * Binary property of strings Emoji_Keycap_Sequence. + * See https://www.unicode.org/reports/tr51/#Emoji_Sets + * + * @stable ICU 70 + */ + UCharEmojiKeycapSequence Property = 66 + /** + * Binary property of strings RGI_Emoji_Modifier_Sequence. + * See https://www.unicode.org/reports/tr51/#Emoji_Sets + * + * @stable ICU 70 + */ + UCharRgiEmojiModifierSequence Property = 67 + /** + * Binary property of strings RGI_Emoji_Flag_Sequence. + * See https://www.unicode.org/reports/tr51/#Emoji_Sets + * + * @stable ICU 70 + */ + UCharRgiEmojiFlagSequence Property = 68 + /** + * Binary property of strings RGI_Emoji_Tag_Sequence. + * See https://www.unicode.org/reports/tr51/#Emoji_Sets + * + * @stable ICU 70 + */ + UCharRgiEmojiTagSequence Property = 69 + /** + * Binary property of strings RGI_Emoji_ZWJ_Sequence. + * See https://www.unicode.org/reports/tr51/#Emoji_Sets + * + * @stable ICU 70 + */ + UCharRgiEmojiZwjSequence Property = 70 + /** + * Binary property of strings RGI_Emoji. + * See https://www.unicode.org/reports/tr51/#Emoji_Sets + * + * @stable ICU 70 + */ + UCharRgiEmoji Property = 71 + + /** Enumerated property Bidi_Class. + Same as u_charDirection, returns UCharDirection values. @stable ICU 2.2 */ + UCharBidiClass Property = 0x1000 + /** First constant for enumerated/integer Unicode properties. @stable ICU 2.2 */ + UCharIntStart = UCharBidiClass + /** Enumerated property Block. + Same as ublock_getCode, returns UBlockCode values. @stable ICU 2.2 */ + UCharBlock Property = 0x1001 + /** Enumerated property Canonical_Combining_Class. + Same as u_getCombiningClass, returns 8-bit numeric values. @stable ICU 2.2 */ + UCharCanonicalCombiningClass Property = 0x1002 + /** Enumerated property Decomposition_Type. + Returns UDecompositionType values. @stable ICU 2.2 */ + UCharDecompositionType Property = 0x1003 + /** Enumerated property East_Asian_Width. + See http://www.unicode.org/reports/tr11/ + Returns UEastAsianWidth values. @stable ICU 2.2 */ + UCharEastAsianWidth Property = 0x1004 + /** Enumerated property General_Category. + Same as u_charType, returns UCharCategory values. @stable ICU 2.2 */ + UCharGeneralCategory Property = 0x1005 + /** Enumerated property Joining_Group. + Returns UJoiningGroup values. @stable ICU 2.2 */ + UCharJoiningGroup Property = 0x1006 + /** Enumerated property Joining_Type. + Returns UJoiningType values. @stable ICU 2.2 */ + UCharJoiningType Property = 0x1007 + /** Enumerated property Line_Break. + Returns ULineBreak values. @stable ICU 2.2 */ + UCharLineBreak Property = 0x1008 + /** Enumerated property Numeric_Type. + Returns UNumericType values. @stable ICU 2.2 */ + UCharNumericType Property = 0x1009 + /** Enumerated property Script. + Same as uscript_getScript, returns UScriptCode values. @stable ICU 2.2 */ + UCharScript Property = 0x100A + /** Enumerated property Hangul_Syllable_Type, new in Unicode 4. + Returns UHangulSyllableType values. @stable ICU 2.6 */ + UCharHangulSyllableType Property = 0x100B + /** Enumerated property NFD_Quick_Check. + Returns UNormalizationCheckResult values. @stable ICU 3.0 */ + UCharNfdQuickCheck Property = 0x100C + /** Enumerated property NFKD_Quick_Check. + Returns UNormalizationCheckResult values. @stable ICU 3.0 */ + UCharNfkdQuickCheck Property = 0x100D + /** Enumerated property NFC_Quick_Check. + Returns UNormalizationCheckResult values. @stable ICU 3.0 */ + UCharNfcQuickCheck Property = 0x100E + /** Enumerated property NFKC_Quick_Check. + Returns UNormalizationCheckResult values. @stable ICU 3.0 */ + UCharNfkcQuickCheck Property = 0x100F + /** Enumerated property Lead_Canonical_Combining_Class. + ICU-specific property for the ccc of the first code point + of the decomposition, or lccc(c)=ccc(NFD(c)[0]). + Useful for checking for canonically ordered text; + see UNORM_FCD and http://www.unicode.org/notes/tn5/#FCD . + Returns 8-bit numeric values like UCHAR_CANONICAL_COMBINING_CLASS. @stable ICU 3.0 */ + UCharLeadCanonicalCombiningClass Property = 0x1010 + /** Enumerated property Trail_Canonical_Combining_Class. + ICU-specific property for the ccc of the last code point + of the decomposition, or tccc(c)=ccc(NFD(c)[last]). + Useful for checking for canonically ordered text; + see UNORM_FCD and http://www.unicode.org/notes/tn5/#FCD . + Returns 8-bit numeric values like UCHAR_CANONICAL_COMBINING_CLASS. @stable ICU 3.0 */ + UCharTrailCanonicalCombiningClass Property = 0x1011 + /** Enumerated property Grapheme_Cluster_Break (new in Unicode 4.1). + Used in UAX #29: Text Boundaries + (http://www.unicode.org/reports/tr29/) + Returns UGraphemeClusterBreak values. @stable ICU 3.4 */ + UCharGraphemeClusterBreak Property = 0x1012 + /** Enumerated property Sentence_Break (new in Unicode 4.1). + Used in UAX #29: Text Boundaries + (http://www.unicode.org/reports/tr29/) + Returns USentenceBreak values. @stable ICU 3.4 */ + UCharSentenceBreak Property = 0x1013 + /** Enumerated property Word_Break (new in Unicode 4.1). + Used in UAX #29: Text Boundaries + (http://www.unicode.org/reports/tr29/) + Returns UWordBreakValues values. @stable ICU 3.4 */ + UCharWordBreak Property = 0x1014 + /** Enumerated property Bidi_Paired_Bracket_Type (new in Unicode 6.3). + Used in UAX #9: Unicode Bidirectional Algorithm + (http://www.unicode.org/reports/tr9/) + Returns UBidiPairedBracketType values. @stable ICU 52 */ + UCharBidiPairedBracketType Property = 0x1015 + /** + * Enumerated property Indic_Positional_Category. + * New in Unicode 6.0 as provisional property Indic_Matra_Category; + * renamed and changed to informative in Unicode 8.0. + * See http://www.unicode.org/reports/tr44/#IndicPositionalCategory.txt + * @stable ICU 63 + */ + UCharIndicPositionalCategory Property = 0x1016 + /** + * Enumerated property Indic_Syllabic_Category. + * New in Unicode 6.0 as provisional; informative since Unicode 8.0. + * See http://www.unicode.org/reports/tr44/#IndicSyllabicCategory.txt + * @stable ICU 63 + */ + UCharIndicSyllableCategory Property = 0x1017 + /** + * Enumerated property Vertical_Orientation. + * Used for UAX #50 Unicode Vertical Text Layout (https://www.unicode.org/reports/tr50/). + * New as a UCD property in Unicode 10.0. + * @stable ICU 63 + */ + UCharVerticalOrientation Property = 0x1018 + + /** Bitmask property General_Category_Mask. + This is the General_Category property returned as a bit mask. + When used in u_getIntPropertyValue(c), same as U_MASK(u_charType(c)), + returns bit masks for UCharCategory values where exactly one bit is set. + When used with u_getPropertyValueName() and u_getPropertyValueEnum(), + a multi-bit mask is used for sets of categories like "Letters". + Mask values should be cast to uint32_t. + @stable ICU 2.4 */ + UCharGeneralCategoryMask Property = 0x2000 + /** First constant for bit-mask Unicode properties. @stable ICU 2.4 */ + UCharMaskStart = UCharGeneralCategoryMask + /** Double property Numeric_Value. + Corresponds to u_getNumericValue. @stable ICU 2.4 */ + UCharNumericValue Property = 0x3000 + /** First constant for double Unicode properties. @stable ICU 2.4 */ + UCharDoubleStart = UCharNumericValue + /** String property Age. + Corresponds to u_charAge. @stable ICU 2.4 */ + UCharAge Property = 0x4000 + /** First constant for string Unicode properties. @stable ICU 2.4 */ + UCharStringStart = UCharAge + /** String property Bidi_Mirroring_Glyph. + Corresponds to u_charMirror. @stable ICU 2.4 */ + UCharBidiMirroringGlyph Property = 0x4001 + /** String property Case_Folding. + Corresponds to u_strFoldCase in ustring.h. @stable ICU 2.4 */ + UCharCaseFolding Property = 0x4002 + /** String property Lowercase_Mapping. + Corresponds to u_strToLower in ustring.h. @stable ICU 2.4 */ + UCharLowercaseMapping Property = 0x4004 + /** String property Name. + Corresponds to u_charName. @stable ICU 2.4 */ + UCharName Property = 0x4005 + /** String property Simple_Case_Folding. + Corresponds to u_foldCase. @stable ICU 2.4 */ + UCharSimpleCaseFolding Property = 0x4006 + /** String property Simple_Lowercase_Mapping. + Corresponds to u_tolower. @stable ICU 2.4 */ + UCharSimpleLowercaseMapping Property = 0x4007 + /** String property Simple_Titlecase_Mapping. + Corresponds to u_totitle. @stable ICU 2.4 */ + UcharSimpleTitlecaseMapping Property = 0x4008 + /** String property Simple_Uppercase_Mapping. + Corresponds to u_toupper. @stable ICU 2.4 */ + UCharSimpleUppercaseMapping Property = 0x4009 + /** String property Titlecase_Mapping. + Corresponds to u_strToTitle in ustring.h. @stable ICU 2.4 */ + UCharTitlecaseMapping Property = 0x400A + /** String property Uppercase_Mapping. + Corresponds to u_strToUpper in ustring.h. @stable ICU 2.4 */ + UCharUppercaseMapping Property = 0x400C + /** String property Bidi_Paired_Bracket (new in Unicode 6.3). + Corresponds to u_getBidiPairedBracket. @stable ICU 52 */ + UCharBidiPairedBracket Property = 0x400D + + /** Miscellaneous property Script_Extensions (new in Unicode 6.0). + Some characters are commonly used in multiple scripts. + For more information, see UAX #24: http://www.unicode.org/reports/tr24/. + Corresponds to uscript_hasScript and uscript_getScriptExtensions in uscript.h. + @stable ICU 4.6 */ + UCharScriptExtensions Property = 0x7000 + /** First constant for Unicode properties with unusual value types. @stable ICU 4.6 */ + UCharOtherPropertyStart = UCharScriptExtensions + + /** Represents a nonexistent or invalid property or property value. @stable ICU 2.4 */ + UCharInvalidCode Property = -1 +) + +const ( + uCharBinaryLimit = 72 + uCharIntLimit = 0x1019 + uCharMaskLimit = 0x2001 + uCharStringLimit = 0x400E +) + +/* + * Properties in vector word 1 + * Each bit encodes one binary property. + * The following constants represent the bit number, use 1<= 0 { + set.AddRuneRange(startHasProperty, c-1) + startHasProperty = -1 + } + } + } + if startHasProperty >= 0 { + set.AddRuneRange(startHasProperty, uset.MaxValue) + } + + inclusionsForProperty[prop] = set + return set, nil +} + +func getInclusionsForIntProperty(prop Property) (*uset.UnicodeSet, error) { + if inc, ok := inclusionsForProperty[prop]; ok { + return inc, nil + } + + src := prop.source() + incl, err := getInclusionsForSource(src) + if err != nil { + return nil, err + } + + intPropIncl := uset.New() + intPropIncl.AddRune(0) + + numRanges := incl.RangeCount() + prevValue := int32(0) + + for i := 0; i < numRanges; i++ { + rangeEnd := incl.RangeEnd(i) + for c := incl.RangeStart(i); c <= rangeEnd; c++ { + value := getIntPropertyValue(c, prop) + if value != prevValue { + intPropIncl.AddRune(c) + prevValue = value + } + } + } + + inclusionsForProperty[prop] = intPropIncl + return intPropIncl, nil +} + +func ApplyIntPropertyValue(u *uset.UnicodeSet, prop Property, value int32) error { + switch { + case prop == UCharGeneralCategoryMask: + inclusions, err := getInclusionsForProperty(prop) + if err != nil { + return err + } + u.ApplyFilter(inclusions, func(ch rune) bool { + return (uchar.Mask(uchar.CharType(ch)) & uint32(value)) != 0 + }) + case prop == UCharScriptExtensions: + inclusions, err := getInclusionsForProperty(prop) + if err != nil { + return err + } + u.ApplyFilter(inclusions, func(ch rune) bool { + return uscriptHasScript(ch, code(value)) + }) + case 0 <= prop && prop < uCharBinaryLimit: + if value == 0 || value == 1 { + set, err := getInclusionsForBinaryProperty(prop) + if err != nil { + return err + } + u.CopyFrom(set) + if value == 0 { + u.Complement() + } + } else { + u.Clear() + } + + case UCharIntStart <= prop && prop < uCharIntLimit: + inclusions, err := getInclusionsForProperty(prop) + if err != nil { + return err + } + u.ApplyFilter(inclusions, func(ch rune) bool { + return getIntPropertyValue(ch, prop) == value + }) + default: + return errors.ErrUnsupported + } + return nil +} + +func mungeCharName(charname string) string { + out := make([]byte, 0, len(charname)) + for _, ch := range []byte(charname) { + j := len(out) + if ch == ' ' && (j == 0 || out[j-1] == ' ') { + continue + } + out = append(out, ch) + } + return string(out) +} + +func ApplyPropertyPattern(u *uset.UnicodeSet, pat string) error { + if len(pat) < 5 { + return errors.ErrIllegalArgument + } + + var posix, isName, invert bool + + if isPOSIXOpen(pat) { + posix = true + pat = pattern.SkipWhitespace(pat[2:]) + if len(pat) > 0 && pat[0] == '^' { + pat = pat[1:] + invert = true + } + } else if isPerlOpen(pat) || isNameOpen(pat) { + c := pat[1] + invert = c == 'P' + isName = c == 'N' + pat = pattern.SkipWhitespace(pat[2:]) + if len(pat) == 0 || pat[0] != '{' { + return errors.ErrIllegalArgument + } + pat = pat[1:] + } else { + return errors.ErrIllegalArgument + } + + var closePos int + if posix { + closePos = strings.Index(pat, ":]") + } else { + closePos = strings.IndexByte(pat, '}') + } + if closePos < 0 { + return errors.ErrIllegalArgument + } + + equals := strings.IndexByte(pat, '=') + var propName, valueName string + if equals >= 0 && equals < closePos && !isName { + propName = pat[:equals] + valueName = pat[equals+1 : closePos] + } else { + propName = pat[:closePos] + if isName { + valueName = propName + propName = "na" + } + } + + if err := ApplyPropertyAlias(u, propName, valueName); err != nil { + return err + } + if invert { + u.Complement() + } + return nil +} + +func isPOSIXOpen(pattern string) bool { + return pattern[0] == '[' && pattern[1] == ':' +} + +func isNameOpen(pattern string) bool { + return pattern[0] == '\\' && pattern[1] == 'N' +} + +func isPerlOpen(pattern string) bool { + return pattern[0] == '\\' && (pattern[1] == 'p' || pattern[1] == 'P') +} + +func ApplyPropertyAlias(u *uset.UnicodeSet, prop, value string) error { + var p Property + var v int32 + var invert bool + + if len(value) > 0 { + p = getPropertyEnum(prop) + if p == -1 { + return errors.ErrIllegalArgument + } + if p == UCharGeneralCategory { + p = UCharGeneralCategoryMask + } + + if (p >= UCharBinaryStart && p < uCharBinaryLimit) || + (p >= UCharIntStart && p < uCharIntLimit) || + (p >= UCharMaskStart && p < uCharMaskLimit) { + v = getPropertyValueEnum(p, value) + if v == -1 { + // Handle numeric CCC + if p == UCharCanonicalCombiningClass || + p == UCharTrailCanonicalCombiningClass || + p == UCharLeadCanonicalCombiningClass { + val, err := strconv.ParseUint(value, 10, 8) + if err != nil { + return errors.ErrIllegalArgument + } + v = int32(val) + } else { + return errors.ErrIllegalArgument + } + } + } else { + switch p { + case UCharNumericValue: + val, err := strconv.ParseFloat(value, 64) + if err != nil { + return errors.ErrIllegalArgument + } + incl, err := getInclusionsForProperty(p) + if err != nil { + return err + } + u.ApplyFilter(incl, func(ch rune) bool { + return uchar.NumericValue(ch) == val + }) + return nil + case UCharName: + // Must munge name, since u_charFromName() does not do + // 'loose' matching. + charName := mungeCharName(value) + ch := unames.CharForName(unames.ExtendedCharName, charName) + if ch < 0 { + return errors.ErrIllegalArgument + } + u.Clear() + u.AddRune(ch) + return nil + case UCharAge: + // Must munge name, since u_versionFromString() does not do + // 'loose' matching. + charName := mungeCharName(value) + version := uchar.VersionFromString(charName) + incl, err := getInclusionsForProperty(p) + if err != nil { + return err + } + u.ApplyFilter(incl, func(ch rune) bool { + return uchar.CharAge(ch) == version + }) + return nil + case UCharScriptExtensions: + v = getPropertyValueEnum(UCharScript, value) + if v == -1 { + return errors.ErrIllegalArgument + } + default: + // p is a non-binary, non-enumerated property that we + // don't support (yet). + return errors.ErrIllegalArgument + } + } + } else { + // value is empty. Interpret as General Category, Script, or + // Binary property. + p = UCharGeneralCategoryMask + v = getPropertyValueEnum(p, prop) + if v == -1 { + p = UCharScript + v = getPropertyValueEnum(p, prop) + if v == -1 { + p = getPropertyEnum(prop) + if p >= UCharBinaryStart && p < uCharBinaryLimit { + v = 1 + } else if 0 == comparePropertyNames("ANY", prop) { + u.Clear() + u.AddRuneRange(uset.MinValue, uset.MaxValue) + return nil + } else if 0 == comparePropertyNames("ASCII", prop) { + u.Clear() + u.AddRuneRange(0, 0x7F) + return nil + } else if 0 == comparePropertyNames("Assigned", prop) { + // [:Assigned:]=[:^Cn:] + p = UCharGeneralCategoryMask + v = int32(uchar.GcCnMask) + invert = true + } else { + return errors.ErrIllegalArgument + } + } + } + } + + err := ApplyIntPropertyValue(u, p, v) + if err != nil { + return err + } + if invert { + u.Complement() + } + return nil +} + +func AddULayoutPropertyStarts(src propertySource, u *uset.UnicodeSet) { + var trie *utrie.UcpTrie + switch src { + case srcInpc: + trie = ulayout.InpcTrie() + case srcInsc: + trie = ulayout.InscTrie() + case srcVo: + trie = ulayout.VoTrie() + default: + panic("unreachable") + } + + // Add the start code point of each same-value range of the trie. + var start, end rune + for { + end, _ = trie.GetRange(start, utrie.UcpMapRangeNormal, 0, nil) + if end < 0 { + break + } + u.AddRune(start) + start = end + 1 + } +} + +func AddCategory(u *uset.UnicodeSet, mask uint32) error { + set := uset.New() + err := ApplyIntPropertyValue(set, UCharGeneralCategoryMask, int32(mask)) + if err != nil { + return err + } + u.AddAll(set) + return nil +} + +func NewUnicodeSetFomPattern(pattern string, flags uset.USet) (*uset.UnicodeSet, error) { + u := uset.New() + if err := ApplyPropertyPattern(u, pattern); err != nil { + return nil, err + } + if flags&uset.CaseInsensitive != 0 { + u.CloseOver(uset.CaseInsensitive) + } + return u, nil +} + +func MustNewUnicodeSetFomPattern(pattern string, flags uset.USet) *uset.UnicodeSet { + u, err := NewUnicodeSetFomPattern(pattern, flags) + if err != nil { + panic(err) + } + return u +} diff --git a/go/mysql/icuregex/internal/uprops/uprops.go b/go/mysql/icuregex/internal/uprops/uprops.go new file mode 100644 index 00000000000..0589938c29c --- /dev/null +++ b/go/mysql/icuregex/internal/uprops/uprops.go @@ -0,0 +1,217 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uprops + +import ( + "vitess.io/vitess/go/mysql/icuregex/internal/bytestrie" + "vitess.io/vitess/go/mysql/icuregex/internal/uchar" +) + +const ( + ixValueMapsOffset = 0 + ixByteTriesOffset = 1 + ixNameGroupsOffset = 2 + ixReserved3Offset = 3 +) + +func (prop Property) source() propertySource { + if prop < UCharBinaryStart { + return srcNone /* undefined */ + } else if prop < uCharBinaryLimit { + bprop := binProps[prop] + if bprop.mask != 0 { + return srcPropsvec + } + return bprop.column + } else if prop < UCharIntStart { + return srcNone /* undefined */ + } else if prop < uCharIntLimit { + iprop := intProps[prop-UCharIntStart] + if iprop.mask != 0 { + return srcPropsvec + } + return iprop.column + } else if prop < UCharStringStart { + switch prop { + case UCharGeneralCategoryMask, + UCharNumericValue: + return srcChar + + default: + return srcNone + } + } else if prop < uCharStringLimit { + switch prop { + case UCharAge: + return srcPropsvec + + case UCharBidiMirroringGlyph: + return srcBidi + + case UCharCaseFolding, + UCharLowercaseMapping, + UCharSimpleCaseFolding, + UCharSimpleLowercaseMapping, + UcharSimpleTitlecaseMapping, + UCharSimpleUppercaseMapping, + UCharTitlecaseMapping, + UCharUppercaseMapping: + return srcCase + + /* UCHAR_ISO_COMMENT, UCHAR_UNICODE_1_NAME (deprecated) */ + case UCharName: + return srcNames + + default: + return srcNone + } + } else { + switch prop { + case UCharScriptExtensions: + return srcPropsvec + default: + return srcNone /* undefined */ + } + } +} + +func getPropertyEnum(alias string) Property { + return Property(getPropertyOrValueEnum(0, alias)) +} + +func getPropertyValueEnum(prop Property, alias string) int32 { + valueMapIdx := findProperty(prop) + if valueMapIdx == 0 { + return -1 + } + + valueMps := valueMaps() + valueMapIdx = int32(valueMps[valueMapIdx+1]) + if valueMapIdx == 0 { + return -1 + } + // valueMapIndex is the start of the property's valueMap, + // where the first word is the BytesTrie offset. + return getPropertyOrValueEnum(int32(valueMps[valueMapIdx]), alias) +} + +func findProperty(prop Property) int32 { + var i = int32(1) + valueMps := valueMaps() + for numRanges := int32(valueMps[0]); numRanges > 0; numRanges-- { + start := int32(valueMps[i]) + limit := int32(valueMps[i+1]) + i += 2 + if int32(prop) < start { + break + } + if int32(prop) < limit { + return i + (int32(prop)-start)*2 + } + i += (limit - start) * 2 + } + return 0 +} + +func getPropertyOrValueEnum(offset int32, alias string) int32 { + trie := bytestrie.New(byteTrie()[offset:]) + if trie.ContainsName(alias) { + return trie.GetValue() + } + return -1 +} + +func comparePropertyNames(name1, name2 string) int { + next := func(s string) (byte, string) { + for len(s) > 0 && (s[0] == 0x2d || s[0] == 0x5f || s[0] == 0x20 || (0x09 <= s[0] && s[0] <= 0x0d)) { + s = s[1:] + } + if len(s) == 0 { + return 0, "" + } + c := s[0] + s = s[1:] + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + return c, s + } + + var r1, r2 byte + for { + r1, name1 = next(name1) + r2, name2 = next(name2) + + if r1 == 0 && r2 == 0 { + return 0 + } + + /* Compare the lowercased characters */ + if r1 != r2 { + return int(r1) - int(r2) + } + } +} + +func getIntPropertyValue(c rune, which Property) int32 { + if which < UCharIntStart { + if UCharBinaryStart <= which && which < uCharBinaryLimit { + prop := binProps[which] + if prop.contains == nil { + return 0 + } + if prop.contains(prop, c, which) { + return 1 + } + return 0 + } + } else if which < uCharIntLimit { + iprop := intProps[which-UCharIntStart] + return iprop.getValue(iprop, c, which) + } else if which == UCharGeneralCategoryMask { + return int32(uchar.Mask(uchar.CharType(c))) + } + return 0 // undefined +} + +func mergeScriptCodeOrIndex(scriptX uint32) uint32 { + return ((scriptX & scriptHighMask) >> scriptHighShift) | + (scriptX & scriptLowMask) +} + +func script(c rune) int32 { + if c > 0x10ffff { + return -1 + } + scriptX := uchar.GetUnicodeProperties(c, 0) & scriptXMask + codeOrIndex := mergeScriptCodeOrIndex(scriptX) + + if scriptX < scriptXWithCommon { + return int32(codeOrIndex) + } else if scriptX < scriptXWithInherited { + return 0 + } else if scriptX < scriptXWithOther { + return 1 + } else { + return int32(uchar.ScriptExtension(codeOrIndex)) + } +} diff --git a/go/mysql/icuregex/internal/uprops/uprops_binary.go b/go/mysql/icuregex/internal/uprops/uprops_binary.go new file mode 100644 index 00000000000..5d4aaaec1b5 --- /dev/null +++ b/go/mysql/icuregex/internal/uprops/uprops_binary.go @@ -0,0 +1,249 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uprops + +import ( + "slices" + + "vitess.io/vitess/go/mysql/icuregex/internal/normalizer" + "vitess.io/vitess/go/mysql/icuregex/internal/ubidi" + "vitess.io/vitess/go/mysql/icuregex/internal/ucase" + "vitess.io/vitess/go/mysql/icuregex/internal/uchar" + "vitess.io/vitess/go/mysql/icuregex/internal/uemoji" +) + +type binaryProperty struct { + column propertySource + mask uint32 + contains func(prop *binaryProperty, c rune, which Property) bool +} + +func defaultContains(prop *binaryProperty, c rune, _ Property) bool { + return (uchar.GetUnicodeProperties(c, int(prop.column)) & prop.mask) != 0 +} + +var binProps = [uCharBinaryLimit]*binaryProperty{ + /* + * column and mask values for binary properties from u_getUnicodeProperties(). + * Must be in order of corresponding UProperty, + * and there must be exactly one entry per binary UProperty. + * + * Properties with mask==0 are handled in code. + * For them, column is the UPropertySource value. + * + * See also https://unicode-org.github.io/icu/userguide/strings/properties.html + */ + {1, uchar.Mask(pAlphabetic), defaultContains}, + {1, uchar.Mask(pASCIIHexDigit), defaultContains}, + {srcBidi, 0, isBidiControl}, + {srcBidi, 0, isMirrored}, + {1, uchar.Mask(pDash), defaultContains}, + {1, uchar.Mask(pDefaultIgnorableCodePoint), defaultContains}, + {1, uchar.Mask(pDeprecated), defaultContains}, + {1, uchar.Mask(pDiacritic), defaultContains}, + {1, uchar.Mask(pExtender), defaultContains}, + {srcNfc, 0, hasFullCompositionExclusion}, + {1, uchar.Mask(pGraphemeBase), defaultContains}, + {1, uchar.Mask(pGraphemeExtend), defaultContains}, + {1, uchar.Mask(pGraphemeLink), defaultContains}, + {1, uchar.Mask(pHexDigit), defaultContains}, + {1, uchar.Mask(pHyphen), defaultContains}, + {1, uchar.Mask(pIDContinue), defaultContains}, + {1, uchar.Mask(pIDStart), defaultContains}, + {1, uchar.Mask(pIdeographic), defaultContains}, + {1, uchar.Mask(pIdsBinaryOperator), defaultContains}, + {1, uchar.Mask(pIdsTrinaryOperator), defaultContains}, + {srcBidi, 0, isJoinControl}, + {1, uchar.Mask(pLogicalOrderException), defaultContains}, + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_LOWERCASE + {1, uchar.Mask(pMath), defaultContains}, + {1, uchar.Mask(pNoncharacterCodePoint), defaultContains}, + {1, uchar.Mask(pQuotationMark), defaultContains}, + {1, uchar.Mask(pRadical), defaultContains}, + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_SOFT_DOTTED + {1, uchar.Mask(pTerminalPunctuation), defaultContains}, + {1, uchar.Mask(pUnifiedIdeograph), defaultContains}, + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_UPPERCASE + {1, uchar.Mask(pWhiteSpace), defaultContains}, + {1, uchar.Mask(pXidContinue), defaultContains}, + {1, uchar.Mask(pXidStart), defaultContains}, + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CASE_SENSITIVE + {1, uchar.Mask(pSTerm), defaultContains}, + {1, uchar.Mask(pVariationSelector), defaultContains}, + {srcNfc, 0, isNormInert}, // UCHAR_NFD_INERT + {srcNfkc, 0, isNormInert}, // UCHAR_NFKD_INERT + {srcNfc, 0, isNormInert}, // UCHAR_NFC_INERT + {srcNfkc, 0, isNormInert}, // UCHAR_NFKC_INERT + {srcNfcCanonIter, 0, nil}, // Segment_Starter is currently unsupported + {1, uchar.Mask(pPatternSyntax), defaultContains}, + {1, uchar.Mask(pPatternWhiteSpace), defaultContains}, + {srcCharAndPropsvec, 0, isPOSIXAlnum}, + {srcChar, 0, isPOSIXBlank}, + {srcChar, 0, isPOSIXGraph}, + {srcChar, 0, isPOSIXPrint}, + {srcChar, 0, isPOSIXXdigit}, + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CASED + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CASE_IGNORABLE + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CHANGES_WHEN_LOWERCASED + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CHANGES_WHEN_UPPERCASED + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CHANGES_WHEN_TITLECASED + {srcCaseAndNorm, 0, changesWhenCasefolded}, + {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CHANGES_WHEN_CASEMAPPED + {srcNfkcCf, 0, nil}, // Changes_When_NFKC_Casefolded is currently unsupported + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI_PRESENTATION + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI_MODIFIER + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI_MODIFIER_BASE + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI_COMPONENT + {2, 0, isRegionalIndicator}, + {1, uchar.Mask(pPrependedConcatenationMark), defaultContains}, + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EXTENDED_PICTOGRAPHIC + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_BASIC_EMOJI + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI_KEYCAP_SEQUENCE + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_RGI_EMOJI_MODIFIER_SEQUENCE + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_RGI_EMOJI_FLAG_SEQUENCE + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_RGI_EMOJI_TAG_SEQUENCE + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_RGI_EMOJI_ZWJ_SEQUENCE + {srcEmoji, 0, hasEmojiProperty}, // UCHAR_RGI_EMOJI +} + +func isBidiControl(_ *binaryProperty, c rune, _ Property) bool { + return ubidi.IsBidiControl(c) +} + +func isMirrored(_ *binaryProperty, c rune, _ Property) bool { + return ubidi.IsMirrored(c) +} + +func isRegionalIndicator(_ *binaryProperty, c rune, _ Property) bool { + return 0x1F1E6 <= c && c <= 0x1F1FF +} + +func changesWhenCasefolded(_ *binaryProperty, c rune, _ Property) bool { + if c < 0 { + return false + } + + nfd := normalizer.Nfc().Decompose(c) + if nfd == nil { + nfd = []rune{c} + } + folded := ucase.FoldRunes(nfd) + return !slices.Equal(nfd, folded) +} + +func isPOSIXXdigit(_ *binaryProperty, c rune, _ Property) bool { + return uchar.IsXDigit(c) +} + +func isPOSIXPrint(_ *binaryProperty, c rune, _ Property) bool { + return uchar.IsPOSIXPrint(c) +} + +func isPOSIXGraph(_ *binaryProperty, c rune, _ Property) bool { + return uchar.IsGraphPOSIX(c) +} + +func isPOSIXBlank(_ *binaryProperty, c rune, _ Property) bool { + return uchar.IsBlank(c) +} + +func isPOSIXAlnum(_ *binaryProperty, c rune, _ Property) bool { + return (uchar.GetUnicodeProperties(c, 1)&uchar.Mask(pAlphabetic)) != 0 || uchar.IsDigit(c) +} + +func isJoinControl(_ *binaryProperty, c rune, _ Property) bool { + return ubidi.IsJoinControl(c) +} + +func hasFullCompositionExclusion(_ *binaryProperty, c rune, _ Property) bool { + impl := normalizer.Nfc() + return impl.IsCompNo(c) +} + +func caseBinaryPropertyContains(_ *binaryProperty, c rune, which Property) bool { + return HasBinaryPropertyUcase(c, which) +} + +func HasBinaryPropertyUcase(c rune, which Property) bool { + /* case mapping properties */ + switch which { + case UCharLowercase: + return ucase.Lower == ucase.GetType(c) + case UCharUppercase: + return ucase.Upper == ucase.GetType(c) + case UCharSoftDotted: + return ucase.IsSoftDotted(c) + case UCharCaseSensitive: + return ucase.IsCaseSensitive(c) + case UCharCased: + return ucase.None != ucase.GetType(c) + case UCharCaseIgnorable: + return (ucase.GetTypeOrIgnorable(c) >> 2) != 0 + /* + * Note: The following Changes_When_Xyz are defined as testing whether + * the NFD form of the input changes when Xyz-case-mapped. + * However, this simpler implementation of these properties, + * ignoring NFD, passes the tests. + * The implementation needs to be changed if the tests start failing. + * When that happens, optimizations should be used to work with the + * per-single-code point ucase_toFullXyz() functions unless + * the NFD form has more than one code point, + * and the property starts set needs to be the union of the + * start sets for normalization and case mappings. + */ + case UCharChangesWhenLowercased: + return ucase.ToFullLower(c) >= 0 + case UCharChangesWhenUppercased: + return ucase.ToFullUpper(c) >= 0 + case UCharChangesWhenTitlecased: + return ucase.ToFullTitle(c) >= 0 + /* case UCHAR_CHANGES_WHEN_CASEFOLDED: -- in uprops.c */ + case UCharChangesWhenCasemapped: + return ucase.ToFullLower(c) >= 0 || ucase.ToFullUpper(c) >= 0 || ucase.ToFullTitle(c) >= 0 + default: + return false + } +} + +func isNormInert(_ *binaryProperty, c rune, which Property) bool { + mode := normalizer.Mode(int32(which) - int32(UCharNfdInert) + int32(normalizer.NormNfd)) + return normalizer.IsInert(c, mode) +} + +func HasBinaryProperty(c rune, which Property) bool { + if which < UCharBinaryStart || uCharBinaryLimit <= which { + return false + } + prop := binProps[which] + if prop.contains == nil { + return false + } + return prop.contains(prop, c, which) +} + +func hasEmojiProperty(_ *binaryProperty, c rune, which Property) bool { + if which < UCharEmoji || UCharRgiEmoji < which { + return false + } + return uemoji.HasBinaryProperty(c, int(which-UCharEmoji)) +} diff --git a/go/mysql/icuregex/internal/uprops/uprops_int.go b/go/mysql/icuregex/internal/uprops/uprops_int.go new file mode 100644 index 00000000000..3e62d31184f --- /dev/null +++ b/go/mysql/icuregex/internal/uprops/uprops_int.go @@ -0,0 +1,265 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uprops + +import ( + "vitess.io/vitess/go/mysql/icuregex/internal/normalizer" + "vitess.io/vitess/go/mysql/icuregex/internal/ubidi" + "vitess.io/vitess/go/mysql/icuregex/internal/uchar" + "vitess.io/vitess/go/mysql/icuregex/internal/ulayout" +) + +type intPropertyGetValue func(prop *intProperty, c rune, which Property) int32 + +type intProperty struct { + column propertySource + mask uint32 + shift int32 + getValue intPropertyGetValue +} + +const ( + blockMask = 0x0001ff00 + blockShift = 8 + + eaMask = 0x000e0000 + eaShift = 17 + + lbMask = 0x03f00000 + lbShift = 20 + + sbMask = 0x000f8000 + sbShift = 15 + + wbMask = 0x00007c00 + wbShift = 10 + + gcbMask = 0x000003e0 + gcbShift = 5 + + dtMask = 0x0000001f +) + +type numericType int32 + +/** + * Numeric Type constants. + * + * @see UCHAR_NUMERIC_TYPE + * @stable ICU 2.2 + */ +const ( + /* + * Note: UNumericType constants are parsed by preparseucd.py. + * It matches lines like + * U_NT_ + */ + + ntNone numericType = iota /*[None]*/ + ntDecimal /*[de]*/ + ntDigit /*[di]*/ + ntNumeric /*[nu]*/ + /** + * One more than the highest normal UNumericType value. + * The highest value is available via u_getIntPropertyMaxValue(UCHAR_NUMERIC_TYPE). + * + * @deprecated ICU 58 The numeric value may change over time, see ICU ticket #12420. + */ + ntCount +) + +/** + * Hangul Syllable Type constants. + * + * @see UCHAR_HANGUL_SYLLABLE_TYPE + * @stable ICU 2.6 + */ + +type hangunSyllableType int32 + +const ( + /* + * Note: UHangulSyllableType constants are parsed by preparseucd.py. + * It matches lines like + * U_HST_ + */ + + hstNotApplicable hangunSyllableType = iota /*[NA]*/ + hstLeadingJamo /*[L]*/ + hstVowelJamo /*[V]*/ + hstTrailingJamo /*[T]*/ + hstLvSyllable /*[LV]*/ + hstLvtSyllable /*[LVT]*/ + /** + * One more than the highest normal UHangulSyllableType value. + * The highest value is available via u_getIntPropertyMaxValue(UCHAR_HANGUL_SYLLABLE_TYPE). + * + * @deprecated ICU 58 The numeric value may change over time, see ICU ticket #12420. + */ + hstCount +) + +var intProps = [uCharIntLimit - UCharIntStart]*intProperty{ + /* + * column, mask and shift values for int-value properties from u_getUnicodeProperties(). + * Must be in order of corresponding UProperty, + * and there must be exactly one entry per int UProperty. + * + * Properties with mask==0 are handled in code. + * For them, column is the UPropertySource value. + */ + {srcBidi, 0, 0, getBiDiClass}, + {0, blockMask, blockShift, defaultGetValue}, + {srcNfc, 0, 0xff, getCombiningClass}, + {2, dtMask, 0, defaultGetValue}, + {0, eaMask, eaShift, defaultGetValue}, + {srcChar, 0, int32(uchar.CharCategoryCount - 1), getGeneralCategory}, + {srcBidi, 0, 0, getJoiningGroup}, + {srcBidi, 0, 0, getJoiningType}, + {2, lbMask, lbShift, defaultGetValue}, + {srcChar, 0, int32(ntCount - 1), getNumericType}, + {srcPropsvec, 0, 0, getScript}, + {srcPropsvec, 0, int32(hstCount - 1), getHangulSyllableType}, + // UCHAR_NFD_QUICK_CHECK: max=1=YES -- never "maybe", only "no" or "yes" + {srcNfc, 0, int32(normalizer.Yes), getNormQuickCheck}, + // UCHAR_NFKD_QUICK_CHECK: max=1=YES -- never "maybe", only "no" or "yes" + {srcNfkc, 0, int32(normalizer.Yes), getNormQuickCheck}, + // UCHAR_NFC_QUICK_CHECK: max=2=MAYBE + {srcNfc, 0, int32(normalizer.Maybe), getNormQuickCheck}, + // UCHAR_NFKC_QUICK_CHECK: max=2=MAYBE + {srcNfkc, 0, int32(normalizer.Maybe), getNormQuickCheck}, + {srcNfc, 0, 0xff, getLeadCombiningClass}, + {srcNfc, 0, 0xff, getTrailCombiningClass}, + {2, gcbMask, gcbShift, defaultGetValue}, + {2, sbMask, sbShift, defaultGetValue}, + {2, wbMask, wbShift, defaultGetValue}, + {srcBidi, 0, 0, getBiDiPairedBracketType}, + {srcInpc, 0, 0, getInPC}, + {srcInsc, 0, 0, getInSC}, + {srcVo, 0, 0, getVo}, +} + +func getVo(_ *intProperty, c rune, _ Property) int32 { + return int32(ulayout.VoTrie().Get(c)) +} + +func getInSC(_ *intProperty, c rune, _ Property) int32 { + return int32(ulayout.InscTrie().Get(c)) +} + +func getInPC(_ *intProperty, c rune, _ Property) int32 { + return int32(ulayout.InpcTrie().Get(c)) +} + +func getBiDiPairedBracketType(_ *intProperty, c rune, _ Property) int32 { + return int32(ubidi.PairedBracketType(c)) +} + +func getTrailCombiningClass(_ *intProperty, c rune, _ Property) int32 { + return int32(normalizer.Nfc().GetFCD16(c) & 0xff) +} + +func getLeadCombiningClass(_ *intProperty, c rune, _ Property) int32 { + val := int32(normalizer.Nfc().GetFCD16(c) >> 8) + return val +} + +func getNormQuickCheck(_ *intProperty, c rune, which Property) int32 { + return int32(normalizer.QuickCheck(c, normalizer.Mode(int32(which)-int32(UCharNfdQuickCheck)+int32(normalizer.NormNfd)))) +} + +/* + * Map some of the Grapheme Cluster Break values to Hangul Syllable Types. + * Hangul_Syllable_Type is fully redundant with a subset of Grapheme_Cluster_Break. + */ +var gcbToHst = []hangunSyllableType{ + hstNotApplicable, /* U_GCB_OTHER */ + hstNotApplicable, /* U_GCB_CONTROL */ + hstNotApplicable, /* U_GCB_CR */ + hstNotApplicable, /* U_GCB_EXTEND */ + hstLeadingJamo, /* U_GCB_L */ + hstNotApplicable, /* U_GCB_LF */ + hstLvSyllable, /* U_GCB_LV */ + hstLvtSyllable, /* U_GCB_LVT */ + hstTrailingJamo, /* U_GCB_T */ + hstVowelJamo, /* U_GCB_V */ + /* + * Omit GCB values beyond what we need for hst. + * The code below checks for the array length. + */ +} + +func getHangulSyllableType(_ *intProperty, c rune, _ Property) int32 { + /* see comments on gcbToHst[] above */ + gcb := (int32(uchar.GetUnicodeProperties(c, 2)) & gcbMask) >> gcbShift + + if gcb < int32(len(gcbToHst)) { + return int32(gcbToHst[gcb]) + } + return int32(hstNotApplicable) +} + +func getScript(_ *intProperty, c rune, _ Property) int32 { + return script(c) +} + +func getNumericType(_ *intProperty, c rune, _ Property) int32 { + ntv := uchar.NumericTypeValue(c) + return int32(ntvGetType(ntv)) +} + +func getJoiningType(_ *intProperty, c rune, _ Property) int32 { + return int32(ubidi.JoinType(c)) +} + +func getJoiningGroup(_ *intProperty, c rune, _ Property) int32 { + return int32(ubidi.JoinGroup(c)) +} + +func getGeneralCategory(_ *intProperty, c rune, _ Property) int32 { + return int32(uchar.CharType(c)) +} + +func getCombiningClass(_ *intProperty, c rune, _ Property) int32 { + return int32(normalizer.Nfc().CombiningClass(c)) +} + +func defaultGetValue(prop *intProperty, c rune, _ Property) int32 { + return int32(uchar.GetUnicodeProperties(c, int(prop.column))&prop.mask) >> prop.shift +} + +func getBiDiClass(_ *intProperty, c rune, _ Property) int32 { + return int32(ubidi.Class(c)) +} + +func ntvGetType(ntv uint16) numericType { + switch { + case ntv == uchar.UPropsNtvNone: + return ntNone + case ntv < uchar.UPropsNtvDigitStart: + return ntDecimal + case ntv < uchar.UPropsNtvNumericStart: + return ntDigit + default: + return ntNumeric + } +} diff --git a/go/mysql/icuregex/internal/uprops/uscript.go b/go/mysql/icuregex/internal/uprops/uscript.go new file mode 100644 index 00000000000..8a4423849df --- /dev/null +++ b/go/mysql/icuregex/internal/uprops/uscript.go @@ -0,0 +1,505 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uprops + +import "vitess.io/vitess/go/mysql/icuregex/internal/uchar" + +/** + * Constants for ISO 15924 script codes. + * + * The current set of script code constants supports at least all scripts + * that are encoded in the version of Unicode which ICU currently supports. + * The names of the constants are usually derived from the + * Unicode script property value aliases. + * See UAX #24 Unicode Script Property (http://www.unicode.org/reports/tr24/) + * and http://www.unicode.org/Public/UCD/latest/ucd/PropertyValueAliases.txt . + * + * In addition, constants for many ISO 15924 script codes + * are included, for use with language tags, CLDR data, and similar. + * Some of those codes are not used in the Unicode Character Database (UCD). + * For example, there are no characters that have a UCD script property value of + * Hans or Hant. All Han ideographs have the Hani script property value in Unicode. + * + * Private-use codes Qaaa..Qabx are not included, except as used in the UCD or in CLDR. + * + * Starting with ICU 55, script codes are only added when their scripts + * have been or will certainly be encoded in Unicode, + * and have been assigned Unicode script property value aliases, + * to ensure that their script names are stable and match the names of the constants. + * Script codes like Latf and Aran that are not subject to separate encoding + * may be added at any time. + * + * @stable ICU 2.2 + */ +type code int32 + +/* + * Note: UScriptCode constants and their ISO script code comments + * are parsed by preparseucd.py. + * It matches lines like + * USCRIPT_ = , / * * / + */ + +const ( + /** @stable ICU 2.2 */ + invalidCode code = -1 + /** @stable ICU 2.2 */ + common code = 0 /* Zyyy */ + /** @stable ICU 2.2 */ + inherited code = 1 /* Zinh */ /* "Code for inherited script", for non-spacing combining marks; also Qaai */ + /** @stable ICU 2.2 */ + arabic code = 2 /* Arab */ + /** @stable ICU 2.2 */ + armenian code = 3 /* Armn */ + /** @stable ICU 2.2 */ + bengali code = 4 /* Beng */ + /** @stable ICU 2.2 */ + bopomofo code = 5 /* Bopo */ + /** @stable ICU 2.2 */ + cherokee code = 6 /* Cher */ + /** @stable ICU 2.2 */ + coptic code = 7 /* Copt */ + /** @stable ICU 2.2 */ + cyrillic code = 8 /* Cyrl */ + /** @stable ICU 2.2 */ + deseret code = 9 /* Dsrt */ + /** @stable ICU 2.2 */ + devanagari code = 10 /* Deva */ + /** @stable ICU 2.2 */ + ethiopic code = 11 /* Ethi */ + /** @stable ICU 2.2 */ + georgian code = 12 /* Geor */ + /** @stable ICU 2.2 */ + gothic code = 13 /* Goth */ + /** @stable ICU 2.2 */ + greek code = 14 /* Grek */ + /** @stable ICU 2.2 */ + gujarati code = 15 /* Gujr */ + /** @stable ICU 2.2 */ + gurmukhi code = 16 /* Guru */ + /** @stable ICU 2.2 */ + han code = 17 /* Hani */ + /** @stable ICU 2.2 */ + hangul code = 18 /* Hang */ + /** @stable ICU 2.2 */ + hebrew code = 19 /* Hebr */ + /** @stable ICU 2.2 */ + hiragana code = 20 /* Hira */ + /** @stable ICU 2.2 */ + kannada code = 21 /* Knda */ + /** @stable ICU 2.2 */ + katakana code = 22 /* Kana */ + /** @stable ICU 2.2 */ + khmer code = 23 /* Khmr */ + /** @stable ICU 2.2 */ + lao code = 24 /* Laoo */ + /** @stable ICU 2.2 */ + latin code = 25 /* Latn */ + /** @stable ICU 2.2 */ + malayalam code = 26 /* Mlym */ + /** @stable ICU 2.2 */ + mongolian code = 27 /* Mong */ + /** @stable ICU 2.2 */ + myanmar code = 28 /* Mymr */ + /** @stable ICU 2.2 */ + ogham code = 29 /* Ogam */ + /** @stable ICU 2.2 */ + oldItalic code = 30 /* Ital */ + /** @stable ICU 2.2 */ + oriya code = 31 /* Orya */ + /** @stable ICU 2.2 */ + runic code = 32 /* Runr */ + /** @stable ICU 2.2 */ + sinhala code = 33 /* Sinh */ + /** @stable ICU 2.2 */ + syriac code = 34 /* Syrc */ + /** @stable ICU 2.2 */ + tamil code = 35 /* Taml */ + /** @stable ICU 2.2 */ + telugu code = 36 /* Telu */ + /** @stable ICU 2.2 */ + thaana code = 37 /* Thaa */ + /** @stable ICU 2.2 */ + thai code = 38 /* Thai */ + /** @stable ICU 2.2 */ + tibetan code = 39 /* Tibt */ + /** Canadian_Aboriginal script. @stable ICU 2.6 */ + canadianAboriginal code = 40 /* Cans */ + /** Canadian_Aboriginal script (alias). @stable ICU 2.2 */ + ucas code = canadianAboriginal + /** @stable ICU 2.2 */ + yi code = 41 /* Yiii */ + /* New scripts in Unicode 3.2 */ + /** @stable ICU 2.2 */ + tagalog code = 42 /* Tglg */ + /** @stable ICU 2.2 */ + hanunoo code = 43 /* Hano */ + /** @stable ICU 2.2 */ + buhid code = 44 /* Buhd */ + /** @stable ICU 2.2 */ + tagbanwa code = 45 /* Tagb */ + + /* New scripts in Unicode 4 */ + /** @stable ICU 2.6 */ + braille code = 46 /* Brai */ + /** @stable ICU 2.6 */ + cypriot code = 47 /* Cprt */ + /** @stable ICU 2.6 */ + limbu code = 48 /* Limb */ + /** @stable ICU 2.6 */ + linearB code = 49 /* Linb */ + /** @stable ICU 2.6 */ + osmanya code = 50 /* Osma */ + /** @stable ICU 2.6 */ + shavian code = 51 /* Shaw */ + /** @stable ICU 2.6 */ + taiLe code = 52 /* Tale */ + /** @stable ICU 2.6 */ + ugaratic code = 53 /* Ugar */ + + /** New script code in Unicode 4.0.1 @stable ICU 3.0 */ + katakanaOrHiragana = 54 /*Hrkt */ + + /* New scripts in Unicode 4.1 */ + /** @stable ICU 3.4 */ + buginese code = 55 /* Bugi */ + /** @stable ICU 3.4 */ + glagolitic code = 56 /* Glag */ + /** @stable ICU 3.4 */ + kharoshthi code = 57 /* Khar */ + /** @stable ICU 3.4 */ + sylotiNagri code = 58 /* Sylo */ + /** @stable ICU 3.4 */ + newTaiLue code = 59 /* Talu */ + /** @stable ICU 3.4 */ + tifinagh code = 60 /* Tfng */ + /** @stable ICU 3.4 */ + oldPersian code = 61 /* Xpeo */ + + /* New script codes from Unicode and ISO 15924 */ + /** @stable ICU 3.6 */ + balinese code = 62 /* Bali */ + /** @stable ICU 3.6 */ + batak code = 63 /* Batk */ + /** @stable ICU 3.6 */ + blissymbols code = 64 /* Blis */ + /** @stable ICU 3.6 */ + brahmi code = 65 /* Brah */ + /** @stable ICU 3.6 */ + cham code = 66 /* Cham */ + /** @stable ICU 3.6 */ + cirth code = 67 /* Cirt */ + /** @stable ICU 3.6 */ + oldChurchSlavonicCyrillic code = 68 /* Cyrs */ + /** @stable ICU 3.6 */ + demoticEgyptian code = 69 /* Egyd */ + /** @stable ICU 3.6 */ + hieraticEgyptian code = 70 /* Egyh */ + /** @stable ICU 3.6 */ + egyptianHieroglyphs code = 71 /* Egyp */ + /** @stable ICU 3.6 */ + khutsuri code = 72 /* Geok */ + /** @stable ICU 3.6 */ + simplfiedHan code = 73 /* Hans */ + /** @stable ICU 3.6 */ + traditionalHan code = 74 /* Hant */ + /** @stable ICU 3.6 */ + pahawhHmong code = 75 /* Hmng */ + /** @stable ICU 3.6 */ + oldHungarian code = 76 /* Hung */ + /** @stable ICU 3.6 */ + harappanIndus code = 77 /* Inds */ + /** @stable ICU 3.6 */ + javanese code = 78 /* Java */ + /** @stable ICU 3.6 */ + kayahLi code = 79 /* Kali */ + /** @stable ICU 3.6 */ + latinFraktur code = 80 /* Latf */ + /** @stable ICU 3.6 */ + latinGaelic code = 81 /* Latg */ + /** @stable ICU 3.6 */ + lepcha code = 82 /* Lepc */ + /** @stable ICU 3.6 */ + linearA code = 83 /* Lina */ + /** @stable ICU 4.6 */ + mandaic code = 84 /* Mand */ + /** @stable ICU 3.6 */ + mandaean code = mandaic + /** @stable ICU 3.6 */ + mayanHieroglyphs code = 85 /* Maya */ + /** @stable ICU 4.6 */ + meroiticHieroglyphs code = 86 /* Mero */ + /** @stable ICU 3.6 */ + meroitic code = meroiticHieroglyphs + /** @stable ICU 3.6 */ + nko code = 87 /* Nkoo */ + /** @stable ICU 3.6 */ + orkhon code = 88 /* Orkh */ + /** @stable ICU 3.6 */ + oldPermic code = 89 /* Perm */ + /** @stable ICU 3.6 */ + phagsPa code = 90 /* Phag */ + /** @stable ICU 3.6 */ + phoenician code = 91 /* Phnx */ + /** @stable ICU 52 */ + miao code = 92 /* Plrd */ + /** @stable ICU 3.6 */ + phoneticPollard code = miao + /** @stable ICU 3.6 */ + rongoRongo code = 93 /* Roro */ + /** @stable ICU 3.6 */ + sarati code = 94 /* Sara */ + /** @stable ICU 3.6 */ + extrangeloSyriac code = 95 /* Syre */ + /** @stable ICU 3.6 */ + westernSyriac code = 96 /* Syrj */ + /** @stable ICU 3.6 */ + easternSyriac code = 97 /* Syrn */ + /** @stable ICU 3.6 */ + tengwar code = 98 /* Teng */ + /** @stable ICU 3.6 */ + vai code = 99 /* Vaii */ + /** @stable ICU 3.6 */ + visibleSpeech code = 100 /* Visp */ + /** @stable ICU 3.6 */ + cuneiform code = 101 /* Xsux */ + /** @stable ICU 3.6 */ + unwrittenLanguages code = 102 /* Zxxx */ + /** @stable ICU 3.6 */ + unknown code = 103 /* Zzzz */ /* Unknown="Code for uncoded script", for unassigned code points */ + + /** @stable ICU 3.8 */ + carian code = 104 /* Cari */ + /** @stable ICU 3.8 */ + japanese code = 105 /* Jpan */ + /** @stable ICU 3.8 */ + lanna code = 106 /* Lana */ + /** @stable ICU 3.8 */ + lycian code = 107 /* Lyci */ + /** @stable ICU 3.8 */ + lydian code = 108 /* Lydi */ + /** @stable ICU 3.8 */ + olChiki code = 109 /* Olck */ + /** @stable ICU 3.8 */ + rejang code = 110 /* Rjng */ + /** @stable ICU 3.8 */ + saurashtra code = 111 /* Saur */ + /** Sutton SignWriting @stable ICU 3.8 */ + signWriting code = 112 /* Sgnw */ + /** @stable ICU 3.8 */ + sundanese code = 113 /* Sund */ + /** @stable ICU 3.8 */ + moon code = 114 /* Moon */ + /** @stable ICU 3.8 */ + meiteiMayek code = 115 /* Mtei */ + + /** @stable ICU 4.0 */ + imperialAramaic code = 116 /* Armi */ + /** @stable ICU 4.0 */ + avestan code = 117 /* Avst */ + /** @stable ICU 4.0 */ + chakma code = 118 /* Cakm */ + /** @stable ICU 4.0 */ + korean code = 119 /* Kore */ + /** @stable ICU 4.0 */ + kaithi code = 120 /* Kthi */ + /** @stable ICU 4.0 */ + manichaean code = 121 /* Mani */ + /** @stable ICU 4.0 */ + inscriptionalPahlavi code = 122 /* Phli */ + /** @stable ICU 4.0 */ + psalterPahlavi code = 123 /* Phlp */ + /** @stable ICU 4.0 */ + bookPahlavi code = 124 /* Phlv */ + /** @stable ICU 4.0 */ + inscriptionalParthian code = 125 /* Prti */ + /** @stable ICU 4.0 */ + samaritan code = 126 /* Samr */ + /** @stable ICU 4.0 */ + taiViet code = 127 /* Tavt */ + /** @stable ICU 4.0 */ + mathematicalNotation code = 128 /* Zmth */ + /** @stable ICU 4.0 */ + symbols code = 129 /* Zsym */ + + /** @stable ICU 4.4 */ + bamum code = 130 /* Bamu */ + /** @stable ICU 4.4 */ + lisu code = 131 /* Lisu */ + /** @stable ICU 4.4 */ + nakhiGeba code = 132 /* Nkgb */ + /** @stable ICU 4.4 */ + oldSouthArabian code = 133 /* Sarb */ + + /** @stable ICU 4.6 */ + bassaVah code = 134 /* Bass */ + /** @stable ICU 54 */ + duployan code = 135 /* Dupl */ + /** @stable ICU 4.6 */ + elbasan code = 136 /* Elba */ + /** @stable ICU 4.6 */ + grantha code = 137 /* Gran */ + /** @stable ICU 4.6 */ + kpelle code = 138 /* Kpel */ + /** @stable ICU 4.6 */ + loma code = 139 /* Loma */ + /** Mende Kikakui @stable ICU 4.6 */ + mende code = 140 /* Mend */ + /** @stable ICU 4.6 */ + meroiticCursive code = 141 /* Merc */ + /** @stable ICU 4.6 */ + oldNorthArabian code = 142 /* Narb */ + /** @stable ICU 4.6 */ + nabataean code = 143 /* Nbat */ + /** @stable ICU 4.6 */ + palmyrene code = 144 /* Palm */ + /** @stable ICU 54 */ + khudawadi code = 145 /* Sind */ + /** @stable ICU 4.6 */ + sindhi code = khudawadi + /** @stable ICU 4.6 */ + warangCiti code = 146 /* Wara */ + + /** @stable ICU 4.8 */ + afaka code = 147 /* Afak */ + /** @stable ICU 4.8 */ + jurchen code = 148 /* Jurc */ + /** @stable ICU 4.8 */ + mro code = 149 /* Mroo */ + /** @stable ICU 4.8 */ + nushu code = 150 /* Nshu */ + /** @stable ICU 4.8 */ + sharada code = 151 /* Shrd */ + /** @stable ICU 4.8 */ + soraSompeng code = 152 /* Sora */ + /** @stable ICU 4.8 */ + takri code = 153 /* Takr */ + /** @stable ICU 4.8 */ + tangut code = 154 /* Tang */ + /** @stable ICU 4.8 */ + woleai code = 155 /* Wole */ + + /** @stable ICU 49 */ + anatolianHieroglyphs code = 156 /* Hluw */ + /** @stable ICU 49 */ + khojki code = 157 /* Khoj */ + /** @stable ICU 49 */ + tirhuta code = 158 /* Tirh */ + + /** @stable ICU 52 */ + caucasianAlbanian code = 159 /* Aghb */ + /** @stable ICU 52 */ + mahajani code = 160 /* Mahj */ + + /** @stable ICU 54 */ + ahom code = 161 /* Ahom */ + /** @stable ICU 54 */ + hatran code = 162 /* Hatr */ + /** @stable ICU 54 */ + modi code = 163 /* Modi */ + /** @stable ICU 54 */ + multani code = 164 /* Mult */ + /** @stable ICU 54 */ + pauCinHau code = 165 /* Pauc */ + /** @stable ICU 54 */ + siddham code = 166 /* Sidd */ + + /** @stable ICU 58 */ + adlam code = 167 /* Adlm */ + /** @stable ICU 58 */ + bhaiksuki code = 168 /* Bhks */ + /** @stable ICU 58 */ + marchen code = 169 /* Marc */ + /** @stable ICU 58 */ + newa code = 170 /* Newa */ + /** @stable ICU 58 */ + osage code = 171 /* Osge */ + + /** @stable ICU 58 */ + hanWithBopomofo code = 172 /* Hanb */ + /** @stable ICU 58 */ + jamo code = 173 /* Jamo */ + /** @stable ICU 58 */ + symbolsEmoji code = 174 /* Zsye */ + + /** @stable ICU 60 */ + masaramGondi code = 175 /* Gonm */ + /** @stable ICU 60 */ + soyombo code = 176 /* Soyo */ + /** @stable ICU 60 */ + zanabazarSquare code = 177 /* Zanb */ + + /** @stable ICU 62 */ + dogra code = 178 /* Dogr */ + /** @stable ICU 62 */ + gunjalaGondi code = 179 /* Gong */ + /** @stable ICU 62 */ + makasar code = 180 /* Maka */ + /** @stable ICU 62 */ + medefaidrin code = 181 /* Medf */ + /** @stable ICU 62 */ + hanifiRohingya code = 182 /* Rohg */ + /** @stable ICU 62 */ + sogdian code = 183 /* Sogd */ + /** @stable ICU 62 */ + oldSogdian code = 184 /* Sogo */ + + /** @stable ICU 64 */ + elymaic code = 185 /* Elym */ + /** @stable ICU 64 */ + nyiakengPuachueHmong code = 186 /* Hmnp */ + /** @stable ICU 64 */ + nandinagari code = 187 /* Nand */ + /** @stable ICU 64 */ + wancho code = 188 /* Wcho */ + + /** @stable ICU 66 */ + chorasmian code = 189 /* Chrs */ + /** @stable ICU 66 */ + divesAkuru code = 190 /* Diak */ + /** @stable ICU 66 */ + khitanSmallScript code = 191 /* Kits */ + /** @stable ICU 66 */ + yezedi code = 192 /* Yezi */ +) + +func uscriptHasScript(c rune, sc code) bool { + scriptX := uchar.GetUnicodeProperties(c, 0) & scriptXMask + codeOrIndex := mergeScriptCodeOrIndex(scriptX) + if scriptX < scriptXWithCommon { + return sc == code(codeOrIndex) + } + + scx := uchar.ScriptExtensions(codeOrIndex) + if scriptX >= scriptXWithOther { + scx = uchar.ScriptExtensions(uint32(scx[1])) + } + sc32 := uint32(sc) + if sc32 > 0x7fff { + /* Guard against bogus input that would make us go past the Script_Extensions terminator. */ + return false + } + for sc32 > uint32(scx[0]) { + scx = scx[1:] + } + return sc32 == uint32(scx[0]&0x7fff) +} diff --git a/go/mysql/icuregex/internal/uset/close.go b/go/mysql/icuregex/internal/uset/close.go new file mode 100644 index 00000000000..bd3f9f0f7e3 --- /dev/null +++ b/go/mysql/icuregex/internal/uset/close.go @@ -0,0 +1,96 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uset + +import "vitess.io/vitess/go/mysql/icuregex/internal/ucase" + +type USet uint32 + +const ( + /** + * Ignore white space within patterns unless quoted or escaped. + * @stable ICU 2.4 + */ + IgnoreSpace USet = 1 + + /** + * Enable case insensitive matching. E.g., "[ab]" with this flag + * will match 'a', 'A', 'b', and 'B'. "[^ab]" with this flag will + * match all except 'a', 'A', 'b', and 'B'. This performs a full + * closure over case mappings, e.g. U+017F for s. + * + * The resulting set is a superset of the input for the code points but + * not for the strings. + * It performs a case mapping closure of the code points and adds + * full case folding strings for the code points, and reduces strings of + * the original set to their full case folding equivalents. + * + * This is designed for case-insensitive matches, for example + * in regular expressions. The full code point case closure allows checking of + * an input character directly against the closure set. + * Strings are matched by comparing the case-folded form from the closure + * set with an incremental case folding of the string in question. + * + * The closure set will also contain single code points if the original + * set contained case-equivalent strings (like U+00DF for "ss" or "Ss" etc.). + * This is not necessary (that is, redundant) for the above matching method + * but results in the same closure sets regardless of whether the original + * set contained the code point or a string. + * + * @stable ICU 2.4 + */ + CaseInsensitive USet = 2 + + /** + * Enable case insensitive matching. E.g., "[ab]" with this flag + * will match 'a', 'A', 'b', and 'B'. "[^ab]" with this flag will + * match all except 'a', 'A', 'b', and 'B'. This adds the lower-, + * title-, and uppercase mappings as well as the case folding + * of each existing element in the set. + * @stable ICU 3.2 + */ + AddCaseMappings USet = 4 +) + +func (u *UnicodeSet) CloseOver(attribute USet) { + if attribute&AddCaseMappings != 0 { + panic("USET_ADD_CASE_MAPPINGS is unsupported") + } + if (attribute & CaseInsensitive) == 0 { + return + } + + foldSet := u.Clone() + n := u.RangeCount() + + for i := 0; i < n; i++ { + start := u.RangeStart(i) + end := u.RangeEnd(i) + + // full case closure + for cp := start; cp <= end; cp++ { + ucase.AddCaseClosure(cp, foldSet) + } + } + + *u = *foldSet +} diff --git a/go/mysql/icuregex/internal/uset/frozen.go b/go/mysql/icuregex/internal/uset/frozen.go new file mode 100644 index 00000000000..2703a4f6975 --- /dev/null +++ b/go/mysql/icuregex/internal/uset/frozen.go @@ -0,0 +1,339 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uset + +type frozen struct { + // One byte 0 or 1 per Latin-1 character. + latin1Contains [0x100]byte + + // true if contains(U+FFFD) + containsFFFD bool + + /* + * One bit per code point from U+0000..U+07FF. + * The bits are organized vertically; consecutive code points + * correspond to the same bit positions in consecutive table words. + * With code point parts + * lead=c{10..6} + * trail=c{5..0} + * it is set.contains(c)==(table7FF[trail] bit lead) + * + * Bits for 0..7F (non-shortest forms) are set to the result of contains(FFFD) + * for faster validity checking at runtime. + */ + table7FF [64]uint32 + + /* + * One bit per 64 BMP code points. + * The bits are organized vertically; consecutive 64-code point blocks + * correspond to the same bit position in consecutive table words. + * With code point parts + * lead=c{15..12} + * t1=c{11..6} + * test bits (lead+16) and lead in bmpBlockBits[t1]. + * If the upper bit is 0, then the lower bit indicates if contains(c) + * for all code points in the 64-block. + * If the upper bit is 1, then the block is mixed and set.contains(c) + * must be called. + * + * Bits for 0..7FF (non-shortest forms) and D800..DFFF are set to + * the result of contains(FFFD) for faster validity checking at runtime. + */ + bmpBlockBits [64]uint32 + + /* + * Inversion list indexes for restricted binary searches in + * findCodePoint(), from + * findCodePoint(U+0800, U+1000, U+2000, .., U+F000, U+10000). + * U+0800 is the first 3-byte-UTF-8 code point. Code points below U+0800 are + * always looked up in the bit tables. + * The last pair of indexes is for finding supplementary code points. + */ + list4kStarts [18]int32 +} + +func freeze(list []rune) *frozen { + f := &frozen{} + + listEnd := int32(len(list) - 1) + + f.list4kStarts[0] = f.findCodePoint(list, 0x800, 0, listEnd) + for i := 1; i <= 0x10; i++ { + f.list4kStarts[i] = f.findCodePoint(list, rune(i)<<12, f.list4kStarts[i-1], listEnd) + } + f.list4kStarts[0x11] = listEnd + f.containsFFFD = f.containsSlow(list, 0xfffd, f.list4kStarts[0xf], f.list4kStarts[0x10]) + + f.initBits(list) + f.overrideIllegal() + + return f +} + +func (f *frozen) containsSlow(list []rune, c rune, lo, hi int32) bool { + return (f.findCodePoint(list, c, lo, hi) & 1) != 0 +} + +func (f *frozen) findCodePoint(list []rune, c rune, lo, hi int32) int32 { + /* Examples: + findCodePoint(c) + set list[] c=0 1 3 4 7 8 + === ============== =========== + [] [110000] 0 0 0 0 0 0 + [\u0000-\u0003] [0, 4, 110000] 1 1 1 2 2 2 + [\u0004-\u0007] [4, 8, 110000] 0 0 0 1 1 2 + [:Any:] [0, 110000] 1 1 1 1 1 1 + */ + + // Return the smallest i such that c < list[i]. Assume + // list[len - 1] == HIGH and that c is legal (0..HIGH-1). + if c < list[lo] { + return lo + } + // High runner test. c is often after the last range, so an + // initial check for this condition pays off. + if lo >= hi || c >= list[hi-1] { + return hi + } + // invariant: c >= list[lo] + // invariant: c < list[hi] + for { + i := (lo + hi) >> 1 + if i == lo { + break // Found! + } else if c < list[i] { + hi = i + } else { + lo = i + } + } + return hi +} + +func (f *frozen) set32x64bits(table *[64]uint32, start, limit int32) { + lead := start >> 6 // Named for UTF-8 2-byte lead byte with upper 5 bits. + trail := start & 0x3f // Named for UTF-8 2-byte trail byte with lower 6 bits. + + // Set one bit indicating an all-one block. + bits := uint32(1) << lead + if (start + 1) == limit { // Single-character shortcut. + table[trail] |= bits + return + } + + limitLead := limit >> 6 + limitTrail := limit & 0x3f + + if lead == limitLead { + // Partial vertical bit column. + for trail < limitTrail { + table[trail] |= bits + trail++ + } + } else { + // Partial vertical bit column, + // followed by a bit rectangle, + // followed by another partial vertical bit column. + if trail > 0 { + for { + table[trail] |= bits + trail++ + if trail >= 64 { + break + } + } + lead++ + } + if lead < limitLead { + bits = ^((uint32(1) << lead) - 1) + if limitLead < 0x20 { + bits &= (uint32(1) << limitLead) - 1 + } + for trail = 0; trail < 64; trail++ { + table[trail] |= bits + } + } + // limit<=0x800. If limit==0x800 then limitLead=32 and limitTrail=0. + // In that case, bits=1<= 0x100 { + break + } + for { + f.latin1Contains[start] = 1 + start++ + if start >= limit || start >= 0x100 { + break + } + } + if limit > 0x100 { + break + } + } + + // Find the first range overlapping with (or after) 80..FF again, + // to include them in table7FF as well. + listIndex = 0 + for { + start = list[listIndex] + listIndex++ + if listIndex < len(list) { + limit = list[listIndex] + listIndex++ + } else { + limit = 0x110000 + } + if limit > 0x80 { + if start < 0x80 { + start = 0x80 + } + break + } + } + + // Set table7FF[]. + for start < 0x800 { + var end rune + if limit <= 0x800 { + end = limit + } else { + end = 0x800 + } + f.set32x64bits(&f.table7FF, start, end) + if limit > 0x800 { + start = 0x800 + break + } + + start = list[listIndex] + listIndex++ + if listIndex < len(list) { + limit = list[listIndex] + listIndex++ + } else { + limit = 0x110000 + } + } + + // Set bmpBlockBits[]. + minStart := rune(0x800) + for start < 0x10000 { + if limit > 0x10000 { + limit = 0x10000 + } + + if start < minStart { + start = minStart + } + if start < limit { // Else: Another range entirely in a known mixed-value block. + if (start & 0x3f) != 0 { + // Mixed-value block of 64 code points. + start >>= 6 + f.bmpBlockBits[start&0x3f] |= 0x10001 << (start >> 6) + start = (start + 1) << 6 // Round up to the next block boundary. + minStart = start // Ignore further ranges in this block. + } + if start < limit { + if start < (limit &^ 0x3f) { + // Multiple all-ones blocks of 64 code points each. + f.set32x64bits(&f.bmpBlockBits, start>>6, limit>>6) + } + + if (limit & 0x3f) != 0 { + // Mixed-value block of 64 code points. + limit >>= 6 + f.bmpBlockBits[limit&0x3f] |= 0x10001 << (limit >> 6) + limit = (limit + 1) << 6 // Round up to the next block boundary. + minStart = limit // Ignore further ranges in this block. + } + } + } + + if limit == 0x10000 { + break + } + + start = list[listIndex] + listIndex++ + if listIndex < len(list) { + limit = list[listIndex] + listIndex++ + } else { + limit = 0x110000 + } + } +} diff --git a/go/mysql/icuregex/internal/uset/pattern.go b/go/mysql/icuregex/internal/uset/pattern.go new file mode 100644 index 00000000000..20b44da9c6d --- /dev/null +++ b/go/mysql/icuregex/internal/uset/pattern.go @@ -0,0 +1,107 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uset + +import ( + "strings" + + "vitess.io/vitess/go/mysql/icuregex/internal/pattern" +) + +func (u *UnicodeSet) String() string { + var buf strings.Builder + u.ToPattern(&buf, true) + return buf.String() +} + +func (u *UnicodeSet) ToPattern(w *strings.Builder, escapeUnprintable bool) { + w.WriteByte('[') + + // // Check against the predefined categories. We implicitly build + // // up ALL category sets the first time toPattern() is called. + // for (int8_t cat=0; cat 1 && u.RangeStart(0) == MinValue && u.RangeEnd(count-1) == MaxValue { + + // Emit the inverse + w.WriteByte('^') + + for i := 1; i < count; i++ { + start := u.RangeEnd(i-1) + 1 + end := u.RangeStart(i) - 1 + u.appendToPattern(w, start, escapeUnprintable) + if start != end { + if (start + 1) != end { + w.WriteByte('-') + } + u.appendToPattern(w, end, escapeUnprintable) + } + } + } else { + // Default; emit the ranges as pairs + for i := 0; i < count; i++ { + start := u.RangeStart(i) + end := u.RangeEnd(i) + u.appendToPattern(w, start, escapeUnprintable) + if start != end { + if (start + 1) != end { + w.WriteByte('-') + } + u.appendToPattern(w, end, escapeUnprintable) + } + } + } + + w.WriteByte(']') +} + +func (u *UnicodeSet) appendToPattern(w *strings.Builder, c rune, escapeUnprintable bool) { + if escapeUnprintable && pattern.IsUnprintable(c) { + // Use hex escape notation (\uxxxx or \Uxxxxxxxx) for anything + // unprintable + pattern.EscapeUnprintable(w, c) + return + } + + // Okay to let ':' pass through + switch c { + case '[', ']', '-', '^', '&', '\\', '{', '}', ':', '$': + w.WriteByte('\\') + default: + // Escape whitespace + if pattern.IsWhitespace(c) { + w.WriteByte('\\') + } + } + w.WriteRune(c) +} diff --git a/go/mysql/icuregex/internal/uset/unicode_set.go b/go/mysql/icuregex/internal/uset/unicode_set.go new file mode 100644 index 00000000000..e2f7bd8cbca --- /dev/null +++ b/go/mysql/icuregex/internal/uset/unicode_set.go @@ -0,0 +1,686 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uset + +import ( + "fmt" + "slices" +) + +// HIGH_VALUE > all valid values. 110000 for codepoints +const unicodeSetHigh = 0x0110000 + +// LOW <= all valid values. ZERO for codepoints +const unicodeSetLow = 0x000000 + +const ( + /** + * Minimum value that can be stored in a UnicodeSet. + * @stable ICU 2.4 + */ + MinValue = 0 + + /** + * Maximum value that can be stored in a UnicodeSet. + * @stable ICU 2.4 + */ + MaxValue = 0x10ffff +) + +type UnicodeSet struct { + list []rune + buffer []rune + frozen *frozen +} + +func New() *UnicodeSet { + buf := make([]rune, 1, 25) + buf[0] = unicodeSetHigh + return &UnicodeSet{list: buf} +} + +func FromRunes(list []rune) *UnicodeSet { + return &UnicodeSet{list: list} +} + +func (u *UnicodeSet) ensureBufferCapacity(c int) { + if cap(u.buffer) < c { + u.buffer = make([]rune, c) + return + } + u.buffer = u.buffer[:cap(u.buffer)] +} + +func (u *UnicodeSet) addbuffer(other []rune, polarity int8) { + if u.frozen != nil { + panic("UnicodeSet is frozen") + } + u.ensureBufferCapacity(len(u.list) + len(other)) + + i := 1 + j := 1 + k := 0 + + a := u.list[0] + b := other[0] + + for { + switch polarity { + case 0: + if a < b { + if k > 0 && a <= u.buffer[k-1] { + k-- + a = max(u.list[i], u.buffer[k]) + } else { + u.buffer[k] = a + k++ + a = u.list[i] + } + i++ + polarity ^= 1 + } else if b < a { + if k > 0 && b <= u.buffer[k-1] { + k-- + b = max(other[j], u.buffer[k]) + } else { + u.buffer[k] = b + k++ + b = other[j] + } + j++ + polarity ^= 2 + } else { + if a == unicodeSetHigh { + goto loopEnd + } + if k > 0 && a <= u.buffer[k-1] { + k-- + a = max(u.list[i], u.buffer[k]) + } else { + u.buffer[k] = a + k++ + a = u.list[i] + } + i++ + polarity ^= 1 + b = other[j] + j++ + polarity ^= 2 + } + case 3: + if b <= a { + if a == unicodeSetHigh { + goto loopEnd + } + u.buffer[k] = a + k++ + } else { + if b == unicodeSetHigh { + goto loopEnd + } + u.buffer[k] = b + k++ + } + a = u.list[i] + i++ + polarity ^= 1 + b = other[j] + j++ + polarity ^= 2 + case 1: + if a < b { + u.buffer[k] = a + k++ + a = u.list[i] + i++ + polarity ^= 1 + } else if b < a { + b = other[j] + j++ + polarity ^= 2 + } else { + if a == unicodeSetHigh { + goto loopEnd + } + a = u.list[i] + i++ + polarity ^= 1 + b = other[j] + j++ + polarity ^= 2 + } + case 2: + if b < a { + u.buffer[k] = b + k++ + b = other[j] + j++ + polarity ^= 2 + } else if a < b { + a = u.list[i] + i++ + polarity ^= 1 + } else { + if a == unicodeSetHigh { + goto loopEnd + } + a = u.list[i] + i++ + polarity ^= 1 + b = other[j] + j++ + polarity ^= 2 + } + } + } + +loopEnd: + u.buffer[k] = unicodeSetHigh + k++ + + u.list, u.buffer = u.buffer[:k], u.list +} + +func pinCodePoint(c *rune) rune { + if *c < unicodeSetLow { + *c = unicodeSetLow + } else if *c > (unicodeSetHigh - 1) { + *c = unicodeSetHigh - 1 + } + return *c +} + +func (u *UnicodeSet) AddRune(c rune) { + if u.frozen != nil { + panic("UnicodeSet is frozen") + } + + // find smallest i such that c < list[i] + // if odd, then it is IN the set + // if even, then it is OUT of the set + i := u.findCodePoint(pinCodePoint(&c)) + + // already in set? + if (i & 1) != 0 { + return + } + + // HIGH is 0x110000 + // assert(list[len-1] == HIGH); + + // empty = [HIGH] + // [start_0, limit_0, start_1, limit_1, HIGH] + + // [..., start_k-1, limit_k-1, start_k, limit_k, ..., HIGH] + // ^ + // list[i] + + // i == 0 means c is before the first range + if c == u.list[i]-1 { + // c is before start of next range + u.list[i] = c + // if we touched the HIGH mark, then add a new one + if c == (unicodeSetHigh - 1) { + u.list = append(u.list, unicodeSetHigh) + } + if i > 0 && c == u.list[i-1] { + // collapse adjacent ranges + + // [..., start_k-1, c, c, limit_k, ..., HIGH] + // ^ + // list[i] + for k := i - 1; k < len(u.list)-2; k++ { + u.list[k] = u.list[k+2] + } + u.list = u.list[:len(u.list)-2] + } + } else if i > 0 && c == u.list[i-1] { + // c is after end of prior range + u.list[i-1]++ + // no need to check for collapse here + } else { + // At this point we know the new char is not adjacent to + // any existing ranges, and it is not 10FFFF. + + // [..., start_k-1, limit_k-1, start_k, limit_k, ..., HIGH] + // ^ + // list[i] + + // [..., start_k-1, limit_k-1, c, c+1, start_k, limit_k, ..., HIGH] + // ^ + // list[i] + u.list = slices.Insert(u.list, i, c, c+1) + } +} + +func (u *UnicodeSet) AddRuneRange(start, end rune) { + if pinCodePoint(&start) < pinCodePoint(&end) { + limit := end + 1 + // Fast path for adding a new range after the last one. + // Odd list length: [..., lastStart, lastLimit, HIGH] + if (len(u.list) & 1) != 0 { + // If the list is empty, set lastLimit low enough to not be adjacent to 0. + var lastLimit rune + if len(u.list) == 1 { + lastLimit = -2 + } else { + lastLimit = u.list[len(u.list)-2] + } + if lastLimit <= start { + if lastLimit == start { + // Extend the last range. + u.list[len(u.list)-2] = limit + if limit == unicodeSetHigh { + u.list = u.list[:len(u.list)-1] + } + } else { + u.list[len(u.list)-1] = start + if limit < unicodeSetHigh { + u.list = append(u.list, limit) + u.list = append(u.list, unicodeSetHigh) + } else { // limit == UNICODESET_HIGH + u.list = append(u.list, unicodeSetHigh) + } + } + return + } + } + // This is slow. Could be much faster using findCodePoint(start) + // and modifying the list, dealing with adjacent & overlapping ranges. + addRange := [3]rune{start, limit, unicodeSetHigh} + u.addbuffer(addRange[:], 0) + } else if start == end { + u.AddRune(start) + } +} + +func (u *UnicodeSet) AddAll(u2 *UnicodeSet) { + if len(u2.list) > 0 { + u.addbuffer(u2.list, 0) + } +} + +func (u *UnicodeSet) Complement() { + if u.frozen != nil { + panic("UnicodeSet is frozen") + } + if u.list[0] == unicodeSetLow { + copy(u.list, u.list[1:]) + u.list = u.list[:len(u.list)-1] + } else { + u.list = slices.Insert(u.list, 0, unicodeSetLow) + } +} + +func (u *UnicodeSet) RemoveRuneRange(start, end rune) { + if pinCodePoint(&start) < pinCodePoint(&end) { + r := [3]rune{start, end + 1, unicodeSetHigh} + u.retain(r[:], 2) + } +} + +func (u *UnicodeSet) RemoveAll(c *UnicodeSet) { + u.retain(c.list, 2) +} + +func (u *UnicodeSet) RetainAll(c *UnicodeSet) { + u.retain(c.list, 0) +} + +func (u *UnicodeSet) retain(other []rune, polarity int8) { + if u.frozen != nil { + panic("UnicodeSet is frozen") + } + + u.ensureBufferCapacity(len(u.list) + len(other)) + + i := 1 + j := 1 + k := 0 + + a := u.list[0] + b := other[0] + + // change from xor is that we have to check overlapping pairs + // polarity bit 1 means a is second, bit 2 means b is. + for { + switch polarity { + case 0: // both first; drop the smaller + if a < b { // drop a + a = u.list[i] + i++ + polarity ^= 1 + } else if b < a { // drop b + b = other[j] + j++ + polarity ^= 2 + } else { // a == b, take one, drop other + if a == unicodeSetHigh { + goto loop_end + } + u.buffer[k] = a + k++ + a = u.list[i] + i++ + polarity ^= 1 + b = other[j] + j++ + polarity ^= 2 + } + case 3: // both second; take lower if unequal + if a < b { // take a + u.buffer[k] = a + k++ + a = u.list[i] + i++ + polarity ^= 1 + } else if b < a { // take b + u.buffer[k] = b + k++ + b = other[j] + j++ + polarity ^= 2 + } else { // a == b, take one, drop other + if a == unicodeSetHigh { + goto loop_end + } + u.buffer[k] = a + k++ + a = u.list[i] + i++ + polarity ^= 1 + b = other[j] + j++ + polarity ^= 2 + } + case 1: // a second, b first; + if a < b { // NO OVERLAP, drop a + a = u.list[i] + i++ + polarity ^= 1 + } else if b < a { // OVERLAP, take b + u.buffer[k] = b + k++ + b = other[j] + j++ + polarity ^= 2 + } else { // a == b, drop both! + if a == unicodeSetHigh { + goto loop_end + } + a = u.list[i] + i++ + polarity ^= 1 + b = other[j] + j++ + polarity ^= 2 + } + case 2: // a first, b second; if a < b, overlap + if b < a { // no overlap, drop b + b = other[j] + j++ + polarity ^= 2 + } else if a < b { // OVERLAP, take a + u.buffer[k] = a + k++ + a = u.list[i] + i++ + polarity ^= 1 + } else { // a == b, drop both! + if a == unicodeSetHigh { + goto loop_end + } + a = u.list[i] + i++ + polarity ^= 1 + b = other[j] + j++ + polarity ^= 2 + } + } + } + +loop_end: + u.buffer[k] = unicodeSetHigh // terminate + k++ + u.list, u.buffer = u.buffer[:k], u.list +} + +func (u *UnicodeSet) Clear() { + if u.frozen != nil { + panic("UnicodeSet is frozen") + } + u.list = u.list[:1] + u.list[0] = unicodeSetHigh +} + +func (u *UnicodeSet) Len() (n int) { + count := u.RangeCount() + for i := 0; i < count; i++ { + n += int(u.RangeEnd(i)) - int(u.RangeStart(i)) + 1 + } + return +} + +func (u *UnicodeSet) RangeCount() int { + return len(u.list) / 2 +} + +func (u *UnicodeSet) RangeStart(idx int) rune { + return u.list[idx*2] +} + +func (u *UnicodeSet) RangeEnd(idx int) rune { + return u.list[idx*2+1] - 1 +} + +func (u *UnicodeSet) RuneAt(idx int) rune { + if idx >= 0 { + // len2 is the largest even integer <= len, that is, it is len + // for even values and len-1 for odd values. With odd values + // the last entry is UNICODESET_HIGH. + len2 := len(u.list) + if (len2 & 0x1) != 0 { + len2-- + } + + var i int + for i < len2 { + start := u.list[i] + count := int(u.list[i+1] - start) + i += 2 + if idx < count { + return start + rune(idx) + } + idx -= count + } + } + return -1 +} + +func (u *UnicodeSet) ContainsRune(c rune) bool { + if f := u.frozen; f != nil { + if c < 0 { + return false + } else if c <= 0xff { + return f.latin1Contains[c] != 0 + } else if c <= 0x7ff { + return (f.table7FF[c&0x3f] & (uint32(1) << (c >> 6))) != 0 + } else if c < 0xd800 || (c >= 0xe000 && c <= 0xffff) { + lead := c >> 12 + twoBits := (f.bmpBlockBits[(c>>6)&0x3f] >> lead) & 0x10001 + if twoBits <= 1 { + // All 64 code points with the same bits 15..6 + // are either in the set or not. + return twoBits != 0 + } + // Look up the code point in its 4k block of code points. + return f.containsSlow(u.list, c, f.list4kStarts[lead], f.list4kStarts[lead+1]) + } else if c <= 0x10ffff { + // surrogate or supplementary code point + return f.containsSlow(u.list, c, f.list4kStarts[0xd], f.list4kStarts[0x11]) + } + // Out-of-range code points get FALSE, consistent with long-standing + // behavior of UnicodeSet::contains(c). + return false + } + + if c >= unicodeSetHigh { + return false + } + i := u.findCodePoint(c) + return (i & 1) != 0 +} + +func (u *UnicodeSet) ContainsRuneRange(from, to rune) bool { + i := u.findCodePoint(from) + return (i&1) != 0 && to < u.list[i] +} + +func (u *UnicodeSet) findCodePoint(c rune) int { + /* Examples: + findCodePoint(c) + set list[] c=0 1 3 4 7 8 + === ============== =========== + [] [110000] 0 0 0 0 0 0 + [\u0000-\u0003] [0, 4, 110000] 1 1 1 2 2 2 + [\u0004-\u0007] [4, 8, 110000] 0 0 0 1 1 2 + [:Any:] [0, 110000] 1 1 1 1 1 1 + */ + + // Return the smallest i such that c < list[i]. Assume + // list[len - 1] == HIGH and that c is legal (0..HIGH-1). + if c < u.list[0] { + return 0 + } + + // High runner test. c is often after the last range, so an + // initial check for this condition pays off. + lo := 0 + hi := len(u.list) - 1 + if lo >= hi || c >= u.list[hi-1] { + return hi + } + + // invariant: c >= list[lo] + // invariant: c < list[hi] + for { + i := (lo + hi) >> 1 + if i == lo { + break // Found! + } else if c < u.list[i] { + hi = i + } else { + lo = i + } + } + return hi +} + +func (u *UnicodeSet) AddString(chars string) { + for _, c := range chars { + u.AddRune(c) + } +} + +type Filter func(ch rune) bool + +func (u *UnicodeSet) ApplyFilter(inclusions *UnicodeSet, filter Filter) { + // Logically, walk through all Unicode characters, noting the start + // and end of each range for which filter.contain(c) is + // true. Add each range to a set. + // + // To improve performance, use an inclusions set which + // encodes information about character ranges that are known + // to have identical properties. + // inclusions contains the first characters of + // same-value ranges for the given property. + + u.Clear() + + startHasProperty := rune(-1) + limitRange := inclusions.RangeCount() + + for j := 0; j < limitRange; j++ { + // get current range + start := inclusions.RangeStart(j) + end := inclusions.RangeEnd(j) + + // for all the code points in the range, process + for ch := start; ch <= end; ch++ { + // only add to this UnicodeSet on inflection points -- + // where the hasProperty value changes to false + if filter(ch) { + if startHasProperty < 0 { + startHasProperty = ch + } + } else if startHasProperty >= 0 { + u.AddRuneRange(startHasProperty, ch-1) + startHasProperty = -1 + } + } + } + if startHasProperty >= 0 { + u.AddRuneRange(startHasProperty, 0x10FFFF) + } +} + +func (u *UnicodeSet) Clone() *UnicodeSet { + return &UnicodeSet{list: slices.Clone(u.list)} +} + +func (u *UnicodeSet) IsEmpty() bool { + return len(u.list) == 1 +} + +func (u *UnicodeSet) CopyFrom(set *UnicodeSet) { + if u.frozen != nil { + panic("UnicodeSet is frozen") + } + u.list = slices.Clone(set.list) +} + +func (u *UnicodeSet) Equals(other *UnicodeSet) bool { + return slices.Equal(u.list, other.list) +} + +func (u *UnicodeSet) Freeze() *UnicodeSet { + u.frozen = freeze(u.list) + return u +} + +func (u *UnicodeSet) FreezeCheck_() error { + if u == nil { + return nil + } + if u.frozen == nil { + return fmt.Errorf("UnicodeSet is not frozen") + } + for r := rune(0); r <= 0x10ffff; r++ { + want := (u.findCodePoint(r) & 1) != 0 + got := u.ContainsRune(r) + if want != got { + return fmt.Errorf("rune '%c' (U+%04X) did not freeze", r, r) + } + } + return nil +} diff --git a/go/mysql/icuregex/internal/uset/unicode_set_test.go b/go/mysql/icuregex/internal/uset/unicode_set_test.go new file mode 100644 index 00000000000..908abd8889d --- /dev/null +++ b/go/mysql/icuregex/internal/uset/unicode_set_test.go @@ -0,0 +1,43 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package uset + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSimpleBelong(t *testing.T) { + ss1 := New() + ss1.AddString("*?+[(){}^$|\\.") + ss2 := New() + ss2.AddString("*?+[(){}^$|\\.") + ss2.Complement() + ss3 := New() + ss3.AddRune('*') + ss3.AddRune('?') + + assert.True(t, ss1.ContainsRune('(')) + assert.False(t, ss2.ContainsRune('(')) + assert.True(t, ss3.ContainsRune('*')) +} diff --git a/go/mysql/icuregex/internal/utf16/helpers.go b/go/mysql/icuregex/internal/utf16/helpers.go new file mode 100644 index 00000000000..bdf53ae731c --- /dev/null +++ b/go/mysql/icuregex/internal/utf16/helpers.go @@ -0,0 +1,65 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utf16 + +import "unicode/utf16" + +func IsLead(c rune) bool { + return (uint32(c) & 0xfffffc00) == 0xd800 +} + +func IsTrail(c rune) bool { + return (uint32(c) & 0xfffffc00) == 0xdc00 +} + +/** + * Is this code point a surrogate (U+d800..U+dfff)? + * @param c 32-bit code point + * @return true or false + * @stable ICU 2.4 + */ +func IsSurrogate(c rune) bool { + return (uint32(c) & 0xfffff800) == 0xd800 +} + +/** + * Assuming c is a surrogate code point (U_IS_SURROGATE(c)), + * is it a lead surrogate? + * @param c 32-bit code point + * @return true or false + * @stable ICU 2.4 + */ +func IsSurrogateLead(c rune) bool { + return (uint32(c) & 0x400) == 0 +} + +func DecodeRune(a, b rune) rune { + return utf16.DecodeRune(a, b) +} + +func NextUnsafe(s []uint16) (rune, []uint16) { + c := rune(s[0]) + if !IsLead(c) { + return c, s[1:] + } + return DecodeRune(c, rune(s[1])), s[2:] +} diff --git a/go/mysql/icuregex/internal/utrie/ucptrie.go b/go/mysql/icuregex/internal/utrie/ucptrie.go new file mode 100644 index 00000000000..74e4eb9b2fa --- /dev/null +++ b/go/mysql/icuregex/internal/utrie/ucptrie.go @@ -0,0 +1,708 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utrie + +import ( + "errors" + "fmt" + + "vitess.io/vitess/go/mysql/icuregex/internal/udata" +) + +type UcpTrie struct { + index []uint16 + data8 []uint8 + data16 []uint16 + data32 []uint32 + + indexLength, dataLength int32 + /** Start of the last range which ends at U+10FFFF. @internal */ + highStart rune + shifted12HighStart uint16 + + typ ucpTrieType + valueWidth ucpTrieValueWidth + + /** + * Internal index-3 null block offset. + * Set to an impossibly high value (e.g., 0xffff) if there is no dedicated index-3 null block. + * @internal + */ + index3NullOffset uint16 + /** + * Internal data null block offset, not shifted. + * Set to an impossibly high value (e.g., 0xfffff) if there is no dedicated data null block. + * @internal + */ + dataNullOffset int32 + + nullValue uint32 +} + +/** + * Selectors for the type of a UCPTrie. + * Different trade-offs for size vs. speed. + * + * @see umutablecptrie_buildImmutable + * @see ucptrie_openFromBinary + * @see ucptrie_getType + * @stable ICU 63 + */ +type ucpTrieType int8 + +const ( + /** + * For ucptrie_openFromBinary() to accept any type. + * ucptrie_getType() will return the actual type. + * @stable ICU 63 + */ + typeAny ucpTrieType = iota - 1 + /** + * Fast/simple/larger BMP data structure. Use functions and "fast" macros. + * @stable ICU 63 + */ + typeFast + /** + * Small/slower BMP data structure. Use functions and "small" macros. + * @stable ICU 63 + */ + typeSmall +) + +/** + * Selectors for the number of bits in a UCPTrie data value. + * + * @see umutablecptrie_buildImmutable + * @see ucptrie_openFromBinary + * @see ucptrie_getValueWidth + * @stable ICU 63 + */ +type ucpTrieValueWidth int8 + +const ( + /** + * For ucptrie_openFromBinary() to accept any data value width. + * ucptrie_getValueWidth() will return the actual data value width. + * @stable ICU 63 + */ + valueBitsAny ucpTrieValueWidth = iota - 1 + /** + * The trie stores 16 bits per data value. + * It returns them as unsigned values 0..0xffff=65535. + * @stable ICU 63 + */ + valueBits16 + /** + * The trie stores 32 bits per data value. + * @stable ICU 63 + */ + valueBits32 + /** + * The trie stores 8 bits per data value. + * It returns them as unsigned values 0..0xff=255. + * @stable ICU 63 + */ + valueBits8 +) + +const ucpTrieSig = 0x54726933 +const ucpTrieOESig = 0x33697254 + +/** + * Constants for use with UCPTrieHeader.options. + * @internal + */ +const ( + optionsDataLengthMask = 0xf000 + optionsDataNullOffsetMask = 0xf00 + optionsReservedMask = 0x38 + optionsValueBitsMask = 7 +) + +const ( + /** @internal */ + fastShift = 6 + + /** Number of entries in a data block for code points below the fast limit. 64=0x40 @internal */ + fastDataBlockLength = 1 << fastShift + + /** Mask for getting the lower bits for the in-fast-data-block offset. @internal */ + fastDataMask = fastDataBlockLength - 1 + + /** @internal */ + smallMax = 0xfff + + /** + * Offset from dataLength (to be subtracted) for fetching the + * value returned for out-of-range code points and ill-formed UTF-8/16. + * @internal + */ + errorValueNegDataOffset = 1 + /** + * Offset from dataLength (to be subtracted) for fetching the + * value returned for code points highStart..U+10FFFF. + * @internal + */ + highValueNegDataOffset = 2 +) + +// Internal constants. +const ( + /** The length of the BMP index table. 1024=0x400 */ + bmpIndexLength = 0x10000 >> fastShift + + smallLimit = 0x1000 + smallIndexLength = smallLimit >> fastShift + + /** Shift size for getting the index-3 table offset. */ + ucpShift3 = 4 + + /** Shift size for getting the index-2 table offset. */ + ucpShift2 = 5 + ucpShift3 + + /** Shift size for getting the index-1 table offset. */ + ucpShift1 = 5 + ucpShift2 + + /** + * Difference between two shift sizes, + * for getting an index-2 offset from an index-3 offset. 5=9-4 + */ + ucpShift2Min3 = ucpShift2 - ucpShift3 + + /** + * Difference between two shift sizes, + * for getting an index-1 offset from an index-2 offset. 5=14-9 + */ + ucpShift1Min2 = ucpShift1 - ucpShift2 + + /** + * Number of index-1 entries for the BMP. (4) + * This part of the index-1 table is omitted from the serialized form. + */ + ucpOmittedBmpIndex1Length = 0x10000 >> ucpShift1 + + /** Number of entries in an index-2 block. 32=0x20 */ + ucpIndex2BlockLength = 1 << ucpShift1Min2 + + /** Mask for getting the lower bits for the in-index-2-block offset. */ + ucpIndex2Mask = ucpIndex2BlockLength - 1 + + /** Number of code points per index-2 table entry. 512=0x200 */ + ucpCpPerIndex2Entry = 1 << ucpShift2 + + /** Number of entries in an index-3 block. 32=0x20 */ + ucpIndex3BlockLength = 1 << ucpShift2Min3 + + /** Mask for getting the lower bits for the in-index-3-block offset. */ + ucpIndex3Mask = ucpIndex3BlockLength - 1 + + /** Number of entries in a small data block. 16=0x10 */ + ucpSmallDataBlockLength = 1 << ucpShift3 + + /** Mask for getting the lower bits for the in-small-data-block offset. */ + ucpSmallDataMask = ucpSmallDataBlockLength - 1 +) + +func UcpTrieFromBytes(bytes *udata.Bytes) (*UcpTrie, error) { + type ucpHeader struct { + /** "Tri3" in big-endian US-ASCII (0x54726933) */ + signature uint32 + + /** + * Options bit field: + * Bits 15..12: Data length bits 19..16. + * Bits 11..8: Data null block offset bits 19..16. + * Bits 7..6: UCPTrieType + * Bits 5..3: Reserved (0). + * Bits 2..0: UCPTrieValueWidth + */ + options uint16 + + /** Total length of the index tables. */ + indexLength uint16 + + /** Data length bits 15..0. */ + dataLength uint16 + + /** Index-3 null block offset, 0x7fff or 0xffff if none. */ + index3NullOffset uint16 + + /** Data null block offset bits 15..0, 0xfffff if none. */ + dataNullOffset uint16 + + /** + * First code point of the single-value range ending with U+10ffff, + * rounded up and then shifted right by UCPTRIE_SHIFT_2. + */ + shiftedHighStart uint16 + } + + var header ucpHeader + header.signature = bytes.Uint32() + + switch header.signature { + case ucpTrieSig: + case ucpTrieOESig: + return nil, errors.New("unsupported: BigEndian encoding") + default: + return nil, fmt.Errorf("invalid signature for UcpTrie: 0x%08x", header.signature) + } + + header.options = bytes.Uint16() + header.indexLength = bytes.Uint16() + header.dataLength = bytes.Uint16() + header.index3NullOffset = bytes.Uint16() + header.dataNullOffset = bytes.Uint16() + header.shiftedHighStart = bytes.Uint16() + + typeInt := (header.options >> 6) & 3 + valueWidthInt := header.options & optionsValueBitsMask + if typeInt > uint16(typeSmall) || valueWidthInt > uint16(valueBits8) || + (header.options&optionsReservedMask) != 0 { + return nil, errors.New("invalid options for serialized UcpTrie") + } + actualType := ucpTrieType(typeInt) + actualValueWidth := ucpTrieValueWidth(valueWidthInt) + + trie := &UcpTrie{ + indexLength: int32(header.indexLength), + dataLength: int32(((header.options & optionsDataLengthMask) << 4) | header.dataLength), + index3NullOffset: header.index3NullOffset, + dataNullOffset: int32(((header.options & optionsDataNullOffsetMask) << 8) | header.dataNullOffset), + highStart: rune(header.shiftedHighStart) << ucpShift2, + typ: actualType, + valueWidth: actualValueWidth, + } + nullValueOffset := trie.dataNullOffset + if nullValueOffset >= trie.dataLength { + nullValueOffset = trie.dataLength - highValueNegDataOffset + } + + trie.shifted12HighStart = uint16((trie.highStart + 0xfff) >> 12) + trie.index = bytes.Uint16Slice(int32(header.indexLength)) + switch actualValueWidth { + case valueBits16: + trie.data16 = bytes.Uint16Slice(trie.dataLength) + trie.nullValue = uint32(trie.data16[nullValueOffset]) + case valueBits32: + trie.data32 = bytes.Uint32Slice(trie.dataLength) + trie.nullValue = trie.data32[nullValueOffset] + case valueBits8: + trie.data8 = bytes.Uint8Slice(trie.dataLength) + trie.nullValue = uint32(trie.data8[nullValueOffset]) + } + + return trie, nil +} + +func (t *UcpTrie) Get(c rune) uint32 { + var dataIndex int32 + if c <= 0x7f { + // linear ASCII + dataIndex = c + } else { + var fastMax rune + if t.typ == typeFast { + fastMax = 0xffff + } else { + fastMax = smallMax + } + dataIndex = t.cpIndex(fastMax, c) + } + return t.getValue(dataIndex) +} + +func (t *UcpTrie) getValue(dataIndex int32) uint32 { + switch t.valueWidth { + case valueBits16: + return uint32(t.data16[dataIndex]) + case valueBits32: + return t.data32[dataIndex] + case valueBits8: + return uint32(t.data8[dataIndex]) + default: + // Unreachable if the trie is properly initialized. + return 0xffffffff + } +} + +/** Internal trie getter for a code point below the fast limit. Returns the data index. @internal */ +func (t *UcpTrie) fastIndex(c rune) int32 { + return int32(t.index[c>>fastShift]) + (c & fastDataMask) +} + +/** Internal trie getter for a code point at or above the fast limit. Returns the data index. @internal */ +func (t *UcpTrie) smallIndex(c rune) int32 { + if c >= t.highStart { + return t.dataLength - highValueNegDataOffset + } + return t.internalSmallIndex(c) +} + +func (t *UcpTrie) internalSmallIndex(c rune) int32 { + i1 := c >> ucpShift1 + if t.typ == typeFast { + i1 += bmpIndexLength - ucpOmittedBmpIndex1Length + } else { + i1 += smallIndexLength + } + i3Block := int32(t.index[int32(t.index[i1])+((c>>ucpShift2)&ucpIndex2Mask)]) + i3 := (c >> ucpShift3) & ucpIndex3Mask + var dataBlock int32 + if (i3Block & 0x8000) == 0 { + // 16-bit indexes + dataBlock = int32(t.index[i3Block+i3]) + } else { + // 18-bit indexes stored in groups of 9 entries per 8 indexes. + i3Block = (i3Block & 0x7fff) + (i3 & ^7) + (i3 >> 3) + i3 &= 7 + dataBlock = int32(t.index[i3Block]) << (2 + (2 * i3)) & 0x30000 + i3Block++ + dataBlock |= int32(t.index[i3Block+i3]) + } + return dataBlock + (c & ucpSmallDataMask) +} + +/** + * Internal trie getter for a code point, with checking that c is in U+0000..10FFFF. + * Returns the data index. + * @internal + */ +func (t *UcpTrie) cpIndex(fastMax, c rune) int32 { + if c <= fastMax { + return t.fastIndex(c) + } + if c <= 0x10ffff { + return t.smallIndex(c) + } + return t.dataLength - errorValueNegDataOffset +} + +/** + * Selectors for how ucpmap_getRange() etc. should report value ranges overlapping with surrogates. + * Most users should use UCPMAP_RANGE_NORMAL. + * + * @see ucpmap_getRange + * @see ucptrie_getRange + * @see umutablecptrie_getRange + * @stable ICU 63 + */ +type UcpMapRangeOption int8 + +const ( + /** + * ucpmap_getRange() enumerates all same-value ranges as stored in the map. + * Most users should use this option. + * @stable ICU 63 + */ + UcpMapRangeNormal UcpMapRangeOption = iota + /** + * ucpmap_getRange() enumerates all same-value ranges as stored in the map, + * except that lead surrogates (U+D800..U+DBFF) are treated as having the + * surrogateValue, which is passed to getRange() as a separate parameter. + * The surrogateValue is not transformed via filter(). + * See U_IS_LEAD(c). + * + * Most users should use UCPMAP_RANGE_NORMAL instead. + * + * This option is useful for maps that map surrogate code *units* to + * special values optimized for UTF-16 string processing + * or for special error behavior for unpaired surrogates, + * but those values are not to be associated with the lead surrogate code *points*. + * @stable ICU 63 + */ + UcpMapRangeFixedLeadSurrogates + /** + * ucpmap_getRange() enumerates all same-value ranges as stored in the map, + * except that all surrogates (U+D800..U+DFFF) are treated as having the + * surrogateValue, which is passed to getRange() as a separate parameter. + * The surrogateValue is not transformed via filter(). + * See U_IS_SURROGATE(c). + * + * Most users should use UCPMAP_RANGE_NORMAL instead. + * + * This option is useful for maps that map surrogate code *units* to + * special values optimized for UTF-16 string processing + * or for special error behavior for unpaired surrogates, + * but those values are not to be associated with the lead surrogate code *points*. + * @stable ICU 63 + */ + UcpMapRangeFixedAllSurrogates +) + +/** + * Callback function type: Modifies a map value. + * Optionally called by ucpmap_getRange()/ucptrie_getRange()/umutablecptrie_getRange(). + * The modified value will be returned by the getRange function. + * + * Can be used to ignore some of the value bits, + * make a filter for one of several values, + * return a value index computed from the map value, etc. + * + * @param context an opaque pointer, as passed into the getRange function + * @param value a value from the map + * @return the modified value + * @stable ICU 63 + */ +type UcpMapValueFilter func(value uint32) uint32 + +/** + * GetRange returns the last code point such that all those from start to there have the same value. + * Can be used to efficiently iterate over all same-value ranges in a trie. + * (This is normally faster than iterating over code points and get()ting each value, + * but much slower than a data structure that stores ranges directly.) + * + * If the UCPMapValueFilter function pointer is not NULL, then + * the value to be delivered is passed through that function, and the return value is the end + * of the range where all values are modified to the same actual value. + * The value is unchanged if that function pointer is NULL. + * + * Example: + * \code + * UChar32 start = 0, end; + * uint32_t value; + * while ((end = ucptrie_getRange(trie, start, UCPMAP_RANGE_NORMAL, 0, + * NULL, NULL, &value)) >= 0) { + * // Work with the range start..end and its value. + * start = end + 1; + * } + * \endcode + * + * @param trie the trie + * @param start range start + * @param option defines whether surrogates are treated normally, + * or as having the surrogateValue; usually UCPMAP_RANGE_NORMAL + * @param surrogateValue value for surrogates; ignored if option==UCPMAP_RANGE_NORMAL + * @param filter a pointer to a function that may modify the trie data value, + * or NULL if the values from the trie are to be used unmodified + * @param context an opaque pointer that is passed on to the filter function + * @param pValue if not NULL, receives the value that every code point start..end has; + * may have been modified by filter(context, trie value) + * if that function pointer is not NULL + * @return the range end code point, or -1 if start is not a valid code point + * @stable ICU 63 + */ +func (t *UcpTrie) GetRange(start rune, option UcpMapRangeOption, surrogateValue uint32, filter UcpMapValueFilter) (rune, uint32) { + if option == UcpMapRangeNormal { + return t.getRange(start, filter) + } + + var surrEnd rune + if option == UcpMapRangeFixedAllSurrogates { + surrEnd = 0xdfff + } else { + surrEnd = 0xdbff + } + end, value := t.getRange(start, filter) + if end < 0xd7ff || start > surrEnd { + return end, value + } + if value == surrogateValue { + if end >= surrEnd { + // Surrogates followed by a non-surrogateValue range, + // or surrogates are part of a larger surrogateValue range. + return end, value + } + } else { + if start <= 0xd7ff { + return 0xd7ff, value // Non-surrogateValue range ends before surrogateValue surrogates. + } + // Start is a surrogate with a non-surrogateValue code *unit* value. + // Return a surrogateValue code *point* range. + value = surrogateValue + if end > surrEnd { + return surrEnd, value // Surrogate range ends before non-surrogateValue rest of range. + } + } + // See if the surrogateValue surrogate range can be merged with + // an immediately following range. + end2, value2 := t.getRange(surrEnd+1, filter) + if value2 == surrogateValue { + return end2, value + } + return surrEnd, value +} + +const maxUnicode = 0x10ffff + +func (t *UcpTrie) getRange(start rune, filter UcpMapValueFilter) (rune, uint32) { + if start > maxUnicode { + return -1, 0 + } + + if start >= t.highStart { + di := t.dataLength - highValueNegDataOffset + value := t.getValue(di) + if filter != nil { + value = filter(value) + } + return maxUnicode, value + } + + nullValue := t.nullValue + if filter != nil { + nullValue = filter(nullValue) + } + index := t.index + + prevI3Block := int32(-1) + prevBlock := int32(-1) + c := start + var trieValue uint32 + value := nullValue + haveValue := false + for { + var i3Block, i3, i3BlockLength, dataBlockLength int32 + if c <= 0xffff && (t.typ == typeFast || c <= smallMax) { + i3Block = 0 + i3 = c >> fastShift + if t.typ == typeFast { + i3BlockLength = bmpIndexLength + } else { + i3BlockLength = smallIndexLength + } + dataBlockLength = fastDataBlockLength + } else { + // Use the multi-stage index. + i1 := c >> ucpShift1 + if t.typ == typeFast { + i1 += bmpIndexLength - ucpOmittedBmpIndex1Length + } else { + i1 += smallIndexLength + } + shft := c >> ucpShift2 + idx := int32(t.index[i1]) + (shft & ucpIndex2Mask) + i3Block = int32(t.index[idx]) + if i3Block == prevI3Block && (c-start) >= ucpCpPerIndex2Entry { + // The index-3 block is the same as the previous one, and filled with value. + c += ucpCpPerIndex2Entry + continue + } + prevI3Block = i3Block + if i3Block == int32(t.index3NullOffset) { + // This is the index-3 null block. + if haveValue { + if nullValue != value { + return c - 1, value + } + } else { + trieValue = t.nullValue + value = nullValue + haveValue = true + } + prevBlock = t.dataNullOffset + c = (c + ucpCpPerIndex2Entry) & ^(ucpCpPerIndex2Entry - 1) + continue + } + i3 = (c >> ucpShift3) & ucpIndex3Mask + i3BlockLength = ucpIndex3BlockLength + dataBlockLength = ucpSmallDataBlockLength + } + + // Enumerate data blocks for one index-3 block. + for { + var block int32 + if (i3Block & 0x8000) == 0 { + block = int32(index[i3Block+i3]) + } else { + // 18-bit indexes stored in groups of 9 entries per 8 indexes. + group := (i3Block & 0x7fff) + (i3 & ^7) + (i3 >> 3) + gi := i3 & 7 + block = (int32(index[group]) << (2 + (2 * gi))) & 0x30000 + group++ + block |= int32(index[group+gi]) + } + if block == prevBlock && (c-start) >= dataBlockLength { + // The block is the same as the previous one, and filled with value. + c += dataBlockLength + } else { + dataMask := dataBlockLength - 1 + prevBlock = block + if block == t.dataNullOffset { + // This is the data null block. + if haveValue { + if nullValue != value { + return c - 1, value + } + } else { + trieValue = t.nullValue + value = nullValue + haveValue = true + } + c = (c + dataBlockLength) & ^dataMask + } else { + di := block + (c & dataMask) + trieValue2 := t.getValue(di) + if haveValue { + if trieValue2 != trieValue { + if filter == nil || maybeFilterValue(trieValue2, t.nullValue, nullValue, filter) != value { + return c - 1, value + } + trieValue = trieValue2 // may or may not help + } + } else { + trieValue = trieValue2 + value = maybeFilterValue(trieValue2, t.nullValue, nullValue, filter) + haveValue = true + } + for { + c++ + if c&dataMask == 0 { + break + } + di++ + trieValue2 = t.getValue(di) + if trieValue2 != trieValue { + if filter == nil || maybeFilterValue(trieValue2, t.nullValue, nullValue, filter) != value { + return c - 1, value + } + trieValue = trieValue2 // may or may not help + } + } + } + } + i3++ + if i3 >= i3BlockLength { + break + } + } + if c >= t.highStart { + break + } + } + + di := t.dataLength - highValueNegDataOffset + highValue := t.getValue(di) + if maybeFilterValue(highValue, t.nullValue, nullValue, filter) != value { + return c - 1, value + } + return maxUnicode, value +} + +func maybeFilterValue(value uint32, trieNullValue uint32, nullValue uint32, filter UcpMapValueFilter) uint32 { + if value == trieNullValue { + value = nullValue + } else if filter != nil { + value = filter(value) + } + return value +} diff --git a/go/mysql/icuregex/internal/utrie/utrie2.go b/go/mysql/icuregex/internal/utrie/utrie2.go new file mode 100644 index 00000000000..2a474356b97 --- /dev/null +++ b/go/mysql/icuregex/internal/utrie/utrie2.go @@ -0,0 +1,433 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utrie + +import ( + "errors" + "fmt" + + "vitess.io/vitess/go/mysql/icuregex/internal/udata" + "vitess.io/vitess/go/mysql/icuregex/internal/utf16" +) + +type UTrie2 struct { + index []uint16 + data16 []uint16 + data32 []uint32 + + indexLength, dataLength int + index2NullOffset uint16 + dataNullOffset uint16 + InitialValue uint32 + ErrorValue uint32 + + HighStart rune + HighValueIndex int +} + +func (t *UTrie2) SerializedLength() int32 { + return 16 + int32(t.indexLength+t.dataLength)*2 +} + +func (t *UTrie2) getIndex(asciiOffset int, c rune) uint16 { + return t.index[t.indexFromCp(asciiOffset, c)] +} + +func (t *UTrie2) Get16(c rune) uint16 { + return t.getIndex(t.indexLength, c) +} + +func (t *UTrie2) indexFromCp(asciiOffset int, c rune) int { + switch { + case c < 0xd800: + return indexRaw(0, t.index, c) + case c <= 0xffff: + var offset int32 + if c <= 0xdbff { + offset = lscpIndex2Offset - (0xd800 >> shift2) + } + return indexRaw(offset, t.index, c) + case c > 0x10ffff: + return asciiOffset + badUtf8DataOffset + case c >= t.HighStart: + return t.HighValueIndex + default: + return indexFromSupp(t.index, c) + } +} + +type EnumRange func(start, end rune, value uint32) bool +type EnumValue func(value uint32) uint32 + +func (t *UTrie2) Enum(enumValue EnumValue, enumRange EnumRange) { + t.enumEitherTrie(0, 0x110000, enumValue, enumRange) +} + +func enumSameValue(value uint32) uint32 { + return value +} + +func (t *UTrie2) enumEitherTrie(start, limit rune, enumValue EnumValue, enumRange EnumRange) { + if enumRange == nil { + return + } + if enumValue == nil { + enumValue = enumSameValue + } + + /* frozen trie */ + var ( + idx = t.index + data32 = t.data32 + index2NullOffset = int(t.index2NullOffset) + nullBlock = int(t.dataNullOffset) + + c rune + prev = start + highStart = t.HighStart + + /* get the enumeration value that corresponds to an initial-value trie data entry */ + initialValue = enumValue(t.InitialValue) + + /* set variables for previous range */ + i2Block int + block int + prevI2Block = -1 + prevBlock = -1 + prevValue = uint32(0) + ) + + /* enumerate index-2 blocks */ + for c = start; c < limit && c < highStart; { + /* Code point limit for iterating inside this i2Block. */ + tempLimit := c + cpPerIndex1Entry + if limit < tempLimit { + tempLimit = limit + } + if c <= 0xffff { + if !utf16.IsSurrogate(c) { + i2Block = int(c >> shift2) + } else if utf16.IsSurrogateLead(c) { + /* + * Enumerate values for lead surrogate code points, not code units: + * This special block has half the normal length. + */ + i2Block = lscpIndex2Offset + tempLimit = min(0xdc00, limit) + } else { + /* + * Switch back to the normal part of the index-2 table. + * Enumerate the second half of the surrogates block. + */ + i2Block = 0xd800 >> shift2 + tempLimit = min(0xe000, limit) + } + } else { + /* supplementary code points */ + i2Block = int(idx[(index1Offset-omittedBmpIndex1Length)+(c>>shift1)]) + if i2Block == prevI2Block && (c-prev) >= cpPerIndex1Entry { + /* + * The index-2 block is the same as the previous one, and filled with prevValue. + * Only possible for supplementary code points because the linear-BMP index-2 + * table creates unique i2Block values. + */ + c += cpPerIndex1Entry + continue + } + } + prevI2Block = i2Block + if i2Block == index2NullOffset { + /* this is the null index-2 block */ + if prevValue != initialValue { + if prev < c && !enumRange(prev, c-1, prevValue) { + return + } + prevBlock = nullBlock + prev = c + prevValue = initialValue + } + c += cpPerIndex1Entry + } else { + /* enumerate data blocks for one index-2 block */ + var i2Limit int + if (c >> shift1) == (tempLimit >> shift1) { + i2Limit = int(tempLimit>>shift2) & index2Mask + } else { + i2Limit = index2BlockLength + } + for i2 := int(c>>shift2) & index2Mask; i2 < i2Limit; i2++ { + block = int(idx[i2Block+i2] << indexShift) + if block == prevBlock && (c-prev) >= dataBlockLength { + /* the block is the same as the previous one, and filled with prevValue */ + c += dataBlockLength + continue + } + prevBlock = block + if block == nullBlock { + /* this is the null data block */ + if prevValue != initialValue { + if prev < c && !enumRange(prev, c-1, prevValue) { + return + } + prev = c + prevValue = initialValue + } + c += dataBlockLength + } else { + for j := 0; j < dataBlockLength; j++ { + var value uint32 + if data32 != nil { + value = data32[block+j] + } else { + value = uint32(idx[block+j]) + } + value = enumValue(value) + if value != prevValue { + if prev < c && !enumRange(prev, c-1, prevValue) { + return + } + prev = c + prevValue = value + } + c++ + } + } + } + } + } + + if c > limit { + c = limit /* could be higher if in the index2NullOffset */ + } else if c < limit { + /* c==highStart>shift1)]) + return (int(index[i1+int((c>>shift2)&index2Mask)]) << indexShift) + int(c&dataMask) +} + +func indexRaw(offset int32, index []uint16, c rune) int { + return int(index[offset+(c>>shift2)]<> shift1 + + /** Number of code points per index-1 table entry. 2048=0x800 */ + cpPerIndex1Entry = 1 << shift1 + + /** Number of entries in an index-2 block. 64=0x40 */ + index2BlockLength = 1 << shift1min2 + + /** Mask for getting the lower bits for the in-index-2-block offset. */ + index2Mask = index2BlockLength - 1 + + /** Number of entries in a data block. 32=0x20 */ + dataBlockLength = 1 << shift2 + + /** Mask for getting the lower bits for the in-data-block offset. */ + dataMask = dataBlockLength - 1 + + /** + * Shift size for shifting left the index array values. + * Increases possible data size with 16-bit index values at the cost + * of compactability. + * This requires data blocks to be aligned by UTRIE2_DATA_GRANULARITY. + */ + indexShift = 2 + + /** The alignment size of a data block. Also the granularity for compaction. */ + dataGranularity = 1 << indexShift + + /* Fixed layout of the first part of the index array. ------------------- */ + + /** + * The part of the index-2 table for U+D800..U+DBFF stores values for + * lead surrogate code _units_ not code _points_. + * Values for lead surrogate code _points_ are indexed with this portion of the table. + * Length=32=0x20=0x400>>UTRIE2_SHIFT_2. (There are 1024=0x400 lead surrogates.) + */ + lscpIndex2Offset = 0x10000 >> shift2 + lscpIndex2Length = 0x400 >> shift2 + + /** Count the lengths of both BMP pieces. 2080=0x820 */ + index2BmpLength = lscpIndex2Offset + lscpIndex2Length + + /** + * The 2-byte UTF-8 version of the index-2 table follows at offset 2080=0x820. + * Length 32=0x20 for lead bytes C0..DF, regardless of UTRIE2_SHIFT_2. + */ + utf82BIndex2Offset = index2BmpLength + utf82BIndex2Length = 0x800 >> 6 /* U+0800 is the first code point after 2-byte UTF-8 */ + + /** + * The index-1 table, only used for supplementary code points, at offset 2112=0x840. + * Variable length, for code points up to highStart, where the last single-value range starts. + * Maximum length 512=0x200=0x100000>>UTRIE2_SHIFT_1. + * (For 0x100000 supplementary code points U+10000..U+10ffff.) + * + * The part of the index-2 table for supplementary code points starts + * after this index-1 table. + * + * Both the index-1 table and the following part of the index-2 table + * are omitted completely if there is only BMP data. + */ + index1Offset = utf82BIndex2Offset + utf82BIndex2Length + maxIndex1Length = 0x100000 >> shift1 + + /* + * Fixed layout of the first part of the data array. ----------------------- + * Starts with 4 blocks (128=0x80 entries) for ASCII. + */ + + /** + * The illegal-UTF-8 data block follows the ASCII block, at offset 128=0x80. + * Used with linear access for single bytes 0..0xbf for simple error handling. + * Length 64=0x40, not UTRIE2_DATA_BLOCK_LENGTH. + */ + badUtf8DataOffset = 0x80 +) + +func UTrie2FromBytes(bytes *udata.Bytes) (*UTrie2, error) { + type utrie2Header struct { + /** "Tri2" in big-endian US-ASCII (0x54726932) */ + signature uint32 + + /** + * options bit field: + * 15.. 4 reserved (0) + * 3.. 0 UTrie2ValueBits valueBits + */ + options uint16 + + /** UTRIE2_INDEX_1_OFFSET..UTRIE2_MAX_INDEX_LENGTH */ + indexLength uint16 + + /** (UTRIE2_DATA_START_OFFSET..UTRIE2_MAX_DATA_LENGTH)>>UTRIE2_INDEX_SHIFT */ + shiftedDataLength uint16 + + /** Null index and data blocks, not shifted. */ + index2NullOffset, dataNullOffset uint16 + + /** + * First code point of the single-value range ending with U+10ffff, + * rounded up and then shifted right by UTRIE2_SHIFT_1. + */ + shiftedHighStart uint16 + } + + var header utrie2Header + header.signature = bytes.Uint32() + + switch header.signature { + case 0x54726932: + case 0x32697254: + return nil, errors.New("unsupported: BigEndian encoding") + default: + return nil, fmt.Errorf("invalid signature for Trie2: 0x%08x", header.signature) + } + + header.options = bytes.Uint16() + header.indexLength = bytes.Uint16() + header.shiftedDataLength = bytes.Uint16() + header.index2NullOffset = bytes.Uint16() + header.dataNullOffset = bytes.Uint16() + header.shiftedHighStart = bytes.Uint16() + + var width int + switch header.options & 0xf { + case 0: + width = 16 + case 1: + width = 32 + default: + return nil, errors.New("invalid width for serialized UTrie2") + } + + trie := &UTrie2{ + indexLength: int(header.indexLength), + dataLength: int(header.shiftedDataLength) << indexShift, + index2NullOffset: header.index2NullOffset, + dataNullOffset: header.dataNullOffset, + HighStart: rune(header.shiftedHighStart) << shift1, + } + + trie.HighValueIndex = trie.dataLength - dataGranularity + if width == 16 { + trie.HighValueIndex += trie.indexLength + } + + indexArraySize := trie.indexLength + if width == 16 { + indexArraySize += trie.dataLength + } + + trie.index = bytes.Uint16Slice(int32(indexArraySize)) + + if width == 16 { + trie.data16 = trie.index[trie.indexLength:] + trie.InitialValue = uint32(trie.index[trie.dataNullOffset]) + trie.ErrorValue = uint32(trie.index[trie.indexLength+badUtf8DataOffset]) + } else { + trie.data32 = bytes.Uint32Slice(int32(trie.dataLength)) + trie.InitialValue = trie.data32[trie.dataNullOffset] + trie.ErrorValue = trie.data32[badUtf8DataOffset] + } + + return trie, nil +} diff --git a/go/mysql/icuregex/matcher.go b/go/mysql/icuregex/matcher.go new file mode 100644 index 00000000000..1b5495f495f --- /dev/null +++ b/go/mysql/icuregex/matcher.go @@ -0,0 +1,1671 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +import ( + "fmt" + "io" + + "vitess.io/vitess/go/mysql/icuregex/internal/ucase" + "vitess.io/vitess/go/mysql/icuregex/internal/uchar" + "vitess.io/vitess/go/mysql/icuregex/internal/uprops" +) + +const timerInitialValue = 10000 +const defaultTimeout = 3 +const defaultStackLimit = 0 + +type Matcher struct { + pattern *Pattern + + input []rune + + regionStart int // Start of the input region, default = 0. + regionLimit int // End of input region, default to input.length. + + anchorStart int // Region bounds for anchoring operations (^ or $). + anchorLimit int // See useAnchoringBounds + + lookStart int // Region bounds for look-ahead/behind and + lookLimit int // and other boundary tests. See + // useTransparentBounds + + activeStart int // Currently active bounds for matching. + activeLimit int // Usually is the same as region, but + // is changed to fLookStart/Limit when + // entering look around regions. + + match bool // True if the last attempted match was successful. + matchStart int // Position of the start of the most recent match + matchEnd int // First position after the end of the most recent match + // Zero if no previous match, even when a region + // is active. + lastMatchEnd int // First position after the end of the previous match, + // or -1 if there was no previous match. + appendPosition int // First position after the end of the previous + // appendReplacement(). As described by the + // JavaDoc for Java Matcher, where it is called + // "append position" + hitEnd bool // True if the last match touched the end of input. + requireEnd bool // True if the last match required end-of-input + // (matched $ or Z) + + stack stack + frame stackFrame // After finding a match, the last active stack frame, + // which will contain the capture group results. + // NOT valid while match engine is running. + + data []int // Data area for use by the compiled pattern. + + timeLimit int32 // Max time (in arbitrary steps) to let the + // match engine run. Zero for unlimited. + + time int32 // Match time, accumulates while matching. + tickCounter int32 // Low bits counter for time. Counts down StateSaves. + // Kept separately from fTime to keep as much + // code as possible out of the inline + // StateSave function. + + dumper io.Writer +} + +func NewMatcher(pat *Pattern) *Matcher { + m := &Matcher{ + pattern: pat, + data: make([]int, pat.dataSize), + stack: stack{ + frameSize: pat.frameSize, + stackLimit: defaultStackLimit, + }, + timeLimit: defaultTimeout, + } + m.reset() + return m +} + +func (m *Matcher) MatchAt(startIdx int, toEnd bool) error { + //-------------------------------------------------------------------------------- + // + // MatchAt This is the actual matching engine. + // + // startIdx: begin matching a this index. + // toEnd: if true, match must extend to end of the input region + // + //-------------------------------------------------------------------------------- + var err error + var isMatch bool // True if the we have a match. + + if m.dumper != nil { + fmt.Fprintf(m.dumper, "MatchAt(startIdx=%d)\n", startIdx) + fmt.Fprintf(m.dumper, "Original Pattern: \"%s\"\n", m.pattern.pattern) + fmt.Fprintf(m.dumper, "Input String: \"%s\"\n\n", string(m.input)) + } + + pat := m.pattern.compiledPat + inputText := m.input + litText := m.pattern.literalText + sets := m.pattern.sets + + fp := m.resetStack() + *fp.inputIdx() = startIdx + *fp.patIdx() = 0 + for i := 0; i < len(m.data); i++ { + m.data[i] = 0 + } + + for { + op := pat[*fp.patIdx()] + + if m.dumper != nil { + fmt.Fprintf(m.dumper, "inputIdx=%d inputChar=%x sp=%3d activeLimit=%d ", *fp.inputIdx(), + charAt(inputText, *fp.inputIdx()), m.stack.sp(), m.activeLimit) + m.pattern.dumpOp(m.dumper, *fp.patIdx()) + } + + *fp.patIdx()++ + + switch op.typ() { + case urxNop: + // Nothing to do. + case urxBacktrack: + // Force a backtrack. In some circumstances, the pattern compiler + // will notice that the pattern can't possibly match anything, and will + // emit one of these at that point. + fp = m.stack.popFrame() + case urxOnechar: + if *fp.inputIdx() < m.activeLimit { + c := charAt(inputText, *fp.inputIdx()) + *fp.inputIdx()++ + if c == rune(op.value()) { + break + } + } else { + m.hitEnd = true + } + fp = m.stack.popFrame() + case urxString: + // Test input against a literal string. + // Strings require two slots in the compiled pattern, one for the + // offset to the string text, and one for the length. + stringStartIdx := op.value() + nextOp := pat[*fp.patIdx()] // Fetch the second operand + *fp.patIdx()++ + stringLen := nextOp.value() + + patternString := litText[stringStartIdx:] + var patternStringIndex int + success := true + for patternStringIndex < stringLen { + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + success = false + break + } + if charAt(patternString, patternStringIndex) != charAt(inputText, *fp.inputIdx()) { + success = false + break + } + patternStringIndex++ + *fp.inputIdx()++ + } + + if !success { + fp = m.stack.popFrame() + } + case urxStateSave: + fp, err = m.stateSave(*fp.inputIdx(), op.value()) + if err != nil { + return err + } + case urxEnd: + // The match loop will exit via this path on a successful match, + // when we reach the end of the pattern. + if toEnd && *fp.inputIdx() != m.activeLimit { + // The pattern matched, but not to the end of input. Try some more. + fp = m.stack.popFrame() + break + } + isMatch = true + goto breakFromLoop + + // Start and End Capture stack frame variables are laid out out like this: + // fp->fExtra[opValue] - The start of a completed capture group + // opValue+1 - The end of a completed capture group + // opValue+2 - the start of a capture group whose end + // has not yet been reached (and might not ever be). + case urxStartCapture: + *fp.extra(op.value() + 2) = *fp.inputIdx() + case urxEndCapture: + *fp.extra(op.value()) = *fp.extra(op.value() + 2) // Tentative start becomes real. + *fp.extra(op.value() + 1) = *fp.inputIdx() // End position + + case urxDollar: // $, test for End of line + if *fp.inputIdx() < m.anchorLimit-2 { + fp = m.stack.popFrame() + break + } + // or for position before new line at end of input + if *fp.inputIdx() >= m.anchorLimit { + // We really are at the end of input. Success. + m.hitEnd = true + m.requireEnd = true + break + } + + if *fp.inputIdx() == m.anchorLimit-1 { + c := m.input[*fp.inputIdx()] + if isLineTerminator(c) { + if !(c == 0x0a && *fp.inputIdx() > m.anchorStart && m.input[*fp.inputIdx()-1] == 0x0d) { + // At new-line at end of input. Success + m.hitEnd = true + m.requireEnd = true + break + } + } + } else if *fp.inputIdx() == m.anchorLimit-2 && m.input[*fp.inputIdx()] == 0x0d && m.input[*fp.inputIdx()+1] == 0x0a { + m.hitEnd = true + m.requireEnd = true + break // At CR/LF at end of input. Success + } + fp = m.stack.popFrame() + + case urxDollarD: // $, test for End of Line, in UNIX_LINES mode. + if *fp.inputIdx() >= m.anchorLimit { + // Off the end of input. Success. + m.hitEnd = true + m.requireEnd = true + break + } + c := charAt(inputText, *fp.inputIdx()) + *fp.inputIdx()++ + // Either at the last character of input, or off the end. + if c == 0x0a && *fp.inputIdx() == m.anchorLimit { + m.hitEnd = true + m.requireEnd = true + break + } + + // Not at end of input. Back-track out. + fp = m.stack.popFrame() + case urxDollarM: // $, test for End of line in multi-line mode + if *fp.inputIdx() >= m.anchorLimit { + // We really are at the end of input. Success. + m.hitEnd = true + m.requireEnd = true + break + } + // If we are positioned just before a new-line, succeed. + // It makes no difference where the new-line is within the input. + c := charAt(inputText, *fp.inputIdx()) + if isLineTerminator(c) { + // At a line end, except for the odd chance of being in the middle of a CR/LF sequence + // In multi-line mode, hitting a new-line just before the end of input does not + // set the hitEnd or requireEnd flags + if !(c == 0x0a && *fp.inputIdx() > m.anchorStart && charAt(inputText, *fp.inputIdx()-1) == 0x0d) { + break + } + } + // not at a new line. Fail. + fp = m.stack.popFrame() + case urxDollarMd: // $, test for End of line in multi-line and UNIX_LINES mode + if *fp.inputIdx() >= m.anchorLimit { + // We really are at the end of input. Success. + m.hitEnd = true + m.requireEnd = true // Java set requireEnd in this case, even though + break // adding a new-line would not lose the match. + } + // If we are not positioned just before a new-line, the test fails; backtrack out. + // It makes no difference where the new-line is within the input. + if charAt(inputText, *fp.inputIdx()) != 0x0a { + fp = m.stack.popFrame() + } + case urxCaret: // ^, test for start of line + if *fp.inputIdx() != m.anchorStart { + fp = m.stack.popFrame() + } + case urxCaretM: // ^, test for start of line in mulit-line mode + if *fp.inputIdx() == m.anchorStart { + // We are at the start input. Success. + break + } + // Check whether character just before the current pos is a new-line + // unless we are at the end of input + c := charAt(inputText, *fp.inputIdx()-1) + if (*fp.inputIdx() < m.anchorLimit) && isLineTerminator(c) { + // It's a new-line. ^ is true. Success. + // TODO: what should be done with positions between a CR and LF? + break + } + // Not at the start of a line. Fail. + fp = m.stack.popFrame() + case urxCaretMUnix: // ^, test for start of line in mulit-line + Unix-line mode + if *fp.inputIdx() <= m.anchorStart { + // We are at the start input. Success. + break + } + + c := charAt(inputText, *fp.inputIdx()-1) + if c != 0x0a { + // Not at the start of a line. Back-track out. + fp = m.stack.popFrame() + } + case urxBackslashB: // Test for word boundaries + success := m.isWordBoundary(*fp.inputIdx()) + success = success != (op.value() != 0) // flip sense for \B + if !success { + fp = m.stack.popFrame() + } + case urxBackslashBu: // Test for word boundaries, Unicode-style + success := m.isUWordBoundary(*fp.inputIdx()) + success = success != (op.value() != 0) // flip sense for \B + if !success { + fp = m.stack.popFrame() + } + case urxBackslashD: // Test for decimal digit + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + c := charAt(inputText, *fp.inputIdx()) + + success := m.isDecimalDigit(c) + success = success != (op.value() != 0) // flip sense for \D + if success { + *fp.inputIdx()++ + } else { + fp = m.stack.popFrame() + } + + case urxBackslashG: // Test for position at end of previous match + if !((m.match && *fp.inputIdx() == m.matchEnd) || (!m.match && *fp.inputIdx() == m.activeStart)) { + fp = m.stack.popFrame() + } + + case urxBackslashH: // Test for \h, horizontal white space. + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + c := charAt(inputText, *fp.inputIdx()) + success := m.isHorizWS(c) || c == 9 + success = success != (op.value() != 0) // flip sense for \H + if success { + *fp.inputIdx()++ + } else { + fp = m.stack.popFrame() + } + + case urxBackslashR: // Test for \R, any line break sequence. + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + c := charAt(inputText, *fp.inputIdx()) + if isLineTerminator(c) { + if c == 0x0d && charAt(inputText, *fp.inputIdx()+1) == 0x0a { + *fp.inputIdx()++ + } + *fp.inputIdx()++ + } else { + fp = m.stack.popFrame() + } + + case urxBackslashV: // \v, any single line ending character. + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + c := charAt(inputText, *fp.inputIdx()) + success := isLineTerminator(c) + success = success != (op.value() != 0) // flip sense for \V + if success { + *fp.inputIdx()++ + } else { + fp = m.stack.popFrame() + } + + case urxBackslashX: + // Match a Grapheme, as defined by Unicode UAX 29. + + // Fail if at end of input + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + *fp.inputIdx() = m.followingGCBoundary(*fp.inputIdx()) + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + *fp.inputIdx() = m.activeLimit + } + + case urxBackslashZ: // Test for end of Input + if *fp.inputIdx() < m.anchorLimit { + fp = m.stack.popFrame() + } else { + m.hitEnd = true + m.requireEnd = true + } + case urxStaticSetref: + // Test input character against one of the predefined sets + // (Word Characters, for example) + // The high bit of the op value is a flag for the match polarity. + // 0: success if input char is in set. + // 1: success if input char is not in set. + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + success := (op.value() & urxNegSet) == urxNegSet + negOp := op.value() & ^urxNegSet + + c := charAt(inputText, *fp.inputIdx()) + s := staticPropertySets[negOp] + if s.ContainsRune(c) { + success = !success + } + + if success { + *fp.inputIdx()++ + } else { + // the character wasn't in the set. + fp = m.stack.popFrame() + } + case urxStatSetrefN: + // Test input character for NOT being a member of one of + // the predefined sets (Word Characters, for example) + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + c := charAt(inputText, *fp.inputIdx()) + s := staticPropertySets[op.value()] + if !s.ContainsRune(c) { + *fp.inputIdx()++ + break + } + // the character wasn't in the set. + fp = m.stack.popFrame() + + case urxSetref: + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + // There is input left. Pick up one char and test it for set membership. + c := charAt(inputText, *fp.inputIdx()) + + s := sets[op.value()] + if s.ContainsRune(c) { + *fp.inputIdx()++ + break + } + + // the character wasn't in the set. + fp = m.stack.popFrame() + + case urxDotany: + // . matches anything, but stops at end-of-line. + if *fp.inputIdx() >= m.activeLimit { + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + c := charAt(inputText, *fp.inputIdx()) + if isLineTerminator(c) { + // End of line in normal mode. . does not match. + fp = m.stack.popFrame() + break + } + *fp.inputIdx()++ + + case urxDotanyAll: + // ., in dot-matches-all (including new lines) mode + if *fp.inputIdx() >= m.activeLimit { + // At end of input. Match failed. Backtrack out. + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + c := charAt(inputText, *fp.inputIdx()) + *fp.inputIdx()++ + if c == 0x0d && *fp.inputIdx() < m.activeLimit { + // In the case of a CR/LF, we need to advance over both. + nextc := charAt(inputText, *fp.inputIdx()) + if nextc == 0x0a { + *fp.inputIdx()++ + } + } + + case urxDotanyUnix: + // '.' operator, matches all, but stops at end-of-line. + // UNIX_LINES mode, so 0x0a is the only recognized line ending. + if *fp.inputIdx() >= m.activeLimit { + // At end of input. Match failed. Backtrack out. + m.hitEnd = true + fp = m.stack.popFrame() + break + } + + // There is input left. Advance over one char, unless we've hit end-of-line + c := charAt(inputText, *fp.inputIdx()) + if c == 0x0a { + // End of line in normal mode. '.' does not match the \n + fp = m.stack.popFrame() + } else { + *fp.inputIdx()++ + } + case urxJmp: + *fp.patIdx() = op.value() + + case urxFail: + isMatch = false + goto breakFromLoop + + case urxJmpSav: + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()) // State save to loc following current + if err != nil { + return err + } + *fp.patIdx() = op.value() // Then JMP. + + case urxJmpSavX: + // This opcode is used with (x)+, when x can match a zero length string. + // Same as JMP_SAV, except conditional on the match having made forward progress. + // Destination of the JMP must be a URX_STO_INP_LOC, from which we get the + // data address of the input position at the start of the loop. + stoOp := pat[op.value()-1] + frameLoc := stoOp.value() + + prevInputIdx := *fp.extra(frameLoc) + if prevInputIdx < *fp.inputIdx() { + // The match did make progress. Repeat the loop. + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()) // State save to loc following current + if err != nil { + return err + } + *fp.patIdx() = op.value() // Then JMP. + *fp.extra(frameLoc) = *fp.inputIdx() + } + // If the input position did not advance, we do nothing here, + // execution will fall out of the loop. + + case urxCtrInit: + *fp.extra(op.value()) = 0 // Set the loop counter variable to zero + + // Pick up the three extra operands that CTR_INIT has, and + // skip the pattern location counter past + instOperandLoc := *fp.patIdx() + *fp.patIdx() += 3 // Skip over the three operands that CTR_INIT has. + + loopLoc := pat[instOperandLoc].value() + minCount := int(pat[instOperandLoc+1]) + maxCount := int(pat[instOperandLoc+2]) + + if minCount == 0 { + fp, err = m.stateSave(*fp.inputIdx(), loopLoc+1) + if err != nil { + return err + } + } + if maxCount == -1 { + *fp.extra(op.value() + 1) = *fp.inputIdx() // For loop breaking. + } else if maxCount == 0 { + fp = m.stack.popFrame() + } + + case utxCtrLoop: + initOp := pat[op.value()] + opValue := initOp.value() + pCounter := fp.extra(opValue) + minCount := int(pat[op.value()+2]) + maxCount := int(pat[op.value()+3]) + *pCounter++ + if *pCounter >= maxCount && maxCount != -1 { + break + } + + if *pCounter >= minCount { + if maxCount == -1 { + // Loop has no hard upper bound. + // Check that it is progressing through the input, break if it is not. + pLastIntputIdx := fp.extra(opValue + 1) + if *pLastIntputIdx == *fp.inputIdx() { + break + } + *pLastIntputIdx = *fp.inputIdx() + } + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()) + if err != nil { + return err + } + } else { + // Increment time-out counter. (StateSave() does it if count >= minCount) + m.tickCounter-- + if m.tickCounter <= 0 { + if err = m.incrementTime(*fp.inputIdx()); err != nil { + return err + } // Re-initializes fTickCounter + } + } + + *fp.patIdx() = op.value() + 4 // Loop back. + + case urxCtrInitNg: + *fp.extra(op.value()) = 0 // Set the loop counter variable to zero + + // Pick up the three extra operands that CTR_INIT_NG has, and + // skip the pattern location counter past + instrOperandLoc := *fp.patIdx() + *fp.patIdx() += 3 + loopLoc := pat[instrOperandLoc].value() + minCount := pat[instrOperandLoc+1].value() + maxCount := pat[instrOperandLoc+2].value() + + if maxCount == -1 { + *fp.extra(op.value() + 1) = *fp.inputIdx() // Save initial input index for loop breaking. + } + + if minCount == 0 { + if maxCount != 0 { + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()) + if err != nil { + return err + } + } + *fp.patIdx() = loopLoc + 1 + } + + case urxCtrLoopNg: + initOp := pat[op.value()] + pCounter := fp.extra(initOp.value()) + minCount := int(pat[op.value()+2]) + maxCount := int(pat[op.value()+3]) + *pCounter++ + if *pCounter >= maxCount && maxCount != -1 { + // The loop has matched the maximum permitted number of times. + // Break out of here with no action. Matching will + // continue with the following pattern. + break + } + + if *pCounter < minCount { + // We haven't met the minimum number of matches yet. + // Loop back for another one. + *fp.patIdx() = op.value() + 4 // Loop back. + // Increment time-out counter. (StateSave() does it if count >= minCount) + m.tickCounter-- + if m.tickCounter <= 0 { + if err = m.incrementTime(*fp.inputIdx()); err != nil { + return err + } // Re-initializes fTickCounter + } + } else { + // We do have the minimum number of matches. + + // If there is no upper bound on the loop iterations, check that the input index + // is progressing, and stop the loop if it is not. + if maxCount == -1 { + lastInputIdx := fp.extra(initOp.value() + 1) + if *fp.inputIdx() == *lastInputIdx { + break + } + *lastInputIdx = *fp.inputIdx() + } + } + + // Loop Continuation: we will fall into the pattern following the loop + // (non-greedy, don't execute loop body first), but first do + // a state save to the top of the loop, so that a match failure + // in the following pattern will try another iteration of the loop. + fp, err = m.stateSave(*fp.inputIdx(), op.value()+4) + if err != nil { + return err + } + + case urxStoSp: + m.data[op.value()] = m.stack.len() + + case urxLdSp: + newStackSize := m.data[op.value()] + newFp := m.stack.offset(newStackSize) + if newFp.equals(fp) { + break + } + copy(newFp, fp) + fp = newFp + + m.stack.setSize(newStackSize) + case urxBackref: + groupStartIdx := *fp.extra(op.value()) + groupEndIdx := *fp.extra(op.value() + 1) + + if groupStartIdx < 0 { + // This capture group has not participated in the match thus far, + fp = m.stack.popFrame() // FAIL, no match. + break + } + + success := true + for { + if groupStartIdx >= groupEndIdx { + success = true + break + } + + if *fp.inputIdx() >= m.activeLimit { + success = false + m.hitEnd = true + break + } + + captureGroupChar := charAt(inputText, groupStartIdx) + inputChar := charAt(inputText, *fp.inputIdx()) + groupStartIdx++ + *fp.inputIdx()++ + if inputChar != captureGroupChar { + success = false + break + } + } + + if !success { + fp = m.stack.popFrame() + } + case urxBackrefI: + groupStartIdx := *fp.extra(op.value()) + groupEndIdx := *fp.extra(op.value() + 1) + + if groupStartIdx < 0 { + // This capture group has not participated in the match thus far, + fp = m.stack.popFrame() // FAIL, no match. + break + } + + captureGroupItr := newCaseFoldIterator(m.input, groupStartIdx, groupEndIdx) + inputItr := newCaseFoldIterator(m.input, *fp.inputIdx(), m.activeLimit) + success := true + + for { + captureGroupChar := captureGroupItr.next() + if captureGroupChar == -1 { + success = true + break + } + inputChar := inputItr.next() + if inputChar == -1 { + success = false + m.hitEnd = true + break + } + if inputChar != captureGroupChar { + success = false + break + } + } + + if success && inputItr.inExpansion() { + // We otained a match by consuming part of a string obtained from + // case-folding a single code point of the input text. + // This does not count as an overall match. + success = false + } + + if success { + *fp.inputIdx() = inputItr.index + } else { + fp = m.stack.popFrame() + } + + case urxStoInpLoc: + *fp.extra(op.value()) = *fp.inputIdx() + + case urxJmpx: + instrOperandLoc := *fp.patIdx() + *fp.patIdx()++ + dataLoc := pat[instrOperandLoc].value() + + saveInputIdx := *fp.extra(dataLoc) + + if saveInputIdx < *fp.inputIdx() { + *fp.patIdx() = op.value() // JMP + } else { + fp = m.stack.popFrame() // FAIL, no progress in loop. + } + + case urxLaStart: + m.data[op.value()] = m.stack.len() + m.data[op.value()+1] = *fp.inputIdx() + m.data[op.value()+2] = m.activeStart + m.data[op.value()+3] = m.activeLimit + m.activeStart = m.lookStart // Set the match region change for + m.activeLimit = m.lookLimit // transparent bounds. + + case urxLaEnd: + stackSize := m.stack.len() + newStackSize := m.data[op.value()] + if stackSize > newStackSize { + // Copy the current top frame back to the new (cut back) top frame. + // This makes the capture groups from within the look-ahead + // expression available. + newFp := m.stack.offset(newStackSize) + copy(newFp, fp) + fp = newFp + m.stack.setSize(newStackSize) + } + + *fp.inputIdx() = m.data[op.value()+1] + + m.activeStart = m.data[op.value()+2] + m.activeLimit = m.data[op.value()+3] + + case urcOnecharI: + // Case insensitive one char. The char from the pattern is already case folded. + // Input text is not, but case folding the input can not reduce two or more code + // points to one. + if *fp.inputIdx() < m.activeLimit { + c := charAt(inputText, *fp.inputIdx()) + if ucase.Fold(c) == op.value32() { + *fp.inputIdx()++ + break + } + } else { + m.hitEnd = true + } + + fp = m.stack.popFrame() + + case urxStringI: + // Case-insensitive test input against a literal string. + // Strings require two slots in the compiled pattern, one for the + // offset to the string text, and one for the length. + // The compiled string has already been case folded. + patternString := litText[op.value():] + var patternStringIdx int + nextOp := pat[*fp.patIdx()] + *fp.patIdx()++ + patternStringLen := nextOp.value() + + success := true + + it := newCaseFoldIterator(inputText, *fp.inputIdx(), m.activeLimit) + for patternStringIdx < patternStringLen { + cText := it.next() + cPattern := patternString[patternStringIdx] + patternStringIdx++ + + if cText != cPattern { + success = false + if cText == -1 { + m.hitEnd = true + } + break + } + } + if it.inExpansion() { + success = false + } + + if success { + *fp.inputIdx() = it.index + } else { + fp = m.stack.popFrame() + } + + case urxLbStart: + // Entering a look-behind block. + // Save Stack Ptr, Input Pos and active input region. + // TODO: implement transparent bounds. Ticket #6067 + m.data[op.value()] = m.stack.len() + m.data[op.value()+1] = *fp.inputIdx() + // Save input string length, then reset to pin any matches to end at + // the current position. + m.data[op.value()+2] = m.activeStart + m.data[op.value()+3] = m.activeLimit + m.activeStart = m.regionStart + m.activeLimit = *fp.inputIdx() + // Init the variable containing the start index for attempted matches. + m.data[op.value()+4] = -1 + case urxLbCont: + // Positive Look-Behind, at top of loop checking for matches of LB expression + // at all possible input starting positions. + + // Fetch the min and max possible match lengths. They are the operands + // of this op in the pattern. + minML := pat[*fp.patIdx()] + *fp.patIdx()++ + maxML := pat[*fp.patIdx()] + *fp.patIdx()++ + + lbStartIdx := &m.data[op.value()+4] + if *lbStartIdx < 0 { + // First time through loop. + *lbStartIdx = *fp.inputIdx() - int(minML) + if *lbStartIdx > 0 { + *lbStartIdx = *fp.inputIdx() + } + } else { + // 2nd through nth time through the loop. + // Back up start position for match by one. + *lbStartIdx-- + } + + if *lbStartIdx < 0 || *lbStartIdx < *fp.inputIdx()-int(maxML) { + // We have tried all potential match starting points without + // getting a match. Backtrack out, and out of the + // Look Behind altogether. + fp = m.stack.popFrame() + m.activeStart = m.data[op.value()+2] + m.activeLimit = m.data[op.value()+3] + break + } + + // Save state to this URX_LB_CONT op, so failure to match will repeat the loop. + // (successful match will fall off the end of the loop.) + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()-3) + if err != nil { + return err + } + *fp.inputIdx() = *lbStartIdx + + case urxLbEnd: + // End of a look-behind block, after a successful match. + if *fp.inputIdx() != m.activeLimit { + // The look-behind expression matched, but the match did not + // extend all the way to the point that we are looking behind from. + // FAIL out of here, which will take us back to the LB_CONT, which + // will retry the match starting at another position or fail + // the look-behind altogether, whichever is appropriate. + fp = m.stack.popFrame() + break + } + + // Look-behind match is good. Restore the orignal input string region, + // which had been truncated to pin the end of the lookbehind match to the + // position being looked-behind. + m.activeStart = m.data[op.value()+2] + m.activeLimit = m.data[op.value()+3] + case urxLbnCount: + // Negative Look-Behind, at top of loop checking for matches of LB expression + // at all possible input starting positions. + + // Fetch the extra parameters of this op. + minML := pat[*fp.patIdx()] + *fp.patIdx()++ + maxML := pat[*fp.patIdx()] + *fp.patIdx()++ + + continueLoc := pat[*fp.patIdx()].value() + *fp.patIdx()++ + + lbStartIdx := &m.data[op.value()+4] + + if *lbStartIdx < 0 { + // First time through loop. + *lbStartIdx = *fp.inputIdx() - int(minML) + if *lbStartIdx > 0 { + // move index to a code point boundary, if it's not on one already. + *lbStartIdx = *fp.inputIdx() + } + } else { + // 2nd through nth time through the loop. + // Back up start position for match by one. + *lbStartIdx-- + } + + if *lbStartIdx < 0 || *lbStartIdx < *fp.inputIdx()-int(maxML) { + // We have tried all potential match starting points without + // getting a match, which means that the negative lookbehind as + // a whole has succeeded. Jump forward to the continue location + m.activeStart = m.data[op.value()+2] + m.activeLimit = m.data[op.value()+3] + *fp.patIdx() = continueLoc + break + } + + // Save state to this URX_LB_CONT op, so failure to match will repeat the loop. + // (successful match will cause a FAIL out of the loop altogether.) + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()-4) + if err != nil { + return err + } + *fp.inputIdx() = *lbStartIdx + case urxLbnEnd: + // End of a negative look-behind block, after a successful match. + + if *fp.inputIdx() != m.activeLimit { + // The look-behind expression matched, but the match did not + // extend all the way to the point that we are looking behind from. + // FAIL out of here, which will take us back to the LB_CONT, which + // will retry the match starting at another position or succeed + // the look-behind altogether, whichever is appropriate. + fp = m.stack.popFrame() + break + } + + // Look-behind expression matched, which means look-behind test as + // a whole Fails + + // Restore the orignal input string length, which had been truncated + // inorder to pin the end of the lookbehind match + // to the position being looked-behind. + m.activeStart = m.data[op.value()+2] + m.activeLimit = m.data[op.value()+3] + + // Restore original stack position, discarding any state saved + // by the successful pattern match. + newStackSize := m.data[op.value()] + m.stack.setSize(newStackSize) + + // FAIL, which will take control back to someplace + // prior to entering the look-behind test. + fp = m.stack.popFrame() + case urxLoopSrI: + // Loop Initialization for the optimized implementation of + // [some character set]* + // This op scans through all matching input. + // The following LOOP_C op emulates stack unwinding if the following pattern fails. + s := sets[op.value()] + + // Loop through input, until either the input is exhausted or + // we reach a character that is not a member of the set. + ix := *fp.inputIdx() + + for { + if ix >= m.activeLimit { + m.hitEnd = true + break + } + c := charAt(inputText, ix) + if !s.ContainsRune(c) { + break + } + ix++ + } + + // If there were no matching characters, skip over the loop altogether. + // The loop doesn't run at all, a * op always succeeds. + if ix == *fp.inputIdx() { + *fp.patIdx()++ // skip the URX_LOOP_C op. + break + } + + // Peek ahead in the compiled pattern, to the URX_LOOP_C that + // must follow. It's operand is the stack location + // that holds the starting input index for the match of this [set]* + loopcOp := pat[*fp.patIdx()] + stackLoc := loopcOp.value() + *fp.extra(stackLoc) = *fp.inputIdx() + *fp.inputIdx() = ix + + // Save State to the URX_LOOP_C op that follows this one, + // so that match failures in the following code will return to there. + // Then bump the pattern idx so the LOOP_C is skipped on the way out of here. + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()) + if err != nil { + return err + } + *fp.patIdx()++ + case urxLoopDotI: + // Loop Initialization for the optimized implementation of .* + // This op scans through all remaining input. + // The following LOOP_C op emulates stack unwinding if the following pattern fails. + + // Loop through input until the input is exhausted (we reach an end-of-line) + // In DOTALL mode, we can just go straight to the end of the input. + var ix int + if (op.value() & 1) == 1 { + // Dot-matches-All mode. Jump straight to the end of the string. + ix = m.activeLimit + m.hitEnd = true + } else { + // NOT DOT ALL mode. Line endings do not match '.' + // Scan forward until a line ending or end of input. + ix = *fp.inputIdx() + for { + if ix >= m.activeLimit { + m.hitEnd = true + break + } + c := charAt(inputText, ix) + if (c & 0x7f) <= 0x29 { // Fast filter of non-new-line-s + if (c == 0x0a) || // 0x0a is newline in both modes. + (((op.value() & 2) == 0) && // IF not UNIX_LINES mode + isLineTerminator(c)) { + // char is a line ending. Exit the scanning loop. + break + } + } + ix++ + } + } + + // If there were no matching characters, skip over the loop altogether. + // The loop doesn't run at all, a * op always succeeds. + if ix == *fp.inputIdx() { + *fp.patIdx()++ // skip the URX_LOOP_C op. + break + } + + // Peek ahead in the compiled pattern, to the URX_LOOP_C that + // must follow. It's operand is the stack location + // that holds the starting input index for the match of this .* + loopcOp := pat[*fp.patIdx()] + stackLoc := loopcOp.value() + *fp.extra(stackLoc) = *fp.inputIdx() + *fp.inputIdx() = ix + + // Save State to the URX_LOOP_C op that follows this one, + // so that match failures in the following code will return to there. + // Then bump the pattern idx so the LOOP_C is skipped on the way out of here. + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()) + if err != nil { + return err + } + *fp.patIdx()++ + + case urxLoopC: + backSearchIndex := *fp.extra(op.value()) + + if backSearchIndex == *fp.inputIdx() { + // We've backed up the input idx to the point that the loop started. + // The loop is done. Leave here without saving state. + // Subsequent failures won't come back here. + break + } + // Set up for the next iteration of the loop, with input index + // backed up by one from the last time through, + // and a state save to this instruction in case the following code fails again. + // (We're going backwards because this loop emulates stack unwinding, not + // the initial scan forward.) + + prevC := charAt(inputText, *fp.inputIdx()-1) + *fp.inputIdx()-- + twoPrevC := charAt(inputText, *fp.inputIdx()-1) + + if prevC == 0x0a && + *fp.inputIdx() > backSearchIndex && + twoPrevC == 0x0d { + prevOp := pat[*fp.patIdx()-2] + if prevOp.typ() == urxLoopDotI { + // .*, stepping back over CRLF pair. + *fp.inputIdx()-- + } + } + + fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()-1) + if err != nil { + return err + } + default: + // Trouble. The compiled pattern contains an entry with an + // unrecognized type tag. + // Unknown opcode type in opType = URX_TYPE(pat[fp->fPatIdx]). But we have + // reports of this in production code, don't use UPRV_UNREACHABLE_EXIT. + // See ICU-21669. + return &MatchError{ + Code: InternalMatchError, + Pattern: m.pattern.pattern, + Position: *fp.inputIdx(), + Input: m.input, + } + } + } + +breakFromLoop: + m.match = isMatch + if isMatch { + m.lastMatchEnd = m.matchEnd + m.matchStart = startIdx + m.matchEnd = *fp.inputIdx() + } + + if m.dumper != nil { + if isMatch { + fmt.Fprintf(m.dumper, "Match. start=%d end=%d\n\n", m.matchStart, m.matchEnd) + } else { + fmt.Fprintf(m.dumper, "No match\n\n") + } + } + + m.frame = fp // The active stack frame when the engine stopped. + // Contains the capture group results that we need to + // access later. + return nil +} + +func charAt(str []rune, idx int) rune { + if idx >= 0 && idx < len(str) { + return str[idx] + } + return -1 +} + +func (m *Matcher) isWordBoundary(pos int) bool { + cIsWord := false + + if pos >= m.lookLimit { + m.hitEnd = true + } else { + c := charAt(m.input, pos) + if uprops.HasBinaryProperty(c, uprops.UCharGraphemeExtend) || uchar.CharType(c) == uchar.FormatChar { + return false + } + cIsWord = staticPropertySets[urxIswordSet].ContainsRune(c) + } + + prevCIsWord := false + for { + if pos <= m.lookStart { + break + } + prevChar := charAt(m.input, pos-1) + pos-- + if !(uprops.HasBinaryProperty(prevChar, uprops.UCharGraphemeExtend) || uchar.CharType(prevChar) == uchar.FormatChar) { + prevCIsWord = staticPropertySets[urxIswordSet].ContainsRune(prevChar) + break + } + } + return cIsWord != prevCIsWord +} + +func (m *Matcher) isUWordBoundary(pos int) bool { + // TODO: implement + /* + UBool returnVal = FALSE; + + #if UCONFIG_NO_BREAK_ITERATION==0 + // Note: this point will never be reached if break iteration is configured out. + // Regex patterns that would require this function will fail to compile. + + // If we haven't yet created a break iterator for this matcher, do it now. + if (fWordBreakItr == nullptr) { + fWordBreakItr = BreakIterator::createWordInstance(Locale::getEnglish(), status); + if (U_FAILURE(status)) { + return FALSE; + } + fWordBreakItr->setText(fInputText, status); + } + + // Note: zero width boundary tests like \b see through transparent region bounds, + // which is why fLookLimit is used here, rather than fActiveLimit. + if (pos >= fLookLimit) { + fHitEnd = TRUE; + returnVal = TRUE; // With Unicode word rules, only positions within the interior of "real" + // words are not boundaries. All non-word chars stand by themselves, + // with word boundaries on both sides. + } else { + returnVal = fWordBreakItr->isBoundary((int32_t)pos); + } + #endif + return returnVal; + */ + return false +} + +func (m *Matcher) resetStack() stackFrame { + m.stack.reset() + frame, _ := m.stack.newFrame(0, nil, "") + frame.clearExtra() + return frame +} + +func (m *Matcher) stateSave(inputIdx, savePatIdx int) (stackFrame, error) { + // push storage for a new frame. + newFP, err := m.stack.newFrame(inputIdx, m.input, m.pattern.pattern) + if err != nil { + return nil, err + } + fp := m.stack.prevFromTop() + + // New stack frame = copy of old top frame. + copy(newFP, fp) + + m.tickCounter-- + if m.tickCounter <= 0 { + if err := m.incrementTime(*fp.inputIdx()); err != nil { + return nil, err + } + } + *fp.patIdx() = savePatIdx + return newFP, nil +} + +func (m *Matcher) incrementTime(inputIdx int) error { + m.tickCounter = timerInitialValue + m.time++ + if m.timeLimit > 0 && m.time >= m.timeLimit { + return &MatchError{ + Code: TimeOut, + Pattern: m.pattern.pattern, + Position: inputIdx, + Input: m.input, + } + } + return nil +} + +func (m *Matcher) isDecimalDigit(c rune) bool { + return uchar.IsDigit(c) +} + +func (m *Matcher) isHorizWS(c rune) bool { + return uchar.CharType(c) == uchar.SpaceSeparator || c == 9 +} + +func (m *Matcher) followingGCBoundary(pos int) int { + // TODO: implement + return pos + /* + // Note: this point will never be reached if break iteration is configured out. + // Regex patterns that would require this function will fail to compile. + + // If we haven't yet created a break iterator for this matcher, do it now. + if (m.gcBreakItr == nil) { + m.gcBreakItr = BreakIterator::createCharacterInstance(Locale::getEnglish(), status); + if (U_FAILURE(status)) { + return pos; + } + fGCBreakItr->setText(fInputText, status); + } + result = fGCBreakItr->following(pos); + if (result == BreakIterator::DONE) { + result = pos; + } + */ +} + +func (m *Matcher) ResetString(input string) { + m.Reset([]rune(input)) +} + +func (m *Matcher) Reset(input []rune) { + m.input = input + m.reset() +} + +func (m *Matcher) Matches() (bool, error) { + err := m.MatchAt(m.activeStart, true) + return m.match, err +} + +func (m *Matcher) LookingAt() (bool, error) { + err := m.MatchAt(m.activeStart, false) + return m.match, err +} + +func (m *Matcher) Find() (bool, error) { + startPos := m.matchEnd + if startPos == 0 { + startPos = m.activeStart + } + + if m.match { + // Save the position of any previous successful match. + m.lastMatchEnd = m.matchEnd + if m.matchStart == m.matchEnd { + // Previous match had zero length. Move start position up one position + // to avoid sending find() into a loop on zero-length matches. + if startPos >= m.activeLimit { + m.match = false + m.hitEnd = true + return false, nil + } + startPos++ + } + } else { + if m.lastMatchEnd >= 0 { + // A previous find() failed to match. Don't try again. + // (without this test, a pattern with a zero-length match + // could match again at the end of an input string.) + m.hitEnd = true + return false, nil + } + } + + testStartLimit := m.activeLimit - int(m.pattern.minMatchLen) + if startPos > testStartLimit { + m.match = false + m.hitEnd = true + return false, nil + } + + switch m.pattern.startType { + case startNoInfo: + // No optimization was found. + // Try a match at each input position. + for { + err := m.MatchAt(startPos, false) + if err != nil { + return false, err + } + if m.match { + return true, nil + } + if startPos >= testStartLimit { + m.hitEnd = true + return false, nil + } + startPos++ + } + case startSet: + // Match may start on any char from a pre-computed set. + for { + pos := startPos + c := charAt(m.input, startPos) + startPos++ + // c will be -1 (U_SENTINEL) at end of text, in which case we + // skip this next block (so we don't have a negative array index) + // and handle end of text in the following block. + if c >= 0 && m.pattern.initialChars.ContainsRune(c) { + err := m.MatchAt(pos, false) + if err != nil { + return false, err + } + if m.match { + return true, nil + } + } + + if startPos > testStartLimit { + m.match = false + m.hitEnd = true + return false, nil + } + } + case startStart: + // Matches are only possible at the start of the input string + // (pattern begins with ^ or \A) + if startPos > m.activeStart { + m.match = false + return false, nil + } + err := m.MatchAt(startPos, false) + return m.match, err + case startLine: + var ch rune + if startPos == m.anchorStart { + err := m.MatchAt(startPos, false) + if err != nil { + return false, err + } + if m.match { + return true, nil + } + ch = charAt(m.input, startPos) + startPos++ + } else { + ch = charAt(m.input, startPos-1) + } + + if m.pattern.flags&UnixLines != 0 { + for { + if ch == 0x0a { + err := m.MatchAt(startPos, false) + if err != nil { + return false, err + } + if m.match { + return true, nil + } + } + if startPos >= testStartLimit { + m.match = false + m.hitEnd = true + return false, nil + } + ch = charAt(m.input, startPos) + startPos++ + } + } else { + for { + if isLineTerminator(ch) { + if ch == 0x0d && startPos < m.activeLimit && charAt(m.input, startPos) == 0x0a { + startPos++ + } + err := m.MatchAt(startPos, false) + if err != nil { + return false, err + } + if m.match { + return true, nil + } + } + if startPos >= testStartLimit { + m.match = false + m.hitEnd = true + return false, nil + } + ch = charAt(m.input, startPos) + startPos++ + } + } + case startChar, startString: + // Match starts on exactly one char. + theChar := m.pattern.initialChar + for { + pos := startPos + c := charAt(m.input, startPos) + startPos++ + if c == theChar { + err := m.MatchAt(pos, false) + if err != nil { + return false, err + } + if m.match { + return true, nil + } + } + if startPos > testStartLimit { + m.match = false + m.hitEnd = true + return false, nil + } + } + default: + // Unknown value in fPattern->fStartType, should be from StartOfMatch enum. But + // we have reports of this in production code, don't use UPRV_UNREACHABLE_EXIT. + // See ICU-21669. + return false, &MatchError{ + Code: InternalMatchError, + Pattern: m.pattern.pattern, + Position: startPos, + Input: m.input, + } + } +} + +func (m *Matcher) Start() int { + if !m.match { + return -1 + } + + return m.matchStart +} + +func (m *Matcher) reset() { + m.regionStart = 0 + m.regionLimit = len(m.input) + m.activeStart = 0 + m.activeLimit = len(m.input) + m.anchorStart = 0 + m.anchorLimit = len(m.input) + m.lookStart = 0 + m.lookLimit = len(m.input) + m.resetPreserveRegion() +} + +func (m *Matcher) resetPreserveRegion() { + m.matchStart = 0 + m.matchEnd = 0 + m.lastMatchEnd = -1 + m.appendPosition = 0 + m.match = false + m.hitEnd = false + m.requireEnd = false + m.time = 0 + m.tickCounter = timerInitialValue +} + +func (m *Matcher) GroupCount() int { + return len(m.pattern.groupMap) +} + +func (m *Matcher) StartForGroup(group int) int { + if !m.match { + return -1 + } + if group < 0 || group > len(m.pattern.groupMap) { + return -1 + } + if group == 0 { + return m.matchStart + } + groupOffset := int(m.pattern.groupMap[group-1]) + return *m.frame.extra(groupOffset) +} + +func (m *Matcher) EndForGroup(group int) int { + if !m.match { + return -1 + } + if group < 0 || group > len(m.pattern.groupMap) { + return -1 + } + if group == 0 { + return m.matchEnd + } + groupOffset := int(m.pattern.groupMap[group-1]) + return *m.frame.extra(groupOffset + 1) +} + +func (m *Matcher) HitEnd() bool { + return m.hitEnd +} + +func (m *Matcher) RequireEnd() bool { + return m.requireEnd +} + +func (m *Matcher) Group(i int) (string, bool) { + start := m.StartForGroup(i) + end := m.EndForGroup(i) + if start == -1 || end == -1 { + return "", false + } + return string(m.input[start:end]), true +} + +func (m *Matcher) End() int { + if !m.match { + return -1 + } + + return m.matchEnd +} + +func (m *Matcher) Dumper(out io.Writer) { + m.dumper = out +} + +// Test for any of the Unicode line terminating characters. +func isLineTerminator(c rune) bool { + if (c & ^(0x0a | 0x0b | 0x0c | 0x0d | 0x85 | 0x2028 | 0x2029)) != 0 { + return false + } + return (c <= 0x0d && c >= 0x0a) || c == 0x85 || c == 0x2028 || c == 0x2029 +} diff --git a/go/mysql/icuregex/ops.go b/go/mysql/icuregex/ops.go new file mode 100644 index 00000000000..4150cf523d2 --- /dev/null +++ b/go/mysql/icuregex/ops.go @@ -0,0 +1,414 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +import ( + "slices" + + "vitess.io/vitess/go/mysql/icuregex/internal/ucase" + "vitess.io/vitess/go/mysql/icuregex/internal/utf16" +) + +type opcode uint8 + +const ( + urxReservedOp opcode = iota // For multi-operand ops, most non-first words. + urxBacktrack // Force a backtrack, as if a match test had failed. + urxEnd + urxOnechar // Value field is the 21 bit unicode char to match + urxString // Value field is index of string start + urxStringLen // Value field is string length (code units) + urxStateSave // Value field is pattern position to push + urxNop + urxStartCapture // Value field is capture group number. + urxEndCapture // Value field is capture group number + urxStaticSetref // Value field is index of set in array of sets. + urxSetref // Value field is index of set in array of sets. + urxDotany + urxJmp // Value field is destination position in the pattern. + urxFail // Stop match operation, No match. + + urxJmpSav // Operand: JMP destination location + urxBackslashB // Value field: 0: \b 1: \B + urxBackslashG + urxJmpSavX // Conditional JMP_SAV, + // Used in (x)+, breaks loop on zero length match. + // Operand: Jmp destination. + urxBackslashX + urxBackslashZ // \z Unconditional end of line. + + urxDotanyAll // ., in the . matches any mode. + urxBackslashD // Value field: 0: \d 1: \D + urxCaret // Value field: 1: multi-line mode. + urxDollar // Also for \Z + + urxCtrInit // Counter Inits for {Interval} loops. + urxCtrInitNg // 2 kinds, normal and non-greedy. + // These are 4 word opcodes. See description. + // First Operand: Data loc of counter variable + // 2nd Operand: Pat loc of the URX_CTR_LOOPx + // at the end of the loop. + // 3rd Operand: Minimum count. + // 4th Operand: Max count, -1 for unbounded. + + urxDotanyUnix // '.' operator in UNIX_LINES mode, only \n marks end of line. + + utxCtrLoop // Loop Ops for {interval} loops. + urxCtrLoopNg // Also in three flavors. + // Operand is loc of corresponding CTR_INIT. + + urxCaretMUnix // '^' operator, test for start of line in multi-line + // plus UNIX_LINES mode. + + urxRelocOprnd // Operand value in multi-operand ops that refers + // back into compiled pattern code, and thus must + // be relocated when inserting/deleting ops in code. + + urxStoSp // Store the stack ptr. Operand is location within + // matcher data (not stack data) to store it. + urxLdSp // Load the stack pointer. Operand is location + // to load from. + urxBackref // Back Reference. Parameter is the index of the + // capture group variables in the state stack frame. + urxStoInpLoc // Store the input location. Operand is location + // within the matcher stack frame. + urxJmpx // Conditional JMP. + // First Operand: JMP target location. + // Second Operand: Data location containing an + // input position. If current input position == + // saved input position, FAIL rather than taking + // the JMP + urxLaStart // Starting a LookAround expression. + // Save InputPos, SP and active region in static data. + // Operand: Static data offset for the save + urxLaEnd // Ending a Lookaround expression. + // Restore InputPos and Stack to saved values. + // Operand: Static data offset for saved data. + urcOnecharI // Test for case-insensitive match of a literal character. + // Operand: the literal char. + urxStringI // Case insensitive string compare. + // First Operand: Index of start of string in string literals + // Second Operand (next word in compiled code): + // the length of the string. + urxBackrefI // Case insensitive back reference. + // Parameter is the index of the + // capture group variables in the state stack frame. + urxDollarM // $ in multi-line mode. + urxCaretM // ^ in multi-line mode. + urxLbStart // LookBehind Start. + // Parameter is data location + urxLbCont // LookBehind Continue. + // Param 0: the data location + // Param 1: The minimum length of the look-behind match + // Param 2: The max length of the look-behind match + urxLbEnd // LookBehind End. + // Parameter is the data location. + // Check that match ended at the right spot, + // Restore original input string len. + urxLbnCount // Negative LookBehind Continue + // Param 0: the data location + // Param 1: The minimum length of the look-behind match + // Param 2: The max length of the look-behind match + // Param 3: The pattern loc following the look-behind block. + urxLbnEnd // Negative LookBehind end + // Parameter is the data location. + // Check that the match ended at the right spot. + urxStatSetrefN // Reference to a prebuilt set (e.g. \w), negated + // Operand is index of set in array of sets. + urxLoopSrI // Init a [set]* loop. + // Operand is the sets index in array of user sets. + urxLoopC // Continue a [set]* or OneChar* loop. + // Operand is a matcher static data location. + // Must always immediately follow LOOP_x_I instruction. + urxLoopDotI // .*, initialization of the optimized loop. + // Operand value: + // bit 0: + // 0: Normal (. doesn't match new-line) mode. + // 1: . matches new-line mode. + // bit 1: controls what new-lines are recognized by this operation. + // 0: All Unicode New-lines + // 1: UNIX_LINES, \u000a only. + urxBackslashBu // \b or \B in UREGEX_UWORD mode, using Unicode style + // word boundaries. + urxDollarD // $ end of input test, in UNIX_LINES mode. + urxDollarMd // $ end of input test, in MULTI_LINE and UNIX_LINES mode. + urxBackslashH // Value field: 0: \h 1: \H + urxBackslashR // Any line break sequence. + urxBackslashV // Value field: 0: \v 1: \V + + urxReservedOpN opcode = 255 // For multi-operand ops, negative operand values. +) + +// Keep this list of opcode names in sync with the above enum +// +// Used for debug printing only. +var urxOpcodeNames = []string{ + " ", + "BACKTRACK", + "END", + "ONECHAR", + "STRING", + "STRING_LEN", + "STATE_SAVE", + "NOP", + "START_CAPTURE", + "END_CAPTURE", + "URX_STATIC_SETREF", + "SETREF", + "DOTANY", + "JMP", + "FAIL", + "JMP_SAV", + "BACKSLASH_B", + "BACKSLASH_G", + "JMP_SAV_X", + "BACKSLASH_X", + "BACKSLASH_Z", + "DOTANY_ALL", + "BACKSLASH_D", + "CARET", + "DOLLAR", + "CTR_INIT", + "CTR_INIT_NG", + "DOTANY_UNIX", + "CTR_LOOP", + "CTR_LOOP_NG", + "URX_CARET_M_UNIX", + "RELOC_OPRND", + "STO_SP", + "LD_SP", + "BACKREF", + "STO_INP_LOC", + "JMPX", + "LA_START", + "LA_END", + "ONECHAR_I", + "STRING_I", + "BACKREF_I", + "DOLLAR_M", + "CARET_M", + "LB_START", + "LB_CONT", + "LB_END", + "LBN_CONT", + "LBN_END", + "STAT_SETREF_N", + "LOOP_SR_I", + "LOOP_C", + "LOOP_DOT_I", + "BACKSLASH_BU", + "DOLLAR_D", + "DOLLAR_MD", + "URX_BACKSLASH_H", + "URX_BACKSLASH_R", + "URX_BACKSLASH_V", +} + +type instruction int32 + +func (ins instruction) typ() opcode { + return opcode(uint32(ins) >> 24) +} + +func (ins instruction) value32() int32 { + return int32(ins) & 0xffffff +} + +func (ins instruction) value() int { + return int(ins.value32()) +} + +// Access to Unicode Sets composite character properties +// +// The sets are accessed by the match engine for things like \w (word boundary) +const ( + urxIswordSet = 1 + urxIsalnumSet = 2 + urxIsalphaSet = 3 + urxIsspaceSet = 4 + + urxGcNormal = iota + 1 // Sets for finding grapheme cluster boundaries. + urxGcExtend + urxGcControl + urxGcL + urxGcLv + urxGcLvt + urxGcV + urxGcT + + urxNegSet = 0x800000 // Flag bit to reverse sense of set + // membership test. +) + +type stack struct { + ary []int + frameSize int + stackLimit int +} + +type stackFrame []int + +func (f stackFrame) inputIdx() *int { + return &f[0] +} + +func (f stackFrame) patIdx() *int { + return &f[1] +} + +func (f stackFrame) extra(n int) *int { + return &f[2+n] +} + +func (f stackFrame) equals(f2 stackFrame) bool { + return &f[0] == &f2[0] +} + +func (s *stack) len() int { + return len(s.ary) +} + +func (s *stack) sp() int { + return len(s.ary) - s.frameSize +} + +func (s *stack) newFrame(inputIdx int, input []rune, pattern string) (stackFrame, error) { + if s.stackLimit != 0 && len(s.ary)+s.frameSize > s.stackLimit { + return nil, &MatchError{ + Code: StackOverflow, + Pattern: pattern, + Position: inputIdx, + Input: input, + } + } + s.ary = slices.Grow(s.ary, s.frameSize) + + f := s.ary[len(s.ary) : len(s.ary)+s.frameSize] + s.ary = s.ary[:len(s.ary)+s.frameSize] + return f, nil +} + +func (s *stack) prevFromTop() stackFrame { + return s.ary[len(s.ary)-2*s.frameSize:] +} + +func (s *stack) popFrame() stackFrame { + s.ary = s.ary[:len(s.ary)-s.frameSize] + return s.ary[len(s.ary)-s.frameSize:] +} + +func (s *stack) reset() { + s.ary = s.ary[:0] +} + +func (s *stack) offset(size int) stackFrame { + return s.ary[size-s.frameSize : size] +} + +func (s *stack) setSize(size int) { + s.ary = s.ary[:size] +} + +func (f stackFrame) clearExtra() { + for i := 2; i < len(f); i++ { + f[i] = -1 + } +} + +// number of UVector elements in the header +const restackframeHdrCount = 2 + +// Start-Of-Match type. Used by find() to quickly scan to positions where a +// +// match might start before firing up the full match engine. +type startOfMatch int8 + +const ( + startNoInfo startOfMatch = iota // No hint available. + startChar // Match starts with a literal code point. + startSet // Match starts with something matching a set. + startStart // Match starts at start of buffer only (^ or \A) + startLine // Match starts with ^ in multi-line mode. + startString // Match starts with a literal string. +) + +func (som startOfMatch) String() string { + switch som { + case startNoInfo: + return "START_NO_INFO" + case startChar: + return "START_CHAR" + case startSet: + return "START_SET" + case startStart: + return "START_START" + case startLine: + return "START_LINE" + case startString: + return "START_STRING" + default: + panic("unknown StartOfMatch") + } +} + +type caseFoldIterator struct { + chars []rune + index int + limit int + + foldChars []uint16 +} + +func (it *caseFoldIterator) next() rune { + if len(it.foldChars) == 0 { + // We are not in a string folding of an earlier character. + // Start handling the next char from the input UText. + if it.index >= it.limit { + return -1 + } + + originalC := it.chars[it.index] + it.index++ + + originalC, it.foldChars = ucase.FullFolding(originalC) + if len(it.foldChars) == 0 { + // input code point folds to a single code point, possibly itself. + return originalC + } + } + + var res rune + res, it.foldChars = utf16.NextUnsafe(it.foldChars) + return res +} + +func (it *caseFoldIterator) inExpansion() bool { + return len(it.foldChars) > 0 +} + +func newCaseFoldIterator(chars []rune, start, limit int) caseFoldIterator { + return caseFoldIterator{ + chars: chars, + index: start, + limit: limit, + } +} diff --git a/go/mysql/icuregex/pattern.go b/go/mysql/icuregex/pattern.go new file mode 100644 index 00000000000..90e69b3f55d --- /dev/null +++ b/go/mysql/icuregex/pattern.go @@ -0,0 +1,136 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +import ( + "vitess.io/vitess/go/mysql/icuregex/internal/uset" +) + +type Pattern struct { + pattern string + flags RegexpFlag + + compiledPat []instruction + literalText []rune + + sets []*uset.UnicodeSet + + minMatchLen int32 + frameSize int + dataSize int + + groupMap []int32 + + startType startOfMatch + initialStringIdx int + initialStringLen int + initialChars *uset.UnicodeSet + initialChar rune + needsAltInput bool + + namedCaptureMap map[string]int +} + +func NewPattern(flags RegexpFlag) *Pattern { + return &Pattern{ + flags: flags, + initialChars: uset.New(), + // Slot zero of the vector of sets is reserved. Fill it here. + sets: []*uset.UnicodeSet{nil}, + } +} + +func Compile(in []rune, flags RegexpFlag) (*Pattern, error) { + pat := NewPattern(flags) + cmp := newCompiler(pat) + if err := cmp.compile(in); err != nil { + return nil, err + } + return pat, nil +} + +func CompileString(in string, flags RegexpFlag) (*Pattern, error) { + return Compile([]rune(in), flags) +} + +func (p *Pattern) Match(input string) *Matcher { + m := NewMatcher(p) + m.ResetString(input) + return m +} + +type RegexpFlag int32 + +const ( + /** Enable case insensitive matching. @stable ICU 2.4 */ + CaseInsensitive RegexpFlag = 2 + + /** Allow white space and comments within patterns @stable ICU 2.4 */ + Comments RegexpFlag = 4 + + /** If set, '.' matches line terminators, otherwise '.' matching stops at line end. + * @stable ICU 2.4 */ + DotAll RegexpFlag = 32 + + /** If set, treat the entire pattern as a literal string. + * Metacharacters or escape sequences in the input sequence will be given + * no special meaning. + * + * The flag UREGEX_CASE_INSENSITIVE retains its impact + * on matching when used in conjunction with this flag. + * The other flags become superfluous. + * + * @stable ICU 4.0 + */ + Literal RegexpFlag = 16 + + /** Control behavior of "$" and "^" + * If set, recognize line terminators within string, + * otherwise, match only at start and end of input string. + * @stable ICU 2.4 */ + Multiline RegexpFlag = 8 + + /** Unix-only line endings. + * When this mode is enabled, only \\u000a is recognized as a line ending + * in the behavior of ., ^, and $. + * @stable ICU 4.0 + */ + UnixLines RegexpFlag = 1 + + /** Unicode word boundaries. + * If set, \b uses the Unicode TR 29 definition of word boundaries. + * Warning: Unicode word boundaries are quite different from + * traditional regular expression word boundaries. See + * http://unicode.org/reports/tr29/#Word_Boundaries + * @stable ICU 2.8 + */ + UWord RegexpFlag = 256 + + /** Error on Unrecognized backslash escapes. + * If set, fail with an error on patterns that contain + * backslash-escaped ASCII letters without a known special + * meaning. If this flag is not set, these + * escaped letters represent themselves. + * @stable ICU 4.0 + */ + ErrorOnUnknownEscapes RegexpFlag = 512 +) diff --git a/go/mysql/icuregex/perl_test.go b/go/mysql/icuregex/perl_test.go new file mode 100644 index 00000000000..e8dfc95d6b0 --- /dev/null +++ b/go/mysql/icuregex/perl_test.go @@ -0,0 +1,211 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +import ( + "bufio" + "os" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestPerl(t *testing.T) { + f, err := os.Open("testdata/re_tests.txt") + require.NoError(t, err) + defer f.Close() + + flagPat, err := CompileString(`('?)(.*)\1(.*)`, 0) + require.NoError(t, err) + flagMat := NewMatcher(flagPat) + + groupsPat, err := CompileString(`\$([+\-])\[(\d+)\]`, 0) + require.NoError(t, err) + groupsMat := NewMatcher(groupsPat) + + cgPat, err := CompileString(`\$(\d+)`, 0) + require.NoError(t, err) + cgMat := NewMatcher(cgPat) + + group := func(m *Matcher, idx int) string { + g, _ := m.Group(idx) + return g + } + + lookingAt := func(m *Matcher) bool { + ok, err := m.LookingAt() + require.NoError(t, err) + return ok + } + + replacer := strings.NewReplacer( + `${bang}`, "!", + `${nulnul}`, "\x00\x00", + `${ffff}`, "\uffff", + ) + + scanner := bufio.NewScanner(f) + var lineno int + + for scanner.Scan() { + lineno++ + fields := strings.Split(scanner.Text(), "\t") + + flagMat.ResetString(fields[0]) + ok, _ := flagMat.Matches() + require.Truef(t, ok, "could not match pattern+flags (line %d)", lineno) + + pattern, _ := flagMat.Group(2) + pattern = replacer.Replace(pattern) + + flagStr, _ := flagMat.Group(3) + var flags RegexpFlag + if strings.IndexByte(flagStr, 'i') >= 0 { + flags |= CaseInsensitive + } + if strings.IndexByte(flagStr, 'm') >= 0 { + flags |= Multiline + } + if strings.IndexByte(flagStr, 'x') >= 0 { + flags |= Comments + } + + testPat, err := CompileString(pattern, flags) + if err != nil { + if cerr, ok := err.(*CompileError); ok && cerr.Code == Unimplemented { + continue + } + if strings.IndexByte(fields[2], 'c') == -1 && strings.IndexByte(fields[2], 'i') == -1 { + t.Errorf("line %d: ICU error %q", lineno, err) + } + continue + } + + if strings.IndexByte(fields[2], 'i') >= 0 { + continue + } + if strings.IndexByte(fields[2], 'c') >= 0 { + t.Errorf("line %d: expected error", lineno) + continue + } + + matchString := fields[1] + matchString = replacer.Replace(matchString) + matchString = strings.ReplaceAll(matchString, `\n`, "\n") + + testMat := testPat.Match(matchString) + found, _ := testMat.Find() + expected := strings.IndexByte(fields[2], 'y') >= 0 + + if expected != found { + t.Errorf("line %d: expected %v, found %v", lineno, expected, found) + continue + } + + if !found { + continue + } + + var result []byte + var perlExpr = fields[3] + + for len(perlExpr) > 0 { + groupsMat.ResetString(perlExpr) + cgMat.ResetString(perlExpr) + + switch { + case strings.HasPrefix(perlExpr, "$&"): + result = append(result, group(testMat, 0)...) + perlExpr = perlExpr[2:] + + case lookingAt(groupsMat): + groupNum, err := strconv.ParseInt(group(groupsMat, 2), 10, 32) + require.NoError(t, err) + + var matchPosition int + if group(groupsMat, 1) == "+" { + matchPosition = testMat.EndForGroup(int(groupNum)) + } else { + matchPosition = testMat.StartForGroup(int(groupNum)) + } + if matchPosition != -1 { + result = strconv.AppendInt(result, int64(matchPosition), 10) + } + + perlExpr = perlExpr[groupsMat.EndForGroup(0):] + + case lookingAt(cgMat): + groupNum, err := strconv.ParseInt(group(cgMat, 1), 10, 32) + require.NoError(t, err) + result = append(result, group(testMat, int(groupNum))...) + perlExpr = perlExpr[cgMat.EndForGroup(0):] + + case strings.HasPrefix(perlExpr, "@-"): + for i := 0; i <= testMat.GroupCount(); i++ { + if i > 0 { + result = append(result, ' ') + } + result = strconv.AppendInt(result, int64(testMat.StartForGroup(i)), 10) + } + perlExpr = perlExpr[2:] + + case strings.HasPrefix(perlExpr, "@+"): + for i := 0; i <= testMat.GroupCount(); i++ { + if i > 0 { + result = append(result, ' ') + } + result = strconv.AppendInt(result, int64(testMat.EndForGroup(i)), 10) + } + perlExpr = perlExpr[2:] + + case strings.HasPrefix(perlExpr, "\\"): + if len(perlExpr) > 1 { + perlExpr = perlExpr[1:] + } + c := perlExpr[0] + switch c { + case 'n': + c = '\n' + } + result = append(result, c) + perlExpr = perlExpr[1:] + + default: + result = append(result, perlExpr[0]) + perlExpr = perlExpr[1:] + } + } + + var expectedS string + if len(fields) > 4 { + expectedS = fields[4] + expectedS = replacer.Replace(expectedS) + expectedS = strings.ReplaceAll(expectedS, `\n`, "\n") + } + + if expectedS != string(result) { + t.Errorf("line %d: Incorrect Perl expression results for %s\nwant: %q\ngot: %q", lineno, pattern, expectedS, result) + } + } +} diff --git a/go/mysql/icuregex/sets.go b/go/mysql/icuregex/sets.go new file mode 100644 index 00000000000..0f745b3374d --- /dev/null +++ b/go/mysql/icuregex/sets.go @@ -0,0 +1,104 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +import ( + "vitess.io/vitess/go/mysql/icuregex/internal/uprops" + "vitess.io/vitess/go/mysql/icuregex/internal/uset" +) + +var staticPropertySets [13]*uset.UnicodeSet + +func init() { + staticPropertySets[urxIswordSet] = func() *uset.UnicodeSet { + s := uset.New() + s.AddAll(uprops.MustNewUnicodeSetFomPattern(`\p{Alphabetic}`, 0)) + s.AddAll(uprops.MustNewUnicodeSetFomPattern(`\p{M}`, 0)) + s.AddAll(uprops.MustNewUnicodeSetFomPattern(`\p{Nd}`, 0)) + s.AddAll(uprops.MustNewUnicodeSetFomPattern(`\p{Pc}`, 0)) + s.AddRune(0x200c) + s.AddRune(0x200d) + return s.Freeze() + }() + + staticPropertySets[urxIsspaceSet] = uprops.MustNewUnicodeSetFomPattern(`\p{Whitespace}`, 0).Freeze() + + staticPropertySets[urxGcExtend] = uprops.MustNewUnicodeSetFomPattern(`\p{Grapheme_Extend}`, 0).Freeze() + staticPropertySets[urxGcControl] = func() *uset.UnicodeSet { + s := uset.New() + s.AddAll(uprops.MustNewUnicodeSetFomPattern(`[:Zl:]`, 0)) + s.AddAll(uprops.MustNewUnicodeSetFomPattern(`[:Zp:]`, 0)) + s.AddAll(uprops.MustNewUnicodeSetFomPattern(`[:Cc:]`, 0)) + s.AddAll(uprops.MustNewUnicodeSetFomPattern(`[:Cf:]`, 0)) + s.RemoveAll(uprops.MustNewUnicodeSetFomPattern(`[:Grapheme_Extend:]`, 0)) + return s.Freeze() + }() + staticPropertySets[urxGcL] = uprops.MustNewUnicodeSetFomPattern(`\p{Hangul_Syllable_Type=L}`, 0).Freeze() + staticPropertySets[urxGcLv] = uprops.MustNewUnicodeSetFomPattern(`\p{Hangul_Syllable_Type=LV}`, 0).Freeze() + staticPropertySets[urxGcLvt] = uprops.MustNewUnicodeSetFomPattern(`\p{Hangul_Syllable_Type=LVT}`, 0).Freeze() + staticPropertySets[urxGcV] = uprops.MustNewUnicodeSetFomPattern(`\p{Hangul_Syllable_Type=V}`, 0).Freeze() + staticPropertySets[urxGcT] = uprops.MustNewUnicodeSetFomPattern(`\p{Hangul_Syllable_Type=T}`, 0).Freeze() + + staticPropertySets[urxGcNormal] = func() *uset.UnicodeSet { + s := uset.New() + s.Complement() + s.RemoveRuneRange(0xac00, 0xd7a4) + s.RemoveAll(staticPropertySets[urxGcControl]) + s.RemoveAll(staticPropertySets[urxGcL]) + s.RemoveAll(staticPropertySets[urxGcV]) + s.RemoveAll(staticPropertySets[urxGcT]) + return s.Freeze() + }() +} + +var staticSetUnescape = func() *uset.UnicodeSet { + u := uset.New() + u.AddString("acefnrtuUx") + return u.Freeze() +}() + +const ( + ruleSetDigitChar = 128 + ruleSetASCIILetter = 129 + ruleSetRuleChar = 130 + ruleSetCount = 131 - 128 +) + +var staticRuleSet = [ruleSetCount]*uset.UnicodeSet{ + func() *uset.UnicodeSet { + u := uset.New() + u.AddRuneRange('0', '9') + return u.Freeze() + }(), + func() *uset.UnicodeSet { + u := uset.New() + u.AddRuneRange('A', 'Z') + u.AddRuneRange('a', 'z') + return u.Freeze() + }(), + func() *uset.UnicodeSet { + u := uset.New() + u.AddString("*?+[(){}^$|\\.") + u.Complement() + return u.Freeze() + }(), +} diff --git a/go/mysql/icuregex/sets_test.go b/go/mysql/icuregex/sets_test.go new file mode 100644 index 00000000000..58da9882701 --- /dev/null +++ b/go/mysql/icuregex/sets_test.go @@ -0,0 +1,66 @@ +/* +© 2016 and later: Unicode, Inc. and others. +Copyright (C) 2004-2015, International Business Machines Corporation and others. +Copyright 2023 The Vitess Authors. + +This file contains code derived from the Unicode Project's ICU library. +License & terms of use for the original code: http://www.unicode.org/copyright.html + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package icuregex + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStaticSetContents(t *testing.T) { + // These are the number of codepoints contained in each of the static sets as of ICU73-2, + // as to sanity check that we're re-creating the sets properly. + // This table must be re-created when updating Unicode versions. + var ExpectedSetSizes = map[int]int{ + 1: 139612, + 4: 25, + 5: 1102442, + 6: 2125, + 7: 140, + 8: 125, + 9: 399, + 10: 10773, + 11: 95, + 12: 137, + } + + for setid, expected := range ExpectedSetSizes { + assert.Equalf(t, expected, staticPropertySets[setid].Len(), "static set [%d] has wrong size", setid) + } +} + +func TestStaticFreeze(t *testing.T) { + for _, s := range staticPropertySets { + if err := s.FreezeCheck_(); err != nil { + t.Error(err) + } + } + for _, s := range staticRuleSet { + if err := s.FreezeCheck_(); err != nil { + t.Error(err) + } + } + if err := staticSetUnescape.FreezeCheck_(); err != nil { + t.Error(err) + } +} diff --git a/go/mysql/icuregex/testdata/re_tests.txt b/go/mysql/icuregex/testdata/re_tests.txt new file mode 100644 index 00000000000..c18b638f9b3 --- /dev/null +++ b/go/mysql/icuregex/testdata/re_tests.txt @@ -0,0 +1,923 @@ +abc abc y $& abc +abc abc y $-[0] 0 +abc abc y $+[0] 3 +abc xbc n - - +abc axc n - - +abc abx n - - +abc xabcy y $& abc +abc xabcy y $-[0] 1 +abc xabcy y $+[0] 4 +abc ababc y $& abc +abc ababc y $-[0] 2 +abc ababc y $+[0] 5 +ab*c abc y $& abc +ab*c abc y $-[0] 0 +ab*c abc y $+[0] 3 +ab*bc abc y $& abc +ab*bc abc y $-[0] 0 +ab*bc abc y $+[0] 3 +ab*bc abbc y $& abbc +ab*bc abbc y $-[0] 0 +ab*bc abbc y $+[0] 4 +ab*bc abbbbc y $& abbbbc +ab*bc abbbbc y $-[0] 0 +ab*bc abbbbc y $+[0] 6 +.{1} abbbbc y $& a +.{1} abbbbc y $-[0] 0 +.{1} abbbbc y $+[0] 1 +.{3,4} abbbbc y $& abbb +.{3,4} abbbbc y $-[0] 0 +.{3,4} abbbbc y $+[0] 4 +ab{0,}bc abbbbc y $& abbbbc +ab{0,}bc abbbbc y $-[0] 0 +ab{0,}bc abbbbc y $+[0] 6 +ab+bc abbc y $& abbc +ab+bc abbc y $-[0] 0 +ab+bc abbc y $+[0] 4 +ab+bc abc n - - +ab+bc abq n - - +ab{1,}bc abq n - - +ab+bc abbbbc y $& abbbbc +ab+bc abbbbc y $-[0] 0 +ab+bc abbbbc y $+[0] 6 +ab{1,}bc abbbbc y $& abbbbc +ab{1,}bc abbbbc y $-[0] 0 +ab{1,}bc abbbbc y $+[0] 6 +ab{1,3}bc abbbbc y $& abbbbc +ab{1,3}bc abbbbc y $-[0] 0 +ab{1,3}bc abbbbc y $+[0] 6 +ab{3,4}bc abbbbc y $& abbbbc +ab{3,4}bc abbbbc y $-[0] 0 +ab{3,4}bc abbbbc y $+[0] 6 +ab{4,5}bc abbbbc n - - +ab?bc abbc y $& abbc +ab?bc abc y $& abc +ab{0,1}bc abc y $& abc +ab?bc abbbbc n - - +ab?c abc y $& abc +ab{0,1}c abc y $& abc +^abc$ abc y $& abc +^abc$ abcc n - - +^abc abcc y $& abc +^abc$ aabc n - - +abc$ aabc y $& abc +abc$ aabcd n - - +^ abc y $& +$ abc y $& +a.c abc y $& abc +a.c axc y $& axc +a.*c axyzc y $& axyzc +a.*c axyzd n - - +a[bc]d abc n - - +a[bc]d abd y $& abd +a[b-d]e abd n - - +a[b-d]e ace y $& ace +a[b-d] aac y $& ac +a[-b] a- y $& a- +a[b-] a- y $& a- +a[b-a] - c - Invalid [] range "b-a" +a[]b - ci - Unmatched [ +a[ - c - Unmatched [ +a] a] y $& a] +a[]]b a]b y $& a]b +a[^bc]d aed y $& aed +a[^bc]d abd n - - +a[^-b]c adc y $& adc +a[^-b]c a-c n - - +a[^]b]c a]c n - - +a[^]b]c adc y $& adc +\ba\b a- y - - +\ba\b -a y - - +\ba\b -a- y - - +\by\b xy n - - +\by\b yz n - - +\by\b xyz n - - +\Ba\B a- n - - +\Ba\B -a n - - +\Ba\B -a- n - - +\By\b xy y - - +\By\b xy y $-[0] 1 +\By\b xy y $+[0] 2 +\By\b xy y - - +\by\B yz y - - +\By\B xyz y - - +\w a y - - +\w - n - - +\W a n - - +\W - y - - +a\sb a b y - - +a\sb a-b n - - +a\Sb a b n - - +a\Sb a-b y - - +\d 1 y - - +\d - n - - +\D 1 n - - +\D - y - - +[\w] a y - - +[\w] - n - - +[\W] a n - - +[\W] - y - - +a[\s]b a b y - - +a[\s]b a-b n - - +a[\S]b a b n - - +a[\S]b a-b y - - +[\d] 1 y - - +[\d] - n - - +[\D] 1 n - - +[\D] - y - - +ab|cd abc y $& ab +ab|cd abcd y $& ab +()ef def y $&-$1 ef- +()ef def y $-[0] 1 +()ef def y $+[0] 3 +()ef def y $-[1] 1 +()ef def y $+[1] 1 +*a - c - Quantifier follows nothing +(*)b - c - Quantifier follows nothing +$b b n - - +a\ - c - Search pattern not terminated +a\(b a(b y $&-$1 a(b- +a\(*b ab y $& ab +a\(*b a((b y $& a((b +a\\b a\b y $& a\b +abc) - c - Unmatched ) +(abc - c - Unmatched ( +((a)) abc y $&-$1-$2 a-a-a +((a)) abc y $-[0]-$-[1]-$-[2] 0-0-0 +((a)) abc y $+[0]-$+[1]-$+[2] 1-1-1 +((a)) abc by @- 0 0 0 +((a)) abc by @+ 1 1 1 +(a)b(c) abc y $&-$1-$2 abc-a-c +(a)b(c) abc y $-[0]-$-[1]-$-[2] 0-0-2 +(a)b(c) abc y $+[0]-$+[1]-$+[2] 3-1-3 +a+b+c aabbabc y $& abc +a{1,}b{1,}c aabbabc y $& abc +a** - c - Nested quantifiers +a.+?c abcabc y $& abc +(a+|b)* ab y $&-$1 ab-b +(a+|b)* ab y $-[0] 0 +(a+|b)* ab y $+[0] 2 +(a+|b)* ab y $-[1] 1 +(a+|b)* ab y $+[1] 2 +(a+|b){0,} ab y $&-$1 ab-b +(a+|b)+ ab y $&-$1 ab-b +(a+|b){1,} ab y $&-$1 ab-b +(a+|b)? ab y $&-$1 a-a +(a+|b){0,1} ab y $&-$1 a-a +)( - c - Unmatched ) +[^ab]* cde y $& cde +abc n - - +a* y $& +([abc])*d abbbcd y $&-$1 abbbcd-c +([abc])*bcd abcd y $&-$1 abcd-a +a|b|c|d|e e y $& e +(a|b|c|d|e)f ef y $&-$1 ef-e +(a|b|c|d|e)f ef y $-[0] 0 +(a|b|c|d|e)f ef y $+[0] 2 +(a|b|c|d|e)f ef y $-[1] 0 +(a|b|c|d|e)f ef y $+[1] 1 +abcd*efg abcdefg y $& abcdefg +ab* xabyabbbz y $& ab +ab* xayabbbz y $& a +(ab|cd)e abcde y $&-$1 cde-cd +[abhgefdc]ij hij y $& hij +^(ab|cd)e abcde n x$1y xy +(abc|)ef abcdef y $&-$1 ef- +(a|b)c*d abcd y $&-$1 bcd-b +(ab|ab*)bc abc y $&-$1 abc-a +a([bc]*)c* abc y $&-$1 abc-bc +a([bc]*)(c*d) abcd y $&-$1-$2 abcd-bc-d +a([bc]*)(c*d) abcd y $-[0] 0 +a([bc]*)(c*d) abcd y $+[0] 4 +a([bc]*)(c*d) abcd y $-[1] 1 +a([bc]*)(c*d) abcd y $+[1] 3 +a([bc]*)(c*d) abcd y $-[2] 3 +a([bc]*)(c*d) abcd y $+[2] 4 +a([bc]+)(c*d) abcd y $&-$1-$2 abcd-bc-d +a([bc]*)(c+d) abcd y $&-$1-$2 abcd-b-cd +a([bc]*)(c+d) abcd y $-[0] 0 +a([bc]*)(c+d) abcd y $+[0] 4 +a([bc]*)(c+d) abcd y $-[1] 1 +a([bc]*)(c+d) abcd y $+[1] 2 +a([bc]*)(c+d) abcd y $-[2] 2 +a([bc]*)(c+d) abcd y $+[2] 4 +a[bcd]*dcdcde adcdcde y $& adcdcde +a[bcd]+dcdcde adcdcde n - - +(ab|a)b*c abc y $&-$1 abc-ab +(ab|a)b*c abc y $-[0] 0 +(ab|a)b*c abc y $+[0] 3 +(ab|a)b*c abc y $-[1] 0 +(ab|a)b*c abc y $+[1] 2 +((a)(b)c)(d) abcd y $1-$2-$3-$4 abc-a-b-d +((a)(b)c)(d) abcd y $-[0] 0 +((a)(b)c)(d) abcd y $+[0] 4 +((a)(b)c)(d) abcd y $-[1] 0 +((a)(b)c)(d) abcd y $+[1] 3 +((a)(b)c)(d) abcd y $-[2] 0 +((a)(b)c)(d) abcd y $+[2] 1 +((a)(b)c)(d) abcd y $-[3] 1 +((a)(b)c)(d) abcd y $+[3] 2 +((a)(b)c)(d) abcd y $-[4] 3 +((a)(b)c)(d) abcd y $+[4] 4 +[a-zA-Z_][a-zA-Z0-9_]* alpha y $& alpha +^a(bc+|b[eh])g|.h$ abh y $&-$1 bh- +(bc+d$|ef*g.|h?i(j|k)) effgz y $&-$1-$2 effgz-effgz- +(bc+d$|ef*g.|h?i(j|k)) ij y $&-$1-$2 ij-ij-j +(bc+d$|ef*g.|h?i(j|k)) effg n - - +(bc+d$|ef*g.|h?i(j|k)) bcdd n - - +(bc+d$|ef*g.|h?i(j|k)) reffgz y $&-$1-$2 effgz-effgz- +((((((((((a)))))))))) a y $10 a +((((((((((a)))))))))) a y $-[0] 0 +((((((((((a)))))))))) a y $+[0] 1 +((((((((((a)))))))))) a y $-[10] 0 +((((((((((a)))))))))) a y $+[10] 1 +((((((((((a))))))))))\10 aa y $& aa +((((((((((a))))))))))${bang} aa n - - +((((((((((a))))))))))${bang} a! y $& a! +(((((((((a))))))))) a y $& a +multiple words of text uh-uh n - - +multiple words multiple words, yeah y $& multiple words +(.*)c(.*) abcde y $&-$1-$2 abcde-ab-de +\((.*), (.*)\) (a, b) y ($2, $1) (b, a) +[k] ab n - - +abcd abcd y $&-\$&-\\$& abcd-$&-\abcd +a(bc)d abcd y $1-\$1-\\$1 bc-$1-\bc +a[-]?c ac y $& ac +(abc)\1 abcabc y $1 abc +([a-c]*)\1 abcabc y $1 abc +\1 - c - Reference to nonexistent group +\2 - c - Reference to nonexistent group +(a)|\1 a y - - +(a)|\1 x n - - +(a)|\2 - c - Reference to nonexistent group +(([a-c])b*?\2)* ababbbcbc y $&-$1-$2 ababb-bb-b +(([a-c])b*?\2){3} ababbbcbc y $&-$1-$2 ababbbcbc-cbc-c +((\3|b)\2(a)x)+ aaxabxbaxbbx n - - +((\3|b)\2(a)x)+ aaaxabaxbaaxbbax y $&-$1-$2-$3 bbax-bbax-b-a +((\3|b)\2(a)){2,} bbaababbabaaaaabbaaaabba y $&-$1-$2-$3 bbaaaabba-bba-b-a +(a)|(b) b y $-[0] 0 +(a)|(b) b y $+[0] 1 +(a)|(b) b y x$-[1] x +(a)|(b) b y x$+[1] x +(a)|(b) b y $-[2] 0 +(a)|(b) b y $+[2] 1 +'abc'i ABC y $& ABC +'abc'i XBC n - - +'abc'i AXC n - - +'abc'i ABX n - - +'abc'i XABCY y $& ABC +'abc'i ABABC y $& ABC +'ab*c'i ABC y $& ABC +'ab*bc'i ABC y $& ABC +'ab*bc'i ABBC y $& ABBC +'ab*?bc'i ABBBBC y $& ABBBBC +'ab{0,}?bc'i ABBBBC y $& ABBBBC +'ab+?bc'i ABBC y $& ABBC +'ab+bc'i ABC n - - +'ab+bc'i ABQ n - - +'ab{1,}bc'i ABQ n - - +'ab+bc'i ABBBBC y $& ABBBBC +'ab{1,}?bc'i ABBBBC y $& ABBBBC +'ab{1,3}?bc'i ABBBBC y $& ABBBBC +'ab{3,4}?bc'i ABBBBC y $& ABBBBC +'ab{4,5}?bc'i ABBBBC n - - +'ab??bc'i ABBC y $& ABBC +'ab??bc'i ABC y $& ABC +'ab{0,1}?bc'i ABC y $& ABC +'ab??bc'i ABBBBC n - - +'ab??c'i ABC y $& ABC +'ab{0,1}?c'i ABC y $& ABC +'^abc$'i ABC y $& ABC +'^abc$'i ABCC n - - +'^abc'i ABCC y $& ABC +'^abc$'i AABC n - - +'abc$'i AABC y $& ABC +'^'i ABC y $& +'$'i ABC y $& +'a.c'i ABC y $& ABC +'a.c'i AXC y $& AXC +'a.*?c'i AXYZC y $& AXYZC +'a.*c'i AXYZD n - - +'a[bc]d'i ABC n - - +'a[bc]d'i ABD y $& ABD +'a[b-d]e'i ABD n - - +'a[b-d]e'i ACE y $& ACE +'a[b-d]'i AAC y $& AC +'a[-b]'i A- y $& A- +'a[b-]'i A- y $& A- +'a[b-a]'i - c - Invalid [] range "b-a" +'a[]b'i - ci - Unmatched [ +'a['i - c - Unmatched [ +'a]'i A] y $& A] +'a[]]b'i A]B y $& A]B +'a[^bc]d'i AED y $& AED +'a[^bc]d'i ABD n - - +'a[^-b]c'i ADC y $& ADC +'a[^-b]c'i A-C n - - +'a[^]b]c'i A]C n - - +'a[^]b]c'i ADC y $& ADC +'ab|cd'i ABC y $& AB +'ab|cd'i ABCD y $& AB +'()ef'i DEF y $&-$1 EF- +'*a'i - c - Quantifier follows nothing +'(*)b'i - c - Quantifier follows nothing +'$b'i B n - - +'a\'i - c - Search pattern not terminated +'a\(b'i A(B y $&-$1 A(B- +'a\(*b'i AB y $& AB +'a\(*b'i A((B y $& A((B +'a\\b'i A\B y $& A\B +'abc)'i - c - Unmatched ) +'(abc'i - c - Unmatched ( +'((a))'i ABC y $&-$1-$2 A-A-A +'(a)b(c)'i ABC y $&-$1-$2 ABC-A-C +'a+b+c'i AABBABC y $& ABC +'a{1,}b{1,}c'i AABBABC y $& ABC +'a**'i - c - Nested quantifiers +'a.+?c'i ABCABC y $& ABC +'a.*?c'i ABCABC y $& ABC +'a.{0,5}?c'i ABCABC y $& ABC +'(a+|b)*'i AB y $&-$1 AB-B +'(a+|b){0,}'i AB y $&-$1 AB-B +'(a+|b)+'i AB y $&-$1 AB-B +'(a+|b){1,}'i AB y $&-$1 AB-B +'(a+|b)?'i AB y $&-$1 A-A +'(a+|b){0,1}'i AB y $&-$1 A-A +'(a+|b){0,1}?'i AB y $&-$1 - +')('i - c - Unmatched ) +'[^ab]*'i CDE y $& CDE +'abc'i n - - +'a*'i y $& +'([abc])*d'i ABBBCD y $&-$1 ABBBCD-C +'([abc])*bcd'i ABCD y $&-$1 ABCD-A +'a|b|c|d|e'i E y $& E +'(a|b|c|d|e)f'i EF y $&-$1 EF-E +'abcd*efg'i ABCDEFG y $& ABCDEFG +'ab*'i XABYABBBZ y $& AB +'ab*'i XAYABBBZ y $& A +'(ab|cd)e'i ABCDE y $&-$1 CDE-CD +'[abhgefdc]ij'i HIJ y $& HIJ +'^(ab|cd)e'i ABCDE n x$1y XY +'(abc|)ef'i ABCDEF y $&-$1 EF- +'(a|b)c*d'i ABCD y $&-$1 BCD-B +'(ab|ab*)bc'i ABC y $&-$1 ABC-A +'a([bc]*)c*'i ABC y $&-$1 ABC-BC +'a([bc]*)(c*d)'i ABCD y $&-$1-$2 ABCD-BC-D +'a([bc]+)(c*d)'i ABCD y $&-$1-$2 ABCD-BC-D +'a([bc]*)(c+d)'i ABCD y $&-$1-$2 ABCD-B-CD +'a[bcd]*dcdcde'i ADCDCDE y $& ADCDCDE +'a[bcd]+dcdcde'i ADCDCDE n - - +'(ab|a)b*c'i ABC y $&-$1 ABC-AB +'((a)(b)c)(d)'i ABCD y $1-$2-$3-$4 ABC-A-B-D +'[a-zA-Z_][a-zA-Z0-9_]*'i ALPHA y $& ALPHA +'^a(bc+|b[eh])g|.h$'i ABH y $&-$1 BH- +'(bc+d$|ef*g.|h?i(j|k))'i EFFGZ y $&-$1-$2 EFFGZ-EFFGZ- +'(bc+d$|ef*g.|h?i(j|k))'i IJ y $&-$1-$2 IJ-IJ-J +'(bc+d$|ef*g.|h?i(j|k))'i EFFG n - - +'(bc+d$|ef*g.|h?i(j|k))'i BCDD n - - +'(bc+d$|ef*g.|h?i(j|k))'i REFFGZ y $&-$1-$2 EFFGZ-EFFGZ- +'((((((((((a))))))))))'i A y $10 A +'((((((((((a))))))))))\10'i AA y $& AA +'((((((((((a))))))))))${bang}'i AA n - - +'((((((((((a))))))))))${bang}'i A! y $& A! +'(((((((((a)))))))))'i A y $& A +'(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))'i A y $1 A +'(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))'i C y $1 C +'multiple words of text'i UH-UH n - - +'multiple words'i MULTIPLE WORDS, YEAH y $& MULTIPLE WORDS +'(.*)c(.*)'i ABCDE y $&-$1-$2 ABCDE-AB-DE +'\((.*), (.*)\)'i (A, B) y ($2, $1) (B, A) +'[k]'i AB n - - +'abcd'i ABCD y $&-\$&-\\$& ABCD-$&-\ABCD +'a(bc)d'i ABCD y $1-\$1-\\$1 BC-$1-\BC +'a[-]?c'i AC y $& AC +'(abc)\1'i ABCABC y $1 ABC +'([a-c]*)\1'i ABCABC y $1 ABC +a(?!b). abad y $& ad +a(?=d). abad y $& ad +a(?=c|d). abad y $& ad +a(?:b|c|d)(.) ace y $1 e +a(?:b|c|d)*(.) ace y $1 e +a(?:b|c|d)+?(.) ace y $1 e +a(?:b|c|d)+?(.) acdbcdbe y $1 d +a(?:b|c|d)+(.) acdbcdbe y $1 e +a(?:b|c|d){2}(.) acdbcdbe y $1 b +a(?:b|c|d){4,5}(.) acdbcdbe y $1 b +a(?:b|c|d){4,5}?(.) acdbcdbe y $1 d +((foo)|(bar))* foobar y $1-$2-$3 bar-foo-bar +:(?: - c - Sequence (? incomplete +a(?:b|c|d){6,7}(.) acdbcdbe y $1 e +a(?:b|c|d){6,7}?(.) acdbcdbe y $1 e +a(?:b|c|d){5,6}(.) acdbcdbe y $1 e +a(?:b|c|d){5,6}?(.) acdbcdbe y $1 b +a(?:b|c|d){5,7}(.) acdbcdbe y $1 e +a(?:b|c|d){5,7}?(.) acdbcdbe y $1 b +a(?:b|(c|e){1,2}?|d)+?(.) ace y $1$2 ce +^(.+)?B AB y $1 A +^([^a-z])|(\^)$ . y $1 . +^[<>]& <&OUT y $& <& +^(a\1?){4}$ aaaaaaaaaa y $1 aaaa +^(a\1?){4}$ aaaaaaaaa n - - +^(a\1?){4}$ aaaaaaaaaaa n - - +^(a(?(1)\1)){4}$ aaaaaaaaaa y $1 aaaa +^(a(?(1)\1)){4}$ aaaaaaaaa n - - +^(a(?(1)\1)){4}$ aaaaaaaaaaa n - - +((a{4})+) aaaaaaaaa y $1 aaaaaaaa +(((aa){2})+) aaaaaaaaaa y $1 aaaaaaaa +(((a{2}){2})+) aaaaaaaaaa y $1 aaaaaaaa +(?:(f)(o)(o)|(b)(a)(r))* foobar y $1:$2:$3:$4:$5:$6 f:o:o:b:a:r +(?<=a)b ab y $& b +(?<=a)b cb n - - +(?<=a)b b n - - +(?a+)ab aaab n - - +(?>a+)b aaab y - - +([[:]+) a:[b]: yi $1 :[ Java and ICU dont escape [[xyz +([[=]+) a=[b]= yi $1 =[ Java and ICU dont escape [[xyz +([[.]+) a.[b]. yi $1 .[ Java and ICU dont escape [[xyz +[a[:xyz: - c - Unmatched [ +[a[:xyz:] - c - POSIX class [:xyz:] unknown +[a[:]b[:c] abc yi $& abc Java and ICU embedded [ is nested set +([a[:xyz:]b]+) pbaq c - POSIX class [:xyz:] unknown +[a[:]b[:c] abc iy $& abc Java and ICU embedded [ is nested set +([[:alpha:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd +([[:alnum:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy +([[:ascii:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy__-- ${nulnul} +([[:cntrl:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ${nulnul} +([[:digit:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 01 +([[:graph:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy__-- +([[:lower:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 cd +([[:print:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy__-- +([[:punct:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 __-- +([[:space:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 +([[:word:]]+) ABcd01Xy__-- ${nulnul}${ffff} yi $1 ABcd01Xy__ +([[:upper:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 AB +([[:xdigit:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01 +([[:^alpha:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 01 +([[:^alnum:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 __-- ${nulnul}${ffff} +([[:^ascii:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ${ffff} +([[:^cntrl:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy__-- +([[:^digit:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd +([[:^lower:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 AB +([[:^print:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ${nulnul}${ffff} +([[:^punct:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy +([[:^space:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy__-- +([[:^word:]]+) ABcd01Xy__-- ${nulnul}${ffff} yi $1 -- ${nulnul}${ffff} +([[:^upper:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 cd01 +([[:^xdigit:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 Xy__-- ${nulnul}${ffff} +[[:foo:]] - c - POSIX class [:foo:] unknown +[[:^foo:]] - c - POSIX class [:^foo:] unknown +((?>a+)b) aaab y $1 aaab +(?>(a+))b aaab y $1 aaa +((?>[^()]+)|\([^()]*\))+ ((abc(ade)ufh()()x y $& abc(ade)ufh()()x +(?<=x+)y - c - Variable length lookbehind not implemented +a{37,17} - c - Can't do {n,m} with n > m +\Z a\nb\n y $-[0] 3 +\z a\nb\n y $-[0] 4 +$ a\nb\n y $-[0] 3 +\Z b\na\n y $-[0] 3 +\z b\na\n y $-[0] 4 +$ b\na\n y $-[0] 3 +\Z b\na y $-[0] 3 +\z b\na y $-[0] 3 +$ b\na y $-[0] 3 +'\Z'm a\nb\n y $-[0] 3 +'\z'm a\nb\n y $-[0] 4 +'$'m a\nb\n y $-[0] 1 +'\Z'm b\na\n y $-[0] 3 +'\z'm b\na\n y $-[0] 4 +'$'m b\na\n y $-[0] 1 +'\Z'm b\na y $-[0] 3 +'\z'm b\na y $-[0] 3 +'$'m b\na y $-[0] 1 +a\Z a\nb\n n - - +a\z a\nb\n n - - +a$ a\nb\n n - - +a\Z b\na\n y $-[0] 2 +a\z b\na\n n - - +a$ b\na\n y $-[0] 2 +a\Z b\na y $-[0] 2 +a\z b\na y $-[0] 2 +a$ b\na y $-[0] 2 +'a\Z'm a\nb\n n - - +'a\z'm a\nb\n n - - +'a$'m a\nb\n y $-[0] 0 +'a\Z'm b\na\n y $-[0] 2 +'a\z'm b\na\n n - - +'a$'m b\na\n y $-[0] 2 +'a\Z'm b\na y $-[0] 2 +'a\z'm b\na y $-[0] 2 +'a$'m b\na y $-[0] 2 +aa\Z aa\nb\n n - - +aa\z aa\nb\n n - - +aa$ aa\nb\n n - - +aa\Z b\naa\n y $-[0] 2 +aa\z b\naa\n n - - +aa$ b\naa\n y $-[0] 2 +aa\Z b\naa y $-[0] 2 +aa\z b\naa y $-[0] 2 +aa$ b\naa y $-[0] 2 +'aa\Z'm aa\nb\n n - - +'aa\z'm aa\nb\n n - - +'aa$'m aa\nb\n y $-[0] 0 +'aa\Z'm b\naa\n y $-[0] 2 +'aa\z'm b\naa\n n - - +'aa$'m b\naa\n y $-[0] 2 +'aa\Z'm b\naa y $-[0] 2 +'aa\z'm b\naa y $-[0] 2 +'aa$'m b\naa y $-[0] 2 +aa\Z ac\nb\n n - - +aa\z ac\nb\n n - - +aa$ ac\nb\n n - - +aa\Z b\nac\n n - - +aa\z b\nac\n n - - +aa$ b\nac\n n - - +aa\Z b\nac n - - +aa\z b\nac n - - +aa$ b\nac n - - +'aa\Z'm ac\nb\n n - - +'aa\z'm ac\nb\n n - - +'aa$'m ac\nb\n n - - +'aa\Z'm b\nac\n n - - +'aa\z'm b\nac\n n - - +'aa$'m b\nac\n n - - +'aa\Z'm b\nac n - - +'aa\z'm b\nac n - - +'aa$'m b\nac n - - +aa\Z ca\nb\n n - - +aa\z ca\nb\n n - - +aa$ ca\nb\n n - - +aa\Z b\nca\n n - - +aa\z b\nca\n n - - +aa$ b\nca\n n - - +aa\Z b\nca n - - +aa\z b\nca n - - +aa$ b\nca n - - +'aa\Z'm ca\nb\n n - - +'aa\z'm ca\nb\n n - - +'aa$'m ca\nb\n n - - +'aa\Z'm b\nca\n n - - +'aa\z'm b\nca\n n - - +'aa$'m b\nca\n n - - +'aa\Z'm b\nca n - - +'aa\z'm b\nca n - - +'aa$'m b\nca n - - +ab\Z ab\nb\n n - - +ab\z ab\nb\n n - - +ab$ ab\nb\n n - - +ab\Z b\nab\n y $-[0] 2 +ab\z b\nab\n n - - +ab$ b\nab\n y $-[0] 2 +ab\Z b\nab y $-[0] 2 +ab\z b\nab y $-[0] 2 +ab$ b\nab y $-[0] 2 +'ab\Z'm ab\nb\n n - - +'ab\z'm ab\nb\n n - - +'ab$'m ab\nb\n y $-[0] 0 +'ab\Z'm b\nab\n y $-[0] 2 +'ab\z'm b\nab\n n - - +'ab$'m b\nab\n y $-[0] 2 +'ab\Z'm b\nab y $-[0] 2 +'ab\z'm b\nab y $-[0] 2 +'ab$'m b\nab y $-[0] 2 +ab\Z ac\nb\n n - - +ab\z ac\nb\n n - - +ab$ ac\nb\n n - - +ab\Z b\nac\n n - - +ab\z b\nac\n n - - +ab$ b\nac\n n - - +ab\Z b\nac n - - +ab\z b\nac n - - +ab$ b\nac n - - +'ab\Z'm ac\nb\n n - - +'ab\z'm ac\nb\n n - - +'ab$'m ac\nb\n n - - +'ab\Z'm b\nac\n n - - +'ab\z'm b\nac\n n - - +'ab$'m b\nac\n n - - +'ab\Z'm b\nac n - - +'ab\z'm b\nac n - - +'ab$'m b\nac n - - +ab\Z ca\nb\n n - - +ab\z ca\nb\n n - - +ab$ ca\nb\n n - - +ab\Z b\nca\n n - - +ab\z b\nca\n n - - +ab$ b\nca\n n - - +ab\Z b\nca n - - +ab\z b\nca n - - +ab$ b\nca n - - +'ab\Z'm ca\nb\n n - - +'ab\z'm ca\nb\n n - - +'ab$'m ca\nb\n n - - +'ab\Z'm b\nca\n n - - +'ab\z'm b\nca\n n - - +'ab$'m b\nca\n n - - +'ab\Z'm b\nca n - - +'ab\z'm b\nca n - - +'ab$'m b\nca n - - +abb\Z abb\nb\n n - - +abb\z abb\nb\n n - - +abb$ abb\nb\n n - - +abb\Z b\nabb\n y $-[0] 2 +abb\z b\nabb\n n - - +abb$ b\nabb\n y $-[0] 2 +abb\Z b\nabb y $-[0] 2 +abb\z b\nabb y $-[0] 2 +abb$ b\nabb y $-[0] 2 +'abb\Z'm abb\nb\n n - - +'abb\z'm abb\nb\n n - - +'abb$'m abb\nb\n y $-[0] 0 +'abb\Z'm b\nabb\n y $-[0] 2 +'abb\z'm b\nabb\n n - - +'abb$'m b\nabb\n y $-[0] 2 +'abb\Z'm b\nabb y $-[0] 2 +'abb\z'm b\nabb y $-[0] 2 +'abb$'m b\nabb y $-[0] 2 +abb\Z ac\nb\n n - - +abb\z ac\nb\n n - - +abb$ ac\nb\n n - - +abb\Z b\nac\n n - - +abb\z b\nac\n n - - +abb$ b\nac\n n - - +abb\Z b\nac n - - +abb\z b\nac n - - +abb$ b\nac n - - +'abb\Z'm ac\nb\n n - - +'abb\z'm ac\nb\n n - - +'abb$'m ac\nb\n n - - +'abb\Z'm b\nac\n n - - +'abb\z'm b\nac\n n - - +'abb$'m b\nac\n n - - +'abb\Z'm b\nac n - - +'abb\z'm b\nac n - - +'abb$'m b\nac n - - +abb\Z ca\nb\n n - - +abb\z ca\nb\n n - - +abb$ ca\nb\n n - - +abb\Z b\nca\n n - - +abb\z b\nca\n n - - +abb$ b\nca\n n - - +abb\Z b\nca n - - +abb\z b\nca n - - +abb$ b\nca n - - +'abb\Z'm ca\nb\n n - - +'abb\z'm ca\nb\n n - - +'abb$'m ca\nb\n n - - +'abb\Z'm b\nca\n n - - +'abb\z'm b\nca\n n - - +'abb$'m b\nca\n n - - +'abb\Z'm b\nca n - - +'abb\z'm b\nca n - - +'abb$'m b\nca n - - +(^|x)(c) ca y $2 c +a*abc?xyz+pqr{3}ab{2,}xy{4,5}pq{0,6}AB{0,}zz x n - - +a(?{$a=2;$b=3;($b)=$a})b yabz y $b 2 +round\(((?>[^()]+))\) _I(round(xs * sz),1) y $1 xs * sz +'((?x:.) )' x y $1- x - +'((?-x:.) )'x x y $1- x- +foo.bart foo.bart y - - +'^d[x][x][x]'m abcd\ndxxx y - - +.X(.+)+X bbbbXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - # TODO: ICU doesn't optimize on trailing literals in pattern. +.X(.+)+XX bbbbXcXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - +.XX(.+)+X bbbbXXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - +.X(.+)+X bbbbXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +.X(.+)+XX bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +.XX(.+)+X bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +.X(.+)+[X] bbbbXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - +.X(.+)+[X][X] bbbbXcXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - +.XX(.+)+[X] bbbbXXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - +.X(.+)+[X] bbbbXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +.X(.+)+[X][X] bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +.XX(.+)+[X] bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +.[X](.+)+[X] bbbbXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - +.[X](.+)+[X][X] bbbbXcXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - +.[X][X](.+)+[X] bbbbXXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - +.[X](.+)+[X] bbbbXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +.[X](.+)+[X][X] bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +.[X][X](.+)+[X] bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - - +tt+$ xxxtt y - - +([a-\d]+) za-9z yi $1 a-9 +([\d-z]+) a0-za y $1 0-z +([\d-\s]+) a0- z y $1 0- +([a-[:digit:]]+) za-9z y $1 a-9 +([[:digit:]-z]+) =0-z= y $1 0-z +([[:digit:]-[:alpha:]]+) =0-z= iy $1 0-z Set difference in ICU +\GX.*X aaaXbX n - - +(\d+\.\d+) 3.1415926 y $1 3.1415926 +(\ba.{0,10}br) have a web browser y $1 a web br +'\.c(pp|xx|c)?$'i Changes n - - +'\.c(pp|xx|c)?$'i IO.c y - - +'(\.c(pp|xx|c)?$)'i IO.c y $1 .c +^([a-z]:) C:/ n - - +'^\S\s+aa$'m \nx aa y - - +(^|a)b ab y - - +^([ab]*?)(b)?(c)$ abac y -$2- -- +(\w)?(abc)\1b abcab n - - +^(?:.,){2}c a,b,c y - - +^(.,){2}c a,b,c y $1 b, +^(?:[^,]*,){2}c a,b,c y - - +^([^,]*,){2}c a,b,c y $1 b, +^([^,]*,){3}d aaa,b,c,d y $1 c, +^([^,]*,){3,}d aaa,b,c,d y $1 c, +^([^,]*,){0,3}d aaa,b,c,d y $1 c, +^([^,]{1,3},){3}d aaa,b,c,d y $1 c, +^([^,]{1,3},){3,}d aaa,b,c,d y $1 c, +^([^,]{1,3},){0,3}d aaa,b,c,d y $1 c, +^([^,]{1,},){3}d aaa,b,c,d y $1 c, +^([^,]{1,},){3,}d aaa,b,c,d y $1 c, +^([^,]{1,},){0,3}d aaa,b,c,d y $1 c, +^([^,]{0,3},){3}d aaa,b,c,d y $1 c, +^([^,]{0,3},){3,}d aaa,b,c,d y $1 c, +^([^,]{0,3},){0,3}d aaa,b,c,d y $1 c, +(?i) y - - +'(?!\A)x'm a\nxb\n y - - +^(a(b)?)+$ aba yi -$1-$2- -a-- Java disagrees. Not clear who is right. +'^.{9}abc.*\n'm 123\nabcabcabcabc\n y - - +^(a)?a$ a y -$1- -- +^(a)?(?(1)a|b)+$ a n - - +^(a\1?)(a\1?)(a\2?)(a\3?)$ aaaaaa y $1,$2,$3,$4 a,aa,a,aa +^(a\1?){4}$ aaaaaa y $1 aa +^(0+)?(?:x(1))? x1 y - - +^([0-9a-fA-F]+)(?:x([0-9a-fA-F]+)?)(?:x([0-9a-fA-F]+))? 012cxx0190 y - - +^(b+?|a){1,2}c bbbac y $1 a +^(b+?|a){1,2}c bbbbac y $1 a +\((\w\. \w+)\) cd. (A. Tw) y -$1- -A. Tw- +((?:aaaa|bbbb)cccc)? aaaacccc y - - +((?:aaaa|bbbb)cccc)? bbbbcccc y - - +(a)?(a)+ a y $1:$2 :a - +(ab)?(ab)+ ab y $1:$2 :ab - +(abc)?(abc)+ abc y $1:$2 :abc - +'b\s^'m a\nb\n n - - +\ba a y - - +^(a(??{"(?!)"})|(a)(?{1}))b ab yi $2 a # [ID 20010811.006] +ab(?i)cd AbCd n - - # [ID 20010809.023] +ab(?i)cd abCd y - - +(A|B)*(?(1)(CD)|(CD)) CD y $2-$3 -CD +(A|B)*(?(1)(CD)|(CD)) ABCD y $2-$3 CD- +(A|B)*?(?(1)(CD)|(CD)) CD y $2-$3 -CD # [ID 20010803.016] +(A|B)*?(?(1)(CD)|(CD)) ABCD y $2-$3 CD- +'^(o)(?!.*\1)'i Oo n - - +(.*)\d+\1 abc12bc y $1 bc +(?m:(foo\s*$)) foo\n bar y $1 foo +(.*)c abcd y $1 ab +(.*)(?=c) abcd y $1 ab +(.*)(?=c)c abcd yB $1 ab +(.*)(?=b|c) abcd y $1 ab +(.*)(?=b|c)c abcd y $1 ab +(.*)(?=c|b) abcd y $1 ab +(.*)(?=c|b)c abcd y $1 ab +(.*)(?=[bc]) abcd y $1 ab +(.*)(?=[bc])c abcd yB $1 ab +(.*)(?<=b) abcd y $1 ab +(.*)(?<=b)c abcd y $1 ab +(.*)(?<=b|c) abcd y $1 abc +(.*)(?<=b|c)c abcd y $1 ab +(.*)(?<=c|b) abcd y $1 abc +(.*)(?<=c|b)c abcd y $1 ab +(.*)(?<=[bc]) abcd y $1 abc +(.*)(?<=[bc])c abcd y $1 ab +(.*?)c abcd y $1 ab +(.*?)(?=c) abcd y $1 ab +(.*?)(?=c)c abcd yB $1 ab +(.*?)(?=b|c) abcd y $1 a +(.*?)(?=b|c)c abcd y $1 ab +(.*?)(?=c|b) abcd y $1 a +(.*?)(?=c|b)c abcd y $1 ab +(.*?)(?=[bc]) abcd y $1 a +(.*?)(?=[bc])c abcd yB $1 ab +(.*?)(?<=b) abcd y $1 ab +(.*?)(?<=b)c abcd y $1 ab +(.*?)(?<=b|c) abcd y $1 ab +(.*?)(?<=b|c)c abcd y $1 ab +(.*?)(?<=c|b) abcd y $1 ab +(.*?)(?<=c|b)c abcd y $1 ab +(.*?)(?<=[bc]) abcd y $1 ab +(.*?)(?<=[bc])c abcd y $1 ab +2(]*)?$\1 2 y $& 2 +(??{}) x yi - - diff --git a/go/mysql/icuregex/testdata/regextst.txt b/go/mysql/icuregex/testdata/regextst.txt new file mode 100644 index 00000000000..8d5d2c34a8e --- /dev/null +++ b/go/mysql/icuregex/testdata/regextst.txt @@ -0,0 +1,2793 @@ +# Copyright (C) 2016 and later: Unicode, Inc. and others. +# License & terms of use: http://www.unicode.org/copyright.html +# Copyright (c) 2001-2015 International Business Machines +# Corporation and others. All Rights Reserved. +# +# file: +# +# ICU regular expression test cases. +# +# format: one test case per line, +# = [# comment] +# = "" +# = "" +# the quotes on the pattern and match string can be " or ' or / +# = text, with the start and end of each +# capture group tagged with .... The overall match, +# if any, is group 0, as in <0>matched text +# A region can be specified with ... tags. +# Standard ICU unescape will be applied, allowing \u, \U, etc. to appear. +# +# = any combination of +# i case insensitive match +# x free spacing and comments +# s dot-matches-all mode +# m multi-line mode. +# ($ and ^ match at embedded new-lines) +# D Unix Lines mode (only recognize 0x0a as new-line) +# Q UREGEX_LITERAL flag. Entire pattern is literal string. +# v If icu configured without break iteration, this +# regex test pattern should not compile. +# e set the UREGEX_ERROR_ON_UNKNOWN_ESCAPES flag +# d dump the compiled pattern +# t trace operation of match engine. +# 2-9 a digit between 2 and 9, specifies the number of +# times to execute find(). The expected results are +# for the last find() in the sequence. +# G Only check match / no match. Do not check capture groups. +# E Pattern compilation error expected +# L Use LookingAt() rather than find() +# M Use matches() rather than find(). +# +# a Use non-Anchoring Bounds. +# b Use Transparent Bounds. +# The a and b options only make a difference if +# a region has been specified in the string. +# z|Z hitEnd was expected(z) or not expected (Z). +# With neither, hitEnd is not checked. +# y|Y Require End expected(y) or not expected (Y). +# +# White space must be present between the flags and the match string. +# + +# Look-ahead expressions +# +"(?!0{5})(\d{5})" "<0><1>00001zzzz" +"(?!0{5})(\d{5})z" "<0><1>00001zzzz" +"(?!0{5})(\d{5})(?!y)" "<0><1>00001zzzz" +"abc(?=def)" "<0>abcdef" +"(.*)(?=c)" "<0><1>abcdef" + +"(?:.*)(?=c)" "abcdef" +"(?:.*)(?=c)" b "<0>abcdef" # transparent bounds +"(?:.*)(?=c)" bM "<0>abcdef" # transparent bounds + +"(?:.*)(?=(c))" b "<0>ab<1>cdef" # Capture in look-ahead +"(?=(.)\1\1)\1" "abcc<0><1>dddefg" # Backrefs to look-ahead capture + +".(?!\p{L})" "abc<0>d " # Negated look-ahead +".(?!(\p{L}))" "abc<0>d " # Negated look-ahead, no capture + # visible outside of look-ahead +"and(?=roid)" L "<0>android" +"and(?=roid)" M "android" +"and(?=roid)" bM "<0>android" + +"and(?!roid)" L "<0>androix" +"and(?!roid)" L "android" + +"and(?!roid)" M "<0>android" # Opaque bounds +"and(?!roid)" bM "android" +"and(?!roid)" bM "<0>androix" + +# +# Negated Lookahead, various regions and region transparency +# +"abc(?!def)" "<0>abcxyz" +"abc(?!def)" "abcdef" +"abc(?!def)" "<0>abcdef" +"abc(?!def)" b "abcdef" +"abc(?!def)" b "<0>abcxyz" + +# +# Nested Lookahead / Behind +# +"one(?=(?:(?!).)*)" "<0>one stuff" +"one(?=(?:(?!).)*)" "one " + +# More nesting lookaround: pattern matches "qq" when not preceded by 'a' and followed by 'z' +"(?qqc" +"(?qqc" +"(?A<0>jk<2>B" +"(?=(?<=(\p{Lu})(?=..(\p{Lu})))).." "ajkB" +"(?=(?<=(\p{Lu})(?=..(\p{Lu})))).." "Ajkb" + +# Nested lookaround cases from bug ICU-20564 +"(?<=(?<=((?=)){0}+))" "<0>abc" +"(?<=c(?<=c((?=c)){1}+))" "c<0><1>cc" + +# +# Anchoring Bounds +# +"^def$" "abc<0>defghi" # anchoring (default) bounds +"^def$" a "abcdefghi" # non-anchoring bounds +"^def" a "<0>defghi" # non-anchoring bounds +"def$" a "abc<0>def" # non-anchoring bounds + +"^.*$" m "<0>line 1\n line 2" +"^.*$" m2 "line 1\n<0> line 2" +"^.*$" m3 "line 1\n line 2" +"^.*$" m "li<0>ne 1\n line 2" # anchoring bounds +"^.*$" m2 "line 1\n line 2" # anchoring bounds +"^.*$" am "line 1\n line 2" # non-anchoring bounds +"^.*$" am "li\n<0>ne \n1\n line 2" # non-anchoring bounds + +# +# HitEnd and RequireEnd for new-lines just before end-of-input +# +"xyz$" yz "<0>xyz\n" +"xyz$" yz "<0>xyz\x{d}\x{a}" + +"xyz$" myz "<0>xyz" # multi-line mode +"xyz$" mYZ "<0>xyz\n" +"xyz$" mYZ "<0>xyz\r\n" +"xyz$" mYZ "<0>xyz\x{85}abcd" + +"xyz$" Yz "xyz\nx" +"xyz$" Yz "xyza" +"xyz$" yz "<0>xyz" + +# +# HitEnd +# +"abcd" Lz "a" +"abcd" Lz "ab" +"abcd" Lz "abc" +"abcd" LZ "<0>abcd" +"abcd" LZ "<0>abcde" +"abcd" LZ "abcx" +"abcd" LZ "abx" +"abcd" Lzi "a" +"abcd" Lzi "ab" +"abcd" Lzi "abc" +"abcd" LZi "<0>abcd" +"abcd" LZi "<0>abcde" +"abcd" LZi "abcx" +"abcd" LZi "abx" + +# +# All Unicode line endings recognized. +# 0a, 0b, 0c, 0d, 0x85, 0x2028, 0x2029 +# Multi-line and non-multiline mode take different paths, so repeated tests. +# +"^def$" mYZ "abc\x{a}<0>def\x{a}ghi" +"^def$" mYZ "abc\x{b}<0>def\x{b}ghi" +"^def$" mYZ "abc\x{c}<0>def\x{c}ghi" +"^def$" mYZ "abc\x{d}<0>def\x{d}ghi" +"^def$" mYZ "abc\x{85}<0>def\x{85}ghi" +"^def$" mYZ "abc\x{2028}<0>def\x{2028}ghi" +"^def$" mYZ "abc\x{2029}<0>def\x{2029}ghi" +"^def$" mYZ "abc\r\n<0>def\r\nghi" + +"^def$" yz "<0>def\x{a}" +"^def$" yz "<0>def\x{b}" +"^def$" yz "<0>def\x{c}" +"^def$" yz "<0>def\x{d}" +"^def$" yz "<0>def\x{85}" +"^def$" yz "<0>def\x{2028}" +"^def$" yz "<0>def\x{2029}" +"^def$" yz "<0>def\r\n" +"^def$" yz "<0>def" + + +# "^def$" "<0>def\x{2028" #TODO: should be an error of some sort. + +# +# UNIX_LINES mode +# +"abc$" D "<0>abc\n" +"abc$" D "abc\r" +"abc$" D "abc\u0085" +"a.b" D "<0>a\rb" +"a.b" D "a\nb" +"(?d)abc$" "<0>abc\n" +"(?d)abc$" "abc\r" +"abc$" mD "<0>abc\ndef" +"abc$" mD "abc\rdef" + +".*def" L "abc\r def xyz" # Normal mode, LookingAt() stops at \r +".*def" DL "<0>abc\r def xyz" # Unix Lines mode, \r not line end. +".*def" DL "abc\n def xyz" + +"(?d)a.b" "a\nb" +"(?d)a.b" "<0>a\rb" + +"^abc" m "xyz\r<0>abc" +"^abc" Dm "xyz\rabc" +"^abc" Dm "xyz\n<0>abc" + + + +# Capturing parens +".(..)." "<0>a<1>bcd" + ".*\A( +hello)" "<0><1> hello" +"(hello)|(goodbye)" "<0><1>hello" +"(hello)|(goodbye)" "<0><2>goodbye" +"abc( +( inner(X?) +) xyz)" "leading cruft <0>abc<1> <2> inner<3> xyz cruft" +"\s*([ixsmdt]*)([:letter:]*)" "<0> <1>d<2> " +"(a|b)c*d" "a<0><1>bcd" + +# Non-capturing parens (?: stuff). Groups, but does not capture. +"(?:abc)*(tail)" "<0>abcabcabc<1>tail" + +# Non-greedy *? quantifier +".*?(abc)" "<0> abx <1>abc abc abc abc" +".*(abc)" "<0> abx abc abc abc <1>abc" + +"((?:abc |xyz )*?)abc " "<0><1>xyz abc abc abc " +"((?:abc |xyz )*)abc " "<0><1>xyz abc abc abc " + +# Non-greedy +? quantifier +"(a+?)(a*)" "<0><1>a<2>aaaaaaaaaaaa" +"(a+)(a*)" "<0><1>aaaaaaaaaaaaa<2>" + +"((ab)+?)((ab)*)" "<0><1><2>ab<3>ababababab<4>ab" +"((ab)+)((ab)*)" "<0><1>abababababab<2>ab<3>" + +# Non-greedy ?? quantifier +"(ab)(ab)??(ab)??(ab)??(ab)??c" "<0><1>ab<4>ab<5>abc" + +# Unicode Properties as naked elements in a pattern +"\p{Lu}+" "here we go ... <0>ABC and no more." +"(\p{L}+)(\P{L}*?) (\p{Zs}*)" "7999<0><1>letters<2>4949%^&*( <3> " + +# \w and \W +"\w+" " $%^&*( <0>hello123%^&*(" +"\W+" "<0> $%^&*( hello123%^&*(" + +# \A match at beginning of input only. + ".*\Ahello" "<0>hello hello" + ".*hello" "<0>hello hello" +".*\Ahello" "stuff\nhello" # don't match after embedded new-line. + +# \b \B +# +".*?\b(.).*" "<0> $%^&*( <1>hello123%^&*()gxx" +"\ba\b" "-<0>a" +"\by\b" "xy" +"[ \b]" "<0>b" # in a set, \b is a literal b. + +# Finds first chars of up to 5 words +"(?:.*?\b(\w))?(?:.*?\b(\w))?(?:.*?\b(\w))?(?:.*?\b(\w))?(?:.*?\b(\w))?" "<0><1>Tthe <2>qick <3>brown <4>fox" + +"H.*?((?:\B.)+)" "<0>H<1>ello " +".*?((?:\B.)+).*?((?:\B.)+).*?((?:\B.)+)" "<0>H<1>ello <2> g<3>oodbye " + +"(?:.*?\b(.))?(?:.*?\b(.))?(?:.*?\b(.))?(?:.*?\b(.))?(?:.*?\b(.))?.*" "<0> \u0301 \u0301<1>A\u0302BC\u0303\u0304<2> \u0305 \u0306<3>X\u0307Y\u0308" + + +# +# Unicode word boundary mode +# +"(?w).*?\b" v "<0>hello, world" +"(?w).*?(\b.+?\b).*" v "<0><1> 123.45 " +"(?w).*?(\b\d.*?\b).*" v "<0> <1>123.45 " +".*?(\b.+?\b).*" "<0> <1>123.45 " +"(?w:.*?(\b\d.*?\b).*)" v "<0> <1>123.45 " +"(?w:.*?(\b.+?\b).*)" v "<0><1>don't " +"(?w:.+?(\b\S.+?\b).*)" v "<0> <1>don't " +"(?w:(\b.+?)(\b.+?)(\b.+?)(\b.+?)(\b.+?)(\b.+?)(\b.+?).*)" v "<0><1>.<2> <3>,<4>:<5>$<6>37,000.50<7> " + +# +# Unicode word boundaries with Regions +# +"(?w).*?\b" v "abc<0>defghi" +"(?w).*?\b" v2 "abcdef<0>ghi" +"(?w).*?\b" v3 "abcdefghi" +#"(?w).*?\b" vb "abc<0>defghi" # TODO: bug. Ticket 6073 +#"(?w).*?\b" vb2 "abcdefghi" + + + +# . does not match new-lines +"." "\u000a\u000d\u0085\u000c\u000b\u2028\u2029<0>X\u000aY" +"A." "A\u000a "# no match + +# \d for decimal digits +"\d*" "<0>0123456789\u0660\u06F9\u0969\u0A66\u17E2\uFF10\U0001D7CE\U0001D7FFnon-digits" +"\D+" "<0>non digits" +"\D*(\d*)(\D*)" "<0>non-digits<1>3456666<2>more non digits" + +# \Q...\E quote mode +"hel\Qlo, worl\Ed" "<0>hello, world" +"\Q$*^^(*)?\A\E(a*)" "<0>$*^^(*)?\\A<1>aaaaaaaaaaaaaaa" +"[abc\Q]\r\E]+" "<0>aaaccc]]]\\\\\\\r..." # \Q ... \E escape in a [set] + +# UREGEX_LITERAL - entire pattern is a literal string, no escapes recognized. +# Note that data strings in test cases still get escape processing. +"abc\an\r\E\\abcd\u0031bye" Q "lead<0>abc\\an\\r\\E\\\\abcd\\u0031byeextra" +"case insensitive \\ (l)iteral" Qi "stuff!! <0>cAsE InSenSiTiVE \\\\ (L)ITeral" + +# \S and \s space characters +"\s+" "not_space<0> \t \r \n \u3000 \u2004 \u2028 \u2029xyz" +"(\S+).*?(\S+).*" "<0><1>Not-spaces <2>more-non-spaces " + +# \X consume one Grapheme Cluster. +"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>A<2>B<3> <4>\r\n" +"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>A\u0301<2>\n<3>\u0305<4>a\u0302\u0303\u0304" +"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>\u1100\u1161\u11a8<2>\u115f\u11a2\u11f9" +"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>\u1100\uac01<2>\uac02<3>\uac03\u11b0" +"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>\u1100\u1101\uac02\u0301<2>\u1100" +# Regional indicator pairs are grapheme clusters +"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>\U0001f1e6\U0001f1e8<2>\U0001f1ea\U0001f1ff" +# Grapheme Break rule 9b: Prepend x +"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>\U000111C2x" + +# Grapheme clusters that straddle a match region. Matching is pinned to the region limits, +# giving boundaries inside grapheme clusters +"(\X)?(\X)?(\X)?" v "a\u0301<0><1>\u0301\u0301<2>z\u0302\u0302\u0302" +# Same as previous test case, but without the region limits. +"(\X)?(\X)?(\X)?" v "<0><1>a\u0301\u0301\u0301<2>z\u0302\u0302\u0302" + +# ^ matches only at beginning of line +".*^(Hello)" "<0><1>Hello Hello Hello Hello Goodbye" +".*(Hello)" "<0>Hello Hello Hello <1>Hello Goodbye" +".*^(Hello)" " Hello Hello Hello Hello Goodbye"# No Match + +# $ matches only at end of line, or before a newline preceding the end of line +".*?(Goodbye)$" zy "<0>Hello Goodbye Goodbye <1>Goodbye" +".*?(Goodbye)" ZY "<0>Hello <1>Goodbye Goodbye Goodbye" +".*?(Goodbye)$" z "Hello Goodbye> Goodbye Goodbye "# No Match + +".*?(Goodbye)$" zy "<0>Hello Goodbye Goodbye <1>Goodbye\n" +".*?(Goodbye)$" zy "<0>Hello Goodbye Goodbye <1>Goodbye\n" +".*?(Goodbye)$" zy "<0>Hello Goodbye Goodbye <1>Goodbye\r\n" +".*?(Goodbye)$" z "Hello Goodbye Goodbye Goodbye\n\n"# No Match + +# \Z matches at end of input, like $ with default flags. +".*?(Goodbye)\Z" zy "<0>Hello Goodbye Goodbye <1>Goodbye" +".*?(Goodbye)" ZY "<0>Hello <1>Goodbye Goodbye Goodbye" +".*?(Goodbye)\Z" z "Hello Goodbye> Goodbye Goodbye "# No Match +"here$" z "here\nthe end"# No Match + +".*?(Goodbye)\Z" "<0>Hello Goodbye Goodbye <1>Goodbye\n" +".*?(Goodbye)\Z" "<0>Hello Goodbye Goodbye <1>Goodbye\n" +".*?(Goodbye)\Z" "<0>Hello Goodbye Goodbye <1>Goodbye\r\n" +".*?(Goodbye)\Z" "Hello Goodbye Goodbye Goodbye\n\n"# No Match + +# \z matches only at the end of string. +# no special treatment of new lines. +# no dependencies on flag settings. +".*?(Goodbye)\z" zy "<0>Hello Goodbye Goodbye <1>Goodbye" +".*?(Goodbye)\z" z "Hello Goodbye Goodbye Goodbye "# No Match +"here$" z "here\nthe end"# No Match + +".*?(Goodbye)\z" z "Hello Goodbye Goodbye Goodbye\n"# No Match +".*?(Goodbye)\n\z" zy "<0>Hello Goodbye Goodbye <1>Goodbye\n" +"abc\z|def" ZY "abc<0>def" + +# (?# comment) doesn't muck up pattern +"Hello (?# this is a comment) world" " <0>Hello world..." + +# Check some implementation corner cases base on the way literal strings are compiled. +"A" "<0>A" +"AB" "<0>ABABABAB" +"AB+" "<0>ABBBA" +"AB+" "<0>ABABAB" +"ABC+" "<0>ABCABC" +"ABC+" "<0>ABCCCCABC" +"(?:ABC)+" "<0>ABCABCABCD" +"(?:ABC)DEF+" "<0>ABCDEFFFD" +"AB\.C\eD\u0666E" "<0>AB.C\u001BD\u0666EF" +"ab\Bde" "<0>abde" + +# loop breaking +"(a?)*" "<0><1>xyz" +"(a?)+" "<0><1>xyz" +"^(?:a?b?)*$" "a--" +"(x?)*xyz" "<0>xx<1>xyz" # Sligthtly weird, but correct. The "last" time through (x?), + # it matches the empty string. + +# Set expressions, basic operators and escapes work +# +"[\d]+" "<0>0123abc/.," +"[^\d]+" "0123<0>abc/.," +"[\D]+" "0123<0>abc/.," +"[^\D]+" "<0>0123abc/.," + +"[\s]+" "<0> \tabc/.," +"[^\s]+" " \t<0>abc/.," +"[\S]+" " \t<0>abc/.," +"[^\S]+" "<0> \tabc/.," + +"[\w]+" "<0>abc123 .,;" +"[^\w]+" "abc123<0> .,;" +"[\W]+" "abc123<0> .,;" +"[^\W]+" "<0>abc123 .,;" + +"[\z]+" "abc<0>zzzdef" # \z has no special meaning +"[^\z]+" "<0>abczzzdef" +"[\^]+" "abc<0>^^" +"[^\^]+" "<0>abc^^" + +"[\u0041c]+" "<0>AcAcdef" +"[\U00010002]+" "<0>\ud800\udc02\U00010003" +"[^\U00010002]+" "<0>Hello\x{10002}" +"[\x61b]+" "<0>ababcde" +#"[\x6z]+" "\x06" #TODO: single hex digits should fail +"[\x{9}\x{75}\x{6d6}\x{6ba6}\x{6146B}\x{10ffe3}]+" "<0>\u0009\u0075\u06d6\u6ba6\U0006146B\U0010ffe3abc" + +"[\N{LATIN CAPITAL LETTER TONE SIX}ab\N{VARIATION SELECTOR-70} ]+" "x<0> \u0184\U000E0135 abc" +"[\N{LATIN SMALL LETTER C}-\N{LATIN SMALL LETTER F}]+" "ab<0>cdefghi" + + + +# +# [set expressions], check the precedence of '-', '&', '--', '&&' +# '-' and '&', for compatibility with ICU UnicodeSet, have the same +# precedence as the implicit Union between adjacent items. +# '--' and '&&', for compatibility with Java, have lower precedence than +# the implicit Union operations. '--' and '&&' themselves +# have the same precedence, and group left to right. +# +"[[a-m]-[f-w]p]+" "<0>depfgwxyz" +"[^[a-m]-[f-w]p]+" "dep<0>fgwxyz" + +"[[a-m]--[f-w]p]+" "<0>depfgwxyz" +"[^[a-m]--[f-w]p]+" "de<0>pfgwxyz" + +"[[a-m]&[e-s]w]+" "<0>efmwadnst" +"[^[a-m]&[e-s]w]+" "efmw<0>adnst" + +"[[a-m]&[e-s]]+" "<0>efmadnst" + + + +# {min,max} iteration qualifier +"A{3}BC" "<0>AAABC" + +"(ABC){2,3}AB" "no matchAB" +"(ABC){2,3}AB" "ABCAB" +"(ABC){2,3}AB" "<0>ABC<1>ABCAB" +"(ABC){2,3}AB" "<0>ABCABC<1>ABCAB" +"(ABC){2,3}AB" "<0>ABCABC<1>ABCABCAB" + +"(ABC){2}AB" "ABCAB" +"(ABC){2}AB" "<0>ABC<1>ABCAB" +"(ABC){2}AB" "<0>ABC<1>ABCABCAB" +"(ABC){2}AB" "<0>ABC<1>ABCABCABCAB" + +"(ABC){2,}AB" "ABCAB" +"(ABC){2,}AB" "<0>ABC<1>ABCAB" +"(ABC){2,}AB" "<0>ABCABC<1>ABCAB" +"(ABC){2,}AB" "<0>ABCABCABC<1>ABCAB" + +"X{0,0}ABC" "<0>ABC" +"X{0,1}ABC" "<0>ABC" + +"(?:Hello(!{1,3}) there){1}" "Hello there" +"(?:Hello(!{1,3}) there){1}" "<0>Hello<1>! there" +"(?:Hello(!{1,3}) there){1}" "<0>Hello<1>!! there" +"(?:Hello(!{1,3}) there){1}" "<0>Hello<1>!!! there" +"(?:Hello(!{1,3}) there){1}" "Hello!!!! there" + +# Nongreedy {min,max}? intervals +"(ABC){2,3}?AB" "no matchAB" +"(ABC){2,3}?AB" "ABCAB" +"(ABC){2,3}?AB" "<0>ABC<1>ABCAB" +"(ABC){2,3}?AB" "<0>ABC<1>ABCABCAB" +"(ABC){2,3}?AB" "<0>ABC<1>ABCABCABCAB" +"(ABC){2,3}?AX" "<0>ABCABC<1>ABCAX" +"(ABC){2,3}?AX" "ABC<0>ABCABC<1>ABCAX" + +# Possessive {min,max}+ intervals +"(ABC){2,3}+ABC" "ABCABCABC" +"(ABC){1,2}+ABC" "<0>ABC<1>ABCABC" +"(?:(.)\1){2,5}+." "<0>aabbcc<1>ddex" + + +# Atomic Grouping +"(?>.*)abc" "abcabcabc" # no match. .* consumed entire string. +"(?>(abc{2,4}?))(c*)" "<0><1>abcc<2>cccddd" +"(\.\d\d(?>[1-9]?))\d+" "1.625" +"(\.\d\d(?>[1-9]?))\d+" "1<0><1>.6250" + +# Possessive *+ +"(abc)*+a" "abcabcabc" +"(abc)*+a" "<0>abc<1>abcab" +"(a*b)*+a" "<0><1>aaaabaaaa" + +# Possessive ?+ +"c?+ddd" "<0>cddd" +"c?+cddd" "cddd" +"c?cddd" "<0>cddd" + +# Back Reference +"(?:ab(..)cd\1)*" "<0>ab23cd23ab<1>wwcdwwabxxcdyy" +"ab(?:c|(d?))(\1)" "<0>ab<1><2>c" +"ab(?:c|(d?))(\1)" "<0>ab<1>d<2>d" +"ab(?:c|(d?))(\1)" "<0>ab<1><2>e" +"ab(?:c|(d?))(\1)" "<0>ab<1><2>" + +# Back References that hit/don't hit end +"(abcd) \1" z "abcd abc" +"(abcd) \1" Z "<0><1>abcd abcd" +"(abcd) \1" Z "<0><1>abcd abcd " + +# Case Insensitive back references that hit/don't hit end. +"(abcd) \1" zi "abcd abc" +"(abcd) \1" Zi "<0><1>abcd ABCD" +"(abcd) \1" Zi "<0><1>abcd ABCD " + +# Back references that hit/don't hit boundary limits. + +"(abcd) \1" z "abcd abcd " +"(abcd) \1" Z "<0><1>abcd abcd " +"(abcd) \1" Z "<0><1>abcd abcd " + +"(abcd) \1" zi "abcd abcd " +"(abcd) \1" Zi "<0><1>abcd abcd " +"(abcd) \1" Zi "<0><1>abcd abcd " + +# Back reference that fails match near the end of input without actually hitting the end. +"(abcd) \1" ZL "abcd abd" +"(abcd) \1" ZLi "abcd abd" + +# Back reference to a zero-length match. They are always a successful match. +"ab(x?)cd(\1)ef" "<0>ab<1>cd<2>ef" +"ab(x?)cd(\1)ef" i "<0>ab<1>cd<2>ef" + +# Back refs to capture groups that didn't participate in the match. +"ab(?:(c)|(d))\1" "abde" +"ab(?:(c)|(d))\1" "<0>ab<1>cce" +"ab(?:(c)|(d))\1" i "abde" +"ab(?:(c)|(d))\1" i "<0>ab<1>cce" + +# Named back references +"(?abcd)\k" "<0><1>abcdabcd" +"(no)?(?abcd)\k" "<0><2>abcdabcd" + +"(?...)" E " " # backref names are ascii letters & numbers only" +"(?<1a>...)" E " " # backref names must begin with a letter" +"(?.)(?.)" E " " # Repeated names are illegal. + + +# Case Insensitive +"aBc" i "<0>ABC" +"a[^bc]d" i "ABD" +'((((((((((a))))))))))\10' i "<0><1><2><3><4><5><6><7><8><9><10>AA" + +"(?:(?i)a)b" "<0>Ab" +"ab(?i)cd" "<0>abCd" +"ab$cd" "abcd" + +"ssl" i "abc<0>ßlxyz" +"ssl" i "abc<0>ẞlxyz" +"FIND" i "can <0>find ?" # fi ligature, \ufb01 +"find" i "can <0>FIND ?" +"ῧ" i "xxx<0>ῧxxx" # Composed char (match string) decomposes when case-folded (pattern) + +# White space handling +"a b" "ab" +"abc " "abc" +"abc " "<0>abc " +"ab[cd e]z" "<0>ab z" +"ab\ c" "<0>ab c " +"ab c" "<0>ab c " +"ab c" x "ab c " +"ab\ c" x "<0>ab c " + +# +# Pattern Flags +# +"(?u)abc" "<0>abc" +"(?-u)abc" "<0>abc" + +# +# \c escapes (Control-whatever) +# +"\cA" "<0>\u0001" +"\ca" "<0>\u0001" +"\c\x" "<0>\u001cx" + + +#Multi-line mode +'b\s^' m "a\nb\n" +"(?m)^abc$" "abc \n abc\n<0>abc\nabc" +"(?m)^abc$" 2 "abc \n abc\nabc\n<0>abc" +"^abc$" 2 "abc \n abc\nabc\nabc" + +# Empty and full range +"[\u0000-\U0010ffff]+" "<0>abc\u0000\uffff\U00010000\U0010ffffzz" +"[^\u0000-\U0010ffff]" "abc\u0000\uffff\U00010000\U0010ffffzz" +"[^a--a]+" "<0>abc\u0000\uffff\U00010000\U0010ffffzz" + +# Free-spacing mode +"a b c # this is a comment" x "<0>abc " +'^a (?#xxx) (?#yyy) {3}c' x "<0>aaac" +"a b c [x y z]" x "abc " +"a b c [x y z]" x "a b c " +"a b c [x y z]" x "<0>abcxyz" +"a b c [x y z]" x "<0>abcyyz" + +# +# Look Behind +# +"(?<=a)b" "a<0>b" +"(.*)(?<=[bc])" "<0><1>abcd" +"(?<=(abc))def" "<1>abc<0>def" # lookbehind precedes main match. +"(?<=ab|abc)xyz" "abwxyz" # ab matches, but not far enough. +"(?<=abc)cde" "abcde" +"(?<=abc|ab)cde" "ab<0>cde" +"(?<=abc|ab)cde" "abc<0>cde" + +"(?<=bc?c?c?)cd" "ab<0>cd" +"(?<=bc?c?c?)cd" "abc<0>cd" +"(?<=bc?c?c?)cd" "abcc<0>cd" +"(?<=bc?c?c?)cd" "abccc<0>cd" +"(?<=bc?c?c?)cd" "abcccccd" +"(?<=bc?c?c?)c+d" "ab<0>cccccd" + +".*(?<=: ?)(\w*)" "<0>1:one 2: two 3:<1>three " + +# +# Named Characters +# +"a\N{LATIN SMALL LETTER B}c" "<0>abc" +"a\N{LATIN SMALL LETTER B}c" i "<0>abc" +"a\N{LATIN SMALL LETTER B}c" i "<0>aBc" +"a\N{LATIN SMALL LETTER B}c" "aBc" + +"\N{FULL STOP}*" "<0>...abc" + +"$" "abc<0>" + +# +# Optimizations of .* at end of patterns +# +"abc.*" "<0>abcdef" +"abc.*$" "<0>abcdef" +"abc(.*)" "<0>abc<1>def" +"abc(.*)" "<0>abc<1>" +"abc.*" "<0>abc\ndef" +"abc.*" s "<0>abc\ndef" +"abc.*$" s "<0>abc\ndef" +"abc.*$" "abc\ndef" +"abc.*$" m "<0>abc\ndef" +"abc.*\Z" m "abc\ndef" +"abc.*\Z" sm "<0>abc\ndef" + +"abc*" "<0>abcccd" +"abc*$" "<0>abccc" +"ab(?:ab[xyz]\s)*" "<0>ababy abx abc" + +"(?:(abc)|a)(?:bc)+" "<0>abc" +"(?:(abc)|a)(?:bc)*" "<0><1>abc" +"^[+\-]?[0-9]*\.?[0-9]*" "<0>123.456" + +"ab.+yz" "<0>abc12345xyzttt" +"ab.+yz" s "<0>abc12345xyzttt" + +"ab.+yz" "abc123\n45xyzttt" +"ab.+yz" s "<0>abc12\n345xyzttt" + +"ab[0-9]+yz" "---abyz+++" +"ab[0-9]+yz" "---<0>ab1yz+++" +"ab[0-9]+yz" "---<0>ab12yz+++" +"ab[0-9]+yz" "---<0>ab123456yz+++" + +"ab([0-9]+|[A-Z]+)yz" "---abyz+++" +"ab([0-9]+|[A-Z]+)yz" "---<0>ab<1>1yz+++" +"ab([0-9]+|[A-Z]+)yz" "---<0>ab<1>12yz+++" +"ab([0-9]+|[A-Z]+)yz" "---<0>ab<1>Ayz+++" +"ab([0-9]+|[A-Z]+)yz" "---<0>ab<1>AByz+++" +"ab([0-9]+|[A-Z]+)yz" "---<0>ab<1>ABCDEyz+++" + +# +# Hex format \x escaping +# +"ab\x63" "<0>abc" +"ab\x09w" "<0>ab\u0009w" +"ab\xabcdc" "<0>ab\u00abcdc" +"ab\x{abcd}c" "<0>ab\uabcdc" +"ab\x{101234}c" "<0>ab\U00101234c" +"abα" "<0>abα" + +# +# Octal Escaping. This conforms to Java conventions, not Perl. +"\0101\00\03\073\0154\01442" "<0>A\u0000\u0003\u003b\u006c\u0064\u0032" +"\0776" "<0>\u003f\u0036" # overflow, the 6 is literal. +"\0376xyz" "<0>\u00fexyz" +"\08" E "<0>\u00008" +"\0" E "x" + +# +# \u Surrogate Pairs +# +"\ud800\udc00" "<0>\U00010000" +"\ud800\udc00*" "<0>\U00010000\U00010000\U00010000\U00010001" +# TODO (Vitess): The next case has invalid UTF-8, so it's not supported right now for testing. It likely works in practice though! +# "\ud800\ud800\udc00" "<0>\ud800\U00010000\U00010000\U00010000\U00010001" +"(\ud800)(\udc00)" "\U00010000" +"\U00010001+" "<0>\U00010001\U00010001\udc01" + +# +# hitEnd with find() +# +"abc" Z "aa<0>abc abcab" +"abc" 2Z "aaabc <0>abcab" +"abc" 3z "aa>abc abcab" + +# +# \ escaping +# +"abc\jkl" "<0>abcjkl" # escape of a non-special letter is just itself. +"abc[ \j]kl" "<0>abcjkl" + +# +# \R all newline sequences. +# +"abc\Rxyz" "<0>abc\u000axyzgh" +"abc\Rxyz" "<0>abc\u000bxyzgh" +"abc\Rxyz" "<0>abc\u000cxyzgh" +"abc\Rxyz" "<0>abc\u000dxyzgh" +"abc\Rxyz" "<0>abc\u0085xyzgh" +"abc\Rxyz" "<0>abc\u2028xyzgh" +"abc\Rxyz" "<0>abc\u2029xyzgh" +"abc\Rxyz" "<0>abc\u000d\u000axyzgh" + +"abc\R\nxyz" "abc\u000d\u000axyzgh" # \R cannot match only the CR from a CR/LF sequence. +"abc\r\nxyz" "<0>abc\u000d\u000axyzgh" + +"abc\Rxyz" "abc\u0009xyz" # Assorted non-matches. +"abc\Rxyz" "abc\u000exyz" +"abc\Rxyz" "abc\u202axyz" + +# \v \V single character new line sequences. + +"abc\vxyz" "<0>abc\u000axyzgh" +"abc\vxyz" "<0>abc\u000bxyzgh" +"abc\vxyz" "<0>abc\u000cxyzgh" +"abc\vxyz" "<0>abc\u000dxyzgh" +"abc\vxyz" "<0>abc\u0085xyzgh" +"abc\vxyz" "<0>abc\u2028xyzgh" +"abc\vxyz" "<0>abc\u2029xyzgh" +"abc\vxyz" "abc\u000d\u000axyzgh" +"abc\vxyz" "abc?xyzgh" + +"abc[\v]xyz" "<0>abc\u000axyzgh" +"abc[\v]xyz" "<0>abc\u000bxyzgh" +"abc[\v]xyz" "<0>abc\u000cxyzgh" +"abc[\v]xyz" "<0>abc\u000dxyzgh" +"abc[\v]xyz" "<0>abc\u0085xyzgh" +"abc[\v]xyz" "<0>abc\u2028xyzgh" +"abc[\v]xyz" "<0>abc\u2029xyzgh" +"abc[\v]xyz" "abc\u000d\u000axyzgh" +"abc[\v]xyz" "abc?xyzgh" + +"abc\Vxyz" "abc\u000axyzgh" +"abc\Vxyz" "abc\u000bxyzgh" +"abc\Vxyz" "abc\u000cxyzgh" +"abc\Vxyz" "abc\u000dxyzgh" +"abc\Vxyz" "abc\u0085xyzgh" +"abc\Vxyz" "abc\u2028xyzgh" +"abc\Vxyz" "abc\u2029xyzgh" +"abc\Vxyz" "abc\u000d\u000axyzgh" +"abc\Vxyz" "<0>abc?xyzgh" + +# \h \H horizontal white space. Defined as gc=space_separator plus ascii tab + +"abc\hxyz" "<0>abc xyzgh" +"abc\Hxyz" "abc xyzgh" +"abc\hxyz" "<0>abc\u2003xyzgh" +"abc\Hxyz" "abc\u2003xyzgh" +"abc\hxyz" "<0>abc\u0009xyzgh" +"abc\Hxyz" "abc\u0009xyzgh" +"abc\hxyz" "abc?xyzgh" +"abc\Hxyz" "<0>abc?xyzgh" + +"abc[\h]xyz" "<0>abc xyzgh" +"abc[\H]xyz" "abc xyzgh" +"abc[\h]xyz" "<0>abc\u2003xyzgh" +"abc[\H]xyz" "abc\u2003xyzgh" +"abc[\h]xyz" "<0>abc\u0009xyzgh" +"abc[\H]xyz" "abc\u0009xyzgh" +"abc[\h]xyz" "abc?xyzgh" +"abc[\H]xyz" "<0>abc?xyzgh" + + +# +# Bug xxxx +# +"(?:\-|(\-?\d+\d\d\d))?(?:\-|\-(\d\d))?(?:\-|\-(\d\d))?(T)?(?:(\d\d):(\d\d):(\d\d)(\.\d+)?)?(?:(?:((?:\+|\-)\d\d):(\d\d))|(Z))?" MG "<0>-1234-21-31T41:51:61.789+71:81" + + +# +# A random, complex, meaningless pattern that should at least compile +# +"(?![^\\G)(?![^|\]\070\ne\{\t\[\053\?\\\x51\a\075\0023-\[&&[|\022-\xEA\00-\u41C2&&[^|a-\xCC&&[^\037\uECB3\u3D9A\x31\|\[^\016\r\{\,\uA29D\034\02[\02-\[|\t\056\uF599\x62\e\<\032\uF0AC\0026\0205Q\|\\\06\0164[|\057-\u7A98&&[\061-g|\|\0276\n\042\011\e\xE8\x64B\04\u6D0EDW^\p{Lower}]]]]?)(?<=[^\n\\\t\u8E13\,\0114\u656E\xA5\]&&[\03-\026|\uF39D\01\{i\u3BC2\u14FE]])(?<=[^|\uAE62\054H\|\}&&^\p{Space}])(?sxx)(?<=[\f\006\a\r\xB4]{1,5})|(?x-xd:^{5}+)()" "<0>abc" + + +# +# Bug 3225 + +"1|9" "<0>1" +"1|9" "<0>9" +"1*|9" "<0>1" +"1*|9" "<0>9" + +"(?:a|ac)d" "<0>acd" +"a|ac" "<0>ac" + +# +# Bug 3320 +# +"(a([^ ]+)){0,} (c)" "<0><1>a<2>b <3>c " +"(a([^ ]+))* (c)" "<0><1>a<2>b <3>c " + +# +# Bug 3436 +# +"(.*?) *$" "<0><1>test " + +# +# Bug 4034 +# +"\D" "<0>ABC\u00ffDEF" +"\d" "ABC\u00ffDEF" +"\D" "<0>\u00ffDEF" +"\d" "\u00ffDEF" +"\D" "123<0>\u00ffDEF" +"\D" "<0>\u0100DEF" +"\D" "123<0>\u0100DEF" + +# +#bug 4024, new line sequence handling +# +"(?m)^" "<0>AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a" +"(?m)^" 2 "AA\u000d\u000a<0>BB\u000d\u000aCC\u000d\u000a" +"(?m)^" 3 "AA\u000d\u000aBB\u000d\u000a<0>CC\u000d\u000a" +"(?m)^" 4 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a" + +"(?m)$" "AA<0>\u000d\u000aBB\u000d\u000aCC\u000d\u000a" +"(?m)$" 2 "AA\u000d\u000aBB<0>\u000d\u000aCC\u000d\u000a" +"(?m)$" 3 "AA\u000d\u000aBB\u000d\u000aCC<0>\u000d\u000a" +"(?m)$" 4 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a<0>" +"(?m)$" 5 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a" + +"$" "AA\u000d\u000aBB\u000d\u000aCC<0>\u000d\u000a" +"$" 2 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a<0>" +"$" 3 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a" + +"$" "\u000a\u0000a<0>\u000a" +"$" 2 "\u000a\u0000a\u000a<0>" +"$" 3 "\u000a\u0000a\u000a" + +"$" "<0>" +"$" 2 "" + +"$" "<0>\u000a" +"$" 2 "\u000a<0>" +"$" 3 "\u000a" + +"^" "<0>" +"^" 2 "" + +"\Z" "<0>" +"\Z" 2 "" +"\Z" 2 "\u000a<0>" +"\Z" "<0>\u000d\u000a" +"\Z" 2 "\u000d\u000a<0>" + + +# No matching ^ at interior new-lines if not in multi-line mode. +"^" "<0>AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a" +"^" 2 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a" + +# +# Dot-matches-any mode, and stopping at new-lines if off. +# +"." "<0>123\u000aXYZ" +"." 2 "1<0>23\u000aXYZ" +"." 3 "12<0>3\u000aXYZ" +"." 4 "123\u000a<0>XYZ" # . doesn't match newlines +"." 4 "123\u000b<0>XYZ" +"." 4 "123\u000c<0>XYZ" +"." 4 "123\u000d<0>XYZ" +"." 4 "123\u000d\u000a<0>XYZ" +"." 4 "123\u0085<0>XYZ" +"." 4 "123\u2028<0>XYZ" +"." 4 "123\u2029<0>XYZ" +"." 4s "123<0>\u000aXYZ" # . matches any +"." 4s "123<0>\u000bXYZ" +"." 4s "123<0>\u000cXYZ" +"." 4s "123<0>\u000dXYZ" +"." 4s "123<0>\u000d\u000aXYZ" +"." 4s "123<0>\u0085XYZ" +"." 4s "123<0>\u2028XYZ" +"." 4s "123<0>\u2029XYZ" +".{6}" "123\u000a\u000dXYZ" +".{6}" s "<0>123\u000a\u000dXY" + + +# +# Ranges +# +".*" "abc<0>defghi" +"a" "aaa<0>aaaaaa" +"a" 2 "aaaa<0>aaaaa" +"a" 3 "aaaaa<0>aaaa" +"a" 4 "aaaaaaaaa" +"a" "aaa<0>aaaaaa" + +# +# [set] parsing, systematically run through all of the parser states. +# +# +"[def]+" "abc<0>ddeeffghi" # set-open +"[^def]+" "<0>abcdefghi" +"[:digit:]+" "abc<0>123def" +"[:^digit:]+" "<0>abc123def" +"[\u005edef]+" "abc<0>de^fghi" + +"[]]+" "abc<0>]]][def" # set-open2 +"[^]]+" "<0>abc]]][def" + +"[:Lu:]+" "abc<0>ABCdef" # set-posix +"[:Lu]+" "abc<0>uL::Lu" +"[:^Lu]+" "abc<0>uL:^:Lu" +"[:]+" "abc<0>:::def" +"[:whats this:]" E " " +"[--]+" dE "-------" + +"[[nested]]+" "xyz[<0>nnetsteed]abc" #set-start +"[\x{41}]+" "CB<0>AAZYX" +"[\[\]\\]+" "&*<0>[]\\..." +"[*({<]+" "^&<0>{{(<<*)))" + + +"[-def]+" "abc<0>def-ef-dxyz" # set-start-dash +"[abc[--def]]" E " " + +"[x[&def]]+" "abc<0>def&ghi" # set-start-amp +"[&& is bad at start]" E " " + +"[abc" E " " # set-after-lit +"[def]]" "abcdef" +"[def]]" "abcde<0>f]]" + +"[[def][ghi]]+" "abc]<0>defghi[xyz" # set-after-set +"[[def]ghi]+" "abc]<0>defghi[xyz" +"[[[[[[[[[[[abc]" E " " +"[[abc]\p{Lu}]+" "def<0>abcABCxyz" + +"[d-f]+" "abc<0>defghi" # set-after-range +"[d-f[x-z]]+" "abc<0>defxyzzzgw" +"[\s\d]+" "abc<0> 123def" +"[d-f\d]+" "abc<0>def123ghi" +"[d-fr-t]+" "abc<0>defrstuvw" + +"[abc--]" E " " # set-after-op +"[[def]&&]" E " " +"[-abcd---]+" "<0>abc--" #[-abcd]--[-] +"[&abcd&&&ac]+" "b<0>ac&&cad" #[&abcd]&&[&ac] + +"[[abcd]&[ac]]+" "b<0>acacd" # set-set-amp +"[[abcd]&&[ac]]+" "b<0>acacd" +"[[abcd]&&ac]+" "b<0>acacd" +"[[abcd]&ac]+" "<0>bacacd&&&" + +"[abcd&[ac]]+" "<0>bacacd&&&" #set-lit-amp +"[abcd&&[ac]]+" "b<0>acacd" +"[abcd&&ac]+" "b<0>acacd" + +"[[abcd]-[ac]]+" "a<0>bdbdc" # set-set-dash +"[[abcd]--[ac]]+" "a<0>bdbdc" +"[[abcd]--ac]+" "a<0>bdbdc" +"[[abcd]-ac]+" "<0>bacacd---" + +"[a-d--[b-c]]+" "b<0>adadc" # set-range-dash +"[a-d--b-c]+" "b<0>adadc" +"[a-d-[b-c]]+" "<0>bad-adc" +"[a-d-b-c]+" "<0>bad-adc" +"[\w--[b-c]]+" "b<0>adadc" +"[\w--b-c]+" "b<0>adadc" +"[\w-[b-c]]+" "<0>bad-adc" +"[\w-b-c]+" "<0>bad-adc" + +"[a-d&&[b-c]]+" "a<0>bcbcd" # set-range-amp +"[a-d&&b-c]+" "a<0>bcbcd" +"[a-d&[b-c]]+" "<0>abc&bcd" +"[a-d&b-c]+" "<0>abc&bcd" + +"[abcd--bc]+" "b<0>addac" # set-lit-dash +"[abcd--[bc]]+" "b<0>addac" +"[abcd-[bc]]+" "<0>bad--dacxyz" +"[abcd-]+" "<0>bad--dacxyz" + +"[abcd-\s]+" E "xyz<0>abcd --xyz" # set-lit-dash-esc +"[abcd-\N{LATIN SMALL LETTER G}]+" "xyz-<0>abcdefghij-" +"[bcd-\{]+" "a<0>bcdefyz{|}" + +"[\p{Ll}]+" "ABC<0>abc^&*&" # set-escape +"[\P{Ll}]+" "abc<0>ABC^&*&xyz" +"[\N{LATIN SMALL LETTER Q}]+" "mnop<0>qqqrst" +"[\sa]+" "cb<0>a a (*&" +"[\S]+" " <0>hello " +"[\w]+" " <0>hello_world! " +"[\W]+" "a<0> *$%#,hello " +"[\d]+" "abc<0>123def" +"[\D]+" "123<0>abc567" +"[\$\#]+" "123<0>$#$#\\" + +# +# Try each of the Java compatibility properties. +# These are checked here, while normal Unicode properties aren't, because +# these Java compatibility properties are implemented directly by regexp, while other +# properties are handled by ICU's Property and UnicodeSet APIs. +# +# These tests are only to verify that the names are recognized and the +# implementation isn't dead. They are not intended to verify that the +# function definitions are 100% correct. +# +"[:InBasic Latin:]+" "ΓΔΕΖΗΘ<0>hello, world.ニヌネノハバパ" +"[:^InBasic Latin:]+" "<0>ΓΔΕΖΗΘhello, world.ニヌネノハバパ" +"\p{InBasicLatin}+" "ΓΔΕΖΗΘ<0>hello, world.ニヌネノハバパ" +"\P{InBasicLatin}+" "<0>ΓΔΕΖΗΘhello, world.ニヌネノハバパ" +"\p{InGreek}+" "<0>ΓΔΕΖΗΘhello, world.ニヌネノハバパ" +"\p{InCombining Marks for Symbols}" "<0>\u20d0" +"\p{Incombiningmarksforsymbols}" "<0>\u20d0" + + +"\p{javaDefined}+" "\uffff<0>abcd\U00045678" +"\p{javaDigit}+" "abc<0>1234xyz" +"\p{javaIdentifierIgnorable}+" "abc<0>\u0000\u000e\u009fxyz" +"\p{javaISOControl}+" "abc<0>\u0000\u000d\u0083xyz" +"\p{javaJavaIdentifierPart}+" "#@!<0>abc123_$;" +"\p{javaJavaIdentifierStart}+" "123\u0301<0>abc$_%^&" +"\p{javaLetter}+" "123<0>abcDEF&*()(" +"\p{javaLetterOrDigit}+" "$%^&*<0>123abcகஙசஜஞ☺♘♚☔☎♬⚄⚡" +"\p{javaLowerCase}+" "ABC<0>def&^%#:=" +"\p{javaMirrored}+" "ab$%<0>(){}[]xyz" +"\p{javaSpaceChar}+" "abc<0> \u00a0\u2028!@#" +"\p{javaSupplementaryCodePoint}+" "abc\uffff<0>\U00010000\U0010ffff\u0000" +"\p{javaTitleCase}+" "abCE<0>Džῌᾨ123" +"\p{javaUnicodeIdentifierStart}+" "123<0>abcⅣ%^&&*" +"\p{javaUnicodeIdentifierPart}+" "%&&^<0>abc123\u0301\u0002..." +"\p{javaUpperCase}+" "abc<0>ABC123" +"\p{javaValidCodePoint}+" "<0>\u0000abc\ud800 unpaired \udfff |\U0010ffff" +"\p{javaWhitespace}+" "abc\u00a0\u2007\u202f<0> \u0009\u001c\u001f\u202842" +"\p{all}+" "<0>123\u0000\U0010ffff" +"\P{all}+" "123\u0000\U0010ffff" + +# [:word:] is implemented directly by regexp. Not a java compat property, but PCRE and others. + +"[:word:]+" ".??$<0>abc123ΓΔΕΖΗ_%%%" +"\P{WORD}+" "<0>.??$abc123ΓΔΕΖΗ_%%%" + +# +# Errors on unrecognized ASCII letter escape sequences. +# +"[abc\Y]+" "<0>abcY" +"[abc\Y]+" eE "<0>abcY" + +"(?:a|b|c|\Y)+" "<0>abcY" +"(?:a|b|c|\Y)+" eE "<0>abcY" + +"\Q\Y\E" e "<0>\\Y" + +# +# Reported problem +# +"[a-\w]" E "x" + +# +# Bug 4045 +# +"A*" "<0>AAAA" +"A*" 2 "AAAA<0>" +"A*" 3 "AAAA" +"A*" 4 "AAAA" +"A*" 5 "AAAA" +"A*" 6 "AAAA" +"A*" "<0>" +"A*" 2 "" +"A*" 3 "" +"A*" 4 "" +"A*" 5 "" + +# +# Bug 4046 +# +"(?m)^" "<0>AA\u000dBB\u000dCC\u000d" +"(?m)^" 2 "AA\u000d<0>BB\u000dCC\u000d" +"(?m)^" 3 "AA\u000dBB\u000d<0>CC\u000d" +"(?m)^" 4 "AA\u000dBB\u000dCC\u000d" +"(?m)^" 5 "AA\u000dBB\u000dCC\u000d" +"(?m)^" 6 "AA\u000dBB\u000dCC\u000d" + +"(?m)^" "<0>AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a" +"(?m)^" 2 "AA\u000d\u000a<0>BB\u000d\u000aCC\u000d\u000a" +"(?m)^" 3 "AA\u000d\u000aBB\u000d\u000a<0>CC\u000d\u000a" +"(?m)^" 4 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a" + +# +# Bug 4059 +# +"\w+" "<0>イチロー" +"\b....\b." "<0>イチロー?" + + +# +# Bug 4058 ICU Unicode Set patterns have an odd feature - +# A $ as the last character before the close bracket means match +# a \uffff, which means off the end of the string in transliterators. +# Didn't make sense for regular expressions, and is now fixed. +# +"[\$](P|C|D);" "<0>$<1>P;" +"[$](P|C|D);" "<0>$<1>P;" +"[$$](P|C|D);" "<0>$<1>P;" + +# +# bug 4888 Flag settings lost in some cases. +# +"((a){2})|(#)" is "no" +"((a){2})|(#)" is "<0><1>a<2>a#" +"((a){2})|(#)" is "a<0><3>#" + +"((a|b){2})|c" is "<0>c" +"((a|b){2})|c" is "<0>C" +"((a|b){2})|c" s "C" + +# +# bug 5617 ZWJ \u200d shouldn't cause word boundaries +# +".+?\b" "<0> \u0935\u0915\u094D\u200D\u0924\u0947 " +".+?\b" 2 " <0>\u0935\u0915\u094D\u200D\u0924\u0947 " +".+?\b" 3 " \u0935\u0915\u094D\u200D\u0924\u0947 " + +# +# bug 5386 "^.*$" should match empty input +# +"^.*$" "<0>" +"^.*$" m "<0>" +"^.*$" "<0>\n" +"(?s)^.*$" "<0>\n" + +# +# bug 5386 Empty pattern and empty input should match. +# +"" "<0>abc" +"" "<0>" + +# +# bug 5386 Range upper and lower bounds can be equal +# +"[a-a]" "<0>a" + +# +# bug 5386 $* should not fail, should match empty string. +# +"$*" "<0>abc" + +# +# bug 5386 \Q ... \E escaping problem +# +"[a-z\Q-$\E]+" "QE<0>abc-def$." + +# More reported 5386 Java comaptibility failures +# +"[^]*abb]*" "<0>kkkk" +"\xa" "huh" # Java would like to be warned. +"^.*$" "<0>" + +# +# bug 5386 Empty left alternation should produce a zero length match. +# +"|a" "<0>a" +"$|ab" "<0>ab" +"$|ba" "ab<0>" + +# +# bug 5386 Java compatibility for set expressions +# +"[a-z&&[cde]]+" "ab<0>cdefg" + +# +# bug 6019 matches() needs to backtrack and check for a longer match if the +# first match(es) found don't match the entire input. +# +"a?|b" "<0>b" +"a?|b" M "<0>b" +"a?|.*?u|stuff|d" M "<0>stuff" +"a?|.*?(u)|stuff|d" M "<0>stuff<1>u" +"a+?" "<0>aaaaaaaaaaaaa" +"a+?" M "<0>aaaaaaaaaaaaa" + +# +# Bug 7724. Expression to validate zip codes. +# +"(?!0{5})(\d{5})(?!-?0{4})(-?\d{4})?" "<0><1>94040<2>-3344" +"(?!0{5})(\d{5})(?!-?0{4})(-?\d{4})?" "94040-0000" +"(?!0{5})(\d{5})(?!-?0{4})(-?\d{4})?" "00000-3344" + +# +# Bug 8666. Assertion failure on match, bad operand to JMP_SAV_X opcode. +# +"((.??)+|A)*" "<0><1><2>AAAAABBBBBCCCCCDDDDEEEEE" + +# +# Bug 8826. Incorrect results with case insensitive matches. +# +"AS(X)" i "aßx" +"AS.*" i "aßx" # Expansion of sharp s can't split between pattern terms. +"ASßS" i "<0>aßß" # All one literal string, does match. +"ASß{1}S" i "aßß" # Pattern with terms, no match. +"aßx" i "<0>assx" +"aßx" i "<0>ASSX" +"aßx" i "<0>aßx" +"ASS(.)" i "<0>aß<1>x" + +# Case Insensitive, probe some corner cases. +"ass+" i "aß" # Second 's' in pattern is qualified, can't combine with first. +"as+" i "aß" +"aßs" i "as" # Can't match half of a ß +"aß+" i "<0>asssssssss" +"aß+" i "<0>assßSssSSSs" +"a(ß?)+" i "<0>assssssss<1>s" +"a(ß?)+" i "<0>a<1>zzzzzzzzs" + +"\U00010400" i "<0>\U00010428" # case folded supplemental code point. + +"sstuff" i "<0>ßtuff" # exercise optimizations on what chars can start a match. +"sstuff" i "s<0>ßtuff" # exercise optimizations on what chars can start a match. +"ßtuff" i "s<0>sstuff" +"ßtuff" i "s<0>Sstuff" + +"a(..)\1" i "<0>A<1>bcBCdef" +"(ß)\1" i "aa<0><1>ssßzz" # Case insensitive back reference +"..(.)\1" i "<0>aa<1>ßss" +"ab(..)\1" i "xx<0>ab<1>ssßss" + +" (ss) ((\1.*)|(.*))" i "<0> <1>ss <2><4>sß" # The back reference 'ss' must not match in 'sß' + +# Bug 9057 +# \u200c and \u200d should be word characters. +# +"\w+" " <0>abc\u200cdef\u200dghi " +"\w+" i " <0>abc\u200cdef\u200dghi " +"[\w]+" " <0>abc\u200cdef\u200dghi " +"[\w]+" i " <0>abc\u200cdef\u200dghi " + +# Bug 9283 +# uregex_open fails for look-behind assertion + case-insensitive + +"(ab)?(?<=ab)cd|ef" i "<0><1>abcd" + +# Bug 9719 Loop breaking on (zero length match){3,} (unlimited upper bound). +# + +"(?:abc){1,}abc" "<0>abcabcabcabcabc" +"(?:2*){2,}?a2\z" "<0>2a2" +"(?:2*){2,}?a2\z" "2a3" +"(?:x?+){3,}+yz" "w<0>yz" +"(2*){2,}?a2\\z" "2a3" +"(2*){2,}?a2\\z" "<0>2<1>a2\\z" +"(2*){2,}?a2\z" "<0>2<1>a2" + + +# Bug 10024 +# Incorrect (unbounded) longest match length with {1, 20} style quantifiers. +# Unbounded match is disallowed in look-behind expressions. +# Max match length is used to limit where to check for look-behind matches. + +"(?<=a{1,5})bc" "aaaa<0>bcdef" +"(?<=(?:aa){3,20})bc" "aaaaaa<0>bcdef" +"(?jkl" +"(?<=a{11})bc" "aaaaaaaaaaa<0>bc" +"(?<=a{11})bc" "aaaaaaaaaabc" +"(?<=a{1,})bc" E "aaaa<0>bcdef" # U_REGEX_LOOK_BEHIND_LIMIT error. +"(?<=(?:){11})bc" "<0>bc" # Empty (?:) expression. + +# Bug 10835 +# Match Start Set not being correctly computed for case insensitive patterns. +# (Test here is to dump the compiled pattern & manually check the start set.) + +"(private|secret|confidential|classified|restricted)" i "hmm, <0><1>Classified stuff" +"(private|secret|confidential|classified|restricted)" "hmm, Classified stuff" + +# Bug 10844 + +"^([\w\d:]+)$" "<0><1>DiesIst1Beispiel:text" +"^([\w\d:]+)$" i "<0><1>DiesIst1Beispiel:text" +"^(\w+\d\w+:\w+)$" "<0><1>DiesIst1Beispiel:text" +"^(\w+\d\w+:\w+)$" i "<0><1>DiesIst1Beispiel:text" + +# Bug 11049 +# Edge cases in find() when pattern match begins with set of code points +# and the match begins at the end of the string. + +"A|B|C" "hello <0>A" +"A|B|C" "hello \U00011234" +"A|B|\U00012345" "hello <0>\U00012345" +"A|B|\U00010000" "hello \ud800" + +# Bug 11369 +# Incorrect optimization of patterns with a zero length quantifier {0} + +"(.|b)(|b){0}\$(?#xxx){3}(?>\D*)" "AAAAABBBBBCCCCCDDDDEEEEE" +"(|b)ab(c)" "<0><1>ab<2>c" +"(|b){0}a{3}(D*)" "<0>aaa<2>" +"(|b){0,1}a{3}(D*)" "<0><1>aaa<2>" +"((|b){0})a{3}(D*)" "<0><1>aaa<3>" + +# Bug 11370 +# Max match length computation of look-behind expression gives result that is too big to fit in the +# in the 24 bit operand portion of the compiled code. Expressions should fail to compile +# (Look-behind match length must be bounded. This case is treated as unbounded, an error.) + +"(?pre<1>\ud800post\ud800 fin" +"pre(.)post\1" i "pre\ud800post\ud800\udc00" # case insensiteve backrefs take a different code path +"pre(.)post\1" i "<0>pre<1>\ud800post\ud800 fin" + +# Bug 11554 +# +# Maximum match length computation was assuming UTF-16. +# Used in look-behind matches to constrain how far back to look. + +"(?<=a\x{100000})spam" "***a\x{100000}<0>spam**" +"(?<=aą)spam" "**aą<0>spam**" +"(?<=ąabc)spam" "**ąabc<0>spam**" + +"(?<=a\x{100000})spam" "***a\x{100001}spam**" +"(?<=aą)spam" "**bąspam**" +"(?<=ąabc)spam" "**ąabxspam**" + +# with negative look-behind + +"(?spam**" +"(?spam**" +"(?spam**" + +# Bug #12930 +# +# Minimum Match Length computation, int32_t overflow on an empty set in the pattern. +# The empty set, with no match possible, has a min match length of INT32_MAX. +# Was incremented subsequently. Caused assertion failure on pattern compile. + +"[^\u0000-\U0010ffff]bc?" "bc no match" +"[^\u0000-\U0010ffff]?bc?" "<0>bc has a match" + +# Bug #12160 Hit End behavior after find fails to find. +# To match Java, should be true if find fails to find. +# +"abc" Z "<0>abc abc abc xyz" +"abc" Z2 "abc <0>abc abc xyz" +"abc" Z3 "abc abc <0>abc xyz" +"abc" z4 "abc abc abc xyz" + +# Bug #13844 Verify that non-standard Java property names are recognized. +"[\p{IsAlphabetic}]" " <0>A" +"[\P{IsAlphabetic}]" "A<0> " +"[\p{IsIdeographic}]" "A<0>〆" +"[\P{IsIdeographic}]" "〆<0>A" +"[\p{IsLetter}]" " <0>A" +"[\P{IsLetter}]" "A<0> " +"[\p{Letter}]" " <0>A" +"[\p{IsLowercase}]" "A<0>a" +"[\P{IsLowercase}]" "a<0>A" +"[\p{IsUppercase}]" "a<0>A" +"[\P{IsUppercase}]" "A<0>a" +"[\p{IsTitlecase}]" "D<0>Dz" +"[\P{IsTitlecase}]" "Dz<0>D" +"[\p{IsPunctuation}]" " <0>&" +"[\P{IsPunctuation}]" "&<0> " +"[\p{IsControl}]" " <0>\x{82}" +"[\P{IsControl}]" "\x{82}<0> " +"[\p{IsWhite_Space}]" "x<0> " +"[\P{IsWhite_Space}]" " <0>x" +"[\p{IsDigit}]" " <0>4" +"[\P{IsDigit}]" "4<0> " +"[\p{IsHex_Digit}]" " <0>F" +"[\P{IsHex_Digit}]" "F<0> " +"[\p{IsJoin_Control}]" " <0>\x{200d}" +"[\P{IsJoin_Control}]" "\x{200d}<0> " +"[\p{IsNoncharacter_Code_Point}]" "A<0>\x{5fffe}" +"[\p{IsAssigned}]" "\x{10ffff}<0>a" +"[\P{IsAssigned}]" "a<0>\x{10ffff}" + +"[\p{InBasic Latin}]" "〆<0>A" +"[\p{InBasicLatin}]" "〆<0>A" +"[\p{InBasic-Latin}]" "〆<0>A" # ICU accepts '-'; Java does not. +"[\p{InBasic_Latin}]" "〆<0>A" +"[\p{Inbasiclatin}]" "〆<0>A" +"[\p{inbasiclatin}]" E "〆<0>A" # "In" must be cased as shown. Property name part is case insensitive. +"[\p{InCombining_Marks_for_Symbols}]" "a<0>\x{20DD}" # COMBINING ENCLOSING CIRCLE + +"[\p{all}]*" "<0>\x{00}abc\x{10ffff}" +"[\p{javaBadProperty}]" E "whatever" +"[\p{IsBadProperty}]" E "whatever" +"[\p{InBadBlock}]" E "whatever" +"[\p{In}]" E "whatever" +"[\p{Is}]" E "whatever" +"[\p{java}]" "x<0>ꦉ" # Note: "java" is a valid script code. + +"[\p{javaLowerCase}]+" "A<0>a" +"[\p{javaLowerCase}]+" i "<0>Aa" +"[\P{javaLowerCase}]+" "<0>Aa" +"[\P{javaLowerCase}]+" i "Aa" # No Match because case fold of the set happens first, then negation. + # JDK is not case insensitive w named properties, even though + # the insensitive match flag is set. A JDK bug? + +"[a-z]+" i "<0>Aa" # Matches JDK behavior. +"[^a-z]+" i "Aa" # (no match) which is JDK behavior. Case fold first, then negation. + +# Bug 20385. Assertion failure while compiling a negative look-behind expression consisting of a set with +# no contents. Meaning the [set] can never match. There is no syntax to directly express +# an empty set, so generate it by negating (^) a set of all code points. +# Also check empty sets in other contexts. + +"(?abc" + +"(?abc" +"x(?xabc" +"x(?xabc" +"x(?xabc" + +"[^\u0000-\U0010ffff]" "a" +"[^[^\u0000-\U0010ffff]]" "<0>a" + +"This is a string with (?:one |two |three )endings" "<0>This is a string with two endings" + +# Bug ICU-20544. Similar to 20385, above. Assertion failure with a negative look-behind assertion containing +# a set with no contents. Look-behind pattern includes more than just the empty set. + +"(?abc" # note: first 'ⰿ' is \u2c3f, hence empty set. +"(?abc" +"(?<=[^[^]]†)" "abc" # Problem also exists w positive look-behind + +# Bug ICU-20391. Crash in computation of minimum match length with nested look-around patterns. +# +"(?<=(?<=((?=)){0}+)" E "aaa" +"(?<=(?<=((?=)){0}+))" "<0>" +"(?<=c(?<=b((?=a)){1}+))" "aaa" +"abc(?=de(?=f))...g" "<0>abcdefg" +"abc(?=de(?=f))...g" "abcdxfg" + +# Bug ICU-20618 Assertion failure with nested look-around expressions. +# +"(?<=(?<=b?(?=a)))" "hello, world." + +# Bug ICU-20939 +# Incorrect word \b boundaries w UTF-8 input and non-ASCII text +# +"(?w)\b" v2 "äää<0> äää" + +# Bug ICU-21492 Assertion failure with nested look-around expressions. +# +"(?<=(?:(?<=(?:(?<=(?:(?<=)){2})){3})){4}" E "<0>" # orig failure from bug report, w mismatched parens. +"(?:(?<=(?:(?<=)){2}))" "<0>" # Simplified case, with a valid pattern. + +# Random debugging, Temporary +# + +# +# Regexps from http://www.regexlib.com +# +"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" G "<0>G1 1AA" +"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" G "<0>EH10 2QQ" +"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" G "<0>SW1 1ZZ" +"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" "G111 1AA" +"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" "X10 WW" +"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" "DDD 5WW" +#"^[\w\-]+(?:\.[\w\-]+)*@(?:[\w\-]+\.)+[a-zA-Z]{2,7}$" dG "<0>joe.tillis@unit.army.mil" # TODO: \w in pattern +#"^[\w-]+(?:\.[\w-]+)*@(?:[\w-]+\.)+[a-zA-Z]{2,7}$" G "<0>jack_rabbit@slims.com" # TODO: \w in pattern +#"^[\w-]+(?:\.[\w-]+)*@(?:[\w-]+\.)+[a-zA-Z]{2,7}$" G "<0>foo99@foo.co.uk" # TODO: \w in pattern +#"^[\w-]+(?:\.[\w-]+)*@(?:[\w-]+\.)+[a-zA-Z]{2,7}$" "find_the_mistake.@foo.org" # TODO: \w in pattern +#"^[\w-]+(?:\.[\w-]+)*@(?:[\w-]+\.)+[a-zA-Z]{2,7}$" ".prefix.@some.net" +"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" G "<0>asmith@mactec.com" +"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" G "<0>foo12@foo.edu" +"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" G "<0>bob.smith@foo.tv" +"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" "joe" +"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" "@foo.com" +"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" "a@a" +"^\d{1,2}\/\d{1,2}\/\d{4}$" G "<0>4/1/2001" +"^\d{1,2}\/\d{1,2}\/\d{4}$" G "<0>12/12/2001" +"^\d{1,2}\/\d{1,2}\/\d{4}$" G "<0>55/5/3434" +"^\d{1,2}\/\d{1,2}\/\d{4}$" "1/1/01" +"^\d{1,2}\/\d{1,2}\/\d{4}$" "12 Jan 01" +"^\d{1,2}\/\d{1,2}\/\d{4}$" "1-1-2001" +"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>01.1.02" +"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>11-30-2001" +"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>2/29/2000" +"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "02/29/01" +"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "13/01/2002" +"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "11/00/02" +"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" G "<0>127.0.0.1" +"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" G "<0>255.255.255.0" +"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" G "<0>192.168.0.1" +"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" "1200.5.4.3" +"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" "abc.def.ghi.jkl" +"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" "255.foo.bar.1" +"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" G "<0>COM1" +"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" G "<0>AUX" +"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" G "<0>LPT1" +"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" "image.jpg" +"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" "index.html" +"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" "readme.txt" +"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>29/02/1972" +"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>5-9-98" +"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>10-11-2002" +"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "29/02/2003" +"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "12/13/2002" +"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "1-1-1500" +"^(user=([a-z0-9]+,)*(([a-z0-9]+){1});)?(group=([a-z0-9]+,)*(([a-z0-9]+){1});)?(level=[0-9]+;)?$" G "<0>user=foo,bar,quux;group=manager,admin;level=100;" +"^(user=([a-z0-9]+,)*(([a-z0-9]+){1});)?(group=([a-z0-9]+,)*(([a-z0-9]+){1});)?(level=[0-9]+;)?$" G "<0>group=nobody;level=24;" +"^(user=([a-z0-9]+,)*(([a-z0-9]+){1});)?(group=([a-z0-9]+,)*(([a-z0-9]+){1});)?(level=[0-9]+;)?$" "user=foo" +"^(user=([a-z0-9]+,)*(([a-z0-9]+){1});)?(group=([a-z0-9]+,)*(([a-z0-9]+){1});)?(level=[0-9]+;)?$" "blahh" +"^(\(?\+?[0-9]*\)?)?[0-9_\- \(\)]*$" G "<0>(+44)(0)20-12341234" +"^(\(?\+?[0-9]*\)?)?[0-9_\- \(\)]*$" G "<0>02012341234" +"^(\(?\+?[0-9]*\)?)?[0-9_\- \(\)]*$" G "<0>+44 (0) 1234-1234" +"^(\(?\+?[0-9]*\)?)?[0-9_\- \(\)]*$" "(44+)020-12341234" +"^(\(?\+?[0-9]*\)?)?[0-9_\- \(\)]*$" "12341234(+020)" +"\b(\w+)\s+\1\b" G "<0>Tell the the preacher" +"\b(\w+)\s+\1\b" G "<0>some some" +"\b(\w+)\s+\1\b" G "<0>hubba hubba" +"\b(\w+)\s+\1\b" "once an annual report" +"\b(\w+)\s+\1\b" "mandate dated submissions" +"\b(\w+)\s+\1\b" "Hubba hubba" +"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" G "<0>+31235256677" +"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" G "<0>+31(0)235256677" +"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" G "<0>023-5256677" +"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" "+3123525667788999" +"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" "3123525667788" +"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" "232-2566778" +"^[-+]?\d*\.?\d*$" G "<0>123" +"^[-+]?\d*\.?\d*$" G "<0>+3.14159" +"^[-+]?\d*\.?\d*$" G "<0>-3.14159" +"^[-+]?\d*\.?\d*$" "abc" +"^[-+]?\d*\.?\d*$" "3.4.5" +"^[-+]?\d*\.?\d*$" "$99.95" +"^\$?([1-9]{1}[0-9]{0,2}(\,[0-9]{3})*(\.[0-9]{0,2})?|[1-9]{1}[0-9]{0,}(\.[0-9]{0,2})?|0(\.[0-9]{0,2})?|(\.[0-9]{1,2})?)$" G "<0>$1,234.50" +"^\$?([1-9]{1}[0-9]{0,2}(\,[0-9]{3})*(\.[0-9]{0,2})?|[1-9]{1}[0-9]{0,}(\.[0-9]{0,2})?|0(\.[0-9]{0,2})?|(\.[0-9]{1,2})?)$" G "<0>$0.70" +"^\$?([1-9]{1}[0-9]{0,2}(\,[0-9]{3})*(\.[0-9]{0,2})?|[1-9]{1}[0-9]{0,}(\.[0-9]{0,2})?|0(\.[0-9]{0,2})?|(\.[0-9]{1,2})?)$" G "<0>.7" +"^\$?([1-9]{1}[0-9]{0,2}(\,[0-9]{3})*(\.[0-9]{0,2})?|[1-9]{1}[0-9]{0,}(\.[0-9]{0,2})?|0(\.[0-9]{0,2})?|(\.[0-9]{1,2})?)$" "$0,123.50" +"^\$?([1-9]{1}[0-9]{0,2}(\,[0-9]{3})*(\.[0-9]{0,2})?|[1-9]{1}[0-9]{0,}(\.[0-9]{0,2})?|0(\.[0-9]{0,2})?|(\.[0-9]{1,2})?)$" "$00.5" +"^[A-Z]{2}[0-9]{6}[A-DFM]{1}$" G "<0>AB123456D" +"^[A-Z]{2}[0-9]{6}[A-DFM]{1}$" G "<0>AB123456F" +"^[A-Z]{2}[0-9]{6}[A-DFM]{1}$" G "<0>AB123456M" +"^[A-Z]{2}[0-9]{6}[A-DFM]{1}$" "AB123456E" +"^[A-Z]{2}[0-9]{6}[A-DFM]{1}$" "ab123456d" +#"(http|ftp|https):\/\/[\w]+(.[\w]+)([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?" G "<0>http://regxlib.com/Default.aspx" # TODO: \w in pattern +#"(http|ftp|https):\/\/[\w]+(.[\w]+)([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?" G "<0>http://electronics.cnet.com/electronics/0-6342366-8-8994967-1.html" # TODO: \w in pattern +#"(http|ftp|https):\/\/[\w]+(.[\w]+)([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?" "www.yahoo.com" # TODO: \w in pattern +"^[0-9]{4}\s{0,1}[a-zA-Z]{2}$" G "<0>2034AK" +"^[0-9]{4}\s{0,1}[a-zA-Z]{2}$" G "<0>2034 AK" +"^[0-9]{4}\s{0,1}[a-zA-Z]{2}$" G "<0>2034 ak" +"^[0-9]{4}\s{0,1}[a-zA-Z]{2}$" "2034 AK" +"^[0-9]{4}\s{0,1}[a-zA-Z]{2}$" "321321 AKSSAA" +"((\d{2})|(\d))\/((\d{2})|(\d))\/((\d{4})|(\d{2}))" G "<0>4/5/91" +"((\d{2})|(\d))\/((\d{2})|(\d))\/((\d{4})|(\d{2}))" G "<0>04/5/1991" +"((\d{2})|(\d))\/((\d{2})|(\d))\/((\d{4})|(\d{2}))" G "<0>4/05/89" +"((\d{2})|(\d))\/((\d{2})|(\d))\/((\d{4})|(\d{2}))" "4/5/1" +#"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" G "<0>01/01/2001 " #TODO - \s in pattern. +"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" G "<0>01-01-2001:" +"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" G "<0>(1-1-01)" +"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" "13/1/2001" +"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" "1-32-2001" +"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" "1-1-1801" +"^\d{3}\s?\d{3}$" G "<0>400 099" +"^\d{3}\s?\d{3}$" G "<0>400099" +"^\d{3}\s?\d{3}$" G "<0>400050" +"^\d{3}\s?\d{3}$" "2345678" +"^\d{3}\s?\d{3}$" "12345" +"^\d{3}\s?\d{3}$" "asdf" +"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" G "<0>(111) 222-3333" +"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" G "<0>1112223333" +"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" G "<0>111-222-3333" +"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" "11122223333" +"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" "11112223333" +"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" "11122233333" +"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" G "<0>#00ccff" +"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" G "<0>#039" +"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" G "<0>ffffcc" +"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" "blue" +"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" "0x000000" +"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" "#ff000" +"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" G "<0>01:23:45:67:89:ab" +"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" G "<0>01:23:45:67:89:AB" +"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" G "<0>fE:dC:bA:98:76:54" +"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" "01:23:45:67:89:ab:cd" +"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" "01:23:45:67:89:Az" +"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" "01:23:45:56:" +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" G "<0>http://www.blah.com/~joe" +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" G "<0>ftp://ftp.blah.co.uk:2828/blah%20blah.gif" +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" G "<0>https://blah.gov/blah-blah.as" +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" "www.blah.com" +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" "http://www.blah.com/I have spaces!" +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" "ftp://blah_underscore/[nope]" +"^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2})$|^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2}\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" G "<0>12/01/2002" +"^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2})$|^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2}\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" G "<0>12/01/2002 12:32:10" +"^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2})$|^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2}\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" "32/12/2002" +"^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2})$|^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2}\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" "12/13/2001" +"^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2})$|^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2}\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" "12/02/06" +"^[0-9](\.[0-9]+)?$" G "<0>1.2345" +"^[0-9](\.[0-9]+)?$" G "<0>0.00001" +"^[0-9](\.[0-9]+)?$" G "<0>7" +"^[0-9](\.[0-9]+)?$" "12.2" +"^[0-9](\.[0-9]+)?$" "1.10.1" +"^[0-9](\.[0-9]+)?$" "15.98" +"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" G "<0>III" +"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" G "<0>xiv" +"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" G "<0>MCMLXLIX" +"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" "iiV" +"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" "MCCM" +"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" "XXXX" +"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" G "<0>123" +"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" G "<0>-123.35" +"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" G "<0>-123.35e-2" +"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" "abc" +"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" "123.32e" +"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" "123.32.3" +"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" G "<0>T.F. Johnson" +"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" G "<0>John O'Neil" +"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" G "<0>Mary-Kate Johnson" +"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" "sam_johnson" +"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" "Joe--Bob Jones" +"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" "dfjsd0rd" +"^(20|21|22|23|[0-1]\d)[0-5]\d$" G "<0>1200" +"^(20|21|22|23|[0-1]\d)[0-5]\d$" G "<0>1645" +"^(20|21|22|23|[0-1]\d)[0-5]\d$" G "<0>2359" +"^(20|21|22|23|[0-1]\d)[0-5]\d$" "2400" +"^(20|21|22|23|[0-1]\d)[0-5]\d$" "asbc" +"^(20|21|22|23|[0-1]\d)[0-5]\d$" "12:45" +/<[^>]*\n?.*=("|')?(.*\.jpg)("|')?.*\n?[^<]*>/ G '<0>
' +/<[^>]*\n?.*=("|')?(.*\.jpg)("|')?.*\n?[^<]*>/ G "<0>" +/<[^>]*\n?.*=("|')?(.*\.jpg)("|')?.*\n?[^<]*>/ G "<0>" +/<[^>]*\n?.*=("|')?(.*\.jpg)("|')?.*\n?[^<]*>/ "= img.jpg" +/<[^>]*\n?.*=("|')?(.*\.jpg)("|')?.*\n?[^<]*>/ "img.jpg" +"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" G "<0>78754" +"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" G "<0>78754-1234" +"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" G "<0>G3H 6A3" +"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" "78754-12aA" +"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" "7875A" +"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" "g3h6a3" +#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" G "<0>bob@somewhere.com" # TODO: \w in pattern +#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" G "<0>bob.jones@[1.1.1.1]" +#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" G "<0>bob@a.b.c.d.info" # TODO: \w in pattern +#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" "bob@com" # TODO: \w in pattern +#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" "bob.jones@some.where" # TODO: \w in pattern +#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" "bob@1.1.1.123" # TODO: \w in pattern +#"^(([-\w \.]+)|(""[-\w \.]+"") )?<([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))>$" G "<0>" # TODO: \w in pattern +#"^(([-\w \.]+)|(""[-\w \.]+"") )?<([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))>$" G "<0>bob A. jones " # TODO: \w in pattern +#"^(([-\w \.]+)|(""[-\w \.]+"") )?<([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))>$" G "<0>bob A. jones " # TODO: \w in pattern +#"^(([-\w \.]+)|(""[-\w \.]+"") )?<([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))>$" "ab@cd.ef" # TODO: \w in pattern +#"^(([-\w \.]+)|(""[-\w \.]+"") )?<([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))>$" ""bob A. jones " # TODO: \w in pattern +#"^(([-\w \.]+)|(""[-\w \.]+"") )?<([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))>$" "bob A. jones " # TODO: \w in pattern +"^[A-Za-z]{1,2}[0-9A-Za-z]{1,2}[ ]?[0-9]{0,1}[A-Za-z]{2}$" G "<0>SW112LE" +"^[A-Za-z]{1,2}[0-9A-Za-z]{1,2}[ ]?[0-9]{0,1}[A-Za-z]{2}$" G "<0>SW11 2LE" +"^[A-Za-z]{1,2}[0-9A-Za-z]{1,2}[ ]?[0-9]{0,1}[A-Za-z]{2}$" G "<0>CR05LE" +"^[A-Za-z]{1,2}[0-9A-Za-z]{1,2}[ ]?[0-9]{0,1}[A-Za-z]{2}$" "12CR0LE" +"^[A-Za-z]{1,2}[0-9A-Za-z]{1,2}[ ]?[0-9]{0,1}[A-Za-z]{2}$" "12CR 0LE" +"^[A-Za-z]{1,2}[0-9A-Za-z]{1,2}[ ]?[0-9]{0,1}[A-Za-z]{2}$" "SWLE05" +"20\d{2}(-|\/)((0[1-9])|(1[0-2]))(-|\/)((0[1-9])|([1-2][0-9])|(3[0-1]))(T|\s)(([0-1][0-9])|(2[0-3])):([0-5][0-9]):([0-5][0-9])" G "<0>2099-12-31T23:59:59" +"20\d{2}(-|\/)((0[1-9])|(1[0-2]))(-|\/)((0[1-9])|([1-2][0-9])|(3[0-1]))(T|\s)(([0-1][0-9])|(2[0-3])):([0-5][0-9]):([0-5][0-9])" G "<0>2002/02/09 16:30:00" +"20\d{2}(-|\/)((0[1-9])|(1[0-2]))(-|\/)((0[1-9])|([1-2][0-9])|(3[0-1]))(T|\s)(([0-1][0-9])|(2[0-3])):([0-5][0-9]):([0-5][0-9])" G "<0>2000-01-01T00:00:00" +"20\d{2}(-|\/)((0[1-9])|(1[0-2]))(-|\/)((0[1-9])|([1-2][0-9])|(3[0-1]))(T|\s)(([0-1][0-9])|(2[0-3])):([0-5][0-9]):([0-5][0-9])" "2000-13-31T00:00:00" +"20\d{2}(-|\/)((0[1-9])|(1[0-2]))(-|\/)((0[1-9])|([1-2][0-9])|(3[0-1]))(T|\s)(([0-1][0-9])|(2[0-3])):([0-5][0-9]):([0-5][0-9])" "2002/02/33 24:00:00" +"20\d{2}(-|\/)((0[1-9])|(1[0-2]))(-|\/)((0[1-9])|([1-2][0-9])|(3[0-1]))(T|\s)(([0-1][0-9])|(2[0-3])):([0-5][0-9]):([0-5][0-9])" "2000-01-01 60:00:00" +"^((?:4\d{3})|(?:5[1-5]\d{2})|(?:6011)|(?:3[68]\d{2})|(?:30[012345]\d))[ -]?(\d{4})[ -]?(\d{4})[ -]?(\d{4}|3[4,7]\d{13})$" G "<0>6011567812345678" +"^((?:4\d{3})|(?:5[1-5]\d{2})|(?:6011)|(?:3[68]\d{2})|(?:30[012345]\d))[ -]?(\d{4})[ -]?(\d{4})[ -]?(\d{4}|3[4,7]\d{13})$" G "<0>6011 5678 1234 5678" +"^((?:4\d{3})|(?:5[1-5]\d{2})|(?:6011)|(?:3[68]\d{2})|(?:30[012345]\d))[ -]?(\d{4})[ -]?(\d{4})[ -]?(\d{4}|3[4,7]\d{13})$" G "<0>6011-5678-1234-5678" +"^((?:4\d{3})|(?:5[1-5]\d{2})|(?:6011)|(?:3[68]\d{2})|(?:30[012345]\d))[ -]?(\d{4})[ -]?(\d{4})[ -]?(\d{4}|3[4,7]\d{13})$" "1234567890123456" +"^((((0[13578])|(1[02]))[\/]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\/]?(([0-2][0-9])|(30)))|(02[\/]?[0-2][0-9]))[\/]?\d{4}$" G "<0>01/01/2001" +"^((((0[13578])|(1[02]))[\/]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\/]?(([0-2][0-9])|(30)))|(02[\/]?[0-2][0-9]))[\/]?\d{4}$" G "<0>02/29/2002" +"^((((0[13578])|(1[02]))[\/]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\/]?(([0-2][0-9])|(30)))|(02[\/]?[0-2][0-9]))[\/]?\d{4}$" G "<0>12/31/2002" +"^((((0[13578])|(1[02]))[\/]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\/]?(([0-2][0-9])|(30)))|(02[\/]?[0-2][0-9]))[\/]?\d{4}$" "1/1/02" +"^((((0[13578])|(1[02]))[\/]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\/]?(([0-2][0-9])|(30)))|(02[\/]?[0-2][0-9]))[\/]?\d{4}$" "02/30/2002" +"^((((0[13578])|(1[02]))[\/]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\/]?(([0-2][0-9])|(30)))|(02[\/]?[0-2][0-9]))[\/]?\d{4}$" "1/25/2002" +#"^(?=[^\&])(?:(?[^:/?#]+):)?(?://(?[^/?#]*))?(?[^?#]*)(?:\?(?[^#]*))?(?:#(?.*))?" G "<0>http://regexlib.com/REDetails.aspx?regexp_id=x#Details" # out of context, can't work stand-alone +#"^(?=[^\&])(?:(?[^:/?#]+):)?(?://(?[^/?#]*))?(?[^?#]*)(?:\?(?[^#]*))?(?:#(?.*))?" "&" # out of context, can't work stand-alone +"^[-+]?\d+(\.\d+)?$" G "<0>123" +"^[-+]?\d+(\.\d+)?$" G "<0>-123.45" +"^[-+]?\d+(\.\d+)?$" G "<0>+123.56" +"^[-+]?\d+(\.\d+)?$" "123x" +"^[-+]?\d+(\.\d+)?$" ".123" +"^[-+]?\d+(\.\d+)?$" "-123." +"^(\d{4}[- ]){3}\d{4}|\d{16}$" G "<0>1234-1234-1234-1234" +"^(\d{4}[- ]){3}\d{4}|\d{16}$" G "<0>1234 1234 1234 1234" +"^(\d{4}[- ]){3}\d{4}|\d{16}$" G "<0>1234123412341234" +"^(\d{4}[- ]){3}\d{4}|\d{16}$" "Visa" +"^(\d{4}[- ]){3}\d{4}|\d{16}$" "1234" +"^(\d{4}[- ]){3}\d{4}|\d{16}$" "123-1234-12345" +"^((4\d{3})|(5[1-5]\d{2})|(6011))-?\d{4}-?\d{4}-?\d{4}|3[4,7]\d{13}$" G "<0>6011-1111-1111-1111" +"^((4\d{3})|(5[1-5]\d{2})|(6011))-?\d{4}-?\d{4}-?\d{4}|3[4,7]\d{13}$" G "<0>5423-1111-1111-1111" +"^((4\d{3})|(5[1-5]\d{2})|(6011))-?\d{4}-?\d{4}-?\d{4}|3[4,7]\d{13}$" G "<0>341111111111111" +"^((4\d{3})|(5[1-5]\d{2})|(6011))-?\d{4}-?\d{4}-?\d{4}|3[4,7]\d{13}$" "4111-111-111-111" +"^((4\d{3})|(5[1-5]\d{2})|(6011))-?\d{4}-?\d{4}-?\d{4}|3[4,7]\d{13}$" "3411-1111-1111-111" +"^((4\d{3})|(5[1-5]\d{2})|(6011))-?\d{4}-?\d{4}-?\d{4}|3[4,7]\d{13}$" "Visa" +"^[A-Z0-9]{8}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{12}$" G "<0>4D28C5AD-6482-41CD-B84E-4573F384BB5C" +"^[A-Z0-9]{8}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{12}$" G "<0>B1E1282C-A35C-4D5A-BF8B-7A3A51D9E388" +"^[A-Z0-9]{8}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{12}$" G "91036A4A-A0F4-43F0-8CD" +"^[A-Z0-9]{8}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{12}$" "{B1E1282C-A35C-4D3A-BF8B-7A3A51D9E388}" +"^[A-Z0-9]{8}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{12}$" "AAAAAAAAAAAAAAAAA" +"^[A-Z0-9]{8}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{12}$" "B;E1282C-A35C-4D3A-BF8B-7A3A51D9E38" +"(^(4|5)\d{3}-?\d{4}-?\d{4}-?\d{4}|(4|5)\d{15})|(^(6011)-?\d{4}-?\d{4}-?\d{4}|(6011)-?\d{12})|(^((3\d{3}))-\d{6}-\d{5}|^((3\d{14})))" G "<0>4111-1234-1234-1234" +"(^(4|5)\d{3}-?\d{4}-?\d{4}-?\d{4}|(4|5)\d{15})|(^(6011)-?\d{4}-?\d{4}-?\d{4}|(6011)-?\d{12})|(^((3\d{3}))-\d{6}-\d{5}|^((3\d{14})))" G "<0>6011123412341234" +"(^(4|5)\d{3}-?\d{4}-?\d{4}-?\d{4}|(4|5)\d{15})|(^(6011)-?\d{4}-?\d{4}-?\d{4}|(6011)-?\d{12})|(^((3\d{3}))-\d{6}-\d{5}|^((3\d{14})))" G "<0>3711-123456-12345" +"(^(4|5)\d{3}-?\d{4}-?\d{4}-?\d{4}|(4|5)\d{15})|(^(6011)-?\d{4}-?\d{4}-?\d{4}|(6011)-?\d{12})|(^((3\d{3}))-\d{6}-\d{5}|^((3\d{14})))" "1234567890123456" +"(^(4|5)\d{3}-?\d{4}-?\d{4}-?\d{4}|(4|5)\d{15})|(^(6011)-?\d{4}-?\d{4}-?\d{4}|(6011)-?\d{12})|(^((3\d{3}))-\d{6}-\d{5}|^((3\d{14})))" "4111-123-1234-1234" +"(^(4|5)\d{3}-?\d{4}-?\d{4}-?\d{4}|(4|5)\d{15})|(^(6011)-?\d{4}-?\d{4}-?\d{4}|(6011)-?\d{12})|(^((3\d{3}))-\d{6}-\d{5}|^((3\d{14})))" "412-1234-1234-1234" +#'\[link="(?((.|\n)*?))"\](?((.|\n)*?))\[\/link\]' G '<0>[link="http://www.yahoo.com"]Yahoo[/link]' #named capture +#'\[link="(?((.|\n)*?))"\](?((.|\n)*?))\[\/link\]' "[link]http://www.yahoo.com[/link]" #named capture +#'\[link="(?((.|\n)*?))"\](?((.|\n)*?))\[\/link\]' "[link=http://www.yahoo.com]Yahoo[/link]" #named capture +"^[a-zA-Z0-9]+$" G "<0>10a" +"^[a-zA-Z0-9]+$" G "<0>ABC" +"^[a-zA-Z0-9]+$" G "<0>A3fg" +"^[a-zA-Z0-9]+$" "45.3" +"^[a-zA-Z0-9]+$" "this or that" +"^[a-zA-Z0-9]+$" "$23" +"((\(\d{3}\) ?)|(\d{3}-))?\d{3}-\d{4}" G "<0>(123) 456-7890" +"((\(\d{3}\) ?)|(\d{3}-))?\d{3}-\d{4}" G "<0>123-456-7890" +"((\(\d{3}\) ?)|(\d{3}-))?\d{3}-\d{4}" "1234567890" +"^[a-zA-Z]\w{3,14}$" G "<0>abcd" +"^[a-zA-Z]\w{3,14}$" G "<0>aBc45DSD_sdf" +"^[a-zA-Z]\w{3,14}$" G "<0>password" +"^[a-zA-Z]\w{3,14}$" "afv" +"^[a-zA-Z]\w{3,14}$" "1234" +"^[a-zA-Z]\w{3,14}$" "reallylongpassword" +"^[A-Z]{1,2}[1-9][0-9]?[A-Z]? [0-9][A-Z]{2,}|GIR 0AA$" G "<0>G1 1AA " +"^[A-Z]{1,2}[1-9][0-9]?[A-Z]? [0-9][A-Z]{2,}|GIR 0AA$" G "<0>GIR 0AA" +"^[A-Z]{1,2}[1-9][0-9]?[A-Z]? [0-9][A-Z]{2,}|GIR 0AA$" G "<0>SW1 1ZZ" +"^[A-Z]{1,2}[1-9][0-9]?[A-Z]? [0-9][A-Z]{2,}|GIR 0AA$" "BT01 3RT" +"^[A-Z]{1,2}[1-9][0-9]?[A-Z]? [0-9][A-Z]{2,}|GIR 0AA$" "G111 1AA" +"^0[23489]{1}(\-)?[^0\D]{1}\d{6}$" G "<0>03-6106666" +"^0[23489]{1}(\-)?[^0\D]{1}\d{6}$" G "<0>036106666" +"^0[23489]{1}(\-)?[^0\D]{1}\d{6}$" G "<0>02-5523344" +"^0[23489]{1}(\-)?[^0\D]{1}\d{6}$" "00-6106666" +"^0[23489]{1}(\-)?[^0\D]{1}\d{6}$" "03-0106666" +"^0[23489]{1}(\-)?[^0\D]{1}\d{6}$" "02-55812346" +"^0(5[012345678]|6[47]){1}(\-)?[^0\D]{1}\d{5}$" G "<0>050-346634" +"^0(5[012345678]|6[47]){1}(\-)?[^0\D]{1}\d{5}$" G "<0>058633633" +"^0(5[012345678]|6[47]){1}(\-)?[^0\D]{1}\d{5}$" G "<0>064-228226" +"^0(5[012345678]|6[47]){1}(\-)?[^0\D]{1}\d{5}$" "059-336622" +"^0(5[012345678]|6[47]){1}(\-)?[^0\D]{1}\d{5}$" "064-022663" +"^0(5[012345678]|6[47]){1}(\-)?[^0\D]{1}\d{5}$" "0545454545" +"^([A-Z]{1,2}[0-9]{1,2}|[A-Z]{3}|[A-Z]{1,2}[0-9][A-Z])( |-)[0-9][A-Z]{2}" G "<0>AA11 1AA" +"^([A-Z]{1,2}[0-9]{1,2}|[A-Z]{3}|[A-Z]{1,2}[0-9][A-Z])( |-)[0-9][A-Z]{2}" G "<0>AA1A 1AA" +"^([A-Z]{1,2}[0-9]{1,2}|[A-Z]{3}|[A-Z]{1,2}[0-9][A-Z])( |-)[0-9][A-Z]{2}" G "<0>A11-1AA" +"^([A-Z]{1,2}[0-9]{1,2}|[A-Z]{3}|[A-Z]{1,2}[0-9][A-Z])( |-)[0-9][A-Z]{2}" "111 AAA" +"^([A-Z]{1,2}[0-9]{1,2}|[A-Z]{3}|[A-Z]{1,2}[0-9][A-Z])( |-)[0-9][A-Z]{2}" "1AAA 1AA" +"^([A-Z]{1,2}[0-9]{1,2}|[A-Z]{3}|[A-Z]{1,2}[0-9][A-Z])( |-)[0-9][A-Z]{2}" "A1AA 1AA" +"@{2}((\S)+)@{2}" G "<0>@@test@@" +"@{2}((\S)+)@{2}" G "<0>@@name@@" +"@{2}((\S)+)@{2}" G "<0>@@2342@@" +"@{2}((\S)+)@{2}" "@test@" +"@{2}((\S)+)@{2}" "@@na me@@" +"@{2}((\S)+)@{2}" "@@ name@@" +"([0-1][0-9]|2[0-3]):[0-5][0-9]" G "<0>00:00" +"([0-1][0-9]|2[0-3]):[0-5][0-9]" G "<0>13:59" +"([0-1][0-9]|2[0-3]):[0-5][0-9]" G "<0>23:59" +"([0-1][0-9]|2[0-3]):[0-5][0-9]" "24:00" +"([0-1][0-9]|2[0-3]):[0-5][0-9]" "23:60" +"^[+-]?([0-9]*\.?[0-9]+|[0-9]+\.?[0-9]*)([eE][+-]?[0-9]+)?$" G "<0>23" +"^[+-]?([0-9]*\.?[0-9]+|[0-9]+\.?[0-9]*)([eE][+-]?[0-9]+)?$" G "<0>-17.e23" +"^[+-]?([0-9]*\.?[0-9]+|[0-9]+\.?[0-9]*)([eE][+-]?[0-9]+)?$" G "<0>+.23e+2" +"^[+-]?([0-9]*\.?[0-9]+|[0-9]+\.?[0-9]*)([eE][+-]?[0-9]+)?$" "+.e2" +"^[+-]?([0-9]*\.?[0-9]+|[0-9]+\.?[0-9]*)([eE][+-]?[0-9]+)?$" "23.17.5" +"^[+-]?([0-9]*\.?[0-9]+|[0-9]+\.?[0-9]*)([eE][+-]?[0-9]+)?$" "10e2.0" +"^([1-zA-Z0-1@.\s ]{1,255})$" G "<0>email@email.com" +"^([1-zA-Z0-1@.\s ]{1,255})$" G "<0>My Name" +"^([1-zA-Z0-1@.\s ]{1,255})$" G "<0>asdf12df" +"^([1-zA-Z0-1@.\s ]{1,255})$" "‘,\*&$<>" +"^([1-zA-Z0-1@.\s ]{1,255})$" "1001' string" +"^((0[1-9])|(1[0-2]))\/(\d{4})$" G "<0>12/2002" +"^((0[1-9])|(1[0-2]))\/(\d{4})$" G "<0>11/1900" +"^((0[1-9])|(1[0-2]))\/(\d{4})$" G "<0>02/1977" +"^((0[1-9])|(1[0-2]))\/(\d{4})$" "1/1977" +"^((0[1-9])|(1[0-2]))\/(\d{4})$" "00/000" +"^((0[1-9])|(1[0-2]))\/(\d{4})$" "15/2002" +"^\(\d{1,2}(\s\d{1,2}){1,2}\)\s(\d{1,2}(\s\d{1,2}){1,2})((-(\d{1,4})){0,1})$" G "<0>(0 34 56) 34 56 67" +"^\(\d{1,2}(\s\d{1,2}){1,2}\)\s(\d{1,2}(\s\d{1,2}){1,2})((-(\d{1,4})){0,1})$" G "<0>(03 45) 5 67 67" +"^\(\d{1,2}(\s\d{1,2}){1,2}\)\s(\d{1,2}(\s\d{1,2}){1,2})((-(\d{1,4})){0,1})$" G "<0>(0 45) 2 33 45-45" +"^\(\d{1,2}(\s\d{1,2}){1,2}\)\s(\d{1,2}(\s\d{1,2}){1,2})((-(\d{1,4})){0,1})$" "(2345) 34 34" +"^\(\d{1,2}(\s\d{1,2}){1,2}\)\s(\d{1,2}(\s\d{1,2}){1,2})((-(\d{1,4})){0,1})$" "(0 56) 456 456" +"^\(\d{1,2}(\s\d{1,2}){1,2}\)\s(\d{1,2}(\s\d{1,2}){1,2})((-(\d{1,4})){0,1})$" "(3 45) 2 34-45678" +"(?:\d|I{1,3})?\s?\w{2,}\.?\s*\d{1,}\:\d{1,}-?,?\d{0,2}(?:,\d{0,2}){0,2}" G "<0>Genesis 3:3-4,6" +"(?:\d|I{1,3})?\s?\w{2,}\.?\s*\d{1,}\:\d{1,}-?,?\d{0,2}(?:,\d{0,2}){0,2}" G "<0>II Sam 2:11,2" +"(?:\d|I{1,3})?\s?\w{2,}\.?\s*\d{1,}\:\d{1,}-?,?\d{0,2}(?:,\d{0,2}){0,2}" G "<0>2 Tim 3:16" +"(?:\d|I{1,3})?\s?\w{2,}\.?\s*\d{1,}\:\d{1,}-?,?\d{0,2}(?:,\d{0,2}){0,2}" "Genesis chap 3, verse 3" +"(?:\d|I{1,3})?\s?\w{2,}\.?\s*\d{1,}\:\d{1,}-?,?\d{0,2}(?:,\d{0,2}){0,2}" "2nd Samuel 2" +"(\[[Ii][Mm][Gg]\])(\S+?)(\[\/[Ii][Mm][Gg]\])" G "<0>[IMG]http://bleh.jpg[/IMG]" +"(\[[Ii][Mm][Gg]\])(\S+?)(\[\/[Ii][Mm][Gg]\])" G "<0>[ImG]bleh[/imG]" +"(\[[Ii][Mm][Gg]\])(\S+?)(\[\/[Ii][Mm][Gg]\])" G "<0>[img]ftp://login:pass@bleh.gif[/img]" +"(\[[Ii][Mm][Gg]\])(\S+?)(\[\/[Ii][Mm][Gg]\])" '' +"^([0-9]{1,2})[./-]+([0-9]{1,2})[./-]+([0-9]{2}|[0-9]{4})$" G "<0>10/03/1979" +"^([0-9]{1,2})[./-]+([0-9]{1,2})[./-]+([0-9]{2}|[0-9]{4})$" G "<0>1-1-02" +"^([0-9]{1,2})[./-]+([0-9]{1,2})[./-]+([0-9]{2}|[0-9]{4})$" G "<0>01.1.2003" +"^([0-9]{1,2})[./-]+([0-9]{1,2})[./-]+([0-9]{2}|[0-9]{4})$" "10/03/197" +"^([0-9]{1,2})[./-]+([0-9]{1,2})[./-]+([0-9]{2}|[0-9]{4})$" "01-02-003" +"^([0-9]{1,2})[./-]+([0-9]{1,2})[./-]+([0-9]{2}|[0-9]{4})$" "01 02 03" +#"^(?(^00000(|-0000))|(\d{5}(|-\d{4})))$" G "<0>12345" # No Conditionals? +#"^(?(^00000(|-0000))|(\d{5}(|-\d{4})))$" G "<0>12345-6789" # No Conditionals? +#"^(?(^00000(|-0000))|(\d{5}(|-\d{4})))$" "00000" # No Conditionals? +#"^(?(^00000(|-0000))|(\d{5}(|-\d{4})))$" "00000-0000" # No Conditionals? +#"^(?(^00000(|-0000))|(\d{5}(|-\d{4})))$" "a4650-465s" # No Conditionals? +"^((0?[1-9])|((1|2)[0-9])|30|31)$" G "<0>01" +"^((0?[1-9])|((1|2)[0-9])|30|31)$" G "<0>12" +"^((0?[1-9])|((1|2)[0-9])|30|31)$" G "<0>31" +"^((0?[1-9])|((1|2)[0-9])|30|31)$" "123" +"^((0?[1-9])|((1|2)[0-9])|30|31)$" "32" +"^((0?[1-9])|((1|2)[0-9])|30|31)$" "abc" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?(\d{3}([\s\-./\\])?\d{4}|[a-zA-Z0-9]{7})$" G "<0>1.222.333.1234" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?(\d{3}([\s\-./\\])?\d{4}|[a-zA-Z0-9]{7})$" G "<0>1-223-123-1232" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?(\d{3}([\s\-./\\])?\d{4}|[a-zA-Z0-9]{7})$" G "<0>12223334444" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?(\d{3}([\s\-./\\])?\d{4}|[a-zA-Z0-9]{7})$" "1.1.123123.123" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?(\d{3}([\s\-./\\])?\d{4}|[a-zA-Z0-9]{7})$" "12-1322-112-31" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?(\d{3}([\s\-./\\])?\d{4}|[a-zA-Z0-9]{7})$" "11231321131" +"^([A-PR-UWYZ0-9][A-HK-Y0-9][AEHMNPRTVXY0-9]?[ABEHMNPRVWXY0-9]? {1,2}[0-9][ABD-HJLN-UW-Z]{2}|GIR 0AA)$" G "<0>DN3 6GB" +"^([A-PR-UWYZ0-9][A-HK-Y0-9][AEHMNPRTVXY0-9]?[ABEHMNPRVWXY0-9]? {1,2}[0-9][ABD-HJLN-UW-Z]{2}|GIR 0AA)$" G "<0>SW42 4RG" +"^([A-PR-UWYZ0-9][A-HK-Y0-9][AEHMNPRTVXY0-9]?[ABEHMNPRVWXY0-9]? {1,2}[0-9][ABD-HJLN-UW-Z]{2}|GIR 0AA)$" G "<0>GIR 0AA" +"^([A-PR-UWYZ0-9][A-HK-Y0-9][AEHMNPRTVXY0-9]?[ABEHMNPRVWXY0-9]? {1,2}[0-9][ABD-HJLN-UW-Z]{2}|GIR 0AA)$" "SEW4 5TY" +"^([A-PR-UWYZ0-9][A-HK-Y0-9][AEHMNPRTVXY0-9]?[ABEHMNPRVWXY0-9]? {1,2}[0-9][ABD-HJLN-UW-Z]{2}|GIR 0AA)$" "AA2C 4FG" +"^([A-PR-UWYZ0-9][A-HK-Y0-9][AEHMNPRTVXY0-9]?[ABEHMNPRVWXY0-9]? {1,2}[0-9][ABD-HJLN-UW-Z]{2}|GIR 0AA)$" "AA2 4CV" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{4,8}$" G "<0>asD1" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{4,8}$" G "<0>asDF1234" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{4,8}$" G "<0>ASPgo123" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{4,8}$" "asdf" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{4,8}$" "1234" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z]).{4,8}$" "ASDF12345" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?([0-9]{3}([\s\-./\\])?[0-9]{4}|[a-zA-Z0-9]{7}|([0-9]{3}[-][a-zA-Z0-9]{4}))" G "<0>1.222.333.1234" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?([0-9]{3}([\s\-./\\])?[0-9]{4}|[a-zA-Z0-9]{7}|([0-9]{3}[-][a-zA-Z0-9]{4}))" G "<0>1-223-123-1232" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?([0-9]{3}([\s\-./\\])?[0-9]{4}|[a-zA-Z0-9]{7}|([0-9]{3}[-][a-zA-Z0-9]{4}))" G "<0>1-888-425-DELL" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?([0-9]{3}([\s\-./\\])?[0-9]{4}|[a-zA-Z0-9]{7}|([0-9]{3}[-][a-zA-Z0-9]{4}))" "1.1.123123.123" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?([0-9]{3}([\s\-./\\])?[0-9]{4}|[a-zA-Z0-9]{7}|([0-9]{3}[-][a-zA-Z0-9]{4}))" "12-1322-112-31" +"^([0-1]([\s\-./\\])?)?(\(?[2-9]\d{2}\)?|[2-9]\d{3})([\s\-./\\])?([0-9]{3}([\s\-./\\])?[0-9]{4}|[a-zA-Z0-9]{7}|([0-9]{3}[-][a-zA-Z0-9]{4}))" "1-800-CALL-DEL" +"^(([0]?[1-9]|1[0-2])(:)([0-5][0-9]))$" G "<0>09:00" +"^(([0]?[1-9]|1[0-2])(:)([0-5][0-9]))$" G "<0>9:00" +"^(([0]?[1-9]|1[0-2])(:)([0-5][0-9]))$" G "<0>11:35" +"^(([0]?[1-9]|1[0-2])(:)([0-5][0-9]))$" "13:00" +"^(([0]?[1-9]|1[0-2])(:)([0-5][0-9]))$" "9.00" +"^(([0]?[1-9]|1[0-2])(:)([0-5][0-9]))$" "6:60" +"^([1-9]|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])$" G "<0>1" +"^([1-9]|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])$" G "<0>108" +"^([1-9]|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])$" G "<0>255" +"^([1-9]|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])$" "01" +"^([1-9]|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])$" "256" +"^((((0[13578])|([13578])|(1[02]))[\/](([1-9])|([0-2][0-9])|(3[01])))|(((0[469])|([469])|(11))[\/](([1-9])|([0-2][0-9])|(30)))|((2|02)[\/](([1-9])|([0-2][0-9]))))[\/]\d{4}$|^\d{4}$" G "<0>01/01/2001" +"^((((0[13578])|([13578])|(1[02]))[\/](([1-9])|([0-2][0-9])|(3[01])))|(((0[469])|([469])|(11))[\/](([1-9])|([0-2][0-9])|(30)))|((2|02)[\/](([1-9])|([0-2][0-9]))))[\/]\d{4}$|^\d{4}$" G "<0>1/01/2001" +"^((((0[13578])|([13578])|(1[02]))[\/](([1-9])|([0-2][0-9])|(3[01])))|(((0[469])|([469])|(11))[\/](([1-9])|([0-2][0-9])|(30)))|((2|02)[\/](([1-9])|([0-2][0-9]))))[\/]\d{4}$|^\d{4}$" G "<0>2002" +"^((((0[13578])|([13578])|(1[02]))[\/](([1-9])|([0-2][0-9])|(3[01])))|(((0[469])|([469])|(11))[\/](([1-9])|([0-2][0-9])|(30)))|((2|02)[\/](([1-9])|([0-2][0-9]))))[\/]\d{4}$|^\d{4}$" "2/30/2002" +"^((((0[13578])|([13578])|(1[02]))[\/](([1-9])|([0-2][0-9])|(3[01])))|(((0[469])|([469])|(11))[\/](([1-9])|([0-2][0-9])|(30)))|((2|02)[\/](([1-9])|([0-2][0-9]))))[\/]\d{4}$|^\d{4}$" "13/23/2002" +"^((((0[13578])|([13578])|(1[02]))[\/](([1-9])|([0-2][0-9])|(3[01])))|(((0[469])|([469])|(11))[\/](([1-9])|([0-2][0-9])|(30)))|((2|02)[\/](([1-9])|([0-2][0-9]))))[\/]\d{4}$|^\d{4}$" "12345" +"^[A-Za-z]{2}[0-9]{6}[A-Za-z]{1}$" G "<0>SP939393H" +"^[A-Za-z]{2}[0-9]{6}[A-Za-z]{1}$" G "<0>PX123456D" +"^[A-Za-z]{2}[0-9]{6}[A-Za-z]{1}$" G "<0>SW355667G" +"^[A-Za-z]{2}[0-9]{6}[A-Za-z]{1}$" "12SP9393H" +"^[A-Za-z]{2}[0-9]{6}[A-Za-z]{1}$" "S3P93930D" +"^[A-Za-z]{2}[0-9]{6}[A-Za-z]{1}$" "11223344SP00ddSS" +"(^0[78][2347][0-9]{7})" G "<0>0834128458" +"(^0[78][2347][0-9]{7})" G "<0>0749526308" +"(^0[78][2347][0-9]{7})" "0861212308" +"(^0[78][2347][0-9]{7})" "0892549851" +"^([A-HJ-TP-Z]{1}\d{4}[A-Z]{3}|[a-z]{1}\d{4}[a-hj-tp-z]{3})$" G "<0>C1406HHA" +"^([A-HJ-TP-Z]{1}\d{4}[A-Z]{3}|[a-z]{1}\d{4}[a-hj-tp-z]{3})$" G "<0>A4126AAB" +"^([A-HJ-TP-Z]{1}\d{4}[A-Z]{3}|[a-z]{1}\d{4}[a-hj-tp-z]{3})$" G "<0>c1406hha" +"^([A-HJ-TP-Z]{1}\d{4}[A-Z]{3}|[a-z]{1}\d{4}[a-hj-tp-z]{3})$" "c1406HHA" +"^([A-HJ-TP-Z]{1}\d{4}[A-Z]{3}|[a-z]{1}\d{4}[a-hj-tp-z]{3})$" "4126" +"^([A-HJ-TP-Z]{1}\d{4}[A-Z]{3}|[a-z]{1}\d{4}[a-hj-tp-z]{3})$" "C1406hha" +"^(((25[0-5]|2[0-4][0-9]|19[0-1]|19[3-9]|18[0-9]|17[0-1]|17[3-9]|1[0-6][0-9]|1[1-9]|[2-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9]))|(192\.(25[0-5]|2[0-4][0-9]|16[0-7]|169|1[0-5][0-9]|1[7-9][0-9]|[1-9][0-9]|[0-9]))|(172\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|1[0-5]|3[2-9]|[4-9][0-9]|[0-9])))\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$" G "<0>66.129.71.120" +"^(((25[0-5]|2[0-4][0-9]|19[0-1]|19[3-9]|18[0-9]|17[0-1]|17[3-9]|1[0-6][0-9]|1[1-9]|[2-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9]))|(192\.(25[0-5]|2[0-4][0-9]|16[0-7]|169|1[0-5][0-9]|1[7-9][0-9]|[1-9][0-9]|[0-9]))|(172\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|1[0-5]|3[2-9]|[4-9][0-9]|[0-9])))\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$" G "<0>207.46.230.218" +"^(((25[0-5]|2[0-4][0-9]|19[0-1]|19[3-9]|18[0-9]|17[0-1]|17[3-9]|1[0-6][0-9]|1[1-9]|[2-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9]))|(192\.(25[0-5]|2[0-4][0-9]|16[0-7]|169|1[0-5][0-9]|1[7-9][0-9]|[1-9][0-9]|[0-9]))|(172\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|1[0-5]|3[2-9]|[4-9][0-9]|[0-9])))\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$" G "<0>64.58.76.225" +"^(((25[0-5]|2[0-4][0-9]|19[0-1]|19[3-9]|18[0-9]|17[0-1]|17[3-9]|1[0-6][0-9]|1[1-9]|[2-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9]))|(192\.(25[0-5]|2[0-4][0-9]|16[0-7]|169|1[0-5][0-9]|1[7-9][0-9]|[1-9][0-9]|[0-9]))|(172\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|1[0-5]|3[2-9]|[4-9][0-9]|[0-9])))\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$" "10.0.5.4" +"^(((25[0-5]|2[0-4][0-9]|19[0-1]|19[3-9]|18[0-9]|17[0-1]|17[3-9]|1[0-6][0-9]|1[1-9]|[2-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9]))|(192\.(25[0-5]|2[0-4][0-9]|16[0-7]|169|1[0-5][0-9]|1[7-9][0-9]|[1-9][0-9]|[0-9]))|(172\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|1[0-5]|3[2-9]|[4-9][0-9]|[0-9])))\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$" "192.168.0.1" +"^(((25[0-5]|2[0-4][0-9]|19[0-1]|19[3-9]|18[0-9]|17[0-1]|17[3-9]|1[0-6][0-9]|1[1-9]|[2-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9]))|(192\.(25[0-5]|2[0-4][0-9]|16[0-7]|169|1[0-5][0-9]|1[7-9][0-9]|[1-9][0-9]|[0-9]))|(172\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|1[0-5]|3[2-9]|[4-9][0-9]|[0-9])))\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\.(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$" "my ip address" +"^([\w\d\-\.]+)@{1}(([\w\d\-]{1,67})|([\w\d\-]+\.[\w\d\-]{1,67}))\.(([a-zA-Z\d]{2,4})(\.[a-zA-Z\d]{2})?)$" G "<0>foo@foo.com" +"^([\w\d\-\.]+)@{1}(([\w\d\-]{1,67})|([\w\d\-]+\.[\w\d\-]{1,67}))\.(([a-zA-Z\d]{2,4})(\.[a-zA-Z\d]{2})?)$" G "<0>foo@foo-foo.com.au" +"^([\w\d\-\.]+)@{1}(([\w\d\-]{1,67})|([\w\d\-]+\.[\w\d\-]{1,67}))\.(([a-zA-Z\d]{2,4})(\.[a-zA-Z\d]{2})?)$" G "<0>foo@foo.foo.info" +"^([\w\d\-\.]+)@{1}(([\w\d\-]{1,67})|([\w\d\-]+\.[\w\d\-]{1,67}))\.(([a-zA-Z\d]{2,4})(\.[a-zA-Z\d]{2})?)$" "foo@.com" +"^([\w\d\-\.]+)@{1}(([\w\d\-]{1,67})|([\w\d\-]+\.[\w\d\-]{1,67}))\.(([a-zA-Z\d]{2,4})(\.[a-zA-Z\d]{2})?)$" "foo@foo..com" +"^([\w\d\-\.]+)@{1}(([\w\d\-]{1,67})|([\w\d\-]+\.[\w\d\-]{1,67}))\.(([a-zA-Z\d]{2,4})(\.[a-zA-Z\d]{2})?)$" "foo@me@.com" +"/\*[\d\D]*?\*/" G "<0>/* my comment */" +"/\*[\d\D]*?\*/" G "<0>/* my multiline comment */" +"/\*[\d\D]*?\*/" G "<0>/* my nested comment */" +"/\*[\d\D]*?\*/" "*/ anything here /*" +"/\*[\d\D]*?\*/" "anything between 2 separate comments" +"/\*[\d\D]*?\*/" "\* *\\" +"/\*[\p{N}\P{N}]*?\*/" G "<0>/* my comment */" +"/\*[\p{N}\P{N}]*?\*/" G "<0>/* my multiline comment */" +"/\*[\p{N}\P{N}]*?\*/" G "<0>/* my nested comment */" +"/\*[\p{N}\P{N}]*?\*/" "*/ anything here /*" +"/\*[\p{N}\P{N}]*?\*/" "anything between 2 separate comments" +"/\*[\p{N}\P{N}]*?\*/" "\* *\\" +"((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((\d{4})|(\d{2}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((\d{4}|\d{2})))" G "<0>1/31/2002" +"((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((\d{4})|(\d{2}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((\d{4}|\d{2})))" G "<0>04-30-02" +"((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((\d{4})|(\d{2}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((\d{4}|\d{2})))" G "<0>12-01/2002" +"((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((\d{4})|(\d{2}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((\d{4}|\d{2})))" "2/31/2002" +"((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((\d{4})|(\d{2}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((\d{4}|\d{2})))" "13/0/02" +"((0?[13578]|10|12)(-|\/)((0[0-9])|([12])([0-9]?)|(3[01]?))(-|\/)((\d{4})|(\d{2}))|(0?[2469]|11)(-|\/)((0[0-9])|([12])([0-9]?)|(3[0]?))(-|\/)((\d{4}|\d{2})))" "Jan 1, 2001" +'^(([^<>;()\[\]\\.,;:@"]+(\.[^<>()\[\]\\.,;:@"]+)*)|(".+"))@((([a-z]([-a-z0-9]*[a-z0-9])?)|(#[0-9]+)|(\[((([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\.){3}(([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\]))\.)*(([a-z]([-a-z0-9]*[a-z0-9])?)|(#[0-9]+)|(\[((([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\.){3}(([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\]))$' G "<0>blah@[10.0.0.1]" +'^(([^<>;()\[\]\\.,;:@"]+(\.[^<>()\[\]\\.,;:@"]+)*)|(".+"))@((([a-z]([-a-z0-9]*[a-z0-9])?)|(#[0-9]+)|(\[((([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\.){3}(([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\]))\.)*(([a-z]([-a-z0-9]*[a-z0-9])?)|(#[0-9]+)|(\[((([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\.){3}(([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\]))$' G "<0>a@b.c" +'^(([^<>;()\[\]\\.,;:@"]+(\.[^<>()\[\]\\.,;:@"]+)*)|(".+"))@((([a-z]([-a-z0-9]*[a-z0-9])?)|(#[0-9]+)|(\[((([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\.){3}(([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\]))\.)*(([a-z]([-a-z0-9]*[a-z0-9])?)|(#[0-9]+)|(\[((([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\.){3}(([01]?[0-9]{0,2})|(2(([0-4][0-9])|(5[0-5]))))\]))$' "non@match@." +"^\d{9}[\d|X]$" G "<0>1234123412" +"^\d{9}[\d|X]$" G "<0>123412341X" +"^\d{9}[\d|X]$" "not an isbn" +"^\d{9}(\d|X)$" G "<0>1234123412" +"^\d{9}(\d|X)$" G "<0>123412341X" +"^\d{9}(\d|X)$" "not an isbn" +"^(([1-9])|(0[1-9])|(1[0-2]))\/(([0-9])|([0-2][0-9])|(3[0-1]))\/(([0-9][0-9])|([1-2][0,9][0-9][0-9]))$" G "<0>01/01/2001" +"^(([1-9])|(0[1-9])|(1[0-2]))\/(([0-9])|([0-2][0-9])|(3[0-1]))\/(([0-9][0-9])|([1-2][0,9][0-9][0-9]))$" G "<0>1/1/1999" +"^(([1-9])|(0[1-9])|(1[0-2]))\/(([0-9])|([0-2][0-9])|(3[0-1]))\/(([0-9][0-9])|([1-2][0,9][0-9][0-9]))$" G "<0>10/20/2080" +"^(([1-9])|(0[1-9])|(1[0-2]))\/(([0-9])|([0-2][0-9])|(3[0-1]))\/(([0-9][0-9])|([1-2][0,9][0-9][0-9]))$" "13/01/2001" +"^(([1-9])|(0[1-9])|(1[0-2]))\/(([0-9])|([0-2][0-9])|(3[0-1]))\/(([0-9][0-9])|([1-2][0,9][0-9][0-9]))$" "1/1/1800" +"^(([1-9])|(0[1-9])|(1[0-2]))\/(([0-9])|([0-2][0-9])|(3[0-1]))\/(([0-9][0-9])|([1-2][0,9][0-9][0-9]))$" "10/32/2080" +"^\d*\.?((25)|(50)|(5)|(75)|(0)|(00))?$" G "<0>0.25" +"^\d*\.?((25)|(50)|(5)|(75)|(0)|(00))?$" G "<0>.75" +"^\d*\.?((25)|(50)|(5)|(75)|(0)|(00))?$" G "<0>123.50" +"^\d*\.?((25)|(50)|(5)|(75)|(0)|(00))?$" ".77" +"^\d*\.?((25)|(50)|(5)|(75)|(0)|(00))?$" "1.435" +"^(s-|S-){0,1}[0-9]{3}\s?[0-9]{2}$" G "<0>12345" +"^(s-|S-){0,1}[0-9]{3}\s?[0-9]{2}$" G "<0>932 68" +"^(s-|S-){0,1}[0-9]{3}\s?[0-9]{2}$" G "<0>S-621 46" +"^(s-|S-){0,1}[0-9]{3}\s?[0-9]{2}$" "5367" +"^(s-|S-){0,1}[0-9]{3}\s?[0-9]{2}$" "425611" +"^(s-|S-){0,1}[0-9]{3}\s?[0-9]{2}$" "31 545" +"^\d{5}(-\d{4})?$" G "<0>48222" +"^\d{5}(-\d{4})?$" G "<0>48222-1746" +"^\d{5}(-\d{4})?$" "4632" +"^\d{5}(-\d{4})?$" "Blake" +"^\d{5}(-\d{4})?$" "37333-32" +'^(?!^(PRN|AUX|CLOCK\$|NUL|CON|COM\d|LPT\d|\..*)(\..+)?$)[^\x00-\x1f\\?*:\";|/]+$' G "<0>test.txt" +'^(?!^(PRN|AUX|CLOCK\$|NUL|CON|COM\d|LPT\d|\..*)(\..+)?$)[^\x00-\x1f\\?*:\";|/]+$' G "<0>test.jpg.txt" +'^(?!^(PRN|AUX|CLOCK\$|NUL|CON|COM\d|LPT\d|\..*)(\..+)?$)[^\x00-\x1f\\?*:\";|/]+$' G "<0>a&b c.bmp" +'^(?!^(PRN|AUX|CLOCK\$|NUL|CON|COM\d|LPT\d|\..*)(\..+)?$)[^\x00-\x1f\\?*:\";|/]+$' "CON" +'^(?!^(PRN|AUX|CLOCK\$|NUL|CON|COM\d|LPT\d|\..*)(\..+)?$)[^\x00-\x1f\\?*:\";|/]+$' ".pdf" +'^(?!^(PRN|AUX|CLOCK\$|NUL|CON|COM\d|LPT\d|\..*)(\..+)?$)[^\x00-\x1f\\?*:\";|/]+$' "test:2.pdf" +"^(\d{1,3}'(\d{3}')*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" G "<0>1'235.140" +"^(\d{1,3}'(\d{3}')*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" G "<0>1'222'333.120" +"^(\d{1,3}'(\d{3}')*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" G "<0>456" +"^(\d{1,3}'(\d{3}')*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" "1234.500" +"^(\d{1,3}'(\d{3}')*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" "78'45.123" +"^(\d{1,3}'(\d{3}')*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" "123,0012" +"^[a-zA-Z][0-9][a-zA-Z]\s?[0-9][a-zA-Z][0-9]$" G "<0>T2p 3c7" +"^[a-zA-Z][0-9][a-zA-Z]\s?[0-9][a-zA-Z][0-9]$" G "<0>T3P3c7" +"^[a-zA-Z][0-9][a-zA-Z]\s?[0-9][a-zA-Z][0-9]$" G "<0>T2P 3C7" +"^[a-zA-Z][0-9][a-zA-Z]\s?[0-9][a-zA-Z][0-9]$" "123456" +"^[a-zA-Z][0-9][a-zA-Z]\s?[0-9][a-zA-Z][0-9]$" "3C7T2P" +"^[a-zA-Z][0-9][a-zA-Z]\s?[0-9][a-zA-Z][0-9]$" "11T21RWW" +"^\$[0-9]+(\.[0-9][0-9])?$" G "<0>$1.50" +"^\$[0-9]+(\.[0-9][0-9])?$" G "<0>$49" +"^\$[0-9]+(\.[0-9][0-9])?$" G "<0>$0.50" +"^\$[0-9]+(\.[0-9][0-9])?$" "1.5" +"^\$[0-9]+(\.[0-9][0-9])?$" "$1.333" +"^\$[0-9]+(\.[0-9][0-9])?$" "this $5.12 fails" +"\b((25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\.){3}(25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\b" G "<0>217.6.9.89" +"\b((25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\.){3}(25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\b" G "<0>0.0.0.0" +"\b((25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\.){3}(25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\b" G "<0>255.255.255.255" +"\b((25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\.){3}(25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\b" "256.0.0.0" +"\b((25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\.){3}(25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\b" "0978.3.3.3" +"\b((25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\.){3}(25[0-5]|2[0-4]\d|[01]\d\d|\d?\d)\b" "65.4t.54.3" +"((mailto\:|(news|(ht|f)tp(s?))\://){1}\S+)" G "<0>http://www.aspemporium.com" +"((mailto\:|(news|(ht|f)tp(s?))\://){1}\S+)" G "<0>mailto:dominionx@hotmail.com" +"((mailto\:|(news|(ht|f)tp(s?))\://){1}\S+)" G "<0>ftp://ftp.test.com" +"((mailto\:|(news|(ht|f)tp(s?))\://){1}\S+)" "www.aspemporium.com" +"((mailto\:|(news|(ht|f)tp(s?))\://){1}\S+)" "dominionx@hotmail.com" +"((mailto\:|(news|(ht|f)tp(s?))\://){1}\S+)" "bloggs" +"\(([0-9]{2}|0{1}((x|[0-9]){2}[0-9]{2}))\)\s*[0-9]{3,4}[- ]*[0-9]{4}" G "<0>(12) 123 1234" +"\(([0-9]{2}|0{1}((x|[0-9]){2}[0-9]{2}))\)\s*[0-9]{3,4}[- ]*[0-9]{4}" G "<0>(01512) 123 1234" +"\(([0-9]{2}|0{1}((x|[0-9]){2}[0-9]{2}))\)\s*[0-9]{3,4}[- ]*[0-9]{4}" G "<0>(0xx12) 1234 1234" +"\(([0-9]{2}|0{1}((x|[0-9]){2}[0-9]{2}))\)\s*[0-9]{3,4}[- ]*[0-9]{4}" "12 123 1234" +"\(([0-9]{2}|0{1}((x|[0-9]){2}[0-9]{2}))\)\s*[0-9]{3,4}[- ]*[0-9]{4}" "(012) 123/1234" +"\(([0-9]{2}|0{1}((x|[0-9]){2}[0-9]{2}))\)\s*[0-9]{3,4}[- ]*[0-9]{4}" "(012) 123 12345" +"^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$" G "<0>bob-smith@foo.com" +"^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$" G "<0>bob.smith@foo.com" +"^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$" G "<0>bob_smith@foo.com" +"^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$" "-smith@foo.com" +"^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$" ".smith@foo.com" +"^\w+[\w-\.]*\@\w+((-\w+)|(\w*))\.[a-z]{2,3}$" "smith@foo_com" +"^(?=.*\d).{4,8}$" G "<0>1234" +"^(?=.*\d).{4,8}$" G "<0>asdf1234" +"^(?=.*\d).{4,8}$" G "<0>asp123" +"^(?=.*\d).{4,8}$" "asdf" +"^(?=.*\d).{4,8}$" "asdf12345" +"^(?=.*\d).{4,8}$" "password" +"[^A-Za-z0-9_@\.]|@{2,}|\.{5,}" G "<0>user name" +"[^A-Za-z0-9_@\.]|@{2,}|\.{5,}" G "<0>user#name" +"[^A-Za-z0-9_@\.]|@{2,}|\.{5,}" G "<0>....." +"[^A-Za-z0-9_@\.]|@{2,}|\.{5,}" "User_Name1" +"[^A-Za-z0-9_@\.]|@{2,}|\.{5,}" "username@foo.com" +"[^A-Za-z0-9_@\.]|@{2,}|\.{5,}" "user.name@mail.foo.com" +"^100$|^[0-9]{1,2}$|^[0-9]{1,2}\,[0-9]{1,3}$" G "<0>12,654" +"^100$|^[0-9]{1,2}$|^[0-9]{1,2}\,[0-9]{1,3}$" G "<0>1,987" +"^100$|^[0-9]{1,2}$|^[0-9]{1,2}\,[0-9]{1,3}$" "128,2" +"^100$|^[0-9]{1,2}$|^[0-9]{1,2}\,[0-9]{1,3}$" "12," +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*[^\.\,\)\(\s]$" G "<0>https://www.restrictd.com/~myhome/" +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*[^\.\,\)\(\s]$" "http://www.krumedia.com." +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*[^\.\,\)\(\s]$" "(http://www.krumedia.com)" +"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*[^\.\,\)\(\s]$" "http://www.krumedia.com," +"(\d{1,3},(\d{3},)*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" G "<0>2&651.50" +"(\d{1,3},(\d{3},)*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" G "<0>987.895" +"(\d{1,3},(\d{3},)*\d{3}(\.\d{1,3})?|\d{1,3}(\.\d{3})?)$" "25$%787*" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9]?)?$" G "<0>$1,456,983.00" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9]?)?$" G "<0>$1,700.07" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9]?)?$" G "<0>$68,944.23" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9]?)?$" "$20,86.93" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9]?)?$" "$1098.84" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9]?)?$" "$150." +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9])?$" G "<0>$28,009,987.88" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9])?$" G "<0>$23,099.05" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9])?$" G "<0>$.88" +"\$[0-9]?[0-9]?[0-9]?((\,[0-9][0-9][0-9])*)?(\.[0-9][0-9])?$" "$234,5.99" +"^((((31\/(0?[13578]|1[02]))|((29|30)\/(0?[1,3-9]|1[0-2])))\/(1[6-9]|[2-9]\d)?\d{2})|(29\/0?2\/(((1[6-9]|[2-9]\d)?(0[48]|[2468][048]|[13579][26])|((16|[2468][048]|[3579][26])00))))|(0?[1-9]|1\d|2[0-8])\/((0?[1-9])|(1[0-2]))\/((1[6-9]|[2-9]\d)?\d{2})) (20|21|22|23|[0-1]?\d):[0-5]?\d:[0-5]?\d$" G "<0>29/02/2004 20:15:27" +"^((((31\/(0?[13578]|1[02]))|((29|30)\/(0?[1,3-9]|1[0-2])))\/(1[6-9]|[2-9]\d)?\d{2})|(29\/0?2\/(((1[6-9]|[2-9]\d)?(0[48]|[2468][048]|[13579][26])|((16|[2468][048]|[3579][26])00))))|(0?[1-9]|1\d|2[0-8])\/((0?[1-9])|(1[0-2]))\/((1[6-9]|[2-9]\d)?\d{2})) (20|21|22|23|[0-1]?\d):[0-5]?\d:[0-5]?\d$" G "<0>29/2/04 8:9:5" +"^((((31\/(0?[13578]|1[02]))|((29|30)\/(0?[1,3-9]|1[0-2])))\/(1[6-9]|[2-9]\d)?\d{2})|(29\/0?2\/(((1[6-9]|[2-9]\d)?(0[48]|[2468][048]|[13579][26])|((16|[2468][048]|[3579][26])00))))|(0?[1-9]|1\d|2[0-8])\/((0?[1-9])|(1[0-2]))\/((1[6-9]|[2-9]\d)?\d{2})) (20|21|22|23|[0-1]?\d):[0-5]?\d:[0-5]?\d$" G "<0>31/3/2004 9:20:17" +"^((((31\/(0?[13578]|1[02]))|((29|30)\/(0?[1,3-9]|1[0-2])))\/(1[6-9]|[2-9]\d)?\d{2})|(29\/0?2\/(((1[6-9]|[2-9]\d)?(0[48]|[2468][048]|[13579][26])|((16|[2468][048]|[3579][26])00))))|(0?[1-9]|1\d|2[0-8])\/((0?[1-9])|(1[0-2]))\/((1[6-9]|[2-9]\d)?\d{2})) (20|21|22|23|[0-1]?\d):[0-5]?\d:[0-5]?\d$" "29/02/2003 20:15:15" +"^((((31\/(0?[13578]|1[02]))|((29|30)\/(0?[1,3-9]|1[0-2])))\/(1[6-9]|[2-9]\d)?\d{2})|(29\/0?2\/(((1[6-9]|[2-9]\d)?(0[48]|[2468][048]|[13579][26])|((16|[2468][048]|[3579][26])00))))|(0?[1-9]|1\d|2[0-8])\/((0?[1-9])|(1[0-2]))\/((1[6-9]|[2-9]\d)?\d{2})) (20|21|22|23|[0-1]?\d):[0-5]?\d:[0-5]?\d$" "2/29/04 20:15:15" +"^((((31\/(0?[13578]|1[02]))|((29|30)\/(0?[1,3-9]|1[0-2])))\/(1[6-9]|[2-9]\d)?\d{2})|(29\/0?2\/(((1[6-9]|[2-9]\d)?(0[48]|[2468][048]|[13579][26])|((16|[2468][048]|[3579][26])00))))|(0?[1-9]|1\d|2[0-8])\/((0?[1-9])|(1[0-2]))\/((1[6-9]|[2-9]\d)?\d{2})) (20|21|22|23|[0-1]?\d):[0-5]?\d:[0-5]?\d$" "31/3/4 9:20:17" +"^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$" G "<0>something@someserver.com" +"^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$" G "<0>firstname.lastname@mailserver.domain.com" +"^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$" G "<0>username-something@some-server.nl" +"^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$" "username@someserver.domain.c" +"^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$" "somename@server.domain-com" +"^([a-zA-Z0-9_\-\.]+)@([a-zA-Z0-9_\-\.]+)\.([a-zA-Z]{2,5})$" "someone@something.se_eo" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)|(^([0-9]|[1][0-9]|[2][0-3])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)" G "<0>8am" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)|(^([0-9]|[1][0-9]|[2][0-3])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)" G "<0>8 am" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)|(^([0-9]|[1][0-9]|[2][0-3])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)" G "<0>8:00 am" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)|(^([0-9]|[1][0-9]|[2][0-3])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)" "8a" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)|(^([0-9]|[1][0-9]|[2][0-3])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)" "8 a" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)|(^([0-9]|[1][0-9]|[2][0-3])(\s{0,1})(AM|PM|am|pm|aM|Am|pM|Pm{2,2})$)" "8:00 a" +"^([0-9]{2})?(\([0-9]{2})\)([0-9]{3}|[0-9]{4})-[0-9]{4}$" G "<0>55(21)123-4567" +"^([0-9]{2})?(\([0-9]{2})\)([0-9]{3}|[0-9]{4})-[0-9]{4}$" G "<0>(11)1234-5678" +"^([0-9]{2})?(\([0-9]{2})\)([0-9]{3}|[0-9]{4})-[0-9]{4}$" G "<0>55(71)4562-2234" +"^([0-9]{2})?(\([0-9]{2})\)([0-9]{3}|[0-9]{4})-[0-9]{4}$" "3434-3432" +"^([0-9]{2})?(\([0-9]{2})\)([0-9]{3}|[0-9]{4})-[0-9]{4}$" "4(23)232-3232" +"^([0-9]{2})?(\([0-9]{2})\)([0-9]{3}|[0-9]{4})-[0-9]{4}$" "55(2)232-232" +"^((([0]?[1-9]|1[0-2])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?))$" G "<0>1:01 AM" +"^((([0]?[1-9]|1[0-2])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?))$" G "<0>23:52:01" +"^((([0]?[1-9]|1[0-2])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?))$" G "<0>03.24.36 AM" +"^((([0]?[1-9]|1[0-2])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?))$" "19:31 AM" +"^((([0]?[1-9]|1[0-2])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?))$" "9:9 PM" +"^((([0]?[1-9]|1[0-2])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?))$" "25:60:61" +"^\d{0,2}(\.\d{1,2})?$" G "<0>99.99" +"^\d{0,2}(\.\d{1,2})?$" G "<0>99" +"^\d{0,2}(\.\d{1,2})?$" G "<0>.99" +"^\d{0,2}(\.\d{1,2})?$" "999.999" +"^\d{0,2}(\.\d{1,2})?$" "999" +"^\d{0,2}(\.\d{1,2})?$" ".999" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?!.*\s).{4,8}$" G "<0>1agdA*$#" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?!.*\s).{4,8}$" G "<0>1agdA*$#" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?!.*\s).{4,8}$" G "<0>1agdA*$#" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?!.*\s).{4,8}$" "wyrn%@*&$# f" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?!.*\s).{4,8}$" "mbndkfh782" +"^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?!.*\s).{4,8}$" "BNfhjdhfjd&*)%#$)" +"^([a-zA-Z0-9][-a-zA-Z0-9]*[a-zA-Z0-9]\.)+([a-zA-Z0-9]{3,5})$" G "<0>freshmeat.net" +"^([a-zA-Z0-9][-a-zA-Z0-9]*[a-zA-Z0-9]\.)+([a-zA-Z0-9]{3,5})$" G "<0>123.com" +"^([a-zA-Z0-9][-a-zA-Z0-9]*[a-zA-Z0-9]\.)+([a-zA-Z0-9]{3,5})$" G "<0>TempLate-toolkKt.orG" +"^([a-zA-Z0-9][-a-zA-Z0-9]*[a-zA-Z0-9]\.)+([a-zA-Z0-9]{3,5})$" "-dog.com" +"^([a-zA-Z0-9][-a-zA-Z0-9]*[a-zA-Z0-9]\.)+([a-zA-Z0-9]{3,5})$" "?boy.net" +"^([a-zA-Z0-9][-a-zA-Z0-9]*[a-zA-Z0-9]\.)+([a-zA-Z0-9]{3,5})$" "this.domain" +"^[^']*$" G "<0>asljas" +"^[^']*$" G "<0>%/&89uhuhadjkh" +"^[^']*$" G '<0>"hi there!"' +"^[^']*$" "'hi there!'" +"^[^']*$" "It's 9 o'clock" +"^[^']*$" "'''''" +"(^\(\)$|^\(((\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\),)*(\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\)){1}\)))$" G "<0>((24,((1,2,3),(3,4,5))))" +"(^\(\)$|^\(((\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\),)*(\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\)){1}\)))$" G "<0>((1,((2,3,4),(4,5,6),(96,34,26))),(12,((1,3,4),(4,5,6),(7,8,9))))" +"(^\(\)$|^\(((\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\),)*(\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\)){1}\)))$" G "<0>()" +"(^\(\)$|^\(((\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\),)*(\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\)){1}\)))$" "(24,((1,2,3),(3,4,5)))" +"(^\(\)$|^\(((\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\),)*(\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\)){1}\)))$" "( )" +"(^\(\)$|^\(((\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\),)*(\([0-9]+,(\((\([0-9]+,[0-9]+,[0-9]+\),)*(\([0-9]+,[0-9]+,[0-9]+\)){1}\))+\)){1}\)))$" "((23,(12,3,4),(4,5,6)))" +"^[a-zA-Z0-9\s .\-_']+$" G "<0>dony d'gsa" +"^[a-zA-Z0-9\s .\-_']+$" "^[a-zA-Z0-9\s.\-_']+$" +"^[_a-zA-Z0-9-]+(\.[_a-zA-Z0-9-]+)*@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.(([0-9]{1,3})|([a-zA-Z]{2,3})|(aero|coop|info|museum|name))$" G "<0>example@example.com" +"^[_a-zA-Z0-9-]+(\.[_a-zA-Z0-9-]+)*@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.(([0-9]{1,3})|([a-zA-Z]{2,3})|(aero|coop|info|museum|name))$" G "<0>foo@bar.info" +"^[_a-zA-Z0-9-]+(\.[_a-zA-Z0-9-]+)*@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.(([0-9]{1,3})|([a-zA-Z]{2,3})|(aero|coop|info|museum|name))$" G "<0>blah@127.0.0.1" +"^[_a-zA-Z0-9-]+(\.[_a-zA-Z0-9-]+)*@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.(([0-9]{1,3})|([a-zA-Z]{2,3})|(aero|coop|info|museum|name))$" "broken@@example.com" +"^[_a-zA-Z0-9-]+(\.[_a-zA-Z0-9-]+)*@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.(([0-9]{1,3})|([a-zA-Z]{2,3})|(aero|coop|info|museum|name))$" "foo@bar.infp" +"^[_a-zA-Z0-9-]+(\.[_a-zA-Z0-9-]+)*@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.(([0-9]{1,3})|([a-zA-Z]{2,3})|(aero|coop|info|museum|name))$" "blah@.nospam.biz" +"^\d{5}(-\d{3})?$" G "<0>13165-000" +"^\d{5}(-\d{3})?$" G "<0>38175-000" +"^\d{5}(-\d{3})?$" G "<0>81470-276" +"^\d{5}(-\d{3})?$" "13165-00" +"^\d{5}(-\d{3})?$" "38175-abc" +"^\d{5}(-\d{3})?$" "81470-2763" +"^\$(\d{1,3}(\,\d{3})*|(\d+))(\.\d{2})?$" G "<0>$0.84" +"^\$(\d{1,3}(\,\d{3})*|(\d+))(\.\d{2})?$" G "<0>$123458" +"^\$(\d{1,3}(\,\d{3})*|(\d+))(\.\d{2})?$" G "<0>$1,234,567.89" +"^\$(\d{1,3}(\,\d{3})*|(\d+))(\.\d{2})?$" "$12,3456.01" +"^\$(\d{1,3}(\,\d{3})*|(\d+))(\.\d{2})?$" "12345" +"^\$(\d{1,3}(\,\d{3})*|(\d+))(\.\d{2})?$" "$1.234" +"([A-Z]:\\[^/:\*\?<>\|]+\.\w{2,6})|(\\{2}[^/:\*\?<>\|]+\.\w{2,6})" G "<0>C:\\temp\\this allows spaces\\web.config" +"([A-Z]:\\[^/:\*\?<>\|]+\.\w{2,6})|(\\{2}[^/:\*\?<>\|]+\.\w{2,6})" G "<0>\\\\Andromeda\\share\\file name.123" +"([A-Z]:\\[^/:\*\?<>\|]+\.\w{2,6})|(\\{2}[^/:\*\?<>\|]+\.\w{2,6})" "tz:\temp\ fi*le?na:m.doc" +"([A-Z]:\\[^/:\*\?<>\|]+\.\w{2,6})|(\\{2}[^/:\*\?<>\|]+\.\w{2,6})" "\\Andromeda\share\filename.a" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])$)|(^([0-9]|[1][0-9]|[2][0-3])$)" G "<0>10:35" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])$)|(^([0-9]|[1][0-9]|[2][0-3])$)" G "<0>9:20" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])$)|(^([0-9]|[1][0-9]|[2][0-3])$)" G "<0>23" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])$)|(^([0-9]|[1][0-9]|[2][0-3])$)" "24:00" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])$)|(^([0-9]|[1][0-9]|[2][0-3])$)" "20 PM" +"(^([0-9]|[0-1][0-9]|[2][0-3]):([0-5][0-9])$)|(^([0-9]|[1][0-9]|[2][0-3])$)" "20:15 PM" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(\.[0-9][0-9])?$" G "<0>$3,023,123.34" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(\.[0-9][0-9])?$" G "<0>9,876,453" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(\.[0-9][0-9])?$" G "<0>123456.78" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(\.[0-9][0-9])?$" "4,33,234.34" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(\.[0-9][0-9])?$" "$1.234" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(\.[0-9][0-9])?$" "abc" +"^\$?\d+(\.(\d{2}))?$" G "<0>$2.43" +"^\$?\d+(\.(\d{2}))?$" G "<0>2.02" +"^\$?\d+(\.(\d{2}))?$" G "<0>$2112" +"^\$?\d+(\.(\d{2}))?$" "2.1" +"^\$?\d+(\.(\d{2}))?$" "$.14" +"^\$?\d+(\.(\d{2}))?$" "$2,222.12" +/("[^"]*")|('[^\r]*)(\r\n)?/ G '<0>"my string"' +/("[^"]*")|('[^\r]*)(\r\n)?/ G '<0>"a string with \u0027 in it"' +/("[^"]*")|('[^\r]*)(\r\n)?/ G "<0>' comment" +/("[^"]*")|('[^\r]*)(\r\n)?/ /asd "/ +"^[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}$" G "<0>BFDB4D31-3E35-4DAB-AFCA-5E6E5C8F61EA" +"^[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}$" G "<0>BFDB4d31-3e35-4dab-afca-5e6e5c8f61ea" +"^[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}$" "qqqBFDB4D31-3E35-4DAB-AFCA-5E6E5C8F61EA" +"^[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}$" "BFDB4D31-3E-4DAB-AFCA-5E6E5C8F61EA" +"^[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}$" "BFDB4D31-3E35-4DAB-AF" +"^\d{2}(\x2e)(\d{3})(-\d{3})?$" G "<0>12.345-678" +"^\d{2}(\x2e)(\d{3})(-\d{3})?$" G "<0>23.345-123" +"^\d{2}(\x2e)(\d{3})(-\d{3})?$" G "<0>99.999" +"^\d{2}(\x2e)(\d{3})(-\d{3})?$" "41222-222" +"^\d{2}(\x2e)(\d{3})(-\d{3})?$" "3.444-233" +"^\d{2}(\x2e)(\d{3})(-\d{3})?$" "43.324444" +"^\d{2}(\u002e)(\d{3})(-\d{3})?$" G "<0>12.345-678" +"^\d{2}(\u002e)(\d{3})(-\d{3})?$" G "<0>23.345-123" +"^\d{2}(\u002e)(\d{3})(-\d{3})?$" G "<0>99.999" +"^\d{2}(\u002e)(\d{3})(-\d{3})?$" "41222-222" +"^\d{2}(\u002e)(\d{3})(-\d{3})?$" "3.444-233" +"^\d{2}(\u002e)(\d{3})(-\d{3})?$" "43.324444" +#"^(([a-zA-Z]:)|(\\{2}\w+)\$?)(\\(\w[\w ]*))+\.(txt|TXT)$" G "<0>c:\file.txt" # TODO: debug +#"^(([a-zA-Z]:)|(\\{2}\w+)\$?)(\\(\w[\w ]*))+\.(txt|TXT)$" G "<0>c:\folder\sub folder\file.txt" # TODO: debug +#"^(([a-zA-Z]:)|(\\{2}\w+)\$?)(\\(\w[\w ]*))+\.(txt|TXT)$" G "<0>\\network\folder\file.txt" # TODO: debug +"^(([a-zA-Z]:)|(\\{2}\w+)\$?)(\\(\w[\w ]*))+\.(txt|TXT)$" "C:" +"^(([a-zA-Z]:)|(\\{2}\w+)\$?)(\\(\w[\w ]*))+\.(txt|TXT)$" "C:\file.xls" +"^(([a-zA-Z]:)|(\\{2}\w+)\$?)(\\(\w[\w ]*))+\.(txt|TXT)$" "folder.txt" +"^[a-zA-Z0-9]+([a-zA-Z0-9\-\.]+)?\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" G "<0>my.domain.com" +"^[a-zA-Z0-9]+([a-zA-Z0-9\-\.]+)?\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" G "<0>regexlib.com" +"^[a-zA-Z0-9]+([a-zA-Z0-9\-\.]+)?\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" G "<0>big-reg.com" +"^[a-zA-Z0-9]+([a-zA-Z0-9\-\.]+)?\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" ".mydomain.com" +"^[a-zA-Z0-9]+([a-zA-Z0-9\-\.]+)?\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" "regexlib.comm" +"^[a-zA-Z0-9]+([a-zA-Z0-9\-\.]+)?\.(com|org|net|mil|edu|COM|ORG|NET|MIL|EDU)$" "-bigreg.com" +"^\d{4}[\-\/\s]?((((0[13578])|(1[02]))[\-\/\s]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\-\/\s]?(([0-2][0-9])|(30)))|(02[\-\/\s]?[0-2][0-9]))$" G "<0>0001-12-31" +"^\d{4}[\-\/\s ]?((((0[13578])|(1[02]))[\-\/\s ]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\-\/\s ]?(([0-2][0-9])|(30)))|(02[\-\/\s ]?[0-2][0-9]))$" G "<0>9999 09 30" +"^\d{4}[\-\/\s]?((((0[13578])|(1[02]))[\-\/\s]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\-\/\s]?(([0-2][0-9])|(30)))|(02[\-\/\s]?[0-2][0-9]))$" G "<0>2002/03/03" +"^\d{4}[\-\/\s]?((((0[13578])|(1[02]))[\-\/\s]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\-\/\s]?(([0-2][0-9])|(30)))|(02[\-\/\s]?[0-2][0-9]))$" "0001\\02\\30" +"^\d{4}[\-\/\s]?((((0[13578])|(1[02]))[\-\/\s]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\-\/\s]?(([0-2][0-9])|(30)))|(02[\-\/\s]?[0-2][0-9]))$" "9999.15.01" +"^\d{4}[\-\/\s]?((((0[13578])|(1[02]))[\-\/\s]?(([0-2][0-9])|(3[01])))|(((0[469])|(11))[\-\/\s]?(([0-2][0-9])|(30)))|(02[\-\/\s]?[0-2][0-9]))$" "2002/3/3" +"^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$" G "<0>http://psychopop.org" +"^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$" G "<0>http://www.edsroom.com/newUser.asp" +"^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$" G "<0>http://unpleasant.jarrin.net/markov/inde" +"^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$" "ftp://psychopop.org" +"^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$" "http://www.edsroom/" +"^http\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(/\S*)?$" "http://un/pleasant.jarrin.net/markov/index.asp" +"^( [1-9]|[1-9]|0[1-9]|10|11|12)[0-5]\d$" G "<0>1145" +"^( [1-9]|[1-9]|0[1-9]|10|11|12)[0-5]\d$" G "<0>933" +"^( [1-9]|[1-9]|0[1-9]|10|11|12)[0-5]\d$" G "<0> 801" +"^( [1-9]|[1-9]|0[1-9]|10|11|12)[0-5]\d$" "0000" +"^( [1-9]|[1-9]|0[1-9]|10|11|12)[0-5]\d$" "1330" +"^( [1-9]|[1-9]|0[1-9]|10|11|12)[0-5]\d$" "8:30" +"^\d{1,2}\/\d{2,4}$" G "<0>9/02" +"^\d{1,2}\/\d{2,4}$" G "<0>09/2002" +"^\d{1,2}\/\d{2,4}$" G "<0>09/02" +"^\d{1,2}\/\d{2,4}$" "Fall 2002" +"^\d{1,2}\/\d{2,4}$" "Sept 2002" +"^(|(0[1-9])|(1[0-2]))\/((0[1-9])|(1\d)|(2\d)|(3[0-1]))\/((\d{4}))$" G "<0>01/01/2001" +"^(|(0[1-9])|(1[0-2]))\/((0[1-9])|(1\d)|(2\d)|(3[0-1]))\/((\d{4}))$" G "<0>02/30/2001" +"^(|(0[1-9])|(1[0-2]))\/((0[1-9])|(1\d)|(2\d)|(3[0-1]))\/((\d{4}))$" G "<0>12/31/2002" +"^(|(0[1-9])|(1[0-2]))\/((0[1-9])|(1\d)|(2\d)|(3[0-1]))\/((\d{4}))$" "1/1/02" +"^(|(0[1-9])|(1[0-2]))\/((0[1-9])|(1\d)|(2\d)|(3[0-1]))\/((\d{4}))$" "1/1/2002" +"^(|(0[1-9])|(1[0-2]))\/((0[1-9])|(1\d)|(2\d)|(3[0-1]))\/((\d{4}))$" "1/25/2002" +"^(1?(-?\d{3})-?)?(\d{3})(-?\d{4})$" G "<0>15615552323" +"^(1?(-?\d{3})-?)?(\d{3})(-?\d{4})$" G "<0>1-561-555-1212" +"^(1?(-?\d{3})-?)?(\d{3})(-?\d{4})$" G "<0>5613333" +"^(1?(-?\d{3})-?)?(\d{3})(-?\d{4})$" "1-555-5555" +"^(1?(-?\d{3})-?)?(\d{3})(-?\d{4})$" "15553333" +"^(1?(-?\d{3})-?)?(\d{3})(-?\d{4})$" "0-561-555-1212" +'<[^>]*name[\s]*=[\s]*"?[^\w_]*"?[^>]*>' G '<0>' +'<[^>]*name[\s]*=[\s]*"?[^\w_]*"?[^>]*>' G '<0>" # TODO: \w in pattern +'<[^>]*name[\s]*=[\s]*"?[^\w_]*"?[^>]*>' '' # TODO: \w in pattern +'<[^>]*name[\s]*=[\s]*"?[^\w_]*"?[^>]*>' "The dirty brown fox stank like" +"^(1|01|2|02|3|03|4|04|5|05|6|06|7|07|8|08|9|09|10|11|12{1,2}):(([0-5]{1}[0-9]{1}\s{0,1})([AM|PM|am|pm]{2,2}))\W{0}$" G "<0>1:00 AM" +"^(1|01|2|02|3|03|4|04|5|05|6|06|7|07|8|08|9|09|10|11|12{1,2}):(([0-5]{1}[0-9]{1}\s{0,1})([AM|PM|am|pm]{2,2}))\W{0}$" G "<0>12:00 PM" +"^(1|01|2|02|3|03|4|04|5|05|6|06|7|07|8|08|9|09|10|11|12{1,2}):(([0-5]{1}[0-9]{1}\s{0,1})([AM|PM|am|pm]{2,2}))\W{0}$" G "<0>1:00am" +"^(1|01|2|02|3|03|4|04|5|05|6|06|7|07|8|08|9|09|10|11|12{1,2}):(([0-5]{1}[0-9]{1}\s{0,1})([AM|PM|am|pm]{2,2}))\W{0}$" "24:00" +"^\d*$" G "<0>123" +"^\d*$" G "<0>000" +"^\d*$" G "<0>43" +"^\d*$" "asbc" +"^\d*$" "-34" +"^\d*$" "3.1415" +"^[-+]?\d*$" G "<0>123" +"^[-+]?\d*$" G "<0>-123" +"^[-+]?\d*$" G "<0>+123" +"^[-+]?\d*$" "abc" +"^[-+]?\d*$" "3.14159" +"^[-+]?\d*$" "-3.14159" +"^\d*\.?\d*$" G "<0>123" +"^\d*\.?\d*$" G "<0>3.14159" +"^\d*\.?\d*$" G "<0>.234" +"^\d*\.?\d*$" "abc" +"^\d*\.?\d*$" "-3.14159" +"^\d*\.?\d*$" "3.4.2" +"^((\d{5}-\d{4})|(\d{5})|([A-Z]\d[A-Z]\s\d[A-Z]\d))$" G "<0>44240" +"^((\d{5}-\d{4})|(\d{5})|([A-Z]\d[A-Z]\s\d[A-Z]\d))$" G "<0>44240-5555" +"^((\d{5}-\d{4})|(\d{5})|([A-Z]\d[A-Z]\s\d[A-Z]\d))$" G "<0>T2P 3C7" +"^((\d{5}-\d{4})|(\d{5})|([A-Z]\d[A-Z]\s\d[A-Z]\d))$" "44240ddd" +"^((\d{5}-\d{4})|(\d{5})|([A-Z]\d[A-Z]\s\d[A-Z]\d))$" "t44240-55" +"^((\d{5}-\d{4})|(\d{5})|([A-Z]\d[A-Z]\s\d[A-Z]\d))$" "t2p3c7" +"^[\\(]{0,1}([0-9]){3}[\\)]{0,1}[ ]?([^0-1]){1}([0-9]){2}[ ]?[-]?[ ]?([0-9]){4}[ ]*((x){0,1}([0-9]){1,5}){0,1}$" G "<0>(910)456-7890" +"^[\\(]{0,1}([0-9]){3}[\\)]{0,1}[ ]?([^0-1]){1}([0-9]){2}[ ]?[-]?[ ]?([0-9]){4}[ ]*((x){0,1}([0-9]){1,5}){0,1}$" G "<0>(910)456-8970 x12" +"^[\\(]{0,1}([0-9]){3}[\\)]{0,1}[ ]?([^0-1]){1}([0-9]){2}[ ]?[-]?[ ]?([0-9]){4}[ ]*((x){0,1}([0-9]){1,5}){0,1}$" G "<0>(910)456-8970 1211" +"^[\\(]{0,1}([0-9]){3}[\\)]{0,1}[ ]?([^0-1]){1}([0-9]){2}[ ]?[-]?[ ]?([0-9]){4}[ ]*((x){0,1}([0-9]){1,5}){0,1}$" "(910) 156-7890" +"^[\\(]{0,1}([0-9]){3}[\\)]{0,1}[ ]?([^0-1]){1}([0-9]){2}[ ]?[-]?[ ]?([0-9]){4}[ ]*((x){0,1}([0-9]){1,5}){0,1}$" "(910) 056-7890" +"^[\\(]{0,1}([0-9]){3}[\\)]{0,1}[ ]?([^0-1]){1}([0-9]){2}[ ]?[-]?[ ]?([0-9]){4}[ ]*((x){0,1}([0-9]){1,5}){0,1}$" "(910) 556-7890 x" +"^((0?[1-9]|[12][1-9]|3[01])\.(0?[13578]|1[02])\.20[0-9]{2}|(0?[1-9]|[12][1-9]|30)\.(0?[13456789]|1[012])\.20[0-9]{2}|(0?[1-9]|1[1-9]|2[0-8])\.(0?[123456789]|1[012])\.20[0-9]{2}|(0?[1-9]|[12][1-9])\.(0?[123456789]|1[012])\.20(00|04|08|12|16|20|24|28|32|36|40|44|48|52|56|60|64|68|72|76|80|84|88|92|96))$" G "<0>31.01.2002" +"^((0?[1-9]|[12][1-9]|3[01])\.(0?[13578]|1[02])\.20[0-9]{2}|(0?[1-9]|[12][1-9]|30)\.(0?[13456789]|1[012])\.20[0-9]{2}|(0?[1-9]|1[1-9]|2[0-8])\.(0?[123456789]|1[012])\.20[0-9]{2}|(0?[1-9]|[12][1-9])\.(0?[123456789]|1[012])\.20(00|04|08|12|16|20|24|28|32|36|40|44|48|52|56|60|64|68|72|76|80|84|88|92|96))$" G "<0>29.2.2004" +"^((0?[1-9]|[12][1-9]|3[01])\.(0?[13578]|1[02])\.20[0-9]{2}|(0?[1-9]|[12][1-9]|30)\.(0?[13456789]|1[012])\.20[0-9]{2}|(0?[1-9]|1[1-9]|2[0-8])\.(0?[123456789]|1[012])\.20[0-9]{2}|(0?[1-9]|[12][1-9])\.(0?[123456789]|1[012])\.20(00|04|08|12|16|20|24|28|32|36|40|44|48|52|56|60|64|68|72|76|80|84|88|92|96))$" G "<0>09.02.2005" +"^((0?[1-9]|[12][1-9]|3[01])\.(0?[13578]|1[02])\.20[0-9]{2}|(0?[1-9]|[12][1-9]|30)\.(0?[13456789]|1[012])\.20[0-9]{2}|(0?[1-9]|1[1-9]|2[0-8])\.(0?[123456789]|1[012])\.20[0-9]{2}|(0?[1-9]|[12][1-9])\.(0?[123456789]|1[012])\.20(00|04|08|12|16|20|24|28|32|36|40|44|48|52|56|60|64|68|72|76|80|84|88|92|96))$" "31.11.2002" +"^((0?[1-9]|[12][1-9]|3[01])\.(0?[13578]|1[02])\.20[0-9]{2}|(0?[1-9]|[12][1-9]|30)\.(0?[13456789]|1[012])\.20[0-9]{2}|(0?[1-9]|1[1-9]|2[0-8])\.(0?[123456789]|1[012])\.20[0-9]{2}|(0?[1-9]|[12][1-9])\.(0?[123456789]|1[012])\.20(00|04|08|12|16|20|24|28|32|36|40|44|48|52|56|60|64|68|72|76|80|84|88|92|96))$" "29.2.2002" +"^((0?[1-9]|[12][1-9]|3[01])\.(0?[13578]|1[02])\.20[0-9]{2}|(0?[1-9]|[12][1-9]|30)\.(0?[13456789]|1[012])\.20[0-9]{2}|(0?[1-9]|1[1-9]|2[0-8])\.(0?[123456789]|1[012])\.20[0-9]{2}|(0?[1-9]|[12][1-9])\.(0?[123456789]|1[012])\.20(00|04|08|12|16|20|24|28|32|36|40|44|48|52|56|60|64|68|72|76|80|84|88|92|96))$" "33.06.2000" +"^(0[1-9]|1[0-2])\/((0[1-9]|2\d)|3[0-1])\/(19\d\d|200[0-3])$" G "<0>12/31/2003" +"^(0[1-9]|1[0-2])\/((0[1-9]|2\d)|3[0-1])\/(19\d\d|200[0-3])$" G "<0>01/01/1900" +"^(0[1-9]|1[0-2])\/((0[1-9]|2\d)|3[0-1])\/(19\d\d|200[0-3])$" G "<0>11/31/2002" +"^(0[1-9]|1[0-2])\/((0[1-9]|2\d)|3[0-1])\/(19\d\d|200[0-3])$" "1/1/2002" +"^(0[1-9]|1[0-2])\/((0[1-9]|2\d)|3[0-1])\/(19\d\d|200[0-3])$" "01/01/02" +"^(0[1-9]|1[0-2])\/((0[1-9]|2\d)|3[0-1])\/(19\d\d|200[0-3])$" "01/01/2004" +"^((((([13578])|(1[0-2]))[\-\/\s]?(([1-9])|([1-2][0-9])|(3[01])))|((([469])|(11))[\-\/\s]?(([1-9])|([1-2][0-9])|(30)))|(2[\-\/\s]?(([1-9])|([1-2][0-9]))))[\-\/\s]?\d{4})(\s((([1-9])|(1[02]))\:([0-5][0-9])((\s)|(\:([0-5][0-9])\s))([AM|PM|am|pm]{2,2})))?$" G "<0>3/3/2003" +"^((((([13578])|(1[0-2]))[\-\/\s]?(([1-9])|([1-2][0-9])|(3[01])))|((([469])|(11))[\-\/\s]?(([1-9])|([1-2][0-9])|(30)))|(2[\-\/\s]?(([1-9])|([1-2][0-9]))))[\-\/\s]?\d{4})(\s((([1-9])|(1[02]))\:([0-5][0-9])((\s)|(\:([0-5][0-9])\s))([AM|PM|am|pm]{2,2})))?$" G "<0>3/3/2002 3:33 pm" +"^((((([13578])|(1[0-2]))[\-\/\s]?(([1-9])|([1-2][0-9])|(3[01])))|((([469])|(11))[\-\/\s]?(([1-9])|([1-2][0-9])|(30)))|(2[\-\/\s]?(([1-9])|([1-2][0-9]))))[\-\/\s]?\d{4})(\s((([1-9])|(1[02]))\:([0-5][0-9])((\s)|(\:([0-5][0-9])\s))([AM|PM|am|pm]{2,2})))?$" G "<0>3/3/2003 3:33:33 am" +"^((((([13578])|(1[0-2]))[\-\/\s]?(([1-9])|([1-2][0-9])|(3[01])))|((([469])|(11))[\-\/\s]?(([1-9])|([1-2][0-9])|(30)))|(2[\-\/\s]?(([1-9])|([1-2][0-9]))))[\-\/\s]?\d{4})(\s((([1-9])|(1[02]))\:([0-5][0-9])((\s)|(\:([0-5][0-9])\s))([AM|PM|am|pm]{2,2})))?$" "13/1/2002" +"^((((([13578])|(1[0-2]))[\-\/\s]?(([1-9])|([1-2][0-9])|(3[01])))|((([469])|(11))[\-\/\s]?(([1-9])|([1-2][0-9])|(30)))|(2[\-\/\s]?(([1-9])|([1-2][0-9]))))[\-\/\s]?\d{4})(\s((([1-9])|(1[02]))\:([0-5][0-9])((\s)|(\:([0-5][0-9])\s))([AM|PM|am|pm]{2,2})))?$" "3/3/2002 3:33" +"^((((([13578])|(1[0-2]))[\-\/\s]?(([1-9])|([1-2][0-9])|(3[01])))|((([469])|(11))[\-\/\s]?(([1-9])|([1-2][0-9])|(30)))|(2[\-\/\s]?(([1-9])|([1-2][0-9]))))[\-\/\s]?\d{4})(\s((([1-9])|(1[02]))\:([0-5][0-9])((\s)|(\:([0-5][0-9])\s))([AM|PM|am|pm]{2,2})))?$" "31/3/2002" +"([a-zA-Z]:(\\w+)*\\[a-zA-Z0_9]+)?.xls" G "<0>E:\DyAGT\SD01A_specV2.xls" +"([a-zA-Z]:(\\w+)*\\[a-zA-Z0_9]+)?.xls" "E:\DyAGT\SD01A_specV2.txt" +"(((0[13578]|10|12)([-./])(0[1-9]|[12][0-9]|3[01])([-./])(\d{4}))|((0[469]|11)([-./])([0][1-9]|[12][0-9]|30)([-./])(\d{4}))|((2)([-./])(0[1-9]|1[0-9]|2[0-8])([-./])(\d{4}))|((2)(\.|-|\/)(29)([-./])([02468][048]00))|((2)([-./])(29)([-./])([13579][26]00))|((2)([-./])(29)([-./])([0-9][0-9][0][48]))|((2)([-./])(29)([-./])([0-9][0-9][2468][048]))|((2)([-./])(29)([-./])([0-9][0-9][13579][26])))" G "<0>02/29/2084" +"(((0[13578]|10|12)([-./])(0[1-9]|[12][0-9]|3[01])([-./])(\d{4}))|((0[469]|11)([-./])([0][1-9]|[12][0-9]|30)([-./])(\d{4}))|((2)([-./])(0[1-9]|1[0-9]|2[0-8])([-./])(\d{4}))|((2)(\.|-|\/)(29)([-./])([02468][048]00))|((2)([-./])(29)([-./])([13579][26]00))|((2)([-./])(29)([-./])([0-9][0-9][0][48]))|((2)([-./])(29)([-./])([0-9][0-9][2468][048]))|((2)([-./])(29)([-./])([0-9][0-9][13579][26])))" G "<0>01/31/2000" +"(((0[13578]|10|12)([-./])(0[1-9]|[12][0-9]|3[01])([-./])(\d{4}))|((0[469]|11)([-./])([0][1-9]|[12][0-9]|30)([-./])(\d{4}))|((2)([-./])(0[1-9]|1[0-9]|2[0-8])([-./])(\d{4}))|((2)(\.|-|\/)(29)([-./])([02468][048]00))|((2)([-./])(29)([-./])([13579][26]00))|((2)([-./])(29)([-./])([0-9][0-9][0][48]))|((2)([-./])(29)([-./])([0-9][0-9][2468][048]))|((2)([-./])(29)([-./])([0-9][0-9][13579][26])))" G "<0>11/30/2000" +"(((0[13578]|10|12)([-./])(0[1-9]|[12][0-9]|3[01])([-./])(\d{4}))|((0[469]|11)([-./])([0][1-9]|[12][0-9]|30)([-./])(\d{4}))|((2)([-./])(0[1-9]|1[0-9]|2[0-8])([-./])(\d{4}))|((2)(\.|-|\/)(29)([-./])([02468][048]00))|((2)([-./])(29)([-./])([13579][26]00))|((2)([-./])(29)([-./])([0-9][0-9][0][48]))|((2)([-./])(29)([-./])([0-9][0-9][2468][048]))|((2)([-./])(29)([-./])([0-9][0-9][13579][26])))" "02/29/2083" +"(((0[13578]|10|12)([-./])(0[1-9]|[12][0-9]|3[01])([-./])(\d{4}))|((0[469]|11)([-./])([0][1-9]|[12][0-9]|30)([-./])(\d{4}))|((2)([-./])(0[1-9]|1[0-9]|2[0-8])([-./])(\d{4}))|((2)(\.|-|\/)(29)([-./])([02468][048]00))|((2)([-./])(29)([-./])([13579][26]00))|((2)([-./])(29)([-./])([0-9][0-9][0][48]))|((2)([-./])(29)([-./])([0-9][0-9][2468][048]))|((2)([-./])(29)([-./])([0-9][0-9][13579][26])))" "11/31/2000" +"(((0[13578]|10|12)([-./])(0[1-9]|[12][0-9]|3[01])([-./])(\d{4}))|((0[469]|11)([-./])([0][1-9]|[12][0-9]|30)([-./])(\d{4}))|((2)([-./])(0[1-9]|1[0-9]|2[0-8])([-./])(\d{4}))|((2)(\.|-|\/)(29)([-./])([02468][048]00))|((2)([-./])(29)([-./])([13579][26]00))|((2)([-./])(29)([-./])([0-9][0-9][0][48]))|((2)([-./])(29)([-./])([0-9][0-9][2468][048]))|((2)([-./])(29)([-./])([0-9][0-9][13579][26])))" "01/32/2000" +"^[a-zA-Z0-9\s .\-]+$" G "<0>2222 Mock St." # TODO: \s in patterns not implemented +"^[a-zA-Z0-9\s .\-]+$" G "<0>1 A St." +"^[a-zA-Z0-9\s .\-]+$" G "<0>555-1212" +"^[a-zA-Z0-9\s.\-]+$" "[A Street]" +"^[a-zA-Z0-9\s.\-]+$" "(3 A St.)" +"^[a-zA-Z0-9\s.\-]+$" "{34 C Ave.}" +"^[a-zA-Z0-9\s.\-]+$" "Last.*?(\d+.?\d*)" +"^[a-zA-Z0-9\s .\-]+$" G "
Last1-(123)-123-1234" +"^([0-9]( |-)?)?(\(?[0-9]{3}\)?|[0-9]{3})( |-)?([0-9]{3}( |-)?[0-9]{4}|[a-zA-Z0-9]{7})$" G "<0>123 123 1234" +"^([0-9]( |-)?)?(\(?[0-9]{3}\)?|[0-9]{3})( |-)?([0-9]{3}( |-)?[0-9]{4}|[a-zA-Z0-9]{7})$" G "<0>1-800-ALPHNUM" +"^([0-9]( |-)?)?(\(?[0-9]{3}\)?|[0-9]{3})( |-)?([0-9]{3}( |-)?[0-9]{4}|[a-zA-Z0-9]{7})$" "1.123.123.1234" +"^([0-9]( |-)?)?(\(?[0-9]{3}\)?|[0-9]{3})( |-)?([0-9]{3}( |-)?[0-9]{4}|[a-zA-Z0-9]{7})$" "(123)-1234-123" +"^([0-9]( |-)?)?(\(?[0-9]{3}\)?|[0-9]{3})( |-)?([0-9]{3}( |-)?[0-9]{4}|[a-zA-Z0-9]{7})$" "123-1234" +"^([0-1][0-9]|[2][0-3]):([0-5][0-9])$" G "<0>02:04" +"^([0-1][0-9]|[2][0-3]):([0-5][0-9])$" G "<0>16:56" +"^([0-1][0-9]|[2][0-3]):([0-5][0-9])$" G "<0>23:59" +"^([0-1][0-9]|[2][0-3]):([0-5][0-9])$" "02:00 PM" +"^([0-1][0-9]|[2][0-3]):([0-5][0-9])$" "PM2:00" +"^([0-1][0-9]|[2][0-3]):([0-5][0-9])$" "24:00" +"^[0,1]?\d{1}\/(([0-2]?\d{1})|([3][0,1]{1}))\/(([1]{1}[9]{1}[9]{1}\d{1})|([2-9]{1}\d{3}))$" G "<0>01/01/1990" +"^[0,1]?\d{1}\/(([0-2]?\d{1})|([3][0,1]{1}))\/(([1]{1}[9]{1}[9]{1}\d{1})|([2-9]{1}\d{3}))$" G "<0>12/12/9999" +"^[0,1]?\d{1}\/(([0-2]?\d{1})|([3][0,1]{1}))\/(([1]{1}[9]{1}[9]{1}\d{1})|([2-9]{1}\d{3}))$" G "<0>3/28/2001" +"^[0,1]?\d{1}\/(([0-2]?\d{1})|([3][0,1]{1}))\/(([1]{1}[9]{1}[9]{1}\d{1})|([2-9]{1}\d{3}))$" "3-8-01" +"^[0,1]?\d{1}\/(([0-2]?\d{1})|([3][0,1]{1}))\/(([1]{1}[9]{1}[9]{1}\d{1})|([2-9]{1}\d{3}))$" "13/32/1001" +"^[0,1]?\d{1}\/(([0-2]?\d{1})|([3][0,1]{1}))\/(([1]{1}[9]{1}[9]{1}\d{1})|([2-9]{1}\d{3}))$" "03/32/1989" +"((\(\d{3}\)?)|(\d{3}))([\s \-./]?)(\d{3})([\s \-./]?)(\d{4})" G "<0>1.2123644567" +"((\(\d{3}\)?)|(\d{3}))([\s \-./]?)(\d{3})([\s \-./]?)(\d{4})" G "<0>0-234.567/8912" +"((\(\d{3}\)?)|(\d{3}))([\s \-./]?)(\d{3})([\s \-./]?)(\d{4})" G "<0>1-(212)-123 4567" +"((\(\d{3}\)?)|(\d{3}))([\s \-./]?)(\d{3})([\s \-./]?)(\d{4})" "0-212364345" +"((\(\d{3}\)?)|(\d{3}))([\s \-./]?)(\d{3})([\s \-./]?)(\d{4})" "1212-364,4321" +"((\(\d{3}\)?)|(\d{3}))([\s \-./]?)(\d{3})([\s \-./]?)(\d{4})" "0212\345/6789" +"^([0-9]{6}[\s \-]{1}[0-9]{12}|[0-9]{18})$" G "<0>000000 000000000000" +"^([0-9]{6}[\s \-]{1}[0-9]{12}|[0-9]{18})$" G "<0>000000-000000000000" +"^([0-9]{6}[\s \-]{1}[0-9]{12}|[0-9]{18})$" G "<0>000000000000000000" +"^([0-9]{6}[\s \-]{1}[0-9]{12}|[0-9]{18})$" "000000_000000000000" +"^(([1-9])|(0[1-9])|(1[0-2]))\/((0[1-9])|([1-31]))\/((\d{2})|(\d{4}))$" G "<0>01/01/2001" +"^(([1-9])|(0[1-9])|(1[0-2]))\/((0[1-9])|([1-31]))\/((\d{2})|(\d{4}))$" G "<0>1/1/2001" +"^(([1-9])|(0[1-9])|(1[0-2]))\/((0[1-9])|([1-31]))\/((\d{2})|(\d{4}))$" G "<0>01/1/01" +"^(([1-9])|(0[1-9])|(1[0-2]))\/((0[1-9])|([1-31]))\/((\d{2})|(\d{4}))$" "13/01/2001" +"^(([1-9])|(0[1-9])|(1[0-2]))\/((0[1-9])|([1-31]))\/((\d{2})|(\d{4}))$" "1/2/100" +"^(([1-9])|(0[1-9])|(1[0-2]))\/((0[1-9])|([1-31]))\/((\d{2})|(\d{4}))$" "09/32/2001" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(.[0-9][0-9])?$" G "<0>$3,023,123.34" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(.[0-9][0-9])?$" G "<0>9,876,453" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(.[0-9][0-9])?$" G "<0>123456.78" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(.[0-9][0-9])?$" "4,33,234.34" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(.[0-9][0-9])?$" "$1.234" +"^\$?([0-9]{1,3},([0-9]{3},)*[0-9]{3}|[0-9]+)(.[0-9][0-9])?$" "abc" +"^\d{5}$|^\d{5}-\d{4}$" G "<0>55555-5555" +"^\d{5}$|^\d{5}-\d{4}$" G "<0>34564-3342" +"^\d{5}$|^\d{5}-\d{4}$" G "<0>90210" +"^\d{5}$|^\d{5}-\d{4}$" "434454444" +"^\d{5}$|^\d{5}-\d{4}$" "645-32-2345" +"^\d{5}$|^\d{5}-\d{4}$" "abc" +"^\d{3}-\d{2}-\d{4}$" G "<0>333-22-4444" +"^\d{3}-\d{2}-\d{4}$" G "<0>123-45-6789" +"^\d{3}-\d{2}-\d{4}$" "123456789" +"^\d{3}-\d{2}-\d{4}$" "SSN" +"^[2-9]\d{2}-\d{3}-\d{4}$" G "<0>800-555-5555" +"^[2-9]\d{2}-\d{3}-\d{4}$" G "<0>333-444-5555" +"^[2-9]\d{2}-\d{3}-\d{4}$" G "<0>212-666-1234" +"^[2-9]\d{2}-\d{3}-\d{4}$" "000-000-0000" +"^[2-9]\d{2}-\d{3}-\d{4}$" "123-456-7890" +"^[2-9]\d{2}-\d{3}-\d{4}$" "2126661234" +"^\d{5}-\d{4}|\d{5}|[A-Z]\d[A-Z] \d[A-Z]\d$" G "<0>44240" +"^\d{5}-\d{4}|\d{5}|[A-Z]\d[A-Z] \d[A-Z]\d$" G "<0>44240-5555" +"^\d{5}-\d{4}|\d{5}|[A-Z]\d[A-Z] \d[A-Z]\d$" G "<0>G3H 6A3" +"^\d{5}-\d{4}|\d{5}|[A-Z]\d[A-Z] \d[A-Z]\d$" "Ohio" +"^\d{5}-\d{4}|\d{5}|[A-Z]\d[A-Z] \d[A-Z]\d$" "abc" +"^\d{5}-\d{4}|\d{5}|[A-Z]\d[A-Z] \d[A-Z]\d$" "g3h6a3" +"[0-9]{4}\s*[a-zA-Z]{2}" G "<0>1054 WD" +"[0-9]{4}\s*[a-zA-Z]{2}" G "<0>1054WD" +"[0-9]{4}\s*[a-zA-Z]{2}" G "<0>1054 wd" +"[0-9]{4}\s*[a-zA-Z]{2}" "10543" +"(^1300\d{6}$)|(^1800|1900|1902\d{6}$)|(^0[2|3|7|8]{1}[0-9]{8}$)|(^13\d{4}$)|(^04\d{2,3}\d{6}$)" G "<0>0732105432" +"(^1300\d{6}$)|(^1800|1900|1902\d{6}$)|(^0[2|3|7|8]{1}[0-9]{8}$)|(^13\d{4}$)|(^04\d{2,3}\d{6}$)" G "<0>1300333444" +"(^1300\d{6}$)|(^1800|1900|1902\d{6}$)|(^0[2|3|7|8]{1}[0-9]{8}$)|(^13\d{4}$)|(^04\d{2,3}\d{6}$)" G "<0>131313" +"(^1300\d{6}$)|(^1800|1900|1902\d{6}$)|(^0[2|3|7|8]{1}[0-9]{8}$)|(^13\d{4}$)|(^04\d{2,3}\d{6}$)" "32105432" +"(^1300\d{6}$)|(^1800|1900|1902\d{6}$)|(^0[2|3|7|8]{1}[0-9]{8}$)|(^13\d{4}$)|(^04\d{2,3}\d{6}$)" "13000456" +"^((https?|ftp)\://((\[?(\d{1,3}\.){3}\d{1,3}\]?)|(([\-a-zA-Z0-9]+\.)+[a-zA-Z]{2,4}))(\:\d+)?(/[\-a-zA-Z0-9._?,'+\&%$#=~\\]+)*/?)$" G "<0>http://207.68.172.254/home.ashx" +"^((https?|ftp)\://((\[?(\d{1,3}\.){3}\d{1,3}\]?)|(([\-a-zA-Z0-9]+\.)+[a-zA-Z]{2,4}))(\:\d+)?(/[\-a-zA-Z0-9._?,'+\&%$#=~\\]+)*/?)$" G "<0>ftp://ftp.netscape.com/" +"^((https?|ftp)\://((\[?(\d{1,3}\.){3}\d{1,3}\]?)|(([\-a-zA-Z0-9]+\.)+[a-zA-Z]{2,4}))(\:\d+)?(/[\-a-zA-Z0-9._?,'+\&%$#=~\\]+)*/?)$" G "<0>https://www.brinkster.com/login.asp" +"^((https?|ftp)\://((\[?(\d{1,3}\.){3}\d{1,3}\]?)|(([\-a-zA-Z0-9]+\.)+[a-zA-Z]{2,4}))(\:\d+)?(/[\-a-zA-Z0-9._?,'+\&%$#=~\\]+)*/?)$" "htp://mistake.com/" +"^((https?|ftp)\://((\[?(\d{1,3}\.){3}\d{1,3}\]?)|(([\-a-zA-Z0-9]+\.)+[a-zA-Z]{2,4}))(\:\d+)?(/[\-a-zA-Z0-9._?,'+\&%$#=~\\]+)*/?)$" "http://www_address.com/" +"^((https?|ftp)\://((\[?(\d{1,3}\.){3}\d{1,3}\]?)|(([\-a-zA-Z0-9]+\.)+[a-zA-Z]{2,4}))(\:\d+)?(/[\-a-zA-Z0-9._?,'+\&%$#=~\\]+)*/?)$" "ftp://www.files.com/file with spaces.txt" +"([0-9]{4})-([0-9]{1,2})-([0-9]{1,2})" G "<0>2002-11-03" +"([0-9]{4})-([0-9]{1,2})-([0-9]{1,2})" G "<0>2007-17-08" +"([0-9]{4})-([0-9]{1,2})-([0-9]{1,2})" G "<0>9999-99-99" +"([0-9]{4})-([0-9]{1,2})-([0-9]{1,2})" "2002/17/18" +"([0-9]{4})-([0-9]{1,2})-([0-9]{1,2})" "2002.18.45" +"([0-9]{4})-([0-9]{1,2})-([0-9]{1,2})" "18.45.2002" +"^\$?(\d{1,3}(\,\d{3})*|(\d+))(\.\d{0,2})?$" G "<0>$0,234.50" +"^\$?(\d{1,3}(\,\d{3})*|(\d+))(\.\d{0,2})?$" G "<0>0234.5" +"^\$?(\d{1,3}(\,\d{3})*|(\d+))(\.\d{0,2})?$" G "<0>0,234." +"^\$?(\d{1,3}(\,\d{3})*|(\d+))(\.\d{0,2})?$" "$1,23,50" +"^\$?(\d{1,3}(\,\d{3})*|(\d+))(\.\d{0,2})?$" "$123.123" +"(^\d{5}-\d{3}|^\d{2}.\d{3}-\d{3}|\d{8})" G "<0>12.345-678" +"(^\d{5}-\d{3}|^\d{2}.\d{3}-\d{3}|\d{8})" G "<0>12345-678" +"(^\d{5}-\d{3}|^\d{2}.\d{3}-\d{3}|\d{8})" G "<0>12345678" +"(^\d{5}-\d{3}|^\d{2}.\d{3}-\d{3}|\d{8})" "12.345678" +"(^\d{5}-\d{3}|^\d{2}.\d{3}-\d{3}|\d{8})" "12345-1" +"(^\d{5}-\d{3}|^\d{2}.\d{3}-\d{3}|\d{8})" "123" +'^([a-zA-Z]\:|\\)\\([^\\]+\\)*[^\/:*?"<>|]+\.htm(l)?$' G "<0>x:\\test\\testing.htm" +'^([a-zA-Z]\:|\\)\\([^\\]+\\)*[^\/:*?"<>|]+\.htm(l)?$' G "<0>x:\\test\\test#$ ing.html" +'^([a-zA-Z]\:|\\)\\([^\\]+\\)*[^\/:*?"<>|]+\.htm(l)?$' G "<0>\\\\test\testing.html" +'^([a-zA-Z]\:|\\)\\([^\\]+\\)*[^\/:*?"<>|]+\.htm(l)?$' "x:\test\test/ing.htm" +'^([a-zA-Z]\:|\\)\\([^\\]+\\)*[^\/:*?"<>|]+\.htm(l)?$' "x:\test\test*.htm" +'^([a-zA-Z]\:|\\)\\([^\\]+\\)*[^\/:*?"<>|]+\.htm(l)?$' "\\test?<.htm" +"^[1-9]{1}[0-9]{3}$" G "<0>1234" +"^[1-9]{1}[0-9]{3}$" "123" +"^[1-9]{1}[0-9]{3}$" "123A" +"^[A-Z]{1}( |-)?[1-9]{1}[0-9]{3}$" G "<0>A-1234" +"^[A-Z]{1}( |-)?[1-9]{1}[0-9]{3}$" G "<0>A 1234" +"^[A-Z]{1}( |-)?[1-9]{1}[0-9]{3}$" G "<0>A1234" +"^[A-Z]{1}( |-)?[1-9]{1}[0-9]{3}$" "AA-1234" +"^[A-Z]{1}( |-)?[1-9]{1}[0-9]{3}$" "A12345" +"^(F-)?[0-9]{5}$" G "<0>12345" +"^(F-)?[0-9]{5}$" G "<0>F-12345" +"^(F-)?[0-9]{5}$" "F12345" +"^(F-)?[0-9]{5}$" "F-123456" +"^(F-)?[0-9]{5}$" "123456" +"^(V-|I-)?[0-9]{4}$" G "<0>1234" +"^(V-|I-)?[0-9]{4}$" G "<0>V-1234" +"^(V-|I-)?[0-9]{4}$" "12345" +"^[1-9]{1}[0-9]{3} ?[A-Z]{2}$" G "<0>1234 AB" +"^[1-9]{1}[0-9]{3} ?[A-Z]{2}$" G "<0>1234AB" +"^[1-9]{1}[0-9]{3} ?[A-Z]{2}$" "123AB" +"^[1-9]{1}[0-9]{3} ?[A-Z]{2}$" "1234AAA" +"^([1-9]{2}|[0-9][1-9]|[1-9][0-9])[0-9]{3}$" G "<0>12345" +"^([1-9]{2}|[0-9][1-9]|[1-9][0-9])[0-9]{3}$" G "<0>10234" +"^([1-9]{2}|[0-9][1-9]|[1-9][0-9])[0-9]{3}$" G "<0>01234" +"^([1-9]{2}|[0-9][1-9]|[1-9][0-9])[0-9]{3}$" "00123" +"^(/w|/W|[^<>+?$%\{}\&])+$" G "<0>John Doe Sr." +"^(/w|/W|[^<>+?$%\{}\&])+$" G "<0>100 Elm St., Suite 25" +"^(/w|/W|[^<>+?$%\{}\&])+$" G "<0>Valerie's Gift Shop" +"^(/w|/W|[^<>+?$%\{}\&])+$" "

Hey

" +/<[a-zA-Z][^>]*\son\w+=(\w+|'[^']*'|"[^"]*")[^>]*>/ G '<0>' +/<[a-zA-Z][^>]*\son\w+=(\w+|'[^']*'|"[^"]*")[^>]*>/ '' +"(?!^0*$)(?!^0*\.0*$)^\d{1,5}(\.\d{1,3})?$" G "<0>1" +"(?!^0*$)(?!^0*\.0*$)^\d{1,5}(\.\d{1,3})?$" G "<0>12345.123" +"(?!^0*$)(?!^0*\.0*$)^\d{1,5}(\.\d{1,3})?$" G "<0>0.5" +"(?!^0*$)(?!^0*\.0*$)^\d{1,5}(\.\d{1,3})?$" "0" +"(?!^0*$)(?!^0*\.0*$)^\d{1,5}(\.\d{1,3})?$" "0.0" +"(?!^0*$)(?!^0*\.0*$)^\d{1,5}(\.\d{1,3})?$" "123456.1234" +"^.+@[^\.].*\.[a-z]{2,}$" G "<0>whatever@somewhere.museum" +"^.+@[^\.].*\.[a-z]{2,}$" G "<0>foreignchars@myforeigncharsdomain.nu" +"^.+@[^\.].*\.[a-z]{2,}$" G "<0>me+mysomething@mydomain.com" +"^.+@[^\.].*\.[a-z]{2,}$" "a@b.c" +"^.+@[^\.].*\.[a-z]{2,}$" "me@.my.com" +"^.+@[^\.].*\.[a-z]{2,}$" "a@b.comFOREIGNCHAR" +"^(\d{5}-\d{4}|\d{5})$" G "<0>12345" +"^(\d{5}-\d{4}|\d{5})$" G "<0>12345-1234" +"^(\d{5}-\d{4}|\d{5})$" "12345-12345" +"^(\d{5}-\d{4}|\d{5})$" "123" +"^(\d{5}-\d{4}|\d{5})$" "12345-abcd" +"^(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])$" G "<0>0.0.0.0" +"^(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])$" G "<0>255.255.255.02" +"^(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])$" G "<0>192.168.0.136" +"^(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])$" "256.1.3.4" +"^(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])$" "023.44.33.22" +"^(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])$" "10.57.98.23." +"]*[^/])>" G '<0>' +"]*[^/])>" '' +"" G "<0>" +"" G "<0>" +"" "this is a comment" +"" G "<0>" +"" G "<0>" +"" "this is a comment" +/<\u002f?(\w+)(\s+\w+=(\w+|"[^"]*"|'[^']*'))*>/ G "<0>
" +/<\u002f?(\w+)(\s+\w+=(\w+|"[^"]*"|'[^']*'))*>/ G '<0>' +/<\u002f?(\w+)(\s+\w+=(\w+|"[^"]*"|'[^']*'))*>/ G "<0>
{{github_com_vitessio_vitess_vtctld_srv_cell $ts.Cell}}{{github_com_vitessio_vitess_vtctld_srv_keyspace $ts.Cell $ts.Target.Keyspace}}{{$ts.Cell}}{{$ts.Target.Keyspace}} {{$ts.Target.Shard}} {{$ts.Target.TabletType}} {{$ts.StatusAsHTML}}
{{github_com_vitessio_vitess_vtctld_srv_cell $skn.Cell}}{{range $j, $value := $skn.Value}}{{github_com_vitessio_vitess_vtctld_srv_keyspace $skn.Cell $value}} {{end}}{{$skn.Cell}}{{range $j, $value := $skn.Value}}{{$value}} {{end}} {{github_com_vitessio_vitess_srvtopo_ttl_time $skn.ExpirationTime}} {{if $skn.LastError}}({{github_com_vitessio_vitess_srvtopo_time_since $skn.LastQueryTime}}Ago) {{$skn.LastError}}{{end}}
{{github_com_vitessio_vitess_vtctld_srv_cell $sk.Cell}}{{github_com_vitessio_vitess_vtctld_srv_keyspace $sk.Cell $sk.Keyspace}}{{$sk.Cell}}{{$sk.Keyspace}} {{$sk.StatusAsHTML}} {{github_com_vitessio_vitess_srvtopo_ttl_time $sk.ExpirationTime}} {{if $sk.LastError}}({{github_com_vitessio_vitess_srvtopo_time_since $sk.LastErrorTime}} Ago) {{$sk.LastError}}{{end}}
select id from user where id = 1select id from `user` where id = 110.0010001
select id from userselect id from `user`11.0000008
insert into user.*insert into `user`.*20.1000002Keyspace Shard TabletTypeAddress Query Sent Query Error QPS (avg 1m){{$status.Keyspace}} {{$status.Shard}} {{$status.TabletType}}{{$status.Name}} {{$status.QueryCount}} {{$status.QueryError}} {{$status.FormattedQPS}}Sharded Table Count Vindex CountVindex Unknown Parameters Count Error
{{if $ks.Sharded}}Yes{{else}}No{{end}} {{$ks.TableCount}} {{$ks.VindexCount}}{{$ks.VindexUnknownParamsCount}} {{$ks.Error}}
diff --git a/go/vt/vtgate/vstream_manager.go b/go/vt/vtgate/vstream_manager.go index 6efe0fb5e7a..38706a8fbee 100644 --- a/go/vt/vtgate/vstream_manager.go +++ b/go/vt/vtgate/vstream_manager.go @@ -20,21 +20,21 @@ import ( "context" "fmt" "io" + "regexp" "strings" "sync" "time" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/discovery" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/vt/log" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/vterrors" @@ -45,6 +45,9 @@ type vstreamManager struct { resolver *srvtopo.Resolver toposerv srvtopo.Server cell string + + vstreamsCreated *stats.CountersWithMultiLabels + vstreamsLag *stats.GaugesWithMultiLabels } // maxSkewTimeoutSeconds is the maximum allowed skew between two streams when the MinimizeSkew flag is set @@ -109,6 +112,8 @@ type vstream struct { eventCh chan []*binlogdatapb.VEvent heartbeatInterval uint32 ts *topo.Server + + tabletPickerOptions discovery.TabletPickerOptions } type journalEvent struct { @@ -118,10 +123,19 @@ type journalEvent struct { } func newVStreamManager(resolver *srvtopo.Resolver, serv srvtopo.Server, cell string) *vstreamManager { + exporter := servenv.NewExporter(cell, "VStreamManager") return &vstreamManager{ resolver: resolver, toposerv: serv, cell: cell, + vstreamsCreated: exporter.NewCountersWithMultiLabels( + "VStreamsCreated", + "Number of vstreams created", + []string{"Keyspace", "ShardName", "TabletType"}), + vstreamsLag: exporter.NewGaugesWithMultiLabels( + "VStreamsLag", + "Difference between event current time and the binlog event timestamp", + []string{"Keyspace", "ShardName", "TabletType"}), } } @@ -156,6 +170,10 @@ func (vsm *vstreamManager) VStream(ctx context.Context, tabletType topodatapb.Ta heartbeatInterval: flags.GetHeartbeatInterval(), ts: ts, copyCompletedShard: make(map[string]struct{}), + tabletPickerOptions: discovery.TabletPickerOptions{ + CellPreference: flags.GetCellPreference(), + TabletOrder: flags.GetTabletOrder(), + }, } return vs.stream(ctx) } @@ -179,31 +197,51 @@ func (vsm *vstreamManager) resolveParams(ctx context.Context, tabletType topodat return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vgtid must have at least one value with a starting position") } // To fetch from all keyspaces, the input must contain a single ShardGtid - // that has an empty keyspace, and the Gtid must be "current". In the - // future, we'll allow the Gtid to be empty which will also support - // copying of existing data. - if len(vgtid.ShardGtids) == 1 && vgtid.ShardGtids[0].Keyspace == "" { - if vgtid.ShardGtids[0].Gtid != "current" { - return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "for an empty keyspace, the Gtid value must be 'current': %v", vgtid) - } - keyspaces, err := vsm.toposerv.GetSrvKeyspaceNames(ctx, vsm.cell, false) - if err != nil { - return nil, nil, nil, err - } - newvgtid := &binlogdatapb.VGtid{} - for _, keyspace := range keyspaces { - newvgtid.ShardGtids = append(newvgtid.ShardGtids, &binlogdatapb.ShardGtid{ - Keyspace: keyspace, - Gtid: "current", - }) + // that has an empty keyspace, and the Gtid must be "current". + // Or the input must contain a single ShardGtid that has keyspace wildcards. + if len(vgtid.ShardGtids) == 1 { + inputKeyspace := vgtid.ShardGtids[0].Keyspace + isEmpty := inputKeyspace == "" + isRegexp := strings.HasPrefix(inputKeyspace, "/") + if isEmpty || isRegexp { + newvgtid := &binlogdatapb.VGtid{} + keyspaces, err := vsm.toposerv.GetSrvKeyspaceNames(ctx, vsm.cell, false) + if err != nil { + return nil, nil, nil, err + } + + if isEmpty { + if vgtid.ShardGtids[0].Gtid != "current" { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "for an empty keyspace, the Gtid value must be 'current': %v", vgtid) + } + for _, keyspace := range keyspaces { + newvgtid.ShardGtids = append(newvgtid.ShardGtids, &binlogdatapb.ShardGtid{ + Keyspace: keyspace, + Gtid: "current", + }) + } + } else { + re, err := regexp.Compile(strings.Trim(inputKeyspace, "/")) + if err != nil { + return nil, nil, nil, err + } + for _, keyspace := range keyspaces { + if re.MatchString(keyspace) { + newvgtid.ShardGtids = append(newvgtid.ShardGtids, &binlogdatapb.ShardGtid{ + Keyspace: keyspace, + Gtid: vgtid.ShardGtids[0].Gtid, + }) + } + } + } + vgtid = newvgtid } - vgtid = newvgtid } newvgtid := &binlogdatapb.VGtid{} for _, sgtid := range vgtid.ShardGtids { if sgtid.Shard == "" { - if sgtid.Gtid != "current" { - return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "if shards are unspecified, the Gtid value must be 'current': %v", vgtid) + if sgtid.Gtid != "current" && sgtid.Gtid != "" { + return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "if shards are unspecified, the Gtid value must be 'current' or empty; got: %v", vgtid) } // TODO(sougou): this should work with the new Migrate workflow _, _, allShards, err := vsm.resolver.GetKeyspaceShards(ctx, sgtid.Keyspace, tabletType) @@ -452,7 +490,7 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha var eventss [][]*binlogdatapb.VEvent var err error cells := vs.getCells() - tp, err := discovery.NewTabletPicker(vs.ts, cells, sgtid.Keyspace, sgtid.Shard, vs.tabletType.String()) + tp, err := discovery.NewTabletPicker(ctx, vs.ts, cells, vs.vsm.cell, sgtid.Keyspace, sgtid.Shard, vs.tabletType.String(), vs.tabletPickerOptions) if err != nil { log.Errorf(err.Error()) return err @@ -507,10 +545,17 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha Filter: vs.filter, TableLastPKs: sgtid.TablePKs, } + var vstreamCreatedOnce sync.Once err = tabletConn.VStream(ctx, req, func(events []*binlogdatapb.VEvent) error { // We received a valid event. Reset error count. errCount = 0 + labels := []string{sgtid.Keyspace, sgtid.Shard, req.Target.TabletType.String()} + + vstreamCreatedOnce.Do(func() { + vs.vsm.vstreamsCreated.Add(labels, 1) + }) + select { case <-ctx.Done(): return ctx.Err() @@ -532,12 +577,12 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha // Update table names and send. // If we're streaming from multiple keyspaces, this will disambiguate // duplicate table names. - ev := proto.Clone(event).(*binlogdatapb.VEvent) + ev := event.CloneVT() ev.FieldEvent.TableName = sgtid.Keyspace + "." + ev.FieldEvent.TableName sendevents = append(sendevents, ev) case binlogdatapb.VEventType_ROW: // Update table names and send. - ev := proto.Clone(event).(*binlogdatapb.VEvent) + ev := event.CloneVT() ev.RowEvent.TableName = sgtid.Keyspace + "." + ev.RowEvent.TableName sendevents = append(sendevents, ev) case binlogdatapb.VEventType_COMMIT, binlogdatapb.VEventType_DDL, binlogdatapb.VEventType_OTHER: @@ -606,6 +651,9 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha default: sendevents = append(sendevents, event) } + lag := event.CurrentTime/1e9 - event.Timestamp + vs.vsm.vstreamsLag.Set(labels, lag) + } if len(sendevents) != 0 { eventss = append(eventss, sendevents) @@ -652,7 +700,7 @@ func (vs *vstream) sendAll(ctx context.Context, sgtid *binlogdatapb.ShardGtid, e sgtid.Gtid = event.Gtid events[j] = &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_VGTID, - Vgtid: proto.Clone(vs.vgtid).(*binlogdatapb.VGtid), + Vgtid: vs.vgtid.CloneVT(), Keyspace: event.Keyspace, Shard: event.Shard, } @@ -681,7 +729,7 @@ func (vs *vstream) sendAll(ctx context.Context, sgtid *binlogdatapb.ShardGtid, e } events[j] = &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_VGTID, - Vgtid: proto.Clone(vs.vgtid).(*binlogdatapb.VGtid), + Vgtid: vs.vgtid.CloneVT(), Keyspace: event.Keyspace, Shard: event.Shard, } diff --git a/go/vt/vtgate/vstream_manager_test.go b/go/vt/vtgate/vstream_manager_test.go index 7136539510b..3018791964f 100644 --- a/go/vt/vtgate/vstream_manager_test.go +++ b/go/vt/vtgate/vstream_manager_test.go @@ -25,24 +25,24 @@ import ( "testing" "time" - "vitess.io/vitess/go/vt/topo" - - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/stats" - "vitess.io/vitess/go/vt/vttablet/sandboxconn" + "google.golang.org/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/discovery" - "vitess.io/vitess/go/vt/proto/binlogdata" + "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/sandboxconn" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/srvtopo" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + + "vitess.io/vitess/go/test/utils" ) var mu sync.Mutex @@ -88,13 +88,13 @@ func TestVStreamSkew(t *testing.T) { _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20", "20-40"}) - vsm := newTestVStreamManager(hc, st, cell) + vsm := newTestVStreamManager(ctx, hc, st, cell) vgtid := &binlogdatapb.VGtid{ShardGtids: []*binlogdatapb.ShardGtid{}} want := int64(0) var sbc0, sbc1 *sandboxconn.SandboxConn if tcase.shard0idx != 0 { sbc0 = hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) sbc0.VStreamCh = make(chan *binlogdatapb.VEvent) want += 2 * tcase.numEventsPerShard vgtid.ShardGtids = append(vgtid.ShardGtids, &binlogdatapb.ShardGtid{Keyspace: ks, Gtid: "pos", Shard: "-20"}) @@ -102,7 +102,7 @@ func TestVStreamSkew(t *testing.T) { } if tcase.shard1idx != 0 { sbc1 = hc.AddTestTablet(cell, "1.1.1.1", 1002, ks, "20-40", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "20-40", sbc1.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "20-40", sbc1.Tablet()) sbc1.VStreamCh = make(chan *binlogdatapb.VEvent) want += 2 * tcase.numEventsPerShard vgtid.ShardGtids = append(vgtid.ShardGtids, &binlogdatapb.ShardGtid{Keyspace: ks, Gtid: "pos", Shard: "20-40"}) @@ -134,9 +134,9 @@ func TestVStreamEvents(t *testing.T) { hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20"}) - vsm := newTestVStreamManager(hc, st, cell) + vsm := newTestVStreamManager(ctx, hc, st, cell) sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) send1 := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_GTID, Gtid: "gtid01"}, @@ -211,11 +211,11 @@ func TestVStreamChunks(t *testing.T) { _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20", "20-40"}) - vsm := newTestVStreamManager(hc, st, cell) + vsm := newTestVStreamManager(ctx, hc, st, cell) sbc0 := hc.AddTestTablet("aa", "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) sbc1 := hc.AddTestTablet("aa", "1.1.1.1", 1002, ks, "20-40", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "20-40", sbc1.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "20-40", sbc1.Tablet()) for i := 0; i < 100; i++ { sbc0.AddVStreamEvents([]*binlogdatapb.VEvent{{Type: binlogdatapb.VEventType_DDL}}, nil) @@ -279,11 +279,11 @@ func TestVStreamMulti(t *testing.T) { _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20", "20-40"}) - vsm := newTestVStreamManager(hc, st, "aa") + vsm := newTestVStreamManager(ctx, hc, st, "aa") sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) sbc1 := hc.AddTestTablet(cell, "1.1.1.1", 1002, ks, "20-40", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "20-40", sbc1.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "20-40", sbc1.Tablet()) send0 := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_GTID, Gtid: "gtid01"}, @@ -333,6 +333,59 @@ func TestVStreamMulti(t *testing.T) { } } +func TestVStreamsCreatedAndLagMetrics(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cell := "aa" + ks := "TestVStream" + _ = createSandbox(ks) + hc := discovery.NewFakeHealthCheck(nil) + st := getSandboxTopo(ctx, cell, ks, []string{"-20", "20-40"}) + vsm := newTestVStreamManager(ctx, hc, st, cell) + vsm.vstreamsCreated.ResetAll() + vsm.vstreamsLag.ResetAll() + sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) + sbc1 := hc.AddTestTablet(cell, "1.1.1.1", 1002, ks, "20-40", topodatapb.TabletType_PRIMARY, true, 1, nil) + addTabletToSandboxTopo(t, ctx, st, ks, "20-40", sbc1.Tablet()) + + send0 := []*binlogdatapb.VEvent{ + {Type: binlogdatapb.VEventType_GTID, Gtid: "gtid01"}, + {Type: binlogdatapb.VEventType_COMMIT, Timestamp: 10, CurrentTime: 15 * 1e9}, + } + sbc0.AddVStreamEvents(send0, nil) + + send1 := []*binlogdatapb.VEvent{ + {Type: binlogdatapb.VEventType_GTID, Gtid: "gtid02"}, + {Type: binlogdatapb.VEventType_COMMIT, Timestamp: 10, CurrentTime: 17 * 1e9}, + } + sbc1.AddVStreamEvents(send1, nil) + + vgtid := &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{{ + Keyspace: ks, + Shard: "-20", + Gtid: "pos", + }, { + Keyspace: ks, + Shard: "20-40", + Gtid: "pos", + }}, + } + ch := startVStream(ctx, t, vsm, vgtid, nil) + <-ch + <-ch + wantVStreamsCreated := make(map[string]int64) + wantVStreamsCreated["TestVStream.-20.PRIMARY"] = 1 + wantVStreamsCreated["TestVStream.20-40.PRIMARY"] = 1 + assert.Equal(t, wantVStreamsCreated, vsm.vstreamsCreated.Counts(), "vstreamsCreated matches") + + wantVStreamsLag := make(map[string]int64) + wantVStreamsLag["TestVStream.-20.PRIMARY"] = 5 + wantVStreamsLag["TestVStream.20-40.PRIMARY"] = 7 + assert.Equal(t, wantVStreamsLag, vsm.vstreamsLag.Counts(), "vstreamsLag matches") +} + func TestVStreamRetry(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -343,9 +396,9 @@ func TestVStreamRetry(t *testing.T) { hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20"}) - vsm := newTestVStreamManager(hc, st, "aa") + vsm := newTestVStreamManager(ctx, hc, st, "aa") sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) commit := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_COMMIT}, } @@ -383,9 +436,9 @@ func TestVStreamShouldNotSendSourceHeartbeats(t *testing.T) { _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20"}) - vsm := newTestVStreamManager(hc, st, cell) + vsm := newTestVStreamManager(ctx, hc, st, cell) sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) send0 := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_HEARTBEAT}, @@ -433,13 +486,13 @@ func TestVStreamJournalOneToMany(t *testing.T) { _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20", "-10", "10-20"}) - vsm := newTestVStreamManager(hc, st, "aa") + vsm := newTestVStreamManager(ctx, hc, st, "aa") sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) sbc1 := hc.AddTestTablet(cell, "1.1.1.1", 1002, ks, "-10", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-10", sbc1.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-10", sbc1.Tablet()) sbc2 := hc.AddTestTablet(cell, "1.1.1.1", 1003, ks, "10-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "10-20", sbc2.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "10-20", sbc2.Tablet()) send1 := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_GTID, Gtid: "gtid01"}, @@ -546,13 +599,13 @@ func TestVStreamJournalManyToOne(t *testing.T) { _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20", "-10", "10-20"}) - vsm := newTestVStreamManager(hc, st, cell) + vsm := newTestVStreamManager(ctx, hc, st, cell) sbc0 := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) sbc1 := hc.AddTestTablet(cell, "1.1.1.1", 1002, ks, "-10", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-10", sbc1.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-10", sbc1.Tablet()) sbc2 := hc.AddTestTablet(cell, "1.1.1.1", 1003, ks, "10-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "10-20", sbc2.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "10-20", sbc2.Tablet()) send3 := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_GTID, Gtid: "gtid03"}, @@ -663,9 +716,9 @@ func TestVStreamJournalNoMatch(t *testing.T) { _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20"}) - vsm := newTestVStreamManager(hc, st, "aa") + vsm := newTestVStreamManager(ctx, hc, st, "aa") sbc0 := hc.AddTestTablet("aa", "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) send1 := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_GTID, Gtid: "gtid01"}, @@ -695,7 +748,7 @@ func TestVStreamJournalNoMatch(t *testing.T) { {Type: binlogdatapb.VEventType_GTID, Gtid: "jn1"}, {Type: binlogdatapb.VEventType_COMMIT}, } - wantjn1 := &binlogdata.VStreamResponse{Events: []*binlogdatapb.VEvent{ + wantjn1 := &binlogdatapb.VStreamResponse{Events: []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_VGTID, Vgtid: &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ Keyspace: ks, @@ -743,7 +796,7 @@ func TestVStreamJournalNoMatch(t *testing.T) { {Type: binlogdatapb.VEventType_GTID, Gtid: "jn2"}, {Type: binlogdatapb.VEventType_COMMIT}, } - wantjn2 := &binlogdata.VStreamResponse{Events: []*binlogdatapb.VEvent{ + wantjn2 := &binlogdatapb.VStreamResponse{Events: []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_VGTID, Vgtid: &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ Keyspace: ks, @@ -792,11 +845,11 @@ func TestVStreamJournalPartialMatch(t *testing.T) { _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20", "-10", "10-20"}) - vsm := newTestVStreamManager(hc, st, "aa") + vsm := newTestVStreamManager(ctx, hc, st, "aa") sbc1 := hc.AddTestTablet("aa", "1.1.1.1", 1002, ks, "-10", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-10", sbc1.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-10", sbc1.Tablet()) sbc2 := hc.AddTestTablet("aa", "1.1.1.1", 1003, ks, "10-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "10-20", sbc2.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "10-20", sbc2.Tablet()) send := []*binlogdatapb.VEvent{ {Type: binlogdatapb.VEventType_JOURNAL, Journal: &binlogdatapb.Journal{ @@ -869,10 +922,13 @@ func TestVStreamJournalPartialMatch(t *testing.T) { } func TestResolveVStreamParams(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + name := "TestVStream" _ = createSandbox(name) hc := discovery.NewFakeHealthCheck(nil) - vsm := newTestVStreamManager(hc, newSandboxForCells([]string{"aa"}), "aa") + vsm := newTestVStreamManager(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") testcases := []struct { input *binlogdatapb.VGtid output *binlogdatapb.VGtid @@ -889,9 +945,44 @@ func TestResolveVStreamParams(t *testing.T) { input: &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ Keyspace: "TestVStream", + Gtid: "other", + }}, + }, + err: "if shards are unspecified, the Gtid value must be 'current' or empty", + }, { + // Verify that the function maps the input missing the shard to a list of all shards in the topology. + input: &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{{ + Keyspace: "TestVStream", + }}, + }, + output: &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{{ + Keyspace: "TestVStream", + Shard: "-20", + }, { + Keyspace: "TestVStream", + Shard: "20-40", + }, { + Keyspace: "TestVStream", + Shard: "40-60", + }, { + Keyspace: "TestVStream", + Shard: "60-80", + }, { + Keyspace: "TestVStream", + Shard: "80-a0", + }, { + Keyspace: "TestVStream", + Shard: "a0-c0", + }, { + Keyspace: "TestVStream", + Shard: "c0-e0", + }, { + Keyspace: "TestVStream", + Shard: "e0-", }}, }, - err: "if shards are unspecified, the Gtid value must be 'current'", }, { input: &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ @@ -983,17 +1074,49 @@ func TestResolveVStreamParams(t *testing.T) { assert.Equal(t, wantFilter, filter, tcase.input) require.False(t, flags.MinimizeSkew) } - // Special-case: empty keyspace because output is too big. - input := &binlogdatapb.VGtid{ - ShardGtids: []*binlogdatapb.ShardGtid{{ - Gtid: "current", - }}, + + // Special-case: empty keyspace or keyspace containing wildcards because output is too big. + // Verify that the function resolves input for multiple keyspaces into a list of all corresponding shards. + // Ensure that the number of shards returned is greater than the number of shards in a single keyspace named 'TestVStream.' + specialCases := []struct { + input *binlogdatapb.ShardGtid + }{ + { + input: &binlogdatapb.ShardGtid{ + Gtid: "current", + }, + }, + { + input: &binlogdatapb.ShardGtid{ + Keyspace: "/.*", + }, + }, + { + input: &binlogdatapb.ShardGtid{ + Keyspace: "/.*", + Gtid: "current", + }, + }, + { + input: &binlogdatapb.ShardGtid{ + Keyspace: "/Test.*", + }, + }, } - vgtid, _, _, err := vsm.resolveParams(context.Background(), topodatapb.TabletType_REPLICA, input, nil, nil) - require.NoError(t, err, input) - if got, want := len(vgtid.ShardGtids), 8; want >= got { - t.Errorf("len(vgtid.ShardGtids): %v, must be >%d", got, want) + for _, tcase := range specialCases { + input := &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{tcase.input}, + } + vgtid, _, _, err := vsm.resolveParams(context.Background(), topodatapb.TabletType_REPLICA, input, nil, nil) + require.NoError(t, err, tcase.input) + if got, expectTestVStreamShardNumber := len(vgtid.ShardGtids), 8; expectTestVStreamShardNumber >= got { + t.Errorf("len(vgtid.ShardGtids): %v, must be >%d", got, expectTestVStreamShardNumber) + } + for _, s := range vgtid.ShardGtids { + require.Equal(t, tcase.input.Gtid, s.Gtid) + } } + for _, minimizeSkew := range []bool{true, false} { t.Run(fmt.Sprintf("resolveParams MinimizeSkew %t", minimizeSkew), func(t *testing.T) { flags := &vtgatepb.VStreamFlags{MinimizeSkew: minimizeSkew} @@ -1013,14 +1136,16 @@ func TestResolveVStreamParams(t *testing.T) { } func TestVStreamIdleHeartbeat(t *testing.T) { + ctx := utils.LeakCheckContext(t) + cell := "aa" ks := "TestVStream" _ = createSandbox(ks) hc := discovery.NewFakeHealthCheck(nil) st := getSandboxTopo(ctx, cell, ks, []string{"-20"}) - vsm := newTestVStreamManager(hc, st, cell) + vsm := newTestVStreamManager(ctx, hc, st, cell) sbc0 := hc.AddTestTablet("aa", "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_PRIMARY, true, 1, nil) - addTabletToSandboxTopo(t, st, ks, "-20", sbc0.Tablet()) + addTabletToSandboxTopo(t, ctx, st, ks, "-20", sbc0.Tablet()) vgtid := &binlogdatapb.VGtid{ ShardGtids: []*binlogdatapb.ShardGtid{{ Keyspace: ks, @@ -1044,7 +1169,7 @@ func TestVStreamIdleHeartbeat(t *testing.T) { t.Run(tcase.name, func(t *testing.T) { var mu sync.Mutex var heartbeatCount int - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) go func() { vsm.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, nil, &vtgatepb.VStreamFlags{HeartbeatInterval: tcase.heartbeatInterval}, func(events []*binlogdatapb.VEvent) error { @@ -1067,13 +1192,14 @@ func TestVStreamIdleHeartbeat(t *testing.T) { } } -func newTestVStreamManager(hc discovery.HealthCheck, serv srvtopo.Server, cell string) *vstreamManager { - gw := NewTabletGateway(context.Background(), hc, serv, cell) +func newTestVStreamManager(ctx context.Context, hc discovery.HealthCheck, serv srvtopo.Server, cell string) *vstreamManager { + gw := NewTabletGateway(ctx, hc, serv, cell) srvResolver := srvtopo.NewResolver(serv, gw, cell) return newVStreamManager(srvResolver, serv, cell) } func startVStream(ctx context.Context, t *testing.T, vsm *vstreamManager, vgtid *binlogdatapb.VGtid, flags *vtgatepb.VStreamFlags) <-chan *binlogdatapb.VStreamResponse { + t.Helper() if flags == nil { flags = &vtgatepb.VStreamFlags{} } @@ -1090,7 +1216,8 @@ func startVStream(ctx context.Context, t *testing.T, vsm *vstreamManager, vgtid func verifyEvents(t *testing.T, ch <-chan *binlogdatapb.VStreamResponse, wants ...*binlogdatapb.VStreamResponse) { t.Helper() for i, want := range wants { - got := <-ch + val := <-ch + got := val.CloneVT() require.NotNil(t, got) for _, event := range got.Events { event.Timestamp = 0 @@ -1129,7 +1256,7 @@ func getVEvents(keyspace, shard string, count, idx int64) []*binlogdatapb.VEvent } func getSandboxTopo(ctx context.Context, cell string, keyspace string, shards []string) *sandboxTopo { - st := newSandboxForCells([]string{cell}) + st := newSandboxForCells(ctx, []string{cell}) ts := st.topoServer ts.CreateCellInfo(ctx, cell, &topodatapb.CellInfo{}) ts.CreateKeyspace(ctx, keyspace, &topodatapb.Keyspace{}) @@ -1139,7 +1266,7 @@ func getSandboxTopo(ctx context.Context, cell string, keyspace string, shards [] return st } -func addTabletToSandboxTopo(t *testing.T, st *sandboxTopo, ks, shard string, tablet *topodatapb.Tablet) { +func addTabletToSandboxTopo(t *testing.T, ctx context.Context, st *sandboxTopo, ks, shard string, tablet *topodatapb.Tablet) { _, err := st.topoServer.UpdateShardFields(ctx, ks, shard, func(si *topo.ShardInfo) error { si.PrimaryAlias = tablet.Alias return nil diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index 1c9a59e9f78..8d8cd2885a3 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -31,7 +31,6 @@ import ( "github.com/spf13/pflag" "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/cache" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/tb" @@ -39,21 +38,21 @@ import ( "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/vtgateservice" - - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" vtschema "vitess.io/vitess/go/vt/vtgate/schema" + "vitess.io/vitess/go/vt/vtgate/vtgateservice" ) var ( @@ -61,12 +60,11 @@ var ( normalizeQueries = true streamBufferSize = 32 * 1024 - terseErrors bool + terseErrors bool + truncateErrorLen int // plan cache related flag - queryPlanCacheSize = cache.DefaultConfig.MaxEntries - queryPlanCacheMemory = cache.DefaultConfig.MaxMemoryUsage - queryPlanCacheLFU bool + queryPlanCacheMemory int64 = 32 * 1024 * 1024 // 32mb maxMemoryRows = 300000 warnMemoryRows = 30000 @@ -76,8 +74,6 @@ var ( noScatter bool enableShardRouting bool - // TODO(deepthi): change these two vars to unexported and move to healthcheck.go when LegacyHealthcheck is removed - // healthCheckRetryDelay is the time to wait before retrying healthcheck healthCheckRetryDelay = 2 * time.Millisecond // healthCheckTimeout is the timeout on the RPC call to tablets @@ -100,8 +96,8 @@ var ( // vtgate schema tracking flags enableSchemaChangeSignal = true - schemaChangeUser string - queryTimeout int + + queryTimeout int // vtgate views flags enableViews bool @@ -112,16 +108,18 @@ var ( queryLogBufferSize = 10 messageStreamGracePeriod = 30 * time.Second + + // allowKillStmt to allow execution of kill statement. + allowKillStmt bool ) func registerFlags(fs *pflag.FlagSet) { fs.StringVar(&transactionMode, "transaction_mode", transactionMode, "SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit") fs.BoolVar(&normalizeQueries, "normalize_queries", normalizeQueries, "Rewrite queries with bind vars. Turn this off if the app itself sends normalized queries with bind vars.") fs.BoolVar(&terseErrors, "vtgate-config-terse-errors", terseErrors, "prevent bind vars from escaping in returned errors") + fs.IntVar(&truncateErrorLen, "truncate-error-len", truncateErrorLen, "truncate errors sent to client if they are longer than this value (0 means do not truncate)") fs.IntVar(&streamBufferSize, "stream_buffer_size", streamBufferSize, "the number of bytes sent from vtgate for each stream call. It's recommended to keep this value in sync with vttablet's query-server-config-stream-buffer-size.") - fs.Int64Var(&queryPlanCacheSize, "gate_query_cache_size", queryPlanCacheSize, "gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a cache. This config controls the expected amount of unique entries in the cache.") fs.Int64Var(&queryPlanCacheMemory, "gate_query_cache_memory", queryPlanCacheMemory, "gate server query cache size in bytes, maximum amount of memory to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.") - fs.BoolVar(&queryPlanCacheLFU, "gate_query_cache_lfu", cache.DefaultConfig.LFU, "gate server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries") fs.IntVar(&maxMemoryRows, "max_memory_rows", maxMemoryRows, "Maximum number of rows that will be held in memory for intermediate results as well as the final result.") fs.IntVar(&warnMemoryRows, "warn_memory_rows", warnMemoryRows, "Warning threshold for in-memory results. A row count higher than this amount will cause the VtGateWarnings.ResultsExceeded counter to be incremented.") fs.StringVar(&defaultDDLStrategy, "ddl_strategy", defaultDDLStrategy, "Set default strategy for DDL statements. Override with @@ddl_strategy session variable") @@ -140,12 +138,21 @@ func registerFlags(fs *pflag.FlagSet) { fs.BoolVar(&enableOnlineDDL, "enable_online_ddl", enableOnlineDDL, "Allow users to submit, review and control Online DDL") fs.BoolVar(&enableDirectDDL, "enable_direct_ddl", enableDirectDDL, "Allow users to submit direct DDL statements") fs.BoolVar(&enableSchemaChangeSignal, "schema_change_signal", enableSchemaChangeSignal, "Enable the schema tracker; requires queryserver-config-schema-change-signal to be enabled on the underlying vttablets for this to work") - fs.StringVar(&schemaChangeUser, "schema_change_signal_user", schemaChangeUser, "User to be used to send down query to vttablet to retrieve schema changes") fs.IntVar(&queryTimeout, "query-timeout", queryTimeout, "Sets the default query timeout (in ms). Can be overridden by session variable (query_timeout) or comment directive (QUERY_TIMEOUT_MS)") fs.StringVar(&queryLogToFile, "log_queries_to_file", queryLogToFile, "Enable query logging to the specified file") fs.IntVar(&queryLogBufferSize, "querylog-buffer-size", queryLogBufferSize, "Maximum number of buffered query logs before throttling log output") fs.DurationVar(&messageStreamGracePeriod, "message_stream_grace_period", messageStreamGracePeriod, "the amount of time to give for a vttablet to resume if it ends a message stream, usually because of a reparent.") fs.BoolVar(&enableViews, "enable-views", enableViews, "Enable views support in vtgate.") + fs.BoolVar(&allowKillStmt, "allow-kill-statement", allowKillStmt, "Allows the execution of kill statement") + + _ = fs.String("schema_change_signal_user", "", "User to be used to send down query to vttablet to retrieve schema changes") + _ = fs.MarkDeprecated("schema_change_signal_user", "schema tracking uses an internal api and does not require a user to be specified") + + fs.Int64("gate_query_cache_size", 0, "gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a cache. This config controls the expected amount of unique entries in the cache.") + _ = fs.MarkDeprecated("gate_query_cache_size", "`--gate_query_cache_size` is deprecated and will be removed in `v19.0`. This option only applied to LRU caches, which are now unsupported.") + + fs.Bool("gate_query_cache_lfu", false, "gate server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries") + _ = fs.MarkDeprecated("gate_query_cache_lfu", "`--gate_query_cache_lfu` is deprecated and will be removed in `v19.0`. The query cache always uses a LFU implementation now.") } func init() { servenv.OnParseFor("vtgate", registerFlags) @@ -172,8 +179,6 @@ func getTxMode() vtgatepb.TransactionMode { } var ( - rpcVTGate *VTGate - // vschemaCounters needs to be initialized before planner to // catch the initial load stats. vschemaCounters = stats.NewCountersWithSingleLabel("VtgateVSchemaCounts", "Vtgate vschema counts", "changes") @@ -185,6 +190,23 @@ var ( vstreamSkewDelayCount = stats.NewCounter("VStreamEventsDelayedBySkewAlignment", "Number of events that had to wait because the skew across shards was too high") + + vindexUnknownParams = stats.NewGauge("VindexUnknownParameters", "Number of parameterss unrecognized by Vindexes") + + timings = stats.NewMultiTimings( + "VtgateApi", + "VtgateApi timings", + []string{"Operation", "Keyspace", "DbType"}) + + rowsReturned = stats.NewCountersWithMultiLabels( + "VtgateApiRowsReturned", + "Rows returned through the VTgate API", + []string{"Operation", "Keyspace", "DbType"}) + + rowsAffected = stats.NewCountersWithMultiLabels( + "VtgateApiRowsAffected", + "Rows affected by a write (DML) operation through the VTgate API", + []string{"Operation", "Keyspace", "DbType"}) ) // VTGate is the rpc interface to vtgate. Only one instance @@ -226,17 +248,13 @@ func Init( tabletTypesToWait []topodatapb.TabletType, pv plancontext.PlannerVersion, ) *VTGate { - if rpcVTGate != nil { - log.Fatalf("VTGate already initialized") - } - // Build objects from low to high level. // Start with the gateway. If we can't reach the topology service, // we can't go on much further, so we log.Fatal out. // TabletGateway can create it's own healthcheck gw := NewTabletGateway(ctx, hc, serv, cell) gw.RegisterStats() - if err := gw.WaitForTablets(tabletTypesToWait); err != nil { + if err := gw.WaitForTablets(ctx, tabletTypesToWait); err != nil { log.Fatalf("tabletGateway.WaitForTablets failed: %v", err) } @@ -261,19 +279,33 @@ func Init( resolver := NewResolver(srvResolver, serv, cell, sc) vsm := newVStreamManager(srvResolver, serv, cell) + ts, err := serv.GetTopoServer() + if err != nil { + log.Fatalf("Unable to get Topo server: %v", err) + } + // Create a global cache to use for lookups of the sidecar database + // identifier in use by each keyspace. + _, created := sidecardb.NewIdentifierCache(func(ctx context.Context, keyspace string) (string, error) { + ki, err := ts.GetKeyspace(ctx, keyspace) + if err != nil { + return "", err + } + return ki.SidecarDbName, nil + }) + // This should never happen. + if !created { + log.Fatal("Failed to create a new sidecar database identifier cache during init as one already existed!") + } + var si SchemaInfo // default nil var st *vtschema.Tracker if enableSchemaChangeSignal { - st = vtschema.NewTracker(gw.hc.Subscribe(), schemaChangeUser, enableViews) - addKeyspaceToTracker(ctx, srvResolver, st, gw) + st = vtschema.NewTracker(gw.hc.Subscribe(), enableViews) + addKeyspacesToTracker(ctx, srvResolver, st, gw) si = st } - cacheCfg := &cache.Config{ - MaxEntries: queryPlanCacheSize, - MaxMemoryUsage: queryPlanCacheMemory, - LFU: queryPlanCacheLFU, - } + plans := DefaultPlanCache() executor := NewExecutor( ctx, @@ -283,12 +315,16 @@ func Init( normalizeQueries, warnShardedOnly, streamBufferSize, - cacheCfg, + plans, si, noScatter, pv, ) + if err := executor.defaultQueryLogger(); err != nil { + log.Fatalf("error initializing query logger: %v", err) + } + // connect the schema tracker with the vschema manager if enableSchemaChangeSignal { st.RegisterSignalReceiver(executor.vm.Rebuild) @@ -296,33 +332,10 @@ func Init( // TODO: call serv.WatchSrvVSchema here - rpcVTGate = &VTGate{ - executor: executor, - resolver: resolver, - vsm: vsm, - txConn: tc, - gw: gw, - timings: stats.NewMultiTimings( - "VtgateApi", - "VtgateApi timings", - []string{"Operation", "Keyspace", "DbType"}), - rowsReturned: stats.NewCountersWithMultiLabels( - "VtgateApiRowsReturned", - "Rows returned through the VTgate API", - []string{"Operation", "Keyspace", "DbType"}), - rowsAffected: stats.NewCountersWithMultiLabels( - "VtgateApiRowsAffected", - "Rows affected by a write (DML) operation through the VTgate API", - []string{"Operation", "Keyspace", "DbType"}), - - logExecute: logutil.NewThrottledLogger("Execute", 5*time.Second), - logPrepare: logutil.NewThrottledLogger("Prepare", 5*time.Second), - logStreamExecute: logutil.NewThrottledLogger("StreamExecute", 5*time.Second), - } - - _ = stats.NewRates("QPSByOperation", stats.CounterForDimension(rpcVTGate.timings, "Operation"), 15, 1*time.Minute) - _ = stats.NewRates("QPSByKeyspace", stats.CounterForDimension(rpcVTGate.timings, "Keyspace"), 15, 1*time.Minute) - _ = stats.NewRates("QPSByDbType", stats.CounterForDimension(rpcVTGate.timings, "DbType"), 15*60/5, 5*time.Second) + vtgateInst := newVTGate(executor, resolver, vsm, tc, gw) + _ = stats.NewRates("QPSByOperation", stats.CounterForDimension(vtgateInst.timings, "Operation"), 15, 1*time.Minute) + _ = stats.NewRates("QPSByKeyspace", stats.CounterForDimension(vtgateInst.timings, "Keyspace"), 15, 1*time.Minute) + _ = stats.NewRates("QPSByDbType", stats.CounterForDimension(vtgateInst.timings, "DbType"), 15*60/5, 5*time.Second) _ = stats.NewRates("ErrorsByOperation", stats.CounterForDimension(errorCounts, "Operation"), 15, 1*time.Minute) _ = stats.NewRates("ErrorsByKeyspace", stats.CounterForDimension(errorCounts, "Keyspace"), 15, 1*time.Minute) @@ -331,29 +344,28 @@ func Init( servenv.OnRun(func() { for _, f := range RegisterVTGates { - f(rpcVTGate) + f(vtgateInst) } if st != nil && enableSchemaChangeSignal { st.Start() } + srv := initMySQLProtocol(vtgateInst) + servenv.OnTermSync(srv.shutdownMysqlProtocolAndDrain) + servenv.OnClose(srv.rollbackAtShutdown) }) servenv.OnTerm(func() { if st != nil && enableSchemaChangeSignal { st.Stop() } }) - rpcVTGate.registerDebugHealthHandler() - rpcVTGate.registerDebugEnvHandler() - err := initQueryLogger(rpcVTGate) - if err != nil { - log.Fatalf("error initializing query logger: %v", err) - } + vtgateInst.registerDebugHealthHandler() + vtgateInst.registerDebugEnvHandler() initAPI(gw.hc) - return rpcVTGate + return vtgateInst } -func addKeyspaceToTracker(ctx context.Context, srvResolver *srvtopo.Resolver, st *vtschema.Tracker, gw *TabletGateway) { +func addKeyspacesToTracker(ctx context.Context, srvResolver *srvtopo.Resolver, st *vtschema.Tracker, gw *TabletGateway) { keyspaces, err := srvResolver.GetAllKeyspaces(ctx) if err != nil { log.Warningf("Unable to get all keyspaces: %v", err) @@ -392,13 +404,13 @@ func resolveAndLoadKeyspace(ctx context.Context, srvResolver *srvtopo.Resolver, } func (vtg *VTGate) registerDebugEnvHandler() { - http.HandleFunc("/debug/env", func(w http.ResponseWriter, r *http.Request) { + servenv.HTTPHandleFunc("/debug/env", func(w http.ResponseWriter, r *http.Request) { debugEnvHandler(vtg, w, r) }) } func (vtg *VTGate) registerDebugHealthHandler() { - http.HandleFunc("/debug/health", func(w http.ResponseWriter, r *http.Request) { + servenv.HTTPHandleFunc("/debug/health", func(w http.ResponseWriter, r *http.Request) { if err := acl.CheckAccessHTTP(r, acl.MONITORING); err != nil { acl.SendError(w, err) return @@ -423,8 +435,8 @@ func (vtg *VTGate) Gateway() *TabletGateway { return vtg.gw } -// Execute executes a non-streaming query. This is a V3 function. -func (vtg *VTGate) Execute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (newSession *vtgatepb.Session, qr *sqltypes.Result, err error) { +// Execute executes a non-streaming query. +func (vtg *VTGate) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (newSession *vtgatepb.Session, qr *sqltypes.Result, err error) { // In this context, we don't care if we can't fully parse destination destKeyspace, destTabletType, _, _ := vtg.executor.ParseDestinationTarget(session.TargetString) statsKey := []string{"Execute", destKeyspace, topoproto.TabletTypeLString(destTabletType)} @@ -434,7 +446,7 @@ func (vtg *VTGate) Execute(ctx context.Context, session *vtgatepb.Session, sql s err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", bvErr) } else { safeSession := NewSafeSession(session) - qr, err = vtg.executor.Execute(ctx, "Execute", safeSession, sql, bindVariables) + qr, err = vtg.executor.Execute(ctx, mysqlCtx, "Execute", safeSession, sql, bindVariables) safeSession.RemoveInternalSavepoint() } if err == nil { @@ -452,7 +464,7 @@ func (vtg *VTGate) Execute(ctx context.Context, session *vtgatepb.Session, sql s return session, nil, err } -// ExecuteBatch executes a batch of queries. This is a V3 function. +// ExecuteBatch executes a batch of queries. func (vtg *VTGate) ExecuteBatch(ctx context.Context, session *vtgatepb.Session, sqlList []string, bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) { // In this context, we don't care if we can't fully parse destination destKeyspace, destTabletType, _, _ := vtg.executor.ParseDestinationTarget(session.TargetString) @@ -471,7 +483,7 @@ func (vtg *VTGate) ExecuteBatch(ctx context.Context, session *vtgatepb.Session, if len(bindVariablesList) != 0 { bv = bindVariablesList[i] } - session, qrl[i].QueryResult, qrl[i].QueryError = vtg.Execute(ctx, session, sql, bv) + session, qrl[i].QueryResult, qrl[i].QueryError = vtg.Execute(ctx, nil, session, sql, bv) if qr := qrl[i].QueryResult; qr != nil { vtg.rowsReturned.Add(statsKey, int64(len(qr.Rows))) vtg.rowsAffected.Add(statsKey, int64(qr.RowsAffected)) @@ -480,23 +492,23 @@ func (vtg *VTGate) ExecuteBatch(ctx context.Context, session *vtgatepb.Session, return session, qrl, nil } -// StreamExecute executes a streaming query. This is a V3 function. -// Note we guarantee the callback will not be called concurrently -// by multiple go routines. -func (vtg *VTGate) StreamExecute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) error { +// StreamExecute executes a streaming query. +// Note we guarantee the callback will not be called concurrently by multiple go routines. +func (vtg *VTGate) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) { // In this context, we don't care if we can't fully parse destination destKeyspace, destTabletType, _, _ := vtg.executor.ParseDestinationTarget(session.TargetString) statsKey := []string{"StreamExecute", destKeyspace, topoproto.TabletTypeLString(destTabletType)} defer vtg.timings.Record(statsKey, time.Now()) + safeSession := NewSafeSession(session) var err error if bvErr := sqltypes.ValidateBindVariables(bindVariables); bvErr != nil { err = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", bvErr) } else { - safeSession := NewSafeSession(session) err = vtg.executor.StreamExecute( ctx, + mysqlCtx, "StreamExecute", safeSession, sql, @@ -514,9 +526,9 @@ func (vtg *VTGate) StreamExecute(ctx context.Context, session *vtgatepb.Session, "BindVariables": bindVariables, "Session": session, } - return recordAndAnnotateError(err, statsKey, query, vtg.logStreamExecute) + return safeSession.Session, recordAndAnnotateError(err, statsKey, query, vtg.logStreamExecute) } - return nil + return safeSession.Session, nil } // CloseSession closes the session, rolling back any implicit transactions. This has the @@ -565,7 +577,7 @@ func (vtg *VTGate) VStream(ctx context.Context, tabletType topodatapb.TabletType // GetGatewayCacheStatus returns a displayable version of the Gateway cache. func (vtg *VTGate) GetGatewayCacheStatus() TabletCacheStatusList { - return vtg.resolver.GetGatewayCacheStatus() + return vtg.gw.CacheStatus() } // VSchemaStats returns the loaded vschema stats. @@ -647,3 +659,20 @@ func (vtg *VTGate) HandlePanic(err *error) { errorCounts.Add([]string{"Panic", "Unknown", "Unknown", vtrpcpb.Code_INTERNAL.String()}, 1) } } + +func newVTGate(executor *Executor, resolver *Resolver, vsm *vstreamManager, tc *TxConn, gw *TabletGateway) *VTGate { + return &VTGate{ + executor: executor, + resolver: resolver, + vsm: vsm, + txConn: tc, + gw: gw, + timings: timings, + rowsReturned: rowsReturned, + rowsAffected: rowsAffected, + + logExecute: logutil.NewThrottledLogger("Execute", 5*time.Second), + logPrepare: logutil.NewThrottledLogger("Prepare", 5*time.Second), + logStreamExecute: logutil.NewThrottledLogger("StreamExecute", 5*time.Second), + } +} diff --git a/go/vt/vtgate/vtgate_test.go b/go/vt/vtgate/vtgate_test.go index 39a5ee9a77e..6f21158d7bb 100644 --- a/go/vt/vtgate/vtgate_test.go +++ b/go/vt/vtgate/vtgate_test.go @@ -22,12 +22,11 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/require" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/vterrors" @@ -41,56 +40,20 @@ import ( // This file uses the sandbox_test framework. -var hcVTGateTest *discovery.FakeHealthCheck - var executeOptions = &querypb.ExecuteOptions{ IncludedFields: querypb.ExecuteOptions_TYPE_ONLY, } -var primarySession *vtgatepb.Session - -func init() { - createSandbox(KsTestUnsharded).VSchema = ` -{ - "sharded": false, - "tables": { - "t1": {} - } -} -` - createSandbox(KsTestBadVSchema).VSchema = ` -{ - "sharded": true, - "tables": { - "t2": { - "auto_increment": { - "column": "id", - "sequence": "id_seq" - } - } - } -} -` - hcVTGateTest = discovery.NewFakeHealthCheck(nil) - transactionMode = "MULTI" - Init(context.Background(), hcVTGateTest, newSandboxForCells([]string{"aa"}), "aa", nil, querypb.ExecuteOptions_Gen4) - - mysqlServerPort = 0 - mysqlAuthServerImpl = "none" - initMySQLProtocol() -} - func TestVTGateExecute(t *testing.T) { - counts := rpcVTGate.timings.Timings.Counts() + vtg, sbc, ctx := createVtgateEnv(t) + counts := vtg.timings.Timings.Counts() - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - _, qr, err := rpcVTGate.Execute( - context.Background(), + _, qr, err := vtg.Execute( + ctx, + nil, &vtgatepb.Session{ Autocommit: true, - TargetString: "@primary", + TargetString: KsTestUnsharded + "@primary", Options: executeOptions, }, "select id from t1", @@ -107,11 +70,11 @@ func TestVTGateExecute(t *testing.T) { t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc.Options[0], executeOptions) } - newCounts := rpcVTGate.timings.Timings.Counts() + newCounts := vtg.timings.Timings.Counts() require.Contains(t, newCounts, "All") require.Equal(t, counts["All"]+1, newCounts["All"]) - require.Contains(t, newCounts, "Execute..primary") - require.Equal(t, counts["Execute..primary"]+1, newCounts["Execute..primary"]) + require.Contains(t, newCounts, "Execute.TestUnsharded.primary") + require.Equal(t, counts["Execute.TestUnsharded.primary"]+1, newCounts["Execute.TestUnsharded.primary"]) for k, v := range newCounts { if strings.HasPrefix(k, "Prepare") { @@ -121,16 +84,16 @@ func TestVTGateExecute(t *testing.T) { } func TestVTGateExecuteError(t *testing.T) { - counts := errorCounts.Counts() + vtg, _, ctx := createVtgateEnv(t) + + counts := vtg.timings.Timings.Counts() - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - _, qr, err := rpcVTGate.Execute( - context.Background(), + _, qr, err := vtg.Execute( + ctx, + nil, &vtgatepb.Session{ Autocommit: true, - TargetString: "@primary", + TargetString: KsTestUnsharded + "@primary", Options: executeOptions, }, "bad select id from t1", @@ -140,8 +103,8 @@ func TestVTGateExecuteError(t *testing.T) { require.Nil(t, qr) newCounts := errorCounts.Counts() - require.Contains(t, newCounts, "Execute..primary.INVALID_ARGUMENT") - require.Equal(t, counts["Execute..primary.INVALID_ARGUMENT"]+1, newCounts["Execute..primary.INVALID_ARGUMENT"]) + require.Contains(t, newCounts, "Execute.TestUnsharded.primary.INVALID_ARGUMENT") + require.Equal(t, counts["Execute.TestUnsharded.primary.INVALID_ARGUMENT"]+1, newCounts["Execute.TestUnsharded.primary.INVALID_ARGUMENT"]) for k, v := range newCounts { if strings.HasPrefix(k, "Prepare") { @@ -151,16 +114,14 @@ func TestVTGateExecuteError(t *testing.T) { } func TestVTGatePrepare(t *testing.T) { - counts := rpcVTGate.timings.Timings.Counts() + vtg, sbc, ctx := createVtgateEnv(t) - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - _, qr, err := rpcVTGate.Prepare( - context.Background(), + counts := vtg.timings.Timings.Counts() + _, qr, err := vtg.Prepare( + ctx, &vtgatepb.Session{ Autocommit: true, - TargetString: "@primary", + TargetString: KsTestUnsharded + "@primary", Options: executeOptions, }, "select id from t1", @@ -176,11 +137,11 @@ func TestVTGatePrepare(t *testing.T) { t.Errorf("got ExecuteOptions \n%+v, want \n%+v", sbc.Options[0], executeOptions) } - newCounts := rpcVTGate.timings.Timings.Counts() + newCounts := vtg.timings.Timings.Counts() require.Contains(t, newCounts, "All") require.Equal(t, counts["All"]+1, newCounts["All"]) - require.Contains(t, newCounts, "Prepare..primary") - require.Equal(t, counts["Prepare..primary"]+1, newCounts["Prepare..primary"]) + require.Contains(t, newCounts, "Prepare.TestUnsharded.primary") + require.Equal(t, counts["Prepare.TestUnsharded.primary"]+1, newCounts["Prepare.TestUnsharded.primary"]) for k, v := range newCounts { if strings.HasPrefix(k, "Execute") { @@ -190,16 +151,15 @@ func TestVTGatePrepare(t *testing.T) { } func TestVTGatePrepareError(t *testing.T) { + vtg, _, ctx := createVtgateEnv(t) + counts := errorCounts.Counts() - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - _, qr, err := rpcVTGate.Prepare( - context.Background(), + _, qr, err := vtg.Prepare( + ctx, &vtgatepb.Session{ Autocommit: true, - TargetString: "@primary", + TargetString: KsTestUnsharded + "@primary", Options: executeOptions, }, "bad select id from t1", @@ -209,8 +169,8 @@ func TestVTGatePrepareError(t *testing.T) { require.Nil(t, qr) newCounts := errorCounts.Counts() - require.Contains(t, newCounts, "Prepare..primary.INTERNAL") - require.Equal(t, counts["Prepare..primary.INTERNAL"]+1, newCounts["Prepare..primary.INTERNAL"]) + require.Contains(t, newCounts, "Prepare.TestUnsharded.primary.INTERNAL") + require.Equal(t, counts["Prepare.TestUnsharded.primary.INTERNAL"]+1, newCounts["Prepare.TestUnsharded.primary.INTERNAL"]) for k, v := range newCounts { if strings.HasPrefix(k, "Execute") { @@ -220,15 +180,14 @@ func TestVTGatePrepareError(t *testing.T) { } func TestVTGateExecuteWithKeyspaceShard(t *testing.T) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) + vtg, _, ctx := createVtgateEnv(t) // Valid keyspace. - _, qr, err := rpcVTGate.Execute( - context.Background(), + _, qr, err := vtg.Execute( + ctx, + nil, &vtgatepb.Session{ - TargetString: KsTestUnsharded, + TargetString: KsTestSharded + ":-20@primary", }, "select id from none", nil, @@ -241,8 +200,9 @@ func TestVTGateExecuteWithKeyspaceShard(t *testing.T) { utils.MustMatch(t, &wantQr, qr) // Invalid keyspace. - _, _, err = rpcVTGate.Execute( - context.Background(), + _, _, err = vtg.Execute( + ctx, + nil, &vtgatepb.Session{ TargetString: "invalid_keyspace", }, @@ -253,10 +213,11 @@ func TestVTGateExecuteWithKeyspaceShard(t *testing.T) { assert.EqualError(t, err, want) // Valid keyspace/shard. - _, qr, err = rpcVTGate.Execute( - context.Background(), + _, qr, err = vtg.Execute( + ctx, + nil, &vtgatepb.Session{ - TargetString: KsTestUnsharded + ":0@primary", + TargetString: KsTestSharded + ":-20@primary", }, "select id from none", nil, @@ -267,29 +228,28 @@ func TestVTGateExecuteWithKeyspaceShard(t *testing.T) { utils.MustMatch(t, &wantQr, qr) // Invalid keyspace/shard. - _, _, err = rpcVTGate.Execute( - context.Background(), + _, _, err = vtg.Execute( + ctx, + nil, &vtgatepb.Session{ - TargetString: KsTestUnsharded + ":noshard@primary", + TargetString: KsTestSharded + ":noshard@primary", }, "select id from none", nil, ) require.Error(t, err) - require.Contains(t, err.Error(), `no healthy tablet available for 'keyspace:"TestUnsharded" shard:"noshard" tablet_type:PRIMARY`) + require.Contains(t, err.Error(), `no healthy tablet available for 'keyspace:"TestExecutor" shard:"noshard" tablet_type:PRIMARY`) } func TestVTGateStreamExecute(t *testing.T) { - ks := KsTestUnsharded - shard := "0" - createSandbox(ks) - hcVTGateTest.Reset() - sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, ks, shard, topodatapb.TabletType_PRIMARY, true, 1, nil) + vtg, sbc, ctx := createVtgateEnv(t) + var qrs []*sqltypes.Result - err := rpcVTGate.StreamExecute( - context.Background(), + _, err := vtg.StreamExecute( + ctx, + nil, &vtgatepb.Session{ - TargetString: "@primary", + TargetString: KsTestUnsharded + "@primary", Options: executeOptions, }, "select id from t1", @@ -312,10 +272,8 @@ func TestVTGateStreamExecute(t *testing.T) { } func TestVTGateBindVarError(t *testing.T) { - ks := KsTestUnsharded - createSandbox(ks) - hcVTGateTest.Reset() - ctx := context.Background() + vtg, _, ctx := createVtgateEnv(t) + session := &vtgatepb.Session{} bindVars := map[string]*querypb.BindVariable{ "v": { @@ -331,19 +289,20 @@ func TestVTGateBindVarError(t *testing.T) { }{{ name: "Execute", f: func() error { - _, _, err := rpcVTGate.Execute(ctx, session, "", bindVars) + _, _, err := vtg.Execute(ctx, nil, session, "", bindVars) return err }, }, { name: "ExecuteBatch", f: func() error { - _, _, err := rpcVTGate.ExecuteBatch(ctx, session, []string{""}, []map[string]*querypb.BindVariable{bindVars}) + _, _, err := vtg.ExecuteBatch(ctx, session, []string{""}, []map[string]*querypb.BindVariable{bindVars}) return err }, }, { name: "StreamExecute", f: func() error { - return rpcVTGate.StreamExecute(ctx, session, "", bindVars, func(_ *sqltypes.Result) error { return nil }) + _, err := vtg.StreamExecute(ctx, nil, session, "", bindVars, func(_ *sqltypes.Result) error { return nil }) + return err }, }} for _, tcase := range tcases { @@ -353,15 +312,20 @@ func TestVTGateBindVarError(t *testing.T) { } } -func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before func(sbc *sandboxconn.SandboxConn), after func(sbc *sandboxconn.SandboxConn), expected vtrpcpb.Code) { +func testErrorPropagation(t *testing.T, ctx context.Context, vtg *VTGate, sbcs []*sandboxconn.SandboxConn, before func(sbc *sandboxconn.SandboxConn), after func(sbc *sandboxconn.SandboxConn), expected vtrpcpb.Code) { // Execute for _, sbc := range sbcs { before(sbc) } - _, _, err := rpcVTGate.Execute( - context.Background(), - primarySession, + session := &vtgatepb.Session{ + TargetString: KsTestUnsharded + "@primary", + } + + _, _, err := vtg.Execute( + ctx, + nil, + session, "select id from t1", nil, ) @@ -381,9 +345,10 @@ func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before for _, sbc := range sbcs { before(sbc) } - err = rpcVTGate.StreamExecute( - context.Background(), - primarySession, + _, err = vtg.StreamExecute( + ctx, + nil, + session, "select id from t1", nil, func(r *sqltypes.Result) error { @@ -408,87 +373,79 @@ func testErrorPropagation(t *testing.T, sbcs []*sandboxconn.SandboxConn, before // tablet and a rdonly tablet because we don't control the routing of // Commit. func TestErrorPropagation(t *testing.T) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - // create a new session each time so that ShardSessions don't get re-used across tests - primarySession = &vtgatepb.Session{ - TargetString: "@primary", - } + vtg, sbc, ctx := createVtgateEnv(t) - sbcm := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - sbcrdonly := hcVTGateTest.AddTestTablet("aa", "1.1.1.2", 1001, KsTestUnsharded, "0", topodatapb.TabletType_RDONLY, true, 1, nil) sbcs := []*sandboxconn.SandboxConn{ - sbcm, - sbcrdonly, + sbc, } - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_CANCELED] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_CANCELED] = 0 }, vtrpcpb.Code_CANCELED) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_UNKNOWN] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_UNKNOWN] = 0 }, vtrpcpb.Code_UNKNOWN) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 0 }, vtrpcpb.Code_INVALID_ARGUMENT) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_DEADLINE_EXCEEDED] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_DEADLINE_EXCEEDED] = 0 }, vtrpcpb.Code_DEADLINE_EXCEEDED) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_ALREADY_EXISTS] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_ALREADY_EXISTS] = 0 }, vtrpcpb.Code_ALREADY_EXISTS) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_PERMISSION_DENIED] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_PERMISSION_DENIED] = 0 }, vtrpcpb.Code_PERMISSION_DENIED) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 0 }, vtrpcpb.Code_RESOURCE_EXHAUSTED) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 0 }, vtrpcpb.Code_FAILED_PRECONDITION) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_ABORTED] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_ABORTED] = 0 }, vtrpcpb.Code_ABORTED) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_INTERNAL] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_INTERNAL] = 0 }, vtrpcpb.Code_INTERNAL) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_UNAVAILABLE] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_UNAVAILABLE] = 0 }, vtrpcpb.Code_UNAVAILABLE) - testErrorPropagation(t, sbcs, func(sbc *sandboxconn.SandboxConn) { + testErrorPropagation(t, ctx, vtg, sbcs, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_UNAUTHENTICATED] = 20 }, func(sbc *sandboxconn.SandboxConn) { sbc.MustFailCodes[vtrpcpb.Code_UNAUTHENTICATED] = 0 @@ -498,28 +455,16 @@ func TestErrorPropagation(t *testing.T) { // This test makes sure that if we start a transaction and hit a critical // error, a rollback is issued. func TestErrorIssuesRollback(t *testing.T) { - createSandbox(KsTestUnsharded) - hcVTGateTest.Reset() - sbc := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) + vtg, sbc, ctx := createVtgateEnv(t) // Start a transaction, send one statement. // Simulate an error that should trigger a rollback: // vtrpcpb.Code_ABORTED case. - session, _, err := rpcVTGate.Execute( - context.Background(), - &vtgatepb.Session{}, - "begin", - nil, - ) + session, _, err := vtg.Execute(ctx, nil, &vtgatepb.Session{TargetString: KsTestUnsharded + "@primary"}, "begin", nil) if err != nil { t.Fatalf("cannot start a transaction: %v", err) } - session, _, err = rpcVTGate.Execute( - context.Background(), - session, - "select id from t1", - nil, - ) + session, _, err = vtg.Execute(ctx, nil, session, "select id from t1", nil) if err != nil { t.Fatalf("want nil, got %v", err) } @@ -527,12 +472,7 @@ func TestErrorIssuesRollback(t *testing.T) { t.Errorf("want 0, got %d", sbc.RollbackCount.Load()) } sbc.MustFailCodes[vtrpcpb.Code_ABORTED] = 20 - _, _, err = rpcVTGate.Execute( - context.Background(), - session, - "select id from t1", - nil, - ) + _, _, err = vtg.Execute(ctx, nil, session, "select id from t1", nil) if err == nil { t.Fatalf("want error but got nil") } @@ -545,21 +485,11 @@ func TestErrorIssuesRollback(t *testing.T) { // Start a transaction, send one statement. // Simulate an error that should trigger a rollback: // vtrpcpb.ErrorCode_RESOURCE_EXHAUSTED case. - session, _, err = rpcVTGate.Execute( - context.Background(), - &vtgatepb.Session{}, - "begin", - nil, - ) + session, _, err = vtg.Execute(ctx, nil, &vtgatepb.Session{TargetString: KsTestUnsharded + "@primary"}, "begin", nil) if err != nil { t.Fatalf("cannot start a transaction: %v", err) } - session, _, err = rpcVTGate.Execute( - context.Background(), - session, - "select id from t1", - nil, - ) + session, _, err = vtg.Execute(ctx, nil, session, "select id from t1", nil) if err != nil { t.Fatalf("want nil, got %v", err) } @@ -567,12 +497,7 @@ func TestErrorIssuesRollback(t *testing.T) { t.Errorf("want 0, got %d", sbc.RollbackCount.Load()) } sbc.MustFailCodes[vtrpcpb.Code_RESOURCE_EXHAUSTED] = 20 - _, _, err = rpcVTGate.Execute( - context.Background(), - session, - "select id from t1", - nil, - ) + _, _, err = vtg.Execute(ctx, nil, session, "select id from t1", nil) if err == nil { t.Fatalf("want error but got nil") } @@ -585,21 +510,11 @@ func TestErrorIssuesRollback(t *testing.T) { // Start a transaction, send one statement. // Simulate an error that should *not* trigger a rollback: // vtrpcpb.Code_ALREADY_EXISTS case. - session, _, err = rpcVTGate.Execute( - context.Background(), - &vtgatepb.Session{}, - "begin", - nil, - ) + session, _, err = vtg.Execute(ctx, nil, &vtgatepb.Session{TargetString: KsTestUnsharded + "@primary"}, "begin", nil) if err != nil { t.Fatalf("cannot start a transaction: %v", err) } - session, _, err = rpcVTGate.Execute( - context.Background(), - session, - "select id from t1", - nil, - ) + session, _, err = vtg.Execute(ctx, nil, session, "select id from t1", nil) if err != nil { t.Fatalf("want nil, got %v", err) } @@ -607,12 +522,7 @@ func TestErrorIssuesRollback(t *testing.T) { t.Errorf("want 0, got %d", sbc.RollbackCount.Load()) } sbc.MustFailCodes[vtrpcpb.Code_ALREADY_EXISTS] = 20 - _, _, err = rpcVTGate.Execute( - context.Background(), - session, - "select id from t1", - nil, - ) + _, _, err = vtg.Execute(ctx, nil, session, "select id from t1", nil) if err == nil { t.Fatalf("want error but got nil") } @@ -643,33 +553,68 @@ var shardedVSchema = ` } ` +var shardedVSchemaUnknownParams = ` +{ + "sharded": true, + "vindexes": { + "hash_index": { + "type": "hash", + "params": { + "hello": "world", + "goodbye": "world" + } + }, + "binary_index": { + "type": "binary", + "params": { + "foo": "bar" + } + } + }, + "tables": { + "sp_tbl": { + "column_vindexes": [ + { + "column": "user_id", + "name": "hash_index" + } + ] + } + } +} +` + func TestMultiInternalSavepointVtGate(t *testing.T) { - s := createSandbox(KsTestSharded) + vtg, _, ctx := createVtgateEnv(t) + + const customKeyspace = "CustomSharding" + s := createSandbox(customKeyspace) s.ShardSpec = "-40-80-" s.VSchema = shardedVSchema srvSchema := getSandboxSrvVSchema() - rpcVTGate.executor.vm.VSchemaUpdate(srvSchema, nil) - hcVTGateTest.Reset() + vtg.executor.vm.VSchemaUpdate(srvSchema, nil) - sbc1 := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1, KsTestSharded, "-40", topodatapb.TabletType_PRIMARY, true, 1, nil) - sbc2 := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 2, KsTestSharded, "40-80", topodatapb.TabletType_PRIMARY, true, 1, nil) - sbc3 := hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 3, KsTestSharded, "80-", topodatapb.TabletType_PRIMARY, true, 1, nil) + hc := vtg.resolver.scatterConn.gateway.hc.(*discovery.FakeHealthCheck) - logChan := QueryLogger.Subscribe("Test") - defer QueryLogger.Unsubscribe(logChan) + sbc1 := hc.AddTestTablet("aa", "-40", 1, customKeyspace, "-40", topodatapb.TabletType_PRIMARY, true, 1, nil) + sbc2 := hc.AddTestTablet("aa", "40-80", 1, customKeyspace, "40-80", topodatapb.TabletType_PRIMARY, true, 1, nil) + sbc3 := hc.AddTestTablet("aa", "80-", 1, customKeyspace, "80-", topodatapb.TabletType_PRIMARY, true, 1, nil) - session := &vtgatepb.Session{Autocommit: true} + logChan := vtg.executor.queryLogger.Subscribe("Test") + defer vtg.executor.queryLogger.Unsubscribe(logChan) + + session := &vtgatepb.Session{Autocommit: true, TargetString: customKeyspace + "@primary"} require.True(t, session.GetAutocommit()) require.False(t, session.InTransaction) var err error - session, _, err = rpcVTGate.Execute(context.Background(), session, "begin", nil) + session, _, err = vtg.Execute(ctx, nil, session, "begin", nil) require.NoError(t, err) require.True(t, session.GetAutocommit()) require.True(t, session.InTransaction) // this query goes to multiple shards so internal savepoint will be created. - session, _, err = rpcVTGate.Execute(context.Background(), session, "insert into sp_tbl(user_id) values (1), (3)", nil) + session, _, err = vtg.Execute(ctx, nil, session, "insert into sp_tbl(user_id) values (1), (3)", nil) require.NoError(t, err) require.True(t, session.GetAutocommit()) require.True(t, session.InTransaction) @@ -686,6 +631,7 @@ func TestMultiInternalSavepointVtGate(t *testing.T) { "vtg2": sqltypes.Int64BindVariable(3), }, }} + assertQueriesWithSavepoint(t, sbc1, wantQ) wantQ[1].Sql = "insert into sp_tbl(user_id) values (:_user_id_1)" assertQueriesWithSavepoint(t, sbc2, wantQ) @@ -696,7 +642,7 @@ func TestMultiInternalSavepointVtGate(t *testing.T) { sbc2.Queries = nil // multi shard so new savepoint will be created. - session, _, err = rpcVTGate.Execute(context.Background(), session, "insert into sp_tbl(user_id) values (2), (4)", nil) + session, _, err = vtg.Execute(ctx, nil, session, "insert into sp_tbl(user_id) values (2), (4)", nil) require.NoError(t, err) wantQ = []*querypb.BoundQuery{{ Sql: "savepoint x", @@ -717,7 +663,7 @@ func TestMultiInternalSavepointVtGate(t *testing.T) { sbc3.Queries = nil // single shard so no savepoint will be created and neither any old savepoint will be executed - _, _, err = rpcVTGate.Execute(context.Background(), session, "insert into sp_tbl(user_id) values (5)", nil) + _, _, err = vtg.Execute(ctx, nil, session, "insert into sp_tbl(user_id) values (5)", nil) require.NoError(t, err) wantQ = []*querypb.BoundQuery{{ Sql: "insert into sp_tbl(user_id) values (:_user_id_0)", @@ -728,10 +674,56 @@ func TestMultiInternalSavepointVtGate(t *testing.T) { }} assertQueriesWithSavepoint(t, sbc2, wantQ) - testQueryLog(t, logChan, "Execute", "BEGIN", "begin", 0) - testQueryLog(t, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 0) - testQueryLog(t, logChan, "Execute", "INSERT", "insert into sp_tbl(user_id) values (:vtg1), (:vtg2)", 2) - testQueryLog(t, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint y", 2) - testQueryLog(t, logChan, "Execute", "INSERT", "insert into sp_tbl(user_id) values (:vtg1), (:vtg2)", 2) - testQueryLog(t, logChan, "Execute", "INSERT", "insert into sp_tbl(user_id) values (:vtg1)", 1) + testQueryLog(t, vtg.executor, logChan, "Execute", "BEGIN", "begin", 0) + testQueryLog(t, vtg.executor, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint x", 0) + testQueryLog(t, vtg.executor, logChan, "Execute", "INSERT", "insert into sp_tbl(user_id) values (:vtg1 /* INT64 */), (:vtg2 /* INT64 */)", 2) + testQueryLog(t, vtg.executor, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint y", 2) + testQueryLog(t, vtg.executor, logChan, "Execute", "INSERT", "insert into sp_tbl(user_id) values (:vtg1 /* INT64 */), (:vtg2 /* INT64 */)", 2) + testQueryLog(t, vtg.executor, logChan, "Execute", "INSERT", "insert into sp_tbl(user_id) values (:vtg1 /* INT64 */)", 1) +} + +func TestVSchemaVindexUnknownParams(t *testing.T) { + vtg, _, _ := createVtgateEnv(t) + + const customKeyspace = "CustomSharding" + s := createSandbox(customKeyspace) + s.ShardSpec = "-40-80-" + s.VSchema = shardedVSchema + srvSchema := getSandboxSrvVSchema() + vtg.executor.vm.VSchemaUpdate(srvSchema, nil) + + hc := vtg.resolver.scatterConn.gateway.hc.(*discovery.FakeHealthCheck) + _ = hc.AddTestTablet("aa", "-40", 1, customKeyspace, "-40", topodatapb.TabletType_PRIMARY, true, 1, nil) + _ = hc.AddTestTablet("aa", "40-80", 1, customKeyspace, "40-80", topodatapb.TabletType_PRIMARY, true, 1, nil) + _ = hc.AddTestTablet("aa", "80-", 1, customKeyspace, "80-", topodatapb.TabletType_PRIMARY, true, 1, nil) + + unknownParams := vindexUnknownParams.Get() + require.Equal(t, int64(0), unknownParams) + + s.VSchema = shardedVSchemaUnknownParams + srvSchema = getSandboxSrvVSchema() + vtg.executor.vm.VSchemaUpdate(srvSchema, nil) + + unknownParams = vindexUnknownParams.Get() + require.Equal(t, int64(3), unknownParams) + + s.VSchema = shardedVSchema + srvSchema = getSandboxSrvVSchema() + vtg.executor.vm.VSchemaUpdate(srvSchema, nil) + + unknownParams = vindexUnknownParams.Get() + require.Equal(t, int64(0), unknownParams) +} + +func createVtgateEnv(t testing.TB) (*VTGate, *sandboxconn.SandboxConn, context.Context) { + cell := "aa" + sb := createSandbox(KsTestSharded) + sb.ShardSpec = "-" + executor, _, _, sbc, ctx := createExecutorEnv(t) + executor.normalize = normalizeQueries + + vsm := newVStreamManager(executor.resolver.resolver, executor.serv, cell) + vtg := newVTGate(executor, executor.resolver, vsm, nil, executor.scatterConn.gateway) + + return vtg, sbc, ctx } diff --git a/go/vt/vtgate/vtgateconn/vtgateconn.go b/go/vt/vtgate/vtgateconn/vtgateconn.go index eca75c7d865..ae0da3fdf43 100644 --- a/go/vt/vtgate/vtgateconn/vtgateconn.go +++ b/go/vt/vtgate/vtgateconn/vtgateconn.go @@ -62,7 +62,7 @@ type VTGateConn struct { impl Impl } -// Session returns a VTGateSession that can be used to access V3 functions. +// Session returns a VTGateSession that can be used to access execution functions. func (conn *VTGateConn) Session(targetString string, options *querypb.ExecuteOptions) *VTGateSession { return &VTGateSession{ session: &vtgatepb.Session{ @@ -111,7 +111,7 @@ func (conn *VTGateConn) VStream(ctx context.Context, tabletType topodatapb.Table return conn.impl.VStream(ctx, tabletType, vgtid, filter, flags) } -// VTGateSession exposes the V3 API to the clients. +// VTGateSession exposes the Vitess Execution API to the clients. // The object maintains client-side state and is comparable to a native MySQL connection. // For example, if you enable autocommit on a Session object, all subsequent calls will respect this. // Functions within an object must not be called concurrently. @@ -141,10 +141,12 @@ func (sn *VTGateSession) ExecuteBatch(ctx context.Context, query []string, bindV // error. Then you can pull values from the ResultStream until io.EOF, // or another error. func (sn *VTGateSession) StreamExecute(ctx context.Context, query string, bindVars map[string]*querypb.BindVariable) (sqltypes.ResultStream, error) { - // StreamExecute is only used for SELECT queries that don't change - // the session. So, the protocol doesn't return an updated session. - // This may change in the future. - return sn.impl.StreamExecute(ctx, sn.session, query, bindVars) + // passing in the function that will update the session when received on the stream. + return sn.impl.StreamExecute(ctx, sn.session, query, bindVars, func(response *vtgatepb.StreamExecuteResponse) { + if response.Session != nil { + sn.session = response.Session + } + }) } // Prepare performs a VTGate Prepare. @@ -161,14 +163,14 @@ func (sn *VTGateSession) Prepare(ctx context.Context, query string, bindVars map // Impl defines the interface for a vtgate client protocol // implementation. It can be used concurrently across goroutines. type Impl interface { - // Execute executes a non-streaming query on vtgate. This is a V3 function. + // Execute executes a non-streaming query on vtgate. Execute(ctx context.Context, session *vtgatepb.Session, query string, bindVars map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) - // ExecuteBatch executes a non-streaming queries on vtgate. This is a V3 function. + // ExecuteBatch executes a non-streaming queries on vtgate. ExecuteBatch(ctx context.Context, session *vtgatepb.Session, queryList []string, bindVarsList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) - // StreamExecute executes a streaming query on vtgate. This is a V3 function. - StreamExecute(ctx context.Context, session *vtgatepb.Session, query string, bindVars map[string]*querypb.BindVariable) (sqltypes.ResultStream, error) + // StreamExecute executes a streaming query on vtgate. + StreamExecute(ctx context.Context, session *vtgatepb.Session, query string, bindVars map[string]*querypb.BindVariable, processResponse func(*vtgatepb.StreamExecuteResponse)) (sqltypes.ResultStream, error) // Prepare returns the fields information for the query as part of supporting prepare statements. Prepare(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, []*querypb.Field, error) diff --git a/go/vt/vtgate/vtgateconn/vtgateconn_test.go b/go/vt/vtgate/vtgateconn/vtgateconn_test.go index 8bada5b406c..523492328e9 100644 --- a/go/vt/vtgate/vtgateconn/vtgateconn_test.go +++ b/go/vt/vtgate/vtgateconn/vtgateconn_test.go @@ -17,9 +17,8 @@ limitations under the License. package vtgateconn import ( - "testing" - "context" + "testing" ) func TestRegisterDialer(t *testing.T) { diff --git a/go/vt/vtgate/vtgateservice/interface.go b/go/vt/vtgate/vtgateservice/interface.go index 3615ab3c431..bbfb2b2657e 100644 --- a/go/vt/vtgate/vtgateservice/interface.go +++ b/go/vt/vtgate/vtgateservice/interface.go @@ -22,7 +22,6 @@ import ( "context" "vitess.io/vitess/go/sqltypes" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -32,10 +31,9 @@ import ( // VTGateService is the interface implemented by the VTGate service, // that RPC server implementations will call. type VTGateService interface { - // V3 API - Execute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) + Execute(ctx context.Context, mysqlCtx MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) ExecuteBatch(ctx context.Context, session *vtgatepb.Session, sqlList []string, bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) - StreamExecute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) error + StreamExecute(ctx context.Context, mysqlCtx MySQLConnection, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) // Prepare statement support Prepare(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, []*querypb.Field, error) @@ -54,3 +52,12 @@ type VTGateService interface { // RPC implementation method, before calling any of the previous methods HandlePanic(err *error) } + +// MySQLConnection is an interface that allows to execute operations on the provided connection id. +// This is used by vtgate executor to execute kill queries. +type MySQLConnection interface { + // KillQuery stops the an executing query on the connection. + KillQuery(uint32) error + // KillConnection closes the connection and also stops any executing query on it. + KillConnection(context.Context, uint32) error +} diff --git a/go/vt/vtgr/config/vtgr_config.go b/go/vt/vtgr/config/vtgr_config.go deleted file mode 100644 index 3c86a0b0f3f..00000000000 --- a/go/vt/vtgr/config/vtgr_config.go +++ /dev/null @@ -1,606 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "encoding/json" - "fmt" - "net/url" - "os" - "regexp" - "strings" - - "gopkg.in/gcfg.v1" - - "vitess.io/vitess/go/vt/vttls" - - "vitess.io/vitess/go/vt/log" -) - -// VTGRConfig is the config for VTGR -type VTGRConfig struct { - DisableReadOnlyProtection bool - BootstrapGroupSize int - MinNumReplica int - BackoffErrorWaitTimeSeconds int - BootstrapWaitTimeSeconds int -} - -var vtgrCfg = newVTGRConfig() - -func newVTGRConfig() *VTGRConfig { - config := &VTGRConfig{ - DisableReadOnlyProtection: false, - BootstrapGroupSize: 5, - MinNumReplica: 3, - BackoffErrorWaitTimeSeconds: 10, - BootstrapWaitTimeSeconds: 10 * 60, - } - return config -} - -// ReadVTGRConfig reads config for VTGR -func ReadVTGRConfig(file string) (*VTGRConfig, error) { - vtgrFile, err := os.Open(file) - if err != nil { - return nil, err - } - decoder := json.NewDecoder(vtgrFile) - err = decoder.Decode(vtgrCfg) - if err != nil { - return nil, err - } - return vtgrCfg, nil -} - -/* - Everything below has been copied over from the VTOrc package -*/ - -var ( - envVariableRegexp = regexp.MustCompile("[$][{](.*)[}]") -) - -const ( - DefaultStatusAPIEndpoint = "/api/status" -) - -const ( - MySQLTopologyMaxPoolConnections = 3 -) - -// Configuration makes for orchestrator configuration input, which can be provided by user via JSON formatted file. -// Some of the parameteres have reasonable default values, and some (like database credentials) are -// strictly expected from user. -// TODO(sougou): change this to yaml parsing, and possible merge with tabletenv. -type Configuration struct { - Debug bool // set debug mode (similar to --debug option) - EnableSyslog bool // Should logs be directed (in addition) to syslog daemon? - ListenAddress string // Where orchestrator HTTP should listen for TCP - ListenSocket string // Where orchestrator HTTP should listen for unix socket (default: empty; when given, TCP is disabled) - HTTPAdvertise string // optional, for raft setups, what is the HTTP address this node will advertise to its peers (potentially use where behind NAT or when rerouting ports; example: "http://11.22.33.44:3030") - AgentsServerPort string // port orchestrator agents talk back to - MySQLTopologyUser string // The user VTOrc will use to connect to MySQL instances - MySQLTopologyPassword string // The password VTOrc will use to connect to MySQL instances - MySQLReplicaUser string // User to set on replica MySQL instances while configuring replication settings on them. If set, use this credential instead of discovering from mysql. TODO(sougou): deprecate this in favor of fetching from vttablet - MySQLReplicaPassword string // Password to set on replica MySQL instances while configuring replication settings on them. - MySQLTopologyCredentialsConfigFile string // my.cnf style configuration file from where to pick credentials. Expecting `user`, `password` under `[client]` section - MySQLTopologySSLPrivateKeyFile string // Private key file used to authenticate with a Topology mysql instance with TLS - MySQLTopologySSLCertFile string // Certificate PEM file used to authenticate with a Topology mysql instance with TLS - MySQLTopologySSLCAFile string // Certificate Authority PEM file used to authenticate with a Topology mysql instance with TLS - MySQLTopologySSLSkipVerify bool // If true, do not strictly validate mutual TLS certs for Topology mysql instances - MySQLTopologyUseMutualTLS bool // Turn on TLS authentication with the Topology MySQL instances - MySQLTopologyUseMixedTLS bool // Mixed TLS and non-TLS authentication with the Topology MySQL instances - MySQLTopologyTLSMinVersion string // Configures the minimal required TLS version for a topology MySQL instance with TLS. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3. - TLSCacheTTLFactor uint // Factor of InstancePollSeconds that we set as TLS info cache expiry - BackendDB string // EXPERIMENTAL: type of backend db; either "mysql" or "sqlite" - SQLite3DataFile string // when BackendDB == "sqlite", full path to sqlite3 datafile - SkipOrchestratorDatabaseUpdate bool // When true, do not check backend database schema nor attempt to update it. Useful when you may be running multiple versions of orchestrator, and you only wish certain boxes to dictate the db structure (or else any time a different orchestrator version runs it will rebuild database schema) - PanicIfDifferentDatabaseDeploy bool // When true, and this process finds the orchestrator backend DB was provisioned by a different version, panic - RaftEnabled bool // When true, setup orchestrator in a raft consensus layout. When false (default) all Raft* variables are ignored - RaftBind string - RaftAdvertise string - RaftDataDir string - DefaultRaftPort int // if a RaftNodes entry does not specify port, use this one - RaftNodes []string // Raft nodes to make initial connection with - ExpectFailureAnalysisConcensus bool - MySQLOrchestratorHost string - MySQLOrchestratorMaxPoolConnections int // The maximum size of the connection pool to the Orchestrator backend. - MySQLOrchestratorPort uint - MySQLOrchestratorDatabase string - MySQLOrchestratorUser string - MySQLOrchestratorPassword string - MySQLOrchestratorCredentialsConfigFile string // my.cnf style configuration file from where to pick credentials. Expecting `user`, `password` under `[client]` section - MySQLOrchestratorSSLPrivateKeyFile string // Private key file used to authenticate with the Orchestrator mysql instance with TLS - MySQLOrchestratorSSLCertFile string // Certificate PEM file used to authenticate with the Orchestrator mysql instance with TLS - MySQLOrchestratorSSLCAFile string // Certificate Authority PEM file used to authenticate with the Orchestrator mysql instance with TLS - MySQLOrchestratorSSLSkipVerify bool // If true, do not strictly validate mutual TLS certs for the Orchestrator mysql instances - MySQLOrchestratorUseMutualTLS bool // Turn on TLS authentication with the Orchestrator MySQL instance - MySQLOrchestratorTLSMinVersion string // Configures the minimal required TLS version for the Orchestrator MySQL instance with TLS. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3. - MySQLOrchestratorReadTimeoutSeconds int // Number of seconds before backend mysql read operation is aborted (driver-side) - MySQLOrchestratorRejectReadOnly bool // Reject read only connections https://github.com/go-sql-driver/mysql#rejectreadonly - MySQLConnectTimeoutSeconds int // Number of seconds before connection is aborted (driver-side) - MySQLDiscoveryReadTimeoutSeconds int // Number of seconds before topology mysql read operation is aborted (driver-side). Used for discovery queries. - MySQLTopologyReadTimeoutSeconds int // Number of seconds before topology mysql read operation is aborted (driver-side). Used for all but discovery queries. - MySQLConnectionLifetimeSeconds int // Number of seconds the mysql driver will keep database connection alive before recycling it - DefaultInstancePort int // In case port was not specified on command line - ReplicationLagQuery string // custom query to check on replica lg (e.g. heartbeat table). Must return a single row with a single numeric column, which is the lag. - ReplicationCredentialsQuery string // custom query to get replication credentials. Must return a single row, with two text columns: 1st is username, 2nd is password. This is optional, and can be used by orchestrator to configure replication after primary takeover or setup of co-primary. You need to ensure the orchestrator user has the privileges to run this query - DiscoverByShowSlaveHosts bool // Attempt SHOW SLAVE HOSTS before PROCESSLIST - UseSuperReadOnly bool // Should orchestrator super_read_only any time it sets read_only - InstancePollSeconds uint // Number of seconds between instance reads - InstanceWriteBufferSize int // Instance write buffer size (max number of instances to flush in one INSERT ODKU) - BufferInstanceWrites bool // Set to 'true' for write-optimization on backend table (compromise: writes can be stale and overwrite non stale data) - InstanceFlushIntervalMilliseconds int // Max interval between instance write buffer flushes - UnseenInstanceForgetHours uint // Number of hours after which an unseen instance is forgotten - SnapshotTopologiesIntervalHours uint // Interval in hour between snapshot-topologies invocation. Default: 0 (disabled) - DiscoveryMaxConcurrency uint // Number of goroutines doing hosts discovery - DiscoveryQueueCapacity uint // Buffer size of the discovery queue. Should be greater than the number of DB instances being discovered - DiscoveryQueueMaxStatisticsSize int // The maximum number of individual secondly statistics taken of the discovery queue - DiscoveryCollectionRetentionSeconds uint // Number of seconds to retain the discovery collection information - DiscoverySeeds []string // Hard coded array of hostname:port, ensuring orchestrator discovers these hosts upon startup, assuming not already known to orchestrator - InstanceBulkOperationsWaitTimeoutSeconds uint // Time to wait on a single instance when doing bulk (many instances) operation - HostnameResolveMethod string // Method by which to "normalize" hostname ("none"/"default"/"cname") - MySQLHostnameResolveMethod string // Method by which to "normalize" hostname via MySQL server. ("none"/"@@hostname"/"@@report_host"; default "@@hostname") - SkipBinlogServerUnresolveCheck bool // Skip the double-check that an unresolved hostname resolves back to same hostname for binlog servers - ExpiryHostnameResolvesMinutes int // Number of minutes after which to expire hostname-resolves - RejectHostnameResolvePattern string // Regexp pattern for resolved hostname that will not be accepted (not cached, not written to db). This is done to avoid storing wrong resolves due to network glitches. - ReasonableReplicationLagSeconds int // Above this value is considered a problem - ProblemIgnoreHostnameFilters []string // Will minimize problem visualization for hostnames matching given regexp filters - VerifyReplicationFilters bool // Include replication filters check before approving topology refactoring - ReasonableMaintenanceReplicationLagSeconds int // Above this value move-up and move-below are blocked - CandidateInstanceExpireMinutes uint // Minutes after which a suggestion to use an instance as a candidate replica (to be preferably promoted on primary failover) is expired. - AuditLogFile string // Name of log file for audit operations. Disabled when empty. - AuditToSyslog bool // If true, audit messages are written to syslog - AuditToBackendDB bool // If true, audit messages are written to the backend DB's `audit` table (default: true) - AuditPurgeDays uint // Days after which audit entries are purged from the database - RemoveTextFromHostnameDisplay string // Text to strip off the hostname on cluster/clusters pages - ReadOnly bool - AuthenticationMethod string // Type of autherntication to use, if any. "" for none, "basic" for BasicAuth, "multi" for advanced BasicAuth, "proxy" for forwarded credentials via reverse proxy, "token" for token based access - OAuthClientID string - OAuthClientSecret string - OAuthScopes []string - HTTPAuthUser string // Username for HTTP Basic authentication (blank disables authentication) - HTTPAuthPassword string // Password for HTTP Basic authentication - AuthUserHeader string // HTTP header indicating auth user, when AuthenticationMethod is "proxy" - PowerAuthUsers []string // On AuthenticationMethod == "proxy", list of users that can make changes. All others are read-only. - PowerAuthGroups []string // list of unix groups the authenticated user must be a member of to make changes. - AccessTokenUseExpirySeconds uint // Time by which an issued token must be used - AccessTokenExpiryMinutes uint // Time after which HTTP access token expires - ClusterNameToAlias map[string]string // map between regex matching cluster name to a human friendly alias - DetectClusterAliasQuery string // Optional query (executed on topology instance) that returns the alias of a cluster. Query will only be executed on cluster primary (though until the topology's primary is resovled it may execute on other/all replicas). If provided, must return one row, one column - DetectClusterDomainQuery string // Optional query (executed on topology instance) that returns the VIP/CNAME/Alias/whatever domain name for the primary of this cluster. Query will only be executed on cluster primary (though until the topology's primary is resovled it may execute on other/all replicas). If provided, must return one row, one column - DetectInstanceAliasQuery string // Optional query (executed on topology instance) that returns the alias of an instance. If provided, must return one row, one column - DetectPromotionRuleQuery string // Optional query (executed on topology instance) that returns the promotion rule of an instance. If provided, must return one row, one column. - DataCenterPattern string // Regexp pattern with one group, extracting the datacenter name from the hostname - RegionPattern string // Regexp pattern with one group, extracting the region name from the hostname - PhysicalEnvironmentPattern string // Regexp pattern with one group, extracting physical environment info from hostname (e.g. combination of datacenter & prod/dev env) - DetectDataCenterQuery string // Optional query (executed on topology instance) that returns the data center of an instance. If provided, must return one row, one column. Overrides DataCenterPattern and useful for installments where DC cannot be inferred by hostname - DetectRegionQuery string // Optional query (executed on topology instance) that returns the region of an instance. If provided, must return one row, one column. Overrides RegionPattern and useful for installments where Region cannot be inferred by hostname - DetectPhysicalEnvironmentQuery string // Optional query (executed on topology instance) that returns the physical environment of an instance. If provided, must return one row, one column. Overrides PhysicalEnvironmentPattern and useful for installments where env cannot be inferred by hostname - DetectSemiSyncEnforcedQuery string // Optional query (executed on topology instance) to determine whether semi-sync is fully enforced for primary writes (async fallback is not allowed under any circumstance). If provided, must return one row, one column, value 0 or 1. - SupportFuzzyPoolHostnames bool // Should "submit-pool-instances" command be able to pass list of fuzzy instances (fuzzy means non-fqdn, but unique enough to recognize). Defaults 'true', implies more queries on backend db - InstancePoolExpiryMinutes uint // Time after which entries in database_instance_pool are expired (resubmit via `submit-pool-instances`) - PromotionIgnoreHostnameFilters []string // Orchestrator will not promote replicas with hostname matching pattern (via -c recovery; for example, avoid promoting dev-dedicated machines) - ServeAgentsHTTP bool // Spawn another HTTP interface dedicated for orchestrator-agent - AgentsUseSSL bool // When "true" orchestrator will listen on agents port with SSL as well as connect to agents via SSL - AgentsUseMutualTLS bool // When "true" Use mutual TLS for the server to agent communication - AgentSSLSkipVerify bool // When using SSL for the Agent, should we ignore SSL certification error - AgentSSLPrivateKeyFile string // Name of Agent SSL private key file, applies only when AgentsUseSSL = true - AgentSSLCertFile string // Name of Agent SSL certification file, applies only when AgentsUseSSL = true - AgentSSLCAFile string // Name of the Agent Certificate Authority file, applies only when AgentsUseSSL = true - AgentSSLValidOUs []string // Valid organizational units when using mutual TLS to communicate with the agents - UseSSL bool // Use SSL on the server web port - UseMutualTLS bool // When "true" Use mutual TLS for the server's web and API connections - SSLSkipVerify bool // When using SSL, should we ignore SSL certification error - SSLPrivateKeyFile string // Name of SSL private key file, applies only when UseSSL = true - SSLCertFile string // Name of SSL certification file, applies only when UseSSL = true - SSLCAFile string // Name of the Certificate Authority file, applies only when UseSSL = true - SSLValidOUs []string // Valid organizational units when using mutual TLS - StatusEndpoint string // Override the status endpoint. Defaults to '/api/status' - StatusOUVerify bool // If true, try to verify OUs when Mutual TLS is on. Defaults to false - AgentPollMinutes uint // Minutes between agent polling - UnseenAgentForgetHours uint // Number of hours after which an unseen agent is forgotten - StaleSeedFailMinutes uint // Number of minutes after which a stale (no progress) seed is considered failed. - SeedAcceptableBytesDiff int64 // Difference in bytes between seed source & target data size that is still considered as successful copy - SeedWaitSecondsBeforeSend int64 // Number of seconds for waiting before start send data command on agent - BinlogEventsChunkSize int // Chunk size (X) for SHOW BINLOG|RELAYLOG EVENTS LIMIT ?,X statements. Smaller means less locking and mroe work to be done - ReduceReplicationAnalysisCount bool // When true, replication analysis will only report instances where possibility of handled problems is possible in the first place (e.g. will not report most leaf nodes, that are mostly uninteresting). When false, provides an entry for every known instance - FailureDetectionPeriodBlockMinutes int // The time for which an instance's failure discovery is kept "active", so as to avoid concurrent "discoveries" of the instance's failure; this preceeds any recovery process, if any. - RecoveryPeriodBlockMinutes int // (supported for backwards compatibility but please use newer `RecoveryPeriodBlockSeconds` instead) The time for which an instance's recovery is kept "active", so as to avoid concurrent recoveries on smae instance as well as flapping - RecoveryPeriodBlockSeconds int // (overrides `RecoveryPeriodBlockMinutes`) The time for which an instance's recovery is kept "active", so as to avoid concurrent recoveries on smae instance as well as flapping - RecoveryIgnoreHostnameFilters []string // Recovery analysis will completely ignore hosts matching given patterns - RecoverPrimaryClusterFilters []string // Only do primary recovery on clusters matching these regexp patterns (of course the ".*" pattern matches everything) - RecoverIntermediatePrimaryClusterFilters []string // Only do IM recovery on clusters matching these regexp patterns (of course the ".*" pattern matches everything) - ProcessesShellCommand string // Shell that executes command scripts - OnFailureDetectionProcesses []string // Processes to execute when detecting a failover scenario (before making a decision whether to failover or not). May and should use some of these placeholders: {failureType}, {instanceType}, {isPrimary}, {isCoPrimary}, {failureDescription}, {command}, {failedHost}, {failureCluster}, {failureClusterDomain}, {failedPort}, {successorHost}, {successorPort}, {successorAlias}, {countReplicas}, {replicaHosts}, {isDowntimed}, {autoPrimaryRecovery}, {autoIntermediatePrimaryRecovery} - PreFailoverProcesses []string // Processes to execute before doing a failover (aborting operation should any once of them exits with non-zero code; order of execution undefined). May and should use some of these placeholders: {failureType}, {instanceType}, {isPrimary}, {isCoPrimary}, {failureDescription}, {command}, {failedHost}, {failureCluster}, {failureClusterDomain}, {failedPort}, {countReplicas}, {replicaHosts}, {isDowntimed} - PostFailoverProcesses []string // Processes to execute after doing a failover (order of execution undefined). May and should use some of these placeholders: {failureType}, {instanceType}, {isPrimary}, {isCoPrimary}, {failureDescription}, {command}, {failedHost}, {failureCluster}, {failureClusterDomain}, {failedPort}, {successorHost}, {successorPort}, {successorAlias}, {countReplicas}, {replicaHosts}, {isDowntimed}, {isSuccessful}, {lostReplicas}, {countLostReplicas} - PostUnsuccessfulFailoverProcesses []string // Processes to execute after a not-completely-successful failover (order of execution undefined). May and should use some of these placeholders: {failureType}, {instanceType}, {isPrimary}, {isCoPrimary}, {failureDescription}, {command}, {failedHost}, {failureCluster}, {failureClusterDomain}, {failedPort}, {successorHost}, {successorPort}, {successorAlias}, {countReplicas}, {replicaHosts}, {isDowntimed}, {isSuccessful}, {lostReplicas}, {countLostReplicas} - PostPrimaryFailoverProcesses []string // Processes to execute after doing a primary failover (order of execution undefined). Uses same placeholders as PostFailoverProcesses - PostIntermediatePrimaryFailoverProcesses []string // Processes to execute after doing a primary failover (order of execution undefined). Uses same placeholders as PostFailoverProcesses - PostTakePrimaryProcesses []string // Processes to execute after a successful Take-Primary event has taken place - CoPrimaryRecoveryMustPromoteOtherCoPrimary bool // When 'false', anything can get promoted (and candidates are prefered over others). When 'true', orchestrator will promote the other co-primary or else fail - DetachLostReplicasAfterPrimaryFailover bool // Should replicas that are not to be lost in primary recovery (i.e. were more up-to-date than promoted replica) be forcibly detached - ApplyMySQLPromotionAfterPrimaryFailover bool // Should orchestrator take upon itself to apply MySQL primary promotion: set read_only=0, detach replication, etc. - PreventCrossDataCenterPrimaryFailover bool // When true (default: false), cross-DC primary failover are not allowed, orchestrator will do all it can to only fail over within same DC, or else not fail over at all. - PreventCrossRegionPrimaryFailover bool // When true (default: false), cross-region primary failover are not allowed, orchestrator will do all it can to only fail over within same region, or else not fail over at all. - PrimaryFailoverLostInstancesDowntimeMinutes uint // Number of minutes to downtime any server that was lost after a primary failover (including failed primary & lost replicas). 0 to disable - PrimaryFailoverDetachReplicaPrimaryHost bool // Should orchestrator issue a detach-replica-primary-host on newly promoted primary (this makes sure the new primary will not attempt to replicate old primary if that comes back to life). Defaults 'false'. Meaningless if ApplyMySQLPromotionAfterPrimaryFailover is 'true'. - FailPrimaryPromotionOnLagMinutes uint // when > 0, fail a primary promotion if the candidate replica is lagging >= configured number of minutes. - FailPrimaryPromotionIfSQLThreadNotUpToDate bool // when true, and a primary failover takes place, if candidate primary has not consumed all relay logs, promotion is aborted with error - DelayPrimaryPromotionIfSQLThreadNotUpToDate bool // when true, and a primary failover takes place, if candidate primary has not consumed all relay logs, delay promotion until the sql thread has caught up - PostponeReplicaRecoveryOnLagMinutes uint // On crash recovery, replicas that are lagging more than given minutes are only resurrected late in the recovery process, after primary/IM has been elected and processes executed. Value of 0 disables this feature - OSCIgnoreHostnameFilters []string // OSC replicas recommendation will ignore replica hostnames matching given patterns - URLPrefix string // URL prefix to run orchestrator on non-root web path, e.g. /orchestrator to put it behind nginx. - DiscoveryIgnoreReplicaHostnameFilters []string // Regexp filters to apply to prevent auto-discovering new replicas. Usage: unreachable servers due to firewalls, applications which trigger binlog dumps - DiscoveryIgnorePrimaryHostnameFilters []string // Regexp filters to apply to prevent auto-discovering a primary. Usage: pointing your primary temporarily to replicate seom data from external host - DiscoveryIgnoreHostnameFilters []string // Regexp filters to apply to prevent discovering instances of any kind - WebMessage string // If provided, will be shown on all web pages below the title bar - MaxConcurrentReplicaOperations int // Maximum number of concurrent operations on replicas - InstanceDBExecContextTimeoutSeconds int // Timeout on context used while calling ExecContext on instance database - LockShardTimeoutSeconds int // Timeout on context used to lock shard. Should be a small value because we should fail-fast - WaitReplicasTimeoutSeconds int // Timeout on amount of time to wait for the replicas in case of ERS. Should be a small value because we should fail-fast. Should not be larger than LockShardTimeoutSeconds since that is the total time we use for an ERS. -} - -// ToJSONString will marshal this configuration as JSON -func (config *Configuration) ToJSONString() string { - b, _ := json.Marshal(config) - return string(b) -} - -// Config is *the* configuration instance, used globally to get configuration data -var Config = newConfiguration() -var readFileNames []string - -func newConfiguration() *Configuration { - return &Configuration{ - Debug: false, - EnableSyslog: false, - ListenAddress: ":3000", - ListenSocket: "", - HTTPAdvertise: "", - AgentsServerPort: ":3001", - StatusEndpoint: DefaultStatusAPIEndpoint, - StatusOUVerify: false, - BackendDB: "sqlite", - SQLite3DataFile: "file::memory:?mode=memory&cache=shared", - SkipOrchestratorDatabaseUpdate: false, - PanicIfDifferentDatabaseDeploy: false, - RaftBind: "127.0.0.1:10008", - RaftAdvertise: "", - RaftDataDir: "", - DefaultRaftPort: 10008, - RaftNodes: []string{}, - ExpectFailureAnalysisConcensus: true, - MySQLOrchestratorMaxPoolConnections: 128, // limit concurrent conns to backend DB - MySQLOrchestratorPort: 3306, - MySQLTopologyUseMutualTLS: false, - MySQLTopologyUseMixedTLS: true, - MySQLOrchestratorUseMutualTLS: false, - MySQLConnectTimeoutSeconds: 2, - MySQLOrchestratorReadTimeoutSeconds: 30, - MySQLOrchestratorRejectReadOnly: false, - MySQLDiscoveryReadTimeoutSeconds: 10, - MySQLTopologyReadTimeoutSeconds: 600, - MySQLConnectionLifetimeSeconds: 0, - DefaultInstancePort: 3306, - TLSCacheTTLFactor: 100, - InstancePollSeconds: 5, - InstanceWriteBufferSize: 100, - BufferInstanceWrites: false, - InstanceFlushIntervalMilliseconds: 100, - UnseenInstanceForgetHours: 240, - SnapshotTopologiesIntervalHours: 0, - DiscoverByShowSlaveHosts: false, - UseSuperReadOnly: false, - DiscoveryMaxConcurrency: 300, - DiscoveryQueueCapacity: 100000, - DiscoveryQueueMaxStatisticsSize: 120, - DiscoveryCollectionRetentionSeconds: 120, - DiscoverySeeds: []string{}, - InstanceBulkOperationsWaitTimeoutSeconds: 10, - HostnameResolveMethod: "default", - MySQLHostnameResolveMethod: "none", - SkipBinlogServerUnresolveCheck: true, - ExpiryHostnameResolvesMinutes: 60, - RejectHostnameResolvePattern: "", - ReasonableReplicationLagSeconds: 10, - ProblemIgnoreHostnameFilters: []string{}, - VerifyReplicationFilters: false, - ReasonableMaintenanceReplicationLagSeconds: 20, - CandidateInstanceExpireMinutes: 60, - AuditLogFile: "", - AuditToSyslog: false, - AuditToBackendDB: false, - AuditPurgeDays: 7, - RemoveTextFromHostnameDisplay: "", - ReadOnly: false, - AuthenticationMethod: "", - HTTPAuthUser: "", - HTTPAuthPassword: "", - AuthUserHeader: "X-Forwarded-User", - PowerAuthUsers: []string{"*"}, - PowerAuthGroups: []string{}, - AccessTokenUseExpirySeconds: 60, - AccessTokenExpiryMinutes: 1440, - ClusterNameToAlias: make(map[string]string), - DetectClusterAliasQuery: "", - DetectClusterDomainQuery: "", - DetectInstanceAliasQuery: "", - DetectPromotionRuleQuery: "", - DataCenterPattern: "", - PhysicalEnvironmentPattern: "", - DetectDataCenterQuery: "", - DetectPhysicalEnvironmentQuery: "", - DetectSemiSyncEnforcedQuery: "", - SupportFuzzyPoolHostnames: true, - InstancePoolExpiryMinutes: 60, - PromotionIgnoreHostnameFilters: []string{}, - ServeAgentsHTTP: false, - AgentsUseSSL: false, - AgentsUseMutualTLS: false, - AgentSSLValidOUs: []string{}, - AgentSSLSkipVerify: false, - AgentSSLPrivateKeyFile: "", - AgentSSLCertFile: "", - AgentSSLCAFile: "", - UseSSL: false, - UseMutualTLS: false, - SSLValidOUs: []string{}, - SSLSkipVerify: false, - SSLPrivateKeyFile: "", - SSLCertFile: "", - SSLCAFile: "", - AgentPollMinutes: 60, - UnseenAgentForgetHours: 6, - StaleSeedFailMinutes: 60, - SeedAcceptableBytesDiff: 8192, - SeedWaitSecondsBeforeSend: 2, - BinlogEventsChunkSize: 10000, - ReduceReplicationAnalysisCount: true, - FailureDetectionPeriodBlockMinutes: 60, - RecoveryPeriodBlockMinutes: 60, - RecoveryPeriodBlockSeconds: 3600, - RecoveryIgnoreHostnameFilters: []string{}, - RecoverPrimaryClusterFilters: []string{"*"}, - RecoverIntermediatePrimaryClusterFilters: []string{}, - ProcessesShellCommand: "bash", - OnFailureDetectionProcesses: []string{}, - PreFailoverProcesses: []string{}, - PostPrimaryFailoverProcesses: []string{}, - PostIntermediatePrimaryFailoverProcesses: []string{}, - PostFailoverProcesses: []string{}, - PostUnsuccessfulFailoverProcesses: []string{}, - PostTakePrimaryProcesses: []string{}, - CoPrimaryRecoveryMustPromoteOtherCoPrimary: true, - DetachLostReplicasAfterPrimaryFailover: true, - ApplyMySQLPromotionAfterPrimaryFailover: true, - PreventCrossDataCenterPrimaryFailover: false, - PreventCrossRegionPrimaryFailover: false, - PrimaryFailoverLostInstancesDowntimeMinutes: 0, - PrimaryFailoverDetachReplicaPrimaryHost: false, - FailPrimaryPromotionOnLagMinutes: 0, - FailPrimaryPromotionIfSQLThreadNotUpToDate: false, - DelayPrimaryPromotionIfSQLThreadNotUpToDate: true, - PostponeReplicaRecoveryOnLagMinutes: 0, - OSCIgnoreHostnameFilters: []string{}, - URLPrefix: "", - DiscoveryIgnoreReplicaHostnameFilters: []string{}, - WebMessage: "", - MaxConcurrentReplicaOperations: 5, - InstanceDBExecContextTimeoutSeconds: 30, - LockShardTimeoutSeconds: 30, - WaitReplicasTimeoutSeconds: 30, - } -} - -func (config *Configuration) MySQLOrchestratorTLSMinVersionNumber() uint16 { - // We can ignore the error here, we already checked for valid options if it's set. - // If it's not set, we get a safe default back here. - minVersion, _ := vttls.TLSVersionToNumber(config.MySQLOrchestratorTLSMinVersion) - return minVersion -} - -func (config *Configuration) MySQLTopologyTLSMinVersionNumber() uint16 { - // We can ignore the error here, we already checked for valid options if it's set. - // If it's not set, we get a safe default back here. - minVersion, _ := vttls.TLSVersionToNumber(config.MySQLTopologyTLSMinVersion) - return minVersion -} - -func (config *Configuration) postReadAdjustments() error { - if config.MySQLOrchestratorCredentialsConfigFile != "" { - mySQLConfig := struct { - Client struct { - User string - Password string - } - }{} - err := gcfg.ReadFileInto(&mySQLConfig, config.MySQLOrchestratorCredentialsConfigFile) - if err != nil { - log.Fatalf("Failed to parse gcfg data from file: %+v", err) - } else { - log.Infof("Parsed orchestrator credentials from %s", config.MySQLOrchestratorCredentialsConfigFile) - config.MySQLOrchestratorUser = mySQLConfig.Client.User - config.MySQLOrchestratorPassword = mySQLConfig.Client.Password - } - } - { - // We accept password in the form "${SOME_ENV_VARIABLE}" in which case we pull - // the given variable from os env - submatch := envVariableRegexp.FindStringSubmatch(config.MySQLOrchestratorPassword) - if len(submatch) > 1 { - config.MySQLOrchestratorPassword = os.Getenv(submatch[1]) - } - } - if config.MySQLTopologyCredentialsConfigFile != "" { - mySQLConfig := struct { - Client struct { - User string - Password string - } - }{} - err := gcfg.ReadFileInto(&mySQLConfig, config.MySQLTopologyCredentialsConfigFile) - if err != nil { - log.Fatalf("Failed to parse gcfg data from file: %+v", err) - } else { - log.Infof("Parsed topology credentials from %s", config.MySQLTopologyCredentialsConfigFile) - config.MySQLTopologyUser = mySQLConfig.Client.User - config.MySQLTopologyPassword = mySQLConfig.Client.Password - } - } - { - // We accept password in the form "${SOME_ENV_VARIABLE}" in which case we pull - // the given variable from os env - submatch := envVariableRegexp.FindStringSubmatch(config.MySQLTopologyPassword) - if len(submatch) > 1 { - config.MySQLTopologyPassword = os.Getenv(submatch[1]) - } - } - - if config.RecoveryPeriodBlockSeconds == 0 && config.RecoveryPeriodBlockMinutes > 0 { - // RecoveryPeriodBlockSeconds is a newer addition that overrides RecoveryPeriodBlockMinutes - // The code does not consider RecoveryPeriodBlockMinutes anymore, but RecoveryPeriodBlockMinutes - // still supported in config file for backwards compatibility - config.RecoveryPeriodBlockSeconds = config.RecoveryPeriodBlockMinutes * 60 - } - - if config.FailPrimaryPromotionIfSQLThreadNotUpToDate && config.DelayPrimaryPromotionIfSQLThreadNotUpToDate { - return fmt.Errorf("Cannot have both FailPrimaryPromotionIfSQLThreadNotUpToDate and DelayPrimaryPromotionIfSQLThreadNotUpToDate enabled") - } - if config.FailPrimaryPromotionOnLagMinutes > 0 && config.ReplicationLagQuery == "" { - return fmt.Errorf("nonzero FailPrimaryPromotionOnLagMinutes requires ReplicationLagQuery to be set") - } - - if config.URLPrefix != "" { - // Ensure the prefix starts with "/" and has no trailing one. - config.URLPrefix = strings.TrimLeft(config.URLPrefix, "/") - config.URLPrefix = strings.TrimRight(config.URLPrefix, "/") - config.URLPrefix = "/" + config.URLPrefix - } - - if config.IsSQLite() && config.SQLite3DataFile == "" { - return fmt.Errorf("SQLite3DataFile must be set when BackendDB is sqlite") - } - if config.RaftEnabled && config.RaftDataDir == "" { - return fmt.Errorf("RaftDataDir must be defined since raft is enabled (RaftEnabled)") - } - if config.RaftEnabled && config.RaftBind == "" { - return fmt.Errorf("RaftBind must be defined since raft is enabled (RaftEnabled)") - } - if config.RaftAdvertise == "" { - config.RaftAdvertise = config.RaftBind - } - if config.HTTPAdvertise != "" { - u, err := url.Parse(config.HTTPAdvertise) - if err != nil { - return fmt.Errorf("Failed parsing HTTPAdvertise %s: %s", config.HTTPAdvertise, err.Error()) - } - if u.Scheme == "" { - return fmt.Errorf("If specified, HTTPAdvertise must include scheme (http:// or https://)") - } - if u.Hostname() == "" { - return fmt.Errorf("If specified, HTTPAdvertise must include host name") - } - if u.Port() == "" { - return fmt.Errorf("If specified, HTTPAdvertise must include port number") - } - if u.Path != "" { - return fmt.Errorf("If specified, HTTPAdvertise must not specify a path") - } - if config.InstanceWriteBufferSize <= 0 { - config.BufferInstanceWrites = false - } - } - - if config.MySQLOrchestratorTLSMinVersion != "" { - _, err := vttls.TLSVersionToNumber(config.MySQLOrchestratorTLSMinVersion) - if err != nil { - return fmt.Errorf("If specified, MySQLOrchestratorTLSMinVersion must be one of TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3") - } - } - - if config.MySQLTopologyTLSMinVersion != "" { - _, err := vttls.TLSVersionToNumber(config.MySQLTopologyTLSMinVersion) - if err != nil { - return fmt.Errorf("If specified, MySQLTopologyTLSMinVersion must be one of TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3") - } - } - - return nil -} - -func (config *Configuration) IsSQLite() bool { - return strings.Contains(config.BackendDB, "sqlite") -} - -func (config *Configuration) IsMySQL() bool { - return config.BackendDB == "mysql" || config.BackendDB == "" -} - -// read reads configuration from given file, or silently skips if the file does not exist. -// If the file does exist, then it is expected to be in valid JSON format or the function bails out. -func read(fileName string) (*Configuration, error) { - if fileName == "" { - return Config, fmt.Errorf("Empty file name") - } - file, err := os.Open(fileName) - if err != nil { - return Config, err - } - decoder := json.NewDecoder(file) - err = decoder.Decode(Config) - if err == nil { - log.Infof("Read config: %s", fileName) - } else { - log.Fatal("Cannot read config file:", fileName, err) - } - if err := Config.postReadAdjustments(); err != nil { - log.Fatal(err) - } - return Config, err -} - -// ForceRead reads configuration from given file name or bails out if it fails -func ForceRead(fileName string) *Configuration { - _, err := read(fileName) - if err != nil { - log.Fatal("Cannot read config file:", fileName, err) - } - readFileNames = []string{fileName} - return Config -} - -// CLIFlags stores some command line flags that are globally available in the process' lifetime -type CLIFlags struct { - Noop *bool - SkipUnresolve *bool - SkipUnresolveCheck *bool - BinlogFile *string - GrabElection *bool - Version *bool - Statement *string - PromotionRule *string - ConfiguredVersion string - SkipContinuousRegistration *bool - EnableDatabaseUpdate *bool - IgnoreRaftSetup *bool - Tag *string -} - -var RuntimeCLIFlags CLIFlags diff --git a/go/vt/vtgr/config/vtgr_config.json b/go/vt/vtgr/config/vtgr_config.json deleted file mode 100644 index 1c1ecae562a..00000000000 --- a/go/vt/vtgr/config/vtgr_config.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "BackoffErrorWaitTimeSeconds": 5, - "BootstrapGroupSize": 3 -} \ No newline at end of file diff --git a/go/vt/vtgr/config/vtgr_config_test.go b/go/vt/vtgr/config/vtgr_config_test.go deleted file mode 100644 index ec4312096a9..00000000000 --- a/go/vt/vtgr/config/vtgr_config_test.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestReadConfig(t *testing.T) { - path, _ := os.Getwd() - config, err := ReadVTGRConfig(filepath.Join(path, "vtgr_config.json")) - assert.NoError(t, err) - // Make sure VTGR config honors the default setting - assert.Equal(t, false, config.DisableReadOnlyProtection) - assert.Equal(t, 600, config.BootstrapWaitTimeSeconds) - // Make sure the config is load correctly - assert.Equal(t, 3, config.BootstrapGroupSize) - assert.Equal(t, 5, config.BackoffErrorWaitTimeSeconds) -} diff --git a/go/vt/vtgr/controller/diagnose.go b/go/vt/vtgr/controller/diagnose.go deleted file mode 100644 index b0896f4555a..00000000000 --- a/go/vt/vtgr/controller/diagnose.go +++ /dev/null @@ -1,586 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "errors" - "fmt" - "math/rand" - "os" - "sort" - "strings" - "sync" - "time" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/concurrency" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgr/db" -) - -var pingTabletTimeout = 2 * time.Second - -func init() { - servenv.OnParseFor("vtgr", func(fs *pflag.FlagSet) { - fs.DurationVar(&pingTabletTimeout, "ping_tablet_timeout", 2*time.Second, "time to wait when we ping a tablet") - }) -} - -// DiagnoseType is the types of Diagnose result -type DiagnoseType string - -type instanceGTIDSet struct { - gtids mysql.GTIDSet - instance *grInstance -} - -// groupGTIDRecorder is used to help us query all the instance in parallel and record the result -// it helps us to take care of the consistency / synchronization among go routines -type groupGTIDRecorder struct { - name string - gtidWithInstances []*instanceGTIDSet - hasActive bool - sync.Mutex -} - -const ( - // DiagnoseTypeError represents an DiagnoseTypeError status - DiagnoseTypeError DiagnoseType = "error" - // DiagnoseTypeHealthy represents everything is DiagnoseTypeHealthy - DiagnoseTypeHealthy = "Healthy" - // DiagnoseTypeShardHasNoGroup represents the cluster has not init yet - DiagnoseTypeShardHasNoGroup = "ShardHasNoGroup" - // DiagnoseTypeShardHasInactiveGroup represents the status where we have a group name but no member in it - DiagnoseTypeShardHasInactiveGroup = "ShardHasInactiveGroup" - // DiagnoseTypeInsufficientGroupSize represents the cluster has insufficient group members - DiagnoseTypeInsufficientGroupSize = "InsufficientGroupSize" - // DiagnoseTypeReadOnlyShard represents the cluster who has a read only node - DiagnoseTypeReadOnlyShard = "ReadOnlyShard" - // DiagnoseTypeUnreachablePrimary represents the primary tablet is unreachable - DiagnoseTypeUnreachablePrimary = "UnreachablePrimary" - // DiagnoseTypeWrongPrimaryTablet represents the primary tablet is incorrect based on mysql group - DiagnoseTypeWrongPrimaryTablet = "WrongPrimaryTablet" - // DiagnoseTypeUnconnectedReplica represents cluster with primary tablet, but a node is not connected to it - DiagnoseTypeUnconnectedReplica = "UnconnectedReplica" - // DiagnoseTypeBackoffError represents a transient error e.g., the primary is unreachable - DiagnoseTypeBackoffError = "BackoffError" - // DiagnoseTypeBootstrapBackoff represents an ongoing bootstrap - DiagnoseTypeBootstrapBackoff = "BootstrapBackoff" - - // diagnoseTypeUnknown represents a unclear intermediate diagnose state - diagnoseTypeUnknown = "Unknown" -) - -// ScanAndRepairShard scans a particular shard by first Diagnose the shard with info from grShard -// and then repair the probelm if the shard is unhealthy -func (shard *GRShard) ScanAndRepairShard(ctx context.Context) { - status, err := shard.Diagnose(ctx) - if err != nil { - shard.logger.Errorf("fail to scanAndRepairShard %v/%v because of Diagnose error: %v", shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard, err) - return - } - // We are able to get Diagnose without error - // - // Note: all the recovery function should first try to grab a shard level lock - // and check the trigger conditions before doing anything. This is to avoid - // other VTGR instance try to do the same thing - shard.logger.Infof("%v status is %v", formatKeyspaceShard(shard.KeyspaceShard), status) - if _, err := shard.Repair(ctx, status); err != nil { - shard.logger.Errorf("failed to repair %v: %v", status, err) - } -} - -// Diagnose the shard in the following order: -// TODO: use FSM to make sure the status transition is correct -// 1. if the shard has a group that every node agreed on -// 2. if the group has any active (online / recovering) member -// 3. if the shard has initialized a Vitess primary -// 4. if primary tablet is reachable -// 5. if Vitess primary and mysql primary reconciled -// 6. if we have enough group members -// 7. if the primary node has read_only=OFF -// 8. if there is a node that is not in Mysql group -func (shard *GRShard) Diagnose(ctx context.Context) (DiagnoseType, error) { - shard.Lock() - defer shard.Unlock() - diagnoseResult, err := shard.diagnoseLocked(ctx) - shard.shardStatusCollector.recordDiagnoseResult(diagnoseResult) - shard.populateVTGRStatusLocked() - if diagnoseResult != DiagnoseTypeHealthy { - shard.logger.Warningf(`VTGR diagnose shard as unhealthy for %s/%s: result=%v, last_result=%v, instances=%v, primary=%v, primary_tablet=%v, problematics=%v, unreachables=%v,\n%v`, - shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard, - shard.shardStatusCollector.status.DiagnoseResult, - shard.lastDiagnoseResult, - shard.shardStatusCollector.status.Instances, - shard.shardStatusCollector.status.Primary, - shard.primaryTabletAlias(), - shard.shardStatusCollector.status.Problematics, - shard.shardStatusCollector.status.Unreachables, - shard.sqlGroup.ToString()) - } - if diagnoseResult != shard.lastDiagnoseResult { - shard.lastDiagnoseResult = diagnoseResult - shard.lastDiagnoseSince = time.Now() - } - return diagnoseResult, err -} - -func (shard *GRShard) diagnoseLocked(ctx context.Context) (DiagnoseType, error) { - // fast path only diagnose problem Vitess primary - // which does not needed if the shard is inactive - if shard.localDbPort != 0 && shard.isActive.Load() { - localView := shard.getLocalView() - if localView != nil { - fastDiagnose := shard.fastPathDiagnose(ctx, localView) - if fastDiagnose != diagnoseTypeUnknown { - // If we can use local sql group info to diagnose - // we should record the view as well. This view is all we need - // later VTGR needs to find group name, primary etc from - // SQLGroup for repairing instead of getting nil - shard.sqlGroup.overrideView([]*db.GroupView{localView}) - shard.logger.Infof("Diagnose %v from fast path", fastDiagnose) - return fastDiagnose, nil - } - } - } - // fast path is disabled or cannot diagnose the shard - // fall back to the normal strategy where we fetch info from all the nodes - err := shard.refreshSQLGroup() - if err != nil { - if errors.Is(err, db.ErrGroupBackoffError) { - return DiagnoseTypeBackoffError, nil - } - if errors.Is(err, db.ErrGroupOngoingBootstrap) { - return DiagnoseTypeBootstrapBackoff, nil - } - return DiagnoseTypeError, vterrors.Wrap(err, "fail to refreshSQLGroup") - } - // First, we check if there is any group in the shard - // if no, we should bootstrap one - mysqlGroup := shard.shardAgreedGroupName() - if mysqlGroup == "" { - if len(shard.sqlGroup.views) != shard.sqlGroup.expectedBootstrapSize { - return DiagnoseTypeError, fmt.Errorf("fail to diagnose ShardHasNoGroup with %v nodes", len(shard.sqlGroup.views)) - } - return DiagnoseTypeShardHasNoGroup, nil - } - // We handle the case where the shard has an agreed group name but all nodes are offline - // In this situation, instead of bootstrap a group, we should re-build the - // old group for the shard - if shard.isAllOfflineOrError() { - shard.logger.Info("Found all members are OFFLINE or ERROR") - // On rebootstrap, we always want to make sure _all_ the nodes in topo are reachable - // unless we override the rebootstrap size - desiredRebootstrapSize := len(shard.instances) - if shard.sqlGroup.rebootstrapSize != 0 { - desiredRebootstrapSize = shard.sqlGroup.rebootstrapSize - } - if len(shard.sqlGroup.views) != desiredRebootstrapSize { - return DiagnoseTypeError, fmt.Errorf("fail to diagnose ShardHasInactiveGroup with %v nodes expecting %v", len(shard.sqlGroup.views), desiredRebootstrapSize) - } - return DiagnoseTypeShardHasInactiveGroup, nil - } - - // We only check Vitess primary iff shard is active. - // Otherwise VTGR will only make sure there is a mysql group in the shard. - if shard.isActive.Load() { - // Secondly, we check if there is a primary tablet. - // If there is a group but we cannot find a primary tablet - // we should set it based on mysql group - hasWrongPrimary, err := shard.hasWrongPrimaryTablet(ctx) - if err != nil { - // errMissingGroup means we cannot find a mysql group for the shard - // we are in DiagnoseTypeShardHasNoGroup state - if err == errMissingGroup { - shard.logger.Warning("Missing mysql group") - return DiagnoseTypeShardHasNoGroup, nil - } - // errMissingPrimaryTablet means we cannot find a tablet based on mysql primary - // which means the tablet disconnected from topo server and we cannot find it - if err == errMissingPrimaryTablet { - return DiagnoseTypeUnreachablePrimary, nil - } - return DiagnoseTypeError, vterrors.Wrap(err, "fail to diagnose shardNeedsInitialized") - } - if hasWrongPrimary { - return DiagnoseTypeWrongPrimaryTablet, nil - } - - // Thirdly, we check if primary tablet is reachable - isPrimaryReachable, err := shard.isPrimaryReachable(ctx) - if err != nil { - return DiagnoseTypeError, vterrors.Wrap(err, "fail to diagnose isPrimaryReachable") - } - if !isPrimaryReachable { - return DiagnoseTypeUnreachablePrimary, nil - } - } - - // At this point, the primary tablet should be consistent with mysql primary - // so the view from priamry tablet should be accurate - onlineMembers, isReadOnly := shard.getOnlineGroupInfo() - // If we found a writable shard in the inactive shard - // we should consider the shard as InsufficientGroupSize to set read only - if !isReadOnly && !shard.isActive.Load() { - return DiagnoseTypeInsufficientGroupSize, nil - } - // Then we check if we satisfy the minimum replica requirement - if shard.minNumReplicas > 0 { - if onlineMembers >= shard.minNumReplicas && isReadOnly && shard.isActive.Load() { - return DiagnoseTypeReadOnlyShard, nil - } - // If we disable readonly protection and still found we have a read only shard, - // we should return DiagnoseTypeReadOnlyShard so that VTGR can turn off read only - if shard.disableReadOnlyProtection && isReadOnly && shard.isActive.Load() { - return DiagnoseTypeReadOnlyShard, nil - } - // We don't check isActive here since if it is inactive, VTGR should already return InsufficientGroupSize - if !shard.disableReadOnlyProtection && onlineMembers < shard.minNumReplicas && !isReadOnly { - return DiagnoseTypeInsufficientGroupSize, nil - } - } - - // Lastly, we check if there is a replica that is not connected to primary node - disconnectedInstance, err := shard.disconnectedInstance() - if err != nil { - return DiagnoseTypeError, vterrors.Wrap(err, "fail to diagnose disconnectedInstance") - } - if disconnectedInstance != nil { - return DiagnoseTypeUnconnectedReplica, nil - } - - // If we get here, shard is DiagnoseTypeHealthy - return DiagnoseTypeHealthy, nil -} - -func (shard *GRShard) getLocalView() *db.GroupView { - localHostname, _ := os.Hostname() - localInst := shard.findTabletByHostAndPort(localHostname, shard.localDbPort) - if localInst == nil { - return nil - } - // TODO: consider using -db_socket to read local info - view, err := shard.dbAgent.FetchGroupView(localInst.alias, localInst.instanceKey) - // We still have the fallback logic if this failed, therefore we don't raise error - // but try to get local view with best effort - if err != nil { - shard.logger.Errorf("failed to fetch local group view: %v", err) - } - return view -} - -func (shard *GRShard) fastPathDiagnose(ctx context.Context, view *db.GroupView) DiagnoseType { - pHost, pPort, isOnline := view.GetPrimaryView() - primaryTablet := shard.findShardPrimaryTablet() - if !isOnline || pHost == "" || pPort == 0 || primaryTablet == nil { - return diagnoseTypeUnknown - } - // VTGR will only bootstrap a group when it observes same number of views as group_size - // it means if we can find an ONLINE primary, we should be able to trust the view reported locally - // together with the primary tablet from topo server, we can determine: - // - if we need to failover vitess - // - if we need to failover mysql - if primaryTablet.instanceKey.Hostname != pHost || primaryTablet.instanceKey.Port != pPort { - // we find a mismatch but if the reported mysql primary is not in - // topology we should consider it as unreachable. - if shard.findTabletByHostAndPort(pHost, pPort) == nil { - return DiagnoseTypeUnreachablePrimary - } - return DiagnoseTypeWrongPrimaryTablet - } - if !shard.instanceReachable(ctx, primaryTablet) { - return DiagnoseTypeUnreachablePrimary - } - return diagnoseTypeUnknown -} - -func (shard *GRShard) shardAgreedGroupName() string { - if len(shard.instances) == 0 { - return "" - } - return shard.sqlGroup.GetGroupName() -} - -func (shard *GRShard) isAllOfflineOrError() bool { - return shard.sqlGroup.IsAllOfflineOrError() -} - -func (shard *GRShard) getOnlineGroupInfo() (int, bool) { - return shard.sqlGroup.GetOnlineGroupInfo() -} - -func (shard *GRShard) hasWrongPrimaryTablet(ctx context.Context) (bool, error) { - // Find out the hostname and port of the primary in mysql group - // we try to use local instance and then fallback to a random instance to check mysqld - // in case the primary is unreachable - host, port, _ := shard.sqlGroup.GetPrimary() - if !isHostPortValid(host, port) { - shard.logger.Warningf("Invalid address for primary %v:%v", host, port) - return false, errMissingGroup - } - // Make sure we have a tablet available - // findTabletByHostAndPort returns nil when we cannot find a tablet - // that is running on host:port, which means the tablet get stuck - // or when the tablet is not reachable - // we retrun errMissingPrimaryTablet so that VTGR will trigger a failover - tablet := shard.findTabletByHostAndPort(host, port) - if tablet == nil || !shard.instanceReachable(ctx, tablet) { - shard.logger.Errorf("Failed to find tablet that is running with mysql on %v:%v", host, port) - return false, errMissingPrimaryTablet - } - // Now we know we have a valid mysql primary in the group - // we should make sure tablets are aligned with it - primary := shard.findShardPrimaryTablet() - // If we failed to find primary for shard, it mostly means we are initializing the shard - // return true directly so that VTGR will set primary tablet according to MySQL group - if primary == nil { - shard.logger.Infof("unable to find primary tablet for %v", formatKeyspaceShard(shard.KeyspaceShard)) - return true, nil - } - return (host != primary.instanceKey.Hostname) || (port != primary.instanceKey.Port), nil -} - -func (shard *GRShard) isPrimaryReachable(ctx context.Context) (bool, error) { - primaryTablet := shard.findShardPrimaryTablet() - if primaryTablet == nil { - return false, fmt.Errorf("unable to find primary for %v", formatKeyspaceShard(shard.KeyspaceShard)) - } - return shard.instanceReachable(ctx, primaryTablet), nil -} - -func (shard *GRShard) instanceReachable(ctx context.Context, instance *grInstance) bool { - pingCtx, cancel := context.WithTimeout(context.Background(), pingTabletTimeout) - defer cancel() - c := make(chan error, 1) - // tmc.Ping create grpc client connection first without timeout via dial - // then call the grpc endpoint using the context with timeout - // this is problematic if the host is really unreachable, we have to wait the - // all the retries inside grpc.dial with exponential backoff - go func() { c <- shard.tmc.Ping(pingCtx, instance.tablet) }() - select { - case <-pingCtx.Done(): - shard.logger.Errorf("Ping abort timeout %v", pingTabletTimeout) - return false - case err := <-c: - if err != nil { - shard.logger.Errorf("Ping error host=%v: %v", instance.instanceKey.Hostname, err) - } - return err == nil - } -} - -// findShardPrimaryTablet returns the primary for the shard -// it is either based on shard info from global topo or based on tablet types -// from local topo -func (shard *GRShard) findShardPrimaryTablet() *grInstance { - var primaryInstance *grInstance - for _, instance := range shard.instances { - if shard.primaryAlias == instance.alias { - return instance - } - } - return primaryInstance -} - -func (shard *GRShard) primaryTabletAlias() string { - primary := shard.findShardPrimaryTablet() - if primary == nil { - return "UNKNOWN" - } - return primary.alias -} - -// disconnectedInstance iterates all known the replica records -// and checks mysql to see if the group replication is setup on it -func (shard *GRShard) disconnectedInstance() (*grInstance, error) { - primaryInstance := shard.findShardPrimaryTablet() - // if there is no primary, we should recover from DiagnoseTypeWrongPrimaryTablet - if primaryInstance == nil { - return nil, fmt.Errorf("%v does not have primary", formatKeyspaceShard(shard.KeyspaceShard)) - } - // Up to this check, we know: - // - shard has an agreed group - // - shard has a primary tablet - // - shard primary tablet is running on the same node as mysql - rand.Shuffle(len(shard.instances), func(i, j int) { - shard.instances[i], shard.instances[j] = shard.instances[j], shard.instances[i] - }) - for _, instance := range shard.instances { - // Skip instance without hostname because they are not up and running - // also skip instances that raised unrecoverable errors - if shard.shardStatusCollector.isUnreachable(instance) { - shard.logger.Infof("Skip %v to check disconnectedInstance because it is unhealthy", instance.alias) - continue - } - isUnconnected := shard.sqlGroup.IsUnconnectedReplica(instance.instanceKey) - if isUnconnected { - return instance, nil - } - } - return nil, nil -} - -func (recorder *groupGTIDRecorder) recordGroupStatus(name string, isActive bool) error { - recorder.Lock() - defer recorder.Unlock() - if recorder.name != "" && recorder.name != name { - return fmt.Errorf("group has more than one group name") - } - recorder.name = name - // hasActive records true if any node finds an active member - if isActive { - recorder.hasActive = true - } - return nil -} - -func (recorder *groupGTIDRecorder) recordGroupGTIDs(gtids mysql.GTIDSet, instance *grInstance) { - recorder.Lock() - defer recorder.Unlock() - recorder.gtidWithInstances = append(recorder.gtidWithInstances, &instanceGTIDSet{gtids: gtids, instance: instance}) -} - -func (recorder *groupGTIDRecorder) sort() { - sort.SliceStable(recorder.gtidWithInstances, func(i, j int) bool { - return recorder.gtidWithInstances[i].instance.alias < recorder.gtidWithInstances[j].instance.alias - }) -} - -func (collector *shardStatusCollector) recordDiagnoseResult(result DiagnoseType) { - collector.Lock() - defer collector.Unlock() - collector.status.DiagnoseResult = result -} - -func (collector *shardStatusCollector) recordUnreachables(instance *grInstance) { - collector.Lock() - defer collector.Unlock() - // dedup - // the list size is at most same as number instances in a shard so iterate to dedup is not terrible - for _, alias := range collector.status.Unreachables { - if alias == instance.alias { - return - } - } - collector.status.Unreachables = append(collector.status.Unreachables, instance.alias) -} - -func (collector *shardStatusCollector) clear() { - collector.Lock() - defer collector.Unlock() - collector.status.Unreachables = nil - collector.status.Problematics = nil -} - -func (collector *shardStatusCollector) recordProblematics(instance *grInstance) { - collector.Lock() - defer collector.Unlock() - // dedup - // the list size is at most same as number instances in a shard so iterate to dedup is not terrible - for _, alias := range collector.status.Problematics { - if alias == instance.alias { - return - } - } - collector.status.Problematics = append(collector.status.Problematics, instance.alias) -} - -func formatKeyspaceShard(keyspaceShard *topo.KeyspaceShard) string { - return fmt.Sprintf("%v/%v", keyspaceShard.Keyspace, keyspaceShard.Shard) -} - -func isHostPortValid(host string, port int) bool { - return host != "" && port != 0 -} - -// We use forAllInstances in two cases: -// 1. FetchGroupView GTIDs to find a candidate for failover. -// If a node is not healthy it should not be considered as a failover candidate -// -// 2. FetchGroupView group member status to see if we need to bootstrap a group, -// either for the first time or rebuild a group after all the nodes are died. -// -// caller will be responsible to decide if they want to tolerate errors from the forAllInstances call -func (shard *GRShard) forAllInstances(task func(instance *grInstance, wg *sync.WaitGroup, er concurrency.ErrorRecorder)) *concurrency.AllErrorRecorder { - errorRecord := concurrency.AllErrorRecorder{} - shard.shardStatusCollector.clear() - var wg sync.WaitGroup - for _, instance := range shard.instances { - wg.Add(1) - go task(instance, &wg, &errorRecord) - } - wg.Wait() - if len(errorRecord.Errors) > 0 { - shard.logger.Errorf("get errors in forAllInstances call: %v", errorRecord.Error()) - } - return &errorRecord -} - -func unreachableError(err error) bool { - contains := []string{ - // "no such host"/"no route to host" is the error when a host is not reachalbe - "no such host", - "no route to host", - // "connect: connection refused" is the error when a mysqld refused the connection - "connect: connection refused", - // "invalid mysql instance key" is the error when a tablet does not populate mysql hostname or port - // this can happen if the tablet crashed. We keep them in the grShard.instances list to compute - // quorum but consider it as an unreachable host. - "invalid mysql instance key", - } - for _, k := range contains { - if strings.Contains(err.Error(), k) { - return true - } - } - return false -} - -// refreshSQLGroup hits all instances and renders a SQL group locally for later diagnoses -// the SQL group contains a list of "views" for the group from all the available nodes -func (shard *GRShard) refreshSQLGroup() error { - // reset views in sql group - shard.sqlGroup.clear() - er := shard.forAllInstances(func(instance *grInstance, wg *sync.WaitGroup, er concurrency.ErrorRecorder) { - defer wg.Done() - view, err := shard.dbAgent.FetchGroupView(instance.alias, instance.instanceKey) - // We just log error here because we rely on mysql tells us if it is happy or not - // If the node is unreachable - if err != nil { - er.RecordError(err) - shard.shardStatusCollector.recordProblematics(instance) - if unreachableError(err) { - shard.shardStatusCollector.recordUnreachables(instance) - } - shard.logger.Errorf("%v get error while fetch group info: %v", instance.alias, err) - return - } - shard.sqlGroup.recordView(view) - }) - // Only raise error if we failed to get any data from mysql - // otherwise, we will use what we get from mysql directly - if len(er.Errors) == len(shard.instances) { - shard.logger.Errorf("fail to fetch any data for mysql") - return db.ErrGroupBackoffError - } - return shard.sqlGroup.Resolve() -} diff --git a/go/vt/vtgr/controller/diagnose_test.go b/go/vt/vtgr/controller/diagnose_test.go deleted file mode 100644 index c8b81bb70da..00000000000 --- a/go/vt/vtgr/controller/diagnose_test.go +++ /dev/null @@ -1,900 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "errors" - "math" - "os" - "strconv" - "strings" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" - "vitess.io/vitess/go/vt/vtgr/config" - "vitess.io/vitess/go/vt/vtgr/db" - "vitess.io/vitess/go/vt/vtgr/inst" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -const diagnoseGroupSize = 3 - -var ( - testHost, _ = os.Hostname() - alias0 = "test_cell-0000000000" - alias1 = "test_cell-0000000001" - alias2 = "test_cell-0000000002" - testPort0 = 17000 - testPort1 = 17001 - testPort2 = 17002 -) - -type testGroupInput struct { - groupName string - readOnly bool - checkResult int - groupState []db.TestGroupState - gtid mysql.GTIDSet -} - -func TestShardIsHealthy(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - tablet1 := buildTabletInfo(uint32(testPort0), testHost, testPort0, topodatapb.TabletType_PRIMARY, time.Now()) - tablet2 := buildTabletInfo(uint32(testPort1), testHost, testPort1, topodatapb.TabletType_SPARE, time.Time{}) - tablet3 := buildTabletInfo(uint32(testPort2), testHost, testPort2, topodatapb.TabletType_REPLICA, time.Time{}) - testutil.AddTablet(ctx, t, ts, tablet1.Tablet, nil) - testutil.AddTablet(ctx, t, ts, tablet2.Tablet, nil) - testutil.AddTablet(ctx, t, ts, tablet3.Tablet, nil) - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet1.Alias - return nil - }) - dbAgent. - EXPECT(). - FetchGroupView(gomock.Any(), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - return db.BuildGroupView(alias, "group", testHost, testPort0, false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }), nil - }). - AnyTimes() - tmc.EXPECT().Ping(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - cfg := &config.VTGRConfig{BootstrapGroupSize: 3, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - shard.refreshTabletsInShardLocked(ctx) - diagnose, _ := shard.Diagnose(ctx) - assert.Equal(t, DiagnoseTypeHealthy, string(diagnose)) -} - -func TestTabletIssueDiagnoses(t *testing.T) { - type data struct { - pingable bool - ttype topodatapb.TabletType - } - var tablettests = []struct { - name string - expected DiagnoseType - errMessage string - primaryAlias string - inputs []data - }{ - {name: "healthy shard", expected: DiagnoseTypeHealthy, errMessage: "", primaryAlias: "test_cell-0000017000", inputs: []data{ - {true, topodatapb.TabletType_PRIMARY}, - {true, topodatapb.TabletType_REPLICA}, - {true, topodatapb.TabletType_REPLICA}, - }}, - {name: "non primary tablet is not pingable", expected: DiagnoseTypeHealthy, errMessage: "", primaryAlias: "test_cell-0000017000", inputs: []data{ // vtgr should do nothing - {true, topodatapb.TabletType_PRIMARY}, - {false, topodatapb.TabletType_REPLICA}, - {false, topodatapb.TabletType_REPLICA}, - }}, - {name: "primary tablet is not pingable", expected: DiagnoseTypeUnreachablePrimary, errMessage: "", primaryAlias: "test_cell-0000017000", inputs: []data{ // vtgr should trigger a failover - {false, topodatapb.TabletType_PRIMARY}, - {true, topodatapb.TabletType_REPLICA}, - {true, topodatapb.TabletType_REPLICA}, - }}, - {name: "no primary tablet", expected: DiagnoseTypeWrongPrimaryTablet, errMessage: "", primaryAlias: "", inputs: []data{ // vtgr should create one based on mysql - {true, topodatapb.TabletType_REPLICA}, - {true, topodatapb.TabletType_REPLICA}, - {true, topodatapb.TabletType_REPLICA}, - }}, - {name: "wrong primary in tablet types", expected: DiagnoseTypeWrongPrimaryTablet, errMessage: "", primaryAlias: "test_cell-0000017001", inputs: []data{ // shard info returns differently comparing with tablet type - {true, topodatapb.TabletType_PRIMARY}, - {true, topodatapb.TabletType_REPLICA}, - {true, topodatapb.TabletType_REPLICA}, - }}, - {name: "mysql and vttablet has different primary", expected: DiagnoseTypeWrongPrimaryTablet, errMessage: "", primaryAlias: "test_cell-0000017001", inputs: []data{ // vtgr should fix vttablet - {true, topodatapb.TabletType_REPLICA}, - {true, topodatapb.TabletType_PRIMARY}, - {true, topodatapb.TabletType_REPLICA}, - }}, - {name: "unreachable wrong vttablet primary", expected: DiagnoseTypeWrongPrimaryTablet, errMessage: "", primaryAlias: "test_cell-0000017001", inputs: []data{ // vtgr should fix vttablet - {true, topodatapb.TabletType_REPLICA}, - {false, topodatapb.TabletType_PRIMARY}, - {true, topodatapb.TabletType_REPLICA}, - }}, - {name: "unreachable uninitialized primary vttablet", expected: DiagnoseTypeUnreachablePrimary, errMessage: "", inputs: []data{ // vtgr should failover - {false, topodatapb.TabletType_REPLICA}, - {true, topodatapb.TabletType_REPLICA}, - {true, topodatapb.TabletType_REPLICA}, - }}, - } - for _, tt := range tablettests { - t.Run(tt.name, func(t *testing.T) { - expected := tt.expected - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ts := NewMockGRTopo(ctrl) - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - tablets := make(map[string]*topo.TabletInfo) - if tt.primaryAlias == "" { - ts. - EXPECT(). - GetShard(gomock.Any(), gomock.Eq("ks"), gomock.Eq("0")). - Return(&topo.ShardInfo{Shard: &topodatapb.Shard{}}, nil) - } - for i, input := range tt.inputs { - id := uint32(testPort0 + i) - tablet := buildTabletInfo(id, testHost, testPort0+i, input.ttype, time.Now()) - tablets[tablet.AliasString()] = tablet - var response = struct { - pingable bool - }{input.pingable} - if tt.primaryAlias == tablet.AliasString() { - si := &topo.ShardInfo{ - Shard: &topodatapb.Shard{ - PrimaryAlias: tablet.Alias, - }, - } - ts. - EXPECT(). - GetShard(gomock.Any(), gomock.Eq("ks"), gomock.Eq("0")). - Return(si, nil) - } - dbAgent. - EXPECT(). - FetchGroupView(gomock.Any(), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - return db.BuildGroupView(alias, "group", testHost, testPort0, false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }), nil - }). - AnyTimes() - tmc. - EXPECT(). - Ping(gomock.Any(), &topodatapb.Tablet{ - Alias: tablet.Alias, - Hostname: tablet.Hostname, - Keyspace: tablet.Keyspace, - Shard: tablet.Shard, - Type: tablet.Type, - Tags: tablet.Tags, - MysqlHostname: tablet.MysqlHostname, - MysqlPort: tablet.MysqlPort, - PrimaryTermStartTime: tablet.PrimaryTermStartTime, - }). - DoAndReturn(func(_ context.Context, t *topodatapb.Tablet) error { - if !response.pingable { - return errors.New("unreachable") - } - return nil - }). - AnyTimes() - } - ts. - EXPECT(). - GetTabletMapForShardByCell(gomock.Any(), gomock.Eq("ks"), gomock.Eq("0"), gomock.Any()). - Return(tablets, nil) - - ctx := context.Background() - cfg := &config.VTGRConfig{BootstrapGroupSize: diagnoseGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - shard.refreshTabletsInShardLocked(ctx) - diagnose, err := shard.Diagnose(ctx) - assert.Equal(t, expected, diagnose) - if tt.errMessage == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err) - assert.True(t, strings.Contains(err.Error(), tt.errMessage), err.Error()) - } - }) - } -} - -func TestMysqlIssueDiagnoses(t *testing.T) { - cfg := &config.VTGRConfig{BootstrapGroupSize: diagnoseGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - disableProtectionCfg := &config.VTGRConfig{BootstrapGroupSize: diagnoseGroupSize, MinNumReplica: 2, DisableReadOnlyProtection: true, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - heartbeatThreshold = 10 - defer func() { - heartbeatThreshold = math.MaxInt64 - }() - type data struct { - alias string - groupName string - readOnly bool - checkResult int - groupInput []db.TestGroupState - ttype topodatapb.TabletType - } - var sqltests = []struct { - name string - expected DiagnoseType - errMessage string - config *config.VTGRConfig - inputs []data - removeTablets []string // to simulate missing tablet in topology - }{ - {name: "healthy shard", expected: DiagnoseTypeHealthy, errMessage: "", inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "recovering primary shard", expected: DiagnoseTypeBackoffError, errMessage: "", inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "RECOVERING", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "RECOVERING", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "RECOVERING", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "no group in shard", expected: DiagnoseTypeShardHasNoGroup, errMessage: "", inputs: []data{ - {alias0, "", true, 0, []db.TestGroupState{ - {MemberHost: "", MemberPort: "", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "", true, 0, []db.TestGroupState{ - {MemberHost: "", MemberPort: "", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "", true, 0, []db.TestGroupState{ - {MemberHost: "", MemberPort: "", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "fail to bootstrap with incorrect number of nodes", expected: DiagnoseTypeError, errMessage: "fail to diagnose ShardHasNoGroup with 3 nodes", inputs: []data{ - {alias0, "", true, 0, []db.TestGroupState{ - {MemberHost: "", MemberPort: "", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "", true, 0, []db.TestGroupState{ - {MemberHost: "", MemberPort: "", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "", true, 0, []db.TestGroupState{ - {MemberHost: "", MemberPort: "", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }, config: &config.VTGRConfig{BootstrapGroupSize: 2, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1}}, - {name: "unreachable node", expected: DiagnoseTypeBackoffError, errMessage: "", inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "mysql and tablet has different primary", expected: DiagnoseTypeWrongPrimaryTablet, errMessage: "", inputs: []data{ // vtgr should failover vttablet - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "mysql primary out of topology", expected: DiagnoseTypeUnreachablePrimary, errMessage: "", inputs: []data{ // vtgr should failover mysql - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }, removeTablets: []string{alias0}}, - {name: "one error node", expected: DiagnoseTypeUnconnectedReplica, errMessage: "", inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ERROR", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ERROR", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "inactive group with divergent state", expected: DiagnoseTypeShardHasInactiveGroup, errMessage: "", inputs: []data{ - {alias0, "group", true, 11, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "OFFLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 11, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 11, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "two error node", expected: DiagnoseTypeInsufficientGroupSize, errMessage: "", inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ERROR", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "insufficient group member", expected: DiagnoseTypeInsufficientGroupSize, errMessage: "", inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{}, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "unconnected node", expected: DiagnoseTypeBackoffError, errMessage: "", inputs: []data{ - {alias0, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "unreachable primary", expected: DiagnoseTypeBackoffError, errMessage: "", inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "UNREACHABLE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "UNREACHABLE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "UNREACHABLE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "more than one group name", expected: DiagnoseTypeError, errMessage: "fail to refreshSQLGroup: group has split brain", inputs: []data{ // vtgr should raise error - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group_xxx", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "different primary", expected: DiagnoseTypeError, errMessage: "fail to refreshSQLGroup: group has split brain", inputs: []data{ // vtgr should raise error - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "insufficient members in group", expected: DiagnoseTypeInsufficientGroupSize, errMessage: "", inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - // the shard has insufficient member, but the primary is already read_only - // we should try to connect the replica node - {name: "insufficient members in read only shard", expected: DiagnoseTypeUnconnectedReplica, errMessage: "", inputs: []data{ - {alias0, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "insufficient members in group with disable read only protection", expected: DiagnoseTypeUnconnectedReplica, errMessage: "", config: disableProtectionCfg, inputs: []data{ - {alias0, "group", false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "read only with disable read only protection", expected: DiagnoseTypeReadOnlyShard, errMessage: "", config: disableProtectionCfg, inputs: []data{ - {alias0, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "read only healthy shard", expected: DiagnoseTypeReadOnlyShard, errMessage: "", inputs: []data{ - {alias0, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "inconsistent member state", expected: DiagnoseTypeBackoffError, errMessage: "", inputs: []data{ - {alias0, "group", true, 11, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "group", true, 12, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias2, "group", true, math.MaxInt64, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "network partition", expected: DiagnoseTypeBackoffError, errMessage: "", inputs: []data{ - {alias0, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "OFFLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "start bootstrap in progress", expected: DiagnoseTypeBootstrapBackoff, errMessage: "", inputs: []data{ - {alias0, "group", true, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "", true, 0, []db.TestGroupState{}, topodatapb.TabletType_REPLICA}, - {alias2, "", true, 0, []db.TestGroupState{ - {MemberHost: "", MemberPort: "", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - } - for _, tt := range sqltests { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ts := NewMockGRTopo(ctrl) - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - tablets := make(map[string]*topo.TabletInfo) - expected := tt.expected - inputMap := make(map[string]testGroupInput) - if tt.config == nil { - tt.config = cfg - } - conf := tt.config - hasPrimary := false - for i, input := range tt.inputs { - id := uint32(i) - //id := uint32(testPort0 + i) - tablet := buildTabletInfo(id, testHost, testPort0+i, input.ttype, time.Now()) - tablets[tablet.AliasString()] = tablet - inputMap[input.alias] = testGroupInput{ - input.groupName, - input.readOnly, - input.checkResult, - input.groupInput, - nil, - } - if tablet.Type == topodatapb.TabletType_PRIMARY { - si := &topo.ShardInfo{ - Shard: &topodatapb.Shard{ - PrimaryAlias: tablet.Alias, - }, - } - ts. - EXPECT(). - GetShard(gomock.Any(), gomock.Eq("ks"), gomock.Eq("0")). - Return(si, nil) - hasPrimary = true - } - dbAgent. - EXPECT(). - FetchGroupView(gomock.Any(), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[alias] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). - AnyTimes() - } - if !hasPrimary { - ts. - EXPECT(). - GetShard(gomock.Any(), gomock.Eq("ks"), gomock.Eq("0")). - Return(&topo.ShardInfo{Shard: &topodatapb.Shard{}}, nil) - } - for _, tid := range tt.removeTablets { - delete(tablets, tid) - } - ts. - EXPECT(). - GetTabletMapForShardByCell(gomock.Any(), gomock.Eq("ks"), gomock.Eq("0"), gomock.Any()). - Return(tablets, nil) - tmc.EXPECT().Ping(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - - ctx := context.Background() - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, conf, testPort0, true) - shard.refreshTabletsInShardLocked(ctx) - diagnose, err := shard.Diagnose(ctx) - assert.Equal(t, expected, diagnose) - if tt.errMessage == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err) - assert.True(t, strings.Contains(err.Error(), tt.errMessage), err.Error()) - } - }) - } -} - -func TestDiagnoseWithInactive(t *testing.T) { - cfg := &config.VTGRConfig{BootstrapGroupSize: diagnoseGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - type data struct { - alias string - groupName string - readOnly bool - pingable bool - groupInput []db.TestGroupState - ttype topodatapb.TabletType - } - var sqltests = []struct { - name string - expected DiagnoseType - errMessage string - config *config.VTGRConfig - inputs []data - rebootstrapGroupSize int - removeTablets []string // to simulate missing tablet in topology - }{ - // although mysql and vitess has different primary, but since this is an active shard, VTGR won't fix that - {name: "mysql and tablet has different primary", expected: DiagnoseTypeHealthy, errMessage: "", inputs: []data{ - {alias0, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias2, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "different primary with unconnected node", expected: DiagnoseTypeUnconnectedReplica, errMessage: "", inputs: []data{ - {alias0, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias2, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "primary tablet is not pingable", expected: DiagnoseTypeHealthy, errMessage: "", inputs: []data{ - {alias0, "group", true, false, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - // This is a read only shard, but since it's an inactive shard we will diagnose it as healthy - {name: "read only healthy shard", expected: DiagnoseTypeHealthy, errMessage: "", inputs: []data{ - {alias0, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "writable shard", expected: DiagnoseTypeInsufficientGroupSize, errMessage: "", inputs: []data{ - {alias0, "group", false, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {name: "error when there are only two nodes", expected: DiagnoseTypeError, errMessage: "fail to diagnose ShardHasInactiveGroup with 3 nodes expecting 2", inputs: []data{ - {alias0, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {alias1, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {alias2, "group", true, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }, rebootstrapGroupSize: 2}, - } - for _, tt := range sqltests { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - expected := tt.expected - inputMap := make(map[string]testGroupInput) - pingable := make(map[string]bool) - if tt.config == nil { - tt.config = cfg - } - conf := tt.config - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(i), testHost, testPort0+i, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - inputMap[input.alias] = testGroupInput{ - input.groupName, - input.readOnly, - 0, - input.groupInput, - nil, - } - pingable[input.alias] = input.pingable - if tablet.Type == topodatapb.TabletType_PRIMARY { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - dbAgent. - EXPECT(). - FetchGroupView(gomock.Any(), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[alias] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). - AnyTimes() - tmc. - EXPECT(). - Ping(gomock.Any(), &topodatapb.Tablet{ - Alias: tablet.Alias, - Hostname: tablet.Hostname, - Keyspace: tablet.Keyspace, - Shard: tablet.Shard, - Type: tablet.Type, - Tags: tablet.Tags, - MysqlHostname: tablet.MysqlHostname, - MysqlPort: tablet.MysqlPort, - PrimaryTermStartTime: tablet.PrimaryTermStartTime, - }). - DoAndReturn(func(_ context.Context, t *topodatapb.Tablet) error { - if !pingable[tablet.Alias.String()] { - return errors.New("unreachable") - } - return nil - }). - AnyTimes() - } - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, conf, testPort0, false) - if tt.rebootstrapGroupSize != 0 { - shard.OverrideRebootstrapGroupSize(tt.rebootstrapGroupSize) - } - shard.refreshTabletsInShardLocked(ctx) - diagnose, err := shard.Diagnose(ctx) - assert.Equal(t, expected, diagnose) - if tt.errMessage == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err) - assert.True(t, strings.Contains(err.Error(), tt.errMessage), err.Error()) - } - }) - } -} - -func TestGroupStatusRecorder(t *testing.T) { - r := &groupGTIDRecorder{} - - err := r.recordGroupStatus("group1", true) - assert.NoError(t, err) - assert.Equal(t, r.name, "group1") - assert.Equal(t, r.hasActive, true) - - err = r.recordGroupStatus("group2", false) - assert.Error(t, err, "group has more than one group name") - assert.Equal(t, r.name, "group1") - - err = r.recordGroupStatus("group1", false) - assert.NoError(t, err) - assert.Equal(t, r.name, "group1") - assert.Equal(t, r.hasActive, true) - - pos1, err := mysql.ParsePosition(mysql.Mysql56FlavorID, "264a8230-67d2-11eb-acdd-0a8d91f24125:1-22:1000019-1000021") - assert.NoError(t, err) - inst1 := &grInstance{alias: "alias1"} - r.recordGroupGTIDs(pos1.GTIDSet, inst1) - pos2, err := mysql.ParsePosition(mysql.Mysql56FlavorID, "264a8230-67d2-11eb-acdd-0a8d91f24125:1-1000021") - assert.NoError(t, err) - inst2 := &grInstance{alias: "alias2"} - r.recordGroupGTIDs(pos2.GTIDSet, inst2) - assert.Equal(t, len(r.gtidWithInstances), 2) - assert.Equal(t, r.gtidWithInstances[0].instance, inst1) - assert.Equal(t, pos1.GTIDSet.Equal(r.gtidWithInstances[0].gtids), true) - assert.Equal(t, r.gtidWithInstances[1].instance, inst2) - assert.Equal(t, pos2.GTIDSet.Equal(r.gtidWithInstances[1].gtids), true) -} diff --git a/go/vt/vtgr/controller/group.go b/go/vt/vtgr/controller/group.go deleted file mode 100644 index 3469d63acbb..00000000000 --- a/go/vt/vtgr/controller/group.go +++ /dev/null @@ -1,443 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "fmt" - "math" - "sort" - "strings" - "sync" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/stats" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vtgr/db" - "vitess.io/vitess/go/vt/vtgr/inst" - "vitess.io/vitess/go/vt/vtgr/log" -) - -var ( - groupOnlineSize = stats.NewGaugesWithMultiLabels("MysqlGroupOnlineSize", "Online MySQL server in the group", []string{"Keyspace", "Shard"}) - isLostQuorum = stats.NewGaugesWithMultiLabels("MysqlGroupLostQuorum", "If MySQL group lost quorum", []string{"Keyspace", "Shard"}) - - heartbeatThreshold int -) - -func init() { - servenv.OnParseFor("vtgr", func(fs *pflag.FlagSet) { - fs.IntVar(&heartbeatThreshold, "group_heartbeat_threshold", 0, "VTGR will trigger backoff on inconsistent state if the group heartbeat staleness exceeds this threshold (in seconds). Should be used along with --enable_heartbeat_check.") - }) -} - -// SQLGroup contains views from all the nodes within the shard -type SQLGroup struct { - views []*db.GroupView - resolvedView *ResolvedView - logger *log.Logger - expectedBootstrapSize int - // rebootstrapSize is init to 0 - // when it is not 0, we allow some nodes to be unhealthy during a rebootstrap - rebootstrapSize int - singlePrimary bool - heartbeatThreshold int - statsTags []string - sync.Mutex -} - -// NewSQLGroup creates a new SQLGroup -func NewSQLGroup(size int, singlePrimary bool, keyspace, shard string) *SQLGroup { - return &SQLGroup{ - expectedBootstrapSize: size, - rebootstrapSize: 0, - singlePrimary: singlePrimary, - statsTags: []string{keyspace, shard}, - logger: log.NewVTGRLogger(keyspace, shard), - heartbeatThreshold: heartbeatThreshold, - } -} - -// ResolvedView is the resolved view -type ResolvedView struct { - groupName string - view map[inst.InstanceKey]db.GroupMember - logger *log.Logger -} - -// recordView adds a view to the group -func (group *SQLGroup) recordView(view *db.GroupView) { - group.Lock() - defer group.Unlock() - group.views = append(group.views, view) -} - -// overrideView overrides a view to the group -func (group *SQLGroup) overrideView(views []*db.GroupView) { - group.Lock() - defer group.Unlock() - group.views = views - group.resolveLocked() -} - -// clear reset the views -func (group *SQLGroup) clear() { - group.Lock() - defer group.Unlock() - group.views = nil - group.resolvedView = nil -} - -// GetViews returns views from everyone in the group -func (group *SQLGroup) GetViews() []*db.GroupView { - group.Lock() - defer group.Unlock() - return group.views -} - -// GetGroupName returns the group name -func (group *SQLGroup) GetGroupName() string { - group.Lock() - defer group.Unlock() - rv := group.resolvedView - return rv.groupName -} - -// GetOnlineGroupInfo returns number of online members in the group and also if the primary is read only -func (group *SQLGroup) GetOnlineGroupInfo() (int, bool) { - group.Lock() - defer group.Unlock() - rv := group.resolvedView - view := rv.view - onlineSize := 0 - isPrimaryReadOnly := false - for _, status := range view { - if status.State == db.ONLINE { - onlineSize++ - } - if status.Role == db.PRIMARY { - isPrimaryReadOnly = isPrimaryReadOnly || status.ReadOnly - } - } - return onlineSize, isPrimaryReadOnly -} - -// IsUnconnectedReplica checks if the node is connected to a group -func (group *SQLGroup) IsUnconnectedReplica(instanceKey *inst.InstanceKey) bool { - if instanceKey == nil { - return false - } - group.Lock() - defer group.Unlock() - rv := group.resolvedView - view := rv.view - status, ok := view[*instanceKey] - if !ok { - return true - } - return status.State != db.ONLINE && status.State != db.RECOVERING -} - -// IsAllOfflineOrError returns true if all the nodes are in offline mode -func (group *SQLGroup) IsAllOfflineOrError() bool { - group.Lock() - defer group.Unlock() - rv := group.resolvedView - view := rv.view - for _, status := range view { - if status.State != db.OFFLINE && status.State != db.ERROR { - return false - } - } - return true -} - -// GetStatus returns GroupMember status for given a host -func (group *SQLGroup) GetStatus(instanceKey *inst.InstanceKey) *db.GroupMember { - if instanceKey == nil { - return nil - } - group.Lock() - defer group.Unlock() - rv := group.resolvedView - view := rv.view - status, ok := view[*instanceKey] - if !ok { - return nil - } - return &status -} - -// IsSafeToBootstrap checks if it is safe to bootstrap a mysql group -func (group *SQLGroup) IsSafeToBootstrap() bool { - group.Lock() - defer group.Unlock() - // for bootstrap we require group at least has quorum number of views - // this is to make sure we don't bootstrap a group improperly - if len(group.views) < group.expectedBootstrapSize { - group.logger.Errorf("[sql_group] cannot bootstrap because we only have %v views | expected %v", len(group.views), group.expectedBootstrapSize) - return false - } - return group.isSafeToRebootstrapLocked() -} - -// IsSafeToRebootstrap checks if it is safe to rebootstrap a group -// It does not check group size as IsSafeToBootstrap, since when we -// reach here it means VTGR already checked there were group expectedBootstrapSize -// number of nodes in topo server, therefore we just rebootstrap -// as long as we can reach all the nodes in topo server -func (group *SQLGroup) IsSafeToRebootstrap() bool { - group.Lock() - defer group.Unlock() - return group.isSafeToRebootstrapLocked() -} - -func (group *SQLGroup) isSafeToRebootstrapLocked() bool { - // we think it is safe to bootstrap a group if all the views don't have a primary host - host, port, _ := group.getPrimaryLocked() - if host != "" || port != 0 { - group.logger.Warningf("not safe to bootstrap sql group because %v/%v might already be primary", host, port) - } - return host == "" && port == 0 -} - -// GetPrimary returns the hostname, port of the primary that everyone agreed on -// isActive bool indicates if there is any node in the group whose primary is "ONLINE" -func (group *SQLGroup) GetPrimary() (string, int, bool) { - group.Lock() - defer group.Unlock() - return group.getPrimaryLocked() -} - -func (group *SQLGroup) getPrimaryLocked() (string, int, bool) { - rv := group.resolvedView - view := rv.view - for instance, status := range view { - if status.Role == db.PRIMARY { - return instance.Hostname, instance.Port, status.State == db.ONLINE - } - } - return "", 0, false -} - -// Resolve merges the views into a map -func (group *SQLGroup) Resolve() error { - group.Lock() - defer group.Unlock() - return group.resolveLocked() -} -func (group *SQLGroup) resolveLocked() error { - rv := &ResolvedView{logger: group.logger} - group.resolvedView = rv - // a node that is not in the group might be outlier with big lag - // iterate over all views to get global minStalenessResult first - minStalenessResult := math.MaxInt32 - for _, view := range group.views { - if view.HeartbeatStaleness < minStalenessResult { - minStalenessResult = view.HeartbeatStaleness - } - } - m := make(map[inst.InstanceKey]db.GroupMember) - for _, view := range group.views { - if rv.groupName == "" && view.GroupName != "" { - rv.groupName = view.GroupName - } - if view.GroupName != "" && rv.groupName != view.GroupName { - group.logger.Errorf("previous group name %v found %v", rv.groupName, view.GroupName) - return db.ErrGroupSplitBrain - } - for _, member := range view.UnresolvedMembers { - instance := view.CreateInstanceKey(member) - memberState := member.State - memberRole := member.Role - isReadOnly := member.ReadOnly - st, ok := m[instance] - if !ok { - m[instance] = db.GroupMember{ - HostName: instance.Hostname, - Port: instance.Port, - State: memberState, - Role: memberRole, - ReadOnly: isReadOnly, - } - continue - } - if st.State == memberState && st.Role == memberRole && st.ReadOnly == isReadOnly { - continue - } - // Members in a group should eventually converge on a state - // if there is a partition, then a node should be removed from - // a group. If a node is reported as ONLINE together with - // some other state, we back off if we see a node with diverged state - if memberState != db.UNKNOWNSTATE && - st.State != db.UNKNOWNSTATE && - st.State != memberState && - (st.State == db.ONLINE || memberState == db.ONLINE) { - group.logger.Warningf("found inconsistent member state for %v: %v vs %v", instance.Hostname, st.State, memberState) - if group.heartbeatThreshold != 0 && - // Check minStalenessResult among the group is not math.MaxInt32 - // which means at least one node returns the lag from _vt.heartbeat table - // otherwise we don't trigger backoff on inconsistent state - minStalenessResult != math.MaxInt32 && - minStalenessResult >= group.heartbeatThreshold { - group.logger.Warningf("ErrGroupBackoffError by staled heartbeat check %v", minStalenessResult) - var sb strings.Builder - for _, view := range group.views { - sb.WriteString(fmt.Sprintf("%v staleness=%v\n", view.MySQLHost, view.HeartbeatStaleness)) - } - group.logger.Warningf("%v", sb.String()) - return db.ErrGroupBackoffError - } - } - m[instance] = db.GroupMember{ - HostName: instance.Hostname, - Port: instance.Port, - State: group.mergeState(st.State, memberState), - Role: group.mergeRole(st.Role, memberRole), - ReadOnly: st.ReadOnly || isReadOnly, - } - } - } - rv.view = m - return group.resolvedView.validate(group.singlePrimary, group.statsTags) -} - -func (rv *ResolvedView) validate(singlePrimary bool, statsTags []string) error { - if !rv.hasGroup() { - rv.logger.Info("Resolved view does not have a group") - return nil - } - hasPrimary := false - primaryState := db.UNKNOWNSTATE - var onlineCount, recoveringCount, unreachableCount, offlineCount, errorCount int - for _, status := range rv.view { - if status.Role == db.PRIMARY { - if singlePrimary && hasPrimary { - rv.logger.Errorf("Found more than one primary in the group") - return db.ErrGroupSplitBrain - } - hasPrimary = true - primaryState = status.State - if status.State != db.ONLINE { - rv.logger.Warningf("Found a PRIMARY not ONLINE (%v)", status.State) - } - } - switch status.State { - case db.ONLINE: - onlineCount++ - case db.UNREACHABLE: - unreachableCount++ - case db.OFFLINE: - offlineCount++ - case db.ERROR: - errorCount++ - case db.RECOVERING: - recoveringCount++ - } - } - groupOnlineSize.Set(statsTags, int64(onlineCount)) - if unreachableCount > 0 || errorCount > 0 || offlineCount > 0 { - rv.logger.Warningf("Some of nodes are unconnected in the group. hasPrimary=%v (%v), online_count=%v, recovering_count=%v, unreachable_count=%v, offline_count=%v, error_count=%v", hasPrimary, primaryState, onlineCount, recoveringCount, unreachableCount, offlineCount, errorCount) - } - if unreachableCount >= len(rv.view)/2+1 { - rv.logger.Errorf("Backoff error by quorum unreachable: found %v number of UNREACHABLE nodes while quorum is %v", unreachableCount, len(rv.view)/2+1) - isLostQuorum.Set(statsTags, 1) - } else { - isLostQuorum.Set(statsTags, 0) - } - // In theory there should be no UNREACHABLE nodes - // raise ErrGroupBackoffError to backoff and wait - // If we lost quorum, then the group is not writable - // If we still have a functioning group, we can backoff and wait - // the unreachable node should either be expelled or we have a frozen view - // Note: this means we should set group_replication_unreachable_majority_timeout - // greater than 0. Otherwise VTGR can see all nodes are ONLINE when a single node - // is partitioned and end up doing nothing. - if unreachableCount > 0 { - return db.ErrGroupBackoffError - } - // Ongoing bootstrap, we should backoff and wait - if recoveringCount == 1 && (offlineCount+recoveringCount == len(rv.view)) { - rv.logger.Warningf("Group has one recovery node with all others in offline mode") - return db.ErrGroupOngoingBootstrap - } - // We don't have quorum number of unreachable, but the primary is not online - // This most likely means there is a failover in the group we should back off and wait - if hasPrimary && primaryState != db.ONLINE { - rv.logger.Warningf("Found a PRIMARY that is not ONLINE (%v)", primaryState) - return db.ErrGroupBackoffError - } - // If all the node in view are OFFLINE or ERROR, it is an inactive group - // It is expected to have no primary in this case - if !hasPrimary && (offlineCount+errorCount != len(rv.view)) { - rv.logger.Warningf("Group is NOT all offline or error without a primary node") - return db.ErrGroupBackoffError - } - return nil -} - -func (rv *ResolvedView) hasGroup() bool { - return rv.groupName != "" -} - -func (group *SQLGroup) mergeState(s1, s2 db.MemberState) db.MemberState { - return db.MemberState(group.maxStatus(int(s1), int(s2))) -} - -func (group *SQLGroup) mergeRole(r1, r2 db.MemberRole) db.MemberRole { - return db.MemberRole(group.maxStatus(int(r1), int(r2))) -} - -func (group *SQLGroup) maxStatus(a, b int) int { - if a > b { - return a - } - return b -} - -// ToString returns a string representatino of the sql group -func (group *SQLGroup) ToString() string { - group.Lock() - defer group.Unlock() - var sb strings.Builder - views := group.views - for _, view := range views { - sb.WriteString(fmt.Sprintf("[%s] SQLGroup group=%s", view.TabletAlias, view.GroupName)) - for _, member := range view.UnresolvedMembers { - sb.WriteString(fmt.Sprintf(" | %s %s %s readonly=%v", member.HostName, member.Role, member.State, member.ReadOnly)) - } - sb.WriteString("\n") - } - rv := group.resolvedView - if rv != nil { - sb.WriteString("[resolved_view]\n") - sb.WriteString(fmt.Sprintf("group_name=%v\n", rv.groupName)) - keys := make([]inst.InstanceKey, 0, len(rv.view)) - for k := range rv.view { - keys = append(keys, k) - } - sort.Slice(keys, func(i, j int) bool { - return keys[i].Hostname < keys[j].Hostname - }) - for _, instance := range keys { - status := rv.view[instance] - sb.WriteString(fmt.Sprintf("[%s] state=%v role=%v readonly=%v\n", instance.Hostname, status.State, status.Role, status.ReadOnly)) - - } - } - return sb.String() -} diff --git a/go/vt/vtgr/controller/group_test.go b/go/vt/vtgr/controller/group_test.go deleted file mode 100644 index edfeca14500..00000000000 --- a/go/vt/vtgr/controller/group_test.go +++ /dev/null @@ -1,454 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "math" - "testing" - - "vitess.io/vitess/go/vt/vtgr/log" - - "vitess.io/vitess/go/vt/vtgr/db" - "vitess.io/vitess/go/vt/vtgr/inst" - - "github.com/stretchr/testify/assert" -) - -func TestSQLGroupToString(t *testing.T) { - group := NewSQLGroup(2, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group_name" - var l1 []*db.GroupMember - var l2 []*db.GroupMember - m1 := db.NewGroupMember("ONLINE", "PRIMARY", "host1", 10, false) - m2 := db.NewGroupMember("ONLINE", "SECONDARY", "host2", 10, true) - m3 := db.NewGroupMember("OFFLINE", "SECONDARY", "host3", 10, true) - l1 = append(l1, m1) - l1 = append(l1, m2) - v1.UnresolvedMembers = l1 - l2 = append(l2, m3) - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group_name" - v2.UnresolvedMembers = l2 - group.recordView(v2) - group.recordView(v1) - assert.Equal(t, `[v2] SQLGroup group=group_name | host3 SECONDARY OFFLINE readonly=true -[v1] SQLGroup group=group_name | host1 PRIMARY ONLINE readonly=false | host2 SECONDARY ONLINE readonly=true -`, group.ToString()) - group.Resolve() - assert.Equal(t, `[v2] SQLGroup group=group_name | host3 SECONDARY OFFLINE readonly=true -[v1] SQLGroup group=group_name | host1 PRIMARY ONLINE readonly=false | host2 SECONDARY ONLINE readonly=true -[resolved_view] -group_name=group_name -[host1] state=ONLINE role=PRIMARY readonly=false -[host2] state=ONLINE role=SECONDARY readonly=true -[host3] state=OFFLINE role=SECONDARY readonly=true -`, group.ToString()) -} - -func TestGetGroupName(t *testing.T) { - group := NewSQLGroup(3, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "host1", 10, true), - } - group.recordView(v1) - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group" - v2.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "", 0, true), - } - group.recordView(v2) - err := group.Resolve() - assert.NoError(t, err) - name := group.GetGroupName() - assert.Equal(t, "group", name) - v3 := db.NewGroupView("v3", "host3", 10) - v3.GroupName = "group_foo" - group.recordView(v3) - err = group.Resolve() - assert.Errorf(t, err, "group has split brain") - name = group.GetGroupName() - // group keeps the group name before finding a divergent group name - assert.Equal(t, "group", name) -} - -func TestIsActiveWithMultiplePrimary(t *testing.T) { - group := NewSQLGroup(2, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "PRIMARY", "host1", 10, false), - db.NewGroupMember("ONLINE", "SECONDARY", "host2", 10, true), - } - group.recordView(v1) - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group" - v2.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "SECONDARY", "host1", 10, true), - db.NewGroupMember("ONLINE", "PRIMARY", "host2", 10, false), - } - group.recordView(v2) - err := group.Resolve() - assert.Errorf(t, err, "group network partition") -} - -func TestIsSafeToBootstrap(t *testing.T) { - group := NewSQLGroup(1, true, "ks", "0") - isSafe := group.IsSafeToBootstrap() - assert.False(t, isSafe) - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "", 0, true), - db.NewGroupMember("OFFLINE", "", "", 0, true), - } - group.recordView(v1) - group.Resolve() - isSafe = group.IsSafeToBootstrap() - assert.True(t, isSafe) -} - -func TestIsSafeToBootstrapWithPrimary(t *testing.T) { - group := NewSQLGroup(1, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - // it is not safe to bootstrap if we see a primary node in group - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "PRIMARY", "host1", 0, false), - db.NewGroupMember("OFFLINE", "", "", 0, true), - } - group.recordView(v1) - group.Resolve() - isSafe := group.IsSafeToBootstrap() - assert.False(t, isSafe) -} - -func TestIsUnconnectedReplica(t *testing.T) { - group := NewSQLGroup(1, true, "ks", "0") - isSafe := group.IsSafeToBootstrap() - assert.False(t, isSafe) - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "PRIMARY", "host1", 10, false), - db.NewGroupMember("ONLINE", "SECONDARY", "host2", 10, true), - } - group.recordView(v1) - group.Resolve() - isUnconnected := group.IsUnconnectedReplica(&inst.InstanceKey{Hostname: "host2", Port: 10}) - assert.False(t, isUnconnected) -} - -func TestGetOnlineGroupSizeFromPrimary(t *testing.T) { - group := NewSQLGroup(1, true, "ks", "0") - isSafe := group.IsSafeToBootstrap() - assert.False(t, isSafe) - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "PRIMARY", "host1", 10, false), - db.NewGroupMember("ONLINE", "SECONDARY", "host2", 10, true), - db.NewGroupMember("RECOVERING", "SECONDARY", "host3", 10, true), - } - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group" - v2.UnresolvedMembers = []*db.GroupMember{} - group.recordView(v1) - group.recordView(v2) - group.Resolve() - size, readOnly := group.GetOnlineGroupInfo() - assert.Equal(t, 2, size) - assert.False(t, readOnly) -} - -func TestNetworkPartition(t *testing.T) { - group := NewSQLGroup(3, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "PRIMARY", "host1", 10, false), - db.NewGroupMember("UNREACHABLE", "SECONDARY", "host2", 10, true), - db.NewGroupMember("UNREACHABLE", "SECONDARY", "host3", 10, true), - } - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group" - v2.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "host2", 10, true), - } - v3 := db.NewGroupView("v3", "host3", 10) - v3.GroupName = "group" - v3.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "host3", 10, true), - } - group.recordView(v1) - group.recordView(v2) - group.recordView(v3) - err := group.Resolve() - assert.EqualErrorf(t, err, "group backoff error", err.Error()) - rv := group.resolvedView - assert.Equal(t, "group", rv.groupName) - assert.Equal(t, map[inst.InstanceKey]db.GroupMember{ - {Hostname: "host1", Port: 10}: {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {Hostname: "host2", Port: 10}: {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.UNREACHABLE, ReadOnly: true}, - {Hostname: "host3", Port: 10}: {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.UNREACHABLE, ReadOnly: true}, - }, rv.view) -} - -func TestInconsistentState(t *testing.T) { - group := NewSQLGroup(3, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.HeartbeatStaleness = 11 - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "PRIMARY", "host1", 10, false), - db.NewGroupMember("ONLINE", "SECONDARY", "host2", 10, true), - db.NewGroupMember("ONLINE", "SECONDARY", "host3", 10, true), - } - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group" - v2.HeartbeatStaleness = 11 - v2.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "host2", 10, true), - } - v3 := db.NewGroupView("v3", "host3", 10) - v3.GroupName = "group" - v3.HeartbeatStaleness = 13 - v3.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "host3", 10, true), - } - group.recordView(v1) - group.recordView(v2) - group.recordView(v3) - group.heartbeatThreshold = 10 - err := group.Resolve() - assert.EqualErrorf(t, err, "group backoff error", err.Error()) - rv := group.resolvedView - assert.Equal(t, "group", rv.groupName) - assert.Nil(t, rv.view) -} - -func TestInconsistentStateWithInvalidStaleResult(t *testing.T) { - group := NewSQLGroup(3, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.HeartbeatStaleness = math.MaxInt32 - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "PRIMARY", "host1", 10, false), - db.NewGroupMember("ONLINE", "SECONDARY", "host2", 10, true), - db.NewGroupMember("ONLINE", "SECONDARY", "host3", 10, true), - } - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group" - v2.HeartbeatStaleness = math.MaxInt32 - v2.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "host2", 10, true), - } - v3 := db.NewGroupView("v3", "host3", 10) - v3.GroupName = "group" - v3.HeartbeatStaleness = math.MaxInt32 - v3.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "host3", 10, true), - } - group.recordView(v1) - group.recordView(v2) - group.recordView(v3) - group.heartbeatThreshold = 10 - err := group.Resolve() - // Same setup as TestInconsistentState but because HeartbeatStaleness are all MaxInt32 - // the backoff is not triggered - assert.NoError(t, err) - rv := group.resolvedView - assert.Equal(t, "group", rv.groupName) -} - -func TestInconsistentUnknownState(t *testing.T) { - group := NewSQLGroup(3, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "PRIMARY", "host1", 10, false), - db.NewGroupMember("RECOVERING", "SECONDARY", "host2", 10, true), - db.NewGroupMember("ONLINE", "SECONDARY", "host3", 10, true), - } - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group" - v2.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("", "", "host2", 10, true), - } - v3 := db.NewGroupView("v3", "host3", 10) - v3.GroupName = "group" - v3.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("ONLINE", "SECONDARY", "host3", 10, true), - } - group.recordView(v1) - group.recordView(v2) - group.recordView(v3) - err := group.Resolve() - // host 2 reports itself with empty state - // therefore we shouldn't raise error even with inconsistent state - assert.NoError(t, err) - rv := group.resolvedView - assert.Equal(t, "group", rv.groupName) - assert.Equal(t, map[inst.InstanceKey]db.GroupMember{ - {Hostname: "host1", Port: 10}: {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {Hostname: "host2", Port: 10}: {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.RECOVERING, ReadOnly: true}, - {Hostname: "host3", Port: 10}: {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - }, rv.view) -} - -func TestIsBootstrapInProcess(t *testing.T) { - group := NewSQLGroup(3, true, "ks", "0") - v1 := db.NewGroupView("v1", "host1", 10) - v1.GroupName = "group" - v1.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("RECOVERING", "SECONDARY", "host1", 10, false), - } - v2 := db.NewGroupView("v2", "host2", 10) - v2.GroupName = "group" - v2.UnresolvedMembers = []*db.GroupMember{ - db.NewGroupMember("OFFLINE", "", "host2", 10, false), - } - v3 := db.NewGroupView("v3", "host", 10) - v3.GroupName = "group" - v3.UnresolvedMembers = []*db.GroupMember{} - group.recordView(v1) - group.recordView(v2) - group.recordView(v3) - err := group.Resolve() - assert.Errorf(t, err, "group transient error") -} - -func TestResolve(t *testing.T) { - healthyView := []*db.GroupMember{ - {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - } - var testCases = []struct { - testName string - views []*db.GroupView - expected *ResolvedView - errorMsg string - }{ - {"test healthy shard", []*db.GroupView{ - {MySQLHost: "host1", MySQLPort: 10, GroupName: "group", UnresolvedMembers: healthyView}, - {MySQLHost: "host2", MySQLPort: 10, GroupName: "group", UnresolvedMembers: healthyView}, - {MySQLHost: "host3", MySQLPort: 10, GroupName: "group", UnresolvedMembers: healthyView}, - }, &ResolvedView{"group", map[inst.InstanceKey]db.GroupMember{ - {Hostname: "host1", Port: 10}: {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {Hostname: "host2", Port: 10}: {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - {Hostname: "host3", Port: 10}: {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - }, nil}, ""}, - {"test readonly with unreachable primary", []*db.GroupView{ // host1 is unreachable - {MySQLHost: "host2", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: false}, - }}, - {MySQLHost: "host3", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: false}, - {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - }}, - }, &ResolvedView{"group", map[inst.InstanceKey]db.GroupMember{ - {Hostname: "host1", Port: 10}: {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {Hostname: "host2", Port: 10}: {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - {Hostname: "host3", Port: 10}: {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - }, nil}, ""}, - {"test split brain by group name", []*db.GroupView{ - {MySQLHost: "host1", MySQLPort: 10, GroupName: "group", UnresolvedMembers: healthyView}, - {MySQLHost: "host2", MySQLPort: 10, GroupName: "group1", UnresolvedMembers: healthyView}, - {MySQLHost: "host3", MySQLPort: 10, GroupName: "group", UnresolvedMembers: healthyView}, - }, nil, "group has split brain"}, - {"test empty hostname", []*db.GroupView{ - {MySQLHost: "host1", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "", Port: 0, Role: db.UNKNOWNROLE, State: db.OFFLINE, ReadOnly: true}, - }}, - {MySQLHost: "host2", MySQLPort: 10, GroupName: "", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host2", Port: 10, Role: db.UNKNOWNROLE, State: db.OFFLINE, ReadOnly: true}, - }}, - {MySQLHost: "host3", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host3", Port: 10, Role: db.UNKNOWNROLE, State: db.OFFLINE, ReadOnly: true}, - }}, - }, &ResolvedView{"group", map[inst.InstanceKey]db.GroupMember{ - {Hostname: "host1", Port: 10}: {HostName: "host1", Port: 10, Role: db.UNKNOWNROLE, State: db.OFFLINE, ReadOnly: true}, - {Hostname: "host2", Port: 10}: {HostName: "host2", Port: 10, Role: db.UNKNOWNROLE, State: db.OFFLINE, ReadOnly: true}, - {Hostname: "host3", Port: 10}: {HostName: "host3", Port: 10, Role: db.UNKNOWNROLE, State: db.OFFLINE, ReadOnly: true}, - }, nil}, ""}, - {"test network partition by majority unreachable", []*db.GroupView{ - {MySQLHost: "host1", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.UNREACHABLE, ReadOnly: false}, - {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.UNREACHABLE, ReadOnly: true}, - }}, - }, nil, "group backoff error"}, - {"test no network partition with less then majority unreachable", []*db.GroupView{ - {MySQLHost: "host1", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.UNREACHABLE, ReadOnly: false}, - }}, - {MySQLHost: "host2", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.UNREACHABLE, ReadOnly: false}, - }}, - }, &ResolvedView{"group", map[inst.InstanceKey]db.GroupMember{ - {Hostname: "host1", Port: 10}: {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.ONLINE, ReadOnly: false}, - {Hostname: "host2", Port: 10}: {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE, ReadOnly: true}, - {Hostname: "host3", Port: 10}: {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.UNREACHABLE, ReadOnly: false}, - }, nil}, "group backoff error"}, - {"test network partition by unreachable primary", []*db.GroupView{ - {MySQLHost: "host2", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.UNREACHABLE}, - {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE}, - {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.ONLINE}, - }}, - {MySQLHost: "host3", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "host1", Port: 10, Role: db.PRIMARY, State: db.UNREACHABLE}, - {HostName: "host2", Port: 10, Role: db.SECONDARY, State: db.ONLINE}, - {HostName: "host3", Port: 10, Role: db.SECONDARY, State: db.ONLINE}, - }}, - }, nil, "group backoff error"}, - {"test bootstrap ongoing", []*db.GroupView{ - {MySQLHost: "host1", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{ - {HostName: "", Port: 0, Role: db.SECONDARY, State: db.RECOVERING, ReadOnly: true}, - }}, - {MySQLHost: "host2", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{}}, - {MySQLHost: "host3", MySQLPort: 10, GroupName: "group", UnresolvedMembers: []*db.GroupMember{}}, - }, nil, "group ongoing bootstrap"}, - } - for _, testCase := range testCases { - t.Run(testCase.testName, func(t *testing.T) { - group := SQLGroup{views: testCase.views, statsTags: []string{"ks", "0"}, logger: log.NewVTGRLogger("ks", "0")} - err := group.Resolve() - if testCase.errorMsg != "" { - assert.EqualError(t, err, testCase.errorMsg) - } else { - assert.NoError(t, err) - } - if testCase.expected != nil { - rv := group.resolvedView - expected := testCase.expected - assert.Equal(t, expected.view, rv.view) - assert.Equal(t, expected.groupName, rv.groupName) - } - }) - } -} diff --git a/go/vt/vtgr/controller/mock_refresh.go b/go/vt/vtgr/controller/mock_refresh.go deleted file mode 100644 index 30ed5a187e7..00000000000 --- a/go/vt/vtgr/controller/mock_refresh.go +++ /dev/null @@ -1,148 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: go/vt/vtgr/controller/refresh.go -package controller - -import ( - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - context "golang.org/x/net/context" - - topodata "vitess.io/vitess/go/vt/proto/topodata" - topo "vitess.io/vitess/go/vt/topo" -) - -// MockGRTopo is a mock of GRTopo interface. -type MockGRTopo struct { - ctrl *gomock.Controller - recorder *MockGRTopoMockRecorder -} - -// MockGRTopoMockRecorder is the mock recorder for MockGRTopo. -type MockGRTopoMockRecorder struct { - mock *MockGRTopo -} - -// NewMockGRTopo creates a new mock instance. -func NewMockGRTopo(ctrl *gomock.Controller) *MockGRTopo { - mock := &MockGRTopo{ctrl: ctrl} - mock.recorder = &MockGRTopoMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockGRTopo) EXPECT() *MockGRTopoMockRecorder { - return m.recorder -} - -// GetShard mocks base method. -func (m *MockGRTopo) GetShard(ctx context.Context, keyspace, shard string) (*topo.ShardInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetShard", ctx, keyspace, shard) - ret0, _ := ret[0].(*topo.ShardInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetShard indicates an expected call of GetShard. -func (mr *MockGRTopoMockRecorder) GetShard(ctx, keyspace, shard any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShard", reflect.TypeOf((*MockGRTopo)(nil).GetShard), ctx, keyspace, shard) -} - -// GetShardNames mocks base method. -func (m *MockGRTopo) GetShardNames(ctx context.Context, keyspace string) ([]string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetShardNames", ctx, keyspace) - ret0, _ := ret[0].([]string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetShardNames indicates an expected call of GetShardNames. -func (mr *MockGRTopoMockRecorder) GetShardNames(ctx, keyspace any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShardNames", reflect.TypeOf((*MockGRTopo)(nil).GetShardNames), ctx, keyspace) -} - -// GetTabletMapForShardByCell mocks base method. -func (m *MockGRTopo) GetTabletMapForShardByCell(ctx context.Context, keyspace, shard string, cells []string) (map[string]*topo.TabletInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetTabletMapForShardByCell", ctx, keyspace, shard, cells) - ret0, _ := ret[0].(map[string]*topo.TabletInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetTabletMapForShardByCell indicates an expected call of GetTabletMapForShardByCell. -func (mr *MockGRTopoMockRecorder) GetTabletMapForShardByCell(ctx, keyspace, shard, cells any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTabletMapForShardByCell", reflect.TypeOf((*MockGRTopo)(nil).GetTabletMapForShardByCell), ctx, keyspace, shard, cells) -} - -// LockShard mocks base method. -func (m *MockGRTopo) LockShard(ctx context.Context, keyspace, shard, action string) (context.Context, func(*error), error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LockShard", ctx, keyspace, shard, action) - ret0, _ := ret[0].(context.Context) - ret1, _ := ret[1].(func(*error)) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// LockShard indicates an expected call of LockShard. -func (mr *MockGRTopoMockRecorder) LockShard(ctx, keyspace, shard, action any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LockShard", reflect.TypeOf((*MockGRTopo)(nil).LockShard), ctx, keyspace, shard, action) -} - -// MockGRTmcClient is a mock of GRTmcClient interface. -type MockGRTmcClient struct { - ctrl *gomock.Controller - recorder *MockGRTmcClientMockRecorder -} - -// MockGRTmcClientMockRecorder is the mock recorder for MockGRTmcClient. -type MockGRTmcClientMockRecorder struct { - mock *MockGRTmcClient -} - -// NewMockGRTmcClient creates a new mock instance. -func NewMockGRTmcClient(ctrl *gomock.Controller) *MockGRTmcClient { - mock := &MockGRTmcClient{ctrl: ctrl} - mock.recorder = &MockGRTmcClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockGRTmcClient) EXPECT() *MockGRTmcClientMockRecorder { - return m.recorder -} - -// ChangeType mocks base method. -func (m *MockGRTmcClient) ChangeType(ctx context.Context, tablet *topodata.Tablet, dbType topodata.TabletType, semiSync bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ChangeType", ctx, tablet, dbType) - ret0, _ := ret[0].(error) - return ret0 -} - -// ChangeType indicates an expected call of ChangeType. -func (mr *MockGRTmcClientMockRecorder) ChangeType(ctx, tablet, dbType any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChangeType", reflect.TypeOf((*MockGRTmcClient)(nil).ChangeType), ctx, tablet, dbType) -} - -// Ping mocks base method. -func (m *MockGRTmcClient) Ping(ctx context.Context, tablet *topodata.Tablet) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Ping", ctx, tablet) - ret0, _ := ret[0].(error) - return ret0 -} - -// Ping indicates an expected call of Ping. -func (mr *MockGRTmcClientMockRecorder) Ping(ctx, tablet any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockGRTmcClient)(nil).Ping), ctx, tablet) -} diff --git a/go/vt/vtgr/controller/refresh.go b/go/vt/vtgr/controller/refresh.go deleted file mode 100644 index d7e78ba5ff6..00000000000 --- a/go/vt/vtgr/controller/refresh.go +++ /dev/null @@ -1,365 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "fmt" - "strconv" - "sync" - "sync/atomic" - "time" - - "vitess.io/vitess/go/vt/topo/topoproto" - - "golang.org/x/net/context" - - "vitess.io/vitess/go/stats" - "vitess.io/vitess/go/vt/logutil" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vtgr/config" - "vitess.io/vitess/go/vt/vtgr/db" - "vitess.io/vitess/go/vt/vtgr/inst" - "vitess.io/vitess/go/vt/vtgr/log" -) - -var ( - lockShardTimingsMs = stats.NewMultiTimings("lockShard", "time vtgr takes to lock the shard", []string{"operation", "success"}) -) - -// grInstance represents an instance that's running MySQL GR -// it wraps a InstanceKey plus some tablet related information -type grInstance struct { - instanceKey *inst.InstanceKey - tablet *topodatapb.Tablet - primaryTimeStamp time.Time - alias string -} - -// GRTopo is VTGR wrapper for topo server -type GRTopo interface { - GetShardNames(ctx context.Context, keyspace string) ([]string, error) - GetShard(ctx context.Context, keyspace, shard string) (*topo.ShardInfo, error) - GetTabletMapForShardByCell(ctx context.Context, keyspace, shard string, cells []string) (map[string]*topo.TabletInfo, error) - LockShard(ctx context.Context, keyspace, shard, action string) (context.Context, func(*error), error) -} - -// GRTmcClient is VTGR wrapper for tmc client -type GRTmcClient interface { - ChangeType(ctx context.Context, tablet *topodatapb.Tablet, dbType topodatapb.TabletType, semiSync bool) error - Ping(ctx context.Context, tablet *topodatapb.Tablet) error -} - -// GRShard stores the information about a Vitess shard that's running MySQL GR -type GRShard struct { - KeyspaceShard *topo.KeyspaceShard - cells []string - instances []*grInstance - primaryAlias string - shardStatusCollector *shardStatusCollector - sqlGroup *SQLGroup - ts GRTopo - tmc GRTmcClient - dbAgent db.Agent - - // Every GRShard tracks a unlock function after it grab a topo lock for the shard - // VTGR needs to release the topo lock before gracefully shutdown - unlock func(*error) - // mutex to protect unlock function access - unlockMu sync.Mutex - - // configuration - minNumReplicas int - localDbPort int - disableReadOnlyProtection bool - - transientErrorWaitTime time.Duration - bootstrapWaitTime time.Duration - - lastDiagnoseResult DiagnoseType - lastDiagnoseSince time.Time - - isActive atomic.Bool - - logger *log.Logger - - // lock prevents multiple go routine fights with each other - sync.Mutex -} - -// shardStatusCollector is used for collecting shard status -type shardStatusCollector struct { - status *ShardStatus - sync.Mutex -} - -// ShardStatus is used for debugging purpose to get current status of a shard -type ShardStatus struct { - Keyspace string - Shard string - Instances []string - Unreachables []string - Problematics []string - Primary string - DiagnoseResult DiagnoseType -} - -func newShardStatusCollector(keyspace, shard string) *shardStatusCollector { - return &shardStatusCollector{ - status: &ShardStatus{Keyspace: keyspace, Shard: shard}, - } -} - -// NewGRShard creates a new GRShard -func NewGRShard( - keyspace, shard string, - cells []string, - tmc GRTmcClient, - ts GRTopo, - dbAgent db.Agent, - config *config.VTGRConfig, - localDbPort int, - isActive bool) *GRShard { - grShard := &GRShard{ - KeyspaceShard: &topo.KeyspaceShard{Keyspace: keyspace, Shard: shard}, - cells: cells, - shardStatusCollector: newShardStatusCollector(keyspace, shard), - tmc: tmc, - ts: ts, - dbAgent: dbAgent, - unlock: nil, - sqlGroup: NewSQLGroup(config.BootstrapGroupSize, true, keyspace, shard), - minNumReplicas: config.MinNumReplica, - disableReadOnlyProtection: config.DisableReadOnlyProtection, - localDbPort: localDbPort, - logger: log.NewVTGRLogger(keyspace, shard), - transientErrorWaitTime: time.Duration(config.BackoffErrorWaitTimeSeconds) * time.Second, - bootstrapWaitTime: time.Duration(config.BootstrapWaitTimeSeconds) * time.Second, - } - grShard.isActive.Store(isActive) - return grShard -} - -// refreshTabletsInShardLocked is called by repair to get a fresh view of the shard -// The caller is responsible to make sure the lock on GRShard -func (shard *GRShard) refreshTabletsInShardLocked(ctx context.Context) { - instances, err := shard.refreshTabletsInShardInternal(ctx) - if err == nil { - shard.instances = instances - } - primary, err := shard.refreshPrimaryShard(ctx) - if err == nil { - shard.primaryAlias = primary - return - } - // If we failed to refreshPrimaryShard, use primary from local tablets - shard.primaryAlias = shard.findPrimaryFromLocalCell() -} - -// UpdateTabletsInShardWithLock updates the shard instances with a lock -func (shard *GRShard) UpdateTabletsInShardWithLock(ctx context.Context) { - instances, err := shard.refreshTabletsInShardInternal(ctx) - if err == nil { - // Take a per shard lock here when we actually refresh the data to avoid - // race conditions bewteen controller and repair tasks - shard.Lock() - shard.instances = instances - shard.Unlock() - } - primary, err := shard.refreshPrimaryShard(ctx) - // We set primary separately from instances so that if global topo is not available - // VTGR can still discover the new tablets from local cell - shard.Lock() - defer shard.Unlock() - if err == nil { - shard.primaryAlias = primary - return - } - shard.primaryAlias = shard.findPrimaryFromLocalCell() -} - -func (shard *GRShard) refreshTabletsInShardInternal(ctx context.Context) ([]*grInstance, error) { - keyspace, shardName := shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard - tablets, err := shard.ts.GetTabletMapForShardByCell(ctx, keyspace, shardName, shard.cells) - if err != nil { - shard.logger.Errorf("Error fetching tablets for keyspace/shardName %v/%v: %v", keyspace, shardName, err) - return nil, err - } - return parseTabletInfos(tablets), nil -} - -func (shard *GRShard) refreshPrimaryShard(ctx context.Context) (string, error) { - keyspace, shardName := shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard - si, err := shard.ts.GetShard(ctx, keyspace, shardName) - if err != nil { - shard.logger.Errorf("Error calling GetShard: %v", err) - return "", err - } - return topoproto.TabletAliasString(si.PrimaryAlias), nil -} - -// findPrimaryFromLocalCell iterates through the replicas stored in grShard and returns -// the one that's marked as primary -func (shard *GRShard) findPrimaryFromLocalCell() string { - var latestPrimaryTimestamp time.Time - var primaryInstance *grInstance - for _, instance := range shard.instances { - if instance.tablet.Type == topodatapb.TabletType_PRIMARY { - // It is possible that there are more than one master in topo server - // we should compare timestamp to pick the latest one - if latestPrimaryTimestamp.Before(instance.primaryTimeStamp) { - latestPrimaryTimestamp = instance.primaryTimeStamp - primaryInstance = instance - } - } - } - if primaryInstance != nil { - return primaryInstance.alias - } - return "" -} - -// parseTabletInfos replaces the replica reports for the shard key -// Note: this is not thread-safe -func parseTabletInfos(tablets map[string]*topo.TabletInfo) []*grInstance { - // collect all replicas - var newReplicas []*grInstance - for alias, tabletInfo := range tablets { - tablet := tabletInfo.Tablet - // Only monitor primary, replica and ronly tablet types - switch tablet.Type { - case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY: - // mysql hostname and port might be empty here if tablet is not running - // we will treat them as unreachable - instanceKey := inst.InstanceKey{ - Hostname: tablet.MysqlHostname, - Port: int(tablet.MysqlPort), - } - grInstance := grInstance{ - instanceKey: &instanceKey, - tablet: tablet, - primaryTimeStamp: logutil.ProtoToTime(tablet.PrimaryTermStartTime), - alias: alias, - } - newReplicas = append(newReplicas, &grInstance) - } - } - return newReplicas -} - -// LockShard locks the keyspace-shard on topo server to prevent others from executing conflicting actions. -func (shard *GRShard) LockShard(ctx context.Context, action string) (context.Context, error) { - if shard.KeyspaceShard.Keyspace == "" || shard.KeyspaceShard.Shard == "" { - return nil, fmt.Errorf("try to grab lock with incomplete information: %v/%v", shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard) - } - shard.unlockMu.Lock() - defer shard.unlockMu.Unlock() - if shard.unlock != nil { - return nil, fmt.Errorf("try to grab lock for %s/%s while the shard holds an unlock function", shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard) - } - start := time.Now() - ctx, unlock, err := shard.ts.LockShard(ctx, shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard, fmt.Sprintf("VTGR repairing %s", action)) - lockShardTimingsMs.Record([]string{action, strconv.FormatBool(err == nil)}, start) - if err != nil { - return nil, err - } - shard.unlock = unlock - return ctx, nil -} - -// UnlockShard unlocks the keyspace-shard on topo server -// and set the unlock function to nil in the container -func (shard *GRShard) UnlockShard() { - shard.unlockMu.Lock() - defer shard.unlockMu.Unlock() - if shard.unlock == nil { - shard.logger.Warningf("Shard %s/%s does not hold a lock", shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard) - return - } - var err error - shard.unlock(&err) - shard.unlock = nil -} - -func (shard *GRShard) findTabletByHostAndPort(host string, port int) *grInstance { - for _, instance := range shard.instances { - if instance.instanceKey.Hostname == host && instance.instanceKey.Port == port { - return instance - } - } - return nil -} - -func (shard *GRShard) getToleratedNumError() int { - quorum := len(shard.instances)/2 + 1 - return len(shard.instances) - quorum -} - -func (shard *GRShard) populateVTGRStatusLocked() { - var instanceList []string - for _, instance := range shard.instances { - instanceList = append(instanceList, instance.alias) - } - shard.shardStatusCollector.status.Instances = instanceList - if primary := shard.findShardPrimaryTablet(); primary != nil { - shard.shardStatusCollector.status.Primary = primary.alias - } -} - -// GetCurrentShardStatuses returns the status collector has -func (shard *GRShard) GetCurrentShardStatuses() ShardStatus { - shard.Lock() - collector := shard.shardStatusCollector - // dereference status so that we return a copy of the struct - status := *collector.status - shard.Unlock() - return status -} - -// OverrideRebootstrapGroupSize force override the group expectedBootstrapSize used in safety check for rebootstrap -func (shard *GRShard) OverrideRebootstrapGroupSize(groupSize int) error { - shard.Lock() - defer shard.Unlock() - shard.logger.Infof("Override rebootstrap group size=%v", groupSize) - shard.sqlGroup.rebootstrapSize = groupSize - return nil -} - -// GetUnlock returns the unlock function for the shard for testing -func (shard *GRShard) GetUnlock() func(*error) { - shard.unlockMu.Lock() - defer shard.unlockMu.Unlock() - return shard.unlock -} - -// SetIsActive sets isActive for the shard -func (shard *GRShard) SetIsActive(isActive bool) { - shard.logger.Infof("Setting is active to %v", isActive) - shard.isActive.Store(isActive) -} - -func (collector *shardStatusCollector) isUnreachable(instance *grInstance) bool { - if instance.instanceKey == nil || instance.instanceKey.Hostname == "" { - return true - } - for _, alias := range collector.status.Unreachables { - if instance.alias == alias { - return true - } - } - return false -} diff --git a/go/vt/vtgr/controller/refresh_test.go b/go/vt/vtgr/controller/refresh_test.go deleted file mode 100644 index a1bbef74fc7..00000000000 --- a/go/vt/vtgr/controller/refresh_test.go +++ /dev/null @@ -1,159 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "fmt" - "sort" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/vt/logutil" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" - "vitess.io/vitess/go/vt/vtgr/config" -) - -func TestRefreshTabletsInShard(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tablet1 := buildTabletInfo(uint32(0), testHost, testPort0, topodatapb.TabletType_PRIMARY, time.Time{}) - tablet2 := buildTabletInfo(uint32(1), testHost, testPort1, topodatapb.TabletType_SPARE, time.Time{}) - tablet3 := buildTabletInfo(uint32(2), testHost, 0, topodatapb.TabletType_REPLICA, time.Time{}) - testutil.AddTablet(ctx, t, ts, tablet1.Tablet, nil) - testutil.AddTablet(ctx, t, ts, tablet2.Tablet, nil) - testutil.AddTablet(ctx, t, ts, tablet3.Tablet, nil) - cfg := &config.VTGRConfig{BootstrapGroupSize: 3, MinNumReplica: 0, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, nil, ts, nil, cfg, testPort0, true) - assert.Equal(t, "ks", shard.shardStatusCollector.status.Keyspace) - assert.Equal(t, "0", shard.shardStatusCollector.status.Shard) - shard.refreshTabletsInShardLocked(context.Background()) - instances := shard.instances - // only have 2 instances here because we filter out the spare tablet - assert.Equal(t, 2, len(instances)) - sort.Slice(instances[:], func(i, j int) bool { - return instances[i].alias < instances[j].alias - }) - assert.Equal(t, testHost, instances[0].tablet.Hostname) - assert.Equal(t, int32(testPort0), instances[0].tablet.MysqlPort) - assert.Equal(t, topodatapb.TabletType_PRIMARY, instances[0].tablet.Type) - // host 3 is missing mysql host but we still put it in the instances list here - assert.Equal(t, testHost, instances[1].instanceKey.Hostname) - assert.Equal(t, int32(0), instances[1].tablet.MysqlPort) - assert.Equal(t, topodatapb.TabletType_REPLICA, instances[1].tablet.Type) -} - -func TestRefreshWithCells(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2", "cell3") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tablet1 := buildTabletInfoWithCell(uint32(0), testHost, "cell1", testPort0, topodatapb.TabletType_REPLICA, time.Time{}) - tablet2 := buildTabletInfoWithCell(uint32(1), testHost, "cell2", testPort1, topodatapb.TabletType_REPLICA, time.Time{}) - tablet3 := buildTabletInfoWithCell(uint32(2), testHost, "cell3", testPort2, topodatapb.TabletType_REPLICA, time.Time{}) - testutil.AddTablet(ctx, t, ts, tablet1.Tablet, nil) - testutil.AddTablet(ctx, t, ts, tablet2.Tablet, nil) - testutil.AddTablet(ctx, t, ts, tablet3.Tablet, nil) - cfg := &config.VTGRConfig{BootstrapGroupSize: 3, MinNumReplica: 0, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", []string{"cell1", "cell3"}, nil, ts, nil, cfg, testPort0, true) - shard.refreshTabletsInShardLocked(context.Background()) - instances := shard.instances - // only have 2 instances here because we are not watching cell2 - assert.Equal(t, 2, len(instances)) - sort.Slice(instances[:], func(i, j int) bool { - return instances[i].alias < instances[j].alias - }) - assert.Equal(t, "cell1-0000000000", instances[0].alias) - assert.Equal(t, "cell3-0000000002", instances[1].alias) -} - -func TestRefreshWithEmptyCells(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2", "cell3") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tablet1 := buildTabletInfoWithCell(uint32(0), testHost, "cell1", testPort0, topodatapb.TabletType_REPLICA, time.Time{}) - tablet2 := buildTabletInfoWithCell(uint32(1), testHost, "cell2", testPort1, topodatapb.TabletType_REPLICA, time.Time{}) - tablet3 := buildTabletInfoWithCell(uint32(2), testHost, "cell3", testPort2, topodatapb.TabletType_REPLICA, time.Time{}) - testutil.AddTablet(ctx, t, ts, tablet1.Tablet, nil) - testutil.AddTablet(ctx, t, ts, tablet2.Tablet, nil) - testutil.AddTablet(ctx, t, ts, tablet3.Tablet, nil) - cfg := &config.VTGRConfig{BootstrapGroupSize: 3, MinNumReplica: 0, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, nil, ts, nil, cfg, testPort0, true) - shard.refreshTabletsInShardLocked(context.Background()) - instances := shard.instances - // nil cell will return everything - assert.Equal(t, 3, len(instances)) - sort.Slice(instances[:], func(i, j int) bool { - return instances[i].alias < instances[j].alias - }) - assert.Equal(t, "cell1-0000000000", instances[0].alias) - assert.Equal(t, "cell2-0000000001", instances[1].alias) - assert.Equal(t, "cell3-0000000002", instances[2].alias) -} - -func TestLockRelease(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2", "cell3") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - cfg := &config.VTGRConfig{BootstrapGroupSize: 3, MinNumReplica: 0, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, nil, ts, nil, cfg, testPort0, true) - ctx, err := shard.LockShard(ctx, "") - assert.NoError(t, err) - // make sure we get the lock - err = shard.checkShardLocked(ctx) - assert.NoError(t, err) - assert.NotNil(t, shard.unlock) - shard.UnlockShard() - assert.Nil(t, shard.unlock) - err = shard.checkShardLocked(ctx) - assert.EqualError(t, err, "lost topology lock; aborting: shard ks/0 is not locked (no lockInfo in map)") -} - -func buildTabletInfo(id uint32, host string, mysqlPort int, ttype topodatapb.TabletType, primaryTermTime time.Time) *topo.TabletInfo { - return buildTabletInfoWithCell(id, host, "test_cell", mysqlPort, ttype, primaryTermTime) -} - -func buildTabletInfoWithCell(id uint32, host, cell string, mysqlPort int, ttype topodatapb.TabletType, primaryTermTime time.Time) *topo.TabletInfo { - alias := &topodatapb.TabletAlias{Cell: cell, Uid: id} - return &topo.TabletInfo{Tablet: &topodatapb.Tablet{ - Alias: alias, - Hostname: host, - MysqlHostname: host, - MysqlPort: int32(mysqlPort), - Keyspace: "ks", - Shard: "0", - Type: ttype, - PrimaryTermStartTime: logutil.TimeToProto(primaryTermTime), - Tags: map[string]string{"hostname": fmt.Sprintf("host_%d", id)}, - }} -} diff --git a/go/vt/vtgr/controller/repair.go b/go/vt/vtgr/controller/repair.go deleted file mode 100644 index a7fa64d7c97..00000000000 --- a/go/vt/vtgr/controller/repair.go +++ /dev/null @@ -1,767 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "errors" - "fmt" - "sort" - "strconv" - "sync" - "time" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/stats" - "vitess.io/vitess/go/vt/concurrency" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgr/db" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -var ( - repairTimingsMs = stats.NewMultiTimings("repairTimingsMs", "time vtgr takes to repair", []string{"status", "success"}) - unexpectedLockLost = stats.NewCountersWithMultiLabels("unexpectedLockLost", "unexpected lost of the lock", []string{"Keyspace", "Shard"}) - - abortRebootstrap bool -) - -func init() { - servenv.OnParseFor("vtgr", func(fs *pflag.FlagSet) { - fs.BoolVar(&abortRebootstrap, "abort_rebootstrap", false, "Don't allow vtgr to rebootstrap an existing group.") - }) -} - -// RepairResultCode is the code for repair -type RepairResultCode string - -const ( - // Success means successfully repaired - Success RepairResultCode = "Success" - // Fail means failed to repaire - Fail RepairResultCode = "Fail" - // Noop means do nothing - Noop RepairResultCode = "Noop" -) - -// Repair tries to fix shard based on the diagnose type -func (shard *GRShard) Repair(ctx context.Context, status DiagnoseType) (RepairResultCode, error) { - shard.Lock() - defer shard.Unlock() - var err error - code := Noop - switch status { - case DiagnoseTypeShardHasNoGroup: - code, err = shard.repairShardHasNoGroup(ctx) - case DiagnoseTypeShardHasInactiveGroup: - code, err = shard.repairShardHasInactiveGroup(ctx) - case DiagnoseTypeWrongPrimaryTablet: - code, err = shard.repairWrongPrimaryTablet(ctx) - case DiagnoseTypeUnconnectedReplica: - code, err = shard.repairUnconnectedReplica(ctx) - case DiagnoseTypeUnreachablePrimary: - code, err = shard.repairUnreachablePrimary(ctx) - case DiagnoseTypeInsufficientGroupSize: - code, err = shard.repairInsufficientGroupSize(ctx) - case DiagnoseTypeReadOnlyShard: - code, err = shard.repairReadOnlyShard(ctx) - case DiagnoseTypeBootstrapBackoff, DiagnoseTypeBackoffError: - code, err = shard.repairBackoffError(ctx, status) - case DiagnoseTypeError: - shard.logger.Errorf("%v is %v", formatKeyspaceShard(shard.KeyspaceShard), status) - case DiagnoseTypeHealthy: - start := time.Now() - repairTimingsMs.Record([]string{string(status), "true"}, start) - } - if status != DiagnoseTypeHealthy { - shard.logger.Infof("VTGR repaired %v status=%v | code=%v", formatKeyspaceShard(shard.KeyspaceShard), status, code) - } - return code, vterrors.Wrap(err, "vtgr repair") -} - -func (shard *GRShard) repairShardHasNoGroup(ctx context.Context) (RepairResultCode, error) { - ctx, err := shard.LockShard(ctx, "repairShardHasNoGroup") - if err != nil { - shard.logger.Warningf("repairShardHasNoPrimaryTablet fails to grab lock for the shard %v: %v", shard.KeyspaceShard, err) - return Noop, err - } - defer shard.UnlockShard() - shard.refreshTabletsInShardLocked(ctx) - // Diagnose() will call shardAgreedGroup as the first thing - // which will update mysqlGroup stored in the shard - status, err := shard.diagnoseLocked(ctx) - if err != nil { - shard.logger.Errorf("Failed to diagnose: %v", err) - return Fail, err - } - if status != DiagnoseTypeShardHasNoGroup { - shard.logger.Infof("Shard %v is no longer in DiagnoseTypeShardHasNoGroup: %v", formatKeyspaceShard(shard.KeyspaceShard), status) - return Noop, nil - } - start := time.Now() - err = shard.repairShardHasNoGroupAction(ctx) - repairTimingsMs.Record([]string{DiagnoseTypeShardHasNoGroup, strconv.FormatBool(err == nil)}, start) - if err != nil { - return Fail, err - } - return Success, nil -} - -func (shard *GRShard) repairShardHasNoGroupAction(ctx context.Context) error { - // If group is not empty AND there is at least one active group member - // we don't need to bootstrap. Instead we should try to join the group - mysqlGroup := shard.shardAgreedGroupName() - isAllOffline := shard.isAllOfflineOrError() - if mysqlGroup != "" { - shard.logger.Infof("Shard %v already have a group %v", formatKeyspaceShard(shard.KeyspaceShard), mysqlGroup) - return nil - } - // This should not really happen in reality - if mysqlGroup == "" && !isAllOffline { - return fmt.Errorf("shard %v has empty group name but some node is not OFFLINE", formatKeyspaceShard(shard.KeyspaceShard)) - } - - // Now we know group is null and there is no active node - // we should bootstrap the group - replicas := shard.instances - // Sanity check to make sure there is at least one instance - if len(replicas) == 0 { - shard.logger.Warningf("Cannot find any instance for the shard %v", formatKeyspaceShard(shard.KeyspaceShard)) - return nil - } - if !shard.sqlGroup.IsSafeToBootstrap() { - return errors.New("unsafe to bootstrap group") - } - var candidate *grInstance - sort.SliceStable(replicas, func(i, j int) bool { - return replicas[i].alias < replicas[j].alias - }) - for _, replica := range replicas { - if !shard.shardStatusCollector.isUnreachable(replica) { - candidate = replica - break - } - } - if candidate == nil { - return errors.New("fail to find any candidate to bootstrap") - } - // Bootstrap the group - shard.logger.Infof("Bootstrapping the group for %v on host=%v", formatKeyspaceShard(shard.KeyspaceShard), candidate.instanceKey.Hostname) - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return err - } - if err := shard.dbAgent.BootstrapGroupLocked(candidate.instanceKey); err != nil { - // if bootstrap failed, the next one that gets the lock will try to do it again - shard.logger.Errorf("Failed to bootstrap mysql group on %v: %v", candidate.instanceKey.Hostname, err) - return err - } - shard.logger.Infof("Bootstrapped the group for %v", formatKeyspaceShard(shard.KeyspaceShard)) - return nil -} - -func (shard *GRShard) repairShardHasInactiveGroup(ctx context.Context) (RepairResultCode, error) { - ctx, err := shard.LockShard(ctx, "repairShardHasInactiveGroup") - if err != nil { - shard.logger.Warningf("repairShardHasInactiveGroup fails to grab lock for the shard %v: %v", shard.KeyspaceShard, err) - return Noop, err - } - defer shard.UnlockShard() - shard.refreshTabletsInShardLocked(ctx) - // Diagnose() will call shardAgreedGroup as the first thing - // which will update mysqlGroup stored in the shard - status, err := shard.diagnoseLocked(ctx) - if err != nil { - shard.logger.Errorf("Failed to diagnose: %v", err) - return Fail, err - } - if status != DiagnoseTypeShardHasInactiveGroup { - shard.logger.Infof("Shard %v is no longer in DiagnoseTypeShardHasInactiveGroup: %v", formatKeyspaceShard(shard.KeyspaceShard), status) - return Noop, nil - } - // Now we know the shard has an agreed group but no member in it - // We should find one with the largest GTID set as the - // new mysql primary to bootstrap the group - start := time.Now() - err = shard.stopAndRebootstrap(ctx) - repairTimingsMs.Record([]string{DiagnoseTypeShardHasInactiveGroup, strconv.FormatBool(err == nil)}, start) - if err != nil { - return Fail, err - } - return Success, nil -} - -func (shard *GRShard) repairBackoffError(ctx context.Context, diagnose DiagnoseType) (RepairResultCode, error) { - ctx, err := shard.LockShard(ctx, "repairBackoffError") - if err != nil { - shard.logger.Warningf("repairBackoffError fails to grab lock for the shard %v: %v", shard.KeyspaceShard, err) - return Noop, err - } - defer shard.UnlockShard() - shard.refreshTabletsInShardLocked(ctx) - status, err := shard.diagnoseLocked(ctx) - if err != nil { - shard.logger.Errorf("Failed to diagnose: %v", err) - return Fail, err - } - if status != diagnose { - shard.logger.Infof("Shard %v is no longer in %v: %v", formatKeyspaceShard(shard.KeyspaceShard), diagnose, status) - return Noop, nil - } - if shard.lastDiagnoseResult != diagnose { - shard.logger.Infof("diagnose shard as %v but last diagnose result was %v", diagnose, shard.lastDiagnoseResult) - return Noop, nil - } - now := time.Now() - var waitTime time.Duration - switch diagnose { - case DiagnoseTypeBackoffError: - waitTime = shard.transientErrorWaitTime - case DiagnoseTypeBootstrapBackoff: - waitTime = shard.bootstrapWaitTime - default: - return Fail, fmt.Errorf("unsupported diagnose for repairBackoffError: %v", diagnose) - } - if now.Sub(shard.lastDiagnoseSince) < waitTime { - shard.logger.Infof("Detected %v at %v. In wait time for network partition", diagnose, shard.lastDiagnoseSince) - return Noop, nil - } - shard.logger.Infof("Detected %v at %v. Start repairing after %v", diagnose, shard.lastDiagnoseSince, shard.transientErrorWaitTime) - err = shard.stopAndRebootstrap(ctx) - repairTimingsMs.Record([]string{DiagnoseTypeBackoffError, strconv.FormatBool(err == nil)}, now) - if err != nil { - return Fail, err - } - return Success, nil -} - -func (shard *GRShard) stopAndRebootstrap(ctx context.Context) error { - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return err - } - // Before bootstrap the group, we need to stop group first - // abort aggressively here as soon as we encounter an error - // StopGroupLocked will check if instance is NOT in "ONLINE"/"RECOVERING" state (i.e., UNREACHABLE, ERROR or OFFLINE) - errorRecorder := shard.forAllInstances(func(instance *grInstance, wg *sync.WaitGroup, er concurrency.ErrorRecorder) { - defer wg.Done() - status := shard.sqlGroup.GetStatus(instance.instanceKey) - if status != nil && status.State == db.OFFLINE { - shard.logger.Infof("stop group replication on %v skipped because it is already OFFLINE", instance.alias) - return - } - shard.logger.Infof("stop group replication on %v", instance.alias) - err := shard.dbAgent.StopGroupLocked(instance.instanceKey) - if err != nil { - if !unreachableError(err) { - er.RecordError(err) - } - shard.logger.Warningf("Error during stop group replication on %v: %v", instance.instanceKey.Hostname, err) - } - }) - // We don't check allowPartialUnhealthyNodes here because we don't record unreachableError here - // hence if errorRecorder has error, it indicates the mysqld is still reachable but there is nothing - // else went wrong. - if errorRecorder.HasErrors() { - shard.logger.Errorf("Failed to stop group replication %v", errorRecorder.Error()) - return errorRecorder.Error() - } - shard.logger.Infof("Stop the group for %v", formatKeyspaceShard(shard.KeyspaceShard)) - shard.logger.Info("Start find candidate to rebootstrap") - candidate, err := shard.findRebootstrapCandidate(ctx) - if err != nil { - shard.logger.Errorf("Failed to find rebootstrap candidate: %v", err) - return err - } - shard.refreshSQLGroup() - if !shard.sqlGroup.IsSafeToRebootstrap() { - return errors.New("unsafe to bootstrap group") - } - if abortRebootstrap { - shard.logger.Warningf("Abort stopAndRebootstrap because rebootstrap hook override") - return errForceAbortBootstrap - } - shard.logger.Infof("Rebootstrap %v on %v", formatKeyspaceShard(shard.KeyspaceShard), candidate.instanceKey.Hostname) - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return err - } - uuid := shard.sqlGroup.GetGroupName() - if uuid == "" { - return errors.New("trying to rebootstrap without uuid") - } - return shard.dbAgent.RebootstrapGroupLocked(candidate.instanceKey, uuid) -} - -// allowPartialUnhealthyNodes returns true if rebootstrapSize is set to non-zero -// and the error we get is less than (total_num_tablet - rebootstrapSize) -func (shard *GRShard) allowPartialUnhealthyNodes(errorRecorder *concurrency.AllErrorRecorder) bool { - if shard.sqlGroup.rebootstrapSize != 0 && len(shard.instances)-shard.sqlGroup.rebootstrapSize >= len(errorRecorder.GetErrors()) { - shard.logger.Warningf("Allow unhealthy nodes during the reboot group_size=%v, rebootstrap_config=%v, error=%v", shard.sqlGroup.expectedBootstrapSize, shard.sqlGroup.rebootstrapSize, len(errorRecorder.GetErrors())) - return true - } - return false -} - -func (shard *GRShard) getGTIDSetFromAll(skipPrimary bool) (*groupGTIDRecorder, *concurrency.AllErrorRecorder, error) { - if len(shard.instances) == 0 { - return nil, nil, fmt.Errorf("%v has 0 instance", formatKeyspaceShard(shard.KeyspaceShard)) - } - // Before we do failover, we first verify if there is no one agreed group name. - // If not, VTGR is not smart enough to figure out how to failover - // Note: the caller should make sure the mysqlGroup is refreshed after we grab a shard level lock - mysqlGroup := shard.shardAgreedGroupName() - if mysqlGroup == "" { - return nil, nil, fmt.Errorf("unable to find an agreed group name in %v", formatKeyspaceShard(shard.KeyspaceShard)) - } - primary := shard.findShardPrimaryTablet() - var mysqlPrimaryHost string - var mysqlPrimaryPort int - // skipPrimary is true when we manual failover or if there is a unreachalbe primary tablet - // in both case, there should be a reconciled primary tablet - if skipPrimary && primary != nil { - status := shard.sqlGroup.GetStatus(primary.instanceKey) - mysqlPrimaryHost, mysqlPrimaryPort = status.HostName, status.Port - shard.logger.Infof("Found primary instance from MySQL on %v", mysqlPrimaryHost) - } - gtidRecorder := &groupGTIDRecorder{} - // Iterate through all the instances in the shard and find the one with largest GTID set with best effort - // We wrap it with forAllInstances so that the failover can continue if there is a host - // that is unreachable - errorRecorder := shard.forAllInstances(func(instance *grInstance, wg *sync.WaitGroup, er concurrency.ErrorRecorder) { - defer wg.Done() - if skipPrimary && instance.instanceKey.Hostname == mysqlPrimaryHost && instance.instanceKey.Port == mysqlPrimaryPort { - shard.logger.Infof("Skip %v to failover to a non-primary node", mysqlPrimaryHost) - return - } - gtids, err := shard.dbAgent.FetchApplierGTIDSet(instance.instanceKey) - if err != nil { - er.RecordError(err) - shard.logger.Errorf("%v get error while fetch applier GTIDs: %v", instance.alias, err) - shard.shardStatusCollector.recordProblematics(instance) - if unreachableError(err) { - shard.shardStatusCollector.recordUnreachables(instance) - } - return - } - if gtids == nil { - shard.logger.Warningf("[failover candidate] skip %s with empty gtid", instance.alias) - return - } - gtidRecorder.recordGroupGTIDs(gtids, instance) - }) - return gtidRecorder, errorRecorder, nil -} - -func (shard *GRShard) findRebootstrapCandidate(ctx context.Context) (*grInstance, error) { - gtidRecorder, errorRecorder, err := shard.getGTIDSetFromAll(false) - if err != nil { - shard.logger.Errorf("Failed to get gtid from all: %v", err) - return nil, err - } - err = errorRecorder.Error() - // We cannot tolerate any error from mysql during a rebootstrap. - if err != nil && !shard.allowPartialUnhealthyNodes(errorRecorder) { - shard.logger.Errorf("Failed to fetch all GTID with forAllInstances for rebootstrap: %v", err) - return nil, err - } - candidate, err := shard.findFailoverCandidateFromRecorder(ctx, gtidRecorder, nil) - if err != nil { - shard.logger.Errorf("Failed to find rebootstrap candidate by GTID after forAllInstances: %v", err) - return nil, err - } - if candidate == nil { - return nil, fmt.Errorf("failed to find rebootstrap candidate for %v", formatKeyspaceShard(shard.KeyspaceShard)) - } - if !shard.instanceReachable(ctx, candidate) { - shard.logger.Errorf("rebootstrap candidate %v (%v) is not reachable via ping", candidate.alias, candidate.instanceKey.Hostname) - return nil, fmt.Errorf("%v is unreachable", candidate.alias) - } - shard.logger.Infof("%v is the rebootstrap candidate", candidate.alias) - return candidate, nil -} - -// Caller of this function should make sure it gets the shard lock and it has the -// latest view of a shard. Otherwise, we might skip the wrong node when we locate the candidate -func (shard *GRShard) findFailoverCandidate(ctx context.Context) (*grInstance, error) { - gtidRecorder, errorRecorder, err := shard.getGTIDSetFromAll(true) - if err != nil { - shard.logger.Errorf("Failed to get gtid from all: %v", err) - return nil, err - } - err = errorRecorder.Error() - // During the repair for unreachable primary we still have a mysql group. - // Failover within the group is safe, finding the largest GTID is an optimization. - // therefore we don't check error from errorRecorder just log it - if err != nil { - shard.logger.Warningf("Errors when fetch all GTID with forAllInstances for failover: %v", err) - } - shard.forAllInstances(func(instance *grInstance, wg *sync.WaitGroup, er concurrency.ErrorRecorder) { - defer wg.Done() - if !shard.instanceReachable(ctx, instance) { - shard.logger.Errorf("%v is not reachable via ping", instance.alias) - shard.shardStatusCollector.recordProblematics(instance) - shard.shardStatusCollector.recordUnreachables(instance) - } - }) - var candidate *grInstance - candidate, err = shard.findFailoverCandidateFromRecorder(ctx, gtidRecorder, func(c context.Context, instance *grInstance) bool { - return !shard.shardStatusCollector.isUnreachable(instance) - }) - if err != nil { - shard.logger.Errorf("Failed to find failover candidate by GTID after forAllInstances: %v", err) - return nil, err - } - if candidate == nil { - return nil, fmt.Errorf("failed to find failover candidate for %v", formatKeyspaceShard(shard.KeyspaceShard)) - } - shard.logger.Infof("%v is the failover candidate", candidate.alias) - return candidate, nil -} - -func (shard *GRShard) repairWrongPrimaryTablet(ctx context.Context) (RepairResultCode, error) { - ctx, err := shard.LockShard(ctx, "repairWrongPrimaryTablet") - if err != nil { - shard.logger.Warningf("repairWrongPrimaryTablet fails to grab lock for the shard %v: %v", shard.KeyspaceShard, err) - return Noop, err - } - defer shard.UnlockShard() - // We grab shard level lock and check again if there is no primary - // to avoid race conditions - shard.refreshTabletsInShardLocked(ctx) - status, err := shard.diagnoseLocked(ctx) - if err != nil { - shard.logger.Errorf("Failed to diagnose: %v", err) - return Fail, err - } - if status != DiagnoseTypeWrongPrimaryTablet { - shard.logger.Infof("Shard %v is no longer in DiagnoseTypeWrongPrimaryTablet: %v", formatKeyspaceShard(shard.KeyspaceShard), status) - return Noop, nil - } - start := time.Now() - err = shard.fixPrimaryTabletLocked(ctx) - repairTimingsMs.Record([]string{DiagnoseTypeWrongPrimaryTablet, strconv.FormatBool(err == nil)}, start) - if err != nil { - return Fail, err - } - return Success, nil -} - -// fixPrimaryTabletLocked changes Vitess primary tablet based on mysql group -func (shard *GRShard) fixPrimaryTabletLocked(ctx context.Context) error { - host, port, isActive := shard.sqlGroup.GetPrimary() - if !isActive { - return db.ErrGroupInactive - } - // Primary tablet does not run mysql primary, we need to change it accordingly - candidate := shard.findTabletByHostAndPort(host, port) - if candidate == nil { - return errMissingPrimaryTablet - } - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return err - } - err := shard.tmc.ChangeType(ctx, candidate.tablet, topodatapb.TabletType_PRIMARY, false) - if err != nil { - return fmt.Errorf("failed to change type to primary on %v: %v", candidate.alias, err) - } - shard.logger.Infof("Successfully make %v the primary tablet", candidate.alias) - return nil -} - -// repairUnconnectedReplica usually handle the case when there is a DiagnoseTypeHealthy tablet and -// it is not connected to mysql primary node -func (shard *GRShard) repairUnconnectedReplica(ctx context.Context) (RepairResultCode, error) { - ctx, err := shard.LockShard(ctx, "repairUnconnectedReplica") - if err != nil { - shard.logger.Warningf("repairUnconnectedReplica fails to grab lock for the shard %v: %v", formatKeyspaceShard(shard.KeyspaceShard), err) - return Noop, err - } - defer shard.UnlockShard() - shard.refreshTabletsInShardLocked(ctx) - status, err := shard.diagnoseLocked(ctx) - if err != nil { - shard.logger.Errorf("Failed to diagnose: %v", err) - return Fail, err - } - if status != DiagnoseTypeUnconnectedReplica { - shard.logger.Infof("Shard %v is no longer in DiagnoseTypeUnconnectedReplica: %v", formatKeyspaceShard(shard.KeyspaceShard), status) - return Noop, nil - } - start := time.Now() - err = shard.repairUnconnectedReplicaAction(ctx) - repairTimingsMs.Record([]string{DiagnoseTypeUnconnectedReplica, strconv.FormatBool(err == nil)}, start) - if err != nil { - return Fail, err - } - return Success, nil -} - -func (shard *GRShard) repairUnconnectedReplicaAction(ctx context.Context) error { - primaryInstance := shard.findShardPrimaryTablet() - target, err := shard.disconnectedInstance() - if err != nil { - return err - } - if target == nil { - shard.logger.Infof("there is no instance without group for %v", formatKeyspaceShard(shard.KeyspaceShard)) - return nil - } - shard.logger.Infof("Connecting replica %v to %v", target.instanceKey.Hostname, primaryInstance.instanceKey.Hostname) - status := shard.sqlGroup.GetStatus(target.instanceKey) - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return err - } - if status != nil && status.State != db.OFFLINE { - shard.logger.Infof("stop group replication on %v (%v) before join the group", target.alias, status.State) - err := shard.dbAgent.StopGroupLocked(target.instanceKey) - if err != nil { - shard.logger.Errorf("Failed to stop group replication on %v: %v", target.instanceKey.Hostname, err) - return err - } - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return err - } - } - return shard.dbAgent.JoinGroupLocked(target.instanceKey, primaryInstance.instanceKey) -} - -func (shard *GRShard) repairUnreachablePrimary(ctx context.Context) (RepairResultCode, error) { - ctx, err := shard.LockShard(ctx, "repairUnreachablePrimary") - if err != nil { - shard.logger.Warningf("repairUnreachablePrimary fails to grab lock for the shard %v: %v", formatKeyspaceShard(shard.KeyspaceShard), err) - return Noop, err - } - defer shard.UnlockShard() - shard.refreshTabletsInShardLocked(ctx) - status, err := shard.diagnoseLocked(ctx) - if err != nil { - shard.logger.Errorf("Failed to diagnose: %v", err) - return Fail, err - } - if status != DiagnoseTypeUnreachablePrimary { - shard.logger.Infof("Shard %v is no longer in DiagnoseTypeUnreachablePrimary: %v", formatKeyspaceShard(shard.KeyspaceShard), status) - return Noop, nil - } - // We are here because either: - // 1. we have a primary tablet, but it's not reachable - // 2. we cannot find primary tablet but we do have a mysql group - // we need to failover mysql manually - // - // other case will be handled by different testGroupInput, e.g., - // has reachable primary tablet, but run on different node than mysql -> DiagnoseTypeWrongPrimaryTablet - start := time.Now() - err = shard.failoverLocked(ctx) - repairTimingsMs.Record([]string{DiagnoseTypeUnreachablePrimary, strconv.FormatBool(err == nil)}, start) - if err != nil { - return Fail, err - } - return Success, nil -} - -func (shard *GRShard) repairInsufficientGroupSize(ctx context.Context) (RepairResultCode, error) { - ctx, err := shard.LockShard(ctx, "repairInsufficientGroupSize") - if err != nil { - shard.logger.Warningf("repairInsufficientGroupSize fails to grab lock for the shard %v: %v", formatKeyspaceShard(shard.KeyspaceShard), err) - return Noop, err - } - defer shard.UnlockShard() - shard.refreshTabletsInShardLocked(ctx) - status, err := shard.diagnoseLocked(ctx) - if err != nil { - shard.logger.Errorf("Failed to diagnose: %v", err) - return Fail, err - } - if status != DiagnoseTypeInsufficientGroupSize { - shard.logger.Infof("Shard %v is no longer in DiagnoseTypeInsufficientGroupSize: %v", formatKeyspaceShard(shard.KeyspaceShard), status) - return Noop, nil - } - // We check primary tablet is consistent with sql primary before InsufficientGroupSize - // therefore primary we found here is correct and healthy - primary := shard.findShardPrimaryTablet() - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return Fail, err - } - // mysql group will set super_read_only properly automatically - // https://mysqlhighavailability.com/protecting-your-data-fail-safe-enhancements-to-group-replication/ - // since Vitess only knows one writable node (primary tablet) if we want to make sure there is no write - // after there is insufficient members, we can just set primary mysql node to be read only - err = shard.dbAgent.SetReadOnly(primary.instanceKey, true) - if err != nil { - return Fail, err - } - return Success, nil -} - -func (shard *GRShard) repairReadOnlyShard(ctx context.Context) (RepairResultCode, error) { - ctx, err := shard.LockShard(ctx, "repairReadOnlyShard") - if err != nil { - shard.logger.Warningf("repairReadOnlyShard fails to grab lock for the shard %v: %v", formatKeyspaceShard(shard.KeyspaceShard), err) - return Noop, err - } - defer shard.UnlockShard() - shard.refreshTabletsInShardLocked(ctx) - status, err := shard.diagnoseLocked(ctx) - if err != nil { - shard.logger.Errorf("Failed to diagnose: %v", err) - return Fail, err - } - if status != DiagnoseTypeReadOnlyShard { - shard.logger.Infof("Shard %v is no longer in DiagnoseTypeReadOnlyShard: %v", formatKeyspaceShard(shard.KeyspaceShard), status) - return Noop, nil - } - primary := shard.findShardPrimaryTablet() - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return Fail, err - } - // undo what we did repairInsufficientGroupSize - err = shard.dbAgent.SetReadOnly(primary.instanceKey, false) - if err != nil { - return Fail, err - } - return Success, nil -} - -// Failover takes a shard and find an node with largest GTID as the mysql primary of the group -func (shard *GRShard) Failover(ctx context.Context) error { - ctx, err := shard.LockShard(ctx, "Failover") - if err != nil { - shard.logger.Warningf("Failover fails to grab lock for the shard %v: %v", formatKeyspaceShard(shard.KeyspaceShard), err) - return err - } - defer shard.UnlockShard() - shard.refreshTabletsInShardLocked(ctx) - return shard.failoverLocked(ctx) -} - -func (shard *GRShard) failoverLocked(ctx context.Context) error { - candidate, err := shard.findFailoverCandidate(ctx) - if err != nil { - shard.logger.Errorf("Failed to find failover candidate: %v", err) - return err - } - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return err - } - err = shard.dbAgent.Failover(candidate.instanceKey) - if err != nil { - shard.logger.Errorf("Failed to failover mysql to %v", candidate.alias) - return err - } - shard.logger.Infof("Successfully failover MySQL to %v for %v", candidate.instanceKey.Hostname, formatKeyspaceShard(shard.KeyspaceShard)) - if !shard.isActive.Load() { - shard.logger.Infof("Skip vttablet failover on an inactive shard %v", formatKeyspaceShard(shard.KeyspaceShard)) - return nil - } - // Make sure we still hold the topo server lock before moving on - if err := shard.checkShardLocked(ctx); err != nil { - return err - } - err = shard.tmc.ChangeType(ctx, candidate.tablet, topodatapb.TabletType_PRIMARY, false) - if err != nil { - shard.logger.Errorf("Failed to failover Vitess %v", candidate.alias) - return err - } - shard.logger.Infof("Successfully failover Vitess to %v for %v", candidate.alias, formatKeyspaceShard(shard.KeyspaceShard)) - return nil -} - -func (shard *GRShard) findFailoverCandidateFromRecorder(ctx context.Context, recorder *groupGTIDRecorder, check func(context.Context, *grInstance) bool) (*grInstance, error) { - if len(recorder.gtidWithInstances) == 0 { - return nil, fmt.Errorf("empty failover candidate list for %v", formatKeyspaceShard(shard.KeyspaceShard)) - } - // Sort the gtidWithInstances slice so that we have consistent candidate - // in case they have same gtid set - recorder.sort() - for _, gtidInst := range recorder.gtidWithInstances { - shard.logger.Infof("[failover candidates] %s gtid %s", gtidInst.instance.alias, gtidInst.gtids.String()) - } - var largestGTIDs mysql.GTIDSet - var candidate *grInstance - var divergentCandidates []string - // All the instances in the recorder have a reachable mysqld - // hence anyone is a valid failover candidate - for _, elem := range recorder.gtidWithInstances { - gtids := elem.gtids - inst := elem.instance - if check != nil && !check(ctx, inst) { - shard.logger.Warningf("Skip %v as candidate with gtid %v because it failed the check", inst.alias, gtids.String()) - continue - } - if largestGTIDs == nil { - largestGTIDs = gtids - candidate = inst - continue - } - // If largestGTIDs is subset of current gtids, it means instance has larger GTID than candidate - // we need to swap them out - isSubset, isSuperset := compareGTIDSet(largestGTIDs, gtids) - if isSubset { - largestGTIDs = gtids - candidate = inst - continue - } - // largestGTIDs is neither subset nor super set of gtids - // we log and append to candidates so that we know there is a problem in the group - // after the iteration - if !isSuperset { - shard.logger.Errorf("FetchGroupView divergent GITD set from host=%v GTIDSet=%v", inst.instanceKey.Hostname, gtids) - divergentCandidates = append(divergentCandidates, inst.alias) - } - } - // unless GTID set diverged, the candidates should be empty - if len(divergentCandidates) > 0 { - divergentCandidates = append(divergentCandidates, candidate.alias) - return nil, fmt.Errorf("found more than one failover candidates by GTID set for %v: %v", formatKeyspaceShard(shard.KeyspaceShard), divergentCandidates) - } - return candidate, nil -} - -func compareGTIDSet(set1, set2 mysql.GTIDSet) (bool, bool) { - isSubset := set2.Contains(set1) - // If set1 is subset of set2 we find a GTID super set and just need to record it - if isSubset { - return true, false - } - // If set1 is not a subset of set2 we need to see if set1 is actually a super set of set2 - // this is to controller GTID set divergence - isSubset = set1.Contains(set2) - // We know set1 is not subset of set2 if set2 is also not subset of set1, it means - // there is a divergent in GTID sets - return false, isSubset -} - -func (shard *GRShard) checkShardLocked(ctx context.Context) error { - if err := topo.CheckShardLocked(ctx, shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard); err != nil { - labels := []string{shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard} - unexpectedLockLost.Add(labels, 1) - shard.logger.Errorf("lost topology lock; aborting") - return vterrors.Wrap(err, "lost topology lock; aborting") - } - return nil -} diff --git a/go/vt/vtgr/controller/repair_test.go b/go/vt/vtgr/controller/repair_test.go deleted file mode 100644 index ada1def2cff..00000000000 --- a/go/vt/vtgr/controller/repair_test.go +++ /dev/null @@ -1,1355 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "errors" - "fmt" - "math/rand" - "strconv" - "strings" - "sync" - "testing" - "time" - - "vitess.io/vitess/go/mysql" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" - "vitess.io/vitess/go/vt/vtgr/config" - "vitess.io/vitess/go/vt/vtgr/db" - "vitess.io/vitess/go/vt/vtgr/inst" - - gomock "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" -) - -const repairGroupSize = 3 - -func TestRepairShardHasNoGroup(t *testing.T) { - type data struct { - mysqlhost string - mysqlport int - groupName string - readOnly bool - groupInput []db.TestGroupState - ttype topodatapb.TabletType - } - var testcases = []struct { - name string - expectedCalls int - errorMsg string - inputs []data - }{ - {"shard without group", 1, "", []data{ - {testHost, testPort0, "", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"healthy shard", 0, "", []data{ - {testHost, testPort0, "group", false, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {testHost, testPort1, "group", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"no active member for group", 0, "", []data{ // this should rebootstrap a group by DiagnoseTypeShardHasInactiveGroup - {testHost, testPort0, "group", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "", false, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"raise error for unreachable primary", 0, "", []data{ // shoud be ShardHasInactiveGroup - {testHost, testPort0, "group", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "UNREACHABLE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"raise error without bootstrap with only one reachable node", 0, "vtgr repair: fail to diagnose ShardHasNoGroup with 1 nodes", []data{ - {"", 0, "group", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - {"", testPort2, "", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"raise error when there are not enough members", 0, "vtgr repair: fail to diagnose ShardHasNoGroup with 1 nodes", []data{ - {testHost, testPort0, "", true, []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - } - tablets := make(map[string]*topo.TabletInfo) - for _, tt := range testcases { - t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - inputMap := make(map[int]testGroupInput) - dbAgent. - EXPECT(). - // RepairShardHasNoGroup is fixed by calling BootstrapGroupLocked - BootstrapGroupLocked(gomock.Any()). - DoAndReturn(func(target *inst.InstanceKey) error { - if target.Hostname == "" || target.Port == 0 { - return errors.New("invalid mysql instance key") - } - input := inputMap[target.Port] - groupState := input.groupState - if len(groupState) == 1 && groupState[0].MemberState == "OFFLINE" { - groupState[0].MemberState = "ONLINE" - groupState[0].MemberRole = "PRIMARY" - groupState[0].MemberHost = target.Hostname - groupState[0].MemberPort = strconv.Itoa(target.Port) - input.groupState = groupState - } else { - for i, s := range groupState { - if s.MemberHost == target.Hostname { - s.MemberState = "ONLINE" - s.MemberRole = "PRIMARY" - groupState[i] = s - } - input.groupState = groupState - } - } - inputMap[target.Port] = input - return nil - }). - Times(tt.expectedCalls) - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(testPort0+i), input.mysqlhost, testPort0+i, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - if tablet.Type == topodatapb.TabletType_PRIMARY { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - tablets[tablet.AliasString()] = tablet - inputMap[input.mysqlport] = testGroupInput{ - input.groupName, - input.readOnly, - 0, - input.groupInput, - nil, - } - dbAgent. - EXPECT(). - FetchGroupView(gomock.Eq(tablet.AliasString()), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[target.Port] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). - AnyTimes() - } - tmc. - EXPECT(). - Ping(gomock.Any(), gomock.Any()). - Return(nil). - AnyTimes() - cfg := &config.VTGRConfig{BootstrapGroupSize: repairGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - shard.UpdateTabletsInShardWithLock(ctx) - _, err := shard.Repair(ctx, DiagnoseTypeShardHasNoGroup) - if tt.errorMsg == "" { - assert.NoError(t, err) - } else { - assert.EqualError(t, err, tt.errorMsg) - } - }) - } -} - -func TestRepairShardHasInactiveGroup(t *testing.T) { - type data struct { - mysqlhost string - mysqlport int - groupName string - groupInput []db.TestGroupState - pingable bool - gtid mysql.GTIDSet - ttype topodatapb.TabletType - } - sid1 := "3e11fa47-71ca-11e1-9e33-c80aa9429562" - var testcases = []struct { - name string - errorMsg string - expectedCandidatePort int - rebootstrapSize int - inputs []data - }{ - {"shard has inactive group", "", testPort0, 0, []data{ - {testHost, testPort0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_PRIMARY}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"shard has inactive group and partial group name", "", testPort0, 0, []data{ - {testHost, testPort0, "", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_PRIMARY}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"unreachable rebootstrap candidate", "vtgr repair: test_cell-0000017000 is unreachable", 0, 0, []data{ - {testHost, testPort0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, false, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_PRIMARY}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"inactive shard with empty gtid", "", testPort0, 0, []data{ - {testHost, testPort0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet("", ""), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet("", ""), topodatapb.TabletType_REPLICA}, - }}, - {"shard has more than one group", "vtgr repair: fail to refreshSQLGroup: group has split brain", 0, 0, []data{ // vtgr raises error - {testHost, testPort0, "group1", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "group2", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group1", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"shard has inconsistent gtids", "vtgr repair: found more than one failover candidates by GTID set for ks/0", 0, 0, []data{ // vtgr raises error - {testHost, testPort0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet("264a8230-67d2-11eb-acdd-0a8d91f24125", "1-9"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"error on one unreachable mysql", "vtgr repair: fail to diagnose ShardHasInactiveGroup with 2 nodes expecting 3", 0, 0, []data{ - {"", 0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-11"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"error on one unreachable tablet", "vtgr repair: test_cell-0000017000 is unreachable", 0, 0, []data{ - {testHost, testPort0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, false, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"shard has active member", "", 0, 0, []data{ // vtgr sees an active node it should not try to bootstrap - {testHost, testPort0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: "host_2", MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"shard has active member but more than one group", "vtgr repair: fail to refreshSQLGroup: group has split brain", 0, 0, []data{ // split brain should overweight active member diagnose - {testHost, testPort0, "group1", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort1, "group1", []db.TestGroupState{ - {MemberHost: "host_2", MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group2", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"error on two unreachable mysql", "vtgr repair: fail to diagnose ShardHasInactiveGroup with 1 nodes expecting 3", 0, 0, []data{ - {"", 0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-11"), topodatapb.TabletType_REPLICA}, - {"", 0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"no error on two unreachable mysqls with allowUnhealthyNodeOnReboot", "", testPort2, 1, []data{ - {"", 0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-11"), topodatapb.TabletType_REPLICA}, - {"", 0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - {testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"shard with fewer than configured members can still rebootstrap", "", testPort0, 0, []data{ - {testHost, testPort0, "group", []db.TestGroupState{ - {MemberHost: "", MemberPort: "NULL", MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid1, "1-10"), topodatapb.TabletType_REPLICA}, - }}, - } - tablets := make(map[string]*topo.TabletInfo) - for _, tt := range testcases { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - expectedCalls := 0 - if tt.expectedCandidatePort != 0 { - expectedCalls = 1 - } - inputMap := make(map[int]testGroupInput) - pingable := make(map[string]bool) - var lock sync.Mutex - dbAgent. - EXPECT(). - // RepairShardHasNoGroup is fixed by calling RebootstrapGroupLocked - RebootstrapGroupLocked(&inst.InstanceKey{Hostname: testHost, Port: tt.expectedCandidatePort}, gomock.Any()). - DoAndReturn(func(target *inst.InstanceKey, name string) error { - if target.Hostname == "" || target.Port == 0 { - return errors.New("invalid mysql instance key") - } - input := inputMap[target.Port] - groupState := input.groupState - if len(groupState) == 1 && groupState[0].MemberState == "OFFLINE" { - groupState[0].MemberState = "ONLINE" - groupState[0].MemberRole = "PRIMARY" - groupState[0].MemberHost = target.Hostname - groupState[0].MemberPort = strconv.Itoa(target.Port) - input.groupState = groupState - } else { - for i, s := range groupState { - if s.MemberHost == target.Hostname { - s.MemberState = "ONLINE" - s.MemberRole = "PRIMARY" - groupState[i] = s - } - input.groupState = groupState - } - } - inputMap[target.Port] = input - if name != "group" { - return errors.New("unexpected group name") - } - return nil - }). - Times(expectedCalls) - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(testPort0+i), input.mysqlhost, input.mysqlport, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - if tablet.Type == topodatapb.TabletType_PRIMARY { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - tablets[tablet.AliasString()] = tablet - inputMap[input.mysqlport] = testGroupInput{ - input.groupName, - false, - 0, - input.groupInput, - input.gtid, - } - pingable[tablet.Alias.String()] = input.pingable - dbAgent. - EXPECT(). - FetchGroupView(gomock.Eq(tablet.AliasString()), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[target.Port] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). - AnyTimes() - dbAgent. - EXPECT(). - FetchApplierGTIDSet(gomock.Any()). - DoAndReturn(func(target *inst.InstanceKey) (mysql.GTIDSet, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - return inputMap[target.Port].gtid, nil - }). - AnyTimes() - dbAgent. - EXPECT(). - StopGroupLocked(gomock.Any()). - DoAndReturn(func(target *inst.InstanceKey) error { - if target.Hostname == "" || target.Port == 0 { - return errors.New("invalid mysql instance key") - } - lock.Lock() - view := inputMap[target.Port] - view.groupState = []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(target.Port), MemberState: "OFFLINE", MemberRole: ""}, - } - inputMap[target.Port] = view - lock.Unlock() - return nil - }). - AnyTimes() - tmc. - EXPECT(). - Ping(gomock.Any(), gomock.Any()). - DoAndReturn(func(_ context.Context, t *topodatapb.Tablet) error { - if !pingable[t.Alias.String()] { - return errors.New("unreachable") - } - return nil - }). - AnyTimes() - } - cfg := &config.VTGRConfig{BootstrapGroupSize: repairGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - if tt.rebootstrapSize != 0 { - shard.OverrideRebootstrapGroupSize(tt.rebootstrapSize) - } - _, err := shard.Repair(ctx, DiagnoseTypeShardHasInactiveGroup) - if tt.errorMsg == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err, tt.errorMsg) - assert.True(t, strings.Contains(err.Error(), tt.errorMsg), err.Error()) - } - }) - } -} - -func TestRepairWrongPrimaryTablet(t *testing.T) { - type data struct { - mysqlport int - groupName string - groupInput []db.TestGroupState - ttype topodatapb.TabletType - } - - var testcases = []struct { - name string - errorMsg string - expectedCandidatePort int - shardPrimary string - inputs []data - }{ - {"fix no primary tablet in shard", "", testPort0, "", []data{ - {testPort0, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testPort1, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testPort2, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"fix wrong primary tablet", "", testPort0, "test_cell-0000017001", []data{ - {testPort0, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testPort1, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {testPort2, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"fix wrong primary tablet based on shard info", "", testPort0, "test_cell-0000017001", []data{ - {testPort0, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {testPort1, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testPort2, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"fix shard if there is an unreachable secondary", "", testPort0, "test_cell-0000017001", []data{ - {testPort0, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testPort1, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {testPort2, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"diagnose as ShardHasInactiveGroup if quorum number of not online", "", 0, "test_cell-0000017001", []data{ - {testPort0, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "UNREACHABLE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {testPort1, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {testPort2, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "UNREACHABLE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"tolerate failed nodes", "", testPort0, "test_cell-0000017001", []data{ - {testPort0, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {0, "group", []db.TestGroupState{}, topodatapb.TabletType_PRIMARY}, - {0, "group", []db.TestGroupState{}, topodatapb.TabletType_REPLICA}, - }}, - {"raise error if all nodes failed", "", 0, "", []data{ // diagnose as DiagnoseTypeShardNetworkPartition - {0, "group", []db.TestGroupState{}, topodatapb.TabletType_REPLICA}, - {0, "group", []db.TestGroupState{}, topodatapb.TabletType_PRIMARY}, - {0, "group", []db.TestGroupState{}, topodatapb.TabletType_REPLICA}, - }}, - } - for _, tt := range testcases { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - tablets := make(map[string]*topo.TabletInfo) - tmc. - EXPECT(). - Ping(gomock.Any(), gomock.Any()). - Return(nil). - AnyTimes() - expectedCalls := 0 - if tt.expectedCandidatePort != 0 { - expectedCalls = 1 - } - var candidate *topo.TabletInfo - inputMap := make(map[string]testGroupInput) - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(testPort0+i), testHost, input.mysqlport, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - if tablet.AliasString() == tt.shardPrimary { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - tablets[tablet.AliasString()] = tablet - inputMap[tablet.AliasString()] = testGroupInput{ - input.groupName, - false, - 0, - input.groupInput, - nil, - } - if expectedCalls > 0 && input.mysqlport == tt.expectedCandidatePort { - candidate = tablet - } - dbAgent. - EXPECT(). - FetchGroupView(gomock.Eq(tablet.AliasString()), gomock.Eq(&inst.InstanceKey{Hostname: testHost, Port: input.mysqlport})). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[alias] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). - AnyTimes() - } - if candidate != nil { - tmc. - EXPECT(). - ChangeType(gomock.Any(), gomock.Any(), topodatapb.TabletType_PRIMARY). - Return(nil). - Times(expectedCalls) - } - cfg := &config.VTGRConfig{BootstrapGroupSize: repairGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - _, err := shard.Repair(ctx, DiagnoseTypeWrongPrimaryTablet) - if tt.errorMsg == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err) - assert.True(t, strings.Contains(err.Error(), tt.errorMsg), err.Error()) - } - }) - } -} - -func TestRepairUnconnectedReplica(t *testing.T) { - type data struct { - alias string - port int - groupName string - readOnly bool - groupInput []db.TestGroupState - ttype topodatapb.TabletType - } - var testcases = []struct { - name string - errorMsg string - expectedCandidatePort int - inputs []data - }{ - {"fix unconnected replica tablet", "", testPort2, []data{ - {alias0, testPort0, "group", false, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, testPort1, "group", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, testPort2, "", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"do nothing if shard has wrong primary tablet", "", 0, []data{ // this should be diagnosed as DiagnoseTypeWrongPrimaryTablet instead - {alias0, testPort0, "group", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, testPort1, "group", false, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, testPort2, "", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"fix replica in ERROR state", "", testPort2, []data{ - {alias0, testPort0, "group", false, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, testPort1, "group", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, testPort2, "group", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", MemberRole: ""}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"fix replica with two nodes in ERROR state", "", 0, []data{ // InsufficientGroupSize - {alias0, testPort0, "group", false, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, testPort1, "group", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, testPort2, "group", true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ERROR", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - } - for _, tt := range testcases { - t.Run(tt.name, func(t *testing.T) { - rand.Seed(1) - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - tablets := make(map[string]*topo.TabletInfo) - tmc. - EXPECT(). - Ping(gomock.Any(), gomock.Any()). - Return(nil). - AnyTimes() - if tt.expectedCandidatePort != 0 { - dbAgent. - EXPECT(). - StopGroupLocked(gomock.Eq(&inst.InstanceKey{Hostname: testHost, Port: tt.expectedCandidatePort})). - Return(nil). - AnyTimes() - dbAgent. - EXPECT(). - JoinGroupLocked(gomock.Eq(&inst.InstanceKey{Hostname: testHost, Port: tt.expectedCandidatePort}), gomock.Any()). - Return(nil). - Times(1) - } - inputMap := make(map[string]testGroupInput) - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(i), testHost, input.port, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - if tablet.Type == topodatapb.TabletType_PRIMARY { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - tablets[tablet.AliasString()] = tablet - inputMap[input.alias] = testGroupInput{ - input.groupName, - input.readOnly, - 0, - input.groupInput, - nil, - } - dbAgent. - EXPECT(). - FetchGroupView(gomock.Eq(tablet.AliasString()), gomock.Eq(&inst.InstanceKey{Hostname: testHost, Port: input.port})). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[alias] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). - AnyTimes() - } - cfg := &config.VTGRConfig{BootstrapGroupSize: repairGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - _, err := shard.Repair(ctx, DiagnoseTypeUnconnectedReplica) - if tt.errorMsg == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err) - assert.True(t, strings.Contains(err.Error(), tt.errorMsg), err.Error()) - } - }) - } -} - -func TestRepairUnreachablePrimary(t *testing.T) { - type data struct { - port int - pingalbe bool - gtid mysql.GTIDSet - ttype topodatapb.TabletType - } - sid := "3e11fa47-71ca-11e1-9e33-c80aa9429562" - var testcases = []struct { - name string - errorMsg string - expectedCandidatePort int - inputs []data - }{ - {"primary is unreachable", "", testPort1, []data{ - {testPort0, false, getMysql56GTIDSet(sid, "1-11"), topodatapb.TabletType_PRIMARY}, - {testPort1, true, getMysql56GTIDSet(sid, "1-10"), topodatapb.TabletType_REPLICA}, - {testPort2, true, getMysql56GTIDSet(sid, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"failover to reachable node when primary is unreachable", "", testPort2, []data{ - {testPort0, false, getMysql56GTIDSet(sid, "1-11"), topodatapb.TabletType_PRIMARY}, - {testPort1, false, getMysql56GTIDSet(sid, "1-10"), topodatapb.TabletType_REPLICA}, - {testPort2, true, getMysql56GTIDSet(sid, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"do nothing if replica is unreachable", "", 0, []data{ - {testPort0, true, getMysql56GTIDSet(sid, "1-10"), topodatapb.TabletType_PRIMARY}, - {testPort1, false, getMysql56GTIDSet(sid, "1-10"), topodatapb.TabletType_REPLICA}, - {testPort2, false, getMysql56GTIDSet(sid, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"raise error if gtid divergence", "vtgr repair: found more than one failover candidates by GTID set for ks/0", 0, []data{ - {testPort0, false, getMysql56GTIDSet(sid, "1-10"), topodatapb.TabletType_PRIMARY}, - {testPort1, true, getMysql56GTIDSet("264a8230-67d2-11eb-acdd-0a8d91f24125", "1-10"), topodatapb.TabletType_REPLICA}, - {testPort2, true, getMysql56GTIDSet(sid, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - } - for _, tt := range testcases { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - dbAgent. - EXPECT(). - FetchGroupView(gomock.Any(), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - return db.BuildGroupView(alias, "group", target.Hostname, target.Port, false, 0, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - }), nil - }). - AnyTimes() - expectedCalls := 0 - if tt.expectedCandidatePort != 0 { - expectedCalls = 1 - } - dbAgent. - EXPECT(). - Failover(&inst.InstanceKey{Hostname: testHost, Port: tt.expectedCandidatePort}). - Return(nil). - Times(expectedCalls) - tmc. - EXPECT(). - ChangeType(gomock.Any(), gomock.Any(), topodatapb.TabletType_PRIMARY). - Return(nil). - Times(expectedCalls) - status := make(map[int32]struct { - pingalbe bool - gtid mysql.GTIDSet - }) - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(i), testHost, input.port, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - if tablet.Type == topodatapb.TabletType_PRIMARY { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - status[tablet.MysqlPort] = struct { - pingalbe bool - gtid mysql.GTIDSet - }{ - input.pingalbe, - input.gtid, - } - dbAgent. - EXPECT(). - FetchApplierGTIDSet(gomock.Eq(&inst.InstanceKey{Hostname: testHost, Port: input.port})). - DoAndReturn(func(target *inst.InstanceKey) (mysql.GTIDSet, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - return status[int32(target.Port)].gtid, nil - }). - AnyTimes() - tmc. - EXPECT(). - Ping(gomock.Any(), gomock.Any()). - DoAndReturn(func(_ context.Context, t *topodatapb.Tablet) error { - if !status[t.MysqlPort].pingalbe { - return errors.New("unreachable") - } - return nil - }). - AnyTimes() - } - cfg := &config.VTGRConfig{BootstrapGroupSize: repairGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - _, err := shard.Repair(ctx, DiagnoseTypeUnreachablePrimary) - if tt.errorMsg == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err, tt.errorMsg) - assert.True(t, strings.Contains(err.Error(), tt.errorMsg)) - } - }) - } -} - -func TestRepairInsufficientGroupSize(t *testing.T) { - type data struct { - alias string - readOnly bool - groupInput []db.TestGroupState - ttype topodatapb.TabletType - } - var testcases = []struct { - name string - errorMsg string - expectedCandidatePort int - inputs []data - }{ - {"fix insufficient group expectedBootstrapSize", "", testPort0, []data{ - {alias0, false, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - } - for _, tt := range testcases { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - tablets := make(map[string]*topo.TabletInfo) - tmc. - EXPECT(). - Ping(gomock.Any(), gomock.Any()). - Return(nil). - AnyTimes() - if tt.expectedCandidatePort != 0 { - dbAgent. - EXPECT(). - SetReadOnly(gomock.Eq(&inst.InstanceKey{Hostname: testHost, Port: tt.expectedCandidatePort}), true). - Return(nil). - Times(1) - } - inputMap := make(map[string]testGroupInput) - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(i), testHost, testPort0+i, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - if tablet.Type == topodatapb.TabletType_PRIMARY { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - tablets[tablet.AliasString()] = tablet - inputMap[input.alias] = testGroupInput{ - "group", - input.readOnly, - 0, - input.groupInput, - nil, - } - dbAgent. - EXPECT(). - FetchGroupView(gomock.Any(), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[alias] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). - AnyTimes() - } - cfg := &config.VTGRConfig{BootstrapGroupSize: repairGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - _, err := shard.Repair(ctx, DiagnoseTypeInsufficientGroupSize) - if tt.errorMsg == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err) - assert.True(t, strings.Contains(err.Error(), tt.errorMsg), err.Error()) - } - }) - } -} - -func TestRepairReadOnlyShard(t *testing.T) { - type data struct { - alias string - port int - readOnly bool - groupInput []db.TestGroupState - ttype topodatapb.TabletType - } - var testcases = []struct { - name string - errorMsg string - expectedCandidatePort int - inputs []data - }{ - {"fix readonly shard", "", testPort0, []data{ - {alias0, testPort0, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, testPort1, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, testPort2, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - {"do nothing if primary is not read only", "", 0, []data{ - {alias0, testPort0, false, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_PRIMARY}, - {alias1, testPort1, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - {alias2, testPort2, true, []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "ONLINE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, topodatapb.TabletType_REPLICA}, - }}, - } - for _, tt := range testcases { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - tablets := make(map[string]*topo.TabletInfo) - tmc. - EXPECT(). - Ping(gomock.Any(), gomock.Any()). - Return(nil). - AnyTimes() - if tt.expectedCandidatePort != 0 { - dbAgent. - EXPECT(). - SetReadOnly(gomock.Eq(&inst.InstanceKey{Hostname: testHost, Port: tt.expectedCandidatePort}), false). - Return(nil). - Times(1) - } - inputMap := make(map[string]testGroupInput) - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(i), testHost, input.port, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - if tablet.Type == topodatapb.TabletType_PRIMARY { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - tablets[tablet.AliasString()] = tablet - inputMap[input.alias] = testGroupInput{ - "group", - input.readOnly, - 0, - input.groupInput, - nil, - } - dbAgent. - EXPECT(). - FetchGroupView(gomock.Eq(tablet.AliasString()), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[alias] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). - AnyTimes() - } - cfg := &config.VTGRConfig{BootstrapGroupSize: repairGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - _, err := shard.Repair(ctx, DiagnoseTypeReadOnlyShard) - if tt.errorMsg == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err) - assert.True(t, strings.Contains(err.Error(), tt.errorMsg), err.Error()) - } - }) - } -} - -func TestRepairBackoffError(t *testing.T) { - type data struct { - alias string - mysqlhost string - mysqlport int - groupName string - groupInput []db.TestGroupState - pingable bool - gtid mysql.GTIDSet - ttype topodatapb.TabletType - } - sid := "3e11fa47-71ca-11e1-9e33-c80aa9429562" - var testcases = []struct { - name string - errorMsg string - expectedCandidatePort int - diagnose DiagnoseType - inputs []data - }{ - {"shard has network partition", "", testPort0, DiagnoseTypeBackoffError, []data{ - {alias0, testHost, testPort0, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "UNREACHABLE", MemberRole: "PRIMARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "ONLINE", MemberRole: "SECONDARY"}, - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "UNREACHABLE", MemberRole: "SECONDARY"}, - }, true, getMysql56GTIDSet(sid, "1-10"), topodatapb.TabletType_REPLICA}, - {alias1, testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid, "1-9"), topodatapb.TabletType_REPLICA}, - {alias2, testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - {"shard bootstrap in progress", "", testPort0, DiagnoseTypeBootstrapBackoff, []data{ - {alias0, testHost, testPort0, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort0), MemberState: "RECOVERING", MemberRole: "SECONDARY"}, - }, true, getMysql56GTIDSet(sid, "1-10"), topodatapb.TabletType_REPLICA}, - {alias1, testHost, testPort1, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort1), MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid, "1-9"), topodatapb.TabletType_REPLICA}, - {alias2, testHost, testPort2, "group", []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(testPort2), MemberState: "OFFLINE", MemberRole: ""}, - }, true, getMysql56GTIDSet(sid, "1-9"), topodatapb.TabletType_REPLICA}, - }}, - } - tablets := make(map[string]*topo.TabletInfo) - for _, tt := range testcases { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - ctx := context.Background() - ts := memorytopo.NewServer("test_cell") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - tmc := NewMockGRTmcClient(ctrl) - dbAgent := db.NewMockAgent(ctrl) - expectedCalls := 0 - if tt.expectedCandidatePort != 0 { - expectedCalls = 1 - } - inputMap := make(map[int]testGroupInput) - pingable := make(map[string]bool) - var lock sync.Mutex - dbAgent. - EXPECT(). - RebootstrapGroupLocked(&inst.InstanceKey{Hostname: testHost, Port: tt.expectedCandidatePort}, "group"). - DoAndReturn(func(target *inst.InstanceKey, name string) error { - if target.Hostname == "" || target.Port == 0 { - return errors.New("invalid mysql instance key") - } - input := inputMap[target.Port] - groupState := input.groupState - if len(groupState) == 1 && groupState[0].MemberState == "OFFLINE" { - groupState[0].MemberState = "ONLINE" - groupState[0].MemberRole = "PRIMARY" - groupState[0].MemberHost = target.Hostname - groupState[0].MemberPort = strconv.Itoa(target.Port) - input.groupState = groupState - } else { - for i, s := range groupState { - if s.MemberHost == target.Hostname { - s.MemberState = "ONLINE" - s.MemberRole = "PRIMARY" - groupState[i] = s - } - input.groupState = groupState - } - } - inputMap[target.Port] = input - return nil - }). - Times(expectedCalls) - for i, input := range tt.inputs { - tablet := buildTabletInfo(uint32(i), input.mysqlhost, input.mysqlport, input.ttype, time.Now()) - testutil.AddTablet(ctx, t, ts, tablet.Tablet, nil) - if tablet.Type == topodatapb.TabletType_PRIMARY { - ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { - si.PrimaryAlias = tablet.Alias - return nil - }) - } - tablets[tablet.AliasString()] = tablet - inputMap[input.mysqlport] = testGroupInput{ - input.groupName, - false, - 0, - input.groupInput, - input.gtid, - } - pingable[input.alias] = input.pingable - dbAgent. - EXPECT(). - FetchGroupView(gomock.Eq(tablet.AliasString()), gomock.Any()). - DoAndReturn(func(alias string, target *inst.InstanceKey) (*db.GroupView, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - s := inputMap[target.Port] - view := db.BuildGroupView(alias, s.groupName, target.Hostname, target.Port, s.readOnly, s.checkResult, s.groupState) - return view, nil - }). - AnyTimes() - dbAgent. - EXPECT(). - FetchApplierGTIDSet(gomock.Any()). - DoAndReturn(func(target *inst.InstanceKey) (mysql.GTIDSet, error) { - if target.Hostname == "" || target.Port == 0 { - return nil, errors.New("invalid mysql instance key") - } - return inputMap[target.Port].gtid, nil - }). - AnyTimes() - dbAgent. - EXPECT(). - StopGroupLocked(gomock.Any()). - DoAndReturn(func(target *inst.InstanceKey) error { - lock.Lock() - view := inputMap[target.Port] - view.groupState = []db.TestGroupState{ - {MemberHost: testHost, MemberPort: strconv.Itoa(target.Port), MemberState: "OFFLINE", MemberRole: ""}, - } - inputMap[target.Port] = view - lock.Unlock() - return nil - }). - AnyTimes() - tmc. - EXPECT(). - Ping(gomock.Any(), gomock.Any()). - DoAndReturn(func(_ context.Context, t *topodatapb.Tablet) error { - if !pingable[input.alias] { - return errors.New("unreachable") - } - return nil - }). - AnyTimes() - } - cfg := &config.VTGRConfig{BootstrapGroupSize: repairGroupSize, MinNumReplica: 2, BackoffErrorWaitTimeSeconds: 1, BootstrapWaitTimeSeconds: 1} - shard := NewGRShard("ks", "0", nil, tmc, ts, dbAgent, cfg, testPort0, true) - shard.lastDiagnoseResult = tt.diagnose - _, err := shard.Repair(ctx, tt.diagnose) - if tt.errorMsg == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err, tt.errorMsg) - assert.True(t, strings.Contains(err.Error(), tt.errorMsg), err.Error()) - } - }) - } -} - -func getMysql56GTIDSet(sid, interval string) mysql.GTIDSet { - input := fmt.Sprintf("%s:%s", sid, interval) - pos, _ := mysql.ParsePosition(mysql.Mysql56FlavorID, input) - return pos.GTIDSet -} diff --git a/go/vt/vtgr/db/db.go b/go/vt/vtgr/db/db.go deleted file mode 100644 index f9a0ab2b478..00000000000 --- a/go/vt/vtgr/db/db.go +++ /dev/null @@ -1,381 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - This file has been copied over from VTOrc package -*/ - -package db - -import ( - "database/sql" - "fmt" - "strings" - "sync" - "time" - - "vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/vtgr/config" -) - -var ( - EmptyArgs []any - Db DB = (*vtorcDB)(nil) -) - -var mysqlURI string -var dbMutex sync.Mutex - -type DB interface { - QueryOrchestrator(query string, argsArray []any, onRow func(sqlutils.RowMap) error) error -} - -type vtorcDB struct { -} - -var _ DB = (*vtorcDB)(nil) - -func (m *vtorcDB) QueryOrchestrator(query string, argsArray []any, onRow func(sqlutils.RowMap) error) error { - return QueryOrchestrator(query, argsArray, onRow) -} - -type DummySQLResult struct { -} - -func (dummyRes DummySQLResult) LastInsertId() (int64, error) { - return 0, nil -} - -func (dummyRes DummySQLResult) RowsAffected() (int64, error) { - return 1, nil -} - -func getMySQLURI() string { - dbMutex.Lock() - defer dbMutex.Unlock() - if mysqlURI != "" { - return mysqlURI - } - mysqlURI := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?timeout=%ds&readTimeout=%ds&rejectReadOnly=%t&interpolateParams=true", - config.Config.MySQLOrchestratorUser, - config.Config.MySQLOrchestratorPassword, - config.Config.MySQLOrchestratorHost, - config.Config.MySQLOrchestratorPort, - config.Config.MySQLOrchestratorDatabase, - config.Config.MySQLConnectTimeoutSeconds, - config.Config.MySQLOrchestratorReadTimeoutSeconds, - config.Config.MySQLOrchestratorRejectReadOnly, - ) - if config.Config.MySQLOrchestratorUseMutualTLS { - mysqlURI, _ = SetupMySQLOrchestratorTLS(mysqlURI) - } - return mysqlURI -} - -// OpenDiscovery returns a DB instance to access a topology instance. -// It has lower read timeout than OpenTopology and is intended to -// be used with low-latency discovery queries. -func OpenDiscovery(host string, port int) (*sql.DB, error) { - return openTopology(host, port, config.Config.MySQLDiscoveryReadTimeoutSeconds) -} - -// OpenTopology returns a DB instance to access a topology instance. -func OpenTopology(host string, port int) (*sql.DB, error) { - return openTopology(host, port, config.Config.MySQLTopologyReadTimeoutSeconds) -} - -func openTopology(host string, port int, readTimeout int) (db *sql.DB, err error) { - uri := fmt.Sprintf("%s:%s@tcp(%s:%d)/?timeout=%ds&readTimeout=%ds&interpolateParams=true", - config.Config.MySQLTopologyUser, - config.Config.MySQLTopologyPassword, - host, port, - config.Config.MySQLConnectTimeoutSeconds, - readTimeout, - ) - - if config.Config.MySQLTopologyUseMutualTLS || - (config.Config.MySQLTopologyUseMixedTLS && requiresTLS(host, port, uri)) { - if uri, err = SetupMySQLTopologyTLS(uri); err != nil { - return nil, err - } - } - if db, _, err = sqlutils.GetDB(uri); err != nil { - return nil, err - } - if config.Config.MySQLConnectionLifetimeSeconds > 0 { - db.SetConnMaxLifetime(time.Duration(config.Config.MySQLConnectionLifetimeSeconds) * time.Second) - } - db.SetMaxOpenConns(config.MySQLTopologyMaxPoolConnections) - db.SetMaxIdleConns(config.MySQLTopologyMaxPoolConnections) - return db, err -} - -func openOrchestratorMySQLGeneric() (db *sql.DB, fromCache bool, err error) { - uri := fmt.Sprintf("%s:%s@tcp(%s:%d)/?timeout=%ds&readTimeout=%ds&interpolateParams=true", - config.Config.MySQLOrchestratorUser, - config.Config.MySQLOrchestratorPassword, - config.Config.MySQLOrchestratorHost, - config.Config.MySQLOrchestratorPort, - config.Config.MySQLConnectTimeoutSeconds, - config.Config.MySQLOrchestratorReadTimeoutSeconds, - ) - if config.Config.MySQLOrchestratorUseMutualTLS { - uri, _ = SetupMySQLOrchestratorTLS(uri) - } - return sqlutils.GetDB(uri) -} - -func IsSQLite() bool { - return config.Config.IsSQLite() -} - -// OpenTopology returns the DB instance for the orchestrator backed database -func OpenOrchestrator() (db *sql.DB, err error) { - var fromCache bool - if IsSQLite() { - db, fromCache, err = sqlutils.GetSQLiteDB(config.Config.SQLite3DataFile) - if err == nil && !fromCache { - log.Infof("Connected to orchestrator backend: sqlite on %v", config.Config.SQLite3DataFile) - } - if db != nil { - db.SetMaxOpenConns(1) - db.SetMaxIdleConns(1) - } - } else { - if db, fromCache, err := openOrchestratorMySQLGeneric(); err != nil { - log.Errorf(err.Error()) - return db, err - } else if !fromCache { - // first time ever we talk to MySQL - query := fmt.Sprintf("create database if not exists %s", config.Config.MySQLOrchestratorDatabase) - if _, err := db.Exec(query); err != nil { - log.Errorf(err.Error()) - return db, err - } - } - db, fromCache, err = sqlutils.GetDB(getMySQLURI()) - if err == nil && !fromCache { - // do not show the password but do show what we connect to. - safeMySQLURI := fmt.Sprintf("%s:?@tcp(%s:%d)/%s?timeout=%ds", config.Config.MySQLOrchestratorUser, - config.Config.MySQLOrchestratorHost, config.Config.MySQLOrchestratorPort, config.Config.MySQLOrchestratorDatabase, config.Config.MySQLConnectTimeoutSeconds) - log.Infof("Connected to orchestrator backend: %v", safeMySQLURI) - if config.Config.MySQLOrchestratorMaxPoolConnections > 0 { - log.Infof("Orchestrator pool SetMaxOpenConns: %d", config.Config.MySQLOrchestratorMaxPoolConnections) - db.SetMaxOpenConns(config.Config.MySQLOrchestratorMaxPoolConnections) - } - if config.Config.MySQLConnectionLifetimeSeconds > 0 { - db.SetConnMaxLifetime(time.Duration(config.Config.MySQLConnectionLifetimeSeconds) * time.Second) - } - } - } - if err == nil && !fromCache { - if !config.Config.SkipOrchestratorDatabaseUpdate { - initOrchestratorDB(db) - } - // A low value here will trigger reconnects which could - // make the number of backend connections hit the tcp - // limit. That's bad. I could make this setting dynamic - // but then people need to know which value to use. For now - // allow up to 25% of MySQLOrchestratorMaxPoolConnections - // to be idle. That should provide a good number which - // does not keep the maximum number of connections open but - // at the same time does not trigger disconnections and - // reconnections too frequently. - maxIdleConns := int(config.Config.MySQLOrchestratorMaxPoolConnections * 25 / 100) - if maxIdleConns < 10 { - maxIdleConns = 10 - } - log.Infof("Connecting to backend %s:%d: maxConnections: %d, maxIdleConns: %d", - config.Config.MySQLOrchestratorHost, - config.Config.MySQLOrchestratorPort, - config.Config.MySQLOrchestratorMaxPoolConnections, - maxIdleConns) - db.SetMaxIdleConns(maxIdleConns) - } - return db, err -} - -func translateStatement(statement string) (string, error) { - if IsSQLite() { - statement = sqlutils.ToSqlite3Dialect(statement) - } - return statement, nil -} - -// versionIsDeployed checks if given version has already been deployed -func versionIsDeployed(db *sql.DB) (result bool, err error) { - query := ` - select - count(*) as is_deployed - from - orchestrator_db_deployments - where - deployed_version = ? - ` - err = db.QueryRow(query, config.RuntimeCLIFlags.ConfiguredVersion).Scan(&result) - // err means the table 'orchestrator_db_deployments' does not even exist, in which case we proceed - // to deploy. - // If there's another error to this, like DB gone bad, then we're about to find out anyway. - return result, err -} - -// registerOrchestratorDeployment updates the orchestrator_metadata table upon successful deployment -func registerOrchestratorDeployment(db *sql.DB) error { - query := ` - replace into orchestrator_db_deployments ( - deployed_version, deployed_timestamp - ) values ( - ?, NOW() - ) - ` - if _, err := execInternal(db, query, config.RuntimeCLIFlags.ConfiguredVersion); err != nil { - log.Fatalf("Unable to write to orchestrator_metadata: %+v", err) - } - log.Infof("Migrated database schema to version [%+v]", config.RuntimeCLIFlags.ConfiguredVersion) - return nil -} - -// deployStatements will issue given sql queries that are not already known to be deployed. -// This iterates both lists (to-run and already-deployed) and also verifies no contraditions. -func deployStatements(db *sql.DB, queries []string) error { - tx, err := db.Begin() - if err != nil { - log.Fatal(err.Error()) - } - // Ugly workaround ahead. - // Origin of this workaround is the existence of some "timestamp NOT NULL," column definitions, - // where in NO_ZERO_IN_DATE,NO_ZERO_DATE sql_mode are invalid (since default is implicitly "0") - // This means installation of orchestrator fails on such configured servers, and in particular on 5.7 - // where this setting is the dfault. - // For purpose of backwards compatability, what we do is force sql_mode to be more relaxed, create the schemas - // along with the "invalid" definition, and then go ahead and fix those definitions via following ALTER statements. - // My bad. - originalSQLMode := "" - if config.Config.IsMySQL() { - _ = tx.QueryRow(`select @@session.sql_mode`).Scan(&originalSQLMode) - if _, err := tx.Exec(`set @@session.sql_mode=REPLACE(@@session.sql_mode, 'NO_ZERO_DATE', '')`); err != nil { - log.Fatal(err.Error()) - } - if _, err := tx.Exec(`set @@session.sql_mode=REPLACE(@@session.sql_mode, 'NO_ZERO_IN_DATE', '')`); err != nil { - log.Fatal(err.Error()) - } - } - for _, query := range queries { - query, err := translateStatement(query) - if err != nil { - log.Fatalf("Cannot initiate orchestrator: %+v; query=%+v", err, query) - return err - } - if _, err := tx.Exec(query); err != nil { - if strings.Contains(err.Error(), "syntax error") { - log.Fatalf("Cannot initiate orchestrator: %+v; query=%+v", err, query) - return err - } - if !sqlutils.IsAlterTable(query) && !sqlutils.IsCreateIndex(query) && !sqlutils.IsDropIndex(query) { - log.Fatalf("Cannot initiate orchestrator: %+v; query=%+v", err, query) - return err - } - if !strings.Contains(err.Error(), "duplicate column name") && - !strings.Contains(err.Error(), "Duplicate column name") && - !strings.Contains(err.Error(), "check that column/key exists") && - !strings.Contains(err.Error(), "already exists") && - !strings.Contains(err.Error(), "Duplicate key name") { - log.Errorf("Error initiating orchestrator: %+v; query=%+v", err, query) - } - } - } - if config.Config.IsMySQL() { - if _, err := tx.Exec(`set session sql_mode=?`, originalSQLMode); err != nil { - log.Fatal(err.Error()) - } - } - if err := tx.Commit(); err != nil { - log.Fatal(err.Error()) - } - return nil -} - -// initOrchestratorDB attempts to create/upgrade the orchestrator backend database. It is created once in the -// application's lifetime. -func initOrchestratorDB(db *sql.DB) error { - log.Info("Initializing orchestrator") - - versionAlreadyDeployed, err := versionIsDeployed(db) - if versionAlreadyDeployed && config.RuntimeCLIFlags.ConfiguredVersion != "" && err == nil { - // Already deployed with this version - return nil - } - if config.Config.PanicIfDifferentDatabaseDeploy && config.RuntimeCLIFlags.ConfiguredVersion != "" && !versionAlreadyDeployed { - log.Fatalf("PanicIfDifferentDatabaseDeploy is set. Configured version %s is not the version found in the database", config.RuntimeCLIFlags.ConfiguredVersion) - } - log.Info("Migrating database schema") - deployStatements(db, generateSQLBase) - deployStatements(db, generateSQLPatches) - registerOrchestratorDeployment(db) - - if IsSQLite() { - ExecOrchestrator(`PRAGMA journal_mode = WAL`) - ExecOrchestrator(`PRAGMA synchronous = NORMAL`) - } - - return nil -} - -// execInternal -func execInternal(db *sql.DB, query string, args ...any) (sql.Result, error) { - var err error - query, err = translateStatement(query) - if err != nil { - return nil, err - } - res, err := sqlutils.ExecNoPrepare(db, query, args...) - return res, err -} - -// ExecOrchestrator will execute given query on the orchestrator backend database. -func ExecOrchestrator(query string, args ...any) (sql.Result, error) { - var err error - query, err = translateStatement(query) - if err != nil { - return nil, err - } - db, err := OpenOrchestrator() - if err != nil { - return nil, err - } - res, err := sqlutils.ExecNoPrepare(db, query, args...) - return res, err -} - -// QueryOrchestrator -func QueryOrchestrator(query string, argsArray []any, onRow func(sqlutils.RowMap) error) error { - query, err := translateStatement(query) - if err != nil { - log.Fatalf("Cannot query orchestrator: %+v; query=%+v", err, query) - return err - } - db, err := OpenOrchestrator() - if err != nil { - return err - } - - if err = sqlutils.QueryRowsMap(db, query, onRow, argsArray...); err != nil { - log.Warning(err.Error()) - } - - return err -} diff --git a/go/vt/vtgr/db/generate_base.go b/go/vt/vtgr/db/generate_base.go deleted file mode 100644 index d1923223e5d..00000000000 --- a/go/vt/vtgr/db/generate_base.go +++ /dev/null @@ -1,862 +0,0 @@ -/* - Copyright 2017 Shlomi Noach, GitHub Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - This file has been copied over from VTOrc package -*/ - -package db - -// generateSQLBase & generateSQLPatches are lists of SQL statements required to build the orchestrator backend -var generateSQLBase = []string{ - ` - CREATE TABLE IF NOT EXISTS database_instance ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - port smallint(5) unsigned NOT NULL, - last_checked timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - last_seen timestamp NULL DEFAULT NULL, - server_id int(10) unsigned NOT NULL, - version varchar(128) CHARACTER SET ascii NOT NULL, - binlog_format varchar(16) CHARACTER SET ascii NOT NULL, - log_bin tinyint(3) unsigned NOT NULL, - log_replica_updates tinyint(3) unsigned NOT NULL, - binary_log_file varchar(128) CHARACTER SET ascii NOT NULL, - binary_log_pos bigint(20) unsigned NOT NULL, - source_host varchar(128) CHARACTER SET ascii NOT NULL, - source_port smallint(5) unsigned NOT NULL, - replica_sql_running tinyint(3) unsigned NOT NULL, - replica_io_running tinyint(3) unsigned NOT NULL, - source_log_file varchar(128) CHARACTER SET ascii NOT NULL, - read_source_log_pos bigint(20) unsigned NOT NULL, - relay_source_log_file varchar(128) CHARACTER SET ascii NOT NULL, - exec_source_log_pos bigint(20) unsigned NOT NULL, - replication_lag_seconds bigint(20) unsigned DEFAULT NULL, - replica_lag_seconds bigint(20) unsigned DEFAULT NULL, - num_replica_hosts int(10) unsigned NOT NULL, - replica_hosts text CHARACTER SET ascii NOT NULL, - cluster_name varchar(128) CHARACTER SET ascii NOT NULL, - PRIMARY KEY (hostname,port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX cluster_name_idx ON database_instance - `, - ` - CREATE INDEX cluster_name_idx_database_instance ON database_instance(cluster_name) - `, - ` - DROP INDEX last_checked_idx ON database_instance - `, - ` - CREATE INDEX last_checked_idx_database_instance ON database_instance(last_checked) - `, - ` - DROP INDEX last_seen_idx ON database_instance - `, - ` - CREATE INDEX last_seen_idx_database_instance ON database_instance(last_seen) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_maintenance ( - database_instance_maintenance_id int(10) unsigned NOT NULL AUTO_INCREMENT, - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - maintenance_active tinyint(4) DEFAULT NULL, - begin_timestamp timestamp NULL DEFAULT NULL, - end_timestamp timestamp NULL DEFAULT NULL, - owner varchar(128) CHARACTER SET utf8 NOT NULL, - reason text CHARACTER SET utf8 NOT NULL, - PRIMARY KEY (database_instance_maintenance_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX maintenance_uidx ON database_instance_maintenance - `, - ` - CREATE UNIQUE INDEX maintenance_uidx_database_instance_maintenance ON database_instance_maintenance (maintenance_active, hostname, port) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_long_running_queries ( - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - process_id bigint(20) NOT NULL, - process_started_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - process_user varchar(16) CHARACTER SET utf8 NOT NULL, - process_host varchar(128) CHARACTER SET utf8 NOT NULL, - process_db varchar(128) CHARACTER SET utf8 NOT NULL, - process_command varchar(16) CHARACTER SET utf8 NOT NULL, - process_time_seconds int(11) NOT NULL, - process_state varchar(128) CHARACTER SET utf8 NOT NULL, - process_info varchar(1024) CHARACTER SET utf8 NOT NULL, - PRIMARY KEY (hostname,port,process_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX process_started_at_idx ON database_instance_long_running_queries - `, - ` - CREATE INDEX process_started_at_idx_database_instance_long_running_queries ON database_instance_long_running_queries (process_started_at) - `, - ` - CREATE TABLE IF NOT EXISTS audit ( - audit_id bigint(20) unsigned NOT NULL AUTO_INCREMENT, - audit_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - audit_type varchar(128) CHARACTER SET ascii NOT NULL, - hostname varchar(128) CHARACTER SET ascii NOT NULL DEFAULT '', - port smallint(5) unsigned NOT NULL, - message text CHARACTER SET utf8 NOT NULL, - PRIMARY KEY (audit_id) - ) ENGINE=InnoDB DEFAULT CHARSET=latin1 - `, - ` - DROP INDEX audit_timestamp_idx ON audit - `, - ` - CREATE INDEX audit_timestamp_idx_audit ON audit (audit_timestamp) - `, - ` - DROP INDEX host_port_idx ON audit - `, - ` - CREATE INDEX host_port_idx_audit ON audit (hostname, port, audit_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS host_agent ( - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - token varchar(128) NOT NULL, - last_submitted timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - last_checked timestamp NULL DEFAULT NULL, - last_seen timestamp NULL DEFAULT NULL, - mysql_port smallint(5) unsigned DEFAULT NULL, - count_mysql_snapshots smallint(5) unsigned NOT NULL, - PRIMARY KEY (hostname) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX token_idx ON host_agent - `, - ` - CREATE INDEX token_idx_host_agent ON host_agent (token) - `, - ` - DROP INDEX last_submitted_idx ON host_agent - `, - ` - CREATE INDEX last_submitted_idx_host_agent ON host_agent (last_submitted) - `, - ` - DROP INDEX last_checked_idx ON host_agent - `, - ` - CREATE INDEX last_checked_idx_host_agent ON host_agent (last_checked) - `, - ` - DROP INDEX last_seen_idx ON host_agent - `, - ` - CREATE INDEX last_seen_idx_host_agent ON host_agent (last_seen) - `, - ` - CREATE TABLE IF NOT EXISTS agent_seed ( - agent_seed_id int(10) unsigned NOT NULL AUTO_INCREMENT, - target_hostname varchar(128) NOT NULL, - source_hostname varchar(128) NOT NULL, - start_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - end_timestamp timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - is_complete tinyint(3) unsigned NOT NULL DEFAULT '0', - is_successful tinyint(3) unsigned NOT NULL DEFAULT '0', - PRIMARY KEY (agent_seed_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX target_hostname_idx ON agent_seed - `, - ` - CREATE INDEX target_hostname_idx_agent_seed ON agent_seed (target_hostname,is_complete) - `, - ` - DROP INDEX source_hostname_idx ON agent_seed - `, - ` - CREATE INDEX source_hostname_idx_agent_seed ON agent_seed (source_hostname,is_complete) - `, - ` - DROP INDEX start_timestamp_idx ON agent_seed - `, - ` - CREATE INDEX start_timestamp_idx_agent_seed ON agent_seed (start_timestamp) - `, - ` - DROP INDEX is_complete_idx ON agent_seed - `, - ` - CREATE INDEX is_complete_idx_agent_seed ON agent_seed (is_complete,start_timestamp) - `, - ` - DROP INDEX is_successful_idx ON agent_seed - `, - ` - CREATE INDEX is_successful_idx_agent_seed ON agent_seed (is_successful, start_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS agent_seed_state ( - agent_seed_state_id int(10) unsigned NOT NULL AUTO_INCREMENT, - agent_seed_id int(10) unsigned NOT NULL, - state_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - state_action varchar(127) NOT NULL, - error_message varchar(255) NOT NULL, - PRIMARY KEY (agent_seed_state_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX agent_seed_idx ON agent_seed_state - `, - ` - CREATE INDEX agent_seed_idx_agent_seed_state ON agent_seed_state (agent_seed_id, state_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS host_attributes ( - hostname varchar(128) NOT NULL, - attribute_name varchar(128) NOT NULL, - attribute_value varchar(128) NOT NULL, - submit_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - expire_timestamp timestamp NULL DEFAULT NULL, - PRIMARY KEY (hostname,attribute_name) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX attribute_name_idx ON host_attributes - `, - ` - CREATE INDEX attribute_name_idx_host_attributes ON host_attributes (attribute_name) - `, - ` - DROP INDEX attribute_value_idx ON host_attributes - `, - ` - CREATE INDEX attribute_value_idx_host_attributes ON host_attributes (attribute_value) - `, - ` - DROP INDEX submit_timestamp_idx ON host_attributes - `, - ` - CREATE INDEX submit_timestamp_idx_host_attributes ON host_attributes (submit_timestamp) - `, - ` - DROP INDEX expire_timestamp_idx ON host_attributes - `, - ` - CREATE INDEX expire_timestamp_idx_host_attributes ON host_attributes (expire_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS hostname_resolve ( - hostname varchar(128) NOT NULL, - resolved_hostname varchar(128) NOT NULL, - resolved_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (hostname) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX resolved_timestamp_idx ON hostname_resolve - `, - ` - CREATE INDEX resolved_timestamp_idx_hostname_resolve ON hostname_resolve (resolved_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS active_node ( - anchor tinyint unsigned NOT NULL, - hostname varchar(128) CHARACTER SET ascii NOT NULL, - token varchar(128) NOT NULL, - last_seen_active timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (anchor) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - INSERT IGNORE INTO active_node (anchor, hostname, token, last_seen_active) - VALUES (1, '', '', NOW()) - `, - ` - CREATE TABLE IF NOT EXISTS node_health ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - token varchar(128) NOT NULL, - last_seen_active timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (hostname, token) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP VIEW IF EXISTS _whats_wrong - `, - ` - DROP VIEW IF EXISTS whats_wrong - `, - ` - DROP VIEW IF EXISTS whats_wrong_summary - `, - ` - CREATE TABLE IF NOT EXISTS topology_recovery ( - recovery_id bigint unsigned not null auto_increment, - hostname varchar(128) NOT NULL, - port smallint unsigned NOT NULL, - in_active_period tinyint unsigned NOT NULL DEFAULT 0, - start_active_period timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - end_active_period_unixtime int unsigned, - end_recovery timestamp NULL DEFAULT NULL, - processing_node_hostname varchar(128) CHARACTER SET ascii NOT NULL, - processcing_node_token varchar(128) NOT NULL, - successor_hostname varchar(128) DEFAULT NULL, - successor_port smallint unsigned DEFAULT NULL, - PRIMARY KEY (recovery_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX in_active_start_period_idx ON topology_recovery - `, - ` - CREATE INDEX in_active_start_period_idx_topology_recovery ON topology_recovery (in_active_period, start_active_period) - `, - ` - DROP INDEX start_active_period_idx ON topology_recovery - `, - ` - CREATE INDEX start_active_period_idx_topology_recovery ON topology_recovery (start_active_period) - `, - ` - DROP INDEX hostname_port_active_period_uidx ON topology_recovery - `, - ` - CREATE UNIQUE INDEX hostname_port_active_period_uidx_topology_recovery ON topology_recovery (hostname, port, in_active_period, end_active_period_unixtime) - `, - ` - CREATE TABLE IF NOT EXISTS hostname_unresolve ( - hostname varchar(128) NOT NULL, - unresolved_hostname varchar(128) NOT NULL, - PRIMARY KEY (hostname) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX unresolved_hostname_idx ON hostname_unresolve - `, - ` - CREATE INDEX unresolved_hostname_idx_hostname_unresolve ON hostname_unresolve (unresolved_hostname) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_pool ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - port smallint(5) unsigned NOT NULL, - pool varchar(128) NOT NULL, - PRIMARY KEY (hostname, port, pool) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX pool_idx ON database_instance_pool - `, - ` - CREATE INDEX pool_idx_database_instance_pool ON database_instance_pool (pool) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_topology_history ( - snapshot_unix_timestamp INT UNSIGNED NOT NULL, - hostname varchar(128) CHARACTER SET ascii NOT NULL, - port smallint(5) unsigned NOT NULL, - source_host varchar(128) CHARACTER SET ascii NOT NULL, - source_port smallint(5) unsigned NOT NULL, - cluster_name tinytext CHARACTER SET ascii NOT NULL, - PRIMARY KEY (snapshot_unix_timestamp, hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX cluster_name_idx ON database_instance_topology_history - `, - ` - CREATE INDEX cluster_name_idx_database_instance_topology_history ON database_instance_topology_history (snapshot_unix_timestamp, cluster_name(128)) - `, - ` - CREATE TABLE IF NOT EXISTS candidate_database_instance ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - port smallint(5) unsigned NOT NULL, - last_suggested TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX last_suggested_idx ON candidate_database_instance - `, - ` - CREATE INDEX last_suggested_idx_candidate_database_instance ON candidate_database_instance (last_suggested) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_downtime ( - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - downtime_active tinyint(4) DEFAULT NULL, - begin_timestamp timestamp DEFAULT CURRENT_TIMESTAMP, - end_timestamp timestamp NULL DEFAULT NULL, - owner varchar(128) CHARACTER SET utf8 NOT NULL, - reason text CHARACTER SET utf8 NOT NULL, - PRIMARY KEY (hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS topology_failure_detection ( - detection_id bigint(20) unsigned NOT NULL AUTO_INCREMENT, - hostname varchar(128) NOT NULL, - port smallint unsigned NOT NULL, - in_active_period tinyint unsigned NOT NULL DEFAULT '0', - start_active_period timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - end_active_period_unixtime int unsigned NOT NULL, - processing_node_hostname varchar(128) NOT NULL, - processcing_node_token varchar(128) NOT NULL, - analysis varchar(128) NOT NULL, - cluster_name varchar(128) NOT NULL, - count_affected_replicas int unsigned NOT NULL, - replica_hosts text NOT NULL, - PRIMARY KEY (detection_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX hostname_port_active_period_uidx ON topology_failure_detection - `, - ` - DROP INDEX in_active_start_period_idx ON topology_failure_detection - `, - ` - CREATE INDEX in_active_start_period_idx_topology_failure_detection ON topology_failure_detection (in_active_period, start_active_period) - `, - ` - CREATE TABLE IF NOT EXISTS hostname_resolve_history ( - resolved_hostname varchar(128) NOT NULL, - hostname varchar(128) NOT NULL, - resolved_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (resolved_hostname) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX hostname ON hostname_resolve_history - `, - ` - CREATE INDEX hostname_idx_hostname_resolve_history ON hostname_resolve_history (hostname) - `, - ` - DROP INDEX resolved_timestamp_idx ON hostname_resolve_history - `, - ` - CREATE INDEX resolved_timestamp_idx_hostname_resolve_history ON hostname_resolve_history (resolved_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS hostname_unresolve_history ( - unresolved_hostname varchar(128) NOT NULL, - hostname varchar(128) NOT NULL, - last_registered TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (unresolved_hostname) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX hostname ON hostname_unresolve_history - `, - ` - CREATE INDEX hostname_idx_hostname_unresolve_history ON hostname_unresolve_history (hostname) - `, - ` - DROP INDEX last_registered_idx ON hostname_unresolve_history - `, - ` - CREATE INDEX last_registered_idx_hostname_unresolve_history ON hostname_unresolve_history (last_registered) - `, - ` - CREATE TABLE IF NOT EXISTS cluster_domain_name ( - cluster_name varchar(128) CHARACTER SET ascii NOT NULL, - domain_name varchar(128) NOT NULL, - PRIMARY KEY (cluster_name) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX domain_name_idx ON cluster_domain_name - `, - ` - CREATE INDEX domain_name_idx_cluster_domain_name ON cluster_domain_name (domain_name(32)) - `, - ` - CREATE TABLE IF NOT EXISTS primary_position_equivalence ( - equivalence_id bigint unsigned not null auto_increment, - primary1_hostname varchar(128) CHARACTER SET ascii NOT NULL, - primary1_port smallint(5) unsigned NOT NULL, - primary1_binary_log_file varchar(128) CHARACTER SET ascii NOT NULL, - primary1_binary_log_pos bigint(20) unsigned NOT NULL, - primary2_hostname varchar(128) CHARACTER SET ascii NOT NULL, - primary2_port smallint(5) unsigned NOT NULL, - primary2_binary_log_file varchar(128) CHARACTER SET ascii NOT NULL, - primary2_binary_log_pos bigint(20) unsigned NOT NULL, - last_suggested TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (equivalence_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX equivalence_uidx ON primary_position_equivalence - `, - ` - CREATE UNIQUE INDEX equivalence_uidx_primary_position_equivalence ON primary_position_equivalence (primary1_hostname, primary1_port, primary1_binary_log_file, primary1_binary_log_pos, primary2_hostname, primary2_port) - `, - ` - DROP INDEX primary2_idx ON primary_position_equivalence - `, - ` - CREATE INDEX primary2_idx_primary_position_equivalence ON primary_position_equivalence (primary2_hostname, primary2_port, primary2_binary_log_file, primary2_binary_log_pos) - `, - ` - DROP INDEX last_suggested_idx ON primary_position_equivalence - `, - ` - CREATE INDEX last_suggested_idx_primary_position_equivalence ON primary_position_equivalence (last_suggested) - `, - ` - CREATE TABLE IF NOT EXISTS async_request ( - request_id bigint unsigned NOT NULL AUTO_INCREMENT, - command varchar(128) charset ascii not null, - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - destination_hostname varchar(128) NOT NULL, - destination_port smallint(5) unsigned NOT NULL, - pattern text CHARACTER SET utf8 NOT NULL, - gtid_hint varchar(32) charset ascii not null, - begin_timestamp timestamp NULL DEFAULT NULL, - end_timestamp timestamp NULL DEFAULT NULL, - story text CHARACTER SET utf8 NOT NULL, - PRIMARY KEY (request_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX begin_timestamp_idx ON async_request - `, - ` - CREATE INDEX begin_timestamp_idx_async_request ON async_request (begin_timestamp) - `, - ` - DROP INDEX end_timestamp_idx ON async_request - `, - ` - CREATE INDEX end_timestamp_idx_async_request ON async_request (end_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS blocked_topology_recovery ( - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - cluster_name varchar(128) NOT NULL, - analysis varchar(128) NOT NULL, - last_blocked_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - blocking_recovery_id bigint unsigned, - PRIMARY KEY (hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX cluster_blocked_idx ON blocked_topology_recovery - `, - ` - CREATE INDEX cluster_blocked_idx_blocked_topology_recovery ON blocked_topology_recovery (cluster_name, last_blocked_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_last_analysis ( - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - analysis_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - analysis varchar(128) NOT NULL, - PRIMARY KEY (hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX analysis_timestamp_idx ON database_instance_last_analysis - `, - ` - CREATE INDEX analysis_timestamp_idx_database_instance_last_analysis ON database_instance_last_analysis (analysis_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_analysis_changelog ( - changelog_id bigint unsigned not null auto_increment, - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - analysis_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - analysis varchar(128) NOT NULL, - PRIMARY KEY (changelog_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX analysis_timestamp_idx ON database_instance_analysis_changelog - `, - ` - CREATE INDEX analysis_timestamp_idx_database_instance_analysis_changelog ON database_instance_analysis_changelog (analysis_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS node_health_history ( - history_id bigint unsigned not null auto_increment, - hostname varchar(128) CHARACTER SET ascii NOT NULL, - token varchar(128) NOT NULL, - first_seen_active timestamp NOT NULL, - extra_info varchar(128) CHARACTER SET utf8 NOT NULL, - PRIMARY KEY (history_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX first_seen_active_idx ON node_health_history - `, - ` - CREATE INDEX first_seen_active_idx_node_health_history ON node_health_history (first_seen_active) - `, - ` - DROP INDEX hostname_token_idx ON node_health_history - `, - ` - CREATE UNIQUE INDEX hostname_token_idx_node_health_history ON node_health_history (hostname, token) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_coordinates_history ( - history_id bigint unsigned not null auto_increment, - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - recorded_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - binary_log_file varchar(128) NOT NULL, - binary_log_pos bigint(20) unsigned NOT NULL, - relay_log_file varchar(128) NOT NULL, - relay_log_pos bigint(20) unsigned NOT NULL, - PRIMARY KEY (history_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX hostname_port_recorded_timestmp_idx ON database_instance_coordinates_history - `, - ` - CREATE INDEX hostname_port_recorded_idx_database_instance_coordinates_history ON database_instance_coordinates_history (hostname, port, recorded_timestamp) - `, - ` - DROP INDEX recorded_timestmp_idx ON database_instance_coordinates_history - `, - ` - CREATE INDEX recorded_timestmp_idx_database_instance_coordinates_history ON database_instance_coordinates_history (recorded_timestamp) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_binlog_files_history ( - history_id bigint unsigned not null auto_increment, - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - binary_log_file varchar(128) NOT NULL, - binary_log_pos bigint(20) unsigned NOT NULL, - first_seen timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - last_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - PRIMARY KEY (history_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX hostname_port_file_idx ON database_instance_binlog_files_history - `, - ` - CREATE UNIQUE INDEX hostname_port_file_idx_database_instance_binlog_files_history ON database_instance_binlog_files_history (hostname, port, binary_log_file) - `, - ` - DROP INDEX last_seen_idx ON database_instance_binlog_files_history - `, - ` - CREATE INDEX last_seen_idx_database_instance_binlog_files_history ON database_instance_binlog_files_history (last_seen) - `, - ` - CREATE TABLE IF NOT EXISTS access_token ( - access_token_id bigint unsigned not null auto_increment, - public_token varchar(128) NOT NULL, - secret_token varchar(128) NOT NULL, - generated_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - generated_by varchar(128) CHARACTER SET utf8 NOT NULL, - is_acquired tinyint unsigned NOT NULL DEFAULT '0', - PRIMARY KEY (access_token_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX public_token_idx ON access_token - `, - ` - CREATE UNIQUE INDEX public_token_uidx_access_token ON access_token (public_token) - `, - ` - DROP INDEX generated_at_idx ON access_token - `, - ` - CREATE INDEX generated_at_idx_access_token ON access_token (generated_at) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_recent_relaylog_history ( - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - current_relay_log_file varchar(128) NOT NULL, - current_relay_log_pos bigint(20) unsigned NOT NULL, - current_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - prev_relay_log_file varchar(128) NOT NULL, - prev_relay_log_pos bigint(20) unsigned NOT NULL, - prev_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - PRIMARY KEY (hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - DROP INDEX current_seen_idx ON database_instance_recent_relaylog_history - `, - ` - CREATE INDEX current_seen_idx_database_instance_recent_relaylog_history ON database_instance_recent_relaylog_history (current_seen) - `, - ` - CREATE TABLE IF NOT EXISTS orchestrator_metadata ( - anchor tinyint unsigned NOT NULL, - last_deployed_version varchar(128) CHARACTER SET ascii NOT NULL, - last_deployed_timestamp timestamp NOT NULL, - PRIMARY KEY (anchor) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS orchestrator_db_deployments ( - deployed_version varchar(128) CHARACTER SET ascii NOT NULL, - deployed_timestamp timestamp NOT NULL, - PRIMARY KEY (deployed_version) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS global_recovery_disable ( - disable_recovery tinyint unsigned NOT NULL COMMENT 'Insert 1 to disable recovery globally', - PRIMARY KEY (disable_recovery) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS topology_recovery_steps ( - recovery_step_id bigint unsigned not null auto_increment, - recovery_uid varchar(128) CHARACTER SET ascii NOT NULL, - audit_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - message text CHARACTER SET utf8 NOT NULL, - PRIMARY KEY (recovery_step_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS raft_store ( - store_id bigint unsigned not null auto_increment, - store_key varbinary(512) not null, - store_value blob not null, - PRIMARY KEY (store_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE INDEX store_key_idx_raft_store ON raft_store (store_key) - `, - ` - CREATE TABLE IF NOT EXISTS raft_log ( - log_index bigint unsigned not null auto_increment, - term bigint not null, - log_type int not null, - data blob not null, - PRIMARY KEY (log_index) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS raft_snapshot ( - snapshot_id bigint unsigned not null auto_increment, - snapshot_name varchar(128) CHARACTER SET utf8 NOT NULL, - snapshot_meta varchar(4096) CHARACTER SET utf8 NOT NULL, - PRIMARY KEY (snapshot_id) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE UNIQUE INDEX snapshot_name_uidx_raft_snapshot ON raft_snapshot (snapshot_name) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_peer_analysis ( - peer varchar(128) NOT NULL, - hostname varchar(128) NOT NULL, - port smallint(5) unsigned NOT NULL, - analysis_timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - analysis varchar(128) NOT NULL, - PRIMARY KEY (peer, hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_tls ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - port smallint(5) unsigned NOT NULL, - required tinyint unsigned NOT NULL DEFAULT 0, - PRIMARY KEY (hostname,port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS cluster_injected_pseudo_gtid ( - cluster_name varchar(128) NOT NULL, - time_injected timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (cluster_name) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS hostname_ips ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - ipv4 varchar(128) CHARACTER SET ascii NOT NULL, - ipv6 varchar(128) CHARACTER SET ascii NOT NULL, - last_updated timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (hostname) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_tags ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - port smallint(5) unsigned NOT NULL, - tag_name varchar(128) CHARACTER SET utf8 NOT NULL, - tag_value varchar(128) CHARACTER SET utf8 NOT NULL, - last_updated timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (hostname, port, tag_name) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE INDEX tag_name_idx_database_instance_tags ON database_instance_tags (tag_name) - `, - ` - CREATE TABLE IF NOT EXISTS database_instance_stale_binlog_coordinates ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - port smallint(5) unsigned NOT NULL, - binary_log_file varchar(128) NOT NULL, - binary_log_pos bigint(20) unsigned NOT NULL, - first_seen timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE INDEX first_seen_idx_database_instance_stale_binlog_coordinates ON database_instance_stale_binlog_coordinates (first_seen) - `, - ` - CREATE TABLE IF NOT EXISTS vitess_tablet ( - hostname varchar(128) CHARACTER SET ascii NOT NULL, - port smallint(5) unsigned NOT NULL, - keyspace varchar(128) CHARACTER SET ascii NOT NULL, - shard varchar(128) CHARACTER SET ascii NOT NULL, - cell varchar(128) CHARACTER SET ascii NOT NULL, - tablet_type smallint(5) NOT NULL, - primary_timestamp timestamp NOT NULL, - info varchar(512) CHARACTER SET ascii NOT NULL, - PRIMARY KEY (hostname, port) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, - ` - CREATE INDEX cell_idx_vitess_tablet ON vitess_tablet (cell) - `, - ` - CREATE INDEX ks_idx_vitess_tablet ON vitess_tablet (keyspace, shard) - `, - ` - CREATE TABLE IF NOT EXISTS vitess_keyspace ( - keyspace varchar(128) CHARACTER SET ascii NOT NULL, - keyspace_type smallint(5) NOT NULL, - durability_policy varchar(512) CHARACTER SET ascii NOT NULL, - PRIMARY KEY (keyspace) - ) ENGINE=InnoDB DEFAULT CHARSET=ascii - `, -} diff --git a/go/vt/vtgr/db/generate_patches.go b/go/vt/vtgr/db/generate_patches.go deleted file mode 100644 index 3760b3e694a..00000000000 --- a/go/vt/vtgr/db/generate_patches.go +++ /dev/null @@ -1,583 +0,0 @@ -/* - Copyright 2017 Shlomi Noach, GitHub Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - This file has been copied over from VTOrc package -*/ - -package db - -// generateSQLPatches contains DDLs for patching schema to the latest version. -// Add new statements at the end of the list so they form a changelog. -var generateSQLPatches = []string{ - ` - ALTER TABLE - database_instance - ADD COLUMN read_only TINYINT UNSIGNED NOT NULL AFTER version - `, - ` - ALTER TABLE - database_instance - ADD COLUMN last_sql_error TEXT NOT NULL AFTER exec_source_log_pos - `, - ` - ALTER TABLE - database_instance - ADD COLUMN last_io_error TEXT NOT NULL AFTER last_sql_error - `, - ` - ALTER TABLE - database_instance - ADD COLUMN oracle_gtid TINYINT UNSIGNED NOT NULL AFTER replica_io_running - `, - ` - ALTER TABLE - database_instance - ADD COLUMN mariadb_gtid TINYINT UNSIGNED NOT NULL AFTER oracle_gtid - `, - ` - ALTER TABLE - database_instance - ADD COLUMN relay_log_file varchar(128) CHARACTER SET ascii NOT NULL AFTER exec_source_log_pos - `, - ` - ALTER TABLE - database_instance - ADD COLUMN relay_log_pos bigint unsigned NOT NULL AFTER relay_log_file - `, - ` - DROP INDEX source_host_port_idx ON database_instance - `, - ` - ALTER TABLE - database_instance - ADD INDEX source_host_port_idx_database_instance (source_host, source_port) - `, - ` - ALTER TABLE - database_instance - ADD COLUMN pseudo_gtid TINYINT UNSIGNED NOT NULL AFTER mariadb_gtid - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_depth TINYINT UNSIGNED NOT NULL AFTER cluster_name - `, - ` - ALTER TABLE - database_instance - ADD COLUMN has_replication_filters TINYINT UNSIGNED NOT NULL AFTER replica_io_running - `, - ` - ALTER TABLE - database_instance - ADD COLUMN data_center varchar(32) CHARACTER SET ascii NOT NULL AFTER cluster_name - `, - ` - ALTER TABLE - database_instance - ADD COLUMN physical_environment varchar(32) CHARACTER SET ascii NOT NULL AFTER data_center - `, - ` - ALTER TABLE - database_instance_maintenance - ADD KEY active_timestamp_idx (maintenance_active, begin_timestamp) - `, - ` - ALTER TABLE - database_instance - ADD COLUMN is_co_primary TINYINT UNSIGNED NOT NULL AFTER replication_depth - `, - ` - ALTER TABLE - database_instance_maintenance - ADD KEY active_end_timestamp_idx (maintenance_active, end_timestamp) - `, - ` - ALTER TABLE - database_instance - ADD COLUMN sql_delay INT UNSIGNED NOT NULL AFTER replica_lag_seconds - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN analysis varchar(128) CHARACTER SET ascii NOT NULL - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN cluster_name varchar(128) CHARACTER SET ascii NOT NULL - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN count_affected_replicas int unsigned NOT NULL - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN replica_hosts text CHARACTER SET ascii NOT NULL - `, - ` - ALTER TABLE hostname_unresolve - ADD COLUMN last_registered TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP - `, - ` - ALTER TABLE hostname_unresolve - ADD KEY last_registered_idx (last_registered) - `, - ` - ALTER TABLE topology_recovery - ADD KEY cluster_name_in_active_idx (cluster_name, in_active_period) - `, - ` - ALTER TABLE topology_recovery - ADD KEY end_recovery_idx (end_recovery) - `, - ` - ALTER TABLE - database_instance - ADD COLUMN binlog_server TINYINT UNSIGNED NOT NULL AFTER version - `, - ` - ALTER TABLE cluster_domain_name - ADD COLUMN last_registered TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP - `, - ` - ALTER TABLE cluster_domain_name - ADD KEY last_registered_idx (last_registered) - `, - ` - ALTER TABLE - database_instance - ADD COLUMN supports_oracle_gtid TINYINT UNSIGNED NOT NULL AFTER oracle_gtid - `, - ` - ALTER TABLE - database_instance - ADD COLUMN executed_gtid_set text CHARACTER SET ascii NOT NULL AFTER oracle_gtid - `, - ` - ALTER TABLE - database_instance - ADD COLUMN server_uuid varchar(64) CHARACTER SET ascii NOT NULL AFTER server_id - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN is_successful TINYINT UNSIGNED NOT NULL DEFAULT 0 AFTER processcing_node_token - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN acknowledged TINYINT UNSIGNED NOT NULL DEFAULT 0 - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN acknowledged_by varchar(128) CHARACTER SET utf8 NOT NULL - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN acknowledge_comment text CHARACTER SET utf8 NOT NULL - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN participating_instances text CHARACTER SET ascii NOT NULL after replica_hosts - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN lost_replicas text CHARACTER SET ascii NOT NULL after participating_instances - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN all_errors text CHARACTER SET ascii NOT NULL after lost_replicas - `, - ` - ALTER TABLE audit - ADD COLUMN cluster_name varchar(128) CHARACTER SET ascii NOT NULL DEFAULT '' AFTER port - `, - ` - ALTER TABLE candidate_database_instance - ADD COLUMN priority TINYINT SIGNED NOT NULL DEFAULT 1 comment 'positive promote, nagative unpromotes' - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN acknowledged_at TIMESTAMP NULL after acknowledged - `, - ` - ALTER TABLE - topology_recovery - ADD KEY acknowledged_idx (acknowledged, acknowledged_at) - `, - ` - ALTER TABLE - blocked_topology_recovery - ADD KEY last_blocked_idx (last_blocked_timestamp) - `, - ` - ALTER TABLE candidate_database_instance - ADD COLUMN promotion_rule enum('must', 'prefer', 'neutral', 'prefer_not', 'must_not') NOT NULL DEFAULT 'neutral' - `, - ` - ALTER TABLE node_health /* sqlite3-skip */ - DROP PRIMARY KEY, - ADD PRIMARY KEY (hostname, token) - `, - ` - ALTER TABLE node_health - ADD COLUMN extra_info varchar(128) CHARACTER SET utf8 NOT NULL - `, - ` - ALTER TABLE agent_seed /* sqlite3-skip */ - MODIFY end_timestamp timestamp NOT NULL DEFAULT '1971-01-01 00:00:00' - `, - ` - ALTER TABLE active_node /* sqlite3-skip */ - MODIFY last_seen_active timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP - `, - - ` - ALTER TABLE node_health /* sqlite3-skip */ - MODIFY last_seen_active timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP - `, - ` - ALTER TABLE candidate_database_instance /* sqlite3-skip */ - MODIFY last_suggested timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP - `, - ` - ALTER TABLE primary_position_equivalence /* sqlite3-skip */ - MODIFY last_suggested timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP - `, - ` - ALTER TABLE - database_instance - ADD COLUMN last_attempted_check TIMESTAMP NOT NULL DEFAULT '1971-01-01 00:00:00' AFTER last_checked - `, - ` - ALTER TABLE - database_instance /* sqlite3-skip */ - MODIFY last_attempted_check TIMESTAMP NOT NULL DEFAULT '1971-01-01 00:00:00' - `, - ` - ALTER TABLE - database_instance_analysis_changelog - ADD KEY instance_timestamp_idx (hostname, port, analysis_timestamp) - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN last_detection_id bigint unsigned NOT NULL - `, - ` - ALTER TABLE - topology_recovery - ADD KEY last_detection_idx (last_detection_id) - `, - ` - ALTER TABLE node_health_history - ADD COLUMN command varchar(128) CHARACTER SET utf8 NOT NULL - `, - ` - ALTER TABLE node_health - ADD COLUMN command varchar(128) CHARACTER SET utf8 NOT NULL - `, - ` - ALTER TABLE database_instance_topology_history - ADD COLUMN version varchar(128) CHARACTER SET ascii NOT NULL - `, - ` - ALTER TABLE - database_instance - ADD COLUMN gtid_purged text CHARACTER SET ascii NOT NULL AFTER executed_gtid_set - `, - ` - ALTER TABLE - database_instance_coordinates_history - ADD COLUMN last_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00' AFTER recorded_timestamp - `, - ` - ALTER TABLE - access_token - ADD COLUMN is_reentrant TINYINT UNSIGNED NOT NULL default 0 - `, - ` - ALTER TABLE - access_token - ADD COLUMN acquired_at timestamp NOT NULL DEFAULT '1971-01-01 00:00:00' - `, - ` - ALTER TABLE - database_instance_pool - ADD COLUMN registered_at timestamp NOT NULL DEFAULT '1971-01-01 00:00:00' - `, - ` - ALTER TABLE - database_instance - ADD COLUMN has_replication_credentials TINYINT UNSIGNED NOT NULL - `, - ` - ALTER TABLE - database_instance - ADD COLUMN allow_tls TINYINT UNSIGNED NOT NULL AFTER sql_delay - `, - ` - ALTER TABLE - database_instance - ADD COLUMN semi_sync_enforced TINYINT UNSIGNED NOT NULL AFTER physical_environment - `, - ` - ALTER TABLE - database_instance - ADD COLUMN instance_alias varchar(128) CHARACTER SET ascii NOT NULL AFTER physical_environment - `, - ` - ALTER TABLE - topology_recovery - ADD COLUMN successor_alias varchar(128) DEFAULT NULL - `, - ` - ALTER TABLE - database_instance /* sqlite3-skip */ - MODIFY cluster_name varchar(128) NOT NULL - `, - ` - ALTER TABLE - node_health - ADD INDEX last_seen_active_idx (last_seen_active) - `, - ` - ALTER TABLE - database_instance_maintenance - ADD COLUMN processing_node_hostname varchar(128) CHARACTER SET ascii NOT NULL - `, - ` - ALTER TABLE - database_instance_maintenance - ADD COLUMN processing_node_token varchar(128) NOT NULL - `, - ` - ALTER TABLE - database_instance_maintenance - ADD COLUMN explicitly_bounded TINYINT UNSIGNED NOT NULL - `, - ` - ALTER TABLE node_health_history - ADD COLUMN app_version varchar(64) CHARACTER SET ascii NOT NULL DEFAULT "" - `, - ` - ALTER TABLE node_health - ADD COLUMN app_version varchar(64) CHARACTER SET ascii NOT NULL DEFAULT "" - `, - ` - ALTER TABLE node_health_history /* sqlite3-skip */ - MODIFY app_version varchar(64) CHARACTER SET ascii NOT NULL DEFAULT "" - `, - ` - ALTER TABLE node_health /* sqlite3-skip */ - MODIFY app_version varchar(64) CHARACTER SET ascii NOT NULL DEFAULT "" - `, - ` - ALTER TABLE - database_instance - ADD COLUMN version_comment varchar(128) NOT NULL DEFAULT '' - `, - ` - ALTER TABLE active_node - ADD COLUMN first_seen_active timestamp NOT NULL DEFAULT '1971-01-01 00:00:00' - `, - ` - ALTER TABLE node_health - ADD COLUMN first_seen_active timestamp NOT NULL DEFAULT '1971-01-01 00:00:00' - `, - ` - ALTER TABLE database_instance - ADD COLUMN major_version varchar(16) CHARACTER SET ascii NOT NULL - `, - ` - ALTER TABLE - database_instance - ADD COLUMN binlog_row_image varchar(16) CHARACTER SET ascii NOT NULL - `, - ` - ALTER TABLE topology_recovery - ADD COLUMN uid varchar(128) CHARACTER SET ascii NOT NULL - `, - ` - CREATE INDEX uid_idx_topology_recovery ON topology_recovery(uid) - `, - ` - CREATE INDEX recovery_uid_idx_topology_recovery_steps ON topology_recovery_steps(recovery_uid) - `, - ` - ALTER TABLE - database_instance - ADD COLUMN last_discovery_latency bigint not null - `, - ` - CREATE INDEX end_timestamp_idx_database_instance_downtime ON database_instance_downtime(end_timestamp) - `, - ` - ALTER TABLE - topology_failure_detection - ADD COLUMN is_actionable tinyint not null default 0 - `, - ` - DROP INDEX hostname_port_active_period_uidx_topology_failure_detection ON topology_failure_detection - `, - ` - CREATE UNIQUE INDEX host_port_active_recoverable_uidx_topology_failure_detection ON topology_failure_detection (hostname, port, in_active_period, end_active_period_unixtime, is_actionable) - `, - ` - ALTER TABLE raft_snapshot - ADD COLUMN created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP - `, - ` - ALTER TABLE node_health - ADD COLUMN db_backend varchar(255) CHARACTER SET ascii NOT NULL DEFAULT "" - `, - ` - ALTER TABLE node_health - ADD COLUMN incrementing_indicator bigint not null default 0 - `, - ` - ALTER TABLE - database_instance - ADD COLUMN semi_sync_primary_enabled TINYINT UNSIGNED NOT NULL - `, - ` - ALTER TABLE - database_instance - ADD COLUMN semi_sync_replica_enabled TINYINT UNSIGNED NOT NULL - `, - ` - ALTER TABLE - database_instance - ADD COLUMN gtid_mode varchar(32) CHARACTER SET ascii NOT NULL - `, - ` - ALTER TABLE - database_instance - ADD COLUMN last_check_partial_success tinyint unsigned NOT NULL after last_attempted_check - `, - ` - ALTER TABLE - database_instance - ADD COLUMN source_uuid varchar(64) CHARACTER SET ascii NOT NULL AFTER oracle_gtid - `, - ` - ALTER TABLE - database_instance - ADD COLUMN gtid_errant text CHARACTER SET ascii NOT NULL AFTER gtid_purged - `, - ` - ALTER TABLE - database_instance - ADD COLUMN ancestry_uuid text CHARACTER SET ascii NOT NULL AFTER source_uuid - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_sql_thread_state tinyint signed not null default 0 AFTER replica_io_running - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_io_thread_state tinyint signed not null default 0 AFTER replication_sql_thread_state - `, - ` - ALTER TABLE - database_instance_tags /* sqlite3-skip */ - DROP PRIMARY KEY, - ADD PRIMARY KEY (hostname, port, tag_name) - `, - ` - ALTER TABLE - database_instance - ADD COLUMN region varchar(32) CHARACTER SET ascii NOT NULL AFTER data_center - `, - ` - ALTER TABLE - database_instance - ADD COLUMN semi_sync_primary_timeout INT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_enabled - `, - ` - ALTER TABLE - database_instance - ADD COLUMN semi_sync_primary_wait_for_replica_count INT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_timeout - `, - ` - ALTER TABLE - database_instance - ADD COLUMN semi_sync_primary_status TINYINT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_wait_for_replica_count - `, - ` - ALTER TABLE - database_instance - ADD COLUMN semi_sync_replica_status TINYINT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_status - `, - ` - ALTER TABLE - database_instance - ADD COLUMN semi_sync_primary_clients INT UNSIGNED NOT NULL DEFAULT 0 AFTER semi_sync_primary_status - `, - ` - ALTER TABLE /* sqlite3-skip */ - database_instance - MODIFY semi_sync_primary_timeout BIGINT UNSIGNED NOT NULL DEFAULT 0 - `, - // Fields related to Replication Group the instance belongs to - ` - ALTER TABLE - database_instance - ADD COLUMN replication_group_name VARCHAR(64) CHARACTER SET ascii NOT NULL DEFAULT '' AFTER gtid_mode - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_group_is_single_primary_mode TINYINT UNSIGNED NOT NULL DEFAULT 1 AFTER replication_group_name - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_group_member_state VARCHAR(16) CHARACTER SET ascii NOT NULL DEFAULT '' AFTER replication_group_is_single_primary_mode - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_group_member_role VARCHAR(16) CHARACTER SET ascii NOT NULL DEFAULT '' AFTER replication_group_member_state - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_group_members text CHARACTER SET ascii NOT NULL AFTER replication_group_member_role - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_group_primary_host varchar(128) CHARACTER SET ascii NOT NULL DEFAULT '' AFTER replication_group_members - `, - ` - ALTER TABLE - database_instance - ADD COLUMN replication_group_primary_port smallint(5) unsigned NOT NULL DEFAULT 0 AFTER replication_group_primary_host - `, -} diff --git a/go/vt/vtgr/db/mock_mysql.go b/go/vt/vtgr/db/mock_mysql.go deleted file mode 100644 index a74d8359099..00000000000 --- a/go/vt/vtgr/db/mock_mysql.go +++ /dev/null @@ -1,191 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package db - -import ( - reflect "reflect" - "strconv" - - gomock "github.com/golang/mock/gomock" - - mysql "vitess.io/vitess/go/mysql" - inst "vitess.io/vitess/go/vt/vtgr/inst" -) - -// MockAgent is a mock of Agent interface -type MockAgent struct { - ctrl *gomock.Controller - recorder *MockAgentMockRecorder -} - -// MockAgentMockRecorder is the mock recorder for MockAgent -type MockAgentMockRecorder struct { - mock *MockAgent -} - -// NewMockAgent creates a new mock instance -func NewMockAgent(ctrl *gomock.Controller) *MockAgent { - mock := &MockAgent{ctrl: ctrl} - mock.recorder = &MockAgentMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockAgent) EXPECT() *MockAgentMockRecorder { - return m.recorder -} - -// BootstrapGroupLocked mocks base method -func (m *MockAgent) BootstrapGroupLocked(instanceKey *inst.InstanceKey) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BootstrapGroupLocked", instanceKey) - ret0, _ := ret[0].(error) - return ret0 -} - -// BootstrapGroupLocked indicates an expected call of BootstrapGroupLocked -func (mr *MockAgentMockRecorder) BootstrapGroupLocked(instanceKey any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BootstrapGroupLocked", reflect.TypeOf((*MockAgent)(nil).BootstrapGroupLocked), instanceKey) -} - -// RebootstrapGroupLocked mocks base method -func (m *MockAgent) RebootstrapGroupLocked(instanceKey *inst.InstanceKey, name string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RebootstrapGroupLocked", instanceKey, name) - ret0, _ := ret[0].(error) - return ret0 -} - -// RebootstrapGroupLocked indicates an expected call of RebootstrapGroupLocked -func (mr *MockAgentMockRecorder) RebootstrapGroupLocked(instanceKey, name any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RebootstrapGroupLocked", reflect.TypeOf((*MockAgent)(nil).RebootstrapGroupLocked), instanceKey, name) -} - -// StopGroupLocked mocks base method -func (m *MockAgent) StopGroupLocked(instanceKey *inst.InstanceKey) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StopGroupLocked", instanceKey) - ret0, _ := ret[0].(error) - return ret0 -} - -// StopGroupLocked indicates an expected call of StopGroupLocked -func (mr *MockAgentMockRecorder) StopGroupLocked(instanceKey any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopGroupLocked", reflect.TypeOf((*MockAgent)(nil).StopGroupLocked), instanceKey) -} - -// JoinGroupLocked mocks base method -func (m *MockAgent) JoinGroupLocked(instanceKey, primaryKey *inst.InstanceKey) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "JoinGroupLocked", instanceKey, primaryKey) - ret0, _ := ret[0].(error) - return ret0 -} - -// JoinGroupLocked indicates an expected call of JoinGroupLocked -func (mr *MockAgentMockRecorder) JoinGroupLocked(instanceKey, primaryKey any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "JoinGroupLocked", reflect.TypeOf((*MockAgent)(nil).JoinGroupLocked), instanceKey, primaryKey) -} - -// SetReadOnly mocks base method -func (m *MockAgent) SetReadOnly(instanceKey *inst.InstanceKey, readOnly bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetReadOnly", instanceKey, readOnly) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetReadOnly indicates an expected call of SetReadOnly -func (mr *MockAgentMockRecorder) SetReadOnly(instanceKey, readOnly any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetReadOnly", reflect.TypeOf((*MockAgent)(nil).SetReadOnly), instanceKey, readOnly) -} - -// FetchApplierGTIDSet mocks base method -func (m *MockAgent) FetchApplierGTIDSet(instanceKey *inst.InstanceKey) (mysql.GTIDSet, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchApplierGTIDSet", instanceKey) - ret0, _ := ret[0].(mysql.GTIDSet) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchApplierGTIDSet indicates an expected call of FetchApplierGTIDSet -func (mr *MockAgentMockRecorder) FetchApplierGTIDSet(instanceKey any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchApplierGTIDSet", reflect.TypeOf((*MockAgent)(nil).FetchApplierGTIDSet), instanceKey) -} - -// Failover mocks base method -func (m *MockAgent) Failover(instance *inst.InstanceKey) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Failover", instance) - ret0, _ := ret[0].(error) - return ret0 -} - -// Failover indicates an expected call of Failover -func (mr *MockAgentMockRecorder) Failover(instance any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Failover", reflect.TypeOf((*MockAgent)(nil).Failover), instance) -} - -// FetchGroupView mocks base method -func (m *MockAgent) FetchGroupView(alias string, instanceKey *inst.InstanceKey) (*GroupView, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchGroupView", alias, instanceKey) - ret0, _ := ret[0].(*GroupView) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FetchGroupView indicates an expected call of FetchGroupView -func (mr *MockAgentMockRecorder) FetchGroupView(alias, instanceKey any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchGroupView", reflect.TypeOf((*MockAgent)(nil).FetchGroupView), alias, instanceKey) -} - -// TestGroupState mocks a row from mysql -type TestGroupState struct { - MemberHost, MemberPort, MemberState, MemberRole string -} - -// BuildGroupView builds gruop view from input -func BuildGroupView(alias, groupName, host string, port int, readOnly bool, stalenessResult int, inputs []TestGroupState) *GroupView { - view := NewGroupView(alias, host, port) - view.GroupName = groupName - // group_name, member_host, member_port, member_state, member_role, is_local - for _, row := range inputs { - memberPort, _ := strconv.Atoi(row.MemberPort) - member := NewGroupMember( - row.MemberState, - row.MemberRole, - row.MemberHost, - memberPort, - false) - if host == row.MemberHost && port == memberPort { - member.ReadOnly = readOnly - } - view.UnresolvedMembers = append(view.UnresolvedMembers, member) - view.HeartbeatStaleness = stalenessResult - } - return view -} diff --git a/go/vt/vtgr/db/mysql.go b/go/vt/vtgr/db/mysql.go deleted file mode 100644 index 8c3787c9187..00000000000 --- a/go/vt/vtgr/db/mysql.go +++ /dev/null @@ -1,590 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package db - -import ( - "errors" - "fmt" - "math" - "strconv" - "strings" - - gouuid "github.com/google/uuid" - "github.com/spf13/pflag" - - "vitess.io/vitess/go/vt/external/golib/sqlutils" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vtgr/config" - "vitess.io/vitess/go/vt/vtgr/inst" -) - -var ( - configFilePath string - dbFlavor = "MySQL56" - mysqlGroupPort = 33061 - enableHeartbeatCheck bool - - // ErrGroupSplitBrain is the error when mysql group is split-brain - ErrGroupSplitBrain = errors.New("group has split brain") - // ErrGroupBackoffError is either the transient error or network partition from the group - ErrGroupBackoffError = errors.New("group backoff error") - // ErrGroupOngoingBootstrap is the error when a bootstrap is in progress - ErrGroupOngoingBootstrap = errors.New("group ongoing bootstrap") - // ErrGroupInactive is the error when mysql group is inactive unexpectedly - ErrGroupInactive = errors.New("group is inactive") - // ErrInvalidInstance is the error when the instance key has empty hostname - ErrInvalidInstance = errors.New("invalid mysql instance key") -) - -func init() { - servenv.OnParseFor("vtgr", func(fs *pflag.FlagSet) { - fs.StringVar(&configFilePath, "db_config", "", "Full path to db config file that will be used by VTGR.") - fs.StringVar(&dbFlavor, "db_flavor", "MySQL56", "MySQL flavor override.") - fs.IntVar(&mysqlGroupPort, "gr_port", 33061, "Port to bootstrap a MySQL group.") - fs.BoolVar(&enableHeartbeatCheck, "enable_heartbeat_check", false, "Enable heartbeat checking, set together with --group_heartbeat_threshold.") - }) -} - -// Agent is used by vtgr to interact with Mysql -type Agent interface { - // BootstrapGroupLocked bootstraps a mysql group - // the caller should grab a lock before - BootstrapGroupLocked(instanceKey *inst.InstanceKey) error - - // RebootstrapGroupLocked rebootstrap a group with an existing name - RebootstrapGroupLocked(instanceKey *inst.InstanceKey, name string) error - - // StopGroupLocked stops a mysql group - StopGroupLocked(instanceKey *inst.InstanceKey) error - - // JoinGroupLocked puts an instance into a mysql group based on primary instance - // the caller should grab a lock before - JoinGroupLocked(instanceKey *inst.InstanceKey, primaryKey *inst.InstanceKey) error - - // SetReadOnly set super_read_only variable - // https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_super_read_only - SetReadOnly(instanceKey *inst.InstanceKey, readOnly bool) error - - // FetchApplierGTIDSet fetches the GTID set from group_replication_applier channel - FetchApplierGTIDSet(instanceKey *inst.InstanceKey) (mysql.GTIDSet, error) - - // Failover move the mysql primary to the node defined by memberUUID - Failover(instance *inst.InstanceKey) error - - // FetchGroupView fetches group related information - FetchGroupView(alias string, instanceKey *inst.InstanceKey) (*GroupView, error) -} - -// MemberState is member state -type MemberState int - -// MemberRole is member role -type MemberRole int - -const ( - UNKNOWNSTATE MemberState = iota - OFFLINE - UNREACHABLE - RECOVERING - ONLINE - ERROR -) - -const ( - UNKNOWNROLE MemberRole = iota - SECONDARY - PRIMARY -) - -// GroupMember represents a ROW we get from performance_schema -type GroupMember struct { - HostName string - Port int - Role MemberRole - State MemberState - ReadOnly bool -} - -// GroupView is an instance's view for the group -type GroupView struct { - TabletAlias string - MySQLHost string - MySQLPort int - GroupName string - HeartbeatStaleness int - UnresolvedMembers []*GroupMember -} - -// SQLAgentImpl implements Agent -type SQLAgentImpl struct { - config *config.Configuration - dbFlavor string - enableHeartbeat bool -} - -// NewGroupView creates a new GroupView -func NewGroupView(alias, host string, port int) *GroupView { - return &GroupView{TabletAlias: alias, MySQLHost: host, MySQLPort: port} -} - -// NewGroupMember creates a new GroupMember -func NewGroupMember(state, role, host string, port int, readonly bool) *GroupMember { - return &GroupMember{ - State: toMemberState(state), - Role: toMemberRole(role), - HostName: host, - Port: port, - ReadOnly: readonly, - } -} - -// NewVTGRSqlAgent creates a SQLAgentImpl -func NewVTGRSqlAgent() *SQLAgentImpl { - var conf *config.Configuration - if (configFilePath) != "" { - log.Infof("use config from %v", configFilePath) - conf = config.ForceRead(configFilePath) - } else { - log.Warningf("use default config") - conf = config.Config - } - agent := &SQLAgentImpl{ - config: conf, - dbFlavor: dbFlavor, - enableHeartbeat: enableHeartbeatCheck, - } - return agent -} - -// BootstrapGroupLocked implements Agent interface -func (agent *SQLAgentImpl) BootstrapGroupLocked(instanceKey *inst.InstanceKey) error { - if instanceKey == nil { - return errors.New("nil instance key for bootstrap") - } - // Before bootstrap a group, double check locally there is really nothing running locally - uuid, state, err := agent.getGroupNameAndMemberState(instanceKey) - if err != nil { - return err - } - if state != "" && state != inst.GroupReplicationMemberStateOffline { - return fmt.Errorf("%v not OFFLINE mode %v [group_name=%v]", instanceKey.Hostname, state, uuid) - } - // If there is a group name stored locally, we should try to reuse it - // for port, we will override with a new one - if uuid == "" { - uuid = gouuid.New().String() - log.Infof("Try to bootstrap with a new uuid") - } - log.Infof("Bootstrap group on %v with %v", instanceKey.Hostname, uuid) - return agent.bootstrapInternal(instanceKey, uuid) -} - -func (agent *SQLAgentImpl) RebootstrapGroupLocked(instanceKey *inst.InstanceKey, name string) error { - log.Infof("Rebootstrapping group on %v with %v", instanceKey.Hostname, name) - return agent.bootstrapInternal(instanceKey, name) -} - -func (agent *SQLAgentImpl) bootstrapInternal(instanceKey *inst.InstanceKey, uuid string) error { - // Use persist to set group_replication_group_name - // so that the instance will persist the name after restart - cmds := []string{ - "set global offline_mode=0", - fmt.Sprintf("set @@persist.group_replication_group_name=\"%s\"", uuid), - fmt.Sprintf("set global group_replication_local_address=\"%s:%d\"", instanceKey.Hostname, mysqlGroupPort), - fmt.Sprintf("set global group_replication_group_seeds=\"%s:%d\"", instanceKey.Hostname, mysqlGroupPort), - "set global group_replication_bootstrap_group=ON", - fmt.Sprintf("start group_replication user='%s', password='%s'", agent.config.MySQLReplicaUser, agent.config.MySQLReplicaPassword), - "set global group_replication_bootstrap_group=OFF", - } - for _, cmd := range cmds { - if err := execInstanceWithTopo(instanceKey, cmd); err != nil { - log.Errorf("Failed to execute: %v: %v", cmd, err) - return err - } - } - return nil -} - -// StopGroupLocked implements Agent interface -func (agent *SQLAgentImpl) StopGroupLocked(instanceKey *inst.InstanceKey) error { - cmd := "stop group_replication" - return execInstanceWithTopo(instanceKey, cmd) -} - -// SetReadOnly implements Agent interface -func (agent *SQLAgentImpl) SetReadOnly(instanceKey *inst.InstanceKey, readOnly bool) error { - // Setting super_read_only ON implicitly forces read_only ON - // Setting read_only OFF implicitly forces super_read_only OFF - // https://www.perconaicom/blog/2016/09/27/using-the-super_read_only-system-variable/ - if readOnly { - return execInstance(instanceKey, "set @@global.super_read_only=1") - } - return execInstance(instanceKey, "set @@global.read_only=0") -} - -// JoinGroupLocked implements Agent interface -// Note: caller should grab the lock before calling this -func (agent *SQLAgentImpl) JoinGroupLocked(instanceKey *inst.InstanceKey, primaryInstanceKey *inst.InstanceKey) error { - var numExistingMembers int - var uuid string - query := `select count(*) as count, @@group_replication_group_name as group_name - from performance_schema.replication_group_members where member_state='ONLINE'` - err := fetchInstance(primaryInstanceKey, query, func(m sqlutils.RowMap) error { - numExistingMembers = m.GetInt("count") - uuid = m.GetString("group_name") - return nil - }) - if err != nil { - return err - } - if numExistingMembers == 0 { - return fmt.Errorf("there is no group members found on %v:%v", primaryInstanceKey.Hostname, primaryInstanceKey.Port) - } - // The queries above are executed on the primary instance - // now let's do one more check with local information to make sure it's OK to join the primary - localGroup, state, err := agent.getGroupNameAndMemberState(instanceKey) - if err != nil { - return err - } - if localGroup != "" && localGroup != uuid { - return fmt.Errorf("%v has a different group name (%v) than primary %v (%v)", instanceKey.Hostname, localGroup, primaryInstanceKey.Hostname, uuid) - } - if state == inst.GroupReplicationMemberStateOnline || state == inst.GroupReplicationMemberStateRecovering { - return fmt.Errorf("%v [%v] is alredy in a group %v", instanceKey.Hostname, state, localGroup) - } - var primaryGrPort int - query = `select @@group_replication_local_address as address` - err = fetchInstance(primaryInstanceKey, query, func(m sqlutils.RowMap) error { - address := m.GetString("address") - arr := strings.Split(address, ":") - primaryGrPort, err = strconv.Atoi(arr[1]) - if err != nil { - log.Errorf("Failed to parse primary GR port: %v", err) - return err - } - return nil - }) - if primaryGrPort == 0 { - return fmt.Errorf("cannot find group replication port on %v", primaryInstanceKey.Hostname) - } - // Now it's safe to join the group - cmds := []string{ - "set global offline_mode=0", - fmt.Sprintf("set @@persist.group_replication_group_name=\"%s\"", uuid), - fmt.Sprintf("set global group_replication_group_seeds=\"%s:%d\"", primaryInstanceKey.Hostname, primaryGrPort), - fmt.Sprintf("set global group_replication_local_address=\"%s:%d\"", instanceKey.Hostname, mysqlGroupPort), - fmt.Sprintf("start group_replication user='%s', password='%s'", agent.config.MySQLReplicaUser, agent.config.MySQLReplicaPassword), - } - for _, cmd := range cmds { - if err := execInstanceWithTopo(instanceKey, cmd); err != nil { - return err - } - } - return nil -} - -// Failover implements Agent interface -func (agent *SQLAgentImpl) Failover(instance *inst.InstanceKey) error { - var memberUUID string - query := `select member_id - from performance_schema.replication_group_members - where member_host=convert(@@hostname using ascii) and member_port=@@port and member_state='ONLINE'` - err := fetchInstance(instance, query, func(m sqlutils.RowMap) error { - memberUUID = m.GetString("member_id") - if memberUUID == "" { - return fmt.Errorf("unable to find member_id on %v", instance.Hostname) - } - return nil - }) - if err != nil { - return err - } - cmd := fmt.Sprintf(`select group_replication_set_as_primary('%s')`, memberUUID) - if err := execInstance(instance, cmd); err != nil { - return err - } - return nil -} - -// heartbeatCheck returns heartbeat check freshness result -func (agent *SQLAgentImpl) heartbeatCheck(instanceKey *inst.InstanceKey) (int, error) { - query := `select timestampdiff(SECOND, from_unixtime(truncate(ts * 0.000000001, 0)), NOW()) as diff from _vt.heartbeat;` - var result int - err := fetchInstance(instanceKey, query, func(m sqlutils.RowMap) error { - result = m.GetInt("diff") - return nil - }) - return result, err -} - -// FetchGroupView implements Agent interface -func (agent *SQLAgentImpl) FetchGroupView(alias string, instanceKey *inst.InstanceKey) (*GroupView, error) { - view := NewGroupView(alias, instanceKey.Hostname, instanceKey.Port) - var groupName string - var isReadOnly bool - query := `select - @@group_replication_group_name as group_name, - @@super_read_only as read_only, - member_host, member_port, member_state, member_role - from performance_schema.replication_group_members` - err := fetchInstance(instanceKey, query, func(m sqlutils.RowMap) error { - if groupName == "" { - groupName = m.GetString("group_name") - } - host := m.GetString("member_host") - port := m.GetInt("member_port") - isReadOnly = m.GetBool("read_only") - unresolvedMember := NewGroupMember( - m.GetString("member_state"), - m.GetString("member_role"), - host, - port, - false) - // readOnly is used to re-enable write after we set primary to read_only to protect the shard when there is - // less than desired number of nodes - // the default value is false because if the node is reachable and read_only, it will get override by the OR op - // if the host is unreachable, we don't need to trigger the protection for it therefore assume the it's writable - if host == instanceKey.Hostname && port == instanceKey.Port && isReadOnly { - unresolvedMember.ReadOnly = true - } - view.UnresolvedMembers = append(view.UnresolvedMembers, unresolvedMember) - return nil - }) - view.GroupName = groupName - if err != nil { - return nil, err - } - view.HeartbeatStaleness = math.MaxInt32 - if agent.enableHeartbeat { - heartbeatStaleness, err := agent.heartbeatCheck(instanceKey) - if err != nil { - // We can run into Error 1146: Table '_vt.heartbeat' doesn't exist on new provisioned shard: - // vtgr is checking heartbeat table - // -> heartbeat table is waiting primary tablet - // -> primary tablet needs vtgr. - // - // Therefore if we run into error, HeartbeatStaleness will - // remain to be max int32, which is 2147483647 sec - log.Errorf("Failed to check heartbeatCheck: %v", err) - } else { - view.HeartbeatStaleness = heartbeatStaleness - } - } - return view, nil -} - -// GetPrimaryView returns the view of primary member -func (view *GroupView) GetPrimaryView() (string, int, bool) { - for _, member := range view.UnresolvedMembers { - if member.Role == PRIMARY { - return member.HostName, member.Port, member.State == ONLINE - } - } - return "", 0, false -} - -func (agent *SQLAgentImpl) getGroupNameAndMemberState(instanceKey *inst.InstanceKey) (string, string, error) { - // If there is an instance that is unreachable but we still have quorum, GR will remove it from - // the replication_group_members and Failover if it is the primary node - // If the state becomes UNREACHABLE it indicates there is a network partition inside the group - // https://dev.mysql.com/doc/refman/8.0/en/group-replication-network-partitioning.html - // And then eventually if the node does not recover, the group will transit into ERROR state - // VTGR cannot handle this case, therefore we raise error here - var name, state string - query := `select @@group_replication_group_name as group_name` - err := fetchInstance(instanceKey, query, func(m sqlutils.RowMap) error { - name = m.GetString("group_name") - return nil - }) - if err != nil { - return "", "", err - } - query = `select member_state - from performance_schema.replication_group_members - where member_host=convert(@@hostname using ascii) and member_port=@@port` - err = fetchInstance(instanceKey, query, func(m sqlutils.RowMap) error { - state = m.GetString("member_state") - if state == "" { - state = inst.GroupReplicationMemberStateOffline - } - return nil - }) - if err != nil { - return "", "", err - } - return name, state, nil -} - -// FetchApplierGTIDSet implements Agent interface -func (agent *SQLAgentImpl) FetchApplierGTIDSet(instanceKey *inst.InstanceKey) (mysql.GTIDSet, error) { - var gtidSet string - // TODO: should we also take group_replication_recovery as well? - query := `select gtid_subtract(concat(received_transaction_set, ',', @@global.gtid_executed), '') as gtid_set - from performance_schema.replication_connection_status - where channel_name='group_replication_applier'` - err := fetchInstance(instanceKey, query, func(m sqlutils.RowMap) error { - // If the instance has no committed transaction, gtidSet will be empty string - gtidSet = m.GetString("gtid_set") - return nil - }) - if err != nil { - return nil, err - } - pos, err := mysql.ParsePosition(agent.dbFlavor, gtidSet) - if err != nil { - return nil, err - } - return pos.GTIDSet, nil -} - -// execInstance executes a given query on the given MySQL discovery instance -func execInstance(instanceKey *inst.InstanceKey, query string, args ...any) error { - if err := verifyInstance(instanceKey); err != nil { - return err - } - sqlDb, err := OpenDiscovery(instanceKey.Hostname, instanceKey.Port) - if err != nil { - log.Errorf("error exec %v: %v", query, err) - return err - } - _, err = sqlutils.ExecNoPrepare(sqlDb, query, args...) - return err -} - -// execInstanceWithTopo executes a given query on the given MySQL topology instance -func execInstanceWithTopo(instanceKey *inst.InstanceKey, query string, args ...any) error { - if err := verifyInstance(instanceKey); err != nil { - return err - } - sqlDb, err := OpenTopology(instanceKey.Hostname, instanceKey.Port) - if err != nil { - log.Errorf("error exec %v: %v", query, err) - return err - } - _, err = sqlutils.ExecNoPrepare(sqlDb, query, args...) - return err -} - -// fetchInstance fetches result from mysql -func fetchInstance(instanceKey *inst.InstanceKey, query string, onRow func(sqlutils.RowMap) error) error { - if err := verifyInstance(instanceKey); err != nil { - return err - } - sqlDb, err := OpenDiscovery(instanceKey.Hostname, instanceKey.Port) - if err != nil { - return err - } - return sqlutils.QueryRowsMap(sqlDb, query, onRow) -} - -// The hostname and port can be empty if a tablet crashed and did not populate them in -// the topo server. We treat them as if the host is unreachable when we calculate the -// quorum for the shard. -func verifyInstance(instanceKey *inst.InstanceKey) error { - if instanceKey.Hostname == "" || instanceKey.Port == 0 { - return ErrInvalidInstance - } - return nil -} - -// CreateInstanceKey returns an InstanceKey based on group member input -// When the group is init for the first time, the hostname and port are not set, e.g., -// +---------------------------+-----------+-------------+-------------+--------------+-------------+ -// | CHANNEL_NAME | MEMBER_ID | MEMBER_HOST | MEMBER_PORT | MEMBER_STATE | MEMBER_ROLE | -// +---------------------------+-----------+-------------+-------------+--------------+-------------+ -// | group_replication_applier | | | NULL | OFFLINE | | -// +---------------------------+-----------+-------------+-------------+--------------+-------------+ -// therefore we substitute with view's local hostname and port -func (view *GroupView) CreateInstanceKey(member *GroupMember) inst.InstanceKey { - if member.HostName == "" && member.Port == 0 { - return inst.InstanceKey{ - Hostname: view.MySQLHost, - Port: view.MySQLPort, - } - } - return inst.InstanceKey{ - Hostname: member.HostName, - Port: member.Port, - } -} - -// ToString make string for group view -func (view *GroupView) ToString() string { - var sb strings.Builder - sb.WriteString(fmt.Sprintf("group_name:%v\n", view.GroupName)) - for _, m := range view.UnresolvedMembers { - sb.WriteString(fmt.Sprintf("host:%v:%v | role:%v | state:%v\n", m.HostName, m.Port, m.Role, m.State)) - } - return sb.String() -} - -func (state MemberState) String() string { - switch state { - case ONLINE: - return inst.GroupReplicationMemberStateOnline - case ERROR: - return inst.GroupReplicationMemberStateError - case RECOVERING: - return inst.GroupReplicationMemberStateRecovering - case OFFLINE: - return inst.GroupReplicationMemberStateOffline - case UNREACHABLE: - return inst.GroupReplicationMemberStateUnreachable - } - return "UNKNOWN" -} - -func toMemberState(state string) MemberState { - switch state { - case inst.GroupReplicationMemberStateOnline: - return ONLINE - case inst.GroupReplicationMemberStateError: - return ERROR - case inst.GroupReplicationMemberStateRecovering: - return RECOVERING - case inst.GroupReplicationMemberStateOffline: - return OFFLINE - case inst.GroupReplicationMemberStateUnreachable: - return UNREACHABLE - default: - return UNKNOWNSTATE - } -} - -func (role MemberRole) String() string { - switch role { - case PRIMARY: - return inst.GroupReplicationMemberRolePrimary - case SECONDARY: - return inst.GroupReplicationMemberRoleSecondary - } - return "UNKNOWN" -} - -func toMemberRole(role string) MemberRole { - switch role { - case inst.GroupReplicationMemberRolePrimary: - return PRIMARY - case inst.GroupReplicationMemberRoleSecondary: - return SECONDARY - default: - return UNKNOWNROLE - } -} diff --git a/go/vt/vtgr/db/tls.go b/go/vt/vtgr/db/tls.go deleted file mode 100644 index 514e3d49df3..00000000000 --- a/go/vt/vtgr/db/tls.go +++ /dev/null @@ -1,152 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - This file has been copied over from VTOrc package -*/ - -package db - -import ( - "fmt" - "strings" - "time" - - "vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/log" - - "github.com/go-sql-driver/mysql" - "github.com/patrickmn/go-cache" - "github.com/rcrowley/go-metrics" - - "vitess.io/vitess/go/vt/vtgr/config" - "vitess.io/vitess/go/vt/vtgr/ssl" -) - -const Error3159 = "Error 3159:" -const Error1045 = "Access denied for user" - -// Track if a TLS has already been configured for topology -var topologyTLSConfigured = false - -// Track if a TLS has already been configured for Orchestrator -var orchestratorTLSConfigured = false - -var requireTLSCache *cache.Cache = cache.New(time.Duration(config.Config.TLSCacheTTLFactor*config.Config.InstancePollSeconds)*time.Second, time.Second) - -var readInstanceTLSCounter = metrics.NewCounter() -var writeInstanceTLSCounter = metrics.NewCounter() -var readInstanceTLSCacheCounter = metrics.NewCounter() -var writeInstanceTLSCacheCounter = metrics.NewCounter() - -func init() { - metrics.Register("instance_tls.read", readInstanceTLSCounter) - metrics.Register("instance_tls.write", writeInstanceTLSCounter) - metrics.Register("instance_tls.read_cache", readInstanceTLSCacheCounter) - metrics.Register("instance_tls.write_cache", writeInstanceTLSCacheCounter) -} - -func requiresTLS(host string, port int, uri string) bool { - cacheKey := fmt.Sprintf("%s:%d", host, port) - - if value, found := requireTLSCache.Get(cacheKey); found { - readInstanceTLSCacheCounter.Inc(1) - return value.(bool) - } - - required := false - db, _, _ := sqlutils.GetDB(uri) - if err := db.Ping(); err != nil && (strings.Contains(err.Error(), Error3159) || strings.Contains(err.Error(), Error1045)) { - required = true - } - - query := ` - insert into - database_instance_tls ( - hostname, port, required - ) values ( - ?, ?, ? - ) - on duplicate key update - required=values(required) - ` - if _, err := ExecOrchestrator(query, host, port, required); err != nil { - log.Error(err) - } - writeInstanceTLSCounter.Inc(1) - - requireTLSCache.Set(cacheKey, required, cache.DefaultExpiration) - writeInstanceTLSCacheCounter.Inc(1) - - return required -} - -// SetupMySQLTopologyTLS creates a TLS configuration from the config supplied CA, Certificate, and Private key. -// Register the TLS config with the mysql drivers as the "topology" config -// Modify the supplied URI to call the TLS config -func SetupMySQLTopologyTLS(uri string) (string, error) { - if !topologyTLSConfigured { - tlsConfig, err := ssl.NewTLSConfig(config.Config.MySQLTopologySSLCAFile, !config.Config.MySQLTopologySSLSkipVerify, config.Config.MySQLTopologyTLSMinVersionNumber()) - if err != nil { - log.Errorf("Can't create TLS configuration for Topology connection %s: %s", uri, err) - return "", err - } - tlsConfig.InsecureSkipVerify = config.Config.MySQLTopologySSLSkipVerify - - if (config.Config.MySQLTopologyUseMutualTLS && !config.Config.MySQLTopologySSLSkipVerify) && - config.Config.MySQLTopologySSLCertFile != "" && - config.Config.MySQLTopologySSLPrivateKeyFile != "" { - if err = ssl.AppendKeyPair(tlsConfig, config.Config.MySQLTopologySSLCertFile, config.Config.MySQLTopologySSLPrivateKeyFile); err != nil { - log.Errorf("Can't setup TLS key pairs for %s: %s", uri, err) - return "", err - } - } - if err = mysql.RegisterTLSConfig("topology", tlsConfig); err != nil { - log.Errorf("Can't register mysql TLS config for topology: %s", err) - return "", err - } - topologyTLSConfigured = true - } - return fmt.Sprintf("%s&tls=topology", uri), nil -} - -// SetupMySQLOrchestratorTLS creates a TLS configuration from the config supplied CA, Certificate, and Private key. -// Register the TLS config with the mysql drivers as the "orchestrator" config -// Modify the supplied URI to call the TLS config -func SetupMySQLOrchestratorTLS(uri string) (string, error) { - if !orchestratorTLSConfigured { - tlsConfig, err := ssl.NewTLSConfig(config.Config.MySQLOrchestratorSSLCAFile, !config.Config.MySQLOrchestratorSSLSkipVerify, config.Config.MySQLOrchestratorTLSMinVersionNumber()) - if err != nil { - log.Fatalf("Can't create TLS configuration for Orchestrator connection %s: %s", uri, err) - return "", err - } - tlsConfig.InsecureSkipVerify = config.Config.MySQLOrchestratorSSLSkipVerify - if (!config.Config.MySQLOrchestratorSSLSkipVerify) && - config.Config.MySQLOrchestratorSSLCertFile != "" && - config.Config.MySQLOrchestratorSSLPrivateKeyFile != "" { - if err = ssl.AppendKeyPair(tlsConfig, config.Config.MySQLOrchestratorSSLCertFile, config.Config.MySQLOrchestratorSSLPrivateKeyFile); err != nil { - log.Fatalf("Can't setup TLS key pairs for %s: %s", uri, err) - return "", err - } - } - if err = mysql.RegisterTLSConfig("orchestrator", tlsConfig); err != nil { - log.Fatalf("Can't register mysql TLS config for orchestrator: %s", err) - return "", err - } - orchestratorTLSConfigured = true - } - return fmt.Sprintf("%s&tls=orchestrator", uri), nil -} diff --git a/go/vt/vtgr/inst/instance_key.go b/go/vt/vtgr/inst/instance_key.go deleted file mode 100644 index cd3039537b3..00000000000 --- a/go/vt/vtgr/inst/instance_key.go +++ /dev/null @@ -1,125 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - This file has been copied over from VTOrc package -*/ - -package inst - -import ( - "fmt" - "regexp" - "strings" -) - -// InstanceKey is an instance indicator, identifued by hostname and port -type InstanceKey struct { - Hostname string - Port int -} - -var ( - ipv4Regexp = regexp.MustCompile(`^([0-9]+)[.]([0-9]+)[.]([0-9]+)[.]([0-9]+)$`) -) - -const detachHint = "//" - -// Constant strings for Group Replication information -// See https://dev.mysql.com/doc/refman/8.0/en/replication-group-members-table.html for additional information. -const ( - // Group member roles - GroupReplicationMemberRolePrimary = "PRIMARY" - GroupReplicationMemberRoleSecondary = "SECONDARY" - // Group member states - GroupReplicationMemberStateOnline = "ONLINE" - GroupReplicationMemberStateRecovering = "RECOVERING" - GroupReplicationMemberStateUnreachable = "UNREACHABLE" - GroupReplicationMemberStateOffline = "OFFLINE" - GroupReplicationMemberStateError = "ERROR" -) - -// Equals tests equality between this key and another key -func (instanceKey *InstanceKey) Equals(other *InstanceKey) bool { - if other == nil { - return false - } - return instanceKey.Hostname == other.Hostname && instanceKey.Port == other.Port -} - -// SmallerThan returns true if this key is dictionary-smaller than another. -// This is used for consistent sorting/ordering; there's nothing magical about it. -func (instanceKey *InstanceKey) SmallerThan(other *InstanceKey) bool { - if instanceKey.Hostname < other.Hostname { - return true - } - if instanceKey.Hostname == other.Hostname && instanceKey.Port < other.Port { - return true - } - return false -} - -// IsDetached returns 'true' when this hostname is logically "detached" -func (instanceKey *InstanceKey) IsDetached() bool { - return strings.HasPrefix(instanceKey.Hostname, detachHint) -} - -// IsValid uses simple heuristics to see whether this key represents an actual instance -func (instanceKey *InstanceKey) IsValid() bool { - if instanceKey.Hostname == "_" { - return false - } - if instanceKey.IsDetached() { - return false - } - return len(instanceKey.Hostname) > 0 && instanceKey.Port > 0 -} - -// DetachedKey returns an instance key whose hostname is detahced: invalid, but recoverable -func (instanceKey *InstanceKey) DetachedKey() *InstanceKey { - if instanceKey.IsDetached() { - return instanceKey - } - return &InstanceKey{Hostname: fmt.Sprintf("%s%s", detachHint, instanceKey.Hostname), Port: instanceKey.Port} -} - -// ReattachedKey returns an instance key whose hostname is detahced: invalid, but recoverable -func (instanceKey *InstanceKey) ReattachedKey() *InstanceKey { - if !instanceKey.IsDetached() { - return instanceKey - } - return &InstanceKey{Hostname: instanceKey.Hostname[len(detachHint):], Port: instanceKey.Port} -} - -// StringCode returns an official string representation of this key -func (instanceKey *InstanceKey) StringCode() string { - return fmt.Sprintf("%s:%d", instanceKey.Hostname, instanceKey.Port) -} - -// DisplayString returns a user-friendly string representation of this key -func (instanceKey *InstanceKey) DisplayString() string { - return instanceKey.StringCode() -} - -// String returns a user-friendly string representation of this key -func (instanceKey InstanceKey) String() string { - return instanceKey.StringCode() -} - -// IsValid uses simple heuristics to see whether this key represents an actual instance -func (instanceKey *InstanceKey) IsIPv4() bool { - return ipv4Regexp.MatchString(instanceKey.Hostname) -} diff --git a/go/vt/vtgr/inst/instance_key_test.go b/go/vt/vtgr/inst/instance_key_test.go deleted file mode 100644 index e3e016e474c..00000000000 --- a/go/vt/vtgr/inst/instance_key_test.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - This file has been copied over from VTOrc package -*/ - -package inst - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/vt/vtgr/config" -) - -func init() { - config.Config.HostnameResolveMethod = "none" -} - -var key1 = InstanceKey{Hostname: "host1", Port: 3306} - -func TestInstanceKeyEquals(t *testing.T) { - i1 := InstanceKey{ - Hostname: "sql00.db", - Port: 3306, - } - i2 := InstanceKey{ - Hostname: "sql00.db", - Port: 3306, - } - - require.Equal(t, i1, i2) - - i2.Port = 3307 - require.NotEqual(t, i1, i2) -} - -func TestInstanceKeyDetach(t *testing.T) { - require.False(t, key1.IsDetached()) - detached1 := key1.DetachedKey() - require.True(t, detached1.IsDetached()) - detached2 := key1.DetachedKey() - require.True(t, detached2.IsDetached()) - require.True(t, detached1.Equals(detached2)) - - reattached1 := detached1.ReattachedKey() - require.False(t, reattached1.IsDetached()) - require.True(t, reattached1.Equals(&key1)) - reattached2 := reattached1.ReattachedKey() - require.False(t, reattached2.IsDetached()) - require.True(t, reattached1.Equals(reattached2)) -} diff --git a/go/vt/vtgr/log/log.go b/go/vt/vtgr/log/log.go deleted file mode 100644 index 4133bbb39a1..00000000000 --- a/go/vt/vtgr/log/log.go +++ /dev/null @@ -1,53 +0,0 @@ -package log - -import ( - "fmt" - - "vitess.io/vitess/go/vt/log" -) - -// Logger is a wrapper that prefix loglines with keyspace/shard -type Logger struct { - prefix string -} - -// NewVTGRLogger creates a new logger -func NewVTGRLogger(keyspace, shard string) *Logger { - return &Logger{ - prefix: fmt.Sprintf("%s/%s", keyspace, shard), - } -} - -// Info formats arguments like fmt.Print -func (logger *Logger) Info(msg string) { - log.InfoDepth(1, logger.annotate(msg)) -} - -// Infof formats arguments like fmt.Printf. -func (logger *Logger) Infof(format string, args ...any) { - log.InfoDepth(1, logger.annotate(fmt.Sprintf(format, args...))) -} - -// Warning formats arguments like fmt.Print -func (logger *Logger) Warning(msg string) { - log.WarningDepth(1, logger.annotate(msg)) -} - -// Warningf formats arguments like fmt.Printf. -func (logger *Logger) Warningf(format string, args ...any) { - log.WarningDepth(1, logger.annotate(fmt.Sprintf(format, args...))) -} - -// Error formats arguments like fmt.Print -func (logger *Logger) Error(msg string) { - log.ErrorDepth(1, logger.annotate(msg)) -} - -// Errorf formats arguments like fmt.Printf. -func (logger *Logger) Errorf(format string, args ...any) { - log.ErrorDepth(1, logger.annotate(fmt.Sprintf(format, args...))) -} - -func (logger *Logger) annotate(input string) string { - return fmt.Sprintf("shard=%s %s", logger.prefix, input) -} diff --git a/go/vt/vtgr/log/log_test.go b/go/vt/vtgr/log/log_test.go deleted file mode 100644 index fd4ede386e9..00000000000 --- a/go/vt/vtgr/log/log_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package log - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestVTGRLogger(t *testing.T) { - logger := NewVTGRLogger("ks", "0") - s1 := logger.annotate("abc") - assert.Equal(t, "shard=ks/0 abc", s1) - s2 := fmt.Sprintf(logger.annotate("abc %s"), "def") - assert.Equal(t, "shard=ks/0 abc def", s2) -} diff --git a/go/vt/vtgr/plugin_consultopo.go b/go/vt/vtgr/plugin_consultopo.go deleted file mode 100644 index 3786fd59c26..00000000000 --- a/go/vt/vtgr/plugin_consultopo.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vtgr - -// This plugin imports consultopo to register the consul implementation of TopoServer. - -import ( - _ "vitess.io/vitess/go/vt/topo/consultopo" -) diff --git a/go/vt/vtgr/plugin_etcd2topo.go b/go/vt/vtgr/plugin_etcd2topo.go deleted file mode 100644 index 0f9c385f69b..00000000000 --- a/go/vt/vtgr/plugin_etcd2topo.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vtgr - -// This plugin imports etcd2topo to register the etcd2 implementation of TopoServer. - -import ( - _ "vitess.io/vitess/go/vt/topo/etcd2topo" -) diff --git a/go/vt/vtgr/plugin_grpctmclient.go b/go/vt/vtgr/plugin_grpctmclient.go deleted file mode 100644 index 529c560c207..00000000000 --- a/go/vt/vtgr/plugin_grpctmclient.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vtgr - -// Imports and register the gRPC tabletmanager client - -import ( - _ "vitess.io/vitess/go/vt/vttablet/grpctmclient" -) diff --git a/go/vt/vtgr/plugin_zk2topo.go b/go/vt/vtgr/plugin_zk2topo.go deleted file mode 100644 index f524fd0e21a..00000000000 --- a/go/vt/vtgr/plugin_zk2topo.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vtgr - -// Imports and register the zk2 TopologyServer - -import ( - _ "vitess.io/vitess/go/vt/topo/zk2topo" -) diff --git a/go/vt/vtgr/ssl/ssl.go b/go/vt/vtgr/ssl/ssl.go deleted file mode 100644 index 9b940d9f743..00000000000 --- a/go/vt/vtgr/ssl/ssl.go +++ /dev/null @@ -1,208 +0,0 @@ -package ssl - -import ( - "crypto/tls" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - nethttp "net/http" - "os" - "strings" - - "vitess.io/vitess/go/vt/log" - - "github.com/go-martini/martini" - "github.com/howeyc/gopass" - - "vitess.io/vitess/go/vt/vtgr/config" -) - -/* - This file has been copied over from VTOrc package -*/ - -// Determine if a string element is in a string array -func HasString(elem string, arr []string) bool { - for _, s := range arr { - if s == elem { - return true - } - } - return false -} - -// NewTLSConfig returns an initialized TLS configuration suitable for client -// authentication. If caFile is non-empty, it will be loaded. -func NewTLSConfig(caFile string, verifyCert bool, minVersion uint16) (*tls.Config, error) { - var c tls.Config - - // Set to TLS 1.2 as a minimum. This is overridden for mysql communication - c.MinVersion = minVersion - - if verifyCert { - log.Info("verifyCert requested, client certificates will be verified") - c.ClientAuth = tls.VerifyClientCertIfGiven - } - caPool, err := ReadCAFile(caFile) - if err != nil { - return &c, err - } - c.ClientCAs = caPool - return &c, nil -} - -// Returns CA certificate. If caFile is non-empty, it will be loaded. -func ReadCAFile(caFile string) (*x509.CertPool, error) { - var caCertPool *x509.CertPool - if caFile != "" { - data, err := os.ReadFile(caFile) - if err != nil { - return nil, err - } - caCertPool = x509.NewCertPool() - if !caCertPool.AppendCertsFromPEM(data) { - return nil, errors.New("No certificates parsed") - } - log.Infof("Read in CA file: %v", caFile) - } - return caCertPool, nil -} - -// Verify that the OU of the presented client certificate matches the list -// of Valid OUs -func Verify(r *nethttp.Request, validOUs []string) error { - if strings.Contains(r.URL.String(), config.Config.StatusEndpoint) && !config.Config.StatusOUVerify { - return nil - } - if r.TLS == nil { - return errors.New("No TLS") - } - for _, chain := range r.TLS.VerifiedChains { - s := chain[0].Subject.OrganizationalUnit - log.Infof("All OUs:", strings.Join(s, " ")) - for _, ou := range s { - log.Infof("Client presented OU:", ou) - if HasString(ou, validOUs) { - log.Infof("Found valid OU:", ou) - return nil - } - } - } - log.Error("No valid OUs found") - return errors.New("Invalid OU") -} - -// TODO: make this testable? -func VerifyOUs(validOUs []string) martini.Handler { - return func(res nethttp.ResponseWriter, req *nethttp.Request, c martini.Context) { - log.Infof("Verifying client OU") - if err := Verify(req, validOUs); err != nil { - nethttp.Error(res, err.Error(), nethttp.StatusUnauthorized) - } - } -} - -// AppendKeyPair loads the given TLS key pair and appends it to -// tlsConfig.Certificates. -func AppendKeyPair(tlsConfig *tls.Config, certFile string, keyFile string) error { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return err - } - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - return nil -} - -// Read in a keypair where the key is password protected -func AppendKeyPairWithPassword(tlsConfig *tls.Config, certFile string, keyFile string, pemPass []byte) error { - - // Certificates aren't usually password protected, but we're kicking the password - // along just in case. It won't be used if the file isn't encrypted - certData, err := ReadPEMData(certFile, pemPass) - if err != nil { - return err - } - keyData, err := ReadPEMData(keyFile, pemPass) - if err != nil { - return err - } - cert, err := tls.X509KeyPair(certData, keyData) - if err != nil { - return err - } - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - return nil -} - -// Read a PEM file and ask for a password to decrypt it if needed -func ReadPEMData(pemFile string, pemPass []byte) ([]byte, error) { - pemData, err := os.ReadFile(pemFile) - if err != nil { - return pemData, err - } - - // We should really just get the pem.Block back here, if there's other - // junk on the end, warn about it. - pemBlock, rest := pem.Decode(pemData) - if len(rest) > 0 { - log.Warning("Didn't parse all of", pemFile) - } - - if x509.IsEncryptedPEMBlock(pemBlock) { //nolint SA1019 - // Decrypt and get the ASN.1 DER bytes here - pemData, err = x509.DecryptPEMBlock(pemBlock, pemPass) //nolint SA1019 - if err != nil { - return pemData, err - } - log.Infof("Decrypted %v successfully", pemFile) - // Shove the decrypted DER bytes into a new pem Block with blank headers - var newBlock pem.Block - newBlock.Type = pemBlock.Type - newBlock.Bytes = pemData - // This is now like reading in an uncrypted key from a file and stuffing it - // into a byte stream - pemData = pem.EncodeToMemory(&newBlock) - } - return pemData, nil -} - -// Print a password prompt on the terminal and collect a password -func GetPEMPassword(pemFile string) []byte { - fmt.Printf("Password for %s: ", pemFile) - pass, err := gopass.GetPasswd() - if err != nil { - // We'll error with an incorrect password at DecryptPEMBlock - return []byte("") - } - return pass -} - -// Determine if PEM file is encrypted -func IsEncryptedPEM(pemFile string) bool { - pemData, err := os.ReadFile(pemFile) - if err != nil { - return false - } - pemBlock, _ := pem.Decode(pemData) - if len(pemBlock.Bytes) == 0 { - return false - } - return x509.IsEncryptedPEMBlock(pemBlock) //nolint SA1019 -} - -// ListenAndServeTLS acts identically to http.ListenAndServeTLS, except that it -// expects TLS configuration. -// TODO: refactor so this is testable? -func ListenAndServeTLS(addr string, handler nethttp.Handler, tlsConfig *tls.Config) error { - if addr == "" { - // On unix Listen calls getaddrinfo to parse the port, so named ports are fine as long - // as they exist in /etc/services - addr = ":https" - } - l, err := tls.Listen("tcp", addr, tlsConfig) - if err != nil { - return err - } - return nethttp.Serve(l, handler) -} diff --git a/go/vt/vtgr/ssl/ssl_test.go b/go/vt/vtgr/ssl/ssl_test.go deleted file mode 100644 index 94502ea556e..00000000000 --- a/go/vt/vtgr/ssl/ssl_test.go +++ /dev/null @@ -1,285 +0,0 @@ -package ssl_test - -import ( - "crypto/tls" - "crypto/x509" - "encoding/pem" - "fmt" - nethttp "net/http" - "os" - "reflect" - "strings" - "syscall" - "testing" - - "vitess.io/vitess/go/vt/vtgr/config" - "vitess.io/vitess/go/vt/vtgr/ssl" -) - -/* - This file has been copied over from VTOrc package -*/ - -func TestHasString(t *testing.T) { - elem := "foo" - a1 := []string{"bar", "foo", "baz"} - a2 := []string{"bar", "fuu", "baz"} - good := ssl.HasString(elem, a1) - if !good { - t.Errorf("Didn't find %s in array %s", elem, strings.Join(a1, ", ")) - } - bad := ssl.HasString(elem, a2) - if bad { - t.Errorf("Unexpectedly found %s in array %s", elem, strings.Join(a2, ", ")) - } -} - -// TODO: Build a fake CA and make sure it loads up -func TestNewTLSConfig(t *testing.T) { - fakeCA := writeFakeFile(pemCertificate) - defer syscall.Unlink(fakeCA) - - conf, err := ssl.NewTLSConfig(fakeCA, true, tls.VersionTLS13) - if err != nil { - t.Errorf("Could not create new TLS config: %s", err) - } - if conf.ClientAuth != tls.VerifyClientCertIfGiven { - t.Errorf("Client certificate verification was not enabled") - } - if conf.ClientCAs == nil { - t.Errorf("ClientCA empty even though cert provided") - } - if conf.MinVersion != tls.VersionTLS13 { - t.Errorf("incorrect tls min version set") - } - - conf, err = ssl.NewTLSConfig("", false, tls.VersionTLS12) - if err != nil { - t.Errorf("Could not create new TLS config: %s", err) - } - if conf.ClientAuth == tls.VerifyClientCertIfGiven { - t.Errorf("Client certificate verification was enabled unexpectedly") - } - if conf.ClientCAs != nil { - t.Errorf("Filling in ClientCA somehow without a cert") - } - if conf.MinVersion != tls.VersionTLS12 { - t.Errorf("incorrect tls min version set") - } -} - -func TestStatus(t *testing.T) { - var validOUs []string - url := fmt.Sprintf("http://example.com%s", config.Config.StatusEndpoint) - - req, err := nethttp.NewRequest("GET", url, nil) - if err != nil { - t.Fatal(err) - } - config.Config.StatusOUVerify = false - if err := ssl.Verify(req, validOUs); err != nil { - t.Errorf("Failed even with verification off") - } - config.Config.StatusOUVerify = true - if err := ssl.Verify(req, validOUs); err == nil { - t.Errorf("Did not fail on with bad verification") - } -} - -func TestVerify(t *testing.T) { - var validOUs []string - - req, err := nethttp.NewRequest("GET", "http://example.com/foo", nil) - if err != nil { - t.Fatal(err) - } - - if err := ssl.Verify(req, validOUs); err == nil { - t.Errorf("Did not fail on lack of TLS config") - } - - pemBlock, _ := pem.Decode([]byte(pemCertificate)) - cert, err := x509.ParseCertificate(pemBlock.Bytes) - if err != nil { - t.Fatal(err) - } - - var tcs tls.ConnectionState - req.TLS = &tcs - - if err := ssl.Verify(req, validOUs); err == nil { - t.Errorf("Found a valid OU without any being available") - } - - // Set a fake OU - cert.Subject.OrganizationalUnit = []string{"testing"} - - // Pretend our request had a certificate - req.TLS.PeerCertificates = []*x509.Certificate{cert} - req.TLS.VerifiedChains = [][]*x509.Certificate{req.TLS.PeerCertificates} - - // Look for fake OU - validOUs = []string{"testing"} - - if err := ssl.Verify(req, validOUs); err != nil { - t.Errorf("Failed to verify certificate OU") - } -} - -func TestReadPEMData(t *testing.T) { - pemCertFile := writeFakeFile(pemCertificate) - defer syscall.Unlink(pemCertFile) - pemPKFile := writeFakeFile(pemPrivateKey) - defer syscall.Unlink(pemPKFile) - pemPKWPFile := writeFakeFile(pemPrivateKeyWithPass) - defer syscall.Unlink(pemPKWPFile) - _, err := ssl.ReadPEMData(pemCertFile, []byte{}) - if err != nil { - t.Errorf("Failed to decode certificate: %s", err) - } - pemNoPassBytes, err := ssl.ReadPEMData(pemPKFile, []byte{}) - if err != nil { - t.Errorf("Failed to decode private key: %s", err) - } - pemPassBytes, err := ssl.ReadPEMData(pemPKWPFile, []byte("testing")) - if err != nil { - t.Errorf("Failed to decode private key with password: %s", err) - } - if reflect.DeepEqual(pemPassBytes, pemNoPassBytes) { - t.Errorf("PEM encoding failed after password removal") - } -} - -func TestAppendKeyPair(t *testing.T) { - c, err := ssl.NewTLSConfig("", false, tls.VersionTLS12) - if err != nil { - t.Fatal(err) - } - pemCertFile := writeFakeFile(pemCertificate) - defer syscall.Unlink(pemCertFile) - pemPKFile := writeFakeFile(pemPrivateKey) - defer syscall.Unlink(pemPKFile) - - if err := ssl.AppendKeyPair(c, pemCertFile, pemPKFile); err != nil { - t.Errorf("Failed to append certificate and key to tls config: %s", err) - } -} - -func TestAppendKeyPairWithPassword(t *testing.T) { - c, err := ssl.NewTLSConfig("", false, tls.VersionTLS12) - if err != nil { - t.Fatal(err) - } - pemCertFile := writeFakeFile(pemCertificate) - defer syscall.Unlink(pemCertFile) - pemPKFile := writeFakeFile(pemPrivateKeyWithPass) - defer syscall.Unlink(pemPKFile) - - if err := ssl.AppendKeyPairWithPassword(c, pemCertFile, pemPKFile, []byte("testing")); err != nil { - t.Errorf("Failed to append certificate and key to tls config: %s", err) - } -} - -func TestIsEncryptedPEM(t *testing.T) { - pemPKFile := writeFakeFile(pemPrivateKey) - defer syscall.Unlink(pemPKFile) - pemPKWPFile := writeFakeFile(pemPrivateKeyWithPass) - defer syscall.Unlink(pemPKWPFile) - if ssl.IsEncryptedPEM(pemPKFile) { - t.Errorf("Incorrectly identified unencrypted PEM as encrypted") - } - if !ssl.IsEncryptedPEM(pemPKWPFile) { - t.Errorf("Incorrectly identified encrypted PEM as unencrypted") - } -} - -func writeFakeFile(content string) string { - f, err := os.CreateTemp("", "ssl_test") - if err != nil { - return "" - } - os.WriteFile(f.Name(), []byte(content), 0644) - return f.Name() -} - -const pemCertificate = `-----BEGIN CERTIFICATE----- -MIIDtTCCAp2gAwIBAgIJAOxKC7FsJelrMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV -BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX -aWRnaXRzIFB0eSBMdGQwHhcNMTcwODEwMTQ0MjM3WhcNMTgwODEwMTQ0MjM3WjBF -MQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50 -ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -CgKCAQEA12vHV3gYy5zd1lujA7prEhCSkAszE6E37mViWhLQ63CuedZfyYaTAHQK -HYDZi4K1MNAySUfZRMcICSSsxlRIz6mzXrFsowaJgwx4cbMDIvXE03KstuXoTYJh -+xmXB+5yEVEtIyP2DvPqfCmwCZb3k94Y/VY1nAQDxIxciXrAxT9zT1oYd0YWr2yp -J2mgsfnY4c3zg7W5WgvOTmYz7Ey7GJjpUjGdayx+P1CilKzSWH1xZuVQFNLSHvcH -WXkEoCMVc0tW5mO5eEO1aNHo9MSjPF386l1rq+pz5OwjqCEZq2b1YxesyLnbF+8+ -iYGfYmFaDLFwG7zVDwialuI4TzIIOQIDAQABo4GnMIGkMB0GA1UdDgQWBBQ1ubGx -Yvn3wN5VXyoR0lOD7ARzVTB1BgNVHSMEbjBsgBQ1ubGxYvn3wN5VXyoR0lOD7ARz -VaFJpEcwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNV -BAoTGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZIIJAOxKC7FsJelrMAwGA1UdEwQF -MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBALmm4Zw/4jLKDJciUGUYOcr5Xe9TP/Cs -afH7IWvaFUDfV3W6yAm9jgNfIy9aDLpuu2CdEb+0qL2hdmGLV7IM3y62Ve0UTdGV -BGsm1zMmIguew2wGbAwGr5LmIcUseatVUKAAAfDrBNwotEAdM8kmGekUZfOM+J9D -FoNQ62C0buRHGugtu6zWAcZNOe6CI7HdhaAdxZlgn8y7dfJQMacoK0NcWeUVQwii -6D4mgaqUGM2O+WcquD1vEMuBPYVcKhi43019E0+6LI5QB6w80bARY8K7tkTdRD7U -y1/C7iIqyuBVL45OdSabb37TfGlHZIPIwLaGw3i4Mr0+F0jQT8rZtTQ= ------END CERTIFICATE-----` - -const pemPrivateKey = `-----BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA12vHV3gYy5zd1lujA7prEhCSkAszE6E37mViWhLQ63CuedZf -yYaTAHQKHYDZi4K1MNAySUfZRMcICSSsxlRIz6mzXrFsowaJgwx4cbMDIvXE03Ks -tuXoTYJh+xmXB+5yEVEtIyP2DvPqfCmwCZb3k94Y/VY1nAQDxIxciXrAxT9zT1oY -d0YWr2ypJ2mgsfnY4c3zg7W5WgvOTmYz7Ey7GJjpUjGdayx+P1CilKzSWH1xZuVQ -FNLSHvcHWXkEoCMVc0tW5mO5eEO1aNHo9MSjPF386l1rq+pz5OwjqCEZq2b1Yxes -yLnbF+8+iYGfYmFaDLFwG7zVDwialuI4TzIIOQIDAQABAoIBAHLf4pleTbqmmBWr -IC7oxhgIBmAR2Nbq7eyO2/e0ePxURnZqPwI0ZUekmZBKGbgvp3e0TlyNl+r5R+u4 -RvosD/fNQv2IF6qH3eSoTcIz98Q40xD+4eNWjp5mnOFOMB/mo6VgaHWIw7oNkElN -4bX7b2LG2QSfaE8eRPQW9XHKp+mGhYFbxgPYxUmlIXuYZF61hVwxysDA6DP3LOi8 -yUL6E64x6NqN9xtg/VoN+f6N0MOvsr4yb5+uvni1LVRFI7tNqIN4Y6P6trgKfnRR -EpZeAUu8scqyxE4NeqnnjK/wBuXxaeh3e9mN1V2SzT629c1InmmQasZ5slcCJQB+ -38cswgECgYEA+esaLKwHXT4+sOqMYemi7TrhxtNC2f5OAGUiSRVmTnum2gl4wOB+ -h5oLZAuG5nBEIoqbMEbI35vfuHqIe390IJtPdQlz4TGDsPufYj/gnnBBFy/c8f+n -f/CdRDRYrpnpKGwvUntLRB2pFbe2hlqqq+4YUqiHauJMOCJnPbOo1lECgYEA3KnF -VOXyY0fKD45G7ttfAcpw8ZI2gY99sCRwtBQGsbO61bvw5sl/3j7AmYosz+n6f7hb -uHmitIuPv4z3r1yfVysh80tTGIM3wDkpr3fLYRxpVOZU4hgxMQV9yyaSA/Hfqn48 -vIK/NC4bERqpofNNdrIqNaGWkd87ZycvpRfa0WkCgYBztbVVr4RtWG9gLAg5IRot -KhD0pEWUdpiYuDpqifznI3r6Al6lNot+rwTNGkUoFhyFvZTigjNozFuFpz3fqAAV -RLNCJdFAF1O4spd1vst5r9GDMcbjSJG9u6KkvHO+y0XXUFeMoccUT4NEqd1ZUUsp -9T/PrXWdOA9AAjW4rKDkMQKBgQC9R4NVR8mbD8Frhoeh69qbFqO7E8hdalBN/3QN -hAAZ/imNnSEPVliwsvNSwQufbPzLAcDrhKrkY7JyhOERM0oa44zDvSESLbxszpvL -P97c9hoEEW9OYaIQgr1cvUES0S8ieBZxPVX11HazPUO0/5a68ijyyCD4D5xM53gf -DU9NwQKBgQCmVthQi65xcc4mgCIwXtBZWXeaPv5x0dLEXIC5EoN6eXLK9iW//7cE -hhawtJtl+J6laB+TkEGQsyhc4v85WcywdisyR7LR7CUqFYJMKeE/VtTVKnYbfq54 -rHoQS9YotByBwPtRx0V93gkc+KWBOGmSBBxKj7lrBkYkcWAiRfpJjg== ------END RSA PRIVATE KEY-----` - -const pemPrivateKeyWithPass = `-----BEGIN RSA PRIVATE KEY----- -Proc-Type: 4,ENCRYPTED -DEK-Info: DES-EDE3-CBC,3EABF60A784F9065 - -IDGYvdRJXvBt5vEDI9caEYJ2vvVmoqmxTKvheNX0aLSXUl/p8hIZ25kd/4mpmI3m -irQdEe2JuNh4/fPDe6Agg6mX6mYCVbiupfXdFKkqJzndW/O5nEQ4yuRgi0fO4wcH -OM/kTS8/7UaKfCuWFa71ywh1WeStFDBwsMQqLdFFeuQ/JC6g2tZW6xzCBE0BVIkq -6OWXmWumXMufhOdpb9sNoc3lbdOi037V886o0cIRQp4qPepElhhhplrhaJZBSxiP -TUldExbtYCN1APhrgUp1RpxIWHNLezjhUYLGooxb6SqinpLd9ia2uFotwNDeX7/T -dMPQPtgdFwvoCtWn9oVWp+regdZPacABLsvtTD4NS8h13BKzBmAqtYfHJk44u/Tv -6PcCb9xHI7+YpNJznrHiCtALWkfG56mDjp0SP+OKjsYMjo317D+x892i2XT79k2T -0IM0OUPizVkN5c7uDQBHqxmE9JVQT7QFMy1P57nWPsmG5o7e9Y/klaPQzi04FWEh -YAEZrU5/FQlFziu3/Jw6WwQnm3IqJP6iMlnR9Y5iZCZQnLhcJNIxxOJ/+cVH4dVD -jIHztasHgbfld045Ua7nk91VyFP5pWRPFacJ74D+xm/1IjF/+9Uj3NQX88Swig0Q -Fi7+eJ1XtCI0YdUqiUdp8QaS1GnFzibSIcXCbLLEn0Cgh/3CFXUyh92M4GIgvmcI -/hi4nUDa3nLYDHyOZubFLERb+Zr3EFzNXX4Ga3fcNH0deluxW4tda+QCk0ud6k9N -y2bCcAVnvbB+yX2s7CSVq+eaT/4JLIJY5AlrISRwYtG57SR/DN9HuU99dD30k581 -PmarIt4VAakjXo/Zqd1AMh+ofbC/Qm7jBwbPGPZAM/FjpnVsvaXsdChI19Az72v3 -wiLOKEw8M23vV4/E7QwW3Pp/RPyUZk6HAlBuLXbcyZHOOV4WPsKrI46BBXL8Qf4X -5kpRITFFUaFu3aaO7mloVAoneEKusKJgKOAwWifRI3jf6fH9B8qDA0jQpWRNpLs4 -3A2qrOyHQ9SMoBr7ya8Vs2BMdfqAmOyiUdVzLr2EjnRxa7f3/7/sdzD1aaIJa2TM -kjpKgFMq5B/FRVmuAvKyEF52A/b6L9EpinyB53DzWnIw9W5zdjjRkuxmGmv1R94A -gJvbONh955cinHft0rm0hdKo77wDvXZdX5ZeITjOwJ0d/VBHYDGUonDVgnAVLcz+ -n1BS+oOS1xLG/EJOGqtNYihVuCkbIwwdAVhc7pKo3nIbLyrKFKFyh/Br11PPBris -nlWo8BWSoFv7gKOftkulHJFAVekisaXe4OIcYMATeLvDfAnBDJrNHZn0HcyHI51L -3EhCCPJrrmfNv+QMdPk6LTts5YIdhNRSV5PR2X8ZshChod7atyrw+Wm+LCcy3h1G -xIVNracpnna+Ic5M8EIJZgLOH7IjDFS1EcPjz5em0rVqGGsLDvxmRo2ZJTPSHlpM -8q6VJEIso5sfoauf+fX+y7xk1CpFG8NkXSplbiYmZXdB1zepV1a/ZiW2uU7hEAV7 -oMEzoBEIw3wTuRasixjH7Z6i8PvF3eUKXCIt0UiwTmWdCCW37c5eqjguyp9aLDtc ------END RSA PRIVATE KEY-----` diff --git a/go/vt/vtgr/vtgr.go b/go/vt/vtgr/vtgr.go deleted file mode 100644 index 80a5f99fad9..00000000000 --- a/go/vt/vtgr/vtgr.go +++ /dev/null @@ -1,233 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vtgr - -import ( - "context" - "errors" - "os" - "os/signal" - "strings" - "sync" - "sync/atomic" - "syscall" - "time" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/vt/concurrency" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vtgr/config" - "vitess.io/vitess/go/vt/vtgr/controller" - "vitess.io/vitess/go/vt/vtgr/db" - "vitess.io/vitess/go/vt/vttablet/tmclient" -) - -var ( - refreshInterval = 10 * time.Second - scanInterval = 3 * time.Second - scanAndRepairTimeout = 3 * time.Second - vtgrConfigFile string - - localDbPort int -) - -func init() { - servenv.OnParseFor("vtgr", func(fs *pflag.FlagSet) { - fs.DurationVar(&refreshInterval, "refresh_interval", 10*time.Second, "Refresh interval to load tablets.") - fs.DurationVar(&scanInterval, "scan_interval", 3*time.Second, "Scan interval to diagnose and repair.") - fs.DurationVar(&scanAndRepairTimeout, "scan_repair_timeout", 3*time.Second, "Time to wait for a Diagnose and repair operation.") - fs.StringVar(&vtgrConfigFile, "vtgr_config", "", "Config file for vtgr.") - fs.IntVar(&localDbPort, "db_port", 0, "Local mysql port, set this to enable local fast check.") - }) -} - -// VTGR is the interface to manage the component to set up group replication with Vitess. -// The main goal of it is to reconcile MySQL group and the Vitess topology. -// Caller should use OpenTabletDiscovery to create the VTGR instance. -type VTGR struct { - // Shards are all the shards that a VTGR is monitoring. - // Caller can choose to iterate the shards to scan and repair for more granular control (e.g., stats report) - // instead of calling ScanAndRepair() directly. - Shards []*controller.GRShard - topo controller.GRTopo - tmc tmclient.TabletManagerClient - ctx context.Context - - stopped atomic.Bool -} - -func newVTGR(ctx context.Context, ts controller.GRTopo, tmc tmclient.TabletManagerClient) *VTGR { - return &VTGR{ - topo: ts, - tmc: tmc, - ctx: ctx, - } -} - -// OpenTabletDiscovery calls OpenTabletDiscoveryWithAcitve and set the shard to be active -// it opens connection with topo server -// and triggers the first round of controller based on specified cells and keyspace/shards. -func OpenTabletDiscovery(ctx context.Context, cellsToWatch, clustersToWatch []string) *VTGR { - return OpenTabletDiscoveryWithAcitve(ctx, cellsToWatch, clustersToWatch, true) -} - -// OpenTabletDiscoveryWithAcitve opens connection with topo server -// and triggers the first round of controller based on parameter -func OpenTabletDiscoveryWithAcitve(ctx context.Context, cellsToWatch, clustersToWatch []string, active bool) *VTGR { - if vtgrConfigFile == "" { - log.Fatal("vtgr_config is required") - } - config, err := config.ReadVTGRConfig(vtgrConfigFile) - if err != nil { - log.Fatalf("Cannot load vtgr config file: %v", err) - } - vtgr := newVTGR( - ctx, - topo.Open(), - tmclient.NewTabletManagerClient(), - ) - var shards []*controller.GRShard - ctx, cancel := context.WithTimeout(vtgr.ctx, topo.RemoteOperationTimeout) - defer cancel() - for _, ks := range clustersToWatch { - if strings.Contains(ks, "/") { - // This is a keyspace/shard specification - input := strings.Split(ks, "/") - shards = append(shards, controller.NewGRShard(input[0], input[1], cellsToWatch, vtgr.tmc, vtgr.topo, db.NewVTGRSqlAgent(), config, localDbPort, active)) - } else { - // Assume this is a keyspace and find all shards in keyspace - shardNames, err := vtgr.topo.GetShardNames(ctx, ks) - if err != nil { - // Log the error and continue - log.Errorf("Error fetching shards for keyspace %v: %v", ks, err) - continue - } - if len(shardNames) == 0 { - log.Errorf("Topo has no shards for ks: %v", ks) - continue - } - for _, s := range shardNames { - shards = append(shards, controller.NewGRShard(ks, s, cellsToWatch, vtgr.tmc, vtgr.topo, db.NewVTGRSqlAgent(), config, localDbPort, active)) - } - } - } - vtgr.handleSignal(os.Exit) - vtgr.Shards = shards - log.Infof("Monitoring shards size %v", len(vtgr.Shards)) - // Force refresh all tablet here to populate data for vtgr - var wg sync.WaitGroup - for _, shard := range vtgr.Shards { - wg.Add(1) - go func(shard *controller.GRShard) { - defer wg.Done() - shard.UpdateTabletsInShardWithLock(ctx) - }(shard) - } - wg.Wait() - log.Info("Ready to start VTGR") - return vtgr -} - -// RefreshCluster get the latest tablets from topo server -func (vtgr *VTGR) RefreshCluster() { - for _, shard := range vtgr.Shards { - go func(shard *controller.GRShard) { - ticker := time.Tick(refreshInterval) - for range ticker { - ctx, cancel := context.WithTimeout(vtgr.ctx, refreshInterval) - shard.UpdateTabletsInShardWithLock(ctx) - cancel() - } - }(shard) - } -} - -// ScanAndRepair starts the scanAndFix routine -func (vtgr *VTGR) ScanAndRepair() { - for _, shard := range vtgr.Shards { - go func(shard *controller.GRShard) { - ticker := time.Tick(scanInterval) - for range ticker { - func() { - ctx, cancel := context.WithTimeout(vtgr.ctx, scanAndRepairTimeout) - defer cancel() - if !vtgr.stopped.Load() { - log.Infof("Start scan and repair %v/%v", shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard) - shard.ScanAndRepairShard(ctx) - log.Infof("Finished scan and repair %v/%v", shard.KeyspaceShard.Keyspace, shard.KeyspaceShard.Shard) - } - }() - } - }(shard) - } -} - -// Diagnose exposes the endpoint to diagnose a particular shard -func (vtgr *VTGR) Diagnose(ctx context.Context, shard *controller.GRShard) (controller.DiagnoseType, error) { - return shard.Diagnose(ctx) -} - -// Repair exposes the endpoint to repair a particular shard -func (vtgr *VTGR) Repair(ctx context.Context, shard *controller.GRShard, diagnose controller.DiagnoseType) (controller.RepairResultCode, error) { - if vtgr.stopped.Load() { - return controller.Fail, errors.New("VTGR is stopped") - } - return shard.Repair(ctx, diagnose) -} - -// GetCurrentShardStatuses is used when we want to know what VTGR observes -// it contains information about a list of instances and primary tablet -func (vtgr *VTGR) GetCurrentShardStatuses() []controller.ShardStatus { - var result []controller.ShardStatus - for _, shard := range vtgr.Shards { - status := shard.GetCurrentShardStatuses() - result = append(result, status) - } - return result -} - -// OverrideRebootstrapGroupSize forces an override the group size used in safety check for rebootstrap -func (vtgr *VTGR) OverrideRebootstrapGroupSize(groupSize int) error { - errorRecord := concurrency.AllErrorRecorder{} - for _, shard := range vtgr.Shards { - err := shard.OverrideRebootstrapGroupSize(groupSize) - if err != nil { - errorRecord.RecordError(err) - } - } - return errorRecord.Error() -} - -func (vtgr *VTGR) handleSignal(action func(int)) { - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGHUP) - go func() { - // block until the signal is received - <-sigChan - log.Infof("Handling SIGHUP") - // Set stopped to true so that following repair call won't do anything - // For the ongoing repairs, checkShardLocked will abort if needed - vtgr.stopped.Store(true) - for _, shard := range vtgr.Shards { - shard.UnlockShard() - } - action(1) - }() -} diff --git a/go/vt/vtgr/vtgr_test.go b/go/vt/vtgr/vtgr_test.go deleted file mode 100644 index 3632e88427c..00000000000 --- a/go/vt/vtgr/vtgr_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package vtgr - -import ( - "context" - "sync/atomic" - "syscall" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/vtgr/config" - "vitess.io/vitess/go/vt/vtgr/controller" - "vitess.io/vitess/go/vt/vtgr/db" - "vitess.io/vitess/go/vt/vttablet/tmclient" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -func TestSighupHandle(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") - defer ts.Close() - ts.CreateKeyspace(ctx, "ks", &topodatapb.Keyspace{}) - ts.CreateShard(ctx, "ks", "0") - vtgr := newVTGR( - ctx, - ts, - tmclient.NewTabletManagerClient(), - ) - var shards []*controller.GRShard - config := &config.VTGRConfig{ - DisableReadOnlyProtection: false, - BootstrapGroupSize: 5, - MinNumReplica: 3, - BackoffErrorWaitTimeSeconds: 10, - BootstrapWaitTimeSeconds: 10 * 60, - } - shards = append(shards, controller.NewGRShard("ks", "0", nil, vtgr.tmc, vtgr.topo, db.NewVTGRSqlAgent(), config, localDbPort, true)) - vtgr.Shards = shards - shard := vtgr.Shards[0] - shard.LockShard(ctx, "test") - var res atomic.Bool - vtgr.handleSignal(func(i int) { - res.Store(true) - }) - assert.NotNil(t, shard.GetUnlock()) - assert.False(t, vtgr.stopped.Load()) - syscall.Kill(syscall.Getpid(), syscall.SIGHUP) - time.Sleep(100 * time.Millisecond) - assert.True(t, res.Load()) - assert.Nil(t, shard.GetUnlock()) - assert.True(t, vtgr.stopped.Load()) -} diff --git a/go/vt/vthash/hash.go b/go/vt/vthash/hash.go index 7b6a130dc08..3dbd85af6a3 100644 --- a/go/vt/vthash/hash.go +++ b/go/vt/vthash/hash.go @@ -17,6 +17,7 @@ limitations under the License. package vthash import ( + "vitess.io/vitess/go/vt/vthash/highway" "vitess.io/vitess/go/vt/vthash/metro" ) @@ -28,3 +29,12 @@ func New() Hasher { h.Reset() return h } + +type Hasher256 = highway.Digest +type Hash256 = [32]byte + +var defaultHash256Key = [32]byte{} + +func New256() *Hasher256 { + return highway.New(defaultHash256Key) +} diff --git a/go/vt/vthash/highway/LICENSE b/go/vt/vthash/highway/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/go/vt/vthash/highway/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/go/vt/vthash/highway/highwayhash.go b/go/vt/vthash/highway/highwayhash.go new file mode 100644 index 00000000000..a922b435d9d --- /dev/null +++ b/go/vt/vthash/highway/highwayhash.go @@ -0,0 +1,184 @@ +/* +Copyright 2023 The Vitess Authors. +Copyright (c) 2017 Minio Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package highwayhash implements the pseudo-random-function (PRF) HighwayHash. +// HighwayHash is a fast hash function designed to defend hash-flooding attacks +// or to authenticate short-lived messages. +// +// HighwayHash is not a general purpose cryptographic hash function and does not +// provide (strong) collision resistance. +package highway + +import ( + "encoding/binary" + "errors" + "unsafe" +) + +const ( + // Size is the size of HighwayHash-256 checksum in bytes. + Size = 32 + // Size128 is the size of HighwayHash-128 checksum in bytes. + Size128 = 16 +) + +var errKeySize = errors.New("highwayhash: invalid key size") + +// New returns a hash.Hash computing the HighwayHash-256 checksum. +// It returns a non-nil error if the key is not 32 bytes long. +func New(key [Size]byte) *Digest { + h := &Digest{size: Size, key: key} + h.Reset() + return h +} + +// New128 returns a hash.Hash computing the HighwayHash-128 checksum. +// It returns a non-nil error if the key is not 32 bytes long. +func New128(key [Size]byte) *Digest { + h := &Digest{size: Size128, key: key} + h.Reset() + return h +} + +// Sum computes the HighwayHash-256 checksum of data. +// It panics if the key is not 32 bytes long. +func Sum(data, key []byte) [Size]byte { + if len(key) != Size { + panic(errKeySize) + } + var state [16]uint64 + initialize(&state, key) + if n := len(data) & (^(Size - 1)); n > 0 { + update(&state, data[:n]) + data = data[n:] + } + if len(data) > 0 { + var block [Size]byte + offset := copy(block[:], data) + hashBuffer(&state, &block, offset) + } + var hash [Size]byte + finalize(hash[:], &state) + return hash +} + +// Sum128 computes the HighwayHash-128 checksum of data. +// It panics if the key is not 32 bytes long. +func Sum128(data, key []byte) [Size128]byte { + if len(key) != Size { + panic(errKeySize) + } + var state [16]uint64 + initialize(&state, key) + if n := len(data) & (^(Size - 1)); n > 0 { + update(&state, data[:n]) + data = data[n:] + } + if len(data) > 0 { + var block [Size]byte + offset := copy(block[:], data) + hashBuffer(&state, &block, offset) + } + var hash [Size128]byte + finalize(hash[:], &state) + return hash +} + +type Digest struct { + state [16]uint64 // v0 | v1 | mul0 | mul1 + + key, buffer [Size]byte + offset int + size int +} + +func (d *Digest) Size() int { return d.size } + +func (d *Digest) BlockSize() int { return Size } + +func (d *Digest) Reset() { + initialize(&d.state, d.key[:]) + d.offset = 0 +} + +func (d *Digest) WriteString(str string) (int, error) { + return d.Write(unsafe.Slice(unsafe.StringData(str), len(str))) +} + +func (d *Digest) Write(p []byte) (n int, err error) { + n = len(p) + if d.offset > 0 { + remaining := Size - d.offset + if n < remaining { + d.offset += copy(d.buffer[d.offset:], p) + return + } + copy(d.buffer[d.offset:], p[:remaining]) + update(&d.state, d.buffer[:]) + p = p[remaining:] + d.offset = 0 + } + if nn := len(p) & (^(Size - 1)); nn > 0 { + update(&d.state, p[:nn]) + p = p[nn:] + } + if len(p) > 0 { + d.offset = copy(d.buffer[d.offset:], p) + } + return +} + +func (d *Digest) Sum(b []byte) []byte { + state := d.state + if d.offset > 0 { + hashBuffer(&state, &d.buffer, d.offset) + } + var hash [Size]byte + finalize(hash[:d.size], &state) + return append(b, hash[:d.size]...) +} + +func hashBuffer(state *[16]uint64, buffer *[32]byte, offset int) { + var block [Size]byte + mod32 := (uint64(offset) << 32) + uint64(offset) + for i := range state[:4] { + state[i] += mod32 + } + for i := range state[4:8] { + t0 := uint32(state[i+4]) + t0 = (t0 << uint(offset)) | (t0 >> uint(32-offset)) + + t1 := uint32(state[i+4] >> 32) + t1 = (t1 << uint(offset)) | (t1 >> uint(32-offset)) + + state[i+4] = (uint64(t1) << 32) | uint64(t0) + } + + mod4 := offset & 3 + remain := offset - mod4 + + copy(block[:], buffer[:remain]) + if offset >= 16 { + copy(block[28:], buffer[offset-4:]) + } else if mod4 != 0 { + last := uint32(buffer[remain]) + last += uint32(buffer[remain+mod4>>1]) << 8 + last += uint32(buffer[offset-1]) << 16 + binary.LittleEndian.PutUint32(block[16:], last) + } + update(state, block[:]) +} diff --git a/go/vt/vthash/highway/highwayhashAVX2_amd64.s b/go/vt/vthash/highway/highwayhashAVX2_amd64.s new file mode 100644 index 00000000000..761eac33dfe --- /dev/null +++ b/go/vt/vthash/highway/highwayhashAVX2_amd64.s @@ -0,0 +1,258 @@ +// Copyright (c) 2017 Minio Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64,!gccgo,!appengine,!nacl,!noasm + +#include "textflag.h" + +DATA ·consAVX2<>+0x00(SB)/8, $0xdbe6d5d5fe4cce2f +DATA ·consAVX2<>+0x08(SB)/8, $0xa4093822299f31d0 +DATA ·consAVX2<>+0x10(SB)/8, $0x13198a2e03707344 +DATA ·consAVX2<>+0x18(SB)/8, $0x243f6a8885a308d3 +DATA ·consAVX2<>+0x20(SB)/8, $0x3bd39e10cb0ef593 +DATA ·consAVX2<>+0x28(SB)/8, $0xc0acf169b5f18a8c +DATA ·consAVX2<>+0x30(SB)/8, $0xbe5466cf34e90c6c +DATA ·consAVX2<>+0x38(SB)/8, $0x452821e638d01377 +GLOBL ·consAVX2<>(SB), (NOPTR+RODATA), $64 + +DATA ·zipperMergeAVX2<>+0x00(SB)/8, $0xf010e05020c03 +DATA ·zipperMergeAVX2<>+0x08(SB)/8, $0x70806090d0a040b +DATA ·zipperMergeAVX2<>+0x10(SB)/8, $0xf010e05020c03 +DATA ·zipperMergeAVX2<>+0x18(SB)/8, $0x70806090d0a040b +GLOBL ·zipperMergeAVX2<>(SB), (NOPTR+RODATA), $32 + +#define REDUCE_MOD(x0, x1, x2, x3, tmp0, tmp1, y0, y1) \ + MOVQ $0x3FFFFFFFFFFFFFFF, tmp0 \ + ANDQ tmp0, x3 \ + MOVQ x2, y0 \ + MOVQ x3, y1 \ + \ + MOVQ x2, tmp0 \ + MOVQ x3, tmp1 \ + SHLQ $1, tmp1 \ + SHRQ $63, tmp0 \ + MOVQ tmp1, x3 \ + ORQ tmp0, x3 \ + \ + SHLQ $1, x2 \ + \ + MOVQ y0, tmp0 \ + MOVQ y1, tmp1 \ + SHLQ $2, tmp1 \ + SHRQ $62, tmp0 \ + MOVQ tmp1, y1 \ + ORQ tmp0, y1 \ + \ + SHLQ $2, y0 \ + \ + XORQ x0, y0 \ + XORQ x2, y0 \ + XORQ x1, y1 \ + XORQ x3, y1 + +#define UPDATE(msg) \ + VPADDQ msg, Y2, Y2 \ + VPADDQ Y3, Y2, Y2 \ + \ + VPSRLQ $32, Y1, Y0 \ + BYTE $0xC5; BYTE $0xFD; BYTE $0xF4; BYTE $0xC2 \ // VPMULUDQ Y2, Y0, Y0 + VPXOR Y0, Y3, Y3 \ + \ + VPADDQ Y4, Y1, Y1 \ + \ + VPSRLQ $32, Y2, Y0 \ + BYTE $0xC5; BYTE $0xFD; BYTE $0xF4; BYTE $0xC1 \ // VPMULUDQ Y1, Y0, Y0 + VPXOR Y0, Y4, Y4 \ + \ + VPSHUFB Y5, Y2, Y0 \ + VPADDQ Y0, Y1, Y1 \ + \ + VPSHUFB Y5, Y1, Y0 \ + VPADDQ Y0, Y2, Y2 + +// func initializeAVX2(state *[16]uint64, key []byte) +TEXT ·initializeAVX2(SB), 4, $0-32 + MOVQ state+0(FP), AX + MOVQ key_base+8(FP), BX + MOVQ $·consAVX2<>(SB), CX + + VMOVDQU 0(BX), Y1 + VPSHUFD $177, Y1, Y2 + + VMOVDQU 0(CX), Y3 + VMOVDQU 32(CX), Y4 + + VPXOR Y3, Y1, Y1 + VPXOR Y4, Y2, Y2 + + VMOVDQU Y1, 0(AX) + VMOVDQU Y2, 32(AX) + VMOVDQU Y3, 64(AX) + VMOVDQU Y4, 96(AX) + VZEROUPPER + RET + +// func updateAVX2(state *[16]uint64, msg []byte) +TEXT ·updateAVX2(SB), 4, $0-32 + MOVQ state+0(FP), AX + MOVQ msg_base+8(FP), BX + MOVQ msg_len+16(FP), CX + + CMPQ CX, $32 + JB DONE + + VMOVDQU 0(AX), Y1 + VMOVDQU 32(AX), Y2 + VMOVDQU 64(AX), Y3 + VMOVDQU 96(AX), Y4 + + VMOVDQU ·zipperMergeAVX2<>(SB), Y5 + +LOOP: + VMOVDQU 0(BX), Y0 + UPDATE(Y0) + + ADDQ $32, BX + SUBQ $32, CX + JA LOOP + + VMOVDQU Y1, 0(AX) + VMOVDQU Y2, 32(AX) + VMOVDQU Y3, 64(AX) + VMOVDQU Y4, 96(AX) + VZEROUPPER + +DONE: + RET + +// func finalizeAVX2(out []byte, state *[16]uint64) +TEXT ·finalizeAVX2(SB), 4, $0-32 + MOVQ state+24(FP), AX + MOVQ out_base+0(FP), BX + MOVQ out_len+8(FP), CX + + VMOVDQU 0(AX), Y1 + VMOVDQU 32(AX), Y2 + VMOVDQU 64(AX), Y3 + VMOVDQU 96(AX), Y4 + + VMOVDQU ·zipperMergeAVX2<>(SB), Y5 + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + CMPQ CX, $8 + JE skipUpdate // Just 4 rounds for 64-bit checksum + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + CMPQ CX, $16 + JE skipUpdate // 6 rounds for 128-bit checksum + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + + VPERM2I128 $1, Y1, Y1, Y0 + VPSHUFD $177, Y0, Y0 + UPDATE(Y0) + +skipUpdate: + VMOVDQU Y1, 0(AX) + VMOVDQU Y2, 32(AX) + VMOVDQU Y3, 64(AX) + VMOVDQU Y4, 96(AX) + VZEROUPPER + + CMPQ CX, $8 + JE hash64 + CMPQ CX, $16 + JE hash128 + + // 256-bit checksum + MOVQ 0*8(AX), R8 + MOVQ 1*8(AX), R9 + MOVQ 4*8(AX), R10 + MOVQ 5*8(AX), R11 + ADDQ 8*8(AX), R8 + ADDQ 9*8(AX), R9 + ADDQ 12*8(AX), R10 + ADDQ 13*8(AX), R11 + + REDUCE_MOD(R8, R9, R10, R11, R12, R13, R14, R15) + MOVQ R14, 0(BX) + MOVQ R15, 8(BX) + + MOVQ 2*8(AX), R8 + MOVQ 3*8(AX), R9 + MOVQ 6*8(AX), R10 + MOVQ 7*8(AX), R11 + ADDQ 10*8(AX), R8 + ADDQ 11*8(AX), R9 + ADDQ 14*8(AX), R10 + ADDQ 15*8(AX), R11 + + REDUCE_MOD(R8, R9, R10, R11, R12, R13, R14, R15) + MOVQ R14, 16(BX) + MOVQ R15, 24(BX) + RET + +hash128: + MOVQ 0*8(AX), R8 + MOVQ 1*8(AX), R9 + ADDQ 6*8(AX), R8 + ADDQ 7*8(AX), R9 + ADDQ 8*8(AX), R8 + ADDQ 9*8(AX), R9 + ADDQ 14*8(AX), R8 + ADDQ 15*8(AX), R9 + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + RET + +hash64: + MOVQ 0*8(AX), DX + ADDQ 4*8(AX), DX + ADDQ 8*8(AX), DX + ADDQ 12*8(AX), DX + MOVQ DX, 0(BX) + RET + diff --git a/go/vt/vthash/highway/highwayhash_amd64.go b/go/vt/vthash/highway/highwayhash_amd64.go new file mode 100644 index 00000000000..f47a47fb1d3 --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_amd64.go @@ -0,0 +1,80 @@ +//go:build amd64 && !gccgo && !appengine && !nacl && !noasm +// +build amd64,!gccgo,!appengine,!nacl,!noasm + +/* +Copyright (c) 2017 Minio Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package highway + +import "golang.org/x/sys/cpu" + +var ( + useSSE4 = cpu.X86.HasSSE41 + useAVX2 = cpu.X86.HasAVX2 + useNEON = false + useVMX = false +) + +//go:noescape +func initializeSSE4(state *[16]uint64, key []byte) + +//go:noescape +func initializeAVX2(state *[16]uint64, key []byte) + +//go:noescape +func updateSSE4(state *[16]uint64, msg []byte) + +//go:noescape +func updateAVX2(state *[16]uint64, msg []byte) + +//go:noescape +func finalizeSSE4(out []byte, state *[16]uint64) + +//go:noescape +func finalizeAVX2(out []byte, state *[16]uint64) + +func initialize(state *[16]uint64, key []byte) { + switch { + case useAVX2: + initializeAVX2(state, key) + case useSSE4: + initializeSSE4(state, key) + default: + initializeGeneric(state, key) + } +} + +func update(state *[16]uint64, msg []byte) { + switch { + case useAVX2: + updateAVX2(state, msg) + case useSSE4: + updateSSE4(state, msg) + default: + updateGeneric(state, msg) + } +} + +func finalize(out []byte, state *[16]uint64) { + switch { + case useAVX2: + finalizeAVX2(out, state) + case useSSE4: + finalizeSSE4(out, state) + default: + finalizeGeneric(out, state) + } +} diff --git a/go/vt/vthash/highway/highwayhash_amd64.s b/go/vt/vthash/highway/highwayhash_amd64.s new file mode 100644 index 00000000000..5c0f87256f6 --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_amd64.s @@ -0,0 +1,304 @@ +// Copyright (c) 2017 Minio Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build amd64 !gccgo !appengine !nacl + +#include "textflag.h" + +DATA ·asmConstants<>+0x00(SB)/8, $0xdbe6d5d5fe4cce2f +DATA ·asmConstants<>+0x08(SB)/8, $0xa4093822299f31d0 +DATA ·asmConstants<>+0x10(SB)/8, $0x13198a2e03707344 +DATA ·asmConstants<>+0x18(SB)/8, $0x243f6a8885a308d3 +DATA ·asmConstants<>+0x20(SB)/8, $0x3bd39e10cb0ef593 +DATA ·asmConstants<>+0x28(SB)/8, $0xc0acf169b5f18a8c +DATA ·asmConstants<>+0x30(SB)/8, $0xbe5466cf34e90c6c +DATA ·asmConstants<>+0x38(SB)/8, $0x452821e638d01377 +GLOBL ·asmConstants<>(SB), (NOPTR+RODATA), $64 + +DATA ·asmZipperMerge<>+0x00(SB)/8, $0xf010e05020c03 +DATA ·asmZipperMerge<>+0x08(SB)/8, $0x70806090d0a040b +GLOBL ·asmZipperMerge<>(SB), (NOPTR+RODATA), $16 + +#define v00 X0 +#define v01 X1 +#define v10 X2 +#define v11 X3 +#define m00 X4 +#define m01 X5 +#define m10 X6 +#define m11 X7 + +#define t0 X8 +#define t1 X9 +#define t2 X10 + +#define REDUCE_MOD(x0, x1, x2, x3, tmp0, tmp1, y0, y1) \ + MOVQ $0x3FFFFFFFFFFFFFFF, tmp0 \ + ANDQ tmp0, x3 \ + MOVQ x2, y0 \ + MOVQ x3, y1 \ + \ + MOVQ x2, tmp0 \ + MOVQ x3, tmp1 \ + SHLQ $1, tmp1 \ + SHRQ $63, tmp0 \ + MOVQ tmp1, x3 \ + ORQ tmp0, x3 \ + \ + SHLQ $1, x2 \ + \ + MOVQ y0, tmp0 \ + MOVQ y1, tmp1 \ + SHLQ $2, tmp1 \ + SHRQ $62, tmp0 \ + MOVQ tmp1, y1 \ + ORQ tmp0, y1 \ + \ + SHLQ $2, y0 \ + \ + XORQ x0, y0 \ + XORQ x2, y0 \ + XORQ x1, y1 \ + XORQ x3, y1 + +#define UPDATE(msg0, msg1) \ + PADDQ msg0, v10 \ + PADDQ m00, v10 \ + PADDQ msg1, v11 \ + PADDQ m01, v11 \ + \ + MOVO v00, t0 \ + MOVO v01, t1 \ + PSRLQ $32, t0 \ + PSRLQ $32, t1 \ + PMULULQ v10, t0 \ + PMULULQ v11, t1 \ + PXOR t0, m00 \ + PXOR t1, m01 \ + \ + PADDQ m10, v00 \ + PADDQ m11, v01 \ + \ + MOVO v10, t0 \ + MOVO v11, t1 \ + PSRLQ $32, t0 \ + PSRLQ $32, t1 \ + PMULULQ v00, t0 \ + PMULULQ v01, t1 \ + PXOR t0, m10 \ + PXOR t1, m11 \ + \ + MOVO v10, t0 \ + PSHUFB t2, t0 \ + MOVO v11, t1 \ + PSHUFB t2, t1 \ + PADDQ t0, v00 \ + PADDQ t1, v01 \ + \ + MOVO v00, t0 \ + PSHUFB t2, t0 \ + MOVO v01, t1 \ + PSHUFB t2, t1 \ + PADDQ t0, v10 \ + PADDQ t1, v11 + +// func initializeSSE4(state *[16]uint64, key []byte) +TEXT ·initializeSSE4(SB), NOSPLIT, $0-32 + MOVQ state+0(FP), AX + MOVQ key_base+8(FP), BX + MOVQ $·asmConstants<>(SB), CX + + MOVOU 0(BX), v00 + MOVOU 16(BX), v01 + + PSHUFD $177, v00, v10 + PSHUFD $177, v01, v11 + + MOVOU 0(CX), m00 + MOVOU 16(CX), m01 + MOVOU 32(CX), m10 + MOVOU 48(CX), m11 + + PXOR m00, v00 + PXOR m01, v01 + PXOR m10, v10 + PXOR m11, v11 + + MOVOU v00, 0(AX) + MOVOU v01, 16(AX) + MOVOU v10, 32(AX) + MOVOU v11, 48(AX) + MOVOU m00, 64(AX) + MOVOU m01, 80(AX) + MOVOU m10, 96(AX) + MOVOU m11, 112(AX) + RET + +// func updateSSE4(state *[16]uint64, msg []byte) +TEXT ·updateSSE4(SB), NOSPLIT, $0-32 + MOVQ state+0(FP), AX + MOVQ msg_base+8(FP), BX + MOVQ msg_len+16(FP), CX + + CMPQ CX, $32 + JB DONE + + MOVOU 0(AX), v00 + MOVOU 16(AX), v01 + MOVOU 32(AX), v10 + MOVOU 48(AX), v11 + MOVOU 64(AX), m00 + MOVOU 80(AX), m01 + MOVOU 96(AX), m10 + MOVOU 112(AX), m11 + + MOVOU ·asmZipperMerge<>(SB), t2 + +LOOP: + MOVOU 0(BX), t0 + MOVOU 16(BX), t1 + + UPDATE(t0, t1) + + ADDQ $32, BX + SUBQ $32, CX + JA LOOP + + MOVOU v00, 0(AX) + MOVOU v01, 16(AX) + MOVOU v10, 32(AX) + MOVOU v11, 48(AX) + MOVOU m00, 64(AX) + MOVOU m01, 80(AX) + MOVOU m10, 96(AX) + MOVOU m11, 112(AX) + +DONE: + RET + +// func finalizeSSE4(out []byte, state *[16]uint64) +TEXT ·finalizeSSE4(SB), NOSPLIT, $0-32 + MOVQ state+24(FP), AX + MOVQ out_base+0(FP), BX + MOVQ out_len+8(FP), CX + + MOVOU 0(AX), v00 + MOVOU 16(AX), v01 + MOVOU 32(AX), v10 + MOVOU 48(AX), v11 + MOVOU 64(AX), m00 + MOVOU 80(AX), m01 + MOVOU 96(AX), m10 + MOVOU 112(AX), m11 + + MOVOU ·asmZipperMerge<>(SB), t2 + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + CMPQ CX, $8 + JE skipUpdate // Just 4 rounds for 64-bit checksum + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + CMPQ CX, $16 + JE skipUpdate // 6 rounds for 128-bit checksum + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + + PSHUFD $177, v01, t0 + PSHUFD $177, v00, t1 + UPDATE(t0, t1) + +skipUpdate: + MOVOU v00, 0(AX) + MOVOU v01, 16(AX) + MOVOU v10, 32(AX) + MOVOU v11, 48(AX) + MOVOU m00, 64(AX) + MOVOU m01, 80(AX) + MOVOU m10, 96(AX) + MOVOU m11, 112(AX) + + CMPQ CX, $8 + JE hash64 + CMPQ CX, $16 + JE hash128 + + // 256-bit checksum + PADDQ v00, m00 + PADDQ v10, m10 + PADDQ v01, m01 + PADDQ v11, m11 + + MOVQ m00, R8 + PEXTRQ $1, m00, R9 + MOVQ m10, R10 + PEXTRQ $1, m10, R11 + REDUCE_MOD(R8, R9, R10, R11, R12, R13, R14, R15) + MOVQ R14, 0(BX) + MOVQ R15, 8(BX) + + MOVQ m01, R8 + PEXTRQ $1, m01, R9 + MOVQ m11, R10 + PEXTRQ $1, m11, R11 + REDUCE_MOD(R8, R9, R10, R11, R12, R13, R14, R15) + MOVQ R14, 16(BX) + MOVQ R15, 24(BX) + RET + +hash128: + PADDQ v00, v11 + PADDQ m00, m11 + PADDQ v11, m11 + MOVOU m11, 0(BX) + RET + +hash64: + PADDQ v00, v10 + PADDQ m00, m10 + PADDQ v10, m10 + MOVQ m10, DX + MOVQ DX, 0(BX) + RET diff --git a/go/vt/vthash/highway/highwayhash_arm64.go b/go/vt/vthash/highway/highwayhash_arm64.go new file mode 100644 index 00000000000..2b22db7ff56 --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_arm64.go @@ -0,0 +1,64 @@ +//go:build !noasm && !appengine +// +build !noasm,!appengine + +/* +Copyright (c) 2017 Minio Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Copyright (c) 2017 Minio Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package highway + +var ( + useSSE4 = false + useAVX2 = false + useNEON = true + useVMX = false +) + +//go:noescape +func initializeArm64(state *[16]uint64, key []byte) + +//go:noescape +func updateArm64(state *[16]uint64, msg []byte) + +//go:noescape +func finalizeArm64(out []byte, state *[16]uint64) + +func initialize(state *[16]uint64, key []byte) { + if useNEON { + initializeArm64(state, key) + } else { + initializeGeneric(state, key) + } +} + +func update(state *[16]uint64, msg []byte) { + if useNEON { + updateArm64(state, msg) + } else { + updateGeneric(state, msg) + } +} + +func finalize(out []byte, state *[16]uint64) { + if useNEON { + finalizeArm64(out, state) + } else { + finalizeGeneric(out, state) + } +} diff --git a/go/vt/vthash/highway/highwayhash_arm64.s b/go/vt/vthash/highway/highwayhash_arm64.s new file mode 100644 index 00000000000..bbf2f9822bd --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_arm64.s @@ -0,0 +1,322 @@ +// Copyright (c) 2017 Minio Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//+build !noasm,!appengine + +// Use github.com/minio/asm2plan9s on this file to assemble ARM instructions to +// the opcodes of their Plan9 equivalents + +#include "textflag.h" + +#define REDUCE_MOD(x0, x1, x2, x3, tmp0, tmp1, y0, y1) \ + MOVD $0x3FFFFFFFFFFFFFFF, tmp0 \ + AND tmp0, x3 \ + MOVD x2, y0 \ + MOVD x3, y1 \ + \ + MOVD x2, tmp0 \ + MOVD x3, tmp1 \ + LSL $1, tmp1 \ + LSR $63, tmp0 \ + MOVD tmp1, x3 \ + ORR tmp0, x3 \ + \ + LSL $1, x2 \ + \ + MOVD y0, tmp0 \ + MOVD y1, tmp1 \ + LSL $2, tmp1 \ + LSR $62, tmp0 \ + MOVD tmp1, y1 \ + ORR tmp0, y1 \ + \ + LSL $2, y0 \ + \ + EOR x0, y0 \ + EOR x2, y0 \ + EOR x1, y1 \ + EOR x3, y1 + +#define UPDATE(MSG1, MSG2) \ + \ // Add message + VADD MSG1.D2, V2.D2, V2.D2 \ + VADD MSG2.D2, V3.D2, V3.D2 \ + \ + \ // v1 += mul0 + VADD V4.D2, V2.D2, V2.D2 \ + VADD V5.D2, V3.D2, V3.D2 \ + \ + \ // First pair of multiplies + VTBL V29.B16, [V0.B16, V1.B16], V10.B16 \ + VTBL V30.B16, [V2.B16, V3.B16], V11.B16 \ + \ + \ // VUMULL V10.S2, V11.S2, V12.D2 /* assembler support missing */ + \ // VUMULL2 V10.S4, V11.S4, V13.D2 /* assembler support missing */ + WORD $0x2eaac16c \ // umull v12.2d, v11.2s, v10.2s + WORD $0x6eaac16d \ // umull2 v13.2d, v11.4s, v10.4s + \ + \ // v0 += mul1 + VADD V6.D2, V0.D2, V0.D2 \ + VADD V7.D2, V1.D2, V1.D2 \ + \ + \ // Second pair of multiplies + VTBL V29.B16, [V2.B16, V3.B16], V15.B16 \ + VTBL V30.B16, [V0.B16, V1.B16], V14.B16 \ + \ + \ // EOR multiplication result in + VEOR V12.B16, V4.B16, V4.B16 \ + VEOR V13.B16, V5.B16, V5.B16 \ + \ + \ // VUMULL V14.S2, V15.S2, V16.D2 /* assembler support missing */ + \ // VUMULL2 V14.S4, V15.S4, V17.D2 /* assembler support missing */ + WORD $0x2eaec1f0 \ // umull v16.2d, v15.2s, v14.2s + WORD $0x6eaec1f1 \ // umull2 v17.2d, v15.4s, v14.4s + \ + \ // First pair of zipper-merges + VTBL V28.B16, [V2.B16], V18.B16 \ + VADD V18.D2, V0.D2, V0.D2 \ + VTBL V28.B16, [V3.B16], V19.B16 \ + VADD V19.D2, V1.D2, V1.D2 \ + \ + \ // Second pair of zipper-merges + VTBL V28.B16, [V0.B16], V20.B16 \ + VADD V20.D2, V2.D2, V2.D2 \ + VTBL V28.B16, [V1.B16], V21.B16 \ + VADD V21.D2, V3.D2, V3.D2 \ + \ + \ // EOR multiplication result in + VEOR V16.B16, V6.B16, V6.B16 \ + VEOR V17.B16, V7.B16, V7.B16 + +// func initializeArm64(state *[16]uint64, key []byte) +TEXT ·initializeArm64(SB), NOSPLIT, $0 + MOVD state+0(FP), R0 + MOVD key_base+8(FP), R1 + + VLD1 (R1), [V1.S4, V2.S4] + + VREV64 V1.S4, V3.S4 + VREV64 V2.S4, V4.S4 + + MOVD $·asmConstants(SB), R3 + VLD1 (R3), [V5.S4, V6.S4, V7.S4, V8.S4] + VEOR V5.B16, V1.B16, V1.B16 + VEOR V6.B16, V2.B16, V2.B16 + VEOR V7.B16, V3.B16, V3.B16 + VEOR V8.B16, V4.B16, V4.B16 + + VST1.P [V1.D2, V2.D2, V3.D2, V4.D2], 64(R0) + VST1 [V5.D2, V6.D2, V7.D2, V8.D2], (R0) + RET + +TEXT ·updateArm64(SB), NOSPLIT, $0 + MOVD state+0(FP), R0 + MOVD msg_base+8(FP), R1 + MOVD msg_len+16(FP), R2 // length of message + SUBS $32, R2 + BMI complete + + // Definition of registers + // v0 = v0.lo + // v1 = v0.hi + // v2 = v1.lo + // v3 = v1.hi + // v4 = mul0.lo + // v5 = mul0.hi + // v6 = mul1.lo + // v7 = mul1.hi + + // Load zipper merge constants table pointer + MOVD $·asmZipperMerge(SB), R3 + + // and load zipper merge constants into v28, v29, and v30 + VLD1 (R3), [V28.B16, V29.B16, V30.B16] + + VLD1.P 64(R0), [V0.D2, V1.D2, V2.D2, V3.D2] + VLD1 (R0), [V4.D2, V5.D2, V6.D2, V7.D2] + SUBS $64, R0 + +loop: + // Main loop + VLD1.P 32(R1), [V26.S4, V27.S4] + + UPDATE(V26, V27) + + SUBS $32, R2 + BPL loop + + // Store result + VST1.P [V0.D2, V1.D2, V2.D2, V3.D2], 64(R0) + VST1 [V4.D2, V5.D2, V6.D2, V7.D2], (R0) + +complete: + RET + +// func finalizeArm64(out []byte, state *[16]uint64) +TEXT ·finalizeArm64(SB), NOSPLIT, $0-32 + MOVD state+24(FP), R0 + MOVD out_base+0(FP), R1 + MOVD out_len+8(FP), R2 + + // Load zipper merge constants table pointer + MOVD $·asmZipperMerge(SB), R3 + + // and load zipper merge constants into v28, v29, and v30 + VLD1 (R3), [V28.B16, V29.B16, V30.B16] + + VLD1.P 64(R0), [V0.D2, V1.D2, V2.D2, V3.D2] + VLD1 (R0), [V4.D2, V5.D2, V6.D2, V7.D2] + SUB $64, R0 + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + CMP $8, R2 + BEQ skipUpdate // Just 4 rounds for 64-bit checksum + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + CMP $16, R2 + BEQ skipUpdate // 6 rounds for 128-bit checksum + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + + VREV64 V1.S4, V26.S4 + VREV64 V0.S4, V27.S4 + UPDATE(V26, V27) + +skipUpdate: + // Store result + VST1.P [V0.D2, V1.D2, V2.D2, V3.D2], 64(R0) + VST1 [V4.D2, V5.D2, V6.D2, V7.D2], (R0) + SUB $64, R0 + + CMP $8, R2 + BEQ hash64 + CMP $16, R2 + BEQ hash128 + + // 256-bit checksum + MOVD 0*8(R0), R8 + MOVD 1*8(R0), R9 + MOVD 4*8(R0), R10 + MOVD 5*8(R0), R11 + MOVD 8*8(R0), R4 + MOVD 9*8(R0), R5 + MOVD 12*8(R0), R6 + MOVD 13*8(R0), R7 + ADD R4, R8 + ADD R5, R9 + ADD R6, R10 + ADD R7, R11 + + REDUCE_MOD(R8, R9, R10, R11, R4, R5, R6, R7) + MOVD R6, 0(R1) + MOVD R7, 8(R1) + + MOVD 2*8(R0), R8 + MOVD 3*8(R0), R9 + MOVD 6*8(R0), R10 + MOVD 7*8(R0), R11 + MOVD 10*8(R0), R4 + MOVD 11*8(R0), R5 + MOVD 14*8(R0), R6 + MOVD 15*8(R0), R7 + ADD R4, R8 + ADD R5, R9 + ADD R6, R10 + ADD R7, R11 + + REDUCE_MOD(R8, R9, R10, R11, R4, R5, R6, R7) + MOVD R6, 16(R1) + MOVD R7, 24(R1) + RET + +hash128: + MOVD 0*8(R0), R8 + MOVD 1*8(R0), R9 + MOVD 6*8(R0), R10 + MOVD 7*8(R0), R11 + ADD R10, R8 + ADD R11, R9 + MOVD 8*8(R0), R10 + MOVD 9*8(R0), R11 + ADD R10, R8 + ADD R11, R9 + MOVD 14*8(R0), R10 + MOVD 15*8(R0), R11 + ADD R10, R8 + ADD R11, R9 + MOVD R8, 0(R1) + MOVD R9, 8(R1) + RET + +hash64: + MOVD 0*8(R0), R4 + MOVD 4*8(R0), R5 + MOVD 8*8(R0), R6 + MOVD 12*8(R0), R7 + ADD R5, R4 + ADD R7, R6 + ADD R6, R4 + MOVD R4, (R1) + RET + +DATA ·asmConstants+0x00(SB)/8, $0xdbe6d5d5fe4cce2f +DATA ·asmConstants+0x08(SB)/8, $0xa4093822299f31d0 +DATA ·asmConstants+0x10(SB)/8, $0x13198a2e03707344 +DATA ·asmConstants+0x18(SB)/8, $0x243f6a8885a308d3 +DATA ·asmConstants+0x20(SB)/8, $0x3bd39e10cb0ef593 +DATA ·asmConstants+0x28(SB)/8, $0xc0acf169b5f18a8c +DATA ·asmConstants+0x30(SB)/8, $0xbe5466cf34e90c6c +DATA ·asmConstants+0x38(SB)/8, $0x452821e638d01377 +GLOBL ·asmConstants(SB), 8, $64 + +// Constants for TBL instructions +DATA ·asmZipperMerge+0x0(SB)/8, $0x000f010e05020c03 // zipper merge constant +DATA ·asmZipperMerge+0x8(SB)/8, $0x070806090d0a040b +DATA ·asmZipperMerge+0x10(SB)/8, $0x0f0e0d0c07060504 // setup first register for multiply +DATA ·asmZipperMerge+0x18(SB)/8, $0x1f1e1d1c17161514 +DATA ·asmZipperMerge+0x20(SB)/8, $0x0b0a090803020100 // setup second register for multiply +DATA ·asmZipperMerge+0x28(SB)/8, $0x1b1a191813121110 +GLOBL ·asmZipperMerge(SB), 8, $48 diff --git a/go/vt/vthash/highway/highwayhash_generic.go b/go/vt/vthash/highway/highwayhash_generic.go new file mode 100644 index 00000000000..9ea17094843 --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_generic.go @@ -0,0 +1,350 @@ +/* +Copyright (c) 2017 Minio Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package highway + +import ( + "encoding/binary" +) + +const ( + v0 = 0 + v1 = 4 + mul0 = 8 + mul1 = 12 +) + +var ( + init0 = [4]uint64{0xdbe6d5d5fe4cce2f, 0xa4093822299f31d0, 0x13198a2e03707344, 0x243f6a8885a308d3} + init1 = [4]uint64{0x3bd39e10cb0ef593, 0xc0acf169b5f18a8c, 0xbe5466cf34e90c6c, 0x452821e638d01377} +) + +func initializeGeneric(state *[16]uint64, k []byte) { + var key [4]uint64 + + key[0] = binary.LittleEndian.Uint64(k[0:]) + key[1] = binary.LittleEndian.Uint64(k[8:]) + key[2] = binary.LittleEndian.Uint64(k[16:]) + key[3] = binary.LittleEndian.Uint64(k[24:]) + + copy(state[mul0:], init0[:]) + copy(state[mul1:], init1[:]) + + for i, k := range key { + state[v0+i] = init0[i] ^ k + } + + key[0] = key[0]>>32 | key[0]<<32 + key[1] = key[1]>>32 | key[1]<<32 + key[2] = key[2]>>32 | key[2]<<32 + key[3] = key[3]>>32 | key[3]<<32 + + for i, k := range key { + state[v1+i] = init1[i] ^ k + } +} + +func updateGeneric(state *[16]uint64, msg []byte) { + for len(msg) >= 32 { + m := msg[:32] + + // add message + mul0 + // Interleave operations to hide multiplication + state[v1+0] += binary.LittleEndian.Uint64(m) + state[mul0+0] + state[mul0+0] ^= uint64(uint32(state[v1+0])) * (state[v0+0] >> 32) + state[v0+0] += state[mul1+0] + state[mul1+0] ^= uint64(uint32(state[v0+0])) * (state[v1+0] >> 32) + + state[v1+1] += binary.LittleEndian.Uint64(m[8:]) + state[mul0+1] + state[mul0+1] ^= uint64(uint32(state[v1+1])) * (state[v0+1] >> 32) + state[v0+1] += state[mul1+1] + state[mul1+1] ^= uint64(uint32(state[v0+1])) * (state[v1+1] >> 32) + + state[v1+2] += binary.LittleEndian.Uint64(m[16:]) + state[mul0+2] + state[mul0+2] ^= uint64(uint32(state[v1+2])) * (state[v0+2] >> 32) + state[v0+2] += state[mul1+2] + state[mul1+2] ^= uint64(uint32(state[v0+2])) * (state[v1+2] >> 32) + + state[v1+3] += binary.LittleEndian.Uint64(m[24:]) + state[mul0+3] + state[mul0+3] ^= uint64(uint32(state[v1+3])) * (state[v0+3] >> 32) + state[v0+3] += state[mul1+3] + state[mul1+3] ^= uint64(uint32(state[v0+3])) * (state[v1+3] >> 32) + + // inlined: zipperMerge(state[v1+0], state[v1+1], &state[v0+0], &state[v0+1]) + { + val0 := state[v1+0] + val1 := state[v1+1] + res := val0 & (0xff << (2 * 8)) + res2 := (val0 & (0xff << (7 * 8))) + (val1 & (0xff << (2 * 8))) + res += (val1 & (0xff << (7 * 8))) >> 8 + res2 += (val0 & (0xff << (6 * 8))) >> 8 + res += ((val0 & (0xff << (5 * 8))) + (val1 & (0xff << (6 * 8)))) >> 16 + res2 += (val1 & (0xff << (5 * 8))) >> 16 + res += ((val0 & (0xff << (3 * 8))) + (val1 & (0xff << (4 * 8)))) >> 24 + res2 += ((val1 & (0xff << (3 * 8))) + (val0 & (0xff << (4 * 8)))) >> 24 + res += (val0 & (0xff << (1 * 8))) << 32 + res2 += (val1 & 0xff) << 48 + res += val0 << 56 + res2 += (val1 & (0xff << (1 * 8))) << 24 + + state[v0+0] += res + state[v0+1] += res2 + } + // zipperMerge(state[v1+2], state[v1+3], &state[v0+2], &state[v0+3]) + { + val0 := state[v1+2] + val1 := state[v1+3] + res := val0 & (0xff << (2 * 8)) + res2 := (val0 & (0xff << (7 * 8))) + (val1 & (0xff << (2 * 8))) + res += (val1 & (0xff << (7 * 8))) >> 8 + res2 += (val0 & (0xff << (6 * 8))) >> 8 + res += ((val0 & (0xff << (5 * 8))) + (val1 & (0xff << (6 * 8)))) >> 16 + res2 += (val1 & (0xff << (5 * 8))) >> 16 + res += ((val0 & (0xff << (3 * 8))) + (val1 & (0xff << (4 * 8)))) >> 24 + res2 += ((val1 & (0xff << (3 * 8))) + (val0 & (0xff << (4 * 8)))) >> 24 + res += (val0 & (0xff << (1 * 8))) << 32 + res2 += (val1 & 0xff) << 48 + res += val0 << 56 + res2 += (val1 & (0xff << (1 * 8))) << 24 + + state[v0+2] += res + state[v0+3] += res2 + } + + // inlined: zipperMerge(state[v0+0], state[v0+1], &state[v1+0], &state[v1+1]) + { + val0 := state[v0+0] + val1 := state[v0+1] + res := val0 & (0xff << (2 * 8)) + res2 := (val0 & (0xff << (7 * 8))) + (val1 & (0xff << (2 * 8))) + res += (val1 & (0xff << (7 * 8))) >> 8 + res2 += (val0 & (0xff << (6 * 8))) >> 8 + res += ((val0 & (0xff << (5 * 8))) + (val1 & (0xff << (6 * 8)))) >> 16 + res2 += (val1 & (0xff << (5 * 8))) >> 16 + res += ((val0 & (0xff << (3 * 8))) + (val1 & (0xff << (4 * 8)))) >> 24 + res2 += ((val1 & (0xff << (3 * 8))) + (val0 & (0xff << (4 * 8)))) >> 24 + res += (val0 & (0xff << (1 * 8))) << 32 + res2 += (val1 & 0xff) << 48 + res += val0 << 56 + res2 += (val1 & (0xff << (1 * 8))) << 24 + + state[v1+0] += res + state[v1+1] += res2 + } + + //inlined: zipperMerge(state[v0+2], state[v0+3], &state[v1+2], &state[v1+3]) + { + val0 := state[v0+2] + val1 := state[v0+3] + res := val0 & (0xff << (2 * 8)) + res2 := (val0 & (0xff << (7 * 8))) + (val1 & (0xff << (2 * 8))) + res += (val1 & (0xff << (7 * 8))) >> 8 + res2 += (val0 & (0xff << (6 * 8))) >> 8 + res += ((val0 & (0xff << (5 * 8))) + (val1 & (0xff << (6 * 8)))) >> 16 + res2 += (val1 & (0xff << (5 * 8))) >> 16 + res += ((val0 & (0xff << (3 * 8))) + (val1 & (0xff << (4 * 8)))) >> 24 + res2 += ((val1 & (0xff << (3 * 8))) + (val0 & (0xff << (4 * 8)))) >> 24 + res += (val0 & (0xff << (1 * 8))) << 32 + res2 += (val1 & 0xff) << 48 + res += val0 << 56 + res2 += (val1 & (0xff << (1 * 8))) << 24 + + state[v1+2] += res + state[v1+3] += res2 + } + msg = msg[32:] + } +} + +func finalizeGeneric(out []byte, state *[16]uint64) { + var perm [4]uint64 + var tmp [32]byte + runs := 4 + if len(out) == 16 { + runs = 6 + } else if len(out) == 32 { + runs = 10 + } + for i := 0; i < runs; i++ { + perm[0] = state[v0+2]>>32 | state[v0+2]<<32 + perm[1] = state[v0+3]>>32 | state[v0+3]<<32 + perm[2] = state[v0+0]>>32 | state[v0+0]<<32 + perm[3] = state[v0+1]>>32 | state[v0+1]<<32 + + binary.LittleEndian.PutUint64(tmp[0:], perm[0]) + binary.LittleEndian.PutUint64(tmp[8:], perm[1]) + binary.LittleEndian.PutUint64(tmp[16:], perm[2]) + binary.LittleEndian.PutUint64(tmp[24:], perm[3]) + + update(state, tmp[:]) + } + + switch len(out) { + case 8: + binary.LittleEndian.PutUint64(out, state[v0+0]+state[v1+0]+state[mul0+0]+state[mul1+0]) + case 16: + binary.LittleEndian.PutUint64(out, state[v0+0]+state[v1+2]+state[mul0+0]+state[mul1+2]) + binary.LittleEndian.PutUint64(out[8:], state[v0+1]+state[v1+3]+state[mul0+1]+state[mul1+3]) + case 32: + h0, h1 := reduceMod(state[v0+0]+state[mul0+0], state[v0+1]+state[mul0+1], state[v1+0]+state[mul1+0], state[v1+1]+state[mul1+1]) + binary.LittleEndian.PutUint64(out[0:], h0) + binary.LittleEndian.PutUint64(out[8:], h1) + + h0, h1 = reduceMod(state[v0+2]+state[mul0+2], state[v0+3]+state[mul0+3], state[v1+2]+state[mul1+2], state[v1+3]+state[mul1+3]) + binary.LittleEndian.PutUint64(out[16:], h0) + binary.LittleEndian.PutUint64(out[24:], h1) + } +} + +// Experiments on variations left for future reference... +/* +func zipperMerge(v0, v1 uint64, d0, d1 *uint64) { + if true { + // fastest. original interleaved... + res := v0 & (0xff << (2 * 8)) + res2 := (v0 & (0xff << (7 * 8))) + (v1 & (0xff << (2 * 8))) + res += (v1 & (0xff << (7 * 8))) >> 8 + res2 += (v0 & (0xff << (6 * 8))) >> 8 + res += ((v0 & (0xff << (5 * 8))) + (v1 & (0xff << (6 * 8)))) >> 16 + res2 += (v1 & (0xff << (5 * 8))) >> 16 + res += ((v0 & (0xff << (3 * 8))) + (v1 & (0xff << (4 * 8)))) >> 24 + res2 += ((v1 & (0xff << (3 * 8))) + (v0 & (0xff << (4 * 8)))) >> 24 + res += (v0 & (0xff << (1 * 8))) << 32 + res2 += (v1 & 0xff) << 48 + res += v0 << 56 + res2 += (v1 & (0xff << (1 * 8))) << 24 + + *d0 += res + *d1 += res2 + } else if false { + // Reading bytes and combining into uint64 + var v0b [8]byte + binary.LittleEndian.PutUint64(v0b[:], v0) + var v1b [8]byte + binary.LittleEndian.PutUint64(v1b[:], v1) + var res, res2 uint64 + + res = uint64(v0b[0]) << (7 * 8) + res2 = uint64(v1b[0]) << (6 * 8) + res |= uint64(v0b[1]) << (5 * 8) + res2 |= uint64(v1b[1]) << (4 * 8) + res |= uint64(v0b[2]) << (2 * 8) + res2 |= uint64(v1b[2]) << (2 * 8) + res |= uint64(v0b[3]) + res2 |= uint64(v0b[4]) << (1 * 8) + res |= uint64(v0b[5]) << (3 * 8) + res2 |= uint64(v0b[6]) << (5 * 8) + res |= uint64(v1b[4]) << (1 * 8) + res2 |= uint64(v0b[7]) << (7 * 8) + res |= uint64(v1b[6]) << (4 * 8) + res2 |= uint64(v1b[3]) + res |= uint64(v1b[7]) << (6 * 8) + res2 |= uint64(v1b[5]) << (3 * 8) + + *d0 += res + *d1 += res2 + + } else if false { + // bytes to bytes shuffle + var v0b [8]byte + binary.LittleEndian.PutUint64(v0b[:], v0) + var v1b [8]byte + binary.LittleEndian.PutUint64(v1b[:], v1) + var res [8]byte + + //res += ((v0 & (0xff << (3 * 8))) + (v1 & (0xff << (4 * 8)))) >> 24 + res[0] = v0b[3] + res[1] = v1b[4] + + // res := v0 & (0xff << (2 * 8)) + res[2] = v0b[2] + + //res += ((v0 & (0xff << (5 * 8))) + (v1 & (0xff << (6 * 8)))) >> 16 + res[3] = v0b[5] + res[4] = v1b[6] + + //res += (v0 & (0xff << (1 * 8))) << 32 + res[5] = v0b[1] + + //res += (v1 & (0xff << (7 * 8))) >> 8 + res[6] += v1b[7] + + //res += v0 << 56 + res[7] = v0b[0] + v0 = binary.LittleEndian.Uint64(res[:]) + *d0 += v0 + + //res += ((v1 & (0xff << (3 * 8))) + (v0 & (0xff << (4 * 8)))) >> 24 + res[0] = v1b[3] + res[1] = v0b[4] + + res[2] = v1b[2] + + // res += (v1 & (0xff << (5 * 8))) >> 16 + res[3] = v1b[5] + + //res += (v1 & (0xff << (1 * 8))) << 24 + res[4] = v1b[1] + + // res += (v0 & (0xff << (6 * 8))) >> 8 + res[5] = v0b[6] + + //res := (v0 & (0xff << (7 * 8))) + (v1 & (0xff << (2 * 8))) + res[7] = v0b[7] + + //res += (v1 & 0xff) << 48 + res[6] = v1b[0] + + v0 = binary.LittleEndian.Uint64(res[:]) + *d1 += v0 + } else { + // original. + res := v0 & (0xff << (2 * 8)) + res += (v1 & (0xff << (7 * 8))) >> 8 + res += ((v0 & (0xff << (5 * 8))) + (v1 & (0xff << (6 * 8)))) >> 16 + res += ((v0 & (0xff << (3 * 8))) + (v1 & (0xff << (4 * 8)))) >> 24 + res += (v0 & (0xff << (1 * 8))) << 32 + res += v0 << 56 + + *d0 += res + + res = (v0 & (0xff << (7 * 8))) + (v1 & (0xff << (2 * 8))) + res += (v0 & (0xff << (6 * 8))) >> 8 + res += (v1 & (0xff << (5 * 8))) >> 16 + res += ((v1 & (0xff << (3 * 8))) + (v0 & (0xff << (4 * 8)))) >> 24 + res += (v1 & 0xff) << 48 + res += (v1 & (0xff << (1 * 8))) << 24 + + *d1 += res + } +} +*/ + +// reduce v = [v0, v1, v2, v3] mod the irreducible polynomial x^128 + x^2 + x +func reduceMod(v0, v1, v2, v3 uint64) (r0, r1 uint64) { + v3 &= 0x3FFFFFFFFFFFFFFF + + r0, r1 = v2, v3 + + v3 = (v3 << 1) | (v2 >> (64 - 1)) + v2 <<= 1 + r1 = (r1 << 2) | (r0 >> (64 - 2)) + r0 <<= 2 + + r0 ^= v0 ^ v2 + r1 ^= v1 ^ v3 + return +} diff --git a/go/vt/vthash/highway/highwayhash_ppc64le.go b/go/vt/vthash/highway/highwayhash_ppc64le.go new file mode 100644 index 00000000000..f70e2a41473 --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_ppc64le.go @@ -0,0 +1,49 @@ +//go:build !noasm && !appengine +// +build !noasm,!appengine + +/* +Copyright (c) 2017 Minio Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Copyright (c) 2017 Minio Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package highway + +var ( + useSSE4 = false + useAVX2 = false + useNEON = false + useVMX = true +) + +//go:noescape +func updatePpc64Le(state *[16]uint64, msg []byte) + +func initialize(state *[16]uint64, key []byte) { + initializeGeneric(state, key) +} + +func update(state *[16]uint64, msg []byte) { + if useVMX { + updatePpc64Le(state, msg) + } else { + updateGeneric(state, msg) + } +} + +func finalize(out []byte, state *[16]uint64) { + finalizeGeneric(out, state) +} diff --git a/go/vt/vthash/highway/highwayhash_ppc64le.s b/go/vt/vthash/highway/highwayhash_ppc64le.s new file mode 100644 index 00000000000..957cebc4ddc --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_ppc64le.s @@ -0,0 +1,180 @@ +// Copyright (c) 2017 Minio Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//+build !noasm,!appengine + +#include "textflag.h" + +// Definition of registers +#define V0_LO VS32 +#define V0_LO_ V0 +#define V0_HI VS33 +#define V0_HI_ V1 +#define V1_LO VS34 +#define V1_LO_ V2 +#define V1_HI VS35 +#define V1_HI_ V3 +#define MUL0_LO VS36 +#define MUL0_LO_ V4 +#define MUL0_HI VS37 +#define MUL0_HI_ V5 +#define MUL1_LO VS38 +#define MUL1_LO_ V6 +#define MUL1_HI VS39 +#define MUL1_HI_ V7 + +// Message +#define MSG_LO VS40 +#define MSG_LO_ V8 +#define MSG_HI VS41 + +// Constants +#define ROTATE VS42 +#define ROTATE_ V10 +#define MASK VS43 +#define MASK_ V11 + +// Temps +#define TEMP1 VS44 +#define TEMP1_ V12 +#define TEMP2 VS45 +#define TEMP2_ V13 +#define TEMP3 VS46 +#define TEMP3_ V14 +#define TEMP4_ V15 +#define TEMP5_ V16 +#define TEMP6_ V17 +#define TEMP7_ V18 + +// Regular registers +#define STATE R3 +#define MSG_BASE R4 +#define MSG_LEN R5 +#define CONSTANTS R6 +#define P1 R7 +#define P2 R8 +#define P3 R9 +#define P4 R10 +#define P5 R11 +#define P6 R12 +#define P7 R14 // avoid using R13 + +TEXT ·updatePpc64Le(SB), NOFRAME|NOSPLIT, $0-32 + MOVD state+0(FP), STATE + MOVD msg_base+8(FP), MSG_BASE + MOVD msg_len+16(FP), MSG_LEN // length of message + + // Sanity check for length + CMPU MSG_LEN, $31 + BLE complete + + // Setup offsets + MOVD $16, P1 + MOVD $32, P2 + MOVD $48, P3 + MOVD $64, P4 + MOVD $80, P5 + MOVD $96, P6 + MOVD $112, P7 + + // Load state + LXVD2X (STATE)(R0), V0_LO + LXVD2X (STATE)(P1), V0_HI + LXVD2X (STATE)(P2), V1_LO + LXVD2X (STATE)(P3), V1_HI + LXVD2X (STATE)(P4), MUL0_LO + LXVD2X (STATE)(P5), MUL0_HI + LXVD2X (STATE)(P6), MUL1_LO + LXVD2X (STATE)(P7), MUL1_HI + XXPERMDI V0_LO, V0_LO, $2, V0_LO + XXPERMDI V0_HI, V0_HI, $2, V0_HI + XXPERMDI V1_LO, V1_LO, $2, V1_LO + XXPERMDI V1_HI, V1_HI, $2, V1_HI + XXPERMDI MUL0_LO, MUL0_LO, $2, MUL0_LO + XXPERMDI MUL0_HI, MUL0_HI, $2, MUL0_HI + XXPERMDI MUL1_LO, MUL1_LO, $2, MUL1_LO + XXPERMDI MUL1_HI, MUL1_HI, $2, MUL1_HI + + // Load asmConstants table pointer + MOVD $·asmConstants(SB), CONSTANTS + LXVD2X (CONSTANTS)(R0), ROTATE + LXVD2X (CONSTANTS)(P1), MASK + XXLNAND MASK, MASK, MASK + +loop: + // Main highwayhash update loop + LXVD2X (MSG_BASE)(R0), MSG_LO + VADDUDM V0_LO_, MUL1_LO_, TEMP1_ + VRLD V0_LO_, ROTATE_, TEMP2_ + VADDUDM MUL1_HI_, V0_HI_, TEMP3_ + LXVD2X (MSG_BASE)(P1), MSG_HI + ADD $32, MSG_BASE, MSG_BASE + XXPERMDI MSG_LO, MSG_LO, $2, MSG_LO + XXPERMDI MSG_HI, MSG_HI, $2, V0_LO + VADDUDM MSG_LO_, MUL0_LO_, MSG_LO_ + VADDUDM V0_LO_, MUL0_HI_, V0_LO_ + VADDUDM MSG_LO_, V1_LO_, V1_LO_ + VSRD V0_HI_, ROTATE_, MSG_LO_ + VADDUDM V0_LO_, V1_HI_, V1_HI_ + VPERM V1_LO_, V1_LO_, MASK_, V0_LO_ + VMULOUW V1_LO_, TEMP2_, TEMP2_ + VPERM V1_HI_, V1_HI_, MASK_, TEMP7_ + VADDUDM V0_LO_, TEMP1_, V0_LO_ + VMULOUW V1_HI_, MSG_LO_, MSG_LO_ + VADDUDM TEMP7_, TEMP3_, V0_HI_ + VPERM V0_LO_, V0_LO_, MASK_, TEMP6_ + VRLD V1_LO_, ROTATE_, TEMP4_ + VSRD V1_HI_, ROTATE_, TEMP5_ + VPERM V0_HI_, V0_HI_, MASK_, TEMP7_ + XXLXOR MUL0_LO, TEMP2, MUL0_LO + VMULOUW TEMP1_, TEMP4_, TEMP1_ + VMULOUW TEMP3_, TEMP5_, TEMP3_ + XXLXOR MUL0_HI, MSG_LO, MUL0_HI + XXLXOR MUL1_LO, TEMP1, MUL1_LO + XXLXOR MUL1_HI, TEMP3, MUL1_HI + VADDUDM TEMP6_, V1_LO_, V1_LO_ + VADDUDM TEMP7_, V1_HI_, V1_HI_ + + SUB $32, MSG_LEN, MSG_LEN + CMPU MSG_LEN, $32 + BGE loop + + // Save state + XXPERMDI V0_LO, V0_LO, $2, V0_LO + XXPERMDI V0_HI, V0_HI, $2, V0_HI + XXPERMDI V1_LO, V1_LO, $2, V1_LO + XXPERMDI V1_HI, V1_HI, $2, V1_HI + XXPERMDI MUL0_LO, MUL0_LO, $2, MUL0_LO + XXPERMDI MUL0_HI, MUL0_HI, $2, MUL0_HI + XXPERMDI MUL1_LO, MUL1_LO, $2, MUL1_LO + XXPERMDI MUL1_HI, MUL1_HI, $2, MUL1_HI + STXVD2X V0_LO, (STATE)(R0) + STXVD2X V0_HI, (STATE)(P1) + STXVD2X V1_LO, (STATE)(P2) + STXVD2X V1_HI, (STATE)(P3) + STXVD2X MUL0_LO, (STATE)(P4) + STXVD2X MUL0_HI, (STATE)(P5) + STXVD2X MUL1_LO, (STATE)(P6) + STXVD2X MUL1_HI, (STATE)(P7) + +complete: + RET + +// Constants table +DATA ·asmConstants+0x0(SB)/8, $0x0000000000000020 +DATA ·asmConstants+0x8(SB)/8, $0x0000000000000020 +DATA ·asmConstants+0x10(SB)/8, $0x070806090d0a040b // zipper merge constant +DATA ·asmConstants+0x18(SB)/8, $0x000f010e05020c03 // zipper merge constant + +GLOBL ·asmConstants(SB), 8, $32 diff --git a/go/vt/vthash/highway/highwayhash_ref.go b/go/vt/vthash/highway/highwayhash_ref.go new file mode 100644 index 00000000000..3ecb0e2f6ea --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_ref.go @@ -0,0 +1,39 @@ +//go:build noasm || (!amd64 && !arm64 && !ppc64le) +// +build noasm !amd64,!arm64,!ppc64le + +/* +Copyright (c) 2017 Minio Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package highway + +var ( + useSSE4 = false + useAVX2 = false + useNEON = false + useVMX = false +) + +func initialize(state *[16]uint64, k []byte) { + initializeGeneric(state, k) +} + +func update(state *[16]uint64, msg []byte) { + updateGeneric(state, msg) +} + +func finalize(out []byte, state *[16]uint64) { + finalizeGeneric(out, state) +} diff --git a/go/vt/vthash/highway/highwayhash_test.go b/go/vt/vthash/highway/highwayhash_test.go new file mode 100644 index 00000000000..896b6d13763 --- /dev/null +++ b/go/vt/vthash/highway/highwayhash_test.go @@ -0,0 +1,228 @@ +/* +Copyright (c) 2017 Minio Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Copyright (c) 2017 Minio Inc. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package highway + +import ( + "bytes" + "encoding/hex" + "math/rand" + "runtime" + "sync/atomic" + "testing" +) + +func TestVectors(t *testing.T) { + defer func(sse4, avx2, neon, vmx bool) { + useSSE4, useAVX2, useNEON, useVMX = sse4, avx2, neon, vmx + }(useSSE4, useAVX2, useNEON, useVMX) + + if useAVX2 { + t.Run("AVX2 version", func(t *testing.T) { + testVectors(New128, testVectors128, t) + testVectors(New, testVectors256, t) + useAVX2 = false + }) + } + if useSSE4 { + t.Run("SSE4 version", func(t *testing.T) { + testVectors(New128, testVectors128, t) + testVectors(New, testVectors256, t) + useSSE4 = false + }) + } + if useNEON { + t.Run("NEON version", func(t *testing.T) { + testVectors(New128, testVectors128, t) + testVectors(New, testVectors256, t) + useNEON = false + }) + } + if useVMX { + t.Run("VMX version", func(t *testing.T) { + testVectors(New128, testVectors128, t) + testVectors(New, testVectors256, t) + useVMX = false + }) + } + t.Run("Generic version", func(t *testing.T) { + testVectors(New128, testVectors128, t) + testVectors(New, testVectors256, t) + }) +} + +func testVectors(NewFunc func([32]byte) *Digest, vectors []string, t *testing.T) { + key, err := hex.DecodeString("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f") + if err != nil { + t.Fatalf("Failed to decode key: %v", err) + } + input := make([]byte, len(vectors)) + + h := NewFunc([32]byte(key)) + for i, v := range vectors { + input[i] = byte(i) + + expected, err := hex.DecodeString(v) + if err != nil { + t.Fatalf("Failed to decode test vector: %v error: %v", v, err) + } + + _, _ = h.Write(input[:i]) + if sum := h.Sum(nil); !bytes.Equal(sum, expected[:]) { + t.Errorf("Test %d: hash mismatch: got: %v want: %v", i, hex.EncodeToString(sum), hex.EncodeToString(expected)) + } + h.Reset() + + switch h.Size() { + case Size: + if sum := Sum(input[:i], key); !bytes.Equal(sum[:], expected) { + t.Errorf("Test %d: Sum mismatch: got: %v want: %v", i, hex.EncodeToString(sum[:]), hex.EncodeToString(expected)) + } + case Size128: + if sum := Sum128(input[:i], key); !bytes.Equal(sum[:], expected) { + t.Errorf("Test %d: Sum mismatch: got: %v want: %v", i, hex.EncodeToString(sum[:]), hex.EncodeToString(expected)) + } + } + } +} + +var testVectors128 = []string{ + "c7fe8f9d8f26ed0f6f3e097f765e5633", "a8e7813689a8b0d6b4dc9cebf91d29dc", "04da165a26ad153d68e832dc38560878", "eb0b5f291b62070679ddced90f9ae6bf", + "9ee4ac6db49e392608923139d02a922e", "d82ed186c3bd50323ac2636c90103819", "476589cbb36a476f1910ed376f57de7c", "b4717169ca1f402a6c79029fff031fbe", + "e8520528846de9a1c20aec3bc6f15c69", "b2631ef302212a14cc00505b8cb9851a", "5bbcb6260eb7a1515955a42d3b1f9e92", "5b419a0562039988137d7bc4221fd2be", + "6695af1c5f1f1fcdd4c8f9e08cba18a8", "5761fe12415625a248b8ddb8784ce9b2", "1909ccd1eb2f49bda2415602bc1dcdce", "54afc42ba5372214d7bc266e0b6c79e0", + "ad01a4d5ff604441c8189f01d5a39e02", "62991cc5964b2ac5a05e9b16b178b8ec", "ceeafb118fca40d931d5f816d6463af9", "f5cbc0e50a9dc48a937c1df58dbffd3f", + "a8002d859b276dac46aaeba56b3acd7d", "568af093bd2116f1d5d93d1698c37331", "9ff88cf650e24c0ced981841da3c12b3", "ce519a3ded97ab150e0869914774e27c", + "b845488d191e00cd772daad88bd9d9d0", "793d49a017d6f334167e7f39f604d37d", "b6c6f4a99068b55c4f30676516290813", "c0d15b248b6fda308c74d93f7e8b826f", + "c0124c20490358e01c445fac0cdaf693", "453007a51b7348f67659b64f1197b85f", "06528a7354834f0291097eeb18499a50", "297ca5e865b4e70646d4f5073a5e4152", + "aa4a43c166df8419b9e4b3f95819fc16", "6cc3c6e0af7816119d84a2e59db558f9", "9004fb4084bc3f7736856543d2d56ec9", "41c9b60b71dce391e9aceec10b6a33ea", + "d4d97a5d81e3cf259ec58f828c4fe9f2", "f288c23cb838fbb904ec50f8c8c47974", "8c2b9825c5d5851df4db486fc1b1266e", "e7bd6060bd554e8ad03f8b0599d53421", + "368f7794f98f952a23641de61a2d05e8", "333245bee63a2389b9c0e8d7879ccf3a", "d5c8a97ee2f5584440512aca9bb48f41", "682ad17e83010309e661c83396f61710", + "9095d40447d80d33e4a64b3aadf19d33", "76c5f263a6639356f65ec9e3953d3b36", "3707b98685d0c8ace9284e7d08e8a02b", "20956dc8277ac2392e936051a420b68d", + "2d071a67eb4a6a8ee67ee4101a56d36e", "4ac7beb165d711002e84de6e656e0ed8", "4cc66a932bd615257d8a08d7948708ce", "af236ec152156291efcc23eb94004f26", + "803426970d88211e8610a3d3074865d8", "2d437f09af6ad7393947079de0e117a5", "145ac637f3a4170fd476f9695f21512f", "445e8912da5cfba0d13cf1d1c43d8c56", + "ce469cd800fcc893690e337e94dad5ba", "94561a1d50077c812bacbf2ce76e4d58", "bf53f073af68d691ede0c18376648ef9", "8bcf3c6befe18152d8836016dfc34cbc", + "b9eeaabe6d1bd6aa7b78160c009d96ff", "795847c04fd825432d1c5f90bd19b914", "d1a66baad176a179862b3aa5c520f7f1", "f03e2f021870bd74cb4b5fada894ea3a", + "f2c4d498711fbb98c88f91de7105bce0", +} + +var testVectors256 = []string{ + "f574c8c22a4844dd1f35c713730146d9ff1487b9ccbeaeb3f41d75453123da41", "54825fe4bc41b9ed0fc6ca3def440de2474a32cb9b1b657284e475b24c627320", + "54e4af24dff9df3f73e80a1b1abfc4117a592269cc6951112cb4330d59f60812", "5cd9d10dd7a00a48d0d111697c5e22895a86bb8b6b42a88e22c7e190c3fb3de2", + "dce42b2197c4cfc99b92d2aff69d5fa89e10f41d219fda1f9b4f4d377a27e407", "b385dca466f5b4b44201465eba634bbfe31ddccd688ef415c68580387d58740f", + "b4b9ad860ac74564b6ceb48427fb9ca913dbb2a0409de2da70119d9af26d52b6", "81ad8709a0b166d6376d8ceb38f8f1a430e063d4076e22e96c522c067dd65457", + "c08b76edb005b9f1453afffcf36f97e67897d0d98d51be4f330d1e37ebafa0d9", "81293c0dd7e4d880a1f12464d1bb0ff1d10c3f9dbe2d5ccff273b601f7e8bfc0", + "be62a2e5508ce4ade038fefdb192948e38b8e92f4bb78407cd6d65db74d5410e", "cf071853b977bea138971a6adea797ba1f268e9cef4c27afe8e84cc735b9393e", + "575840e30238ad15a053e839dccb119d25b2313c993eea232e21f4cae3e9d96c", "367cd7b15e6fc901a6951f53c1f967a3b8dcda7c42a3941fd3d53bbf0a00f197", + "418effee1ee915085ddf216efa280c0e745309ed628ead4ee6739d1cda01fd3f", "2e604278700519c146b1018501dbc362c10634fa17adf58547c3fed47bf884c8", + "1fcdb6a189d91af5d97b622ad675f0f7068af279f5d5017e9f4d176ac115d41a", "8e06a42ca8cff419b975923abd4a9d3bc610c0e9ddb000801356214909d58488", + "5d9fab817f6c6d12ee167709c5a3da4e493edda7731512af2dc380aa85ac0190", "fa559114f9beaa063d1ce744414f86dfda64bc60e8bcbafdb61c499247a52bde", + "db9f0735406bfcad656e488e32b787a0ea23465a93a9d14644ee3c0d445c89e3", "dfb3a3ee1dd3f9b533e1060ae224308f20e18f28c8384cf24997d69bcf1d3f70", + "e3ef9447850b3c2ba0ceda9b963f5d1c2eac63a5af6af1817530d0795a1c4423", "6237fd93c7f88a4124f9d761948e6bbc789e1a2a6af26f776eca17d4bfb7a03a", + "c1a355d22aea03cd2a1b9cb5e5fe8501e473974fd438f4d1e4763bf867dd69be", "fba0873887a851f9aee048a5d2317b2cfa6e18b638388044729f21bec78ec7a3", + "088c0dea51f18f958834f6b497897e4b6d38c55143078ec7faee206f557755d9", "0654b07f8017a9298c571f3584f81833faa7f6f66eea24ddffae975e469343e7", + "cb6c5e9380082498da979fb071d2d01f83b100274786e7561778749ff9491629", "56c554704f95d41beb6c597cff2edbff5b6bab1b9ac66a7c53c17f537076030f", + "9874599788e32588c13263afebf67c6417c928dc03d92b55abc5bf002c63d772", "4d641a6076e28068dab70fb1208b72b36ed110060612bdd0f22e4533ef14ef8a", + "fec3a139908ce3bc8912c1a32663d542a9aefc64f79555e3995a47c96b3cb0c9", "e5a634f0cb1501f6d046cebf75ea366c90597282d3c8173b357a0011eda2da7e", + "a2def9ed59e926130c729f73016877c42ff662d70f506951ab29250ad9d00d8a", "d442d403d549519344d1da0213b46bffec369dcd12b09c333022cc9e61531de6", + "96b650aa88c88b52fce18460a3ecaeb8763424c01e1558a144ec7c09ad4ac102", "27c31722a788d6be3f8760f71451e61ea602307db3265c3fb997156395e8f2dd", + "ad510b2bcf21dbe76cabb0f42463fcfa5b9c2dc2447285b09c84051e8d88adf0", "00cb4dcd93975105eb7d0663314a593c349e11cf1a0875ac94b05c809762c85a", + "9e77b5228c8d2209847e6b51b24d6419a04131f8abc8922b9193e125d75a787f", "4ba7d0465d2ec459646003ca653ca55eb4ae35b66b91a948d4e9543f14dfe6ba", + "e3d0036d6923b65e92a01db4bc783dd50db1f652dc4823fe118c2c6357248064", "8154b8c4b21bb643a1807e71258c31c67d689c6f4d7f4a8c7c1d4035e01702bd", + "374c824357ca517f3a701db15e4d4cb069f3f6cb1e1e514de2565421ea7567d6", "cc457ef8ee09b439b379fc59c4e8b852248c85d1180992444901ee5e647bf080", + "14d59abed19486cee73668522690a1bf7d2a90e4f6fda41efee196d658440c38", "a4a023f88be189d1d7a701e53b353b1f84282ee0b4774fa20c18f9746f64947e", + "48ec25d335c6f8af0b8d0314a40a2e2c6774441a617fd34e8914503be338ec39", "97f1835fadfd2b2acc74f2be6e3e3d0155617277043c56e17e0332e95d8a5af1", + "326312c81ef9d1d511ffb1f99b0b111032601c5426ab75a15215702857dcba87", "842808d82ca9b5c7fbee2e1bb62aa6dd2f73aefeec82988ffb4f1fc05cbd386b", + "f0323d7375f26ecf8b7dbfa22d82f0a36a4012f535744e302d17b3ebefe3280b", "dbe9b20107f898e628888a9a812aae66c9f2b8c92490ea14a4b53e52706141a7", + "b7ed07e3877e913ac15244e3dadeb41770cc11e762f189f60edd9c78fe6bce29", "8e5d15cbd83aff0ea244084cad9ecd47eb21fee60ee4c846510a34f05dc2f3de", + "4dd0822be686fd036d131707600dab32897a852b830e2b68b1393744f1e38c13", "02f9d7c454c7772feabfadd9a9e053100ae74a546863e658ca83dd729c828ac4", + "9fa066e419eb00f914d3c7a8019ebe3171f408cab8c6fe3afbe7ff870febc0b8", "fb8e3cbe8f7d27db7ba51ae17768ce537d7e9a0dd2949c71c93c459263b545b3", + "c9f2a4db3b9c6337c86d4636b3e795608ab8651e7949803ad57c92e5cd88c982", "e44a2314a7b11f6b7e46a65b252e562075d6f3402d892b3e68d71ee4fbe30cf4", + "2ac987b2b11ce18e6d263df6efaac28f039febe6873464667368d5e81da98a57", "67eb3a6a26f8b1f5dd1aec4dbe40b083aefb265b63c8e17f9fd7fede47a4a3f4", + "7524c16affe6d890f2c1da6e192a421a02b08e1ffe65379ebecf51c3c4d7bdc1", +} + +func benchmarkWrite(size int64, b *testing.B) { + var key [32]byte + data := make([]byte, size) + + h := New128(key) + b.SetBytes(size) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = h.Write(data) + } +} + +func BenchmarkWrite_8(b *testing.B) { benchmarkWrite(8, b) } +func BenchmarkWrite_16(b *testing.B) { benchmarkWrite(16, b) } +func BenchmarkWrite_64(b *testing.B) { benchmarkWrite(64, b) } +func BenchmarkWrite_1K(b *testing.B) { benchmarkWrite(1024, b) } +func BenchmarkWrite_8K(b *testing.B) { benchmarkWrite(8*1024, b) } + +func benchmarkSum256(size int64, b *testing.B) { + var key [32]byte + data := make([]byte, size) + + b.SetBytes(size) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Sum(data, key[:]) + } +} + +func BenchmarkSum256_8(b *testing.B) { benchmarkSum256(8, b) } +func BenchmarkSum256_16(b *testing.B) { benchmarkSum256(16, b) } +func BenchmarkSum256_64(b *testing.B) { benchmarkSum256(64, b) } +func BenchmarkSum256_1K(b *testing.B) { benchmarkSum256(1024, b) } +func BenchmarkSum256_8K(b *testing.B) { benchmarkSum256(8*1024, b) } +func BenchmarkSum256_1M(b *testing.B) { benchmarkSum256(1024*1024, b) } +func BenchmarkSum256_5M(b *testing.B) { benchmarkSum256(5*1024*1024, b) } +func BenchmarkSum256_10M(b *testing.B) { benchmarkSum256(10*1024*1024, b) } +func BenchmarkSum256_25M(b *testing.B) { benchmarkSum256(25*1024*1024, b) } + +func benchmarkParallel(b *testing.B, size int) { + + c := runtime.GOMAXPROCS(0) + + var key [32]byte + + rng := rand.New(rand.NewSource(0xabadc0cac01a)) + data := make([][]byte, c) + for i := range data { + data[i] = make([]byte, size) + rng.Read(data[i]) + } + + b.SetBytes(int64(size)) + b.ResetTimer() + + counter := uint64(0) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + index := atomic.AddUint64(&counter, 1) + Sum(data[int(index)%len(data)], key[:]) + } + }) +} + +func BenchmarkParallel_1M(b *testing.B) { benchmarkParallel(b, 1024*1024) } +func BenchmarkParallel_5M(b *testing.B) { benchmarkParallel(b, 5*1024*1024) } +func BenchmarkParallel_10M(b *testing.B) { benchmarkParallel(b, 10*1024*1024) } +func BenchmarkParallel_25M(b *testing.B) { benchmarkParallel(b, 25*1024*1024) } diff --git a/go/vt/vthash/metro/metro.go b/go/vt/vthash/metro/metro.go index 76482408fef..66214713604 100644 --- a/go/vt/vthash/metro/metro.go +++ b/go/vt/vthash/metro/metro.go @@ -21,6 +21,7 @@ package metro import ( "encoding/binary" "math/bits" + "unsafe" ) const k0 = 0xC83A91E1 @@ -69,6 +70,10 @@ func (m *Metro128) Write64(u uint64) { _, _ = m.Write(scratch[:8]) } +func (m *Metro128) WriteString(str string) (int, error) { + return m.Write(unsafe.Slice(unsafe.StringData(str), len(str))) +} + func (m *Metro128) Write(buffer []byte) (int, error) { ptr := buffer diff --git a/go/vt/vtorc/collection/collection.go b/go/vt/vtorc/collection/collection.go index cfc8116c9c5..0ef9a71b9a3 100644 --- a/go/vt/vtorc/collection/collection.go +++ b/go/vt/vtorc/collection/collection.go @@ -97,13 +97,6 @@ func init() { namedCollection = make(map[string](*Collection)) } -// StopMonitoring stops monitoring all the collections -func StopMonitoring() { - for _, q := range namedCollection { - q.StopAutoExpiration() - } -} - // CreateOrReturnCollection allows for creation of a new collection or // returning a pointer to an existing one given the name. This allows access // to the data structure from the api interface (http/api.go) and also when writing (inst). diff --git a/go/vt/vtorc/config/config.go b/go/vt/vtorc/config/config.go index 3d3dde96034..0d9a8d5b3d7 100644 --- a/go/vt/vtorc/config/config.go +++ b/go/vt/vtorc/config/config.go @@ -36,20 +36,14 @@ var configurationLoaded = make(chan bool) const ( HealthPollSeconds = 1 ActiveNodeExpireSeconds = 5 - MaintenanceOwner = "vtorc" AuditPageSize = 20 - MaintenancePurgeDays = 7 - MaintenanceExpireMinutes = 10 DebugMetricsIntervalSeconds = 10 StaleInstanceCoordinatesExpireSeconds = 60 DiscoveryMaxConcurrency = 300 // Number of goroutines doing hosts discovery DiscoveryQueueCapacity = 100000 DiscoveryQueueMaxStatisticsSize = 120 DiscoveryCollectionRetentionSeconds = 120 - HostnameResolveMethod = "default" UnseenInstanceForgetHours = 240 // Number of hours after which an unseen instance is forgotten - ExpiryHostnameResolvesMinutes = 60 // Number of minutes after which to expire hostname-resolves - CandidateInstanceExpireMinutes = 60 // Minutes after which a suggestion to use an instance as a candidate replica (to be preferably promoted on primary failover) is expired. FailureDetectionPeriodBlockMinutes = 60 // The time for which an instance's failure discovery is kept "active", so as to avoid concurrent "discoveries" of the instance's failure; this preceeds any recovery process, if any. ) @@ -67,6 +61,8 @@ var ( waitReplicasTimeout = 30 * time.Second topoInformationRefreshDuration = 15 * time.Second recoveryPollDuration = 1 * time.Second + ersEnabled = true + convertTabletsWithErrantGTIDs = false ) // RegisterFlags registers the flags required by VTOrc @@ -86,6 +82,8 @@ func RegisterFlags(fs *pflag.FlagSet) { fs.DurationVar(&waitReplicasTimeout, "wait-replicas-timeout", waitReplicasTimeout, "Duration for which to wait for replica's to respond when issuing RPCs") fs.DurationVar(&topoInformationRefreshDuration, "topo-information-refresh-duration", topoInformationRefreshDuration, "Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topology server") fs.DurationVar(&recoveryPollDuration, "recovery-poll-duration", recoveryPollDuration, "Timer duration on which VTOrc polls its database to run a recovery") + fs.BoolVar(&ersEnabled, "allow-emergency-reparent", ersEnabled, "Whether VTOrc should be allowed to run emergency reparent operation when it detects a dead primary") + fs.BoolVar(&convertTabletsWithErrantGTIDs, "change-tablets-with-errant-gtid-to-drained", convertTabletsWithErrantGTIDs, "Whether VTOrc should be changing the type of tablets with errant GTIDs to DRAINED") } // Configuration makes for vtorc configuration input, which can be provided by user via JSON formatted file. @@ -137,6 +135,26 @@ func UpdateConfigValuesFromFlags() { Config.RecoveryPollSeconds = int(recoveryPollDuration / time.Second) } +// ERSEnabled reports whether VTOrc is allowed to run ERS or not. +func ERSEnabled() bool { + return ersEnabled +} + +// SetERSEnabled sets the value for the ersEnabled variable. This should only be used from tests. +func SetERSEnabled(val bool) { + ersEnabled = val +} + +// ConvertTabletWithErrantGTIDs reports whether VTOrc is allowed to change the tablet type of tablets with errant GTIDs to DRAINED. +func ConvertTabletWithErrantGTIDs() bool { + return convertTabletsWithErrantGTIDs +} + +// SetConvertTabletWithErrantGTIDs sets the value for the convertTabletWithErrantGTIDs variable. This should only be used from tests. +func SetConvertTabletWithErrantGTIDs(val bool) { + convertTabletsWithErrantGTIDs = val +} + // LogConfigValues is used to log the config values. func LogConfigValues() { b, _ := json.MarshalIndent(Config, "", "\t") diff --git a/go/vt/vtorc/db/db.go b/go/vt/vtorc/db/db.go index 04150339c5c..d565c9bbdc4 100644 --- a/go/vt/vtorc/db/db.go +++ b/go/vt/vtorc/db/db.go @@ -72,7 +72,7 @@ func translateStatement(statement string) string { return sqlutils.ToSqlite3Dialect(statement) } -// registerVTOrcDeployment updates the vtorc_metadata table upon successful deployment +// registerVTOrcDeployment updates the vtorc_db_deployments table upon successful deployment func registerVTOrcDeployment(db *sql.DB) error { query := ` replace into vtorc_db_deployments ( @@ -82,7 +82,7 @@ func registerVTOrcDeployment(db *sql.DB) error { ) ` if _, err := execInternal(db, query, ""); err != nil { - log.Fatalf("Unable to write to vtorc_metadata: %+v", err) + log.Fatalf("Unable to write to vtorc_db_deployments: %+v", err) } return nil } @@ -153,14 +153,11 @@ func execInternal(db *sql.DB, query string, args ...any) (sql.Result, error) { // ExecVTOrc will execute given query on the vtorc backend database. func ExecVTOrc(query string, args ...any) (sql.Result, error) { - var err error - query = translateStatement(query) db, err := OpenVTOrc() if err != nil { return nil, err } - res, err := sqlutils.ExecNoPrepare(db, query, args...) - return res, err + return execInternal(db, query, args...) } // QueryVTOrcRowsMap @@ -188,15 +185,3 @@ func QueryVTOrc(query string, argsArray []any, onRow func(sqlutils.RowMap) error return err } - -// ReadTimeNow reads and returns the current timestamp as string. This is an unfortunate workaround -// to support both MySQL and SQLite in all possible timezones. SQLite only speaks UTC where MySQL has -// timezone support. By reading the time as string we get the database's de-facto notion of the time, -// which we can then feed back to it. -func ReadTimeNow() (timeNow string, err error) { - err = QueryVTOrc(`select now() as time_now`, nil, func(m sqlutils.RowMap) error { - timeNow = m.GetString("time_now") - return nil - }) - return timeNow, err -} diff --git a/go/vt/vtorc/db/generate_base.go b/go/vt/vtorc/db/generate_base.go index 88b26ed2c27..73238802920 100644 --- a/go/vt/vtorc/db/generate_base.go +++ b/go/vt/vtorc/db/generate_base.go @@ -23,6 +23,7 @@ DROP TABLE IF EXISTS database_instance `, ` CREATE TABLE database_instance ( + alias varchar(256) NOT NULL, hostname varchar(128) NOT NULL, port smallint NOT NULL, last_checked timestamp not null default (''), @@ -67,7 +68,6 @@ CREATE TABLE database_instance ( has_replication_credentials TINYint not null default 0, allow_tls TINYint not null default 0, semi_sync_enforced TINYint not null default 0, - instance_alias varchar(128) not null default '', version_comment varchar(128) NOT NULL DEFAULT '', major_version varchar(16) not null default '', binlog_row_image varchar(16) not null default '', @@ -87,14 +87,7 @@ CREATE TABLE database_instance ( semi_sync_primary_status TINYint NOT NULL DEFAULT 0, semi_sync_replica_status TINYint NOT NULL DEFAULT 0, semi_sync_primary_clients int NOT NULL DEFAULT 0, - replication_group_name VARCHAR(64) NOT NULL DEFAULT '', - replication_group_is_single_primary_mode TINYint NOT NULL DEFAULT 1, - replication_group_member_state VARCHAR(16) NOT NULL DEFAULT '', - replication_group_member_role VARCHAR(16) NOT NULL DEFAULT '', - replication_group_members text not null default '', - replication_group_primary_host varchar(128) NOT NULL DEFAULT '', - replication_group_primary_port smallint NOT NULL DEFAULT 0, - PRIMARY KEY (hostname,port) + PRIMARY KEY (alias) )`, ` CREATE INDEX last_checked_idx_database_instance ON database_instance(last_checked) @@ -103,48 +96,6 @@ CREATE INDEX last_checked_idx_database_instance ON database_instance(last_checke CREATE INDEX last_seen_idx_database_instance ON database_instance(last_seen) `, ` -DROP TABLE IF EXISTS database_instance_maintenance -`, - ` -CREATE TABLE database_instance_maintenance ( - database_instance_maintenance_id integer, - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - maintenance_active tinyint(4) DEFAULT NULL, - begin_timestamp timestamp NULL DEFAULT NULL, - end_timestamp timestamp NULL DEFAULT NULL, - owner varchar(128) NOT NULL, - reason text NOT NULL, - processing_node_hostname varchar(128) not null default '', - processing_node_token varchar(128) not null default '', - explicitly_bounded TINYint not null default 0, - PRIMARY KEY (database_instance_maintenance_id) -)`, - ` -CREATE UNIQUE INDEX maintenance_uidx_database_instance_maintenance ON database_instance_maintenance (maintenance_active, hostname, port) - `, - ` -DROP TABLE IF EXISTS database_instance_long_running_queries -`, - ` -CREATE TABLE database_instance_long_running_queries ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - process_id bigint(20) NOT NULL, - process_started_at timestamp not null default (''), - process_user varchar(16) NOT NULL, - process_host varchar(128) NOT NULL, - process_db varchar(128) NOT NULL, - process_command varchar(16) NOT NULL, - process_time_seconds int(11) NOT NULL, - process_state varchar(128) NOT NULL, - process_info varchar(1024) NOT NULL, - PRIMARY KEY (hostname,port,process_id) -)`, - ` -CREATE INDEX process_started_at_idx_database_instance_long_running_queries ON database_instance_long_running_queries (process_started_at) - `, - ` DROP TABLE IF EXISTS audit `, ` @@ -152,8 +103,7 @@ CREATE TABLE audit ( audit_id integer, audit_timestamp timestamp not null default (''), audit_type varchar(128) NOT NULL, - hostname varchar(128) NOT NULL DEFAULT '', - port smallint NOT NULL, + alias varchar(256) NOT NULL, message text NOT NULL, keyspace varchar(128) NOT NULL, shard varchar(128) NOT NULL, @@ -163,91 +113,7 @@ CREATE TABLE audit ( CREATE INDEX audit_timestamp_idx_audit ON audit (audit_timestamp) `, ` -CREATE INDEX host_port_idx_audit ON audit (hostname, port, audit_timestamp) - `, - ` -DROP TABLE IF EXISTS host_agent -`, - ` -CREATE TABLE host_agent ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - token varchar(128) NOT NULL, - last_submitted timestamp not null default (''), - last_checked timestamp NULL DEFAULT NULL, - last_seen timestamp NULL DEFAULT NULL, - mysql_port smallint DEFAULT NULL, - count_mysql_snapshots smallint NOT NULL, - PRIMARY KEY (hostname) -)`, - ` -CREATE INDEX token_idx_host_agent ON host_agent (token) - `, - ` -CREATE INDEX last_submitted_idx_host_agent ON host_agent (last_submitted) - `, - ` -CREATE INDEX last_checked_idx_host_agent ON host_agent (last_checked) - `, - ` -CREATE INDEX last_seen_idx_host_agent ON host_agent (last_seen) - `, - ` -DROP TABLE IF EXISTS agent_seed -`, - ` -CREATE TABLE agent_seed ( - agent_seed_id integer, - target_hostname varchar(128) NOT NULL, - source_hostname varchar(128) NOT NULL, - start_timestamp timestamp not null default (''), - end_timestamp timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - is_complete tinyint NOT NULL DEFAULT '0', - is_successful tinyint NOT NULL DEFAULT '0', - PRIMARY KEY (agent_seed_id) -)`, - ` -CREATE INDEX target_hostname_idx_agent_seed ON agent_seed (target_hostname,is_complete) - `, - ` -CREATE INDEX source_hostname_idx_agent_seed ON agent_seed (source_hostname,is_complete) - `, - ` -CREATE INDEX start_timestamp_idx_agent_seed ON agent_seed (start_timestamp) - `, - ` -CREATE INDEX is_complete_idx_agent_seed ON agent_seed (is_complete,start_timestamp) - `, - ` -CREATE INDEX is_successful_idx_agent_seed ON agent_seed (is_successful, start_timestamp) - `, - ` -DROP TABLE IF EXISTS agent_seed_state -`, - ` -CREATE TABLE agent_seed_state ( - agent_seed_state_id integer, - agent_seed_id int NOT NULL, - state_timestamp timestamp not null default (''), - state_action varchar(127) NOT NULL, - error_message varchar(255) NOT NULL, - PRIMARY KEY (agent_seed_state_id) -)`, - ` -CREATE INDEX agent_seed_idx_agent_seed_state ON agent_seed_state (agent_seed_id, state_timestamp) - `, - ` -DROP TABLE IF EXISTS hostname_resolve -`, - ` -CREATE TABLE hostname_resolve ( - hostname varchar(128) NOT NULL, - resolved_hostname varchar(128) NOT NULL, - resolved_timestamp timestamp not null default (''), - PRIMARY KEY (hostname) -)`, - ` -CREATE INDEX resolved_timestamp_idx_hostname_resolve ON hostname_resolve (resolved_timestamp) +CREATE INDEX alias_idx_audit ON audit (alias, audit_timestamp) `, ` DROP TABLE IF EXISTS active_node @@ -283,16 +149,14 @@ DROP TABLE IF EXISTS topology_recovery ` CREATE TABLE topology_recovery ( recovery_id integer, - hostname varchar(128) NOT NULL, - port smallint NOT NULL, + alias varchar(256) NOT NULL, in_active_period tinyint NOT NULL DEFAULT 0, start_active_period timestamp not null default (''), end_active_period_unixtime int, end_recovery timestamp NULL DEFAULT NULL, processing_node_hostname varchar(128) NOT NULL, processcing_node_token varchar(128) NOT NULL, - successor_hostname varchar(128) DEFAULT NULL, - successor_port smallint DEFAULT NULL, + successor_alias varchar(256) DEFAULT NULL, analysis varchar(128) not null default '', keyspace varchar(128) NOT NULL, shard varchar(128) NOT NULL, @@ -301,12 +165,9 @@ CREATE TABLE topology_recovery ( acknowledged TINYint NOT NULL DEFAULT 0, acknowledged_by varchar(128) not null default '', acknowledge_comment text not null default '', - participating_instances text not null default '', - lost_replicas text not null default '', all_errors text not null default '', acknowledged_at TIMESTAMP NULL, last_detection_id bigint not null default 0, - successor_alias varchar(128) DEFAULT NULL, uid varchar(128) not null default '', PRIMARY KEY (recovery_id) )`, @@ -317,20 +178,7 @@ CREATE INDEX in_active_start_period_idx_topology_recovery ON topology_recovery ( CREATE INDEX start_active_period_idx_topology_recovery ON topology_recovery (start_active_period) `, ` -CREATE UNIQUE INDEX hostname_port_active_period_uidx_topology_recovery ON topology_recovery (hostname, port, in_active_period, end_active_period_unixtime) - `, - ` -DROP TABLE IF EXISTS hostname_unresolve -`, - ` -CREATE TABLE hostname_unresolve ( - hostname varchar(128) NOT NULL, - unresolved_hostname varchar(128) NOT NULL, - last_registered timestamp not null default (''), - PRIMARY KEY (hostname) -)`, - ` -CREATE INDEX unresolved_hostname_idx_hostname_unresolve ON hostname_unresolve (unresolved_hostname) +CREATE UNIQUE INDEX alias_active_period_uidx_topology_recovery ON topology_recovery (alias, in_active_period, end_active_period_unixtime) `, ` DROP TABLE IF EXISTS database_instance_topology_history @@ -338,6 +186,7 @@ DROP TABLE IF EXISTS database_instance_topology_history ` CREATE TABLE database_instance_topology_history ( snapshot_unix_timestamp int NOT NULL, + alias varchar(256) NOT NULL, hostname varchar(128) NOT NULL, port smallint NOT NULL, source_host varchar(128) NOT NULL, @@ -345,7 +194,7 @@ CREATE TABLE database_instance_topology_history ( keyspace varchar(128) NOT NULL, shard varchar(128) NOT NULL, version varchar(128) not null default '', - PRIMARY KEY (snapshot_unix_timestamp, hostname, port) + PRIMARY KEY (snapshot_unix_timestamp, alias) )`, ` CREATE INDEX keyspace_shard_idx_database_instance_topology_history ON database_instance_topology_history (snapshot_unix_timestamp, keyspace, shard) @@ -355,38 +204,22 @@ DROP TABLE IF EXISTS candidate_database_instance `, ` CREATE TABLE candidate_database_instance ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, + alias varchar(256) NOT NULL, last_suggested timestamp not null default (''), priority TINYINT SIGNED NOT NULL DEFAULT 1, promotion_rule text check(promotion_rule in ('must', 'prefer', 'neutral', 'prefer_not', 'must_not')) NOT NULL DEFAULT 'neutral', - PRIMARY KEY (hostname, port) + PRIMARY KEY (alias) )`, ` CREATE INDEX last_suggested_idx_candidate_database_instance ON candidate_database_instance (last_suggested) `, ` -DROP TABLE IF EXISTS database_instance_downtime -`, - ` -CREATE TABLE database_instance_downtime ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - downtime_active tinyint(4) DEFAULT NULL, - begin_timestamp timestamp default (''), - end_timestamp timestamp NULL DEFAULT NULL, - owner varchar(128) NOT NULL, - reason text NOT NULL, - PRIMARY KEY (hostname, port) -)`, - ` DROP TABLE IF EXISTS topology_failure_detection `, ` CREATE TABLE topology_failure_detection ( detection_id integer, - hostname varchar(128) NOT NULL, - port smallint NOT NULL, + alias varchar(256) NOT NULL, in_active_period tinyint NOT NULL DEFAULT '0', start_active_period timestamp not null default (''), end_active_period_unixtime int NOT NULL, @@ -403,100 +236,17 @@ CREATE TABLE topology_failure_detection ( CREATE INDEX in_active_start_period_idx_topology_failure_detection ON topology_failure_detection (in_active_period, start_active_period) `, ` -DROP TABLE IF EXISTS hostname_resolve_history -`, - ` -CREATE TABLE hostname_resolve_history ( - resolved_hostname varchar(128) NOT NULL, - hostname varchar(128) NOT NULL, - resolved_timestamp timestamp not null default (''), - PRIMARY KEY (resolved_hostname) -)`, - ` -CREATE INDEX hostname_idx_hostname_resolve_history ON hostname_resolve_history (hostname) - `, - ` -CREATE INDEX resolved_timestamp_idx_hostname_resolve_history ON hostname_resolve_history (resolved_timestamp) - `, - ` -DROP TABLE IF EXISTS hostname_unresolve_history -`, - ` -CREATE TABLE hostname_unresolve_history ( - unresolved_hostname varchar(128) NOT NULL, - hostname varchar(128) NOT NULL, - last_registered timestamp not null default (''), - PRIMARY KEY (unresolved_hostname) -)`, - ` -CREATE INDEX hostname_idx_hostname_unresolve_history ON hostname_unresolve_history (hostname) - `, - ` -CREATE INDEX last_registered_idx_hostname_unresolve_history ON hostname_unresolve_history (last_registered) - `, - ` -DROP TABLE IF EXISTS primary_position_equivalence -`, - ` -CREATE TABLE primary_position_equivalence ( - equivalence_id integer, - primary1_hostname varchar(128) NOT NULL, - primary1_port smallint NOT NULL, - primary1_binary_log_file varchar(128) NOT NULL, - primary1_binary_log_pos bigint NOT NULL, - primary2_hostname varchar(128) NOT NULL, - primary2_port smallint NOT NULL, - primary2_binary_log_file varchar(128) NOT NULL, - primary2_binary_log_pos bigint NOT NULL, - last_suggested timestamp not null default (''), - PRIMARY KEY (equivalence_id) -)`, - ` -CREATE UNIQUE INDEX equivalence_uidx_primary_position_equivalence ON primary_position_equivalence (primary1_hostname, primary1_port, primary1_binary_log_file, primary1_binary_log_pos, primary2_hostname, primary2_port) - `, - ` -CREATE INDEX primary2_idx_primary_position_equivalence ON primary_position_equivalence (primary2_hostname, primary2_port, primary2_binary_log_file, primary2_binary_log_pos) - `, - ` -CREATE INDEX last_suggested_idx_primary_position_equivalence ON primary_position_equivalence (last_suggested) - `, - ` -DROP TABLE IF EXISTS async_request -`, - ` -CREATE TABLE async_request ( - request_id integer, - command varchar(128) not null, - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - destination_hostname varchar(128) NOT NULL, - destination_port smallint NOT NULL, - pattern text NOT NULL, - gtid_hint varchar(32) not null, - begin_timestamp timestamp NULL DEFAULT NULL, - end_timestamp timestamp NULL DEFAULT NULL, - story text NOT NULL, - PRIMARY KEY (request_id) -)`, - ` -CREATE INDEX begin_timestamp_idx_async_request ON async_request (begin_timestamp) - `, - ` -CREATE INDEX end_timestamp_idx_async_request ON async_request (end_timestamp) - `, - ` DROP TABLE IF EXISTS blocked_topology_recovery `, ` CREATE TABLE blocked_topology_recovery ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, + alias varchar(256) NOT NULL, keyspace varchar(128) NOT NULL, shard varchar(128) NOT NULL, analysis varchar(128) NOT NULL, last_blocked_timestamp timestamp not null default (''), blocking_recovery_id bigint, - PRIMARY KEY (hostname, port) + PRIMARY KEY (alias) )`, ` CREATE INDEX keyspace_shard_blocked_idx_blocked_topology_recovery ON blocked_topology_recovery (keyspace, shard, last_blocked_timestamp) @@ -506,11 +256,10 @@ DROP TABLE IF EXISTS database_instance_last_analysis `, ` CREATE TABLE database_instance_last_analysis ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, + alias varchar(256) NOT NULL, analysis_timestamp timestamp not null default (''), analysis varchar(128) NOT NULL, - PRIMARY KEY (hostname, port) + PRIMARY KEY (alias) )`, ` CREATE INDEX analysis_timestamp_idx_database_instance_last_analysis ON database_instance_last_analysis (analysis_timestamp) @@ -521,8 +270,7 @@ DROP TABLE IF EXISTS database_instance_analysis_changelog ` CREATE TABLE database_instance_analysis_changelog ( changelog_id integer, - hostname varchar(128) NOT NULL, - port smallint NOT NULL, + alias varchar(256) NOT NULL, analysis_timestamp timestamp not null default (''), analysis varchar(128) NOT NULL, PRIMARY KEY (changelog_id) @@ -551,76 +299,6 @@ CREATE INDEX first_seen_active_idx_node_health_history ON node_health_history (f CREATE UNIQUE INDEX hostname_token_idx_node_health_history ON node_health_history (hostname, token) `, ` -DROP TABLE IF EXISTS database_instance_coordinates_history -`, - ` -CREATE TABLE database_instance_coordinates_history ( - history_id integer, - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - recorded_timestamp timestamp not null default (''), - binary_log_file varchar(128) NOT NULL, - binary_log_pos bigint NOT NULL, - relay_log_file varchar(128) NOT NULL, - relay_log_pos bigint NOT NULL, - last_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - PRIMARY KEY (history_id) -)`, - ` -CREATE INDEX hostname_port_recorded_idx_database_instance_coordinates_history ON database_instance_coordinates_history (hostname, port, recorded_timestamp) - `, - ` -CREATE INDEX recorded_timestmp_idx_database_instance_coordinates_history ON database_instance_coordinates_history (recorded_timestamp) - `, - ` -DROP TABLE IF EXISTS database_instance_binlog_files_history -`, - ` -CREATE TABLE database_instance_binlog_files_history ( - history_id integer, - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - binary_log_file varchar(128) NOT NULL, - binary_log_pos bigint NOT NULL, - first_seen timestamp not null default (''), - last_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - PRIMARY KEY (history_id) -)`, - ` -CREATE UNIQUE INDEX hostname_port_file_idx_database_instance_binlog_files_history ON database_instance_binlog_files_history (hostname, port, binary_log_file) - `, - ` -CREATE INDEX last_seen_idx_database_instance_binlog_files_history ON database_instance_binlog_files_history (last_seen) - `, - ` -DROP TABLE IF EXISTS database_instance_recent_relaylog_history -`, - ` -CREATE TABLE database_instance_recent_relaylog_history ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - current_relay_log_file varchar(128) NOT NULL, - current_relay_log_pos bigint NOT NULL, - current_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - prev_relay_log_file varchar(128) NOT NULL, - prev_relay_log_pos bigint NOT NULL, - prev_seen timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - PRIMARY KEY (hostname, port) -)`, - ` -CREATE INDEX current_seen_idx_database_instance_recent_relaylog_history ON database_instance_recent_relaylog_history (current_seen) - `, - ` -DROP TABLE IF EXISTS vtorc_metadata -`, - ` -CREATE TABLE vtorc_metadata ( - anchor tinyint NOT NULL, - last_deployed_version varchar(128) NOT NULL, - last_deployed_timestamp timestamp NOT NULL, - PRIMARY KEY (anchor) -)`, - ` DROP TABLE IF EXISTS vtorc_db_deployments `, ` @@ -649,102 +327,15 @@ CREATE TABLE topology_recovery_steps ( PRIMARY KEY (recovery_step_id) )`, ` -DROP TABLE IF EXISTS raft_store -`, - ` -CREATE TABLE raft_store ( - store_id integer, - store_key varbinary(512) not null, - store_value blob not null, - PRIMARY KEY (store_id) -)`, - ` -CREATE INDEX store_key_idx_raft_store ON raft_store (store_key) - `, - ` -DROP TABLE IF EXISTS raft_log -`, - ` -CREATE TABLE raft_log ( - log_index integer, - term bigint not null, - log_type int not null, - data blob not null, - PRIMARY KEY (log_index) -)`, - ` -DROP TABLE IF EXISTS raft_snapshot -`, - ` -CREATE TABLE raft_snapshot ( - snapshot_id integer, - snapshot_name varchar(128) NOT NULL, - snapshot_meta varchar(4096) NOT NULL, - created_at timestamp not null default (''), - PRIMARY KEY (snapshot_id) -)`, - ` -CREATE UNIQUE INDEX snapshot_name_uidx_raft_snapshot ON raft_snapshot (snapshot_name) - `, - ` -DROP TABLE IF EXISTS database_instance_peer_analysis -`, - ` -CREATE TABLE database_instance_peer_analysis ( - peer varchar(128) NOT NULL, - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - analysis_timestamp timestamp not null default (''), - analysis varchar(128) NOT NULL, - PRIMARY KEY (peer, hostname, port) -)`, - ` -DROP TABLE IF EXISTS database_instance_tls -`, - ` -CREATE TABLE database_instance_tls ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - required tinyint NOT NULL DEFAULT 0, - PRIMARY KEY (hostname,port) -)`, - ` -DROP TABLE IF EXISTS hostname_ips -`, - ` -CREATE TABLE hostname_ips ( - hostname varchar(128) NOT NULL, - ipv4 varchar(128) NOT NULL, - ipv6 varchar(128) NOT NULL, - last_updated timestamp not null default (''), - PRIMARY KEY (hostname) -)`, - ` -DROP TABLE IF EXISTS database_instance_tags -`, - ` -CREATE TABLE database_instance_tags ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, - tag_name varchar(128) NOT NULL, - tag_value varchar(128) NOT NULL, - last_updated timestamp not null default (''), - PRIMARY KEY (hostname, port, tag_name) -)`, - ` -CREATE INDEX tag_name_idx_database_instance_tags ON database_instance_tags (tag_name) - `, - ` DROP TABLE IF EXISTS database_instance_stale_binlog_coordinates `, ` CREATE TABLE database_instance_stale_binlog_coordinates ( - hostname varchar(128) NOT NULL, - port smallint NOT NULL, + alias varchar(256) NOT NULL, binary_log_file varchar(128) NOT NULL, binary_log_pos bigint NOT NULL, first_seen timestamp not null default (''), - PRIMARY KEY (hostname, port) + PRIMARY KEY (alias) )`, ` CREATE INDEX first_seen_idx_database_instance_stale_binlog_coordinates ON database_instance_stale_binlog_coordinates (first_seen) @@ -763,8 +354,7 @@ CREATE TABLE vitess_tablet ( tablet_type smallint(5) NOT NULL, primary_timestamp timestamp NOT NULL, info varchar(512) NOT NULL, - UNIQUE (alias), - PRIMARY KEY (hostname, port) + PRIMARY KEY (alias) )`, ` CREATE INDEX cell_idx_vitess_tablet ON vitess_tablet (cell) @@ -783,16 +373,18 @@ CREATE TABLE vitess_keyspace ( PRIMARY KEY (keyspace) )`, ` -CREATE INDEX source_host_port_idx_database_instance_database_instance on database_instance (source_host, source_port) - `, - ` -CREATE INDEX active_timestamp_idx_database_instance_maintenance on database_instance_maintenance (maintenance_active, begin_timestamp) - `, +DROP TABLE IF EXISTS vitess_shard +`, ` -CREATE INDEX active_end_timestamp_idx_database_instance_maintenance on database_instance_maintenance (maintenance_active, end_timestamp) - `, +CREATE TABLE vitess_shard ( + keyspace varchar(128) NOT NULL, + shard varchar(128) NOT NULL, + primary_alias varchar(512) NOT NULL, + primary_timestamp varchar(512) NOT NULL, + PRIMARY KEY (keyspace, shard) +)`, ` -CREATE INDEX last_registered_idx_hostname_unresolve on hostname_unresolve (last_registered) +CREATE INDEX source_host_port_idx_database_instance_database_instance on database_instance (source_host, source_port) `, ` CREATE INDEX keyspace_shard_in_active_idx_topology_recovery on topology_recovery (keyspace, shard, in_active_period) @@ -807,7 +399,7 @@ CREATE INDEX acknowledged_idx_topology_recovery on topology_recovery (acknowledg CREATE INDEX last_blocked_idx_blocked_topology_recovery on blocked_topology_recovery (last_blocked_timestamp) `, ` -CREATE INDEX instance_timestamp_idx_database_instance_analysis_changelog on database_instance_analysis_changelog (hostname, port, analysis_timestamp) +CREATE INDEX instance_timestamp_idx_database_instance_analysis_changelog on database_instance_analysis_changelog (alias, analysis_timestamp) `, ` CREATE INDEX last_detection_idx_topology_recovery on topology_recovery (last_detection_id) @@ -822,9 +414,6 @@ CREATE INDEX uid_idx_topology_recovery ON topology_recovery(uid) CREATE INDEX recovery_uid_idx_topology_recovery_steps ON topology_recovery_steps(recovery_uid) `, ` -CREATE INDEX end_timestamp_idx_database_instance_downtime ON database_instance_downtime(end_timestamp) - `, - ` -CREATE UNIQUE INDEX host_port_active_recoverable_uidx_topology_failure_detection ON topology_failure_detection (hostname, port, in_active_period, end_active_period_unixtime, is_actionable) +CREATE UNIQUE INDEX alias_active_recoverable_uidx_topology_failure_detection ON topology_failure_detection (alias, in_active_period, end_active_period_unixtime, is_actionable) `, } diff --git a/go/vt/vtorc/discovery/aggregated.go b/go/vt/vtorc/discovery/aggregated.go index 179d4512d19..37d965fa51c 100644 --- a/go/vt/vtorc/discovery/aggregated.go +++ b/go/vt/vtorc/discovery/aggregated.go @@ -25,7 +25,7 @@ import ( ) // AggregatedDiscoveryMetrics contains aggregated metrics for instance discovery. -// Called from api/discovery-metrics-aggregated/:seconds +// Called from api/discovery-metrics-aggregated?seconds=xxx type AggregatedDiscoveryMetrics struct { FirstSeen time.Time // timestamp of the first data seen LastSeen time.Time // timestamp of the last data seen @@ -34,6 +34,7 @@ type AggregatedDiscoveryMetrics struct { CountDistinctFailedInstanceKeys int // number of distinct Instances which failed FailedDiscoveries uint64 // number of failed discoveries SuccessfulDiscoveries uint64 // number of successful discoveries + InstancePollSecondsExceeded uint64 // number of times discoverInstance exceeded InstancePollSeconds MeanTotalSeconds float64 MeanBackendSeconds float64 MeanInstanceSeconds float64 @@ -75,17 +76,18 @@ func aggregate(results []collection.Metric) AggregatedDiscoveryMetrics { type hostKey string type timerKey string const ( - FailedDiscoveries counterKey = "FailedDiscoveries" - Discoveries = "Discoveries" - InstanceKeys hostKey = "InstanceKeys" - OkInstanceKeys = "OkInstanceKeys" - FailedInstanceKeys = "FailedInstanceKeys" - TotalSeconds timerKey = "TotalSeconds" - BackendSeconds = "BackendSeconds" - InstanceSeconds = "InstanceSeconds" - FailedTotalSeconds = "FailedTotalSeconds" - FailedBackendSeconds = "FailedBackendSeconds" - FailedInstanceSeconds = "FailedInstanceSeconds" + FailedDiscoveries counterKey = "FailedDiscoveries" + Discoveries = "Discoveries" + InstancePollSecondsExceeded = "instancePollSecondsExceeded" + InstanceKeys hostKey = "InstanceKeys" + OkInstanceKeys = "OkInstanceKeys" + FailedInstanceKeys = "FailedInstanceKeys" + TotalSeconds timerKey = "TotalSeconds" + BackendSeconds = "BackendSeconds" + InstanceSeconds = "InstanceSeconds" + FailedTotalSeconds = "FailedTotalSeconds" + FailedBackendSeconds = "FailedBackendSeconds" + FailedInstanceSeconds = "FailedInstanceSeconds" ) counters := make(map[counterKey]uint64) // map of string based counters @@ -93,7 +95,7 @@ func aggregate(results []collection.Metric) AggregatedDiscoveryMetrics { timings := make(map[timerKey]stats.Float64Data) // map of string based float64 values // initialise counters - for _, v := range []counterKey{FailedDiscoveries, Discoveries} { + for _, v := range []counterKey{FailedDiscoveries, Discoveries, InstancePollSecondsExceeded} { counters[v] = 0 } // initialise names @@ -119,18 +121,18 @@ func aggregate(results []collection.Metric) AggregatedDiscoveryMetrics { // different names x := names[InstanceKeys] - x[v.InstanceKey.String()] = 1 // Value doesn't matter + x[v.TabletAlias] = 1 // Value doesn't matter names[InstanceKeys] = x if v.Err == nil { // ok names x := names[OkInstanceKeys] - x[v.InstanceKey.String()] = 1 // Value doesn't matter + x[v.TabletAlias] = 1 // Value doesn't matter names[OkInstanceKeys] = x } else { // failed names x := names[FailedInstanceKeys] - x[v.InstanceKey.String()] = 1 // Value doesn't matter + x[v.TabletAlias] = 1 // Value doesn't matter names[FailedInstanceKeys] = x } @@ -140,6 +142,8 @@ func aggregate(results []collection.Metric) AggregatedDiscoveryMetrics { counters[FailedDiscoveries]++ } + counters[InstancePollSecondsExceeded] += v.InstancePollSecondsDurationCount + // All timings timings[TotalSeconds] = append(timings[TotalSeconds], v.TotalLatency.Seconds()) timings[BackendSeconds] = append(timings[BackendSeconds], v.BackendLatency.Seconds()) @@ -161,6 +165,7 @@ func aggregate(results []collection.Metric) AggregatedDiscoveryMetrics { CountDistinctFailedInstanceKeys: len(names[FailedInstanceKeys]), FailedDiscoveries: counters[FailedDiscoveries], SuccessfulDiscoveries: counters[Discoveries], + InstancePollSecondsExceeded: counters[InstancePollSecondsExceeded], MeanTotalSeconds: mean(timings[TotalSeconds]), MeanBackendSeconds: mean(timings[BackendSeconds]), MeanInstanceSeconds: mean(timings[InstanceSeconds]), diff --git a/go/vt/vtorc/discovery/metric.go b/go/vt/vtorc/discovery/metric.go index bd463f8f960..c322739502d 100644 --- a/go/vt/vtorc/discovery/metric.go +++ b/go/vt/vtorc/discovery/metric.go @@ -20,18 +20,17 @@ package discovery import ( "time" - - "vitess.io/vitess/go/vt/vtorc/inst" ) // Metric holds a set of information of instance discovery metrics type Metric struct { - Timestamp time.Time // time the collection was taken - InstanceKey inst.InstanceKey // instance being monitored - BackendLatency time.Duration // time taken talking to the backend - InstanceLatency time.Duration // time taken talking to the instance - TotalLatency time.Duration // total time taken doing the discovery - Err error // error (if applicable) doing the discovery process + Timestamp time.Time // time the collection was taken + TabletAlias string // instance being monitored + BackendLatency time.Duration // time taken talking to the backend + InstanceLatency time.Duration // time taken talking to the instance + TotalLatency time.Duration // total time taken doing the discovery + Err error // error (if applicable) doing the discovery process + InstancePollSecondsDurationCount uint64 // total numbers of times discoverInstance exceeded InstancePollSeconds } // When did the metric happen diff --git a/go/vt/vtorc/discovery/metric_json.go b/go/vt/vtorc/discovery/metric_json.go deleted file mode 100644 index eb204f28043..00000000000 --- a/go/vt/vtorc/discovery/metric_json.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - Copyright 2017 Simon J Mudd - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package discovery - -// Collect discovery metrics and manage their storage and retrieval for monitoring purposes. - -import ( - "errors" - "fmt" - "time" - - "vitess.io/vitess/go/vt/vtorc/collection" -) - -// formattedFloat is to force the JSON output to show 3 decimal places -type formattedFloat float64 - -func (m formattedFloat) String() string { - return fmt.Sprintf("%.3f", m) -} - -// MetricJSON holds a structure which represents some discovery latency information -type MetricJSON struct { - Timestamp time.Time - Hostname string - Port int - BackendLatencySeconds formattedFloat - InstanceLatencySeconds formattedFloat - TotalLatencySeconds formattedFloat - Err error -} - -// JSONSince returns an API response of discovery metric collection information -// in a printable JSON format. -func JSONSince(c *collection.Collection, t time.Time) ([](MetricJSON), error) { - if c == nil { - return nil, errors.New("MetricCollection.JSONSince: c == nil") - } - raw, err := c.Since(t) - if err != nil { - return nil, err - } - - // build up JSON response for each Metric we received - var s []MetricJSON - for i := range raw { - m := raw[i].(*Metric) // convert back to a real Metric rather than collection.Metric interface - mj := MetricJSON{ - Timestamp: m.Timestamp, - Hostname: m.InstanceKey.Hostname, - Port: m.InstanceKey.Port, - BackendLatencySeconds: formattedFloat(m.BackendLatency.Seconds()), - InstanceLatencySeconds: formattedFloat(m.InstanceLatency.Seconds()), - TotalLatencySeconds: formattedFloat(m.TotalLatency.Seconds()), - Err: m.Err, - } - s = append(s, mj) - } - return s, nil -} diff --git a/go/vt/vtorc/discovery/queue.go b/go/vt/vtorc/discovery/queue.go index 50d5c276e4e..95751c6ae25 100644 --- a/go/vt/vtorc/discovery/queue.go +++ b/go/vt/vtorc/discovery/queue.go @@ -31,7 +31,6 @@ import ( "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vtorc/config" - "vitess.io/vitess/go/vt/vtorc/inst" ) // QueueMetric contains the queue's active and queued sizes @@ -46,9 +45,9 @@ type Queue struct { name string done chan struct{} - queue chan inst.InstanceKey - queuedKeys map[inst.InstanceKey]time.Time - consumedKeys map[inst.InstanceKey]time.Time + queue chan string + queuedKeys map[string]time.Time + consumedKeys map[string]time.Time metrics []QueueMetric } @@ -62,13 +61,6 @@ func init() { discoveryQueue = make(map[string](*Queue)) } -// StopMonitoring stops monitoring all the queues -func StopMonitoring() { - for _, q := range discoveryQueue { - q.stopMonitoring() - } -} - // CreateOrReturnQueue allows for creation of a new discovery queue or // returning a pointer to an existing one given the name. func CreateOrReturnQueue(name string) *Queue { @@ -80,9 +72,9 @@ func CreateOrReturnQueue(name string) *Queue { q := &Queue{ name: name, - queuedKeys: make(map[inst.InstanceKey]time.Time), - consumedKeys: make(map[inst.InstanceKey]time.Time), - queue: make(chan inst.InstanceKey, config.DiscoveryQueueCapacity), + queuedKeys: make(map[string]time.Time), + consumedKeys: make(map[string]time.Time), + queue: make(chan string, config.DiscoveryQueueCapacity), } go q.startMonitoring() @@ -106,11 +98,6 @@ func (q *Queue) startMonitoring() { } } -// Stop monitoring the queue -func (q *Queue) stopMonitoring() { - q.done <- struct{}{} -} - // do a check of the entries in the queue, both those active and queued func (q *Queue) collectStatistics() { q.Lock() @@ -134,7 +121,7 @@ func (q *Queue) QueueLen() int { // Push enqueues a key if it is not on a queue and is not being // processed; silently returns otherwise. -func (q *Queue) Push(key inst.InstanceKey) { +func (q *Queue) Push(key string) { q.Lock() defer q.Unlock() @@ -154,7 +141,7 @@ func (q *Queue) Push(key inst.InstanceKey) { // Consume fetches a key to process; blocks if queue is empty. // Release must be called once after Consume. -func (q *Queue) Consume() inst.InstanceKey { +func (q *Queue) Consume() string { q.Lock() queue := q.queue q.Unlock() @@ -179,7 +166,7 @@ func (q *Queue) Consume() inst.InstanceKey { // Release removes a key from a list of being processed keys // which allows that key to be pushed into the queue again. -func (q *Queue) Release(key inst.InstanceKey) { +func (q *Queue) Release(key string) { q.Lock() defer q.Unlock() diff --git a/go/vt/vtorc/inst/analysis.go b/go/vt/vtorc/inst/analysis.go index 22873747eb3..8707e6ba828 100644 --- a/go/vt/vtorc/inst/analysis.go +++ b/go/vt/vtorc/inst/analysis.go @@ -18,8 +18,6 @@ package inst import ( "encoding/json" - "fmt" - "strings" "time" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -32,6 +30,9 @@ type StructureAnalysisCode string const ( NoProblem AnalysisCode = "NoProblem" ClusterHasNoPrimary AnalysisCode = "ClusterHasNoPrimary" + PrimaryTabletDeleted AnalysisCode = "PrimaryTabletDeleted" + InvalidPrimary AnalysisCode = "InvalidPrimary" + InvalidReplica AnalysisCode = "InvalidReplica" DeadPrimaryWithoutReplicas AnalysisCode = "DeadPrimaryWithoutReplicas" DeadPrimary AnalysisCode = "DeadPrimary" DeadPrimaryAndReplicas AnalysisCode = "DeadPrimaryAndReplicas" @@ -57,6 +58,7 @@ const ( PrimaryWithoutReplicas AnalysisCode = "PrimaryWithoutReplicas" BinlogServerFailingToConnectToPrimary AnalysisCode = "BinlogServerFailingToConnectToPrimary" GraceFulPrimaryTakeover AnalysisCode = "GracefulPrimaryTakeover" + ErrantGTIDDetected AnalysisCode = "ErrantGTIDDetected" ) const ( @@ -72,38 +74,14 @@ const ( NotEnoughValidSemiSyncReplicasStructureWarning StructureAnalysisCode = "NotEnoughValidSemiSyncReplicasStructureWarning" ) -type InstanceAnalysis struct { - key *InstanceKey - analysis AnalysisCode -} - -func NewInstanceAnalysis(instanceKey *InstanceKey, analysis AnalysisCode) *InstanceAnalysis { - return &InstanceAnalysis{ - key: instanceKey, - analysis: analysis, - } -} - -func (instanceAnalysis *InstanceAnalysis) String() string { - return fmt.Sprintf("%s/%s", instanceAnalysis.key.StringCode(), string(instanceAnalysis.analysis)) -} - // PeerAnalysisMap indicates the number of peers agreeing on an analysis. // Key of this map is a InstanceAnalysis.String() type PeerAnalysisMap map[string]int type ReplicationAnalysisHints struct { - IncludeDowntimed bool - IncludeNoProblem bool - AuditAnalysis bool + AuditAnalysis bool } -const ( - ForcePrimaryFailoverCommandHint string = "force-primary-failover" - ForcePrimaryTakeoverCommandHint string = "force-primary-takeover" - GracefulPrimaryTakeoverCommandHint string = "graceful-primary-takeover" -) - type AnalysisInstanceType string const ( @@ -114,15 +92,19 @@ const ( // ReplicationAnalysis notes analysis on replication chain status, per instance type ReplicationAnalysis struct { - AnalyzedInstanceKey InstanceKey - AnalyzedInstancePrimaryKey InstanceKey - TabletType topodatapb.TabletType - PrimaryTimeStamp time.Time - ClusterDetails ClusterInfo - AnalyzedInstanceDataCenter string - AnalyzedInstanceRegion string - AnalyzedKeyspace string - AnalyzedShard string + AnalyzedInstanceHostname string + AnalyzedInstancePort int + AnalyzedInstanceAlias string + AnalyzedInstancePrimaryAlias string + TabletType topodatapb.TabletType + PrimaryTimeStamp time.Time + ClusterDetails ClusterInfo + AnalyzedInstanceDataCenter string + AnalyzedInstanceRegion string + AnalyzedKeyspace string + AnalyzedShard string + // ShardPrimaryTermTimestamp is the primary term start time stored in the shard record. + ShardPrimaryTermTimestamp string AnalyzedInstancePhysicalEnvironment string AnalyzedInstanceBinlogCoordinates BinlogCoordinates IsPrimary bool @@ -134,17 +116,13 @@ type ReplicationAnalysis struct { CountValidReplicas uint CountValidReplicatingReplicas uint CountReplicasFailingToConnectToPrimary uint - CountDowntimedReplicas uint ReplicationDepth uint IsFailingToConnectToPrimary bool ReplicationStopped bool + ErrantGTID string Analysis AnalysisCode Description string StructureAnalysis []StructureAnalysisCode - IsDowntimed bool - IsReplicasDowntimed bool // as good as downtimed because all replicas are downtimed AND analysis is all about the replicas (e.e. AllPrimaryReplicasNotReplicating) - DowntimeEndTimestamp string - DowntimeRemainingSeconds int IsBinlogServer bool OracleGTIDImmediateTopology bool MariaDBGTIDImmediateTopology bool @@ -165,24 +143,14 @@ type ReplicationAnalysis struct { IsActionableRecovery bool ProcessingNodeHostname string ProcessingNodeToken string - CountAdditionalAgreeingNodes int StartActivePeriod string - SkippableDueToDowntime bool GTIDMode string MinReplicaGTIDMode string MaxReplicaGTIDMode string MaxReplicaGTIDErrant string - CommandHint string IsReadOnly bool } -type AnalysisMap map[string](*ReplicationAnalysis) - -type ReplicationAnalysisChangelog struct { - AnalyzedInstanceKey InstanceKey - Changelog []string -} - func (replicationAnalysis *ReplicationAnalysis) MarshalJSON() ([]byte, error) { i := struct { ReplicationAnalysis @@ -192,18 +160,6 @@ func (replicationAnalysis *ReplicationAnalysis) MarshalJSON() ([]byte, error) { return json.Marshal(i) } -// AnalysisString returns a human friendly description of all analysis issues -func (replicationAnalysis *ReplicationAnalysis) AnalysisString() string { - result := []string{} - if replicationAnalysis.Analysis != NoProblem { - result = append(result, string(replicationAnalysis.Analysis)) - } - for _, structureAnalysis := range replicationAnalysis.StructureAnalysis { - result = append(result, string(structureAnalysis)) - } - return strings.Join(result, ", ") -} - // Get a string description of the analyzed instance type (primary? co-primary? intermediate-primary?) func (replicationAnalysis *ReplicationAnalysis) GetAnalysisInstanceType() AnalysisInstanceType { if replicationAnalysis.IsCoPrimary { diff --git a/go/vt/vtorc/inst/analysis_dao.go b/go/vt/vtorc/inst/analysis_dao.go index 9e365b71cce..25082f133da 100644 --- a/go/vt/vtorc/inst/analysis_dao.go +++ b/go/vt/vtorc/inst/analysis_dao.go @@ -22,6 +22,7 @@ import ( "vitess.io/vitess/go/vt/external/golib/sqlutils" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/topo/topoproto" "google.golang.org/protobuf/encoding/prototext" @@ -37,13 +38,11 @@ import ( "github.com/rcrowley/go-metrics" ) -var analysisChangeWriteAttemptCounter = metrics.NewCounter() var analysisChangeWriteCounter = metrics.NewCounter() var recentInstantAnalysis *cache.Cache func init() { - _ = metrics.Register("analysis.change.write.attempt", analysisChangeWriteAttemptCounter) _ = metrics.Register("analysis.change.write", analysisChangeWriteCounter) go initializeAnalysisDaoPostConfiguration() @@ -57,13 +56,20 @@ func initializeAnalysisDaoPostConfiguration() { type clusterAnalysis struct { hasClusterwideAction bool - primaryKey *InstanceKey + totalTablets int + primaryAlias string durability reparentutil.Durabler } // GetReplicationAnalysis will check for replication problems (dead primary; unreachable primary; etc) -func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAnalysisHints) ([]ReplicationAnalysis, error) { - result := []ReplicationAnalysis{} +func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAnalysisHints) ([]*ReplicationAnalysis, error) { + var result []*ReplicationAnalysis + appendAnalysis := func(analysis *ReplicationAnalysis) { + if analysis.Analysis == NoProblem && len(analysis.StructureAnalysis) == 0 { + return + } + result = append(result, analysis) + } // TODO(sougou); deprecate ReduceReplicationAnalysisCount args := sqlutils.Args(config.Config.ReasonableReplicationLagSeconds, ValidSecondsFromSeenToLastAttemptedCheck(), config.Config.ReasonableReplicationLagSeconds, keyspace, shard) @@ -78,13 +84,13 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna vitess_keyspace.keyspace AS keyspace, vitess_keyspace.keyspace_type AS keyspace_type, vitess_keyspace.durability_policy AS durability_policy, + vitess_shard.primary_timestamp AS shard_primary_term_timestamp, primary_instance.read_only AS read_only, - MIN(primary_instance.hostname) IS NULL AS is_invalid, + MIN(primary_instance.gtid_errant) AS gtid_errant, + MIN(primary_instance.alias) IS NULL AS is_invalid, MIN(primary_instance.data_center) AS data_center, MIN(primary_instance.region) AS region, MIN(primary_instance.physical_environment) AS physical_environment, - MIN(primary_instance.source_host) AS source_host, - MIN(primary_instance.source_port) AS source_port, MIN(primary_instance.binary_log_file) AS binary_log_file, MIN(primary_instance.binary_log_pos) AS binary_log_pos, MIN(primary_tablet.info) AS primary_tablet_info, @@ -109,10 +115,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna OR primary_instance.source_port = 0 OR substr(primary_instance.source_host, 1, 2) = '//' ) - AND ( - primary_instance.replication_group_name = '' - OR primary_instance.replication_group_member_role = 'PRIMARY' - ) ) AS is_primary, MIN(primary_instance.is_co_primary) AS is_co_primary, MIN(primary_instance.gtid_mode) AS gtid_mode, @@ -150,19 +152,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna primary_instance.replica_sql_running = 0 OR primary_instance.replica_io_running = 0 ) AS replication_stopped, - MIN( - primary_downtime.downtime_active is not null - and ifnull(primary_downtime.end_timestamp, now()) > now() - ) AS is_downtimed, - MIN( - IFNULL(primary_downtime.end_timestamp, '') - ) AS downtime_end_timestamp, - MIN( - IFNULL( - unix_timestamp() - unix_timestamp(primary_downtime.end_timestamp), - 0 - ) - ) AS downtime_remaining_seconds, MIN( primary_instance.binlog_server ) AS is_binlog_server, @@ -267,18 +256,10 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna IFNULL(MAX(replica_instance.gtid_mode), '') AS max_replica_gtid_mode, IFNULL( MAX( - case when replica_downtime.downtime_active is not null - and ifnull(replica_downtime.end_timestamp, now()) > now() then '' else replica_instance.gtid_errant end + replica_instance.gtid_errant ), '' ) AS max_replica_gtid_errant, - IFNULL( - SUM( - replica_downtime.downtime_active is not null - and ifnull(replica_downtime.end_timestamp, now()) > now() - ), - 0 - ) AS count_downtimed_replicas, COUNT( DISTINCT case when replica_instance.log_bin AND replica_instance.log_replica_updates then replica_instance.major_version else NULL end @@ -288,50 +269,31 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna JOIN vitess_keyspace ON ( vitess_tablet.keyspace = vitess_keyspace.keyspace ) + JOIN vitess_shard ON ( + vitess_tablet.keyspace = vitess_shard.keyspace + AND vitess_tablet.shard = vitess_shard.shard + ) LEFT JOIN database_instance primary_instance ON ( - vitess_tablet.hostname = primary_instance.hostname + vitess_tablet.alias = primary_instance.alias + AND vitess_tablet.hostname = primary_instance.hostname AND vitess_tablet.port = primary_instance.port ) LEFT JOIN vitess_tablet primary_tablet ON ( primary_tablet.hostname = primary_instance.source_host AND primary_tablet.port = primary_instance.source_port ) - LEFT JOIN hostname_resolve ON ( - primary_instance.hostname = hostname_resolve.hostname - ) LEFT JOIN database_instance replica_instance ON ( - COALESCE( - hostname_resolve.resolved_hostname, - primary_instance.hostname - ) = replica_instance.source_host + primary_instance.hostname = replica_instance.source_host AND primary_instance.port = replica_instance.source_port ) - LEFT JOIN database_instance_maintenance ON ( - primary_instance.hostname = database_instance_maintenance.hostname - AND primary_instance.port = database_instance_maintenance.port - AND database_instance_maintenance.maintenance_active = 1 - ) LEFT JOIN database_instance_stale_binlog_coordinates ON ( - primary_instance.hostname = database_instance_stale_binlog_coordinates.hostname - AND primary_instance.port = database_instance_stale_binlog_coordinates.port - ) - LEFT JOIN database_instance_downtime as primary_downtime ON ( - primary_instance.hostname = primary_downtime.hostname - AND primary_instance.port = primary_downtime.port - AND primary_downtime.downtime_active = 1 - ) - LEFT JOIN database_instance_downtime as replica_downtime ON ( - replica_instance.hostname = replica_downtime.hostname - AND replica_instance.port = replica_downtime.port - AND replica_downtime.downtime_active = 1 + vitess_tablet.alias = database_instance_stale_binlog_coordinates.alias ) WHERE - database_instance_maintenance.database_instance_maintenance_id IS NULL - AND ? IN ('', vitess_keyspace.keyspace) + ? IN ('', vitess_keyspace.keyspace) AND ? IN ('', vitess_tablet.shard) GROUP BY - vitess_tablet.hostname, - vitess_tablet.port + vitess_tablet.alias ORDER BY vitess_tablet.tablet_type ASC, vitess_tablet.primary_timestamp DESC @@ -339,21 +301,22 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna clusters := make(map[string]*clusterAnalysis) err := db.Db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { - a := ReplicationAnalysis{ + a := &ReplicationAnalysis{ Analysis: NoProblem, ProcessingNodeHostname: process.ThisHostname, ProcessingNodeToken: util.ProcessToken.Hash, } tablet := &topodatapb.Tablet{} - if err := prototext.Unmarshal([]byte(m.GetString("tablet_info")), tablet); err != nil { + opts := prototext.UnmarshalOptions{DiscardUnknown: true} + if err := opts.Unmarshal([]byte(m.GetString("tablet_info")), tablet); err != nil { log.Errorf("could not read tablet %v: %v", m.GetString("tablet_info"), err) return nil } primaryTablet := &topodatapb.Tablet{} if str := m.GetString("primary_tablet_info"); str != "" { - if err := prototext.Unmarshal([]byte(str), primaryTablet); err != nil { + if err := opts.Unmarshal([]byte(str), primaryTablet); err != nil { log.Errorf("could not read tablet %v: %v", str, err) return nil } @@ -369,11 +332,14 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna return nil } + a.ShardPrimaryTermTimestamp = m.GetString("shard_primary_term_timestamp") a.IsPrimary = m.GetBool("is_primary") countCoPrimaryReplicas := m.GetUint("count_co_primary_replicas") a.IsCoPrimary = m.GetBool("is_co_primary") || (countCoPrimaryReplicas > 0) - a.AnalyzedInstanceKey = InstanceKey{Hostname: m.GetString("hostname"), Port: m.GetInt("port")} - a.AnalyzedInstancePrimaryKey = InstanceKey{Hostname: m.GetString("source_host"), Port: m.GetInt("source_port")} + a.AnalyzedInstanceHostname = m.GetString("hostname") + a.AnalyzedInstancePort = m.GetInt("port") + a.AnalyzedInstanceAlias = topoproto.TabletAliasString(tablet.Alias) + a.AnalyzedInstancePrimaryAlias = topoproto.TabletAliasString(primaryTablet.Alias) a.AnalyzedInstanceDataCenter = m.GetString("data_center") a.AnalyzedInstanceRegion = m.GetString("region") a.AnalyzedInstancePhysicalEnvironment = m.GetString("physical_environment") @@ -392,15 +358,12 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.CountValidReplicas = m.GetUint("count_valid_replicas") a.CountValidReplicatingReplicas = m.GetUint("count_valid_replicating_replicas") a.CountReplicasFailingToConnectToPrimary = m.GetUint("count_replicas_failing_to_connect_to_primary") - a.CountDowntimedReplicas = m.GetUint("count_downtimed_replicas") a.ReplicationDepth = m.GetUint("replication_depth") a.IsFailingToConnectToPrimary = m.GetBool("is_failing_to_connect_to_primary") a.ReplicationStopped = m.GetBool("replication_stopped") - a.IsDowntimed = m.GetBool("is_downtimed") - a.DowntimeEndTimestamp = m.GetString("downtime_end_timestamp") - a.DowntimeRemainingSeconds = m.GetInt("downtime_remaining_seconds") a.IsBinlogServer = m.GetBool("is_binlog_server") a.ClusterDetails.ReadRecoveryInfo() + a.ErrantGTID = m.GetString("gtid_errant") countValidOracleGTIDReplicas := m.GetUint("count_valid_oracle_gtid_replicas") a.OracleGTIDImmediateTopology = countValidOracleGTIDReplicas == a.CountValidReplicas && a.CountValidReplicas > 0 @@ -432,8 +395,8 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.IsReadOnly = m.GetUint("read_only") == 1 if !a.LastCheckValid { - analysisMessage := fmt.Sprintf("analysis: Keyspace: %+v, Shard: %+v, IsPrimary: %+v, LastCheckValid: %+v, LastCheckPartialSuccess: %+v, CountReplicas: %+v, CountValidReplicas: %+v, CountValidReplicatingReplicas: %+v, CountLaggingReplicas: %+v, CountDelayedReplicas: %+v, CountReplicasFailingToConnectToPrimary: %+v", - a.ClusterDetails.Keyspace, a.ClusterDetails.Shard, a.IsPrimary, a.LastCheckValid, a.LastCheckPartialSuccess, a.CountReplicas, a.CountValidReplicas, a.CountValidReplicatingReplicas, a.CountLaggingReplicas, a.CountDelayedReplicas, a.CountReplicasFailingToConnectToPrimary, + analysisMessage := fmt.Sprintf("analysis: Alias: %+v, Keyspace: %+v, Shard: %+v, IsPrimary: %+v, LastCheckValid: %+v, LastCheckPartialSuccess: %+v, CountReplicas: %+v, CountValidReplicas: %+v, CountValidReplicatingReplicas: %+v, CountLaggingReplicas: %+v, CountDelayedReplicas: %+v, CountReplicasFailingToConnectToPrimary: %+v", + a.AnalyzedInstanceAlias, a.ClusterDetails.Keyspace, a.ClusterDetails.Shard, a.IsPrimary, a.LastCheckValid, a.LastCheckPartialSuccess, a.CountReplicas, a.CountValidReplicas, a.CountValidReplicatingReplicas, a.CountLaggingReplicas, a.CountDelayedReplicas, a.CountReplicasFailingToConnectToPrimary, ) if util.ClearToLog("analysis_dao", analysisMessage) { log.Infof(analysisMessage) @@ -444,7 +407,7 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna clusters[keyspaceShard] = &clusterAnalysis{} if a.TabletType == topodatapb.TabletType_PRIMARY { a.IsClusterPrimary = true - clusters[keyspaceShard].primaryKey = &a.AnalyzedInstanceKey + clusters[keyspaceShard].primaryAlias = a.AnalyzedInstanceAlias } durabilityPolicy := m.GetString("durability_policy") if durabilityPolicy == "" { @@ -460,6 +423,8 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna } // ca has clusterwide info ca := clusters[keyspaceShard] + // Increment the total number of tablets. + ca.totalTablets += 1 if ca.hasClusterwideAction { // We can only take one cluster level action at a time. return nil @@ -469,10 +434,13 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna return nil } isInvalid := m.GetBool("is_invalid") - if isInvalid { - return nil - } - if a.IsClusterPrimary && !a.LastCheckValid && a.CountReplicas == 0 { + if a.IsClusterPrimary && isInvalid { + a.Analysis = InvalidPrimary + a.Description = "VTOrc hasn't been able to reach the primary even once since restart/shutdown" + } else if isInvalid { + a.Analysis = InvalidReplica + a.Description = "VTOrc hasn't been able to reach the replica even once since restart/shutdown" + } else if a.IsClusterPrimary && !a.LastCheckValid && a.CountReplicas == 0 { a.Analysis = DeadPrimaryWithoutReplicas a.Description = "Primary cannot be reached by vtorc and has no replica" ca.hasClusterwideAction = true @@ -501,18 +469,28 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.Analysis = PrimaryIsReadOnly a.Description = "Primary is read-only" // - } else if a.IsClusterPrimary && SemiSyncAckers(ca.durability, tablet) != 0 && !a.SemiSyncPrimaryEnabled { + } else if a.IsClusterPrimary && reparentutil.SemiSyncAckers(ca.durability, tablet) != 0 && !a.SemiSyncPrimaryEnabled { a.Analysis = PrimarySemiSyncMustBeSet a.Description = "Primary semi-sync must be set" // - } else if a.IsClusterPrimary && SemiSyncAckers(ca.durability, tablet) == 0 && a.SemiSyncPrimaryEnabled { + } else if a.IsClusterPrimary && reparentutil.SemiSyncAckers(ca.durability, tablet) == 0 && a.SemiSyncPrimaryEnabled { a.Analysis = PrimarySemiSyncMustNotBeSet a.Description = "Primary semi-sync must not be set" // - } else if topo.IsReplicaType(a.TabletType) && ca.primaryKey == nil { + } else if topo.IsReplicaType(a.TabletType) && a.ErrantGTID != "" { + a.Analysis = ErrantGTIDDetected + a.Description = "Tablet has errant GTIDs" + } else if topo.IsReplicaType(a.TabletType) && ca.primaryAlias == "" && a.ShardPrimaryTermTimestamp == "" { + // ClusterHasNoPrimary should only be detected when the shard record doesn't have any primary term start time specified either. a.Analysis = ClusterHasNoPrimary a.Description = "Cluster has no primary" ca.hasClusterwideAction = true + } else if topo.IsReplicaType(a.TabletType) && ca.primaryAlias == "" && a.ShardPrimaryTermTimestamp != "" { + // If there are no primary tablets, but the shard primary start time isn't empty, then we know + // the primary tablet was deleted. + a.Analysis = PrimaryTabletDeleted + a.Description = "Primary tablet has been deleted" + ca.hasClusterwideAction = true } else if topo.IsReplicaType(a.TabletType) && !a.IsReadOnly { a.Analysis = ReplicaIsWritable a.Description = "Replica is writable" @@ -521,7 +499,7 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.Analysis = NotConnectedToPrimary a.Description = "Not connected to the primary" // - } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && ca.primaryKey != nil && a.AnalyzedInstancePrimaryKey != *ca.primaryKey { + } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && ca.primaryAlias != "" && a.AnalyzedInstancePrimaryAlias != ca.primaryAlias { a.Analysis = ConnectedToWrongPrimary a.Description = "Connected to wrong primary" // @@ -529,11 +507,11 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.Analysis = ReplicationStopped a.Description = "Replication is stopped" // - } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && IsReplicaSemiSync(ca.durability, primaryTablet, tablet) && !a.SemiSyncReplicaEnabled { + } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && reparentutil.IsReplicaSemiSync(ca.durability, primaryTablet, tablet) && !a.SemiSyncReplicaEnabled { a.Analysis = ReplicaSemiSyncMustBeSet a.Description = "Replica semi-sync must be set" // - } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && !IsReplicaSemiSync(ca.durability, primaryTablet, tablet) && a.SemiSyncReplicaEnabled { + } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && !reparentutil.IsReplicaSemiSync(ca.durability, primaryTablet, tablet) && a.SemiSyncReplicaEnabled { a.Analysis = ReplicaSemiSyncMustNotBeSet a.Description = "Replica semi-sync must not be set" // @@ -586,28 +564,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna // a.Description = "Primary has no replicas" // } - appendAnalysis := func(analysis *ReplicationAnalysis) { - if a.Analysis == NoProblem && len(a.StructureAnalysis) == 0 && !hints.IncludeNoProblem { - return - } - if a.IsDowntimed { - a.SkippableDueToDowntime = true - } - if a.CountReplicas == a.CountDowntimedReplicas { - switch a.Analysis { - case AllPrimaryReplicasNotReplicating, - AllPrimaryReplicasNotReplicatingOrDead, - PrimarySingleReplicaDead: - a.IsReplicasDowntimed = true - a.SkippableDueToDowntime = true - } - } - if a.SkippableDueToDowntime && !hints.IncludeDowntimed { - return - } - result = append(result, a) - } - { // Moving on to structure analysis // We also do structural checks. See if there's potential danger in promotions @@ -648,17 +604,19 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.StructureAnalysis = append(a.StructureAnalysis, NotEnoughValidSemiSyncReplicasStructureWarning) } } - appendAnalysis(&a) + appendAnalysis(a) if a.CountReplicas > 0 && hints.AuditAnalysis { // Interesting enough for analysis go func() { - _ = auditInstanceAnalysisInChangelog(&a.AnalyzedInstanceKey, a.Analysis) + _ = auditInstanceAnalysisInChangelog(a.AnalyzedInstanceAlias, a.Analysis) }() } return nil }) + result = postProcessAnalyses(result, clusters) + if err != nil { log.Error(err) } @@ -666,23 +624,65 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna return result, err } +// postProcessAnalyses is used to update different analyses based on the information gleaned from looking at all the analyses together instead of individual data. +func postProcessAnalyses(result []*ReplicationAnalysis, clusters map[string]*clusterAnalysis) []*ReplicationAnalysis { + for { + // Store whether we have changed the result of replication analysis or not. + resultChanged := false + + // Go over all the analyses. + for _, analysis := range result { + // If one of them is an InvalidPrimary, then we see if all the other tablets in this keyspace shard are + // unable to replicate or not. + if analysis.Analysis == InvalidPrimary { + keyspaceName := analysis.ClusterDetails.Keyspace + shardName := analysis.ClusterDetails.Shard + keyspaceShard := getKeyspaceShardName(keyspaceName, shardName) + totalReplicas := clusters[keyspaceShard].totalTablets - 1 + var notReplicatingReplicas []int + for idx, replicaAnalysis := range result { + if replicaAnalysis.ClusterDetails.Keyspace == keyspaceName && + replicaAnalysis.ClusterDetails.Shard == shardName && topo.IsReplicaType(replicaAnalysis.TabletType) { + // If the replica's last check is invalid or its replication is stopped, then we consider as not replicating. + if !replicaAnalysis.LastCheckValid || replicaAnalysis.ReplicationStopped { + notReplicatingReplicas = append(notReplicatingReplicas, idx) + } + } + } + // If none of the other tablets are able to replicate, then we conclude that this primary is not just Invalid, but also Dead. + // In this case, we update the analysis for the primary tablet and remove all the analyses of the replicas. + if totalReplicas > 0 && len(notReplicatingReplicas) == totalReplicas { + resultChanged = true + analysis.Analysis = DeadPrimary + for i := len(notReplicatingReplicas) - 1; i >= 0; i-- { + idxToRemove := notReplicatingReplicas[i] + result = append(result[0:idxToRemove], result[idxToRemove+1:]...) + } + break + } + } + } + if !resultChanged { + break + } + } + return result +} + // auditInstanceAnalysisInChangelog will write down an instance's analysis in the database_instance_analysis_changelog table. // To not repeat recurring analysis code, the database_instance_last_analysis table is used, so that only changes to // analysis codes are written. -func auditInstanceAnalysisInChangelog(instanceKey *InstanceKey, analysisCode AnalysisCode) error { - if lastWrittenAnalysis, found := recentInstantAnalysis.Get(instanceKey.DisplayString()); found { +func auditInstanceAnalysisInChangelog(tabletAlias string, analysisCode AnalysisCode) error { + if lastWrittenAnalysis, found := recentInstantAnalysis.Get(tabletAlias); found { if lastWrittenAnalysis == analysisCode { // Surely nothing new. // And let's expand the timeout - recentInstantAnalysis.Set(instanceKey.DisplayString(), analysisCode, cache.DefaultExpiration) + recentInstantAnalysis.Set(tabletAlias, analysisCode, cache.DefaultExpiration) return nil } } - // Passed the cache; but does database agree that there's a change? Here's a persistent cache; this comes here - // to verify no two vtorc services are doing this without coordinating (namely, one dies, the other taking its place - // and has no familiarity of the former's cache) - analysisChangeWriteAttemptCounter.Inc(1) + // Find if the lastAnalysisHasChanged or not while updating the row if it has. lastAnalysisChanged := false { sqlResult, err := db.ExecVTOrc(` @@ -690,11 +690,10 @@ func auditInstanceAnalysisInChangelog(instanceKey *InstanceKey, analysisCode Ana analysis = ?, analysis_timestamp = now() where - hostname = ? - and port = ? + alias = ? and analysis != ? `, - string(analysisCode), instanceKey.Hostname, instanceKey.Port, string(analysisCode), + string(analysisCode), tabletAlias, string(analysisCode), ) if err != nil { log.Error(err) @@ -705,36 +704,48 @@ func auditInstanceAnalysisInChangelog(instanceKey *InstanceKey, analysisCode Ana log.Error(err) return err } - lastAnalysisChanged = (rows > 0) + lastAnalysisChanged = rows > 0 } + + // If the last analysis has not changed, then there is a chance that this is the first insertion. + // We need to find that out too when we insert into the database. + firstInsertion := false if !lastAnalysisChanged { - _, err := db.ExecVTOrc(` + // The insert only returns more than 1 row changed if this is the first insertion. + sqlResult, err := db.ExecVTOrc(` insert ignore into database_instance_last_analysis ( - hostname, port, analysis_timestamp, analysis + alias, analysis_timestamp, analysis ) values ( - ?, ?, now(), ? + ?, now(), ? ) `, - instanceKey.Hostname, instanceKey.Port, string(analysisCode), + tabletAlias, string(analysisCode), ) if err != nil { log.Error(err) return err } + rows, err := sqlResult.RowsAffected() + if err != nil { + log.Error(err) + return err + } + firstInsertion = rows > 0 } - recentInstantAnalysis.Set(instanceKey.DisplayString(), analysisCode, cache.DefaultExpiration) - if !lastAnalysisChanged { + recentInstantAnalysis.Set(tabletAlias, analysisCode, cache.DefaultExpiration) + // If the analysis has changed or if it is the first insertion, we need to make sure we write this change to the database. + if !lastAnalysisChanged && !firstInsertion { return nil } _, err := db.ExecVTOrc(` insert into database_instance_analysis_changelog ( - hostname, port, analysis_timestamp, analysis + alias, analysis_timestamp, analysis ) values ( - ?, ?, now(), ? + ?, now(), ? ) `, - instanceKey.Hostname, instanceKey.Port, string(analysisCode), + tabletAlias, string(analysisCode), ) if err == nil { analysisChangeWriteCounter.Inc(1) diff --git a/go/vt/vtorc/inst/analysis_dao_test.go b/go/vt/vtorc/inst/analysis_dao_test.go index 480986e34ba..c1926fca089 100644 --- a/go/vt/vtorc/inst/analysis_dao_test.go +++ b/go/vt/vtorc/inst/analysis_dao_test.go @@ -18,16 +18,35 @@ package inst import ( "testing" + "time" + "github.com/patrickmn/go-cache" + "github.com/rcrowley/go-metrics" "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/external/golib/sqlutils" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/test" ) +var ( + // The initialSQL is a set of insert commands copied from a dump of an actual running VTOrc instances. The relevant insert commands are here. + // This is a dump taken from a test running 4 tablets, zone1-101 is the primary, zone1-100 is a replica, zone1-112 is a rdonly and zone2-200 is a cross-cell replica. + initialSQL = []string{ + `INSERT INTO database_instance VALUES('zone1-0000000112','localhost',6747,'2022-12-28 07:26:04','2022-12-28 07:26:04',213696377,'8.0.31','ROW',1,1,'vt-0000000112-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000112-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-9240-92a06c3be3c2','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10816929,0,0,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-9240-92a06c3be3c2',1,1,'',1000000000000000000,1,0,0,0);`, + `INSERT INTO database_instance VALUES('zone1-0000000100','localhost',6711,'2022-12-28 07:26:04','2022-12-28 07:26:04',1094500338,'8.0.31','ROW',1,1,'vt-0000000100-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000100-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-acf8-d6b0ef9f4eaa','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10103920,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-acf8-d6b0ef9f4eaa',1,1,'',1000000000000000000,1,0,1,0);`, + `INSERT INTO database_instance VALUES('zone1-0000000101','localhost',6714,'2022-12-28 07:26:04','2022-12-28 07:26:04',390954723,'8.0.31','ROW',1,1,'vt-0000000101-bin.000001',15583,'',0,0,0,'',0,'',0,NULL,NULL,0,'','',0,0,'',0,0,0,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a4cc4-8680-11ed-a104-47706090afbd','2022-12-28 07:26:04','',0,0,0,'Homebrew','8.0','FULL',11366095,1,1,'ON',1,'','','729a4cc4-8680-11ed-a104-47706090afbd',-1,-1,'',1000000000000000000,1,1,0,2);`, + `INSERT INTO database_instance VALUES('zone2-0000000200','localhost',6756,'2022-12-28 07:26:05','2022-12-28 07:26:05',444286571,'8.0.31','ROW',1,1,'vt-0000000200-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000200-relay-bin.000002',15815,0,1,0,'zone2','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a497c-8680-11ed-8ad4-3f51d747db75','2022-12-28 07:26:05','',1,0,0,'Homebrew','8.0','FULL',10443112,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a497c-8680-11ed-8ad4-3f51d747db75',1,1,'',1000000000000000000,1,0,1,0);`, + `INSERT INTO vitess_tablet VALUES('zone1-0000000100','localhost',6711,'ks','0','zone1',2,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130307d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731307d20706f72745f6d61703a7b6b65793a227674222076616c75653a363730397d206b657973706163653a226b73222073686172643a22302220747970653a5245504c494341206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363731312064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, + `INSERT INTO vitess_tablet VALUES('zone1-0000000101','localhost',6714,'ks','0','zone1',1,'2022-12-28 07:23:25.129898+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130317d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731337d20706f72745f6d61703a7b6b65793a227674222076616c75653a363731327d206b657973706163653a226b73222073686172643a22302220747970653a5052494d415259206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a36373134207072696d6172795f7465726d5f73746172745f74696d653a7b7365636f6e64733a31363732323132323035206e616e6f7365636f6e64733a3132393839383030307d2064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, + `INSERT INTO vitess_tablet VALUES('zone1-0000000112','localhost',6747,'ks','0','zone1',3,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3131327d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363734367d20706f72745f6d61703a7b6b65793a227674222076616c75653a363734357d206b657973706163653a226b73222073686172643a22302220747970653a52444f4e4c59206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363734372064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, + `INSERT INTO vitess_tablet VALUES('zone2-0000000200','localhost',6756,'ks','0','zone2',2,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653222207569643a3230307d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363735357d20706f72745f6d61703a7b6b65793a227674222076616c75653a363735347d206b657973706163653a226b73222073686172643a22302220747970653a5245504c494341206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363735362064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, + `INSERT INTO vitess_shard VALUES('ks','0','zone1-0000000101','2022-12-28 07:23:25.129898+00:00');`, + `INSERT INTO vitess_keyspace VALUES('ks',0,'semi_sync');`, + } +) + // TestGetReplicationAnalysisDecision tests the code of GetReplicationAnalysis decision-making. It doesn't check the SQL query // run by it. It only checks the analysis part after the rows have been read. This tests fakes the db and explicitly returns the // rows that are specified in the test. @@ -58,6 +77,25 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { keyspaceWanted: "ks", shardWanted: "0", codeWanted: ClusterHasNoPrimary, + }, { + name: "PrimaryTabletDeleted", + info: []*test.InfoForRecoveryAnalysis{{ + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 100}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_REPLICA, + MysqlHostname: "localhost", + MysqlPort: 6709, + }, + ShardPrimaryTermTimestamp: "2022-12-28 07:23:25.129898+00:00", + DurabilityPolicy: "none", + LastCheckValid: 1, + }}, + keyspaceWanted: "ks", + shardWanted: "0", + codeWanted: PrimaryTabletDeleted, }, { name: "DeadPrimary", info: []*test.InfoForRecoveryAnalysis{{ @@ -297,10 +335,11 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlPort: 6709, }, DurabilityPolicy: "none", - SourceHost: "localhost", - SourcePort: 6708, - LastCheckValid: 1, - ReadOnly: 0, + PrimaryTabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + }, + LastCheckValid: 1, + ReadOnly: 0, }}, keyspaceWanted: "ks", shardWanted: "0", @@ -336,10 +375,11 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlPort: 6709, }, DurabilityPolicy: "none", - SourceHost: "localhost", - SourcePort: 6706, - LastCheckValid: 1, - ReadOnly: 1, + PrimaryTabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 102}, + }, + LastCheckValid: 1, + ReadOnly: 1, }}, keyspaceWanted: "ks", shardWanted: "0", @@ -374,9 +414,10 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlHostname: "localhost", MysqlPort: 6709, }, - DurabilityPolicy: "none", - SourceHost: "localhost", - SourcePort: 6708, + DurabilityPolicy: "none", + PrimaryTabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + }, LastCheckValid: 1, ReadOnly: 1, ReplicationStopped: 1, @@ -417,17 +458,9 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlPort: 6709, }, PrimaryTabletInfo: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, - Hostname: "localhost", - Keyspace: "ks", - Shard: "0", - Type: topodatapb.TabletType_PRIMARY, - MysqlHostname: "localhost", - MysqlPort: 6708, + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, DurabilityPolicy: "semi_sync", - SourceHost: "localhost", - SourcePort: 6708, LastCheckValid: 1, ReadOnly: 1, SemiSyncReplicaEnabled: 0, @@ -466,17 +499,9 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { MysqlPort: 6709, }, PrimaryTabletInfo: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, - Hostname: "localhost", - Keyspace: "ks", - Shard: "0", - Type: topodatapb.TabletType_PRIMARY, - MysqlHostname: "localhost", - MysqlPort: 6708, + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, }, DurabilityPolicy: "none", - SourceHost: "localhost", - SourcePort: 6708, LastCheckValid: 1, ReadOnly: 1, SemiSyncReplicaEnabled: 1, @@ -560,6 +585,148 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { }}, keyspaceWanted: "ks", shardWanted: "0", + codeWanted: InvalidReplica, + }, { + name: "DeadPrimary when VTOrc is starting up", + info: []*test.InfoForRecoveryAnalysis{{ + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_PRIMARY, + MysqlHostname: "localhost", + MysqlPort: 6708, + }, + DurabilityPolicy: "none", + IsInvalid: 1, + }, { + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 100}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_REPLICA, + MysqlHostname: "localhost", + MysqlPort: 6709, + }, + LastCheckValid: 1, + ReplicationStopped: 1, + }, { + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 103}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_REPLICA, + MysqlHostname: "localhost", + MysqlPort: 6710, + }, + LastCheckValid: 1, + ReplicationStopped: 1, + }}, + keyspaceWanted: "ks", + shardWanted: "0", + codeWanted: DeadPrimary, + }, { + name: "Invalid Primary", + info: []*test.InfoForRecoveryAnalysis{{ + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_PRIMARY, + MysqlHostname: "localhost", + MysqlPort: 6708, + }, + DurabilityPolicy: "none", + IsInvalid: 1, + }}, + keyspaceWanted: "ks", + shardWanted: "0", + codeWanted: InvalidPrimary, + }, { + name: "ErrantGTID", + info: []*test.InfoForRecoveryAnalysis{{ + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_PRIMARY, + MysqlHostname: "localhost", + MysqlPort: 6708, + }, + DurabilityPolicy: "none", + LastCheckValid: 1, + CountReplicas: 4, + CountValidReplicas: 4, + CountValidReplicatingReplicas: 3, + CountValidOracleGTIDReplicas: 4, + CountLoggingReplicas: 2, + IsPrimary: 1, + }, { + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 100}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_REPLICA, + MysqlHostname: "localhost", + MysqlPort: 6709, + }, + DurabilityPolicy: "none", + ErrantGTID: "some errant GTID", + PrimaryTabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + }, + LastCheckValid: 1, + ReadOnly: 1, + }}, + keyspaceWanted: "ks", + shardWanted: "0", + codeWanted: ErrantGTIDDetected, + }, { + name: "ErrantGTID on a non-replica", + info: []*test.InfoForRecoveryAnalysis{{ + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_PRIMARY, + MysqlHostname: "localhost", + MysqlPort: 6708, + }, + DurabilityPolicy: "none", + LastCheckValid: 1, + CountReplicas: 4, + CountValidReplicas: 4, + CountValidReplicatingReplicas: 3, + CountValidOracleGTIDReplicas: 4, + CountLoggingReplicas: 2, + IsPrimary: 1, + }, { + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 100}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_DRAINED, + MysqlHostname: "localhost", + MysqlPort: 6709, + }, + DurabilityPolicy: "none", + ErrantGTID: "some errant GTID", + PrimaryTabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + }, + LastCheckValid: 1, + ReadOnly: 1, + }}, + keyspaceWanted: "ks", + shardWanted: "0", codeWanted: NoProblem, }, } @@ -600,20 +767,6 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { // This test is somewhere between a unit test, and an end-to-end test. It is specifically useful for testing situations which are hard to come by in end-to-end test, but require // real-world data to test specifically. func TestGetReplicationAnalysis(t *testing.T) { - // The initialSQL is a set of insert commands copied from a dump of an actual running VTOrc instances. The relevant insert commands are here. - // This is a dump taken from a test running 4 tablets, zone1-101 is the primary, zone1-100 is a replica, zone1-112 is a rdonly and zone2-200 is a cross-cell replica. - initialSQL := []string{ - `INSERT INTO database_instance VALUES('localhost',6747,'2022-12-28 07:26:04','2022-12-28 07:26:04',213696377,'8.0.31','ROW',1,1,'vt-0000000112-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000112-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-9240-92a06c3be3c2','2022-12-28 07:26:04','',1,0,0,'zone1-0000000112','Homebrew','8.0','FULL',10816929,0,0,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-9240-92a06c3be3c2',1,1,'',1000000000000000000,1,0,0,0,'',0,'','','[]','',0);`, - `INSERT INTO database_instance VALUES('localhost',6711,'2022-12-28 07:26:04','2022-12-28 07:26:04',1094500338,'8.0.31','ROW',1,1,'vt-0000000100-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000100-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-acf8-d6b0ef9f4eaa','2022-12-28 07:26:04','',1,0,0,'zone1-0000000100','Homebrew','8.0','FULL',10103920,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-acf8-d6b0ef9f4eaa',1,1,'',1000000000000000000,1,0,1,0,'',0,'','','[]','',0);`, - `INSERT INTO database_instance VALUES('localhost',6714,'2022-12-28 07:26:04','2022-12-28 07:26:04',390954723,'8.0.31','ROW',1,1,'vt-0000000101-bin.000001',15583,'',0,0,0,'',0,'',0,NULL,NULL,0,'','',0,0,'',0,0,0,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a4cc4-8680-11ed-a104-47706090afbd','2022-12-28 07:26:04','',0,0,0,'zone1-0000000101','Homebrew','8.0','FULL',11366095,1,1,'ON',1,'','','729a4cc4-8680-11ed-a104-47706090afbd',-1,-1,'',1000000000000000000,1,1,0,2,'',0,'','','[]','',0);`, - `INSERT INTO database_instance VALUES('localhost',6756,'2022-12-28 07:26:05','2022-12-28 07:26:05',444286571,'8.0.31','ROW',1,1,'vt-0000000200-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000200-relay-bin.000002',15815,0,1,0,'zone2','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a497c-8680-11ed-8ad4-3f51d747db75','2022-12-28 07:26:05','',1,0,0,'zone2-0000000200','Homebrew','8.0','FULL',10443112,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a497c-8680-11ed-8ad4-3f51d747db75',1,1,'',1000000000000000000,1,0,1,0,'',0,'','','[]','',0);`, - `INSERT INTO vitess_tablet VALUES('zone1-0000000100','localhost',6711,'ks','0','zone1',2,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130307d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731307d20706f72745f6d61703a7b6b65793a227674222076616c75653a363730397d206b657973706163653a226b73222073686172643a22302220747970653a5245504c494341206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363731312064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, - `INSERT INTO vitess_tablet VALUES('zone1-0000000101','localhost',6714,'ks','0','zone1',1,'2022-12-28 07:23:25.129898+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130317d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731337d20706f72745f6d61703a7b6b65793a227674222076616c75653a363731327d206b657973706163653a226b73222073686172643a22302220747970653a5052494d415259206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a36373134207072696d6172795f7465726d5f73746172745f74696d653a7b7365636f6e64733a31363732323132323035206e616e6f7365636f6e64733a3132393839383030307d2064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, - `INSERT INTO vitess_tablet VALUES('zone1-0000000112','localhost',6747,'ks','0','zone1',3,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3131327d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363734367d20706f72745f6d61703a7b6b65793a227674222076616c75653a363734357d206b657973706163653a226b73222073686172643a22302220747970653a52444f4e4c59206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363734372064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, - `INSERT INTO vitess_tablet VALUES('zone2-0000000200','localhost',6756,'ks','0','zone2',2,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653222207569643a3230307d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363735357d20706f72745f6d61703a7b6b65793a227674222076616c75653a363735347d206b657973706163653a226b73222073686172643a22302220747970653a5245504c494341206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363735362064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, - `INSERT INTO vitess_keyspace VALUES('ks',0,'semi_sync');`, - } - // The test is intended to be used as follows. The initial data is stored into the database. Following this, some specific queries are run that each individual test specifies to get the desired state. tests := []struct { name string @@ -632,7 +785,7 @@ func TestGetReplicationAnalysis(t *testing.T) { // This query removes the primary tablet's vitess_tablet record `delete from vitess_tablet where port = 6714`, }, - codeWanted: ClusterHasNoPrimary, + codeWanted: PrimaryTabletDeleted, keyspaceWanted: "ks", shardWanted: "0", }, { @@ -643,9 +796,10 @@ func TestGetReplicationAnalysis(t *testing.T) { }, // As long as we have the vitess record stating that this tablet is the primary // It would be incorrect to run a PRS. - // This situation only happens when we haven't been able to read the MySQL information even once for this tablet. - // So it is likely a new tablet. - codeWanted: NoProblem, + // We should still flag this tablet as Invalid. + codeWanted: InvalidPrimary, + keyspaceWanted: "ks", + shardWanted: "0", }, { name: "Removing Replica Tablet's MySQL record", sql: []string{ @@ -656,13 +810,15 @@ func TestGetReplicationAnalysis(t *testing.T) { // We should wait for the MySQL information to be refreshed once. // This situation only happens when we haven't been able to read the MySQL information even once for this tablet. // So it is likely a new tablet. - codeWanted: NoProblem, + codeWanted: InvalidReplica, + keyspaceWanted: "ks", + shardWanted: "0", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - // Each test should clear the database. The easiest way to do that is to run all the initialization commands again + // Each test should clear the database. The easiest way to do that is to run all the initialization commands again. defer func() { db.ClearVTOrcDatabase() }() @@ -685,3 +841,232 @@ func TestGetReplicationAnalysis(t *testing.T) { }) } } + +// TestAuditInstanceAnalysisInChangelog tests the functionality of the auditInstanceAnalysisInChangelog function +// and verifies that we write the correct number of times to the database. +func TestAuditInstanceAnalysisInChangelog(t *testing.T) { + tests := []struct { + name string + cacheExpiration time.Duration + }{ + { + name: "Long expiration", + cacheExpiration: 2 * time.Minute, + }, { + name: "Very short expiration", + cacheExpiration: 100 * time.Millisecond, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create the cache for the test to use. + oldRecentInstantAnalysisCache := recentInstantAnalysis + oldAnalysisChangeWriteCounter := analysisChangeWriteCounter + + recentInstantAnalysis = cache.New(tt.cacheExpiration, 100*time.Millisecond) + analysisChangeWriteCounter = metrics.NewCounter() + + defer func() { + // Set the old values back. + recentInstantAnalysis = oldRecentInstantAnalysisCache + analysisChangeWriteCounter = oldAnalysisChangeWriteCounter + // Each test should clear the database. The easiest way to do that is to run all the initialization commands again. + db.ClearVTOrcDatabase() + }() + + updates := []struct { + tabletAlias string + analysisCode AnalysisCode + writeCounterExpectation int + wantErr string + }{ + { + // Store a new analysis for the zone1-100 tablet. + tabletAlias: "zone1-100", + analysisCode: ReplicationStopped, + writeCounterExpectation: 1, + }, { + // Write the same analysis, no new write should happen. + tabletAlias: "zone1-100", + analysisCode: ReplicationStopped, + writeCounterExpectation: 1, + }, { + // Change the analysis. This should trigger an update. + tabletAlias: "zone1-100", + analysisCode: ReplicaSemiSyncMustBeSet, + writeCounterExpectation: 2, + }, + } + + for _, upd := range updates { + // We sleep 200 milliseconds to make sure that the cache has had time to update. + // It should be able to delete entries if the expiration is less than 200 milliseconds. + time.Sleep(200 * time.Millisecond) + err := auditInstanceAnalysisInChangelog(upd.tabletAlias, upd.analysisCode) + if upd.wantErr != "" { + require.EqualError(t, err, upd.wantErr) + continue + } + require.NoError(t, err) + require.EqualValues(t, upd.writeCounterExpectation, analysisChangeWriteCounter.Count()) + } + }) + } +} + +// TestPostProcessAnalyses tests the functionality of the postProcessAnalyses function. +func TestPostProcessAnalyses(t *testing.T) { + ks0 := ClusterInfo{ + Keyspace: "ks", + Shard: "0", + CountInstances: 4, + } + ks80 := ClusterInfo{ + Keyspace: "ks", + Shard: "80-", + CountInstances: 3, + } + clusters := map[string]*clusterAnalysis{ + getKeyspaceShardName(ks0.Keyspace, ks0.Shard): { + totalTablets: int(ks0.CountInstances), + }, + getKeyspaceShardName(ks80.Keyspace, ks80.Shard): { + totalTablets: int(ks80.CountInstances), + }, + } + + tests := []struct { + name string + analyses []*ReplicationAnalysis + want []*ReplicationAnalysis + }{ + { + name: "No processing needed", + analyses: []*ReplicationAnalysis{ + { + Analysis: ReplicationStopped, + TabletType: topodatapb.TabletType_REPLICA, + LastCheckValid: true, + ClusterDetails: ks0, + }, { + Analysis: ReplicaSemiSyncMustBeSet, + LastCheckValid: true, + TabletType: topodatapb.TabletType_REPLICA, + ClusterDetails: ks0, + }, { + Analysis: PrimaryHasPrimary, + LastCheckValid: true, + TabletType: topodatapb.TabletType_REPLICA, + ClusterDetails: ks0, + }, + }, + }, { + name: "Conversion of InvalidPrimary to DeadPrimary", + analyses: []*ReplicationAnalysis{ + { + Analysis: InvalidPrimary, + AnalyzedInstanceAlias: "zone1-100", + TabletType: topodatapb.TabletType_PRIMARY, + ClusterDetails: ks0, + }, { + Analysis: NoProblem, + LastCheckValid: true, + AnalyzedInstanceAlias: "zone1-202", + TabletType: topodatapb.TabletType_RDONLY, + ClusterDetails: ks80, + }, { + Analysis: ConnectedToWrongPrimary, + LastCheckValid: true, + AnalyzedInstanceAlias: "zone1-101", + TabletType: topodatapb.TabletType_REPLICA, + ReplicationStopped: true, + ClusterDetails: ks0, + }, { + Analysis: ReplicationStopped, + LastCheckValid: true, + AnalyzedInstanceAlias: "zone1-102", + TabletType: topodatapb.TabletType_RDONLY, + ReplicationStopped: true, + ClusterDetails: ks0, + }, { + Analysis: InvalidReplica, + AnalyzedInstanceAlias: "zone1-108", + TabletType: topodatapb.TabletType_REPLICA, + LastCheckValid: false, + ClusterDetails: ks0, + }, { + Analysis: NoProblem, + AnalyzedInstanceAlias: "zone1-302", + LastCheckValid: true, + TabletType: topodatapb.TabletType_REPLICA, + ClusterDetails: ks80, + }, + }, + want: []*ReplicationAnalysis{ + { + Analysis: DeadPrimary, + AnalyzedInstanceAlias: "zone1-100", + TabletType: topodatapb.TabletType_PRIMARY, + ClusterDetails: ks0, + }, { + Analysis: NoProblem, + LastCheckValid: true, + AnalyzedInstanceAlias: "zone1-202", + TabletType: topodatapb.TabletType_RDONLY, + ClusterDetails: ks80, + }, { + Analysis: NoProblem, + LastCheckValid: true, + AnalyzedInstanceAlias: "zone1-302", + TabletType: topodatapb.TabletType_REPLICA, + ClusterDetails: ks80, + }, + }, + }, + { + name: "Unable to convert InvalidPrimary to DeadPrimary", + analyses: []*ReplicationAnalysis{ + { + Analysis: InvalidPrimary, + AnalyzedInstanceAlias: "zone1-100", + TabletType: topodatapb.TabletType_PRIMARY, + ClusterDetails: ks0, + }, { + Analysis: NoProblem, + AnalyzedInstanceAlias: "zone1-202", + LastCheckValid: true, + TabletType: topodatapb.TabletType_RDONLY, + ClusterDetails: ks80, + }, { + Analysis: NoProblem, + LastCheckValid: true, + AnalyzedInstanceAlias: "zone1-101", + TabletType: topodatapb.TabletType_REPLICA, + ClusterDetails: ks0, + }, { + Analysis: ReplicationStopped, + LastCheckValid: true, + AnalyzedInstanceAlias: "zone1-102", + TabletType: topodatapb.TabletType_RDONLY, + ReplicationStopped: true, + ClusterDetails: ks0, + }, { + Analysis: NoProblem, + LastCheckValid: true, + AnalyzedInstanceAlias: "zone1-302", + TabletType: topodatapb.TabletType_REPLICA, + ClusterDetails: ks80, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.want == nil { + tt.want = tt.analyses + } + result := postProcessAnalyses(tt.analyses, clusters) + require.ElementsMatch(t, tt.want, result) + }) + } +} diff --git a/go/vt/vtorc/inst/audit.go b/go/vt/vtorc/inst/audit.go deleted file mode 100644 index 6650b01ac18..00000000000 --- a/go/vt/vtorc/inst/audit.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -// Audit presents a single audit entry (namely in the database) -type Audit struct { - AuditID int64 - AuditTimestamp string - AuditType string - AuditInstanceKey InstanceKey - Message string -} diff --git a/go/vt/vtorc/inst/audit_dao.go b/go/vt/vtorc/inst/audit_dao.go index 7882449c655..96db7f32ccf 100644 --- a/go/vt/vtorc/inst/audit_dao.go +++ b/go/vt/vtorc/inst/audit_dao.go @@ -22,7 +22,6 @@ import ( "os" "time" - "vitess.io/vitess/go/vt/external/golib/sqlutils" "vitess.io/vitess/go/vt/log" "github.com/rcrowley/go-metrics" @@ -50,14 +49,11 @@ func EnableAuditSyslog() (err error) { } // AuditOperation creates and writes a new audit entry by given params -func AuditOperation(auditType string, instanceKey *InstanceKey, message string) error { - if instanceKey == nil { - instanceKey = &InstanceKey{} - } +func AuditOperation(auditType string, tabletAlias string, message string) error { keyspace := "" shard := "" - if instanceKey.Hostname != "" { - keyspace, shard, _ = GetKeyspaceShardName(instanceKey) + if tabletAlias != "" { + keyspace, shard, _ = GetKeyspaceShardName(tabletAlias) } auditWrittenToFile := false @@ -71,7 +67,7 @@ func AuditOperation(auditType string, instanceKey *InstanceKey, message string) } defer f.Close() - text := fmt.Sprintf("%s\t%s\t%s\t%d\t[%s:%s]\t%s\t\n", time.Now().Format("2006-01-02 15:04:05"), auditType, instanceKey.Hostname, instanceKey.Port, keyspace, shard, message) + text := fmt.Sprintf("%s\t%s\t%s\t[%s:%s]\t%s\t\n", time.Now().Format("2006-01-02 15:04:05"), auditType, tabletAlias, keyspace, shard, message) if _, err = f.WriteString(text); err != nil { log.Error(err) } @@ -81,14 +77,13 @@ func AuditOperation(auditType string, instanceKey *InstanceKey, message string) _, err := db.ExecVTOrc(` insert into audit ( - audit_timestamp, audit_type, hostname, port, keyspace, shard, message + audit_timestamp, audit_type, alias, keyspace, shard, message ) VALUES ( - NOW(), ?, ?, ?, ?, ?, ? + NOW(), ?, ?, ?, ?, ? ) `, auditType, - instanceKey.Hostname, - instanceKey.Port, + tabletAlias, keyspace, shard, message, @@ -98,7 +93,7 @@ func AuditOperation(auditType string, instanceKey *InstanceKey, message string) return err } } - logMessage := fmt.Sprintf("auditType:%s instance:%s keyspace:%s shard:%s message:%s", auditType, instanceKey.DisplayString(), keyspace, shard, message) + logMessage := fmt.Sprintf("auditType:%s alias:%s keyspace:%s shard:%s message:%s", auditType, tabletAlias, keyspace, shard, message) if syslogWriter != nil { auditWrittenToFile = true go func() { @@ -113,52 +108,6 @@ func AuditOperation(auditType string, instanceKey *InstanceKey, message string) return nil } -// ReadRecentAudit returns a list of audit entries order chronologically descending, using page number. -func ReadRecentAudit(instanceKey *InstanceKey, page int) ([]Audit, error) { - res := []Audit{} - args := sqlutils.Args() - whereCondition := `` - if instanceKey != nil { - whereCondition = `where hostname=? and port=?` - args = append(args, instanceKey.Hostname, instanceKey.Port) - } - query := fmt.Sprintf(` - select - audit_id, - audit_timestamp, - audit_type, - hostname, - port, - message - from - audit - %s - order by - audit_timestamp desc - limit ? - offset ? - `, whereCondition) - args = append(args, config.AuditPageSize, page*config.AuditPageSize) - err := db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { - audit := Audit{} - audit.AuditID = m.GetInt64("audit_id") - audit.AuditTimestamp = m.GetString("audit_timestamp") - audit.AuditType = m.GetString("audit_type") - audit.AuditInstanceKey.Hostname = m.GetString("hostname") - audit.AuditInstanceKey.Port = m.GetInt("port") - audit.Message = m.GetString("message") - - res = append(res, audit) - return nil - }) - - if err != nil { - log.Error(err) - } - return res, err - -} - // ExpireAudit removes old rows from the audit table func ExpireAudit() error { return ExpireTableData("audit", "audit_timestamp") diff --git a/go/vt/vtorc/inst/audit_dao_test.go b/go/vt/vtorc/inst/audit_dao_test.go index 4a6533077c2..1d50de4c146 100644 --- a/go/vt/vtorc/inst/audit_dao_test.go +++ b/go/vt/vtorc/inst/audit_dao_test.go @@ -17,18 +17,22 @@ limitations under the License. package inst import ( + "fmt" "os" "testing" "time" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/external/golib/sqlutils" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" ) // TestAuditOperation tests that auditing a operation works as intended based on the configurations. +// This test also verifies that we are able to read the recent audits that are written to the databaes. func TestAuditOperation(t *testing.T) { // Restore original configurations originalAuditSysLog := config.Config.AuditToSyslog @@ -69,32 +73,39 @@ func TestAuditOperation(t *testing.T) { err = SaveTablet(tab100) require.NoError(t, err) - instance100 := &InstanceKey{ - Hostname: hostname, - Port: int(port), - } + tab100Alias := topoproto.TabletAliasString(tab100.Alias) auditType := "test-audit-operation" message := "test-message" - t.Run("Audit to backend", func(t *testing.T) { + t.Run("audit to backend", func(t *testing.T) { config.Config.AuditLogFile = "" config.Config.AuditToSyslog = false config.Config.AuditToBackendDB = true // Auditing should succeed as expected - err = AuditOperation(auditType, instance100, message) + err = AuditOperation(auditType, tab100Alias, message) + require.NoError(t, err) + + // Check that we can read the recent audits + audits, err := readRecentAudit(tab100Alias, 0) require.NoError(t, err) + require.Len(t, audits, 1) + require.EqualValues(t, 1, audits[0].AuditID) + require.EqualValues(t, auditType, audits[0].AuditType) + require.EqualValues(t, message, audits[0].Message) + require.EqualValues(t, tab100Alias, audits[0].AuditTabletAlias) - audits, err := ReadRecentAudit(instance100, 0) + // Check the same for no-filtering + audits, err = readRecentAudit("", 0) require.NoError(t, err) require.Len(t, audits, 1) require.EqualValues(t, 1, audits[0].AuditID) require.EqualValues(t, auditType, audits[0].AuditType) require.EqualValues(t, message, audits[0].Message) - require.EqualValues(t, *instance100, audits[0].AuditInstanceKey) + require.EqualValues(t, tab100Alias, audits[0].AuditTabletAlias) }) - t.Run("Audit to File", func(t *testing.T) { + t.Run("audit to File", func(t *testing.T) { config.Config.AuditToBackendDB = false config.Config.AuditToSyslog = false @@ -103,7 +114,7 @@ func TestAuditOperation(t *testing.T) { defer os.Remove(file.Name()) config.Config.AuditLogFile = file.Name() - err = AuditOperation(auditType, instance100, message) + err = AuditOperation(auditType, tab100Alias, message) require.NoError(t, err) // Give a little time for the write to succeed since it happens in a separate go-routine @@ -112,6 +123,54 @@ func TestAuditOperation(t *testing.T) { time.Sleep(100 * time.Millisecond) fileContent, err := os.ReadFile(file.Name()) require.NoError(t, err) - require.Contains(t, string(fileContent), "\ttest-audit-operation\tlocalhost\t100\t[ks:0]\ttest-message") + require.Contains(t, string(fileContent), "\ttest-audit-operation\tzone-1-0000000100\t[ks:0]\ttest-message") + }) +} + +// audit presents a single audit entry (namely in the database) +type audit struct { + AuditID int64 + AuditTimestamp string + AuditType string + AuditTabletAlias string + Message string +} + +// readRecentAudit returns a list of audit entries order chronologically descending, using page number. +func readRecentAudit(tabletAlias string, page int) ([]audit, error) { + res := []audit{} + var args []any + whereCondition := `` + if tabletAlias != "" { + whereCondition = `where alias=?` + args = append(args, tabletAlias) + } + query := fmt.Sprintf(` + select + audit_id, + audit_timestamp, + audit_type, + alias, + message + from + audit + %s + order by + audit_timestamp desc + limit ? + offset ? + `, whereCondition) + args = append(args, config.AuditPageSize, page*config.AuditPageSize) + err := db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { + a := audit{} + a.AuditID = m.GetInt64("audit_id") + a.AuditTimestamp = m.GetString("audit_timestamp") + a.AuditType = m.GetString("audit_type") + a.AuditTabletAlias = m.GetString("alias") + a.Message = m.GetString("message") + + res = append(res, a) + return nil }) + return res, err } diff --git a/go/vt/vtorc/inst/candidate_database_instance.go b/go/vt/vtorc/inst/candidate_database_instance.go deleted file mode 100644 index 5cd4b5c6a0b..00000000000 --- a/go/vt/vtorc/inst/candidate_database_instance.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - Copyright 2016 Simon J Mudd - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "fmt" - - "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" - "vitess.io/vitess/go/vt/vtorc/db" -) - -// CandidateDatabaseInstance contains information about explicit promotion rules for an instance -type CandidateDatabaseInstance struct { - Hostname string - Port int - PromotionRule promotionrule.CandidatePromotionRule - LastSuggestedString string - PromotionRuleExpiry string // generated when retrieved from database for consistency reasons -} - -func NewCandidateDatabaseInstance(instanceKey *InstanceKey, promotionRule promotionrule.CandidatePromotionRule) *CandidateDatabaseInstance { - return &CandidateDatabaseInstance{ - Hostname: instanceKey.Hostname, - Port: instanceKey.Port, - PromotionRule: promotionRule, - } -} - -func (cdi *CandidateDatabaseInstance) WithCurrentTime() *CandidateDatabaseInstance { - cdi.LastSuggestedString, _ = db.ReadTimeNow() - return cdi -} - -// String returns a string representation of the CandidateDatabaseInstance struct -func (cdi *CandidateDatabaseInstance) String() string { - return fmt.Sprintf("%s:%d %s", cdi.Hostname, cdi.Port, cdi.PromotionRule) -} - -// Key returns an instance key representing this candidate -func (cdi *CandidateDatabaseInstance) Key() *InstanceKey { - return &InstanceKey{Hostname: cdi.Hostname, Port: cdi.Port} -} diff --git a/go/vt/vtorc/inst/candidate_database_instance_dao.go b/go/vt/vtorc/inst/candidate_database_instance_dao.go deleted file mode 100644 index 95bbb53f617..00000000000 --- a/go/vt/vtorc/inst/candidate_database_instance_dao.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - Copyright 2016 Simon J Mudd - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/vtorc/config" - "vitess.io/vitess/go/vt/vtorc/db" -) - -// RegisterCandidateInstance markes a given instance as suggested for succeeding a primary in the event of failover. -func RegisterCandidateInstance(candidate *CandidateDatabaseInstance) error { - if candidate.LastSuggestedString == "" { - candidate = candidate.WithCurrentTime() - } - args := sqlutils.Args(candidate.Hostname, candidate.Port, string(candidate.PromotionRule), candidate.LastSuggestedString) - - query := ` - insert into candidate_database_instance ( - hostname, - port, - promotion_rule, - last_suggested - ) values ( - ?, ?, ?, ? - ) on duplicate key update - last_suggested=values(last_suggested), - promotion_rule=values(promotion_rule) - ` - writeFunc := func() error { - _, err := db.ExecVTOrc(query, args...) - if err != nil { - log.Error(err) - } - return err - } - return ExecDBWriteFunc(writeFunc) -} - -// ExpireCandidateInstances removes stale primary candidate suggestions. -func ExpireCandidateInstances() error { - writeFunc := func() error { - _, err := db.ExecVTOrc(` - delete from candidate_database_instance - where last_suggested < NOW() - INTERVAL ? MINUTE - `, config.CandidateInstanceExpireMinutes, - ) - if err != nil { - log.Error(err) - } - return err - } - return ExecDBWriteFunc(writeFunc) -} diff --git a/go/vt/vtorc/inst/downtime.go b/go/vt/vtorc/inst/downtime.go deleted file mode 100644 index 7110df1e60b..00000000000 --- a/go/vt/vtorc/inst/downtime.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - Copyright 2017 Shlomi Noach, GitHub Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "time" -) - -type Downtime struct { - Key *InstanceKey - Owner string - Reason string - Duration time.Duration - BeginsAt time.Time - EndsAt time.Time - BeginsAtString string - EndsAtString string -} - -func NewDowntime(instanceKey *InstanceKey, owner string, reason string, duration time.Duration) *Downtime { - downtime := &Downtime{ - Key: instanceKey, - Owner: owner, - Reason: reason, - Duration: duration, - BeginsAt: time.Now(), - } - downtime.EndsAt = downtime.BeginsAt.Add(downtime.Duration) - return downtime -} - -func (downtime *Downtime) Ended() bool { - return downtime.EndsAt.Before(time.Now()) -} - -func (downtime *Downtime) EndsIn() time.Duration { - return time.Until(downtime.EndsAt) -} diff --git a/go/vt/vtorc/inst/downtime_dao.go b/go/vt/vtorc/inst/downtime_dao.go deleted file mode 100644 index 53b12e325e8..00000000000 --- a/go/vt/vtorc/inst/downtime_dao.go +++ /dev/null @@ -1,193 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "fmt" - "time" - - "vitess.io/vitess/go/vt/log" - - "vitess.io/vitess/go/vt/vtorc/config" - "vitess.io/vitess/go/vt/vtorc/db" -) - -// BeginDowntime will make mark an instance as downtimed (or override existing downtime period) -func BeginDowntime(downtime *Downtime) (err error) { - if downtime.Duration == 0 { - downtime.Duration = config.MaintenanceExpireMinutes * time.Minute - } - if downtime.EndsAtString != "" { - _, err = db.ExecVTOrc(` - insert - into database_instance_downtime ( - hostname, port, downtime_active, begin_timestamp, end_timestamp, owner, reason - ) VALUES ( - ?, ?, 1, ?, ?, ?, ? - ) - on duplicate key update - downtime_active=values(downtime_active), - begin_timestamp=values(begin_timestamp), - end_timestamp=values(end_timestamp), - owner=values(owner), - reason=values(reason) - `, - downtime.Key.Hostname, - downtime.Key.Port, - downtime.BeginsAtString, - downtime.EndsAtString, - downtime.Owner, - downtime.Reason, - ) - } else { - if downtime.Ended() { - // No point in writing it down; it's expired - return nil - } - - _, err = db.ExecVTOrc(` - insert - into database_instance_downtime ( - hostname, port, downtime_active, begin_timestamp, end_timestamp, owner, reason - ) VALUES ( - ?, ?, 1, NOW(), NOW() + INTERVAL ? SECOND, ?, ? - ) - on duplicate key update - downtime_active=values(downtime_active), - begin_timestamp=values(begin_timestamp), - end_timestamp=values(end_timestamp), - owner=values(owner), - reason=values(reason) - `, - downtime.Key.Hostname, - downtime.Key.Port, - int(downtime.EndsIn().Seconds()), - downtime.Owner, - downtime.Reason, - ) - } - if err != nil { - log.Error(err) - return err - } - _ = AuditOperation("begin-downtime", downtime.Key, fmt.Sprintf("owner: %s, reason: %s", downtime.Owner, downtime.Reason)) - - return nil -} - -// EndDowntime will remove downtime flag from an instance -func EndDowntime(instanceKey *InstanceKey) (wasDowntimed bool, err error) { - res, err := db.ExecVTOrc(` - delete from - database_instance_downtime - where - hostname = ? - and port = ? - `, - instanceKey.Hostname, - instanceKey.Port, - ) - if err != nil { - log.Error(err) - return wasDowntimed, err - } - - if affected, _ := res.RowsAffected(); affected > 0 { - wasDowntimed = true - _ = AuditOperation("end-downtime", instanceKey, "") - } - return wasDowntimed, err -} - -// renewLostInRecoveryDowntime renews hosts who are downtimed due to being lost in recovery, such that -// their downtime never expires. -func renewLostInRecoveryDowntime() error { - _, err := db.ExecVTOrc(` - update - database_instance_downtime - set - end_timestamp = NOW() + INTERVAL ? SECOND - where - end_timestamp > NOW() - and reason = ? - `, - config.LostInRecoveryDowntimeSeconds, - DowntimeLostInRecoveryMessage, - ) - - return err -} - -// expireLostInRecoveryDowntime expires downtime for servers who have been lost in recovery in the last, -// but are now replicating. -func expireLostInRecoveryDowntime() error { - instances, err := ReadLostInRecoveryInstances("", "") - if err != nil { - return err - } - if len(instances) == 0 { - return nil - } - for _, instance := range instances { - // We _may_ expire this downtime, but only after a minute - // This is a graceful period, during which other servers can claim ownership of the alias, - // or can update their own cluster name to match a new primary's name - if instance.ElapsedDowntime < time.Minute { - continue - } - if !instance.IsLastCheckValid { - continue - } - if instance.ReplicaRunning() { - // back, alive, replicating in some topology - if _, err := EndDowntime(&instance.Key); err != nil { - return err - } - } - } - return nil -} - -// ExpireDowntime will remove the maintenance flag on old downtimes -func ExpireDowntime() error { - if err := renewLostInRecoveryDowntime(); err != nil { - log.Error(err) - return err - } - if err := expireLostInRecoveryDowntime(); err != nil { - log.Error(err) - return err - } - { - res, err := db.ExecVTOrc(` - delete from - database_instance_downtime - where - end_timestamp < NOW() - `, - ) - if err != nil { - log.Error(err) - return err - } - if rowsAffected, _ := res.RowsAffected(); rowsAffected > 0 { - _ = AuditOperation("expire-downtime", nil, fmt.Sprintf("Expired %d entries", rowsAffected)) - } - } - - return nil -} diff --git a/go/vt/vtorc/inst/durability.go b/go/vt/vtorc/inst/durability.go deleted file mode 100644 index 272fa838af8..00000000000 --- a/go/vt/vtorc/inst/durability.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package inst - -import ( - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/vtctl/reparentutil" - "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" -) - -// IsReplicaSemiSync returns the replica semi-sync setting for the instance. -func IsReplicaSemiSync[V InstanceKey | *topodatapb.Tablet](durabilityPolicy reparentutil.Durabler, primaryInstance V, replicaInstance V) bool { - primary, err := getTablet(primaryInstance) - if err != nil { - return false - } - replica, err := getTablet(replicaInstance) - if err != nil { - return false - } - return reparentutil.IsReplicaSemiSync(durabilityPolicy, primary, replica) -} - -// SemiSyncAckers returns the primary semi-sync setting for the instance. -// 0 means none. Non-zero specifies the number of required ackers. -func SemiSyncAckers[V InstanceKey | *topodatapb.Tablet](durabilityPolicy reparentutil.Durabler, instance V) int { - primary, err := getTablet(instance) - if err != nil { - return 0 - } - return reparentutil.SemiSyncAckers(durabilityPolicy, primary) -} - -// PromotionRule returns the promotion rule for the instance. -func PromotionRule[V InstanceKey | *topodatapb.Tablet](durabilityPolicy reparentutil.Durabler, instance V) promotionrule.CandidatePromotionRule { - tablet, err := getTablet(instance) - if err != nil { - return promotionrule.MustNot - } - return reparentutil.PromotionRule(durabilityPolicy, tablet) -} - -func getTablet[V InstanceKey | *topodatapb.Tablet](instance V) (*topodatapb.Tablet, error) { - var instanceTablet *topodatapb.Tablet - var err error - switch node := any(instance).(type) { - case InstanceKey: - instanceTablet, err = ReadTablet(node) - if err != nil { - return nil, err - } - case *topodatapb.Tablet: - instanceTablet = node - } - return instanceTablet, nil -} - -// GetDurabilityPolicy gets the durability policy for the keyspace of the given instance -func GetDurabilityPolicy[V InstanceKey | *topodatapb.Tablet](instance V) (reparentutil.Durabler, error) { - tablet, err := getTablet(instance) - if err != nil { - return nil, err - } - ki, err := ReadKeyspace(tablet.Keyspace) - if err != nil { - return nil, err - } - return reparentutil.GetDurabilityPolicy(ki.DurabilityPolicy) -} diff --git a/go/vt/vtorc/inst/instance.go b/go/vt/vtorc/inst/instance.go index dd1526ff090..1216d4c24ae 100644 --- a/go/vt/vtorc/inst/instance.go +++ b/go/vt/vtorc/inst/instance.go @@ -21,16 +21,13 @@ import ( "encoding/json" "strings" "time" - - "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" ) -const ReasonableDiscoveryLatency = 500 * time.Millisecond - // Instance represents a database instance, including its current configuration & status. // It presents important replication configuration and detailed replication status. type Instance struct { - Key InstanceKey + Hostname string + Port int InstanceAlias string ServerID uint ServerUUID string @@ -43,10 +40,10 @@ type Instance struct { LogBinEnabled bool LogReplicationUpdatesEnabled bool SelfBinlogCoordinates BinlogCoordinates - SourceKey InstanceKey + SourceHost string + SourcePort int SourceUUID string AncestryUUID string - IsDetachedPrimary bool ReplicationSQLThreadRuning bool ReplicationIOThreadRuning bool @@ -95,50 +92,18 @@ type Instance struct { IsRecentlyChecked bool SecondsSinceLastSeen sql.NullInt64 - // Careful. IsCandidate and PromotionRule are used together - // and probably need to be merged. IsCandidate's value may - // be picked up from daabase_candidate_instance's value when - // reading an instance from the db. - IsCandidate bool - PromotionRule promotionrule.CandidatePromotionRule - IsDowntimed bool - DowntimeReason string - DowntimeOwner string - DowntimeEndTimestamp string - ElapsedDowntime time.Duration - UnresolvedHostname string - AllowTLS bool + AllowTLS bool Problems []string LastDiscoveryLatency time.Duration - - seed bool // Means we force this instance to be written to backend, even if it's invalid, empty or forgotten - - /* All things Group Replication below */ - - // Group replication global variables - ReplicationGroupName string - ReplicationGroupIsSinglePrimary bool - - // Replication group members information. See - // https://dev.mysql.com/doc/refman/8.0/en/replication-group-members-table.html for details. - ReplicationGroupMemberState string - ReplicationGroupMemberRole string - - // List of all known members of the same group - ReplicationGroupMembers InstanceKeyMap - - // Primary of the replication group - ReplicationGroupPrimaryInstanceKey InstanceKey } // NewInstance creates a new, empty instance func NewInstance() *Instance { return &Instance{ - ReplicationGroupMembers: make(map[InstanceKey]bool), - Problems: []string{}, + Problems: []string{}, } } @@ -154,7 +119,7 @@ func (instance *Instance) MarshalJSON() ([]byte, error) { // Equals tests that this instance is the same instance as other. The function does not test // configuration or status. func (instance *Instance) Equals(other *Instance) bool { - return instance.Key == other.Key + return instance.InstanceAlias == other.InstanceAlias } // MajorVersion returns this instance's major version number (e.g. for 5.5.36 it returns "5.5") @@ -162,48 +127,11 @@ func (instance *Instance) MajorVersion() []string { return MajorVersion(instance.Version) } -// MajorVersion returns this instance's major version number (e.g. for 5.5.36 it returns "5.5") +// MajorVersionString returns this instance's major version number (e.g. for 5.5.36 it returns "5.5") func (instance *Instance) MajorVersionString() string { return strings.Join(instance.MajorVersion(), ".") } -func (instance *Instance) IsMySQL51() bool { - return instance.MajorVersionString() == "5.1" -} - -func (instance *Instance) IsMySQL55() bool { - return instance.MajorVersionString() == "5.5" -} - -func (instance *Instance) IsMySQL56() bool { - return instance.MajorVersionString() == "5.6" -} - -func (instance *Instance) IsMySQL57() bool { - return instance.MajorVersionString() == "5.7" -} - -func (instance *Instance) IsMySQL80() bool { - return instance.MajorVersionString() == "8.0" -} - -// IsSmallerBinlogFormat returns true when this instance's binlgo format is -// "smaller" than the other's, i.e. binary logs cannot flow from the other instance to this one -func (instance *Instance) IsSmallerBinlogFormat(other *Instance) bool { - return IsSmallerBinlogFormat(instance.BinlogFormat, other.BinlogFormat) -} - -// IsSmallerMajorVersion tests this instance against another and returns true if this instance is of a smaller "major" varsion. -// e.g. 5.5.36 is NOT a smaller major version as comapred to 5.5.36, but IS as compared to 5.6.9 -func (instance *Instance) IsSmallerMajorVersion(other *Instance) bool { - return IsSmallerMajorVersion(instance.Version, other.Version) -} - -// IsSmallerMajorVersionByString checks if this instance has a smaller major version number than given one -func (instance *Instance) IsSmallerMajorVersionByString(otherVersion string) bool { - return IsSmallerMajorVersion(instance.Version, otherVersion) -} - // IsMariaDB checks whether this is any version of MariaDB func (instance *Instance) IsMariaDB() bool { return strings.Contains(instance.Version, "MariaDB") @@ -214,26 +142,6 @@ func (instance *Instance) IsPercona() bool { return strings.Contains(instance.VersionComment, "Percona") } -// isNDB check whether this is NDB Cluster (aka MySQL Cluster) -func (instance *Instance) IsNDB() bool { - return strings.Contains(instance.Version, "-ndb-") -} - -// IsReplicationGroup checks whether the host thinks it is part of a known replication group. Notice that this might -// return True even if the group has decided to expel the member represented by this instance, as the instance might not -// know that under certain circumstances -func (instance *Instance) IsReplicationGroupMember() bool { - return instance.ReplicationGroupName != "" -} - -func (instance *Instance) IsReplicationGroupPrimary() bool { - return instance.IsReplicationGroupMember() && instance.ReplicationGroupPrimaryInstanceKey.Equals(&instance.Key) -} - -func (instance *Instance) IsReplicationGroupSecondary() bool { - return instance.IsReplicationGroupMember() && !instance.ReplicationGroupPrimaryInstanceKey.Equals(&instance.Key) -} - // IsBinlogServer checks whether this is any type of a binlog server func (instance *Instance) IsBinlogServer() bool { return false @@ -253,13 +161,6 @@ func (instance *Instance) IsOracleMySQL() bool { return true } -func (instance *Instance) SetSeed() { - instance.seed = true -} -func (instance *Instance) IsSeed() bool { - return instance.seed -} - // applyFlavorName func (instance *Instance) applyFlavorName() { if instance == nil { @@ -288,27 +189,13 @@ func (instance *Instance) FlavorNameAndMajorVersion() string { // IsReplica makes simple heuristics to decide whether this instance is a replica of another instance func (instance *Instance) IsReplica() bool { - return instance.SourceKey.Hostname != "" && instance.SourceKey.Hostname != "_" && instance.SourceKey.Port != 0 && (instance.ReadBinlogCoordinates.LogFile != "" || instance.UsingGTID()) + return instance.SourceHost != "" && instance.SourceHost != "_" && instance.SourcePort != 0 && (instance.ReadBinlogCoordinates.LogFile != "" || instance.UsingGTID()) } // IsPrimary makes simple heuristics to decide whether this instance is a primary (not replicating from any other server), // either via traditional async/semisync replication or group replication func (instance *Instance) IsPrimary() bool { - // If traditional replication is configured, it is for sure not a primary - if instance.IsReplica() { - return false - } - // If traditional replication is not configured, and it is also not part of a replication group, this host is - // a primary - if !instance.IsReplicationGroupMember() { - return true - } - // If traditional replication is not configured, and this host is part of a group, it is only considered a - // primary if it has the role of group Primary. Otherwise it is not a primary. - if instance.ReplicationGroupMemberRole == GroupReplicationMemberRolePrimary { - return true - } - return false + return !instance.IsReplica() } // ReplicaRunning returns true when this instance's status is of a replicating replica. @@ -335,8 +222,3 @@ func (instance *Instance) SQLThreadUpToDate() bool { func (instance *Instance) UsingGTID() bool { return instance.UsingOracleGTID || instance.UsingMariaDBGTID } - -// AddGroupMemberKey adds a group member to the list of this instance's group members. -func (instance *Instance) AddGroupMemberKey(groupMemberKey *InstanceKey) { - instance.ReplicationGroupMembers.AddKey(*groupMemberKey) -} diff --git a/go/vt/vtorc/inst/instance_binlog.go b/go/vt/vtorc/inst/instance_binlog.go deleted file mode 100644 index 201c1d29c5c..00000000000 --- a/go/vt/vtorc/inst/instance_binlog.go +++ /dev/null @@ -1,131 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "errors" - "regexp" - - "vitess.io/vitess/go/vt/log" -) - -// Event entries may contains table IDs (can be different for same tables on different servers) -// and also COMMIT transaction IDs (different values on different servers). -// So these need to be removed from the event entry if we're to compare and validate matching -// entries. -var eventInfoTransformations = map[*regexp.Regexp]string{ - regexp.MustCompile(`(.*) [/][*].*?[*][/](.*$)`): "$1 $2", // strip comments - regexp.MustCompile(`(COMMIT) .*$`): "$1", // commit number varies cross servers - regexp.MustCompile(`(table_id:) [0-9]+ (.*$)`): "$1 ### $2", // table ids change cross servers - regexp.MustCompile(`(table_id:) [0-9]+$`): "$1 ###", // table ids change cross servers - regexp.MustCompile(` X'([0-9a-fA-F]+)' COLLATE`): " 0x$1 COLLATE", // different ways to represent collate - regexp.MustCompile(`(BEGIN GTID [^ ]+) cid=.*`): "$1", // MariaDB GTID someimtes gets addition of "cid=...". Stripping -} - -type BinlogEvent struct { - Coordinates BinlogCoordinates - NextEventPos uint32 - EventType string - Info string -} - -func (binlogEvent *BinlogEvent) NextBinlogCoordinates() BinlogCoordinates { - return BinlogCoordinates{LogFile: binlogEvent.Coordinates.LogFile, LogPos: binlogEvent.NextEventPos, Type: binlogEvent.Coordinates.Type} -} - -func (binlogEvent *BinlogEvent) NormalizeInfo() { - for reg, replace := range eventInfoTransformations { - binlogEvent.Info = reg.ReplaceAllString(binlogEvent.Info, replace) - } -} - -func (binlogEvent *BinlogEvent) Equals(other *BinlogEvent) bool { - return binlogEvent.Coordinates.Equals(&other.Coordinates) && - binlogEvent.NextEventPos == other.NextEventPos && - binlogEvent.EventType == other.EventType && binlogEvent.Info == other.Info -} - -func (binlogEvent *BinlogEvent) EqualsIgnoreCoordinates(other *BinlogEvent) bool { - return binlogEvent.NextEventPos == other.NextEventPos && - binlogEvent.EventType == other.EventType && binlogEvent.Info == other.Info -} - -const maxEmptyEventsEvents int = 10 - -type BinlogEventCursor struct { - cachedEvents []BinlogEvent - currentEventIndex int - fetchNextEvents func(BinlogCoordinates) ([]BinlogEvent, error) - nextCoordinates BinlogCoordinates -} - -// nextEvent will return the next event entry from binary logs; it will automatically skip to next -// binary log if need be. -// Internally, it uses the cachedEvents array, so that it does not go to the MySQL server upon each call. -// Returns nil upon reaching end of binary logs. -func (binlogEventCursor *BinlogEventCursor) nextEvent(numEmptyEventsEvents int) (*BinlogEvent, error) { - if numEmptyEventsEvents > maxEmptyEventsEvents { - log.Infof("End of logs. currentEventIndex: %d, nextCoordinates: %+v", binlogEventCursor.currentEventIndex, binlogEventCursor.nextCoordinates) - // End of logs - return nil, nil - } - if len(binlogEventCursor.cachedEvents) == 0 { - // Cache exhausted; get next bulk of entries and return the next entry - nextFileCoordinates, err := binlogEventCursor.nextCoordinates.NextFileCoordinates() - if err != nil { - return nil, err - } - log.Infof("zero cached events, next file: %+v", nextFileCoordinates) - binlogEventCursor.cachedEvents, err = binlogEventCursor.fetchNextEvents(nextFileCoordinates) - if err != nil { - return nil, err - } - binlogEventCursor.currentEventIndex = -1 - // While this seems recursive do note that recursion level is at most 1, since we either have - // entries in the next binlog (no further recursion) or we don't (immediate termination) - return binlogEventCursor.nextEvent(numEmptyEventsEvents + 1) - } - if binlogEventCursor.currentEventIndex+1 < len(binlogEventCursor.cachedEvents) { - // We have enough cache to go by - binlogEventCursor.currentEventIndex++ - event := &binlogEventCursor.cachedEvents[binlogEventCursor.currentEventIndex] - binlogEventCursor.nextCoordinates = event.NextBinlogCoordinates() - return event, nil - } - // Cache exhausted; get next bulk of entries and return the next entry - var err error - binlogEventCursor.cachedEvents, err = binlogEventCursor.fetchNextEvents(binlogEventCursor.cachedEvents[len(binlogEventCursor.cachedEvents)-1].NextBinlogCoordinates()) - if err != nil { - return nil, err - } - binlogEventCursor.currentEventIndex = -1 - // While this seems recursive do note that recursion level is at most 1, since we either have - // entries in the next binlog (no further recursion) or we don't (immediate termination) - return binlogEventCursor.nextEvent(numEmptyEventsEvents + 1) -} - -// NextCoordinates return the binlog coordinates of the next entry as yet unprocessed by the cursor. -// Moreover, when the cursor terminates (consumes last entry), these coordinates indicate what will be the futuristic -// coordinates of the next binlog entry. -// The value of this function is used by match-below to move a replica behind another, after exhausting the shared binlog -// entries of both. -func (binlogEventCursor *BinlogEventCursor) getNextCoordinates() (BinlogCoordinates, error) { - if binlogEventCursor.nextCoordinates.LogPos == 0 { - return binlogEventCursor.nextCoordinates, errors.New("Next coordinates unfound") - } - return binlogEventCursor.nextCoordinates, nil -} diff --git a/go/vt/vtorc/inst/instance_dao.go b/go/vt/vtorc/inst/instance_dao.go index ec175e8b455..211ddce69b1 100644 --- a/go/vt/vtorc/inst/instance_dao.go +++ b/go/vt/vtorc/inst/instance_dao.go @@ -25,28 +25,27 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" "github.com/patrickmn/go-cache" "github.com/rcrowley/go-metrics" "github.com/sjmudd/stopwatch" - "vitess.io/vitess/go/vt/external/golib/sqlutils" - - vitessmysql "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/tb" + "vitess.io/vitess/go/vt/external/golib/sqlutils" "vitess.io/vitess/go/vt/log" - replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vtctl/reparentutil" - "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" "vitess.io/vitess/go/vt/vtorc/collection" "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/metrics/query" "vitess.io/vitess/go/vt/vtorc/util" - math "vitess.io/vitess/go/vt/vtorc/util" + + replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) const ( @@ -56,21 +55,7 @@ const ( var instanceReadChan = make(chan bool, backendDBConcurrency) var instanceWriteChan = make(chan bool, backendDBConcurrency) -// Constant strings for Group Replication information -// See https://dev.mysql.com/doc/refman/8.0/en/replication-group-members-table.html for additional information. -const ( - // Group member roles - GroupReplicationMemberRolePrimary = "PRIMARY" - GroupReplicationMemberRoleSecondary = "SECONDARY" - // Group member states - GroupReplicationMemberStateOnline = "ONLINE" - GroupReplicationMemberStateRecovering = "RECOVERING" - GroupReplicationMemberStateUnreachable = "UNREACHABLE" - GroupReplicationMemberStateOffline = "OFFLINE" - GroupReplicationMemberStateError = "ERROR" -) - -var forgetInstanceKeys *cache.Cache +var forgetAliases *cache.Cache var accessDeniedCounter = metrics.NewCounter() var readTopologyInstanceCounter = metrics.NewCounter() @@ -80,6 +65,7 @@ var backendWrites = collection.CreateOrReturnCollection("BACKEND_WRITES") var writeBufferLatency = stopwatch.NewNamedStopwatch() var emptyQuotesRegexp = regexp.MustCompile(`^""$`) +var cacheInitializationCompleted atomic.Bool func init() { _ = metrics.Register("instance.access_denied", accessDeniedCounter) @@ -94,7 +80,8 @@ func init() { func initializeInstanceDao() { config.WaitForConfigurationToBeLoaded() - forgetInstanceKeys = cache.New(time.Duration(config.Config.InstancePollSeconds*3)*time.Second, time.Second) + forgetAliases = cache.New(time.Duration(config.Config.InstancePollSeconds*3)*time.Second, time.Second) + cacheInitializationCompleted.Store(true) } // ExecDBWriteFunc chooses how to execute a write onto the database: whether synchronuously or not @@ -137,19 +124,19 @@ func ExpireTableData(tableName string, timestampColumn string) error { // logReadTopologyInstanceError logs an error, if applicable, for a ReadTopologyInstance operation, // providing context and hint as for the source of the error. If there's no hint just provide the // original error. -func logReadTopologyInstanceError(instanceKey *InstanceKey, hint string, err error) error { +func logReadTopologyInstanceError(tabletAlias string, hint string, err error) error { if err == nil { return nil } - if !util.ClearToLog("ReadTopologyInstance", instanceKey.StringCode()) { + if !util.ClearToLog("ReadTopologyInstance", tabletAlias) { return err } var msg string if hint == "" { - msg = fmt.Sprintf("ReadTopologyInstance(%+v): %+v", *instanceKey, err) + msg = fmt.Sprintf("ReadTopologyInstance(%+v): %+v", tabletAlias, err) } else { msg = fmt.Sprintf("ReadTopologyInstance(%+v) %+v: %+v", - *instanceKey, + tabletAlias, strings.Replace(hint, "%", "%%", -1), // escape % err) } @@ -157,11 +144,19 @@ func logReadTopologyInstanceError(instanceKey *InstanceKey, hint string, err err return fmt.Errorf(msg) } +// RegisterStats registers stats from the inst package +func RegisterStats() { + stats.NewGaugeFunc("ErrantGtidTabletCount", "Number of tablets with errant GTIDs", func() int64 { + instances, _ := ReadInstancesWithErrantGTIds("", "") + return int64(len(instances)) + }) +} + // ReadTopologyInstance collects information on the state of a MySQL // server and writes the result synchronously to the vtorc // backend. -func ReadTopologyInstance(instanceKey *InstanceKey) (*Instance, error) { - return ReadTopologyInstanceBufferable(instanceKey, nil) +func ReadTopologyInstance(tabletAlias string) (*Instance, error) { + return ReadTopologyInstanceBufferable(tabletAlias, nil) } // ReadTopologyInstanceBufferable connects to a topology MySQL instance @@ -169,43 +164,35 @@ func ReadTopologyInstance(instanceKey *InstanceKey) (*Instance, error) { // It writes the information retrieved into vtorc's backend. // - writes are optionally buffered. // - timing information can be collected for the stages performed. -func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, latency *stopwatch.NamedStopwatch) (inst *Instance, err error) { +func ReadTopologyInstanceBufferable(tabletAlias string, latency *stopwatch.NamedStopwatch) (inst *Instance, err error) { defer func() { if r := recover(); r != nil { - err = logReadTopologyInstanceError(instanceKey, "Unexpected, aborting", tb.Errorf("%+v", r)) + err = logReadTopologyInstanceError(tabletAlias, "Unexpected, aborting", tb.Errorf("%+v", r)) } }() var waitGroup sync.WaitGroup var tablet *topodatapb.Tablet - var durability reparentutil.Durabler var fullStatus *replicationdatapb.FullStatus readingStartTime := time.Now() instance := NewInstance() instanceFound := false partialSuccess := false - resolvedHostname := "" errorChan := make(chan error, 32) - var resolveErr error - if !instanceKey.IsValid() { - latency.Start("backend") - if err := UpdateInstanceLastAttemptedCheck(instanceKey); err != nil { - log.Errorf("ReadTopologyInstanceBufferable: %+v: %v", instanceKey, err) - } - latency.Stop("backend") - return instance, fmt.Errorf("ReadTopologyInstance will not act on invalid instance key: %+v", *instanceKey) + if tabletAlias == "" { + return instance, fmt.Errorf("ReadTopologyInstance will not act on empty tablet alias") } lastAttemptedCheckTimer := time.AfterFunc(time.Second, func() { go func() { - _ = UpdateInstanceLastAttemptedCheck(instanceKey) + _ = UpdateInstanceLastAttemptedCheck(tabletAlias) }() }) latency.Start("instance") - tablet, err = ReadTablet(*instanceKey) + tablet, err = ReadTablet(tabletAlias) if err != nil { goto Cleanup } @@ -216,18 +203,14 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, latency *stopwatch goto Cleanup } - durability, err = GetDurabilityPolicy(tablet) - if err != nil { - goto Cleanup - } - - fullStatus, err = FullStatus(*instanceKey) + fullStatus, err = FullStatus(tabletAlias) if err != nil { goto Cleanup } partialSuccess = true // We at least managed to read something from the server. - instance.Key = *instanceKey + instance.Hostname = tablet.MysqlHostname + instance.Port = int(tablet.MysqlPort) { // We begin with a few operations we can run concurrently, and which do not depend on anything instance.ServerID = uint(fullStatus.ServerId) @@ -237,7 +220,6 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, latency *stopwatch instance.BinlogFormat = fullStatus.BinlogFormat instance.LogReplicationUpdatesEnabled = fullStatus.LogReplicaUpdates instance.VersionComment = fullStatus.VersionComment - resolvedHostname = instance.Key.Hostname if instance.LogBinEnabled && fullStatus.PrimaryStatus != nil { binlogPos, err := getBinlogCoordinatesFromPositionString(fullStatus.PrimaryStatus.FilePosition) @@ -254,20 +236,20 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, latency *stopwatch instance.SemiSyncPrimaryStatus = fullStatus.SemiSyncPrimaryStatus instance.SemiSyncReplicaStatus = fullStatus.SemiSyncReplicaStatus - if (instance.IsOracleMySQL() || instance.IsPercona()) && !instance.IsSmallerMajorVersionByString("5.6") { - // Stuff only supported on Oracle MySQL >= 5.6 + if instance.IsOracleMySQL() || instance.IsPercona() { + // Stuff only supported on Oracle / Percona MySQL // ... - // @@gtid_mode only available in Orcale MySQL >= 5.6 + // @@gtid_mode only available in Oracle / Percona MySQL >= 5.6 instance.GTIDMode = fullStatus.GtidMode instance.ServerUUID = fullStatus.ServerUuid if fullStatus.PrimaryStatus != nil { - GtidExecutedPos, err := vitessmysql.DecodePosition(fullStatus.PrimaryStatus.Position) + GtidExecutedPos, err := replication.DecodePosition(fullStatus.PrimaryStatus.Position) errorChan <- err if err == nil && GtidExecutedPos.GTIDSet != nil { instance.ExecutedGtidSet = GtidExecutedPos.GTIDSet.String() } } - GtidPurgedPos, err := vitessmysql.DecodePosition(fullStatus.GtidPurged) + GtidPurgedPos, err := replication.DecodePosition(fullStatus.GtidPurged) errorChan <- err if err == nil && GtidPurgedPos.GTIDSet != nil { instance.GtidPurged = GtidPurgedPos.GTIDSet.String() @@ -279,27 +261,14 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, latency *stopwatch } } } - if resolvedHostname != instance.Key.Hostname { - latency.Start("backend") - UpdateResolvedHostname(instance.Key.Hostname, resolvedHostname) - latency.Stop("backend") - instance.Key.Hostname = resolvedHostname - } - if instance.Key.Hostname == "" { - err = fmt.Errorf("ReadTopologyInstance: empty hostname (%+v). Bailing out", *instanceKey) - goto Cleanup - } - go func() { - _ = ResolveHostnameIPs(instance.Key.Hostname) - }() instance.ReplicationIOThreadState = ReplicationThreadStateNoThread instance.ReplicationSQLThreadState = ReplicationThreadStateNoThread if fullStatus.ReplicationStatus != nil { instance.HasReplicationCredentials = fullStatus.ReplicationStatus.SourceUser != "" - instance.ReplicationIOThreadState = ReplicationThreadStateFromReplicationState(vitessmysql.ReplicationState(fullStatus.ReplicationStatus.IoState)) - instance.ReplicationSQLThreadState = ReplicationThreadStateFromReplicationState(vitessmysql.ReplicationState(fullStatus.ReplicationStatus.SqlState)) + instance.ReplicationIOThreadState = ReplicationThreadStateFromReplicationState(replication.ReplicationState(fullStatus.ReplicationStatus.IoState)) + instance.ReplicationSQLThreadState = ReplicationThreadStateFromReplicationState(replication.ReplicationState(fullStatus.ReplicationStatus.SqlState)) instance.ReplicationIOThreadRuning = instance.ReplicationIOThreadState.IsRunning() instance.ReplicationSQLThreadRuning = instance.ReplicationSQLThreadState.IsRunning() @@ -326,17 +295,8 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, latency *stopwatch instance.SourceUUID = fullStatus.ReplicationStatus.SourceUuid instance.HasReplicationFilters = fullStatus.ReplicationStatus.HasReplicationFilters - primaryHostname := fullStatus.ReplicationStatus.SourceHost - primaryKey, err := NewResolveInstanceKey(primaryHostname, int(fullStatus.ReplicationStatus.SourcePort)) - if err != nil { - _ = logReadTopologyInstanceError(instanceKey, "NewResolveInstanceKey", err) - } - primaryKey.Hostname, resolveErr = ResolveHostname(primaryKey.Hostname) - if resolveErr != nil { - _ = logReadTopologyInstanceError(instanceKey, fmt.Sprintf("ResolveHostname(%q)", primaryKey.Hostname), resolveErr) - } - instance.SourceKey = *primaryKey - instance.IsDetachedPrimary = instance.SourceKey.IsDetached() + instance.SourceHost = fullStatus.ReplicationStatus.SourceHost + instance.SourcePort = int(fullStatus.ReplicationStatus.SourcePort) if fullStatus.ReplicationStatus.ReplicationLagUnknown { instance.SecondsBehindPrimary.Valid = false @@ -345,7 +305,7 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, latency *stopwatch instance.SecondsBehindPrimary.Int64 = int64(fullStatus.ReplicationStatus.ReplicationLagSeconds) } if instance.SecondsBehindPrimary.Valid && instance.SecondsBehindPrimary.Int64 < 0 { - log.Warningf("Host: %+v, instance.ReplicationLagSeconds < 0 [%+v], correcting to 0", instanceKey, instance.SecondsBehindPrimary.Int64) + log.Warningf("Alias: %+v, instance.SecondsBehindPrimary < 0 [%+v], correcting to 0", tabletAlias, instance.SecondsBehindPrimary.Int64) instance.SecondsBehindPrimary.Int64 = 0 } // And until told otherwise: @@ -368,16 +328,9 @@ func ReadTopologyInstanceBufferable(instanceKey *InstanceKey, latency *stopwatch latency.Start("backend") err = ReadInstanceClusterAttributes(instance) latency.Stop("backend") - _ = logReadTopologyInstanceError(instanceKey, "ReadInstanceClusterAttributes", err) + _ = logReadTopologyInstanceError(tabletAlias, "ReadInstanceClusterAttributes", err) } - // We need to update candidate_database_instance. - // We register the rule even if it hasn't changed, - // to bump the last_suggested time. - instance.PromotionRule = PromotionRule(durability, tablet) - err = RegisterCandidateInstance(NewCandidateDatabaseInstance(instanceKey, instance.PromotionRule).WithCurrentTime()) - _ = logReadTopologyInstanceError(instanceKey, "RegisterCandidateInstance", err) - Cleanup: waitGroup.Wait() close(errorChan) @@ -403,7 +356,6 @@ Cleanup: } // Add replication group ancestry UUID as well. Otherwise, VTOrc thinks there are errant GTIDs in group // members and its replicas, even though they are not. - instance.AncestryUUID = fmt.Sprintf("%s,%s", instance.AncestryUUID, instance.ReplicationGroupName) instance.AncestryUUID = strings.Trim(instance.AncestryUUID, ",") if instance.ExecutedGtidSet != "" && instance.primaryExecutedGtidSet != "" { // Compare primary & replica GTID sets, but ignore the sets that present the primary's UUID. @@ -426,7 +378,7 @@ Cleanup: redactedPrimaryExecutedGtidSet, _ := NewOracleGtidSet(instance.primaryExecutedGtidSet) redactedPrimaryExecutedGtidSet.RemoveUUID(instance.SourceUUID) - instance.GtidErrant, err = vitessmysql.Subtract(redactedExecutedGtidSet.String(), redactedPrimaryExecutedGtidSet.String()) + instance.GtidErrant, err = replication.Subtract(redactedExecutedGtidSet.String(), redactedPrimaryExecutedGtidSet.String()) } } } @@ -450,7 +402,7 @@ Cleanup: // tried to check the instance. last_attempted_check is also // updated on success by writeInstance. latency.Start("backend") - _ = UpdateInstanceLastChecked(instanceKey, partialSuccess) + _ = UpdateInstanceLastChecked(tabletAlias, partialSuccess) latency.Stop("backend") return nil, err } @@ -461,7 +413,7 @@ func getKeyspaceShardName(keyspace, shard string) string { } func getBinlogCoordinatesFromPositionString(position string) (BinlogCoordinates, error) { - pos, err := vitessmysql.DecodePosition(position) + pos, err := replication.DecodePosition(position) if err != nil || pos.GTIDSet == nil { return BinlogCoordinates{}, err } @@ -472,41 +424,15 @@ func getBinlogCoordinatesFromPositionString(position string) (BinlogCoordinates, return *binLogCoordinates, nil } -func ReadReplicationGroupPrimary(instance *Instance) (err error) { - query := ` - SELECT - replication_group_primary_host, - replication_group_primary_port - FROM - database_instance - WHERE - replication_group_name = ? - AND replication_group_member_role = 'PRIMARY' -` - queryArgs := sqlutils.Args(instance.ReplicationGroupName) - err = db.QueryVTOrc(query, queryArgs, func(row sqlutils.RowMap) error { - groupPrimaryHost := row.GetString("replication_group_primary_host") - groupPrimaryPort := row.GetInt("replication_group_primary_port") - resolvedGroupPrimary, err := NewResolveInstanceKey(groupPrimaryHost, groupPrimaryPort) - if err != nil { - return err - } - instance.ReplicationGroupPrimaryInstanceKey = *resolvedGroupPrimary - return nil - }) - return err -} - // ReadInstanceClusterAttributes will return the cluster name for a given instance by looking at its primary // and getting it from there. // It is a non-recursive function and so-called-recursion is performed upon periodic reading of // instances. func ReadInstanceClusterAttributes(instance *Instance) (err error) { - var primaryOrGroupPrimaryInstanceKey InstanceKey - var primaryOrGroupPrimaryReplicationDepth uint + var primaryReplicationDepth uint var ancestryUUID string - var primaryOrGroupPrimaryExecutedGtidSet string - primaryOrGroupPrimaryDataFound := false + var primaryExecutedGtidSet string + primaryDataFound := false query := ` select @@ -518,22 +444,16 @@ func ReadInstanceClusterAttributes(instance *Instance) (err error) { from database_instance where hostname=? and port=? ` - // For instances that are part of a replication group, if the host is not the group's primary, we use the - // information from the group primary. If it is the group primary, we use the information of its primary - // (if it has any). If it is not a group member, we use the information from the host's primary. - if instance.IsReplicationGroupSecondary() { - primaryOrGroupPrimaryInstanceKey = instance.ReplicationGroupPrimaryInstanceKey - } else { - primaryOrGroupPrimaryInstanceKey = instance.SourceKey - } - args := sqlutils.Args(primaryOrGroupPrimaryInstanceKey.Hostname, primaryOrGroupPrimaryInstanceKey.Port) + primaryHostname := instance.SourceHost + primaryPort := instance.SourcePort + args := sqlutils.Args(primaryHostname, primaryPort) err = db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { - primaryOrGroupPrimaryReplicationDepth = m.GetUint("replication_depth") - primaryOrGroupPrimaryInstanceKey.Hostname = m.GetString("source_host") - primaryOrGroupPrimaryInstanceKey.Port = m.GetInt("source_port") + primaryReplicationDepth = m.GetUint("replication_depth") + primaryHostname = m.GetString("source_host") + primaryPort = m.GetInt("source_port") ancestryUUID = m.GetString("ancestry_uuid") - primaryOrGroupPrimaryExecutedGtidSet = m.GetString("executed_gtid_set") - primaryOrGroupPrimaryDataFound = true + primaryExecutedGtidSet = m.GetString("executed_gtid_set") + primaryDataFound = true return nil }) if err != nil { @@ -542,18 +462,18 @@ func ReadInstanceClusterAttributes(instance *Instance) (err error) { } var replicationDepth uint - if primaryOrGroupPrimaryDataFound { - replicationDepth = primaryOrGroupPrimaryReplicationDepth + 1 + if primaryDataFound { + replicationDepth = primaryReplicationDepth + 1 } isCoPrimary := false - if primaryOrGroupPrimaryInstanceKey.Equals(&instance.Key) { + if primaryHostname == instance.Hostname && primaryPort == instance.Port { // co-primary calls for special case, in fear of the infinite loop isCoPrimary = true } instance.ReplicationDepth = replicationDepth instance.IsCoPrimary = isCoPrimary instance.AncestryUUID = ancestryUUID - instance.primaryExecutedGtidSet = primaryOrGroupPrimaryExecutedGtidSet + instance.primaryExecutedGtidSet = primaryExecutedGtidSet return nil } @@ -561,8 +481,8 @@ func ReadInstanceClusterAttributes(instance *Instance) (err error) { func readInstanceRow(m sqlutils.RowMap) *Instance { instance := NewInstance() - instance.Key.Hostname = m.GetString("hostname") - instance.Key.Port = m.GetInt("port") + instance.Hostname = m.GetString("hostname") + instance.Port = m.GetInt("port") instance.ServerID = m.GetUint("server_id") instance.ServerUUID = m.GetString("server_uuid") instance.Version = m.GetString("version") @@ -572,9 +492,8 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.BinlogRowImage = m.GetString("binlog_row_image") instance.LogBinEnabled = m.GetBool("log_bin") instance.LogReplicationUpdatesEnabled = m.GetBool("log_replica_updates") - instance.SourceKey.Hostname = m.GetString("source_host") - instance.SourceKey.Port = m.GetInt("source_port") - instance.IsDetachedPrimary = instance.SourceKey.IsDetached() + instance.SourceHost = m.GetString("source_host") + instance.SourcePort = m.GetInt("source_port") instance.ReplicationSQLThreadRuning = m.GetBool("replica_sql_running") instance.ReplicationIOThreadRuning = m.GetBool("replica_io_running") instance.ReplicationSQLThreadState = ReplicationThreadState(m.GetInt("replication_sql_thread_state")) @@ -623,30 +542,12 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.LastSeenTimestamp = m.GetString("last_seen") instance.IsLastCheckValid = m.GetBool("is_last_check_valid") instance.SecondsSinceLastSeen = m.GetNullInt64("seconds_since_last_seen") - instance.IsCandidate = m.GetBool("is_candidate") - instance.PromotionRule = promotionrule.CandidatePromotionRule(m.GetString("promotion_rule")) - instance.IsDowntimed = m.GetBool("is_downtimed") - instance.DowntimeReason = m.GetString("downtime_reason") - instance.DowntimeOwner = m.GetString("downtime_owner") - instance.DowntimeEndTimestamp = m.GetString("downtime_end_timestamp") - instance.ElapsedDowntime = time.Second * time.Duration(m.GetInt("elapsed_downtime_seconds")) - instance.UnresolvedHostname = m.GetString("unresolved_hostname") instance.AllowTLS = m.GetBool("allow_tls") - instance.InstanceAlias = m.GetString("instance_alias") + instance.InstanceAlias = m.GetString("alias") instance.LastDiscoveryLatency = time.Duration(m.GetInt64("last_discovery_latency")) * time.Nanosecond instance.applyFlavorName() - /* Read Group Replication variables below */ - instance.ReplicationGroupName = m.GetString("replication_group_name") - instance.ReplicationGroupIsSinglePrimary = m.GetBool("replication_group_is_single_primary_mode") - instance.ReplicationGroupMemberState = m.GetString("replication_group_member_state") - instance.ReplicationGroupMemberRole = m.GetString("replication_group_member_role") - instance.ReplicationGroupPrimaryInstanceKey = InstanceKey{Hostname: m.GetString("replication_group_primary_host"), - Port: m.GetInt("replication_group_primary_port")} - _ = instance.ReplicationGroupMembers.ReadJSON(m.GetString("replication_group_members")) - //instance.ReplicationGroup = m.GetString("replication_group_") - // problems if !instance.IsLastCheckValid { instance.Problems = append(instance.Problems, "last_check_invalid") @@ -654,49 +555,33 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.Problems = append(instance.Problems, "not_recently_checked") } else if instance.ReplicationThreadsExist() && !instance.ReplicaRunning() { instance.Problems = append(instance.Problems, "not_replicating") - } else if instance.ReplicationLagSeconds.Valid && math.AbsInt64(instance.ReplicationLagSeconds.Int64-int64(instance.SQLDelay)) > int64(config.Config.ReasonableReplicationLagSeconds) { + } else if instance.ReplicationLagSeconds.Valid && util.AbsInt64(instance.ReplicationLagSeconds.Int64-int64(instance.SQLDelay)) > int64(config.Config.ReasonableReplicationLagSeconds) { instance.Problems = append(instance.Problems, "replication_lag") } if instance.GtidErrant != "" { instance.Problems = append(instance.Problems, "errant_gtid") } - // Group replication problems - if instance.ReplicationGroupName != "" && instance.ReplicationGroupMemberState != GroupReplicationMemberStateOnline { - instance.Problems = append(instance.Problems, "group_replication_member_not_online") - } return instance } // readInstancesByCondition is a generic function to read instances from the backend database func readInstancesByCondition(condition string, args []any, sort string) ([](*Instance), error) { - readFunc := func() ([](*Instance), error) { - instances := [](*Instance){} + readFunc := func() ([]*Instance, error) { + var instances []*Instance if sort == "" { - sort = `hostname, port` + sort = `alias` } query := fmt.Sprintf(` select *, unix_timestamp() - unix_timestamp(last_checked) as seconds_since_last_checked, ifnull(last_checked <= last_seen, 0) as is_last_check_valid, - unix_timestamp() - unix_timestamp(last_seen) as seconds_since_last_seen, - candidate_database_instance.last_suggested is not null - and candidate_database_instance.promotion_rule in ('must', 'prefer') as is_candidate, - ifnull(nullif(candidate_database_instance.promotion_rule, ''), 'neutral') as promotion_rule, - ifnull(unresolved_hostname, '') as unresolved_hostname, - (database_instance_downtime.downtime_active is not null and ifnull(database_instance_downtime.end_timestamp, now()) > now()) as is_downtimed, - ifnull(database_instance_downtime.reason, '') as downtime_reason, - ifnull(database_instance_downtime.owner, '') as downtime_owner, - ifnull(unix_timestamp() - unix_timestamp(begin_timestamp), 0) as elapsed_downtime_seconds, - ifnull(database_instance_downtime.end_timestamp, '') as downtime_end_timestamp + unix_timestamp() - unix_timestamp(last_seen) as seconds_since_last_seen from - database_instance - left join vitess_tablet using (hostname, port) - left join candidate_database_instance using (hostname, port) - left join hostname_unresolve using (hostname) - left join database_instance_downtime using (hostname, port) + vitess_tablet + left join database_instance using (alias, hostname, port) where %s order by @@ -720,19 +605,14 @@ func readInstancesByCondition(condition string, args []any, sort string) ([](*In return instances, err } -func readInstancesByExactKey(instanceKey *InstanceKey) ([](*Instance), error) { +// ReadInstance reads an instance from the vtorc backend database +func ReadInstance(tabletAlias string) (*Instance, bool, error) { condition := ` - hostname = ? - and port = ? + alias = ? ` - return readInstancesByCondition(condition, sqlutils.Args(instanceKey.Hostname, instanceKey.Port), "") -} - -// ReadInstance reads an instance from the vtorc backend database -func ReadInstance(instanceKey *InstanceKey) (*Instance, bool, error) { - instances, err := readInstancesByExactKey(instanceKey) - // We know there will be at most one (hostname & port are PK) - // And we expect to find one + instances, err := readInstancesByCondition(condition, sqlutils.Args(tabletAlias), "") + // We know there will be at most one (alias is the PK). + // And we expect to find one. readInstanceCounter.Inc(1) if len(instances) == 0 { return nil, false, err @@ -744,25 +624,25 @@ func ReadInstance(instanceKey *InstanceKey) (*Instance, bool, error) { } // ReadReplicaInstances reads replicas of a given primary -func ReadReplicaInstances(primaryKey *InstanceKey) ([](*Instance), error) { +func ReadReplicaInstances(primaryHost string, primaryPort int) ([](*Instance), error) { condition := ` source_host = ? and source_port = ? ` - return readInstancesByCondition(condition, sqlutils.Args(primaryKey.Hostname, primaryKey.Port), "") + return readInstancesByCondition(condition, sqlutils.Args(primaryHost, primaryPort), "") } // ReadReplicaInstancesIncludingBinlogServerSubReplicas returns a list of direct slves including any replicas // of a binlog server replica -func ReadReplicaInstancesIncludingBinlogServerSubReplicas(primaryKey *InstanceKey) ([](*Instance), error) { - replicas, err := ReadReplicaInstances(primaryKey) +func ReadReplicaInstancesIncludingBinlogServerSubReplicas(primaryHost string, primaryPort int) ([](*Instance), error) { + replicas, err := ReadReplicaInstances(primaryHost, primaryPort) if err != nil { return replicas, err } for _, replica := range replicas { replica := replica if replica.IsBinlogServer() { - binlogServerReplicas, err := ReadReplicaInstancesIncludingBinlogServerSubReplicas(&replica.Key) + binlogServerReplicas, err := ReadReplicaInstancesIncludingBinlogServerSubReplicas(replica.Hostname, replica.Port) if err != nil { return replicas, err } @@ -785,178 +665,27 @@ func ReadProblemInstances(keyspace string, shard string) ([](*Instance), error) or (abs(cast(replication_lag_seconds as signed) - cast(sql_delay as signed)) > ?) or (abs(cast(replica_lag_seconds as signed) - cast(sql_delay as signed)) > ?) or (gtid_errant != '') - or (replication_group_name != '' and replication_group_member_state != 'ONLINE') ) ` args := sqlutils.Args(keyspace, keyspace, shard, shard, config.Config.InstancePollSeconds*5, config.Config.ReasonableReplicationLagSeconds, config.Config.ReasonableReplicationLagSeconds) - instances, err := readInstancesByCondition(condition, args, "") - if err != nil { - return instances, err - } - var reportedInstances [](*Instance) - for _, instance := range instances { - skip := false - if instance.IsDowntimed { - skip = true - } - if !skip { - reportedInstances = append(reportedInstances, instance) - } - } - return reportedInstances, nil + return readInstancesByCondition(condition, args, "") } -// ReadLostInRecoveryInstances returns all instances (potentially filtered by cluster) -// which are currently indicated as downtimed due to being lost during a topology recovery. -func ReadLostInRecoveryInstances(keyspace string, shard string) ([](*Instance), error) { +// ReadInstancesWithErrantGTIds reads all instances with errant GTIDs +func ReadInstancesWithErrantGTIds(keyspace string, shard string) ([]*Instance, error) { condition := ` - ifnull( - database_instance_downtime.downtime_active = 1 - and database_instance_downtime.end_timestamp > now() - and database_instance_downtime.reason = ?, 0) - and ? IN ('', keyspace) - and ? IN ('', shard) - ` - return readInstancesByCondition(condition, sqlutils.Args(DowntimeLostInRecoveryMessage, keyspace, shard), "keyspace asc, shard asc, replication_depth asc") -} - -// readUnseenPrimaryKeys will read list of primaries that have never been seen, and yet whose replicas -// seem to be replicating. -func readUnseenPrimaryKeys() ([]InstanceKey, error) { - res := []InstanceKey{} - - err := db.QueryVTOrcRowsMap(` - SELECT DISTINCT - replica_instance.source_host, replica_instance.source_port - FROM - database_instance replica_instance - LEFT JOIN - hostname_resolve ON (replica_instance.source_host = hostname_resolve.hostname) - LEFT JOIN - database_instance primary_instance ON ( - COALESCE(hostname_resolve.resolved_hostname, replica_instance.source_host) = primary_instance.hostname - and replica_instance.source_port = primary_instance.port) - WHERE - primary_instance.last_checked IS NULL - and replica_instance.source_host != '' - and replica_instance.source_host != '_' - and replica_instance.source_port > 0 - and replica_instance.replica_io_running = 1 - `, func(m sqlutils.RowMap) error { - instanceKey, _ := NewResolveInstanceKey(m.GetString("source_host"), m.GetInt("source_port")) - // we ignore the error. It can be expected that we are unable to resolve the hostname. - // Maybe that's how we got here in the first place! - res = append(res, *instanceKey) - - return nil - }) - if err != nil { - log.Error(err) - return res, err - } - - return res, nil -} - -// ForgetUnseenInstancesDifferentlyResolved will purge instances which are invalid, and whose hostname -// appears on the hostname_resolved table; this means some time in the past their hostname was unresovled, and now -// resovled to a different value; the old hostname is never accessed anymore and the old entry should be removed. -func ForgetUnseenInstancesDifferentlyResolved() error { - query := ` - select - database_instance.hostname, database_instance.port - from - hostname_resolve - JOIN database_instance ON (hostname_resolve.hostname = database_instance.hostname) - where - hostname_resolve.hostname != hostname_resolve.resolved_hostname - AND ifnull(last_checked <= last_seen, 0) = 0 - ` - keys := NewInstanceKeyMap() - err := db.QueryVTOrc(query, nil, func(m sqlutils.RowMap) error { - key := InstanceKey{ - Hostname: m.GetString("hostname"), - Port: m.GetInt("port"), - } - keys.AddKey(key) - return nil - }) - var rowsAffected int64 - for _, key := range keys.GetInstanceKeys() { - sqlResult, err := db.ExecVTOrc(` - delete from - database_instance - where - hostname = ? and port = ? - `, key.Hostname, key.Port, - ) - if err != nil { - log.Error(err) - return err - } - rows, err := sqlResult.RowsAffected() - if err != nil { - log.Error(err) - return err - } - rowsAffected = rowsAffected + rows - } - _ = AuditOperation("forget-unseen-differently-resolved", nil, fmt.Sprintf("Forgotten instances: %d", rowsAffected)) - return err -} - -// readUnknownPrimaryHostnameResolves will figure out the resolved hostnames of primary-hosts which cannot be found. -// It uses the hostname_resolve_history table to heuristically guess the correct hostname (based on "this was the -// last time we saw this hostname and it resolves into THAT") -func readUnknownPrimaryHostnameResolves() (map[string]string, error) { - res := make(map[string]string) - err := db.QueryVTOrcRowsMap(` - SELECT DISTINCT - replica_instance.source_host, hostname_resolve_history.resolved_hostname - FROM - database_instance replica_instance - LEFT JOIN hostname_resolve ON (replica_instance.source_host = hostname_resolve.hostname) - LEFT JOIN database_instance primary_instance ON ( - COALESCE(hostname_resolve.resolved_hostname, replica_instance.source_host) = primary_instance.hostname - and replica_instance.source_port = primary_instance.port - ) LEFT JOIN hostname_resolve_history ON (replica_instance.source_host = hostname_resolve_history.hostname) - WHERE - primary_instance.last_checked IS NULL - and replica_instance.source_host != '' - and replica_instance.source_host != '_' - and replica_instance.source_port > 0 - `, func(m sqlutils.RowMap) error { - res[m.GetString("source_host")] = m.GetString("resolved_hostname") - return nil - }) - if err != nil { - log.Error(err) - return res, err - } - - return res, nil -} - -// ResolveUnknownPrimaryHostnameResolves fixes missing hostname resolves based on hostname_resolve_history -// The use case is replicas replicating from some unknown-hostname which cannot be otherwise found. This could -// happen due to an expire unresolve together with clearing up of hostname cache. -func ResolveUnknownPrimaryHostnameResolves() error { - - hostnameResolves, err := readUnknownPrimaryHostnameResolves() - if err != nil { - return err - } - for hostname, resolvedHostname := range hostnameResolves { - UpdateResolvedHostname(hostname, resolvedHostname) - } + keyspace LIKE (CASE WHEN ? = '' THEN '%' ELSE ? END) + and shard LIKE (CASE WHEN ? = '' THEN '%' ELSE ? END) + and gtid_errant != '' + ` - _ = AuditOperation("resolve-unknown-primaries", nil, fmt.Sprintf("Num resolved hostnames: %d", len(hostnameResolves))) - return err + args := sqlutils.Args(keyspace, keyspace, shard, shard) + return readInstancesByCondition(condition, args, "") } // GetKeyspaceShardName gets the keyspace shard name for the given instance key -func GetKeyspaceShardName(instanceKey *InstanceKey) (keyspace string, shard string, err error) { +func GetKeyspaceShardName(tabletAlias string) (keyspace string, shard string, err error) { query := ` select keyspace, @@ -964,10 +693,9 @@ func GetKeyspaceShardName(instanceKey *InstanceKey) (keyspace string, shard stri from vitess_tablet where - hostname = ? - and port = ? + alias = ? ` - err = db.QueryVTOrc(query, sqlutils.Args(instanceKey.Hostname, instanceKey.Port), func(m sqlutils.RowMap) error { + err = db.QueryVTOrc(query, sqlutils.Args(tabletAlias), func(m sqlutils.RowMap) error { keyspace = m.GetString("keyspace") shard = m.GetString("shard") return nil @@ -987,11 +715,11 @@ func GetKeyspaceShardName(instanceKey *InstanceKey) (keyspace string, shard stri // resulted in an actual check! This can happen when TCP/IP connections are hung, in which case the "check" // never returns. In such case we multiply interval by a factor, so as not to open too many connections on // the instance. -func ReadOutdatedInstanceKeys() ([]InstanceKey, error) { - res := []InstanceKey{} +func ReadOutdatedInstanceKeys() ([]string, error) { + var res []string query := ` SELECT - hostname, port + alias FROM database_instance WHERE @@ -1002,24 +730,21 @@ func ReadOutdatedInstanceKeys() ([]InstanceKey, error) { END UNION SELECT - vitess_tablet.hostname, vitess_tablet.port + vitess_tablet.alias FROM vitess_tablet LEFT JOIN database_instance ON ( - vitess_tablet.hostname = database_instance.hostname - AND vitess_tablet.port = database_instance.port + vitess_tablet.alias = database_instance.alias ) WHERE - database_instance.hostname IS NULL + database_instance.alias IS NULL ` args := sqlutils.Args(config.Config.InstancePollSeconds, 2*config.Config.InstancePollSeconds) err := db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { - instanceKey, merr := NewResolveInstanceKey(m.GetString("hostname"), m.GetInt("port")) - if merr != nil { - log.Error(merr) - } else if !InstanceIsForgotten(instanceKey) { + tabletAlias := m.GetString("alias") + if !InstanceIsForgotten(tabletAlias) { // only if not in "forget" cache - res = append(res, *instanceKey) + res = append(res, tabletAlias) } // We don;t return an error because we want to keep filling the outdated instances list. return nil @@ -1086,6 +811,7 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo insertIgnore = true } var columns = []string{ + "alias", "hostname", "port", "last_checked", @@ -1147,24 +873,16 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo "semi_sync_primary_status", "semi_sync_primary_clients", "semi_sync_replica_status", - "instance_alias", "last_discovery_latency", - "replication_group_name", - "replication_group_is_single_primary_mode", - "replication_group_member_state", - "replication_group_member_role", - "replication_group_members", - "replication_group_primary_host", - "replication_group_primary_port", } var values = make([]string, len(columns)) for i := range columns { values[i] = "?" } - values[2] = "NOW()" // last_checked - values[3] = "NOW()" // last_attempted_check - values[4] = "1" // last_check_partial_success + values[3] = "NOW()" // last_checked + values[4] = "NOW()" // last_attempted_check + values[5] = "1" // last_check_partial_success if updateLastSeen { columns = append(columns, "last_seen") @@ -1175,8 +893,9 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo for _, instance := range instances { // number of columns minus 2 as last_checked and last_attempted_check // updated with NOW() - args = append(args, instance.Key.Hostname) - args = append(args, instance.Key.Port) + args = append(args, instance.InstanceAlias) + args = append(args, instance.Hostname) + args = append(args, instance.Port) args = append(args, instance.ServerID) args = append(args, instance.ServerUUID) args = append(args, instance.Version) @@ -1190,8 +909,8 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo args = append(args, instance.LogReplicationUpdatesEnabled) args = append(args, instance.SelfBinlogCoordinates.LogFile) args = append(args, instance.SelfBinlogCoordinates.LogPos) - args = append(args, instance.SourceKey.Hostname) - args = append(args, instance.SourceKey.Port) + args = append(args, instance.SourceHost) + args = append(args, instance.SourcePort) args = append(args, instance.ReplicationSQLThreadRuning) args = append(args, instance.ReplicationIOThreadRuning) args = append(args, instance.ReplicationSQLThreadState) @@ -1233,15 +952,7 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo args = append(args, instance.SemiSyncPrimaryStatus) args = append(args, instance.SemiSyncPrimaryClients) args = append(args, instance.SemiSyncReplicaStatus) - args = append(args, instance.InstanceAlias) args = append(args, instance.LastDiscoveryLatency.Nanoseconds()) - args = append(args, instance.ReplicationGroupName) - args = append(args, instance.ReplicationGroupIsSinglePrimary) - args = append(args, instance.ReplicationGroupMemberState) - args = append(args, instance.ReplicationGroupMemberRole) - args = append(args, instance.ReplicationGroupMembers.ToJSONString()) - args = append(args, instance.ReplicationGroupPrimaryInstanceKey.Hostname) - args = append(args, instance.ReplicationGroupPrimaryInstanceKey.Port) } sql, err := mkInsertOdku("database_instance", columns, values, len(instances), insertIgnore) @@ -1258,7 +969,7 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo func writeManyInstances(instances []*Instance, instanceWasActuallyFound bool, updateLastSeen bool) error { writeInstances := [](*Instance){} for _, instance := range instances { - if InstanceIsForgotten(&instance.Key) && !instance.IsSeed() { + if InstanceIsForgotten(instance.InstanceAlias) { continue } writeInstances = append(writeInstances, instance) @@ -1287,7 +998,7 @@ func WriteInstance(instance *Instance, instanceWasActuallyFound bool, lastError // UpdateInstanceLastChecked updates the last_check timestamp in the vtorc backed database // for a given instance -func UpdateInstanceLastChecked(instanceKey *InstanceKey, partialSuccess bool) error { +func UpdateInstanceLastChecked(tabletAlias string, partialSuccess bool) error { writeFunc := func() error { _, err := db.ExecVTOrc(` update @@ -1296,11 +1007,9 @@ func UpdateInstanceLastChecked(instanceKey *InstanceKey, partialSuccess bool) er last_checked = NOW(), last_check_partial_success = ? where - hostname = ? - and port = ?`, + alias = ?`, partialSuccess, - instanceKey.Hostname, - instanceKey.Port, + tabletAlias, ) if err != nil { log.Error(err) @@ -1318,7 +1027,7 @@ func UpdateInstanceLastChecked(instanceKey *InstanceKey, partialSuccess bool) er // And so we make sure to note down *before* we even attempt to access the instance; and this raises a red flag when we // wish to access the instance again: if last_attempted_check is *newer* than last_checked, that's bad news and means // we have a "hanging" issue. -func UpdateInstanceLastAttemptedCheck(instanceKey *InstanceKey) error { +func UpdateInstanceLastAttemptedCheck(tabletAlias string) error { writeFunc := func() error { _, err := db.ExecVTOrc(` update @@ -1326,10 +1035,8 @@ func UpdateInstanceLastAttemptedCheck(instanceKey *InstanceKey) error { set last_attempted_check = NOW() where - hostname = ? - and port = ?`, - instanceKey.Hostname, - instanceKey.Port, + alias = ?`, + tabletAlias, ) if err != nil { log.Error(err) @@ -1339,43 +1046,59 @@ func UpdateInstanceLastAttemptedCheck(instanceKey *InstanceKey) error { return ExecDBWriteFunc(writeFunc) } -func InstanceIsForgotten(instanceKey *InstanceKey) bool { - _, found := forgetInstanceKeys.Get(instanceKey.StringCode()) +func InstanceIsForgotten(tabletAlias string) bool { + _, found := forgetAliases.Get(tabletAlias) return found } // ForgetInstance removes an instance entry from the vtorc backed database. // It may be auto-rediscovered through topology or requested for discovery by multiple means. -func ForgetInstance(instanceKey *InstanceKey) error { - if instanceKey == nil { - errMsg := "ForgetInstance(): nil instanceKey" +func ForgetInstance(tabletAlias string) error { + if tabletAlias == "" { + errMsg := "ForgetInstance(): empty tabletAlias" log.Errorf(errMsg) return fmt.Errorf(errMsg) } - forgetInstanceKeys.Set(instanceKey.StringCode(), true, cache.DefaultExpiration) + forgetAliases.Set(tabletAlias, true, cache.DefaultExpiration) + log.Infof("Forgetting: %v", tabletAlias) + + // Delete from the 'vitess_tablet' table. + _, err := db.ExecVTOrc(` + delete + from vitess_tablet + where + alias = ?`, + tabletAlias, + ) + if err != nil { + log.Error(err) + return err + } + + // Also delete from the 'database_instance' table. sqlResult, err := db.ExecVTOrc(` delete from database_instance where - hostname = ? and port = ?`, - instanceKey.Hostname, - instanceKey.Port, + alias = ?`, + tabletAlias, ) if err != nil { log.Error(err) return err } + // Get the number of rows affected. If they are zero, then we tried to forget an instance that doesn't exist. rows, err := sqlResult.RowsAffected() if err != nil { log.Error(err) return err } if rows == 0 { - errMsg := fmt.Sprintf("ForgetInstance(): instance %+v not found", *instanceKey) + errMsg := fmt.Sprintf("ForgetInstance(): tablet %+v not found", tabletAlias) log.Errorf(errMsg) return fmt.Errorf(errMsg) } - _ = AuditOperation("forget", instanceKey, "") + _ = AuditOperation("forget", tabletAlias, "") return nil } @@ -1397,7 +1120,9 @@ func ForgetLongUnseenInstances() error { log.Error(err) return err } - _ = AuditOperation("forget-unseen", nil, fmt.Sprintf("Forgotten instances: %d", rows)) + if rows > 0 { + _ = AuditOperation("forget-unseen", "", fmt.Sprintf("Forgotten instances: %d", rows)) + } return err } @@ -1407,12 +1132,14 @@ func SnapshotTopologies() error { _, err := db.ExecVTOrc(` insert ignore into database_instance_topology_history (snapshot_unix_timestamp, - hostname, port, source_host, source_port, version) + alias, hostname, port, source_host, source_port, keyspace, shard, version) select UNIX_TIMESTAMP(NOW()), - hostname, port, source_host, source_port, version + vitess_tablet.alias, vitess_tablet.hostname, vitess_tablet.port, + database_instance.source_host, database_instance.source_port, + vitess_tablet.keyspace, vitess_tablet.shard, database_instance.version from - database_instance + vitess_tablet left join database_instance using (alias, hostname, port) `, ) if err != nil { @@ -1426,16 +1153,16 @@ func SnapshotTopologies() error { } // RecordStaleInstanceBinlogCoordinates snapshots the binlog coordinates of instances -func RecordStaleInstanceBinlogCoordinates(instanceKey *InstanceKey, binlogCoordinates *BinlogCoordinates) error { +func RecordStaleInstanceBinlogCoordinates(tabletAlias string, binlogCoordinates *BinlogCoordinates) error { args := sqlutils.Args( - instanceKey.Hostname, instanceKey.Port, + tabletAlias, binlogCoordinates.LogFile, binlogCoordinates.LogPos, ) _, err := db.ExecVTOrc(` delete from database_instance_stale_binlog_coordinates where - hostname=? and port=? + alias = ? and ( binary_log_file != ? or binary_log_pos != ? @@ -1450,10 +1177,10 @@ func RecordStaleInstanceBinlogCoordinates(instanceKey *InstanceKey, binlogCoordi _, err = db.ExecVTOrc(` insert ignore into database_instance_stale_binlog_coordinates ( - hostname, port, binary_log_file, binary_log_pos, first_seen + alias, binary_log_file, binary_log_pos, first_seen ) values ( - ?, ?, ?, ?, NOW() + ?, ?, ?, NOW() )`, args...) if err != nil { diff --git a/go/vt/vtorc/inst/instance_dao_test.go b/go/vt/vtorc/inst/instance_dao_test.go index 71d0ed94ff9..549389f91fe 100644 --- a/go/vt/vtorc/inst/instance_dao_test.go +++ b/go/vt/vtorc/inst/instance_dao_test.go @@ -6,19 +6,19 @@ import ( "regexp" "strings" "testing" + "time" + "github.com/patrickmn/go-cache" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/external/golib/sqlutils" + "vitess.io/vitess/go/vt/log" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" ) -var ( - i710k = InstanceKey{Hostname: "i710", Port: 3306} - i720k = InstanceKey{Hostname: "i720", Port: 3306} - i730k = InstanceKey{Hostname: "i730", Port: 3306} -) - var ( spacesRegexp = regexp.MustCompile(`[ \t\n\r]+`) ) @@ -36,9 +36,9 @@ func stripSpaces(s string) string { } func mkTestInstances() []*Instance { - i710 := Instance{Key: i710k, ServerID: 710, ExecBinlogCoordinates: BinlogCoordinates{LogFile: "mysql.000007", LogPos: 10}} - i720 := Instance{Key: i720k, ServerID: 720, ExecBinlogCoordinates: BinlogCoordinates{LogFile: "mysql.000007", LogPos: 20}} - i730 := Instance{Key: i730k, ServerID: 730, ExecBinlogCoordinates: BinlogCoordinates{LogFile: "mysql.000007", LogPos: 30}} + i710 := Instance{InstanceAlias: "zone1-i710", Hostname: "i710", Port: 3306, ServerID: 710, ExecBinlogCoordinates: BinlogCoordinates{LogFile: "mysql.000007", LogPos: 10}} + i720 := Instance{InstanceAlias: "zone1-i720", Hostname: "i720", Port: 3306, ServerID: 720, ExecBinlogCoordinates: BinlogCoordinates{LogFile: "mysql.000007", LogPos: 20}} + i730 := Instance{InstanceAlias: "zone1-i730", Hostname: "i730", Port: 3306, ServerID: 730, ExecBinlogCoordinates: BinlogCoordinates{LogFile: "mysql.000007", LogPos: 30}} instances := []*Instance{&i710, &i720, &i730} for _, instance := range instances { instance.Version = "5.6.7" @@ -59,21 +59,21 @@ func TestMkInsertOdkuSingle(t *testing.T) { // one instance s1 := `INSERT ignore INTO database_instance - (hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid, - version, major_version, version_comment, binlog_server, read_only, binlog_format, - binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, - replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, - source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, instance_alias, last_discovery_latency, replication_group_name, replication_group_is_single_primary_mode, replication_group_member_state, replication_group_member_role, replication_group_members, replication_group_primary_host, replication_group_primary_port, last_seen) - VALUES - (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()) - ON DUPLICATE KEY UPDATE - hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), data_center=VALUES(data_center), region=VALUES(region), physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls), - semi_sync_enforced=VALUES(semi_sync_enforced), semi_sync_primary_enabled=VALUES(semi_sync_primary_enabled), semi_sync_primary_timeout=VALUES(semi_sync_primary_timeout), semi_sync_primary_wait_for_replica_count=VALUES(semi_sync_primary_wait_for_replica_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_primary_status=VALUES(semi_sync_primary_status), semi_sync_primary_clients=VALUES(semi_sync_primary_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status), - instance_alias=VALUES(instance_alias), last_discovery_latency=VALUES(last_discovery_latency), replication_group_name=VALUES(replication_group_name), replication_group_is_single_primary_mode=VALUES(replication_group_is_single_primary_mode), replication_group_member_state=VALUES(replication_group_member_state), replication_group_member_role=VALUES(replication_group_member_role), replication_group_members=VALUES(replication_group_members), replication_group_primary_host=VALUES(replication_group_primary_host), replication_group_primary_port=VALUES(replication_group_primary_port), last_seen=VALUES(last_seen) - ` - a1 := `i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, + (alias, hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid, + version, major_version, version_comment, binlog_server, read_only, binlog_format, + binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, + replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, + source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, last_discovery_latency, last_seen) + VALUES + (?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()) + ON DUPLICATE KEY UPDATE + alias=VALUES(alias), hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), data_center=VALUES(data_center), region=VALUES(region), physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls), + semi_sync_enforced=VALUES(semi_sync_enforced), semi_sync_primary_enabled=VALUES(semi_sync_primary_enabled), semi_sync_primary_timeout=VALUES(semi_sync_primary_timeout), semi_sync_primary_wait_for_replica_count=VALUES(semi_sync_primary_wait_for_replica_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_primary_status=VALUES(semi_sync_primary_status), semi_sync_primary_clients=VALUES(semi_sync_primary_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status), + last_discovery_latency=VALUES(last_discovery_latency), last_seen=VALUES(last_seen) + ` + a1 := `zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, - false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, , 0, , false, , , [], , 0, ` + false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,` sql1, args1, err := mkInsertOdkuForInstances(instances[:1], false, true) require.NoError(t, err) @@ -86,22 +86,25 @@ func TestMkInsertOdkuThree(t *testing.T) { // three instances s3 := `INSERT INTO database_instance - (hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid, version, major_version, version_comment, binlog_server, read_only, binlog_format, binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, - semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, instance_alias, last_discovery_latency, replication_group_name, replication_group_is_single_primary_mode, replication_group_member_state, replication_group_member_role, replication_group_members, replication_group_primary_host, replication_group_primary_port, last_seen) - VALUES - (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()), - (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()), - (?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()) - ON DUPLICATE KEY UPDATE - hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), data_center=VALUES(data_center), region=VALUES(region), - physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls), semi_sync_enforced=VALUES(semi_sync_enforced), - semi_sync_primary_enabled=VALUES(semi_sync_primary_enabled), semi_sync_primary_timeout=VALUES(semi_sync_primary_timeout), semi_sync_primary_wait_for_replica_count=VALUES(semi_sync_primary_wait_for_replica_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_primary_status=VALUES(semi_sync_primary_status), semi_sync_primary_clients=VALUES(semi_sync_primary_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status), - instance_alias=VALUES(instance_alias), last_discovery_latency=VALUES(last_discovery_latency), replication_group_name=VALUES(replication_group_name), replication_group_is_single_primary_mode=VALUES(replication_group_is_single_primary_mode), replication_group_member_state=VALUES(replication_group_member_state), replication_group_member_role=VALUES(replication_group_member_role), replication_group_members=VALUES(replication_group_members), replication_group_primary_host=VALUES(replication_group_primary_host), replication_group_primary_port=VALUES(replication_group_primary_port), last_seen=VALUES(last_seen) - ` + (alias, hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid, + version, major_version, version_comment, binlog_server, read_only, binlog_format, + binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, + replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, + source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, last_discovery_latency, last_seen) + VALUES + (?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()), + (?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()), + (?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()) + ON DUPLICATE KEY UPDATE + alias=VALUES(alias), hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), data_center=VALUES(data_center), region=VALUES(region), + physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls), semi_sync_enforced=VALUES(semi_sync_enforced), + semi_sync_primary_enabled=VALUES(semi_sync_primary_enabled), semi_sync_primary_timeout=VALUES(semi_sync_primary_timeout), semi_sync_primary_wait_for_replica_count=VALUES(semi_sync_primary_wait_for_replica_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_primary_status=VALUES(semi_sync_primary_status), semi_sync_primary_clients=VALUES(semi_sync_primary_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status), + last_discovery_latency=VALUES(last_discovery_latency), last_seen=VALUES(last_seen) + ` a3 := ` - i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, , 0, , false, , , [], , 0, - i720, 3306, 720, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 20, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, , 0, , false, , , [], , 0, - i730, 3306, 730, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 30, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, , 0, , false, , , [], , 0, + zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0, + zone1-i720, i720, 3306, 720, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 20, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0, + zone1-i730, i730, 3306, 730, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 30, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0, ` sql3, args3, err := mkInsertOdkuForInstances(instances[:3], true, true) @@ -147,11 +150,599 @@ func TestGetKeyspaceShardName(t *testing.T) { err = SaveTablet(tab100) require.NoError(t, err) - keyspaceRead, shardRead, err := GetKeyspaceShardName(&InstanceKey{ - Hostname: hostname, - Port: int(port), - }) + keyspaceRead, shardRead, err := GetKeyspaceShardName(topoproto.TabletAliasString(tab100.Alias)) require.NoError(t, err) require.Equal(t, ks, keyspaceRead) require.Equal(t, shard, shardRead) } + +// TestReadInstance is used to test the functionality of ReadInstance and verify its failure modes and successes. +func TestReadInstance(t *testing.T) { + tests := []struct { + name string + tabletAliasToRead string + instanceFound bool + }{ + { + name: "Read success", + tabletAliasToRead: "zone1-0000000100", + instanceFound: true, + }, { + name: "Unknown tablet", + tabletAliasToRead: "unknown-tablet", + instanceFound: false, + }, + } + + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + for _, query := range initialSQL { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + got, found, err := ReadInstance(tt.tabletAliasToRead) + require.NoError(t, err) + require.Equal(t, tt.instanceFound, found) + if tt.instanceFound { + require.EqualValues(t, tt.tabletAliasToRead, got.InstanceAlias) + } + }) + } +} + +// TestReadReplicaInstances is used to test the functionality of ReadReplicaInstances and verify its failure modes and successes. +func TestReadReplicaInstances(t *testing.T) { + tests := []struct { + name string + tabletPort int + replicasLen int + }{ + { + name: "Read success - Multiple replicas", + // This tabletPort corresponds to zone1-0000000101. That is the primary for the data inserted. + // Check initialSQL for more details. + tabletPort: 6714, + replicasLen: 3, + }, { + name: "Unknown tablet", + // This tabletPort corresponds to none of the tablets. + // Check initialSQL for more details. + tabletPort: 343, + replicasLen: 0, + }, { + name: "Read success - No replicas", + // This tabletPort corresponds to zone1-0000000100. That is a replica tablet, with no replicas of its own. + // Check initialSQL for more details. + tabletPort: 6711, + replicasLen: 0, + }, + } + + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + for _, query := range initialSQL { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + instances, err := ReadReplicaInstances("localhost", tt.tabletPort) + require.NoError(t, err) + require.EqualValues(t, tt.replicasLen, len(instances)) + }) + } +} + +// TestReadProblemInstances is used to test the functionality of ReadProblemInstances and verify its failure modes and successes. +func TestReadProblemInstances(t *testing.T) { + // The test is intended to be used as follows. The initial data is stored into the database. Following this, some specific queries are run that each individual test specifies to get the desired state. + tests := []struct { + name string + sql []string + instancesRequired []string + }{ + { + name: "No problems", + sql: nil, + instancesRequired: nil, + }, { + name: "Replication stopped on a replica", + sql: []string{ + "update database_instance set replication_sql_thread_state = 0 where alias = 'zone1-0000000112'", + }, + instancesRequired: []string{"zone1-0000000112"}, + }, { + name: "IO thread stopped on a replica", + sql: []string{ + "update database_instance set replication_io_thread_state = 0 where alias = 'zone1-0000000112'", + }, + instancesRequired: []string{"zone1-0000000112"}, + }, { + name: "High replication lag", + sql: []string{ + "update database_instance set replication_lag_seconds = 1000 where alias = 'zone1-0000000112'", + }, + instancesRequired: []string{"zone1-0000000112"}, + }, { + name: "High replication lag - replica_lag", + sql: []string{ + "update database_instance set replica_lag_seconds = 1000 where alias = 'zone1-0000000112'", + }, + instancesRequired: []string{"zone1-0000000112"}, + }, { + name: "errant GTID", + sql: []string{ + "update database_instance set gtid_errant = '729a4cc4-8680-11ed-a104-47706090afbd:1' where alias = 'zone1-0000000112'", + }, + instancesRequired: []string{"zone1-0000000112"}, + }, { + name: "Many failures", + sql: []string{ + "update database_instance set gtid_errant = '729a4cc4-8680-11ed-a104-47706090afbd:1' where alias = 'zone1-0000000112'", + "update database_instance set replication_sql_thread_state = 0 where alias = 'zone1-0000000100'", + }, + instancesRequired: []string{"zone1-0000000112", "zone1-0000000100"}, + }, + } + + // We need to set InstancePollSeconds to a large value otherwise all the instances are reported as having problems since their last_checked is very old. + // Setting this value to a hundred years, we ensure that this test doesn't fail with this issue for the next hundred years. + oldVal := config.Config.InstancePollSeconds + defer func() { + config.Config.InstancePollSeconds = oldVal + }() + config.Config.InstancePollSeconds = 60 * 60 * 24 * 365 * 100 + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Each test should clear the database. The easiest way to do that is to run all the initialization commands again + defer func() { + db.ClearVTOrcDatabase() + }() + + for _, query := range append(initialSQL, tt.sql...) { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + instances, err := ReadProblemInstances("ks", "0") + require.NoError(t, err) + var tabletAliases []string + for _, instance := range instances { + tabletAliases = append(tabletAliases, instance.InstanceAlias) + } + require.ElementsMatch(t, tabletAliases, tt.instancesRequired) + }) + } +} + +// TestReadInstancesWithErrantGTIds is used to test the functionality of ReadInstancesWithErrantGTIds and verify its failure modes and successes. +func TestReadInstancesWithErrantGTIds(t *testing.T) { + // The test is intended to be used as follows. The initial data is stored into the database. Following this, some specific queries are run that each individual test specifies to get the desired state. + tests := []struct { + name string + keyspace string + shard string + sql []string + instancesRequired []string + }{ + { + name: "No instances with errant GTID", + sql: nil, + instancesRequired: nil, + }, { + name: "errant GTID", + sql: []string{ + "update database_instance set gtid_errant = '729a4cc4-8680-11ed-a104-47706090afbd:1' where alias = 'zone1-0000000112'", + }, + instancesRequired: []string{"zone1-0000000112"}, + }, { + name: "keyspace filtering - success", + keyspace: "ks", + sql: []string{ + "update database_instance set gtid_errant = '729a4cc4-8680-11ed-a104-47706090afbd:1' where alias = 'zone1-0000000112'", + }, + instancesRequired: []string{"zone1-0000000112"}, + }, { + name: "keyspace filtering - failure", + keyspace: "unknown", + sql: []string{ + "update database_instance set gtid_errant = '729a4cc4-8680-11ed-a104-47706090afbd:1' where alias = 'zone1-0000000112'", + }, + instancesRequired: nil, + }, { + name: "shard filtering - success", + keyspace: "ks", + shard: "0", + sql: []string{ + "update database_instance set gtid_errant = '729a4cc4-8680-11ed-a104-47706090afbd:1' where alias = 'zone1-0000000112'", + }, + instancesRequired: []string{"zone1-0000000112"}, + }, { + name: "shard filtering - failure", + keyspace: "ks", + shard: "unknown", + sql: []string{ + "update database_instance set gtid_errant = '729a4cc4-8680-11ed-a104-47706090afbd:1' where alias = 'zone1-0000000112'", + }, + instancesRequired: nil, + }, + } + + // We need to set InstancePollSeconds to a large value otherwise all the instances are reported as having problems since their last_checked is very old. + // Setting this value to a hundred years, we ensure that this test doesn't fail with this issue for the next hundred years. + oldVal := config.Config.InstancePollSeconds + defer func() { + config.Config.InstancePollSeconds = oldVal + }() + config.Config.InstancePollSeconds = 60 * 60 * 24 * 365 * 100 + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Each test should clear the database. The easiest way to do that is to run all the initialization commands again + defer func() { + db.ClearVTOrcDatabase() + }() + + for _, query := range append(initialSQL, tt.sql...) { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + instances, err := ReadInstancesWithErrantGTIds(tt.keyspace, tt.shard) + require.NoError(t, err) + var tabletAliases []string + for _, instance := range instances { + tabletAliases = append(tabletAliases, instance.InstanceAlias) + } + require.ElementsMatch(t, tabletAliases, tt.instancesRequired) + }) + } +} + +// TestReadInstancesByCondition is used to test the functionality of readInstancesByCondition and verify its failure modes and successes. +func TestReadInstancesByCondition(t *testing.T) { + tests := []struct { + name string + condition string + args []any + sort string + instancesRequired []string + }{ + { + name: "All instances with no sort", + condition: "1=1", + instancesRequired: []string{"zone1-0000000100", "zone1-0000000101", "zone1-0000000112", "zone2-0000000200"}, + }, { + name: "All instances sort by data_center descending and then alias ascending", + condition: "1=1", + sort: "data_center desc, alias asc", + instancesRequired: []string{"zone2-0000000200", "zone1-0000000100", "zone1-0000000101", "zone1-0000000112"}, + }, { + name: "Filtering by replication_depth", + condition: "replication_depth=1", + instancesRequired: []string{"zone1-0000000100", "zone1-0000000112", "zone2-0000000200"}, + }, { + name: "Filtering by exact alias", + condition: "alias='zone1-0000000100'", + instancesRequired: []string{"zone1-0000000100"}, + }, { + name: "No qualifying tablets", + condition: "replication_depth=15", + }, + } + + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + for _, query := range initialSQL { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + instances, err := readInstancesByCondition(tt.condition, tt.args, tt.sort) + require.NoError(t, err) + var tabletAliases []string + for _, instance := range instances { + tabletAliases = append(tabletAliases, instance.InstanceAlias) + } + require.EqualValues(t, tt.instancesRequired, tabletAliases) + }) + } +} + +// TestReadOutdatedInstanceKeys is used to test the functionality of ReadOutdatedInstanceKeys and verify its failure modes and successes. +func TestReadOutdatedInstanceKeys(t *testing.T) { + // The test is intended to be used as follows. The initial data is stored into the database. Following this, some specific queries are run that each individual test specifies to get the desired state. + tests := []struct { + name string + sql []string + instancesRequired []string + }{ + { + name: "No problems", + sql: []string{"update database_instance set last_checked = now()"}, + instancesRequired: nil, + }, { + name: "One instance is outdated", + sql: []string{ + "update database_instance set last_checked = now()", + "update database_instance set last_checked = datetime(now(), '-1 hour') where alias = 'zone1-0000000100'", + }, + instancesRequired: []string{"zone1-0000000100"}, + }, { + name: "One instance doesn't have myql data", + sql: []string{ + "update database_instance set last_checked = now()", + `INSERT INTO vitess_tablet VALUES('zone1-0000000103','localhost',7706,'ks','0','zone1',2,'0001-01-01 00:00:00+00:00','');`, + }, + instancesRequired: []string{"zone1-0000000103"}, + }, { + name: "One instance doesn't have myql data and one is outdated", + sql: []string{ + "update database_instance set last_checked = now()", + "update database_instance set last_checked = datetime(now(), '-1 hour') where alias = 'zone1-0000000100'", + `INSERT INTO vitess_tablet VALUES('zone1-0000000103','localhost',7706,'ks','0','zone1',2,'0001-01-01 00:00:00+00:00','');`, + }, + instancesRequired: []string{"zone1-0000000103", "zone1-0000000100"}, + }, + } + + // wait for the forgetAliases cache to be initialized to prevent data race. + waitForCacheInitialization() + + // We are setting InstancePollSeconds to 59 minutes, just for the test. + oldVal := config.Config.InstancePollSeconds + oldCache := forgetAliases + defer func() { + forgetAliases = oldCache + config.Config.InstancePollSeconds = oldVal + }() + config.Config.InstancePollSeconds = 60 * 25 + forgetAliases = cache.New(time.Minute, time.Minute) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Each test should clear the database. The easiest way to do that is to run all the initialization commands again + defer func() { + db.ClearVTOrcDatabase() + }() + + for _, query := range append(initialSQL, tt.sql...) { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + tabletAliases, err := ReadOutdatedInstanceKeys() + + errInDataCollection := db.QueryVTOrcRowsMap(`select alias, +last_checked, +last_attempted_check, +ROUND((JULIANDAY(now()) - JULIANDAY(last_checked)) * 86400) AS difference, +last_attempted_check <= last_checked as use1, +last_checked < now() - interval 1500 second as is_outdated1, +last_checked < now() - interval 3000 second as is_outdated2 +from database_instance`, func(rowMap sqlutils.RowMap) error { + log.Errorf("Row in database_instance - %+v", rowMap) + return nil + }) + require.NoError(t, errInDataCollection) + require.NoError(t, err) + require.ElementsMatch(t, tabletAliases, tt.instancesRequired) + }) + } +} + +// TestUpdateInstanceLastChecked is used to test the functionality of UpdateInstanceLastChecked and verify its failure modes and successes. +func TestUpdateInstanceLastChecked(t *testing.T) { + tests := []struct { + name string + tabletAlias string + partialSuccess bool + conditionToCheck string + }{ + { + name: "Verify updated last checked", + tabletAlias: "zone1-0000000100", + partialSuccess: false, + conditionToCheck: "last_checked >= now() - interval 30 second and last_check_partial_success = false", + }, { + name: "Verify partial success", + tabletAlias: "zone1-0000000100", + partialSuccess: true, + conditionToCheck: "last_checked >= now() - interval 30 second and last_check_partial_success = true", + }, { + name: "Verify no error on unknown tablet", + tabletAlias: "unknown tablet", + partialSuccess: true, + }, + } + + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + for _, query := range initialSQL { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := UpdateInstanceLastChecked(tt.tabletAlias, tt.partialSuccess) + require.NoError(t, err) + + if tt.conditionToCheck != "" { + // Verify the instance we just updated satisfies the condition specified. + instances, err := readInstancesByCondition(tt.conditionToCheck, nil, "") + require.NoError(t, err) + var tabletAliases []string + for _, instance := range instances { + tabletAliases = append(tabletAliases, instance.InstanceAlias) + } + require.Contains(t, tabletAliases, tt.tabletAlias) + } + }) + } +} + +// UpdateInstanceLastAttemptedCheck is used to test the functionality of UpdateInstanceLastAttemptedCheck and verify its failure modes and successes. +func TestUpdateInstanceLastAttemptedCheck(t *testing.T) { + tests := []struct { + name string + tabletAlias string + conditionToCheck string + }{ + { + name: "Verify updated last checked", + tabletAlias: "zone1-0000000100", + conditionToCheck: "last_attempted_check >= now() - interval 30 second", + }, { + name: "Verify no error on unknown tablet", + tabletAlias: "unknown tablet", + }, + } + + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + for _, query := range initialSQL { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := UpdateInstanceLastAttemptedCheck(tt.tabletAlias) + require.NoError(t, err) + + if tt.conditionToCheck != "" { + // Verify the instance we just updated satisfies the condition specified. + instances, err := readInstancesByCondition(tt.conditionToCheck, nil, "") + require.NoError(t, err) + var tabletAliases []string + for _, instance := range instances { + tabletAliases = append(tabletAliases, instance.InstanceAlias) + } + require.Contains(t, tabletAliases, tt.tabletAlias) + } + }) + } +} + +// TestForgetInstanceAndInstanceIsForgotten tests the functionality of ForgetInstance and InstanceIsForgotten together. +func TestForgetInstanceAndInstanceIsForgotten(t *testing.T) { + tests := []struct { + name string + tabletAlias string + errExpected string + instanceForgotten bool + tabletsExpected []string + }{ + { + name: "Unknown tablet", + tabletAlias: "unknown-tablet", + errExpected: "ForgetInstance(): tablet unknown-tablet not found", + instanceForgotten: true, + tabletsExpected: []string{"zone1-0000000100", "zone1-0000000101", "zone1-0000000112", "zone2-0000000200"}, + }, { + name: "Empty tabletAlias", + tabletAlias: "", + errExpected: "ForgetInstance(): empty tabletAlias", + instanceForgotten: false, + tabletsExpected: []string{"zone1-0000000100", "zone1-0000000101", "zone1-0000000112", "zone2-0000000200"}, + }, { + name: "Success", + tabletAlias: "zone1-0000000112", + instanceForgotten: true, + tabletsExpected: []string{"zone1-0000000100", "zone1-0000000101", "zone2-0000000200"}, + }, + } + + // wait for the forgetAliases cache to be initialized to prevent data race. + waitForCacheInitialization() + + oldCache := forgetAliases + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + forgetAliases = oldCache + db.ClearVTOrcDatabase() + }() + forgetAliases = cache.New(time.Minute, time.Minute) + + for _, query := range initialSQL { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := ForgetInstance(tt.tabletAlias) + if tt.errExpected != "" { + require.EqualError(t, err, tt.errExpected) + } else { + require.NoError(t, err) + } + isForgotten := InstanceIsForgotten(tt.tabletAlias) + require.Equal(t, tt.instanceForgotten, isForgotten) + + instances, err := readInstancesByCondition("1=1", nil, "") + require.NoError(t, err) + var tabletAliases []string + for _, instance := range instances { + tabletAliases = append(tabletAliases, instance.InstanceAlias) + } + require.EqualValues(t, tt.tabletsExpected, tabletAliases) + }) + } +} + +func TestSnapshotTopologies(t *testing.T) { + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + + for _, query := range initialSQL { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + err := SnapshotTopologies() + require.NoError(t, err) + + query := "select alias from database_instance_topology_history" + var tabletAliases []string + err = db.QueryVTOrc(query, nil, func(rowMap sqlutils.RowMap) error { + tabletAliases = append(tabletAliases, rowMap.GetString("alias")) + return nil + }) + require.NoError(t, err) + + require.Equal(t, []string{"zone1-0000000100", "zone1-0000000101", "zone1-0000000112", "zone2-0000000200"}, tabletAliases) +} + +// waitForCacheInitialization waits for the cache to be initialized to prevent data race in tests +// that alter the cache or depend on its behaviour. +func waitForCacheInitialization() { + for { + if cacheInitializationCompleted.Load() { + return + } + time.Sleep(100 * time.Millisecond) + } +} diff --git a/go/vt/vtorc/inst/instance_key.go b/go/vt/vtorc/inst/instance_key.go deleted file mode 100644 index 2a3124aeb57..00000000000 --- a/go/vt/vtorc/inst/instance_key.go +++ /dev/null @@ -1,189 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// InstanceKey is an instance indicator, identifued by hostname and port -type InstanceKey struct { - Hostname string - Port int -} - -var ( - ipv4Regexp = regexp.MustCompile(`^([0-9]+)[.]([0-9]+)[.]([0-9]+)[.]([0-9]+)$`) - ipv4HostPortRegexp = regexp.MustCompile(`^([^:]+):([0-9]+)$`) - ipv4HostRegexp = regexp.MustCompile(`^([^:]+)$`) - ipv6HostPortRegexp = regexp.MustCompile(`^\[([:0-9a-fA-F]+)\]:([0-9]+)$`) // e.g. [2001:db8:1f70::999:de8:7648:6e8]:3308 - ipv6HostRegexp = regexp.MustCompile(`^([:0-9a-fA-F]+)$`) // e.g. 2001:db8:1f70::999:de8:7648:6e8 -) - -const detachHint = "//" - -func newInstanceKey(hostname string, port int, resolve bool) (instanceKey *InstanceKey, err error) { - if hostname == "" { - return instanceKey, fmt.Errorf("NewResolveInstanceKey: Empty hostname") - } - - instanceKey = &InstanceKey{Hostname: hostname, Port: port} - if resolve { - instanceKey, err = instanceKey.ResolveHostname() - } - return instanceKey, err -} - -// newInstanceKeyStrings -func newInstanceKeyStrings(hostname string, port string, resolve bool) (*InstanceKey, error) { - portInt, err := strconv.Atoi(port) - if err != nil { - return nil, fmt.Errorf("Invalid port: %s", port) - } - return newInstanceKey(hostname, portInt, resolve) -} - -func parseRawInstanceKey(hostPort string, resolve bool) (instanceKey *InstanceKey, err error) { - hostname := "" - port := "" - if submatch := ipv4HostPortRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 { - hostname = submatch[1] - port = submatch[2] - } else if submatch := ipv4HostRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 { - hostname = submatch[1] - } else if submatch := ipv6HostPortRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 { - hostname = submatch[1] - port = submatch[2] - } else if submatch := ipv6HostRegexp.FindStringSubmatch(hostPort); len(submatch) > 0 { - hostname = submatch[1] - } else { - return nil, fmt.Errorf("Cannot parse address: %s", hostPort) - } - if port == "" { - port = "3306" - } - return newInstanceKeyStrings(hostname, port, resolve) -} - -func NewResolveInstanceKey(hostname string, port int) (instanceKey *InstanceKey, err error) { - return newInstanceKey(hostname, port, true) -} - -// NewResolveInstanceKeyStrings creates and resolves a new instance key based on string params -func NewResolveInstanceKeyStrings(hostname string, port string) (*InstanceKey, error) { - return newInstanceKeyStrings(hostname, port, true) -} - -func ParseResolveInstanceKey(hostPort string) (instanceKey *InstanceKey, err error) { - return parseRawInstanceKey(hostPort, true) -} - -func ParseRawInstanceKey(hostPort string) (instanceKey *InstanceKey, err error) { - return parseRawInstanceKey(hostPort, false) -} - -// NewResolveInstanceKeyStrings creates and resolves a new instance key based on string params -func NewRawInstanceKeyStrings(hostname string, port string) (*InstanceKey, error) { - return newInstanceKeyStrings(hostname, port, false) -} - -func (instanceKey *InstanceKey) ResolveHostname() (*InstanceKey, error) { - if !instanceKey.IsValid() { - return instanceKey, nil - } - - hostname, err := ResolveHostname(instanceKey.Hostname) - if err == nil { - instanceKey.Hostname = hostname - } - return instanceKey, err -} - -// Equals tests equality between this key and another key -func (instanceKey *InstanceKey) Equals(other *InstanceKey) bool { - if other == nil { - return false - } - return instanceKey.Hostname == other.Hostname && instanceKey.Port == other.Port -} - -// SmallerThan returns true if this key is dictionary-smaller than another. -// This is used for consistent sorting/ordering; there's nothing magical about it. -func (instanceKey *InstanceKey) SmallerThan(other *InstanceKey) bool { - if instanceKey.Hostname < other.Hostname { - return true - } - if instanceKey.Hostname == other.Hostname && instanceKey.Port < other.Port { - return true - } - return false -} - -// IsDetached returns 'true' when this hostname is logically "detached" -func (instanceKey *InstanceKey) IsDetached() bool { - return strings.HasPrefix(instanceKey.Hostname, detachHint) -} - -// IsValid uses simple heuristics to see whether this key represents an actual instance -func (instanceKey *InstanceKey) IsValid() bool { - if instanceKey.Hostname == "_" { - return false - } - if instanceKey.IsDetached() { - return false - } - return len(instanceKey.Hostname) > 0 && instanceKey.Port > 0 -} - -// DetachedKey returns an instance key whose hostname is detahced: invalid, but recoverable -func (instanceKey *InstanceKey) DetachedKey() *InstanceKey { - if instanceKey.IsDetached() { - return instanceKey - } - return &InstanceKey{Hostname: fmt.Sprintf("%s%s", detachHint, instanceKey.Hostname), Port: instanceKey.Port} -} - -// ReattachedKey returns an instance key whose hostname is detahced: invalid, but recoverable -func (instanceKey *InstanceKey) ReattachedKey() *InstanceKey { - if !instanceKey.IsDetached() { - return instanceKey - } - return &InstanceKey{Hostname: instanceKey.Hostname[len(detachHint):], Port: instanceKey.Port} -} - -// StringCode returns an official string representation of this key -func (instanceKey *InstanceKey) StringCode() string { - return fmt.Sprintf("%s:%d", instanceKey.Hostname, instanceKey.Port) -} - -// DisplayString returns a user-friendly string representation of this key -func (instanceKey *InstanceKey) DisplayString() string { - return instanceKey.StringCode() -} - -// String returns a user-friendly string representation of this key -func (instanceKey InstanceKey) String() string { - return instanceKey.StringCode() -} - -// IsValid uses simple heuristics to see whether this key represents an actual instance -func (instanceKey *InstanceKey) IsIPv4() bool { - return ipv4Regexp.MatchString(instanceKey.Hostname) -} diff --git a/go/vt/vtorc/inst/instance_key_map.go b/go/vt/vtorc/inst/instance_key_map.go deleted file mode 100644 index 15d21151f12..00000000000 --- a/go/vt/vtorc/inst/instance_key_map.go +++ /dev/null @@ -1,141 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "encoding/json" - "sort" - "strings" -) - -// InstanceKeyMap is a convenience struct for listing InstanceKey-s -type InstanceKeyMap map[InstanceKey]bool - -func NewInstanceKeyMap() *InstanceKeyMap { - return &InstanceKeyMap{} -} - -// AddKey adds a single key to this map -func (instanceKeyMap *InstanceKeyMap) AddKey(key InstanceKey) { - (*instanceKeyMap)[key] = true -} - -// AddKeys adds all given keys to this map -func (instanceKeyMap *InstanceKeyMap) AddKeys(keys []InstanceKey) { - for _, key := range keys { - instanceKeyMap.AddKey(key) - } -} - -// AddInstances adds keys of all given instances to this map -func (instanceKeyMap *InstanceKeyMap) AddInstances(instances [](*Instance)) { - for _, instance := range instances { - instanceKeyMap.AddKey(instance.Key) - } -} - -// HasKey checks if given key is within the map -func (instanceKeyMap *InstanceKeyMap) HasKey(key InstanceKey) bool { - _, ok := (*instanceKeyMap)[key] - return ok -} - -// GetInstanceKeys returns keys in this map in the form of an array -func (instanceKeyMap *InstanceKeyMap) GetInstanceKeys() []InstanceKey { - res := []InstanceKey{} - for key := range *instanceKeyMap { - res = append(res, key) - } - sort.Slice(res, func(i, j int) bool { - return res[i].Hostname < res[j].Hostname || res[i].Hostname == res[j].Hostname && res[i].Port < res[j].Port - }) - return res -} - -// Intersect returns a keymap which is the intersection of this and another map -func (instanceKeyMap *InstanceKeyMap) Intersect(other *InstanceKeyMap) *InstanceKeyMap { - intersected := NewInstanceKeyMap() - for key := range *other { - if instanceKeyMap.HasKey(key) { - intersected.AddKey(key) - } - } - return intersected -} - -// MarshalJSON will marshal this map as JSON -func (instanceKeyMap InstanceKeyMap) MarshalJSON() ([]byte, error) { - return json.Marshal(instanceKeyMap.GetInstanceKeys()) -} - -// UnmarshalJSON reds this object from JSON -func (instanceKeyMap *InstanceKeyMap) UnmarshalJSON(b []byte) error { - var keys []InstanceKey - if err := json.Unmarshal(b, &keys); err != nil { - return err - } - *instanceKeyMap = make(InstanceKeyMap) - for _, key := range keys { - instanceKeyMap.AddKey(key) - } - return nil -} - -// ToJSON will marshal this map as JSON -func (instanceKeyMap *InstanceKeyMap) ToJSON() (string, error) { - bytes, err := instanceKeyMap.MarshalJSON() - return string(bytes), err -} - -// ToJSONString will marshal this map as JSON -func (instanceKeyMap *InstanceKeyMap) ToJSONString() string { - s, _ := instanceKeyMap.ToJSON() - return s -} - -// ToCommaDelimitedList will export this map in comma delimited format -func (instanceKeyMap *InstanceKeyMap) ToCommaDelimitedList() string { - keyDisplays := []string{} - for key := range *instanceKeyMap { - keyDisplays = append(keyDisplays, key.DisplayString()) - } - return strings.Join(keyDisplays, ",") -} - -// ReadJSON unmarshalls a json into this map -func (instanceKeyMap *InstanceKeyMap) ReadJSON(jsonString string) error { - var keys []InstanceKey - err := json.Unmarshal([]byte(jsonString), &keys) - if err != nil { - return err - } - instanceKeyMap.AddKeys(keys) - return err -} - -// ReadJSON unmarshalls a json into this map -func (instanceKeyMap *InstanceKeyMap) ReadCommaDelimitedList(list string) error { - tokens := strings.Split(list, ",") - for _, token := range tokens { - key, err := ParseResolveInstanceKey(token) - if err != nil { - return err - } - instanceKeyMap.AddKey(*key) - } - return nil -} diff --git a/go/vt/vtorc/inst/instance_key_map_test.go b/go/vt/vtorc/inst/instance_key_map_test.go deleted file mode 100644 index a390ef99532..00000000000 --- a/go/vt/vtorc/inst/instance_key_map_test.go +++ /dev/null @@ -1,125 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "math/rand" - "testing" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/vt/vtorc/config" -) - -func init() { - config.MarkConfigurationLoaded() -} - -func TestGetInstanceKeys(t *testing.T) { - for range rand.Perm(10) { // Just running many iterations to cover multiple possible map iteration ordering. Perm() is just used as an array generator here. - m := *NewInstanceKeyMap() - m.AddKey(key1) - m.AddKey(key2) - keys := m.GetInstanceKeys() - require.Equal(t, keys[0], key1) - require.Equal(t, keys[1], key2) - } - for range rand.Perm(10) { // Just running many iterations to cover multiple possible map iteration ordering. Perm() is just used as an array generator here. - m := *NewInstanceKeyMap() - m.AddKey(key2) - m.AddKey(key1) - keys := m.GetInstanceKeys() - require.Equal(t, keys[0], key1) - require.Equal(t, keys[1], key2) - } -} - -func TestInstanceKeyMapToJSON(t *testing.T) { - m := *NewInstanceKeyMap() - m.AddKey(key1) - m.AddKey(key2) - json, err := m.ToJSON() - require.NoError(t, err) - ok := (json == `[{"Hostname":"host1","Port":3306},{"Hostname":"host2","Port":3306}]`) || (json == `[{"Hostname":"host2","Port":3306},{"Hostname":"host1","Port":3306}]`) - require.True(t, ok) -} - -func TestInstanceKeyMapReadJSON(t *testing.T) { - json := `[{"Hostname":"host1","Port":3306},{"Hostname":"host2","Port":3306}]` - m := *NewInstanceKeyMap() - _ = m.ReadJSON(json) - require.Equal(t, len(m), 2) - require.True(t, m[key1]) - require.True(t, m[key2]) -} - -func TestEmptyInstanceKeyMapToCommaDelimitedList(t *testing.T) { - m := *NewInstanceKeyMap() - res := m.ToCommaDelimitedList() - - require.Equal(t, res, "") -} - -func TestInstanceKeyMapToCommaDelimitedList(t *testing.T) { - m := *NewInstanceKeyMap() - m.AddKey(key1) - m.AddKey(key2) - res := m.ToCommaDelimitedList() - - ok := (res == `host1:3306,host2:3306`) || (res == `host2:3306,host1:3306`) - require.True(t, ok) -} - -func TestIntersect(t *testing.T) { - { - m := NewInstanceKeyMap() - m.AddKey(key1) - m.AddKey(key2) - - other := NewInstanceKeyMap() - other.AddKey(key3) - other.AddKey(key2) - - intersected := m.Intersect(other) - require.Equal(t, len(*intersected), 1) - } - { - m := NewInstanceKeyMap() - m.AddKey(key1) - - other := NewInstanceKeyMap() - other.AddKey(key3) - other.AddKey(key2) - - intersected := m.Intersect(other) - require.Equal(t, len(*intersected), 0) - } - { - m := NewInstanceKeyMap() - m.AddKey(key1) - m.AddKey(key2) - - other := NewInstanceKeyMap() - other.AddKey(key1) - other.AddKey(key3) - other.AddKey(key2) - - intersected := m.Intersect(other) - require.Equal(t, len(*intersected), 2) - } - -} diff --git a/go/vt/vtorc/inst/instance_key_test.go b/go/vt/vtorc/inst/instance_key_test.go deleted file mode 100644 index 1374aad570e..00000000000 --- a/go/vt/vtorc/inst/instance_key_test.go +++ /dev/null @@ -1,209 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/vt/vtorc/config" -) - -func init() { - config.MarkConfigurationLoaded() -} - -var key1 = InstanceKey{Hostname: "host1", Port: 3306} -var key2 = InstanceKey{Hostname: "host2", Port: 3306} -var key3 = InstanceKey{Hostname: "host3", Port: 3306} - -func TestInstanceKeyEquals(t *testing.T) { - i1 := Instance{ - Key: InstanceKey{ - Hostname: "sql00.db", - Port: 3306, - }, - Version: "5.6", - } - i2 := Instance{ - Key: InstanceKey{ - Hostname: "sql00.db", - Port: 3306, - }, - Version: "5.5", - } - - require.Equal(t, i1.Key, i2.Key) - - i2.Key.Port = 3307 - require.NotEqual(t, i1.Key, i2.Key) -} - -func TestNewResolveInstanceKey(t *testing.T) { - { - i, err := NewResolveInstanceKey("127.0.0.1", 3308) - require.NoError(t, err) - require.Equal(t, i.Hostname, "127.0.0.1") - require.Equal(t, i.Port, 3308) - } - { - _, err := NewResolveInstanceKey("", 3309) - require.Error(t, err) - } - { - i, err := NewResolveInstanceKey("127.0.0.1", 0) - require.NoError(t, err) - require.False(t, i.IsValid()) - } -} - -func TestParseResolveInstanceKey(t *testing.T) { - { - key, err := ParseResolveInstanceKey("myhost:1234") - require.NoError(t, err) - require.Equal(t, key.Hostname, "myhost") - require.Equal(t, key.Port, 1234) - } - { - key, err := ParseResolveInstanceKey("myhost") - require.NoError(t, err) - require.Equal(t, key.Hostname, "myhost") - require.Equal(t, key.Port, 3306) - } - { - key, err := ParseResolveInstanceKey("10.0.0.3:3307") - require.NoError(t, err) - require.Equal(t, key.Hostname, "10.0.0.3") - require.Equal(t, key.Port, 3307) - } - { - key, err := ParseResolveInstanceKey("10.0.0.3") - require.NoError(t, err) - require.Equal(t, key.Hostname, "10.0.0.3") - require.Equal(t, key.Port, 3306) - } - { - key, err := ParseResolveInstanceKey("[2001:db8:1f70::999:de8:7648:6e8]:3308") - require.NoError(t, err) - require.Equal(t, key.Hostname, "2001:db8:1f70::999:de8:7648:6e8") - require.Equal(t, key.Port, 3308) - } - { - key, err := ParseResolveInstanceKey("::1") - require.NoError(t, err) - require.Equal(t, key.Hostname, "::1") - require.Equal(t, key.Port, 3306) - } - { - key, err := ParseResolveInstanceKey("0:0:0:0:0:0:0:0") - require.NoError(t, err) - require.Equal(t, key.Hostname, "0:0:0:0:0:0:0:0") - require.Equal(t, key.Port, 3306) - } - { - _, err := ParseResolveInstanceKey("[2001:xxxx:1f70::999:de8:7648:6e8]:3308") - require.Error(t, err) - } - { - _, err := ParseResolveInstanceKey("10.0.0.4:") - require.Error(t, err) - } - { - _, err := ParseResolveInstanceKey("10.0.0.4:5.6.7") - require.Error(t, err) - } -} - -func TestNewResolveInstanceKeyStrings(t *testing.T) { - { - i, err := NewResolveInstanceKeyStrings("127.0.0.1", "3306") - require.NoError(t, err) - require.Equal(t, i.Hostname, "127.0.0.1") - require.Equal(t, i.Port, 3306) - } - { - _, err := NewResolveInstanceKeyStrings("127.0.0.1", "") - require.Error(t, err) - } - { - _, err := NewResolveInstanceKeyStrings("127.0.0.1", "3306x") - require.Error(t, err) - } -} - -func TestInstanceKeyValid(t *testing.T) { - require.True(t, key1.IsValid()) - i, err := ParseResolveInstanceKey("_:3306") - require.NoError(t, err) - require.False(t, i.IsValid()) - i, err = ParseResolveInstanceKey("//myhost:3306") - require.NoError(t, err) - require.False(t, i.IsValid()) -} - -func TestInstanceKeyDetach(t *testing.T) { - require.False(t, key1.IsDetached()) - detached1 := key1.DetachedKey() - require.True(t, detached1.IsDetached()) - detached2 := key1.DetachedKey() - require.True(t, detached2.IsDetached()) - require.True(t, detached1.Equals(detached2)) - - reattached1 := detached1.ReattachedKey() - require.False(t, reattached1.IsDetached()) - require.True(t, reattached1.Equals(&key1)) - reattached2 := reattached1.ReattachedKey() - require.False(t, reattached2.IsDetached()) - require.True(t, reattached1.Equals(reattached2)) -} - -func TestIsIPv4(t *testing.T) { - require.False(t, key1.IsIPv4()) - { - k, _ := ParseRawInstanceKey("mysql-server-1:3306") - require.False(t, k.IsIPv4()) - } - { - k, _ := ParseRawInstanceKey("mysql-server-1") - require.False(t, k.IsIPv4()) - } - { - k, _ := ParseRawInstanceKey("my.sql.server.1") - require.False(t, k.IsIPv4()) - } - { - k, _ := ParseRawInstanceKey("mysql-server-1:3306") - require.False(t, k.IsIPv4()) - } - { - k, _ := ParseRawInstanceKey("127.0.0:3306") - require.False(t, k.IsIPv4()) - } - { - k, _ := ParseRawInstanceKey("127::0::0::1:3306") - require.False(t, k.IsIPv4()) - } - { - k, _ := ParseRawInstanceKey("127.0.0.1:3306") - require.True(t, k.IsIPv4()) - } - { - k, _ := ParseRawInstanceKey("127.0.0.1") - require.True(t, k.IsIPv4()) - } -} diff --git a/go/vt/vtorc/inst/instance_test.go b/go/vt/vtorc/inst/instance_test.go index ebc2d9d0c89..9ca2f243999 100644 --- a/go/vt/vtorc/inst/instance_test.go +++ b/go/vt/vtorc/inst/instance_test.go @@ -28,45 +28,7 @@ func init() { config.MarkConfigurationLoaded() } -var instance1 = Instance{Key: key1} - -func TestIsSmallerMajorVersion(t *testing.T) { - i55 := Instance{Version: "5.5"} - i5517 := Instance{Version: "5.5.17"} - i56 := Instance{Version: "5.6"} - - require.False(t, i55.IsSmallerMajorVersion(&i5517)) - require.False(t, i56.IsSmallerMajorVersion(&i5517)) - require.True(t, i55.IsSmallerMajorVersion(&i56)) -} - -func TestIsVersion(t *testing.T) { - i51 := Instance{Version: "5.1.19"} - i55 := Instance{Version: "5.5.17-debug"} - i56 := Instance{Version: "5.6.20"} - i57 := Instance{Version: "5.7.8-log"} - - require.True(t, i51.IsMySQL51()) - require.True(t, i55.IsMySQL55()) - require.True(t, i56.IsMySQL56()) - require.False(t, i55.IsMySQL56()) - require.True(t, i57.IsMySQL57()) - require.False(t, i56.IsMySQL57()) -} - -func TestIsSmallerBinlogFormat(t *testing.T) { - iStatement := &Instance{Key: key1, BinlogFormat: "STATEMENT"} - iRow := &Instance{Key: key2, BinlogFormat: "ROW"} - iMixed := &Instance{Key: key3, BinlogFormat: "MIXED"} - require.True(t, iStatement.IsSmallerBinlogFormat(iRow)) - require.False(t, iStatement.IsSmallerBinlogFormat(iStatement)) - require.False(t, iRow.IsSmallerBinlogFormat(iStatement)) - - require.True(t, iStatement.IsSmallerBinlogFormat(iMixed)) - require.True(t, iMixed.IsSmallerBinlogFormat(iRow)) - require.False(t, iMixed.IsSmallerBinlogFormat(iStatement)) - require.False(t, iRow.IsSmallerBinlogFormat(iMixed)) -} +var instance1 = Instance{InstanceAlias: "zone1-100"} func TestReplicationThreads(t *testing.T) { { @@ -79,7 +41,7 @@ func TestReplicationThreads(t *testing.T) { require.True(t, instance1.ReplicationThreadsStopped()) } { - i := Instance{Key: key1, ReplicationIOThreadState: ReplicationThreadStateNoThread, ReplicationSQLThreadState: ReplicationThreadStateNoThread} + i := Instance{InstanceAlias: "zone1-100", ReplicationIOThreadState: ReplicationThreadStateNoThread, ReplicationSQLThreadState: ReplicationThreadStateNoThread} require.False(t, i.ReplicationThreadsExist()) } } diff --git a/go/vt/vtorc/inst/instance_utils.go b/go/vt/vtorc/inst/instance_utils.go index b14a0794c31..f6bde729822 100644 --- a/go/vt/vtorc/inst/instance_utils.go +++ b/go/vt/vtorc/inst/instance_utils.go @@ -18,14 +18,9 @@ package inst import ( "regexp" - "strconv" "strings" ) -var ( - DowntimeLostInRecoveryMessage = "lost-in-recovery" -) - // MajorVersion returns a MySQL major version number (e.g. given "5.5.36" it returns "5.5") func MajorVersion(version string) []string { tokens := strings.Split(version, ".") @@ -35,37 +30,6 @@ func MajorVersion(version string) []string { return tokens[:2] } -// IsSmallerMajorVersion tests two versions against another and returns true if -// the former is a smaller "major" varsion than the latter. -// e.g. 5.5.36 is NOT a smaller major version as comapred to 5.5.40, but IS as compared to 5.6.9 -func IsSmallerMajorVersion(version string, otherVersion string) bool { - thisMajorVersion := MajorVersion(version) - otherMajorVersion := MajorVersion(otherVersion) - for i := 0; i < len(thisMajorVersion); i++ { - thisToken, _ := strconv.Atoi(thisMajorVersion[i]) - otherToken, _ := strconv.Atoi(otherMajorVersion[i]) - if thisToken < otherToken { - return true - } - if thisToken > otherToken { - return false - } - } - return false -} - -// IsSmallerBinlogFormat tests two binlog formats and sees if one is "smaller" than the other. -// "smaller" binlog format means you can replicate from the smaller to the larger. -func IsSmallerBinlogFormat(binlogFormat string, otherBinlogFormat string) bool { - if binlogFormat == "STATEMENT" { - return (otherBinlogFormat == "ROW" || otherBinlogFormat == "MIXED") - } - if binlogFormat == "MIXED" { - return otherBinlogFormat == "ROW" - } - return false -} - // RegexpMatchPatterns returns true if s matches any of the provided regexpPatterns func RegexpMatchPatterns(s string, regexpPatterns []string) bool { for _, filter := range regexpPatterns { diff --git a/go/vt/vtorc/inst/keyspace_dao.go b/go/vt/vtorc/inst/keyspace_dao.go index f3624449001..d764e3fc56a 100644 --- a/go/vt/vtorc/inst/keyspace_dao.go +++ b/go/vt/vtorc/inst/keyspace_dao.go @@ -22,6 +22,7 @@ import ( "vitess.io/vitess/go/vt/external/golib/sqlutils" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vtorc/db" ) @@ -30,6 +31,10 @@ var ErrKeyspaceNotFound = errors.New("keyspace not found") // ReadKeyspace reads the vitess keyspace record. func ReadKeyspace(keyspaceName string) (*topo.KeyspaceInfo, error) { + if err := topo.ValidateKeyspaceName(keyspaceName); err != nil { + return nil, err + } + query := ` select keyspace_type, @@ -73,3 +78,12 @@ func SaveKeyspace(keyspace *topo.KeyspaceInfo) error { ) return err } + +// GetDurabilityPolicy gets the durability policy for the given keyspace. +func GetDurabilityPolicy(keyspace string) (reparentutil.Durabler, error) { + ki, err := ReadKeyspace(keyspace) + if err != nil { + return nil, err + } + return reparentutil.GetDurabilityPolicy(ki.DurabilityPolicy) +} diff --git a/go/vt/vtorc/inst/keyspace_dao_test.go b/go/vt/vtorc/inst/keyspace_dao_test.go index 56ad06ec9e5..015d3e75256 100644 --- a/go/vt/vtorc/inst/keyspace_dao_test.go +++ b/go/vt/vtorc/inst/keyspace_dao_test.go @@ -25,23 +25,24 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vtorc/db" ) func TestSaveAndReadKeyspace(t *testing.T) { - orcDb, err := db.OpenVTOrc() - require.NoError(t, err) + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. defer func() { - _, err = orcDb.Exec("delete from vitess_keyspace") - require.NoError(t, err) + db.ClearVTOrcDatabase() }() tests := []struct { - name string - keyspaceName string - keyspace *topodatapb.Keyspace - keyspaceWanted *topodatapb.Keyspace - err string + name string + keyspaceName string + keyspace *topodatapb.Keyspace + keyspaceWanted *topodatapb.Keyspace + err string + errInDurabilityPolicy string + semiSyncAckersWanted int }{ { name: "Success with keyspaceType and durability", @@ -50,16 +51,16 @@ func TestSaveAndReadKeyspace(t *testing.T) { KeyspaceType: topodatapb.KeyspaceType_NORMAL, DurabilityPolicy: "semi_sync", }, - keyspaceWanted: nil, - err: "", + keyspaceWanted: nil, + semiSyncAckersWanted: 1, }, { name: "Success with keyspaceType and no durability", keyspaceName: "ks2", keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, }, - keyspaceWanted: nil, - err: "", + keyspaceWanted: nil, + errInDurabilityPolicy: "durability policy not found", }, { name: "Success with snapshot keyspaceType", keyspaceName: "ks3", @@ -67,7 +68,6 @@ func TestSaveAndReadKeyspace(t *testing.T) { KeyspaceType: topodatapb.KeyspaceType_SNAPSHOT, }, keyspaceWanted: nil, - err: "", }, { name: "Success with fields that are not stored", keyspaceName: "ks4", @@ -80,7 +80,7 @@ func TestSaveAndReadKeyspace(t *testing.T) { KeyspaceType: topodatapb.KeyspaceType_NORMAL, DurabilityPolicy: "none", }, - err: "", + semiSyncAckersWanted: 0, }, { name: "No keyspace found", keyspaceName: "ks5", @@ -107,11 +107,21 @@ func TestSaveAndReadKeyspace(t *testing.T) { readKeyspaceInfo, err := ReadKeyspace(tt.keyspaceName) if tt.err != "" { require.EqualError(t, err, tt.err) - } else { - require.NoError(t, err) - require.True(t, topotools.KeyspaceEquality(tt.keyspaceWanted, readKeyspaceInfo.Keyspace)) - require.Equal(t, tt.keyspaceName, readKeyspaceInfo.KeyspaceName()) + return + } + require.NoError(t, err) + require.True(t, topotools.KeyspaceEquality(tt.keyspaceWanted, readKeyspaceInfo.Keyspace)) + require.Equal(t, tt.keyspaceName, readKeyspaceInfo.KeyspaceName()) + if tt.keyspace.KeyspaceType == topodatapb.KeyspaceType_SNAPSHOT { + return + } + durabilityPolicy, err := GetDurabilityPolicy(tt.keyspaceName) + if tt.errInDurabilityPolicy != "" { + require.EqualError(t, err, tt.errInDurabilityPolicy) + return } + require.NoError(t, err) + require.EqualValues(t, tt.semiSyncAckersWanted, reparentutil.SemiSyncAckers(durabilityPolicy, nil)) }) } } diff --git a/go/vt/vtorc/inst/maintenance.go b/go/vt/vtorc/inst/maintenance.go deleted file mode 100644 index 08fa3554d1e..00000000000 --- a/go/vt/vtorc/inst/maintenance.go +++ /dev/null @@ -1,45 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "vitess.io/vitess/go/vt/vtorc/config" -) - -// Maintenance indicates a maintenance entry (also in the database) -type Maintenance struct { - MaintenanceID uint - Key InstanceKey - BeginTimestamp string - SecondsElapsed uint - IsActive bool - Owner string - Reason string -} - -var maintenanceOwner string - -func GetMaintenanceOwner() string { - if maintenanceOwner != "" { - return maintenanceOwner - } - return config.MaintenanceOwner -} - -func SetMaintenanceOwner(owner string) { - maintenanceOwner = owner -} diff --git a/go/vt/vtorc/inst/maintenance_dao.go b/go/vt/vtorc/inst/maintenance_dao.go deleted file mode 100644 index b2ac833b353..00000000000 --- a/go/vt/vtorc/inst/maintenance_dao.go +++ /dev/null @@ -1,86 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "fmt" - - "vitess.io/vitess/go/vt/log" - - "vitess.io/vitess/go/vt/vtorc/config" - "vitess.io/vitess/go/vt/vtorc/db" -) - -// ExpireMaintenance will remove the maintenance flag on old maintenances and on bounded maintenances -func ExpireMaintenance() error { - { - res, err := db.ExecVTOrc(` - delete from - database_instance_maintenance - where - maintenance_active is null - and end_timestamp < NOW() - INTERVAL ? DAY - `, - config.MaintenancePurgeDays, - ) - if err != nil { - log.Error(err) - return err - } - if rowsAffected, _ := res.RowsAffected(); rowsAffected > 0 { - _ = AuditOperation("expire-maintenance", nil, fmt.Sprintf("Purged historical entries: %d", rowsAffected)) - } - } - { - res, err := db.ExecVTOrc(` - delete from - database_instance_maintenance - where - maintenance_active = 1 - and end_timestamp < NOW() - `, - ) - if err != nil { - log.Error(err) - return err - } - if rowsAffected, _ := res.RowsAffected(); rowsAffected > 0 { - _ = AuditOperation("expire-maintenance", nil, fmt.Sprintf("Expired bounded: %d", rowsAffected)) - } - } - { - res, err := db.ExecVTOrc(` - delete from - database_instance_maintenance - where - explicitly_bounded = 0 - and concat(processing_node_hostname, ':', processing_node_token) not in ( - select concat(hostname, ':', token) from node_health - ) - `, - ) - if err != nil { - log.Error(err) - return err - } - if rowsAffected, _ := res.RowsAffected(); rowsAffected > 0 { - _ = AuditOperation("expire-maintenance", nil, fmt.Sprintf("Expired dead: %d", rowsAffected)) - } - } - - return nil -} diff --git a/go/vt/vtorc/inst/oracle_gtid_set.go b/go/vt/vtorc/inst/oracle_gtid_set.go index c4e88fccbd3..0ddab05ef55 100644 --- a/go/vt/vtorc/inst/oracle_gtid_set.go +++ b/go/vt/vtorc/inst/oracle_gtid_set.go @@ -22,9 +22,10 @@ import ( // OracleGtidSet represents a set of GTID ranges as depicted by Retrieved_Gtid_Set, Executed_Gtid_Set or @@gtid_purged. type OracleGtidSet struct { - GtidEntries [](*OracleGtidSetEntry) + GtidEntries []*OracleGtidSetEntry } +// NewOracleGtidSet creates a new GTID set. // Example input: `230ea8ea-81e3-11e4-972a-e25ec4bd140a:1-10539, // 316d193c-70e5-11e5-adb2-ecf4bb2262ff:1-8935:8984-6124596, // 321f5c0d-70e5-11e5-adb2-ecf4bb2262ff:1-56` @@ -54,7 +55,7 @@ func NewOracleGtidSet(gtidSet string) (res *OracleGtidSet, err error) { // By way of how this works there can only be one entry matching our UUID, but we generalize. // We keep order of entries. func (oracleGTIDSet *OracleGtidSet) RemoveUUID(uuid string) (removed bool) { - filteredEntries := [](*OracleGtidSetEntry){} + var filteredEntries []*OracleGtidSetEntry for _, entry := range oracleGTIDSet.GtidEntries { if entry.UUID == uuid { removed = true @@ -79,7 +80,7 @@ func (oracleGTIDSet *OracleGtidSet) RetainUUIDs(uuids []string) (anythingRemoved for _, uuid := range uuids { retainUUIDs[uuid] = true } - filteredEntries := [](*OracleGtidSetEntry){} + var filteredEntries []*OracleGtidSetEntry for _, entry := range oracleGTIDSet.GtidEntries { if retainUUIDs[entry.UUID] { filteredEntries = append(filteredEntries, entry) @@ -107,8 +108,8 @@ func (oracleGTIDSet *OracleGtidSet) SharedUUIDs(other *OracleGtidSet) (shared [] return shared } -// String returns a user-friendly string representation of this entry -func (oracleGTIDSet *OracleGtidSet) Explode() (result [](*OracleGtidSetEntry)) { +// Explode returns a user-friendly string representation of this entry +func (oracleGTIDSet *OracleGtidSet) Explode() (result []*OracleGtidSetEntry) { for _, entries := range oracleGTIDSet.GtidEntries { result = append(result, entries.Explode()...) } @@ -116,7 +117,7 @@ func (oracleGTIDSet *OracleGtidSet) Explode() (result [](*OracleGtidSetEntry)) { } func (oracleGTIDSet *OracleGtidSet) String() string { - tokens := []string{} + var tokens []string for _, entry := range oracleGTIDSet.GtidEntries { tokens = append(tokens, entry.String()) } diff --git a/go/vt/vtorc/inst/postponed_functions.go b/go/vt/vtorc/inst/postponed_functions.go deleted file mode 100644 index 1ce750964a5..00000000000 --- a/go/vt/vtorc/inst/postponed_functions.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "sync" - - "vitess.io/vitess/go/vt/log" -) - -type PostponedFunctionsContainer struct { - waitGroup sync.WaitGroup - mutex sync.Mutex - descriptions []string -} - -func NewPostponedFunctionsContainer() *PostponedFunctionsContainer { - postponedFunctionsContainer := &PostponedFunctionsContainer{ - descriptions: []string{}, - } - return postponedFunctionsContainer -} - -func (postponedFuncsContainer *PostponedFunctionsContainer) AddPostponedFunction(postponedFunction func() error, description string) { - postponedFuncsContainer.mutex.Lock() - defer postponedFuncsContainer.mutex.Unlock() - - postponedFuncsContainer.descriptions = append(postponedFuncsContainer.descriptions, description) - - postponedFuncsContainer.waitGroup.Add(1) - go func() { - defer postponedFuncsContainer.waitGroup.Done() - _ = postponedFunction() - }() -} - -func (postponedFuncsContainer *PostponedFunctionsContainer) Wait() { - log.Infof("PostponedFunctionsContainer: waiting on %+v postponed functions", postponedFuncsContainer.Len()) - postponedFuncsContainer.waitGroup.Wait() - log.Infof("PostponedFunctionsContainer: done waiting") -} - -func (postponedFuncsContainer *PostponedFunctionsContainer) Len() int { - postponedFuncsContainer.mutex.Lock() - defer postponedFuncsContainer.mutex.Unlock() - - return len(postponedFuncsContainer.descriptions) -} - -func (postponedFuncsContainer *PostponedFunctionsContainer) Descriptions() []string { - postponedFuncsContainer.mutex.Lock() - defer postponedFuncsContainer.mutex.Unlock() - - return postponedFuncsContainer.descriptions -} diff --git a/go/vt/vtorc/inst/process.go b/go/vt/vtorc/inst/process.go deleted file mode 100644 index 99985045b56..00000000000 --- a/go/vt/vtorc/inst/process.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -// Process presents a MySQL executing thread (as observed by PROCESSLIST) -type Process struct { - InstanceHostname string - InstancePort int - ID int64 - User string - Host string - Db string - Command string - Time int64 - State string - Info string - StartedAt string -} diff --git a/go/vt/vtorc/inst/replication_thread_state.go b/go/vt/vtorc/inst/replication_thread_state.go index e885625aa3f..a95e65ca8ec 100644 --- a/go/vt/vtorc/inst/replication_thread_state.go +++ b/go/vt/vtorc/inst/replication_thread_state.go @@ -16,7 +16,9 @@ package inst -import "vitess.io/vitess/go/mysql" +import ( + "vitess.io/vitess/go/mysql/replication" +) type ReplicationThreadState int @@ -27,25 +29,15 @@ const ( ReplicationThreadStateOther ReplicationThreadState = 2 ) -func ReplicationThreadStateFromStatus(status string) ReplicationThreadState { - switch status { - case "No": - return ReplicationThreadStateStopped - case "Yes": - return ReplicationThreadStateRunning - } - return ReplicationThreadStateOther -} - // ReplicationThreadStateFromReplicationState gets the replication thread state from replication state // TODO: Merge these two into one -func ReplicationThreadStateFromReplicationState(state mysql.ReplicationState) ReplicationThreadState { +func ReplicationThreadStateFromReplicationState(state replication.ReplicationState) ReplicationThreadState { switch state { - case mysql.ReplicationStateStopped: + case replication.ReplicationStateStopped: return ReplicationThreadStateStopped - case mysql.ReplicationStateRunning: + case replication.ReplicationStateRunning: return ReplicationThreadStateRunning - case mysql.ReplicationStateConnecting: + case replication.ReplicationStateConnecting: return ReplicationThreadStateOther default: return ReplicationThreadStateNoThread diff --git a/go/vt/vtorc/inst/resolve.go b/go/vt/vtorc/inst/resolve.go deleted file mode 100644 index ac3d3f6dc88..00000000000 --- a/go/vt/vtorc/inst/resolve.go +++ /dev/null @@ -1,265 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "errors" - "fmt" - "net" - "strings" - "sync" - "time" - - "github.com/patrickmn/go-cache" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/vtorc/config" -) - -type HostnameResolve struct { - hostname string - resolvedHostname string -} - -func (hostnameResolve HostnameResolve) String() string { - return fmt.Sprintf("%s %s", hostnameResolve.hostname, hostnameResolve.resolvedHostname) -} - -type HostnameUnresolve struct { - hostname string - unresolvedHostname string -} - -func (hostnameUnresolve HostnameUnresolve) String() string { - return fmt.Sprintf("%s %s", hostnameUnresolve.hostname, hostnameUnresolve.unresolvedHostname) -} - -type HostnameRegistration struct { - CreatedAt time.Time - Key InstanceKey - Hostname string -} - -func NewHostnameRegistration(instanceKey *InstanceKey, hostname string) *HostnameRegistration { - return &HostnameRegistration{ - CreatedAt: time.Now(), - Key: *instanceKey, - Hostname: hostname, - } -} - -func NewHostnameDeregistration(instanceKey *InstanceKey) *HostnameRegistration { - return &HostnameRegistration{ - CreatedAt: time.Now(), - Key: *instanceKey, - Hostname: "", - } -} - -var hostnameResolvesLightweightCache *cache.Cache -var hostnameResolvesLightweightCacheInit = &sync.Mutex{} -var hostnameResolvesLightweightCacheLoadedOnceFromDB = false -var hostnameIPsCache = cache.New(10*time.Minute, time.Minute) - -func getHostnameResolvesLightweightCache() *cache.Cache { - hostnameResolvesLightweightCacheInit.Lock() - defer hostnameResolvesLightweightCacheInit.Unlock() - if hostnameResolvesLightweightCache == nil { - hostnameResolvesLightweightCache = cache.New(time.Duration(config.ExpiryHostnameResolvesMinutes)*time.Minute, time.Minute) - } - return hostnameResolvesLightweightCache -} - -func HostnameResolveMethodIsNone() bool { - return strings.ToLower(config.HostnameResolveMethod) == "none" -} - -// GetCNAME resolves an IP or hostname into a normalized valid CNAME -func GetCNAME(hostname string) (string, error) { - res, err := net.LookupCNAME(hostname) - if err != nil { - return hostname, err - } - res = strings.TrimRight(res, ".") - return res, nil -} - -func resolveHostname(hostname string) (string, error) { - switch strings.ToLower(config.HostnameResolveMethod) { - case "none": - return hostname, nil - case "default": - return hostname, nil - case "cname": - return GetCNAME(hostname) - case "ip": - return getHostnameIP(hostname) - } - return hostname, nil -} - -// Attempt to resolve a hostname. This may return a database cached hostname or otherwise -// it may resolve the hostname via CNAME -func ResolveHostname(hostname string) (string, error) { - hostname = strings.TrimSpace(hostname) - if hostname == "" { - return hostname, errors.New("Will not resolve empty hostname") - } - if strings.Contains(hostname, ",") { - return hostname, fmt.Errorf("Will not resolve multi-hostname: %+v", hostname) - } - if (&InstanceKey{Hostname: hostname}).IsDetached() { - // quietly abort. Nothing to do. The hostname is detached for a reason: it - // will not be resolved, for sure. - return hostname, nil - } - - // First go to lightweight cache - if resolvedHostname, found := getHostnameResolvesLightweightCache().Get(hostname); found { - return resolvedHostname.(string), nil - } - - if !hostnameResolvesLightweightCacheLoadedOnceFromDB { - // A continuous-discovery will first make sure to load all resolves from DB. - // However cli does not do so. - // Anyway, it seems like the cache was not loaded from DB. Before doing real resolves, - // let's try and get the resolved hostname from database. - if !HostnameResolveMethodIsNone() { - go func() { - if resolvedHostname, err := ReadResolvedHostname(hostname); err == nil && resolvedHostname != "" { - getHostnameResolvesLightweightCache().Set(hostname, resolvedHostname, 0) - } - }() - } - } - - // Unfound: resolve! - log.Infof("Hostname unresolved yet: %s", hostname) - resolvedHostname, err := resolveHostname(hostname) - if err != nil { - // Problem. What we'll do is cache the hostname for just one minute, so as to avoid flooding requests - // on one hand, yet make it refresh shortly on the other hand. Anyway do not write to database. - getHostnameResolvesLightweightCache().Set(hostname, resolvedHostname, time.Minute) - return hostname, err - } - // Good result! Cache it, also to DB - log.Infof("Cache hostname resolve %s as %s", hostname, resolvedHostname) - go UpdateResolvedHostname(hostname, resolvedHostname) - return resolvedHostname, nil -} - -// UpdateResolvedHostname will store the given resolved hostname in cache -// Returns false when the key already existed with same resolved value (similar -// to AFFECTED_ROWS() in mysql) -func UpdateResolvedHostname(hostname string, resolvedHostname string) bool { - if resolvedHostname == "" { - return false - } - if existingResolvedHostname, found := getHostnameResolvesLightweightCache().Get(hostname); found && (existingResolvedHostname == resolvedHostname) { - return false - } - getHostnameResolvesLightweightCache().Set(hostname, resolvedHostname, 0) - if !HostnameResolveMethodIsNone() { - _ = WriteResolvedHostname(hostname, resolvedHostname) - } - return true -} - -func LoadHostnameResolveCache() error { - if !HostnameResolveMethodIsNone() { - return loadHostnameResolveCacheFromDatabase() - } - return nil -} - -func loadHostnameResolveCacheFromDatabase() error { - allHostnamesResolves, err := ReadAllHostnameResolves() - if err != nil { - return err - } - for _, hostnameResolve := range allHostnamesResolves { - getHostnameResolvesLightweightCache().Set(hostnameResolve.hostname, hostnameResolve.resolvedHostname, 0) - } - hostnameResolvesLightweightCacheLoadedOnceFromDB = true - return nil -} - -func FlushNontrivialResolveCacheToDatabase() error { - if HostnameResolveMethodIsNone() { - return nil - } - items, _ := HostnameResolveCache() - for hostname := range items { - resolvedHostname, found := getHostnameResolvesLightweightCache().Get(hostname) - if found && (resolvedHostname.(string) != hostname) { - _ = WriteResolvedHostname(hostname, resolvedHostname.(string)) - } - } - return nil -} - -func HostnameResolveCache() (map[string]cache.Item, error) { - return getHostnameResolvesLightweightCache().Items(), nil -} - -func extractIPs(ips []net.IP) (ipv4String string, ipv6String string) { - for _, ip := range ips { - if ip4 := ip.To4(); ip4 != nil { - ipv4String = ip.String() - } else { - ipv6String = ip.String() - } - } - return ipv4String, ipv6String -} - -func getHostnameIPs(hostname string) (ips []net.IP, fromCache bool, err error) { - if ips, found := hostnameIPsCache.Get(hostname); found { - return ips.([]net.IP), true, nil - } - ips, err = net.LookupIP(hostname) - if err != nil { - log.Error(err) - return ips, false, err - } - hostnameIPsCache.Set(hostname, ips, cache.DefaultExpiration) - return ips, false, nil -} - -func getHostnameIP(hostname string) (ipString string, err error) { - ips, _, err := getHostnameIPs(hostname) - if err != nil { - return ipString, err - } - ipv4String, ipv6String := extractIPs(ips) - if ipv4String != "" { - return ipv4String, nil - } - return ipv6String, nil -} - -func ResolveHostnameIPs(hostname string) error { - ips, fromCache, err := getHostnameIPs(hostname) - if err != nil { - return err - } - if fromCache { - return nil - } - ipv4String, ipv6String := extractIPs(ips) - return writeHostnameIPs(hostname, ipv4String, ipv6String) -} diff --git a/go/vt/vtorc/inst/resolve_dao.go b/go/vt/vtorc/inst/resolve_dao.go deleted file mode 100644 index d38146469d2..00000000000 --- a/go/vt/vtorc/inst/resolve_dao.go +++ /dev/null @@ -1,219 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "github.com/rcrowley/go-metrics" - - "vitess.io/vitess/go/vt/external/golib/sqlutils" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/vtorc/config" - "vitess.io/vitess/go/vt/vtorc/db" -) - -var writeResolvedHostnameCounter = metrics.NewCounter() -var writeUnresolvedHostnameCounter = metrics.NewCounter() -var readResolvedHostnameCounter = metrics.NewCounter() -var readUnresolvedHostnameCounter = metrics.NewCounter() -var readAllResolvedHostnamesCounter = metrics.NewCounter() - -func init() { - _ = metrics.Register("resolve.write_resolved", writeResolvedHostnameCounter) - _ = metrics.Register("resolve.write_unresolved", writeUnresolvedHostnameCounter) - _ = metrics.Register("resolve.read_resolved", readResolvedHostnameCounter) - _ = metrics.Register("resolve.read_unresolved", readUnresolvedHostnameCounter) - _ = metrics.Register("resolve.read_resolved_all", readAllResolvedHostnamesCounter) -} - -// WriteResolvedHostname stores a hostname and the resolved hostname to backend database -func WriteResolvedHostname(hostname string, resolvedHostname string) error { - writeFunc := func() error { - _, err := db.ExecVTOrc(` - insert into - hostname_resolve (hostname, resolved_hostname, resolved_timestamp) - values - (?, ?, NOW()) - on duplicate key update - resolved_hostname = VALUES(resolved_hostname), - resolved_timestamp = VALUES(resolved_timestamp) - `, - hostname, - resolvedHostname) - if err != nil { - log.Error(err) - return err - } - if hostname != resolvedHostname { - // history is only interesting when there's actually something to resolve... - _, _ = db.ExecVTOrc(` - insert into - hostname_resolve_history (hostname, resolved_hostname, resolved_timestamp) - values - (?, ?, NOW()) - on duplicate key update - hostname=values(hostname), - resolved_timestamp=values(resolved_timestamp) - `, - hostname, - resolvedHostname) - } - writeResolvedHostnameCounter.Inc(1) - return nil - } - return ExecDBWriteFunc(writeFunc) -} - -// ReadResolvedHostname returns the resolved hostname given a hostname, or empty if not exists -func ReadResolvedHostname(hostname string) (string, error) { - var resolvedHostname string - - query := ` - select - resolved_hostname - from - hostname_resolve - where - hostname = ? - ` - - err := db.QueryVTOrc(query, sqlutils.Args(hostname), func(m sqlutils.RowMap) error { - resolvedHostname = m.GetString("resolved_hostname") - return nil - }) - readResolvedHostnameCounter.Inc(1) - - if err != nil { - log.Error(err) - } - return resolvedHostname, err -} - -func ReadAllHostnameResolves() ([]HostnameResolve, error) { - res := []HostnameResolve{} - query := ` - select - hostname, - resolved_hostname - from - hostname_resolve - ` - err := db.QueryVTOrcRowsMap(query, func(m sqlutils.RowMap) error { - hostnameResolve := HostnameResolve{hostname: m.GetString("hostname"), resolvedHostname: m.GetString("resolved_hostname")} - - res = append(res, hostnameResolve) - return nil - }) - readAllResolvedHostnamesCounter.Inc(1) - - if err != nil { - log.Error(err) - } - return res, err -} - -// ExpireHostnameUnresolve expires hostname_unresolve entries that haven't been updated recently. -func ExpireHostnameUnresolve() error { - writeFunc := func() error { - _, err := db.ExecVTOrc(` - delete from hostname_unresolve - where last_registered < NOW() - INTERVAL ? MINUTE - `, config.ExpiryHostnameResolvesMinutes, - ) - if err != nil { - log.Error(err) - } - return err - } - return ExecDBWriteFunc(writeFunc) -} - -// ForgetExpiredHostnameResolves -func ForgetExpiredHostnameResolves() error { - _, err := db.ExecVTOrc(` - delete - from hostname_resolve - where - resolved_timestamp < NOW() - interval ? minute`, - 2*config.ExpiryHostnameResolvesMinutes, - ) - return err -} - -// DeleteInvalidHostnameResolves removes invalid resolves. At this time these are: -// - infinite loop resolves (A->B and B->A), remove earlier mapping -func DeleteInvalidHostnameResolves() error { - var invalidHostnames []string - - query := ` - select - early.hostname - from - hostname_resolve as latest - join hostname_resolve early on (latest.resolved_hostname = early.hostname and latest.hostname = early.resolved_hostname) - where - latest.hostname != latest.resolved_hostname - and latest.resolved_timestamp > early.resolved_timestamp - ` - - err := db.QueryVTOrcRowsMap(query, func(m sqlutils.RowMap) error { - invalidHostnames = append(invalidHostnames, m.GetString("hostname")) - return nil - }) - if err != nil { - return err - } - - for _, invalidHostname := range invalidHostnames { - _, err = db.ExecVTOrc(` - delete - from hostname_resolve - where - hostname = ?`, - invalidHostname, - ) - if err != nil { - log.Error(err) - } - } - return err -} - -// writeHostnameIPs stroes an ipv4 and ipv6 associated witha hostname, if available -func writeHostnameIPs(hostname string, ipv4String string, ipv6String string) error { - writeFunc := func() error { - _, err := db.ExecVTOrc(` - insert into - hostname_ips (hostname, ipv4, ipv6, last_updated) - values - (?, ?, ?, NOW()) - on duplicate key update - ipv4 = VALUES(ipv4), - ipv6 = VALUES(ipv6), - last_updated = VALUES(last_updated) - `, - hostname, - ipv4String, - ipv6String, - ) - if err != nil { - log.Error(err) - } - return err - } - return ExecDBWriteFunc(writeFunc) -} diff --git a/go/vt/vtorc/inst/shard_dao.go b/go/vt/vtorc/inst/shard_dao.go new file mode 100644 index 00000000000..a90eed0f509 --- /dev/null +++ b/go/vt/vtorc/inst/shard_dao.go @@ -0,0 +1,97 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package inst + +import ( + "errors" + + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/vt/external/golib/sqlutils" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtorc/db" +) + +// ErrShardNotFound is a fixed error message used when a shard is not found in the database. +var ErrShardNotFound = errors.New("shard not found") + +// ReadShardPrimaryInformation reads the vitess shard record and gets the shard primary alias and timestamp. +func ReadShardPrimaryInformation(keyspaceName, shardName string) (primaryAlias string, primaryTimestamp string, err error) { + if err = topo.ValidateKeyspaceName(keyspaceName); err != nil { + return + } + if _, _, err = topo.ValidateShardName(shardName); err != nil { + return + } + + query := ` + select + primary_alias, primary_timestamp + from + vitess_shard + where keyspace=? and shard=? + ` + args := sqlutils.Args(keyspaceName, shardName) + shardFound := false + err = db.QueryVTOrc(query, args, func(row sqlutils.RowMap) error { + shardFound = true + primaryAlias = row.GetString("primary_alias") + primaryTimestamp = row.GetString("primary_timestamp") + return nil + }) + if err != nil { + return + } + if !shardFound { + return "", "", ErrShardNotFound + } + return primaryAlias, primaryTimestamp, nil +} + +// SaveShard saves the shard record against the shard name. +func SaveShard(shard *topo.ShardInfo) error { + _, err := db.ExecVTOrc(` + replace + into vitess_shard ( + keyspace, shard, primary_alias, primary_timestamp + ) values ( + ?, ?, ?, ? + ) + `, + shard.Keyspace(), + shard.ShardName(), + getShardPrimaryAliasString(shard), + getShardPrimaryTermStartTimeString(shard), + ) + return err +} + +// getShardPrimaryAliasString gets the shard primary alias to be stored as a string in the database. +func getShardPrimaryAliasString(shard *topo.ShardInfo) string { + if shard.PrimaryAlias == nil { + return "" + } + return topoproto.TabletAliasString(shard.PrimaryAlias) +} + +// getShardPrimaryAliasString gets the shard primary term start time to be stored as a string in the database. +func getShardPrimaryTermStartTimeString(shard *topo.ShardInfo) string { + if shard.PrimaryTermStartTime == nil { + return "" + } + return protoutil.TimeFromProto(shard.PrimaryTermStartTime).UTC().String() +} diff --git a/go/vt/vtorc/inst/shard_dao_test.go b/go/vt/vtorc/inst/shard_dao_test.go new file mode 100644 index 00000000000..3357bd2ee36 --- /dev/null +++ b/go/vt/vtorc/inst/shard_dao_test.go @@ -0,0 +1,107 @@ +/* +Copyright 2022 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package inst + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + _ "modernc.org/sqlite" + + "vitess.io/vitess/go/protoutil" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtorc/db" +) + +func TestSaveAndReadShard(t *testing.T) { + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + timeToUse := time.Date(2023, 7, 24, 5, 0, 5, 1000, time.UTC) + tests := []struct { + name string + keyspaceName string + shardName string + shard *topodatapb.Shard + primaryAliasWanted string + primaryTimestampWanted string + err string + }{ + { + name: "Success", + keyspaceName: "ks1", + shardName: "80-", + shard: &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 301, + }, + PrimaryTermStartTime: protoutil.TimeToProto(timeToUse.Add(1 * time.Hour)), + }, + primaryTimestampWanted: "2023-07-24 06:00:05.000001 +0000 UTC", + primaryAliasWanted: "zone1-0000000301", + }, { + name: "Success with empty primary alias", + keyspaceName: "ks1", + shardName: "-", + shard: &topodatapb.Shard{ + PrimaryTermStartTime: protoutil.TimeToProto(timeToUse), + }, + primaryTimestampWanted: "2023-07-24 05:00:05.000001 +0000 UTC", + primaryAliasWanted: "", + }, { + name: "Success with empty primary term start time", + keyspaceName: "ks1", + shardName: "80-", + shard: &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 301, + }, + }, + primaryTimestampWanted: "", + primaryAliasWanted: "zone1-0000000301", + }, + { + name: "No shard found", + keyspaceName: "ks1", + shardName: "-80", + err: ErrShardNotFound.Error(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.shard != nil { + shardInfo := topo.NewShardInfo(tt.keyspaceName, tt.shardName, tt.shard, nil) + err := SaveShard(shardInfo) + require.NoError(t, err) + } + + shardPrimaryAlias, primaryTimestamp, err := ReadShardPrimaryInformation(tt.keyspaceName, tt.shardName) + if tt.err != "" { + require.EqualError(t, err, tt.err) + return + } + require.NoError(t, err) + require.EqualValues(t, tt.primaryAliasWanted, shardPrimaryAlias) + require.EqualValues(t, tt.primaryTimestampWanted, primaryTimestamp) + }) + } +} diff --git a/go/vt/vtorc/inst/tablet_dao.go b/go/vt/vtorc/inst/tablet_dao.go index cd762a6883e..3ee49a75781 100644 --- a/go/vt/vtorc/inst/tablet_dao.go +++ b/go/vt/vtorc/inst/tablet_dao.go @@ -20,14 +20,11 @@ import ( "context" "errors" - "vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/log" - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/logutil" replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" @@ -36,94 +33,12 @@ import ( "vitess.io/vitess/go/vt/vttablet/tmclient" ) -// TopoServ is the connection to the topo server. -var TopoServ *topo.Server - // ErrTabletAliasNil is a fixed error message. var ErrTabletAliasNil = errors.New("tablet alias is nil") -// SwitchPrimary makes the new tablet the primary and proactively performs -// the necessary propagation to the old primary. The propagation is best -// effort. If it fails, the tablet's shard sync will eventually converge. -// The proactive propagation allows a competing VTOrc from discovering -// the successful action of a previous one, which reduces churn. -func SwitchPrimary(newPrimaryKey, oldPrimaryKey InstanceKey) error { - durability, err := GetDurabilityPolicy(newPrimaryKey) - if err != nil { - return err - } - newPrimaryTablet, err := ChangeTabletType(newPrimaryKey, topodatapb.TabletType_PRIMARY, SemiSyncAckers(durability, newPrimaryKey) > 0) - if err != nil { - return err - } - // The following operations are best effort. - if newPrimaryTablet.Type != topodatapb.TabletType_PRIMARY { - log.Errorf("Unexpected: tablet type did not change to primary: %v", newPrimaryTablet.Type) - return nil - } - ctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) - defer cancel() - _, err = TopoServ.UpdateShardFields(ctx, newPrimaryTablet.Keyspace, newPrimaryTablet.Shard, func(si *topo.ShardInfo) error { - if proto.Equal(si.PrimaryAlias, newPrimaryTablet.Alias) && proto.Equal(si.PrimaryTermStartTime, newPrimaryTablet.PrimaryTermStartTime) { - return topo.NewError(topo.NoUpdateNeeded, "") - } - - // We just successfully reparented. We should check timestamps, but always overwrite. - lastTerm := si.GetPrimaryTermStartTime() - newTerm := logutil.ProtoToTime(newPrimaryTablet.PrimaryTermStartTime) - if !newTerm.After(lastTerm) { - log.Errorf("Possible clock skew. New primary start time is before previous one: %v vs %v", newTerm, lastTerm) - } - - aliasStr := topoproto.TabletAliasString(newPrimaryTablet.Alias) - log.Infof("Updating shard record: primary_alias=%v, primary_term_start_time=%v", aliasStr, newTerm) - si.PrimaryAlias = newPrimaryTablet.Alias - si.PrimaryTermStartTime = newPrimaryTablet.PrimaryTermStartTime - return nil - }) - // Don't proceed if shard record could not be updated. - if err != nil { - log.Error(err) - return nil - } - if _, err := ChangeTabletType(oldPrimaryKey, topodatapb.TabletType_REPLICA, IsReplicaSemiSync(durability, newPrimaryKey, oldPrimaryKey)); err != nil { - // This is best effort. - log.Error(err) - } - return nil -} - -// ChangeTabletType designates the tablet that owns an instance as the primary. -func ChangeTabletType(instanceKey InstanceKey, tabletType topodatapb.TabletType, semiSync bool) (*topodatapb.Tablet, error) { - if instanceKey.Hostname == "" { - return nil, errors.New("can't set tablet to primary: instance is unspecified") - } - tablet, err := ReadTablet(instanceKey) - if err != nil { - return nil, err - } - tmc := tmclient.NewTabletManagerClient() - tmcCtx, tmcCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) - defer tmcCancel() - if err := tmc.ChangeType(tmcCtx, tablet, tabletType, semiSync); err != nil { - return nil, err - } - tsCtx, tsCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) - defer tsCancel() - ti, err := TopoServ.GetTablet(tsCtx, tablet.Alias) - if err != nil { - log.Error(err) - return nil, err - } - if err := SaveTablet(ti.Tablet); err != nil { - log.Error(err) - } - return ti.Tablet, nil -} - // ResetReplicationParameters resets the replication parameters on the given tablet. -func ResetReplicationParameters(instanceKey InstanceKey) error { - tablet, err := ReadTablet(instanceKey) +func ResetReplicationParameters(tabletAlias string) error { + tablet, err := ReadTablet(tabletAlias) if err != nil { return err } @@ -137,8 +52,8 @@ func ResetReplicationParameters(instanceKey InstanceKey) error { } // FullStatus gets the full status of the MySQL running in vttablet. -func FullStatus(instanceKey InstanceKey) (*replicationdatapb.FullStatus, error) { - tablet, err := ReadTablet(instanceKey) +func FullStatus(tabletAlias string) (*replicationdatapb.FullStatus, error) { + tablet, err := ReadTablet(tabletAlias) if err != nil { return nil, err } @@ -149,18 +64,19 @@ func FullStatus(instanceKey InstanceKey) (*replicationdatapb.FullStatus, error) } // ReadTablet reads the vitess tablet record. -func ReadTablet(instanceKey InstanceKey) (*topodatapb.Tablet, error) { +func ReadTablet(tabletAlias string) (*topodatapb.Tablet, error) { query := ` select info from vitess_tablet - where hostname=? and port=? + where alias = ? ` - args := sqlutils.Args(instanceKey.Hostname, instanceKey.Port) + args := sqlutils.Args(tabletAlias) tablet := &topodatapb.Tablet{} + opts := prototext.UnmarshalOptions{DiscardUnknown: true} err := db.QueryVTOrc(query, args, func(row sqlutils.RowMap) error { - return prototext.Unmarshal([]byte(row.GetString("info")), tablet) + return opts.Unmarshal([]byte(row.GetString("info")), tablet) }) if err != nil { return nil, err @@ -192,7 +108,7 @@ func SaveTablet(tablet *topodatapb.Tablet) error { tablet.Keyspace, tablet.Shard, int(tablet.Type), - logutil.ProtoToTime(tablet.PrimaryTermStartTime), + protoutil.TimeFromProto(tablet.PrimaryTermStartTime).UTC(), tabletp, ) return err diff --git a/go/vt/vtorc/inst/tablet_dao_test.go b/go/vt/vtorc/inst/tablet_dao_test.go new file mode 100644 index 00000000000..a876d857ace --- /dev/null +++ b/go/vt/vtorc/inst/tablet_dao_test.go @@ -0,0 +1,93 @@ +package inst + +import ( + "testing" + + "github.com/stretchr/testify/require" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/proto/vttime" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtorc/db" +) + +func TestSaveAndReadTablet(t *testing.T) { + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + + tests := []struct { + name string + tabletAlias string + tablet *topodatapb.Tablet + tabletWanted *topodatapb.Tablet + err string + }{ + { + name: "Success with primary type", + tabletAlias: "zone1-0000000100", + tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_PRIMARY, + MysqlHostname: "localhost", + MysqlPort: 1030, + PrimaryTermStartTime: &vttime.Time{ + Seconds: 1000, + Nanoseconds: 387, + }, + }, + tabletWanted: nil, + }, { + name: "Success with replica type", + tabletAlias: "zone1-0000000100", + tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_REPLICA, + MysqlHostname: "localhost", + MysqlPort: 1030, + }, + tabletWanted: nil, + }, { + name: "No tablet found", + tabletAlias: "zone1-190734", + tablet: nil, + tabletWanted: nil, + err: ErrTabletAliasNil.Error(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.tabletWanted == nil { + tt.tabletWanted = tt.tablet + } + + if tt.tablet != nil { + err := SaveTablet(tt.tablet) + require.NoError(t, err) + } + + readTable, err := ReadTablet(tt.tabletAlias) + if tt.err != "" { + require.EqualError(t, err, tt.err) + return + } + require.NoError(t, err) + require.True(t, topotools.TabletEquality(tt.tabletWanted, readTable)) + require.Equal(t, tt.tabletAlias, topoproto.TabletAliasString(readTable.Alias)) + }) + } +} diff --git a/go/vt/vtorc/inst/tag.go b/go/vt/vtorc/inst/tag.go deleted file mode 100644 index 3b9705b7dff..00000000000 --- a/go/vt/vtorc/inst/tag.go +++ /dev/null @@ -1,121 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "fmt" - "regexp" - "strings" -) - -type Tag struct { - TagName string - TagValue string - HasValue bool - Negate bool -} - -var ( - negateTagEqualsRegexp = regexp.MustCompile("^~([^=]+)=(.*)$") - TagEqualsRegexp = regexp.MustCompile("^([^=]+)=(.*)$") - negateTagExistsRegexp = regexp.MustCompile("^~([^=]+)$") - tagExistsRegexp = regexp.MustCompile("^([^=]+)$") -) - -func NewTag(tagName string, tagValue string) (*Tag, error) { - tagName = strings.TrimSpace(tagName) - if tagName == "" { - return nil, fmt.Errorf("NewTag: empty tag name") - } - return &Tag{TagName: tagName, TagValue: tagValue}, nil -} - -func ParseTag(tagString string) (*Tag, error) { - tagString = strings.Replace(tagString, "!", "~", -1) - tagString = strings.TrimSpace(tagString) - - if submatch := negateTagEqualsRegexp.FindStringSubmatch(tagString); len(submatch) > 0 { - return &Tag{ - TagName: submatch[1], - TagValue: submatch[2], - HasValue: true, - Negate: true, - }, nil - } else if submatch := TagEqualsRegexp.FindStringSubmatch(tagString); len(submatch) > 0 { - return &Tag{ - TagName: submatch[1], - TagValue: submatch[2], - HasValue: true, - }, nil - } else if submatch := negateTagExistsRegexp.FindStringSubmatch(tagString); len(submatch) > 0 { - return &Tag{ - TagName: submatch[1], - Negate: true, - }, nil - } else if submatch := tagExistsRegexp.FindStringSubmatch(tagString); len(submatch) > 0 { - return &Tag{ - TagName: submatch[1], - }, nil - } - return nil, fmt.Errorf("Unable to parse tag: %s", tagString) -} - -func (tag *Tag) String() string { - return fmt.Sprintf("%s=%s", tag.TagName, tag.TagValue) -} - -func (tag *Tag) Display() string { - if tag.TagValue == "" { - return tag.TagName - } - return fmt.Sprintf("%s=%s", tag.TagName, tag.TagValue) -} - -func ParseIntersectTags(tagsString string) (tags [](*Tag), err error) { - for _, tagString := range strings.Split(tagsString, ",") { - tag, err := ParseTag(tagString) - if err != nil { - return tags, err - } - tags = append(tags, tag) - } - return tags, nil -} - -type InstanceTag struct { - Key InstanceKey - T Tag -} - -func GetInstanceKeysByTags(tagsString string) (tagged *InstanceKeyMap, err error) { - tags, err := ParseIntersectTags(tagsString) - if err != nil { - return tagged, err - } - for i, tag := range tags { - taggedByTag, err := GetInstanceKeysByTag(tag) - if err != nil { - return tagged, err - } - if i == 0 { - tagged = taggedByTag - } else { - tagged = tagged.Intersect(taggedByTag) - } - } - return tagged, nil -} diff --git a/go/vt/vtorc/inst/tag_dao.go b/go/vt/vtorc/inst/tag_dao.go deleted file mode 100644 index 5b5962a9326..00000000000 --- a/go/vt/vtorc/inst/tag_dao.go +++ /dev/null @@ -1,206 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -import ( - "fmt" - - "vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/vtorc/db" -) - -func PutInstanceTag(instanceKey *InstanceKey, tag *Tag) (err error) { - _, err = db.ExecVTOrc(` - insert - into database_instance_tags ( - hostname, port, tag_name, tag_value, last_updated - ) VALUES ( - ?, ?, ?, ?, NOW() - ) - on duplicate key update - tag_value=values(tag_value), - last_updated=values(last_updated) - `, - instanceKey.Hostname, - instanceKey.Port, - tag.TagName, - tag.TagValue, - ) - return err -} - -func Untag(instanceKey *InstanceKey, tag *Tag) (tagged *InstanceKeyMap, err error) { - if tag == nil { - errMsg := "untag: tag is nil" - log.Errorf(errMsg) - return nil, fmt.Errorf(errMsg) - } - if tag.Negate { - errMsg := "untag: does not support negation" - log.Errorf(errMsg) - return nil, fmt.Errorf(errMsg) - } - if instanceKey == nil && !tag.HasValue { - errMsg := "untag: either indicate an instance or a tag value. Will not delete on-valued tag across instances" - log.Errorf(errMsg) - return nil, fmt.Errorf(errMsg) - } - var clause string - args := sqlutils.Args() - if tag.HasValue { - clause = `tag_name=? and tag_value=?` - args = append(args, tag.TagName, tag.TagValue) - } else { - clause = `tag_name=?` - args = append(args, tag.TagName) - } - if instanceKey != nil { - clause = fmt.Sprintf("%s and hostname=? and port=?", clause) - args = append(args, instanceKey.Hostname, instanceKey.Port) - } - tagged = NewInstanceKeyMap() - query := fmt.Sprintf(` - select - hostname, - port - from - database_instance_tags - where - %s - order by hostname, port - `, clause, - ) - _ = db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { - key, _ := NewResolveInstanceKey(m.GetString("hostname"), m.GetInt("port")) - tagged.AddKey(*key) - return nil - }) - - query = fmt.Sprintf(` - delete from - database_instance_tags - where - %s - `, clause, - ) - if _, err = db.ExecVTOrc(query, args...); err != nil { - log.Error(err) - return tagged, err - } - _ = AuditOperation("delete-instance-tag", instanceKey, tag.String()) - return tagged, nil -} - -func ReadInstanceTag(instanceKey *InstanceKey, tag *Tag) (tagExists bool, err error) { - query := ` - select - tag_value - from - database_instance_tags - where - hostname = ? - and port = ? - and tag_name = ? - ` - args := sqlutils.Args(instanceKey.Hostname, instanceKey.Port, tag.TagName) - err = db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { - tag.TagValue = m.GetString("tag_value") - tagExists = true - return nil - }) - - if err != nil { - log.Error(err) - } - return tagExists, err -} - -func ReadInstanceTags(instanceKey *InstanceKey) (tags [](*Tag), err error) { - tags = [](*Tag){} - query := ` - select - tag_name, tag_value - from - database_instance_tags - where - hostname = ? - and port = ? - order by tag_name - ` - args := sqlutils.Args(instanceKey.Hostname, instanceKey.Port) - err = db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { - tag := &Tag{ - TagName: m.GetString("tag_name"), - TagValue: m.GetString("tag_value"), - } - tags = append(tags, tag) - return nil - }) - - if err != nil { - log.Error(err) - } - return tags, err -} - -func GetInstanceKeysByTag(tag *Tag) (tagged *InstanceKeyMap, err error) { - if tag == nil { - errMsg := "GetInstanceKeysByTag: tag is nil" - log.Errorf(errMsg) - return nil, fmt.Errorf(errMsg) - } - clause := `` - args := sqlutils.Args() - if tag.HasValue && !tag.Negate { - // exists and equals - clause = `tag_name=? and tag_value=?` - args = append(args, tag.TagName, tag.TagValue) - } else if !tag.HasValue && !tag.Negate { - // exists - clause = `tag_name=?` - args = append(args, tag.TagName) - } else if tag.HasValue && tag.Negate { - // exists and not equal - clause = `tag_name=? and tag_value!=?` - args = append(args, tag.TagName, tag.TagValue) - } else if !tag.HasValue && tag.Negate { - // does not exist - clause = `1=1 group by hostname, port having sum(tag_name=?)=0` - args = append(args, tag.TagName) - } - tagged = NewInstanceKeyMap() - query := fmt.Sprintf(` - select - hostname, - port - from - database_instance_tags - where - %s - order by hostname, port - `, clause) - err = db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { - key, _ := NewResolveInstanceKey(m.GetString("hostname"), m.GetInt("port")) - tagged.AddKey(*key) - return nil - }) - if err != nil { - log.Error(err) - } - return tagged, err -} diff --git a/go/vt/vtorc/inst/tag_test.go b/go/vt/vtorc/inst/tag_test.go deleted file mode 100644 index 0ce182b7fb2..00000000000 --- a/go/vt/vtorc/inst/tag_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package inst - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -//nolint:staticcheck -func TestParseTag(t *testing.T) { - { - tag, err := ParseTag("") - require.True(t, tag == nil) - require.Error(t, err) - } - { - tag, err := ParseTag("=") - require.True(t, tag == nil) - require.Error(t, err) - } - { - tag, err := ParseTag("=backup") - require.True(t, tag == nil) - require.Error(t, err) - } - { - tag, err := ParseTag(" =backup") - require.True(t, tag == nil) - require.Error(t, err) - } - { - tag, err := ParseTag("role") - require.NoError(t, err) - require.True(t, tag != nil) - require.Equal(t, tag.TagName, "role") - require.Equal(t, tag.TagValue, "") - require.False(t, tag.Negate) - require.False(t, tag.HasValue) - - require.Equal(t, tag.String(), "role=") - } - { - tag, err := ParseTag("role=") - require.NoError(t, err) - require.True(t, tag != nil) - require.Equal(t, tag.TagName, "role") - require.Equal(t, tag.TagValue, "") - require.False(t, tag.Negate) - require.True(t, tag.HasValue) - - require.Equal(t, tag.String(), "role=") - - } - { - tag, err := ParseTag("role=backup") - require.NoError(t, err) - require.True(t, tag != nil) - require.Equal(t, tag.TagName, "role") - require.Equal(t, tag.TagValue, "backup") - require.False(t, tag.Negate) - require.True(t, tag.HasValue) - - require.Equal(t, tag.String(), "role=backup") - } - { - tag, err := ParseTag("!role") - require.NoError(t, err) - require.True(t, tag != nil) - require.Equal(t, tag.TagName, "role") - require.True(t, tag.Negate) - require.False(t, tag.HasValue) - } - { - tag, err := ParseTag("~role=backup") - require.NoError(t, err) - require.True(t, tag != nil) - require.Equal(t, tag.TagName, "role") - require.Equal(t, tag.TagValue, "backup") - require.True(t, tag.Negate) - require.True(t, tag.HasValue) - } -} - -func TestParseIntersectTags(t *testing.T) { - { - _, err := ParseIntersectTags("") - require.Error(t, err) - } - { - _, err := ParseIntersectTags(",") - require.Error(t, err) - } - { - _, err := ParseIntersectTags(",,,") - require.Error(t, err) - } - { - _, err := ParseIntersectTags("role,") - require.Error(t, err) - } - { - tags, err := ParseIntersectTags("role") - require.NoError(t, err) - require.Equal(t, len(tags), 1) - - require.Equal(t, tags[0].TagName, "role") - require.Equal(t, tags[0].TagValue, "") - require.False(t, tags[0].Negate) - require.False(t, tags[0].HasValue) - } - { - tags, err := ParseIntersectTags("role,dc") - require.NoError(t, err) - require.Equal(t, len(tags), 2) - - require.Equal(t, tags[0].TagName, "role") - require.Equal(t, tags[0].TagValue, "") - require.False(t, tags[0].Negate) - require.False(t, tags[0].HasValue) - - require.Equal(t, tags[1].TagName, "dc") - require.Equal(t, tags[1].TagValue, "") - require.False(t, tags[1].Negate) - require.False(t, tags[1].HasValue) - } - { - tags, err := ParseIntersectTags("role=backup, !dc=ny") - require.NoError(t, err) - require.Equal(t, len(tags), 2) - - require.Equal(t, tags[0].TagName, "role") - require.Equal(t, tags[0].TagValue, "backup") - require.False(t, tags[0].Negate) - require.True(t, tags[0].HasValue) - - require.Equal(t, tags[1].TagName, "dc") - require.Equal(t, tags[1].TagValue, "ny") - require.True(t, tags[1].Negate) - require.True(t, tags[1].HasValue) - } -} diff --git a/go/vt/vtorc/logic/keyspace_discovery.go b/go/vt/vtorc/logic/keyspace_shard_discovery.go similarity index 54% rename from go/vt/vtorc/logic/keyspace_discovery.go rename to go/vt/vtorc/logic/keyspace_shard_discovery.go index 4065c3c0857..c79ace5bdc3 100644 --- a/go/vt/vtorc/logic/keyspace_discovery.go +++ b/go/vt/vtorc/logic/keyspace_shard_discovery.go @@ -28,8 +28,8 @@ import ( "vitess.io/vitess/go/vt/vtorc/inst" ) -// RefreshAllKeyspaces reloads the keyspace information for the keyspaces that vtorc is concerned with. -func RefreshAllKeyspaces() { +// RefreshAllKeyspacesAndShards reloads the keyspace and shard information for the keyspaces that vtorc is concerned with. +func RefreshAllKeyspacesAndShards() { var keyspaces []string if len(clustersToWatch) == 0 { // all known keyspaces ctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) @@ -72,24 +72,44 @@ func RefreshAllKeyspaces() { if idx != 0 && keyspace == keyspaces[idx-1] { continue } - wg.Add(1) + wg.Add(2) go func(keyspace string) { defer wg.Done() - _ = refreshKeyspace(refreshCtx, keyspace) + _ = refreshKeyspaceHelper(refreshCtx, keyspace) + }(keyspace) + go func(keyspace string) { + defer wg.Done() + _ = refreshAllShards(refreshCtx, keyspace) }(keyspace) } wg.Wait() } -// RefreshKeyspace refreshes the keyspace's information for the given keyspace from the topo -func RefreshKeyspace(keyspaceName string) error { +// RefreshKeyspaceAndShard refreshes the keyspace record and shard record for the given keyspace and shard. +func RefreshKeyspaceAndShard(keyspaceName string, shardName string) error { + err := refreshKeyspace(keyspaceName) + if err != nil { + return err + } + return refreshShard(keyspaceName, shardName) +} + +// refreshKeyspace refreshes the keyspace's information for the given keyspace from the topo +func refreshKeyspace(keyspaceName string) error { refreshCtx, refreshCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) defer refreshCancel() - return refreshKeyspace(refreshCtx, keyspaceName) + return refreshKeyspaceHelper(refreshCtx, keyspaceName) } -// refreshKeyspace is a helper function which reloads the given keyspace's information -func refreshKeyspace(ctx context.Context, keyspaceName string) error { +// refreshShard refreshes the shard's information for the given keyspace/shard from the topo +func refreshShard(keyspaceName, shardName string) error { + refreshCtx, refreshCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) + defer refreshCancel() + return refreshSingleShardHelper(refreshCtx, keyspaceName, shardName) +} + +// refreshKeyspaceHelper is a helper function which reloads the given keyspace's information +func refreshKeyspaceHelper(ctx context.Context, keyspaceName string) error { keyspaceInfo, err := ts.GetKeyspace(ctx, keyspaceName) if err != nil { log.Error(err) @@ -101,3 +121,34 @@ func refreshKeyspace(ctx context.Context, keyspaceName string) error { } return err } + +// refreshAllShards refreshes all the shard records in the given keyspace. +func refreshAllShards(ctx context.Context, keyspaceName string) error { + shardInfos, err := ts.FindAllShardsInKeyspace(ctx, keyspaceName) + if err != nil { + log.Error(err) + return err + } + for _, shardInfo := range shardInfos { + err = inst.SaveShard(shardInfo) + if err != nil { + log.Error(err) + return err + } + } + return nil +} + +// refreshSingleShardHelper is a helper function that refreshes the shard record of the given keyspace/shard. +func refreshSingleShardHelper(ctx context.Context, keyspaceName string, shardName string) error { + shardInfo, err := ts.GetShard(ctx, keyspaceName, shardName) + if err != nil { + log.Error(err) + return err + } + err = inst.SaveShard(shardInfo) + if err != nil { + log.Error(err) + } + return err +} diff --git a/go/vt/vtorc/logic/keyspace_discovery_test.go b/go/vt/vtorc/logic/keyspace_shard_discovery_test.go similarity index 58% rename from go/vt/vtorc/logic/keyspace_discovery_test.go rename to go/vt/vtorc/logic/keyspace_shard_discovery_test.go index e5be1fd82f2..2911b3d29c2 100644 --- a/go/vt/vtorc/logic/keyspace_discovery_test.go +++ b/go/vt/vtorc/logic/keyspace_shard_discovery_test.go @@ -18,6 +18,7 @@ package logic import ( "context" + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -60,47 +61,65 @@ func TestRefreshAllKeyspaces(t *testing.T) { clustersToWatch = oldClustersToWatch }() - // Open the vtorc - // After the test completes delete everything from the vitess_keyspace table - orcDb, err := db.OpenVTOrc() - require.NoError(t, err) + db.ClearVTOrcDatabase() defer func() { - _, err = orcDb.Exec("delete from vitess_keyspace") - require.NoError(t, err) + db.ClearVTOrcDatabase() }() - ts = memorytopo.NewServer("zone1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts = memorytopo.NewServer(ctx, "zone1") keyspaceNames := []string{"ks1", "ks2", "ks3", "ks4"} keyspaces := []*topodatapb.Keyspace{keyspaceDurabilityNone, keyspaceDurabilitySemiSync, keyspaceSnapshot, keyspaceDurabilityTest} // Create 4 keyspaces for i, keyspace := range keyspaces { - err := ts.CreateKeyspace(context.Background(), keyspaceNames[i], keyspace) + err := ts.CreateKeyspace(ctx, keyspaceNames[i], keyspace) require.NoError(t, err) + for idx, shardName := range []string{"-80", "80-"} { + err = ts.CreateShard(ctx, keyspaceNames[i], shardName) + require.NoError(t, err) + _, err = ts.UpdateShardFields(ctx, keyspaceNames[i], shardName, func(si *topo.ShardInfo) error { + si.PrimaryAlias = &topodatapb.TabletAlias{ + Cell: fmt.Sprintf("zone_%v", keyspaceNames[i]), + Uid: uint32(100 + idx), + } + return nil + }) + require.NoError(t, err) + } } // Set clusters to watch to only watch ks1 and ks3 - onlyKs1and3 := []string{"ks1/-", "ks3/-80", "ks3/80-"} + onlyKs1and3 := []string{"ks1/-80", "ks3/-80", "ks3/80-"} clustersToWatch = onlyKs1and3 - RefreshAllKeyspaces() + RefreshAllKeyspacesAndShards() // Verify that we only have ks1 and ks3 in vtorc's db. verifyKeyspaceInfo(t, "ks1", keyspaceDurabilityNone, "") + verifyPrimaryAlias(t, "ks1", "-80", "zone_ks1-0000000100", "") verifyKeyspaceInfo(t, "ks2", nil, "keyspace not found") + verifyPrimaryAlias(t, "ks2", "80-", "", "shard not found") verifyKeyspaceInfo(t, "ks3", keyspaceSnapshot, "") + verifyPrimaryAlias(t, "ks3", "80-", "zone_ks3-0000000101", "") verifyKeyspaceInfo(t, "ks4", nil, "keyspace not found") // Set clusters to watch to watch all keyspaces clustersToWatch = nil // Change the durability policy of ks1 - reparenttestutil.SetKeyspaceDurability(context.Background(), t, ts, "ks1", "semi_sync") - RefreshAllKeyspaces() + reparenttestutil.SetKeyspaceDurability(ctx, t, ts, "ks1", "semi_sync") + RefreshAllKeyspacesAndShards() // Verify that all the keyspaces are correctly reloaded verifyKeyspaceInfo(t, "ks1", keyspaceDurabilitySemiSync, "") + verifyPrimaryAlias(t, "ks1", "-80", "zone_ks1-0000000100", "") verifyKeyspaceInfo(t, "ks2", keyspaceDurabilitySemiSync, "") + verifyPrimaryAlias(t, "ks2", "80-", "zone_ks2-0000000101", "") verifyKeyspaceInfo(t, "ks3", keyspaceSnapshot, "") + verifyPrimaryAlias(t, "ks3", "80-", "zone_ks3-0000000101", "") verifyKeyspaceInfo(t, "ks4", keyspaceDurabilityTest, "") + verifyPrimaryAlias(t, "ks4", "80-", "zone_ks4-0000000101", "") + } func TestRefreshKeyspace(t *testing.T) { @@ -110,27 +129,20 @@ func TestRefreshKeyspace(t *testing.T) { ts = oldTs }() - // Open the vtorc - // After the test completes delete everything from the vitess_keyspace table - orcDb, err := db.OpenVTOrc() - require.NoError(t, err) defer func() { - _, err = orcDb.Exec("delete from vitess_keyspace") - require.NoError(t, err) + db.ClearVTOrcDatabase() }() tests := []struct { name string keyspaceName string keyspace *topodatapb.Keyspace - ts *topo.Server keyspaceWanted *topodatapb.Keyspace err string }{ { name: "Success with keyspaceType and durability", keyspaceName: "ks1", - ts: memorytopo.NewServer("zone1"), keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, DurabilityPolicy: "semi_sync", @@ -140,7 +152,6 @@ func TestRefreshKeyspace(t *testing.T) { }, { name: "Success with keyspaceType and no durability", keyspaceName: "ks2", - ts: memorytopo.NewServer("zone1"), keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, }, @@ -149,7 +160,6 @@ func TestRefreshKeyspace(t *testing.T) { }, { name: "Success with snapshot keyspaceType", keyspaceName: "ks3", - ts: memorytopo.NewServer("zone1"), keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_SNAPSHOT, }, @@ -158,7 +168,6 @@ func TestRefreshKeyspace(t *testing.T) { }, { name: "Success with fields that are not stored", keyspaceName: "ks4", - ts: memorytopo.NewServer("zone1"), keyspace: &topodatapb.Keyspace{ KeyspaceType: topodatapb.KeyspaceType_NORMAL, DurabilityPolicy: "none", @@ -172,7 +181,6 @@ func TestRefreshKeyspace(t *testing.T) { }, { name: "No keyspace found", keyspaceName: "ks5", - ts: memorytopo.NewServer("zone1"), keyspace: nil, keyspaceWanted: nil, err: "node doesn't exist: keyspaces/ks5/Keyspace", @@ -184,13 +192,16 @@ func TestRefreshKeyspace(t *testing.T) { tt.keyspaceWanted = tt.keyspace } - ts = tt.ts + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts = memorytopo.NewServer(ctx, "zone1") if tt.keyspace != nil { err := ts.CreateKeyspace(context.Background(), tt.keyspaceName, tt.keyspace) require.NoError(t, err) } - err := RefreshKeyspace(tt.keyspaceName) + err := refreshKeyspace(tt.keyspaceName) if tt.err != "" { require.EqualError(t, err, tt.err) } else { @@ -209,7 +220,91 @@ func verifyKeyspaceInfo(t *testing.T, keyspaceName string, keyspace *topodatapb. if errString != "" { assert.EqualError(t, err, errString) } else { + assert.NoError(t, err) assert.Equal(t, keyspaceName, ksInfo.KeyspaceName()) assert.True(t, topotools.KeyspaceEquality(keyspace, ksInfo.Keyspace)) } } + +func TestRefreshShard(t *testing.T) { + // Store the old flags and restore on test completion + oldTs := ts + defer func() { + ts = oldTs + }() + + defer func() { + db.ClearVTOrcDatabase() + }() + + tests := []struct { + name string + keyspaceName string + shardName string + shard *topodatapb.Shard + primaryAliasWanted string + err string + }{ + { + name: "Success with primaryAlias", + keyspaceName: "ks1", + shardName: "0", + shard: &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 302, + }, + }, + primaryAliasWanted: "zone1-0000000302", + err: "", + }, { + name: "Success with empty primaryAlias", + keyspaceName: "ks1", + shardName: "-80", + shard: &topodatapb.Shard{}, + primaryAliasWanted: "", + err: "", + }, { + name: "No shard found", + keyspaceName: "ks2", + shardName: "-", + err: "node doesn't exist: keyspaces/ks2/shards/-/Shard", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts = memorytopo.NewServer(ctx, "zone1") + if tt.shard != nil { + _, err := ts.GetOrCreateShard(context.Background(), tt.keyspaceName, tt.shardName) + require.NoError(t, err) + _, err = ts.UpdateShardFields(context.Background(), tt.keyspaceName, tt.shardName, func(info *topo.ShardInfo) error { + info.PrimaryAlias = tt.shard.PrimaryAlias + return nil + }) + require.NoError(t, err) + } + + err := refreshShard(tt.keyspaceName, tt.shardName) + if tt.err != "" { + require.EqualError(t, err, tt.err) + } else { + require.NoError(t, err) + verifyPrimaryAlias(t, tt.keyspaceName, tt.shardName, tt.primaryAliasWanted, "") + } + }) + } +} + +// verifyPrimaryAlias verifies the correct primary alias is stored in the database for the given keyspace shard. +func verifyPrimaryAlias(t *testing.T, keyspaceName, shardName string, primaryAliasWanted string, errString string) { + primaryAlias, _, err := inst.ReadShardPrimaryInformation(keyspaceName, shardName) + if errString != "" { + require.ErrorContains(t, err, errString) + return + } + require.NoError(t, err) + require.Equal(t, primaryAliasWanted, primaryAlias) +} diff --git a/go/vt/vtorc/logic/tablet_discovery.go b/go/vt/vtorc/logic/tablet_discovery.go index 1a56fcce7b2..dd2e65237bf 100644 --- a/go/vt/vtorc/logic/tablet_discovery.go +++ b/go/vt/vtorc/logic/tablet_discovery.go @@ -19,6 +19,8 @@ package logic import ( "context" "errors" + "fmt" + "slices" "strings" "sync" "sync/atomic" @@ -26,22 +28,21 @@ import ( "github.com/spf13/pflag" - "vitess.io/vitess/go/vt/external/golib/sqlutils" - "google.golang.org/protobuf/encoding/prototext" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/vt/external/golib/sqlutils" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/topo/topoproto" - - "vitess.io/vitess/go/vt/vtorc/config" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/inst" "vitess.io/vitess/go/vt/vttablet/tmclient" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) var ( @@ -65,8 +66,6 @@ func RegisterFlags(fs *pflag.FlagSet) { func OpenTabletDiscovery() <-chan time.Time { // TODO(sougou): If there's a shutdown signal, we have to close the topo. ts = topo.Open() - // TODO(sougou): remove ts and push some functions into inst. - inst.TopoServ = ts tmc = tmclient.NewTabletManagerClient() // Clear existing cache and perform a new refresh. if _, err := db.ExecVTOrc("delete from vitess_tablet"); err != nil { @@ -77,12 +76,12 @@ func OpenTabletDiscovery() <-chan time.Time { // refreshAllTablets reloads the tablets from topo and discovers the ones which haven't been refreshed in a while func refreshAllTablets() { - refreshTabletsUsing(func(instanceKey *inst.InstanceKey) { - DiscoverInstance(*instanceKey, false /* forceDiscovery */) + refreshTabletsUsing(func(tabletAlias string) { + DiscoverInstance(tabletAlias, false /* forceDiscovery */) }, false /* forceRefresh */) } -func refreshTabletsUsing(loader func(instanceKey *inst.InstanceKey), forceRefresh bool) { +func refreshTabletsUsing(loader func(tabletAlias string), forceRefresh bool) { if !IsLeaderOrActive() { return } @@ -144,61 +143,58 @@ func refreshTabletsUsing(loader func(instanceKey *inst.InstanceKey), forceRefres wg.Add(1) go func(ks *topo.KeyspaceShard) { defer wg.Done() - refreshTabletsInKeyspaceShard(refreshCtx, ks.Keyspace, ks.Shard, loader, forceRefresh) + refreshTabletsInKeyspaceShard(refreshCtx, ks.Keyspace, ks.Shard, loader, forceRefresh, nil) }(ks) } wg.Wait() } } -func refreshTabletsInCell(ctx context.Context, cell string, loader func(instanceKey *inst.InstanceKey), forceRefresh bool) { +func refreshTabletsInCell(ctx context.Context, cell string, loader func(tabletAlias string), forceRefresh bool) { tablets, err := topotools.GetTabletMapForCell(ctx, ts, cell) if err != nil { log.Errorf("Error fetching topo info for cell %v: %v", cell, err) return } - query := "select hostname, port, info from vitess_tablet where cell = ?" + query := "select alias from vitess_tablet where cell = ?" args := sqlutils.Args(cell) - refreshTablets(tablets, query, args, loader, forceRefresh) + refreshTablets(tablets, query, args, loader, forceRefresh, nil) } // forceRefreshAllTabletsInShard is used to refresh all the tablet's information (both MySQL information and topo records) // for a given shard. This function is meant to be called before or after a cluster-wide operation that we know will // change the replication information for the entire cluster drastically enough to warrant a full forceful refresh -func forceRefreshAllTabletsInShard(ctx context.Context, keyspace, shard string) { - log.Infof("force refresh of all tablets in shard - %v/%v", keyspace, shard) +func forceRefreshAllTabletsInShard(ctx context.Context, keyspace, shard string, tabletsToIgnore []string) { refreshCtx, refreshCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer refreshCancel() - refreshTabletsInKeyspaceShard(refreshCtx, keyspace, shard, func(instanceKey *inst.InstanceKey) { - DiscoverInstance(*instanceKey, true) - }, true) + refreshTabletsInKeyspaceShard(refreshCtx, keyspace, shard, func(tabletAlias string) { + DiscoverInstance(tabletAlias, true) + }, true, tabletsToIgnore) } // refreshTabletInfoOfShard only refreshes the tablet records from the topo-server for all the tablets // of the given keyspace-shard. func refreshTabletInfoOfShard(ctx context.Context, keyspace, shard string) { log.Infof("refresh of tablet records of shard - %v/%v", keyspace, shard) - refreshTabletsInKeyspaceShard(ctx, keyspace, shard, func(instanceKey *inst.InstanceKey) { + refreshTabletsInKeyspaceShard(ctx, keyspace, shard, func(tabletAlias string) { // No-op // We only want to refresh the tablet information for the given shard - }, false) + }, false, nil) } -func refreshTabletsInKeyspaceShard(ctx context.Context, keyspace, shard string, loader func(instanceKey *inst.InstanceKey), forceRefresh bool) { +func refreshTabletsInKeyspaceShard(ctx context.Context, keyspace, shard string, loader func(tabletAlias string), forceRefresh bool, tabletsToIgnore []string) { tablets, err := ts.GetTabletMapForShard(ctx, keyspace, shard) if err != nil { log.Errorf("Error fetching tablets for keyspace/shard %v/%v: %v", keyspace, shard, err) return } - query := "select hostname, port, info from vitess_tablet where keyspace = ? and shard = ?" + query := "select alias from vitess_tablet where keyspace = ? and shard = ?" args := sqlutils.Args(keyspace, shard) - refreshTablets(tablets, query, args, loader, forceRefresh) + refreshTablets(tablets, query, args, loader, forceRefresh, tabletsToIgnore) } -func refreshTablets(tablets map[string]*topo.TabletInfo, query string, args []any, loader func(instanceKey *inst.InstanceKey), forceRefresh bool) { +func refreshTablets(tablets map[string]*topo.TabletInfo, query string, args []any, loader func(tabletAlias string), forceRefresh bool, tabletsToIgnore []string) { // Discover new tablets. - // TODO(sougou): enhance this to work with multi-schema, - // where each instanceKey can have multiple tablets. latestInstances := make(map[string]bool) var wg sync.WaitGroup for _, tabletInfo := range tablets { @@ -206,15 +202,9 @@ func refreshTablets(tablets map[string]*topo.TabletInfo, query string, args []an if tablet.Type != topodatapb.TabletType_PRIMARY && !topo.IsReplicaType(tablet.Type) { continue } - latestInstances[topoproto.TabletAliasString(tablet.Alias)] = true - if tablet.MysqlHostname == "" { - continue - } - instanceKey := inst.InstanceKey{ - Hostname: tablet.MysqlHostname, - Port: int(tablet.MysqlPort), - } - old, err := inst.ReadTablet(instanceKey) + tabletAliasString := topoproto.TabletAliasString(tablet.Alias) + latestInstances[tabletAliasString] = true + old, err := inst.ReadTablet(tabletAliasString) if err != nil && err != inst.ErrTabletAliasNil { log.Error(err) continue @@ -229,68 +219,55 @@ func refreshTablets(tablets map[string]*topo.TabletInfo, query string, args []an wg.Add(1) go func() { defer wg.Done() - loader(&instanceKey) + if slices.Contains(tabletsToIgnore, topoproto.TabletAliasString(tablet.Alias)) { + return + } + loader(tabletAliasString) }() log.Infof("Discovered: %v", tablet) } wg.Wait() // Forget tablets that were removed. - toForget := make(map[inst.InstanceKey]*topodatapb.Tablet) + var toForget []string err := db.QueryVTOrc(query, args, func(row sqlutils.RowMap) error { - curKey := inst.InstanceKey{ - Hostname: row.GetString("hostname"), - Port: row.GetInt("port"), - } - tablet := &topodatapb.Tablet{} - if err := prototext.Unmarshal([]byte(row.GetString("info")), tablet); err != nil { - log.Error(err) - return nil - } - if !latestInstances[topoproto.TabletAliasString(tablet.Alias)] { - toForget[curKey] = tablet + tabletAlias := row.GetString("alias") + if !latestInstances[tabletAlias] { + toForget = append(toForget, tabletAlias) } return nil }) if err != nil { log.Error(err) } - for instanceKey, tablet := range toForget { - log.Infof("Forgetting: %v", tablet) - _, err := db.ExecVTOrc(` - delete - from vitess_tablet - where - hostname=? and port=?`, - instanceKey.Hostname, - instanceKey.Port, - ) - if err != nil { - log.Error(err) - } - if err := inst.ForgetInstance(&instanceKey); err != nil { + for _, tabletAlias := range toForget { + if err := inst.ForgetInstance(tabletAlias); err != nil { log.Error(err) } } } +func getLockAction(analysedInstance string, code inst.AnalysisCode) string { + return fmt.Sprintf("VTOrc Recovery for %v on %v", code, analysedInstance) +} + // LockShard locks the keyspace-shard preventing others from performing conflicting actions. -func LockShard(ctx context.Context, instanceKey inst.InstanceKey) (context.Context, func(*error), error) { - if instanceKey.Hostname == "" { - return nil, nil, errors.New("Can't lock shard: instance is unspecified") +func LockShard(ctx context.Context, tabletAlias string, lockAction string) (context.Context, func(*error), error) { + if tabletAlias == "" { + return nil, nil, errors.New("can't lock shard: instance is unspecified") } val := atomic.LoadInt32(&hasReceivedSIGTERM) if val > 0 { - return nil, nil, errors.New("Can't lock shard: SIGTERM received") + return nil, nil, errors.New("can't lock shard: SIGTERM received") } - tablet, err := inst.ReadTablet(instanceKey) + tablet, err := inst.ReadTablet(tabletAlias) if err != nil { return nil, nil, err } atomic.AddInt32(&shardsLockCounter, 1) - ctx, unlock, err := ts.TryLockShard(ctx, tablet.Keyspace, tablet.Shard, "Orc Recovery") + ctx, unlock, err := ts.TryLockShard(ctx, tablet.Keyspace, tablet.Shard, lockAction) if err != nil { atomic.AddInt32(&shardsLockCounter, -1) return nil, nil, err @@ -311,6 +288,11 @@ func setReadOnly(ctx context.Context, tablet *topodatapb.Tablet) error { return tmc.SetReadOnly(ctx, tablet) } +// changeTabletType calls the said RPC for the given tablet with the given parameters. +func changeTabletType(ctx context.Context, tablet *topodatapb.Tablet, tabletType topodatapb.TabletType, semiSync bool) error { + return tmc.ChangeType(ctx, tablet, tabletType, semiSync) +} + // setReplicationSource calls the said RPC with the parameters provided func setReplicationSource(ctx context.Context, replica *topodatapb.Tablet, primary *topodatapb.Tablet, semiSync bool) error { return tmc.SetReplicationSource(ctx, replica, primary.Alias, 0, "", true, semiSync) @@ -319,11 +301,7 @@ func setReplicationSource(ctx context.Context, replica *topodatapb.Tablet, prima // shardPrimary finds the primary of the given keyspace-shard by reading the vtorc backend func shardPrimary(keyspace string, shard string) (primary *topodatapb.Tablet, err error) { query := `SELECT - info, - hostname, - port, - tablet_type, - primary_timestamp + info FROM vitess_tablet WHERE @@ -336,7 +314,8 @@ func shardPrimary(keyspace string, shard string) (primary *topodatapb.Tablet, er err = db.Db.QueryVTOrc(query, sqlutils.Args(keyspace, shard, topodatapb.TabletType_PRIMARY), func(m sqlutils.RowMap) error { if primary == nil { primary = &topodatapb.Tablet{} - return prototext.Unmarshal([]byte(m.GetString("info")), primary) + opts := prototext.UnmarshalOptions{DiscardUnknown: true} + return opts.Unmarshal([]byte(m.GetString("info")), primary) } return nil }) @@ -347,10 +326,10 @@ func shardPrimary(keyspace string, shard string) (primary *topodatapb.Tablet, er } // restartsReplication restarts the replication on the provided replicaKey. It also sets the correct semi-sync settings when it starts replication -func restartReplication(replicaKey *inst.InstanceKey) error { - replicaTablet, err := inst.ReadTablet(*replicaKey) +func restartReplication(replicaAlias string) error { + replicaTablet, err := inst.ReadTablet(replicaAlias) if err != nil { - log.Info("Could not read tablet - %+v", replicaKey) + log.Info("Could not read tablet - %+v", replicaAlias) return err } @@ -360,7 +339,7 @@ func restartReplication(replicaKey *inst.InstanceKey) error { return err } - durabilityPolicy, err := inst.GetDurabilityPolicy(replicaTablet) + durabilityPolicy, err := inst.GetDurabilityPolicy(replicaTablet.Keyspace) if err != nil { log.Info("Could not read the durability policy for %v/%v", replicaTablet.Keyspace, replicaTablet.Shard) return err @@ -370,12 +349,12 @@ func restartReplication(replicaKey *inst.InstanceKey) error { defer cancel() err = tmc.StopReplication(ctx, replicaTablet) if err != nil { - log.Info("Could not stop replication on %v", topoproto.TabletAliasString(replicaTablet.Alias)) + log.Info("Could not stop replication on %v", replicaAlias) return err } - err = tmc.StartReplication(ctx, replicaTablet, inst.IsReplicaSemiSync(durabilityPolicy, primaryTablet, replicaTablet)) + err = tmc.StartReplication(ctx, replicaTablet, reparentutil.IsReplicaSemiSync(durabilityPolicy, primaryTablet, replicaTablet)) if err != nil { - log.Info("Could not start replication on %v", topoproto.TabletAliasString(replicaTablet.Alias)) + log.Info("Could not start replication on %v", replicaAlias) return err } return nil diff --git a/go/vt/vtorc/logic/tablet_discovery_test.go b/go/vt/vtorc/logic/tablet_discovery_test.go index d43cebefc0f..0e8ac72fabf 100644 --- a/go/vt/vtorc/logic/tablet_discovery_test.go +++ b/go/vt/vtorc/logic/tablet_discovery_test.go @@ -18,6 +18,7 @@ package logic import ( "context" + "fmt" "sync/atomic" "testing" @@ -27,10 +28,10 @@ import ( "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/external/golib/sqlutils" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/proto/vttime" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/inst" ) @@ -104,18 +105,17 @@ func TestRefreshTabletsInKeyspaceShard(t *testing.T) { ts = oldTs }() - // Open the vtorc - // After the test completes delete everything from the vitess_tablet table - orcDb, err := db.OpenVTOrc() - require.NoError(t, err) + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. defer func() { - _, err = orcDb.Exec("delete from vitess_tablet") - require.NoError(t, err) + db.ClearVTOrcDatabase() }() // Create a memory topo-server and create the keyspace and shard records - ts = memorytopo.NewServer(cell1) - _, err = ts.GetOrCreateShard(context.Background(), keyspace, shard) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts = memorytopo.NewServer(ctx, cell1) + _, err := ts.GetOrCreateShard(context.Background(), keyspace, shard) require.NoError(t, err) // Add tablets to the topo-server @@ -127,36 +127,47 @@ func TestRefreshTabletsInKeyspaceShard(t *testing.T) { t.Run("initial call to refreshTabletsInKeyspaceShard", func(t *testing.T) { // We expect all 3 tablets to be refreshed since they are being discovered for the first time - verifyRefreshTabletsInKeyspaceShard(t, false, 3, tablets) + verifyRefreshTabletsInKeyspaceShard(t, false, 3, tablets, nil) }) t.Run("call refreshTabletsInKeyspaceShard again - no force refresh", func(t *testing.T) { // We expect no tablets to be refreshed since they are all already upto date - verifyRefreshTabletsInKeyspaceShard(t, false, 0, tablets) + verifyRefreshTabletsInKeyspaceShard(t, false, 0, tablets, nil) }) t.Run("call refreshTabletsInKeyspaceShard again - force refresh", func(t *testing.T) { // We expect all 3 tablets to be refreshed since we requested force refresh - verifyRefreshTabletsInKeyspaceShard(t, true, 3, tablets) + verifyRefreshTabletsInKeyspaceShard(t, true, 3, tablets, nil) + }) + + t.Run("call refreshTabletsInKeyspaceShard again - force refresh with ignore", func(t *testing.T) { + // We expect 2 tablets to be refreshed since we requested force refresh, but we are ignoring one of them. + verifyRefreshTabletsInKeyspaceShard(t, true, 2, tablets, []string{topoproto.TabletAliasString(tab100.Alias)}) }) t.Run("tablet shutdown removes mysql hostname and port. We shouldn't forget the tablet", func(t *testing.T) { + startPort := tab100.MysqlPort + startHostname := tab100.MysqlHostname defer func() { + tab100.MysqlPort = startPort + tab100.MysqlHostname = startHostname _, err = ts.UpdateTabletFields(context.Background(), tab100.Alias, func(tablet *topodatapb.Tablet) error { - tablet.MysqlHostname = hostname - tablet.MysqlPort = 100 + tablet.MysqlHostname = startHostname + tablet.MysqlPort = startPort return nil }) }() - // Let's assume tab100 shutdown. This would clear its tablet hostname and port + // Let's assume tab100 shutdown. This would clear its tablet hostname and port. + tab100.MysqlPort = 0 + tab100.MysqlHostname = "" _, err = ts.UpdateTabletFields(context.Background(), tab100.Alias, func(tablet *topodatapb.Tablet) error { tablet.MysqlHostname = "" tablet.MysqlPort = 0 return nil }) require.NoError(t, err) - // We expect no tablets to be refreshed. Also, tab100 shouldn't be forgotten - verifyRefreshTabletsInKeyspaceShard(t, false, 0, tablets) + // tab100 shouldn't be forgotten + verifyRefreshTabletsInKeyspaceShard(t, false, 1, tablets, nil) }) t.Run("change a tablet and call refreshTabletsInKeyspaceShard again", func(t *testing.T) { @@ -175,7 +186,7 @@ func TestRefreshTabletsInKeyspaceShard(t *testing.T) { }) require.NoError(t, err) // We expect 1 tablet to be refreshed since that is the only one that has changed - verifyRefreshTabletsInKeyspaceShard(t, false, 1, tablets) + verifyRefreshTabletsInKeyspaceShard(t, false, 1, tablets, nil) }) t.Run("change the port and call refreshTabletsInKeyspaceShard again", func(t *testing.T) { @@ -195,7 +206,7 @@ func TestRefreshTabletsInKeyspaceShard(t *testing.T) { tab100.MysqlPort = 39293 // We expect 1 tablet to be refreshed since that is the only one that has changed // Also the old tablet should be forgotten - verifyRefreshTabletsInKeyspaceShard(t, false, 1, tablets) + verifyRefreshTabletsInKeyspaceShard(t, false, 1, tablets, nil) }) } @@ -227,22 +238,18 @@ func TestShardPrimary(t *testing.T) { ts = oldTs }() - // Open the vtorc - // After the test completes delete everything from the vitess_tablet table - orcDb, err := db.OpenVTOrc() - require.NoError(t, err) - defer func() { - _, err = orcDb.Exec("delete from vitess_tablet") - require.NoError(t, err) - }() - for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - _, err = orcDb.Exec("delete from vitess_tablet") + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() // Create a memory topo-server and create the keyspace and shard records - ts = memorytopo.NewServer(cell1) - _, err = ts.GetOrCreateShard(context.Background(), keyspace, shard) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts = memorytopo.NewServer(ctx, cell1) + _, err := ts.GetOrCreateShard(context.Background(), keyspace, shard) require.NoError(t, err) // Add tablets to the topo-server @@ -252,7 +259,7 @@ func TestShardPrimary(t *testing.T) { } // refresh the tablet info so that they are stored in the orch backend - verifyRefreshTabletsInKeyspaceShard(t, false, len(testcase.tablets), testcase.tablets) + verifyRefreshTabletsInKeyspaceShard(t, false, len(testcase.tablets), testcase.tablets, nil) primary, err := shardPrimary(keyspace, shard) if testcase.expectedErr != "" { @@ -269,13 +276,13 @@ func TestShardPrimary(t *testing.T) { // verifyRefreshTabletsInKeyspaceShard calls refreshTabletsInKeyspaceShard with the forceRefresh parameter provided and verifies that // the number of instances refreshed matches the parameter and all the tablets match the ones provided -func verifyRefreshTabletsInKeyspaceShard(t *testing.T, forceRefresh bool, instanceRefreshRequired int, tablets []*topodatapb.Tablet) { +func verifyRefreshTabletsInKeyspaceShard(t *testing.T, forceRefresh bool, instanceRefreshRequired int, tablets []*topodatapb.Tablet, tabletsToIgnore []string) { var instancesRefreshed atomic.Int32 instancesRefreshed.Store(0) // call refreshTabletsInKeyspaceShard while counting all the instances that are refreshed - refreshTabletsInKeyspaceShard(context.Background(), keyspace, shard, func(instanceKey *inst.InstanceKey) { + refreshTabletsInKeyspaceShard(context.Background(), keyspace, shard, func(string) { instancesRefreshed.Add(1) - }, forceRefresh) + }, forceRefresh, tabletsToIgnore) // Verify that all the tablets are present in the database for _, tablet := range tablets { verifyTabletInfo(t, tablet, "") @@ -289,16 +296,13 @@ func verifyRefreshTabletsInKeyspaceShard(t *testing.T, forceRefresh bool, instan // is the same as the one provided or reading it gives the same error as expected func verifyTabletInfo(t *testing.T, tabletWanted *topodatapb.Tablet, errString string) { t.Helper() - tabletKey := inst.InstanceKey{ - Hostname: hostname, - Port: int(tabletWanted.MysqlPort), - } - tablet, err := inst.ReadTablet(tabletKey) + tabletAlias := topoproto.TabletAliasString(tabletWanted.Alias) + tablet, err := inst.ReadTablet(tabletAlias) if errString != "" { assert.EqualError(t, err, errString) } else { assert.NoError(t, err) - assert.EqualValues(t, tabletKey.Port, tablet.MysqlPort) + assert.EqualValues(t, tabletAlias, topoproto.TabletAliasString(tablet.Alias)) diff := cmp.Diff(tablet, tabletWanted, cmp.Comparer(proto.Equal)) assert.Empty(t, diff) } @@ -315,3 +319,26 @@ func verifyTabletCount(t *testing.T, countWanted int) { require.NoError(t, err) require.Equal(t, countWanted, totalTablets) } + +func TestGetLockAction(t *testing.T) { + tests := []struct { + analysedInstance string + code inst.AnalysisCode + want string + }{ + { + analysedInstance: "zone1-100", + code: inst.DeadPrimary, + want: "VTOrc Recovery for DeadPrimary on zone1-100", + }, { + analysedInstance: "zone1-200", + code: inst.ReplicationStopped, + want: "VTOrc Recovery for ReplicationStopped on zone1-200", + }, + } + for _, tt := range tests { + t.Run(fmt.Sprintf("%v-%v", tt.analysedInstance, tt.code), func(t *testing.T) { + require.Equal(t, tt.want, getLockAction(tt.analysedInstance, tt.code)) + }) + } +} diff --git a/go/vt/vtorc/logic/topology_recovery.go b/go/vt/vtorc/logic/topology_recovery.go index 8a56cc9235b..8bd6da048d7 100644 --- a/go/vt/vtorc/logic/topology_recovery.go +++ b/go/vt/vtorc/logic/topology_recovery.go @@ -21,17 +21,15 @@ import ( "encoding/json" "fmt" "math/rand" - "strings" "time" "github.com/patrickmn/go-cache" - logutilpb "vitess.io/vitess/go/vt/proto/logutil" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" + logutilpb "vitess.io/vitess/go/vt/proto/logutil" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vtorc/config" @@ -43,17 +41,15 @@ import ( type RecoveryType string const ( - PrimaryRecovery RecoveryType = "PrimaryRecovery" - CoPrimaryRecovery RecoveryType = "CoPrimaryRecovery" - IntermediatePrimaryRecovery RecoveryType = "IntermediatePrimaryRecovery" - CheckAndRecoverGenericProblemRecoveryName string = "CheckAndRecoverGenericProblem" RecoverDeadPrimaryRecoveryName string = "RecoverDeadPrimary" + RecoverPrimaryTabletDeletedRecoveryName string = "RecoverPrimaryTabletDeleted" RecoverPrimaryHasPrimaryRecoveryName string = "RecoverPrimaryHasPrimary" CheckAndRecoverLockedSemiSyncPrimaryRecoveryName string = "CheckAndRecoverLockedSemiSyncPrimary" ElectNewPrimaryRecoveryName string = "ElectNewPrimary" FixPrimaryRecoveryName string = "FixPrimary" FixReplicaRecoveryName string = "FixReplica" + RecoverErrantGTIDDetectedName string = "RecoverErrantGTIDDetected" ) var ( @@ -86,69 +82,44 @@ const ( noRecoveryFunc recoveryFunction = iota recoverGenericProblemFunc recoverDeadPrimaryFunc + recoverPrimaryTabletDeletedFunc recoverPrimaryHasPrimaryFunc recoverLockedSemiSyncPrimaryFunc electNewPrimaryFunc fixPrimaryFunc fixReplicaFunc + recoverErrantGTIDDetectedFunc ) -type RecoveryAcknowledgement struct { - CreatedAt time.Time - Owner string - Comment string - - Key inst.InstanceKey - ID int64 - UID string - AllRecoveries bool -} - -// BlockedTopologyRecovery represents an entry in the blocked_topology_recovery table -type BlockedTopologyRecovery struct { - FailedInstanceKey inst.InstanceKey - Analysis inst.AnalysisCode - LastBlockedTimestamp string - BlockingRecoveryID int64 -} - // TopologyRecovery represents an entry in the topology_recovery table type TopologyRecovery struct { - inst.PostponedFunctionsContainer - - ID int64 - UID string - AnalysisEntry inst.ReplicationAnalysis - SuccessorKey *inst.InstanceKey - SuccessorAlias string - IsActive bool - IsSuccessful bool - LostReplicas inst.InstanceKeyMap - ParticipatingInstanceKeys inst.InstanceKeyMap - AllErrors []string - RecoveryStartTimestamp string - RecoveryEndTimestamp string - ProcessingNodeHostname string - ProcessingNodeToken string - Acknowledged bool - AcknowledgedAt string - AcknowledgedBy string - AcknowledgedComment string - LastDetectionID int64 - RelatedRecoveryID int64 - Type RecoveryType - RecoveryType PrimaryRecoveryType + ID int64 + UID string + AnalysisEntry inst.ReplicationAnalysis + SuccessorHostname string + SuccessorPort int + SuccessorAlias string + IsActive bool + IsSuccessful bool + AllErrors []string + RecoveryStartTimestamp string + RecoveryEndTimestamp string + ProcessingNodeHostname string + ProcessingNodeToken string + Acknowledged bool + AcknowledgedAt string + AcknowledgedBy string + AcknowledgedComment string + LastDetectionID int64 + RelatedRecoveryID int64 + Type RecoveryType } func NewTopologyRecovery(replicationAnalysis inst.ReplicationAnalysis) *TopologyRecovery { topologyRecovery := &TopologyRecovery{} topologyRecovery.UID = util.PrettyUniqueToken() topologyRecovery.AnalysisEntry = replicationAnalysis - topologyRecovery.SuccessorKey = nil - topologyRecovery.LostReplicas = *inst.NewInstanceKeyMap() - topologyRecovery.ParticipatingInstanceKeys = *inst.NewInstanceKeyMap() topologyRecovery.AllErrors = []string{} - topologyRecovery.RecoveryType = NotPrimaryRecovery return topologyRecovery } @@ -179,15 +150,6 @@ func NewTopologyRecoveryStep(uid string, message string) *TopologyRecoveryStep { } } -type PrimaryRecoveryType string - -const ( - NotPrimaryRecovery PrimaryRecoveryType = "NotPrimaryRecovery" - PrimaryRecoveryGTID PrimaryRecoveryType = "PrimaryRecoveryGTID" - PrimaryRecoveryBinlogServer PrimaryRecoveryType = "PrimaryRecoveryBinlogServer" - PrimaryRecoveryUnknown PrimaryRecoveryType = "PrimaryRecoveryUnknown" -) - var emergencyReadTopologyInstanceMap *cache.Cache var emergencyRestartReplicaTopologyInstanceMap *cache.Cache var emergencyOperationGracefulPeriodMap *cache.Cache @@ -217,7 +179,6 @@ func AuditTopologyRecovery(topologyRecovery *TopologyRecovery, message string) e func resolveRecovery(topologyRecovery *TopologyRecovery, successorInstance *inst.Instance) error { if successorInstance != nil { - topologyRecovery.SuccessorKey = &successorInstance.Key topologyRecovery.SuccessorAlias = successorInstance.InstanceAlias topologyRecovery.IsSuccessful = true } @@ -225,13 +186,13 @@ func resolveRecovery(topologyRecovery *TopologyRecovery, successorInstance *inst } // recoverPrimaryHasPrimary resets the replication on the primary instance -func recoverPrimaryHasPrimary(ctx context.Context, analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { - topologyRecovery, err = AttemptRecoveryRegistration(&analysisEntry, false, true) +func recoverPrimaryHasPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, false, true) if topologyRecovery == nil { - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another fixPrimaryHasPrimary.", analysisEntry.AnalyzedInstanceKey)) + _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another fixPrimaryHasPrimary.", analysisEntry.AnalyzedInstanceAlias)) return false, nil, err } - log.Infof("Analysis: %v, will fix incorrect primaryship %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) + log.Infof("Analysis: %v, will fix incorrect primaryship on %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) // This has to be done in the end; whether successful or not, we should mark that the recovery is done. // So that after the active period passes, we are able to run other recoveries. defer func() { @@ -239,40 +200,32 @@ func recoverPrimaryHasPrimary(ctx context.Context, analysisEntry inst.Replicatio }() // Reset replication on current primary. - err = inst.ResetReplicationParameters(analysisEntry.AnalyzedInstanceKey) + err = inst.ResetReplicationParameters(analysisEntry.AnalyzedInstanceAlias) if err != nil { return false, topologyRecovery, err } return true, topologyRecovery, nil } -// recoverDeadPrimary checks a given analysis, decides whether to take action, and possibly takes action -// Returns true when action was taken. -func recoverDeadPrimary(ctx context.Context, analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { - if !(forceInstanceRecovery || analysisEntry.ClusterDetails.HasAutomatedPrimaryRecovery) { +// runEmergencyReparentOp runs a recovery for which we have to run ERS. Here waitForAllTablets is a boolean telling ERS whether it should wait for all the tablets +// or is it okay to skip 1. +func runEmergencyReparentOp(ctx context.Context, analysisEntry *inst.ReplicationAnalysis, recoveryName string, waitForAllTablets bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { + if !analysisEntry.ClusterDetails.HasAutomatedPrimaryRecovery { return false, nil, nil } // Read the tablet information from the database to find the shard and keyspace of the tablet - tablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceKey) + tablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceAlias) if err != nil { return false, nil, err } - var candidateTabletAlias *topodatapb.TabletAlias - if candidateInstanceKey != nil { - candidateTablet, err := inst.ReadTablet(*candidateInstanceKey) - if err != nil { - return false, nil, err - } - candidateTabletAlias = candidateTablet.Alias - } - topologyRecovery, err = AttemptRecoveryRegistration(&analysisEntry, !forceInstanceRecovery, !forceInstanceRecovery) + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, true, true) if topologyRecovery == nil { - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another RecoverDeadPrimary.", analysisEntry.AnalyzedInstanceKey)) + _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another %v.", analysisEntry.AnalyzedInstanceAlias, recoveryName)) return false, nil, err } - log.Infof("Analysis: %v, deadprimary %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) + log.Infof("Analysis: %v, %v %+v", analysisEntry.Analysis, recoveryName, analysisEntry.AnalyzedInstanceAlias) var promotedReplica *inst.Instance // This has to be done in the end; whether successful or not, we should mark that the recovery is done. // So that after the active period passes, we are able to run other recoveries. @@ -297,10 +250,10 @@ func recoverDeadPrimary(ctx context.Context, analysisEntry inst.ReplicationAnaly tablet.Keyspace, tablet.Shard, reparentutil.EmergencyReparentOptions{ - NewPrimaryAlias: candidateTabletAlias, IgnoreReplicas: nil, WaitReplicasTimeout: time.Duration(config.Config.WaitReplicasTimeoutSeconds) * time.Second, PreventCrossCellPromotion: config.Config.PreventCrossDataCenterPrimaryFailover, + WaitAllTablets: waitForAllTablets, }, ) if err != nil { @@ -308,80 +261,84 @@ func recoverDeadPrimary(ctx context.Context, analysisEntry inst.ReplicationAnaly } if ev != nil && ev.NewPrimary != nil { - promotedReplica, _, _ = inst.ReadInstance(&inst.InstanceKey{ - Hostname: ev.NewPrimary.MysqlHostname, - Port: int(ev.NewPrimary.MysqlPort), - }) + promotedReplica, _, _ = inst.ReadInstance(topoproto.TabletAliasString(ev.NewPrimary.Alias)) } - postErsCompletion(topologyRecovery, analysisEntry, skipProcesses, promotedReplica) + postErsCompletion(topologyRecovery, analysisEntry, recoveryName, promotedReplica) return true, topologyRecovery, err } -func postErsCompletion(topologyRecovery *TopologyRecovery, analysisEntry inst.ReplicationAnalysis, skipProcesses bool, promotedReplica *inst.Instance) { +// recoverDeadPrimary checks a given analysis, decides whether to take action, and possibly takes action +// Returns true when action was taken. +func recoverDeadPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { + return runEmergencyReparentOp(ctx, analysisEntry, "RecoverDeadPrimary", false) +} + +// recoverPrimaryTabletDeleted tries to run a recovery for the case where the primary tablet has been deleted. +func recoverPrimaryTabletDeleted(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { + return runEmergencyReparentOp(ctx, analysisEntry, "PrimaryTabletDeleted", true) +} + +func postErsCompletion(topologyRecovery *TopologyRecovery, analysisEntry *inst.ReplicationAnalysis, recoveryName string, promotedReplica *inst.Instance) { if promotedReplica != nil { - message := fmt.Sprintf("promoted replica: %+v", promotedReplica.Key) + message := fmt.Sprintf("promoted replica: %+v", promotedReplica.InstanceAlias) _ = AuditTopologyRecovery(topologyRecovery, message) - _ = inst.AuditOperation("recover-dead-primary", &analysisEntry.AnalyzedInstanceKey, message) - } - // Now, see whether we are successful or not. From this point there's no going back. - if promotedReplica != nil { - // Success! - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("RecoverDeadPrimary: successfully promoted %+v", promotedReplica.Key)) + _ = inst.AuditOperation(recoveryName, analysisEntry.AnalyzedInstanceAlias, message) + _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("%v: successfully promoted %+v", recoveryName, promotedReplica.InstanceAlias)) } } // checkAndRecoverGenericProblem is a general-purpose recovery function -func checkAndRecoverLockedSemiSyncPrimary(ctx context.Context, analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { +func checkAndRecoverLockedSemiSyncPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { return false, nil, nil } // checkAndRecoverGenericProblem is a general-purpose recovery function -func checkAndRecoverGenericProblem(ctx context.Context, analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (bool, *TopologyRecovery, error) { +func checkAndRecoverGenericProblem(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (bool, *TopologyRecovery, error) { return false, nil, nil } // Force a re-read of a topology instance; this is done because we need to substantiate a suspicion // that we may have a failover scenario. we want to speed up reading the complete picture. -func emergentlyReadTopologyInstance(instanceKey *inst.InstanceKey, analysisCode inst.AnalysisCode) (instance *inst.Instance) { - if existsInCacheError := emergencyReadTopologyInstanceMap.Add(instanceKey.StringCode(), true, cache.DefaultExpiration); existsInCacheError != nil { +func emergentlyReadTopologyInstance(tabletAlias string, analysisCode inst.AnalysisCode) (instance *inst.Instance) { + if existsInCacheError := emergencyReadTopologyInstanceMap.Add(tabletAlias, true, cache.DefaultExpiration); existsInCacheError != nil { // Just recently attempted return nil } - instance, _ = inst.ReadTopologyInstance(instanceKey) - _ = inst.AuditOperation("emergently-read-topology-instance", instanceKey, string(analysisCode)) + instance, _ = inst.ReadTopologyInstance(tabletAlias) + _ = inst.AuditOperation("emergently-read-topology-instance", tabletAlias, string(analysisCode)) return instance } // Force reading of replicas of given instance. This is because we suspect the instance is dead, and want to speed up // detection of replication failure from its replicas. -func emergentlyReadTopologyInstanceReplicas(instanceKey *inst.InstanceKey, analysisCode inst.AnalysisCode) { - replicas, err := inst.ReadReplicaInstancesIncludingBinlogServerSubReplicas(instanceKey) +func emergentlyReadTopologyInstanceReplicas(primaryHost string, primaryPort int, analysisCode inst.AnalysisCode) { + replicas, err := inst.ReadReplicaInstancesIncludingBinlogServerSubReplicas(primaryHost, primaryPort) if err != nil { return } for _, replica := range replicas { - go emergentlyReadTopologyInstance(&replica.Key, analysisCode) + go emergentlyReadTopologyInstance(replica.InstanceAlias, analysisCode) } } // emergentlyRestartReplicationOnTopologyInstance forces a RestartReplication on a given instance. -func emergentlyRestartReplicationOnTopologyInstance(instanceKey *inst.InstanceKey, analysisCode inst.AnalysisCode) { - if existsInCacheError := emergencyRestartReplicaTopologyInstanceMap.Add(instanceKey.StringCode(), true, cache.DefaultExpiration); existsInCacheError != nil { +func emergentlyRestartReplicationOnTopologyInstance(tabletAlias string, analysisCode inst.AnalysisCode) { + if existsInCacheError := emergencyRestartReplicaTopologyInstanceMap.Add(tabletAlias, true, cache.DefaultExpiration); existsInCacheError != nil { // Just recently attempted on this specific replica return } go inst.ExecuteOnTopology(func() { - _ = restartReplication(instanceKey) - _ = inst.AuditOperation("emergently-restart-replication-topology-instance", instanceKey, string(analysisCode)) + _ = restartReplication(tabletAlias) + _ = inst.AuditOperation("emergently-restart-replication-topology-instance", tabletAlias, string(analysisCode)) }) } -func beginEmergencyOperationGracefulPeriod(instanceKey *inst.InstanceKey) { - emergencyOperationGracefulPeriodMap.Set(instanceKey.StringCode(), true, cache.DefaultExpiration) +func beginEmergencyOperationGracefulPeriod(tabletAlias string) { + emergencyOperationGracefulPeriodMap.Set(tabletAlias, true, cache.DefaultExpiration) } -func isInEmergencyOperationGracefulPeriod(instanceKey *inst.InstanceKey) bool { - _, found := emergencyOperationGracefulPeriodMap.Get(instanceKey.StringCode()) +func isInEmergencyOperationGracefulPeriod(tabletAlias string) bool { + _, found := emergencyOperationGracefulPeriodMap.Get(tabletAlias) return found } @@ -390,26 +347,25 @@ func isInEmergencyOperationGracefulPeriod(instanceKey *inst.InstanceKey) bool { // This can be useful in scenarios where the primary has Too Many Connections, but long-time connected // replicas are not seeing this; when they stop+start replication, they need to re-authenticate and // that's where we hope they realize the primary is bad. -func emergentlyRestartReplicationOnTopologyInstanceReplicas(instanceKey *inst.InstanceKey, analysisCode inst.AnalysisCode) { - if existsInCacheError := emergencyRestartReplicaTopologyInstanceMap.Add(instanceKey.StringCode(), true, cache.DefaultExpiration); existsInCacheError != nil { +func emergentlyRestartReplicationOnTopologyInstanceReplicas(primaryHost string, primaryPort int, tabletAlias string, analysisCode inst.AnalysisCode) { + if existsInCacheError := emergencyRestartReplicaTopologyInstanceMap.Add(tabletAlias, true, cache.DefaultExpiration); existsInCacheError != nil { // While each replica's RestartReplication() is throttled on its own, it's also wasteful to // iterate all replicas all the time. This is the reason why we do grand-throttle check. return } - beginEmergencyOperationGracefulPeriod(instanceKey) + beginEmergencyOperationGracefulPeriod(tabletAlias) - replicas, err := inst.ReadReplicaInstancesIncludingBinlogServerSubReplicas(instanceKey) + replicas, err := inst.ReadReplicaInstancesIncludingBinlogServerSubReplicas(primaryHost, primaryPort) if err != nil { return } for _, replica := range replicas { - replicaKey := &replica.Key - go emergentlyRestartReplicationOnTopologyInstance(replicaKey, analysisCode) + go emergentlyRestartReplicationOnTopologyInstance(replica.InstanceAlias, analysisCode) } } -func emergentlyRecordStaleBinlogCoordinates(instanceKey *inst.InstanceKey, binlogCoordinates *inst.BinlogCoordinates) { - err := inst.RecordStaleInstanceBinlogCoordinates(instanceKey, binlogCoordinates) +func emergentlyRecordStaleBinlogCoordinates(tabletAlias string, binlogCoordinates *inst.BinlogCoordinates) { + err := inst.RecordStaleInstanceBinlogCoordinates(tabletAlias, binlogCoordinates) if err != nil { log.Error(err) } @@ -417,30 +373,51 @@ func emergentlyRecordStaleBinlogCoordinates(instanceKey *inst.InstanceKey, binlo // checkAndExecuteFailureDetectionProcesses tries to register for failure detection and potentially executes // failure-detection processes. -func checkAndExecuteFailureDetectionProcesses(analysisEntry inst.ReplicationAnalysis, skipProcesses bool) (detectionRegistrationSuccess bool, processesExecutionAttempted bool, err error) { - if ok, _ := AttemptFailureDetectionRegistration(&analysisEntry); !ok { - if util.ClearToLog("checkAndExecuteFailureDetectionProcesses", analysisEntry.AnalyzedInstanceKey.StringCode()) { - log.Infof("checkAndExecuteFailureDetectionProcesses: could not register %+v detection on %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) +func checkAndExecuteFailureDetectionProcesses(analysisEntry *inst.ReplicationAnalysis) (detectionRegistrationSuccess bool, processesExecutionAttempted bool, err error) { + if ok, _ := AttemptFailureDetectionRegistration(analysisEntry); !ok { + if util.ClearToLog("checkAndExecuteFailureDetectionProcesses", analysisEntry.AnalyzedInstanceAlias) { + log.Infof("checkAndExecuteFailureDetectionProcesses: could not register %+v detection on %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) } return false, false, nil } - log.Infof("topology_recovery: detected %+v failure on %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) + log.Infof("topology_recovery: detected %+v failure on %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) return true, false, nil } // getCheckAndRecoverFunctionCode gets the recovery function code to use for the given analysis. -func getCheckAndRecoverFunctionCode(analysisCode inst.AnalysisCode, analyzedInstanceKey *inst.InstanceKey) recoveryFunction { +func getCheckAndRecoverFunctionCode(analysisCode inst.AnalysisCode, tabletAlias string) recoveryFunction { switch analysisCode { // primary case inst.DeadPrimary, inst.DeadPrimaryAndSomeReplicas: - if isInEmergencyOperationGracefulPeriod(analyzedInstanceKey) { + // If ERS is disabled, we have no way of repairing the cluster. + if !config.ERSEnabled() { + log.Infof("VTOrc not configured to run ERS, skipping recovering %v", analysisCode) + return noRecoveryFunc + } + if isInEmergencyOperationGracefulPeriod(tabletAlias) { return recoverGenericProblemFunc } return recoverDeadPrimaryFunc + case inst.PrimaryTabletDeleted: + // If ERS is disabled, we have no way of repairing the cluster. + if !config.ERSEnabled() { + log.Infof("VTOrc not configured to run ERS, skipping recovering %v", analysisCode) + return noRecoveryFunc + } + if isInEmergencyOperationGracefulPeriod(tabletAlias) { + return recoverGenericProblemFunc + } + return recoverPrimaryTabletDeletedFunc + case inst.ErrantGTIDDetected: + if !config.ConvertTabletWithErrantGTIDs() { + log.Infof("VTOrc not configured to do anything on detecting errant GTIDs, skipping recovering %v", analysisCode) + return noRecoveryFunc + } + return recoverErrantGTIDDetectedFunc case inst.PrimaryHasPrimary: return recoverPrimaryHasPrimaryFunc case inst.LockedSemiSyncPrimary: - if isInEmergencyOperationGracefulPeriod(analyzedInstanceKey) { + if isInEmergencyOperationGracefulPeriod(tabletAlias) { return recoverGenericProblemFunc } return recoverLockedSemiSyncPrimaryFunc @@ -481,6 +458,8 @@ func hasActionableRecovery(recoveryFunctionCode recoveryFunction) bool { return false case recoverDeadPrimaryFunc: return true + case recoverPrimaryTabletDeletedFunc: + return true case recoverPrimaryHasPrimaryFunc: return true case recoverLockedSemiSyncPrimaryFunc: @@ -491,6 +470,8 @@ func hasActionableRecovery(recoveryFunctionCode recoveryFunction) bool { return true case fixReplicaFunc: return true + case recoverErrantGTIDDetectedFunc: + return true default: return false } @@ -498,7 +479,7 @@ func hasActionableRecovery(recoveryFunctionCode recoveryFunction) bool { // getCheckAndRecoverFunction gets the recovery function for the given code. func getCheckAndRecoverFunction(recoveryFunctionCode recoveryFunction) ( - checkAndRecoverFunction func(ctx context.Context, analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error), + checkAndRecoverFunction func(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error), ) { switch recoveryFunctionCode { case noRecoveryFunc: @@ -507,6 +488,8 @@ func getCheckAndRecoverFunction(recoveryFunctionCode recoveryFunction) ( return checkAndRecoverGenericProblem case recoverDeadPrimaryFunc: return recoverDeadPrimary + case recoverPrimaryTabletDeletedFunc: + return recoverPrimaryTabletDeleted case recoverPrimaryHasPrimaryFunc: return recoverPrimaryHasPrimary case recoverLockedSemiSyncPrimaryFunc: @@ -517,6 +500,8 @@ func getCheckAndRecoverFunction(recoveryFunctionCode recoveryFunction) ( return fixPrimary case fixReplicaFunc: return fixReplica + case recoverErrantGTIDDetectedFunc: + return recoverErrantGTIDDetected default: return nil } @@ -532,6 +517,8 @@ func getRecoverFunctionName(recoveryFunctionCode recoveryFunction) string { return CheckAndRecoverGenericProblemRecoveryName case recoverDeadPrimaryFunc: return RecoverDeadPrimaryRecoveryName + case recoverPrimaryTabletDeletedFunc: + return RecoverPrimaryTabletDeletedRecoveryName case recoverPrimaryHasPrimaryFunc: return RecoverPrimaryHasPrimaryRecoveryName case recoverLockedSemiSyncPrimaryFunc: @@ -542,6 +529,8 @@ func getRecoverFunctionName(recoveryFunctionCode recoveryFunction) string { return FixPrimaryRecoveryName case fixReplicaFunc: return FixReplicaRecoveryName + case recoverErrantGTIDDetectedFunc: + return RecoverErrantGTIDDetectedName default: return "" } @@ -550,7 +539,7 @@ func getRecoverFunctionName(recoveryFunctionCode recoveryFunction) string { // isClusterWideRecovery returns whether the given recovery is a cluster-wide recovery or not func isClusterWideRecovery(recoveryFunctionCode recoveryFunction) bool { switch recoveryFunctionCode { - case recoverDeadPrimaryFunc, electNewPrimaryFunc: + case recoverDeadPrimaryFunc, electNewPrimaryFunc, recoverPrimaryTabletDeletedFunc: return true default: return false @@ -558,65 +547,65 @@ func isClusterWideRecovery(recoveryFunctionCode recoveryFunction) bool { } // analysisEntriesHaveSameRecovery tells whether the two analysis entries have the same recovery function or not -func analysisEntriesHaveSameRecovery(prevAnalysis, newAnalysis inst.ReplicationAnalysis) bool { - prevRecoveryFunctionCode := getCheckAndRecoverFunctionCode(prevAnalysis.Analysis, &prevAnalysis.AnalyzedInstanceKey) - newRecoveryFunctionCode := getCheckAndRecoverFunctionCode(newAnalysis.Analysis, &newAnalysis.AnalyzedInstanceKey) +func analysisEntriesHaveSameRecovery(prevAnalysis, newAnalysis *inst.ReplicationAnalysis) bool { + prevRecoveryFunctionCode := getCheckAndRecoverFunctionCode(prevAnalysis.Analysis, prevAnalysis.AnalyzedInstanceAlias) + newRecoveryFunctionCode := getCheckAndRecoverFunctionCode(newAnalysis.Analysis, newAnalysis.AnalyzedInstanceAlias) return prevRecoveryFunctionCode == newRecoveryFunctionCode } func runEmergentOperations(analysisEntry *inst.ReplicationAnalysis) { switch analysisEntry.Analysis { case inst.DeadPrimaryAndReplicas: - go emergentlyReadTopologyInstance(&analysisEntry.AnalyzedInstancePrimaryKey, analysisEntry.Analysis) + go emergentlyReadTopologyInstance(analysisEntry.AnalyzedInstancePrimaryAlias, analysisEntry.Analysis) case inst.UnreachablePrimary: - go emergentlyReadTopologyInstance(&analysisEntry.AnalyzedInstanceKey, analysisEntry.Analysis) - go emergentlyReadTopologyInstanceReplicas(&analysisEntry.AnalyzedInstanceKey, analysisEntry.Analysis) + go emergentlyReadTopologyInstance(analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis) + go emergentlyReadTopologyInstanceReplicas(analysisEntry.AnalyzedInstanceHostname, analysisEntry.AnalyzedInstancePort, analysisEntry.Analysis) case inst.UnreachablePrimaryWithLaggingReplicas: - go emergentlyRestartReplicationOnTopologyInstanceReplicas(&analysisEntry.AnalyzedInstanceKey, analysisEntry.Analysis) + go emergentlyRestartReplicationOnTopologyInstanceReplicas(analysisEntry.AnalyzedInstanceHostname, analysisEntry.AnalyzedInstancePort, analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis) case inst.LockedSemiSyncPrimaryHypothesis: - go emergentlyReadTopologyInstance(&analysisEntry.AnalyzedInstanceKey, analysisEntry.Analysis) - go emergentlyRecordStaleBinlogCoordinates(&analysisEntry.AnalyzedInstanceKey, &analysisEntry.AnalyzedInstanceBinlogCoordinates) + go emergentlyReadTopologyInstance(analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis) + go emergentlyRecordStaleBinlogCoordinates(analysisEntry.AnalyzedInstanceAlias, &analysisEntry.AnalyzedInstanceBinlogCoordinates) case inst.AllPrimaryReplicasNotReplicating: - go emergentlyReadTopologyInstance(&analysisEntry.AnalyzedInstanceKey, analysisEntry.Analysis) + go emergentlyReadTopologyInstance(analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis) case inst.AllPrimaryReplicasNotReplicatingOrDead: - go emergentlyReadTopologyInstance(&analysisEntry.AnalyzedInstanceKey, analysisEntry.Analysis) + go emergentlyReadTopologyInstance(analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis) } } // executeCheckAndRecoverFunction will choose the correct check & recovery function based on analysis. // It executes the function synchronuously -func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { +func executeCheckAndRecoverFunction(analysisEntry *inst.ReplicationAnalysis) (err error) { countPendingRecoveries.Add(1) defer countPendingRecoveries.Add(-1) - checkAndRecoverFunctionCode := getCheckAndRecoverFunctionCode(analysisEntry.Analysis, &analysisEntry.AnalyzedInstanceKey) + checkAndRecoverFunctionCode := getCheckAndRecoverFunctionCode(analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) isActionableRecovery := hasActionableRecovery(checkAndRecoverFunctionCode) analysisEntry.IsActionableRecovery = isActionableRecovery - runEmergentOperations(&analysisEntry) + runEmergentOperations(analysisEntry) if checkAndRecoverFunctionCode == noRecoveryFunc { // Unhandled problem type if analysisEntry.Analysis != inst.NoProblem { - if util.ClearToLog("executeCheckAndRecoverFunction", analysisEntry.AnalyzedInstanceKey.StringCode()) { - log.Warningf("executeCheckAndRecoverFunction: ignoring analysisEntry that has no action plan: %+v; key: %+v", - analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) + if util.ClearToLog("executeCheckAndRecoverFunction", analysisEntry.AnalyzedInstanceAlias) { + log.Warningf("executeCheckAndRecoverFunction: ignoring analysisEntry that has no action plan: %+v; tablet: %+v", + analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) } } - return false, nil, nil + return nil } // we have a recovery function; its execution still depends on filters if not disabled. - if isActionableRecovery || util.ClearToLog("executeCheckAndRecoverFunction: detection", analysisEntry.AnalyzedInstanceKey.StringCode()) { - log.Infof("executeCheckAndRecoverFunction: proceeding with %+v detection on %+v; isActionable?: %+v; skipProcesses: %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey, isActionableRecovery, skipProcesses) + if isActionableRecovery || util.ClearToLog("executeCheckAndRecoverFunction: detection", analysisEntry.AnalyzedInstanceAlias) { + log.Infof("executeCheckAndRecoverFunction: proceeding with %+v detection on %+v; isActionable?: %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias, isActionableRecovery) } // At this point we have validated there's a failure scenario for which we have a recovery path. // Initiate detection: - _, _, err = checkAndExecuteFailureDetectionProcesses(analysisEntry, skipProcesses) + _, _, err = checkAndExecuteFailureDetectionProcesses(analysisEntry) if err != nil { log.Errorf("executeCheckAndRecoverFunction: error on failure detection: %+v", err) - return false, nil, err + return err } // We don't mind whether detection really executed the processes or not // (it may have been silenced due to previous detection). We only care there's no error. @@ -628,22 +617,16 @@ func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, cand // Unexpected. Shouldn't get this log.Errorf("Unable to determine if recovery is disabled globally: %v", err) } else if recoveryDisabledGlobally { - if !forceInstanceRecovery { - log.Infof("CheckAndRecover: Analysis: %+v, InstanceKey: %+v, candidateInstanceKey: %+v, "+ - "skipProcesses: %v: NOT Recovering host (disabled globally)", - analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey, candidateInstanceKey, skipProcesses) + log.Infof("CheckAndRecover: Analysis: %+v, Tablet: %+v: NOT Recovering host (disabled globally)", + analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) - return false, nil, err - } - log.Infof("CheckAndRecover: Analysis: %+v, InstanceKey: %+v, candidateInstanceKey: %+v, "+ - "skipProcesses: %v: recoveries disabled globally but forcing this recovery", - analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey, candidateInstanceKey, skipProcesses) + return err } // We lock the shard here and then refresh the tablets information - ctx, unlock, err := LockShard(context.Background(), analysisEntry.AnalyzedInstanceKey) + ctx, unlock, err := LockShard(context.Background(), analysisEntry.AnalyzedInstanceAlias, getLockAction(analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis)) if err != nil { - return false, nil, err + return err } defer unlock(&err) @@ -652,18 +635,27 @@ func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, cand // changes, we should be checking that this failure is indeed needed to be fixed. We do this after locking the shard to be sure // that the data that we use now is up-to-date. if isActionableRecovery { - // The first step we have to do is refresh the keyspace information + log.Errorf("executeCheckAndRecoverFunction: Proceeding with %v recovery on %v validation after acquiring shard lock.", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) + // The first step we have to do is refresh the keyspace and shard information // This is required to know if the durability policies have changed or not - // If they have, then recoveries like ReplicaSemiSyncMustNotBeSet, etc won't be valid anymore - err := RefreshKeyspace(analysisEntry.AnalyzedKeyspace) + // If they have, then recoveries like ReplicaSemiSyncMustNotBeSet, etc won't be valid anymore. + // Similarly, a new primary could have been elected in the mean-time that can cause + // a change in the recovery we run. + err = RefreshKeyspaceAndShard(analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard) if err != nil { - return false, nil, err + return err } // If we are about to run a cluster-wide recovery, it is imperative to first refresh all the tablets // of a shard because a new tablet could have been promoted, and we need to have this visibility before we // run a cluster operation of our own. if isClusterWideRecovery(checkAndRecoverFunctionCode) { - forceRefreshAllTabletsInShard(ctx, analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard) + var tabletsToIgnore []string + if checkAndRecoverFunctionCode == recoverDeadPrimaryFunc { + tabletsToIgnore = append(tabletsToIgnore, analysisEntry.AnalyzedInstanceAlias) + } + // We ignore the dead primary tablet because it is going to be unreachable. If all the other tablets aren't able to reach this tablet either, + // we can proceed with the dead primary recovery. We don't need to refresh the information for this dead tablet. + forceRefreshAllTabletsInShard(ctx, analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard, tabletsToIgnore) } else { // If we are not running a cluster-wide recovery, then it is only concerned with the specific tablet // on which the failure occurred and the primary instance of the shard. @@ -672,42 +664,39 @@ func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, cand // So, we only need to refresh the tablet info records (to know if the primary tablet has changed), // and the replication data of the new primary and this tablet. refreshTabletInfoOfShard(ctx, analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard) - DiscoverInstance(analysisEntry.AnalyzedInstanceKey, true) + DiscoverInstance(analysisEntry.AnalyzedInstanceAlias, true) primaryTablet, err := shardPrimary(analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard) if err != nil { - log.Errorf("executeCheckAndRecoverFunction: Analysis: %+v, InstanceKey: %+v, candidateInstanceKey: %+v, "+"skipProcesses: %v: error while finding the shard primary: %v", - analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey, candidateInstanceKey, skipProcesses, err) - return false, nil, err - } - primaryInstanceKey := inst.InstanceKey{ - Hostname: primaryTablet.MysqlHostname, - Port: int(primaryTablet.MysqlPort), + log.Errorf("executeCheckAndRecoverFunction: Analysis: %+v, Tablet: %+v: error while finding the shard primary: %v", + analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias, err) + return err } + primaryTabletAlias := topoproto.TabletAliasString(primaryTablet.Alias) // We can skip the refresh if we know the tablet we are looking at is the primary tablet. // This would be the case for PrimaryHasPrimary recovery. We don't need to refresh the same tablet twice. - if !analysisEntry.AnalyzedInstanceKey.Equals(&primaryInstanceKey) { - DiscoverInstance(primaryInstanceKey, true) + if analysisEntry.AnalyzedInstanceAlias != primaryTabletAlias { + DiscoverInstance(primaryTabletAlias, true) } } alreadyFixed, err := checkIfAlreadyFixed(analysisEntry) if err != nil { - log.Errorf("executeCheckAndRecoverFunction: Analysis: %+v, InstanceKey: %+v, candidateInstanceKey: %+v, "+"skipProcesses: %v: error while trying to find if the problem is already fixed: %v", - analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey, candidateInstanceKey, skipProcesses, err) - return false, nil, err + log.Errorf("executeCheckAndRecoverFunction: Analysis: %+v, Tablet: %+v: error while trying to find if the problem is already fixed: %v", + analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias, err) + return err } if alreadyFixed { - log.Infof("Analysis: %v - No longer valid, some other agent must have fixed the problem.", analysisEntry.Analysis) - return false, nil, nil + log.Infof("Analysis: %v on tablet %v - No longer valid, some other agent must have fixed the problem.", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) + return nil } } // Actually attempt recovery: - if isActionableRecovery || util.ClearToLog("executeCheckAndRecoverFunction: recovery", analysisEntry.AnalyzedInstanceKey.StringCode()) { - log.Infof("executeCheckAndRecoverFunction: proceeding with %+v recovery on %+v; isRecoverable?: %+v; skipProcesses: %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey, isActionableRecovery, skipProcesses) + if isActionableRecovery || util.ClearToLog("executeCheckAndRecoverFunction: recovery", analysisEntry.AnalyzedInstanceAlias) { + log.Infof("executeCheckAndRecoverFunction: proceeding with %+v recovery on %+v; isRecoverable?: %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias, isActionableRecovery) } - recoveryAttempted, topologyRecovery, err = getCheckAndRecoverFunction(checkAndRecoverFunctionCode)(ctx, analysisEntry, candidateInstanceKey, forceInstanceRecovery, skipProcesses) + recoveryAttempted, topologyRecovery, err := getCheckAndRecoverFunction(checkAndRecoverFunctionCode)(ctx, analysisEntry) if !recoveryAttempted { - return recoveryAttempted, topologyRecovery, err + return err } recoveryName := getRecoverFunctionName(checkAndRecoverFunctionCode) recoveriesCounter.Add(recoveryName, 1) @@ -717,36 +706,30 @@ func executeCheckAndRecoverFunction(analysisEntry inst.ReplicationAnalysis, cand recoveriesSuccessfulCounter.Add(recoveryName, 1) } if topologyRecovery == nil { - return recoveryAttempted, topologyRecovery, err + return err } if b, err := json.Marshal(topologyRecovery); err == nil { log.Infof("Topology recovery: %+v", string(b)) } else { log.Infof("Topology recovery: %+v", topologyRecovery) } - // If we ran a cluster wide recovery and actually attemped it, then we know that the replication state for all the tablets in this cluster + // If we ran a cluster wide recovery and actually attempted it, then we know that the replication state for all the tablets in this cluster // would have changed. So we can go ahead and pre-emptively refresh them. // For this refresh we don't use the same context that we used for the recovery, since that context might have expired or could expire soon // Instead we pass the background context. The call forceRefreshAllTabletsInShard handles adding a timeout to it for us. if isClusterWideRecovery(checkAndRecoverFunctionCode) { - forceRefreshAllTabletsInShard(context.Background(), analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard) + forceRefreshAllTabletsInShard(context.Background(), analysisEntry.AnalyzedKeyspace, analysisEntry.AnalyzedShard, nil) } else { // For all other recoveries, we would have changed the replication status of the analyzed tablet // so it doesn't hurt to re-read the information of this tablet, otherwise we'll requeue the same recovery // that we just completed because we would be using stale data. - DiscoverInstance(analysisEntry.AnalyzedInstanceKey, true) - } - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("Waiting for %d postponed functions", topologyRecovery.PostponedFunctionsContainer.Len())) - topologyRecovery.Wait() - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("Executed %d postponed functions", topologyRecovery.PostponedFunctionsContainer.Len())) - if topologyRecovery.PostponedFunctionsContainer.Len() > 0 { - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("Executed postponed functions: %+v", strings.Join(topologyRecovery.PostponedFunctionsContainer.Descriptions(), ", "))) + DiscoverInstance(analysisEntry.AnalyzedInstanceAlias, true) } - return recoveryAttempted, topologyRecovery, err + return err } // checkIfAlreadyFixed checks whether the problem that the analysis entry represents has already been fixed by another agent or not -func checkIfAlreadyFixed(analysisEntry inst.ReplicationAnalysis) (bool, error) { +func checkIfAlreadyFixed(analysisEntry *inst.ReplicationAnalysis) (bool, error) { // Run a replication analysis again. We will check if the problem persisted analysisEntries, err := inst.GetReplicationAnalysis(analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard, &inst.ReplicationAnalysisHints{}) if err != nil { @@ -755,7 +738,7 @@ func checkIfAlreadyFixed(analysisEntry inst.ReplicationAnalysis) (bool, error) { for _, entry := range analysisEntries { // If there is a analysis which has the same recovery required, then we should proceed with the recovery - if entry.AnalyzedInstanceKey.Equals(&analysisEntry.AnalyzedInstanceKey) && analysisEntriesHaveSameRecovery(analysisEntry, entry) { + if entry.AnalyzedInstanceAlias == analysisEntry.AnalyzedInstanceAlias && analysisEntriesHaveSameRecovery(analysisEntry, entry) { return false, nil } } @@ -765,67 +748,41 @@ func checkIfAlreadyFixed(analysisEntry inst.ReplicationAnalysis) (bool, error) { } // CheckAndRecover is the main entry point for the recovery mechanism -func CheckAndRecover(specificInstance *inst.InstanceKey, candidateInstanceKey *inst.InstanceKey, skipProcesses bool) (recoveryAttempted bool, promotedReplicaKey *inst.InstanceKey, err error) { +func CheckAndRecover() { // Allow the analysis to run even if we don't want to recover - replicationAnalysis, err := inst.GetReplicationAnalysis("", "", &inst.ReplicationAnalysisHints{IncludeDowntimed: true, AuditAnalysis: true}) + replicationAnalysis, err := inst.GetReplicationAnalysis("", "", &inst.ReplicationAnalysisHints{AuditAnalysis: true}) if err != nil { log.Error(err) - return false, nil, err + return } // intentionally iterating entries in random order for _, j := range rand.Perm(len(replicationAnalysis)) { analysisEntry := replicationAnalysis[j] - if specificInstance != nil { - // We are looking for a specific instance; if this is not the one, skip! - if !specificInstance.Equals(&analysisEntry.AnalyzedInstanceKey) { - continue - } - } - if analysisEntry.SkippableDueToDowntime && specificInstance == nil { - // Only recover a downtimed server if explicitly requested - continue - } - if specificInstance != nil { - // force mode. Keep it synchronuous - var topologyRecovery *TopologyRecovery - recoveryAttempted, topologyRecovery, err = executeCheckAndRecoverFunction(analysisEntry, candidateInstanceKey, true, skipProcesses) + go func() { + err = executeCheckAndRecoverFunction(analysisEntry) if err != nil { log.Error(err) } - if topologyRecovery != nil { - promotedReplicaKey = topologyRecovery.SuccessorKey - } - } else { - go func() { - _, _, err := executeCheckAndRecoverFunction(analysisEntry, candidateInstanceKey, false, skipProcesses) - if err != nil { - log.Error(err) - } - }() - } + }() + } - return recoveryAttempted, promotedReplicaKey, err } -func postPrsCompletion(topologyRecovery *TopologyRecovery, analysisEntry inst.ReplicationAnalysis, promotedReplica *inst.Instance) { +func postPrsCompletion(topologyRecovery *TopologyRecovery, analysisEntry *inst.ReplicationAnalysis, promotedReplica *inst.Instance) { if promotedReplica != nil { - message := fmt.Sprintf("promoted replica: %+v", promotedReplica.Key) + message := fmt.Sprintf("promoted replica: %+v", promotedReplica.InstanceAlias) _ = AuditTopologyRecovery(topologyRecovery, message) - _ = inst.AuditOperation(string(analysisEntry.Analysis), &analysisEntry.AnalyzedInstanceKey, message) - } - // Now, see whether we are successful or not. From this point there's no going back. - if promotedReplica != nil { - // Success! - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("%+v: successfully promoted %+v", analysisEntry.Analysis, promotedReplica.Key)) + _ = inst.AuditOperation(string(analysisEntry.Analysis), analysisEntry.AnalyzedInstanceAlias, message) + _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("%+v: successfully promoted %+v", analysisEntry.Analysis, promotedReplica.InstanceAlias)) } } // electNewPrimary elects a new primary while none were present before. -func electNewPrimary(ctx context.Context, analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { - topologyRecovery, err = AttemptRecoveryRegistration(&analysisEntry, false /*failIfFailedInstanceInActiveRecovery*/, true /*failIfClusterInActiveRecovery*/) +func electNewPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, false /*failIfFailedInstanceInActiveRecovery*/, true /*failIfClusterInActiveRecovery*/) if topologyRecovery == nil || err != nil { - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another electNewPrimary.", analysisEntry.AnalyzedInstanceKey)) + _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another electNewPrimary.", analysisEntry.AnalyzedInstanceAlias)) return false, nil, err } log.Infof("Analysis: %v, will elect a new primary for %v:%v", analysisEntry.Analysis, analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard) @@ -837,7 +794,7 @@ func electNewPrimary(ctx context.Context, analysisEntry inst.ReplicationAnalysis _ = resolveRecovery(topologyRecovery, promotedReplica) }() - analyzedTablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceKey) + analyzedTablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceAlias) if err != nil { return false, topologyRecovery, err } @@ -863,61 +820,58 @@ func electNewPrimary(ctx context.Context, analysisEntry inst.ReplicationAnalysis ) if ev != nil && ev.NewPrimary != nil { - promotedReplica, _, _ = inst.ReadInstance(&inst.InstanceKey{ - Hostname: ev.NewPrimary.MysqlHostname, - Port: int(ev.NewPrimary.MysqlPort), - }) + promotedReplica, _, _ = inst.ReadInstance(topoproto.TabletAliasString(ev.NewPrimary.Alias)) } postPrsCompletion(topologyRecovery, analysisEntry, promotedReplica) return true, topologyRecovery, err } // fixPrimary sets the primary as read-write. -func fixPrimary(ctx context.Context, analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { - topologyRecovery, err = AttemptRecoveryRegistration(&analysisEntry, false, true) +func fixPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, false, true) if topologyRecovery == nil { - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another fixPrimary.", analysisEntry.AnalyzedInstanceKey)) + _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another fixPrimary.", analysisEntry.AnalyzedInstanceAlias)) return false, nil, err } - log.Infof("Analysis: %v, will fix primary to read-write %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) + log.Infof("Analysis: %v, will fix primary to read-write %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) // This has to be done in the end; whether successful or not, we should mark that the recovery is done. // So that after the active period passes, we are able to run other recoveries. defer func() { _ = resolveRecovery(topologyRecovery, nil) }() - analyzedTablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceKey) + analyzedTablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceAlias) if err != nil { return false, topologyRecovery, err } - durabilityPolicy, err := inst.GetDurabilityPolicy(analyzedTablet) + durabilityPolicy, err := inst.GetDurabilityPolicy(analyzedTablet.Keyspace) if err != nil { log.Info("Could not read the durability policy for %v/%v", analyzedTablet.Keyspace, analyzedTablet.Shard) return false, topologyRecovery, err } - if err := tabletUndoDemotePrimary(ctx, analyzedTablet, inst.SemiSyncAckers(durabilityPolicy, analyzedTablet) > 0); err != nil { + if err := tabletUndoDemotePrimary(ctx, analyzedTablet, reparentutil.SemiSyncAckers(durabilityPolicy, analyzedTablet) > 0); err != nil { return true, topologyRecovery, err } return true, topologyRecovery, nil } // fixReplica sets the replica as read-only and points it at the current primary. -func fixReplica(ctx context.Context, analysisEntry inst.ReplicationAnalysis, candidateInstanceKey *inst.InstanceKey, forceInstanceRecovery bool, skipProcesses bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { - topologyRecovery, err = AttemptRecoveryRegistration(&analysisEntry, false, true) +func fixReplica(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, false, true) if topologyRecovery == nil { - _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another fixReplica.", analysisEntry.AnalyzedInstanceKey)) + _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another fixReplica.", analysisEntry.AnalyzedInstanceAlias)) return false, nil, err } - log.Infof("Analysis: %v, will fix replica %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceKey) + log.Infof("Analysis: %v, will fix replica %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) // This has to be done in the end; whether successful or not, we should mark that the recovery is done. // So that after the active period passes, we are able to run other recoveries. defer func() { _ = resolveRecovery(topologyRecovery, nil) }() - analyzedTablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceKey) + analyzedTablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceAlias) if err != nil { return false, topologyRecovery, err } @@ -928,7 +882,7 @@ func fixReplica(ctx context.Context, analysisEntry inst.ReplicationAnalysis, can return false, topologyRecovery, err } - durabilityPolicy, err := inst.GetDurabilityPolicy(analyzedTablet) + durabilityPolicy, err := inst.GetDurabilityPolicy(analyzedTablet.Keyspace) if err != nil { log.Info("Could not read the durability policy for %v/%v", analyzedTablet.Keyspace, analyzedTablet.Shard) return false, topologyRecovery, err @@ -936,10 +890,45 @@ func fixReplica(ctx context.Context, analysisEntry inst.ReplicationAnalysis, can err = setReadOnly(ctx, analyzedTablet) if err != nil { - log.Info("Could not set the tablet %v to readonly - %v", topoproto.TabletAliasString(analyzedTablet.Alias), err) + log.Info("Could not set the tablet %v to readonly - %v", analysisEntry.AnalyzedInstanceAlias, err) return true, topologyRecovery, err } - err = setReplicationSource(ctx, analyzedTablet, primaryTablet, inst.IsReplicaSemiSync(durabilityPolicy, primaryTablet, analyzedTablet)) + err = setReplicationSource(ctx, analyzedTablet, primaryTablet, reparentutil.IsReplicaSemiSync(durabilityPolicy, primaryTablet, analyzedTablet)) + return true, topologyRecovery, err +} + +// recoverErrantGTIDDetected changes the tablet type of a replica tablet that has errant GTIDs. +func recoverErrantGTIDDetected(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, false, true) + if topologyRecovery == nil { + _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another recoverErrantGTIDDetected.", analysisEntry.AnalyzedInstanceAlias)) + return false, nil, err + } + log.Infof("Analysis: %v, will fix tablet %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) + // This has to be done in the end; whether successful or not, we should mark that the recovery is done. + // So that after the active period passes, we are able to run other recoveries. + defer func() { + _ = resolveRecovery(topologyRecovery, nil) + }() + + analyzedTablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceAlias) + if err != nil { + return false, topologyRecovery, err + } + + primaryTablet, err := shardPrimary(analyzedTablet.Keyspace, analyzedTablet.Shard) + if err != nil { + log.Info("Could not compute primary for %v/%v", analyzedTablet.Keyspace, analyzedTablet.Shard) + return false, topologyRecovery, err + } + + durabilityPolicy, err := inst.GetDurabilityPolicy(analyzedTablet.Keyspace) + if err != nil { + log.Info("Could not read the durability policy for %v/%v", analyzedTablet.Keyspace, analyzedTablet.Shard) + return false, topologyRecovery, err + } + + err = changeTabletType(ctx, analyzedTablet, topodatapb.TabletType_DRAINED, reparentutil.IsReplicaSemiSync(durabilityPolicy, primaryTablet, analyzedTablet)) return true, topologyRecovery, err } diff --git a/go/vt/vtorc/logic/topology_recovery_dao.go b/go/vt/vtorc/logic/topology_recovery_dao.go index 65df9c1ebed..c835b9ecfe4 100644 --- a/go/vt/vtorc/logic/topology_recovery_dao.go +++ b/go/vt/vtorc/logic/topology_recovery_dao.go @@ -32,8 +32,7 @@ import ( // AttemptFailureDetectionRegistration tries to add a failure-detection entry; if this fails that means the problem has already been detected func AttemptFailureDetectionRegistration(analysisEntry *inst.ReplicationAnalysis) (registrationSuccessful bool, err error) { args := sqlutils.Args( - analysisEntry.AnalyzedInstanceKey.Hostname, - analysisEntry.AnalyzedInstanceKey.Port, + analysisEntry.AnalyzedInstanceAlias, process.ThisHostname, util.ProcessToken.Hash, string(analysisEntry.Analysis), @@ -51,8 +50,7 @@ func AttemptFailureDetectionRegistration(analysisEntry *inst.ReplicationAnalysis query := fmt.Sprintf(` insert ignore into topology_failure_detection ( - hostname, - port, + alias, in_active_period, end_active_period_unixtime, processing_node_hostname, @@ -64,7 +62,6 @@ func AttemptFailureDetectionRegistration(analysisEntry *inst.ReplicationAnalysis is_actionable, start_active_period ) values ( - ?, ?, 1, 0, @@ -118,8 +115,7 @@ func writeTopologyRecovery(topologyRecovery *TopologyRecovery) (*TopologyRecover into topology_recovery ( recovery_id, uid, - hostname, - port, + alias, in_active_period, start_active_period, end_active_period_unixtime, @@ -134,7 +130,6 @@ func writeTopologyRecovery(topologyRecovery *TopologyRecovery) (*TopologyRecover ?, ?, ?, - ?, 1, NOW(), 0, @@ -144,18 +139,18 @@ func writeTopologyRecovery(topologyRecovery *TopologyRecovery) (*TopologyRecover ?, ?, ?, - (select ifnull(max(detection_id), 0) from topology_failure_detection where hostname=? and port=?) + (select ifnull(max(detection_id), 0) from topology_failure_detection where alias = ?) ) `, sqlutils.NilIfZero(topologyRecovery.ID), topologyRecovery.UID, - analysisEntry.AnalyzedInstanceKey.Hostname, analysisEntry.AnalyzedInstanceKey.Port, + analysisEntry.AnalyzedInstanceAlias, process.ThisHostname, util.ProcessToken.Hash, string(analysisEntry.Analysis), analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard, analysisEntry.CountReplicas, - analysisEntry.AnalyzedInstanceKey.Hostname, analysisEntry.AnalyzedInstanceKey.Port, + analysisEntry.AnalyzedInstanceAlias, ) if err != nil { return nil, err @@ -180,36 +175,36 @@ func AttemptRecoveryRegistration(analysisEntry *inst.ReplicationAnalysis, failIf if failIfFailedInstanceInActiveRecovery { // Let's check if this instance has just been promoted recently and is still in active period. // If so, we reject recovery registration to avoid flapping. - recoveries, err := ReadInActivePeriodSuccessorInstanceRecovery(&analysisEntry.AnalyzedInstanceKey) + recoveries, err := ReadInActivePeriodSuccessorInstanceRecovery(analysisEntry.AnalyzedInstanceAlias) if err != nil { log.Error(err) return nil, err } if len(recoveries) > 0 { _ = RegisterBlockedRecoveries(analysisEntry, recoveries) - errMsg := fmt.Sprintf("AttemptRecoveryRegistration: instance %+v has recently been promoted (by failover of %+v) and is in active period. It will not be failed over. You may acknowledge the failure on %+v (-c ack-instance-recoveries) to remove this blockage", analysisEntry.AnalyzedInstanceKey, recoveries[0].AnalysisEntry.AnalyzedInstanceKey, recoveries[0].AnalysisEntry.AnalyzedInstanceKey) + errMsg := fmt.Sprintf("AttemptRecoveryRegistration: tablet %+v has recently been promoted (by failover of %+v) and is in active period. It will not be failed over. You may acknowledge the failure on %+v (-c ack-instance-recoveries) to remove this blockage", analysisEntry.AnalyzedInstanceAlias, recoveries[0].AnalysisEntry.AnalyzedInstanceAlias, recoveries[0].AnalysisEntry.AnalyzedInstanceAlias) log.Errorf(errMsg) return nil, fmt.Errorf(errMsg) } } if failIfClusterInActiveRecovery { - // Let's check if this cluster has just experienced a failover and is still in active period. + // Let's check if this cluster has just experienced a failover of the same analysis and is still in active period. // If so, we reject recovery registration to avoid flapping. - recoveries, err := ReadInActivePeriodClusterRecovery(analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard) + recoveries, err := ReadInActivePeriodClusterRecovery(analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard, string(analysisEntry.Analysis)) if err != nil { log.Error(err) return nil, err } if len(recoveries) > 0 { _ = RegisterBlockedRecoveries(analysisEntry, recoveries) - errMsg := fmt.Sprintf("AttemptRecoveryRegistration: keyspace %+v shard %+v has recently experienced a failover (of %+v) and is in active period. It will not be failed over again. You may acknowledge the failure on this cluster (-c ack-cluster-recoveries) or on %+v (-c ack-instance-recoveries) to remove this blockage", analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard, recoveries[0].AnalysisEntry.AnalyzedInstanceKey, recoveries[0].AnalysisEntry.AnalyzedInstanceKey) + errMsg := fmt.Sprintf("AttemptRecoveryRegistration: keyspace %+v shard %+v has recently experienced a failover (of %+v) and is in active period. It will not be failed over again. You may acknowledge the failure on this cluster (-c ack-cluster-recoveries) or on %+v (-c ack-instance-recoveries) to remove this blockage", analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard, recoveries[0].AnalysisEntry.AnalyzedInstanceAlias, recoveries[0].AnalysisEntry.AnalyzedInstanceAlias) log.Errorf(errMsg) return nil, fmt.Errorf(errMsg) } } if !failIfFailedInstanceInActiveRecovery { // Implicitly acknowledge this instance's possibly existing active recovery, provided they are completed. - _, _ = AcknowledgeInstanceCompletedRecoveries(&analysisEntry.AnalyzedInstanceKey, "vtorc", fmt.Sprintf("implicit acknowledge due to user invocation of recovery on same instance: %+v", analysisEntry.AnalyzedInstanceKey)) + _, _ = AcknowledgeInstanceCompletedRecoveries(analysisEntry.AnalyzedInstanceAlias, "vtorc", fmt.Sprintf("implicit acknowledge due to user invocation of recovery on same instance: %+v", analysisEntry.AnalyzedInstanceAlias)) // The fact we only acknowledge a completed recovery solves the possible case of two DBAs simultaneously // trying to recover the same instance at the same time } @@ -250,8 +245,7 @@ func RegisterBlockedRecoveries(analysisEntry *inst.ReplicationAnalysis, blocking _, err := db.ExecVTOrc(` insert into blocked_topology_recovery ( - hostname, - port, + alias, keyspace, shard, analysis, @@ -262,7 +256,6 @@ func RegisterBlockedRecoveries(analysisEntry *inst.ReplicationAnalysis, blocking ?, ?, ?, - ?, NOW(), ? ) @@ -272,8 +265,7 @@ func RegisterBlockedRecoveries(analysisEntry *inst.ReplicationAnalysis, blocking analysis=values(analysis), last_blocked_timestamp=values(last_blocked_timestamp), blocking_recovery_id=values(blocking_recovery_id) - `, analysisEntry.AnalyzedInstanceKey.Hostname, - analysisEntry.AnalyzedInstanceKey.Port, + `, analysisEntry.AnalyzedInstanceAlias, analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard, string(analysisEntry.Analysis), @@ -294,30 +286,27 @@ func ExpireBlockedRecoveries() error { query := ` select - blocked_topology_recovery.hostname, - blocked_topology_recovery.port + blocked_topology_recovery.alias from blocked_topology_recovery left join topology_recovery on (blocking_recovery_id = topology_recovery.recovery_id and acknowledged = 0) where acknowledged is null ` - expiredKeys := inst.NewInstanceKeyMap() + var expiredAliases []string err := db.QueryVTOrc(query, sqlutils.Args(), func(m sqlutils.RowMap) error { - key := inst.InstanceKey{Hostname: m.GetString("hostname"), Port: m.GetInt("port")} - expiredKeys.AddKey(key) + expiredAliases = append(expiredAliases, m.GetString("alias")) return nil }) - for _, expiredKey := range expiredKeys.GetInstanceKeys() { + for _, expiredAlias := range expiredAliases { _, err := db.ExecVTOrc(` delete from blocked_topology_recovery where - hostname = ? - and port = ? + alias = ? `, - expiredKey.Hostname, expiredKey.Port, + expiredAlias, ) if err != nil { log.Error(err) @@ -382,13 +371,12 @@ func acknowledgeRecoveries(owner string, comment string, markEndRecovery bool, w // AcknowledgeInstanceCompletedRecoveries marks active and COMPLETED recoveries for given instane as acknowledged. // This also implied clearing their active period, which in turn enables further recoveries on those topologies -func AcknowledgeInstanceCompletedRecoveries(instanceKey *inst.InstanceKey, owner string, comment string) (countAcknowledgedEntries int64, err error) { +func AcknowledgeInstanceCompletedRecoveries(tabletAlias string, owner string, comment string) (countAcknowledgedEntries int64, err error) { whereClause := ` - hostname = ? - and port = ? + alias = ? and end_recovery is not null ` - return acknowledgeRecoveries(owner, comment, false, whereClause, sqlutils.Args(instanceKey.Hostname, instanceKey.Port)) + return acknowledgeRecoveries(owner, comment, false, whereClause, sqlutils.Args(tabletAlias)) } // AcknowledgeCrashedRecoveries marks recoveries whose processing nodes has crashed as acknowledged. @@ -406,25 +394,16 @@ func AcknowledgeCrashedRecoveries() (countAcknowledgedEntries int64, err error) // ResolveRecovery is called on completion of a recovery process and updates the recovery status. // It does not clear the "active period" as this still takes place in order to avoid flapping. func writeResolveRecovery(topologyRecovery *TopologyRecovery) error { - var successorKeyToWrite inst.InstanceKey - if topologyRecovery.IsSuccessful { - successorKeyToWrite = *topologyRecovery.SuccessorKey - } _, err := db.ExecVTOrc(` update topology_recovery set is_successful = ?, - successor_hostname = ?, - successor_port = ?, successor_alias = ?, - lost_replicas = ?, - participating_instances = ?, all_errors = ?, end_recovery = NOW() where uid = ? - `, topologyRecovery.IsSuccessful, successorKeyToWrite.Hostname, successorKeyToWrite.Port, - topologyRecovery.SuccessorAlias, topologyRecovery.LostReplicas.ToCommaDelimitedList(), - topologyRecovery.ParticipatingInstanceKeys.ToCommaDelimitedList(), + `, topologyRecovery.IsSuccessful, + topologyRecovery.SuccessorAlias, strings.Join(topologyRecovery.AllErrors, "\n"), topologyRecovery.UID, ) @@ -439,32 +418,27 @@ func readRecoveries(whereCondition string, limit string, args []any) ([]*Topolog res := []*TopologyRecovery{} query := fmt.Sprintf(` select - recovery_id, - uid, - hostname, - port, - (IFNULL(end_active_period_unixtime, 0) = 0) as is_active, - start_active_period, - IFNULL(end_active_period_unixtime, 0) as end_active_period_unixtime, - IFNULL(end_recovery, '') AS end_recovery, - is_successful, - processing_node_hostname, - processcing_node_token, - ifnull(successor_hostname, '') as successor_hostname, - ifnull(successor_port, 0) as successor_port, - ifnull(successor_alias, '') as successor_alias, - analysis, - keyspace, - shard, - count_affected_replicas, - participating_instances, - lost_replicas, - all_errors, - acknowledged, - acknowledged_at, - acknowledged_by, - acknowledge_comment, - last_detection_id + recovery_id, + uid, + alias, + (IFNULL(end_active_period_unixtime, 0) = 0) as is_active, + start_active_period, + IFNULL(end_active_period_unixtime, 0) as end_active_period_unixtime, + IFNULL(end_recovery, '') AS end_recovery, + is_successful, + processing_node_hostname, + processcing_node_token, + ifnull(successor_alias, '') as successor_alias, + analysis, + keyspace, + shard, + count_affected_replicas, + all_errors, + acknowledged, + acknowledged_at, + acknowledged_by, + acknowledge_comment, + last_detection_id from topology_recovery %s @@ -484,23 +458,17 @@ func readRecoveries(whereCondition string, limit string, args []any) ([]*Topolog topologyRecovery.ProcessingNodeHostname = m.GetString("processing_node_hostname") topologyRecovery.ProcessingNodeToken = m.GetString("processcing_node_token") - topologyRecovery.AnalysisEntry.AnalyzedInstanceKey.Hostname = m.GetString("hostname") - topologyRecovery.AnalysisEntry.AnalyzedInstanceKey.Port = m.GetInt("port") + topologyRecovery.AnalysisEntry.AnalyzedInstanceAlias = m.GetString("alias") topologyRecovery.AnalysisEntry.Analysis = inst.AnalysisCode(m.GetString("analysis")) topologyRecovery.AnalysisEntry.ClusterDetails.Keyspace = m.GetString("keyspace") topologyRecovery.AnalysisEntry.ClusterDetails.Shard = m.GetString("shard") topologyRecovery.AnalysisEntry.CountReplicas = m.GetUint("count_affected_replicas") - topologyRecovery.SuccessorKey = &inst.InstanceKey{} - topologyRecovery.SuccessorKey.Hostname = m.GetString("successor_hostname") - topologyRecovery.SuccessorKey.Port = m.GetInt("successor_port") topologyRecovery.SuccessorAlias = m.GetString("successor_alias") topologyRecovery.AnalysisEntry.ClusterDetails.ReadRecoveryInfo() topologyRecovery.AllErrors = strings.Split(m.GetString("all_errors"), "\n") - _ = topologyRecovery.LostReplicas.ReadCommaDelimitedList(m.GetString("lost_replicas")) - _ = topologyRecovery.ParticipatingInstanceKeys.ReadCommaDelimitedList(m.GetString("participating_instances")) topologyRecovery.Acknowledged = m.GetBool("acknowledged") topologyRecovery.AcknowledgedAt = m.GetString("acknowledged_at") @@ -519,33 +487,34 @@ func readRecoveries(whereCondition string, limit string, args []any) ([]*Topolog return res, err } -// ReadInActivePeriodClusterRecovery reads recoveries (possibly complete!) that are in active period. -// (may be used to block further recoveries on this cluster) -func ReadInActivePeriodClusterRecovery(keyspace string, shard string) ([]*TopologyRecovery, error) { +// ReadInActivePeriodClusterRecovery reads recoveries (possibly complete!) that are in active period for the analysis. +// (may be used to block further recoveries of the same analysis on this cluster) +func ReadInActivePeriodClusterRecovery(keyspace string, shard, analysis string) ([]*TopologyRecovery, error) { whereClause := ` where in_active_period=1 and keyspace=? - and shard=?` - return readRecoveries(whereClause, ``, sqlutils.Args(keyspace, shard)) + and shard=? + and analysis=?` + return readRecoveries(whereClause, ``, sqlutils.Args(keyspace, shard, analysis)) } // ReadInActivePeriodSuccessorInstanceRecovery reads completed recoveries for a given instance, where said instance // was promoted as result, still in active period (may be used to block further recoveries should this instance die) -func ReadInActivePeriodSuccessorInstanceRecovery(instanceKey *inst.InstanceKey) ([]*TopologyRecovery, error) { +func ReadInActivePeriodSuccessorInstanceRecovery(tabletAlias string) ([]*TopologyRecovery, error) { whereClause := ` where in_active_period=1 and - successor_hostname=? and successor_port=?` - return readRecoveries(whereClause, ``, sqlutils.Args(instanceKey.Hostname, instanceKey.Port)) + successor_alias=?` + return readRecoveries(whereClause, ``, sqlutils.Args(tabletAlias)) } // ReadRecentRecoveries reads latest recovery entries from topology_recovery func ReadRecentRecoveries(unacknowledgedOnly bool, page int) ([]*TopologyRecovery, error) { whereConditions := []string{} whereClause := "" - args := sqlutils.Args() + var args []any if unacknowledgedOnly { whereConditions = append(whereConditions, `acknowledged=0`) } diff --git a/go/vt/vtorc/logic/topology_recovery_dao_test.go b/go/vt/vtorc/logic/topology_recovery_dao_test.go index f01e16560a8..f9a9026a4a1 100644 --- a/go/vt/vtorc/logic/topology_recovery_dao_test.go +++ b/go/vt/vtorc/logic/topology_recovery_dao_test.go @@ -22,7 +22,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/inst" ) @@ -39,11 +38,8 @@ func TestTopologyRecovery(t *testing.T) { }() replicationAnalysis := inst.ReplicationAnalysis{ - AnalyzedInstanceKey: inst.InstanceKey{ - Hostname: hostname, - Port: 101, - }, - TabletType: tab101.Type, + AnalyzedInstanceAlias: "zone1-0000000101", + TabletType: tab101.Type, ClusterDetails: inst.ClusterInfo{ Keyspace: keyspace, Shard: shard, @@ -81,10 +77,7 @@ func TestBlockedRecoveryInsertion(t *testing.T) { }() analysisEntry := &inst.ReplicationAnalysis{ - AnalyzedInstanceKey: inst.InstanceKey{ - Hostname: "localhost", - Port: 100, - }, + AnalyzedInstanceAlias: "zone1-0000000100", ClusterDetails: inst.ClusterInfo{ Keyspace: "ks", Shard: "0", diff --git a/go/vt/vtorc/logic/topology_recovery_status.go b/go/vt/vtorc/logic/topology_recovery_status.go index 4e85b0529de..d1195963ba1 100644 --- a/go/vt/vtorc/logic/topology_recovery_status.go +++ b/go/vt/vtorc/logic/topology_recovery_status.go @@ -35,13 +35,15 @@ const TopologyRecoveriesTemplate = ` Recovery ID Failure Type - Instance + Tablet Alias + Timestamp {{range $i, $recovery := .}} {{$recovery.ID}} {{$recovery.AnalysisEntry.Analysis}} - {{$recovery.AnalysisEntry.AnalyzedInstanceKey}} + {{$recovery.AnalysisEntry.AnalyzedInstanceAlias}} + {{$recovery.RecoveryStartTimestamp}} {{end}} diff --git a/go/vt/vtorc/logic/topology_recovery_test.go b/go/vt/vtorc/logic/topology_recovery_test.go index e76442c2cff..d517649fd13 100644 --- a/go/vt/vtorc/logic/topology_recovery_test.go +++ b/go/vt/vtorc/logic/topology_recovery_test.go @@ -26,10 +26,10 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/inst" - - // import the gRPC client implementation for tablet manager _ "vitess.io/vitess/go/vt/vttablet/grpctmclient" ) @@ -44,6 +44,11 @@ func TestAnalysisEntriesHaveSameRecovery(t *testing.T) { prevAnalysisCode: inst.DeadPrimary, newAnalysisCode: inst.DeadPrimaryAndSomeReplicas, shouldBeEqual: true, + }, { + // DeadPrimary and PrimaryTabletDeleted are different recoveries. + prevAnalysisCode: inst.DeadPrimary, + newAnalysisCode: inst.PrimaryTabletDeleted, + shouldBeEqual: false, }, { // same codes will always have same recovery prevAnalysisCode: inst.DeadPrimary, @@ -87,7 +92,7 @@ func TestAnalysisEntriesHaveSameRecovery(t *testing.T) { t.Parallel() for _, tt := range tests { t.Run(string(tt.prevAnalysisCode)+","+string(tt.newAnalysisCode), func(t *testing.T) { - res := analysisEntriesHaveSameRecovery(inst.ReplicationAnalysis{Analysis: tt.prevAnalysisCode}, inst.ReplicationAnalysis{Analysis: tt.newAnalysisCode}) + res := analysisEntriesHaveSameRecovery(&inst.ReplicationAnalysis{Analysis: tt.prevAnalysisCode}, &inst.ReplicationAnalysis{Analysis: tt.newAnalysisCode}) require.Equal(t, tt.shouldBeEqual, res) }) } @@ -117,14 +122,158 @@ func TestElectNewPrimaryPanic(t *testing.T) { } err = inst.SaveTablet(tablet) require.NoError(t, err) - analysisEntry := inst.ReplicationAnalysis{ - AnalyzedInstanceKey: inst.InstanceKey{ - Hostname: tablet.MysqlHostname, - Port: int(tablet.MysqlPort), - }, + analysisEntry := &inst.ReplicationAnalysis{ + AnalyzedInstanceAlias: topoproto.TabletAliasString(tablet.Alias), } - ts = memorytopo.NewServer("zone1") - recoveryAttempted, _, err := electNewPrimary(context.Background(), analysisEntry, nil, false, false) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts = memorytopo.NewServer(ctx, "zone1") + recoveryAttempted, _, err := electNewPrimary(context.Background(), analysisEntry) require.True(t, recoveryAttempted) require.Error(t, err) } + +func TestDifferentAnalysescHaveDifferentCooldowns(t *testing.T) { + orcDb, err := db.OpenVTOrc() + require.NoError(t, err) + oldTs := ts + defer func() { + ts = oldTs + _, err = orcDb.Exec("delete from vitess_tablet") + require.NoError(t, err) + }() + + primary := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 1, + }, + Hostname: "localhost1", + MysqlHostname: "localhost1", + MysqlPort: 1200, + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_PRIMARY, + } + replica := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 2, + }, + Hostname: "localhost2", + MysqlHostname: "localhost2", + MysqlPort: 1200, + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_REPLICA, + } + err = inst.SaveTablet(primary) + require.NoError(t, err) + err = inst.SaveTablet(replica) + require.NoError(t, err) + primaryAnalysisEntry := inst.ReplicationAnalysis{ + AnalyzedInstanceAlias: topoproto.TabletAliasString(primary.Alias), + Analysis: inst.ReplicationStopped, + } + replicaAnalysisEntry := inst.ReplicationAnalysis{ + AnalyzedInstanceAlias: topoproto.TabletAliasString(replica.Alias), + Analysis: inst.DeadPrimary, + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts = memorytopo.NewServer(ctx, "zone1") + _, err = AttemptRecoveryRegistration(&replicaAnalysisEntry, false, true) + require.Nil(t, err) + + // even though this is another recovery on the same cluster, allow it to go through + // because the analysis is different (ReplicationStopped vs DeadPrimary) + _, err = AttemptRecoveryRegistration(&primaryAnalysisEntry, true, true) + require.Nil(t, err) +} + +func TestGetCheckAndRecoverFunctionCode(t *testing.T) { + tests := []struct { + name string + ersEnabled bool + convertTabletWithErrantGTIDs bool + analysisCode inst.AnalysisCode + wantRecoveryFunction recoveryFunction + }{ + { + name: "DeadPrimary with ERS enabled", + ersEnabled: true, + analysisCode: inst.DeadPrimary, + wantRecoveryFunction: recoverDeadPrimaryFunc, + }, { + name: "DeadPrimary with ERS disabled", + ersEnabled: false, + analysisCode: inst.DeadPrimary, + wantRecoveryFunction: noRecoveryFunc, + }, { + name: "PrimaryTabletDeleted with ERS enabled", + ersEnabled: true, + analysisCode: inst.PrimaryTabletDeleted, + wantRecoveryFunction: recoverPrimaryTabletDeletedFunc, + }, { + name: "PrimaryTabletDeleted with ERS disabled", + ersEnabled: false, + analysisCode: inst.PrimaryTabletDeleted, + wantRecoveryFunction: noRecoveryFunc, + }, { + name: "PrimaryHasPrimary", + ersEnabled: false, + analysisCode: inst.PrimaryHasPrimary, + wantRecoveryFunction: recoverPrimaryHasPrimaryFunc, + }, { + name: "ClusterHasNoPrimary", + ersEnabled: false, + analysisCode: inst.ClusterHasNoPrimary, + wantRecoveryFunction: electNewPrimaryFunc, + }, { + name: "ReplicationStopped", + ersEnabled: false, + analysisCode: inst.ReplicationStopped, + wantRecoveryFunction: fixReplicaFunc, + }, { + name: "PrimarySemiSyncMustBeSet", + ersEnabled: false, + analysisCode: inst.PrimarySemiSyncMustBeSet, + wantRecoveryFunction: fixPrimaryFunc, + }, { + name: "ErrantGTIDDetected", + ersEnabled: false, + convertTabletWithErrantGTIDs: true, + analysisCode: inst.ErrantGTIDDetected, + wantRecoveryFunction: recoverErrantGTIDDetectedFunc, + }, { + name: "ErrantGTIDDetected with --change-tablets-with-errant-gtid-to-drained false", + ersEnabled: false, + convertTabletWithErrantGTIDs: false, + analysisCode: inst.ErrantGTIDDetected, + wantRecoveryFunction: noRecoveryFunc, + }, + } + + // Needed for the test to work + oldMap := emergencyOperationGracefulPeriodMap + emergencyOperationGracefulPeriodMap = cache.New(time.Second*5, time.Millisecond*500) + defer func() { + emergencyOperationGracefulPeriodMap = oldMap + }() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + prevVal := config.ERSEnabled() + config.SetERSEnabled(tt.ersEnabled) + defer config.SetERSEnabled(prevVal) + + convertErrantVal := config.ConvertTabletWithErrantGTIDs() + config.SetConvertTabletWithErrantGTIDs(tt.convertTabletWithErrantGTIDs) + defer config.SetConvertTabletWithErrantGTIDs(convertErrantVal) + + gotFunc := getCheckAndRecoverFunctionCode(tt.analysisCode, "") + require.EqualValues(t, tt.wantRecoveryFunction, gotFunc) + }) + } +} diff --git a/go/vt/vtorc/logic/orchestrator.go b/go/vt/vtorc/logic/vtorc.go similarity index 80% rename from go/vt/vtorc/logic/orchestrator.go rename to go/vt/vtorc/logic/vtorc.go index dcc30027392..02fb41daa21 100644 --- a/go/vt/vtorc/logic/orchestrator.go +++ b/go/vt/vtorc/logic/vtorc.go @@ -40,14 +40,14 @@ import ( ) const ( - discoveryMetricsName = "DISCOVERY_METRICS" + DiscoveryMetricsName = "DISCOVERY_METRICS" ) // discoveryQueue is a channel of deduplicated instanceKey-s // that were requested for discovery. It can be continuously updated // as discovery process progresses. var discoveryQueue *discovery.Queue -var snapshotDiscoveryKeys chan inst.InstanceKey +var snapshotDiscoveryKeys chan string var snapshotDiscoveryKeysMutex sync.Mutex var hasReceivedSIGTERM int32 @@ -58,14 +58,14 @@ var discoveryQueueLengthGauge = metrics.NewGauge() var discoveryRecentCountGauge = metrics.NewGauge() var isElectedGauge = metrics.NewGauge() var isHealthyGauge = metrics.NewGauge() -var discoveryMetrics = collection.CreateOrReturnCollection(discoveryMetricsName) +var discoveryMetrics = collection.CreateOrReturnCollection(DiscoveryMetricsName) var isElectedNode int64 var recentDiscoveryOperationKeys *cache.Cache func init() { - snapshotDiscoveryKeys = make(chan inst.InstanceKey, 10) + snapshotDiscoveryKeys = make(chan string, 10) _ = metrics.Register("discoveries.attempt", discoveriesCounter) _ = metrics.Register("discoveries.fail", failedDiscoveriesCounter) @@ -113,7 +113,7 @@ func acceptSighupSignal() { go func() { for range c { log.Infof("Received SIGHUP. Reloading configuration") - _ = inst.AuditOperation("reload-configuration", nil, "Triggered via SIGHUP") + _ = inst.AuditOperation("reload-configuration", "", "Triggered via SIGHUP") config.Reload() discoveryMetrics.SetExpirePeriod(time.Duration(config.DiscoveryCollectionRetentionSeconds) * time.Second) } @@ -126,7 +126,7 @@ func closeVTOrc() { atomic.StoreInt32(&hasReceivedSIGTERM, 1) discoveryMetrics.StopAutoExpiration() // Poke other go routines to stop cleanly here ... - _ = inst.AuditOperation("shutdown", nil, "Triggered via SIGTERM") + _ = inst.AuditOperation("shutdown", "", "Triggered via SIGTERM") // wait for the locks to be released waitForLocksRelease() log.Infof("VTOrc closed") @@ -155,34 +155,33 @@ func waitForLocksRelease() { // instance discovery per entry. func handleDiscoveryRequests() { discoveryQueue = discovery.CreateOrReturnQueue("DEFAULT") - // create a pool of discovery workers for i := uint(0); i < config.DiscoveryMaxConcurrency; i++ { go func() { for { - instanceKey := discoveryQueue.Consume() + tabletAlias := discoveryQueue.Consume() // Possibly this used to be the elected node, but has // been demoted, while still the queue is full. if !IsLeaderOrActive() { log.Infof("Node apparently demoted. Skipping discovery of %+v. "+ - "Remaining queue size: %+v", instanceKey, discoveryQueue.QueueLen()) - discoveryQueue.Release(instanceKey) + "Remaining queue size: %+v", tabletAlias, discoveryQueue.QueueLen()) + discoveryQueue.Release(tabletAlias) continue } - DiscoverInstance(instanceKey, false /* forceDiscovery */) - discoveryQueue.Release(instanceKey) + DiscoverInstance(tabletAlias, false /* forceDiscovery */) + discoveryQueue.Release(tabletAlias) } }() } } // DiscoverInstance will attempt to discover (poll) an instance (unless -// it is already up to date) and will also ensure that its primary and +// it is already up-to-date) and will also ensure that its primary and // replicas (if any) are also checked. -func DiscoverInstance(instanceKey inst.InstanceKey, forceDiscovery bool) { - if inst.InstanceIsForgotten(&instanceKey) { - log.Infof("discoverInstance: skipping discovery of %+v because it is set to be forgotten", instanceKey) +func DiscoverInstance(tabletAlias string, forceDiscovery bool) { + if inst.InstanceIsForgotten(tabletAlias) { + log.Infof("discoverInstance: skipping discovery of %+v because it is set to be forgotten", tabletAlias) return } @@ -193,31 +192,33 @@ func DiscoverInstance(instanceKey inst.InstanceKey, forceDiscovery bool) { "instance", "total"}) latency.Start("total") // start the total stopwatch (not changed anywhere else) - + var metric *discovery.Metric defer func() { latency.Stop("total") discoveryTime := latency.Elapsed("total") if discoveryTime > instancePollSecondsDuration() { instancePollSecondsExceededCounter.Inc(1) - log.Warningf("discoverInstance exceeded InstancePollSeconds for %+v, took %.4fs", instanceKey, discoveryTime.Seconds()) + log.Warningf("discoverInstance exceeded InstancePollSeconds for %+v, took %.4fs", tabletAlias, discoveryTime.Seconds()) + if metric != nil { + metric.InstancePollSecondsDurationCount = 1 + } } }() - _, _ = instanceKey.ResolveHostname() - if !instanceKey.IsValid() { + if tabletAlias == "" { return } // Calculate the expiry period each time as InstancePollSeconds // _may_ change during the run of the process (via SIGHUP) and // it is not possible to change the cache's default expiry.. - if existsInCacheError := recentDiscoveryOperationKeys.Add(instanceKey.DisplayString(), true, instancePollSecondsDuration()); existsInCacheError != nil && !forceDiscovery { + if existsInCacheError := recentDiscoveryOperationKeys.Add(tabletAlias, true, instancePollSecondsDuration()); existsInCacheError != nil && !forceDiscovery { // Just recently attempted return } latency.Start("backend") - instance, found, _ := inst.ReadInstance(&instanceKey) + instance, found, _ := inst.ReadInstance(tabletAlias) latency.Stop("backend") if !forceDiscovery && found && instance.IsUpToDate && instance.IsLastCheckValid { // we've already discovered this one. Skip! @@ -227,7 +228,7 @@ func DiscoverInstance(instanceKey inst.InstanceKey, forceDiscovery bool) { discoveriesCounter.Inc(1) // First we've ever heard of this instance. Continue investigation: - instance, err := inst.ReadTopologyInstanceBufferable(&instanceKey, latency) + instance, err := inst.ReadTopologyInstanceBufferable(tabletAlias, latency) // panic can occur (IO stuff). Therefore it may happen // that instance is nil. Check it, but first get the timing metrics. totalLatency := latency.Elapsed("total") @@ -240,17 +241,18 @@ func DiscoverInstance(instanceKey inst.InstanceKey, forceDiscovery bool) { if instance == nil { failedDiscoveriesCounter.Inc(1) - _ = discoveryMetrics.Append(&discovery.Metric{ + metric = &discovery.Metric{ Timestamp: time.Now(), - InstanceKey: instanceKey, + TabletAlias: tabletAlias, TotalLatency: totalLatency, BackendLatency: backendLatency, InstanceLatency: instanceLatency, Err: err, - }) - if util.ClearToLog("discoverInstance", instanceKey.StringCode()) { + } + _ = discoveryMetrics.Append(metric) + if util.ClearToLog("discoverInstance", tabletAlias) { log.Warningf(" DiscoverInstance(%+v) instance is nil in %.3fs (Backend: %.3fs, Instance: %.3fs), error=%+v", - instanceKey, + tabletAlias, totalLatency.Seconds(), backendLatency.Seconds(), instanceLatency.Seconds(), @@ -259,20 +261,20 @@ func DiscoverInstance(instanceKey inst.InstanceKey, forceDiscovery bool) { return } - _ = discoveryMetrics.Append(&discovery.Metric{ + metric = &discovery.Metric{ Timestamp: time.Now(), - InstanceKey: instanceKey, + TabletAlias: tabletAlias, TotalLatency: totalLatency, BackendLatency: backendLatency, InstanceLatency: instanceLatency, Err: nil, - }) + } + _ = discoveryMetrics.Append(metric) } // onHealthTick handles the actions to take to discover/poll instances func onHealthTick() { wasAlreadyElected := IsLeader() - { myIsElectedNode, err := process.AttemptElection() if err != nil { @@ -294,7 +296,7 @@ func onHealthTick() { if !IsLeaderOrActive() { return } - instanceKeys, err := inst.ReadOutdatedInstanceKeys() + tabletAliases, err := inst.ReadOutdatedInstanceKeys() if err != nil { log.Error(err) } @@ -304,9 +306,6 @@ func onHealthTick() { go func() { _, _ = process.RegisterNode(process.ThisNodeHealth) }() - go func() { - _ = inst.ExpireMaintenance() - }() } func() { @@ -317,14 +316,14 @@ func onHealthTick() { countSnapshotKeys := len(snapshotDiscoveryKeys) for i := 0; i < countSnapshotKeys; i++ { - instanceKeys = append(instanceKeys, <-snapshotDiscoveryKeys) + tabletAliases = append(tabletAliases, <-snapshotDiscoveryKeys) } }() // avoid any logging unless there's something to be done - if len(instanceKeys) > 0 { - for _, instanceKey := range instanceKeys { - if instanceKey.IsValid() { - discoveryQueue.Push(instanceKey) + if len(tabletAliases) > 0 { + for _, tabletAlias := range tabletAliases { + if tabletAlias != "" { + discoveryQueue.Push(tabletAlias) } } } @@ -340,11 +339,9 @@ func ContinuousDiscovery() { checkAndRecoverWaitPeriod := 3 * instancePollSecondsDuration() recentDiscoveryOperationKeys = cache.New(instancePollSecondsDuration(), time.Second) - _ = inst.LoadHostnameResolveCache() go handleDiscoveryRequests() healthTick := time.Tick(config.HealthPollSeconds * time.Second) - instancePollTick := time.Tick(instancePollSecondsDuration()) caretakingTick := time.Tick(time.Minute) recoveryTick := time.Tick(time.Duration(config.Config.RecoveryPollSeconds) * time.Second) tabletTopoTick := OpenTabletDiscovery() @@ -372,39 +369,19 @@ func ContinuousDiscovery() { go func() { onHealthTick() }() - case <-instancePollTick: - go func() { - // This tick does NOT do instance poll (these are handled by the oversampling discoveryTick) - // But rather should invoke such routinely operations that need to be as (or roughly as) frequent - // as instance poll - if IsLeaderOrActive() { - go inst.ExpireDowntime() - } - }() case <-caretakingTick: // Various periodic internal maintenance tasks go func() { if IsLeaderOrActive() { go inst.ForgetLongUnseenInstances() - go inst.ForgetUnseenInstancesDifferentlyResolved() - go inst.ForgetExpiredHostnameResolves() - go inst.DeleteInvalidHostnameResolves() - go inst.ResolveUnknownPrimaryHostnameResolves() - go inst.ExpireMaintenance() - go inst.ExpireCandidateInstances() - go inst.ExpireHostnameUnresolve() go inst.ExpireAudit() - go inst.FlushNontrivialResolveCacheToDatabase() go inst.ExpireStaleInstanceBinlogCoordinates() go process.ExpireNodesHistory() go process.ExpireAvailableNodes() go ExpireFailureDetectionHistory() go ExpireTopologyRecoveryHistory() go ExpireTopologyRecoveryStepsHistory() - } else { - // Take this opportunity to refresh yourself - go inst.LoadHostnameResolveCache() } }() case <-recoveryTick: @@ -424,7 +401,7 @@ func ContinuousDiscovery() { return } if runCheckAndRecoverOperationsTimeRipe() { - CheckAndRecover(nil, nil, false) + CheckAndRecover() } else { log.Infof("Waiting for %+v seconds to pass before running failure detection/recovery", checkAndRecoverWaitPeriod.Seconds()) } @@ -438,8 +415,27 @@ func ContinuousDiscovery() { } }() case <-tabletTopoTick: - go RefreshAllKeyspaces() - go refreshAllTablets() + // Create a wait group + var wg sync.WaitGroup + + // Refresh all keyspace information. + wg.Add(1) + go func() { + defer wg.Done() + RefreshAllKeyspacesAndShards() + }() + + // Refresh all tablets. + wg.Add(1) + go func() { + defer wg.Done() + refreshAllTablets() + }() + + // Wait for both the refreshes to complete + wg.Wait() + // We have completed one discovery cycle in the entirety of it. We should update the process health. + process.FirstDiscoveryCycleComplete.Store(true) } } } diff --git a/go/vt/vtorc/logic/orchestrator_test.go b/go/vt/vtorc/logic/vtorc_test.go similarity index 100% rename from go/vt/vtorc/logic/orchestrator_test.go rename to go/vt/vtorc/logic/vtorc_test.go diff --git a/go/vt/vtorc/process/health.go b/go/vt/vtorc/process/health.go index 9f25fe51a39..22db89e1d56 100644 --- a/go/vt/vtorc/process/health.go +++ b/go/vt/vtorc/process/health.go @@ -32,6 +32,7 @@ import ( var lastHealthCheckUnixNano int64 var lastGoodHealthCheckUnixNano int64 var LastContinousCheckHealthy int64 +var FirstDiscoveryCycleComplete atomic.Bool var lastHealthCheckCache = cache.New(config.HealthPollSeconds*time.Second, time.Second) @@ -73,6 +74,7 @@ type HealthStatus struct { Hostname string Token string IsActiveNode bool + DiscoveredOnce bool ActiveNode *NodeHealth Error error AvailableNodes [](*NodeHealth) @@ -119,6 +121,7 @@ func HealthTest() (health *HealthStatus, err error) { return health, err } health.Healthy = healthy + health.DiscoveredOnce = FirstDiscoveryCycleComplete.Load() if health.ActiveNode, health.IsActiveNode, err = ElectedNode(); err != nil { health.Error = err diff --git a/go/vt/vtorc/server/api.go b/go/vt/vtorc/server/api.go index 1a7f0a1c1da..f053336e64e 100644 --- a/go/vt/vtorc/server/api.go +++ b/go/vt/vtorc/server/api.go @@ -21,9 +21,13 @@ import ( "encoding/json" "fmt" "net/http" + "strconv" + "time" "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vtorc/collection" + "vitess.io/vitess/go/vt/vtorc/discovery" "vitess.io/vitess/go/vt/vtorc/inst" "vitess.io/vitess/go/vt/vtorc/logic" "vitess.io/vitess/go/vt/vtorc/process" @@ -36,30 +40,34 @@ import ( type vtorcAPI struct{} const ( - problemsAPI = "/api/problems" - disableGlobalRecoveriesAPI = "/api/disable-global-recoveries" - enableGlobalRecoveriesAPI = "/api/enable-global-recoveries" - replicationAnalysisAPI = "/api/replication-analysis" - healthAPI = "/debug/health" + problemsAPI = "/api/problems" + errantGTIDsAPI = "/api/errant-gtids" + disableGlobalRecoveriesAPI = "/api/disable-global-recoveries" + enableGlobalRecoveriesAPI = "/api/enable-global-recoveries" + replicationAnalysisAPI = "/api/replication-analysis" + healthAPI = "/debug/health" + AggregatedDiscoveryMetricsAPI = "/api/aggregated-discovery-metrics" shardWithoutKeyspaceFilteringErrorStr = "Filtering by shard without keyspace isn't supported" + notAValidValueForSeconds = "Invalid value for seconds" ) var ( apiHandler = &vtorcAPI{} vtorcAPIPaths = []string{ problemsAPI, + errantGTIDsAPI, disableGlobalRecoveriesAPI, enableGlobalRecoveriesAPI, replicationAnalysisAPI, healthAPI, + AggregatedDiscoveryMetricsAPI, } ) // ServeHTTP implements the http.Handler interface. This is the entry point for all the api commands of VTOrc func (v *vtorcAPI) ServeHTTP(response http.ResponseWriter, request *http.Request) { apiPath := request.URL.Path - log.Infof("HTTP API Request received: %v", apiPath) if err := acl.CheckAccessHTTP(request, getACLPermissionLevelForAPI(apiPath)); err != nil { acl.SendError(response, err) return @@ -74,8 +82,12 @@ func (v *vtorcAPI) ServeHTTP(response http.ResponseWriter, request *http.Request healthAPIHandler(response, request) case problemsAPI: problemsAPIHandler(response, request) + case errantGTIDsAPI: + errantGTIDsAPIHandler(response, request) case replicationAnalysisAPI: replicationAnalysisAPIHandler(response, request) + case AggregatedDiscoveryMetricsAPI: + AggregatedDiscoveryMetricsAPIHandler(response, request) default: // This should be unreachable. Any endpoint which isn't registered is automatically redirected to /debug/status. // This code will only be reachable if we register an API but don't handle it here. That will be a bug. @@ -86,7 +98,7 @@ func (v *vtorcAPI) ServeHTTP(response http.ResponseWriter, request *http.Request // getACLPermissionLevelForAPI returns the acl permission level that is required to run a given API func getACLPermissionLevelForAPI(apiEndpoint string) string { switch apiEndpoint { - case problemsAPI: + case problemsAPI, errantGTIDsAPI: return acl.MONITORING case disableGlobalRecoveriesAPI, enableGlobalRecoveriesAPI: return acl.ADMIN @@ -101,7 +113,7 @@ func getACLPermissionLevelForAPI(apiEndpoint string) string { // RegisterVTOrcAPIEndpoints is used to register the VTOrc API endpoints func RegisterVTOrcAPIEndpoints() { for _, apiPath := range vtorcAPIPaths { - http.Handle(apiPath, apiHandler) + servenv.HTTPHandle(apiPath, apiHandler) } } @@ -136,6 +148,49 @@ func problemsAPIHandler(response http.ResponseWriter, request *http.Request) { returnAsJSON(response, http.StatusOK, instances) } +// errantGTIDsAPIHandler is the handler for the errantGTIDsAPI endpoint +func errantGTIDsAPIHandler(response http.ResponseWriter, request *http.Request) { + // This api also supports filtering by shard and keyspace provided. + shard := request.URL.Query().Get("shard") + keyspace := request.URL.Query().Get("keyspace") + if shard != "" && keyspace == "" { + http.Error(response, shardWithoutKeyspaceFilteringErrorStr, http.StatusBadRequest) + return + } + + instances, err := inst.ReadInstancesWithErrantGTIds(keyspace, shard) + if err != nil { + http.Error(response, err.Error(), http.StatusInternalServerError) + return + } + returnAsJSON(response, http.StatusOK, instances) +} + +// AggregatedDiscoveryMetricsAPIHandler is the handler for the discovery metrics endpoint +func AggregatedDiscoveryMetricsAPIHandler(response http.ResponseWriter, request *http.Request) { + // return metrics for last x seconds + qSeconds := request.URL.Query().Get("seconds") + // default to 60 seconds + seconds := 60 + var err error + if qSeconds != "" { + seconds, err = strconv.Atoi(qSeconds) + if err != nil { + http.Error(response, notAValidValueForSeconds, http.StatusBadRequest) + return + } + } + c := collection.CreateOrReturnCollection(logic.DiscoveryMetricsName) + now := time.Now() + then := now.Add(time.Duration(-1*seconds) * time.Second) + metric, err := discovery.AggregatedSince(c, then) + if err != nil { + http.Error(response, err.Error(), http.StatusInternalServerError) + return + } + returnAsJSON(response, http.StatusOK, metric) +} + // disableGlobalRecoveriesAPIHandler is the handler for the disableGlobalRecoveriesAPI endpoint func disableGlobalRecoveriesAPIHandler(response http.ResponseWriter) { err := logic.DisableRecovery() @@ -184,7 +239,8 @@ func healthAPIHandler(response http.ResponseWriter, request *http.Request) { return } code := http.StatusOK - if !health.Healthy { + // If the process isn't healthy, or if the first discovery cycle hasn't completed, we return an internal server error. + if !health.Healthy || !health.DiscoveredOnce { code = http.StatusInternalServerError } returnAsJSON(response, code, health) diff --git a/go/vt/vtorc/server/api_test.go b/go/vt/vtorc/server/api_test.go index 3c9b792afae..c352d1e600f 100644 --- a/go/vt/vtorc/server/api_test.go +++ b/go/vt/vtorc/server/api_test.go @@ -16,6 +16,9 @@ func TestGetACLPermissionLevelForAPI(t *testing.T) { { apiEndpoint: problemsAPI, want: acl.MONITORING, + }, { + apiEndpoint: errantGTIDsAPI, + want: acl.MONITORING, }, { apiEndpoint: disableGlobalRecoveriesAPI, want: acl.ADMIN, diff --git a/go/vt/vtorc/server/discovery.go b/go/vt/vtorc/server/discovery.go index 1f0011cfabd..2ef81eea3c4 100644 --- a/go/vt/vtorc/server/discovery.go +++ b/go/vt/vtorc/server/discovery.go @@ -20,7 +20,6 @@ import ( "github.com/spf13/pflag" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/vtorc/inst" "vitess.io/vitess/go/vt/vtorc/logic" "vitess.io/vitess/go/vt/vtorc/process" ) @@ -34,7 +33,6 @@ func RegisterFlags(fs *pflag.FlagSet) { // StartVTOrcDiscovery starts VTOrc discovery serving func StartVTOrcDiscovery() { process.ContinuousRegistration(string(process.VTOrcExecutionHTTPMode), "") - inst.SetMaintenanceOwner(process.ThisHostname) log.Info("Starting Discovery") go logic.ContinuousDiscovery() diff --git a/go/vt/vtorc/test/recovery_analysis.go b/go/vt/vtorc/test/recovery_analysis.go index 7585fa17441..b2ae4ce9520 100644 --- a/go/vt/vtorc/test/recovery_analysis.go +++ b/go/vt/vtorc/test/recovery_analysis.go @@ -33,6 +33,7 @@ type InfoForRecoveryAnalysis struct { PrimaryTimestamp *time.Time Keyspace string Shard string + ShardPrimaryTermTimestamp string KeyspaceType int DurabilityPolicy string IsInvalid int @@ -40,8 +41,6 @@ type InfoForRecoveryAnalysis struct { IsCoPrimary int Hostname string Port int - SourceHost string - SourcePort int DataCenter string Region string PhysicalEnvironment string @@ -49,6 +48,7 @@ type InfoForRecoveryAnalysis struct { LogPos uint32 IsStaleBinlogCoordinates int GTIDMode string + ErrantGTID string LastCheckValid int LastCheckPartialSuccess int CountReplicas uint @@ -113,6 +113,7 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap { rowMap["downtime_end_timestamp"] = sqlutils.CellData{String: info.DowntimeEndTimestamp, Valid: true} rowMap["downtime_remaining_seconds"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.DowntimeRemainingSeconds), Valid: true} rowMap["durability_policy"] = sqlutils.CellData{String: info.DurabilityPolicy, Valid: true} + rowMap["gtid_errant"] = sqlutils.CellData{String: info.ErrantGTID, Valid: true} rowMap["gtid_mode"] = sqlutils.CellData{String: info.GTIDMode, Valid: true} rowMap["hostname"] = sqlutils.CellData{String: info.Hostname, Valid: true} rowMap["is_binlog_server"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsBinlogServer), Valid: true} @@ -126,6 +127,7 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap { rowMap["keyspace_type"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.KeyspaceType), Valid: true} rowMap["keyspace"] = sqlutils.CellData{String: info.Keyspace, Valid: true} rowMap["shard"] = sqlutils.CellData{String: info.Shard, Valid: true} + rowMap["shard_primary_term_timestamp"] = sqlutils.CellData{String: info.ShardPrimaryTermTimestamp, Valid: true} rowMap["last_check_partial_success"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.LastCheckPartialSuccess), Valid: true} rowMap["max_replica_gtid_errant"] = sqlutils.CellData{String: info.MaxReplicaGTIDErrant, Valid: true} rowMap["max_replica_gtid_mode"] = sqlutils.CellData{String: info.MaxReplicaGTIDMode, Valid: true} @@ -148,8 +150,6 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap { rowMap["semi_sync_primary_status"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.SemiSyncPrimaryStatus), Valid: true} rowMap["semi_sync_primary_wait_for_replica_count"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.SemiSyncPrimaryWaitForReplicaCount), Valid: true} rowMap["semi_sync_replica_enabled"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.SemiSyncReplicaEnabled), Valid: true} - rowMap["source_host"] = sqlutils.CellData{String: info.SourceHost, Valid: true} - rowMap["source_port"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.SourcePort), Valid: true} res, _ := prototext.Marshal(info.TabletInfo) rowMap["tablet_info"] = sqlutils.CellData{String: string(res), Valid: true} return rowMap diff --git a/go/vt/vtorc/util/math.go b/go/vt/vtorc/util/math.go index 869b7ac354d..7f1e3057b07 100644 --- a/go/vt/vtorc/util/math.go +++ b/go/vt/vtorc/util/math.go @@ -16,64 +16,6 @@ package util -func MinInt(i1, i2 int) int { - if i1 < i2 { - return i1 - } - return i2 -} - -func MaxInt(i1, i2 int) int { - if i1 > i2 { - return i1 - } - return i2 -} - -func MinInt64(i1, i2 int64) int64 { - if i1 < i2 { - return i1 - } - return i2 -} - -func MaxInt64(i1, i2 int64) int64 { - if i1 > i2 { - return i1 - } - return i2 -} - -func MaxUInt64(i1, i2 uint64) uint64 { - if i1 > i2 { - return i1 - } - return i2 -} - -func MinString(i1, i2 string) string { - if i1 < i2 { - return i1 - } - return i2 -} - -// TernaryString acts like a "? :" C-style ternary operator for strings -func TernaryString(condition bool, resTrue string, resFalse string) string { - if condition { - return resTrue - } - return resFalse -} - -// TernaryInt acts like a "? :" C-style ternary operator for ints -func TernaryInt(condition bool, resTrue int, resFalse int) int { - if condition { - return resTrue - } - return resFalse -} - // AbsInt64 is an ABS function for int64 type func AbsInt64(i int64) int64 { if i >= 0 { diff --git a/go/vt/vttablet/customrule/topocustomrule/topocustomrule_test.go b/go/vt/vttablet/customrule/topocustomrule/topocustomrule_test.go index 2f7090e5450..e1d5c3fd2cd 100644 --- a/go/vt/vttablet/customrule/topocustomrule/topocustomrule_test.go +++ b/go/vt/vttablet/customrule/topocustomrule/topocustomrule_test.go @@ -78,11 +78,13 @@ func TestUpdate(t *testing.T) { cell := "cell1" filePath := "/keyspaces/ks1/configs/CustomRules" - ts := memorytopo.NewServer(cell) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, cell) qsc := tabletservermock.NewController() qsc.TS = ts sleepDuringTopoFailure = time.Millisecond - ctx := context.Background() cr, err := newTopoCustomRule(qsc, cell, filePath) if err != nil { diff --git a/go/vt/vttablet/endtoend/config_test.go b/go/vt/vttablet/endtoend/config_test.go index 759deb87ba2..60303cf4bf5 100644 --- a/go/vt/vttablet/endtoend/config_test.go +++ b/go/vt/vttablet/endtoend/config_test.go @@ -73,14 +73,6 @@ func TestStreamPoolSize(t *testing.T) { verifyIntValue(t, vstart, "StreamConnPoolCapacity", 1) } -func TestQueryCacheCapacity(t *testing.T) { - revert := changeVar(t, "QueryCacheCapacity", "1") - defer revert() - - vstart := framework.DebugVars() - verifyIntValue(t, vstart, "QueryCacheCapacity", 1) -} - func TestDisableConsolidator(t *testing.T) { totalConsolidationsTag := "Waits/Histograms/Consolidations/Count" initial := framework.FetchInt(framework.DebugVars(), totalConsolidationsTag) @@ -182,8 +174,6 @@ func TestQueryPlanCache(t *testing.T) { //sleep to avoid race between SchemaChanged event clearing out the plans cache which breaks this test framework.Server.WaitForSchemaReset(2 * time.Second) - defer framework.Server.SetQueryPlanCacheCap(framework.Server.QueryPlanCacheCap()) - bindVars := map[string]*querypb.BindVariable{ "ival1": sqltypes.Int64BindVariable(1), "ival2": sqltypes.Int64BindVariable(1), @@ -197,21 +187,18 @@ func TestQueryPlanCache(t *testing.T) { assert.Equal(t, 1, framework.Server.QueryPlanCacheLen()) vend := framework.DebugVars() - assert.Equal(t, 1, framework.FetchInt(vend, "QueryCacheLength")) assert.GreaterOrEqual(t, framework.FetchInt(vend, "QueryCacheSize"), cachedPlanSize) _, _ = client.Execute("select * from vitess_test where intval=:ival2", bindVars) require.Equal(t, 2, framework.Server.QueryPlanCacheLen()) vend = framework.DebugVars() - assert.Equal(t, 2, framework.FetchInt(vend, "QueryCacheLength")) assert.GreaterOrEqual(t, framework.FetchInt(vend, "QueryCacheSize"), 2*cachedPlanSize) _, _ = client.Execute("select * from vitess_test where intval=1", bindVars) require.Equal(t, 3, framework.Server.QueryPlanCacheLen()) vend = framework.DebugVars() - assert.Equal(t, 3, framework.FetchInt(vend, "QueryCacheLength")) assert.GreaterOrEqual(t, framework.FetchInt(vend, "QueryCacheSize"), 3*cachedPlanSize) } diff --git a/go/vt/vttablet/endtoend/connkilling/main_test.go b/go/vt/vttablet/endtoend/connkilling/main_test.go index 5f1d20beffe..e7486c397eb 100644 --- a/go/vt/vttablet/endtoend/connkilling/main_test.go +++ b/go/vt/vttablet/endtoend/connkilling/main_test.go @@ -17,6 +17,7 @@ limitations under the License. package connkilling import ( + "context" "errors" "flag" "fmt" @@ -80,8 +81,10 @@ func TestMain(m *testing.M) { connParams = cluster.MySQLConnParams() connAppDebugParams = cluster.MySQLAppDebugConnParams() config := tabletenv.NewDefaultConfig() - config.Oltp.TxTimeoutSeconds = tabletenv.Seconds(3) - err := framework.StartCustomServer(connParams, connAppDebugParams, cluster.DbName(), config) + _ = config.Oltp.TxTimeoutSeconds.Set("3s") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err := framework.StartCustomServer(ctx, connParams, connAppDebugParams, cluster.DbName(), config) if err != nil { fmt.Fprintf(os.Stderr, "%v", err) return 1 diff --git a/go/vt/vttablet/endtoend/framework/client.go b/go/vt/vttablet/endtoend/framework/client.go index ea72d2ee89d..3c06f9b465c 100644 --- a/go/vt/vttablet/endtoend/framework/client.go +++ b/go/vt/vttablet/endtoend/framework/client.go @@ -19,10 +19,9 @@ package framework import ( "context" "errors" + "sync" "time" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/vttablet/tabletserver" @@ -40,6 +39,7 @@ type QueryClient struct { target *querypb.Target server *tabletserver.TabletServer transactionID int64 + reservedIDMu sync.Mutex reservedID int64 sessionStateChanges string } @@ -59,7 +59,7 @@ func NewClient() *QueryClient { // NewClientWithTabletType creates a new client for Server with the provided tablet type. func NewClientWithTabletType(tabletType topodatapb.TabletType) *QueryClient { - targetCopy := proto.Clone(Target).(*querypb.Target) + targetCopy := Target.CloneVT() targetCopy.TabletType = tabletType return &QueryClient{ ctx: callerid.NewContext( @@ -114,6 +114,8 @@ func (client *QueryClient) Commit() error { func (client *QueryClient) Rollback() error { defer func() { client.transactionID = 0 }() rID, err := client.server.Rollback(client.ctx, client.target, client.transactionID) + client.reservedIDMu.Lock() + defer client.reservedIDMu.Unlock() client.reservedID = rID if err != nil { return err @@ -293,6 +295,8 @@ func (client *QueryClient) MessageAck(name string, ids []string) (int64, error) // ReserveExecute performs a ReserveExecute. func (client *QueryClient) ReserveExecute(query string, preQueries []string, bindvars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + client.reservedIDMu.Lock() + defer client.reservedIDMu.Unlock() if client.reservedID != 0 { return nil, errors.New("already reserved a connection") } @@ -405,14 +409,21 @@ func (client *QueryClient) StreamHealth(sendFunc func(*querypb.StreamHealthRespo return client.server.StreamHealth(client.ctx, sendFunc) } +// StreamHealthWithContext receives the health response +func (client *QueryClient) StreamHealthWithContext(ctx context.Context, sendFunc func(*querypb.StreamHealthResponse) error) error { + return client.server.StreamHealth(ctx, sendFunc) +} + func (client *QueryClient) UpdateContext(ctx context.Context) { client.ctx = ctx } func (client *QueryClient) GetSchema(tableType querypb.SchemaTableType, tableNames ...string) (map[string]string, error) { - schemaDef := map[string]string{} + schemaDef := make(map[string]string) err := client.server.GetSchema(client.ctx, client.target, tableType, tableNames, func(schemaRes *querypb.GetSchemaResponse) error { - schemaDef = schemaRes.TableDefinition + for tableName, schemaDefinition := range schemaRes.TableDefinition { + schemaDef[tableName] = schemaDefinition + } return nil }) if err != nil { diff --git a/go/vt/vttablet/endtoend/framework/server.go b/go/vt/vttablet/endtoend/framework/server.go index eb025384d33..4f8043fba5a 100644 --- a/go/vt/vttablet/endtoend/framework/server.go +++ b/go/vt/vttablet/endtoend/framework/server.go @@ -24,6 +24,7 @@ import ( "time" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/yaml2" @@ -57,7 +58,7 @@ var ( // StartCustomServer starts the server and initializes // all the global variables. This function should only be called // once at the beginning of the test. -func StartCustomServer(connParams, connAppDebugParams mysql.ConnParams, dbName string, config *tabletenv.TabletConfig) error { +func StartCustomServer(ctx context.Context, connParams, connAppDebugParams mysql.ConnParams, dbName string, config *tabletenv.TabletConfig) error { // Setup a fake vtgate server. protocol := "resolveTest" vtgateconn.SetVTGateProtocol(protocol) @@ -74,9 +75,9 @@ func StartCustomServer(connParams, connAppDebugParams mysql.ConnParams, dbName s Shard: "0", TabletType: topodatapb.TabletType_PRIMARY, } - TopoServer = memorytopo.NewServer("") + TopoServer = memorytopo.NewServer(ctx, "") - Server = tabletserver.NewTabletServer("", config, TopoServer, &topodatapb.TabletAlias{}) + Server = tabletserver.NewTabletServer(ctx, "", config, TopoServer, &topodatapb.TabletAlias{}) Server.Register() err := Server.StartService(Target, dbcfgs, nil /* mysqld */) if err != nil { @@ -89,7 +90,12 @@ func StartCustomServer(connParams, connAppDebugParams mysql.ConnParams, dbName s return vterrors.Wrap(err, "could not start listener") } ServerAddress = fmt.Sprintf("http://%s", ln.Addr().String()) - go http.Serve(ln, nil) + go func() { + err := servenv.HTTPServe(ln) + if err != nil { + log.Errorf("HTTPServe failed: %v", err) + } + }() for { time.Sleep(10 * time.Millisecond) response, err := http.Get(fmt.Sprintf("%s/debug/vars", ServerAddress)) @@ -104,7 +110,7 @@ func StartCustomServer(connParams, connAppDebugParams mysql.ConnParams, dbName s // StartServer starts the server and initializes // all the global variables. This function should only be called // once at the beginning of the test. -func StartServer(connParams, connAppDebugParams mysql.ConnParams, dbName string) error { +func StartServer(ctx context.Context, connParams, connAppDebugParams mysql.ConnParams, dbName string) error { config := tabletenv.NewDefaultConfig() config.StrictTableACL = true config.TwoPCEnable = true @@ -112,16 +118,16 @@ func StartServer(connParams, connAppDebugParams mysql.ConnParams, dbName string) config.TwoPCCoordinatorAddress = "fake" config.HotRowProtection.Mode = tabletenv.Enable config.TrackSchemaVersions = true - config.GracePeriods.ShutdownSeconds = 2 - config.SignalSchemaChangeReloadIntervalSeconds = tabletenv.Seconds(2.1) + _ = config.GracePeriods.ShutdownSeconds.Set("2s") config.SignalWhenSchemaChange = true - config.Healthcheck.IntervalSeconds = 0.1 - config.Oltp.TxTimeoutSeconds = 5 - config.Olap.TxTimeoutSeconds = 5 + _ = config.Healthcheck.IntervalSeconds.Set("100ms") + _ = config.Oltp.TxTimeoutSeconds.Set("5s") + _ = config.Olap.TxTimeoutSeconds.Set("5s") config.EnableViews = true + config.QueryCacheDoorkeeper = false gotBytes, _ := yaml2.Marshal(config) log.Infof("Config:\n%s", gotBytes) - return StartCustomServer(connParams, connAppDebugParams, dbName, config) + return StartCustomServer(ctx, connParams, connAppDebugParams, dbName, config) } // StopServer must be called once all the tests are done. diff --git a/go/vt/vttablet/endtoend/framework/testcase.go b/go/vt/vttablet/endtoend/framework/testcase.go index 37808c5aa7a..e02227b4eb6 100644 --- a/go/vt/vttablet/endtoend/framework/testcase.go +++ b/go/vt/vttablet/endtoend/framework/testcase.go @@ -21,6 +21,7 @@ import ( "fmt" "reflect" "strings" + "time" "vitess.io/vitess/go/vt/vterrors" @@ -122,7 +123,7 @@ func (tc *TestCase) Test(name string, client *QueryClient) error { } // wait for all previous test cases to have been settled in cache - client.server.QueryPlanCacheWait() + time.Sleep(100 * time.Millisecond) catcher := NewQueryCatcher() defer catcher.Close() diff --git a/go/vt/vttablet/endtoend/healthstream_test.go b/go/vt/vttablet/endtoend/healthstream_test.go index ad6f0884270..4bc13aa9084 100644 --- a/go/vt/vttablet/endtoend/healthstream_test.go +++ b/go/vt/vttablet/endtoend/healthstream_test.go @@ -17,12 +17,12 @@ limitations under the License. package endtoend import ( + "slices" "testing" "time" "github.com/stretchr/testify/assert" - "vitess.io/vitess/go/test/utils" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/vttablet/endtoend/framework" ) @@ -31,38 +31,51 @@ func TestSchemaChange(t *testing.T) { client := framework.NewClient() tcs := []struct { - tName string - response []string - ddl string + tName string + expectedChange string + ddl string + expectTimeout bool }{ { "create table 1", - []string{"vitess_sc1"}, + "vitess_sc1", "create table vitess_sc1(id bigint primary key)", + false, }, { "create table 2", - []string{"vitess_sc2"}, + "vitess_sc2", "create table vitess_sc2(id bigint primary key)", + false, + }, { + "create internal table", + "_vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", + "create table _vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915120410(id bigint primary key)", + true, }, { "add column 1", - []string{"vitess_sc1"}, + "vitess_sc1", "alter table vitess_sc1 add column newCol varchar(50)", + false, }, { "add column 2", - []string{"vitess_sc2"}, + "vitess_sc2", "alter table vitess_sc2 add column newCol varchar(50)", + false, }, { "remove column", - []string{"vitess_sc1"}, + "vitess_sc1", "alter table vitess_sc1 drop column newCol", + false, }, { "drop table 2", - []string{"vitess_sc2"}, + "vitess_sc2", "drop table vitess_sc2", + false, }, { "drop table 1", - []string{"vitess_sc1"}, + "vitess_sc1", "drop table vitess_sc1", + false, }, } @@ -76,24 +89,26 @@ func TestSchemaChange(t *testing.T) { }) }(ch) - select { - case <-ch: // get the schema notification - case <-time.After(3 * time.Second): - // We might not see the initial changes - // as the health stream ticker would have started very early on and - // this test client might not be even registered. - } - for _, tc := range tcs { t.Run(tc.tName, func(t *testing.T) { _, err := client.Execute(tc.ddl, nil) assert.NoError(t, err) - select { - case res := <-ch: // get the schema notification - utils.MustMatch(t, tc.response, res, "") - case <-time.After(5 * time.Second): - t.Errorf("timed out") - return + timeout := time.After(5 * time.Second) + for { + select { + case res := <-ch: // get the schema notification + if slices.Contains(res, tc.expectedChange) { + assert.False(t, tc.expectTimeout) + return + } + case <-timeout: + if tc.expectTimeout { + // This is what we wanted! + return + } + t.Errorf("timed out waiting for a schema notification") + return + } } }) } diff --git a/go/vt/vttablet/endtoend/main_test.go b/go/vt/vttablet/endtoend/main_test.go index a809e7e42ae..b5256be0994 100644 --- a/go/vt/vttablet/endtoend/main_test.go +++ b/go/vt/vttablet/endtoend/main_test.go @@ -17,6 +17,7 @@ limitations under the License. package endtoend import ( + "context" "errors" "flag" "fmt" @@ -84,7 +85,9 @@ func TestMain(m *testing.M) { connParams = cluster.MySQLConnParams() connAppDebugParams = cluster.MySQLAppDebugConnParams() - err = framework.StartServer(connParams, connAppDebugParams, cluster.DbName()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err = framework.StartServer(ctx, connParams, connAppDebugParams, cluster.DbName()) if err != nil { fmt.Fprintf(os.Stderr, "%v", err) return 1 @@ -208,7 +211,7 @@ var tableACLConfig = `{ }, { "name": "vitess", - "table_names_or_prefixes": ["vitess_a", "vitess_b", "vitess_c", "dual", "vitess_d", "vitess_temp", "vitess_e", "vitess_f", "vitess_mixed_case", "upsert_test", "vitess_strings", "vitess_fracts", "vitess_ints", "vitess_misc", "vitess_bit_default", "vitess_big", "vitess_stress", "vitess_view", "vitess_json", "vitess_bool", "vitess_autoinc_seq"], + "table_names_or_prefixes": ["vitess_a", "vitess_b", "vitess_c", "dual", "vitess_d", "vitess_temp", "vitess_temp1", "vitess_temp2", "vitess_temp3", "vitess_e", "vitess_f", "vitess_mixed_case", "upsert_test", "vitess_strings", "vitess_fracts", "vitess_ints", "vitess_misc", "vitess_bit_default", "vitess_big", "vitess_stress", "vitess_view", "vitess_json", "vitess_bool", "vitess_autoinc_seq"], "readers": ["dev"], "writers": ["dev"], "admins": ["dev"] @@ -311,7 +314,7 @@ var tableACLConfig = `{ }, { "name": "vitess_healthstream", - "table_names_or_prefixes": ["vitess_sc1", "vitess_sc2"], + "table_names_or_prefixes": ["vitess_sc1", "vitess_sc2", "_vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915120410"], "readers": ["dev"], "writers": ["dev"], "admins": ["dev"] diff --git a/go/vt/vttablet/endtoend/misc_test.go b/go/vt/vttablet/endtoend/misc_test.go index de8c98c98f6..ae47999a97e 100644 --- a/go/vt/vttablet/endtoend/misc_test.go +++ b/go/vt/vttablet/endtoend/misc_test.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "io" + "math" "net/http" "reflect" "strings" @@ -184,8 +185,7 @@ func TestIntegrityError(t *testing.T) { } func TestTrailingComment(t *testing.T) { - vstart := framework.DebugVars() - v1 := framework.FetchInt(vstart, "QueryCacheLength") + v1 := framework.Server.QueryPlanCacheLen() bindVars := map[string]*querypb.BindVariable{"ival": sqltypes.Int64BindVariable(1)} client := framework.NewClient() @@ -200,7 +200,7 @@ func TestTrailingComment(t *testing.T) { t.Error(err) return } - v2 := framework.FetchInt(framework.DebugVars(), "QueryCacheLength") + v2 := framework.Server.QueryPlanCacheLen() if v2 != v1+1 { t.Errorf("QueryCacheLength(%s): %d, want %d", query, v2, v1+1) } @@ -944,3 +944,121 @@ func TestHexAndBitBindVar(t *testing.T) { require.NoError(t, err) assert.Equal(t, `[[INT64(10) UINT64(10) INT64(2480) UINT64(2480)]]`, fmt.Sprintf("%v", qr.Rows)) } + +// Test will validate drop view ddls. +func TestShowTablesWithSizes(t *testing.T) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &connParams) + require.NoError(t, err) + defer conn.Close() + + setupQueries := []string{ + `drop view if exists show_tables_with_sizes_v1`, + `drop table if exists show_tables_with_sizes_t1`, + `drop table if exists show_tables_with_sizes_employees`, + `create table show_tables_with_sizes_t1 (id int primary key)`, + `create view show_tables_with_sizes_v1 as select * from show_tables_with_sizes_t1`, + `CREATE TABLE show_tables_with_sizes_employees (id INT NOT NULL, store_id INT) PARTITION BY HASH(store_id) PARTITIONS 4`, + } + + defer func() { + _, _ = conn.ExecuteFetch(`drop view if exists show_tables_with_sizes_v1`, 1, false) + _, _ = conn.ExecuteFetch(`drop table if exists show_tables_with_sizes_t1`, 1, false) + _, _ = conn.ExecuteFetch(`drop table if exists show_tables_with_sizes_employees`, 1, false) + }() + for _, query := range setupQueries { + _, err := conn.ExecuteFetch(query, 1, false) + require.NoError(t, err) + } + expectTables := map[string]([]string){ // TABLE_TYPE, TABLE_COMMENT + "show_tables_with_sizes_t1": {"BASE TABLE", ""}, + "show_tables_with_sizes_v1": {"VIEW", "VIEW"}, + "show_tables_with_sizes_employees": {"BASE TABLE", ""}, + } + + rs, err := conn.ExecuteFetch(conn.BaseShowTablesWithSizes(), math.MaxInt, false) + require.NoError(t, err) + require.NotEmpty(t, rs.Rows) + + assert.GreaterOrEqual(t, len(rs.Rows), len(expectTables)) + matchedTables := map[string]bool{} + for _, row := range rs.Rows { + tableName := row[0].ToString() + vals, ok := expectTables[tableName] + if ok { + assert.Equal(t, vals[0], row[1].ToString()) // TABLE_TYPE + assert.Equal(t, vals[1], row[3].ToString()) // TABLE_COMMENT + matchedTables[tableName] = true + } + } + assert.Equalf(t, len(expectTables), len(matchedTables), "%v", matchedTables) +} + +// TestTuple tests that bind variables having tuple values work with vttablet. +func TestTuple(t *testing.T) { + client := framework.NewClient() + _, err := client.Execute(`insert into vitess_a (eid, id) values (100, 103), (193, 235)`, nil) + require.NoError(t, err) + + bv := map[string]*querypb.BindVariable{ + "__vals": { + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{ + { + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{ + {Type: querypb.Type_INT64, Value: []byte("100")}, + {Type: querypb.Type_INT64, Value: []byte("103")}, + }, + }, + { + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{ + {Type: querypb.Type_INT64, Value: []byte("87")}, + {Type: querypb.Type_INT64, Value: []byte("4473")}, + }, + }, + }, + }, + } + res, err := client.Execute("select * from vitess_a where (eid, id) in ::__vals", bv) + require.NoError(t, err) + assert.Equal(t, `[[INT64(100) INT32(103) NULL NULL]]`, fmt.Sprintf("%v", res.Rows)) + + res, err = client.Execute("update vitess_a set name = 'a' where (eid, id) in ::__vals", bv) + require.NoError(t, err) + assert.EqualValues(t, 1, res.RowsAffected) + + res, err = client.Execute("select * from vitess_a where (eid, id) in ::__vals", bv) + require.NoError(t, err) + assert.Equal(t, `[[INT64(100) INT32(103) VARCHAR("a") NULL]]`, fmt.Sprintf("%v", res.Rows)) + + bv = map[string]*querypb.BindVariable{ + "__vals": { + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{ + { + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{ + {Type: querypb.Type_INT64, Value: []byte("100")}, + {Type: querypb.Type_INT64, Value: []byte("103")}, + }, + }, + { + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{ + {Type: querypb.Type_INT64, Value: []byte("193")}, + {Type: querypb.Type_INT64, Value: []byte("235")}, + }, + }, + }, + }, + } + res, err = client.Execute("delete from vitess_a where (eid, id) in ::__vals", bv) + require.NoError(t, err) + assert.EqualValues(t, 2, res.RowsAffected) + + res, err = client.Execute("select * from vitess_a where (eid, id) in ::__vals", bv) + require.NoError(t, err) + require.Zero(t, len(res.Rows)) +} diff --git a/go/vt/vttablet/endtoend/reserve_test.go b/go/vt/vttablet/endtoend/reserve_test.go index 355e4d5b953..591512d44c6 100644 --- a/go/vt/vttablet/endtoend/reserve_test.go +++ b/go/vt/vttablet/endtoend/reserve_test.go @@ -31,6 +31,10 @@ import ( //TODO: Add Counter checks in all the tests. func TestMultipleReserveHaveDifferentConnection(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client1 := framework.NewClient() client2 := framework.NewClient() @@ -53,6 +57,10 @@ func TestMultipleReserveHaveDifferentConnection(t *testing.T) { } func TestReserveBeginRelease(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() query := "select connection_id()" @@ -70,6 +78,10 @@ func TestReserveBeginRelease(t *testing.T) { } func TestBeginReserveRelease(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() query := "select connection_id()" @@ -87,6 +99,10 @@ func TestBeginReserveRelease(t *testing.T) { } func TestReserveBeginExecuteRelease(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() insQuery := "insert into vitess_test (intval, floatval, charval, binval) values (4, null, null, null)" @@ -107,6 +123,10 @@ func TestReserveBeginExecuteRelease(t *testing.T) { } func TestMultipleReserveBeginHaveDifferentConnection(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client1 := framework.NewClient() client2 := framework.NewClient() @@ -129,6 +149,10 @@ func TestMultipleReserveBeginHaveDifferentConnection(t *testing.T) { } func TestCommitOnReserveBeginConn(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() query := "select connection_id()" @@ -149,6 +173,10 @@ func TestCommitOnReserveBeginConn(t *testing.T) { } func TestRollbackOnReserveBeginConn(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() query := "select connection_id()" @@ -169,6 +197,10 @@ func TestRollbackOnReserveBeginConn(t *testing.T) { } func TestReserveBeginRollbackAndBeginCommitAgain(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() query := "select connection_id()" @@ -203,6 +235,10 @@ func TestReserveBeginRollbackAndBeginCommitAgain(t *testing.T) { } func TestReserveBeginCommitFailToReuseTxID(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() query := "select connection_id()" @@ -225,6 +261,10 @@ func TestReserveBeginCommitFailToReuseTxID(t *testing.T) { } func TestReserveBeginRollbackFailToReuseTxID(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() query := "select connection_id()" @@ -247,6 +287,10 @@ func TestReserveBeginRollbackFailToReuseTxID(t *testing.T) { } func TestReserveBeginCommitFailToReuseOldReservedID(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() query := "select connection_id()" @@ -271,6 +315,10 @@ func TestReserveBeginCommitFailToReuseOldReservedID(t *testing.T) { } func TestReserveBeginRollbackFailToReuseOldReservedID(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() query := "select connection_id()" @@ -294,6 +342,10 @@ func TestReserveBeginRollbackFailToReuseOldReservedID(t *testing.T) { } func TestReserveReleaseAndFailToUseReservedIDAgain(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() query := "select 42" @@ -312,6 +364,10 @@ func TestReserveReleaseAndFailToUseReservedIDAgain(t *testing.T) { } func TestReserveAndFailToRunTwiceConcurrently(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() query := "select 42" @@ -336,6 +392,10 @@ func TestReserveAndFailToRunTwiceConcurrently(t *testing.T) { } func TestBeginReserveCommitAndNewTransactionsOnSameReservedID(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() query := "select connection_id()" @@ -369,6 +429,10 @@ func TestBeginReserveCommitAndNewTransactionsOnSameReservedID(t *testing.T) { } func TestBeginReserveRollbackAndNewTransactionsOnSameReservedID(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() query := "select connection_id()" @@ -402,6 +466,10 @@ func TestBeginReserveRollbackAndNewTransactionsOnSameReservedID(t *testing.T) { } func TestBeginReserveReleaseAndFailToUseReservedIDAndTxIDAgain(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() query := "select 42" @@ -429,6 +497,10 @@ func TestBeginReserveReleaseAndFailToUseReservedIDAndTxIDAgain(t *testing.T) { } func TestReserveBeginReleaseAndFailToUseReservedIDAndTxIDAgain(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() query := "select 42" @@ -456,6 +528,10 @@ func TestReserveBeginReleaseAndFailToUseReservedIDAndTxIDAgain(t *testing.T) { } func TestReserveExecuteWithFailingQueryAndReserveConnectionRemainsOpen(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() _, err := client.ReserveExecute("select foo", nil, nil) @@ -469,6 +545,10 @@ func TestReserveExecuteWithFailingQueryAndReserveConnectionRemainsOpen(t *testin } func TestReserveAndExecuteWithFailingQueryAndReserveConnectionRemainsOpen(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() qr1, err := client.ReserveExecute("select connection_id()", nil, nil) @@ -485,6 +565,10 @@ func TestReserveAndExecuteWithFailingQueryAndReserveConnectionRemainsOpen(t *tes } func TestReserveBeginExecuteWithFailingQueryAndReserveConnAndTxRemainsOpen(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() _, err := client.ReserveBeginExecute("select foo", nil, nil, nil) @@ -516,6 +600,10 @@ func TestReserveBeginExecuteWithFailingQueryAndReserveConnAndTxRemainsOpen(t *te } func TestReserveAndBeginExecuteWithFailingQueryAndReserveConnAndTxRemainsOpen(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() // Save the connection id to check in the end that everything got executed on same connection. @@ -547,6 +635,10 @@ func TestReserveAndBeginExecuteWithFailingQueryAndReserveConnAndTxRemainsOpen(t } func TestReserveExecuteWithPreQueriesAndCheckConnectionState(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client1 := framework.NewClient() client2 := framework.NewClient() @@ -583,6 +675,10 @@ func TestReserveExecuteWithPreQueriesAndCheckConnectionState(t *testing.T) { } func TestReserveExecuteWithPreQueriesAndSavepoint(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() defer client.Release() @@ -648,6 +744,10 @@ func TestReserveExecuteWithPreQueriesAndSavepoint(t *testing.T) { } func TestReserveBeginExecuteWithPreQueriesAndCheckConnectionState(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() rcClient := framework.NewClient() rucClient := framework.NewClient() @@ -714,6 +814,10 @@ func TestReserveBeginExecuteWithPreQueriesAndCheckConnectionState(t *testing.T) } func TestReserveExecuteWithFailingPreQueriesAndCheckConnectionState(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() selQuery := "select 42" @@ -729,6 +833,10 @@ func TestReserveExecuteWithFailingPreQueriesAndCheckConnectionState(t *testing.T } func TestReserveBeginExecuteWithFailingPreQueriesAndCheckConnectionState(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() selQuery := "select 42" @@ -747,6 +855,10 @@ func TestReserveBeginExecuteWithFailingPreQueriesAndCheckConnectionState(t *test } func TestBeginReserveExecuteWithFailingPreQueriesAndCheckConnectionState(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() selQuery := "select 42" @@ -768,6 +880,10 @@ func TestBeginReserveExecuteWithFailingPreQueriesAndCheckConnectionState(t *test } func TestReserveBeginExecuteWithCommitFailureAndCheckConnectionAndDBState(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() connQuery := "select connection_id()" @@ -798,6 +914,10 @@ func TestReserveBeginExecuteWithCommitFailureAndCheckConnectionAndDBState(t *tes } func TestReserveBeginExecuteWithRollbackFailureAndCheckConnectionAndDBState(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() connQuery := "select connection_id()" @@ -828,6 +948,10 @@ func TestReserveBeginExecuteWithRollbackFailureAndCheckConnectionAndDBState(t *t } func TestReserveExecuteWithExecuteFailureAndCheckConnectionAndDBState(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() connQuery := "select connection_id()" @@ -866,6 +990,10 @@ func TestReserveExecuteWithExecuteFailureAndCheckConnectionAndDBState(t *testing } func TestReserveExecuteDDLWithoutTx(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() defer client.Release() @@ -892,6 +1020,10 @@ func TestReserveExecuteDDLWithoutTx(t *testing.T) { } func TestReserveExecuteDDLWithTx(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() defer client.Release() @@ -925,6 +1057,10 @@ func killConnection(t *testing.T, connID string) { } func BenchmarkPreQueries(b *testing.B) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() tcases := []struct { @@ -978,6 +1114,10 @@ func BenchmarkPreQueries(b *testing.B) { } func TestFailInfiniteSessions(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() qr, err := client.Execute("select @@max_connections", nil) require.NoError(t, err) @@ -1034,6 +1174,10 @@ func TestFailInfiniteSessions(t *testing.T) { } func TestReserveQueryTimeout(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false + defer func() { + framework.Server.Config().EnableSettingsPool = true + }() client := framework.NewClient() _, err := client.ReserveExecute("select sleep(19)", []string{"set sql_mode = ''"}, nil) diff --git a/go/vt/vttablet/endtoend/rpc_test.go b/go/vt/vttablet/endtoend/rpc_test.go new file mode 100644 index 00000000000..a186d444f8d --- /dev/null +++ b/go/vt/vttablet/endtoend/rpc_test.go @@ -0,0 +1,218 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endtoend + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/callerid" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vttablet/endtoend/framework" +) + +// TestGetSchemaRPC will validate GetSchema RPC. +func TestGetSchemaRPC(t *testing.T) { + testcases := []struct { + name string + queries []string + deferQueries []string + getSchemaQueryType querypb.SchemaTableType + getSchemaTables []string + mapToExpect map[string]string + }{ + { + name: "All views", + queries: []string{ + "create view vitess_view1 as select id from vitess_a", + "create view vitess_view2 as select id from vitess_b", + }, + deferQueries: []string{ + "drop view vitess_view1", + "drop view vitess_view2", + }, + mapToExpect: map[string]string{ + "vitess_view1": "CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view1` AS select `vitess_a`.`id` AS `id` from `vitess_a`", + "vitess_view2": "CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view2` AS select `vitess_b`.`id` AS `id` from `vitess_b`", + }, + getSchemaQueryType: querypb.SchemaTableType_VIEWS, + }, { + name: "Views listed", + queries: []string{ + "create view vitess_view1 as select eid from vitess_a", + "create view vitess_view2 as select eid from vitess_b", + "create view vitess_view3 as select eid from vitess_c", + }, + deferQueries: []string{ + "drop view vitess_view1", + "drop view vitess_view2", + "drop view vitess_view3", + }, + mapToExpect: map[string]string{ + "vitess_view3": "CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view3` AS select `vitess_c`.`eid` AS `eid` from `vitess_c`", + "vitess_view2": "CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view2` AS select `vitess_b`.`eid` AS `eid` from `vitess_b`", + // These shouldn't be part of the result so we verify it is empty. + "vitess_view1": "", + "unknown_view": "", + }, + getSchemaTables: []string{"vitess_view3", "vitess_view2", "unknown_view"}, + getSchemaQueryType: querypb.SchemaTableType_VIEWS, + }, { + name: "All tables", + queries: []string{ + "create table vitess_temp1 (id int);", + "create table vitess_temp2 (id int);", + "create table vitess_temp3 (id int);", + }, + deferQueries: []string{ + "drop table vitess_temp1", + "drop table vitess_temp2", + "drop table vitess_temp3", + }, + mapToExpect: map[string]string{ + "vitess_temp1": "CREATE TABLE `vitess_temp1` (\n `id` int DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", + "vitess_temp2": "CREATE TABLE `vitess_temp2` (\n `id` int DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", + "vitess_temp3": "CREATE TABLE `vitess_temp3` (\n `id` int DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", + }, + getSchemaQueryType: querypb.SchemaTableType_TABLES, + }, { + name: "Tables listed", + queries: []string{ + "create table vitess_temp1 (eid int);", + "create table vitess_temp2 (eid int);", + "create table vitess_temp3 (eid int);", + }, + deferQueries: []string{ + "drop table vitess_temp1", + "drop table vitess_temp2", + "drop table vitess_temp3", + }, + mapToExpect: map[string]string{ + "vitess_temp1": "CREATE TABLE `vitess_temp1` (\n `eid` int DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", + "vitess_temp3": "CREATE TABLE `vitess_temp3` (\n `eid` int DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", + // These shouldn't be part of the result so we verify it is empty. + "vitess_temp2": "", + "unknown_table": "", + }, + getSchemaQueryType: querypb.SchemaTableType_TABLES, + getSchemaTables: []string{"vitess_temp1", "vitess_temp3", "unknown_table"}, + }, { + name: "All tables and views", + queries: []string{ + "create table vitess_temp1 (id int);", + "create table vitess_temp2 (id int);", + "create table vitess_temp3 (id int);", + "create view vitess_view1 as select id from vitess_a", + "create view vitess_view2 as select id from vitess_b", + }, + deferQueries: []string{ + "drop table vitess_temp1", + "drop table vitess_temp2", + "drop table vitess_temp3", + "drop view vitess_view1", + "drop view vitess_view2", + }, + mapToExpect: map[string]string{ + "vitess_view1": "CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view1` AS select `vitess_a`.`id` AS `id` from `vitess_a`", + "vitess_view2": "CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view2` AS select `vitess_b`.`id` AS `id` from `vitess_b`", + "vitess_temp1": "CREATE TABLE `vitess_temp1` (\n `id` int DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", + "vitess_temp2": "CREATE TABLE `vitess_temp2` (\n `id` int DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", + "vitess_temp3": "CREATE TABLE `vitess_temp3` (\n `id` int DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", + }, + getSchemaQueryType: querypb.SchemaTableType_ALL, + }, { + name: "Listed tables and views", + queries: []string{ + "create table vitess_temp1 (eid int);", + "create table vitess_temp2 (eid int);", + "create table vitess_temp3 (eid int);", + "create view vitess_view1 as select eid from vitess_a", + "create view vitess_view2 as select eid from vitess_b", + "create view vitess_view3 as select eid from vitess_c", + }, + deferQueries: []string{ + "drop table vitess_temp1", + "drop table vitess_temp2", + "drop table vitess_temp3", + "drop view vitess_view1", + "drop view vitess_view2", + "drop view vitess_view3", + }, + mapToExpect: map[string]string{ + "vitess_view1": "CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view1` AS select `vitess_a`.`eid` AS `eid` from `vitess_a`", + "vitess_view3": "CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view3` AS select `vitess_c`.`eid` AS `eid` from `vitess_c`", + "vitess_temp1": "CREATE TABLE `vitess_temp1` (\n `eid` int DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", + "vitess_temp3": "CREATE TABLE `vitess_temp3` (\n `eid` int DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", + // These shouldn't be part of the result so we verify it is empty. + "vitess_temp2": "", + "vitess_view2": "", + "unknown_view": "", + "unknown_table": "", + }, + getSchemaQueryType: querypb.SchemaTableType_ALL, + getSchemaTables: []string{"vitess_temp1", "vitess_temp3", "unknown_table", "vitess_view3", "vitess_view1", "unknown_view"}, + }, + } + + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + client := framework.NewClient() + client.UpdateContext(callerid.NewContext( + context.Background(), + &vtrpcpb.CallerID{}, + &querypb.VTGateCallerID{Username: "dev"})) + + for _, query := range testcase.queries { + _, err := client.Execute(query, nil) + require.NoError(t, err) + } + defer func() { + for _, query := range testcase.deferQueries { + _, err := client.Execute(query, nil) + require.NoError(t, err) + } + }() + + timeout := 1 * time.Minute + wait := time.After(timeout) + for { + select { + case <-wait: + t.Errorf("Schema tracking hasn't caught up") + return + case <-time.After(1 * time.Second): + schemaDefs, err := client.GetSchema(testcase.getSchemaQueryType, testcase.getSchemaTables...) + require.NoError(t, err) + success := true + for tableName, expectedCreateStatement := range testcase.mapToExpect { + if schemaDefs[tableName] != expectedCreateStatement { + success = false + break + } + } + if success { + return + } + } + } + }) + } +} diff --git a/go/vt/vttablet/endtoend/settings_test.go b/go/vt/vttablet/endtoend/settings_test.go index 286c2f5fbff..322819ade8e 100644 --- a/go/vt/vttablet/endtoend/settings_test.go +++ b/go/vt/vttablet/endtoend/settings_test.go @@ -27,11 +27,6 @@ import ( ) func TestSelectNoConnectionReservationOnSettings(t *testing.T) { - framework.Server.Config().EnableSettingsPool = true - defer func() { - framework.Server.Config().EnableSettingsPool = false - }() - client := framework.NewClient() defer client.Release() @@ -57,11 +52,6 @@ func TestSelectNoConnectionReservationOnSettings(t *testing.T) { } func TestSetttingsReuseConnWithSettings(t *testing.T) { - framework.Server.Config().EnableSettingsPool = true - defer func() { - framework.Server.Config().EnableSettingsPool = false - }() - resetTxConnPool(t) client := framework.NewClient() @@ -155,11 +145,6 @@ func resetTxConnPool(t *testing.T) { } func TestDDLNoConnectionReservationOnSettings(t *testing.T) { - framework.Server.Config().EnableSettingsPool = true - defer func() { - framework.Server.Config().EnableSettingsPool = false - }() - client := framework.NewClient() defer client.Release() @@ -179,11 +164,6 @@ func TestDDLNoConnectionReservationOnSettings(t *testing.T) { } func TestDMLNoConnectionReservationOnSettings(t *testing.T) { - framework.Server.Config().EnableSettingsPool = true - defer func() { - framework.Server.Config().EnableSettingsPool = false - }() - client := framework.NewClient() defer client.Release() @@ -217,11 +197,6 @@ func TestDMLNoConnectionReservationOnSettings(t *testing.T) { } func TestSelectNoConnectionReservationOnSettingsWithTx(t *testing.T) { - framework.Server.Config().EnableSettingsPool = true - defer func() { - framework.Server.Config().EnableSettingsPool = false - }() - client := framework.NewClient() query := "select @@sql_mode" @@ -243,11 +218,6 @@ func TestSelectNoConnectionReservationOnSettingsWithTx(t *testing.T) { } func TestDDLNoConnectionReservationOnSettingsWithTx(t *testing.T) { - framework.Server.Config().EnableSettingsPool = true - defer func() { - framework.Server.Config().EnableSettingsPool = false - }() - client := framework.NewClient() defer client.Release() @@ -261,11 +231,6 @@ func TestDDLNoConnectionReservationOnSettingsWithTx(t *testing.T) { } func TestDMLNoConnectionReservationOnSettingsWithTx(t *testing.T) { - framework.Server.Config().EnableSettingsPool = true - defer func() { - framework.Server.Config().EnableSettingsPool = false - }() - client := framework.NewClient() _, err := client.Execute("create table temp(c_date datetime)", nil) @@ -294,11 +259,6 @@ func TestDMLNoConnectionReservationOnSettingsWithTx(t *testing.T) { } func TestSetQueryOnReserveApis(t *testing.T) { - framework.Server.Config().EnableSettingsPool = true - defer func() { - framework.Server.Config().EnableSettingsPool = false - }() - client := framework.NewClient() defer client.Release() @@ -314,11 +274,6 @@ func TestSetQueryOnReserveApis(t *testing.T) { } func TestGetLockQueryOnReserveExecute(t *testing.T) { - framework.Server.Config().EnableSettingsPool = true - defer func() { - framework.Server.Config().EnableSettingsPool = false - }() - client := framework.NewClient() defer client.Release() @@ -340,11 +295,6 @@ func TestGetLockQueryOnReserveExecute(t *testing.T) { } func TestTempTableOnReserveExecute(t *testing.T) { - framework.Server.Config().EnableSettingsPool = true - defer func() { - framework.Server.Config().EnableSettingsPool = false - }() - client := framework.NewClient() defer client.Release() defer client.Execute("drop table if exists temp", nil) @@ -391,11 +341,6 @@ func TestTempTableOnReserveExecute(t *testing.T) { } func TestInfiniteSessions(t *testing.T) { - framework.Server.Config().EnableSettingsPool = true - defer func() { - framework.Server.Config().EnableSettingsPool = false - }() - client := framework.NewClient() qr, err := client.Execute("select @@max_connections", nil) require.NoError(t, err) @@ -423,6 +368,7 @@ func TestInfiniteSessions(t *testing.T) { } func TestSetQueriesMultipleWays(t *testing.T) { + framework.Server.Config().EnableSettingsPool = false client := framework.NewClient() defer client.Release() _, err := client.ReserveExecute("select 1", []string{"set sql_safe_updates = 1"}, nil) @@ -432,10 +378,6 @@ func TestSetQueriesMultipleWays(t *testing.T) { require.NoError(t, err) framework.Server.Config().EnableSettingsPool = true - defer func() { - framework.Server.Config().EnableSettingsPool = false - }() - client2 := framework.NewClient() _, err = client2.ReserveExecute("select 1", []string{"set sql_safe_updates = 1"}, nil) require.NoError(t, err) diff --git a/go/vt/vttablet/endtoend/streamtimeout/healthstream_test.go b/go/vt/vttablet/endtoend/streamtimeout/healthstream_test.go new file mode 100644 index 00000000000..d13c4ea9e67 --- /dev/null +++ b/go/vt/vttablet/endtoend/streamtimeout/healthstream_test.go @@ -0,0 +1,108 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package streamtimeout + +import ( + "fmt" + "slices" + "testing" + "time" + + "github.com/stretchr/testify/require" + + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/vttablet/endtoend/framework" +) + +// TestSchemaChangeTimedout ensures that the timeout functionality is working properly +// to prevent queries from hanging up and causing a mutex to be locked forever. +func TestSchemaChangeTimedout(t *testing.T) { + client := framework.NewClient() + reloadEstimatedTime := 2 * time.Second + + ch := make(chan []string, 100) + go func(ch chan []string) { + client.StreamHealth(func(response *querypb.StreamHealthResponse) error { + if response.RealtimeStats.TableSchemaChanged != nil { + ch <- response.RealtimeStats.TableSchemaChanged + } + return nil + }) + }(ch) + + // We will set up the MySQLHang simulation. + // To avoid flakiness, we will retry the setup if the health_streamer sends a notification before the MySQLHang is simulated. + attempt := 1 + var tableName string +loop: + for { + tableName = fmt.Sprintf("vitess_sc%d", attempt) + + // change the schema to trigger the health_streamer to send a notification at a later time. + _, err := client.Execute("create table "+tableName+"(id bigint primary key)", nil) + require.NoError(t, err) + + // start simulating a mysql stall until a query issued by the health_streamer would hang. + err = cluster.SimulateMySQLHang() + require.NoError(t, err) + + select { + case <-ch: // get the schema notification + // The health_streamer can send a notification between the time the schema is changed and the mysql stall is simulated. + // In this rare case, we must retry the same setup again. + cluster.StopSimulateMySQLHang() + attempt++ + + if attempt > 5 { + t.Errorf("failed to setup MySQLHang even after several attempts") + return + } + t.Logf("retrying setup for attempt %d", attempt) + case <-time.After(reloadEstimatedTime): + break loop + } + } + defer cluster.StopSimulateMySQLHang() + + // We will wait for the health_streamer to attempt sending a notification. + // It's important to keep in mind that the total wait time after the simulation should be shorter than the reload timeout. + // This is because the query timeout triggers the *DBConn.Kill() method, which in turn holds the mutex lock on the health_streamer. + // Although not indefinitely, this can result in longer wait times. + // It's worth noting that the behavior of *DBConn.Kill() is outside the scope of this test. + reloadInterval := config.SignalSchemaChangeReloadIntervalSeconds.Get() + time.Sleep(reloadInterval) + + // pause simulating the mysql stall to allow the health_streamer to resume. + err := cluster.PauseSimulateMySQLHang() + require.NoError(t, err) + + // wait for the health_streamer to complete retrying the notification. + reloadTimeout := config.SchemaChangeReloadTimeout + retryEstimatedTime := reloadTimeout + reloadInterval + reloadEstimatedTime + timeout := time.After(retryEstimatedTime) + for { + select { + case res := <-ch: // get the schema notification + if slices.Contains(res, tableName) { + return + } + case <-timeout: + t.Errorf("timed out even after the mysql hang was no longer simulated") + return + } + } +} diff --git a/go/vt/vttablet/endtoend/streamtimeout/main_test.go b/go/vt/vttablet/endtoend/streamtimeout/main_test.go new file mode 100644 index 00000000000..e676616b294 --- /dev/null +++ b/go/vt/vttablet/endtoend/streamtimeout/main_test.go @@ -0,0 +1,102 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +All tests in this package come with toxiproxy in front of the MySQL server +*/ +package streamtimeout + +import ( + "context" + "flag" + "fmt" + "os" + "testing" + "time" + + "vitess.io/vitess/go/vt/vttablet/endtoend/framework" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttest" + + vttestpb "vitess.io/vitess/go/vt/proto/vttest" +) + +var ( + cluster vttest.LocalCluster + config *tabletenv.TabletConfig +) + +func TestMain(m *testing.M) { + flag.Parse() // Do not remove this comment, import into google3 depends on it + tabletenv.Init() + + exitCode := func() int { + // Launch MySQL. + // We need a Keyspace in the topology, so the DbName is set. + // We need a Shard too, so the database 'vttest' is created. + cfg := vttest.Config{ + Topology: &vttestpb.VTTestTopology{ + Keyspaces: []*vttestpb.Keyspace{ + { + Name: "vttest", + Shards: []*vttestpb.Shard{ + { + Name: "0", + DbNameOverride: "vttest", + }, + }, + }, + }, + }, + OnlyMySQL: true, + Charset: "utf8mb4_general_ci", + } + + env, err := vttest.NewLocalTestEnv("", 0) + if err != nil { + fmt.Fprintf(os.Stderr, "%v", err) + return 1 + } + env.EnableToxiproxy = true + cluster = vttest.LocalCluster{ + Config: cfg, + Env: env, + } + if err := cluster.Setup(); err != nil { + fmt.Fprintf(os.Stderr, "could not launch mysql: %v\n", err) + return 1 + } + defer cluster.TearDown() + + connParams := cluster.MySQLConnParams() + connAppDebugParams := cluster.MySQLAppDebugConnParams() + config = tabletenv.NewDefaultConfig() + _ = config.SchemaReloadIntervalSeconds.Set("2100ms") + config.SchemaChangeReloadTimeout = 10 * time.Second + config.SignalWhenSchemaChange = true + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err = framework.StartCustomServer(ctx, connParams, connAppDebugParams, cluster.DbName(), config) + if err != nil { + fmt.Fprintf(os.Stderr, "%v", err) + return 1 + } + defer framework.StopServer() + return m.Run() + }() + os.Exit(exitCode) +} diff --git a/go/vt/vttablet/endtoend/transaction_test.go b/go/vt/vttablet/endtoend/transaction_test.go index 6751e60f9ad..8f6546df5f1 100644 --- a/go/vt/vttablet/endtoend/transaction_test.go +++ b/go/vt/vttablet/endtoend/transaction_test.go @@ -321,7 +321,7 @@ func TestShutdownGracePeriod(t *testing.T) { err := client.Begin(false) require.NoError(t, err) go func() { - _, err = client.Execute("select sleep(10) from dual", nil) + _, err := client.Execute("select sleep(10) from dual", nil) assert.Error(t, err) }() @@ -346,7 +346,7 @@ func TestShutdownGracePeriod(t *testing.T) { err = client.Begin(false) require.NoError(t, err) go func() { - _, err = client.Execute("select sleep(11) from dual", nil) + _, err := client.Execute("select sleep(11) from dual", nil) assert.Error(t, err) }() @@ -373,7 +373,7 @@ func TestShutdownGracePeriodWithStreamExecute(t *testing.T) { err := client.Begin(false) require.NoError(t, err) go func() { - _, err = client.StreamExecute("select sleep(10) from dual", nil) + _, err := client.StreamExecute("select sleep(10) from dual", nil) assert.Error(t, err) }() @@ -398,7 +398,7 @@ func TestShutdownGracePeriodWithStreamExecute(t *testing.T) { err = client.Begin(false) require.NoError(t, err) go func() { - _, err = client.StreamExecute("select sleep(11) from dual", nil) + _, err := client.StreamExecute("select sleep(11) from dual", nil) assert.Error(t, err) }() @@ -425,7 +425,7 @@ func TestShutdownGracePeriodWithReserveExecute(t *testing.T) { err := client.Begin(false) require.NoError(t, err) go func() { - _, err = client.ReserveExecute("select sleep(10) from dual", nil, nil) + _, err := client.ReserveExecute("select sleep(10) from dual", nil, nil) assert.Error(t, err) }() @@ -450,7 +450,7 @@ func TestShutdownGracePeriodWithReserveExecute(t *testing.T) { err = client.Begin(false) require.NoError(t, err) go func() { - _, err = client.ReserveExecute("select sleep(11) from dual", nil, nil) + _, err := client.ReserveExecute("select sleep(11) from dual", nil, nil) assert.Error(t, err) }() diff --git a/go/vt/vttablet/endtoend/views_test.go b/go/vt/vttablet/endtoend/views_test.go index 8295eafbb6a..4ef70345180 100644 --- a/go/vt/vttablet/endtoend/views_test.go +++ b/go/vt/vttablet/endtoend/views_test.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "testing" + "time" "github.com/stretchr/testify/require" @@ -41,16 +42,35 @@ func TestCreateViewDDL(t *testing.T) { &vtrpcpb.CallerID{}, &querypb.VTGateCallerID{Username: "dev"})) - defer client.Execute("drop view vitess_view", nil) + ch := make(chan any) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + err := client.StreamHealthWithContext(ctx, func(shr *querypb.StreamHealthResponse) error { + views := shr.RealtimeStats.ViewSchemaChanged + if len(views) != 0 && views[0] == "vitess_view" { + ch <- true + } + return nil + }) + require.NoError(t, err) + }() + + defer func() { + _, err := client.Execute("drop view vitess_view", nil) + require.NoError(t, err) + <-ch // wait for views update + }() _, err := client.Execute("create view vitess_view as select * from vitess_a", nil) require.NoError(t, err) + <-ch // wait for views update // validate the row in _vt.views. qr, err := client.Execute(qSelAllRows, nil) require.NoError(t, err) require.Equal(t, - `[[VARCHAR("vttest") VARCHAR("vitess_view") TEXT("create view vitess_view as select * from vitess_a")]]`, + "[[VARCHAR(\"vttest\") VARCHAR(\"vitess_view\") TEXT(\"CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view` AS select `vitess_a`.`eid` AS `eid`,`vitess_a`.`id` AS `id`,`vitess_a`.`name` AS `name`,`vitess_a`.`foo` AS `foo` from `vitess_a`\")]]", fmt.Sprintf("%v", qr.Rows)) // view already exists. This should fail. @@ -61,11 +81,12 @@ func TestCreateViewDDL(t *testing.T) { _, err = client.Execute("create or replace view vitess_view as select id, foo from vitess_a", nil) require.NoError(t, err) + <-ch // wait for views update // validate the row in _vt.views. qr, err = client.Execute(qSelAllRows, nil) require.NoError(t, err) require.Equal(t, - `[[VARCHAR("vttest") VARCHAR("vitess_view") TEXT("create or replace view vitess_view as select id, foo from vitess_a")]]`, + "[[VARCHAR(\"vttest\") VARCHAR(\"vitess_view\") TEXT(\"CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view` AS select `vitess_a`.`id` AS `id`,`vitess_a`.`foo` AS `foo` from `vitess_a`\")]]", fmt.Sprintf("%v", qr.Rows)) } @@ -78,25 +99,52 @@ func TestAlterViewDDL(t *testing.T) { &vtrpcpb.CallerID{}, &querypb.VTGateCallerID{Username: "dev"})) - defer client.Execute("drop view vitess_view", nil) + ch := make(chan any) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + err := client.StreamHealthWithContext(ctx, func(shr *querypb.StreamHealthResponse) error { + views := shr.RealtimeStats.ViewSchemaChanged + if len(views) != 0 && views[0] == "vitess_view" { + ch <- true + } + return nil + }) + require.NoError(t, err) + }() + + defer func() { + _, err := client.Execute("drop view vitess_view", nil) + require.NoError(t, err) + <-ch // wait for views update + }() // view does not exist, should FAIL _, err := client.Execute("alter view vitess_view as select * from vitess_a", nil) - require.ErrorContains(t, err, "Table 'vitess_view' does not exist") + require.ErrorContains(t, err, "Table 'vttest.vitess_view' doesn't exist (errno 1146) (sqlstate 42S02)") // create a view. _, err = client.Execute("create view vitess_view as select * from vitess_a", nil) require.NoError(t, err) + <-ch // wait for views update + // validate the row in _vt.views. + qr, err := client.Execute(qSelAllRows, nil) + require.NoError(t, err) + require.Equal(t, + "[[VARCHAR(\"vttest\") VARCHAR(\"vitess_view\") TEXT(\"CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view` AS select `vitess_a`.`eid` AS `eid`,`vitess_a`.`id` AS `id`,`vitess_a`.`name` AS `name`,`vitess_a`.`foo` AS `foo` from `vitess_a`\")]]", + fmt.Sprintf("%v", qr.Rows)) + // view exists, should PASS _, err = client.Execute("alter view vitess_view as select id, foo from vitess_a", nil) require.NoError(t, err) + <-ch // wait for views update // validate the row in _vt.views. - qr, err := client.Execute(qSelAllRows, nil) + qr, err = client.Execute(qSelAllRows, nil) require.NoError(t, err) require.Equal(t, - `[[VARCHAR("vttest") VARCHAR("vitess_view") TEXT("create view vitess_view as select id, foo from vitess_a")]]`, + "[[VARCHAR(\"vttest\") VARCHAR(\"vitess_view\") TEXT(\"CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view` AS select `vitess_a`.`id` AS `id`,`vitess_a`.`foo` AS `foo` from `vitess_a`\")]]", fmt.Sprintf("%v", qr.Rows)) } @@ -109,8 +157,6 @@ func TestDropViewDDL(t *testing.T) { &vtrpcpb.CallerID{}, &querypb.VTGateCallerID{Username: "dev"})) - defer client.Execute("drop view vitess_view", nil) - // view does not exist, should FAIL _, err := client.Execute("drop view vitess_view", nil) require.ErrorContains(t, err, "Unknown table 'vttest.vitess_view'") @@ -125,18 +171,22 @@ func TestDropViewDDL(t *testing.T) { _, err = client.Execute("create view vitess_view2 as select * from vitess_a", nil) require.NoError(t, err) + // validate both the views are stored in _vt.views. + waitForResult(t, client, 2, 1*time.Minute) + // drop vitess_view1, should PASS _, err = client.Execute("drop view vitess_view1", nil) require.NoError(t, err) - // drop three views, only vitess_view2 exists. This should FAIL but drops the existing view. + // drop three views, only vitess_view2 exists. + // In MySQL 5.7, this would drop vitess_view2, but that behaviour has changed + // in MySQL 8.0, and not the view isn't dropped. CI is running 8.0, so the remaining test is + // written with those expectations. _, err = client.Execute("drop view vitess_view1, vitess_view2, vitess_view3", nil) require.ErrorContains(t, err, "Unknown table 'vttest.vitess_view1,vttest.vitess_view3'") // validate ZERO rows in _vt.views. - qr, err := client.Execute(qSelAllRows, nil) - require.NoError(t, err) - require.Zero(t, qr.Rows) + waitForResult(t, client, 1, 1*time.Minute) // create a view. _, err = client.Execute("create view vitess_view1 as select * from vitess_a", nil) @@ -147,9 +197,7 @@ func TestDropViewDDL(t *testing.T) { require.NoError(t, err) // validate ZERO rows in _vt.views. - qr, err = client.Execute(qSelAllRows, nil) - require.NoError(t, err) - require.Zero(t, qr.Rows) + waitForResult(t, client, 0, 1*time.Minute) } // TestViewDDLWithInfrSchema will validate information schema queries with views. @@ -223,25 +271,24 @@ func TestViewAndTableUnique(t *testing.T) { require.ErrorContains(t, err, "Table 'vitess_view' already exists") } -// TestGetSchemaRPC will validate GetSchema rpc.. -func TestGetSchemaRPC(t *testing.T) { - client := framework.NewClient() - - viewSchemaDef, err := client.GetSchema(querypb.SchemaTableType_VIEWS) - require.NoError(t, err) - require.Zero(t, len(viewSchemaDef)) - - client.UpdateContext(callerid.NewContext( - context.Background(), - &vtrpcpb.CallerID{}, - &querypb.VTGateCallerID{Username: "dev"})) - - defer client.Execute("drop view vitess_view", nil) - - _, err = client.Execute("create view vitess_view as select 1 from vitess_a", nil) - require.NoError(t, err) - - viewSchemaDef, err = client.GetSchema(querypb.SchemaTableType_VIEWS) - require.NoError(t, err) - require.Equal(t, "create view vitess_view as select 1 from vitess_a", viewSchemaDef["vitess_view"]) +func waitForResult(t *testing.T, client *framework.QueryClient, rowCount int, timeout time.Duration) { + t.Helper() + wait := time.After(timeout) + success := false + for { + select { + case <-wait: + t.Errorf("all views are not dropped within the time") + return + case <-time.After(1 * time.Second): + qr, err := client.Execute(qSelAllRows, nil) + require.NoError(t, err) + if len(qr.Rows) == rowCount { + success = true + } + } + if success { + break + } + } } diff --git a/go/vt/vttablet/endtoend/vstreamer_test.go b/go/vt/vttablet/endtoend/vstreamer_test.go index a1d86cb30c9..312273e0c84 100644 --- a/go/vt/vttablet/endtoend/vstreamer_test.go +++ b/go/vt/vttablet/endtoend/vstreamer_test.go @@ -59,8 +59,6 @@ func TestSchemaVersioning(t *testing.T) { tsv.EnableHistorian(false) tsv.SetTracking(false) tsv.EnableHeartbeat(false) - tsv.EnableThrottler(false) - defer tsv.EnableThrottler(true) defer tsv.EnableHeartbeat(true) defer tsv.EnableHistorian(true) defer tsv.SetTracking(true) @@ -202,6 +200,13 @@ func TestSchemaVersioning(t *testing.T) { log.Infof("Received event %v", event) evs = append(evs, event) } + // Ignore unrelated events. + if len(evs) == 3 && + evs[0].Type == binlogdatapb.VEventType_BEGIN && + evs[1].Type == binlogdatapb.VEventType_GTID && + evs[2].Type == binlogdatapb.VEventType_COMMIT { + return nil + } select { case eventCh <- evs: case <-ctx.Done(): @@ -267,6 +272,13 @@ func TestSchemaVersioning(t *testing.T) { log.Infof("Received event %v", event) evs = append(evs, event) } + // Ignore unrelated events. + if len(evs) == 3 && + evs[0].Type == binlogdatapb.VEventType_BEGIN && + evs[1].Type == binlogdatapb.VEventType_GTID && + evs[2].Type == binlogdatapb.VEventType_COMMIT { + return nil + } select { case eventCh <- evs: case <-ctx.Done(): @@ -298,7 +310,7 @@ func TestSchemaVersioning(t *testing.T) { `version`, `gtid`, /*at this point we only have latest schema so we have types (int32, int32, varbinary, varbinary) so the types don't match. Hence the @ fieldnames*/ - `type:FIELD field_event:{table_name:"vitess_version" fields:{name:"@1" type:INT32} fields:{name:"@2" type:INT32} fields:{name:"@3" type:INT32}}`, + `type:FIELD field_event:{table_name:"vitess_version" fields:{name:"@1" type:INT32 charset:63} fields:{name:"@2" type:INT32 charset:63} fields:{name:"@3" type:INT32 charset:63}}`, `type:ROW row_event:{table_name:"vitess_version" row_changes:{after:{lengths:1 lengths:2 lengths:3 values:"220200"}}}`, `gtid`, `gtid`, @@ -372,6 +384,10 @@ func expectLogs(ctx context.Context, t *testing.T, query string, eventCh chan [] if ev.Type == binlogdatapb.VEventType_HEARTBEAT { continue } + if ev.Type == binlogdatapb.VEventType_ROW { + ev.RowEvent.Flags = 0 // null Flags, so we don't have to define flags in every wanted row event. + } + if ev.Throttled { continue } diff --git a/go/vt/vttablet/faketmclient/fake_client.go b/go/vt/vttablet/faketmclient/fake_client.go index 21007482683..e8747b98fcc 100644 --- a/go/vt/vttablet/faketmclient/fake_client.go +++ b/go/vt/vttablet/faketmclient/fake_client.go @@ -62,6 +62,26 @@ type FakeTabletManagerClient struct { tmc tmclient.TabletManagerClient } +func (client *FakeTabletManagerClient) CreateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) { + return nil, nil +} + +func (client *FakeTabletManagerClient) DeleteVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (*tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, error) { + return nil, nil +} + +func (client *FakeTabletManagerClient) ReadVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { + return nil, nil +} + +func (client *FakeTabletManagerClient) ResetSequences(ctx context.Context, tablet *topodatapb.Tablet, tables []string) error { + return nil +} + +func (client *FakeTabletManagerClient) UpdateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, error) { + return nil, nil +} + func (client *FakeTabletManagerClient) VDiff(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.VDiffRequest) (*tabletmanagerdatapb.VDiffResponse, error) { return nil, nil } @@ -323,6 +343,12 @@ func (client *FakeTabletManagerClient) RestoreFromBackup(ctx context.Context, ta return &eofEventStream{}, nil } +// Throttler related methods + +func (client *FakeTabletManagerClient) CheckThrottler(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) { + return &tabletmanagerdatapb.CheckThrottlerResponse{}, nil +} + // // Management related methods // diff --git a/go/vt/vttablet/flags.go b/go/vt/vttablet/flags.go new file mode 100644 index 00000000000..460a5427358 --- /dev/null +++ b/go/vt/vttablet/flags.go @@ -0,0 +1,39 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vttablet + +import ( + "github.com/spf13/pflag" + + "vitess.io/vitess/go/vt/servenv" +) + +const ( + VReplicationExperimentalFlagOptimizeInserts = int64(1) + VReplicationExperimentalFlagAllowNoBlobBinlogRowImage = int64(2) +) + +var VReplicationExperimentalFlags = VReplicationExperimentalFlagOptimizeInserts | VReplicationExperimentalFlagAllowNoBlobBinlogRowImage + +func init() { + servenv.OnParseFor("vttablet", registerFlags) +} + +func registerFlags(fs *pflag.FlagSet) { + fs.Int64Var(&VReplicationExperimentalFlags, "vreplication_experimental_flags", VReplicationExperimentalFlags, + "(Bitmask) of experimental features in vreplication to enable") +} diff --git a/go/vt/vttablet/grpcqueryservice/server.go b/go/vt/vttablet/grpcqueryservice/server.go index e23acba630f..64a7697162d 100644 --- a/go/vt/vttablet/grpcqueryservice/server.go +++ b/go/vt/vttablet/grpcqueryservice/server.go @@ -352,6 +352,16 @@ func (q *query) VStreamRows(request *binlogdatapb.VStreamRowsRequest, stream que return vterrors.ToGRPC(err) } +func (q *query) VStreamTables(request *binlogdatapb.VStreamTablesRequest, stream queryservicepb.Query_VStreamTablesServer) (err error) { + defer q.server.HandlePanic(&err) + ctx := callerid.NewContext(callinfo.GRPCCallInfo(stream.Context()), + request.EffectiveCallerId, + request.ImmediateCallerId, + ) + err = q.server.VStreamTables(ctx, request, stream.Send) + return vterrors.ToGRPC(err) +} + // VStreamResults is part of the queryservice.QueryServer interface func (q *query) VStreamResults(request *binlogdatapb.VStreamResultsRequest, stream queryservicepb.Query_VStreamResultsServer) (err error) { defer q.server.HandlePanic(&err) @@ -422,8 +432,8 @@ func (q *query) ReserveBeginExecute(ctx context.Context, request *querypb.Reserv ) state, result, err := q.server.ReserveBeginExecute(ctx, request.Target, request.PreQueries, request.PostBeginQueries, request.Query.Sql, request.Query.BindVariables, request.Options) if err != nil { - // if we have a valid reservedID, return the error in-band - if state.ReservedID != 0 { + // if we have a valid reservedID or transactionID, return the error in-band + if state.TransactionID != 0 || state.ReservedID != 0 { return &querypb.ReserveBeginExecuteResponse{ Error: vterrors.ToVTRPC(err), TransactionId: state.TransactionID, diff --git a/go/vt/vttablet/grpctabletconn/conn.go b/go/vt/vttablet/grpctabletconn/conn.go index 7ef533f580d..cb97abcbbae 100644 --- a/go/vt/vttablet/grpctabletconn/conn.go +++ b/go/vt/vttablet/grpctabletconn/conn.go @@ -735,6 +735,46 @@ func (conn *gRPCQueryClient) VStreamRows(ctx context.Context, request *binlogdat } } +// VStreamTables streams rows of a query from the specified starting point. +func (conn *gRPCQueryClient) VStreamTables(ctx context.Context, request *binlogdatapb.VStreamTablesRequest, send func(*binlogdatapb.VStreamTablesResponse) error) error { + stream, err := func() (queryservicepb.Query_VStreamTablesClient, error) { + conn.mu.RLock() + defer conn.mu.RUnlock() + if conn.cc == nil { + return nil, tabletconn.ConnClosed + } + + req := &binlogdatapb.VStreamTablesRequest{ + Target: request.Target, + EffectiveCallerId: callerid.EffectiveCallerIDFromContext(ctx), + ImmediateCallerId: callerid.ImmediateCallerIDFromContext(ctx), + } + stream, err := conn.c.VStreamTables(ctx, req) + if err != nil { + return nil, tabletconn.ErrorFromGRPC(err) + } + return stream, nil + }() + if err != nil { + return err + } + r := binlogdatapb.VStreamTablesResponseFromVTPool() + defer r.ReturnToVTPool() + for { + err := stream.RecvMsg(r) + if err != nil { + return tabletconn.ErrorFromGRPC(err) + } + if ctx.Err() != nil { + return ctx.Err() + } + if err := send(r); err != nil { + return err + } + r.ResetVT() + } +} + // VStreamResults streams rows of a query from the specified starting point. func (conn *gRPCQueryClient) VStreamResults(ctx context.Context, target *querypb.Target, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error { stream, err := func() (queryservicepb.Query_VStreamResultsClient, error) { diff --git a/go/vt/vttablet/grpctmclient/client.go b/go/vt/vttablet/grpctmclient/client.go index 41d7469678a..0068ed74706 100644 --- a/go/vt/vttablet/grpctmclient/client.go +++ b/go/vt/vttablet/grpctmclient/client.go @@ -69,7 +69,6 @@ var _binaries = []string{ // binaries that require the flags in this package "vtctl", "vtctld", "vtctldclient", - "vtgr", "vtorc", "vttablet", "vttestserver", @@ -363,6 +362,18 @@ func (client *Client) ReloadSchema(ctx context.Context, tablet *topodatapb.Table return err } +func (client *Client) ResetSequences(ctx context.Context, tablet *topodatapb.Tablet, tables []string) error { + c, closer, err := client.dialer.dial(ctx, tablet) + if err != nil { + return err + } + defer closer.Close() + _, err = c.ResetSequences(ctx, &tabletmanagerdatapb.ResetSequencesRequest{ + Tables: tables, + }) + return err +} + // PreflightSchema is part of the tmclient.TabletManagerClient interface. func (client *Client) PreflightSchema(ctx context.Context, tablet *topodatapb.Tablet, changes []string) ([]*tabletmanagerdatapb.SchemaChangeResult, error) { c, closer, err := client.dialer.dial(ctx, tablet) @@ -679,6 +690,49 @@ func (client *Client) GetReplicas(ctx context.Context, tablet *topodatapb.Tablet return response.Addrs, nil } +// +// VReplication related methods +// + +func (client *Client) CreateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) { + c, closer, err := client.dialer.dial(ctx, tablet) + if err != nil { + return nil, err + } + defer closer.Close() + response, err := c.CreateVReplicationWorkflow(ctx, request) + if err != nil { + return nil, err + } + return response, nil +} + +func (client *Client) DeleteVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (*tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, error) { + c, closer, err := client.dialer.dial(ctx, tablet) + if err != nil { + return nil, err + } + defer closer.Close() + response, err := c.DeleteVReplicationWorkflow(ctx, request) + if err != nil { + return nil, err + } + return response, nil +} + +func (client *Client) ReadVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { + c, closer, err := client.dialer.dial(ctx, tablet) + if err != nil { + return nil, err + } + defer closer.Close() + response, err := c.ReadVReplicationWorkflow(ctx, request) + if err != nil { + return nil, err + } + return response, nil +} + // VReplicationExec is part of the tmclient.TabletManagerClient interface. func (client *Client) VReplicationExec(ctx context.Context, tablet *topodatapb.Tablet, query string) (*querypb.QueryResult, error) { c, closer, err := client.dialer.dial(ctx, tablet) @@ -706,6 +760,19 @@ func (client *Client) VReplicationWaitForPos(ctx context.Context, tablet *topoda return nil } +func (client *Client) UpdateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, error) { + c, closer, err := client.dialer.dial(ctx, tablet) + if err != nil { + return nil, err + } + defer closer.Close() + response, err := c.UpdateVReplicationWorkflow(ctx, request) + if err != nil { + return nil, err + } + return response, nil +} + // VDiff is part of the tmclient.TabletManagerClient interface. func (client *Client) VDiff(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.VDiffRequest) (*tabletmanagerdatapb.VDiffResponse, error) { log.Infof("VDiff for tablet %s, request %+v", tablet.Alias.String(), req) @@ -934,6 +1001,20 @@ func (client *Client) Backup(ctx context.Context, tablet *topodatapb.Tablet, req }, nil } +// CheckThrottler is part of the tmclient.TabletManagerClient interface. +func (client *Client) CheckThrottler(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) { + c, closer, err := client.dialer.dial(ctx, tablet) + if err != nil { + return nil, err + } + defer closer.Close() + response, err := c.CheckThrottler(ctx, req) + if err != nil { + return nil, err + } + return response, nil +} + type restoreFromBackupStreamAdapter struct { stream tabletmanagerservicepb.TabletManager_RestoreFromBackupClient closer io.Closer diff --git a/go/vt/vttablet/grpctmserver/server.go b/go/vt/vttablet/grpctmserver/server.go index ace6e5078e2..d0fe5a2cbe1 100644 --- a/go/vt/vttablet/grpctmserver/server.go +++ b/go/vt/vttablet/grpctmserver/server.go @@ -20,11 +20,9 @@ import ( "context" "time" - "vitess.io/vitess/go/vt/callerid" - querypb "vitess.io/vitess/go/vt/proto/query" - "google.golang.org/grpc" + "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/callinfo" "vitess.io/vitess/go/vt/hook" "vitess.io/vitess/go/vt/logutil" @@ -34,6 +32,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletmanager" logutilpb "vitess.io/vitess/go/vt/proto/logutil" + querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" tabletmanagerservicepb "vitess.io/vitess/go/vt/proto/tabletmanagerservice" ) @@ -176,6 +175,13 @@ func (s *server) ApplySchema(ctx context.Context, request *tabletmanagerdatapb.A return response, err } +func (s *server) ResetSequences(ctx context.Context, request *tabletmanagerdatapb.ResetSequencesRequest) (response *tabletmanagerdatapb.ResetSequencesResponse, err error) { + defer s.tm.HandleRPCPanic(ctx, "ResetSequences", request, response, true /*verbose*/, &err) + response = &tabletmanagerdatapb.ResetSequencesResponse{} + err = s.tm.ResetSequences(ctx, request.Tables) + return response, err +} + func (s *server) LockTables(ctx context.Context, req *tabletmanagerdatapb.LockTablesRequest) (*tabletmanagerdatapb.LockTablesResponse, error) { err := s.tm.LockTables(ctx) if err != nil { @@ -343,6 +349,31 @@ func (s *server) GetReplicas(ctx context.Context, request *tabletmanagerdatapb.G return response, err } +// +// VReplication related methods +// + +func (s *server) CreateVReplicationWorkflow(ctx context.Context, request *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (response *tabletmanagerdatapb.CreateVReplicationWorkflowResponse, err error) { + defer s.tm.HandleRPCPanic(ctx, "CreateVReplicationWorkflow", request, response, true /*verbose*/, &err) + ctx = callinfo.GRPCCallInfo(ctx) + response = &tabletmanagerdatapb.CreateVReplicationWorkflowResponse{} + return s.tm.CreateVReplicationWorkflow(ctx, request) +} + +func (s *server) DeleteVReplicationWorkflow(ctx context.Context, request *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (response *tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, err error) { + defer s.tm.HandleRPCPanic(ctx, "DeleteVReplicationWorkflow", request, response, true /*verbose*/, &err) + ctx = callinfo.GRPCCallInfo(ctx) + response = &tabletmanagerdatapb.DeleteVReplicationWorkflowResponse{} + return s.tm.DeleteVReplicationWorkflow(ctx, request) +} + +func (s *server) ReadVReplicationWorkflow(ctx context.Context, request *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (response *tabletmanagerdatapb.ReadVReplicationWorkflowResponse, err error) { + defer s.tm.HandleRPCPanic(ctx, "ReadVReplicationWorkflow", request, response, true /*verbose*/, &err) + ctx = callinfo.GRPCCallInfo(ctx) + response = &tabletmanagerdatapb.ReadVReplicationWorkflowResponse{} + return s.tm.ReadVReplicationWorkflow(ctx, request) +} + func (s *server) VReplicationExec(ctx context.Context, request *tabletmanagerdatapb.VReplicationExecRequest) (response *tabletmanagerdatapb.VReplicationExecResponse, err error) { defer s.tm.HandleRPCPanic(ctx, "VReplicationExec", request, response, true /*verbose*/, &err) ctx = callinfo.GRPCCallInfo(ctx) @@ -358,6 +389,13 @@ func (s *server) VReplicationWaitForPos(ctx context.Context, request *tabletmana return &tabletmanagerdatapb.VReplicationWaitForPosResponse{}, err } +func (s *server) UpdateVReplicationWorkflow(ctx context.Context, request *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (response *tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, err error) { + defer s.tm.HandleRPCPanic(ctx, "UpdateVReplicationWorkflow", request, response, true /*verbose*/, &err) + ctx = callinfo.GRPCCallInfo(ctx) + response = &tabletmanagerdatapb.UpdateVReplicationWorkflowResponse{} + return s.tm.UpdateVReplicationWorkflow(ctx, request) +} + func (s *server) VDiff(ctx context.Context, request *tabletmanagerdatapb.VDiffRequest) (response *tabletmanagerdatapb.VDiffResponse, err error) { defer s.tm.HandleRPCPanic(ctx, "VDiff", request, response, true /*verbose*/, &err) ctx = callinfo.GRPCCallInfo(ctx) @@ -506,6 +544,13 @@ func (s *server) RestoreFromBackup(request *tabletmanagerdatapb.RestoreFromBacku return s.tm.RestoreFromBackup(ctx, logger, request) } +func (s *server) CheckThrottler(ctx context.Context, request *tabletmanagerdatapb.CheckThrottlerRequest) (response *tabletmanagerdatapb.CheckThrottlerResponse, err error) { + defer s.tm.HandleRPCPanic(ctx, "CheckThrottler", request, response, false /*verbose*/, &err) + ctx = callinfo.GRPCCallInfo(ctx) + response, err = s.tm.CheckThrottler(ctx, request) + return response, err +} + // registration glue func init() { diff --git a/go/vt/vttablet/onlineddl/analysis.go b/go/vt/vttablet/onlineddl/analysis.go index 040f79d861e..987f09124a1 100644 --- a/go/vt/vttablet/onlineddl/analysis.go +++ b/go/vt/vttablet/onlineddl/analysis.go @@ -217,6 +217,7 @@ func alterOptionAvailableViaInstantDDL(alterOption sqlparser.AlterOption, create strippedCol := sqlparser.CloneRefOfColumnDefinition(col) if stripDefault { strippedCol.Type.Options.Default = nil + strippedCol.Type.Options.DefaultLiteral = false } if stripEnum { strippedCol.Type.EnumValues = nil diff --git a/go/vt/vttablet/onlineddl/executor.go b/go/vt/vttablet/onlineddl/executor.go index cc0614e97eb..d95d4afc41f 100644 --- a/go/vt/vttablet/onlineddl/executor.go +++ b/go/vt/vttablet/onlineddl/executor.go @@ -35,12 +35,12 @@ import ( "time" "github.com/spf13/pflag" - - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/encoding/prototext" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/textutil" @@ -62,6 +62,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" "vitess.io/vitess/go/vt/vttablet/tmclient" ) @@ -74,8 +75,16 @@ var ( ErrMigrationNotFound = errors.New("migration not found") ) +var ( + // fixCompletedTimestampDone fixes a nil `completed_tiemstamp` columns, see + // https://github.com/vitessio/vitess/issues/13927 + // The fix is in release-18.0 + // TODO: remove in release-19.0 + fixCompletedTimestampDone bool +) + var emptyResult = &sqltypes.Result{} -var acceptableDropTableIfExistsErrorCodes = []mysql.ErrorCode{mysql.ERCantFindFile, mysql.ERNoSuchTable} +var acceptableDropTableIfExistsErrorCodes = []sqlerror.ErrorCode{sqlerror.ERCantFindFile, sqlerror.ERNoSuchTable} var copyAlgorithm = sqlparser.AlgorithmValue(sqlparser.CopyStr) var ( @@ -83,6 +92,7 @@ var ( ptOSCOverridePath string migrationCheckInterval = 1 * time.Minute retainOnlineDDLTables = 24 * time.Hour + defaultCutOverThreshold = 10 * time.Second maxConcurrentOnlineDDLs = 256 ) @@ -113,7 +123,7 @@ const ( emptyHint = "" readyToCompleteHint = "ready_to_complete" databasePoolSize = 3 - vreplicationCutOverThreshold = 10 * time.Second + qrBufferExtraTimeout = 5 * time.Second vreplicationTestSuiteWaitSeconds = 5 ) @@ -122,8 +132,6 @@ var ( migrationFailureFileName = "migration-failure.log" onlineDDLUser = "vt-online-ddl-internal" onlineDDLGrant = fmt.Sprintf("'%s'@'%s'", onlineDDLUser, "%") - throttlerOnlineDDLApp = "online-ddl" - throttleCheckFlags = &throttle.CheckFlags{} ) type ConstraintType int @@ -166,7 +174,7 @@ type Executor struct { tabletTypeFunc func() topodatapb.TabletType ts *topo.Server lagThrottler *throttle.Throttler - toggleBufferTableFunc func(cancelCtx context.Context, tableName string, bufferQueries bool) + toggleBufferTableFunc func(cancelCtx context.Context, tableName string, timeout time.Duration, bufferQueries bool) tabletAlias *topodatapb.TabletAlias keyspace string @@ -187,11 +195,14 @@ type Executor struct { tickReentranceFlag int64 reviewedRunningMigrationsFlag bool - ticks *timer.Timer - isOpen int64 - schemaInitialized bool + ticks *timer.Timer + isOpen int64 - initVreplicationDDLOnce sync.Once + // This will be a pointer to the executeQuery function unless + // a custom sidecar database is used, then it will point to + // the executeQueryWithSidecarDBReplacement function. This + // variable assignment must be managed in the Open function. + execQuery func(ctx context.Context, query string) (result *sqltypes.Result, err error) } type cancellableMigration struct { @@ -225,11 +236,20 @@ func newGCTableRetainTime() time.Time { return time.Now().UTC().Add(retainOnlineDDLTables) } +// getMigrationCutOverThreshold returns the cut-over threshold for the given migration. The migration's +// DDL Strategy may excplicitly set the threshold; otherwise, we return the default cut-over threshold. +func getMigrationCutOverThreshold(onlineDDL *schema.OnlineDDL) time.Duration { + if threshold, _ := onlineDDL.StrategySetting().CutOverThreshold(); threshold != 0 { + return threshold + } + return defaultCutOverThreshold +} + // NewExecutor creates a new gh-ost executor. func NewExecutor(env tabletenv.Env, tabletAlias *topodatapb.TabletAlias, ts *topo.Server, lagThrottler *throttle.Throttler, tabletTypeFunc func() topodatapb.TabletType, - toggleBufferTableFunc func(cancelCtx context.Context, tableName string, bufferQueries bool), + toggleBufferTableFunc func(cancelCtx context.Context, tableName string, timeout time.Duration, bufferQueries bool), ) *Executor { // sanitize flags if maxConcurrentOnlineDDLs < 1 { @@ -237,7 +257,7 @@ func NewExecutor(env tabletenv.Env, tabletAlias *topodatapb.TabletAlias, ts *top } return &Executor{ env: env, - tabletAlias: proto.Clone(tabletAlias).(*topodatapb.TabletAlias), + tabletAlias: tabletAlias.CloneVT(), pool: connpool.NewPool(env, "OnlineDDLExecutorPool", tabletenv.ConnPoolConfig{ Size: databasePoolSize, @@ -248,10 +268,15 @@ func NewExecutor(env tabletenv.Env, tabletAlias *topodatapb.TabletAlias, ts *top lagThrottler: lagThrottler, toggleBufferTableFunc: toggleBufferTableFunc, ticks: timer.NewTimer(migrationCheckInterval), + // Gracefully return an error if any caller tries to execute + // a query before the executor has been fully opened. + execQuery: func(ctx context.Context, query string) (result *sqltypes.Result, err error) { + return nil, vterrors.New(vtrpcpb.Code_UNAVAILABLE, "onlineddl executor is closed") + }, } } -func (e *Executor) execQuery(ctx context.Context, query string) (result *sqltypes.Result, err error) { +func (e *Executor) executeQuery(ctx context.Context, query string) (result *sqltypes.Result, err error) { defer e.env.LogError() conn, err := e.pool.Get(ctx, nil) @@ -259,15 +284,33 @@ func (e *Executor) execQuery(ctx context.Context, query string) (result *sqltype return result, err } defer conn.Recycle() + return conn.Exec(ctx, query, math.MaxInt32, true) } +func (e *Executor) executeQueryWithSidecarDBReplacement(ctx context.Context, query string) (result *sqltypes.Result, err error) { + defer e.env.LogError() + + conn, err := e.pool.Get(ctx, nil) + if err != nil { + return result, err + } + defer conn.Recycle() + + // Replace any provided sidecar DB qualifiers with the correct one. + uq, err := sqlparser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) + if err != nil { + return nil, err + } + return conn.Exec(ctx, uq, math.MaxInt32, true) +} + // TabletAliasString returns tablet alias as string (duh) func (e *Executor) TabletAliasString() string { return topoproto.TabletAliasString(e.tabletAlias) } -// InitDBConfig initializes keysapce +// InitDBConfig initializes keyspace func (e *Executor) InitDBConfig(keyspace, shard, dbName string) { e.keyspace = keyspace e.shard = shard @@ -290,6 +333,12 @@ func (e *Executor) Open() error { }) e.vreplicationLastError = make(map[string]*vterrors.LastError) + if sidecar.GetName() != sidecar.DefaultName { + e.execQuery = e.executeQueryWithSidecarDBReplacement + } else { + e.execQuery = e.executeQuery + } + e.pool.Open(e.env.Config().DB.AppWithDB(), e.env.Config().DB.DbaWithDB(), e.env.Config().DB.AppDebugWithDB()) e.ticks.Start(e.onMigrationCheckTick) e.triggerNextCheckInterval() @@ -395,7 +444,9 @@ func (e *Executor) proposedMigrationConflictsWithRunningMigration(runningMigrati // Specifically, if the running migration is an ALTER, and is still busy with copying rows (copy_state), then // we consider the two to be conflicting. But, if the running migration is done copying rows, and is now only // applying binary logs, and is up-to-date, then we consider a new ALTER migration to be non-conflicting. - return atomic.LoadInt64(&runningMigration.ReadyToComplete) == 0 + if atomic.LoadInt64(&runningMigration.WasReadyToComplete) == 0 { + return true + } } return false } @@ -575,7 +626,7 @@ func (e *Executor) parseAlterOptions(ctx context.Context, onlineDDL *schema.Onli } // executeDirectly runs a DDL query directly on the backend MySQL server -func (e *Executor) executeDirectly(ctx context.Context, onlineDDL *schema.OnlineDDL, acceptableMySQLErrorCodes ...mysql.ErrorCode) (acceptableErrorCodeFound bool, err error) { +func (e *Executor) executeDirectly(ctx context.Context, onlineDDL *schema.OnlineDDL, acceptableMySQLErrorCodes ...sqlerror.ErrorCode) (acceptableErrorCodeFound bool, err error) { conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaWithDB()) if err != nil { return false, err @@ -593,7 +644,7 @@ func (e *Executor) executeDirectly(ctx context.Context, onlineDDL *schema.Online if err != nil { // let's see if this error is actually acceptable - if merr, ok := err.(*mysql.SQLError); ok { + if merr, ok := err.(*sqlerror.SQLError); ok { for _, acceptableCode := range acceptableMySQLErrorCodes { if merr.Num == acceptableCode { // we don't consider this to be an error. @@ -671,7 +722,7 @@ func (e *Executor) validateTableForAlterAction(ctx context.Context, onlineDDL *s } // primaryPosition returns the MySQL/MariaDB position (typically GTID pos) on the tablet -func (e *Executor) primaryPosition(ctx context.Context) (pos mysql.Position, err error) { +func (e *Executor) primaryPosition(ctx context.Context) (pos replication.Position, err error) { conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaWithDB()) if err != nil { return pos, err @@ -740,11 +791,13 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er var sentryTableName string - waitForPos := func(s *VReplStream, pos mysql.Position) error { - ctx, cancel := context.WithTimeout(ctx, vreplicationCutOverThreshold) + migrationCutOverThreshold := getMigrationCutOverThreshold(onlineDDL) + + waitForPos := func(s *VReplStream, pos replication.Position) error { + ctx, cancel := context.WithTimeout(ctx, migrationCutOverThreshold) defer cancel() // Wait for target to reach the up-to-date pos - if err := tmClient.VReplicationWaitForPos(ctx, tablet.Tablet, s.id, mysql.EncodePosition(pos)); err != nil { + if err := tmClient.VReplicationWaitForPos(ctx, tablet.Tablet, s.id, replication.EncodePosition(pos)); err != nil { return err } // Target is now in sync with source! @@ -798,7 +851,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er if err != nil { return err } - e.updateMigrationStage(ctx, onlineDDL.UUID, "waiting for post-sentry pos: %v", mysql.EncodePosition(postSentryPos)) + e.updateMigrationStage(ctx, onlineDDL.UUID, "waiting for post-sentry pos: %v", replication.EncodePosition(postSentryPos)) if err := waitForPos(s, postSentryPos); err != nil { return err } @@ -812,19 +865,25 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er defer lockConn.Recycle() defer lockConn.Exec(ctx, sqlUnlockTables, 1, false) + renameCompleteChan := make(chan error) + renameWasSuccessful := false renameConn, err := e.pool.Get(ctx, nil) if err != nil { return err } defer renameConn.Recycle() - defer renameConn.Kill("premature exit while renaming tables", 0) + defer func() { + if !renameWasSuccessful { + renameConn.Kill("premature exit while renaming tables", 0) + } + }() renameQuery := sqlparser.BuildParsedQuery(sqlSwapTables, onlineDDL.Table, sentryTableName, vreplTable, onlineDDL.Table, sentryTableName, vreplTable) waitForRenameProcess := func() error { // This function waits until it finds the RENAME TABLE... query running in MySQL's PROCESSLIST, or until timeout // The function assumes that one of the renamed tables is locked, thus causing the RENAME to block. If nothing // is locked, then the RENAME will be near-instantaneious and it's unlikely that the function will find it. - renameWaitCtx, cancel := context.WithTimeout(ctx, vreplicationCutOverThreshold) + renameWaitCtx, cancel := context.WithTimeout(ctx, migrationCutOverThreshold) defer cancel() for { @@ -838,20 +897,24 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er select { case <-renameWaitCtx.Done(): return vterrors.Errorf(vtrpcpb.Code_ABORTED, "timeout for rename query: %s", renameQuery.Query) + case err := <-renameCompleteChan: + // We expect the RENAME to run and block, not yet complete. The caller of this function + // will only unblock the RENAME after the function is complete + return vterrors.Errorf(vtrpcpb.Code_ABORTED, "rename returned unexpectedly: err=%v", err) case <-time.After(time.Second): // sleep } } } - renameCompleteChan := make(chan error) - bufferingCtx, bufferingContextCancel := context.WithCancel(ctx) defer bufferingContextCancel() // Preparation is complete. We proceed to cut-over. toggleBuffering := func(bufferQueries bool) error { log.Infof("toggling buffering: %t in migration %v", bufferQueries, onlineDDL.UUID) - e.toggleBufferTableFunc(bufferingCtx, onlineDDL.Table, bufferQueries) + timeout := migrationCutOverThreshold + qrBufferExtraTimeout + + e.toggleBufferTableFunc(bufferingCtx, onlineDDL.Table, timeout, bufferQueries) if !bufferQueries { // called after new table is in place. // unbuffer existing queries: @@ -902,7 +965,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er // real production e.updateMigrationStage(ctx, onlineDDL.UUID, "locking tables") - lockCtx, cancel := context.WithTimeout(ctx, vreplicationCutOverThreshold) + lockCtx, cancel := context.WithTimeout(ctx, migrationCutOverThreshold) defer cancel() lockTableQuery := sqlparser.BuildParsedQuery(sqlLockTwoTablesWrite, sentryTableName, onlineDDL.Table) if _, err := lockConn.Exec(lockCtx, lockTableQuery.Query, 1, false); err != nil { @@ -911,6 +974,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er e.updateMigrationStage(ctx, onlineDDL.UUID, "renaming tables") go func() { + defer close(renameCompleteChan) _, err := renameConn.Exec(ctx, renameQuery.Query, 1, false) renameCompleteChan <- err }() @@ -940,12 +1004,12 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er return err } - e.updateMigrationStage(ctx, onlineDDL.UUID, "waiting for post-lock pos: %v", mysql.EncodePosition(postWritesPos)) + e.updateMigrationStage(ctx, onlineDDL.UUID, "waiting for post-lock pos: %v", replication.EncodePosition(postWritesPos)) if err := waitForPos(s, postWritesPos); err != nil { e.updateMigrationStage(ctx, onlineDDL.UUID, "timeout while waiting for post-lock pos: %v", err) return err } - go log.Infof("cutOverVReplMigration %v: done waiting for position %v", s.workflow, mysql.EncodePosition(postWritesPos)) + go log.Infof("cutOverVReplMigration %v: done waiting for position %v", s.workflow, replication.EncodePosition(postWritesPos)) // Stop vreplication e.updateMigrationStage(ctx, onlineDDL.UUID, "stopping vreplication") if _, err := e.vreplicationExec(ctx, tablet.Tablet, binlogplayer.StopVReplication(s.id, "stopped for online DDL cutover")); err != nil { @@ -974,14 +1038,14 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er { dropTableQuery := sqlparser.BuildParsedQuery(sqlDropTable, sentryTableName) - lockCtx, cancel := context.WithTimeout(ctx, vreplicationCutOverThreshold) + lockCtx, cancel := context.WithTimeout(ctx, migrationCutOverThreshold) defer cancel() if _, err := lockConn.Exec(lockCtx, dropTableQuery.Query, 1, false); err != nil { return err } } { - lockCtx, cancel := context.WithTimeout(ctx, vreplicationCutOverThreshold) + lockCtx, cancel := context.WithTimeout(ctx, migrationCutOverThreshold) defer cancel() e.updateMigrationStage(ctx, onlineDDL.UUID, "unlocking tables") if _, err := lockConn.Exec(lockCtx, sqlUnlockTables, 1, false); err != nil { @@ -989,12 +1053,13 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er } } { - lockCtx, cancel := context.WithTimeout(ctx, vreplicationCutOverThreshold) + lockCtx, cancel := context.WithTimeout(ctx, migrationCutOverThreshold) defer cancel() e.updateMigrationStage(lockCtx, onlineDDL.UUID, "waiting for RENAME to complete") if err := <-renameCompleteChan; err != nil { return err } + renameWasSuccessful = true } } } @@ -1257,7 +1322,7 @@ func (e *Executor) initVreplicationOriginalMigration(ctx context.Context, online } } } - v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, onlineDDL.SQL) + v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, onlineDDL.SQL, onlineDDL.StrategySetting().IsAnalyzeTableFlag()) return v, nil } @@ -1311,7 +1376,7 @@ func (e *Executor) initVreplicationRevertMigration(ctx context.Context, onlineDD if err := e.updateArtifacts(ctx, onlineDDL.UUID, vreplTableName); err != nil { return v, err } - v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, "") + v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, "", false) v.pos = revertStream.pos return v, nil } @@ -1321,10 +1386,6 @@ func (e *Executor) ExecuteWithVReplication(ctx context.Context, onlineDDL *schem // make sure there's no vreplication workflow running under same name _ = e.terminateVReplMigration(ctx, onlineDDL.UUID) - if conflictFound, conflictingMigration := e.isAnyConflictingMigrationRunning(onlineDDL); conflictFound { - return vterrors.Wrapf(ErrExecutorMigrationAlreadyRunning, "conflicting migration: %v over table: %v", conflictingMigration.UUID, conflictingMigration.Table) - } - if e.tabletTypeFunc() != topodatapb.TabletType_PRIMARY { return ErrExecutorNotWritableTablet } @@ -1428,10 +1489,6 @@ func (e *Executor) ExecuteWithVReplication(ctx context.Context, onlineDDL *schem // Validation included testing the backend MySQL server and the gh-ost binary itself // Execution runs first a dry run, then an actual migration func (e *Executor) ExecuteWithGhost(ctx context.Context, onlineDDL *schema.OnlineDDL) error { - if conflictFound, conflictingMigration := e.isAnyConflictingMigrationRunning(onlineDDL); conflictFound { - return vterrors.Wrapf(ErrExecutorMigrationAlreadyRunning, "conflicting migration: %v over table: %v", conflictingMigration.UUID, conflictingMigration.Table) - } - if e.tabletTypeFunc() != topodatapb.TabletType_PRIMARY { return ErrExecutorNotWritableTablet } @@ -1575,7 +1632,7 @@ exit $exit_code fmt.Sprintf("--serve-socket-file=%s", serveSocketFile), fmt.Sprintf("--hooks-path=%s", tempDir), fmt.Sprintf(`--hooks-hint-token=%s`, onlineDDL.UUID), - fmt.Sprintf(`--throttle-http=http://localhost:%d/throttler/check?app=%s:gh-ost:%s&p=low`, servenv.Port(), throttlerOnlineDDLApp, onlineDDL.UUID), + fmt.Sprintf(`--throttle-http=http://localhost:%d/throttler/check?app=%s:%s:%s&p=low`, servenv.Port(), throttlerapp.OnlineDDLName, throttlerapp.GhostName, onlineDDL.UUID), fmt.Sprintf(`--database=%s`, e.dbName), fmt.Sprintf(`--table=%s`, onlineDDL.Table), fmt.Sprintf(`--alter=%s`, alterOptions), @@ -1646,10 +1703,6 @@ exit $exit_code // Validation included testing the backend MySQL server and the pt-online-schema-change binary itself // Execution runs first a dry run, then an actual migration func (e *Executor) ExecuteWithPTOSC(ctx context.Context, onlineDDL *schema.OnlineDDL) error { - if conflictFound, conflictingMigration := e.isAnyConflictingMigrationRunning(onlineDDL); conflictFound { - return vterrors.Wrapf(ErrExecutorMigrationAlreadyRunning, "conflicting migration: %v over table: %v", conflictingMigration.UUID, conflictingMigration.Table) - } - if e.tabletTypeFunc() != topodatapb.TabletType_PRIMARY { return ErrExecutorNotWritableTablet } @@ -1726,7 +1779,7 @@ export MYSQL_PWD my ($self, %args) = @_; return sub { - if (head("http://localhost:{{VTTABLET_PORT}}/throttler/check?app={{THROTTLER_ONLINE_DDL_APP}}:pt-osc:{{MIGRATION_UUID}}&p=low")) { + if (head("http://localhost:{{VTTABLET_PORT}}/throttler/check?app={{THROTTLER_ONLINE_DDL_APP}}:{{THROTTLER_PT_OSC_APP}}:{{MIGRATION_UUID}}&p=low")) { # Got HTTP 200 OK, means throttler is happy return 0; } else { @@ -1740,7 +1793,8 @@ export MYSQL_PWD ` pluginCode = strings.ReplaceAll(pluginCode, "{{VTTABLET_PORT}}", fmt.Sprintf("%d", servenv.Port())) pluginCode = strings.ReplaceAll(pluginCode, "{{MIGRATION_UUID}}", onlineDDL.UUID) - pluginCode = strings.ReplaceAll(pluginCode, "{{THROTTLER_ONLINE_DDL_APP}}", throttlerOnlineDDLApp) + pluginCode = strings.ReplaceAll(pluginCode, "{{THROTTLER_ONLINE_DDL_APP}}", throttlerapp.OnlineDDLName.String()) + pluginCode = strings.ReplaceAll(pluginCode, "{{THROTTLER_PT_OSC_APP}}", throttlerapp.PTOSCName.String()) pluginCode = strings.ReplaceAll(pluginCode, "{{OnlineDDLStatusRunning}}", string(schema.OnlineDDLStatusRunning)) pluginCode = strings.ReplaceAll(pluginCode, "{{OnlineDDLStatusComplete}}", string(schema.OnlineDDLStatusComplete)) @@ -1867,15 +1921,13 @@ export MYSQL_PWD func (e *Executor) readMigration(ctx context.Context, uuid string) (onlineDDL *schema.OnlineDDL, row sqltypes.RowNamedValues, err error) { - parsed := sqlparser.BuildParsedQuery(sqlSelectMigration, ":migration_uuid") - bindVars := map[string]*querypb.BindVariable{ - "migration_uuid": sqltypes.StringBindVariable(uuid), - } - bound, err := parsed.GenerateQuery(bindVars, nil) + query, err := sqlparser.ParseAndBind(sqlSelectMigration, + sqltypes.StringBindVariable(uuid), + ) if err != nil { return onlineDDL, nil, err } - r, err := e.execQuery(ctx, bound) + r, err := e.execQuery(ctx, query) if err != nil { return onlineDDL, nil, err } @@ -1885,18 +1937,19 @@ func (e *Executor) readMigration(ctx context.Context, uuid string) (onlineDDL *s return nil, nil, ErrMigrationNotFound } onlineDDL = &schema.OnlineDDL{ - Keyspace: row["keyspace"].ToString(), - Table: row["mysql_table"].ToString(), - Schema: row["mysql_schema"].ToString(), - SQL: row["migration_statement"].ToString(), - UUID: row["migration_uuid"].ToString(), - Strategy: schema.DDLStrategy(row["strategy"].ToString()), - Options: row["options"].ToString(), - Status: schema.OnlineDDLStatus(row["migration_status"].ToString()), - Retries: row.AsInt64("retries", 0), - ReadyToComplete: row.AsInt64("ready_to_complete", 0), - TabletAlias: row["tablet"].ToString(), - MigrationContext: row["migration_context"].ToString(), + Keyspace: row["keyspace"].ToString(), + Table: row["mysql_table"].ToString(), + Schema: row["mysql_schema"].ToString(), + SQL: row["migration_statement"].ToString(), + UUID: row["migration_uuid"].ToString(), + Strategy: schema.DDLStrategy(row["strategy"].ToString()), + Options: row["options"].ToString(), + Status: schema.OnlineDDLStatus(row["migration_status"].ToString()), + Retries: row.AsInt64("retries", 0), + ReadyToComplete: row.AsInt64("ready_to_complete", 0), + WasReadyToComplete: row.AsInt64("was_ready_to_complete", 0), + TabletAlias: row["tablet"].ToString(), + MigrationContext: row["migration_context"].ToString(), } return onlineDDL, row, nil } @@ -1967,7 +2020,7 @@ func (e *Executor) terminateMigration(ctx context.Context, onlineDDL *schema.Onl // CancelMigration attempts to abort a scheduled or a running migration func (e *Executor) CancelMigration(ctx context.Context, uuid string, message string, issuedByUser bool) (result *sqltypes.Result, err error) { if atomic.LoadInt64(&e.isOpen) == 0 { - return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "online ddl is disabled") + return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, schema.ErrOnlineDDLDisabled.Error()) } log.Infof("CancelMigration: request to cancel %s with message: %v", uuid, message) @@ -2038,7 +2091,7 @@ func (e *Executor) cancelMigrations(ctx context.Context, cancellable []*cancella // for this keyspace func (e *Executor) CancelPendingMigrations(ctx context.Context, message string, issuedByUser bool) (result *sqltypes.Result, err error) { if atomic.LoadInt64(&e.isOpen) == 0 { - return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "online ddl is disabled") + return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, schema.ErrOnlineDDLDisabled.Error()) } uuids, err := e.readPendingMigrationsUUIDs(ctx) @@ -2068,7 +2121,7 @@ func (e *Executor) validateThrottleParams(ctx context.Context, expireString stri return duration, ratio, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid EXPIRE value: %s. Try '120s', '30m', '1h', etc. Allowed units are (s)ec, (m)in, (h)hour", expireString) } } - ratio = 1.0 + ratio = throttle.DefaultThrottleRatio if ratioLiteral != nil { ratio, err = strconv.ParseFloat(ratioLiteral.Val, 64) if err != nil || ratio < 0 || ratio > 1 { @@ -2084,10 +2137,10 @@ func (e *Executor) ThrottleMigration(ctx context.Context, uuid string, expireStr if err != nil { return nil, err } - if err := e.lagThrottler.CheckIsReady(); err != nil { + if err := e.lagThrottler.CheckIsOpen(); err != nil { return nil, err } - _ = e.lagThrottler.ThrottleApp(uuid, time.Now().Add(duration), ratio) + _ = e.lagThrottler.ThrottleApp(uuid, time.Now().Add(duration), ratio, false) return emptyResult, nil } @@ -2097,16 +2150,16 @@ func (e *Executor) ThrottleAllMigrations(ctx context.Context, expireString strin if err != nil { return nil, err } - if err := e.lagThrottler.CheckIsReady(); err != nil { + if err := e.lagThrottler.CheckIsOpen(); err != nil { return nil, err } - _ = e.lagThrottler.ThrottleApp(throttlerOnlineDDLApp, time.Now().Add(duration), ratio) + _ = e.lagThrottler.ThrottleApp(throttlerapp.OnlineDDLName.String(), time.Now().Add(duration), ratio, false) return emptyResult, nil } // UnthrottleMigration func (e *Executor) UnthrottleMigration(ctx context.Context, uuid string) (result *sqltypes.Result, err error) { - if err := e.lagThrottler.CheckIsReady(); err != nil { + if err := e.lagThrottler.CheckIsOpen(); err != nil { return nil, err } defer e.triggerNextCheckInterval() @@ -2116,11 +2169,11 @@ func (e *Executor) UnthrottleMigration(ctx context.Context, uuid string) (result // UnthrottleAllMigrations func (e *Executor) UnthrottleAllMigrations(ctx context.Context) (result *sqltypes.Result, err error) { - if err := e.lagThrottler.CheckIsReady(); err != nil { + if err := e.lagThrottler.CheckIsOpen(); err != nil { return nil, err } defer e.triggerNextCheckInterval() - _ = e.lagThrottler.UnthrottleApp(throttlerOnlineDDLApp) + _ = e.lagThrottler.UnthrottleApp(throttlerapp.OnlineDDLName.String()) return emptyResult, nil } @@ -2657,7 +2710,7 @@ func (e *Executor) executeDropDDLActionMigration(ctx context.Context, onlineDDL return err } - acceptableErrorCodes := []mysql.ErrorCode{} + acceptableErrorCodes := []sqlerror.ErrorCode{} if ddlStmt.GetIfExists() { acceptableErrorCodes = acceptableDropTableIfExistsErrorCodes } @@ -2753,32 +2806,6 @@ func (e *Executor) generateSwapTablesStatement(ctx context.Context, tableName1, return parsed.Query, swapTableName, nil } -// renameTableIfApplicable renames a table, assuming it exists and that the target does not exist. -func (e *Executor) renameTableIfApplicable(ctx context.Context, fromTableName, toTableName string) (attemptMade bool, err error) { - if fromTableName == "" { - return false, nil - } - exists, err := e.tableExists(ctx, fromTableName) - if err != nil { - return false, err - } - if !exists { - // can't rename from table when it does not exist - return false, nil - } - exists, err = e.tableExists(ctx, toTableName) - if err != nil { - return false, err - } - if exists { - // target table exists, abort. - return false, nil - } - parsed := sqlparser.BuildParsedQuery(sqlRenameTable, fromTableName, toTableName) - _, err = e.execQuery(ctx, parsed.Query) - return true, err -} - func (e *Executor) executeAlterViewOnline(ctx context.Context, onlineDDL *schema.OnlineDDL) (err error) { artifactViewName, err := schema.GenerateGCTableName(schema.HoldTableGCState, newGCTableRetainTime()) if err != nil { @@ -2803,7 +2830,7 @@ func (e *Executor) executeAlterViewOnline(ctx context.Context, onlineDDL *schema Select: viewStmt.Select, CheckOption: viewStmt.CheckOption, IsReplace: true, - Comments: viewStmt.Comments, + Comments: sqlparser.CloneRefOfParsedComments(viewStmt.Comments), } stmt.SetTable("", artifactViewName) default: @@ -2981,41 +3008,21 @@ func (e *Executor) executeAlterDDLActionMigration(ctx context.Context, onlineDDL // OK, nothing special about this ALTER. Let's go ahead and execute it. switch onlineDDL.Strategy { case schema.DDLStrategyOnline, schema.DDLStrategyVitess: - go func() { - e.migrationMutex.Lock() - defer e.migrationMutex.Unlock() - - if err := e.ExecuteWithVReplication(ctx, onlineDDL, nil); err != nil { - failMigration(err) - } - }() + if err := e.ExecuteWithVReplication(ctx, onlineDDL, nil); err != nil { + return failMigration(err) + } case schema.DDLStrategyGhost: - go func() { - e.migrationMutex.Lock() - defer e.migrationMutex.Unlock() - - if err := e.ExecuteWithGhost(ctx, onlineDDL); err != nil { - failMigration(err) - } - }() + if err := e.ExecuteWithGhost(ctx, onlineDDL); err != nil { + return failMigration(err) + } case schema.DDLStrategyPTOSC: - go func() { - e.migrationMutex.Lock() - defer e.migrationMutex.Unlock() - - if err := e.ExecuteWithPTOSC(ctx, onlineDDL); err != nil { - failMigration(err) - } - }() + if err := e.ExecuteWithPTOSC(ctx, onlineDDL); err != nil { + return failMigration(err) + } case schema.DDLStrategyMySQL: - go func() { - e.migrationMutex.Lock() - defer e.migrationMutex.Unlock() - - if _, err := e.executeDirectly(ctx, onlineDDL); err != nil { - failMigration(err) - } - }() + if _, err := e.executeDirectly(ctx, onlineDDL); err != nil { + return failMigration(err) + } default: { return failMigration(fmt.Errorf("Unsupported strategy: %+v", onlineDDL.Strategy)) @@ -3160,14 +3167,9 @@ func (e *Executor) executeMigration(ctx context.Context, onlineDDL *schema.Onlin case sqlparser.AlterDDLAction: return e.executeAlterDDLActionMigration(ctx, onlineDDL) case sqlparser.RevertDDLAction: - go func() { - e.migrationMutex.Lock() - defer e.migrationMutex.Unlock() - - if err := e.executeRevert(ctx, onlineDDL); err != nil { - failMigration(err) - } - }() + if err := e.executeRevert(ctx, onlineDDL); err != nil { + failMigration(err) + } } return nil } @@ -3349,7 +3351,7 @@ func (e *Executor) readVReplStream(ctx context.Context, uuid string, okIfMissing timeThrottled: row.AsInt64("time_throttled", 0), componentThrottled: row.AsString("component_throttled", ""), transactionTimestamp: row.AsInt64("transaction_timestamp", 0), - state: row.AsString("state", ""), + state: binlogdatapb.VReplicationWorkflowState(binlogdatapb.VReplicationWorkflowState_value[row.AsString("state", "")]), message: row.AsString("message", ""), rowsCopied: row.AsInt64("rows_copied", 0), bls: &binlogdatapb.BinlogSource{}, @@ -3362,7 +3364,7 @@ func (e *Executor) readVReplStream(ctx context.Context, uuid string, okIfMissing // isVReplMigrationReadyToCutOver sees if the vreplication migration has completed the row copy // and is up to date with the binlogs. -func (e *Executor) isVReplMigrationReadyToCutOver(ctx context.Context, s *VReplStream) (isReady bool, err error) { +func (e *Executor) isVReplMigrationReadyToCutOver(ctx context.Context, onlineDDL *schema.OnlineDDL, s *VReplStream) (isReady bool, err error) { // Check all the cases where migration is still running: { // when ready to cut-over, pos must have some value @@ -3377,15 +3379,17 @@ func (e *Executor) isVReplMigrationReadyToCutOver(ctx context.Context, s *VReplS durationDiff := func(t1, t2 time.Time) time.Duration { return t1.Sub(t2).Abs() } + migrationCutOverThreshold := getMigrationCutOverThreshold(onlineDDL) + timeNow := time.Now() timeUpdated := time.Unix(s.timeUpdated, 0) - if durationDiff(timeNow, timeUpdated) > vreplicationCutOverThreshold { + if durationDiff(timeNow, timeUpdated) > migrationCutOverThreshold { return false, nil } // Let's look at transaction timestamp. This gets written by any ongoing // writes on the server (whether on this table or any other table) transactionTimestamp := time.Unix(s.transactionTimestamp, 0) - if durationDiff(timeNow, transactionTimestamp) > vreplicationCutOverThreshold { + if durationDiff(timeNow, transactionTimestamp) > migrationCutOverThreshold { return false, nil } } @@ -3416,27 +3420,6 @@ func (e *Executor) isVReplMigrationReadyToCutOver(ctx context.Context, s *VReplS return true, nil } -// isVReplMigrationRunning sees if there is a VReplication migration actively running -func (e *Executor) isVReplMigrationRunning(ctx context.Context, uuid string) (isRunning bool, s *VReplStream, err error) { - s, err = e.readVReplStream(ctx, uuid, true) - if err != nil { - return false, s, err - } - if s == nil { - return false, s, nil - } - switch s.state { - case binlogplayer.BlpError: - return false, s, nil - case binlogplayer.VReplicationInit, binlogplayer.VReplicationCopying, binlogplayer.BlpRunning: - return true, s, nil - } - if strings.Contains(strings.ToLower(s.message), "error") { - return false, s, nil - } - return false, s, nil -} - // reviewRunningMigrations iterates migrations in 'running' state. Normally there's only one running, which was // spawned by this tablet; but vreplication migrations could also resume from failure. func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning int, cancellable []*cancellableMigration, err error) { @@ -3448,17 +3431,15 @@ func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning i } var currentUserThrottleRatio float64 - if err := e.lagThrottler.CheckIsReady(); err == nil { - // No point in reviewing throttler info if it's not enabled&open - for _, app := range e.lagThrottler.ThrottledApps() { - if app.AppName == throttlerOnlineDDLApp { - currentUserThrottleRatio = app.Ratio - break - } + + // No point in reviewing throttler info if it's not enabled&open + for _, app := range e.lagThrottler.ThrottledApps() { + if throttlerapp.OnlineDDLName.Equals(app.AppName) { + currentUserThrottleRatio = app.Ratio + break } } - var throttlerOnce sync.Once r, err := e.execQuery(ctx, sqlSelectRunningMigrations) if err != nil { return countRunnning, cancellable, err @@ -3527,9 +3508,9 @@ func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning i _ = e.updateRowsCopied(ctx, uuid, s.rowsCopied) _ = e.updateMigrationProgressByRowsCopied(ctx, uuid, s.rowsCopied) _ = e.updateMigrationETASecondsByProgress(ctx, uuid) - _ = e.updateMigrationLastThrottled(ctx, uuid, s.timeThrottled, s.componentThrottled) + _ = e.updateMigrationLastThrottled(ctx, uuid, time.Unix(s.timeThrottled, 0), s.componentThrottled) - isReady, err := e.isVReplMigrationReadyToCutOver(ctx, s) + isReady, err := e.isVReplMigrationReadyToCutOver(ctx, onlineDDL, s) if err != nil { _ = e.updateMigrationMessage(ctx, uuid, err.Error()) return countRunnning, cancellable, err @@ -3559,34 +3540,15 @@ func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning i if err := e.cutOverVReplMigration(ctx, s); err != nil { _ = e.updateMigrationMessage(ctx, uuid, err.Error()) log.Errorf("cutOverVReplMigration failed: err=%v", err) - if merr, ok := err.(*mysql.SQLError); ok { + if merr, ok := err.(*sqlerror.SQLError); ok { switch merr.Num { - case mysql.ERTooLongIdent: + case sqlerror.ERTooLongIdent: go e.CancelMigration(ctx, uuid, err.Error(), false) } } return countRunnning, cancellable, err } } - go throttlerOnce.Do(func() { - if e.lagThrottler.CheckIsReady() != nil { - return - } - // Self healing: in the following scenario: - // - a vitess migration - // - with on demand heartbeats - // - the streamer running on a replica - // - the streamer was throttled for long enough - // - then vplayer and vcopier are locked, waiting for the streamer to do something - // - since they are blocked, they're not running throttler checks - // - since streamer runs on replica, it only checks that replica - // - therefore no one asking for on-demand heartbeats - // - then, if the conditions for the streamer's throttling are done, the streamer then thinks there's replication lag, with nothing to remediate it. - // - it's a deadlock. - // And so, once per reviewRunningMigrations(), and assuming there _are_ running migrations, we ensure to hit a throttler check. This will kick - // on-demand heartbeats, unlocking the deadlock. - e.lagThrottler.CheckByType(ctx, throttlerOnlineDDLApp, "", throttleCheckFlags, throttle.ThrottleCheckPrimaryWrite) - }) } } case schema.DDLStrategyPTOSC: @@ -3616,7 +3578,7 @@ func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning i // is rogue. Maybe executed by another tablet. Anyway, if we don't own it, we can't // complete the migration. Even if it runs, the logic around announcing it as complete // is missing. So we may as well cancel it. - message := fmt.Sprintf("cancelling a gh-ost running migration %s which is not owned (not started, or is assumed to be terminated) by this executor", uuid) + message := fmt.Sprintf("cancelling a gh-ost running migration %s which is not owned by this executor. This can happen when the migration was started by a different tablet. Then, either a MySQL failure, a PRS, or ERS took place. gh-ost does not survive a MySQL restart or a shard failing over to a new PRIMARY", uuid) cancellable = append(cancellable, newCancellableMigration(uuid, message)) } } @@ -3786,6 +3748,17 @@ func (e *Executor) gcArtifacts(ctx context.Context) error { e.migrationMutex.Lock() defer e.migrationMutex.Unlock() + // v18 fix. Remove in v19 + if !fixCompletedTimestampDone { + if _, err := e.execQuery(ctx, sqlFixCompletedTimestamp); err != nil { + // This query fixes a bug where stale migrations were marked as 'cancelled' or 'failed' without updating 'completed_timestamp' + // Running this query retroactively sets completed_timestamp + // This fix is created in v18 and can be removed in v19 + return err + } + fixCompletedTimestampDone = true + } + query, err := sqlparser.ParseAndBind(sqlSelectUncollectedArtifacts, sqltypes.Int64BindVariable(int64((retainOnlineDDLTables).Seconds())), ) @@ -3815,7 +3788,10 @@ func (e *Executor) gcArtifacts(ctx context.Context) error { log.Infof("Executor.gcArtifacts: will GC artifact %s for migration %s", artifactTable, uuid) timestampInThePast := timeNow.Add(time.Duration(-i) * time.Second).UTC() toTableName, err := e.gcArtifactTable(ctx, artifactTable, uuid, timestampInThePast) - if err != nil { + if err == nil { + // artifact was renamed away and is gone. There' no need to list it in `artifacts` column. + e.clearSingleArtifact(ctx, uuid, artifactTable) + } else { return vterrors.Wrapf(err, "in gcArtifacts() for %s", artifactTable) } log.Infof("Executor.gcArtifacts: renamed away artifact %s to %s", artifactTable, toTableName) @@ -4180,6 +4156,7 @@ func (e *Executor) updateMigrationProgress(ctx context.Context, uuid string, pro func (e *Executor) updateMigrationProgressByRowsCopied(ctx context.Context, uuid string, rowsCopied int64) error { query, err := sqlparser.ParseAndBind(sqlUpdateMigrationProgressByRowsCopied, + sqltypes.Int64BindVariable(rowsCopied), sqltypes.Int64BindVariable(rowsCopied), sqltypes.StringBindVariable(uuid), ) @@ -4201,9 +4178,9 @@ func (e *Executor) updateMigrationETASecondsByProgress(ctx context.Context, uuid return err } -func (e *Executor) updateMigrationLastThrottled(ctx context.Context, uuid string, lastThrottledUnixTime int64, throttledCompnent string) error { +func (e *Executor) updateMigrationLastThrottled(ctx context.Context, uuid string, lastThrottledTime time.Time, throttledCompnent string) error { query, err := sqlparser.ParseAndBind(sqlUpdateLastThrottled, - sqltypes.Int64BindVariable(lastThrottledUnixTime), + sqltypes.StringBindVariable(lastThrottledTime.Format(sqltypes.TimestampFormat)), sqltypes.StringBindVariable(throttledCompnent), sqltypes.StringBindVariable(uuid), ) @@ -4279,8 +4256,13 @@ func (e *Executor) updateMigrationSetImmediateOperation(ctx context.Context, uui } func (e *Executor) updateMigrationReadyToComplete(ctx context.Context, uuid string, isReady bool) error { - query, err := sqlparser.ParseAndBind(sqlUpdateMigrationReadyToComplete, - sqltypes.BoolBindVariable(isReady), + var queryTemplate string + if isReady { + queryTemplate = sqlSetMigrationReadyToComplete + } else { + queryTemplate = sqlClearMigrationReadyToComplete + } + query, err := sqlparser.ParseAndBind(queryTemplate, sqltypes.StringBindVariable(uuid), ) if err != nil { @@ -4294,6 +4276,7 @@ func (e *Executor) updateMigrationReadyToComplete(ctx context.Context, uuid stri var storeValue int64 if isReady { storeValue = 1 + atomic.StoreInt64(&runningMigration.WasReadyToComplete, 1) // WasReadyToComplete is set once and never cleared } atomic.StoreInt64(&runningMigration.ReadyToComplete, storeValue) } @@ -4332,7 +4315,7 @@ func (e *Executor) retryMigrationWhere(ctx context.Context, whereExpr string) (r // RetryMigration marks given migration for retry func (e *Executor) RetryMigration(ctx context.Context, uuid string) (result *sqltypes.Result, err error) { if atomic.LoadInt64(&e.isOpen) == 0 { - return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "online ddl is disabled") + return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, schema.ErrOnlineDDLDisabled.Error()) } if !schema.IsOnlineDDLUUID(uuid) { return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "Not a valid migration ID in RETRY: %s", uuid) @@ -4356,7 +4339,7 @@ func (e *Executor) RetryMigration(ctx context.Context, uuid string) (result *sql // next iteration of gcArtifacts() picks up the migration's artifacts and schedules them for deletion func (e *Executor) CleanupMigration(ctx context.Context, uuid string) (result *sqltypes.Result, err error) { if atomic.LoadInt64(&e.isOpen) == 0 { - return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "online ddl is disabled") + return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, schema.ErrOnlineDDLDisabled.Error()) } if !schema.IsOnlineDDLUUID(uuid) { return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "Not a valid migration ID in CLEANUP: %s", uuid) @@ -4382,7 +4365,7 @@ func (e *Executor) CleanupMigration(ctx context.Context, uuid string) (result *s // CompleteMigration clears the postpone_completion flag for a given migration, assuming it was set in the first place func (e *Executor) CompleteMigration(ctx context.Context, uuid string) (result *sqltypes.Result, err error) { if atomic.LoadInt64(&e.isOpen) == 0 { - return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "online ddl is disabled") + return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, schema.ErrOnlineDDLDisabled.Error()) } if !schema.IsOnlineDDLUUID(uuid) { return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "Not a valid migration ID in COMPLETE: %s", uuid) @@ -4416,7 +4399,7 @@ func (e *Executor) CompleteMigration(ctx context.Context, uuid string) (result * // for this keyspace func (e *Executor) CompletePendingMigrations(ctx context.Context) (result *sqltypes.Result, err error) { if atomic.LoadInt64(&e.isOpen) == 0 { - return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "online ddl is disabled") + return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, schema.ErrOnlineDDLDisabled.Error()) } uuids, err := e.readPendingMigrationsUUIDs(ctx) @@ -4441,7 +4424,7 @@ func (e *Executor) CompletePendingMigrations(ctx context.Context) (result *sqlty // LaunchMigration clears the postpone_launch flag for a given migration, assuming it was set in the first place func (e *Executor) LaunchMigration(ctx context.Context, uuid string, shardsArg string) (result *sqltypes.Result, err error) { if atomic.LoadInt64(&e.isOpen) == 0 { - return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "online ddl is disabled") + return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, schema.ErrOnlineDDLDisabled.Error()) } if !schema.IsOnlineDDLUUID(uuid) { return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "Not a valid migration ID in EXECUTE: %s", uuid) @@ -4473,7 +4456,7 @@ func (e *Executor) LaunchMigration(ctx context.Context, uuid string, shardsArg s // LaunchMigrations launches all launch-postponed queued migrations for this keyspace func (e *Executor) LaunchMigrations(ctx context.Context) (result *sqltypes.Result, err error) { if atomic.LoadInt64(&e.isOpen) == 0 { - return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "online ddl is disabled") + return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, schema.ErrOnlineDDLDisabled.Error()) } uuids, err := e.readPendingMigrationsUUIDs(ctx) @@ -4595,7 +4578,7 @@ func (e *Executor) SubmitMigration( stmt sqlparser.Statement, ) (*sqltypes.Result, error) { if atomic.LoadInt64(&e.isOpen) == 0 { - return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "online ddl is disabled") + return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, schema.ErrOnlineDDLDisabled.Error()) } log.Infof("SubmitMigration: request to submit migration with statement: %0.50s...", sqlparser.CanonicalString(stmt)) @@ -4651,6 +4634,11 @@ func (e *Executor) SubmitMigration( revertedUUID, _ := onlineDDL.GetRevertUUID() // Empty value if the migration is not actually a REVERT. Safe to ignore error. retainArtifactsSeconds := int64((retainOnlineDDLTables).Seconds()) + if retainArtifacts, _ := onlineDDL.StrategySetting().RetainArtifactsDuration(); retainArtifacts != 0 { + // Explicit retention indicated by `--retain-artifact` DDL strategy flag for this migration. Override! + retainArtifactsSeconds = int64((retainArtifacts).Seconds()) + } + _, allowConcurrentMigration := e.allowConcurrentMigration(onlineDDL) submitQuery, err := sqlparser.ParseAndBind(sqlInsertMigration, sqltypes.StringBindVariable(onlineDDL.UUID), @@ -4690,10 +4678,35 @@ func (e *Executor) SubmitMigration( return result, nil } +// ShowMigrations shows migrations, optionally filtered by a condition +func (e *Executor) ShowMigrations(ctx context.Context, show *sqlparser.Show) (result *sqltypes.Result, err error) { + if atomic.LoadInt64(&e.isOpen) == 0 { + return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, schema.ErrOnlineDDLDisabled.Error()) + } + showBasic, ok := show.Internal.(*sqlparser.ShowBasic) + if !ok { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] ShowMigrations expects a ShowBasic statement. Got: %s", sqlparser.String(show)) + } + if showBasic.Command != sqlparser.VitessMigrations { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] ShowMigrations expects a VitessMigrations command, got %+v. Statement: %s", showBasic.Command, sqlparser.String(show)) + } + whereExpr := "" + if showBasic.Filter != nil { + if showBasic.Filter.Filter != nil { + whereExpr = fmt.Sprintf(" where %s", sqlparser.String(showBasic.Filter.Filter)) + } else if showBasic.Filter.Like != "" { + lit := sqlparser.String(sqlparser.NewStrLiteral(showBasic.Filter.Like)) + whereExpr = fmt.Sprintf(" where migration_uuid LIKE %s OR migration_context LIKE %s OR migration_status LIKE %s", lit, lit, lit) + } + } + query := sqlparser.BuildParsedQuery(sqlShowMigrationsWhere, whereExpr).Query + return e.execQuery(ctx, query) +} + // ShowMigrationLogs reads the migration log for a given migration func (e *Executor) ShowMigrationLogs(ctx context.Context, stmt *sqlparser.ShowMigrationLogs) (result *sqltypes.Result, err error) { if atomic.LoadInt64(&e.isOpen) == 0 { - return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "online ddl is disabled") + return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, schema.ErrOnlineDDLDisabled.Error()) } _, row, err := e.readMigration(ctx, stmt.UUID) if err != nil { diff --git a/go/vt/vttablet/onlineddl/schema.go b/go/vt/vttablet/onlineddl/schema.go index 51f5ca48e7e..4698c75a9d5 100644 --- a/go/vt/vttablet/onlineddl/schema.go +++ b/go/vt/vttablet/onlineddl/schema.go @@ -66,7 +66,8 @@ const ( migration_uuid=%a ` sqlUpdateMigrationStatusFailedOrCancelled = `UPDATE _vt.schema_migrations - SET migration_status=IF(cancelled_timestamp IS NULL, 'failed', 'cancelled') + SET migration_status=IF(cancelled_timestamp IS NULL, 'failed', 'cancelled'), + completed_timestamp=NOW(6) WHERE migration_uuid=%a ` @@ -95,8 +96,14 @@ const ( WHERE migration_uuid=%a ` - sqlUpdateMigrationReadyToComplete = `UPDATE _vt.schema_migrations - SET ready_to_complete=%a + sqlSetMigrationReadyToComplete = `UPDATE _vt.schema_migrations SET + ready_to_complete=1, + ready_to_complete_timestamp=NOW(6) + WHERE + migration_uuid=%a + ` + sqlClearMigrationReadyToComplete = `UPDATE _vt.schema_migrations SET + ready_to_complete=0 WHERE migration_uuid=%a ` @@ -132,7 +139,7 @@ const ( migration_uuid=%a ` sqlClearSingleArtifact = `UPDATE _vt.schema_migrations - SET artifacts=replace(artifacts, concat(%a, ','), ''), cleanup_timestamp=NULL + SET artifacts=replace(artifacts, concat(%a, ','), '') WHERE migration_uuid=%a ` @@ -207,6 +214,7 @@ const ( ` sqlUpdateMigrationProgressByRowsCopied = `UPDATE _vt.schema_migrations SET + table_rows=GREATEST(table_rows, %a), progress=CASE WHEN table_rows=0 THEN 100 ELSE LEAST(100, 100*%a/table_rows) @@ -227,7 +235,7 @@ const ( migration_uuid=%a ` sqlUpdateLastThrottled = `UPDATE _vt.schema_migrations - SET last_throttled_timestamp=FROM_UNIXTIME(%a), component_throttled=%a + SET last_throttled_timestamp=%a, component_throttled=%a WHERE migration_uuid=%a ` @@ -338,7 +346,7 @@ const ( log_path FROM _vt.schema_migrations WHERE - migration_status IN ('complete', 'failed') + migration_status IN ('complete', 'cancelled', 'failed') AND cleanup_timestamp IS NULL AND completed_timestamp <= IF(retain_artifacts_seconds=0, NOW() - INTERVAL %a SECOND, @@ -349,10 +357,14 @@ const ( SET completed_timestamp=NOW(6) WHERE - migration_status='failed' + migration_status IN ('cancelled', 'failed') AND cleanup_timestamp IS NULL AND completed_timestamp IS NULL ` + sqlShowMigrationsWhere = `SELECT * + FROM _vt.schema_migrations + %s + ` sqlSelectMigration = `SELECT id, migration_uuid, @@ -381,6 +393,7 @@ const ( retain_artifacts_seconds, is_view, ready_to_complete, + ready_to_complete_timestamp is not null as was_ready_to_complete, reverted_uuid, rows_copied, vitess_liveness_indicator, @@ -451,6 +464,7 @@ const ( COLUMNS.CHARACTER_SET_NAME as character_set_name, LOCATE('auto_increment', EXTRA) > 0 as is_auto_increment, (DATA_TYPE='float' OR DATA_TYPE='double') AS is_float, + has_subpart, has_nullable FROM INFORMATION_SCHEMA.COLUMNS INNER JOIN ( SELECT @@ -460,6 +474,7 @@ const ( COUNT(*) AS COUNT_COLUMN_IN_INDEX, GROUP_CONCAT(COLUMN_NAME ORDER BY SEQ_IN_INDEX ASC) AS COLUMN_NAMES, SUBSTRING_INDEX(GROUP_CONCAT(COLUMN_NAME ORDER BY SEQ_IN_INDEX ASC), ',', 1) AS FIRST_COLUMN_NAME, + SUM(SUB_PART IS NOT NULL) > 0 AS has_subpart, SUM(NULLABLE='YES') > 0 AS has_nullable FROM INFORMATION_SCHEMA.STATISTICS WHERE @@ -484,6 +499,10 @@ const ( WHEN 0 THEN 0 ELSE 1 END, + CASE has_subpart + WHEN 0 THEN 0 + ELSE 1 + END, CASE IFNULL(CHARACTER_SET_NAME, '') WHEN '' THEN 0 ELSE 1 @@ -503,6 +522,7 @@ const ( sqlDropTableIfExists = "DROP TABLE IF EXISTS `%a`" sqlShowColumnsFrom = "SHOW COLUMNS FROM `%a`" sqlShowTableStatus = "SHOW TABLE STATUS LIKE '%a'" + sqlAnalyzeTable = "ANALYZE NO_WRITE_TO_BINLOG TABLE `%a`" sqlShowCreateTable = "SHOW CREATE TABLE `%a`" sqlGetAutoIncrement = ` SELECT @@ -551,13 +571,6 @@ const ( sqlFindProcess = "SELECT id, Info as info FROM information_schema.processlist WHERE id=%a AND Info LIKE %a" ) -const ( - retryMigrationHint = "retry" - cancelMigrationHint = "cancel" - cancelAllMigrationHint = "cancel-all" - completeMigrationHint = "complete" -) - var ( sqlCreateOnlineDDLUser = []string{ `CREATE USER IF NOT EXISTS %s IDENTIFIED BY '%s'`, diff --git a/go/vt/vttablet/onlineddl/vrepl.go b/go/vt/vttablet/onlineddl/vrepl.go index 9fe5a68f5d8..cc669e11c11 100644 --- a/go/vt/vttablet/onlineddl/vrepl.go +++ b/go/vt/vttablet/onlineddl/vrepl.go @@ -31,18 +31,20 @@ import ( "strconv" "strings" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/textutil" - "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/dbconnpool" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/onlineddl/vrepl" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) // VReplStream represents a row in _vt.vreplication table @@ -56,7 +58,7 @@ type VReplStream struct { timeThrottled int64 componentThrottled string transactionTimestamp int64 - state string + state binlogdatapb.VReplicationWorkflowState message string rowsCopied int64 bls *binlogdatapb.BinlogSource @@ -75,7 +77,7 @@ func (v *VReplStream) livenessTimeIndicator() int64 { // isRunning() returns true when the workflow is actively running func (v *VReplStream) isRunning() bool { switch v.state { - case binlogplayer.VReplicationInit, binlogplayer.VReplicationCopying, binlogplayer.BlpRunning: + case binlogdatapb.VReplicationWorkflowState_Init, binlogdatapb.VReplicationWorkflowState_Copying, binlogdatapb.VReplicationWorkflowState_Running: return true } return false @@ -84,7 +86,7 @@ func (v *VReplStream) isRunning() bool { // hasError() returns true when the workflow has failed and will not retry func (v *VReplStream) hasError() (isTerminal bool, vreplError error) { switch { - case v.state == binlogplayer.BlpError: + case v.state == binlogdatapb.VReplicationWorkflowState_Error: return true, errors.New(v.message) case strings.Contains(strings.ToLower(v.message), "error"): return false, errors.New(v.message) @@ -104,6 +106,8 @@ type VRepl struct { alterQuery string tableRows int64 + analyzeTable bool + sourceSharedColumns *vrepl.ColumnList targetSharedColumns *vrepl.ColumnList droppedSourceNonGeneratedColumns *vrepl.ColumnList @@ -130,7 +134,7 @@ type VRepl struct { } // NewVRepl creates a VReplication handler for Online DDL -func NewVRepl(workflow, keyspace, shard, dbName, sourceTable, targetTable, alterQuery string) *VRepl { +func NewVRepl(workflow, keyspace, shard, dbName, sourceTable, targetTable, alterQuery string, analyzeTable bool) *VRepl { return &VRepl{ workflow: workflow, keyspace: keyspace, @@ -139,6 +143,7 @@ func NewVRepl(workflow, keyspace, shard, dbName, sourceTable, targetTable, alter sourceTable: sourceTable, targetTable: targetTable, alterQuery: alterQuery, + analyzeTable: analyzeTable, parser: vrepl.NewAlterTableParser(), enumToTextMap: map[string]string{}, intToEnumMap: map[string]bool{}, @@ -217,6 +222,7 @@ func (v *VRepl) readTableUniqueKeys(ctx context.Context, conn *dbconnpool.DBConn Name: row.AsString("index_name", ""), Columns: *vrepl.ParseColumnList(row.AsString("column_names", "")), HasNullable: row.AsBool("has_nullable", false), + HasSubpart: row.AsBool("has_subpart", false), HasFloat: row.AsBool("is_float", false), IsAutoIncrement: row.AsBool("is_auto_increment", false), } @@ -225,6 +231,13 @@ func (v *VRepl) readTableUniqueKeys(ctx context.Context, conn *dbconnpool.DBConn return uniqueKeys, nil } +// executeAnalyzeTable runs an ANALYZE TABLE command +func (v *VRepl) executeAnalyzeTable(ctx context.Context, conn *dbconnpool.DBConnection, tableName string) error { + parsed := sqlparser.BuildParsedQuery(sqlAnalyzeTable, tableName) + _, err := conn.ExecuteFetch(parsed.Query, 1, false) + return err +} + // readTableStatus reads table status information func (v *VRepl) readTableStatus(ctx context.Context, conn *dbconnpool.DBConnection, tableName string) (tableRows int64, err error) { parsed := sqlparser.BuildParsedQuery(sqlShowTableStatus, tableName) @@ -334,6 +347,11 @@ func (v *VRepl) analyzeAlter(ctx context.Context) error { } func (v *VRepl) analyzeTables(ctx context.Context, conn *dbconnpool.DBConnection) (err error) { + if v.analyzeTable { + if err := v.executeAnalyzeTable(ctx, conn, v.sourceTable); err != nil { + return err + } + } v.tableRows, err = v.readTableStatus(ctx, conn, v.sourceTable) if err != nil { return err @@ -481,20 +499,19 @@ func (v *VRepl) generateFilterQuery(ctx context.Context) error { case sourceCol.Type == vrepl.StringColumnType: // Check source and target charset/encoding. If needed, create // a binlogdatapb.CharsetConversion entry (later written to vreplication) - fromEncoding, ok := mysql.CharacterSetEncoding[sourceCol.Charset] - if !ok { + fromCollation := collations.Local().DefaultCollationForCharset(sourceCol.Charset) + if fromCollation == collations.Unknown { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Character set %s not supported for column %s", sourceCol.Charset, sourceCol.Name) } - toEncoding, ok := mysql.CharacterSetEncoding[targetCol.Charset] + toCollation := collations.Local().DefaultCollationForCharset(targetCol.Charset) // Let's see if target col is at all textual - if targetCol.Type == vrepl.StringColumnType && !ok { + if targetCol.Type == vrepl.StringColumnType && toCollation == collations.Unknown { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Character set %s not supported for column %s", targetCol.Charset, targetCol.Name) } - if fromEncoding == nil && toEncoding == nil && targetCol.Type != vrepl.JSONColumnType { - // Both source and target have trivial charsets + + if trivialCharset(fromCollation) && trivialCharset(toCollation) && targetCol.Type != vrepl.JSONColumnType { sb.WriteString(escapeName(name)) } else { - // encoding can be nil for trivial charsets, like utf8, ascii, binary, etc. v.convertCharset[targetName] = &binlogdatapb.CharsetConversion{ FromCharset: sourceCol.Charset, ToCharset: targetCol.Charset, @@ -517,6 +534,14 @@ func (v *VRepl) generateFilterQuery(ctx context.Context) error { return nil } +func trivialCharset(c collations.ID) bool { + if c == collations.Unknown { + return true + } + utf8mb4Charset := charset.Charset_utf8mb4{} + return utf8mb4Charset.IsSuperset(colldata.Lookup(c).Charset()) || c == collations.CollationBinaryID +} + func (v *VRepl) analyzeBinlogSource(ctx context.Context) { bls := &binlogdatapb.BinlogSource{ Keyspace: v.keyspace, @@ -565,7 +590,7 @@ func (v *VRepl) analyze(ctx context.Context, conn *dbconnpool.DBConnection) erro // generateInsertStatement generates the INSERT INTO _vt.replication stataement that creates the vreplication workflow func (v *VRepl) generateInsertStatement(ctx context.Context) (string, error) { - ig := vreplication.NewInsertGenerator(binlogplayer.BlpStopped, v.dbName) + ig := vreplication.NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Stopped, v.dbName) ig.AddRow(v.workflow, v.bls, v.pos, "", "in_order:REPLICA,PRIMARY", binlogdatapb.VReplicationWorkflowType_OnlineDDL, binlogdatapb.VReplicationWorkflowSubType_None, false) diff --git a/go/vt/vttablet/onlineddl/vrepl/types.go b/go/vt/vttablet/onlineddl/vrepl/types.go index d0390029f02..e4ddff6d58e 100644 --- a/go/vt/vttablet/onlineddl/vrepl/types.go +++ b/go/vt/vttablet/onlineddl/vrepl/types.go @@ -268,6 +268,7 @@ type UniqueKey struct { Name string Columns ColumnList HasNullable bool + HasSubpart bool HasFloat bool IsAutoIncrement bool } diff --git a/go/vt/vttablet/onlineddl/vrepl/unique_key.go b/go/vt/vttablet/onlineddl/vrepl/unique_key.go index c24ea7710c1..cc649b4ea37 100644 --- a/go/vt/vttablet/onlineddl/vrepl/unique_key.go +++ b/go/vt/vttablet/onlineddl/vrepl/unique_key.go @@ -32,6 +32,12 @@ func UniqueKeyValidForIteration(uniqueKey *UniqueKey) bool { // Thus, we cannot use this unique key for iteration. return false } + if uniqueKey.HasSubpart { + // vreplication does not fully support indexes on column prefixes such as: + // UNIQUE KEY `name_idx` (`name`(15)) + // "HasSubpart" means some column covered by the index has a key length spec. + return false + } if uniqueKey.HasFloat { // float & double data types are imprecise and we cannot use them while iterating unique keys return false diff --git a/go/vt/vttablet/queryservice/fakes/stream_health_query_service.go b/go/vt/vttablet/queryservice/fakes/stream_health_query_service.go index 6b874f6f98f..6b430603088 100644 --- a/go/vt/vttablet/queryservice/fakes/stream_health_query_service.go +++ b/go/vt/vttablet/queryservice/fakes/stream_health_query_service.go @@ -19,8 +19,6 @@ package fakes import ( "context" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -53,7 +51,7 @@ func NewStreamHealthQueryService(target *querypb.Target) *StreamHealthQueryServi return &StreamHealthQueryService{ QueryService: ErrorQueryService, healthResponses: make(chan *querypb.StreamHealthResponse, 1000), - target: proto.Clone(target).(*querypb.Target), + target: target.CloneVT(), } } @@ -81,7 +79,7 @@ func (q *StreamHealthQueryService) StreamHealth(ctx context.Context, callback fu // The response will have default values typical for a healthy tablet. func (q *StreamHealthQueryService) AddDefaultHealthResponse() { q.healthResponses <- &querypb.StreamHealthResponse{ - Target: proto.Clone(q.target).(*querypb.Target), + Target: q.target.CloneVT(), Serving: true, RealtimeStats: &querypb.RealtimeStats{ ReplicationLagSeconds: DefaultReplicationLagSeconds, @@ -93,7 +91,7 @@ func (q *StreamHealthQueryService) AddDefaultHealthResponse() { // Only "qps" is different in this message. func (q *StreamHealthQueryService) AddHealthResponseWithQPS(qps float64) { q.healthResponses <- &querypb.StreamHealthResponse{ - Target: proto.Clone(q.target).(*querypb.Target), + Target: q.target.CloneVT(), Serving: true, RealtimeStats: &querypb.RealtimeStats{ Qps: qps, @@ -106,7 +104,7 @@ func (q *StreamHealthQueryService) AddHealthResponseWithQPS(qps float64) { // buffer channel. Only "replication_lag_seconds" is different in this message. func (q *StreamHealthQueryService) AddHealthResponseWithReplicationLag(replicationLag uint32) { q.healthResponses <- &querypb.StreamHealthResponse{ - Target: proto.Clone(q.target).(*querypb.Target), + Target: q.target.CloneVT(), Serving: true, RealtimeStats: &querypb.RealtimeStats{ ReplicationLagSeconds: replicationLag, @@ -118,7 +116,7 @@ func (q *StreamHealthQueryService) AddHealthResponseWithReplicationLag(replicati // buffer channel. Only "Serving" is different in this message. func (q *StreamHealthQueryService) AddHealthResponseWithNotServing() { q.healthResponses <- &querypb.StreamHealthResponse{ - Target: proto.Clone(q.target).(*querypb.Target), + Target: q.target.CloneVT(), Serving: false, RealtimeStats: &querypb.RealtimeStats{ ReplicationLagSeconds: DefaultReplicationLagSeconds, diff --git a/go/vt/vttablet/queryservice/queryservice.go b/go/vt/vttablet/queryservice/queryservice.go index 1b9bdac13e9..c4d72b0c927 100644 --- a/go/vt/vttablet/queryservice/queryservice.go +++ b/go/vt/vttablet/queryservice/queryservice.go @@ -98,6 +98,10 @@ type QueryService interface { // VStreamRows streams rows of a table from the specified starting point. VStreamRows(ctx context.Context, request *binlogdatapb.VStreamRowsRequest, send func(*binlogdatapb.VStreamRowsResponse) error) error + // VStreamTables streams rows of all tables + + VStreamTables(ctx context.Context, request *binlogdatapb.VStreamTablesRequest, send func(*binlogdatapb.VStreamTablesResponse) error) error + // VStreamResults streams results along with the gtid of the snapshot. VStreamResults(ctx context.Context, target *querypb.Target, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error diff --git a/go/vt/vttablet/queryservice/wrapped.go b/go/vt/vttablet/queryservice/wrapped.go index 376c228b02a..910a1d8948e 100644 --- a/go/vt/vttablet/queryservice/wrapped.go +++ b/go/vt/vttablet/queryservice/wrapped.go @@ -254,6 +254,13 @@ func (ws *wrappedService) VStreamRows(ctx context.Context, request *binlogdatapb }) } +func (ws *wrappedService) VStreamTables(ctx context.Context, request *binlogdatapb.VStreamTablesRequest, send func(response *binlogdatapb.VStreamTablesResponse) error) error { + return ws.wrapper(ctx, request.Target, ws.impl, "VStreamTables", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (bool, error) { + innerErr := conn.VStreamTables(ctx, request, send) + return false, innerErr + }) +} + func (ws *wrappedService) VStreamResults(ctx context.Context, target *querypb.Target, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error { return ws.wrapper(ctx, target, ws.impl, "VStreamResults", false, func(ctx context.Context, target *querypb.Target, conn QueryService) (bool, error) { innerErr := conn.VStreamResults(ctx, target, query, send) diff --git a/go/vt/vttablet/sandboxconn/sandboxconn.go b/go/vt/vttablet/sandboxconn/sandboxconn.go index 63847a4ed31..b58a793db43 100644 --- a/go/vt/vttablet/sandboxconn/sandboxconn.go +++ b/go/vt/vttablet/sandboxconn/sandboxconn.go @@ -25,6 +25,7 @@ import ( "sync/atomic" "time" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/sqlparser" @@ -45,6 +46,9 @@ type SandboxConn struct { // These errors work for all functions. MustFailCodes map[vtrpcpb.Code]int + // ServingKeyspaces is a list of serving keyspaces + ServingKeyspaces []string + // These errors are triggered only for specific functions. // For now these are just for the 2PC functions. MustFailPrepare int @@ -414,9 +418,9 @@ func (sbc *SandboxConn) MessageAck(ctx context.Context, target *querypb.Target, // SandboxSQRowCount is the default number of fake splits returned. var SandboxSQRowCount = int64(10) -// StreamHealth is not implemented. +// StreamHealth always mocks a "healthy" result. func (sbc *SandboxConn) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error { - return fmt.Errorf("not implemented in test") + return nil } // ExpectVStreamStartPos makes the conn verify that that the next vstream request has the right startPos. @@ -499,6 +503,11 @@ func (sbc *SandboxConn) VStreamRows(ctx context.Context, request *binlogdatapb.V return fmt.Errorf("not implemented in test") } +// VStreamTables is part of the QueryService interface. +func (sbc *SandboxConn) VStreamTables(ctx context.Context, request *binlogdatapb.VStreamTablesRequest, send func(response *binlogdatapb.VStreamTablesResponse) error) error { + return fmt.Errorf("not implemented in test") +} + // VStreamResults is part of the QueryService interface. func (sbc *SandboxConn) VStreamResults(ctx context.Context, target *querypb.Target, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error { return fmt.Errorf("not implemented in test") @@ -509,6 +518,11 @@ func (sbc *SandboxConn) QueryServiceByAlias(_ *topodatapb.TabletAlias, _ *queryp return sbc, nil } +// GetServingKeyspaces returns list of serving keyspaces. +func (sbc *SandboxConn) GetServingKeyspaces() []string { + return sbc.ServingKeyspaces +} + // HandlePanic is part of the QueryService interface. func (sbc *SandboxConn) HandlePanic(err *error) { } @@ -678,8 +692,10 @@ func getSingleRowResult() *sqltypes.Result { fields := SingleRowResult.Fields for _, field := range fields { singleRowResult.Fields = append(singleRowResult.Fields, &querypb.Field{ - Name: field.Name, - Type: field.Type, + Name: field.Name, + Type: field.Type, + Charset: field.Charset, + Flags: field.Flags, }) } @@ -689,8 +705,8 @@ func getSingleRowResult() *sqltypes.Result { // SingleRowResult is returned when there is no pre-stored result. var SingleRowResult = &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "value", Type: sqltypes.VarChar}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "value", Type: sqltypes.VarChar, Charset: collations.CollationUtf8mb4ID}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -703,8 +719,8 @@ var SingleRowResult = &sqltypes.Result{ // StreamRowResult is SingleRowResult with RowsAffected set to 0. var StreamRowResult = &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, - {Name: "value", Type: sqltypes.VarChar}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "value", Type: sqltypes.VarChar, Charset: collations.CollationUtf8mb4ID}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), diff --git a/go/vt/vttablet/tabletconntest/fakequeryservice.go b/go/vt/vttablet/tabletconntest/fakequeryservice.go index 8bfb40bceee..cfe540ead42 100644 --- a/go/vt/vttablet/tabletconntest/fakequeryservice.go +++ b/go/vt/vttablet/tabletconntest/fakequeryservice.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "io" "testing" "vitess.io/vitess/go/vt/vttablet/queryservice" @@ -655,7 +656,7 @@ var TestStreamHealthStreamHealthResponse = &querypb.StreamHealthResponse{ }, Serving: true, - TabletExternallyReparentedTimestamp: 1234589, + PrimaryTermStartTimestamp: 1234589, RealtimeStats: &querypb.RealtimeStats{ CpuUsage: 1.0, @@ -681,7 +682,7 @@ func (f *FakeQueryService) StreamHealth(ctx context.Context, callback func(*quer if shr == nil { shr = TestStreamHealthStreamHealthResponse } - if err := callback(shr); err != nil { + if err := callback(shr); err != nil && err != io.EOF { f.t.Logf("StreamHealth callback failed: %v", err) } return nil @@ -697,6 +698,11 @@ func (f *FakeQueryService) VStreamRows(ctx context.Context, request *binlogdatap panic("not implemented") } +// VStreamTables is part of the QueryService interface. +func (f *FakeQueryService) VStreamTables(ctx context.Context, request *binlogdatapb.VStreamTablesRequest, send func(*binlogdatapb.VStreamTablesResponse) error) error { + panic("not implemented") +} + // VStreamResults is part of the QueryService interface. func (f *FakeQueryService) VStreamResults(ctx context.Context, target *querypb.Target, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error { panic("not implemented") @@ -707,6 +713,11 @@ func (f *FakeQueryService) QueryServiceByAlias(_ *topodatapb.TabletAlias, _ *que panic("not implemented") } +// GetServingKeyspaces returns list of serving keyspaces. +func (f *FakeQueryService) GetServingKeyspaces() []string { + panic("not implemented") +} + // ReserveBeginExecute satisfies the Gateway interface func (f *FakeQueryService) ReserveBeginExecute(ctx context.Context, target *querypb.Target, preQueries []string, postBeginQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions) (queryservice.ReservedTransactionState, *sqltypes.Result, error) { panic("implement me") diff --git a/go/vt/vttablet/tabletconntest/tabletconntest.go b/go/vt/vttablet/tabletconntest/tabletconntest.go index 23d4a3ce2e2..b279ac53726 100644 --- a/go/vt/vttablet/tabletconntest/tabletconntest.go +++ b/go/vt/vttablet/tabletconntest/tabletconntest.go @@ -1049,7 +1049,7 @@ func SetProtocol(name string, protocol string) { tabletconn.RegisterFlags(fs) }) - servenv.ParseFlags(name) + servenv.ParseFlagsForTests(name) if err := pflag.Set(tabletProtocolFlagName, protocol); err != nil { msg := "failed to set flag %q to %q: %v" diff --git a/go/vt/vttablet/tabletmanager/framework_test.go b/go/vt/vttablet/tabletmanager/framework_test.go new file mode 100644 index 00000000000..7d112add1d3 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/framework_test.go @@ -0,0 +1,482 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tabletmanager + +import ( + "context" + "fmt" + "regexp" + "strings" + "sync" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/grpcclient" + "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vttablet/queryservice" + "vitess.io/vitess/go/vt/vttablet/tabletconn" + "vitess.io/vitess/go/vt/vttablet/tabletconntest" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + "vitess.io/vitess/go/vt/vttablet/tmclient" + "vitess.io/vitess/go/vt/vttablet/tmclienttest" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +const ( + gtidFlavor = "MySQL56" + gtidPosition = "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-220" +) + +func init() { + tabletconn.RegisterDialer("grpc", func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + return &tabletconntest.FakeQueryService{ + StreamHealthResponse: &querypb.StreamHealthResponse{ + Serving: true, + Target: &querypb.Target{ + Keyspace: tablet.Keyspace, + Shard: tablet.Shard, + TabletType: tablet.Type, + Cell: tablet.Alias.Cell, + }, + RealtimeStats: &querypb.RealtimeStats{}, + }, + }, nil + }) +} + +type testEnv struct { + mu sync.Mutex + ctx context.Context + ts *topo.Server + cells []string + mysqld *mysqlctl.FakeMysqlDaemon + tmc *fakeTMClient + dbName string + protoName string +} + +func newTestEnv(t *testing.T, ctx context.Context, sourceKeyspace string, sourceShards []string) *testEnv { + tenv := &testEnv{ + ctx: context.Background(), + tmc: newFakeTMClient(), + cells: []string{"zone1"}, + dbName: "tmtestdb", + protoName: t.Name(), + } + tenv.mu.Lock() + defer tenv.mu.Unlock() + tenv.ts = memorytopo.NewServer(ctx, tenv.cells...) + tenv.tmc.sourceKeyspace = sourceKeyspace + tenv.tmc.sourceShards = sourceShards + tenv.tmc.schema = defaultSchema + + tabletconn.RegisterDialer(t.Name(), func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + tenv.mu.Lock() + defer tenv.mu.Unlock() + if qs, ok := tenv.tmc.tablets[int(tablet.Alias.Uid)]; ok { + return qs, nil + } + return nil, fmt.Errorf("tablet %d not found", tablet.Alias.Uid) + }) + tabletconntest.SetProtocol(fmt.Sprintf("go.vt.vttablet.tabletmanager.framework_test_%s", t.Name()), tenv.protoName) + tmclient.RegisterTabletManagerClientFactory(t.Name(), func() tmclient.TabletManagerClient { + return tenv.tmc + }) + tmclienttest.SetProtocol(fmt.Sprintf("go.vt.vttablet.tabletmanager.framework_test_%s", t.Name()), tenv.protoName) + + tenv.mysqld = mysqlctl.NewFakeMysqlDaemon(fakesqldb.New(t)) + var err error + tenv.mysqld.CurrentPrimaryPosition, err = replication.ParsePosition(gtidFlavor, gtidPosition) + require.NoError(t, err) + + return tenv +} + +func (tenv *testEnv) close() { + tenv.mu.Lock() + defer tenv.mu.Unlock() + tenv.ts.Close() + tenv.mysqld.Close() +} + +//-------------------------------------- +// Tablets + +func (tenv *testEnv) addTablet(t *testing.T, id int, keyspace, shard string) *fakeTabletConn { + tenv.mu.Lock() + defer tenv.mu.Unlock() + tablet := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: tenv.cells[0], + Uid: uint32(id), + }, + Keyspace: keyspace, + Shard: shard, + Type: topodatapb.TabletType_PRIMARY, + PortMap: map[string]int32{ + tenv.protoName: int32(id), + }, + } + if err := tenv.ts.InitTablet(tenv.ctx, tablet, false /* allowPrimaryOverride */, true /* createShardAndKeyspace */, false /* allowUpdate */); err != nil { + panic(err) + } + if _, err := tenv.ts.UpdateShardFields(tenv.ctx, keyspace, shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = tablet.Alias + si.IsPrimaryServing = true + return nil + }); err != nil { + panic(err) + } + if err := tenv.ts.EnsureVSchema(tenv.ctx, keyspace); err != nil { + panic(err) + } + + vrdbClient := binlogplayer.NewMockDBClient(t) + vrdbClient.Tag = fmt.Sprintf("tablet:%d", id) + tenv.tmc.tablets[id] = &fakeTabletConn{ + tablet: tablet, + vrdbClient: vrdbClient, + } + + dbClientFactory := func() binlogplayer.DBClient { + return tenv.tmc.tablets[id].vrdbClient + } + tenv.tmc.tablets[id].vrengine = vreplication.NewTestEngine(tenv.ts, tenv.cells[0], tenv.mysqld, dbClientFactory, dbClientFactory, tenv.dbName, nil) + tenv.tmc.tablets[id].vrdbClient.ExpectRequest(fmt.Sprintf("select * from _vt.vreplication where db_name='%s'", tenv.dbName), &sqltypes.Result{}, nil) + tenv.tmc.tablets[id].vrengine.Open(tenv.ctx) + require.True(t, tenv.tmc.tablets[id].vrengine.IsOpen(), "vreplication engine was not open") + + tenv.tmc.tablets[id].tm = &TabletManager{ + VREngine: tenv.tmc.tablets[id].vrengine, + DBConfigs: &dbconfigs.DBConfigs{ + DBName: tenv.dbName, + }, + } + + return tenv.tmc.tablets[id] +} + +func (tenv *testEnv) deleteTablet(tablet *topodatapb.Tablet) { + tenv.mu.Lock() + defer tenv.mu.Unlock() + tenv.tmc.tablets[int(tablet.Alias.Uid)].vrdbClient.Close() + tenv.tmc.tablets[int(tablet.Alias.Uid)].vrengine.Close() + tenv.ts.DeleteTablet(tenv.ctx, tablet.Alias) + // This is not automatically removed from shard replication, which results in log spam. + topo.DeleteTabletReplicationData(tenv.ctx, tenv.ts, tablet) +} + +// fakeTabletConn implements the TabletConn and QueryService interfaces. +type fakeTabletConn struct { + queryservice.QueryService + tablet *topodatapb.Tablet + tm *TabletManager + vrdbClient *binlogplayer.MockDBClient + vrengine *vreplication.Engine +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) Begin(ctx context.Context, target *querypb.Target, options *querypb.ExecuteOptions) (queryservice.TransactionState, error) { + return queryservice.TransactionState{ + TransactionID: 1, + }, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) Commit(ctx context.Context, target *querypb.Target, transactionID int64) (int64, error) { + return 0, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) Rollback(ctx context.Context, target *querypb.Target, transactionID int64) (int64, error) { + return 0, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) Prepare(ctx context.Context, target *querypb.Target, transactionID int64, dtid string) (err error) { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) CommitPrepared(ctx context.Context, target *querypb.Target, dtid string) (err error) { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) RollbackPrepared(ctx context.Context, target *querypb.Target, dtid string, originalID int64) (err error) { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) CreateTransaction(ctx context.Context, target *querypb.Target, dtid string, participants []*querypb.Target) (err error) { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) StartCommit(ctx context.Context, target *querypb.Target, transactionID int64, dtid string) (err error) { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) SetRollback(ctx context.Context, target *querypb.Target, dtid string, transactionID int64) (err error) { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) ConcludeTransaction(ctx context.Context, target *querypb.Target, dtid string) (err error) { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) ReadTransaction(ctx context.Context, target *querypb.Target, dtid string) (metadata *querypb.TransactionMetadata, err error) { + return nil, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) Execute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID, reservedID int64, options *querypb.ExecuteOptions) (*sqltypes.Result, error) { + return nil, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) StreamExecute(ctx context.Context, target *querypb.Target, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, reservedID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) BeginExecute(ctx context.Context, target *querypb.Target, preQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, reservedID int64, options *querypb.ExecuteOptions) (queryservice.TransactionState, *sqltypes.Result, error) { + return queryservice.TransactionState{ + TransactionID: 1, + }, nil, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) BeginStreamExecute(ctx context.Context, target *querypb.Target, preQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, reservedID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) (queryservice.TransactionState, error) { + return queryservice.TransactionState{ + TransactionID: 1, + }, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) MessageStream(ctx context.Context, target *querypb.Target, name string, callback func(*sqltypes.Result) error) error { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) MessageAck(ctx context.Context, target *querypb.Target, name string, ids []*querypb.Value) (count int64, err error) { + return 0, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) VStream(ctx context.Context, request *binlogdatapb.VStreamRequest, send func([]*binlogdatapb.VEvent) error) error { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) VStreamRows(ctx context.Context, request *binlogdatapb.VStreamRowsRequest, send func(*binlogdatapb.VStreamRowsResponse) error) error { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) VStreamResults(ctx context.Context, target *querypb.Target, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) HandlePanic(err *error) { +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) ReserveBeginExecute(ctx context.Context, target *querypb.Target, preQueries []string, postBeginQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions) (queryservice.ReservedTransactionState, *sqltypes.Result, error) { + return queryservice.ReservedTransactionState{ + ReservedID: 1, + }, nil, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) ReserveBeginStreamExecute(ctx context.Context, target *querypb.Target, preQueries []string, postBeginQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) (queryservice.ReservedTransactionState, error) { + return queryservice.ReservedTransactionState{ + ReservedID: 1, + }, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) ReserveExecute(ctx context.Context, target *querypb.Target, preQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions) (queryservice.ReservedState, *sqltypes.Result, error) { + return queryservice.ReservedState{ + ReservedID: 1, + }, nil, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) ReserveStreamExecute(ctx context.Context, target *querypb.Target, preQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) (queryservice.ReservedState, error) { + return queryservice.ReservedState{ + ReservedID: 1, + }, nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) Release(ctx context.Context, target *querypb.Target, transactionID, reservedID int64) error { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) GetSchema(ctx context.Context, target *querypb.Target, tableType querypb.SchemaTableType, tableNames []string, callback func(schemaRes *querypb.GetSchemaResponse) error) error { + return nil +} + +// fakeTabletConn implements the QueryService interface. +func (ftc *fakeTabletConn) Close(ctx context.Context) error { + return nil +} + +func (ftc *fakeTabletConn) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error { + return callback(&querypb.StreamHealthResponse{ + Serving: true, + Target: &querypb.Target{ + Keyspace: ftc.tablet.Keyspace, + Shard: ftc.tablet.Shard, + TabletType: ftc.tablet.Type, + Cell: ftc.tablet.Alias.Cell, + }, + RealtimeStats: &querypb.RealtimeStats{}, + }) +} + +//---------------------------------------------- +// fakeTMClient + +type fakeTMClient struct { + tmclient.TabletManagerClient + sourceKeyspace string + sourceShards []string + tablets map[int]*fakeTabletConn + schema *tabletmanagerdatapb.SchemaDefinition + vreQueries map[int]map[string]*querypb.QueryResult +} + +func newFakeTMClient() *fakeTMClient { + return &fakeTMClient{ + tablets: make(map[int]*fakeTabletConn), + vreQueries: make(map[int]map[string]*querypb.QueryResult), + schema: &tabletmanagerdatapb.SchemaDefinition{}, + } +} + +func (tmc *fakeTMClient) GetSchema(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.GetSchemaRequest) (*tabletmanagerdatapb.SchemaDefinition, error) { + return tmc.schema, nil +} + +func (tmc *fakeTMClient) SetSchema(schema *tabletmanagerdatapb.SchemaDefinition) { + tmc.schema = schema +} + +func (tmc *fakeTMClient) ExecuteFetchAsApp(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, req *tabletmanagerdatapb.ExecuteFetchAsAppRequest) (*querypb.QueryResult, error) { + // Reuse VReplicationExec + return tmc.VReplicationExec(ctx, tablet, string(req.Query)) +} + +func (tmc *fakeTMClient) ExecuteFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, req *tabletmanagerdatapb.ExecuteFetchAsDbaRequest) (*querypb.QueryResult, error) { + // Reuse VReplicationExec + return tmc.VReplicationExec(ctx, tablet, string(req.Query)) +} + +// setVReplicationExecResults allows you to specify VReplicationExec queries +// and their results. You can specify exact strings or strings prefixed with +// a '/', in which case they will be treated as a valid regexp. +func (tmc *fakeTMClient) setVReplicationExecResults(tablet *topodatapb.Tablet, query string, result *sqltypes.Result) { + queries, ok := tmc.vreQueries[int(tablet.Alias.Uid)] + if !ok { + queries = make(map[string]*querypb.QueryResult) + tmc.vreQueries[int(tablet.Alias.Uid)] = queries + } + queries[query] = sqltypes.ResultToProto3(result) +} + +func (tmc *fakeTMClient) VReplicationExec(ctx context.Context, tablet *topodatapb.Tablet, query string) (*querypb.QueryResult, error) { + if result, ok := tmc.vreQueries[int(tablet.Alias.Uid)][query]; ok { + return result, nil + } + for qry, res := range tmc.vreQueries[int(tablet.Alias.Uid)] { + if strings.HasPrefix(qry, "/") { + re := regexp.MustCompile(qry) + if re.MatchString(qry) { + return res, nil + } + } + } + return nil, fmt.Errorf("query %q not found for tablet %d", query, tablet.Alias.Uid) +} + +func (tmc *fakeTMClient) CreateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) { + return tmc.tablets[int(tablet.Alias.Uid)].tm.CreateVReplicationWorkflow(ctx, req) +} + +func (tmc *fakeTMClient) ReadVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { + resp := &tabletmanagerdatapb.ReadVReplicationWorkflowResponse{ + Workflow: req.Workflow, + WorkflowSubType: binlogdatapb.VReplicationWorkflowSubType_None, + WorkflowType: binlogdatapb.VReplicationWorkflowType_MoveTables, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}, + Streams: make([]*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream, len(tmc.sourceShards)), + } + rules := make([]*binlogdatapb.Rule, len(defaultSchema.TableDefinitions)) + for i, table := range defaultSchema.TableDefinitions { + rules[i] = &binlogdatapb.Rule{ + Match: table.Name, + Filter: tablet.Shard, + } + } + for i, shard := range tmc.sourceShards { + resp.Streams[i] = &tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{ + Id: int32(i + 1), + Bls: &binlogdatapb.BinlogSource{ + Keyspace: tmc.sourceKeyspace, + Shard: shard, + Filter: &binlogdatapb.Filter{ + Rules: rules, + }, + }, + } + } + + return resp, nil +} + +func (tmc *fakeTMClient) PrimaryPosition(ctx context.Context, tablet *topodatapb.Tablet) (string, error) { + return fmt.Sprintf("%s/%s", gtidFlavor, gtidPosition), nil +} + +func (tmc *fakeTMClient) VReplicationWaitForPos(ctx context.Context, tablet *topodatapb.Tablet, id int32, pos string) error { + return nil +} + +func (tmc *fakeTMClient) ExecuteFetchAsAllPrivs(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ExecuteFetchAsAllPrivsRequest) (*querypb.QueryResult, error) { + return &querypb.QueryResult{ + RowsAffected: 1, + }, nil +} diff --git a/go/vt/vttablet/tabletmanager/restore.go b/go/vt/vttablet/tabletmanager/restore.go index afd0f5c0365..ab1e32a5b5e 100644 --- a/go/vt/vttablet/tabletmanager/restore.go +++ b/go/vt/vttablet/tabletmanager/restore.go @@ -24,6 +24,11 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/protoutil" + + "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/hook" @@ -53,6 +58,9 @@ var ( restoreFromBackupTsStr string restoreConcurrency = 4 waitForBackupInterval time.Duration + + statsRestoreBackupTime *stats.String + statsRestoreBackupPosition *stats.String ) func registerRestoreFlags(fs *pflag.FlagSet) { @@ -93,6 +101,9 @@ func init() { servenv.OnParseFor("vtcombo", registerPointInTimeRestoreFlags) servenv.OnParseFor("vttablet", registerPointInTimeRestoreFlags) + + statsRestoreBackupTime = stats.NewString("RestoredBackupTime") + statsRestoreBackupPosition = stats.NewString("RestorePosition") } // RestoreData is the main entry point for backup restore. @@ -144,7 +155,7 @@ func (tm *TabletManager) RestoreData(ctx context.Context, logger logutil.Logger, startTime = time.Now() req := &tabletmanagerdatapb.RestoreFromBackupRequest{ - BackupTime: logutil.TimeToProto(backupTime), + BackupTime: protoutil.TimeToProto(backupTime), } err = tm.restoreDataLocked(ctx, logger, waitForBackupInterval, deleteBeforeRestore, req) if err != nil { @@ -174,7 +185,12 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L return vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, fmt.Sprintf("snapshot keyspace %v has no base_keyspace set", tablet.Keyspace)) } keyspace = keyspaceInfo.BaseKeyspace - log.Infof("Using base_keyspace %v to restore keyspace %v using a backup time of %v", keyspace, tablet.Keyspace, logutil.ProtoToTime(request.BackupTime)) + log.Infof("Using base_keyspace %v to restore keyspace %v using a backup time of %v", keyspace, tablet.Keyspace, protoutil.TimeFromProto(request.BackupTime).UTC()) + } + + startTime := protoutil.TimeFromProto(request.BackupTime).UTC() + if startTime.IsZero() { + startTime = protoutil.TimeFromProto(keyspaceInfo.SnapshotTime).UTC() } params := mysqlctl.RestoreParams{ @@ -187,17 +203,24 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L DbName: topoproto.TabletDbName(tablet), Keyspace: keyspace, Shard: tablet.Shard, - StartTime: logutil.ProtoToTime(request.BackupTime), + StartTime: startTime, DryRun: request.DryRun, Stats: backupstats.RestoreStats(), } + if request.RestoreToPos != "" && !protoutil.TimeFromProto(request.RestoreToTimestamp).UTC().IsZero() { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "--restore_to_pos and --restore_to_timestamp are mutually exclusive") + } if request.RestoreToPos != "" { - pos, err := mysql.DecodePosition(request.RestoreToPos) + pos, err := replication.DecodePosition(request.RestoreToPos) if err != nil { return vterrors.Wrapf(err, "restore failed: unable to decode --restore_to_pos: %s", request.RestoreToPos) } params.RestoreToPos = pos } + if restoreToTimestamp := protoutil.TimeFromProto(request.RestoreToTimestamp).UTC(); !restoreToTimestamp.IsZero() { + // Restore to given timestamp + params.RestoreToTimestamp = restoreToTimestamp + } params.Logger.Infof("Restore: original tablet type=%v", originalType) // Check whether we're going to restore before changing to RESTORE type, @@ -222,6 +245,10 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L var backupManifest *mysqlctl.BackupManifest for { backupManifest, err = mysqlctl.Restore(ctx, params) + if backupManifest != nil { + statsRestoreBackupPosition.Set(replication.EncodePosition(backupManifest.Position)) + statsRestoreBackupTime.Set(backupManifest.BackupTime) + } params.Logger.Infof("Restore: got a restore manifest: %v, err=%v, waitForBackupInterval=%v", backupManifest, err, waitForBackupInterval) if waitForBackupInterval == 0 { break @@ -239,10 +266,10 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L } } - var pos mysql.Position + var pos replication.Position if backupManifest != nil { pos = backupManifest.Position - params.Logger.Infof("Restore: pos=%v", mysql.EncodePosition(pos)) + params.Logger.Infof("Restore: pos=%v", replication.EncodePosition(pos)) } // If SnapshotTime is set , then apply the incremental change if keyspaceInfo.SnapshotTime != nil { @@ -283,8 +310,9 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L // Do nothing here, let the rest of code run params.Logger.Infof("Dry run. No changes made") default: + bgCtx := context.Background() // If anything failed, we should reset the original tablet type - if err := tm.tmState.ChangeTabletType(ctx, originalType, DBActionNone); err != nil { + if err := tm.tmState.ChangeTabletType(bgCtx, originalType, DBActionNone); err != nil { log.Errorf("Could not change back to original tablet type %v: %v", originalType, err) } return vterrors.Wrap(err, "Can't restore backup") @@ -305,12 +333,13 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L } params.Logger.Infof("Restore: changing tablet type to %v for %s", originalType, tm.tabletAlias.String()) // Change type back to original type if we're ok to serve. - return tm.tmState.ChangeTabletType(ctx, originalType, DBActionNone) + bgCtx := context.Background() + return tm.tmState.ChangeTabletType(bgCtx, originalType, DBActionNone) } // restoreToTimeFromBinlog restores to the snapshot time of the keyspace // currently this works with mysql based database only (as it uses mysql specific queries for restoring) -func (tm *TabletManager) restoreToTimeFromBinlog(ctx context.Context, pos mysql.Position, restoreTime *vttime.Time) error { +func (tm *TabletManager) restoreToTimeFromBinlog(ctx context.Context, pos replication.Position, restoreTime *vttime.Time) error { // validate the minimal settings necessary for connecting to binlog server if binlogHost == "" || binlogPort <= 0 || binlogUser == "" { log.Warning("invalid binlog server setting, restoring to last available backup.") @@ -350,7 +379,7 @@ func (tm *TabletManager) restoreToTimeFromBinlog(ctx context.Context, pos mysql. // beforePos is the GTID of the last event before restoreTime. This is the GTID upto which replication will be applied // afterPos can be used directly in the query `START SLAVE UNTIL SQL_BEFORE_GTIDS = ”` // beforePos will be used to check if replication was able to catch up from the binlog server -func (tm *TabletManager) getGTIDFromTimestamp(ctx context.Context, pos mysql.Position, restoreTime int64) (afterPos string, beforePos string, err error) { +func (tm *TabletManager) getGTIDFromTimestamp(ctx context.Context, pos replication.Position, restoreTime int64) (afterPos string, beforePos string, err error) { connParams := &mysql.ConnParams{ Host: binlogHost, Port: binlogPort, @@ -393,11 +422,11 @@ func (tm *TabletManager) getGTIDFromTimestamp(ctx context.Context, pos mysql.Pos gtidsChan := make(chan []string, 1) go func() { - err := vsClient.VStream(ctx, mysql.EncodePosition(pos), filter, func(events []*binlogdatapb.VEvent) error { + err := vsClient.VStream(ctx, replication.EncodePosition(pos), filter, func(events []*binlogdatapb.VEvent) error { for _, event := range events { if event.Gtid != "" { // check if we reached the lastPos then return - eventPos, err := mysql.DecodePosition(event.Gtid) + eventPos, err := replication.DecodePosition(event.Gtid) if err != nil { return err } @@ -440,14 +469,14 @@ func (tm *TabletManager) getGTIDFromTimestamp(ctx context.Context, pos mysql.Pos func (tm *TabletManager) catchupToGTID(ctx context.Context, afterGTIDPos string, beforeGTIDPos string) error { var afterGTIDStr string if afterGTIDPos != "" { - afterGTIDParsed, err := mysql.DecodePosition(afterGTIDPos) + afterGTIDParsed, err := replication.DecodePosition(afterGTIDPos) if err != nil { return err } afterGTIDStr = afterGTIDParsed.GTIDSet.Last() } - beforeGTIDPosParsed, err := mysql.DecodePosition(beforeGTIDPos) + beforeGTIDPosParsed, err := replication.DecodePosition(beforeGTIDPos) if err != nil { return err } @@ -546,7 +575,7 @@ func (tm *TabletManager) disableReplication(ctx context.Context) error { return nil } -func (tm *TabletManager) startReplication(ctx context.Context, pos mysql.Position, tabletType topodatapb.TabletType) error { +func (tm *TabletManager) startReplication(ctx context.Context, pos replication.Position, tabletType topodatapb.TabletType) error { cmds := []string{ "STOP SLAVE", "RESET SLAVE ALL", // "ALL" makes it forget primary host:port. @@ -586,7 +615,7 @@ func (tm *TabletManager) startReplication(ctx context.Context, pos mysql.Positio log.Warningf("Can't get primary replication position after restore: %v", err) return nil } - primaryPos, err := mysql.DecodePosition(posStr) + primaryPos, err := replication.DecodePosition(posStr) if err != nil { return vterrors.Wrapf(err, "can't decode primary replication position: %q", posStr) } @@ -610,17 +639,3 @@ func (tm *TabletManager) startReplication(ctx context.Context, pos mysql.Positio return nil } - -func (tm *TabletManager) getLocalMetadataValues(tabletType topodatapb.TabletType) map[string]string { - tablet := tm.Tablet() - values := map[string]string{ - "Alias": topoproto.TabletAliasString(tablet.Alias), - "ClusterAlias": fmt.Sprintf("%s.%s", tablet.Keyspace, tablet.Shard), - "DataCenter": tablet.Alias.Cell, - "PromotionRule": "must_not", - } - if isPrimaryEligible(tabletType) { - values["PromotionRule"] = "neutral" - } - return values -} diff --git a/go/vt/vttablet/tabletmanager/rpc_actions.go b/go/vt/vttablet/tabletmanager/rpc_actions.go index 1093c331a1a..16d3513355c 100644 --- a/go/vt/vttablet/tabletmanager/rpc_actions.go +++ b/go/vt/vttablet/tabletmanager/rpc_actions.go @@ -17,13 +17,12 @@ limitations under the License. package tabletmanager import ( + "context" "fmt" "time" "vitess.io/vitess/go/vt/vterrors" - "context" - "vitess.io/vitess/go/vt/hook" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/topotools" @@ -82,7 +81,13 @@ func (tm *TabletManager) ChangeType(ctx context.Context, tabletType topodatapb.T return err } defer tm.unlock() - return tm.changeTypeLocked(ctx, tabletType, DBActionNone, convertBoolToSemiSyncAction(semiSync)) + + semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync) + if err != nil { + return err + } + + return tm.changeTypeLocked(ctx, tabletType, DBActionNone, semiSyncAction) } // ChangeType changes the tablet type @@ -142,9 +147,23 @@ func (tm *TabletManager) RunHealthCheck(ctx context.Context) { tm.QueryServiceControl.BroadcastHealth() } -func convertBoolToSemiSyncAction(semiSync bool) SemiSyncAction { - if semiSync { - return SemiSyncActionSet +func (tm *TabletManager) convertBoolToSemiSyncAction(semiSync bool) (SemiSyncAction, error) { + semiSyncExtensionLoaded, err := tm.MysqlDaemon.SemiSyncExtensionLoaded() + if err != nil { + return SemiSyncActionNone, err + } + + if semiSyncExtensionLoaded { + if semiSync { + return SemiSyncActionSet, nil + } else { + return SemiSyncActionUnset, nil + } + } else { + if semiSync { + return SemiSyncActionNone, vterrors.VT09013() + } else { + return SemiSyncActionNone, nil + } } - return SemiSyncActionUnset } diff --git a/go/vt/vttablet/tabletmanager/rpc_agent.go b/go/vt/vttablet/tabletmanager/rpc_agent.go index 78b228430b9..06c0e5cda94 100644 --- a/go/vt/vttablet/tabletmanager/rpc_agent.go +++ b/go/vt/vttablet/tabletmanager/rpc_agent.go @@ -63,6 +63,8 @@ type RPCTM interface { ApplySchema(ctx context.Context, change *tmutils.SchemaChange) (*tabletmanagerdatapb.SchemaChangeResult, error) + ResetSequences(ctx context.Context, tables []string) error + LockTables(ctx context.Context) error UnlockTables(ctx context.Context) error @@ -97,8 +99,12 @@ type RPCTM interface { WaitForPosition(ctx context.Context, pos string) error // VReplication API + CreateVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) + DeleteVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (*tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, error) + ReadVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) VReplicationExec(ctx context.Context, query string) (*querypb.QueryResult, error) VReplicationWaitForPos(ctx context.Context, id int32, pos string) error + UpdateVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, error) // VDiff API VDiff(ctx context.Context, req *tabletmanagerdatapb.VDiffRequest) (*tabletmanagerdatapb.VDiffResponse, error) @@ -138,4 +144,7 @@ type RPCTM interface { // HandleRPCPanic is to be called in a defer statement in each // RPC input point. HandleRPCPanic(ctx context.Context, name string, args, reply any, verbose bool, err *error) + + // Throttler + CheckThrottler(ctx context.Context, request *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) } diff --git a/go/vt/vttablet/tabletmanager/rpc_backup.go b/go/vt/vttablet/tabletmanager/rpc_backup.go index 3b1f7a35a74..b3d2e2794f6 100644 --- a/go/vt/vttablet/tabletmanager/rpc_backup.go +++ b/go/vt/vttablet/tabletmanager/rpc_backup.go @@ -21,6 +21,9 @@ import ( "fmt" "time" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/mysqlctl/backupstats" @@ -36,7 +39,7 @@ const ( backupModeOffline = "offline" ) -// Backup takes a db backup and sends it to the BackupStorage +// Backup takes a db backup and sends it to the BackupStorage. func (tm *TabletManager) Backup(ctx context.Context, logger logutil.Logger, req *tabletmanagerdatapb.BackupRequest) error { if tm.Cnf == nil { return fmt.Errorf("cannot perform backup without my.cnf, please restart vttablet with a my.cnf file specified") @@ -54,7 +57,7 @@ func (tm *TabletManager) Backup(ctx context.Context, logger logutil.Logger, req if err != nil { return vterrors.Wrap(err, "failed to find backup engine") } - // get Tablet info from topo so that it is up to date + // Get Tablet info from topo so that it is up to date tablet, err := tm.TopoServer.GetTablet(ctx, tm.tabletAlias) if err != nil { return err @@ -63,9 +66,9 @@ func (tm *TabletManager) Backup(ctx context.Context, logger logutil.Logger, req return fmt.Errorf("type PRIMARY cannot take backup. if you really need to do this, rerun the backup command with --allow_primary") } - // prevent concurrent backups, and record stats + // Prevent concurrent backups, and record stats backupMode := backupModeOnline - if engine.ShouldDrainForBackup() { + if engine.ShouldDrainForBackup(req) { backupMode = backupModeOffline } if err := tm.beginBackup(backupMode); err != nil { @@ -73,8 +76,11 @@ func (tm *TabletManager) Backup(ctx context.Context, logger logutil.Logger, req } defer tm.endBackup(backupMode) + // Create the logger: tee to console and source. + l := logutil.NewTeeLogger(logutil.NewConsoleLogger(), logger) + var originalType topodatapb.TabletType - if engine.ShouldDrainForBackup() { + if engine.ShouldDrainForBackup(req) { if err := tm.lock(ctx); err != nil { return err } @@ -85,15 +91,63 @@ func (tm *TabletManager) Backup(ctx context.Context, logger logutil.Logger, req return err } originalType = tablet.Type - // update our type to BACKUP + // Update our type to `BACKUP`. if err := tm.changeTypeLocked(ctx, topodatapb.TabletType_BACKUP, DBActionNone, SemiSyncActionUnset); err != nil { return err } + + // Adding defer to original value in case of any failures. + defer func() { + bgCtx := context.Background() + // Change our type back to the original value. + // Original type could be primary so pass in a real value for PrimaryTermStartTime + if err := tm.changeTypeLocked(bgCtx, originalType, DBActionNone, SemiSyncActionNone); err != nil { + l.Errorf("Failed to change tablet type from %v to %v, error: %v", topodatapb.TabletType_BACKUP, originalType, err) + return + } + + // Find the correct primary tablet and set the replication source, + // since the primary could have changed while we executed the backup which can + // also affect whether we want to send semi sync acks or not. + tabletInfo, err := tm.TopoServer.GetTablet(bgCtx, tablet.Alias) + if err != nil { + l.Errorf("Failed to fetch updated tablet info, error: %v", err) + return + } + + // Do not do anything for primary tablets or when active reparenting is disabled + if mysqlctl.DisableActiveReparents || tabletInfo.Type == topodatapb.TabletType_PRIMARY { + return + } + + shardPrimary, err := topotools.GetShardPrimaryForTablet(bgCtx, tm.TopoServer, tablet.Tablet) + if err != nil { + return + } + + durabilityName, err := tm.TopoServer.GetKeyspaceDurability(bgCtx, tablet.Keyspace) + if err != nil { + l.Errorf("Failed to get durability policy, error: %v", err) + return + } + durability, err := reparentutil.GetDurabilityPolicy(durabilityName) + if err != nil { + l.Errorf("Failed to get durability with name %v, error: %v", durabilityName, err) + } + + isSemiSync := reparentutil.IsReplicaSemiSync(durability, shardPrimary.Tablet, tabletInfo.Tablet) + semiSyncAction, err := tm.convertBoolToSemiSyncAction(isSemiSync) + if err != nil { + l.Errorf("Failed to convert bool to semisync action, error: %v", err) + return + } + if err := tm.setReplicationSourceLocked(bgCtx, shardPrimary.Alias, 0, "", false, semiSyncAction); err != nil { + l.Errorf("Failed to set replication source, error: %v", err) + } + }() } - // create the loggers: tee to console and source - l := logutil.NewTeeLogger(logutil.NewConsoleLogger(), logger) - // now we can run the backup + // Now we can run the backup. backupParams := mysqlctl.BackupParams{ Cnf: tm.Cnf, Mysqld: tm.MysqlDaemon, @@ -107,28 +161,11 @@ func (tm *TabletManager) Backup(ctx context.Context, logger logutil.Logger, req TabletAlias: topoproto.TabletAliasString(tablet.Alias), BackupTime: time.Now(), Stats: backupstats.BackupStats(), + UpgradeSafe: req.UpgradeSafe, } returnErr := mysqlctl.Backup(ctx, backupParams) - if engine.ShouldDrainForBackup() { - bgCtx := context.Background() - // Starting from here we won't be able to recover if we get stopped by a cancelled - // context. It is also possible that the context already timed out during the - // above call to Backup. Thus we use the background context to get through to the finish. - - // Change our type back to the original value. - // Original type could be primary so pass in a real value for PrimaryTermStartTime - if err := tm.changeTypeLocked(bgCtx, originalType, DBActionNone, SemiSyncActionNone); err != nil { - // failure in changing the topology type is probably worse, - // so returning that (we logged the snapshot error anyway) - if returnErr != nil { - l.Errorf("mysql backup command returned error: %v", returnErr) - } - returnErr = err - } - } - return returnErr } @@ -148,13 +185,13 @@ func (tm *TabletManager) RestoreFromBackup(ctx context.Context, logger logutil.L return fmt.Errorf("type PRIMARY cannot restore from backup, if you really need to do this, restart vttablet in replica mode") } - // create the loggers: tee to console and source + // Create the logger: tee to console and source. l := logutil.NewTeeLogger(logutil.NewConsoleLogger(), logger) - // now we can run restore + // Now we can run restore. err = tm.restoreDataLocked(ctx, l, 0 /* waitForBackupInterval */, true /* deleteBeforeRestore */, request) - // re-run health check to be sure to capture any replication delay + // Re-run health check to be sure to capture any replication delay. tm.QueryServiceControl.BroadcastHealth() return err @@ -166,10 +203,10 @@ func (tm *TabletManager) beginBackup(backupMode string) error { if tm._isBackupRunning { return fmt.Errorf("a backup is already running on tablet: %v", tm.tabletAlias) } - // when mode is online we don't take the action lock, so we continue to serve, - // but let's set _isBackupRunning to true - // so that we only allow one online backup at a time - // offline backups also run only one at a time because we take the action lock + // When mode is online we don't take the action lock, so we continue to serve, + // but let's set _isBackupRunning to true. + // So that we only allow one online backup at a time. + // Offline backups also run only one at a time because we take the action lock // so this is not really needed in that case, however we are using it to record the state tm._isBackupRunning = true statsBackupIsRunning.Set([]string{backupMode}, 1) @@ -177,8 +214,8 @@ func (tm *TabletManager) beginBackup(backupMode string) error { } func (tm *TabletManager) endBackup(backupMode string) { - // now we set _isBackupRunning back to false - // have to take the mutex lock before writing to _ fields + // Now we set _isBackupRunning back to false. + // Have to take the mutex lock before writing to _ fields. tm.mutex.Lock() defer tm.mutex.Unlock() tm._isBackupRunning = false diff --git a/go/vt/vttablet/tabletmanager/rpc_query.go b/go/vt/vttablet/tabletmanager/rpc_query.go index 835cb698d8b..0d21cee7677 100644 --- a/go/vt/vttablet/tabletmanager/rpc_query.go +++ b/go/vt/vttablet/tabletmanager/rpc_query.go @@ -19,6 +19,7 @@ package tabletmanager import ( "context" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" @@ -63,8 +64,13 @@ func (tm *TabletManager) ExecuteFetchAsDba(ctx context.Context, req *tabletmanag return nil, err } } - // run the query - result, err := conn.ExecuteFetch(string(req.Query), int(req.MaxRows), true /*wantFields*/) + + // Replace any provided sidecar database qualifiers with the correct one. + uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) + if err != nil { + return nil, err + } + result, err := conn.ExecuteFetch(uq, int(req.MaxRows), true /*wantFields*/) // re-enable binlogs if necessary if req.DisableBinlogs && !conn.IsClosed() { @@ -100,8 +106,12 @@ func (tm *TabletManager) ExecuteFetchAsAllPrivs(ctx context.Context, req *tablet _, _ = conn.ExecuteFetch("USE "+sqlescape.EscapeID(req.DbName), 1, false) } - // run the query - result, err := conn.ExecuteFetch(string(req.Query), int(req.MaxRows), true /*wantFields*/) + // Replace any provided sidecar database qualifiers with the correct one. + uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) + if err != nil { + return nil, err + } + result, err := conn.ExecuteFetch(uq, int(req.MaxRows), true /*wantFields*/) if err == nil && req.ReloadSchema { reloadErr := tm.QueryServiceControl.ReloadSchema(ctx) @@ -120,7 +130,12 @@ func (tm *TabletManager) ExecuteFetchAsApp(ctx context.Context, req *tabletmanag return nil, err } defer conn.Recycle() - result, err := conn.ExecuteFetch(string(req.Query), int(req.MaxRows), true /*wantFields*/) + // Replace any provided sidecar database qualifiers with the correct one. + uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) + if err != nil { + return nil, err + } + result, err := conn.ExecuteFetch(uq, int(req.MaxRows), true /*wantFields*/) return sqltypes.ResultToProto3(result), err } @@ -129,6 +144,11 @@ func (tm *TabletManager) ExecuteQuery(ctx context.Context, req *tabletmanagerdat // get the db name from the tablet tablet := tm.Tablet() target := &querypb.Target{Keyspace: tablet.Keyspace, Shard: tablet.Shard, TabletType: tablet.Type} - result, err := tm.QueryServiceControl.QueryService().Execute(ctx, target, string(req.Query), nil, 0, 0, nil) + // Replace any provided sidecar database qualifiers with the correct one. + uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) + if err != nil { + return nil, err + } + result, err := tm.QueryServiceControl.QueryService().Execute(ctx, target, uq, nil, 0, 0, nil) return sqltypes.ResultToProto3(result), err } diff --git a/go/vt/vttablet/tabletmanager/rpc_replication.go b/go/vt/vttablet/tabletmanager/rpc_replication.go index 196014b8271..7c75b354252 100644 --- a/go/vt/vttablet/tabletmanager/rpc_replication.go +++ b/go/vt/vttablet/tabletmanager/rpc_replication.go @@ -24,9 +24,12 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/servenv" @@ -37,11 +40,11 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) -var setSuperReadOnly bool var disableReplicationManager bool func registerReplicationFlags(fs *pflag.FlagSet) { - fs.BoolVar(&setSuperReadOnly, "use_super_read_only", setSuperReadOnly, "Set super_read_only flag when performing planned failover.") + fs.Bool("use_super_read_only", true, "Set super_read_only flag when performing planned failover.") + fs.MarkDeprecated("use_super_read_only", "From v17 onwards MySQL server will always try to start with super_read_only=ON") fs.BoolVar(&disableReplicationManager, "disable-replication-manager", disableReplicationManager, "Disable replication manager to prevent replication repairs.") fs.MarkDeprecated("disable-replication-manager", "Replication manager is deleted") } @@ -57,7 +60,7 @@ func (tm *TabletManager) ReplicationStatus(ctx context.Context) (*replicationdat if err != nil { return nil, err } - return mysql.ReplicationStatusToProto(status), nil + return replication.ReplicationStatusToProto(status), nil } // FullStatus returns the full status of MySQL including the replication information, semi-sync information, GTID information among others @@ -81,7 +84,7 @@ func (tm *TabletManager) FullStatus(ctx context.Context) (*replicationdatapb.Ful return nil, err } if err == nil { - replicationStatusProto = mysql.ReplicationStatusToProto(replicationStatus) + replicationStatusProto = replication.ReplicationStatusToProto(replicationStatus) } // Primary status - "SHOW MASTER STATUS" @@ -91,7 +94,7 @@ func (tm *TabletManager) FullStatus(ctx context.Context) (*replicationdatapb.Ful return nil, err } if err == nil { - primaryStatusProto = mysql.PrimaryStatusToProto(primaryStatus) + primaryStatusProto = replication.PrimaryStatusToProto(primaryStatus) } // Purged GTID set @@ -101,10 +104,21 @@ func (tm *TabletManager) FullStatus(ctx context.Context) (*replicationdatapb.Ful } // Version string "majorVersion.minorVersion.patchRelease" - version := tm.MysqlDaemon.GetVersionString() + version, err := tm.MysqlDaemon.GetVersionString(ctx) + if err != nil { + return nil, err + } + _, v, err := mysqlctl.ParseVersionString(version) + if err != nil { + return nil, err + } + version = fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch) // Version comment "select @@global.version_comment" - versionComment := tm.MysqlDaemon.GetVersionComment(ctx) + versionComment, err := tm.MysqlDaemon.GetVersionComment(ctx) + if err != nil { + return nil, err + } // Read only - "SHOW VARIABLES LIKE 'read_only'" readOnly, err := tm.MysqlDaemon.IsReadOnly() @@ -112,6 +126,12 @@ func (tm *TabletManager) FullStatus(ctx context.Context) (*replicationdatapb.Ful return nil, err } + // superReadOnly - "SELECT @@global.super_read_only" + superReadOnly, err := tm.MysqlDaemon.IsSuperReadOnly() + if err != nil { + return nil, err + } + // Binlog Information - "select @@global.binlog_format, @@global.log_bin, @@global.log_slave_updates, @@global.binlog_row_image" binlogFormat, logBin, logReplicaUpdates, binlogRowImage, err := tm.MysqlDaemon.GetBinlogInformation(ctx) if err != nil { @@ -141,7 +161,7 @@ func (tm *TabletManager) FullStatus(ctx context.Context) (*replicationdatapb.Ful ServerUuid: serverUUID, ReplicationStatus: replicationStatusProto, PrimaryStatus: primaryStatusProto, - GtidPurged: mysql.EncodePosition(purgedGTIDs), + GtidPurged: replication.EncodePosition(purgedGTIDs), Version: version, VersionComment: versionComment, ReadOnly: readOnly, @@ -157,6 +177,7 @@ func (tm *TabletManager) FullStatus(ctx context.Context) (*replicationdatapb.Ful SemiSyncPrimaryClients: semiSyncClients, SemiSyncPrimaryTimeout: semiSyncTimeout, SemiSyncWaitForReplicaCount: semiSyncNumReplicas, + SuperReadOnly: superReadOnly, }, nil } @@ -166,7 +187,7 @@ func (tm *TabletManager) PrimaryStatus(ctx context.Context) (*replicationdatapb. if err != nil { return nil, err } - return mysql.PrimaryStatusToProto(status), nil + return replication.PrimaryStatusToProto(status), nil } // PrimaryPosition returns the position of a primary database @@ -175,13 +196,13 @@ func (tm *TabletManager) PrimaryPosition(ctx context.Context) (string, error) { if err != nil { return "", err } - return mysql.EncodePosition(pos), nil + return replication.EncodePosition(pos), nil } // WaitForPosition waits until replication reaches the desired position func (tm *TabletManager) WaitForPosition(ctx context.Context, pos string) error { log.Infof("WaitForPosition: %v", pos) - mpos, err := mysql.DecodePosition(pos) + mpos, err := replication.DecodePosition(pos) if err != nil { return err } @@ -218,7 +239,7 @@ func (tm *TabletManager) StopReplicationMinimum(ctx context.Context, position st } defer tm.unlock() - pos, err := mysql.DecodePosition(position) + pos, err := replication.DecodePosition(position) if err != nil { return "", err } @@ -234,7 +255,7 @@ func (tm *TabletManager) StopReplicationMinimum(ctx context.Context, position st if err != nil { return "", err } - return mysql.EncodePosition(pos), nil + return replication.EncodePosition(pos), nil } // StartReplication will start the mysql. Works both when Vitess manages @@ -246,7 +267,12 @@ func (tm *TabletManager) StartReplication(ctx context.Context, semiSync bool) er } defer tm.unlock() - if err := tm.fixSemiSync(tm.Tablet().Type, convertBoolToSemiSyncAction(semiSync)); err != nil { + semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync) + if err != nil { + return err + } + + if err := tm.fixSemiSync(tm.Tablet().Type, semiSyncAction); err != nil { return err } return tm.MysqlDaemon.StartReplication(tm.hookExtraEnv()) @@ -264,7 +290,7 @@ func (tm *TabletManager) StartReplicationUntilAfter(ctx context.Context, positio waitCtx, cancel := context.WithTimeout(ctx, waitTime) defer cancel() - pos, err := mysql.DecodePosition(position) + pos, err := replication.DecodePosition(position) if err != nil { return err } @@ -297,14 +323,12 @@ func (tm *TabletManager) InitPrimary(ctx context.Context, semiSync bool) (string } defer tm.unlock() - if setSuperReadOnly { - // Setting super_read_only off so that we can run the DDL commands - if err := tm.MysqlDaemon.SetSuperReadOnly(false); err != nil { - if strings.Contains(err.Error(), mysql.ERUnknownSystemVariable.ToString()) { - log.Warningf("server does not know about super_read_only, continuing anyway...") - } else { - return "", err - } + // Setting super_read_only `OFF` so that we can run the DDL commands + if _, err := tm.MysqlDaemon.SetSuperReadOnly(false); err != nil { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERUnknownSystemVariable { + log.Warningf("server does not know about super_read_only, continuing anyway...") + } else { + return "", err } } @@ -320,27 +344,32 @@ func (tm *TabletManager) InitPrimary(ctx context.Context, semiSync bool) (string return "", err } + semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync) + if err != nil { + return "", err + } + // Set the server read-write, from now on we can accept real // client writes. Note that if semi-sync replication is enabled, // we'll still need some replicas to be able to commit transactions. - if err := tm.changeTypeLocked(ctx, topodatapb.TabletType_PRIMARY, DBActionSetReadWrite, convertBoolToSemiSyncAction(semiSync)); err != nil { + if err := tm.changeTypeLocked(ctx, topodatapb.TabletType_PRIMARY, DBActionSetReadWrite, semiSyncAction); err != nil { return "", err } // Enforce semi-sync after changing the tablet type to PRIMARY. Otherwise, the // primary will hang while trying to create the database. - if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY, convertBoolToSemiSyncAction(semiSync)); err != nil { + if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY, semiSyncAction); err != nil { return "", err } - return mysql.EncodePosition(pos), nil + return replication.EncodePosition(pos), nil } // PopulateReparentJournal adds an entry into the reparent_journal table. func (tm *TabletManager) PopulateReparentJournal(ctx context.Context, timeCreatedNS int64, actionName string, primaryAlias *topodatapb.TabletAlias, position string) error { log.Infof("PopulateReparentJournal: action: %v parent: %v position: %v timeCreatedNS: %d actionName: %s primaryAlias: %s", actionName, primaryAlias, position, timeCreatedNS, actionName, primaryAlias) - pos, err := mysql.DecodePosition(position) + pos, err := replication.DecodePosition(position) if err != nil { return err } @@ -359,16 +388,21 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.Tab } defer tm.unlock() + semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync) + if err != nil { + return err + } + // If we were a primary type, switch our type to replica. This // is used on the old primary when using InitShardPrimary with // -force, and the new primary is different from the old primary. if tm.Tablet().Type == topodatapb.TabletType_PRIMARY { - if err := tm.changeTypeLocked(ctx, topodatapb.TabletType_REPLICA, DBActionNone, convertBoolToSemiSyncAction(semiSync)); err != nil { + if err := tm.changeTypeLocked(ctx, topodatapb.TabletType_REPLICA, DBActionNone, semiSyncAction); err != nil { return err } } - pos, err := mysql.DecodePosition(position) + pos, err := replication.DecodePosition(position) if err != nil { return err } @@ -384,7 +418,7 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.Tab if tt == topodatapb.TabletType_PRIMARY { tt = topodatapb.TabletType_REPLICA } - if err := tm.fixSemiSync(tt, convertBoolToSemiSyncAction(semiSync)); err != nil { + if err := tm.fixSemiSync(tt, semiSyncAction); err != nil { return err } @@ -450,12 +484,12 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure // considered successful. If we are already not serving, this will be // idempotent. log.Infof("DemotePrimary disabling query service") - if err := tm.QueryServiceControl.SetServingType(tablet.Type, logutil.ProtoToTime(tablet.PrimaryTermStartTime), false, "demotion in progress"); err != nil { + if err := tm.QueryServiceControl.SetServingType(tablet.Type, protoutil.TimeFromProto(tablet.PrimaryTermStartTime).UTC(), false, "demotion in progress"); err != nil { return nil, vterrors.Wrap(err, "SetServingType(serving=false) failed") } defer func() { if finalErr != nil && revertPartialFailure && wasServing { - if err := tm.QueryServiceControl.SetServingType(tablet.Type, logutil.ProtoToTime(tablet.PrimaryTermStartTime), true, ""); err != nil { + if err := tm.QueryServiceControl.SetServingType(tablet.Type, protoutil.TimeFromProto(tablet.PrimaryTermStartTime).UTC(), true, ""); err != nil { log.Warningf("SetServingType(serving=true) failed during revert: %v", err) } } @@ -463,23 +497,17 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure } // Now that we know no writes are in-flight and no new writes can occur, - // set MySQL to read-only mode. If we are already read-only because of a + // set MySQL to super_read_only mode. If we are already super_read_only because of a // previous demotion, or because we are not primary anyway, this should be // idempotent. - if setSuperReadOnly { - // Setting super_read_only also sets read_only - if err := tm.MysqlDaemon.SetSuperReadOnly(true); err != nil { - if strings.Contains(err.Error(), mysql.ERUnknownSystemVariable.ToString()) { - log.Warningf("server does not know about super_read_only, continuing anyway...") - } else { - return nil, err - } - } - } else { - if err := tm.MysqlDaemon.SetReadOnly(true); err != nil { + if _, err := tm.MysqlDaemon.SetSuperReadOnly(true); err != nil { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERUnknownSystemVariable { + log.Warningf("server does not know about super_read_only, continuing anyway...") + } else { return nil, err } } + defer func() { if finalErr != nil && revertPartialFailure && !wasReadOnly { // setting read_only OFF will also set super_read_only OFF if it was set @@ -511,7 +539,7 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure if err != nil { return nil, err } - return mysql.PrimaryStatusToProto(status), nil + return replication.PrimaryStatusToProto(status), nil } // UndoDemotePrimary reverts a previous call to DemotePrimary @@ -524,8 +552,13 @@ func (tm *TabletManager) UndoDemotePrimary(ctx context.Context, semiSync bool) e } defer tm.unlock() + semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync) + if err != nil { + return err + } + // If using semi-sync, we need to enable source-side. - if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY, convertBoolToSemiSyncAction(semiSync)); err != nil { + if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY, semiSyncAction); err != nil { return err } @@ -537,7 +570,7 @@ func (tm *TabletManager) UndoDemotePrimary(ctx context.Context, semiSync bool) e // Update serving graph tablet := tm.Tablet() log.Infof("UndoDemotePrimary re-enabling query service") - if err := tm.QueryServiceControl.SetServingType(tablet.Type, logutil.ProtoToTime(tablet.PrimaryTermStartTime), true, ""); err != nil { + if err := tm.QueryServiceControl.SetServingType(tablet.Type, protoutil.TimeFromProto(tablet.PrimaryTermStartTime).UTC(), true, ""); err != nil { return vterrors.Wrap(err, "SetServingType(serving=true) failed") } return nil @@ -582,25 +615,14 @@ func (tm *TabletManager) SetReplicationSource(ctx context.Context, parentAlias * } defer tm.unlock() - // setReplicationSourceLocked also fixes the semi-sync. In case the tablet type is primary it assumes that it will become a replica if SetReplicationSource - // is called, so we always call fixSemiSync with a non-primary tablet type. This will always set the source side replication to false. - return tm.setReplicationSourceLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication, convertBoolToSemiSyncAction(semiSync)) -} - -func (tm *TabletManager) setReplicationSourceRepairReplication(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) (err error) { - parent, err := tm.TopoServer.GetTablet(ctx, parentAlias) + semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync) if err != nil { return err } - ctx, unlock, lockErr := tm.TopoServer.LockShard(ctx, parent.Tablet.GetKeyspace(), parent.Tablet.GetShard(), fmt.Sprintf("repairReplication to %v as parent)", topoproto.TabletAliasString(parentAlias))) - if lockErr != nil { - return lockErr - } - - defer unlock(&err) - - return tm.setReplicationSourceLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication, SemiSyncActionNone) + // setReplicationSourceLocked also fixes the semi-sync. In case the tablet type is primary it assumes that it will become a replica if SetReplicationSource + // is called, so we always call fixSemiSync with a non-primary tablet type. This will always set the source side replication to false. + return tm.setReplicationSourceLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication, semiSyncAction) } func (tm *TabletManager) setReplicationSourceSemiSyncNoAction(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) error { @@ -639,7 +661,7 @@ func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentA shouldbeReplicating = true // Since we continue in the case of this error, make sure 'status' is // in a known, empty state. - status = mysql.ReplicationStatus{} + status = replication.ReplicationStatus{} } else if err != nil { // Abort on any other non-nil error. return err @@ -673,14 +695,12 @@ func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentA } host := parent.Tablet.MysqlHostname port := parent.Tablet.MysqlPort - // We want to reset the replication parameters and set replication source again when forceStartReplication is provided - // because sometimes MySQL gets stuck due to improper initialization of master info structure or related failures and throws errors like - // ERROR 1201 (HY000): Could not initialize master info structure; more error messages can be found in the MySQL error log - // These errors can only be resolved by resetting the replication parameters, otherwise START SLAVE fails. So when this RPC - // gets called from VTOrc or replication manager to fix the replication in these cases with forceStartReplication, we should also - // reset the replication parameters and set the source port information again. - if status.SourceHost != host || status.SourcePort != port || forceStartReplication { - // This handles reseting the replication parameters, changing the address and then starting the replication. + // If host is empty, then we shouldn't even attempt the reparent. That tablet has already shutdown. + if host == "" { + return vterrors.New(vtrpc.Code_FAILED_PRECONDITION, "Shard primary has empty mysql hostname") + } + if status.SourceHost != host || status.SourcePort != port { + // This handles both changing the address and starting replication. if err := tm.MysqlDaemon.SetReplicationSource(ctx, host, port, wasReplicating, shouldbeReplicating); err != nil { if err := tm.handleRelayLogError(err); err != nil { return err @@ -708,7 +728,7 @@ func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentA if shouldbeReplicating { log.Infof("Set up MySQL replication; should now be replicating from %s at %s", parentAlias, waitPosition) if waitPosition != "" { - pos, err := mysql.DecodePosition(waitPosition) + pos, err := replication.DecodePosition(waitPosition) if err != nil { return err } @@ -759,7 +779,7 @@ func (tm *TabletManager) StopReplicationAndGetStatus(ctx context.Context, stopRe if err != nil { return StopReplicationAndGetStatusResponse{}, vterrors.Wrap(err, "before status failed") } - before := mysql.ReplicationStatusToProto(rs) + before := replication.ReplicationStatusToProto(rs) if stopReplicationMode == replicationdatapb.StopReplicationMode_IOTHREADONLY { if !rs.IOHealthy() { @@ -805,7 +825,7 @@ func (tm *TabletManager) StopReplicationAndGetStatus(ctx context.Context, stopRe }, }, vterrors.Wrap(err, "acquiring replication status failed") } - after := mysql.ReplicationStatusToProto(rsAfter) + after := replication.ReplicationStatusToProto(rsAfter) rs.Position = rsAfter.Position rs.RelayLogPosition = rsAfter.RelayLogPosition @@ -840,15 +860,20 @@ func (tm *TabletManager) PromoteReplica(ctx context.Context, semiSync bool) (str return "", err } + semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync) + if err != nil { + return "", err + } + // If using semi-sync, we need to enable it before going read-write. - if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY, convertBoolToSemiSyncAction(semiSync)); err != nil { + if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY, semiSyncAction); err != nil { return "", err } if err := tm.changeTypeLocked(ctx, topodatapb.TabletType_PRIMARY, DBActionSetReadWrite, SemiSyncActionNone); err != nil { return "", err } - return mysql.EncodePosition(pos), nil + return replication.EncodePosition(pos), nil } func isPrimaryEligible(tabletType topodatapb.TabletType) bool { @@ -931,11 +956,17 @@ func (tm *TabletManager) fixSemiSyncAndReplication(tabletType topodatapb.TabletT return nil } +// handleRelayLogError resets replication of the instance. +// This is required because sometimes MySQL gets stuck due to improper initialization of +// master info structure or related failures and throws errors like +// ERROR 1201 (HY000): Could not initialize master info structure; more error messages can be found in the MySQL error log +// These errors can only be resolved by resetting the replication, otherwise START SLAVE fails. func (tm *TabletManager) handleRelayLogError(err error) error { // attempt to fix this error: // Slave failed to initialize relay log info structure from the repository (errno 1872) (sqlstate HY000) during query: START SLAVE // see https://bugs.mysql.com/bug.php?id=83713 or https://github.com/vitessio/vitess/issues/5067 - if strings.Contains(err.Error(), "Slave failed to initialize relay log info structure from the repository") { + // The same fix also works for https://github.com/vitessio/vitess/issues/10955. + if strings.Contains(err.Error(), "Slave failed to initialize relay log info structure from the repository") || strings.Contains(err.Error(), "Could not initialize master info structure") { // Stop, reset and start replication again to resolve this error if err := tm.MysqlDaemon.RestartReplication(tm.hookExtraEnv()); err != nil { return err @@ -944,26 +975,3 @@ func (tm *TabletManager) handleRelayLogError(err error) error { } return err } - -// repairReplication tries to connect this server to whoever is -// the current primary of the shard, and start replicating. -func (tm *TabletManager) repairReplication(ctx context.Context) error { - tablet := tm.Tablet() - - si, err := tm.TopoServer.GetShard(ctx, tablet.Keyspace, tablet.Shard) - if err != nil { - return err - } - if !si.HasPrimary() { - return fmt.Errorf("no primary tablet for shard %v/%v", tablet.Keyspace, tablet.Shard) - } - - if topoproto.TabletAliasEqual(si.PrimaryAlias, tablet.Alias) { - // The shard record says we are primary, but we disagree; we wouldn't - // reach this point unless we were told to check replication. - // Hopefully someone is working on fixing that, but in any case, - // we should not try to reparent to ourselves. - return fmt.Errorf("shard %v/%v record claims tablet %v is primary, but its type is %v", tablet.Keyspace, tablet.Shard, topoproto.TabletAliasString(tablet.Alias), tablet.Type) - } - return tm.setReplicationSourceRepairReplication(ctx, si.PrimaryAlias, 0, "", true) -} diff --git a/go/vt/vttablet/tabletmanager/rpc_schema.go b/go/vt/vttablet/tabletmanager/rpc_schema.go index 791ed42f994..9fe8ce27170 100644 --- a/go/vt/vttablet/tabletmanager/rpc_schema.go +++ b/go/vt/vttablet/tabletmanager/rpc_schema.go @@ -17,11 +17,11 @@ limitations under the License. package tabletmanager import ( + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/vterrors" "context" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/topo/topoproto" @@ -44,7 +44,7 @@ func (tm *TabletManager) ReloadSchema(ctx context.Context, waitPosition string) } if waitPosition != "" { - pos, err := mysql.DecodePosition(waitPosition) + pos, err := replication.DecodePosition(waitPosition) if err != nil { return vterrors.Wrapf(err, "ReloadSchema: can't parse wait position (%q)", waitPosition) } @@ -58,6 +58,11 @@ func (tm *TabletManager) ReloadSchema(ctx context.Context, waitPosition string) return tm.QueryServiceControl.ReloadSchema(ctx) } +// ResetSequences will reset the auto-inc counters on the specified tables. +func (tm *TabletManager) ResetSequences(ctx context.Context, tables []string) error { + return tm.QueryServiceControl.SchemaEngine().ResetSequences(tables) +} + // PreflightSchema will try out the schema changes in "changes". func (tm *TabletManager) PreflightSchema(ctx context.Context, changes []string) ([]*tabletmanagerdatapb.SchemaChangeResult, error) { if err := tm.lock(ctx); err != nil { diff --git a/go/vt/vttablet/tabletmanager/rpc_throttler.go b/go/vt/vttablet/tabletmanager/rpc_throttler.go new file mode 100644 index 00000000000..dfdc0d230fb --- /dev/null +++ b/go/vt/vttablet/tabletmanager/rpc_throttler.go @@ -0,0 +1,53 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tabletmanager + +import ( + "context" + + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" +) + +// CheckThrottler executes a throttler check +func (tm *TabletManager) CheckThrottler(ctx context.Context, req *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) { + if req.AppName == "" { + req.AppName = throttlerapp.VitessName.String() + } + flags := &throttle.CheckFlags{ + LowPriority: false, + SkipRequestHeartbeats: true, + } + checkResult := tm.QueryServiceControl.CheckThrottler(ctx, req.AppName, flags) + if checkResult == nil { + return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "nil checkResult") + } + resp := &tabletmanagerdatapb.CheckThrottlerResponse{ + StatusCode: int32(checkResult.StatusCode), + Value: checkResult.Value, + Threshold: checkResult.Threshold, + Message: checkResult.Message, + RecentlyChecked: checkResult.RecentlyChecked, + } + if checkResult.Error != nil { + resp.Error = checkResult.Error.Error() + } + return resp, nil +} diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication.go b/go/vt/vttablet/tabletmanager/rpc_vreplication.go index 9886a832332..bbcea8bd0d6 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication.go @@ -18,15 +18,317 @@ package tabletmanager import ( "context" + "strings" + "google.golang.org/protobuf/encoding/prototext" + + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/textutil" + "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/proto/vttime" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/workflow" + "vitess.io/vitess/go/vt/vterrors" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" +) + +const ( + // Create a new VReplication workflow record. + sqlCreateVReplicationWorkflow = "insert into %s.vreplication (workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys) values (%a, %a, '', 0, 0, %a, %a, now(), 0, %a, %a, %a, %a, %a)" + // Read a VReplication workflow. + sqlReadVReplicationWorkflow = "select id, source, pos, stop_pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, message, db_name, rows_copied, tags, time_heartbeat, workflow_type, time_throttled, component_throttled, workflow_sub_type, defer_secondary_keys from %s.vreplication where workflow = %a and db_name = %a" + // Delete VReplication records for the given workflow. + sqlDeleteVReplicationWorkflow = "delete from %s.vreplication where workflow = %a and db_name = %a" + // Retrieve the current configuration values for a workflow's vreplication stream. + sqlSelectVReplicationWorkflowConfig = "select id, source, cell, tablet_types, state, message from %s.vreplication where workflow = %a" + // Update the configuration values for a workflow's vreplication stream. + sqlUpdateVReplicationWorkflowConfig = "update %s.vreplication set state = %a, source = %a, cell = %a, tablet_types = %a where id = %a" ) +func (tm *TabletManager) CreateVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) { + if req == nil || len(req.BinlogSource) == 0 { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid request, no binlog source specified") + } + res := &sqltypes.Result{} + for _, bls := range req.BinlogSource { + source, err := prototext.Marshal(bls) + if err != nil { + return nil, err + } + // Use the local cell if none are specified. + if len(req.Cells) == 0 || strings.TrimSpace(req.Cells[0]) == "" { + req.Cells = append(req.Cells, tm.Tablet().Alias.Cell) + } + wfState := binlogdatapb.VReplicationWorkflowState_Stopped.String() + tabletTypesStr := topoproto.MakeStringTypeCSV(req.TabletTypes) + if req.TabletSelectionPreference == tabletmanagerdatapb.TabletSelectionPreference_INORDER { + tabletTypesStr = discovery.InOrderHint + tabletTypesStr + } + bindVars := map[string]*querypb.BindVariable{ + "workflow": sqltypes.StringBindVariable(req.Workflow), + "source": sqltypes.StringBindVariable(string(source)), + "cells": sqltypes.StringBindVariable(strings.Join(req.Cells, ",")), + "tabletTypes": sqltypes.StringBindVariable(tabletTypesStr), + "state": sqltypes.StringBindVariable(wfState), + "dbname": sqltypes.StringBindVariable(tm.DBConfigs.DBName), + "workflowType": sqltypes.Int64BindVariable(int64(req.WorkflowType)), + "workflowSubType": sqltypes.Int64BindVariable(int64(req.WorkflowSubType)), + "deferSecondaryKeys": sqltypes.BoolBindVariable(req.DeferSecondaryKeys), + } + parsed := sqlparser.BuildParsedQuery(sqlCreateVReplicationWorkflow, sidecar.GetIdentifier(), + ":workflow", ":source", ":cells", ":tabletTypes", ":state", ":dbname", ":workflowType", ":workflowSubType", ":deferSecondaryKeys", + ) + stmt, err := parsed.GenerateQuery(bindVars, nil) + if err != nil { + return nil, err + } + streamres, err := tm.VREngine.Exec(stmt) + + if err != nil { + return nil, err + } + res.RowsAffected += streamres.RowsAffected + } + return &tabletmanagerdatapb.CreateVReplicationWorkflowResponse{Result: sqltypes.ResultToProto3(res)}, nil +} + +func (tm *TabletManager) DeleteVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (*tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, error) { + if req == nil || req.Workflow == "" { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid request, no workflow provided") + } + res := &sqltypes.Result{} + bindVars := map[string]*querypb.BindVariable{ + "wf": sqltypes.StringBindVariable(req.Workflow), + "db": sqltypes.StringBindVariable(tm.DBConfigs.DBName), + } + parsed := sqlparser.BuildParsedQuery(sqlDeleteVReplicationWorkflow, sidecar.GetIdentifier(), ":wf", ":db") + stmt, err := parsed.GenerateQuery(bindVars, nil) + if err != nil { + return nil, err + } + streamres, err := tm.VREngine.Exec(stmt) + + if err != nil { + return nil, err + } + res.RowsAffected += streamres.RowsAffected + + return &tabletmanagerdatapb.DeleteVReplicationWorkflowResponse{Result: sqltypes.ResultToProto3(res)}, nil +} + +func (tm *TabletManager) ReadVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { + if req == nil || req.Workflow == "" { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid request, no workflow provided") + } + bindVars := map[string]*querypb.BindVariable{ + "wf": sqltypes.StringBindVariable(req.Workflow), + "db": sqltypes.StringBindVariable(tm.DBConfigs.DBName), + } + parsed := sqlparser.BuildParsedQuery(sqlReadVReplicationWorkflow, sidecar.GetIdentifier(), ":wf", ":db") + stmt, err := parsed.GenerateQuery(bindVars, nil) + if err != nil { + return nil, err + } + res, err := tm.VREngine.Exec(stmt) + if err != nil { + return nil, err + } + if res == nil || len(res.Rows) == 0 { + return nil, nil + } + rows := res.Named().Rows + resp := &tabletmanagerdatapb.ReadVReplicationWorkflowResponse{Workflow: req.Workflow} + streams := make([]*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream, len(rows)) + + // First the things that are common to all streams. + resp.Cells = rows[0]["cell"].ToString() + tabletTypes, inorder, err := discovery.ParseTabletTypesAndOrder(rows[0]["tablet_types"].ToString()) + if err != nil { + return nil, vterrors.Wrap(err, "error parsing the tablet_types field from vreplication table record") + } + resp.TabletTypes = tabletTypes + resp.TabletSelectionPreference = tabletmanagerdatapb.TabletSelectionPreference_ANY + if inorder { + resp.TabletSelectionPreference = tabletmanagerdatapb.TabletSelectionPreference_INORDER + } + resp.DbName = rows[0]["db_name"].ToString() + resp.Tags = rows[0]["tags"].ToString() + wft, err := rows[0]["workflow_type"].ToInt32() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing workflow_type field from vreplication table record") + } + resp.WorkflowType = binlogdatapb.VReplicationWorkflowType(wft) + wfst, err := rows[0]["workflow_sub_type"].ToInt32() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing workflow_sub_type field from vreplication table record") + } + resp.WorkflowSubType = binlogdatapb.VReplicationWorkflowSubType(wfst) + resp.DeferSecondaryKeys = rows[0]["defer_secondary_keys"].ToString() == "1" + + // Now the individual streams (there can be more than 1 with shard merges). + for i, row := range rows { + streams[i] = &tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{} + if streams[i].Id, err = row["id"].ToInt32(); err != nil { + return nil, vterrors.Wrap(err, "error parsing id field from vreplication table record") + } + srcBytes, err := row["source"].ToBytes() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing binlog_source field from vreplication table record") + } + blspb := &binlogdatapb.BinlogSource{} + err = prototext.Unmarshal(srcBytes, blspb) + if err != nil { + return nil, vterrors.Wrap(err, "error unmarshaling binlog_source field from vreplication table record") + } + streams[i].Bls = blspb + streams[i].Pos = row["pos"].ToString() + streams[i].StopPos = row["stop_pos"].ToString() + if streams[i].MaxTps, err = row["max_tps"].ToInt64(); err != nil { + return nil, vterrors.Wrap(err, "error parsing max_tps field from vreplication table record") + } + if streams[i].MaxReplicationLag, err = row["max_replication_lag"].ToInt64(); err != nil { + return nil, vterrors.Wrap(err, "error parsing max_replication_lag field from vreplication table record") + } + timeUpdated, err := row["time_updated"].ToInt64() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing time_updated field from vreplication table record") + } + streams[i].TimeUpdated = &vttime.Time{Seconds: timeUpdated} + txTimestamp, err := row["transaction_timestamp"].ToInt64() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing transaction_timestamp field from vreplication table record") + } + streams[i].TransactionTimestamp = &vttime.Time{Seconds: txTimestamp} + streams[i].State = binlogdatapb.VReplicationWorkflowState(binlogdatapb.VReplicationWorkflowState_value[row["state"].ToString()]) + streams[i].Message = row["message"].ToString() + if streams[i].RowsCopied, err = row["rows_copied"].ToInt64(); err != nil { + return nil, vterrors.Wrap(err, "error parsing rows_copied field from vreplication table record") + } + timeHeartbeat, err := row["time_heartbeat"].ToInt64() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing time_heartbeat field from vreplication table record") + } + streams[i].TimeHeartbeat = &vttime.Time{Seconds: timeHeartbeat} + timeThrottled, err := row["time_throttled"].ToInt64() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing time_throttled field from vreplication table record") + } + streams[i].TimeThrottled = &vttime.Time{Seconds: timeThrottled} + streams[i].ComponentThrottled = row["component_throttled"].ToString() + } + resp.Streams = streams + + return resp, nil +} + +// UpdateVReplicationWorkflow updates the sidecar databases's vreplication +// record for this tablet's vreplication workflow stream(s). If there +// is no stream for the given workflow on the tablet then a nil result +// is returned as this is expected e.g. on source tablets of a +// Reshard workflow (source and target are the same keyspace). The +// caller can consider this case an error if they choose to. +// Note: the VReplication engine creates a new controller for the +// workflow stream when the record is updated, so we also in effect +// restart the workflow stream via the update. +func (tm *TabletManager) UpdateVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, error) { + bindVars := map[string]*querypb.BindVariable{ + "wf": sqltypes.StringBindVariable(req.Workflow), + } + parsed := sqlparser.BuildParsedQuery(sqlSelectVReplicationWorkflowConfig, sidecar.GetIdentifier(), ":wf") + stmt, err := parsed.GenerateQuery(bindVars, nil) + if err != nil { + return nil, err + } + res, err := tm.VREngine.Exec(stmt) + if err != nil { + return nil, err + } + if res == nil || len(res.Rows) == 0 { + // No streams on this tablet to update. This is + // expected e.g. on source tablets for Reshard + // workflows. If callers want to treat this + // scenario as an error they can. + return &tabletmanagerdatapb.UpdateVReplicationWorkflowResponse{Result: nil}, nil + } + + row := res.Named().Row() + id := row.AsInt64("id", 0) + cells := strings.Split(row.AsString("cell", ""), ",") + tabletTypes, inorder, err := discovery.ParseTabletTypesAndOrder(row.AsString("tablet_types", "")) + if err != nil { + return nil, err + } + bls := &binlogdatapb.BinlogSource{} + source := row.AsBytes("source", []byte{}) + state := row.AsString("state", "") + message := row.AsString("message", "") + if req.State == binlogdatapb.VReplicationWorkflowState_Running && strings.ToUpper(message) == workflow.Frozen { + return &tabletmanagerdatapb.UpdateVReplicationWorkflowResponse{Result: nil}, + vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "cannot start a workflow when it is frozen") + } + // For the string based values, we use NULL to differentiate + // from an empty string. The NULL value indicates that we + // should keep the existing value. + if !textutil.ValueIsSimulatedNull(req.Cells) { + cells = req.Cells + } + if !textutil.ValueIsSimulatedNull(req.TabletTypes) { + tabletTypes = req.TabletTypes + } + tabletTypesStr := topoproto.MakeStringTypeCSV(tabletTypes) + if inorder && req.TabletSelectionPreference == tabletmanagerdatapb.TabletSelectionPreference_UNKNOWN || + req.TabletSelectionPreference == tabletmanagerdatapb.TabletSelectionPreference_INORDER { + tabletTypesStr = discovery.InOrderHint + tabletTypesStr + } + if err = prototext.Unmarshal(source, bls); err != nil { + return nil, err + } + // If we don't want to update the existing value then pass + // the simulated NULL value of -1. + if !textutil.ValueIsSimulatedNull(req.OnDdl) { + bls.OnDdl = req.OnDdl + } + source, err = prototext.Marshal(bls) + if err != nil { + return nil, err + } + if !textutil.ValueIsSimulatedNull(req.State) { + state = binlogdatapb.VReplicationWorkflowState_name[int32(req.State)] + } + bindVars = map[string]*querypb.BindVariable{ + "st": sqltypes.StringBindVariable(state), + "sc": sqltypes.StringBindVariable(string(source)), + "cl": sqltypes.StringBindVariable(strings.Join(cells, ",")), + "tt": sqltypes.StringBindVariable(tabletTypesStr), + "id": sqltypes.Int64BindVariable(id), + } + parsed = sqlparser.BuildParsedQuery(sqlUpdateVReplicationWorkflowConfig, sidecar.GetIdentifier(), ":st", ":sc", ":cl", ":tt", ":id") + stmt, err = parsed.GenerateQuery(bindVars, nil) + if err != nil { + return nil, err + } + res, err = tm.VREngine.Exec(stmt) + + if err != nil { + return nil, err + } + return &tabletmanagerdatapb.UpdateVReplicationWorkflowResponse{Result: sqltypes.ResultToProto3(res)}, nil +} + // VReplicationExec executes a vreplication command. func (tm *TabletManager) VReplicationExec(ctx context.Context, query string) (*querypb.QueryResult, error) { - qr, err := tm.VREngine.ExecWithDBA(query) + // Replace any provided sidecar databsae qualifiers with the correct one. + uq, err := sqlparser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) + if err != nil { + return nil, err + } + qr, err := tm.VREngine.ExecWithDBA(uq) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go new file mode 100644 index 00000000000..ce7e85ac495 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go @@ -0,0 +1,1006 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tabletmanager + +import ( + "context" + "errors" + "fmt" + "math" + "runtime/debug" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/sqlescape" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/textutil" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtctl/workflow" + "vitess.io/vitess/go/vt/vtgate/vindexes" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + "vitess.io/vitess/go/vt/proto/vttime" +) + +const ( + insertVReplicationPrefix = "insert into _vt.vreplication (workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys)" + getWorkflow = "select id from _vt.vreplication where db_name='vt_%s' and workflow='%s'" + checkForWorkflow = "select 1 from _vt.vreplication where db_name='vt_%s' and workflow='%s'" + checkForFrozenWorkflow = "select 1 from _vt.vreplication where db_name='vt_%s' and message='FROZEN' and workflow_sub_type != 1" + freezeWorkflow = "update _vt.vreplication set message = 'FROZEN' where db_name='vt_%s' and workflow='%s'" + checkForJournal = "/select val from _vt.resharding_journal where id=" + getWorkflowStatus = "select id, workflow, source, pos, stop_pos, max_replication_lag, state, db_name, time_updated, transaction_timestamp, message, tags, workflow_type, workflow_sub_type from _vt.vreplication where workflow = '%s' and db_name = 'vt_%s'" + getWorkflowState = "select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type, defer_secondary_keys from _vt.vreplication where id=1" + getCopyState = "select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = 1" + getNumCopyStateTable = "select count(distinct table_name) from _vt.copy_state where vrepl_id=1" + getLatestCopyState = "select table_name, lastpk from _vt.copy_state where vrepl_id = 1 and id in (select max(id) from _vt.copy_state where vrepl_id = 1 group by vrepl_id, table_name)" + getAutoIncrementStep = "select @@session.auto_increment_increment" + setSessionTZ = "set @@session.time_zone = '+00:00'" + setNames = "set names 'binary'" + getBinlogRowImage = "select @@binlog_row_image" + insertStreamsCreatedLog = "insert into _vt.vreplication_log(vrepl_id, type, state, message) values(1, 'Stream Created', '', '%s'" + getVReplicationRecord = "select * from _vt.vreplication where id = 1" + startWorkflow = "update _vt.vreplication set state='Running' where db_name='vt_%s' and workflow='%s'" + stopForCutover = "update _vt.vreplication set state='Stopped', message='stopped for cutover' where id=1" + getMaxValForSequence = "select max(`id`) as maxval from `vt_%s`.`%s`" + initSequenceTable = "insert into %a.%a (id, next_id, cache) values (0, %d, 1000) on duplicate key update next_id = if(next_id < %d, %d, next_id)" + deleteWorkflow = "delete from _vt.vreplication where db_name = 'vt_%s' and workflow = '%s'" + updatePickedSourceTablet = `update _vt.vreplication set message='Picked source tablet: cell:\"%s\" uid:%d' where id=1` + getRowsCopied = "SELECT rows_copied FROM _vt.vreplication WHERE id=1" +) + +var ( + errShortCircuit = fmt.Errorf("short circuiting test") + defaultSchema = &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + Columns: []string{"id", "c2"}, + PrimaryKeyColumns: []string{"id"}, + Fields: sqltypes.MakeTestFields("id|c2", "int64|int64"), + }, + }, + } + position = fmt.Sprintf("%s/%s", gtidFlavor, gtidPosition) +) + +// TestCreateVReplicationWorkflow tests the query generated +// from a VtctldServer MoveTablesCreate request to ensure +// that the VReplication stream(s) are created correctly. +func TestCreateVReplicationWorkflow(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 200 + targetKs := "targetks" + targetTabletUID := 300 + shard := "0" + wf := "testwf" + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) + defer tenv.deleteTablet(sourceTablet.tablet) + targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) + defer tenv.deleteTablet(targetTablet.tablet) + + ws := workflow.NewServer(tenv.ts, tenv.tmc) + + tests := []struct { + name string + req *vtctldatapb.MoveTablesCreateRequest + schema *tabletmanagerdatapb.SchemaDefinition + query string + }{ + { + name: "defaults", + req: &vtctldatapb.MoveTablesCreateRequest{ + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + Workflow: wf, + Cells: tenv.cells, + AllTables: true, + }, + query: fmt.Sprintf(`%s values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1\"}}', '', 0, 0, '%s', '', now(), 0, 'Stopped', '%s', 1, 0, 0)`, + insertVReplicationPrefix, wf, sourceKs, shard, tenv.cells[0], tenv.dbName), + }, + { + name: "all values", + schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "t1", + Columns: []string{"id", "c2"}, + PrimaryKeyColumns: []string{"id"}, + Fields: sqltypes.MakeTestFields("id|c2", "int64|int64"), + }, + { + Name: "wut", + Columns: []string{"id"}, + PrimaryKeyColumns: []string{"id"}, + Fields: sqltypes.MakeTestFields("id", "int64"), + }, + }, + }, + req: &vtctldatapb.MoveTablesCreateRequest{ + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + Workflow: wf, + Cells: tenv.cells, + IncludeTables: []string{defaultSchema.TableDefinitions[0].Name}, + ExcludeTables: []string{"wut"}, + SourceTimeZone: "EDT", + OnDdl: binlogdatapb.OnDDLAction_EXEC.String(), + StopAfterCopy: true, + DropForeignKeys: true, + DeferSecondaryKeys: true, + AutoStart: true, + }, + query: fmt.Sprintf(`%s values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1\"}} on_ddl:EXEC stop_after_copy:true source_time_zone:\"EDT\" target_time_zone:\"UTC\"', '', 0, 0, '%s', '', now(), 0, 'Stopped', '%s', 1, 0, 1)`, + insertVReplicationPrefix, wf, sourceKs, shard, tenv.cells[0], tenv.dbName), + }, + } + + tenv.tmc.setVReplicationExecResults(targetTablet.tablet, fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s' and workflow='%s'", + targetKs, wf), &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(targetTablet.tablet, fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s' and message='FROZEN' and workflow_sub_type != 1", + targetKs), &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(sourceTablet.tablet, "select val from _vt.resharding_journal where id=7224776740563431192", &sqltypes.Result{}) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // This is needed because MockDBClient uses t.Fatal() + // which doesn't play well with subtests. + defer func() { + if err := recover(); err != nil { + t.Errorf("Recovered from panic: %v; Stack: %s", err, string(debug.Stack())) + } + }() + + require.NotNil(t, tt.req, "No MoveTablesCreate request provided") + require.NotEmpty(t, tt.query, "No expected query provided") + + if tt.schema == nil { + tt.schema = defaultSchema + } + tenv.tmc.SetSchema(tt.schema) + + tenv.tmc.tablets[targetTabletUID].vrdbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) + // This is our expected query, which will also short circuit + // the test with an error as at this point we've tested what + // we wanted to test. + tenv.tmc.tablets[targetTabletUID].vrdbClient.ExpectRequest(tt.query, nil, errShortCircuit) + _, err := ws.MoveTablesCreate(ctx, tt.req) + tenv.tmc.tablets[targetTabletUID].vrdbClient.Wait() + require.ErrorIs(t, err, errShortCircuit) + }) + } +} + +// TestMoveTables tests the query generated from a VtctldServer +// MoveTablesCreate request to ensure that the VReplication +// stream(s) are created correctly. Followed by ensuring that +// SwitchTraffic and ReverseTraffic work as expected. +func TestMoveTables(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 200 + targetKs := "targetks" + targetShards := make(map[string]*fakeTabletConn) + sourceShard := "0" + globalKs := "global" + globalShard := "0" + wf := "testwf" + tabletTypes := []topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_REPLICA, + topodatapb.TabletType_RDONLY, + } + + tenv := newTestEnv(t, ctx, sourceKs, []string{sourceShard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, sourceShard) + defer tenv.deleteTablet(sourceTablet.tablet) + + targetShards["-80"] = tenv.addTablet(t, 300, targetKs, "-80") + defer tenv.deleteTablet(targetShards["-80"].tablet) + targetShards["80-"] = tenv.addTablet(t, 310, targetKs, "80-") + defer tenv.deleteTablet(targetShards["80-"].tablet) + + globalTablet := tenv.addTablet(t, 500, globalKs, globalShard) + defer tenv.deleteTablet(globalTablet.tablet) + + tenv.ts.SaveVSchema(ctx, globalKs, &vschemapb.Keyspace{ + Sharded: false, + Tables: map[string]*vschemapb.Table{ + "t1_seq": { + Type: vindexes.TypeSequence, + }, + }, + }) + tenv.ts.SaveVSchema(ctx, targetKs, &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "id", + Name: "hash", + }}, + AutoIncrement: &vschemapb.AutoIncrement{ + Column: "id", + Sequence: "t1_seq", + }, + }, + }, + }) + + ws := workflow.NewServer(tenv.ts, tenv.tmc) + + tenv.mysqld.Schema = defaultSchema + tenv.mysqld.Schema.DatabaseSchema = tenv.dbName + tenv.mysqld.FetchSuperQueryMap = make(map[string]*sqltypes.Result) + tenv.mysqld.FetchSuperQueryMap[`select character_set_name, collation_name, column_name, data_type, column_type, extra from information_schema.columns where .*`] = sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "character_set_name|collation_name|column_name|data_type|column_type|extra", + "varchar|varchar|varchar|varchar|varchar|varchar", + ), + "NULL|NULL|id|bigint|bigint|", + "NULL|NULL|c2|bigint|bigint|", + ) + + bls := fmt.Sprintf("keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1\"}}", sourceKs, sourceShard) + + tenv.tmc.SetSchema(defaultSchema) + + tenv.tmc.setVReplicationExecResults(sourceTablet.tablet, checkForJournal, &sqltypes.Result{}) + + for _, ftc := range targetShards { + tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(checkForWorkflow, targetKs, wf), &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(checkForFrozenWorkflow, targetKs), &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(getWorkflow, targetKs, wf), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id", + "int64", + ), + "1", + ), + ) + tenv.tmc.setVReplicationExecResults(ftc.tablet, getCopyState, &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(getWorkflowStatus, wf, targetKs), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|workflow|source|pos|stop_pos|max_replication_log|state|db_name|time_updated|transaction_timestamp|message|tags|workflow_type|workflow_sub_type", + "int64|varchar|blob|varchar|varchar|int64|varchar|varchar|int64|int64|varchar|varchar|int64|int64", + ), + fmt.Sprintf("1|%s|%s|%s|NULL|0|running|vt_%s|1686577659|0|||1|0", wf, bls, position, targetKs), + ), + ) + tenv.tmc.setVReplicationExecResults(ftc.tablet, getLatestCopyState, &sqltypes.Result{}) + + ftc.vrdbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) + insert := fmt.Sprintf(`%s values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1 where in_keyrange(id, \'%s.hash\', \'%s\')\"}}', '', 0, 0, '%s', 'primary,replica,rdonly', now(), 0, 'Stopped', '%s', 1, 0, 0)`, + insertVReplicationPrefix, wf, sourceKs, sourceShard, targetKs, ftc.tablet.Shard, tenv.cells[0], tenv.dbName) + ftc.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: 1}, nil) + ftc.vrdbClient.ExpectRequest(getAutoIncrementStep, &sqltypes.Result{}, nil) + ftc.vrdbClient.ExpectRequest(getVReplicationRecord, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source", + "int64|varchar", + ), + fmt.Sprintf("1|%s", bls), + ), nil) + ftc.vrdbClient.ExpectRequest(fmt.Sprintf(updatePickedSourceTablet, tenv.cells[0], sourceTabletUID), &sqltypes.Result{}, nil) + ftc.vrdbClient.ExpectRequest(setSessionTZ, &sqltypes.Result{}, nil) + ftc.vrdbClient.ExpectRequest(setNames, &sqltypes.Result{}, nil) + ftc.vrdbClient.ExpectRequest(getRowsCopied, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "rows_copied", + "int64", + ), + "0", + ), + nil, + ) + ftc.vrdbClient.ExpectRequest(getWorkflowState, sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "pos|stop_pos|max_tps|max_replication_lag|state|workflow_type|workflow|workflow_sub_type|defer_secondary_keys", + "varchar|varchar|int64|int64|varchar|int64|varchar|int64|int64", + ), + fmt.Sprintf("||0|0|Stopped|1|%s|0|0", wf), + ), nil) + ftc.vrdbClient.ExpectRequest(getNumCopyStateTable, sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "count(distinct table_name)", + "int64", + ), + "1", + ), nil) + ftc.vrdbClient.ExpectRequest(getWorkflowState, sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "pos|stop_pos|max_tps|max_replication_lag|state|workflow_type|workflow|workflow_sub_type|defer_secondary_keys", + "varchar|varchar|int64|int64|varchar|int64|varchar|int64|int64", + ), + fmt.Sprintf("||0|0|Stopped|1|%s|0|0", wf), + ), nil) + ftc.vrdbClient.ExpectRequest(getNumCopyStateTable, sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "count(distinct table_name)", + "int64", + ), + "1", + ), nil) + ftc.vrdbClient.ExpectRequest(getBinlogRowImage, sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "@@binlog_row_image", + "varchar", + ), + "FULL", + ), nil) + + ftc.vrdbClient.ExpectRequest(fmt.Sprintf(insertStreamsCreatedLog, bls), &sqltypes.Result{}, nil) + tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(getWorkflow, targetKs, wf), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id", + "int64", + ), + "1", + ), + ) + tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(startWorkflow, targetKs, wf), &sqltypes.Result{}) + ftc.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.DefaultName), &sqltypes.Result{}, nil) + + tenv.tmc.setVReplicationExecResults(ftc.tablet, stopForCutover, &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(freezeWorkflow, targetKs, wf), &sqltypes.Result{}) + + tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(getMaxValForSequence, targetKs, "t1"), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "maxval", + "int64", + ), + fmt.Sprintf("%d", ftc.tablet.Alias.Uid), // Use the tablet's UID as the max value + ), + ) + } + + // We use the tablet's UID in the mocked results for the max value used on each target shard. + nextSeqVal := int(math.Max(float64(targetShards["-80"].tablet.Alias.Uid), float64(targetShards["80-"].tablet.Alias.Uid))) + 1 + tenv.tmc.setVReplicationExecResults(globalTablet.tablet, + sqlparser.BuildParsedQuery(initSequenceTable, sqlescape.EscapeID(fmt.Sprintf("vt_%s", globalKs)), sqlescape.EscapeID("t1_seq"), nextSeqVal, nextSeqVal, nextSeqVal).Query, + &sqltypes.Result{RowsAffected: 0}, + ) + + _, err := ws.MoveTablesCreate(ctx, &vtctldatapb.MoveTablesCreateRequest{ + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + Workflow: wf, + TabletTypes: tabletTypes, + Cells: tenv.cells, + AllTables: true, + AutoStart: true, + }) + require.NoError(t, err) + + _, err = ws.WorkflowSwitchTraffic(ctx, &vtctldatapb.WorkflowSwitchTrafficRequest{ + Keyspace: targetKs, + Workflow: wf, + Cells: tenv.cells, + MaxReplicationLagAllowed: &vttime.Duration{Seconds: 922337203}, + EnableReverseReplication: true, + InitializeTargetSequences: true, + Direction: int32(workflow.DirectionForward), + }) + require.NoError(t, err) + + tenv.tmc.setVReplicationExecResults(sourceTablet.tablet, fmt.Sprintf(getWorkflowStatus, workflow.ReverseWorkflowName(wf), sourceKs), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|workflow|source|pos|stop_pos|max_replication_log|state|db_name|time_updated|transaction_timestamp|message|tags|workflow_type|workflow_sub_type", + "int64|varchar|blob|varchar|varchar|int64|varchar|varchar|int64|int64|varchar|varchar|int64|int64", + ), + fmt.Sprintf("1|%s|%s|%s|NULL|0|running|vt_%s|1686577659|0|||1|0", workflow.ReverseWorkflowName(wf), bls, position, sourceKs), + ), + ) + + _, err = ws.WorkflowSwitchTraffic(ctx, &vtctldatapb.WorkflowSwitchTrafficRequest{ + Keyspace: targetKs, + Workflow: wf, + Cells: tenv.cells, + MaxReplicationLagAllowed: &vttime.Duration{Seconds: 922337203}, + EnableReverseReplication: true, + Direction: int32(workflow.DirectionBackward), + }) + require.NoError(t, err) +} + +func TestUpdateVReplicationWorkflow(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cells := []string{"zone1"} + tabletTypes := []string{"replica"} + workflow := "testwf" + keyspace := "testks" + vreplID := 1 + tabletUID := 100 + + tenv := newTestEnv(t, ctx, keyspace, []string{shard}) + defer tenv.close() + + tablet := tenv.addTablet(t, tabletUID, keyspace, shard) + defer tenv.deleteTablet(tablet.tablet) + + parsed := sqlparser.BuildParsedQuery(sqlSelectVReplicationWorkflowConfig, sidecar.DefaultName, ":wf") + bindVars := map[string]*querypb.BindVariable{ + "wf": sqltypes.StringBindVariable(workflow), + } + selectQuery, err := parsed.GenerateQuery(bindVars, nil) + require.NoError(t, err) + blsStr := fmt.Sprintf(`keyspace:"%s" shard:"%s" filter:{rules:{match:"customer" filter:"select * from customer"} rules:{match:"corder" filter:"select * from corder"}}`, + keyspace, shard) + selectRes := sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source|cell|tablet_types", + "int64|varchar|varchar|varchar", + ), + fmt.Sprintf("%d|%s|%s|%s", vreplID, blsStr, cells[0], tabletTypes[0]), + ) + idQuery, err := sqlparser.ParseAndBind("select id from _vt.vreplication where id = %a", + sqltypes.Int64BindVariable(int64(vreplID))) + require.NoError(t, err) + idRes := sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id", + "int64", + ), + fmt.Sprintf("%d", vreplID), + ) + + tests := []struct { + name string + request *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest + query string + }{ + { + name: "update cells", + request: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ + Workflow: workflow, + Cells: []string{"zone2"}, + // TabletTypes is an empty value, so the current value should be cleared + }, + query: fmt.Sprintf(`update _vt.vreplication set state = 'Stopped', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '%s', tablet_types = '' where id in (%d)`, + keyspace, shard, "zone2", vreplID), + }, + { + name: "update cells, NULL tablet_types", + request: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ + Workflow: workflow, + Cells: []string{"zone3"}, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)}, // So keep the current value of replica + }, + query: fmt.Sprintf(`update _vt.vreplication set state = 'Stopped', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '%s', tablet_types = '%s' where id in (%d)`, + keyspace, shard, "zone3", tabletTypes[0], vreplID), + }, + { + name: "update tablet_types", + request: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ + Workflow: workflow, + TabletSelectionPreference: tabletmanagerdatapb.TabletSelectionPreference_INORDER, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType_RDONLY, topodatapb.TabletType_REPLICA}, + }, + query: fmt.Sprintf(`update _vt.vreplication set state = 'Stopped', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '', tablet_types = '%s' where id in (%d)`, + keyspace, shard, "in_order:rdonly,replica", vreplID), + }, + { + name: "update tablet_types, NULL cells", + request: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ + Workflow: workflow, + Cells: textutil.SimulatedNullStringSlice, // So keep the current value of zone1 + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType_RDONLY}, + }, + query: fmt.Sprintf(`update _vt.vreplication set state = 'Stopped', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '%s', tablet_types = '%s' where id in (%d)`, + keyspace, shard, cells[0], "rdonly", vreplID), + }, + { + name: "update on_ddl", + request: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ + Workflow: workflow, + OnDdl: binlogdatapb.OnDDLAction_EXEC, + }, + query: fmt.Sprintf(`update _vt.vreplication set state = 'Stopped', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}} on_ddl:%s', cell = '', tablet_types = '' where id in (%d)`, + keyspace, shard, binlogdatapb.OnDDLAction_EXEC.String(), vreplID), + }, + { + name: "update cell,tablet_types,on_ddl", + request: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ + Workflow: workflow, + Cells: []string{"zone1", "zone2", "zone3"}, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType_RDONLY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_PRIMARY}, + OnDdl: binlogdatapb.OnDDLAction_EXEC_IGNORE, + }, + query: fmt.Sprintf(`update _vt.vreplication set state = 'Stopped', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}} on_ddl:%s', cell = '%s', tablet_types = '%s' where id in (%d)`, + keyspace, shard, binlogdatapb.OnDDLAction_EXEC_IGNORE.String(), "zone1,zone2,zone3", "rdonly,replica,primary", vreplID), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // This is needed because MockDBClient uses t.Fatal() + // which doesn't play well with subtests. + defer func() { + if err := recover(); err != nil { + t.Errorf("Recovered from panic: %v", err) + } + }() + + require.NotNil(t, tt.request, "No request provided") + require.NotEqual(t, "", tt.query, "No expected query provided") + + tt.request.State = binlogdatapb.VReplicationWorkflowState_Stopped + + // These are the same for each RPC call. + tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.DefaultName), &sqltypes.Result{}, nil) + tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(selectQuery, selectRes, nil) + tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.DefaultName), &sqltypes.Result{}, nil) + tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(idQuery, idRes, nil) + + // This is our expected query, which will also short circuit + // the test with an error as at this point we've tested what + // we wanted to test. + tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(tt.query, &sqltypes.Result{RowsAffected: 1}, errShortCircuit) + _, err = tenv.tmc.tablets[tabletUID].tm.UpdateVReplicationWorkflow(ctx, tt.request) + tenv.tmc.tablets[tabletUID].vrdbClient.Wait() + require.ErrorIs(t, err, errShortCircuit) + }) + } +} + +// TestSourceShardSelection tests the RPC calls made by VtctldServer to tablet +// managers include the correct set of BLS settings. +// +// errShortCircuit is intentionally injected into the MoveTables workflow to +// short-circuit the workflow after we've validated everything we wanted to in +// the test. +func TestSourceShardSelection(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sourceKs := "sourceks" + sourceShard0 := "-55" + sourceShard1 := "55-aa" + sourceShard2 := "aa-" + sourceTabletUID0 := 200 + sourceTabletUID1 := 201 + sourceTabletUID2 := 202 + + targetKs := "targetks" + targetShard0 := "-80" + targetShard1 := "80-" + targetTabletUID0 := 300 + targetTabletUID1 := 301 + + wf := "testwf" + + tenv := newTestEnv(t, ctx, sourceKs, []string{sourceShard0, sourceShard1, sourceShard2}) + defer tenv.close() + + sourceTablets := map[int]*fakeTabletConn{ + sourceTabletUID0: tenv.addTablet(t, sourceTabletUID0, sourceKs, sourceShard0), + sourceTabletUID1: tenv.addTablet(t, sourceTabletUID1, sourceKs, sourceShard1), + sourceTabletUID2: tenv.addTablet(t, sourceTabletUID2, sourceKs, sourceShard2), + } + for _, st := range sourceTablets { + defer tenv.deleteTablet(st.tablet) + } + + targetTablets := map[int]*fakeTabletConn{ + targetTabletUID0: tenv.addTablet(t, targetTabletUID0, targetKs, targetShard0), + targetTabletUID1: tenv.addTablet(t, targetTabletUID1, targetKs, targetShard1), + } + for _, tt := range targetTablets { + defer tenv.deleteTablet(tt.tablet) + } + + ws := workflow.NewServer(tenv.ts, tenv.tmc) + + tenv.ts.SaveVSchema(ctx, sourceKs, &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "id", + Name: "hash", + }}, + }, + }, + }) + tenv.ts.SaveVSchema(ctx, targetKs, &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "id", + Name: "hash", + }}, + }, + }, + }) + + tests := []struct { + name string + req *vtctldatapb.MoveTablesCreateRequest + schema *tabletmanagerdatapb.SchemaDefinition + vschema *vschemapb.Keyspace + streams map[int][]string + }{ + { + name: "same primary vindexes, use intersecting source shards", + req: &vtctldatapb.MoveTablesCreateRequest{ + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + Workflow: wf, + Cells: tenv.cells, + AllTables: true, + AutoStart: false, + }, + streams: map[int][]string{ + targetTabletUID0: { + sourceShard0, + sourceShard1, + }, + targetTabletUID1: { + sourceShard1, + sourceShard2, + }, + }, + }, + { + name: "different primary vindexes, use all source shards", + req: &vtctldatapb.MoveTablesCreateRequest{ + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + Workflow: wf, + Cells: tenv.cells, + AllTables: true, + AutoStart: false, + }, + vschema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "id", + Name: "hash", + }}, + }, + }, + }, + streams: map[int][]string{ + targetTabletUID0: { + sourceShard0, + sourceShard1, + sourceShard2, + }, + targetTabletUID1: { + sourceShard0, + sourceShard1, + sourceShard2, + }, + }, + }, + } + + for _, tt := range targetTablets { + tenv.tmc.setVReplicationExecResults(tt.tablet, fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s' and workflow='%s'", + targetKs, wf), &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(tt.tablet, fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s' and message='FROZEN' and workflow_sub_type != 1", + targetKs), &sqltypes.Result{}) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // This is needed because MockDBClient uses t.Fatal() + // which doesn't play well with subtests. + defer func() { + if err := recover(); err != nil { + t.Errorf("Recovered from panic: %v; Stack: %s", err, string(debug.Stack())) + } + }() + + require.NotNil(t, tt.req, "No MoveTablesCreate request provided") + require.NotEmpty(t, tt.streams, "No expected streams provided") + + if tt.schema == nil { + tt.schema = defaultSchema + } + tenv.tmc.SetSchema(tt.schema) + + if tt.vschema != nil { + tenv.ts.SaveVSchema(ctx, targetKs, tt.vschema) + } + + for uid, streams := range tt.streams { + tt := targetTablets[uid] + for i, sourceShard := range streams { + tt.vrdbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) + var err error + if i == len(streams)-1 { + // errShortCircuit is intentionally injected into the MoveTables + // workflow to short-circuit the workflow after we've validated + // everything we wanted to in the test. + err = errShortCircuit + } + tt.vrdbClient.ExpectRequest( + fmt.Sprintf(`%s values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1 where in_keyrange(id, \'%s.hash\', \'%s\')\"}}', '', 0, 0, '%s', '', now(), 0, 'Stopped', '%s', 1, 0, 0)`, + insertVReplicationPrefix, wf, sourceKs, sourceShard, targetKs, tt.tablet.Shard, tenv.cells[0], tenv.dbName), + &sqltypes.Result{InsertID: uint64(i + 1)}, + err, + ) + if errors.Is(err, errShortCircuit) { + break + } + tt.vrdbClient.ExpectRequest(getAutoIncrementStep, &sqltypes.Result{}, nil) + tt.vrdbClient.ExpectRequest( + fmt.Sprintf("select * from _vt.vreplication where id = %d", uint64(i+1)), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source|state", + "int64|varchar|varchar", + ), + fmt.Sprintf("%d|%s|Stopped", uint64(i+1), fmt.Sprintf(`keyspace:"%s" shard:"%s" filter:{rules:{match:"t1" filter:"select * from t1 where in_keyrange(id, '%s.hash', '%s')"}}`, sourceKs, sourceShard, targetKs, tt.tablet.Shard)), + ), + nil, + ) + } + } + + _, err := ws.MoveTablesCreate(ctx, tt.req) + for _, tt := range targetTablets { + tt.vrdbClient.Wait() + } + // errShortCircuit is intentionally injected into the MoveTables + // workflow to short-circuit the workflow after we've validated + // everything we wanted to in the test. + require.ErrorContains(t, err, fmt.Sprintf("%s\n%s", errShortCircuit.Error(), errShortCircuit.Error())) + }) + } +} + +// TestFailedMoveTablesCreateCleanup tests that the workflow +// and its artifacts are cleaned up when the workflow creation +// fails -- specifically after the point where we have created +// the workflow streams. +func TestFailedMoveTablesCreateCleanup(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 200 + shard := "0" + targetTabletUID := 300 + targetKs := "targetks" + wf := "testwf" + table := defaultSchema.TableDefinitions[0].Name + invalidTimeZone := "NOPE" + bls := fmt.Sprintf("keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"%s\" filter:\"select * from %s\"}}", + sourceKs, shard, table, table) + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + ws := workflow.NewServer(tenv.ts, tenv.tmc) + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) + defer tenv.deleteTablet(sourceTablet.tablet) + targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) + defer tenv.deleteTablet(targetTablet.tablet) + + tenv.mysqld.Schema = defaultSchema + tenv.mysqld.Schema.DatabaseSchema = tenv.dbName + tenv.mysqld.FetchSuperQueryMap = make(map[string]*sqltypes.Result) + tenv.mysqld.FetchSuperQueryMap[`select character_set_name, collation_name, column_name, data_type, column_type, extra from information_schema.columns where .*`] = sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "character_set_name|collation_name|column_name|data_type|column_type|extra", + "varchar|varchar|varchar|varchar|varchar|varchar", + ), + "NULL|NULL|id|bigint|bigint|", + "NULL|NULL|c2|bigint|bigint|", + ) + + // Let's be sure that the routing rules are empty to start. + err := topotools.SaveRoutingRules(ctx, tenv.ts, nil) + require.NoError(t, err, "failed to save routing rules") + + tenv.tmc.setVReplicationExecResults(targetTablet.tablet, fmt.Sprintf(checkForWorkflow, targetKs, wf), &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(targetTablet.tablet, fmt.Sprintf(checkForFrozenWorkflow, targetKs), &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(targetTablet.tablet, fmt.Sprintf(getWorkflow, targetKs, wf), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id", + "int64", + ), + "1", + ), + ) + targetTablet.vrdbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest( + fmt.Sprintf("%s %s", + insertVReplicationPrefix, + fmt.Sprintf(`values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"%s\" filter:\"select * from %s\"}} source_time_zone:\"%s\" target_time_zone:\"UTC\"', '', 0, 0, '%s', 'primary', now(), 0, 'Stopped', '%s', 1, 0, 0)`, + wf, sourceKs, shard, table, table, invalidTimeZone, strings.Join(tenv.cells, ","), tenv.dbName), + ), + &sqltypes.Result{ + RowsAffected: 1, + InsertID: 1, + }, + nil, + ) + targetTablet.vrdbClient.ExpectRequest(getAutoIncrementStep, &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(getVReplicationRecord, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source", + "int64|varchar", + ), + fmt.Sprintf("1|%s", bls), + ), + nil, + ) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(updatePickedSourceTablet, tenv.cells[0], sourceTabletUID), + &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(setSessionTZ, &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(setNames, &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(getRowsCopied, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "rows_copied", + "int64", + ), + "0", + ), + nil, + ) + targetTablet.vrdbClient.ExpectRequest(getWorkflowState, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "pos|stop_pos|max_tps|max_replication_lag|state|workflow_type|workflow|workflow_sub_type|defer_secondary_keys", + "varchar|varchar|int64|int64|varchar|int64|varchar|int64|int64", + ), + fmt.Sprintf("||0|0|Stopped|1|%s|0|0", wf), + ), + nil, + ) + targetTablet.vrdbClient.ExpectRequest(getNumCopyStateTable, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "count(distinct table_name)", + "int64", + ), + "1", + ), + nil, + ) + targetTablet.vrdbClient.ExpectRequest(getWorkflowState, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "pos|stop_pos|max_tps|max_replication_lag|state|workflow_type|workflow|workflow_sub_type|defer_secondary_keys", + "varchar|varchar|int64|int64|varchar|int64|varchar|int64|int64", + ), + fmt.Sprintf("||0|0|Stopped|1|%s|0|0", wf), + ), + nil, + ) + targetTablet.vrdbClient.ExpectRequest(getNumCopyStateTable, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "count(distinct table_name)", + "int64", + ), + "1", + ), + nil, + ) + targetTablet.vrdbClient.ExpectRequest(getBinlogRowImage, + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "@@binlog_row_image", + "varchar", + ), + "FULL", + ), + nil, + ) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(insertStreamsCreatedLog, bls), &sqltypes.Result{}, nil) + + tenv.tmc.setVReplicationExecResults(targetTablet.tablet, + fmt.Sprintf("select convert_tz('2006-01-02 15:04:05', '%s', 'UTC')", invalidTimeZone), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + fmt.Sprintf("convert_tz('2006-01-02 15:04:05', '%s', 'UTC')", invalidTimeZone), + "datetime", + ), + "NULL", + ), + ) + + // We expect the workflow creation to fail due to the invalid time + // zone and thus the workflow iteslf to be cleaned up. + tenv.tmc.setVReplicationExecResults(sourceTablet.tablet, + fmt.Sprintf(deleteWorkflow, sourceKs, workflow.ReverseWorkflowName(wf)), + &sqltypes.Result{RowsAffected: 1}, + ) + tenv.tmc.setVReplicationExecResults(targetTablet.tablet, + fmt.Sprintf(deleteWorkflow, targetKs, wf), + &sqltypes.Result{RowsAffected: 1}, + ) + + // Save the current target vschema. + vs, err := tenv.ts.GetVSchema(ctx, targetKs) + require.NoError(t, err, "failed to get target vschema") + + _, err = ws.MoveTablesCreate(ctx, &vtctldatapb.MoveTablesCreateRequest{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + Cells: tenv.cells, + TabletTypes: []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}, + IncludeTables: []string{table}, + SourceTimeZone: invalidTimeZone, + }) + require.ErrorContains(t, err, fmt.Sprintf("unable to perform time_zone conversions from %s to UTC", invalidTimeZone)) + + // Check that there are no orphaned routing rules. + rules, err := topotools.GetRoutingRules(ctx, tenv.ts) + require.NoError(t, err, "failed to get routing rules") + require.Equal(t, 0, len(rules), "expected no routing rules to be present") + + // Check that our vschema changes were also rolled back. + vs2, err := tenv.ts.GetVSchema(ctx, targetKs) + require.NoError(t, err, "failed to get target vschema") + require.Equal(t, vs, vs2, "expected vschema to be unchanged") +} diff --git a/go/vt/vttablet/tabletmanager/shard_sync.go b/go/vt/vttablet/tabletmanager/shard_sync.go index 7dc8710758e..ab995ec14b1 100644 --- a/go/vt/vttablet/tabletmanager/shard_sync.go +++ b/go/vt/vttablet/tabletmanager/shard_sync.go @@ -22,8 +22,8 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" @@ -114,7 +114,7 @@ func (tm *TabletManager) shardSyncLoop(ctx context.Context, notifyChan <-chan st } // If we think we're primary, check if we need to update the shard record. // Fetch the start time from the record we just got, because the tm's tablet can change. - primaryAlias, shouldDemote, err := syncShardPrimary(ctx, tm.TopoServer, tablet, logutil.ProtoToTime(tablet.PrimaryTermStartTime)) + primaryAlias, shouldDemote, err := syncShardPrimary(ctx, tm.TopoServer, tablet, protoutil.TimeFromProto(tablet.PrimaryTermStartTime).UTC()) if err != nil { log.Errorf("Failed to sync shard record: %v", err) // Start retry timer and go back to sleep. @@ -191,7 +191,7 @@ func syncShardPrimary(ctx context.Context, ts *topo.Server, tablet *topodatapb.T aliasStr := topoproto.TabletAliasString(tablet.Alias) log.Infof("Updating shard record: primary_alias=%v, primary_term_start_time=%v", aliasStr, PrimaryTermStartTime) si.PrimaryAlias = tablet.Alias - si.PrimaryTermStartTime = logutil.TimeToProto(PrimaryTermStartTime) + si.PrimaryTermStartTime = protoutil.TimeToProto(PrimaryTermStartTime) return nil }) if err != nil { diff --git a/go/vt/vttablet/tabletmanager/shard_sync_test.go b/go/vt/vttablet/tabletmanager/shard_sync_test.go index 83a7cede2e1..24078efa977 100644 --- a/go/vt/vttablet/tabletmanager/shard_sync_test.go +++ b/go/vt/vttablet/tabletmanager/shard_sync_test.go @@ -44,8 +44,9 @@ const ( ) func TestShardSync(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") statsTabletTypeCount.ResetAll() tm := newTestTM(t, ts, 100, keyspace, shard) defer tm.Stop() diff --git a/go/vt/vttablet/tabletmanager/tm_init.go b/go/vt/vttablet/tabletmanager/tm_init.go index c4e1c425667..14da1f9483d 100644 --- a/go/vt/vttablet/tabletmanager/tm_init.go +++ b/go/vt/vttablet/tabletmanager/tm_init.go @@ -46,9 +46,11 @@ import ( "github.com/spf13/pflag" "golang.org/x/sync/semaphore" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/flagutil" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/netutil" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sets" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/binlog" @@ -96,7 +98,6 @@ func registerInitFlags(fs *pflag.FlagSet) { fs.StringVar(&initDbNameOverride, "init_db_name_override", initDbNameOverride, "(init parameter) override the name of the db used by vttablet. Without this flag, the db name defaults to vt_") fs.StringVar(&skipBuildInfoTags, "vttablet_skip_buildinfo_tags", skipBuildInfoTags, "comma-separated list of buildinfo tags to skip from merging with --init_tags. each tag is either an exact match or a regular expression of the form '/regexp/'.") fs.Var(&initTags, "init_tags", "(init parameter) comma separated list of key:value pairs used to tag the tablet") - fs.BoolVar(&initPopulateMetadata, "init_populate_metadata", initPopulateMetadata, "(init parameter) populate metadata tables even if restore_from_backup is disabled. If restore_from_backup is enabled, metadata tables are always populated regardless of this flag.") fs.MarkDeprecated("init_populate_metadata", "this flag is no longer being used and will be removed in future versions") fs.DurationVar(&initTimeout, "init_timeout", initTimeout, "(init parameter) timeout to use for the init phase.") @@ -201,7 +202,7 @@ type TabletManager struct { } // BuildTabletFromInput builds a tablet record from input parameters. -func BuildTabletFromInput(alias *topodatapb.TabletAlias, port, grpcPort int32, dbServerVersion string, db *dbconfigs.DBConfigs) (*topodatapb.Tablet, error) { +func BuildTabletFromInput(alias *topodatapb.TabletAlias, port, grpcPort int32, db *dbconfigs.DBConfigs) (*topodatapb.Tablet, error) { hostname := tabletHostname if hostname == "" { var err error @@ -262,7 +263,6 @@ func BuildTabletFromInput(alias *topodatapb.TabletAlias, port, grpcPort int32, d Type: tabletType, DbNameOverride: initDbNameOverride, Tags: mergeTags(buildTags, initTags), - DbServerVersion: dbServerVersion, DefaultConnCollation: uint32(charset), }, nil } @@ -453,6 +453,10 @@ func (tm *TabletManager) Stop() { tm.stopShardSync() tm.stopRebuildKeyspace() + if tm.QueryServiceControl != nil { + tm.QueryServiceControl.Stats().Stop() + } + if tm.UpdateStream != nil { tm.UpdateStream.Disable() } @@ -486,6 +490,41 @@ func (tm *TabletManager) createKeyspaceShard(ctx context.Context) (*topo.ShardIn }); err != nil { return nil, vterrors.Wrap(err, "createKeyspaceShard: cannot GetOrCreateShard shard") } + + // Ensure that this tablet comes up with the sidecar database + // name that is set for the keyspace. + setSidecarDBName := func() error { + ks, err := tm.TopoServer.GetKeyspace(ctx, tablet.Keyspace) + if err != nil { + return vterrors.Wrap(err, "createKeyspaceShard: cannot GetOrCreateShard shard") + } + // If the keyspace exists but this is the first tablet added, then + // update the keyspace record to the default. + if ks.SidecarDbName == "" { + ks.SidecarDbName = sidecar.DefaultName + getlockctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) + defer cancel() + lockctx, unlock, lockErr := tm.TopoServer.LockKeyspace(getlockctx, tablet.Keyspace, "Setting sidecar database name") + if lockErr != nil { + return vterrors.Wrap(lockErr, "createKeyspaceShard: cannot GetOrCreateShard shard") + } + err = tm.TopoServer.UpdateKeyspace(lockctx, ks) + unlock(&lockErr) + if err != nil { + return vterrors.Wrap(err, "createKeyspaceShard: cannot GetOrCreateShard shard") + } + if lockErr != nil { + return vterrors.Wrap(lockErr, "createKeyspaceShard: cannot GetOrCreateShard shard") + } + } + // Have the tablet use the sidecar database that's set for the keyspace. + sidecar.SetName(ks.SidecarDbName) + return nil + } + if err := tm.withRetry(ctx, "setting sidecar database name", setSidecarDBName); err != nil { + return nil, err + } + tm.tmState.RefreshFromTopoInfo(ctx, shardInfo, nil) // Rebuild keyspace if this the first tablet in this keyspace/cell @@ -595,7 +634,7 @@ func (tm *TabletManager) checkPrimaryShip(ctx context.Context, si *topo.ShardInf // Update the primary term start time (current value is 0) because we // assume that we are actually the PRIMARY and in case of a tiebreak, // vtgate should prefer us. - tablet.PrimaryTermStartTime = logutil.TimeToProto(time.Now()) + tablet.PrimaryTermStartTime = protoutil.TimeToProto(time.Now()) }) case err == nil: if oldTablet.Type == topodatapb.TabletType_PRIMARY { @@ -676,7 +715,7 @@ func (tm *TabletManager) findMysqlPort(retryInterval time.Duration) { for { time.Sleep(retryInterval) mport, err := tm.MysqlDaemon.GetMysqlPort() - if err != nil { + if err != nil || mport == 0 { continue } log.Infof("Identified mysql port: %v", mport) @@ -710,8 +749,10 @@ func (tm *TabletManager) initTablet(ctx context.Context) error { // instance of a startup timeout). Upon running this code // again, we want to fix ShardReplication. if updateErr := topo.UpdateTabletReplicationData(ctx, tm.TopoServer, tablet); updateErr != nil { + log.Errorf("UpdateTabletReplicationData failed for tablet %v: %v", topoproto.TabletAliasString(tablet.Alias), updateErr) return vterrors.Wrap(updateErr, "UpdateTabletReplicationData failed") } + log.Infof("Successfully updated tablet replication data for alias: %v", topoproto.TabletAliasString(tablet.Alias)) // Then overwrite everything, ignoring version mismatch. if err := tm.TopoServer.UpdateTablet(ctx, topo.NewTabletInfo(tablet, nil)); err != nil { @@ -872,8 +913,15 @@ func (tm *TabletManager) initializeReplication(ctx context.Context, tabletType t } // If using semi-sync, we need to enable it before connecting to primary. // We should set the correct type, since it is used in replica semi-sync + tablet.Type = tabletType - if err := tm.fixSemiSync(tabletType, convertBoolToSemiSyncAction(reparentutil.IsReplicaSemiSync(durability, currentPrimary.Tablet, tablet))); err != nil { + + semiSyncAction, err := tm.convertBoolToSemiSyncAction(reparentutil.IsReplicaSemiSync(durability, currentPrimary.Tablet, tablet)) + if err != nil { + return nil, err + } + + if err := tm.fixSemiSync(tabletType, semiSyncAction); err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/tm_init_test.go b/go/vt/vttablet/tabletmanager/tm_init_test.go index 09d576442f0..148042bd6b1 100644 --- a/go/vt/vttablet/tabletmanager/tm_init_test.go +++ b/go/vt/vttablet/tabletmanager/tm_init_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/dbconfigs" @@ -43,12 +44,6 @@ import ( vschemapb "vitess.io/vitess/go/vt/proto/vschema" ) -var ( - dbServerVersion = "8.0.0" - charsetName = "utf8mb4" - dbsvCollID = collations.NewEnvironment(dbServerVersion).DefaultCollationForCharset(charsetName).ID() -) - func TestStartBuildTabletFromInput(t *testing.T) { alias := &topodatapb.TabletAlias{ Cell: "cell", @@ -76,17 +71,16 @@ func TestStartBuildTabletFromInput(t *testing.T) { Type: topodatapb.TabletType_REPLICA, Tags: map[string]string{}, DbNameOverride: "aa", - DbServerVersion: dbServerVersion, - DefaultConnCollation: uint32(dbsvCollID), + DefaultConnCollation: uint32(collations.Default()), } - gotTablet, err := BuildTabletFromInput(alias, port, grpcport, dbServerVersion, nil) + gotTablet, err := BuildTabletFromInput(alias, port, grpcport, nil) require.NoError(t, err) // Hostname should be resolved. assert.Equal(t, wantTablet, gotTablet) tabletHostname = "" - gotTablet, err = BuildTabletFromInput(alias, port, grpcport, dbServerVersion, nil) + gotTablet, err = BuildTabletFromInput(alias, port, grpcport, nil) require.NoError(t, err) assert.NotEqual(t, "", gotTablet.Hostname) @@ -98,7 +92,7 @@ func TestStartBuildTabletFromInput(t *testing.T) { Start: []byte(""), End: []byte("\xc0"), } - gotTablet, err = BuildTabletFromInput(alias, port, grpcport, dbServerVersion, nil) + gotTablet, err = BuildTabletFromInput(alias, port, grpcport, nil) require.NoError(t, err) // KeyRange check is explicit because the next comparison doesn't // show the diff well enough. @@ -108,25 +102,25 @@ func TestStartBuildTabletFromInput(t *testing.T) { // Invalid inputs. initKeyspace = "" initShard = "0" - _, err = BuildTabletFromInput(alias, port, grpcport, dbServerVersion, nil) + _, err = BuildTabletFromInput(alias, port, grpcport, nil) assert.Contains(t, err.Error(), "init_keyspace and init_shard must be specified") initKeyspace = "test_keyspace" initShard = "" - _, err = BuildTabletFromInput(alias, port, grpcport, dbServerVersion, nil) + _, err = BuildTabletFromInput(alias, port, grpcport, nil) assert.Contains(t, err.Error(), "init_keyspace and init_shard must be specified") initShard = "x-y" - _, err = BuildTabletFromInput(alias, port, grpcport, dbServerVersion, nil) + _, err = BuildTabletFromInput(alias, port, grpcport, nil) assert.Contains(t, err.Error(), "cannot validate shard name") initShard = "0" initTabletType = "bad" - _, err = BuildTabletFromInput(alias, port, grpcport, dbServerVersion, nil) + _, err = BuildTabletFromInput(alias, port, grpcport, nil) assert.Contains(t, err.Error(), "unknown TabletType bad") initTabletType = "primary" - _, err = BuildTabletFromInput(alias, port, grpcport, dbServerVersion, nil) + _, err = BuildTabletFromInput(alias, port, grpcport, nil) assert.Contains(t, err.Error(), "invalid init_tablet_type PRIMARY") } @@ -159,11 +153,10 @@ func TestBuildTabletFromInputWithBuildTags(t *testing.T) { Type: topodatapb.TabletType_REPLICA, Tags: servenv.AppVersion.ToStringMap(), DbNameOverride: "aa", - DbServerVersion: dbServerVersion, - DefaultConnCollation: uint32(dbsvCollID), + DefaultConnCollation: uint32(collations.Default()), } - gotTablet, err := BuildTabletFromInput(alias, port, grpcport, dbServerVersion, nil) + gotTablet, err := BuildTabletFromInput(alias, port, grpcport, nil) require.NoError(t, err) assert.Equal(t, wantTablet, gotTablet) } @@ -172,10 +165,11 @@ func TestStartCreateKeyspaceShard(t *testing.T) { defer func(saved time.Duration) { rebuildKeyspaceRetryInterval = saved }(rebuildKeyspaceRetryInterval) rebuildKeyspaceRetryInterval = 10 * time.Millisecond - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() statsTabletTypeCount.ResetAll() cell := "cell1" - ts := memorytopo.NewServer(cell) + ts := memorytopo.NewServer(ctx, cell) tm := newTestTM(t, ts, 1, "ks", "0") defer tm.Stop() @@ -186,7 +180,7 @@ func TestStartCreateKeyspaceShard(t *testing.T) { _, err := ts.GetShard(ctx, "ks", "0") require.NoError(t, err) - ensureSrvKeyspace(t, ts, cell, "ks") + ensureSrvKeyspace(t, ctx, ts, cell, "ks") srvVSchema, err := ts.GetSrvVSchema(context.Background(), cell) require.NoError(t, err) @@ -200,7 +194,7 @@ func TestStartCreateKeyspaceShard(t *testing.T) { defer tm.Stop() _, err = ts.GetShard(ctx, "ks1", "0") require.NoError(t, err) - ensureSrvKeyspace(t, ts, cell, "ks1") + ensureSrvKeyspace(t, ctx, ts, cell, "ks1") srvVSchema, err = ts.GetSrvVSchema(context.Background(), cell) require.NoError(t, err) assert.Equal(t, wantVSchema, srvVSchema.Keyspaces["ks1"]) @@ -249,16 +243,17 @@ func TestStartCreateKeyspaceShard(t *testing.T) { tm2 := newTestTM(t, ts, 6, "ks4", "80-") defer tm2.Stop() // Now that we've started the tablet for the other shard, srvKeyspace will succeed. - ensureSrvKeyspace(t, ts, cell, "ks4") + ensureSrvKeyspace(t, ctx, ts, cell, "ks4") } func TestCheckPrimaryShip(t *testing.T) { defer func(saved time.Duration) { rebuildKeyspaceRetryInterval = saved }(rebuildKeyspaceRetryInterval) rebuildKeyspaceRetryInterval = 10 * time.Millisecond - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() cell := "cell1" - ts := memorytopo.NewServer(cell) + ts := memorytopo.NewServer(ctx, cell) alias := &topodatapb.TabletAlias{ Cell: "cell1", Uid: 1, @@ -268,7 +263,7 @@ func TestCheckPrimaryShip(t *testing.T) { // This will create the respective topology records. tm := newTestTM(t, ts, 1, "ks", "0") tablet := tm.Tablet() - ensureSrvKeyspace(t, ts, cell, "ks") + ensureSrvKeyspace(t, ctx, ts, cell, "ks") ti, err := ts.GetTablet(ctx, alias) require.NoError(t, err) assert.Equal(t, topodatapb.TabletType_REPLICA, ti.Type) @@ -281,7 +276,7 @@ func TestCheckPrimaryShip(t *testing.T) { now := time.Now() _, err = ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { si.PrimaryAlias = alias - si.PrimaryTermStartTime = logutil.TimeToProto(now) + si.PrimaryTermStartTime = protoutil.TimeToProto(now) // Reassign to now for easier comparison. now = si.GetPrimaryTermStartTime() return nil @@ -354,7 +349,7 @@ func TestCheckPrimaryShip(t *testing.T) { require.NoError(t, err) _, err = ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { si.PrimaryAlias = otherAlias - si.PrimaryTermStartTime = logutil.TimeToProto(ter1.Add(-10 * time.Second)) + si.PrimaryTermStartTime = protoutil.TimeToProto(ter1.Add(-10 * time.Second)) return nil }) require.NoError(t, err) @@ -371,7 +366,7 @@ func TestCheckPrimaryShip(t *testing.T) { // timestamp, we remain replica. _, err = ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { si.PrimaryAlias = otherAlias - si.PrimaryTermStartTime = logutil.TimeToProto(ter4.Add(10 * time.Second)) + si.PrimaryTermStartTime = protoutil.TimeToProto(ter4.Add(10 * time.Second)) return nil }) require.NoError(t, err) @@ -382,7 +377,6 @@ func TestCheckPrimaryShip(t *testing.T) { fakeMysql.SetReplicationSourceInputs = append(fakeMysql.SetReplicationSourceInputs, fmt.Sprintf("%v:%v", otherTablet.MysqlHostname, otherTablet.MysqlPort)) fakeMysql.ExpectedExecuteSuperQueryList = []string{ "STOP SLAVE", - "RESET SLAVE ALL", "FAKE SET MASTER", "START SLAVE", } @@ -397,9 +391,10 @@ func TestCheckPrimaryShip(t *testing.T) { } func TestStartCheckMysql(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() cell := "cell1" - ts := memorytopo.NewServer(cell) + ts := memorytopo.NewServer(ctx, cell) tablet := newTestTablet(t, 1, "ks", "0") cp := mysql.ConnParams{ Host: "foo", @@ -422,13 +417,15 @@ func TestStartCheckMysql(t *testing.T) { assert.Equal(t, "foo", ti.MysqlHostname) } +// TestStartFindMysqlPort tests the functionality of findMySQLPort on tablet startup func TestStartFindMysqlPort(t *testing.T) { defer func(saved time.Duration) { mysqlPortRetryInterval = saved }(mysqlPortRetryInterval) - mysqlPortRetryInterval = 1 * time.Millisecond + mysqlPortRetryInterval = 50 * time.Millisecond - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() cell := "cell1" - ts := memorytopo.NewServer(cell) + ts := memorytopo.NewServer(ctx, cell) tablet := newTestTablet(t, 1, "ks", "0") fmd := newTestMysqlDaemon(t, -1) tm := &TabletManager{ @@ -446,23 +443,32 @@ func TestStartFindMysqlPort(t *testing.T) { require.NoError(t, err) assert.Equal(t, int32(0), ti.MysqlPort) - fmd.MysqlPort.Store(3306) + go func() { + // We want to simulate the mysql daemon returning 0 for the port + // for some time before returning the correct value. + // We expect the vttablet to ignore the 0 value and eventually find the 3306 value. + time.Sleep(200 * time.Millisecond) + fmd.MysqlPort.Store(0) + time.Sleep(200 * time.Millisecond) + fmd.MysqlPort.Store(3306) + }() for i := 0; i < 10; i++ { ti, err := ts.GetTablet(ctx, tm.tabletAlias) require.NoError(t, err) if ti.MysqlPort == 3306 { return } - time.Sleep(5 * time.Millisecond) + time.Sleep(500 * time.Millisecond) } - assert.Fail(t, "mysql port was not updated") + assert.Fail(t, "mysql port was not updated.", "Final value - %v", ti.MysqlPort) } // Init tablet fixes replication data when safe func TestStartFixesReplicationData(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() cell := "cell1" - ts := memorytopo.NewServer(cell, "cell2") + ts := memorytopo.NewServer(ctx, cell, "cell2") tm := newTestTM(t, ts, 1, "ks", "0") defer tm.Stop() tabletAlias := tm.tabletAlias @@ -493,8 +499,9 @@ func TestStartFixesReplicationData(t *testing.T) { // to be created due to a NodeExists error. During this particular error we were not doing // the sanity checks that the provided tablet was the same in the topo. func TestStartDoesNotUpdateReplicationDataForTabletInWrongShard(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1", "cell2") tm := newTestTM(t, ts, 1, "ks", "0") tm.Stop() @@ -516,9 +523,10 @@ func TestCheckTabletTypeResets(t *testing.T) { defer func(saved time.Duration) { rebuildKeyspaceRetryInterval = saved }(rebuildKeyspaceRetryInterval) rebuildKeyspaceRetryInterval = 10 * time.Millisecond - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() cell := "cell1" - ts := memorytopo.NewServer(cell) + ts := memorytopo.NewServer(ctx, cell) alias := &topodatapb.TabletAlias{ Cell: "cell1", Uid: 1, @@ -528,7 +536,7 @@ func TestCheckTabletTypeResets(t *testing.T) { // This will create the respective topology records. tm := newTestTM(t, ts, 1, "ks", "0") tablet := tm.Tablet() - ensureSrvKeyspace(t, ts, cell, "ks") + ensureSrvKeyspace(t, ctx, ts, cell, "ks") ti, err := ts.GetTablet(ctx, alias) require.NoError(t, err) assert.Equal(t, topodatapb.TabletType_REPLICA, ti.Type) @@ -555,7 +563,7 @@ func TestCheckTabletTypeResets(t *testing.T) { now := time.Now() _, err = ts.UpdateShardFields(ctx, "ks", "0", func(si *topo.ShardInfo) error { si.PrimaryAlias = alias - si.PrimaryTermStartTime = logutil.TimeToProto(now) + si.PrimaryTermStartTime = protoutil.TimeToProto(now) // Reassign to now for easier comparison. now = si.GetPrimaryTermStartTime() return nil @@ -711,11 +719,11 @@ func newTestTablet(t *testing.T, uid int, keyspace, shard string) *topodatapb.Ta } } -func ensureSrvKeyspace(t *testing.T, ts *topo.Server, cell, keyspace string) { +func ensureSrvKeyspace(t *testing.T, ctx context.Context, ts *topo.Server, cell, keyspace string) { t.Helper() found := false for i := 0; i < 10; i++ { - _, err := ts.GetSrvKeyspace(context.Background(), cell, "ks") + _, err := ts.GetSrvKeyspace(ctx, cell, keyspace) if err == nil { found = true break diff --git a/go/vt/vttablet/tabletmanager/tm_state.go b/go/vt/vttablet/tabletmanager/tm_state.go index d7124130174..df814ba5bee 100644 --- a/go/vt/vttablet/tabletmanager/tm_state.go +++ b/go/vt/vttablet/tabletmanager/tm_state.go @@ -27,10 +27,10 @@ import ( "github.com/spf13/pflag" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" @@ -89,7 +89,7 @@ func newTMState(tm *TabletManager, tablet *topodatapb.Tablet) *tmState { return &tmState{ tm: tm, displayState: displayState{ - tablet: proto.Clone(tablet).(*topodatapb.Tablet), + tablet: tablet.CloneVT(), }, tablet: tablet, ctx: ctx, @@ -186,7 +186,7 @@ func (ts *tmState) ChangeTabletType(ctx context.Context, tabletType topodatapb.T log.Infof("Changing Tablet Type: %v for %s", tabletType, ts.tablet.Alias.String()) if tabletType == topodatapb.TabletType_PRIMARY { - PrimaryTermStartTime := logutil.TimeToProto(time.Now()) + PrimaryTermStartTime := protoutil.TimeToProto(time.Now()) // Update the tablet record first. _, err := topotools.ChangeType(ctx, ts.tm.TopoServer, ts.tm.tabletAlias, tabletType, PrimaryTermStartTime) @@ -264,7 +264,7 @@ func (ts *tmState) updateLocked(ctx context.Context) error { return nil } - terTime := logutil.ProtoToTime(ts.tablet.PrimaryTermStartTime) + ptsTime := protoutil.TimeFromProto(ts.tablet.PrimaryTermStartTime).UTC() // Disable TabletServer first so the nonserving state gets advertised // before other services are shutdown. @@ -277,7 +277,7 @@ func (ts *tmState) updateLocked(ctx context.Context) error { // always return error from 'SetServingType' and 'applyDenyList' to our client. It is up to them to handle it accordingly. // UpdateLock is called from 'ChangeTabletType', 'Open' and 'RefreshFromTopoInfo'. For 'Open' and 'RefreshFromTopoInfo' we don't need // to propagate error to client hence no changes there but we will propagate error from 'ChangeTabletType' to client. - if err := ts.tm.QueryServiceControl.SetServingType(ts.tablet.Type, terTime, false, reason); err != nil { + if err := ts.tm.QueryServiceControl.SetServingType(ts.tablet.Type, ptsTime, false, reason); err != nil { errStr := fmt.Sprintf("SetServingType(serving=false) failed: %v", err) log.Errorf(errStr) // No need to short circuit. Apply all steps and return error in the end. @@ -326,7 +326,7 @@ func (ts *tmState) updateLocked(ctx context.Context) error { // Open TabletServer last so that it advertises serving after all other services are up. if reason == "" { - if err := ts.tm.QueryServiceControl.SetServingType(ts.tablet.Type, terTime, true, ""); err != nil { + if err := ts.tm.QueryServiceControl.SetServingType(ts.tablet.Type, ptsTime, true, ""); err != nil { errStr := fmt.Sprintf("Cannot start query service: %v", err) log.Errorf(errStr) returnErr = vterrors.Wrapf(err, errStr) @@ -459,15 +459,14 @@ type displayState struct { func (ts *tmState) publishForDisplay() { ts.displayState.mu.Lock() defer ts.displayState.mu.Unlock() - - ts.displayState.tablet = proto.Clone(ts.tablet).(*topodatapb.Tablet) + ts.displayState.tablet = ts.tablet.CloneVT() ts.displayState.deniedTables = ts.deniedTables[ts.tablet.Type] } func (ts *tmState) Tablet() *topodatapb.Tablet { ts.displayState.mu.Lock() defer ts.displayState.mu.Unlock() - return proto.Clone(ts.displayState.tablet).(*topodatapb.Tablet) + return ts.displayState.tablet.CloneVT() } func (ts *tmState) DeniedTables() []string { diff --git a/go/vt/vttablet/tabletmanager/tm_state_test.go b/go/vt/vttablet/tabletmanager/tm_state_test.go index 537580d4853..8bd98edefff 100644 --- a/go/vt/vttablet/tabletmanager/tm_state_test.go +++ b/go/vt/vttablet/tabletmanager/tm_state_test.go @@ -42,7 +42,10 @@ import ( ) func TestStateOpenClose(t *testing.T) { - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 1, "ks", "0") // Re-Open should be a no-op @@ -63,8 +66,9 @@ func TestStateOpenClose(t *testing.T) { } func TestStateRefreshFromTopo(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 1, "ks", "0") defer tm.Stop() @@ -73,8 +77,9 @@ func TestStateRefreshFromTopo(t *testing.T) { } func TestStateResharding(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 1, "ks", "0") defer tm.Stop() @@ -100,8 +105,9 @@ func TestStateResharding(t *testing.T) { } func TestStateDenyList(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 1, "ks", "0") defer tm.Stop() @@ -131,8 +137,9 @@ func TestStateDenyList(t *testing.T) { } func TestStateTabletControls(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 1, "ks", "0") defer tm.Stop() @@ -159,8 +166,9 @@ func TestStateTabletControls(t *testing.T) { } func TestStateIsShardServingisInSrvKeyspace(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 1, "ks", "0") defer tm.Stop() @@ -330,8 +338,9 @@ func TestStateIsShardServingisInSrvKeyspace(t *testing.T) { } func TestStateNonServing(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 1, "ks", "0") defer tm.Stop() @@ -346,8 +355,9 @@ func TestStateNonServing(t *testing.T) { } func TestStateChangeTabletType(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") statsTabletTypeCount.ResetAll() tm := newTestTM(t, ts, 2, "ks", "0") defer tm.Stop() @@ -387,8 +397,9 @@ func TestStateChangeTabletType(t *testing.T) { the new table type */ func TestStateChangeTabletTypeWithFailure(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") statsTabletTypeCount.ResetAll() // create TM with replica and put a hook to return error during SetServingType tm := newTestTM(t, ts, 2, "ks", "0") @@ -472,7 +483,7 @@ func TestChangeTypeErrorWhileWritingToTopo(t *testing.T) { factory := faketopo.NewFakeTopoFactory() // add cell1 to the factory. This returns a fake connection which we will use to set the get and update errors as we require. fakeConn := factory.AddCell("cell1") - ts := faketopo.NewFakeTopoServer(factory) + ts := faketopo.NewFakeTopoServer(context.TODO(), factory) statsTabletTypeCount.ResetAll() tm := newTestTM(t, ts, 2, "ks", "0") defer tm.Stop() @@ -519,8 +530,9 @@ func TestPublishStateNew(t *testing.T) { // we can't do using memorytopo, but we do test the retry // code path. - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 42, "ks", "0") ttablet, err := tm.TopoServer.GetTablet(ctx, tm.tabletAlias) require.NoError(t, err) @@ -565,8 +577,9 @@ func TestPublishStateNew(t *testing.T) { } func TestPublishDeleted(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "cell1") tm := newTestTM(t, ts, 2, "ks", "0") defer tm.Stop() diff --git a/go/vt/vttablet/tabletmanager/vdiff/action.go b/go/vt/vttablet/tabletmanager/vdiff/action.go index 7a18015fc24..546b4e200f6 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/action.go +++ b/go/vt/vttablet/tabletmanager/vdiff/action.go @@ -20,9 +20,12 @@ import ( "context" "encoding/json" "fmt" + "sort" + "strings" "github.com/google/uuid" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" @@ -97,7 +100,10 @@ func (vde *Engine) getVDiffSummary(vdiffID int64, dbClient binlogplayer.DBClient var qr *sqltypes.Result var err error - query := fmt.Sprintf(sqlVDiffSummary, vdiffID) + query, err := sqlparser.ParseAndBind(sqlVDiffSummary, sqltypes.Int64BindVariable(vdiffID)) + if err != nil { + return nil, err + } if qr, err = dbClient.ExecuteFetch(query, -1); err != nil { return nil, err } @@ -108,6 +114,9 @@ func (vde *Engine) getVDiffSummary(vdiffID int64, dbClient binlogplayer.DBClient // Validate vdiff options. Also setup defaults where applicable. func (vde *Engine) fixupOptions(options *tabletmanagerdatapb.VDiffOptions) (*tabletmanagerdatapb.VDiffOptions, error) { // Assign defaults to sourceCell and targetCell if not specified. + if options == nil { + options = &tabletmanagerdatapb.VDiffOptions{} + } sourceCell := options.PickerOptions.SourceCell targetCell := options.PickerOptions.TargetCell var defaultCell string @@ -118,10 +127,10 @@ func (vde *Engine) fixupOptions(options *tabletmanagerdatapb.VDiffOptions) (*tab return nil, err } } - if sourceCell == "" { + if sourceCell == "" { // Default is all cells sourceCell = defaultCell } - if targetCell == "" { + if targetCell == "" { // Default is all cells targetCell = defaultCell } options.PickerOptions.SourceCell = sourceCell @@ -130,6 +139,8 @@ func (vde *Engine) fixupOptions(options *tabletmanagerdatapb.VDiffOptions) (*tab return options, nil } +// getDefaultCell returns all of the cells in the topo as a comma +// separated string as the default value is all available cells. func (vde *Engine) getDefaultCell() (string, error) { cells, err := vde.ts.GetCellInfoNames(vde.ctx) if err != nil { @@ -139,15 +150,18 @@ func (vde *Engine) getDefaultCell() (string, error) { // Unreachable return "", fmt.Errorf("there are no cells in the topo") } - return cells[0], nil + sort.Strings(cells) // Ensure that the resulting value is deterministic + return strings.Join(cells, ","), nil } func (vde *Engine) handleCreateResumeAction(ctx context.Context, dbClient binlogplayer.DBClient, action VDiffAction, req *tabletmanagerdatapb.VDiffRequest, resp *tabletmanagerdatapb.VDiffResponse) error { var qr *sqltypes.Result - var err error options := req.Options - query := fmt.Sprintf(sqlGetVDiffID, encodeString(req.VdiffUuid)) + query, err := sqlparser.ParseAndBind(sqlGetVDiffID, sqltypes.StringBindVariable(req.VdiffUuid)) + if err != nil { + return err + } if qr, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } @@ -173,9 +187,18 @@ func (vde *Engine) handleCreateResumeAction(ctx context.Context, dbClient binlog return err } if action == CreateAction { - query := fmt.Sprintf(sqlNewVDiff, - encodeString(req.Keyspace), encodeString(req.Workflow), "pending", encodeString(string(optionsJSON)), - vde.thisTablet.Shard, topoproto.TabletDbName(vde.thisTablet), req.VdiffUuid) + query, err := sqlparser.ParseAndBind(sqlNewVDiff, + sqltypes.StringBindVariable(req.Keyspace), + sqltypes.StringBindVariable(req.Workflow), + sqltypes.StringBindVariable("pending"), + sqltypes.StringBindVariable(string(optionsJSON)), + sqltypes.StringBindVariable(vde.thisTablet.Shard), + sqltypes.StringBindVariable(topoproto.TabletDbName(vde.thisTablet)), + sqltypes.StringBindVariable(req.VdiffUuid), + ) + if err != nil { + return err + } if qr, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } @@ -185,7 +208,13 @@ func (vde *Engine) handleCreateResumeAction(ctx context.Context, dbClient binlog } resp.Id = int64(qr.InsertID) } else { - query := fmt.Sprintf(sqlResumeVDiff, encodeString(string(optionsJSON)), encodeString(req.VdiffUuid)) + query, err := sqlparser.ParseAndBind(sqlResumeVDiff, + sqltypes.StringBindVariable(string(optionsJSON)), + sqltypes.StringBindVariable(req.VdiffUuid), + ) + if err != nil { + return err + } if qr, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } @@ -219,7 +248,13 @@ func (vde *Engine) handleShowAction(ctx context.Context, dbClient binlogplayer.D vdiffUUID := "" if req.ActionArg == LastActionArg { - query := fmt.Sprintf(sqlGetMostRecentVDiff, encodeString(req.Keyspace), encodeString(req.Workflow)) + query, err := sqlparser.ParseAndBind(sqlGetMostRecentVDiff, + sqltypes.StringBindVariable(req.Keyspace), + sqltypes.StringBindVariable(req.Workflow), + ) + if err != nil { + return err + } if qr, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } @@ -234,7 +269,14 @@ func (vde *Engine) handleShowAction(ctx context.Context, dbClient binlogplayer.D } if vdiffUUID != "" { resp.VdiffUuid = vdiffUUID - query := fmt.Sprintf(sqlGetVDiffByKeyspaceWorkflowUUID, encodeString(req.Keyspace), encodeString(req.Workflow), encodeString(vdiffUUID)) + query, err := sqlparser.ParseAndBind(sqlGetVDiffByKeyspaceWorkflowUUID, + sqltypes.StringBindVariable(req.Keyspace), + sqltypes.StringBindVariable(req.Workflow), + sqltypes.StringBindVariable(vdiffUUID), + ) + if err != nil { + return err + } if qr, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } @@ -278,7 +320,7 @@ func (vde *Engine) handleStopAction(ctx context.Context, dbClient binlogplayer.D if controller.uuid == req.VdiffUuid { controller.Stop() if err := controller.markStoppedByRequest(); err != nil { - return err + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "encountered an error marking vdiff %s as stopped: %v", controller.uuid, err) } break } @@ -292,13 +334,22 @@ func (vde *Engine) handleDeleteAction(ctx context.Context, dbClient binlogplayer switch req.ActionArg { case AllActionArg: - query = fmt.Sprintf(sqlDeleteVDiffs, encodeString(req.Keyspace), encodeString(req.Workflow)) + query, err = sqlparser.ParseAndBind(sqlDeleteVDiffs, + sqltypes.StringBindVariable(req.Keyspace), + sqltypes.StringBindVariable(req.Workflow), + ) + if err != nil { + return err + } default: uuid, err := uuid.Parse(req.ActionArg) if err != nil { return fmt.Errorf("action argument %s not supported", req.ActionArg) } - query = fmt.Sprintf(sqlDeleteVDiffByUUID, encodeString(uuid.String())) + query, err = sqlparser.ParseAndBind(sqlDeleteVDiffByUUID, sqltypes.StringBindVariable(uuid.String())) + if err != nil { + return err + } } if _, err = dbClient.ExecuteFetch(query, 1); err != nil { return err diff --git a/go/vt/vttablet/tabletmanager/vdiff/action_test.go b/go/vt/vttablet/tabletmanager/vdiff/action_test.go index 7c06f5b6f2f..6c3106f5310 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/action_test.go +++ b/go/vt/vttablet/tabletmanager/vdiff/action_test.go @@ -18,41 +18,151 @@ package vdiff import ( "context" + "fmt" "reflect" "testing" "time" + "github.com/google/uuid" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/vterrors" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" ) func TestPerformVDiffAction(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() + vdiffenv := newTestVDiffEnv(t) + defer vdiffenv.close() + keyspace := "ks" + workflow := "wf" + uuid := uuid.New().String() tests := []struct { - name string - vde *Engine - req *tabletmanagerdatapb.VDiffRequest - want *tabletmanagerdatapb.VDiffResponse - wantErr error + name string + vde *Engine + req *tabletmanagerdatapb.VDiffRequest + preFunc func() error + postFunc func() error + want *tabletmanagerdatapb.VDiffResponse + expectQueries []string + wantErr error }{ { name: "engine not open", vde: &Engine{isOpen: false}, wantErr: vterrors.New(vtrpcpb.Code_UNAVAILABLE, "vdiff engine is closed"), }, + { + name: "create with defaults", + req: &tabletmanagerdatapb.VDiffRequest{ + Action: string(CreateAction), + VdiffUuid: uuid, + Options: &tabletmanagerdatapb.VDiffOptions{ + PickerOptions: &tabletmanagerdatapb.VDiffPickerOptions{}, + }, + }, + // Add a second cell. The default for source_cell and target_cell is all + // available cells, so this additional cell should then show up in the + // created vdiff record. + preFunc: func() error { + return tstenv.TopoServ.CreateCellInfo(ctx, "zone100_test", &topodatapb.CellInfo{}) + }, + expectQueries: []string{ + fmt.Sprintf("select id as id from _vt.vdiff where vdiff_uuid = %s", encodeString(uuid)), + fmt.Sprintf(`insert into _vt.vdiff(keyspace, workflow, state, options, shard, db_name, vdiff_uuid) values('', '', 'pending', '{\"picker_options\":{\"source_cell\":\"cell1,zone100_test\",\"target_cell\":\"cell1,zone100_test\"}}', '0', 'vt_vttest', %s)`, encodeString(uuid)), + }, + postFunc: func() error { + return tstenv.TopoServ.DeleteCellInfo(ctx, "zone100_test", true) + }, + }, + { + name: "create with cell alias", + req: &tabletmanagerdatapb.VDiffRequest{ + Action: string(CreateAction), + VdiffUuid: uuid, + Options: &tabletmanagerdatapb.VDiffOptions{ + PickerOptions: &tabletmanagerdatapb.VDiffPickerOptions{ + SourceCell: "all", + TargetCell: "all", + }, + }, + }, + // Add a second cell and create an cell alias that contains it. + preFunc: func() error { + if err := tstenv.TopoServ.CreateCellInfo(ctx, "zone100_test", &topodatapb.CellInfo{}); err != nil { + return err + } + cells := append(tstenv.Cells, "zone100_test") + return tstenv.TopoServ.CreateCellsAlias(ctx, "all", &topodatapb.CellsAlias{ + Cells: cells, + }) + }, + expectQueries: []string{ + fmt.Sprintf("select id as id from _vt.vdiff where vdiff_uuid = %s", encodeString(uuid)), + fmt.Sprintf(`insert into _vt.vdiff(keyspace, workflow, state, options, shard, db_name, vdiff_uuid) values('', '', 'pending', '{\"picker_options\":{\"source_cell\":\"all\",\"target_cell\":\"all\"}}', '0', 'vt_vttest', %s)`, encodeString(uuid)), + }, + postFunc: func() error { + if err := tstenv.TopoServ.DeleteCellInfo(ctx, "zone100_test", true); err != nil { + return err + } + return tstenv.TopoServ.DeleteCellsAlias(ctx, "all") + }, + }, + { + name: "delete by uuid", + req: &tabletmanagerdatapb.VDiffRequest{ + Action: string(DeleteAction), + ActionArg: uuid, + }, + expectQueries: []string{ + fmt.Sprintf(`delete from vd, vdt using _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) + where vd.vdiff_uuid = %s`, encodeString(uuid)), + }, + }, + { + name: "delete all", + req: &tabletmanagerdatapb.VDiffRequest{ + Action: string(DeleteAction), + ActionArg: "all", + Keyspace: keyspace, + Workflow: workflow, + }, + expectQueries: []string{ + fmt.Sprintf(`delete from vd, vdt, vdl using _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) + left join _vt.vdiff_log as vdl on (vd.id = vdl.vdiff_id) + where vd.keyspace = %s and vd.workflow = %s`, encodeString(keyspace), encodeString(workflow)), + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + if tt.preFunc != nil { + err := tt.preFunc() + require.NoError(t, err, "pre function failed: %v", err) + } + if tt.vde == nil { + tt.vde = vdiffenv.vde + } + for _, query := range tt.expectQueries { + vdiffenv.dbClient.ExpectRequest(query, &sqltypes.Result{}, nil) + } got, err := tt.vde.PerformVDiffAction(ctx, tt.req) if tt.wantErr != nil && !vterrors.Equals(err, tt.wantErr) { t.Errorf("Engine.PerformVDiffAction() error = %v, wantErr %v", err, tt.wantErr) return } - if !reflect.DeepEqual(got, tt.want) { + if tt.want != nil && !reflect.DeepEqual(got, tt.want) { t.Errorf("Engine.PerformVDiffAction() = %v, want %v", got, tt.want) } + if tt.postFunc != nil { + err := tt.postFunc() + require.NoError(t, err, "post function failed: %v", err) + } }) } } diff --git a/go/vt/vttablet/tabletmanager/vdiff/controller.go b/go/vt/vttablet/tabletmanager/vdiff/controller.go index 8edc4b333e4..ef8d8a6ba86 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/controller.go +++ b/go/vt/vttablet/tabletmanager/vdiff/controller.go @@ -23,12 +23,13 @@ import ( "strings" "time" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "google.golang.org/protobuf/encoding/prototext" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/log" @@ -149,7 +150,7 @@ type migrationSource struct { *shardStreamer vrID int32 - position mysql.Position + position replication.Position } func (ct *controller) updateState(dbClient binlogplayer.DBClient, state VDiffState, err error) error { @@ -165,8 +166,13 @@ func (ct *controller) updateState(dbClient binlogplayer.DBClient, state VDiffSta // Clear out any previous error for the vdiff on this shard err = errors.New("") } - query := fmt.Sprintf(sqlUpdateVDiffState, encodeString(string(state)), encodeString(err.Error()), extraCols, ct.id) - if _, err := dbClient.ExecuteFetch(query, 1); err != nil { + query := sqlparser.BuildParsedQuery(sqlUpdateVDiffState, + encodeString(string(state)), + encodeString(err.Error()), + extraCols, + ct.id, + ) + if _, err := dbClient.ExecuteFetch(query.Query, 1); err != nil { return err } insertVDiffLog(ct.vde.ctx, dbClient, ct.id, fmt.Sprintf("State changed to: %s", state)) @@ -179,9 +185,10 @@ func (ct *controller) start(ctx context.Context, dbClient binlogplayer.DBClient) return vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired") default: } - ct.workflowFilter = fmt.Sprintf("where workflow = %s and db_name = %s", encodeString(ct.workflow), encodeString(ct.vde.dbName)) - query := fmt.Sprintf(sqlGetVReplicationEntry, ct.workflowFilter) - qr, err := dbClient.ExecuteFetch(query, -1) + ct.workflowFilter = fmt.Sprintf("where workflow = %s and db_name = %s", encodeString(ct.workflow), + encodeString(ct.vde.dbName)) + query := sqlparser.BuildParsedQuery(sqlGetVReplicationEntry, ct.workflowFilter) + qr, err := dbClient.ExecuteFetch(query.Query, -1) if err != nil { return err } @@ -248,15 +255,17 @@ func (ct *controller) start(ctx context.Context, dbClient binlogplayer.DBClient) func (ct *controller) markStoppedByRequest() error { dbClient := ct.vde.dbClientFactoryFiltered() if err := dbClient.Connect(); err != nil { - return fmt.Errorf("encountered an error marking vdiff %s as stopped: %v", ct.uuid, err) + return err } defer dbClient.Close() - query := fmt.Sprintf(sqlUpdateVDiffStopped, ct.id) + query, err := sqlparser.ParseAndBind(sqlUpdateVDiffStopped, sqltypes.Int64BindVariable(ct.id)) + if err != nil { + return err + } var res *sqltypes.Result - var err error if res, err = dbClient.ExecuteFetch(query, 1); err != nil { - return fmt.Errorf("encountered an error marking vdiff %s as stopped: %v", ct.uuid, err) + return err } // We don't mark it as stopped if it's already completed if res.RowsAffected > 0 { diff --git a/go/vt/vttablet/tabletmanager/vdiff/engine.go b/go/vt/vttablet/tabletmanager/vdiff/engine.go index c60e585120c..72098eb52be 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/engine.go +++ b/go/vt/vttablet/tabletmanager/vdiff/engine.go @@ -24,9 +24,10 @@ import ( "sync" "time" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/proto/tabletmanagerdata" "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ -64,8 +65,6 @@ type Engine struct { // because we stop/start vreplication workflows during this process snapshotMu sync.Mutex - vdiffSchemaCreateOnce sync.Once - // This should only be set when the engine is being used in tests. It then provides // modified behavior for that env, e.g. not starting the retry goroutine. This should // NOT be set in production. @@ -299,7 +298,11 @@ func (vde *Engine) getVDiffsToRetry(ctx context.Context, dbClient binlogplayer.D } func (vde *Engine) getVDiffByID(ctx context.Context, dbClient binlogplayer.DBClient, id int64) (*sqltypes.Result, error) { - qr, err := dbClient.ExecuteFetch(fmt.Sprintf(sqlGetVDiffByID, id), -1) + query, err := sqlparser.ParseAndBind(sqlGetVDiffByID, sqltypes.Int64BindVariable(id)) + if err != nil { + return nil, err + } + qr, err := dbClient.ExecuteFetch(query, -1) if err != nil { return nil, err } @@ -332,8 +335,8 @@ func (vde *Engine) retryVDiffs(ctx context.Context) error { return ctx.Err() default: } - lastError := mysql.NewSQLErrorFromError(errors.New(row.AsString("last_error", ""))) - if !mysql.IsEphemeralError(lastError) { + lastError := sqlerror.NewSQLErrorFromError(errors.New(row.AsString("last_error", ""))) + if !sqlerror.IsEphemeralError(lastError) { continue } uuid := row.AsString("vdiff_uuid", "") @@ -342,7 +345,11 @@ func (vde *Engine) retryVDiffs(ctx context.Context) error { return err } log.Infof("Retrying vdiff %s that had an ephemeral error of '%v'", uuid, lastError) - if _, err = dbClient.ExecuteFetch(fmt.Sprintf(sqlRetryVDiff, id), 1); err != nil { + query, err := sqlparser.ParseAndBind(sqlRetryVDiff, sqltypes.Int64BindVariable(id)) + if err != nil { + return err + } + if _, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } options := &tabletmanagerdata.VDiffOptions{} diff --git a/go/vt/vttablet/tabletmanager/vdiff/engine_test.go b/go/vt/vttablet/tabletmanager/vdiff/engine_test.go index cfb9651fb11..fda37187031 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/engine_test.go +++ b/go/vt/vttablet/tabletmanager/vdiff/engine_test.go @@ -25,7 +25,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" @@ -140,6 +141,12 @@ func TestVDiff(t *testing.T) { ), `fields:{name:"c1" type:INT64 table:"t1" org_table:"t1" database:"vt_customer" org_name:"c1" column_length:20 charset:63 flags:53251} rows:{lengths:1 values:"1"}|0|{}`, ), nil) + vdenv.dbClient.ExpectRequest(fmt.Sprintf("select column_name as column_name, collation_name as collation_name from information_schema.columns where table_schema='%s' and table_name='t1' and column_name in ('c1')", vdiffDBName), sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "collation_name", + "varchar", + ), + "NULL", + ), nil) vdenv.dbClient.ExpectRequest(fmt.Sprintf("select table_name as table_name, table_rows as table_rows from INFORMATION_SCHEMA.TABLES where table_schema = '%s' and table_name in ('t1')", vdiffDBName), sqltypes.MakeTestResult(sqltypes.MakeTestFields( "table_name|table_rows", "varchar|int64", @@ -218,7 +225,7 @@ func TestEngineRetryErroredVDiffs(t *testing.T) { vdiffTestColTypes, ), fmt.Sprintf("1|%s|%s|%s|%s|%s|error|%s|%v", UUID, vdiffenv.workflow, tstenv.KeyspaceName, tstenv.ShardName, vdiffDBName, optionsJS, - mysql.NewSQLError(mysql.ERNoSuchTable, "42S02", "Table 'foo' doesn't exist")), + sqlerror.NewSQLError(sqlerror.ERNoSuchTable, "42S02", "Table 'foo' doesn't exist")), ), }, { @@ -228,7 +235,7 @@ func TestEngineRetryErroredVDiffs(t *testing.T) { vdiffTestColTypes, ), fmt.Sprintf("1|%s|%s|%s|%s|%s|error|%s|%v", UUID, vdiffenv.workflow, tstenv.KeyspaceName, tstenv.ShardName, vdiffDBName, optionsJS, - mysql.NewSQLError(mysql.ERLockWaitTimeout, "HY000", "Lock wait timeout exceeded; try restarting transaction")), + sqlerror.NewSQLError(sqlerror.ERLockWaitTimeout, "HY000", "Lock wait timeout exceeded; try restarting transaction")), ), expectRetry: true, }, diff --git a/go/vt/vttablet/tabletmanager/vdiff/framework_test.go b/go/vt/vttablet/tabletmanager/vdiff/framework_test.go index bb173c41abc..9f69e9ed86d 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vdiff/framework_test.go @@ -40,6 +40,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletconntest" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer" "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ -173,7 +174,9 @@ func init() { func TestMain(m *testing.M) { exitCode := func() int { var err error - tstenv, err = testenv.Init() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tstenv, err = testenv.Init(ctx) if err != nil { fmt.Fprintf(os.Stderr, "%v", err) return 1 @@ -235,7 +238,7 @@ func (ftc *fakeTabletConn) VStream(ctx context.Context, request *binlogdatapb.VS if vstreamHook != nil { vstreamHook(ctx) } - return vdiffenv.vse.Stream(ctx, request.Position, request.TableLastPKs, request.Filter, send) + return vdiffenv.vse.Stream(ctx, request.Position, request.TableLastPKs, request.Filter, throttlerapp.VStreamerName, send) } // vstreamRowsHook allows you to do work just before calling VStreamRows. @@ -477,6 +480,10 @@ func (tmc *fakeTMClient) PrimaryPosition(ctx context.Context, tablet *topodatapb return pos, nil } +func (tmc *fakeTMClient) CheckThrottler(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) { + return &tabletmanagerdatapb.CheckThrottlerResponse{}, nil +} + // ---------------------------------------------- // testVDiffEnv diff --git a/go/vt/vttablet/tabletmanager/vdiff/schema.go b/go/vt/vttablet/tabletmanager/vdiff/schema.go index 6524ccdd0f2..f8194dee14c 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/schema.go +++ b/go/vt/vttablet/tabletmanager/vdiff/schema.go @@ -17,47 +17,49 @@ limitations under the License. package vdiff const ( - sqlNewVDiff = "insert into _vt.vdiff(keyspace, workflow, state, options, shard, db_name, vdiff_uuid) values(%s, %s, '%s', %s, '%s', '%s', '%s')" - sqlResumeVDiff = `update _vt.vdiff as vd, _vt.vdiff_table as vdt set vd.options = %s, vd.started_at = NULL, vd.completed_at = NULL, vd.state = 'pending', - vdt.state = 'pending' where vd.vdiff_uuid = %s and vd.id = vdt.vdiff_id and vd.state in ('completed', 'stopped') + sqlAnalyzeTable = "analyze table `%s`.`%s`" + sqlNewVDiff = "insert into _vt.vdiff(keyspace, workflow, state, options, shard, db_name, vdiff_uuid) values(%a, %a, %a, %a, %a, %a, %a)" + sqlResumeVDiff = `update _vt.vdiff as vd, _vt.vdiff_table as vdt set vd.options = %a, vd.started_at = NULL, vd.completed_at = NULL, vd.state = 'pending', + vdt.state = 'pending' where vd.vdiff_uuid = %a and vd.id = vdt.vdiff_id and vd.state in ('completed', 'stopped') and vdt.state in ('completed', 'stopped')` sqlRetryVDiff = `update _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) set vd.state = 'pending', - vd.last_error = '', vdt.state = 'pending' where vd.id = %d and (vd.state = 'error' or vdt.state = 'error')` - sqlGetVDiffByKeyspaceWorkflowUUID = "select * from _vt.vdiff where keyspace = %s and workflow = %s and vdiff_uuid = %s" - sqlGetMostRecentVDiff = "select * from _vt.vdiff where keyspace = %s and workflow = %s order by id desc limit 1" - sqlGetVDiffByID = "select * from _vt.vdiff where id = %d" + vd.last_error = '', vdt.state = 'pending' where vd.id = %a and (vd.state = 'error' or vdt.state = 'error')` + sqlGetVDiffByKeyspaceWorkflowUUID = "select * from _vt.vdiff where keyspace = %a and workflow = %a and vdiff_uuid = %a" + sqlGetMostRecentVDiff = "select * from _vt.vdiff where keyspace = %a and workflow = %a order by id desc limit 1" + sqlGetVDiffByID = "select * from _vt.vdiff where id = %a" sqlDeleteVDiffs = `delete from vd, vdt, vdl using _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) left join _vt.vdiff_log as vdl on (vd.id = vdl.vdiff_id) - where vd.keyspace = %s and vd.workflow = %s` + where vd.keyspace = %a and vd.workflow = %a` sqlDeleteVDiffByUUID = `delete from vd, vdt using _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) - and vd.vdiff_uuid = %s` + where vd.vdiff_uuid = %a` sqlVDiffSummary = `select vd.state as vdiff_state, vd.last_error as last_error, vdt.table_name as table_name, vd.vdiff_uuid as 'uuid', vdt.state as table_state, vdt.table_rows as table_rows, vd.started_at as started_at, vdt.rows_compared as rows_compared, vd.completed_at as completed_at, IF(vdt.mismatch = 1, 1, 0) as has_mismatch, vdt.report as report from _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) - where vd.id = %d` + where vd.id = %a` // sqlUpdateVDiffState has a penultimate placeholder for any additional columns you want to update, e.g. `, foo = 1` sqlUpdateVDiffState = "update _vt.vdiff set state = %s, last_error = %s %s where id = %d" sqlUpdateVDiffStopped = `update _vt.vdiff as vd, _vt.vdiff_table as vdt set vd.state = 'stopped', vdt.state = 'stopped', vd.last_error = '' - where vd.id = vdt.vdiff_id and vd.id = %d and vd.state != 'completed'` + where vd.id = vdt.vdiff_id and vd.id = %a and vd.state != 'completed'` sqlGetVReplicationEntry = "select * from _vt.vreplication %s" sqlGetVDiffsToRun = "select * from _vt.vdiff where state in ('started','pending')" // what VDiffs have not been stopped or completed sqlGetVDiffsToRetry = "select * from _vt.vdiff where state = 'error' and json_unquote(json_extract(options, '$.core_options.auto_retry')) = 'true'" - sqlGetVDiffID = "select id as id from _vt.vdiff where vdiff_uuid = %s" + sqlGetVDiffID = "select id as id from _vt.vdiff where vdiff_uuid = %a" sqlGetAllVDiffs = "select * from _vt.vdiff order by id desc" + sqlGetTableRows = "select table_rows as table_rows from INFORMATION_SCHEMA.TABLES where table_schema = %a and table_name = %a" sqlGetAllTableRows = "select table_name as table_name, table_rows as table_rows from INFORMATION_SCHEMA.TABLES where table_schema = %s and table_name in (%s)" - sqlNewVDiffTable = "insert into _vt.vdiff_table(vdiff_id, table_name, state, table_rows) values(%d, %s, 'pending', %d)" + sqlNewVDiffTable = "insert into _vt.vdiff_table(vdiff_id, table_name, state, table_rows) values(%a, %a, 'pending', %a)" sqlGetVDiffTable = `select vdt.lastpk as lastpk, vdt.mismatch as mismatch, vdt.report as report from _vt.vdiff as vd inner join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) - where vdt.vdiff_id = %d and vdt.table_name = %s` - sqlUpdateTableRows = "update _vt.vdiff_table set table_rows = %d where vdiff_id = %d and table_name = %s" - sqlUpdateTableProgress = "update _vt.vdiff_table set rows_compared = %d, lastpk = %s, report = %s where vdiff_id = %d and table_name = %s" - sqlUpdateTableNoProgress = "update _vt.vdiff_table set rows_compared = %d, report = %s where vdiff_id = %d and table_name = %s" - sqlUpdateTableState = "update _vt.vdiff_table set state = %s where vdiff_id = %d and table_name = %s" - sqlUpdateTableStateAndReport = "update _vt.vdiff_table set state = %s, rows_compared = %d, report = %s where vdiff_id = %d and table_name = %s" - sqlUpdateTableMismatch = "update _vt.vdiff_table set mismatch = true where vdiff_id = %d and table_name = %s" + where vdt.vdiff_id = %a and vdt.table_name = %a` + sqlUpdateTableRows = "update _vt.vdiff_table set table_rows = %a where vdiff_id = %a and table_name = %a" + sqlUpdateTableProgress = "update _vt.vdiff_table set rows_compared = %a, lastpk = %a, report = %a where vdiff_id = %a and table_name = %a" + sqlUpdateTableNoProgress = "update _vt.vdiff_table set rows_compared = %a, report = %a where vdiff_id = %a and table_name = %a" + sqlUpdateTableState = "update _vt.vdiff_table set state = %a where vdiff_id = %a and table_name = %a" + sqlUpdateTableStateAndReport = "update _vt.vdiff_table set state = %a, rows_compared = %a, report = %a where vdiff_id = %a and table_name = %a" + sqlUpdateTableMismatch = "update _vt.vdiff_table set mismatch = true where vdiff_id = %a and table_name = %a" - sqlGetIncompleteTables = "select table_name as table_name from _vt.vdiff_table where vdiff_id = %d and state != 'completed'" + sqlGetIncompleteTables = "select table_name as table_name from _vt.vdiff_table where vdiff_id = %a and state != 'completed'" ) diff --git a/go/vt/vttablet/tabletmanager/vdiff/table_differ.go b/go/vt/vttablet/tabletmanager/vdiff/table_differ.go index 19985f1226e..3752829b898 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/table_differ.go +++ b/go/vt/vttablet/tabletmanager/vdiff/table_differ.go @@ -23,16 +23,11 @@ import ( "sync" "time" - "vitess.io/vitess/go/vt/proto/topodata" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/topo" - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/concurrency" @@ -41,6 +36,10 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" @@ -53,9 +52,9 @@ var BackgroundOperationTimeout = topo.RemoteOperationTimeout * 4 // compareColInfo contains the metadata for a column of the table being diffed type compareColInfo struct { - colIndex int // index of the column in the filter's select - collation collations.Collation // is the collation of the column, if any - isPK bool // is this column part of the primary key + colIndex int // index of the column in the filter's select + collation collations.ID // is the collation of the column, if any + isPK bool // is this column part of the primary key colName string } @@ -218,7 +217,7 @@ func (td *tableDiffer) selectTablets(ctx context.Context, cell, tabletTypes stri go func() { defer wg.Done() err1 = td.forEachSource(func(source *migrationSource) error { - tablet, err := pickTablet(ctx, sourceTopoServer, cell, ct.sourceKeyspace, source.shard, tabletTypes) + tablet, err := pickTablet(ctx, sourceTopoServer, cell, ct.vde.thisTablet.Alias.Cell, ct.sourceKeyspace, source.shard, tabletTypes) if err != nil { return err } @@ -230,7 +229,7 @@ func (td *tableDiffer) selectTablets(ctx context.Context, cell, tabletTypes stri wg.Add(1) go func() { defer wg.Done() - tablet, err2 := pickTablet(ctx, ct.ts, td.wd.opts.PickerOptions.TargetCell, ct.vde.thisTablet.Keyspace, + tablet, err2 := pickTablet(ctx, ct.ts, td.wd.opts.PickerOptions.TargetCell, ct.vde.thisTablet.Alias.Cell, ct.vde.thisTablet.Keyspace, ct.vde.thisTablet.Shard, td.wd.opts.PickerOptions.TabletTypes) if err2 != nil { return @@ -248,8 +247,8 @@ func (td *tableDiffer) selectTablets(ctx context.Context, cell, tabletTypes stri return err2 } -func pickTablet(ctx context.Context, ts *topo.Server, cell, keyspace, shard, tabletTypes string) (*topodata.Tablet, error) { - tp, err := discovery.NewTabletPicker(ts, []string{cell}, keyspace, shard, tabletTypes) +func pickTablet(ctx context.Context, ts *topo.Server, cell, localCell, keyspace, shard, tabletTypes string) (*topodata.Tablet, error) { + tp, err := discovery.NewTabletPicker(ctx, ts, []string{cell}, localCell, keyspace, shard, tabletTypes, discovery.TabletPickerOptions{}) if err != nil { return nil, err } @@ -264,7 +263,7 @@ func (td *tableDiffer) syncSourceStreams(ctx context.Context) error { if err := td.forEachSource(func(source *migrationSource) error { log.Flush() - if err := ct.tmc.WaitForPosition(waitCtx, source.tablet, mysql.EncodePosition(source.position)); err != nil { + if err := ct.tmc.WaitForPosition(waitCtx, source.tablet, replication.EncodePosition(source.position)); err != nil { return vterrors.Wrapf(err, "WaitForPosition for tablet %v", topoproto.TabletAliasString(source.tablet.Alias)) } return nil @@ -337,7 +336,7 @@ func (td *tableDiffer) restartTargetVReplicationStreams(ctx context.Context) err // Let's retry a few times if we get a retryable error. for i := 1; i <= 3; i++ { _, err := ct.tmc.VReplicationExec(ctx, ct.vde.thisTablet, query) - if err == nil || !mysql.IsEphemeralError(err) { + if err == nil || !sqlerror.IsEphemeralError(err) { break } log.Warningf("Encountered the following error while restarting the %q VReplication workflow, will retry (attempt #%d): %v", @@ -375,8 +374,7 @@ func (td *tableDiffer) streamOneShard(ctx context.Context, participant *shardStr // unbuffered channels which would present a major performance bottleneck. // This need arises from the gRPC VStreamRowsResponse pooling and re-use/recycling done for // gRPCQueryClient.VStreamRows() in vttablet/grpctabletconn/conn. - vsr := proto.Clone(vsrRaw).(*binlogdatapb.VStreamRowsResponse) - + vsr := vsrRaw.CloneVT() if len(fields) == 0 { if len(vsr.Fields) == 0 { return fmt.Errorf("did not received expected fields in response %+v on tablet %v", @@ -395,7 +393,7 @@ func (td *tableDiffer) streamOneShard(ctx context.Context, participant *shardStr result := sqltypes.Proto3ToResult(p3qr) // Fields should be received only once, and sent only once. - if vsr.Fields == nil { + if len(vsr.Fields) == 0 { result.Fields = nil } select { @@ -409,14 +407,14 @@ func (td *tableDiffer) streamOneShard(ctx context.Context, participant *shardStr } func (td *tableDiffer) setupRowSorters() { - // combine all sources into a slice and create a merge sorter for it + // Combine all sources into a slice and create a merge sorter for it. sources := make(map[string]*shardStreamer) for shard, source := range td.wd.ct.sources { sources[shard] = source.shardStreamer } td.sourcePrimitive = newMergeSorter(sources, td.tablePlan.comparePKs) - // create a merge sorter for the target + // Create a merge sorter for the target. targets := make(map[string]*shardStreamer) targets[td.wd.ct.targetShardStreamer.shard] = td.wd.ct.targetShardStreamer td.targetPrimitive = newMergeSorter(targets, td.tablePlan.comparePKs) @@ -442,7 +440,13 @@ func (td *tableDiffer) diff(ctx context.Context, rowsToCompare int64, debug, onl // We need to continue were we left off when appropriate. This can be an // auto-retry on error, or a manual retry via the resume command. // Otherwise the existing state will be empty and we start from scratch. - query := fmt.Sprintf(sqlGetVDiffTable, td.wd.ct.id, encodeString(td.table.Name)) + query, err := sqlparser.ParseAndBind(sqlGetVDiffTable, + sqltypes.Int64BindVariable(td.wd.ct.id), + sqltypes.StringBindVariable(td.table.Name), + ) + if err != nil { + return nil, err + } cs, err := dbClient.ExecuteFetch(query, -1) if err != nil { return nil, err @@ -624,14 +628,15 @@ func (td *tableDiffer) compare(sourceRow, targetRow []sqltypes.Value, cols []com continue } compareIndex := col.colIndex - var c int - var err error - var collationID collations.ID - // if the collation is nil or unknown, use binary collation to compare as bytes - if col.collation == nil { + var ( + c int + err error + collationID collations.ID + ) + // If the collation is nil or unknown, use binary collation to compare as bytes. + collationID = col.collation + if collationID == collations.Unknown { collationID = collations.CollationBinaryID - } else { - collationID = col.collation.ID() } c, err = evalengine.NullsafeCompare(sourceRow[compareIndex], targetRow[compareIndex], collationID) if err != nil { @@ -661,9 +666,26 @@ func (td *tableDiffer) updateTableProgress(dbClient binlogplayer.DBClient, dr *D return err } - query = fmt.Sprintf(sqlUpdateTableProgress, dr.ProcessedRows, encodeString(string(lastPK)), encodeString(string(rpt)), td.wd.ct.id, encodeString(td.table.Name)) + query, err = sqlparser.ParseAndBind(sqlUpdateTableProgress, + sqltypes.Int64BindVariable(dr.ProcessedRows), + sqltypes.StringBindVariable(string(lastPK)), + sqltypes.StringBindVariable(string(rpt)), + sqltypes.Int64BindVariable(td.wd.ct.id), + sqltypes.StringBindVariable(td.table.Name), + ) + if err != nil { + return err + } } else { - query = fmt.Sprintf(sqlUpdateTableNoProgress, dr.ProcessedRows, encodeString(string(rpt)), td.wd.ct.id, encodeString(td.table.Name)) + query, err = sqlparser.ParseAndBind(sqlUpdateTableNoProgress, + sqltypes.Int64BindVariable(dr.ProcessedRows), + sqltypes.StringBindVariable(string(rpt)), + sqltypes.Int64BindVariable(td.wd.ct.id), + sqltypes.StringBindVariable(td.table.Name), + ) + if err != nil { + return err + } } if _, err := dbClient.ExecuteFetch(query, 1); err != nil { return err @@ -672,8 +694,15 @@ func (td *tableDiffer) updateTableProgress(dbClient binlogplayer.DBClient, dr *D } func (td *tableDiffer) updateTableState(ctx context.Context, dbClient binlogplayer.DBClient, state VDiffState) error { - query := fmt.Sprintf(sqlUpdateTableState, encodeString(string(state)), td.wd.ct.id, encodeString(td.table.Name)) - if _, err := dbClient.ExecuteFetch(query, 1); err != nil { + query, err := sqlparser.ParseAndBind(sqlUpdateTableState, + sqltypes.StringBindVariable(string(state)), + sqltypes.Int64BindVariable(td.wd.ct.id), + sqltypes.StringBindVariable(td.table.Name), + ) + if err != nil { + return err + } + if _, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } insertVDiffLog(ctx, dbClient, td.wd.ct.id, fmt.Sprintf("%s: table %s", state, encodeString(td.table.Name))) @@ -692,8 +721,17 @@ func (td *tableDiffer) updateTableStateAndReport(ctx context.Context, dbClient b } else { report = "{}" } - query := fmt.Sprintf(sqlUpdateTableStateAndReport, encodeString(string(state)), dr.ProcessedRows, encodeString(report), td.wd.ct.id, encodeString(td.table.Name)) - if _, err := dbClient.ExecuteFetch(query, 1); err != nil { + query, err := sqlparser.ParseAndBind(sqlUpdateTableStateAndReport, + sqltypes.StringBindVariable(string(state)), + sqltypes.Int64BindVariable(dr.ProcessedRows), + sqltypes.StringBindVariable(report), + sqltypes.Int64BindVariable(td.wd.ct.id), + sqltypes.StringBindVariable(td.table.Name), + ) + if err != nil { + return err + } + if _, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } insertVDiffLog(ctx, dbClient, td.wd.ct.id, fmt.Sprintf("%s: table %s", state, encodeString(td.table.Name))) @@ -702,8 +740,14 @@ func (td *tableDiffer) updateTableStateAndReport(ctx context.Context, dbClient b } func updateTableMismatch(dbClient binlogplayer.DBClient, vdiffID int64, table string) error { - query := fmt.Sprintf(sqlUpdateTableMismatch, vdiffID, encodeString(table)) - if _, err := dbClient.ExecuteFetch(query, 1); err != nil { + query, err := sqlparser.ParseAndBind(sqlUpdateTableMismatch, + sqltypes.Int64BindVariable(vdiffID), + sqltypes.StringBindVariable(table), + ) + if err != nil { + return err + } + if _, err = dbClient.ExecuteFetch(query, 1); err != nil { return err } return nil diff --git a/go/vt/vttablet/tabletmanager/vdiff/table_plan.go b/go/vt/vttablet/tabletmanager/vdiff/table_plan.go index 64993c4eabd..e669dbd9a33 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/table_plan.go +++ b/go/vt/vttablet/tabletmanager/vdiff/table_plan.go @@ -20,14 +20,22 @@ import ( "fmt" "strings" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/engine/opcode" ) +const sqlSelectColumnCollations = "select column_name as column_name, collation_name as collation_name from information_schema.columns where table_schema=%a and table_name=%a and column_name in %a" + type tablePlan struct { // sourceQuery and targetQuery are select queries. sourceQuery string @@ -45,13 +53,17 @@ type tablePlan struct { // selectPks is the list of pk columns as they appear in the select clause for the diff. selectPks []int + dbName string table *tabletmanagerdatapb.TableDefinition orderBy sqlparser.OrderBy aggregates []*engine.AggregateParams } -func (td *tableDiffer) buildTablePlan() (*tablePlan, error) { - tp := &tablePlan{table: td.table} +func (td *tableDiffer) buildTablePlan(dbClient binlogplayer.DBClient, dbName string) (*tablePlan, error) { + tp := &tablePlan{ + table: td.table, + dbName: dbName, + } statement, err := sqlparser.Parse(td.sourceQuery) if err != nil { return nil, err @@ -91,17 +103,16 @@ func (td *tableDiffer) buildTablePlan() (*tablePlan, error) { // Check if it's an aggregate expression if expr, ok := selExpr.Expr.(sqlparser.AggrFunc); ok { - switch fname := strings.ToLower(expr.AggrName()); fname { + switch fname := expr.AggrName(); fname { case "count", "sum": // this will only work as long as aggregates can be pushed down to tablets // this won't work: "select count(*) from (select id from t limit 1)" // since vreplication only handles simple tables (no joins/derived tables) this is fine for now // but will need to be revisited when we add such support to vreplication - aggregateFuncType := "sum" - aggregates = append(aggregates, &engine.AggregateParams{ - Opcode: engine.SupportedAggregates[aggregateFuncType], - Col: len(sourceSelect.SelectExprs) - 1, - }) + aggregates = append(aggregates, engine.NewAggregateParam( + /*opcode*/ opcode.AggregateSum, + /*offset*/ len(sourceSelect.SelectExprs)-1, + /*alias*/ "")) } } default: @@ -141,7 +152,7 @@ func (td *tableDiffer) buildTablePlan() (*tablePlan, error) { }, } - err = tp.findPKs(targetSelect) + err = tp.findPKs(dbClient, targetSelect) if err != nil { return nil, err } @@ -165,7 +176,7 @@ func (td *tableDiffer) buildTablePlan() (*tablePlan, error) { } // findPKs identifies PKs and removes them from the columns to do data comparison. -func (tp *tablePlan) findPKs(targetSelect *sqlparser.Select) error { +func (tp *tablePlan) findPKs(dbClient binlogplayer.DBClient, targetSelect *sqlparser.Select) error { var orderby sqlparser.OrderBy for _, pk := range tp.table.PrimaryKeyColumns { found := false @@ -199,6 +210,52 @@ func (tp *tablePlan) findPKs(targetSelect *sqlparser.Select) error { Direction: sqlparser.AscOrder, }) } + if err := tp.getPKColumnCollations(dbClient); err != nil { + return vterrors.Wrapf(err, "error getting PK column collations for table %s", tp.table.Name) + } tp.orderBy = orderby return nil } + +// getPKColumnCollations queries the database to find the collation +// to use for the each PK column used in the query to ensure proper +// sorting when we do the merge sort and for the comparisons. It then +// saves the collations in the tablePlan's comparePKs column info +// structs for those subsequent operations. +func (tp *tablePlan) getPKColumnCollations(dbClient binlogplayer.DBClient) error { + columnList := make([]string, len(tp.comparePKs)) + for i := range tp.comparePKs { + columnList[i] = tp.comparePKs[i].colName + } + columnsBV, err := sqltypes.BuildBindVariable(columnList) + if err != nil { + return err + } + query, err := sqlparser.ParseAndBind(sqlSelectColumnCollations, + sqltypes.StringBindVariable(tp.dbName), + sqltypes.StringBindVariable(tp.table.Name), + columnsBV, + ) + if err != nil { + return err + } + qr, err := dbClient.ExecuteFetch(query, len(tp.comparePKs)) + if err != nil { + return err + } + if qr == nil || len(qr.Rows) != len(tp.comparePKs) { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unexpected result for query %s: %+v", query, qr) + } + collationEnv := collations.Local() + for _, row := range qr.Named().Rows { + columnName := row["column_name"].ToString() + collateName := strings.ToLower(row["collation_name"].ToString()) + for i := range tp.comparePKs { + if strings.EqualFold(tp.comparePKs[i].colName, columnName) { + tp.comparePKs[i].collation = collationEnv.LookupByName(collateName) + break + } + } + } + return nil +} diff --git a/go/vt/vttablet/tabletmanager/vdiff/utils.go b/go/vt/vttablet/tabletmanager/vdiff/utils.go index a6c711bae79..12ea1e8a68c 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/utils.go +++ b/go/vt/vttablet/tabletmanager/vdiff/utils.go @@ -40,10 +40,10 @@ func newMergeSorter(participants map[string]*shardStreamer, comparePKs []compare for i, cpk := range comparePKs { weightStringCol := -1 // if the collation is nil or unknown, use binary collation to compare as bytes - if cpk.collation == nil { - ob[i] = engine.OrderByParams{Col: cpk.colIndex, WeightStringCol: weightStringCol, CollationID: collations.CollationBinaryID} + if cpk.collation == collations.Unknown { + ob[i] = engine.OrderByParams{Col: cpk.colIndex, WeightStringCol: weightStringCol, Type: sqltypes.Unknown, CollationID: collations.CollationBinaryID} } else { - ob[i] = engine.OrderByParams{Col: cpk.colIndex, WeightStringCol: weightStringCol, CollationID: cpk.collation.ID()} + ob[i] = engine.OrderByParams{Col: cpk.colIndex, WeightStringCol: weightStringCol, Type: sqltypes.Unknown, CollationID: cpk.collation} } } return &engine.MergeSort{ @@ -64,7 +64,7 @@ func encodeString(in string) string { func pkColsToGroupByParams(pkCols []int) []*engine.GroupByParams { var res []*engine.GroupByParams for _, col := range pkCols { - res = append(res, &engine.GroupByParams{KeyCol: col, WeightStringCol: -1}) + res = append(res, &engine.GroupByParams{KeyCol: col, WeightStringCol: -1, Type: sqltypes.Unknown}) } return res } diff --git a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go index 35236b50d79..7d7af3f8a37 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go +++ b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go @@ -24,7 +24,9 @@ import ( "google.golang.org/protobuf/encoding/prototext" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/schema" @@ -62,18 +64,54 @@ func newWorkflowDiffer(ct *controller, opts *tabletmanagerdatapb.VDiffOptions) ( // by MySQL on each side then we'll have the same number of extras on // both sides. If that's the case, then let's see if the extra rows on // both sides are actually different. -func (wd *workflowDiffer) reconcileExtraRows(dr *DiffReport, maxExtraRowsToCompare int64) { +func (wd *workflowDiffer) reconcileExtraRows(dr *DiffReport, maxExtraRowsToCompare int64) error { + if dr.MismatchedRows == 0 { + // Get the VSchema on the target and source keyspaces. We can then use this + // for handling additional edge cases, such as adjusting results for reference + // tables when the shard count is different between the source and target as + // then there will be a extra rows reported on the side with more shards. + srcvschema, err := wd.ct.ts.GetVSchema(wd.ct.vde.ctx, wd.ct.sourceKeyspace) + if err != nil { + return err + } + tgtvschema, err := wd.ct.ts.GetVSchema(wd.ct.vde.ctx, wd.ct.vde.thisTablet.Keyspace) + if err != nil { + return err + } + svt, sok := srcvschema.Tables[dr.TableName] + tvt, tok := tgtvschema.Tables[dr.TableName] + if dr.ExtraRowsSource > 0 && sok && svt.Type == vindexes.TypeReference && dr.ExtraRowsSource%dr.MatchingRows == 0 { + // We have a reference table with no mismatched rows and the number of + // extra rows on the source is a multiple of the matching rows. This + // means that there's no actual diff. + dr.ExtraRowsSource = 0 + dr.ExtraRowsSourceDiffs = nil + } + if dr.ExtraRowsTarget > 0 && tok && tvt.Type == vindexes.TypeReference && dr.ExtraRowsTarget%dr.MatchingRows == 0 { + // We have a reference table with no mismatched rows and the number of + // extra rows on the target is a multiple of the matching rows. This + // means that there's no actual diff. + dr.ExtraRowsTarget = 0 + dr.ExtraRowsTargetDiffs = nil + } + } + if (dr.ExtraRowsSource == dr.ExtraRowsTarget) && (dr.ExtraRowsSource <= maxExtraRowsToCompare) { - for i := range dr.ExtraRowsSourceDiffs { + for i := 0; i < len(dr.ExtraRowsSourceDiffs); i++ { foundMatch := false - for j := range dr.ExtraRowsTargetDiffs { + for j := 0; j < len(dr.ExtraRowsTargetDiffs); j++ { if reflect.DeepEqual(dr.ExtraRowsSourceDiffs[i], dr.ExtraRowsTargetDiffs[j]) { dr.ExtraRowsSourceDiffs = append(dr.ExtraRowsSourceDiffs[:i], dr.ExtraRowsSourceDiffs[i+1:]...) - dr.ExtraRowsSource-- dr.ExtraRowsTargetDiffs = append(dr.ExtraRowsTargetDiffs[:j], dr.ExtraRowsTargetDiffs[j+1:]...) + dr.ExtraRowsSource-- dr.ExtraRowsTarget-- dr.ProcessedRows-- dr.MatchingRows++ + // We've removed an element from both slices at the current index + // so we need to shift the counters back as well to process the + // new elements at the index and avoid using an index out of range. + i-- + j-- foundMatch = true break } @@ -91,6 +129,8 @@ func (wd *workflowDiffer) reconcileExtraRows(dr *DiffReport, maxExtraRowsToCompa if len(dr.ExtraRowsTargetDiffs) > maxVDiffReportSampleRows { dr.ExtraRowsTargetDiffs = dr.ExtraRowsTargetDiffs[:maxVDiffReportSampleRows-1] } + + return nil } func (wd *workflowDiffer) diffTable(ctx context.Context, dbClient binlogplayer.DBClient, td *tableDiffer) error { @@ -115,7 +155,10 @@ func (wd *workflowDiffer) diffTable(ctx context.Context, dbClient binlogplayer.D } log.Infof("Table diff done on table %s for vdiff %s with report: %+v", td.table.Name, wd.ct.uuid, dr) if dr.ExtraRowsSource > 0 || dr.ExtraRowsTarget > 0 { - wd.reconcileExtraRows(dr, wd.opts.CoreOptions.MaxExtraRowsToCompare) + if err := wd.reconcileExtraRows(dr, wd.opts.CoreOptions.MaxExtraRowsToCompare); err != nil { + log.Errorf("Encountered an error reconciling extra rows found for table %s for vdiff %s: %v", td.table.Name, wd.ct.uuid, err) + return vterrors.Wrap(err, "failed to reconcile extra rows") + } } if dr.MismatchedRows > 0 || dr.ExtraRowsTarget > 0 || dr.ExtraRowsSource > 0 { @@ -162,7 +205,13 @@ func (wd *workflowDiffer) diff(ctx context.Context) error { return vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired") default: } - query := fmt.Sprintf(sqlGetVDiffTable, wd.ct.id, encodeString(td.table.Name)) + query, err := sqlparser.ParseAndBind(sqlGetVDiffTable, + sqltypes.Int64BindVariable(wd.ct.id), + sqltypes.StringBindVariable(td.table.Name), + ) + if err != nil { + return err + } qr, err := dbClient.ExecuteFetch(query, 1) if err != nil { return err @@ -192,7 +241,10 @@ func (wd *workflowDiffer) diff(ctx context.Context) error { } func (wd *workflowDiffer) markIfCompleted(ctx context.Context, dbClient binlogplayer.DBClient) error { - query := fmt.Sprintf(sqlGetIncompleteTables, wd.ct.id) + query, err := sqlparser.ParseAndBind(sqlGetIncompleteTables, sqltypes.Int64BindVariable(wd.ct.id)) + if err != nil { + return err + } qr, err := dbClient.ExecuteFetch(query, -1) if err != nil { return err @@ -236,7 +288,7 @@ func (wd *workflowDiffer) buildPlan(dbClient binlogplayer.DBClient, filter *binl buf := sqlparser.NewTrackedBuffer(nil) buf.Myprintf("select * from %v", sqlparser.NewIdentifierCS(table.Name)) sourceQuery = buf.String() - case key.IsKeyRange(rule.Filter): + case key.IsValidKeyRange(rule.Filter): buf := sqlparser.NewTrackedBuffer(nil) buf.Myprintf("select * from %v where in_keyrange(%v)", sqlparser.NewIdentifierCS(table.Name), sqlparser.NewStrLiteral(rule.Filter)) sourceQuery = buf.String() @@ -249,7 +301,7 @@ func (wd *workflowDiffer) buildPlan(dbClient binlogplayer.DBClient, filter *binl } td.lastPK = lastpkpb wd.tableDiffers[table.Name] = td - if _, err := td.buildTablePlan(); err != nil { + if _, err := td.buildTablePlan(dbClient, wd.ct.vde.dbName); err != nil { return err } } @@ -262,7 +314,13 @@ func (wd *workflowDiffer) buildPlan(dbClient binlogplayer.DBClient, filter *binl // getTableLastPK gets the lastPK protobuf message for a given vdiff table. func (wd *workflowDiffer) getTableLastPK(dbClient binlogplayer.DBClient, tableName string) (*querypb.QueryResult, error) { - query := fmt.Sprintf(sqlGetVDiffTable, wd.ct.id, encodeString(tableName)) + query, err := sqlparser.ParseAndBind(sqlGetVDiffTable, + sqltypes.Int64BindVariable(wd.ct.id), + sqltypes.StringBindVariable(tableName), + ) + if err != nil { + return nil, err + } qr, err := dbClient.ExecuteFetch(query, 1) if err != nil { return nil, err @@ -287,13 +345,28 @@ func (wd *workflowDiffer) initVDiffTables(dbClient binlogplayer.DBClient) error tableIn := strings.Builder{} n := 0 for tableName := range wd.tableDiffers { + // Update the table statistics for each table if requested. + if wd.opts.CoreOptions.UpdateTableStats { + stmt := sqlparser.BuildParsedQuery(sqlAnalyzeTable, + wd.ct.vde.dbName, + tableName, + ) + log.Infof("Updating the table stats for %s.%s using: %q", wd.ct.vde.dbName, tableName, stmt.Query) + if _, err := dbClient.ExecuteFetch(stmt.Query, -1); err != nil { + return err + } + log.Infof("Finished updating the table stats for %s.%s", wd.ct.vde.dbName, tableName) + } tableIn.WriteString(encodeString(tableName)) if n++; n < len(wd.tableDiffers) { tableIn.WriteByte(',') } } - query := fmt.Sprintf(sqlGetAllTableRows, encodeString(wd.ct.vde.dbName), tableIn.String()) - isqr, err := dbClient.ExecuteFetch(query, -1) + query := sqlparser.BuildParsedQuery(sqlGetAllTableRows, + encodeString(wd.ct.vde.dbName), + tableIn.String(), + ) + isqr, err := dbClient.ExecuteFetch(query.Query, -1) if err != nil { return err } @@ -301,15 +374,35 @@ func (wd *workflowDiffer) initVDiffTables(dbClient binlogplayer.DBClient) error tableName, _ := row.ToString("table_name") tableRows, _ := row.ToInt64("table_rows") - query := fmt.Sprintf(sqlGetVDiffTable, wd.ct.id, encodeString(tableName)) + query, err := sqlparser.ParseAndBind(sqlGetVDiffTable, + sqltypes.Int64BindVariable(wd.ct.id), + sqltypes.StringBindVariable(tableName), + ) + if err != nil { + return err + } qr, err := dbClient.ExecuteFetch(query, -1) if err != nil { return err } if len(qr.Rows) == 0 { - query = fmt.Sprintf(sqlNewVDiffTable, wd.ct.id, encodeString(tableName), tableRows) + query, err = sqlparser.ParseAndBind(sqlNewVDiffTable, + sqltypes.Int64BindVariable(wd.ct.id), + sqltypes.StringBindVariable(tableName), + sqltypes.Int64BindVariable(tableRows), + ) + if err != nil { + return err + } } else if len(qr.Rows) == 1 { - query = fmt.Sprintf(sqlUpdateTableRows, tableRows, wd.ct.id, encodeString(tableName)) + query, err = sqlparser.ParseAndBind(sqlUpdateTableRows, + sqltypes.Int64BindVariable(tableRows), + sqltypes.Int64BindVariable(wd.ct.id), + sqltypes.StringBindVariable(tableName), + ) + if err != nil { + return err + } } else { return fmt.Errorf("invalid state found for vdiff table %s for vdiff_id %d on tablet %s", tableName, wd.ct.id, wd.ct.vde.thisTablet.Alias) diff --git a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ_test.go b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ_test.go index 0f9ad9305ed..10c6406f046 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ_test.go +++ b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ_test.go @@ -29,10 +29,12 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/engine/opcode" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" ) func TestBuildPlanSuccess(t *testing.T) { @@ -61,11 +63,12 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "t1", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true, "c1"}, {1, collations.Collation(nil), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -80,11 +83,12 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "t1", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where in_keyrange('-80') order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true, "c1"}, {1, collations.Collation(nil), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -99,11 +103,12 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "t1", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true, "c1"}, {1, collations.Collation(nil), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -118,11 +123,12 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "t1", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c2, c1 from t1 order by c1 asc", targetQuery: "select c2, c1 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), false, "c2"}, {1, collations.Collation(nil), true, "c1"}}, - comparePKs: []compareColInfo{{1, collations.Collation(nil), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + comparePKs: []compareColInfo{{1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{1}, selectPks: []int{1}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -137,11 +143,12 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "t1", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c0 as c1, c2 from t2 order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true, "c1"}, {1, collations.Collation(nil), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -157,11 +164,12 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "nonpktext", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["nonpktext"]], sourceQuery: "select c1, textcol from nonpktext order by c1 asc", targetQuery: "select c1, textcol from nonpktext order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true, "c1"}, {1, collations.Collation(nil), false, "textcol"}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "textcol"}}, + comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -177,11 +185,12 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "nonpktext", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["nonpktext"]], sourceQuery: "select textcol, c1 from nonpktext order by c1 asc", targetQuery: "select textcol, c1 from nonpktext order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), false, "textcol"}, {1, collations.Collation(nil), true, "c1"}}, - comparePKs: []compareColInfo{{1, collations.Collation(nil), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), false, "textcol"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + comparePKs: []compareColInfo{{1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{1}, selectPks: []int{1}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -197,11 +206,12 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "pktext", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["pktext"]], sourceQuery: "select textcol, c2 from pktext order by textcol asc", targetQuery: "select textcol, c2 from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true, "textcol"}, {1, collations.Collation(nil), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true, "textcol"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -217,11 +227,12 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "pktext", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["pktext"]], sourceQuery: "select c2, textcol from pktext order by textcol asc", targetQuery: "select c2, textcol from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), false, "c2"}, {1, collations.Collation(nil), true, "textcol"}}, - comparePKs: []compareColInfo{{1, collations.Collation(nil), true, "textcol"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, + comparePKs: []compareColInfo{{1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, pkCols: []int{1}, selectPks: []int{1}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -237,11 +248,12 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "pktext", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["pktext"]], sourceQuery: "select c2, a + b as textcol from pktext order by textcol asc", targetQuery: "select c2, textcol from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), false, "c2"}, {1, collations.Collation(nil), true, "textcol"}}, - comparePKs: []compareColInfo{{1, collations.Collation(nil), true, "textcol"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, + comparePKs: []compareColInfo{{1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, pkCols: []int{1}, selectPks: []int{1}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -256,11 +268,12 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "multipk", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["multipk"]], sourceQuery: "select c1, c2 from multipk order by c1 asc, c2 asc", targetQuery: "select c1, c2 from multipk order by c1 asc, c2 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true, "c1"}, {1, collations.Collation(nil), true, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true, "c1"}, {1, collations.Collation(nil), true, "c2"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c2"}}, + comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c2"}}, pkCols: []int{0, 1}, selectPks: []int{0, 1}, orderBy: sqlparser.OrderBy{ @@ -282,11 +295,12 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "t1", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where in_keyrange('-80') order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true, "c1"}, {1, collations.Collation(nil), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -303,11 +317,12 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "t1", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where c2 = 2 and in_keyrange('-80') order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true, "c1"}, {1, collations.Collation(nil), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -324,11 +339,12 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "t1", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where in_keyrange('-80') and c2 = 2 order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true, "c1"}, {1, collations.Collation(nil), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -345,11 +361,12 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "t1", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where c2 = 2 and c1 = 1 and in_keyrange('-80') order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true, "c1"}, {1, collations.Collation(nil), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -366,11 +383,12 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "t1", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where c2 = 2 and in_keyrange('-80') order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true, "c1"}, {1, collations.Collation(nil), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -386,11 +404,12 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "t1", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 group by c1 order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true, "c1"}, {1, collations.Collation(nil), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -406,24 +425,22 @@ func TestBuildPlanSuccess(t *testing.T) { }, table: "aggr", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["aggr"]], sourceQuery: "select c1, c2, count(*) as c3, sum(c4) as c4 from t1 group by c1 order by c1 asc", targetQuery: "select c1, c2, c3, c4 from aggr order by c1 asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true, "c1"}, {1, collations.Collation(nil), false, "c2"}, {2, collations.Collation(nil), false, "c3"}, {3, collations.Collation(nil), false, "c4"}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}, {2, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c3"}, {3, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c4"}}, + comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ Expr: &sqlparser.ColName{Name: sqlparser.NewIdentifierCI("c1")}, Direction: sqlparser.AscOrder, }}, - aggregates: []*engine.AggregateParams{{ - Opcode: engine.AggregateSum, - Col: 2, - }, { - Opcode: engine.AggregateSum, - Col: 3, - }}, + aggregates: []*engine.AggregateParams{ + engine.NewAggregateParam(opcode.AggregateSum, 2, ""), + engine.NewAggregateParam(opcode.AggregateSum, 3, ""), + }, }, }, { // date conversion on import. @@ -433,11 +450,12 @@ func TestBuildPlanSuccess(t *testing.T) { sourceTimeZone: "US/Pacific", table: "datze", tablePlan: &tablePlan{ + dbName: vdiffDBName, table: testSchema.TableDefinitions[tableDefMap["datze"]], sourceQuery: "select id, dt from datze order by id asc", targetQuery: "select id, convert_tz(dt, 'UTC', 'US/Pacific') as dt from datze order by id asc", - compareCols: []compareColInfo{{0, collations.Collation(nil), true, "id"}, {1, collations.Collation(nil), false, "dt"}}, - comparePKs: []compareColInfo{{0, collations.Collation(nil), true, "id"}}, + compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "id"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "dt"}}, + comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "id"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -463,6 +481,31 @@ func TestBuildPlanSuccess(t *testing.T) { wd, err := newWorkflowDiffer(ct, vdiffenv.opts) require.NoError(t, err) dbc.ExpectRequestRE("select vdt.lastpk as lastpk, vdt.mismatch as mismatch, vdt.report as report", noResults, nil) + columnList := make([]string, len(tcase.tablePlan.comparePKs)) + collationList := make([]string, len(tcase.tablePlan.comparePKs)) + env := collations.Local() + for i := range tcase.tablePlan.comparePKs { + columnList[i] = tcase.tablePlan.comparePKs[i].colName + if tcase.tablePlan.comparePKs[i].collation != collations.Unknown { + collationList[i] = env.LookupName(tcase.tablePlan.comparePKs[i].collation) + } else { + collationList[i] = sqltypes.NULL.String() + } + } + columnBV, err := sqltypes.BuildBindVariable(columnList) + require.NoError(t, err) + query, err := sqlparser.ParseAndBind(sqlSelectColumnCollations, + sqltypes.StringBindVariable(vdiffDBName), + sqltypes.StringBindVariable(tcase.tablePlan.table.Name), + columnBV, + ) + require.NoError(t, err) + dbc.ExpectRequest(query, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "collation_name", + "varchar", + ), + collationList..., + ), nil) err = wd.buildPlan(dbc, filter, testSchema) require.NoError(t, err, tcase.input) require.Equal(t, 1, len(wd.tableDiffers), tcase.input) @@ -541,6 +584,12 @@ func TestBuildPlanInclude(t *testing.T) { from _vt.vdiff as vd inner join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) where vdt.vdiff_id = 1 and vdt.table_name = '%s'`, table) dbc.ExpectRequest(query, noResults, nil) + dbc.ExpectRequestRE("select column_name as column_name, collation_name as collation_name from information_schema.columns .*", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "collation_name", + "varchar", + ), + "NULL", + ), nil) } err = wd.buildPlan(dbc, filter, schm) require.NoError(t, err) diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller.go b/go/vt/vttablet/tabletmanager/vreplication/controller.go index ff12b9695e2..94e4741eeee 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller.go @@ -85,10 +85,9 @@ func newController(ctx context.Context, params map[string]string, dbClientFactor done: make(chan struct{}), source: &binlogdatapb.BinlogSource{}, } - ct.sourceTablet.Store("") + ct.sourceTablet.Store(&topodatapb.TabletAlias{}) log.Infof("creating controller with cell: %v, tabletTypes: %v, and params: %v", cell, tabletTypesStr, params) - // id id, err := strconv.ParseInt(params["id"], 10, 32) if err != nil { return nil, err @@ -99,21 +98,21 @@ func newController(ctx context.Context, params map[string]string, dbClientFactor state := params["state"] blpStats.State.Store(state) + if err := prototext.Unmarshal([]byte(params["source"]), ct.source); err != nil { + return nil, err + } + // Nothing to do if replication is stopped or is known to have an unrecoverable error. - if state == binlogplayer.BlpStopped || state == binlogplayer.BlpError { + if state == binlogdatapb.VReplicationWorkflowState_Stopped.String() || state == binlogdatapb.VReplicationWorkflowState_Error.String() { ct.cancel = func() {} close(ct.done) + blpStats.Stop() return ct, nil } - // source, stopPos - if err := prototext.Unmarshal([]byte(params["source"]), ct.source); err != nil { - return nil, err - } ct.stopPos = params["stop_pos"] if ct.source.GetExternalMysql() == "" { - // tabletPicker if v := params["cell"]; v != "" { cell = v } @@ -130,14 +129,13 @@ func newController(ctx context.Context, params map[string]string, dbClientFactor return nil, err } } - tp, err := discovery.NewTabletPicker(sourceTopo, cells, ct.source.Keyspace, ct.source.Shard, tabletTypesStr) + tp, err := discovery.NewTabletPicker(ctx, sourceTopo, cells, ct.vre.cell, ct.source.Keyspace, ct.source.Shard, tabletTypesStr, discovery.TabletPickerOptions{}) if err != nil { return nil, err } ct.tabletPicker = tp } - // cancel ctx, ct.cancel = context.WithCancel(ctx) go ct.run(ctx) @@ -166,7 +164,7 @@ func (ct *controller) run(ctx context.Context) { } ct.blpStats.ErrorCounts.Add([]string{"Stream Error"}, 1) - binlogplayer.LogError(fmt.Sprintf("error in stream %v, retrying after %v", ct.id, retryDelay), err) + binlogplayer.LogError(fmt.Sprintf("error in stream %v, will retry after %v", ct.id, retryDelay), err) timer := time.NewTimer(retryDelay) select { case <-ctx.Done(): @@ -180,7 +178,7 @@ func (ct *controller) run(ctx context.Context) { func (ct *controller) runBlp(ctx context.Context) (err error) { defer func() { - ct.sourceTablet.Store("") + ct.sourceTablet.Store(&topodatapb.TabletAlias{}) if x := recover(); x != nil { log.Errorf("stream %v: caught panic: %v\n%s", ct.id, x, tb.Stack(4)) err = fmt.Errorf("panic: %v", x) @@ -193,37 +191,17 @@ func (ct *controller) runBlp(ctx context.Context) (err error) { default: } - // Call this for youtube-specific customization. - // This should be done every time, in case mysql was restarted. - if err := ct.mysqld.EnableBinlogPlayback(); err != nil { - return err - } - dbClient := ct.dbClientFactory() if err := dbClient.Connect(); err != nil { return vterrors.Wrap(err, "can't connect to database") } defer dbClient.Close() - var tablet *topodatapb.Tablet - if ct.source.GetExternalMysql() == "" { - log.Infof("trying to find a tablet eligible for vreplication. stream id: %v", ct.id) - tpCtx, tpCancel := context.WithTimeout(ctx, discovery.GetTabletPickerRetryDelay()*tabletPickerRetries) - defer tpCancel() - tablet, err = ct.tabletPicker.PickForStreaming(tpCtx) - if err != nil { - select { - case <-ctx.Done(): - default: - ct.blpStats.ErrorCounts.Add([]string{"No Source Tablet Found"}, 1) - ct.setMessage(dbClient, fmt.Sprintf("Error picking tablet: %s", err.Error())) - } - return err - } - ct.setMessage(dbClient, fmt.Sprintf("Picked source tablet: %s", tablet.Alias.String())) - log.Infof("found a tablet eligible for vreplication. stream id: %v tablet: %s", ct.id, tablet.Alias.String()) - ct.sourceTablet.Store(tablet.Alias.String()) + tablet, err := ct.pickSourceTablet(ctx, dbClient) + if err != nil { + return err } + switch { case len(ct.source.Tables) > 0: // Table names can have search patterns. Resolve them against the schema. @@ -246,7 +224,7 @@ func (ct *controller) runBlp(ctx context.Context) (err error) { } // Tables may have varying character sets. To ship the bits without interpreting them // we set the character set to be binary. - if _, err := dbClient.ExecuteFetch("set names binary", 10000); err != nil { + if _, err := dbClient.ExecuteFetch("set names 'binary'", 10000); err != nil { return err } // We must apply AUTO_INCREMENT values precisely as we got them. This include the 0 value, which is not recommended in AUTO_INCREMENT, and yet is valid. @@ -272,11 +250,17 @@ func (ct *controller) runBlp(ctx context.Context) (err error) { vr := newVReplicator(ct.id, ct.source, vsClient, ct.blpStats, dbClient, ct.mysqld, ct.vre) err = vr.Replicate(ctx) ct.lastWorkflowError.Record(err) + // If this is a mysql error that we know needs manual intervention OR - // we cannot identify this as non-recoverable, but it has persisted beyond the retry limit (maxTimeToRetryError) - if isUnrecoverableError(err) || !ct.lastWorkflowError.ShouldRetry() { + // we cannot identify this as non-recoverable, but it has persisted + // beyond the retry limit (maxTimeToRetryError). + // In addition, we cannot restart a workflow started with AtomicCopy which has _any_ error. + if (err != nil && vr.WorkflowSubType == int32(binlogdatapb.VReplicationWorkflowSubType_AtomicCopy)) || + isUnrecoverableError(err) || !ct.lastWorkflowError.ShouldRetry() { + log.Errorf("vreplication stream %d going into error state due to %+v", ct.id, err) - if errSetState := vr.setState(binlogplayer.BlpError, err.Error()); errSetState != nil { + if errSetState := vr.setState(binlogdatapb.VReplicationWorkflowState_Error, err.Error()); errSetState != nil { + log.Errorf("INTERNAL: unable to setState() in controller. Attempting to set error text: [%v]; setState() error is: %v", err, errSetState) return err // yes, err and not errSetState. } return nil // this will cause vreplicate to quit the workflow @@ -298,7 +282,37 @@ func (ct *controller) setMessage(dbClient binlogplayer.DBClient, message string) } return nil } + +// pickSourceTablet picks a healthy serving tablet to source for +// the vreplication stream. If the source is marked as external, it +// returns nil. +func (ct *controller) pickSourceTablet(ctx context.Context, dbClient binlogplayer.DBClient) (*topodatapb.Tablet, error) { + if ct.source.GetExternalMysql() != "" { + return nil, nil + } + log.Infof("Trying to find an eligible source tablet for vreplication stream id %d for workflow: %s", + ct.id, ct.workflow) + tpCtx, tpCancel := context.WithTimeout(ctx, discovery.GetTabletPickerRetryDelay()*tabletPickerRetries) + defer tpCancel() + tablet, err := ct.tabletPicker.PickForStreaming(tpCtx) + if err != nil { + select { + case <-ctx.Done(): + default: + ct.blpStats.ErrorCounts.Add([]string{"No Source Tablet Found"}, 1) + ct.setMessage(dbClient, fmt.Sprintf("Error picking tablet: %s", err.Error())) + } + return tablet, err + } + ct.setMessage(dbClient, fmt.Sprintf("Picked source tablet: %s", tablet.Alias.String())) + log.Infof("Found eligible source tablet %s for vreplication stream id %d for workflow %s", + tablet.Alias.String(), ct.id, ct.workflow) + ct.sourceTablet.Store(tablet.Alias) + return tablet, err +} + func (ct *controller) Stop() { ct.cancel() + ct.blpStats.Stop() <-ct.done } diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go index 7644fff96b2..b168625d20a 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go @@ -19,6 +19,7 @@ package vreplication import ( "fmt" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/vt/sqlparser" ) @@ -76,7 +77,18 @@ func buildControllerPlan(query string) (*controllerPlan, error) { } func buildInsertPlan(ins *sqlparser.Insert) (*controllerPlan, error) { - switch sqlparser.String(ins.Table) { + // This should never happen. + if ins == nil { + return nil, fmt.Errorf("BUG: invalid nil INSERT statement found when building VReplication plan") + } + tableName, err := ins.Table.TableName() + if err != nil { + return nil, err + } + if tableName.Qualifier.String() != sidecar.GetName() && tableName.Qualifier.String() != sidecar.DefaultName { + return nil, fmt.Errorf("invalid database name: %s", tableName.Qualifier.String()) + } + switch tableName.Name.String() { case reshardingJournalTableName: return &controllerPlan{ opcode: reshardingJournalQuery, @@ -84,7 +96,7 @@ func buildInsertPlan(ins *sqlparser.Insert) (*controllerPlan, error) { case vreplicationTableName: // no-op default: - return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(ins.Table)) + return nil, fmt.Errorf("invalid table name: %s", tableName.Name.String()) } if ins.Action != sqlparser.InsertAct { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(ins)) @@ -129,7 +141,23 @@ func buildInsertPlan(ins *sqlparser.Insert) (*controllerPlan, error) { } func buildUpdatePlan(upd *sqlparser.Update) (*controllerPlan, error) { - switch sqlparser.String(upd.TableExprs) { + // This should never happen. + if upd == nil || len(upd.TableExprs) == 0 { + return nil, fmt.Errorf("BUG: invalid UPDATE statement found when building VReplication plan: %s", + sqlparser.String(upd)) + } + tableExpr, ok := upd.TableExprs[0].(*sqlparser.AliasedTableExpr) + if !ok { + return nil, fmt.Errorf("invalid FROM construct: %v", sqlparser.String(upd.TableExprs[0])) + } + tableName, err := tableExpr.TableName() + if err != nil { + return nil, err + } + if tableName.Qualifier.String() != sidecar.GetName() && tableName.Qualifier.String() != sidecar.DefaultName { + return nil, fmt.Errorf("invalid database name: %s", tableName.Qualifier.String()) + } + switch tableName.Name.String() { case reshardingJournalTableName: return &controllerPlan{ opcode: reshardingJournalQuery, @@ -137,7 +165,7 @@ func buildUpdatePlan(upd *sqlparser.Update) (*controllerPlan, error) { case vreplicationTableName: // no-op default: - return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(upd.TableExprs)) + return nil, fmt.Errorf("invalid table name: %s", tableName.Name.String()) } if upd.OrderBy != nil || upd.Limit != nil { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(upd)) @@ -149,7 +177,7 @@ func buildUpdatePlan(upd *sqlparser.Update) (*controllerPlan, error) { } buf1 := sqlparser.NewTrackedBuffer(nil) - buf1.Myprintf("select id from %s%v", vreplicationTableName, upd.Where) + buf1.Myprintf("select id from %s.%s%v", sidecar.GetIdentifier(), vreplicationTableName, upd.Where) upd.Where = &sqlparser.Where{ Type: sqlparser.WhereClause, Expr: &sqlparser.ComparisonExpr{ @@ -170,7 +198,23 @@ func buildUpdatePlan(upd *sqlparser.Update) (*controllerPlan, error) { } func buildDeletePlan(del *sqlparser.Delete) (*controllerPlan, error) { - switch sqlparser.String(del.TableExprs) { + // This should never happen. + if del == nil || len(del.TableExprs) == 0 { + return nil, fmt.Errorf("BUG: invalid DELETE statement found when building VReplication plan: %s", + sqlparser.String(del)) + } + tableExpr, ok := del.TableExprs[0].(*sqlparser.AliasedTableExpr) + if !ok { + return nil, fmt.Errorf("invalid FROM construct: %v", sqlparser.String(del.TableExprs[0])) + } + tableName, err := tableExpr.TableName() + if err != nil { + return nil, err + } + if tableName.Qualifier.String() != sidecar.GetName() && tableName.Qualifier.String() != sidecar.DefaultName { + return nil, fmt.Errorf("invalid database name: %s", tableName.Qualifier.String()) + } + switch tableName.Name.String() { case reshardingJournalTableName: return &controllerPlan{ opcode: reshardingJournalQuery, @@ -178,7 +222,7 @@ func buildDeletePlan(del *sqlparser.Delete) (*controllerPlan, error) { case vreplicationTableName: // no-op default: - return nil, fmt.Errorf("invalid table name: %v", sqlparser.String(del.TableExprs)) + return nil, fmt.Errorf("invalid table name: %s", tableName.Name.String()) } if del.Targets != nil { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(del)) @@ -191,7 +235,7 @@ func buildDeletePlan(del *sqlparser.Delete) (*controllerPlan, error) { } buf1 := sqlparser.NewTrackedBuffer(nil) - buf1.Myprintf("select id from %s%v", vreplicationTableName, del.Where) + buf1.Myprintf("select id from %s.%s%v", sidecar.GetIdentifier(), vreplicationTableName, del.Where) del.Where = &sqlparser.Where{ Type: sqlparser.WhereClause, Expr: &sqlparser.ComparisonExpr{ @@ -213,10 +257,10 @@ func buildDeletePlan(del *sqlparser.Delete) (*controllerPlan, error) { }, } buf3 := sqlparser.NewTrackedBuffer(nil) - buf3.Myprintf("delete from %s%v", copyStateTableName, copyStateWhere) + buf3.Myprintf("delete from %s.%s%v", sidecar.GetIdentifier(), copyStateTableName, copyStateWhere) buf4 := sqlparser.NewTrackedBuffer(nil) - buf4.Myprintf("delete from %s%v", postCopyActionTableName, copyStateWhere) + buf4.Myprintf("delete from %s.%s%v", sidecar.GetIdentifier(), postCopyActionTableName, copyStateWhere) return &controllerPlan{ opcode: deleteQuery, @@ -228,12 +272,28 @@ func buildDeletePlan(del *sqlparser.Delete) (*controllerPlan, error) { } func buildSelectPlan(sel *sqlparser.Select) (*controllerPlan, error) { - switch sqlparser.ToString(sel.From) { + // This should never happen. + if sel == nil || len(sel.From) == 0 { + return nil, fmt.Errorf("BUG: invalid SELECT statement found when building VReplication plan: %s", + sqlparser.String(sel)) + } + tableExpr, ok := sel.From[0].(*sqlparser.AliasedTableExpr) + if !ok { + return nil, fmt.Errorf("invalid FROM construct: %v", sqlparser.String(sel.From[0])) + } + tableName, err := tableExpr.TableName() + if err != nil { + return nil, err + } + if tableName.Qualifier.String() != sidecar.GetName() && tableName.Qualifier.String() != sidecar.DefaultName { + return nil, fmt.Errorf("invalid database name: %s", tableName.Qualifier.String()) + } + switch tableName.Name.String() { case vreplicationTableName, reshardingJournalTableName, copyStateTableName, vreplicationLogTableName: return &controllerPlan{ opcode: selectQuery, }, nil default: - return nil, fmt.Errorf("invalid table name: %v", sqlparser.ToString(sel.From)) + return nil, fmt.Errorf("invalid table name: %s", tableName.Name.String()) } } diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go b/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go index cca11e270fb..391b8d9c67e 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go @@ -81,7 +81,7 @@ func TestControllerPlan(t *testing.T) { err: "unsupported construct: insert ignore into _vt.vreplication values (null)", }, { in: "insert into other values(null)", - err: "invalid table name: other", + err: "invalid database name: ", }, { in: "insert into _vt.vreplication partition(a) values(null)", err: "unsupported construct: insert into _vt.vreplication partition (a) values (null)", @@ -133,7 +133,7 @@ func TestControllerPlan(t *testing.T) { opcode: reshardingJournalQuery, }, }, { - in: "update a set state='Running' where id = 1", + in: "update _vt.a set state='Running' where id = 1", err: "invalid table name: a", }, { in: "update _vt.vreplication set state='Running' where id = 1 order by id", @@ -183,7 +183,7 @@ func TestControllerPlan(t *testing.T) { opcode: reshardingJournalQuery, }, }, { - in: "delete from a where id = 1", + in: "delete from _vt.a where id = 1", err: "invalid table name: a", }, { in: "delete a, b from _vt.vreplication where id = 1", @@ -218,8 +218,17 @@ func TestControllerPlan(t *testing.T) { query: "select * from _vt.copy_state", }, }, { - in: "select * from a", + in: "select * from _vt.vreplication_log", + plan: &testControllerPlan{ + opcode: selectQuery, + query: "select * from _vt.vreplication_log", + }, + }, { + in: "select * from _vt.a", err: "invalid table name: a", + }, { + in: "select * from nope.a", + err: "invalid database name: nope", // Parser }, { diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_test.go b/go/vt/vttablet/tabletmanager/vreplication/controller_test.go index 8ced99f07d0..efab9693fa2 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_test.go @@ -23,13 +23,13 @@ import ( "testing" "time" - "vitess.io/vitess/go/vt/mysqlctl" - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/mysqlctl/tmutils" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -54,11 +54,11 @@ var ( sqltypes.NULL, // stop_pos sqltypes.NewInt64(9223372036854775807), // max_tps sqltypes.NewInt64(9223372036854775807), // max_replication_lag - sqltypes.NewVarBinary("Running"), // state - sqltypes.NewInt64(1), // workflow_type - sqltypes.NewVarChar("wf"), // workflow - sqltypes.NewInt64(0), // workflow_sub_type - sqltypes.NewInt64(0), // defer_secondary_keys + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), // state + sqltypes.NewInt64(1), // workflow_type + sqltypes.NewVarChar("wf"), // workflow + sqltypes.NewInt64(0), // workflow_sub_type + sqltypes.NewInt64(0), // defer_secondary_keys }, }, } @@ -74,7 +74,7 @@ func TestControllerKeyRange(t *testing.T) { defer deleteTablet(wantTablet) params := map[string]string{ "id": "1", - "state": binlogplayer.BlpRunning, + "state": binlogdatapb.VReplicationWorkflowState_Running.String(), "source": fmt.Sprintf(`keyspace:"%s" shard:"0" key_range:{end:"\x80"}`, env.KeyspaceName), } @@ -90,8 +90,9 @@ func TestControllerKeyRange(t *testing.T) { dbClientFactory := func() binlogplayer.DBClient { return dbClient } mysqld := &mysqlctl.FakeMysqlDaemon{} mysqld.MysqlPort.Store(3306) + vre := NewTestEngine(nil, wantTablet.GetAlias().Cell, mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil) - ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, nil) + ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, vre) if err != nil { t.Fatal(err) } @@ -111,7 +112,7 @@ func TestControllerTables(t *testing.T) { params := map[string]string{ "id": "1", - "state": binlogplayer.BlpRunning, + "state": binlogdatapb.VReplicationWorkflowState_Running.String(), "source": fmt.Sprintf(`keyspace:"%s" shard:"0" tables:"table1" tables:"/funtables_/" `, env.KeyspaceName), } @@ -151,8 +152,9 @@ func TestControllerTables(t *testing.T) { }, } mysqld.MysqlPort.Store(3306) + vre := NewTestEngine(nil, wantTablet.GetAlias().Cell, mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil) - ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, nil) + ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, vre) if err != nil { t.Fatal(err) } @@ -179,7 +181,7 @@ func TestControllerBadID(t *testing.T) { func TestControllerStopped(t *testing.T) { params := map[string]string{ "id": "1", - "state": binlogplayer.BlpStopped, + "state": binlogdatapb.VReplicationWorkflowState_Stopped.String(), } ct, err := newController(context.Background(), params, nil, nil, nil, "", "", nil, nil) @@ -202,7 +204,7 @@ func TestControllerOverrides(t *testing.T) { params := map[string]string{ "id": "1", - "state": binlogplayer.BlpRunning, + "state": binlogdatapb.VReplicationWorkflowState_Running.String(), "source": fmt.Sprintf(`keyspace:"%s" shard:"0" key_range:{end:"\x80"}`, env.KeyspaceName), "cell": env.Cells[0], "tablet_types": "replica", @@ -220,8 +222,9 @@ func TestControllerOverrides(t *testing.T) { dbClientFactory := func() binlogplayer.DBClient { return dbClient } mysqld := &mysqlctl.FakeMysqlDaemon{} mysqld.MysqlPort.Store(3306) + vre := NewTestEngine(nil, wantTablet.GetAlias().Cell, mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil) - ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "rdonly", nil, nil) + ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "rdonly", nil, vre) if err != nil { t.Fatal(err) } @@ -235,17 +238,20 @@ func TestControllerOverrides(t *testing.T) { } func TestControllerCanceledContext(t *testing.T) { - defer deleteTablet(addTablet(100)) + wantTablet := addTablet(100) + defer deleteTablet(wantTablet) params := map[string]string{ "id": "1", - "state": binlogplayer.BlpRunning, + "state": binlogdatapb.VReplicationWorkflowState_Running.String(), "source": fmt.Sprintf(`keyspace:"%s" shard:"0" key_range:{end:"\x80"}`, env.KeyspaceName), } ctx, cancel := context.WithCancel(context.Background()) cancel() - ct, err := newController(ctx, params, nil, nil, env.TopoServ, env.Cells[0], "rdonly", nil, nil) + vre := NewTestEngine(nil, wantTablet.GetAlias().Cell, nil, nil, nil, "", nil) + + ct, err := newController(ctx, params, nil, nil, env.TopoServ, env.Cells[0], "rdonly", nil, vre) if err != nil { t.Fatal(err) } @@ -268,7 +274,7 @@ func TestControllerRetry(t *testing.T) { params := map[string]string{ "id": "1", - "state": binlogplayer.BlpRunning, + "state": binlogdatapb.VReplicationWorkflowState_Running.String(), "source": fmt.Sprintf(`keyspace:"%s" shard:"0" key_range:{end:"\x80"}`, env.KeyspaceName), "cell": env.Cells[0], "tablet_types": "replica", @@ -289,8 +295,9 @@ func TestControllerRetry(t *testing.T) { dbClientFactory := func() binlogplayer.DBClient { return dbClient } mysqld := &mysqlctl.FakeMysqlDaemon{} mysqld.MysqlPort.Store(3306) + vre := NewTestEngine(nil, env.Cells[0], mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil) - ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "rdonly", nil, nil) + ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "rdonly", nil, vre) if err != nil { t.Fatal(err) } @@ -306,7 +313,7 @@ func TestControllerStopPosition(t *testing.T) { params := map[string]string{ "id": "1", - "state": binlogplayer.BlpRunning, + "state": binlogdatapb.VReplicationWorkflowState_Running.String(), "source": fmt.Sprintf(`keyspace:"%s" shard:"0" key_range:{end:"\x80"}`, env.KeyspaceName), } @@ -328,15 +335,15 @@ func TestControllerStopPosition(t *testing.T) { InsertID: 0, Rows: [][]sqltypes.Value{ { - sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos - sqltypes.NewVarBinary("MariaDB/0-1-1235"), // stop_pos - sqltypes.NewInt64(9223372036854775807), // max_tps - sqltypes.NewInt64(9223372036854775807), // max_replication_lag - sqltypes.NewVarBinary("Running"), // state - sqltypes.NewInt64(1), // workflow_type - sqltypes.NewVarChar("wf"), // workflow - sqltypes.NewInt64(1), // workflow_sub_type - sqltypes.NewInt64(1), // defer_secondary_keys + sqltypes.NewVarBinary("MariaDB/0-1-1083"), // pos + sqltypes.NewVarBinary("MariaDB/0-1-1235"), // stop_pos + sqltypes.NewInt64(9223372036854775807), // max_tps + sqltypes.NewInt64(9223372036854775807), // max_replication_lag + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), // state + sqltypes.NewInt64(1), // workflow_type + sqltypes.NewVarChar("wf"), // workflow + sqltypes.NewInt64(1), // workflow_sub_type + sqltypes.NewInt64(1), // defer_secondary_keys }, }, } @@ -350,8 +357,9 @@ func TestControllerStopPosition(t *testing.T) { dbClientFactory := func() binlogplayer.DBClient { return dbClient } mysqld := &mysqlctl.FakeMysqlDaemon{} mysqld.MysqlPort.Store(3306) + vre := NewTestEngine(nil, wantTablet.GetAlias().Cell, mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil) - ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, nil) + ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, vre) if err != nil { t.Fatal(err) } diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine.go b/go/vt/vttablet/tabletmanager/vreplication/engine.go index 5d336796b04..8b81dd722c6 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine.go @@ -27,9 +27,10 @@ import ( "sync/atomic" "time" - "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/constants/sidecar" + + "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/dbconfigs" @@ -42,17 +43,16 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" ) const ( - reshardingJournalTableName = "_vt.resharding_journal" - vreplicationTableName = "_vt.vreplication" - copyStateTableName = "_vt.copy_state" - postCopyActionTableName = "_vt.post_copy_action" - - maxRows = 10000 - throttlerVReplicationAppName = "vreplication" - throttlerOnlineDDLAppName = "online-ddl" + reshardingJournalTableName = "resharding_journal" + vreplicationTableName = "vreplication" + copyStateTableName = "copy_state" + postCopyActionTableName = "post_copy_action" + + maxRows = 10000 ) const ( @@ -135,7 +135,7 @@ func NewEngine(config *tabletenv.TabletConfig, ts *topo.Server, cell string, mys mysqld: mysqld, journaler: make(map[string]*journalEvent), ec: newExternalConnector(config.ExternalConnections), - throttlerClient: throttle.NewBackgroundClient(lagThrottler, throttlerVReplicationAppName, throttle.ThrottleCheckPrimaryWrite), + throttlerClient: throttle.NewBackgroundClient(lagThrottler, throttlerapp.VReplicationName, throttle.ThrottleCheckPrimaryWrite), } return vre @@ -221,7 +221,6 @@ func (vre *Engine) Open(ctx context.Context) { } func (vre *Engine) openLocked(ctx context.Context) error { - rows, err := vre.readAllRows(ctx) if err != nil { return err @@ -318,7 +317,6 @@ func (vre *Engine) Close() { // Wait for long-running functions to exit. vre.wg.Wait() - vre.mysqld.DisableBinlogPlayback() vre.isOpen = false vre.updateStats() @@ -378,10 +376,12 @@ func (vre *Engine) exec(query string, runAsAdmin bool) (*sqltypes.Result, error) // Change the database to ensure that these events don't get // replicated by another vreplication. This can happen when // we reverse replication. - if _, err := dbClient.ExecuteFetch("use _vt", 1); err != nil { + if _, err := dbClient.ExecuteFetch(fmt.Sprintf("use %s", sidecar.GetIdentifier()), 1); err != nil { return nil, err } + stats := binlogplayer.NewStats() + defer stats.Stop() switch plan.opcode { case insertQuery: qr, err := dbClient.ExecuteFetch(plan.query, 1) @@ -396,7 +396,7 @@ func (vre *Engine) exec(query string, runAsAdmin bool) (*sqltypes.Result, error) return nil, fmt.Errorf("insert id %v out of range", qr.InsertID) } - vdbc := newVDBClient(dbClient, binlogplayer.NewStats()) + vdbc := newVDBClient(dbClient, stats) // If we are creating multiple streams, for example in a // merge workflow going from 2 shards to 1 shard, we @@ -455,7 +455,7 @@ func (vre *Engine) exec(query string, runAsAdmin bool) (*sqltypes.Result, error) if err != nil { return nil, err } - vdbc := newVDBClient(dbClient, binlogplayer.NewStats()) + vdbc := newVDBClient(dbClient, stats) for _, id := range ids { params, err := readRow(dbClient, id) if err != nil { @@ -482,7 +482,7 @@ func (vre *Engine) exec(query string, runAsAdmin bool) (*sqltypes.Result, error) return &sqltypes.Result{}, nil } // Stop and delete the current controllers. - vdbc := newVDBClient(dbClient, binlogplayer.NewStats()) + vdbc := newVDBClient(dbClient, stats) for _, id := range ids { if ct := vre.controllers[id]; ct != nil { ct.Stop() @@ -680,13 +680,13 @@ func (vre *Engine) transitionJournal(je *journalEvent) { var newids []int32 for _, shard := range shardGTIDs { sgtid := je.shardGTIDs[shard] - bls := proto.Clone(vre.controllers[refid].source).(*binlogdatapb.BinlogSource) + bls := vre.controllers[refid].source.CloneVT() bls.Keyspace, bls.Shard = sgtid.Keyspace, sgtid.Shard workflowType, _ := strconv.ParseInt(params["workflow_type"], 10, 32) workflowSubType, _ := strconv.ParseInt(params["workflow_sub_type"], 10, 32) deferSecondaryKeys, _ := strconv.ParseBool(params["defer_secondary_keys"]) - ig := NewInsertGenerator(binlogplayer.BlpRunning, vre.dbName) + ig := NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Running, vre.dbName) ig.AddRow(params["workflow"], bls, sgtid.Gtid, params["cell"], params["tablet_types"], binlogdatapb.VReplicationWorkflowType(workflowType), binlogdatapb.VReplicationWorkflowSubType(workflowSubType), deferSecondaryKeys) qr, err := dbClient.ExecuteFetch(ig.String(), maxRows) @@ -781,7 +781,7 @@ func (vre *Engine) WaitForPos(ctx context.Context, id int32, pos string) error { // The full error we get back from MySQL in that case is: // Deadlock found when trying to get lock; try restarting transaction (errno 1213) (sqlstate 40001) // Docs: https://dev.mysql.com/doc/mysql-errors/en/server-error-reference.html#error_er_lock_deadlock - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.ERLockDeadlock { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERLockDeadlock { log.Infof("Deadlock detected waiting for pos %s: %v; will retry", pos, err) } else { return err @@ -792,7 +792,7 @@ func (vre *Engine) WaitForPos(ctx context.Context, id int32, pos string) error { return fmt.Errorf("unexpected result: %v", qr) } - // When err is not nil then we got a retryable error and will loop again + // When err is not nil then we got a retryable error and will loop again. if err == nil { current, dcerr := binlogplayer.DecodePosition(qr.Rows[0][0].ToString()) if dcerr != nil { @@ -804,7 +804,7 @@ func (vre *Engine) WaitForPos(ctx context.Context, id int32, pos string) error { return nil } - if qr.Rows[0][1].ToString() == binlogplayer.BlpStopped { + if qr.Rows[0][1].ToString() == binlogdatapb.VReplicationWorkflowState_Stopped.String() { return fmt.Errorf("replication has stopped at %v before reaching position %v, message: %s", current, mPos, qr.Rows[0][2].ToString()) } } diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go index d490417784f..32add04c8e0 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go @@ -31,6 +31,8 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/mysqlctl" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) func TestEngineOpen(t *testing.T) { @@ -383,12 +385,12 @@ func TestWaitForPos(t *testing.T) { dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ sqltypes.NewVarBinary("MariaDB/0-1-1083"), - sqltypes.NewVarBinary("Running"), + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), sqltypes.NewVarBinary(""), }}}, nil) dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ sqltypes.NewVarBinary("MariaDB/0-1-1084"), - sqltypes.NewVarBinary("Running"), + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), sqltypes.NewVarBinary(""), }}}, nil) start := time.Now() @@ -451,7 +453,7 @@ func TestWaitForPosCancel(t *testing.T) { dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ sqltypes.NewVarBinary("MariaDB/0-1-1083"), - sqltypes.NewVarBinary("Running"), + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), sqltypes.NewVarBinary(""), }}}, nil) ctx, cancel := context.WithCancel(context.Background()) @@ -469,7 +471,7 @@ func TestWaitForPosCancel(t *testing.T) { }() dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ sqltypes.NewVarBinary("MariaDB/0-1-1083"), - sqltypes.NewVarBinary("Running"), + sqltypes.NewVarBinary(binlogdatapb.VReplicationWorkflowState_Running.String()), sqltypes.NewVarBinary(""), }}}, nil) err = vre.WaitForPos(context.Background(), 1, "MariaDB/0-1-1084") diff --git a/go/vt/vttablet/tabletmanager/vreplication/external_connector.go b/go/vt/vttablet/tabletmanager/vreplication/external_connector.go index ac411de7ce6..1c20e2054be 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/external_connector.go +++ b/go/vt/vttablet/tabletmanager/vreplication/external_connector.go @@ -33,6 +33,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletconn" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer" ) @@ -51,6 +52,9 @@ type VStreamerClient interface { // VStreamRows streams rows of a table from the specified starting point. VStreamRows(ctx context.Context, query string, lastpk *querypb.QueryResult, send func(*binlogdatapb.VStreamRowsResponse) error) error + + // VStreamTables streams rows of a table from the specified starting point. + VStreamTables(ctx context.Context, send func(*binlogdatapb.VStreamTablesResponse) error) error } type externalConnector struct { @@ -126,7 +130,7 @@ func (c *mysqlConnector) Close(ctx context.Context) error { } func (c *mysqlConnector) VStream(ctx context.Context, startPos string, tablePKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { - return c.vstreamer.Stream(ctx, startPos, tablePKs, filter, send) + return c.vstreamer.Stream(ctx, startPos, tablePKs, filter, throttlerapp.ExternalConnectorName, send) } func (c *mysqlConnector) VStreamRows(ctx context.Context, query string, lastpk *querypb.QueryResult, send func(*binlogdatapb.VStreamRowsResponse) error) error { @@ -141,6 +145,10 @@ func (c *mysqlConnector) VStreamRows(ctx context.Context, query string, lastpk * return c.vstreamer.StreamRows(ctx, query, row, send) } +func (c *mysqlConnector) VStreamTables(ctx context.Context, send func(response *binlogdatapb.VStreamTablesResponse) error) error { + return c.vstreamer.StreamTables(ctx, send) +} + //----------------------------------------------------------- type tabletConnector struct { @@ -179,3 +187,8 @@ func (tc *tabletConnector) VStreamRows(ctx context.Context, query string, lastpk req := &binlogdatapb.VStreamRowsRequest{Target: tc.target, Query: query, Lastpk: lastpk} return tc.qs.VStreamRows(ctx, req, send) } + +func (tc *tabletConnector) VStreamTables(ctx context.Context, send func(*binlogdatapb.VStreamTablesResponse) error) error { + req := &binlogdatapb.VStreamTablesRequest{Target: tc.target} + return tc.qs.VStreamTables(ctx, req, send) +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/flags.go b/go/vt/vttablet/tabletmanager/vreplication/flags.go index a2b953da852..7456c51e524 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/flags.go +++ b/go/vt/vttablet/tabletmanager/vreplication/flags.go @@ -37,9 +37,9 @@ var ( replicaLagTolerance = 1 * time.Minute vreplicationHeartbeatUpdateInterval = 1 - vreplicationExperimentalFlags = int64(0x01) // enable vreplicationExperimentalFlagOptimizeInserts by default - vreplicationStoreCompressedGTID = false - vreplicationParallelInsertWorkers = 1 + + vreplicationStoreCompressedGTID = false + vreplicationParallelInsertWorkers = 1 ) func registerVReplicationFlags(fs *pflag.FlagSet) { @@ -62,8 +62,7 @@ func registerVReplicationFlags(fs *pflag.FlagSet) { // you have too many streams the extra write qps or cpu load due to these updates are unacceptable // you have too many streams and/or a large source field (lot of participating tables) which generates unacceptable increase in your binlog size fs.IntVar(&vreplicationHeartbeatUpdateInterval, "vreplication_heartbeat_update_interval", vreplicationHeartbeatUpdateInterval, "Frequency (in seconds, default 1, max 60) at which the time_updated column of a vreplication stream when idling") - fs.Int64Var(&vreplicationExperimentalFlags, "vreplication_experimental_flags", vreplicationExperimentalFlags, "(Bitmask) of experimental features in vreplication to enable") - fs.BoolVar(&vreplicationStoreCompressedGTID, "vreplication_store_compressed_gtid", vreplicationStoreCompressedGTID, "Store compressed gtids in the pos column of _vt.vreplication") + fs.BoolVar(&vreplicationStoreCompressedGTID, "vreplication_store_compressed_gtid", vreplicationStoreCompressedGTID, "Store compressed gtids in the pos column of the sidecar database's vreplication table") // deprecated flags (7.0), however there are several e2e tests that still depend on them fs.Duration("vreplication_healthcheck_topology_refresh", 30*time.Second, "refresh interval for re-reading the topology") diff --git a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go index ed8e9124b29..576ce4c22a8 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go @@ -24,9 +24,16 @@ import ( "reflect" "regexp" "strings" + "sync" "testing" "time" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/vttablet" + + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/dbconfigs" + "github.com/spf13/pflag" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" @@ -35,7 +42,6 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" - "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" @@ -46,6 +52,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletconn" "vitess.io/vitess/go/vt/vttablet/tabletconntest" qh "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication/queryhistory" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer" "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv" @@ -55,14 +62,16 @@ import ( ) var ( - playerEngine *Engine - streamerEngine *vstreamer.Engine - env *testenv.Env - globalFBC = &fakeBinlogClient{} - vrepldb = "vrepl" - globalDBQueries = make(chan string, 1000) - testForeignKeyQueries = false - doNotLogDBQueries = false + playerEngine *Engine + streamerEngine *vstreamer.Engine + env *testenv.Env + envMu sync.Mutex + globalFBC = &fakeBinlogClient{} + vrepldb = "vrepl" + globalDBQueries = make(chan string, 1000) + testForeignKeyQueries = false + testSetForeignKeyQueries = false + doNotLogDBQueries = false ) type LogExpectation struct { @@ -110,45 +119,86 @@ func init() { heartbeatRe = regexp.MustCompile(`update _vt.vreplication set time_updated=\d+ where id=\d+`) } +func cleanup() { + playerEngine.Close() + streamerEngine.Close() + env.Close() + envMu.Unlock() +} + +func setup(ctx context.Context) (func(), int) { + var err error + env, err = testenv.Init(ctx) + if err != nil { + fmt.Fprintf(os.Stderr, "%v", err) + return nil, 1 + } + envMu.Lock() + globalDBQueries = make(chan string, 1000) + resetBinlogClient() + + vttablet.VReplicationExperimentalFlags = 0 + + // Engines cannot be initialized in testenv because it introduces circular dependencies. + streamerEngine = vstreamer.NewEngine(env.TabletEnv, env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) + streamerEngine.InitDBConfig(env.KeyspaceName, env.ShardName) + streamerEngine.Open() + + if err := env.Mysqld.ExecuteSuperQuery(ctx, fmt.Sprintf("create database %s", vrepldb)); err != nil { + fmt.Fprintf(os.Stderr, "%v", err) + return nil, 1 + } + + if err := env.Mysqld.ExecuteSuperQuery(ctx, "set @@global.innodb_lock_wait_timeout=1"); err != nil { + fmt.Fprintf(os.Stderr, "%v", err) + return nil, 1 + } + externalConfig := map[string]*dbconfigs.DBConfigs{ + "exta": env.Dbcfgs, + "extb": env.Dbcfgs, + } + playerEngine = NewTestEngine(env.TopoServ, env.Cells[0], env.Mysqld, realDBClientFactory, realDBClientFactory, vrepldb, externalConfig) + playerEngine.Open(ctx) + + return cleanup, 0 +} + +// We run Tests twice, first with full binlog_row_image, then with noblob. +var runNoBlobTest = false + +// We use this tempDir for creating the external cnfs, since we create the test cluster afterwards. +const tempDir = "/tmp" + func TestMain(m *testing.M) { binlogplayer.SetProtocol("vreplication_test_framework", "test") _flag.ParseFlagsForTest() exitCode := func() int { - var err error - env, err = testenv.Init() - if err != nil { - fmt.Fprintf(os.Stderr, "%v", err) - return 1 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if err := utils.SetBinlogRowImageMode("full", tempDir); err != nil { + panic(err) } - defer env.Close() - - vreplicationExperimentalFlags = 0 - - // engines cannot be initialized in testenv because it introduces - // circular dependencies. - streamerEngine = vstreamer.NewEngine(env.TabletEnv, env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) - streamerEngine.InitDBConfig(env.KeyspaceName, env.ShardName) - streamerEngine.Open() - defer streamerEngine.Close() - - if err := env.Mysqld.ExecuteSuperQuery(context.Background(), fmt.Sprintf("create database %s", vrepldb)); err != nil { - fmt.Fprintf(os.Stderr, "%v", err) - return 1 + defer utils.SetBinlogRowImageMode("", tempDir) + cancel, ret := setup(ctx) + if ret > 0 { + return ret } - - if err := env.Mysqld.ExecuteSuperQuery(context.Background(), "set @@global.innodb_lock_wait_timeout=1"); err != nil { - fmt.Fprintf(os.Stderr, "%v", err) - return 1 + ret = m.Run() + if ret > 0 { + return ret } + cancel() - externalConfig := map[string]*dbconfigs.DBConfigs{ - "exta": env.Dbcfgs, - "extb": env.Dbcfgs, + runNoBlobTest = true + if err := utils.SetBinlogRowImageMode("noblob", tempDir); err != nil { + panic(err) } - playerEngine = NewTestEngine(env.TopoServ, env.Cells[0], env.Mysqld, realDBClientFactory, realDBClientFactory, vrepldb, externalConfig) - playerEngine.Open(context.Background()) - defer playerEngine.Close() - + defer utils.SetBinlogRowImageMode("", tempDir) + cancel, ret = setup(ctx) + if ret > 0 { + return ret + } + defer cancel() return m.Run() }() os.Exit(exitCode) @@ -164,7 +214,7 @@ func primaryPosition(t *testing.T) string { if err != nil { t.Fatal(err) } - return mysql.EncodePosition(pos) + return replication.EncodePosition(pos) } func execStatements(t *testing.T, queries []string) { @@ -260,7 +310,7 @@ func (ftc *fakeTabletConn) VStream(ctx context.Context, request *binlogdatapb.VS if vstreamHook != nil { vstreamHook(ctx) } - return streamerEngine.Stream(ctx, request.Position, request.TableLastPKs, request.Filter, send) + return streamerEngine.Stream(ctx, request.Position, request.TableLastPKs, request.Filter, throttlerapp.VStreamerName, send) } // vstreamRowsHook allows you to do work just before calling VStreamRows. @@ -426,6 +476,8 @@ func (dbc *realDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Resu } if !strings.HasPrefix(query, "select") && !strings.HasPrefix(query, "set") && !dbc.nolog { globalDBQueries <- query + } else if testSetForeignKeyQueries && strings.Contains(query, "set foreign_key_checks") { + globalDBQueries <- query } else if testForeignKeyQueries && strings.Contains(query, "foreign_key_checks") { //allow select/set for foreign_key_checks globalDBQueries <- query } @@ -434,6 +486,9 @@ func (dbc *realDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Resu func expectDeleteQueries(t *testing.T) { t.Helper() + if doNotLogDBQueries { + return + } expectNontxQueries(t, qh.Expect( "/delete from _vt.vreplication", "/delete from _vt.copy_state", @@ -484,6 +539,7 @@ func shouldIgnoreQuery(query string) bool { ", time_throttled=", // update of last throttle time, can happen out-of-band, so can't test for it ", component_throttled=", // update of last throttle time, can happen out-of-band, so can't test for it "context cancel", + "SELECT rows_copied FROM _vt.vreplication WHERE id=", } if sidecardb.MatchesInitQuery(query) { return true @@ -498,6 +554,9 @@ func shouldIgnoreQuery(query string) bool { func expectDBClientQueries(t *testing.T, expectations qh.ExpectationSequence, skippableOnce ...string) { t.Helper() + if doNotLogDBQueries { + return + } failed := false skippedOnce := false validator := qh.NewVerifier(expectations) @@ -558,7 +617,9 @@ func expectDBClientQueries(t *testing.T, expectations qh.ExpectationSequence, sk // It also disregards updates to _vt.vreplication. func expectNontxQueries(t *testing.T, expectations qh.ExpectationSequence) { t.Helper() - + if doNotLogDBQueries { + return + } failed := false validator := qh.NewVerifier(expectations) @@ -621,15 +682,36 @@ func expectQueryResult(t *testing.T, query string, values [][]string) { func customExpectData(t *testing.T, table string, values [][]string, exec func(ctx context.Context, query string) (*sqltypes.Result, error)) { t.Helper() + const timeout = 30 * time.Second + const tick = 100 * time.Millisecond + var query string if len(strings.Split(table, ".")) == 1 { query = fmt.Sprintf("select * from %s.%s", vrepldb, table) } else { query = fmt.Sprintf("select * from %s", table) } - err := compareQueryResults(t, query, values, exec) - if err != nil { - require.FailNow(t, "data mismatch", err) + + // without the sleep and retry there is a flakiness where rows inserted by vreplication are not immediately visible + // on the target for tests where we do not expect queries but just directly check the vreplicated data after inserting + // into the source. + tmr := time.NewTimer(timeout) + defer tmr.Stop() + var err error + for { + select { + case <-tmr.C: + if err != nil { + require.FailNow(t, "target has incorrect data", err) + } + default: + err = compareQueryResults(t, query, values, exec) + if err == nil { + return + } + log.Errorf("data mismatch: %v, retrying", err) + time.Sleep(tick) + } } } @@ -650,7 +732,7 @@ func compareQueryResults(t *testing.T, query string, values [][]string, } for j, val := range row { if got := qr.Rows[i][j].ToString(); got != val { - return fmt.Errorf("Mismatch at (%d, %d): %v, want %s", i, j, qr.Rows[i][j], val) + return fmt.Errorf("mismatch at (%d, %d): got '%s', want '%s'", i, j, qr.Rows[i][j].ToString(), val) } } } diff --git a/go/vt/vttablet/tabletmanager/vreplication/fuzz.go b/go/vt/vttablet/tabletmanager/vreplication/fuzz.go index 2b9daa0d3cc..98183e726df 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/fuzz.go +++ b/go/vt/vttablet/tabletmanager/vreplication/fuzz.go @@ -22,9 +22,11 @@ import ( "sync" "testing" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/memorytopo" fuzz "github.com/AdaLogics/go-fuzz-headers" @@ -99,7 +101,8 @@ func FuzzEngine(data []byte) int { vre := NewTestEngine(topoServer, "cell1", mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil) // Fuzzer fails if this expectation is not made first: - dbClient.ExpectRequest("select * from _vt.vreplication where db_name='db'", &sqltypes.Result{}, nil) + dbClient.ExpectRequest(sqlparser.BuildParsedQuery("select * from %s.vreplication where db_name='db'", + sidecar.GetIdentifier()).Query, &sqltypes.Result{}, nil) err = makeExpectations(dbClient, f) if err != nil { return 0 diff --git a/go/vt/vttablet/tabletmanager/vreplication/insert_generator.go b/go/vt/vttablet/tabletmanager/vreplication/insert_generator.go index 19e5933f428..da1753a8444 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/insert_generator.go +++ b/go/vt/vttablet/tabletmanager/vreplication/insert_generator.go @@ -36,12 +36,12 @@ type InsertGenerator struct { } // NewInsertGenerator creates a new InsertGenerator. -func NewInsertGenerator(state, dbname string) *InsertGenerator { +func NewInsertGenerator(state binlogdatapb.VReplicationWorkflowState, dbname string) *InsertGenerator { buf := &strings.Builder{} buf.WriteString("insert into _vt.vreplication(workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys) values ") return &InsertGenerator{ buf: buf, - state: state, + state: state.String(), dbname: dbname, now: time.Now().Unix(), } diff --git a/go/vt/vttablet/tabletmanager/vreplication/insert_generator_test.go b/go/vt/vttablet/tabletmanager/vreplication/insert_generator_test.go index 3f79a28a765..5ccdfe3da10 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/insert_generator_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/insert_generator_test.go @@ -21,12 +21,11 @@ import ( "github.com/stretchr/testify/assert" - "vitess.io/vitess/go/vt/binlog/binlogplayer" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) func TestInsertGenerator(t *testing.T) { - ig := NewInsertGenerator(binlogplayer.BlpStopped, "a") + ig := NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Stopped, "a") ig.now = 111 ig.AddRow("b", &binlogdatapb.BinlogSource{Keyspace: "c"}, "d", "e", "f", binlogdatapb.VReplicationWorkflowType_Materialize, binlogdatapb.VReplicationWorkflowSubType_None, false) want := `insert into _vt.vreplication(workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys) values ` + diff --git a/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go b/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go index 3954f4d0546..9c6f427b418 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go +++ b/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go @@ -20,6 +20,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" "context" @@ -84,7 +85,7 @@ func (c *ReplicaConnector) Close(ctx context.Context) error { } func (c *ReplicaConnector) VStream(ctx context.Context, startPos string, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { - return c.vstreamer.Stream(ctx, startPos, nil, filter, send) + return c.vstreamer.Stream(ctx, startPos, nil, filter, throttlerapp.ReplicaConnectorName, send) } // VStreamRows streams rows from query result diff --git a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go index 3a990130c8f..39ffdef04ae 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go +++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go @@ -22,11 +22,11 @@ import ( "sort" "strings" - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/bytes2" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" + vjson "vitess.io/vitess/go/mysql/json" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -35,6 +35,7 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vttablet" ) // ReplicatorPlan is the execution plan for the replicator. It contains @@ -76,7 +77,7 @@ func (rp *ReplicatorPlan) buildExecutionPlan(fieldEvent *binlogdatapb.FieldEvent // bind var names. tplanv.Fields = make([]*querypb.Field, 0, len(fieldEvent.Fields)) for _, fld := range fieldEvent.Fields { - trimmed := proto.Clone(fld).(*querypb.Field) + trimmed := fld.CloneVT() trimmed.Name = strings.Trim(trimmed.Name, "`") tplanv.Fields = append(tplanv.Fields, trimmed) } @@ -199,11 +200,21 @@ type TablePlan struct { ConvertIntToEnum map[string]bool // PKReferences is used to check if an event changed // a primary key column (row move). - PKReferences []string + PKReferences []string + // PKIndices is an array, length = #columns, true if column is part of the PK + PKIndices []bool Stats *binlogplayer.Stats FieldsToSkip map[string]bool ConvertCharset map[string](*binlogdatapb.CharsetConversion) HasExtraSourcePkColumns bool + + TablePlanBuilder *tablePlanBuilder + // PartialInserts is a dynamically generated cache of insert ParsedQueries, which update only some columns. + // This is when we use a binlog_row_image which is not "full". The key is a serialized bitmap of data columns + // which are sent as part of the RowEvent. + PartialInserts map[string]*sqlparser.ParsedQuery + // PartialUpdates are same as PartialInserts, but for update statements + PartialUpdates map[string]*sqlparser.ParsedQuery } // MarshalJSON performs a custom JSON Marshalling. @@ -269,7 +280,7 @@ func (tp *TablePlan) applyBulkInsert(sqlbuffer *bytes2.Buffer, rows []*querypb.R // now and punt on the others. func (tp *TablePlan) isOutsidePKRange(bindvars map[string]*querypb.BindVariable, before, after bool, stmtType string) bool { // added empty comments below, otherwise gofmt removes the spaces between the bitwise & and obfuscates this check! - if vreplicationExperimentalFlags /**/ & /**/ vreplicationExperimentalFlagOptimizeInserts == 0 { + if vttablet.VReplicationExperimentalFlags /**/ & /**/ vttablet.VReplicationExperimentalFlagOptimizeInserts == 0 { return false } // Ensure there is one and only one value in lastpk and pkrefs. @@ -304,21 +315,15 @@ func (tp *TablePlan) isOutsidePKRange(bindvars map[string]*querypb.BindVariable, func (tp *TablePlan) bindFieldVal(field *querypb.Field, val *sqltypes.Value) (*querypb.BindVariable, error) { if conversion, ok := tp.ConvertCharset[field.Name]; ok && !val.IsNull() { // Non-null string value, for which we have a charset conversion instruction - valString := val.ToString() - fromEncoding, encodingOK := mysql.CharacterSetEncoding[conversion.FromCharset] - if !encodingOK { + fromCollation := collations.Local().DefaultCollationForCharset(conversion.FromCharset) + if fromCollation == collations.Unknown { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Character set %s not supported for column %s", conversion.FromCharset, field.Name) } - if fromEncoding != nil { - // As reminder, encoding can be nil for trivial charsets, like utf8 or ascii. - // encoding will be non-nil for charsets like latin1, gbk, etc. - var err error - valString, err = fromEncoding.NewDecoder().String(valString) - if err != nil { - return nil, err - } + out, err := charset.Convert(nil, charset.Charset_utf8mb4{}, val.Raw(), colldata.Lookup(fromCollation).Charset()) + if err != nil { + return nil, err } - return sqltypes.StringBindVariable(valString), nil + return sqltypes.StringBindVariable(string(out)), nil } if tp.ConvertIntToEnum[field.Name] && !val.IsNull() { // An integer converted to an enum. We must write the textual value of the int. i.e. 0 turns to '0' @@ -369,7 +374,22 @@ func (tp *TablePlan) applyChange(rowChange *binlogdatapb.RowChange, executor fun after = true vals := sqltypes.MakeRowTrusted(tp.Fields, rowChange.After) for i, field := range tp.Fields { - bindVar, err := tp.bindFieldVal(field, &vals[i]) + var bindVar *querypb.BindVariable + var newVal *sqltypes.Value + var err error + if field.Type == querypb.Type_JSON { + if vals[i].IsNull() { // An SQL NULL and not an actual JSON value + newVal = &sqltypes.NULL + } else { // A JSON value (which may be a JSON null literal value) + newVal, err = vjson.MarshalSQLValue(vals[i].Raw()) + if err != nil { + return nil, err + } + } + bindVar, err = tp.bindFieldVal(field, newVal) + } else { + bindVar, err = tp.bindFieldVal(field, &vals[i]) + } if err != nil { return nil, err } @@ -382,7 +402,16 @@ func (tp *TablePlan) applyChange(rowChange *binlogdatapb.RowChange, executor fun if tp.isOutsidePKRange(bindvars, before, after, "insert") { return nil, nil } - return execParsedQuery(tp.Insert, bindvars, executor) + if tp.isPartial(rowChange) { + ins, err := tp.getPartialInsertQuery(rowChange.DataColumns) + if err != nil { + return nil, err + } + tp.Stats.PartialQueryCount.Add([]string{"insert"}, 1) + return execParsedQuery(ins, bindvars, executor) + } else { + return execParsedQuery(tp.Insert, bindvars, executor) + } case before && !after: if tp.Delete == nil { return nil, nil @@ -390,7 +419,16 @@ func (tp *TablePlan) applyChange(rowChange *binlogdatapb.RowChange, executor fun return execParsedQuery(tp.Delete, bindvars, executor) case before && after: if !tp.pkChanged(bindvars) && !tp.HasExtraSourcePkColumns { - return execParsedQuery(tp.Update, bindvars, executor) + if tp.isPartial(rowChange) { + upd, err := tp.getPartialUpdateQuery(rowChange.DataColumns) + if err != nil { + return nil, err + } + tp.Stats.PartialQueryCount.Add([]string{"update"}, 1) + return execParsedQuery(upd, bindvars, executor) + } else { + return execParsedQuery(tp.Update, bindvars, executor) + } } if tp.Delete != nil { if _, err := execParsedQuery(tp.Delete, bindvars, executor); err != nil { @@ -406,12 +444,19 @@ func (tp *TablePlan) applyChange(rowChange *binlogdatapb.RowChange, executor fun return nil, nil } -func execParsedQuery(pq *sqlparser.ParsedQuery, bindvars map[string]*querypb.BindVariable, executor func(string) (*sqltypes.Result, error)) (*sqltypes.Result, error) { +func getQuery(pq *sqlparser.ParsedQuery, bindvars map[string]*querypb.BindVariable) (string, error) { sql, err := pq.GenerateQuery(bindvars, nil) + if err != nil { + return "", err + } + return sql, nil +} +func execParsedQuery(pq *sqlparser.ParsedQuery, bindvars map[string]*querypb.BindVariable, executor func(string) (*sqltypes.Result, error)) (*sqltypes.Result, error) { + query, err := getQuery(pq, bindvars) if err != nil { return nil, err } - return executor(sql) + return executor(query) } func (tp *TablePlan) pkChanged(bindvars map[string]*querypb.BindVariable) bool { diff --git a/go/vt/vttablet/tabletmanager/vreplication/stats.go b/go/vt/vttablet/tabletmanager/vreplication/stats.go index 5b3f55a60f5..6379a9ba04f 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/stats.go +++ b/go/vt/vttablet/tabletmanager/vreplication/stats.go @@ -27,6 +27,8 @@ import ( "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/servenv" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) var ( @@ -60,20 +62,27 @@ type vrStats struct { } func (st *vrStats) register() { + stats.NewGaugeFunc("VReplicationStreamCount", "Number of vreplication streams", st.numControllers) stats.NewGaugeFunc("VReplicationLagSecondsMax", "Max vreplication seconds behind primary", st.maxReplicationLagSeconds) - stats.Publish("VReplicationStreamState", stats.StringMapFunc(func() map[string]string { - st.mu.Lock() - defer st.mu.Unlock() - result := make(map[string]string, len(st.controllers)) - for _, ct := range st.controllers { - state := ct.blpStats.State.Load() - if state != nil { - result[ct.workflow+"."+fmt.Sprintf("%v", ct.id)] = state.(string) + stats.NewStringMapFuncWithMultiLabels( + "VReplicationStreamState", + "State of vreplication workflow", + []string{"workflow", "counts"}, + "state", + func() map[string]string { + st.mu.Lock() + defer st.mu.Unlock() + result := make(map[string]string, len(st.controllers)) + for _, ct := range st.controllers { + state := ct.blpStats.State.Load() + if state != nil { + result[ct.workflow+"."+fmt.Sprintf("%v", ct.id)] = state.(string) + } } - } - return result - })) + return result + }, + ) stats.NewGaugesFuncWithMultiLabels( "VReplicationLagSeconds", "vreplication seconds behind primary per stream", @@ -145,7 +154,10 @@ func (st *vrStats) register() { defer st.mu.Unlock() result := make(map[string]string, len(st.controllers)) for _, ct := range st.controllers { - result[fmt.Sprintf("%v", ct.id)] = ct.sourceTablet.Load().(string) + ta := ct.sourceTablet.Load() + if ta != nil { + result[fmt.Sprintf("%v", ct.id)] = ta.(*topodatapb.TabletAlias).String() + } } return result })) @@ -208,7 +220,6 @@ func (st *vrStats) register() { } return result }) - stats.NewGaugesFuncWithMultiLabels( "VReplicationQueryCount", "vreplication query counts per stream", @@ -376,6 +387,7 @@ func (st *vrStats) register() { } return result }) + stats.NewGaugesFuncWithMultiLabels( "VReplicationTableCopyTimings", "vreplication copy phase timings per table per stream", @@ -391,6 +403,37 @@ func (st *vrStats) register() { } return result }) + stats.NewCountersFuncWithMultiLabels( + "VReplicationPartialQueryCount", + "count of partial queries per stream", + []string{"source_keyspace", "source_shard", "workflow", "type"}, + func() map[string]int64 { + st.mu.Lock() + defer st.mu.Unlock() + result := make(map[string]int64, len(st.controllers)) + for _, ct := range st.controllers { + for typ, t := range ct.blpStats.PartialQueryCount.Counts() { + result[ct.source.Keyspace+"."+ct.source.Shard+"."+ct.workflow+"."+fmt.Sprintf("%v", ct.id)+"."+typ] = t + } + } + return result + }) + stats.NewCountersFuncWithMultiLabels( + "VReplicationPartialQueryCacheSize", + "cache size for partial queries per stream", + []string{"source_keyspace", "source_shard", "workflow", "type"}, + func() map[string]int64 { + st.mu.Lock() + defer st.mu.Unlock() + result := make(map[string]int64, len(st.controllers)) + for _, ct := range st.controllers { + for typ, t := range ct.blpStats.PartialQueryCacheSize.Counts() { + result[ct.source.Keyspace+"."+ct.source.Shard+"."+ct.workflow+"."+fmt.Sprintf("%v", ct.id)+"."+typ] = t + } + } + return result + }) + } func (st *vrStats) numControllers() int64 { @@ -430,7 +473,7 @@ func (st *vrStats) status() *EngineStatus { ReplicationLagSeconds: ct.blpStats.ReplicationLagSeconds.Load(), Counts: ct.blpStats.Timings.Counts(), Rates: ct.blpStats.Rates.Get(), - SourceTablet: ct.sourceTablet.Load().(string), + SourceTablet: ct.sourceTablet.Load().(*topodatapb.TabletAlias), Messages: ct.blpStats.MessageHistory(), QueryCounts: ct.blpStats.QueryCount.Counts(), PhaseTimings: ct.blpStats.PhaseTimings.Counts(), @@ -468,7 +511,7 @@ type ControllerStatus struct { Counts map[string]int64 Rates map[string][]float64 State string - SourceTablet string + SourceTablet *topodatapb.TabletAlias Messages []string QueryCounts map[string]int64 PhaseTimings map[string]int64 @@ -478,7 +521,7 @@ type ControllerStatus struct { TableCopyTimings map[string]int64 } -var vreplicationTemplate = ` +const vreplicationTemplate = ` {{if .IsOpen}}VReplication state: Open
diff --git a/go/vt/vttablet/tabletmanager/vreplication/stats_test.go b/go/vt/vttablet/tabletmanager/vreplication/stats_test.go index 18ede348166..d5b5eacbdf2 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/stats_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/stats_test.go @@ -18,16 +18,19 @@ package vreplication import ( "bytes" - "html/template" "strings" "testing" "time" + "github.com/google/safehtml/template" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/proto/binlogdata" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) var wantOut = ` @@ -72,12 +75,13 @@ VReplication state: Open
` func TestStatusHtml(t *testing.T) { - pos, err := mysql.DecodePosition("MariaDB/1-2-3") + pos, err := replication.DecodePosition("MariaDB/1-2-3") if err != nil { t.Fatal(err) } blpStats := binlogplayer.NewStats() + defer blpStats.Stop() blpStats.SetLastPosition(pos) blpStats.ReplicationLagSeconds.Store(2) blpStats.History.Add(&binlogplayer.StatsHistoryRecord{Time: time.Now(), Message: "Test Message1"}) @@ -107,8 +111,14 @@ func TestStatusHtml(t *testing.T) { done: make(chan struct{}), }, } - testStats.controllers[1].sourceTablet.Store("src1") - testStats.controllers[2].sourceTablet.Store("src2") + testStats.controllers[1].sourceTablet.Store(&topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 01, + }) + testStats.controllers[2].sourceTablet.Store(&topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 02, + }) close(testStats.controllers[2].done) tpl := template.Must(template.New("test").Parse(vreplicationTemplate)) @@ -121,7 +131,7 @@ func TestStatusHtml(t *testing.T) { func TestVReplicationStats(t *testing.T) { blpStats := binlogplayer.NewStats() - + defer blpStats.Stop() testStats := &vrStats{} testStats.isOpen = true testStats.controllers = map[int32]*controller{ @@ -135,7 +145,10 @@ func TestVReplicationStats(t *testing.T) { done: make(chan struct{}), }, } - testStats.controllers[1].sourceTablet.Store("src1") + testStats.controllers[1].sourceTablet.Store(&topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 01, + }) sleepTime := 1 * time.Millisecond record := func(phase string) { diff --git a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go index d899be92fa7..3d0f27c7d24 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go +++ b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go @@ -55,6 +55,7 @@ type tablePlanBuilder struct { colInfos []*ColumnInfo stats *binlogplayer.Stats source *binlogdatapb.BinlogSource + pkIndices []bool } // colExpr describes the processing to be performed to @@ -201,7 +202,7 @@ func buildTablePlan(tableName string, rule *binlogdatapb.Rule, colInfos []*Colum buf := sqlparser.NewTrackedBuffer(nil) buf.Myprintf("select * from %v", sqlparser.NewIdentifierCS(tableName)) query = buf.String() - case key.IsKeyRange(filter): + case key.IsValidKeyRange(filter): buf := sqlparser.NewTrackedBuffer(nil) buf.Myprintf("select * from %v where in_keyrange(%v)", sqlparser.NewIdentifierCS(tableName), sqlparser.NewStrLiteral(filter)) query = buf.String() @@ -358,9 +359,13 @@ func (tpb *tablePlanBuilder) generate() *TablePlan { Update: tpb.generateUpdateStatement(), Delete: tpb.generateDeleteStatement(), PKReferences: pkrefs, + PKIndices: tpb.pkIndices, Stats: tpb.stats, FieldsToSkip: fieldsToSkip, - HasExtraSourcePkColumns: (len(tpb.extraSourcePkCols) > 0), + HasExtraSourcePkColumns: len(tpb.extraSourcePkCols) > 0, + TablePlanBuilder: tpb, + PartialInserts: make(map[string]*sqlparser.ParsedQuery, 0), + PartialUpdates: make(map[string]*sqlparser.ParsedQuery, 0), } } @@ -443,10 +448,10 @@ func (tpb *tablePlanBuilder) analyzeExpr(selExpr sqlparser.SelectExpr) (*colExpr } } if expr, ok := aliased.Expr.(sqlparser.AggrFunc); ok { - if expr.IsDistinct() { + if sqlparser.IsDistinct(expr) { return nil, fmt.Errorf("unexpected: %v", sqlparser.String(expr)) } - switch fname := strings.ToLower(expr.AggrName()); fname { + switch fname := expr.AggrName(); fname { case "count": if _, ok := expr.(*sqlparser.CountStar); !ok { return nil, fmt.Errorf("only count(*) is supported: %v", sqlparser.String(expr)) @@ -576,8 +581,8 @@ func (tpb *tablePlanBuilder) analyzePK(cols []*ColumnInfo) error { } // analyzeExtraSourcePkCols builds tpb.extraSourcePkCols. -// Vreplication allows source and target tables to use different unique keys. Normally, both will -// use same PRIMARY KEY. Other times, same other UNIQUE KEY. Byut it's possible that cource and target +// VReplication allows source and target tables to use different unique keys. Normally, both will +// use same PRIMARY KEY. Other times, same other UNIQUE KEY. But it's possible that source and target // unique keys will only have partial (or empty) shared list of columns. // To be able to generate UPDATE/DELETE queries correctly, we need to know the identities of the // source unique key columns, that are not already part of the target unique key columns. We call @@ -676,7 +681,7 @@ func (tpb *tablePlanBuilder) generateValuesPart(buf *sqlparser.TrackedBuffer, bv case opExpr: switch cexpr.colType { case querypb.Type_JSON: - buf.Myprintf("convert(%v using utf8mb4)", cexpr.expr) + buf.Myprintf("%v", cexpr.expr) case querypb.Type_DATETIME: sourceTZ := tpb.source.SourceTimeZone targetTZ := tpb.source.TargetTimeZone @@ -766,7 +771,11 @@ func (tpb *tablePlanBuilder) generateUpdateStatement() *sqlparser.ParsedQuery { buf := sqlparser.NewTrackedBuffer(bvf.formatter) buf.Myprintf("update %v set ", tpb.name) separator := "" - for _, cexpr := range tpb.colExprs { + tpb.pkIndices = make([]bool, len(tpb.colExprs)) + for i, cexpr := range tpb.colExprs { + if cexpr.isPK { + tpb.pkIndices[i] = true + } if cexpr.isGrouped || cexpr.isPK { continue } @@ -780,7 +789,7 @@ func (tpb *tablePlanBuilder) generateUpdateStatement() *sqlparser.ParsedQuery { bvf.mode = bvAfter switch cexpr.colType { case querypb.Type_JSON: - buf.Myprintf("convert(%v using utf8mb4)", cexpr.expr) + buf.Myprintf("%v", cexpr.expr) case querypb.Type_DATETIME: sourceTZ := tpb.source.SourceTimeZone targetTZ := tpb.source.TargetTimeZone diff --git a/go/vt/vttablet/tabletmanager/vreplication/table_plan_partial.go b/go/vt/vttablet/tabletmanager/vreplication/table_plan_partial.go new file mode 100644 index 00000000000..be1242c9288 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/table_plan_partial.go @@ -0,0 +1,209 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "fmt" + + "vitess.io/vitess/go/vt/vttablet" + + "vitess.io/vitess/go/vt/log" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" +) + +// isBitSet returns true if the bit at index is set +func isBitSet(data []byte, index int) bool { + byteIndex := index / 8 + bitMask := byte(1 << (uint(index) & 0x7)) + return data[byteIndex]&bitMask > 0 +} + +func (tp *TablePlan) isPartial(rowChange *binlogdatapb.RowChange) bool { + if (vttablet.VReplicationExperimentalFlags /**/ & /**/ vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage) == 0 || + rowChange.DataColumns == nil || + rowChange.DataColumns.Count == 0 { + + return false + } + return true +} + +func (tpb *tablePlanBuilder) generatePartialValuesPart(buf *sqlparser.TrackedBuffer, bvf *bindvarFormatter, dataColumns *binlogdatapb.RowChange_Bitmap) *sqlparser.ParsedQuery { + bvf.mode = bvAfter + separator := "(" + for ind, cexpr := range tpb.colExprs { + if tpb.isColumnGenerated(cexpr.colName) { + continue + } + if !isBitSet(dataColumns.Cols, ind) { + continue + } + buf.Myprintf("%s", separator) + separator = "," + switch cexpr.operation { + case opExpr: + switch cexpr.colType { + case querypb.Type_JSON: + buf.Myprintf("%v", cexpr.expr) + case querypb.Type_DATETIME: + sourceTZ := tpb.source.SourceTimeZone + targetTZ := tpb.source.TargetTimeZone + if sourceTZ != "" && targetTZ != "" { + buf.Myprintf("convert_tz(%v, '%s', '%s')", cexpr.expr, sourceTZ, targetTZ) + } else { + buf.Myprintf("%v", cexpr.expr) + } + default: + buf.Myprintf("%v", cexpr.expr) + } + } + } + buf.Myprintf(")") + return buf.ParsedQuery() +} + +func (tpb *tablePlanBuilder) generatePartialInsertPart(buf *sqlparser.TrackedBuffer, dataColumns *binlogdatapb.RowChange_Bitmap) *sqlparser.ParsedQuery { + buf.Myprintf("insert into %v(", tpb.name) + separator := "" + for ind, cexpr := range tpb.colExprs { + if tpb.isColumnGenerated(cexpr.colName) { + continue + } + if !isBitSet(dataColumns.Cols, ind) { + continue + } + buf.Myprintf("%s%v", separator, cexpr.colName) + separator = "," + } + buf.Myprintf(")", tpb.name) + return buf.ParsedQuery() +} + +func (tpb *tablePlanBuilder) generatePartialSelectPart(buf *sqlparser.TrackedBuffer, bvf *bindvarFormatter, dataColumns *binlogdatapb.RowChange_Bitmap) *sqlparser.ParsedQuery { + bvf.mode = bvAfter + buf.WriteString(" select ") + separator := "" + for ind, cexpr := range tpb.colExprs { + if tpb.isColumnGenerated(cexpr.colName) { + continue + } + if !isBitSet(dataColumns.Cols, ind) { + continue + } + buf.Myprintf("%s", separator) + separator = ", " + buf.Myprintf("%v", cexpr.expr) + + } + buf.WriteString(" from dual where ") + tpb.generatePKConstraint(buf, bvf) + return buf.ParsedQuery() +} + +func (tpb *tablePlanBuilder) createPartialInsertQuery(dataColumns *binlogdatapb.RowChange_Bitmap) *sqlparser.ParsedQuery { + bvf := &bindvarFormatter{} + buf := sqlparser.NewTrackedBuffer(bvf.formatter) + + tpb.generatePartialInsertPart(buf, dataColumns) + if tpb.lastpk == nil { + // If there's no lastpk, generate straight values. + buf.Myprintf(" values ", tpb.name) + tpb.generatePartialValuesPart(buf, bvf, dataColumns) + } else { + // If there is a lastpk, generate values as a select from dual + // where the pks < lastpk + tpb.generatePartialSelectPart(buf, bvf, dataColumns) + } + return buf.ParsedQuery() +} + +func (tpb *tablePlanBuilder) createPartialUpdateQuery(dataColumns *binlogdatapb.RowChange_Bitmap) *sqlparser.ParsedQuery { + bvf := &bindvarFormatter{} + buf := sqlparser.NewTrackedBuffer(bvf.formatter) + buf.Myprintf("update %v set ", tpb.name) + separator := "" + for i, cexpr := range tpb.colExprs { + if cexpr.isPK { + continue + } + if tpb.isColumnGenerated(cexpr.colName) { + continue + } + if int64(i) >= dataColumns.Count { + log.Errorf("Ran out of columns trying to generate query for %s", tpb.name.CompliantName()) + return nil + } + if !isBitSet(dataColumns.Cols, i) { + continue + } + buf.Myprintf("%s%v=", separator, cexpr.colName) + separator = ", " + switch cexpr.operation { + case opExpr: + bvf.mode = bvAfter + switch cexpr.colType { + case querypb.Type_JSON: + buf.Myprintf("%v", cexpr.expr) + case querypb.Type_DATETIME: + sourceTZ := tpb.source.SourceTimeZone + targetTZ := tpb.source.TargetTimeZone + if sourceTZ != "" && targetTZ != "" { + buf.Myprintf("convert_tz(%v, '%s', '%s')", cexpr.expr, sourceTZ, targetTZ) + } else { + buf.Myprintf("%v", cexpr.expr) + } + default: + buf.Myprintf("%v", cexpr.expr) + } + } + } + tpb.generateWhere(buf, bvf) + return buf.ParsedQuery() +} +func (tp *TablePlan) getPartialInsertQuery(dataColumns *binlogdatapb.RowChange_Bitmap) (*sqlparser.ParsedQuery, error) { + key := fmt.Sprintf("%x", dataColumns.Cols) + ins, ok := tp.PartialInserts[key] + if ok { + return ins, nil + } + ins = tp.TablePlanBuilder.createPartialInsertQuery(dataColumns) + if ins == nil { + return ins, vterrors.New(vtrpcpb.Code_INTERNAL, fmt.Sprintf("unable to create partial insert query for %s", tp.TargetName)) + } + tp.PartialInserts[key] = ins + tp.Stats.PartialQueryCacheSize.Add([]string{"insert"}, 1) + return ins, nil +} + +func (tp *TablePlan) getPartialUpdateQuery(dataColumns *binlogdatapb.RowChange_Bitmap) (*sqlparser.ParsedQuery, error) { + key := fmt.Sprintf("%x", dataColumns.Cols) + upd, ok := tp.PartialUpdates[key] + if ok { + return upd, nil + } + upd = tp.TablePlanBuilder.createPartialUpdateQuery(dataColumns) + if upd == nil { + return upd, vterrors.New(vtrpcpb.Code_INTERNAL, fmt.Sprintf("unable to create partial update query for %s", tp.TargetName)) + } + tp.PartialUpdates[key] = upd + tp.Stats.PartialQueryCacheSize.Add([]string{"update"}, 1) + return upd, nil +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/utils.go b/go/vt/vttablet/tabletmanager/vreplication/utils.go index 84458677590..42aa4351647 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/utils.go +++ b/go/vt/vttablet/tabletmanager/vreplication/utils.go @@ -21,35 +21,33 @@ import ( "fmt" "strconv" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" - - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/sqlparser" - - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/vtgate/evalengine" ) const ( - vreplicationLogTableName = "_vt.vreplication_log" + vreplicationLogTableName = "vreplication_log" ) const ( - // Enum values for type column of _vt.vreplication_log + // Enum values for type column in the vreplication_log table. - // LogStreamCreate is used when a row in _vt.vreplication is inserted via VReplicationExec + // LogStreamCreate is used when a row in the vreplication table is inserted via VReplicationExec. LogStreamCreate = "Stream Created" - // LogStreamUpdate is used when a row in _vt.vreplication is updated via VReplicationExec + // LogStreamUpdate is used when a row in the vreplication table is updated via VReplicationExec. LogStreamUpdate = "Stream Updated" - // LogStreamDelete is used when a row in _vt.vreplication is deleted via VReplicationExec + // LogStreamDelete is used when a row in the vreplication table is deleted via VReplicationExec. LogStreamDelete = "Stream Deleted" - // LogMessage is used for generic log messages + // LogMessage is used for generic log messages. LogMessage = "Message" - // LogCopyStart is used when the copy phase is started + // LogCopyStart is used when the copy phase is started. LogCopyStart = "Started Copy Phase" - // LogCopyEnd is used when the copy phase is done + // LogCopyEnd is used when the copy phase is done. LogCopyEnd = "Ended Copy Phase" - // LogStateChange is used when the state of the stream changes + // LogStateChange is used when the state of the stream changes. LogStateChange = "State Changed" // TODO: LogError is not used atm. Currently irrecoverable errors, resumable errors and informational messages @@ -65,7 +63,8 @@ const ( func getLastLog(dbClient *vdbClient, vreplID int32) (id int64, typ, state, message string, err error) { var qr *sqltypes.Result - query := fmt.Sprintf("select id, type, state, message from _vt.vreplication_log where vrepl_id = %d order by id desc limit 1", vreplID) + query := fmt.Sprintf("select id, type, state, message from %s.vreplication_log where vrepl_id = %d order by id desc limit 1", + sidecar.GetIdentifier(), vreplID) if qr, err = dbClient.Execute(query); err != nil { return 0, "", "", "", err } @@ -73,7 +72,7 @@ func getLastLog(dbClient *vdbClient, vreplID int32) (id int64, typ, state, messa return 0, "", "", "", nil } row := qr.Rows[0] - id, _ = evalengine.ToInt64(row[0]) + id, _ = row[0].ToCastInt64() typ = row[1].ToString() state = row[2].ToString() message = row[3].ToString() @@ -93,11 +92,11 @@ func insertLog(dbClient *vdbClient, typ string, vreplID int32, state, message st } var query string if id > 0 && message == lastLogMessage { - query = fmt.Sprintf("update _vt.vreplication_log set count = count + 1 where id = %d", id) + query = fmt.Sprintf("update %s.vreplication_log set count = count + 1 where id = %d", sidecar.GetIdentifier(), id) } else { buf := sqlparser.NewTrackedBuffer(nil) - buf.Myprintf("insert into _vt.vreplication_log(vrepl_id, type, state, message) values(%s, %s, %s, %s)", - strconv.Itoa(int(vreplID)), encodeString(typ), encodeString(state), encodeString(message)) + buf.Myprintf("insert into %s.vreplication_log(vrepl_id, type, state, message) values(%s, %s, %s, %s)", + sidecar.GetIdentifier(), strconv.Itoa(int(vreplID)), encodeString(typ), encodeString(state), encodeString(message)) query = buf.ParsedQuery().Query } if _, err = dbClient.ExecuteFetch(query, 10000); err != nil { @@ -124,72 +123,92 @@ func isUnrecoverableError(err error) bool { if err == nil { return false } - sqlErr, isSQLErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) + sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) if !isSQLErr { return false } - if sqlErr.Num == mysql.ERUnknownError { + if sqlErr.Num == sqlerror.ERUnknownError { return false } switch sqlErr.Num { case // in case-insensitive alphabetical order - mysql.ERAccessDeniedError, - mysql.ERBadFieldError, - mysql.ERBadNullError, - mysql.ERCantDropFieldOrKey, - mysql.ERDataOutOfRange, - mysql.ERDataTooLong, - mysql.ERDBAccessDenied, - mysql.ERDupEntry, - mysql.ERDupFieldName, - mysql.ERDupKeyName, - mysql.ERDupUnique, - mysql.ERFeatureDisabled, - mysql.ERFunctionNotDefined, - mysql.ERIllegalValueForType, - mysql.ERInvalidCastToJSON, - mysql.ERInvalidJSONBinaryData, - mysql.ERInvalidJSONCharset, - mysql.ERInvalidJSONText, - mysql.ERInvalidJSONTextInParams, - mysql.ERJSONDocumentTooDeep, - mysql.ERJSONValueTooBig, - mysql.ERNoDefault, - mysql.ERNoDefaultForField, - mysql.ERNonUniq, - mysql.ERNonUpdateableTable, - mysql.ERNoSuchTable, - mysql.ERNotAllowedCommand, - mysql.ERNotSupportedYet, - mysql.EROptionPreventsStatement, - mysql.ERParseError, - mysql.ERPrimaryCantHaveNull, - mysql.ErrCantCreateGeometryObject, - mysql.ErrGISDataWrongEndianess, - mysql.ErrNonPositiveRadius, - mysql.ErrNotImplementedForCartesianSRS, - mysql.ErrNotImplementedForProjectedSRS, - mysql.ErrWrongValueForType, - mysql.ERSPDoesNotExist, - mysql.ERSpecifiedAccessDenied, - mysql.ERSyntaxError, - mysql.ERTooBigRowSize, - mysql.ERTooBigSet, - mysql.ERTruncatedWrongValue, - mysql.ERTruncatedWrongValueForField, - mysql.ERUnknownCollation, - mysql.ERUnknownProcedure, - mysql.ERUnknownTable, - mysql.ERWarnDataOutOfRange, - mysql.ERWarnDataTruncated, - mysql.ERWrongFKDef, - mysql.ERWrongFieldSpec, - mysql.ERWrongParamCountToProcedure, - mysql.ERWrongParametersToProcedure, - mysql.ERWrongUsage, - mysql.ERWrongValue, - mysql.ERWrongValueCountOnRow: + sqlerror.ERAccessDeniedError, + sqlerror.ERBadFieldError, + sqlerror.ERBadNullError, + sqlerror.ERCantDropFieldOrKey, + sqlerror.ERDataOutOfRange, + sqlerror.ERDataTooLong, + sqlerror.ERDBAccessDenied, + sqlerror.ERDupEntry, + sqlerror.ERDupFieldName, + sqlerror.ERDupKeyName, + sqlerror.ERDupUnique, + sqlerror.ERFeatureDisabled, + sqlerror.ERFunctionNotDefined, + sqlerror.ERIllegalValueForType, + sqlerror.ERInvalidCastToJSON, + sqlerror.ERInvalidJSONBinaryData, + sqlerror.ERInvalidJSONCharset, + sqlerror.ERInvalidJSONText, + sqlerror.ERInvalidJSONTextInParams, + sqlerror.ERJSONDocumentTooDeep, + sqlerror.ERJSONValueTooBig, + sqlerror.ERRegexpError, + sqlerror.ERRegexpStringNotTerminated, + sqlerror.ERRegexpIllegalArgument, + sqlerror.ERRegexpIndexOutOfBounds, + sqlerror.ERRegexpInternal, + sqlerror.ERRegexpRuleSyntax, + sqlerror.ERRegexpBadEscapeSequence, + sqlerror.ERRegexpUnimplemented, + sqlerror.ERRegexpMismatchParen, + sqlerror.ERRegexpBadInterval, + sqlerror.ERRRegexpMaxLtMin, + sqlerror.ERRegexpInvalidBackRef, + sqlerror.ERRegexpLookBehindLimit, + sqlerror.ERRegexpMissingCloseBracket, + sqlerror.ERRegexpInvalidRange, + sqlerror.ERRegexpStackOverflow, + sqlerror.ERRegexpTimeOut, + sqlerror.ERRegexpPatternTooBig, + sqlerror.ERRegexpInvalidCaptureGroup, + sqlerror.ERRegexpInvalidFlag, + sqlerror.ERNoDefault, + sqlerror.ERNoDefaultForField, + sqlerror.ERNonUniq, + sqlerror.ERNonUpdateableTable, + sqlerror.ERNoSuchTable, + sqlerror.ERNotAllowedCommand, + sqlerror.ERNotSupportedYet, + sqlerror.EROptionPreventsStatement, + sqlerror.ERParseError, + sqlerror.ERPrimaryCantHaveNull, + sqlerror.ErrCantCreateGeometryObject, + sqlerror.ErrGISDataWrongEndianess, + sqlerror.ErrNonPositiveRadius, + sqlerror.ErrNotImplementedForCartesianSRS, + sqlerror.ErrNotImplementedForProjectedSRS, + sqlerror.ErrWrongValueForType, + sqlerror.ERSPDoesNotExist, + sqlerror.ERSpecifiedAccessDenied, + sqlerror.ERSyntaxError, + sqlerror.ERTooBigRowSize, + sqlerror.ERTooBigSet, + sqlerror.ERTruncatedWrongValue, + sqlerror.ERTruncatedWrongValueForField, + sqlerror.ERUnknownCollation, + sqlerror.ERUnknownProcedure, + sqlerror.ERUnknownTable, + sqlerror.ERWarnDataOutOfRange, + sqlerror.ERWarnDataTruncated, + sqlerror.ERWrongFKDef, + sqlerror.ERWrongFieldSpec, + sqlerror.ERWrongParamCountToProcedure, + sqlerror.ERWrongParametersToProcedure, + sqlerror.ERWrongUsage, + sqlerror.ERWrongValue, + sqlerror.ERWrongValueCountOnRow: log.Errorf("Got unrecoverable error: %v", sqlErr) return true } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go index ddb1d1d6854..ebfe0e22343 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go @@ -26,10 +26,9 @@ import ( "time" "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/bytes2" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/pools" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" @@ -39,6 +38,7 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" ) type vcopier struct { @@ -141,7 +141,7 @@ type vcopierCopyWorker struct { func newVCopier(vr *vreplicator) *vcopier { return &vcopier{ vr: vr, - throttlerAppName: vr.throttlerAppName(), + throttlerAppName: throttlerapp.VCopierName.ConcatenateString(vr.throttlerAppName()), } } @@ -236,7 +236,7 @@ func (vc *vcopier) initTablesForCopy(ctx context.Context) error { if _, err := vc.vr.dbClient.Execute(buf.String()); err != nil { return err } - if err := vc.vr.setState(binlogplayer.VReplicationCopying, ""); err != nil { + if err := vc.vr.setState(binlogdatapb.VReplicationWorkflowState_Copying, ""); err != nil { return err } if err := vc.vr.insertLog(LogCopyStart, fmt.Sprintf("Copy phase started for %d table(s)", @@ -267,7 +267,7 @@ func (vc *vcopier) initTablesForCopy(ctx context.Context) error { } } } else { - if err := vc.vr.setState(binlogplayer.BlpStopped, "There is nothing to replicate"); err != nil { + if err := vc.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, "There is nothing to replicate"); err != nil { return err } } @@ -343,7 +343,7 @@ func (vc *vcopier) catchup(ctx context.Context, copyState map[string]*sqltypes.R // Start vreplication. errch := make(chan error, 1) go func() { - errch <- newVPlayer(vc.vr, settings, copyState, mysql.Position{}, "catchup").play(ctx) + errch <- newVPlayer(vc.vr, settings, copyState, replication.Position{}, "catchup").play(ctx) }() // Wait for catchup. @@ -406,7 +406,7 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma copyStateGCTicker := time.NewTicker(copyStateGCInterval) defer copyStateGCTicker.Stop() - parallelism := int(math.Max(1, float64(vreplicationParallelInsertWorkers))) + parallelism := getInsertParallelism() copyWorkerFactory := vc.newCopyWorkerFactory(parallelism) copyWorkQueue := vc.newCopyWorkQueue(parallelism, copyWorkerFactory) defer copyWorkQueue.close() @@ -457,7 +457,7 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma default: } if rows.Throttled { - _ = vc.vr.updateTimeThrottled(RowStreamerComponentName) + _ = vc.vr.updateTimeThrottled(throttlerapp.RowStreamerName) return nil } if rows.Heartbeat { @@ -465,10 +465,10 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma return nil } // verify throttler is happy, otherwise keep looping - if vc.vr.vre.throttlerClient.ThrottleCheckOKOrWaitAppName(ctx, vc.throttlerAppName) { + if vc.vr.vre.throttlerClient.ThrottleCheckOKOrWaitAppName(ctx, throttlerapp.Name(vc.throttlerAppName)) { break // out of 'for' loop } else { // we're throttled - _ = vc.vr.updateTimeThrottled(VCopierComponentName) + _ = vc.vr.updateTimeThrottled(throttlerapp.VCopierName) } } if !copyWorkQueue.isOpen { @@ -481,12 +481,16 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma fieldEvent := &binlogdatapb.FieldEvent{ TableName: initialPlan.SendRule.Match, } - fieldEvent.Fields = append(fieldEvent.Fields, rows.Fields...) + for _, f := range rows.Fields { + fieldEvent.Fields = append(fieldEvent.Fields, f.CloneVT()) + } tablePlan, err := plan.buildExecutionPlan(fieldEvent) if err != nil { return err } - pkfields = append(pkfields, rows.Pkfields...) + for _, f := range rows.Pkfields { + pkfields = append(pkfields, f.CloneVT()) + } buf := sqlparser.NewTrackedBuffer(nil) buf.Myprintf( "insert into _vt.copy_state (lastpk, vrepl_id, table_name) values (%a, %s, %s)", ":lastpk", @@ -502,9 +506,15 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma // Clone rows, since pointer values will change while async work is // happening. Can skip this when there's no parallelism. if parallelism > 1 { - rows = proto.Clone(rows).(*binlogdatapb.VStreamRowsResponse) + rows = rows.CloneVT() } + // Code below is copied from vcopier.go. It was implemented to facilitate + // parallel bulk inserts in https://github.com/vitessio/vitess/pull/10828. + // We can probably extract this into a common package and use it for both + // flavors of the vcopier. But cut/pasting it for now, so as to not change + // vcopier at the moment to avoid any regressions. + // Prepare a vcopierCopyTask for the current batch of work. // TODO(maxeng) see if using a pre-allocated pool will speed things up. currCh := make(chan *vcopierCopyTaskResult, 1) @@ -656,7 +666,7 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma log.Infof("Copy of %v finished at lastpk: %v", tableName, lastpkbv) buf := sqlparser.NewTrackedBuffer(nil) buf.Myprintf( - "delete cs, pca from %s as cs left join %s as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name where cs.vrepl_id=%d and cs.table_name=%s", + "delete cs, pca from _vt.%s as cs left join _vt.%s as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name where cs.vrepl_id=%d and cs.table_name=%s", copyStateTableName, postCopyActionTableName, vc.vr.id, encodeString(tableName), ) @@ -667,9 +677,21 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma return nil } +// updatePos is called after the last table is copied in an atomic copy, to set the gtid so that the replicating phase +// can start from the gtid where the snapshot with all tables was taken. It also updates the final copy row count. +func (vc *vcopier) updatePos(ctx context.Context, gtid string) error { + pos, err := replication.DecodePosition(gtid) + if err != nil { + return err + } + update := binlogplayer.GenerateUpdatePos(vc.vr.id, pos, time.Now().Unix(), 0, vc.vr.stats.CopyRowCount.Get(), vreplicationStoreCompressedGTID) + _, err = vc.vr.dbClient.Execute(update) + return err +} + func (vc *vcopier) fastForward(ctx context.Context, copyState map[string]*sqltypes.Result, gtid string) error { defer vc.vr.stats.PhaseTimings.Record("fastforward", time.Now()) - pos, err := mysql.DecodePosition(gtid) + pos, err := replication.DecodePosition(gtid) if err != nil { return err } @@ -1070,6 +1092,10 @@ func (vbc *vcopierCopyWorker) execute(ctx context.Context, task *vcopierCopyTask } case vcopierCopyTaskInsertCopyState: advanceFn = func(ctx context.Context, args *vcopierCopyTaskArgs) error { + if vbc.copyStateInsert == nil { // we don't insert copy state for atomic copy + log.Infof("Skipping copy_state insert") + return nil + } if err := vbc.insertCopyState(ctx, args.lastpk); err != nil { return vterrors.Wrapf(err, "error updating _vt.copy_state") } @@ -1196,3 +1222,9 @@ func vcopierCopyTaskGetNextState(vts vcopierCopyTaskState) vcopierCopyTaskState } return vts } + +// getInsertParallelism returns the number of parallel workers to use for inserting batches during the copy phase. +func getInsertParallelism() int { + parallelism := int(math.Max(1, float64(vreplicationParallelInsertWorkers))) + return parallelism +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go new file mode 100644 index 00000000000..6252690a629 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go @@ -0,0 +1,310 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "context" + "fmt" + "io" + "strconv" + "time" + + "google.golang.org/protobuf/encoding/prototext" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +/* +This file is similar to vcopier.go: it handles the copy phase for the AtomicCopy where all tables +are streamed in a single phase. +*/ + +type copyAllState struct { + vc *vcopier + plan *ReplicatorPlan + currentTableName string + tables map[string]bool +} + +// newCopyAllState creates the required table plans and sets up the copy state for all tables in the source. +func newCopyAllState(vc *vcopier) (*copyAllState, error) { + state := ©AllState{ + vc: vc, + } + plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats) + if err != nil { + return nil, err + } + state.plan = plan + state.tables = make(map[string]bool, len(plan.TargetTables)) + for _, table := range plan.TargetTables { + state.tables[table.TargetName] = false + } + return state, nil +} + +// copyAll copies all tables from the source to the target sequentially, finishing one table first and then moving to the next.. +func (vc *vcopier) copyAll(ctx context.Context, settings binlogplayer.VRSettings) error { + var err error + + log.Infof("Starting copyAll for %s", settings.WorkflowName) + defer log.Infof("Returning from copyAll for %s", settings.WorkflowName) + defer vc.vr.dbClient.Rollback() + + state, err := newCopyAllState(vc) + if err != nil { + return err + } + + ctx, cancel := context.WithTimeout(ctx, copyPhaseDuration) + defer cancel() + + rowsCopiedTicker := time.NewTicker(rowsCopiedUpdateInterval) + defer rowsCopiedTicker.Stop() + + parallelism := getInsertParallelism() + copyWorkerFactory := vc.newCopyWorkerFactory(parallelism) + var copyWorkQueue *vcopierCopyWorkQueue + + // Allocate a result channel to collect results from tasks. To not block fast workers, we allocate a buffer of + // MaxResultsInFlight results per worker. + const MaxResultsInFlight = 4 + resultCh := make(chan *vcopierCopyTaskResult, parallelism*MaxResultsInFlight) + defer close(resultCh) + + var lastpk *querypb.Row + var pkfields []*querypb.Field + var lastpkbv map[string]*querypb.BindVariable + // Use this for task sequencing. + var prevCh <-chan *vcopierCopyTaskResult + var gtid string + + serr := vc.vr.sourceVStreamer.VStreamTables(ctx, func(resp *binlogdatapb.VStreamTablesResponse) error { + defer vc.vr.stats.PhaseTimings.Record("copy", time.Now()) + defer vc.vr.stats.CopyLoopCount.Add(1) + log.Infof("VStreamTablesResponse: received table %s, #fields %d, #rows %d, gtid %s, lastpk %+v", + resp.TableName, len(resp.Fields), len(resp.Rows), resp.Gtid, resp.Lastpk) + tableName := resp.TableName + gtid = resp.Gtid + + updateRowsCopied := func() error { + updateRowsQuery := binlogplayer.GenerateUpdateRowsCopied(vc.vr.id, vc.vr.stats.CopyRowCount.Get()) + _, err := vc.vr.dbClient.Execute(updateRowsQuery) + return err + } + + if err := updateRowsCopied(); err != nil { + return err + } + select { + case <-rowsCopiedTicker.C: + if err := updateRowsCopied(); err != nil { + return err + } + case <-ctx.Done(): + return io.EOF + default: + } + if tableName != state.currentTableName { + if copyWorkQueue != nil { + copyWorkQueue.close() + } + copyWorkQueue = vc.newCopyWorkQueue(parallelism, copyWorkerFactory) + if state.currentTableName != "" { + log.Infof("copy of table %s is done at lastpk %+v", state.currentTableName, lastpkbv) + if err := vc.deleteCopyState(state.currentTableName); err != nil { + return err + } + } else { + log.Infof("starting copy phase with table %s", tableName) + } + + state.currentTableName = tableName + } + + // A new copy queue is created for each table. The queue is closed when the table is done. + if !copyWorkQueue.isOpen { + if len(resp.Fields) == 0 { + return fmt.Errorf("expecting field event first, got: %v", resp) + } + + lastpk = nil + // pkfields are only used for logging, so that we can monitor progress. + pkfields = make([]*querypb.Field, len(resp.Pkfields)) + for _, f := range resp.Pkfields { + pkfields = append(pkfields, f.CloneVT()) + } + + fieldEvent := &binlogdatapb.FieldEvent{ + TableName: tableName, + } + for _, f := range resp.Fields { + fieldEvent.Fields = append(fieldEvent.Fields, f.CloneVT()) + } + tablePlan, err := state.plan.buildExecutionPlan(fieldEvent) + if err != nil { + return err + } + + buf := sqlparser.NewTrackedBuffer(nil) + buf.Myprintf( + "insert into _vt.copy_state (lastpk, vrepl_id, table_name) values (%a, %s, %s)", ":lastpk", + strconv.Itoa(int(vc.vr.id)), + encodeString(tableName)) + addLatestCopyState := buf.ParsedQuery() + copyWorkQueue.open(addLatestCopyState, pkfields, tablePlan) + } + // When rowstreamer has finished streaming all rows, we get a callback with empty rows. + if len(resp.Rows) == 0 { + return nil + } + // Get the last committed pk into a loggable form. + lastpkbuf, merr := prototext.Marshal(&querypb.QueryResult{ + Fields: pkfields, + Rows: []*querypb.Row{lastpk}, + }) + + if merr != nil { + return fmt.Errorf("failed to marshal pk fields and value into query result: %s", merr.Error()) + } + lastpkbv = map[string]*querypb.BindVariable{ + "lastpk": { + Type: sqltypes.VarBinary, + Value: lastpkbuf, + }, + } + log.Infof("copying table %s with lastpk %v", tableName, lastpkbv) + // Prepare a vcopierCopyTask for the current batch of work. + currCh := make(chan *vcopierCopyTaskResult, 1) + currT := newVCopierCopyTask(newVCopierCopyTaskArgs(resp.Rows, resp.Lastpk)) + + // Send result to the global resultCh and currCh. resultCh is used by + // the loop to return results to VStreamRows. currCh will be used to + // sequence the start of the nextT. + currT.lifecycle.onResult().sendTo(currCh) + currT.lifecycle.onResult().sendTo(resultCh) + + // Use prevCh to Sequence the prevT with the currT so that: + // * The prevT is completed before we begin updating + // _vt.copy_state for currT. + // * If prevT fails or is canceled, the current task is + // canceled. + // prevCh is nil only for the first task in the vcopier run. + if prevCh != nil { + // prevT publishes to prevCh, and currT is the only thing that can + // consume from prevCh. If prevT is already done, then prevCh will + // have a value in it. If prevT isn't yet done, then prevCh will + // have a value later. Either way, AwaitCompletion should + // eventually get a value, unless there is a context expiry. + currT.lifecycle.before(vcopierCopyTaskInsertCopyState).awaitCompletion(prevCh) + } + + // Store currCh in prevCh. The nextT will use this for sequencing. + prevCh = currCh + + // Update stats after task is done. + currT.lifecycle.onResult().do(func(_ context.Context, result *vcopierCopyTaskResult) { + if result.state == vcopierCopyTaskFail { + vc.vr.stats.ErrorCounts.Add([]string{"Copy"}, 1) + } + if result.state == vcopierCopyTaskComplete { + vc.vr.stats.CopyRowCount.Add(int64(len(result.args.rows))) + vc.vr.stats.QueryCount.Add("copy", 1) + vc.vr.stats.TableCopyRowCounts.Add(tableName, int64(len(result.args.rows))) + vc.vr.stats.TableCopyTimings.Add(tableName, time.Since(result.startedAt)) + } + }) + + if err := copyWorkQueue.enqueue(ctx, currT); err != nil { + log.Warningf("failed to enqueue task in workflow %s: %s", vc.vr.WorkflowName, err.Error()) + return err + } + + // When async execution is not enabled, a done task will be available + // in the resultCh after each Enqueue, unless there was a queue state + // error (e.g. couldn't obtain a worker from pool). + // + // When async execution is enabled, results will show up in the channel + // eventually, possibly in a subsequent VStreamRows loop. It's still + // a good idea to check this channel on every pass so that: + // + // * resultCh doesn't fill up. If it does fill up then tasks won't be + // able to add their results to the channel, and progress in this + // goroutine will be blocked. + // * We keep lastpk up-to-date. + select { + case result := <-resultCh: + if result != nil { + switch result.state { + case vcopierCopyTaskCancel: + log.Warningf("task was canceled in workflow %s: %v", vc.vr.WorkflowName, result.err) + return io.EOF + case vcopierCopyTaskComplete: + // Collect lastpk. Needed for logging at the end. + lastpk = result.args.lastpk + case vcopierCopyTaskFail: + return vterrors.Wrapf(result.err, "task error") + } + } else { + return io.EOF + } + default: + } + return nil + }) + if serr != nil { + log.Infof("VStreamTables failed: %v", serr) + return serr + } + // A context expiration was probably caused by a PlannedReparentShard or an + // elapsed copy phase duration. CopyAll is not resilient to these events. + select { + case <-ctx.Done(): + log.Infof("Copy of %v stopped", state.currentTableName) + return fmt.Errorf("CopyAll was interrupted due to context expiration") + default: + if err := vc.deleteCopyState(state.currentTableName); err != nil { + return err + } + if copyWorkQueue != nil { + copyWorkQueue.close() + } + if err := vc.updatePos(ctx, gtid); err != nil { + return err + } + log.Infof("Completed copy of all tables") + } + return nil +} + +// deleteCopyState deletes the copy state entry for a table, signifying that the copy phase is complete for that table. +func (vc *vcopier) deleteCopyState(tableName string) error { + log.Infof("Deleting copy state for table %s", tableName) + //FIXME get sidecar db name + delQuery := fmt.Sprintf("delete from _vt.copy_state where table_name=%s and vrepl_id = %d", encodeString(tableName), vc.vr.id) + if _, err := vc.vr.dbClient.Execute(delQuery); err != nil { + return err + } + return nil +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go index 92a44336e9f..ff9b9daf00f 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go @@ -17,16 +17,18 @@ limitations under the License. package vreplication import ( + "context" "fmt" "os" "strings" "testing" "time" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv" - "context" + "vitess.io/vitess/go/vt/vttablet" + + "vitess.io/vitess/go/vt/log" "github.com/stretchr/testify/require" @@ -46,29 +48,29 @@ func commonVcopierTestCases() []vcopierTestCase { return []vcopierTestCase{ // Default experimental flags. { - vreplicationExperimentalFlags: vreplicationExperimentalFlags, + vreplicationExperimentalFlags: vttablet.VReplicationExperimentalFlags, }, // Parallel bulk inserts enabled with 4 workers. { - vreplicationExperimentalFlags: vreplicationExperimentalFlags, + vreplicationExperimentalFlags: vttablet.VReplicationExperimentalFlags, vreplicationParallelInsertWorkers: 4, }, } } func testVcopierTestCases(t *testing.T, test func(*testing.T), cases []vcopierTestCase) { - oldVreplicationExperimentalFlags := vreplicationExperimentalFlags + oldVreplicationExperimentalFlags := vttablet.VReplicationExperimentalFlags oldVreplicationParallelInsertWorkers := vreplicationParallelInsertWorkers // Extra reset at the end in case we return prematurely. defer func() { - vreplicationExperimentalFlags = oldVreplicationExperimentalFlags + vttablet.VReplicationExperimentalFlags = oldVreplicationExperimentalFlags vreplicationParallelInsertWorkers = oldVreplicationParallelInsertWorkers }() for _, tc := range cases { tc := tc // Avoid export loop bugs. // Set test flags. - vreplicationExperimentalFlags = tc.vreplicationExperimentalFlags + vttablet.VReplicationExperimentalFlags = tc.vreplicationExperimentalFlags vreplicationParallelInsertWorkers = tc.vreplicationParallelInsertWorkers // Run test case. t.Run( @@ -79,7 +81,7 @@ func testVcopierTestCases(t *testing.T, test func(*testing.T), cases []vcopierTe test, ) // Reset. - vreplicationExperimentalFlags = oldVreplicationExperimentalFlags + vttablet.VReplicationExperimentalFlags = oldVreplicationExperimentalFlags vreplicationParallelInsertWorkers = oldVreplicationParallelInsertWorkers } } @@ -155,7 +157,7 @@ func testPlayerCopyCharPK(t *testing.T) { OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -174,10 +176,10 @@ func testPlayerCopyCharPK(t *testing.T) { "/insert into _vt.copy_state", "/update _vt.vreplication set state='Copying'", "insert into dst(idc,val) values ('a\\0',1)", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"idc\\" type:BINARY} rows:{lengths:2 values:\\"a\\\\x00\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"idc\\" type:BINARY charset:63 flags:20611} rows:{lengths:2 values:\\"a\\\\x00\\"}'.*`, `update dst set val=3 where idc='a\0' and ('a\0') <= ('a\0')`, "insert into dst(idc,val) values ('c\\0',2)", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"idc\\" type:BINARY} rows:{lengths:2 values:\\"c\\\\x00\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"idc\\" type:BINARY charset:63 flags:20611} rows:{lengths:2 values:\\"c\\\\x00\\"}'.*`, "/delete cs, pca from _vt.copy_state as cs left join _vt.post_copy_action as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name.*dst", "/update _vt.vreplication set state='Running", )) @@ -262,7 +264,7 @@ func testPlayerCopyVarcharPKCaseInsensitive(t *testing.T) { OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -282,7 +284,7 @@ func testPlayerCopyVarcharPKCaseInsensitive(t *testing.T) { "/update _vt.vreplication set state='Copying'", // Copy mode. "insert into dst(idc,val) values ('a',1)", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"idc\\" type:VARCHAR} rows:{lengths:1 values:\\"a\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"idc\\" type:VARCHAR charset:45 flags:20483} rows:{lengths:1 values:\\"a\\"}'.*`, // Copy-catchup mode. `/insert into dst\(idc,val\) select 'B', 3 from dual where \( .* 'B' COLLATE .* \) <= \( .* 'a' COLLATE .* \)`, ).Then(func(expect qh.ExpectationSequencer) qh.ExpectationSequencer { @@ -292,11 +294,11 @@ func testPlayerCopyVarcharPKCaseInsensitive(t *testing.T) { //upd1 := expect. upd1 := expect.Then(qh.Eventually( "insert into dst(idc,val) values ('B',3)", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"idc\\" type:VARCHAR} rows:{lengths:1 values:\\"B\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"idc\\" type:VARCHAR charset:45 flags:20483} rows:{lengths:1 values:\\"B\\"}'.*`, )) upd2 := expect.Then(qh.Eventually( "insert into dst(idc,val) values ('c',2)", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"idc\\" type:VARCHAR} rows:{lengths:1 values:\\"c\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"idc\\" type:VARCHAR charset:45 flags:20483} rows:{lengths:1 values:\\"c\\"}'.*`, )) upd1.Then(upd2.Eventually()) return upd2 @@ -385,7 +387,7 @@ func testPlayerCopyVarcharCompositePKCaseSensitiveCollation(t *testing.T) { OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -405,12 +407,12 @@ func testPlayerCopyVarcharCompositePKCaseSensitiveCollation(t *testing.T) { "/update _vt.vreplication set state='Copying'", // Copy mode. "insert into dst(id,idc,idc2,val) values (1,'a','a',1)", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32} fields:{name:\\"idc\\" type:VARBINARY} fields:{name:\\"idc2\\" type:VARBINARY} rows:{lengths:1 lengths:1 lengths:1 values:\\"1aa\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} fields:{name:\\"idc\\" type:VARBINARY charset:63 flags:20611} fields:{name:\\"idc2\\" type:VARBINARY charset:63 flags:20611} rows:{lengths:1 lengths:1 lengths:1 values:\\"1aa\\"}'.*`, // Copy-catchup mode. `insert into dst(id,idc,idc2,val) select 1, 'B', 'B', 3 from dual where (1,'B','B') <= (1,'a','a')`, // Copy mode. "insert into dst(id,idc,idc2,val) values (1,'c','c',2)", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32} fields:{name:\\"idc\\" type:VARBINARY} fields:{name:\\"idc2\\" type:VARBINARY} rows:{lengths:1 lengths:1 lengths:1 values:\\"1cc\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} fields:{name:\\"idc\\" type:VARBINARY charset:63 flags:20611} fields:{name:\\"idc2\\" type:VARBINARY charset:63 flags:20611} rows:{lengths:1 lengths:1 lengths:1 values:\\"1cc\\"}'.*`, // Wrap-up. "/delete cs, pca from _vt.copy_state as cs left join _vt.post_copy_action as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name.*dst", "/update _vt.vreplication set state='Running'", @@ -468,26 +470,26 @@ func testPlayerCopyTablesWithFK(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) require.NoError(t, err) expectDBClientQueries(t, qh.Expect( "/insert into _vt.vreplication", "/update _vt.vreplication set message='Picked source tablet.*", - "select @@foreign_key_checks;", + "select @@foreign_key_checks", // Create the list of tables to copy and transition to Copying state. "begin", "/insert into _vt.copy_state", "/update _vt.vreplication set state='Copying'", "commit", - "set foreign_key_checks=0;", + "set @@session.foreign_key_checks=0", // The first fast-forward has no starting point. So, it just saves the current position. "/update _vt.vreplication set pos=", ).Then(func(expect qh.ExpectationSequencer) qh.ExpectationSequencer { // With parallel inserts, new db client connects are created on-the-fly. if vreplicationParallelInsertWorkers > 1 { - return expect.Then(qh.Eventually("set foreign_key_checks=0;")) + return expect.Then(qh.Eventually("set @@session.foreign_key_checks=0")) } return expect }).Then(qh.Eventually( @@ -495,35 +497,35 @@ func testPlayerCopyTablesWithFK(t *testing.T) { // Inserts may happen out-of-order. Update happen in-order. "begin", "insert into dst1(id,id2) values (1,1), (2,2)", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32} rows:{lengths:1 values:\\"2\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"2\\"}'.*`, "commit", )).Then(qh.Immediately( - "set foreign_key_checks=0;", + "set @@session.foreign_key_checks=0", // copy of dst1 is done: delete from copy_state. "/delete cs, pca from _vt.copy_state as cs left join _vt.post_copy_action as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name.*dst1", // The next FF executes and updates the position before copying. - "set foreign_key_checks=0;", + "set @@session.foreign_key_checks=0", "begin", "/update _vt.vreplication set pos=", "commit", )).Then(func(expect qh.ExpectationSequencer) qh.ExpectationSequencer { // With parallel inserts, new db client connects are created on-the-fly. if vreplicationParallelInsertWorkers > 1 { - return expect.Then(qh.Eventually("set foreign_key_checks=0;")) + return expect.Then(qh.Eventually("set @@session.foreign_key_checks=0")) } return expect }).Then(qh.Eventually( // copy dst2 "begin", "insert into dst2(id,id2) values (1,21), (2,22)", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32} rows:{lengths:1 values:\\"2\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"2\\"}'.*`, "commit", )).Then(qh.Immediately( - "set foreign_key_checks=0;", + "set @@session.foreign_key_checks=0", // copy of dst1 is done: delete from copy_state. "/delete cs, pca from _vt.copy_state as cs left join _vt.post_copy_action as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name.*dst2", // All tables copied. Final catch up followed by Running state. - "set foreign_key_checks=1;", + "set @@session.foreign_key_checks=1", "/update _vt.vreplication set state='Running'", ))) @@ -543,7 +545,7 @@ func testPlayerCopyTablesWithFK(t *testing.T) { t.Fatal(err) } expectDBClientQueries(t, qh.Expect( - "set foreign_key_checks=1;", + "set @@session.foreign_key_checks=1", "begin", "/delete from _vt.vreplication", "/delete from _vt.copy_state", @@ -560,9 +562,9 @@ func testPlayerCopyTables(t *testing.T) { defer deleteTablet(addTablet(100)) execStatements(t, []string{ - "create table src1(id int, val varbinary(128), d decimal(8,0), primary key(id))", - "insert into src1 values(2, 'bbb', 1), (1, 'aaa', 0)", - fmt.Sprintf("create table %s.dst1(id int, val varbinary(128), val2 varbinary(128), d decimal(8,0), primary key(id))", vrepldb), + "create table src1(id int, val varbinary(128), d decimal(8,0), j json, primary key(id))", + "insert into src1 values(2, 'bbb', 1, '{\"foo\": \"bar\"}'), (1, 'aaa', 0, JSON_ARRAY(123456789012345678901234567890, \"abcd\")), (3, 'ccc', 2, 'null'), (4, 'ddd', 3, '{\"name\": \"matt\", \"size\": null}'), (5, 'eee', 4, null)", + fmt.Sprintf("create table %s.dst1(id int, val varbinary(128), val2 varbinary(128), d decimal(8,0), j json, primary key(id))", vrepldb), "create table yes(id int, val varbinary(128), primary key(id))", fmt.Sprintf("create table %s.yes(id int, val varbinary(128), primary key(id))", vrepldb), "create table no(id int, val varbinary(128), primary key(id))", @@ -579,7 +581,7 @@ func testPlayerCopyTables(t *testing.T) { filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ Match: "dst1", - Filter: "select id, val, val as val2, d from src1", + Filter: "select id, val, val as val2, d, j from src1", }, { Match: "/yes", }}, @@ -591,7 +593,7 @@ func testPlayerCopyTables(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -615,8 +617,8 @@ func testPlayerCopyTables(t *testing.T) { // The first fast-forward has no starting point. So, it just saves the current position. "/update _vt.vreplication set pos=", "begin", - "insert into dst1(id,val,val2,d) values (1,'aaa','aaa',0), (2,'bbb','bbb',1)", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32} rows:{lengths:1 values:\\"2\\"}'.*`, + "insert into dst1(id,val,val2,d,j) values (1,'aaa','aaa',0,JSON_ARRAY(123456789012345678901234567890, _utf8mb4'abcd')), (2,'bbb','bbb',1,JSON_OBJECT(_utf8mb4'foo', _utf8mb4'bar')), (3,'ccc','ccc',2,CAST(_utf8mb4'null' as JSON)), (4,'ddd','ddd',3,JSON_OBJECT(_utf8mb4'name', _utf8mb4'matt', _utf8mb4'size', null)), (5,'eee','eee',4,null)", + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"5\\"}'.*`, "commit", // copy of dst1 is done: delete from copy_state. "/delete cs, pca from _vt.copy_state as cs left join _vt.post_copy_action as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name.*dst1", @@ -630,11 +632,14 @@ func testPlayerCopyTables(t *testing.T) { "/update _vt.vreplication set state='Running'", )) expectData(t, "dst1", [][]string{ - {"1", "aaa", "aaa", "0"}, - {"2", "bbb", "bbb", "1"}, + {"1", "aaa", "aaa", "0", "[123456789012345678901234567890, \"abcd\"]"}, + {"2", "bbb", "bbb", "1", "{\"foo\": \"bar\"}"}, + {"3", "ccc", "ccc", "2", "null"}, + {"4", "ddd", "ddd", "3", "{\"name\": \"matt\", \"size\": null}"}, + {"5", "eee", "eee", "4", ""}, }) expectData(t, "yes", [][]string{}) - validateCopyRowCountStat(t, 2) + validateCopyRowCountStat(t, 5) ctx, cancel := context.WithCancel(context.Background()) type logTestCase struct { @@ -730,7 +735,7 @@ func testPlayerCopyBigTable(t *testing.T) { OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -750,7 +755,7 @@ func testPlayerCopyBigTable(t *testing.T) { // The first fast-forward has no starting point. So, it just saves the current position. "/update _vt.vreplication set state='Copying'", "insert into dst(id,val) values (1,'aaa')", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32} rows:{lengths:1 values:\\"1\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"1\\"}'.*`, // The next catchup executes the new row insert, but will be a no-op. "insert into dst(id,val) select 3, 'ccc' from dual where (3) <= (1)", // fastForward has nothing to add. Just saves position. @@ -760,12 +765,12 @@ func testPlayerCopyBigTable(t *testing.T) { ).Then(func(expect qh.ExpectationSequencer) qh.ExpectationSequencer { ins1 := expect.Then(qh.Eventually("insert into dst(id,val) values (2,'bbb')")) upd1 := ins1.Then(qh.Eventually( - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32} rows:{lengths:1 values:\\"2\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"2\\"}'.*`, )) // Third row copied without going back to catchup state. ins3 := expect.Then(qh.Eventually("insert into dst(id,val) values (3,'ccc')")) upd3 := ins3.Then(qh.Eventually( - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32} rows:{lengths:1 values:\\"3\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"3\\"}'.*`, )) upd1.Then(upd3.Eventually()) return upd3 @@ -860,7 +865,7 @@ func testPlayerCopyWildcardRule(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -880,7 +885,7 @@ func testPlayerCopyWildcardRule(t *testing.T) { "/update _vt.vreplication set state='Copying'", // The first fast-forward has no starting point. So, it just saves the current position. "insert into src(id,val) values (1,'aaa')", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32} rows:{lengths:1 values:\\"1\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"1\\"}'.*`, // The next catchup executes the new row insert, but will be a no-op. "insert into src(id,val) select 3, 'ccc' from dual where (3) <= (1)", // fastForward has nothing to add. Just saves position. @@ -890,12 +895,12 @@ func testPlayerCopyWildcardRule(t *testing.T) { ).Then(func(expect qh.ExpectationSequencer) qh.ExpectationSequencer { ins1 := expect.Then(qh.Eventually("insert into src(id,val) values (2,'bbb')")) upd1 := ins1.Then(qh.Eventually( - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32} rows:{lengths:1 values:\\"2\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"2\\"}'.*`, )) // Third row copied without going back to catchup state. ins3 := expect.Then(qh.Eventually("insert into src(id,val) values (3,'ccc')")) upd3 := ins3.Then(qh.Eventually( - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32} rows:{lengths:1 values:\\"3\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"3\\"}'.*`, )) upd1.Then(upd3.Eventually()) return upd3 @@ -997,7 +1002,7 @@ func testPlayerCopyTableContinuation(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.BlpStopped, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Stopped, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1055,13 +1060,13 @@ func testPlayerCopyTableContinuation(t *testing.T) { ).Then(qh.Immediately( "insert into dst1(id,val) values (7,'insert out'), (8,'no change'), (10,'updated'), (12,'move out')", )).Then(qh.Eventually( - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id1\\" type:INT32} fields:{name:\\"id2\\" type:INT32} rows:{lengths:2 lengths:1 values:\\"126\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id1\\" type:INT32 charset:63 flags:53251} fields:{name:\\"id2\\" type:INT32 charset:63 flags:53251} rows:{lengths:2 lengths:1 values:\\"126\\"}'.*`, )).Then(qh.Immediately( "/delete cs, pca from _vt.copy_state as cs left join _vt.post_copy_action as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name.*dst1", "insert into not_copied(id,val) values (1,'bbb')", )).Then(qh.Eventually( // Copy again. There should be no events for catchup. - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\\"id\\\" type:INT32} rows:{lengths:1 values:\\\"1\\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\\"id\\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\\"1\\\"}'.*`, )).Then(qh.Immediately( "/delete cs, pca from _vt.copy_state as cs left join _vt.post_copy_action as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name.*not_copied", "/update _vt.vreplication set state='Running'", @@ -1092,11 +1097,11 @@ func TestPlayerCopyWildcardTableContinuation(t *testing.T) { testVcopierTestCases(t, testPlayerCopyWildcardTableContinuation, []vcopierTestCase{ // Optimize inserts without parallel inserts. { - vreplicationExperimentalFlags: vreplicationExperimentalFlagOptimizeInserts, + vreplicationExperimentalFlags: vttablet.VReplicationExperimentalFlagOptimizeInserts, }, // Optimize inserts with parallel inserts. { - vreplicationExperimentalFlags: vreplicationExperimentalFlagOptimizeInserts, + vreplicationExperimentalFlags: vttablet.VReplicationExperimentalFlagOptimizeInserts, vreplicationParallelInsertWorkers: 4, }, }) @@ -1134,7 +1139,7 @@ func testPlayerCopyWildcardTableContinuation(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.BlpStopped, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Stopped, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1161,7 +1166,7 @@ func testPlayerCopyWildcardTableContinuation(t *testing.T) { expectDeleteQueries(t) }() - optimizeInsertsEnabled := vreplicationExperimentalFlags /**/ & /**/ vreplicationExperimentalFlagOptimizeInserts != 0 + optimizeInsertsEnabled := vttablet.VReplicationExperimentalFlags /**/ & /**/ vttablet.VReplicationExperimentalFlagOptimizeInserts != 0 expectNontxQueries(t, qh.Expect( "/insert into _vt.vreplication", @@ -1194,10 +1199,10 @@ func testPlayerCopyWildcardTableContinuation(t *testing.T) { // TestPlayerCopyWildcardTableContinuationWithOptimizeInserts tests the copy workflow where tables have been partially copied // enabling the optimize inserts functionality func TestPlayerCopyWildcardTableContinuationWithOptimizeInserts(t *testing.T) { - oldVreplicationExperimentalFlags := vreplicationExperimentalFlags - vreplicationExperimentalFlags = vreplicationExperimentalFlagOptimizeInserts + oldVreplicationExperimentalFlags := vttablet.VReplicationExperimentalFlags + vttablet.VReplicationExperimentalFlags = vttablet.VReplicationExperimentalFlagOptimizeInserts defer func() { - vreplicationExperimentalFlags = oldVreplicationExperimentalFlags + vttablet.VReplicationExperimentalFlags = oldVreplicationExperimentalFlags }() defer deleteTablet(addTablet(100)) @@ -1231,7 +1236,7 @@ func TestPlayerCopyWildcardTableContinuationWithOptimizeInserts(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.BlpStopped, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Stopped, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1300,7 +1305,7 @@ func testPlayerCopyTablesNone(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1354,7 +1359,7 @@ func testPlayerCopyTablesStopAfterCopy(t *testing.T) { OnDdl: binlogdatapb.OnDDLAction_IGNORE, StopAfterCopy: true, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1380,7 +1385,7 @@ func testPlayerCopyTablesStopAfterCopy(t *testing.T) { ).Then(qh.Eventually( "begin", "insert into dst1(id,val) values (1,'aaa'), (2,'bbb')", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32} rows:{lengths:1 values:\\"2\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"2\\"}'.*`, "commit", )).Then(qh.Immediately( // copy of dst1 is done: delete from copy_state. @@ -1395,6 +1400,110 @@ func testPlayerCopyTablesStopAfterCopy(t *testing.T) { }) } +// TestPlayerCopyTablesGIPK tests the flow when the source table has a generated invisible primary key, for when +// the target table also has a gipk and also when the gipk column is visible, for example, in a sharded keyspace. +// The test also confirms that the copy_state has the gipk. +func TestPlayerCopyTablesGIPK(t *testing.T) { + testVcopierTestCases(t, testPlayerCopyTablesGIPK, commonVcopierTestCases()) +} + +func testPlayerCopyTablesGIPK(t *testing.T) { + if !env.HasCapability(testenv.ServerCapabilityGeneratedInvisiblePrimaryKey) { + t.Skip("skipping test as server does not support generated invisible primary keys") + } + defer deleteTablet(addTablet(100)) + + execStatements(t, []string{ + "SET @@session.sql_generate_invisible_primary_key=ON;", + "create table src1(val varbinary(128))", + "insert into src1 values('aaa'), ('bbb')", + "create table src2(val varbinary(128))", + "insert into src2 values('aaa'), ('bbb')", + fmt.Sprintf("create table %s.dst1(val varbinary(128))", vrepldb), + "SET @@session.sql_generate_invisible_primary_key=OFF;", + fmt.Sprintf("create table %s.dst2(my_row_id int, val varbinary(128), primary key(my_row_id))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table src1", + fmt.Sprintf("drop table %s.dst1", vrepldb), + "drop table src2", + fmt.Sprintf("drop table %s.dst2", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "dst1", + Filter: "select * from src1", + }, { + Match: "dst2", + Filter: "select * from src2", + }}, + } + + bls := &binlogdatapb.BinlogSource{ + Keyspace: env.KeyspaceName, + Shard: env.ShardName, + Filter: filter, + OnDdl: binlogdatapb.OnDDLAction_IGNORE, + StopAfterCopy: true, + } + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) + qr, err := playerEngine.Exec(query) + if err != nil { + t.Fatal(err) + } + defer func() { + query := fmt.Sprintf("delete from _vt.vreplication where id = %d", qr.InsertID) + if _, err := playerEngine.Exec(query); err != nil { + t.Fatal(err) + } + expectDeleteQueries(t) + }() + + expectDBClientQueries(t, qh.Expect( + "/insert into _vt.vreplication", + "/update _vt.vreplication set message='Picked source tablet.*", + // Create the list of tables to copy and transition to Copying state. + "begin", + "/insert into _vt.copy_state", + "/update _vt.vreplication set state='Copying'", + "commit", + // The first fast-forward has no starting point. So, it just saves the current position. + "/update _vt.vreplication set pos=", + ).Then(qh.Eventually( + "begin", + "insert into dst1(my_row_id,val) values (1,'aaa'), (2,'bbb')", + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"my_row_id\\" type:UINT64 charset:63 flags:49699} rows:{lengths:1 values:\\"2\\"}'.*`, + "commit", + )).Then(qh.Immediately( + // copy of dst1 is done: delete from copy_state. + "/delete cs, pca from _vt.copy_state as cs left join _vt.post_copy_action as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name.*dst1", + )).Then(qh.Eventually( + "begin", + "/update _vt.vreplication set pos=", + "commit", + "begin", + "insert into dst2(my_row_id,val) values (1,'aaa'), (2,'bbb')", + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"my_row_id\\" type:UINT64 charset:63 flags:49699} rows:{lengths:1 values:\\"2\\"}'.*`, + "commit", + )).Then(qh.Immediately( + // copy of dst2 is done: delete from copy_state. + "/delete cs, pca from _vt.copy_state as cs left join _vt.post_copy_action as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name.*dst2", + // All tables copied. Stop vreplication because we requested it. + "/update _vt.vreplication set state='Stopped'", + ))) + + expectData(t, "dst1", [][]string{ + {"aaa"}, + {"bbb"}, + }) + expectData(t, "dst2", [][]string{ + {"1", "aaa"}, + {"2", "bbb"}, + }) +} + func TestPlayerCopyTableCancel(t *testing.T) { testVcopierTestCases(t, testPlayerCopyTableCancel, commonVcopierTestCases()) } @@ -1437,7 +1546,7 @@ func testPlayerCopyTableCancel(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1466,7 +1575,7 @@ func testPlayerCopyTableCancel(t *testing.T) { ).Then(qh.Eventually( "begin", "insert into dst1(id,val) values (1,'aaa'), (2,'bbb')", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32} rows:{lengths:1 values:\\"2\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"2\\"}'.*`, "commit", )).Then(qh.Immediately( // copy of dst1 is done: delete from copy_state. @@ -1486,12 +1595,6 @@ func TestPlayerCopyTablesWithGeneratedColumn(t *testing.T) { } func testPlayerCopyTablesWithGeneratedColumn(t *testing.T) { - flavor := strings.ToLower(env.Flavor) - // Disable tests on percona and mariadb platforms in CI since - // generated columns support was added in 5.7 and mariadb added mysql compatible generated columns in 10.2 - if !strings.Contains(flavor, "mysql57") && !strings.Contains(flavor, "mysql80") { - return - } defer deleteTablet(addTablet(100)) execStatements(t, []string{ @@ -1526,7 +1629,7 @@ func testPlayerCopyTablesWithGeneratedColumn(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1546,11 +1649,11 @@ func testPlayerCopyTablesWithGeneratedColumn(t *testing.T) { "/update _vt.vreplication set state", // The first fast-forward has no starting point. So, it just saves the current position. "insert into dst1(id,val,val3,id2) values (1,'aaa','aaa1',10), (2,'bbb','bbb2',20)", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields: rows: '.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"2\\"}'.*`, // copy of dst1 is done: delete from copy_state. "/delete cs, pca from _vt.copy_state as cs left join _vt.post_copy_action as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name.*dst1", "insert into dst2(val3,val,id2) values ('aaa1','aaa',10), ('bbb2','bbb',20)", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields: rows: '.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"2\\"}'.*`, // copy of dst2 is done: delete from copy_state. "/delete cs, pca from _vt.copy_state as cs left join _vt.post_copy_action as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name.*dst2", "/update _vt.vreplication set state", @@ -1607,7 +1710,7 @@ func testCopyTablesWithInvalidDates(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) require.NoError(t, err) @@ -1624,7 +1727,7 @@ func testCopyTablesWithInvalidDates(t *testing.T) { ).Then(qh.Eventually( "begin", "insert into dst1(id,dt) values (1,'2020-01-12'), (2,'0000-00-00')", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32} rows:{lengths:1 values:\\"2\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"2\\"}'.*`, "commit", )).Then(qh.Immediately( // copy of dst1 is done: delete from copy_state. @@ -1652,8 +1755,7 @@ func testCopyTablesWithInvalidDates(t *testing.T) { } func supportsInvisibleColumns() bool { - if env.DBType == string(mysqlctl.FlavorMySQL) && env.DBMajorVersion >= 8 && - (env.DBMinorVersion > 0 || env.DBPatchVersion >= 23) { + if env.HasCapability(testenv.ServerCapabilityInvisibleColumn) { return true } log.Infof("invisible columns not supported in %d.%d.%d", env.DBMajorVersion, env.DBMinorVersion, env.DBPatchVersion) @@ -1695,7 +1797,7 @@ func testCopyInvisibleColumns(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, "", binlogplayer.VReplicationInit, playerEngine.dbName, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1715,7 +1817,7 @@ func testCopyInvisibleColumns(t *testing.T) { "/update _vt.vreplication set state='Copying'", // The first fast-forward has no starting point. So, it just saves the current position. "insert into dst1(id,id2,inv1,inv2) values (1,10,100,1000), (2,20,200,2000)", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32} fields:{name:\\"inv1\\" type:INT32} rows:{lengths:1 lengths:3 values:\\"2200\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} fields:{name:\\"inv1\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 lengths:3 values:\\"2200\\"}'.*`, // copy of dst1 is done: delete from copy_state. "/delete cs, pca from _vt.copy_state as cs left join _vt.post_copy_action as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name.*dst1", "/update _vt.vreplication set state='Running'", diff --git a/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go b/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go index cc7776720ba..c3941b0f1bb 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go @@ -17,12 +17,11 @@ limitations under the License. package vreplication import ( + "context" "io" "time" - "context" - - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/log" @@ -100,7 +99,7 @@ func (vc *vdbClient) Execute(query string) (*sqltypes.Result, error) { func (vc *vdbClient) ExecuteWithRetry(ctx context.Context, query string) (*sqltypes.Result, error) { qr, err := vc.Execute(query) for err != nil { - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.ERLockDeadlock || sqlErr.Number() == mysql.ERLockWaitTimeout { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERLockDeadlock || sqlErr.Number() == sqlerror.ERLockWaitTimeout { log.Infof("retryable error: %v, waiting for %v and retrying", sqlErr, dbLockRetryDelay) if err := vc.Rollback(); err != nil { return nil, err diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go index 0e33eed5f6a..8eee211ff9e 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go @@ -26,11 +26,12 @@ import ( "strings" "time" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) @@ -38,15 +39,15 @@ import ( // vplayer replays binlog events by pulling them from a vstreamer. type vplayer struct { vr *vreplicator - startPos mysql.Position - stopPos mysql.Position + startPos replication.Position + stopPos replication.Position saveStop bool copyState map[string]*sqltypes.Result replicatorPlan *ReplicatorPlan tablePlans map[string]*TablePlan - pos mysql.Position + pos replication.Position // unsavedEvent is set any time we skip an event without // saving, which is on an empty commit. // If nothing else happens for idleTimeout since timeLastSaved, @@ -67,8 +68,22 @@ type vplayer struct { phase string throttlerAppName string + + // See updateFKCheck for more details on how the two fields below are used. + + // foreignKeyChecksEnabled is the current state of the foreign key checks for the current session. + // It reflects what we have set the @@session.foreign_key_checks session variable to. + foreignKeyChecksEnabled bool + + // foreignKeyChecksStateInitialized is set to true once we have initialized the foreignKeyChecksEnabled. + // The initialization is done on the first row event that this vplayer sees. + foreignKeyChecksStateInitialized bool } +// NoForeignKeyCheckFlagBitmask is the bitmask for the 2nd bit (least significant) of the flags in a binlog row event. +// This bit is set if foreign key checks are disabled. +const NoForeignKeyCheckFlagBitmask uint32 = 1 << 1 + // newVPlayer creates a new vplayer. Parameters: // vreplicator: the outer replicator. It's used for common functions like setState. // @@ -83,7 +98,7 @@ type vplayer struct { // pausePos: if set, replication will stop at that position without updating the state to "Stopped". // // This is used by the fastForward function during copying. -func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map[string]*sqltypes.Result, pausePos mysql.Position, phase string) *vplayer { +func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map[string]*sqltypes.Result, pausePos replication.Position, phase string) *vplayer { saveStop := true if !pausePos.IsZero() { settings.StopPos = pausePos @@ -99,7 +114,7 @@ func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map timeLastSaved: time.Now(), tablePlans: make(map[string]*TablePlan), phase: phase, - throttlerAppName: vr.throttlerAppName(), + throttlerAppName: throttlerapp.VCopierName.ConcatenateString(vr.throttlerAppName()), } } @@ -108,7 +123,7 @@ func (vp *vplayer) play(ctx context.Context) error { if !vp.stopPos.IsZero() && vp.startPos.AtLeast(vp.stopPos) { log.Infof("Stop position %v already reached: %v", vp.startPos, vp.stopPos) if vp.saveStop { - return vp.vr.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stop position %v already reached: %v", vp.startPos, vp.stopPos)) + return vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, fmt.Sprintf("Stop position %v already reached: %v", vp.startPos, vp.stopPos)) } return nil } @@ -132,6 +147,34 @@ func (vp *vplayer) play(ctx context.Context) error { return vp.fetchAndApply(ctx) } +// updateFKCheck updates the @@session.foreign_key_checks variable based on the binlog row event flags. +// The function only does it if it has changed to avoid redundant updates, using the cached vplayer.foreignKeyChecksEnabled +// The foreign_key_checks value for a transaction is determined by the 2nd bit (least significant) of the flags: +// - If set (1), foreign key checks are disabled. +// - If unset (0), foreign key checks are enabled. +// updateFKCheck also updates the state for the first row event that this vplayer and hence the connection sees. +func (vp *vplayer) updateFKCheck(ctx context.Context, flags2 uint32) error { + dbForeignKeyChecksEnabled := true + if flags2&NoForeignKeyCheckFlagBitmask == NoForeignKeyCheckFlagBitmask { + dbForeignKeyChecksEnabled = false + } + + if vp.foreignKeyChecksStateInitialized /* already set earlier */ && + dbForeignKeyChecksEnabled == vp.foreignKeyChecksEnabled /* no change in the state, no need to update */ { + return nil + } + log.Infof("Setting this session's foreign_key_checks to %s", strconv.FormatBool(dbForeignKeyChecksEnabled)) + if _, err := vp.vr.dbClient.ExecuteWithRetry(ctx, "set @@session.foreign_key_checks="+strconv.FormatBool(dbForeignKeyChecksEnabled)); err != nil { + return fmt.Errorf("failed to set session foreign_key_checks: %w", err) + } + vp.foreignKeyChecksEnabled = dbForeignKeyChecksEnabled + if !vp.foreignKeyChecksStateInitialized { + log.Infof("First foreign_key_checks update to: %s", strconv.FormatBool(dbForeignKeyChecksEnabled)) + vp.foreignKeyChecksStateInitialized = true + } + return nil +} + // fetchAndApply performs the fetching and application of the binlogs. // This is done by two different threads. The fetcher thread pulls // events from the vstreamer and adds them to the relayLog. @@ -152,7 +195,7 @@ func (vp *vplayer) fetchAndApply(ctx context.Context) (err error) { streamErr := make(chan error, 1) go func() { - streamErr <- vp.vr.sourceVStreamer.VStream(ctx, mysql.EncodePosition(vp.startPos), nil, vp.replicatorPlan.VStreamFilter, func(events []*binlogdatapb.VEvent) error { + streamErr <- vp.vr.sourceVStreamer.VStream(ctx, replication.EncodePosition(vp.startPos), nil, vp.replicatorPlan.VStreamFilter, func(events []*binlogdatapb.VEvent) error { return relay.Send(events) }) }() @@ -216,6 +259,9 @@ func (vp *vplayer) applyStmtEvent(ctx context.Context, event *binlogdatapb.VEven } func (vp *vplayer) applyRowEvent(ctx context.Context, rowEvent *binlogdatapb.RowEvent) error { + if err := vp.updateFKCheck(ctx, rowEvent.Flags); err != nil { + return err + } tplan := vp.tablePlans[rowEvent.TableName] if tplan == nil { return fmt.Errorf("unexpected event on table %s", rowEvent.TableName) @@ -250,7 +296,7 @@ func (vp *vplayer) updatePos(ts int64) (posReached bool, err error) { if posReached { log.Infof("Stopped at position: %v", vp.stopPos) if vp.saveStop { - if err := vp.vr.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stopped at position %v", vp.stopPos)); err != nil { + if err := vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, fmt.Sprintf("Stopped at position %v", vp.stopPos)); err != nil { return false, err } } @@ -335,8 +381,8 @@ func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error { return ctx.Err() } // check throttler. - if !vp.vr.vre.throttlerClient.ThrottleCheckOKOrWaitAppName(ctx, vp.throttlerAppName) { - _ = vp.vr.updateTimeThrottled(VPlayerComponentName) + if !vp.vr.vre.throttlerClient.ThrottleCheckOKOrWaitAppName(ctx, throttlerapp.Name(vp.throttlerAppName)) { + _ = vp.vr.updateTimeThrottled(throttlerapp.VPlayerName) continue } @@ -502,6 +548,7 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m return err } if err := vp.applyRowEvent(ctx, event.RowEvent); err != nil { + log.Infof("Error applying row event: %s", err.Error()) return err } //Row event is logged AFTER RowChanges are applied so as to calculate the total elapsed time for the Row event @@ -543,7 +590,7 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m if _, err := vp.updatePos(event.Timestamp); err != nil { return err } - if err := vp.vr.setState(binlogplayer.BlpStopped, fmt.Sprintf("Stopped at DDL %s", event.Statement)); err != nil { + if err := vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, fmt.Sprintf("Stopped at DDL %s", event.Statement)); err != nil { return err } if err := vp.vr.dbClient.Commit(); err != nil { @@ -607,7 +654,7 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m switch { case found && notFound: // Some were found and some were not found. We can't handle this. - if err := vp.vr.setState(binlogplayer.BlpStopped, "unable to handle journal event: tables were partially matched"); err != nil { + if err := vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, "unable to handle journal event: tables were partially matched"); err != nil { return err } return io.EOF @@ -619,7 +666,7 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m } log.Infof("Binlog event registering journal event %+v", event.Journal) if err := vp.vr.vre.registerJournal(event.Journal, vp.vr.id); err != nil { - if err := vp.vr.setState(binlogplayer.BlpStopped, err.Error()); err != nil { + if err := vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, err.Error()); err != nil { return err } return io.EOF @@ -628,7 +675,7 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m return io.EOF case binlogdatapb.VEventType_HEARTBEAT: if event.Throttled { - if err := vp.vr.updateTimeThrottled(VStreamerComponentName); err != nil { + if err := vp.vr.updateTimeThrottled(throttlerapp.VStreamerName); err != nil { return err } } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go index 34aab9dd302..3b215d03791 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go @@ -27,17 +27,108 @@ import ( "testing" "time" - "github.com/spyzhov/ajson" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv" + + "vitess.io/vitess/go/vt/vttablet" + + "github.com/nsf/jsondiff" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/log" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" qh "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication/queryhistory" ) +// TestPlayerGeneratedInvisiblePrimaryKey confirms that the gipk column is replicated by vplayer, both for target +// tables that have a gipk column and those that make it visible. +func TestPlayerGeneratedInvisiblePrimaryKey(t *testing.T) { + if !env.HasCapability(testenv.ServerCapabilityGeneratedInvisiblePrimaryKey) { + t.Skip("skipping test as server does not support generated invisible primary keys") + } + defer deleteTablet(addTablet(100)) + + execStatements(t, []string{ + "SET @@session.sql_generate_invisible_primary_key=ON;", + "create table t1(val varbinary(128))", + fmt.Sprintf("create table %s.t1(val varbinary(128))", vrepldb), + "create table t2(val varbinary(128))", + "SET @@session.sql_generate_invisible_primary_key=OFF;", + fmt.Sprintf("create table %s.t2(my_row_id int, val varbinary(128), primary key(my_row_id))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table t1", + fmt.Sprintf("drop table %s.t1", vrepldb), + "drop table t2", + fmt.Sprintf("drop table %s.t2", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1", + }, { + Match: "t2", + Filter: "select * from t2", + }}, + } + bls := &binlogdatapb.BinlogSource{ + Keyspace: env.KeyspaceName, + Shard: env.ShardName, + Filter: filter, + OnDdl: binlogdatapb.OnDDLAction_IGNORE, + } + cancel, _ := startVReplication(t, bls, "") + defer cancel() + + testcases := []struct { + input string + output string + table string + data [][]string + query string + queryResult [][]string + }{{ + input: "insert into t1(val) values ('aaa')", + output: "insert into t1(my_row_id,val) values (1,'aaa')", + table: "t1", + data: [][]string{ + {"aaa"}, + }, + query: "select my_row_id, val from t1", + queryResult: [][]string{ + {"1", "aaa"}, + }, + }, { + input: "insert into t2(val) values ('bbb')", + output: "insert into t2(my_row_id,val) values (1,'bbb')", + table: "t2", + data: [][]string{ + {"1", "bbb"}, + }, + query: "select my_row_id, val from t2", + queryResult: [][]string{ + {"1", "bbb"}, + }, + }} + + for _, tcases := range testcases { + execStatements(t, []string{tcases.input}) + output := qh.Expect(tcases.output) + expectNontxQueries(t, output) + if tcases.table != "" { + expectData(t, tcases.table, tcases.data) + } + if tcases.query != "" { + expectQueryResult(t, tcases.query, tcases.queryResult) + } + } +} + func TestPlayerInvisibleColumns(t *testing.T) { if !supportsInvisibleColumns() { t.Skip() @@ -111,6 +202,7 @@ func TestHeartbeatFrequencyFlag(t *testing.T) { }() stats := binlogplayer.NewStats() + defer stats.Stop() vp := &vplayer{vr: &vreplicator{dbClient: newVDBClient(realDBClientFactory(), stats), stats: stats}} type testcount struct { @@ -436,6 +528,62 @@ func TestPlayerSavepoint(t *testing.T) { cancel() } +// TestPlayerForeignKeyCheck tests that we can insert a row into a child table without the corresponding foreign key +// if the foreign_key_checks is not set. +func TestPlayerForeignKeyCheck(t *testing.T) { + doNotLogDBQueries = true + defer func() { doNotLogDBQueries = false }() + + defer deleteTablet(addTablet(100)) + execStatements(t, []string{ + "create table parent(id int, name varchar(128), primary key(id))", + fmt.Sprintf("create table %s.parent(id int, name varchar(128), primary key(id))", vrepldb), + "create table child(id int, parent_id int, name varchar(128), primary key(id), foreign key(parent_id) references parent(id) on delete cascade)", + fmt.Sprintf("create table %s.child(id int, parent_id int, name varchar(128), primary key(id), foreign key(parent_id) references parent(id) on delete cascade)", vrepldb), + }) + defer execStatements(t, []string{ + "drop table child", + fmt.Sprintf("drop table %s.child", vrepldb), + "drop table parent", + fmt.Sprintf("drop table %s.parent", vrepldb), + }) + + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*", + }}, + } + bls := &binlogdatapb.BinlogSource{ + Keyspace: env.KeyspaceName, + Shard: env.ShardName, + Filter: filter, + OnDdl: binlogdatapb.OnDDLAction_IGNORE, + } + cancel, _ := startVReplication(t, bls, "") + + testSetForeignKeyQueries = true + defer func() { + testSetForeignKeyQueries = false + }() + + execStatements(t, []string{ + "insert into parent values(1, 'parent1')", + "insert into child values(1, 1, 'child1')", + "set foreign_key_checks=0", + "insert into child values(2, 100, 'child100')", + }) + expectData(t, "parent", [][]string{ + {"1", "parent1"}, + }) + expectData(t, "child", [][]string{ + {"1", "1", "child1"}, + {"2", "100", "child100"}, + }) + cancel() +} + func TestPlayerStatementModeWithFilter(t *testing.T) { defer deleteTablet(addTablet(100)) @@ -1371,19 +1519,7 @@ func TestPlayerRowMove(t *testing.T) { } func TestPlayerTypes(t *testing.T) { - log.Errorf("TestPlayerTypes: flavor is %s", env.Flavor) - enableJSONColumnTesting := false - flavor := strings.ToLower(env.Flavor) - // Disable tests on percona and mariadb platforms in CI since they - // either don't support JSON or JSON support is not enabled by default - if strings.Contains(flavor, "mysql57") || strings.Contains(flavor, "mysql80") { - log.Infof("Running JSON column type tests on flavor %s", flavor) - enableJSONColumnTesting = true - } else { - log.Warningf("Not running JSON column type tests on flavor %s", flavor) - } defer deleteTablet(addTablet(100)) - execStatements(t, []string{ "create table vitess_ints(tiny tinyint, tinyu tinyint unsigned, small smallint, smallu smallint unsigned, medium mediumint, mediumu mediumint unsigned, normal int, normalu int unsigned, big bigint, bigu bigint unsigned, y year, primary key(tiny))", fmt.Sprintf("create table %s.vitess_ints(tiny tinyint, tinyu tinyint unsigned, small smallint, smallu smallint unsigned, medium mediumint, mediumu mediumint unsigned, normal int, normalu int unsigned, big bigint, bigu bigint unsigned, y year, primary key(tiny))", vrepldb), @@ -1401,6 +1537,8 @@ func TestPlayerTypes(t *testing.T) { fmt.Sprintf("create table %s.binary_pk(b binary(4), val varbinary(4), primary key(b))", vrepldb), "create table vitess_decimal(id int, d1 decimal(8,0) default null, d2 decimal(8,0) default null, d3 decimal(8,0) default null, d4 decimal(8, 1), d5 decimal(8, 1), d6 decimal(8, 1), primary key(id))", fmt.Sprintf("create table %s.vitess_decimal(id int, d1 decimal(8,0) default null, d2 decimal(8,0) default null, d3 decimal(8,0) default null, d4 decimal(8, 1), d5 decimal(8, 1), d6 decimal(8, 1), primary key(id))", vrepldb), + "create table vitess_json(id int auto_increment, val1 json, val2 json, val3 json, val4 json, val5 json, primary key(id))", + fmt.Sprintf("create table %s.vitess_json(id int, val1 json, val2 json, val3 json, val4 json, val5 json, primary key(id))", vrepldb), }) defer execStatements(t, []string{ "drop table vitess_ints", @@ -1419,18 +1557,10 @@ func TestPlayerTypes(t *testing.T) { fmt.Sprintf("drop table %s.binary_pk", vrepldb), "drop table vitess_decimal", fmt.Sprintf("drop table %s.vitess_decimal", vrepldb), + "drop table vitess_json", + fmt.Sprintf("drop table %s.vitess_json", vrepldb), }) - if enableJSONColumnTesting { - execStatements(t, []string{ - "create table vitess_json(id int auto_increment, val1 json, val2 json, val3 json, val4 json, val5 json, primary key(id))", - fmt.Sprintf("create table %s.vitess_json(id int, val1 json, val2 json, val3 json, val4 json, val5 json, primary key(id))", vrepldb), - }) - defer execStatements(t, []string{ - "drop table vitess_json", - fmt.Sprintf("drop table %s.vitess_json", vrepldb), - }) - } env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ @@ -1509,27 +1639,30 @@ func TestPlayerTypes(t *testing.T) { data: [][]string{ {"a\000\000\000", "bbb"}, }, + }, { + input: "insert into vitess_json(val1,val2,val3,val4,val5) values (null,'{}','123','{\"a\":[42,100]}','{\"foo\": \"bar\"}')", + output: "insert into vitess_json(id,val1,val2,val3,val4,val5) values (1,null,JSON_OBJECT(),CAST(123 as JSON),JSON_OBJECT(_utf8mb4'a', JSON_ARRAY(42, 100)),JSON_OBJECT(_utf8mb4'foo', _utf8mb4'bar'))", + table: "vitess_json", + data: [][]string{ + {"1", "", "{}", "123", `{"a": [42, 100]}`, `{"foo": "bar"}`}, + }, + }, { + input: "insert into vitess_json(val1,val2,val3,val4,val5) values ('null', '{\"name\":null}','123','{\"a\":[42,100]}','{\"foo\": \"bar\"}')", + output: "insert into vitess_json(id,val1,val2,val3,val4,val5) values (2,CAST(_utf8mb4'null' as JSON),JSON_OBJECT(_utf8mb4'name', null),CAST(123 as JSON),JSON_OBJECT(_utf8mb4'a', JSON_ARRAY(42, 100)),JSON_OBJECT(_utf8mb4'foo', _utf8mb4'bar'))", + table: "vitess_json", + data: [][]string{ + {"1", "", "{}", "123", `{"a": [42, 100]}`, `{"foo": "bar"}`}, + {"2", "null", `{"name": null}`, "123", `{"a": [42, 100]}`, `{"foo": "bar"}`}, + }, + }, { + input: "update vitess_json set val1 = '{\"bar\": \"foo\"}', val4 = '{\"a\": [98, 123]}', val5 = convert(x'7b7d' using utf8mb4) where id=1", + output: "update vitess_json set val1=JSON_OBJECT(_utf8mb4'bar', _utf8mb4'foo'), val2=JSON_OBJECT(), val3=CAST(123 as JSON), val4=JSON_OBJECT(_utf8mb4'a', JSON_ARRAY(98, 123)), val5=JSON_OBJECT() where id=1", + table: "vitess_json", + data: [][]string{ + {"1", `{"bar": "foo"}`, "{}", "123", `{"a": [98, 123]}`, `{}`}, + {"2", "null", `{"name": null}`, "123", `{"a": [42, 100]}`, `{"foo": "bar"}`}, + }, }} - if enableJSONColumnTesting { - testcases = append(testcases, testcase{ - input: "insert into vitess_json(val1,val2,val3,val4,val5) values (null,'{}','123','{\"a\":[42,100]}', '{\"foo\":\"bar\"}')", - output: "insert into vitess_json(id,val1,val2,val3,val4,val5) values (1," + - "convert(null using utf8mb4)," + "convert('{}' using utf8mb4)," + "convert('123' using utf8mb4)," + - "convert('{\\\"a\\\":[42,100]}' using utf8mb4)," + "convert('{\\\"foo\\\":\\\"bar\\\"}' using utf8mb4))", - table: "vitess_json", - data: [][]string{ - {"1", "", "{}", "123", `{"a": [42, 100]}`, `{"foo": "bar"}`}, - }, - }) - testcases = append(testcases, testcase{ - input: "update vitess_json set val4 = '{\"a\": [98, 123]}', val5 = convert(x'7b7d' using utf8mb4)", - output: "update vitess_json set val1=convert(null using utf8mb4), val2=convert('{}' using utf8mb4), val3=convert('123' using utf8mb4), val4=convert('{\\\"a\\\":[98,123]}' using utf8mb4), val5=convert('{}' using utf8mb4) where id=1", - table: "vitess_json", - data: [][]string{ - {"1", "", "{}", "123", `{"a": [98, 123]}`, `{}`}, - }, - }) - } for _, tcases := range testcases { execStatements(t, []string{tcases.input}) @@ -1663,6 +1796,8 @@ func TestPlayerDDL(t *testing.T) { OnDdl: binlogdatapb.OnDDLAction_EXEC_IGNORE, } execStatements(t, []string{fmt.Sprintf("create table %s.t2(id int, primary key(id))", vrepldb)}) + defer execStatements(t, []string{fmt.Sprintf("drop table %s.t2", vrepldb)}) + cancel, _ = startVReplication(t, bls, "") execStatements(t, []string{"alter table t1 add column val1 varchar(128)"}) expectDBClientQueries(t, qh.Expect( @@ -1711,13 +1846,13 @@ func TestGTIDCompress(t *testing.T) { require.NotNil(t, qr) require.Equal(t, 1, len(qr.Rows)) gotGTID := qr.Rows[0][0].ToString() - pos, err := mysql.DecodePosition(gotGTID) + pos, err := replication.DecodePosition(gotGTID) if tCase.compress { require.True(t, pos.IsZero()) pos, err = binlogplayer.DecodePosition(gotGTID) require.NoError(t, err) require.NotNil(t, pos) - tpos, err := mysql.DecodePosition(tCase.gtid) + tpos, err := replication.DecodePosition(tCase.gtid) require.NoError(t, err) require.Equal(t, tpos.String(), pos.String()) } else { @@ -1759,7 +1894,7 @@ func TestPlayerStopPos(t *testing.T) { OnDdl: binlogdatapb.OnDDLAction_IGNORE, } startPos := primaryPosition(t) - query := binlogplayer.CreateVReplicationState("test", bls, startPos, binlogplayer.BlpStopped, vrepldb, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, startPos, binlogdatapb.VReplicationWorkflowState_Stopped, vrepldb, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -1863,7 +1998,7 @@ func TestPlayerStopAtOther(t *testing.T) { Filter: filter, OnDdl: binlogdatapb.OnDDLAction_IGNORE, } - query := binlogplayer.CreateVReplicationState("test", bls, startPos, binlogplayer.BlpStopped, vrepldb, 0, 0) + query := binlogplayer.CreateVReplicationState("test", bls, startPos, binlogdatapb.VReplicationWorkflowState_Stopped, vrepldb, 0, 0) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) @@ -2506,28 +2641,9 @@ func TestTimestamp(t *testing.T) { expectData(t, "t1", [][]string{{"1", want, want}}) } -func shouldRunJSONTests(t *testing.T, name string) bool { - skipTest := true - flavors := []string{"mysql80", "mysql57"} - for _, flavor := range flavors { - if strings.EqualFold(env.Flavor, flavor) { - skipTest = false - break - } - } - if skipTest { - t.Logf("not running %s on %s", name, env.Flavor) - return false - } - return true -} - // TestPlayerJSONDocs validates more complex and 'large' json docs. It only validates that the data on target matches that on source. // TestPlayerTypes, above, also verifies the sql queries applied on the target. func TestPlayerJSONDocs(t *testing.T) { - if !shouldRunJSONTests(t, "TestPlayerJSONDocs") { - return - } defer deleteTablet(addTablet(100)) execStatements(t, []string{ @@ -2604,9 +2720,6 @@ func TestPlayerJSONDocs(t *testing.T) { // TestPlayerJSONTwoColumns tests for two json columns in a table func TestPlayerJSONTwoColumns(t *testing.T) { - if !shouldRunJSONTests(t, "TestPlayerJSONTwoColumns") { - return - } defer deleteTablet(addTablet(100)) execStatements(t, []string{ "create table vitess_json2(id int auto_increment, val json, val2 json, primary key(id))", @@ -2686,7 +2799,7 @@ func TestVReplicationLogs(t *testing.T) { for _, want := range expected { t.Run("", func(t *testing.T) { - err = insertLog(vdbc, LogMessage, 1, "Running", "message1") + err = insertLog(vdbc, LogMessage, 1, binlogdatapb.VReplicationWorkflowState_Running.String(), "message1") require.NoError(t, err) qr, err := env.Mysqld.FetchSuperQuery(context.Background(), query) require.NoError(t, err) @@ -2697,12 +2810,6 @@ func TestVReplicationLogs(t *testing.T) { } func TestGeneratedColumns(t *testing.T) { - flavor := strings.ToLower(env.Flavor) - // Disable tests on percona (which identifies as mysql56) and mariadb platforms in CI since they - // generated columns support was added in 5.7 and mariadb added mysql compatible generated columns in 10.2 - if !strings.Contains(flavor, "mysql57") && !strings.Contains(flavor, "mysql80") { - return - } defer deleteTablet(addTablet(100)) execStatements(t, []string{ @@ -2771,7 +2878,6 @@ func TestGeneratedColumns(t *testing.T) { {"1", "bbb1", "bbb", "11"}, }, }} - for _, tcases := range testcases { execStatements(t, []string{tcases.input}) output := qh.Expect(tcases.output) @@ -2849,12 +2955,158 @@ func TestPlayerInvalidDates(t *testing.T) { expectNontxQueries(t, output) if tcases.table != "" { - // without the sleep there is a flakiness where row inserted by vreplication is not visible to vdbclient - time.Sleep(100 * time.Millisecond) expectData(t, tcases.table, tcases.data) } } } + +// TestPlayerNoBlob sets up a new environment with mysql running with binlog_row_image as noblob. It creates DMLs for +// tables with blob and text columns and executes DMLs with different combinations of columns with and without +// blob/text columns. It confirms that we handle the partial images sent by vstreamer and generates the correct +// dmls on the target. +func TestPlayerNoBlob(t *testing.T) { + if !runNoBlobTest { + t.Skip() + } + oldVreplicationExperimentalFlags := vttablet.VReplicationExperimentalFlags + vttablet.VReplicationExperimentalFlags = vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage + defer func() { + vttablet.VReplicationExperimentalFlags = oldVreplicationExperimentalFlags + }() + + defer deleteTablet(addTablet(100)) + execStatements(t, []string{ + "create table t1(id int, val1 varchar(20), blb1 blob, id2 int, blb2 longblob, val2 varbinary(10), primary key(id))", + fmt.Sprintf("create table %s.t1(id int, val1 varchar(20), blb1 blob, id2 int, blb2 longblob, val2 varbinary(10), primary key(id))", vrepldb), + "create table t2(id int, val1 varchar(20), txt1 text, id2 int, val2 varbinary(10), unique key(id, val1))", + fmt.Sprintf("create table %s.t2(id int, val1 varchar(20), txt1 text, id2 int, val2 varbinary(10), primary key(id, val1))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table t1", + fmt.Sprintf("drop table %s.t1", vrepldb), + "drop table t2", + fmt.Sprintf("drop table %s.t2", vrepldb), + }) + env.SchemaEngine.Reload(context.Background()) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1", + }, { + Match: "t2", + Filter: "select * from t2", + }}, + } + bls := &binlogdatapb.BinlogSource{ + Keyspace: env.KeyspaceName, + Shard: env.ShardName, + Filter: filter, + OnDdl: binlogdatapb.OnDDLAction_IGNORE, + } + cancel, vrId := startVReplication(t, bls, "") + defer cancel() + + testcases := []struct { + input string + output string + table string + data [][]string + }{{ // 1. PartialQueryTemplate-Insert=1, PartialQueryCount-Insert=1 (blb1,blb2 are not inserted) + input: "insert into t1(id,val1,blb1,id2,val2) values (1,'aaa','blb1',10,'AAA')", + output: "insert into t1(id,val1,blb1,id2,val2) values (1,'aaa','blb1',10,'AAA')", + table: "t1", + data: [][]string{ + {"1", "aaa", "blb1", "10", "", "AAA"}, + }, + }, { // 2. PartialQueryTemplate-Update=1, PartialQueryCount-Update=1 (blb1 is not updated) + input: "update t1 set blb2 = 'blb22' where id = 1", + output: "update t1 set val1='aaa', id2=10, blb2='blb22', val2='AAA' where id=1", + table: "t1", + data: [][]string{ + {"1", "aaa", "blb1", "10", "blb22", "AAA"}, + }, + }, { // 3. PartialQueryTemplate-Update=2, PartialQueryCount-Update=2 (blb1 and blb2 are not updated) + input: "update t1 set val1 = 'bbb' where id = 1", + output: "update t1 set val1='bbb', id2=10, val2='AAA' where id=1", + table: "t1", + data: [][]string{ + {"1", "bbb", "blb1", "10", "blb22", "AAA"}, + }, + }, { // 4. PartialQueryTemplate-Update=2, PartialQueryCount-Update=3 (blb1 and blb2 are not updated, same #3) + input: "update t1 set val2 = 'CCC', id2=99 where id = 1", + output: "update t1 set val1='bbb', id2=99, val2='CCC' where id=1", + table: "t1", + data: [][]string{ + {"1", "bbb", "blb1", "99", "blb22", "CCC"}, + }, + }, { // 5. PartialQueryTemplate-Update=2, PartialQueryCount-Update=4 (blb1 is not updated, same as #1) + input: "update t1 set blb2 = 'blb21' where id = 1", + output: "update t1 set val1='bbb', id2=99, blb2='blb21', val2='CCC' where id=1", + + table: "t1", + data: [][]string{ + {"1", "bbb", "blb1", "99", "blb21", "CCC"}, + }, + }, { // 6. Not a partial update + input: "update t1 set blb2 = 'blb222', blb1 = 'blb11' where id = 1", + output: "update t1 set val1='bbb', blb1='blb11', id2=99, blb2='blb222', val2='CCC' where id=1", + table: "t1", + data: [][]string{ + {"1", "bbb", "blb11", "99", "blb222", "CCC"}, + }, + }, { // 7. PartialQueryTemplate-Insert=2, PartialQueryCount-Insert=2 (txt1 is not inserted) + input: "insert into t2(id,val1,id2,val2) values (1,'aaa',10,'AAA')", + output: "insert into t2(id,val1,id2,val2) values (1,'aaa',10,'AAA')", + table: "t2", + data: [][]string{ + {"1", "aaa", "", "10", "AAA"}, + }, + }, { // 7. PartialQueryTemplate-Insert=2, PartialQueryCount-Insert=3 (txt1 is not inserted, same as #7) + input: "insert into t2(id,val1,id2,val2) values (1,'bbb',20,'BBB')", + output: "insert into t2(id,val1,id2,val2) values (1,'bbb',20,'BBB')", + table: "t2", + data: [][]string{ + {"1", "aaa", "", "10", "AAA"}, + {"1", "bbb", "", "20", "BBB"}, + }, + }, { // 8. Not a partial update, all columns are present + input: "update t2 set txt1 = 'txt1' where id = 1 and val1 = 'aaa'", + output: "update t2 set txt1='txt1', id2=10, val2='AAA' where id=1 and val1='aaa'", + table: "t2", + data: [][]string{ + {"1", "aaa", "txt1", "10", "AAA"}, + {"1", "bbb", "", "20", "BBB"}, + }, + }, { // 9. Not a partial update, all columns are present, same as #8 + input: "update t2 set val2 = 'DDD', txt1 = 'txt2' where id = 1 and val1 = 'bbb'", + output: "update t2 set txt1='txt2', id2=20, val2='DDD' where id=1 and val1='bbb'", + table: "t2", + data: [][]string{ + {"1", "aaa", "txt1", "10", "AAA"}, + {"1", "bbb", "txt2", "20", "DDD"}, + }, + }} + + for _, tcases := range testcases { + execStatements(t, []string{tcases.input}) + output := qh.Expect(tcases.output) + expectNontxQueries(t, output) + time.Sleep(1 * time.Second) + log.Flush() + if tcases.table != "" { + expectData(t, tcases.table, tcases.data) + } + } + stats := globalStats.controllers[int32(vrId)].blpStats + require.Equal(t, 2, len(stats.PartialQueryCount.Counts())) + require.Equal(t, 2, len(stats.PartialQueryCacheSize.Counts())) + require.Equal(t, int64(2), stats.PartialQueryCacheSize.Counts()["insert"]) + require.Equal(t, int64(3), stats.PartialQueryCount.Counts()["insert"]) + require.Equal(t, int64(2), stats.PartialQueryCacheSize.Counts()["update"]) + require.Equal(t, int64(4), stats.PartialQueryCount.Counts()["update"]) +} + func expectJSON(t *testing.T, table string, values [][]string, id int, exec func(ctx context.Context, query string) (*sqltypes.Result, error)) { t.Helper() @@ -2879,13 +3131,12 @@ func expectJSON(t *testing.T, table string, values [][]string, id int, exec func if qr.Rows[i][0].ToString() != row[0] { t.Fatalf("Id mismatch: want %s, got %s", qr.Rows[i][0].ToString(), row[0]) } - got, err := ajson.Unmarshal([]byte(qr.Rows[i][1].ToString())) - require.NoError(t, err) - want, err := ajson.Unmarshal([]byte(row[1])) - require.NoError(t, err) - match, err := got.Eq(want) - require.NoError(t, err) - require.True(t, match) + + opts := jsondiff.DefaultConsoleOptions() + compare, s := jsondiff.Compare(qr.Rows[i][1].Raw(), []byte(row[1]), &opts) + if compare != jsondiff.FullMatch { + t.Errorf("Diff:\n%s\n", s) + } } } @@ -2895,7 +3146,8 @@ func startVReplication(t *testing.T, bls *binlogdatapb.BinlogSource, pos string) if pos == "" { pos = primaryPosition(t) } - query := binlogplayer.CreateVReplication("test", bls, pos, 9223372036854775807, 9223372036854775807, 0, vrepldb, 0, 0, false) + // fake workflow type as MoveTables so that we can test with "noblob" binlog row image + query := binlogplayer.CreateVReplication("test", bls, pos, 9223372036854775807, 9223372036854775807, 0, vrepldb, binlogdatapb.VReplicationWorkflowType_MoveTables, 0, false) qr, err := playerEngine.Exec(query) if err != nil { t.Fatal(err) diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go index 4f5a27b2440..0e63068d7a1 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go @@ -26,17 +26,17 @@ import ( "strings" "time" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vtgate/evalengine" - - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/log" @@ -57,8 +57,6 @@ var ( // vreplicationMinimumHeartbeatUpdateInterval overrides vreplicationHeartbeatUpdateInterval if the latter is higher than this // to ensure that it satisfies liveness criteria implicitly expected by internal processes like Online DDL vreplicationMinimumHeartbeatUpdateInterval = 60 - - vreplicationExperimentalFlagOptimizeInserts int64 = 1 ) const ( @@ -93,15 +91,6 @@ const ( table_name=%a and id=%a` ) -type ComponentName string - -const ( - VPlayerComponentName ComponentName = "vplayer" - VCopierComponentName ComponentName = "vcopier" - VStreamerComponentName ComponentName = "vstreamer" - RowStreamerComponentName ComponentName = "rowstreamer" -) - // vreplicator provides the core logic to start vreplication streams type vreplicator struct { vre *Engine @@ -110,7 +99,7 @@ type vreplicator struct { // source source *binlogdatapb.BinlogSource sourceVStreamer VStreamerClient - state string + state binlogdatapb.VReplicationWorkflowState stats *binlogplayer.Stats // mysqld is used to fetch the local schema. mysqld mysqlctl.MysqlDaemon @@ -119,8 +108,9 @@ type vreplicator struct { originalFKCheckSetting int64 originalSQLMode string - WorkflowType int32 - WorkflowName string + WorkflowType int32 + WorkflowSubType int32 + WorkflowName string throttleUpdatesRateLimiter *timer.RateLimiter } @@ -152,7 +142,7 @@ func newVReplicator(id int32, source *binlogdatapb.BinlogSource, sourceVStreamer log.Warningf("The supplied value for vreplication_heartbeat_update_interval:%d seconds is larger than the maximum allowed:%d seconds, vreplication will fallback to %d", vreplicationHeartbeatUpdateInterval, vreplicationMinimumHeartbeatUpdateInterval, vreplicationMinimumHeartbeatUpdateInterval) } - return &vreplicator{ + vr := &vreplicator{ vre: vre, id: id, source: source, @@ -160,9 +150,9 @@ func newVReplicator(id int32, source *binlogdatapb.BinlogSource, sourceVStreamer stats: stats, dbClient: newVDBClient(dbClient, stats), mysqld: mysqld, - - throttleUpdatesRateLimiter: timer.NewRateLimiter(time.Second), } + vr.setExistingRowsCopied() + return vr } // Replicate starts a vreplication stream. It can be in one of three phases: @@ -196,6 +186,41 @@ func (vr *vreplicator) Replicate(ctx context.Context) error { return err } +// We do not support "minimal" at the moment. "noblob" will provide significant performance improvements. Implementing +// "minimal" will result in a lot of edge cases which will not work, in Online DDL and Materialize. We will be +// soon supporting MySQL binlog compression which should provide some benefits similar to "minimal" in terms of storage +// and performance. +// To start with, we only allow "noblob" for MoveTables, Reshard and Online DDL. We need to identify edge cases for +// other workflow types like Materialize and add validations before we open it up for all workflow types. +func (vr *vreplicator) validateBinlogRowImage() error { + rs, err := vr.dbClient.Execute("select @@binlog_row_image") + if err != nil { + return err + } + if len(rs.Rows) != 1 { + return vterrors.New(vtrpcpb.Code_INTERNAL, fmt.Sprintf("'select @@binlog_row_image' returns an invalid result: %+v", rs.Rows)) + } + + binlogRowImage := strings.ToLower(rs.Rows[0][0].ToString()) + switch binlogRowImage { + case "full": + case "noblob": + switch binlogdatapb.VReplicationWorkflowType(vr.WorkflowType) { + case binlogdatapb.VReplicationWorkflowType_MoveTables, + binlogdatapb.VReplicationWorkflowType_Reshard, + binlogdatapb.VReplicationWorkflowType_OnlineDDL: + case 0: + // used in unit tests only + default: + return vterrors.New(vtrpcpb.Code_INTERNAL, + fmt.Sprintf("noblob binlog_row_image is not supported for %s", binlogdatapb.VReplicationWorkflowType_name[vr.WorkflowType])) + } + default: + return vterrors.New(vtrpcpb.Code_INTERNAL, fmt.Sprintf("%s binlog_row_image is not supported by Vitess VReplication", binlogRowImage)) + } + return nil +} + func (vr *vreplicator) replicate(ctx context.Context) error { // Manage SQL_MODE in the same way that mysqldump does. // Save the original sql_mode, set it to a permissive mode, @@ -217,6 +242,9 @@ func (vr *vreplicator) replicate(ctx context.Context) error { //defensive guard, should be a no-op since it should happen after copy is done defer vr.resetFKCheckAfterCopy(vr.dbClient) + vr.throttleUpdatesRateLimiter = timer.NewRateLimiter(time.Second) + defer vr.throttleUpdatesRateLimiter.Stop() + for { select { case <-ctx.Done(): @@ -231,8 +259,13 @@ func (vr *vreplicator) replicate(ctx context.Context) error { if err != nil { return err } + + if err := vr.validateBinlogRowImage(); err != nil { + return err + } + // If any of the operations below changed state to Stopped or Error, we should return. - if settings.State == binlogplayer.BlpStopped || settings.State == binlogplayer.BlpError { + if settings.State == binlogdatapb.VReplicationWorkflowState_Stopped || settings.State == binlogdatapb.VReplicationWorkflowState_Error { return nil } switch { @@ -241,18 +274,26 @@ func (vr *vreplicator) replicate(ctx context.Context) error { log.Warningf("Unable to clear FK check %v", err) return err } - if err := newVCopier(vr).copyNext(ctx, settings); err != nil { - vr.stats.ErrorCounts.Add([]string{"Copy"}, 1) - return err - } - settings, numTablesToCopy, err = vr.loadSettings(ctx, vr.dbClient) - if err != nil { - return err - } - if numTablesToCopy == 0 { - if err := vr.insertLog(LogCopyEnd, fmt.Sprintf("Copy phase completed at gtid %s", settings.StartPos)); err != nil { + if vr.WorkflowSubType == int32(binlogdatapb.VReplicationWorkflowSubType_AtomicCopy) { + if err := newVCopier(vr).copyAll(ctx, settings); err != nil { + log.Infof("Error atomically copying all tables: %v", err) + vr.stats.ErrorCounts.Add([]string{"CopyAll"}, 1) + return err + } + } else { + if err := newVCopier(vr).copyNext(ctx, settings); err != nil { + vr.stats.ErrorCounts.Add([]string{"Copy"}, 1) return err } + settings, numTablesToCopy, err = vr.loadSettings(ctx, vr.dbClient) + if err != nil { + return err + } + if numTablesToCopy == 0 { + if err := vr.insertLog(LogCopyEnd, fmt.Sprintf("Copy phase completed at gtid %s", settings.StartPos)); err != nil { + return err + } + } } case settings.StartPos.IsZero(): if err := newVCopier(vr).initTablesForCopy(ctx); err != nil { @@ -265,13 +306,13 @@ func (vr *vreplicator) replicate(ctx context.Context) error { return err } if vr.source.StopAfterCopy { - return vr.setState(binlogplayer.BlpStopped, "Stopped after copy.") + return vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, "Stopped after copy.") } - if err := vr.setState(binlogplayer.BlpRunning, ""); err != nil { + if err := vr.setState(binlogdatapb.VReplicationWorkflowState_Running, ""); err != nil { vr.stats.ErrorCounts.Add([]string{"Replicate"}, 1) return err } - return newVPlayer(vr, settings, nil, mysql.Position{}, "replicate").play(ctx) + return newVPlayer(vr, settings, nil, replication.Position{}, "replicate").play(ctx) } } } @@ -376,6 +417,7 @@ func (vr *vreplicator) loadSettings(ctx context.Context, dbClient *vdbClient) (s settings, numTablesToCopy, err = vr.readSettings(ctx, dbClient) if err == nil { vr.WorkflowType = int32(settings.WorkflowType) + vr.WorkflowSubType = int32(settings.WorkflowSubType) vr.WorkflowName = settings.WorkflowName } return settings, numTablesToCopy, err @@ -395,7 +437,7 @@ func (vr *vreplicator) readSettings(ctx context.Context, dbClient *vdbClient) (s if len(qr.Rows) == 0 || len(qr.Rows[0]) == 0 { return settings, numTablesToCopy, fmt.Errorf("unexpected result from %s: %v", query, qr) } - numTablesToCopy, err = evalengine.ToInt64(qr.Rows[0][0]) + numTablesToCopy, err = qr.Rows[0][0].ToCastInt64() if err != nil { return settings, numTablesToCopy, err } @@ -414,24 +456,24 @@ func (vr *vreplicator) setMessage(message string) error { if _, err := vr.dbClient.Execute(query); err != nil { return fmt.Errorf("could not set message: %v: %v", query, err) } - if err := insertLog(vr.dbClient, LogMessage, vr.id, vr.state, message); err != nil { + if err := insertLog(vr.dbClient, LogMessage, vr.id, vr.state.String(), message); err != nil { return err } return nil } func (vr *vreplicator) insertLog(typ, message string) error { - return insertLog(vr.dbClient, typ, vr.id, vr.state, message) + return insertLog(vr.dbClient, typ, vr.id, vr.state.String(), message) } -func (vr *vreplicator) setState(state, message string) error { +func (vr *vreplicator) setState(state binlogdatapb.VReplicationWorkflowState, message string) error { if message != "" { vr.stats.History.Add(&binlogplayer.StatsHistoryRecord{ Time: time.Now(), Message: message, }) } - vr.stats.State.Store(state) + vr.stats.State.Store(state.String()) query := fmt.Sprintf("update _vt.vreplication set state='%v', message=%v where id=%v", state, encodeString(binlogplayer.MessageTruncate(message)), vr.id) if _, err := vr.dbClient.ExecuteFetch(query, 1); err != nil { return fmt.Errorf("could not set state: %v: %v", query, err) @@ -439,7 +481,7 @@ func (vr *vreplicator) setState(state, message string) error { if state == vr.state { return nil } - if err := insertLog(vr.dbClient, LogStateChange, vr.id, state, message); err != nil { + if err := insertLog(vr.dbClient, LogStateChange, vr.id, state.String(), message); err != nil { return err } vr.state = state @@ -454,14 +496,14 @@ func encodeString(in string) string { } func (vr *vreplicator) getSettingFKCheck() error { - qr, err := vr.dbClient.Execute("select @@foreign_key_checks;") + qr, err := vr.dbClient.Execute("select @@foreign_key_checks") if err != nil { return err } if len(qr.Rows) != 1 || len(qr.Fields) != 1 { return fmt.Errorf("unable to select @@foreign_key_checks") } - vr.originalFKCheckSetting, err = evalengine.ToInt64(qr.Rows[0][0]) + vr.originalFKCheckSetting, err = qr.Rows[0][0].ToCastInt64() if err != nil { return err } @@ -469,7 +511,7 @@ func (vr *vreplicator) getSettingFKCheck() error { } func (vr *vreplicator) resetFKCheckAfterCopy(dbClient *vdbClient) error { - _, err := dbClient.Execute(fmt.Sprintf("set foreign_key_checks=%d;", vr.originalFKCheckSetting)) + _, err := dbClient.Execute(fmt.Sprintf("set @@session.foreign_key_checks=%d", vr.originalFKCheckSetting)) return err } @@ -523,17 +565,17 @@ func (vr *vreplicator) setSQLMode(ctx context.Context, dbClient *vdbClient) (fun // This is useful when we want to throttle all migrations. We throttle "online-ddl" and that applies to both vreplication // migrations as well as gh-ost migrations. func (vr *vreplicator) throttlerAppName() string { - names := []string{vr.WorkflowName, throttlerVReplicationAppName} + names := []string{vr.WorkflowName, throttlerapp.VReplicationName.String()} if vr.WorkflowType == int32(binlogdatapb.VReplicationWorkflowType_OnlineDDL) { - names = append(names, throttlerOnlineDDLAppName) + names = append(names, throttlerapp.OnlineDDLName.String()) } - return strings.Join(names, ":") + return throttlerapp.Concatenate(names...) } -func (vr *vreplicator) updateTimeThrottled(componentThrottled ComponentName) error { +func (vr *vreplicator) updateTimeThrottled(appThrottled throttlerapp.Name) error { err := vr.throttleUpdatesRateLimiter.Do(func() error { tm := time.Now().Unix() - update, err := binlogplayer.GenerateUpdateTimeThrottled(vr.id, tm, string(componentThrottled)) + update, err := binlogplayer.GenerateUpdateTimeThrottled(vr.id, tm, appThrottled.String()) if err != nil { return err } @@ -557,7 +599,7 @@ func (vr *vreplicator) updateHeartbeatTime(tm int64) error { } func (vr *vreplicator) clearFKCheck(dbClient *vdbClient) error { - _, err := dbClient.Execute("set foreign_key_checks=0;") + _, err := dbClient.Execute("set @@session.foreign_key_checks=0") return err } @@ -660,7 +702,7 @@ func (vr *vreplicator) stashSecondaryKeys(ctx context.Context, tableName string) if _, err := dbClient.ExecuteFetch(sqlparser.String(alterDrop), 1); err != nil { // If they've already been dropped, e.g. by another controller running on the tablet // when doing a shard merge, then we can ignore the error. - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Num == mysql.ERCantDropFieldOrKey { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Num == sqlerror.ERCantDropFieldOrKey { secondaryKeys, err := vr.getTableSecondaryKeys(ctx, tableName) if err == nil && len(secondaryKeys) == 0 { return nil @@ -913,7 +955,7 @@ func (vr *vreplicator) execPostCopyActions(ctx context.Context, tableName string // index definitions that we would have added already exist in // the table schema and if so move forward and delete the // post_copy_action record. - if sqlErr, ok := err.(*mysql.SQLError); ok && sqlErr.Number() == mysql.ERDupKeyName { + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERDupKeyName { stmt, err := sqlparser.ParseStrictDDL(action.Task) if err != nil { return failedAlterErr @@ -990,3 +1032,39 @@ func (vr *vreplicator) newClientConnection(ctx context.Context) (*vdbClient, err } return dbClient, nil } + +// setExistingRowsCopied deals with the case where another tablet started +// the workflow and a reparent occurred, and now that we manage the +// workflow, we need to read the rows_copied that already exists and add +// them to our counter, otherwise it will look like the reparent wiped all the +// rows_copied. So in the event that our CopyRowCount counter is zero, and +// the existing rows_copied in the vreplication table is not, copy the value of +// vreplication.rows_copied into our CopyRowCount. +func (vr *vreplicator) setExistingRowsCopied() { + if vr.stats.CopyRowCount.Get() == 0 { + rowsCopiedExisting, err := vr.readExistingRowsCopied(vr.id) + if err != nil { + log.Warningf("Failed to read existing rows copied value for %s worfklow: %v", vr.WorkflowName, err) + } else if rowsCopiedExisting != 0 { + log.Infof("Resuming the %s vreplication workflow started on another tablet, setting rows copied counter to %v", vr.WorkflowName, rowsCopiedExisting) + vr.stats.CopyRowCount.Set(rowsCopiedExisting) + } + } +} + +func (vr *vreplicator) readExistingRowsCopied(id int32) (int64, error) { + query, err := sqlparser.ParseAndBind(`SELECT rows_copied FROM _vt.vreplication WHERE id=%a`, + sqltypes.Int32BindVariable(id), + ) + if err != nil { + return 0, err + } + r, err := vr.dbClient.Execute(query) + if err != nil { + return 0, err + } + if len(r.Rows) != 1 { + return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "did not get expected single row value when getting rows_copied for workflow id: %d", id) + } + return r.Rows[0][0].ToInt64() +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go index 66591bbcb81..346e6b67eb3 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go @@ -207,6 +207,7 @@ func TestDeferSecondaryKeys(t *testing.T) { id := int32(1) vsclient := newTabletConnector(tablet) stats := binlogplayer.NewStats() + defer stats.Stop() dbClient := playerEngine.dbClientFactoryFiltered() err := dbClient.Connect() require.NoError(t, err) @@ -538,6 +539,7 @@ func TestCancelledDeferSecondaryKeys(t *testing.T) { id := int32(1) vsclient := newTabletConnector(tablet) stats := binlogplayer.NewStats() + defer stats.Stop() dbaconn := playerEngine.dbClientFactoryDba() err = dbaconn.Connect() require.NoError(t, err) @@ -626,6 +628,58 @@ func TestCancelledDeferSecondaryKeys(t *testing.T) { require.Equal(t, 1, len(res.Rows)) } +// TestResumingFromPreviousWorkflowKeepingRowsCopied tests that when you +// resume a workflow started by another tablet (eg. a reparent occurred), +// the rows_copied does not reset to zero but continues along from where +// it left off. +func TestResumingFromPreviousWorkflowKeepingRowsCopied(t *testing.T) { + _, cancel := context.WithCancel(context.Background()) + defer cancel() + tablet := addTablet(100) + defer deleteTablet(tablet) + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + }}, + } + bls := &binlogdatapb.BinlogSource{ + Keyspace: env.KeyspaceName, + Shard: env.ShardName, + Filter: filter, + } + // The test env uses the same factory for both dba and + // filtered connections. + dbconfigs.GlobalDBConfigs.Filtered.User = "vt_dba" + id := int32(1) + + vsclient := newTabletConnector(tablet) + stats := binlogplayer.NewStats() + defer stats.Stop() + + dbaconn := playerEngine.dbClientFactoryDba() + err := dbaconn.Connect() + require.NoError(t, err) + defer dbaconn.Close() + + dbClient := playerEngine.dbClientFactoryFiltered() + err = dbClient.Connect() + require.NoError(t, err) + defer dbClient.Close() + + dbName := dbClient.DBName() + rowsCopied := int64(500000) + // Ensure there's an existing vreplication workflow + _, err = dbClient.ExecuteFetch(fmt.Sprintf("insert into _vt.vreplication (id, workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, rows_copied) values (%d, 'test', '', '', 99999, 99999, 0, 0, 'Running', '%s', %v) on duplicate key update workflow='test', source='', pos='', max_tps=99999, max_replication_lag=99999, time_updated=0, transaction_timestamp=0, state='Running', db_name='%s', rows_copied=%v", + id, dbName, rowsCopied, dbName, rowsCopied), 1) + require.NoError(t, err) + defer func() { + _, err = dbClient.ExecuteFetch(fmt.Sprintf("delete from _vt.vreplication where id = %d", id), 1) + require.NoError(t, err) + }() + vr := newVReplicator(id, bls, vsclient, stats, dbClient, env.Mysqld, playerEngine) + assert.Equal(t, rowsCopied, vr.stats.CopyRowCount.Get()) +} + // stripCruft removes all whitespace unicode chars and backticks. func stripCruft(in string) string { out := strings.Builder{} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vrlog.go b/go/vt/vttablet/tabletmanager/vreplication/vrlog.go index b55e54bb79c..a36b6ad2336 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vrlog.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vrlog.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/servenv" ) var ( @@ -63,7 +64,7 @@ func (stats *VrLogStats) Send(detail string) { } func init() { - http.HandleFunc("/debug/vrlog", func(w http.ResponseWriter, r *http.Request) { + servenv.HTTPHandleFunc("/debug/vrlog", func(w http.ResponseWriter, r *http.Request) { ch := vrLogStatsLogger.Subscribe("vrlogstats") defer vrLogStatsLogger.Unsubscribe(ch) vrlogStatsHandler(ch, w, r) diff --git a/go/vt/vttablet/tabletserver/bench_test.go b/go/vt/vttablet/tabletserver/bench_test.go index 06d7abe391b..fd2d86c2812 100644 --- a/go/vt/vttablet/tabletserver/bench_test.go +++ b/go/vt/vttablet/tabletserver/bench_test.go @@ -18,11 +18,10 @@ package tabletserver import ( "bytes" + "context" "fmt" "testing" - "context" - "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" @@ -55,7 +54,9 @@ func init() { } func BenchmarkExecuteVarBinary(b *testing.B) { - db, tsv := setupTabletServerTest(nil, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(b, ctx, "") defer db.Close() defer tsv.StopService() @@ -68,16 +69,18 @@ func BenchmarkExecuteVarBinary(b *testing.B) { } target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} - db.AllowAll = true + db.SetAllowAll(true) for i := 0; i < b.N; i++ { - if _, err := tsv.Execute(context.Background(), &target, benchQuery, bv, 0, 0, nil); err != nil { + if _, err := tsv.Execute(ctx, &target, benchQuery, bv, 0, 0, nil); err != nil { panic(err) } } } func BenchmarkExecuteExpression(b *testing.B) { - db, tsv := setupTabletServerTest(nil, "") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, tsv := setupTabletServerTest(b, ctx, "") defer db.Close() defer tsv.StopService() @@ -93,9 +96,9 @@ func BenchmarkExecuteExpression(b *testing.B) { } target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} - db.AllowAll = true + db.SetAllowAll(true) for i := 0; i < b.N; i++ { - if _, err := tsv.Execute(context.Background(), &target, benchQuery, bv, 0, 0, nil); err != nil { + if _, err := tsv.Execute(ctx, &target, benchQuery, bv, 0, 0, nil); err != nil { panic(err) } } diff --git a/go/vt/vttablet/tabletserver/binlog_watcher.go b/go/vt/vttablet/tabletserver/binlog_watcher.go index 6c713791e6f..cff7697c18a 100644 --- a/go/vt/vttablet/tabletserver/binlog_watcher.go +++ b/go/vt/vttablet/tabletserver/binlog_watcher.go @@ -17,13 +17,13 @@ limitations under the License. package tabletserver import ( + "context" "sync" "time" - "context" - "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) @@ -31,7 +31,7 @@ import ( // VStreamer defines the functions of VStreamer // that the BinlogWatcher needs. type VStreamer interface { - Stream(ctx context.Context, startPos string, tablePKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error + Stream(ctx context.Context, startPos string, tablePKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, throttlerApp throttlerapp.Name, send func([]*binlogdatapb.VEvent) error) error } // BinlogWatcher is a tabletserver service that watches the @@ -91,7 +91,7 @@ func (blw *BinlogWatcher) process(ctx context.Context) { for { // VStreamer will reload the schema when it encounters a DDL. - err := blw.vs.Stream(ctx, "current", nil, filter, func(events []*binlogdatapb.VEvent) error { + err := blw.vs.Stream(ctx, "current", nil, filter, throttlerapp.BinlogWatcherName, func(events []*binlogdatapb.VEvent) error { return nil }) log.Infof("ReplicationWatcher VStream ended: %v, retrying in 5 seconds", err) diff --git a/go/vt/vttablet/tabletserver/connpool/dbconn.go b/go/vt/vttablet/tabletserver/connpool/dbconn.go index 13d0479fd0c..cc81bf39910 100644 --- a/go/vt/vttablet/tabletserver/connpool/dbconn.go +++ b/go/vt/vttablet/tabletserver/connpool/dbconn.go @@ -24,12 +24,12 @@ import ( "sync/atomic" "time" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/pools" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/dbconnpool" @@ -130,10 +130,10 @@ func (dbc *DBConn) Exec(ctx context.Context, query string, maxrows int, wantfiel case err == nil: // Success. return r, nil - case mysql.IsConnLostDuringQuery(err): + case sqlerror.IsConnLostDuringQuery(err): // Query probably killed. Don't retry. return nil, err - case !mysql.IsConnErr(err): + case !sqlerror.IsConnErr(err): // Not a connection error. Don't retry. return nil, err case attempt == 2: @@ -233,10 +233,10 @@ func (dbc *DBConn) Stream(ctx context.Context, query string, callback func(*sqlt case err == nil: // Success. return nil - case mysql.IsConnLostDuringQuery(err): + case sqlerror.IsConnLostDuringQuery(err): // Query probably killed. Don't retry. return err - case !mysql.IsConnErr(err): + case !sqlerror.IsConnErr(err): // Not a connection error. Don't retry. return err case attempt == 2: diff --git a/go/vt/vttablet/tabletserver/connpool/dbconn_test.go b/go/vt/vttablet/tabletserver/connpool/dbconn_test.go index 62ec0b6d12e..54792e17fa5 100644 --- a/go/vt/vttablet/tabletserver/connpool/dbconn_test.go +++ b/go/vt/vttablet/tabletserver/connpool/dbconn_test.go @@ -27,7 +27,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/pools" "vitess.io/vitess/go/sqltypes" @@ -87,7 +88,7 @@ func TestDBConnExec(t *testing.T) { startCounts = mysqlTimings.Counts() // Exec fail due to client side error - db.AddRejectedQuery(sql, &mysql.SQLError{ + db.AddRejectedQuery(sql, &sqlerror.SQLError{ Num: 2012, Message: "connection fail", Query: "", @@ -159,7 +160,7 @@ func TestDBConnExecLost(t *testing.T) { // Exec fail due to server side error (e.g. query kill) startCounts = mysqlTimings.Counts() - db.AddRejectedQuery(sql, &mysql.SQLError{ + db.AddRejectedQuery(sql, &sqlerror.SQLError{ Num: 2013, Message: "Lost connection to MySQL server during query", Query: "", diff --git a/go/vt/vttablet/tabletserver/connpool/pool.go b/go/vt/vttablet/tabletserver/connpool/pool.go index 6cb9adf7387..a67de9f2a1a 100644 --- a/go/vt/vttablet/tabletserver/connpool/pool.go +++ b/go/vt/vttablet/tabletserver/connpool/pool.go @@ -170,6 +170,7 @@ func (cp *Pool) Close() { log.Infof("connpool - acquiring lock") cp.mu.Lock() log.Infof("connpool - acquired lock") + cp.connections.Close() cp.connections = nil cp.mu.Unlock() log.Infof("connpool - closing dbaPool") diff --git a/go/vt/vttablet/tabletserver/connpool/pool_test.go b/go/vt/vttablet/tabletserver/connpool/pool_test.go index 870af10417a..43c27fa817a 100644 --- a/go/vt/vttablet/tabletserver/connpool/pool_test.go +++ b/go/vt/vttablet/tabletserver/connpool/pool_test.go @@ -56,11 +56,13 @@ func TestConnPoolGet(t *testing.T) { func TestConnPoolTimeout(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - connPool := NewPool(tabletenv.NewEnv(nil, "PoolTest"), "TestPool", tabletenv.ConnPoolConfig{ - Size: 1, - TimeoutSeconds: 1, - IdleTimeoutSeconds: 10, - }) + + cfg := tabletenv.ConnPoolConfig{ + Size: 1, + } + _ = cfg.TimeoutSeconds.Set("1s") + _ = cfg.IdleTimeoutSeconds.Set("10s") + connPool := NewPool(tabletenv.NewEnv(nil, "PoolTest"), "TestPool", cfg) connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) defer connPool.Close() dbConn, err := connPool.Get(context.Background(), nil) @@ -395,8 +397,9 @@ func newPool() *Pool { } func newPoolWithCapacity(capacity int) *Pool { - return NewPool(tabletenv.NewEnv(nil, "PoolTest"), "TestPool", tabletenv.ConnPoolConfig{ - Size: capacity, - IdleTimeoutSeconds: 10, - }) + cfg := tabletenv.ConnPoolConfig{ + Size: capacity, + } + _ = cfg.IdleTimeoutSeconds.Set("10s") + return NewPool(tabletenv.NewEnv(nil, "PoolTest"), "TestPool", cfg) } diff --git a/go/vt/vttablet/tabletserver/controller.go b/go/vt/vttablet/tabletserver/controller.go index d612ef1109d..ca4eeb8747b 100644 --- a/go/vt/vttablet/tabletserver/controller.go +++ b/go/vt/vttablet/tabletserver/controller.go @@ -18,6 +18,7 @@ package tabletserver import ( "context" + "time" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" @@ -26,8 +27,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/rules" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" - - "time" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -52,7 +52,7 @@ type Controller interface { // SetServingType transitions the query service to the required serving type. // Returns true if the state of QueryService or the tablet type changed. - SetServingType(tabletType topodatapb.TabletType, terTimestamp time.Time, serving bool, reason string) error + SetServingType(tabletType topodatapb.TabletType, ptsTimestamp time.Time, serving bool, reason string) error // EnterLameduck causes tabletserver to enter the lameduck state. EnterLameduck() @@ -72,7 +72,7 @@ type Controller interface { // RegisterQueryRuleSource adds a query rule source RegisterQueryRuleSource(ruleSource string) - // RegisterQueryRuleSource removes a query rule source + // UnRegisterQueryRuleSource removes a query rule source UnRegisterQueryRuleSource(ruleSource string) // SetQueryRules sets the query rules for this QueryService @@ -89,6 +89,9 @@ type Controller interface { // TopoServer returns the topo server. TopoServer() *topo.Server + + // CheckThrottler + CheckThrottler(ctx context.Context, appName string, flags *throttle.CheckFlags) *throttle.CheckResult } // Ensure TabletServer satisfies Controller interface. diff --git a/go/vt/vttablet/tabletserver/debugenv.go b/go/vt/vttablet/tabletserver/debugenv.go index 638a0e40508..e229c46cadd 100644 --- a/go/vt/vttablet/tabletserver/debugenv.go +++ b/go/vt/vttablet/tabletserver/debugenv.go @@ -116,8 +116,6 @@ func debugEnvHandler(tsv *TabletServer, w http.ResponseWriter, r *http.Request) setIntVal(tsv.SetStreamPoolSize) case "TxPoolSize": setIntVal(tsv.SetTxPoolSize) - case "QueryCacheCapacity": - setIntVal(tsv.SetQueryPlanCacheCap) case "MaxResultSize": setIntVal(tsv.SetMaxResultSize) case "WarnResultSize": @@ -127,7 +125,7 @@ func debugEnvHandler(tsv *TabletServer, w http.ResponseWriter, r *http.Request) case "RowStreamerMaxMySQLReplLagSecs": setInt64Val(func(val int64) { tsv.Config().RowStreamer.MaxMySQLReplLagSecs = val }) case "UnhealthyThreshold": - setDurationVal(tsv.Config().Healthcheck.UnhealthyThresholdSeconds.Set) + setDurationVal(func(d time.Duration) { _ = tsv.Config().Healthcheck.UnhealthyThresholdSeconds.Set(d.String()) }) setDurationVal(tsv.hs.SetUnhealthyThreshold) setDurationVal(tsv.sm.SetUnhealthyThreshold) case "ThrottleMetricThreshold": diff --git a/go/vt/vttablet/tabletserver/gc/tablegc.go b/go/vt/vttablet/tabletserver/gc/tablegc.go index 52199ba6baf..d5c3ad82e74 100644 --- a/go/vt/vttablet/tabletserver/gc/tablegc.go +++ b/go/vt/vttablet/tabletserver/gc/tablegc.go @@ -20,7 +20,6 @@ import ( "context" "fmt" "math" - "math/rand" "sort" "sync" "sync/atomic" @@ -28,6 +27,8 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/dbconnpool" @@ -39,12 +40,12 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" ) const ( // evacHours is a hard coded, reasonable time for a table to spend in EVAC state - evacHours = 72 - throttlerAppName = "tablegc" + evacHours = 72 ) var ( @@ -82,10 +83,6 @@ type transitionRequest struct { uuid string } -func init() { - rand.Seed(time.Now().UnixNano()) -} - // TableGC is the main entity in the table garbage collection mechanism. // This service "garbage collects" tables: // - it checks for magically-named tables (e.g. _vt_EVAC_f6338b2af8af11eaa210f875a4d24e90_20200920063522) @@ -122,8 +119,7 @@ type Status struct { Keyspace string Shard string - isPrimary bool - IsOpen bool + IsOpen bool purgingTables []string } @@ -131,7 +127,7 @@ type Status struct { // NewTableGC creates a table collector func NewTableGC(env tabletenv.Env, ts *topo.Server, lagThrottler *throttle.Throttler) *TableGC { collector := &TableGC{ - throttlerClient: throttle.NewBackgroundClient(lagThrottler, throttlerAppName, throttle.ThrottleCheckPrimaryWrite), + throttlerClient: throttle.NewBackgroundClient(lagThrottler, throttlerapp.TableGCName, throttle.ThrottleCheckPrimaryWrite), isOpen: 0, env: env, @@ -264,7 +260,6 @@ func (collector *TableGC) operate(ctx context.Context) { } case <-purgeRequestsChan: { - log.Info("TableGC: purgeRequestsChan") go func() { tableName, err := collector.purge(ctx) if err != nil { @@ -475,8 +470,8 @@ func (collector *TableGC) purge(ctx context.Context) (tableName string, err erro if err == nil { return true, nil } - if merr, ok := err.(*mysql.SQLError); ok { - if merr.Num == mysql.ERSpecifiedAccessDenied { + if merr, ok := err.(*sqlerror.SQLError); ok { + if merr.Num == sqlerror.ERSpecifiedAccessDenied { // We do not have privileges to disable binary logging. That's fine, we're on best effort, // so we're going to silently ignore this error. return false, nil diff --git a/go/vt/vttablet/tabletserver/health_streamer.go b/go/vt/vttablet/tabletserver/health_streamer.go index 49e8a858bc9..3ecdc180600 100644 --- a/go/vt/vttablet/tabletserver/health_streamer.go +++ b/go/vt/vttablet/tabletserver/health_streamer.go @@ -27,27 +27,25 @@ import ( "github.com/spf13/pflag" - "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/constants/sidecar" - "vitess.io/vitess/go/sqltypes" + vtschema "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/dbconfigs" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/timer" - "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" - - "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/history" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -81,24 +79,23 @@ type healthStreamer struct { cancel context.CancelFunc clients map[chan *querypb.StreamHealthResponse]struct{} state *querypb.StreamHealthResponse + // isServingPrimary stores if this tablet is currently the serving primary or not. + isServingPrimary bool + se *schema.Engine history *history.History - ticks *timer.Timer dbConfig dbconfigs.Connector conns *connpool.Pool signalWhenSchemaChange bool + reloadTimeout time.Duration viewsEnabled bool - views map[string]string } -func newHealthStreamer(env tabletenv.Env, alias *topodatapb.TabletAlias) *healthStreamer { - var newTimer *timer.Timer +func newHealthStreamer(env tabletenv.Env, alias *topodatapb.TabletAlias, engine *schema.Engine) *healthStreamer { var pool *connpool.Pool if env.Config().SignalWhenSchemaChange { - reloadTime := env.Config().SignalSchemaChangeReloadIntervalSeconds.Get() - newTimer = timer.NewTimer(reloadTime) // We need one connection for the reloader. pool = connpool.NewPool(env, "", tabletenv.ConnPoolConfig{ Size: 1, @@ -119,18 +116,18 @@ func newHealthStreamer(env tabletenv.Env, alias *topodatapb.TabletAlias) *health }, history: history.New(5), - ticks: newTimer, conns: pool, signalWhenSchemaChange: env.Config().SignalWhenSchemaChange, + reloadTimeout: env.Config().SchemaChangeReloadTimeout, viewsEnabled: env.Config().EnableViews, - views: map[string]string{}, + se: engine, } hs.unhealthyThreshold.Store(env.Config().Healthcheck.UnhealthyThresholdSeconds.Get().Nanoseconds()) return hs } func (hs *healthStreamer) InitDBConfig(target *querypb.Target, cp dbconfigs.Connector) { - hs.state.Target = proto.Clone(target).(*querypb.Target) + hs.state.Target = target.CloneVT() hs.dbConfig = cp } @@ -145,14 +142,7 @@ func (hs *healthStreamer) Open() { if hs.conns != nil { // if we don't have a live conns object, it means we are not configured to signal when the schema changes hs.conns.Open(hs.dbConfig, hs.dbConfig, hs.dbConfig) - hs.ticks.Start(func() { - if err := hs.reload(); err != nil { - log.Errorf("periodic schema reload failed in health stream: %v", err) - } - }) - } - } func (hs *healthStreamer) Close() { @@ -160,13 +150,14 @@ func (hs *healthStreamer) Close() { defer hs.mu.Unlock() if hs.cancel != nil { - if hs.ticks != nil { - hs.ticks.Stop() - hs.conns.Close() - } + hs.se.UnregisterNotifier("healthStreamer") hs.cancel() hs.cancel = nil } + if hs.conns != nil { + hs.conns.Close() + hs.conns = nil + } } func (hs *healthStreamer) Stream(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error { @@ -176,11 +167,6 @@ func (hs *healthStreamer) Stream(ctx context.Context, callback func(*querypb.Str } defer hs.unregister(ch) - // trigger the initial schema reload - if hs.signalWhenSchemaChange { - hs.ticks.Trigger() - } - for { select { case <-ctx.Done(): @@ -213,7 +199,7 @@ func (hs *healthStreamer) register() (chan *querypb.StreamHealthResponse, contex hs.clients[ch] = struct{}{} // Send the current state immediately. - ch <- proto.Clone(hs.state).(*querypb.StreamHealthResponse) + ch <- hs.state.CloneVT() return ch, hs.ctx } @@ -224,15 +210,15 @@ func (hs *healthStreamer) unregister(ch chan *querypb.StreamHealthResponse) { delete(hs.clients, ch) } -func (hs *healthStreamer) ChangeState(tabletType topodatapb.TabletType, terTimestamp time.Time, lag time.Duration, err error, serving bool) { +func (hs *healthStreamer) ChangeState(tabletType topodatapb.TabletType, ptsTimestamp time.Time, lag time.Duration, err error, serving bool) { hs.mu.Lock() defer hs.mu.Unlock() hs.state.Target.TabletType = tabletType if tabletType == topodatapb.TabletType_PRIMARY { - hs.state.TabletExternallyReparentedTimestamp = terTimestamp.Unix() + hs.state.PrimaryTermStartTimestamp = ptsTimestamp.Unix() } else { - hs.state.TabletExternallyReparentedTimestamp = 0 + hs.state.PrimaryTermStartTimestamp = 0 } if err != nil { hs.state.RealtimeStats.HealthError = err.Error() @@ -244,9 +230,7 @@ func (hs *healthStreamer) ChangeState(tabletType topodatapb.TabletType, terTimes hs.state.RealtimeStats.FilteredReplicationLagSeconds, hs.state.RealtimeStats.BinlogPlayersCount = blpFunc() hs.state.RealtimeStats.Qps = hs.stats.QPSRates.TotalRate() - - shr := proto.Clone(hs.state).(*querypb.StreamHealthResponse) - + shr := hs.state.CloneVT() hs.broadCastToClients(shr) hs.history.Add(&historyRecord{ Time: time.Now(), @@ -313,7 +297,7 @@ func (hs *healthStreamer) AppendDetails(details []*kv) []*kv { func (hs *healthStreamer) SetUnhealthyThreshold(v time.Duration) { hs.unhealthyThreshold.Store(v.Nanoseconds()) - shr := proto.Clone(hs.state).(*querypb.StreamHealthResponse) + shr := hs.state.CloneVT() for ch := range hs.clients { select { case ch <- shr: @@ -325,28 +309,72 @@ func (hs *healthStreamer) SetUnhealthyThreshold(v time.Duration) { } } -// reload reloads the schema from the underlying mysql -func (hs *healthStreamer) reload() error { +// MakePrimary tells the healthstreamer that the current tablet is now the primary, +// so it can read and write to the MySQL instance for schema-tracking. +func (hs *healthStreamer) MakePrimary(serving bool) { hs.mu.Lock() defer hs.mu.Unlock() - // Schema Reload to happen only on primary. - if hs.state.Target.TabletType != topodatapb.TabletType_PRIMARY { + hs.isServingPrimary = serving + // We register for notifications from the schema Engine only when schema tracking is enabled, + // and we are going to a serving primary state. + if serving && hs.signalWhenSchemaChange { + hs.se.RegisterNotifier("healthStreamer", func(full map[string]*schema.Table, created, altered, dropped []*schema.Table) { + if err := hs.reload(full, created, altered, dropped); err != nil { + log.Errorf("periodic schema reload failed in health stream: %v", err) + } + }, false) + } +} + +// MakeNonPrimary tells the healthstreamer that the current tablet is now not a primary. +func (hs *healthStreamer) MakeNonPrimary() { + hs.mu.Lock() + defer hs.mu.Unlock() + hs.isServingPrimary = false +} + +// reload reloads the schema from the underlying mysql for the tables that we get the alert on. +func (hs *healthStreamer) reload(full map[string]*schema.Table, created, altered, dropped []*schema.Table) error { + hs.mu.Lock() + defer hs.mu.Unlock() + // Schema Reload to happen only on primary when it is serving. + // We can be in a state when the primary is not serving after we have run DemotePrimary. In that case, + // we don't want to run any queries in MySQL, so we shouldn't reload anything in the healthStreamer. + if !hs.isServingPrimary { return nil } - ctx := hs.ctx + // add a timeout to prevent unbounded waits + ctx, cancel := context.WithTimeout(hs.ctx, hs.reloadTimeout) + defer cancel() + conn, err := hs.conns.Get(ctx, nil) if err != nil { return err } defer conn.Recycle() - tables, err := hs.getChangedTableNames(ctx, conn) - if err != nil { - return err + // We create lists to store the tables that have schema changes. + var tables []string + var views []string + + // Range over the tables that are created/altered and split them up based on their type. + for _, table := range append(append(dropped, created...), altered...) { + tableName := table.Name.String() + if vtschema.IsInternalOperationTableName(tableName) { + continue + } + if table.Type == schema.View && hs.viewsEnabled { + views = append(views, tableName) + } else { + tables = append(tables, tableName) + } } - views, err := hs.getChangedViewNames(ctx, conn) + // Reload the tables and views. + // This stores the data that is used by VTGates upto v17. So, we can remove this reload of + // tables and views in v19. + err = hs.reloadTables(ctx, conn, tables) if err != nil { return err } @@ -358,7 +386,7 @@ func (hs *healthStreamer) reload() error { hs.state.RealtimeStats.TableSchemaChanged = tables hs.state.RealtimeStats.ViewSchemaChanged = views - shr := proto.Clone(hs.state).(*querypb.StreamHealthResponse) + shr := hs.state.CloneVT() hs.broadCastToClients(shr) hs.state.RealtimeStats.TableSchemaChanged = nil hs.state.RealtimeStats.ViewSchemaChanged = nil @@ -366,114 +394,40 @@ func (hs *healthStreamer) reload() error { return nil } -func (hs *healthStreamer) getChangedTableNames(ctx context.Context, conn *connpool.DBConn) ([]string, error) { - var tables []string - var tableNames []string - - callback := func(qr *sqltypes.Result) error { - for _, row := range qr.Rows { - table := row[0].ToString() - tables = append(tables, table) - - escapedTblName := sqlparser.String(sqlparser.NewStrLiteral(table)) - tableNames = append(tableNames, escapedTblName) - } - +func (hs *healthStreamer) reloadTables(ctx context.Context, conn *connpool.DBConn, tableNames []string) error { + if len(tableNames) == 0 { return nil } - alloc := func() *sqltypes.Result { return &sqltypes.Result{} } - bufferSize := 1000 - - schemaChangeQuery := mysql.DetectSchemaChange - // If views are enabled, then views are tracked/handled separately and schema change does not need to track them. - if hs.viewsEnabled { - schemaChangeQuery = mysql.DetectSchemaChangeOnlyBaseTable - } - err := conn.Stream(ctx, schemaChangeQuery, callback, alloc, bufferSize, 0) - if err != nil { - return nil, err + var escapedTableNames []string + for _, tableName := range tableNames { + escapedTblName := sqlparser.String(sqlparser.NewStrLiteral(tableName)) + escapedTableNames = append(escapedTableNames, escapedTblName) } - // If no change detected, then return - if len(tables) == 0 { - return nil, nil - } - - tableNamePredicate := fmt.Sprintf("table_name IN (%s)", strings.Join(tableNames, ", ")) - del := fmt.Sprintf("%s AND %s", mysql.ClearSchemaCopy, tableNamePredicate) - upd := fmt.Sprintf("%s AND %s", mysql.InsertIntoSchemaCopy, tableNamePredicate) + tableNamePredicate := fmt.Sprintf("table_name IN (%s)", strings.Join(escapedTableNames, ", ")) + del := fmt.Sprintf("%s AND %s", sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecar.GetIdentifier()).Query, tableNamePredicate) + upd := fmt.Sprintf("%s AND %s", sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecar.GetIdentifier()).Query, tableNamePredicate) // Reload the schema in a transaction. - _, err = conn.Exec(ctx, "begin", 1, false) + _, err := conn.Exec(ctx, "begin", 1, false) if err != nil { - return nil, err + return err } defer conn.Exec(ctx, "rollback", 1, false) _, err = conn.Exec(ctx, del, 1, false) if err != nil { - return nil, err + return err } _, err = conn.Exec(ctx, upd, 1, false) if err != nil { - return nil, err + return err } _, err = conn.Exec(ctx, "commit", 1, false) if err != nil { - return nil, err - } - return tables, nil -} - -func (hs *healthStreamer) getChangedViewNames(ctx context.Context, conn *connpool.DBConn) ([]string, error) { - if !hs.viewsEnabled { - return nil, nil - } - var changedViews []string - views := map[string]string{} - - callback := func(qr *sqltypes.Result) error { - for _, row := range qr.Rows { - viewName := row[0].ToString() - lastUpdTime := row[1].ToString() - views[viewName] = lastUpdTime - } - - return nil - } - alloc := func() *sqltypes.Result { return &sqltypes.Result{} } - bufferSize := 1000 - err := conn.Stream(ctx, mysql.SelectAllViews, callback, alloc, bufferSize, 0) - if err != nil { - return nil, err - } - - // If no change detected, then return - if len(views) == 0 && len(hs.views) == 0 { - return nil, nil - } - - for viewName, lastUpdTime := range views { - t, exists := hs.views[viewName] - if !exists { // new view added - changedViews = append(changedViews, viewName) - continue - } - if t != lastUpdTime { // view updated - changedViews = append(changedViews, viewName) - } - delete(hs.views, viewName) - } - - // views deleted - for viewName := range hs.views { - changedViews = append(changedViews, viewName) + return err } - - // update hs.views with latest view info - hs.views = views - - return changedViews, nil + return nil } diff --git a/go/vt/vttablet/tabletserver/health_streamer_test.go b/go/vt/vttablet/tabletserver/health_streamer_test.go index 8ce8f7925a4..b2fbb2db1ea 100644 --- a/go/vt/vttablet/tabletserver/health_streamer_test.go +++ b/go/vt/vttablet/tabletserver/health_streamer_test.go @@ -26,13 +26,18 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -46,7 +51,7 @@ func TestHealthStreamerClosed(t *testing.T) { Uid: 1, } blpFunc = testBlpFunc - hs := newHealthStreamer(env, alias) + hs := newHealthStreamer(env, alias, &schema.Engine{}) err := hs.Stream(context.Background(), func(shr *querypb.StreamHealthResponse) error { return nil }) @@ -59,6 +64,39 @@ func newConfig(db *fakesqldb.DB) *tabletenv.TabletConfig { return cfg } +// TestNotServingPrimaryNoWrite makes sure that the health-streamer doesn't write anything to the database when +// the state is not serving primary. +func TestNotServingPrimaryNoWrite(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + config := newConfig(db) + config.SignalWhenSchemaChange = true + + env := tabletenv.NewEnv(config, "TestNotServingPrimary") + alias := &topodatapb.TabletAlias{ + Cell: "cell", + Uid: 1, + } + // Create a new health streamer and set it to a serving primary state + hs := newHealthStreamer(env, alias, &schema.Engine{}) + hs.isServingPrimary = true + hs.InitDBConfig(&querypb.Target{TabletType: topodatapb.TabletType_PRIMARY}, config.DB.DbaWithDB()) + hs.Open() + defer hs.Close() + target := &querypb.Target{} + hs.InitDBConfig(target, db.ConnParams()) + + // Let's say the tablet goes to a non-serving primary state. + hs.MakePrimary(false) + + // A reload now should not write anything to the database. If any write happens it will error out since we have not + // added any query to the database to expect. + t1 := schema.NewTable("t1", schema.NoType) + err := hs.reload(map[string]*schema.Table{"t1": t1}, []*schema.Table{t1}, nil, nil) + require.NoError(t, err) + require.NoError(t, db.LastError()) +} + func TestHealthStreamerBroadcast(t *testing.T) { db := fakesqldb.New(t) defer db.Close() @@ -71,7 +109,7 @@ func TestHealthStreamerBroadcast(t *testing.T) { Uid: 1, } blpFunc = testBlpFunc - hs := newHealthStreamer(env, alias) + hs := newHealthStreamer(env, alias, &schema.Engine{}) hs.InitDBConfig(&querypb.Target{TabletType: topodatapb.TabletType_PRIMARY}, config.DB.DbaWithDB()) hs.Open() defer hs.Close() @@ -113,9 +151,9 @@ func TestHealthStreamerBroadcast(t *testing.T) { Target: &querypb.Target{ TabletType: topodatapb.TabletType_PRIMARY, }, - TabletAlias: alias, - Serving: true, - TabletExternallyReparentedTimestamp: now.Unix(), + TabletAlias: alias, + Serving: true, + PrimaryTermStartTimestamp: now.Unix(), RealtimeStats: &querypb.RealtimeStats{ FilteredReplicationLagSeconds: 1, BinlogPlayersCount: 2, @@ -157,222 +195,296 @@ func TestHealthStreamerBroadcast(t *testing.T) { } func TestReloadSchema(t *testing.T) { - db := fakesqldb.New(t) - defer db.Close() - config := newConfig(db) - config.SignalSchemaChangeReloadIntervalSeconds.Set(100 * time.Millisecond) - config.SignalWhenSchemaChange = true - - env := tabletenv.NewEnv(config, "ReplTrackerTest") - alias := &topodatapb.TabletAlias{ - Cell: "cell", - Uid: 1, + testcases := []struct { + name string + enableSchemaChange bool + }{ + { + name: "Schema Change Enabled", + enableSchemaChange: true, + }, { + name: "Schema Change Disabled", + enableSchemaChange: false, + }, } - blpFunc = testBlpFunc - hs := newHealthStreamer(env, alias) - - target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} - configs := config.DB - - db.AddQueryPattern(mysql.ClearSchemaCopy+".*", &sqltypes.Result{}) - db.AddQueryPattern(mysql.InsertIntoSchemaCopy+".*", &sqltypes.Result{}) - db.AddQuery("begin", &sqltypes.Result{}) - db.AddQuery("commit", &sqltypes.Result{}) - db.AddQuery("rollback", &sqltypes.Result{}) - db.AddQuery(mysql.DetectSchemaChange, sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "table_name", - "varchar", - ), - "product", - "users", - )) - db.AddQuery(mysql.SelectAllViews, &sqltypes.Result{}) - hs.InitDBConfig(target, configs.DbaWithDB()) - hs.Open() - defer hs.Close() - var wg sync.WaitGroup - wg.Add(1) - go func() { - hs.Stream(ctx, func(response *querypb.StreamHealthResponse) error { - if response.RealtimeStats.TableSchemaChanged != nil { - assert.Equal(t, []string{"product", "users"}, response.RealtimeStats.TableSchemaChanged) - wg.Done() + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db := fakesqldb.New(t) + defer db.Close() + config := newConfig(db) + config.SignalWhenSchemaChange = testcase.enableSchemaChange + _ = config.SchemaReloadIntervalSeconds.Set("100ms") + + env := tabletenv.NewEnv(config, "ReplTrackerTest") + alias := &topodatapb.TabletAlias{ + Cell: "cell", + Uid: 1, } - return nil - }) - }() - - c := make(chan struct{}) - go func() { - defer close(c) - wg.Wait() - }() - select { - case <-c: - case <-time.After(1 * time.Second): - t.Errorf("timed out") - } -} - -func TestDoesNotReloadSchema(t *testing.T) { - db := fakesqldb.New(t) - defer db.Close() - config := newConfig(db) - config.SignalSchemaChangeReloadIntervalSeconds.Set(100 * time.Millisecond) - config.SignalWhenSchemaChange = false - - env := tabletenv.NewEnv(config, "ReplTrackerTest") - alias := &topodatapb.TabletAlias{ - Cell: "cell", - Uid: 1, - } - blpFunc = testBlpFunc - hs := newHealthStreamer(env, alias) - - target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} - configs := config.DB - - hs.InitDBConfig(target, configs.DbaWithDB()) - hs.Open() - defer hs.Close() - var wg sync.WaitGroup - wg.Add(1) - go func() { - hs.Stream(ctx, func(response *querypb.StreamHealthResponse) error { - if response.RealtimeStats.TableSchemaChanged != nil { - wg.Done() + blpFunc = testBlpFunc + se := schema.NewEngine(env) + hs := newHealthStreamer(env, alias, se) + + target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} + configs := config.DB + + db.AddQueryPattern(sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecar.GetIdentifier()).Query+".*", &sqltypes.Result{}) + db.AddQueryPattern(sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecar.GetIdentifier()).Query+".*", &sqltypes.Result{}) + db.AddQueryPattern("SELECT UNIX_TIMESTAMP()"+".*", sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "UNIX_TIMESTAMP(now())", + "varchar", + ), + "1684759138", + )) + db.AddQuery("begin", &sqltypes.Result{}) + db.AddQuery("commit", &sqltypes.Result{}) + db.AddQuery("rollback", &sqltypes.Result{}) + // Add the query pattern for the query that schema.Engine uses to get the tables. + db.AddQueryPattern("SELECT .* information_schema.innodb_tablespaces .*", + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "TABLE_NAME | TABLE_TYPE | UNIX_TIMESTAMP(t.create_time) | TABLE_COMMENT | SUM(i.file_size) | SUM(i.allocated_size)", + "varchar|varchar|int64|varchar|int64|int64", + ), + "product|BASE TABLE|1684735966||114688|114688", + "users|BASE TABLE|1684735966||114688|114688", + )) + db.AddQueryPattern("SELECT COLUMN_NAME as column_name.*", sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "column_name", + "varchar", + ), + "id", + )) + db.AddQueryPattern("SELECT `id` FROM `fakesqldb`.*", sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id", + "int64", + ), + )) + db.AddQuery(mysql.ShowRowsRead, sqltypes.MakeTestResult( + sqltypes.MakeTestFields("Variable_name|Value", "varchar|int32"), + "Innodb_rows_read|50")) + db.AddQuery(mysql.BaseShowPrimary, sqltypes.MakeTestResult( + sqltypes.MakeTestFields("table_name | column_name", "varchar|varchar"), + "product|id", + "users|id", + )) + + hs.InitDBConfig(target, configs.DbaWithDB()) + se.InitDBConfig(configs.DbaWithDB()) + hs.Open() + defer hs.Close() + err := se.Open() + require.NoError(t, err) + defer se.Close() + // Start schema notifications. + hs.MakePrimary(true) + + // Update the query pattern for the query that schema.Engine uses to get the tables so that it runs a reload again. + // If we don't change the t.create_time to a value greater than before, then the schema engine doesn't reload the database. + db.AddQueryPattern("SELECT .* information_schema.innodb_tablespaces .*", + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "TABLE_NAME | TABLE_TYPE | UNIX_TIMESTAMP(t.create_time) | TABLE_COMMENT | SUM(i.file_size) | SUM(i.allocated_size)", + "varchar|varchar|int64|varchar|int64|int64", + ), + "product|BASE TABLE|1684735967||114688|114688", + "users|BASE TABLE|1684735967||114688|114688", + )) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + hs.Stream(ctx, func(response *querypb.StreamHealthResponse) error { + if response.RealtimeStats.TableSchemaChanged != nil { + assert.Equal(t, []string{"product", "users"}, response.RealtimeStats.TableSchemaChanged) + wg.Done() + } + return nil + }) + }() + + c := make(chan struct{}) + go func() { + defer close(c) + wg.Wait() + }() + timeout := false + select { + case <-c: + case <-time.After(1 * time.Second): + timeout = true } - return nil - }) - }() - - c := make(chan struct{}) - go func() { - defer close(c) - wg.Wait() - }() - timeout := false - - // here we will wait for a second, to make sure that we are not signaling a changed schema. - select { - case <-c: - case <-time.After(1 * time.Second): - timeout = true + require.Equal(t, testcase.enableSchemaChange, !timeout, "If schema change tracking is enabled, then we shouldn't time out, otherwise we should") + }) } - - assert.True(t, timeout, "should have timed out") } -func TestInitialReloadSchema(t *testing.T) { +// TestReloadView tests that the health streamer tracks view changes correctly +func TestReloadView(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() config := newConfig(db) - // Setting the signal schema change reload interval to one minute - // that way we can test the initial reload trigger. - config.SignalSchemaChangeReloadIntervalSeconds.Set(1 * time.Minute) config.SignalWhenSchemaChange = true + _ = config.SchemaReloadIntervalSeconds.Set("100ms") + config.EnableViews = true - env := tabletenv.NewEnv(config, "ReplTrackerTest") - alias := &topodatapb.TabletAlias{ - Cell: "cell", - Uid: 1, - } - blpFunc = testBlpFunc - hs := newHealthStreamer(env, alias) + env := tabletenv.NewEnv(config, "TestReloadView") + alias := &topodatapb.TabletAlias{Cell: "cell", Uid: 1} + se := schema.NewEngine(env) + hs := newHealthStreamer(env, alias, se) target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} configs := config.DB - db.AddQueryPattern(mysql.ClearSchemaCopy+".*", &sqltypes.Result{}) - db.AddQueryPattern(mysql.InsertIntoSchemaCopy+".*", &sqltypes.Result{}) + db.AddQueryPattern(sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecar.GetIdentifier()).Query+".*", &sqltypes.Result{}) + db.AddQueryPattern(sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecar.GetIdentifier()).Query+".*", &sqltypes.Result{}) + db.AddQueryPattern("SELECT UNIX_TIMESTAMP()"+".*", sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "UNIX_TIMESTAMP(now())", + "varchar", + ), + "1684759138", + )) db.AddQuery("begin", &sqltypes.Result{}) db.AddQuery("commit", &sqltypes.Result{}) db.AddQuery("rollback", &sqltypes.Result{}) - db.AddQuery(mysql.DetectSchemaChange, sqltypes.MakeTestResult( + // Add the query pattern for the query that schema.Engine uses to get the tables. + db.AddQueryPattern("SELECT .* information_schema.innodb_tablespaces .*", + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "TABLE_NAME | TABLE_TYPE | UNIX_TIMESTAMP(t.create_time) | TABLE_COMMENT | SUM(i.file_size) | SUM(i.allocated_size)", + "varchar|varchar|int64|varchar|int64|int64", + ), + )) + db.AddQueryPattern("SELECT COLUMN_NAME as column_name.*", sqltypes.MakeTestResult( sqltypes.MakeTestFields( - "table_name", + "column_name", "varchar", ), - "product", - "users", + "id", )) - db.AddQuery(mysql.SelectAllViews, &sqltypes.Result{}) - - hs.InitDBConfig(target, configs.DbaWithDB()) - hs.Open() - defer hs.Close() - var wg sync.WaitGroup - wg.Add(1) - go func() { - hs.Stream(ctx, func(response *querypb.StreamHealthResponse) error { - if response.RealtimeStats.TableSchemaChanged != nil { - assert.Equal(t, []string{"product", "users"}, response.RealtimeStats.TableSchemaChanged) - wg.Done() - } - return nil - }) - }() - - c := make(chan struct{}) - go func() { - defer close(c) - wg.Wait() - }() - select { - case <-c: - case <-time.After(1 * time.Second): - // should not timeout despite SignalSchemaChangeReloadIntervalSeconds being set to 1 minute - t.Errorf("timed out") - } -} - -// TestReloadView tests that the health streamer tracks view changes correctly -func TestReloadView(t *testing.T) { - db := fakesqldb.New(t) - defer db.Close() - config := newConfig(db) - config.SignalSchemaChangeReloadIntervalSeconds.Set(100 * time.Millisecond) - config.EnableViews = true - - env := tabletenv.NewEnv(config, "TestReloadView") - alias := &topodatapb.TabletAlias{Cell: "cell", Uid: 1} - hs := newHealthStreamer(env, alias) - - target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} - configs := config.DB - - db.AddQuery(mysql.DetectSchemaChangeOnlyBaseTable, &sqltypes.Result{}) - db.AddQuery(mysql.SelectAllViews, &sqltypes.Result{}) + db.AddQueryPattern("SELECT `id` FROM `fakesqldb`.*", sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id", + "int64", + ), + )) + db.AddQuery(mysql.ShowRowsRead, sqltypes.MakeTestResult( + sqltypes.MakeTestFields("Variable_name|Value", "varchar|int32"), + "Innodb_rows_read|50")) + db.AddQuery(mysql.BaseShowPrimary, sqltypes.MakeTestResult( + sqltypes.MakeTestFields("table_name | column_name", "varchar|varchar"), + )) + db.AddQueryPattern(".*SELECT table_name, view_definition.*views.*", &sqltypes.Result{}) + db.AddQuery("SELECT TABLE_NAME, CREATE_TIME FROM _vt.`tables`", &sqltypes.Result{}) hs.InitDBConfig(target, configs.DbaWithDB()) + se.InitDBConfig(configs.DbaWithDB()) hs.Open() defer hs.Close() + err := se.Open() + require.NoError(t, err) + se.MakePrimary(true) + defer se.Close() + // Start schema notifications. + hs.MakePrimary(true) + + showCreateViewFields := sqltypes.MakeTestFields( + "View|Create View|character_set_client|collation_connection", + "varchar|text|varchar|varchar") + showTableSizesFields := sqltypes.MakeTestFields( + "TABLE_NAME | TABLE_TYPE | UNIX_TIMESTAMP(t.create_time) | TABLE_COMMENT | SUM(i.file_size) | SUM(i.allocated_size)", + "varchar|varchar|int64|varchar|int64|int64", + ) tcases := []struct { - res *sqltypes.Result - exp []string - }{{ - // view_a and view_b added. - res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|updated_at", "varchar|timestamp"), - "view_a|2023-01-12 14:23:33", "view_b|2023-01-12 15:23:33"), - exp: []string{"view_a", "view_b"}, - }, { - // view_b modified - res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|updated_at", "varchar|timestamp"), - "view_a|2023-01-12 14:23:33", "view_b|2023-01-12 18:23:33"), - exp: []string{"view_b"}, - }, { - // view_a modified, view_b deleted and view_c added. - res: sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|updated_at", "varchar|timestamp"), - "view_a|2023-01-12 16:23:33", "view_c|2023-01-12 18:23:33"), - exp: []string{"view_a", "view_b", "view_c"}, - }} + detectViewChangeOutput *sqltypes.Result + showTablesWithSizesOutput *sqltypes.Result + + expCreateStmtQuery []string + createStmtOutput []*sqltypes.Result + + expGetViewDefinitionsQuery string + viewDefinitionsOutput *sqltypes.Result + + expClearQuery string + expInsertQuery []string + expViewsChanged []string + }{ + { + // view_a and view_b added. + detectViewChangeOutput: sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name", "varchar"), + "view_a", "view_b"), + showTablesWithSizesOutput: sqltypes.MakeTestResult(showTableSizesFields, "view_a|VIEW|12345678||123|123", "view_b|VIEW|12345678||123|123"), + viewDefinitionsOutput: sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|view_definition", "varchar|text"), + "view_a|def_a", "view_b|def_b"), + createStmtOutput: []*sqltypes.Result{sqltypes.MakeTestResult(showCreateViewFields, "view_a|create_view_a|utf8|utf8_general_ci"), + sqltypes.MakeTestResult(showCreateViewFields, "view_b|create_view_b|utf8|utf8_general_ci")}, + expViewsChanged: []string{"view_a", "view_b"}, + expGetViewDefinitionsQuery: "select table_name, view_definition from information_schema.views where table_schema = database() and table_name in ('view_a', 'view_b')", + expCreateStmtQuery: []string{"show create table view_a", "show create table view_b"}, + expClearQuery: "delete from _vt.views where TABLE_SCHEMA = database() and TABLE_NAME in ('view_a', 'view_b')", + expInsertQuery: []string{ + "insert into _vt.views(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, VIEW_DEFINITION) values (database(), 'view_a', 'create_view_a', 'def_a')", + "insert into _vt.views(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, VIEW_DEFINITION) values (database(), 'view_b', 'create_view_b', 'def_b')", + }, + }, + { + // view_b modified + showTablesWithSizesOutput: sqltypes.MakeTestResult(showTableSizesFields, "view_a|VIEW|12345678||123|123", "view_b|VIEW|12345678||123|123"), + detectViewChangeOutput: sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name", "varchar"), + "view_b"), + viewDefinitionsOutput: sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|view_definition", "varchar|text"), + "view_b|def_mod_b"), + createStmtOutput: []*sqltypes.Result{sqltypes.MakeTestResult(showCreateViewFields, "view_b|create_view_mod_b|utf8|utf8_general_ci")}, + expViewsChanged: []string{"view_b"}, + expGetViewDefinitionsQuery: "select table_name, view_definition from information_schema.views where table_schema = database() and table_name in ('view_b')", + expCreateStmtQuery: []string{"show create table view_b"}, + expClearQuery: "delete from _vt.views where TABLE_SCHEMA = database() and TABLE_NAME in ('view_b')", + expInsertQuery: []string{ + "insert into _vt.views(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, VIEW_DEFINITION) values (database(), 'view_b', 'create_view_mod_b', 'def_mod_b')", + }, + }, + { + // view_a modified, view_b deleted and view_c added. + showTablesWithSizesOutput: sqltypes.MakeTestResult(showTableSizesFields, "view_c|VIEW|98732432||123|123", "view_a|VIEW|12345678||123|123"), + detectViewChangeOutput: sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name", "varchar"), + "view_a", "view_b", "view_c"), + viewDefinitionsOutput: sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|view_definition", "varchar|text"), + "view_a|def_mod_a", "view_c|def_c"), + createStmtOutput: []*sqltypes.Result{sqltypes.MakeTestResult(showCreateViewFields, "view_a|create_view_mod_a|utf8|utf8_general_ci"), + sqltypes.MakeTestResult(showCreateViewFields, "view_c|create_view_c|utf8|utf8_general_ci")}, + expViewsChanged: []string{"view_a", "view_b", "view_c"}, + expGetViewDefinitionsQuery: "select table_name, view_definition from information_schema.views where table_schema = database() and table_name in ('view_b', 'view_c', 'view_a')", + expCreateStmtQuery: []string{"show create table view_a", "show create table view_c"}, + expClearQuery: "delete from _vt.views where table_schema = database() and table_name in ('view_b', 'view_c', 'view_a')", + expInsertQuery: []string{ + "insert into _vt.views(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, VIEW_DEFINITION) values (database(), 'view_a', 'create_view_mod_a', 'def_mod_a')", + "insert into _vt.views(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, VIEW_DEFINITION) values (database(), 'view_c', 'create_view_c', 'def_c')", + }, + }, + } // setting first test case result. - db.AddQuery(mysql.SelectAllViews, tcases[0].res) + db.AddQueryPattern("SELECT .* information_schema.innodb_tablespaces .*", tcases[0].showTablesWithSizesOutput) + db.AddQueryPattern(".*SELECT table_name, view_definition.*views.*", tcases[0].detectViewChangeOutput) + + db.AddQuery(tcases[0].expGetViewDefinitionsQuery, tcases[0].viewDefinitionsOutput) + for idx := range tcases[0].expCreateStmtQuery { + db.AddQuery(tcases[0].expCreateStmtQuery[idx], tcases[0].createStmtOutput[idx]) + } + for idx := range tcases[0].expInsertQuery { + db.AddQuery(tcases[0].expInsertQuery[idx], &sqltypes.Result{}) + } + db.AddQuery(tcases[0].expClearQuery, &sqltypes.Result{}) var tcCount atomic.Int32 ch := make(chan struct{}) @@ -381,9 +493,11 @@ func TestReloadView(t *testing.T) { hs.Stream(ctx, func(response *querypb.StreamHealthResponse) error { if response.RealtimeStats.ViewSchemaChanged != nil { sort.Strings(response.RealtimeStats.ViewSchemaChanged) - assert.Equal(t, tcases[tcCount.Load()].exp, response.RealtimeStats.ViewSchemaChanged) + assert.Equal(t, tcases[tcCount.Load()].expViewsChanged, response.RealtimeStats.ViewSchemaChanged) tcCount.Add(1) + db.AddQueryPattern(".*SELECT table_name, view_definition.*views.*", &sqltypes.Result{}) ch <- struct{}{} + require.NoError(t, db.LastError()) } return nil }) @@ -395,12 +509,21 @@ func TestReloadView(t *testing.T) { if tcCount.Load() == int32(len(tcases)) { return } - db.AddQuery(mysql.SelectAllViews, tcases[tcCount.Load()].res) - case <-time.After(1000 * time.Second): + idx := tcCount.Load() + db.AddQuery(tcases[idx].expGetViewDefinitionsQuery, tcases[idx].viewDefinitionsOutput) + for i := range tcases[idx].expCreateStmtQuery { + db.AddQuery(tcases[idx].expCreateStmtQuery[i], tcases[idx].createStmtOutput[i]) + } + for i := range tcases[idx].expInsertQuery { + db.AddQuery(tcases[idx].expInsertQuery[i], &sqltypes.Result{}) + } + db.AddQuery(tcases[idx].expClearQuery, &sqltypes.Result{}) + db.AddQueryPattern("SELECT .* information_schema.innodb_tablespaces .*", tcases[idx].showTablesWithSizesOutput) + db.AddQueryPattern(".*SELECT table_name, view_definition.*views.*", tcases[idx].detectViewChangeOutput) + case <-time.After(10 * time.Second): t.Fatalf("timed out") } } - } func testStream(hs *healthStreamer) (<-chan *querypb.StreamHealthResponse, context.CancelFunc) { diff --git a/go/vt/vttablet/tabletserver/livequeryz.go b/go/vt/vttablet/tabletserver/livequeryz.go index c3f28767c19..49aff0c163a 100644 --- a/go/vt/vttablet/tabletserver/livequeryz.go +++ b/go/vt/vttablet/tabletserver/livequeryz.go @@ -21,7 +21,8 @@ import ( "fmt" "net/http" "strconv" - "text/template" + + "github.com/google/safehtml/template" "vitess.io/vitess/go/acl" "vitess.io/vitess/go/vt/log" diff --git a/go/vt/vttablet/tabletserver/messager/engine.go b/go/vt/vttablet/tabletserver/messager/engine.go index 2d7fdf2bb82..4204c5c0b7e 100644 --- a/go/vt/vttablet/tabletserver/messager/engine.go +++ b/go/vt/vttablet/tabletserver/messager/engine.go @@ -27,6 +27,7 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" @@ -44,7 +45,7 @@ type TabletService interface { // VStreamer defines the functions of VStreamer // that the messager needs. type VStreamer interface { - Stream(ctx context.Context, startPos string, tablePKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error + Stream(ctx context.Context, startPos string, tablePKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, throttlerApp throttlerapp.Name, send func([]*binlogdatapb.VEvent) error) error StreamResults(ctx context.Context, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error } @@ -83,7 +84,7 @@ func (me *Engine) Open() { log.Info("Messager: opening") // Unlock before invoking RegisterNotifier because it // obtains the same lock. - me.se.RegisterNotifier("messages", me.schemaChanged) + me.se.RegisterNotifier("messages", me.schemaChanged, true) } // Close closes the Engine service. @@ -137,10 +138,11 @@ func (me *Engine) Subscribe(ctx context.Context, name string, send func(*sqltype return mm.Subscribe(ctx, send), nil } -func (me *Engine) schemaChanged(tables map[string]*schema.Table, created, altered, dropped []string) { +func (me *Engine) schemaChanged(tables map[string]*schema.Table, created, altered, dropped []*schema.Table) { me.mu.Lock() defer me.mu.Unlock() - for _, name := range append(dropped, altered...) { + for _, table := range append(dropped, altered...) { + name := table.Name.String() mm := me.managers[name] if mm == nil { continue @@ -150,8 +152,8 @@ func (me *Engine) schemaChanged(tables map[string]*schema.Table, created, altere delete(me.managers, name) } - for _, name := range append(created, altered...) { - t := tables[name] + for _, t := range append(created, altered...) { + name := t.Name.String() if t.Type != schema.Message { continue } diff --git a/go/vt/vttablet/tabletserver/messager/engine_test.go b/go/vt/vttablet/tabletserver/messager/engine_test.go index 31d91b1c66e..e134a6fbe21 100644 --- a/go/vt/vttablet/tabletserver/messager/engine_test.go +++ b/go/vt/vttablet/tabletserver/messager/engine_test.go @@ -24,68 +24,76 @@ import ( "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) -var meTable = &schema.Table{ - Type: schema.Message, - MessageInfo: newMMTable().MessageInfo, -} +var ( + meTableT1 = &schema.Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Type: schema.Message, + MessageInfo: newMMTable().MessageInfo, + } + meTableT2 = &schema.Table{ + Name: sqlparser.NewIdentifierCS("t2"), + Type: schema.Message, + MessageInfo: newMMTable().MessageInfo, + } + meTableT3 = &schema.Table{ + Name: sqlparser.NewIdentifierCS("t3"), + Type: schema.Message, + MessageInfo: newMMTable().MessageInfo, + } + meTableT4 = &schema.Table{ + Name: sqlparser.NewIdentifierCS("t4"), + Type: schema.Message, + MessageInfo: newMMTable().MessageInfo, + } + + tableT2 = &schema.Table{ + Name: sqlparser.NewIdentifierCS("t2"), + Type: schema.NoType, + } + tableT4 = &schema.Table{ + Name: sqlparser.NewIdentifierCS("t4"), + Type: schema.NoType, + } + tableT5 = &schema.Table{ + Name: sqlparser.NewIdentifierCS("t5"), + Type: schema.NoType, + } +) func TestEngineSchemaChanged(t *testing.T) { db := fakesqldb.New(t) defer db.Close() engine := newTestEngine(db) defer engine.Close() - tables := map[string]*schema.Table{ - "t1": meTable, - "t2": { - Type: schema.NoType, - }, - } - engine.schemaChanged(tables, []string{"t1", "t2"}, nil, nil) + + engine.schemaChanged(nil, []*schema.Table{meTableT1, tableT2}, nil, nil) got := extractManagerNames(engine.managers) want := map[string]bool{"t1": true} if !reflect.DeepEqual(got, want) { t.Errorf("got: %+v, want %+v", got, want) } - tables = map[string]*schema.Table{ - "t1": meTable, - "t2": { - Type: schema.NoType, - }, - "t3": meTable, - } - engine.schemaChanged(tables, []string{"t3"}, nil, nil) + + engine.schemaChanged(nil, []*schema.Table{meTableT3}, nil, nil) got = extractManagerNames(engine.managers) want = map[string]bool{"t1": true, "t3": true} if !reflect.DeepEqual(got, want) { t.Errorf("got: %+v, want %+v", got, want) } - tables = map[string]*schema.Table{ - "t1": meTable, - "t2": { - Type: schema.NoType, - }, - "t4": meTable, - } - engine.schemaChanged(tables, []string{"t4"}, nil, []string{"t3", "t5"}) + + engine.schemaChanged(nil, []*schema.Table{meTableT4}, nil, []*schema.Table{meTableT3, tableT5}) got = extractManagerNames(engine.managers) want = map[string]bool{"t1": true, "t4": true} if !reflect.DeepEqual(got, want) { t.Errorf("got: %+v, want %+v", got, want) } // Test update - tables = map[string]*schema.Table{ - "t1": meTable, - "t2": meTable, - "t4": { - Type: schema.NoType, - }, - } - engine.schemaChanged(tables, nil, []string{"t2", "t4"}, nil) + engine.schemaChanged(nil, nil, []*schema.Table{meTableT2, tableT4}, nil) got = extractManagerNames(engine.managers) want = map[string]bool{"t1": true, "t2": true} if !reflect.DeepEqual(got, want) { @@ -105,11 +113,7 @@ func TestSubscribe(t *testing.T) { db := fakesqldb.New(t) defer db.Close() engine := newTestEngine(db) - tables := map[string]*schema.Table{ - "t1": meTable, - "t2": meTable, - } - engine.schemaChanged(tables, []string{"t1", "t2"}, nil, nil) + engine.schemaChanged(nil, []*schema.Table{meTableT1, meTableT2}, nil, nil) f1, ch1 := newEngineReceiver() f2, ch2 := newEngineReceiver() // Each receiver is subscribed to different managers. @@ -142,9 +146,7 @@ func TestEngineGenerate(t *testing.T) { defer db.Close() engine := newTestEngine(db) defer engine.Close() - engine.schemaChanged(map[string]*schema.Table{ - "t1": meTable, - }, []string{"t1"}, nil, nil) + engine.schemaChanged(nil, []*schema.Table{meTableT1}, nil, nil) if _, err := engine.GetGenerator("t1"); err != nil { t.Error(err) diff --git a/go/vt/vttablet/tabletserver/messager/message_manager.go b/go/vt/vttablet/tabletserver/messager/message_manager.go index 39598169baa..0629b31629f 100644 --- a/go/vt/vttablet/tabletserver/messager/message_manager.go +++ b/go/vt/vttablet/tabletserver/messager/message_manager.go @@ -27,7 +27,8 @@ import ( "golang.org/x/sync/semaphore" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/timer" @@ -35,9 +36,9 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" ) var ( @@ -222,7 +223,7 @@ type messageManager struct { // where a replica could have received and processed a GTID that the primary // may not have yet commited; but this is harmless because any events missed // will be picked up during the next poller run. - lastPollPosition *mysql.Position + lastPollPosition *replication.Position // wg is for ensuring all running goroutines have returned // before we can close the manager. You need to Add before @@ -684,7 +685,7 @@ func (mm *messageManager) runOneVStream(ctx context.Context) error { var curPos string var fields []*querypb.Field - err := mm.vs.Stream(ctx, "current", nil, mm.vsFilter, func(events []*binlogdatapb.VEvent) error { + err := mm.vs.Stream(ctx, "current", nil, mm.vsFilter, throttlerapp.MessagerName, func(events []*binlogdatapb.VEvent) error { // We need to get the flow control lock mm.cacheManagementMu.Lock() defer mm.cacheManagementMu.Unlock() @@ -702,7 +703,7 @@ func (mm *messageManager) runOneVStream(ctx context.Context) error { if curPos == "" { return true, nil } - cur, err := mysql.DecodePosition(curPos) + cur, err := replication.DecodePosition(curPos) if err != nil { return false, err } @@ -904,28 +905,28 @@ func (mm *messageManager) GeneratePurgeQuery(timeCutoff int64) (string, map[stri func BuildMessageRow(row []sqltypes.Value) (*MessageRow, error) { mr := &MessageRow{Row: row[4:]} if !row[0].IsNull() { - v, err := evalengine.ToInt64(row[0]) + v, err := row[0].ToCastInt64() if err != nil { return nil, err } mr.Priority = v } if !row[1].IsNull() { - v, err := evalengine.ToInt64(row[1]) + v, err := row[1].ToCastInt64() if err != nil { return nil, err } mr.TimeNext = v } if !row[2].IsNull() { - v, err := evalengine.ToInt64(row[2]) + v, err := row[2].ToCastInt64() if err != nil { return nil, err } mr.Epoch = v } if !row[3].IsNull() { - v, err := evalengine.ToInt64(row[3]) + v, err := row[3].ToCastInt64() if err != nil { return nil, err } @@ -947,7 +948,7 @@ func (mm *messageManager) readPending(ctx context.Context, bindVars map[string]* qr.Fields = response.Fields } if response.Gtid != "" { - pos, err := mysql.DecodePosition(response.Gtid) + pos, err := replication.DecodePosition(response.Gtid) if err != nil { return err } @@ -964,13 +965,7 @@ func (mm *messageManager) readPending(ctx context.Context, bindVars map[string]* return qr, err } -func (mm *messageManager) getReceiverCount() int { - mm.mu.Lock() - defer mm.mu.Unlock() - return len(mm.receivers) -} - -func (mm *messageManager) getLastPollPosition() *mysql.Position { +func (mm *messageManager) getLastPollPosition() *replication.Position { mm.cacheManagementMu.Lock() defer mm.cacheManagementMu.Unlock() return mm.lastPollPosition diff --git a/go/vt/vttablet/tabletserver/messager/message_manager_test.go b/go/vt/vttablet/tabletserver/messager/message_manager_test.go index d6da59db065..b8ca47ae46d 100644 --- a/go/vt/vttablet/tabletserver/messager/message_manager_test.go +++ b/go/vt/vttablet/tabletserver/messager/message_manager_test.go @@ -34,9 +34,9 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" @@ -740,7 +740,7 @@ func TestMMGenerate(t *testing.T) { t.Errorf("GenerateAckQuery query: %s, want %s", query, wantQuery) } bvv, _ := sqltypes.BindVariableToValue(bv["time_acked"]) - gotAcked, _ := evalengine.ToInt64(bvv) + gotAcked, _ := bvv.ToCastInt64() wantAcked := time.Now().UnixNano() if wantAcked-gotAcked > 10e9 { t.Errorf("gotAcked: %d, should be with 10s of %d", gotAcked, wantAcked) @@ -888,7 +888,7 @@ func (fv *fakeVStreamer) setPollerResponse(pr []*binlogdatapb.VStreamResultsResp fv.pollerResponse = pr } -func (fv *fakeVStreamer) Stream(ctx context.Context, startPos string, tablePKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { +func (fv *fakeVStreamer) Stream(ctx context.Context, startPos string, tablePKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, throttlerApp throttlerapp.Name, send func([]*binlogdatapb.VEvent) error) error { fv.streamInvocations.Add(1) for { fv.mu.Lock() diff --git a/go/vt/vttablet/tabletserver/planbuilder/builder.go b/go/vt/vttablet/tabletserver/planbuilder/builder.go index cfd0b53e5ca..3cae292b593 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/builder.go +++ b/go/vt/vttablet/tabletserver/planbuilder/builder.go @@ -19,12 +19,9 @@ package planbuilder import ( "strings" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/semantics" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -51,7 +48,7 @@ func analyzeSelect(sel *sqlparser.Select, tables map[string]*schema.Table) (plan return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%s is not a sequence", sqlparser.ToString(sel.From)) } plan.PlanID = PlanNextval - v, err := evalengine.Translate(nextVal.Expr, semantics.EmptySemTable()) + v, err := evalengine.Translate(nextVal.Expr, nil) if err != nil { return nil, err } @@ -126,15 +123,21 @@ func analyzeInsert(ins *sqlparser.Insert, tables map[string]*schema.Table) (plan FullQuery: GenerateFullQuery(ins), } - tableName := sqlparser.GetTableName(ins.Table) - plan.Table = tables[tableName.String()] + tableName, err := ins.Table.TableName() + if err != nil { + return nil, err + } + plan.Table = tables[sqlparser.GetTableName(tableName).String()] return plan, nil } func analyzeShow(show *sqlparser.Show, dbName string) (plan *Plan, err error) { switch showInternal := show.Internal.(type) { case *sqlparser.ShowBasic: - if showInternal.Command == sqlparser.Table { + switch showInternal.Command { + case sqlparser.VitessMigrations: + return &Plan{PlanID: PlanShowMigrations, FullStmt: show}, nil + case sqlparser.Table: // rewrite WHERE clause if it exists // `where Tables_in_Keyspace` => `where Tables_in_DbName` if showInternal.Filter != nil { @@ -205,13 +208,7 @@ func lookupSingleTable(tableExpr sqlparser.TableExpr, tables map[string]*schema. return tables[tableName.String()] } -func analyzeDDL(stmt sqlparser.DDLStatement, viewsEnabled bool) (*Plan, error) { - switch stmt.(type) { - case *sqlparser.AlterView, *sqlparser.DropView, *sqlparser.CreateView: - if viewsEnabled { - return analyzeViewsDDL(stmt) - } - } +func analyzeDDL(stmt sqlparser.DDLStatement) (*Plan, error) { // DDLs and some other statements below don't get fully parsed. // We have to use the original query at the time of execution. // We are in the process of changing this @@ -222,31 +219,3 @@ func analyzeDDL(stmt sqlparser.DDLStatement, viewsEnabled bool) (*Plan, error) { } return &Plan{PlanID: PlanDDL, FullQuery: fullQuery, FullStmt: stmt, NeedsReservedConn: stmt.IsTemporary()}, nil } - -func analyzeViewsDDL(stmt sqlparser.DDLStatement) (*Plan, error) { - switch viewDDL := stmt.(type) { - case *sqlparser.CreateView: - query := mysql.InsertIntoViewsTable - if viewDDL.IsReplace { - query = mysql.ReplaceIntoViewsTable - } - insert, err := sqlparser.Parse(query) - if err != nil { - return nil, err - } - return &Plan{PlanID: PlanViewDDL, FullQuery: GenerateFullQuery(insert), FullStmt: viewDDL}, nil - case *sqlparser.AlterView: - update, err := sqlparser.Parse(mysql.UpdateViewsTable) - if err != nil { - return nil, err - } - return &Plan{PlanID: PlanViewDDL, FullQuery: GenerateFullQuery(update), FullStmt: viewDDL}, nil - case *sqlparser.DropView: - del, err := sqlparser.Parse(mysql.DeleteFromViewsTable) - if err != nil { - return nil, err - } - return &Plan{PlanID: PlanViewDDL, FullQuery: GenerateFullQuery(del), FullStmt: viewDDL}, nil - } - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unknown view DDL type: %T", stmt) -} diff --git a/go/vt/vttablet/tabletserver/planbuilder/permission.go b/go/vt/vttablet/tabletserver/planbuilder/permission.go index d68ff43a152..80b1410f438 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/permission.go +++ b/go/vt/vttablet/tabletserver/planbuilder/permission.go @@ -39,7 +39,7 @@ func BuildPermissions(stmt sqlparser.Statement) []Permission { case *sqlparser.Union, *sqlparser.Select: permissions = buildSubqueryPermissions(node, tableacl.READER, permissions) case *sqlparser.Insert: - permissions = buildTableNamePermissions(node.Table, tableacl.WRITER, permissions) + permissions = buildTableExprPermissions(node.Table, tableacl.WRITER, permissions) permissions = buildSubqueryPermissions(node, tableacl.READER, permissions) case *sqlparser.Update: permissions = buildTableExprsPermissions(node.TableExprs, tableacl.WRITER, permissions) diff --git a/go/vt/vttablet/tabletserver/planbuilder/plan.go b/go/vt/vttablet/tabletserver/planbuilder/plan.go index 603ea455ac7..aca212d10cd 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/plan.go +++ b/go/vt/vttablet/tabletserver/planbuilder/plan.go @@ -76,10 +76,10 @@ const ( PlanCallProc PlanAlterMigration PlanRevertMigration + PlanShowMigrations PlanShowMigrationLogs PlanShowThrottledApps PlanShowThrottlerStatus - PlanViewDDL NumPlans ) @@ -112,10 +112,10 @@ var planName = []string{ "CallProcedure", "AlterMigration", "RevertMigration", + "ShowMigrations", "ShowMigrationLogs", "ShowThrottledApps", "ShowThrottlerStatus", - "ViewDDL", } func (pt PlanType) String() string { @@ -220,7 +220,7 @@ func Build(statement sqlparser.Statement, tables map[string]*schema.Table, dbNam case *sqlparser.Set: plan, err = analyzeSet(stmt), nil case sqlparser.DDLStatement: - plan, err = analyzeDDL(stmt, viewsEnabled) + plan, err = analyzeDDL(stmt) case *sqlparser.AlterMigration: plan, err = &Plan{PlanID: PlanAlterMigration, FullStmt: stmt}, nil case *sqlparser.RevertMigration: diff --git a/go/vt/vttablet/tabletserver/query_engine.go b/go/vt/vttablet/tabletserver/query_engine.go index 8391c29efc3..2381d3a07fa 100644 --- a/go/vt/vttablet/tabletserver/query_engine.go +++ b/go/vt/vttablet/tabletserver/query_engine.go @@ -20,19 +20,16 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "net/http" - "strings" "sync" "sync/atomic" "time" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/acl" - "vitess.io/vitess/go/cache" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/cache/theine" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/pools" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/streamlog" @@ -41,9 +38,13 @@ import ( "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/tableacl" tacl "vitess.io/vitess/go/vt/tableacl/acl" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vthash" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/planbuilder" "vitess.io/vitess/go/vt/vttablet/tabletserver/rules" @@ -122,6 +123,17 @@ func isValid(planType planbuilder.PlanType, hasReservedCon bool, hasSysSettings // _______________________________________________ +type PlanCacheKey = theine.StringKey +type PlanCache = theine.Store[PlanCacheKey, *TabletPlan] + +type SettingsCacheKey = theine.HashKey256 +type SettingsCache = theine.Store[SettingsCacheKey, *pools.Setting] + +type currentSchema struct { + tables map[string]*schema.Table + epoch uint32 +} + // QueryEngine implements the core functionality of tabletserver. // It assumes that no requests will be sent to it before Open is // called and succeeds. @@ -130,14 +142,17 @@ func isValid(planType planbuilder.PlanType, hasReservedCon bool, hasSysSettings // Close: There should be no more pending queries when this // function is called. type QueryEngine struct { - isOpen bool + isOpen atomic.Bool env tabletenv.Env se *schema.Engine // mu protects the following fields. - mu sync.RWMutex - tables map[string]*schema.Table - plans cache.Cache + schemaMu sync.Mutex + epoch uint32 + schema atomic.Pointer[currentSchema] + + plans *PlanCache + settings *SettingsCache queryRuleSources *rules.Map // Pools @@ -145,7 +160,7 @@ type QueryEngine struct { streamConns *connpool.Pool // Services - consolidator *sync2.Consolidator + consolidator sync2.Consolidator streamConsolidator *StreamConsolidator // txSerializer protects vttablet from applications which try to concurrently // UPDATE (or DELETE) a "hot" row (or range of rows). @@ -172,7 +187,10 @@ type QueryEngine struct { // stats // Note: queryErrorCountsWithCode is similar to queryErrorCounts except it contains error code as an additional dimension - queryCounts, queryTimes, queryErrorCounts, queryErrorCountsWithCode, queryRowsAffected, queryRowsReturned *stats.CountersWithMultiLabels + queryCounts, queryCountsWithTabletType, queryTimes, queryErrorCounts, queryErrorCountsWithCode, queryRowsAffected, queryRowsReturned *stats.CountersWithMultiLabels + + // stats flags + enablePerWorkloadTableMetrics bool // Loggers accessCheckerLogger *logutil.ThrottledLogger @@ -183,20 +201,29 @@ type QueryEngine struct { // You must call this only once. func NewQueryEngine(env tabletenv.Env, se *schema.Engine) *QueryEngine { config := env.Config() - cacheCfg := &cache.Config{ - MaxEntries: int64(config.QueryCacheSize), - MaxMemoryUsage: config.QueryCacheMemory, - LFU: config.QueryCacheLFU, - } qe := &QueryEngine{ - env: env, - se: se, - tables: make(map[string]*schema.Table), - plans: cache.NewDefaultCacheImpl(cacheCfg), - queryRuleSources: rules.NewMap(), + env: env, + se: se, + queryRuleSources: rules.NewMap(), + enablePerWorkloadTableMetrics: config.EnablePerWorkloadTableMetrics, } + // Cache for query plans: user configured size with a doorkeeper by default to prevent one-off queries + // from thrashing the cache. + qe.plans = theine.NewStore[PlanCacheKey, *TabletPlan](config.QueryCacheMemory, config.QueryCacheDoorkeeper) + + // cache for connection settings: default to 1/4th of the size for the query cache and do + // not use a doorkeeper because custom connection settings are rarely one-off and we always + // want to cache them + var settingsCacheMemory = config.QueryCacheMemory / 4 + qe.settings = theine.NewStore[SettingsCacheKey, *pools.Setting](settingsCacheMemory, false) + + qe.schema.Store(¤tSchema{ + tables: make(map[string]*schema.Table), + epoch: 0, + }) + qe.conns = connpool.NewPool(env, "ConnPool", config.OltpReadPool) qe.streamConns = connpool.NewPool(env, "StreamConnPool", config.OlapReadPool) qe.consolidatorMode.Store(config.Consolidator) @@ -244,14 +271,27 @@ func NewQueryEngine(env tabletenv.Env, se *schema.Engine) *QueryEngine { env.Exporter().NewGaugeFunc("QueryCacheLength", "Query engine query cache length", func() int64 { return int64(qe.plans.Len()) }) - env.Exporter().NewGaugeFunc("QueryCacheSize", "Query engine query cache size", qe.plans.UsedCapacity) - env.Exporter().NewGaugeFunc("QueryCacheCapacity", "Query engine query cache capacity", qe.plans.MaxCapacity) - env.Exporter().NewCounterFunc("QueryCacheEvictions", "Query engine query cache evictions", qe.plans.Evictions) - qe.queryCounts = env.Exporter().NewCountersWithMultiLabels("QueryCounts", "query counts", []string{"Table", "Plan"}) - qe.queryTimes = env.Exporter().NewCountersWithMultiLabels("QueryTimesNs", "query times in ns", []string{"Table", "Plan"}) - qe.queryRowsAffected = env.Exporter().NewCountersWithMultiLabels("QueryRowsAffected", "query rows affected", []string{"Table", "Plan"}) - qe.queryRowsReturned = env.Exporter().NewCountersWithMultiLabels("QueryRowsReturned", "query rows returned", []string{"Table", "Plan"}) - qe.queryErrorCounts = env.Exporter().NewCountersWithMultiLabels("QueryErrorCounts", "query error counts", []string{"Table", "Plan"}) + env.Exporter().NewGaugeFunc("QueryCacheSize", "Query engine query cache size", func() int64 { + return int64(qe.plans.UsedCapacity()) + }) + env.Exporter().NewGaugeFunc("QueryCacheCapacity", "Query engine query cache capacity", func() int64 { + return int64(qe.plans.MaxCapacity()) + }) + env.Exporter().NewCounterFunc("QueryCacheEvictions", "Query engine query cache evictions", func() int64 { + return qe.plans.Metrics.Evicted() + }) + + labels := []string{"Table", "Plan"} + if config.EnablePerWorkloadTableMetrics { + labels = []string{"Table", "Plan", "Workload"} + } + + qe.queryCounts = env.Exporter().NewCountersWithMultiLabels("QueryCounts", "query counts", labels) + qe.queryCountsWithTabletType = env.Exporter().NewCountersWithMultiLabels("QueryCountsWithTabletType", "query counts with tablet type labels", []string{"Table", "Plan", "TabletType"}) + qe.queryTimes = env.Exporter().NewCountersWithMultiLabels("QueryTimesNs", "query times in ns", labels) + qe.queryRowsAffected = env.Exporter().NewCountersWithMultiLabels("QueryRowsAffected", "query rows affected", labels) + qe.queryRowsReturned = env.Exporter().NewCountersWithMultiLabels("QueryRowsReturned", "query rows returned", labels) + qe.queryErrorCounts = env.Exporter().NewCountersWithMultiLabels("QueryErrorCounts", "query error counts", labels) qe.queryErrorCountsWithCode = env.Exporter().NewCountersWithMultiLabels("QueryErrorCountsWithCode", "query error counts with error code", []string{"Table", "Plan", "Code"}) env.Exporter().HandleFunc("/debug/hotrows", qe.txSerializer.ServeHTTP) @@ -266,12 +306,14 @@ func NewQueryEngine(env tabletenv.Env, se *schema.Engine) *QueryEngine { // Open must be called before sending requests to QueryEngine. func (qe *QueryEngine) Open() error { - if qe.isOpen { + if qe.isOpen.Load() { return nil } log.Info("Query Engine: opening") - qe.conns.Open(qe.env.Config().DB.AppWithDB(), qe.env.Config().DB.DbaWithDB(), qe.env.Config().DB.AppDebugWithDB()) + config := qe.env.Config() + + qe.conns.Open(config.DB.AppWithDB(), config.DB.DbaWithDB(), config.DB.AppDebugWithDB()) conn, err := qe.conns.Get(tabletenv.LocalContext(), nil) if err != nil { @@ -288,9 +330,11 @@ func (qe *QueryEngine) Open() error { return err } - qe.streamConns.Open(qe.env.Config().DB.AppWithDB(), qe.env.Config().DB.DbaWithDB(), qe.env.Config().DB.AppDebugWithDB()) - qe.se.RegisterNotifier("qe", qe.schemaChanged) - qe.isOpen = true + qe.streamConns.Open(config.DB.AppWithDB(), config.DB.DbaWithDB(), config.DB.AppDebugWithDB()) + qe.se.RegisterNotifier("qe", qe.schemaChanged, true) + qe.plans.EnsureOpen() + qe.settings.EnsureOpen() + qe.isOpen.Store(true) return nil } @@ -298,63 +342,69 @@ func (qe *QueryEngine) Open() error { // You must ensure that no more queries will be sent // before calling Close. func (qe *QueryEngine) Close() { - if !qe.isOpen { + if !qe.isOpen.Swap(false) { return } // Close in reverse order of Open. qe.se.UnregisterNotifier("qe") - qe.plans.Clear() - qe.tables = make(map[string]*schema.Table) + + qe.plans.Close() + qe.settings.Close() + qe.streamConns.Close() qe.conns.Close() - qe.isOpen = false log.Info("Query Engine: closed") } -// GetPlan returns the TabletPlan that for the query. Plans are cached in a cache.LRUCache. -func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats, sql string, skipQueryPlanCache bool) (*TabletPlan, error) { - span, _ := trace.NewSpan(ctx, "QueryEngine.GetPlan") - defer span.Finish() - if !skipQueryPlanCache { - if plan := qe.getQuery(sql); plan != nil { - logStats.CachedPlan = true - return plan, nil - } - } - // Obtain read lock to prevent schema from changing while - // we build a plan. The read lock allows multiple identical - // queries to build the same plan. One of them will win by - // updating the query cache and prevent future races. Due to - // this, query stats reporting may not be accurate, but it's - // acceptable because those numbers are best effort. - qe.mu.RLock() - defer qe.mu.RUnlock() +var errNoCache = errors.New("plan should not be cached") + +func (qe *QueryEngine) getPlan(curSchema *currentSchema, sql string) (*TabletPlan, error) { statement, err := sqlparser.Parse(sql) if err != nil { return nil, err } - splan, err := planbuilder.Build(statement, qe.tables, qe.env.Config().DB.DBName, qe.env.Config().EnableViews) + splan, err := planbuilder.Build(statement, curSchema.tables, qe.env.Config().DB.DBName, qe.env.Config().EnableViews) if err != nil { return nil, err } plan := &TabletPlan{Plan: splan, Original: sql} plan.Rules = qe.queryRuleSources.FilterByPlan(sql, plan.PlanID, plan.TableNames()...) plan.buildAuthorized() - if plan.PlanID == planbuilder.PlanDDL || plan.PlanID == planbuilder.PlanSet { - return plan, nil - } - if !skipQueryPlanCache && !sqlparser.SkipQueryPlanCacheDirective(statement) { - qe.plans.Set(sql, plan) + if plan.PlanID == planbuilder.PlanDDL || plan.PlanID == planbuilder.PlanSet || sqlparser.SkipQueryPlanCacheDirective(statement) { + return plan, errNoCache } + return plan, nil } +// GetPlan returns the TabletPlan that for the query. Plans are cached in a cache.LRUCache. +func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats, sql string, skipQueryPlanCache bool) (*TabletPlan, error) { + span, _ := trace.NewSpan(ctx, "QueryEngine.GetPlan") + defer span.Finish() + + var plan *TabletPlan + var err error + + curSchema := qe.schema.Load() + + if skipQueryPlanCache { + plan, err = qe.getPlan(curSchema, sql) + } else { + plan, logStats.CachedPlan, err = qe.plans.GetOrLoad(PlanCacheKey(sql), curSchema.epoch, func() (*TabletPlan, error) { + return qe.getPlan(curSchema, sql) + }) + } + + if errors.Is(err, errNoCache) { + err = nil + } + return plan, err +} + // GetStreamPlan is similar to GetPlan, but doesn't use the cache // and doesn't enforce a limit. It just returns the parsed query. func (qe *QueryEngine) GetStreamPlan(sql string) (*TabletPlan, error) { - qe.mu.RLock() - defer qe.mu.RUnlock() - splan, err := planbuilder.BuildStreaming(sql, qe.tables) + splan, err := planbuilder.BuildStreaming(sql, qe.schema.Load().tables) if err != nil { return nil, err } @@ -366,9 +416,7 @@ func (qe *QueryEngine) GetStreamPlan(sql string) (*TabletPlan, error) { // GetMessageStreamPlan builds a plan for Message streaming. func (qe *QueryEngine) GetMessageStreamPlan(name string) (*TabletPlan, error) { - qe.mu.RLock() - defer qe.mu.RUnlock() - splan, err := planbuilder.BuildMessageStreaming(name, qe.tables) + splan, err := planbuilder.BuildMessageStreaming(name, qe.schema.Load().tables) if err != nil { return nil, err } @@ -383,33 +431,44 @@ func (qe *QueryEngine) GetConnSetting(ctx context.Context, settings []string) (* span, _ := trace.NewSpan(ctx, "QueryEngine.GetConnSetting") defer span.Finish() - var keyBuilder strings.Builder + hasher := vthash.New256() for _, q := range settings { - keyBuilder.WriteString(q) + _, _ = hasher.WriteString(q) } - // try to get the connSetting from the cache - cacheKey := keyBuilder.String() - if plan := qe.getConnSetting(cacheKey); plan != nil { - return plan, nil - } + var cacheKey SettingsCacheKey + hasher.Sum(cacheKey[:0]) - // build the setting queries - query, resetQuery, err := planbuilder.BuildSettingQuery(settings) - if err != nil { - return nil, err - } - connSetting := pools.NewSetting(query, resetQuery) - - // store the connSetting in the cache - qe.plans.Set(cacheKey, connSetting) - - return connSetting, nil + connSetting, _, err := qe.settings.GetOrLoad(cacheKey, 0, func() (*pools.Setting, error) { + // build the setting queries + query, resetQuery, err := planbuilder.BuildSettingQuery(settings) + if err != nil { + return nil, err + } + return pools.NewSetting(query, resetQuery), nil + }) + return connSetting, err } // ClearQueryPlanCache should be called if query plan cache is potentially obsolete func (qe *QueryEngine) ClearQueryPlanCache() { - qe.plans.Clear() + qe.schemaMu.Lock() + defer qe.schemaMu.Unlock() + + qe.epoch++ + + current := qe.schema.Load() + qe.schema.Store(¤tSchema{ + tables: current.tables, + epoch: qe.epoch, + }) +} + +func (qe *QueryEngine) ForEachPlan(each func(plan *TabletPlan) bool) { + curSchema := qe.schema.Load() + qe.plans.Range(curSchema.epoch, func(_ PlanCacheKey, plan *TabletPlan) bool { + return each(plan) + }) } // IsMySQLReachable returns an error if it cannot connect to MySQL. @@ -417,7 +476,7 @@ func (qe *QueryEngine) ClearQueryPlanCache() { func (qe *QueryEngine) IsMySQLReachable() error { conn, err := dbconnpool.NewDBConnection(context.TODO(), qe.env.Config().DB.AppWithDB()) if err != nil { - if mysql.IsTooManyConnectionsErr(err) { + if sqlerror.IsTooManyConnectionsErr(err) { return nil } return err @@ -426,66 +485,48 @@ func (qe *QueryEngine) IsMySQLReachable() error { return nil } -func (qe *QueryEngine) schemaChanged(tables map[string]*schema.Table, created, altered, dropped []string) { - qe.mu.Lock() - defer qe.mu.Unlock() - qe.tables = tables - if len(altered) != 0 || len(dropped) != 0 { - qe.plans.Clear() - } -} - -// getQuery fetches the plan and makes it the most recent. -func (qe *QueryEngine) getQuery(sql string) *TabletPlan { - cacheResult, ok := qe.plans.Get(sql) - if !ok { - return nil - } - plan, ok := cacheResult.(*TabletPlan) - if ok { - return plan - } - return nil -} +func (qe *QueryEngine) schemaChanged(tables map[string]*schema.Table, created, altered, dropped []*schema.Table) { + qe.schemaMu.Lock() + defer qe.schemaMu.Unlock() -func (qe *QueryEngine) getConnSetting(key string) *pools.Setting { - cacheResult, ok := qe.plans.Get(key) - if !ok { - return nil - } - plan, ok := cacheResult.(*pools.Setting) - if ok { - return plan + if len(altered) != 0 || len(dropped) != 0 { + qe.epoch++ } - return nil -} -// SetQueryPlanCacheCap sets the query plan cache capacity. -func (qe *QueryEngine) SetQueryPlanCacheCap(size int) { - if size <= 0 { - size = 1 - } - qe.plans.SetCapacity(int64(size)) + qe.schema.Store(¤tSchema{ + tables: tables, + epoch: qe.epoch, + }) } // QueryPlanCacheCap returns the capacity of the query cache. func (qe *QueryEngine) QueryPlanCacheCap() int { - return int(qe.plans.MaxCapacity()) + return qe.plans.MaxCapacity() } // QueryPlanCacheLen returns the length (size in entries) of the query cache -func (qe *QueryEngine) QueryPlanCacheLen() int { - qe.plans.Wait() - return qe.plans.Len() +func (qe *QueryEngine) QueryPlanCacheLen() (count int) { + qe.ForEachPlan(func(plan *TabletPlan) bool { + count++ + return true + }) + return } // AddStats adds the given stats for the planName.tableName -func (qe *QueryEngine) AddStats(planType planbuilder.PlanType, tableName string, queryCount int64, duration, mysqlTime time.Duration, rowsAffected, rowsReturned, errorCount int64, errorCode string) { +func (qe *QueryEngine) AddStats(planType planbuilder.PlanType, tableName, workload string, tabletType topodata.TabletType, queryCount int64, duration, mysqlTime time.Duration, rowsAffected, rowsReturned, errorCount int64, errorCode string) { // table names can contain "." characters, replace them! keys := []string{tableName, planType.String()} + // Only use the workload as a label if that's enabled in the configuration. + if qe.enablePerWorkloadTableMetrics { + keys = append(keys, workload) + } qe.queryCounts.Add(keys, queryCount) qe.queryTimes.Add(keys, int64(duration)) qe.queryErrorCounts.Add(keys, errorCount) + + qe.queryCountsWithTabletType.Add([]string{tableName, planType.String(), tabletType.String()}, queryCount) + // queryErrorCountsWithCode is similar to queryErrorCounts except we have an additional dimension // of error code. if errorCount > 0 { @@ -529,8 +570,7 @@ func (qe *QueryEngine) handleHTTPQueryPlans(response http.ResponseWriter, reques } response.Header().Set("Content-Type", "text/plain") - qe.plans.ForEach(func(value any) bool { - plan := value.(*TabletPlan) + qe.ForEachPlan(func(plan *TabletPlan) bool { response.Write([]byte(fmt.Sprintf("%#v\n", sqlparser.TruncateForUI(plan.Original)))) if b, err := json.MarshalIndent(plan.Plan, "", " "); err != nil { response.Write([]byte(err.Error())) @@ -549,9 +589,7 @@ func (qe *QueryEngine) handleHTTPQueryStats(response http.ResponseWriter, reques } response.Header().Set("Content-Type", "application/json; charset=utf-8") var qstats []perQueryStats - qe.plans.ForEach(func(value any) bool { - plan := value.(*TabletPlan) - + qe.ForEachPlan(func(plan *TabletPlan) bool { var pqstats perQueryStats pqstats.Query = unicoded(sqlparser.TruncateForUI(plan.Original)) pqstats.Table = plan.TableName().String() @@ -607,10 +645,6 @@ func (qe *QueryEngine) handleHTTPAclJSON(response http.ResponseWriter, request * // ServeHTTP lists the most recent, cached queries and their count. func (qe *QueryEngine) handleHTTPConsolidations(response http.ResponseWriter, request *http.Request) { - if err := acl.CheckAccessHTTP(request, acl.DEBUGGING); err != nil { - acl.SendError(response, err) - return - } if err := acl.CheckAccessHTTP(request, acl.DEBUGGING); err != nil { acl.SendError(response, err) return diff --git a/go/vt/vttablet/tabletserver/query_engine_test.go b/go/vt/vttablet/tabletserver/query_engine_test.go index 3b399e9eb9c..73ac1ca5e37 100644 --- a/go/vt/vttablet/tabletserver/query_engine_test.go +++ b/go/vt/vttablet/tabletserver/query_engine_test.go @@ -18,7 +18,6 @@ package tabletserver import ( "context" - "expvar" "fmt" "math/rand" "net/http" @@ -32,6 +31,9 @@ import ( "testing" "time" + "vitess.io/vitess/go/cache/theine" + "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/mysql" @@ -39,7 +41,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/cache" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/streamlog" @@ -146,7 +147,7 @@ func TestGetMessageStreamPlan(t *testing.T) { } wantPlan := &planbuilder.Plan{ PlanID: planbuilder.PlanMessageStream, - Table: qe.tables["msg"], + Table: qe.schema.Load().tables["msg"], Permissions: []planbuilder.Permission{{ TableName: "msg", Role: tableacl.WRITER, @@ -162,12 +163,8 @@ func TestGetMessageStreamPlan(t *testing.T) { func assertPlanCacheSize(t *testing.T, qe *QueryEngine, expected int) { t.Helper() - var size int - qe.plans.Wait() - qe.plans.ForEach(func(_ any) bool { - size++ - return true - }) + time.Sleep(100 * time.Millisecond) + size := qe.plans.Len() require.Equal(t, expected, size, "expected query plan cache to contain %d entries, found %d", expected, size) } @@ -177,7 +174,6 @@ func TestQueryPlanCache(t *testing.T) { schematest.AddDefaultQueries(db) firstQuery := "select * from test_table_01" - secondQuery := "select * from test_table_02" db.AddQuery("select * from test_table_01 where 1 != 1", &sqltypes.Result{}) db.AddQuery("select * from test_table_02 where 1 != 1", &sqltypes.Result{}) @@ -188,23 +184,11 @@ func TestQueryPlanCache(t *testing.T) { ctx := context.Background() logStats := tabletenv.NewLogStats(ctx, "GetPlanStats") - if cache.DefaultConfig.LFU { - // this cache capacity is in bytes - qe.SetQueryPlanCacheCap(528) - } else { - // this cache capacity is in number of elements - qe.SetQueryPlanCacheCap(1) - } + firstPlan, err := qe.GetPlan(ctx, logStats, firstQuery, false) require.NoError(t, err) require.NotNil(t, firstPlan, "plan should not be nil") - secondPlan, err := qe.GetPlan(ctx, logStats, secondQuery, false) - fmt.Println(secondPlan.CachedSize(true)) - require.NoError(t, err) - require.NotNil(t, secondPlan, "plan should not be nil") - expvar.Do(func(kv expvar.KeyValue) { - _ = kv.Value.String() - }) + assertPlanCacheSize(t, qe, 1) qe.ClearQueryPlanCache() } @@ -225,7 +209,7 @@ func TestNoQueryPlanCache(t *testing.T) { ctx := context.Background() logStats := tabletenv.NewLogStats(ctx, "GetPlanStats") - qe.SetQueryPlanCacheCap(1024) + firstPlan, err := qe.GetPlan(ctx, logStats, firstQuery, true) if err != nil { t.Fatal(err) @@ -254,7 +238,7 @@ func TestNoQueryPlanCacheDirective(t *testing.T) { ctx := context.Background() logStats := tabletenv.NewLogStats(ctx, "GetPlanStats") - qe.SetQueryPlanCacheCap(1024) + firstPlan, err := qe.GetPlan(ctx, logStats, firstQuery, false) if err != nil { t.Fatal(err) @@ -297,12 +281,14 @@ func TestStatsURL(t *testing.T) { func newTestQueryEngine(idleTimeout time.Duration, strict bool, dbcfgs *dbconfigs.DBConfigs) *QueryEngine { config := tabletenv.NewDefaultConfig() config.DB = dbcfgs - config.OltpReadPool.IdleTimeoutSeconds.Set(idleTimeout) - config.OlapReadPool.IdleTimeoutSeconds.Set(idleTimeout) - config.TxPool.IdleTimeoutSeconds.Set(idleTimeout) + _ = config.OltpReadPool.IdleTimeoutSeconds.Set(idleTimeout.String()) + _ = config.OlapReadPool.IdleTimeoutSeconds.Set(idleTimeout.String()) + _ = config.TxPool.IdleTimeoutSeconds.Set(idleTimeout.String()) env := tabletenv.NewEnv(config, "TabletServerTest") se := schema.NewEngine(env) qe := NewQueryEngine(env, se) + // the integration tests that check cache behavior do not expect a doorkeeper; disable it + qe.plans = theine.NewStore[PlanCacheKey, *TabletPlan](4*1024*1024, false) se.InitDBConfig(dbcfgs.DbaWithDB()) return qe } @@ -391,13 +377,12 @@ func BenchmarkPlanCacheThroughput(b *testing.B) { } } -func benchmarkPlanCache(b *testing.B, db *fakesqldb.DB, lfu bool, par int) { +func benchmarkPlanCache(b *testing.B, db *fakesqldb.DB, par int) { b.Helper() dbcfgs := newDBConfigs(db) config := tabletenv.NewDefaultConfig() config.DB = dbcfgs - config.QueryCacheLFU = lfu env := tabletenv.NewEnv(config, "TabletServerTest") se := schema.NewEngine(env) @@ -430,12 +415,8 @@ func BenchmarkPlanCacheContention(b *testing.B) { db.AddQueryPattern(".*", &sqltypes.Result{}) for par := 1; par <= 8; par *= 2 { - b.Run(fmt.Sprintf("ContentionLRU-%d", par), func(b *testing.B) { - benchmarkPlanCache(b, db, false, par) - }) - b.Run(fmt.Sprintf("ContentionLFU-%d", par), func(b *testing.B) { - benchmarkPlanCache(b, db, true, par) + benchmarkPlanCache(b, db, par) }) } } @@ -481,16 +462,9 @@ func TestPlanCachePollution(t *testing.T) { var wg sync.WaitGroup go func() { - cacheMode := "lru" - if config.QueryCacheLFU { - cacheMode = "lfu" - } + cacheMode := "lfu" - out, err := os.Create(path.Join(plotPath, - fmt.Sprintf("cache_plot_%d_%d_%s.dat", - config.QueryCacheSize, config.QueryCacheMemory, cacheMode, - )), - ) + out, err := os.Create(path.Join(plotPath, fmt.Sprintf("cache_plot_%d_%s.dat", config.QueryCacheMemory, cacheMode))) require.NoError(t, err) defer out.Close() @@ -577,6 +551,7 @@ func TestAddQueryStats(t *testing.T) { name string planType planbuilder.PlanType tableName string + tabletType topodata.TabletType queryCount int64 duration time.Duration mysqlTime time.Duration @@ -584,7 +559,10 @@ func TestAddQueryStats(t *testing.T) { rowsReturned int64 errorCount int64 errorCode string + enablePerWorkloadTableMetrics bool + workload string expectedQueryCounts string + expectedQueryCountsWithTableType string expectedQueryTimes string expectedQueryRowsAffected string expectedQueryRowsReturned string @@ -595,66 +573,182 @@ func TestAddQueryStats(t *testing.T) { name: "select query", planType: planbuilder.PlanSelect, tableName: "A", + tabletType: topodata.TabletType_PRIMARY, queryCount: 1, duration: 10, rowsAffected: 0, rowsReturned: 15, errorCount: 0, errorCode: "OK", + enablePerWorkloadTableMetrics: false, + workload: "some-workload", expectedQueryCounts: `{"A.Select": 1}`, expectedQueryTimes: `{"A.Select": 10}`, expectedQueryRowsAffected: `{}`, expectedQueryRowsReturned: `{"A.Select": 15}`, expectedQueryErrorCounts: `{"A.Select": 0}`, expectedQueryErrorCountsWithCode: `{}`, + expectedQueryCountsWithTableType: `{"A.Select.PRIMARY": 1}`, + }, { + name: "select query against a replica", + planType: planbuilder.PlanSelect, + tableName: "A", + tabletType: topodata.TabletType_REPLICA, + queryCount: 1, + duration: 10, + rowsAffected: 0, + rowsReturned: 15, + errorCount: 0, + errorCode: "OK", + enablePerWorkloadTableMetrics: false, + workload: "some-workload", + expectedQueryCounts: `{"A.Select": 1}`, + expectedQueryTimes: `{"A.Select": 10}`, + expectedQueryRowsAffected: `{}`, + expectedQueryRowsReturned: `{"A.Select": 15}`, + expectedQueryErrorCounts: `{"A.Select": 0}`, + expectedQueryErrorCountsWithCode: `{}`, + expectedQueryCountsWithTableType: `{"A.Select.REPLICA": 1}`, }, { name: "select into query", planType: planbuilder.PlanSelect, tableName: "A", + tabletType: topodata.TabletType_PRIMARY, queryCount: 1, duration: 10, rowsAffected: 15, rowsReturned: 0, errorCount: 0, errorCode: "OK", + enablePerWorkloadTableMetrics: false, + workload: "some-workload", expectedQueryCounts: `{"A.Select": 1}`, expectedQueryTimes: `{"A.Select": 10}`, expectedQueryRowsAffected: `{"A.Select": 15}`, expectedQueryRowsReturned: `{"A.Select": 0}`, expectedQueryErrorCounts: `{"A.Select": 0}`, expectedQueryErrorCountsWithCode: `{}`, + expectedQueryCountsWithTableType: `{"A.Select.PRIMARY": 1}`, }, { name: "error", planType: planbuilder.PlanSelect, tableName: "A", + tabletType: topodata.TabletType_PRIMARY, queryCount: 1, duration: 10, rowsAffected: 0, rowsReturned: 0, errorCount: 1, errorCode: "RESOURCE_EXHAUSTED", + enablePerWorkloadTableMetrics: false, + workload: "some-workload", expectedQueryCounts: `{"A.Select": 1}`, expectedQueryTimes: `{"A.Select": 10}`, expectedQueryRowsAffected: `{}`, expectedQueryRowsReturned: `{"A.Select": 0}`, expectedQueryErrorCounts: `{"A.Select": 1}`, expectedQueryErrorCountsWithCode: `{"A.Select.RESOURCE_EXHAUSTED": 1}`, + expectedQueryCountsWithTableType: `{"A.Select.PRIMARY": 1}`, }, { name: "insert query", planType: planbuilder.PlanInsert, tableName: "A", + tabletType: topodata.TabletType_PRIMARY, queryCount: 1, duration: 10, rowsAffected: 15, rowsReturned: 0, errorCount: 0, errorCode: "OK", + enablePerWorkloadTableMetrics: false, + workload: "some-workload", expectedQueryCounts: `{"A.Insert": 1}`, expectedQueryTimes: `{"A.Insert": 10}`, expectedQueryRowsAffected: `{"A.Insert": 15}`, expectedQueryRowsReturned: `{}`, expectedQueryErrorCounts: `{"A.Insert": 0}`, expectedQueryErrorCountsWithCode: `{}`, + expectedQueryCountsWithTableType: `{"A.Insert.PRIMARY": 1}`, + }, { + name: "select query with per workload metrics", + planType: planbuilder.PlanSelect, + tableName: "A", + tabletType: topodata.TabletType_PRIMARY, + queryCount: 1, + duration: 10, + rowsAffected: 0, + rowsReturned: 15, + errorCount: 0, + errorCode: "OK", + enablePerWorkloadTableMetrics: true, + workload: "some-workload", + expectedQueryCounts: `{"A.Select.some-workload": 1}`, + expectedQueryTimes: `{"A.Select.some-workload": 10}`, + expectedQueryRowsAffected: `{}`, + expectedQueryRowsReturned: `{"A.Select.some-workload": 15}`, + expectedQueryErrorCounts: `{"A.Select.some-workload": 0}`, + expectedQueryErrorCountsWithCode: `{}`, + expectedQueryCountsWithTableType: `{"A.Select.PRIMARY": 1}`, + }, { + name: "select into query with per workload metrics", + planType: planbuilder.PlanSelect, + tableName: "A", + tabletType: topodata.TabletType_PRIMARY, + queryCount: 1, + duration: 10, + rowsAffected: 15, + rowsReturned: 0, + errorCount: 0, + errorCode: "OK", + enablePerWorkloadTableMetrics: true, + workload: "some-workload", + expectedQueryCounts: `{"A.Select.some-workload": 1}`, + expectedQueryTimes: `{"A.Select.some-workload": 10}`, + expectedQueryRowsAffected: `{"A.Select.some-workload": 15}`, + expectedQueryRowsReturned: `{"A.Select.some-workload": 0}`, + expectedQueryErrorCounts: `{"A.Select.some-workload": 0}`, + expectedQueryErrorCountsWithCode: `{}`, + expectedQueryCountsWithTableType: `{"A.Select.PRIMARY": 1}`, + }, { + name: "error with per workload metrics", + planType: planbuilder.PlanSelect, + tableName: "A", + tabletType: topodata.TabletType_PRIMARY, + queryCount: 1, + duration: 10, + rowsAffected: 0, + rowsReturned: 0, + errorCount: 1, + errorCode: "RESOURCE_EXHAUSTED", + enablePerWorkloadTableMetrics: true, + workload: "some-workload", + expectedQueryCounts: `{"A.Select.some-workload": 1}`, + expectedQueryTimes: `{"A.Select.some-workload": 10}`, + expectedQueryRowsAffected: `{}`, + expectedQueryRowsReturned: `{"A.Select.some-workload": 0}`, + expectedQueryErrorCounts: `{"A.Select.some-workload": 1}`, + expectedQueryErrorCountsWithCode: `{"A.Select.RESOURCE_EXHAUSTED": 1}`, + expectedQueryCountsWithTableType: `{"A.Select.PRIMARY": 1}`, + }, { + name: "insert query with per workload metrics", + planType: planbuilder.PlanInsert, + tableName: "A", + tabletType: topodata.TabletType_PRIMARY, + queryCount: 1, + duration: 10, + rowsAffected: 15, + rowsReturned: 0, + errorCount: 0, + errorCode: "OK", + enablePerWorkloadTableMetrics: true, + workload: "some-workload", + expectedQueryCounts: `{"A.Insert.some-workload": 1}`, + expectedQueryTimes: `{"A.Insert.some-workload": 10}`, + expectedQueryRowsAffected: `{"A.Insert.some-workload": 15}`, + expectedQueryRowsReturned: `{}`, + expectedQueryErrorCounts: `{"A.Insert.some-workload": 0}`, + expectedQueryErrorCountsWithCode: `{}`, + expectedQueryCountsWithTableType: `{"A.Insert.PRIMARY": 1}`, }, } @@ -663,11 +757,13 @@ func TestAddQueryStats(t *testing.T) { t.Run(testcase.name, func(t *testing.T) { config := tabletenv.NewDefaultConfig() config.DB = newDBConfigs(fakesqldb.New(t)) + config.EnablePerWorkloadTableMetrics = testcase.enablePerWorkloadTableMetrics env := tabletenv.NewEnv(config, "TestAddQueryStats_"+testcase.name) se := schema.NewEngine(env) qe := NewQueryEngine(env, se) - qe.AddStats(testcase.planType, testcase.tableName, testcase.queryCount, testcase.duration, testcase.mysqlTime, testcase.rowsAffected, testcase.rowsReturned, testcase.errorCount, testcase.errorCode) + qe.AddStats(testcase.planType, testcase.tableName, testcase.workload, testcase.tabletType, testcase.queryCount, testcase.duration, testcase.mysqlTime, testcase.rowsAffected, testcase.rowsReturned, testcase.errorCount, testcase.errorCode) assert.Equal(t, testcase.expectedQueryCounts, qe.queryCounts.String()) + assert.Equal(t, testcase.expectedQueryCountsWithTableType, qe.queryCountsWithTabletType.String()) assert.Equal(t, testcase.expectedQueryTimes, qe.queryTimes.String()) assert.Equal(t, testcase.expectedQueryRowsAffected, qe.queryRowsAffected.String()) assert.Equal(t, testcase.expectedQueryRowsReturned, qe.queryRowsReturned.String()) diff --git a/go/vt/vttablet/tabletserver/query_executor.go b/go/vt/vttablet/tabletserver/query_executor.go index 53ee3c9769b..c3a9f719ff5 100644 --- a/go/vt/vttablet/tabletserver/query_executor.go +++ b/go/vt/vttablet/tabletserver/query_executor.go @@ -24,10 +24,11 @@ import ( "sync" "time" - "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/pools" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/trace" @@ -42,6 +43,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" p "vitess.io/vitess/go/vt/vttablet/tabletserver/planbuilder" "vitess.io/vitess/go/vt/vttablet/tabletserver/rules" + eschema "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" querypb "vitess.io/vitess/go/vt/proto/query" @@ -66,7 +68,6 @@ type QueryExecutor struct { const ( streamRowsSize = 256 - maxQueryBufferDuration = 15 * time.Second queryTimeoutMysqlMaxWait = time.Second ) @@ -82,6 +83,7 @@ var ( Type: sqltypes.Int64, }, } + errTxThrottled = vterrors.Errorf(vtrpcpb.Code_RESOURCE_EXHAUSTED, "Transaction throttled") ) func returnStreamResult(result *sqltypes.Result) error { @@ -121,6 +123,7 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { defer func(start time.Time) { duration := time.Since(start) qre.tsv.stats.QueryTimings.Add(planName, duration) + qre.tsv.stats.QueryTimingsByTabletType.Add(qre.tabletType.String(), duration) qre.recordUserQuery("Execute", int64(duration)) mysqlTime := qre.logStats.MysqlResponseTime @@ -132,12 +135,14 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { var errCode string vtErrorCode := vterrors.Code(err) errCode = vtErrorCode.String() + if reply == nil { - qre.tsv.qe.AddStats(qre.plan.PlanID, tableName, 1, duration, mysqlTime, 0, 0, 1, errCode) + qre.tsv.qe.AddStats(qre.plan.PlanID, tableName, qre.options.GetWorkloadName(), qre.tabletType, 1, duration, mysqlTime, 0, 0, 1, errCode) qre.plan.AddStats(1, duration, mysqlTime, 0, 0, 1) return } - qre.tsv.qe.AddStats(qre.plan.PlanID, tableName, 1, duration, mysqlTime, int64(reply.RowsAffected), int64(len(reply.Rows)), 0, errCode) + + qre.tsv.qe.AddStats(qre.plan.PlanID, tableName, qre.options.GetWorkloadName(), qre.tabletType, 1, duration, mysqlTime, int64(reply.RowsAffected), int64(len(reply.Rows)), 0, errCode) qre.plan.AddStats(1, duration, mysqlTime, reply.RowsAffected, uint64(len(reply.Rows)), 0) qre.logStats.RowsAffected = int(reply.RowsAffected) qre.logStats.Rows = reply.Rows @@ -187,13 +192,6 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { return qre.execOther() case p.PlanInsert, p.PlanUpdate, p.PlanDelete, p.PlanInsertMessage, p.PlanDDL, p.PlanLoad: return qre.execAutocommit(qre.txConnExec) - case p.PlanViewDDL: - switch qre.plan.FullStmt.(type) { - case *sqlparser.DropView: - return qre.execAutocommit(qre.execDropViewDDL) - default: - return qre.execAsTransaction(qre.execViewDDL) - } case p.PlanUpdateLimit, p.PlanDeleteLimit: return qre.execAsTransaction(qre.txConnExec) case p.PlanCallProc: @@ -202,6 +200,8 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { return qre.execAlterMigration() case p.PlanRevertMigration: return qre.execRevertMigration() + case p.PlanShowMigrations: + return qre.execShowMigrations() case p.PlanShowMigrationLogs: return qre.execShowMigrationLogs() case p.PlanShowThrottledApps: @@ -222,10 +222,14 @@ func (qre *QueryExecutor) execAutocommit(f func(conn *StatefulConnection) (*sqlt if qre.options == nil { qre.options = &querypb.ExecuteOptions{} } else { - qre.options = proto.Clone(qre.options).(*querypb.ExecuteOptions) + qre.options = qre.options.CloneVT() } qre.options.TransactionIsolation = querypb.ExecuteOptions_AUTOCOMMIT + if qre.tsv.txThrottler.Throttle(qre.tsv.getPriorityFromOptions(qre.options), qre.options.GetWorkloadName()) { + return nil, errTxThrottled + } + conn, _, _, err := qre.tsv.te.txPool.Begin(qre.ctx, qre.options, false, 0, nil, qre.setting) if err != nil { @@ -237,6 +241,9 @@ func (qre *QueryExecutor) execAutocommit(f func(conn *StatefulConnection) (*sqlt } func (qre *QueryExecutor) execAsTransaction(f func(conn *StatefulConnection) (*sqltypes.Result, error)) (*sqltypes.Result, error) { + if qre.tsv.txThrottler.Throttle(qre.tsv.getPriorityFromOptions(qre.options), qre.options.GetWorkloadName()) { + return nil, errTxThrottled + } conn, beginSQL, _, err := qre.tsv.te.txPool.Begin(qre.ctx, qre.options, false, 0, nil, qre.setting) if err != nil { return nil, err @@ -301,119 +308,13 @@ func (qre *QueryExecutor) txConnExec(conn *StatefulConnection) (*sqltypes.Result return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] %s unexpected plan type", qre.plan.PlanID.String()) } -func (qre *QueryExecutor) execViewDDL(conn *StatefulConnection) (*sqltypes.Result, error) { - var err error - switch stmt := qre.plan.FullStmt.(type) { - case *sqlparser.CreateView: - _, err = qre.execCreateViewDDL(conn, stmt) - case *sqlparser.AlterView: - _, err = qre.execAlterViewDDL(conn, stmt) - default: - err = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "BUG: unexpected view DDL type: %T", qre.plan.FullStmt) - } - if err != nil { - return nil, err - } - // We need to use a different connection for executing the DDL on MySQL - // because the previous DMLs are running in a transaction and we don't want to autocommit - // those changes. - ddlConn, err := qre.getConn() - if err != nil { - return nil, err - } - defer ddlConn.Recycle() - // If MySQL fails, then we will Rollback the changes. - return ddlConn.Exec(qre.ctx, sqlparser.String(qre.plan.FullStmt), 1000, true) -} - -func (qre *QueryExecutor) execCreateViewDDL(conn *StatefulConnection, stmt *sqlparser.CreateView) (*sqltypes.Result, error) { - bindVars := generateBindVarsForViewDDLInsert(stmt) - sql, _, err := qre.generateFinalSQL(qre.plan.FullQuery, bindVars) - if err != nil { - return nil, err - } - qr, err := execWithDDLView(qre.ctx, conn, sql) - if err != nil { - sqlErr, isSQLErr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) - // If it is a MySQL error and its code is of duplicate entry, - // then we would return duplicate create view error. - if isSQLErr && sqlErr.Number() == mysql.ERDupEntry { - return nil, vterrors.Errorf(vtrpcpb.Code_ALREADY_EXISTS, "Table '%s' already exists", stmt.ViewName.Name.String()) - } - return nil, err - } - return qr, nil -} - -func (qre *QueryExecutor) execAlterViewDDL(conn *StatefulConnection, stmt *sqlparser.AlterView) (*sqltypes.Result, error) { - createViewDDL := &sqlparser.CreateView{ - ViewName: stmt.ViewName, - Algorithm: stmt.Algorithm, - Definer: stmt.Definer, - Security: stmt.Security, - Columns: stmt.Columns, - Select: stmt.Select, - CheckOption: stmt.CheckOption, - Comments: stmt.Comments, - } - bindVars := generateBindVarsForViewDDLInsert(createViewDDL) - sql, _, err := qre.generateFinalSQL(qre.plan.FullQuery, bindVars) - if err != nil { - return nil, err - } - qr, err := execWithDDLView(qre.ctx, conn, sql) - if err != nil { - return nil, err - } - if qr.RowsAffected == 0 { - return nil, vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "Table '%s' does not exist", stmt.ViewName.Name.String()) - } - return qr, nil -} - -func (qre *QueryExecutor) execDropViewDDL(conn *StatefulConnection) (*sqltypes.Result, error) { - viewsMap := make(map[string]int) - stmt := qre.plan.FullStmt.(*sqlparser.DropView) - var viewNames []string - for pos, view := range stmt.FromTables { - viewName := view.Name.String() - if _, exists := viewsMap[viewName]; exists { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Not unique view: '%s'", viewName) - } - viewNames = append(viewNames, viewName) - viewsMap[viewName] = pos - } - viewNamesBV, err := sqltypes.BuildBindVariable(viewNames) - if err != nil { - return nil, err - } - bindVars := map[string]*querypb.BindVariable{ - "table_name": viewNamesBV, - } - - sql, _, err := qre.generateFinalSQL(qre.plan.FullQuery, bindVars) - if err != nil { - return nil, err - } - _, err = execWithDDLView(qre.ctx, conn, sql) - if err != nil { - return nil, err - } - - // Drop the view on MySQL too. - return conn.Exec(qre.ctx, sqlparser.String(qre.plan.FullStmt), 1000, true) -} - -func execWithDDLView(ctx context.Context, conn *StatefulConnection, sql string) (*sqltypes.Result, error) { - return conn.Exec(ctx, sql, 10000, true) -} - // Stream performs a streaming query execution. func (qre *QueryExecutor) Stream(callback StreamCallback) error { qre.logStats.PlanType = qre.plan.PlanID.String() defer func(start time.Time) { qre.tsv.stats.QueryTimings.Record(qre.plan.PlanID.String(), start) + qre.tsv.stats.QueryTimingsByTabletType.Record(qre.tabletType.String(), start) qre.recordUserQuery("Stream", int64(time.Since(start))) }(time.Now()) @@ -503,6 +404,7 @@ func (qre *QueryExecutor) MessageStream(callback StreamCallback) error { defer func(start time.Time) { qre.tsv.stats.QueryTimings.Record(qre.plan.PlanID.String(), start) + qre.tsv.stats.QueryTimingsByTabletType.Record(qre.tabletType.String(), start) qre.recordUserQuery("MessageStream", int64(time.Since(start))) }(time.Now()) @@ -542,10 +444,11 @@ func (qre *QueryExecutor) checkPermissions() error { username = ci.Username() } - bufferingTimeoutCtx, cancel := context.WithTimeout(qre.ctx, maxQueryBufferDuration) + action, ruleCancelCtx, timeout, desc := qre.plan.Rules.GetAction(remoteAddr, username, qre.bindVars, qre.marginComments) + + bufferingTimeoutCtx, cancel := context.WithTimeout(qre.ctx, timeout) // aborts buffering at given timeout defer cancel() - action, ruleCancelCtx, desc := qre.plan.Rules.GetAction(remoteAddr, username, qre.bindVars, qre.marginComments) switch action { case rules.QRFail: return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "disallowed due to rule: %s", desc) @@ -560,7 +463,7 @@ func (qre *QueryExecutor) checkPermissions() error { // good! We have buffered the query, and buffering is completed case <-bufferingTimeoutCtx.Done(): // Sorry, timeout while waiting for buffering to complete - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "buffer timeout in rule: %s", desc) + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "buffer timeout after %v in rule: %s", timeout, desc) } } default: @@ -656,7 +559,7 @@ func (qre *QueryExecutor) execDDL(conn *StatefulConnection) (*sqltypes.Result, e // Instead of synchronously recalculating table size stats // after every DDL, let them be outdated until the periodic // schema reload fixes it. - if err := qre.tsv.se.ReloadAtEx(qre.ctx, mysql.Position{}, false); err != nil { + if err := qre.tsv.se.ReloadAtEx(qre.ctx, replication.Position{}, false); err != nil { log.Errorf("failed to reload schema %v", err) } }() @@ -709,13 +612,13 @@ func (*QueryExecutor) BeginAgain(ctx context.Context, dc *StatefulConnection) er } func (qre *QueryExecutor) execNextval() (*sqltypes.Result, error) { - env := evalengine.EnvWithBindVars(qre.bindVars, collations.Unknown) + env := evalengine.NewExpressionEnv(qre.ctx, qre.bindVars, nil) result, err := env.Evaluate(qre.plan.NextCount) if err != nil { return nil, err } tableName := qre.plan.TableName() - v := result.Value() + v := result.Value(collations.Default()) inc, err := v.ToInt64() if err != nil || inc < 1 { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid increment for sequence %s: %s", tableName, v.String()) @@ -734,7 +637,7 @@ func (qre *QueryExecutor) execNextval() (*sqltypes.Result, error) { if len(qr.Rows) != 1 { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected rows from reading sequence %s (possible mis-route): %d", tableName, len(qr.Rows)) } - nextID, err := evalengine.ToInt64(qr.Rows[0][0]) + nextID, err := qr.Rows[0][0].ToCastInt64() if err != nil { return nil, vterrors.Wrapf(err, "error loading sequence %s", tableName) } @@ -749,7 +652,7 @@ func (qre *QueryExecutor) execNextval() (*sqltypes.Result, error) { t.SequenceInfo.NextVal = nextID t.SequenceInfo.LastVal = nextID } - cache, err := evalengine.ToInt64(qr.Rows[0][1]) + cache, err := qr.Rows[0][1].ToCastInt64() if err != nil { return nil, vterrors.Wrapf(err, "error loading sequence %s", tableName) } @@ -798,10 +701,12 @@ func (qre *QueryExecutor) execSelect() (*sqltypes.Result, error) { conn, err := qre.getConn() if err != nil { - q.Err = err + q.SetErr(err) } else { defer conn.Recycle() - q.Result, q.Err = qre.execDBConn(conn, sql, true) + res, err := qre.execDBConn(conn, sql, true) + q.SetResult(res) + q.SetErr(err) } } else { qre.logStats.QuerySources |= tabletenv.QuerySourceConsolidator @@ -809,10 +714,10 @@ func (qre *QueryExecutor) execSelect() (*sqltypes.Result, error) { q.Wait() qre.tsv.stats.WaitTimings.Record("Consolidations", startTime) } - if q.Err != nil { - return nil, q.Err + if q.Err() != nil { + return nil, q.Err() } - return q.Result.(*sqltypes.Result), nil + return q.Result(), nil } conn, err := qre.getConn() if err != nil { @@ -850,7 +755,7 @@ func (qre *QueryExecutor) verifyRowCount(count, maxrows int64) error { if warnThreshold > 0 && count > warnThreshold { callerID := callerid.ImmediateCallerIDFromContext(qre.ctx) qre.tsv.Stats().Warnings.Add("ResultsExceeded", 1) - log.Warningf("caller id: %s row count %v exceeds warning threshold %v: %q", callerID.Username, count, warnThreshold, queryAsString(qre.plan.FullQuery.Query, qre.bindVars, qre.tsv.Config().SanitizeLogMessages)) + log.Warningf("caller id: %s row count %v exceeds warning threshold %v: %q", callerID.Username, count, warnThreshold, queryAsString(qre.plan.FullQuery.Query, qre.bindVars, qre.tsv.Config().SanitizeLogMessages, true)) } return nil } @@ -973,11 +878,11 @@ func addMySQLOptimizerHints(config *tabletenv.TabletConfig, query string) string } func rewriteOUTParamError(err error) error { - sqlErr, ok := err.(*mysql.SQLError) + sqlErr, ok := err.(*sqlerror.SQLError) if !ok { return err } - if sqlErr.Num == mysql.ErSPNotVarArg { + if sqlErr.Num == sqlerror.ErSPNotVarArg { return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "OUT and INOUT parameters are not supported") } return err @@ -1079,6 +984,13 @@ func (qre *QueryExecutor) execRevertMigration() (*sqltypes.Result, error) { return qre.tsv.onlineDDLExecutor.SubmitMigration(qre.ctx, qre.plan.FullStmt) } +func (qre *QueryExecutor) execShowMigrations() (*sqltypes.Result, error) { + if showStmt, ok := qre.plan.FullStmt.(*sqlparser.Show); ok { + return qre.tsv.onlineDDLExecutor.ShowMigrations(qre.ctx, showStmt) + } + return nil, vterrors.New(vtrpcpb.Code_INTERNAL, "Expecting SHOW VITESS_MIGRATIONS plan") +} + func (qre *QueryExecutor) execShowMigrationLogs() (*sqltypes.Result, error) { if showMigrationLogsStmt, ok := qre.plan.FullStmt.(*sqlparser.ShowMigrationLogs); ok { return qre.tsv.onlineDDLExecutor.ShowMigrationLogs(qre.ctx, showMigrationLogsStmt) @@ -1087,7 +999,7 @@ func (qre *QueryExecutor) execShowMigrationLogs() (*sqltypes.Result, error) { } func (qre *QueryExecutor) execShowThrottledApps() (*sqltypes.Result, error) { - if err := qre.tsv.lagThrottler.CheckIsReady(); err != nil { + if err := qre.tsv.lagThrottler.CheckIsOpen(); err != nil { return nil, err } if _, ok := qre.plan.FullStmt.(*sqlparser.ShowThrottledApps); !ok { @@ -1126,7 +1038,7 @@ func (qre *QueryExecutor) execShowThrottlerStatus() (*sqltypes.Result, error) { return nil, vterrors.New(vtrpcpb.Code_INTERNAL, "Expecting SHOW VITESS_THROTTLER STATUS plan") } var enabled int32 - if err := qre.tsv.lagThrottler.CheckIsReady(); err == nil { + if qre.tsv.lagThrottler.IsEnabled() { enabled = 1 } result := &sqltypes.Result{ @@ -1239,57 +1151,54 @@ func (qre *QueryExecutor) recordUserQuery(queryType string, duration int64) { qre.tsv.Stats().UserTableQueryTimesNs.Add([]string{tableName, username, queryType}, duration) } -func generateBindVarsForViewDDLInsert(createView *sqlparser.CreateView) map[string]*querypb.BindVariable { - bindVars := make(map[string]*querypb.BindVariable) - bindVars["table_name"] = sqltypes.StringBindVariable(createView.ViewName.Name.String()) - bindVars["create_statement"] = sqltypes.StringBindVariable(sqlparser.String(createView)) - return bindVars -} - func (qre *QueryExecutor) GetSchemaDefinitions(tableType querypb.SchemaTableType, tableNames []string, callback func(schemaRes *querypb.GetSchemaResponse) error) error { switch tableType { case querypb.SchemaTableType_VIEWS: return qre.getViewDefinitions(tableNames, callback) + case querypb.SchemaTableType_TABLES: + return qre.getTableDefinitions(tableNames, callback) + case querypb.SchemaTableType_ALL: + return qre.getAllDefinitions(tableNames, callback) } return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid table type %v", tableType) } func (qre *QueryExecutor) getViewDefinitions(viewNames []string, callback func(schemaRes *querypb.GetSchemaResponse) error) error { - query := mysql.FetchViews - var bindVars map[string]*querypb.BindVariable - if len(viewNames) > 0 { - query = mysql.FetchUpdatedViews - bindVars = map[string]*querypb.BindVariable{ - "viewnames": sqltypes.StringBindVariable(strings.Join(viewNames, ",")), - } + query, err := eschema.GetFetchViewQuery(viewNames) + if err != nil { + return err } - return qre.generateFinalQueryAndStreamExecute(query, bindVars, func(result *sqltypes.Result) error { - schemaDef := make(map[string]string) - for _, row := range result.Rows { - schemaDef[row[0].ToString()] = row[1].ToString() - } - return callback(&querypb.GetSchemaResponse{TableDefinition: schemaDef}) - }) + return qre.executeGetSchemaQuery(query, callback) } -func (qre *QueryExecutor) generateFinalQueryAndStreamExecute(query string, bindVars map[string]*querypb.BindVariable, callback func(result *sqltypes.Result) error) error { - sql := query - if len(bindVars) > 0 { - stmt, err := sqlparser.Parse(query) - if err != nil { - return err - } - sql, _, err = qre.generateFinalSQL(sqlparser.NewParsedQuery(stmt), bindVars) - if err != nil { - return err - } +func (qre *QueryExecutor) getTableDefinitions(tableNames []string, callback func(schemaRes *querypb.GetSchemaResponse) error) error { + query, err := eschema.GetFetchTableQuery(tableNames) + if err != nil { + return err + } + return qre.executeGetSchemaQuery(query, callback) +} + +func (qre *QueryExecutor) getAllDefinitions(tableNames []string, callback func(schemaRes *querypb.GetSchemaResponse) error) error { + query, err := eschema.GetFetchTableAndViewsQuery(tableNames) + if err != nil { + return err } + return qre.executeGetSchemaQuery(query, callback) +} +func (qre *QueryExecutor) executeGetSchemaQuery(query string, callback func(schemaRes *querypb.GetSchemaResponse) error) error { conn, err := qre.getStreamConn() if err != nil { return err } defer conn.Recycle() - return qre.execStreamSQL(conn, false /* isTransaction */, sql, callback) + return qre.execStreamSQL(conn, false /* isTransaction */, query, func(result *sqltypes.Result) error { + schemaDef := make(map[string]string) + for _, row := range result.Rows { + schemaDef[row[0].ToString()] = row[1].ToString() + } + return callback(&querypb.GetSchemaResponse{TableDefinition: schemaDef}) + }) } diff --git a/go/vt/vttablet/tabletserver/query_executor_test.go b/go/vt/vttablet/tabletserver/query_executor_test.go index 4b7be6b1b00..393fb9dfdaf 100644 --- a/go/vt/vttablet/tabletserver/query_executor_test.go +++ b/go/vt/vttablet/tabletserver/query_executor_test.go @@ -23,10 +23,7 @@ import ( "math/rand" "strings" "testing" - - "vitess.io/vitess/go/vt/sidecardb" - - "vitess.io/vitess/go/vt/vttablet/tabletserver/tx" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -34,9 +31,11 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/sync2" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/callinfo" "vitess.io/vitess/go/vt/callinfo/fakecallinfo" + "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/tableacl" "vitess.io/vitess/go/vt/tableacl/simpleacl" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -44,6 +43,8 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/planbuilder" "vitess.io/vitess/go/vt/vttablet/tabletserver/rules" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tx" + "vitess.io/vitess/go/vt/vttablet/tabletserver/txthrottler" querypb "vitess.io/vitess/go/vt/proto/query" tableaclpb "vitess.io/vitess/go/vt/proto/tableacl" @@ -81,6 +82,10 @@ func TestQueryExecutorPlans(t *testing.T) { // inTxWant is the query log we expect if we're in a transation. // If empty, then we should expect the same as logWant. inTxWant string + // errorWant is the error we expect to get, if any, and should be nil if no error should be returned + errorWant error + // TxThrottler allows the test case to override the transaction throttler + txThrottler txthrottler.TxThrottler }{{ input: "select * from t", dbResponses: []dbResponse{{ @@ -267,7 +272,25 @@ func TestQueryExecutorPlans(t *testing.T) { resultWant: emptyResult, planWant: "Show", logWant: "show create table mysql.`user`", - }} + }, { + input: "update test_table set a=1", + dbResponses: []dbResponse{{ + query: "update test_table set a = 1 limit 10001", + result: dmlResult, + }}, + errorWant: errTxThrottled, + txThrottler: &mockTxThrottler{true}, + }, { + input: "update test_table set a=1", + passThrough: true, + dbResponses: []dbResponse{{ + query: "update test_table set a = 1 limit 10001", + result: dmlResult, + }}, + errorWant: errTxThrottled, + txThrottler: &mockTxThrottler{true}, + }, + } for _, tcase := range testcases { t.Run(tcase.input, func(t *testing.T) { db := setUpQueryExecutorTest(t) @@ -277,6 +300,9 @@ func TestQueryExecutorPlans(t *testing.T) { } ctx := context.Background() tsv := newTestTabletServer(ctx, noFlags, db) + if tcase.txThrottler != nil { + tsv.txThrottler = tcase.txThrottler + } tsv.config.DB.DBName = "ks" defer tsv.StopService() @@ -285,32 +311,39 @@ func TestQueryExecutorPlans(t *testing.T) { // Test outside a transaction. qre := newTestQueryExecutor(ctx, tsv, tcase.input, 0) got, err := qre.Execute() - require.NoError(t, err, tcase.input) - assert.Equal(t, tcase.resultWant, got, tcase.input) - assert.Equal(t, tcase.planWant, qre.logStats.PlanType, tcase.input) - assert.Equal(t, tcase.logWant, qre.logStats.RewrittenSQL(), tcase.input) - + if tcase.errorWant == nil { + require.NoError(t, err, tcase.input) + assert.Equal(t, tcase.resultWant, got, tcase.input) + assert.Equal(t, tcase.planWant, qre.logStats.PlanType, tcase.input) + assert.Equal(t, tcase.logWant, qre.logStats.RewrittenSQL(), tcase.input) + } else { + assert.True(t, vterrors.Equals(err, tcase.errorWant)) + } // Wait for the existing query to be processed by the cache - tsv.QueryPlanCacheWait() + time.Sleep(100 * time.Millisecond) // Test inside a transaction. target := tsv.sm.Target() state, err := tsv.Begin(ctx, target, nil) - require.NoError(t, err) - require.NotNil(t, state.TabletAlias, "alias should not be nil") - assert.Equal(t, tsv.alias, state.TabletAlias, "Wrong alias returned by Begin") - defer tsv.Commit(ctx, target, state.TransactionID) - - qre = newTestQueryExecutor(ctx, tsv, tcase.input, state.TransactionID) - got, err = qre.Execute() - require.NoError(t, err, tcase.input) - assert.Equal(t, tcase.resultWant, got, "in tx: %v", tcase.input) - assert.Equal(t, tcase.planWant, qre.logStats.PlanType, "in tx: %v", tcase.input) - want := tcase.logWant - if tcase.inTxWant != "" { - want = tcase.inTxWant + if tcase.errorWant == nil { + require.NoError(t, err) + require.NotNil(t, state.TabletAlias, "alias should not be nil") + assert.Equal(t, tsv.alias, state.TabletAlias, "Wrong alias returned by Begin") + defer tsv.Commit(ctx, target, state.TransactionID) + + qre = newTestQueryExecutor(ctx, tsv, tcase.input, state.TransactionID) + got, err = qre.Execute() + require.NoError(t, err, tcase.input) + assert.Equal(t, tcase.resultWant, got, "in tx: %v", tcase.input) + assert.Equal(t, tcase.planWant, qre.logStats.PlanType, "in tx: %v", tcase.input) + want := tcase.logWant + if tcase.inTxWant != "" { + want = tcase.inTxWant + } + assert.Equal(t, want, qre.logStats.RewrittenSQL(), "in tx: %v", tcase.input) + } else { + assert.True(t, vterrors.Equals(err, tcase.errorWant)) } - assert.Equal(t, want, qre.logStats.RewrittenSQL(), "in tx: %v", tcase.input) }) } } @@ -380,7 +413,7 @@ func TestQueryExecutorQueryAnnotation(t *testing.T) { assert.Equal(t, tcase.logWant, qre.logStats.RewrittenSQL(), tcase.input) // Wait for the existing query to be processed by the cache - tsv.QueryPlanCacheWait() + time.Sleep(100 * time.Millisecond) // Test inside a transaction. target := tsv.sm.Target() @@ -489,7 +522,7 @@ func TestDisableOnlineDDL(t *testing.T) { qre = newTestQueryExecutor(ctx, tsv, query, 0) _, err = qre.Execute() - require.EqualError(t, err, "online ddl is disabled") + require.EqualError(t, err, "online DDL is disabled") } func TestQueryExecutorLimitFailure(t *testing.T) { @@ -756,6 +789,8 @@ func TestQueryExecutorPlanNextval(t *testing.T) { } func TestQueryExecutorMessageStreamACL(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() aclName := fmt.Sprintf("simpleacl-test-%d", rand.Int63()) tableacl.Register(aclName, &simpleacl.Factory{}) tableacl.SetDefaultACL(aclName) @@ -785,7 +820,7 @@ func TestQueryExecutorMessageStreamACL(t *testing.T) { callerID := &querypb.VTGateCallerID{ Username: "u1", } - ctx := callerid.NewContext(context.Background(), nil, callerID) + ctx = callerid.NewContext(ctx, nil, callerID) qre := &QueryExecutor{ ctx: ctx, query: "stream from msg", @@ -1261,165 +1296,137 @@ func TestReplaceSchemaName(t *testing.T) { } } -// TODO(maxeng) This is currently flaky. Skipping for now to avoid slowing down developers. -// -// Plans to rework this test. -// - Use mock consolidator and mock db instead of real consolidator and fakedb. -// - Run a single query per test case. Simulate concurrent queries through mock -// consolidator. func TestQueryExecutorShouldConsolidate(t *testing.T) { - t.Skip() - - testcases := []struct { - consolidates []bool - executorFlags executorFlags - name string - // Whether or not query consolidator is requested. - options []querypb.ExecuteOptions_Consolidator - // Whether or not query is consolidated. - queries []string - }{{ - consolidates: []bool{ - false, - false, - false, - true, + testCases := []struct { + // whether or not the consolidator is enabled by default on the tablet + consolidatorEnabledByDefault bool + // query-specific consolidator override, unspecified by default + consolidatorExecuteOption querypb.ExecuteOptions_Consolidator + // whether or not the consolidator is waiting on the results of an + // identical running query + consolidatorHasIdenticalQuery bool + // whether or not the query should be consolidated + expectConsolidate bool + // whether or not the query should be exec'd (= sent to db) + expectExec bool + // query to run + input string + }{ + { + consolidatorEnabledByDefault: true, + consolidatorExecuteOption: querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED, + consolidatorHasIdenticalQuery: false, + expectConsolidate: true, + expectExec: true, + input: "select * from t limit 10001", }, - executorFlags: noFlags, - name: "vttablet-consolidator-disabled", - options: []querypb.ExecuteOptions_Consolidator{ - querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED, - querypb.ExecuteOptions_CONSOLIDATOR_ENABLED, - querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED, - querypb.ExecuteOptions_CONSOLIDATOR_ENABLED, + { + consolidatorEnabledByDefault: true, + consolidatorExecuteOption: querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED, + consolidatorHasIdenticalQuery: true, + expectConsolidate: true, + expectExec: false, + input: "select * from t limit 10001", }, - queries: []string{ - "select * from t limit 10001", - // The previous query isn't passed to the query consolidator, - // so the next query can't consolidate into it. - "select * from t limit 10001", - "select * from t limit 10001", - // This query should consolidate into the previous query - // that was passed to the consolidator. - "select * from t limit 10001", + { + consolidatorEnabledByDefault: true, + consolidatorExecuteOption: querypb.ExecuteOptions_CONSOLIDATOR_DISABLED, + consolidatorHasIdenticalQuery: true, + expectConsolidate: false, + expectExec: true, + input: "select * from t limit 10001", }, - }, { - consolidates: []bool{ - false, - true, - false, - true, - false, + { + consolidatorEnabledByDefault: false, + consolidatorExecuteOption: querypb.ExecuteOptions_CONSOLIDATOR_DISABLED, + consolidatorHasIdenticalQuery: true, + expectConsolidate: false, + expectExec: true, + input: "select * from t limit 10001", }, - executorFlags: enableConsolidator, - name: "consolidator=enabled", - options: []querypb.ExecuteOptions_Consolidator{ - querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED, - querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED, - querypb.ExecuteOptions_CONSOLIDATOR_DISABLED, - querypb.ExecuteOptions_CONSOLIDATOR_UNSPECIFIED, - querypb.ExecuteOptions_CONSOLIDATOR_DISABLED, + { + consolidatorEnabledByDefault: false, + consolidatorExecuteOption: querypb.ExecuteOptions_CONSOLIDATOR_ENABLED, + consolidatorHasIdenticalQuery: false, + expectConsolidate: true, + expectExec: true, + input: "select * from t limit 10001", }, - queries: []string{ - "select * from t limit 10001", - "select * from t limit 10001", - // This query shouldn't be passed to the consolidator. - "select * from t limit 10001", - "select * from t limit 10001", - // This query shouldn't be passed to the consolidator. - "select * from t limit 10001", + { + consolidatorEnabledByDefault: false, + consolidatorExecuteOption: querypb.ExecuteOptions_CONSOLIDATOR_ENABLED, + consolidatorHasIdenticalQuery: true, + expectConsolidate: true, + expectExec: false, + input: "select * from t limit 10001", }, - }} - for _, tcase := range testcases { - t.Run(tcase.name, func(t *testing.T) { + } + for _, tcase := range testCases { + name := fmt.Sprintf("table-consolidator:%t;query-consolidator:%v;identical-query:%t", + tcase.consolidatorEnabledByDefault, tcase.consolidatorExecuteOption, tcase.consolidatorHasIdenticalQuery) + t.Run(name, func(t *testing.T) { + // Set up fake db, tablet server (with fake consolidator), and executor. + db := setUpQueryExecutorTest(t) + defer db.Close() ctx := context.Background() - tsv := newTestTabletServer(ctx, tcase.executorFlags, db) + flags := noFlags + if tcase.consolidatorEnabledByDefault { + flags = enableConsolidator + } - defer db.Close() + tsv := newTestTabletServer(ctx, flags, db) defer tsv.StopService() - doneCh := make(chan bool, len(tcase.queries)) - readyCh := make(chan bool, len(tcase.queries)) - var qres []*QueryExecutor - var waitChs []chan bool - - for i, input := range tcase.queries { - qre := newTestQueryExecutor(ctx, tsv, input, 0) - qre.options = &querypb.ExecuteOptions{ - Consolidator: tcase.options[i], - } - qres = append(qres, qre) + fakeConsolidator := sync2.NewFakeConsolidator() + tsv.qe.consolidator = fakeConsolidator - // If this query is consolidated, don't add a fakesqldb expectation. - if tcase.consolidates[i] { - continue - } + qre := newTestQueryExecutor(context.Background(), tsv, tcase.input, 0) + qre.options = &querypb.ExecuteOptions{Consolidator: tcase.consolidatorExecuteOption} - // Set up a query expectation. - waitCh := make(chan bool) - waitChs = append(waitChs, waitCh) - db.AddExpectedExecuteFetchAtIndex(i, fakesqldb.ExpectedExecuteFetch{ - AfterFunc: func() { - // Signal that we're ready to proceed. - readyCh <- true - // Wait until we're signaled to proceed. - <-waitCh - }, - Query: input, - QueryResult: &sqltypes.Result{ - Fields: getTestTableFields(), - }, - }) + result := &sqltypes.Result{ + Fields: getTestTableFields(), } - db.OrderMatters() - db.SetNeverFail(true) + // Set up consolidator pre-conditions. - for i, input := range tcase.queries { - qre := qres[i] - go func(i int, input string, qre *QueryExecutor) { - // Execute the query. - _, err := qre.Execute() - - require.NoError(t, err, fmt.Sprintf( - "input[%d]=%q,querySources=%v", i, input, qre.logStats.QuerySources, - )) + fakePendingResult := &sync2.FakePendingResult{} + fakePendingResult.SetResult(result) + fakeConsolidator.CreateReturn = &sync2.FakeConsolidatorCreateReturn{ + Created: !tcase.consolidatorHasIdenticalQuery, + PendingResult: fakePendingResult, + } - // Signal that the query is done. - doneCh <- true - }(i, input, qre) + // Set up database query/response. - // If this query is consolidated, don't wait for fakesqldb to - // tell us query is ready is ready. - if tcase.consolidates[i] { - continue - } - - // Wait until query is queued up before starting next one. - <-readyCh - } + db.AddQuery(tcase.input, result) - // Signal ready queries to return. - for i := 0; i < len(waitChs); i++ { - close(waitChs[i]) - } + // Execute query. - // Wait for queries to finish. - for i := 0; i < len(qres); i++ { - <-doneCh + _, err := qre.Execute() + require.Nil(t, err) + + // Verify expectations. + + if tcase.expectConsolidate { + require.Len(t, fakeConsolidator.CreateCalls, 1) + require.Len(t, fakeConsolidator.CreateReturns, 1) + if tcase.consolidatorHasIdenticalQuery { + require.Equal(t, 0, fakePendingResult.BroadcastCalls) + require.Equal(t, 1, fakePendingResult.WaitCalls) + } else { + require.Equal(t, 1, fakePendingResult.BroadcastCalls) + require.Equal(t, 0, fakePendingResult.WaitCalls) + } + } else { + require.Len(t, fakeConsolidator.CreateCalls, 0) } - for i := 0; i < len(tcase.consolidates); i++ { - input := tcase.queries[i] - qre := qres[i] - want := tcase.consolidates[i] - got := qre.logStats.QuerySources&tabletenv.QuerySourceConsolidator != 0 - - require.Equal(t, want, got, fmt.Sprintf( - "input[%d]=%q,querySources=%v", i, input, qre.logStats.QuerySources, - )) + if tcase.expectExec { + require.Equal(t, 1, db.GetQueryCalledNum(tcase.input)) + } else { + require.Equal(t, 0, db.GetQueryCalledNum(tcase.input)) } db.VerifyAllExecutedOrFail() @@ -1480,7 +1487,7 @@ func newTestTabletServer(ctx context.Context, flags executorFlags, db *fakesqldb } dbconfigs := newDBConfigs(db) config.DB = dbconfigs - tsv := NewTabletServer("TabletServerTest", config, memorytopo.NewServer(""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} err := tsv.StartService(target, dbconfigs, nil /* mysqld */) if config.TwoPCEnable { @@ -1799,3 +1806,22 @@ func TestAddMySQLOptimizerHints(t *testing.T) { t.Logf("sql: %v", addMySQLOptimizerHints(config, "select * from something")) } } + +type mockTxThrottler struct { + throttle bool +} + +func (m mockTxThrottler) InitDBConfig(target *querypb.Target) { + panic("implement me") +} + +func (m mockTxThrottler) Open() (err error) { + return nil +} + +func (m mockTxThrottler) Close() { +} + +func (m mockTxThrottler) Throttle(priority int, workload string) (result bool) { + return m.throttle +} diff --git a/go/vt/vttablet/tabletserver/query_list.go b/go/vt/vttablet/tabletserver/query_list.go index e78199c50ad..efe63ab0a8e 100644 --- a/go/vt/vttablet/tabletserver/query_list.go +++ b/go/vt/vttablet/tabletserver/query_list.go @@ -18,11 +18,12 @@ package tabletserver import ( "context" - "html/template" "sort" "sync" "time" + "github.com/google/safehtml" + "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/vt/callinfo" "vitess.io/vitess/go/vt/sqlparser" @@ -128,7 +129,7 @@ func (ql *QueryList) TerminateAll() { type QueryDetailzRow struct { Type string Query string - ContextHTML template.HTML + ContextHTML safehtml.HTML Start time.Time Duration time.Duration ConnID int64 diff --git a/go/vt/vttablet/tabletserver/querylogz.go b/go/vt/vttablet/tabletserver/querylogz.go index f13491846fb..41a40a0720c 100644 --- a/go/vt/vttablet/tabletserver/querylogz.go +++ b/go/vt/vttablet/tabletserver/querylogz.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/acl" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logz" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -86,7 +87,7 @@ var ( ) func init() { - http.HandleFunc("/querylogz", func(w http.ResponseWriter, r *http.Request) { + servenv.HTTPHandleFunc("/querylogz", func(w http.ResponseWriter, r *http.Request) { ch := tabletenv.StatsLogger.Subscribe("querylogz") defer tabletenv.StatsLogger.Unsubscribe(ch) querylogzHandler(ch, w, r) diff --git a/go/vt/vttablet/tabletserver/queryz.go b/go/vt/vttablet/tabletserver/queryz.go index 3b5cb577af2..151f028ca09 100644 --- a/go/vt/vttablet/tabletserver/queryz.go +++ b/go/vt/vttablet/tabletserver/queryz.go @@ -18,11 +18,12 @@ package tabletserver import ( "fmt" - "html/template" "net/http" "sort" "time" + "github.com/google/safehtml/template" + "vitess.io/vitess/go/acl" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logz" @@ -151,8 +152,7 @@ func queryzHandler(qe *QueryEngine, w http.ResponseWriter, r *http.Request) { return row1.timePQ() > row2.timePQ() }, } - qe.plans.ForEach(func(value any) bool { - plan := value.(*TabletPlan) + qe.ForEachPlan(func(plan *TabletPlan) bool { if plan == nil { return true } diff --git a/go/vt/vttablet/tabletserver/queryz_test.go b/go/vt/vttablet/tabletserver/queryz_test.go index a0bea742e04..8e1b7b38cfd 100644 --- a/go/vt/vttablet/tabletserver/queryz_test.go +++ b/go/vt/vttablet/tabletserver/queryz_test.go @@ -46,7 +46,7 @@ func TestQueryzHandler(t *testing.T) { }, } plan1.AddStats(10, 2*time.Second, 1*time.Second, 0, 2, 0) - qe.plans.Set(query1, plan1) + qe.plans.Set(query1, plan1, 0, 0) const query2 = "insert into test_table values 1" plan2 := &TabletPlan{ @@ -57,7 +57,7 @@ func TestQueryzHandler(t *testing.T) { }, } plan2.AddStats(1, 2*time.Millisecond, 1*time.Millisecond, 1, 0, 0) - qe.plans.Set(query2, plan2) + qe.plans.Set(query2, plan2, 0, 0) const query3 = "show tables" plan3 := &TabletPlan{ @@ -68,8 +68,8 @@ func TestQueryzHandler(t *testing.T) { }, } plan3.AddStats(1, 75*time.Millisecond, 50*time.Millisecond, 0, 1, 0) - qe.plans.Set(query3, plan3) - qe.plans.Set("", (*TabletPlan)(nil)) + qe.plans.Set(query3, plan3, 0, 0) + qe.plans.Set("", (*TabletPlan)(nil), 0, 0) hugeInsert := "insert into test_table values 0" for i := 1; i < 1000; i++ { @@ -83,11 +83,11 @@ func TestQueryzHandler(t *testing.T) { }, } plan4.AddStats(1, 1*time.Millisecond, 1*time.Millisecond, 1, 0, 0) - qe.plans.Set(hugeInsert, plan4) - qe.plans.Set("", (*TabletPlan)(nil)) + qe.plans.Set(PlanCacheKey(hugeInsert), plan4, 0, 0) + qe.plans.Set("", (*TabletPlan)(nil), 0, 0) // Wait for cache to settle - qe.plans.Wait() + time.Sleep(100 * time.Millisecond) queryzHandler(qe, resp, req) body, _ := io.ReadAll(resp.Body) diff --git a/go/vt/vttablet/tabletserver/repltracker/reader.go b/go/vt/vttablet/tabletserver/repltracker/reader.go index e58565c9147..fc42a367989 100644 --- a/go/vt/vttablet/tabletserver/repltracker/reader.go +++ b/go/vt/vttablet/tabletserver/repltracker/reader.go @@ -17,16 +17,14 @@ limitations under the License. package repltracker import ( + "context" "fmt" "sync" "time" - "vitess.io/vitess/go/vt/vtgate/evalengine" - + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/vt/vterrors" - "context" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/log" @@ -122,6 +120,9 @@ func (r *heartbeatReader) Close() { } r.ticks.Stop() r.pool.Close() + + currentLagNs.Set(0) + r.isOpen = false log.Info("Heartbeat Reader: closed") } @@ -189,7 +190,7 @@ func (r *heartbeatReader) bindHeartbeatFetch() (string, error) { bindVars := map[string]*querypb.BindVariable{ "ks": sqltypes.StringBindVariable(r.keyspaceShard), } - parsed := sqlparser.BuildParsedQuery(sqlFetchMostRecentHeartbeat, "_vt", ":ks") + parsed := sqlparser.BuildParsedQuery(sqlFetchMostRecentHeartbeat, sidecar.GetIdentifier(), ":ks") bound, err := parsed.GenerateQuery(bindVars, nil) if err != nil { return "", err @@ -202,7 +203,7 @@ func parseHeartbeatResult(res *sqltypes.Result) (int64, error) { if len(res.Rows) != 1 { return 0, fmt.Errorf("failed to read heartbeat: writer query did not result in 1 row. Got %v", len(res.Rows)) } - ts, err := evalengine.ToInt64(res.Rows[0][0]) + ts, err := res.Rows[0][0].ToCastInt64() if err != nil { return 0, err } diff --git a/go/vt/vttablet/tabletserver/repltracker/reader_test.go b/go/vt/vttablet/tabletserver/repltracker/reader_test.go index c4a8be6e692..54ece70fc1a 100644 --- a/go/vt/vttablet/tabletserver/repltracker/reader_test.go +++ b/go/vt/vttablet/tabletserver/repltracker/reader_test.go @@ -39,9 +39,13 @@ import ( func TestReaderReadHeartbeat(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - tr := newReader(db, mockNowFunc) + + now := time.Now() + tr := newReader(db, &now) defer tr.Close() + tr.pool.Open(tr.env.Config().DB.AppWithDB(), tr.env.Config().DB.DbaWithDB(), tr.env.Config().DB.AppDebugWithDB()) + db.AddQuery(fmt.Sprintf("SELECT ts FROM %s.heartbeat WHERE keyspaceShard='%s'", "_vt", tr.keyspaceShard), &sqltypes.Result{ Fields: []*querypb.Field{ {Name: "ts", Type: sqltypes.Int64}, @@ -79,14 +83,46 @@ func TestReaderReadHeartbeat(t *testing.T) { utils.MustMatch(t, expectedHisto, heartbeatLagNsHistogram.Counts(), "wrong counts in histogram") } +// TestReaderCloseSetsCurrentLagToZero tests that when closing the heartbeat reader, the current lag is +// set to zero. +func TestReaderCloseSetsCurrentLagToZero(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + tr := newReader(db, nil) + + db.AddQuery(fmt.Sprintf("SELECT ts FROM %s.heartbeat WHERE keyspaceShard='%s'", "_vt", tr.keyspaceShard), &sqltypes.Result{ + Fields: []*querypb.Field{ + {Name: "ts", Type: sqltypes.Int64}, + }, + Rows: [][]sqltypes.Value{{ + sqltypes.NewInt64(time.Now().Add(-10 * time.Second).UnixNano()), + }}, + }) + + currentLagNs.Reset() + + tr.Open() + time.Sleep(2 * time.Second) + + assert.Greater(t, currentLagNs.Get(), int64(0), "lag should be greater than zero") + + tr.Close() + + assert.Equal(t, int64(0), currentLagNs.Get(), "lag should be be zero after closing the reader.") +} + // TestReaderReadHeartbeatError tests that we properly account for errors // encountered in the reading of heartbeat. func TestReaderReadHeartbeatError(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - tr := newReader(db, mockNowFunc) + + now := time.Now() + tr := newReader(db, &now) defer tr.Close() + tr.pool.Open(tr.env.Config().DB.AppWithDB(), tr.env.Config().DB.DbaWithDB(), tr.env.Config().DB.AppDebugWithDB()) + cumulativeLagNs.Reset() readErrors.Reset() @@ -100,18 +136,23 @@ func TestReaderReadHeartbeatError(t *testing.T) { assert.Equal(t, int64(1), readErrors.Get(), "wrong read error count") } -func newReader(db *fakesqldb.DB, nowFunc func() time.Time) *heartbeatReader { +func newReader(db *fakesqldb.DB, frozenTime *time.Time) *heartbeatReader { config := tabletenv.NewDefaultConfig() config.ReplicationTracker.Mode = tabletenv.Heartbeat - config.ReplicationTracker.HeartbeatIntervalSeconds = 1 + _ = config.ReplicationTracker.HeartbeatIntervalSeconds.Set("1s") params, _ := db.ConnParams().MysqlParams() cp := *params dbc := dbconfigs.NewTestDBConfigs(cp, cp, "") + config.DB = dbc tr := newHeartbeatReader(tabletenv.NewEnv(config, "ReaderTest")) tr.keyspaceShard = "test:0" - tr.now = nowFunc - tr.pool.Open(dbc.AppWithDB(), dbc.DbaWithDB(), dbc.AppDebugWithDB()) + + if frozenTime != nil { + tr.now = func() time.Time { + return *frozenTime + } + } return tr } diff --git a/go/vt/vttablet/tabletserver/repltracker/repltracker.go b/go/vt/vttablet/tabletserver/repltracker/repltracker.go index 3d6359ed902..5ab44eb774e 100644 --- a/go/vt/vttablet/tabletserver/repltracker/repltracker.go +++ b/go/vt/vttablet/tabletserver/repltracker/repltracker.go @@ -66,7 +66,7 @@ type ReplTracker struct { func NewReplTracker(env tabletenv.Env, alias *topodatapb.TabletAlias) *ReplTracker { return &ReplTracker{ mode: env.Config().ReplicationTracker.Mode, - forceHeartbeat: env.Config().EnableLagThrottler || env.Config().ReplicationTracker.HeartbeatOnDemandSeconds.Get() > 0, + forceHeartbeat: env.Config().ReplicationTracker.HeartbeatOnDemandSeconds.Get() > 0, hw: newHeartbeatWriter(env, alias), hr: newHeartbeatReader(env), poller: &poller{}, diff --git a/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go b/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go index 67f15d44ff2..01912c3f689 100644 --- a/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go +++ b/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go @@ -37,7 +37,7 @@ func TestReplTracker(t *testing.T) { config := tabletenv.NewDefaultConfig() config.ReplicationTracker.Mode = tabletenv.Heartbeat - config.ReplicationTracker.HeartbeatIntervalSeconds = 1 + _ = config.ReplicationTracker.HeartbeatIntervalSeconds.Set("1s") params, _ := db.ConnParams().MysqlParams() cp := *params config.DB = dbconfigs.NewTestDBConfigs(cp, cp, "") diff --git a/go/vt/vttablet/tabletserver/repltracker/writer.go b/go/vt/vttablet/tabletserver/repltracker/writer.go index 4f8a2120834..2b7dcd1ff2e 100644 --- a/go/vt/vttablet/tabletserver/repltracker/writer.go +++ b/go/vt/vttablet/tabletserver/repltracker/writer.go @@ -17,14 +17,13 @@ limitations under the License. package repltracker import ( + "context" "fmt" "sync" "sync/atomic" "time" - "google.golang.org/protobuf/proto" - - "context" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/timer" @@ -43,7 +42,7 @@ const ( sqlUpsertHeartbeat = "INSERT INTO %s.heartbeat (ts, tabletUid, keyspaceShard) VALUES (%a, %a, %a) ON DUPLICATE KEY UPDATE ts=VALUES(ts), tabletUid=VALUES(tabletUid)" ) -// heartbeatWriter runs on primary tablets and writes heartbeats to the _vt.heartbeat +// heartbeatWriter runs on primary tablets and writes heartbeats to the heartbeat // table at a regular interval, defined by heartbeat_interval. type heartbeatWriter struct { env tabletenv.Env @@ -73,14 +72,14 @@ func newHeartbeatWriter(env tabletenv.Env, alias *topodatapb.TabletAlias) *heart config := env.Config() // config.EnableLagThrottler is a feature flag for the throttler; if throttler runs, then heartbeat must also run - if config.ReplicationTracker.Mode != tabletenv.Heartbeat && !config.EnableLagThrottler && config.ReplicationTracker.HeartbeatOnDemandSeconds.Get() == 0 { + if config.ReplicationTracker.Mode != tabletenv.Heartbeat && config.ReplicationTracker.HeartbeatOnDemandSeconds.Get() == 0 { return &heartbeatWriter{} } heartbeatInterval := config.ReplicationTracker.HeartbeatIntervalSeconds.Get() w := &heartbeatWriter{ env: env, enabled: true, - tabletAlias: proto.Clone(alias).(*topodatapb.TabletAlias), + tabletAlias: alias.CloneVT(), now: time.Now, interval: heartbeatInterval, onDemandDuration: config.ReplicationTracker.HeartbeatOnDemandSeconds.Get(), @@ -172,7 +171,7 @@ func (w *heartbeatWriter) bindHeartbeatVars(query string) (string, error) { "ts": sqltypes.Int64BindVariable(w.now().UnixNano()), "uid": sqltypes.Int64BindVariable(int64(w.tabletAlias.Uid)), } - parsed := sqlparser.BuildParsedQuery(query, "_vt", ":ts", ":uid", ":ks") + parsed := sqlparser.BuildParsedQuery(query, sidecar.GetIdentifier(), ":ts", ":uid", ":ks") bound, err := parsed.GenerateQuery(bindVars, nil) if err != nil { return "", err diff --git a/go/vt/vttablet/tabletserver/repltracker/writer_test.go b/go/vt/vttablet/tabletserver/repltracker/writer_test.go index 07ae3186877..5044586c0d2 100644 --- a/go/vt/vttablet/tabletserver/repltracker/writer_test.go +++ b/go/vt/vttablet/tabletserver/repltracker/writer_test.go @@ -30,18 +30,12 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) -var ( - now = time.Now() - mockNowFunc = func() time.Time { - return now - } -) - func TestWriteHeartbeat(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - tw := newTestWriter(db, mockNowFunc) + now := time.Now() + tw := newTestWriter(db, &now) upsert := fmt.Sprintf("INSERT INTO %s.heartbeat (ts, tabletUid, keyspaceShard) VALUES (%d, %d, '%s') ON DUPLICATE KEY UPDATE ts=VALUES(ts), tabletUid=VALUES(tabletUid)", "_vt", now.UnixNano(), tw.tabletAlias.Uid, tw.keyspaceShard) db.AddQuery(upsert, &sqltypes.Result{}) @@ -58,7 +52,8 @@ func TestWriteHeartbeatError(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - tw := newTestWriter(db, mockNowFunc) + now := time.Now() + tw := newTestWriter(db, &now) writes.Reset() writeErrors.Reset() @@ -68,10 +63,10 @@ func TestWriteHeartbeatError(t *testing.T) { assert.Equal(t, int64(1), writeErrors.Get()) } -func newTestWriter(db *fakesqldb.DB, nowFunc func() time.Time) *heartbeatWriter { +func newTestWriter(db *fakesqldb.DB, frozenTime *time.Time) *heartbeatWriter { config := tabletenv.NewDefaultConfig() config.ReplicationTracker.Mode = tabletenv.Heartbeat - config.ReplicationTracker.HeartbeatIntervalSeconds = 1 + _ = config.ReplicationTracker.HeartbeatIntervalSeconds.Set("1s") params, _ := db.ConnParams().MysqlParams() cp := *params @@ -79,7 +74,13 @@ func newTestWriter(db *fakesqldb.DB, nowFunc func() time.Time) *heartbeatWriter tw := newHeartbeatWriter(tabletenv.NewEnv(config, "WriterTest"), &topodatapb.TabletAlias{Cell: "test", Uid: 1111}) tw.keyspaceShard = "test:0" - tw.now = nowFunc + + if frozenTime != nil { + tw.now = func() time.Time { + return *frozenTime + } + } + tw.appPool.Open(dbc.AppWithDB()) tw.allPrivsPool.Open(dbc.AllPrivsWithDB()) diff --git a/go/vt/vttablet/tabletserver/rules/rules.go b/go/vt/vttablet/tabletserver/rules/rules.go index 6c266e2a14f..efbfcdf87e4 100644 --- a/go/vt/vttablet/tabletserver/rules/rules.go +++ b/go/vt/vttablet/tabletserver/rules/rules.go @@ -24,8 +24,7 @@ import ( "reflect" "regexp" "strconv" - - "vitess.io/vitess/go/vt/vtgate/evalengine" + "time" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" @@ -177,13 +176,17 @@ func (qrs *Rules) GetAction( user string, bindVars map[string]*querypb.BindVariable, marginComments sqlparser.MarginComments, -) (action Action, cancelCtx context.Context, desc string) { +) ( + action Action, + cancelCtx context.Context, + timeout time.Duration, + desc string) { for _, qr := range qrs.rules { if act := qr.GetAction(ip, user, bindVars, marginComments); act != QRContinue { - return act, qr.cancelCtx, qr.Description + return act, qr.cancelCtx, qr.timeout, qr.Description } } - return QRContinue, nil, "" + return QRContinue, nil, 0, "" } //----------------------------------------------- @@ -217,8 +220,11 @@ type Rule struct { // Action to be performed on trigger act Action - // a rule can be dynamically cancelled. This function determines whether it is cancelled + // a rule can be dynamically cancelled. cancelCtx context.Context + + // a rule can timeout. + timeout time.Duration } type namedRegexp struct { @@ -246,9 +252,9 @@ func NewQueryRule(description, name string, act Action) (qr *Rule) { } // NewBufferedTableQueryRule creates a new buffer Rule. -func NewBufferedTableQueryRule(cancelCtx context.Context, tableName string, description string) (qr *Rule) { +func NewBufferedTableQueryRule(cancelCtx context.Context, tableName string, bufferTimeout time.Duration, description string) (qr *Rule) { // We ignore act because there's only one action right now - return &Rule{cancelCtx: cancelCtx, Description: description, Name: bufferedTableRuleName, tableNames: []string{tableName}, act: QRBuffer} + return &Rule{cancelCtx: cancelCtx, timeout: bufferTimeout, Description: description, Name: bufferedTableRuleName, tableNames: []string{tableName}, act: QRBuffer} } // Equal returns true if other is equal to this Rule, otherwise false. @@ -263,6 +269,7 @@ func (qr *Rule) Equal(other *Rule) bool { qr.query.Equal(other.query) && qr.leadingComment.Equal(other.leadingComment) && qr.trailingComment.Equal(other.trailingComment) && + qr.timeout == other.timeout && reflect.DeepEqual(qr.plans, other.plans) && reflect.DeepEqual(qr.tableNames, other.tableNames) && reflect.DeepEqual(qr.bindVarConds, other.bindVarConds) && @@ -281,6 +288,7 @@ func (qr *Rule) Copy() (newqr *Rule) { trailingComment: qr.trailingComment, act: qr.act, cancelCtx: qr.cancelCtx, + timeout: qr.timeout, } if qr.plans != nil { newqr.plans = make([]planbuilder.PlanType, len(qr.plans)) @@ -329,6 +337,9 @@ func (qr *Rule) MarshalJSON() ([]byte, error) { if qr.act != QRContinue { safeEncode(b, `,"Action":`, qr.act) } + if qr.timeout != 0 { + safeEncode(b, `,"Timeout":`, qr.timeout) + } _, _ = b.WriteString("}") return b.Bytes(), nil } @@ -819,7 +830,7 @@ func getuint64(val *querypb.BindVariable) (uv uint64, status int) { if err != nil { return 0, QROutOfRange } - v, err := evalengine.ToUint64(bv) + v, err := bv.ToCastUint64() if err != nil { return 0, QROutOfRange } @@ -832,7 +843,7 @@ func getint64(val *querypb.BindVariable) (iv int64, status int) { if err != nil { return 0, QROutOfRange } - v, err := evalengine.ToInt64(bv) + v, err := bv.ToCastInt64() if err != nil { return 0, QROutOfRange } diff --git a/go/vt/vttablet/tabletserver/rules/rules_test.go b/go/vt/vttablet/tabletserver/rules/rules_test.go index 60c02df230d..e56d12762de 100644 --- a/go/vt/vttablet/tabletserver/rules/rules_test.go +++ b/go/vt/vttablet/tabletserver/rules/rules_test.go @@ -23,6 +23,7 @@ import ( "regexp" "strings" "testing" + "time" "github.com/stretchr/testify/assert" @@ -542,21 +543,23 @@ func TestAction(t *testing.T) { Trailing: "other trailing comments", } - action, cancelCtx, desc := qrs.GetAction("123", "user1", bv, mc) + action, cancelCtx, timeout, desc := qrs.GetAction("123", "user1", bv, mc) assert.Equalf(t, action, QRFail, "expected fail, got %v", action) + assert.Equalf(t, timeout, time.Duration(0), "expected zero timeout") assert.Equalf(t, desc, "rule 1", "want rule 1, got %s", desc) assert.Nil(t, cancelCtx) - action, cancelCtx, desc = qrs.GetAction("1234", "user", bv, mc) + action, cancelCtx, timeout, desc = qrs.GetAction("1234", "user", bv, mc) assert.Equalf(t, action, QRFailRetry, "want fail_retry, got: %s", action) + assert.Equalf(t, timeout, time.Duration(0), "expected zero timeout") assert.Equalf(t, desc, "rule 2", "want rule 2, got %s", desc) assert.Nil(t, cancelCtx) - action, _, _ = qrs.GetAction("1234", "user1", bv, mc) + action, _, _, _ = qrs.GetAction("1234", "user1", bv, mc) assert.Equalf(t, action, QRContinue, "want continue, got %s", action) bv["a"] = sqltypes.Uint64BindVariable(1) - action, _, desc = qrs.GetAction("1234", "user1", bv, mc) + action, _, _, desc = qrs.GetAction("1234", "user1", bv, mc) assert.Equalf(t, action, QRFail, "want fail, got %s", action) assert.Equalf(t, desc, "rule 3", "want rule 3, got %s", desc) @@ -569,7 +572,7 @@ func TestAction(t *testing.T) { newQrs := qrs.Copy() newQrs.Add(qr4) - action, _, desc = newQrs.GetAction("1234", "user1", bv, mc) + action, _, _, desc = newQrs.GetAction("1234", "user1", bv, mc) assert.Equalf(t, action, QRFail, "want fail, got %s", action) assert.Equalf(t, desc, "rule 4", "want rule 4, got %s", desc) @@ -578,7 +581,7 @@ func TestAction(t *testing.T) { newQrs = qrs.Copy() newQrs.Add(qr5) - action, _, desc = newQrs.GetAction("1234", "user1", bv, mc) + action, _, _, desc = newQrs.GetAction("1234", "user1", bv, mc) assert.Equalf(t, action, QRFail, "want fail, got %s", action) assert.Equalf(t, desc, "rule 5", "want rule 5, got %s", desc) } diff --git a/go/vt/vttablet/tabletserver/schema/db.go b/go/vt/vttablet/tabletserver/schema/db.go new file mode 100644 index 00000000000..85ebf3b1457 --- /dev/null +++ b/go/vt/vttablet/tabletserver/schema/db.go @@ -0,0 +1,461 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "context" + + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" +) + +const ( + // insertTableIntoSchemaEngineTables inserts a record in the datastore for the schema-engine tables. + insertTableIntoSchemaEngineTables = `INSERT INTO %s.tables(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, CREATE_TIME) +values (database(), :table_name, :create_statement, :create_time)` + + // deleteFromSchemaEngineTablesTable removes the tables from the table that have been modified. + deleteFromSchemaEngineTablesTable = `DELETE FROM %s.tables WHERE TABLE_SCHEMA = database() AND TABLE_NAME IN ::tableNames` + + // readTableCreateTimes reads the tables create times + readTableCreateTimes = "SELECT TABLE_NAME, CREATE_TIME FROM %s.`tables`" + + // fetchUpdatedTables queries fetches information about updated tables + fetchUpdatedTables = `select table_name, create_statement from %s.tables where table_schema = database() and table_name in ::tableNames` + + // fetchTables queries fetches all information about tables + fetchTables = `select table_name, create_statement from %s.tables where table_schema = database()` + + // detectViewChange query detects if there is any view change from previous copy. + detectViewChange = ` +SELECT distinct table_name +FROM ( + SELECT table_name, view_definition + FROM information_schema.views + WHERE table_schema = database() + + UNION ALL + + SELECT table_name, view_definition + FROM %s.views + WHERE table_schema = database() +) _inner +GROUP BY table_name, view_definition +HAVING COUNT(*) = 1 +` + + // insertViewIntoSchemaEngineViews using information_schema.views. + insertViewIntoSchemaEngineViews = `INSERT INTO %s.views(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, VIEW_DEFINITION) +values (database(), :view_name, :create_statement, :view_definition)` + + // deleteFromSchemaEngineViewsTable removes the views from the table that have been modified. + deleteFromSchemaEngineViewsTable = `DELETE FROM %s.views WHERE TABLE_SCHEMA = database() AND TABLE_NAME IN ::viewNames` + + // fetchViewDefinitions retrieves view definition from information_schema.views table. + fetchViewDefinitions = `select table_name, view_definition from information_schema.views +where table_schema = database() and table_name in ::viewNames` + + // fetchCreateStatement retrieves create statement. + fetchCreateStatement = `show create table %s` + + // fetchUpdatedViews queries fetches information about updated views + fetchUpdatedViews = `select table_name, create_statement from %s.views where table_schema = database() and table_name in ::viewNames` + + // fetchViews queries fetches all views + fetchViews = `select table_name, create_statement from %s.views where table_schema = database()` + + // fetchUpdatedTablesAndViews queries fetches information about updated tables and views + fetchUpdatedTablesAndViews = `select table_name, create_statement from %s.tables where table_schema = database() and table_name in ::tableNames union select table_name, create_statement from %s.views where table_schema = database() and table_name in ::tableNames` + + // fetchTablesAndViews queries fetches all information about tables and views + fetchTablesAndViews = `select table_name, create_statement from %s.tables where table_schema = database() union select table_name, create_statement from %s.views where table_schema = database()` +) + +// reloadTablesDataInDB reloads teh tables information we have stored in our database we use for schema-tracking. +func reloadTablesDataInDB(ctx context.Context, conn *connpool.DBConn, tables []*Table, droppedTables []string) error { + // No need to do anything if we have no tables to refresh or drop. + if len(tables) == 0 && len(droppedTables) == 0 { + return nil + } + + // Delete all the tables that are dropped or modified. + tableNamesToDelete := droppedTables + for _, table := range tables { + tableNamesToDelete = append(tableNamesToDelete, table.Name.String()) + } + tablesBV, err := sqltypes.BuildBindVariable(tableNamesToDelete) + if err != nil { + return err + } + bv := map[string]*querypb.BindVariable{"tableNames": tablesBV} + + // Get the create statements for all the tables that are modified. + var createStatements []string + for _, table := range tables { + cs, err := getCreateStatement(ctx, conn, sqlparser.String(table.Name)) + if err != nil { + return err + } + createStatements = append(createStatements, cs) + } + + // Generate the queries to delete and insert table data. + clearTableParsedQuery, err := generateFullQuery(deleteFromSchemaEngineTablesTable) + if err != nil { + return err + } + clearTableQuery, err := clearTableParsedQuery.GenerateQuery(bv, nil) + if err != nil { + return err + } + + insertTablesParsedQuery, err := generateFullQuery(insertTableIntoSchemaEngineTables) + if err != nil { + return err + } + + // Reload the tables in a transaction. + _, err = conn.Exec(ctx, "begin", 1, false) + if err != nil { + return err + } + defer conn.Exec(ctx, "rollback", 1, false) + + _, err = conn.Exec(ctx, clearTableQuery, 1, false) + if err != nil { + return err + } + + for idx, table := range tables { + bv["table_name"] = sqltypes.StringBindVariable(table.Name.String()) + bv["create_statement"] = sqltypes.StringBindVariable(createStatements[idx]) + bv["create_time"] = sqltypes.Int64BindVariable(table.CreateTime) + insertTableQuery, err := insertTablesParsedQuery.GenerateQuery(bv, nil) + if err != nil { + return err + } + _, err = conn.Exec(ctx, insertTableQuery, 1, false) + if err != nil { + return err + } + } + + _, err = conn.Exec(ctx, "commit", 1, false) + return err +} + +// generateFullQuery generates the full query from the query as a string. +func generateFullQuery(query string) (*sqlparser.ParsedQuery, error) { + stmt, err := sqlparser.Parse( + sqlparser.BuildParsedQuery(query, sidecar.GetIdentifier(), sidecar.GetIdentifier()).Query) + if err != nil { + return nil, err + } + buf := sqlparser.NewTrackedBuffer(nil) + stmt.Format(buf) + return buf.ParsedQuery(), nil +} + +// reloadViewsDataInDB reloads teh views information we have stored in our database we use for schema-tracking. +func reloadViewsDataInDB(ctx context.Context, conn *connpool.DBConn, views []*Table, droppedViews []string) error { + // No need to do anything if we have no views to refresh or drop. + if len(views) == 0 && len(droppedViews) == 0 { + return nil + } + + // Delete all the views that are dropped or modified. + viewNamesToDelete := droppedViews + for _, view := range views { + viewNamesToDelete = append(viewNamesToDelete, view.Name.String()) + } + viewsBV, err := sqltypes.BuildBindVariable(viewNamesToDelete) + if err != nil { + return err + } + bv := map[string]*querypb.BindVariable{"viewNames": viewsBV} + + // Get the create statements for all the views that are modified. + var createStatements []string + for _, view := range views { + cs, err := getCreateStatement(ctx, conn, sqlparser.String(view.Name)) + if err != nil { + return err + } + createStatements = append(createStatements, cs) + } + + // Get the view definitions for all the views that are modified. + // We only need to run this if we have any views to reload. + viewDefinitions := make(map[string]string) + if len(views) > 0 { + err = getViewDefinition(ctx, conn, bv, + func(qr *sqltypes.Result) error { + for _, row := range qr.Rows { + viewDefinitions[row[0].ToString()] = row[1].ToString() + } + return nil + }, + func() *sqltypes.Result { return &sqltypes.Result{} }, + 1000, + ) + if err != nil { + return err + } + } + + // Generate the queries to delete and insert view data. + clearViewParsedQuery, err := generateFullQuery(deleteFromSchemaEngineViewsTable) + if err != nil { + return err + } + clearViewQuery, err := clearViewParsedQuery.GenerateQuery(bv, nil) + if err != nil { + return err + } + + insertViewsParsedQuery, err := generateFullQuery(insertViewIntoSchemaEngineViews) + if err != nil { + return err + } + + // Reload the views in a transaction. + _, err = conn.Exec(ctx, "begin", 1, false) + if err != nil { + return err + } + defer conn.Exec(ctx, "rollback", 1, false) + + _, err = conn.Exec(ctx, clearViewQuery, 1, false) + if err != nil { + return err + } + + for idx, view := range views { + bv["view_name"] = sqltypes.StringBindVariable(view.Name.String()) + bv["create_statement"] = sqltypes.StringBindVariable(createStatements[idx]) + bv["view_definition"] = sqltypes.StringBindVariable(viewDefinitions[view.Name.String()]) + insertViewQuery, err := insertViewsParsedQuery.GenerateQuery(bv, nil) + if err != nil { + return err + } + _, err = conn.Exec(ctx, insertViewQuery, 1, false) + if err != nil { + return err + } + } + + _, err = conn.Exec(ctx, "commit", 1, false) + return err +} + +// getViewDefinition gets the viewDefinition for the given views. +func getViewDefinition(ctx context.Context, conn *connpool.DBConn, bv map[string]*querypb.BindVariable, callback func(qr *sqltypes.Result) error, alloc func() *sqltypes.Result, bufferSize int) error { + viewsDefParsedQuery, err := generateFullQuery(fetchViewDefinitions) + if err != nil { + return err + } + viewsDefQuery, err := viewsDefParsedQuery.GenerateQuery(bv, nil) + if err != nil { + return err + } + return conn.Stream(ctx, viewsDefQuery, callback, alloc, bufferSize, 0) +} + +// getCreateStatement gets the create-statement for the given view/table. +func getCreateStatement(ctx context.Context, conn *connpool.DBConn, tableName string) (string, error) { + res, err := conn.Exec(ctx, sqlparser.BuildParsedQuery(fetchCreateStatement, tableName).Query, 1, false) + if err != nil { + return "", err + } + return res.Rows[0][1].ToString(), nil +} + +// getChangedViewNames gets the list of views that have their definitions changed. +func getChangedViewNames(ctx context.Context, conn *connpool.DBConn, isServingPrimary bool) (map[string]any, error) { + /* Retrieve changed views */ + views := make(map[string]any) + if !isServingPrimary { + return views, nil + } + callback := func(qr *sqltypes.Result) error { + for _, row := range qr.Rows { + view := row[0].ToString() + views[view] = true + } + return nil + } + alloc := func() *sqltypes.Result { return &sqltypes.Result{} } + bufferSize := 1000 + + viewChangeQuery := sqlparser.BuildParsedQuery(detectViewChange, sidecar.GetIdentifier()).Query + err := conn.Stream(ctx, viewChangeQuery, callback, alloc, bufferSize, 0) + if err != nil { + return nil, err + } + + return views, nil +} + +// getMismatchedTableNames gets the tables that do not align with the tables information we have in the cache. +func (se *Engine) getMismatchedTableNames(ctx context.Context, conn *connpool.DBConn, isServingPrimary bool) (map[string]any, error) { + tablesMismatched := make(map[string]any) + if !isServingPrimary { + return tablesMismatched, nil + } + tablesFound := make(map[string]bool) + callback := func(qr *sqltypes.Result) error { + // For each row we check 2 things — + // 1. If a table exists in our database, but not in the cache, then it could have been dropped. + // 2. If the table's create time in our database doesn't match that in our cache, then it could have been altered. + for _, row := range qr.Rows { + tableName := row[0].ToString() + createTime, _ := row[1].ToInt64() + tablesFound[tableName] = true + table, isFound := se.tables[tableName] + if !isFound || table.CreateTime != createTime { + tablesMismatched[tableName] = true + } + } + return nil + } + alloc := func() *sqltypes.Result { return &sqltypes.Result{} } + bufferSize := 1000 + readTableCreateTimesQuery := sqlparser.BuildParsedQuery(readTableCreateTimes, sidecar.GetIdentifier()).Query + err := conn.Stream(ctx, readTableCreateTimesQuery, callback, alloc, bufferSize, 0) + if err != nil { + return nil, err + } + + // Finally, we also check for tables that exist only in the cache, because these tables would have been created. + for tableName := range se.tables { + if se.tables[tableName].Type == View { + continue + } + // Explicitly ignore dual because schema-engine stores this in its list of tables. + if !tablesFound[tableName] && tableName != "dual" { + tablesMismatched[tableName] = true + } + } + + return tablesMismatched, nil +} + +// reloadDataInDB reloads the schema tracking data in the database +func reloadDataInDB(ctx context.Context, conn *connpool.DBConn, altered []*Table, created []*Table, dropped []*Table) error { + // tablesToReload and viewsToReload stores the tables and views that need reloading and storing in our MySQL database. + var tablesToReload, viewsToReload []*Table + // droppedTables, droppedViews stores the list of tables and views we need to delete, respectively. + var droppedTables []string + var droppedViews []string + + for _, table := range append(created, altered...) { + if table.Type == View { + viewsToReload = append(viewsToReload, table) + } else { + tablesToReload = append(tablesToReload, table) + } + } + + for _, table := range dropped { + tableName := table.Name.String() + if table.Type == View { + droppedViews = append(droppedViews, tableName) + } else { + droppedTables = append(droppedTables, tableName) + } + } + + if err := reloadTablesDataInDB(ctx, conn, tablesToReload, droppedTables); err != nil { + return err + } + if err := reloadViewsDataInDB(ctx, conn, viewsToReload, droppedViews); err != nil { + return err + } + return nil +} + +// GetFetchViewQuery gets the fetch query to run for getting the listed views. If no views are provided, then all the views are fetched. +func GetFetchViewQuery(viewNames []string) (string, error) { + if len(viewNames) == 0 { + parsedQuery, err := generateFullQuery(fetchViews) + if err != nil { + return "", err + } + return parsedQuery.Query, nil + } + + viewsBV, err := sqltypes.BuildBindVariable(viewNames) + if err != nil { + return "", err + } + bv := map[string]*querypb.BindVariable{"viewNames": viewsBV} + + parsedQuery, err := generateFullQuery(fetchUpdatedViews) + if err != nil { + return "", err + } + return parsedQuery.GenerateQuery(bv, nil) +} + +// GetFetchTableQuery gets the fetch query to run for getting the listed tables. If no tables are provided, then all the tables are fetched. +func GetFetchTableQuery(tableNames []string) (string, error) { + if len(tableNames) == 0 { + parsedQuery, err := generateFullQuery(fetchTables) + if err != nil { + return "", err + } + return parsedQuery.Query, nil + } + + tablesBV, err := sqltypes.BuildBindVariable(tableNames) + if err != nil { + return "", err + } + bv := map[string]*querypb.BindVariable{"tableNames": tablesBV} + + parsedQuery, err := generateFullQuery(fetchUpdatedTables) + if err != nil { + return "", err + } + return parsedQuery.GenerateQuery(bv, nil) +} + +// GetFetchTableAndViewsQuery gets the fetch query to run for getting the listed tables and views. If no table names are provided, then all the tables and views are fetched. +func GetFetchTableAndViewsQuery(tableNames []string) (string, error) { + if len(tableNames) == 0 { + parsedQuery, err := generateFullQuery(fetchTablesAndViews) + if err != nil { + return "", err + } + return parsedQuery.Query, nil + } + + tablesBV, err := sqltypes.BuildBindVariable(tableNames) + if err != nil { + return "", err + } + bv := map[string]*querypb.BindVariable{"tableNames": tablesBV} + + parsedQuery, err := generateFullQuery(fetchUpdatedTablesAndViews) + if err != nil { + return "", err + } + return parsedQuery.GenerateQuery(bv, nil) +} diff --git a/go/vt/vttablet/tabletserver/schema/db_test.go b/go/vt/vttablet/tabletserver/schema/db_test.go new file mode 100644 index 00000000000..44a3fd0c687 --- /dev/null +++ b/go/vt/vttablet/tabletserver/schema/db_test.go @@ -0,0 +1,982 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/maps2" + + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" +) + +var ( + tablesBV, _ = sqltypes.BuildBindVariable([]string{"t1", "lead"}) +) + +func TestGenerateFullQuery(t *testing.T) { + tests := []struct { + name string + query string + bv map[string]*querypb.BindVariable + wantQuery string + wantErr string + }{ + { + name: "No bind variables", + query: "select TABLE_NAME, CREATE_TIME from `tables`", + }, { + name: "List bind variables", + query: "DELETE FROM %s.`tables` WHERE TABLE_SCHEMA = database() AND TABLE_NAME IN ::tableNames", + bv: map[string]*querypb.BindVariable{ + "tableNames": tablesBV, + }, + wantQuery: "delete from _vt.`tables` where TABLE_SCHEMA = database() and TABLE_NAME in ('t1', 'lead')", + }, { + name: "Multiple bind variables", + query: "INSERT INTO %s.`tables`(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, CREATE_TIME) values (database(), :table_name, :create_statement, :create_time)", + bv: map[string]*querypb.BindVariable{ + "table_name": sqltypes.StringBindVariable("lead"), + "create_statement": sqltypes.StringBindVariable("create table `lead`"), + "create_time": sqltypes.Int64BindVariable(1), + }, + wantQuery: "insert into _vt.`tables`(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, CREATE_TIME) values (database(), 'lead', 'create table `lead`', 1)", + }, { + name: "parser error", + query: "insert syntax error", + wantErr: "syntax error at position 20 near 'error'", + }, { + name: "Multiple %v replacements", + query: fetchTablesAndViews, + wantQuery: "select table_name, create_statement from _vt.`tables` where table_schema = database() union select table_name, create_statement from _vt.views where table_schema = database()", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.wantQuery == "" { + tt.wantQuery = tt.query + } + + got, err := generateFullQuery(tt.query) + if tt.wantErr != "" { + require.EqualError(t, err, tt.wantErr) + return + } + require.NoError(t, err) + finalQuery, err := got.GenerateQuery(tt.bv, nil) + require.NoError(t, err) + require.Equal(t, tt.wantQuery, finalQuery) + }) + } +} + +func TestGetCreateStatement(t *testing.T) { + db := fakesqldb.New(t) + conn, err := connpool.NewDBConnNoPool(context.Background(), db.ConnParams(), nil, nil) + require.NoError(t, err) + + // Success view + createStatement := "CREATE ALGORITHM=UNDEFINED DEFINER=`msandbox`@`localhost` SQL SECURITY DEFINER VIEW `lead` AS select `area`.`id` AS `id` from `area`" + db.AddQuery("show create table `lead`", sqltypes.MakeTestResult( + sqltypes.MakeTestFields(" View | Create View | character_set_client | collation_connection", "varchar|varchar|varchar|varchar"), + fmt.Sprintf("lead|%v|utf8mb4|utf8mb4_0900_ai_ci", createStatement), + )) + got, err := getCreateStatement(context.Background(), conn, "`lead`") + require.NoError(t, err) + require.Equal(t, createStatement, got) + require.NoError(t, db.LastError()) + + // Success table + createStatement = "CREATE TABLE `area` (\n `id` int NOT NULL,\n `name` varchar(30) DEFAULT NULL,\n `zipcode` int DEFAULT NULL,\n `country` int DEFAULT NULL,\n `x` varchar(30) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_as_cs DEFAULT NULL,\n PRIMARY KEY (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci" + db.AddQuery("show create table area", sqltypes.MakeTestResult( + sqltypes.MakeTestFields(" Table | Create Table", "varchar|varchar"), + fmt.Sprintf("area|%v", createStatement), + )) + got, err = getCreateStatement(context.Background(), conn, "area") + require.NoError(t, err) + require.Equal(t, createStatement, got) + require.NoError(t, db.LastError()) + + // Failure + errMessage := "ERROR 1146 (42S02): Table 'ks.v1' doesn't exist" + db.AddRejectedQuery("show create table v1", errors.New(errMessage)) + got, err = getCreateStatement(context.Background(), conn, "v1") + require.ErrorContains(t, err, errMessage) + require.Equal(t, "", got) +} + +func TestGetChangedViewNames(t *testing.T) { + db := fakesqldb.New(t) + conn, err := connpool.NewDBConnNoPool(context.Background(), db.ConnParams(), nil, nil) + require.NoError(t, err) + + // Success + query := fmt.Sprintf(detectViewChange, sidecar.GetIdentifier()) + db.AddQuery(query, sqltypes.MakeTestResult( + sqltypes.MakeTestFields("table_name", "varchar"), + "lead", + "v1", + "v2", + )) + got, err := getChangedViewNames(context.Background(), conn, true) + require.NoError(t, err) + require.Len(t, got, 3) + require.ElementsMatch(t, maps2.Keys(got), []string{"v1", "v2", "lead"}) + require.NoError(t, db.LastError()) + + // Not serving primary + got, err = getChangedViewNames(context.Background(), conn, false) + require.NoError(t, err) + require.Len(t, got, 0) + require.NoError(t, db.LastError()) + + // Failure + errMessage := "ERROR 1146 (42S02): Table '_vt.views' doesn't exist" + db.AddRejectedQuery(query, errors.New(errMessage)) + got, err = getChangedViewNames(context.Background(), conn, true) + require.ErrorContains(t, err, errMessage) + require.Nil(t, got) +} + +func TestGetViewDefinition(t *testing.T) { + db := fakesqldb.New(t) + conn, err := connpool.NewDBConnNoPool(context.Background(), db.ConnParams(), nil, nil) + require.NoError(t, err) + + viewsBV, err := sqltypes.BuildBindVariable([]string{"v1", "lead"}) + require.NoError(t, err) + bv := map[string]*querypb.BindVariable{"viewNames": viewsBV} + + // Success + query := "select table_name, view_definition from information_schema.views where table_schema = database() and table_name in ('v1', 'lead')" + db.AddQuery(query, sqltypes.MakeTestResult( + sqltypes.MakeTestFields("table_name|view_definition", "varchar|varchar"), + "v1|create_view_v1", + "lead|create_view_lead", + )) + got, err := collectGetViewDefinitions(conn, bv) + require.NoError(t, err) + require.Len(t, got, 2) + require.ElementsMatch(t, maps2.Keys(got), []string{"v1", "lead"}) + require.Equal(t, "create_view_v1", got["v1"]) + require.Equal(t, "create_view_lead", got["lead"]) + require.NoError(t, db.LastError()) + + // Failure + errMessage := "some error in MySQL" + db.AddRejectedQuery(query, errors.New(errMessage)) + got, err = collectGetViewDefinitions(conn, bv) + require.ErrorContains(t, err, errMessage) + require.Len(t, got, 0) + + // Failure empty bv + bv = nil + got, err = collectGetViewDefinitions(conn, bv) + require.EqualError(t, err, "missing bind var viewNames") + require.Len(t, got, 0) +} + +func collectGetViewDefinitions(conn *connpool.DBConn, bv map[string]*querypb.BindVariable) (map[string]string, error) { + viewDefinitions := make(map[string]string) + err := getViewDefinition(context.Background(), conn, bv, func(qr *sqltypes.Result) error { + for _, row := range qr.Rows { + viewDefinitions[row[0].ToString()] = row[1].ToString() + } + return nil + }, func() *sqltypes.Result { + return &sqltypes.Result{} + }, 1000) + return viewDefinitions, err +} + +func TestGetMismatchedTableNames(t *testing.T) { + queryFields := sqltypes.MakeTestFields("TABLE_NAME|CREATE_TIME", "varchar|int64") + + testCases := []struct { + name string + tables map[string]*Table + dbData *sqltypes.Result + dbError string + isServingPrimary bool + expectedTableNames []string + expectedError string + }{ + { + name: "Table create time differs", + tables: map[string]*Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Type: NoType, + CreateTime: 31234, + }, + }, + dbData: sqltypes.MakeTestResult(queryFields, + "t1|2341"), + isServingPrimary: true, + expectedTableNames: []string{"t1"}, + }, { + name: "Table got deleted", + tables: map[string]*Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Type: NoType, + CreateTime: 31234, + }, + }, + dbData: sqltypes.MakeTestResult(queryFields, + "t1|31234", + "t2|2341"), + isServingPrimary: true, + expectedTableNames: []string{"t2"}, + }, { + name: "Table got created", + tables: map[string]*Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Type: NoType, + CreateTime: 31234, + }, "t2": { + Name: sqlparser.NewIdentifierCS("t2"), + Type: NoType, + CreateTime: 31234, + }, + }, + dbData: sqltypes.MakeTestResult(queryFields, + "t1|31234"), + isServingPrimary: true, + expectedTableNames: []string{"t2"}, + }, { + name: "Dual gets ignored", + tables: map[string]*Table{ + "dual": NewTable("dual", NoType), + "t2": { + Name: sqlparser.NewIdentifierCS("t2"), + Type: NoType, + CreateTime: 31234, + }, + }, + dbData: sqltypes.MakeTestResult(queryFields, + "t2|31234"), + isServingPrimary: true, + expectedTableNames: []string{}, + }, { + name: "All problems", + tables: map[string]*Table{ + "dual": NewTable("dual", NoType), + "t2": { + Name: sqlparser.NewIdentifierCS("t2"), + Type: NoType, + CreateTime: 31234, + }, + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Type: NoType, + CreateTime: 31234, + }, + }, + dbData: sqltypes.MakeTestResult(queryFields, + "t3|31234", + "t1|1342"), + isServingPrimary: true, + expectedTableNames: []string{"t1", "t2", "t3"}, + }, { + name: "Not serving primary", + tables: map[string]*Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Type: NoType, + CreateTime: 31234, + }, + }, + dbData: sqltypes.MakeTestResult(queryFields, + "t1|2341"), + isServingPrimary: false, + expectedTableNames: []string{}, + }, { + name: "Error in query", + tables: map[string]*Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Type: NoType, + CreateTime: 31234, + }, + }, + dbError: "some error in MySQL", + dbData: nil, + isServingPrimary: true, + expectedError: "some error in MySQL", + }, + } + + query := fmt.Sprintf(readTableCreateTimes, sidecar.GetIdentifier()) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + db := fakesqldb.New(t) + conn, err := connpool.NewDBConnNoPool(context.Background(), db.ConnParams(), nil, nil) + require.NoError(t, err) + + if tc.dbError != "" { + db.AddRejectedQuery(query, errors.New(tc.dbError)) + } else { + db.AddQuery(query, tc.dbData) + } + se := &Engine{ + tables: tc.tables, + } + mismatchedTableNames, err := se.getMismatchedTableNames(context.Background(), conn, tc.isServingPrimary) + if tc.expectedError != "" { + require.ErrorContains(t, err, tc.expectedError) + } else { + require.ElementsMatch(t, maps2.Keys(mismatchedTableNames), tc.expectedTableNames) + require.NoError(t, db.LastError()) + } + }) + } +} + +func TestReloadTablesInDB(t *testing.T) { + showCreateTableFields := sqltypes.MakeTestFields("Table | Create Table", "varchar|varchar") + errMessage := "some error in MySQL" + testCases := []struct { + name string + tablesToReload []*Table + tablesToDelete []string + expectedQueries map[string]*sqltypes.Result + queriesToReject map[string]error + expectedError string + }{ + { + name: "Only tables to delete", + tablesToDelete: []string{"t1", "lead"}, + expectedQueries: map[string]*sqltypes.Result{ + "begin": {}, + "commit": {}, + "rollback": {}, + "delete from _vt.`tables` where table_schema = database() and table_name in ('t1', 'lead')": {}, + }, + }, { + name: "Only tables to reload", + tablesToReload: []*Table{ + { + Name: sqlparser.NewIdentifierCS("t1"), + Type: NoType, + CreateTime: 1234, + }, { + Name: sqlparser.NewIdentifierCS("lead"), + Type: NoType, + CreateTime: 1234, + }, + }, + expectedQueries: map[string]*sqltypes.Result{ + "begin": {}, + "commit": {}, + "rollback": {}, + "delete from _vt.`tables` where table_schema = database() and table_name in ('t1', 'lead')": {}, + "show create table t1": sqltypes.MakeTestResult(showCreateTableFields, + "t1|create_table_t1"), + "show create table `lead`": sqltypes.MakeTestResult(showCreateTableFields, + "lead|create_table_lead"), + "insert into _vt.`tables`(table_schema, table_name, create_statement, create_time) values (database(), 't1', 'create_table_t1', 1234)": {}, + "insert into _vt.`tables`(table_schema, table_name, create_statement, create_time) values (database(), 'lead', 'create_table_lead', 1234)": {}, + }, + }, { + name: "Reload and Delete", + tablesToReload: []*Table{ + { + Name: sqlparser.NewIdentifierCS("t1"), + Type: NoType, + CreateTime: 1234, + }, { + Name: sqlparser.NewIdentifierCS("lead"), + Type: NoType, + CreateTime: 1234, + }, + }, + tablesToDelete: []string{"t2", "from"}, + expectedQueries: map[string]*sqltypes.Result{ + "begin": {}, + "commit": {}, + "rollback": {}, + "delete from _vt.`tables` where table_schema = database() and table_name in ('t2', 'from', 't1', 'lead')": {}, + "show create table t1": sqltypes.MakeTestResult(showCreateTableFields, + "t1|create_table_t1"), + "show create table `lead`": sqltypes.MakeTestResult(showCreateTableFields, + "lead|create_table_lead"), + "insert into _vt.`tables`(table_schema, table_name, create_statement, create_time) values (database(), 't1', 'create_table_t1', 1234)": {}, + "insert into _vt.`tables`(table_schema, table_name, create_statement, create_time) values (database(), 'lead', 'create_table_lead', 1234)": {}, + }, + }, { + name: "Error In Insert", + tablesToReload: []*Table{ + { + Name: sqlparser.NewIdentifierCS("t1"), + Type: NoType, + CreateTime: 1234, + }, + }, + expectedQueries: map[string]*sqltypes.Result{ + "begin": {}, + "commit": {}, + "rollback": {}, + "delete from _vt.`tables` where table_schema = database() and table_name in ('t1')": {}, + "show create table t1": sqltypes.MakeTestResult(showCreateTableFields, + "t1|create_table_t1"), + }, + queriesToReject: map[string]error{ + "insert into _vt.`tables`(table_schema, table_name, create_statement, create_time) values (database(), 't1', 'create_table_t1', 1234)": errors.New(errMessage), + }, + expectedError: errMessage, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + db := fakesqldb.New(t) + conn, err := connpool.NewDBConnNoPool(context.Background(), db.ConnParams(), nil, nil) + require.NoError(t, err) + + // Add queries with the expected results and errors. + for query, result := range tc.expectedQueries { + db.AddQuery(query, result) + } + for query, errorToThrow := range tc.queriesToReject { + db.AddRejectedQuery(query, errorToThrow) + } + + err = reloadTablesDataInDB(context.Background(), conn, tc.tablesToReload, tc.tablesToDelete) + if tc.expectedError != "" { + require.ErrorContains(t, err, tc.expectedError) + return + } + require.NoError(t, err) + require.NoError(t, db.LastError()) + }) + } +} + +func TestReloadViewsInDB(t *testing.T) { + showCreateTableFields := sqltypes.MakeTestFields(" View | Create View | character_set_client | collation_connection", "varchar|varchar|varchar|varchar") + getViewDefinitionsFields := sqltypes.MakeTestFields("table_name|view_definition", "varchar|varchar") + errMessage := "some error in MySQL" + testCases := []struct { + name string + viewsToReload []*Table + viewsToDelete []string + expectedQueries map[string]*sqltypes.Result + queriesToReject map[string]error + expectedError string + }{ + { + name: "Only views to delete", + viewsToDelete: []string{"v1", "lead"}, + expectedQueries: map[string]*sqltypes.Result{ + "begin": {}, + "commit": {}, + "rollback": {}, + "delete from _vt.views where table_schema = database() and table_name in ('v1', 'lead')": {}, + }, + }, { + name: "Only views to reload", + viewsToReload: []*Table{ + { + Name: sqlparser.NewIdentifierCS("v1"), + Type: View, + CreateTime: 1234, + }, { + Name: sqlparser.NewIdentifierCS("lead"), + Type: View, + CreateTime: 1234, + }, + }, + expectedQueries: map[string]*sqltypes.Result{ + "begin": {}, + "commit": {}, + "rollback": {}, + "delete from _vt.views where table_schema = database() and table_name in ('v1', 'lead')": {}, + "select table_name, view_definition from information_schema.views where table_schema = database() and table_name in ('v1', 'lead')": sqltypes.MakeTestResult( + getViewDefinitionsFields, + "lead|select_lead", + "v1|select_v1"), + "show create table v1": sqltypes.MakeTestResult(showCreateTableFields, + "v1|create_view_v1|utf8mb4|utf8mb4_0900_ai_ci"), + "show create table `lead`": sqltypes.MakeTestResult(showCreateTableFields, + "lead|create_view_lead|utf8mb4|utf8mb4_0900_ai_ci"), + "insert into _vt.views(table_schema, table_name, create_statement, view_definition) values (database(), 'v1', 'create_view_v1', 'select_v1')": {}, + "insert into _vt.views(table_schema, table_name, create_statement, view_definition) values (database(), 'lead', 'create_view_lead', 'select_lead')": {}, + }, + }, { + name: "Reload and delete", + viewsToReload: []*Table{ + { + Name: sqlparser.NewIdentifierCS("v1"), + Type: View, + CreateTime: 1234, + }, { + Name: sqlparser.NewIdentifierCS("lead"), + Type: View, + CreateTime: 1234, + }, + }, + viewsToDelete: []string{"v2", "from"}, + expectedQueries: map[string]*sqltypes.Result{ + "begin": {}, + "commit": {}, + "rollback": {}, + "delete from _vt.views where table_schema = database() and table_name in ('v2', 'from', 'v1', 'lead')": {}, + "select table_name, view_definition from information_schema.views where table_schema = database() and table_name in ('v2', 'from', 'v1', 'lead')": sqltypes.MakeTestResult( + getViewDefinitionsFields, + "lead|select_lead", + "v1|select_v1"), + "show create table v1": sqltypes.MakeTestResult(showCreateTableFields, + "v1|create_view_v1|utf8mb4|utf8mb4_0900_ai_ci"), + "show create table `lead`": sqltypes.MakeTestResult(showCreateTableFields, + "lead|create_view_lead|utf8mb4|utf8mb4_0900_ai_ci"), + "insert into _vt.views(table_schema, table_name, create_statement, view_definition) values (database(), 'v1', 'create_view_v1', 'select_v1')": {}, + "insert into _vt.views(table_schema, table_name, create_statement, view_definition) values (database(), 'lead', 'create_view_lead', 'select_lead')": {}, + }, + }, { + name: "Error In Insert", + viewsToReload: []*Table{ + { + Name: sqlparser.NewIdentifierCS("v1"), + Type: View, + CreateTime: 1234, + }, + }, + expectedQueries: map[string]*sqltypes.Result{ + "begin": {}, + "commit": {}, + "rollback": {}, + "delete from _vt.views where table_schema = database() and table_name in ('v1')": {}, + "select table_name, view_definition from information_schema.views where table_schema = database() and table_name in ('v1')": sqltypes.MakeTestResult( + getViewDefinitionsFields, + "v1|select_v1"), + "show create table v1": sqltypes.MakeTestResult(showCreateTableFields, + "v1|create_view_v1|utf8mb4|utf8mb4_0900_ai_ci"), + }, + queriesToReject: map[string]error{ + "insert into _vt.views(table_schema, table_name, create_statement, view_definition) values (database(), 'v1', 'create_view_v1', 'select_v1')": errors.New(errMessage), + }, + expectedError: errMessage, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + db := fakesqldb.New(t) + conn, err := connpool.NewDBConnNoPool(context.Background(), db.ConnParams(), nil, nil) + require.NoError(t, err) + + // Add queries with the expected results and errors. + for query, result := range tc.expectedQueries { + db.AddQuery(query, result) + } + for query, errorToThrow := range tc.queriesToReject { + db.AddRejectedQuery(query, errorToThrow) + } + + err = reloadViewsDataInDB(context.Background(), conn, tc.viewsToReload, tc.viewsToDelete) + if tc.expectedError != "" { + require.ErrorContains(t, err, tc.expectedError) + return + } + require.NoError(t, err) + require.NoError(t, db.LastError()) + }) + } +} + +func TestReloadDataInDB(t *testing.T) { + showCreateViewFields := sqltypes.MakeTestFields(" View | Create View | character_set_client | collation_connection", "varchar|varchar|varchar|varchar") + showCreateTableFields := sqltypes.MakeTestFields("Table | Create Table", "varchar|varchar") + getViewDefinitionsFields := sqltypes.MakeTestFields("table_name|view_definition", "varchar|varchar") + errMessage := "some error in MySQL" + testCases := []struct { + name string + altered []*Table + created []*Table + dropped []*Table + expectedQueries map[string]*sqltypes.Result + queriesToReject map[string]error + expectedError string + }{ + { + name: "Only views to delete", + dropped: []*Table{ + NewTable("v1", View), + NewTable("lead", View), + }, + expectedQueries: map[string]*sqltypes.Result{ + "begin": {}, + "commit": {}, + "rollback": {}, + "delete from _vt.views where table_schema = database() and table_name in ('v1', 'lead')": {}, + }, + }, { + name: "Only views to reload", + created: []*Table{ + { + Name: sqlparser.NewIdentifierCS("v1"), + Type: View, + CreateTime: 1234, + }, + }, + altered: []*Table{ + { + Name: sqlparser.NewIdentifierCS("lead"), + Type: View, + CreateTime: 1234, + }, + }, + expectedQueries: map[string]*sqltypes.Result{ + "begin": {}, + "commit": {}, + "rollback": {}, + "delete from _vt.views where table_schema = database() and table_name in ('v1', 'lead')": {}, + "select table_name, view_definition from information_schema.views where table_schema = database() and table_name in ('v1', 'lead')": sqltypes.MakeTestResult( + getViewDefinitionsFields, + "lead|select_lead", + "v1|select_v1"), + "show create table v1": sqltypes.MakeTestResult(showCreateViewFields, + "v1|create_view_v1|utf8mb4|utf8mb4_0900_ai_ci"), + "show create table `lead`": sqltypes.MakeTestResult(showCreateViewFields, + "lead|create_view_lead|utf8mb4|utf8mb4_0900_ai_ci"), + "insert into _vt.views(table_schema, table_name, create_statement, view_definition) values (database(), 'v1', 'create_view_v1', 'select_v1')": {}, + "insert into _vt.views(table_schema, table_name, create_statement, view_definition) values (database(), 'lead', 'create_view_lead', 'select_lead')": {}, + }, + }, { + name: "Reload and delete views", + created: []*Table{ + { + Name: sqlparser.NewIdentifierCS("v1"), + Type: View, + CreateTime: 1234, + }, + }, + altered: []*Table{ + { + Name: sqlparser.NewIdentifierCS("lead"), + Type: View, + CreateTime: 1234, + }, + }, + dropped: []*Table{ + NewTable("v2", View), + NewTable("from", View), + }, + expectedQueries: map[string]*sqltypes.Result{ + "begin": {}, + "commit": {}, + "rollback": {}, + "delete from _vt.views where table_schema = database() and table_name in ('v2', 'from', 'v1', 'lead')": {}, + "select table_name, view_definition from information_schema.views where table_schema = database() and table_name in ('v2', 'from', 'v1', 'lead')": sqltypes.MakeTestResult( + getViewDefinitionsFields, + "lead|select_lead", + "v1|select_v1"), + "show create table v1": sqltypes.MakeTestResult(showCreateViewFields, + "v1|create_view_v1|utf8mb4|utf8mb4_0900_ai_ci"), + "show create table `lead`": sqltypes.MakeTestResult(showCreateViewFields, + "lead|create_view_lead|utf8mb4|utf8mb4_0900_ai_ci"), + "insert into _vt.views(table_schema, table_name, create_statement, view_definition) values (database(), 'v1', 'create_view_v1', 'select_v1')": {}, + "insert into _vt.views(table_schema, table_name, create_statement, view_definition) values (database(), 'lead', 'create_view_lead', 'select_lead')": {}, + }, + }, { + name: "Error In Inserting View Data", + created: []*Table{ + { + Name: sqlparser.NewIdentifierCS("v1"), + Type: View, + CreateTime: 1234, + }, + }, + expectedQueries: map[string]*sqltypes.Result{ + "begin": {}, + "commit": {}, + "rollback": {}, + "delete from _vt.views where table_schema = database() and table_name in ('v1')": {}, + "select table_name, view_definition from information_schema.views where table_schema = database() and table_name in ('v1')": sqltypes.MakeTestResult( + getViewDefinitionsFields, + "v1|select_v1"), + "show create table v1": sqltypes.MakeTestResult(showCreateViewFields, + "v1|create_view_v1|utf8mb4|utf8mb4_0900_ai_ci"), + }, + queriesToReject: map[string]error{ + "insert into _vt.views(table_schema, table_name, create_statement, view_definition) values (database(), 'v1', 'create_view_v1', 'select_v1')": errors.New(errMessage), + }, + expectedError: errMessage, + }, { + name: "Only tables to delete", + dropped: []*Table{ + NewTable("t1", NoType), + NewTable("lead", NoType), + }, + expectedQueries: map[string]*sqltypes.Result{ + "begin": {}, + "commit": {}, + "rollback": {}, + "delete from _vt.`tables` where table_schema = database() and table_name in ('t1', 'lead')": {}, + }, + }, { + name: "Only tables to reload", + created: []*Table{ + { + Name: sqlparser.NewIdentifierCS("t1"), + Type: NoType, + CreateTime: 1234, + }, + }, + altered: []*Table{ + { + Name: sqlparser.NewIdentifierCS("lead"), + Type: NoType, + CreateTime: 1234, + }, + }, + expectedQueries: map[string]*sqltypes.Result{ + "begin": {}, + "commit": {}, + "rollback": {}, + "delete from _vt.`tables` where table_schema = database() and table_name in ('t1', 'lead')": {}, + "show create table t1": sqltypes.MakeTestResult(showCreateTableFields, + "t1|create_table_t1"), + "show create table `lead`": sqltypes.MakeTestResult(showCreateTableFields, + "lead|create_table_lead"), + "insert into _vt.`tables`(table_schema, table_name, create_statement, create_time) values (database(), 't1', 'create_table_t1', 1234)": {}, + "insert into _vt.`tables`(table_schema, table_name, create_statement, create_time) values (database(), 'lead', 'create_table_lead', 1234)": {}, + }, + }, { + name: "Reload and delete tables", + created: []*Table{ + { + Name: sqlparser.NewIdentifierCS("t1"), + Type: NoType, + CreateTime: 1234, + }, + }, + altered: []*Table{ + { + Name: sqlparser.NewIdentifierCS("lead"), + Type: NoType, + CreateTime: 1234, + }, + }, + dropped: []*Table{ + NewTable("t2", NoType), + NewTable("from", NoType), + }, + expectedQueries: map[string]*sqltypes.Result{ + "begin": {}, + "commit": {}, + "rollback": {}, + "delete from _vt.`tables` where table_schema = database() and table_name in ('t2', 'from', 't1', 'lead')": {}, + "show create table t1": sqltypes.MakeTestResult(showCreateTableFields, + "t1|create_table_t1"), + "show create table `lead`": sqltypes.MakeTestResult(showCreateTableFields, + "lead|create_table_lead"), + "insert into _vt.`tables`(table_schema, table_name, create_statement, create_time) values (database(), 't1', 'create_table_t1', 1234)": {}, + "insert into _vt.`tables`(table_schema, table_name, create_statement, create_time) values (database(), 'lead', 'create_table_lead', 1234)": {}, + }, + }, { + name: "Error In Inserting Table Data", + altered: []*Table{ + { + Name: sqlparser.NewIdentifierCS("t1"), + Type: NoType, + CreateTime: 1234, + }, + }, + expectedQueries: map[string]*sqltypes.Result{ + "begin": {}, + "commit": {}, + "rollback": {}, + "delete from _vt.`tables` where table_schema = database() and table_name in ('t1')": {}, + "show create table t1": sqltypes.MakeTestResult(showCreateTableFields, + "t1|create_table_t1"), + }, + queriesToReject: map[string]error{ + "insert into _vt.`tables`(table_schema, table_name, create_statement, create_time) values (database(), 't1', 'create_table_t1', 1234)": errors.New(errMessage), + }, + expectedError: errMessage, + }, { + name: "Reload and delete all", + created: []*Table{ + { + Name: sqlparser.NewIdentifierCS("v1"), + Type: View, + CreateTime: 1234, + }, { + Name: sqlparser.NewIdentifierCS("t1"), + Type: NoType, + CreateTime: 1234, + }, + }, + altered: []*Table{ + { + Name: sqlparser.NewIdentifierCS("lead"), + Type: View, + CreateTime: 1234, + }, { + Name: sqlparser.NewIdentifierCS("where"), + Type: NoType, + CreateTime: 1234, + }, + }, + dropped: []*Table{ + NewTable("v2", View), + NewTable("from", View), + NewTable("t2", NoType), + }, + expectedQueries: map[string]*sqltypes.Result{ + "begin": {}, + "commit": {}, + "rollback": {}, + "delete from _vt.views where table_schema = database() and table_name in ('v2', 'from', 'v1', 'lead')": {}, + "select table_name, view_definition from information_schema.views where table_schema = database() and table_name in ('v2', 'from', 'v1', 'lead')": sqltypes.MakeTestResult( + getViewDefinitionsFields, + "lead|select_lead", + "v1|select_v1"), + "show create table v1": sqltypes.MakeTestResult(showCreateViewFields, + "v1|create_view_v1|utf8mb4|utf8mb4_0900_ai_ci"), + "show create table `lead`": sqltypes.MakeTestResult(showCreateViewFields, + "lead|create_view_lead|utf8mb4|utf8mb4_0900_ai_ci"), + "insert into _vt.views(table_schema, table_name, create_statement, view_definition) values (database(), 'v1', 'create_view_v1', 'select_v1')": {}, + "insert into _vt.views(table_schema, table_name, create_statement, view_definition) values (database(), 'lead', 'create_view_lead', 'select_lead')": {}, + "delete from _vt.`tables` where table_schema = database() and table_name in ('t2', 't1', 'where')": {}, + "show create table t1": sqltypes.MakeTestResult(showCreateTableFields, + "t1|create_table_t1"), + "show create table `where`": sqltypes.MakeTestResult(showCreateTableFields, + "where|create_table_where"), + "insert into _vt.`tables`(table_schema, table_name, create_statement, create_time) values (database(), 't1', 'create_table_t1', 1234)": {}, + "insert into _vt.`tables`(table_schema, table_name, create_statement, create_time) values (database(), 'where', 'create_table_where', 1234)": {}, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + db := fakesqldb.New(t) + conn, err := connpool.NewDBConnNoPool(context.Background(), db.ConnParams(), nil, nil) + require.NoError(t, err) + + // Add queries with the expected results and errors. + for query, result := range tc.expectedQueries { + db.AddQuery(query, result) + } + for query, errorToThrow := range tc.queriesToReject { + db.AddRejectedQuery(query, errorToThrow) + } + + err = reloadDataInDB(context.Background(), conn, tc.altered, tc.created, tc.dropped) + if tc.expectedError != "" { + require.ErrorContains(t, err, tc.expectedError) + return + } + require.NoError(t, err) + require.NoError(t, db.LastError()) + }) + } +} + +// TestGetFetchViewQuery tests the functionality for getting the fetch query to retrieve views. +func TestGetFetchViewQuery(t *testing.T) { + testcases := []struct { + name string + viewNames []string + expectedQuery string + }{ + { + name: "No views provided", + viewNames: []string{}, + expectedQuery: "select table_name, create_statement from _vt.views where table_schema = database()", + }, { + name: "Few views provided", + viewNames: []string{"v1", "v2", "lead"}, + expectedQuery: "select table_name, create_statement from _vt.views where table_schema = database() and table_name in ('v1', 'v2', 'lead')", + }, + } + + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + query, err := GetFetchViewQuery(testcase.viewNames) + require.NoError(t, err) + require.Equal(t, testcase.expectedQuery, query) + }) + } +} + +// TestGetFetchTableQuery tests the functionality for getting the fetch query to retrieve tables. +func TestGetFetchTableQuery(t *testing.T) { + testcases := []struct { + name string + tableNames []string + expectedQuery string + }{ + { + name: "No tables provided", + tableNames: []string{}, + expectedQuery: "select table_name, create_statement from _vt.`tables` where table_schema = database()", + }, { + name: "Few tables provided", + tableNames: []string{"v1", "v2", "lead"}, + expectedQuery: "select table_name, create_statement from _vt.`tables` where table_schema = database() and table_name in ('v1', 'v2', 'lead')", + }, + } + + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + query, err := GetFetchTableQuery(testcase.tableNames) + require.NoError(t, err) + require.Equal(t, testcase.expectedQuery, query) + }) + } +} + +// TestGetFetchTableAndViewsQuery tests the functionality for getting the fetch query to retrieve tables and views. +func TestGetFetchTableAndViewsQuery(t *testing.T) { + testcases := []struct { + name string + tableNames []string + expectedQuery string + }{ + { + name: "No tables provided", + tableNames: []string{}, + expectedQuery: "select table_name, create_statement from _vt.`tables` where table_schema = database() union select table_name, create_statement from _vt.views where table_schema = database()", + }, { + name: "Few tables provided", + tableNames: []string{"t1", "t2", "v1", "v2", "lead"}, + expectedQuery: "select table_name, create_statement from _vt.`tables` where table_schema = database() and table_name in ('t1', 't2', 'v1', 'v2', 'lead') union select table_name, create_statement from _vt.views where table_schema = database() and table_name in ('t1', 't2', 'v1', 'v2', 'lead')", + }, + } + + for _, testcase := range testcases { + t.Run(testcase.name, func(t *testing.T) { + query, err := GetFetchTableAndViewsQuery(testcase.tableNames) + require.NoError(t, err) + require.Equal(t, testcase.expectedQuery, query) + }) + } +} diff --git a/go/vt/vttablet/tabletserver/schema/engine.go b/go/vt/vttablet/tabletserver/schema/engine.go index 2ebdaa3505d..1ef6d071b7c 100644 --- a/go/vt/vttablet/tabletserver/schema/engine.go +++ b/go/vt/vttablet/tabletserver/schema/engine.go @@ -22,25 +22,30 @@ import ( "encoding/json" "fmt" "net/http" + "strings" "sync" "time" - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/sidecardb" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/maps2" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/acl" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -51,7 +56,7 @@ import ( const maxTableCount = 10000 -type notifier func(full map[string]*Table, created, altered, dropped []string) +type notifier func(full map[string]*Table, created, altered, dropped []*Table) // Engine stores the schema info and performs operations that // keep itself up-to-date. @@ -64,19 +69,24 @@ type Engine struct { isOpen bool tables map[string]*Table lastChange int64 - reloadTime time.Duration - //the position at which the schema was last loaded. it is only used in conjunction with ReloadAt - reloadAtPos mysql.Position + // the position at which the schema was last loaded. it is only used in conjunction with ReloadAt + reloadAtPos replication.Position notifierMu sync.Mutex notifiers map[string]notifier + // isServingPrimary stores if this tablet is currently the serving primary or not. + isServingPrimary bool + // schemaCopy stores if the user has requested signals on schema changes. If they have, then we + // also track the underlying schema and make a copy of it in our MySQL instance. + schemaCopy bool // SkipMetaCheck skips the metadata about the database and table information SkipMetaCheck bool historian *historian - conns *connpool.Pool - ticks *timer.Timer + conns *connpool.Pool + ticks *timer.Timer + reloadTimeout time.Duration // dbCreationFailed is for preventing log spam. dbCreationFailed bool @@ -98,15 +108,15 @@ func NewEngine(env tabletenv.Env) *Engine { Size: 3, IdleTimeoutSeconds: env.Config().OltpReadPool.IdleTimeoutSeconds, }), - ticks: timer.NewTimer(reloadTime), - reloadTime: reloadTime, + ticks: timer.NewTimer(reloadTime), } + se.schemaCopy = env.Config().SignalWhenSchemaChange _ = env.Exporter().NewGaugeDurationFunc("SchemaReloadTime", "vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time.", se.ticks.Interval) se.tableFileSizeGauge = env.Exporter().NewGaugesWithSingleLabel("TableFileSize", "tracks table file size", "Table") se.tableAllocatedSizeGauge = env.Exporter().NewGaugesWithSingleLabel("TableAllocatedSize", "tracks table allocated size", "Table") se.innoDbReadRowsCounter = env.Exporter().NewCounter("InnodbRowsRead", "number of rows read by mysql") se.SchemaReloadTimings = env.Exporter().NewTimings("SchemaReload", "time taken to reload the schema", "type") - + se.reloadTimeout = env.Config().SchemaChangeReloadTimeout env.Exporter().HandleFunc("/debug/schema", se.handleDebugSchema) env.Exporter().HandleFunc("/schemaz", func(w http.ResponseWriter, r *http.Request) { // Ensure schema engine is Open. If vttablet came up in a non_serving role, @@ -119,7 +129,7 @@ func NewEngine(env tabletenv.Env) *Engine { schemazHandler(se.GetSchema(), w, r) }) - se.historian = newHistorian(env.Config().TrackSchemaVersions, se.conns) + se.historian = newHistorian(env.Config().TrackSchemaVersions, env.Config().SchemaVersionMaxAgeSeconds, se.conns) return se } @@ -128,11 +138,13 @@ func (se *Engine) InitDBConfig(cp dbconfigs.Connector) { se.cp = cp } -// syncSidecarDB is called either the first time a primary starts, or on subsequent loads, to possibly upgrade to a -// new Vitess version. This is the only entry point into the sidecardb module to get the _vt database to the desired -// schema for the running Vitess version. -// There is some extra logging in here which can be removed in a future version (>v16) once the new schema init -// functionality is stable. +// syncSidecarDB is called either the first time a primary starts, or +// on subsequent loads, to possibly upgrade to a new Vitess version. +// This is the only entry point into the sidecardb module to get the +// sidecar database to the desired schema for the running Vitess +// version. There is some extra logging in here which can be removed +// in a future version (>v16) once the new schema init functionality +// is stable. func (se *Engine) syncSidecarDB(ctx context.Context, conn *dbconnpool.DBConnection) error { log.Infof("In syncSidecarDB") defer func(start time.Time) { @@ -141,7 +153,7 @@ func (se *Engine) syncSidecarDB(ctx context.Context, conn *dbconnpool.DBConnecti var exec sidecardb.Exec = func(ctx context.Context, query string, maxRows int, useDB bool) (*sqltypes.Result, error) { if useDB { - _, err := conn.ExecuteFetch(sidecardb.UseSidecarDatabaseQuery, maxRows, false) + _, err := conn.ExecuteFetch(sqlparser.BuildParsedQuery("use %s", sidecar.GetIdentifier()).Query, maxRows, false) if err != nil { return nil, err } @@ -170,7 +182,7 @@ func (se *Engine) EnsureConnectionAndDB(tabletType topodatapb.TabletType) error conn, err := dbconnpool.NewDBConnection(ctx, se.env.Config().DB.AllPrivsWithDB()) if err == nil { se.dbCreationFailed = false - // upgrade _vt if required, for a tablet with an existing database + // upgrade sidecar db if required, for a tablet with an existing database if tabletType == topodatapb.TabletType_PRIMARY { if err := se.syncSidecarDB(ctx, conn); err != nil { conn.Close() @@ -183,7 +195,7 @@ func (se *Engine) EnsureConnectionAndDB(tabletType topodatapb.TabletType) error if tabletType != topodatapb.TabletType_PRIMARY { return err } - if merr, isSQLErr := err.(*mysql.SQLError); !isSQLErr || merr.Num != mysql.ERBadDb { + if merr, isSQLErr := err.(*sqlerror.SQLError); !isSQLErr || merr.Num != sqlerror.ERBadDb { return err } @@ -239,7 +251,7 @@ func (se *Engine) Open() error { }() se.tables = map[string]*Table{ - "dual": NewTable("dual"), + "dual": NewTable("dual", NoType), } se.notifiers = make(map[string]notifier) @@ -316,16 +328,22 @@ func (se *Engine) MakeNonPrimary() { // This function is tested through endtoend test. se.mu.Lock() defer se.mu.Unlock() + se.isServingPrimary = false for _, t := range se.tables { if t.SequenceInfo != nil { - t.SequenceInfo.Lock() - t.SequenceInfo.NextVal = 0 - t.SequenceInfo.LastVal = 0 - t.SequenceInfo.Unlock() + t.SequenceInfo.Reset() } } } +// MakePrimary tells the schema engine that the current tablet is now the primary, +// so it can read and write to the MySQL instance for schema-tracking. +func (se *Engine) MakePrimary(serving bool) { + se.mu.Lock() + defer se.mu.Unlock() + se.isServingPrimary = serving +} + // EnableHistorian forces tracking to be on or off. // Only used for testing. func (se *Engine) EnableHistorian(enabled bool) error { @@ -337,14 +355,14 @@ func (se *Engine) EnableHistorian(enabled bool) error { // The includeStats argument controls whether table size statistics should be // emitted, as they can be expensive to calculate for a large number of tables func (se *Engine) Reload(ctx context.Context) error { - return se.ReloadAt(ctx, mysql.Position{}) + return se.ReloadAt(ctx, replication.Position{}) } // ReloadAt reloads the schema info from the db. // Any tables that have changed since the last load are updated. // It maintains the position at which the schema was reloaded and if the same position is provided // (say by multiple vstreams) it returns the cached schema. In case of a newer or empty pos it always reloads the schema -func (se *Engine) ReloadAt(ctx context.Context, pos mysql.Position) error { +func (se *Engine) ReloadAt(ctx context.Context, pos replication.Position) error { return se.ReloadAtEx(ctx, pos, true) } @@ -354,7 +372,7 @@ func (se *Engine) ReloadAt(ctx context.Context, pos mysql.Position) error { // (say by multiple vstreams) it returns the cached schema. In case of a newer or empty pos it always reloads the schema // The includeStats argument controls whether table size statistics should be // emitted, as they can be expensive to calculate for a large number of tables -func (se *Engine) ReloadAtEx(ctx context.Context, pos mysql.Position, includeStats bool) error { +func (se *Engine) ReloadAtEx(ctx context.Context, pos replication.Position, includeStats bool) error { se.mu.Lock() defer se.mu.Unlock() if !se.isOpen { @@ -362,7 +380,7 @@ func (se *Engine) ReloadAtEx(ctx context.Context, pos mysql.Position, includeSta return nil } if !pos.IsZero() && se.reloadAtPos.AtLeast(pos) { - log.V(2).Infof("ReloadAtEx: found cached schema at %s", mysql.EncodePosition(pos)) + log.V(2).Infof("ReloadAtEx: found cached schema at %s", replication.EncodePosition(pos)) return nil } if err := se.reload(ctx, includeStats); err != nil { @@ -380,6 +398,15 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { se.SchemaReloadTimings.Record("SchemaReload", start) }() + // if this flag is set, then we don't need table meta information + if se.SkipMetaCheck { + return nil + } + + // add a timeout to prevent unbounded waits + ctx, cancel := context.WithTimeout(ctx, se.reloadTimeout) + defer cancel() + conn, err := se.conns.Get(ctx, nil) if err != nil { return err @@ -391,20 +418,26 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { if err != nil { return err } - // if this flag is set, then we don't need table meta information - if se.SkipMetaCheck { - return nil + + tableData, err := getTableData(ctx, conn, includeStats) + if err != nil { + return vterrors.Wrapf(err, "in Engine.reload(), reading tables") } + // On the primary tablet, we also check the data we have stored in our schema tables to see what all needs reloading. + shouldUseDatabase := se.isServingPrimary && se.schemaCopy - var showTablesQuery string - if includeStats { - showTablesQuery = conn.BaseShowTablesWithSizes() - } else { - showTablesQuery = conn.BaseShowTables() + // changedViews are the views that have changed. We can't use the same createTime logic for views because, MySQL + // doesn't update the create_time field for views when they are altered. This is annoying, but something we have to work around. + changedViews, err := getChangedViewNames(ctx, conn, shouldUseDatabase) + if err != nil { + return err } - tableData, err := conn.Exec(ctx, showTablesQuery, maxTableCount, false) + // mismatchTables stores the tables whose createTime in our cache doesn't match the createTime stored in the database. + // This can happen if a primary crashed right after a DML succeeded, before it could reload its state. If all the replicas + // are able to reload their cache before one of them is promoted, then the database information would be out of sync. + mismatchTables, err := se.getMismatchedTableNames(ctx, conn, shouldUseDatabase) if err != nil { - return vterrors.Wrapf(err, "in Engine.reload(), reading tables") + return err } err = se.updateInnoDBRowsRead(ctx, conn) @@ -418,16 +451,16 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { // changedTables keeps track of tables that have changed so we can reload their pk info. changedTables := make(map[string]*Table) // created and altered contain the names of created and altered tables for broadcast. - var created, altered []string + var created, altered []*Table for _, row := range tableData.Rows { tableName := row[0].ToString() curTables[tableName] = true - createTime, _ := evalengine.ToInt64(row[2]) + createTime, _ := row[2].ToCastInt64() var fileSize, allocatedSize uint64 if includeStats { - fileSize, _ = evalengine.ToUint64(row[4]) - allocatedSize, _ = evalengine.ToUint64(row[5]) + fileSize, _ = row[4].ToCastUint64() + allocatedSize, _ = row[5].ToCastUint64() // publish the size metrics se.tableFileSizeGauge.Set(tableName, int64(fileSize)) se.tableAllocatedSizeGauge.Set(tableName, int64(allocatedSize)) @@ -443,8 +476,18 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { // renamed to the table being altered. `se.lastChange` is updated every time the schema is reloaded (default: 30m). // Online DDL can take hours. So it is possible that the `create_time` of the temporary table is before se.lastChange. Hence, // #1 will not identify the renamed table as a changed one. + // + // 3. A table's create_time in our database doesn't match the create_time in the cache. This can happen if a primary crashed right after a DML succeeded, + // before it could reload its state. If all the replicas are able to reload their cache before one of them is promoted, + // then the database information would be out of sync. We check this by consulting the mismatchTables map. + // + // 4. A view's definition has changed. We can't use the same createTime logic for views because, MySQL + // doesn't update the create_time field for views when they are altered. This is annoying, but something we have to work around. + // We check this by consulting the changedViews map. tbl, isInTablesMap := se.tables[tableName] - if isInTablesMap && createTime == tbl.CreateTime && createTime < se.lastChange { + _, isInChangedViewMap := changedViews[tableName] + _, isInMismatchTableMap := mismatchTables[tableName] + if isInTablesMap && createTime == tbl.CreateTime && createTime < se.lastChange && !isInChangedViewMap && !isInMismatchTableMap { if includeStats { tbl.FileSize = fileSize tbl.AllocatedSize = allocatedSize @@ -453,8 +496,14 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { } log.V(2).Infof("Reading schema for table: %s", tableName) - table, err := LoadTable(conn, se.cp.DBName(), tableName, row[3].ToString()) + tableType := row[1].String() + table, err := LoadTable(conn, se.cp.DBName(), tableName, tableType, row[3].ToString()) if err != nil { + if isView := strings.Contains(tableType, tmutils.TableView); isView { + log.Warningf("Failed reading schema for the view: %s, error: %v", tableName, err) + continue + } + // Non recoverable error: rec.RecordError(vterrors.Wrapf(err, "in Engine.reload(), reading table %s", tableName)) continue } @@ -465,45 +514,91 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { table.CreateTime = createTime changedTables[tableName] = table if isInTablesMap { - altered = append(altered, tableName) + altered = append(altered, table) } else { - created = append(created, tableName) + created = append(created, table) } } if rec.HasErrors() { return rec.Error() } - // Compute and handle dropped tables. - var dropped []string - for tableName := range se.tables { - if !curTables[tableName] { - dropped = append(dropped, tableName) - delete(se.tables, tableName) - // We can't actually delete the label from the stats, but we can set it to 0. - // Many monitoring tools will drop zero-valued metrics. - se.tableFileSizeGauge.Reset(tableName) - se.tableAllocatedSizeGauge.Reset(tableName) - } - } + dropped := se.getDroppedTables(curTables, changedViews, mismatchTables) // Populate PKColumns for changed tables. if err := se.populatePrimaryKeys(ctx, conn, changedTables); err != nil { return err } + // If this tablet is the primary and schema tracking is required, we should reload the information in our database. + if shouldUseDatabase { + // If reloadDataInDB succeeds, then we don't want to prevent sending the broadcast notification. + // So, we do this step in the end when we can receive no more errors that fail the reload operation. + err = reloadDataInDB(ctx, conn, altered, created, dropped) + if err != nil { + log.Errorf("error in updating schema information in Engine.reload() - %v", err) + } + } + // Update se.tables for k, t := range changedTables { se.tables[k] = t } se.lastChange = curTime if len(created) > 0 || len(altered) > 0 || len(dropped) > 0 { - log.Infof("schema engine created %v, altered %v, dropped %v", created, altered, dropped) + log.Infof("schema engine created %v, altered %v, dropped %v", extractNamesFromTablesList(created), extractNamesFromTablesList(altered), extractNamesFromTablesList(dropped)) } se.broadcast(created, altered, dropped) return nil } +func (se *Engine) getDroppedTables(curTables map[string]bool, changedViews map[string]any, mismatchTables map[string]any) []*Table { + // Compute and handle dropped tables. + dropped := make(map[string]*Table) + for tableName, table := range se.tables { + if !curTables[tableName] { + dropped[tableName] = table + delete(se.tables, tableName) + // We can't actually delete the label from the stats, but we can set it to 0. + // Many monitoring tools will drop zero-valued metrics. + se.tableFileSizeGauge.Reset(tableName) + se.tableAllocatedSizeGauge.Reset(tableName) + } + } + + // If we have a view that has changed, but doesn't exist in the current list of tables, + // then it was dropped before, and we were unable to update our database. So, we need to signal its + // drop again. + for viewName := range changedViews { + _, alreadyExists := dropped[viewName] + if !curTables[viewName] && !alreadyExists { + dropped[viewName] = NewTable(viewName, View) + } + } + + // If we have a table that has a mismatch, but doesn't exist in the current list of tables, + // then it was dropped before, and we were unable to update our database. So, we need to signal its + // drop again. + for tableName := range mismatchTables { + _, alreadyExists := dropped[tableName] + if !curTables[tableName] && !alreadyExists { + dropped[tableName] = NewTable(tableName, NoType) + } + } + + return maps2.Values(dropped) +} + +func getTableData(ctx context.Context, conn *connpool.DBConn, includeStats bool) (*sqltypes.Result, error) { + var showTablesQuery string + if includeStats { + showTablesQuery = conn.BaseShowTablesWithSizes() + } else { + showTablesQuery = conn.BaseShowTables() + } + return conn.Exec(ctx, showTablesQuery, maxTableCount, false) +} + func (se *Engine) updateInnoDBRowsRead(ctx context.Context, conn *connpool.DBConn) error { readRowsData, err := conn.Exec(ctx, mysql.ShowRowsRead, 10, false) if err != nil { @@ -511,7 +606,7 @@ func (se *Engine) updateInnoDBRowsRead(ctx context.Context, conn *connpool.DBCon } if len(readRowsData.Rows) == 1 && len(readRowsData.Rows[0]) == 2 { - value, err := evalengine.ToInt64(readRowsData.Rows[0][1]) + value, err := readRowsData.Rows[0][1].ToCastInt64() if err != nil { return err } @@ -532,7 +627,7 @@ func (se *Engine) mysqlTime(ctx context.Context, conn *connpool.DBConn) (int64, if len(tm.Rows) != 1 || len(tm.Rows[0]) != 1 || tm.Rows[0][0].IsNull() { return 0, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "unexpected result for MySQL time: %+v", tm.Rows) } - t, err := evalengine.ToInt64(tm.Rows[0][0]) + t, err := tm.Rows[0][0].ToCastInt64() if err != nil { return 0, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not parse time %v: %v", tm, err) } @@ -561,8 +656,9 @@ func (se *Engine) populatePrimaryKeys(ctx context.Context, conn *connpool.DBConn return nil } -// RegisterVersionEvent is called by the vstream when it encounters a version event (an insert into _vt.schema_tracking) -// It triggers the historian to load the newer rows from the database to update its cache +// RegisterVersionEvent is called by the vstream when it encounters a version event (an +// insert into the schema_tracking table). It triggers the historian to load the newer +// rows from the database to update its cache. func (se *Engine) RegisterVersionEvent() error { return se.historian.RegisterVersionEvent() } @@ -596,7 +692,7 @@ func (se *Engine) GetTableForPos(tableName sqlparser.IdentifierCS, gtid string) // It also causes an immediate notification to the caller. The notified // function must not change the map or its contents. The only exception // is the sequence table where the values can be changed using the lock. -func (se *Engine) RegisterNotifier(name string, f notifier) { +func (se *Engine) RegisterNotifier(name string, f notifier, runNotifier bool) { if !se.isOpen { return } @@ -605,11 +701,13 @@ func (se *Engine) RegisterNotifier(name string, f notifier) { defer se.notifierMu.Unlock() se.notifiers[name] = f - var created []string - for tableName := range se.tables { - created = append(created, tableName) + var created []*Table + for _, table := range se.tables { + created = append(created, table) + } + if runNotifier { + f(se.tables, created, nil, nil) } - f(se.tables, created, nil, nil) } // UnregisterNotifier unregisters the notifier function. @@ -629,7 +727,7 @@ func (se *Engine) UnregisterNotifier(name string) { } // broadcast must be called while holding a lock on se.mu. -func (se *Engine) broadcast(created, altered, dropped []string) { +func (se *Engine) broadcast(created, altered, dropped []*Table) { if !se.isOpen { return } @@ -664,6 +762,32 @@ func (se *Engine) GetSchema() map[string]*Table { return tables } +// MarshalMinimalSchema returns a protobuf encoded binlogdata.MinimalSchema +func (se *Engine) MarshalMinimalSchema() ([]byte, error) { + se.mu.Lock() + defer se.mu.Unlock() + dbSchema := &binlogdatapb.MinimalSchema{ + Tables: make([]*binlogdatapb.MinimalTable, 0, len(se.tables)), + } + for _, table := range se.tables { + dbSchema.Tables = append(dbSchema.Tables, newMinimalTable(table)) + } + return dbSchema.MarshalVT() +} + +func newMinimalTable(st *Table) *binlogdatapb.MinimalTable { + table := &binlogdatapb.MinimalTable{ + Name: st.Name.String(), + Fields: st.Fields, + } + pkc := make([]int64, len(st.PKColumns)) + for i, pk := range st.PKColumns { + pkc[i] = int64(pk) + } + table.PKColumns = pkc + return table +} + // GetConnection returns a connection from the pool func (se *Engine) GetConnection(ctx context.Context) (*connpool.DBConn, error) { return se.conns.Get(ctx, nil) @@ -706,7 +830,7 @@ func NewEngineForTests() *Engine { se := &Engine{ isOpen: true, tables: make(map[string]*Table), - historian: newHistorian(false, nil), + historian: newHistorian(false, 0, nil), } return se } @@ -717,3 +841,31 @@ func (se *Engine) SetTableForTests(table *Table) { defer se.mu.Unlock() se.tables[table.Name.String()] = table } + +func (se *Engine) GetDBConnector() dbconfigs.Connector { + return se.cp +} + +func extractNamesFromTablesList(tables []*Table) []string { + var tableNames []string + for _, table := range tables { + tableNames = append(tableNames, table.Name.String()) + } + return tableNames +} + +func (se *Engine) ResetSequences(tables []string) error { + se.mu.Lock() + defer se.mu.Unlock() + for _, tableName := range tables { + if table, ok := se.tables[tableName]; ok { + if table.SequenceInfo != nil { + log.Infof("Resetting sequence info for table %v: %s", tableName, table.SequenceInfo) + table.SequenceInfo.Reset() + } + } else { + return vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "table %v not found in schema", tableName) + } + } + return nil +} diff --git a/go/vt/vttablet/tabletserver/schema/engine_test.go b/go/vt/vttablet/tabletserver/schema/engine_test.go index c38a63e2481..4000795d9d0 100644 --- a/go/vt/vttablet/tabletserver/schema/engine_test.go +++ b/go/vt/vttablet/tabletserver/schema/engine_test.go @@ -18,6 +18,7 @@ package schema import ( "context" + "errors" "expvar" "fmt" "net/http" @@ -27,20 +28,26 @@ import ( "testing" "time" - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/constants/sidecar" + + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" + + "vitess.io/vitess/go/event/syslogger" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/dbconfigs" + querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema/schematest" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" - - querypb "vitess.io/vitess/go/vt/proto/query" ) const baseShowTablesPattern = `SELECT t\.table_name.*` @@ -75,7 +82,7 @@ func TestOpenAndReload(t *testing.T) { )) firstReadRowsValue := 12 AddFakeInnoDBReadRowsResult(db, firstReadRowsValue) - se := newEngine(10, 10*time.Second, 10*time.Second, db) + se := newEngine(10*time.Second, 10*time.Second, 0, db) se.Open() defer se.Close() @@ -148,21 +155,21 @@ func TestOpenAndReload(t *testing.T) { AddFakeInnoDBReadRowsResult(db, secondReadRowsValue) firstTime := true - notifier := func(full map[string]*Table, created, altered, dropped []string) { + notifier := func(full map[string]*Table, created, altered, dropped []*Table) { if firstTime { firstTime = false - sort.Strings(created) - assert.Equal(t, []string{"dual", "msg", "seq", "test_table_01", "test_table_02", "test_table_03"}, created) - assert.Equal(t, []string(nil), altered) - assert.Equal(t, []string(nil), dropped) + createTables := extractNamesFromTablesList(created) + sort.Strings(createTables) + assert.Equal(t, []string{"dual", "msg", "seq", "test_table_01", "test_table_02", "test_table_03"}, createTables) + assert.Equal(t, []*Table(nil), altered) + assert.Equal(t, []*Table(nil), dropped) } else { - assert.Equal(t, []string{"test_table_04"}, created) - assert.Equal(t, []string{"test_table_03"}, altered) - sort.Strings(dropped) - assert.Equal(t, []string{"msg"}, dropped) + assert.Equal(t, []string{"test_table_04"}, extractNamesFromTablesList(created)) + assert.Equal(t, []string{"test_table_03"}, extractNamesFromTablesList(altered)) + assert.Equal(t, []string{"msg"}, extractNamesFromTablesList(dropped)) } } - se.RegisterNotifier("test", notifier) + se.RegisterNotifier("test", notifier, true) err := se.Reload(context.Background()) require.NoError(t, err) @@ -201,14 +208,14 @@ func TestOpenAndReload(t *testing.T) { assert.Equal(t, int64(0), se.tableAllocatedSizeGauge.Counts()["msg"]) assert.Equal(t, int64(0), se.tableFileSizeGauge.Counts()["msg"]) - //ReloadAt tests - pos1, err := mysql.DecodePosition("MariaDB/0-41983-20") + // ReloadAt tests + pos1, err := replication.DecodePosition("MariaDB/0-41983-20") require.NoError(t, err) - pos2, err := mysql.DecodePosition("MariaDB/0-41983-40") + pos2, err := replication.DecodePosition("MariaDB/0-41983-40") require.NoError(t, err) se.UnregisterNotifier("test") - err = se.ReloadAt(context.Background(), mysql.Position{}) + err = se.ReloadAt(context.Background(), replication.Position{}) require.NoError(t, err) assert.Equal(t, want, se.GetSchema()) @@ -266,7 +273,7 @@ func TestReloadWithSwappedTables(t *testing.T) { firstReadRowsValue := 12 AddFakeInnoDBReadRowsResult(db, firstReadRowsValue) - se := newEngine(10, 10*time.Second, 10*time.Second, db) + se := newEngine(10*time.Second, 10*time.Second, 0, db) se.Open() defer se.Close() want := initialSchema() @@ -416,14 +423,17 @@ func TestOpenFailedDueToExecErr(t *testing.T) { schematest.AddDefaultQueries(db) want := "injected error" db.RejectQueryPattern(baseShowTablesPattern, want) - se := newEngine(10, 1*time.Second, 1*time.Second, db) + se := newEngine(1*time.Second, 1*time.Second, 0, db) err := se.Open() if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("se.Open: %v, want %s", err, want) } } -func TestOpenFailedDueToTableErr(t *testing.T) { +// TestOpenFailedDueToLoadTableErr tests that schema engine load should not fail instead should log the failures. +func TestOpenFailedDueToLoadTableErr(t *testing.T) { + tl := syslogger.NewTestLogger() + defer tl.Close() db := fakesqldb.New(t) defer db.Close() schematest.AddDefaultQueries(db) @@ -431,34 +441,71 @@ func TestOpenFailedDueToTableErr(t *testing.T) { Fields: mysql.BaseShowTablesFields, Rows: [][]sqltypes.Value{ mysql.BaseShowTablesRow("test_table", false, ""), + mysql.BaseShowTablesRow("test_view", true, "VIEW"), }, }) - db.MockQueriesForTable("test_table", &sqltypes.Result{ - // this will cause NewTable error, as it expects zero rows. - Fields: []*querypb.Field{ - { - Type: querypb.Type_VARCHAR, - }, - }, + // this will cause NewTable error, as it expects zero rows. + db.MockQueriesForTable("test_table", sqltypes.MakeTestResult(sqltypes.MakeTestFields("foo", "varchar"), "")) + + // adding column query for table_view + db.AddQueryPattern(fmt.Sprintf(mysql.GetColumnNamesQueryPatternForTable, "test_view"), + sqltypes.MakeTestResult(sqltypes.MakeTestFields("column_name", "varchar"), "")) + // rejecting the impossible query + db.AddRejectedQuery("SELECT * FROM `fakesqldb`.`test_view` WHERE 1 != 1", sqlerror.NewSQLErrorFromError(errors.New("The user specified as a definer ('root'@'%') does not exist (errno 1449) (sqlstate HY000)"))) + + AddFakeInnoDBReadRowsResult(db, 0) + se := newEngine(1*time.Second, 1*time.Second, 0, db) + err := se.Open() + // failed load should return an error because of test_table + assert.ErrorContains(t, err, "Row count exceeded") + + logs := tl.GetAllLogs() + logOutput := strings.Join(logs, ":::") + assert.Contains(t, logOutput, "WARNING:Failed reading schema for the view: test_view") + assert.Contains(t, logOutput, "The user specified as a definer ('root'@'%') does not exist (errno 1449) (sqlstate HY000)") +} + +// TestOpenNoErrorDueToInvalidViews tests that schema engine load does not fail instead should log the failures for the views +func TestOpenNoErrorDueToInvalidViews(t *testing.T) { + tl := syslogger.NewTestLogger() + defer tl.Close() + db := fakesqldb.New(t) + defer db.Close() + schematest.AddDefaultQueries(db) + db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{ + Fields: mysql.BaseShowTablesFields, Rows: [][]sqltypes.Value{ - {sqltypes.NewVarBinary("")}, + mysql.BaseShowTablesRow("foo_view", true, "VIEW"), + mysql.BaseShowTablesRow("bar_view", true, "VIEW"), }, }) + // adding column query for table_view + db.AddQueryPattern(fmt.Sprintf(mysql.GetColumnNamesQueryPatternForTable, "foo_view"), + &sqltypes.Result{}) + db.AddQueryPattern(fmt.Sprintf(mysql.GetColumnNamesQueryPatternForTable, "bar_view"), + sqltypes.MakeTestResult(sqltypes.MakeTestFields("column_name", "varchar"), "col1", "col2")) + // rejecting the impossible query + db.AddRejectedQuery("SELECT `col1`, `col2` FROM `fakesqldb`.`bar_view` WHERE 1 != 1", sqlerror.NewSQLError(sqlerror.ERWrongFieldWithGroup, sqlerror.SSClientError, "random error for table bar_view")) + AddFakeInnoDBReadRowsResult(db, 0) - se := newEngine(10, 1*time.Second, 1*time.Second, db) + se := newEngine(1*time.Second, 1*time.Second, 0, db) err := se.Open() - want := "Row count exceeded" - if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("se.Open: %v, want %s", err, want) - } + require.NoError(t, err) + + logs := tl.GetAllLogs() + logOutput := strings.Join(logs, ":::") + assert.Contains(t, logOutput, "WARNING:Failed reading schema for the view: foo_view") + assert.Contains(t, logOutput, "unable to get columns for table fakesqldb.foo_view") + assert.Contains(t, logOutput, "WARNING:Failed reading schema for the view: bar_view") + assert.Contains(t, logOutput, "random error for table bar_view") } func TestExportVars(t *testing.T) { db := fakesqldb.New(t) defer db.Close() schematest.AddDefaultQueries(db) - se := newEngine(10, 1*time.Second, 1*time.Second, db) + se := newEngine(1*time.Second, 1*time.Second, 0, db) se.Open() defer se.Close() expvar.Do(func(kv expvar.KeyValue) { @@ -470,7 +517,7 @@ func TestStatsURL(t *testing.T) { db := fakesqldb.New(t) defer db.Close() schematest.AddDefaultQueries(db) - se := newEngine(10, 1*time.Second, 1*time.Second, db) + se := newEngine(1*time.Second, 1*time.Second, 0, db) se.Open() defer se.Close() @@ -500,7 +547,7 @@ func TestSchemaEngineCloseTickRace(t *testing.T) { }) AddFakeInnoDBReadRowsResult(db, 12) // Start the engine with a small reload tick - se := newEngine(10, 100*time.Millisecond, 1*time.Second, db) + se := newEngine(100*time.Millisecond, 1*time.Second, 0, db) err := se.Open() require.NoError(t, err) @@ -527,13 +574,13 @@ func TestSchemaEngineCloseTickRace(t *testing.T) { } } -func newEngine(queryCacheSize int, reloadTime time.Duration, idleTimeout time.Duration, db *fakesqldb.DB) *Engine { +func newEngine(reloadTime time.Duration, idleTimeout time.Duration, schemaMaxAgeSeconds int64, db *fakesqldb.DB) *Engine { config := tabletenv.NewDefaultConfig() - config.QueryCacheSize = queryCacheSize - config.SchemaReloadIntervalSeconds.Set(reloadTime) - config.OltpReadPool.IdleTimeoutSeconds.Set(idleTimeout) - config.OlapReadPool.IdleTimeoutSeconds.Set(idleTimeout) - config.TxPool.IdleTimeoutSeconds.Set(idleTimeout) + _ = config.SchemaReloadIntervalSeconds.Set(reloadTime.String()) + _ = config.OltpReadPool.IdleTimeoutSeconds.Set(idleTimeout.String()) + _ = config.OlapReadPool.IdleTimeoutSeconds.Set(idleTimeout.String()) + _ = config.TxPool.IdleTimeoutSeconds.Set(idleTimeout.String()) + config.SchemaVersionMaxAgeSeconds = schemaMaxAgeSeconds se := NewEngine(tabletenv.NewEnv(config, "SchemaTest")) se.InitDBConfig(newDBConfigs(db).DbaWithDB()) return se @@ -657,3 +704,568 @@ func AddFakeInnoDBReadRowsResult(db *fakesqldb.DB, value int) *fakesqldb.Expecte fmt.Sprintf("Innodb_rows_read|%d", value), )) } + +// TestEngineMysqlTime tests the functionality of Engine.mysqlTime function +func TestEngineMysqlTime(t *testing.T) { + tests := []struct { + name string + timeStampResult []string + timeStampErr error + wantTime int64 + wantErr string + }{ + { + name: "Success", + timeStampResult: []string{"1685115631"}, + wantTime: 1685115631, + }, { + name: "Error in result", + timeStampErr: errors.New("some error in MySQL"), + wantErr: "some error in MySQL", + }, { + name: "Error in parsing", + timeStampResult: []string{"16851r15631"}, + wantErr: "could not parse time", + }, { + name: "More than 1 result", + timeStampResult: []string{"1685115631", "3241241"}, + wantErr: "could not get MySQL time", + }, { + name: "Null result", + timeStampResult: []string{"null"}, + wantErr: "unexpected result for MySQL time", + }, + } + + query := "SELECT UNIX_TIMESTAMP()" + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + se := &Engine{} + db := fakesqldb.New(t) + conn, err := connpool.NewDBConnNoPool(context.Background(), db.ConnParams(), nil, nil) + require.NoError(t, err) + + if tt.timeStampErr != nil { + db.AddRejectedQuery(query, tt.timeStampErr) + } else { + db.AddQuery(query, sqltypes.MakeTestResult(sqltypes.MakeTestFields("UNIX_TIMESTAMP", "int64"), tt.timeStampResult...)) + } + + gotTime, err := se.mysqlTime(context.Background(), conn) + if tt.wantErr != "" { + require.ErrorContains(t, err, tt.wantErr) + return + } + require.EqualValues(t, tt.wantTime, gotTime) + require.NoError(t, db.LastError()) + }) + } +} + +// TestEnginePopulatePrimaryKeys tests the functionality of Engine.populatePrimaryKeys function +func TestEnginePopulatePrimaryKeys(t *testing.T) { + tests := []struct { + name string + tables map[string]*Table + pkIndexes map[string]int + expectedQueries map[string]*sqltypes.Result + queriesToReject map[string]error + expectedError string + }{ + { + name: "Success", + tables: map[string]*Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Fields: []*querypb.Field{ + { + Name: "col1", + }, { + Name: "col2", + }, + }, + Type: NoType, + }, "t2": { + Name: sqlparser.NewIdentifierCS("t2"), + Fields: []*querypb.Field{ + { + Name: "id", + }, + }, + Type: NoType, + }, + }, + expectedQueries: map[string]*sqltypes.Result{ + mysql.BaseShowPrimary: sqltypes.MakeTestResult(mysql.ShowPrimaryFields, + "t1|col2", + "t2|id"), + }, + pkIndexes: map[string]int{ + "t1": 1, + "t2": 0, + }, + }, { + name: "Error in finding column", + tables: map[string]*Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Fields: []*querypb.Field{ + { + Name: "col1", + }, { + Name: "col2", + }, + }, + Type: NoType, + }, + }, + expectedQueries: map[string]*sqltypes.Result{ + mysql.BaseShowPrimary: sqltypes.MakeTestResult(mysql.ShowPrimaryFields, + "t1|col5"), + }, + expectedError: "column col5 is listed as primary key, but not present in table t1", + }, { + name: "Error in query", + tables: map[string]*Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Fields: []*querypb.Field{ + { + Name: "col1", + }, { + Name: "col2", + }, + }, + Type: NoType, + }, + }, + queriesToReject: map[string]error{ + mysql.BaseShowPrimary: errors.New("some error in MySQL"), + }, + expectedError: "could not get table primary key info", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db := fakesqldb.New(t) + conn, err := connpool.NewDBConnNoPool(context.Background(), db.ConnParams(), nil, nil) + require.NoError(t, err) + se := &Engine{} + + for query, result := range tt.expectedQueries { + db.AddQuery(query, result) + } + for query, errToThrow := range tt.queriesToReject { + db.AddRejectedQuery(query, errToThrow) + } + + err = se.populatePrimaryKeys(context.Background(), conn, tt.tables) + if tt.expectedError != "" { + require.ErrorContains(t, err, tt.expectedError) + return + } + require.NoError(t, err) + require.NoError(t, db.LastError()) + for table, index := range tt.pkIndexes { + require.Equal(t, index, tt.tables[table].PKColumns[0]) + } + }) + } +} + +// TestEngineUpdateInnoDBRowsRead tests the functionality of Engine.updateInnoDBRowsRead function +func TestEngineUpdateInnoDBRowsRead(t *testing.T) { + showRowsReadFields := sqltypes.MakeTestFields("Variable_name|Value", "varchar|int64") + tests := []struct { + name string + innoDbReadRowsCounter int + expectedQueries map[string]*sqltypes.Result + queriesToReject map[string]error + expectedError string + }{ + { + name: "Success", + expectedQueries: map[string]*sqltypes.Result{ + mysql.ShowRowsRead: sqltypes.MakeTestResult(showRowsReadFields, + "Innodb_rows_read|35"), + }, + innoDbReadRowsCounter: 35, + }, { + name: "Unexpected result", + expectedQueries: map[string]*sqltypes.Result{ + mysql.ShowRowsRead: sqltypes.MakeTestResult(showRowsReadFields, + "Innodb_rows_read|35", + "Innodb_rows_read|37"), + }, + innoDbReadRowsCounter: 0, + }, { + name: "Error in query", + queriesToReject: map[string]error{ + mysql.ShowRowsRead: errors.New("some error in MySQL"), + }, + expectedError: "some error in MySQL", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db := fakesqldb.New(t) + conn, err := connpool.NewDBConnNoPool(context.Background(), db.ConnParams(), nil, nil) + require.NoError(t, err) + se := &Engine{} + se.innoDbReadRowsCounter = stats.NewCounter("TestEngineUpdateInnoDBRowsRead-"+tt.name, "") + + for query, result := range tt.expectedQueries { + db.AddQuery(query, result) + } + for query, errToThrow := range tt.queriesToReject { + db.AddRejectedQuery(query, errToThrow) + } + + err = se.updateInnoDBRowsRead(context.Background(), conn) + if tt.expectedError != "" { + require.ErrorContains(t, err, tt.expectedError) + return + } + require.NoError(t, err) + require.NoError(t, db.LastError()) + require.EqualValues(t, tt.innoDbReadRowsCounter, se.innoDbReadRowsCounter.Get()) + }) + } +} + +// TestEngineGetTableData tests the functionality of getTableData function +func TestEngineGetTableData(t *testing.T) { + db := fakesqldb.New(t) + conn, err := connpool.NewDBConnNoPool(context.Background(), db.ConnParams(), nil, nil) + require.NoError(t, err) + + tests := []struct { + name string + expectedQueries map[string]*sqltypes.Result + queriesToReject map[string]error + includeStats bool + expectedError string + }{ + { + name: "Success", + expectedQueries: map[string]*sqltypes.Result{ + conn.BaseShowTables(): {}, + }, + includeStats: false, + }, { + name: "Success with include stats", + expectedQueries: map[string]*sqltypes.Result{ + conn.BaseShowTablesWithSizes(): {}, + }, + includeStats: true, + }, { + name: "Error in query", + queriesToReject: map[string]error{ + conn.BaseShowTables(): errors.New("some error in MySQL"), + }, + expectedError: "some error in MySQL", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db.ClearQueryPattern() + + for query, result := range tt.expectedQueries { + db.AddQuery(query, result) + defer db.DeleteQuery(query) + } + for query, errToThrow := range tt.queriesToReject { + db.AddRejectedQuery(query, errToThrow) + defer db.DeleteRejectedQuery(query) + } + + _, err = getTableData(context.Background(), conn, tt.includeStats) + if tt.expectedError != "" { + require.ErrorContains(t, err, tt.expectedError) + return + } + require.NoError(t, err) + require.NoError(t, db.LastError()) + }) + } +} + +// TestEngineGetDroppedTables tests the functionality of Engine.getDroppedTables function +func TestEngineGetDroppedTables(t *testing.T) { + tests := []struct { + name string + tables map[string]*Table + curTables map[string]bool + changedViews map[string]any + mismatchTables map[string]any + wantDroppedTables []*Table + }{ + { + name: "No mismatched tables or changed views", + tables: map[string]*Table{ + "t1": NewTable("t1", NoType), + "t2": NewTable("t2", NoType), + "t3": NewTable("t3", NoType), + }, + curTables: map[string]bool{ + "t4": true, + "t2": true, + }, + wantDroppedTables: []*Table{ + NewTable("t1", NoType), + NewTable("t3", NoType), + }, + }, { + name: "Mismatched tables having a dropped table", + tables: map[string]*Table{ + "t1": NewTable("t1", NoType), + "t2": NewTable("t2", NoType), + "t3": NewTable("t3", NoType), + "v2": NewTable("v2", View), + }, + curTables: map[string]bool{ + "t4": true, + "t2": true, + }, + mismatchTables: map[string]any{ + "t5": true, + "v2": true, + }, + wantDroppedTables: []*Table{ + NewTable("t1", NoType), + NewTable("t3", NoType), + NewTable("t5", NoType), + NewTable("v2", View), + }, + }, { + name: "Changed views having a dropped view", + tables: map[string]*Table{ + "t1": NewTable("t1", NoType), + "t2": NewTable("t2", NoType), + "t3": NewTable("t3", NoType), + "v2": NewTable("v2", NoType), + }, + curTables: map[string]bool{ + "t4": true, + "t2": true, + }, + changedViews: map[string]any{ + "v1": true, + "v2": true, + }, + wantDroppedTables: []*Table{ + NewTable("t1", NoType), + NewTable("t3", NoType), + NewTable("v1", View), + NewTable("v2", NoType), + }, + }, { + name: "Both have dropped tables", + tables: map[string]*Table{ + "t1": NewTable("t1", NoType), + "t2": NewTable("t2", NoType), + "t3": NewTable("t3", NoType), + "v2": NewTable("v2", NoType), + "v3": NewTable("v3", View), + }, + curTables: map[string]bool{ + "t4": true, + "t2": true, + }, + changedViews: map[string]any{ + "v1": true, + "v2": true, + }, + mismatchTables: map[string]any{ + "t5": true, + "v3": true, + }, + wantDroppedTables: []*Table{ + NewTable("t1", NoType), + NewTable("t3", NoType), + NewTable("t5", NoType), + NewTable("v1", View), + NewTable("v3", View), + NewTable("v2", NoType), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + se := &Engine{ + tables: tt.tables, + } + se.tableFileSizeGauge = stats.NewGaugesWithSingleLabel("TestEngineGetDroppedTables-"+tt.name, "", "Table") + se.tableAllocatedSizeGauge = stats.NewGaugesWithSingleLabel("TestEngineGetDroppedTables-allocated-"+tt.name, "", "Table") + gotDroppedTables := se.getDroppedTables(tt.curTables, tt.changedViews, tt.mismatchTables) + require.ElementsMatch(t, gotDroppedTables, tt.wantDroppedTables) + }) + } +} + +// TestEngineReload tests the entire functioning of engine.Reload testing all the queries that we end up running against MySQL +// while simulating the responses and verifies the final list of created, altered and dropped tables. +func TestEngineReload(t *testing.T) { + db := fakesqldb.New(t) + cfg := tabletenv.NewDefaultConfig() + cfg.DB = newDBConfigs(db) + cfg.SignalWhenSchemaChange = true + conn, err := connpool.NewDBConnNoPool(context.Background(), db.ConnParams(), nil, nil) + require.NoError(t, err) + + se := newEngine(10*time.Second, 10*time.Second, 0, db) + se.conns.Open(se.cp, se.cp, se.cp) + se.isOpen = true + se.notifiers = make(map[string]notifier) + se.MakePrimary(true) + + // If we have to skip the meta check, then there is nothing to do + se.SkipMetaCheck = true + err = se.reload(context.Background(), false) + require.NoError(t, err) + + se.SkipMetaCheck = false + se.lastChange = 987654321 + + // Initial tables in the schema engine + se.tables = map[string]*Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Type: NoType, + CreateTime: 123456789, + }, + "t2": { + Name: sqlparser.NewIdentifierCS("t2"), + Type: NoType, + CreateTime: 123456789, + }, + "t4": { + Name: sqlparser.NewIdentifierCS("t4"), + Type: NoType, + CreateTime: 123456789, + }, + "v1": { + Name: sqlparser.NewIdentifierCS("v1"), + Type: View, + CreateTime: 123456789, + }, + "v2": { + Name: sqlparser.NewIdentifierCS("v2"), + Type: View, + CreateTime: 123456789, + }, + "v4": { + Name: sqlparser.NewIdentifierCS("v4"), + Type: View, + CreateTime: 123456789, + }, + } + // MySQL unix timestamp query. + db.AddQuery("SELECT UNIX_TIMESTAMP()", sqltypes.MakeTestResult(sqltypes.MakeTestFields("UNIX_TIMESTAMP", "int64"), "987654326")) + // Table t2 is updated, t3 is created and t4 is deleted. + // View v2 is updated, v3 is created and v4 is deleted. + db.AddQuery(conn.BaseShowTables(), sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|table_type|unix_timestamp(create_time)|table_comment", + "varchar|varchar|int64|varchar"), + "t1|BASE_TABLE|123456789|", + "t2|BASE_TABLE|123456790|", + "t3|BASE_TABLE|123456789|", + "v1|VIEW|123456789|", + "v2|VIEW|123456789|", + "v3|VIEW|123456789|", + )) + + // Detecting view changes. + // According to the database, v2, v3, v4, and v5 require updating. + db.AddQuery(fmt.Sprintf(detectViewChange, sidecar.GetIdentifier()), sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name", "varchar"), + "v2", + "v3", + "v4", + "v5", + )) + + // Finding mismatches in the tables. + // t5 exists in the database. + db.AddQuery("SELECT TABLE_NAME, CREATE_TIME FROM _vt.`tables`", sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|create_time", "varchar|int64"), + "t1|123456789", + "t2|123456789", + "t4|123456789", + "t5|123456789", + )) + + // Read Innodb_rows_read. + db.AddQuery(mysql.ShowRowsRead, sqltypes.MakeTestResult(sqltypes.MakeTestFields("Variable_name|Value", "varchar|int64"), + "Innodb_rows_read|35")) + + // Queries to load the tables' information. + for _, tableName := range []string{"t2", "t3", "v2", "v3"} { + db.AddQuery(fmt.Sprintf(`SELECT COLUMN_NAME as column_name + FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_SCHEMA = 'fakesqldb' AND TABLE_NAME = '%s' + ORDER BY ORDINAL_POSITION`, tableName), + sqltypes.MakeTestResult(sqltypes.MakeTestFields("column_name", "varchar"), + "col1")) + db.AddQuery(fmt.Sprintf("SELECT `col1` FROM `fakesqldb`.`%v` WHERE 1 != 1", tableName), sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1", "varchar"))) + } + + // Primary key information. + db.AddQuery(mysql.BaseShowPrimary, sqltypes.MakeTestResult(mysql.ShowPrimaryFields, + "t1|col1", + "t2|col1", + "t3|col1", + )) + + // Queries for reloading the tables' information. + { + for _, tableName := range []string{"t2", "t3"} { + db.AddQuery(fmt.Sprintf(`show create table %s`, tableName), + sqltypes.MakeTestResult(sqltypes.MakeTestFields("Table | Create Table", "varchar|varchar"), + fmt.Sprintf("%v|create_table_%v", tableName, tableName))) + } + db.AddQuery("begin", &sqltypes.Result{}) + db.AddQuery("commit", &sqltypes.Result{}) + db.AddQuery("rollback", &sqltypes.Result{}) + // We are adding both the variants of the delete statements that we can see in the test, since the deleted tables are initially stored as a map, the order is not defined. + db.AddQuery("delete from _vt.`tables` where TABLE_SCHEMA = database() and TABLE_NAME in ('t5', 't4', 't3', 't2')", &sqltypes.Result{}) + db.AddQuery("delete from _vt.`tables` where TABLE_SCHEMA = database() and TABLE_NAME in ('t4', 't5', 't3', 't2')", &sqltypes.Result{}) + db.AddQuery("insert into _vt.`tables`(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, CREATE_TIME) values (database(), 't2', 'create_table_t2', 123456790)", &sqltypes.Result{}) + db.AddQuery("insert into _vt.`tables`(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, CREATE_TIME) values (database(), 't3', 'create_table_t3', 123456789)", &sqltypes.Result{}) + } + + // Queries for reloading the views' information. + { + for _, tableName := range []string{"v2", "v3"} { + db.AddQuery(fmt.Sprintf(`show create table %s`, tableName), + sqltypes.MakeTestResult(sqltypes.MakeTestFields(" View | Create View | character_set_client | collation_connection", "varchar|varchar|varchar|varchar"), + fmt.Sprintf("%v|create_table_%v|utf8mb4|utf8mb4_0900_ai_ci", tableName, tableName))) + } + // We are adding both the variants of the select statements that we can see in the test, since the deleted views are initially stored as a map, the order is not defined. + db.AddQuery("select table_name, view_definition from information_schema.views where table_schema = database() and table_name in ('v4', 'v5', 'v3', 'v2')", + sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|view_definition", "varchar|varchar"), + "v2|select_v2", + "v3|select_v3", + )) + db.AddQuery("select table_name, view_definition from information_schema.views where table_schema = database() and table_name in ('v5', 'v4', 'v3', 'v2')", + sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|view_definition", "varchar|varchar"), + "v2|select_v2", + "v3|select_v3", + )) + + // We are adding both the variants of the delete statements that we can see in the test, since the deleted views are initially stored as a map, the order is not defined. + db.AddQuery("delete from _vt.views where TABLE_SCHEMA = database() and TABLE_NAME in ('v4', 'v5', 'v3', 'v2')", &sqltypes.Result{}) + db.AddQuery("delete from _vt.views where TABLE_SCHEMA = database() and TABLE_NAME in ('v5', 'v4', 'v3', 'v2')", &sqltypes.Result{}) + db.AddQuery("insert into _vt.views(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, VIEW_DEFINITION) values (database(), 'v2', 'create_table_v2', 'select_v2')", &sqltypes.Result{}) + db.AddQuery("insert into _vt.views(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, VIEW_DEFINITION) values (database(), 'v3', 'create_table_v3', 'select_v3')", &sqltypes.Result{}) + } + + // Verify the list of created, altered and dropped tables seen. + se.RegisterNotifier("test", func(full map[string]*Table, created, altered, dropped []*Table) { + require.ElementsMatch(t, extractNamesFromTablesList(created), []string{"t3", "v3"}) + require.ElementsMatch(t, extractNamesFromTablesList(altered), []string{"t2", "v2"}) + require.ElementsMatch(t, extractNamesFromTablesList(dropped), []string{"t4", "v4", "t5", "v5"}) + }, false) + + // Run the reload. + err = se.reload(context.Background(), false) + require.NoError(t, err) + require.NoError(t, db.LastError()) +} diff --git a/go/vt/vttablet/tabletserver/schema/historian.go b/go/vt/vttablet/tabletserver/schema/historian.go index 889536cab50..e40777c6fe5 100644 --- a/go/vt/vttablet/tabletserver/schema/historian.go +++ b/go/vt/vttablet/tabletserver/schema/historian.go @@ -18,51 +18,55 @@ package schema import ( "context" - "fmt" "sort" "sync" + "time" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/sqlparser" ) -const getSchemaVersions = "select id, pos, ddl, time_updated, schemax from _vt.schema_version where id > %d order by id asc" +const getInitialSchemaVersions = "select id, pos, ddl, time_updated, schemax from %s.schema_version where time_updated > %d order by id asc" +const getNextSchemaVersions = "select id, pos, ddl, time_updated, schemax from %s.schema_version where id > %d order by id asc" // vl defines the glog verbosity level for the package const vl = 10 // trackedSchema has the snapshot of the table at a given pos (reached by ddl) type trackedSchema struct { - schema map[string]*binlogdatapb.MinimalTable - pos mysql.Position - ddl string + schema map[string]*binlogdatapb.MinimalTable + pos replication.Position + ddl string + timeUpdated int64 } // historian implements the Historian interface by calling schema.Engine for the underlying schema // and supplying a schema for a specific version by loading the cached values from the schema_version table // The schema version table is populated by the Tracker type historian struct { - conns *connpool.Pool - lastID int64 - schemas []*trackedSchema - mu sync.Mutex - enabled bool - isOpen bool + conns *connpool.Pool + lastID int64 + schemas []*trackedSchema + mu sync.Mutex + enabled bool + isOpen bool + schemaMaxAgeSeconds int64 } // newHistorian creates a new historian. It expects a schema.Engine instance -func newHistorian(enabled bool, conns *connpool.Pool) *historian { +func newHistorian(enabled bool, schemaMaxAgeSeconds int64, conns *connpool.Pool) *historian { sh := historian{ - conns: conns, - lastID: 0, - enabled: enabled, + conns: conns, + lastID: 0, + enabled: enabled, + schemaMaxAgeSeconds: schemaMaxAgeSeconds, } return &sh } @@ -113,8 +117,9 @@ func (h *historian) Close() { log.Info("Historian: closed") } -// RegisterVersionEvent is called by the vstream when it encounters a version event (an insert into _vt.schema_tracking) -// It triggers the historian to load the newer rows from the database to update its cache +// RegisterVersionEvent is called by the vstream when it encounters a version event (an +// insert into the schema_tracking table). It triggers the historian to load the newer +// rows from the database to update its cache. func (h *historian) RegisterVersionEvent() error { h.mu.Lock() defer h.mu.Unlock() @@ -140,7 +145,7 @@ func (h *historian) GetTableForPos(tableName sqlparser.IdentifierCS, gtid string if gtid == "" { return nil, nil } - pos, err := mysql.DecodePosition(gtid) + pos, err := replication.DecodePosition(gtid) if err != nil { return nil, err } @@ -162,7 +167,17 @@ func (h *historian) loadFromDB(ctx context.Context) error { return err } defer conn.Recycle() - tableData, err := conn.Exec(ctx, fmt.Sprintf(getSchemaVersions, h.lastID), 10000, true) + + var tableData *sqltypes.Result + if h.lastID == 0 && h.schemaMaxAgeSeconds > 0 { // only at vttablet start + schemaMaxAge := time.Now().UTC().Add(time.Duration(-h.schemaMaxAgeSeconds) * time.Second) + tableData, err = conn.Exec(ctx, sqlparser.BuildParsedQuery(getInitialSchemaVersions, sidecar.GetIdentifier(), + schemaMaxAge.Unix()).Query, 10000, true) + } else { + tableData, err = conn.Exec(ctx, sqlparser.BuildParsedQuery(getNextSchemaVersions, sidecar.GetIdentifier(), + h.lastID).Query, 10000, true) + } + if err != nil { log.Infof("Error reading schema_tracking table %v, will operate with the latest available schema", err) return nil @@ -175,18 +190,26 @@ func (h *historian) loadFromDB(ctx context.Context) error { h.schemas = append(h.schemas, trackedSchema) h.lastID = id } + + if h.lastID != 0 && h.schemaMaxAgeSeconds > 0 { + // To avoid keeping old schemas in memory which can lead to an eventual memory leak + // we purge any older than h.schemaMaxAgeSeconds. Only needs to be done when adding + // new schema rows. + h.purgeOldSchemas() + } + h.sortSchemas() return nil } // readRow converts a row from the schema_version table to a trackedSchema func (h *historian) readRow(row []sqltypes.Value) (*trackedSchema, int64, error) { - id, _ := evalengine.ToInt64(row[0]) + id, _ := row[0].ToCastInt64() rowBytes, err := row[1].ToBytes() if err != nil { return nil, 0, err } - pos, err := mysql.DecodePosition(string(rowBytes)) + pos, err := replication.DecodePosition(string(rowBytes)) if err != nil { return nil, 0, err } @@ -195,7 +218,7 @@ func (h *historian) readRow(row []sqltypes.Value) (*trackedSchema, int64, error) return nil, 0, err } ddl := string(rowBytes) - timeUpdated, err := evalengine.ToInt64(row[3]) + timeUpdated, err := row[3].ToCastInt64() if err != nil { return nil, 0, err } @@ -208,20 +231,47 @@ func (h *historian) readRow(row []sqltypes.Value) (*trackedSchema, int64, error) return nil, 0, err } log.V(vl).Infof("Read tracked schema from db: id %d, pos %v, ddl %s, schema len %d, time_updated %d \n", - id, mysql.EncodePosition(pos), ddl, len(sch.Tables), timeUpdated) + id, replication.EncodePosition(pos), ddl, len(sch.Tables), timeUpdated) tables := map[string]*binlogdatapb.MinimalTable{} for _, t := range sch.Tables { tables[t.Name] = t } tSchema := &trackedSchema{ - schema: tables, - pos: pos, - ddl: ddl, + schema: tables, + pos: pos, + ddl: ddl, + timeUpdated: timeUpdated, } return tSchema, id, nil } +func (h *historian) purgeOldSchemas() { + maxAgeDuration := time.Duration(h.schemaMaxAgeSeconds) * time.Second + shouldPurge := false + + // check if we have any schemas we need to purge and only create the filtered + // slice if necessary + for _, s := range h.schemas { + if time.Since(time.Unix(s.timeUpdated, 0)) > maxAgeDuration { + shouldPurge = true + break + } + } + + if !shouldPurge { + return + } + + filtered := make([]*trackedSchema, 0) + for _, s := range h.schemas { + if time.Since(time.Unix(s.timeUpdated, 0)) < maxAgeDuration { + filtered = append(filtered, s) + } + } + h.schemas = filtered +} + // sortSchemas sorts entries in ascending order of gtid, ex: 40,44,48 func (h *historian) sortSchemas() { sort.Slice(h.schemas, func(i int, j int) bool { @@ -230,7 +280,7 @@ func (h *historian) sortSchemas() { } // getTableFromHistoryForPos looks in the cache for a schema for a specific gtid -func (h *historian) getTableFromHistoryForPos(tableName sqlparser.IdentifierCS, pos mysql.Position) *binlogdatapb.MinimalTable { +func (h *historian) getTableFromHistoryForPos(tableName sqlparser.IdentifierCS, pos replication.Position) *binlogdatapb.MinimalTable { idx := sort.Search(len(h.schemas), func(i int) bool { return pos.Equal(h.schemas[i].pos) || !pos.AtLeast(h.schemas[i].pos) }) diff --git a/go/vt/vttablet/tabletserver/schema/historian_test.go b/go/vt/vttablet/tabletserver/schema/historian_test.go index 8bdfa1c0d7f..f66306966de 100644 --- a/go/vt/vttablet/tabletserver/schema/historian_test.go +++ b/go/vt/vttablet/tabletserver/schema/historian_test.go @@ -19,9 +19,13 @@ package schema import ( "fmt" "testing" + "time" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" @@ -34,10 +38,14 @@ func getTable(name string, fieldNames []string, fieldTypes []querypb.Type, pks [ } fields := []*querypb.Field{} for i := range fieldNames { + typ := fieldTypes[i] + cs := collations.DefaultCollationForType(typ) fields = append(fields, &querypb.Field{ - Name: fieldNames[i], - Type: fieldTypes[i], - Table: name, + Name: fieldNames[i], + Type: typ, + Charset: uint32(cs), + Flags: mysql.FlagsForColumn(typ, cs), + Table: name, }) } table := &binlogdatapb.MinimalTable{ @@ -70,7 +78,7 @@ func getDbSchemaBlob(t *testing.T, tables map[string]*binlogdatapb.MinimalTable) } func TestHistorian(t *testing.T) { - se, db, cancel := getTestSchemaEngine(t) + se, db, cancel := getTestSchemaEngine(t, 0) defer cancel() se.EnableHistorian(false) @@ -118,7 +126,7 @@ func TestHistorian(t *testing.T) { }, }) require.Nil(t, se.RegisterVersionEvent()) - exp1 := `name:"t1" fields:{name:"id1" type:INT32 table:"t1"} fields:{name:"id2" type:INT32 table:"t1"} p_k_columns:0` + exp1 := `name:"t1" fields:{name:"id1" type:INT32 table:"t1" charset:63 flags:32768} fields:{name:"id2" type:INT32 table:"t1" charset:63 flags:32768} p_k_columns:0` tab, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid1) require.NoError(t, err) require.Equal(t, exp1, fmt.Sprintf("%v", tab)) @@ -138,7 +146,7 @@ func TestHistorian(t *testing.T) { }, }) require.Nil(t, se.RegisterVersionEvent()) - exp2 := `name:"t1" fields:{name:"id1" type:INT32 table:"t1"} fields:{name:"id2" type:VARBINARY table:"t1"} p_k_columns:0` + exp2 := `name:"t1" fields:{name:"id1" type:INT32 table:"t1" charset:63 flags:32768} fields:{name:"id2" type:VARBINARY table:"t1" charset:63 flags:128} p_k_columns:0` tab, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid2) require.NoError(t, err) require.Equal(t, exp2, fmt.Sprintf("%v", tab)) @@ -158,7 +166,7 @@ func TestHistorian(t *testing.T) { }, }) require.Nil(t, se.RegisterVersionEvent()) - exp3 := `name:"t1" fields:{name:"id1" type:INT32 table:"t1"} fields:{name:"id2" type:VARBINARY table:"t1"} fields:{name:"id3" type:INT32 table:"t1"} p_k_columns:0` + exp3 := `name:"t1" fields:{name:"id1" type:INT32 table:"t1" charset:63 flags:32768} fields:{name:"id2" type:VARBINARY table:"t1" charset:63 flags:128} fields:{name:"id3" type:INT32 table:"t1" charset:63 flags:32768} p_k_columns:0` tab, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid3) require.NoError(t, err) require.Equal(t, exp3, fmt.Sprintf("%v", tab)) @@ -173,3 +181,77 @@ func TestHistorian(t *testing.T) { require.NoError(t, err) require.Equal(t, exp3, fmt.Sprintf("%v", tab)) } + +func TestHistorianPurgeOldSchemas(t *testing.T) { + schemaVersionMaxAgeSeconds := 3600 // 1 hour + se, db, cancel := getTestSchemaEngine(t, int64(schemaVersionMaxAgeSeconds)) + defer cancel() + + gtidPrefix := "MySQL56/7b04699f-f5e9-11e9-bf88-9cb6d089e1c3:" + gtid1 := gtidPrefix + "1-10" + ddl1 := "create table tracker_test (id int)" + // create the first record 1 day ago so it gets purged from memory + ts1 := time.Now().Add(time.Duration(-24) * time.Hour) + _, _, _ = ddl1, ts1, db + se.EnableHistorian(true) + _, err := se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid1) + require.Equal(t, "table t1 not found in vttablet schema", err.Error()) + var blob1 string + + fields := []*querypb.Field{{ + Name: "id", + Type: sqltypes.Int32, + }, { + Name: "pos", + Type: sqltypes.VarBinary, + }, { + Name: "ddl", + Type: sqltypes.VarBinary, + }, { + Name: "time_updated", + Type: sqltypes.Int32, + }, { + Name: "schemax", + Type: sqltypes.Blob, + }} + + table := getTable("t1", []string{"id1", "id2"}, []querypb.Type{querypb.Type_INT32, querypb.Type_INT32}, []int64{0}) + tables := make(map[string]*binlogdatapb.MinimalTable) + tables["t1"] = table + blob1 = getDbSchemaBlob(t, tables) + db.AddQueryPattern("select id, pos, ddl, time_updated, schemax from _vt\\.schema_version where time_updated \\>.*", &sqltypes.Result{ + Fields: fields, + Rows: [][]sqltypes.Value{ + {sqltypes.NewInt32(1), sqltypes.NewVarBinary(gtid1), sqltypes.NewVarBinary(ddl1), sqltypes.NewInt32(int32(ts1.Unix())), sqltypes.NewVarBinary(blob1)}, + }, + }) + require.Nil(t, se.RegisterVersionEvent()) + _, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid1) + // validate the old schema has been purged + require.Equal(t, "table t1 not found in vttablet schema", err.Error()) + require.Equal(t, 0, len(se.historian.schemas)) + + // add a second schema record row with a time_updated that won't be purged + gtid2 := gtidPrefix + "1-20" + _, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid2) + require.Equal(t, "table t1 not found in vttablet schema", err.Error()) + + table = getTable("t1", []string{"id1", "id2"}, []querypb.Type{querypb.Type_INT32, querypb.Type_VARBINARY}, []int64{0}) + tables["t1"] = table + blob2 := getDbSchemaBlob(t, tables) + ddl2 := "alter table t1 modify column id2 varbinary" + // set time_updated younger than the cutoff from historian.schemaMaxAgeSeconds + ts2 := time.Now().Add(time.Duration(-60) * time.Second) + db.AddQuery("select id, pos, ddl, time_updated, schemax from _vt.schema_version where id > 1 order by id asc", &sqltypes.Result{ + Fields: fields, + Rows: [][]sqltypes.Value{ + {sqltypes.NewInt32(2), sqltypes.NewVarBinary(gtid2), sqltypes.NewVarBinary(ddl2), sqltypes.NewInt32(int32(ts2.Unix())), sqltypes.NewVarBinary(blob2)}, + }, + }) + require.Nil(t, se.RegisterVersionEvent()) + exp2 := `name:"t1" fields:{name:"id1" type:INT32 table:"t1" charset:63 flags:32768} fields:{name:"id2" type:VARBINARY table:"t1" charset:63 flags:128} p_k_columns:0` + tab, err := se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid2) + require.NoError(t, err) + require.Equal(t, exp2, fmt.Sprintf("%v", tab)) + require.Equal(t, 1, len(se.historian.schemas)) +} diff --git a/go/vt/vttablet/tabletserver/schema/load_table.go b/go/vt/vttablet/tabletserver/schema/load_table.go index 457129314cf..08e70fc321d 100644 --- a/go/vt/vttablet/tabletserver/schema/load_table.go +++ b/go/vt/vttablet/tabletserver/schema/load_table.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/mysqlctl/tmutils" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/evalengine" @@ -33,8 +34,8 @@ import ( ) // LoadTable creates a Table from the schema info in the database. -func LoadTable(conn *connpool.DBConn, databaseName, tableName string, comment string) (*Table, error) { - ta := NewTable(tableName) +func LoadTable(conn *connpool.DBConn, databaseName, tableName, tableType string, comment string) (*Table, error) { + ta := NewTable(tableName, NoType) sqlTableName := sqlparser.String(ta.Name) if err := fetchColumns(ta, conn, databaseName, sqlTableName); err != nil { return nil, err @@ -48,6 +49,8 @@ func LoadTable(conn *connpool.DBConn, databaseName, tableName string, comment st return nil, err } ta.Type = Message + case strings.Contains(tableType, tmutils.TableView): + ta.Type = View } return ta, nil } diff --git a/go/vt/vttablet/tabletserver/schema/load_table_test.go b/go/vt/vttablet/tabletserver/schema/load_table_test.go index 92ebd0868ce..eeefb688e61 100644 --- a/go/vt/vttablet/tabletserver/schema/load_table_test.go +++ b/go/vt/vttablet/tabletserver/schema/load_table_test.go @@ -19,11 +19,12 @@ package schema import ( "context" "errors" - "reflect" "strings" "testing" "time" + "vitess.io/vitess/go/test/utils" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -40,11 +41,32 @@ func TestLoadTable(t *testing.T) { defer db.Close() mockLoadTableQueries(db) table, err := newTestLoadTable("USER_TABLE", "test table", db) - if err != nil { - t.Fatal(err) + require.NoError(t, err) + want := &Table{ + Name: sqlparser.NewIdentifierCS("test_table"), + Fields: []*querypb.Field{{ + Name: "pk", + Type: sqltypes.Int32, + }, { + Name: "name", + Type: sqltypes.Int32, + }, { + Name: "addr", + Type: sqltypes.Int32, + }}, } + assert.Equal(t, want, table) +} + +func TestLoadView(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + mockLoadTableQueries(db) + table, err := newTestLoadTable("VIEW", "test table", db) + require.NoError(t, err) want := &Table{ Name: sqlparser.NewIdentifierCS("test_table"), + Type: View, Fields: []*querypb.Field{{ Name: "pk", Type: sqltypes.Int32, @@ -59,14 +81,14 @@ func TestLoadTable(t *testing.T) { assert.Equal(t, want, table) } +// TestLoadTableSequence tests that sequence tables are loaded correctly. +// It also confirms that a reset of a sequence table works. func TestLoadTableSequence(t *testing.T) { db := fakesqldb.New(t) defer db.Close() mockLoadTableQueries(db) table, err := newTestLoadTable("USER_TABLE", "vitess_sequence", db) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) want := &Table{ Name: sqlparser.NewIdentifierCS("test_table"), Type: Sequence, @@ -74,9 +96,12 @@ func TestLoadTableSequence(t *testing.T) { } table.Fields = nil table.PKColumns = nil - if !reflect.DeepEqual(table, want) { - t.Errorf("Table:\n%#v, want\n%#v", table, want) - } + utils.MustMatch(t, want, table) + + table.SequenceInfo.NextVal = 10 + table.SequenceInfo.LastVal = 5 + table.SequenceInfo.Reset() + utils.MustMatch(t, want, table) } func TestLoadTableMessage(t *testing.T) { @@ -84,9 +109,7 @@ func TestLoadTableMessage(t *testing.T) { defer db.Close() mockMessageTableQueries(db) table, err := newTestLoadTable("USER_TABLE", "vitess_message,vt_ack_wait=30,vt_purge_after=120,vt_batch_size=1,vt_cache_size=10,vt_poller_interval=30", db) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) want := &Table{ Name: sqlparser.NewIdentifierCS("test_table"), Type: Message, @@ -206,10 +229,11 @@ func newTestLoadTable(tableType string, comment string, db *fakesqldb.DB) (*Tabl ctx := context.Background() appParams := db.ConnParams() dbaParams := db.ConnParams() - connPool := connpool.NewPool(tabletenv.NewEnv(nil, "SchemaTest"), "", tabletenv.ConnPoolConfig{ - Size: 2, - IdleTimeoutSeconds: 10, - }) + cfg := tabletenv.ConnPoolConfig{ + Size: 2, + } + _ = cfg.IdleTimeoutSeconds.Set("10s") + connPool := connpool.NewPool(tabletenv.NewEnv(nil, "SchemaTest"), "", cfg) connPool.Open(appParams, dbaParams, appParams) conn, err := connPool.Get(ctx, nil) if err != nil { @@ -217,7 +241,7 @@ func newTestLoadTable(tableType string, comment string, db *fakesqldb.DB) (*Tabl } defer conn.Recycle() - return LoadTable(conn, "fakesqldb", "test_table", comment) + return LoadTable(conn, "fakesqldb", "test_table", tableType, comment) } func mockLoadTableQueries(db *fakesqldb.DB) { diff --git a/go/vt/vttablet/tabletserver/schema/main_test.go b/go/vt/vttablet/tabletserver/schema/main_test.go index ada5c8085a1..0948c1313fc 100644 --- a/go/vt/vttablet/tabletserver/schema/main_test.go +++ b/go/vt/vttablet/tabletserver/schema/main_test.go @@ -27,7 +27,7 @@ import ( "vitess.io/vitess/go/sqltypes" ) -func getTestSchemaEngine(t *testing.T) (*Engine, *fakesqldb.DB, func()) { +func getTestSchemaEngine(t *testing.T, schemaMaxAgeSeconds int64) (*Engine, *fakesqldb.DB, func()) { db := fakesqldb.New(t) db.AddQuery("select unix_timestamp()", sqltypes.MakeTestResult(sqltypes.MakeTestFields( "t", @@ -37,7 +37,7 @@ func getTestSchemaEngine(t *testing.T) (*Engine, *fakesqldb.DB, func()) { db.AddQueryPattern(baseShowTablesPattern, &sqltypes.Result{}) db.AddQuery(mysql.BaseShowPrimary, &sqltypes.Result{}) AddFakeInnoDBReadRowsResult(db, 1) - se := newEngine(10, 10*time.Second, 10*time.Second, db) + se := newEngine(10*time.Second, 10*time.Second, schemaMaxAgeSeconds, db) require.NoError(t, se.Open()) cancel := func() { defer db.Close() diff --git a/go/vt/vttablet/tabletserver/schema/schema.go b/go/vt/vttablet/tabletserver/schema/schema.go index 6dd2a3fef6d..95c191392cd 100644 --- a/go/vt/vttablet/tabletserver/schema/schema.go +++ b/go/vt/vttablet/tabletserver/schema/schema.go @@ -20,6 +20,8 @@ import ( "sync" "time" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" querypb "vitess.io/vitess/go/vt/proto/query" @@ -30,6 +32,7 @@ const ( NoType = iota Sequence Message + View ) // TypeNames allows to fetch a the type name for a table. @@ -38,6 +41,7 @@ var TypeNames = []string{ "none", "sequence", "message", + "view", } // Table contains info about a table. @@ -69,6 +73,23 @@ type SequenceInfo struct { LastVal int64 } +// Reset clears the cache for the sequence. This is called to ensure that we always start with a fresh cache, +// when a new primary is elected, and, when a table is moved into a new keyspace. +// When we first need a new value from a sequence, i.e. when the schema engine sees a uninitialized sequence, it will +// get the next set of values from the backing sequence table and cache them. +func (seq *SequenceInfo) Reset() { + seq.Lock() + defer seq.Unlock() + seq.NextVal = 0 + seq.LastVal = 0 +} + +func (seq *SequenceInfo) String() { + seq.Lock() + defer seq.Unlock() + log.Infof("SequenceInfo: NextVal: %d, LastVal: %d", seq.NextVal, seq.LastVal) +} + // MessageInfo contains info specific to message tables. type MessageInfo struct { // Fields stores the field info to be @@ -107,9 +128,10 @@ type MessageInfo struct { } // NewTable creates a new Table. -func NewTable(name string) *Table { +func NewTable(name string, tableType int) *Table { return &Table{ Name: sqlparser.NewIdentifierCS(name), + Type: tableType, } } diff --git a/go/vt/vttablet/tabletserver/schema/schemaz.go b/go/vt/vttablet/tabletserver/schema/schemaz.go index 487fbe07c5e..312f5efa6cc 100644 --- a/go/vt/vttablet/tabletserver/schema/schemaz.go +++ b/go/vt/vttablet/tabletserver/schema/schemaz.go @@ -17,10 +17,11 @@ limitations under the License. package schema import ( - "html/template" "net/http" "sort" + "github.com/google/safehtml/template" + "vitess.io/vitess/go/acl" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logz" diff --git a/go/vt/vttablet/tabletserver/schema/tracker.go b/go/vt/vttablet/tabletserver/schema/tracker.go index 32381db8842..9e036bb5139 100644 --- a/go/vt/vttablet/tabletserver/schema/tracker.go +++ b/go/vt/vttablet/tabletserver/schema/tracker.go @@ -23,10 +23,9 @@ import ( "sync" "time" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/schema" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/sqltypes" @@ -34,15 +33,16 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" ) // VStreamer defines the functions of VStreamer // that the replicationWatcher needs. type VStreamer interface { - Stream(ctx context.Context, startPos string, tablePKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error + Stream(ctx context.Context, startPos string, tablePKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, throttlerApp throttlerapp.Name, send func([]*binlogdatapb.VEvent) error) error } -// Tracker watches the replication and saves the latest schema into _vt.schema_version when a DDL is encountered. +// Tracker watches the replication and saves the latest schema into the schema_version table when a DDL is encountered. type Tracker struct { enabled bool @@ -128,7 +128,7 @@ func (tr *Tracker) process(ctx context.Context) { var gtid string for { - err := tr.vs.Stream(ctx, "current", nil, filter, func(events []*binlogdatapb.VEvent) error { + err := tr.vs.Stream(ctx, "current", nil, filter, throttlerapp.SchemaTrackerName, func(events []*binlogdatapb.VEvent) error { for _, event := range events { if event.Type == binlogdatapb.VEventType_GTID { gtid = event.Gtid @@ -155,10 +155,10 @@ func (tr *Tracker) process(ctx context.Context) { } } -func (tr *Tracker) currentPosition(ctx context.Context) (mysql.Position, error) { +func (tr *Tracker) currentPosition(ctx context.Context) (replication.Position, error) { conn, err := tr.engine.cp.Connect(ctx) if err != nil { - return mysql.Position{}, err + return replication.Position{}, err } defer conn.Close() return conn.PrimaryPosition() @@ -170,7 +170,8 @@ func (tr *Tracker) isSchemaVersionTableEmpty(ctx context.Context) (bool, error) return false, err } defer conn.Recycle() - result, err := conn.Exec(ctx, "select id from _vt.schema_version limit 1", 1, false) + result, err := conn.Exec(ctx, sqlparser.BuildParsedQuery("select id from %s.schema_version limit 1", + sidecar.GetIdentifier()).Query, 1, false) if err != nil { return false, err } @@ -188,7 +189,7 @@ func (tr *Tracker) possiblyInsertInitialSchema(ctx context.Context) error { if err != nil { return err } - if !needsWarming { // _vt.schema_version is not empty, nothing to do here + if !needsWarming { // the schema_version table is not empty, nothing to do here return nil } if err = tr.engine.Reload(ctx); err != nil { @@ -201,7 +202,7 @@ func (tr *Tracker) possiblyInsertInitialSchema(ctx context.Context) error { if err != nil { return err } - gtid := mysql.EncodePosition(pos) + gtid := replication.EncodePosition(pos) log.Infof("Saving initial schema for gtid %s", gtid) return tr.saveCurrentSchemaToDb(ctx, gtid, ddl, timestamp) @@ -218,14 +219,10 @@ func (tr *Tracker) schemaUpdated(gtid string, ddl string, timestamp int64) error } func (tr *Tracker) saveCurrentSchemaToDb(ctx context.Context, gtid, ddl string, timestamp int64) error { - tables := tr.engine.GetSchema() - dbSchema := &binlogdatapb.MinimalSchema{ - Tables: []*binlogdatapb.MinimalTable{}, - } - for _, table := range tables { - dbSchema.Tables = append(dbSchema.Tables, newMinimalTable(table)) + blob, err := tr.engine.MarshalMinimalSchema() + if err != nil { + return err } - blob, _ := dbSchema.MarshalVT() conn, err := tr.engine.GetConnection(ctx) if err != nil { @@ -233,9 +230,10 @@ func (tr *Tracker) saveCurrentSchemaToDb(ctx context.Context, gtid, ddl string, } defer conn.Recycle() - query := fmt.Sprintf("insert into _vt.schema_version "+ + query := sqlparser.BuildParsedQuery("insert into %s.schema_version "+ "(pos, ddl, schemax, time_updated) "+ - "values (%v, %v, %v, %d)", encodeString(gtid), encodeString(ddl), encodeString(string(blob)), timestamp) + "values (%s, %s, %s, %d)", sidecar.GetIdentifier(), encodeString(gtid), + encodeString(ddl), encodeString(string(blob)), timestamp).Query _, err = conn.Exec(ctx, query, 1, false) if err != nil { return err @@ -243,19 +241,6 @@ func (tr *Tracker) saveCurrentSchemaToDb(ctx context.Context, gtid, ddl string, return nil } -func newMinimalTable(st *Table) *binlogdatapb.MinimalTable { - table := &binlogdatapb.MinimalTable{ - Name: st.Name.String(), - Fields: st.Fields, - } - var pkc []int64 - for _, pk := range st.PKColumns { - pkc = append(pkc, int64(pk)) - } - table.PKColumns = pkc - return table -} - func encodeString(in string) string { buf := bytes.NewBuffer(nil) sqltypes.NewVarChar(in).EncodeSQL(buf) diff --git a/go/vt/vttablet/tabletserver/schema/tracker_test.go b/go/vt/vttablet/tabletserver/schema/tracker_test.go index 2b30ee47e55..2029235b2e3 100644 --- a/go/vt/vttablet/tabletserver/schema/tracker_test.go +++ b/go/vt/vttablet/tabletserver/schema/tracker_test.go @@ -26,11 +26,12 @@ import ( "vitess.io/vitess/go/sqltypes" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" ) func TestTracker(t *testing.T) { initialSchemaInserted := false - se, db, cancel := getTestSchemaEngine(t) + se, db, cancel := getTestSchemaEngine(t, 0) defer cancel() gtid1 := "MySQL56/7b04699f-f5e9-11e9-bf88-9cb6d089e1c3:1-10" ddl1 := "create table tracker_test (id int)" @@ -91,7 +92,7 @@ func TestTracker(t *testing.T) { func TestTrackerShouldNotInsertInitialSchema(t *testing.T) { initialSchemaInserted := false - se, db, cancel := getTestSchemaEngine(t) + se, db, cancel := getTestSchemaEngine(t, 0) gtid1 := "MySQL56/7b04699f-f5e9-11e9-bf88-9cb6d089e1c3:1-10" defer cancel() @@ -137,7 +138,7 @@ type fakeVstreamer struct { events [][]*binlogdatapb.VEvent } -func (f *fakeVstreamer) Stream(ctx context.Context, startPos string, tablePKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { +func (f *fakeVstreamer) Stream(ctx context.Context, startPos string, tablePKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, throttlerApp throttlerapp.Name, send func([]*binlogdatapb.VEvent) error) error { for _, events := range f.events { err := send(events) if err != nil { diff --git a/go/vt/vttablet/tabletserver/state_manager.go b/go/vt/vttablet/tabletserver/state_manager.go index e3a72edeabb..2115871c6bb 100644 --- a/go/vt/vttablet/tabletserver/state_manager.go +++ b/go/vt/vttablet/tabletserver/state_manager.go @@ -24,7 +24,6 @@ import ( "time" "golang.org/x/sync/semaphore" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/log" @@ -90,7 +89,7 @@ type stateManager struct { wantTabletType topodatapb.TabletType state servingState target *querypb.Target - terTimestamp time.Time + ptsTimestamp time.Time retrying bool replHealthy bool lameduck bool @@ -141,6 +140,7 @@ type ( EnsureConnectionAndDB(topodatapb.TabletType) error Open() error MakeNonPrimary() + MakePrimary(bool) Close() } @@ -191,7 +191,7 @@ type ( // Init performs the second phase of initialization. func (sm *stateManager) Init(env tabletenv.Env, target *querypb.Target) { - sm.target = proto.Clone(target).(*querypb.Target) + sm.target = target.CloneVT() sm.transitioning = semaphore.NewWeighted(1) sm.checkMySQLThrottler = semaphore.NewWeighted(1) sm.timebombDuration = env.Config().OltpReadPool.TimeoutSeconds.Get() * 10 @@ -208,7 +208,7 @@ func (sm *stateManager) Init(env tabletenv.Env, target *querypb.Target) { // be honored. // If sm is already in the requested state, it returns stateChanged as // false. -func (sm *stateManager) SetServingType(tabletType topodatapb.TabletType, terTimestamp time.Time, state servingState, reason string) error { +func (sm *stateManager) SetServingType(tabletType topodatapb.TabletType, ptsTimestamp time.Time, state servingState, reason string) error { defer sm.ExitLameduck() sm.hs.Open() @@ -218,8 +218,8 @@ func (sm *stateManager) SetServingType(tabletType topodatapb.TabletType, terTime state = StateNotConnected } - log.Infof("Starting transition to %v %v, timestamp: %v", tabletType, state, terTimestamp) - if sm.mustTransition(tabletType, terTimestamp, state, reason) { + log.Infof("Starting transition to %v %v, primary term start timestamp: %v", tabletType, state, ptsTimestamp) + if sm.mustTransition(tabletType, ptsTimestamp, state, reason) { return sm.execTransition(tabletType, state) } return nil @@ -229,7 +229,7 @@ func (sm *stateManager) SetServingType(tabletType topodatapb.TabletType, terTime // state. If so, it acquires the semaphore and returns true. If a transition is // already in progress, it waits. If the desired state is already reached, it // returns false without acquiring the semaphore. -func (sm *stateManager) mustTransition(tabletType topodatapb.TabletType, terTimestamp time.Time, state servingState, reason string) bool { +func (sm *stateManager) mustTransition(tabletType topodatapb.TabletType, ptsTimestamp time.Time, state servingState, reason string) bool { if sm.transitioning.Acquire(context.Background(), 1) != nil { return false } @@ -238,7 +238,7 @@ func (sm *stateManager) mustTransition(tabletType topodatapb.TabletType, terTime sm.wantTabletType = tabletType sm.wantState = state - sm.terTimestamp = terTimestamp + sm.ptsTimestamp = ptsTimestamp sm.reason = reason if sm.target.TabletType == tabletType && sm.state == state { sm.transitioning.Release(1) @@ -445,6 +445,11 @@ func (sm *stateManager) servePrimary() error { return err } + // We have to make the health streamer read to process updates from schema engine + // before we mark schema engine capable of running queries against the database. This is required + // to ensure that we don't miss any updates from the schema engine. + sm.hs.MakePrimary(true) + sm.se.MakePrimary(true) sm.rt.MakePrimary() sm.tracker.Open() // We instantly kill all stateful queries to allow for @@ -469,6 +474,8 @@ func (sm *stateManager) unservePrimary() error { return err } + sm.se.MakePrimary(false) + sm.hs.MakePrimary(false) sm.rt.MakePrimary() sm.setState(topodatapb.TabletType_PRIMARY, StateNotServing) return nil @@ -485,6 +492,7 @@ func (sm *stateManager) serveNonPrimary(wantTabletType topodatapb.TabletType) er sm.messager.Close() sm.tracker.Close() sm.se.MakeNonPrimary() + sm.hs.MakeNonPrimary() if err := sm.connect(wantTabletType); err != nil { return err @@ -502,6 +510,7 @@ func (sm *stateManager) unserveNonPrimary(wantTabletType topodatapb.TabletType) sm.unserveCommon() sm.se.MakeNonPrimary() + sm.hs.MakeNonPrimary() if err := sm.connect(wantTabletType); err != nil { return err @@ -629,7 +638,7 @@ func (sm *stateManager) stateStringLocked(tabletType topodatapb.TabletType, stat if tabletType != topodatapb.TabletType_PRIMARY { return fmt.Sprintf("%v: %v", tabletType, state) } - return fmt.Sprintf("%v: %v, %v", tabletType, state, sm.terTimestamp.Local().Format("Jan 2, 2006 at 15:04:05 (MST)")) + return fmt.Sprintf("%v: %v, %v", tabletType, state, sm.ptsTimestamp.Local().Format("Jan 2, 2006 at 15:04:05 (MST)")) } func (sm *stateManager) handleGracePeriod(tabletType topodatapb.TabletType) { @@ -664,7 +673,7 @@ func (sm *stateManager) Broadcast() { defer sm.mu.Unlock() lag, err := sm.refreshReplHealthLocked() - sm.hs.ChangeState(sm.target.TabletType, sm.terTimestamp, lag, err, sm.isServingLocked()) + sm.hs.ChangeState(sm.target.TabletType, sm.ptsTimestamp, lag, err, sm.isServingLocked()) } func (sm *stateManager) refreshReplHealthLocked() (time.Duration, error) { @@ -796,7 +805,7 @@ func (sm *stateManager) State() servingState { func (sm *stateManager) Target() *querypb.Target { sm.mu.Lock() defer sm.mu.Unlock() - return proto.Clone(sm.target).(*querypb.Target) + return sm.target.CloneVT() } // IsServingString returns the name of the current TabletServer state. diff --git a/go/vt/vttablet/tabletserver/state_manager_test.go b/go/vt/vttablet/tabletserver/state_manager_test.go index e06ce4126a1..23e70a66760 100644 --- a/go/vt/vttablet/tabletserver/state_manager_test.go +++ b/go/vt/vttablet/tabletserver/state_manager_test.go @@ -30,6 +30,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" @@ -73,7 +74,7 @@ func TestStateManagerServePrimary(t *testing.T) { require.NoError(t, err) assert.Equal(t, false, sm.lameduck) - assert.Equal(t, testNow, sm.terTimestamp) + assert.Equal(t, testNow, sm.ptsTimestamp) verifySubcomponent(t, 1, sm.watcher, testStateClosed) @@ -516,10 +517,11 @@ func TestStateManagerCheckMySQL(t *testing.T) { } func TestStateManagerValidations(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() sm := newTestStateManager(t) target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} - sm.target = proto.Clone(target).(*querypb.Target) - + sm.target = target.CloneVT() err := sm.StartRequest(ctx, target, false) assert.Contains(t, err.Error(), "operation not allowed") @@ -578,6 +580,8 @@ func TestStateManagerValidations(t *testing.T) { } func TestStateManagerWaitForRequests(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() sm := newTestStateManager(t) defer sm.StopService() target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} @@ -705,7 +709,7 @@ func newTestStateManager(t *testing.T) *stateManager { statelessql: NewQueryList("stateless"), statefulql: NewQueryList("stateful"), olapql: NewQueryList("olap"), - hs: newHealthStreamer(env, &topodatapb.TabletAlias{}), + hs: newHealthStreamer(env, &topodatapb.TabletAlias{}, schema.NewEngine(env)), se: &testSchemaEngine{}, rt: &testReplTracker{lag: 1 * time.Second}, vstreamer: &testSubcomponent{}, @@ -790,6 +794,10 @@ func (te *testSchemaEngine) MakeNonPrimary() { te.nonPrimary = true } +func (te *testSchemaEngine) MakePrimary(serving bool) { + te.nonPrimary = false +} + func (te *testSchemaEngine) Close() { te.order = order.Add(1) te.state = testStateClosed diff --git a/go/vt/vttablet/tabletserver/stateful_connection.go b/go/vt/vttablet/tabletserver/stateful_connection.go index 490f5275e3b..97d20f594c9 100644 --- a/go/vt/vttablet/tabletserver/stateful_connection.go +++ b/go/vt/vttablet/tabletserver/stateful_connection.go @@ -21,11 +21,10 @@ import ( "fmt" "time" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/pools" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" - "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" @@ -97,7 +96,7 @@ func (sc *StatefulConnection) Exec(ctx context.Context, query string, maxrows in } r, err := sc.dbConn.ExecOnce(ctx, query, maxrows, wantfields) if err != nil { - if mysql.IsConnErr(err) { + if sqlerror.IsConnErr(err) { select { case <-ctx.Done(): // If the context is done, the query was killed. @@ -277,9 +276,6 @@ func (sc *StatefulConnection) LogTransaction(reason tx.ReleaseReason) { sc.Stats().UserTransactionCount.Add([]string{username, reason.Name()}, 1) sc.Stats().UserTransactionTimesNs.Add([]string{username, reason.Name()}, int64(duration)) sc.txProps.Stats.Add(reason.Name(), duration) - if sc.txProps.LogToFile { - log.Infof("Logged transaction: %s", sc.String(sc.env.Config().SanitizeLogMessages)) - } tabletenv.TxLogger.Send(sc) } diff --git a/go/vt/vttablet/tabletserver/stateful_connection_pool_test.go b/go/vt/vttablet/tabletserver/stateful_connection_pool_test.go index 79e70dc4ffe..b9ea4dfc185 100644 --- a/go/vt/vttablet/tabletserver/stateful_connection_pool_test.go +++ b/go/vt/vttablet/tabletserver/stateful_connection_pool_test.go @@ -29,9 +29,9 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/tx" ) -var ctx = context.Background() - func TestActivePoolClientRowsFound(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() db.AddQuery("begin", &sqltypes.Result{}) @@ -58,6 +58,8 @@ func TestActivePoolClientRowsFound(t *testing.T) { } func TestActivePoolForAllTxProps(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() pool := newActivePool() @@ -84,6 +86,8 @@ func TestActivePoolForAllTxProps(t *testing.T) { } func TestStatefulPoolShutdownNonTx(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() pool := newActivePool() @@ -122,6 +126,8 @@ func TestStatefulPoolShutdownNonTx(t *testing.T) { } func TestStatefulPoolShutdownAll(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() pool := newActivePool() @@ -157,7 +163,7 @@ func TestActivePoolGetConnNonExistentTransaction(t *testing.T) { } func TestExecWithAbortedCtx(t *testing.T) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(context.Background()) db := fakesqldb.New(t) defer db.Close() pool := newActivePool() @@ -170,6 +176,8 @@ func TestExecWithAbortedCtx(t *testing.T) { } func TestExecWithDbconnClosed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() pool := newActivePool() @@ -183,6 +191,8 @@ func TestExecWithDbconnClosed(t *testing.T) { } func TestExecWithDbconnClosedHavingTx(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() pool := newActivePool() @@ -197,6 +207,8 @@ func TestExecWithDbconnClosedHavingTx(t *testing.T) { } func TestFailOnConnectionRegistering(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() db := fakesqldb.New(t) defer db.Close() pool := newActivePool() diff --git a/go/vt/vttablet/tabletserver/status.go b/go/vt/vttablet/tabletserver/status.go index aef46100d62..f91cc4ad566 100644 --- a/go/vt/vttablet/tabletserver/status.go +++ b/go/vt/vttablet/tabletserver/status.go @@ -36,7 +36,7 @@ const ( unhappyClass = "unhappy" ) -var ( +const ( // This template is a slight duplicate of the one in go/cmd/vttablet/status.go. headerTemplate = `